/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * SunOS MT STREAMS NIU/Neptune 10Gb Ethernet Device Driver.
 */
#include <sys/nxge/nxge_impl.h>
#include <sys/nxge/nxge_hio.h>
#include <sys/nxge/nxge_rxdma.h>
#include <sys/pcie.h>

uint32_t nxge_use_partition = 0;	/* debug partition flag */
uint32_t nxge_dma_obp_props_only = 1;	/* use OBP published props */
uint32_t nxge_use_rdc_intr = 1;		/* debug to assign rdc intr */

/*
 * PSARC/2007/453 MSI-X interrupt limit override
 * (This PSARC case is limited to MSI-X vectors
 * and SPARC platforms only).
 */
#if defined(_BIG_ENDIAN)
uint32_t nxge_msi_enable = 2;
#else
uint32_t nxge_msi_enable = 1;
#endif

/*
 * Software workaround for a Neptune (PCI-E) hardware interrupt bug:
 * the hardware may generate spurious interrupts after the device
 * interrupt handler has been removed. If this flag is enabled, the
 * driver will reset the hardware when devices are being detached.
 */
uint32_t nxge_peu_reset_enable = 0;

/*
 * Software workaround for the hardware
 * checksum bugs that affect packet transmission
 * and receive:
 *
 * Usage of nxge_cksum_offload:
 *
 *  (1) nxge_cksum_offload = 0 (default):
 *	- transmitting packets:
 *	  TCP: uses the hardware checksum feature.
 *	  UDP: driver will compute the software checksum
 *	       based on the partial checksum computed
 *	       by the IP layer.
 *	- receiving packets:
 *	  TCP: marks packets checksum flags based on hardware result.
 *	  UDP: will not mark checksum flags.
 *
 *  (2) nxge_cksum_offload = 1:
 *	- transmitting packets:
 *	  TCP/UDP: uses the hardware checksum feature.
 *	- receiving packets:
 *	  TCP/UDP: marks packet checksum flags based on hardware result.
 *
 *  (3) nxge_cksum_offload = 2:
 *	- The driver will not register its checksum capability.
 *	  Checksum for both TCP and UDP will be computed
 *	  by the stack.
 *	- The software LSO is not allowed in this case.
 *
 *  (4) nxge_cksum_offload > 2:
 *	- Will be treated as if it were set to 2
 *	  (stack will compute the checksum).
 *
 *  (5) If the hardware bug is fixed, this workaround
 *	needs to be updated accordingly to reflect
 *	the new hardware revision.
 */
uint32_t nxge_cksum_offload = 0;
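/*
 * Like the other tunables in this file, this flag can be set
 * persistently from /etc/system. An illustrative example (the value
 * shown is an example, not a recommendation), forcing the stack to
 * compute all checksums:
 *
 *	set nxge:nxge_cksum_offload = 2
 */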
/*
 * Globals: tunable parameters (/etc/system or adb)
 */
uint32_t nxge_rbr_size = NXGE_RBR_RBB_DEFAULT;
uint32_t nxge_rbr_spare_size = 0;
uint32_t nxge_rcr_size = NXGE_RCR_DEFAULT;
uint32_t nxge_tx_ring_size = NXGE_TX_RING_DEFAULT;
boolean_t nxge_no_msg = B_TRUE;		/* control message display */
uint32_t nxge_no_link_notify = 0;	/* control DL_NOTIFY */
uint32_t nxge_bcopy_thresh = TX_BCOPY_MAX;
uint32_t nxge_dvma_thresh = TX_FASTDVMA_MIN;
uint32_t nxge_dma_stream_thresh = TX_STREAM_MIN;
uint32_t nxge_jumbo_mtu = TX_JUMBO_MTU;
boolean_t nxge_jumbo_enable = B_FALSE;
uint16_t nxge_rcr_timeout = NXGE_RDC_RCR_TIMEOUT;
uint16_t nxge_rcr_threshold = NXGE_RDC_RCR_THRESHOLD;
nxge_tx_mode_t nxge_tx_scheme = NXGE_USE_SERIAL;

/* MAX LSO size */
#define	NXGE_LSO_MAXLEN	65535
uint32_t nxge_lso_max = NXGE_LSO_MAXLEN;

/*
 * Debugging flags:
 *	nxge_no_tx_lb	  : transmit load balancing
 *	nxge_tx_lb_policy : 0 - TCP port (default)
 *			    3 - DEST MAC
 */
uint32_t nxge_no_tx_lb = 0;
uint32_t nxge_tx_lb_policy = NXGE_TX_LB_TCPUDP;

/*
 * Tunable to reduce the amount of time spent in the
 * ISR doing Rx processing.
 */
uint32_t nxge_max_rx_pkts = 1024;

/*
 * Tunables to manage the receive buffer blocks.
 *
 * nxge_rx_threshold_hi:	copy all buffers.
 * nxge_rx_bcopy_size_type:	receive buffer block size type.
 * nxge_rx_threshold_lo:	copy only up to tunable block size type.
 */
nxge_rxbuf_threshold_t nxge_rx_threshold_hi = NXGE_RX_COPY_6;
nxge_rxbuf_type_t nxge_rx_buf_size_type = RCR_PKTBUFSZ_0;
nxge_rxbuf_threshold_t nxge_rx_threshold_lo = NXGE_RX_COPY_3;

/* Use kmem_alloc() to allocate data buffers. */
#if defined(_BIG_ENDIAN)
uint32_t nxge_use_kmem_alloc = 1;
#else
uint32_t nxge_use_kmem_alloc = 0;
#endif

rtrace_t npi_rtracebuf;

/*
 * The hardware sometimes fails to allow enough time for the link partner
 * to send an acknowledgement for packets that the hardware sent to it. The
 * hardware resends the packets earlier than it should in those instances.
 * This behavior caused some switches to acknowledge the wrong packets
 * and triggered a fatal error.
 * This software workaround sets the replay timer to a value
 * suggested by the hardware team.
 *
 * PCI config space replay timer register:
 * the replay timeout value below (0xc) is written
 * to the field at bits 18:14.
 */
#define	PCI_REPLAY_TIMEOUT_CFG_OFFSET	0xb8
#define	PCI_REPLAY_TIMEOUT_SHIFT	14

uint32_t nxge_set_replay_timer = 1;
uint32_t nxge_replay_timeout = 0xc;
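/*
 * A minimal sketch of the read-modify-write this workaround performs
 * (see nxge_set_pci_replay_timeout() later in this file). The 5-bit
 * field mask is an assumption based on the bits 18:14 description
 * above:
 *
 *	uint32_t val;
 *
 *	val = pci_config_get32(handle, PCI_REPLAY_TIMEOUT_CFG_OFFSET);
 *	val &= ~(0x1f << PCI_REPLAY_TIMEOUT_SHIFT);
 *	val |= (nxge_replay_timeout << PCI_REPLAY_TIMEOUT_SHIFT);
 *	pci_config_put32(handle, PCI_REPLAY_TIMEOUT_CFG_OFFSET, val);
 */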
/*
 * The transmit serialization sometimes causes the driver transmit
 * function to sleep longer than it should before being called.
 * The performance group suggests that a time wait tunable
 * can be used to set the maximum wait time when needed,
 * and the default is set to 1 tick.
 */
uint32_t nxge_tx_serial_maxsleep = 1;

#if defined(sun4v)
/*
 * Hypervisor N2/NIU services information.
 */
static hsvc_info_t niu_hsvc = {
	HSVC_REV_1, NULL, HSVC_GROUP_NIU, NIU_MAJOR_VER,
	NIU_MINOR_VER, "nxge"
};

static int nxge_hsvc_register(p_nxge_t);
#endif

/*
 * Function Prototypes
 */
static int nxge_attach(dev_info_t *, ddi_attach_cmd_t);
static int nxge_detach(dev_info_t *, ddi_detach_cmd_t);
static void nxge_unattach(p_nxge_t);
static int nxge_quiesce(dev_info_t *);

#if NXGE_PROPERTY
static void nxge_remove_hard_properties(p_nxge_t);
#endif

/*
 * These two functions are required by nxge_hio.c
 */
extern int nxge_m_mmac_add(void *arg, mac_multi_addr_t *maddr);
extern int nxge_m_mmac_remove(void *arg, mac_addr_slot_t slot);

static nxge_status_t nxge_setup_system_dma_pages(p_nxge_t);

static nxge_status_t nxge_setup_mutexes(p_nxge_t);
static void nxge_destroy_mutexes(p_nxge_t);

static nxge_status_t nxge_map_regs(p_nxge_t nxgep);
static void nxge_unmap_regs(p_nxge_t nxgep);
#ifdef NXGE_DEBUG
static void nxge_test_map_regs(p_nxge_t nxgep);
#endif

static nxge_status_t nxge_add_intrs(p_nxge_t nxgep);
static nxge_status_t nxge_add_soft_intrs(p_nxge_t nxgep);
static void nxge_remove_intrs(p_nxge_t nxgep);
static void nxge_remove_soft_intrs(p_nxge_t nxgep);

static nxge_status_t nxge_add_intrs_adv(p_nxge_t nxgep);
static nxge_status_t nxge_add_intrs_adv_type(p_nxge_t, uint32_t);
static nxge_status_t nxge_add_intrs_adv_type_fix(p_nxge_t, uint32_t);
static void nxge_intrs_enable(p_nxge_t nxgep);
static void nxge_intrs_disable(p_nxge_t nxgep);

static void nxge_suspend(p_nxge_t);
static nxge_status_t nxge_resume(p_nxge_t);

static nxge_status_t nxge_setup_dev(p_nxge_t);
static void nxge_destroy_dev(p_nxge_t);

static nxge_status_t nxge_alloc_mem_pool(p_nxge_t);
static void nxge_free_mem_pool(p_nxge_t);

nxge_status_t nxge_alloc_rx_mem_pool(p_nxge_t);
static void nxge_free_rx_mem_pool(p_nxge_t);

nxge_status_t nxge_alloc_tx_mem_pool(p_nxge_t);
static void nxge_free_tx_mem_pool(p_nxge_t);

static nxge_status_t nxge_dma_mem_alloc(p_nxge_t, dma_method_t,
	struct ddi_dma_attr *,
	size_t, ddi_device_acc_attr_t *, uint_t,
	p_nxge_dma_common_t);

static void nxge_dma_mem_free(p_nxge_dma_common_t);
static void nxge_dma_free_rx_data_buf(p_nxge_dma_common_t);

static nxge_status_t nxge_alloc_rx_buf_dma(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, size_t, size_t, uint32_t *);
static void nxge_free_rx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t);

static nxge_status_t nxge_alloc_rx_cntl_dma(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, size_t);
static void nxge_free_rx_cntl_dma(p_nxge_t, p_nxge_dma_common_t);

extern nxge_status_t nxge_alloc_tx_buf_dma(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, size_t, size_t, uint32_t *);
static void nxge_free_tx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t);

extern nxge_status_t nxge_alloc_tx_cntl_dma(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *,
	size_t);
static void nxge_free_tx_cntl_dma(p_nxge_t, p_nxge_dma_common_t);

static int nxge_init_common_dev(p_nxge_t);
static void nxge_uninit_common_dev(p_nxge_t);
extern int nxge_param_set_mac(p_nxge_t, queue_t *, mblk_t *,
	char *, caddr_t);
/*
 * The next declarations are for the GLDv3 interface.
 */
static int nxge_m_start(void *);
static void nxge_m_stop(void *);
static int nxge_m_unicst(void *, const uint8_t *);
static int nxge_m_multicst(void *, boolean_t, const uint8_t *);
static int nxge_m_promisc(void *, boolean_t);
static void nxge_m_ioctl(void *, queue_t *, mblk_t *);
static void nxge_m_resources(void *);
mblk_t *nxge_m_tx(void *arg, mblk_t *);
static nxge_status_t nxge_mac_register(p_nxge_t);
static int nxge_altmac_set(p_nxge_t nxgep, uint8_t *mac_addr,
	mac_addr_slot_t slot);
void nxge_mmac_kstat_update(p_nxge_t nxgep, mac_addr_slot_t slot,
	boolean_t factory);
static int nxge_m_mmac_reserve(void *arg, mac_multi_addr_t *maddr);
static int nxge_m_mmac_modify(void *arg, mac_multi_addr_t *maddr);
static int nxge_m_mmac_get(void *arg, mac_multi_addr_t *maddr);
static boolean_t nxge_m_getcapab(void *, mac_capab_t, void *);
static int nxge_m_setprop(void *, const char *, mac_prop_id_t,
	uint_t, const void *);
static int nxge_m_getprop(void *, const char *, mac_prop_id_t,
	uint_t, uint_t, void *);
static int nxge_set_priv_prop(nxge_t *, const char *, uint_t,
	const void *);
static int nxge_get_priv_prop(nxge_t *, const char *, uint_t, uint_t,
	void *);
static int nxge_get_def_val(nxge_t *, mac_prop_id_t, uint_t, void *);

static void nxge_niu_peu_reset(p_nxge_t nxgep);
static void nxge_set_pci_replay_timeout(nxge_t *);

mac_priv_prop_t nxge_priv_props[] = {
	{"_adv_10gfdx_cap", MAC_PROP_PERM_RW},
	{"_adv_pause_cap", MAC_PROP_PERM_RW},
	{"_function_number", MAC_PROP_PERM_READ},
	{"_fw_version", MAC_PROP_PERM_READ},
	{"_port_mode", MAC_PROP_PERM_READ},
	{"_hot_swap_phy", MAC_PROP_PERM_READ},
	{"_accept_jumbo", MAC_PROP_PERM_RW},
	{"_rxdma_intr_time", MAC_PROP_PERM_RW},
	{"_rxdma_intr_pkts", MAC_PROP_PERM_RW},
	{"_class_opt_ipv4_tcp", MAC_PROP_PERM_RW},
	{"_class_opt_ipv4_udp", MAC_PROP_PERM_RW},
	{"_class_opt_ipv4_ah", MAC_PROP_PERM_RW},
	{"_class_opt_ipv4_sctp", MAC_PROP_PERM_RW},
	{"_class_opt_ipv6_tcp", MAC_PROP_PERM_RW},
	{"_class_opt_ipv6_udp", MAC_PROP_PERM_RW},
	{"_class_opt_ipv6_ah", MAC_PROP_PERM_RW},
	{"_class_opt_ipv6_sctp", MAC_PROP_PERM_RW},
	{"_soft_lso_enable", MAC_PROP_PERM_RW}
};

#define	NXGE_MAX_PRIV_PROPS	\
	(sizeof (nxge_priv_props)/sizeof (mac_priv_prop_t))
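/*
 * The private (underscore-prefixed) properties above are surfaced
 * through the GLDv3 property interface. An illustrative example,
 * assuming a dladm that supports private link properties and an
 * instance named nxge0:
 *
 *	# dladm show-linkprop -p _soft_lso_enable nxge0
 *	# dladm set-linkprop -p _soft_lso_enable=1 nxge0
 */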
#define	NXGE_NEPTUNE_MAGIC	0x4E584745UL
#define	MAX_DUMP_SZ		256

#define	NXGE_M_CALLBACK_FLAGS	\
	(MC_RESOURCES | MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP)

mac_callbacks_t nxge_m_callbacks = {
	NXGE_M_CALLBACK_FLAGS,
	nxge_m_stat,
	nxge_m_start,
	nxge_m_stop,
	nxge_m_promisc,
	nxge_m_multicst,
	nxge_m_unicst,
	nxge_m_tx,
	nxge_m_resources,
	nxge_m_ioctl,
	nxge_m_getcapab,
	NULL,
	NULL,
	nxge_m_setprop,
	nxge_m_getprop
};

void nxge_err_inject(p_nxge_t, queue_t *, mblk_t *);

/* PSARC/2007/453 MSI-X interrupt limit override. */
#define	NXGE_MSIX_REQUEST_10G	8
#define	NXGE_MSIX_REQUEST_1G	2
static int nxge_create_msi_property(p_nxge_t);

/*
 * These global variables control the message
 * output.
 */
out_dbgmsg_t nxge_dbgmsg_out = DBG_CONSOLE | STR_LOG;
uint64_t nxge_debug_level;

/*
 * This list contains the instance structures for the Neptune
 * devices present in the system. The lock exists to guarantee
 * mutually exclusive access to the list.
 */
void *nxge_list = NULL;

void *nxge_hw_list = NULL;
nxge_os_mutex_t nxge_common_lock;

extern uint64_t npi_debug_level;

extern nxge_status_t nxge_ldgv_init(p_nxge_t, int *, int *);
extern nxge_status_t nxge_ldgv_init_n2(p_nxge_t, int *, int *);
extern nxge_status_t nxge_ldgv_uninit(p_nxge_t);
extern nxge_status_t nxge_intr_ldgv_init(p_nxge_t);
extern void nxge_fm_init(p_nxge_t,
	ddi_device_acc_attr_t *,
	ddi_device_acc_attr_t *,
	ddi_dma_attr_t *);
extern void nxge_fm_fini(p_nxge_t);
extern npi_status_t npi_mac_altaddr_disable(npi_handle_t, uint8_t, uint8_t);

/*
 * Count used to maintain the number of buffers being used
 * by Neptune instances and loaned up to the upper layers.
 */
uint32_t nxge_mblks_pending = 0;

/*
 * Device register access attributes for PIO.
 */
static ddi_device_acc_attr_t nxge_dev_reg_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
};

/*
 * Device descriptor access attributes for DMA.
 */
static ddi_device_acc_attr_t nxge_dev_desc_dma_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};

/*
 * Device buffer access attributes for DMA.
 */
static ddi_device_acc_attr_t nxge_dev_buf_dma_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_BE_ACC,
	DDI_STRICTORDER_ACC
};

ddi_dma_attr_t nxge_desc_dma_attr = {
	DMA_ATTR_V0,		/* version number */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
#ifndef NIU_PA_WORKAROUND
	0x100000,		/* alignment */
#else
	0x2000,
#endif
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	1,			/* scatter/gather list length */
	(unsigned int)1,	/* granularity */
	0			/* attribute flags */
};

ddi_dma_attr_t nxge_tx_dma_attr = {
	DMA_ATTR_V0,		/* version number */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
#if defined(_BIG_ENDIAN)
	0x2000,			/* alignment */
#else
	0x1000,			/* alignment */
#endif
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	5,			/* scatter/gather list length */
	(unsigned int)1,	/* granularity */
	0			/* attribute flags */
};
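/*
 * A usage sketch (not driver code): a transmit DMA handle would be
 * allocated against nxge_tx_dma_attr with the standard DDI call, e.g.
 *
 *	ddi_dma_handle_t handle;
 *
 *	if (ddi_dma_alloc_handle(nxgep->dip, &nxge_tx_dma_attr,
 *	    DDI_DMA_DONTWAIT, 0, &handle) != DDI_SUCCESS) {
 *		(handle the failure)
 *	}
 *
 * The 0xfc00fc burst-size word is a bitmask of power-of-two burst
 * sizes (bit n enables 2^n-byte bursts); per the dlim_burstsizes
 * comment below, the low and high 16-bit halves cover 32-bit and
 * 64-bit transfers respectively.
 */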
ddi_dma_attr_t nxge_rx_dma_attr = {
	DMA_ATTR_V0,		/* version number */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
	0x2000,			/* alignment */
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	1,			/* scatter/gather list length */
	(unsigned int)1,	/* granularity */
	DDI_DMA_RELAXED_ORDERING	/* attribute flags */
};

ddi_dma_lim_t nxge_dma_limits = {
	(uint_t)0,		/* dlim_addr_lo */
	(uint_t)0xffffffff,	/* dlim_addr_hi */
	(uint_t)0xffffffff,	/* dlim_cntr_max */
	(uint_t)0xfc00fc,	/* dlim_burstsizes for 32 and 64 bit xfers */
	0x1,			/* dlim_minxfer */
	1024			/* dlim_speed */
};

dma_method_t nxge_force_dma = DVMA;

/*
 * DMA chunk sizes.
 *
 * Try to allocate the largest possible size
 * so that fewer DMA chunks need to be managed.
 */
#ifdef NIU_PA_WORKAROUND
size_t alloc_sizes [] = {0x2000};
#else
size_t alloc_sizes [] = {0x1000, 0x2000, 0x4000, 0x8000,
		0x10000, 0x20000, 0x40000, 0x80000,
		0x100000, 0x200000, 0x400000, 0x800000,
		0x1000000, 0x2000000, 0x4000000};
#endif
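/*
 * A sketch of how alloc_sizes is intended to be used (assumed from
 * the comment above): walk the table from the largest entry down and
 * take the first size that fits, so a buffer pool is covered by as
 * few DMA chunks as possible.
 *
 *	int i;
 *	size_t chunk = 0;
 *
 *	for (i = sizeof (alloc_sizes) / sizeof (size_t) - 1; i >= 0; i--) {
 *		if (alloc_sizes[i] <= request) {
 *			chunk = alloc_sizes[i];
 *			break;
 *		}
 *	}
 */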
/*
 * Translate "dev_t" to a pointer to the associated "dev_info_t".
 */

extern void nxge_get_environs(nxge_t *);

static int
nxge_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	p_nxge_t nxgep = NULL;
	int instance;
	int status = DDI_SUCCESS;
	uint8_t portn;
	nxge_mmac_t *mmac_info;
	p_nxge_param_t param_arr;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_attach"));

	/*
	 * Get the device instance since we'll need to setup
	 * or retrieve a soft state for this instance.
	 */
	instance = ddi_get_instance(dip);

	switch (cmd) {
	case DDI_ATTACH:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_ATTACH"));
		break;

	case DDI_RESUME:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_RESUME"));
		nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);
		if (nxgep == NULL) {
			status = DDI_FAILURE;
			break;
		}
		if (nxgep->dip != dip) {
			status = DDI_FAILURE;
			break;
		}
		if (nxgep->suspended == DDI_PM_SUSPEND) {
			status = ddi_dev_is_needed(nxgep->dip, 0, 1);
		} else {
			status = nxge_resume(nxgep);
		}
		goto nxge_attach_exit;

	case DDI_PM_RESUME:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_RESUME"));
		nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);
		if (nxgep == NULL) {
			status = DDI_FAILURE;
			break;
		}
		if (nxgep->dip != dip) {
			status = DDI_FAILURE;
			break;
		}
		status = nxge_resume(nxgep);
		goto nxge_attach_exit;

	default:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing unknown"));
		status = DDI_FAILURE;
		goto nxge_attach_exit;
	}

	if (ddi_soft_state_zalloc(nxge_list, instance) == DDI_FAILURE) {
		status = DDI_FAILURE;
		goto nxge_attach_exit;
	}

	nxgep = ddi_get_soft_state(nxge_list, instance);
	if (nxgep == NULL) {
		status = NXGE_ERROR;
		goto nxge_attach_fail2;
	}

	nxgep->nxge_magic = NXGE_MAGIC;

	nxgep->drv_state = 0;
	nxgep->dip = dip;
	nxgep->instance = instance;
	nxgep->p_dip = ddi_get_parent(dip);
	nxgep->nxge_debug_level = nxge_debug_level;
	npi_debug_level = nxge_debug_level;

	/* Are we a guest running in a Hybrid I/O environment? */
	nxge_get_environs(nxgep);

	status = nxge_map_regs(nxgep);

	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_map_regs failed"));
		goto nxge_attach_fail3;
	}

	nxge_fm_init(nxgep, &nxge_dev_reg_acc_attr,
	    &nxge_dev_desc_dma_acc_attr,
	    &nxge_rx_dma_attr);

	/* Create & initialize the per-Neptune data structure */
	/* (even if we're a guest). */
	status = nxge_init_common_dev(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_init_common_dev failed"));
		goto nxge_attach_fail4;
	}

	/*
	 * Software workaround: set the replay timer.
	 */
	if (nxgep->niu_type != N2_NIU) {
		nxge_set_pci_replay_timeout(nxgep);
	}

#if defined(sun4v)
	/* This is required by nxge_hio_init(), which follows. */
	if ((status = nxge_hsvc_register(nxgep)) != DDI_SUCCESS)
		goto nxge_attach_fail4;
#endif

	if ((status = nxge_hio_init(nxgep)) != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_hio_init failed"));
		goto nxge_attach_fail4;
	}

	if (nxgep->niu_type == NEPTUNE_2_10GF) {
		if (nxgep->function_num > 1) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "Unsupported"
			    " function %d. Only functions 0 and 1 are "
			    "supported for this card.", nxgep->function_num));
			status = NXGE_ERROR;
			goto nxge_attach_fail4;
		}
	}

	if (isLDOMguest(nxgep)) {
		/*
		 * Use the function number here.
		 */
		nxgep->mac.portnum = nxgep->function_num;
		nxgep->mac.porttype = PORT_TYPE_LOGICAL;

		/* XXX We'll set the MAC address counts to 1 for now. */
		mmac_info = &nxgep->nxge_mmac_info;
		mmac_info->num_mmac = 1;
		mmac_info->naddrfree = 1;
	} else {
		portn = NXGE_GET_PORT_NUM(nxgep->function_num);
		nxgep->mac.portnum = portn;
		if ((portn == 0) || (portn == 1))
			nxgep->mac.porttype = PORT_TYPE_XMAC;
		else
			nxgep->mac.porttype = PORT_TYPE_BMAC;
		/*
		 * Neptune has 4 ports: the first 2 ports use XMAC (10G MAC)
		 * internally, and the other 2 ports use BMAC (1G "Big" MAC).
		 * The two types of MACs have different characteristics.
		 */
		mmac_info = &nxgep->nxge_mmac_info;
		if (nxgep->function_num < 2) {
			mmac_info->num_mmac = XMAC_MAX_ALT_ADDR_ENTRY;
			mmac_info->naddrfree = XMAC_MAX_ALT_ADDR_ENTRY;
		} else {
			mmac_info->num_mmac = BMAC_MAX_ALT_ADDR_ENTRY;
			mmac_info->naddrfree = BMAC_MAX_ALT_ADDR_ENTRY;
		}
	}
	/*
	 * Setup the Ndd parameters for this instance.
	 */
	nxge_init_param(nxgep);

	/*
	 * Setup Register Tracing Buffer.
	 */
	npi_rtrace_buf_init((rtrace_t *)&npi_rtracebuf);

	/* init stats ptr */
	nxge_init_statsp(nxgep);

	/*
	 * Copy the vpd info from eeprom to a local data
	 * structure, and then check its validity.
	 */
	if (!isLDOMguest(nxgep)) {
		int *regp;
		uint_t reglen;
		int rv;

		nxge_vpd_info_get(nxgep);

		/* Find the NIU config handle. */
		rv = ddi_prop_lookup_int_array(DDI_DEV_T_ANY,
		    ddi_get_parent(nxgep->dip), DDI_PROP_DONTPASS,
		    "reg", &regp, &reglen);

		if (rv != DDI_PROP_SUCCESS) {
			goto nxge_attach_fail5;
		}
		/*
		 * The address_hi, that is the first int in the "reg"
		 * property, contains the config handle; bits 28-31
		 * hold OBP-specific info and must be masked off.
		 */
		nxgep->niu_cfg_hdl = (*regp) & 0xFFFFFFF;
		ddi_prop_free(regp);
	}
	if (isLDOMguest(nxgep)) {
		uchar_t *prop_val;
		uint_t prop_len;
		uint32_t max_frame_size;

		extern void nxge_get_logical_props(p_nxge_t);

		nxgep->statsp->mac_stats.xcvr_inuse = LOGICAL_XCVR;
		nxgep->mac.portmode = PORT_LOGICAL;
		(void) ddi_prop_update_string(DDI_DEV_T_NONE, nxgep->dip,
		    "phy-type", "virtual transceiver");

		nxgep->nports = 1;
		nxgep->board_ver = 0;	/* XXX What? */

		/*
		 * local-mac-address property gives us info on which
		 * specific MAC address the Hybrid resource is associated
		 * with.
		 */
		if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, nxgep->dip, 0,
		    "local-mac-address", &prop_val,
		    &prop_len) != DDI_PROP_SUCCESS) {
			goto nxge_attach_fail5;
		}
		if (prop_len != ETHERADDRL) {
			ddi_prop_free(prop_val);
			goto nxge_attach_fail5;
		}
		ether_copy(prop_val, nxgep->hio_mac_addr);
		ddi_prop_free(prop_val);
		nxge_get_logical_props(nxgep);

		/*
		 * Enable Jumbo property based on the "max-frame-size"
		 * property value.
		 */
		max_frame_size = ddi_prop_get_int(DDI_DEV_T_ANY,
		    nxgep->dip, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
		    "max-frame-size", NXGE_MTU_DEFAULT_MAX);
		if ((max_frame_size > NXGE_MTU_DEFAULT_MAX) &&
		    (max_frame_size <= TX_JUMBO_MTU)) {
			param_arr = nxgep->param_arr;

			param_arr[param_accept_jumbo].value = 1;
			nxgep->mac.is_jumbo = B_TRUE;
			nxgep->mac.maxframesize = (uint16_t)max_frame_size;
			nxgep->mac.default_mtu = nxgep->mac.maxframesize -
			    NXGE_EHEADER_VLAN_CRC;
		}
	} else {
		status = nxge_xcvr_find(nxgep);

		if (status != NXGE_OK) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_attach: "
			    " Couldn't determine card type"
			    " .... exit "));
			goto nxge_attach_fail5;
		}

		status = nxge_get_config_properties(nxgep);

		if (status != NXGE_OK) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "get_hw create failed"));
			goto nxge_attach_fail;
		}
	}
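	/*
	 * For illustration: assuming the usual NXGE_EHEADER_VLAN_CRC
	 * accounting (Ethernet header + VLAN tag + FCS = 14 + 4 + 4 =
	 * 22 bytes), a "max-frame-size" of 9216 would yield
	 *
	 *	default_mtu = 9216 - 22 = 9194
	 *
	 * The values here are examples only.
	 */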
	/*
	 * Setup the Kstats for the driver.
	 */
	nxge_setup_kstats(nxgep);

	if (!isLDOMguest(nxgep))
		nxge_setup_param(nxgep);

	status = nxge_setup_system_dma_pages(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "set dma page failed"));
		goto nxge_attach_fail;
	}

	nxge_hw_id_init(nxgep);

	if (!isLDOMguest(nxgep))
		nxge_hw_init_niu_common(nxgep);

	status = nxge_setup_mutexes(nxgep);
	if (status != NXGE_OK) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set mutex failed"));
		goto nxge_attach_fail;
	}

#if defined(sun4v)
	if (isLDOMguest(nxgep)) {
		/* Find our VR & channel sets. */
		status = nxge_hio_vr_add(nxgep);
		goto nxge_attach_exit;
	}
#endif

	status = nxge_setup_dev(nxgep);
	if (status != DDI_SUCCESS) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set dev failed"));
		goto nxge_attach_fail;
	}

	status = nxge_add_intrs(nxgep);
	if (status != DDI_SUCCESS) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "add_intr failed"));
		goto nxge_attach_fail;
	}
	status = nxge_add_soft_intrs(nxgep);
	if (status != DDI_SUCCESS) {
		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
		    "add_soft_intr failed"));
		goto nxge_attach_fail;
	}

	/* If a guest, register with vio_net instead. */
	if ((status = nxge_mac_register(nxgep)) != NXGE_OK) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "unable to register to mac layer (%d)", status));
		goto nxge_attach_fail;
	}

	mac_link_update(nxgep->mach, LINK_STATE_UNKNOWN);

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "registered to mac (instance %d)", instance));

	/* nxge_link_monitor calls xcvr.check_link recursively */
	(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);

	goto nxge_attach_exit;

nxge_attach_fail:
	nxge_unattach(nxgep);
	goto nxge_attach_fail1;

nxge_attach_fail5:
	/*
	 * Tear down the ndd parameters setup.
	 */
	nxge_destroy_param(nxgep);

	/*
	 * Tear down the kstat setup.
	 */
	nxge_destroy_kstats(nxgep);

nxge_attach_fail4:
	if (nxgep->nxge_hw_p) {
		nxge_uninit_common_dev(nxgep);
		nxgep->nxge_hw_p = NULL;
	}

nxge_attach_fail3:
	/*
	 * Unmap the register setup.
	 */
	nxge_unmap_regs(nxgep);

	nxge_fm_fini(nxgep);

nxge_attach_fail2:
	ddi_soft_state_free(nxge_list, nxgep->instance);

nxge_attach_fail1:
	if (status != NXGE_OK)
		status = (NXGE_ERROR | NXGE_DDI_FAILED);
	nxgep = NULL;

nxge_attach_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_attach status = 0x%08x",
	    status));

	return (status);
}

static int
nxge_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int status = DDI_SUCCESS;
	int instance;
	p_nxge_t nxgep = NULL;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_detach"));
	instance = ddi_get_instance(dip);
	nxgep = ddi_get_soft_state(nxge_list, instance);
	if (nxgep == NULL) {
		status = DDI_FAILURE;
		goto nxge_detach_exit;
	}

	switch (cmd) {
	case DDI_DETACH:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_DETACH"));
		break;

	case DDI_PM_SUSPEND:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_SUSPEND"));
		nxgep->suspended = DDI_PM_SUSPEND;
		nxge_suspend(nxgep);
		break;

	case DDI_SUSPEND:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_SUSPEND"));
		if (nxgep->suspended != DDI_PM_SUSPEND) {
			nxgep->suspended = DDI_SUSPEND;
			nxge_suspend(nxgep);
		}
		break;

	default:
		status = DDI_FAILURE;
	}

	if (cmd != DDI_DETACH)
		goto nxge_detach_exit;

	/*
	 * Stop the xcvr polling.
	 */
	nxgep->suspended = cmd;

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);

	if (isLDOMguest(nxgep)) {
		if (nxgep->nxge_mac_state == NXGE_MAC_STARTED)
			nxge_m_stop((void *)nxgep);
		nxge_hio_unregister(nxgep);
	} else if (nxgep->mach && (status = mac_unregister(nxgep->mach)) != 0) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "<== nxge_detach status = 0x%08X", status));
		return (DDI_FAILURE);
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "<== nxge_detach (mac_unregister) status = 0x%08X", status));

	nxge_unattach(nxgep);
	nxgep = NULL;

nxge_detach_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_detach status = 0x%08X",
	    status));

	return (status);
}

static void
nxge_unattach(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unattach"));

	if (nxgep == NULL || nxgep->dev_regs == NULL) {
		return;
	}

	nxgep->nxge_magic = 0;

	if (nxgep->nxge_timerid) {
		nxge_stop_timer(nxgep, nxgep->nxge_timerid);
		nxgep->nxge_timerid = 0;
	}
	/*
	 * If this flag is set, it will affect the Neptune
	 * only.
	 */
	if ((nxgep->niu_type != N2_NIU) && nxge_peu_reset_enable) {
		nxge_niu_peu_reset(nxgep);
	}

#if defined(sun4v)
	if (isLDOMguest(nxgep)) {
		(void) nxge_hio_vr_release(nxgep);
	}
#endif

	if (nxgep->nxge_hw_p) {
		nxge_uninit_common_dev(nxgep);
		nxgep->nxge_hw_p = NULL;
	}

#if defined(sun4v)
	if (nxgep->niu_type == N2_NIU && nxgep->niu_hsvc_available == B_TRUE) {
		(void) hsvc_unregister(&nxgep->niu_hsvc);
		nxgep->niu_hsvc_available = B_FALSE;
	}
#endif
	/*
	 * Stop any further interrupts.
	 */
	nxge_remove_intrs(nxgep);

	/* remove soft interrupts */
	nxge_remove_soft_intrs(nxgep);

	/*
	 * Stop the device and free resources.
	 */
	if (!isLDOMguest(nxgep)) {
		nxge_destroy_dev(nxgep);
	}

	/*
	 * Tear down the ndd parameters setup.
	 */
	nxge_destroy_param(nxgep);

	/*
	 * Tear down the kstat setup.
	 */
	nxge_destroy_kstats(nxgep);

	/*
	 * Destroy all mutexes.
	 */
	nxge_destroy_mutexes(nxgep);

	/*
	 * Remove the list of ndd parameters which
	 * were setup during attach.
	 */
	if (nxgep->dip) {
		NXGE_DEBUG_MSG((nxgep, OBP_CTL,
		    " nxge_unattach: remove all properties"));

		(void) ddi_prop_remove_all(nxgep->dip);
	}

#if NXGE_PROPERTY
	nxge_remove_hard_properties(nxgep);
#endif

	/*
	 * Unmap the register setup.
	 */
	nxge_unmap_regs(nxgep);

	nxge_fm_fini(nxgep);

	ddi_soft_state_free(nxge_list, nxgep->instance);

	NXGE_DEBUG_MSG((NULL, DDI_CTL, "<== nxge_unattach"));
}

#if defined(sun4v)
int
nxge_hsvc_register(nxge_t *nxgep)
{
	nxge_status_t status;

	if (nxgep->niu_type == N2_NIU) {
		nxgep->niu_hsvc_available = B_FALSE;
		bcopy(&niu_hsvc, &nxgep->niu_hsvc, sizeof (hsvc_info_t));
		if ((status = hsvc_register(&nxgep->niu_hsvc,
		    &nxgep->niu_min_ver)) != 0) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "nxge_attach: %s: cannot negotiate "
			    "hypervisor services revision %d group: 0x%lx "
			    "major: 0x%lx minor: 0x%lx errno: %d",
			    niu_hsvc.hsvc_modname, niu_hsvc.hsvc_rev,
			    niu_hsvc.hsvc_group, niu_hsvc.hsvc_major,
			    niu_hsvc.hsvc_minor, status));
			return (DDI_FAILURE);
		}
		nxgep->niu_hsvc_available = B_TRUE;
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "NIU Hypervisor service enabled"));
	}

	return (DDI_SUCCESS);
}
#endif

static char n2_siu_name[] = "niu";

static nxge_status_t
nxge_map_regs(p_nxge_t nxgep)
{
	int ddi_status = DDI_SUCCESS;
	p_dev_regs_t dev_regs;
	char buf[MAXPATHLEN + 1];
	char *devname;
#ifdef NXGE_DEBUG
	char *sysname;
#endif
	off_t regsize;
	nxge_status_t status = NXGE_OK;
#if !defined(_BIG_ENDIAN)
	off_t pci_offset;
	uint16_t pcie_devctl;
#endif

	if (isLDOMguest(nxgep)) {
		return (nxge_guest_regs_map(nxgep));
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_map_regs"));
	nxgep->dev_regs = NULL;
	dev_regs = KMEM_ZALLOC(sizeof (dev_regs_t), KM_SLEEP);
	dev_regs->nxge_regh = NULL;
	dev_regs->nxge_pciregh = NULL;
	dev_regs->nxge_msix_regh = NULL;
	dev_regs->nxge_vir_regh = NULL;
	dev_regs->nxge_vir2_regh = NULL;
	nxgep->niu_type = NIU_TYPE_NONE;

	devname = ddi_pathname(nxgep->dip, buf);
	ASSERT(strlen(devname) > 0);
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "nxge_map_regs: pathname devname %s", devname));

	/*
	 * The driver is running on an N2-NIU system if devname is something
	 * like "/niu@80/network@0"
	 */
	if (strstr(devname, n2_siu_name)) {
		/* N2/NIU */
		nxgep->niu_type = N2_NIU;
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: N2/NIU devname %s", devname));
		/* get function number */
		nxgep->function_num =
		    (devname[strlen(devname) - 1] == '1' ? 1 : 0);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: N2/NIU function number %d",
		    nxgep->function_num));
	} else {
		int *prop_val;
		uint_t prop_len;
		uint8_t func_num;

		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, nxgep->dip,
		    0, "reg",
		    &prop_val, &prop_len) != DDI_PROP_SUCCESS) {
			NXGE_DEBUG_MSG((nxgep, VPD_CTL,
			    "Reg property not found"));
			ddi_status = DDI_FAILURE;
			goto nxge_map_regs_fail0;

		} else {
			func_num = (prop_val[0] >> 8) & 0x7;
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "Reg property found: fun # %d",
			    func_num));
			nxgep->function_num = func_num;
			if (isLDOMguest(nxgep)) {
				nxgep->function_num /= 2;
				return (NXGE_OK);
			}
			ddi_prop_free(prop_val);
		}
	}

	switch (nxgep->niu_type) {
	default:
		(void) ddi_dev_regsize(nxgep->dip, 0, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: pci config size 0x%x", regsize));

		ddi_status = ddi_regs_map_setup(nxgep->dip, 0,
		    (caddr_t *)&(dev_regs->nxge_pciregp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_pciregh);
		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs, nxge bus config regs failed"));
			goto nxge_map_regs_fail0;
		}
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_reg: PCI config addr 0x%0llx "
		    " handle 0x%0llx", dev_regs->nxge_pciregp,
		    dev_regs->nxge_pciregh));
		/*
		 * IMPORTANT:
		 * Workaround for a bit-swapping bug in the hardware
		 * which ends up setting no-snoop = yes, resulting
		 * in DMA not being synched properly.
		 */
#if !defined(_BIG_ENDIAN)
		/* workarounds for x86 systems */
		pci_offset = 0x80 + PCIE_DEVCTL;
		pcie_devctl = 0x0;
		pcie_devctl &= PCIE_DEVCTL_ENABLE_NO_SNOOP;
		pcie_devctl |= PCIE_DEVCTL_RO_EN;
		pci_config_put16(dev_regs->nxge_pciregh, pci_offset,
		    pcie_devctl);
#endif

		(void) ddi_dev_regsize(nxgep->dip, 1, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: pio size 0x%x", regsize));
		/* set up the device mapped register */
		ddi_status = ddi_regs_map_setup(nxgep->dip, 1,
		    (caddr_t *)&(dev_regs->nxge_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_regh);
		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for Neptune global reg failed"));
			goto nxge_map_regs_fail1;
		}

		/* set up the msi/msi-x mapped register */
		(void) ddi_dev_regsize(nxgep->dip, 2, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: msix size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 2,
		    (caddr_t *)&(dev_regs->nxge_msix_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_msix_regh);
		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for msi reg failed"));
			goto nxge_map_regs_fail2;
		}

		/* set up the vio region mapped register */
		(void) ddi_dev_regsize(nxgep->dip, 3, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: vio size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 3,
		    (caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh);

		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for nxge vio reg failed"));
			goto nxge_map_regs_fail3;
		}
		nxgep->dev_regs = dev_regs;

		NPI_PCI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_pciregh);
		NPI_PCI_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_pciregp);
		NPI_MSI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_msix_regh);
		NPI_MSI_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_msix_regp);

		NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
		NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp);

		NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
		NPI_REG_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_regp);

		NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh);
		NPI_VREG_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_vir_regp);

		break;

	case N2_NIU:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "ddi_map_regs, NIU"));
		/*
		 * Set up the device mapped register (FWARC 2006/556)
		 * (changed back to 1: reg starts at 1!)
		 */
		(void) ddi_dev_regsize(nxgep->dip, 1, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: dev size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 1,
		    (caddr_t *)&(dev_regs->nxge_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_regh);

		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for N2/NIU, global reg failed "));
			goto nxge_map_regs_fail1;
		}

		/* set up the first vio region mapped register */
		(void) ddi_dev_regsize(nxgep->dip, 2, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: vio (1) size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 2,
		    (caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh);

		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for nxge vio reg failed"));
			goto nxge_map_regs_fail2;
		}
		/* set up the second vio region mapped register */
		(void) ddi_dev_regsize(nxgep->dip, 3, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: vio (3) size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 3,
		    (caddr_t *)&(dev_regs->nxge_vir2_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir2_regh);

		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for nxge vio2 reg failed"));
			goto nxge_map_regs_fail3;
		}
		nxgep->dev_regs = dev_regs;

		NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
		NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp);

		NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
		NPI_REG_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_regp);

		NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh);
		NPI_VREG_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_vir_regp);

		NPI_V2REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir2_regh);
		NPI_V2REG_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_vir2_regp);

		break;
	}
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "nxge_map_reg: hardware addr 0x%0llx "
	    " handle 0x%0llx", dev_regs->nxge_regp, dev_regs->nxge_regh));

	goto nxge_map_regs_exit;
nxge_map_regs_fail3:
	if (dev_regs->nxge_msix_regh) {
		ddi_regs_map_free(&dev_regs->nxge_msix_regh);
	}
	if (dev_regs->nxge_vir_regh) {
		ddi_regs_map_free(&dev_regs->nxge_vir_regh);
	}
nxge_map_regs_fail2:
	if (dev_regs->nxge_regh) {
		ddi_regs_map_free(&dev_regs->nxge_regh);
	}
nxge_map_regs_fail1:
	if (dev_regs->nxge_pciregh) {
		ddi_regs_map_free(&dev_regs->nxge_pciregh);
	}
nxge_map_regs_fail0:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "Freeing register set memory"));
	kmem_free(dev_regs, sizeof (dev_regs_t));

nxge_map_regs_exit:
	if (ddi_status != DDI_SUCCESS)
		status |= (NXGE_ERROR | NXGE_DDI_FAILED);
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_map_regs"));
	return (status);
}

static void
nxge_unmap_regs(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unmap_regs"));

	if (isLDOMguest(nxgep)) {
		nxge_guest_regs_map_free(nxgep);
		return;
	}

	if (nxgep->dev_regs) {
		if (nxgep->dev_regs->nxge_pciregh) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "==> nxge_unmap_regs: bus"));
			ddi_regs_map_free(&nxgep->dev_regs->nxge_pciregh);
			nxgep->dev_regs->nxge_pciregh = NULL;
		}
		if (nxgep->dev_regs->nxge_regh) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "==> nxge_unmap_regs: device registers"));
			ddi_regs_map_free(&nxgep->dev_regs->nxge_regh);
			nxgep->dev_regs->nxge_regh = NULL;
		}
		if (nxgep->dev_regs->nxge_msix_regh) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "==> nxge_unmap_regs: device interrupts"));
			ddi_regs_map_free(&nxgep->dev_regs->nxge_msix_regh);
			nxgep->dev_regs->nxge_msix_regh = NULL;
		}
		if (nxgep->dev_regs->nxge_vir_regh) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "==> nxge_unmap_regs: vio region"));
			ddi_regs_map_free(&nxgep->dev_regs->nxge_vir_regh);
			nxgep->dev_regs->nxge_vir_regh = NULL;
		}
		if (nxgep->dev_regs->nxge_vir2_regh) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "==> nxge_unmap_regs: vio2 region"));
			ddi_regs_map_free(&nxgep->dev_regs->nxge_vir2_regh);
			nxgep->dev_regs->nxge_vir2_regh = NULL;
		}

		kmem_free(nxgep->dev_regs, sizeof (dev_regs_t));
		nxgep->dev_regs = NULL;
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_unmap_regs"));
}

static nxge_status_t
nxge_setup_mutexes(p_nxge_t nxgep)
{
	int ddi_status = DDI_SUCCESS;
	nxge_status_t status = NXGE_OK;
	nxge_classify_t *classify_ptr;
	int partition;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_mutexes"));

	/*
	 * Get the interrupt cookie so the mutexes can be
	 * initialized.
	 */
	if (isLDOMguest(nxgep)) {
		nxgep->interrupt_cookie = 0;
	} else {
		ddi_status = ddi_get_iblock_cookie(nxgep->dip, 0,
		    &nxgep->interrupt_cookie);

		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "<== nxge_setup_mutexes: failed 0x%x",
			    ddi_status));
			goto nxge_setup_mutexes_exit;
		}
	}

	cv_init(&nxgep->poll_cv, NULL, CV_DRIVER, NULL);
	MUTEX_INIT(&nxgep->poll_lock, NULL,
	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	/*
	 * Initialize mutexes for this device.
	 */
	MUTEX_INIT(nxgep->genlock, NULL,
	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	MUTEX_INIT(&nxgep->ouraddr_lock, NULL,
	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	MUTEX_INIT(&nxgep->mif_lock, NULL,
	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	MUTEX_INIT(&nxgep->group_lock, NULL,
	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	RW_INIT(&nxgep->filter_lock, NULL,
	    RW_DRIVER, (void *)nxgep->interrupt_cookie);

	classify_ptr = &nxgep->classifier;
	/*
	 * FFLP mutexes are never used in interrupt context,
	 * as FFLP operations can take a very long time to
	 * complete and hence are not suitable to invoke from
	 * interrupt handlers.
	 */
	MUTEX_INIT(&classify_ptr->tcam_lock, NULL,
	    NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
		MUTEX_INIT(&classify_ptr->fcram_lock, NULL,
		    NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
		for (partition = 0; partition < MAX_PARTITION; partition++) {
			MUTEX_INIT(&classify_ptr->hash_lock[partition], NULL,
			    NXGE_MUTEX_DRIVER,
			    (void *)nxgep->interrupt_cookie);
		}
	}

nxge_setup_mutexes_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "<== nxge_setup_mutexes status = %x", status));

	if (ddi_status != DDI_SUCCESS)
		status |= (NXGE_ERROR | NXGE_DDI_FAILED);

	return (status);
}

static void
nxge_destroy_mutexes(p_nxge_t nxgep)
{
	int partition;
	nxge_classify_t *classify_ptr;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_mutexes"));
	RW_DESTROY(&nxgep->filter_lock);
	MUTEX_DESTROY(&nxgep->group_lock);
	MUTEX_DESTROY(&nxgep->mif_lock);
	MUTEX_DESTROY(&nxgep->ouraddr_lock);
	MUTEX_DESTROY(nxgep->genlock);

	classify_ptr = &nxgep->classifier;
	MUTEX_DESTROY(&classify_ptr->tcam_lock);

	/* Destroy all polling resources. */
	MUTEX_DESTROY(&nxgep->poll_lock);
	cv_destroy(&nxgep->poll_cv);

	/* free data structures, based on HW type */
	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
		MUTEX_DESTROY(&classify_ptr->fcram_lock);
		for (partition = 0; partition < MAX_PARTITION; partition++) {
			MUTEX_DESTROY(&classify_ptr->hash_lock[partition]);
		}
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_mutexes"));
}

nxge_status_t
nxge_init(p_nxge_t nxgep)
{
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_init"));

	if (nxgep->drv_state & STATE_HW_INITIALIZED) {
		return (status);
	}

	/*
	 * Allocate system memory for the receive/transmit buffer blocks
	 * and receive/transmit descriptor rings.
	 */
	status = nxge_alloc_mem_pool(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "alloc mem failed\n"));
		goto nxge_init_fail1;
	}

	if (!isLDOMguest(nxgep)) {
		/*
		 * Initialize and enable the TXC registers.
		 * (Globally enable the Tx controller,
		 * enable the port, configure the dma channel bitmap,
		 * configure the max burst size.)
		 */
		status = nxge_txc_init(nxgep);
		if (status != NXGE_OK) {
			NXGE_ERROR_MSG((nxgep,
			    NXGE_ERR_CTL, "init txc failed\n"));
			goto nxge_init_fail2;
		}
	}
	/*
	 * Initialize and enable TXDMA channels.
	 */
	status = nxge_init_txdma_channels(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init txdma failed\n"));
		goto nxge_init_fail3;
	}

	/*
	 * Initialize and enable RXDMA channels.
	 */
	status = nxge_init_rxdma_channels(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init rxdma failed\n"));
		goto nxge_init_fail4;
	}

	/*
	 * The guest domain is now done.
	 */
	if (isLDOMguest(nxgep)) {
		nxgep->drv_state |= STATE_HW_INITIALIZED;
		goto nxge_init_exit;
	}

	/*
	 * Initialize TCAM and FCRAM (Neptune).
	 */
	status = nxge_classify_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "init classify failed\n"));
		goto nxge_init_fail5;
	}

	/*
	 * Initialize ZCP.
	 */
	status = nxge_zcp_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init ZCP failed\n"));
		goto nxge_init_fail5;
	}

	/*
	 * Initialize IPP.
	 */
	status = nxge_ipp_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init IPP failed\n"));
		goto nxge_init_fail5;
	}

	/*
	 * Initialize the MAC block.
	 */
	status = nxge_mac_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init MAC failed\n"));
		goto nxge_init_fail5;
	}

	nxge_intrs_enable(nxgep); /* XXX What changes do I need to make here? */

	/*
	 * Enable hardware interrupts.
	 */
	nxge_intr_hw_enable(nxgep);
	nxgep->drv_state |= STATE_HW_INITIALIZED;

	goto nxge_init_exit;

nxge_init_fail5:
	nxge_uninit_rxdma_channels(nxgep);
nxge_init_fail4:
	nxge_uninit_txdma_channels(nxgep);
nxge_init_fail3:
	if (!isLDOMguest(nxgep)) {
		(void) nxge_txc_uninit(nxgep);
	}
nxge_init_fail2:
	nxge_free_mem_pool(nxgep);
nxge_init_fail1:
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "<== nxge_init status (failed) = 0x%08x", status));
	return (status);

nxge_init_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_init status = 0x%08x",
	    status));
	return (status);
}


timeout_id_t
nxge_start_timer(p_nxge_t nxgep, fptrv_t func, int msec)
{
	if ((nxgep->suspended == 0) || (nxgep->suspended == DDI_RESUME)) {
		return (timeout(func, (caddr_t)nxgep,
		    drv_usectohz(1000 * msec)));
	}
	return (NULL);
}

/*ARGSUSED*/
void
nxge_stop_timer(p_nxge_t nxgep, timeout_id_t timerid)
{
	if (timerid) {
		(void) untimeout(timerid);
	}
}
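/*
 * Illustrative use of the timer helpers above (the callback name is
 * hypothetical; any fptrv_t routine works):
 *
 *	nxgep->nxge_timerid = nxge_start_timer(nxgep, my_check_func, 1000);
 *	...
 *	nxge_stop_timer(nxgep, nxgep->nxge_timerid);
 *	nxgep->nxge_timerid = 0;
 */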
void
nxge_uninit(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_uninit"));

	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "==> nxge_uninit: not initialized"));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "<== nxge_uninit"));
		return;
	}

	/* stop timer */
	if (nxgep->nxge_timerid) {
		nxge_stop_timer(nxgep, nxgep->nxge_timerid);
		nxgep->nxge_timerid = 0;
	}

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
	(void) nxge_intr_hw_disable(nxgep);

	/*
	 * Reset the receive MAC side.
	 */
	(void) nxge_rx_mac_disable(nxgep);

	/* Disable and soft reset the IPP */
	if (!isLDOMguest(nxgep))
		(void) nxge_ipp_disable(nxgep);

	/* Free classification resources */
	(void) nxge_classify_uninit(nxgep);

	/*
	 * Reset the transmit/receive DMA side.
	 */
	(void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_STOP);
	(void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP);

	nxge_uninit_txdma_channels(nxgep);
	nxge_uninit_rxdma_channels(nxgep);

	/*
	 * Reset the transmit MAC side.
	 */
	(void) nxge_tx_mac_disable(nxgep);

	nxge_free_mem_pool(nxgep);

	/*
	 * Start the timer if the reset flag is not set.
	 * If the reset flag is set, the link monitor
	 * will not be started, in order to stop further bus
	 * activity coming from this interface.
	 * The driver will start the monitor function
	 * if the interface is initialized again later.
	 */
	if (!nxge_peu_reset_enable) {
		(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
	}

	nxgep->drv_state &= ~STATE_HW_INITIALIZED;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_uninit: "
	    "nxge_mblks_pending %d", nxge_mblks_pending));
}

void
nxge_get64(p_nxge_t nxgep, p_mblk_t mp)
{
#if defined(__i386)
	size_t reg;
#else
	uint64_t reg;
#endif
	uint64_t regdata;
	int i, retry;

	bcopy((char *)mp->b_rptr, (char *)&reg, sizeof (uint64_t));
	regdata = 0;
	retry = 1;

	for (i = 0; i < retry; i++) {
		NXGE_REG_RD64(nxgep->npi_handle, reg, &regdata);
	}
	bcopy((char *)&regdata, (char *)mp->b_rptr, sizeof (uint64_t));
}

void
nxge_put64(p_nxge_t nxgep, p_mblk_t mp)
{
#if defined(__i386)
	size_t reg;
#else
	uint64_t reg;
#endif
	uint64_t buf[2];

	bcopy((char *)mp->b_rptr, (char *)&buf[0], 2 * sizeof (uint64_t));
#if defined(__i386)
	reg = (size_t)buf[0];
#else
	reg = buf[0];
#endif

	NXGE_NPI_PIO_WRITE64(nxgep->npi_handle, reg, buf[1]);
}
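/*
 * Message block layout assumed by nxge_get64()/nxge_put64() above,
 * as implied by their bcopy() calls:
 *
 *	nxge_get64: b_rptr[0..7]  = register offset in, value out
 *	nxge_put64: b_rptr[0..7]  = register offset,
 *	            b_rptr[8..15] = value to write
 */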
nxge_os_mutex_t nxgedebuglock;
int nxge_debug_init = 0;

/*ARGSUSED*/
/*VARARGS*/
void
nxge_debug_msg(p_nxge_t nxgep, uint64_t level, char *fmt, ...)
{
	char msg_buffer[1048];
	char prefix_buffer[32];
	int instance;
	uint64_t debug_level;
	int cmn_level = CE_CONT;
	va_list ap;

	if (nxgep && nxgep->nxge_debug_level != nxge_debug_level) {
		/* In case a developer has changed nxge_debug_level. */
		if (nxgep->nxge_debug_level != nxge_debug_level)
			nxgep->nxge_debug_level = nxge_debug_level;
	}

	debug_level = (nxgep == NULL) ? nxge_debug_level :
	    nxgep->nxge_debug_level;

	if ((level & debug_level) ||
	    (level == NXGE_NOTE) ||
	    (level == NXGE_ERR_CTL)) {
		/* do the msg processing */
		if (nxge_debug_init == 0) {
			MUTEX_INIT(&nxgedebuglock, NULL, MUTEX_DRIVER, NULL);
			nxge_debug_init = 1;
		}

		MUTEX_ENTER(&nxgedebuglock);

		if ((level & NXGE_NOTE)) {
			cmn_level = CE_NOTE;
		}

		if (level & NXGE_ERR_CTL) {
			cmn_level = CE_WARN;
		}

		va_start(ap, fmt);
		(void) vsprintf(msg_buffer, fmt, ap);
		va_end(ap);
		if (nxgep == NULL) {
			instance = -1;
			(void) sprintf(prefix_buffer, "%s :", "nxge");
		} else {
			instance = nxgep->instance;
			(void) sprintf(prefix_buffer,
			    "%s%d :", "nxge", instance);
		}

		MUTEX_EXIT(&nxgedebuglock);
		cmn_err(cmn_level, "!%s %s\n",
		    prefix_buffer, msg_buffer);
	}
}

char *
nxge_dump_packet(char *addr, int size)
{
	uchar_t *ap = (uchar_t *)addr;
	int i;
	static char etherbuf[1024];
	char *cp = etherbuf;
	char digits[] = "0123456789abcdef";

	if (!size)
		size = 60;

	if (size > MAX_DUMP_SZ) {
		/* Dump the leading bytes */
		for (i = 0; i < MAX_DUMP_SZ/2; i++) {
			if (*ap > 0x0f)
				*cp++ = digits[*ap >> 4];
			*cp++ = digits[*ap++ & 0xf];
			*cp++ = ':';
		}
		for (i = 0; i < 20; i++)
			*cp++ = '.';
		/* Dump the last MAX_DUMP_SZ/2 bytes */
		ap = (uchar_t *)(addr + (size - MAX_DUMP_SZ/2));
		for (i = 0; i < MAX_DUMP_SZ/2; i++) {
			if (*ap > 0x0f)
				*cp++ = digits[*ap >> 4];
			*cp++ = digits[*ap++ & 0xf];
			*cp++ = ':';
		}
	} else {
		for (i = 0; i < size; i++) {
			if (*ap > 0x0f)
				*cp++ = digits[*ap >> 4];
			*cp++ = digits[*ap++ & 0xf];
			*cp++ = ':';
		}
	}
	*--cp = 0;
	return (etherbuf);
}
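/*
 * Typical (illustrative) use of nxge_dump_packet(): format the first
 * 60 bytes of a received mblk as colon-separated hex for a debug
 * message. Note that the returned buffer is static, so the result
 * must be consumed before the next call.
 *
 *	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "pkt: %s",
 *	    nxge_dump_packet((char *)mp->b_rptr, 60)));
 */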
NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base2c, 0))); 1939 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1940 "\nNeptune PCI BAR: base30 0x%x\n", 1941 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base30, 0))); 1942 1943 cfg_handle = nxgep->dev_regs->nxge_pciregh; 1944 cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp; 1945 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1946 "first 0x%llx second 0x%llx third 0x%llx " 1947 "last 0x%llx ", 1948 NXGE_PIO_READ64(dev_handle, 1949 (uint64_t *)(dev_ptr + 0), 0), 1950 NXGE_PIO_READ64(dev_handle, 1951 (uint64_t *)(dev_ptr + 8), 0), 1952 NXGE_PIO_READ64(dev_handle, 1953 (uint64_t *)(dev_ptr + 16), 0), 1954 NXGE_PIO_READ64(dev_handle, 1955 (uint64_t *)(dev_ptr + 24), 0))); 1956 } 1957 } 1958 1959 #endif 1960 1961 static void 1962 nxge_suspend(p_nxge_t nxgep) 1963 { 1964 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_suspend")); 1965 1966 nxge_intrs_disable(nxgep); 1967 nxge_destroy_dev(nxgep); 1968 1969 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_suspend")); 1970 } 1971 1972 static nxge_status_t 1973 nxge_resume(p_nxge_t nxgep) 1974 { 1975 nxge_status_t status = NXGE_OK; 1976 1977 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_resume")); 1978 1979 nxgep->suspended = DDI_RESUME; 1980 (void) nxge_link_monitor(nxgep, LINK_MONITOR_START); 1981 (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_START); 1982 (void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_START); 1983 (void) nxge_rx_mac_enable(nxgep); 1984 (void) nxge_tx_mac_enable(nxgep); 1985 nxge_intrs_enable(nxgep); 1986 nxgep->suspended = 0; 1987 1988 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1989 "<== nxge_resume status = 0x%x", status)); 1990 return (status); 1991 } 1992 1993 static nxge_status_t 1994 nxge_setup_dev(p_nxge_t nxgep) 1995 { 1996 nxge_status_t status = NXGE_OK; 1997 1998 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_dev port %d", 1999 nxgep->mac.portnum)); 2000 2001 status = nxge_link_init(nxgep); 2002 2003 if (fm_check_acc_handle(nxgep->dev_regs->nxge_regh) != DDI_FM_OK) { 2004 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2005 "port%d Bad register acc handle", nxgep->mac.portnum)); 2006 status = NXGE_ERROR; 2007 } 2008 2009 if (status != NXGE_OK) { 2010 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2011 " nxge_setup_dev status " 2012 "(xcvr init 0x%08x)", status)); 2013 goto nxge_setup_dev_exit; 2014 } 2015 2016 nxge_setup_dev_exit: 2017 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 2018 "<== nxge_setup_dev port %d status = 0x%08x", 2019 nxgep->mac.portnum, status)); 2020 2021 return (status); 2022 } 2023 2024 static void 2025 nxge_destroy_dev(p_nxge_t nxgep) 2026 { 2027 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_dev")); 2028 2029 (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP); 2030 2031 (void) nxge_hw_stop(nxgep); 2032 2033 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_dev")); 2034 } 2035 2036 static nxge_status_t 2037 nxge_setup_system_dma_pages(p_nxge_t nxgep) 2038 { 2039 int ddi_status = DDI_SUCCESS; 2040 uint_t count; 2041 ddi_dma_cookie_t cookie; 2042 uint_t iommu_pagesize; 2043 nxge_status_t status = NXGE_OK; 2044 2045 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_system_dma_pages")); 2046 nxgep->sys_page_sz = ddi_ptob(nxgep->dip, (ulong_t)1); 2047 if (nxgep->niu_type != N2_NIU) { 2048 iommu_pagesize = dvma_pagesize(nxgep->dip); 2049 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 2050 " nxge_setup_system_dma_pages: page %d (ddi_ptob %d) " 2051 " default_block_size %d iommu_pagesize %d", 2052 nxgep->sys_page_sz, 2053 ddi_ptob(nxgep->dip, (ulong_t)1), 2054 nxgep->rx_default_block_size, 2055 iommu_pagesize)); 2056 2057 if (iommu_pagesize != 0) { 2058 if (nxgep->sys_page_sz ==
iommu_pagesize) { 2059 if (iommu_pagesize > 0x4000) 2060 nxgep->sys_page_sz = 0x4000; 2061 } else { 2062 if (nxgep->sys_page_sz > iommu_pagesize) 2063 nxgep->sys_page_sz = iommu_pagesize; 2064 } 2065 } 2066 } 2067 nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1); 2068 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 2069 "==> nxge_setup_system_dma_pages: page %d (ddi_ptob %d) " 2070 "default_block_size %d page mask %d", 2071 nxgep->sys_page_sz, 2072 ddi_ptob(nxgep->dip, (ulong_t)1), 2073 nxgep->rx_default_block_size, 2074 nxgep->sys_page_mask)); 2075 2076 2077 switch (nxgep->sys_page_sz) { 2078 default: 2079 nxgep->sys_page_sz = 0x1000; 2080 nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1); 2081 nxgep->rx_default_block_size = 0x1000; 2082 nxgep->rx_bksize_code = RBR_BKSIZE_4K; 2083 break; 2084 case 0x1000: 2085 nxgep->rx_default_block_size = 0x1000; 2086 nxgep->rx_bksize_code = RBR_BKSIZE_4K; 2087 break; 2088 case 0x2000: 2089 nxgep->rx_default_block_size = 0x2000; 2090 nxgep->rx_bksize_code = RBR_BKSIZE_8K; 2091 break; 2092 case 0x4000: 2093 nxgep->rx_default_block_size = 0x4000; 2094 nxgep->rx_bksize_code = RBR_BKSIZE_16K; 2095 break; 2096 case 0x8000: 2097 nxgep->rx_default_block_size = 0x8000; 2098 nxgep->rx_bksize_code = RBR_BKSIZE_32K; 2099 break; 2100 } 2101 2102 #ifndef USE_RX_BIG_BUF 2103 nxge_rx_dma_attr.dma_attr_align = nxgep->sys_page_sz; 2104 #else 2105 nxgep->rx_default_block_size = 0x2000; 2106 nxgep->rx_bksize_code = RBR_BKSIZE_8K; 2107 #endif 2108 /* 2109 * Get the system DMA burst size. 2110 */ 2111 ddi_status = ddi_dma_alloc_handle(nxgep->dip, &nxge_tx_dma_attr, 2112 DDI_DMA_DONTWAIT, 0, 2113 &nxgep->dmasparehandle); 2114 if (ddi_status != DDI_SUCCESS) { 2115 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2116 "ddi_dma_alloc_handle: failed " 2117 " status 0x%x", ddi_status)); 2118 goto nxge_get_soft_properties_exit; 2119 } 2120 2121 ddi_status = ddi_dma_addr_bind_handle(nxgep->dmasparehandle, NULL, 2122 (caddr_t)nxgep->dmasparehandle, 2123 sizeof (nxgep->dmasparehandle), 2124 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 2125 DDI_DMA_DONTWAIT, 0, 2126 &cookie, &count); 2127 if (ddi_status != DDI_DMA_MAPPED) { 2128 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2129 "Binding spare handle to find system" 2130 " burstsize failed.")); 2131 ddi_status = DDI_FAILURE; 2132 goto nxge_get_soft_properties_fail1; 2133 } 2134 2135 nxgep->sys_burst_sz = ddi_dma_burstsizes(nxgep->dmasparehandle); 2136 (void) ddi_dma_unbind_handle(nxgep->dmasparehandle); 2137 2138 nxge_get_soft_properties_fail1: 2139 ddi_dma_free_handle(&nxgep->dmasparehandle); 2140 2141 nxge_get_soft_properties_exit: 2142 2143 if (ddi_status != DDI_SUCCESS) 2144 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 2145 2146 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 2147 "<== nxge_setup_system_dma_pages status = 0x%08x", status)); 2148 return (status); 2149 } 2150 2151 static nxge_status_t 2152 nxge_alloc_mem_pool(p_nxge_t nxgep) 2153 { 2154 nxge_status_t status = NXGE_OK; 2155 2156 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_alloc_mem_pool")); 2157 2158 status = nxge_alloc_rx_mem_pool(nxgep); 2159 if (status != NXGE_OK) { 2160 return (NXGE_ERROR); 2161 } 2162 2163 status = nxge_alloc_tx_mem_pool(nxgep); 2164 if (status != NXGE_OK) { 2165 nxge_free_rx_mem_pool(nxgep); 2166 return (NXGE_ERROR); 2167 } 2168 2169 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_alloc_mem_pool")); 2170 return (NXGE_OK); 2171 } 2172 2173 static void 2174 nxge_free_mem_pool(p_nxge_t nxgep) 2175 { 2176 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_mem_pool")); 2177 2178 nxge_free_rx_mem_pool(nxgep); 2179 
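/* The transmit side of the pool is freed next. */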
nxge_free_tx_mem_pool(nxgep); 2180 2181 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_mem_pool")); 2182 } 2183 2184 nxge_status_t 2185 nxge_alloc_rx_mem_pool(p_nxge_t nxgep) 2186 { 2187 uint32_t rdc_max; 2188 p_nxge_dma_pt_cfg_t p_all_cfgp; 2189 p_nxge_hw_pt_cfg_t p_cfgp; 2190 p_nxge_dma_pool_t dma_poolp; 2191 p_nxge_dma_common_t *dma_buf_p; 2192 p_nxge_dma_pool_t dma_cntl_poolp; 2193 p_nxge_dma_common_t *dma_cntl_p; 2194 uint32_t *num_chunks; /* per dma */ 2195 nxge_status_t status = NXGE_OK; 2196 2197 uint32_t nxge_port_rbr_size; 2198 uint32_t nxge_port_rbr_spare_size; 2199 uint32_t nxge_port_rcr_size; 2200 uint32_t rx_cntl_alloc_size; 2201 2202 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_mem_pool")); 2203 2204 p_all_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config; 2205 p_cfgp = (p_nxge_hw_pt_cfg_t)&p_all_cfgp->hw_config; 2206 rdc_max = NXGE_MAX_RDCS; 2207 2208 /* 2209 * Allocate memory for the common DMA data structures. 2210 */ 2211 dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t), 2212 KM_SLEEP); 2213 dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 2214 sizeof (p_nxge_dma_common_t) * rdc_max, KM_SLEEP); 2215 2216 dma_cntl_poolp = (p_nxge_dma_pool_t) 2217 KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP); 2218 dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 2219 sizeof (p_nxge_dma_common_t) * rdc_max, KM_SLEEP); 2220 2221 num_chunks = (uint32_t *)KMEM_ZALLOC( 2222 sizeof (uint32_t) * rdc_max, KM_SLEEP); 2223 2224 /* 2225 * Assume that each DMA channel will be configured with 2226 * the default block size. 2227 * RBR block counts are rounded up to a multiple of the batch count (16). 2228 */ 2229 nxge_port_rbr_size = p_all_cfgp->rbr_size; 2230 nxge_port_rcr_size = p_all_cfgp->rcr_size; 2231 2232 if (!nxge_port_rbr_size) { 2233 nxge_port_rbr_size = NXGE_RBR_RBB_DEFAULT; 2234 } 2235 if (nxge_port_rbr_size % NXGE_RXDMA_POST_BATCH) { 2236 nxge_port_rbr_size = (NXGE_RXDMA_POST_BATCH * 2237 (nxge_port_rbr_size / NXGE_RXDMA_POST_BATCH + 1)); 2238 } 2239 2240 p_all_cfgp->rbr_size = nxge_port_rbr_size; 2241 nxge_port_rbr_spare_size = nxge_rbr_spare_size; 2242 2243 if (nxge_port_rbr_spare_size % NXGE_RXDMA_POST_BATCH) { 2244 nxge_port_rbr_spare_size = (NXGE_RXDMA_POST_BATCH * 2245 (nxge_port_rbr_spare_size / NXGE_RXDMA_POST_BATCH + 1)); 2246 } 2247 if (nxge_port_rbr_size > RBR_DEFAULT_MAX_BLKS) { 2248 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 2249 "nxge_alloc_rx_mem_pool: RBR size too high %d, " 2250 "set to default %d", 2251 nxge_port_rbr_size, RBR_DEFAULT_MAX_BLKS)); 2252 nxge_port_rbr_size = RBR_DEFAULT_MAX_BLKS; 2253 } 2254 if (nxge_port_rcr_size > RCR_DEFAULT_MAX) { 2255 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 2256 "nxge_alloc_rx_mem_pool: RCR size too high %d, " 2257 "set to default %d", 2258 nxge_port_rcr_size, RCR_DEFAULT_MAX)); 2259 nxge_port_rcr_size = RCR_DEFAULT_MAX; 2260 } 2261 2262 /* 2263 * N2/NIU limits the descriptor sizes: contiguous memory 2264 * allocation for data buffers is capped at 4M (contig_mem_alloc), 2265 * and control buffers must be little endian and allocated with 2266 * the ddi/dki memory allocation functions.
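 * For illustration: with the default 4 KB receive block size, a ring of 1024 blocks (an assumed count) already needs a 4 KB * 1024 = 4 MB (1 << 22) contiguous allocation, which is exactly the contig_mem_alloc() ceiling checked below.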
2267 */ 2268 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2269 if (nxgep->niu_type == N2_NIU) { 2270 nxge_port_rbr_spare_size = 0; 2271 if ((nxge_port_rbr_size > NXGE_NIU_CONTIG_RBR_MAX) || 2272 (!ISP2(nxge_port_rbr_size))) { 2273 nxge_port_rbr_size = NXGE_NIU_CONTIG_RBR_MAX; 2274 } 2275 if ((nxge_port_rcr_size > NXGE_NIU_CONTIG_RCR_MAX) || 2276 (!ISP2(nxge_port_rcr_size))) { 2277 nxge_port_rcr_size = NXGE_NIU_CONTIG_RCR_MAX; 2278 } 2279 } 2280 #endif 2281 2282 /* 2283 * Addresses of receive block ring, receive completion ring and the 2284 * mailbox must be all cache-aligned (64 bytes). 2285 */ 2286 rx_cntl_alloc_size = nxge_port_rbr_size + nxge_port_rbr_spare_size; 2287 rx_cntl_alloc_size *= (sizeof (rx_desc_t)); 2288 rx_cntl_alloc_size += (sizeof (rcr_entry_t) * nxge_port_rcr_size); 2289 rx_cntl_alloc_size += sizeof (rxdma_mailbox_t); 2290 2291 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_alloc_rx_mem_pool: " 2292 "nxge_port_rbr_size = %d nxge_port_rbr_spare_size = %d " 2293 "nxge_port_rcr_size = %d " 2294 "rx_cntl_alloc_size = %d", 2295 nxge_port_rbr_size, nxge_port_rbr_spare_size, 2296 nxge_port_rcr_size, 2297 rx_cntl_alloc_size)); 2298 2299 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2300 if (nxgep->niu_type == N2_NIU) { 2301 uint32_t rx_buf_alloc_size = (nxgep->rx_default_block_size * 2302 (nxge_port_rbr_size + nxge_port_rbr_spare_size)); 2303 2304 if (!ISP2(rx_buf_alloc_size)) { 2305 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2306 "==> nxge_alloc_rx_mem_pool: " 2307 " must be power of 2")); 2308 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 2309 goto nxge_alloc_rx_mem_pool_exit; 2310 } 2311 2312 if (rx_buf_alloc_size > (1 << 22)) { 2313 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2314 "==> nxge_alloc_rx_mem_pool: " 2315 " limit size to 4M")); 2316 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 2317 goto nxge_alloc_rx_mem_pool_exit; 2318 } 2319 2320 if (rx_cntl_alloc_size < 0x2000) { 2321 rx_cntl_alloc_size = 0x2000; 2322 } 2323 } 2324 #endif 2325 nxgep->nxge_port_rbr_size = nxge_port_rbr_size; 2326 nxgep->nxge_port_rcr_size = nxge_port_rcr_size; 2327 nxgep->nxge_port_rbr_spare_size = nxge_port_rbr_spare_size; 2328 nxgep->nxge_port_rx_cntl_alloc_size = rx_cntl_alloc_size; 2329 2330 dma_poolp->ndmas = p_cfgp->max_rdcs; 2331 dma_poolp->num_chunks = num_chunks; 2332 dma_poolp->buf_allocated = B_TRUE; 2333 nxgep->rx_buf_pool_p = dma_poolp; 2334 dma_poolp->dma_buf_pool_p = dma_buf_p; 2335 2336 dma_cntl_poolp->ndmas = p_cfgp->max_rdcs; 2337 dma_cntl_poolp->buf_allocated = B_TRUE; 2338 nxgep->rx_cntl_pool_p = dma_cntl_poolp; 2339 dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p; 2340 2341 /* Allocate the receive rings, too. */ 2342 nxgep->rx_rbr_rings = 2343 KMEM_ZALLOC(sizeof (rx_rbr_rings_t), KM_SLEEP); 2344 nxgep->rx_rbr_rings->rbr_rings = 2345 KMEM_ZALLOC(sizeof (p_rx_rbr_ring_t) * rdc_max, KM_SLEEP); 2346 nxgep->rx_rcr_rings = 2347 KMEM_ZALLOC(sizeof (rx_rcr_rings_t), KM_SLEEP); 2348 nxgep->rx_rcr_rings->rcr_rings = 2349 KMEM_ZALLOC(sizeof (p_rx_rcr_ring_t) * rdc_max, KM_SLEEP); 2350 nxgep->rx_mbox_areas_p = 2351 KMEM_ZALLOC(sizeof (rx_mbox_areas_t), KM_SLEEP); 2352 nxgep->rx_mbox_areas_p->rxmbox_areas = 2353 KMEM_ZALLOC(sizeof (p_rx_mbox_t) * rdc_max, KM_SLEEP); 2354 2355 nxgep->rx_rbr_rings->ndmas = nxgep->rx_rcr_rings->ndmas = 2356 p_cfgp->max_rdcs; 2357 2358 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2359 "<== nxge_alloc_rx_mem_pool:status 0x%08x", status)); 2360 2361 nxge_alloc_rx_mem_pool_exit: 2362 return (status); 2363 } 2364 2365 /* 2366 * nxge_alloc_rxb 2367 * 2368 * Allocate buffers for an RDC. 
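 * (Both the data buffer chunks and the control memory, i.e. the RBR/RCR descriptor rings and the mailbox, for one receive DMA channel are allocated here.)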
* 2370 * Arguments: 2371 * nxgep 2372 * channel The channel to map into our kernel space. 2373 * 2374 * Notes: 2375 * 2376 * NPI function calls: 2377 * 2378 * NXGE function calls: 2379 * 2380 * Registers accessed: 2381 * 2382 * Context: 2383 * 2384 * Taking apart: 2385 * 2386 * Open questions: 2387 * 2388 */ 2389 nxge_status_t 2390 nxge_alloc_rxb( 2391 p_nxge_t nxgep, 2392 int channel) 2393 { 2394 size_t rx_buf_alloc_size; 2395 nxge_status_t status = NXGE_OK; 2396 2397 nxge_dma_common_t **data; 2398 nxge_dma_common_t **control; 2399 uint32_t *num_chunks; 2400 2401 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rxb")); 2402 2403 /* 2404 * Allocate memory for the receive buffers and descriptor rings. 2405 * Replace these allocation functions with the interface functions 2406 * provided by the partition manager if/when they are available. 2407 */ 2408 2409 /* 2410 * Allocate memory for the receive buffer blocks. 2411 */ 2412 rx_buf_alloc_size = (nxgep->rx_default_block_size * 2413 (nxgep->nxge_port_rbr_size + nxgep->nxge_port_rbr_spare_size)); 2414 2415 data = &nxgep->rx_buf_pool_p->dma_buf_pool_p[channel]; 2416 num_chunks = &nxgep->rx_buf_pool_p->num_chunks[channel]; 2417 2418 if ((status = nxge_alloc_rx_buf_dma( 2419 nxgep, channel, data, rx_buf_alloc_size, 2420 nxgep->rx_default_block_size, num_chunks)) != NXGE_OK) { 2421 return (status); 2422 } 2423 2424 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_alloc_rxb(): " 2425 "dma %d dma_buf_p %llx &dma_buf_p %llx", channel, *data, data)); 2426 2427 /* 2428 * Allocate memory for descriptor rings and mailbox. 2429 */ 2430 control = &nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel]; 2431 2432 if ((status = nxge_alloc_rx_cntl_dma( 2433 nxgep, channel, control, nxgep->nxge_port_rx_cntl_alloc_size)) 2434 != NXGE_OK) { 2435 nxge_free_rx_cntl_dma(nxgep, *control); 2436 (*data)->buf_alloc_state |= BUF_ALLOCATED_WAIT_FREE; 2437 nxge_free_rx_buf_dma(nxgep, *data, *num_chunks); 2438 return (status); 2439 } 2440 2441 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2442 "<== nxge_alloc_rxb: status 0x%08x", status)); 2443 2444 return (status); 2445 } 2446 2447 void 2448 nxge_free_rxb( 2449 p_nxge_t nxgep, 2450 int channel) 2451 { 2452 nxge_dma_common_t *data; 2453 nxge_dma_common_t *control; 2454 uint32_t num_chunks; 2455 2456 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_rxb")); 2457 2458 data = nxgep->rx_buf_pool_p->dma_buf_pool_p[channel]; 2459 num_chunks = nxgep->rx_buf_pool_p->num_chunks[channel]; 2460 nxge_free_rx_buf_dma(nxgep, data, num_chunks); 2461 2462 nxgep->rx_buf_pool_p->dma_buf_pool_p[channel] = 0; 2463 nxgep->rx_buf_pool_p->num_chunks[channel] = 0; 2464 2465 control = nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel]; 2466 nxge_free_rx_cntl_dma(nxgep, control); 2467 2468 nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel] = 0; 2469 2470 KMEM_FREE(data, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 2471 KMEM_FREE(control, sizeof (nxge_dma_common_t)); 2472 2473 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_rxb")); 2474 } 2475 2476 static void 2477 nxge_free_rx_mem_pool(p_nxge_t nxgep) 2478 { 2479 int rdc_max = NXGE_MAX_RDCS; 2480 2481 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_rx_mem_pool")); 2482 2483 if (!nxgep->rx_buf_pool_p || !nxgep->rx_buf_pool_p->buf_allocated) { 2484 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2485 "<== nxge_free_rx_mem_pool " 2486 "(null rx buf pool or buf not allocated)")); 2487 return; 2488 } 2489 if (!nxgep->rx_cntl_pool_p || !nxgep->rx_cntl_pool_p->buf_allocated) { 2490 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2491 "<== nxge_free_rx_mem_pool " 2492 "(null rx cntl buf pool or cntl buf not allocated)")); 2493 return; 2494 } 2495 2496 KMEM_FREE(nxgep->rx_cntl_pool_p->dma_buf_pool_p, 2497 sizeof (p_nxge_dma_common_t) * rdc_max); 2498 KMEM_FREE(nxgep->rx_cntl_pool_p, sizeof (nxge_dma_pool_t)); 2499 2500 KMEM_FREE(nxgep->rx_buf_pool_p->num_chunks, 2501 sizeof (uint32_t) * rdc_max); 2502 KMEM_FREE(nxgep->rx_buf_pool_p->dma_buf_pool_p, 2503 sizeof (p_nxge_dma_common_t) * rdc_max); 2504 KMEM_FREE(nxgep->rx_buf_pool_p, sizeof (nxge_dma_pool_t)); 2505 2506 nxgep->rx_buf_pool_p = 0; 2507 nxgep->rx_cntl_pool_p = 0; 2508 2509 KMEM_FREE(nxgep->rx_rbr_rings->rbr_rings, 2510 sizeof (p_rx_rbr_ring_t) * rdc_max); 2511 KMEM_FREE(nxgep->rx_rbr_rings, sizeof (rx_rbr_rings_t)); 2512 KMEM_FREE(nxgep->rx_rcr_rings->rcr_rings, 2513 sizeof (p_rx_rcr_ring_t) * rdc_max); 2514 KMEM_FREE(nxgep->rx_rcr_rings, sizeof (rx_rcr_rings_t)); 2515 KMEM_FREE(nxgep->rx_mbox_areas_p->rxmbox_areas, 2516 sizeof (p_rx_mbox_t) * rdc_max); 2517 KMEM_FREE(nxgep->rx_mbox_areas_p, sizeof (rx_mbox_areas_t)); 2518 2519 nxgep->rx_rbr_rings = 0; 2520 nxgep->rx_rcr_rings = 0; 2521 nxgep->rx_mbox_areas_p = 0; 2522 2523 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_rx_mem_pool")); 2524 } 2525 2526 2527 static nxge_status_t 2528 nxge_alloc_rx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel, 2529 p_nxge_dma_common_t *dmap, 2530 size_t alloc_size, size_t block_size, uint32_t *num_chunks) 2531 { 2532 p_nxge_dma_common_t rx_dmap; 2533 nxge_status_t status = NXGE_OK; 2534 size_t total_alloc_size; 2535 size_t allocated = 0; 2536 int i, size_index, array_size; 2537 boolean_t use_kmem_alloc = B_FALSE; 2538 2539 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_buf_dma")); 2540 2541 rx_dmap = (p_nxge_dma_common_t) 2542 KMEM_ZALLOC(sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK, 2543 KM_SLEEP); 2544 2545 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2546 " alloc_rx_buf_dma rdc %d asize %x bsize %x bbuf %llx ", 2547 dma_channel, alloc_size, block_size, dmap)); 2548 2549 total_alloc_size = alloc_size; 2550 2551 #if defined(RX_USE_RECLAIM_POST) 2552 total_alloc_size = alloc_size + alloc_size/4; 2553 #endif 2554 2555 i = 0; 2556 size_index = 0; 2557 array_size = sizeof (alloc_sizes)/sizeof (size_t); 2558 while ((size_index < array_size) && 2559 (alloc_sizes[size_index] < alloc_size)) 2560 size_index++; 2561 if (size_index >= array_size) { 2562 size_index = array_size - 1; 2563 } 2564 2565 /* For Neptune, use kmem_alloc if the kmem flag is set. */ 2566 if (nxgep->niu_type != N2_NIU && nxge_use_kmem_alloc) { 2567 use_kmem_alloc = B_TRUE; 2568 #if defined(__i386) || defined(__amd64) 2569 size_index = 0; 2570 #endif 2571 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2572 "==> nxge_alloc_rx_buf_dma: " 2573 "Neptune use kmem_alloc() - size_index %d", 2574 size_index)); 2575 } 2576 2577 while ((allocated < total_alloc_size) && 2578 (size_index >= 0) && (i < NXGE_DMA_BLOCK)) { 2579 rx_dmap[i].dma_chunk_index = i; 2580 rx_dmap[i].block_size = block_size; 2581 rx_dmap[i].alength = alloc_sizes[size_index]; 2582 rx_dmap[i].orig_alength = rx_dmap[i].alength; 2583 rx_dmap[i].nblocks = alloc_sizes[size_index] / block_size; 2584 rx_dmap[i].dma_channel = dma_channel; 2585 rx_dmap[i].contig_alloc_type = B_FALSE; 2586 rx_dmap[i].kmem_alloc_type = B_FALSE; 2587 rx_dmap[i].buf_alloc_type = DDI_MEM_ALLOC; 2588 2589 /* 2590 * N2/NIU: data buffers must be contiguous as the driver 2591 * needs to call the Hypervisor API to set up 2592 * logical pages.
2593 */ 2594 if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) { 2595 rx_dmap[i].contig_alloc_type = B_TRUE; 2596 rx_dmap[i].buf_alloc_type = CONTIG_MEM_ALLOC; 2597 } else if (use_kmem_alloc) { 2598 /* For Neptune, use kmem_alloc */ 2599 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2600 "==> nxge_alloc_rx_buf_dma: " 2601 "Neptune use kmem_alloc()")); 2602 rx_dmap[i].kmem_alloc_type = B_TRUE; 2603 rx_dmap[i].buf_alloc_type = KMEM_ALLOC; 2604 } 2605 2606 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2607 "alloc_rx_buf_dma rdc %d chunk %d bufp %llx size %x " 2608 "i %d nblocks %d alength %d", 2609 dma_channel, i, &rx_dmap[i], block_size, 2610 i, rx_dmap[i].nblocks, 2611 rx_dmap[i].alength)); 2612 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 2613 &nxge_rx_dma_attr, 2614 rx_dmap[i].alength, 2615 &nxge_dev_buf_dma_acc_attr, 2616 DDI_DMA_READ | DDI_DMA_STREAMING, 2617 (p_nxge_dma_common_t)(&rx_dmap[i])); 2618 if (status != NXGE_OK) { 2619 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2620 "nxge_alloc_rx_buf_dma: Alloc Failed: " 2621 "dma %d size_index %d size requested %d", 2622 dma_channel, 2623 size_index, 2624 rx_dmap[i].alength)); 2625 size_index--; 2626 } else { 2627 rx_dmap[i].buf_alloc_state = BUF_ALLOCATED; 2628 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2629 " nxge_alloc_rx_buf_dma DONE alloc mem: " 2630 "dma %d dma_buf_p $%p kaddrp $%p alength %d " 2631 "buf_alloc_state %d alloc_type %d", 2632 dma_channel, 2633 &rx_dmap[i], 2634 rx_dmap[i].kaddrp, 2635 rx_dmap[i].alength, 2636 rx_dmap[i].buf_alloc_state, 2637 rx_dmap[i].buf_alloc_type)); 2638 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2639 " alloc_rx_buf_dma allocated rdc %d " 2640 "chunk %d size %x dvma %x bufp %llx kaddrp $%p", 2641 dma_channel, i, rx_dmap[i].alength, 2642 rx_dmap[i].ioaddr_pp, &rx_dmap[i], 2643 rx_dmap[i].kaddrp)); 2644 i++; 2645 allocated += alloc_sizes[size_index]; 2646 } 2647 } 2648 2649 if (allocated < total_alloc_size) { 2650 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2651 "==> nxge_alloc_rx_buf_dma: not enough for channel %d " 2652 "allocated 0x%x requested 0x%x", 2653 dma_channel, 2654 allocated, total_alloc_size)); 2655 status = NXGE_ERROR; 2656 goto nxge_alloc_rx_mem_fail1; 2657 } 2658 2659 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2660 "==> nxge_alloc_rx_buf_dma: Allocated for channel %d " 2661 "allocated 0x%x requested 0x%x", 2662 dma_channel, 2663 allocated, total_alloc_size)); 2664 2665 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2666 " alloc_rx_buf_dma rdc %d allocated %d chunks", 2667 dma_channel, i)); 2668 *num_chunks = i; 2669 *dmap = rx_dmap; 2670 2671 goto nxge_alloc_rx_mem_exit; 2672 2673 nxge_alloc_rx_mem_fail1: 2674 KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 2675 2676 nxge_alloc_rx_mem_exit: 2677 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2678 "<== nxge_alloc_rx_buf_dma status 0x%08x", status)); 2679 2680 return (status); 2681 } 2682 2683 /*ARGSUSED*/ 2684 static void 2685 nxge_free_rx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap, 2686 uint32_t num_chunks) 2687 { 2688 int i; 2689 2690 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2691 "==> nxge_free_rx_buf_dma: # of chunks %d", num_chunks)); 2692 2693 if (dmap == 0) 2694 return; 2695 2696 for (i = 0; i < num_chunks; i++) { 2697 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2698 "==> nxge_free_rx_buf_dma: chunk %d dmap 0x%llx", 2699 i, dmap)); 2700 nxge_dma_free_rx_data_buf(dmap++); 2701 } 2702 2703 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_rx_buf_dma")); 2704 } 2705 2706 /*ARGSUSED*/ 2707 static nxge_status_t 2708 nxge_alloc_rx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel, 2709 p_nxge_dma_common_t *dmap, size_t 
size) 2710 { 2711 p_nxge_dma_common_t rx_dmap; 2712 nxge_status_t status = NXGE_OK; 2713 2714 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_cntl_dma")); 2715 2716 rx_dmap = (p_nxge_dma_common_t) 2717 KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP); 2718 2719 rx_dmap->contig_alloc_type = B_FALSE; 2720 rx_dmap->kmem_alloc_type = B_FALSE; 2721 2722 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 2723 &nxge_desc_dma_attr, 2724 size, 2725 &nxge_dev_desc_dma_acc_attr, 2726 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 2727 rx_dmap); 2728 if (status != NXGE_OK) { 2729 goto nxge_alloc_rx_cntl_dma_fail1; 2730 } 2731 2732 *dmap = rx_dmap; 2733 goto nxge_alloc_rx_cntl_dma_exit; 2734 2735 nxge_alloc_rx_cntl_dma_fail1: 2736 KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t)); 2737 2738 nxge_alloc_rx_cntl_dma_exit: 2739 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2740 "<== nxge_alloc_rx_cntl_dma status 0x%08x", status)); 2741 2742 return (status); 2743 } 2744 2745 /*ARGSUSED*/ 2746 static void 2747 nxge_free_rx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap) 2748 { 2749 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_rx_cntl_dma")); 2750 2751 if (dmap == 0) 2752 return; 2753 2754 nxge_dma_mem_free(dmap); 2755 2756 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_rx_cntl_dma")); 2757 } 2758 2759 typedef struct { 2760 size_t tx_size; 2761 size_t cr_size; 2762 size_t threshhold; 2763 } nxge_tdc_sizes_t; 2764 2765 static 2766 nxge_status_t 2767 nxge_tdc_sizes( 2768 nxge_t *nxgep, 2769 nxge_tdc_sizes_t *sizes) 2770 { 2771 uint32_t threshhold; /* The bcopy() threshold */ 2772 size_t tx_size; /* Transmit buffer size */ 2773 size_t cr_size; /* Completion ring size */ 2774 2775 /* 2776 * Assume that each DMA channel will be configured with the 2777 * default transmit buffer size for copying transmit data. 2778 * (If a packet is bigger than this, it will not be copied.) 2779 */ 2780 if (nxgep->niu_type == N2_NIU) { 2781 threshhold = TX_BCOPY_SIZE; 2782 } else { 2783 threshhold = nxge_bcopy_thresh; 2784 } 2785 tx_size = nxge_tx_ring_size * threshhold; 2786 2787 cr_size = nxge_tx_ring_size * sizeof (tx_desc_t); 2788 cr_size += sizeof (txdma_mailbox_t); 2789 2790 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2791 if (nxgep->niu_type == N2_NIU) { 2792 if (!ISP2(tx_size)) { 2793 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2794 "==> nxge_tdc_sizes: Tx size" 2795 " must be power of 2")); 2796 return (NXGE_ERROR); 2797 } 2798 2799 if (tx_size > (1 << 22)) { 2800 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2801 "==> nxge_tdc_sizes: Tx size" 2802 " limited to 4M")); 2803 return (NXGE_ERROR); 2804 } 2805 2806 if (cr_size < 0x2000) 2807 cr_size = 0x2000; 2808 } 2809 #endif 2810 2811 sizes->threshhold = threshhold; 2812 sizes->tx_size = tx_size; 2813 sizes->cr_size = cr_size; 2814 2815 return (NXGE_OK); 2816 } 2817 /* 2818 * nxge_alloc_txb 2819 * 2820 * Allocate buffers for a TDC. 2821 * 2822 * Arguments: 2823 * nxgep 2824 * channel The channel to map into our kernel space.
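 * (Sizing comes from nxge_tdc_sizes() above: for illustration, assuming a 2 KB bcopy threshold and a 1024-entry ring, tx_size = 1024 * 2 KB = 2 MB and cr_size = 1024 * sizeof (tx_desc_t) + sizeof (txdma_mailbox_t). Both derive from nxge_tx_ring_size and, on Neptune, the nxge_bcopy_thresh tunable; tunables may be set from /etc/system, e.g. "set nxge:nxge_tx_ring_size = 1024".)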
* 2826 * Notes: 2827 * 2828 * NPI function calls: 2829 * 2830 * NXGE function calls: 2831 * 2832 * Registers accessed: 2833 * 2834 * Context: 2835 * 2836 * Taking apart: 2837 * 2838 * Open questions: 2839 * 2840 */ 2841 nxge_status_t 2842 nxge_alloc_txb( 2843 p_nxge_t nxgep, 2844 int channel) 2845 { 2846 nxge_dma_common_t **dma_buf_p; 2847 nxge_dma_common_t **dma_cntl_p; 2848 uint32_t *num_chunks; 2849 nxge_status_t status = NXGE_OK; 2850 2851 nxge_tdc_sizes_t sizes; 2852 2853 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_txb")); 2854 2855 if (nxge_tdc_sizes(nxgep, &sizes) != NXGE_OK) 2856 return (NXGE_ERROR); 2857 2858 /* 2859 * Allocate memory for transmit buffers and descriptor rings. 2860 * Replace these allocation functions with the interface functions 2861 * provided by the partition manager if/when they become available. 2862 */ 2863 dma_buf_p = &nxgep->tx_buf_pool_p->dma_buf_pool_p[channel]; 2864 num_chunks = &nxgep->tx_buf_pool_p->num_chunks[channel]; 2865 2866 dma_cntl_p = &nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel]; 2867 2868 /* 2869 * Allocate memory for the transmit buffer pool. 2870 */ 2875 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2876 "sizes: tx: %ld, cr:%ld, th:%ld", 2877 sizes.tx_size, sizes.cr_size, sizes.threshhold)); 2878 2879 *num_chunks = 0; 2880 status = nxge_alloc_tx_buf_dma(nxgep, channel, dma_buf_p, 2881 sizes.tx_size, sizes.threshhold, num_chunks); 2882 if (status != NXGE_OK) { 2883 cmn_err(CE_NOTE, "nxge_alloc_tx_buf_dma failed!"); 2884 return (status); 2885 } 2886 2887 /* 2888 * Allocate memory for descriptor rings and mailbox. 2889 */ 2890 status = nxge_alloc_tx_cntl_dma(nxgep, channel, dma_cntl_p, 2891 sizes.cr_size); 2892 if (status != NXGE_OK) { 2893 nxge_free_tx_buf_dma(nxgep, *dma_buf_p, *num_chunks); 2894 cmn_err(CE_NOTE, "nxge_alloc_tx_cntl_dma failed!"); 2895 return (status); 2896 } 2897 2898 return (NXGE_OK); 2899 } 2900 2901 void 2902 nxge_free_txb( 2903 p_nxge_t nxgep, 2904 int channel) 2905 { 2906 nxge_dma_common_t *data; 2907 nxge_dma_common_t *control; 2908 uint32_t num_chunks; 2909 2910 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_txb")); 2911 2912 data = nxgep->tx_buf_pool_p->dma_buf_pool_p[channel]; 2913 num_chunks = nxgep->tx_buf_pool_p->num_chunks[channel]; 2914 nxge_free_tx_buf_dma(nxgep, data, num_chunks); 2915 2916 nxgep->tx_buf_pool_p->dma_buf_pool_p[channel] = 0; 2917 nxgep->tx_buf_pool_p->num_chunks[channel] = 0; 2918 2919 control = nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel]; 2920 nxge_free_tx_cntl_dma(nxgep, control); 2921 2922 nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel] = 0; 2923 2924 KMEM_FREE(data, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 2925 KMEM_FREE(control, sizeof (nxge_dma_common_t)); 2926 2927 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_txb")); 2928 } 2929 2930 /* 2931 * nxge_alloc_tx_mem_pool 2932 * 2933 * This function allocates all of the per-port TDC control data structures. 2934 * The per-channel (TDC) data structures are allocated when needed.
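 * (The channel buffers themselves are allocated later, by nxge_alloc_txb(), when a TDC is brought up.)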
* 2936 * Arguments: 2937 * nxgep 2938 * 2939 * Notes: 2940 * 2941 * Context: 2942 * Any domain 2943 */ 2944 nxge_status_t 2945 nxge_alloc_tx_mem_pool(p_nxge_t nxgep) 2946 { 2947 nxge_hw_pt_cfg_t *p_cfgp; 2948 nxge_dma_pool_t *dma_poolp; 2949 nxge_dma_common_t **dma_buf_p; 2950 nxge_dma_pool_t *dma_cntl_poolp; 2951 nxge_dma_common_t **dma_cntl_p; 2952 uint32_t *num_chunks; /* per dma */ 2953 int tdc_max; 2954 2955 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_alloc_tx_mem_pool")); 2956 2957 p_cfgp = &nxgep->pt_config.hw_config; 2958 tdc_max = NXGE_MAX_TDCS; 2959 2960 /* 2961 * Allocate memory for each transmit DMA channel. 2962 */ 2963 dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t), 2964 KM_SLEEP); 2965 dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 2966 sizeof (p_nxge_dma_common_t) * tdc_max, KM_SLEEP); 2967 2968 dma_cntl_poolp = (p_nxge_dma_pool_t) 2969 KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP); 2970 dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 2971 sizeof (p_nxge_dma_common_t) * tdc_max, KM_SLEEP); 2972 2973 if (nxge_tx_ring_size > TDC_DEFAULT_MAX) { 2974 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 2975 "nxge_alloc_tx_mem_pool: TDC too high %d, " 2976 "set to default %d", 2977 nxge_tx_ring_size, TDC_DEFAULT_MAX)); 2978 nxge_tx_ring_size = TDC_DEFAULT_MAX; 2979 } 2980 2981 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2982 /* 2983 * N2/NIU limits the descriptor sizes: contiguous memory 2984 * allocation for data buffers is capped at 4M (contig_mem_alloc), 2985 * and control buffers must be little endian and allocated with 2986 * the ddi/dki memory allocation functions. The transmit ring is 2987 * limited to 8K (includes the mailbox). 2988 */ 2989 if (nxgep->niu_type == N2_NIU) { 2990 if ((nxge_tx_ring_size > NXGE_NIU_CONTIG_TX_MAX) || 2991 (!ISP2(nxge_tx_ring_size))) { 2992 nxge_tx_ring_size = NXGE_NIU_CONTIG_TX_MAX; 2993 } 2994 } 2995 #endif 2996 2997 nxgep->nxge_port_tx_ring_size = nxge_tx_ring_size; 2998 2999 num_chunks = (uint32_t *)KMEM_ZALLOC( 3000 sizeof (uint32_t) * tdc_max, KM_SLEEP); 3001 3002 dma_poolp->ndmas = p_cfgp->tdc.owned; 3003 dma_poolp->num_chunks = num_chunks; 3004 dma_poolp->dma_buf_pool_p = dma_buf_p; 3005 nxgep->tx_buf_pool_p = dma_poolp; 3006 3007 dma_poolp->buf_allocated = B_TRUE; 3008 3009 dma_cntl_poolp->ndmas = p_cfgp->tdc.owned; 3010 dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p; 3011 nxgep->tx_cntl_pool_p = dma_cntl_poolp; 3012 3013 dma_cntl_poolp->buf_allocated = B_TRUE; 3014 3015 nxgep->tx_rings = 3016 KMEM_ZALLOC(sizeof (tx_rings_t), KM_SLEEP); 3017 nxgep->tx_rings->rings = 3018 KMEM_ZALLOC(sizeof (p_tx_ring_t) * tdc_max, KM_SLEEP); 3019 nxgep->tx_mbox_areas_p = 3020 KMEM_ZALLOC(sizeof (tx_mbox_areas_t), KM_SLEEP); 3021 nxgep->tx_mbox_areas_p->txmbox_areas_p = 3022 KMEM_ZALLOC(sizeof (p_tx_mbox_t) * tdc_max, KM_SLEEP); 3023 3024 nxgep->tx_rings->ndmas = p_cfgp->tdc.owned; 3025 3026 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 3027 "==> nxge_alloc_tx_mem_pool: tdc_max %d poolp->ndmas %d", 3028 tdc_max, dma_poolp->ndmas)); 3029 3030 return (NXGE_OK); 3031 } 3032 3033 nxge_status_t 3034 nxge_alloc_tx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel, 3035 p_nxge_dma_common_t *dmap, size_t alloc_size, 3036 size_t block_size, uint32_t *num_chunks) 3037 { 3038 p_nxge_dma_common_t tx_dmap; 3039 nxge_status_t status = NXGE_OK; 3040 size_t total_alloc_size; 3041 size_t allocated = 0; 3042 int i, size_index, array_size; 3043 3044 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_buf_dma")); 3045 3046 tx_dmap = (p_nxge_dma_common_t) 3047 KMEM_ZALLOC(sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK, 3048 KM_SLEEP); 3049 3050 total_alloc_size = alloc_size; 3051 i = 0; 3052 size_index = 0; 3053 array_size = sizeof (alloc_sizes) / sizeof (size_t); 3054 while ((size_index < array_size) && 3055 (alloc_sizes[size_index] < alloc_size)) 3056 size_index++; 3057 if (size_index >= array_size) { 3058 size_index = array_size - 1; 3059 } 3060 3061 while ((allocated < total_alloc_size) && 3062 (size_index >= 0) && (i < NXGE_DMA_BLOCK)) { 3063 3064 tx_dmap[i].dma_chunk_index = i; 3065 tx_dmap[i].block_size = block_size; 3066 tx_dmap[i].alength = alloc_sizes[size_index]; 3067 tx_dmap[i].orig_alength = tx_dmap[i].alength; 3068 tx_dmap[i].nblocks = alloc_sizes[size_index] / block_size; 3069 tx_dmap[i].dma_channel = dma_channel; 3070 tx_dmap[i].contig_alloc_type = B_FALSE; 3071 tx_dmap[i].kmem_alloc_type = B_FALSE; 3072 3073 /* 3074 * N2/NIU: data buffers must be contiguous as the driver 3075 * needs to call the Hypervisor API to set up 3076 * logical pages. 3077 */ 3078 if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) { 3079 tx_dmap[i].contig_alloc_type = B_TRUE; 3080 } 3081 3082 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 3083 &nxge_tx_dma_attr, 3084 tx_dmap[i].alength, 3085 &nxge_dev_buf_dma_acc_attr, 3086 DDI_DMA_WRITE | DDI_DMA_STREAMING, 3087 (p_nxge_dma_common_t)(&tx_dmap[i])); 3088 if (status != NXGE_OK) { 3089 size_index--; 3090 } else { 3091 i++; 3092 allocated += alloc_sizes[size_index]; 3093 } 3094 } 3095 3096 if (allocated < total_alloc_size) { 3097 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3098 "==> nxge_alloc_tx_buf_dma: not enough for channel %d: " 3099 "allocated 0x%x requested 0x%x", 3100 dma_channel, 3101 allocated, total_alloc_size)); 3102 status = NXGE_ERROR; 3103 goto nxge_alloc_tx_mem_fail1; 3104 } 3105 3106 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3107 "==> nxge_alloc_tx_buf_dma: Allocated for channel %d: " 3108 "allocated 0x%x requested 0x%x", 3109 dma_channel, 3110 allocated, total_alloc_size)); 3111 3112 *num_chunks = i; 3113 *dmap = tx_dmap; 3114 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3115 "==> nxge_alloc_tx_buf_dma dmap 0x%016llx num chunks %d", 3116 *dmap, i)); 3117 goto nxge_alloc_tx_mem_exit; 3118 3119 nxge_alloc_tx_mem_fail1: 3120 KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 3121 3122 nxge_alloc_tx_mem_exit: 3123 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3124 "<== nxge_alloc_tx_buf_dma status 0x%08x", status)); 3125 3126 return (status); 3127 } 3128 3129 /*ARGSUSED*/ 3130 static void 3131 nxge_free_tx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap, 3132 uint32_t num_chunks) 3133 { 3134 int i; 3135 3136 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_tx_buf_dma")); 3137 3138 if (dmap == 0) 3139 return; 3140 3141 for (i = 0; i < num_chunks; i++) { 3142 nxge_dma_mem_free(dmap++); 3143 } 3144 3145 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_tx_buf_dma")); 3146 } 3147 3148 /*ARGSUSED*/ 3149 nxge_status_t 3150 nxge_alloc_tx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel, 3151 p_nxge_dma_common_t *dmap, size_t size) 3152 { 3153 p_nxge_dma_common_t tx_dmap; 3154 nxge_status_t status = NXGE_OK; 3155 3156 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_cntl_dma")); 3157 tx_dmap = (p_nxge_dma_common_t) 3158 KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP); 3159 3160 tx_dmap->contig_alloc_type = B_FALSE; 3161 tx_dmap->kmem_alloc_type = B_FALSE; 3162 3163 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 3164 &nxge_desc_dma_attr, 3165 size, 3166 &nxge_dev_desc_dma_acc_attr, 3167 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 3168 tx_dmap); 3169 if (status != NXGE_OK) { 3170
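/* Could not allocate or bind the descriptor/mailbox DMA memory. */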
goto nxge_alloc_tx_cntl_dma_fail1; 3171 } 3172 3173 *dmap = tx_dmap; 3174 goto nxge_alloc_tx_cntl_dma_exit; 3175 3176 nxge_alloc_tx_cntl_dma_fail1: 3177 KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t)); 3178 3179 nxge_alloc_tx_cntl_dma_exit: 3180 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3181 "<== nxge_alloc_tx_cntl_dma status 0x%08x", status)); 3182 3183 return (status); 3184 } 3185 3186 /*ARGSUSED*/ 3187 static void 3188 nxge_free_tx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap) 3189 { 3190 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_tx_cntl_dma")); 3191 3192 if (dmap == 0) 3193 return; 3194 3195 nxge_dma_mem_free(dmap); 3196 3197 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_tx_cntl_dma")); 3198 } 3199 3200 /* 3201 * nxge_free_tx_mem_pool 3202 * 3203 * This function frees all of the per-port TDC control data structures. 3204 * The per-channel (TDC) data structures are freed when the channel 3205 * is stopped. 3206 * 3207 * Arguments: 3208 * nxgep 3209 * 3210 * Notes: 3211 * 3212 * Context: 3213 * Any domain 3214 */ 3215 static void 3216 nxge_free_tx_mem_pool(p_nxge_t nxgep) 3217 { 3218 int tdc_max = NXGE_MAX_TDCS; 3219 3220 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_tx_mem_pool")); 3221 3222 if (!nxgep->tx_buf_pool_p || !nxgep->tx_buf_pool_p->buf_allocated) { 3223 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3224 "<== nxge_free_tx_mem_pool " 3225 "(null tx buf pool or buf not allocated)")); 3226 return; 3227 } 3228 if (!nxgep->tx_cntl_pool_p || !nxgep->tx_cntl_pool_p->buf_allocated) { 3229 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3230 "<== nxge_free_tx_mem_pool " 3231 "(null tx cntl buf pool or cntl buf not allocated)")); 3232 return; 3233 } 3234 3235 /* 1. Free the mailboxes. */ 3236 KMEM_FREE(nxgep->tx_mbox_areas_p->txmbox_areas_p, 3237 sizeof (p_tx_mbox_t) * tdc_max); 3238 KMEM_FREE(nxgep->tx_mbox_areas_p, sizeof (tx_mbox_areas_t)); 3239 3240 nxgep->tx_mbox_areas_p = 0; 3241 3242 /* 2. Free the transmit ring arrays. */ 3243 KMEM_FREE(nxgep->tx_rings->rings, 3244 sizeof (p_tx_ring_t) * tdc_max); 3245 KMEM_FREE(nxgep->tx_rings, sizeof (tx_rings_t)); 3246 3247 nxgep->tx_rings = 0; 3248 3249 /* 3. Free the completion ring data structures. */ 3250 KMEM_FREE(nxgep->tx_cntl_pool_p->dma_buf_pool_p, 3251 sizeof (p_nxge_dma_common_t) * tdc_max); 3252 KMEM_FREE(nxgep->tx_cntl_pool_p, sizeof (nxge_dma_pool_t)); 3253 3254 nxgep->tx_cntl_pool_p = 0; 3255 3256 /* 4. Free the data ring data structures. */ 3257 KMEM_FREE(nxgep->tx_buf_pool_p->num_chunks, 3258 sizeof (uint32_t) * tdc_max); 3259 KMEM_FREE(nxgep->tx_buf_pool_p->dma_buf_pool_p, 3260 sizeof (p_nxge_dma_common_t) * tdc_max); 3261 KMEM_FREE(nxgep->tx_buf_pool_p, sizeof (nxge_dma_pool_t)); 3262 3263 nxgep->tx_buf_pool_p = 0; 3264 3265 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_tx_mem_pool")); 3266 } 3267 3268 /*ARGSUSED*/ 3269 static nxge_status_t 3270 nxge_dma_mem_alloc(p_nxge_t nxgep, dma_method_t method, 3271 struct ddi_dma_attr *dma_attrp, 3272 size_t length, ddi_device_acc_attr_t *acc_attr_p, uint_t xfer_flags, 3273 p_nxge_dma_common_t dma_p) 3274 { 3275 caddr_t kaddrp; 3276 int ddi_status = DDI_SUCCESS; 3277 boolean_t contig_alloc_type; 3278 boolean_t kmem_alloc_type; 3279 3280 contig_alloc_type = dma_p->contig_alloc_type; 3281 3282 if (contig_alloc_type && (nxgep->niu_type != N2_NIU)) { 3283 /* 3284 * contig_alloc_type for contiguous memory only allowed 3285 * for N2/NIU.
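 * (Neptune devices instead take the ddi_dma_mem_alloc() or kmem_alloc() paths in the switch below.)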
3286 */ 3287 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3288 "nxge_dma_mem_alloc: alloc type not allowed (%d)", 3289 dma_p->contig_alloc_type)); 3290 return (NXGE_ERROR | NXGE_DDI_FAILED); 3291 } 3292 3293 dma_p->dma_handle = NULL; 3294 dma_p->acc_handle = NULL; 3295 dma_p->kaddrp = dma_p->last_kaddrp = NULL; 3296 dma_p->first_ioaddr_pp = dma_p->last_ioaddr_pp = NULL; 3297 ddi_status = ddi_dma_alloc_handle(nxgep->dip, dma_attrp, 3298 DDI_DMA_DONTWAIT, NULL, &dma_p->dma_handle); 3299 if (ddi_status != DDI_SUCCESS) { 3300 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3301 "nxge_dma_mem_alloc:ddi_dma_alloc_handle failed.")); 3302 return (NXGE_ERROR | NXGE_DDI_FAILED); 3303 } 3304 3305 kmem_alloc_type = dma_p->kmem_alloc_type; 3306 3307 switch (contig_alloc_type) { 3308 case B_FALSE: 3309 switch (kmem_alloc_type) { 3310 case B_FALSE: 3311 ddi_status = ddi_dma_mem_alloc(dma_p->dma_handle, 3312 length, 3313 acc_attr_p, 3314 xfer_flags, 3315 DDI_DMA_DONTWAIT, 0, &kaddrp, &dma_p->alength, 3316 &dma_p->acc_handle); 3317 if (ddi_status != DDI_SUCCESS) { 3318 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3319 "nxge_dma_mem_alloc: " 3320 "ddi_dma_mem_alloc failed")); 3321 ddi_dma_free_handle(&dma_p->dma_handle); 3322 dma_p->dma_handle = NULL; 3323 return (NXGE_ERROR | NXGE_DDI_FAILED); 3324 } 3325 if (dma_p->alength < length) { 3326 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3327 "nxge_dma_mem_alloc:ddi_dma_mem_alloc " 3328 "< length.")); 3329 ddi_dma_mem_free(&dma_p->acc_handle); 3330 ddi_dma_free_handle(&dma_p->dma_handle); 3331 dma_p->acc_handle = NULL; 3332 dma_p->dma_handle = NULL; 3333 return (NXGE_ERROR); 3334 } 3335 3336 ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, 3337 NULL, 3338 kaddrp, dma_p->alength, xfer_flags, 3339 DDI_DMA_DONTWAIT, 3340 0, &dma_p->dma_cookie, &dma_p->ncookies); 3341 if (ddi_status != DDI_DMA_MAPPED) { 3342 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3343 "nxge_dma_mem_alloc: ddi_dma_addr_bind " 3344 "failed " 3345 "(status 0x%x ncookies %d.)", ddi_status, 3346 dma_p->ncookies)); 3347 if (dma_p->acc_handle) { 3348 ddi_dma_mem_free(&dma_p->acc_handle); 3349 dma_p->acc_handle = NULL; 3350 } 3351 ddi_dma_free_handle(&dma_p->dma_handle); 3352 dma_p->dma_handle = NULL; 3353 return (NXGE_ERROR | NXGE_DDI_FAILED); 3354 } 3355 3356 if (dma_p->ncookies != 1) { 3357 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3358 "nxge_dma_mem_alloc:ddi_dma_addr_bind " 3359 "> 1 cookie" 3360 "(status 0x%x ncookies %d.)", ddi_status, 3361 dma_p->ncookies)); 3362 if (dma_p->acc_handle) { 3363 ddi_dma_mem_free(&dma_p->acc_handle); 3364 dma_p->acc_handle = NULL; 3365 } 3366 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 3367 ddi_dma_free_handle(&dma_p->dma_handle); 3368 dma_p->dma_handle = NULL; 3369 return (NXGE_ERROR); 3370 } 3371 break; 3372 3373 case B_TRUE: 3374 kaddrp = KMEM_ALLOC(length, KM_NOSLEEP); 3375 if (kaddrp == NULL) { 3376 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3377 "nxge_dma_mem_alloc: " 3378 "kmem_alloc failed")); 3379 return (NXGE_ERROR); 3380 } 3381 3382 dma_p->alength = length; 3383 ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, 3384 NULL, kaddrp, dma_p->alength, xfer_flags, 3385 DDI_DMA_DONTWAIT, 0, 3386 &dma_p->dma_cookie, &dma_p->ncookies); 3387 if (ddi_status != DDI_DMA_MAPPED) { 3388 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3389 "nxge_dma_mem_alloc:ddi_dma_addr_bind: " 3390 "(kmem_alloc) failed kaddrp $%p length %d " 3391 "(status 0x%x (%d) ncookies %d.)", 3392 kaddrp, length, 3393 ddi_status, ddi_status, dma_p->ncookies)); 3394 KMEM_FREE(kaddrp, length); 3395 dma_p->acc_handle = NULL; 3396 ddi_dma_free_handle(&dma_p->dma_handle); 3397 dma_p->dma_handle = NULL; 3398 dma_p->kaddrp = NULL; 3399 return (NXGE_ERROR | NXGE_DDI_FAILED); 3400 } 3401 3402 if (dma_p->ncookies != 1) { 3403 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3404 "nxge_dma_mem_alloc:ddi_dma_addr_bind " 3405 "(kmem_alloc) > 1 cookie" 3406 "(status 0x%x ncookies %d.)", ddi_status, 3407 dma_p->ncookies)); 3408 KMEM_FREE(kaddrp, length); 3409 dma_p->acc_handle = NULL; 3410 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 3411 ddi_dma_free_handle(&dma_p->dma_handle); 3412 dma_p->dma_handle = NULL; 3413 dma_p->kaddrp = NULL; 3414 return (NXGE_ERROR); 3415 } 3416 3417 dma_p->kaddrp = kaddrp; 3418 3419 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 3420 "nxge_dma_mem_alloc: kmem_alloc dmap $%p " 3421 "kaddr $%p alength %d", 3422 dma_p, 3423 kaddrp, 3424 dma_p->alength)); 3425 break; 3426 } 3427 break; 3428 3429 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 3430 case B_TRUE: 3431 kaddrp = (caddr_t)contig_mem_alloc(length); 3432 if (kaddrp == NULL) { 3433 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3434 "nxge_dma_mem_alloc:contig_mem_alloc failed.")); 3435 ddi_dma_free_handle(&dma_p->dma_handle); 3436 return (NXGE_ERROR | NXGE_DDI_FAILED); 3437 } 3438 3439 dma_p->alength = length; 3440 ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL, 3441 kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0, 3442 &dma_p->dma_cookie, &dma_p->ncookies); 3443 if (ddi_status != DDI_DMA_MAPPED) { 3444 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3445 "nxge_dma_mem_alloc:ddi_dma_addr_bind failed " 3446 "(status 0x%x ncookies %d.)", ddi_status, 3447 dma_p->ncookies)); 3448 3449 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3450 "==> nxge_dma_mem_alloc: (not mapped)" 3451 "length %lu (0x%x) " 3452 "free contig kaddrp $%p " 3453 "va_to_pa $%p", 3454 length, length, 3455 kaddrp, 3456 va_to_pa(kaddrp))); 3457 3458 3459 contig_mem_free((void *)kaddrp, length); 3460 ddi_dma_free_handle(&dma_p->dma_handle); 3461 3462 dma_p->dma_handle = NULL; 3463 dma_p->acc_handle = NULL; 3464 dma_p->alength = 0; 3465 dma_p->kaddrp = NULL; 3466 3467 return (NXGE_ERROR | NXGE_DDI_FAILED); 3468 } 3469 3470 if (dma_p->ncookies != 1 || 3471 (dma_p->dma_cookie.dmac_laddress == 0)) { 3472 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3473 "nxge_dma_mem_alloc:ddi_dma_addr_bind > 1 " 3474 "cookie or " 3475 "dmac_laddress is NULL $%p size %d " 3476 " (status 0x%x ncookies %d.)", 3477 dma_p->dma_cookie.dmac_laddress, 3478 dma_p->dma_cookie.dmac_size, 3479 ddi_status, 3480 dma_p->ncookies)); 3481 3482 contig_mem_free((void *)kaddrp, length); 3483 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 3484 ddi_dma_free_handle(&dma_p->dma_handle); 3485 3486 dma_p->alength = 0; 3487 dma_p->dma_handle = NULL; 3488 dma_p->acc_handle = NULL; 3489 dma_p->kaddrp = NULL; 3490 3491 return (NXGE_ERROR | NXGE_DDI_FAILED); 3492 } 3493 break; 3494 3495 #else 3496 case B_TRUE: 3497 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3498 "nxge_dma_mem_alloc: invalid alloc type for !sun4v")); 3499 return (NXGE_ERROR | NXGE_DDI_FAILED); 3500 #endif 3501 } 3502 3503 dma_p->kaddrp = kaddrp; 3504 dma_p->last_kaddrp = (unsigned char *)kaddrp + 3505 dma_p->alength - RXBUF_64B_ALIGNED; 3506 #if defined(__i386) 3507 dma_p->ioaddr_pp = 3508 (unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress; 3509 #else 3510 dma_p->ioaddr_pp = (unsigned char *)dma_p->dma_cookie.dmac_laddress; 3511 #endif 3512 dma_p->last_ioaddr_pp = 3513 #if defined(__i386) 3514 (unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress + 3515 #else 3516 (unsigned char
*)dma_p->dma_cookie.dmac_laddress + 3517 #endif 3518 dma_p->alength - RXBUF_64B_ALIGNED; 3519 3520 NPI_DMA_ACC_HANDLE_SET(dma_p, dma_p->acc_handle); 3521 3522 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 3523 dma_p->orig_ioaddr_pp = 3524 (unsigned char *)dma_p->dma_cookie.dmac_laddress; 3525 dma_p->orig_alength = length; 3526 dma_p->orig_kaddrp = kaddrp; 3527 dma_p->orig_vatopa = (uint64_t)va_to_pa(kaddrp); 3528 #endif 3529 3530 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dma_mem_alloc: " 3531 "dma buffer allocated: dma_p $%p " 3532 "return dmac_laddress from cookie $%p cookie dmac_size %d " 3533 "dma_p->ioaddr_p $%p " 3534 "dma_p->orig_ioaddr_p $%p " 3535 "orig_vatopa $%p " 3536 "alength %d (0x%x) " 3537 "kaddrp $%p " 3538 "length %d (0x%x)", 3539 dma_p, 3540 dma_p->dma_cookie.dmac_laddress, dma_p->dma_cookie.dmac_size, 3541 dma_p->ioaddr_pp, 3542 dma_p->orig_ioaddr_pp, 3543 dma_p->orig_vatopa, 3544 dma_p->alength, dma_p->alength, 3545 kaddrp, 3546 length, length)); 3547 3548 return (NXGE_OK); 3549 } 3550 3551 static void 3552 nxge_dma_mem_free(p_nxge_dma_common_t dma_p) 3553 { 3554 if (dma_p->dma_handle != NULL) { 3555 if (dma_p->ncookies) { 3556 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 3557 dma_p->ncookies = 0; 3558 } 3559 ddi_dma_free_handle(&dma_p->dma_handle); 3560 dma_p->dma_handle = NULL; 3561 } 3562 3563 if (dma_p->acc_handle != NULL) { 3564 ddi_dma_mem_free(&dma_p->acc_handle); 3565 dma_p->acc_handle = NULL; 3566 NPI_DMA_ACC_HANDLE_SET(dma_p, NULL); 3567 } 3568 3569 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 3570 if (dma_p->contig_alloc_type && 3571 dma_p->orig_kaddrp && dma_p->orig_alength) { 3572 NXGE_DEBUG_MSG((NULL, DMA_CTL, "nxge_dma_mem_free: " 3573 "kaddrp $%p (orig_kaddrp $%p)" 3574 "mem type %d " 3575 "orig_alength %d " 3576 "alength 0x%x (%d)", 3577 dma_p->kaddrp, 3578 dma_p->orig_kaddrp, 3579 dma_p->contig_alloc_type, 3580 dma_p->orig_alength, 3581 dma_p->alength, dma_p->alength)); 3582 3583 contig_mem_free(dma_p->orig_kaddrp, dma_p->orig_alength); 3584 dma_p->orig_alength = 0; 3585 dma_p->orig_kaddrp = NULL; 3586 dma_p->contig_alloc_type = B_FALSE; 3587 } 3588 #endif 3589 dma_p->kaddrp = NULL; 3590 dma_p->alength = 0; 3591 } 3592 3593 static void 3594 nxge_dma_free_rx_data_buf(p_nxge_dma_common_t dma_p) 3595 { 3596 uint64_t kaddr; 3597 uint32_t buf_size; 3598 3599 NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_dma_free_rx_data_buf")); 3600 3601 if (dma_p->dma_handle != NULL) { 3602 if (dma_p->ncookies) { 3603 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 3604 dma_p->ncookies = 0; 3605 } 3606 ddi_dma_free_handle(&dma_p->dma_handle); 3607 dma_p->dma_handle = NULL; 3608 } 3609 3610 if (dma_p->acc_handle != NULL) { 3611 ddi_dma_mem_free(&dma_p->acc_handle); 3612 dma_p->acc_handle = NULL; 3613 NPI_DMA_ACC_HANDLE_SET(dma_p, NULL); 3614 } 3615 3616 NXGE_DEBUG_MSG((NULL, DMA_CTL, 3617 "==> nxge_dma_free_rx_data_buf: dmap $%p buf_alloc_state %d", 3618 dma_p, 3619 dma_p->buf_alloc_state)); 3620 3621 if (!(dma_p->buf_alloc_state & BUF_ALLOCATED_WAIT_FREE)) { 3622 NXGE_DEBUG_MSG((NULL, DMA_CTL, 3623 "<== nxge_dma_free_rx_data_buf: " 3624 "outstanding data buffers")); 3625 return; 3626 } 3627 3628 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 3629 if (dma_p->contig_alloc_type && 3630 dma_p->orig_kaddrp && dma_p->orig_alength) { 3631 NXGE_DEBUG_MSG((NULL, DMA_CTL, "nxge_dma_free_rx_data_buf: " 3632 "kaddrp $%p (orig_kaddrp $%p)" 3633 "mem type %d " 3634 "orig_alength %d " 3635 "alength 0x%x (%d)", 3636 dma_p->kaddrp, 3637 dma_p->orig_kaddrp, 3638 dma_p->contig_alloc_type, 3639 dma_p->orig_alength, 3640 dma_p->alength, dma_p->alength)); 3641 3642 kaddr = (uint64_t)dma_p->orig_kaddrp; 3643 buf_size = dma_p->orig_alength; 3644 nxge_free_buf(CONTIG_MEM_ALLOC, kaddr, buf_size); 3645 dma_p->orig_alength = 0; 3646 dma_p->orig_kaddrp = NULL; 3647 dma_p->contig_alloc_type = B_FALSE; 3648 dma_p->kaddrp = NULL; 3649 dma_p->alength = 0; 3650 return; 3651 } 3652 #endif 3653 3654 if (dma_p->kmem_alloc_type) { 3655 NXGE_DEBUG_MSG((NULL, DMA_CTL, 3656 "nxge_dma_free_rx_data_buf: free kmem " 3657 "kaddrp $%p (orig_kaddrp $%p)" 3658 "alloc type %d " 3659 "orig_alength %d " 3660 "alength 0x%x (%d)", 3661 dma_p->kaddrp, 3662 dma_p->orig_kaddrp, 3663 dma_p->kmem_alloc_type, 3664 dma_p->orig_alength, 3665 dma_p->alength, dma_p->alength)); 3666 #if defined(__i386) 3667 kaddr = (uint64_t)(uint32_t)dma_p->kaddrp; 3668 #else 3669 kaddr = (uint64_t)dma_p->kaddrp; 3670 #endif 3671 buf_size = dma_p->orig_alength; 3672 NXGE_DEBUG_MSG((NULL, DMA_CTL, 3673 "nxge_dma_free_rx_data_buf: free dmap $%p " 3674 "kaddr $%p buf_size %d", 3675 dma_p, 3676 kaddr, buf_size)); 3677 nxge_free_buf(KMEM_ALLOC, kaddr, buf_size); 3678 dma_p->alength = 0; 3679 dma_p->orig_alength = 0; 3680 dma_p->kaddrp = NULL; 3681 dma_p->kmem_alloc_type = B_FALSE; 3682 } 3683 3684 NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_dma_free_rx_data_buf")); 3685 } 3686 3687 /* 3688 * nxge_m_start() -- start transmitting and receiving. 3689 * 3690 * This function is called by the MAC layer when the first 3691 * stream is opened, to prepare the hardware for transmitting 3692 * and receiving packets. 3693 */ 3694 static int 3695 nxge_m_start(void *arg) 3696 { 3697 p_nxge_t nxgep = (p_nxge_t)arg; 3698 3699 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_start")); 3700 3701 if (nxge_peu_reset_enable && !nxgep->nxge_link_poll_timerid) { 3702 (void) nxge_link_monitor(nxgep, LINK_MONITOR_START); 3703 } 3704 3705 MUTEX_ENTER(nxgep->genlock); 3706 if (nxge_init(nxgep) != NXGE_OK) { 3707 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3708 "<== nxge_m_start: initialization failed")); 3709 MUTEX_EXIT(nxgep->genlock); 3710 return (EIO); 3711 } 3712 3713 if (nxgep->nxge_mac_state == NXGE_MAC_STARTED) 3714 goto nxge_m_start_exit; 3715 /* 3716 * Start the timer to check for system errors and tx hangs. 3717 */ 3718 if (!isLDOMguest(nxgep)) 3719 nxgep->nxge_timerid = nxge_start_timer(nxgep, 3720 nxge_check_hw_state, NXGE_CHECK_TIMER); 3721 #if defined(sun4v) 3722 else 3723 nxge_hio_start_timer(nxgep); 3724 #endif 3725 3726 nxgep->link_notify = B_TRUE; 3727 3728 nxgep->nxge_mac_state = NXGE_MAC_STARTED; 3729 3730 nxge_m_start_exit: 3731 MUTEX_EXIT(nxgep->genlock); 3732 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_start")); 3733 return (0); 3734 } 3735 3736 /* 3737 * nxge_m_stop(): stop transmitting and receiving.
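 * (Runs under genlock and is thereby serialized with nxge_m_start() above.)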
*/ 3739 static void 3740 nxge_m_stop(void *arg) 3741 { 3742 p_nxge_t nxgep = (p_nxge_t)arg; 3743 3744 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_stop")); 3745 3746 MUTEX_ENTER(nxgep->genlock); 3747 nxgep->nxge_mac_state = NXGE_MAC_STOPPING; 3748 3749 if (nxgep->nxge_timerid) { 3750 nxge_stop_timer(nxgep, nxgep->nxge_timerid); 3751 nxgep->nxge_timerid = 0; 3752 } 3753 3754 nxge_uninit(nxgep); 3755 3756 nxgep->nxge_mac_state = NXGE_MAC_STOPPED; 3757 3758 MUTEX_EXIT(nxgep->genlock); 3759 3760 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_stop")); 3761 } 3762 3763 static int 3764 nxge_m_unicst(void *arg, const uint8_t *macaddr) 3765 { 3766 p_nxge_t nxgep = (p_nxge_t)arg; 3767 struct ether_addr addrp; 3768 3769 NXGE_DEBUG_MSG((nxgep, MAC_CTL, "==> nxge_m_unicst")); 3770 3771 bcopy(macaddr, (uint8_t *)&addrp, ETHERADDRL); 3772 if (nxge_set_mac_addr(nxgep, &addrp)) { 3773 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3774 "<== nxge_m_unicst: set unicast failed")); 3775 return (EINVAL); 3776 } 3777 3778 NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_m_unicst")); 3779 3780 return (0); 3781 } 3782 3783 static int 3784 nxge_m_multicst(void *arg, boolean_t add, const uint8_t *mca) 3785 { 3786 p_nxge_t nxgep = (p_nxge_t)arg; 3787 struct ether_addr addrp; 3788 3789 NXGE_DEBUG_MSG((nxgep, MAC_CTL, 3790 "==> nxge_m_multicst: add %d", add)); 3791 3792 bcopy(mca, (uint8_t *)&addrp, ETHERADDRL); 3793 if (add) { 3794 if (nxge_add_mcast_addr(nxgep, &addrp)) { 3795 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3796 "<== nxge_m_multicst: add multicast failed")); 3797 return (EINVAL); 3798 } 3799 } else { 3800 if (nxge_del_mcast_addr(nxgep, &addrp)) { 3801 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3802 "<== nxge_m_multicst: del multicast failed")); 3803 return (EINVAL); 3804 } 3805 } 3806 3807 NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_m_multicst")); 3808 3809 return (0); 3810 } 3811 3812 static int 3813 nxge_m_promisc(void *arg, boolean_t on) 3814 { 3815 p_nxge_t nxgep = (p_nxge_t)arg; 3816 3817 NXGE_DEBUG_MSG((nxgep, MAC_CTL, 3818 "==> nxge_m_promisc: on %d", on)); 3819 3820 if (nxge_set_promisc(nxgep, on)) { 3821 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3822 "<== nxge_m_promisc: set promisc failed")); 3823 return (EINVAL); 3824 } 3825 3826 NXGE_DEBUG_MSG((nxgep, MAC_CTL, 3827 "<== nxge_m_promisc: on %d", on)); 3828 3829 return (0); 3830 } 3831 3832 static void 3833 nxge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp) 3834 { 3835 p_nxge_t nxgep = (p_nxge_t)arg; 3836 struct iocblk *iocp; 3837 boolean_t need_privilege; 3838 int err; 3839 int cmd; 3840 3841 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl")); 3842 3843 iocp = (struct iocblk *)mp->b_rptr; 3844 iocp->ioc_error = 0; 3845 need_privilege = B_TRUE; 3846 cmd = iocp->ioc_cmd; 3847 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl: cmd 0x%08x", cmd)); 3848 switch (cmd) { 3849 default: 3850 miocnak(wq, mp, 0, EINVAL); 3851 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl: invalid")); 3852 return; 3853 3854 case LB_GET_INFO_SIZE: 3855 case LB_GET_INFO: 3856 case LB_GET_MODE: 3857 need_privilege = B_FALSE; 3858 break; 3859 case LB_SET_MODE: 3860 break; 3861 3862 3863 case NXGE_GET_MII: 3864 case NXGE_PUT_MII: 3865 case NXGE_GET64: 3866 case NXGE_PUT64: 3867 case NXGE_GET_TX_RING_SZ: 3868 case NXGE_GET_TX_DESC: 3869 case NXGE_TX_SIDE_RESET: 3870 case NXGE_RX_SIDE_RESET: 3871 case NXGE_GLOBAL_RESET: 3872 case NXGE_RESET_MAC: 3873 case NXGE_TX_REGS_DUMP: 3874 case NXGE_RX_REGS_DUMP: 3875 case NXGE_INT_REGS_DUMP: 3876 case NXGE_VIR_INT_REGS_DUMP: 3877 case NXGE_PUT_TCAM: 3878 case
NXGE_GET_TCAM: 3879 case NXGE_RTRACE: 3880 case NXGE_RDUMP: 3881 3882 need_privilege = B_FALSE; 3883 break; 3884 case NXGE_INJECT_ERR: 3885 cmn_err(CE_NOTE, "!nxge_m_ioctl: Inject error\n"); 3886 nxge_err_inject(nxgep, wq, mp); 3887 break; 3888 } 3889 3890 if (need_privilege) { 3891 err = secpolicy_net_config(iocp->ioc_cr, B_FALSE); 3892 if (err != 0) { 3893 miocnak(wq, mp, 0, err); 3894 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3895 "<== nxge_m_ioctl: no priv")); 3896 return; 3897 } 3898 } 3899 3900 switch (cmd) { 3901 3902 case LB_GET_MODE: 3903 case LB_SET_MODE: 3904 case LB_GET_INFO_SIZE: 3905 case LB_GET_INFO: 3906 nxge_loopback_ioctl(nxgep, wq, mp, iocp); 3907 break; 3908 3909 case NXGE_GET_MII: 3910 case NXGE_PUT_MII: 3911 case NXGE_PUT_TCAM: 3912 case NXGE_GET_TCAM: 3913 case NXGE_GET64: 3914 case NXGE_PUT64: 3915 case NXGE_GET_TX_RING_SZ: 3916 case NXGE_GET_TX_DESC: 3917 case NXGE_TX_SIDE_RESET: 3918 case NXGE_RX_SIDE_RESET: 3919 case NXGE_GLOBAL_RESET: 3920 case NXGE_RESET_MAC: 3921 case NXGE_TX_REGS_DUMP: 3922 case NXGE_RX_REGS_DUMP: 3923 case NXGE_INT_REGS_DUMP: 3924 case NXGE_VIR_INT_REGS_DUMP: 3925 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 3926 "==> nxge_m_ioctl: cmd 0x%x", cmd)); 3927 nxge_hw_ioctl(nxgep, wq, mp, iocp); 3928 break; 3929 } 3930 3931 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl")); 3932 } 3933 3934 extern void nxge_rx_hw_blank(void *arg, time_t ticks, uint_t count); 3935 3936 static void 3937 nxge_m_resources(void *arg) 3938 { 3939 p_nxge_t nxgep = arg; 3940 mac_rx_fifo_t mrf; 3941 3942 nxge_grp_set_t *set = &nxgep->rx_set; 3943 uint8_t rdc; 3944 3945 rx_rcr_ring_t *ring; 3946 3947 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_m_resources")); 3948 3949 MUTEX_ENTER(nxgep->genlock); 3950 3951 if (set->owned.map == 0) { 3952 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 3953 "nxge_m_resources: no receive resources")); 3954 goto nxge_m_resources_exit; 3955 } 3956 3957 /* 3958 * CR 6492541 Check to see if the drv_state has been initialized, 3959 * if not, call nxge_init(). 3960 */ 3961 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 3962 if (nxge_init(nxgep) != NXGE_OK) 3963 goto nxge_m_resources_exit; 3964 } 3965 3966 mrf.mrf_type = MAC_RX_FIFO; 3967 mrf.mrf_blank = nxge_rx_hw_blank; 3968 mrf.mrf_arg = (void *)nxgep; 3969 3970 mrf.mrf_normal_blank_time = 128; 3971 mrf.mrf_normal_pkt_count = 8; 3972 3973 /* 3974 * Export our receive resources to the MAC layer. 3975 */ 3976 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 3977 if ((1 << rdc) & set->owned.map) { 3978 ring = nxgep->rx_rcr_rings->rcr_rings[rdc]; 3979 if (ring == 0) { 3980 /* 3981 * This is a big deal only if we are 3982 * *not* in an LDOMs environment.
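* (In an LDOMs guest this driver instance owns only a subset of the RDCs, so a missing RCR ring is expected there; the warning below is limited to the SOLARIS_DOMAIN case.)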
3983 */ 3984 if (nxgep->environs == SOLARIS_DOMAIN) { 3985 cmn_err(CE_NOTE, 3986 "==> nxge_m_resources: " 3987 "ring %d == 0", rdc); 3988 } 3989 continue; 3990 } 3991 ring->rcr_mac_handle = mac_resource_add 3992 (nxgep->mach, (mac_resource_t *)&mrf); 3993 3994 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 3995 "==> nxge_m_resources: RDC %d RCR %p MAC handle %p", 3996 rdc, ring, ring->rcr_mac_handle)); 3997 } 3998 } 3999 4000 nxge_m_resources_exit: 4001 MUTEX_EXIT(nxgep->genlock); 4002 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_m_resources")); 4003 } 4004 4005 void 4006 nxge_mmac_kstat_update(p_nxge_t nxgep, mac_addr_slot_t slot, boolean_t factory) 4007 { 4008 p_nxge_mmac_stats_t mmac_stats; 4009 int i; 4010 nxge_mmac_t *mmac_info; 4011 4012 mmac_info = &nxgep->nxge_mmac_info; 4013 4014 mmac_stats = &nxgep->statsp->mmac_stats; 4015 mmac_stats->mmac_max_cnt = mmac_info->num_mmac; 4016 mmac_stats->mmac_avail_cnt = mmac_info->naddrfree; 4017 4018 for (i = 0; i < ETHERADDRL; i++) { 4019 if (factory) { 4020 mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i] 4021 = mmac_info->factory_mac_pool[slot][ 4022 (ETHERADDRL-1) - i]; 4023 } else { 4024 mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i] 4025 = mmac_info->mac_pool[slot].addr[ 4026 (ETHERADDRL - 1) - i]; 4027 } 4028 } 4029 } 4030 4031 /* 4032 * nxge_altmac_set() -- Set an alternate MAC address 4033 */ 4034 static int 4035 nxge_altmac_set(p_nxge_t nxgep, uint8_t *maddr, mac_addr_slot_t slot) 4036 { 4037 uint8_t addrn; 4038 uint8_t portn; 4039 npi_mac_addr_t altmac; 4040 hostinfo_t mac_rdc; 4041 p_nxge_class_pt_cfg_t clscfgp; 4042 4043 altmac.w2 = ((uint16_t)maddr[0] << 8) | ((uint16_t)maddr[1] & 0x0ff); 4044 altmac.w1 = ((uint16_t)maddr[2] << 8) | ((uint16_t)maddr[3] & 0x0ff); 4045 altmac.w0 = ((uint16_t)maddr[4] << 8) | ((uint16_t)maddr[5] & 0x0ff); 4046 4047 portn = nxgep->mac.portnum; 4048 addrn = (uint8_t)slot - 1; 4049 4050 if (npi_mac_altaddr_entry(nxgep->npi_handle, OP_SET, portn, 4051 addrn, &altmac) != NPI_SUCCESS) 4052 return (EIO); 4053 4054 /* 4055 * Set the rdc table number for the host info entry 4056 * for this mac address slot. 4057 */ 4058 clscfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config; 4059 mac_rdc.value = 0; 4060 mac_rdc.bits.w0.rdc_tbl_num = clscfgp->mac_host_info[addrn].rdctbl; 4061 mac_rdc.bits.w0.mac_pref = clscfgp->mac_host_info[addrn].mpr_npr; 4062 4063 if (npi_mac_hostinfo_entry(nxgep->npi_handle, OP_SET, 4064 nxgep->function_num, addrn, &mac_rdc) != NPI_SUCCESS) { 4065 return (EIO); 4066 } 4067 4068 /* 4069 * Enable comparison with the alternate MAC address. 4070 * While the first alternate addr is enabled by bit 1 of register 4071 * BMAC_ALTAD_CMPEN, it is enabled by bit 0 of register 4072 * XMAC_ADDR_CMPEN, so slot needs to be converted to addrn 4073 * accordingly before calling npi_mac_altaddr_enable. 4074 */ 4075 if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1) 4076 addrn = (uint8_t)slot - 1; 4077 else 4078 addrn = (uint8_t)slot; 4079 4080 if (npi_mac_altaddr_enable(nxgep->npi_handle, portn, addrn) 4081 != NPI_SUCCESS) 4082 return (EIO); 4083 4084 return (0); 4085 } 4086 4087 /* 4088 * nxge_m_mmac_add() - find an unused address slot, set the address 4089 * value to the one specified, enable the port to start filtering on 4090 * the new MAC address. Returns 0 on success.
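* For example, with num_mmac = 16 and num_factory_mmac = 7, a new user MAC address goes into one of slots 8..16 first; the factory-capable slots 1..7 are used only when slots 8..16 are all taken (see the slot-search comment in the function body).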
4091 */ 4092 int 4093 nxge_m_mmac_add(void *arg, mac_multi_addr_t *maddr) 4094 { 4095 p_nxge_t nxgep = arg; 4096 mac_addr_slot_t slot; 4097 nxge_mmac_t *mmac_info; 4098 int err; 4099 nxge_status_t status; 4100 4101 mutex_enter(nxgep->genlock); 4102 4103 /* 4104 * Make sure that nxge is initialized, if _start() has 4105 * not been called. 4106 */ 4107 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 4108 status = nxge_init(nxgep); 4109 if (status != NXGE_OK) { 4110 mutex_exit(nxgep->genlock); 4111 return (ENXIO); 4112 } 4113 } 4114 4115 mmac_info = &nxgep->nxge_mmac_info; 4116 if (mmac_info->naddrfree == 0) { 4117 mutex_exit(nxgep->genlock); 4118 return (ENOSPC); 4119 } 4120 if (!mac_unicst_verify(nxgep->mach, maddr->mma_addr, 4121 maddr->mma_addrlen)) { 4122 mutex_exit(nxgep->genlock); 4123 return (EINVAL); 4124 } 4125 /* 4126 * Search for the first available slot. Because naddrfree 4127 * is not zero, we are guaranteed to find one. 4128 * Slot 0 is for unique (primary) MAC. The first alternate 4129 * MAC slot is slot 1. 4130 * Each of the first two ports of Neptune has 16 alternate 4131 * MAC slots but only the first 7 (of 15) slots have assigned factory 4132 * MAC addresses. We first search among the slots without bundled 4133 * factory MACs. If we fail to find one in that range, then we 4134 * search the slots with bundled factory MACs. A factory MAC 4135 * will be wasted while the slot is used with a user MAC address. 4136 * But the slot could be used by factory MAC again after calling 4137 * nxge_m_mmac_remove and nxge_m_mmac_reserve. 4138 */ 4139 if (mmac_info->num_factory_mmac < mmac_info->num_mmac) { 4140 for (slot = mmac_info->num_factory_mmac + 1; 4141 slot <= mmac_info->num_mmac; slot++) { 4142 if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED)) 4143 break; 4144 } 4145 if (slot > mmac_info->num_mmac) { 4146 for (slot = 1; slot <= mmac_info->num_factory_mmac; 4147 slot++) { 4148 if (!(mmac_info->mac_pool[slot].flags 4149 & MMAC_SLOT_USED)) 4150 break; 4151 } 4152 } 4153 } else { 4154 for (slot = 1; slot <= mmac_info->num_mmac; slot++) { 4155 if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED)) 4156 break; 4157 } 4158 } 4159 ASSERT(slot <= mmac_info->num_mmac); 4160 if ((err = nxge_altmac_set(nxgep, maddr->mma_addr, slot)) != 0) { 4161 mutex_exit(nxgep->genlock); 4162 return (err); 4163 } 4164 bcopy(maddr->mma_addr, mmac_info->mac_pool[slot].addr, ETHERADDRL); 4165 mmac_info->mac_pool[slot].flags |= MMAC_SLOT_USED; 4166 mmac_info->mac_pool[slot].flags &= ~MMAC_VENDOR_ADDR; 4167 mmac_info->naddrfree--; 4168 nxge_mmac_kstat_update(nxgep, slot, B_FALSE); 4169 4170 maddr->mma_slot = slot; 4171 4172 mutex_exit(nxgep->genlock); 4173 return (0); 4174 } 4175 4176 /* 4177 * This function reserves an unused slot and programs the slot and the HW 4178 * with a factory mac address. 4179 */ 4180 static int 4181 nxge_m_mmac_reserve(void *arg, mac_multi_addr_t *maddr) 4182 { 4183 p_nxge_t nxgep = arg; 4184 mac_addr_slot_t slot; 4185 nxge_mmac_t *mmac_info; 4186 int err; 4187 nxge_status_t status; 4188 4189 mutex_enter(nxgep->genlock); 4190 4191 /* 4192 * Make sure that nxge is initialized, if _start() has 4193 * not been called. 
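* (Each of the mmac entry points repeats this check, since the MAC layer may call any of them before nxge_m_start() has run.)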
4194 */ 4195 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 4196 status = nxge_init(nxgep); 4197 if (status != NXGE_OK) { 4198 mutex_exit(nxgep->genlock); 4199 return (ENXIO); 4200 } 4201 } 4202 4203 mmac_info = &nxgep->nxge_mmac_info; 4204 if (mmac_info->naddrfree == 0) { 4205 mutex_exit(nxgep->genlock); 4206 return (ENOSPC); 4207 } 4208 4209 slot = maddr->mma_slot; 4210 if (slot == -1) { /* -1: Take the first available slot */ 4211 for (slot = 1; slot <= mmac_info->num_factory_mmac; slot++) { 4212 if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED)) 4213 break; 4214 } 4215 if (slot > mmac_info->num_factory_mmac) { 4216 mutex_exit(nxgep->genlock); 4217 return (ENOSPC); 4218 } 4219 } 4220 if (slot < 1 || slot > mmac_info->num_factory_mmac) { 4221 /* 4222 * Do not support factory MAC at a slot greater than 4223 * num_factory_mmac even when there are available factory 4224 * MAC addresses because the alternate MACs are bundled with 4225 * slot[1] through slot[num_factory_mmac] 4226 */ 4227 mutex_exit(nxgep->genlock); 4228 return (EINVAL); 4229 } 4230 if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) { 4231 mutex_exit(nxgep->genlock); 4232 return (EBUSY); 4233 } 4234 /* Verify the address to be reserved */ 4235 if (!mac_unicst_verify(nxgep->mach, 4236 mmac_info->factory_mac_pool[slot], ETHERADDRL)) { 4237 mutex_exit(nxgep->genlock); 4238 return (EINVAL); 4239 } 4240 if (err = nxge_altmac_set(nxgep, 4241 mmac_info->factory_mac_pool[slot], slot)) { 4242 mutex_exit(nxgep->genlock); 4243 return (err); 4244 } 4245 bcopy(mmac_info->factory_mac_pool[slot], maddr->mma_addr, ETHERADDRL); 4246 mmac_info->mac_pool[slot].flags |= MMAC_SLOT_USED | MMAC_VENDOR_ADDR; 4247 mmac_info->naddrfree--; 4248 4249 nxge_mmac_kstat_update(nxgep, slot, B_TRUE); 4250 mutex_exit(nxgep->genlock); 4251 4252 /* Pass info back to the caller */ 4253 maddr->mma_slot = slot; 4254 maddr->mma_addrlen = ETHERADDRL; 4255 maddr->mma_flags = MMAC_SLOT_USED | MMAC_VENDOR_ADDR; 4256 4257 return (0); 4258 } 4259 4260 /* 4261 * Remove the specified mac address and update the HW not to filter 4262 * the mac address anymore. 4263 */ 4264 int 4265 nxge_m_mmac_remove(void *arg, mac_addr_slot_t slot) 4266 { 4267 p_nxge_t nxgep = arg; 4268 nxge_mmac_t *mmac_info; 4269 uint8_t addrn; 4270 uint8_t portn; 4271 int err = 0; 4272 nxge_status_t status; 4273 4274 mutex_enter(nxgep->genlock); 4275 4276 /* 4277 * Make sure that nxge is initialized, if _start() has 4278 * not been called. 4279 */ 4280 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 4281 status = nxge_init(nxgep); 4282 if (status != NXGE_OK) { 4283 mutex_exit(nxgep->genlock); 4284 return (ENXIO); 4285 } 4286 } 4287 4288 mmac_info = &nxgep->nxge_mmac_info; 4289 if (slot < 1 || slot > mmac_info->num_mmac) { 4290 mutex_exit(nxgep->genlock); 4291 return (EINVAL); 4292 } 4293 4294 portn = nxgep->mac.portnum; 4295 if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1) 4296 addrn = (uint8_t)slot - 1; 4297 else 4298 addrn = (uint8_t)slot; 4299 4300 if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) { 4301 if (npi_mac_altaddr_disable(nxgep->npi_handle, portn, addrn) 4302 == NPI_SUCCESS) { 4303 mmac_info->naddrfree++; 4304 mmac_info->mac_pool[slot].flags &= ~MMAC_SLOT_USED; 4305 /* 4306 * Regardless of whether the MAC we just stopped filtering 4307 * is a user addr or a factory addr, we must set 4308 * the MMAC_VENDOR_ADDR flag if this slot has an 4309 * associated factory MAC to indicate that a factory 4310 * MAC is available.
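* For example, removing a user MAC address from slot 3 (3 <= num_factory_mmac) re-marks the slot with MMAC_VENDOR_ADDR so that nxge_m_mmac_reserve() can hand out that slot's factory address again.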
4311 */ 4312 if (slot <= mmac_info->num_factory_mmac) { 4313 mmac_info->mac_pool[slot].flags 4314 |= MMAC_VENDOR_ADDR; 4315 } 4316 /* 4317 * Clear mac_pool[slot].addr so that kstat shows 0 4318 * alternate MAC address if the slot is not used. 4319 * (But nxge_m_mmac_get returns the factory MAC even 4320 * when the slot is not used!) 4321 */ 4322 bzero(mmac_info->mac_pool[slot].addr, ETHERADDRL); 4323 nxge_mmac_kstat_update(nxgep, slot, B_FALSE); 4324 } else { 4325 err = EIO; 4326 } 4327 } else { 4328 err = EINVAL; 4329 } 4330 4331 mutex_exit(nxgep->genlock); 4332 return (err); 4333 } 4334 4335 /* 4336 * Modify a mac address added by nxge_m_mmac_add() or nxge_m_mmac_reserve(). 4337 */ 4338 static int 4339 nxge_m_mmac_modify(void *arg, mac_multi_addr_t *maddr) 4340 { 4341 p_nxge_t nxgep = arg; 4342 mac_addr_slot_t slot; 4343 nxge_mmac_t *mmac_info; 4344 int err = 0; 4345 nxge_status_t status; 4346 4347 if (!mac_unicst_verify(nxgep->mach, maddr->mma_addr, 4348 maddr->mma_addrlen)) 4349 return (EINVAL); 4350 4351 slot = maddr->mma_slot; 4352 4353 mutex_enter(nxgep->genlock); 4354 4355 /* 4356 * Make sure that nxge is initialized, if _start() has 4357 * not been called. 4358 */ 4359 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 4360 status = nxge_init(nxgep); 4361 if (status != NXGE_OK) { 4362 mutex_exit(nxgep->genlock); 4363 return (ENXIO); 4364 } 4365 } 4366 4367 mmac_info = &nxgep->nxge_mmac_info; 4368 if (slot < 1 || slot > mmac_info->num_mmac) { 4369 mutex_exit(nxgep->genlock); 4370 return (EINVAL); 4371 } 4372 if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) { 4373 if ((err = nxge_altmac_set(nxgep, maddr->mma_addr, slot)) 4374 == 0) { 4375 bcopy(maddr->mma_addr, mmac_info->mac_pool[slot].addr, 4376 ETHERADDRL); 4377 /* 4378 * Assume that the MAC passed down from the caller 4379 * is not a factory MAC address (The user should 4380 * call mmac_remove followed by mmac_reserve if 4381 * he wants to use the factory MAC for this slot). 4382 */ 4383 mmac_info->mac_pool[slot].flags &= ~MMAC_VENDOR_ADDR; 4384 nxge_mmac_kstat_update(nxgep, slot, B_FALSE); 4385 } 4386 } else { 4387 err = EINVAL; 4388 } 4389 mutex_exit(nxgep->genlock); 4390 return (err); 4391 } 4392 4393 /* 4394 * nxge_m_mmac_get() - Get the MAC address and other information 4395 * related to the slot. mma_flags should be set to 0 in the call. 4396 * Note: although kstat shows MAC address as zero when a slot is 4397 * not used, Crossbow expects nxge_m_mmac_get to copy factory MAC 4398 * to the caller as long as the slot is not using a user MAC address. 4399 * The following table shows the rules: 4400 * 4401 * USED VENDOR mma_addr 4402 * ------------------------------------------------------------ 4403 * (1) Slot uses a user MAC: yes no user MAC 4404 * (2) Slot uses a factory MAC: yes yes factory MAC 4405 * (3) Slot is not used but is 4406 * factory MAC capable: no yes factory MAC 4407 * (4) Slot is not used and is 4408 * not factory MAC capable: no no 0 4409 * ------------------------------------------------------------ 4410 */ 4411 static int 4412 nxge_m_mmac_get(void *arg, mac_multi_addr_t *maddr) 4413 { 4414 nxge_t *nxgep = arg; 4415 mac_addr_slot_t slot; 4416 nxge_mmac_t *mmac_info; 4417 nxge_status_t status; 4418 4419 slot = maddr->mma_slot; 4420 4421 mutex_enter(nxgep->genlock); 4422 4423 /* 4424 * Make sure that nxge is initialized, if _start() has 4425 * not been called.
4426 */ 4427 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 4428 status = nxge_init(nxgep); 4429 if (status != NXGE_OK) { 4430 mutex_exit(nxgep->genlock); 4431 return (ENXIO); 4432 } 4433 } 4434 4435 mmac_info = &nxgep->nxge_mmac_info; 4436 4437 if (slot < 1 || slot > mmac_info->num_mmac) { 4438 mutex_exit(nxgep->genlock); 4439 return (EINVAL); 4440 } 4441 maddr->mma_flags = 0; 4442 if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) 4443 maddr->mma_flags |= MMAC_SLOT_USED; 4444 4445 if (mmac_info->mac_pool[slot].flags & MMAC_VENDOR_ADDR) { 4446 maddr->mma_flags |= MMAC_VENDOR_ADDR; 4447 bcopy(mmac_info->factory_mac_pool[slot], 4448 maddr->mma_addr, ETHERADDRL); 4449 maddr->mma_addrlen = ETHERADDRL; 4450 } else { 4451 if (maddr->mma_flags & MMAC_SLOT_USED) { 4452 bcopy(mmac_info->mac_pool[slot].addr, 4453 maddr->mma_addr, ETHERADDRL); 4454 maddr->mma_addrlen = ETHERADDRL; 4455 } else { 4456 bzero(maddr->mma_addr, ETHERADDRL); 4457 maddr->mma_addrlen = 0; 4458 } 4459 } 4460 mutex_exit(nxgep->genlock); 4461 return (0); 4462 } 4463 4464 static boolean_t 4465 nxge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data) 4466 { 4467 nxge_t *nxgep = arg; 4468 uint32_t *txflags = cap_data; 4469 multiaddress_capab_t *mmacp = cap_data; 4470 4471 switch (cap) { 4472 case MAC_CAPAB_HCKSUM: 4473 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4474 "==> nxge_m_getcapab: checksum %d", nxge_cksum_offload)); 4475 if (nxge_cksum_offload <= 1) { 4476 *txflags = HCKSUM_INET_PARTIAL; 4477 } 4478 break; 4479 4480 case MAC_CAPAB_POLL: 4481 /* 4482 * There's nothing for us to fill in; simply returning 4483 * B_TRUE to state that we support polling is sufficient. 4484 */ 4485 break; 4486 4487 case MAC_CAPAB_MULTIADDRESS: 4488 mmacp = (multiaddress_capab_t *)cap_data; 4489 mutex_enter(nxgep->genlock); 4490 4491 mmacp->maddr_naddr = nxgep->nxge_mmac_info.num_mmac; 4492 mmacp->maddr_naddrfree = nxgep->nxge_mmac_info.naddrfree; 4493 mmacp->maddr_flag = 0; /* 0 is required by PSARC/2006/265 */ 4494 /* 4495 * maddr_handle is driver's private data, passed back to 4496 * entry point functions as arg. 4497 */ 4498 mmacp->maddr_handle = nxgep; 4499 mmacp->maddr_add = nxge_m_mmac_add; 4500 mmacp->maddr_remove = nxge_m_mmac_remove; 4501 mmacp->maddr_modify = nxge_m_mmac_modify; 4502 mmacp->maddr_get = nxge_m_mmac_get; 4503 mmacp->maddr_reserve = nxge_m_mmac_reserve; 4504 4505 mutex_exit(nxgep->genlock); 4506 break; 4507 4508 case MAC_CAPAB_LSO: { 4509 mac_capab_lso_t *cap_lso = cap_data; 4510 4511 if (nxgep->soft_lso_enable) { 4512 if (nxge_cksum_offload <= 1) { 4513 cap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4; 4514 if (nxge_lso_max > NXGE_LSO_MAXLEN) { 4515 nxge_lso_max = NXGE_LSO_MAXLEN; 4516 } 4517 cap_lso->lso_basic_tcp_ipv4.lso_max = 4518 nxge_lso_max; 4519 } 4520 break; 4521 } else { 4522 return (B_FALSE); 4523 } 4524 } 4525 4526 #if defined(sun4v) 4527 case MAC_CAPAB_RINGS: { 4528 mac_capab_rings_t *mrings = (mac_capab_rings_t *)cap_data; 4529 4530 /* 4531 * Only the service domain driver responds to 4532 * this capability request. 4533 */ 4534 if (isLDOMservice(nxgep)) { 4535 mrings->mr_handle = (void *)nxgep; 4536 4537 /* 4538 * No dynamic allocation of groups and 4539 * rings at this time. Shares dictate the 4540 * configuration.
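* The ring and group counts reported below are hard-coded placeholders (see the XXX annotations), not values negotiated with the MAC layer.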
4541 */ 4542 mrings->mr_gadd_ring = NULL; 4543 mrings->mr_grem_ring = NULL; 4544 mrings->mr_rget = NULL; 4545 mrings->mr_gget = nxge_hio_group_get; 4546 4547 if (mrings->mr_type == MAC_RING_TYPE_RX) { 4548 mrings->mr_rnum = 8; /* XXX */ 4549 mrings->mr_gnum = 6; /* XXX */ 4550 } else { 4551 mrings->mr_rnum = 8; /* XXX */ 4552 mrings->mr_gnum = 0; /* XXX */ 4553 } 4554 } else 4555 return (B_FALSE); 4556 break; 4557 } 4558 4559 case MAC_CAPAB_SHARES: { 4560 mac_capab_share_t *mshares = (mac_capab_share_t *)cap_data; 4561 4562 /* 4563 * Only the service domain driver responds to 4564 * this capability request. 4565 */ 4566 if (isLDOMservice(nxgep)) { 4567 mshares->ms_snum = 3; 4568 mshares->ms_handle = (void *)nxgep; 4569 mshares->ms_salloc = nxge_hio_share_alloc; 4570 mshares->ms_sfree = nxge_hio_share_free; 4571 mshares->ms_sadd = NULL; 4572 mshares->ms_sremove = NULL; 4573 mshares->ms_squery = nxge_hio_share_query; 4574 } else 4575 return (B_FALSE); 4576 break; 4577 } 4578 #endif 4579 default: 4580 return (B_FALSE); 4581 } 4582 return (B_TRUE); 4583 } 4584 4585 static boolean_t 4586 nxge_param_locked(mac_prop_id_t pr_num) 4587 { 4588 /* 4589 * All adv_* parameters are locked (read-only) while 4590 * the device is in any sort of loopback mode ... 4591 */ 4592 switch (pr_num) { 4593 case MAC_PROP_ADV_1000FDX_CAP: 4594 case MAC_PROP_EN_1000FDX_CAP: 4595 case MAC_PROP_ADV_1000HDX_CAP: 4596 case MAC_PROP_EN_1000HDX_CAP: 4597 case MAC_PROP_ADV_100FDX_CAP: 4598 case MAC_PROP_EN_100FDX_CAP: 4599 case MAC_PROP_ADV_100HDX_CAP: 4600 case MAC_PROP_EN_100HDX_CAP: 4601 case MAC_PROP_ADV_10FDX_CAP: 4602 case MAC_PROP_EN_10FDX_CAP: 4603 case MAC_PROP_ADV_10HDX_CAP: 4604 case MAC_PROP_EN_10HDX_CAP: 4605 case MAC_PROP_AUTONEG: 4606 case MAC_PROP_FLOWCTRL: 4607 return (B_TRUE); 4608 } 4609 return (B_FALSE); 4610 } 4611 4612 /* 4613 * callback functions for set/get of properties 4614 */ 4615 static int 4616 nxge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num, 4617 uint_t pr_valsize, const void *pr_val) 4618 { 4619 nxge_t *nxgep = barg; 4620 p_nxge_param_t param_arr; 4621 p_nxge_stats_t statsp; 4622 int err = 0; 4623 uint8_t val; 4624 uint32_t cur_mtu, new_mtu, old_framesize; 4625 link_flowctrl_t fl; 4626 4627 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_setprop")); 4628 param_arr = nxgep->param_arr; 4629 statsp = nxgep->statsp; 4630 mutex_enter(nxgep->genlock); 4631 if (statsp->port_stats.lb_mode != nxge_lb_normal && 4632 nxge_param_locked(pr_num)) { 4633 /* 4634 * All adv_* parameters are locked (read-only) 4635 * while the device is in any sort of loopback mode. 
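* nxge_param_locked() above enumerates the affected properties; EBUSY is returned so that the caller can retry once loopback mode has been cleared.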
4636 */ 4637 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4638 "==> nxge_m_setprop: loopback mode: read only")); 4639 mutex_exit(nxgep->genlock); 4640 return (EBUSY); 4641 } 4642 4643 val = *(uint8_t *)pr_val; 4644 switch (pr_num) { 4645 case MAC_PROP_EN_1000FDX_CAP: 4646 nxgep->param_en_1000fdx = val; 4647 param_arr[param_anar_1000fdx].value = val; 4648 4649 goto reprogram; 4650 4651 case MAC_PROP_EN_100FDX_CAP: 4652 nxgep->param_en_100fdx = val; 4653 param_arr[param_anar_100fdx].value = val; 4654 4655 goto reprogram; 4656 4657 case MAC_PROP_EN_10FDX_CAP: 4658 nxgep->param_en_10fdx = val; 4659 param_arr[param_anar_10fdx].value = val; 4660 4661 goto reprogram; 4662 4663 case MAC_PROP_EN_1000HDX_CAP: 4664 case MAC_PROP_EN_100HDX_CAP: 4665 case MAC_PROP_EN_10HDX_CAP: 4666 case MAC_PROP_ADV_1000FDX_CAP: 4667 case MAC_PROP_ADV_1000HDX_CAP: 4668 case MAC_PROP_ADV_100FDX_CAP: 4669 case MAC_PROP_ADV_100HDX_CAP: 4670 case MAC_PROP_ADV_10FDX_CAP: 4671 case MAC_PROP_ADV_10HDX_CAP: 4672 case MAC_PROP_STATUS: 4673 case MAC_PROP_SPEED: 4674 case MAC_PROP_DUPLEX: 4675 err = EINVAL; /* cannot set read-only properties */ 4676 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4677 "==> nxge_m_setprop: read only property %d", 4678 pr_num)); 4679 break; 4680 4681 case MAC_PROP_AUTONEG: 4682 param_arr[param_autoneg].value = val; 4683 4684 goto reprogram; 4685 4686 case MAC_PROP_MTU: 4687 if (nxgep->nxge_mac_state == NXGE_MAC_STARTED) { 4688 err = EBUSY; 4689 break; 4690 } 4691 4692 cur_mtu = nxgep->mac.default_mtu; 4693 bcopy(pr_val, &new_mtu, sizeof (new_mtu)); 4694 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4695 "==> nxge_m_setprop: set MTU: %d is_jumbo %d", 4696 new_mtu, nxgep->mac.is_jumbo)); 4697 4698 if (new_mtu == cur_mtu) { 4699 err = 0; 4700 break; 4701 } 4702 if (new_mtu < NXGE_DEFAULT_MTU || 4703 new_mtu > NXGE_MAXIMUM_MTU) { 4704 err = EINVAL; 4705 break; 4706 } 4707 4708 if ((new_mtu > NXGE_DEFAULT_MTU) && 4709 !nxgep->mac.is_jumbo) { 4710 err = EINVAL; 4711 break; 4712 } 4713 4714 old_framesize = (uint32_t)nxgep->mac.maxframesize; 4715 nxgep->mac.maxframesize = (uint16_t) 4716 (new_mtu + NXGE_EHEADER_VLAN_CRC); 4717 if (nxge_mac_set_framesize(nxgep)) { 4718 nxgep->mac.maxframesize = 4719 (uint16_t)old_framesize; 4720 err = EINVAL; 4721 break; 4722 } 4723 4724 err = mac_maxsdu_update(nxgep->mach, new_mtu); 4725 if (err) { 4726 nxgep->mac.maxframesize = 4727 (uint16_t)old_framesize; 4728 err = EINVAL; 4729 break; 4730 } 4731 4732 nxgep->mac.default_mtu = new_mtu; 4733 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4734 "==> nxge_m_setprop: set MTU: %d maxframe %d", 4735 new_mtu, nxgep->mac.maxframesize)); 4736 break; 4737 4738 case MAC_PROP_FLOWCTRL: 4739 bcopy(pr_val, &fl, sizeof (fl)); 4740 switch (fl) { 4741 default: 4742 err = EINVAL; 4743 break; 4744 4745 case LINK_FLOWCTRL_NONE: 4746 param_arr[param_anar_pause].value = 0; 4747 break; 4748 4749 case LINK_FLOWCTRL_RX: 4750 param_arr[param_anar_pause].value = 1; 4751 break; 4752 4753 case LINK_FLOWCTRL_TX: 4754 case LINK_FLOWCTRL_BI: 4755 err = EINVAL; 4756 break; 4757 } 4758 4759 reprogram: 4760 if (err == 0) { 4761 if (!nxge_param_link_update(nxgep)) { 4762 err = EINVAL; 4763 } 4764 } 4765 break; 4766 case MAC_PROP_PRIVATE: 4767 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4768 "==> nxge_m_setprop: private property")); 4769 err = nxge_set_priv_prop(nxgep, pr_name, pr_valsize, 4770 pr_val); 4771 break; 4772 4773 default: 4774 err = ENOTSUP; 4775 break; 4776 } 4777 4778 mutex_exit(nxgep->genlock); 4779 4780 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4781 "<== nxge_m_setprop (return %d)", err)); 4782 return (err); 4783 
} 4784 4785 static int 4786 nxge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num, 4787 uint_t pr_flags, uint_t pr_valsize, void *pr_val) 4788 { 4789 nxge_t *nxgep = barg; 4790 p_nxge_param_t param_arr = nxgep->param_arr; 4791 p_nxge_stats_t statsp = nxgep->statsp; 4792 int err = 0; 4793 link_flowctrl_t fl; 4794 uint64_t tmp = 0; 4795 link_state_t ls; 4796 boolean_t is_default = (pr_flags & MAC_PROP_DEFAULT); 4797 4798 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4799 "==> nxge_m_getprop: pr_num %d", pr_num)); 4800 4801 if (pr_valsize == 0) 4802 return (EINVAL); 4803 4804 if ((is_default) && (pr_num != MAC_PROP_PRIVATE)) { 4805 err = nxge_get_def_val(nxgep, pr_num, pr_valsize, pr_val); 4806 return (err); 4807 } 4808 4809 bzero(pr_val, pr_valsize); 4810 switch (pr_num) { 4811 case MAC_PROP_DUPLEX: 4812 *(uint8_t *)pr_val = statsp->mac_stats.link_duplex; 4813 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4814 "==> nxge_m_getprop: duplex mode %d", 4815 *(uint8_t *)pr_val)); 4816 break; 4817 4818 case MAC_PROP_SPEED: 4819 if (pr_valsize < sizeof (uint64_t)) 4820 return (EINVAL); 4821 tmp = statsp->mac_stats.link_speed * 1000000ull; 4822 bcopy(&tmp, pr_val, sizeof (tmp)); 4823 break; 4824 4825 case MAC_PROP_STATUS: 4826 if (pr_valsize < sizeof (link_state_t)) 4827 return (EINVAL); 4828 if (!statsp->mac_stats.link_up) 4829 ls = LINK_STATE_DOWN; 4830 else 4831 ls = LINK_STATE_UP; 4832 bcopy(&ls, pr_val, sizeof (ls)); 4833 break; 4834 4835 case MAC_PROP_AUTONEG: 4836 *(uint8_t *)pr_val = 4837 param_arr[param_autoneg].value; 4838 break; 4839 4840 case MAC_PROP_FLOWCTRL: 4841 if (pr_valsize < sizeof (link_flowctrl_t)) 4842 return (EINVAL); 4843 4844 fl = LINK_FLOWCTRL_NONE; 4845 if (param_arr[param_anar_pause].value) { 4846 fl = LINK_FLOWCTRL_RX; 4847 } 4848 bcopy(&fl, pr_val, sizeof (fl)); 4849 break; 4850 4851 case MAC_PROP_ADV_1000FDX_CAP: 4852 *(uint8_t *)pr_val = 4853 param_arr[param_anar_1000fdx].value; 4854 break; 4855 4856 case MAC_PROP_EN_1000FDX_CAP: 4857 *(uint8_t *)pr_val = nxgep->param_en_1000fdx; 4858 break; 4859 4860 case MAC_PROP_ADV_100FDX_CAP: 4861 *(uint8_t *)pr_val = 4862 param_arr[param_anar_100fdx].value; 4863 break; 4864 4865 case MAC_PROP_EN_100FDX_CAP: 4866 *(uint8_t *)pr_val = nxgep->param_en_100fdx; 4867 break; 4868 4869 case MAC_PROP_ADV_10FDX_CAP: 4870 *(uint8_t *)pr_val = 4871 param_arr[param_anar_10fdx].value; 4872 break; 4873 4874 case MAC_PROP_EN_10FDX_CAP: 4875 *(uint8_t *)pr_val = nxgep->param_en_10fdx; 4876 break; 4877 4878 case MAC_PROP_EN_1000HDX_CAP: 4879 case MAC_PROP_EN_100HDX_CAP: 4880 case MAC_PROP_EN_10HDX_CAP: 4881 case MAC_PROP_ADV_1000HDX_CAP: 4882 case MAC_PROP_ADV_100HDX_CAP: 4883 case MAC_PROP_ADV_10HDX_CAP: 4884 err = ENOTSUP; 4885 break; 4886 4887 case MAC_PROP_PRIVATE: 4888 err = nxge_get_priv_prop(nxgep, pr_name, pr_flags, 4889 pr_valsize, pr_val); 4890 break; 4891 default: 4892 err = EINVAL; 4893 break; 4894 } 4895 4896 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_getprop")); 4897 4898 return (err); 4899 } 4900 4901 /* ARGSUSED */ 4902 static int 4903 nxge_set_priv_prop(p_nxge_t nxgep, const char *pr_name, uint_t pr_valsize, 4904 const void *pr_val) 4905 { 4906 p_nxge_param_t param_arr = nxgep->param_arr; 4907 int err = 0; 4908 long result; 4909 4910 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4911 "==> nxge_set_priv_prop: name %s", pr_name)); 4912 4913 if (strcmp(pr_name, "_accept_jumbo") == 0) { 4914 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4915 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4916 "<== nxge_set_priv_prop: name %s " 4917 "pr_val %s result %d " 4918 
"param %d is_jumbo %d", 4919 pr_name, pr_val, result, 4920 param_arr[param_accept_jumbo].value, 4921 nxgep->mac.is_jumbo)); 4922 4923 if (result > 1 || result < 0) { 4924 err = EINVAL; 4925 } else { 4926 if (nxgep->mac.is_jumbo == 4927 (uint32_t)result) { 4928 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4929 "no change (%d %d)", 4930 nxgep->mac.is_jumbo, 4931 result)); 4932 return (0); 4933 } 4934 } 4935 4936 param_arr[param_accept_jumbo].value = result; 4937 nxgep->mac.is_jumbo = B_FALSE; 4938 if (result) { 4939 nxgep->mac.is_jumbo = B_TRUE; 4940 } 4941 4942 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4943 "<== nxge_set_priv_prop: name %s (value %d) is_jumbo %d", 4944 pr_name, result, nxgep->mac.is_jumbo)); 4945 4946 return (err); 4947 } 4948 4949 /* Blanking */ 4950 if (strcmp(pr_name, "_rxdma_intr_time") == 0) { 4951 err = nxge_param_rx_intr_time(nxgep, NULL, NULL, 4952 (char *)pr_val, 4953 (caddr_t)¶m_arr[param_rxdma_intr_time]); 4954 if (err) { 4955 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4956 "<== nxge_set_priv_prop: " 4957 "unable to set (%s)", pr_name)); 4958 err = EINVAL; 4959 } else { 4960 err = 0; 4961 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4962 "<== nxge_set_priv_prop: " 4963 "set (%s)", pr_name)); 4964 } 4965 4966 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4967 "<== nxge_set_priv_prop: name %s (value %d)", 4968 pr_name, result)); 4969 4970 return (err); 4971 } 4972 4973 if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) { 4974 err = nxge_param_rx_intr_pkts(nxgep, NULL, NULL, 4975 (char *)pr_val, 4976 (caddr_t)¶m_arr[param_rxdma_intr_pkts]); 4977 if (err) { 4978 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4979 "<== nxge_set_priv_prop: " 4980 "unable to set (%s)", pr_name)); 4981 err = EINVAL; 4982 } else { 4983 err = 0; 4984 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4985 "<== nxge_set_priv_prop: " 4986 "set (%s)", pr_name)); 4987 } 4988 4989 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4990 "<== nxge_set_priv_prop: name %s (value %d)", 4991 pr_name, result)); 4992 4993 return (err); 4994 } 4995 4996 /* Classification */ 4997 if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) { 4998 if (pr_val == NULL) { 4999 err = EINVAL; 5000 return (err); 5001 } 5002 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 5003 5004 err = nxge_param_set_ip_opt(nxgep, NULL, 5005 NULL, (char *)pr_val, 5006 (caddr_t)¶m_arr[param_class_opt_ipv4_tcp]); 5007 5008 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5009 "<== nxge_set_priv_prop: name %s (value 0x%x)", 5010 pr_name, result)); 5011 5012 return (err); 5013 } 5014 5015 if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) { 5016 if (pr_val == NULL) { 5017 err = EINVAL; 5018 return (err); 5019 } 5020 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 5021 5022 err = nxge_param_set_ip_opt(nxgep, NULL, 5023 NULL, (char *)pr_val, 5024 (caddr_t)¶m_arr[param_class_opt_ipv4_udp]); 5025 5026 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5027 "<== nxge_set_priv_prop: name %s (value 0x%x)", 5028 pr_name, result)); 5029 5030 return (err); 5031 } 5032 if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) { 5033 if (pr_val == NULL) { 5034 err = EINVAL; 5035 return (err); 5036 } 5037 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 5038 5039 err = nxge_param_set_ip_opt(nxgep, NULL, 5040 NULL, (char *)pr_val, 5041 (caddr_t)¶m_arr[param_class_opt_ipv4_ah]); 5042 5043 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5044 "<== nxge_set_priv_prop: name %s (value 0x%x)", 5045 pr_name, result)); 5046 5047 return (err); 5048 } 5049 if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) { 5050 if (pr_val == NULL) { 5051 err = EINVAL; 5052 return (err); 5053 } 5054 (void) ddi_strtol(pr_val, (char 
**)NULL, 0, &result); 5055 5056 err = nxge_param_set_ip_opt(nxgep, NULL, 5057 NULL, (char *)pr_val, 5058 (caddr_t)&param_arr[param_class_opt_ipv4_sctp]); 5059 5060 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5061 "<== nxge_set_priv_prop: name %s (value 0x%x)", 5062 pr_name, result)); 5063 5064 return (err); 5065 } 5066 5067 if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) { 5068 if (pr_val == NULL) { 5069 err = EINVAL; 5070 return (err); 5071 } 5072 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 5073 5074 err = nxge_param_set_ip_opt(nxgep, NULL, 5075 NULL, (char *)pr_val, 5076 (caddr_t)&param_arr[param_class_opt_ipv6_tcp]); 5077 5078 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5079 "<== nxge_set_priv_prop: name %s (value 0x%x)", 5080 pr_name, result)); 5081 5082 return (err); 5083 } 5084 5085 if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) { 5086 if (pr_val == NULL) { 5087 err = EINVAL; 5088 return (err); 5089 } 5090 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 5091 5092 err = nxge_param_set_ip_opt(nxgep, NULL, 5093 NULL, (char *)pr_val, 5094 (caddr_t)&param_arr[param_class_opt_ipv6_udp]); 5095 5096 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5097 "<== nxge_set_priv_prop: name %s (value 0x%x)", 5098 pr_name, result)); 5099 5100 return (err); 5101 } 5102 if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) { 5103 if (pr_val == NULL) { 5104 err = EINVAL; 5105 return (err); 5106 } 5107 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 5108 5109 err = nxge_param_set_ip_opt(nxgep, NULL, 5110 NULL, (char *)pr_val, 5111 (caddr_t)&param_arr[param_class_opt_ipv6_ah]); 5112 5113 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5114 "<== nxge_set_priv_prop: name %s (value 0x%x)", 5115 pr_name, result)); 5116 5117 return (err); 5118 } 5119 if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) { 5120 if (pr_val == NULL) { 5121 err = EINVAL; 5122 return (err); 5123 } 5124 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 5125 5126 err = nxge_param_set_ip_opt(nxgep, NULL, 5127 NULL, (char *)pr_val, 5128 (caddr_t)&param_arr[param_class_opt_ipv6_sctp]); 5129 5130 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5131 "<== nxge_set_priv_prop: name %s (value 0x%x)", 5132 pr_name, result)); 5133 5134 return (err); 5135 } 5136 5137 if (strcmp(pr_name, "_soft_lso_enable") == 0) { 5138 if (nxgep->nxge_mac_state == NXGE_MAC_STARTED) { 5139 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5140 "==> nxge_set_priv_prop: name %s (busy)", pr_name)); 5141 err = EBUSY; 5142 return (err); 5143 } 5144 if (pr_val == NULL) { 5145 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5146 "==> nxge_set_priv_prop: name %s (null)", pr_name)); 5147 err = EINVAL; 5148 return (err); 5149 } 5150 5151 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 5152 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5153 "<== nxge_set_priv_prop: name %s " 5154 "(lso %d pr_val %s value %d)", 5155 pr_name, nxgep->soft_lso_enable, pr_val, result)); 5156 5157 if (result > 1 || result < 0) { 5158 err = EINVAL; 5159 } else { 5160 if (nxgep->soft_lso_enable == (uint32_t)result) { 5161 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5162 "no change (%d %d)", 5163 nxgep->soft_lso_enable, result)); 5164 return (0); 5165 } 5166 nxgep->soft_lso_enable = (int)result; 5167 } 5168 5169 5170 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5171 "<== nxge_set_priv_prop: name %s (value %d)", 5172 pr_name, result)); 5173 5174 return (err); 5175 } 5176 /* 5177 * Commands like "ndd -set /dev/nxge0 adv_10gfdx_cap 1" cause the 5178 * following code to be executed.
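* The same path serves "ndd -set /dev/nxge0 adv_pause_cap 1"; both cases simply hand the value string to nxge_param_set_mac() with the matching param_arr[] entry.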
5179 */ 5180 if (strcmp(pr_name, "_adv_10gfdx_cap") == 0) { 5181 err = nxge_param_set_mac(nxgep, NULL, NULL, (char *)pr_val, 5182 (caddr_t)&param_arr[param_anar_10gfdx]); 5183 return (err); 5184 } 5185 if (strcmp(pr_name, "_adv_pause_cap") == 0) { 5186 err = nxge_param_set_mac(nxgep, NULL, NULL, (char *)pr_val, 5187 (caddr_t)&param_arr[param_anar_pause]); 5188 return (err); 5189 } 5190 5191 return (EINVAL); 5192 } 5193 5194 static int 5195 nxge_get_priv_prop(p_nxge_t nxgep, const char *pr_name, uint_t pr_flags, 5196 uint_t pr_valsize, void *pr_val) 5197 { 5198 p_nxge_param_t param_arr = nxgep->param_arr; 5199 char valstr[MAXNAMELEN]; 5200 int err = EINVAL; 5201 uint_t strsize; 5202 boolean_t is_default = (pr_flags & MAC_PROP_DEFAULT); 5203 5204 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5205 "==> nxge_get_priv_prop: property %s", pr_name)); 5206 5207 /* function number */ 5208 if (strcmp(pr_name, "_function_number") == 0) { 5209 if (is_default) 5210 return (ENOTSUP); 5211 (void) snprintf(valstr, sizeof (valstr), "%d", 5212 nxgep->function_num); 5213 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5214 "==> nxge_get_priv_prop: name %s " 5215 "(value %d valstr %s)", 5216 pr_name, nxgep->function_num, valstr)); 5217 5218 err = 0; 5219 goto done; 5220 } 5221 5222 /* Neptune firmware version */ 5223 if (strcmp(pr_name, "_fw_version") == 0) { 5224 if (is_default) 5225 return (ENOTSUP); 5226 (void) snprintf(valstr, sizeof (valstr), "%s", 5227 nxgep->vpd_info.ver); 5228 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5229 "==> nxge_get_priv_prop: name %s " 5230 "(value %s valstr %s)", 5231 pr_name, nxgep->vpd_info.ver, valstr)); 5232 5233 err = 0; 5234 goto done; 5235 } 5236 5237 /* port PHY mode */ 5238 if (strcmp(pr_name, "_port_mode") == 0) { 5239 if (is_default) 5240 return (ENOTSUP); 5241 switch (nxgep->mac.portmode) { 5242 case PORT_1G_COPPER: 5243 (void) snprintf(valstr, sizeof (valstr), "1G copper %s", 5244 nxgep->hot_swappable_phy ? 5245 "[hot swappable]" : ""); 5246 break; 5247 case PORT_1G_FIBER: 5248 (void) snprintf(valstr, sizeof (valstr), "1G fiber %s", 5249 nxgep->hot_swappable_phy ? 5250 "[hot swappable]" : ""); 5251 break; 5252 case PORT_10G_COPPER: 5253 (void) snprintf(valstr, sizeof (valstr), 5254 "10G copper %s", 5255 nxgep->hot_swappable_phy ? 5256 "[hot swappable]" : ""); 5257 break; 5258 case PORT_10G_FIBER: 5259 (void) snprintf(valstr, sizeof (valstr), "10G fiber %s", 5260 nxgep->hot_swappable_phy ? 5261 "[hot swappable]" : ""); 5262 break; 5263 case PORT_10G_SERDES: 5264 (void) snprintf(valstr, sizeof (valstr), 5265 "10G serdes %s", nxgep->hot_swappable_phy ? 5266 "[hot swappable]" : ""); 5267 break; 5268 case PORT_1G_SERDES: 5269 (void) snprintf(valstr, sizeof (valstr), "1G serdes %s", 5270 nxgep->hot_swappable_phy ? 5271 "[hot swappable]" : ""); 5272 break; 5273 case PORT_1G_TN1010: 5274 (void) snprintf(valstr, sizeof (valstr), 5275 "1G TN1010 copper %s", nxgep->hot_swappable_phy ? 5276 "[hot swappable]" : ""); 5277 break; 5278 case PORT_10G_TN1010: 5279 (void) snprintf(valstr, sizeof (valstr), 5280 "10G TN1010 copper %s", nxgep->hot_swappable_phy ? 5281 "[hot swappable]" : ""); 5282 break; 5283 case PORT_1G_RGMII_FIBER: 5284 (void) snprintf(valstr, sizeof (valstr), 5285 "1G rgmii fiber %s", nxgep->hot_swappable_phy ? 5286 "[hot swappable]" : ""); 5287 break; 5288 case PORT_HSP_MODE: 5289 (void) snprintf(valstr, sizeof (valstr), 5290 "phy not present [hot swappable]"); 5291 break; 5292 default: 5293 (void) snprintf(valstr, sizeof (valstr), "unknown %s", 5294 nxgep->hot_swappable_phy ?
5295 "[hot swappable]" : ""); 5296 break; 5297 } 5298 5299 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5300 "==> nxge_get_priv_prop: name %s (value %s)", 5301 pr_name, valstr)); 5302 5303 err = 0; 5304 goto done; 5305 } 5306 5307 /* Hot swappable PHY */ 5308 if (strcmp(pr_name, "_hot_swap_phy") == 0) { 5309 if (is_default) 5310 return (ENOTSUP); 5311 (void) snprintf(valstr, sizeof (valstr), "%s", 5312 nxgep->hot_swappable_phy ? 5313 "yes" : "no"); 5314 5315 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5316 "==> nxge_get_priv_prop: name %s " 5317 "(value %d valstr %s)", 5318 pr_name, nxgep->hot_swappable_phy, valstr)); 5319 5320 err = 0; 5321 goto done; 5322 } 5323 5324 5325 /* accept jumbo */ 5326 if (strcmp(pr_name, "_accept_jumbo") == 0) { 5327 if (is_default) 5328 (void) snprintf(valstr, sizeof (valstr), "%d", 0); 5329 else 5330 (void) snprintf(valstr, sizeof (valstr), 5331 "%d", nxgep->mac.is_jumbo); 5332 err = 0; 5333 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5334 "==> nxge_get_priv_prop: name %s (value %d (%d, %d))", 5335 pr_name, 5336 (uint32_t)param_arr[param_accept_jumbo].value, 5337 nxgep->mac.is_jumbo, 5338 nxge_jumbo_enable)); 5339 5340 goto done; 5341 } 5342 5343 /* Receive Interrupt Blanking Parameters */ 5344 if (strcmp(pr_name, "_rxdma_intr_time") == 0) { 5345 err = 0; 5346 if (is_default) { 5347 (void) snprintf(valstr, sizeof (valstr), 5348 "%d", RXDMA_RCR_TO_DEFAULT); 5349 goto done; 5350 } 5351 5352 (void) snprintf(valstr, sizeof (valstr), "%d", 5353 nxgep->intr_timeout); 5354 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5355 "==> nxge_get_priv_prop: name %s (value %d)", 5356 pr_name, 5357 (uint32_t)nxgep->intr_timeout)); 5358 goto done; 5359 } 5360 5361 if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) { 5362 err = 0; 5363 if (is_default) { 5364 (void) snprintf(valstr, sizeof (valstr), 5365 "%d", RXDMA_RCR_PTHRES_DEFAULT); 5366 goto done; 5367 } 5368 (void) snprintf(valstr, sizeof (valstr), "%d", 5369 nxgep->intr_threshold); 5370 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5371 "==> nxge_get_priv_prop: name %s (value %d)", 5372 pr_name, (uint32_t)nxgep->intr_threshold)); 5373 5374 goto done; 5375 } 5376 5377 /* Classification and Load Distribution Configuration */ 5378 if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) { 5379 if (is_default) { 5380 (void) snprintf(valstr, sizeof (valstr), "%x", 5381 NXGE_CLASS_FLOW_GEN_SERVER); 5382 err = 0; 5383 goto done; 5384 } 5385 err = nxge_dld_get_ip_opt(nxgep, 5386 (caddr_t)&param_arr[param_class_opt_ipv4_tcp]); 5387 5388 (void) snprintf(valstr, sizeof (valstr), "%x", 5389 (int)param_arr[param_class_opt_ipv4_tcp].value); 5390 5391 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5392 "==> nxge_get_priv_prop: %s", valstr)); 5393 goto done; 5394 } 5395 5396 if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) { 5397 if (is_default) { 5398 (void) snprintf(valstr, sizeof (valstr), "%x", 5399 NXGE_CLASS_FLOW_GEN_SERVER); 5400 err = 0; 5401 goto done; 5402 } 5403 err = nxge_dld_get_ip_opt(nxgep, 5404 (caddr_t)&param_arr[param_class_opt_ipv4_udp]); 5405 5406 (void) snprintf(valstr, sizeof (valstr), "%x", 5407 (int)param_arr[param_class_opt_ipv4_udp].value); 5408 5409 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5410 "==> nxge_get_priv_prop: %s", valstr)); 5411 goto done; 5412 } 5413 if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) { 5414 if (is_default) { 5415 (void) snprintf(valstr, sizeof (valstr), "%x", 5416 NXGE_CLASS_FLOW_GEN_SERVER); 5417 err = 0; 5418 goto done; 5419 } 5420 err = nxge_dld_get_ip_opt(nxgep, 5421 (caddr_t)&param_arr[param_class_opt_ipv4_ah]); 5422 5423 (void) snprintf(valstr, sizeof (valstr), "%x", 5424
(int)param_arr[param_class_opt_ipv4_ah].value); 5425 5426 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5427 "==> nxge_get_priv_prop: %s", valstr)); 5428 goto done; 5429 } 5430 5431 if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) { 5432 if (is_default) { 5433 (void) snprintf(valstr, sizeof (valstr), "%x", 5434 NXGE_CLASS_FLOW_GEN_SERVER); 5435 err = 0; 5436 goto done; 5437 } 5438 err = nxge_dld_get_ip_opt(nxgep, 5439 (caddr_t)&param_arr[param_class_opt_ipv4_sctp]); 5440 5441 (void) snprintf(valstr, sizeof (valstr), "%x", 5442 (int)param_arr[param_class_opt_ipv4_sctp].value); 5443 5444 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5445 "==> nxge_get_priv_prop: %s", valstr)); 5446 goto done; 5447 } 5448 5449 if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) { 5450 if (is_default) { 5451 (void) snprintf(valstr, sizeof (valstr), "%x", 5452 NXGE_CLASS_FLOW_GEN_SERVER); 5453 err = 0; 5454 goto done; 5455 } 5456 err = nxge_dld_get_ip_opt(nxgep, 5457 (caddr_t)&param_arr[param_class_opt_ipv6_tcp]); 5458 5459 (void) snprintf(valstr, sizeof (valstr), "%x", 5460 (int)param_arr[param_class_opt_ipv6_tcp].value); 5461 5462 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5463 "==> nxge_get_priv_prop: %s", valstr)); 5464 goto done; 5465 } 5466 5467 if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) { 5468 if (is_default) { 5469 (void) snprintf(valstr, sizeof (valstr), "%x", 5470 NXGE_CLASS_FLOW_GEN_SERVER); 5471 err = 0; 5472 goto done; 5473 } 5474 err = nxge_dld_get_ip_opt(nxgep, 5475 (caddr_t)&param_arr[param_class_opt_ipv6_udp]); 5476 5477 (void) snprintf(valstr, sizeof (valstr), "%x", 5478 (int)param_arr[param_class_opt_ipv6_udp].value); 5479 5480 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5481 "==> nxge_get_priv_prop: %s", valstr)); 5482 goto done; 5483 } 5484 5485 if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) { 5486 if (is_default) { 5487 (void) snprintf(valstr, sizeof (valstr), "%x", 5488 NXGE_CLASS_FLOW_GEN_SERVER); 5489 err = 0; 5490 goto done; 5491 } 5492 err = nxge_dld_get_ip_opt(nxgep, 5493 (caddr_t)&param_arr[param_class_opt_ipv6_ah]); 5494 5495 (void) snprintf(valstr, sizeof (valstr), "%x", 5496 (int)param_arr[param_class_opt_ipv6_ah].value); 5497 5498 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5499 "==> nxge_get_priv_prop: %s", valstr)); 5500 goto done; 5501 } 5502 5503 if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) { 5504 if (is_default) { 5505 (void) snprintf(valstr, sizeof (valstr), "%x", 5506 NXGE_CLASS_FLOW_GEN_SERVER); 5507 err = 0; 5508 goto done; 5509 } 5510 err = nxge_dld_get_ip_opt(nxgep, 5511 (caddr_t)&param_arr[param_class_opt_ipv6_sctp]); 5512 5513 (void) snprintf(valstr, sizeof (valstr), "%x", 5514 (int)param_arr[param_class_opt_ipv6_sctp].value); 5515 5516 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5517 "==> nxge_get_priv_prop: %s", valstr)); 5518 goto done; 5519 } 5520 5521 /* Software LSO */ 5522 if (strcmp(pr_name, "_soft_lso_enable") == 0) { 5523 if (is_default) { 5524 (void) snprintf(valstr, sizeof (valstr), "%d", 0); 5525 err = 0; 5526 goto done; 5527 } 5528 (void) snprintf(valstr, sizeof (valstr), 5529 "%d", nxgep->soft_lso_enable); 5530 err = 0; 5531 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5532 "==> nxge_get_priv_prop: name %s (value %d)", 5533 pr_name, nxgep->soft_lso_enable)); 5534 5535 goto done; 5536 } 5537 if (strcmp(pr_name, "_adv_10gfdx_cap") == 0) { 5538 err = 0; 5539 if (is_default || 5540 nxgep->param_arr[param_anar_10gfdx].value != 0) { 5541 (void) snprintf(valstr, sizeof (valstr), "%d", 1); 5542 goto done; 5543 } else { 5544 (void) snprintf(valstr, sizeof (valstr), "%d", 0); 5545 goto done; 5546 } 5547 } 5548 if (strcmp(pr_name, "_adv_pause_cap") ==
0) { 5549 err = 0; 5550 if (is_default || 5551 nxgep->param_arr[param_anar_pause].value != 0) { 5552 (void) snprintf(valstr, sizeof (valstr), "%d", 1); 5553 goto done; 5554 } else { 5555 (void) snprintf(valstr, sizeof (valstr), "%d", 0); 5556 goto done; 5557 } 5558 } 5559 5560 done: 5561 if (err == 0) { 5562 strsize = (uint_t)strlen(valstr); 5563 if (pr_valsize < strsize) { 5564 err = ENOBUFS; 5565 } else { 5566 (void) strlcpy(pr_val, valstr, pr_valsize); 5567 } 5568 } 5569 5570 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5571 "<== nxge_get_priv_prop: return %d", err)); 5572 return (err); 5573 } 5574 5575 /* 5576 * Module loading and removing entry points. 5577 */ 5578 5579 DDI_DEFINE_STREAM_OPS(nxge_dev_ops, nulldev, nulldev, nxge_attach, nxge_detach, 5580 nodev, NULL, D_MP, NULL, nxge_quiesce); 5581 5582 #define NXGE_DESC_VER "Sun NIU 10Gb Ethernet" 5583 5584 /* 5585 * Module linkage information for the kernel. 5586 */ 5587 static struct modldrv nxge_modldrv = { 5588 &mod_driverops, 5589 NXGE_DESC_VER, 5590 &nxge_dev_ops 5591 }; 5592 5593 static struct modlinkage modlinkage = { 5594 MODREV_1, (void *) &nxge_modldrv, NULL 5595 }; 5596 5597 int 5598 _init(void) 5599 { 5600 int status; 5601 5602 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _init")); 5603 mac_init_ops(&nxge_dev_ops, "nxge"); 5604 status = ddi_soft_state_init(&nxge_list, sizeof (nxge_t), 0); 5605 if (status != 0) { 5606 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 5607 "failed to init device soft state")); 5608 goto _init_exit; 5609 } 5610 status = mod_install(&modlinkage); 5611 if (status != 0) { 5612 ddi_soft_state_fini(&nxge_list); 5613 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "Mod install failed")); 5614 goto _init_exit; 5615 } 5616 5617 MUTEX_INIT(&nxge_common_lock, NULL, MUTEX_DRIVER, NULL); 5618 5619 _init_exit: 5620 NXGE_DEBUG_MSG((NULL, MOD_CTL, "_init status = 0x%X", status)); 5621 5622 return (status); 5623 } 5624 5625 int 5626 _fini(void) 5627 { 5628 int status; 5629 5630 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini")); 5631 5632 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini: mod_remove")); 5633 5634 if (nxge_mblks_pending) 5635 return (EBUSY); 5636 5637 status = mod_remove(&modlinkage); 5638 if (status != DDI_SUCCESS) { 5639 NXGE_DEBUG_MSG((NULL, MOD_CTL, 5640 "Module removal failed 0x%08x", 5641 status)); 5642 goto _fini_exit; 5643 } 5644 5645 mac_fini_ops(&nxge_dev_ops); 5646 5647 ddi_soft_state_fini(&nxge_list); 5648 5649 MUTEX_DESTROY(&nxge_common_lock); 5650 _fini_exit: 5651 NXGE_DEBUG_MSG((NULL, MOD_CTL, "_fini status = 0x%08x", status)); 5652 5653 return (status); 5654 } 5655 5656 int 5657 _info(struct modinfo *modinfop) 5658 { 5659 int status; 5660 5661 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _info")); 5662 status = mod_info(&modlinkage, modinfop); 5663 NXGE_DEBUG_MSG((NULL, MOD_CTL, " _info status = 0x%X", status)); 5664 5665 return (status); 5666 } 5667 5668 /*ARGSUSED*/ 5669 static nxge_status_t 5670 nxge_add_intrs(p_nxge_t nxgep) 5671 { 5672 5673 int intr_types; 5674 int type = 0; 5675 int ddi_status = DDI_SUCCESS; 5676 nxge_status_t status = NXGE_OK; 5677 5678 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs")); 5679 5680 nxgep->nxge_intr_type.intr_registered = B_FALSE; 5681 nxgep->nxge_intr_type.intr_enabled = B_FALSE; 5682 nxgep->nxge_intr_type.msi_intx_cnt = 0; 5683 nxgep->nxge_intr_type.intr_added = 0; 5684 nxgep->nxge_intr_type.niu_msi_enable = B_FALSE; 5685 nxgep->nxge_intr_type.intr_type = 0; 5686 5687 if (nxgep->niu_type == N2_NIU) { 5688 nxgep->nxge_intr_type.niu_msi_enable = B_TRUE; 5689 } else if (nxge_msi_enable) { 5690 
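/* nxge_msi_enable is a tunable: 0 selects fixed interrupts, 1 prefers MSI and 2 prefers MSI-X; see the switch on nxge_msi_enable below. */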
nxgep->nxge_intr_type.niu_msi_enable = B_TRUE; 5691 } 5692 5693 /* Get the supported interrupt types */ 5694 if ((ddi_status = ddi_intr_get_supported_types(nxgep->dip, &intr_types)) 5695 != DDI_SUCCESS) { 5696 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_add_intrs: " 5697 "ddi_intr_get_supported_types failed: status 0x%08x", 5698 ddi_status)); 5699 return (NXGE_ERROR | NXGE_DDI_FAILED); 5700 } 5701 nxgep->nxge_intr_type.intr_types = intr_types; 5702 5703 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 5704 "ddi_intr_get_supported_types: 0x%08x", intr_types)); 5705 5706 /* 5707 * Solaris MSI-X is not supported yet; use MSI for now. 5708 * nxge_msi_enable: 5709 * 1 - MSI, 2 - MSI-X, others - FIXED 5710 */ 5711 switch (nxge_msi_enable) { 5712 default: 5713 type = DDI_INTR_TYPE_FIXED; 5714 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: " 5715 "use fixed (intx emulation) type %08x", 5716 type)); 5717 break; 5718 5719 case 2: 5720 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: " 5721 "ddi_intr_get_supported_types: 0x%08x", intr_types)); 5722 if (intr_types & DDI_INTR_TYPE_MSIX) { 5723 type = DDI_INTR_TYPE_MSIX; 5724 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 5725 "ddi_intr_get_supported_types: MSIX 0x%08x", 5726 type)); 5727 } else if (intr_types & DDI_INTR_TYPE_MSI) { 5728 type = DDI_INTR_TYPE_MSI; 5729 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 5730 "ddi_intr_get_supported_types: MSI 0x%08x", 5731 type)); 5732 } else if (intr_types & DDI_INTR_TYPE_FIXED) { 5733 type = DDI_INTR_TYPE_FIXED; 5734 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: " 5735 "ddi_intr_get_supported_types: FIXED 0x%08x", 5736 type)); 5737 } 5738 break; 5739 5740 case 1: 5741 if (intr_types & DDI_INTR_TYPE_MSI) { 5742 type = DDI_INTR_TYPE_MSI; 5743 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: " 5744 "ddi_intr_get_supported_types: MSI 0x%08x", 5745 type)); 5746 } else if (intr_types & DDI_INTR_TYPE_MSIX) { 5747 type = DDI_INTR_TYPE_MSIX; 5748 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 5749 "ddi_intr_get_supported_types: MSIX 0x%08x", 5750 type)); 5751 } else if (intr_types & DDI_INTR_TYPE_FIXED) { 5752 type = DDI_INTR_TYPE_FIXED; 5753 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 5754 "ddi_intr_get_supported_types: FIXED 0x%08x", 5755 type)); 5756 } 5757 } 5758 5759 nxgep->nxge_intr_type.intr_type = type; 5760 if ((type == DDI_INTR_TYPE_MSIX || type == DDI_INTR_TYPE_MSI || 5761 type == DDI_INTR_TYPE_FIXED) && 5762 nxgep->nxge_intr_type.niu_msi_enable) { 5763 if ((status = nxge_add_intrs_adv(nxgep)) != DDI_SUCCESS) { 5764 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 5765 " nxge_add_intrs: " 5766 " nxge_add_intrs_adv failed: status 0x%08x", 5767 status)); 5768 return (status); 5769 } else { 5770 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 5771 "interrupts registered : type %d", type)); 5772 nxgep->nxge_intr_type.intr_registered = B_TRUE; 5773 5774 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 5775 "\nAdded advanced nxge add_intr_adv " 5776 "intr type 0x%x\n", type)); 5777 5778 return (status); 5779 } 5780 } 5781 5782 if (!nxgep->nxge_intr_type.intr_registered) { 5783 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_add_intrs: " 5784 "failed to register interrupts")); 5785 return (NXGE_ERROR | NXGE_DDI_FAILED); 5786 } 5787 5788 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_add_intrs")); 5789 return (status); 5790 } 5791 5792 /*ARGSUSED*/ 5793 static nxge_status_t 5794 nxge_add_soft_intrs(p_nxge_t nxgep) 5795 { 5796 5797 int ddi_status = DDI_SUCCESS; 5798
nxge_status_t status = NXGE_OK; 5799 5800 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_soft_intrs")); 5801 5802 nxgep->resched_id = NULL; 5803 nxgep->resched_running = B_FALSE; 5804 ddi_status = ddi_add_softintr(nxgep->dip, DDI_SOFTINT_LOW, 5805 &nxgep->resched_id, 5806 NULL, NULL, nxge_reschedule, (caddr_t)nxgep); 5807 if (ddi_status != DDI_SUCCESS) { 5808 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_add_soft_intrs: " 5809 "ddi_add_softintr failed: status 0x%08x", 5810 ddi_status)); 5811 return (NXGE_ERROR | NXGE_DDI_FAILED); 5812 } 5813 5814 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_add_soft_intrs")); 5815 5816 return (status); 5817 } 5818 5819 static nxge_status_t 5820 nxge_add_intrs_adv(p_nxge_t nxgep) 5821 { 5822 int intr_type; 5823 p_nxge_intr_t intrp; 5824 5825 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv")); 5826 5827 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 5828 intr_type = intrp->intr_type; 5829 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv: type 0x%x", 5830 intr_type)); 5831 5832 switch (intr_type) { 5833 case DDI_INTR_TYPE_MSI: /* 0x2 */ 5834 case DDI_INTR_TYPE_MSIX: /* 0x4 */ 5835 return (nxge_add_intrs_adv_type(nxgep, intr_type)); 5836 5837 case DDI_INTR_TYPE_FIXED: /* 0x1 */ 5838 return (nxge_add_intrs_adv_type_fix(nxgep, intr_type)); 5839 5840 default: 5841 return (NXGE_ERROR); 5842 } 5843 } 5844 5845 5846 /*ARGSUSED*/ 5847 static nxge_status_t 5848 nxge_add_intrs_adv_type(p_nxge_t nxgep, uint32_t int_type) 5849 { 5850 dev_info_t *dip = nxgep->dip; 5851 p_nxge_ldg_t ldgp; 5852 p_nxge_intr_t intrp; 5853 uint_t *inthandler; 5854 void *arg1, *arg2; 5855 int behavior; 5856 int nintrs, navail, nrequest; 5857 int nactual, nrequired; 5858 int inum = 0; 5859 int x, y; 5860 int ddi_status = DDI_SUCCESS; 5861 nxge_status_t status = NXGE_OK; 5862 5863 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type")); 5864 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 5865 intrp->start_inum = 0; 5866 5867 ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs); 5868 if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) { 5869 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 5870 "ddi_intr_get_nintrs() failed, status: 0x%x, " 5871 "nintrs: %d", ddi_status, nintrs)); 5872 return (NXGE_ERROR | NXGE_DDI_FAILED); 5873 } 5874 5875 ddi_status = ddi_intr_get_navail(dip, int_type, &navail); 5876 if ((ddi_status != DDI_SUCCESS) || (navail == 0)) { 5877 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 5878 "ddi_intr_get_navail() failed, status: 0x%x, " 5879 "navail: %d", ddi_status, navail)); 5880 return (NXGE_ERROR | NXGE_DDI_FAILED); 5881 } 5882 5883 NXGE_DEBUG_MSG((nxgep, INT_CTL, 5884 "ddi_intr_get_navail() returned: nintrs %d, navail %d", 5885 nintrs, navail)); 5886 5887 /* PSARC/2007/453 MSI-X interrupt limit override */ 5888 if (int_type == DDI_INTR_TYPE_MSIX) { 5889 nrequest = nxge_create_msi_property(nxgep); 5890 if (nrequest < navail) { 5891 navail = nrequest; 5892 NXGE_DEBUG_MSG((nxgep, INT_CTL, 5893 "nxge_add_intrs_adv_type: nintrs %d " 5894 "navail %d (nrequest %d)", 5895 nintrs, navail, nrequest)); 5896 } 5897 } 5898 5899 if (int_type == DDI_INTR_TYPE_MSI && !ISP2(navail)) { 5900 /* MSI must be power of 2 */ 5901 if ((navail & 16) == 16) { 5902 navail = 16; 5903 } else if ((navail & 8) == 8) { 5904 navail = 8; 5905 } else if ((navail & 4) == 4) { 5906 navail = 4; 5907 } else if ((navail & 2) == 2) { 5908 navail = 2; 5909 } else { 5910 navail = 1; 5911 } 5912 NXGE_DEBUG_MSG((nxgep, INT_CTL, 5913 "ddi_intr_get_navail(): (msi power of 2) nintrs %d, " 5914 "navail %d", nintrs,

/*ARGSUSED*/
static nxge_status_t
nxge_add_intrs_adv_type(p_nxge_t nxgep, uint32_t int_type)
{
	dev_info_t	*dip = nxgep->dip;
	p_nxge_ldg_t	ldgp;
	p_nxge_intr_t	intrp;
	uint_t		*inthandler;
	void		*arg1, *arg2;
	int		behavior;
	int		nintrs, navail, nrequest;
	int		nactual, nrequired;
	int		inum = 0;
	int		x, y;
	int		ddi_status = DDI_SUCCESS;
	nxge_status_t	status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type"));
	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
	intrp->start_inum = 0;

	ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
	if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "ddi_intr_get_nintrs() failed, status: 0x%x, "
		    "nintrs: %d", ddi_status, nintrs));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
	if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "ddi_intr_get_navail() failed, status: 0x%x, "
		    "navail: %d", ddi_status, navail));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	NXGE_DEBUG_MSG((nxgep, INT_CTL,
	    "ddi_intr_get_navail() returned: nintrs %d, navail %d",
	    nintrs, navail));

	/* PSARC/2007/453 MSI-X interrupt limit override */
	if (int_type == DDI_INTR_TYPE_MSIX) {
		nrequest = nxge_create_msi_property(nxgep);
		if (nrequest < navail) {
			navail = nrequest;
			NXGE_DEBUG_MSG((nxgep, INT_CTL,
			    "nxge_add_intrs_adv_type: nintrs %d "
			    "navail %d (nrequest %d)",
			    nintrs, navail, nrequest));
		}
	}

	if (int_type == DDI_INTR_TYPE_MSI && !ISP2(navail)) {
		/* MSI must be power of 2 */
		if ((navail & 16) == 16) {
			navail = 16;
		} else if ((navail & 8) == 8) {
			navail = 8;
		} else if ((navail & 4) == 4) {
			navail = 4;
		} else if ((navail & 2) == 2) {
			navail = 2;
		} else {
			navail = 1;
		}
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "ddi_intr_get_navail(): (msi power of 2) nintrs %d, "
		    "navail %d", nintrs, navail));
	}

	behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
	    DDI_INTR_ALLOC_NORMAL);
	intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
	intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP);
	ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
	    navail, &nactual, behavior);
	if (ddi_status != DDI_SUCCESS || nactual == 0) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " ddi_intr_alloc() failed: %d",
		    ddi_status));
		kmem_free(intrp->htable, intrp->intr_size);
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
	    (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " ddi_intr_get_pri() failed: %d",
		    ddi_status));
		/* Free already allocated interrupts */
		for (y = 0; y < nactual; y++) {
			(void) ddi_intr_free(intrp->htable[y]);
		}

		kmem_free(intrp->htable, intrp->intr_size);
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	nrequired = 0;
	switch (nxgep->niu_type) {
	default:
		status = nxge_ldgv_init(nxgep, &nactual, &nrequired);
		break;

	case N2_NIU:
		status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired);
		break;
	}

	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_add_intrs_adv_type: nxge_ldgv_init "
		    "failed: 0x%x", status));
		/* Free already allocated interrupts */
		for (y = 0; y < nactual; y++) {
			(void) ddi_intr_free(intrp->htable[y]);
		}

		kmem_free(intrp->htable, intrp->intr_size);
		return (status);
	}

	ldgp = nxgep->ldgvp->ldgp;
	for (x = 0; x < nrequired; x++, ldgp++) {
		ldgp->vector = (uint8_t)x;
		ldgp->intdata = SID_DATA(ldgp->func, x);
		arg1 = ldgp->ldvp;
		arg2 = nxgep;
		if (ldgp->nldvs == 1) {
			inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
			NXGE_DEBUG_MSG((nxgep, INT_CTL,
			    "nxge_add_intrs_adv_type: "
			    "arg1 0x%x arg2 0x%x: "
			    "1-1 int handler (entry %d intdata 0x%x)\n",
			    arg1, arg2,
			    x, ldgp->intdata));
		} else if (ldgp->nldvs > 1) {
			inthandler = (uint_t *)ldgp->sys_intr_handler;
			NXGE_DEBUG_MSG((nxgep, INT_CTL,
			    "nxge_add_intrs_adv_type: "
			    "arg1 0x%x arg2 0x%x: "
			    "nldevs %d int handler "
			    "(entry %d intdata 0x%x)\n",
			    arg1, arg2,
			    ldgp->nldvs, x, ldgp->intdata));
		}

		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "==> nxge_add_intrs_adv_type: ddi_add_intr(inum) #%d "
		    "htable 0x%llx", x, intrp->htable[x]));

		if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
		    (ddi_intr_handler_t *)inthandler, arg1, arg2))
		    != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_add_intrs_adv_type: failed #%d "
			    "status 0x%x", x, ddi_status));
			for (y = 0; y < intrp->intr_added; y++) {
				(void) ddi_intr_remove_handler(
				    intrp->htable[y]);
			}
			/* Free already allocated intr */
			for (y = 0; y < nactual; y++) {
				(void) ddi_intr_free(intrp->htable[y]);
			}
			kmem_free(intrp->htable, intrp->intr_size);

			(void) nxge_ldgv_uninit(nxgep);

			return (NXGE_ERROR | NXGE_DDI_FAILED);
		}
		intrp->intr_added++;
	}

	intrp->msi_intx_cnt = nactual;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "Requested: %d, Allowed: %d msi_intx_cnt %d intr_added %d",
	    navail, nactual,
	    intrp->msi_intx_cnt,
	    intrp->intr_added));

	(void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);

	(void) nxge_intr_ldgv_init(nxgep);

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type"));

	return (status);
}
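
/*
 * The MSI round-down above tests individual bits, so it relies on
 * navail never exceeding 31 (e.g. navail = 24 yields 16 and
 * navail = 12 yields 8).  A generic sketch of the same "largest power
 * of two <= n" computation, assuming n > 0 (nxge_p2_round_down is a
 * hypothetical helper name, not part of this driver):
 */
#if 0
static int
nxge_p2_round_down(int n)
{
	int	p2;

	/* Walk p2 upward until doubling it would exceed n. */
	for (p2 = 1; (p2 << 1) <= n; p2 <<= 1)
		;
	return (p2);
}
#endif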

/*ARGSUSED*/
static nxge_status_t
nxge_add_intrs_adv_type_fix(p_nxge_t nxgep, uint32_t int_type)
{
	dev_info_t	*dip = nxgep->dip;
	p_nxge_ldg_t	ldgp;
	p_nxge_intr_t	intrp;
	uint_t		*inthandler;
	void		*arg1, *arg2;
	int		behavior;
	int		nintrs, navail;
	int		nactual, nrequired;
	int		inum = 0;
	int		x, y;
	int		ddi_status = DDI_SUCCESS;
	nxge_status_t	status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type_fix"));
	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
	intrp->start_inum = 0;

	ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
	if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "ddi_intr_get_nintrs() failed, status: 0x%x, "
		    "nintrs: %d", ddi_status, nintrs));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
	if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "ddi_intr_get_navail() failed, status: 0x%x, "
		    "navail: %d", ddi_status, navail));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	NXGE_DEBUG_MSG((nxgep, INT_CTL,
	    "ddi_intr_get_navail() returned: nintrs %d, navail %d",
	    nintrs, navail));

	behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
	    DDI_INTR_ALLOC_NORMAL);
	intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
	intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP);
	ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
	    navail, &nactual, behavior);
	if (ddi_status != DDI_SUCCESS || nactual == 0) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " ddi_intr_alloc() failed: %d",
		    ddi_status));
		kmem_free(intrp->htable, intrp->intr_size);
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
	    (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " ddi_intr_get_pri() failed: %d",
		    ddi_status));
		/* Free already allocated interrupts */
		for (y = 0; y < nactual; y++) {
			(void) ddi_intr_free(intrp->htable[y]);
		}

		kmem_free(intrp->htable, intrp->intr_size);
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	nrequired = 0;
	switch (nxgep->niu_type) {
	default:
		status = nxge_ldgv_init(nxgep, &nactual, &nrequired);
		break;

	case N2_NIU:
		status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired);
		break;
	}

	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_add_intrs_adv_type_fix: nxge_ldgv_init "
		    "failed: 0x%x", status));
		/* Free already allocated interrupts */
		for (y = 0; y < nactual; y++) {
			(void) ddi_intr_free(intrp->htable[y]);
		}

		kmem_free(intrp->htable, intrp->intr_size);
		return (status);
	}

	ldgp = nxgep->ldgvp->ldgp;
	for (x = 0; x < nrequired; x++, ldgp++) {
		ldgp->vector = (uint8_t)x;
		if (nxgep->niu_type != N2_NIU) {
			ldgp->intdata = SID_DATA(ldgp->func, x);
		}

		arg1 = ldgp->ldvp;
		arg2 = nxgep;
		if (ldgp->nldvs == 1) {
			inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
			NXGE_DEBUG_MSG((nxgep, INT_CTL,
			    "nxge_add_intrs_adv_type_fix: "
			    "1-1 int handler(%d) ldg %d ldv %d "
			    "arg1 $%p arg2 $%p\n",
			    x, ldgp->ldg, ldgp->ldvp->ldv,
			    arg1, arg2));
		} else if (ldgp->nldvs > 1) {
			inthandler = (uint_t *)ldgp->sys_intr_handler;
			NXGE_DEBUG_MSG((nxgep, INT_CTL,
			    "nxge_add_intrs_adv_type_fix: "
			    "shared int handler(%d) nldvs %d ldg %d ldv %d "
			    "arg1 0x%016llx arg2 0x%016llx\n",
			    x, ldgp->nldvs, ldgp->ldg, ldgp->ldvp->ldv,
			    arg1, arg2));
		}

		if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
		    (ddi_intr_handler_t *)inthandler, arg1, arg2))
		    != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_add_intrs_adv_type_fix: failed #%d "
			    "status 0x%x", x, ddi_status));
			for (y = 0; y < intrp->intr_added; y++) {
				(void) ddi_intr_remove_handler(
				    intrp->htable[y]);
			}
			/* Free already allocated intr */
			for (y = 0; y < nactual; y++) {
				(void) ddi_intr_free(intrp->htable[y]);
			}
			kmem_free(intrp->htable, intrp->intr_size);

			(void) nxge_ldgv_uninit(nxgep);

			return (NXGE_ERROR | NXGE_DDI_FAILED);
		}
		intrp->intr_added++;
	}

	intrp->msi_intx_cnt = nactual;

	(void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);

	status = nxge_intr_ldgv_init(nxgep);
	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type_fix"));

	return (status);
}
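
/*
 * Note on the allocation behavior used by both functions above:
 * DDI_INTR_ALLOC_STRICT (used for fixed interrupts) asks
 * ddi_intr_alloc() to grant all of the requested vectors or fail,
 * while DDI_INTR_ALLOC_NORMAL (used for MSI/MSI-X) allows the
 * framework to return fewer vectors than requested; nactual reports
 * how many were actually granted.
 */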

static void
nxge_remove_intrs(p_nxge_t nxgep)
{
	int		i, inum;
	p_nxge_intr_t	intrp;

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs"));
	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
	if (!intrp->intr_registered) {
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "<== nxge_remove_intrs: interrupts not registered"));
		return;
	}

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs:advanced"));

	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
		(void) ddi_intr_block_disable(intrp->htable,
		    intrp->intr_added);
	} else {
		for (i = 0; i < intrp->intr_added; i++) {
			(void) ddi_intr_disable(intrp->htable[i]);
		}
	}

	for (inum = 0; inum < intrp->intr_added; inum++) {
		if (intrp->htable[inum]) {
			(void) ddi_intr_remove_handler(intrp->htable[inum]);
		}
	}

	for (inum = 0; inum < intrp->msi_intx_cnt; inum++) {
		if (intrp->htable[inum]) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "nxge_remove_intrs: ddi_intr_free inum %d "
			    "msi_intx_cnt %d intr_added %d",
			    inum,
			    intrp->msi_intx_cnt,
			    intrp->intr_added));

			(void) ddi_intr_free(intrp->htable[inum]);
		}
	}

	kmem_free(intrp->htable, intrp->intr_size);
	intrp->intr_registered = B_FALSE;
	intrp->intr_enabled = B_FALSE;
	intrp->msi_intx_cnt = 0;
	intrp->intr_added = 0;

	(void) nxge_ldgv_uninit(nxgep);

	(void) ddi_prop_remove(DDI_DEV_T_NONE, nxgep->dip,
	    "#msix-request");

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_remove_intrs"));
}

/*ARGSUSED*/
static void
nxge_remove_soft_intrs(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_soft_intrs"));
	if (nxgep->resched_id) {
		ddi_remove_softintr(nxgep->resched_id);
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "==> nxge_remove_soft_intrs: removed"));
		nxgep->resched_id = NULL;
	}

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_remove_soft_intrs"));
}
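
/*
 * The resched soft interrupt registered in nxge_add_soft_intrs() is
 * intended to be fired from the transmit path so that nxge_reschedule()
 * runs at soft interrupt level.  A hedged sketch of the trigger side
 * (illustrative only; the actual transmit-side logic, including any
 * locking around resched_running, lives in the TX code elsewhere in
 * this driver):
 */
#if 0
	if (nxgep->resched_id != NULL && !nxgep->resched_running) {
		nxgep->resched_running = B_TRUE;
		ddi_trigger_softintr(nxgep->resched_id);
	}
#endif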

/*ARGSUSED*/
static void
nxge_intrs_enable(p_nxge_t nxgep)
{
	p_nxge_intr_t	intrp;
	int		i;
	int		status;

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable"));

	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;

	if (!intrp->intr_registered) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_intrs_enable: "
		    "interrupts are not registered"));
		return;
	}

	if (intrp->intr_enabled) {
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "<== nxge_intrs_enable: already enabled"));
		return;
	}

	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
		status = ddi_intr_block_enable(intrp->htable,
		    intrp->intr_added);
		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable "
		    "block enable - status 0x%x total inums #%d\n",
		    status, intrp->intr_added));
		if (status == DDI_SUCCESS) {
			intrp->intr_enabled = B_TRUE;
		}
	} else {
		for (i = 0; i < intrp->intr_added; i++) {
			status = ddi_intr_enable(intrp->htable[i]);
			NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable "
			    "ddi_intr_enable:enable - status 0x%x "
			    "total inums %d enable inum #%d\n",
			    status, intrp->intr_added, i));
			if (status == DDI_SUCCESS) {
				intrp->intr_enabled = B_TRUE;
			}
		}
	}

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_enable"));
}

/*ARGSUSED*/
static void
nxge_intrs_disable(p_nxge_t nxgep)
{
	p_nxge_intr_t	intrp;
	int		i;

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_disable"));

	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;

	if (!intrp->intr_registered) {
		NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable: "
		    "interrupts are not registered"));
		return;
	}

	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
		(void) ddi_intr_block_disable(intrp->htable,
		    intrp->intr_added);
	} else {
		for (i = 0; i < intrp->intr_added; i++) {
			(void) ddi_intr_disable(intrp->htable[i]);
		}
	}

	intrp->intr_enabled = B_FALSE;
	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable"));
}

static nxge_status_t
nxge_mac_register(p_nxge_t nxgep)
{
	mac_register_t	*macp;
	int		status;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_mac_register"));

	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
		return (NXGE_ERROR);

	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	macp->m_driver = nxgep;
	macp->m_dip = nxgep->dip;
	macp->m_src_addr = nxgep->ouraddr.ether_addr_octet;
	macp->m_callbacks = &nxge_m_callbacks;
	macp->m_min_sdu = 0;
	nxgep->mac.default_mtu = nxgep->mac.maxframesize -
	    NXGE_EHEADER_VLAN_CRC;
	macp->m_max_sdu = nxgep->mac.default_mtu;
	macp->m_margin = VLAN_TAGSZ;
	macp->m_priv_props = nxge_priv_props;
	macp->m_priv_prop_count = NXGE_MAX_PRIV_PROPS;

	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
	    "==> nxge_mac_register: instance %d "
	    "max_sdu %d margin %d maxframe %d (header %d)",
	    nxgep->instance,
	    macp->m_max_sdu, macp->m_margin,
	    nxgep->mac.maxframesize,
	    NXGE_EHEADER_VLAN_CRC));

	status = mac_register(macp, &nxgep->mach);
	mac_free(macp);

	if (status != 0) {
		cmn_err(CE_WARN,
		    "!nxge_mac_register failed (status %d instance %d)",
		    status, nxgep->instance);
		return (NXGE_ERROR);
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_mac_register success "
	    "(instance %d)", nxgep->instance));

	return (NXGE_OK);
}

void
nxge_err_inject(p_nxge_t nxgep, queue_t *wq, mblk_t *mp)
{
	ssize_t		size;
	mblk_t		*nmp;
	uint8_t		blk_id;
	uint8_t		chan;
	uint32_t	err_id;
	err_inject_t	*eip;

	NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_err_inject"));

	size = 1024;
	nmp = mp->b_cont;
	eip = (err_inject_t *)nmp->b_rptr;
	blk_id = eip->blk_id;
	err_id = eip->err_id;
	chan = eip->chan;
	cmn_err(CE_NOTE, "!blk_id = 0x%x\n", blk_id);
	cmn_err(CE_NOTE, "!err_id = 0x%x\n", err_id);
	cmn_err(CE_NOTE, "!chan = 0x%x\n", chan);
	switch (blk_id) {
	case MAC_BLK_ID:
		break;
	case TXMAC_BLK_ID:
		break;
	case RXMAC_BLK_ID:
		break;
	case MIF_BLK_ID:
		break;
	case IPP_BLK_ID:
		nxge_ipp_inject_err(nxgep, err_id);
		break;
	case TXC_BLK_ID:
		nxge_txc_inject_err(nxgep, err_id);
		break;
	case TXDMA_BLK_ID:
		nxge_txdma_inject_err(nxgep, err_id, chan);
		break;
	case RXDMA_BLK_ID:
		nxge_rxdma_inject_err(nxgep, err_id, chan);
		break;
	case ZCP_BLK_ID:
		nxge_zcp_inject_err(nxgep, err_id);
		break;
	case ESPC_BLK_ID:
		break;
	case FFLP_BLK_ID:
		break;
	case PHY_BLK_ID:
		break;
	case ETHER_SERDES_BLK_ID:
		break;
	case PCIE_SERDES_BLK_ID:
		break;
	case VIR_BLK_ID:
		break;
	}

	nmp->b_wptr = nmp->b_rptr + size;
	NXGE_DEBUG_MSG((nxgep, STR_CTL, "<== nxge_err_inject"));

	miocack(wq, mp, (int)size, 0);
}
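
/*
 * nxge_err_inject() is driven from user level through a STREAMS
 * M_IOCTL whose payload is an err_inject_t.  A hedged sketch of how a
 * diagnostic tool might build such a request; this is user-level code
 * (needs <stropts.h> and <fcntl.h>), and both the ioctl command value
 * NXGE_INJECT_ERR and the device path are hypothetical placeholders
 * for whatever the driver's ioctl handler actually accepts:
 */
#if 0
	struct strioctl	sioc;
	err_inject_t	eip;
	int		fd;

	fd = open("/dev/nxge0", O_RDWR);	/* hypothetical node */

	eip.blk_id = RXDMA_BLK_ID;	/* block to inject into */
	eip.err_id = 0;			/* block-specific error code */
	eip.chan = 0;			/* DMA channel, where applicable */

	sioc.ic_cmd = NXGE_INJECT_ERR;	/* hypothetical command value */
	sioc.ic_timout = 0;
	sioc.ic_len = sizeof (eip);
	sioc.ic_dp = (char *)&eip;
	(void) ioctl(fd, I_STR, &sioc);
#endif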

static int
nxge_init_common_dev(p_nxge_t nxgep)
{
	p_nxge_hw_list_t	hw_p;
	dev_info_t		*p_dip;

	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_init_common_device"));

	p_dip = nxgep->p_dip;
	MUTEX_ENTER(&nxge_common_lock);
	NXGE_DEBUG_MSG((nxgep, MOD_CTL,
	    "==> nxge_init_common_dev:func # %d",
	    nxgep->function_num));
	/*
	 * Loop through existing per neptune hardware list.
	 */
	for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) {
		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "==> nxge_init_common_device:func # %d "
		    "hw_p $%p parent dip $%p",
		    nxgep->function_num,
		    hw_p,
		    p_dip));
		if (hw_p->parent_devp == p_dip) {
			nxgep->nxge_hw_p = hw_p;
			hw_p->ndevs++;
			hw_p->nxge_p[nxgep->function_num] = nxgep;
			NXGE_DEBUG_MSG((nxgep, MOD_CTL,
			    "==> nxge_init_common_device:func # %d "
			    "hw_p $%p parent dip $%p "
			    "ndevs %d (found)",
			    nxgep->function_num,
			    hw_p,
			    p_dip,
			    hw_p->ndevs));
			break;
		}
	}

	if (hw_p == NULL) {
		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "==> nxge_init_common_device:func # %d "
		    "parent dip $%p (new)",
		    nxgep->function_num,
		    p_dip));
		hw_p = kmem_zalloc(sizeof (nxge_hw_list_t), KM_SLEEP);
		hw_p->parent_devp = p_dip;
		hw_p->magic = NXGE_NEPTUNE_MAGIC;
		nxgep->nxge_hw_p = hw_p;
		hw_p->ndevs++;
		hw_p->nxge_p[nxgep->function_num] = nxgep;
		hw_p->next = nxge_hw_list;
		if (nxgep->niu_type == N2_NIU) {
			hw_p->niu_type = N2_NIU;
			hw_p->platform_type = P_NEPTUNE_NIU;
		} else {
			hw_p->niu_type = NIU_TYPE_NONE;
			hw_p->platform_type = P_NEPTUNE_NONE;
		}

		MUTEX_INIT(&hw_p->nxge_cfg_lock, NULL, MUTEX_DRIVER, NULL);
		MUTEX_INIT(&hw_p->nxge_tcam_lock, NULL, MUTEX_DRIVER, NULL);
		MUTEX_INIT(&hw_p->nxge_vlan_lock, NULL, MUTEX_DRIVER, NULL);
		MUTEX_INIT(&hw_p->nxge_mdio_lock, NULL, MUTEX_DRIVER, NULL);

		nxge_hw_list = hw_p;

		(void) nxge_scan_ports_phy(nxgep, nxge_hw_list);
	}

	MUTEX_EXIT(&nxge_common_lock);

	nxgep->platform_type = hw_p->platform_type;
	if (nxgep->niu_type != N2_NIU) {
		nxgep->niu_type = hw_p->niu_type;
	}

	NXGE_DEBUG_MSG((nxgep, MOD_CTL,
	    "==> nxge_init_common_device (nxge_hw_list) $%p",
	    nxge_hw_list));
	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<== nxge_init_common_device"));

	return (NXGE_OK);
}
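
/*
 * Lifecycle note: the first function of a Neptune device to attach
 * allocates the shared nxge_hw_list_t above and links it onto
 * nxge_hw_list; subsequent functions with the same parent devinfo
 * simply bump ndevs and record themselves in nxge_p[].  The entry is
 * torn down by the last function to detach, in nxge_uninit_common_dev()
 * below.
 */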

static void
nxge_uninit_common_dev(p_nxge_t nxgep)
{
	p_nxge_hw_list_t	hw_p, h_hw_p;
	p_nxge_dma_pt_cfg_t	p_dma_cfgp;
	p_nxge_hw_pt_cfg_t	p_cfgp;
	dev_info_t		*p_dip;

	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_uninit_common_device"));
	if (nxgep->nxge_hw_p == NULL) {
		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "<== nxge_uninit_common_device (no common)"));
		return;
	}

	MUTEX_ENTER(&nxge_common_lock);
	h_hw_p = nxge_hw_list;
	for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) {
		p_dip = hw_p->parent_devp;
		if (nxgep->nxge_hw_p == hw_p &&
		    p_dip == nxgep->p_dip &&
		    nxgep->nxge_hw_p->magic == NXGE_NEPTUNE_MAGIC &&
		    hw_p->magic == NXGE_NEPTUNE_MAGIC) {

			NXGE_DEBUG_MSG((nxgep, MOD_CTL,
			    "==> nxge_uninit_common_device:func # %d "
			    "hw_p $%p parent dip $%p "
			    "ndevs %d (found)",
			    nxgep->function_num,
			    hw_p,
			    p_dip,
			    hw_p->ndevs));

			/*
			 * Release the RDC table, a shared resource
			 * of the nxge hardware.  The RDC table was
			 * assigned to this instance of nxge in
			 * nxge_use_cfg_dma_config().
			 */
			if (!isLDOMguest(nxgep)) {
				p_dma_cfgp =
				    (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
				p_cfgp =
				    (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
				(void) nxge_fzc_rdc_tbl_unbind(nxgep,
				    p_cfgp->def_mac_rxdma_grpid);
			}

			if (hw_p->ndevs) {
				hw_p->ndevs--;
			}
			hw_p->nxge_p[nxgep->function_num] = NULL;
			if (!hw_p->ndevs) {
				MUTEX_DESTROY(&hw_p->nxge_vlan_lock);
				MUTEX_DESTROY(&hw_p->nxge_tcam_lock);
				MUTEX_DESTROY(&hw_p->nxge_cfg_lock);
				MUTEX_DESTROY(&hw_p->nxge_mdio_lock);
				NXGE_DEBUG_MSG((nxgep, MOD_CTL,
				    "==> nxge_uninit_common_device: "
				    "func # %d "
				    "hw_p $%p parent dip $%p "
				    "ndevs %d (last)",
				    nxgep->function_num,
				    hw_p,
				    p_dip,
				    hw_p->ndevs));

				nxge_hio_uninit(nxgep);

				if (hw_p == nxge_hw_list) {
					NXGE_DEBUG_MSG((nxgep, MOD_CTL,
					    "==> nxge_uninit_common_device:"
					    "remove head func # %d "
					    "hw_p $%p parent dip $%p "
					    "ndevs %d (head)",
					    nxgep->function_num,
					    hw_p,
					    p_dip,
					    hw_p->ndevs));
					nxge_hw_list = hw_p->next;
				} else {
					NXGE_DEBUG_MSG((nxgep, MOD_CTL,
					    "==> nxge_uninit_common_device:"
					    "remove middle func # %d "
					    "hw_p $%p parent dip $%p "
					    "ndevs %d (middle)",
					    nxgep->function_num,
					    hw_p,
					    p_dip,
					    hw_p->ndevs));
					h_hw_p->next = hw_p->next;
				}

				nxgep->nxge_hw_p = NULL;
				KMEM_FREE(hw_p, sizeof (nxge_hw_list_t));
			}
			break;
		} else {
			h_hw_p = hw_p;
		}
	}

	MUTEX_EXIT(&nxge_common_lock);
	NXGE_DEBUG_MSG((nxgep, MOD_CTL,
	    "==> nxge_uninit_common_device (nxge_hw_list) $%p",
	    nxge_hw_list));

	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<== nxge_uninit_common_device"));
}

/*
 * Determines the number of ports from the niu_type or the platform type.
 * Returns the number of ports, or zero on failure.
 */
int
nxge_get_nports(p_nxge_t nxgep)
{
	int	nports = 0;

	switch (nxgep->niu_type) {
	case N2_NIU:
	case NEPTUNE_2_10GF:
		nports = 2;
		break;
	case NEPTUNE_4_1GC:
	case NEPTUNE_2_10GF_2_1GC:
	case NEPTUNE_1_10GF_3_1GC:
	case NEPTUNE_1_1GC_1_10GF_2_1GC:
	case NEPTUNE_2_10GF_2_1GRF:
		nports = 4;
		break;
	default:
		switch (nxgep->platform_type) {
		case P_NEPTUNE_NIU:
		case P_NEPTUNE_ATLAS_2PORT:
			nports = 2;
			break;
		case P_NEPTUNE_ATLAS_4PORT:
		case P_NEPTUNE_MARAMBA_P0:
		case P_NEPTUNE_MARAMBA_P1:
		case P_NEPTUNE_ALONSO:
			nports = 4;
			break;
		default:
			break;
		}
		break;
	}

	return (nports);
}

/*
 * The following two functions are to support
 * PSARC/2007/453 MSI-X interrupt limit override.
 */
static int
nxge_create_msi_property(p_nxge_t nxgep)
{
	int	nmsi;
	extern	int ncpus;

	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==>nxge_create_msi_property"));

	switch (nxgep->mac.portmode) {
	case PORT_10G_COPPER:
	case PORT_10G_FIBER:
	case PORT_10G_TN1010:
		(void) ddi_prop_create(DDI_DEV_T_NONE, nxgep->dip,
		    DDI_PROP_CANSLEEP, "#msix-request", NULL, 0);
		/*
		 * The maximum number of MSI-X vectors requested will
		 * be 8.  If the number of CPUs is less than 8, we will
		 * request a number of MSI-X vectors based on the
		 * number of CPUs.
		 */
		if (ncpus >= NXGE_MSIX_REQUEST_10G) {
			nmsi = NXGE_MSIX_REQUEST_10G;
		} else {
			nmsi = ncpus;
		}
		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "==>nxge_create_msi_property(10G): exists 0x%x (nmsi %d)",
		    ddi_prop_exists(DDI_DEV_T_NONE, nxgep->dip,
		    DDI_PROP_CANSLEEP, "#msix-request"), nmsi));
		break;

	default:
		nmsi = NXGE_MSIX_REQUEST_1G;
		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "==>nxge_create_msi_property(1G): exists 0x%x (nmsi %d)",
		    ddi_prop_exists(DDI_DEV_T_NONE, nxgep->dip,
		    DDI_PROP_CANSLEEP, "#msix-request"), nmsi));
		break;
	}

	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<==nxge_create_msi_property"));
	return (nmsi);
}
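
/*
 * The "#msix-request" property created above carries no value; its
 * mere existence is the hint.  Per PSARC/2007/453, the platform PCIe
 * nexus on SPARC may consult it when deciding how many MSI-X vectors
 * to make available to this device, which is also why the property is
 * removed again in nxge_remove_intrs().
 */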

/* ARGSUSED */
static int
nxge_get_def_val(nxge_t *nxgep, mac_prop_id_t pr_num, uint_t pr_valsize,
    void *pr_val)
{
	int		err = 0;
	link_flowctrl_t	fl;

	switch (pr_num) {
	case MAC_PROP_AUTONEG:
		*(uint8_t *)pr_val = 1;
		break;
	case MAC_PROP_FLOWCTRL:
		if (pr_valsize < sizeof (link_flowctrl_t))
			return (EINVAL);
		fl = LINK_FLOWCTRL_RX;
		bcopy(&fl, pr_val, sizeof (fl));
		break;
	case MAC_PROP_ADV_1000FDX_CAP:
	case MAC_PROP_EN_1000FDX_CAP:
		*(uint8_t *)pr_val = 1;
		break;
	case MAC_PROP_ADV_100FDX_CAP:
	case MAC_PROP_EN_100FDX_CAP:
		*(uint8_t *)pr_val = 1;
		break;
	default:
		err = ENOTSUP;
		break;
	}
	return (err);
}


/*
 * The following is a software workaround for a Neptune hardware
 * interrupt bug: the hardware may generate spurious interrupts after
 * an interrupt handler has been removed.
 */
#define	NXGE_PCI_PORT_LOGIC_OFFSET	0x98
#define	NXGE_PIM_RESET			(1ULL << 29)
#define	NXGE_GLU_RESET			(1ULL << 30)
#define	NXGE_NIU_RESET			(1ULL << 31)
#define	NXGE_PCI_RESET_ALL		(NXGE_PIM_RESET |	\
					NXGE_GLU_RESET |	\
					NXGE_NIU_RESET)

#define	NXGE_WAIT_QUITE_TIME		200000
#define	NXGE_WAIT_QUITE_RETRY		40
#define	NXGE_PCI_RESET_WAIT		1000000 /* one second */

static void
nxge_niu_peu_reset(p_nxge_t nxgep)
{
	uint32_t	rvalue;
	p_nxge_hw_list_t hw_p;
	p_nxge_t	fnxgep;
	int		i, j;

	NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_niu_peu_reset"));
	if ((hw_p = nxgep->nxge_hw_p) == NULL) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_niu_peu_reset: NULL hardware pointer"));
		return;
	}

	NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
	    "==> nxge_niu_peu_reset: flags 0x%x link timer id %d timer id %d",
	    hw_p->flags, nxgep->nxge_link_poll_timerid,
	    nxgep->nxge_timerid));

	MUTEX_ENTER(&hw_p->nxge_cfg_lock);
	/*
	 * Make sure the other instances on the same hardware have
	 * stopped sending PIOs and are in a quiescent state.
	 */
	for (i = 0; i < NXGE_MAX_PORTS; i++) {
		fnxgep = hw_p->nxge_p[i];
		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_niu_peu_reset: checking entry %d "
		    "nxgep $%p", i, fnxgep));
#ifdef	NXGE_DEBUG
		if (fnxgep) {
			NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_niu_peu_reset: entry %d (function %d) "
			    "link timer id %d hw timer id %d",
			    i, fnxgep->function_num,
			    fnxgep->nxge_link_poll_timerid,
			    fnxgep->nxge_timerid));
		}
#endif
		if (fnxgep && fnxgep != nxgep &&
		    (fnxgep->nxge_timerid || fnxgep->nxge_link_poll_timerid)) {
			NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_niu_peu_reset: checking $%p "
			    "(function %d) timer ids",
			    fnxgep, fnxgep->function_num));
			for (j = 0; j < NXGE_WAIT_QUITE_RETRY; j++) {
				NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
				    "==> nxge_niu_peu_reset: waiting"));
				NXGE_DELAY(NXGE_WAIT_QUITE_TIME);
				if (!fnxgep->nxge_timerid &&
				    !fnxgep->nxge_link_poll_timerid) {
					break;
				}
			}
			NXGE_DELAY(NXGE_WAIT_QUITE_TIME);
			if (fnxgep->nxge_timerid ||
			    fnxgep->nxge_link_poll_timerid) {
				MUTEX_EXIT(&hw_p->nxge_cfg_lock);
				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
				    "<== nxge_niu_peu_reset: cannot reset "
				    "hardware (devices are still in use)"));
				return;
			}
		}
	}

	if ((hw_p->flags & COMMON_RESET_NIU_PCI) != COMMON_RESET_NIU_PCI) {
		hw_p->flags |= COMMON_RESET_NIU_PCI;
		rvalue = pci_config_get32(nxgep->dev_regs->nxge_pciregh,
		    NXGE_PCI_PORT_LOGIC_OFFSET);
		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_niu_peu_reset: read offset 0x%x (%d) "
		    "(data 0x%x)",
		    NXGE_PCI_PORT_LOGIC_OFFSET,
		    NXGE_PCI_PORT_LOGIC_OFFSET,
		    rvalue));

		rvalue |= NXGE_PCI_RESET_ALL;
		pci_config_put32(nxgep->dev_regs->nxge_pciregh,
		    NXGE_PCI_PORT_LOGIC_OFFSET, rvalue);
		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_niu_peu_reset: RESETTING NIU: write NIU reset 0x%x",
		    rvalue));

		NXGE_DELAY(NXGE_PCI_RESET_WAIT);
	}

	MUTEX_EXIT(&hw_p->nxge_cfg_lock);
	NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_niu_peu_reset"));
}

static void
nxge_set_pci_replay_timeout(p_nxge_t nxgep)
{
	p_dev_regs_t	dev_regs;
	uint32_t	value;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_set_pci_replay_timeout"));

	if (!nxge_set_replay_timer) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "==> nxge_set_pci_replay_timeout: will not change "
		    "the timeout"));
		return;
	}

	dev_regs = nxgep->dev_regs;
	if (dev_regs == NULL || (dev_regs->nxge_pciregh == NULL)) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "==> nxge_set_pci_replay_timeout: NULL dev_regs $%p or "
		    "no PCI handle",
		    dev_regs));
		return;
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "==> nxge_set_pci_replay_timeout: dev_regs 0x%p pcireg 0x%p",
	    dev_regs, dev_regs->nxge_pciregh));

	value = (pci_config_get32(dev_regs->nxge_pciregh,
	    PCI_REPLAY_TIMEOUT_CFG_OFFSET) |
	    (nxge_replay_timeout << PCI_REPLAY_TIMEOUT_SHIFT));

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "nxge_set_pci_replay_timeout: replay timeout value before set 0x%x "
	    "(timeout value to set 0x%x at offset 0x%x) value 0x%x",
	    pci_config_get32(dev_regs->nxge_pciregh,
	    PCI_REPLAY_TIMEOUT_CFG_OFFSET), nxge_replay_timeout,
	    PCI_REPLAY_TIMEOUT_CFG_OFFSET, value));

	pci_config_put32(dev_regs->nxge_pciregh, PCI_REPLAY_TIMEOUT_CFG_OFFSET,
	    value);

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "nxge_set_pci_replay_timeout: replay timeout value after set 0x%x",
	    pci_config_get32(dev_regs->nxge_pciregh,
	    PCI_REPLAY_TIMEOUT_CFG_OFFSET)));

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_set_pci_replay_timeout"));
}
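
/*
 * Note that the read-modify-write above only ORs the new timeout into
 * the replay timeout field; it does not clear the field first, so it
 * relies on the reset value of those bits being compatible with the
 * value being set.  A hedged sketch of a fully masked update, assuming
 * a 5-bit field at PCI_REPLAY_TIMEOUT_SHIFT (illustrative only;
 * PCI_REPLAY_TIMEOUT_MASK is not defined by this driver):
 */
#if 0
#define	PCI_REPLAY_TIMEOUT_MASK	(0x1f << PCI_REPLAY_TIMEOUT_SHIFT)

	value = pci_config_get32(dev_regs->nxge_pciregh,
	    PCI_REPLAY_TIMEOUT_CFG_OFFSET);
	value = (value & ~PCI_REPLAY_TIMEOUT_MASK) |
	    (nxge_replay_timeout << PCI_REPLAY_TIMEOUT_SHIFT);
	pci_config_put32(dev_regs->nxge_pciregh,
	    PCI_REPLAY_TIMEOUT_CFG_OFFSET, value);
#endif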

/*
 * quiesce(9E) entry point.
 *
 * This function is called when the system is single-threaded at high
 * PIL with preemption disabled.  Therefore, this function must not
 * block.
 *
 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
 * DDI_FAILURE indicates an error condition and should almost never happen.
 */
static int
nxge_quiesce(dev_info_t *dip)
{
	int instance = ddi_get_instance(dip);
	p_nxge_t nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);

	if (nxgep == NULL)
		return (DDI_FAILURE);

	/* Turn off debugging */
	nxge_debug_level = NO_DEBUG;
	nxgep->nxge_debug_level = NO_DEBUG;
	npi_debug_level = NO_DEBUG;

	/*
	 * Stop the link monitor only when the link check mode is
	 * interrupt based.
	 */
	if (nxgep->mac.linkchkmode == LINKCHK_INTR) {
		(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
	}

	(void) nxge_intr_hw_disable(nxgep);

	/*
	 * Reset the receive MAC side.
	 */
	(void) nxge_rx_mac_disable(nxgep);

	/* Disable and soft reset the IPP */
	if (!isLDOMguest(nxgep))
		(void) nxge_ipp_disable(nxgep);

	/*
	 * Reset the transmit/receive DMA side.
	 */
	(void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_STOP);
	(void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP);

	/*
	 * Reset the transmit MAC side.
	 */
	(void) nxge_tx_mac_disable(nxgep);

	return (DDI_SUCCESS);
}