/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * SunOS MT STREAMS NIU/Neptune 10Gb Ethernet Device Driver.
 */
#include <sys/nxge/nxge_impl.h>
#include <sys/nxge/nxge_hio.h>
#include <sys/nxge/nxge_rxdma.h>
#include <sys/pcie.h>

uint32_t nxge_use_partition = 0;	/* debug partition flag */
uint32_t nxge_dma_obp_props_only = 1;	/* use obp published props */
uint32_t nxge_use_rdc_intr = 1;		/* debug to assign rdc intr */

/*
 * PSARC/2007/453 MSI-X interrupt limit override
 * (This PSARC case is limited to MSI-X vectors
 * and SPARC platforms only).
 */
#if defined(_BIG_ENDIAN)
uint32_t nxge_msi_enable = 2;
#else
uint32_t nxge_msi_enable = 1;
#endif

/*
 * Software workaround for a Neptune (PCI-E) hardware interrupt bug:
 * the hardware may generate spurious interrupts after the device
 * interrupt handler has been removed. If this flag is enabled, the
 * driver will reset the hardware when devices are being detached.
 */
uint32_t nxge_peu_reset_enable = 0;

/*
 * Software workaround for the hardware
 * checksum bugs that affect packet transmission
 * and receive:
 *
 * Usage of nxge_cksum_offload:
 *
 *  (1) nxge_cksum_offload = 0 (default):
 *	- transmit packets:
 *	  TCP: uses the hardware checksum feature.
 *	  UDP: driver will compute the software checksum
 *	       based on the partial checksum computed
 *	       by the IP layer.
 *	- receive packets:
 *	  TCP: marks packet checksum flags based on hardware result.
 *	  UDP: will not mark checksum flags.
 *
 *  (2) nxge_cksum_offload = 1:
 *	- transmit packets:
 *	  TCP/UDP: uses the hardware checksum feature.
 *	- receive packets:
 *	  TCP/UDP: marks packet checksum flags based on hardware result.
 *
 *  (3) nxge_cksum_offload = 2:
 *	- The driver will not register its checksum capability.
 *	  Checksum for both TCP and UDP will be computed
 *	  by the stack.
 *	- Software LSO is not allowed in this case.
 *
 *  (4) nxge_cksum_offload > 2:
 *	- Will be treated as if it were set to 2
 *	  (stack will compute the checksum).
 *
 *  (5) If the hardware bug is fixed, this workaround
 *	needs to be updated accordingly to reflect
 *	the new hardware revision.
 */
uint32_t nxge_cksum_offload = 0;
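/*
 * Example (illustrative): like the other tunables in this file,
 * nxge_cksum_offload is normally set in /etc/system, e.g.
 *
 *	set nxge:nxge_cksum_offload = 1
 *
 * A reboot is required for /etc/system settings to take effect.
 */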
/*
 * Globals: tunable parameters (/etc/system or adb)
 */
uint32_t nxge_rbr_size = NXGE_RBR_RBB_DEFAULT;
uint32_t nxge_rbr_spare_size = 0;
uint32_t nxge_rcr_size = NXGE_RCR_DEFAULT;
uint32_t nxge_tx_ring_size = NXGE_TX_RING_DEFAULT;
boolean_t nxge_no_msg = B_TRUE;		/* control message display */
uint32_t nxge_no_link_notify = 0;	/* control DL_NOTIFY */
uint32_t nxge_bcopy_thresh = TX_BCOPY_MAX;
uint32_t nxge_dvma_thresh = TX_FASTDVMA_MIN;
uint32_t nxge_dma_stream_thresh = TX_STREAM_MIN;
uint32_t nxge_jumbo_mtu = TX_JUMBO_MTU;
boolean_t nxge_jumbo_enable = B_FALSE;
uint16_t nxge_rcr_timeout = NXGE_RDC_RCR_TIMEOUT;
uint16_t nxge_rcr_threshold = NXGE_RDC_RCR_THRESHOLD;
nxge_tx_mode_t nxge_tx_scheme = NXGE_USE_SERIAL;

/* MAX LSO size */
#define	NXGE_LSO_MAXLEN	65535
uint32_t nxge_lso_max = NXGE_LSO_MAXLEN;

/*
 * Debugging flags:
 *	nxge_no_tx_lb : transmit load balancing
 *	nxge_tx_lb_policy: 0 - TCP port (default)
 *			   3 - DEST MAC
 */
uint32_t nxge_no_tx_lb = 0;
uint32_t nxge_tx_lb_policy = NXGE_TX_LB_TCPUDP;

/*
 * Tunable to reduce the amount of time spent in the
 * ISR doing Rx processing.
 */
uint32_t nxge_max_rx_pkts = 1024;

/*
 * Tunables to manage the receive buffer blocks.
 *
 * nxge_rx_threshold_hi: copy all buffers.
 * nxge_rx_bcopy_size_type: receive buffer block size type.
 * nxge_rx_threshold_lo: copy only up to tunable block size type.
 */
nxge_rxbuf_threshold_t nxge_rx_threshold_hi = NXGE_RX_COPY_6;
nxge_rxbuf_type_t nxge_rx_buf_size_type = RCR_PKTBUFSZ_0;
nxge_rxbuf_threshold_t nxge_rx_threshold_lo = NXGE_RX_COPY_3;

/* Use kmem_alloc() to allocate data buffers. */
#if defined(_BIG_ENDIAN)
uint32_t nxge_use_kmem_alloc = 1;
#else
uint32_t nxge_use_kmem_alloc = 0;
#endif

rtrace_t npi_rtracebuf;

/*
 * The hardware sometimes fails to allow enough time for the link
 * partner to send an acknowledgement for packets that the hardware
 * sent to it. The hardware resends the packets earlier than it
 * should in those instances. This behavior caused some switches to
 * acknowledge the wrong packets, which triggered a fatal error.
 * This software workaround sets the replay timer to a value
 * suggested by the hardware team.
 *
 * PCI config space replay timer register:
 *	The replay timeout value below is 0xc
 *	for bits 14:18.
 */
#define	PCI_REPLAY_TIMEOUT_CFG_OFFSET	0xb8
#define	PCI_REPLAY_TIMEOUT_SHIFT	14

uint32_t nxge_set_replay_timer = 1;
uint32_t nxge_replay_timeout = 0xc;
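/*
 * Sketch (illustrative, not the actual implementation): the update is
 * a read-modify-write of the PCI config register above, roughly
 *
 *	uint32_t val = pci_config_get32(handle,
 *	    PCI_REPLAY_TIMEOUT_CFG_OFFSET);
 *	val |= (nxge_replay_timeout << PCI_REPLAY_TIMEOUT_SHIFT);
 *	pci_config_put32(handle, PCI_REPLAY_TIMEOUT_CFG_OFFSET, val);
 *
 * The real work is done in nxge_set_pci_replay_timeout(), declared
 * below.
 */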
/*
 * The transmit serialization sometimes sleeps longer than it should
 * before calling the driver transmit function. The performance group
 * suggests that a time wait tunable can be used to set the maximum
 * wait time when needed; the default is set to 1 tick.
 */
uint32_t nxge_tx_serial_maxsleep = 1;

#if defined(sun4v)
/*
 * Hypervisor N2/NIU services information.
 */
static hsvc_info_t niu_hsvc = {
	HSVC_REV_1, NULL, HSVC_GROUP_NIU, NIU_MAJOR_VER,
	NIU_MINOR_VER, "nxge"
};

static int nxge_hsvc_register(p_nxge_t);
#endif

/*
 * Function Prototypes
 */
static int nxge_attach(dev_info_t *, ddi_attach_cmd_t);
static int nxge_detach(dev_info_t *, ddi_detach_cmd_t);
static void nxge_unattach(p_nxge_t);

#if NXGE_PROPERTY
static void nxge_remove_hard_properties(p_nxge_t);
#endif

/*
 * These two functions are required by nxge_hio.c
 */
extern int nxge_m_mmac_add(void *arg, mac_multi_addr_t *maddr);
extern int nxge_m_mmac_remove(void *arg, mac_addr_slot_t slot);

static nxge_status_t nxge_setup_system_dma_pages(p_nxge_t);

static nxge_status_t nxge_setup_mutexes(p_nxge_t);
static void nxge_destroy_mutexes(p_nxge_t);

static nxge_status_t nxge_map_regs(p_nxge_t nxgep);
static void nxge_unmap_regs(p_nxge_t nxgep);
#ifdef NXGE_DEBUG
static void nxge_test_map_regs(p_nxge_t nxgep);
#endif

static nxge_status_t nxge_add_intrs(p_nxge_t nxgep);
static nxge_status_t nxge_add_soft_intrs(p_nxge_t nxgep);
static void nxge_remove_intrs(p_nxge_t nxgep);
static void nxge_remove_soft_intrs(p_nxge_t nxgep);

static nxge_status_t nxge_add_intrs_adv(p_nxge_t nxgep);
static nxge_status_t nxge_add_intrs_adv_type(p_nxge_t, uint32_t);
static nxge_status_t nxge_add_intrs_adv_type_fix(p_nxge_t, uint32_t);
static void nxge_intrs_enable(p_nxge_t nxgep);
static void nxge_intrs_disable(p_nxge_t nxgep);

static void nxge_suspend(p_nxge_t);
static nxge_status_t nxge_resume(p_nxge_t);

static nxge_status_t nxge_setup_dev(p_nxge_t);
static void nxge_destroy_dev(p_nxge_t);

static nxge_status_t nxge_alloc_mem_pool(p_nxge_t);
static void nxge_free_mem_pool(p_nxge_t);

nxge_status_t nxge_alloc_rx_mem_pool(p_nxge_t);
static void nxge_free_rx_mem_pool(p_nxge_t);

nxge_status_t nxge_alloc_tx_mem_pool(p_nxge_t);
static void nxge_free_tx_mem_pool(p_nxge_t);

static nxge_status_t nxge_dma_mem_alloc(p_nxge_t, dma_method_t,
	struct ddi_dma_attr *, size_t, ddi_device_acc_attr_t *, uint_t,
	p_nxge_dma_common_t);

static void nxge_dma_mem_free(p_nxge_dma_common_t);
static void nxge_dma_free_rx_data_buf(p_nxge_dma_common_t);

static nxge_status_t nxge_alloc_rx_buf_dma(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, size_t, size_t, uint32_t *);
static void nxge_free_rx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t);

static nxge_status_t nxge_alloc_rx_cntl_dma(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, size_t);
static void nxge_free_rx_cntl_dma(p_nxge_t, p_nxge_dma_common_t);

extern nxge_status_t nxge_alloc_tx_buf_dma(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, size_t, size_t, uint32_t *);
static void nxge_free_tx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t);

extern nxge_status_t nxge_alloc_tx_cntl_dma(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, size_t);
static void nxge_free_tx_cntl_dma(p_nxge_t, p_nxge_dma_common_t);

static int nxge_init_common_dev(p_nxge_t);
static void nxge_uninit_common_dev(p_nxge_t);
extern int nxge_param_set_mac(p_nxge_t, queue_t *, mblk_t *,
	char *, caddr_t);
/*
 * The next declarations are for the GLDv3 interface.
 */
static int nxge_m_start(void *);
static void nxge_m_stop(void *);
static int nxge_m_unicst(void *, const uint8_t *);
static int nxge_m_multicst(void *, boolean_t, const uint8_t *);
static int nxge_m_promisc(void *, boolean_t);
static void nxge_m_ioctl(void *, queue_t *, mblk_t *);
static void nxge_m_resources(void *);
mblk_t *nxge_m_tx(void *arg, mblk_t *);
static nxge_status_t nxge_mac_register(p_nxge_t);
static int nxge_altmac_set(p_nxge_t nxgep, uint8_t *mac_addr,
	mac_addr_slot_t slot);
void nxge_mmac_kstat_update(p_nxge_t nxgep, mac_addr_slot_t slot,
	boolean_t factory);
static int nxge_m_mmac_reserve(void *arg, mac_multi_addr_t *maddr);
static int nxge_m_mmac_modify(void *arg, mac_multi_addr_t *maddr);
static int nxge_m_mmac_get(void *arg, mac_multi_addr_t *maddr);
static boolean_t nxge_m_getcapab(void *, mac_capab_t, void *);
static int nxge_m_setprop(void *, const char *, mac_prop_id_t,
	uint_t, const void *);
static int nxge_m_getprop(void *, const char *, mac_prop_id_t,
	uint_t, uint_t, void *);
static int nxge_set_priv_prop(nxge_t *, const char *, uint_t,
	const void *);
static int nxge_get_priv_prop(nxge_t *, const char *, uint_t, uint_t,
	void *);
static int nxge_get_def_val(nxge_t *, mac_prop_id_t, uint_t, void *);

static void nxge_niu_peu_reset(p_nxge_t nxgep);
static void nxge_set_pci_replay_timeout(nxge_t *);

mac_priv_prop_t nxge_priv_props[] = {
	{"_adv_10gfdx_cap", MAC_PROP_PERM_RW},
	{"_adv_pause_cap", MAC_PROP_PERM_RW},
	{"_function_number", MAC_PROP_PERM_READ},
	{"_fw_version", MAC_PROP_PERM_READ},
	{"_port_mode", MAC_PROP_PERM_READ},
	{"_hot_swap_phy", MAC_PROP_PERM_READ},
	{"_accept_jumbo", MAC_PROP_PERM_RW},
	{"_rxdma_intr_time", MAC_PROP_PERM_RW},
	{"_rxdma_intr_pkts", MAC_PROP_PERM_RW},
	{"_class_opt_ipv4_tcp", MAC_PROP_PERM_RW},
	{"_class_opt_ipv4_udp", MAC_PROP_PERM_RW},
	{"_class_opt_ipv4_ah", MAC_PROP_PERM_RW},
	{"_class_opt_ipv4_sctp", MAC_PROP_PERM_RW},
	{"_class_opt_ipv6_tcp", MAC_PROP_PERM_RW},
	{"_class_opt_ipv6_udp", MAC_PROP_PERM_RW},
	{"_class_opt_ipv6_ah", MAC_PROP_PERM_RW},
	{"_class_opt_ipv6_sctp", MAC_PROP_PERM_RW},
	{"_soft_lso_enable", MAC_PROP_PERM_RW}
};

#define	NXGE_MAX_PRIV_PROPS	\
	(sizeof (nxge_priv_props)/sizeof (mac_priv_prop_t))

#define	NXGE_NEPTUNE_MAGIC	0x4E584745UL
#define	MAX_DUMP_SZ		256

#define	NXGE_M_CALLBACK_FLAGS	\
	(MC_RESOURCES | MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP)

mac_callbacks_t nxge_m_callbacks = {
	NXGE_M_CALLBACK_FLAGS,
	nxge_m_stat,
	nxge_m_start,
	nxge_m_stop,
	nxge_m_promisc,
	nxge_m_multicst,
	nxge_m_unicst,
	nxge_m_tx,
	nxge_m_resources,
	nxge_m_ioctl,
	nxge_m_getcapab,
	NULL,
	NULL,
	nxge_m_setprop,
	nxge_m_getprop
};
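/*
 * Sketch (illustrative): this callback table is handed to the GLDv3
 * framework in nxge_mac_register(), roughly as
 *
 *	macp->m_callbacks = &nxge_m_callbacks;
 *	status = mac_register(macp, &nxgep->mach);
 */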
void nxge_err_inject(p_nxge_t, queue_t *, mblk_t *);

/* PSARC/2007/453 MSI-X interrupt limit override. */
#define	NXGE_MSIX_REQUEST_10G	8
#define	NXGE_MSIX_REQUEST_1G	2
static int nxge_create_msi_property(p_nxge_t);

/*
 * These global variables control the message
 * output.
 */
out_dbgmsg_t nxge_dbgmsg_out = DBG_CONSOLE | STR_LOG;
uint64_t nxge_debug_level;

/*
 * This list contains the instance structures for the Neptune
 * devices present in the system. The lock exists to guarantee
 * mutually exclusive access to the list.
 */
void *nxge_list = NULL;

void *nxge_hw_list = NULL;
nxge_os_mutex_t nxge_common_lock;

extern uint64_t npi_debug_level;

extern nxge_status_t nxge_ldgv_init(p_nxge_t, int *, int *);
extern nxge_status_t nxge_ldgv_init_n2(p_nxge_t, int *, int *);
extern nxge_status_t nxge_ldgv_uninit(p_nxge_t);
extern nxge_status_t nxge_intr_ldgv_init(p_nxge_t);
extern void nxge_fm_init(p_nxge_t, ddi_device_acc_attr_t *,
	ddi_device_acc_attr_t *, ddi_dma_attr_t *);
extern void nxge_fm_fini(p_nxge_t);
extern npi_status_t npi_mac_altaddr_disable(npi_handle_t, uint8_t, uint8_t);

/*
 * Count used to maintain the number of buffers being used
 * by Neptune instances and loaned up to the upper layers.
 */
uint32_t nxge_mblks_pending = 0;

/*
 * Device register access attributes for PIO.
 */
static ddi_device_acc_attr_t nxge_dev_reg_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
};

/*
 * Device descriptor access attributes for DMA.
 */
static ddi_device_acc_attr_t nxge_dev_desc_dma_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};

/*
 * Device buffer access attributes for DMA.
 */
static ddi_device_acc_attr_t nxge_dev_buf_dma_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_BE_ACC,
	DDI_STRICTORDER_ACC
};

ddi_dma_attr_t nxge_desc_dma_attr = {
	DMA_ATTR_V0,		/* version number. */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
#ifndef NIU_PA_WORKAROUND
	0x100000,		/* alignment */
#else
	0x2000,
#endif
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	1,			/* scatter/gather list length */
	(unsigned int) 1,	/* granularity */
	0			/* attribute flags */
};

ddi_dma_attr_t nxge_tx_dma_attr = {
	DMA_ATTR_V0,		/* version number. */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
#if defined(_BIG_ENDIAN)
	0x2000,			/* alignment */
#else
	0x1000,			/* alignment */
#endif
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	5,			/* scatter/gather list length */
	(unsigned int) 1,	/* granularity */
	0			/* attribute flags */
};
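/*
 * Illustrative sketch (not the driver's actual allocation path): a
 * DMA handle constrained by these attributes would be obtained as
 *
 *	ddi_dma_handle_t h;
 *	(void) ddi_dma_alloc_handle(dip, &nxge_tx_dma_attr,
 *	    DDI_DMA_DONTWAIT, NULL, &h);
 *
 * The real allocations go through nxge_dma_mem_alloc().
 */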
ddi_dma_attr_t nxge_rx_dma_attr = {
	DMA_ATTR_V0,		/* version number. */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
	0x2000,			/* alignment */
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	1,			/* scatter/gather list length */
	(unsigned int) 1,	/* granularity */
	DDI_DMA_RELAXED_ORDERING /* attribute flags */
};

ddi_dma_lim_t nxge_dma_limits = {
	(uint_t)0,		/* dlim_addr_lo */
	(uint_t)0xffffffff,	/* dlim_addr_hi */
	(uint_t)0xffffffff,	/* dlim_cntr_max */
	(uint_t)0xfc00fc,	/* dlim_burstsizes for 32 and 64 bit xfers */
	0x1,			/* dlim_minxfer */
	1024			/* dlim_speed */
};

dma_method_t nxge_force_dma = DVMA;

/*
 * dma chunk sizes.
 *
 * Try to allocate the largest possible size
 * so that a smaller number of dma chunks has to be managed.
 */
#ifdef NIU_PA_WORKAROUND
size_t alloc_sizes [] = {0x2000};
#else
size_t alloc_sizes [] = {0x1000, 0x2000, 0x4000, 0x8000,
		0x10000, 0x20000, 0x40000, 0x80000,
		0x100000, 0x200000, 0x400000, 0x800000,
		0x1000000, 0x2000000, 0x4000000};
#endif

extern void nxge_get_environs(nxge_t *);

static int
nxge_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	p_nxge_t nxgep = NULL;
	int instance;
	int status = DDI_SUCCESS;
	uint8_t portn;
	nxge_mmac_t *mmac_info;
	p_nxge_param_t param_arr;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_attach"));

	/*
	 * Get the device instance since we'll need to setup
	 * or retrieve a soft state for this instance.
	 */
	instance = ddi_get_instance(dip);

	switch (cmd) {
	case DDI_ATTACH:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_ATTACH"));
		break;

	case DDI_RESUME:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_RESUME"));
		nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);
		if (nxgep == NULL) {
			status = DDI_FAILURE;
			break;
		}
		if (nxgep->dip != dip) {
			status = DDI_FAILURE;
			break;
		}
		if (nxgep->suspended == DDI_PM_SUSPEND) {
			status = ddi_dev_is_needed(nxgep->dip, 0, 1);
		} else {
			status = nxge_resume(nxgep);
		}
		goto nxge_attach_exit;

	case DDI_PM_RESUME:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_RESUME"));
		nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);
		if (nxgep == NULL) {
			status = DDI_FAILURE;
			break;
		}
		if (nxgep->dip != dip) {
			status = DDI_FAILURE;
			break;
		}
		status = nxge_resume(nxgep);
		goto nxge_attach_exit;

	default:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing unknown"));
		status = DDI_FAILURE;
		goto nxge_attach_exit;
	}

	if (ddi_soft_state_zalloc(nxge_list, instance) == DDI_FAILURE) {
		status = DDI_FAILURE;
		goto nxge_attach_exit;
	}

	nxgep = ddi_get_soft_state(nxge_list, instance);
	if (nxgep == NULL) {
		status = NXGE_ERROR;
		goto nxge_attach_fail2;
	}

	nxgep->nxge_magic = NXGE_MAGIC;

	nxgep->drv_state = 0;
	nxgep->dip = dip;
	nxgep->instance = instance;
	nxgep->p_dip = ddi_get_parent(dip);
	nxgep->nxge_debug_level = nxge_debug_level;
	npi_debug_level = nxge_debug_level;
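	/*
	 * From here on, attach proceeds roughly as: map the device
	 * registers, initialize FM and the common (per-chip) data
	 * structures, bring up HIO, then the ndd parameters, kstats,
	 * DMA page setup, mutexes and interrupts, and finally register
	 * with the MAC layer. Each failure jumps to the matching
	 * nxge_attach_fail* label below.
	 */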
	/* Are we a guest running in a Hybrid I/O environment? */
	nxge_get_environs(nxgep);

	status = nxge_map_regs(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_map_regs failed"));
		goto nxge_attach_fail3;
	}

	nxge_fm_init(nxgep, &nxge_dev_reg_acc_attr,
	    &nxge_dev_desc_dma_acc_attr,
	    &nxge_rx_dma_attr);

	/* Create & initialize the per-Neptune data structure */
	/* (even if we're a guest). */
	status = nxge_init_common_dev(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_init_common_dev failed"));
		goto nxge_attach_fail4;
	}

	/*
	 * Software workaround: set the replay timer.
	 */
	if (nxgep->niu_type != N2_NIU) {
		nxge_set_pci_replay_timeout(nxgep);
	}

#if defined(sun4v)
	/* This is required by nxge_hio_init(), which follows. */
	if ((status = nxge_hsvc_register(nxgep)) != DDI_SUCCESS)
		goto nxge_attach_fail4;
#endif

	if ((status = nxge_hio_init(nxgep)) != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_hio_init failed"));
		goto nxge_attach_fail4;
	}

	if (nxgep->niu_type == NEPTUNE_2_10GF) {
		if (nxgep->function_num > 1) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "Unsupported"
			    " function %d. Only functions 0 and 1 are "
			    "supported for this card.", nxgep->function_num));
			status = NXGE_ERROR;
			goto nxge_attach_fail4;
		}
	}

	if (isLDOMguest(nxgep)) {
		/*
		 * Use the function number here.
		 */
		nxgep->mac.portnum = nxgep->function_num;
		nxgep->mac.porttype = PORT_TYPE_LOGICAL;

		/* XXX We'll set the MAC address counts to 1 for now. */
		mmac_info = &nxgep->nxge_mmac_info;
		mmac_info->num_mmac = 1;
		mmac_info->naddrfree = 1;
	} else {
		portn = NXGE_GET_PORT_NUM(nxgep->function_num);
		nxgep->mac.portnum = portn;
		if ((portn == 0) || (portn == 1))
			nxgep->mac.porttype = PORT_TYPE_XMAC;
		else
			nxgep->mac.porttype = PORT_TYPE_BMAC;
		/*
		 * Neptune has 4 ports: the first 2 ports use XMAC (10G MAC)
		 * internally, the other 2 ports use BMAC (1G "Big" MAC).
		 * The two types of MAC have different characteristics.
		 */
		mmac_info = &nxgep->nxge_mmac_info;
		if (nxgep->function_num < 2) {
			mmac_info->num_mmac = XMAC_MAX_ALT_ADDR_ENTRY;
			mmac_info->naddrfree = XMAC_MAX_ALT_ADDR_ENTRY;
		} else {
			mmac_info->num_mmac = BMAC_MAX_ALT_ADDR_ENTRY;
			mmac_info->naddrfree = BMAC_MAX_ALT_ADDR_ENTRY;
		}
	}
	/*
	 * Setup the Ndd parameters for this instance.
	 */
	nxge_init_param(nxgep);

	/*
	 * Setup Register Tracing Buffer.
	 */
	npi_rtrace_buf_init((rtrace_t *)&npi_rtracebuf);

	/* init stats ptr */
	nxge_init_statsp(nxgep);

	/*
	 * Copy the vpd info from eeprom to a local data
	 * structure, and then check its validity.
	 */
	if (!isLDOMguest(nxgep)) {
		int *regp;
		uint_t reglen;
		int rv;

		nxge_vpd_info_get(nxgep);

		/* Find the NIU config handle. */
		rv = ddi_prop_lookup_int_array(DDI_DEV_T_ANY,
		    ddi_get_parent(nxgep->dip), DDI_PROP_DONTPASS,
		    "reg", &regp, &reglen);
		if (rv != DDI_PROP_SUCCESS) {
			goto nxge_attach_fail5;
		}
		/*
		 * address_hi, the first int in the reg property,
		 * contains the config handle; bits 28-31, which are
		 * OBP-specific info, must be removed.
		 */
		nxgep->niu_cfg_hdl = (*regp) & 0xFFFFFFF;
		ddi_prop_free(regp);
	}

	if (isLDOMguest(nxgep)) {
		uchar_t *prop_val;
		uint_t prop_len;
		uint32_t max_frame_size;

		extern void nxge_get_logical_props(p_nxge_t);

		nxgep->statsp->mac_stats.xcvr_inuse = LOGICAL_XCVR;
		nxgep->mac.portmode = PORT_LOGICAL;
		(void) ddi_prop_update_string(DDI_DEV_T_NONE, nxgep->dip,
		    "phy-type", "virtual transceiver");

		nxgep->nports = 1;
		nxgep->board_ver = 0;	/* XXX What? */

		/*
		 * local-mac-address property gives us info on which
		 * specific MAC address the Hybrid resource is associated
		 * with.
		 */
		if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, nxgep->dip, 0,
		    "local-mac-address", &prop_val,
		    &prop_len) != DDI_PROP_SUCCESS) {
			goto nxge_attach_fail5;
		}
		if (prop_len != ETHERADDRL) {
			ddi_prop_free(prop_val);
			goto nxge_attach_fail5;
		}
		ether_copy(prop_val, nxgep->hio_mac_addr);
		ddi_prop_free(prop_val);
		nxge_get_logical_props(nxgep);

		/*
		 * Enable Jumbo property based on the "max-frame-size"
		 * property value.
		 */
		max_frame_size = ddi_prop_get_int(DDI_DEV_T_ANY,
		    nxgep->dip, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
		    "max-frame-size", NXGE_MTU_DEFAULT_MAX);
		if ((max_frame_size > NXGE_MTU_DEFAULT_MAX) &&
		    (max_frame_size <= TX_JUMBO_MTU)) {
			param_arr = nxgep->param_arr;

			param_arr[param_accept_jumbo].value = 1;
			nxgep->mac.is_jumbo = B_TRUE;
			nxgep->mac.maxframesize = (uint16_t)max_frame_size;
			nxgep->mac.default_mtu = nxgep->mac.maxframesize -
			    NXGE_EHEADER_VLAN_CRC;
		}
	} else {
		status = nxge_xcvr_find(nxgep);
		if (status != NXGE_OK) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_attach: "
			    " Couldn't determine card type"
			    " .... exit "));
			goto nxge_attach_fail5;
		}

		status = nxge_get_config_properties(nxgep);
		if (status != NXGE_OK) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "get_hw create failed"));
			goto nxge_attach_fail;
		}
	}

	/*
	 * Setup the Kstats for the driver.
	 */
	nxge_setup_kstats(nxgep);

	if (!isLDOMguest(nxgep))
		nxge_setup_param(nxgep);

	status = nxge_setup_system_dma_pages(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "set dma page failed"));
		goto nxge_attach_fail;
	}

	nxge_hw_id_init(nxgep);

	if (!isLDOMguest(nxgep))
		nxge_hw_init_niu_common(nxgep);

	status = nxge_setup_mutexes(nxgep);
	if (status != NXGE_OK) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set mutex failed"));
		goto nxge_attach_fail;
	}

#if defined(sun4v)
	if (isLDOMguest(nxgep)) {
		/* Find our VR & channel sets. */
		status = nxge_hio_vr_add(nxgep);
		goto nxge_attach_exit;
	}
#endif

	status = nxge_setup_dev(nxgep);
	if (status != DDI_SUCCESS) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set dev failed"));
		goto nxge_attach_fail;
	}

	status = nxge_add_intrs(nxgep);
	if (status != DDI_SUCCESS) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "add_intr failed"));
		goto nxge_attach_fail;
	}
	status = nxge_add_soft_intrs(nxgep);
	if (status != DDI_SUCCESS) {
		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
		    "add_soft_intr failed"));
		goto nxge_attach_fail;
	}

	/*
	 * Enable interrupts.
	 */
	nxge_intrs_enable(nxgep);

	/* If a guest, register with vio_net instead. */
	if ((status = nxge_mac_register(nxgep)) != NXGE_OK) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "unable to register to mac layer (%d)", status));
		goto nxge_attach_fail;
	}

	mac_link_update(nxgep->mach, LINK_STATE_UNKNOWN);

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "registered to mac (instance %d)", instance));

	/* nxge_link_monitor calls xcvr.check_link recursively */
	(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);

	goto nxge_attach_exit;

nxge_attach_fail:
	nxge_unattach(nxgep);
	goto nxge_attach_fail1;

nxge_attach_fail5:
	/*
	 * Tear down the ndd parameters setup.
	 */
	nxge_destroy_param(nxgep);

	/*
	 * Tear down the kstat setup.
	 */
	nxge_destroy_kstats(nxgep);

nxge_attach_fail4:
	if (nxgep->nxge_hw_p) {
		nxge_uninit_common_dev(nxgep);
		nxgep->nxge_hw_p = NULL;
	}

nxge_attach_fail3:
	/*
	 * Unmap the register setup.
	 */
	nxge_unmap_regs(nxgep);

	nxge_fm_fini(nxgep);

nxge_attach_fail2:
	ddi_soft_state_free(nxge_list, nxgep->instance);

nxge_attach_fail1:
	if (status != NXGE_OK)
		status = (NXGE_ERROR | NXGE_DDI_FAILED);
	nxgep = NULL;

nxge_attach_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_attach status = 0x%08x",
	    status));

	return (status);
}

static int
nxge_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int status = DDI_SUCCESS;
	int instance;
	p_nxge_t nxgep = NULL;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_detach"));
	instance = ddi_get_instance(dip);
	nxgep = ddi_get_soft_state(nxge_list, instance);
	if (nxgep == NULL) {
		status = DDI_FAILURE;
		goto nxge_detach_exit;
	}

	switch (cmd) {
	case DDI_DETACH:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_DETACH"));
		break;

	case DDI_PM_SUSPEND:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_SUSPEND"));
		nxgep->suspended = DDI_PM_SUSPEND;
		nxge_suspend(nxgep);
		break;

	case DDI_SUSPEND:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_SUSPEND"));
		if (nxgep->suspended != DDI_PM_SUSPEND) {
			nxgep->suspended = DDI_SUSPEND;
			nxge_suspend(nxgep);
		}
		break;

	default:
		status = DDI_FAILURE;
	}

	if (cmd != DDI_DETACH)
		goto nxge_detach_exit;

	/*
	 * Stop the xcvr polling.
	 */
	nxgep->suspended = cmd;

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);

	if (isLDOMguest(nxgep)) {
		if (nxgep->nxge_mac_state == NXGE_MAC_STARTED)
			nxge_m_stop((void *)nxgep);
		nxge_hio_unregister(nxgep);
	} else if (nxgep->mach && (status = mac_unregister(nxgep->mach)) != 0) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "<== nxge_detach status = 0x%08X", status));
		return (DDI_FAILURE);
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "<== nxge_detach (mac_unregister) status = 0x%08X", status));

	nxge_unattach(nxgep);
	nxgep = NULL;

nxge_detach_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_detach status = 0x%08X",
	    status));

	return (status);
}
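/*
 * nxge_unattach() tears down state in roughly the reverse order of
 * nxge_attach(): timer, the optional PEU reset workaround, VR and
 * hypervisor services, interrupts, the device itself, ndd parameters,
 * kstats, mutexes, properties, and finally the register mappings.
 */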
static void
nxge_unattach(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unattach"));

	if (nxgep == NULL || nxgep->dev_regs == NULL) {
		return;
	}

	nxgep->nxge_magic = 0;

	if (nxgep->nxge_timerid) {
		nxge_stop_timer(nxgep, nxgep->nxge_timerid);
		nxgep->nxge_timerid = 0;
	}

	/*
	 * If this flag is set, it will affect the Neptune
	 * only.
	 */
	if ((nxgep->niu_type != N2_NIU) && nxge_peu_reset_enable) {
		nxge_niu_peu_reset(nxgep);
	}

#if defined(sun4v)
	if (isLDOMguest(nxgep)) {
		(void) nxge_hio_vr_release(nxgep);
	}
#endif

	if (nxgep->nxge_hw_p) {
		nxge_uninit_common_dev(nxgep);
		nxgep->nxge_hw_p = NULL;
	}

#if defined(sun4v)
	if (nxgep->niu_type == N2_NIU && nxgep->niu_hsvc_available == B_TRUE) {
		(void) hsvc_unregister(&nxgep->niu_hsvc);
		nxgep->niu_hsvc_available = B_FALSE;
	}
#endif
	/*
	 * Stop any further interrupts.
	 */
	nxge_remove_intrs(nxgep);

	/* remove soft interrupts */
	nxge_remove_soft_intrs(nxgep);

	/*
	 * Stop the device and free resources.
	 */
	if (!isLDOMguest(nxgep)) {
		nxge_destroy_dev(nxgep);
	}

	/*
	 * Tear down the ndd parameters setup.
	 */
	nxge_destroy_param(nxgep);

	/*
	 * Tear down the kstat setup.
	 */
	nxge_destroy_kstats(nxgep);

	/*
	 * Destroy all mutexes.
	 */
	nxge_destroy_mutexes(nxgep);

	/*
	 * Remove the list of ndd parameters which
	 * were set up during attach.
	 */
	if (nxgep->dip) {
		NXGE_DEBUG_MSG((nxgep, OBP_CTL,
		    " nxge_unattach: remove all properties"));

		(void) ddi_prop_remove_all(nxgep->dip);
	}

#if NXGE_PROPERTY
	nxge_remove_hard_properties(nxgep);
#endif

	/*
	 * Unmap the register setup.
	 */
	nxge_unmap_regs(nxgep);

	nxge_fm_fini(nxgep);

	ddi_soft_state_free(nxge_list, nxgep->instance);

	NXGE_DEBUG_MSG((NULL, DDI_CTL, "<== nxge_unattach"));
}

#if defined(sun4v)
int
nxge_hsvc_register(nxge_t *nxgep)
{
	nxge_status_t status;

	if (nxgep->niu_type == N2_NIU) {
		nxgep->niu_hsvc_available = B_FALSE;
		bcopy(&niu_hsvc, &nxgep->niu_hsvc, sizeof (hsvc_info_t));
		if ((status = hsvc_register(&nxgep->niu_hsvc,
		    &nxgep->niu_min_ver)) != 0) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "nxge_attach: %s: cannot negotiate "
			    "hypervisor services revision %d group: 0x%lx "
			    "major: 0x%lx minor: 0x%lx errno: %d",
			    niu_hsvc.hsvc_modname, niu_hsvc.hsvc_rev,
			    niu_hsvc.hsvc_group, niu_hsvc.hsvc_major,
			    niu_hsvc.hsvc_minor, status));
			return (DDI_FAILURE);
		}
		nxgep->niu_hsvc_available = B_TRUE;
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "NIU Hypervisor service enabled"));
	}

	return (DDI_SUCCESS);
}
#endif

static char n2_siu_name[] = "niu";

static nxge_status_t
nxge_map_regs(p_nxge_t nxgep)
{
	int ddi_status = DDI_SUCCESS;
	p_dev_regs_t dev_regs;
	char buf[MAXPATHLEN + 1];
	char *devname;
#ifdef NXGE_DEBUG
	char *sysname;
#endif
	off_t regsize;
	nxge_status_t status = NXGE_OK;
#if !defined(_BIG_ENDIAN)
	off_t pci_offset;
	uint16_t pcie_devctl;
#endif

	if (isLDOMguest(nxgep)) {
		return (nxge_guest_regs_map(nxgep));
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_map_regs"));
	nxgep->dev_regs = NULL;
	dev_regs = KMEM_ZALLOC(sizeof (dev_regs_t), KM_SLEEP);
	dev_regs->nxge_regh = NULL;
	dev_regs->nxge_pciregh = NULL;
	dev_regs->nxge_msix_regh = NULL;
	dev_regs->nxge_vir_regh = NULL;
	dev_regs->nxge_vir2_regh = NULL;
	nxgep->niu_type = NIU_TYPE_NONE;

	devname = ddi_pathname(nxgep->dip, buf);
	ASSERT(strlen(devname) > 0);
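	/*
	 * Example device paths (illustrative): an N2/NIU port shows up
	 * as something like "/niu@80/network@0", while a Neptune PCI-E
	 * port is rooted under a pci node; the strstr() test below
	 * keys off the "niu" component of the path.
	 */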
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "nxge_map_regs: pathname devname %s", devname));

	/*
	 * The driver is running on an N2/NIU system if devname is
	 * something like "/niu@80/network@0".
	 */
	if (strstr(devname, n2_siu_name)) {
		/* N2/NIU */
		nxgep->niu_type = N2_NIU;
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: N2/NIU devname %s", devname));
		/* get function number */
		nxgep->function_num =
		    (devname[strlen(devname) - 1] == '1' ? 1 : 0);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: N2/NIU function number %d",
		    nxgep->function_num));
	} else {
		int *prop_val;
		uint_t prop_len;
		uint8_t func_num;

		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, nxgep->dip,
		    0, "reg",
		    &prop_val, &prop_len) != DDI_PROP_SUCCESS) {
			NXGE_DEBUG_MSG((nxgep, VPD_CTL,
			    "Reg property not found"));
			ddi_status = DDI_FAILURE;
			goto nxge_map_regs_fail0;
		} else {
			func_num = (prop_val[0] >> 8) & 0x7;
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "Reg property found: fun # %d",
			    func_num));
			nxgep->function_num = func_num;
			ddi_prop_free(prop_val);
			if (isLDOMguest(nxgep)) {
				nxgep->function_num /= 2;
				return (NXGE_OK);
			}
		}
	}

	switch (nxgep->niu_type) {
	default:
		(void) ddi_dev_regsize(nxgep->dip, 0, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: pci config size 0x%x", regsize));

		ddi_status = ddi_regs_map_setup(nxgep->dip, 0,
		    (caddr_t *)&(dev_regs->nxge_pciregp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_pciregh);
		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs, nxge bus config regs failed"));
			goto nxge_map_regs_fail0;
		}
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_reg: PCI config addr 0x%0llx "
		    " handle 0x%0llx", dev_regs->nxge_pciregp,
		    dev_regs->nxge_pciregh));
		/*
		 * Workaround for a HW bit-swapping bug which ends up
		 * with no-snoop = yes, resulting in DMA not being
		 * synched properly.
		 */
#if !defined(_BIG_ENDIAN)
		/* workarounds for x86 systems */
		pci_offset = 0x80 + PCIE_DEVCTL;
		pcie_devctl = 0x0;
		pcie_devctl &= PCIE_DEVCTL_ENABLE_NO_SNOOP;
		pcie_devctl |= PCIE_DEVCTL_RO_EN;
		pci_config_put16(dev_regs->nxge_pciregh, pci_offset,
		    pcie_devctl);
#endif

		(void) ddi_dev_regsize(nxgep->dip, 1, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: pio size 0x%x", regsize));
		/* set up the device mapped register */
		ddi_status = ddi_regs_map_setup(nxgep->dip, 1,
		    (caddr_t *)&(dev_regs->nxge_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_regh);
		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for Neptune global reg failed"));
			goto nxge_map_regs_fail1;
		}

		/* set up the msi/msi-x mapped register */
		(void) ddi_dev_regsize(nxgep->dip, 2, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: msix size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 2,
		    (caddr_t *)&(dev_regs->nxge_msix_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_msix_regh);
		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for msi reg failed"));
			goto nxge_map_regs_fail2;
		}

		/* set up the vio region mapped register */
		(void) ddi_dev_regsize(nxgep->dip, 3, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: vio size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 3,
		    (caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh);
		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for nxge vio reg failed"));
			goto nxge_map_regs_fail3;
		}
		nxgep->dev_regs = dev_regs;

		NPI_PCI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_pciregh);
		NPI_PCI_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_pciregp);
		NPI_MSI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_msix_regh);
		NPI_MSI_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_msix_regp);

		NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
		NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp);

		NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
		NPI_REG_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_regp);

		NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh);
		NPI_VREG_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_vir_regp);

		break;

	case N2_NIU:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "ddi_map_regs, NIU"));
		/*
		 * Set up the device mapped register (FWARC 2006/556)
		 * (changed back to 1: reg starts at 1!)
		 */
		(void) ddi_dev_regsize(nxgep->dip, 1, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: dev size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 1,
		    (caddr_t *)&(dev_regs->nxge_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_regh);
		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for N2/NIU, global reg failed "));
			goto nxge_map_regs_fail1;
		}

		/* set up the first vio region mapped register */
		(void) ddi_dev_regsize(nxgep->dip, 2, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: vio (1) size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 2,
		    (caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh);
		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for nxge vio reg failed"));
			goto nxge_map_regs_fail2;
		}
		/* set up the second vio region mapped register */
		(void) ddi_dev_regsize(nxgep->dip, 3, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: vio (3) size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 3,
		    (caddr_t *)&(dev_regs->nxge_vir2_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir2_regh);
		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for nxge vio2 reg failed"));
			goto nxge_map_regs_fail3;
		}
		nxgep->dev_regs = dev_regs;

		NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
		NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp);

		NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
		NPI_REG_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_regp);

		NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh);
		NPI_VREG_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_vir_regp);

		NPI_V2REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir2_regh);
		NPI_V2REG_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_vir2_regp);

		break;
	}
DDI_CTL, "nxge_map_reg: hardware addr 0x%0llx " 1353 " handle 0x%0llx", dev_regs->nxge_regp, dev_regs->nxge_regh)); 1354 1355 goto nxge_map_regs_exit; 1356 nxge_map_regs_fail3: 1357 if (dev_regs->nxge_msix_regh) { 1358 ddi_regs_map_free(&dev_regs->nxge_msix_regh); 1359 } 1360 if (dev_regs->nxge_vir_regh) { 1361 ddi_regs_map_free(&dev_regs->nxge_regh); 1362 } 1363 nxge_map_regs_fail2: 1364 if (dev_regs->nxge_regh) { 1365 ddi_regs_map_free(&dev_regs->nxge_regh); 1366 } 1367 nxge_map_regs_fail1: 1368 if (dev_regs->nxge_pciregh) { 1369 ddi_regs_map_free(&dev_regs->nxge_pciregh); 1370 } 1371 nxge_map_regs_fail0: 1372 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "Freeing register set memory")); 1373 kmem_free(dev_regs, sizeof (dev_regs_t)); 1374 1375 nxge_map_regs_exit: 1376 if (ddi_status != DDI_SUCCESS) 1377 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 1378 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_map_regs")); 1379 return (status); 1380 } 1381 1382 static void 1383 nxge_unmap_regs(p_nxge_t nxgep) 1384 { 1385 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unmap_regs")); 1386 1387 if (isLDOMguest(nxgep)) { 1388 nxge_guest_regs_map_free(nxgep); 1389 return; 1390 } 1391 1392 if (nxgep->dev_regs) { 1393 if (nxgep->dev_regs->nxge_pciregh) { 1394 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1395 "==> nxge_unmap_regs: bus")); 1396 ddi_regs_map_free(&nxgep->dev_regs->nxge_pciregh); 1397 nxgep->dev_regs->nxge_pciregh = NULL; 1398 } 1399 if (nxgep->dev_regs->nxge_regh) { 1400 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1401 "==> nxge_unmap_regs: device registers")); 1402 ddi_regs_map_free(&nxgep->dev_regs->nxge_regh); 1403 nxgep->dev_regs->nxge_regh = NULL; 1404 } 1405 if (nxgep->dev_regs->nxge_msix_regh) { 1406 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1407 "==> nxge_unmap_regs: device interrupts")); 1408 ddi_regs_map_free(&nxgep->dev_regs->nxge_msix_regh); 1409 nxgep->dev_regs->nxge_msix_regh = NULL; 1410 } 1411 if (nxgep->dev_regs->nxge_vir_regh) { 1412 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1413 "==> nxge_unmap_regs: vio region")); 1414 ddi_regs_map_free(&nxgep->dev_regs->nxge_vir_regh); 1415 nxgep->dev_regs->nxge_vir_regh = NULL; 1416 } 1417 if (nxgep->dev_regs->nxge_vir2_regh) { 1418 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1419 "==> nxge_unmap_regs: vio2 region")); 1420 ddi_regs_map_free(&nxgep->dev_regs->nxge_vir2_regh); 1421 nxgep->dev_regs->nxge_vir2_regh = NULL; 1422 } 1423 1424 kmem_free(nxgep->dev_regs, sizeof (dev_regs_t)); 1425 nxgep->dev_regs = NULL; 1426 } 1427 1428 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_unmap_regs")); 1429 } 1430 1431 static nxge_status_t 1432 nxge_setup_mutexes(p_nxge_t nxgep) 1433 { 1434 int ddi_status = DDI_SUCCESS; 1435 nxge_status_t status = NXGE_OK; 1436 nxge_classify_t *classify_ptr; 1437 int partition; 1438 1439 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_mutexes")); 1440 1441 /* 1442 * Get the interrupt cookie so the mutexes can be 1443 * Initialized. 1444 */ 1445 if (isLDOMguest(nxgep)) { 1446 nxgep->interrupt_cookie = 0; 1447 } else { 1448 ddi_status = ddi_get_iblock_cookie(nxgep->dip, 0, 1449 &nxgep->interrupt_cookie); 1450 1451 if (ddi_status != DDI_SUCCESS) { 1452 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1453 "<== nxge_setup_mutexes: failed 0x%x", 1454 ddi_status)); 1455 goto nxge_setup_mutexes_exit; 1456 } 1457 } 1458 1459 cv_init(&nxgep->poll_cv, NULL, CV_DRIVER, NULL); 1460 MUTEX_INIT(&nxgep->poll_lock, NULL, 1461 MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); 1462 1463 /* 1464 * Initialize mutexes for this device. 
	/*
	 * Initialize mutexes for this device.
	 */
	MUTEX_INIT(nxgep->genlock, NULL,
	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	MUTEX_INIT(&nxgep->ouraddr_lock, NULL,
	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	MUTEX_INIT(&nxgep->mif_lock, NULL,
	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	MUTEX_INIT(&nxgep->group_lock, NULL,
	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	RW_INIT(&nxgep->filter_lock, NULL,
	    RW_DRIVER, (void *)nxgep->interrupt_cookie);

	classify_ptr = &nxgep->classifier;
	/*
	 * FFLP mutexes are never used in interrupt context
	 * as FFLP operations can take a very long time to
	 * complete and hence are not suitable to invoke from
	 * interrupt handlers.
	 */
	MUTEX_INIT(&classify_ptr->tcam_lock, NULL,
	    NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
		MUTEX_INIT(&classify_ptr->fcram_lock, NULL,
		    NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
		for (partition = 0; partition < MAX_PARTITION; partition++) {
			MUTEX_INIT(&classify_ptr->hash_lock[partition], NULL,
			    NXGE_MUTEX_DRIVER,
			    (void *)nxgep->interrupt_cookie);
		}
	}

nxge_setup_mutexes_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "<== nxge_setup_mutexes status = %x", status));

	if (ddi_status != DDI_SUCCESS)
		status |= (NXGE_ERROR | NXGE_DDI_FAILED);

	return (status);
}

static void
nxge_destroy_mutexes(p_nxge_t nxgep)
{
	int partition;
	nxge_classify_t *classify_ptr;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_mutexes"));
	RW_DESTROY(&nxgep->filter_lock);
	MUTEX_DESTROY(&nxgep->group_lock);
	MUTEX_DESTROY(&nxgep->mif_lock);
	MUTEX_DESTROY(&nxgep->ouraddr_lock);
	MUTEX_DESTROY(nxgep->genlock);

	classify_ptr = &nxgep->classifier;
	MUTEX_DESTROY(&classify_ptr->tcam_lock);

	/* Destroy all polling resources. */
	MUTEX_DESTROY(&nxgep->poll_lock);
	cv_destroy(&nxgep->poll_cv);

	/* free data structures, based on HW type */
	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
		MUTEX_DESTROY(&classify_ptr->fcram_lock);
		for (partition = 0; partition < MAX_PARTITION; partition++) {
			MUTEX_DESTROY(&classify_ptr->hash_lock[partition]);
		}
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_mutexes"));
}

nxge_status_t
nxge_init(p_nxge_t nxgep)
{
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_init"));

	if (nxgep->drv_state & STATE_HW_INITIALIZED) {
		return (status);
	}

	/*
	 * Allocate system memory for the receive/transmit buffer blocks
	 * and receive/transmit descriptor rings.
	 */
	status = nxge_alloc_mem_pool(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "alloc mem failed\n"));
		goto nxge_init_fail1;
	}

	if (!isLDOMguest(nxgep)) {
		/*
		 * Initialize and enable the TXC registers.
		 * (Globally enable the Tx controller,
		 * enable the port, configure the dma channel bitmap,
		 * configure the max burst size).
		 */
		status = nxge_txc_init(nxgep);
		if (status != NXGE_OK) {
			NXGE_ERROR_MSG((nxgep,
			    NXGE_ERR_CTL, "init txc failed\n"));
			goto nxge_init_fail2;
		}
	}
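	/*
	 * Note on ordering: the DMA channels are brought up before
	 * classification (TCAM/FCRAM), ZCP, IPP and the MAC below, and
	 * the failure path unwinds in reverse through the
	 * nxge_init_fail* labels.
	 */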
	/*
	 * Initialize and enable TXDMA channels.
	 */
	status = nxge_init_txdma_channels(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init txdma failed\n"));
		goto nxge_init_fail3;
	}

	/*
	 * Initialize and enable RXDMA channels.
	 */
	status = nxge_init_rxdma_channels(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init rxdma failed\n"));
		goto nxge_init_fail4;
	}

	/*
	 * The guest domain is now done.
	 */
	if (isLDOMguest(nxgep)) {
		nxgep->drv_state |= STATE_HW_INITIALIZED;
		goto nxge_init_exit;
	}

	/*
	 * Initialize TCAM and FCRAM (Neptune).
	 */
	status = nxge_classify_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "init classify failed\n"));
		goto nxge_init_fail5;
	}

	/*
	 * Initialize ZCP
	 */
	status = nxge_zcp_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init ZCP failed\n"));
		goto nxge_init_fail5;
	}

	/*
	 * Initialize IPP.
	 */
	status = nxge_ipp_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init IPP failed\n"));
		goto nxge_init_fail5;
	}

	/*
	 * Initialize the MAC block.
	 */
	status = nxge_mac_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init MAC failed\n"));
		goto nxge_init_fail5;
	}

	nxge_intrs_enable(nxgep); /* XXX What changes do I need to make here? */

	/*
	 * Enable hardware interrupts.
	 */
	nxge_intr_hw_enable(nxgep);
	nxgep->drv_state |= STATE_HW_INITIALIZED;

	goto nxge_init_exit;

nxge_init_fail5:
	nxge_uninit_rxdma_channels(nxgep);
nxge_init_fail4:
	nxge_uninit_txdma_channels(nxgep);
nxge_init_fail3:
	if (!isLDOMguest(nxgep)) {
		(void) nxge_txc_uninit(nxgep);
	}
nxge_init_fail2:
	nxge_free_mem_pool(nxgep);
nxge_init_fail1:
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "<== nxge_init status (failed) = 0x%08x", status));
	return (status);

nxge_init_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_init status = 0x%08x",
	    status));
	return (status);
}

timeout_id_t
nxge_start_timer(p_nxge_t nxgep, fptrv_t func, int msec)
{
	if ((nxgep->suspended == 0) || (nxgep->suspended == DDI_RESUME)) {
		return (timeout(func, (caddr_t)nxgep,
		    drv_usectohz(1000 * msec)));
	}
	return (NULL);
}

/*ARGSUSED*/
void
nxge_stop_timer(p_nxge_t nxgep, timeout_id_t timerid)
{
	if (timerid) {
		(void) untimeout(timerid);
	}
}

void
nxge_uninit(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_uninit"));

	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "==> nxge_uninit: not initialized"));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "<== nxge_uninit"));
		return;
	}

	/* stop timer */
	if (nxgep->nxge_timerid) {
		nxge_stop_timer(nxgep, nxgep->nxge_timerid);
		nxgep->nxge_timerid = 0;
	}

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
	(void) nxge_intr_hw_disable(nxgep);
	/*
	 * Reset the receive MAC side.
	 */
	(void) nxge_rx_mac_disable(nxgep);

	/* Disable and soft reset the IPP */
	if (!isLDOMguest(nxgep))
		(void) nxge_ipp_disable(nxgep);

	/* Free classification resources */
	(void) nxge_classify_uninit(nxgep);

	/*
	 * Reset the transmit/receive DMA side.
	 */
	(void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_STOP);
	(void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP);

	nxge_uninit_txdma_channels(nxgep);
	nxge_uninit_rxdma_channels(nxgep);

	/*
	 * Reset the transmit MAC side.
	 */
	(void) nxge_tx_mac_disable(nxgep);

	nxge_free_mem_pool(nxgep);

	/*
	 * Start the timer if the reset flag is not set.
	 * If the reset flag is set, the link monitor
	 * will not be started in order to stop further bus
	 * activities coming from this interface.
	 * The driver will start the monitor function
	 * if the interface is initialized again later.
	 */
	if (!nxge_peu_reset_enable) {
		(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
	}

	nxgep->drv_state &= ~STATE_HW_INITIALIZED;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_uninit: "
	    "nxge_mblks_pending %d", nxge_mblks_pending));
}

void
nxge_get64(p_nxge_t nxgep, p_mblk_t mp)
{
#if defined(__i386)
	size_t reg;
#else
	uint64_t reg;
#endif
	uint64_t regdata;
	int i, retry;

	bcopy((char *)mp->b_rptr, (char *)&reg, sizeof (uint64_t));
	regdata = 0;
	retry = 1;

	for (i = 0; i < retry; i++) {
		NXGE_REG_RD64(nxgep->npi_handle, reg, &regdata);
	}
	bcopy((char *)&regdata, (char *)mp->b_rptr, sizeof (uint64_t));
}

void
nxge_put64(p_nxge_t nxgep, p_mblk_t mp)
{
#if defined(__i386)
	size_t reg;
#else
	uint64_t reg;
#endif
	uint64_t buf[2];

	bcopy((char *)mp->b_rptr, (char *)&buf[0], 2 * sizeof (uint64_t));
#if defined(__i386)
	reg = (size_t)buf[0];
#else
	reg = buf[0];
#endif

	NXGE_NPI_PIO_WRITE64(nxgep->npi_handle, reg, buf[1]);
}

nxge_os_mutex_t nxgedebuglock;
int nxge_debug_init = 0;
/*ARGSUSED*/
/*VARARGS*/
void
nxge_debug_msg(p_nxge_t nxgep, uint64_t level, char *fmt, ...)
{
	char msg_buffer[1048];
	char prefix_buffer[32];
	int instance;
	uint64_t debug_level;
	int cmn_level = CE_CONT;
	va_list ap;

	/* In case a developer has changed nxge_debug_level. */
	if (nxgep && nxgep->nxge_debug_level != nxge_debug_level) {
		nxgep->nxge_debug_level = nxge_debug_level;
	}

	debug_level = (nxgep == NULL) ? nxge_debug_level :
	    nxgep->nxge_debug_level;

	if ((level & debug_level) ||
	    (level == NXGE_NOTE) ||
	    (level == NXGE_ERR_CTL)) {
		/* do the msg processing */
		if (nxge_debug_init == 0) {
			MUTEX_INIT(&nxgedebuglock, NULL, MUTEX_DRIVER, NULL);
			nxge_debug_init = 1;
		}

		MUTEX_ENTER(&nxgedebuglock);

		if ((level & NXGE_NOTE)) {
			cmn_level = CE_NOTE;
		}

		if (level & NXGE_ERR_CTL) {
			cmn_level = CE_WARN;
		}

		va_start(ap, fmt);
		(void) vsprintf(msg_buffer, fmt, ap);
		va_end(ap);
		if (nxgep == NULL) {
			instance = -1;
			(void) sprintf(prefix_buffer, "%s :", "nxge");
		} else {
			instance = nxgep->instance;
			(void) sprintf(prefix_buffer,
			    "%s%d :", "nxge", instance);
		}

		MUTEX_EXIT(&nxgedebuglock);
		cmn_err(cmn_level, "!%s %s\n",
		    prefix_buffer, msg_buffer);
	}
}

char *
nxge_dump_packet(char *addr, int size)
{
	uchar_t *ap = (uchar_t *)addr;
	int i;
	static char etherbuf[1024];
	char *cp = etherbuf;
	char digits[] = "0123456789abcdef";

	if (!size)
		size = 60;

	if (size > MAX_DUMP_SZ) {
		/* Dump the leading bytes */
		for (i = 0; i < MAX_DUMP_SZ/2; i++) {
			if (*ap > 0x0f)
				*cp++ = digits[*ap >> 4];
			*cp++ = digits[*ap++ & 0xf];
			*cp++ = ':';
		}
		for (i = 0; i < 20; i++)
			*cp++ = '.';
		/* Dump the last MAX_DUMP_SZ/2 bytes */
		ap = (uchar_t *)(addr + (size - MAX_DUMP_SZ/2));
		for (i = 0; i < MAX_DUMP_SZ/2; i++) {
			if (*ap > 0x0f)
				*cp++ = digits[*ap >> 4];
			*cp++ = digits[*ap++ & 0xf];
			*cp++ = ':';
		}
	} else {
		for (i = 0; i < size; i++) {
			if (*ap > 0x0f)
				*cp++ = digits[*ap >> 4];
			*cp++ = digits[*ap++ & 0xf];
			*cp++ = ':';
		}
	}
	*--cp = 0;
	return (etherbuf);
}
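/*
 * Example use (illustrative): dump the first 60 bytes of a frame
 * while debugging:
 *
 *	NXGE_DEBUG_MSG((nxgep, RX_CTL, "pkt: %s",
 *	    nxge_dump_packet((char *)mp->b_rptr, 60)));
 */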
NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base2c, 0))); 1943 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1944 "\nNeptune PCI BAR: base30 0x%x\n", 1945 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base30, 0))); 1946 1947 cfg_handle = nxgep->dev_regs->nxge_pciregh; 1948 cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp; 1949 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1950 "first 0x%llx second 0x%llx third 0x%llx " 1951 "last 0x%llx ", 1952 NXGE_PIO_READ64(dev_handle, 1953 (uint64_t *)(dev_ptr + 0), 0), 1954 NXGE_PIO_READ64(dev_handle, 1955 (uint64_t *)(dev_ptr + 8), 0), 1956 NXGE_PIO_READ64(dev_handle, 1957 (uint64_t *)(dev_ptr + 16), 0), 1958 NXGE_PIO_READ64(dev_handle, 1959 (uint64_t *)(dev_ptr + 24), 0))); 1960 } 1961 } 1962 1963 #endif 1964 1965 static void 1966 nxge_suspend(p_nxge_t nxgep) 1967 { 1968 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_suspend")); 1969 1970 nxge_intrs_disable(nxgep); 1971 nxge_destroy_dev(nxgep); 1972 1973 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_suspend")); 1974 } 1975 1976 static nxge_status_t 1977 nxge_resume(p_nxge_t nxgep) 1978 { 1979 nxge_status_t status = NXGE_OK; 1980 1981 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_resume")); 1982 1983 nxgep->suspended = DDI_RESUME; 1984 (void) nxge_link_monitor(nxgep, LINK_MONITOR_START); 1985 (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_START); 1986 (void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_START); 1987 (void) nxge_rx_mac_enable(nxgep); 1988 (void) nxge_tx_mac_enable(nxgep); 1989 nxge_intrs_enable(nxgep); 1990 nxgep->suspended = 0; 1991 1992 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1993 "<== nxge_resume status = 0x%x", status)); 1994 return (status); 1995 } 1996 1997 static nxge_status_t 1998 nxge_setup_dev(p_nxge_t nxgep) 1999 { 2000 nxge_status_t status = NXGE_OK; 2001 2002 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_dev port %d", 2003 nxgep->mac.portnum)); 2004 2005 status = nxge_link_init(nxgep); 2006 2007 if (fm_check_acc_handle(nxgep->dev_regs->nxge_regh) != DDI_FM_OK) { 2008 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2009 "port%d Bad register acc handle", nxgep->mac.portnum)); 2010 status = NXGE_ERROR; 2011 } 2012 2013 if (status != NXGE_OK) { 2014 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2015 " nxge_setup_dev status " 2016 "(xcvr init 0x%08x)", status)); 2017 goto nxge_setup_dev_exit; 2018 } 2019 2020 nxge_setup_dev_exit: 2021 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 2022 "<== nxge_setup_dev port %d status = 0x%08x", 2023 nxgep->mac.portnum, status)); 2024 2025 return (status); 2026 } 2027 2028 static void 2029 nxge_destroy_dev(p_nxge_t nxgep) 2030 { 2031 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_dev")); 2032 2033 (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP); 2034 2035 (void) nxge_hw_stop(nxgep); 2036 2037 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_dev")); 2038 } 2039 2040 static nxge_status_t 2041 nxge_setup_system_dma_pages(p_nxge_t nxgep) 2042 { 2043 int ddi_status = DDI_SUCCESS; 2044 uint_t count; 2045 ddi_dma_cookie_t cookie; 2046 uint_t iommu_pagesize; 2047 nxge_status_t status = NXGE_OK; 2048 2049 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_system_dma_pages")); 2050 nxgep->sys_page_sz = ddi_ptob(nxgep->dip, (ulong_t)1); 2051 if (nxgep->niu_type != N2_NIU) { 2052 iommu_pagesize = dvma_pagesize(nxgep->dip); 2053 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 2054 " nxge_setup_system_dma_pages: page %d (ddi_ptob %d) " 2055 " default_block_size %d iommu_pagesize %d", 2056 nxgep->sys_page_sz, 2057 ddi_ptob(nxgep->dip, (ulong_t)1), 2058 nxgep->rx_default_block_size, 2059 iommu_pagesize)); 2060 2061 if (iommu_pagesize != 0) { 2062 if (nxgep->sys_page_sz ==
iommu_pagesize) { 2063 if (iommu_pagesize > 0x4000) 2064 nxgep->sys_page_sz = 0x4000; 2065 } else { 2066 if (nxgep->sys_page_sz > iommu_pagesize) 2067 nxgep->sys_page_sz = iommu_pagesize; 2068 } 2069 } 2070 } 2071 nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1); 2072 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 2073 "==> nxge_setup_system_dma_pages: page %d (ddi_ptob %d) " 2074 "default_block_size %d page mask %d", 2075 nxgep->sys_page_sz, 2076 ddi_ptob(nxgep->dip, (ulong_t)1), 2077 nxgep->rx_default_block_size, 2078 nxgep->sys_page_mask)); 2079 2080 2081 switch (nxgep->sys_page_sz) { 2082 default: 2083 nxgep->sys_page_sz = 0x1000; 2084 nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1); 2085 nxgep->rx_default_block_size = 0x1000; 2086 nxgep->rx_bksize_code = RBR_BKSIZE_4K; 2087 break; 2088 case 0x1000: 2089 nxgep->rx_default_block_size = 0x1000; 2090 nxgep->rx_bksize_code = RBR_BKSIZE_4K; 2091 break; 2092 case 0x2000: 2093 nxgep->rx_default_block_size = 0x2000; 2094 nxgep->rx_bksize_code = RBR_BKSIZE_8K; 2095 break; 2096 case 0x4000: 2097 nxgep->rx_default_block_size = 0x4000; 2098 nxgep->rx_bksize_code = RBR_BKSIZE_16K; 2099 break; 2100 case 0x8000: 2101 nxgep->rx_default_block_size = 0x8000; 2102 nxgep->rx_bksize_code = RBR_BKSIZE_32K; 2103 break; 2104 } 2105 2106 #ifndef USE_RX_BIG_BUF 2107 nxge_rx_dma_attr.dma_attr_align = nxgep->sys_page_sz; 2108 #else 2109 nxgep->rx_default_block_size = 0x2000; 2110 nxgep->rx_bksize_code = RBR_BKSIZE_8K; 2111 #endif 2112 /* 2113 * Get the system DMA burst size. 2114 */ 2115 ddi_status = ddi_dma_alloc_handle(nxgep->dip, &nxge_tx_dma_attr, 2116 DDI_DMA_DONTWAIT, 0, 2117 &nxgep->dmasparehandle); 2118 if (ddi_status != DDI_SUCCESS) { 2119 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2120 "ddi_dma_alloc_handle: failed " 2121 " status 0x%x", ddi_status)); 2122 goto nxge_get_soft_properties_exit; 2123 } 2124 2125 ddi_status = ddi_dma_addr_bind_handle(nxgep->dmasparehandle, NULL, 2126 (caddr_t)nxgep->dmasparehandle, 2127 sizeof (nxgep->dmasparehandle), 2128 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 2129 DDI_DMA_DONTWAIT, 0, 2130 &cookie, &count); 2131 if (ddi_status != DDI_DMA_MAPPED) { 2132 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2133 "Binding spare handle to find system" 2134 " burstsize failed.")); 2135 ddi_status = DDI_FAILURE; 2136 goto nxge_get_soft_properties_fail1; 2137 } 2138 2139 nxgep->sys_burst_sz = ddi_dma_burstsizes(nxgep->dmasparehandle); 2140 (void) ddi_dma_unbind_handle(nxgep->dmasparehandle); 2141 2142 nxge_get_soft_properties_fail1: 2143 ddi_dma_free_handle(&nxgep->dmasparehandle); 2144 2145 nxge_get_soft_properties_exit: 2146 2147 if (ddi_status != DDI_SUCCESS) 2148 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 2149 2150 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 2151 "<== nxge_setup_system_dma_pages status = 0x%08x", status)); 2152 return (status); 2153 } 2154 2155 static nxge_status_t 2156 nxge_alloc_mem_pool(p_nxge_t nxgep) 2157 { 2158 nxge_status_t status = NXGE_OK; 2159 2160 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_alloc_mem_pool")); 2161 2162 status = nxge_alloc_rx_mem_pool(nxgep); 2163 if (status != NXGE_OK) { 2164 return (NXGE_ERROR); 2165 } 2166 2167 status = nxge_alloc_tx_mem_pool(nxgep); 2168 if (status != NXGE_OK) { 2169 nxge_free_rx_mem_pool(nxgep); 2170 return (NXGE_ERROR); 2171 } 2172 2173 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_alloc_mem_pool")); 2174 return (NXGE_OK); 2175 } 2176 2177 static void 2178 nxge_free_mem_pool(p_nxge_t nxgep) 2179 { 2180 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_mem_pool")); 2181 2182 nxge_free_rx_mem_pool(nxgep); 2183 
nxge_free_tx_mem_pool(nxgep); 2184 2185 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_mem_pool")); 2186 } 2187 2188 nxge_status_t 2189 nxge_alloc_rx_mem_pool(p_nxge_t nxgep) 2190 { 2191 uint32_t rdc_max; 2192 p_nxge_dma_pt_cfg_t p_all_cfgp; 2193 p_nxge_hw_pt_cfg_t p_cfgp; 2194 p_nxge_dma_pool_t dma_poolp; 2195 p_nxge_dma_common_t *dma_buf_p; 2196 p_nxge_dma_pool_t dma_cntl_poolp; 2197 p_nxge_dma_common_t *dma_cntl_p; 2198 uint32_t *num_chunks; /* per dma */ 2199 nxge_status_t status = NXGE_OK; 2200 2201 uint32_t nxge_port_rbr_size; 2202 uint32_t nxge_port_rbr_spare_size; 2203 uint32_t nxge_port_rcr_size; 2204 uint32_t rx_cntl_alloc_size; 2205 2206 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_mem_pool")); 2207 2208 p_all_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config; 2209 p_cfgp = (p_nxge_hw_pt_cfg_t)&p_all_cfgp->hw_config; 2210 rdc_max = NXGE_MAX_RDCS; 2211 2212 /* 2213 * Allocate memory for the common DMA data structures. 2214 */ 2215 dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t), 2216 KM_SLEEP); 2217 dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 2218 sizeof (p_nxge_dma_common_t) * rdc_max, KM_SLEEP); 2219 2220 dma_cntl_poolp = (p_nxge_dma_pool_t) 2221 KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP); 2222 dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 2223 sizeof (p_nxge_dma_common_t) * rdc_max, KM_SLEEP); 2224 2225 num_chunks = (uint32_t *)KMEM_ZALLOC( 2226 sizeof (uint32_t) * rdc_max, KM_SLEEP); 2227 2228 /* 2229 * Assume that each DMA channel will be configured with 2230 * the default block size. 2231 * RBR block counts must be a multiple of the posting batch count (16). 2232 */ 2233 nxge_port_rbr_size = p_all_cfgp->rbr_size; 2234 nxge_port_rcr_size = p_all_cfgp->rcr_size; 2235 2236 if (!nxge_port_rbr_size) { 2237 nxge_port_rbr_size = NXGE_RBR_RBB_DEFAULT; 2238 } 2239 if (nxge_port_rbr_size % NXGE_RXDMA_POST_BATCH) { 2240 nxge_port_rbr_size = (NXGE_RXDMA_POST_BATCH * 2241 (nxge_port_rbr_size / NXGE_RXDMA_POST_BATCH + 1)); 2242 } 2243 2244 p_all_cfgp->rbr_size = nxge_port_rbr_size; 2245 nxge_port_rbr_spare_size = nxge_rbr_spare_size; 2246 2247 if (nxge_port_rbr_spare_size % NXGE_RXDMA_POST_BATCH) { 2248 nxge_port_rbr_spare_size = (NXGE_RXDMA_POST_BATCH * 2249 (nxge_port_rbr_spare_size / NXGE_RXDMA_POST_BATCH + 1)); 2250 } 2251 if (nxge_port_rbr_size > RBR_DEFAULT_MAX_BLKS) { 2252 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 2253 "nxge_alloc_rx_mem_pool: RBR size too high %d, " 2254 "set to default %d", 2255 nxge_port_rbr_size, RBR_DEFAULT_MAX_BLKS)); 2256 nxge_port_rbr_size = RBR_DEFAULT_MAX_BLKS; 2257 } 2258 if (nxge_port_rcr_size > RCR_DEFAULT_MAX) { 2259 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 2260 "nxge_alloc_rx_mem_pool: RCR size too high %d, " 2261 "set to default %d", 2262 nxge_port_rcr_size, RCR_DEFAULT_MAX)); 2263 nxge_port_rcr_size = RCR_DEFAULT_MAX; 2264 } 2265 2266 /* 2267 * N2/NIU limits the descriptor ring sizes: contiguous memory 2268 * allocation for data buffers (contig_mem_alloc) is limited to 2269 * 4M per chunk, and control buffers must be little endian and 2270 * therefore must come from the ddi/dki memory allocation functions.
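 *
 * For example, the batch rounding above turns a /etc/system setting
 * of nxge_rbr_size = 1000 into 16 * (1000 / 16 + 1) = 1008 RBR
 * entries (the values here are illustrative only). On N2/NIU the
 * ring is further clamped below to a power of 2 no larger than
 * NXGE_NIU_CONTIG_RBR_MAX, and the resulting buffer area (block
 * size times RBR entries; e.g. an 8K block size with 1024 entries
 * would be 8M) is checked against the 4M (1 << 22) contig_mem_alloc
 * limit before any allocation is attempted.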
2271 */ 2272 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2273 if (nxgep->niu_type == N2_NIU) { 2274 nxge_port_rbr_spare_size = 0; 2275 if ((nxge_port_rbr_size > NXGE_NIU_CONTIG_RBR_MAX) || 2276 (!ISP2(nxge_port_rbr_size))) { 2277 nxge_port_rbr_size = NXGE_NIU_CONTIG_RBR_MAX; 2278 } 2279 if ((nxge_port_rcr_size > NXGE_NIU_CONTIG_RCR_MAX) || 2280 (!ISP2(nxge_port_rcr_size))) { 2281 nxge_port_rcr_size = NXGE_NIU_CONTIG_RCR_MAX; 2282 } 2283 } 2284 #endif 2285 2286 /* 2287 * Addresses of receive block ring, receive completion ring and the 2288 * mailbox must be all cache-aligned (64 bytes). 2289 */ 2290 rx_cntl_alloc_size = nxge_port_rbr_size + nxge_port_rbr_spare_size; 2291 rx_cntl_alloc_size *= (sizeof (rx_desc_t)); 2292 rx_cntl_alloc_size += (sizeof (rcr_entry_t) * nxge_port_rcr_size); 2293 rx_cntl_alloc_size += sizeof (rxdma_mailbox_t); 2294 2295 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_alloc_rx_mem_pool: " 2296 "nxge_port_rbr_size = %d nxge_port_rbr_spare_size = %d " 2297 "nxge_port_rcr_size = %d " 2298 "rx_cntl_alloc_size = %d", 2299 nxge_port_rbr_size, nxge_port_rbr_spare_size, 2300 nxge_port_rcr_size, 2301 rx_cntl_alloc_size)); 2302 2303 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2304 if (nxgep->niu_type == N2_NIU) { 2305 uint32_t rx_buf_alloc_size = (nxgep->rx_default_block_size * 2306 (nxge_port_rbr_size + nxge_port_rbr_spare_size)); 2307 2308 if (!ISP2(rx_buf_alloc_size)) { 2309 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2310 "==> nxge_alloc_rx_mem_pool: " 2311 " must be power of 2")); 2312 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 2313 goto nxge_alloc_rx_mem_pool_exit; 2314 } 2315 2316 if (rx_buf_alloc_size > (1 << 22)) { 2317 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2318 "==> nxge_alloc_rx_mem_pool: " 2319 " limit size to 4M")); 2320 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 2321 goto nxge_alloc_rx_mem_pool_exit; 2322 } 2323 2324 if (rx_cntl_alloc_size < 0x2000) { 2325 rx_cntl_alloc_size = 0x2000; 2326 } 2327 } 2328 #endif 2329 nxgep->nxge_port_rbr_size = nxge_port_rbr_size; 2330 nxgep->nxge_port_rcr_size = nxge_port_rcr_size; 2331 nxgep->nxge_port_rbr_spare_size = nxge_port_rbr_spare_size; 2332 nxgep->nxge_port_rx_cntl_alloc_size = rx_cntl_alloc_size; 2333 2334 dma_poolp->ndmas = p_cfgp->max_rdcs; 2335 dma_poolp->num_chunks = num_chunks; 2336 dma_poolp->buf_allocated = B_TRUE; 2337 nxgep->rx_buf_pool_p = dma_poolp; 2338 dma_poolp->dma_buf_pool_p = dma_buf_p; 2339 2340 dma_cntl_poolp->ndmas = p_cfgp->max_rdcs; 2341 dma_cntl_poolp->buf_allocated = B_TRUE; 2342 nxgep->rx_cntl_pool_p = dma_cntl_poolp; 2343 dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p; 2344 2345 /* Allocate the receive rings, too. */ 2346 nxgep->rx_rbr_rings = 2347 KMEM_ZALLOC(sizeof (rx_rbr_rings_t), KM_SLEEP); 2348 nxgep->rx_rbr_rings->rbr_rings = 2349 KMEM_ZALLOC(sizeof (p_rx_rbr_ring_t) * rdc_max, KM_SLEEP); 2350 nxgep->rx_rcr_rings = 2351 KMEM_ZALLOC(sizeof (rx_rcr_rings_t), KM_SLEEP); 2352 nxgep->rx_rcr_rings->rcr_rings = 2353 KMEM_ZALLOC(sizeof (p_rx_rcr_ring_t) * rdc_max, KM_SLEEP); 2354 nxgep->rx_mbox_areas_p = 2355 KMEM_ZALLOC(sizeof (rx_mbox_areas_t), KM_SLEEP); 2356 nxgep->rx_mbox_areas_p->rxmbox_areas = 2357 KMEM_ZALLOC(sizeof (p_rx_mbox_t) * rdc_max, KM_SLEEP); 2358 2359 nxgep->rx_rbr_rings->ndmas = nxgep->rx_rcr_rings->ndmas = 2360 p_cfgp->max_rdcs; 2361 2362 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2363 "<== nxge_alloc_rx_mem_pool:status 0x%08x", status)); 2364 2365 nxge_alloc_rx_mem_pool_exit: 2366 return (status); 2367 } 2368 2369 /* 2370 * nxge_alloc_rxb 2371 * 2372 * Allocate buffers for an RDC. 
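 *
 * The flow below, in brief: nxge_alloc_rx_buf_dma() obtains the data
 * buffers for the channel (possibly in several chunks), and
 * nxge_alloc_rx_cntl_dma() obtains one block holding the RBR/RCR
 * descriptor rings and the mailbox. If the control block cannot be
 * allocated, the data buffers already obtained are marked
 * BUF_ALLOCATED_WAIT_FREE and released again before the error is
 * returned.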
2373 * 2374 * Arguments: 2375 * nxgep 2376 * channel The channel to map into our kernel space. 2377 * 2378 * Notes: 2379 * 2380 * NPI function calls: 2381 * 2382 * NXGE function calls: 2383 * 2384 * Registers accessed: 2385 * 2386 * Context: 2387 * 2388 * Taking apart: 2389 * 2390 * Open questions: 2391 * 2392 */ 2393 nxge_status_t 2394 nxge_alloc_rxb( 2395 p_nxge_t nxgep, 2396 int channel) 2397 { 2398 size_t rx_buf_alloc_size; 2399 nxge_status_t status = NXGE_OK; 2400 2401 nxge_dma_common_t **data; 2402 nxge_dma_common_t **control; 2403 uint32_t *num_chunks; 2404 2405 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rxb")); 2406 2407 /* 2408 * Allocate memory for the receive buffers and descriptor rings. 2409 * Replace these allocation functions with the interface functions 2410 * provided by the partition manager if/when they are available. 2411 */ 2412 2413 /* 2414 * Allocate memory for the receive buffer blocks. 2415 */ 2416 rx_buf_alloc_size = (nxgep->rx_default_block_size * 2417 (nxgep->nxge_port_rbr_size + nxgep->nxge_port_rbr_spare_size)); 2418 2419 data = &nxgep->rx_buf_pool_p->dma_buf_pool_p[channel]; 2420 num_chunks = &nxgep->rx_buf_pool_p->num_chunks[channel]; 2421 2422 if ((status = nxge_alloc_rx_buf_dma( 2423 nxgep, channel, data, rx_buf_alloc_size, 2424 nxgep->rx_default_block_size, num_chunks)) != NXGE_OK) { 2425 return (status); 2426 } 2427 2428 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_alloc_rxb(): " 2429 "dma %d dma_buf_p %llx &dma_buf_p %llx", channel, *data, data)); 2430 2431 /* 2432 * Allocate memory for descriptor rings and mailbox. 2433 */ 2434 control = &nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel]; 2435 2436 if ((status = nxge_alloc_rx_cntl_dma( 2437 nxgep, channel, control, nxgep->nxge_port_rx_cntl_alloc_size)) 2438 != NXGE_OK) { 2439 nxge_free_rx_cntl_dma(nxgep, *control); 2440 (*data)->buf_alloc_state |= BUF_ALLOCATED_WAIT_FREE; 2441 nxge_free_rx_buf_dma(nxgep, *data, *num_chunks); 2442 return (status); 2443 } 2444 2445 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2446 "<== nxge_alloc_rxb: status 0x%08x", status)); 2447 2448 return (status); 2449 } 2450 2451 void 2452 nxge_free_rxb( 2453 p_nxge_t nxgep, 2454 int channel) 2455 { 2456 nxge_dma_common_t *data; 2457 nxge_dma_common_t *control; 2458 uint32_t num_chunks; 2459 2460 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_rxb")); 2461 2462 data = nxgep->rx_buf_pool_p->dma_buf_pool_p[channel]; 2463 num_chunks = nxgep->rx_buf_pool_p->num_chunks[channel]; 2464 nxge_free_rx_buf_dma(nxgep, data, num_chunks); 2465 2466 nxgep->rx_buf_pool_p->dma_buf_pool_p[channel] = 0; 2467 nxgep->rx_buf_pool_p->num_chunks[channel] = 0; 2468 2469 control = nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel]; 2470 nxge_free_rx_cntl_dma(nxgep, control); 2471 2472 nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel] = 0; 2473 2474 KMEM_FREE(data, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 2475 KMEM_FREE(control, sizeof (nxge_dma_common_t)); 2476 2477 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_rxb")); 2478 } 2479 2480 static void 2481 nxge_free_rx_mem_pool(p_nxge_t nxgep) 2482 { 2483 int rdc_max = NXGE_MAX_RDCS; 2484 2485 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_rx_mem_pool")); 2486 2487 if (!nxgep->rx_buf_pool_p || !nxgep->rx_buf_pool_p->buf_allocated) { 2488 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2489 "<== nxge_free_rx_mem_pool " 2490 "(null rx buf pool or buf not allocated)")); 2491 return; 2492 } 2493 if (!nxgep->rx_cntl_pool_p || !nxgep->rx_cntl_pool_p->buf_allocated) { 2494 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2495 "<== nxge_free_rx_mem_pool
" 2496 "(null rx cntl buf pool or cntl buf not allocated")); 2497 return; 2498 } 2499 2500 KMEM_FREE(nxgep->rx_cntl_pool_p->dma_buf_pool_p, 2501 sizeof (p_nxge_dma_common_t) * rdc_max); 2502 KMEM_FREE(nxgep->rx_cntl_pool_p, sizeof (nxge_dma_pool_t)); 2503 2504 KMEM_FREE(nxgep->rx_buf_pool_p->num_chunks, 2505 sizeof (uint32_t) * rdc_max); 2506 KMEM_FREE(nxgep->rx_buf_pool_p->dma_buf_pool_p, 2507 sizeof (p_nxge_dma_common_t) * rdc_max); 2508 KMEM_FREE(nxgep->rx_buf_pool_p, sizeof (nxge_dma_pool_t)); 2509 2510 nxgep->rx_buf_pool_p = 0; 2511 nxgep->rx_cntl_pool_p = 0; 2512 2513 KMEM_FREE(nxgep->rx_rbr_rings->rbr_rings, 2514 sizeof (p_rx_rbr_ring_t) * rdc_max); 2515 KMEM_FREE(nxgep->rx_rbr_rings, sizeof (rx_rbr_rings_t)); 2516 KMEM_FREE(nxgep->rx_rcr_rings->rcr_rings, 2517 sizeof (p_rx_rcr_ring_t) * rdc_max); 2518 KMEM_FREE(nxgep->rx_rcr_rings, sizeof (rx_rcr_rings_t)); 2519 KMEM_FREE(nxgep->rx_mbox_areas_p->rxmbox_areas, 2520 sizeof (p_rx_mbox_t) * rdc_max); 2521 KMEM_FREE(nxgep->rx_mbox_areas_p, sizeof (rx_mbox_areas_t)); 2522 2523 nxgep->rx_rbr_rings = 0; 2524 nxgep->rx_rcr_rings = 0; 2525 nxgep->rx_mbox_areas_p = 0; 2526 2527 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_rx_mem_pool")); 2528 } 2529 2530 2531 static nxge_status_t 2532 nxge_alloc_rx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel, 2533 p_nxge_dma_common_t *dmap, 2534 size_t alloc_size, size_t block_size, uint32_t *num_chunks) 2535 { 2536 p_nxge_dma_common_t rx_dmap; 2537 nxge_status_t status = NXGE_OK; 2538 size_t total_alloc_size; 2539 size_t allocated = 0; 2540 int i, size_index, array_size; 2541 boolean_t use_kmem_alloc = B_FALSE; 2542 2543 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_buf_dma")); 2544 2545 rx_dmap = (p_nxge_dma_common_t) 2546 KMEM_ZALLOC(sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK, 2547 KM_SLEEP); 2548 2549 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2550 " alloc_rx_buf_dma rdc %d asize %x bsize %x bbuf %llx ", 2551 dma_channel, alloc_size, block_size, dmap)); 2552 2553 total_alloc_size = alloc_size; 2554 2555 #if defined(RX_USE_RECLAIM_POST) 2556 total_alloc_size = alloc_size + alloc_size/4; 2557 #endif 2558 2559 i = 0; 2560 size_index = 0; 2561 array_size = sizeof (alloc_sizes)/sizeof (size_t); 2562 while ((alloc_sizes[size_index] < alloc_size) && 2563 (size_index < array_size)) 2564 size_index++; 2565 if (size_index >= array_size) { 2566 size_index = array_size - 1; 2567 } 2568 2569 /* For Neptune, use kmem_alloc if the kmem flag is set. */ 2570 if (nxgep->niu_type != N2_NIU && nxge_use_kmem_alloc) { 2571 use_kmem_alloc = B_TRUE; 2572 #if defined(__i386) || defined(__amd64) 2573 size_index = 0; 2574 #endif 2575 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2576 "==> nxge_alloc_rx_buf_dma: " 2577 "Neptune use kmem_alloc() - size_index %d", 2578 size_index)); 2579 } 2580 2581 while ((allocated < total_alloc_size) && 2582 (size_index >= 0) && (i < NXGE_DMA_BLOCK)) { 2583 rx_dmap[i].dma_chunk_index = i; 2584 rx_dmap[i].block_size = block_size; 2585 rx_dmap[i].alength = alloc_sizes[size_index]; 2586 rx_dmap[i].orig_alength = rx_dmap[i].alength; 2587 rx_dmap[i].nblocks = alloc_sizes[size_index] / block_size; 2588 rx_dmap[i].dma_channel = dma_channel; 2589 rx_dmap[i].contig_alloc_type = B_FALSE; 2590 rx_dmap[i].kmem_alloc_type = B_FALSE; 2591 rx_dmap[i].buf_alloc_type = DDI_MEM_ALLOC; 2592 2593 /* 2594 * N2/NIU: data buffers must be contiguous as the driver 2595 * needs to call Hypervisor api to set up 2596 * logical pages. 
2597 */ 2598 if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) { 2599 rx_dmap[i].contig_alloc_type = B_TRUE; 2600 rx_dmap[i].buf_alloc_type = CONTIG_MEM_ALLOC; 2601 } else if (use_kmem_alloc) { 2602 /* For Neptune, use kmem_alloc */ 2603 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2604 "==> nxge_alloc_rx_buf_dma: " 2605 "Neptune use kmem_alloc()")); 2606 rx_dmap[i].kmem_alloc_type = B_TRUE; 2607 rx_dmap[i].buf_alloc_type = KMEM_ALLOC; 2608 } 2609 2610 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2611 "alloc_rx_buf_dma rdc %d chunk %d bufp %llx size %x " 2612 "i %d nblocks %d alength %d", 2613 dma_channel, i, &rx_dmap[i], block_size, 2614 i, rx_dmap[i].nblocks, 2615 rx_dmap[i].alength)); 2616 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 2617 &nxge_rx_dma_attr, 2618 rx_dmap[i].alength, 2619 &nxge_dev_buf_dma_acc_attr, 2620 DDI_DMA_READ | DDI_DMA_STREAMING, 2621 (p_nxge_dma_common_t)(&rx_dmap[i])); 2622 if (status != NXGE_OK) { 2623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2624 "nxge_alloc_rx_buf_dma: Alloc Failed: " 2625 "dma %d size_index %d size requested %d", 2626 dma_channel, 2627 size_index, 2628 rx_dmap[i].alength)); 2629 size_index--; 2630 } else { 2631 rx_dmap[i].buf_alloc_state = BUF_ALLOCATED; 2632 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2633 " nxge_alloc_rx_buf_dma DONE alloc mem: " 2634 "dma %d dma_buf_p $%p kaddrp $%p alength %d " 2635 "buf_alloc_state %d alloc_type %d", 2636 dma_channel, 2637 &rx_dmap[i], 2638 rx_dmap[i].kaddrp, 2639 rx_dmap[i].alength, 2640 rx_dmap[i].buf_alloc_state, 2641 rx_dmap[i].buf_alloc_type)); 2642 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2643 " alloc_rx_buf_dma allocated rdc %d " 2644 "chunk %d size %x dvma %x bufp %llx kaddrp $%p", 2645 dma_channel, i, rx_dmap[i].alength, 2646 rx_dmap[i].ioaddr_pp, &rx_dmap[i], 2647 rx_dmap[i].kaddrp)); 2648 i++; 2649 allocated += alloc_sizes[size_index]; 2650 } 2651 } 2652 2653 if (allocated < total_alloc_size) { 2654 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2655 "==> nxge_alloc_rx_buf_dma: not enough for channel %d " 2656 "allocated 0x%x requested 0x%x", 2657 dma_channel, 2658 allocated, total_alloc_size)); 2659 status = NXGE_ERROR; 2660 goto nxge_alloc_rx_mem_fail1; 2661 } 2662 2663 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2664 "==> nxge_alloc_rx_buf_dma: Allocated for channel %d " 2665 "allocated 0x%x requested 0x%x", 2666 dma_channel, 2667 allocated, total_alloc_size)); 2668 2669 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2670 " alloc_rx_buf_dma rdc %d allocated %d chunks", 2671 dma_channel, i)); 2672 *num_chunks = i; 2673 *dmap = rx_dmap; 2674 2675 goto nxge_alloc_rx_mem_exit; 2676 2677 nxge_alloc_rx_mem_fail1: 2678 KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 2679 2680 nxge_alloc_rx_mem_exit: 2681 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2682 "<== nxge_alloc_rx_buf_dma status 0x%08x", status)); 2683 2684 return (status); 2685 } 2686 2687 /*ARGSUSED*/ 2688 static void 2689 nxge_free_rx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap, 2690 uint32_t num_chunks) 2691 { 2692 int i; 2693 2694 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2695 "==> nxge_free_rx_buf_dma: # of chunks %d", num_chunks)); 2696 2697 if (dmap == 0) 2698 return; 2699 2700 for (i = 0; i < num_chunks; i++) { 2701 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2702 "==> nxge_free_rx_buf_dma: chunk %d dmap 0x%llx", 2703 i, dmap)); 2704 nxge_dma_free_rx_data_buf(dmap++); 2705 } 2706 2707 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_rx_buf_dma")); 2708 } 2709 2710 /*ARGSUSED*/ 2711 static nxge_status_t 2712 nxge_alloc_rx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel, 2713 p_nxge_dma_common_t *dmap, size_t 
size) 2714 { 2715 p_nxge_dma_common_t rx_dmap; 2716 nxge_status_t status = NXGE_OK; 2717 2718 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_cntl_dma")); 2719 2720 rx_dmap = (p_nxge_dma_common_t) 2721 KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP); 2722 2723 rx_dmap->contig_alloc_type = B_FALSE; 2724 rx_dmap->kmem_alloc_type = B_FALSE; 2725 2726 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 2727 &nxge_desc_dma_attr, 2728 size, 2729 &nxge_dev_desc_dma_acc_attr, 2730 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 2731 rx_dmap); 2732 if (status != NXGE_OK) { 2733 goto nxge_alloc_rx_cntl_dma_fail1; 2734 } 2735 2736 *dmap = rx_dmap; 2737 goto nxge_alloc_rx_cntl_dma_exit; 2738 2739 nxge_alloc_rx_cntl_dma_fail1: 2740 KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t)); 2741 2742 nxge_alloc_rx_cntl_dma_exit: 2743 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2744 "<== nxge_alloc_rx_cntl_dma status 0x%08x", status)); 2745 2746 return (status); 2747 } 2748 2749 /*ARGSUSED*/ 2750 static void 2751 nxge_free_rx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap) 2752 { 2753 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_rx_cntl_dma")); 2754 2755 if (dmap == 0) 2756 return; 2757 2758 nxge_dma_mem_free(dmap); 2759 2760 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_rx_cntl_dma")); 2761 } 2762 2763 typedef struct { 2764 size_t tx_size; 2765 size_t cr_size; 2766 size_t threshhold; 2767 } nxge_tdc_sizes_t; 2768 2769 static 2770 nxge_status_t 2771 nxge_tdc_sizes( 2772 nxge_t *nxgep, 2773 nxge_tdc_sizes_t *sizes) 2774 { 2775 uint32_t threshhold; /* The bcopy() threshold */ 2776 size_t tx_size; /* Transmit buffer size */ 2777 size_t cr_size; /* Completion ring size */ 2778 2779 /* 2780 * Assume that each DMA channel will be configured with the 2781 * default transmit buffer size for copying transmit data. 2782 * (If a packet is bigger than this, it will not be copied.) 2783 */ 2784 if (nxgep->niu_type == N2_NIU) { 2785 threshhold = TX_BCOPY_SIZE; 2786 } else { 2787 threshhold = nxge_bcopy_thresh; 2788 } 2789 tx_size = nxge_tx_ring_size * threshhold; 2790 2791 cr_size = nxge_tx_ring_size * sizeof (tx_desc_t); 2792 cr_size += sizeof (txdma_mailbox_t); 2793 2794 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2795 if (nxgep->niu_type == N2_NIU) { 2796 if (!ISP2(tx_size)) { 2797 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2798 "==> nxge_tdc_sizes: Tx size" 2799 " must be a power of 2")); 2800 return (NXGE_ERROR); 2801 } 2802 2803 if (tx_size > (1 << 22)) { 2804 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2805 "==> nxge_tdc_sizes: Tx size" 2806 " limited to 4M")); 2807 return (NXGE_ERROR); 2808 } 2809 2810 if (cr_size < 0x2000) 2811 cr_size = 0x2000; 2812 } 2813 #endif 2814 2815 sizes->threshhold = threshhold; 2816 sizes->tx_size = tx_size; 2817 sizes->cr_size = cr_size; 2818 2819 return (NXGE_OK); 2820 } 2821 /* 2822 * nxge_alloc_txb 2823 * 2824 * Allocate buffers for a TDC. 2825 * 2826 * Arguments: 2827 * nxgep 2828 * channel The channel to map into our kernel space.
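 *
 * Sizing, in brief (computed by nxge_tdc_sizes() above): the buffer
 * area is nxge_tx_ring_size * bcopy-threshold bytes, and the
 * completion area is nxge_tx_ring_size * sizeof (tx_desc_t) plus one
 * txdma_mailbox_t. As an illustration only, a 1024-entry ring with
 * an assumed 2K threshold needs 1024 * 2048 = 2M of buffer space;
 * on N2/NIU the completion area is rounded up to at least 8K
 * (0x2000).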
2829 * 2830 * Notes: 2831 * 2832 * NPI function calls: 2833 * 2834 * NXGE function calls: 2835 * 2836 * Registers accessed: 2837 * 2838 * Context: 2839 * 2840 * Taking apart: 2841 * 2842 * Open questions: 2843 * 2844 */ 2845 nxge_status_t 2846 nxge_alloc_txb( 2847 p_nxge_t nxgep, 2848 int channel) 2849 { 2850 nxge_dma_common_t **dma_buf_p; 2851 nxge_dma_common_t **dma_cntl_p; 2852 uint32_t *num_chunks; 2853 nxge_status_t status = NXGE_OK; 2854 2855 nxge_tdc_sizes_t sizes; 2856 2857 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tbb")); 2858 2859 if (nxge_tdc_sizes(nxgep, &sizes) != NXGE_OK) 2860 return (NXGE_ERROR); 2861 2862 /* 2863 * Allocate memory for transmit buffers and descriptor rings. 2864 * Replace these allocation functions with the interface functions 2865 * provided by the partition manager Real Soon Now. 2866 */ 2867 dma_buf_p = &nxgep->tx_buf_pool_p->dma_buf_pool_p[channel]; 2868 num_chunks = &nxgep->tx_buf_pool_p->num_chunks[channel]; 2869 2870 dma_cntl_p = &nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel]; 2871 2872 /* 2873 * Allocate memory for transmit buffers and descriptor rings. 2874 * Replace allocation functions with interface functions provided 2875 * by the partition manager when it is available. 2876 * 2877 * Allocate memory for the transmit buffer pool. 2878 */ 2879 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2880 "sizes: tx: %ld, cr:%ld, th:%ld", 2881 sizes.tx_size, sizes.cr_size, sizes.threshhold)); 2882 2883 *num_chunks = 0; 2884 status = nxge_alloc_tx_buf_dma(nxgep, channel, dma_buf_p, 2885 sizes.tx_size, sizes.threshhold, num_chunks); 2886 if (status != NXGE_OK) { 2887 cmn_err(CE_NOTE, "nxge_alloc_tx_buf_dma failed!"); 2888 return (status); 2889 } 2890 2891 /* 2892 * Allocate memory for descriptor rings and mailbox. 2893 */ 2894 status = nxge_alloc_tx_cntl_dma(nxgep, channel, dma_cntl_p, 2895 sizes.cr_size); 2896 if (status != NXGE_OK) { 2897 nxge_free_tx_buf_dma(nxgep, *dma_buf_p, *num_chunks); 2898 cmn_err(CE_NOTE, "nxge_alloc_tx_cntl_dma failed!"); 2899 return (status); 2900 } 2901 2902 return (NXGE_OK); 2903 } 2904 2905 void 2906 nxge_free_txb( 2907 p_nxge_t nxgep, 2908 int channel) 2909 { 2910 nxge_dma_common_t *data; 2911 nxge_dma_common_t *control; 2912 uint32_t num_chunks; 2913 2914 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_txb")); 2915 2916 data = nxgep->tx_buf_pool_p->dma_buf_pool_p[channel]; 2917 num_chunks = nxgep->tx_buf_pool_p->num_chunks[channel]; 2918 nxge_free_tx_buf_dma(nxgep, data, num_chunks); 2919 2920 nxgep->tx_buf_pool_p->dma_buf_pool_p[channel] = 0; 2921 nxgep->tx_buf_pool_p->num_chunks[channel] = 0; 2922 2923 control = nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel]; 2924 nxge_free_tx_cntl_dma(nxgep, control); 2925 2926 nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel] = 0; 2927 2928 KMEM_FREE(data, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 2929 KMEM_FREE(control, sizeof (nxge_dma_common_t)); 2930 2931 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_txb")); 2932 } 2933 2934 /* 2935 * nxge_alloc_tx_mem_pool 2936 * 2937 * This function allocates all of the per-port TDC control data structures. 2938 * The per-channel (TDC) data structures are allocated when needed. 
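 *
 * In other words, this function only sizes the rings and allocates
 * the per-port, channel-indexed bookkeeping (buffer pools, ring
 * pointer arrays, mailbox pointer arrays); the DMA memory behind a
 * given channel is allocated later, by nxge_alloc_txb(), when that
 * channel is actually mapped in.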
2939 * 2940 * Arguments: 2941 * nxgep 2942 * 2943 * Notes: 2944 * 2945 * Context: 2946 * Any domain 2947 */ 2948 nxge_status_t 2949 nxge_alloc_tx_mem_pool(p_nxge_t nxgep) 2950 { 2951 nxge_hw_pt_cfg_t *p_cfgp; 2952 nxge_dma_pool_t *dma_poolp; 2953 nxge_dma_common_t **dma_buf_p; 2954 nxge_dma_pool_t *dma_cntl_poolp; 2955 nxge_dma_common_t **dma_cntl_p; 2956 uint32_t *num_chunks; /* per dma */ 2957 int tdc_max; 2958 2959 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_alloc_tx_mem_pool")); 2960 2961 p_cfgp = &nxgep->pt_config.hw_config; 2962 tdc_max = NXGE_MAX_TDCS; 2963 2964 /* 2965 * Allocate memory for each transmit DMA channel. 2966 */ 2967 dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t), 2968 KM_SLEEP); 2969 dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 2970 sizeof (p_nxge_dma_common_t) * tdc_max, KM_SLEEP); 2971 2972 dma_cntl_poolp = (p_nxge_dma_pool_t) 2973 KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP); 2974 dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 2975 sizeof (p_nxge_dma_common_t) * tdc_max, KM_SLEEP); 2976 2977 if (nxge_tx_ring_size > TDC_DEFAULT_MAX) { 2978 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 2979 "nxge_alloc_tx_mem_pool: TDC too high %d, " 2980 "set to default %d", 2981 nxge_tx_ring_size, TDC_DEFAULT_MAX)); 2982 nxge_tx_ring_size = TDC_DEFAULT_MAX; 2983 } 2984 2985 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2986 /* 2987 * N2/NIU has limitation on the descriptor sizes (contiguous 2988 * memory allocation on data buffers to 4M (contig_mem_alloc) 2989 * and little endian for control buffers (must use the ddi/dki mem alloc 2990 * function). The transmit ring is limited to 8K (includes the 2991 * mailbox). 2992 */ 2993 if (nxgep->niu_type == N2_NIU) { 2994 if ((nxge_tx_ring_size > NXGE_NIU_CONTIG_TX_MAX) || 2995 (!ISP2(nxge_tx_ring_size))) { 2996 nxge_tx_ring_size = NXGE_NIU_CONTIG_TX_MAX; 2997 } 2998 } 2999 #endif 3000 3001 nxgep->nxge_port_tx_ring_size = nxge_tx_ring_size; 3002 3003 num_chunks = (uint32_t *)KMEM_ZALLOC( 3004 sizeof (uint32_t) * tdc_max, KM_SLEEP); 3005 3006 dma_poolp->ndmas = p_cfgp->tdc.owned; 3007 dma_poolp->num_chunks = num_chunks; 3008 dma_poolp->dma_buf_pool_p = dma_buf_p; 3009 nxgep->tx_buf_pool_p = dma_poolp; 3010 3011 dma_poolp->buf_allocated = B_TRUE; 3012 3013 dma_cntl_poolp->ndmas = p_cfgp->tdc.owned; 3014 dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p; 3015 nxgep->tx_cntl_pool_p = dma_cntl_poolp; 3016 3017 dma_cntl_poolp->buf_allocated = B_TRUE; 3018 3019 nxgep->tx_rings = 3020 KMEM_ZALLOC(sizeof (tx_rings_t), KM_SLEEP); 3021 nxgep->tx_rings->rings = 3022 KMEM_ZALLOC(sizeof (p_tx_ring_t) * tdc_max, KM_SLEEP); 3023 nxgep->tx_mbox_areas_p = 3024 KMEM_ZALLOC(sizeof (tx_mbox_areas_t), KM_SLEEP); 3025 nxgep->tx_mbox_areas_p->txmbox_areas_p = 3026 KMEM_ZALLOC(sizeof (p_tx_mbox_t) * tdc_max, KM_SLEEP); 3027 3028 nxgep->tx_rings->ndmas = p_cfgp->tdc.owned; 3029 3030 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 3031 "==> nxge_alloc_tx_mem_pool: ndmas %d poolp->ndmas %d", 3032 tdc_max, dma_poolp->ndmas)); 3033 3034 return (NXGE_OK); 3035 } 3036 3037 nxge_status_t 3038 nxge_alloc_tx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel, 3039 p_nxge_dma_common_t *dmap, size_t alloc_size, 3040 size_t block_size, uint32_t *num_chunks) 3041 { 3042 p_nxge_dma_common_t tx_dmap; 3043 nxge_status_t status = NXGE_OK; 3044 size_t total_alloc_size; 3045 size_t allocated = 0; 3046 int i, size_index, array_size; 3047 3048 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_buf_dma")); 3049 3050 tx_dmap = (p_nxge_dma_common_t) 3051 KMEM_ZALLOC(sizeof (nxge_dma_common_t) * 
NXGE_DMA_BLOCK, 3052 KM_SLEEP); 3053 3054 total_alloc_size = alloc_size; 3055 i = 0; 3056 size_index = 0; 3057 array_size = sizeof (alloc_sizes) / sizeof (size_t); 3058 while ((size_index < array_size) && 3059 (alloc_sizes[size_index] < alloc_size)) 3060 size_index++; 3061 if (size_index >= array_size) { 3062 size_index = array_size - 1; 3063 } 3064 3065 while ((allocated < total_alloc_size) && 3066 (size_index >= 0) && (i < NXGE_DMA_BLOCK)) { 3067 3068 tx_dmap[i].dma_chunk_index = i; 3069 tx_dmap[i].block_size = block_size; 3070 tx_dmap[i].alength = alloc_sizes[size_index]; 3071 tx_dmap[i].orig_alength = tx_dmap[i].alength; 3072 tx_dmap[i].nblocks = alloc_sizes[size_index] / block_size; 3073 tx_dmap[i].dma_channel = dma_channel; 3074 tx_dmap[i].contig_alloc_type = B_FALSE; 3075 tx_dmap[i].kmem_alloc_type = B_FALSE; 3076 3077 /* 3078 * N2/NIU: data buffers must be contiguous as the driver 3079 * needs to call the Hypervisor API to set up 3080 * logical pages. 3081 */ 3082 if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) { 3083 tx_dmap[i].contig_alloc_type = B_TRUE; 3084 } 3085 3086 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 3087 &nxge_tx_dma_attr, 3088 tx_dmap[i].alength, 3089 &nxge_dev_buf_dma_acc_attr, 3090 DDI_DMA_WRITE | DDI_DMA_STREAMING, 3091 (p_nxge_dma_common_t)(&tx_dmap[i])); 3092 if (status != NXGE_OK) { 3093 size_index--; 3094 } else { 3095 i++; 3096 allocated += alloc_sizes[size_index]; 3097 } 3098 } 3099 3100 if (allocated < total_alloc_size) { 3101 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3102 "==> nxge_alloc_tx_buf_dma: not enough memory for channel %d: " 3103 "allocated 0x%x requested 0x%x", 3104 dma_channel, 3105 allocated, total_alloc_size)); 3106 status = NXGE_ERROR; 3107 goto nxge_alloc_tx_mem_fail1; 3108 } 3109 3110 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3111 "==> nxge_alloc_tx_buf_dma: Allocated for channel %d: " 3112 "allocated 0x%x requested 0x%x", 3113 dma_channel, 3114 allocated, total_alloc_size)); 3115 3116 *num_chunks = i; 3117 *dmap = tx_dmap; 3118 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3119 "==> nxge_alloc_tx_buf_dma dmap 0x%016llx num chunks %d", 3120 *dmap, i)); 3121 goto nxge_alloc_tx_mem_exit; 3122 3123 nxge_alloc_tx_mem_fail1: 3124 KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 3125 3126 nxge_alloc_tx_mem_exit: 3127 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3128 "<== nxge_alloc_tx_buf_dma status 0x%08x", status)); 3129 3130 return (status); 3131 } 3132 3133 /*ARGSUSED*/ 3134 static void 3135 nxge_free_tx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap, 3136 uint32_t num_chunks) 3137 { 3138 int i; 3139 3140 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_tx_buf_dma")); 3141 3142 if (dmap == 0) 3143 return; 3144 3145 for (i = 0; i < num_chunks; i++) { 3146 nxge_dma_mem_free(dmap++); 3147 } 3148 3149 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_tx_buf_dma")); 3150 } 3151 3152 /*ARGSUSED*/ 3153 nxge_status_t 3154 nxge_alloc_tx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel, 3155 p_nxge_dma_common_t *dmap, size_t size) 3156 { 3157 p_nxge_dma_common_t tx_dmap; 3158 nxge_status_t status = NXGE_OK; 3159 3160 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_cntl_dma")); 3161 tx_dmap = (p_nxge_dma_common_t) 3162 KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP); 3163 3164 tx_dmap->contig_alloc_type = B_FALSE; 3165 tx_dmap->kmem_alloc_type = B_FALSE; 3166 3167 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 3168 &nxge_desc_dma_attr, 3169 size, 3170 &nxge_dev_desc_dma_acc_attr, 3171 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 3172 tx_dmap); 3173 if (status != NXGE_OK) { 3174
goto nxge_alloc_tx_cntl_dma_fail1; 3175 } 3176 3177 *dmap = tx_dmap; 3178 goto nxge_alloc_tx_cntl_dma_exit; 3179 3180 nxge_alloc_tx_cntl_dma_fail1: 3181 KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t)); 3182 3183 nxge_alloc_tx_cntl_dma_exit: 3184 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3185 "<== nxge_alloc_tx_cntl_dma status 0x%08x", status)); 3186 3187 return (status); 3188 } 3189 3190 /*ARGSUSED*/ 3191 static void 3192 nxge_free_tx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap) 3193 { 3194 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_tx_cntl_dma")); 3195 3196 if (dmap == 0) 3197 return; 3198 3199 nxge_dma_mem_free(dmap); 3200 3201 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_tx_cntl_dma")); 3202 } 3203 3204 /* 3205 * nxge_free_tx_mem_pool 3206 * 3207 * This function frees all of the per-port TDC control data structures. 3208 * The per-channel (TDC) data structures are freed when the channel 3209 * is stopped. 3210 * 3211 * Arguments: 3212 * nxgep 3213 * 3214 * Notes: 3215 * 3216 * Context: 3217 * Any domain 3218 */ 3219 static void 3220 nxge_free_tx_mem_pool(p_nxge_t nxgep) 3221 { 3222 int tdc_max = NXGE_MAX_TDCS; 3223 3224 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_tx_mem_pool")); 3225 3226 if (!nxgep->tx_buf_pool_p || !nxgep->tx_buf_pool_p->buf_allocated) { 3227 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3228 "<== nxge_free_tx_mem_pool " 3229 "(null tx buf pool or buf not allocated")); 3230 return; 3231 } 3232 if (!nxgep->tx_cntl_pool_p || !nxgep->tx_cntl_pool_p->buf_allocated) { 3233 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3234 "<== nxge_free_tx_mem_pool " 3235 "(null tx cntl buf pool or cntl buf not allocated")); 3236 return; 3237 } 3238 3239 /* 1. Free the mailboxes. */ 3240 KMEM_FREE(nxgep->tx_mbox_areas_p->txmbox_areas_p, 3241 sizeof (p_tx_mbox_t) * tdc_max); 3242 KMEM_FREE(nxgep->tx_mbox_areas_p, sizeof (tx_mbox_areas_t)); 3243 3244 nxgep->tx_mbox_areas_p = 0; 3245 3246 /* 2. Free the transmit ring arrays. */ 3247 KMEM_FREE(nxgep->tx_rings->rings, 3248 sizeof (p_tx_ring_t) * tdc_max); 3249 KMEM_FREE(nxgep->tx_rings, sizeof (tx_rings_t)); 3250 3251 nxgep->tx_rings = 0; 3252 3253 /* 3. Free the completion ring data structures. */ 3254 KMEM_FREE(nxgep->tx_cntl_pool_p->dma_buf_pool_p, 3255 sizeof (p_nxge_dma_common_t) * tdc_max); 3256 KMEM_FREE(nxgep->tx_cntl_pool_p, sizeof (nxge_dma_pool_t)); 3257 3258 nxgep->tx_cntl_pool_p = 0; 3259 3260 /* 4. Free the data ring data structures. */ 3261 KMEM_FREE(nxgep->tx_buf_pool_p->num_chunks, 3262 sizeof (uint32_t) * tdc_max); 3263 KMEM_FREE(nxgep->tx_buf_pool_p->dma_buf_pool_p, 3264 sizeof (p_nxge_dma_common_t) * tdc_max); 3265 KMEM_FREE(nxgep->tx_buf_pool_p, sizeof (nxge_dma_pool_t)); 3266 3267 nxgep->tx_buf_pool_p = 0; 3268 3269 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_tx_mem_pool")); 3270 } 3271 3272 /*ARGSUSED*/ 3273 static nxge_status_t 3274 nxge_dma_mem_alloc(p_nxge_t nxgep, dma_method_t method, 3275 struct ddi_dma_attr *dma_attrp, 3276 size_t length, ddi_device_acc_attr_t *acc_attr_p, uint_t xfer_flags, 3277 p_nxge_dma_common_t dma_p) 3278 { 3279 caddr_t kaddrp; 3280 int ddi_status = DDI_SUCCESS; 3281 boolean_t contig_alloc_type; 3282 boolean_t kmem_alloc_type; 3283 3284 contig_alloc_type = dma_p->contig_alloc_type; 3285 3286 if (contig_alloc_type && (nxgep->niu_type != N2_NIU)) { 3287 /* 3288 * contig_alloc_type for contiguous memory only allowed 3289 * for N2/NIU. 
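 *
 * The possible paths through this function, in brief (a summary of
 * the switch statements below); every successful path must end with
 * exactly one DMA cookie bound:
 *
 *	contig B_FALSE, kmem B_FALSE:	ddi_dma_mem_alloc() + bind
 *	contig B_FALSE, kmem B_TRUE:	kmem_alloc() + bind
 *	contig B_TRUE (N2/NIU only):	contig_mem_alloc() + bind
 *
 * A bind that yields more than one cookie is unwound and treated as
 * a failure.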
3290 */ 3291 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3292 "nxge_dma_mem_alloc: alloc type not allowed (%d)", 3293 dma_p->contig_alloc_type)); 3294 return (NXGE_ERROR | NXGE_DDI_FAILED); 3295 } 3296 3297 dma_p->dma_handle = NULL; 3298 dma_p->acc_handle = NULL; 3299 dma_p->kaddrp = dma_p->last_kaddrp = NULL; 3300 dma_p->first_ioaddr_pp = dma_p->last_ioaddr_pp = NULL; 3301 ddi_status = ddi_dma_alloc_handle(nxgep->dip, dma_attrp, 3302 DDI_DMA_DONTWAIT, NULL, &dma_p->dma_handle); 3303 if (ddi_status != DDI_SUCCESS) { 3304 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3305 "nxge_dma_mem_alloc:ddi_dma_alloc_handle failed.")); 3306 return (NXGE_ERROR | NXGE_DDI_FAILED); 3307 } 3308 3309 kmem_alloc_type = dma_p->kmem_alloc_type; 3310 3311 switch (contig_alloc_type) { 3312 case B_FALSE: 3313 switch (kmem_alloc_type) { 3314 case B_FALSE: 3315 ddi_status = ddi_dma_mem_alloc(dma_p->dma_handle, 3316 length, 3317 acc_attr_p, 3318 xfer_flags, 3319 DDI_DMA_DONTWAIT, 0, &kaddrp, &dma_p->alength, 3320 &dma_p->acc_handle); 3321 if (ddi_status != DDI_SUCCESS) { 3322 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3323 "nxge_dma_mem_alloc: " 3324 "ddi_dma_mem_alloc failed")); 3325 ddi_dma_free_handle(&dma_p->dma_handle); 3326 dma_p->dma_handle = NULL; 3327 return (NXGE_ERROR | NXGE_DDI_FAILED); 3328 } 3329 if (dma_p->alength < length) { 3330 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3331 "nxge_dma_mem_alloc:ddi_dma_mem_alloc " 3332 "< length.")); 3333 ddi_dma_mem_free(&dma_p->acc_handle); 3334 ddi_dma_free_handle(&dma_p->dma_handle); 3335 dma_p->acc_handle = NULL; 3336 dma_p->dma_handle = NULL; 3337 return (NXGE_ERROR); 3338 } 3339 3340 ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, 3341 NULL, 3342 kaddrp, dma_p->alength, xfer_flags, 3343 DDI_DMA_DONTWAIT, 3344 0, &dma_p->dma_cookie, &dma_p->ncookies); 3345 if (ddi_status != DDI_DMA_MAPPED) { 3346 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3347 "nxge_dma_mem_alloc: ddi_dma_addr_bind " 3348 "failed " 3349 "(status 0x%x ncookies %d.)", ddi_status, 3350 dma_p->ncookies)); 3351 if (dma_p->acc_handle) { 3352 ddi_dma_mem_free(&dma_p->acc_handle); 3353 dma_p->acc_handle = NULL; 3354 } 3355 ddi_dma_free_handle(&dma_p->dma_handle); 3356 dma_p->dma_handle = NULL; 3357 return (NXGE_ERROR | NXGE_DDI_FAILED); 3358 } 3359 3360 if (dma_p->ncookies != 1) { 3361 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3362 "nxge_dma_mem_alloc:ddi_dma_addr_bind " 3363 "> 1 cookie" 3364 "(status 0x%x ncookies %d.)", ddi_status, 3365 dma_p->ncookies)); 3366 if (dma_p->acc_handle) { 3367 ddi_dma_mem_free(&dma_p->acc_handle); 3368 dma_p->acc_handle = NULL; 3369 } 3370 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 3371 ddi_dma_free_handle(&dma_p->dma_handle); 3372 dma_p->dma_handle = NULL; 3373 return (NXGE_ERROR); 3374 } 3375 break; 3376 3377 case B_TRUE: 3378 kaddrp = KMEM_ALLOC(length, KM_NOSLEEP); 3379 if (kaddrp == NULL) { 3380 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3381 "nxge_dma_mem_alloc: " 3382 "kmem_alloc failed")); 3383 ddi_dma_free_handle(&dma_p->dma_handle); dma_p->dma_handle = NULL; return (NXGE_ERROR); 3384 } 3385 3386 dma_p->alength = length; 3387 ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, 3388 NULL, kaddrp, dma_p->alength, xfer_flags, 3389 DDI_DMA_DONTWAIT, 0, 3390 &dma_p->dma_cookie, &dma_p->ncookies); 3391 if (ddi_status != DDI_DMA_MAPPED) { 3392 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3393 "nxge_dma_mem_alloc:ddi_dma_addr_bind: " 3394 "(kmem_alloc) failed kaddrp $%p length %d " 3395 "(status 0x%x (%d) ncookies %d.)", 3396 kaddrp, length, 3397 ddi_status, ddi_status, dma_p->ncookies)); 3398 KMEM_FREE(kaddrp, length); 3399 dma_p->acc_handle = NULL; 3400
ddi_dma_free_handle(&dma_p->dma_handle); 3401 dma_p->dma_handle = NULL; 3402 dma_p->kaddrp = NULL; 3403 return (NXGE_ERROR | NXGE_DDI_FAILED); 3404 } 3405 3406 if (dma_p->ncookies != 1) { 3407 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3408 "nxge_dma_mem_alloc:ddi_dma_addr_bind " 3409 "(kmem_alloc) > 1 cookie" 3410 "(status 0x%x ncookies %d.)", ddi_status, 3411 dma_p->ncookies)); 3412 KMEM_FREE(kaddrp, length); 3413 dma_p->acc_handle = NULL; 3414 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 3415 ddi_dma_free_handle(&dma_p->dma_handle); 3416 dma_p->dma_handle = NULL; 3417 dma_p->kaddrp = NULL; 3418 return (NXGE_ERROR); 3419 } 3420 3421 dma_p->kaddrp = kaddrp; 3422 3423 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 3424 "nxge_dma_mem_alloc: kmem_alloc dmap $%p " 3425 "kaddr $%p alength %d", 3426 dma_p, 3427 kaddrp, 3428 dma_p->alength)); 3429 break; 3430 } 3431 break; 3432 3433 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 3434 case B_TRUE: 3435 kaddrp = (caddr_t)contig_mem_alloc(length); 3436 if (kaddrp == NULL) { 3437 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3438 "nxge_dma_mem_alloc:contig_mem_alloc failed.")); 3439 ddi_dma_free_handle(&dma_p->dma_handle); 3440 return (NXGE_ERROR | NXGE_DDI_FAILED); 3441 } 3442 3443 dma_p->alength = length; 3444 ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL, 3445 kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0, 3446 &dma_p->dma_cookie, &dma_p->ncookies); 3447 if (ddi_status != DDI_DMA_MAPPED) { 3448 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3449 "nxge_dma_mem_alloc:ddi_dma_addr_bind failed " 3450 "(status 0x%x ncookies %d.)", ddi_status, 3451 dma_p->ncookies)); 3452 3453 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3454 "==> nxge_dma_mem_alloc: (not mapped)" 3455 "length %lu (0x%x) " 3456 "free contig kaddrp $%p " 3457 "va_to_pa $%p", 3458 length, length, 3459 kaddrp, 3460 va_to_pa(kaddrp))); 3461 3462 3463 contig_mem_free((void *)kaddrp, length); 3464 ddi_dma_free_handle(&dma_p->dma_handle); 3465 3466 dma_p->dma_handle = NULL; 3467 dma_p->acc_handle = NULL; 3468 dma_p->alength = 0; 3469 dma_p->kaddrp = NULL; 3470 3471 return (NXGE_ERROR | NXGE_DDI_FAILED); 3472 } 3473 3474 if (dma_p->ncookies != 1 || 3475 (dma_p->dma_cookie.dmac_laddress == 0)) { 3476 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3477 "nxge_dma_mem_alloc:ddi_dma_addr_bind > 1 " 3478 "cookie or " 3479 "dmac_laddress is NULL $%p size %d " 3480 " (status 0x%x ncookies %d.)", 3481 dma_p->dma_cookie.dmac_laddress, 3482 dma_p->dma_cookie.dmac_size, 3483 ddi_status, 3484 dma_p->ncookies)); 3485 3486 contig_mem_free((void *)kaddrp, length); 3487 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 3488 ddi_dma_free_handle(&dma_p->dma_handle); 3489 3490 dma_p->alength = 0; 3491 dma_p->dma_handle = NULL; 3492 dma_p->acc_handle = NULL; 3493 dma_p->kaddrp = NULL; 3494 3495 return (NXGE_ERROR | NXGE_DDI_FAILED); 3496 } 3497 break; 3498 3499 #else 3500 case B_TRUE: 3501 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3502 "nxge_dma_mem_alloc: invalid alloc type for !sun4v")); 3503 ddi_dma_free_handle(&dma_p->dma_handle); dma_p->dma_handle = NULL; return (NXGE_ERROR | NXGE_DDI_FAILED); 3504 #endif 3505 } 3506 3507 dma_p->kaddrp = kaddrp; 3508 dma_p->last_kaddrp = (unsigned char *)kaddrp + 3509 dma_p->alength - RXBUF_64B_ALIGNED; 3510 #if defined(__i386) 3511 dma_p->ioaddr_pp = 3512 (unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress; 3513 #else 3514 dma_p->ioaddr_pp = (unsigned char *)dma_p->dma_cookie.dmac_laddress; 3515 #endif 3516 dma_p->last_ioaddr_pp = 3517 #if defined(__i386) 3518 (unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress + 3519 #else 3520 (unsigned char
*)dma_p->dma_cookie.dmac_laddress + 3521 #endif 3522 dma_p->alength - RXBUF_64B_ALIGNED; 3523 3524 NPI_DMA_ACC_HANDLE_SET(dma_p, dma_p->acc_handle); 3525 3526 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 3527 dma_p->orig_ioaddr_pp = 3528 (unsigned char *)dma_p->dma_cookie.dmac_laddress; 3529 dma_p->orig_alength = length; 3530 dma_p->orig_kaddrp = kaddrp; 3531 dma_p->orig_vatopa = (uint64_t)va_to_pa(kaddrp); 3532 #endif 3533 3534 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dma_mem_alloc: " 3535 "dma buffer allocated: dma_p $%p " 3536 "return dmac_laddress from cookie $%p cookie dmac_size %d " 3537 "dma_p->ioaddr_p $%p " 3538 "dma_p->orig_ioaddr_p $%p " 3539 "orig_vatopa $%p " 3540 "alength %d (0x%x) " 3541 "kaddrp $%p " 3542 "length %d (0x%x)", 3543 dma_p, 3544 dma_p->dma_cookie.dmac_laddress, dma_p->dma_cookie.dmac_size, 3545 dma_p->ioaddr_pp, 3546 dma_p->orig_ioaddr_pp, 3547 dma_p->orig_vatopa, 3548 dma_p->alength, dma_p->alength, 3549 kaddrp, 3550 length, length)); 3551 3552 return (NXGE_OK); 3553 } 3554 3555 static void 3556 nxge_dma_mem_free(p_nxge_dma_common_t dma_p) 3557 { 3558 if (dma_p->dma_handle != NULL) { 3559 if (dma_p->ncookies) { 3560 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 3561 dma_p->ncookies = 0; 3562 } 3563 ddi_dma_free_handle(&dma_p->dma_handle); 3564 dma_p->dma_handle = NULL; 3565 } 3566 3567 if (dma_p->acc_handle != NULL) { 3568 ddi_dma_mem_free(&dma_p->acc_handle); 3569 dma_p->acc_handle = NULL; 3570 NPI_DMA_ACC_HANDLE_SET(dma_p, NULL); 3571 } 3572 3573 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 3574 if (dma_p->contig_alloc_type && 3575 dma_p->orig_kaddrp && dma_p->orig_alength) { 3576 NXGE_DEBUG_MSG((NULL, DMA_CTL, "nxge_dma_mem_free: " 3577 "kaddrp $%p (orig_kaddrp $%p)" 3578 "mem type %d " 3579 "orig_alength %d " 3580 "alength 0x%x (%d)", 3581 dma_p->kaddrp, 3582 dma_p->orig_kaddrp, 3583 dma_p->contig_alloc_type, 3584 dma_p->orig_alength, 3585 dma_p->alength, dma_p->alength)); 3586 3587 contig_mem_free(dma_p->orig_kaddrp, dma_p->orig_alength); 3588 dma_p->orig_alength = 0; 3589 dma_p->orig_kaddrp = NULL; 3590 dma_p->contig_alloc_type = B_FALSE; 3591 } 3592 #endif 3593 dma_p->kaddrp = NULL; 3594 dma_p->alength = 0; 3595 } 3596 3597 static void 3598 nxge_dma_free_rx_data_buf(p_nxge_dma_common_t dma_p) 3599 { 3600 uint64_t kaddr; 3601 uint32_t buf_size; 3602 3603 NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_dma_free_rx_data_buf")); 3604 3605 if (dma_p->dma_handle != NULL) { 3606 if (dma_p->ncookies) { 3607 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 3608 dma_p->ncookies = 0; 3609 } 3610 ddi_dma_free_handle(&dma_p->dma_handle); 3611 dma_p->dma_handle = NULL; 3612 } 3613 3614 if (dma_p->acc_handle != NULL) { 3615 ddi_dma_mem_free(&dma_p->acc_handle); 3616 dma_p->acc_handle = NULL; 3617 NPI_DMA_ACC_HANDLE_SET(dma_p, NULL); 3618 } 3619 3620 NXGE_DEBUG_MSG((NULL, DMA_CTL, 3621 "==> nxge_dma_free_rx_data_buf: dmap $%p buf_alloc_state %d", 3622 dma_p, 3623 dma_p->buf_alloc_state)); 3624 3625 if (!(dma_p->buf_alloc_state & BUF_ALLOCATED_WAIT_FREE)) { 3626 NXGE_DEBUG_MSG((NULL, DMA_CTL, 3627 "<== nxge_dma_free_rx_data_buf: " 3628 "outstanding data buffers")); 3629 return; 3630 } 3631 3632 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 3633 if (dma_p->contig_alloc_type && 3634 dma_p->orig_kaddrp && dma_p->orig_alength) { 3635 NXGE_DEBUG_MSG((NULL, DMA_CTL, "nxge_dma_free_rx_data_buf: " 3636 "kaddrp $%p (orig_kaddrp $%p)" 3637 "mem type %d " 3638 "orig_alength %d " 3639 "alength 0x%x (%d)", 3640 dma_p->kaddrp, 3641 dma_p->orig_kaddrp, 3642
dma_p->contig_alloc_type, 3643 dma_p->orig_alength, 3644 dma_p->alength, dma_p->alength)); 3645 3646 kaddr = (uint64_t)dma_p->orig_kaddrp; 3647 buf_size = dma_p->orig_alength; 3648 nxge_free_buf(CONTIG_MEM_ALLOC, kaddr, buf_size); 3649 dma_p->orig_alength = 0; 3650 dma_p->orig_kaddrp = NULL; 3651 dma_p->contig_alloc_type = B_FALSE; 3652 dma_p->kaddrp = NULL; 3653 dma_p->alength = 0; 3654 return; 3655 } 3656 #endif 3657 3658 if (dma_p->kmem_alloc_type) { 3659 NXGE_DEBUG_MSG((NULL, DMA_CTL, 3660 "nxge_dma_free_rx_data_buf: free kmem " 3661 "kaddrp $%p (orig_kaddrp $%p)" 3662 "alloc type %d " 3663 "orig_alength %d " 3664 "alength 0x%x (%d)", 3665 dma_p->kaddrp, 3666 dma_p->orig_kaddrp, 3667 dma_p->kmem_alloc_type, 3668 dma_p->orig_alength, 3669 dma_p->alength, dma_p->alength)); 3670 #if defined(__i386) 3671 kaddr = (uint64_t)(uint32_t)dma_p->kaddrp; 3672 #else 3673 kaddr = (uint64_t)dma_p->kaddrp; 3674 #endif 3675 buf_size = dma_p->orig_alength; 3676 NXGE_DEBUG_MSG((NULL, DMA_CTL, 3677 "nxge_dma_free_rx_data_buf: free dmap $%p " 3678 "kaddr $%p buf_size %d", 3679 dma_p, 3680 kaddr, buf_size)); 3681 nxge_free_buf(KMEM_ALLOC, kaddr, buf_size); 3682 dma_p->alength = 0; 3683 dma_p->orig_alength = 0; 3684 dma_p->kaddrp = NULL; 3685 dma_p->kmem_alloc_type = B_FALSE; 3686 } 3687 3688 NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_dma_free_rx_data_buf")); 3689 } 3690 3691 /* 3692 * nxge_m_start() -- start transmitting and receiving. 3693 * 3694 * This function is called by the MAC layer when the first 3695 * stream is opened, to prepare the hardware for transmitting 3696 * and receiving packets. 3697 */ 3698 static int 3699 nxge_m_start(void *arg) 3700 { 3701 p_nxge_t nxgep = (p_nxge_t)arg; 3702 3703 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_start")); 3704 3705 if (nxge_peu_reset_enable && !nxgep->nxge_link_poll_timerid) { 3706 (void) nxge_link_monitor(nxgep, LINK_MONITOR_START); 3707 } 3708 3709 MUTEX_ENTER(nxgep->genlock); 3710 if (nxge_init(nxgep) != NXGE_OK) { 3711 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3712 "<== nxge_m_start: initialization failed")); 3713 MUTEX_EXIT(nxgep->genlock); 3714 return (EIO); 3715 } 3716 3717 if (nxgep->nxge_mac_state == NXGE_MAC_STARTED) 3718 goto nxge_m_start_exit; 3719 /* 3720 * Start the timer that checks for system errors and tx hangs. 3721 */ 3722 if (!isLDOMguest(nxgep)) 3723 nxgep->nxge_timerid = nxge_start_timer(nxgep, 3724 nxge_check_hw_state, NXGE_CHECK_TIMER); 3725 #if defined(sun4v) 3726 else 3727 nxge_hio_start_timer(nxgep); 3728 #endif 3729 3730 nxgep->link_notify = B_TRUE; 3731 3732 nxgep->nxge_mac_state = NXGE_MAC_STARTED; 3733 3734 nxge_m_start_exit: 3735 MUTEX_EXIT(nxgep->genlock); 3736 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_start")); 3737 return (0); 3738 } 3739 3740 /* 3741 * nxge_m_stop(): stop transmitting and receiving.
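 *
 * The stop path mirrors nxge_m_start(): with genlock held it marks
 * the MAC NXGE_MAC_STOPPING, cancels the hardware-check timer,
 * tears the device down via nxge_uninit(), and finally marks the
 * MAC NXGE_MAC_STOPPED.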
3742 */ 3743 static void 3744 nxge_m_stop(void *arg) 3745 { 3746 p_nxge_t nxgep = (p_nxge_t)arg; 3747 3748 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_stop")); 3749 3750 MUTEX_ENTER(nxgep->genlock); 3751 nxgep->nxge_mac_state = NXGE_MAC_STOPPING; 3752 3753 if (nxgep->nxge_timerid) { 3754 nxge_stop_timer(nxgep, nxgep->nxge_timerid); 3755 nxgep->nxge_timerid = 0; 3756 } 3757 3758 nxge_uninit(nxgep); 3759 3760 nxgep->nxge_mac_state = NXGE_MAC_STOPPED; 3761 3762 MUTEX_EXIT(nxgep->genlock); 3763 3764 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_stop")); 3765 } 3766 3767 static int 3768 nxge_m_unicst(void *arg, const uint8_t *macaddr) 3769 { 3770 p_nxge_t nxgep = (p_nxge_t)arg; 3771 struct ether_addr addrp; 3772 3773 NXGE_DEBUG_MSG((nxgep, MAC_CTL, "==> nxge_m_unicst")); 3774 3775 bcopy(macaddr, (uint8_t *)&addrp, ETHERADDRL); 3776 if (nxge_set_mac_addr(nxgep, &addrp)) { 3777 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3778 "<== nxge_m_unicst: set unicast failed")); 3779 return (EINVAL); 3780 } 3781 3782 NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_m_unicst")); 3783 3784 return (0); 3785 } 3786 3787 static int 3788 nxge_m_multicst(void *arg, boolean_t add, const uint8_t *mca) 3789 { 3790 p_nxge_t nxgep = (p_nxge_t)arg; 3791 struct ether_addr addrp; 3792 3793 NXGE_DEBUG_MSG((nxgep, MAC_CTL, 3794 "==> nxge_m_multicst: add %d", add)); 3795 3796 bcopy(mca, (uint8_t *)&addrp, ETHERADDRL); 3797 if (add) { 3798 if (nxge_add_mcast_addr(nxgep, &addrp)) { 3799 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3800 "<== nxge_m_multicst: add multicast failed")); 3801 return (EINVAL); 3802 } 3803 } else { 3804 if (nxge_del_mcast_addr(nxgep, &addrp)) { 3805 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3806 "<== nxge_m_multicst: del multicast failed")); 3807 return (EINVAL); 3808 } 3809 } 3810 3811 NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_m_multicst")); 3812 3813 return (0); 3814 } 3815 3816 static int 3817 nxge_m_promisc(void *arg, boolean_t on) 3818 { 3819 p_nxge_t nxgep = (p_nxge_t)arg; 3820 3821 NXGE_DEBUG_MSG((nxgep, MAC_CTL, 3822 "==> nxge_m_promisc: on %d", on)); 3823 3824 if (nxge_set_promisc(nxgep, on)) { 3825 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3826 "<== nxge_m_promisc: set promisc failed")); 3827 return (EINVAL); 3828 } 3829 3830 NXGE_DEBUG_MSG((nxgep, MAC_CTL, 3831 "<== nxge_m_promisc: on %d", on)); 3832 3833 return (0); 3834 } 3835 3836 static void 3837 nxge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp) 3838 { 3839 p_nxge_t nxgep = (p_nxge_t)arg; 3840 struct iocblk *iocp; 3841 boolean_t need_privilege; 3842 int err; 3843 int cmd; 3844 3845 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl")); 3846 3847 iocp = (struct iocblk *)mp->b_rptr; 3848 iocp->ioc_error = 0; 3849 need_privilege = B_TRUE; 3850 cmd = iocp->ioc_cmd; 3851 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl: cmd 0x%08x", cmd)); 3852 switch (cmd) { 3853 default: 3854 miocnak(wq, mp, 0, EINVAL); 3855 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl: invalid")); 3856 return; 3857 3858 case LB_GET_INFO_SIZE: 3859 case LB_GET_INFO: 3860 case LB_GET_MODE: 3861 need_privilege = B_FALSE; 3862 break; 3863 case LB_SET_MODE: 3864 break; 3865 3866 3867 case NXGE_GET_MII: 3868 case NXGE_PUT_MII: 3869 case NXGE_GET64: 3870 case NXGE_PUT64: 3871 case NXGE_GET_TX_RING_SZ: 3872 case NXGE_GET_TX_DESC: 3873 case NXGE_TX_SIDE_RESET: 3874 case NXGE_RX_SIDE_RESET: 3875 case NXGE_GLOBAL_RESET: 3876 case NXGE_RESET_MAC: 3877 case NXGE_TX_REGS_DUMP: 3878 case NXGE_RX_REGS_DUMP: 3879 case NXGE_INT_REGS_DUMP: 3880 case NXGE_VIR_INT_REGS_DUMP: 3881 case NXGE_PUT_TCAM: 3882 case
NXGE_GET_TCAM: 3883 case NXGE_RTRACE: 3884 case NXGE_RDUMP: 3885 3886 need_privilege = B_FALSE; 3887 break; 3888 case NXGE_INJECT_ERR: 3889 cmn_err(CE_NOTE, "!nxge_m_ioctl: Inject error\n"); 3890 nxge_err_inject(nxgep, wq, mp); 3891 break; 3892 } 3893 3894 if (need_privilege) { 3895 err = secpolicy_net_config(iocp->ioc_cr, B_FALSE); 3896 if (err != 0) { 3897 miocnak(wq, mp, 0, err); 3898 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3899 "<== nxge_m_ioctl: no priv")); 3900 return; 3901 } 3902 } 3903 3904 switch (cmd) { 3905 3906 case LB_GET_MODE: 3907 case LB_SET_MODE: 3908 case LB_GET_INFO_SIZE: 3909 case LB_GET_INFO: 3910 nxge_loopback_ioctl(nxgep, wq, mp, iocp); 3911 break; 3912 3913 case NXGE_GET_MII: 3914 case NXGE_PUT_MII: 3915 case NXGE_PUT_TCAM: 3916 case NXGE_GET_TCAM: 3917 case NXGE_GET64: 3918 case NXGE_PUT64: 3919 case NXGE_GET_TX_RING_SZ: 3920 case NXGE_GET_TX_DESC: 3921 case NXGE_TX_SIDE_RESET: 3922 case NXGE_RX_SIDE_RESET: 3923 case NXGE_GLOBAL_RESET: 3924 case NXGE_RESET_MAC: 3925 case NXGE_TX_REGS_DUMP: 3926 case NXGE_RX_REGS_DUMP: 3927 case NXGE_INT_REGS_DUMP: 3928 case NXGE_VIR_INT_REGS_DUMP: 3929 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 3930 "==> nxge_m_ioctl: cmd 0x%x", cmd)); 3931 nxge_hw_ioctl(nxgep, wq, mp, iocp); 3932 break; 3933 } 3934 3935 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl")); 3936 } 3937 3938 extern void nxge_rx_hw_blank(void *arg, time_t ticks, uint_t count); 3939 3940 static void 3941 nxge_m_resources(void *arg) 3942 { 3943 p_nxge_t nxgep = arg; 3944 mac_rx_fifo_t mrf; 3945 3946 nxge_grp_set_t *set = &nxgep->rx_set; 3947 uint8_t rdc; 3948 3949 rx_rcr_ring_t *ring; 3950 3951 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_m_resources")); 3952 3953 MUTEX_ENTER(nxgep->genlock); 3954 3955 if (set->owned.map == 0) { 3956 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 3957 "nxge_m_resources: no receive resources")); 3958 goto nxge_m_resources_exit; 3959 } 3960 3961 /* 3962 * CR 6492541 Check to see if the drv_state has been initialized, 3963 * if not * call nxge_init(). 3964 */ 3965 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 3966 if (nxge_init(nxgep) != NXGE_OK) 3967 goto nxge_m_resources_exit; 3968 } 3969 3970 mrf.mrf_type = MAC_RX_FIFO; 3971 mrf.mrf_blank = nxge_rx_hw_blank; 3972 mrf.mrf_arg = (void *)nxgep; 3973 3974 mrf.mrf_normal_blank_time = 128; 3975 mrf.mrf_normal_pkt_count = 8; 3976 3977 /* 3978 * Export our receive resources to the MAC layer. 3979 */ 3980 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 3981 if ((1 << rdc) & set->owned.map) { 3982 ring = nxgep->rx_rcr_rings->rcr_rings[rdc]; 3983 if (ring == 0) { 3984 /* 3985 * This is a big deal only if we are 3986 * *not* in an LDOMs environment. 
3987 */ 3988 if (nxgep->environs == SOLARIS_DOMAIN) { 3989 cmn_err(CE_NOTE, 3990 "==> nxge_m_resources: " 3991 "ring %d == 0", rdc); 3992 } 3993 continue; 3994 } 3995 ring->rcr_mac_handle = mac_resource_add 3996 (nxgep->mach, (mac_resource_t *)&mrf); 3997 3998 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 3999 "==> nxge_m_resources: RDC %d RCR %p MAC handle %p", 4000 rdc, ring, ring->rcr_mac_handle)); 4001 } 4002 } 4003 4004 nxge_m_resources_exit: 4005 MUTEX_EXIT(nxgep->genlock); 4006 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_m_resources")); 4007 } 4008 4009 void 4010 nxge_mmac_kstat_update(p_nxge_t nxgep, mac_addr_slot_t slot, boolean_t factory) 4011 { 4012 p_nxge_mmac_stats_t mmac_stats; 4013 int i; 4014 nxge_mmac_t *mmac_info; 4015 4016 mmac_info = &nxgep->nxge_mmac_info; 4017 4018 mmac_stats = &nxgep->statsp->mmac_stats; 4019 mmac_stats->mmac_max_cnt = mmac_info->num_mmac; 4020 mmac_stats->mmac_avail_cnt = mmac_info->naddrfree; 4021 4022 for (i = 0; i < ETHERADDRL; i++) { 4023 if (factory) { 4024 mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i] 4025 = mmac_info->factory_mac_pool[slot][ 4026 (ETHERADDRL-1) - i]; 4027 } else { 4028 mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i] 4029 = mmac_info->mac_pool[slot].addr[ 4030 (ETHERADDRL - 1) - i]; 4031 } 4032 } 4033 } 4034 4035 /* 4036 * nxge_altmac_set() -- Set an alternate MAC address 4037 */ 4038 static int 4039 nxge_altmac_set(p_nxge_t nxgep, uint8_t *maddr, mac_addr_slot_t slot) 4040 { 4041 uint8_t addrn; 4042 uint8_t portn; 4043 npi_mac_addr_t altmac; 4044 hostinfo_t mac_rdc; 4045 p_nxge_class_pt_cfg_t clscfgp; 4046 4047 altmac.w2 = ((uint16_t)maddr[0] << 8) | ((uint16_t)maddr[1] & 0x0ff); 4048 altmac.w1 = ((uint16_t)maddr[2] << 8) | ((uint16_t)maddr[3] & 0x0ff); 4049 altmac.w0 = ((uint16_t)maddr[4] << 8) | ((uint16_t)maddr[5] & 0x0ff); 4050 4051 portn = nxgep->mac.portnum; 4052 addrn = (uint8_t)slot - 1; 4053 4054 if (npi_mac_altaddr_entry(nxgep->npi_handle, OP_SET, portn, 4055 addrn, &altmac) != NPI_SUCCESS) 4056 return (EIO); 4057 4058 /* 4059 * Set the rdc table number for the host info entry 4060 * for this mac address slot. 4061 */ 4062 clscfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config; 4063 mac_rdc.value = 0; 4064 mac_rdc.bits.w0.rdc_tbl_num = clscfgp->mac_host_info[addrn].rdctbl; 4065 mac_rdc.bits.w0.mac_pref = clscfgp->mac_host_info[addrn].mpr_npr; 4066 4067 if (npi_mac_hostinfo_entry(nxgep->npi_handle, OP_SET, 4068 nxgep->function_num, addrn, &mac_rdc) != NPI_SUCCESS) { 4069 return (EIO); 4070 } 4071 4072 /* 4073 * Enable comparison with the alternate MAC address. 4074 * While the first alternate addr is enabled by bit 1 of register 4075 * BMAC_ALTAD_CMPEN, it is enabled by bit 0 of register 4076 * XMAC_ADDR_CMPEN, so slot needs to be converted to addrn 4077 * accordingly before calling npi_mac_altaddr_entry. 4078 */ 4079 if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1) 4080 addrn = (uint8_t)slot - 1; 4081 else 4082 addrn = (uint8_t)slot; 4083 4084 if (npi_mac_altaddr_enable(nxgep->npi_handle, portn, addrn) 4085 != NPI_SUCCESS) 4086 return (EIO); 4087 4088 return (0); 4089 } 4090 4091 /* 4092 * nxeg_m_mmac_add() - find an unused address slot, set the address 4093 * value to the one specified, enable the port to start filtering on 4094 * the new MAC address. Returns 0 on success. 
4095 */ 4096 int 4097 nxge_m_mmac_add(void *arg, mac_multi_addr_t *maddr) 4098 { 4099 p_nxge_t nxgep = arg; 4100 mac_addr_slot_t slot; 4101 nxge_mmac_t *mmac_info; 4102 int err; 4103 nxge_status_t status; 4104 4105 mutex_enter(nxgep->genlock); 4106 4107 /* 4108 * Make sure that nxge is initialized, if _start() has 4109 * not been called. 4110 */ 4111 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 4112 status = nxge_init(nxgep); 4113 if (status != NXGE_OK) { 4114 mutex_exit(nxgep->genlock); 4115 return (ENXIO); 4116 } 4117 } 4118 4119 mmac_info = &nxgep->nxge_mmac_info; 4120 if (mmac_info->naddrfree == 0) { 4121 mutex_exit(nxgep->genlock); 4122 return (ENOSPC); 4123 } 4124 if (!mac_unicst_verify(nxgep->mach, maddr->mma_addr, 4125 maddr->mma_addrlen)) { 4126 mutex_exit(nxgep->genlock); 4127 return (EINVAL); 4128 } 4129 /* 4130 * Search for the first available slot. Because naddrfree 4131 * is not zero, we are guaranteed to find one. 4132 * Slot 0 is for unique (primary) MAC. The first alternate 4133 * MAC slot is slot 1. 4134 * Each of the first two ports of Neptune has 16 alternate 4135 * MAC slots but only the first 7 (of 15) slots have assigned factory 4136 * MAC addresses. We first search among the slots without bundled 4137 * factory MACs. If we fail to find one in that range, then we 4138 * search the slots with bundled factory MACs. A factory MAC 4139 * will be wasted while the slot is used with a user MAC address. 4140 * But the slot could be used by factory MAC again after calling 4141 * nxge_m_mmac_remove and nxge_m_mmac_reserve. 4142 */ 4143 if (mmac_info->num_factory_mmac < mmac_info->num_mmac) { 4144 for (slot = mmac_info->num_factory_mmac + 1; 4145 slot <= mmac_info->num_mmac; slot++) { 4146 if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED)) 4147 break; 4148 } 4149 if (slot > mmac_info->num_mmac) { 4150 for (slot = 1; slot <= mmac_info->num_factory_mmac; 4151 slot++) { 4152 if (!(mmac_info->mac_pool[slot].flags 4153 & MMAC_SLOT_USED)) 4154 break; 4155 } 4156 } 4157 } else { 4158 for (slot = 1; slot <= mmac_info->num_mmac; slot++) { 4159 if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED)) 4160 break; 4161 } 4162 } 4163 ASSERT(slot <= mmac_info->num_mmac); 4164 if ((err = nxge_altmac_set(nxgep, maddr->mma_addr, slot)) != 0) { 4165 mutex_exit(nxgep->genlock); 4166 return (err); 4167 } 4168 bcopy(maddr->mma_addr, mmac_info->mac_pool[slot].addr, ETHERADDRL); 4169 mmac_info->mac_pool[slot].flags |= MMAC_SLOT_USED; 4170 mmac_info->mac_pool[slot].flags &= ~MMAC_VENDOR_ADDR; 4171 mmac_info->naddrfree--; 4172 nxge_mmac_kstat_update(nxgep, slot, B_FALSE); 4173 4174 maddr->mma_slot = slot; 4175 4176 mutex_exit(nxgep->genlock); 4177 return (0); 4178 } 4179 4180 /* 4181 * This function reserves an unused slot and programs the slot and the HW 4182 * with a factory mac address. 4183 */ 4184 static int 4185 nxge_m_mmac_reserve(void *arg, mac_multi_addr_t *maddr) 4186 { 4187 p_nxge_t nxgep = arg; 4188 mac_addr_slot_t slot; 4189 nxge_mmac_t *mmac_info; 4190 int err; 4191 nxge_status_t status; 4192 4193 mutex_enter(nxgep->genlock); 4194 4195 /* 4196 * Make sure that nxge is initialized, if _start() has 4197 * not been called. 
4198 */ 4199 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 4200 status = nxge_init(nxgep); 4201 if (status != NXGE_OK) { 4202 mutex_exit(nxgep->genlock); 4203 return (ENXIO); 4204 } 4205 } 4206 4207 mmac_info = &nxgep->nxge_mmac_info; 4208 if (mmac_info->naddrfree == 0) { 4209 mutex_exit(nxgep->genlock); 4210 return (ENOSPC); 4211 } 4212 4213 slot = maddr->mma_slot; 4214 if (slot == -1) { /* -1: Take the first available slot */ 4215 for (slot = 1; slot <= mmac_info->num_factory_mmac; slot++) { 4216 if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED)) 4217 break; 4218 } 4219 if (slot > mmac_info->num_factory_mmac) { 4220 mutex_exit(nxgep->genlock); 4221 return (ENOSPC); 4222 } 4223 } 4224 if (slot < 1 || slot > mmac_info->num_factory_mmac) { 4225 /* 4226 * Do not support factory MAC at a slot greater than 4227 * num_factory_mmac even when there are available factory 4228 * MAC addresses because the alternate MACs are bundled with 4229 * slot[1] through slot[num_factory_mmac] 4230 */ 4231 mutex_exit(nxgep->genlock); 4232 return (EINVAL); 4233 } 4234 if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) { 4235 mutex_exit(nxgep->genlock); 4236 return (EBUSY); 4237 } 4238 /* Verify the address to be reserved */ 4239 if (!mac_unicst_verify(nxgep->mach, 4240 mmac_info->factory_mac_pool[slot], ETHERADDRL)) { 4241 mutex_exit(nxgep->genlock); 4242 return (EINVAL); 4243 } 4244 if (err = nxge_altmac_set(nxgep, 4245 mmac_info->factory_mac_pool[slot], slot)) { 4246 mutex_exit(nxgep->genlock); 4247 return (err); 4248 } 4249 bcopy(mmac_info->factory_mac_pool[slot], maddr->mma_addr, ETHERADDRL); 4250 mmac_info->mac_pool[slot].flags |= MMAC_SLOT_USED | MMAC_VENDOR_ADDR; 4251 mmac_info->naddrfree--; 4252 4253 nxge_mmac_kstat_update(nxgep, slot, B_TRUE); 4254 mutex_exit(nxgep->genlock); 4255 4256 /* Pass info back to the caller */ 4257 maddr->mma_slot = slot; 4258 maddr->mma_addrlen = ETHERADDRL; 4259 maddr->mma_flags = MMAC_SLOT_USED | MMAC_VENDOR_ADDR; 4260 4261 return (0); 4262 } 4263 4264 /* 4265 * Remove the specified mac address and update the HW not to filter 4266 * the mac address anymore. 4267 */ 4268 int 4269 nxge_m_mmac_remove(void *arg, mac_addr_slot_t slot) 4270 { 4271 p_nxge_t nxgep = arg; 4272 nxge_mmac_t *mmac_info; 4273 uint8_t addrn; 4274 uint8_t portn; 4275 int err = 0; 4276 nxge_status_t status; 4277 4278 mutex_enter(nxgep->genlock); 4279 4280 /* 4281 * Make sure that nxge is initialized, if _start() has 4282 * not been called. 4283 */ 4284 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 4285 status = nxge_init(nxgep); 4286 if (status != NXGE_OK) { 4287 mutex_exit(nxgep->genlock); 4288 return (ENXIO); 4289 } 4290 } 4291 4292 mmac_info = &nxgep->nxge_mmac_info; 4293 if (slot < 1 || slot > mmac_info->num_mmac) { 4294 mutex_exit(nxgep->genlock); 4295 return (EINVAL); 4296 } 4297 4298 portn = nxgep->mac.portnum; 4299 if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1) 4300 addrn = (uint8_t)slot - 1; 4301 else 4302 addrn = (uint8_t)slot; 4303 4304 if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) { 4305 if (npi_mac_altaddr_disable(nxgep->npi_handle, portn, addrn) 4306 == NPI_SUCCESS) { 4307 mmac_info->naddrfree++; 4308 mmac_info->mac_pool[slot].flags &= ~MMAC_SLOT_USED; 4309 /* 4310 * Regardless if the MAC we just stopped filtering 4311 * is a user addr or a facory addr, we must set 4312 * the MMAC_VENDOR_ADDR flag if this slot has an 4313 * associated factory MAC to indicate that a factory 4314 * MAC is available. 
4315 */ 4316 if (slot <= mmac_info->num_factory_mmac) { 4317 mmac_info->mac_pool[slot].flags 4318 |= MMAC_VENDOR_ADDR; 4319 } 4320 /* 4321 * Clear mac_pool[slot].addr so that kstat shows 0 4322 * alternate MAC address if the slot is not used. 4323 * (But nxge_m_mmac_get returns the factory MAC even 4324 * when the slot is not used!) 4325 */ 4326 bzero(mmac_info->mac_pool[slot].addr, ETHERADDRL); 4327 nxge_mmac_kstat_update(nxgep, slot, B_FALSE); 4328 } else { 4329 err = EIO; 4330 } 4331 } else { 4332 err = EINVAL; 4333 } 4334 4335 mutex_exit(nxgep->genlock); 4336 return (err); 4337 } 4338 4339 /* 4340 * Modify a mac address added by nxge_m_mmac_add or nxge_m_mmac_reserve(). 4341 */ 4342 static int 4343 nxge_m_mmac_modify(void *arg, mac_multi_addr_t *maddr) 4344 { 4345 p_nxge_t nxgep = arg; 4346 mac_addr_slot_t slot; 4347 nxge_mmac_t *mmac_info; 4348 int err = 0; 4349 nxge_status_t status; 4350 4351 if (!mac_unicst_verify(nxgep->mach, maddr->mma_addr, 4352 maddr->mma_addrlen)) 4353 return (EINVAL); 4354 4355 slot = maddr->mma_slot; 4356 4357 mutex_enter(nxgep->genlock); 4358 4359 /* 4360 * Make sure that nxge is initialized, if _start() has 4361 * not been called. 4362 */ 4363 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 4364 status = nxge_init(nxgep); 4365 if (status != NXGE_OK) { 4366 mutex_exit(nxgep->genlock); 4367 return (ENXIO); 4368 } 4369 } 4370 4371 mmac_info = &nxgep->nxge_mmac_info; 4372 if (slot < 1 || slot > mmac_info->num_mmac) { 4373 mutex_exit(nxgep->genlock); 4374 return (EINVAL); 4375 } 4376 if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) { 4377 if ((err = nxge_altmac_set(nxgep, maddr->mma_addr, slot)) 4378 != 0) { 4379 bcopy(maddr->mma_addr, mmac_info->mac_pool[slot].addr, 4380 ETHERADDRL); 4381 /* 4382 * Assume that the MAC passed down from the caller 4383 * is not a factory MAC address (The user should 4384 * call mmac_remove followed by mmac_reserve if 4385 * he wants to use the factory MAC for this slot). 4386 */ 4387 mmac_info->mac_pool[slot].flags &= ~MMAC_VENDOR_ADDR; 4388 nxge_mmac_kstat_update(nxgep, slot, B_FALSE); 4389 } 4390 } else { 4391 err = EINVAL; 4392 } 4393 mutex_exit(nxgep->genlock); 4394 return (err); 4395 } 4396 4397 /* 4398 * nxge_m_mmac_get() - Get the MAC address and other information 4399 * related to the slot. mma_flags should be set to 0 in the call. 4400 * Note: although kstat shows MAC address as zero when a slot is 4401 * not used, Crossbow expects nxge_m_mmac_get to copy factory MAC 4402 * to the caller as long as the slot is not using a user MAC address. 4403 * The following table shows the rules, 4404 * 4405 * USED VENDOR mma_addr 4406 * ------------------------------------------------------------ 4407 * (1) Slot uses a user MAC: yes no user MAC 4408 * (2) Slot uses a factory MAC: yes yes factory MAC 4409 * (3) Slot is not used but is 4410 * factory MAC capable: no yes factory MAC 4411 * (4) Slot is not used and is 4412 * not factory MAC capable: no no 0 4413 * ------------------------------------------------------------ 4414 */ 4415 static int 4416 nxge_m_mmac_get(void *arg, mac_multi_addr_t *maddr) 4417 { 4418 nxge_t *nxgep = arg; 4419 mac_addr_slot_t slot; 4420 nxge_mmac_t *mmac_info; 4421 nxge_status_t status; 4422 4423 slot = maddr->mma_slot; 4424 4425 mutex_enter(nxgep->genlock); 4426 4427 /* 4428 * Make sure that nxge is initialized, if _start() has 4429 * not been called. 
4430 */ 4431 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 4432 status = nxge_init(nxgep); 4433 if (status != NXGE_OK) { 4434 mutex_exit(nxgep->genlock); 4435 return (ENXIO); 4436 } 4437 } 4438 4439 mmac_info = &nxgep->nxge_mmac_info; 4440 4441 if (slot < 1 || slot > mmac_info->num_mmac) { 4442 mutex_exit(nxgep->genlock); 4443 return (EINVAL); 4444 } 4445 maddr->mma_flags = 0; 4446 if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) 4447 maddr->mma_flags |= MMAC_SLOT_USED; 4448 4449 if (mmac_info->mac_pool[slot].flags & MMAC_VENDOR_ADDR) { 4450 maddr->mma_flags |= MMAC_VENDOR_ADDR; 4451 bcopy(mmac_info->factory_mac_pool[slot], 4452 maddr->mma_addr, ETHERADDRL); 4453 maddr->mma_addrlen = ETHERADDRL; 4454 } else { 4455 if (maddr->mma_flags & MMAC_SLOT_USED) { 4456 bcopy(mmac_info->mac_pool[slot].addr, 4457 maddr->mma_addr, ETHERADDRL); 4458 maddr->mma_addrlen = ETHERADDRL; 4459 } else { 4460 bzero(maddr->mma_addr, ETHERADDRL); 4461 maddr->mma_addrlen = 0; 4462 } 4463 } 4464 mutex_exit(nxgep->genlock); 4465 return (0); 4466 } 4467 4468 static boolean_t 4469 nxge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data) 4470 { 4471 nxge_t *nxgep = arg; 4472 uint32_t *txflags = cap_data; 4473 multiaddress_capab_t *mmacp = cap_data; 4474 4475 switch (cap) { 4476 case MAC_CAPAB_HCKSUM: 4477 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4478 "==> nxge_m_getcapab: checksum %d", nxge_cksum_offload)); 4479 if (nxge_cksum_offload <= 1) { 4480 *txflags = HCKSUM_INET_PARTIAL; 4481 } 4482 break; 4483 4484 case MAC_CAPAB_POLL: 4485 /* 4486 * There's nothing for us to fill in, simply returning 4487 * B_TRUE stating that we support polling is sufficient. 4488 */ 4489 break; 4490 4491 case MAC_CAPAB_MULTIADDRESS: 4492 mmacp = (multiaddress_capab_t *)cap_data; 4493 mutex_enter(nxgep->genlock); 4494 4495 mmacp->maddr_naddr = nxgep->nxge_mmac_info.num_mmac; 4496 mmacp->maddr_naddrfree = nxgep->nxge_mmac_info.naddrfree; 4497 mmacp->maddr_flag = 0; /* 0 is required by PSARC2006/265 */ 4498 /* 4499 * maddr_handle is driver's private data, passed back to 4500 * entry point functions as arg. 4501 */ 4502 mmacp->maddr_handle = nxgep; 4503 mmacp->maddr_add = nxge_m_mmac_add; 4504 mmacp->maddr_remove = nxge_m_mmac_remove; 4505 mmacp->maddr_modify = nxge_m_mmac_modify; 4506 mmacp->maddr_get = nxge_m_mmac_get; 4507 mmacp->maddr_reserve = nxge_m_mmac_reserve; 4508 4509 mutex_exit(nxgep->genlock); 4510 break; 4511 4512 case MAC_CAPAB_LSO: { 4513 mac_capab_lso_t *cap_lso = cap_data; 4514 4515 if (nxgep->soft_lso_enable) { 4516 if (nxge_cksum_offload <= 1) { 4517 cap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4; 4518 if (nxge_lso_max > NXGE_LSO_MAXLEN) { 4519 nxge_lso_max = NXGE_LSO_MAXLEN; 4520 } 4521 cap_lso->lso_basic_tcp_ipv4.lso_max = 4522 nxge_lso_max; 4523 } 4524 break; 4525 } else { 4526 return (B_FALSE); 4527 } 4528 } 4529 4530 #if defined(sun4v) 4531 case MAC_CAPAB_RINGS: { 4532 mac_capab_rings_t *mrings = (mac_capab_rings_t *)cap_data; 4533 4534 /* 4535 * Only the service domain driver responds to 4536 * this capability request. 4537 */ 4538 if (isLDOMservice(nxgep)) { 4539 mrings->mr_handle = (void *)nxgep; 4540 4541 /* 4542 * No dynamic allocation of groups and 4543 * rings at this time. Shares dictate the 4544 * configuration. 
4545 */ 4546 mrings->mr_gadd_ring = NULL; 4547 mrings->mr_grem_ring = NULL; 4548 mrings->mr_rget = NULL; 4549 mrings->mr_gget = nxge_hio_group_get; 4550 4551 if (mrings->mr_type == MAC_RING_TYPE_RX) { 4552 mrings->mr_rnum = 8; /* XXX */ 4553 mrings->mr_gnum = 6; /* XXX */ 4554 } else { 4555 mrings->mr_rnum = 8; /* XXX */ 4556 mrings->mr_gnum = 0; /* XXX */ 4557 } 4558 } else 4559 return (B_FALSE); 4560 break; 4561 } 4562 4563 case MAC_CAPAB_SHARES: { 4564 mac_capab_share_t *mshares = (mac_capab_share_t *)cap_data; 4565 4566 /* 4567 * Only the service domain driver responds to 4568 * this capability request. 4569 */ 4570 if (isLDOMservice(nxgep)) { 4571 mshares->ms_snum = 3; 4572 mshares->ms_handle = (void *)nxgep; 4573 mshares->ms_salloc = nxge_hio_share_alloc; 4574 mshares->ms_sfree = nxge_hio_share_free; 4575 mshares->ms_sadd = NULL; 4576 mshares->ms_sremove = NULL; 4577 mshares->ms_squery = nxge_hio_share_query; 4578 } else 4579 return (B_FALSE); 4580 break; 4581 } 4582 #endif 4583 default: 4584 return (B_FALSE); 4585 } 4586 return (B_TRUE); 4587 } 4588 4589 static boolean_t 4590 nxge_param_locked(mac_prop_id_t pr_num) 4591 { 4592 /* 4593 * All adv_* parameters are locked (read-only) while 4594 * the device is in any sort of loopback mode ... 4595 */ 4596 switch (pr_num) { 4597 case MAC_PROP_ADV_1000FDX_CAP: 4598 case MAC_PROP_EN_1000FDX_CAP: 4599 case MAC_PROP_ADV_1000HDX_CAP: 4600 case MAC_PROP_EN_1000HDX_CAP: 4601 case MAC_PROP_ADV_100FDX_CAP: 4602 case MAC_PROP_EN_100FDX_CAP: 4603 case MAC_PROP_ADV_100HDX_CAP: 4604 case MAC_PROP_EN_100HDX_CAP: 4605 case MAC_PROP_ADV_10FDX_CAP: 4606 case MAC_PROP_EN_10FDX_CAP: 4607 case MAC_PROP_ADV_10HDX_CAP: 4608 case MAC_PROP_EN_10HDX_CAP: 4609 case MAC_PROP_AUTONEG: 4610 case MAC_PROP_FLOWCTRL: 4611 return (B_TRUE); 4612 } 4613 return (B_FALSE); 4614 } 4615 4616 /* 4617 * callback functions for set/get of properties 4618 */ 4619 static int 4620 nxge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num, 4621 uint_t pr_valsize, const void *pr_val) 4622 { 4623 nxge_t *nxgep = barg; 4624 p_nxge_param_t param_arr; 4625 p_nxge_stats_t statsp; 4626 int err = 0; 4627 uint8_t val; 4628 uint32_t cur_mtu, new_mtu, old_framesize; 4629 link_flowctrl_t fl; 4630 4631 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_setprop")); 4632 param_arr = nxgep->param_arr; 4633 statsp = nxgep->statsp; 4634 mutex_enter(nxgep->genlock); 4635 if (statsp->port_stats.lb_mode != nxge_lb_normal && 4636 nxge_param_locked(pr_num)) { 4637 /* 4638 * All adv_* parameters are locked (read-only) 4639 * while the device is in any sort of loopback mode. 
4640 */ 4641 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4642 "==> nxge_m_setprop: loopback mode: read only")); 4643 mutex_exit(nxgep->genlock); 4644 return (EBUSY); 4645 } 4646 4647 val = *(uint8_t *)pr_val; 4648 switch (pr_num) { 4649 case MAC_PROP_EN_1000FDX_CAP: 4650 nxgep->param_en_1000fdx = val; 4651 param_arr[param_anar_1000fdx].value = val; 4652 4653 goto reprogram; 4654 4655 case MAC_PROP_EN_100FDX_CAP: 4656 nxgep->param_en_100fdx = val; 4657 param_arr[param_anar_100fdx].value = val; 4658 4659 goto reprogram; 4660 4661 case MAC_PROP_EN_10FDX_CAP: 4662 nxgep->param_en_10fdx = val; 4663 param_arr[param_anar_10fdx].value = val; 4664 4665 goto reprogram; 4666 4667 case MAC_PROP_EN_1000HDX_CAP: 4668 case MAC_PROP_EN_100HDX_CAP: 4669 case MAC_PROP_EN_10HDX_CAP: 4670 case MAC_PROP_ADV_1000FDX_CAP: 4671 case MAC_PROP_ADV_1000HDX_CAP: 4672 case MAC_PROP_ADV_100FDX_CAP: 4673 case MAC_PROP_ADV_100HDX_CAP: 4674 case MAC_PROP_ADV_10FDX_CAP: 4675 case MAC_PROP_ADV_10HDX_CAP: 4676 case MAC_PROP_STATUS: 4677 case MAC_PROP_SPEED: 4678 case MAC_PROP_DUPLEX: 4679 err = EINVAL; /* cannot set read-only properties */ 4680 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4681 "==> nxge_m_setprop: read only property %d", 4682 pr_num)); 4683 break; 4684 4685 case MAC_PROP_AUTONEG: 4686 param_arr[param_autoneg].value = val; 4687 4688 goto reprogram; 4689 4690 case MAC_PROP_MTU: 4691 if (nxgep->nxge_mac_state == NXGE_MAC_STARTED) { 4692 err = EBUSY; 4693 break; 4694 } 4695 4696 cur_mtu = nxgep->mac.default_mtu; 4697 bcopy(pr_val, &new_mtu, sizeof (new_mtu)); 4698 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4699 "==> nxge_m_setprop: set MTU: %d is_jumbo %d", 4700 new_mtu, nxgep->mac.is_jumbo)); 4701 4702 if (new_mtu == cur_mtu) { 4703 err = 0; 4704 break; 4705 } 4706 if (new_mtu < NXGE_DEFAULT_MTU || 4707 new_mtu > NXGE_MAXIMUM_MTU) { 4708 err = EINVAL; 4709 break; 4710 } 4711 4712 if ((new_mtu > NXGE_DEFAULT_MTU) && 4713 !nxgep->mac.is_jumbo) { 4714 err = EINVAL; 4715 break; 4716 } 4717 4718 old_framesize = (uint32_t)nxgep->mac.maxframesize; 4719 nxgep->mac.maxframesize = (uint16_t) 4720 (new_mtu + NXGE_EHEADER_VLAN_CRC); 4721 if (nxge_mac_set_framesize(nxgep)) { 4722 nxgep->mac.maxframesize = 4723 (uint16_t)old_framesize; 4724 err = EINVAL; 4725 break; 4726 } 4727 4728 err = mac_maxsdu_update(nxgep->mach, new_mtu); 4729 if (err) { 4730 nxgep->mac.maxframesize = 4731 (uint16_t)old_framesize; 4732 err = EINVAL; 4733 break; 4734 } 4735 4736 nxgep->mac.default_mtu = new_mtu; 4737 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4738 "==> nxge_m_setprop: set MTU: %d maxframe %d", 4739 new_mtu, nxgep->mac.maxframesize)); 4740 break; 4741 4742 case MAC_PROP_FLOWCTRL: 4743 bcopy(pr_val, &fl, sizeof (fl)); 4744 switch (fl) { 4745 default: 4746 err = EINVAL; 4747 break; 4748 4749 case LINK_FLOWCTRL_NONE: 4750 param_arr[param_anar_pause].value = 0; 4751 break; 4752 4753 case LINK_FLOWCTRL_RX: 4754 param_arr[param_anar_pause].value = 1; 4755 break; 4756 4757 case LINK_FLOWCTRL_TX: 4758 case LINK_FLOWCTRL_BI: 4759 err = EINVAL; 4760 break; 4761 } 4762 4763 reprogram: 4764 if (err == 0) { 4765 if (!nxge_param_link_update(nxgep)) { 4766 err = EINVAL; 4767 } 4768 } 4769 break; 4770 case MAC_PROP_PRIVATE: 4771 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4772 "==> nxge_m_setprop: private property")); 4773 err = nxge_set_priv_prop(nxgep, pr_name, pr_valsize, 4774 pr_val); 4775 break; 4776 4777 default: 4778 err = ENOTSUP; 4779 break; 4780 } 4781 4782 mutex_exit(nxgep->genlock); 4783 4784 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4785 "<== nxge_m_setprop (return %d)", err)); 4786 return (err); 4787 
} 4788 4789 static int 4790 nxge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num, 4791 uint_t pr_flags, uint_t pr_valsize, void *pr_val) 4792 { 4793 nxge_t *nxgep = barg; 4794 p_nxge_param_t param_arr = nxgep->param_arr; 4795 p_nxge_stats_t statsp = nxgep->statsp; 4796 int err = 0; 4797 link_flowctrl_t fl; 4798 uint64_t tmp = 0; 4799 link_state_t ls; 4800 boolean_t is_default = (pr_flags & MAC_PROP_DEFAULT); 4801 4802 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4803 "==> nxge_m_getprop: pr_num %d", pr_num)); 4804 4805 if (pr_valsize == 0) 4806 return (EINVAL); 4807 4808 if ((is_default) && (pr_num != MAC_PROP_PRIVATE)) { 4809 err = nxge_get_def_val(nxgep, pr_num, pr_valsize, pr_val); 4810 return (err); 4811 } 4812 4813 bzero(pr_val, pr_valsize); 4814 switch (pr_num) { 4815 case MAC_PROP_DUPLEX: 4816 *(uint8_t *)pr_val = statsp->mac_stats.link_duplex; 4817 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4818 "==> nxge_m_getprop: duplex mode %d", 4819 *(uint8_t *)pr_val)); 4820 break; 4821 4822 case MAC_PROP_SPEED: 4823 if (pr_valsize < sizeof (uint64_t)) 4824 return (EINVAL); 4825 tmp = statsp->mac_stats.link_speed * 1000000ull; 4826 bcopy(&tmp, pr_val, sizeof (tmp)); 4827 break; 4828 4829 case MAC_PROP_STATUS: 4830 if (pr_valsize < sizeof (link_state_t)) 4831 return (EINVAL); 4832 if (!statsp->mac_stats.link_up) 4833 ls = LINK_STATE_DOWN; 4834 else 4835 ls = LINK_STATE_UP; 4836 bcopy(&ls, pr_val, sizeof (ls)); 4837 break; 4838 4839 case MAC_PROP_AUTONEG: 4840 *(uint8_t *)pr_val = 4841 param_arr[param_autoneg].value; 4842 break; 4843 4844 case MAC_PROP_FLOWCTRL: 4845 if (pr_valsize < sizeof (link_flowctrl_t)) 4846 return (EINVAL); 4847 4848 fl = LINK_FLOWCTRL_NONE; 4849 if (param_arr[param_anar_pause].value) { 4850 fl = LINK_FLOWCTRL_RX; 4851 } 4852 bcopy(&fl, pr_val, sizeof (fl)); 4853 break; 4854 4855 case MAC_PROP_ADV_1000FDX_CAP: 4856 *(uint8_t *)pr_val = 4857 param_arr[param_anar_1000fdx].value; 4858 break; 4859 4860 case MAC_PROP_EN_1000FDX_CAP: 4861 *(uint8_t *)pr_val = nxgep->param_en_1000fdx; 4862 break; 4863 4864 case MAC_PROP_ADV_100FDX_CAP: 4865 *(uint8_t *)pr_val = 4866 param_arr[param_anar_100fdx].value; 4867 break; 4868 4869 case MAC_PROP_EN_100FDX_CAP: 4870 *(uint8_t *)pr_val = nxgep->param_en_100fdx; 4871 break; 4872 4873 case MAC_PROP_ADV_10FDX_CAP: 4874 *(uint8_t *)pr_val = 4875 param_arr[param_anar_10fdx].value; 4876 break; 4877 4878 case MAC_PROP_EN_10FDX_CAP: 4879 *(uint8_t *)pr_val = nxgep->param_en_10fdx; 4880 break; 4881 4882 case MAC_PROP_EN_1000HDX_CAP: 4883 case MAC_PROP_EN_100HDX_CAP: 4884 case MAC_PROP_EN_10HDX_CAP: 4885 case MAC_PROP_ADV_1000HDX_CAP: 4886 case MAC_PROP_ADV_100HDX_CAP: 4887 case MAC_PROP_ADV_10HDX_CAP: 4888 err = ENOTSUP; 4889 break; 4890 4891 case MAC_PROP_PRIVATE: 4892 err = nxge_get_priv_prop(nxgep, pr_name, pr_flags, 4893 pr_valsize, pr_val); 4894 break; 4895 default: 4896 err = EINVAL; 4897 break; 4898 } 4899 4900 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_getprop")); 4901 4902 return (err); 4903 } 4904 4905 /* ARGSUSED */ 4906 static int 4907 nxge_set_priv_prop(p_nxge_t nxgep, const char *pr_name, uint_t pr_valsize, 4908 const void *pr_val) 4909 { 4910 p_nxge_param_t param_arr = nxgep->param_arr; 4911 int err = 0; 4912 long result; 4913 4914 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4915 "==> nxge_set_priv_prop: name %s", pr_name)); 4916 4917 if (strcmp(pr_name, "_accept_jumbo") == 0) { 4918 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4919 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4920 "<== nxge_set_priv_prop: name %s " 4921 "pr_val %s result %d " 4922 
"param %d is_jumbo %d", 4923 pr_name, pr_val, result, 4924 param_arr[param_accept_jumbo].value, 4925 nxgep->mac.is_jumbo)); 4926 4927 if (result > 1 || result < 0) { 4928 err = EINVAL; 4929 } else { 4930 if (nxgep->mac.is_jumbo == 4931 (uint32_t)result) { 4932 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4933 "no change (%d %d)", 4934 nxgep->mac.is_jumbo, 4935 result)); 4936 return (0); 4937 } 4938 } 4939 4940 param_arr[param_accept_jumbo].value = result; 4941 nxgep->mac.is_jumbo = B_FALSE; 4942 if (result) { 4943 nxgep->mac.is_jumbo = B_TRUE; 4944 } 4945 4946 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4947 "<== nxge_set_priv_prop: name %s (value %d) is_jumbo %d", 4948 pr_name, result, nxgep->mac.is_jumbo)); 4949 4950 return (err); 4951 } 4952 4953 /* Blanking */ 4954 if (strcmp(pr_name, "_rxdma_intr_time") == 0) { 4955 err = nxge_param_rx_intr_time(nxgep, NULL, NULL, 4956 (char *)pr_val, 4957 (caddr_t)¶m_arr[param_rxdma_intr_time]); 4958 if (err) { 4959 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4960 "<== nxge_set_priv_prop: " 4961 "unable to set (%s)", pr_name)); 4962 err = EINVAL; 4963 } else { 4964 err = 0; 4965 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4966 "<== nxge_set_priv_prop: " 4967 "set (%s)", pr_name)); 4968 } 4969 4970 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4971 "<== nxge_set_priv_prop: name %s (value %d)", 4972 pr_name, result)); 4973 4974 return (err); 4975 } 4976 4977 if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) { 4978 err = nxge_param_rx_intr_pkts(nxgep, NULL, NULL, 4979 (char *)pr_val, 4980 (caddr_t)¶m_arr[param_rxdma_intr_pkts]); 4981 if (err) { 4982 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4983 "<== nxge_set_priv_prop: " 4984 "unable to set (%s)", pr_name)); 4985 err = EINVAL; 4986 } else { 4987 err = 0; 4988 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4989 "<== nxge_set_priv_prop: " 4990 "set (%s)", pr_name)); 4991 } 4992 4993 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4994 "<== nxge_set_priv_prop: name %s (value %d)", 4995 pr_name, result)); 4996 4997 return (err); 4998 } 4999 5000 /* Classification */ 5001 if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) { 5002 if (pr_val == NULL) { 5003 err = EINVAL; 5004 return (err); 5005 } 5006 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 5007 5008 err = nxge_param_set_ip_opt(nxgep, NULL, 5009 NULL, (char *)pr_val, 5010 (caddr_t)¶m_arr[param_class_opt_ipv4_tcp]); 5011 5012 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5013 "<== nxge_set_priv_prop: name %s (value 0x%x)", 5014 pr_name, result)); 5015 5016 return (err); 5017 } 5018 5019 if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) { 5020 if (pr_val == NULL) { 5021 err = EINVAL; 5022 return (err); 5023 } 5024 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 5025 5026 err = nxge_param_set_ip_opt(nxgep, NULL, 5027 NULL, (char *)pr_val, 5028 (caddr_t)¶m_arr[param_class_opt_ipv4_udp]); 5029 5030 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5031 "<== nxge_set_priv_prop: name %s (value 0x%x)", 5032 pr_name, result)); 5033 5034 return (err); 5035 } 5036 if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) { 5037 if (pr_val == NULL) { 5038 err = EINVAL; 5039 return (err); 5040 } 5041 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 5042 5043 err = nxge_param_set_ip_opt(nxgep, NULL, 5044 NULL, (char *)pr_val, 5045 (caddr_t)¶m_arr[param_class_opt_ipv4_ah]); 5046 5047 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5048 "<== nxge_set_priv_prop: name %s (value 0x%x)", 5049 pr_name, result)); 5050 5051 return (err); 5052 } 5053 if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) { 5054 if (pr_val == NULL) { 5055 err = EINVAL; 5056 return (err); 5057 } 5058 (void) ddi_strtol(pr_val, (char 
**)NULL, 0, &result); 5059 5060 err = nxge_param_set_ip_opt(nxgep, NULL, 5061 NULL, (char *)pr_val, 5062 (caddr_t)¶m_arr[param_class_opt_ipv4_sctp]); 5063 5064 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5065 "<== nxge_set_priv_prop: name %s (value 0x%x)", 5066 pr_name, result)); 5067 5068 return (err); 5069 } 5070 5071 if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) { 5072 if (pr_val == NULL) { 5073 err = EINVAL; 5074 return (err); 5075 } 5076 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 5077 5078 err = nxge_param_set_ip_opt(nxgep, NULL, 5079 NULL, (char *)pr_val, 5080 (caddr_t)¶m_arr[param_class_opt_ipv6_tcp]); 5081 5082 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5083 "<== nxge_set_priv_prop: name %s (value 0x%x)", 5084 pr_name, result)); 5085 5086 return (err); 5087 } 5088 5089 if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) { 5090 if (pr_val == NULL) { 5091 err = EINVAL; 5092 return (err); 5093 } 5094 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 5095 5096 err = nxge_param_set_ip_opt(nxgep, NULL, 5097 NULL, (char *)pr_val, 5098 (caddr_t)¶m_arr[param_class_opt_ipv6_udp]); 5099 5100 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5101 "<== nxge_set_priv_prop: name %s (value 0x%x)", 5102 pr_name, result)); 5103 5104 return (err); 5105 } 5106 if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) { 5107 if (pr_val == NULL) { 5108 err = EINVAL; 5109 return (err); 5110 } 5111 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 5112 5113 err = nxge_param_set_ip_opt(nxgep, NULL, 5114 NULL, (char *)pr_val, 5115 (caddr_t)¶m_arr[param_class_opt_ipv6_ah]); 5116 5117 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5118 "<== nxge_set_priv_prop: name %s (value 0x%x)", 5119 pr_name, result)); 5120 5121 return (err); 5122 } 5123 if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) { 5124 if (pr_val == NULL) { 5125 err = EINVAL; 5126 return (err); 5127 } 5128 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 5129 5130 err = nxge_param_set_ip_opt(nxgep, NULL, 5131 NULL, (char *)pr_val, 5132 (caddr_t)¶m_arr[param_class_opt_ipv6_sctp]); 5133 5134 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5135 "<== nxge_set_priv_prop: name %s (value 0x%x)", 5136 pr_name, result)); 5137 5138 return (err); 5139 } 5140 5141 if (strcmp(pr_name, "_soft_lso_enable") == 0) { 5142 if (nxgep->nxge_mac_state == NXGE_MAC_STARTED) { 5143 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5144 "==> nxge_set_priv_prop: name %s (busy)", pr_name)); 5145 err = EBUSY; 5146 return (err); 5147 } 5148 if (pr_val == NULL) { 5149 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5150 "==> nxge_set_priv_prop: name %s (null)", pr_name)); 5151 err = EINVAL; 5152 return (err); 5153 } 5154 5155 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 5156 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5157 "<== nxge_set_priv_prop: name %s " 5158 "(lso %d pr_val %s value %d)", 5159 pr_name, nxgep->soft_lso_enable, pr_val, result)); 5160 5161 if (result > 1 || result < 0) { 5162 err = EINVAL; 5163 } else { 5164 if (nxgep->soft_lso_enable == (uint32_t)result) { 5165 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5166 "no change (%d %d)", 5167 nxgep->soft_lso_enable, result)); 5168 return (0); 5169 } 5170 } 5171 5172 nxgep->soft_lso_enable = (int)result; 5173 5174 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5175 "<== nxge_set_priv_prop: name %s (value %d)", 5176 pr_name, result)); 5177 5178 return (err); 5179 } 5180 /* 5181 * Commands like "ndd -set /dev/nxge0 adv_10gfdx_cap 1" cause the 5182 * following code to be executed. 
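 *
 * The boolean private properties above (_accept_jumbo and
 * _soft_lso_enable) parse their string value with ddi_strtol() and
 * reject anything outside 0..1. The same validation in portable C,
 * with strtol() standing in for ddi_strtol() (hypothetical helper,
 * not driver code):
 */

/* --- illustrative sketch only; not part of the driver --- */
#include <errno.h>
#include <stdlib.h>

static int
parse_bool_prop(const char *val, long *resultp)
{
	long result = strtol(val, NULL, 0);	/* base 0: "1", "0x1", ... */

	if (result < 0 || result > 1)
		return (EINVAL);
	*resultp = result;
	return (0);
}
/* --- end of sketch --- */

/*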
5183 */ 5184 if (strcmp(pr_name, "_adv_10gfdx_cap") == 0) { 5185 err = nxge_param_set_mac(nxgep, NULL, NULL, (char *)pr_val, 5186 (caddr_t)¶m_arr[param_anar_10gfdx]); 5187 return (err); 5188 } 5189 if (strcmp(pr_name, "_adv_pause_cap") == 0) { 5190 err = nxge_param_set_mac(nxgep, NULL, NULL, (char *)pr_val, 5191 (caddr_t)¶m_arr[param_anar_pause]); 5192 return (err); 5193 } 5194 5195 return (EINVAL); 5196 } 5197 5198 static int 5199 nxge_get_priv_prop(p_nxge_t nxgep, const char *pr_name, uint_t pr_flags, 5200 uint_t pr_valsize, void *pr_val) 5201 { 5202 p_nxge_param_t param_arr = nxgep->param_arr; 5203 char valstr[MAXNAMELEN]; 5204 int err = EINVAL; 5205 uint_t strsize; 5206 boolean_t is_default = (pr_flags & MAC_PROP_DEFAULT); 5207 5208 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5209 "==> nxge_get_priv_prop: property %s", pr_name)); 5210 5211 /* function number */ 5212 if (strcmp(pr_name, "_function_number") == 0) { 5213 if (is_default) 5214 return (ENOTSUP); 5215 (void) snprintf(valstr, sizeof (valstr), "%d", 5216 nxgep->function_num); 5217 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5218 "==> nxge_get_priv_prop: name %s " 5219 "(value %d valstr %s)", 5220 pr_name, nxgep->function_num, valstr)); 5221 5222 err = 0; 5223 goto done; 5224 } 5225 5226 /* Neptune firmware version */ 5227 if (strcmp(pr_name, "_fw_version") == 0) { 5228 if (is_default) 5229 return (ENOTSUP); 5230 (void) snprintf(valstr, sizeof (valstr), "%s", 5231 nxgep->vpd_info.ver); 5232 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5233 "==> nxge_get_priv_prop: name %s " 5234 "(value %d valstr %s)", 5235 pr_name, nxgep->vpd_info.ver, valstr)); 5236 5237 err = 0; 5238 goto done; 5239 } 5240 5241 /* port PHY mode */ 5242 if (strcmp(pr_name, "_port_mode") == 0) { 5243 if (is_default) 5244 return (ENOTSUP); 5245 switch (nxgep->mac.portmode) { 5246 case PORT_1G_COPPER: 5247 (void) snprintf(valstr, sizeof (valstr), "1G copper %s", 5248 nxgep->hot_swappable_phy ? 5249 "[Hot Swappable]" : ""); 5250 break; 5251 case PORT_1G_FIBER: 5252 (void) snprintf(valstr, sizeof (valstr), "1G fiber %s", 5253 nxgep->hot_swappable_phy ? 5254 "[hot swappable]" : ""); 5255 break; 5256 case PORT_10G_COPPER: 5257 (void) snprintf(valstr, sizeof (valstr), 5258 "10G copper %s", 5259 nxgep->hot_swappable_phy ? 5260 "[hot swappable]" : ""); 5261 break; 5262 case PORT_10G_FIBER: 5263 (void) snprintf(valstr, sizeof (valstr), "10G fiber %s", 5264 nxgep->hot_swappable_phy ? 5265 "[hot swappable]" : ""); 5266 break; 5267 case PORT_10G_SERDES: 5268 (void) snprintf(valstr, sizeof (valstr), 5269 "10G serdes %s", nxgep->hot_swappable_phy ? 5270 "[hot swappable]" : ""); 5271 break; 5272 case PORT_1G_SERDES: 5273 (void) snprintf(valstr, sizeof (valstr), "1G serdes %s", 5274 nxgep->hot_swappable_phy ? 5275 "[hot swappable]" : ""); 5276 break; 5277 case PORT_1G_TN1010: 5278 (void) snprintf(valstr, sizeof (valstr), 5279 "1G TN1010 copper %s", nxgep->hot_swappable_phy ? 5280 "[hot swappable]" : ""); 5281 break; 5282 case PORT_10G_TN1010: 5283 (void) snprintf(valstr, sizeof (valstr), 5284 "10G TN1010 copper %s", nxgep->hot_swappable_phy ? 5285 "[hot swappable]" : ""); 5286 break; 5287 case PORT_1G_RGMII_FIBER: 5288 (void) snprintf(valstr, sizeof (valstr), 5289 "1G rgmii fiber %s", nxgep->hot_swappable_phy ? 5290 "[hot swappable]" : ""); 5291 break; 5292 case PORT_HSP_MODE: 5293 (void) snprintf(valstr, sizeof (valstr), 5294 "phy not present[hot swappable]"); 5295 break; 5296 default: 5297 (void) snprintf(valstr, sizeof (valstr), "unknown %s", 5298 nxgep->hot_swappable_phy ? 
5299 "[hot swappable]" : ""); 5300 break; 5301 } 5302 5303 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5304 "==> nxge_get_priv_prop: name %s (value %s)", 5305 pr_name, valstr)); 5306 5307 err = 0; 5308 goto done; 5309 } 5310 5311 /* Hot swappable PHY */ 5312 if (strcmp(pr_name, "_hot_swap_phy") == 0) { 5313 if (is_default) 5314 return (ENOTSUP); 5315 (void) snprintf(valstr, sizeof (valstr), "%s", 5316 nxgep->hot_swappable_phy ? 5317 "yes" : "no"); 5318 5319 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5320 "==> nxge_get_priv_prop: name %s " 5321 "(value %d valstr %s)", 5322 pr_name, nxgep->hot_swappable_phy, valstr)); 5323 5324 err = 0; 5325 goto done; 5326 } 5327 5328 5329 /* accept jumbo */ 5330 if (strcmp(pr_name, "_accept_jumbo") == 0) { 5331 if (is_default) 5332 (void) snprintf(valstr, sizeof (valstr), "%d", 0); 5333 else 5334 (void) snprintf(valstr, sizeof (valstr), 5335 "%d", nxgep->mac.is_jumbo); 5336 err = 0; 5337 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5338 "==> nxge_get_priv_prop: name %s (value %d (%d, %d))", 5339 pr_name, 5340 (uint32_t)param_arr[param_accept_jumbo].value, 5341 nxgep->mac.is_jumbo, 5342 nxge_jumbo_enable)); 5343 5344 goto done; 5345 } 5346 5347 /* Receive Interrupt Blanking Parameters */ 5348 if (strcmp(pr_name, "_rxdma_intr_time") == 0) { 5349 err = 0; 5350 if (is_default) { 5351 (void) snprintf(valstr, sizeof (valstr), 5352 "%d", RXDMA_RCR_TO_DEFAULT); 5353 goto done; 5354 } 5355 5356 (void) snprintf(valstr, sizeof (valstr), "%d", 5357 nxgep->intr_timeout); 5358 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5359 "==> nxge_get_priv_prop: name %s (value %d)", 5360 pr_name, 5361 (uint32_t)nxgep->intr_timeout)); 5362 goto done; 5363 } 5364 5365 if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) { 5366 err = 0; 5367 if (is_default) { 5368 (void) snprintf(valstr, sizeof (valstr), 5369 "%d", RXDMA_RCR_PTHRES_DEFAULT); 5370 goto done; 5371 } 5372 (void) snprintf(valstr, sizeof (valstr), "%d", 5373 nxgep->intr_threshold); 5374 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5375 "==> nxge_get_priv_prop: name %s (value %d)", 5376 pr_name, (uint32_t)nxgep->intr_threshold)); 5377 5378 goto done; 5379 } 5380 5381 /* Classification and Load Distribution Configuration */ 5382 if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) { 5383 if (is_default) { 5384 (void) snprintf(valstr, sizeof (valstr), "%x", 5385 NXGE_CLASS_FLOW_GEN_SERVER); 5386 err = 0; 5387 goto done; 5388 } 5389 err = nxge_dld_get_ip_opt(nxgep, 5390 (caddr_t)¶m_arr[param_class_opt_ipv4_tcp]); 5391 5392 (void) snprintf(valstr, sizeof (valstr), "%x", 5393 (int)param_arr[param_class_opt_ipv4_tcp].value); 5394 5395 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5396 "==> nxge_get_priv_prop: %s", valstr)); 5397 goto done; 5398 } 5399 5400 if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) { 5401 if (is_default) { 5402 (void) snprintf(valstr, sizeof (valstr), "%x", 5403 NXGE_CLASS_FLOW_GEN_SERVER); 5404 err = 0; 5405 goto done; 5406 } 5407 err = nxge_dld_get_ip_opt(nxgep, 5408 (caddr_t)¶m_arr[param_class_opt_ipv4_udp]); 5409 5410 (void) snprintf(valstr, sizeof (valstr), "%x", 5411 (int)param_arr[param_class_opt_ipv4_udp].value); 5412 5413 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5414 "==> nxge_get_priv_prop: %s", valstr)); 5415 goto done; 5416 } 5417 if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) { 5418 if (is_default) { 5419 (void) snprintf(valstr, sizeof (valstr), "%x", 5420 NXGE_CLASS_FLOW_GEN_SERVER); 5421 err = 0; 5422 goto done; 5423 } 5424 err = nxge_dld_get_ip_opt(nxgep, 5425 (caddr_t)¶m_arr[param_class_opt_ipv4_ah]); 5426 5427 (void) snprintf(valstr, sizeof (valstr), "%x", 5428 
(int)param_arr[param_class_opt_ipv4_ah].value); 5429 5430 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5431 "==> nxge_get_priv_prop: %s", valstr)); 5432 goto done; 5433 } 5434 5435 if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) { 5436 if (is_default) { 5437 (void) snprintf(valstr, sizeof (valstr), "%x", 5438 NXGE_CLASS_FLOW_GEN_SERVER); 5439 err = 0; 5440 goto done; 5441 } 5442 err = nxge_dld_get_ip_opt(nxgep, 5443 (caddr_t)¶m_arr[param_class_opt_ipv4_sctp]); 5444 5445 (void) snprintf(valstr, sizeof (valstr), "%x", 5446 (int)param_arr[param_class_opt_ipv4_sctp].value); 5447 5448 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5449 "==> nxge_get_priv_prop: %s", valstr)); 5450 goto done; 5451 } 5452 5453 if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) { 5454 if (is_default) { 5455 (void) snprintf(valstr, sizeof (valstr), "%x", 5456 NXGE_CLASS_FLOW_GEN_SERVER); 5457 err = 0; 5458 goto done; 5459 } 5460 err = nxge_dld_get_ip_opt(nxgep, 5461 (caddr_t)¶m_arr[param_class_opt_ipv6_tcp]); 5462 5463 (void) snprintf(valstr, sizeof (valstr), "%x", 5464 (int)param_arr[param_class_opt_ipv6_tcp].value); 5465 5466 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5467 "==> nxge_get_priv_prop: %s", valstr)); 5468 goto done; 5469 } 5470 5471 if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) { 5472 if (is_default) { 5473 (void) snprintf(valstr, sizeof (valstr), "%x", 5474 NXGE_CLASS_FLOW_GEN_SERVER); 5475 err = 0; 5476 goto done; 5477 } 5478 err = nxge_dld_get_ip_opt(nxgep, 5479 (caddr_t)¶m_arr[param_class_opt_ipv6_udp]); 5480 5481 (void) snprintf(valstr, sizeof (valstr), "%x", 5482 (int)param_arr[param_class_opt_ipv6_udp].value); 5483 5484 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5485 "==> nxge_get_priv_prop: %s", valstr)); 5486 goto done; 5487 } 5488 5489 if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) { 5490 if (is_default) { 5491 (void) snprintf(valstr, sizeof (valstr), "%x", 5492 NXGE_CLASS_FLOW_GEN_SERVER); 5493 err = 0; 5494 goto done; 5495 } 5496 err = nxge_dld_get_ip_opt(nxgep, 5497 (caddr_t)¶m_arr[param_class_opt_ipv6_ah]); 5498 5499 (void) snprintf(valstr, sizeof (valstr), "%x", 5500 (int)param_arr[param_class_opt_ipv6_ah].value); 5501 5502 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5503 "==> nxge_get_priv_prop: %s", valstr)); 5504 goto done; 5505 } 5506 5507 if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) { 5508 if (is_default) { 5509 (void) snprintf(valstr, sizeof (valstr), "%x", 5510 NXGE_CLASS_FLOW_GEN_SERVER); 5511 err = 0; 5512 goto done; 5513 } 5514 err = nxge_dld_get_ip_opt(nxgep, 5515 (caddr_t)¶m_arr[param_class_opt_ipv6_sctp]); 5516 5517 (void) snprintf(valstr, sizeof (valstr), "%x", 5518 (int)param_arr[param_class_opt_ipv6_sctp].value); 5519 5520 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5521 "==> nxge_get_priv_prop: %s", valstr)); 5522 goto done; 5523 } 5524 5525 /* Software LSO */ 5526 if (strcmp(pr_name, "_soft_lso_enable") == 0) { 5527 if (is_default) { 5528 (void) snprintf(valstr, sizeof (valstr), "%d", 0); 5529 err = 0; 5530 goto done; 5531 } 5532 (void) snprintf(valstr, sizeof (valstr), 5533 "%d", nxgep->soft_lso_enable); 5534 err = 0; 5535 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5536 "==> nxge_get_priv_prop: name %s (value %d)", 5537 pr_name, nxgep->soft_lso_enable)); 5538 5539 goto done; 5540 } 5541 if (strcmp(pr_name, "_adv_10gfdx_cap") == 0) { 5542 err = 0; 5543 if (is_default || 5544 nxgep->param_arr[param_anar_10gfdx].value != 0) { 5545 (void) snprintf(valstr, sizeof (valstr), "%d", 1); 5546 goto done; 5547 } else { 5548 (void) snprintf(valstr, sizeof (valstr), "%d", 0); 5549 goto done; 5550 } 5551 } 5552 if (strcmp(pr_name, "_adv_pause_cap") == 
0) { 5553 err = 0; 5554 if (is_default || 5555 nxgep->param_arr[param_anar_pause].value != 0) { 5556 (void) snprintf(valstr, sizeof (valstr), "%d", 1); 5557 goto done; 5558 } else { 5559 (void) snprintf(valstr, sizeof (valstr), "%d", 0); 5560 goto done; 5561 } 5562 } 5563 5564 done: 5565 if (err == 0) { 5566 strsize = (uint_t)strlen(valstr); 5567 if (pr_valsize < strsize) { 5568 err = ENOBUFS; 5569 } else { 5570 (void) strlcpy(pr_val, valstr, pr_valsize); 5571 } 5572 } 5573 5574 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5575 "<== nxge_get_priv_prop: return %d", err)); 5576 return (err); 5577 } 5578 5579 /* 5580 * Module loading and removing entry points. 5581 */ 5582 5583 DDI_DEFINE_STREAM_OPS(nxge_dev_ops, nulldev, nulldev, nxge_attach, nxge_detach, 5584 nodev, NULL, D_MP, NULL); 5585 5586 #define NXGE_DESC_VER "Sun NIU 10Gb Ethernet" 5587 5588 /* 5589 * Module linkage information for the kernel. 5590 */ 5591 static struct modldrv nxge_modldrv = { 5592 &mod_driverops, 5593 NXGE_DESC_VER, 5594 &nxge_dev_ops 5595 }; 5596 5597 static struct modlinkage modlinkage = { 5598 MODREV_1, (void *) &nxge_modldrv, NULL 5599 }; 5600 5601 int 5602 _init(void) 5603 { 5604 int status; 5605 5606 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _init")); 5607 mac_init_ops(&nxge_dev_ops, "nxge"); 5608 status = ddi_soft_state_init(&nxge_list, sizeof (nxge_t), 0); 5609 if (status != 0) { 5610 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 5611 "failed to init device soft state")); 5612 goto _init_exit; 5613 } 5614 status = mod_install(&modlinkage); 5615 if (status != 0) { 5616 ddi_soft_state_fini(&nxge_list); 5617 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "Mod install failed")); 5618 goto _init_exit; 5619 } 5620 5621 MUTEX_INIT(&nxge_common_lock, NULL, MUTEX_DRIVER, NULL); 5622 5623 _init_exit: 5624 NXGE_DEBUG_MSG((NULL, MOD_CTL, "_init status = 0x%X", status)); 5625 5626 return (status); 5627 } 5628 5629 int 5630 _fini(void) 5631 { 5632 int status; 5633 5634 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini")); 5635 5636 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini: mod_remove")); 5637 5638 if (nxge_mblks_pending) 5639 return (EBUSY); 5640 5641 status = mod_remove(&modlinkage); 5642 if (status != DDI_SUCCESS) { 5643 NXGE_DEBUG_MSG((NULL, MOD_CTL, 5644 "Module removal failed 0x%08x", 5645 status)); 5646 goto _fini_exit; 5647 } 5648 5649 mac_fini_ops(&nxge_dev_ops); 5650 5651 ddi_soft_state_fini(&nxge_list); 5652 5653 MUTEX_DESTROY(&nxge_common_lock); 5654 _fini_exit: 5655 NXGE_DEBUG_MSG((NULL, MOD_CTL, "_fini status = 0x%08x", status)); 5656 5657 return (status); 5658 } 5659 5660 int 5661 _info(struct modinfo *modinfop) 5662 { 5663 int status; 5664 5665 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _info")); 5666 status = mod_info(&modlinkage, modinfop); 5667 NXGE_DEBUG_MSG((NULL, MOD_CTL, " _info status = 0x%X", status)); 5668 5669 return (status); 5670 } 5671 5672 /*ARGSUSED*/ 5673 static nxge_status_t 5674 nxge_add_intrs(p_nxge_t nxgep) 5675 { 5676 5677 int intr_types; 5678 int type = 0; 5679 int ddi_status = DDI_SUCCESS; 5680 nxge_status_t status = NXGE_OK; 5681 5682 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs")); 5683 5684 nxgep->nxge_intr_type.intr_registered = B_FALSE; 5685 nxgep->nxge_intr_type.intr_enabled = B_FALSE; 5686 nxgep->nxge_intr_type.msi_intx_cnt = 0; 5687 nxgep->nxge_intr_type.intr_added = 0; 5688 nxgep->nxge_intr_type.niu_msi_enable = B_FALSE; 5689 nxgep->nxge_intr_type.intr_type = 0; 5690 5691 if (nxgep->niu_type == N2_NIU) { 5692 nxgep->nxge_intr_type.niu_msi_enable = B_TRUE; 5693 } else if (nxge_msi_enable) { 5694 
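		/*
		 * A nonzero nxge_msi_enable requests an advanced interrupt
		 * type; the switch below picks MSI for 1, MSI-X for 2, and
		 * falls back to fixed (INTx) interrupts for anything else.
		 */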
		nxgep->nxge_intr_type.niu_msi_enable = B_TRUE;
	}

	/* Get the supported interrupt types */
	if ((ddi_status = ddi_intr_get_supported_types(nxgep->dip, &intr_types))
	    != DDI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_add_intrs: "
		    "ddi_intr_get_supported_types failed: status 0x%08x",
		    ddi_status));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}
	nxgep->nxge_intr_type.intr_types = intr_types;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
	    "ddi_intr_get_supported_types: 0x%08x", intr_types));

	/*
	 * Solaris MSI-X is not supported yet; use MSI for now.
	 * nxge_msi_enable:
	 *	1 - MSI		2 - MSI-X	others - FIXED
	 */
	switch (nxge_msi_enable) {
	default:
		type = DDI_INTR_TYPE_FIXED;
		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
		    "use fixed (intx emulation) type %08x",
		    type));
		break;

	case 2:
		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
		    "ddi_intr_get_supported_types: 0x%08x", intr_types));
		if (intr_types & DDI_INTR_TYPE_MSIX) {
			type = DDI_INTR_TYPE_MSIX;
			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
			    "ddi_intr_get_supported_types: MSIX 0x%08x",
			    type));
		} else if (intr_types & DDI_INTR_TYPE_MSI) {
			type = DDI_INTR_TYPE_MSI;
			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
			    "ddi_intr_get_supported_types: MSI 0x%08x",
			    type));
		} else if (intr_types & DDI_INTR_TYPE_FIXED) {
			type = DDI_INTR_TYPE_FIXED;
			NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
			    "ddi_intr_get_supported_types: FIXED 0x%08x",
			    type));
		}
		break;

	case 1:
		if (intr_types & DDI_INTR_TYPE_MSI) {
			type = DDI_INTR_TYPE_MSI;
			NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
			    "ddi_intr_get_supported_types: MSI 0x%08x",
			    type));
		} else if (intr_types & DDI_INTR_TYPE_MSIX) {
			type = DDI_INTR_TYPE_MSIX;
			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
			    "ddi_intr_get_supported_types: MSIX 0x%08x",
			    type));
		} else if (intr_types & DDI_INTR_TYPE_FIXED) {
			type = DDI_INTR_TYPE_FIXED;
			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
			    "ddi_intr_get_supported_types: FIXED 0x%08x",
			    type));
		}
	}

	nxgep->nxge_intr_type.intr_type = type;
	if ((type == DDI_INTR_TYPE_MSIX || type == DDI_INTR_TYPE_MSI ||
	    type == DDI_INTR_TYPE_FIXED) &&
	    nxgep->nxge_intr_type.niu_msi_enable) {
		if ((status = nxge_add_intrs_adv(nxgep)) != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    " nxge_add_intrs: "
			    " nxge_add_intrs_adv failed: status 0x%08x",
			    status));
			return (status);
		} else {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
			    "interrupts registered : type %d", type));
			nxgep->nxge_intr_type.intr_registered = B_TRUE;

			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "\nAdded advanced nxge add_intr_adv "
			    "intr type 0x%x\n", type));

			return (status);
		}
	}

	if (!nxgep->nxge_intr_type.intr_registered) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_add_intrs: "
		    "failed to register interrupts"));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_add_intrs"));
	return (status);
}

/*ARGSUSED*/
static nxge_status_t
nxge_add_soft_intrs(p_nxge_t nxgep)
{
	int ddi_status = DDI_SUCCESS;
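	/*
	 * Note: the soft interrupt registered below (nxge_reschedule at
	 * DDI_SOFTINT_LOW) appears to be what the transmit path triggers
	 * to resume sends after a ring runs out of descriptors;
	 * resched_id and resched_running track its state.
	 */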
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_soft_intrs"));

	nxgep->resched_id = NULL;
	nxgep->resched_running = B_FALSE;
	ddi_status = ddi_add_softintr(nxgep->dip, DDI_SOFTINT_LOW,
	    &nxgep->resched_id,
	    NULL, NULL, nxge_reschedule, (caddr_t)nxgep);
	if (ddi_status != DDI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_add_soft_intrs: "
		    "ddi_add_softintr failed: status 0x%08x",
		    ddi_status));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_add_soft_intrs"));

	return (status);
}

static nxge_status_t
nxge_add_intrs_adv(p_nxge_t nxgep)
{
	int		intr_type;
	p_nxge_intr_t	intrp;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv"));

	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
	intr_type = intrp->intr_type;
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv: type 0x%x",
	    intr_type));

	switch (intr_type) {
	case DDI_INTR_TYPE_MSI:		/* 0x2 */
	case DDI_INTR_TYPE_MSIX:	/* 0x4 */
		return (nxge_add_intrs_adv_type(nxgep, intr_type));

	case DDI_INTR_TYPE_FIXED:	/* 0x1 */
		return (nxge_add_intrs_adv_type_fix(nxgep, intr_type));

	default:
		return (NXGE_ERROR);
	}
}


/*ARGSUSED*/
static nxge_status_t
nxge_add_intrs_adv_type(p_nxge_t nxgep, uint32_t int_type)
{
	dev_info_t	*dip = nxgep->dip;
	p_nxge_ldg_t	ldgp;
	p_nxge_intr_t	intrp;
	uint_t		*inthandler;
	void		*arg1, *arg2;
	int		behavior;
	int		nintrs, navail, nrequest;
	int		nactual, nrequired;
	int		inum = 0;
	int		x, y;
	int		ddi_status = DDI_SUCCESS;
	nxge_status_t	status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type"));
	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
	intrp->start_inum = 0;

	ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
	if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "ddi_intr_get_nintrs() failed, status: 0x%x, "
		    "nintrs: %d", ddi_status, nintrs));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
	if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "ddi_intr_get_navail() failed, status: 0x%x, "
		    "navail: %d", ddi_status, navail));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	NXGE_DEBUG_MSG((nxgep, INT_CTL,
	    "ddi_intr_get_navail() returned: nintrs %d, navail %d",
	    nintrs, navail));

	/* PSARC/2007/453 MSI-X interrupt limit override */
	if (int_type == DDI_INTR_TYPE_MSIX) {
		nrequest = nxge_create_msi_property(nxgep);
		if (nrequest < navail) {
			navail = nrequest;
			NXGE_DEBUG_MSG((nxgep, INT_CTL,
			    "nxge_add_intrs_adv_type: nintrs %d "
			    "navail %d (nrequest %d)",
			    nintrs, navail, nrequest));
		}
	}

	if (int_type == DDI_INTR_TYPE_MSI && !ISP2(navail)) {
		/* MSI must be power of 2 */
		if ((navail & 16) == 16) {
			navail = 16;
		} else if ((navail & 8) == 8) {
			navail = 8;
		} else if ((navail & 4) == 4) {
			navail = 4;
		} else if ((navail & 2) == 2) {
			navail = 2;
		} else {
			navail = 1;
		}
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "ddi_intr_get_navail(): (msi power of 2) nintrs %d, "
		    "navail %d", nintrs,

    behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
        DDI_INTR_ALLOC_NORMAL);
    intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
    intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP);
    ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
        navail, &nactual, behavior);
    if (ddi_status != DDI_SUCCESS || nactual == 0) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            " ddi_intr_alloc() failed: %d",
            ddi_status));
        kmem_free(intrp->htable, intrp->intr_size);
        return (NXGE_ERROR | NXGE_DDI_FAILED);
    }

    if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
        (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            " ddi_intr_get_pri() failed: %d",
            ddi_status));
        /* Free already allocated interrupts */
        for (y = 0; y < nactual; y++) {
            (void) ddi_intr_free(intrp->htable[y]);
        }

        kmem_free(intrp->htable, intrp->intr_size);
        return (NXGE_ERROR | NXGE_DDI_FAILED);
    }

    nrequired = 0;
    switch (nxgep->niu_type) {
    default:
        status = nxge_ldgv_init(nxgep, &nactual, &nrequired);
        break;

    case N2_NIU:
        status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired);
        break;
    }

    if (status != NXGE_OK) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            "nxge_add_intrs_adv_type: nxge_ldgv_init "
            "failed: 0x%x", status));
        /* Free already allocated interrupts */
        for (y = 0; y < nactual; y++) {
            (void) ddi_intr_free(intrp->htable[y]);
        }

        kmem_free(intrp->htable, intrp->intr_size);
        return (status);
    }

    ldgp = nxgep->ldgvp->ldgp;
    for (x = 0; x < nrequired; x++, ldgp++) {
        ldgp->vector = (uint8_t)x;
        ldgp->intdata = SID_DATA(ldgp->func, x);
        arg1 = ldgp->ldvp;
        arg2 = nxgep;
        if (ldgp->nldvs == 1) {
            inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
            NXGE_DEBUG_MSG((nxgep, INT_CTL,
                "nxge_add_intrs_adv_type: "
                "arg1 0x%x arg2 0x%x: "
                "1-1 int handler (entry %d intdata 0x%x)\n",
                arg1, arg2,
                x, ldgp->intdata));
        } else if (ldgp->nldvs > 1) {
            inthandler = (uint_t *)ldgp->sys_intr_handler;
            NXGE_DEBUG_MSG((nxgep, INT_CTL,
                "nxge_add_intrs_adv_type: "
                "arg1 0x%x arg2 0x%x: "
                "nldvs %d int handler "
                "(entry %d intdata 0x%x)\n",
                arg1, arg2,
                ldgp->nldvs, x, ldgp->intdata));
        }

        NXGE_DEBUG_MSG((nxgep, INT_CTL,
            "==> nxge_add_intrs_adv_type: ddi_add_intr(inum) #%d "
            "htable 0x%llx", x, intrp->htable[x]));

        if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
            (ddi_intr_handler_t *)inthandler, arg1, arg2))
            != DDI_SUCCESS) {
            NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
                "==> nxge_add_intrs_adv_type: failed #%d "
                "status 0x%x", x, ddi_status));
            /* Remove already added handlers */
            for (y = 0; y < intrp->intr_added; y++) {
                (void) ddi_intr_remove_handler(
                    intrp->htable[y]);
            }
            /* Free already allocated interrupts */
            for (y = 0; y < nactual; y++) {
                (void) ddi_intr_free(intrp->htable[y]);
            }
            kmem_free(intrp->htable, intrp->intr_size);

            (void) nxge_ldgv_uninit(nxgep);

            return (NXGE_ERROR | NXGE_DDI_FAILED);
        }
        intrp->intr_added++;
    }

    intrp->msi_intx_cnt = nactual;

    NXGE_DEBUG_MSG((nxgep, DDI_CTL,
        "Requested: %d, Allowed: %d msi_intx_cnt %d intr_added %d",
        navail, nactual,
        intrp->msi_intx_cnt,
        intrp->intr_added));

    (void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);

    (void) nxge_intr_ldgv_init(nxgep);

    NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type"));

    return (status);
}

/*ARGSUSED*/
static nxge_status_t
nxge_add_intrs_adv_type_fix(p_nxge_t nxgep, uint32_t int_type)
{
    dev_info_t      *dip = nxgep->dip;
    p_nxge_ldg_t    ldgp;
    p_nxge_intr_t   intrp;
    uint_t          *inthandler;
    void            *arg1, *arg2;
    int             behavior;
    int             nintrs, navail;
    int             nactual, nrequired;
    int             inum = 0;
    int             x, y;
    int             ddi_status = DDI_SUCCESS;
    nxge_status_t   status = NXGE_OK;

    NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type_fix"));
    intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
    intrp->start_inum = 0;

    ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
    if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            "ddi_intr_get_nintrs() failed, status: 0x%x, "
            "nintrs: %d", ddi_status, nintrs));
        return (NXGE_ERROR | NXGE_DDI_FAILED);
    }

    ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
    if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            "ddi_intr_get_navail() failed, status: 0x%x, "
            "navail: %d", ddi_status, navail));
        return (NXGE_ERROR | NXGE_DDI_FAILED);
    }

    NXGE_DEBUG_MSG((nxgep, INT_CTL,
        "ddi_intr_get_navail() returned: nintrs %d, navail %d",
        nintrs, navail));

    behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
        DDI_INTR_ALLOC_NORMAL);
    intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
    intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP);
    ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
        navail, &nactual, behavior);
    if (ddi_status != DDI_SUCCESS || nactual == 0) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            " ddi_intr_alloc() failed: %d",
            ddi_status));
        kmem_free(intrp->htable, intrp->intr_size);
        return (NXGE_ERROR | NXGE_DDI_FAILED);
    }

    if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
        (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            " ddi_intr_get_pri() failed: %d",
            ddi_status));
        /* Free already allocated interrupts */
        for (y = 0; y < nactual; y++) {
            (void) ddi_intr_free(intrp->htable[y]);
        }

        kmem_free(intrp->htable, intrp->intr_size);
        return (NXGE_ERROR | NXGE_DDI_FAILED);
    }

    nrequired = 0;
    switch (nxgep->niu_type) {
    default:
        status = nxge_ldgv_init(nxgep, &nactual, &nrequired);
        break;

    case N2_NIU:
        status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired);
        break;
    }

    if (status != NXGE_OK) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            "nxge_add_intrs_adv_type_fix: nxge_ldgv_init "
            "failed: 0x%x", status));
        /* Free already allocated interrupts */
        for (y = 0; y < nactual; y++) {
            (void) ddi_intr_free(intrp->htable[y]);
        }

        kmem_free(intrp->htable, intrp->intr_size);
        return (status);
    }

    ldgp = nxgep->ldgvp->ldgp;
    for (x = 0; x < nrequired; x++, ldgp++) {
        ldgp->vector = (uint8_t)x;
        if (nxgep->niu_type != N2_NIU) {
            ldgp->intdata = SID_DATA(ldgp->func, x);
        }

        arg1 = ldgp->ldvp;
        arg2 = nxgep;
        if (ldgp->nldvs == 1) {
            inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
            NXGE_DEBUG_MSG((nxgep, INT_CTL,
                "nxge_add_intrs_adv_type_fix: "
                "1-1 int handler(%d) ldg %d ldv %d "
                "arg1 $%p arg2 $%p\n",
                x, ldgp->ldg, ldgp->ldvp->ldv,
                arg1, arg2));
        } else if (ldgp->nldvs > 1) {
            inthandler = (uint_t *)ldgp->sys_intr_handler;
            NXGE_DEBUG_MSG((nxgep, INT_CTL,
                "nxge_add_intrs_adv_type_fix: "
                "shared ldv %d int handler(%d) ldv %d ldg %d "
                "arg1 0x%016llx arg2 0x%016llx\n",
                x, ldgp->nldvs, ldgp->ldg, ldgp->ldvp->ldv,
                arg1, arg2));
        }

        if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
            (ddi_intr_handler_t *)inthandler, arg1, arg2))
            != DDI_SUCCESS) {
            NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
                "==> nxge_add_intrs_adv_type_fix: failed #%d "
                "status 0x%x", x, ddi_status));
            /* Remove already added handlers */
            for (y = 0; y < intrp->intr_added; y++) {
                (void) ddi_intr_remove_handler(
                    intrp->htable[y]);
            }
            /* Free already allocated interrupts */
            for (y = 0; y < nactual; y++) {
                (void) ddi_intr_free(intrp->htable[y]);
            }
            kmem_free(intrp->htable, intrp->intr_size);

            (void) nxge_ldgv_uninit(nxgep);

            return (NXGE_ERROR | NXGE_DDI_FAILED);
        }
        intrp->intr_added++;
    }

    intrp->msi_intx_cnt = nactual;

    (void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);

    status = nxge_intr_ldgv_init(nxgep);
    NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type_fix"));

    return (status);
}

static void
nxge_remove_intrs(p_nxge_t nxgep)
{
    int             i, inum;
    p_nxge_intr_t   intrp;

    NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs"));
    intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
    if (!intrp->intr_registered) {
        NXGE_DEBUG_MSG((nxgep, INT_CTL,
            "<== nxge_remove_intrs: interrupts not registered"));
        return;
    }

    NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs: advanced"));

    if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
        (void) ddi_intr_block_disable(intrp->htable,
            intrp->intr_added);
    } else {
        for (i = 0; i < intrp->intr_added; i++) {
            (void) ddi_intr_disable(intrp->htable[i]);
        }
    }

    for (inum = 0; inum < intrp->intr_added; inum++) {
        if (intrp->htable[inum]) {
            (void) ddi_intr_remove_handler(intrp->htable[inum]);
        }
    }

    for (inum = 0; inum < intrp->msi_intx_cnt; inum++) {
        if (intrp->htable[inum]) {
            NXGE_DEBUG_MSG((nxgep, DDI_CTL,
                "nxge_remove_intrs: ddi_intr_free inum %d "
                "msi_intx_cnt %d intr_added %d",
                inum,
                intrp->msi_intx_cnt,
                intrp->intr_added));

            (void) ddi_intr_free(intrp->htable[inum]);
        }
    }

    kmem_free(intrp->htable, intrp->intr_size);
    intrp->intr_registered = B_FALSE;
    intrp->intr_enabled = B_FALSE;
    intrp->msi_intx_cnt = 0;
    intrp->intr_added = 0;

    (void) nxge_ldgv_uninit(nxgep);

    (void) ddi_prop_remove(DDI_DEV_T_NONE, nxgep->dip,
        "#msix-request");

    NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_remove_intrs"));
}

/*ARGSUSED*/
static void
nxge_remove_soft_intrs(p_nxge_t nxgep)
{
    NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_soft_intrs"));
    if (nxgep->resched_id) {
        ddi_remove_softintr(nxgep->resched_id);
        NXGE_DEBUG_MSG((nxgep, INT_CTL,
            "==> nxge_remove_soft_intrs: removed"));
        nxgep->resched_id = NULL;
    }

    NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_remove_soft_intrs"));
}

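/*
 * Enable all registered interrupts for this instance: as a single
 * block operation when the capability flags gathered at allocation
 * time include DDI_INTR_FLAG_BLOCK, otherwise one handle at a time.
 */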
/*ARGSUSED*/
static void
nxge_intrs_enable(p_nxge_t nxgep)
{
    p_nxge_intr_t   intrp;
    int             i;
    int             status;

    NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable"));

    intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;

    if (!intrp->intr_registered) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_intrs_enable: "
            "interrupts are not registered"));
        return;
    }

    if (intrp->intr_enabled) {
        NXGE_DEBUG_MSG((nxgep, INT_CTL,
            "<== nxge_intrs_enable: already enabled"));
        return;
    }

    if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
        status = ddi_intr_block_enable(intrp->htable,
            intrp->intr_added);
        NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable "
            "block enable - status 0x%x total inums #%d\n",
            status, intrp->intr_added));
        /* Record success in the block path as well */
        if (status == DDI_SUCCESS) {
            intrp->intr_enabled = B_TRUE;
        }
    } else {
        for (i = 0; i < intrp->intr_added; i++) {
            status = ddi_intr_enable(intrp->htable[i]);
            NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable "
                "ddi_intr_enable: status 0x%x "
                "total inums %d enable inum #%d\n",
                status, intrp->intr_added, i));
            if (status == DDI_SUCCESS) {
                intrp->intr_enabled = B_TRUE;
            }
        }
    }

    NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_enable"));
}

/*ARGSUSED*/
static void
nxge_intrs_disable(p_nxge_t nxgep)
{
    p_nxge_intr_t   intrp;
    int             i;

    NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_disable"));

    intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;

    if (!intrp->intr_registered) {
        NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable: "
            "interrupts are not registered"));
        return;
    }

    if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
        (void) ddi_intr_block_disable(intrp->htable,
            intrp->intr_added);
    } else {
        for (i = 0; i < intrp->intr_added; i++) {
            (void) ddi_intr_disable(intrp->htable[i]);
        }
    }

    intrp->intr_enabled = B_FALSE;
    NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable"));
}

static nxge_status_t
nxge_mac_register(p_nxge_t nxgep)
{
    mac_register_t  *macp;
    int             status;

    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_mac_register"));

    if ((macp = mac_alloc(MAC_VERSION)) == NULL)
        return (NXGE_ERROR);

    macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
    macp->m_driver = nxgep;
    macp->m_dip = nxgep->dip;
    macp->m_src_addr = nxgep->ouraddr.ether_addr_octet;
    macp->m_callbacks = &nxge_m_callbacks;
    macp->m_min_sdu = 0;
    nxgep->mac.default_mtu = nxgep->mac.maxframesize -
        NXGE_EHEADER_VLAN_CRC;
    macp->m_max_sdu = nxgep->mac.default_mtu;
    macp->m_margin = VLAN_TAGSZ;
    macp->m_priv_props = nxge_priv_props;
    macp->m_priv_prop_count = NXGE_MAX_PRIV_PROPS;

    NXGE_DEBUG_MSG((nxgep, MAC_CTL,
        "==> nxge_mac_register: instance %d "
        "max_sdu %d margin %d maxframe %d (header %d)",
        nxgep->instance,
        macp->m_max_sdu, macp->m_margin,
        nxgep->mac.maxframesize,
        NXGE_EHEADER_VLAN_CRC));

    status = mac_register(macp, &nxgep->mach);
    mac_free(macp);

    if (status != 0) {
        cmn_err(CE_WARN,
            "!nxge_mac_register failed (status %d instance %d)",
            status, nxgep->instance);
        return (NXGE_ERROR);
    }

    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_mac_register success "
        "(instance %d)", nxgep->instance));

    return (NXGE_OK);
}

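/*
 * Error-injection entry point, driven from an ioctl: reads the block
 * id, error id and channel from the err_inject_t in the message
 * continuation block and forwards the request to the matching
 * per-block injection routine.
 */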
void
nxge_err_inject(p_nxge_t nxgep, queue_t *wq, mblk_t *mp)
{
    ssize_t         size;
    mblk_t          *nmp;
    uint8_t         blk_id;
    uint8_t         chan;
    uint32_t        err_id;
    err_inject_t    *eip;

    NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_err_inject"));

    size = 1024;
    nmp = mp->b_cont;
    eip = (err_inject_t *)nmp->b_rptr;
    blk_id = eip->blk_id;
    err_id = eip->err_id;
    chan = eip->chan;
    cmn_err(CE_NOTE, "!blk_id = 0x%x\n", blk_id);
    cmn_err(CE_NOTE, "!err_id = 0x%x\n", err_id);
    cmn_err(CE_NOTE, "!chan = 0x%x\n", chan);
    switch (blk_id) {
    case MAC_BLK_ID:
        break;
    case TXMAC_BLK_ID:
        break;
    case RXMAC_BLK_ID:
        break;
    case MIF_BLK_ID:
        break;
    case IPP_BLK_ID:
        nxge_ipp_inject_err(nxgep, err_id);
        break;
    case TXC_BLK_ID:
        nxge_txc_inject_err(nxgep, err_id);
        break;
    case TXDMA_BLK_ID:
        nxge_txdma_inject_err(nxgep, err_id, chan);
        break;
    case RXDMA_BLK_ID:
        nxge_rxdma_inject_err(nxgep, err_id, chan);
        break;
    case ZCP_BLK_ID:
        nxge_zcp_inject_err(nxgep, err_id);
        break;
    case ESPC_BLK_ID:
        break;
    case FFLP_BLK_ID:
        break;
    case PHY_BLK_ID:
        break;
    case ETHER_SERDES_BLK_ID:
        break;
    case PCIE_SERDES_BLK_ID:
        break;
    case VIR_BLK_ID:
        break;
    }

    nmp->b_wptr = nmp->b_rptr + size;
    NXGE_DEBUG_MSG((nxgep, STR_CTL, "<== nxge_err_inject"));

    miocack(wq, mp, (int)size, 0);
}

static int
nxge_init_common_dev(p_nxge_t nxgep)
{
    p_nxge_hw_list_t    hw_p;
    dev_info_t          *p_dip;

    NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_init_common_device"));

    p_dip = nxgep->p_dip;
    MUTEX_ENTER(&nxge_common_lock);
    NXGE_DEBUG_MSG((nxgep, MOD_CTL,
        "==> nxge_init_common_dev: func # %d",
        nxgep->function_num));
    /*
     * Loop through the existing per-Neptune hardware list.
     */
    for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) {
        NXGE_DEBUG_MSG((nxgep, MOD_CTL,
            "==> nxge_init_common_device: func # %d "
            "hw_p $%p parent dip $%p",
            nxgep->function_num,
            hw_p,
            p_dip));
        if (hw_p->parent_devp == p_dip) {
            nxgep->nxge_hw_p = hw_p;
            hw_p->ndevs++;
            hw_p->nxge_p[nxgep->function_num] = nxgep;
            NXGE_DEBUG_MSG((nxgep, MOD_CTL,
                "==> nxge_init_common_device: func # %d "
                "hw_p $%p parent dip $%p "
                "ndevs %d (found)",
                nxgep->function_num,
                hw_p,
                p_dip,
                hw_p->ndevs));
            break;
        }
    }

    if (hw_p == NULL) {
        NXGE_DEBUG_MSG((nxgep, MOD_CTL,
            "==> nxge_init_common_device: func # %d "
            "parent dip $%p (new)",
            nxgep->function_num,
            p_dip));
        hw_p = kmem_zalloc(sizeof (nxge_hw_list_t), KM_SLEEP);
        hw_p->parent_devp = p_dip;
        hw_p->magic = NXGE_NEPTUNE_MAGIC;
        nxgep->nxge_hw_p = hw_p;
        hw_p->ndevs++;
        hw_p->nxge_p[nxgep->function_num] = nxgep;
        hw_p->next = nxge_hw_list;
        if (nxgep->niu_type == N2_NIU) {
            hw_p->niu_type = N2_NIU;
            hw_p->platform_type = P_NEPTUNE_NIU;
        } else {
            hw_p->niu_type = NIU_TYPE_NONE;
            hw_p->platform_type = P_NEPTUNE_NONE;
        }

        MUTEX_INIT(&hw_p->nxge_cfg_lock, NULL, MUTEX_DRIVER, NULL);
        MUTEX_INIT(&hw_p->nxge_tcam_lock, NULL, MUTEX_DRIVER, NULL);
        MUTEX_INIT(&hw_p->nxge_vlan_lock, NULL, MUTEX_DRIVER, NULL);
        MUTEX_INIT(&hw_p->nxge_mdio_lock, NULL, MUTEX_DRIVER, NULL);

        nxge_hw_list = hw_p;

        (void) nxge_scan_ports_phy(nxgep, nxge_hw_list);
    }

    MUTEX_EXIT(&nxge_common_lock);

    nxgep->platform_type = hw_p->platform_type;
    if (nxgep->niu_type != N2_NIU) {
        nxgep->niu_type = hw_p->niu_type;
    }

    NXGE_DEBUG_MSG((nxgep, MOD_CTL,
        "==> nxge_init_common_device (nxge_hw_list) $%p",
        nxge_hw_list));
    NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<== nxge_init_common_device"));

    return (NXGE_OK);
}

static void
nxge_uninit_common_dev(p_nxge_t nxgep)
{
    p_nxge_hw_list_t    hw_p, h_hw_p;
    p_nxge_dma_pt_cfg_t p_dma_cfgp;
    p_nxge_hw_pt_cfg_t  p_cfgp;
    dev_info_t          *p_dip;

    NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_uninit_common_device"));
    if (nxgep->nxge_hw_p == NULL) {
        NXGE_DEBUG_MSG((nxgep, MOD_CTL,
            "<== nxge_uninit_common_device (no common)"));
        return;
    }

    MUTEX_ENTER(&nxge_common_lock);
    h_hw_p = nxge_hw_list;
    for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) {
        p_dip = hw_p->parent_devp;
        if (nxgep->nxge_hw_p == hw_p &&
            p_dip == nxgep->p_dip &&
            nxgep->nxge_hw_p->magic == NXGE_NEPTUNE_MAGIC &&
            hw_p->magic == NXGE_NEPTUNE_MAGIC) {

            NXGE_DEBUG_MSG((nxgep, MOD_CTL,
                "==> nxge_uninit_common_device: func # %d "
                "hw_p $%p parent dip $%p "
                "ndevs %d (found)",
                nxgep->function_num,
                hw_p,
                p_dip,
                hw_p->ndevs));

            /*
             * Release the RDC table, a shared resource
             * of the nxge hardware. The RDC table was
             * assigned to this instance of nxge in
             * nxge_use_cfg_dma_config().
             */
            if (!isLDOMguest(nxgep)) {
                p_dma_cfgp =
                    (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
                p_cfgp =
                    (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
                (void) nxge_fzc_rdc_tbl_unbind(nxgep,
                    p_cfgp->def_mac_rxdma_grpid);
            }

            if (hw_p->ndevs) {
                hw_p->ndevs--;
            }
            hw_p->nxge_p[nxgep->function_num] = NULL;
            if (!hw_p->ndevs) {
                MUTEX_DESTROY(&hw_p->nxge_vlan_lock);
                MUTEX_DESTROY(&hw_p->nxge_tcam_lock);
                MUTEX_DESTROY(&hw_p->nxge_cfg_lock);
                MUTEX_DESTROY(&hw_p->nxge_mdio_lock);
                NXGE_DEBUG_MSG((nxgep, MOD_CTL,
                    "==> nxge_uninit_common_device: "
                    "func # %d "
                    "hw_p $%p parent dip $%p "
                    "ndevs %d (last)",
                    nxgep->function_num,
                    hw_p,
                    p_dip,
                    hw_p->ndevs));

                nxge_hio_uninit(nxgep);

                if (hw_p == nxge_hw_list) {
                    NXGE_DEBUG_MSG((nxgep, MOD_CTL,
                        "==> nxge_uninit_common_device: "
                        "remove head func # %d "
                        "hw_p $%p parent dip $%p "
                        "ndevs %d (head)",
                        nxgep->function_num,
                        hw_p,
                        p_dip,
                        hw_p->ndevs));
                    nxge_hw_list = hw_p->next;
                } else {
                    NXGE_DEBUG_MSG((nxgep, MOD_CTL,
                        "==> nxge_uninit_common_device: "
                        "remove middle func # %d "
                        "hw_p $%p parent dip $%p "
                        "ndevs %d (middle)",
                        nxgep->function_num,
                        hw_p,
                        p_dip,
                        hw_p->ndevs));
                    h_hw_p->next = hw_p->next;
                }

                nxgep->nxge_hw_p = NULL;
                KMEM_FREE(hw_p, sizeof (nxge_hw_list_t));
            }
            break;
        } else {
            h_hw_p = hw_p;
        }
    }

    MUTEX_EXIT(&nxge_common_lock);
    NXGE_DEBUG_MSG((nxgep, MOD_CTL,
        "==> nxge_uninit_common_device (nxge_hw_list) $%p",
        nxge_hw_list));

    NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<== nxge_uninit_common_device"));
}

/*
 * Determine the number of ports from the niu_type or the platform type.
 * Returns the number of ports, or zero on failure.
 */
int
nxge_get_nports(p_nxge_t nxgep)
{
    int nports = 0;

    switch (nxgep->niu_type) {
    case N2_NIU:
    case NEPTUNE_2_10GF:
        nports = 2;
        break;
    case NEPTUNE_4_1GC:
    case NEPTUNE_2_10GF_2_1GC:
    case NEPTUNE_1_10GF_3_1GC:
    case NEPTUNE_1_1GC_1_10GF_2_1GC:
    case NEPTUNE_2_10GF_2_1GRF:
        nports = 4;
        break;
    default:
        switch (nxgep->platform_type) {
        case P_NEPTUNE_NIU:
        case P_NEPTUNE_ATLAS_2PORT:
            nports = 2;
            break;
        case P_NEPTUNE_ATLAS_4PORT:
        case P_NEPTUNE_MARAMBA_P0:
        case P_NEPTUNE_MARAMBA_P1:
        case P_NEPTUNE_ALONSO:
            nports = 4;
            break;
        default:
            break;
        }
        break;
    }

    return (nports);
}

/*
 * The following two functions are to support
 * PSARC/2007/453 MSI-X interrupt limit override.
 */
static int
nxge_create_msi_property(p_nxge_t nxgep)
{
    int nmsi;
    extern int ncpus;

    NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_create_msi_property"));

    switch (nxgep->mac.portmode) {
    case PORT_10G_COPPER:
    case PORT_10G_FIBER:
    case PORT_10G_TN1010:
        (void) ddi_prop_create(DDI_DEV_T_NONE, nxgep->dip,
            DDI_PROP_CANSLEEP, "#msix-request", NULL, 0);
        /*
         * The maximum number of MSI-X vectors requested is 8.
         * If the system has fewer than 8 CPUs, request one
         * MSI-X vector per CPU instead.
         */
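        /*
         * For example, a 4-CPU system requests 4 MSI-X vectors
         * here, while a system with 8 or more CPUs requests
         * NXGE_MSIX_REQUEST_10G (8) vectors.
         */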
        if (ncpus >= NXGE_MSIX_REQUEST_10G) {
            nmsi = NXGE_MSIX_REQUEST_10G;
        } else {
            nmsi = ncpus;
        }
        NXGE_DEBUG_MSG((nxgep, MOD_CTL,
            "==> nxge_create_msi_property(10G): exists 0x%x (nmsi %d)",
            ddi_prop_exists(DDI_DEV_T_NONE, nxgep->dip,
            DDI_PROP_CANSLEEP, "#msix-request"), nmsi));
        break;

    default:
        nmsi = NXGE_MSIX_REQUEST_1G;
        NXGE_DEBUG_MSG((nxgep, MOD_CTL,
            "==> nxge_create_msi_property(1G): exists 0x%x (nmsi %d)",
            ddi_prop_exists(DDI_DEV_T_NONE, nxgep->dip,
            DDI_PROP_CANSLEEP, "#msix-request"), nmsi));
        break;
    }

    NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<== nxge_create_msi_property"));
    return (nmsi);
}

/* ARGSUSED */
static int
nxge_get_def_val(nxge_t *nxgep, mac_prop_id_t pr_num, uint_t pr_valsize,
    void *pr_val)
{
    int             err = 0;
    link_flowctrl_t fl;

    switch (pr_num) {
    case MAC_PROP_AUTONEG:
        *(uint8_t *)pr_val = 1;
        break;
    case MAC_PROP_FLOWCTRL:
        if (pr_valsize < sizeof (link_flowctrl_t))
            return (EINVAL);
        fl = LINK_FLOWCTRL_RX;
        bcopy(&fl, pr_val, sizeof (fl));
        break;
    case MAC_PROP_ADV_1000FDX_CAP:
    case MAC_PROP_EN_1000FDX_CAP:
        *(uint8_t *)pr_val = 1;
        break;
    case MAC_PROP_ADV_100FDX_CAP:
    case MAC_PROP_EN_100FDX_CAP:
        *(uint8_t *)pr_val = 1;
        break;
    default:
        err = ENOTSUP;
        break;
    }
    return (err);
}


/*
 * The following is a software workaround for a Neptune hardware
 * interrupt bug: the hardware may generate spurious interrupts after
 * an interrupt handler has been removed.
 */
#define NXGE_PCI_PORT_LOGIC_OFFSET  0x98
#define NXGE_PIM_RESET              (1ULL << 29)
#define NXGE_GLU_RESET              (1ULL << 30)
#define NXGE_NIU_RESET              (1ULL << 31)
#define NXGE_PCI_RESET_ALL          (NXGE_PIM_RESET |   \
                                    NXGE_GLU_RESET |    \
                                    NXGE_NIU_RESET)

#define NXGE_WAIT_QUITE_TIME        200000
#define NXGE_WAIT_QUITE_RETRY       40
#define NXGE_PCI_RESET_WAIT         1000000 /* one second */

static void
nxge_niu_peu_reset(p_nxge_t nxgep)
{
    uint32_t            rvalue;
    p_nxge_hw_list_t    hw_p;
    p_nxge_t            fnxgep;
    int                 i, j;

    NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_niu_peu_reset"));
    if ((hw_p = nxgep->nxge_hw_p) == NULL) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            "==> nxge_niu_peu_reset: NULL hardware pointer"));
        return;
    }

    NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
        "==> nxge_niu_peu_reset: flags 0x%x link timer id %d timer id %d",
        hw_p->flags, nxgep->nxge_link_poll_timerid,
        nxgep->nxge_timerid));

    MUTEX_ENTER(&hw_p->nxge_cfg_lock);
    /*
     * Make sure the other instances on the same hardware have
     * stopped sending PIOs and are in a quiescent state.
     */
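    /*
     * Each busy peer is polled up to NXGE_WAIT_QUITE_RETRY (40) times
     * with a NXGE_WAIT_QUITE_TIME (200000 microsecond, i.e. 200 ms)
     * delay between polls, so a still-busy instance is given roughly
     * 40 * 200 ms = 8 seconds to go quiet before the reset is
     * abandoned.
     */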
    for (i = 0; i < NXGE_MAX_PORTS; i++) {
        fnxgep = hw_p->nxge_p[i];
        NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
            "==> nxge_niu_peu_reset: checking entry %d "
            "nxgep $%p", i, fnxgep));
#ifdef NXGE_DEBUG
        if (fnxgep) {
            NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
                "==> nxge_niu_peu_reset: entry %d (function %d) "
                "link timer id %d hw timer id %d",
                i, fnxgep->function_num,
                fnxgep->nxge_link_poll_timerid,
                fnxgep->nxge_timerid));
        }
#endif
        if (fnxgep && fnxgep != nxgep &&
            (fnxgep->nxge_timerid || fnxgep->nxge_link_poll_timerid)) {
            NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
                "==> nxge_niu_peu_reset: checking $%p "
                "(function %d) timer ids",
                fnxgep, fnxgep->function_num));
            for (j = 0; j < NXGE_WAIT_QUITE_RETRY; j++) {
                NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
                    "==> nxge_niu_peu_reset: waiting"));
                NXGE_DELAY(NXGE_WAIT_QUITE_TIME);
                if (!fnxgep->nxge_timerid &&
                    !fnxgep->nxge_link_poll_timerid) {
                    break;
                }
            }
            NXGE_DELAY(NXGE_WAIT_QUITE_TIME);
            if (fnxgep->nxge_timerid ||
                fnxgep->nxge_link_poll_timerid) {
                MUTEX_EXIT(&hw_p->nxge_cfg_lock);
                NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
                    "<== nxge_niu_peu_reset: cannot reset "
                    "hardware (devices are still in use)"));
                return;
            }
        }
    }

    if ((hw_p->flags & COMMON_RESET_NIU_PCI) != COMMON_RESET_NIU_PCI) {
        hw_p->flags |= COMMON_RESET_NIU_PCI;
        rvalue = pci_config_get32(nxgep->dev_regs->nxge_pciregh,
            NXGE_PCI_PORT_LOGIC_OFFSET);
        NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
            "nxge_niu_peu_reset: read offset 0x%x (%d) "
            "(data 0x%x)",
            NXGE_PCI_PORT_LOGIC_OFFSET,
            NXGE_PCI_PORT_LOGIC_OFFSET,
            rvalue));

        rvalue |= NXGE_PCI_RESET_ALL;
        pci_config_put32(nxgep->dev_regs->nxge_pciregh,
            NXGE_PCI_PORT_LOGIC_OFFSET, rvalue);
        NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
            "nxge_niu_peu_reset: RESETTING NIU: write NIU reset 0x%x",
            rvalue));

        NXGE_DELAY(NXGE_PCI_RESET_WAIT);
    }

    MUTEX_EXIT(&hw_p->nxge_cfg_lock);
    NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_niu_peu_reset"));
}

static void
nxge_set_pci_replay_timeout(p_nxge_t nxgep)
{
    p_dev_regs_t    dev_regs;
    uint32_t        value;

    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_set_pci_replay_timeout"));

    if (!nxge_set_replay_timer) {
        NXGE_DEBUG_MSG((nxgep, DDI_CTL,
            "==> nxge_set_pci_replay_timeout: will not change "
            "the timeout"));
        return;
    }

    dev_regs = nxgep->dev_regs;

    /* Validate the handle before it is dereferenced below */
    if (dev_regs == NULL || (dev_regs->nxge_pciregh == NULL)) {
        NXGE_DEBUG_MSG((nxgep, DDI_CTL,
            "==> nxge_set_pci_replay_timeout: NULL dev_regs $%p or "
            "no PCI handle",
            dev_regs));
        return;
    }

    NXGE_DEBUG_MSG((nxgep, DDI_CTL,
        "==> nxge_set_pci_replay_timeout: dev_regs 0x%p pcireg 0x%p",
        dev_regs, dev_regs->nxge_pciregh));

    value = (pci_config_get32(dev_regs->nxge_pciregh,
        PCI_REPLAY_TIMEOUT_CFG_OFFSET) |
        (nxge_replay_timeout << PCI_REPLAY_TIMEOUT_SHIFT));

    NXGE_DEBUG_MSG((nxgep, DDI_CTL,
        "nxge_set_pci_replay_timeout: replay timeout value before set 0x%x "
        "(timeout value to set 0x%x at offset 0x%x) value 0x%x",
        pci_config_get32(dev_regs->nxge_pciregh,
        PCI_REPLAY_TIMEOUT_CFG_OFFSET), nxge_replay_timeout,
        PCI_REPLAY_TIMEOUT_CFG_OFFSET, value));

    pci_config_put32(dev_regs->nxge_pciregh, PCI_REPLAY_TIMEOUT_CFG_OFFSET,
        value);
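
    /*
     * The timeout field is updated with a read-modify-write OR, so any
     * bits already set within the field are preserved rather than
     * cleared; the re-read below confirms the value the device latched.
     */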
    NXGE_DEBUG_MSG((nxgep, DDI_CTL,
        "nxge_set_pci_replay_timeout: replay timeout value after set 0x%x",
        pci_config_get32(dev_regs->nxge_pciregh,
        PCI_REPLAY_TIMEOUT_CFG_OFFSET)));

    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_set_pci_replay_timeout"));
}