/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * SunOs MT STREAMS NIU/Neptune 10Gb Ethernet Device Driver.
 */
#include	<sys/nxge/nxge_impl.h>
#include	<sys/nxge/nxge_hio.h>
#include	<sys/nxge/nxge_rxdma.h>
#include	<sys/pcie.h>

uint32_t	nxge_use_partition = 0;		/* debug partition flag */
uint32_t	nxge_dma_obp_props_only = 1;	/* use obp published props */
uint32_t	nxge_use_rdc_intr = 1;		/* debug to assign rdc intr */
/*
 * PSARC/2007/453 MSI-X interrupt limit override
 * (This PSARC case is limited to MSI-X vectors
 *  and SPARC platforms only).
 */
#if defined(_BIG_ENDIAN)
uint32_t	nxge_msi_enable = 2;
#else
uint32_t	nxge_msi_enable = 1;
#endif

/*
 * Software workaround for a Neptune (PCI-E)
 * hardware interrupt bug: the hardware
 * may generate spurious interrupts after the
 * device interrupt handler was removed. If this flag
 * is enabled, the driver will reset the
 * hardware when devices are being detached.
 */
uint32_t	nxge_peu_reset_enable = 0;

/*
 * Software workaround for the hardware
 * checksum bugs that affect packet transmission
 * and receive:
 *
 * Usage of nxge_cksum_offload:
 *
 *  (1) nxge_cksum_offload = 0 (default):
 *	- transmits packets:
 *	  TCP: uses the hardware checksum feature.
 *	  UDP: driver will compute the software checksum
 *	       based on the partial checksum computed
 *	       by the IP layer.
 *	- receives packets
 *	  TCP: marks packets checksum flags based on hardware result.
 *	  UDP: will not mark checksum flags.
 *
 *  (2) nxge_cksum_offload = 1:
 *	- transmit packets:
 *	  TCP/UDP: uses the hardware checksum feature.
 *	- receives packets
 *	  TCP/UDP: marks packet checksum flags based on hardware result.
 *
 *  (3) nxge_cksum_offload = 2:
 *	- The driver will not register its checksum capability.
 *	  Checksum for both TCP and UDP will be computed
 *	  by the stack.
 *	- The software LSO is not allowed in this case.
 *
 *  (4) nxge_cksum_offload > 2:
 *	- Will be treated as it is set to 2
 *	  (stack will compute the checksum).
 *
 *  (5) If the hardware bug is fixed, this workaround
 *	needs to be updated accordingly to reflect
 *	the new hardware revision.
 */
uint32_t	nxge_cksum_offload = 0;

/*
 * Globals: tunable parameters (/etc/system or adb)
 *
 */
uint32_t	nxge_rbr_size = NXGE_RBR_RBB_DEFAULT;
uint32_t	nxge_rbr_spare_size = 0;
uint32_t	nxge_rcr_size = NXGE_RCR_DEFAULT;
uint32_t	nxge_tx_ring_size = NXGE_TX_RING_DEFAULT;
boolean_t	nxge_no_msg = B_TRUE;		/* control message display */
uint32_t	nxge_no_link_notify = 0;	/* control DL_NOTIFY */
uint32_t	nxge_bcopy_thresh = TX_BCOPY_MAX;
uint32_t	nxge_dvma_thresh = TX_FASTDVMA_MIN;
uint32_t	nxge_dma_stream_thresh = TX_STREAM_MIN;
uint32_t	nxge_jumbo_mtu = TX_JUMBO_MTU;
boolean_t	nxge_jumbo_enable = B_FALSE;
uint16_t	nxge_rcr_timeout = NXGE_RDC_RCR_TIMEOUT;
uint16_t	nxge_rcr_threshold = NXGE_RDC_RCR_THRESHOLD;
nxge_tx_mode_t	nxge_tx_scheme = NXGE_USE_SERIAL;

/* MAX LSO size */
#define	NXGE_LSO_MAXLEN	65535
uint32_t	nxge_lso_max = NXGE_LSO_MAXLEN;

/*
 * Debugging flags:
 *		nxge_no_tx_lb	 : transmit load balancing
 *		nxge_tx_lb_policy: 0 - TCP port (default)
 *				   3 - DEST MAC
 */
uint32_t	nxge_no_tx_lb = 0;
uint32_t	nxge_tx_lb_policy = NXGE_TX_LB_TCPUDP;

/*
 * Add tunable to reduce the amount of time spent in the
 * ISR doing Rx Processing.
 */
uint32_t	nxge_max_rx_pkts = 1024;

/*
 * Tunables to manage the receive buffer blocks.
 *
 * nxge_rx_threshold_hi:	copy all buffers.
 * nxge_rx_bcopy_size_type:	receive buffer block size type.
 * nxge_rx_threshold_lo:	copy only up to tunable block size type.
 */
nxge_rxbuf_threshold_t nxge_rx_threshold_hi = NXGE_RX_COPY_6;
nxge_rxbuf_type_t nxge_rx_buf_size_type = RCR_PKTBUFSZ_0;
nxge_rxbuf_threshold_t nxge_rx_threshold_lo = NXGE_RX_COPY_3;

/* Use kmem_alloc() to allocate data buffers. */
#if defined(_BIG_ENDIAN)
uint32_t	nxge_use_kmem_alloc = 1;
#else
uint32_t	nxge_use_kmem_alloc = 0;
#endif

/* Register-access trace buffer (see npi_rtrace_buf_init() in attach). */
rtrace_t npi_rtracebuf;

/*
 * The hardware sometimes fails to allow enough time for the link partner
 * to send an acknowledgement for packets that the hardware sent to it. The
 * hardware resends the packets earlier than it should be in those instances.
 * This behavior caused some switches to acknowledge the wrong packets
 * and it triggered the fatal error.
 * This software workaround is to set the replay timer to a value
 * suggested by the hardware team.
 *
 * PCI config space replay timer register:
 *     The following replay timeout value is 0xc
 *     for bit 14:18.
 */
#define	PCI_REPLAY_TIMEOUT_CFG_OFFSET	0xb8
#define	PCI_REPLAY_TIMEOUT_SHIFT	14

uint32_t	nxge_set_replay_timer = 1;
uint32_t	nxge_replay_timeout = 0xc;

/*
 * The transmit serialization sometimes causes
 * longer sleep before calling the driver transmit
 * function as it sleeps longer than it should.
 * The performance group suggests that a time wait tunable
 * can be used to set the maximum wait time when needed
 * and the default is set to 1 tick.
 */
uint32_t	nxge_tx_serial_maxsleep = 1;

#if	defined(sun4v)
/*
 * Hypervisor N2/NIU services information.
 */
static hsvc_info_t niu_hsvc = {
	HSVC_REV_1, NULL, HSVC_GROUP_NIU, NIU_MAJOR_VER,
	NIU_MINOR_VER, "nxge"
};

static int nxge_hsvc_register(p_nxge_t);
#endif

/*
 * Function Prototypes
 */
static int nxge_attach(dev_info_t *, ddi_attach_cmd_t);
static int nxge_detach(dev_info_t *, ddi_detach_cmd_t);
static void nxge_unattach(p_nxge_t);

#if NXGE_PROPERTY
static void nxge_remove_hard_properties(p_nxge_t);
#endif

/*
 * These two functions are required by nxge_hio.c
 */
extern int nxge_m_mmac_add(void *arg, mac_multi_addr_t *maddr);
extern int nxge_m_mmac_remove(void *arg, mac_addr_slot_t slot);

static nxge_status_t nxge_setup_system_dma_pages(p_nxge_t);

static nxge_status_t nxge_setup_mutexes(p_nxge_t);
static void nxge_destroy_mutexes(p_nxge_t);

static nxge_status_t nxge_map_regs(p_nxge_t nxgep);
static void nxge_unmap_regs(p_nxge_t nxgep);
#ifdef	NXGE_DEBUG
static void nxge_test_map_regs(p_nxge_t nxgep);
#endif

/* Interrupt setup/teardown (fixed and advanced/MSI-X paths). */
static nxge_status_t nxge_add_intrs(p_nxge_t nxgep);
static nxge_status_t nxge_add_soft_intrs(p_nxge_t nxgep);
static void nxge_remove_intrs(p_nxge_t nxgep);
static void nxge_remove_soft_intrs(p_nxge_t nxgep);

static nxge_status_t nxge_add_intrs_adv(p_nxge_t nxgep);
static nxge_status_t nxge_add_intrs_adv_type(p_nxge_t, uint32_t);
static nxge_status_t nxge_add_intrs_adv_type_fix(p_nxge_t, uint32_t);
static void nxge_intrs_enable(p_nxge_t nxgep);
static void nxge_intrs_disable(p_nxge_t nxgep);

static void nxge_suspend(p_nxge_t);
static nxge_status_t nxge_resume(p_nxge_t);

static nxge_status_t nxge_setup_dev(p_nxge_t);
static void nxge_destroy_dev(p_nxge_t);

/* RX/TX DMA memory pool management. */
static nxge_status_t nxge_alloc_mem_pool(p_nxge_t);
static void nxge_free_mem_pool(p_nxge_t);

nxge_status_t nxge_alloc_rx_mem_pool(p_nxge_t);
static void nxge_free_rx_mem_pool(p_nxge_t);

nxge_status_t nxge_alloc_tx_mem_pool(p_nxge_t);
static void nxge_free_tx_mem_pool(p_nxge_t);

static nxge_status_t nxge_dma_mem_alloc(p_nxge_t, dma_method_t,
	struct ddi_dma_attr *,
	size_t, ddi_device_acc_attr_t *, uint_t,
	p_nxge_dma_common_t);

static void nxge_dma_mem_free(p_nxge_dma_common_t);
static void nxge_dma_free_rx_data_buf(p_nxge_dma_common_t);

static nxge_status_t nxge_alloc_rx_buf_dma(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, size_t, size_t, uint32_t *);
static void nxge_free_rx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t);

static nxge_status_t nxge_alloc_rx_cntl_dma(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, size_t);
static void nxge_free_rx_cntl_dma(p_nxge_t, p_nxge_dma_common_t);

extern nxge_status_t nxge_alloc_tx_buf_dma(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, size_t, size_t, uint32_t *);
static void nxge_free_tx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t);

extern nxge_status_t nxge_alloc_tx_cntl_dma(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *,
	size_t);
static void nxge_free_tx_cntl_dma(p_nxge_t, p_nxge_dma_common_t);

static int nxge_init_common_dev(p_nxge_t);
static void nxge_uninit_common_dev(p_nxge_t);
extern int nxge_param_set_mac(p_nxge_t, queue_t *, mblk_t *,
    char *, caddr_t);

/*
 * The next declarations are for the GLDv3 interface.
282 */ 283 static int nxge_m_start(void *); 284 static void nxge_m_stop(void *); 285 static int nxge_m_unicst(void *, const uint8_t *); 286 static int nxge_m_multicst(void *, boolean_t, const uint8_t *); 287 static int nxge_m_promisc(void *, boolean_t); 288 static void nxge_m_ioctl(void *, queue_t *, mblk_t *); 289 static void nxge_m_resources(void *); 290 mblk_t *nxge_m_tx(void *arg, mblk_t *); 291 static nxge_status_t nxge_mac_register(p_nxge_t); 292 static int nxge_altmac_set(p_nxge_t nxgep, uint8_t *mac_addr, 293 mac_addr_slot_t slot); 294 void nxge_mmac_kstat_update(p_nxge_t nxgep, mac_addr_slot_t slot, 295 boolean_t factory); 296 static int nxge_m_mmac_reserve(void *arg, mac_multi_addr_t *maddr); 297 static int nxge_m_mmac_modify(void *arg, mac_multi_addr_t *maddr); 298 static int nxge_m_mmac_get(void *arg, mac_multi_addr_t *maddr); 299 static boolean_t nxge_m_getcapab(void *, mac_capab_t, void *); 300 static int nxge_m_setprop(void *, const char *, mac_prop_id_t, 301 uint_t, const void *); 302 static int nxge_m_getprop(void *, const char *, mac_prop_id_t, 303 uint_t, uint_t, void *); 304 static int nxge_set_priv_prop(nxge_t *, const char *, uint_t, 305 const void *); 306 static int nxge_get_priv_prop(nxge_t *, const char *, uint_t, uint_t, 307 void *); 308 static int nxge_get_def_val(nxge_t *, mac_prop_id_t, uint_t, void *); 309 310 static void nxge_niu_peu_reset(p_nxge_t nxgep); 311 static void nxge_set_pci_replay_timeout(nxge_t *); 312 313 mac_priv_prop_t nxge_priv_props[] = { 314 {"_adv_10gfdx_cap", MAC_PROP_PERM_RW}, 315 {"_adv_pause_cap", MAC_PROP_PERM_RW}, 316 {"_function_number", MAC_PROP_PERM_READ}, 317 {"_fw_version", MAC_PROP_PERM_READ}, 318 {"_port_mode", MAC_PROP_PERM_READ}, 319 {"_hot_swap_phy", MAC_PROP_PERM_READ}, 320 {"_accept_jumbo", MAC_PROP_PERM_RW}, 321 {"_rxdma_intr_time", MAC_PROP_PERM_RW}, 322 {"_rxdma_intr_pkts", MAC_PROP_PERM_RW}, 323 {"_class_opt_ipv4_tcp", MAC_PROP_PERM_RW}, 324 {"_class_opt_ipv4_udp", MAC_PROP_PERM_RW}, 325 
{"_class_opt_ipv4_ah", MAC_PROP_PERM_RW}, 326 {"_class_opt_ipv4_sctp", MAC_PROP_PERM_RW}, 327 {"_class_opt_ipv6_tcp", MAC_PROP_PERM_RW}, 328 {"_class_opt_ipv6_udp", MAC_PROP_PERM_RW}, 329 {"_class_opt_ipv6_ah", MAC_PROP_PERM_RW}, 330 {"_class_opt_ipv6_sctp", MAC_PROP_PERM_RW}, 331 {"_soft_lso_enable", MAC_PROP_PERM_RW} 332 }; 333 334 #define NXGE_MAX_PRIV_PROPS \ 335 (sizeof (nxge_priv_props)/sizeof (mac_priv_prop_t)) 336 337 #define NXGE_M_CALLBACK_FLAGS\ 338 (MC_RESOURCES | MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP) 339 340 341 #define NXGE_NEPTUNE_MAGIC 0x4E584745UL 342 #define MAX_DUMP_SZ 256 343 344 #define NXGE_M_CALLBACK_FLAGS \ 345 (MC_RESOURCES | MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP) 346 347 mac_callbacks_t nxge_m_callbacks = { 348 NXGE_M_CALLBACK_FLAGS, 349 nxge_m_stat, 350 nxge_m_start, 351 nxge_m_stop, 352 nxge_m_promisc, 353 nxge_m_multicst, 354 nxge_m_unicst, 355 nxge_m_tx, 356 nxge_m_resources, 357 nxge_m_ioctl, 358 nxge_m_getcapab, 359 NULL, 360 NULL, 361 nxge_m_setprop, 362 nxge_m_getprop 363 }; 364 365 void 366 nxge_err_inject(p_nxge_t, queue_t *, mblk_t *); 367 368 /* PSARC/2007/453 MSI-X interrupt limit override. */ 369 #define NXGE_MSIX_REQUEST_10G 8 370 #define NXGE_MSIX_REQUEST_1G 2 371 static int nxge_create_msi_property(p_nxge_t); 372 373 /* 374 * These global variables control the message 375 * output. 376 */ 377 out_dbgmsg_t nxge_dbgmsg_out = DBG_CONSOLE | STR_LOG; 378 uint64_t nxge_debug_level; 379 380 /* 381 * This list contains the instance structures for the Neptune 382 * devices present in the system. The lock exists to guarantee 383 * mutually exclusive access to the list. 
 */
void *nxge_list = NULL;

void *nxge_hw_list = NULL;
nxge_os_mutex_t nxge_common_lock;

extern uint64_t npi_debug_level;

extern nxge_status_t nxge_ldgv_init(p_nxge_t, int *, int *);
extern nxge_status_t nxge_ldgv_init_n2(p_nxge_t, int *, int *);
extern nxge_status_t nxge_ldgv_uninit(p_nxge_t);
extern nxge_status_t nxge_intr_ldgv_init(p_nxge_t);
extern void nxge_fm_init(p_nxge_t,
	ddi_device_acc_attr_t *,
	ddi_device_acc_attr_t *,
	ddi_dma_attr_t *);
extern void nxge_fm_fini(p_nxge_t);
extern npi_status_t npi_mac_altaddr_disable(npi_handle_t, uint8_t, uint8_t);

/*
 * Count used to maintain the number of buffers being used
 * by Neptune instances and loaned up to the upper layers.
 */
uint32_t nxge_mblks_pending = 0;

/*
 * Device register access attributes for PIO.
 */
static ddi_device_acc_attr_t nxge_dev_reg_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
};

/*
 * Device descriptor access attributes for DMA.
 */
static ddi_device_acc_attr_t nxge_dev_desc_dma_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};

/*
 * Device buffer access attributes for DMA.
 */
static ddi_device_acc_attr_t nxge_dev_buf_dma_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_BE_ACC,
	DDI_STRICTORDER_ACC
};

ddi_dma_attr_t nxge_desc_dma_attr = {
	DMA_ATTR_V0,		/* version number. */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
#ifndef NIU_PA_WORKAROUND
	0x100000,		/* alignment */
#else
	0x2000,
#endif
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	1,			/* scatter/gather list length */
	(unsigned int) 1,	/* granularity */
	0			/* attribute flags */
};

ddi_dma_attr_t nxge_tx_dma_attr = {
	DMA_ATTR_V0,		/* version number. */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
#if defined(_BIG_ENDIAN)
	0x2000,			/* alignment */
#else
	0x1000,			/* alignment */
#endif
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	5,			/* scatter/gather list length */
	(unsigned int) 1,	/* granularity */
	0			/* attribute flags */
};

ddi_dma_attr_t nxge_rx_dma_attr = {
	DMA_ATTR_V0,		/* version number. */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
	0x2000,			/* alignment */
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	1,			/* scatter/gather list length */
	(unsigned int) 1,	/* granularity */
	DDI_DMA_RELAXED_ORDERING /* attribute flags */
};

ddi_dma_lim_t nxge_dma_limits = {
	(uint_t)0,		/* dlim_addr_lo */
	(uint_t)0xffffffff,	/* dlim_addr_hi */
	(uint_t)0xffffffff,	/* dlim_cntr_max */
	(uint_t)0xfc00fc,	/* dlim_burstsizes for 32 and 64 bit xfers */
	0x1,			/* dlim_minxfer */
	1024			/* dlim_speed */
};

dma_method_t nxge_force_dma = DVMA;

/*
 * dma chunk sizes.
 *
 * Try to allocate the largest possible size
 * so that fewer number of dma chunks would be managed
 */
#ifdef NIU_PA_WORKAROUND
size_t alloc_sizes [] = {0x2000};
#else
size_t alloc_sizes [] = {0x1000, 0x2000, 0x4000, 0x8000,
		0x10000, 0x20000, 0x40000, 0x80000,
		0x100000, 0x200000, 0x400000, 0x800000,
		0x1000000, 0x2000000, 0x4000000};
#endif

/*
 * Translate "dev_t" to a pointer to the associated "dev_info_t".
 */

extern void nxge_get_environs(nxge_t *);

/*
 * nxge_attach -- attach(9E) entry point.
 *
 * Handles DDI_ATTACH as well as DDI_RESUME/DDI_PM_RESUME (the resume
 * commands look up the existing soft state and call nxge_resume()).
 * For DDI_ATTACH it allocates the per-instance soft state, maps the
 * device registers, initializes FMA, the common (per-chip) structure,
 * ndd parameters, kstats, mutexes, DMA resources and interrupts, and
 * finally registers with the GLDv3 MAC layer.  Failures unwind through
 * the nxge_attach_fail* labels below.
 */
static int
nxge_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	p_nxge_t	nxgep = NULL;
	int		instance;
	int		status = DDI_SUCCESS;
	uint8_t		portn;
	nxge_mmac_t	*mmac_info;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_attach"));

	/*
	 * Get the device instance since we'll need to setup
	 * or retrieve a soft state for this instance.
	 */
	instance = ddi_get_instance(dip);

	switch (cmd) {
	case DDI_ATTACH:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_ATTACH"));
		break;

	case DDI_RESUME:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_RESUME"));
		nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);
		if (nxgep == NULL) {
			status = DDI_FAILURE;
			break;
		}
		if (nxgep->dip != dip) {
			status = DDI_FAILURE;
			break;
		}
		if (nxgep->suspended == DDI_PM_SUSPEND) {
			status = ddi_dev_is_needed(nxgep->dip, 0, 1);
		} else {
			status = nxge_resume(nxgep);
		}
		goto nxge_attach_exit;

	case DDI_PM_RESUME:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_RESUME"));
		nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);
		if (nxgep == NULL) {
			status = DDI_FAILURE;
			break;
		}
		if (nxgep->dip != dip) {
			status = DDI_FAILURE;
			break;
		}
		status = nxge_resume(nxgep);
		goto nxge_attach_exit;

	default:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing unknown"));
		status = DDI_FAILURE;
		goto nxge_attach_exit;
	}


	if (ddi_soft_state_zalloc(nxge_list, instance) == DDI_FAILURE) {
		status = DDI_FAILURE;
		goto nxge_attach_exit;
	}

	nxgep = ddi_get_soft_state(nxge_list, instance);
	if (nxgep == NULL) {
		status = NXGE_ERROR;
		goto nxge_attach_fail2;
	}

	nxgep->nxge_magic = NXGE_MAGIC;

	nxgep->drv_state = 0;
	nxgep->dip = dip;
	nxgep->instance = instance;
	nxgep->p_dip = ddi_get_parent(dip);
	nxgep->nxge_debug_level = nxge_debug_level;
	npi_debug_level = nxge_debug_level;

	/* Are we a guest running in a Hybrid I/O environment? */
	nxge_get_environs(nxgep);

	status = nxge_map_regs(nxgep);

	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_map_regs failed"));
		goto nxge_attach_fail3;
	}

	nxge_fm_init(nxgep, &nxge_dev_reg_acc_attr,
	    &nxge_dev_desc_dma_acc_attr,
	    &nxge_rx_dma_attr);

	/* Create & initialize the per-Neptune data structure */
	/* (even if we're a guest). */
	status = nxge_init_common_dev(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_init_common_dev failed"));
		goto nxge_attach_fail4;
	}

	/*
	 * Software workaround: set the replay timer.
	 * (Neptune/PCI-E only; the N2 NIU has no PCI config space.)
	 */
	if (nxgep->niu_type != N2_NIU) {
		nxge_set_pci_replay_timeout(nxgep);
	}

#if defined(sun4v)
	/* This is required by nxge_hio_init(), which follows. */
	if ((status = nxge_hsvc_register(nxgep)) != DDI_SUCCESS)
		goto nxge_attach_fail;
#endif

	if ((status = nxge_hio_init(nxgep)) != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_hio_init failed"));
		goto nxge_attach_fail4;
	}

	if (nxgep->niu_type == NEPTUNE_2_10GF) {
		if (nxgep->function_num > 1) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "Unsupported"
			    " function %d. Only functions 0 and 1 are "
			    "supported for this card.", nxgep->function_num));
			status = NXGE_ERROR;
			goto nxge_attach_fail4;
		}
	}

	if (isLDOMguest(nxgep)) {
		/*
		 * Use the function number here.
		 */
		nxgep->mac.portnum = nxgep->function_num;
		nxgep->mac.porttype = PORT_TYPE_LOGICAL;

		/* XXX We'll set the MAC address counts to 1 for now. */
		mmac_info = &nxgep->nxge_mmac_info;
		mmac_info->num_mmac = 1;
		mmac_info->naddrfree = 1;
	} else {
		portn = NXGE_GET_PORT_NUM(nxgep->function_num);
		nxgep->mac.portnum = portn;
		if ((portn == 0) || (portn == 1))
			nxgep->mac.porttype = PORT_TYPE_XMAC;
		else
			nxgep->mac.porttype = PORT_TYPE_BMAC;
		/*
		 * Neptune has 4 ports, the first 2 ports use XMAC (10G MAC)
		 * internally, the rest 2 ports use BMAC (1G "Big" MAC).
		 * The two types of MACs have different characterizations.
		 */
		mmac_info = &nxgep->nxge_mmac_info;
		if (nxgep->function_num < 2) {
			mmac_info->num_mmac = XMAC_MAX_ALT_ADDR_ENTRY;
			mmac_info->naddrfree = XMAC_MAX_ALT_ADDR_ENTRY;
		} else {
			mmac_info->num_mmac = BMAC_MAX_ALT_ADDR_ENTRY;
			mmac_info->naddrfree = BMAC_MAX_ALT_ADDR_ENTRY;
		}
	}
	/*
	 * Setup the Ndd parameters for this instance.
	 */
	nxge_init_param(nxgep);

	/*
	 * Setup Register Tracing Buffer.
	 */
	npi_rtrace_buf_init((rtrace_t *)&npi_rtracebuf);

	/* init stats ptr */
	nxge_init_statsp(nxgep);

	/*
	 * Copy the vpd info from eeprom to a local data
	 * structure, and then check its validity.
	 */
	if (!isLDOMguest(nxgep)) {
		int *regp;
		uint_t reglen;
		int rv;

		nxge_vpd_info_get(nxgep);

		/* Find the NIU config handle. */
		rv = ddi_prop_lookup_int_array(DDI_DEV_T_ANY,
		    ddi_get_parent(nxgep->dip), DDI_PROP_DONTPASS,
		    "reg", &regp, &reglen);

		if (rv != DDI_PROP_SUCCESS) {
			goto nxge_attach_fail5;
		}
		/*
		 * The address_hi, that is the first int, in the reg
		 * property consists of config handle, but need to remove
		 * the bits 28-31 which are OBP specific info.
		 */
		nxgep->niu_cfg_hdl = (*regp) & 0xFFFFFFF;
		ddi_prop_free(regp);
	}

	if (isLDOMguest(nxgep)) {
		uchar_t *prop_val;
		uint_t prop_len;

		extern void nxge_get_logical_props(p_nxge_t);

		nxgep->statsp->mac_stats.xcvr_inuse = LOGICAL_XCVR;
		nxgep->mac.portmode = PORT_LOGICAL;
		(void) ddi_prop_update_string(DDI_DEV_T_NONE, nxgep->dip,
		    "phy-type", "virtual transceiver");

		nxgep->nports = 1;
		nxgep->board_ver = 0;	/* XXX What? */

		/*
		 * local-mac-address property gives us info on which
		 * specific MAC address the Hybrid resource is associated
		 * with.
		 */
		if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, nxgep->dip, 0,
		    "local-mac-address", &prop_val,
		    &prop_len) != DDI_PROP_SUCCESS) {
			goto nxge_attach_fail5;
		}
		if (prop_len != ETHERADDRL) {
			ddi_prop_free(prop_val);
			goto nxge_attach_fail5;
		}
		ether_copy(prop_val, nxgep->hio_mac_addr);
		ddi_prop_free(prop_val);
		nxge_get_logical_props(nxgep);

	} else {
		status = nxge_xcvr_find(nxgep);

		if (status != NXGE_OK) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_attach: "
			    " Couldn't determine card type"
			    " .... exit "));
			goto nxge_attach_fail5;
		}

		status = nxge_get_config_properties(nxgep);

		if (status != NXGE_OK) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "get_hw create failed"));
			goto nxge_attach_fail;
		}
	}

	/*
	 * Setup the Kstats for the driver.
	 */
	nxge_setup_kstats(nxgep);

	if (!isLDOMguest(nxgep))
		nxge_setup_param(nxgep);

	status = nxge_setup_system_dma_pages(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "set dma page failed"));
		goto nxge_attach_fail;
	}

	nxge_hw_id_init(nxgep);

	if (!isLDOMguest(nxgep))
		nxge_hw_init_niu_common(nxgep);

	status = nxge_setup_mutexes(nxgep);
	if (status != NXGE_OK) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set mutex failed"));
		goto nxge_attach_fail;
	}

#if defined(sun4v)
	if (isLDOMguest(nxgep)) {
		/* Find our VR & channel sets. */
		status = nxge_hio_vr_add(nxgep);
		goto nxge_attach_exit;
	}
#endif

	status = nxge_setup_dev(nxgep);
	if (status != DDI_SUCCESS) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set dev failed"));
		goto nxge_attach_fail;
	}

	status = nxge_add_intrs(nxgep);
	if (status != DDI_SUCCESS) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "add_intr failed"));
		goto nxge_attach_fail;
	}
	status = nxge_add_soft_intrs(nxgep);
	if (status != DDI_SUCCESS) {
		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
		    "add_soft_intr failed"));
		goto nxge_attach_fail;
	}

	/*
	 * Enable interrupts.
	 */
	nxge_intrs_enable(nxgep);

	/* If a guest, register with vio_net instead. */
	if ((status = nxge_mac_register(nxgep)) != NXGE_OK) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "unable to register to mac layer (%d)", status));
		goto nxge_attach_fail;
	}

	mac_link_update(nxgep->mach, LINK_STATE_UNKNOWN);

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "registered to mac (instance %d)", instance));

	/* nxge_link_monitor calls xcvr.check_link recursively */
	(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);

	goto nxge_attach_exit;

nxge_attach_fail:
	nxge_unattach(nxgep);
	goto nxge_attach_fail1;

nxge_attach_fail5:
	/*
	 * Tear down the ndd parameters setup.
	 */
	nxge_destroy_param(nxgep);

	/*
	 * Tear down the kstat setup.
	 */
	nxge_destroy_kstats(nxgep);

nxge_attach_fail4:
	if (nxgep->nxge_hw_p) {
		nxge_uninit_common_dev(nxgep);
		nxgep->nxge_hw_p = NULL;
	}

nxge_attach_fail3:
	/*
	 * Unmap the register setup.
	 */
	nxge_unmap_regs(nxgep);

	nxge_fm_fini(nxgep);

nxge_attach_fail2:
	ddi_soft_state_free(nxge_list, nxgep->instance);

nxge_attach_fail1:
	/* Convert any non-DDI status into a combined NXGE/DDI failure code. */
	if (status != NXGE_OK)
		status = (NXGE_ERROR | NXGE_DDI_FAILED);
	nxgep = NULL;

nxge_attach_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_attach status = 0x%08x",
	    status));

	return (status);
}

/*
 * nxge_detach -- detach(9E) entry point.
 *
 * DDI_PM_SUSPEND and DDI_SUSPEND only quiesce the device via
 * nxge_suspend() and return; DDI_DETACH additionally stops link
 * monitoring, unregisters from the MAC layer (or from the HIO
 * framework when running as an LDOMs guest) and releases all
 * resources through nxge_unattach().
 */
static int
nxge_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int		status = DDI_SUCCESS;
	int		instance;
	p_nxge_t	nxgep = NULL;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_detach"));
	instance = ddi_get_instance(dip);
	nxgep = ddi_get_soft_state(nxge_list, instance);
	if (nxgep == NULL) {
		status = DDI_FAILURE;
		goto nxge_detach_exit;
	}

	switch (cmd) {
	case DDI_DETACH:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_DETACH"));
		break;

	case DDI_PM_SUSPEND:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_SUSPEND"));
		nxgep->suspended = DDI_PM_SUSPEND;
		nxge_suspend(nxgep);
		break;

	case DDI_SUSPEND:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_SUSPEND"));
		if (nxgep->suspended != DDI_PM_SUSPEND) {
			nxgep->suspended = DDI_SUSPEND;
			nxge_suspend(nxgep);
		}
		break;

	default:
		status = DDI_FAILURE;
	}

	if (cmd != DDI_DETACH)
		goto nxge_detach_exit;

	/*
	 * Stop the xcvr polling.
	 */
	nxgep->suspended = cmd;

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);

	if (isLDOMguest(nxgep)) {
		if (nxgep->nxge_mac_state == NXGE_MAC_STARTED)
			nxge_m_stop((void *)nxgep);
		nxge_hio_unregister(nxgep);
	} else if (nxgep->mach && (status = mac_unregister(nxgep->mach)) != 0) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "<== nxge_detach status = 0x%08X", status));
		return (DDI_FAILURE);
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "<== nxge_detach (mac_unregister) status = 0x%08X", status));

	nxge_unattach(nxgep);
	nxgep = NULL;

nxge_detach_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_detach status = 0x%08X",
	    status));

	return (status);
}

/*
 * nxge_unattach -- release every resource acquired in nxge_attach():
 * timers, optional PEU reset (Neptune only, see nxge_peu_reset_enable),
 * HIO virtual region, common (per-chip) state, hypervisor services,
 * interrupts, ndd parameters, kstats, mutexes, device properties,
 * register mappings, FMA state and finally the soft state itself.
 * Safe to call on a partially attached instance.
 */
static void
nxge_unattach(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unattach"));

	if (nxgep == NULL || nxgep->dev_regs == NULL) {
		return;
	}

	nxgep->nxge_magic = 0;

	if (nxgep->nxge_timerid) {
		nxge_stop_timer(nxgep, nxgep->nxge_timerid);
		nxgep->nxge_timerid = 0;
	}

	/*
	 * If this flag is set, it will affect the Neptune
	 * only.
	 */
	if ((nxgep->niu_type != N2_NIU) && nxge_peu_reset_enable) {
		nxge_niu_peu_reset(nxgep);
	}

#if	defined(sun4v)
	if (isLDOMguest(nxgep)) {
		(void) nxge_hio_vr_release(nxgep);
	}
#endif

	if (nxgep->nxge_hw_p) {
		nxge_uninit_common_dev(nxgep);
		nxgep->nxge_hw_p = NULL;
	}

#if	defined(sun4v)
	if (nxgep->niu_type == N2_NIU && nxgep->niu_hsvc_available == B_TRUE) {
		(void) hsvc_unregister(&nxgep->niu_hsvc);
		nxgep->niu_hsvc_available = B_FALSE;
	}
#endif
	/*
	 * Stop any further interrupts.
	 */
	nxge_remove_intrs(nxgep);

	/* remove soft interrupts */
	nxge_remove_soft_intrs(nxgep);

	/*
	 * Stop the device and free resources.
	 */
	if (!isLDOMguest(nxgep)) {
		nxge_destroy_dev(nxgep);
	}

	/*
	 * Tear down the ndd parameters setup.
	 */
	nxge_destroy_param(nxgep);

	/*
	 * Tear down the kstat setup.
	 */
	nxge_destroy_kstats(nxgep);

	/*
	 * Destroy all mutexes.
	 */
	nxge_destroy_mutexes(nxgep);

	/*
	 * Remove the list of ndd parameters which
	 * were setup during attach.
	 */
	if (nxgep->dip) {
		NXGE_DEBUG_MSG((nxgep, OBP_CTL,
		    " nxge_unattach: remove all properties"));

		(void) ddi_prop_remove_all(nxgep->dip);
	}

#if NXGE_PROPERTY
	nxge_remove_hard_properties(nxgep);
#endif

	/*
	 * Unmap the register setup.
	 */
	nxge_unmap_regs(nxgep);

	nxge_fm_fini(nxgep);

	ddi_soft_state_free(nxge_list, nxgep->instance);

	NXGE_DEBUG_MSG((NULL, DDI_CTL, "<== nxge_unattach"));
}

#if defined(sun4v)
/*
 * nxge_hsvc_register -- negotiate the N2/NIU hypervisor service group.
 *
 * Only applies to the N2 NIU (nxgep->niu_type == N2_NIU); on other
 * hardware it is a no-op returning DDI_SUCCESS.  On success the
 * negotiated state is recorded in nxgep->niu_hsvc and
 * nxgep->niu_hsvc_available is set so nxge_unattach() knows to
 * call hsvc_unregister().
 */
int
nxge_hsvc_register(
	nxge_t *nxgep)
{
	nxge_status_t status;

	if (nxgep->niu_type == N2_NIU) {
		nxgep->niu_hsvc_available = B_FALSE;
		bcopy(&niu_hsvc, &nxgep->niu_hsvc, sizeof (hsvc_info_t));
		if ((status = hsvc_register(&nxgep->niu_hsvc,
		    &nxgep->niu_min_ver)) != 0) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "nxge_attach: %s: cannot negotiate "
			    "hypervisor services revision %d group: 0x%lx "
			    "major: 0x%lx minor: 0x%lx errno: %d",
			    niu_hsvc.hsvc_modname, niu_hsvc.hsvc_rev,
			    niu_hsvc.hsvc_group, niu_hsvc.hsvc_major,
			    niu_hsvc.hsvc_minor, status));
			return (DDI_FAILURE);
		}
		nxgep->niu_hsvc_available = B_TRUE;
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "NIU Hypervisor service enabled"));
	}

	return (DDI_SUCCESS);
}
#endif

/* Path component that identifies an N2/NIU device node. */
static char n2_siu_name[] = "niu";

/*
 * nxge_map_regs -- map the device register sets for this instance
 * (guest domains delegate to nxge_guest_regs_map()) and determine
 * the NIU type and function number from the device path / "reg"
 * property.
 */
static nxge_status_t
nxge_map_regs(p_nxge_t nxgep)
{
	int		ddi_status = DDI_SUCCESS;
	p_dev_regs_t	dev_regs;
	char
buf[MAXPATHLEN + 1]; 1102 char *devname; 1103 #ifdef NXGE_DEBUG 1104 char *sysname; 1105 #endif 1106 off_t regsize; 1107 nxge_status_t status = NXGE_OK; 1108 #if !defined(_BIG_ENDIAN) 1109 off_t pci_offset; 1110 uint16_t pcie_devctl; 1111 #endif 1112 1113 if (isLDOMguest(nxgep)) { 1114 return (nxge_guest_regs_map(nxgep)); 1115 } 1116 1117 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_map_regs")); 1118 nxgep->dev_regs = NULL; 1119 dev_regs = KMEM_ZALLOC(sizeof (dev_regs_t), KM_SLEEP); 1120 dev_regs->nxge_regh = NULL; 1121 dev_regs->nxge_pciregh = NULL; 1122 dev_regs->nxge_msix_regh = NULL; 1123 dev_regs->nxge_vir_regh = NULL; 1124 dev_regs->nxge_vir2_regh = NULL; 1125 nxgep->niu_type = NIU_TYPE_NONE; 1126 1127 devname = ddi_pathname(nxgep->dip, buf); 1128 ASSERT(strlen(devname) > 0); 1129 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1130 "nxge_map_regs: pathname devname %s", devname)); 1131 1132 /* 1133 * The driver is running on a N2-NIU system if devname is something 1134 * like "/niu@80/network@0" 1135 */ 1136 if (strstr(devname, n2_siu_name)) { 1137 /* N2/NIU */ 1138 nxgep->niu_type = N2_NIU; 1139 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1140 "nxge_map_regs: N2/NIU devname %s", devname)); 1141 /* get function number */ 1142 nxgep->function_num = 1143 (devname[strlen(devname) -1] == '1' ? 
1 : 0); 1144 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1145 "nxge_map_regs: N2/NIU function number %d", 1146 nxgep->function_num)); 1147 } else { 1148 int *prop_val; 1149 uint_t prop_len; 1150 uint8_t func_num; 1151 1152 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, nxgep->dip, 1153 0, "reg", 1154 &prop_val, &prop_len) != DDI_PROP_SUCCESS) { 1155 NXGE_DEBUG_MSG((nxgep, VPD_CTL, 1156 "Reg property not found")); 1157 ddi_status = DDI_FAILURE; 1158 goto nxge_map_regs_fail0; 1159 1160 } else { 1161 func_num = (prop_val[0] >> 8) & 0x7; 1162 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1163 "Reg property found: fun # %d", 1164 func_num)); 1165 nxgep->function_num = func_num; 1166 if (isLDOMguest(nxgep)) { 1167 nxgep->function_num /= 2; 1168 return (NXGE_OK); 1169 } 1170 ddi_prop_free(prop_val); 1171 } 1172 } 1173 1174 switch (nxgep->niu_type) { 1175 default: 1176 (void) ddi_dev_regsize(nxgep->dip, 0, ®size); 1177 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1178 "nxge_map_regs: pci config size 0x%x", regsize)); 1179 1180 ddi_status = ddi_regs_map_setup(nxgep->dip, 0, 1181 (caddr_t *)&(dev_regs->nxge_pciregp), 0, 0, 1182 &nxge_dev_reg_acc_attr, &dev_regs->nxge_pciregh); 1183 if (ddi_status != DDI_SUCCESS) { 1184 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1185 "ddi_map_regs, nxge bus config regs failed")); 1186 goto nxge_map_regs_fail0; 1187 } 1188 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1189 "nxge_map_reg: PCI config addr 0x%0llx " 1190 " handle 0x%0llx", dev_regs->nxge_pciregp, 1191 dev_regs->nxge_pciregh)); 1192 /* 1193 * IMP IMP 1194 * workaround for bit swapping bug in HW 1195 * which ends up in no-snoop = yes 1196 * resulting, in DMA not synched properly 1197 */ 1198 #if !defined(_BIG_ENDIAN) 1199 /* workarounds for x86 systems */ 1200 pci_offset = 0x80 + PCIE_DEVCTL; 1201 pcie_devctl = 0x0; 1202 pcie_devctl &= PCIE_DEVCTL_ENABLE_NO_SNOOP; 1203 pcie_devctl |= PCIE_DEVCTL_RO_EN; 1204 pci_config_put16(dev_regs->nxge_pciregh, pci_offset, 1205 pcie_devctl); 1206 #endif 1207 1208 (void) ddi_dev_regsize(nxgep->dip, 1, 
®size); 1209 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1210 "nxge_map_regs: pio size 0x%x", regsize)); 1211 /* set up the device mapped register */ 1212 ddi_status = ddi_regs_map_setup(nxgep->dip, 1, 1213 (caddr_t *)&(dev_regs->nxge_regp), 0, 0, 1214 &nxge_dev_reg_acc_attr, &dev_regs->nxge_regh); 1215 if (ddi_status != DDI_SUCCESS) { 1216 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1217 "ddi_map_regs for Neptune global reg failed")); 1218 goto nxge_map_regs_fail1; 1219 } 1220 1221 /* set up the msi/msi-x mapped register */ 1222 (void) ddi_dev_regsize(nxgep->dip, 2, ®size); 1223 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1224 "nxge_map_regs: msix size 0x%x", regsize)); 1225 ddi_status = ddi_regs_map_setup(nxgep->dip, 2, 1226 (caddr_t *)&(dev_regs->nxge_msix_regp), 0, 0, 1227 &nxge_dev_reg_acc_attr, &dev_regs->nxge_msix_regh); 1228 if (ddi_status != DDI_SUCCESS) { 1229 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1230 "ddi_map_regs for msi reg failed")); 1231 goto nxge_map_regs_fail2; 1232 } 1233 1234 /* set up the vio region mapped register */ 1235 (void) ddi_dev_regsize(nxgep->dip, 3, ®size); 1236 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1237 "nxge_map_regs: vio size 0x%x", regsize)); 1238 ddi_status = ddi_regs_map_setup(nxgep->dip, 3, 1239 (caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0, 1240 &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh); 1241 1242 if (ddi_status != DDI_SUCCESS) { 1243 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1244 "ddi_map_regs for nxge vio reg failed")); 1245 goto nxge_map_regs_fail3; 1246 } 1247 nxgep->dev_regs = dev_regs; 1248 1249 NPI_PCI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_pciregh); 1250 NPI_PCI_ADD_HANDLE_SET(nxgep, 1251 (npi_reg_ptr_t)dev_regs->nxge_pciregp); 1252 NPI_MSI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_msix_regh); 1253 NPI_MSI_ADD_HANDLE_SET(nxgep, 1254 (npi_reg_ptr_t)dev_regs->nxge_msix_regp); 1255 1256 NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh); 1257 NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp); 1258 1259 NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh); 
1260 NPI_REG_ADD_HANDLE_SET(nxgep, 1261 (npi_reg_ptr_t)dev_regs->nxge_regp); 1262 1263 NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh); 1264 NPI_VREG_ADD_HANDLE_SET(nxgep, 1265 (npi_reg_ptr_t)dev_regs->nxge_vir_regp); 1266 1267 break; 1268 1269 case N2_NIU: 1270 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "ddi_map_regs, NIU")); 1271 /* 1272 * Set up the device mapped register (FWARC 2006/556) 1273 * (changed back to 1: reg starts at 1!) 1274 */ 1275 (void) ddi_dev_regsize(nxgep->dip, 1, ®size); 1276 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1277 "nxge_map_regs: dev size 0x%x", regsize)); 1278 ddi_status = ddi_regs_map_setup(nxgep->dip, 1, 1279 (caddr_t *)&(dev_regs->nxge_regp), 0, 0, 1280 &nxge_dev_reg_acc_attr, &dev_regs->nxge_regh); 1281 1282 if (ddi_status != DDI_SUCCESS) { 1283 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1284 "ddi_map_regs for N2/NIU, global reg failed ")); 1285 goto nxge_map_regs_fail1; 1286 } 1287 1288 /* set up the first vio region mapped register */ 1289 (void) ddi_dev_regsize(nxgep->dip, 2, ®size); 1290 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1291 "nxge_map_regs: vio (1) size 0x%x", regsize)); 1292 ddi_status = ddi_regs_map_setup(nxgep->dip, 2, 1293 (caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0, 1294 &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh); 1295 1296 if (ddi_status != DDI_SUCCESS) { 1297 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1298 "ddi_map_regs for nxge vio reg failed")); 1299 goto nxge_map_regs_fail2; 1300 } 1301 /* set up the second vio region mapped register */ 1302 (void) ddi_dev_regsize(nxgep->dip, 3, ®size); 1303 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1304 "nxge_map_regs: vio (3) size 0x%x", regsize)); 1305 ddi_status = ddi_regs_map_setup(nxgep->dip, 3, 1306 (caddr_t *)&(dev_regs->nxge_vir2_regp), 0, 0, 1307 &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir2_regh); 1308 1309 if (ddi_status != DDI_SUCCESS) { 1310 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1311 "ddi_map_regs for nxge vio2 reg failed")); 1312 goto nxge_map_regs_fail3; 1313 } 1314 nxgep->dev_regs = dev_regs; 
1315 1316 NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh); 1317 NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp); 1318 1319 NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh); 1320 NPI_REG_ADD_HANDLE_SET(nxgep, 1321 (npi_reg_ptr_t)dev_regs->nxge_regp); 1322 1323 NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh); 1324 NPI_VREG_ADD_HANDLE_SET(nxgep, 1325 (npi_reg_ptr_t)dev_regs->nxge_vir_regp); 1326 1327 NPI_V2REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir2_regh); 1328 NPI_V2REG_ADD_HANDLE_SET(nxgep, 1329 (npi_reg_ptr_t)dev_regs->nxge_vir2_regp); 1330 1331 break; 1332 } 1333 1334 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "nxge_map_reg: hardware addr 0x%0llx " 1335 " handle 0x%0llx", dev_regs->nxge_regp, dev_regs->nxge_regh)); 1336 1337 goto nxge_map_regs_exit; 1338 nxge_map_regs_fail3: 1339 if (dev_regs->nxge_msix_regh) { 1340 ddi_regs_map_free(&dev_regs->nxge_msix_regh); 1341 } 1342 if (dev_regs->nxge_vir_regh) { 1343 ddi_regs_map_free(&dev_regs->nxge_regh); 1344 } 1345 nxge_map_regs_fail2: 1346 if (dev_regs->nxge_regh) { 1347 ddi_regs_map_free(&dev_regs->nxge_regh); 1348 } 1349 nxge_map_regs_fail1: 1350 if (dev_regs->nxge_pciregh) { 1351 ddi_regs_map_free(&dev_regs->nxge_pciregh); 1352 } 1353 nxge_map_regs_fail0: 1354 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "Freeing register set memory")); 1355 kmem_free(dev_regs, sizeof (dev_regs_t)); 1356 1357 nxge_map_regs_exit: 1358 if (ddi_status != DDI_SUCCESS) 1359 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 1360 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_map_regs")); 1361 return (status); 1362 } 1363 1364 static void 1365 nxge_unmap_regs(p_nxge_t nxgep) 1366 { 1367 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unmap_regs")); 1368 1369 if (isLDOMguest(nxgep)) { 1370 nxge_guest_regs_map_free(nxgep); 1371 return; 1372 } 1373 1374 if (nxgep->dev_regs) { 1375 if (nxgep->dev_regs->nxge_pciregh) { 1376 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1377 "==> nxge_unmap_regs: bus")); 1378 ddi_regs_map_free(&nxgep->dev_regs->nxge_pciregh); 1379 
/*
 * nxge_setup_mutexes
 *
 *	Create all per-instance synchronization primitives.  The device's
 *	interrupt block cookie is fetched first so every mutex is
 *	initialized at the correct interrupt priority; guests have no
 *	hardware interrupt and use a zero cookie.
 *
 * Returns:
 *	NXGE_OK on success; NXGE_ERROR | NXGE_DDI_FAILED if the iblock
 *	cookie cannot be obtained (no locks are created in that case).
 */
static nxge_status_t
nxge_setup_mutexes(p_nxge_t nxgep)
{
	int ddi_status = DDI_SUCCESS;
	nxge_status_t status = NXGE_OK;
	nxge_classify_t *classify_ptr;
	int partition;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_mutexes"));

	/*
	 * Get the interrupt cookie so the mutexes can be
	 * Initialized.
	 */
	if (isLDOMguest(nxgep)) {
		nxgep->interrupt_cookie = 0;
	} else {
		ddi_status = ddi_get_iblock_cookie(nxgep->dip, 0,
		    &nxgep->interrupt_cookie);

		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "<== nxge_setup_mutexes: failed 0x%x",
			    ddi_status));
			goto nxge_setup_mutexes_exit;
		}
	}

	/* Polling support: condvar plus its guarding lock. */
	cv_init(&nxgep->poll_cv, NULL, CV_DRIVER, NULL);
	MUTEX_INIT(&nxgep->poll_lock, NULL,
	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);

	/*
	 * Initialize mutexes for this device.
	 */
	MUTEX_INIT(nxgep->genlock, NULL,
	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	MUTEX_INIT(&nxgep->ouraddr_lock, NULL,
	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	MUTEX_INIT(&nxgep->mif_lock, NULL,
	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	MUTEX_INIT(&nxgep->group_lock, NULL,
	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	RW_INIT(&nxgep->filter_lock, NULL,
	    RW_DRIVER, (void *)nxgep->interrupt_cookie);

	classify_ptr = &nxgep->classifier;
	/*
	 * FFLP Mutexes are never used in interrupt context
	 * as fflp operation can take very long time to
	 * complete and hence not suitable to invoke from interrupt
	 * handlers.
	 */
	MUTEX_INIT(&classify_ptr->tcam_lock, NULL,
	    NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
		/* FCRAM and per-partition hash locks: Neptune only. */
		MUTEX_INIT(&classify_ptr->fcram_lock, NULL,
		    NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
		for (partition = 0; partition < MAX_PARTITION; partition++) {
			MUTEX_INIT(&classify_ptr->hash_lock[partition], NULL,
			    NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
		}
	}

nxge_setup_mutexes_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "<== nxge_setup_mutexes status = %x", status));

	if (ddi_status != DDI_SUCCESS)
		status |= (NXGE_ERROR | NXGE_DDI_FAILED);

	return (status);
}
/*
 * nxge_destroy_mutexes
 *
 *	Tear down every lock created by nxge_setup_mutexes().  Must only
 *	be called after all interrupt handlers and timers that use these
 *	locks have been quiesced.
 */
static void
nxge_destroy_mutexes(p_nxge_t nxgep)
{
	int partition;
	nxge_classify_t *classify_ptr;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_mutexes"));
	RW_DESTROY(&nxgep->filter_lock);
	MUTEX_DESTROY(&nxgep->group_lock);
	MUTEX_DESTROY(&nxgep->mif_lock);
	MUTEX_DESTROY(&nxgep->ouraddr_lock);
	MUTEX_DESTROY(nxgep->genlock);

	classify_ptr = &nxgep->classifier;
	MUTEX_DESTROY(&classify_ptr->tcam_lock);

	/* Destroy all polling resources. */
	MUTEX_DESTROY(&nxgep->poll_lock);
	cv_destroy(&nxgep->poll_cv);

	/* free data structures, based on HW type */
	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
		/* FCRAM/hash locks only exist on Neptune hardware. */
		MUTEX_DESTROY(&classify_ptr->fcram_lock);
		for (partition = 0; partition < MAX_PARTITION; partition++) {
			MUTEX_DESTROY(&classify_ptr->hash_lock[partition]);
		}
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_mutexes"));
}
/*
 * nxge_init
 *
 *	Bring the hardware to an initialized state: allocate the DMA
 *	memory pools, then initialize TXC, TX/RX DMA channels, the
 *	classifier (TCAM/FCRAM), ZCP, IPP and the MAC, and finally
 *	enable interrupts.  Guest domains stop after the DMA channels
 *	(the service domain owns the shared blocks).  Idempotent: returns
 *	immediately if STATE_HW_INITIALIZED is already set.
 *
 *	On failure, the goto ladder below unwinds exactly the stages
 *	that completed, in reverse order.
 *
 * Returns:
 *	NXGE_OK, or the error status of the first stage that failed.
 */
nxge_status_t
nxge_init(p_nxge_t nxgep)
{
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_init"));

	if (nxgep->drv_state & STATE_HW_INITIALIZED) {
		return (status);
	}

	/*
	 * Allocate system memory for the receive/transmit buffer blocks
	 * and receive/transmit descriptor rings.
	 */
	status = nxge_alloc_mem_pool(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "alloc mem failed\n"));
		goto nxge_init_fail1;
	}

	if (!isLDOMguest(nxgep)) {
		/*
		 * Initialize and enable the TXC registers.
		 * (Globally enable the Tx controller,
		 * enable the port, configure the dma channel bitmap,
		 * configure the max burst size).
		 */
		status = nxge_txc_init(nxgep);
		if (status != NXGE_OK) {
			NXGE_ERROR_MSG((nxgep,
			    NXGE_ERR_CTL, "init txc failed\n"));
			goto nxge_init_fail2;
		}
	}

	/*
	 * Initialize and enable TXDMA channels.
	 */
	status = nxge_init_txdma_channels(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init txdma failed\n"));
		goto nxge_init_fail3;
	}

	/*
	 * Initialize and enable RXDMA channels.
	 */
	status = nxge_init_rxdma_channels(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init rxdma failed\n"));
		goto nxge_init_fail4;
	}

	/*
	 * The guest domain is now done.
	 */
	if (isLDOMguest(nxgep)) {
		nxgep->drv_state |= STATE_HW_INITIALIZED;
		goto nxge_init_exit;
	}

	/*
	 * Initialize TCAM and FCRAM (Neptune).
	 */
	status = nxge_classify_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init classify failed\n"));
		goto nxge_init_fail5;
	}

	/*
	 * Initialize ZCP
	 */
	status = nxge_zcp_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init ZCP failed\n"));
		goto nxge_init_fail5;
	}

	/*
	 * Initialize IPP.
	 */
	status = nxge_ipp_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init IPP failed\n"));
		goto nxge_init_fail5;
	}

	/*
	 * Initialize the MAC block.
	 */
	status = nxge_mac_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init MAC failed\n"));
		goto nxge_init_fail5;
	}

	nxge_intrs_enable(nxgep); /* XXX What changes do I need to make here? */

	/*
	 * Enable hardware interrupts.
	 */
	nxge_intr_hw_enable(nxgep);
	nxgep->drv_state |= STATE_HW_INITIALIZED;

	goto nxge_init_exit;

	/* Unwind: each label undoes the stages that completed before it. */
nxge_init_fail5:
	nxge_uninit_rxdma_channels(nxgep);
nxge_init_fail4:
	nxge_uninit_txdma_channels(nxgep);
nxge_init_fail3:
	if (!isLDOMguest(nxgep)) {
		(void) nxge_txc_uninit(nxgep);
	}
nxge_init_fail2:
	nxge_free_mem_pool(nxgep);
nxge_init_fail1:
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "<== nxge_init status (failed) = 0x%08x", status));
	return (status);

nxge_init_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_init status = 0x%08x",
	    status));
	return (status);
}
/*
 * nxge_uninit
 *
 *	Reverse nxge_init(): stop the timer and link monitor, disable
 *	interrupts, quiesce RX MAC / IPP / classifier, stop and free the
 *	TX/RX DMA channels, disable the TX MAC, and release the DMA
 *	memory pools.  The teardown order (RX side before TX side)
 *	matters and must not be changed.  No-op if the hardware was
 *	never initialized.
 */
void
nxge_uninit(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_uninit"));

	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "==> nxge_uninit: not initialized"));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "<== nxge_uninit"));
		return;
	}

	/* stop timer */
	if (nxgep->nxge_timerid) {
		nxge_stop_timer(nxgep, nxgep->nxge_timerid);
		nxgep->nxge_timerid = 0;
	}

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
	(void) nxge_intr_hw_disable(nxgep);

	/*
	 * Reset the receive MAC side.
	 */
	(void) nxge_rx_mac_disable(nxgep);

	/* Disable and soft reset the IPP */
	if (!isLDOMguest(nxgep))
		(void) nxge_ipp_disable(nxgep);

	/* Free classification resources */
	(void) nxge_classify_uninit(nxgep);

	/*
	 * Reset the transmit/receive DMA side.
	 */
	(void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_STOP);
	(void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP);

	nxge_uninit_txdma_channels(nxgep);
	nxge_uninit_rxdma_channels(nxgep);

	/*
	 * Reset the transmit MAC side.
	 */
	(void) nxge_tx_mac_disable(nxgep);

	nxge_free_mem_pool(nxgep);

	/*
	 * Start the timer if the reset flag is not set.
	 * If this reset flag is set, the link monitor
	 * will not be started in order to stop furthur bus
	 * activities coming from this interface.
	 * The driver will start the monitor function
	 * if the interface was initialized again later.
	 */
	if (!nxge_peu_reset_enable) {
		(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
	}

	nxgep->drv_state &= ~STATE_HW_INITIALIZED;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_uninit: "
	    "nxge_mblks_pending %d", nxge_mblks_pending));
}
1691 */ 1692 (void) nxge_rx_mac_disable(nxgep); 1693 1694 /* Disable and soft reset the IPP */ 1695 if (!isLDOMguest(nxgep)) 1696 (void) nxge_ipp_disable(nxgep); 1697 1698 /* Free classification resources */ 1699 (void) nxge_classify_uninit(nxgep); 1700 1701 /* 1702 * Reset the transmit/receive DMA side. 1703 */ 1704 (void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_STOP); 1705 (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP); 1706 1707 nxge_uninit_txdma_channels(nxgep); 1708 nxge_uninit_rxdma_channels(nxgep); 1709 1710 /* 1711 * Reset the transmit MAC side. 1712 */ 1713 (void) nxge_tx_mac_disable(nxgep); 1714 1715 nxge_free_mem_pool(nxgep); 1716 1717 /* 1718 * Start the timer if the reset flag is not set. 1719 * If this reset flag is set, the link monitor 1720 * will not be started in order to stop furthur bus 1721 * activities coming from this interface. 1722 * The driver will start the monitor function 1723 * if the interface was initialized again later. 1724 */ 1725 if (!nxge_peu_reset_enable) { 1726 (void) nxge_link_monitor(nxgep, LINK_MONITOR_START); 1727 } 1728 1729 nxgep->drv_state &= ~STATE_HW_INITIALIZED; 1730 1731 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_uninit: " 1732 "nxge_mblks_pending %d", nxge_mblks_pending)); 1733 } 1734 1735 void 1736 nxge_get64(p_nxge_t nxgep, p_mblk_t mp) 1737 { 1738 #if defined(__i386) 1739 size_t reg; 1740 #else 1741 uint64_t reg; 1742 #endif 1743 uint64_t regdata; 1744 int i, retry; 1745 1746 bcopy((char *)mp->b_rptr, (char *)®, sizeof (uint64_t)); 1747 regdata = 0; 1748 retry = 1; 1749 1750 for (i = 0; i < retry; i++) { 1751 NXGE_REG_RD64(nxgep->npi_handle, reg, ®data); 1752 } 1753 bcopy((char *)®data, (char *)mp->b_rptr, sizeof (uint64_t)); 1754 } 1755 1756 void 1757 nxge_put64(p_nxge_t nxgep, p_mblk_t mp) 1758 { 1759 #if defined(__i386) 1760 size_t reg; 1761 #else 1762 uint64_t reg; 1763 #endif 1764 uint64_t buf[2]; 1765 1766 bcopy((char *)mp->b_rptr, (char *)&buf[0], 2 * sizeof (uint64_t)); 1767 #if defined(__i386) 1768 reg = 
(size_t)buf[0]; 1769 #else 1770 reg = buf[0]; 1771 #endif 1772 1773 NXGE_NPI_PIO_WRITE64(nxgep->npi_handle, reg, buf[1]); 1774 } 1775 1776 1777 nxge_os_mutex_t nxgedebuglock; 1778 int nxge_debug_init = 0; 1779 1780 /*ARGSUSED*/ 1781 /*VARARGS*/ 1782 void 1783 nxge_debug_msg(p_nxge_t nxgep, uint64_t level, char *fmt, ...) 1784 { 1785 char msg_buffer[1048]; 1786 char prefix_buffer[32]; 1787 int instance; 1788 uint64_t debug_level; 1789 int cmn_level = CE_CONT; 1790 va_list ap; 1791 1792 if (nxgep && nxgep->nxge_debug_level != nxge_debug_level) { 1793 /* In case a developer has changed nxge_debug_level. */ 1794 if (nxgep->nxge_debug_level != nxge_debug_level) 1795 nxgep->nxge_debug_level = nxge_debug_level; 1796 } 1797 1798 debug_level = (nxgep == NULL) ? nxge_debug_level : 1799 nxgep->nxge_debug_level; 1800 1801 if ((level & debug_level) || 1802 (level == NXGE_NOTE) || 1803 (level == NXGE_ERR_CTL)) { 1804 /* do the msg processing */ 1805 if (nxge_debug_init == 0) { 1806 MUTEX_INIT(&nxgedebuglock, NULL, MUTEX_DRIVER, NULL); 1807 nxge_debug_init = 1; 1808 } 1809 1810 MUTEX_ENTER(&nxgedebuglock); 1811 1812 if ((level & NXGE_NOTE)) { 1813 cmn_level = CE_NOTE; 1814 } 1815 1816 if (level & NXGE_ERR_CTL) { 1817 cmn_level = CE_WARN; 1818 } 1819 1820 va_start(ap, fmt); 1821 (void) vsprintf(msg_buffer, fmt, ap); 1822 va_end(ap); 1823 if (nxgep == NULL) { 1824 instance = -1; 1825 (void) sprintf(prefix_buffer, "%s :", "nxge"); 1826 } else { 1827 instance = nxgep->instance; 1828 (void) sprintf(prefix_buffer, 1829 "%s%d :", "nxge", instance); 1830 } 1831 1832 MUTEX_EXIT(&nxgedebuglock); 1833 cmn_err(cmn_level, "!%s %s\n", 1834 prefix_buffer, msg_buffer); 1835 1836 } 1837 } 1838 1839 char * 1840 nxge_dump_packet(char *addr, int size) 1841 { 1842 uchar_t *ap = (uchar_t *)addr; 1843 int i; 1844 static char etherbuf[1024]; 1845 char *cp = etherbuf; 1846 char digits[] = "0123456789abcdef"; 1847 1848 if (!size) 1849 size = 60; 1850 1851 if (size > MAX_DUMP_SZ) { 1852 /* Dump the leading 
bytes */ 1853 for (i = 0; i < MAX_DUMP_SZ/2; i++) { 1854 if (*ap > 0x0f) 1855 *cp++ = digits[*ap >> 4]; 1856 *cp++ = digits[*ap++ & 0xf]; 1857 *cp++ = ':'; 1858 } 1859 for (i = 0; i < 20; i++) 1860 *cp++ = '.'; 1861 /* Dump the last MAX_DUMP_SZ/2 bytes */ 1862 ap = (uchar_t *)(addr + (size - MAX_DUMP_SZ/2)); 1863 for (i = 0; i < MAX_DUMP_SZ/2; i++) { 1864 if (*ap > 0x0f) 1865 *cp++ = digits[*ap >> 4]; 1866 *cp++ = digits[*ap++ & 0xf]; 1867 *cp++ = ':'; 1868 } 1869 } else { 1870 for (i = 0; i < size; i++) { 1871 if (*ap > 0x0f) 1872 *cp++ = digits[*ap >> 4]; 1873 *cp++ = digits[*ap++ & 0xf]; 1874 *cp++ = ':'; 1875 } 1876 } 1877 *--cp = 0; 1878 return (etherbuf); 1879 } 1880 1881 #ifdef NXGE_DEBUG 1882 static void 1883 nxge_test_map_regs(p_nxge_t nxgep) 1884 { 1885 ddi_acc_handle_t cfg_handle; 1886 p_pci_cfg_t cfg_ptr; 1887 ddi_acc_handle_t dev_handle; 1888 char *dev_ptr; 1889 ddi_acc_handle_t pci_config_handle; 1890 uint32_t regval; 1891 int i; 1892 1893 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_test_map_regs")); 1894 1895 dev_handle = nxgep->dev_regs->nxge_regh; 1896 dev_ptr = (char *)nxgep->dev_regs->nxge_regp; 1897 1898 if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) { 1899 cfg_handle = nxgep->dev_regs->nxge_pciregh; 1900 cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp; 1901 1902 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1903 "Neptune PCI regp cfg_ptr 0x%llx", (char *)cfg_ptr)); 1904 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1905 "Neptune PCI cfg_ptr vendor id ptr 0x%llx", 1906 &cfg_ptr->vendorid)); 1907 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1908 "\tvendorid 0x%x devid 0x%x", 1909 NXGE_PIO_READ16(cfg_handle, &cfg_ptr->vendorid, 0), 1910 NXGE_PIO_READ16(cfg_handle, &cfg_ptr->devid, 0))); 1911 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1912 "PCI BAR: base 0x%x base14 0x%x base 18 0x%x " 1913 "bar1c 0x%x", 1914 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base, 0), 1915 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base14, 0), 1916 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base18, 0), 1917 NXGE_PIO_READ32(cfg_handle, 
#ifdef NXGE_DEBUG
/*
 * nxge_test_map_regs
 *
 *	Debug-only sanity probe of the freshly mapped register sets:
 *	dumps PCI config identity/BAR registers (Neptune only) and the
 *	first four 64-bit words of the device register space.
 *
 *	Fix: the fourth device-space read used cfg_handle with dev_ptr;
 *	dev_ptr belongs to dev_handle.  Also dropped unused locals
 *	(pci_config_handle, regval, i).
 */
static void
nxge_test_map_regs(p_nxge_t nxgep)
{
	ddi_acc_handle_t cfg_handle;
	p_pci_cfg_t cfg_ptr;
	ddi_acc_handle_t dev_handle;
	char *dev_ptr;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_test_map_regs"));

	dev_handle = nxgep->dev_regs->nxge_regh;
	dev_ptr = (char *)nxgep->dev_regs->nxge_regp;

	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
		cfg_handle = nxgep->dev_regs->nxge_pciregh;
		cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp;

		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "Neptune PCI regp cfg_ptr 0x%llx", (char *)cfg_ptr));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "Neptune PCI cfg_ptr vendor id ptr 0x%llx",
		    &cfg_ptr->vendorid));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "\tvendorid 0x%x devid 0x%x",
		    NXGE_PIO_READ16(cfg_handle, &cfg_ptr->vendorid, 0),
		    NXGE_PIO_READ16(cfg_handle, &cfg_ptr->devid, 0)));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "PCI BAR: base 0x%x base14 0x%x base 18 0x%x "
		    "bar1c 0x%x",
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base14, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base18, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base1c, 0)));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "\nNeptune PCI BAR: base20 0x%x base24 0x%x "
		    "base 28 0x%x bar2c 0x%x\n",
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base20, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base24, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base28, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base2c, 0)));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "\nNeptune PCI BAR: base30 0x%x\n",
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base30, 0)));

		cfg_handle = nxgep->dev_regs->nxge_pciregh;
		cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp;
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "first 0x%llx second 0x%llx third 0x%llx "
		    "last 0x%llx ",
		    NXGE_PIO_READ64(dev_handle,
		    (uint64_t *)(dev_ptr + 0), 0),
		    NXGE_PIO_READ64(dev_handle,
		    (uint64_t *)(dev_ptr + 8), 0),
		    NXGE_PIO_READ64(dev_handle,
		    (uint64_t *)(dev_ptr + 16), 0),
		    /* Fix: was cfg_handle; dev_ptr is in dev_handle space. */
		    NXGE_PIO_READ64(dev_handle,
		    (uint64_t *)(dev_ptr + 24), 0)));
	}
}

#endif
1982 nxge_status_t status = NXGE_OK; 1983 1984 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_dev port %d", 1985 nxgep->mac.portnum)); 1986 1987 status = nxge_link_init(nxgep); 1988 1989 if (fm_check_acc_handle(nxgep->dev_regs->nxge_regh) != DDI_FM_OK) { 1990 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1991 "port%d Bad register acc handle", nxgep->mac.portnum)); 1992 status = NXGE_ERROR; 1993 } 1994 1995 if (status != NXGE_OK) { 1996 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1997 " nxge_setup_dev status " 1998 "(xcvr init 0x%08x)", status)); 1999 goto nxge_setup_dev_exit; 2000 } 2001 2002 nxge_setup_dev_exit: 2003 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 2004 "<== nxge_setup_dev port %d status = 0x%08x", 2005 nxgep->mac.portnum, status)); 2006 2007 return (status); 2008 } 2009 2010 static void 2011 nxge_destroy_dev(p_nxge_t nxgep) 2012 { 2013 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_dev")); 2014 2015 (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP); 2016 2017 (void) nxge_hw_stop(nxgep); 2018 2019 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_dev")); 2020 } 2021 2022 static nxge_status_t 2023 nxge_setup_system_dma_pages(p_nxge_t nxgep) 2024 { 2025 int ddi_status = DDI_SUCCESS; 2026 uint_t count; 2027 ddi_dma_cookie_t cookie; 2028 uint_t iommu_pagesize; 2029 nxge_status_t status = NXGE_OK; 2030 2031 NXGE_ERROR_MSG((nxgep, DDI_CTL, "==> nxge_setup_system_dma_pages")); 2032 nxgep->sys_page_sz = ddi_ptob(nxgep->dip, (ulong_t)1); 2033 if (nxgep->niu_type != N2_NIU) { 2034 iommu_pagesize = dvma_pagesize(nxgep->dip); 2035 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 2036 " nxge_setup_system_dma_pages: page %d (ddi_ptob %d) " 2037 " default_block_size %d iommu_pagesize %d", 2038 nxgep->sys_page_sz, 2039 ddi_ptob(nxgep->dip, (ulong_t)1), 2040 nxgep->rx_default_block_size, 2041 iommu_pagesize)); 2042 2043 if (iommu_pagesize != 0) { 2044 if (nxgep->sys_page_sz == iommu_pagesize) { 2045 if (iommu_pagesize > 0x4000) 2046 nxgep->sys_page_sz = 0x4000; 2047 } else { 2048 if (nxgep->sys_page_sz > 
/*
 * nxge_setup_system_dma_pages
 *
 *	Determine the effective system page size (clamped against the
 *	IOMMU page size on non-NIU hardware, capped at 16K), derive the
 *	RX default block size and RBR block-size code from it, and probe
 *	the system DMA burst size by binding a throwaway DMA handle.
 *
 * Returns:
 *	NXGE_OK, or NXGE_ERROR | NXGE_DDI_FAILED if the spare DMA
 *	handle cannot be allocated or bound.
 */
static nxge_status_t
nxge_setup_system_dma_pages(p_nxge_t nxgep)
{
	int ddi_status = DDI_SUCCESS;
	uint_t count;
	ddi_dma_cookie_t cookie;
	uint_t iommu_pagesize;
	nxge_status_t status = NXGE_OK;

	NXGE_ERROR_MSG((nxgep, DDI_CTL, "==> nxge_setup_system_dma_pages"));
	nxgep->sys_page_sz = ddi_ptob(nxgep->dip, (ulong_t)1);
	if (nxgep->niu_type != N2_NIU) {
		/* Neptune: respect the IOMMU page size if it is smaller. */
		iommu_pagesize = dvma_pagesize(nxgep->dip);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    " nxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
		    " default_block_size %d iommu_pagesize %d",
		    nxgep->sys_page_sz,
		    ddi_ptob(nxgep->dip, (ulong_t)1),
		    nxgep->rx_default_block_size,
		    iommu_pagesize));

		if (iommu_pagesize != 0) {
			if (nxgep->sys_page_sz == iommu_pagesize) {
				/* Hardware block size tops out at 16K. */
				if (iommu_pagesize > 0x4000)
					nxgep->sys_page_sz = 0x4000;
			} else {
				if (nxgep->sys_page_sz > iommu_pagesize)
					nxgep->sys_page_sz = iommu_pagesize;
			}
		}
	}
	nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1);
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "==> nxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
	    "default_block_size %d page mask %d",
	    nxgep->sys_page_sz,
	    ddi_ptob(nxgep->dip, (ulong_t)1),
	    nxgep->rx_default_block_size,
	    nxgep->sys_page_mask));


	/* Map the page size to the RBR block-size code; 4K fallback. */
	switch (nxgep->sys_page_sz) {
	default:
		nxgep->sys_page_sz = 0x1000;
		nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1);
		nxgep->rx_default_block_size = 0x1000;
		nxgep->rx_bksize_code = RBR_BKSIZE_4K;
		break;
	case 0x1000:
		nxgep->rx_default_block_size = 0x1000;
		nxgep->rx_bksize_code = RBR_BKSIZE_4K;
		break;
	case 0x2000:
		nxgep->rx_default_block_size = 0x2000;
		nxgep->rx_bksize_code = RBR_BKSIZE_8K;
		break;
	case 0x4000:
		nxgep->rx_default_block_size = 0x4000;
		nxgep->rx_bksize_code = RBR_BKSIZE_16K;
		break;
	case 0x8000:
		nxgep->rx_default_block_size = 0x8000;
		nxgep->rx_bksize_code = RBR_BKSIZE_32K;
		break;
	}

#ifndef USE_RX_BIG_BUF
	nxge_rx_dma_attr.dma_attr_align = nxgep->sys_page_sz;
#else
	nxgep->rx_default_block_size = 0x2000;
	nxgep->rx_bksize_code = RBR_BKSIZE_8K;
#endif
	/*
	 * Get the system DMA burst size.
	 */
	ddi_status = ddi_dma_alloc_handle(nxgep->dip, &nxge_tx_dma_attr,
	    DDI_DMA_DONTWAIT, 0,
	    &nxgep->dmasparehandle);
	if (ddi_status != DDI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "ddi_dma_alloc_handle: failed "
		    " status 0x%x", ddi_status));
		goto nxge_get_soft_properties_exit;
	}

	/* Bind anything (the handle itself) just to learn burst sizes. */
	ddi_status = ddi_dma_addr_bind_handle(nxgep->dmasparehandle, NULL,
	    (caddr_t)nxgep->dmasparehandle,
	    sizeof (nxgep->dmasparehandle),
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, 0,
	    &cookie, &count);
	if (ddi_status != DDI_DMA_MAPPED) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "Binding spare handle to find system"
		    " burstsize failed."));
		ddi_status = DDI_FAILURE;
		goto nxge_get_soft_properties_fail1;
	}

	nxgep->sys_burst_sz = ddi_dma_burstsizes(nxgep->dmasparehandle);
	(void) ddi_dma_unbind_handle(nxgep->dmasparehandle);

	/* Success falls through: the spare handle is freed either way. */
nxge_get_soft_properties_fail1:
	ddi_dma_free_handle(&nxgep->dmasparehandle);

nxge_get_soft_properties_exit:

	if (ddi_status != DDI_SUCCESS)
		status |= (NXGE_ERROR | NXGE_DDI_FAILED);

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "<== nxge_setup_system_dma_pages status = 0x%08x", status));
	return (status);
}
2096 */ 2097 ddi_status = ddi_dma_alloc_handle(nxgep->dip, &nxge_tx_dma_attr, 2098 DDI_DMA_DONTWAIT, 0, 2099 &nxgep->dmasparehandle); 2100 if (ddi_status != DDI_SUCCESS) { 2101 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2102 "ddi_dma_alloc_handle: failed " 2103 " status 0x%x", ddi_status)); 2104 goto nxge_get_soft_properties_exit; 2105 } 2106 2107 ddi_status = ddi_dma_addr_bind_handle(nxgep->dmasparehandle, NULL, 2108 (caddr_t)nxgep->dmasparehandle, 2109 sizeof (nxgep->dmasparehandle), 2110 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 2111 DDI_DMA_DONTWAIT, 0, 2112 &cookie, &count); 2113 if (ddi_status != DDI_DMA_MAPPED) { 2114 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2115 "Binding spare handle to find system" 2116 " burstsize failed.")); 2117 ddi_status = DDI_FAILURE; 2118 goto nxge_get_soft_properties_fail1; 2119 } 2120 2121 nxgep->sys_burst_sz = ddi_dma_burstsizes(nxgep->dmasparehandle); 2122 (void) ddi_dma_unbind_handle(nxgep->dmasparehandle); 2123 2124 nxge_get_soft_properties_fail1: 2125 ddi_dma_free_handle(&nxgep->dmasparehandle); 2126 2127 nxge_get_soft_properties_exit: 2128 2129 if (ddi_status != DDI_SUCCESS) 2130 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 2131 2132 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 2133 "<== nxge_setup_system_dma_pages status = 0x%08x", status)); 2134 return (status); 2135 } 2136 2137 static nxge_status_t 2138 nxge_alloc_mem_pool(p_nxge_t nxgep) 2139 { 2140 nxge_status_t status = NXGE_OK; 2141 2142 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_alloc_mem_pool")); 2143 2144 status = nxge_alloc_rx_mem_pool(nxgep); 2145 if (status != NXGE_OK) { 2146 return (NXGE_ERROR); 2147 } 2148 2149 status = nxge_alloc_tx_mem_pool(nxgep); 2150 if (status != NXGE_OK) { 2151 nxge_free_rx_mem_pool(nxgep); 2152 return (NXGE_ERROR); 2153 } 2154 2155 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_alloc_mem_pool")); 2156 return (NXGE_OK); 2157 } 2158 2159 static void 2160 nxge_free_mem_pool(p_nxge_t nxgep) 2161 { 2162 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_mem_pool")); 2163 2164 
nxge_free_rx_mem_pool(nxgep); 2165 nxge_free_tx_mem_pool(nxgep); 2166 2167 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_mem_pool")); 2168 } 2169 2170 nxge_status_t 2171 nxge_alloc_rx_mem_pool(p_nxge_t nxgep) 2172 { 2173 uint32_t rdc_max; 2174 p_nxge_dma_pt_cfg_t p_all_cfgp; 2175 p_nxge_hw_pt_cfg_t p_cfgp; 2176 p_nxge_dma_pool_t dma_poolp; 2177 p_nxge_dma_common_t *dma_buf_p; 2178 p_nxge_dma_pool_t dma_cntl_poolp; 2179 p_nxge_dma_common_t *dma_cntl_p; 2180 uint32_t *num_chunks; /* per dma */ 2181 nxge_status_t status = NXGE_OK; 2182 2183 uint32_t nxge_port_rbr_size; 2184 uint32_t nxge_port_rbr_spare_size; 2185 uint32_t nxge_port_rcr_size; 2186 uint32_t rx_cntl_alloc_size; 2187 2188 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_mem_pool")); 2189 2190 p_all_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config; 2191 p_cfgp = (p_nxge_hw_pt_cfg_t)&p_all_cfgp->hw_config; 2192 rdc_max = NXGE_MAX_RDCS; 2193 2194 /* 2195 * Allocate memory for the common DMA data structures. 2196 */ 2197 dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t), 2198 KM_SLEEP); 2199 dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 2200 sizeof (p_nxge_dma_common_t) * rdc_max, KM_SLEEP); 2201 2202 dma_cntl_poolp = (p_nxge_dma_pool_t) 2203 KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP); 2204 dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 2205 sizeof (p_nxge_dma_common_t) * rdc_max, KM_SLEEP); 2206 2207 num_chunks = (uint32_t *)KMEM_ZALLOC( 2208 sizeof (uint32_t) * rdc_max, KM_SLEEP); 2209 2210 /* 2211 * Assume that each DMA channel will be configured with 2212 * the default block size. 2213 * rbr block counts are modulo the batch count (16). 
2214 */ 2215 nxge_port_rbr_size = p_all_cfgp->rbr_size; 2216 nxge_port_rcr_size = p_all_cfgp->rcr_size; 2217 2218 if (!nxge_port_rbr_size) { 2219 nxge_port_rbr_size = NXGE_RBR_RBB_DEFAULT; 2220 } 2221 if (nxge_port_rbr_size % NXGE_RXDMA_POST_BATCH) { 2222 nxge_port_rbr_size = (NXGE_RXDMA_POST_BATCH * 2223 (nxge_port_rbr_size / NXGE_RXDMA_POST_BATCH + 1)); 2224 } 2225 2226 p_all_cfgp->rbr_size = nxge_port_rbr_size; 2227 nxge_port_rbr_spare_size = nxge_rbr_spare_size; 2228 2229 if (nxge_port_rbr_spare_size % NXGE_RXDMA_POST_BATCH) { 2230 nxge_port_rbr_spare_size = (NXGE_RXDMA_POST_BATCH * 2231 (nxge_port_rbr_spare_size / NXGE_RXDMA_POST_BATCH + 1)); 2232 } 2233 if (nxge_port_rbr_size > RBR_DEFAULT_MAX_BLKS) { 2234 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 2235 "nxge_alloc_rx_mem_pool: RBR size too high %d, " 2236 "set to default %d", 2237 nxge_port_rbr_size, RBR_DEFAULT_MAX_BLKS)); 2238 nxge_port_rbr_size = RBR_DEFAULT_MAX_BLKS; 2239 } 2240 if (nxge_port_rcr_size > RCR_DEFAULT_MAX) { 2241 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 2242 "nxge_alloc_rx_mem_pool: RCR too high %d, " 2243 "set to default %d", 2244 nxge_port_rcr_size, RCR_DEFAULT_MAX)); 2245 nxge_port_rcr_size = RCR_DEFAULT_MAX; 2246 } 2247 2248 /* 2249 * N2/NIU has limitation on the descriptor sizes (contiguous 2250 * memory allocation on data buffers to 4M (contig_mem_alloc) 2251 * and little endian for control buffers (must use the ddi/dki mem alloc 2252 * function). 
2253 */ 2254 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2255 if (nxgep->niu_type == N2_NIU) { 2256 nxge_port_rbr_spare_size = 0; 2257 if ((nxge_port_rbr_size > NXGE_NIU_CONTIG_RBR_MAX) || 2258 (!ISP2(nxge_port_rbr_size))) { 2259 nxge_port_rbr_size = NXGE_NIU_CONTIG_RBR_MAX; 2260 } 2261 if ((nxge_port_rcr_size > NXGE_NIU_CONTIG_RCR_MAX) || 2262 (!ISP2(nxge_port_rcr_size))) { 2263 nxge_port_rcr_size = NXGE_NIU_CONTIG_RCR_MAX; 2264 } 2265 } 2266 #endif 2267 2268 /* 2269 * Addresses of receive block ring, receive completion ring and the 2270 * mailbox must be all cache-aligned (64 bytes). 2271 */ 2272 rx_cntl_alloc_size = nxge_port_rbr_size + nxge_port_rbr_spare_size; 2273 rx_cntl_alloc_size *= (sizeof (rx_desc_t)); 2274 rx_cntl_alloc_size += (sizeof (rcr_entry_t) * nxge_port_rcr_size); 2275 rx_cntl_alloc_size += sizeof (rxdma_mailbox_t); 2276 2277 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_alloc_rx_mem_pool: " 2278 "nxge_port_rbr_size = %d nxge_port_rbr_spare_size = %d " 2279 "nxge_port_rcr_size = %d " 2280 "rx_cntl_alloc_size = %d", 2281 nxge_port_rbr_size, nxge_port_rbr_spare_size, 2282 nxge_port_rcr_size, 2283 rx_cntl_alloc_size)); 2284 2285 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2286 if (nxgep->niu_type == N2_NIU) { 2287 uint32_t rx_buf_alloc_size = (nxgep->rx_default_block_size * 2288 (nxge_port_rbr_size + nxge_port_rbr_spare_size)); 2289 2290 if (!ISP2(rx_buf_alloc_size)) { 2291 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2292 "==> nxge_alloc_rx_mem_pool: " 2293 " must be power of 2")); 2294 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 2295 goto nxge_alloc_rx_mem_pool_exit; 2296 } 2297 2298 if (rx_buf_alloc_size > (1 << 22)) { 2299 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2300 "==> nxge_alloc_rx_mem_pool: " 2301 " limit size to 4M")); 2302 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 2303 goto nxge_alloc_rx_mem_pool_exit; 2304 } 2305 2306 if (rx_cntl_alloc_size < 0x2000) { 2307 rx_cntl_alloc_size = 0x2000; 2308 } 2309 } 2310 #endif 2311 nxgep->nxge_port_rbr_size 
= nxge_port_rbr_size; 2312 nxgep->nxge_port_rcr_size = nxge_port_rcr_size; 2313 nxgep->nxge_port_rbr_spare_size = nxge_port_rbr_spare_size; 2314 nxgep->nxge_port_rx_cntl_alloc_size = rx_cntl_alloc_size; 2315 2316 dma_poolp->ndmas = p_cfgp->max_rdcs; 2317 dma_poolp->num_chunks = num_chunks; 2318 dma_poolp->buf_allocated = B_TRUE; 2319 nxgep->rx_buf_pool_p = dma_poolp; 2320 dma_poolp->dma_buf_pool_p = dma_buf_p; 2321 2322 dma_cntl_poolp->ndmas = p_cfgp->max_rdcs; 2323 dma_cntl_poolp->buf_allocated = B_TRUE; 2324 nxgep->rx_cntl_pool_p = dma_cntl_poolp; 2325 dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p; 2326 2327 /* Allocate the receive rings, too. */ 2328 nxgep->rx_rbr_rings = 2329 KMEM_ZALLOC(sizeof (rx_rbr_rings_t), KM_SLEEP); 2330 nxgep->rx_rbr_rings->rbr_rings = 2331 KMEM_ZALLOC(sizeof (p_rx_rbr_ring_t) * rdc_max, KM_SLEEP); 2332 nxgep->rx_rcr_rings = 2333 KMEM_ZALLOC(sizeof (rx_rcr_rings_t), KM_SLEEP); 2334 nxgep->rx_rcr_rings->rcr_rings = 2335 KMEM_ZALLOC(sizeof (p_rx_rcr_ring_t) * rdc_max, KM_SLEEP); 2336 nxgep->rx_mbox_areas_p = 2337 KMEM_ZALLOC(sizeof (rx_mbox_areas_t), KM_SLEEP); 2338 nxgep->rx_mbox_areas_p->rxmbox_areas = 2339 KMEM_ZALLOC(sizeof (p_rx_mbox_t) * rdc_max, KM_SLEEP); 2340 2341 nxgep->rx_rbr_rings->ndmas = nxgep->rx_rcr_rings->ndmas = 2342 p_cfgp->max_rdcs; 2343 2344 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2345 "<== nxge_alloc_rx_mem_pool:status 0x%08x", status)); 2346 2347 nxge_alloc_rx_mem_pool_exit: 2348 return (status); 2349 } 2350 2351 /* 2352 * nxge_alloc_rxb 2353 * 2354 * Allocate buffers for an RDC. 2355 * 2356 * Arguments: 2357 * nxgep 2358 * channel The channel to map into our kernel space. 
2359 * 2360 * Notes: 2361 * 2362 * NPI function calls: 2363 * 2364 * NXGE function calls: 2365 * 2366 * Registers accessed: 2367 * 2368 * Context: 2369 * 2370 * Taking apart: 2371 * 2372 * Open questions: 2373 * 2374 */ 2375 nxge_status_t 2376 nxge_alloc_rxb( 2377 p_nxge_t nxgep, 2378 int channel) 2379 { 2380 size_t rx_buf_alloc_size; 2381 nxge_status_t status = NXGE_OK; 2382 2383 nxge_dma_common_t **data; 2384 nxge_dma_common_t **control; 2385 uint32_t *num_chunks; 2386 2387 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rbb")); 2388 2389 /* 2390 * Allocate memory for the receive buffers and descriptor rings. 2391 * Replace these allocation functions with the interface functions 2392 * provided by the partition manager if/when they are available. 2393 */ 2394 2395 /* 2396 * Allocate memory for the receive buffer blocks. 2397 */ 2398 rx_buf_alloc_size = (nxgep->rx_default_block_size * 2399 (nxgep->nxge_port_rbr_size + nxgep->nxge_port_rbr_spare_size)); 2400 2401 data = &nxgep->rx_buf_pool_p->dma_buf_pool_p[channel]; 2402 num_chunks = &nxgep->rx_buf_pool_p->num_chunks[channel]; 2403 2404 if ((status = nxge_alloc_rx_buf_dma( 2405 nxgep, channel, data, rx_buf_alloc_size, 2406 nxgep->rx_default_block_size, num_chunks)) != NXGE_OK) { 2407 return (status); 2408 } 2409 2410 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_alloc_rxb(): " 2411 "dma %d dma_buf_p %llx &dma_buf_p %llx", channel, *data, data)); 2412 2413 /* 2414 * Allocate memory for descriptor rings and mailbox. 
2415 */ 2416 control = &nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel]; 2417 2418 if ((status = nxge_alloc_rx_cntl_dma( 2419 nxgep, channel, control, nxgep->nxge_port_rx_cntl_alloc_size)) 2420 != NXGE_OK) { 2421 nxge_free_rx_cntl_dma(nxgep, *control); 2422 (*data)->buf_alloc_state |= BUF_ALLOCATED_WAIT_FREE; 2423 nxge_free_rx_buf_dma(nxgep, *data, *num_chunks); 2424 return (status); 2425 } 2426 2427 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2428 "<== nxge_alloc_rx_mem_pool:status 0x%08x", status)); 2429 2430 return (status); 2431 } 2432 2433 void 2434 nxge_free_rxb( 2435 p_nxge_t nxgep, 2436 int channel) 2437 { 2438 nxge_dma_common_t *data; 2439 nxge_dma_common_t *control; 2440 uint32_t num_chunks; 2441 2442 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rbb")); 2443 2444 data = nxgep->rx_buf_pool_p->dma_buf_pool_p[channel]; 2445 num_chunks = nxgep->rx_buf_pool_p->num_chunks[channel]; 2446 nxge_free_rx_buf_dma(nxgep, data, num_chunks); 2447 2448 nxgep->rx_buf_pool_p->dma_buf_pool_p[channel] = 0; 2449 nxgep->rx_buf_pool_p->num_chunks[channel] = 0; 2450 2451 control = nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel]; 2452 nxge_free_rx_cntl_dma(nxgep, control); 2453 2454 nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel] = 0; 2455 2456 KMEM_FREE(data, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 2457 KMEM_FREE(control, sizeof (nxge_dma_common_t)); 2458 2459 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_alloc_rbb")); 2460 } 2461 2462 static void 2463 nxge_free_rx_mem_pool(p_nxge_t nxgep) 2464 { 2465 int rdc_max = NXGE_MAX_RDCS; 2466 2467 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_rx_mem_pool")); 2468 2469 if (!nxgep->rx_buf_pool_p || !nxgep->rx_buf_pool_p->buf_allocated) { 2470 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2471 "<== nxge_free_rx_mem_pool " 2472 "(null rx buf pool or buf not allocated")); 2473 return; 2474 } 2475 if (!nxgep->rx_cntl_pool_p || !nxgep->rx_cntl_pool_p->buf_allocated) { 2476 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2477 "<== nxge_free_rx_mem_pool " 2478 "(null rx cntl buf 
pool or cntl buf not allocated")); 2479 return; 2480 } 2481 2482 KMEM_FREE(nxgep->rx_cntl_pool_p->dma_buf_pool_p, 2483 sizeof (p_nxge_dma_common_t) * rdc_max); 2484 KMEM_FREE(nxgep->rx_cntl_pool_p, sizeof (nxge_dma_pool_t)); 2485 2486 KMEM_FREE(nxgep->rx_buf_pool_p->num_chunks, 2487 sizeof (uint32_t) * rdc_max); 2488 KMEM_FREE(nxgep->rx_buf_pool_p->dma_buf_pool_p, 2489 sizeof (p_nxge_dma_common_t) * rdc_max); 2490 KMEM_FREE(nxgep->rx_buf_pool_p, sizeof (nxge_dma_pool_t)); 2491 2492 nxgep->rx_buf_pool_p = 0; 2493 nxgep->rx_cntl_pool_p = 0; 2494 2495 KMEM_FREE(nxgep->rx_rbr_rings->rbr_rings, 2496 sizeof (p_rx_rbr_ring_t) * rdc_max); 2497 KMEM_FREE(nxgep->rx_rbr_rings, sizeof (rx_rbr_rings_t)); 2498 KMEM_FREE(nxgep->rx_rcr_rings->rcr_rings, 2499 sizeof (p_rx_rcr_ring_t) * rdc_max); 2500 KMEM_FREE(nxgep->rx_rcr_rings, sizeof (rx_rcr_rings_t)); 2501 KMEM_FREE(nxgep->rx_mbox_areas_p->rxmbox_areas, 2502 sizeof (p_rx_mbox_t) * rdc_max); 2503 KMEM_FREE(nxgep->rx_mbox_areas_p, sizeof (rx_mbox_areas_t)); 2504 2505 nxgep->rx_rbr_rings = 0; 2506 nxgep->rx_rcr_rings = 0; 2507 nxgep->rx_mbox_areas_p = 0; 2508 2509 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_rx_mem_pool")); 2510 } 2511 2512 2513 static nxge_status_t 2514 nxge_alloc_rx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel, 2515 p_nxge_dma_common_t *dmap, 2516 size_t alloc_size, size_t block_size, uint32_t *num_chunks) 2517 { 2518 p_nxge_dma_common_t rx_dmap; 2519 nxge_status_t status = NXGE_OK; 2520 size_t total_alloc_size; 2521 size_t allocated = 0; 2522 int i, size_index, array_size; 2523 boolean_t use_kmem_alloc = B_FALSE; 2524 2525 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_buf_dma")); 2526 2527 rx_dmap = (p_nxge_dma_common_t) 2528 KMEM_ZALLOC(sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK, 2529 KM_SLEEP); 2530 2531 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2532 " alloc_rx_buf_dma rdc %d asize %x bsize %x bbuf %llx ", 2533 dma_channel, alloc_size, block_size, dmap)); 2534 2535 total_alloc_size = alloc_size; 2536 2537 
#if defined(RX_USE_RECLAIM_POST) 2538 total_alloc_size = alloc_size + alloc_size/4; 2539 #endif 2540 2541 i = 0; 2542 size_index = 0; 2543 array_size = sizeof (alloc_sizes)/sizeof (size_t); 2544 while ((alloc_sizes[size_index] < alloc_size) && 2545 (size_index < array_size)) 2546 size_index++; 2547 if (size_index >= array_size) { 2548 size_index = array_size - 1; 2549 } 2550 2551 /* For Neptune, use kmem_alloc if the kmem flag is set. */ 2552 if (nxgep->niu_type != N2_NIU && nxge_use_kmem_alloc) { 2553 use_kmem_alloc = B_TRUE; 2554 #if defined(__i386) || defined(__amd64) 2555 size_index = 0; 2556 #endif 2557 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2558 "==> nxge_alloc_rx_buf_dma: " 2559 "Neptune use kmem_alloc() - size_index %d", 2560 size_index)); 2561 } 2562 2563 while ((allocated < total_alloc_size) && 2564 (size_index >= 0) && (i < NXGE_DMA_BLOCK)) { 2565 rx_dmap[i].dma_chunk_index = i; 2566 rx_dmap[i].block_size = block_size; 2567 rx_dmap[i].alength = alloc_sizes[size_index]; 2568 rx_dmap[i].orig_alength = rx_dmap[i].alength; 2569 rx_dmap[i].nblocks = alloc_sizes[size_index] / block_size; 2570 rx_dmap[i].dma_channel = dma_channel; 2571 rx_dmap[i].contig_alloc_type = B_FALSE; 2572 rx_dmap[i].kmem_alloc_type = B_FALSE; 2573 rx_dmap[i].buf_alloc_type = DDI_MEM_ALLOC; 2574 2575 /* 2576 * N2/NIU: data buffers must be contiguous as the driver 2577 * needs to call Hypervisor api to set up 2578 * logical pages. 
2579 */ 2580 if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) { 2581 rx_dmap[i].contig_alloc_type = B_TRUE; 2582 rx_dmap[i].buf_alloc_type = CONTIG_MEM_ALLOC; 2583 } else if (use_kmem_alloc) { 2584 /* For Neptune, use kmem_alloc */ 2585 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2586 "==> nxge_alloc_rx_buf_dma: " 2587 "Neptune use kmem_alloc()")); 2588 rx_dmap[i].kmem_alloc_type = B_TRUE; 2589 rx_dmap[i].buf_alloc_type = KMEM_ALLOC; 2590 } 2591 2592 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2593 "alloc_rx_buf_dma rdc %d chunk %d bufp %llx size %x " 2594 "i %d nblocks %d alength %d", 2595 dma_channel, i, &rx_dmap[i], block_size, 2596 i, rx_dmap[i].nblocks, 2597 rx_dmap[i].alength)); 2598 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 2599 &nxge_rx_dma_attr, 2600 rx_dmap[i].alength, 2601 &nxge_dev_buf_dma_acc_attr, 2602 DDI_DMA_READ | DDI_DMA_STREAMING, 2603 (p_nxge_dma_common_t)(&rx_dmap[i])); 2604 if (status != NXGE_OK) { 2605 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2606 "nxge_alloc_rx_buf_dma: Alloc Failed: " 2607 "dma %d size_index %d size requested %d", 2608 dma_channel, 2609 size_index, 2610 rx_dmap[i].alength)); 2611 size_index--; 2612 } else { 2613 rx_dmap[i].buf_alloc_state = BUF_ALLOCATED; 2614 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2615 " nxge_alloc_rx_buf_dma DONE alloc mem: " 2616 "dma %d dma_buf_p $%p kaddrp $%p alength %d " 2617 "buf_alloc_state %d alloc_type %d", 2618 dma_channel, 2619 &rx_dmap[i], 2620 rx_dmap[i].kaddrp, 2621 rx_dmap[i].alength, 2622 rx_dmap[i].buf_alloc_state, 2623 rx_dmap[i].buf_alloc_type)); 2624 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2625 " alloc_rx_buf_dma allocated rdc %d " 2626 "chunk %d size %x dvma %x bufp %llx kaddrp $%p", 2627 dma_channel, i, rx_dmap[i].alength, 2628 rx_dmap[i].ioaddr_pp, &rx_dmap[i], 2629 rx_dmap[i].kaddrp)); 2630 i++; 2631 allocated += alloc_sizes[size_index]; 2632 } 2633 } 2634 2635 if (allocated < total_alloc_size) { 2636 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2637 "==> nxge_alloc_rx_buf_dma: not enough for channel %d " 2638 
"allocated 0x%x requested 0x%x", 2639 dma_channel, 2640 allocated, total_alloc_size)); 2641 status = NXGE_ERROR; 2642 goto nxge_alloc_rx_mem_fail1; 2643 } 2644 2645 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2646 "==> nxge_alloc_rx_buf_dma: Allocated for channel %d " 2647 "allocated 0x%x requested 0x%x", 2648 dma_channel, 2649 allocated, total_alloc_size)); 2650 2651 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2652 " alloc_rx_buf_dma rdc %d allocated %d chunks", 2653 dma_channel, i)); 2654 *num_chunks = i; 2655 *dmap = rx_dmap; 2656 2657 goto nxge_alloc_rx_mem_exit; 2658 2659 nxge_alloc_rx_mem_fail1: 2660 KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 2661 2662 nxge_alloc_rx_mem_exit: 2663 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2664 "<== nxge_alloc_rx_buf_dma status 0x%08x", status)); 2665 2666 return (status); 2667 } 2668 2669 /*ARGSUSED*/ 2670 static void 2671 nxge_free_rx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap, 2672 uint32_t num_chunks) 2673 { 2674 int i; 2675 2676 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2677 "==> nxge_free_rx_buf_dma: # of chunks %d", num_chunks)); 2678 2679 if (dmap == 0) 2680 return; 2681 2682 for (i = 0; i < num_chunks; i++) { 2683 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2684 "==> nxge_free_rx_buf_dma: chunk %d dmap 0x%llx", 2685 i, dmap)); 2686 nxge_dma_free_rx_data_buf(dmap++); 2687 } 2688 2689 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_rx_buf_dma")); 2690 } 2691 2692 /*ARGSUSED*/ 2693 static nxge_status_t 2694 nxge_alloc_rx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel, 2695 p_nxge_dma_common_t *dmap, size_t size) 2696 { 2697 p_nxge_dma_common_t rx_dmap; 2698 nxge_status_t status = NXGE_OK; 2699 2700 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_cntl_dma")); 2701 2702 rx_dmap = (p_nxge_dma_common_t) 2703 KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP); 2704 2705 rx_dmap->contig_alloc_type = B_FALSE; 2706 rx_dmap->kmem_alloc_type = B_FALSE; 2707 2708 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 2709 &nxge_desc_dma_attr, 2710 size, 2711 
&nxge_dev_desc_dma_acc_attr, 2712 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 2713 rx_dmap); 2714 if (status != NXGE_OK) { 2715 goto nxge_alloc_rx_cntl_dma_fail1; 2716 } 2717 2718 *dmap = rx_dmap; 2719 goto nxge_alloc_rx_cntl_dma_exit; 2720 2721 nxge_alloc_rx_cntl_dma_fail1: 2722 KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t)); 2723 2724 nxge_alloc_rx_cntl_dma_exit: 2725 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2726 "<== nxge_alloc_rx_cntl_dma status 0x%08x", status)); 2727 2728 return (status); 2729 } 2730 2731 /*ARGSUSED*/ 2732 static void 2733 nxge_free_rx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap) 2734 { 2735 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_rx_cntl_dma")); 2736 2737 if (dmap == 0) 2738 return; 2739 2740 nxge_dma_mem_free(dmap); 2741 2742 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_rx_cntl_dma")); 2743 } 2744 2745 typedef struct { 2746 size_t tx_size; 2747 size_t cr_size; 2748 size_t threshhold; 2749 } nxge_tdc_sizes_t; 2750 2751 static 2752 nxge_status_t 2753 nxge_tdc_sizes( 2754 nxge_t *nxgep, 2755 nxge_tdc_sizes_t *sizes) 2756 { 2757 uint32_t threshhold; /* The bcopy() threshhold */ 2758 size_t tx_size; /* Transmit buffer size */ 2759 size_t cr_size; /* Completion ring size */ 2760 2761 /* 2762 * Assume that each DMA channel will be configured with the 2763 * default transmit buffer size for copying transmit data. 2764 * (If a packet is bigger than this, it will not be copied.) 
2765 */ 2766 if (nxgep->niu_type == N2_NIU) { 2767 threshhold = TX_BCOPY_SIZE; 2768 } else { 2769 threshhold = nxge_bcopy_thresh; 2770 } 2771 tx_size = nxge_tx_ring_size * threshhold; 2772 2773 cr_size = nxge_tx_ring_size * sizeof (tx_desc_t); 2774 cr_size += sizeof (txdma_mailbox_t); 2775 2776 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2777 if (nxgep->niu_type == N2_NIU) { 2778 if (!ISP2(tx_size)) { 2779 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2780 "==> nxge_tdc_sizes: Tx size" 2781 " must be power of 2")); 2782 return (NXGE_ERROR); 2783 } 2784 2785 if (tx_size > (1 << 22)) { 2786 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2787 "==> nxge_tdc_sizes: Tx size" 2788 " limited to 4M")); 2789 return (NXGE_ERROR); 2790 } 2791 2792 if (cr_size < 0x2000) 2793 cr_size = 0x2000; 2794 } 2795 #endif 2796 2797 sizes->threshhold = threshhold; 2798 sizes->tx_size = tx_size; 2799 sizes->cr_size = cr_size; 2800 2801 return (NXGE_OK); 2802 } 2803 /* 2804 * nxge_alloc_txb 2805 * 2806 * Allocate buffers for an TDC. 2807 * 2808 * Arguments: 2809 * nxgep 2810 * channel The channel to map into our kernel space. 2811 * 2812 * Notes: 2813 * 2814 * NPI function calls: 2815 * 2816 * NXGE function calls: 2817 * 2818 * Registers accessed: 2819 * 2820 * Context: 2821 * 2822 * Taking apart: 2823 * 2824 * Open questions: 2825 * 2826 */ 2827 nxge_status_t 2828 nxge_alloc_txb( 2829 p_nxge_t nxgep, 2830 int channel) 2831 { 2832 nxge_dma_common_t **dma_buf_p; 2833 nxge_dma_common_t **dma_cntl_p; 2834 uint32_t *num_chunks; 2835 nxge_status_t status = NXGE_OK; 2836 2837 nxge_tdc_sizes_t sizes; 2838 2839 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tbb")); 2840 2841 if (nxge_tdc_sizes(nxgep, &sizes) != NXGE_OK) 2842 return (NXGE_ERROR); 2843 2844 /* 2845 * Allocate memory for transmit buffers and descriptor rings. 2846 * Replace these allocation functions with the interface functions 2847 * provided by the partition manager Real Soon Now. 
2848 */ 2849 dma_buf_p = &nxgep->tx_buf_pool_p->dma_buf_pool_p[channel]; 2850 num_chunks = &nxgep->tx_buf_pool_p->num_chunks[channel]; 2851 2852 dma_cntl_p = &nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel]; 2853 2854 /* 2855 * Allocate memory for transmit buffers and descriptor rings. 2856 * Replace allocation functions with interface functions provided 2857 * by the partition manager when it is available. 2858 * 2859 * Allocate memory for the transmit buffer pool. 2860 */ 2861 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2862 "sizes: tx: %ld, cr:%ld, th:%ld", 2863 sizes.tx_size, sizes.cr_size, sizes.threshhold)); 2864 2865 *num_chunks = 0; 2866 status = nxge_alloc_tx_buf_dma(nxgep, channel, dma_buf_p, 2867 sizes.tx_size, sizes.threshhold, num_chunks); 2868 if (status != NXGE_OK) { 2869 cmn_err(CE_NOTE, "nxge_alloc_tx_buf_dma failed!"); 2870 return (status); 2871 } 2872 2873 /* 2874 * Allocate memory for descriptor rings and mailbox. 2875 */ 2876 status = nxge_alloc_tx_cntl_dma(nxgep, channel, dma_cntl_p, 2877 sizes.cr_size); 2878 if (status != NXGE_OK) { 2879 nxge_free_tx_buf_dma(nxgep, *dma_buf_p, *num_chunks); 2880 cmn_err(CE_NOTE, "nxge_alloc_tx_cntl_dma failed!"); 2881 return (status); 2882 } 2883 2884 return (NXGE_OK); 2885 } 2886 2887 void 2888 nxge_free_txb( 2889 p_nxge_t nxgep, 2890 int channel) 2891 { 2892 nxge_dma_common_t *data; 2893 nxge_dma_common_t *control; 2894 uint32_t num_chunks; 2895 2896 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_txb")); 2897 2898 data = nxgep->tx_buf_pool_p->dma_buf_pool_p[channel]; 2899 num_chunks = nxgep->tx_buf_pool_p->num_chunks[channel]; 2900 nxge_free_tx_buf_dma(nxgep, data, num_chunks); 2901 2902 nxgep->tx_buf_pool_p->dma_buf_pool_p[channel] = 0; 2903 nxgep->tx_buf_pool_p->num_chunks[channel] = 0; 2904 2905 control = nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel]; 2906 nxge_free_tx_cntl_dma(nxgep, control); 2907 2908 nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel] = 0; 2909 2910 KMEM_FREE(data, sizeof (nxge_dma_common_t) * 
NXGE_DMA_BLOCK); 2911 KMEM_FREE(control, sizeof (nxge_dma_common_t)); 2912 2913 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_txb")); 2914 } 2915 2916 /* 2917 * nxge_alloc_tx_mem_pool 2918 * 2919 * This function allocates all of the per-port TDC control data structures. 2920 * The per-channel (TDC) data structures are allocated when needed. 2921 * 2922 * Arguments: 2923 * nxgep 2924 * 2925 * Notes: 2926 * 2927 * Context: 2928 * Any domain 2929 */ 2930 nxge_status_t 2931 nxge_alloc_tx_mem_pool(p_nxge_t nxgep) 2932 { 2933 nxge_hw_pt_cfg_t *p_cfgp; 2934 nxge_dma_pool_t *dma_poolp; 2935 nxge_dma_common_t **dma_buf_p; 2936 nxge_dma_pool_t *dma_cntl_poolp; 2937 nxge_dma_common_t **dma_cntl_p; 2938 uint32_t *num_chunks; /* per dma */ 2939 int tdc_max; 2940 2941 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_alloc_tx_mem_pool")); 2942 2943 p_cfgp = &nxgep->pt_config.hw_config; 2944 tdc_max = NXGE_MAX_TDCS; 2945 2946 /* 2947 * Allocate memory for each transmit DMA channel. 2948 */ 2949 dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t), 2950 KM_SLEEP); 2951 dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 2952 sizeof (p_nxge_dma_common_t) * tdc_max, KM_SLEEP); 2953 2954 dma_cntl_poolp = (p_nxge_dma_pool_t) 2955 KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP); 2956 dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 2957 sizeof (p_nxge_dma_common_t) * tdc_max, KM_SLEEP); 2958 2959 if (nxge_tx_ring_size > TDC_DEFAULT_MAX) { 2960 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 2961 "nxge_alloc_tx_mem_pool: TDC too high %d, " 2962 "set to default %d", 2963 nxge_tx_ring_size, TDC_DEFAULT_MAX)); 2964 nxge_tx_ring_size = TDC_DEFAULT_MAX; 2965 } 2966 2967 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2968 /* 2969 * N2/NIU has limitation on the descriptor sizes (contiguous 2970 * memory allocation on data buffers to 4M (contig_mem_alloc) 2971 * and little endian for control buffers (must use the ddi/dki mem alloc 2972 * function). 
The transmit ring is limited to 8K (includes the 2973 * mailbox). 2974 */ 2975 if (nxgep->niu_type == N2_NIU) { 2976 if ((nxge_tx_ring_size > NXGE_NIU_CONTIG_TX_MAX) || 2977 (!ISP2(nxge_tx_ring_size))) { 2978 nxge_tx_ring_size = NXGE_NIU_CONTIG_TX_MAX; 2979 } 2980 } 2981 #endif 2982 2983 nxgep->nxge_port_tx_ring_size = nxge_tx_ring_size; 2984 2985 num_chunks = (uint32_t *)KMEM_ZALLOC( 2986 sizeof (uint32_t) * tdc_max, KM_SLEEP); 2987 2988 dma_poolp->ndmas = p_cfgp->tdc.owned; 2989 dma_poolp->num_chunks = num_chunks; 2990 dma_poolp->dma_buf_pool_p = dma_buf_p; 2991 nxgep->tx_buf_pool_p = dma_poolp; 2992 2993 dma_poolp->buf_allocated = B_TRUE; 2994 2995 dma_cntl_poolp->ndmas = p_cfgp->tdc.owned; 2996 dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p; 2997 nxgep->tx_cntl_pool_p = dma_cntl_poolp; 2998 2999 dma_cntl_poolp->buf_allocated = B_TRUE; 3000 3001 nxgep->tx_rings = 3002 KMEM_ZALLOC(sizeof (tx_rings_t), KM_SLEEP); 3003 nxgep->tx_rings->rings = 3004 KMEM_ZALLOC(sizeof (p_tx_ring_t) * tdc_max, KM_SLEEP); 3005 nxgep->tx_mbox_areas_p = 3006 KMEM_ZALLOC(sizeof (tx_mbox_areas_t), KM_SLEEP); 3007 nxgep->tx_mbox_areas_p->txmbox_areas_p = 3008 KMEM_ZALLOC(sizeof (p_tx_mbox_t) * tdc_max, KM_SLEEP); 3009 3010 nxgep->tx_rings->ndmas = p_cfgp->tdc.owned; 3011 3012 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 3013 "==> nxge_alloc_tx_mem_pool: ndmas %d poolp->ndmas %d", 3014 tdc_max, dma_poolp->ndmas)); 3015 3016 return (NXGE_OK); 3017 } 3018 3019 nxge_status_t 3020 nxge_alloc_tx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel, 3021 p_nxge_dma_common_t *dmap, size_t alloc_size, 3022 size_t block_size, uint32_t *num_chunks) 3023 { 3024 p_nxge_dma_common_t tx_dmap; 3025 nxge_status_t status = NXGE_OK; 3026 size_t total_alloc_size; 3027 size_t allocated = 0; 3028 int i, size_index, array_size; 3029 3030 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_buf_dma")); 3031 3032 tx_dmap = (p_nxge_dma_common_t) 3033 KMEM_ZALLOC(sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK, 3034 KM_SLEEP); 3035 3036 
total_alloc_size = alloc_size; 3037 i = 0; 3038 size_index = 0; 3039 array_size = sizeof (alloc_sizes) / sizeof (size_t); 3040 while ((alloc_sizes[size_index] < alloc_size) && 3041 (size_index < array_size)) 3042 size_index++; 3043 if (size_index >= array_size) { 3044 size_index = array_size - 1; 3045 } 3046 3047 while ((allocated < total_alloc_size) && 3048 (size_index >= 0) && (i < NXGE_DMA_BLOCK)) { 3049 3050 tx_dmap[i].dma_chunk_index = i; 3051 tx_dmap[i].block_size = block_size; 3052 tx_dmap[i].alength = alloc_sizes[size_index]; 3053 tx_dmap[i].orig_alength = tx_dmap[i].alength; 3054 tx_dmap[i].nblocks = alloc_sizes[size_index] / block_size; 3055 tx_dmap[i].dma_channel = dma_channel; 3056 tx_dmap[i].contig_alloc_type = B_FALSE; 3057 tx_dmap[i].kmem_alloc_type = B_FALSE; 3058 3059 /* 3060 * N2/NIU: data buffers must be contiguous as the driver 3061 * needs to call Hypervisor api to set up 3062 * logical pages. 3063 */ 3064 if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) { 3065 tx_dmap[i].contig_alloc_type = B_TRUE; 3066 } 3067 3068 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 3069 &nxge_tx_dma_attr, 3070 tx_dmap[i].alength, 3071 &nxge_dev_buf_dma_acc_attr, 3072 DDI_DMA_WRITE | DDI_DMA_STREAMING, 3073 (p_nxge_dma_common_t)(&tx_dmap[i])); 3074 if (status != NXGE_OK) { 3075 size_index--; 3076 } else { 3077 i++; 3078 allocated += alloc_sizes[size_index]; 3079 } 3080 } 3081 3082 if (allocated < total_alloc_size) { 3083 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3084 "==> nxge_alloc_tx_buf_dma: not enough channel %d: " 3085 "allocated 0x%x requested 0x%x", 3086 dma_channel, 3087 allocated, total_alloc_size)); 3088 status = NXGE_ERROR; 3089 goto nxge_alloc_tx_mem_fail1; 3090 } 3091 3092 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3093 "==> nxge_alloc_tx_buf_dma: Allocated for channel %d: " 3094 "allocated 0x%x requested 0x%x", 3095 dma_channel, 3096 allocated, total_alloc_size)); 3097 3098 *num_chunks = i; 3099 *dmap = tx_dmap; 3100 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3101 
"==> nxge_alloc_tx_buf_dma dmap 0x%016llx num chunks %d", 3102 *dmap, i)); 3103 goto nxge_alloc_tx_mem_exit; 3104 3105 nxge_alloc_tx_mem_fail1: 3106 KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 3107 3108 nxge_alloc_tx_mem_exit: 3109 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3110 "<== nxge_alloc_tx_buf_dma status 0x%08x", status)); 3111 3112 return (status); 3113 } 3114 3115 /*ARGSUSED*/ 3116 static void 3117 nxge_free_tx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap, 3118 uint32_t num_chunks) 3119 { 3120 int i; 3121 3122 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_tx_buf_dma")); 3123 3124 if (dmap == 0) 3125 return; 3126 3127 for (i = 0; i < num_chunks; i++) { 3128 nxge_dma_mem_free(dmap++); 3129 } 3130 3131 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_tx_buf_dma")); 3132 } 3133 3134 /*ARGSUSED*/ 3135 nxge_status_t 3136 nxge_alloc_tx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel, 3137 p_nxge_dma_common_t *dmap, size_t size) 3138 { 3139 p_nxge_dma_common_t tx_dmap; 3140 nxge_status_t status = NXGE_OK; 3141 3142 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_cntl_dma")); 3143 tx_dmap = (p_nxge_dma_common_t) 3144 KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP); 3145 3146 tx_dmap->contig_alloc_type = B_FALSE; 3147 tx_dmap->kmem_alloc_type = B_FALSE; 3148 3149 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 3150 &nxge_desc_dma_attr, 3151 size, 3152 &nxge_dev_desc_dma_acc_attr, 3153 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 3154 tx_dmap); 3155 if (status != NXGE_OK) { 3156 goto nxge_alloc_tx_cntl_dma_fail1; 3157 } 3158 3159 *dmap = tx_dmap; 3160 goto nxge_alloc_tx_cntl_dma_exit; 3161 3162 nxge_alloc_tx_cntl_dma_fail1: 3163 KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t)); 3164 3165 nxge_alloc_tx_cntl_dma_exit: 3166 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3167 "<== nxge_alloc_tx_cntl_dma status 0x%08x", status)); 3168 3169 return (status); 3170 } 3171 3172 /*ARGSUSED*/ 3173 static void 3174 nxge_free_tx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap) 3175 { 
3176 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_tx_cntl_dma")); 3177 3178 if (dmap == 0) 3179 return; 3180 3181 nxge_dma_mem_free(dmap); 3182 3183 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_tx_cntl_dma")); 3184 } 3185 3186 /* 3187 * nxge_free_tx_mem_pool 3188 * 3189 * This function frees all of the per-port TDC control data structures. 3190 * The per-channel (TDC) data structures are freed when the channel 3191 * is stopped. 3192 * 3193 * Arguments: 3194 * nxgep 3195 * 3196 * Notes: 3197 * 3198 * Context: 3199 * Any domain 3200 */ 3201 static void 3202 nxge_free_tx_mem_pool(p_nxge_t nxgep) 3203 { 3204 int tdc_max = NXGE_MAX_TDCS; 3205 3206 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_tx_mem_pool")); 3207 3208 if (!nxgep->tx_buf_pool_p || !nxgep->tx_buf_pool_p->buf_allocated) { 3209 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3210 "<== nxge_free_tx_mem_pool " 3211 "(null tx buf pool or buf not allocated")); 3212 return; 3213 } 3214 if (!nxgep->tx_cntl_pool_p || !nxgep->tx_cntl_pool_p->buf_allocated) { 3215 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3216 "<== nxge_free_tx_mem_pool " 3217 "(null tx cntl buf pool or cntl buf not allocated")); 3218 return; 3219 } 3220 3221 /* 1. Free the mailboxes. */ 3222 KMEM_FREE(nxgep->tx_mbox_areas_p->txmbox_areas_p, 3223 sizeof (p_tx_mbox_t) * tdc_max); 3224 KMEM_FREE(nxgep->tx_mbox_areas_p, sizeof (tx_mbox_areas_t)); 3225 3226 nxgep->tx_mbox_areas_p = 0; 3227 3228 /* 2. Free the transmit ring arrays. */ 3229 KMEM_FREE(nxgep->tx_rings->rings, 3230 sizeof (p_tx_ring_t) * tdc_max); 3231 KMEM_FREE(nxgep->tx_rings, sizeof (tx_rings_t)); 3232 3233 nxgep->tx_rings = 0; 3234 3235 /* 3. Free the completion ring data structures. */ 3236 KMEM_FREE(nxgep->tx_cntl_pool_p->dma_buf_pool_p, 3237 sizeof (p_nxge_dma_common_t) * tdc_max); 3238 KMEM_FREE(nxgep->tx_cntl_pool_p, sizeof (nxge_dma_pool_t)); 3239 3240 nxgep->tx_cntl_pool_p = 0; 3241 3242 /* 4. Free the data ring data structures. 
*/ 3243 KMEM_FREE(nxgep->tx_buf_pool_p->num_chunks, 3244 sizeof (uint32_t) * tdc_max); 3245 KMEM_FREE(nxgep->tx_buf_pool_p->dma_buf_pool_p, 3246 sizeof (p_nxge_dma_common_t) * tdc_max); 3247 KMEM_FREE(nxgep->tx_buf_pool_p, sizeof (nxge_dma_pool_t)); 3248 3249 nxgep->tx_buf_pool_p = 0; 3250 3251 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_tx_mem_pool")); 3252 } 3253 3254 /*ARGSUSED*/ 3255 static nxge_status_t 3256 nxge_dma_mem_alloc(p_nxge_t nxgep, dma_method_t method, 3257 struct ddi_dma_attr *dma_attrp, 3258 size_t length, ddi_device_acc_attr_t *acc_attr_p, uint_t xfer_flags, 3259 p_nxge_dma_common_t dma_p) 3260 { 3261 caddr_t kaddrp; 3262 int ddi_status = DDI_SUCCESS; 3263 boolean_t contig_alloc_type; 3264 boolean_t kmem_alloc_type; 3265 3266 contig_alloc_type = dma_p->contig_alloc_type; 3267 3268 if (contig_alloc_type && (nxgep->niu_type != N2_NIU)) { 3269 /* 3270 * contig_alloc_type for contiguous memory only allowed 3271 * for N2/NIU. 3272 */ 3273 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3274 "nxge_dma_mem_alloc: alloc type not allowed (%d)", 3275 dma_p->contig_alloc_type)); 3276 return (NXGE_ERROR | NXGE_DDI_FAILED); 3277 } 3278 3279 dma_p->dma_handle = NULL; 3280 dma_p->acc_handle = NULL; 3281 dma_p->kaddrp = dma_p->last_kaddrp = NULL; 3282 dma_p->first_ioaddr_pp = dma_p->last_ioaddr_pp = NULL; 3283 ddi_status = ddi_dma_alloc_handle(nxgep->dip, dma_attrp, 3284 DDI_DMA_DONTWAIT, NULL, &dma_p->dma_handle); 3285 if (ddi_status != DDI_SUCCESS) { 3286 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3287 "nxge_dma_mem_alloc:ddi_dma_alloc_handle failed.")); 3288 return (NXGE_ERROR | NXGE_DDI_FAILED); 3289 } 3290 3291 kmem_alloc_type = dma_p->kmem_alloc_type; 3292 3293 switch (contig_alloc_type) { 3294 case B_FALSE: 3295 switch (kmem_alloc_type) { 3296 case B_FALSE: 3297 ddi_status = ddi_dma_mem_alloc(dma_p->dma_handle, 3298 length, 3299 acc_attr_p, 3300 xfer_flags, 3301 DDI_DMA_DONTWAIT, 0, &kaddrp, &dma_p->alength, 3302 &dma_p->acc_handle); 3303 if (ddi_status != 
DDI_SUCCESS) { 3304 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3305 "nxge_dma_mem_alloc: " 3306 "ddi_dma_mem_alloc failed")); 3307 ddi_dma_free_handle(&dma_p->dma_handle); 3308 dma_p->dma_handle = NULL; 3309 return (NXGE_ERROR | NXGE_DDI_FAILED); 3310 } 3311 if (dma_p->alength < length) { 3312 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3313 "nxge_dma_mem_alloc:di_dma_mem_alloc " 3314 "< length.")); 3315 ddi_dma_mem_free(&dma_p->acc_handle); 3316 ddi_dma_free_handle(&dma_p->dma_handle); 3317 dma_p->acc_handle = NULL; 3318 dma_p->dma_handle = NULL; 3319 return (NXGE_ERROR); 3320 } 3321 3322 ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, 3323 NULL, 3324 kaddrp, dma_p->alength, xfer_flags, 3325 DDI_DMA_DONTWAIT, 3326 0, &dma_p->dma_cookie, &dma_p->ncookies); 3327 if (ddi_status != DDI_DMA_MAPPED) { 3328 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3329 "nxge_dma_mem_alloc: ddi_dma_addr_bind " 3330 "failed " 3331 "(staus 0x%x ncookies %d.)", ddi_status, 3332 dma_p->ncookies)); 3333 if (dma_p->acc_handle) { 3334 ddi_dma_mem_free(&dma_p->acc_handle); 3335 dma_p->acc_handle = NULL; 3336 } 3337 ddi_dma_free_handle(&dma_p->dma_handle); 3338 dma_p->dma_handle = NULL; 3339 return (NXGE_ERROR | NXGE_DDI_FAILED); 3340 } 3341 3342 if (dma_p->ncookies != 1) { 3343 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3344 "nxge_dma_mem_alloc:ddi_dma_addr_bind " 3345 "> 1 cookie" 3346 "(staus 0x%x ncookies %d.)", ddi_status, 3347 dma_p->ncookies)); 3348 if (dma_p->acc_handle) { 3349 ddi_dma_mem_free(&dma_p->acc_handle); 3350 dma_p->acc_handle = NULL; 3351 } 3352 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 3353 ddi_dma_free_handle(&dma_p->dma_handle); 3354 dma_p->dma_handle = NULL; 3355 return (NXGE_ERROR); 3356 } 3357 break; 3358 3359 case B_TRUE: 3360 kaddrp = KMEM_ALLOC(length, KM_NOSLEEP); 3361 if (kaddrp == NULL) { 3362 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3363 "nxge_dma_mem_alloc:ddi_dma_mem_alloc " 3364 "kmem alloc failed")); 3365 return (NXGE_ERROR); 3366 } 3367 3368 dma_p->alength = length; 3369 
ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, 3370 NULL, kaddrp, dma_p->alength, xfer_flags, 3371 DDI_DMA_DONTWAIT, 0, 3372 &dma_p->dma_cookie, &dma_p->ncookies); 3373 if (ddi_status != DDI_DMA_MAPPED) { 3374 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3375 "nxge_dma_mem_alloc:ddi_dma_addr_bind: " 3376 "(kmem_alloc) failed kaddrp $%p length %d " 3377 "(staus 0x%x (%d) ncookies %d.)", 3378 kaddrp, length, 3379 ddi_status, ddi_status, dma_p->ncookies)); 3380 KMEM_FREE(kaddrp, length); 3381 dma_p->acc_handle = NULL; 3382 ddi_dma_free_handle(&dma_p->dma_handle); 3383 dma_p->dma_handle = NULL; 3384 dma_p->kaddrp = NULL; 3385 return (NXGE_ERROR | NXGE_DDI_FAILED); 3386 } 3387 3388 if (dma_p->ncookies != 1) { 3389 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3390 "nxge_dma_mem_alloc:ddi_dma_addr_bind " 3391 "(kmem_alloc) > 1 cookie" 3392 "(staus 0x%x ncookies %d.)", ddi_status, 3393 dma_p->ncookies)); 3394 KMEM_FREE(kaddrp, length); 3395 dma_p->acc_handle = NULL; 3396 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 3397 ddi_dma_free_handle(&dma_p->dma_handle); 3398 dma_p->dma_handle = NULL; 3399 dma_p->kaddrp = NULL; 3400 return (NXGE_ERROR); 3401 } 3402 3403 dma_p->kaddrp = kaddrp; 3404 3405 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 3406 "nxge_dma_mem_alloc: kmem_alloc dmap $%p " 3407 "kaddr $%p alength %d", 3408 dma_p, 3409 kaddrp, 3410 dma_p->alength)); 3411 break; 3412 } 3413 break; 3414 3415 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 3416 case B_TRUE: 3417 kaddrp = (caddr_t)contig_mem_alloc(length); 3418 if (kaddrp == NULL) { 3419 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3420 "nxge_dma_mem_alloc:contig_mem_alloc failed.")); 3421 ddi_dma_free_handle(&dma_p->dma_handle); 3422 return (NXGE_ERROR | NXGE_DDI_FAILED); 3423 } 3424 3425 dma_p->alength = length; 3426 ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL, 3427 kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0, 3428 &dma_p->dma_cookie, &dma_p->ncookies); 3429 if (ddi_status != DDI_DMA_MAPPED) { 3430 
NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3431 "nxge_dma_mem_alloc:di_dma_addr_bind failed " 3432 "(status 0x%x ncookies %d.)", ddi_status, 3433 dma_p->ncookies)); 3434 3435 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3436 "==> nxge_dma_mem_alloc: (not mapped)" 3437 "length %lu (0x%x) " 3438 "free contig kaddrp $%p " 3439 "va_to_pa $%p", 3440 length, length, 3441 kaddrp, 3442 va_to_pa(kaddrp))); 3443 3444 3445 contig_mem_free((void *)kaddrp, length); 3446 ddi_dma_free_handle(&dma_p->dma_handle); 3447 3448 dma_p->dma_handle = NULL; 3449 dma_p->acc_handle = NULL; 3450 dma_p->alength = NULL; 3451 dma_p->kaddrp = NULL; 3452 3453 return (NXGE_ERROR | NXGE_DDI_FAILED); 3454 } 3455 3456 if (dma_p->ncookies != 1 || 3457 (dma_p->dma_cookie.dmac_laddress == NULL)) { 3458 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3459 "nxge_dma_mem_alloc:di_dma_addr_bind > 1 " 3460 "cookie or " 3461 "dmac_laddress is NULL $%p size %d " 3462 " (status 0x%x ncookies %d.)", 3463 ddi_status, 3464 dma_p->dma_cookie.dmac_laddress, 3465 dma_p->dma_cookie.dmac_size, 3466 dma_p->ncookies)); 3467 3468 contig_mem_free((void *)kaddrp, length); 3469 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 3470 ddi_dma_free_handle(&dma_p->dma_handle); 3471 3472 dma_p->alength = 0; 3473 dma_p->dma_handle = NULL; 3474 dma_p->acc_handle = NULL; 3475 dma_p->kaddrp = NULL; 3476 3477 return (NXGE_ERROR | NXGE_DDI_FAILED); 3478 } 3479 break; 3480 3481 #else 3482 case B_TRUE: 3483 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3484 "nxge_dma_mem_alloc: invalid alloc type for !sun4v")); 3485 return (NXGE_ERROR | NXGE_DDI_FAILED); 3486 #endif 3487 } 3488 3489 dma_p->kaddrp = kaddrp; 3490 dma_p->last_kaddrp = (unsigned char *)kaddrp + 3491 dma_p->alength - RXBUF_64B_ALIGNED; 3492 #if defined(__i386) 3493 dma_p->ioaddr_pp = 3494 (unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress; 3495 #else 3496 dma_p->ioaddr_pp = (unsigned char *)dma_p->dma_cookie.dmac_laddress; 3497 #endif 3498 dma_p->last_ioaddr_pp = 3499 #if defined(__i386) 3500 (unsigned char 
*)(uint32_t)dma_p->dma_cookie.dmac_laddress + 3501 #else 3502 (unsigned char *)dma_p->dma_cookie.dmac_laddress + 3503 #endif 3504 dma_p->alength - RXBUF_64B_ALIGNED; 3505 3506 NPI_DMA_ACC_HANDLE_SET(dma_p, dma_p->acc_handle); 3507 3508 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 3509 dma_p->orig_ioaddr_pp = 3510 (unsigned char *)dma_p->dma_cookie.dmac_laddress; 3511 dma_p->orig_alength = length; 3512 dma_p->orig_kaddrp = kaddrp; 3513 dma_p->orig_vatopa = (uint64_t)va_to_pa(kaddrp); 3514 #endif 3515 3516 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dma_mem_alloc: " 3517 "dma buffer allocated: dma_p $%p " 3518 "return dmac_ladress from cookie $%p cookie dmac_size %d " 3519 "dma_p->ioaddr_p $%p " 3520 "dma_p->orig_ioaddr_p $%p " 3521 "orig_vatopa $%p " 3522 "alength %d (0x%x) " 3523 "kaddrp $%p " 3524 "length %d (0x%x)", 3525 dma_p, 3526 dma_p->dma_cookie.dmac_laddress, dma_p->dma_cookie.dmac_size, 3527 dma_p->ioaddr_pp, 3528 dma_p->orig_ioaddr_pp, 3529 dma_p->orig_vatopa, 3530 dma_p->alength, dma_p->alength, 3531 kaddrp, 3532 length, length)); 3533 3534 return (NXGE_OK); 3535 } 3536 3537 static void 3538 nxge_dma_mem_free(p_nxge_dma_common_t dma_p) 3539 { 3540 if (dma_p->dma_handle != NULL) { 3541 if (dma_p->ncookies) { 3542 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 3543 dma_p->ncookies = 0; 3544 } 3545 ddi_dma_free_handle(&dma_p->dma_handle); 3546 dma_p->dma_handle = NULL; 3547 } 3548 3549 if (dma_p->acc_handle != NULL) { 3550 ddi_dma_mem_free(&dma_p->acc_handle); 3551 dma_p->acc_handle = NULL; 3552 NPI_DMA_ACC_HANDLE_SET(dma_p, NULL); 3553 } 3554 3555 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 3556 if (dma_p->contig_alloc_type && 3557 dma_p->orig_kaddrp && dma_p->orig_alength) { 3558 NXGE_DEBUG_MSG((NULL, DMA_CTL, "nxge_dma_mem_free: " 3559 "kaddrp $%p (orig_kaddrp $%p)" 3560 "mem type %d ", 3561 "orig_alength %d " 3562 "alength 0x%x (%d)", 3563 dma_p->kaddrp, 3564 dma_p->orig_kaddrp, 3565 dma_p->contig_alloc_type, 3566 dma_p->orig_alength, 3567 
dma_p->alength, dma_p->alength)); 3568 3569 contig_mem_free(dma_p->orig_kaddrp, dma_p->orig_alength); 3570 dma_p->orig_alength = NULL; 3571 dma_p->orig_kaddrp = NULL; 3572 dma_p->contig_alloc_type = B_FALSE; 3573 } 3574 #endif 3575 dma_p->kaddrp = NULL; 3576 dma_p->alength = NULL; 3577 } 3578 3579 static void 3580 nxge_dma_free_rx_data_buf(p_nxge_dma_common_t dma_p) 3581 { 3582 uint64_t kaddr; 3583 uint32_t buf_size; 3584 3585 NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_dma_free_rx_data_buf")); 3586 3587 if (dma_p->dma_handle != NULL) { 3588 if (dma_p->ncookies) { 3589 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 3590 dma_p->ncookies = 0; 3591 } 3592 ddi_dma_free_handle(&dma_p->dma_handle); 3593 dma_p->dma_handle = NULL; 3594 } 3595 3596 if (dma_p->acc_handle != NULL) { 3597 ddi_dma_mem_free(&dma_p->acc_handle); 3598 dma_p->acc_handle = NULL; 3599 NPI_DMA_ACC_HANDLE_SET(dma_p, NULL); 3600 } 3601 3602 NXGE_DEBUG_MSG((NULL, DMA_CTL, 3603 "==> nxge_dma_free_rx_data_buf: dmap $%p buf_alloc_state %d", 3604 dma_p, 3605 dma_p->buf_alloc_state)); 3606 3607 if (!(dma_p->buf_alloc_state & BUF_ALLOCATED_WAIT_FREE)) { 3608 NXGE_DEBUG_MSG((NULL, DMA_CTL, 3609 "<== nxge_dma_free_rx_data_buf: " 3610 "outstanding data buffers")); 3611 return; 3612 } 3613 3614 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 3615 if (dma_p->contig_alloc_type && 3616 dma_p->orig_kaddrp && dma_p->orig_alength) { 3617 NXGE_DEBUG_MSG((NULL, DMA_CTL, "nxge_dma_free_rx_data_buf: " 3618 "kaddrp $%p (orig_kaddrp $%p)" 3619 "mem type %d ", 3620 "orig_alength %d " 3621 "alength 0x%x (%d)", 3622 dma_p->kaddrp, 3623 dma_p->orig_kaddrp, 3624 dma_p->contig_alloc_type, 3625 dma_p->orig_alength, 3626 dma_p->alength, dma_p->alength)); 3627 3628 kaddr = (uint64_t)dma_p->orig_kaddrp; 3629 buf_size = dma_p->orig_alength; 3630 nxge_free_buf(CONTIG_MEM_ALLOC, kaddr, buf_size); 3631 dma_p->orig_alength = NULL; 3632 dma_p->orig_kaddrp = NULL; 3633 dma_p->contig_alloc_type = B_FALSE; 3634 dma_p->kaddrp = NULL; 3635 
dma_p->alength = NULL; 3636 return; 3637 } 3638 #endif 3639 3640 if (dma_p->kmem_alloc_type) { 3641 NXGE_DEBUG_MSG((NULL, DMA_CTL, 3642 "nxge_dma_free_rx_data_buf: free kmem " 3643 "kaddrp $%p (orig_kaddrp $%p)" 3644 "alloc type %d " 3645 "orig_alength %d " 3646 "alength 0x%x (%d)", 3647 dma_p->kaddrp, 3648 dma_p->orig_kaddrp, 3649 dma_p->kmem_alloc_type, 3650 dma_p->orig_alength, 3651 dma_p->alength, dma_p->alength)); 3652 #if defined(__i386) 3653 kaddr = (uint64_t)(uint32_t)dma_p->kaddrp; 3654 #else 3655 kaddr = (uint64_t)dma_p->kaddrp; 3656 #endif 3657 buf_size = dma_p->orig_alength; 3658 NXGE_DEBUG_MSG((NULL, DMA_CTL, 3659 "nxge_dma_free_rx_data_buf: free dmap $%p " 3660 "kaddr $%p buf_size %d", 3661 dma_p, 3662 kaddr, buf_size)); 3663 nxge_free_buf(KMEM_ALLOC, kaddr, buf_size); 3664 dma_p->alength = 0; 3665 dma_p->orig_alength = 0; 3666 dma_p->kaddrp = NULL; 3667 dma_p->kmem_alloc_type = B_FALSE; 3668 } 3669 3670 NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_dma_free_rx_data_buf")); 3671 } 3672 3673 /* 3674 * nxge_m_start() -- start transmitting and receiving. 3675 * 3676 * This function is called by the MAC layer when the first 3677 * stream is open to prepare the hardware ready for sending 3678 * and transmitting packets. 
3679 */ 3680 static int 3681 nxge_m_start(void *arg) 3682 { 3683 p_nxge_t nxgep = (p_nxge_t)arg; 3684 3685 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_start")); 3686 3687 if (nxge_peu_reset_enable && !nxgep->nxge_link_poll_timerid) { 3688 (void) nxge_link_monitor(nxgep, LINK_MONITOR_START); 3689 } 3690 3691 MUTEX_ENTER(nxgep->genlock); 3692 if (nxge_init(nxgep) != NXGE_OK) { 3693 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3694 "<== nxge_m_start: initialization failed")); 3695 MUTEX_EXIT(nxgep->genlock); 3696 return (EIO); 3697 } 3698 3699 if (nxgep->nxge_mac_state == NXGE_MAC_STARTED) 3700 goto nxge_m_start_exit; 3701 /* 3702 * Start timer to check the system error and tx hangs 3703 */ 3704 if (!isLDOMguest(nxgep)) 3705 nxgep->nxge_timerid = nxge_start_timer(nxgep, 3706 nxge_check_hw_state, NXGE_CHECK_TIMER); 3707 #if defined(sun4v) 3708 else 3709 nxge_hio_start_timer(nxgep); 3710 #endif 3711 3712 nxgep->link_notify = B_TRUE; 3713 3714 nxgep->nxge_mac_state = NXGE_MAC_STARTED; 3715 3716 nxge_m_start_exit: 3717 MUTEX_EXIT(nxgep->genlock); 3718 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_start")); 3719 return (0); 3720 } 3721 3722 /* 3723 * nxge_m_stop(): stop transmitting and receiving. 
3724 */ 3725 static void 3726 nxge_m_stop(void *arg) 3727 { 3728 p_nxge_t nxgep = (p_nxge_t)arg; 3729 3730 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_stop")); 3731 3732 MUTEX_ENTER(nxgep->genlock); 3733 nxgep->nxge_mac_state = NXGE_MAC_STOPPING; 3734 3735 if (nxgep->nxge_timerid) { 3736 nxge_stop_timer(nxgep, nxgep->nxge_timerid); 3737 nxgep->nxge_timerid = 0; 3738 } 3739 3740 nxge_uninit(nxgep); 3741 3742 nxgep->nxge_mac_state = NXGE_MAC_STOPPED; 3743 3744 MUTEX_EXIT(nxgep->genlock); 3745 3746 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_stop")); 3747 } 3748 3749 static int 3750 nxge_m_unicst(void *arg, const uint8_t *macaddr) 3751 { 3752 p_nxge_t nxgep = (p_nxge_t)arg; 3753 struct ether_addr addrp; 3754 3755 NXGE_DEBUG_MSG((nxgep, MAC_CTL, "==> nxge_m_unicst")); 3756 3757 bcopy(macaddr, (uint8_t *)&addrp, ETHERADDRL); 3758 if (nxge_set_mac_addr(nxgep, &addrp)) { 3759 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3760 "<== nxge_m_unicst: set unitcast failed")); 3761 return (EINVAL); 3762 } 3763 3764 NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_m_unicst")); 3765 3766 return (0); 3767 } 3768 3769 static int 3770 nxge_m_multicst(void *arg, boolean_t add, const uint8_t *mca) 3771 { 3772 p_nxge_t nxgep = (p_nxge_t)arg; 3773 struct ether_addr addrp; 3774 3775 NXGE_DEBUG_MSG((nxgep, MAC_CTL, 3776 "==> nxge_m_multicst: add %d", add)); 3777 3778 bcopy(mca, (uint8_t *)&addrp, ETHERADDRL); 3779 if (add) { 3780 if (nxge_add_mcast_addr(nxgep, &addrp)) { 3781 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3782 "<== nxge_m_multicst: add multicast failed")); 3783 return (EINVAL); 3784 } 3785 } else { 3786 if (nxge_del_mcast_addr(nxgep, &addrp)) { 3787 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3788 "<== nxge_m_multicst: del multicast failed")); 3789 return (EINVAL); 3790 } 3791 } 3792 3793 NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_m_multicst")); 3794 3795 return (0); 3796 } 3797 3798 static int 3799 nxge_m_promisc(void *arg, boolean_t on) 3800 { 3801 p_nxge_t nxgep = (p_nxge_t)arg; 3802 3803 
NXGE_DEBUG_MSG((nxgep, MAC_CTL, 3804 "==> nxge_m_promisc: on %d", on)); 3805 3806 if (nxge_set_promisc(nxgep, on)) { 3807 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3808 "<== nxge_m_promisc: set promisc failed")); 3809 return (EINVAL); 3810 } 3811 3812 NXGE_DEBUG_MSG((nxgep, MAC_CTL, 3813 "<== nxge_m_promisc: on %d", on)); 3814 3815 return (0); 3816 } 3817 3818 static void 3819 nxge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp) 3820 { 3821 p_nxge_t nxgep = (p_nxge_t)arg; 3822 struct iocblk *iocp; 3823 boolean_t need_privilege; 3824 int err; 3825 int cmd; 3826 3827 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl")); 3828 3829 iocp = (struct iocblk *)mp->b_rptr; 3830 iocp->ioc_error = 0; 3831 need_privilege = B_TRUE; 3832 cmd = iocp->ioc_cmd; 3833 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl: cmd 0x%08x", cmd)); 3834 switch (cmd) { 3835 default: 3836 miocnak(wq, mp, 0, EINVAL); 3837 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl: invalid")); 3838 return; 3839 3840 case LB_GET_INFO_SIZE: 3841 case LB_GET_INFO: 3842 case LB_GET_MODE: 3843 need_privilege = B_FALSE; 3844 break; 3845 case LB_SET_MODE: 3846 break; 3847 3848 3849 case NXGE_GET_MII: 3850 case NXGE_PUT_MII: 3851 case NXGE_GET64: 3852 case NXGE_PUT64: 3853 case NXGE_GET_TX_RING_SZ: 3854 case NXGE_GET_TX_DESC: 3855 case NXGE_TX_SIDE_RESET: 3856 case NXGE_RX_SIDE_RESET: 3857 case NXGE_GLOBAL_RESET: 3858 case NXGE_RESET_MAC: 3859 case NXGE_TX_REGS_DUMP: 3860 case NXGE_RX_REGS_DUMP: 3861 case NXGE_INT_REGS_DUMP: 3862 case NXGE_VIR_INT_REGS_DUMP: 3863 case NXGE_PUT_TCAM: 3864 case NXGE_GET_TCAM: 3865 case NXGE_RTRACE: 3866 case NXGE_RDUMP: 3867 3868 need_privilege = B_FALSE; 3869 break; 3870 case NXGE_INJECT_ERR: 3871 cmn_err(CE_NOTE, "!nxge_m_ioctl: Inject error\n"); 3872 nxge_err_inject(nxgep, wq, mp); 3873 break; 3874 } 3875 3876 if (need_privilege) { 3877 err = secpolicy_net_config(iocp->ioc_cr, B_FALSE); 3878 if (err != 0) { 3879 miocnak(wq, mp, 0, err); 3880 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3881 "<== 
nxge_m_ioctl: no priv")); 3882 return; 3883 } 3884 } 3885 3886 switch (cmd) { 3887 3888 case LB_GET_MODE: 3889 case LB_SET_MODE: 3890 case LB_GET_INFO_SIZE: 3891 case LB_GET_INFO: 3892 nxge_loopback_ioctl(nxgep, wq, mp, iocp); 3893 break; 3894 3895 case NXGE_GET_MII: 3896 case NXGE_PUT_MII: 3897 case NXGE_PUT_TCAM: 3898 case NXGE_GET_TCAM: 3899 case NXGE_GET64: 3900 case NXGE_PUT64: 3901 case NXGE_GET_TX_RING_SZ: 3902 case NXGE_GET_TX_DESC: 3903 case NXGE_TX_SIDE_RESET: 3904 case NXGE_RX_SIDE_RESET: 3905 case NXGE_GLOBAL_RESET: 3906 case NXGE_RESET_MAC: 3907 case NXGE_TX_REGS_DUMP: 3908 case NXGE_RX_REGS_DUMP: 3909 case NXGE_INT_REGS_DUMP: 3910 case NXGE_VIR_INT_REGS_DUMP: 3911 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 3912 "==> nxge_m_ioctl: cmd 0x%x", cmd)); 3913 nxge_hw_ioctl(nxgep, wq, mp, iocp); 3914 break; 3915 } 3916 3917 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl")); 3918 } 3919 3920 extern void nxge_rx_hw_blank(void *arg, time_t ticks, uint_t count); 3921 3922 static void 3923 nxge_m_resources(void *arg) 3924 { 3925 p_nxge_t nxgep = arg; 3926 mac_rx_fifo_t mrf; 3927 3928 nxge_grp_set_t *set = &nxgep->rx_set; 3929 uint8_t rdc; 3930 3931 rx_rcr_ring_t *ring; 3932 3933 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_m_resources")); 3934 3935 MUTEX_ENTER(nxgep->genlock); 3936 3937 if (set->owned.map == 0) { 3938 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 3939 "nxge_m_resources: no receive resources")); 3940 goto nxge_m_resources_exit; 3941 } 3942 3943 /* 3944 * CR 6492541 Check to see if the drv_state has been initialized, 3945 * if not * call nxge_init(). 3946 */ 3947 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 3948 if (nxge_init(nxgep) != NXGE_OK) 3949 goto nxge_m_resources_exit; 3950 } 3951 3952 mrf.mrf_type = MAC_RX_FIFO; 3953 mrf.mrf_blank = nxge_rx_hw_blank; 3954 mrf.mrf_arg = (void *)nxgep; 3955 3956 mrf.mrf_normal_blank_time = 128; 3957 mrf.mrf_normal_pkt_count = 8; 3958 3959 /* 3960 * Export our receive resources to the MAC layer. 
3961 */ 3962 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 3963 if ((1 << rdc) & set->owned.map) { 3964 ring = nxgep->rx_rcr_rings->rcr_rings[rdc]; 3965 if (ring == 0) { 3966 /* 3967 * This is a big deal only if we are 3968 * *not* in an LDOMs environment. 3969 */ 3970 if (nxgep->environs == SOLARIS_DOMAIN) { 3971 cmn_err(CE_NOTE, 3972 "==> nxge_m_resources: " 3973 "ring %d == 0", rdc); 3974 } 3975 continue; 3976 } 3977 ring->rcr_mac_handle = mac_resource_add 3978 (nxgep->mach, (mac_resource_t *)&mrf); 3979 3980 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 3981 "==> nxge_m_resources: RDC %d RCR %p MAC handle %p", 3982 rdc, ring, ring->rcr_mac_handle)); 3983 } 3984 } 3985 3986 nxge_m_resources_exit: 3987 MUTEX_EXIT(nxgep->genlock); 3988 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_m_resources")); 3989 } 3990 3991 void 3992 nxge_mmac_kstat_update(p_nxge_t nxgep, mac_addr_slot_t slot, boolean_t factory) 3993 { 3994 p_nxge_mmac_stats_t mmac_stats; 3995 int i; 3996 nxge_mmac_t *mmac_info; 3997 3998 mmac_info = &nxgep->nxge_mmac_info; 3999 4000 mmac_stats = &nxgep->statsp->mmac_stats; 4001 mmac_stats->mmac_max_cnt = mmac_info->num_mmac; 4002 mmac_stats->mmac_avail_cnt = mmac_info->naddrfree; 4003 4004 for (i = 0; i < ETHERADDRL; i++) { 4005 if (factory) { 4006 mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i] 4007 = mmac_info->factory_mac_pool[slot][ 4008 (ETHERADDRL-1) - i]; 4009 } else { 4010 mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i] 4011 = mmac_info->mac_pool[slot].addr[ 4012 (ETHERADDRL - 1) - i]; 4013 } 4014 } 4015 } 4016 4017 /* 4018 * nxge_altmac_set() -- Set an alternate MAC address 4019 */ 4020 static int 4021 nxge_altmac_set(p_nxge_t nxgep, uint8_t *maddr, mac_addr_slot_t slot) 4022 { 4023 uint8_t addrn; 4024 uint8_t portn; 4025 npi_mac_addr_t altmac; 4026 hostinfo_t mac_rdc; 4027 p_nxge_class_pt_cfg_t clscfgp; 4028 4029 altmac.w2 = ((uint16_t)maddr[0] << 8) | ((uint16_t)maddr[1] & 0x0ff); 4030 altmac.w1 = ((uint16_t)maddr[2] << 8) | ((uint16_t)maddr[3] & 
0x0ff); 4031 altmac.w0 = ((uint16_t)maddr[4] << 8) | ((uint16_t)maddr[5] & 0x0ff); 4032 4033 portn = nxgep->mac.portnum; 4034 addrn = (uint8_t)slot - 1; 4035 4036 if (npi_mac_altaddr_entry(nxgep->npi_handle, OP_SET, portn, 4037 addrn, &altmac) != NPI_SUCCESS) 4038 return (EIO); 4039 4040 /* 4041 * Set the rdc table number for the host info entry 4042 * for this mac address slot. 4043 */ 4044 clscfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config; 4045 mac_rdc.value = 0; 4046 mac_rdc.bits.w0.rdc_tbl_num = clscfgp->mac_host_info[addrn].rdctbl; 4047 mac_rdc.bits.w0.mac_pref = clscfgp->mac_host_info[addrn].mpr_npr; 4048 4049 if (npi_mac_hostinfo_entry(nxgep->npi_handle, OP_SET, 4050 nxgep->function_num, addrn, &mac_rdc) != NPI_SUCCESS) { 4051 return (EIO); 4052 } 4053 4054 /* 4055 * Enable comparison with the alternate MAC address. 4056 * While the first alternate addr is enabled by bit 1 of register 4057 * BMAC_ALTAD_CMPEN, it is enabled by bit 0 of register 4058 * XMAC_ADDR_CMPEN, so slot needs to be converted to addrn 4059 * accordingly before calling npi_mac_altaddr_entry. 4060 */ 4061 if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1) 4062 addrn = (uint8_t)slot - 1; 4063 else 4064 addrn = (uint8_t)slot; 4065 4066 if (npi_mac_altaddr_enable(nxgep->npi_handle, portn, addrn) 4067 != NPI_SUCCESS) 4068 return (EIO); 4069 4070 return (0); 4071 } 4072 4073 /* 4074 * nxeg_m_mmac_add() - find an unused address slot, set the address 4075 * value to the one specified, enable the port to start filtering on 4076 * the new MAC address. Returns 0 on success. 4077 */ 4078 int 4079 nxge_m_mmac_add(void *arg, mac_multi_addr_t *maddr) 4080 { 4081 p_nxge_t nxgep = arg; 4082 mac_addr_slot_t slot; 4083 nxge_mmac_t *mmac_info; 4084 int err; 4085 nxge_status_t status; 4086 4087 mutex_enter(nxgep->genlock); 4088 4089 /* 4090 * Make sure that nxge is initialized, if _start() has 4091 * not been called. 
4092 */ 4093 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 4094 status = nxge_init(nxgep); 4095 if (status != NXGE_OK) { 4096 mutex_exit(nxgep->genlock); 4097 return (ENXIO); 4098 } 4099 } 4100 4101 mmac_info = &nxgep->nxge_mmac_info; 4102 if (mmac_info->naddrfree == 0) { 4103 mutex_exit(nxgep->genlock); 4104 return (ENOSPC); 4105 } 4106 if (!mac_unicst_verify(nxgep->mach, maddr->mma_addr, 4107 maddr->mma_addrlen)) { 4108 mutex_exit(nxgep->genlock); 4109 return (EINVAL); 4110 } 4111 /* 4112 * Search for the first available slot. Because naddrfree 4113 * is not zero, we are guaranteed to find one. 4114 * Slot 0 is for unique (primary) MAC. The first alternate 4115 * MAC slot is slot 1. 4116 * Each of the first two ports of Neptune has 16 alternate 4117 * MAC slots but only the first 7 (of 15) slots have assigned factory 4118 * MAC addresses. We first search among the slots without bundled 4119 * factory MACs. If we fail to find one in that range, then we 4120 * search the slots with bundled factory MACs. A factory MAC 4121 * will be wasted while the slot is used with a user MAC address. 4122 * But the slot could be used by factory MAC again after calling 4123 * nxge_m_mmac_remove and nxge_m_mmac_reserve. 
4124 */ 4125 if (mmac_info->num_factory_mmac < mmac_info->num_mmac) { 4126 for (slot = mmac_info->num_factory_mmac + 1; 4127 slot <= mmac_info->num_mmac; slot++) { 4128 if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED)) 4129 break; 4130 } 4131 if (slot > mmac_info->num_mmac) { 4132 for (slot = 1; slot <= mmac_info->num_factory_mmac; 4133 slot++) { 4134 if (!(mmac_info->mac_pool[slot].flags 4135 & MMAC_SLOT_USED)) 4136 break; 4137 } 4138 } 4139 } else { 4140 for (slot = 1; slot <= mmac_info->num_mmac; slot++) { 4141 if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED)) 4142 break; 4143 } 4144 } 4145 ASSERT(slot <= mmac_info->num_mmac); 4146 if ((err = nxge_altmac_set(nxgep, maddr->mma_addr, slot)) != 0) { 4147 mutex_exit(nxgep->genlock); 4148 return (err); 4149 } 4150 bcopy(maddr->mma_addr, mmac_info->mac_pool[slot].addr, ETHERADDRL); 4151 mmac_info->mac_pool[slot].flags |= MMAC_SLOT_USED; 4152 mmac_info->mac_pool[slot].flags &= ~MMAC_VENDOR_ADDR; 4153 mmac_info->naddrfree--; 4154 nxge_mmac_kstat_update(nxgep, slot, B_FALSE); 4155 4156 maddr->mma_slot = slot; 4157 4158 mutex_exit(nxgep->genlock); 4159 return (0); 4160 } 4161 4162 /* 4163 * This function reserves an unused slot and programs the slot and the HW 4164 * with a factory mac address. 4165 */ 4166 static int 4167 nxge_m_mmac_reserve(void *arg, mac_multi_addr_t *maddr) 4168 { 4169 p_nxge_t nxgep = arg; 4170 mac_addr_slot_t slot; 4171 nxge_mmac_t *mmac_info; 4172 int err; 4173 nxge_status_t status; 4174 4175 mutex_enter(nxgep->genlock); 4176 4177 /* 4178 * Make sure that nxge is initialized, if _start() has 4179 * not been called. 
4180 */ 4181 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 4182 status = nxge_init(nxgep); 4183 if (status != NXGE_OK) { 4184 mutex_exit(nxgep->genlock); 4185 return (ENXIO); 4186 } 4187 } 4188 4189 mmac_info = &nxgep->nxge_mmac_info; 4190 if (mmac_info->naddrfree == 0) { 4191 mutex_exit(nxgep->genlock); 4192 return (ENOSPC); 4193 } 4194 4195 slot = maddr->mma_slot; 4196 if (slot == -1) { /* -1: Take the first available slot */ 4197 for (slot = 1; slot <= mmac_info->num_factory_mmac; slot++) { 4198 if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED)) 4199 break; 4200 } 4201 if (slot > mmac_info->num_factory_mmac) { 4202 mutex_exit(nxgep->genlock); 4203 return (ENOSPC); 4204 } 4205 } 4206 if (slot < 1 || slot > mmac_info->num_factory_mmac) { 4207 /* 4208 * Do not support factory MAC at a slot greater than 4209 * num_factory_mmac even when there are available factory 4210 * MAC addresses because the alternate MACs are bundled with 4211 * slot[1] through slot[num_factory_mmac] 4212 */ 4213 mutex_exit(nxgep->genlock); 4214 return (EINVAL); 4215 } 4216 if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) { 4217 mutex_exit(nxgep->genlock); 4218 return (EBUSY); 4219 } 4220 /* Verify the address to be reserved */ 4221 if (!mac_unicst_verify(nxgep->mach, 4222 mmac_info->factory_mac_pool[slot], ETHERADDRL)) { 4223 mutex_exit(nxgep->genlock); 4224 return (EINVAL); 4225 } 4226 if (err = nxge_altmac_set(nxgep, 4227 mmac_info->factory_mac_pool[slot], slot)) { 4228 mutex_exit(nxgep->genlock); 4229 return (err); 4230 } 4231 bcopy(mmac_info->factory_mac_pool[slot], maddr->mma_addr, ETHERADDRL); 4232 mmac_info->mac_pool[slot].flags |= MMAC_SLOT_USED | MMAC_VENDOR_ADDR; 4233 mmac_info->naddrfree--; 4234 4235 nxge_mmac_kstat_update(nxgep, slot, B_TRUE); 4236 mutex_exit(nxgep->genlock); 4237 4238 /* Pass info back to the caller */ 4239 maddr->mma_slot = slot; 4240 maddr->mma_addrlen = ETHERADDRL; 4241 maddr->mma_flags = MMAC_SLOT_USED | MMAC_VENDOR_ADDR; 4242 4243 return (0); 
4244 } 4245 4246 /* 4247 * Remove the specified mac address and update the HW not to filter 4248 * the mac address anymore. 4249 */ 4250 int 4251 nxge_m_mmac_remove(void *arg, mac_addr_slot_t slot) 4252 { 4253 p_nxge_t nxgep = arg; 4254 nxge_mmac_t *mmac_info; 4255 uint8_t addrn; 4256 uint8_t portn; 4257 int err = 0; 4258 nxge_status_t status; 4259 4260 mutex_enter(nxgep->genlock); 4261 4262 /* 4263 * Make sure that nxge is initialized, if _start() has 4264 * not been called. 4265 */ 4266 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 4267 status = nxge_init(nxgep); 4268 if (status != NXGE_OK) { 4269 mutex_exit(nxgep->genlock); 4270 return (ENXIO); 4271 } 4272 } 4273 4274 mmac_info = &nxgep->nxge_mmac_info; 4275 if (slot < 1 || slot > mmac_info->num_mmac) { 4276 mutex_exit(nxgep->genlock); 4277 return (EINVAL); 4278 } 4279 4280 portn = nxgep->mac.portnum; 4281 if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1) 4282 addrn = (uint8_t)slot - 1; 4283 else 4284 addrn = (uint8_t)slot; 4285 4286 if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) { 4287 if (npi_mac_altaddr_disable(nxgep->npi_handle, portn, addrn) 4288 == NPI_SUCCESS) { 4289 mmac_info->naddrfree++; 4290 mmac_info->mac_pool[slot].flags &= ~MMAC_SLOT_USED; 4291 /* 4292 * Regardless if the MAC we just stopped filtering 4293 * is a user addr or a facory addr, we must set 4294 * the MMAC_VENDOR_ADDR flag if this slot has an 4295 * associated factory MAC to indicate that a factory 4296 * MAC is available. 4297 */ 4298 if (slot <= mmac_info->num_factory_mmac) { 4299 mmac_info->mac_pool[slot].flags 4300 |= MMAC_VENDOR_ADDR; 4301 } 4302 /* 4303 * Clear mac_pool[slot].addr so that kstat shows 0 4304 * alternate MAC address if the slot is not used. 4305 * (But nxge_m_mmac_get returns the factory MAC even 4306 * when the slot is not used!) 
4307 */ 4308 bzero(mmac_info->mac_pool[slot].addr, ETHERADDRL); 4309 nxge_mmac_kstat_update(nxgep, slot, B_FALSE); 4310 } else { 4311 err = EIO; 4312 } 4313 } else { 4314 err = EINVAL; 4315 } 4316 4317 mutex_exit(nxgep->genlock); 4318 return (err); 4319 } 4320 4321 /* 4322 * Modify a mac address added by nxge_m_mmac_add or nxge_m_mmac_reserve(). 4323 */ 4324 static int 4325 nxge_m_mmac_modify(void *arg, mac_multi_addr_t *maddr) 4326 { 4327 p_nxge_t nxgep = arg; 4328 mac_addr_slot_t slot; 4329 nxge_mmac_t *mmac_info; 4330 int err = 0; 4331 nxge_status_t status; 4332 4333 if (!mac_unicst_verify(nxgep->mach, maddr->mma_addr, 4334 maddr->mma_addrlen)) 4335 return (EINVAL); 4336 4337 slot = maddr->mma_slot; 4338 4339 mutex_enter(nxgep->genlock); 4340 4341 /* 4342 * Make sure that nxge is initialized, if _start() has 4343 * not been called. 4344 */ 4345 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 4346 status = nxge_init(nxgep); 4347 if (status != NXGE_OK) { 4348 mutex_exit(nxgep->genlock); 4349 return (ENXIO); 4350 } 4351 } 4352 4353 mmac_info = &nxgep->nxge_mmac_info; 4354 if (slot < 1 || slot > mmac_info->num_mmac) { 4355 mutex_exit(nxgep->genlock); 4356 return (EINVAL); 4357 } 4358 if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) { 4359 if ((err = nxge_altmac_set(nxgep, maddr->mma_addr, slot)) 4360 != 0) { 4361 bcopy(maddr->mma_addr, mmac_info->mac_pool[slot].addr, 4362 ETHERADDRL); 4363 /* 4364 * Assume that the MAC passed down from the caller 4365 * is not a factory MAC address (The user should 4366 * call mmac_remove followed by mmac_reserve if 4367 * he wants to use the factory MAC for this slot). 4368 */ 4369 mmac_info->mac_pool[slot].flags &= ~MMAC_VENDOR_ADDR; 4370 nxge_mmac_kstat_update(nxgep, slot, B_FALSE); 4371 } 4372 } else { 4373 err = EINVAL; 4374 } 4375 mutex_exit(nxgep->genlock); 4376 return (err); 4377 } 4378 4379 /* 4380 * nxge_m_mmac_get() - Get the MAC address and other information 4381 * related to the slot. 
 *	mma_flags should be set to 0 in the call.
 * Note: although kstat shows MAC address as zero when a slot is
 * not used, Crossbow expects nxge_m_mmac_get to copy factory MAC
 * to the caller as long as the slot is not using a user MAC address.
 * The following table shows the rules,
 *
 *				USED	VENDOR	mma_addr
 * ------------------------------------------------------------
 * (1) Slot uses a user MAC:	yes	no	user MAC
 * (2) Slot uses a factory MAC:	yes	yes	factory MAC
 * (3) Slot is not used but is
 *     factory MAC capable:	no	yes	factory MAC
 * (4) Slot is not used and is
 *     not factory MAC capable:	no	no	0
 * ------------------------------------------------------------
 */
static int
nxge_m_mmac_get(void *arg, mac_multi_addr_t *maddr)
{
	nxge_t *nxgep = arg;
	mac_addr_slot_t slot;
	nxge_mmac_t *mmac_info;
	nxge_status_t status;

	slot = maddr->mma_slot;

	mutex_enter(nxgep->genlock);

	/*
	 * Make sure that nxge is initialized, if _start() has
	 * not been called.
	 */
	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
		status = nxge_init(nxgep);
		if (status != NXGE_OK) {
			mutex_exit(nxgep->genlock);
			return (ENXIO);
		}
	}

	mmac_info = &nxgep->nxge_mmac_info;

	/* Slots are 1-based; 0 is the primary MAC, handled elsewhere. */
	if (slot < 1 || slot > mmac_info->num_mmac) {
		mutex_exit(nxgep->genlock);
		return (EINVAL);
	}
	maddr->mma_flags = 0;
	if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED)
		maddr->mma_flags |= MMAC_SLOT_USED;

	/* Apply the rules from the table above. */
	if (mmac_info->mac_pool[slot].flags & MMAC_VENDOR_ADDR) {
		maddr->mma_flags |= MMAC_VENDOR_ADDR;
		bcopy(mmac_info->factory_mac_pool[slot],
		    maddr->mma_addr, ETHERADDRL);
		maddr->mma_addrlen = ETHERADDRL;
	} else {
		if (maddr->mma_flags & MMAC_SLOT_USED) {
			bcopy(mmac_info->mac_pool[slot].addr,
			    maddr->mma_addr, ETHERADDRL);
			maddr->mma_addrlen = ETHERADDRL;
		} else {
			bzero(maddr->mma_addr, ETHERADDRL);
			maddr->mma_addrlen = 0;
		}
	}
	mutex_exit(nxgep->genlock);
	return (0);
}

/*
 * Report driver capabilities (h/w checksum, polling, alternate MAC
 * addresses, LSO and, on sun4v service domains, rings and shares) to
 * the MAC layer.  Returns B_FALSE for any capability not supported.
 */
static boolean_t
nxge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
{
	nxge_t *nxgep = arg;
	uint32_t *txflags = cap_data;
	multiaddress_capab_t *mmacp = cap_data;

	switch (cap) {
	case MAC_CAPAB_HCKSUM:
		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_m_getcapab: checksum %d", nxge_cksum_offload));
		/* See the nxge_cksum_offload usage notes at top of file. */
		if (nxge_cksum_offload <= 1) {
			*txflags = HCKSUM_INET_PARTIAL;
		}
		break;

	case MAC_CAPAB_POLL:
		/*
		 * There's nothing for us to fill in, simply returning
		 * B_TRUE stating that we support polling is sufficient.
		 */
		break;

	case MAC_CAPAB_MULTIADDRESS:
		mmacp = (multiaddress_capab_t *)cap_data;
		mutex_enter(nxgep->genlock);

		mmacp->maddr_naddr = nxgep->nxge_mmac_info.num_mmac;
		mmacp->maddr_naddrfree = nxgep->nxge_mmac_info.naddrfree;
		mmacp->maddr_flag = 0;	/* 0 is required by PSARC2006/265 */
		/*
		 * maddr_handle is driver's private data, passed back to
		 * entry point functions as arg.
		 */
		mmacp->maddr_handle = nxgep;
		mmacp->maddr_add = nxge_m_mmac_add;
		mmacp->maddr_remove = nxge_m_mmac_remove;
		mmacp->maddr_modify = nxge_m_mmac_modify;
		mmacp->maddr_get = nxge_m_mmac_get;
		mmacp->maddr_reserve = nxge_m_mmac_reserve;

		mutex_exit(nxgep->genlock);
		break;

	case MAC_CAPAB_LSO: {
		mac_capab_lso_t *cap_lso = cap_data;

		/* Software LSO only; disallowed when stack does checksum. */
		if (nxgep->soft_lso_enable) {
			if (nxge_cksum_offload <= 1) {
				cap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4;
				if (nxge_lso_max > NXGE_LSO_MAXLEN) {
					nxge_lso_max = NXGE_LSO_MAXLEN;
				}
				cap_lso->lso_basic_tcp_ipv4.lso_max =
				    nxge_lso_max;
			}
			break;
		} else {
			return (B_FALSE);
		}
	}

#if defined(sun4v)
	case MAC_CAPAB_RINGS: {
		mac_capab_rings_t *mrings = (mac_capab_rings_t *)cap_data;

		/*
		 * Only the service domain driver responds to
		 * this capability request.
		 */
		if (isLDOMservice(nxgep)) {
			mrings->mr_handle = (void *)nxgep;

			/*
			 * No dynamic allocation of groups and
			 * rings at this time.  Shares dictate the
			 * configuration.
			 */
			mrings->mr_gadd_ring = NULL;
			mrings->mr_grem_ring = NULL;
			mrings->mr_rget = NULL;
			mrings->mr_gget = nxge_hio_group_get;

			if (mrings->mr_type == MAC_RING_TYPE_RX) {
				mrings->mr_rnum = 8; /* XXX */
				mrings->mr_gnum = 6; /* XXX */
			} else {
				mrings->mr_rnum = 8; /* XXX */
				mrings->mr_gnum = 0; /* XXX */
			}
		} else
			return (B_FALSE);
		break;
	}

	case MAC_CAPAB_SHARES: {
		mac_capab_share_t *mshares = (mac_capab_share_t *)cap_data;

		/*
		 * Only the service domain driver responds to
		 * this capability request.
		 */
		if (isLDOMservice(nxgep)) {
			mshares->ms_snum = 3;
			mshares->ms_handle = (void *)nxgep;
			mshares->ms_salloc = nxge_hio_share_alloc;
			mshares->ms_sfree = nxge_hio_share_free;
			mshares->ms_sadd = NULL;
			mshares->ms_sremove = NULL;
			mshares->ms_squery = nxge_hio_share_query;
		} else
			return (B_FALSE);
		break;
	}
#endif
	default:
		return (B_FALSE);
	}
	return (B_TRUE);
}

/*
 * Return B_TRUE for properties that must be treated as read-only
 * while the device is in loopback mode (see nxge_m_setprop).
 */
static boolean_t
nxge_param_locked(mac_prop_id_t pr_num)
{
	/*
	 * All adv_* parameters are locked (read-only) while
	 * the device is in any sort of loopback mode ...
	 */
	switch (pr_num) {
	case MAC_PROP_ADV_1000FDX_CAP:
	case MAC_PROP_EN_1000FDX_CAP:
	case MAC_PROP_ADV_1000HDX_CAP:
	case MAC_PROP_EN_1000HDX_CAP:
	case MAC_PROP_ADV_100FDX_CAP:
	case MAC_PROP_EN_100FDX_CAP:
	case MAC_PROP_ADV_100HDX_CAP:
	case MAC_PROP_EN_100HDX_CAP:
	case MAC_PROP_ADV_10FDX_CAP:
	case MAC_PROP_EN_10FDX_CAP:
	case MAC_PROP_ADV_10HDX_CAP:
	case MAC_PROP_EN_10HDX_CAP:
	case MAC_PROP_AUTONEG:
	case MAC_PROP_FLOWCTRL:
		return (B_TRUE);
	}
	return (B_FALSE);
}

/*
 * callback functions for set/get of properties
 */

/*
 * MAC layer property-set entry point.  Handles the link capability,
 * MTU and flow-control properties directly and dispatches "_" prefixed
 * private properties to nxge_set_priv_prop().  Returns EBUSY while in
 * loopback mode (parameters are locked) or while changing MTU on a
 * started device.
 */
static int
nxge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
    uint_t pr_valsize, const void *pr_val)
{
	nxge_t *nxgep = barg;
	p_nxge_param_t param_arr;
	p_nxge_stats_t statsp;
	int err = 0;
	uint8_t val;
	uint32_t cur_mtu, new_mtu, old_framesize;
	link_flowctrl_t fl;

	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_setprop"));
	param_arr = nxgep->param_arr;
	statsp = nxgep->statsp;
	mutex_enter(nxgep->genlock);
	if (statsp->port_stats.lb_mode != nxge_lb_normal &&
	    nxge_param_locked(pr_num)) {
		/*
		 * All adv_* parameters are locked (read-only)
		 * while the device is in any sort of loopback mode.
		 */
		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_m_setprop: loopback mode: read only"));
		mutex_exit(nxgep->genlock);
		return (EBUSY);
	}

	val = *(uint8_t *)pr_val;
	switch (pr_num) {
	case MAC_PROP_EN_1000FDX_CAP:
		nxgep->param_en_1000fdx = val;
		param_arr[param_anar_1000fdx].value = val;

		goto reprogram;

	case MAC_PROP_EN_100FDX_CAP:
		nxgep->param_en_100fdx = val;
		param_arr[param_anar_100fdx].value = val;

		goto reprogram;

	case MAC_PROP_EN_10FDX_CAP:
		nxgep->param_en_10fdx = val;
		param_arr[param_anar_10fdx].value = val;

		goto reprogram;

	case MAC_PROP_EN_1000HDX_CAP:
	case MAC_PROP_EN_100HDX_CAP:
	case MAC_PROP_EN_10HDX_CAP:
	case MAC_PROP_ADV_1000FDX_CAP:
	case MAC_PROP_ADV_1000HDX_CAP:
	case MAC_PROP_ADV_100FDX_CAP:
	case MAC_PROP_ADV_100HDX_CAP:
	case MAC_PROP_ADV_10FDX_CAP:
	case MAC_PROP_ADV_10HDX_CAP:
	case MAC_PROP_STATUS:
	case MAC_PROP_SPEED:
	case MAC_PROP_DUPLEX:
		err = EINVAL; /* cannot set read-only properties */
		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_m_setprop: read only property %d",
		    pr_num));
		break;

	case MAC_PROP_AUTONEG:
		param_arr[param_autoneg].value = val;

		goto reprogram;

	case MAC_PROP_MTU:
		/* MTU may only change while the MAC is stopped. */
		if (nxgep->nxge_mac_state == NXGE_MAC_STARTED) {
			err = EBUSY;
			break;
		}

		cur_mtu = nxgep->mac.default_mtu;
		bcopy(pr_val, &new_mtu, sizeof (new_mtu));
		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_m_setprop: set MTU: %d is_jumbo %d",
		    new_mtu, nxgep->mac.is_jumbo));

		if (new_mtu == cur_mtu) {
			err = 0;
			break;
		}
		if (new_mtu < NXGE_DEFAULT_MTU ||
		    new_mtu > NXGE_MAXIMUM_MTU) {
			err = EINVAL;
			break;
		}

		/* Anything above the default requires jumbo support. */
		if ((new_mtu > NXGE_DEFAULT_MTU) &&
		    !nxgep->mac.is_jumbo) {
			err = EINVAL;
			break;
		}

		/* Roll back the frame size on any failure below. */
		old_framesize = (uint32_t)nxgep->mac.maxframesize;
		nxgep->mac.maxframesize = (uint16_t)
		    (new_mtu + NXGE_EHEADER_VLAN_CRC);
		if (nxge_mac_set_framesize(nxgep)) {
			nxgep->mac.maxframesize =
			    (uint16_t)old_framesize;
			err = EINVAL;
			break;
		}

		err = mac_maxsdu_update(nxgep->mach, new_mtu);
		if (err) {
			nxgep->mac.maxframesize =
			    (uint16_t)old_framesize;
			err = EINVAL;
			break;
		}

		nxgep->mac.default_mtu = new_mtu;
		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_m_setprop: set MTU: %d maxframe %d",
		    new_mtu, nxgep->mac.maxframesize));
		break;

	case MAC_PROP_FLOWCTRL:
		bcopy(pr_val, &fl, sizeof (fl));
		switch (fl) {
		default:
			err = EINVAL;
			break;

		case LINK_FLOWCTRL_NONE:
			param_arr[param_anar_pause].value = 0;
			break;

		case LINK_FLOWCTRL_RX:
			param_arr[param_anar_pause].value = 1;
			break;

		/* TX-only and bidirectional pause are not supported. */
		case LINK_FLOWCTRL_TX:
		case LINK_FLOWCTRL_BI:
			err = EINVAL;
			break;
		}

reprogram:
		/* Push the updated link parameters down to the hardware. */
		if (err == 0) {
			if (!nxge_param_link_update(nxgep)) {
				err = EINVAL;
			}
		}
		break;
	case MAC_PROP_PRIVATE:
		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_m_setprop: private property"));
		err = nxge_set_priv_prop(nxgep, pr_name, pr_valsize,
		    pr_val);
		break;

	default:
		err = ENOTSUP;
		break;
	}

	mutex_exit(nxgep->genlock);

	NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
	    "<== nxge_m_setprop (return %d)", err));
	return (err);
}

/*
 * MAC layer property-get entry point: copy the requested link/flow
 * property (or a private property) into pr_val.
 */
static int
nxge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
    uint_t pr_flags, uint_t pr_valsize, void *pr_val)
{
	nxge_t *nxgep = barg;
	p_nxge_param_t param_arr = nxgep->param_arr;
	p_nxge_stats_t statsp = nxgep->statsp;
	int err = 0;
	link_flowctrl_t fl;
	uint64_t tmp = 0;
	link_state_t ls;
	boolean_t is_default = (pr_flags & MAC_PROP_DEFAULT);

	NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
	    "==> nxge_m_getprop: pr_num %d", pr_num));

	if (pr_valsize == 0)
		return (EINVAL);

	/* Default values are resolved by the shared helper. */
	if ((is_default) && (pr_num != MAC_PROP_PRIVATE)) {
		err = nxge_get_def_val(nxgep, pr_num, pr_valsize, pr_val);
		return (err);
	}

	bzero(pr_val, pr_valsize);
	switch (pr_num) {
	case MAC_PROP_DUPLEX:
		*(uint8_t *)pr_val = statsp->mac_stats.link_duplex;
		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_m_getprop: duplex mode %d",
		    *(uint8_t *)pr_val));
		break;

	case MAC_PROP_SPEED:
		if (pr_valsize < sizeof (uint64_t))
			return (EINVAL);
		/* link_speed is kept in Mbps; report bits per second. */
		tmp = statsp->mac_stats.link_speed * 1000000ull;
		bcopy(&tmp, pr_val, sizeof (tmp));
		break;

	case MAC_PROP_STATUS:
		if (pr_valsize < sizeof (link_state_t))
			return (EINVAL);
		if (!statsp->mac_stats.link_up)
			ls = LINK_STATE_DOWN;
		else
			ls = LINK_STATE_UP;
		bcopy(&ls, pr_val, sizeof (ls));
		break;

	case MAC_PROP_AUTONEG:
		*(uint8_t *)pr_val =
		    param_arr[param_autoneg].value;
		break;

	case MAC_PROP_FLOWCTRL:
		if (pr_valsize < sizeof (link_flowctrl_t))
			return (EINVAL);

		/* Only RX pause is ever advertised (see nxge_m_setprop). */
		fl = LINK_FLOWCTRL_NONE;
		if (param_arr[param_anar_pause].value) {
			fl = LINK_FLOWCTRL_RX;
		}
		bcopy(&fl, pr_val, sizeof (fl));
		break;

	case MAC_PROP_ADV_1000FDX_CAP:
		*(uint8_t *)pr_val =
		    param_arr[param_anar_1000fdx].value;
		break;

	case MAC_PROP_EN_1000FDX_CAP:
		*(uint8_t *)pr_val = nxgep->param_en_1000fdx;
		break;

	case MAC_PROP_ADV_100FDX_CAP:
		*(uint8_t *)pr_val =
		    param_arr[param_anar_100fdx].value;
		break;

	case MAC_PROP_EN_100FDX_CAP:
		*(uint8_t *)pr_val = nxgep->param_en_100fdx;
		break;

	case MAC_PROP_ADV_10FDX_CAP:
		*(uint8_t *)pr_val =
		    param_arr[param_anar_10fdx].value;
		break;

	case MAC_PROP_EN_10FDX_CAP:
		*(uint8_t *)pr_val = nxgep->param_en_10fdx;
		break;

	/* Half-duplex modes are not supported by this device. */
	case MAC_PROP_EN_1000HDX_CAP:
	case MAC_PROP_EN_100HDX_CAP:
	case MAC_PROP_EN_10HDX_CAP:
	case MAC_PROP_ADV_1000HDX_CAP:
	case MAC_PROP_ADV_100HDX_CAP:
	case MAC_PROP_ADV_10HDX_CAP:
		err = ENOTSUP;
		break;

	case MAC_PROP_PRIVATE:
		err = nxge_get_priv_prop(nxgep, pr_name, pr_flags,
		    pr_valsize, pr_val);
		break;
	default:
		err = EINVAL;
		break;
	}

	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_getprop"));

	return (err);
}

/*
 * Set a driver-private ("_" prefixed) property: jumbo acceptance,
 * receive interrupt blanking, flow classification options, software
 * LSO and the 10G/pause advertisement knobs.  Returns EINVAL for an
 * unrecognized name.
 */
/* ARGSUSED */
static int
nxge_set_priv_prop(p_nxge_t nxgep, const char *pr_name, uint_t pr_valsize,
    const void *pr_val)
{
	p_nxge_param_t param_arr = nxgep->param_arr;
	int err = 0;
	long result;

	NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
	    "==> nxge_set_priv_prop: name %s", pr_name));

	if (strcmp(pr_name, "_accept_jumbo") == 0) {
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "<== nxge_set_priv_prop: name %s "
		    "pr_val %s result %d "
		    "param %d is_jumbo %d",
		    pr_name, pr_val, result,
		    param_arr[param_accept_jumbo].value,
		    nxgep->mac.is_jumbo));

		/* Only boolean 0/1 values are accepted. */
		if (result > 1 || result < 0) {
			err = EINVAL;
		} else {
			if (nxgep->mac.is_jumbo ==
			    (uint32_t)result) {
				NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
				    "no change (%d %d)",
				    nxgep->mac.is_jumbo,
				    result));
				return (0);
			}
		}

		param_arr[param_accept_jumbo].value = result;
		nxgep->mac.is_jumbo = B_FALSE;
		if (result) {
			nxgep->mac.is_jumbo = B_TRUE;
		}

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "<== nxge_set_priv_prop: name %s (value %d) is_jumbo %d",
		    pr_name, result, nxgep->mac.is_jumbo));

		return (err);
	}

	/* Blanking */
	if (strcmp(pr_name, "_rxdma_intr_time") == 0) {
		err = nxge_param_rx_intr_time(nxgep, NULL, NULL,
		    (char
*)pr_val, 4939 (caddr_t)¶m_arr[param_rxdma_intr_time]); 4940 if (err) { 4941 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4942 "<== nxge_set_priv_prop: " 4943 "unable to set (%s)", pr_name)); 4944 err = EINVAL; 4945 } else { 4946 err = 0; 4947 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4948 "<== nxge_set_priv_prop: " 4949 "set (%s)", pr_name)); 4950 } 4951 4952 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4953 "<== nxge_set_priv_prop: name %s (value %d)", 4954 pr_name, result)); 4955 4956 return (err); 4957 } 4958 4959 if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) { 4960 err = nxge_param_rx_intr_pkts(nxgep, NULL, NULL, 4961 (char *)pr_val, 4962 (caddr_t)¶m_arr[param_rxdma_intr_pkts]); 4963 if (err) { 4964 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4965 "<== nxge_set_priv_prop: " 4966 "unable to set (%s)", pr_name)); 4967 err = EINVAL; 4968 } else { 4969 err = 0; 4970 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4971 "<== nxge_set_priv_prop: " 4972 "set (%s)", pr_name)); 4973 } 4974 4975 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4976 "<== nxge_set_priv_prop: name %s (value %d)", 4977 pr_name, result)); 4978 4979 return (err); 4980 } 4981 4982 /* Classification */ 4983 if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) { 4984 if (pr_val == NULL) { 4985 err = EINVAL; 4986 return (err); 4987 } 4988 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4989 4990 err = nxge_param_set_ip_opt(nxgep, NULL, 4991 NULL, (char *)pr_val, 4992 (caddr_t)¶m_arr[param_class_opt_ipv4_tcp]); 4993 4994 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4995 "<== nxge_set_priv_prop: name %s (value 0x%x)", 4996 pr_name, result)); 4997 4998 return (err); 4999 } 5000 5001 if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) { 5002 if (pr_val == NULL) { 5003 err = EINVAL; 5004 return (err); 5005 } 5006 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 5007 5008 err = nxge_param_set_ip_opt(nxgep, NULL, 5009 NULL, (char *)pr_val, 5010 (caddr_t)¶m_arr[param_class_opt_ipv4_udp]); 5011 5012 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5013 "<== nxge_set_priv_prop: name %s (value 0x%x)", 
5014 pr_name, result)); 5015 5016 return (err); 5017 } 5018 if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) { 5019 if (pr_val == NULL) { 5020 err = EINVAL; 5021 return (err); 5022 } 5023 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 5024 5025 err = nxge_param_set_ip_opt(nxgep, NULL, 5026 NULL, (char *)pr_val, 5027 (caddr_t)¶m_arr[param_class_opt_ipv4_ah]); 5028 5029 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5030 "<== nxge_set_priv_prop: name %s (value 0x%x)", 5031 pr_name, result)); 5032 5033 return (err); 5034 } 5035 if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) { 5036 if (pr_val == NULL) { 5037 err = EINVAL; 5038 return (err); 5039 } 5040 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 5041 5042 err = nxge_param_set_ip_opt(nxgep, NULL, 5043 NULL, (char *)pr_val, 5044 (caddr_t)¶m_arr[param_class_opt_ipv4_sctp]); 5045 5046 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5047 "<== nxge_set_priv_prop: name %s (value 0x%x)", 5048 pr_name, result)); 5049 5050 return (err); 5051 } 5052 5053 if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) { 5054 if (pr_val == NULL) { 5055 err = EINVAL; 5056 return (err); 5057 } 5058 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 5059 5060 err = nxge_param_set_ip_opt(nxgep, NULL, 5061 NULL, (char *)pr_val, 5062 (caddr_t)¶m_arr[param_class_opt_ipv6_tcp]); 5063 5064 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5065 "<== nxge_set_priv_prop: name %s (value 0x%x)", 5066 pr_name, result)); 5067 5068 return (err); 5069 } 5070 5071 if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) { 5072 if (pr_val == NULL) { 5073 err = EINVAL; 5074 return (err); 5075 } 5076 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 5077 5078 err = nxge_param_set_ip_opt(nxgep, NULL, 5079 NULL, (char *)pr_val, 5080 (caddr_t)¶m_arr[param_class_opt_ipv6_udp]); 5081 5082 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5083 "<== nxge_set_priv_prop: name %s (value 0x%x)", 5084 pr_name, result)); 5085 5086 return (err); 5087 } 5088 if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) { 5089 if (pr_val == 
NULL) { 5090 err = EINVAL; 5091 return (err); 5092 } 5093 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 5094 5095 err = nxge_param_set_ip_opt(nxgep, NULL, 5096 NULL, (char *)pr_val, 5097 (caddr_t)¶m_arr[param_class_opt_ipv6_ah]); 5098 5099 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5100 "<== nxge_set_priv_prop: name %s (value 0x%x)", 5101 pr_name, result)); 5102 5103 return (err); 5104 } 5105 if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) { 5106 if (pr_val == NULL) { 5107 err = EINVAL; 5108 return (err); 5109 } 5110 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 5111 5112 err = nxge_param_set_ip_opt(nxgep, NULL, 5113 NULL, (char *)pr_val, 5114 (caddr_t)¶m_arr[param_class_opt_ipv6_sctp]); 5115 5116 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5117 "<== nxge_set_priv_prop: name %s (value 0x%x)", 5118 pr_name, result)); 5119 5120 return (err); 5121 } 5122 5123 if (strcmp(pr_name, "_soft_lso_enable") == 0) { 5124 if (nxgep->nxge_mac_state == NXGE_MAC_STARTED) { 5125 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5126 "==> nxge_set_priv_prop: name %s (busy)", pr_name)); 5127 err = EBUSY; 5128 return (err); 5129 } 5130 if (pr_val == NULL) { 5131 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5132 "==> nxge_set_priv_prop: name %s (null)", pr_name)); 5133 err = EINVAL; 5134 return (err); 5135 } 5136 5137 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 5138 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5139 "<== nxge_set_priv_prop: name %s " 5140 "(lso %d pr_val %s value %d)", 5141 pr_name, nxgep->soft_lso_enable, pr_val, result)); 5142 5143 if (result > 1 || result < 0) { 5144 err = EINVAL; 5145 } else { 5146 if (nxgep->soft_lso_enable == (uint32_t)result) { 5147 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5148 "no change (%d %d)", 5149 nxgep->soft_lso_enable, result)); 5150 return (0); 5151 } 5152 } 5153 5154 nxgep->soft_lso_enable = (int)result; 5155 5156 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5157 "<== nxge_set_priv_prop: name %s (value %d)", 5158 pr_name, result)); 5159 5160 return (err); 5161 } 5162 /* 5163 * Commands like 
"ndd -set /dev/nxge0 adv_10gfdx_cap 1" cause the 5164 * following code to be executed. 5165 */ 5166 if (strcmp(pr_name, "_adv_10gfdx_cap") == 0) { 5167 err = nxge_param_set_mac(nxgep, NULL, NULL, (char *)pr_val, 5168 (caddr_t)¶m_arr[param_anar_10gfdx]); 5169 return (err); 5170 } 5171 if (strcmp(pr_name, "_adv_pause_cap") == 0) { 5172 err = nxge_param_set_mac(nxgep, NULL, NULL, (char *)pr_val, 5173 (caddr_t)¶m_arr[param_anar_pause]); 5174 return (err); 5175 } 5176 5177 return (EINVAL); 5178 } 5179 5180 static int 5181 nxge_get_priv_prop(p_nxge_t nxgep, const char *pr_name, uint_t pr_flags, 5182 uint_t pr_valsize, void *pr_val) 5183 { 5184 p_nxge_param_t param_arr = nxgep->param_arr; 5185 char valstr[MAXNAMELEN]; 5186 int err = EINVAL; 5187 uint_t strsize; 5188 boolean_t is_default = (pr_flags & MAC_PROP_DEFAULT); 5189 5190 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5191 "==> nxge_get_priv_prop: property %s", pr_name)); 5192 5193 /* function number */ 5194 if (strcmp(pr_name, "_function_number") == 0) { 5195 if (is_default) 5196 return (ENOTSUP); 5197 (void) snprintf(valstr, sizeof (valstr), "%d", 5198 nxgep->function_num); 5199 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5200 "==> nxge_get_priv_prop: name %s " 5201 "(value %d valstr %s)", 5202 pr_name, nxgep->function_num, valstr)); 5203 5204 err = 0; 5205 goto done; 5206 } 5207 5208 /* Neptune firmware version */ 5209 if (strcmp(pr_name, "_fw_version") == 0) { 5210 if (is_default) 5211 return (ENOTSUP); 5212 (void) snprintf(valstr, sizeof (valstr), "%s", 5213 nxgep->vpd_info.ver); 5214 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5215 "==> nxge_get_priv_prop: name %s " 5216 "(value %d valstr %s)", 5217 pr_name, nxgep->vpd_info.ver, valstr)); 5218 5219 err = 0; 5220 goto done; 5221 } 5222 5223 /* port PHY mode */ 5224 if (strcmp(pr_name, "_port_mode") == 0) { 5225 if (is_default) 5226 return (ENOTSUP); 5227 switch (nxgep->mac.portmode) { 5228 case PORT_1G_COPPER: 5229 (void) snprintf(valstr, sizeof (valstr), "1G copper %s", 5230 
nxgep->hot_swappable_phy ? 5231 "[Hot Swappable]" : ""); 5232 break; 5233 case PORT_1G_FIBER: 5234 (void) snprintf(valstr, sizeof (valstr), "1G fiber %s", 5235 nxgep->hot_swappable_phy ? 5236 "[hot swappable]" : ""); 5237 break; 5238 case PORT_10G_COPPER: 5239 (void) snprintf(valstr, sizeof (valstr), 5240 "10G copper %s", 5241 nxgep->hot_swappable_phy ? 5242 "[hot swappable]" : ""); 5243 break; 5244 case PORT_10G_FIBER: 5245 (void) snprintf(valstr, sizeof (valstr), "10G fiber %s", 5246 nxgep->hot_swappable_phy ? 5247 "[hot swappable]" : ""); 5248 break; 5249 case PORT_10G_SERDES: 5250 (void) snprintf(valstr, sizeof (valstr), 5251 "10G serdes %s", nxgep->hot_swappable_phy ? 5252 "[hot swappable]" : ""); 5253 break; 5254 case PORT_1G_SERDES: 5255 (void) snprintf(valstr, sizeof (valstr), "1G serdes %s", 5256 nxgep->hot_swappable_phy ? 5257 "[hot swappable]" : ""); 5258 break; 5259 case PORT_1G_TN1010: 5260 (void) snprintf(valstr, sizeof (valstr), 5261 "1G TN1010 copper %s", nxgep->hot_swappable_phy ? 5262 "[hot swappable]" : ""); 5263 break; 5264 case PORT_10G_TN1010: 5265 (void) snprintf(valstr, sizeof (valstr), 5266 "10G TN1010 copper %s", nxgep->hot_swappable_phy ? 5267 "[hot swappable]" : ""); 5268 break; 5269 case PORT_1G_RGMII_FIBER: 5270 (void) snprintf(valstr, sizeof (valstr), 5271 "1G rgmii fiber %s", nxgep->hot_swappable_phy ? 5272 "[hot swappable]" : ""); 5273 break; 5274 case PORT_HSP_MODE: 5275 (void) snprintf(valstr, sizeof (valstr), 5276 "phy not present[hot swappable]"); 5277 break; 5278 default: 5279 (void) snprintf(valstr, sizeof (valstr), "unknown %s", 5280 nxgep->hot_swappable_phy ? 
5281 "[hot swappable]" : ""); 5282 break; 5283 } 5284 5285 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5286 "==> nxge_get_priv_prop: name %s (value %s)", 5287 pr_name, valstr)); 5288 5289 err = 0; 5290 goto done; 5291 } 5292 5293 /* Hot swappable PHY */ 5294 if (strcmp(pr_name, "_hot_swap_phy") == 0) { 5295 if (is_default) 5296 return (ENOTSUP); 5297 (void) snprintf(valstr, sizeof (valstr), "%s", 5298 nxgep->hot_swappable_phy ? 5299 "yes" : "no"); 5300 5301 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5302 "==> nxge_get_priv_prop: name %s " 5303 "(value %d valstr %s)", 5304 pr_name, nxgep->hot_swappable_phy, valstr)); 5305 5306 err = 0; 5307 goto done; 5308 } 5309 5310 5311 /* accept jumbo */ 5312 if (strcmp(pr_name, "_accept_jumbo") == 0) { 5313 if (is_default) 5314 (void) snprintf(valstr, sizeof (valstr), "%d", 0); 5315 else 5316 (void) snprintf(valstr, sizeof (valstr), 5317 "%d", nxgep->mac.is_jumbo); 5318 err = 0; 5319 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5320 "==> nxge_get_priv_prop: name %s (value %d (%d, %d))", 5321 pr_name, 5322 (uint32_t)param_arr[param_accept_jumbo].value, 5323 nxgep->mac.is_jumbo, 5324 nxge_jumbo_enable)); 5325 5326 goto done; 5327 } 5328 5329 /* Receive Interrupt Blanking Parameters */ 5330 if (strcmp(pr_name, "_rxdma_intr_time") == 0) { 5331 err = 0; 5332 if (is_default) { 5333 (void) snprintf(valstr, sizeof (valstr), 5334 "%d", RXDMA_RCR_TO_DEFAULT); 5335 goto done; 5336 } 5337 5338 (void) snprintf(valstr, sizeof (valstr), "%d", 5339 nxgep->intr_timeout); 5340 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5341 "==> nxge_get_priv_prop: name %s (value %d)", 5342 pr_name, 5343 (uint32_t)nxgep->intr_timeout)); 5344 goto done; 5345 } 5346 5347 if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) { 5348 err = 0; 5349 if (is_default) { 5350 (void) snprintf(valstr, sizeof (valstr), 5351 "%d", RXDMA_RCR_PTHRES_DEFAULT); 5352 goto done; 5353 } 5354 (void) snprintf(valstr, sizeof (valstr), "%d", 5355 nxgep->intr_threshold); 5356 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5357 "==> nxge_get_priv_prop: 
name %s (value %d)", 5358 pr_name, (uint32_t)nxgep->intr_threshold)); 5359 5360 goto done; 5361 } 5362 5363 /* Classification and Load Distribution Configuration */ 5364 if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) { 5365 if (is_default) { 5366 (void) snprintf(valstr, sizeof (valstr), "%x", 5367 NXGE_CLASS_FLOW_GEN_SERVER); 5368 err = 0; 5369 goto done; 5370 } 5371 err = nxge_dld_get_ip_opt(nxgep, 5372 (caddr_t)¶m_arr[param_class_opt_ipv4_tcp]); 5373 5374 (void) snprintf(valstr, sizeof (valstr), "%x", 5375 (int)param_arr[param_class_opt_ipv4_tcp].value); 5376 5377 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5378 "==> nxge_get_priv_prop: %s", valstr)); 5379 goto done; 5380 } 5381 5382 if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) { 5383 if (is_default) { 5384 (void) snprintf(valstr, sizeof (valstr), "%x", 5385 NXGE_CLASS_FLOW_GEN_SERVER); 5386 err = 0; 5387 goto done; 5388 } 5389 err = nxge_dld_get_ip_opt(nxgep, 5390 (caddr_t)¶m_arr[param_class_opt_ipv4_udp]); 5391 5392 (void) snprintf(valstr, sizeof (valstr), "%x", 5393 (int)param_arr[param_class_opt_ipv4_udp].value); 5394 5395 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5396 "==> nxge_get_priv_prop: %s", valstr)); 5397 goto done; 5398 } 5399 if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) { 5400 if (is_default) { 5401 (void) snprintf(valstr, sizeof (valstr), "%x", 5402 NXGE_CLASS_FLOW_GEN_SERVER); 5403 err = 0; 5404 goto done; 5405 } 5406 err = nxge_dld_get_ip_opt(nxgep, 5407 (caddr_t)¶m_arr[param_class_opt_ipv4_ah]); 5408 5409 (void) snprintf(valstr, sizeof (valstr), "%x", 5410 (int)param_arr[param_class_opt_ipv4_ah].value); 5411 5412 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5413 "==> nxge_get_priv_prop: %s", valstr)); 5414 goto done; 5415 } 5416 5417 if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) { 5418 if (is_default) { 5419 (void) snprintf(valstr, sizeof (valstr), "%x", 5420 NXGE_CLASS_FLOW_GEN_SERVER); 5421 err = 0; 5422 goto done; 5423 } 5424 err = nxge_dld_get_ip_opt(nxgep, 5425 (caddr_t)¶m_arr[param_class_opt_ipv4_sctp]); 
5426 5427 (void) snprintf(valstr, sizeof (valstr), "%x", 5428 (int)param_arr[param_class_opt_ipv4_sctp].value); 5429 5430 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5431 "==> nxge_get_priv_prop: %s", valstr)); 5432 goto done; 5433 } 5434 5435 if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) { 5436 if (is_default) { 5437 (void) snprintf(valstr, sizeof (valstr), "%x", 5438 NXGE_CLASS_FLOW_GEN_SERVER); 5439 err = 0; 5440 goto done; 5441 } 5442 err = nxge_dld_get_ip_opt(nxgep, 5443 (caddr_t)¶m_arr[param_class_opt_ipv6_tcp]); 5444 5445 (void) snprintf(valstr, sizeof (valstr), "%x", 5446 (int)param_arr[param_class_opt_ipv6_tcp].value); 5447 5448 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5449 "==> nxge_get_priv_prop: %s", valstr)); 5450 goto done; 5451 } 5452 5453 if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) { 5454 if (is_default) { 5455 (void) snprintf(valstr, sizeof (valstr), "%x", 5456 NXGE_CLASS_FLOW_GEN_SERVER); 5457 err = 0; 5458 goto done; 5459 } 5460 err = nxge_dld_get_ip_opt(nxgep, 5461 (caddr_t)¶m_arr[param_class_opt_ipv6_udp]); 5462 5463 (void) snprintf(valstr, sizeof (valstr), "%x", 5464 (int)param_arr[param_class_opt_ipv6_udp].value); 5465 5466 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5467 "==> nxge_get_priv_prop: %s", valstr)); 5468 goto done; 5469 } 5470 5471 if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) { 5472 if (is_default) { 5473 (void) snprintf(valstr, sizeof (valstr), "%x", 5474 NXGE_CLASS_FLOW_GEN_SERVER); 5475 err = 0; 5476 goto done; 5477 } 5478 err = nxge_dld_get_ip_opt(nxgep, 5479 (caddr_t)¶m_arr[param_class_opt_ipv6_ah]); 5480 5481 (void) snprintf(valstr, sizeof (valstr), "%x", 5482 (int)param_arr[param_class_opt_ipv6_ah].value); 5483 5484 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5485 "==> nxge_get_priv_prop: %s", valstr)); 5486 goto done; 5487 } 5488 5489 if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) { 5490 if (is_default) { 5491 (void) snprintf(valstr, sizeof (valstr), "%x", 5492 NXGE_CLASS_FLOW_GEN_SERVER); 5493 err = 0; 5494 goto done; 5495 } 5496 err = 
nxge_dld_get_ip_opt(nxgep, 5497 (caddr_t)¶m_arr[param_class_opt_ipv6_sctp]); 5498 5499 (void) snprintf(valstr, sizeof (valstr), "%x", 5500 (int)param_arr[param_class_opt_ipv6_sctp].value); 5501 5502 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5503 "==> nxge_get_priv_prop: %s", valstr)); 5504 goto done; 5505 } 5506 5507 /* Software LSO */ 5508 if (strcmp(pr_name, "_soft_lso_enable") == 0) { 5509 if (is_default) { 5510 (void) snprintf(valstr, sizeof (valstr), "%d", 0); 5511 err = 0; 5512 goto done; 5513 } 5514 (void) snprintf(valstr, sizeof (valstr), 5515 "%d", nxgep->soft_lso_enable); 5516 err = 0; 5517 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5518 "==> nxge_get_priv_prop: name %s (value %d)", 5519 pr_name, nxgep->soft_lso_enable)); 5520 5521 goto done; 5522 } 5523 if (strcmp(pr_name, "_adv_10gfdx_cap") == 0) { 5524 err = 0; 5525 if (is_default || 5526 nxgep->param_arr[param_anar_10gfdx].value != 0) { 5527 (void) snprintf(valstr, sizeof (valstr), "%d", 1); 5528 goto done; 5529 } else { 5530 (void) snprintf(valstr, sizeof (valstr), "%d", 0); 5531 goto done; 5532 } 5533 } 5534 if (strcmp(pr_name, "_adv_pause_cap") == 0) { 5535 err = 0; 5536 if (is_default || 5537 nxgep->param_arr[param_anar_pause].value != 0) { 5538 (void) snprintf(valstr, sizeof (valstr), "%d", 1); 5539 goto done; 5540 } else { 5541 (void) snprintf(valstr, sizeof (valstr), "%d", 0); 5542 goto done; 5543 } 5544 } 5545 5546 done: 5547 if (err == 0) { 5548 strsize = (uint_t)strlen(valstr); 5549 if (pr_valsize < strsize) { 5550 err = ENOBUFS; 5551 } else { 5552 (void) strlcpy(pr_val, valstr, pr_valsize); 5553 } 5554 } 5555 5556 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5557 "<== nxge_get_priv_prop: return %d", err)); 5558 return (err); 5559 } 5560 5561 /* 5562 * Module loading and removing entry points. 
 */

DDI_DEFINE_STREAM_OPS(nxge_dev_ops, nulldev, nulldev, nxge_attach, nxge_detach,
    nodev, NULL, D_MP, NULL);

#define	NXGE_DESC_VER		"Sun NIU 10Gb Ethernet"

/*
 * Module linkage information for the kernel.
 */
static struct modldrv nxge_modldrv = {
	&mod_driverops,
	NXGE_DESC_VER,
	&nxge_dev_ops
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *) &nxge_modldrv, NULL
};

/*
 * _init: register the MAC entry points, initialize the per-instance
 * soft state list, install the module, and create the common lock.
 */
int
_init(void)
{
	int status;

	NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _init"));
	mac_init_ops(&nxge_dev_ops, "nxge");
	status = ddi_soft_state_init(&nxge_list, sizeof (nxge_t), 0);
	if (status != 0) {
		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
		    "failed to init device soft state"));
		goto _init_exit;
	}
	status = mod_install(&modlinkage);
	if (status != 0) {
		/* Undo soft-state setup if the install fails. */
		ddi_soft_state_fini(&nxge_list);
		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "Mod install failed"));
		goto _init_exit;
	}

	MUTEX_INIT(&nxge_common_lock, NULL, MUTEX_DRIVER, NULL);

_init_exit:
	NXGE_DEBUG_MSG((NULL, MOD_CTL, "_init status = 0x%X", status));

	return (status);
}

/*
 * _fini: refuse unload while any driver mblks are still outstanding,
 * then remove the module and tear down MAC ops, soft state and locks.
 */
int
_fini(void)
{
	int status;

	NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini"));

	NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini: mod_remove"));

	/* Loaned-up receive buffers may still reference this module. */
	if (nxge_mblks_pending)
		return (EBUSY);

	status = mod_remove(&modlinkage);
	if (status != DDI_SUCCESS) {
		NXGE_DEBUG_MSG((NULL, MOD_CTL,
		    "Module removal failed 0x%08x",
		    status));
		goto _fini_exit;
	}

	mac_fini_ops(&nxge_dev_ops);

	ddi_soft_state_fini(&nxge_list);

	MUTEX_DESTROY(&nxge_common_lock);
_fini_exit:
	NXGE_DEBUG_MSG((NULL, MOD_CTL, "_fini status = 0x%08x", status));

	return (status);
}

/* _info: report module information via mod_info(9F). */
int
_info(struct modinfo *modinfop)
{
	int status;

	NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _info"));
	status = mod_info(&modlinkage, modinfop);
	NXGE_DEBUG_MSG((NULL, MOD_CTL, " _info status = 0x%X", status));

	return (status);
}

/*
 * Select and register device interrupts: query the supported DDI
 * interrupt types and choose MSI-X/MSI/FIXED according to the
 * nxge_msi_enable tunable (see comment at top of file).
 */
/*ARGSUSED*/
static nxge_status_t
nxge_add_intrs(p_nxge_t nxgep)
{

	int intr_types;
	int type = 0;
	int ddi_status = DDI_SUCCESS;
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs"));

	nxgep->nxge_intr_type.intr_registered = B_FALSE;
	nxgep->nxge_intr_type.intr_enabled = B_FALSE;
	nxgep->nxge_intr_type.msi_intx_cnt = 0;
	nxgep->nxge_intr_type.intr_added = 0;
	nxgep->nxge_intr_type.niu_msi_enable = B_FALSE;
	nxgep->nxge_intr_type.intr_type = 0;

	if (nxgep->niu_type == N2_NIU) {
		nxgep->nxge_intr_type.niu_msi_enable = B_TRUE;
	} else if (nxge_msi_enable) {
		nxgep->nxge_intr_type.niu_msi_enable = B_TRUE;
	}

	/* Get the supported interrupt types */
	if ((ddi_status = ddi_intr_get_supported_types(nxgep->dip, &intr_types))
	    != DDI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_add_intrs: "
		    "ddi_intr_get_supported_types failed: status 0x%08x",
		    ddi_status));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}
	nxgep->nxge_intr_type.intr_types = intr_types;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
	    "ddi_intr_get_supported_types: 0x%08x", intr_types));

	/*
	 * Solaris MSIX is not supported yet. use MSI for now.
	 * nxge_msi_enable (1):
	 *	1 - MSI		2 - MSI-X	others - FIXED
	 */
	switch (nxge_msi_enable) {
	default:
		type = DDI_INTR_TYPE_FIXED;
		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
		    "use fixed (intx emulation) type %08x",
		    type));
		break;

	case 2:
		/* Prefer MSI-X, then MSI, then FIXED. */
		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
		    "ddi_intr_get_supported_types: 0x%08x", intr_types));
		if (intr_types & DDI_INTR_TYPE_MSIX) {
			type = DDI_INTR_TYPE_MSIX;
			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
			    "ddi_intr_get_supported_types: MSIX 0x%08x",
			    type));
		} else if (intr_types & DDI_INTR_TYPE_MSI) {
			type = DDI_INTR_TYPE_MSI;
			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
			    "ddi_intr_get_supported_types: MSI 0x%08x",
			    type));
		} else if (intr_types & DDI_INTR_TYPE_FIXED) {
			type = DDI_INTR_TYPE_FIXED;
			NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
			    "ddi_intr_get_supported_types: MSXED0x%08x",
			    type));
		}
		break;

	case 1:
		/* Prefer MSI, then MSI-X, then FIXED. */
		if (intr_types & DDI_INTR_TYPE_MSI) {
			type = DDI_INTR_TYPE_MSI;
			NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
			    "ddi_intr_get_supported_types: MSI 0x%08x",
			    type));
		} else if (intr_types & DDI_INTR_TYPE_MSIX) {
			type = DDI_INTR_TYPE_MSIX;
			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
			    "ddi_intr_get_supported_types: MSIX 0x%08x",
			    type));
		} else if (intr_types & DDI_INTR_TYPE_FIXED) {
			type = DDI_INTR_TYPE_FIXED;
			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
			    "ddi_intr_get_supported_types: MSXED0x%08x",
			    type));
		}
	}

	nxgep->nxge_intr_type.intr_type = type;
	if ((type == DDI_INTR_TYPE_MSIX || type == DDI_INTR_TYPE_MSI ||
	    type == DDI_INTR_TYPE_FIXED) &&
	    nxgep->nxge_intr_type.niu_msi_enable) {
		if ((status = nxge_add_intrs_adv(nxgep)) != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
5751 " nxge_add_intrs: " 5752 " nxge_add_intrs_adv failed: status 0x%08x", 5753 status)); 5754 return (status); 5755 } else { 5756 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 5757 "interrupts registered : type %d", type)); 5758 nxgep->nxge_intr_type.intr_registered = B_TRUE; 5759 5760 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 5761 "\nAdded advanced nxge add_intr_adv " 5762 "intr type 0x%x\n", type)); 5763 5764 return (status); 5765 } 5766 } 5767 5768 if (!nxgep->nxge_intr_type.intr_registered) { 5769 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_add_intrs: " 5770 "failed to register interrupts")); 5771 return (NXGE_ERROR | NXGE_DDI_FAILED); 5772 } 5773 5774 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_add_intrs")); 5775 return (status); 5776 } 5777 5778 /*ARGSUSED*/ 5779 static nxge_status_t 5780 nxge_add_soft_intrs(p_nxge_t nxgep) 5781 { 5782 5783 int ddi_status = DDI_SUCCESS; 5784 nxge_status_t status = NXGE_OK; 5785 5786 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_soft_intrs")); 5787 5788 nxgep->resched_id = NULL; 5789 nxgep->resched_running = B_FALSE; 5790 ddi_status = ddi_add_softintr(nxgep->dip, DDI_SOFTINT_LOW, 5791 &nxgep->resched_id, 5792 NULL, NULL, nxge_reschedule, (caddr_t)nxgep); 5793 if (ddi_status != DDI_SUCCESS) { 5794 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_add_soft_intrs: " 5795 "ddi_add_softintrs failed: status 0x%08x", 5796 ddi_status)); 5797 return (NXGE_ERROR | NXGE_DDI_FAILED); 5798 } 5799 5800 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_ddi_add_soft_intrs")); 5801 5802 return (status); 5803 } 5804 5805 static nxge_status_t 5806 nxge_add_intrs_adv(p_nxge_t nxgep) 5807 { 5808 int intr_type; 5809 p_nxge_intr_t intrp; 5810 5811 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv")); 5812 5813 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 5814 intr_type = intrp->intr_type; 5815 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv: type 0x%x", 5816 intr_type)); 5817 5818 switch (intr_type) { 5819 case DDI_INTR_TYPE_MSI: /* 0x2 */ 
5820 case DDI_INTR_TYPE_MSIX: /* 0x4 */ 5821 return (nxge_add_intrs_adv_type(nxgep, intr_type)); 5822 5823 case DDI_INTR_TYPE_FIXED: /* 0x1 */ 5824 return (nxge_add_intrs_adv_type_fix(nxgep, intr_type)); 5825 5826 default: 5827 return (NXGE_ERROR); 5828 } 5829 } 5830 5831 5832 /*ARGSUSED*/ 5833 static nxge_status_t 5834 nxge_add_intrs_adv_type(p_nxge_t nxgep, uint32_t int_type) 5835 { 5836 dev_info_t *dip = nxgep->dip; 5837 p_nxge_ldg_t ldgp; 5838 p_nxge_intr_t intrp; 5839 uint_t *inthandler; 5840 void *arg1, *arg2; 5841 int behavior; 5842 int nintrs, navail, nrequest; 5843 int nactual, nrequired; 5844 int inum = 0; 5845 int x, y; 5846 int ddi_status = DDI_SUCCESS; 5847 nxge_status_t status = NXGE_OK; 5848 5849 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type")); 5850 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 5851 intrp->start_inum = 0; 5852 5853 ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs); 5854 if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) { 5855 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 5856 "ddi_intr_get_nintrs() failed, status: 0x%x%, " 5857 "nintrs: %d", ddi_status, nintrs)); 5858 return (NXGE_ERROR | NXGE_DDI_FAILED); 5859 } 5860 5861 ddi_status = ddi_intr_get_navail(dip, int_type, &navail); 5862 if ((ddi_status != DDI_SUCCESS) || (navail == 0)) { 5863 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 5864 "ddi_intr_get_navail() failed, status: 0x%x%, " 5865 "nintrs: %d", ddi_status, navail)); 5866 return (NXGE_ERROR | NXGE_DDI_FAILED); 5867 } 5868 5869 NXGE_DEBUG_MSG((nxgep, INT_CTL, 5870 "ddi_intr_get_navail() returned: nintrs %d, navail %d", 5871 nintrs, navail)); 5872 5873 /* PSARC/2007/453 MSI-X interrupt limit override */ 5874 if (int_type == DDI_INTR_TYPE_MSIX) { 5875 nrequest = nxge_create_msi_property(nxgep); 5876 if (nrequest < navail) { 5877 navail = nrequest; 5878 NXGE_DEBUG_MSG((nxgep, INT_CTL, 5879 "nxge_add_intrs_adv_type: nintrs %d " 5880 "navail %d (nrequest %d)", 5881 nintrs, navail, nrequest)); 5882 } 5883 } 5884 5885 if 
(int_type == DDI_INTR_TYPE_MSI && !ISP2(navail)) { 5886 /* MSI must be power of 2 */ 5887 if ((navail & 16) == 16) { 5888 navail = 16; 5889 } else if ((navail & 8) == 8) { 5890 navail = 8; 5891 } else if ((navail & 4) == 4) { 5892 navail = 4; 5893 } else if ((navail & 2) == 2) { 5894 navail = 2; 5895 } else { 5896 navail = 1; 5897 } 5898 NXGE_DEBUG_MSG((nxgep, INT_CTL, 5899 "ddi_intr_get_navail(): (msi power of 2) nintrs %d, " 5900 "navail %d", nintrs, navail)); 5901 } 5902 5903 behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT : 5904 DDI_INTR_ALLOC_NORMAL); 5905 intrp->intr_size = navail * sizeof (ddi_intr_handle_t); 5906 intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP); 5907 ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum, 5908 navail, &nactual, behavior); 5909 if (ddi_status != DDI_SUCCESS || nactual == 0) { 5910 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 5911 " ddi_intr_alloc() failed: %d", 5912 ddi_status)); 5913 kmem_free(intrp->htable, intrp->intr_size); 5914 return (NXGE_ERROR | NXGE_DDI_FAILED); 5915 } 5916 5917 if ((ddi_status = ddi_intr_get_pri(intrp->htable[0], 5918 (uint_t *)&intrp->pri)) != DDI_SUCCESS) { 5919 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 5920 " ddi_intr_get_pri() failed: %d", 5921 ddi_status)); 5922 /* Free already allocated interrupts */ 5923 for (y = 0; y < nactual; y++) { 5924 (void) ddi_intr_free(intrp->htable[y]); 5925 } 5926 5927 kmem_free(intrp->htable, intrp->intr_size); 5928 return (NXGE_ERROR | NXGE_DDI_FAILED); 5929 } 5930 5931 nrequired = 0; 5932 switch (nxgep->niu_type) { 5933 default: 5934 status = nxge_ldgv_init(nxgep, &nactual, &nrequired); 5935 break; 5936 5937 case N2_NIU: 5938 status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired); 5939 break; 5940 } 5941 5942 if (status != NXGE_OK) { 5943 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 5944 "nxge_add_intrs_adv_typ:nxge_ldgv_init " 5945 "failed: 0x%x", status)); 5946 /* Free already allocated interrupts */ 5947 for (y = 0; y < nactual; y++) { 5948 
(void) ddi_intr_free(intrp->htable[y]); 5949 } 5950 5951 kmem_free(intrp->htable, intrp->intr_size); 5952 return (status); 5953 } 5954 5955 ldgp = nxgep->ldgvp->ldgp; 5956 for (x = 0; x < nrequired; x++, ldgp++) { 5957 ldgp->vector = (uint8_t)x; 5958 ldgp->intdata = SID_DATA(ldgp->func, x); 5959 arg1 = ldgp->ldvp; 5960 arg2 = nxgep; 5961 if (ldgp->nldvs == 1) { 5962 inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler; 5963 NXGE_DEBUG_MSG((nxgep, INT_CTL, 5964 "nxge_add_intrs_adv_type: " 5965 "arg1 0x%x arg2 0x%x: " 5966 "1-1 int handler (entry %d intdata 0x%x)\n", 5967 arg1, arg2, 5968 x, ldgp->intdata)); 5969 } else if (ldgp->nldvs > 1) { 5970 inthandler = (uint_t *)ldgp->sys_intr_handler; 5971 NXGE_DEBUG_MSG((nxgep, INT_CTL, 5972 "nxge_add_intrs_adv_type: " 5973 "arg1 0x%x arg2 0x%x: " 5974 "nldevs %d int handler " 5975 "(entry %d intdata 0x%x)\n", 5976 arg1, arg2, 5977 ldgp->nldvs, x, ldgp->intdata)); 5978 } 5979 5980 NXGE_DEBUG_MSG((nxgep, INT_CTL, 5981 "==> nxge_add_intrs_adv_type: ddi_add_intr(inum) #%d " 5982 "htable 0x%llx", x, intrp->htable[x])); 5983 5984 if ((ddi_status = ddi_intr_add_handler(intrp->htable[x], 5985 (ddi_intr_handler_t *)inthandler, arg1, arg2)) 5986 != DDI_SUCCESS) { 5987 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 5988 "==> nxge_add_intrs_adv_type: failed #%d " 5989 "status 0x%x", x, ddi_status)); 5990 for (y = 0; y < intrp->intr_added; y++) { 5991 (void) ddi_intr_remove_handler( 5992 intrp->htable[y]); 5993 } 5994 /* Free already allocated intr */ 5995 for (y = 0; y < nactual; y++) { 5996 (void) ddi_intr_free(intrp->htable[y]); 5997 } 5998 kmem_free(intrp->htable, intrp->intr_size); 5999 6000 (void) nxge_ldgv_uninit(nxgep); 6001 6002 return (NXGE_ERROR | NXGE_DDI_FAILED); 6003 } 6004 intrp->intr_added++; 6005 } 6006 6007 intrp->msi_intx_cnt = nactual; 6008 6009 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 6010 "Requested: %d, Allowed: %d msi_intx_cnt %d intr_added %d", 6011 navail, nactual, 6012 intrp->msi_intx_cnt, 6013 intrp->intr_added)); 6014 6015 
(void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap); 6016 6017 (void) nxge_intr_ldgv_init(nxgep); 6018 6019 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type")); 6020 6021 return (status); 6022 } 6023 6024 /*ARGSUSED*/ 6025 static nxge_status_t 6026 nxge_add_intrs_adv_type_fix(p_nxge_t nxgep, uint32_t int_type) 6027 { 6028 dev_info_t *dip = nxgep->dip; 6029 p_nxge_ldg_t ldgp; 6030 p_nxge_intr_t intrp; 6031 uint_t *inthandler; 6032 void *arg1, *arg2; 6033 int behavior; 6034 int nintrs, navail; 6035 int nactual, nrequired; 6036 int inum = 0; 6037 int x, y; 6038 int ddi_status = DDI_SUCCESS; 6039 nxge_status_t status = NXGE_OK; 6040 6041 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type_fix")); 6042 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 6043 intrp->start_inum = 0; 6044 6045 ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs); 6046 if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) { 6047 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6048 "ddi_intr_get_nintrs() failed, status: 0x%x%, " 6049 "nintrs: %d", status, nintrs)); 6050 return (NXGE_ERROR | NXGE_DDI_FAILED); 6051 } 6052 6053 ddi_status = ddi_intr_get_navail(dip, int_type, &navail); 6054 if ((ddi_status != DDI_SUCCESS) || (navail == 0)) { 6055 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 6056 "ddi_intr_get_navail() failed, status: 0x%x%, " 6057 "nintrs: %d", ddi_status, navail)); 6058 return (NXGE_ERROR | NXGE_DDI_FAILED); 6059 } 6060 6061 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6062 "ddi_intr_get_navail() returned: nintrs %d, naavail %d", 6063 nintrs, navail)); 6064 6065 behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? 
DDI_INTR_ALLOC_STRICT : 6066 DDI_INTR_ALLOC_NORMAL); 6067 intrp->intr_size = navail * sizeof (ddi_intr_handle_t); 6068 intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP); 6069 ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum, 6070 navail, &nactual, behavior); 6071 if (ddi_status != DDI_SUCCESS || nactual == 0) { 6072 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 6073 " ddi_intr_alloc() failed: %d", 6074 ddi_status)); 6075 kmem_free(intrp->htable, intrp->intr_size); 6076 return (NXGE_ERROR | NXGE_DDI_FAILED); 6077 } 6078 6079 if ((ddi_status = ddi_intr_get_pri(intrp->htable[0], 6080 (uint_t *)&intrp->pri)) != DDI_SUCCESS) { 6081 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 6082 " ddi_intr_get_pri() failed: %d", 6083 ddi_status)); 6084 /* Free already allocated interrupts */ 6085 for (y = 0; y < nactual; y++) { 6086 (void) ddi_intr_free(intrp->htable[y]); 6087 } 6088 6089 kmem_free(intrp->htable, intrp->intr_size); 6090 return (NXGE_ERROR | NXGE_DDI_FAILED); 6091 } 6092 6093 nrequired = 0; 6094 switch (nxgep->niu_type) { 6095 default: 6096 status = nxge_ldgv_init(nxgep, &nactual, &nrequired); 6097 break; 6098 6099 case N2_NIU: 6100 status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired); 6101 break; 6102 } 6103 6104 if (status != NXGE_OK) { 6105 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 6106 "nxge_add_intrs_adv_type_fix:nxge_ldgv_init " 6107 "failed: 0x%x", status)); 6108 /* Free already allocated interrupts */ 6109 for (y = 0; y < nactual; y++) { 6110 (void) ddi_intr_free(intrp->htable[y]); 6111 } 6112 6113 kmem_free(intrp->htable, intrp->intr_size); 6114 return (status); 6115 } 6116 6117 ldgp = nxgep->ldgvp->ldgp; 6118 for (x = 0; x < nrequired; x++, ldgp++) { 6119 ldgp->vector = (uint8_t)x; 6120 if (nxgep->niu_type != N2_NIU) { 6121 ldgp->intdata = SID_DATA(ldgp->func, x); 6122 } 6123 6124 arg1 = ldgp->ldvp; 6125 arg2 = nxgep; 6126 if (ldgp->nldvs == 1) { 6127 inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler; 6128 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6129 
"nxge_add_intrs_adv_type_fix: " 6130 "1-1 int handler(%d) ldg %d ldv %d " 6131 "arg1 $%p arg2 $%p\n", 6132 x, ldgp->ldg, ldgp->ldvp->ldv, 6133 arg1, arg2)); 6134 } else if (ldgp->nldvs > 1) { 6135 inthandler = (uint_t *)ldgp->sys_intr_handler; 6136 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6137 "nxge_add_intrs_adv_type_fix: " 6138 "shared ldv %d int handler(%d) ldv %d ldg %d" 6139 "arg1 0x%016llx arg2 0x%016llx\n", 6140 x, ldgp->nldvs, ldgp->ldg, ldgp->ldvp->ldv, 6141 arg1, arg2)); 6142 } 6143 6144 if ((ddi_status = ddi_intr_add_handler(intrp->htable[x], 6145 (ddi_intr_handler_t *)inthandler, arg1, arg2)) 6146 != DDI_SUCCESS) { 6147 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 6148 "==> nxge_add_intrs_adv_type_fix: failed #%d " 6149 "status 0x%x", x, ddi_status)); 6150 for (y = 0; y < intrp->intr_added; y++) { 6151 (void) ddi_intr_remove_handler( 6152 intrp->htable[y]); 6153 } 6154 for (y = 0; y < nactual; y++) { 6155 (void) ddi_intr_free(intrp->htable[y]); 6156 } 6157 /* Free already allocated intr */ 6158 kmem_free(intrp->htable, intrp->intr_size); 6159 6160 (void) nxge_ldgv_uninit(nxgep); 6161 6162 return (NXGE_ERROR | NXGE_DDI_FAILED); 6163 } 6164 intrp->intr_added++; 6165 } 6166 6167 intrp->msi_intx_cnt = nactual; 6168 6169 (void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap); 6170 6171 status = nxge_intr_ldgv_init(nxgep); 6172 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type_fix")); 6173 6174 return (status); 6175 } 6176 6177 static void 6178 nxge_remove_intrs(p_nxge_t nxgep) 6179 { 6180 int i, inum; 6181 p_nxge_intr_t intrp; 6182 6183 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs")); 6184 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 6185 if (!intrp->intr_registered) { 6186 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6187 "<== nxge_remove_intrs: interrupts not registered")); 6188 return; 6189 } 6190 6191 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs:advanced")); 6192 6193 if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) { 6194 (void) 
ddi_intr_block_disable(intrp->htable, 6195 intrp->intr_added); 6196 } else { 6197 for (i = 0; i < intrp->intr_added; i++) { 6198 (void) ddi_intr_disable(intrp->htable[i]); 6199 } 6200 } 6201 6202 for (inum = 0; inum < intrp->intr_added; inum++) { 6203 if (intrp->htable[inum]) { 6204 (void) ddi_intr_remove_handler(intrp->htable[inum]); 6205 } 6206 } 6207 6208 for (inum = 0; inum < intrp->msi_intx_cnt; inum++) { 6209 if (intrp->htable[inum]) { 6210 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 6211 "nxge_remove_intrs: ddi_intr_free inum %d " 6212 "msi_intx_cnt %d intr_added %d", 6213 inum, 6214 intrp->msi_intx_cnt, 6215 intrp->intr_added)); 6216 6217 (void) ddi_intr_free(intrp->htable[inum]); 6218 } 6219 } 6220 6221 kmem_free(intrp->htable, intrp->intr_size); 6222 intrp->intr_registered = B_FALSE; 6223 intrp->intr_enabled = B_FALSE; 6224 intrp->msi_intx_cnt = 0; 6225 intrp->intr_added = 0; 6226 6227 (void) nxge_ldgv_uninit(nxgep); 6228 6229 (void) ddi_prop_remove(DDI_DEV_T_NONE, nxgep->dip, 6230 "#msix-request"); 6231 6232 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_remove_intrs")); 6233 } 6234 6235 /*ARGSUSED*/ 6236 static void 6237 nxge_remove_soft_intrs(p_nxge_t nxgep) 6238 { 6239 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_soft_intrs")); 6240 if (nxgep->resched_id) { 6241 ddi_remove_softintr(nxgep->resched_id); 6242 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6243 "==> nxge_remove_soft_intrs: removed")); 6244 nxgep->resched_id = NULL; 6245 } 6246 6247 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_remove_soft_intrs")); 6248 } 6249 6250 /*ARGSUSED*/ 6251 static void 6252 nxge_intrs_enable(p_nxge_t nxgep) 6253 { 6254 p_nxge_intr_t intrp; 6255 int i; 6256 int status; 6257 6258 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable")); 6259 6260 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 6261 6262 if (!intrp->intr_registered) { 6263 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_intrs_enable: " 6264 "interrupts are not registered")); 6265 return; 6266 } 6267 6268 if (intrp->intr_enabled) { 
6269 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6270 "<== nxge_intrs_enable: already enabled")); 6271 return; 6272 } 6273 6274 if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) { 6275 status = ddi_intr_block_enable(intrp->htable, 6276 intrp->intr_added); 6277 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable " 6278 "block enable - status 0x%x total inums #%d\n", 6279 status, intrp->intr_added)); 6280 } else { 6281 for (i = 0; i < intrp->intr_added; i++) { 6282 status = ddi_intr_enable(intrp->htable[i]); 6283 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable " 6284 "ddi_intr_enable:enable - status 0x%x " 6285 "total inums %d enable inum #%d\n", 6286 status, intrp->intr_added, i)); 6287 if (status == DDI_SUCCESS) { 6288 intrp->intr_enabled = B_TRUE; 6289 } 6290 } 6291 } 6292 6293 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_enable")); 6294 } 6295 6296 /*ARGSUSED*/ 6297 static void 6298 nxge_intrs_disable(p_nxge_t nxgep) 6299 { 6300 p_nxge_intr_t intrp; 6301 int i; 6302 6303 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_disable")); 6304 6305 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 6306 6307 if (!intrp->intr_registered) { 6308 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable: " 6309 "interrupts are not registered")); 6310 return; 6311 } 6312 6313 if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) { 6314 (void) ddi_intr_block_disable(intrp->htable, 6315 intrp->intr_added); 6316 } else { 6317 for (i = 0; i < intrp->intr_added; i++) { 6318 (void) ddi_intr_disable(intrp->htable[i]); 6319 } 6320 } 6321 6322 intrp->intr_enabled = B_FALSE; 6323 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable")); 6324 } 6325 6326 static nxge_status_t 6327 nxge_mac_register(p_nxge_t nxgep) 6328 { 6329 mac_register_t *macp; 6330 int status; 6331 6332 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_mac_register")); 6333 6334 if ((macp = mac_alloc(MAC_VERSION)) == NULL) 6335 return (NXGE_ERROR); 6336 6337 macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER; 6338 macp->m_driver = nxgep; 6339 
macp->m_dip = nxgep->dip; 6340 macp->m_src_addr = nxgep->ouraddr.ether_addr_octet; 6341 macp->m_callbacks = &nxge_m_callbacks; 6342 macp->m_min_sdu = 0; 6343 nxgep->mac.default_mtu = nxgep->mac.maxframesize - 6344 NXGE_EHEADER_VLAN_CRC; 6345 macp->m_max_sdu = nxgep->mac.default_mtu; 6346 macp->m_margin = VLAN_TAGSZ; 6347 macp->m_priv_props = nxge_priv_props; 6348 macp->m_priv_prop_count = NXGE_MAX_PRIV_PROPS; 6349 6350 NXGE_DEBUG_MSG((nxgep, MAC_CTL, 6351 "==> nxge_mac_register: instance %d " 6352 "max_sdu %d margin %d maxframe %d (header %d)", 6353 nxgep->instance, 6354 macp->m_max_sdu, macp->m_margin, 6355 nxgep->mac.maxframesize, 6356 NXGE_EHEADER_VLAN_CRC)); 6357 6358 status = mac_register(macp, &nxgep->mach); 6359 mac_free(macp); 6360 6361 if (status != 0) { 6362 cmn_err(CE_WARN, 6363 "!nxge_mac_register failed (status %d instance %d)", 6364 status, nxgep->instance); 6365 return (NXGE_ERROR); 6366 } 6367 6368 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_mac_register success " 6369 "(instance %d)", nxgep->instance)); 6370 6371 return (NXGE_OK); 6372 } 6373 6374 void 6375 nxge_err_inject(p_nxge_t nxgep, queue_t *wq, mblk_t *mp) 6376 { 6377 ssize_t size; 6378 mblk_t *nmp; 6379 uint8_t blk_id; 6380 uint8_t chan; 6381 uint32_t err_id; 6382 err_inject_t *eip; 6383 6384 NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_err_inject")); 6385 6386 size = 1024; 6387 nmp = mp->b_cont; 6388 eip = (err_inject_t *)nmp->b_rptr; 6389 blk_id = eip->blk_id; 6390 err_id = eip->err_id; 6391 chan = eip->chan; 6392 cmn_err(CE_NOTE, "!blk_id = 0x%x\n", blk_id); 6393 cmn_err(CE_NOTE, "!err_id = 0x%x\n", err_id); 6394 cmn_err(CE_NOTE, "!chan = 0x%x\n", chan); 6395 switch (blk_id) { 6396 case MAC_BLK_ID: 6397 break; 6398 case TXMAC_BLK_ID: 6399 break; 6400 case RXMAC_BLK_ID: 6401 break; 6402 case MIF_BLK_ID: 6403 break; 6404 case IPP_BLK_ID: 6405 nxge_ipp_inject_err(nxgep, err_id); 6406 break; 6407 case TXC_BLK_ID: 6408 nxge_txc_inject_err(nxgep, err_id); 6409 break; 6410 case TXDMA_BLK_ID: 6411 
nxge_txdma_inject_err(nxgep, err_id, chan); 6412 break; 6413 case RXDMA_BLK_ID: 6414 nxge_rxdma_inject_err(nxgep, err_id, chan); 6415 break; 6416 case ZCP_BLK_ID: 6417 nxge_zcp_inject_err(nxgep, err_id); 6418 break; 6419 case ESPC_BLK_ID: 6420 break; 6421 case FFLP_BLK_ID: 6422 break; 6423 case PHY_BLK_ID: 6424 break; 6425 case ETHER_SERDES_BLK_ID: 6426 break; 6427 case PCIE_SERDES_BLK_ID: 6428 break; 6429 case VIR_BLK_ID: 6430 break; 6431 } 6432 6433 nmp->b_wptr = nmp->b_rptr + size; 6434 NXGE_DEBUG_MSG((nxgep, STR_CTL, "<== nxge_err_inject")); 6435 6436 miocack(wq, mp, (int)size, 0); 6437 } 6438 6439 static int 6440 nxge_init_common_dev(p_nxge_t nxgep) 6441 { 6442 p_nxge_hw_list_t hw_p; 6443 dev_info_t *p_dip; 6444 6445 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_init_common_device")); 6446 6447 p_dip = nxgep->p_dip; 6448 MUTEX_ENTER(&nxge_common_lock); 6449 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6450 "==> nxge_init_common_dev:func # %d", 6451 nxgep->function_num)); 6452 /* 6453 * Loop through existing per neptune hardware list. 
6454 */ 6455 for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) { 6456 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6457 "==> nxge_init_common_device:func # %d " 6458 "hw_p $%p parent dip $%p", 6459 nxgep->function_num, 6460 hw_p, 6461 p_dip)); 6462 if (hw_p->parent_devp == p_dip) { 6463 nxgep->nxge_hw_p = hw_p; 6464 hw_p->ndevs++; 6465 hw_p->nxge_p[nxgep->function_num] = nxgep; 6466 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6467 "==> nxge_init_common_device:func # %d " 6468 "hw_p $%p parent dip $%p " 6469 "ndevs %d (found)", 6470 nxgep->function_num, 6471 hw_p, 6472 p_dip, 6473 hw_p->ndevs)); 6474 break; 6475 } 6476 } 6477 6478 if (hw_p == NULL) { 6479 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6480 "==> nxge_init_common_device:func # %d " 6481 "parent dip $%p (new)", 6482 nxgep->function_num, 6483 p_dip)); 6484 hw_p = kmem_zalloc(sizeof (nxge_hw_list_t), KM_SLEEP); 6485 hw_p->parent_devp = p_dip; 6486 hw_p->magic = NXGE_NEPTUNE_MAGIC; 6487 nxgep->nxge_hw_p = hw_p; 6488 hw_p->ndevs++; 6489 hw_p->nxge_p[nxgep->function_num] = nxgep; 6490 hw_p->next = nxge_hw_list; 6491 if (nxgep->niu_type == N2_NIU) { 6492 hw_p->niu_type = N2_NIU; 6493 hw_p->platform_type = P_NEPTUNE_NIU; 6494 } else { 6495 hw_p->niu_type = NIU_TYPE_NONE; 6496 hw_p->platform_type = P_NEPTUNE_NONE; 6497 } 6498 6499 MUTEX_INIT(&hw_p->nxge_cfg_lock, NULL, MUTEX_DRIVER, NULL); 6500 MUTEX_INIT(&hw_p->nxge_tcam_lock, NULL, MUTEX_DRIVER, NULL); 6501 MUTEX_INIT(&hw_p->nxge_vlan_lock, NULL, MUTEX_DRIVER, NULL); 6502 MUTEX_INIT(&hw_p->nxge_mdio_lock, NULL, MUTEX_DRIVER, NULL); 6503 6504 nxge_hw_list = hw_p; 6505 6506 (void) nxge_scan_ports_phy(nxgep, nxge_hw_list); 6507 } 6508 6509 MUTEX_EXIT(&nxge_common_lock); 6510 6511 nxgep->platform_type = hw_p->platform_type; 6512 if (nxgep->niu_type != N2_NIU) { 6513 nxgep->niu_type = hw_p->niu_type; 6514 } 6515 6516 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6517 "==> nxge_init_common_device (nxge_hw_list) $%p", 6518 nxge_hw_list)); 6519 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<== nxge_init_common_device")); 6520 6521 
return (NXGE_OK); 6522 } 6523 6524 static void 6525 nxge_uninit_common_dev(p_nxge_t nxgep) 6526 { 6527 p_nxge_hw_list_t hw_p, h_hw_p; 6528 p_nxge_dma_pt_cfg_t p_dma_cfgp; 6529 p_nxge_hw_pt_cfg_t p_cfgp; 6530 dev_info_t *p_dip; 6531 6532 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_uninit_common_device")); 6533 if (nxgep->nxge_hw_p == NULL) { 6534 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6535 "<== nxge_uninit_common_device (no common)")); 6536 return; 6537 } 6538 6539 MUTEX_ENTER(&nxge_common_lock); 6540 h_hw_p = nxge_hw_list; 6541 for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) { 6542 p_dip = hw_p->parent_devp; 6543 if (nxgep->nxge_hw_p == hw_p && 6544 p_dip == nxgep->p_dip && 6545 nxgep->nxge_hw_p->magic == NXGE_NEPTUNE_MAGIC && 6546 hw_p->magic == NXGE_NEPTUNE_MAGIC) { 6547 6548 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6549 "==> nxge_uninit_common_device:func # %d " 6550 "hw_p $%p parent dip $%p " 6551 "ndevs %d (found)", 6552 nxgep->function_num, 6553 hw_p, 6554 p_dip, 6555 hw_p->ndevs)); 6556 6557 /* 6558 * Release the RDC table, a shared resoruce 6559 * of the nxge hardware. The RDC table was 6560 * assigned to this instance of nxge in 6561 * nxge_use_cfg_dma_config(). 
6562 */ 6563 p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config; 6564 p_cfgp = (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config; 6565 (void) nxge_fzc_rdc_tbl_unbind(nxgep, 6566 p_cfgp->def_mac_rxdma_grpid); 6567 6568 if (hw_p->ndevs) { 6569 hw_p->ndevs--; 6570 } 6571 hw_p->nxge_p[nxgep->function_num] = NULL; 6572 if (!hw_p->ndevs) { 6573 MUTEX_DESTROY(&hw_p->nxge_vlan_lock); 6574 MUTEX_DESTROY(&hw_p->nxge_tcam_lock); 6575 MUTEX_DESTROY(&hw_p->nxge_cfg_lock); 6576 MUTEX_DESTROY(&hw_p->nxge_mdio_lock); 6577 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6578 "==> nxge_uninit_common_device: " 6579 "func # %d " 6580 "hw_p $%p parent dip $%p " 6581 "ndevs %d (last)", 6582 nxgep->function_num, 6583 hw_p, 6584 p_dip, 6585 hw_p->ndevs)); 6586 6587 nxge_hio_uninit(nxgep); 6588 6589 if (hw_p == nxge_hw_list) { 6590 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6591 "==> nxge_uninit_common_device:" 6592 "remove head func # %d " 6593 "hw_p $%p parent dip $%p " 6594 "ndevs %d (head)", 6595 nxgep->function_num, 6596 hw_p, 6597 p_dip, 6598 hw_p->ndevs)); 6599 nxge_hw_list = hw_p->next; 6600 } else { 6601 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6602 "==> nxge_uninit_common_device:" 6603 "remove middle func # %d " 6604 "hw_p $%p parent dip $%p " 6605 "ndevs %d (middle)", 6606 nxgep->function_num, 6607 hw_p, 6608 p_dip, 6609 hw_p->ndevs)); 6610 h_hw_p->next = hw_p->next; 6611 } 6612 6613 nxgep->nxge_hw_p = NULL; 6614 KMEM_FREE(hw_p, sizeof (nxge_hw_list_t)); 6615 } 6616 break; 6617 } else { 6618 h_hw_p = hw_p; 6619 } 6620 } 6621 6622 MUTEX_EXIT(&nxge_common_lock); 6623 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6624 "==> nxge_uninit_common_device (nxge_hw_list) $%p", 6625 nxge_hw_list)); 6626 6627 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<= nxge_uninit_common_device")); 6628 } 6629 6630 /* 6631 * Determines the number of ports from the niu_type or the platform type. 6632 * Returns the number of ports, or returns zero on failure. 
 */

/*
 * nxge_get_nports:
 * Maps the known niu_type values to a port count, falling back to the
 * platform_type when the niu_type is not one of the enumerated cases.
 * Returns 2 or 4, or 0 when neither field identifies the hardware.
 */
int
nxge_get_nports(p_nxge_t nxgep)
{
	int	nports = 0;

	switch (nxgep->niu_type) {
	case N2_NIU:
	case NEPTUNE_2_10GF:
		nports = 2;
		break;
	case NEPTUNE_4_1GC:
	case NEPTUNE_2_10GF_2_1GC:
	case NEPTUNE_1_10GF_3_1GC:
	case NEPTUNE_1_1GC_1_10GF_2_1GC:
	case NEPTUNE_2_10GF_2_1GRF:
		nports = 4;
		break;
	default:
		switch (nxgep->platform_type) {
		case P_NEPTUNE_NIU:
		case P_NEPTUNE_ATLAS_2PORT:
			nports = 2;
			break;
		case P_NEPTUNE_ATLAS_4PORT:
		case P_NEPTUNE_MARAMBA_P0:
		case P_NEPTUNE_MARAMBA_P1:
		case P_NEPTUNE_ALONSO:
			nports = 4;
			break;
		default:
			break;
		}
		break;
	}

	return (nports);
}

/*
 * The following two functions are to support
 * PSARC/2007/453 MSI-X interrupt limit override.
 */

/*
 * nxge_create_msi_property:
 * For 10G port modes, creates the "#msix-request" property and returns
 * min(ncpus, NXGE_MSIX_REQUEST_10G) as the number of MSI-X vectors to
 * request; all other modes return NXGE_MSIX_REQUEST_1G.
 */
static int
nxge_create_msi_property(p_nxge_t nxgep)
{
	int	nmsi;
	extern	int ncpus;

	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==>nxge_create_msi_property"));

	switch (nxgep->mac.portmode) {
	case PORT_10G_COPPER:
	case PORT_10G_FIBER:
	case PORT_10G_TN1010:
		(void) ddi_prop_create(DDI_DEV_T_NONE, nxgep->dip,
		    DDI_PROP_CANSLEEP, "#msix-request", NULL, 0);
		/*
		 * The maximum MSI-X requested will be 8.
		 * If the # of CPUs is less than 8, we will request
		 * # MSI-X based on the # of CPUs.
		 */
		if (ncpus >= NXGE_MSIX_REQUEST_10G) {
			nmsi = NXGE_MSIX_REQUEST_10G;
		} else {
			nmsi = ncpus;
		}
		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "==>nxge_create_msi_property(10G): exists 0x%x (nmsi %d)",
		    ddi_prop_exists(DDI_DEV_T_NONE, nxgep->dip,
		    DDI_PROP_CANSLEEP, "#msix-request"), nmsi));
		break;

	default:
		nmsi = NXGE_MSIX_REQUEST_1G;
		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "==>nxge_create_msi_property(1G): exists 0x%x (nmsi %d)",
		    ddi_prop_exists(DDI_DEV_T_NONE, nxgep->dip,
		    DDI_PROP_CANSLEEP, "#msix-request"), nmsi));
		break;
	}

	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<==nxge_create_msi_property"));
	return (nmsi);
}

/*
 * nxge_get_def_val:
 * Writes the driver's default value for the given MAC property into
 * pr_val.  Returns 0 on success, EINVAL when the FLOWCTRL buffer is too
 * small, or ENOTSUP for properties with no default.
 * NOTE(review): only MAC_PROP_FLOWCTRL validates pr_valsize; the uint8_t
 * cases assume the caller provided at least one byte.
 */
/* ARGSUSED */
static int
nxge_get_def_val(nxge_t *nxgep, mac_prop_id_t pr_num, uint_t pr_valsize,
    void *pr_val)
{
	int err = 0;
	link_flowctrl_t fl;

	switch (pr_num) {
	case MAC_PROP_AUTONEG:
		*(uint8_t *)pr_val = 1;
		break;
	case MAC_PROP_FLOWCTRL:
		if (pr_valsize < sizeof (link_flowctrl_t))
			return (EINVAL);
		fl = LINK_FLOWCTRL_RX;
		bcopy(&fl, pr_val, sizeof (fl));
		break;
	case MAC_PROP_ADV_1000FDX_CAP:
	case MAC_PROP_EN_1000FDX_CAP:
		*(uint8_t *)pr_val = 1;
		break;
	case MAC_PROP_ADV_100FDX_CAP:
	case MAC_PROP_EN_100FDX_CAP:
		*(uint8_t *)pr_val = 1;
		break;
	default:
		err = ENOTSUP;
		break;
	}
	return (err);
}


/*
 * The following is a software workaround for the Neptune hardware's
 * interrupt bugs; The Neptune hardware may generate spurious interrupts when
 * an interrupt handler is removed.
6758 */ 6759 #define NXGE_PCI_PORT_LOGIC_OFFSET 0x98 6760 #define NXGE_PIM_RESET (1ULL << 29) 6761 #define NXGE_GLU_RESET (1ULL << 30) 6762 #define NXGE_NIU_RESET (1ULL << 31) 6763 #define NXGE_PCI_RESET_ALL (NXGE_PIM_RESET | \ 6764 NXGE_GLU_RESET | \ 6765 NXGE_NIU_RESET) 6766 6767 #define NXGE_WAIT_QUITE_TIME 200000 6768 #define NXGE_WAIT_QUITE_RETRY 40 6769 #define NXGE_PCI_RESET_WAIT 1000000 /* one second */ 6770 6771 static void 6772 nxge_niu_peu_reset(p_nxge_t nxgep) 6773 { 6774 uint32_t rvalue; 6775 p_nxge_hw_list_t hw_p; 6776 p_nxge_t fnxgep; 6777 int i, j; 6778 6779 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_niu_peu_reset")); 6780 if ((hw_p = nxgep->nxge_hw_p) == NULL) { 6781 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 6782 "==> nxge_niu_peu_reset: NULL hardware pointer")); 6783 return; 6784 } 6785 6786 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 6787 "==> nxge_niu_peu_reset: flags 0x%x link timer id %d timer id %d", 6788 hw_p->flags, nxgep->nxge_link_poll_timerid, 6789 nxgep->nxge_timerid)); 6790 6791 MUTEX_ENTER(&hw_p->nxge_cfg_lock); 6792 /* 6793 * Make sure other instances from the same hardware 6794 * stop sending PIO and in quiescent state. 
6795 */ 6796 for (i = 0; i < NXGE_MAX_PORTS; i++) { 6797 fnxgep = hw_p->nxge_p[i]; 6798 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 6799 "==> nxge_niu_peu_reset: checking entry %d " 6800 "nxgep $%p", i, fnxgep)); 6801 #ifdef NXGE_DEBUG 6802 if (fnxgep) { 6803 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 6804 "==> nxge_niu_peu_reset: entry %d (function %d) " 6805 "link timer id %d hw timer id %d", 6806 i, fnxgep->function_num, 6807 fnxgep->nxge_link_poll_timerid, 6808 fnxgep->nxge_timerid)); 6809 } 6810 #endif 6811 if (fnxgep && fnxgep != nxgep && 6812 (fnxgep->nxge_timerid || fnxgep->nxge_link_poll_timerid)) { 6813 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 6814 "==> nxge_niu_peu_reset: checking $%p " 6815 "(function %d) timer ids", 6816 fnxgep, fnxgep->function_num)); 6817 for (j = 0; j < NXGE_WAIT_QUITE_RETRY; j++) { 6818 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 6819 "==> nxge_niu_peu_reset: waiting")); 6820 NXGE_DELAY(NXGE_WAIT_QUITE_TIME); 6821 if (!fnxgep->nxge_timerid && 6822 !fnxgep->nxge_link_poll_timerid) { 6823 break; 6824 } 6825 } 6826 NXGE_DELAY(NXGE_WAIT_QUITE_TIME); 6827 if (fnxgep->nxge_timerid || 6828 fnxgep->nxge_link_poll_timerid) { 6829 MUTEX_EXIT(&hw_p->nxge_cfg_lock); 6830 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 6831 "<== nxge_niu_peu_reset: cannot reset " 6832 "hardware (devices are still in use)")); 6833 return; 6834 } 6835 } 6836 } 6837 6838 if ((hw_p->flags & COMMON_RESET_NIU_PCI) != COMMON_RESET_NIU_PCI) { 6839 hw_p->flags |= COMMON_RESET_NIU_PCI; 6840 rvalue = pci_config_get32(nxgep->dev_regs->nxge_pciregh, 6841 NXGE_PCI_PORT_LOGIC_OFFSET); 6842 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 6843 "nxge_niu_peu_reset: read offset 0x%x (%d) " 6844 "(data 0x%x)", 6845 NXGE_PCI_PORT_LOGIC_OFFSET, 6846 NXGE_PCI_PORT_LOGIC_OFFSET, 6847 rvalue)); 6848 6849 rvalue |= NXGE_PCI_RESET_ALL; 6850 pci_config_put32(nxgep->dev_regs->nxge_pciregh, 6851 NXGE_PCI_PORT_LOGIC_OFFSET, rvalue); 6852 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 6853 "nxge_niu_peu_reset: RESETTING NIU: write NIU reset 0x%x", 
6854 rvalue)); 6855 6856 NXGE_DELAY(NXGE_PCI_RESET_WAIT); 6857 } 6858 6859 MUTEX_EXIT(&hw_p->nxge_cfg_lock); 6860 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_niu_peu_reset")); 6861 } 6862 6863 static void 6864 nxge_set_pci_replay_timeout(p_nxge_t nxgep) 6865 { 6866 p_dev_regs_t dev_regs; 6867 uint32_t value; 6868 6869 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_set_pci_replay_timeout")); 6870 6871 if (!nxge_set_replay_timer) { 6872 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 6873 "==> nxge_set_pci_replay_timeout: will not change " 6874 "the timeout")); 6875 return; 6876 } 6877 6878 dev_regs = nxgep->dev_regs; 6879 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 6880 "==> nxge_set_pci_replay_timeout: dev_regs 0x%p pcireg 0x%p", 6881 dev_regs, dev_regs->nxge_pciregh)); 6882 6883 if (dev_regs == NULL || (dev_regs->nxge_pciregh == NULL)) { 6884 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 6885 "==> nxge_set_pci_replay_timeout: NULL dev_regs $%p or " 6886 "no PCI handle", 6887 dev_regs)); 6888 return; 6889 } 6890 value = (pci_config_get32(dev_regs->nxge_pciregh, 6891 PCI_REPLAY_TIMEOUT_CFG_OFFSET) | 6892 (nxge_replay_timeout << PCI_REPLAY_TIMEOUT_SHIFT)); 6893 6894 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 6895 "nxge_set_pci_replay_timeout: replay timeout value before set 0x%x " 6896 "(timeout value to set 0x%x at offset 0x%x) value 0x%x", 6897 pci_config_get32(dev_regs->nxge_pciregh, 6898 PCI_REPLAY_TIMEOUT_CFG_OFFSET), nxge_replay_timeout, 6899 PCI_REPLAY_TIMEOUT_CFG_OFFSET, value)); 6900 6901 pci_config_put32(dev_regs->nxge_pciregh, PCI_REPLAY_TIMEOUT_CFG_OFFSET, 6902 value); 6903 6904 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 6905 "nxge_set_pci_replay_timeout: replay timeout value after set 0x%x", 6906 pci_config_get32(dev_regs->nxge_pciregh, 6907 PCI_REPLAY_TIMEOUT_CFG_OFFSET))); 6908 6909 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_set_pci_replay_timeout")); 6910 } 6911