/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * SunOS MT STREAMS NIU/Neptune 10Gb Ethernet Device Driver.
 */
#include <sys/nxge/nxge_impl.h>
#include <sys/nxge/nxge_hio.h>
#include <sys/nxge/nxge_rxdma.h>
#include <sys/pcie.h>

uint32_t nxge_use_partition = 0;	/* debug partition flag */
uint32_t nxge_dma_obp_props_only = 1;	/* use obp published props */
uint32_t nxge_use_rdc_intr = 1;		/* debug to assign rdc intr */
/*
 * PSARC/2007/453 MSI-X interrupt limit override
 * (This PSARC case is limited to MSI-X vectors
 * and SPARC platforms only).
 */
#if defined(_BIG_ENDIAN)
uint32_t nxge_msi_enable = 2;
#else
uint32_t nxge_msi_enable = 1;
#endif

/*
 * Software workaround for a Neptune (PCI-E) hardware interrupt bug:
 * the hardware may generate spurious interrupts after the device
 * interrupt handler has been removed. If this flag is enabled, the
 * driver will reset the hardware when devices are being detached.
 */
uint32_t nxge_peu_reset_enable = 0;

/*
 * Software workaround for the hardware
 * checksum bugs that affect packet transmission
 * and receive:
 *
 * Usage of nxge_cksum_offload:
 *
 *  (1) nxge_cksum_offload = 0 (default):
 *	- transmitting packets:
 *	  TCP: uses the hardware checksum feature.
 *	  UDP: driver will compute the software checksum
 *	       based on the partial checksum computed
 *	       by the IP layer.
 *	- receiving packets:
 *	  TCP: marks packet checksum flags based on hardware result.
 *	  UDP: will not mark checksum flags.
 *
 *  (2) nxge_cksum_offload = 1:
 *	- transmitting packets:
 *	  TCP/UDP: uses the hardware checksum feature.
 *	- receiving packets:
 *	  TCP/UDP: marks packet checksum flags based on hardware result.
 *
 *  (3) nxge_cksum_offload = 2:
 *	- The driver will not register its checksum capability.
 *	  Checksums for both TCP and UDP will be computed
 *	  by the stack.
 *	- Software LSO is not allowed in this case.
 *
 *  (4) nxge_cksum_offload > 2:
 *	- Will be treated as if it were set to 2
 *	  (the stack will compute the checksum).
 *
 *  (5) If the hardware bug is fixed, this workaround
 *	needs to be updated accordingly to reflect
 *	the new hardware revision.
 */
uint32_t nxge_cksum_offload = 0;
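/*
 * Illustrative note (not from the original source): a global tunable
 * such as nxge_cksum_offload is normally set from /etc/system, e.g.
 *
 *	set nxge:nxge_cksum_offload = 2
 *
 * or patched in a live kernel with mdb(1):
 *
 *	echo 'nxge_cksum_offload/W 2' | mdb -kw
 */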

/*
 * Global tunable parameters (settable via /etc/system or adb).
 */
uint32_t nxge_rbr_size = NXGE_RBR_RBB_DEFAULT;
uint32_t nxge_rbr_spare_size = 0;
uint32_t nxge_rcr_size = NXGE_RCR_DEFAULT;
uint32_t nxge_tx_ring_size = NXGE_TX_RING_DEFAULT;
boolean_t nxge_no_msg = B_TRUE;		/* control message display */
uint32_t nxge_no_link_notify = 0;	/* control DL_NOTIFY */
uint32_t nxge_bcopy_thresh = TX_BCOPY_MAX;
uint32_t nxge_dvma_thresh = TX_FASTDVMA_MIN;
uint32_t nxge_dma_stream_thresh = TX_STREAM_MIN;
uint32_t nxge_jumbo_mtu = TX_JUMBO_MTU;
boolean_t nxge_jumbo_enable = B_FALSE;
uint16_t nxge_rcr_timeout = NXGE_RDC_RCR_TIMEOUT;
uint16_t nxge_rcr_threshold = NXGE_RDC_RCR_THRESHOLD;
nxge_tx_mode_t nxge_tx_scheme = NXGE_USE_SERIAL;

/* Maximum LSO size */
#define	NXGE_LSO_MAXLEN	65535
uint32_t nxge_lso_max = NXGE_LSO_MAXLEN;

/*
 * Debugging flags:
 *	nxge_no_tx_lb : transmit load balancing
 *	nxge_tx_lb_policy: 0 - TCP port (default)
 *			   3 - DEST MAC
 */
uint32_t nxge_no_tx_lb = 0;
uint32_t nxge_tx_lb_policy = NXGE_TX_LB_TCPUDP;

/*
 * Tunable to reduce the amount of time spent in the
 * ISR doing Rx processing.
 */
uint32_t nxge_max_rx_pkts = 1024;

/*
 * Tunables to manage the receive buffer blocks.
 *
 * nxge_rx_threshold_hi:	copy all buffers.
 * nxge_rx_bcopy_size_type:	receive buffer block size type.
 * nxge_rx_threshold_lo:	copy only up to tunable block size type.
 */
nxge_rxbuf_threshold_t nxge_rx_threshold_hi = NXGE_RX_COPY_6;
nxge_rxbuf_type_t nxge_rx_buf_size_type = RCR_PKTBUFSZ_0;
nxge_rxbuf_threshold_t nxge_rx_threshold_lo = NXGE_RX_COPY_3;

/* Use kmem_alloc() to allocate data buffers. */
#if !defined(__i386)
uint32_t nxge_use_kmem_alloc = 1;
#else
uint32_t nxge_use_kmem_alloc = 0;
#endif

rtrace_t npi_rtracebuf;
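/*
 * Rough sketch of the intended copy policy (paraphrased for clarity;
 * this is not the driver's actual receive-path code). Let "used" be
 * the fraction of receive buffers currently loaned up to the stack:
 *
 *	if (used > nxge_rx_threshold_hi)	copy every packet;
 *	else if (used > nxge_rx_threshold_lo)	copy only packets that fit
 *						in nxge_rx_buf_size_type;
 *	else					loan buffers up (no copy).
 */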

#if defined(sun4v)
/*
 * Hypervisor N2/NIU services information.
 */
static hsvc_info_t niu_hsvc = {
	HSVC_REV_1, NULL, HSVC_GROUP_NIU, NIU_MAJOR_VER,
	NIU_MINOR_VER, "nxge"
};

static int nxge_hsvc_register(p_nxge_t);
#endif

/*
 * Function Prototypes
 */
static int nxge_attach(dev_info_t *, ddi_attach_cmd_t);
static int nxge_detach(dev_info_t *, ddi_detach_cmd_t);
static void nxge_unattach(p_nxge_t);

#if NXGE_PROPERTY
static void nxge_remove_hard_properties(p_nxge_t);
#endif

/*
 * These two functions are required by nxge_hio.c
 */
extern int nxge_m_mmac_add(void *arg, mac_multi_addr_t *maddr);
extern int nxge_m_mmac_remove(void *arg, mac_addr_slot_t slot);

static nxge_status_t nxge_setup_system_dma_pages(p_nxge_t);

static nxge_status_t nxge_setup_mutexes(p_nxge_t);
static void nxge_destroy_mutexes(p_nxge_t);

static nxge_status_t nxge_map_regs(p_nxge_t nxgep);
static void nxge_unmap_regs(p_nxge_t nxgep);
#ifdef NXGE_DEBUG
static void nxge_test_map_regs(p_nxge_t nxgep);
#endif

static nxge_status_t nxge_add_intrs(p_nxge_t nxgep);
static nxge_status_t nxge_add_soft_intrs(p_nxge_t nxgep);
static void nxge_remove_intrs(p_nxge_t nxgep);
static void nxge_remove_soft_intrs(p_nxge_t nxgep);

static nxge_status_t nxge_add_intrs_adv(p_nxge_t nxgep);
static nxge_status_t nxge_add_intrs_adv_type(p_nxge_t, uint32_t);
static nxge_status_t nxge_add_intrs_adv_type_fix(p_nxge_t, uint32_t);
static void nxge_intrs_enable(p_nxge_t nxgep);
static void nxge_intrs_disable(p_nxge_t nxgep);

static void nxge_suspend(p_nxge_t);
static nxge_status_t nxge_resume(p_nxge_t);

static nxge_status_t nxge_setup_dev(p_nxge_t);
static void nxge_destroy_dev(p_nxge_t);

static nxge_status_t nxge_alloc_mem_pool(p_nxge_t);
static void nxge_free_mem_pool(p_nxge_t);

nxge_status_t nxge_alloc_rx_mem_pool(p_nxge_t);
static void nxge_free_rx_mem_pool(p_nxge_t);

nxge_status_t nxge_alloc_tx_mem_pool(p_nxge_t);
static void nxge_free_tx_mem_pool(p_nxge_t);

static nxge_status_t nxge_dma_mem_alloc(p_nxge_t, dma_method_t,
	struct ddi_dma_attr *, size_t, ddi_device_acc_attr_t *, uint_t,
	p_nxge_dma_common_t);

static void nxge_dma_mem_free(p_nxge_dma_common_t);
static void nxge_dma_free_rx_data_buf(p_nxge_dma_common_t);

static nxge_status_t nxge_alloc_rx_buf_dma(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, size_t, size_t, uint32_t *);
static void nxge_free_rx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t);

static nxge_status_t nxge_alloc_rx_cntl_dma(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, size_t);
static void nxge_free_rx_cntl_dma(p_nxge_t, p_nxge_dma_common_t);

extern nxge_status_t nxge_alloc_tx_buf_dma(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, size_t, size_t, uint32_t *);
static void nxge_free_tx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t);

extern nxge_status_t nxge_alloc_tx_cntl_dma(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, size_t);
static void nxge_free_tx_cntl_dma(p_nxge_t, p_nxge_dma_common_t);

static int nxge_init_common_dev(p_nxge_t);
static void nxge_uninit_common_dev(p_nxge_t);
extern int nxge_param_set_mac(p_nxge_t, queue_t *, mblk_t *,
	char *, caddr_t);

/*
 * The next declarations are for the GLDv3 interface.
 */
static int nxge_m_start(void *);
static void nxge_m_stop(void *);
static int nxge_m_unicst(void *, const uint8_t *);
static int nxge_m_multicst(void *, boolean_t, const uint8_t *);
static int nxge_m_promisc(void *, boolean_t);
static void nxge_m_ioctl(void *, queue_t *, mblk_t *);
static void nxge_m_resources(void *);
mblk_t *nxge_m_tx(void *arg, mblk_t *);
static nxge_status_t nxge_mac_register(p_nxge_t);
static int nxge_altmac_set(p_nxge_t nxgep, uint8_t *mac_addr,
	mac_addr_slot_t slot);
void nxge_mmac_kstat_update(p_nxge_t nxgep, mac_addr_slot_t slot,
	boolean_t factory);
static int nxge_m_mmac_reserve(void *arg, mac_multi_addr_t *maddr);
static int nxge_m_mmac_modify(void *arg, mac_multi_addr_t *maddr);
static int nxge_m_mmac_get(void *arg, mac_multi_addr_t *maddr);
static boolean_t nxge_m_getcapab(void *, mac_capab_t, void *);
static int nxge_m_setprop(void *, const char *, mac_prop_id_t,
	uint_t, const void *);
static int nxge_m_getprop(void *, const char *, mac_prop_id_t,
	uint_t, uint_t, void *);
static int nxge_set_priv_prop(nxge_t *, const char *, uint_t,
	const void *);
static int nxge_get_priv_prop(nxge_t *, const char *, uint_t, uint_t,
	void *);
static int nxge_get_def_val(nxge_t *, mac_prop_id_t, uint_t, void *);

static void nxge_niu_peu_reset(p_nxge_t nxgep);

mac_priv_prop_t nxge_priv_props[] = {
	{"_adv_10gfdx_cap", MAC_PROP_PERM_RW},
	{"_adv_pause_cap", MAC_PROP_PERM_RW},
	{"_function_number", MAC_PROP_PERM_READ},
	{"_fw_version", MAC_PROP_PERM_READ},
	{"_port_mode", MAC_PROP_PERM_READ},
	{"_hot_swap_phy", MAC_PROP_PERM_READ},
	{"_accept_jumbo", MAC_PROP_PERM_RW},
	{"_rxdma_intr_time", MAC_PROP_PERM_RW},
	{"_rxdma_intr_pkts", MAC_PROP_PERM_RW},
	{"_class_opt_ipv4_tcp", MAC_PROP_PERM_RW},
	{"_class_opt_ipv4_udp", MAC_PROP_PERM_RW},
	{"_class_opt_ipv4_ah", MAC_PROP_PERM_RW},
	{"_class_opt_ipv4_sctp", MAC_PROP_PERM_RW},
	{"_class_opt_ipv6_tcp", MAC_PROP_PERM_RW},
	{"_class_opt_ipv6_udp", MAC_PROP_PERM_RW},
	{"_class_opt_ipv6_ah", MAC_PROP_PERM_RW},
	{"_class_opt_ipv6_sctp", MAC_PROP_PERM_RW},
	{"_soft_lso_enable", MAC_PROP_PERM_RW}
};

#define	NXGE_MAX_PRIV_PROPS	\
	(sizeof (nxge_priv_props)/sizeof (mac_priv_prop_t))
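/*
 * For illustration (assumes a link named nxge0, which is not part of
 * this file): private properties like those above are normally driven
 * through dladm(1M), e.g.
 *
 *	dladm set-linkprop -p _soft_lso_enable=1 nxge0
 *	dladm show-linkprop -p _rxdma_intr_time nxge0
 */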

#define	NXGE_M_CALLBACK_FLAGS	\
	(MC_RESOURCES | MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP)

#define	NXGE_NEPTUNE_MAGIC	0x4E584745UL
#define	MAX_DUMP_SZ		256

mac_callbacks_t nxge_m_callbacks = {
	NXGE_M_CALLBACK_FLAGS,
	nxge_m_stat,
	nxge_m_start,
	nxge_m_stop,
	nxge_m_promisc,
	nxge_m_multicst,
	nxge_m_unicst,
	nxge_m_tx,
	nxge_m_resources,
	nxge_m_ioctl,
	nxge_m_getcapab,
	NULL,
	NULL,
	nxge_m_setprop,
	nxge_m_getprop
};

void nxge_err_inject(p_nxge_t, queue_t *, mblk_t *);

/* PSARC/2007/453 MSI-X interrupt limit override. */
#define	NXGE_MSIX_REQUEST_10G	8
#define	NXGE_MSIX_REQUEST_1G	2
static int nxge_create_msi_property(p_nxge_t);

/*
 * These global variables control the message
 * output.
 */
out_dbgmsg_t nxge_dbgmsg_out = DBG_CONSOLE | STR_LOG;
uint64_t nxge_debug_level;

/*
 * This list contains the instance structures for the Neptune
 * devices present in the system. The lock exists to guarantee
 * mutually exclusive access to the list.
 */
void *nxge_list = NULL;

void *nxge_hw_list = NULL;
nxge_os_mutex_t nxge_common_lock;

extern uint64_t npi_debug_level;

extern nxge_status_t nxge_ldgv_init(p_nxge_t, int *, int *);
extern nxge_status_t nxge_ldgv_init_n2(p_nxge_t, int *, int *);
extern nxge_status_t nxge_ldgv_uninit(p_nxge_t);
extern nxge_status_t nxge_intr_ldgv_init(p_nxge_t);
extern void nxge_fm_init(p_nxge_t, ddi_device_acc_attr_t *,
	ddi_device_acc_attr_t *, ddi_dma_attr_t *);
extern void nxge_fm_fini(p_nxge_t);
extern npi_status_t npi_mac_altaddr_disable(npi_handle_t, uint8_t, uint8_t);

/*
 * Count used to maintain the number of buffers being used
 * by Neptune instances and loaned up to the upper layers.
 */
uint32_t nxge_mblks_pending = 0;

/*
 * Device register access attributes for PIO.
 */
static ddi_device_acc_attr_t nxge_dev_reg_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
};

/*
 * Device descriptor access attributes for DMA.
 */
static ddi_device_acc_attr_t nxge_dev_desc_dma_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};

/*
 * Device buffer access attributes for DMA.
 */
static ddi_device_acc_attr_t nxge_dev_buf_dma_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_BE_ACC,
	DDI_STRICTORDER_ACC
};

ddi_dma_attr_t nxge_desc_dma_attr = {
	DMA_ATTR_V0,		/* version number */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
#ifndef NIU_PA_WORKAROUND
	0x100000,		/* alignment */
#else
	0x2000,
#endif
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	1,			/* scatter/gather list length */
	(unsigned int)1,	/* granularity */
	0			/* attribute flags */
};

ddi_dma_attr_t nxge_tx_dma_attr = {
	DMA_ATTR_V0,		/* version number */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
#if defined(_BIG_ENDIAN)
	0x2000,			/* alignment */
#else
	0x1000,			/* alignment */
#endif
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	5,			/* scatter/gather list length */
	(unsigned int)1,	/* granularity */
	0			/* attribute flags */
};
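/*
 * Illustrative only (not part of the original file): these attribute
 * structures are consumed by the standard DDI DMA routines, e.g.
 *
 *	ddi_dma_handle_t handle;
 *	if (ddi_dma_alloc_handle(dip, &nxge_tx_dma_attr,
 *	    DDI_DMA_DONTWAIT, NULL, &handle) != DDI_SUCCESS)
 *		return (NXGE_ERROR);
 */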

ddi_dma_attr_t nxge_rx_dma_attr = {
	DMA_ATTR_V0,		/* version number */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
	0x2000,			/* alignment */
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	1,			/* scatter/gather list length */
	(unsigned int)1,	/* granularity */
	DDI_DMA_RELAXED_ORDERING /* attribute flags */
};

ddi_dma_lim_t nxge_dma_limits = {
	(uint_t)0,		/* dlim_addr_lo */
	(uint_t)0xffffffff,	/* dlim_addr_hi */
	(uint_t)0xffffffff,	/* dlim_cntr_max */
	(uint_t)0xfc00fc,	/* dlim_burstsizes for 32 and 64 bit xfers */
	0x1,			/* dlim_minxfer */
	1024			/* dlim_speed */
};

dma_method_t nxge_force_dma = DVMA;

/*
 * DMA chunk sizes.
 *
 * Try to allocate the largest possible size
 * so that fewer DMA chunks need to be managed.
 */
#ifdef NIU_PA_WORKAROUND
size_t alloc_sizes[] = {0x2000};
#else
size_t alloc_sizes[] = {0x1000, 0x2000, 0x4000, 0x8000,
		0x10000, 0x20000, 0x40000, 0x80000,
		0x100000, 0x200000, 0x400000, 0x800000,
		0x1000000, 0x2000000, 0x4000000};
#endif

/*
 * Translate "dev_t" to a pointer to the associated "dev_info_t".
 */

extern void nxge_get_environs(nxge_t *);

static int
nxge_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	p_nxge_t nxgep = NULL;
	int instance;
	int status = DDI_SUCCESS;
	uint8_t portn;
	nxge_mmac_t *mmac_info;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_attach"));

	/*
	 * Get the device instance, since we'll need to set up
	 * or retrieve a soft state for this instance.
	 */
	instance = ddi_get_instance(dip);

	switch (cmd) {
	case DDI_ATTACH:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_ATTACH"));
		break;

	case DDI_RESUME:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_RESUME"));
		nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);
		if (nxgep == NULL) {
			status = DDI_FAILURE;
			break;
		}
		if (nxgep->dip != dip) {
			status = DDI_FAILURE;
			break;
		}
		if (nxgep->suspended == DDI_PM_SUSPEND) {
			status = ddi_dev_is_needed(nxgep->dip, 0, 1);
		} else {
			status = nxge_resume(nxgep);
		}
		goto nxge_attach_exit;

	case DDI_PM_RESUME:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_RESUME"));
		nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);
		if (nxgep == NULL) {
			status = DDI_FAILURE;
			break;
		}
		if (nxgep->dip != dip) {
			status = DDI_FAILURE;
			break;
		}
		status = nxge_resume(nxgep);
		goto nxge_attach_exit;

	default:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing unknown"));
		status = DDI_FAILURE;
		goto nxge_attach_exit;
	}

	if (ddi_soft_state_zalloc(nxge_list, instance) == DDI_FAILURE) {
		status = DDI_FAILURE;
		goto nxge_attach_exit;
	}

	nxgep = ddi_get_soft_state(nxge_list, instance);
	if (nxgep == NULL) {
		status = NXGE_ERROR;
		goto nxge_attach_fail2;
	}

	nxgep->nxge_magic = NXGE_MAGIC;

	nxgep->drv_state = 0;
	nxgep->dip = dip;
	nxgep->instance = instance;
	nxgep->p_dip = ddi_get_parent(dip);
	nxgep->nxge_debug_level = nxge_debug_level;
	npi_debug_level = nxge_debug_level;

	/* Are we a guest running in a Hybrid I/O environment? */
	nxge_get_environs(nxgep);

	status = nxge_map_regs(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_map_regs failed"));
		goto nxge_attach_fail3;
	}

	nxge_fm_init(nxgep, &nxge_dev_reg_acc_attr,
	    &nxge_dev_desc_dma_acc_attr,
	    &nxge_rx_dma_attr);

	/* Create & initialize the per-Neptune data structure */
	/* (even if we're a guest). */
	status = nxge_init_common_dev(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_init_common_dev failed"));
		goto nxge_attach_fail4;
	}

#if defined(sun4v)
	/* This is required by nxge_hio_init(), which follows. */
	if ((status = nxge_hsvc_register(nxgep)) != DDI_SUCCESS)
		goto nxge_attach_fail;
#endif

	if ((status = nxge_hio_init(nxgep)) != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_hio_init failed"));
		goto nxge_attach_fail4;
	}

	if (nxgep->niu_type == NEPTUNE_2_10GF) {
		if (nxgep->function_num > 1) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "Unsupported"
			    " function %d. Only functions 0 and 1 are "
			    "supported for this card.", nxgep->function_num));
			status = NXGE_ERROR;
			goto nxge_attach_fail4;
		}
	}

	if (isLDOMguest(nxgep)) {
		/*
		 * Use the function number here.
		 */
		nxgep->mac.portnum = nxgep->function_num;
		nxgep->mac.porttype = PORT_TYPE_LOGICAL;

		/* XXX We'll set the MAC address counts to 1 for now. */
		mmac_info = &nxgep->nxge_mmac_info;
		mmac_info->num_mmac = 1;
		mmac_info->naddrfree = 1;
	} else {
		portn = NXGE_GET_PORT_NUM(nxgep->function_num);
		nxgep->mac.portnum = portn;
		if ((portn == 0) || (portn == 1))
			nxgep->mac.porttype = PORT_TYPE_XMAC;
		else
			nxgep->mac.porttype = PORT_TYPE_BMAC;
		/*
		 * Neptune has 4 ports: the first 2 ports use XMAC (10G MAC)
		 * internally; the remaining 2 ports use BMAC (1G "Big" MAC).
		 * The two types of MACs have different characteristics.
		 */
		mmac_info = &nxgep->nxge_mmac_info;
		if (nxgep->function_num < 2) {
			mmac_info->num_mmac = XMAC_MAX_ALT_ADDR_ENTRY;
			mmac_info->naddrfree = XMAC_MAX_ALT_ADDR_ENTRY;
		} else {
			mmac_info->num_mmac = BMAC_MAX_ALT_ADDR_ENTRY;
			mmac_info->naddrfree = BMAC_MAX_ALT_ADDR_ENTRY;
		}
	}
	/*
	 * Set up the ndd parameters for this instance.
	 */
	nxge_init_param(nxgep);

	/*
	 * Set up the Register Tracing Buffer.
	 */
	npi_rtrace_buf_init((rtrace_t *)&npi_rtracebuf);

	/* init stats ptr */
	nxge_init_statsp(nxgep);

	/*
	 * Copy the vpd info from eeprom to a local data
	 * structure, and then check its validity.
	 */
	if (!isLDOMguest(nxgep)) {
		int *regp;
		uint_t reglen;
		int rv;

		nxge_vpd_info_get(nxgep);

		/* Find the NIU config handle. */
		rv = ddi_prop_lookup_int_array(DDI_DEV_T_ANY,
		    ddi_get_parent(nxgep->dip), DDI_PROP_DONTPASS,
		    "reg", &regp, &reglen);
		if (rv != DDI_PROP_SUCCESS) {
			goto nxge_attach_fail5;
		}
		/*
		 * address_hi (the first int in the "reg" property)
		 * contains the config handle, but bits 28-31 carry
		 * OBP-specific info and must be masked off.
		 */
		nxgep->niu_cfg_hdl = (*regp) & 0xFFFFFFF;
		ddi_prop_free(regp);
	}
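	/*
	 * Worked example (illustrative): if address_hi were 0x82001000,
	 * masking with 0xFFFFFFF drops the top nibble and leaves a
	 * config handle of 0x2001000.
	 */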

	if (isLDOMguest(nxgep)) {
		uchar_t *prop_val;
		uint_t prop_len;

		extern void nxge_get_logical_props(p_nxge_t);

		nxgep->statsp->mac_stats.xcvr_inuse = LOGICAL_XCVR;
		nxgep->mac.portmode = PORT_LOGICAL;
		(void) ddi_prop_update_string(DDI_DEV_T_NONE, nxgep->dip,
		    "phy-type", "virtual transceiver");

		nxgep->nports = 1;
		nxgep->board_ver = 0;	/* XXX What? */

		/*
		 * The local-mac-address property tells us which
		 * specific MAC address the Hybrid resource is
		 * associated with.
		 */
		if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, nxgep->dip, 0,
		    "local-mac-address", &prop_val,
		    &prop_len) != DDI_PROP_SUCCESS) {
			goto nxge_attach_fail5;
		}
		if (prop_len != ETHERADDRL) {
			ddi_prop_free(prop_val);
			goto nxge_attach_fail5;
		}
		ether_copy(prop_val, nxgep->hio_mac_addr);
		ddi_prop_free(prop_val);
		nxge_get_logical_props(nxgep);
	} else {
		status = nxge_xcvr_find(nxgep);
		if (status != NXGE_OK) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_attach: "
			    "couldn't determine card type .... exit"));
			goto nxge_attach_fail5;
		}

		status = nxge_get_config_properties(nxgep);
		if (status != NXGE_OK) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "get_hw create failed"));
			goto nxge_attach_fail;
		}
	}

	/*
	 * Set up the kstats for the driver.
	 */
	nxge_setup_kstats(nxgep);

	if (!isLDOMguest(nxgep))
		nxge_setup_param(nxgep);

	status = nxge_setup_system_dma_pages(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "set dma page failed"));
		goto nxge_attach_fail;
	}

	nxge_hw_id_init(nxgep);

	if (!isLDOMguest(nxgep))
		nxge_hw_init_niu_common(nxgep);

	status = nxge_setup_mutexes(nxgep);
	if (status != NXGE_OK) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set mutex failed"));
		goto nxge_attach_fail;
	}

#if defined(sun4v)
	if (isLDOMguest(nxgep)) {
		/* Find our VR & channel sets. */
		status = nxge_hio_vr_add(nxgep);
		goto nxge_attach_exit;
	}
#endif

	status = nxge_setup_dev(nxgep);
	if (status != DDI_SUCCESS) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set dev failed"));
		goto nxge_attach_fail;
	}

	status = nxge_add_intrs(nxgep);
	if (status != DDI_SUCCESS) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "add_intr failed"));
		goto nxge_attach_fail;
	}
	status = nxge_add_soft_intrs(nxgep);
	if (status != DDI_SUCCESS) {
		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
		    "add_soft_intr failed"));
		goto nxge_attach_fail;
	}

	/*
	 * Enable interrupts.
	 */
	nxge_intrs_enable(nxgep);

	/* If a guest, register with vio_net instead. */
	if ((status = nxge_mac_register(nxgep)) != NXGE_OK) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "unable to register to mac layer (%d)", status));
		goto nxge_attach_fail;
	}

	mac_link_update(nxgep->mach, LINK_STATE_UNKNOWN);

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "registered to mac (instance %d)", instance));

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);

	goto nxge_attach_exit;

nxge_attach_fail:
	nxge_unattach(nxgep);
	goto nxge_attach_fail1;

nxge_attach_fail5:
	/*
	 * Tear down the ndd parameters setup.
	 */
	nxge_destroy_param(nxgep);

	/*
	 * Tear down the kstat setup.
	 */
	nxge_destroy_kstats(nxgep);

nxge_attach_fail4:
	if (nxgep->nxge_hw_p) {
		nxge_uninit_common_dev(nxgep);
		nxgep->nxge_hw_p = NULL;
	}

nxge_attach_fail3:
	/*
	 * Unmap the register setup.
	 */
	nxge_unmap_regs(nxgep);

	nxge_fm_fini(nxgep);

nxge_attach_fail2:
	ddi_soft_state_free(nxge_list, nxgep->instance);

nxge_attach_fail1:
	if (status != NXGE_OK)
		status = (NXGE_ERROR | NXGE_DDI_FAILED);
	nxgep = NULL;

nxge_attach_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_attach status = 0x%08x",
	    status));

	return (status);
}

static int
nxge_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int status = DDI_SUCCESS;
	int instance;
	p_nxge_t nxgep = NULL;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_detach"));
	instance = ddi_get_instance(dip);
	nxgep = ddi_get_soft_state(nxge_list, instance);
	if (nxgep == NULL) {
		status = DDI_FAILURE;
		goto nxge_detach_exit;
	}

	switch (cmd) {
	case DDI_DETACH:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_DETACH"));
		break;

	case DDI_PM_SUSPEND:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_SUSPEND"));
		nxgep->suspended = DDI_PM_SUSPEND;
		nxge_suspend(nxgep);
		break;

	case DDI_SUSPEND:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_SUSPEND"));
		if (nxgep->suspended != DDI_PM_SUSPEND) {
			nxgep->suspended = DDI_SUSPEND;
			nxge_suspend(nxgep);
		}
		break;

	default:
		status = DDI_FAILURE;
	}

	if (cmd != DDI_DETACH)
		goto nxge_detach_exit;

	/*
	 * Stop the xcvr polling.
	 */
	nxgep->suspended = cmd;

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);

	if (isLDOMguest(nxgep)) {
		nxge_hio_unregister(nxgep);
	} else if (nxgep->mach && (status = mac_unregister(nxgep->mach)) != 0) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "<== nxge_detach status = 0x%08X", status));
		return (DDI_FAILURE);
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "<== nxge_detach (mac_unregister) status = 0x%08X", status));

	nxge_unattach(nxgep);
	nxgep = NULL;

nxge_detach_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_detach status = 0x%08X",
	    status));

	return (status);
}

static void
nxge_unattach(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unattach"));

	if (nxgep == NULL || nxgep->dev_regs == NULL) {
		return;
	}

	nxgep->nxge_magic = 0;

	if (nxgep->nxge_timerid) {
		nxge_stop_timer(nxgep, nxgep->nxge_timerid);
		nxgep->nxge_timerid = 0;
	}

	/*
	 * If this flag is set, it affects Neptune only.
	 */
	if ((nxgep->niu_type != N2_NIU) && nxge_peu_reset_enable) {
		nxge_niu_peu_reset(nxgep);
	}

#if defined(sun4v)
	if (isLDOMguest(nxgep)) {
		(void) nxge_hio_vr_release(nxgep);
	}
#endif

	if (nxgep->nxge_hw_p) {
		nxge_uninit_common_dev(nxgep);
		nxgep->nxge_hw_p = NULL;
	}

#if defined(sun4v)
	if (nxgep->niu_type == N2_NIU && nxgep->niu_hsvc_available == B_TRUE) {
		(void) hsvc_unregister(&nxgep->niu_hsvc);
		nxgep->niu_hsvc_available = B_FALSE;
	}
#endif
	/*
	 * Stop any further interrupts.
	 */
	nxge_remove_intrs(nxgep);

	/* remove soft interrupts */
	nxge_remove_soft_intrs(nxgep);

	/*
	 * Stop the device and free resources.
	 */
	if (!isLDOMguest(nxgep)) {
		nxge_destroy_dev(nxgep);
	}

	/*
	 * Tear down the ndd parameters setup.
	 */
	nxge_destroy_param(nxgep);

	/*
	 * Tear down the kstat setup.
	 */
	nxge_destroy_kstats(nxgep);

	/*
	 * Destroy all mutexes.
	 */
	nxge_destroy_mutexes(nxgep);

	/*
	 * Remove the list of ndd parameters which
	 * were set up during attach.
	 */
	if (nxgep->dip) {
		NXGE_DEBUG_MSG((nxgep, OBP_CTL,
		    " nxge_unattach: remove all properties"));

		(void) ddi_prop_remove_all(nxgep->dip);
	}

#if NXGE_PROPERTY
	nxge_remove_hard_properties(nxgep);
#endif

	/*
	 * Unmap the register setup.
	 */
	nxge_unmap_regs(nxgep);

	nxge_fm_fini(nxgep);

	ddi_soft_state_free(nxge_list, nxgep->instance);

	NXGE_DEBUG_MSG((NULL, DDI_CTL, "<== nxge_unattach"));
}

#if defined(sun4v)
int
nxge_hsvc_register(nxge_t *nxgep)
{
	nxge_status_t status;

	if (nxgep->niu_type == N2_NIU) {
		nxgep->niu_hsvc_available = B_FALSE;
		bcopy(&niu_hsvc, &nxgep->niu_hsvc, sizeof (hsvc_info_t));
		if ((status = hsvc_register(&nxgep->niu_hsvc,
		    &nxgep->niu_min_ver)) != 0) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "nxge_attach: %s: cannot negotiate "
			    "hypervisor services revision %d group: 0x%lx "
			    "major: 0x%lx minor: 0x%lx errno: %d",
			    niu_hsvc.hsvc_modname, niu_hsvc.hsvc_rev,
			    niu_hsvc.hsvc_group, niu_hsvc.hsvc_major,
			    niu_hsvc.hsvc_minor, status));
			return (DDI_FAILURE);
		}
		nxgep->niu_hsvc_available = B_TRUE;
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "NIU Hypervisor service enabled"));
	}

	return (DDI_SUCCESS);
}
#endif

static char n2_siu_name[] = "niu";

static nxge_status_t
nxge_map_regs(p_nxge_t nxgep)
{
	int ddi_status = DDI_SUCCESS;
	p_dev_regs_t dev_regs;
	char buf[MAXPATHLEN + 1];
	char *devname;
#ifdef NXGE_DEBUG
	char *sysname;
#endif
	off_t regsize;
	nxge_status_t status = NXGE_OK;
#if !defined(_BIG_ENDIAN)
	off_t pci_offset;
	uint16_t pcie_devctl;
#endif

	if (isLDOMguest(nxgep)) {
		return (nxge_guest_regs_map(nxgep));
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_map_regs"));
	nxgep->dev_regs = NULL;
	dev_regs = KMEM_ZALLOC(sizeof (dev_regs_t), KM_SLEEP);
	dev_regs->nxge_regh = NULL;
	dev_regs->nxge_pciregh = NULL;
	dev_regs->nxge_msix_regh = NULL;
	dev_regs->nxge_vir_regh = NULL;
	dev_regs->nxge_vir2_regh = NULL;
	nxgep->niu_type = NIU_TYPE_NONE;

	devname = ddi_pathname(nxgep->dip, buf);
	ASSERT(strlen(devname) > 0);
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "nxge_map_regs: pathname devname %s", devname));

	if (strstr(devname, n2_siu_name)) {
		/* N2/NIU */
		nxgep->niu_type = N2_NIU;
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: N2/NIU devname %s", devname));
		/* get function number */
		nxgep->function_num =
		    (devname[strlen(devname) - 1] == '1' ? 1 : 0);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: N2/NIU function number %d",
		    nxgep->function_num));
	} else {
		int *prop_val;
		uint_t prop_len;
		uint8_t func_num;

		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, nxgep->dip,
		    0, "reg",
		    &prop_val, &prop_len) != DDI_PROP_SUCCESS) {
			NXGE_DEBUG_MSG((nxgep, VPD_CTL,
			    "Reg property not found"));
			ddi_status = DDI_FAILURE;
			goto nxge_map_regs_fail0;
		} else {
			func_num = (prop_val[0] >> 8) & 0x7;
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "Reg property found: fun # %d",
			    func_num));
			nxgep->function_num = func_num;
			if (isLDOMguest(nxgep)) {
				nxgep->function_num /= 2;
				return (NXGE_OK);
			}
			ddi_prop_free(prop_val);
		}
	}

	switch (nxgep->niu_type) {
	default:
		(void) ddi_dev_regsize(nxgep->dip, 0, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: pci config size 0x%x", regsize));

		ddi_status = ddi_regs_map_setup(nxgep->dip, 0,
		    (caddr_t *)&(dev_regs->nxge_pciregp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_pciregh);
		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs, nxge bus config regs failed"));
			goto nxge_map_regs_fail0;
		}
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_reg: PCI config addr 0x%0llx "
		    " handle 0x%0llx", dev_regs->nxge_pciregp,
		    dev_regs->nxge_pciregh));
		/*
		 * IMPORTANT: workaround for a hardware bit-swapping bug
		 * that ends up setting no-snoop = yes, which leaves DMA
		 * improperly synchronized.
		 */
#if !defined(_BIG_ENDIAN)
		/* workarounds for x86 systems */
		pci_offset = 0x80 + PCIE_DEVCTL;
		pcie_devctl = 0x0;
		pcie_devctl &= PCIE_DEVCTL_ENABLE_NO_SNOOP;
		pcie_devctl |= PCIE_DEVCTL_RO_EN;
		pci_config_put16(dev_regs->nxge_pciregh, pci_offset,
		    pcie_devctl);
#endif

		(void) ddi_dev_regsize(nxgep->dip, 1, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: pio size 0x%x", regsize));
		/* set up the device mapped register */
		ddi_status = ddi_regs_map_setup(nxgep->dip, 1,
		    (caddr_t *)&(dev_regs->nxge_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_regh);
		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for Neptune global reg failed"));
			goto nxge_map_regs_fail1;
		}

		/* set up the msi/msi-x mapped register */
		(void) ddi_dev_regsize(nxgep->dip, 2, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: msix size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 2,
		    (caddr_t *)&(dev_regs->nxge_msix_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_msix_regh);
		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for msi reg failed"));
			goto nxge_map_regs_fail2;
		}

		/* set up the vio region mapped register */
		(void) ddi_dev_regsize(nxgep->dip, 3, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: vio size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 3,
		    (caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh);
		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for nxge vio reg failed"));
			goto nxge_map_regs_fail3;
		}
		nxgep->dev_regs = dev_regs;

		NPI_PCI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_pciregh);
		NPI_PCI_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_pciregp);
		NPI_MSI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_msix_regh);
		NPI_MSI_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_msix_regp);

		NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
		NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp);

		NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
		NPI_REG_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_regp);

		NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh);
		NPI_VREG_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_vir_regp);

		break;

	case N2_NIU:
		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "ddi_map_regs, NIU"));
		/*
		 * Set up the device mapped register (FWARC 2006/556)
		 * (changed back to 1: reg starts at 1!)
		 */
		(void) ddi_dev_regsize(nxgep->dip, 1, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: dev size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 1,
		    (caddr_t *)&(dev_regs->nxge_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_regh);
		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for N2/NIU, global reg failed "));
			goto nxge_map_regs_fail1;
		}

		/* set up the first vio region mapped register */
		(void) ddi_dev_regsize(nxgep->dip, 2, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: vio (1) size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 2,
		    (caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh);
		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for nxge vio reg failed"));
			goto nxge_map_regs_fail2;
		}
		/* set up the second vio region mapped register */
		(void) ddi_dev_regsize(nxgep->dip, 3, &regsize);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "nxge_map_regs: vio (3) size 0x%x", regsize));
		ddi_status = ddi_regs_map_setup(nxgep->dip, 3,
		    (caddr_t *)&(dev_regs->nxge_vir2_regp), 0, 0,
		    &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir2_regh);
		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "ddi_map_regs for nxge vio2 reg failed"));
			goto nxge_map_regs_fail3;
		}
		nxgep->dev_regs = dev_regs;

		NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
		NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp);

		NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
		NPI_REG_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_regp);

		NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh);
		NPI_VREG_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_vir_regp);

		NPI_V2REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir2_regh);
		NPI_V2REG_ADD_HANDLE_SET(nxgep,
		    (npi_reg_ptr_t)dev_regs->nxge_vir2_regp);

		break;
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "nxge_map_reg: hardware addr 0x%0llx "
	    " handle 0x%0llx", dev_regs->nxge_regp, dev_regs->nxge_regh));

	goto nxge_map_regs_exit;
nxge_map_regs_fail3:
	if (dev_regs->nxge_msix_regh) {
		ddi_regs_map_free(&dev_regs->nxge_msix_regh);
	}
	if (dev_regs->nxge_vir_regh) {
		ddi_regs_map_free(&dev_regs->nxge_vir_regh);
	}
nxge_map_regs_fail2:
	if (dev_regs->nxge_regh) {
		ddi_regs_map_free(&dev_regs->nxge_regh);
	}
nxge_map_regs_fail1:
	if (dev_regs->nxge_pciregh) {
		ddi_regs_map_free(&dev_regs->nxge_pciregh);
	}
nxge_map_regs_fail0:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "Freeing register set memory"));
	kmem_free(dev_regs, sizeof (dev_regs_t));

nxge_map_regs_exit:
	if (ddi_status != DDI_SUCCESS)
		status |= (NXGE_ERROR | NXGE_DDI_FAILED);
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_map_regs"));
	return (status);
}

static void
nxge_unmap_regs(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unmap_regs"));

	if (isLDOMguest(nxgep)) {
		nxge_guest_regs_map_free(nxgep);
		return;
	}

	if (nxgep->dev_regs) {
		if (nxgep->dev_regs->nxge_pciregh) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "==> nxge_unmap_regs: bus"));
			ddi_regs_map_free(&nxgep->dev_regs->nxge_pciregh);
			nxgep->dev_regs->nxge_pciregh = NULL;
		}
		if (nxgep->dev_regs->nxge_regh) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "==> nxge_unmap_regs: device registers"));
			ddi_regs_map_free(&nxgep->dev_regs->nxge_regh);
			nxgep->dev_regs->nxge_regh = NULL;
		}
		if (nxgep->dev_regs->nxge_msix_regh) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "==> nxge_unmap_regs: device interrupts"));
			ddi_regs_map_free(&nxgep->dev_regs->nxge_msix_regh);
			nxgep->dev_regs->nxge_msix_regh = NULL;
		}
		if (nxgep->dev_regs->nxge_vir_regh) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "==> nxge_unmap_regs: vio region"));
			ddi_regs_map_free(&nxgep->dev_regs->nxge_vir_regh);
			nxgep->dev_regs->nxge_vir_regh = NULL;
		}
		if (nxgep->dev_regs->nxge_vir2_regh) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "==> nxge_unmap_regs: vio2 region"));
			ddi_regs_map_free(&nxgep->dev_regs->nxge_vir2_regh);
			nxgep->dev_regs->nxge_vir2_regh = NULL;
		}

		kmem_free(nxgep->dev_regs, sizeof (dev_regs_t));
		nxgep->dev_regs = NULL;
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_unmap_regs"));
}

static nxge_status_t
nxge_setup_mutexes(p_nxge_t nxgep)
{
	int ddi_status = DDI_SUCCESS;
	nxge_status_t status = NXGE_OK;
	nxge_classify_t *classify_ptr;
	int partition;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_mutexes"));

	/*
	 * Get the interrupt cookie so the mutexes can be
	 * initialized.
	 */
	if (isLDOMguest(nxgep)) {
		nxgep->interrupt_cookie = 0;
	} else {
		ddi_status = ddi_get_iblock_cookie(nxgep->dip, 0,
		    &nxgep->interrupt_cookie);
		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "<== nxge_setup_mutexes: failed 0x%x",
			    ddi_status));
			goto nxge_setup_mutexes_exit;
		}
	}

	cv_init(&nxgep->poll_cv, NULL, CV_DRIVER, NULL);
	MUTEX_INIT(&nxgep->poll_lock, NULL,
	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);

	/*
	 * Initialize mutexes for this device.
	 */
	MUTEX_INIT(nxgep->genlock, NULL,
	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	MUTEX_INIT(&nxgep->ouraddr_lock, NULL,
	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	MUTEX_INIT(&nxgep->mif_lock, NULL,
	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	MUTEX_INIT(&nxgep->group_lock, NULL,
	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	RW_INIT(&nxgep->filter_lock, NULL,
	    RW_DRIVER, (void *)nxgep->interrupt_cookie);

	classify_ptr = &nxgep->classifier;
	/*
	 * FFLP mutexes are never used in interrupt context,
	 * as an FFLP operation can take a very long time to
	 * complete and is therefore not suitable for invocation
	 * from an interrupt handler.
	 */
	MUTEX_INIT(&classify_ptr->tcam_lock, NULL,
	    NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
		MUTEX_INIT(&classify_ptr->fcram_lock, NULL,
		    NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
		for (partition = 0; partition < MAX_PARTITION; partition++) {
			MUTEX_INIT(&classify_ptr->hash_lock[partition], NULL,
			    NXGE_MUTEX_DRIVER,
			    (void *)nxgep->interrupt_cookie);
		}
	}

nxge_setup_mutexes_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "<== nxge_setup_mutexes status = %x", status));

	if (ddi_status != DDI_SUCCESS)
		status |= (NXGE_ERROR | NXGE_DDI_FAILED);

	return (status);
}

static void
nxge_destroy_mutexes(p_nxge_t nxgep)
{
	int partition;
	nxge_classify_t *classify_ptr;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_mutexes"));
	RW_DESTROY(&nxgep->filter_lock);
	MUTEX_DESTROY(&nxgep->group_lock);
	MUTEX_DESTROY(&nxgep->mif_lock);
	MUTEX_DESTROY(&nxgep->ouraddr_lock);
	MUTEX_DESTROY(nxgep->genlock);

	classify_ptr = &nxgep->classifier;
	MUTEX_DESTROY(&classify_ptr->tcam_lock);

	/* Destroy all polling resources. */
	MUTEX_DESTROY(&nxgep->poll_lock);
	cv_destroy(&nxgep->poll_cv);

	/* free data structures, based on HW type */
	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
		MUTEX_DESTROY(&classify_ptr->fcram_lock);
		for (partition = 0; partition < MAX_PARTITION; partition++) {
			MUTEX_DESTROY(&classify_ptr->hash_lock[partition]);
		}
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_mutexes"));
}

nxge_status_t
nxge_init(p_nxge_t nxgep)
{
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_init"));

	if (nxgep->drv_state & STATE_HW_INITIALIZED) {
		return (status);
	}

	/*
	 * Allocate system memory for the receive/transmit buffer blocks
	 * and receive/transmit descriptor rings.
	 */
	status = nxge_alloc_mem_pool(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "alloc mem failed\n"));
		goto nxge_init_fail1;
	}

	if (!isLDOMguest(nxgep)) {
		/*
		 * Initialize and enable the TXC registers.
		 * (Globally enable the Tx controller,
		 * enable the port, configure the dma channel bitmap,
		 * configure the max burst size).
		 */
		status = nxge_txc_init(nxgep);
		if (status != NXGE_OK) {
			NXGE_ERROR_MSG((nxgep,
			    NXGE_ERR_CTL, "init txc failed\n"));
			goto nxge_init_fail2;
		}
	}

	/*
	 * Initialize and enable TXDMA channels.
	 */
	status = nxge_init_txdma_channels(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init txdma failed\n"));
		goto nxge_init_fail3;
	}

	/*
	 * Initialize and enable RXDMA channels.
	 */
	status = nxge_init_rxdma_channels(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init rxdma failed\n"));
		goto nxge_init_fail4;
	}

	/*
	 * The guest domain is now done.
	 */
	if (isLDOMguest(nxgep)) {
		nxgep->drv_state |= STATE_HW_INITIALIZED;
		goto nxge_init_exit;
	}

	/*
	 * Initialize TCAM and FCRAM (Neptune).
	 */
	status = nxge_classify_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "init classify failed\n"));
		goto nxge_init_fail5;
	}

	/*
	 * Initialize ZCP.
	 */
	status = nxge_zcp_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init ZCP failed\n"));
		goto nxge_init_fail5;
	}

	/*
	 * Initialize IPP.
	 */
	status = nxge_ipp_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init IPP failed\n"));
		goto nxge_init_fail5;
	}

	/*
	 * Initialize the MAC block.
	 */
	status = nxge_mac_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init MAC failed\n"));
		goto nxge_init_fail5;
	}

	nxge_intrs_enable(nxgep); /* XXX What changes do I need to make here? */

	/*
	 * Enable hardware interrupts.
	 */
	nxge_intr_hw_enable(nxgep);
	nxgep->drv_state |= STATE_HW_INITIALIZED;

	goto nxge_init_exit;

nxge_init_fail5:
	nxge_uninit_rxdma_channels(nxgep);
nxge_init_fail4:
	nxge_uninit_txdma_channels(nxgep);
nxge_init_fail3:
	if (!isLDOMguest(nxgep)) {
		(void) nxge_txc_uninit(nxgep);
	}
nxge_init_fail2:
	nxge_free_mem_pool(nxgep);
nxge_init_fail1:
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "<== nxge_init status (failed) = 0x%08x", status));
	return (status);

nxge_init_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_init status = 0x%08x",
	    status));
	return (status);
}


timeout_id_t
nxge_start_timer(p_nxge_t nxgep, fptrv_t func, int msec)
{
	if ((nxgep->suspended == 0) || (nxgep->suspended == DDI_RESUME)) {
		return (timeout(func, (caddr_t)nxgep,
		    drv_usectohz(1000 * msec)));
	}
	return (NULL);
}

/*ARGSUSED*/
void
nxge_stop_timer(p_nxge_t nxgep, timeout_id_t timerid)
{
	if (timerid) {
		(void) untimeout(timerid);
	}
}
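/*
 * Typical usage (illustrative; nxge_check_hw_state stands in for any
 * fptrv_t callback): arm a one-second timer and cancel it later.
 *
 *	nxgep->nxge_timerid =
 *	    nxge_start_timer(nxgep, nxge_check_hw_state, 1000);
 *	...
 *	nxge_stop_timer(nxgep, nxgep->nxge_timerid);
 *	nxgep->nxge_timerid = 0;
 */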

void
nxge_uninit(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_uninit"));

	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "==> nxge_uninit: not initialized"));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "<== nxge_uninit"));
		return;
	}

	/* stop timer */
	if (nxgep->nxge_timerid) {
		nxge_stop_timer(nxgep, nxgep->nxge_timerid);
		nxgep->nxge_timerid = 0;
	}

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
	(void) nxge_intr_hw_disable(nxgep);

	/*
	 * Reset the receive MAC side.
	 */
	(void) nxge_rx_mac_disable(nxgep);

	/* Disable and soft reset the IPP */
	if (!isLDOMguest(nxgep))
		(void) nxge_ipp_disable(nxgep);

	/* Free classification resources */
	(void) nxge_classify_uninit(nxgep);

	/*
	 * Reset the transmit/receive DMA side.
	 */
	(void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_STOP);
	(void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP);

	nxge_uninit_txdma_channels(nxgep);
	nxge_uninit_rxdma_channels(nxgep);

	/*
	 * Reset the transmit MAC side.
	 */
	(void) nxge_tx_mac_disable(nxgep);

	nxge_free_mem_pool(nxgep);

	/*
	 * Start the timer if the reset flag is not set.
	 * If the reset flag is set, the link monitor
	 * will not be started, in order to stop further bus
	 * activity coming from this interface.
	 * The driver will start the monitor function
	 * if the interface is initialized again later.
	 */
	if (!nxge_peu_reset_enable) {
		(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
	}

	nxgep->drv_state &= ~STATE_HW_INITIALIZED;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_uninit: "
	    "nxge_mblks_pending %d", nxge_mblks_pending));
}

void
nxge_get64(p_nxge_t nxgep, p_mblk_t mp)
{
#if defined(__i386)
	size_t reg;
#else
	uint64_t reg;
#endif
	uint64_t regdata;
	int i, retry;

	bcopy((char *)mp->b_rptr, (char *)&reg, sizeof (uint64_t));
	regdata = 0;
	retry = 1;

	for (i = 0; i < retry; i++) {
		NXGE_REG_RD64(nxgep->npi_handle, reg, &regdata);
	}
	bcopy((char *)&regdata, (char *)mp->b_rptr, sizeof (uint64_t));
}

void
nxge_put64(p_nxge_t nxgep, p_mblk_t mp)
{
#if defined(__i386)
	size_t reg;
#else
	uint64_t reg;
#endif
	uint64_t buf[2];

	bcopy((char *)mp->b_rptr, (char *)&buf[0], 2 * sizeof (uint64_t));
#if defined(__i386)
	reg = (size_t)buf[0];
#else
	reg = buf[0];
#endif

	NXGE_NPI_PIO_WRITE64(nxgep->npi_handle, reg, buf[1]);
}
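/*
 * Illustrative note (inferred from the bcopy sizes above, not a
 * documented interface): the mblk handed to nxge_get64/nxge_put64 is
 * expected to carry raw 64-bit words at b_rptr:
 *
 *	bytes 0-7  : register offset
 *	bytes 8-15 : data to write (nxge_put64 only)
 */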

nxge_os_mutex_t nxgedebuglock;
int nxge_debug_init = 0;

/*ARGSUSED*/
/*VARARGS*/
void
nxge_debug_msg(p_nxge_t nxgep, uint64_t level, char *fmt, ...)
{
	char msg_buffer[1048];
	char prefix_buffer[32];
	int instance;
	uint64_t debug_level;
	int cmn_level = CE_CONT;
	va_list ap;

	/* In case a developer has changed nxge_debug_level. */
	if (nxgep && nxgep->nxge_debug_level != nxge_debug_level)
		nxgep->nxge_debug_level = nxge_debug_level;

	debug_level = (nxgep == NULL) ? nxge_debug_level :
	    nxgep->nxge_debug_level;

	if ((level & debug_level) ||
	    (level == NXGE_NOTE) ||
	    (level == NXGE_ERR_CTL)) {
		/* do the msg processing */
		if (nxge_debug_init == 0) {
			MUTEX_INIT(&nxgedebuglock, NULL, MUTEX_DRIVER, NULL);
			nxge_debug_init = 1;
		}

		MUTEX_ENTER(&nxgedebuglock);

		if ((level & NXGE_NOTE)) {
			cmn_level = CE_NOTE;
		}

		if (level & NXGE_ERR_CTL) {
			cmn_level = CE_WARN;
		}

		va_start(ap, fmt);
		(void) vsprintf(msg_buffer, fmt, ap);
		va_end(ap);
		if (nxgep == NULL) {
			instance = -1;
			(void) sprintf(prefix_buffer, "%s :", "nxge");
		} else {
			instance = nxgep->instance;
			(void) sprintf(prefix_buffer,
			    "%s%d :", "nxge", instance);
		}

		MUTEX_EXIT(&nxgedebuglock);
		cmn_err(cmn_level, "!%s %s\n",
		    prefix_buffer, msg_buffer);
	}
}

char *
nxge_dump_packet(char *addr, int size)
{
	uchar_t *ap = (uchar_t *)addr;
	int i;
	static char etherbuf[1024];
	char *cp = etherbuf;
	char digits[] = "0123456789abcdef";

	if (!size)
		size = 60;

	if (size > MAX_DUMP_SZ) {
		/* Dump the leading bytes */
		for (i = 0; i < MAX_DUMP_SZ/2; i++) {
			if (*ap > 0x0f)
				*cp++ = digits[*ap >> 4];
			*cp++ = digits[*ap++ & 0xf];
			*cp++ = ':';
		}
		for (i = 0; i < 20; i++)
			*cp++ = '.';
		/* Dump the last MAX_DUMP_SZ/2 bytes */
		ap = (uchar_t *)(addr + (size - MAX_DUMP_SZ/2));
		for (i = 0; i < MAX_DUMP_SZ/2; i++) {
			if (*ap > 0x0f)
				*cp++ = digits[*ap >> 4];
			*cp++ = digits[*ap++ & 0xf];
			*cp++ = ':';
		}
	} else {
		for (i = 0; i < size; i++) {
			if (*ap > 0x0f)
				*cp++ = digits[*ap >> 4];
			*cp++ = digits[*ap++ & 0xf];
			*cp++ = ':';
		}
	}
	*--cp = 0;
	return (etherbuf);
}
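/*
 * Example use (illustrative): dump the start of a packet from a
 * debug message, assuming an mblk mp is in scope.
 *
 *	NXGE_DEBUG_MSG((nxgep, TX_CTL, "pkt: %s",
 *	    nxge_dump_packet((char *)mp->b_rptr, MBLKL(mp))));
 */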

#ifdef NXGE_DEBUG
static void
nxge_test_map_regs(p_nxge_t nxgep)
{
	ddi_acc_handle_t cfg_handle;
	p_pci_cfg_t cfg_ptr;
	ddi_acc_handle_t dev_handle;
	char *dev_ptr;
	ddi_acc_handle_t pci_config_handle;
	uint32_t regval;
	int i;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_test_map_regs"));

	dev_handle = nxgep->dev_regs->nxge_regh;
	dev_ptr = (char *)nxgep->dev_regs->nxge_regp;

	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
		cfg_handle = nxgep->dev_regs->nxge_pciregh;
		cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp;

		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "Neptune PCI regp cfg_ptr 0x%llx", (char *)cfg_ptr));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "Neptune PCI cfg_ptr vendor id ptr 0x%llx",
		    &cfg_ptr->vendorid));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "\tvendorid 0x%x devid 0x%x",
		    NXGE_PIO_READ16(cfg_handle, &cfg_ptr->vendorid, 0),
		    NXGE_PIO_READ16(cfg_handle, &cfg_ptr->devid, 0)));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "PCI BAR: base 0x%x base14 0x%x base 18 0x%x "
		    "bar1c 0x%x",
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base14, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base18, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base1c, 0)));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "\nNeptune PCI BAR: base20 0x%x base24 0x%x "
		    "base 28 0x%x bar2c 0x%x\n",
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base20, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base24, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base28, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base2c, 0)));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "\nNeptune PCI BAR: base30 0x%x\n",
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base30, 0)));

		cfg_handle = nxgep->dev_regs->nxge_pciregh;
		cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp;
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "first 0x%llx second 0x%llx third 0x%llx "
		    "last 0x%llx ",
		    NXGE_PIO_READ64(dev_handle,
		    (uint64_t *)(dev_ptr + 0), 0),
		    NXGE_PIO_READ64(dev_handle,
		    (uint64_t *)(dev_ptr + 8), 0),
		    NXGE_PIO_READ64(dev_handle,
		    (uint64_t *)(dev_ptr + 16), 0),
		    NXGE_PIO_READ64(dev_handle,
		    (uint64_t *)(dev_ptr + 24), 0)));
	}
}

#endif

static void
nxge_suspend(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_suspend"));

	nxge_intrs_disable(nxgep);
	nxge_destroy_dev(nxgep);

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_suspend"));
}

static nxge_status_t
nxge_resume(p_nxge_t nxgep)
{
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_resume"));

	nxgep->suspended = DDI_RESUME;
	(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
	(void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_START);
	(void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_START);
	(void) nxge_rx_mac_enable(nxgep);
	(void) nxge_tx_mac_enable(nxgep);
	nxge_intrs_enable(nxgep);
	nxgep->suspended = 0;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "<== nxge_resume status = 0x%x", status));
	return (status);
}

static nxge_status_t
nxge_setup_dev(p_nxge_t nxgep)
{
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_dev port %d",
	    nxgep->mac.portnum));

	status = nxge_link_init(nxgep);

	if (fm_check_acc_handle(nxgep->dev_regs->nxge_regh) != DDI_FM_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "port%d Bad register acc handle", nxgep->mac.portnum));
		status = NXGE_ERROR;
	}

	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " nxge_setup_dev status "
		    "(xcvr init 0x%08x)", status));
		goto nxge_setup_dev_exit;
	}

nxge_setup_dev_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "<== nxge_setup_dev port %d status = 0x%08x",
	    nxgep->mac.portnum, status));

	return (status);
}

static void
nxge_destroy_dev(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_dev"));

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);

	(void) nxge_hw_stop(nxgep);

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_dev"));
}

static nxge_status_t
nxge_setup_system_dma_pages(p_nxge_t nxgep)
{
	int ddi_status = DDI_SUCCESS;
	uint_t count;
	ddi_dma_cookie_t cookie;
	uint_t iommu_pagesize;
	nxge_status_t status = NXGE_OK;

	NXGE_ERROR_MSG((nxgep, DDI_CTL, "==> nxge_setup_system_dma_pages"));
	nxgep->sys_page_sz = ddi_ptob(nxgep->dip, (ulong_t)1);
	if (nxgep->niu_type != N2_NIU) {
		iommu_pagesize = dvma_pagesize(nxgep->dip);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    " nxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
		    " default_block_size %d iommu_pagesize %d",
		    nxgep->sys_page_sz,
		    ddi_ptob(nxgep->dip, (ulong_t)1),
		    nxgep->rx_default_block_size,
		    iommu_pagesize));

		if (iommu_pagesize != 0) {
		if (iommu_pagesize != 0) {
			if (nxgep->sys_page_sz == iommu_pagesize) {
				if (iommu_pagesize > 0x4000)
					nxgep->sys_page_sz = 0x4000;
			} else {
				if (nxgep->sys_page_sz > iommu_pagesize)
					nxgep->sys_page_sz = iommu_pagesize;
			}
		}
	}
	nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1);
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "==> nxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
	    "default_block_size %d page mask %d",
	    nxgep->sys_page_sz,
	    ddi_ptob(nxgep->dip, (ulong_t)1),
	    nxgep->rx_default_block_size,
	    nxgep->sys_page_mask));

	switch (nxgep->sys_page_sz) {
	default:
		nxgep->sys_page_sz = 0x1000;
		nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1);
		nxgep->rx_default_block_size = 0x1000;
		nxgep->rx_bksize_code = RBR_BKSIZE_4K;
		break;
	case 0x1000:
		nxgep->rx_default_block_size = 0x1000;
		nxgep->rx_bksize_code = RBR_BKSIZE_4K;
		break;
	case 0x2000:
		nxgep->rx_default_block_size = 0x2000;
		nxgep->rx_bksize_code = RBR_BKSIZE_8K;
		break;
	case 0x4000:
		nxgep->rx_default_block_size = 0x4000;
		nxgep->rx_bksize_code = RBR_BKSIZE_16K;
		break;
	case 0x8000:
		nxgep->rx_default_block_size = 0x8000;
		nxgep->rx_bksize_code = RBR_BKSIZE_32K;
		break;
	}

#ifndef USE_RX_BIG_BUF
	nxge_rx_dma_attr.dma_attr_align = nxgep->sys_page_sz;
#else
	nxgep->rx_default_block_size = 0x2000;
	nxgep->rx_bksize_code = RBR_BKSIZE_8K;
#endif
	/*
	 * Get the system DMA burst size.
	 */
	ddi_status = ddi_dma_alloc_handle(nxgep->dip, &nxge_tx_dma_attr,
	    DDI_DMA_DONTWAIT, 0,
	    &nxgep->dmasparehandle);
	if (ddi_status != DDI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "ddi_dma_alloc_handle: failed "
		    " status 0x%x", ddi_status));
		goto nxge_get_soft_properties_exit;
	}

	ddi_status = ddi_dma_addr_bind_handle(nxgep->dmasparehandle, NULL,
	    (caddr_t)nxgep->dmasparehandle,
	    sizeof (nxgep->dmasparehandle),
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, 0,
	    &cookie, &count);
	if (ddi_status != DDI_DMA_MAPPED) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "Binding spare handle to find system"
		    " burstsize failed."));
		ddi_status = DDI_FAILURE;
		goto nxge_get_soft_properties_fail1;
	}

	nxgep->sys_burst_sz = ddi_dma_burstsizes(nxgep->dmasparehandle);
	(void) ddi_dma_unbind_handle(nxgep->dmasparehandle);

nxge_get_soft_properties_fail1:
	ddi_dma_free_handle(&nxgep->dmasparehandle);

nxge_get_soft_properties_exit:

	if (ddi_status != DDI_SUCCESS)
		status |= (NXGE_ERROR | NXGE_DDI_FAILED);

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "<== nxge_setup_system_dma_pages status = 0x%08x", status));
	return (status);
}

static nxge_status_t
nxge_alloc_mem_pool(p_nxge_t nxgep)
{
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_alloc_mem_pool"));

	status = nxge_alloc_rx_mem_pool(nxgep);
	if (status != NXGE_OK) {
		return (NXGE_ERROR);
	}

	status = nxge_alloc_tx_mem_pool(nxgep);
	if (status != NXGE_OK) {
		nxge_free_rx_mem_pool(nxgep);
		return (NXGE_ERROR);
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_alloc_mem_pool"));
	return (NXGE_OK);
}

static void
nxge_free_mem_pool(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_mem_pool"));

	nxge_free_rx_mem_pool(nxgep);
	nxge_free_tx_mem_pool(nxgep);

	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_mem_pool"));
}

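/*
 * Layout of the per-channel receive control area computed in
 * nxge_alloc_rx_mem_pool() below (summary of the code, not new policy):
 * the RBR descriptors, the RCR completion entries and the mailbox are
 * carved out of a single block,
 *
 *	size = (rbr_size + rbr_spare) * sizeof (rx_desc_t)
 *	     + rcr_size * sizeof (rcr_entry_t)
 *	     + sizeof (rxdma_mailbox_t)
 *
 * and each piece must start on a 64-byte boundary.
 */
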
nxge_status_t
nxge_alloc_rx_mem_pool(p_nxge_t nxgep)
{
	uint32_t rdc_max;
	p_nxge_dma_pt_cfg_t p_all_cfgp;
	p_nxge_hw_pt_cfg_t p_cfgp;
	p_nxge_dma_pool_t dma_poolp;
	p_nxge_dma_common_t *dma_buf_p;
	p_nxge_dma_pool_t dma_cntl_poolp;
	p_nxge_dma_common_t *dma_cntl_p;
	uint32_t *num_chunks;	/* per dma */
	nxge_status_t status = NXGE_OK;

	uint32_t nxge_port_rbr_size;
	uint32_t nxge_port_rbr_spare_size;
	uint32_t nxge_port_rcr_size;
	uint32_t rx_cntl_alloc_size;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_mem_pool"));

	p_all_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
	rdc_max = NXGE_MAX_RDCS;

	/*
	 * Allocate memory for the common DMA data structures.
	 */
	dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t),
	    KM_SLEEP);
	dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
	    sizeof (p_nxge_dma_common_t) * rdc_max, KM_SLEEP);

	dma_cntl_poolp = (p_nxge_dma_pool_t)
	    KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP);
	dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
	    sizeof (p_nxge_dma_common_t) * rdc_max, KM_SLEEP);

	num_chunks = (uint32_t *)KMEM_ZALLOC(
	    sizeof (uint32_t) * rdc_max, KM_SLEEP);

	/*
	 * Assume that each DMA channel will be configured with the
	 * default block size. RBR block counts are rounded up to a
	 * multiple of the post batch count (16).
	 */
	nxge_port_rbr_size = p_all_cfgp->rbr_size;
	nxge_port_rcr_size = p_all_cfgp->rcr_size;

	if (!nxge_port_rbr_size) {
		nxge_port_rbr_size = NXGE_RBR_RBB_DEFAULT;
	}
	if (nxge_port_rbr_size % NXGE_RXDMA_POST_BATCH) {
		nxge_port_rbr_size = (NXGE_RXDMA_POST_BATCH *
		    (nxge_port_rbr_size / NXGE_RXDMA_POST_BATCH + 1));
	}

	p_all_cfgp->rbr_size = nxge_port_rbr_size;
	nxge_port_rbr_spare_size = nxge_rbr_spare_size;

	if (nxge_port_rbr_spare_size % NXGE_RXDMA_POST_BATCH) {
		nxge_port_rbr_spare_size = (NXGE_RXDMA_POST_BATCH *
		    (nxge_port_rbr_spare_size / NXGE_RXDMA_POST_BATCH + 1));
	}

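	/*
	 * Example of the rounding above (illustrative): a requested RBR
	 * size of 1000 is not a multiple of NXGE_RXDMA_POST_BATCH (16),
	 * so it is rounded up to 16 * (1000/16 + 1) = 1008 blocks.
	 */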
	if (nxge_port_rbr_size > RBR_DEFAULT_MAX_BLKS) {
		NXGE_DEBUG_MSG((nxgep, MEM_CTL,
		    "nxge_alloc_rx_mem_pool: RBR size too high %d, "
		    "set to default %d",
		    nxge_port_rbr_size, RBR_DEFAULT_MAX_BLKS));
		nxge_port_rbr_size = RBR_DEFAULT_MAX_BLKS;
	}
	if (nxge_port_rcr_size > RCR_DEFAULT_MAX) {
		NXGE_DEBUG_MSG((nxgep, MEM_CTL,
		    "nxge_alloc_rx_mem_pool: RCR too high %d, "
		    "set to default %d",
		    nxge_port_rcr_size, RCR_DEFAULT_MAX));
		nxge_port_rcr_size = RCR_DEFAULT_MAX;
	}

	/*
	 * N2/NIU has limitations on descriptor sizes: data buffers must
	 * use contiguous memory allocation (contig_mem_alloc), which is
	 * limited to 4M, and control buffers must be allocated with the
	 * DDI/DKI memory allocation functions.
	 */
#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
	if (nxgep->niu_type == N2_NIU) {
		nxge_port_rbr_spare_size = 0;
		if ((nxge_port_rbr_size > NXGE_NIU_CONTIG_RBR_MAX) ||
		    (!ISP2(nxge_port_rbr_size))) {
			nxge_port_rbr_size = NXGE_NIU_CONTIG_RBR_MAX;
		}
		if ((nxge_port_rcr_size > NXGE_NIU_CONTIG_RCR_MAX) ||
		    (!ISP2(nxge_port_rcr_size))) {
			nxge_port_rcr_size = NXGE_NIU_CONTIG_RCR_MAX;
		}
	}
#endif

	/*
	 * Addresses of the receive block ring, the receive completion ring
	 * and the mailbox must all be cache-aligned (64 bytes).
	 */
	rx_cntl_alloc_size = nxge_port_rbr_size + nxge_port_rbr_spare_size;
	rx_cntl_alloc_size *= (sizeof (rx_desc_t));
	rx_cntl_alloc_size += (sizeof (rcr_entry_t) * nxge_port_rcr_size);
	rx_cntl_alloc_size += sizeof (rxdma_mailbox_t);

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_alloc_rx_mem_pool: "
	    "nxge_port_rbr_size = %d nxge_port_rbr_spare_size = %d "
	    "nxge_port_rcr_size = %d "
	    "rx_cntl_alloc_size = %d",
	    nxge_port_rbr_size, nxge_port_rbr_spare_size,
	    nxge_port_rcr_size,
	    rx_cntl_alloc_size));

#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
	if (nxgep->niu_type == N2_NIU) {
		uint32_t rx_buf_alloc_size = (nxgep->rx_default_block_size *
		    (nxge_port_rbr_size + nxge_port_rbr_spare_size));

		if (!ISP2(rx_buf_alloc_size)) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_alloc_rx_mem_pool: "
			    " must be power of 2"));
			status |= (NXGE_ERROR | NXGE_DDI_FAILED);
			goto nxge_alloc_rx_mem_pool_exit;
		}

		if (rx_buf_alloc_size > (1 << 22)) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_alloc_rx_mem_pool: "
			    " limit size to 4M"));
			status |= (NXGE_ERROR | NXGE_DDI_FAILED);
			goto nxge_alloc_rx_mem_pool_exit;
		}

		if (rx_cntl_alloc_size < 0x2000) {
			rx_cntl_alloc_size = 0x2000;
		}
	}
#endif
	nxgep->nxge_port_rbr_size = nxge_port_rbr_size;
	nxgep->nxge_port_rcr_size = nxge_port_rcr_size;
	nxgep->nxge_port_rbr_spare_size = nxge_port_rbr_spare_size;
	nxgep->nxge_port_rx_cntl_alloc_size = rx_cntl_alloc_size;

	dma_poolp->ndmas = p_cfgp->max_rdcs;
	dma_poolp->num_chunks = num_chunks;
	dma_poolp->buf_allocated = B_TRUE;
	nxgep->rx_buf_pool_p = dma_poolp;
	dma_poolp->dma_buf_pool_p = dma_buf_p;

	dma_cntl_poolp->ndmas = p_cfgp->max_rdcs;
	dma_cntl_poolp->buf_allocated = B_TRUE;
	nxgep->rx_cntl_pool_p = dma_cntl_poolp;
	dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p;

	/* Allocate the receive rings, too. */
	nxgep->rx_rbr_rings =
	    KMEM_ZALLOC(sizeof (rx_rbr_rings_t), KM_SLEEP);
	nxgep->rx_rbr_rings->rbr_rings =
	    KMEM_ZALLOC(sizeof (p_rx_rbr_ring_t) * rdc_max, KM_SLEEP);
	nxgep->rx_rcr_rings =
	    KMEM_ZALLOC(sizeof (rx_rcr_rings_t), KM_SLEEP);
	nxgep->rx_rcr_rings->rcr_rings =
	    KMEM_ZALLOC(sizeof (p_rx_rcr_ring_t) * rdc_max, KM_SLEEP);
	nxgep->rx_mbox_areas_p =
	    KMEM_ZALLOC(sizeof (rx_mbox_areas_t), KM_SLEEP);
	nxgep->rx_mbox_areas_p->rxmbox_areas =
	    KMEM_ZALLOC(sizeof (p_rx_mbox_t) * rdc_max, KM_SLEEP);

	nxgep->rx_rbr_rings->ndmas = nxgep->rx_rcr_rings->ndmas =
	    p_cfgp->max_rdcs;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
	    "<== nxge_alloc_rx_mem_pool:status 0x%08x", status));

nxge_alloc_rx_mem_pool_exit:
	return (status);
}

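/*
 * Note (summary of the functions below): only the per-port bookkeeping
 * is allocated above; the actual per-channel buffers are allocated by
 * nxge_alloc_rxb() when a channel is mapped and released by
 * nxge_free_rxb(). Data buffers are not freed while received mblks are
 * still outstanding; see the BUF_ALLOCATED_WAIT_FREE handling in
 * nxge_dma_free_rx_data_buf().
 */
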
2313 * 2314 * Arguments: 2315 * nxgep 2316 * channel The channel to map into our kernel space. 2317 * 2318 * Notes: 2319 * 2320 * NPI function calls: 2321 * 2322 * NXGE function calls: 2323 * 2324 * Registers accessed: 2325 * 2326 * Context: 2327 * 2328 * Taking apart: 2329 * 2330 * Open questions: 2331 * 2332 */ 2333 nxge_status_t 2334 nxge_alloc_rxb( 2335 p_nxge_t nxgep, 2336 int channel) 2337 { 2338 size_t rx_buf_alloc_size; 2339 nxge_status_t status = NXGE_OK; 2340 2341 nxge_dma_common_t **data; 2342 nxge_dma_common_t **control; 2343 uint32_t *num_chunks; 2344 2345 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rbb")); 2346 2347 /* 2348 * Allocate memory for the receive buffers and descriptor rings. 2349 * Replace these allocation functions with the interface functions 2350 * provided by the partition manager if/when they are available. 2351 */ 2352 2353 /* 2354 * Allocate memory for the receive buffer blocks. 2355 */ 2356 rx_buf_alloc_size = (nxgep->rx_default_block_size * 2357 (nxgep->nxge_port_rbr_size + nxgep->nxge_port_rbr_spare_size)); 2358 2359 data = &nxgep->rx_buf_pool_p->dma_buf_pool_p[channel]; 2360 num_chunks = &nxgep->rx_buf_pool_p->num_chunks[channel]; 2361 2362 if ((status = nxge_alloc_rx_buf_dma( 2363 nxgep, channel, data, rx_buf_alloc_size, 2364 nxgep->rx_default_block_size, num_chunks)) != NXGE_OK) { 2365 return (status); 2366 } 2367 2368 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_alloc_rxb(): " 2369 "dma %d dma_buf_p %llx &dma_buf_p %llx", channel, *data, data)); 2370 2371 /* 2372 * Allocate memory for descriptor rings and mailbox. 2373 */ 2374 control = &nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel]; 2375 2376 if ((status = nxge_alloc_rx_cntl_dma( 2377 nxgep, channel, control, nxgep->nxge_port_rx_cntl_alloc_size)) 2378 != NXGE_OK) { 2379 nxge_free_rx_cntl_dma(nxgep, *control); 2380 (*data)->buf_alloc_state |= BUF_ALLOCATED_WAIT_FREE; 2381 nxge_free_rx_buf_dma(nxgep, *data, *num_chunks); 2382 return (status); 2383 } 2384 2385 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2386 "<== nxge_alloc_rx_mem_pool:status 0x%08x", status)); 2387 2388 return (status); 2389 } 2390 2391 void 2392 nxge_free_rxb( 2393 p_nxge_t nxgep, 2394 int channel) 2395 { 2396 nxge_dma_common_t *data; 2397 nxge_dma_common_t *control; 2398 uint32_t num_chunks; 2399 2400 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rbb")); 2401 2402 data = nxgep->rx_buf_pool_p->dma_buf_pool_p[channel]; 2403 num_chunks = nxgep->rx_buf_pool_p->num_chunks[channel]; 2404 nxge_free_rx_buf_dma(nxgep, data, num_chunks); 2405 2406 nxgep->rx_buf_pool_p->dma_buf_pool_p[channel] = 0; 2407 nxgep->rx_buf_pool_p->num_chunks[channel] = 0; 2408 2409 control = nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel]; 2410 nxge_free_rx_cntl_dma(nxgep, control); 2411 2412 nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel] = 0; 2413 2414 KMEM_FREE(data, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 2415 KMEM_FREE(control, sizeof (nxge_dma_common_t)); 2416 2417 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_alloc_rbb")); 2418 } 2419 2420 static void 2421 nxge_free_rx_mem_pool(p_nxge_t nxgep) 2422 { 2423 int rdc_max = NXGE_MAX_RDCS; 2424 2425 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_rx_mem_pool")); 2426 2427 if (!nxgep->rx_buf_pool_p || !nxgep->rx_buf_pool_p->buf_allocated) { 2428 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2429 "<== nxge_free_rx_mem_pool " 2430 "(null rx buf pool or buf not allocated")); 2431 return; 2432 } 2433 if (!nxgep->rx_cntl_pool_p || !nxgep->rx_cntl_pool_p->buf_allocated) { 2434 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2435 "<== nxge_free_rx_mem_pool 
" 2436 "(null rx cntl buf pool or cntl buf not allocated")); 2437 return; 2438 } 2439 2440 KMEM_FREE(nxgep->rx_cntl_pool_p->dma_buf_pool_p, 2441 sizeof (p_nxge_dma_common_t) * rdc_max); 2442 KMEM_FREE(nxgep->rx_cntl_pool_p, sizeof (nxge_dma_pool_t)); 2443 2444 KMEM_FREE(nxgep->rx_buf_pool_p->num_chunks, 2445 sizeof (uint32_t) * rdc_max); 2446 KMEM_FREE(nxgep->rx_buf_pool_p->dma_buf_pool_p, 2447 sizeof (p_nxge_dma_common_t) * rdc_max); 2448 KMEM_FREE(nxgep->rx_buf_pool_p, sizeof (nxge_dma_pool_t)); 2449 2450 nxgep->rx_buf_pool_p = 0; 2451 nxgep->rx_cntl_pool_p = 0; 2452 2453 KMEM_FREE(nxgep->rx_rbr_rings->rbr_rings, 2454 sizeof (p_rx_rbr_ring_t) * rdc_max); 2455 KMEM_FREE(nxgep->rx_rbr_rings, sizeof (rx_rbr_rings_t)); 2456 KMEM_FREE(nxgep->rx_rcr_rings->rcr_rings, 2457 sizeof (p_rx_rcr_ring_t) * rdc_max); 2458 KMEM_FREE(nxgep->rx_rcr_rings, sizeof (rx_rcr_rings_t)); 2459 KMEM_FREE(nxgep->rx_mbox_areas_p->rxmbox_areas, 2460 sizeof (p_rx_mbox_t) * rdc_max); 2461 KMEM_FREE(nxgep->rx_mbox_areas_p, sizeof (rx_mbox_areas_t)); 2462 2463 nxgep->rx_rbr_rings = 0; 2464 nxgep->rx_rcr_rings = 0; 2465 nxgep->rx_mbox_areas_p = 0; 2466 2467 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_rx_mem_pool")); 2468 } 2469 2470 2471 static nxge_status_t 2472 nxge_alloc_rx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel, 2473 p_nxge_dma_common_t *dmap, 2474 size_t alloc_size, size_t block_size, uint32_t *num_chunks) 2475 { 2476 p_nxge_dma_common_t rx_dmap; 2477 nxge_status_t status = NXGE_OK; 2478 size_t total_alloc_size; 2479 size_t allocated = 0; 2480 int i, size_index, array_size; 2481 boolean_t use_kmem_alloc = B_FALSE; 2482 2483 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_buf_dma")); 2484 2485 rx_dmap = (p_nxge_dma_common_t) 2486 KMEM_ZALLOC(sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK, 2487 KM_SLEEP); 2488 2489 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2490 " alloc_rx_buf_dma rdc %d asize %x bsize %x bbuf %llx ", 2491 dma_channel, alloc_size, block_size, dmap)); 2492 2493 total_alloc_size = alloc_size; 2494 2495 #if defined(RX_USE_RECLAIM_POST) 2496 total_alloc_size = alloc_size + alloc_size/4; 2497 #endif 2498 2499 i = 0; 2500 size_index = 0; 2501 array_size = sizeof (alloc_sizes)/sizeof (size_t); 2502 while ((alloc_sizes[size_index] < alloc_size) && 2503 (size_index < array_size)) 2504 size_index++; 2505 if (size_index >= array_size) { 2506 size_index = array_size - 1; 2507 } 2508 2509 /* For Neptune, use kmem_alloc if the kmem flag is set. */ 2510 if (nxgep->niu_type != N2_NIU && nxge_use_kmem_alloc) { 2511 use_kmem_alloc = B_TRUE; 2512 #if defined(__i386) || defined(__amd64) 2513 size_index = 0; 2514 #endif 2515 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2516 "==> nxge_alloc_rx_buf_dma: " 2517 "Neptune use kmem_alloc() - size_index %d", 2518 size_index)); 2519 } 2520 2521 while ((allocated < total_alloc_size) && 2522 (size_index >= 0) && (i < NXGE_DMA_BLOCK)) { 2523 rx_dmap[i].dma_chunk_index = i; 2524 rx_dmap[i].block_size = block_size; 2525 rx_dmap[i].alength = alloc_sizes[size_index]; 2526 rx_dmap[i].orig_alength = rx_dmap[i].alength; 2527 rx_dmap[i].nblocks = alloc_sizes[size_index] / block_size; 2528 rx_dmap[i].dma_channel = dma_channel; 2529 rx_dmap[i].contig_alloc_type = B_FALSE; 2530 rx_dmap[i].kmem_alloc_type = B_FALSE; 2531 rx_dmap[i].buf_alloc_type = DDI_MEM_ALLOC; 2532 2533 /* 2534 * N2/NIU: data buffers must be contiguous as the driver 2535 * needs to call Hypervisor api to set up 2536 * logical pages. 
	while ((allocated < total_alloc_size) &&
	    (size_index >= 0) && (i < NXGE_DMA_BLOCK)) {
		rx_dmap[i].dma_chunk_index = i;
		rx_dmap[i].block_size = block_size;
		rx_dmap[i].alength = alloc_sizes[size_index];
		rx_dmap[i].orig_alength = rx_dmap[i].alength;
		rx_dmap[i].nblocks = alloc_sizes[size_index] / block_size;
		rx_dmap[i].dma_channel = dma_channel;
		rx_dmap[i].contig_alloc_type = B_FALSE;
		rx_dmap[i].kmem_alloc_type = B_FALSE;
		rx_dmap[i].buf_alloc_type = DDI_MEM_ALLOC;

		/*
		 * N2/NIU: data buffers must be contiguous as the driver
		 * needs to call the Hypervisor API to set up
		 * logical pages.
		 */
		if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) {
			rx_dmap[i].contig_alloc_type = B_TRUE;
			rx_dmap[i].buf_alloc_type = CONTIG_MEM_ALLOC;
		} else if (use_kmem_alloc) {
			/* For Neptune, use kmem_alloc */
			NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
			    "==> nxge_alloc_rx_buf_dma: "
			    "Neptune use kmem_alloc()"));
			rx_dmap[i].kmem_alloc_type = B_TRUE;
			rx_dmap[i].buf_alloc_type = KMEM_ALLOC;
		}

		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
		    "alloc_rx_buf_dma rdc %d chunk %d bufp %llx size %x "
		    "i %d nblocks %d alength %d",
		    dma_channel, i, &rx_dmap[i], block_size,
		    i, rx_dmap[i].nblocks,
		    rx_dmap[i].alength));
		status = nxge_dma_mem_alloc(nxgep, nxge_force_dma,
		    &nxge_rx_dma_attr,
		    rx_dmap[i].alength,
		    &nxge_dev_buf_dma_acc_attr,
		    DDI_DMA_READ | DDI_DMA_STREAMING,
		    (p_nxge_dma_common_t)(&rx_dmap[i]));
		if (status != NXGE_OK) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "nxge_alloc_rx_buf_dma: Alloc Failed: "
			    "dma %d size_index %d size requested %d",
			    dma_channel,
			    size_index,
			    rx_dmap[i].alength));
			size_index--;
		} else {
			rx_dmap[i].buf_alloc_state = BUF_ALLOCATED;
			NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
			    " nxge_alloc_rx_buf_dma DONE alloc mem: "
			    "dma %d dma_buf_p $%p kaddrp $%p alength %d "
			    "buf_alloc_state %d alloc_type %d",
			    dma_channel,
			    &rx_dmap[i],
			    rx_dmap[i].kaddrp,
			    rx_dmap[i].alength,
			    rx_dmap[i].buf_alloc_state,
			    rx_dmap[i].buf_alloc_type));
			NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
			    " alloc_rx_buf_dma allocated rdc %d "
			    "chunk %d size %x dvma %x bufp %llx kaddrp $%p",
			    dma_channel, i, rx_dmap[i].alength,
			    rx_dmap[i].ioaddr_pp, &rx_dmap[i],
			    rx_dmap[i].kaddrp));
			i++;
			allocated += alloc_sizes[size_index];
		}
	}

	if (allocated < total_alloc_size) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_alloc_rx_buf_dma: not enough for channel %d "
		    "allocated 0x%x requested 0x%x",
		    dma_channel,
		    allocated, total_alloc_size));
		status = NXGE_ERROR;
		goto nxge_alloc_rx_mem_fail1;
	}

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "==> nxge_alloc_rx_buf_dma: Allocated for channel %d "
	    "allocated 0x%x requested 0x%x",
	    dma_channel,
	    allocated, total_alloc_size));

	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
	    " alloc_rx_buf_dma rdc %d allocated %d chunks",
	    dma_channel, i));
	*num_chunks = i;
	*dmap = rx_dmap;

	goto nxge_alloc_rx_mem_exit;

nxge_alloc_rx_mem_fail1:
	KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK);

nxge_alloc_rx_mem_exit:
	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
	    "<== nxge_alloc_rx_buf_dma status 0x%08x", status));

	return (status);
}

/*ARGSUSED*/
static void
nxge_free_rx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap,
    uint32_t num_chunks)
{
	int i;

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "==> nxge_free_rx_buf_dma: # of chunks %d", num_chunks));

	if (dmap == 0)
		return;

	for (i = 0; i < num_chunks; i++) {
		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
		    "==> nxge_free_rx_buf_dma: chunk %d dmap 0x%llx",
		    i, dmap));
		nxge_dma_free_rx_data_buf(dmap++);
	}

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_rx_buf_dma"));
}

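/*
 * Note on the control allocation below, versus the data buffers above:
 * descriptor rings and mailboxes are bound DDI_DMA_CONSISTENT with the
 * descriptor attributes (nxge_desc_dma_attr), while data buffers are
 * bound DDI_DMA_STREAMING with the buffer attributes.
 */
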
/*ARGSUSED*/
static nxge_status_t
nxge_alloc_rx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel,
    p_nxge_dma_common_t *dmap, size_t size)
{
	p_nxge_dma_common_t rx_dmap;
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_cntl_dma"));

	rx_dmap = (p_nxge_dma_common_t)
	    KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP);

	rx_dmap->contig_alloc_type = B_FALSE;
	rx_dmap->kmem_alloc_type = B_FALSE;

	status = nxge_dma_mem_alloc(nxgep, nxge_force_dma,
	    &nxge_desc_dma_attr,
	    size,
	    &nxge_dev_desc_dma_acc_attr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    rx_dmap);
	if (status != NXGE_OK) {
		goto nxge_alloc_rx_cntl_dma_fail1;
	}

	*dmap = rx_dmap;
	goto nxge_alloc_rx_cntl_dma_exit;

nxge_alloc_rx_cntl_dma_fail1:
	KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t));

nxge_alloc_rx_cntl_dma_exit:
	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
	    "<== nxge_alloc_rx_cntl_dma status 0x%08x", status));

	return (status);
}

/*ARGSUSED*/
static void
nxge_free_rx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap)
{
	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_rx_cntl_dma"));

	if (dmap == 0)
		return;

	nxge_dma_mem_free(dmap);

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_rx_cntl_dma"));
}

typedef struct {
	size_t tx_size;
	size_t cr_size;
	size_t threshhold;
} nxge_tdc_sizes_t;

static nxge_status_t
nxge_tdc_sizes(
	nxge_t *nxgep,
	nxge_tdc_sizes_t *sizes)
{
	uint32_t threshhold;	/* The bcopy() threshold */
	size_t tx_size;		/* Transmit buffer size */
	size_t cr_size;		/* Completion ring size */

	/*
	 * Assume that each DMA channel will be configured with the
	 * default transmit buffer size for copying transmit data.
	 * (If a packet is bigger than this, it will not be copied.)
	 */
	if (nxgep->niu_type == N2_NIU) {
		threshhold = TX_BCOPY_SIZE;
	} else {
		threshhold = nxge_bcopy_thresh;
	}
	tx_size = nxge_tx_ring_size * threshhold;

	cr_size = nxge_tx_ring_size * sizeof (tx_desc_t);
	cr_size += sizeof (txdma_mailbox_t);

#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
	if (nxgep->niu_type == N2_NIU) {
		if (!ISP2(tx_size)) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_tdc_sizes: Tx size"
			    " must be power of 2"));
			return (NXGE_ERROR);
		}

		if (tx_size > (1 << 22)) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_tdc_sizes: Tx size"
			    " limited to 4M"));
			return (NXGE_ERROR);
		}

		if (cr_size < 0x2000)
			cr_size = 0x2000;
	}
#endif

	sizes->threshhold = threshhold;
	sizes->tx_size = tx_size;
	sizes->cr_size = cr_size;

	return (NXGE_OK);
}

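/*
 * Worked example for nxge_tdc_sizes() (illustrative figures only): a
 * 1024-entry transmit ring with a 2 KB copy threshold needs
 * 1024 * 2048 = 2 MB of buffer space, which is a power of 2 and within
 * the 4 MB limit checked above for N2/NIU.
 */
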
2769 * 2770 * Notes: 2771 * 2772 * NPI function calls: 2773 * 2774 * NXGE function calls: 2775 * 2776 * Registers accessed: 2777 * 2778 * Context: 2779 * 2780 * Taking apart: 2781 * 2782 * Open questions: 2783 * 2784 */ 2785 nxge_status_t 2786 nxge_alloc_txb( 2787 p_nxge_t nxgep, 2788 int channel) 2789 { 2790 nxge_dma_common_t **dma_buf_p; 2791 nxge_dma_common_t **dma_cntl_p; 2792 uint32_t *num_chunks; 2793 nxge_status_t status = NXGE_OK; 2794 2795 nxge_tdc_sizes_t sizes; 2796 2797 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tbb")); 2798 2799 if (nxge_tdc_sizes(nxgep, &sizes) != NXGE_OK) 2800 return (NXGE_ERROR); 2801 2802 /* 2803 * Allocate memory for transmit buffers and descriptor rings. 2804 * Replace these allocation functions with the interface functions 2805 * provided by the partition manager Real Soon Now. 2806 */ 2807 dma_buf_p = &nxgep->tx_buf_pool_p->dma_buf_pool_p[channel]; 2808 num_chunks = &nxgep->tx_buf_pool_p->num_chunks[channel]; 2809 2810 dma_cntl_p = &nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel]; 2811 2812 /* 2813 * Allocate memory for transmit buffers and descriptor rings. 2814 * Replace allocation functions with interface functions provided 2815 * by the partition manager when it is available. 2816 * 2817 * Allocate memory for the transmit buffer pool. 2818 */ 2819 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2820 "sizes: tx: %ld, cr:%ld, th:%ld", 2821 sizes.tx_size, sizes.cr_size, sizes.threshhold)); 2822 2823 *num_chunks = 0; 2824 status = nxge_alloc_tx_buf_dma(nxgep, channel, dma_buf_p, 2825 sizes.tx_size, sizes.threshhold, num_chunks); 2826 if (status != NXGE_OK) { 2827 cmn_err(CE_NOTE, "nxge_alloc_tx_buf_dma failed!"); 2828 return (status); 2829 } 2830 2831 /* 2832 * Allocate memory for descriptor rings and mailbox. 2833 */ 2834 status = nxge_alloc_tx_cntl_dma(nxgep, channel, dma_cntl_p, 2835 sizes.cr_size); 2836 if (status != NXGE_OK) { 2837 nxge_free_tx_buf_dma(nxgep, *dma_buf_p, *num_chunks); 2838 cmn_err(CE_NOTE, "nxge_alloc_tx_cntl_dma failed!"); 2839 return (status); 2840 } 2841 2842 return (NXGE_OK); 2843 } 2844 2845 void 2846 nxge_free_txb( 2847 p_nxge_t nxgep, 2848 int channel) 2849 { 2850 nxge_dma_common_t *data; 2851 nxge_dma_common_t *control; 2852 uint32_t num_chunks; 2853 2854 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_txb")); 2855 2856 data = nxgep->tx_buf_pool_p->dma_buf_pool_p[channel]; 2857 num_chunks = nxgep->tx_buf_pool_p->num_chunks[channel]; 2858 nxge_free_tx_buf_dma(nxgep, data, num_chunks); 2859 2860 nxgep->tx_buf_pool_p->dma_buf_pool_p[channel] = 0; 2861 nxgep->tx_buf_pool_p->num_chunks[channel] = 0; 2862 2863 control = nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel]; 2864 nxge_free_tx_cntl_dma(nxgep, control); 2865 2866 nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel] = 0; 2867 2868 KMEM_FREE(data, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 2869 KMEM_FREE(control, sizeof (nxge_dma_common_t)); 2870 2871 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_txb")); 2872 } 2873 2874 /* 2875 * nxge_alloc_tx_mem_pool 2876 * 2877 * This function allocates all of the per-port TDC control data structures. 2878 * The per-channel (TDC) data structures are allocated when needed. 
2879 * 2880 * Arguments: 2881 * nxgep 2882 * 2883 * Notes: 2884 * 2885 * Context: 2886 * Any domain 2887 */ 2888 nxge_status_t 2889 nxge_alloc_tx_mem_pool(p_nxge_t nxgep) 2890 { 2891 nxge_hw_pt_cfg_t *p_cfgp; 2892 nxge_dma_pool_t *dma_poolp; 2893 nxge_dma_common_t **dma_buf_p; 2894 nxge_dma_pool_t *dma_cntl_poolp; 2895 nxge_dma_common_t **dma_cntl_p; 2896 uint32_t *num_chunks; /* per dma */ 2897 int tdc_max; 2898 2899 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_alloc_tx_mem_pool")); 2900 2901 p_cfgp = &nxgep->pt_config.hw_config; 2902 tdc_max = NXGE_MAX_TDCS; 2903 2904 /* 2905 * Allocate memory for each transmit DMA channel. 2906 */ 2907 dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t), 2908 KM_SLEEP); 2909 dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 2910 sizeof (p_nxge_dma_common_t) * tdc_max, KM_SLEEP); 2911 2912 dma_cntl_poolp = (p_nxge_dma_pool_t) 2913 KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP); 2914 dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 2915 sizeof (p_nxge_dma_common_t) * tdc_max, KM_SLEEP); 2916 2917 if (nxge_tx_ring_size > TDC_DEFAULT_MAX) { 2918 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 2919 "nxge_alloc_tx_mem_pool: TDC too high %d, " 2920 "set to default %d", 2921 nxge_tx_ring_size, TDC_DEFAULT_MAX)); 2922 nxge_tx_ring_size = TDC_DEFAULT_MAX; 2923 } 2924 2925 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2926 /* 2927 * N2/NIU has limitation on the descriptor sizes (contiguous 2928 * memory allocation on data buffers to 4M (contig_mem_alloc) 2929 * and little endian for control buffers (must use the ddi/dki mem alloc 2930 * function). The transmit ring is limited to 8K (includes the 2931 * mailbox). 2932 */ 2933 if (nxgep->niu_type == N2_NIU) { 2934 if ((nxge_tx_ring_size > NXGE_NIU_CONTIG_TX_MAX) || 2935 (!ISP2(nxge_tx_ring_size))) { 2936 nxge_tx_ring_size = NXGE_NIU_CONTIG_TX_MAX; 2937 } 2938 } 2939 #endif 2940 2941 nxgep->nxge_port_tx_ring_size = nxge_tx_ring_size; 2942 2943 num_chunks = (uint32_t *)KMEM_ZALLOC( 2944 sizeof (uint32_t) * tdc_max, KM_SLEEP); 2945 2946 dma_poolp->ndmas = p_cfgp->tdc.owned; 2947 dma_poolp->num_chunks = num_chunks; 2948 dma_poolp->dma_buf_pool_p = dma_buf_p; 2949 nxgep->tx_buf_pool_p = dma_poolp; 2950 2951 dma_poolp->buf_allocated = B_TRUE; 2952 2953 dma_cntl_poolp->ndmas = p_cfgp->tdc.owned; 2954 dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p; 2955 nxgep->tx_cntl_pool_p = dma_cntl_poolp; 2956 2957 dma_cntl_poolp->buf_allocated = B_TRUE; 2958 2959 nxgep->tx_rings = 2960 KMEM_ZALLOC(sizeof (tx_rings_t), KM_SLEEP); 2961 nxgep->tx_rings->rings = 2962 KMEM_ZALLOC(sizeof (p_tx_ring_t) * tdc_max, KM_SLEEP); 2963 nxgep->tx_mbox_areas_p = 2964 KMEM_ZALLOC(sizeof (tx_mbox_areas_t), KM_SLEEP); 2965 nxgep->tx_mbox_areas_p->txmbox_areas_p = 2966 KMEM_ZALLOC(sizeof (p_tx_mbox_t) * tdc_max, KM_SLEEP); 2967 2968 nxgep->tx_rings->ndmas = p_cfgp->tdc.owned; 2969 2970 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 2971 "==> nxge_alloc_tx_mem_pool: ndmas %d poolp->ndmas %d", 2972 tdc_max, dma_poolp->ndmas)); 2973 2974 return (NXGE_OK); 2975 } 2976 2977 nxge_status_t 2978 nxge_alloc_tx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel, 2979 p_nxge_dma_common_t *dmap, size_t alloc_size, 2980 size_t block_size, uint32_t *num_chunks) 2981 { 2982 p_nxge_dma_common_t tx_dmap; 2983 nxge_status_t status = NXGE_OK; 2984 size_t total_alloc_size; 2985 size_t allocated = 0; 2986 int i, size_index, array_size; 2987 2988 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_buf_dma")); 2989 2990 tx_dmap = (p_nxge_dma_common_t) 2991 KMEM_ZALLOC(sizeof (nxge_dma_common_t) * 
nxge_status_t
nxge_alloc_tx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel,
    p_nxge_dma_common_t *dmap, size_t alloc_size,
    size_t block_size, uint32_t *num_chunks)
{
	p_nxge_dma_common_t tx_dmap;
	nxge_status_t status = NXGE_OK;
	size_t total_alloc_size;
	size_t allocated = 0;
	int i, size_index, array_size;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_buf_dma"));

	tx_dmap = (p_nxge_dma_common_t)
	    KMEM_ZALLOC(sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK,
	    KM_SLEEP);

	total_alloc_size = alloc_size;
	i = 0;
	size_index = 0;
	array_size = sizeof (alloc_sizes) / sizeof (size_t);
	while ((alloc_sizes[size_index] < alloc_size) &&
	    (size_index < array_size))
		size_index++;
	if (size_index >= array_size) {
		size_index = array_size - 1;
	}

	while ((allocated < total_alloc_size) &&
	    (size_index >= 0) && (i < NXGE_DMA_BLOCK)) {

		tx_dmap[i].dma_chunk_index = i;
		tx_dmap[i].block_size = block_size;
		tx_dmap[i].alength = alloc_sizes[size_index];
		tx_dmap[i].orig_alength = tx_dmap[i].alength;
		tx_dmap[i].nblocks = alloc_sizes[size_index] / block_size;
		tx_dmap[i].dma_channel = dma_channel;
		tx_dmap[i].contig_alloc_type = B_FALSE;
		tx_dmap[i].kmem_alloc_type = B_FALSE;

		/*
		 * N2/NIU: data buffers must be contiguous as the driver
		 * needs to call the Hypervisor API to set up
		 * logical pages.
		 */
		if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) {
			tx_dmap[i].contig_alloc_type = B_TRUE;
		}

		status = nxge_dma_mem_alloc(nxgep, nxge_force_dma,
		    &nxge_tx_dma_attr,
		    tx_dmap[i].alength,
		    &nxge_dev_buf_dma_acc_attr,
		    DDI_DMA_WRITE | DDI_DMA_STREAMING,
		    (p_nxge_dma_common_t)(&tx_dmap[i]));
		if (status != NXGE_OK) {
			size_index--;
		} else {
			i++;
			allocated += alloc_sizes[size_index];
		}
	}

	if (allocated < total_alloc_size) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_alloc_tx_buf_dma: not enough for channel %d: "
		    "allocated 0x%x requested 0x%x",
		    dma_channel,
		    allocated, total_alloc_size));
		status = NXGE_ERROR;
		goto nxge_alloc_tx_mem_fail1;
	}

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "==> nxge_alloc_tx_buf_dma: Allocated for channel %d: "
	    "allocated 0x%x requested 0x%x",
	    dma_channel,
	    allocated, total_alloc_size));

	*num_chunks = i;
	*dmap = tx_dmap;
	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
	    "==> nxge_alloc_tx_buf_dma dmap 0x%016llx num chunks %d",
	    *dmap, i));
	goto nxge_alloc_tx_mem_exit;

nxge_alloc_tx_mem_fail1:
	KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK);

nxge_alloc_tx_mem_exit:
	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
	    "<== nxge_alloc_tx_buf_dma status 0x%08x", status));

	return (status);
}

/*ARGSUSED*/
static void
nxge_free_tx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap,
    uint32_t num_chunks)
{
	int i;

	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_tx_buf_dma"));

	if (dmap == 0)
		return;

	for (i = 0; i < num_chunks; i++) {
		nxge_dma_mem_free(dmap++);
	}

	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_tx_buf_dma"));
}

/*ARGSUSED*/
nxge_status_t
nxge_alloc_tx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel,
    p_nxge_dma_common_t *dmap, size_t size)
{
	p_nxge_dma_common_t tx_dmap;
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_cntl_dma"));
	tx_dmap = (p_nxge_dma_common_t)
	    KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP);

	tx_dmap->contig_alloc_type = B_FALSE;
	tx_dmap->kmem_alloc_type = B_FALSE;

	status = nxge_dma_mem_alloc(nxgep, nxge_force_dma,
	    &nxge_desc_dma_attr,
	    size,
	    &nxge_dev_desc_dma_acc_attr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    tx_dmap);
	if (status != NXGE_OK) {
		goto nxge_alloc_tx_cntl_dma_fail1;
	}

	*dmap = tx_dmap;
	goto nxge_alloc_tx_cntl_dma_exit;

nxge_alloc_tx_cntl_dma_fail1:
	KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t));

nxge_alloc_tx_cntl_dma_exit:
	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
	    "<== nxge_alloc_tx_cntl_dma status 0x%08x", status));

	return (status);
}

/*ARGSUSED*/
static void
nxge_free_tx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap)
{
	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_tx_cntl_dma"));

	if (dmap == 0)
		return;

	nxge_dma_mem_free(dmap);

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_tx_cntl_dma"));
}

/*
 * nxge_free_tx_mem_pool
 *
 *	This function frees all of the per-port TDC control data structures.
 *	The per-channel (TDC) data structures are freed when the channel
 *	is stopped.
 *
 * Arguments:
 * 	nxgep
 *
 * Notes:
 *
 * Context:
 *	Any domain
 */
static void
nxge_free_tx_mem_pool(p_nxge_t nxgep)
{
	int tdc_max = NXGE_MAX_TDCS;

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_tx_mem_pool"));

	if (!nxgep->tx_buf_pool_p || !nxgep->tx_buf_pool_p->buf_allocated) {
		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
		    "<== nxge_free_tx_mem_pool "
		    "(null tx buf pool or buf not allocated)"));
		return;
	}
	if (!nxgep->tx_cntl_pool_p || !nxgep->tx_cntl_pool_p->buf_allocated) {
		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
		    "<== nxge_free_tx_mem_pool "
		    "(null tx cntl buf pool or cntl buf not allocated)"));
		return;
	}

	/* 1. Free the mailboxes. */
	KMEM_FREE(nxgep->tx_mbox_areas_p->txmbox_areas_p,
	    sizeof (p_tx_mbox_t) * tdc_max);
	KMEM_FREE(nxgep->tx_mbox_areas_p, sizeof (tx_mbox_areas_t));

	nxgep->tx_mbox_areas_p = 0;

	/* 2. Free the transmit ring arrays. */
	KMEM_FREE(nxgep->tx_rings->rings,
	    sizeof (p_tx_ring_t) * tdc_max);
	KMEM_FREE(nxgep->tx_rings, sizeof (tx_rings_t));

	nxgep->tx_rings = 0;

	/* 3. Free the completion ring data structures. */
	KMEM_FREE(nxgep->tx_cntl_pool_p->dma_buf_pool_p,
	    sizeof (p_nxge_dma_common_t) * tdc_max);
	KMEM_FREE(nxgep->tx_cntl_pool_p, sizeof (nxge_dma_pool_t));

	nxgep->tx_cntl_pool_p = 0;

	/* 4. Free the data ring data structures. */
	KMEM_FREE(nxgep->tx_buf_pool_p->num_chunks,
	    sizeof (uint32_t) * tdc_max);
	KMEM_FREE(nxgep->tx_buf_pool_p->dma_buf_pool_p,
	    sizeof (p_nxge_dma_common_t) * tdc_max);
	KMEM_FREE(nxgep->tx_buf_pool_p, sizeof (nxge_dma_pool_t));

	nxgep->tx_buf_pool_p = 0;

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_tx_mem_pool"));
}

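/*
 * nxge_dma_mem_alloc() below supports three allocation strategies,
 * selected by flags in the nxge_dma_common_t (summary of the code):
 *
 *	contig_alloc_type	contig_mem_alloc(), N2/NIU only
 *	kmem_alloc_type		kmem_alloc() (KMEM_ALLOC macro)
 *	neither			ddi_dma_mem_alloc(), the default
 *
 * In every case the memory is then bound to a single DMA cookie.
 */
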
/*ARGSUSED*/
static nxge_status_t
nxge_dma_mem_alloc(p_nxge_t nxgep, dma_method_t method,
    struct ddi_dma_attr *dma_attrp,
    size_t length, ddi_device_acc_attr_t *acc_attr_p, uint_t xfer_flags,
    p_nxge_dma_common_t dma_p)
{
	caddr_t kaddrp;
	int ddi_status = DDI_SUCCESS;
	boolean_t contig_alloc_type;
	boolean_t kmem_alloc_type;

	contig_alloc_type = dma_p->contig_alloc_type;

	if (contig_alloc_type && (nxgep->niu_type != N2_NIU)) {
		/*
		 * contig_alloc_type for contiguous memory is only
		 * allowed for N2/NIU.
		 */
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_dma_mem_alloc: alloc type not allowed (%d)",
		    dma_p->contig_alloc_type));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	dma_p->dma_handle = NULL;
	dma_p->acc_handle = NULL;
	dma_p->kaddrp = dma_p->last_kaddrp = NULL;
	dma_p->first_ioaddr_pp = dma_p->last_ioaddr_pp = NULL;
	ddi_status = ddi_dma_alloc_handle(nxgep->dip, dma_attrp,
	    DDI_DMA_DONTWAIT, NULL, &dma_p->dma_handle);
	if (ddi_status != DDI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_dma_mem_alloc:ddi_dma_alloc_handle failed."));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	kmem_alloc_type = dma_p->kmem_alloc_type;

	switch (contig_alloc_type) {
	case B_FALSE:
		switch (kmem_alloc_type) {
		case B_FALSE:
			ddi_status = ddi_dma_mem_alloc(dma_p->dma_handle,
			    length,
			    acc_attr_p,
			    xfer_flags,
			    DDI_DMA_DONTWAIT, 0, &kaddrp, &dma_p->alength,
			    &dma_p->acc_handle);
			if (ddi_status != DDI_SUCCESS) {
				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
				    "nxge_dma_mem_alloc: "
				    "ddi_dma_mem_alloc failed"));
				ddi_dma_free_handle(&dma_p->dma_handle);
				dma_p->dma_handle = NULL;
				return (NXGE_ERROR | NXGE_DDI_FAILED);
			}
			if (dma_p->alength < length) {
				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
				    "nxge_dma_mem_alloc:ddi_dma_mem_alloc "
				    "< length."));
				ddi_dma_mem_free(&dma_p->acc_handle);
				ddi_dma_free_handle(&dma_p->dma_handle);
				dma_p->acc_handle = NULL;
				dma_p->dma_handle = NULL;
				return (NXGE_ERROR);
			}

			ddi_status = ddi_dma_addr_bind_handle(
			    dma_p->dma_handle, NULL,
			    kaddrp, dma_p->alength, xfer_flags,
			    DDI_DMA_DONTWAIT,
			    0, &dma_p->dma_cookie, &dma_p->ncookies);
			if (ddi_status != DDI_DMA_MAPPED) {
				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
				    "nxge_dma_mem_alloc: ddi_dma_addr_bind "
				    "failed "
				    "(status 0x%x ncookies %d.)", ddi_status,
				    dma_p->ncookies));
				if (dma_p->acc_handle) {
					ddi_dma_mem_free(&dma_p->acc_handle);
					dma_p->acc_handle = NULL;
				}
				ddi_dma_free_handle(&dma_p->dma_handle);
				dma_p->dma_handle = NULL;
				return (NXGE_ERROR | NXGE_DDI_FAILED);
			}

			if (dma_p->ncookies != 1) {
				NXGE_DEBUG_MSG((nxgep, DMA_CTL,
				    "nxge_dma_mem_alloc:ddi_dma_addr_bind "
				    "> 1 cookie "
				    "(status 0x%x ncookies %d.)", ddi_status,
				    dma_p->ncookies));
				if (dma_p->acc_handle) {
					ddi_dma_mem_free(&dma_p->acc_handle);
					dma_p->acc_handle = NULL;
				}
				(void) ddi_dma_unbind_handle(
				    dma_p->dma_handle);
				ddi_dma_free_handle(&dma_p->dma_handle);
				dma_p->dma_handle = NULL;
				return (NXGE_ERROR);
			}
			break;

		case B_TRUE:
			kaddrp = KMEM_ALLOC(length, KM_NOSLEEP);
			if (kaddrp == NULL) {
				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
				    "nxge_dma_mem_alloc: "
				    "kmem alloc failed"));
				return (NXGE_ERROR);
			}

			dma_p->alength = length;
			ddi_status = ddi_dma_addr_bind_handle(
			    dma_p->dma_handle,
			    NULL, kaddrp, dma_p->alength, xfer_flags,
			    DDI_DMA_DONTWAIT, 0,
			    &dma_p->dma_cookie, &dma_p->ncookies);
			if (ddi_status != DDI_DMA_MAPPED) {
				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
				    "nxge_dma_mem_alloc:ddi_dma_addr_bind: "
				    "(kmem_alloc) failed kaddrp $%p length %d "
				    "(status 0x%x (%d) ncookies %d.)",
				    kaddrp, length,
				    ddi_status, ddi_status, dma_p->ncookies));
				KMEM_FREE(kaddrp, length);
				dma_p->acc_handle = NULL;
				ddi_dma_free_handle(&dma_p->dma_handle);
				dma_p->dma_handle = NULL;
				dma_p->kaddrp = NULL;
				return (NXGE_ERROR | NXGE_DDI_FAILED);
			}

			if (dma_p->ncookies != 1) {
				NXGE_DEBUG_MSG((nxgep, DMA_CTL,
				    "nxge_dma_mem_alloc:ddi_dma_addr_bind "
				    "(kmem_alloc) > 1 cookie "
				    "(status 0x%x ncookies %d.)", ddi_status,
				    dma_p->ncookies));
				KMEM_FREE(kaddrp, length);
				dma_p->acc_handle = NULL;
				(void) ddi_dma_unbind_handle(
				    dma_p->dma_handle);
				ddi_dma_free_handle(&dma_p->dma_handle);
				dma_p->dma_handle = NULL;
				dma_p->kaddrp = NULL;
				return (NXGE_ERROR);
			}

			dma_p->kaddrp = kaddrp;

			NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
			    "nxge_dma_mem_alloc: kmem_alloc dmap $%p "
			    "kaddr $%p alength %d",
			    dma_p,
			    kaddrp,
			    dma_p->alength));
			break;
		}
		break;

#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
	case B_TRUE:
		kaddrp = (caddr_t)contig_mem_alloc(length);
		if (kaddrp == NULL) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "nxge_dma_mem_alloc:contig_mem_alloc failed."));
			ddi_dma_free_handle(&dma_p->dma_handle);
			return (NXGE_ERROR | NXGE_DDI_FAILED);
		}

		dma_p->alength = length;
		ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL,
		    kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0,
		    &dma_p->dma_cookie, &dma_p->ncookies);
		if (ddi_status != DDI_DMA_MAPPED) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "nxge_dma_mem_alloc:ddi_dma_addr_bind failed "
			    "(status 0x%x ncookies %d.)", ddi_status,
			    dma_p->ncookies));

			NXGE_DEBUG_MSG((nxgep, DMA_CTL,
			    "==> nxge_dma_mem_alloc: (not mapped) "
			    "length %lu (0x%x) "
			    "free contig kaddrp $%p "
			    "va_to_pa $%p",
			    length, length,
			    kaddrp,
			    va_to_pa(kaddrp)));

			contig_mem_free((void *)kaddrp, length);
			ddi_dma_free_handle(&dma_p->dma_handle);

			dma_p->dma_handle = NULL;
			dma_p->acc_handle = NULL;
			dma_p->alength = 0;
			dma_p->kaddrp = NULL;

			return (NXGE_ERROR | NXGE_DDI_FAILED);
		}

		if (dma_p->ncookies != 1 ||
		    (dma_p->dma_cookie.dmac_laddress == 0)) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "nxge_dma_mem_alloc:ddi_dma_addr_bind > 1 "
			    "cookie or "
			    "dmac_laddress is 0 $%p size %d "
			    " (status 0x%x ncookies %d.)",
			    dma_p->dma_cookie.dmac_laddress,
			    dma_p->dma_cookie.dmac_size,
			    ddi_status,
			    dma_p->ncookies));

			contig_mem_free((void *)kaddrp, length);
			(void) ddi_dma_unbind_handle(dma_p->dma_handle);
			ddi_dma_free_handle(&dma_p->dma_handle);

			dma_p->alength = 0;
			dma_p->dma_handle = NULL;
			dma_p->acc_handle = NULL;
			dma_p->kaddrp = NULL;

			return (NXGE_ERROR | NXGE_DDI_FAILED);
		}
		break;

#else
	case B_TRUE:
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_dma_mem_alloc: invalid alloc type for !sun4v"));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
#endif
	}

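	/*
	 * At this point exactly one DMA cookie is bound (multi-cookie
	 * bindings are rejected above), so dmac_laddress is the single
	 * device-visible base address recorded below; on 32-bit x86 it
	 * is first truncated through uint32_t.
	 */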
	dma_p->kaddrp = kaddrp;
	dma_p->last_kaddrp = (unsigned char *)kaddrp +
	    dma_p->alength - RXBUF_64B_ALIGNED;
#if defined(__i386)
	dma_p->ioaddr_pp =
	    (unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress;
#else
	dma_p->ioaddr_pp = (unsigned char *)dma_p->dma_cookie.dmac_laddress;
#endif
	dma_p->last_ioaddr_pp =
#if defined(__i386)
	    (unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress +
#else
	    (unsigned char *)dma_p->dma_cookie.dmac_laddress +
#endif
	    dma_p->alength - RXBUF_64B_ALIGNED;

	NPI_DMA_ACC_HANDLE_SET(dma_p, dma_p->acc_handle);

#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
	dma_p->orig_ioaddr_pp =
	    (unsigned char *)dma_p->dma_cookie.dmac_laddress;
	dma_p->orig_alength = length;
	dma_p->orig_kaddrp = kaddrp;
	dma_p->orig_vatopa = (uint64_t)va_to_pa(kaddrp);
#endif

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dma_mem_alloc: "
	    "dma buffer allocated: dma_p $%p "
	    "return dmac_laddress from cookie $%p cookie dmac_size %d "
	    "dma_p->ioaddr_p $%p "
	    "dma_p->orig_ioaddr_p $%p "
	    "orig_vatopa $%p "
	    "alength %d (0x%x) "
	    "kaddrp $%p "
	    "length %d (0x%x)",
	    dma_p,
	    dma_p->dma_cookie.dmac_laddress, dma_p->dma_cookie.dmac_size,
	    dma_p->ioaddr_pp,
	    dma_p->orig_ioaddr_pp,
	    dma_p->orig_vatopa,
	    dma_p->alength, dma_p->alength,
	    kaddrp,
	    length, length));

	return (NXGE_OK);
}

static void
nxge_dma_mem_free(p_nxge_dma_common_t dma_p)
{
	if (dma_p->dma_handle != NULL) {
		if (dma_p->ncookies) {
			(void) ddi_dma_unbind_handle(dma_p->dma_handle);
			dma_p->ncookies = 0;
		}
		ddi_dma_free_handle(&dma_p->dma_handle);
		dma_p->dma_handle = NULL;
	}

	if (dma_p->acc_handle != NULL) {
		ddi_dma_mem_free(&dma_p->acc_handle);
		dma_p->acc_handle = NULL;
		NPI_DMA_ACC_HANDLE_SET(dma_p, NULL);
	}

#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
	if (dma_p->contig_alloc_type &&
	    dma_p->orig_kaddrp && dma_p->orig_alength) {
		NXGE_DEBUG_MSG((NULL, DMA_CTL, "nxge_dma_mem_free: "
		    "kaddrp $%p (orig_kaddrp $%p) "
		    "mem type %d "
		    "orig_alength %d "
		    "alength 0x%x (%d)",
		    dma_p->kaddrp,
		    dma_p->orig_kaddrp,
		    dma_p->contig_alloc_type,
		    dma_p->orig_alength,
		    dma_p->alength, dma_p->alength));

		contig_mem_free(dma_p->orig_kaddrp, dma_p->orig_alength);
		dma_p->orig_alength = 0;
		dma_p->orig_kaddrp = NULL;
		dma_p->contig_alloc_type = B_FALSE;
	}
#endif
	dma_p->kaddrp = NULL;
	dma_p->alength = 0;
}

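/*
 * Note: nxge_dma_mem_free() above releases a mapping unconditionally,
 * while nxge_dma_free_rx_data_buf() below defers freeing the underlying
 * data buffer until no received mblks still reference it (the
 * BUF_ALLOCATED_WAIT_FREE state).
 */
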
static void
nxge_dma_free_rx_data_buf(p_nxge_dma_common_t dma_p)
{
	uint64_t kaddr;
	uint32_t buf_size;

	NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_dma_free_rx_data_buf"));

	if (dma_p->dma_handle != NULL) {
		if (dma_p->ncookies) {
			(void) ddi_dma_unbind_handle(dma_p->dma_handle);
			dma_p->ncookies = 0;
		}
		ddi_dma_free_handle(&dma_p->dma_handle);
		dma_p->dma_handle = NULL;
	}

	if (dma_p->acc_handle != NULL) {
		ddi_dma_mem_free(&dma_p->acc_handle);
		dma_p->acc_handle = NULL;
		NPI_DMA_ACC_HANDLE_SET(dma_p, NULL);
	}

	NXGE_DEBUG_MSG((NULL, DMA_CTL,
	    "==> nxge_dma_free_rx_data_buf: dmap $%p buf_alloc_state %d",
	    dma_p,
	    dma_p->buf_alloc_state));

	if (!(dma_p->buf_alloc_state & BUF_ALLOCATED_WAIT_FREE)) {
		NXGE_DEBUG_MSG((NULL, DMA_CTL,
		    "<== nxge_dma_free_rx_data_buf: "
		    "outstanding data buffers"));
		return;
	}

#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
	if (dma_p->contig_alloc_type &&
	    dma_p->orig_kaddrp && dma_p->orig_alength) {
		NXGE_DEBUG_MSG((NULL, DMA_CTL, "nxge_dma_free_rx_data_buf: "
		    "kaddrp $%p (orig_kaddrp $%p) "
		    "mem type %d "
		    "orig_alength %d "
		    "alength 0x%x (%d)",
		    dma_p->kaddrp,
		    dma_p->orig_kaddrp,
		    dma_p->contig_alloc_type,
		    dma_p->orig_alength,
		    dma_p->alength, dma_p->alength));

		kaddr = (uint64_t)dma_p->orig_kaddrp;
		buf_size = dma_p->orig_alength;
		nxge_free_buf(CONTIG_MEM_ALLOC, kaddr, buf_size);
		dma_p->orig_alength = 0;
		dma_p->orig_kaddrp = NULL;
		dma_p->contig_alloc_type = B_FALSE;
		dma_p->kaddrp = NULL;
		dma_p->alength = 0;
		return;
	}
#endif

	if (dma_p->kmem_alloc_type) {
		NXGE_DEBUG_MSG((NULL, DMA_CTL,
		    "nxge_dma_free_rx_data_buf: free kmem "
		    "kaddrp $%p (orig_kaddrp $%p) "
		    "alloc type %d "
		    "orig_alength %d "
		    "alength 0x%x (%d)",
		    dma_p->kaddrp,
		    dma_p->orig_kaddrp,
		    dma_p->kmem_alloc_type,
		    dma_p->orig_alength,
		    dma_p->alength, dma_p->alength));
#if defined(__i386)
		kaddr = (uint64_t)(uint32_t)dma_p->kaddrp;
#else
		kaddr = (uint64_t)dma_p->kaddrp;
#endif
		buf_size = dma_p->orig_alength;
		NXGE_DEBUG_MSG((NULL, DMA_CTL,
		    "nxge_dma_free_rx_data_buf: free dmap $%p "
		    "kaddr $%p buf_size %d",
		    dma_p,
		    kaddr, buf_size));
		nxge_free_buf(KMEM_ALLOC, kaddr, buf_size);
		dma_p->alength = 0;
		dma_p->orig_alength = 0;
		dma_p->kaddrp = NULL;
		dma_p->kmem_alloc_type = B_FALSE;
	}

	NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_dma_free_rx_data_buf"));
}

/*
 * nxge_m_start() -- start transmitting and receiving.
 *
 * This function is called by the MAC layer when the first
 * stream is opened, to prepare the hardware for transmitting
 * and receiving packets.
 */
static int
nxge_m_start(void *arg)
{
	p_nxge_t nxgep = (p_nxge_t)arg;

	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_start"));

	if (nxge_peu_reset_enable && !nxgep->nxge_link_poll_timerid) {
		(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
	}

	MUTEX_ENTER(nxgep->genlock);
	if (nxge_init(nxgep) != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "<== nxge_m_start: initialization failed"));
		MUTEX_EXIT(nxgep->genlock);
		return (EIO);
	}

	if (nxgep->nxge_mac_state == NXGE_MAC_STARTED)
		goto nxge_m_start_exit;
	/*
	 * Start timer to check the system error and tx hangs
	 */
	if (!isLDOMguest(nxgep))
		nxgep->nxge_timerid = nxge_start_timer(nxgep,
		    nxge_check_hw_state, NXGE_CHECK_TIMER);
#if defined(sun4v)
	else
		nxge_hio_start_timer(nxgep);
#endif

	nxgep->link_notify = B_TRUE;

	nxgep->nxge_mac_state = NXGE_MAC_STARTED;

nxge_m_start_exit:
	MUTEX_EXIT(nxgep->genlock);
	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_start"));
	return (0);
}

/*
 * nxge_m_stop(): stop transmitting and receiving.
 */
static void
nxge_m_stop(void *arg)
{
	p_nxge_t nxgep = (p_nxge_t)arg;

	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_stop"));

	if (nxgep->nxge_timerid) {
		nxge_stop_timer(nxgep, nxgep->nxge_timerid);
		nxgep->nxge_timerid = 0;
	}

	MUTEX_ENTER(nxgep->genlock);
	nxgep->nxge_mac_state = NXGE_MAC_STOPPING;
	nxge_uninit(nxgep);

	nxgep->nxge_mac_state = NXGE_MAC_STOPPED;

	MUTEX_EXIT(nxgep->genlock);

	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_stop"));
}

static int
nxge_m_unicst(void *arg, const uint8_t *macaddr)
{
	p_nxge_t nxgep = (p_nxge_t)arg;
	struct ether_addr addrp;

	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "==> nxge_m_unicst"));

	bcopy(macaddr, (uint8_t *)&addrp, ETHERADDRL);
	if (nxge_set_mac_addr(nxgep, &addrp)) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "<== nxge_m_unicst: set unicast failed"));
		return (EINVAL);
	}

	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_m_unicst"));

	return (0);
}

static int
nxge_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
{
	p_nxge_t nxgep = (p_nxge_t)arg;
	struct ether_addr addrp;

	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
	    "==> nxge_m_multicst: add %d", add));

	bcopy(mca, (uint8_t *)&addrp, ETHERADDRL);
	if (add) {
		if (nxge_add_mcast_addr(nxgep, &addrp)) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "<== nxge_m_multicst: add multicast failed"));
			return (EINVAL);
		}
	} else {
		if (nxge_del_mcast_addr(nxgep, &addrp)) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "<== nxge_m_multicst: del multicast failed"));
			return (EINVAL);
		}
	}

	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_m_multicst"));

	return (0);
}

static int
nxge_m_promisc(void *arg, boolean_t on)
{
	p_nxge_t nxgep = (p_nxge_t)arg;

	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
	    "==> nxge_m_promisc: on %d", on));

	if (nxge_set_promisc(nxgep, on)) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "<== nxge_m_promisc: set promisc failed"));
		return (EINVAL);
	}

	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
	    "<== nxge_m_promisc: on %d", on));

	return (0);
}

static void
nxge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
{
	p_nxge_t nxgep = (p_nxge_t)arg;
	struct iocblk *iocp;
	boolean_t need_privilege;
	int err;
	int cmd;

	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl"));

	iocp = (struct iocblk *)mp->b_rptr;
	iocp->ioc_error = 0;
	need_privilege = B_TRUE;
	cmd = iocp->ioc_cmd;
	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl: cmd 0x%08x", cmd));
	switch (cmd) {
	default:
		miocnak(wq, mp, 0, EINVAL);
		NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl: invalid"));
		return;

	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
	case LB_GET_MODE:
		need_privilege = B_FALSE;
		break;
	case LB_SET_MODE:
		break;

	case NXGE_GET_MII:
	case NXGE_PUT_MII:
	case NXGE_GET64:
	case NXGE_PUT64:
	case NXGE_GET_TX_RING_SZ:
	case NXGE_GET_TX_DESC:
	case NXGE_TX_SIDE_RESET:
	case NXGE_RX_SIDE_RESET:
	case NXGE_GLOBAL_RESET:
	case NXGE_RESET_MAC:
	case NXGE_TX_REGS_DUMP:
	case NXGE_RX_REGS_DUMP:
	case NXGE_INT_REGS_DUMP:
	case NXGE_VIR_INT_REGS_DUMP:
	case NXGE_PUT_TCAM:
	case NXGE_GET_TCAM:
	case NXGE_RTRACE:
	case NXGE_RDUMP:
		need_privilege = B_FALSE;
		break;
	case NXGE_INJECT_ERR:
		cmn_err(CE_NOTE, "!nxge_m_ioctl: Inject error\n");
		nxge_err_inject(nxgep, wq, mp);
		break;
	}

	if (need_privilege) {
		err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
		if (err != 0) {
			miocnak(wq, mp, 0, err);
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "<== nxge_m_ioctl: no priv"));
			return;
		}
	}

	switch (cmd) {
	case LB_GET_MODE:
	case LB_SET_MODE:
	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
		nxge_loopback_ioctl(nxgep, wq, mp, iocp);
		break;

	case NXGE_GET_MII:
	case NXGE_PUT_MII:
	case NXGE_PUT_TCAM:
	case NXGE_GET_TCAM:
	case NXGE_GET64:
	case NXGE_PUT64:
	case NXGE_GET_TX_RING_SZ:
	case NXGE_GET_TX_DESC:
	case NXGE_TX_SIDE_RESET:
	case NXGE_RX_SIDE_RESET:
	case NXGE_GLOBAL_RESET:
	case NXGE_RESET_MAC:
	case NXGE_TX_REGS_DUMP:
	case NXGE_RX_REGS_DUMP:
	case NXGE_INT_REGS_DUMP:
	case NXGE_VIR_INT_REGS_DUMP:
		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_m_ioctl: cmd 0x%x", cmd));
		nxge_hw_ioctl(nxgep, wq, mp, iocp);
		break;
	}

	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl"));
}

extern void nxge_rx_hw_blank(void *arg, time_t ticks, uint_t count);

static void
nxge_m_resources(void *arg)
{
	p_nxge_t nxgep = arg;
	mac_rx_fifo_t mrf;

	nxge_grp_set_t *set = &nxgep->rx_set;
	uint8_t rdc;

	rx_rcr_ring_t *ring;

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_m_resources"));

	MUTEX_ENTER(nxgep->genlock);

	if (set->owned.map == 0) {
		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
		    "nxge_m_resources: no receive resources"));
		goto nxge_m_resources_exit;
	}

	/*
	 * CR 6492541 Check to see if the drv_state has been initialized;
	 * if not, call nxge_init().
	 */
	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
		if (nxge_init(nxgep) != NXGE_OK)
			goto nxge_m_resources_exit;
	}

	mrf.mrf_type = MAC_RX_FIFO;
	mrf.mrf_blank = nxge_rx_hw_blank;
	mrf.mrf_arg = (void *)nxgep;

	mrf.mrf_normal_blank_time = 128;
	mrf.mrf_normal_pkt_count = 8;

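	/*
	 * The values set above are the interrupt blanking defaults
	 * exported to the MAC layer: blank for up to 128 ticks or until
	 * 8 packets have arrived, whichever comes first (assumed
	 * semantics of the mac_rx_fifo_t fields; illustrative note only).
	 */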
static void
nxge_m_resources(void *arg)
{
    p_nxge_t nxgep = arg;
    mac_rx_fifo_t mrf;

    nxge_grp_set_t *set = &nxgep->rx_set;
    uint8_t rdc;

    rx_rcr_ring_t *ring;

    NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_m_resources"));

    MUTEX_ENTER(nxgep->genlock);

    if (set->owned.map == 0) {
        NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
            "nxge_m_resources: no receive resources"));
        goto nxge_m_resources_exit;
    }

    /*
     * CR 6492541 Check to see if the drv_state has been initialized;
     * if not, call nxge_init().
     */
    if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
        if (nxge_init(nxgep) != NXGE_OK)
            goto nxge_m_resources_exit;
    }

    mrf.mrf_type = MAC_RX_FIFO;
    mrf.mrf_blank = nxge_rx_hw_blank;
    mrf.mrf_arg = (void *)nxgep;

    mrf.mrf_normal_blank_time = 128;
    mrf.mrf_normal_pkt_count = 8;

    /*
     * Export our receive resources to the MAC layer.
     */
    for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
        if ((1 << rdc) & set->owned.map) {
            ring = nxgep->rx_rcr_rings->rcr_rings[rdc];
            if (ring == 0) {
                /*
                 * This is a big deal only if we are
                 * *not* in an LDOMs environment.
                 */
                if (nxgep->environs == SOLARIS_DOMAIN) {
                    cmn_err(CE_NOTE,
                        "==> nxge_m_resources: "
                        "ring %d == 0", rdc);
                }
                continue;
            }
            ring->rcr_mac_handle = mac_resource_add(
                nxgep->mach, (mac_resource_t *)&mrf);

            NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
                "==> nxge_m_resources: RDC %d RCR %p MAC handle %p",
                rdc, ring, ring->rcr_mac_handle));
        }
    }

nxge_m_resources_exit:
    MUTEX_EXIT(nxgep->genlock);
    NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_m_resources"));
}

void
nxge_mmac_kstat_update(p_nxge_t nxgep, mac_addr_slot_t slot, boolean_t factory)
{
    p_nxge_mmac_stats_t mmac_stats;
    int i;
    nxge_mmac_t *mmac_info;

    mmac_info = &nxgep->nxge_mmac_info;

    mmac_stats = &nxgep->statsp->mmac_stats;
    mmac_stats->mmac_max_cnt = mmac_info->num_mmac;
    mmac_stats->mmac_avail_cnt = mmac_info->naddrfree;

    for (i = 0; i < ETHERADDRL; i++) {
        if (factory) {
            mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i]
                = mmac_info->factory_mac_pool[slot][
                (ETHERADDRL-1) - i];
        } else {
            mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i]
                = mmac_info->mac_pool[slot].addr[
                (ETHERADDRL - 1) - i];
        }
    }
}
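/*
 * Illustrative example (derived from the shifts below, not from the
 * PRM) of the word packing done at the top of nxge_altmac_set(): the
 * six bytes of an Ethernet address aa:bb:cc:dd:ee:ff are split
 * big-endian-style into three 16-bit register words,
 *
 *	w2 = 0xaabb, w1 = 0xccdd, w0 = 0xeeff
 *
 * so for 00:14:4f:a8:12:34 the alternate-address entry is written as
 * w2 = 0x0014, w1 = 0x4fa8, w0 = 0x1234.
 */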
/*
 * nxge_altmac_set() -- Set an alternate MAC address
 */
static int
nxge_altmac_set(p_nxge_t nxgep, uint8_t *maddr, mac_addr_slot_t slot)
{
    uint8_t addrn;
    uint8_t portn;
    npi_mac_addr_t altmac;
    hostinfo_t mac_rdc;
    p_nxge_class_pt_cfg_t clscfgp;

    altmac.w2 = ((uint16_t)maddr[0] << 8) | ((uint16_t)maddr[1] & 0x0ff);
    altmac.w1 = ((uint16_t)maddr[2] << 8) | ((uint16_t)maddr[3] & 0x0ff);
    altmac.w0 = ((uint16_t)maddr[4] << 8) | ((uint16_t)maddr[5] & 0x0ff);

    portn = nxgep->mac.portnum;
    addrn = (uint8_t)slot - 1;

    if (npi_mac_altaddr_entry(nxgep->npi_handle, OP_SET, portn,
        addrn, &altmac) != NPI_SUCCESS)
        return (EIO);

    /*
     * Set the rdc table number for the host info entry
     * for this mac address slot.
     */
    clscfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
    mac_rdc.value = 0;
    mac_rdc.bits.w0.rdc_tbl_num = clscfgp->mac_host_info[addrn].rdctbl;
    mac_rdc.bits.w0.mac_pref = clscfgp->mac_host_info[addrn].mpr_npr;

    if (npi_mac_hostinfo_entry(nxgep->npi_handle, OP_SET,
        nxgep->function_num, addrn, &mac_rdc) != NPI_SUCCESS) {
        return (EIO);
    }

    /*
     * Enable comparison with the alternate MAC address.
     * While the first alternate addr is enabled by bit 1 of register
     * BMAC_ALTAD_CMPEN, it is enabled by bit 0 of register
     * XMAC_ADDR_CMPEN, so slot needs to be converted to addrn
     * accordingly before calling npi_mac_altaddr_enable.
     */
    if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1)
        addrn = (uint8_t)slot - 1;
    else
        addrn = (uint8_t)slot;

    if (npi_mac_altaddr_enable(nxgep->npi_handle, portn, addrn)
        != NPI_SUCCESS)
        return (EIO);

    return (0);
}

/*
 * nxge_m_mmac_add() -- find an unused address slot, set the address
 * value to the one specified, enable the port to start filtering on
 * the new MAC address. Returns 0 on success.
 */
int
nxge_m_mmac_add(void *arg, mac_multi_addr_t *maddr)
{
    p_nxge_t nxgep = arg;
    mac_addr_slot_t slot;
    nxge_mmac_t *mmac_info;
    int err;
    nxge_status_t status;

    mutex_enter(nxgep->genlock);

    /*
     * Make sure that nxge is initialized, if _start() has
     * not been called.
     */
    if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
        status = nxge_init(nxgep);
        if (status != NXGE_OK) {
            mutex_exit(nxgep->genlock);
            return (ENXIO);
        }
    }

    mmac_info = &nxgep->nxge_mmac_info;
    if (mmac_info->naddrfree == 0) {
        mutex_exit(nxgep->genlock);
        return (ENOSPC);
    }
    if (!mac_unicst_verify(nxgep->mach, maddr->mma_addr,
        maddr->mma_addrlen)) {
        mutex_exit(nxgep->genlock);
        return (EINVAL);
    }
    /*
     * Search for the first available slot. Because naddrfree
     * is not zero, we are guaranteed to find one.
     * Slot 0 is for the unique (primary) MAC; the first alternate
     * MAC slot is slot 1.
     * Each of the first two ports of Neptune has 16 alternate
     * MAC slots but only the first 7 (of 15) slots have assigned factory
     * MAC addresses. We first search among the slots without bundled
     * factory MACs. If we fail to find one in that range, then we
     * search the slots with bundled factory MACs. A factory MAC
     * will be wasted while the slot is used with a user MAC address.
     * But the slot could be used by a factory MAC again after calling
     * nxge_m_mmac_remove and nxge_m_mmac_reserve.
     */
    if (mmac_info->num_factory_mmac < mmac_info->num_mmac) {
        for (slot = mmac_info->num_factory_mmac + 1;
            slot <= mmac_info->num_mmac; slot++) {
            if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED))
                break;
        }
        if (slot > mmac_info->num_mmac) {
            for (slot = 1; slot <= mmac_info->num_factory_mmac;
                slot++) {
                if (!(mmac_info->mac_pool[slot].flags
                    & MMAC_SLOT_USED))
                    break;
            }
        }
    } else {
        for (slot = 1; slot <= mmac_info->num_mmac; slot++) {
            if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED))
                break;
        }
    }
    ASSERT(slot <= mmac_info->num_mmac);
    if ((err = nxge_altmac_set(nxgep, maddr->mma_addr, slot)) != 0) {
        mutex_exit(nxgep->genlock);
        return (err);
    }
    bcopy(maddr->mma_addr, mmac_info->mac_pool[slot].addr, ETHERADDRL);
    mmac_info->mac_pool[slot].flags |= MMAC_SLOT_USED;
    mmac_info->mac_pool[slot].flags &= ~MMAC_VENDOR_ADDR;
    mmac_info->naddrfree--;
    nxge_mmac_kstat_update(nxgep, slot, B_FALSE);

    maddr->mma_slot = slot;

    mutex_exit(nxgep->genlock);
    return (0);
}
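/*
 * Worked example of the search order above (values assumed purely for
 * illustration): with num_mmac = 16 and num_factory_mmac = 7,
 * nxge_m_mmac_add() probes slots 8 through 16 first, and only falls
 * back to the factory-capable slots 1 through 7 when all of the
 * non-factory slots are already in use.
 */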
/*
 * This function reserves an unused slot and programs the slot and the HW
 * with a factory mac address.
 */
static int
nxge_m_mmac_reserve(void *arg, mac_multi_addr_t *maddr)
{
    p_nxge_t nxgep = arg;
    mac_addr_slot_t slot;
    nxge_mmac_t *mmac_info;
    int err;
    nxge_status_t status;

    mutex_enter(nxgep->genlock);

    /*
     * Make sure that nxge is initialized, if _start() has
     * not been called.
     */
    if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
        status = nxge_init(nxgep);
        if (status != NXGE_OK) {
            mutex_exit(nxgep->genlock);
            return (ENXIO);
        }
    }

    mmac_info = &nxgep->nxge_mmac_info;
    if (mmac_info->naddrfree == 0) {
        mutex_exit(nxgep->genlock);
        return (ENOSPC);
    }

    slot = maddr->mma_slot;
    if (slot == -1) {	/* -1: Take the first available slot */
        for (slot = 1; slot <= mmac_info->num_factory_mmac; slot++) {
            if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED))
                break;
        }
        if (slot > mmac_info->num_factory_mmac) {
            mutex_exit(nxgep->genlock);
            return (ENOSPC);
        }
    }
    if (slot < 1 || slot > mmac_info->num_factory_mmac) {
        /*
         * Do not support a factory MAC at a slot greater than
         * num_factory_mmac even when there are available factory
         * MAC addresses, because the alternate MACs are bundled with
         * slot[1] through slot[num_factory_mmac].
         */
        mutex_exit(nxgep->genlock);
        return (EINVAL);
    }
    if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) {
        mutex_exit(nxgep->genlock);
        return (EBUSY);
    }
    /* Verify the address to be reserved */
    if (!mac_unicst_verify(nxgep->mach,
        mmac_info->factory_mac_pool[slot], ETHERADDRL)) {
        mutex_exit(nxgep->genlock);
        return (EINVAL);
    }
    if ((err = nxge_altmac_set(nxgep,
        mmac_info->factory_mac_pool[slot], slot)) != 0) {
        mutex_exit(nxgep->genlock);
        return (err);
    }
    bcopy(mmac_info->factory_mac_pool[slot], maddr->mma_addr, ETHERADDRL);
    mmac_info->mac_pool[slot].flags |= MMAC_SLOT_USED | MMAC_VENDOR_ADDR;
    mmac_info->naddrfree--;

    nxge_mmac_kstat_update(nxgep, slot, B_TRUE);
    mutex_exit(nxgep->genlock);

    /* Pass info back to the caller */
    maddr->mma_slot = slot;
    maddr->mma_addrlen = ETHERADDRL;
    maddr->mma_flags = MMAC_SLOT_USED | MMAC_VENDOR_ADDR;

    return (0);
}
/*
 * Remove the specified mac address and update the HW not to filter
 * the mac address anymore.
 */
int
nxge_m_mmac_remove(void *arg, mac_addr_slot_t slot)
{
    p_nxge_t nxgep = arg;
    nxge_mmac_t *mmac_info;
    uint8_t addrn;
    uint8_t portn;
    int err = 0;
    nxge_status_t status;

    mutex_enter(nxgep->genlock);

    /*
     * Make sure that nxge is initialized, if _start() has
     * not been called.
     */
    if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
        status = nxge_init(nxgep);
        if (status != NXGE_OK) {
            mutex_exit(nxgep->genlock);
            return (ENXIO);
        }
    }

    mmac_info = &nxgep->nxge_mmac_info;
    if (slot < 1 || slot > mmac_info->num_mmac) {
        mutex_exit(nxgep->genlock);
        return (EINVAL);
    }

    portn = nxgep->mac.portnum;
    if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1)
        addrn = (uint8_t)slot - 1;
    else
        addrn = (uint8_t)slot;

    if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) {
        if (npi_mac_altaddr_disable(nxgep->npi_handle, portn, addrn)
            == NPI_SUCCESS) {
            mmac_info->naddrfree++;
            mmac_info->mac_pool[slot].flags &= ~MMAC_SLOT_USED;
            /*
             * Regardless of whether the MAC we just stopped
             * filtering is a user addr or a factory addr, we must
             * set the MMAC_VENDOR_ADDR flag if this slot has an
             * associated factory MAC to indicate that a factory
             * MAC is available.
             */
            if (slot <= mmac_info->num_factory_mmac) {
                mmac_info->mac_pool[slot].flags
                    |= MMAC_VENDOR_ADDR;
            }
            /*
             * Clear mac_pool[slot].addr so that kstat shows 0
             * alternate MAC address if the slot is not used.
             * (But nxge_m_mmac_get returns the factory MAC even
             * when the slot is not used!)
             */
            bzero(mmac_info->mac_pool[slot].addr, ETHERADDRL);
            nxge_mmac_kstat_update(nxgep, slot, B_FALSE);
        } else {
            err = EIO;
        }
    } else {
        err = EINVAL;
    }

    mutex_exit(nxgep->genlock);
    return (err);
}

/*
 * Modify a mac address added by nxge_m_mmac_add() or nxge_m_mmac_reserve().
 */
static int
nxge_m_mmac_modify(void *arg, mac_multi_addr_t *maddr)
{
    p_nxge_t nxgep = arg;
    mac_addr_slot_t slot;
    nxge_mmac_t *mmac_info;
    int err = 0;
    nxge_status_t status;

    if (!mac_unicst_verify(nxgep->mach, maddr->mma_addr,
        maddr->mma_addrlen))
        return (EINVAL);

    slot = maddr->mma_slot;

    mutex_enter(nxgep->genlock);

    /*
     * Make sure that nxge is initialized, if _start() has
     * not been called.
     */
    if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
        status = nxge_init(nxgep);
        if (status != NXGE_OK) {
            mutex_exit(nxgep->genlock);
            return (ENXIO);
        }
    }

    mmac_info = &nxgep->nxge_mmac_info;
    if (slot < 1 || slot > mmac_info->num_mmac) {
        mutex_exit(nxgep->genlock);
        return (EINVAL);
    }
    if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) {
        /* Only update the software state if the HW accepted it */
        if ((err = nxge_altmac_set(nxgep, maddr->mma_addr, slot))
            == 0) {
            bcopy(maddr->mma_addr, mmac_info->mac_pool[slot].addr,
                ETHERADDRL);
            /*
             * Assume that the MAC passed down from the caller
             * is not a factory MAC address (the caller should
             * use mmac_remove followed by mmac_reserve to put
             * the factory MAC back on this slot).
             */
            mmac_info->mac_pool[slot].flags &= ~MMAC_VENDOR_ADDR;
            nxge_mmac_kstat_update(nxgep, slot, B_FALSE);
        }
    } else {
        err = EINVAL;
    }
    mutex_exit(nxgep->genlock);
    return (err);
}

/*
 * nxge_m_mmac_get() - Get the MAC address and other information
 * related to the slot. mma_flags should be set to 0 in the call.
 * Note: although kstat shows the MAC address as zero when a slot is
 * not used, Crossbow expects nxge_m_mmac_get to copy the factory MAC
 * to the caller as long as the slot is not using a user MAC address.
 * The following table shows the rules,
 *
 *				USED	VENDOR	mma_addr
 * ------------------------------------------------------------
 * (1) Slot uses a user MAC:	yes	no	user MAC
 * (2) Slot uses a factory MAC:	yes	yes	factory MAC
 * (3) Slot is not used but is
 *     factory MAC capable:	no	yes	factory MAC
 * (4) Slot is not used and is
 *     not factory MAC capable:	no	no	0
 * ------------------------------------------------------------
 */
static int
nxge_m_mmac_get(void *arg, mac_multi_addr_t *maddr)
{
    nxge_t *nxgep = arg;
    mac_addr_slot_t slot;
    nxge_mmac_t *mmac_info;
    nxge_status_t status;

    slot = maddr->mma_slot;

    mutex_enter(nxgep->genlock);

    /*
     * Make sure that nxge is initialized, if _start() has
     * not been called.
     */
    if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
        status = nxge_init(nxgep);
        if (status != NXGE_OK) {
            mutex_exit(nxgep->genlock);
            return (ENXIO);
        }
    }

    mmac_info = &nxgep->nxge_mmac_info;

    if (slot < 1 || slot > mmac_info->num_mmac) {
        mutex_exit(nxgep->genlock);
        return (EINVAL);
    }
    maddr->mma_flags = 0;
    if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED)
        maddr->mma_flags |= MMAC_SLOT_USED;

    if (mmac_info->mac_pool[slot].flags & MMAC_VENDOR_ADDR) {
        maddr->mma_flags |= MMAC_VENDOR_ADDR;
        bcopy(mmac_info->factory_mac_pool[slot],
            maddr->mma_addr, ETHERADDRL);
        maddr->mma_addrlen = ETHERADDRL;
    } else {
        if (maddr->mma_flags & MMAC_SLOT_USED) {
            bcopy(mmac_info->mac_pool[slot].addr,
                maddr->mma_addr, ETHERADDRL);
            maddr->mma_addrlen = ETHERADDRL;
        } else {
            bzero(maddr->mma_addr, ETHERADDRL);
            maddr->mma_addrlen = 0;
        }
    }
    mutex_exit(nxgep->genlock);
    return (0);
}
static boolean_t
nxge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
{
    nxge_t *nxgep = arg;
    uint32_t *txflags = cap_data;
    multiaddress_capab_t *mmacp = cap_data;

    switch (cap) {
    case MAC_CAPAB_HCKSUM:
        NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
            "==> nxge_m_getcapab: checksum %d", nxge_cksum_offload));
        if (nxge_cksum_offload <= 1) {
            *txflags = HCKSUM_INET_PARTIAL;
        }
        break;

    case MAC_CAPAB_POLL:
        /*
         * There's nothing for us to fill in; simply returning
         * B_TRUE, stating that we support polling, is sufficient.
         */
        break;

    case MAC_CAPAB_MULTIADDRESS:
        mmacp = (multiaddress_capab_t *)cap_data;
        mutex_enter(nxgep->genlock);

        mmacp->maddr_naddr = nxgep->nxge_mmac_info.num_mmac;
        mmacp->maddr_naddrfree = nxgep->nxge_mmac_info.naddrfree;
        mmacp->maddr_flag = 0;	/* 0 is required by PSARC2006/265 */
        /*
         * maddr_handle is driver's private data, passed back to
         * entry point functions as arg.
         */
        mmacp->maddr_handle = nxgep;
        mmacp->maddr_add = nxge_m_mmac_add;
        mmacp->maddr_remove = nxge_m_mmac_remove;
        mmacp->maddr_modify = nxge_m_mmac_modify;
        mmacp->maddr_get = nxge_m_mmac_get;
        mmacp->maddr_reserve = nxge_m_mmac_reserve;

        mutex_exit(nxgep->genlock);
        break;

    case MAC_CAPAB_LSO: {
        mac_capab_lso_t *cap_lso = cap_data;

        if (nxgep->soft_lso_enable) {
            if (nxge_cksum_offload <= 1) {
                cap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4;
                if (nxge_lso_max > NXGE_LSO_MAXLEN) {
                    nxge_lso_max = NXGE_LSO_MAXLEN;
                }
                cap_lso->lso_basic_tcp_ipv4.lso_max =
                    nxge_lso_max;
            }
            break;
        } else {
            return (B_FALSE);
        }
    }

#if defined(sun4v)
    case MAC_CAPAB_RINGS: {
        mac_capab_rings_t *mrings = (mac_capab_rings_t *)cap_data;

        /*
         * Only the service domain driver responds to
         * this capability request.
         */
        if (isLDOMservice(nxgep)) {
            mrings->mr_handle = (void *)nxgep;

            /*
             * No dynamic allocation of groups and
             * rings at this time. Shares dictate the
             * configuration.
             */
            mrings->mr_gadd_ring = NULL;
            mrings->mr_grem_ring = NULL;
            mrings->mr_rget = NULL;
            mrings->mr_gget = nxge_hio_group_get;

            if (mrings->mr_type == MAC_RING_TYPE_RX) {
                mrings->mr_rnum = 8; /* XXX */
                mrings->mr_gnum = 6; /* XXX */
            } else {
                mrings->mr_rnum = 8; /* XXX */
                mrings->mr_gnum = 0; /* XXX */
            }
        } else
            return (B_FALSE);
        break;
    }

    case MAC_CAPAB_SHARES: {
        mac_capab_share_t *mshares = (mac_capab_share_t *)cap_data;

        /*
         * Only the service domain driver responds to
         * this capability request.
         */
        if (isLDOMservice(nxgep)) {
            mshares->ms_snum = 3;
            mshares->ms_handle = (void *)nxgep;
            mshares->ms_salloc = nxge_hio_share_alloc;
            mshares->ms_sfree = nxge_hio_share_free;
            mshares->ms_sadd = NULL;
            mshares->ms_sremove = NULL;
            mshares->ms_squery = nxge_hio_share_query;
        } else
            return (B_FALSE);
        break;
    }
#endif
    default:
        return (B_FALSE);
    }
    return (B_TRUE);
}
static boolean_t
nxge_param_locked(mac_prop_id_t pr_num)
{
    /*
     * All adv_* parameters are locked (read-only) while
     * the device is in any sort of loopback mode ...
     */
    switch (pr_num) {
    case DLD_PROP_ADV_1000FDX_CAP:
    case DLD_PROP_EN_1000FDX_CAP:
    case DLD_PROP_ADV_1000HDX_CAP:
    case DLD_PROP_EN_1000HDX_CAP:
    case DLD_PROP_ADV_100FDX_CAP:
    case DLD_PROP_EN_100FDX_CAP:
    case DLD_PROP_ADV_100HDX_CAP:
    case DLD_PROP_EN_100HDX_CAP:
    case DLD_PROP_ADV_10FDX_CAP:
    case DLD_PROP_EN_10FDX_CAP:
    case DLD_PROP_ADV_10HDX_CAP:
    case DLD_PROP_EN_10HDX_CAP:
    case DLD_PROP_AUTONEG:
    case DLD_PROP_FLOWCTRL:
        return (B_TRUE);
    }
    return (B_FALSE);
}

/*
 * callback functions for set/get of properties
 */
static int
nxge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
    uint_t pr_valsize, const void *pr_val)
{
    nxge_t *nxgep = barg;
    p_nxge_param_t param_arr;
    p_nxge_stats_t statsp;
    int err = 0;
    uint8_t val;
    uint32_t cur_mtu, new_mtu, old_framesize;
    link_flowctrl_t fl;

    NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_setprop"));
    param_arr = nxgep->param_arr;
    statsp = nxgep->statsp;
    mutex_enter(nxgep->genlock);
    if (statsp->port_stats.lb_mode != nxge_lb_normal &&
        nxge_param_locked(pr_num)) {
        /*
         * All adv_* parameters are locked (read-only)
         * while the device is in any sort of loopback mode.
         */
        NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
            "==> nxge_m_setprop: loopback mode: read only"));
        mutex_exit(nxgep->genlock);
        return (EBUSY);
    }

    val = *(uint8_t *)pr_val;
    switch (pr_num) {
    case DLD_PROP_EN_1000FDX_CAP:
        nxgep->param_en_1000fdx = val;
        param_arr[param_anar_1000fdx].value = val;

        goto reprogram;

    case DLD_PROP_EN_100FDX_CAP:
        nxgep->param_en_100fdx = val;
        param_arr[param_anar_100fdx].value = val;

        goto reprogram;

    case DLD_PROP_EN_10FDX_CAP:
        nxgep->param_en_10fdx = val;
        param_arr[param_anar_10fdx].value = val;

        goto reprogram;

    case DLD_PROP_EN_1000HDX_CAP:
    case DLD_PROP_EN_100HDX_CAP:
    case DLD_PROP_EN_10HDX_CAP:
    case DLD_PROP_ADV_1000FDX_CAP:
    case DLD_PROP_ADV_1000HDX_CAP:
    case DLD_PROP_ADV_100FDX_CAP:
    case DLD_PROP_ADV_100HDX_CAP:
    case DLD_PROP_ADV_10FDX_CAP:
    case DLD_PROP_ADV_10HDX_CAP:
    case DLD_PROP_STATUS:
    case DLD_PROP_SPEED:
    case DLD_PROP_DUPLEX:
        err = EINVAL;	/* cannot set read-only properties */
        NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
            "==> nxge_m_setprop: read only property %d",
            pr_num));
        break;

    case DLD_PROP_AUTONEG:
        param_arr[param_autoneg].value = val;

        goto reprogram;
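    /*
     * Worked example for the MTU case below (the 22-byte figure is an
     * assumption based on the usual Ethernet overhead of a 14-byte
     * header, 4-byte VLAN tag and 4-byte CRC; the exact value is
     * whatever NXGE_EHEADER_VLAN_CRC expands to): setting the MTU to
     * 9000 on a jumbo-enabled port would program a maxframesize of
     * 9000 + 22 = 9022 bytes.
     */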
    case DLD_PROP_MTU:
        if (nxgep->nxge_mac_state == NXGE_MAC_STARTED) {
            err = EBUSY;
            break;
        }

        cur_mtu = nxgep->mac.default_mtu;
        bcopy(pr_val, &new_mtu, sizeof (new_mtu));
        NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
            "==> nxge_m_setprop: set MTU: %d is_jumbo %d",
            new_mtu, nxgep->mac.is_jumbo));

        if (new_mtu == cur_mtu) {
            err = 0;
            break;
        }
        if (new_mtu < NXGE_DEFAULT_MTU ||
            new_mtu > NXGE_MAXIMUM_MTU) {
            err = EINVAL;
            break;
        }

        if ((new_mtu > NXGE_DEFAULT_MTU) &&
            !nxgep->mac.is_jumbo) {
            err = EINVAL;
            break;
        }

        old_framesize = (uint32_t)nxgep->mac.maxframesize;
        nxgep->mac.maxframesize = (uint16_t)
            (new_mtu + NXGE_EHEADER_VLAN_CRC);
        if (nxge_mac_set_framesize(nxgep)) {
            nxgep->mac.maxframesize =
                (uint16_t)old_framesize;
            err = EINVAL;
            break;
        }

        err = mac_maxsdu_update(nxgep->mach, new_mtu);
        if (err) {
            nxgep->mac.maxframesize =
                (uint16_t)old_framesize;
            err = EINVAL;
            break;
        }

        nxgep->mac.default_mtu = new_mtu;
        NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
            "==> nxge_m_setprop: set MTU: %d maxframe %d",
            new_mtu, nxgep->mac.maxframesize));
        break;

    case DLD_PROP_FLOWCTRL:
        bcopy(pr_val, &fl, sizeof (fl));
        switch (fl) {
        default:
            err = EINVAL;
            break;

        case LINK_FLOWCTRL_NONE:
            param_arr[param_anar_pause].value = 0;
            break;

        case LINK_FLOWCTRL_RX:
            param_arr[param_anar_pause].value = 1;
            break;

        case LINK_FLOWCTRL_TX:
        case LINK_FLOWCTRL_BI:
            err = EINVAL;
            break;
        }

reprogram:
        if (err == 0) {
            if (!nxge_param_link_update(nxgep)) {
                err = EINVAL;
            }
        }
        break;
    case DLD_PROP_PRIVATE:
        NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
            "==> nxge_m_setprop: private property"));
        err = nxge_set_priv_prop(nxgep, pr_name, pr_valsize,
            pr_val);
        break;

    default:
        err = ENOTSUP;
        break;
    }

    mutex_exit(nxgep->genlock);

    NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
        "<== nxge_m_setprop (return %d)", err));
    return (err);
}
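/*
 * Note on flow control (a summary of the handlers around it, not an
 * addition to them): this driver only models receive-side pause via
 * param_anar_pause, so nxge_m_setprop() above accepts
 * LINK_FLOWCTRL_NONE and LINK_FLOWCTRL_RX but rejects
 * LINK_FLOWCTRL_TX and LINK_FLOWCTRL_BI with EINVAL, and
 * nxge_m_getprop() below reports either LINK_FLOWCTRL_RX or
 * LINK_FLOWCTRL_NONE accordingly.
 */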
static int
nxge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
    uint_t pr_flags, uint_t pr_valsize, void *pr_val)
{
    nxge_t *nxgep = barg;
    p_nxge_param_t param_arr = nxgep->param_arr;
    p_nxge_stats_t statsp = nxgep->statsp;
    int err = 0;
    link_flowctrl_t fl;
    uint64_t tmp = 0;
    link_state_t ls;
    boolean_t is_default = (pr_flags & DLD_DEFAULT);

    NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
        "==> nxge_m_getprop: pr_num %d", pr_num));

    if (pr_valsize == 0)
        return (EINVAL);

    if ((is_default) && (pr_num != DLD_PROP_PRIVATE)) {
        err = nxge_get_def_val(nxgep, pr_num, pr_valsize, pr_val);
        return (err);
    }

    bzero(pr_val, pr_valsize);
    switch (pr_num) {
    case DLD_PROP_DUPLEX:
        *(uint8_t *)pr_val = statsp->mac_stats.link_duplex;
        NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
            "==> nxge_m_getprop: duplex mode %d",
            *(uint8_t *)pr_val));
        break;

    case DLD_PROP_SPEED:
        if (pr_valsize < sizeof (uint64_t))
            return (EINVAL);
        tmp = statsp->mac_stats.link_speed * 1000000ull;
        bcopy(&tmp, pr_val, sizeof (tmp));
        break;

    case DLD_PROP_STATUS:
        if (pr_valsize < sizeof (link_state_t))
            return (EINVAL);
        if (!statsp->mac_stats.link_up)
            ls = LINK_STATE_DOWN;
        else
            ls = LINK_STATE_UP;
        bcopy(&ls, pr_val, sizeof (ls));
        break;

    case DLD_PROP_AUTONEG:
        *(uint8_t *)pr_val =
            param_arr[param_autoneg].value;
        break;

    case DLD_PROP_FLOWCTRL:
        if (pr_valsize < sizeof (link_flowctrl_t))
            return (EINVAL);

        fl = LINK_FLOWCTRL_NONE;
        if (param_arr[param_anar_pause].value) {
            fl = LINK_FLOWCTRL_RX;
        }
        bcopy(&fl, pr_val, sizeof (fl));
        break;

    case DLD_PROP_ADV_1000FDX_CAP:
        *(uint8_t *)pr_val =
            param_arr[param_anar_1000fdx].value;
        break;

    case DLD_PROP_EN_1000FDX_CAP:
        *(uint8_t *)pr_val = nxgep->param_en_1000fdx;
        break;

    case DLD_PROP_ADV_100FDX_CAP:
        *(uint8_t *)pr_val =
            param_arr[param_anar_100fdx].value;
        break;

    case DLD_PROP_EN_100FDX_CAP:
        *(uint8_t *)pr_val = nxgep->param_en_100fdx;
        break;

    case DLD_PROP_ADV_10FDX_CAP:
        *(uint8_t *)pr_val =
            param_arr[param_anar_10fdx].value;
        break;

    case DLD_PROP_EN_10FDX_CAP:
        *(uint8_t *)pr_val = nxgep->param_en_10fdx;
        break;

    case DLD_PROP_EN_1000HDX_CAP:
    case DLD_PROP_EN_100HDX_CAP:
    case DLD_PROP_EN_10HDX_CAP:
    case DLD_PROP_ADV_1000HDX_CAP:
    case DLD_PROP_ADV_100HDX_CAP:
    case DLD_PROP_ADV_10HDX_CAP:
        err = ENOTSUP;
        break;

    case DLD_PROP_PRIVATE:
        err = nxge_get_priv_prop(nxgep, pr_name, pr_flags,
            pr_valsize, pr_val);
        break;
    default:
        err = EINVAL;
        break;
    }

    NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_getprop"));

    return (err);
}
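/*
 * The private properties handled below are the driver-specific names
 * such as _accept_jumbo, _rxdma_intr_time and _soft_lso_enable.
 * Assuming the Brussels (dladm link property) framework that this
 * DLD_PROP_PRIVATE plumbing belongs to, they would typically be
 * driven from userland with something like
 *
 *	dladm set-linkprop -p _soft_lso_enable=1 nxge0
 *	dladm show-linkprop -p _rxdma_intr_time nxge0
 *
 * (command syntax shown for illustration only).
 */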
/* ARGSUSED */
static int
nxge_set_priv_prop(p_nxge_t nxgep, const char *pr_name, uint_t pr_valsize,
    const void *pr_val)
{
    p_nxge_param_t param_arr = nxgep->param_arr;
    int err = 0;
    long result;

    NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
        "==> nxge_set_priv_prop: name %s", pr_name));

    if (strcmp(pr_name, "_accept_jumbo") == 0) {
        (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
        NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
            "<== nxge_set_priv_prop: name %s "
            "pr_val %s result %d "
            "param %d is_jumbo %d",
            pr_name, pr_val, result,
            param_arr[param_accept_jumbo].value,
            nxgep->mac.is_jumbo));

        if (result > 1 || result < 0) {
            err = EINVAL;
        } else {
            if (nxgep->mac.is_jumbo ==
                (uint32_t)result) {
                NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
                    "no change (%d %d)",
                    nxgep->mac.is_jumbo,
                    result));
                return (0);
            }

            /* Only record the new value if it passed validation */
            param_arr[param_accept_jumbo].value = result;
            nxgep->mac.is_jumbo = B_FALSE;
            if (result) {
                nxgep->mac.is_jumbo = B_TRUE;
            }
        }

        NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
            "<== nxge_set_priv_prop: name %s (value %d) is_jumbo %d",
            pr_name, result, nxgep->mac.is_jumbo));

        return (err);
    }

    /* Blanking */
    if (strcmp(pr_name, "_rxdma_intr_time") == 0) {
        err = nxge_param_rx_intr_time(nxgep, NULL, NULL,
            (char *)pr_val,
            (caddr_t)&param_arr[param_rxdma_intr_time]);
        if (err) {
            NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
                "<== nxge_set_priv_prop: "
                "unable to set (%s)", pr_name));
            err = EINVAL;
        } else {
            err = 0;
            NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
                "<== nxge_set_priv_prop: "
                "set (%s)", pr_name));
        }

        NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
            "<== nxge_set_priv_prop: name %s", pr_name));

        return (err);
    }

    if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) {
        err = nxge_param_rx_intr_pkts(nxgep, NULL, NULL,
            (char *)pr_val,
            (caddr_t)&param_arr[param_rxdma_intr_pkts]);
        if (err) {
            NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
                "<== nxge_set_priv_prop: "
                "unable to set (%s)", pr_name));
            err = EINVAL;
        } else {
            err = 0;
            NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
                "<== nxge_set_priv_prop: "
                "set (%s)", pr_name));
        }

        NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
            "<== nxge_set_priv_prop: name %s", pr_name));

        return (err);
    }

    /* Classification */
    if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) {
        if (pr_val == NULL) {
            err = EINVAL;
            return (err);
        }
        (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);

        err = nxge_param_set_ip_opt(nxgep, NULL,
            NULL, (char *)pr_val,
            (caddr_t)&param_arr[param_class_opt_ipv4_tcp]);

        NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
            "<== nxge_set_priv_prop: name %s (value 0x%x)",
            pr_name, result));

        return (err);
    }

    if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) {
        if (pr_val == NULL) {
            err = EINVAL;
            return (err);
        }
        (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);

        err = nxge_param_set_ip_opt(nxgep, NULL,
            NULL, (char *)pr_val,
            (caddr_t)&param_arr[param_class_opt_ipv4_udp]);

        NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
            "<== nxge_set_priv_prop: name %s (value 0x%x)",
            pr_name, result));

        return (err);
    }
    if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) {
        if (pr_val == NULL) {
            err = EINVAL;
            return (err);
        }
        (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);

        err = nxge_param_set_ip_opt(nxgep, NULL,
            NULL, (char *)pr_val,
            (caddr_t)&param_arr[param_class_opt_ipv4_ah]);

        NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
            "<== nxge_set_priv_prop: name %s (value 0x%x)",
            pr_name, result));

        return (err);
    }
    if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) {
        if (pr_val == NULL) {
            err = EINVAL;
            return (err);
        }
        (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);

        err = nxge_param_set_ip_opt(nxgep, NULL,
            NULL, (char *)pr_val,
            (caddr_t)&param_arr[param_class_opt_ipv4_sctp]);

        NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
            "<== nxge_set_priv_prop: name %s (value 0x%x)",
            pr_name, result));

        return (err);
    }

    if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) {
        if (pr_val == NULL) {
            err = EINVAL;
            return (err);
        }
        (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);

        err = nxge_param_set_ip_opt(nxgep, NULL,
            NULL, (char *)pr_val,
            (caddr_t)&param_arr[param_class_opt_ipv6_tcp]);

        NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
            "<== nxge_set_priv_prop: name %s (value 0x%x)",
            pr_name, result));

        return (err);
    }

    if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) {
        if (pr_val == NULL) {
            err = EINVAL;
            return (err);
        }
        (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);

        err = nxge_param_set_ip_opt(nxgep, NULL,
            NULL, (char *)pr_val,
            (caddr_t)&param_arr[param_class_opt_ipv6_udp]);

        NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
            "<== nxge_set_priv_prop: name %s (value 0x%x)",
            pr_name, result));

        return (err);
    }
    if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) {
        if (pr_val == NULL) {
            err = EINVAL;
            return (err);
        }
        (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);

        err = nxge_param_set_ip_opt(nxgep, NULL,
            NULL, (char *)pr_val,
            (caddr_t)&param_arr[param_class_opt_ipv6_ah]);

        NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
            "<== nxge_set_priv_prop: name %s (value 0x%x)",
            pr_name, result));

        return (err);
    }
    if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) {
        if (pr_val == NULL) {
            err = EINVAL;
            return (err);
        }
        (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);

        err = nxge_param_set_ip_opt(nxgep, NULL,
            NULL, (char *)pr_val,
            (caddr_t)&param_arr[param_class_opt_ipv6_sctp]);

        NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
            "<== nxge_set_priv_prop: name %s (value 0x%x)",
            pr_name, result));

        return (err);
    }

    if (strcmp(pr_name, "_soft_lso_enable") == 0) {
        if (nxgep->nxge_mac_state == NXGE_MAC_STARTED) {
            NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
                "==> nxge_set_priv_prop: name %s (busy)", pr_name));
            err = EBUSY;
            return (err);
        }
        if (pr_val == NULL) {
            NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
                "==> nxge_set_priv_prop: name %s (null)", pr_name));
            err = EINVAL;
            return (err);
        }

        (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
        NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
            "<== nxge_set_priv_prop: name %s "
            "(lso %d pr_val %s value %d)",
            pr_name, nxgep->soft_lso_enable, pr_val, result));

        if (result > 1 || result < 0) {
            err = EINVAL;
        } else {
            if (nxgep->soft_lso_enable == (uint32_t)result) {
                NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
                    "no change (%d %d)",
                    nxgep->soft_lso_enable, result));
                return (0);
            }

            /* Only record the new value if it passed validation */
            nxgep->soft_lso_enable = (int)result;
        }

        NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
            "<== nxge_set_priv_prop: name %s (value %d)",
            pr_name, result));

        return (err);
    }
    if (strcmp(pr_name, "_adv_10gfdx_cap") == 0) {
        err = nxge_param_set_mac(nxgep, NULL, NULL, (char *)pr_val,
            (caddr_t)&param_arr[param_anar_10gfdx]);
        return (err);
    }
    if (strcmp(pr_name, "_adv_pause_cap") == 0) {
        err = nxge_param_set_mac(nxgep, NULL, NULL, (char *)pr_val,
            (caddr_t)&param_arr[param_anar_pause]);
        return (err);
    }

    return (EINVAL);
}
static int
nxge_get_priv_prop(p_nxge_t nxgep, const char *pr_name, uint_t pr_flags,
    uint_t pr_valsize, void *pr_val)
{
    p_nxge_param_t param_arr = nxgep->param_arr;
    char valstr[MAXNAMELEN];
    int err = EINVAL;
    uint_t strsize;
    boolean_t is_default = (pr_flags & DLD_DEFAULT);

    NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
        "==> nxge_get_priv_prop: property %s", pr_name));

    /* function number */
    if (strcmp(pr_name, "_function_number") == 0) {
        if (is_default)
            return (ENOTSUP);
        (void) snprintf(valstr, sizeof (valstr), "%d",
            nxgep->function_num);
        NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
            "==> nxge_get_priv_prop: name %s "
            "(value %d valstr %s)",
            pr_name, nxgep->function_num, valstr));

        err = 0;
        goto done;
    }

    /* Neptune firmware version */
    if (strcmp(pr_name, "_fw_version") == 0) {
        if (is_default)
            return (ENOTSUP);
        (void) snprintf(valstr, sizeof (valstr), "%s",
            nxgep->vpd_info.ver);
        NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
            "==> nxge_get_priv_prop: name %s "
            "(value %s valstr %s)",
            pr_name, nxgep->vpd_info.ver, valstr));

        err = 0;
        goto done;
    }

    /* port PHY mode */
    if (strcmp(pr_name, "_port_mode") == 0) {
        if (is_default)
            return (ENOTSUP);
        switch (nxgep->mac.portmode) {
        case PORT_1G_COPPER:
            (void) snprintf(valstr, sizeof (valstr), "1G copper %s",
                nxgep->hot_swappable_phy ?
                "[hot swappable]" : "");
            break;
        case PORT_1G_FIBER:
            (void) snprintf(valstr, sizeof (valstr), "1G fiber %s",
                nxgep->hot_swappable_phy ?
                "[hot swappable]" : "");
            break;
        case PORT_10G_COPPER:
            (void) snprintf(valstr, sizeof (valstr),
                "10G copper %s",
                nxgep->hot_swappable_phy ?
                "[hot swappable]" : "");
            break;
        case PORT_10G_FIBER:
            (void) snprintf(valstr, sizeof (valstr), "10G fiber %s",
                nxgep->hot_swappable_phy ?
                "[hot swappable]" : "");
            break;
        case PORT_10G_SERDES:
            (void) snprintf(valstr, sizeof (valstr),
                "10G serdes %s", nxgep->hot_swappable_phy ?
                "[hot swappable]" : "");
            break;
        case PORT_1G_SERDES:
            (void) snprintf(valstr, sizeof (valstr), "1G serdes %s",
                nxgep->hot_swappable_phy ?
                "[hot swappable]" : "");
            break;
        case PORT_1G_RGMII_FIBER:
            (void) snprintf(valstr, sizeof (valstr),
                "1G rgmii fiber %s", nxgep->hot_swappable_phy ?
                "[hot swappable]" : "");
            break;
        case PORT_HSP_MODE:
            (void) snprintf(valstr, sizeof (valstr),
                "phy not present[hot swappable]");
            break;
        default:
            (void) snprintf(valstr, sizeof (valstr), "unknown %s",
                nxgep->hot_swappable_phy ?
                "[hot swappable]" : "");
            break;
        }

        NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
            "==> nxge_get_priv_prop: name %s (value %s)",
            pr_name, valstr));

        err = 0;
        goto done;
    }
5242 "yes" : "no"); 5243 5244 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5245 "==> nxge_get_priv_prop: name %s " 5246 "(value %d valstr %s)", 5247 pr_name, nxgep->hot_swappable_phy, valstr)); 5248 5249 err = 0; 5250 goto done; 5251 } 5252 5253 5254 /* accept jumbo */ 5255 if (strcmp(pr_name, "_accept_jumbo") == 0) { 5256 if (is_default) 5257 (void) snprintf(valstr, sizeof (valstr), "%d", 0); 5258 else 5259 (void) snprintf(valstr, sizeof (valstr), 5260 "%d", nxgep->mac.is_jumbo); 5261 err = 0; 5262 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5263 "==> nxge_get_priv_prop: name %s (value %d (%d, %d))", 5264 pr_name, 5265 (uint32_t)param_arr[param_accept_jumbo].value, 5266 nxgep->mac.is_jumbo, 5267 nxge_jumbo_enable)); 5268 5269 goto done; 5270 } 5271 5272 /* Receive Interrupt Blanking Parameters */ 5273 if (strcmp(pr_name, "_rxdma_intr_time") == 0) { 5274 err = 0; 5275 if (is_default) { 5276 (void) snprintf(valstr, sizeof (valstr), 5277 "%d", RXDMA_RCR_TO_DEFAULT); 5278 goto done; 5279 } 5280 5281 (void) snprintf(valstr, sizeof (valstr), "%d", 5282 nxgep->intr_timeout); 5283 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5284 "==> nxge_get_priv_prop: name %s (value %d)", 5285 pr_name, 5286 (uint32_t)nxgep->intr_timeout)); 5287 goto done; 5288 } 5289 5290 if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) { 5291 err = 0; 5292 if (is_default) { 5293 (void) snprintf(valstr, sizeof (valstr), 5294 "%d", RXDMA_RCR_PTHRES_DEFAULT); 5295 goto done; 5296 } 5297 (void) snprintf(valstr, sizeof (valstr), "%d", 5298 nxgep->intr_threshold); 5299 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5300 "==> nxge_get_priv_prop: name %s (value %d)", 5301 pr_name, (uint32_t)nxgep->intr_threshold)); 5302 5303 goto done; 5304 } 5305 5306 /* Classification and Load Distribution Configuration */ 5307 if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) { 5308 if (is_default) { 5309 (void) snprintf(valstr, sizeof (valstr), "%x", 5310 NXGE_CLASS_FLOW_GEN_SERVER); 5311 err = 0; 5312 goto done; 5313 } 5314 err = nxge_dld_get_ip_opt(nxgep, 5315 (caddr_t)¶m_arr[param_class_opt_ipv4_tcp]); 5316 5317 (void) snprintf(valstr, sizeof (valstr), "%x", 5318 (int)param_arr[param_class_opt_ipv4_tcp].value); 5319 5320 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5321 "==> nxge_get_priv_prop: %s", valstr)); 5322 goto done; 5323 } 5324 5325 if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) { 5326 if (is_default) { 5327 (void) snprintf(valstr, sizeof (valstr), "%x", 5328 NXGE_CLASS_FLOW_GEN_SERVER); 5329 err = 0; 5330 goto done; 5331 } 5332 err = nxge_dld_get_ip_opt(nxgep, 5333 (caddr_t)¶m_arr[param_class_opt_ipv4_udp]); 5334 5335 (void) snprintf(valstr, sizeof (valstr), "%x", 5336 (int)param_arr[param_class_opt_ipv4_udp].value); 5337 5338 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5339 "==> nxge_get_priv_prop: %s", valstr)); 5340 goto done; 5341 } 5342 if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) { 5343 if (is_default) { 5344 (void) snprintf(valstr, sizeof (valstr), "%x", 5345 NXGE_CLASS_FLOW_GEN_SERVER); 5346 err = 0; 5347 goto done; 5348 } 5349 err = nxge_dld_get_ip_opt(nxgep, 5350 (caddr_t)¶m_arr[param_class_opt_ipv4_ah]); 5351 5352 (void) snprintf(valstr, sizeof (valstr), "%x", 5353 (int)param_arr[param_class_opt_ipv4_ah].value); 5354 5355 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5356 "==> nxge_get_priv_prop: %s", valstr)); 5357 goto done; 5358 } 5359 5360 if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) { 5361 if (is_default) { 5362 (void) snprintf(valstr, sizeof (valstr), "%x", 5363 NXGE_CLASS_FLOW_GEN_SERVER); 5364 err = 0; 5365 goto done; 5366 } 5367 err = nxge_dld_get_ip_opt(nxgep, 5368 
(caddr_t)¶m_arr[param_class_opt_ipv4_sctp]); 5369 5370 (void) snprintf(valstr, sizeof (valstr), "%x", 5371 (int)param_arr[param_class_opt_ipv4_sctp].value); 5372 5373 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5374 "==> nxge_get_priv_prop: %s", valstr)); 5375 goto done; 5376 } 5377 5378 if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) { 5379 if (is_default) { 5380 (void) snprintf(valstr, sizeof (valstr), "%x", 5381 NXGE_CLASS_FLOW_GEN_SERVER); 5382 err = 0; 5383 goto done; 5384 } 5385 err = nxge_dld_get_ip_opt(nxgep, 5386 (caddr_t)¶m_arr[param_class_opt_ipv6_tcp]); 5387 5388 (void) snprintf(valstr, sizeof (valstr), "%x", 5389 (int)param_arr[param_class_opt_ipv6_tcp].value); 5390 5391 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5392 "==> nxge_get_priv_prop: %s", valstr)); 5393 goto done; 5394 } 5395 5396 if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) { 5397 if (is_default) { 5398 (void) snprintf(valstr, sizeof (valstr), "%x", 5399 NXGE_CLASS_FLOW_GEN_SERVER); 5400 err = 0; 5401 goto done; 5402 } 5403 err = nxge_dld_get_ip_opt(nxgep, 5404 (caddr_t)¶m_arr[param_class_opt_ipv6_udp]); 5405 5406 (void) snprintf(valstr, sizeof (valstr), "%x", 5407 (int)param_arr[param_class_opt_ipv6_udp].value); 5408 5409 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5410 "==> nxge_get_priv_prop: %s", valstr)); 5411 goto done; 5412 } 5413 5414 if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) { 5415 if (is_default) { 5416 (void) snprintf(valstr, sizeof (valstr), "%x", 5417 NXGE_CLASS_FLOW_GEN_SERVER); 5418 err = 0; 5419 goto done; 5420 } 5421 err = nxge_dld_get_ip_opt(nxgep, 5422 (caddr_t)¶m_arr[param_class_opt_ipv6_ah]); 5423 5424 (void) snprintf(valstr, sizeof (valstr), "%x", 5425 (int)param_arr[param_class_opt_ipv6_ah].value); 5426 5427 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5428 "==> nxge_get_priv_prop: %s", valstr)); 5429 goto done; 5430 } 5431 5432 if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) { 5433 if (is_default) { 5434 (void) snprintf(valstr, sizeof (valstr), "%x", 5435 NXGE_CLASS_FLOW_GEN_SERVER); 5436 err = 0; 5437 goto done; 5438 } 5439 err = nxge_dld_get_ip_opt(nxgep, 5440 (caddr_t)¶m_arr[param_class_opt_ipv6_sctp]); 5441 5442 (void) snprintf(valstr, sizeof (valstr), "%x", 5443 (int)param_arr[param_class_opt_ipv6_sctp].value); 5444 5445 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5446 "==> nxge_get_priv_prop: %s", valstr)); 5447 goto done; 5448 } 5449 5450 /* Software LSO */ 5451 if (strcmp(pr_name, "_soft_lso_enable") == 0) { 5452 if (is_default) { 5453 (void) snprintf(valstr, sizeof (valstr), "%d", 0); 5454 err = 0; 5455 goto done; 5456 } 5457 (void) snprintf(valstr, sizeof (valstr), 5458 "%d", nxgep->soft_lso_enable); 5459 err = 0; 5460 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5461 "==> nxge_get_priv_prop: name %s (value %d)", 5462 pr_name, nxgep->soft_lso_enable)); 5463 5464 goto done; 5465 } 5466 if (strcmp(pr_name, "_adv_10gfdx_cap") == 0) { 5467 err = 0; 5468 if (is_default || 5469 nxgep->param_arr[param_anar_10gfdx].value != 0) { 5470 (void) snprintf(valstr, sizeof (valstr), "%d", 1); 5471 goto done; 5472 } else { 5473 (void) snprintf(valstr, sizeof (valstr), "%d", 0); 5474 goto done; 5475 } 5476 } 5477 if (strcmp(pr_name, "_adv_pause_cap") == 0) { 5478 err = 0; 5479 if (is_default || 5480 nxgep->param_arr[param_anar_pause].value != 0) { 5481 (void) snprintf(valstr, sizeof (valstr), "%d", 1); 5482 goto done; 5483 } else { 5484 (void) snprintf(valstr, sizeof (valstr), "%d", 0); 5485 goto done; 5486 } 5487 } 5488 5489 done: 5490 if (err == 0) { 5491 strsize = (uint_t)strlen(valstr); 5492 if (pr_valsize < strsize) { 5493 err = ENOBUFS; 5494 } else 
            (void) strlcpy(pr_val, valstr, pr_valsize);
        }
    }

    NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
        "<== nxge_get_priv_prop: return %d", err));
    return (err);
}

/*
 * Module loading and removing entry points.
 */

DDI_DEFINE_STREAM_OPS(nxge_dev_ops, nulldev, nulldev, nxge_attach, nxge_detach,
    nodev, NULL, D_MP, NULL);

#define	NXGE_DESC_VER	"Sun NIU 10Gb Ethernet"

/*
 * Module linkage information for the kernel.
 */
static struct modldrv nxge_modldrv = {
    &mod_driverops,
    NXGE_DESC_VER,
    &nxge_dev_ops
};

static struct modlinkage modlinkage = {
    MODREV_1, (void *) &nxge_modldrv, NULL
};

int
_init(void)
{
    int status;

    NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _init"));
    mac_init_ops(&nxge_dev_ops, "nxge");
    status = ddi_soft_state_init(&nxge_list, sizeof (nxge_t), 0);
    if (status != 0) {
        NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
            "failed to init device soft state"));
        goto _init_exit;
    }
    status = mod_install(&modlinkage);
    if (status != 0) {
        ddi_soft_state_fini(&nxge_list);
        NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "Mod install failed"));
        goto _init_exit;
    }

    MUTEX_INIT(&nxge_common_lock, NULL, MUTEX_DRIVER, NULL);

_init_exit:
    NXGE_DEBUG_MSG((NULL, MOD_CTL, "_init status = 0x%X", status));

    return (status);
}

int
_fini(void)
{
    int status;

    NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini"));

    NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini: mod_remove"));

    if (nxge_mblks_pending)
        return (EBUSY);

    status = mod_remove(&modlinkage);
    if (status != DDI_SUCCESS) {
        NXGE_DEBUG_MSG((NULL, MOD_CTL,
            "Module removal failed 0x%08x",
            status));
        goto _fini_exit;
    }

    mac_fini_ops(&nxge_dev_ops);

    ddi_soft_state_fini(&nxge_list);

    MUTEX_DESTROY(&nxge_common_lock);

_fini_exit:
    NXGE_DEBUG_MSG((NULL, MOD_CTL, "_fini status = 0x%08x", status));

    return (status);
}

int
_info(struct modinfo *modinfop)
{
    int status;

    NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _info"));
    status = mod_info(&modlinkage, modinfop);
    NXGE_DEBUG_MSG((NULL, MOD_CTL, " _info status = 0x%X", status));

    return (status);
}
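/*
 * Interrupt-type selection summary for nxge_add_intrs() below
 * (derived from the switch on nxge_msi_enable; the example values are
 * illustrative): with nxge_msi_enable = 2 on a platform advertising
 * DDI_INTR_TYPE_MSIX, MSI-X is chosen; with nxge_msi_enable = 1 and
 * only DDI_INTR_TYPE_MSI | DDI_INTR_TYPE_FIXED advertised, MSI is
 * chosen; any other setting, or no MSI/MSI-X support, falls back to
 * the FIXED (INTx emulation) type.
 */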
nxgep->nxge_intr_type.intr_types = intr_types; 5631 5632 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 5633 "ddi_intr_get_supported_types: 0x%08x", intr_types)); 5634 5635 /* 5636 * Solaris MSIX is not supported yet. use MSI for now. 5637 * nxge_msi_enable (1): 5638 * 1 - MSI 2 - MSI-X others - FIXED 5639 */ 5640 switch (nxge_msi_enable) { 5641 default: 5642 type = DDI_INTR_TYPE_FIXED; 5643 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: " 5644 "use fixed (intx emulation) type %08x", 5645 type)); 5646 break; 5647 5648 case 2: 5649 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: " 5650 "ddi_intr_get_supported_types: 0x%08x", intr_types)); 5651 if (intr_types & DDI_INTR_TYPE_MSIX) { 5652 type = DDI_INTR_TYPE_MSIX; 5653 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 5654 "ddi_intr_get_supported_types: MSIX 0x%08x", 5655 type)); 5656 } else if (intr_types & DDI_INTR_TYPE_MSI) { 5657 type = DDI_INTR_TYPE_MSI; 5658 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 5659 "ddi_intr_get_supported_types: MSI 0x%08x", 5660 type)); 5661 } else if (intr_types & DDI_INTR_TYPE_FIXED) { 5662 type = DDI_INTR_TYPE_FIXED; 5663 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: " 5664 "ddi_intr_get_supported_types: MSXED0x%08x", 5665 type)); 5666 } 5667 break; 5668 5669 case 1: 5670 if (intr_types & DDI_INTR_TYPE_MSI) { 5671 type = DDI_INTR_TYPE_MSI; 5672 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: " 5673 "ddi_intr_get_supported_types: MSI 0x%08x", 5674 type)); 5675 } else if (intr_types & DDI_INTR_TYPE_MSIX) { 5676 type = DDI_INTR_TYPE_MSIX; 5677 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 5678 "ddi_intr_get_supported_types: MSIX 0x%08x", 5679 type)); 5680 } else if (intr_types & DDI_INTR_TYPE_FIXED) { 5681 type = DDI_INTR_TYPE_FIXED; 5682 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 5683 "ddi_intr_get_supported_types: MSXED0x%08x", 5684 type)); 5685 } 5686 } 5687 5688 nxgep->nxge_intr_type.intr_type = type; 5689 if ((type == DDI_INTR_TYPE_MSIX || type == DDI_INTR_TYPE_MSI || 5690 type == DDI_INTR_TYPE_FIXED) && 5691 nxgep->nxge_intr_type.niu_msi_enable) { 5692 if ((status = nxge_add_intrs_adv(nxgep)) != DDI_SUCCESS) { 5693 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 5694 " nxge_add_intrs: " 5695 " nxge_add_intrs_adv failed: status 0x%08x", 5696 status)); 5697 return (status); 5698 } else { 5699 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 5700 "interrupts registered : type %d", type)); 5701 nxgep->nxge_intr_type.intr_registered = B_TRUE; 5702 5703 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 5704 "\nAdded advanced nxge add_intr_adv " 5705 "intr type 0x%x\n", type)); 5706 5707 return (status); 5708 } 5709 } 5710 5711 if (!nxgep->nxge_intr_type.intr_registered) { 5712 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_add_intrs: " 5713 "failed to register interrupts")); 5714 return (NXGE_ERROR | NXGE_DDI_FAILED); 5715 } 5716 5717 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_add_intrs")); 5718 return (status); 5719 } 5720 5721 /*ARGSUSED*/ 5722 static nxge_status_t 5723 nxge_add_soft_intrs(p_nxge_t nxgep) 5724 { 5725 5726 int ddi_status = DDI_SUCCESS; 5727 nxge_status_t status = NXGE_OK; 5728 5729 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_soft_intrs")); 5730 5731 nxgep->resched_id = NULL; 5732 nxgep->resched_running = B_FALSE; 5733 ddi_status = ddi_add_softintr(nxgep->dip, DDI_SOFTINT_LOW, 5734 &nxgep->resched_id, 5735 NULL, NULL, nxge_reschedule, (caddr_t)nxgep); 5736 if (ddi_status != DDI_SUCCESS) { 5737 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== 
nxge_add_soft_intrs: " 5738 "ddi_add_softintrs failed: status 0x%08x", 5739 ddi_status)); 5740 return (NXGE_ERROR | NXGE_DDI_FAILED); 5741 } 5742 5743 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_ddi_add_soft_intrs")); 5744 5745 return (status); 5746 } 5747 5748 static nxge_status_t 5749 nxge_add_intrs_adv(p_nxge_t nxgep) 5750 { 5751 int intr_type; 5752 p_nxge_intr_t intrp; 5753 5754 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv")); 5755 5756 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 5757 intr_type = intrp->intr_type; 5758 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv: type 0x%x", 5759 intr_type)); 5760 5761 switch (intr_type) { 5762 case DDI_INTR_TYPE_MSI: /* 0x2 */ 5763 case DDI_INTR_TYPE_MSIX: /* 0x4 */ 5764 return (nxge_add_intrs_adv_type(nxgep, intr_type)); 5765 5766 case DDI_INTR_TYPE_FIXED: /* 0x1 */ 5767 return (nxge_add_intrs_adv_type_fix(nxgep, intr_type)); 5768 5769 default: 5770 return (NXGE_ERROR); 5771 } 5772 } 5773 5774 5775 /*ARGSUSED*/ 5776 static nxge_status_t 5777 nxge_add_intrs_adv_type(p_nxge_t nxgep, uint32_t int_type) 5778 { 5779 dev_info_t *dip = nxgep->dip; 5780 p_nxge_ldg_t ldgp; 5781 p_nxge_intr_t intrp; 5782 uint_t *inthandler; 5783 void *arg1, *arg2; 5784 int behavior; 5785 int nintrs, navail, nrequest; 5786 int nactual, nrequired; 5787 int inum = 0; 5788 int x, y; 5789 int ddi_status = DDI_SUCCESS; 5790 nxge_status_t status = NXGE_OK; 5791 5792 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type")); 5793 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 5794 intrp->start_inum = 0; 5795 5796 ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs); 5797 if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) { 5798 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 5799 "ddi_intr_get_nintrs() failed, status: 0x%x%, " 5800 "nintrs: %d", ddi_status, nintrs)); 5801 return (NXGE_ERROR | NXGE_DDI_FAILED); 5802 } 5803 5804 ddi_status = ddi_intr_get_navail(dip, int_type, &navail); 5805 if ((ddi_status != DDI_SUCCESS) || (navail == 0)) { 5806 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 5807 "ddi_intr_get_navail() failed, status: 0x%x%, " 5808 "nintrs: %d", ddi_status, navail)); 5809 return (NXGE_ERROR | NXGE_DDI_FAILED); 5810 } 5811 5812 NXGE_DEBUG_MSG((nxgep, INT_CTL, 5813 "ddi_intr_get_navail() returned: nintrs %d, navail %d", 5814 nintrs, navail)); 5815 5816 /* PSARC/2007/453 MSI-X interrupt limit override */ 5817 if (int_type == DDI_INTR_TYPE_MSIX) { 5818 nrequest = nxge_create_msi_property(nxgep); 5819 if (nrequest < navail) { 5820 navail = nrequest; 5821 NXGE_DEBUG_MSG((nxgep, INT_CTL, 5822 "nxge_add_intrs_adv_type: nintrs %d " 5823 "navail %d (nrequest %d)", 5824 nintrs, navail, nrequest)); 5825 } 5826 } 5827 5828 if (int_type == DDI_INTR_TYPE_MSI && !ISP2(navail)) { 5829 /* MSI must be power of 2 */ 5830 if ((navail & 16) == 16) { 5831 navail = 16; 5832 } else if ((navail & 8) == 8) { 5833 navail = 8; 5834 } else if ((navail & 4) == 4) { 5835 navail = 4; 5836 } else if ((navail & 2) == 2) { 5837 navail = 2; 5838 } else { 5839 navail = 1; 5840 } 5841 NXGE_DEBUG_MSG((nxgep, INT_CTL, 5842 "ddi_intr_get_navail(): (msi power of 2) nintrs %d, " 5843 "navail %d", nintrs, navail)); 5844 } 5845 5846 behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? 
    if (int_type == DDI_INTR_TYPE_MSI && !ISP2(navail)) {
        /* MSI must be power of 2 */
        if ((navail & 16) == 16) {
            navail = 16;
        } else if ((navail & 8) == 8) {
            navail = 8;
        } else if ((navail & 4) == 4) {
            navail = 4;
        } else if ((navail & 2) == 2) {
            navail = 2;
        } else {
            navail = 1;
        }
        NXGE_DEBUG_MSG((nxgep, INT_CTL,
            "ddi_intr_get_navail(): (msi power of 2) nintrs %d, "
            "navail %d", nintrs, navail));
    }

    behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
        DDI_INTR_ALLOC_NORMAL);
    intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
    intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP);
    ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
        navail, &nactual, behavior);
    if (ddi_status != DDI_SUCCESS || nactual == 0) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            " ddi_intr_alloc() failed: %d",
            ddi_status));
        kmem_free(intrp->htable, intrp->intr_size);
        return (NXGE_ERROR | NXGE_DDI_FAILED);
    }

    if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
        (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            " ddi_intr_get_pri() failed: %d",
            ddi_status));
        /* Free already allocated interrupts */
        for (y = 0; y < nactual; y++) {
            (void) ddi_intr_free(intrp->htable[y]);
        }

        kmem_free(intrp->htable, intrp->intr_size);
        return (NXGE_ERROR | NXGE_DDI_FAILED);
    }

    nrequired = 0;
    switch (nxgep->niu_type) {
    default:
        status = nxge_ldgv_init(nxgep, &nactual, &nrequired);
        break;

    case N2_NIU:
        status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired);
        break;
    }

    if (status != NXGE_OK) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            "nxge_add_intrs_adv_type: nxge_ldgv_init "
            "failed: 0x%x", status));
        /* Free already allocated interrupts */
        for (y = 0; y < nactual; y++) {
            (void) ddi_intr_free(intrp->htable[y]);
        }

        kmem_free(intrp->htable, intrp->intr_size);
        return (status);
    }

    ldgp = nxgep->ldgvp->ldgp;
    for (x = 0; x < nrequired; x++, ldgp++) {
        ldgp->vector = (uint8_t)x;
        ldgp->intdata = SID_DATA(ldgp->func, x);
        arg1 = ldgp->ldvp;
        arg2 = nxgep;
        if (ldgp->nldvs == 1) {
            inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
            NXGE_DEBUG_MSG((nxgep, INT_CTL,
                "nxge_add_intrs_adv_type: "
                "arg1 0x%x arg2 0x%x: "
                "1-1 int handler (entry %d intdata 0x%x)\n",
                arg1, arg2,
                x, ldgp->intdata));
        } else if (ldgp->nldvs > 1) {
            inthandler = (uint_t *)ldgp->sys_intr_handler;
            NXGE_DEBUG_MSG((nxgep, INT_CTL,
                "nxge_add_intrs_adv_type: "
                "arg1 0x%x arg2 0x%x: "
                "nldevs %d int handler "
                "(entry %d intdata 0x%x)\n",
                arg1, arg2,
                ldgp->nldvs, x, ldgp->intdata));
        }

        NXGE_DEBUG_MSG((nxgep, INT_CTL,
            "==> nxge_add_intrs_adv_type: ddi_add_intr(inum) #%d "
            "htable 0x%llx", x, intrp->htable[x]));

        if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
            (ddi_intr_handler_t *)inthandler, arg1, arg2))
            != DDI_SUCCESS) {
            NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
                "==> nxge_add_intrs_adv_type: failed #%d "
                "status 0x%x", x, ddi_status));
            for (y = 0; y < intrp->intr_added; y++) {
                (void) ddi_intr_remove_handler(
                    intrp->htable[y]);
            }
            /* Free already allocated intr */
            for (y = 0; y < nactual; y++) {
                (void) ddi_intr_free(intrp->htable[y]);
            }
            kmem_free(intrp->htable, intrp->intr_size);

            (void) nxge_ldgv_uninit(nxgep);

            return (NXGE_ERROR | NXGE_DDI_FAILED);
        }
        intrp->intr_added++;
    }

    intrp->msi_intx_cnt = nactual;

    NXGE_DEBUG_MSG((nxgep, DDI_CTL,
        "Requested: %d, Allowed: %d msi_intx_cnt %d intr_added %d",
        navail, nactual,
        intrp->msi_intx_cnt,
        intrp->intr_added));

    (void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);

    (void) nxge_intr_ldgv_init(nxgep);

    NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type"));

    return (status);
}
    (void) nxge_intr_ldgv_init(nxgep);

    NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type"));

    return (status);
}

/*ARGSUSED*/
static nxge_status_t
nxge_add_intrs_adv_type_fix(p_nxge_t nxgep, uint32_t int_type)
{
    dev_info_t      *dip = nxgep->dip;
    p_nxge_ldg_t    ldgp;
    p_nxge_intr_t   intrp;
    uint_t          *inthandler;
    void            *arg1, *arg2;
    int             behavior;
    int             nintrs, navail;
    int             nactual, nrequired;
    int             inum = 0;
    int             x, y;
    int             ddi_status = DDI_SUCCESS;
    nxge_status_t   status = NXGE_OK;

    NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type_fix"));
    intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
    intrp->start_inum = 0;

    ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
    if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
        NXGE_DEBUG_MSG((nxgep, INT_CTL,
            "ddi_intr_get_nintrs() failed, status: 0x%x, "
            "nintrs: %d", ddi_status, nintrs));
        return (NXGE_ERROR | NXGE_DDI_FAILED);
    }

    ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
    if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            "ddi_intr_get_navail() failed, status: 0x%x, "
            "navail: %d", ddi_status, navail));
        return (NXGE_ERROR | NXGE_DDI_FAILED);
    }

    NXGE_DEBUG_MSG((nxgep, INT_CTL,
        "ddi_intr_get_navail() returned: nintrs %d, navail %d",
        nintrs, navail));

    behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
        DDI_INTR_ALLOC_NORMAL);
    intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
    intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP);
    ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
        navail, &nactual, behavior);
    if (ddi_status != DDI_SUCCESS || nactual == 0) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            "ddi_intr_alloc() failed: %d", ddi_status));
        kmem_free(intrp->htable, intrp->intr_size);
        return (NXGE_ERROR | NXGE_DDI_FAILED);
    }

    if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
        (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            "ddi_intr_get_pri() failed: %d", ddi_status));
        /* Free already allocated interrupts */
        for (y = 0; y < nactual; y++) {
            (void) ddi_intr_free(intrp->htable[y]);
        }

        kmem_free(intrp->htable, intrp->intr_size);
        return (NXGE_ERROR | NXGE_DDI_FAILED);
    }

    nrequired = 0;
    switch (nxgep->niu_type) {
    default:
        status = nxge_ldgv_init(nxgep, &nactual, &nrequired);
        break;

    case N2_NIU:
        status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired);
        break;
    }

    if (status != NXGE_OK) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            "nxge_add_intrs_adv_type_fix: nxge_ldgv_init "
            "failed: 0x%x", status));
        /* Free already allocated interrupts */
        for (y = 0; y < nactual; y++) {
            (void) ddi_intr_free(intrp->htable[y]);
        }

        kmem_free(intrp->htable, intrp->intr_size);
        return (status);
    }

    ldgp = nxgep->ldgvp->ldgp;
    for (x = 0; x < nrequired; x++, ldgp++) {
        ldgp->vector = (uint8_t)x;
        if (nxgep->niu_type != N2_NIU) {
            ldgp->intdata = SID_DATA(ldgp->func, x);
        }

        arg1 = ldgp->ldvp;
        arg2 = nxgep;
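        /*
         * A group that services a single logical device is bound to
         * that device's own handler; a group shared by several
         * logical devices is bound to the group-level
         * sys_intr_handler.
         */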
"1-1 int handler(%d) ldg %d ldv %d " 6074 "arg1 $%p arg2 $%p\n", 6075 x, ldgp->ldg, ldgp->ldvp->ldv, 6076 arg1, arg2)); 6077 } else if (ldgp->nldvs > 1) { 6078 inthandler = (uint_t *)ldgp->sys_intr_handler; 6079 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6080 "nxge_add_intrs_adv_type_fix: " 6081 "shared ldv %d int handler(%d) ldv %d ldg %d" 6082 "arg1 0x%016llx arg2 0x%016llx\n", 6083 x, ldgp->nldvs, ldgp->ldg, ldgp->ldvp->ldv, 6084 arg1, arg2)); 6085 } 6086 6087 if ((ddi_status = ddi_intr_add_handler(intrp->htable[x], 6088 (ddi_intr_handler_t *)inthandler, arg1, arg2)) 6089 != DDI_SUCCESS) { 6090 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 6091 "==> nxge_add_intrs_adv_type_fix: failed #%d " 6092 "status 0x%x", x, ddi_status)); 6093 for (y = 0; y < intrp->intr_added; y++) { 6094 (void) ddi_intr_remove_handler( 6095 intrp->htable[y]); 6096 } 6097 for (y = 0; y < nactual; y++) { 6098 (void) ddi_intr_free(intrp->htable[y]); 6099 } 6100 /* Free already allocated intr */ 6101 kmem_free(intrp->htable, intrp->intr_size); 6102 6103 (void) nxge_ldgv_uninit(nxgep); 6104 6105 return (NXGE_ERROR | NXGE_DDI_FAILED); 6106 } 6107 intrp->intr_added++; 6108 } 6109 6110 intrp->msi_intx_cnt = nactual; 6111 6112 (void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap); 6113 6114 status = nxge_intr_ldgv_init(nxgep); 6115 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type_fix")); 6116 6117 return (status); 6118 } 6119 6120 static void 6121 nxge_remove_intrs(p_nxge_t nxgep) 6122 { 6123 int i, inum; 6124 p_nxge_intr_t intrp; 6125 6126 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs")); 6127 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 6128 if (!intrp->intr_registered) { 6129 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6130 "<== nxge_remove_intrs: interrupts not registered")); 6131 return; 6132 } 6133 6134 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs:advanced")); 6135 6136 if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) { 6137 (void) ddi_intr_block_disable(intrp->htable, 6138 intrp->intr_added); 6139 } else { 6140 for (i = 0; i < intrp->intr_added; i++) { 6141 (void) ddi_intr_disable(intrp->htable[i]); 6142 } 6143 } 6144 6145 for (inum = 0; inum < intrp->intr_added; inum++) { 6146 if (intrp->htable[inum]) { 6147 (void) ddi_intr_remove_handler(intrp->htable[inum]); 6148 } 6149 } 6150 6151 for (inum = 0; inum < intrp->msi_intx_cnt; inum++) { 6152 if (intrp->htable[inum]) { 6153 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 6154 "nxge_remove_intrs: ddi_intr_free inum %d " 6155 "msi_intx_cnt %d intr_added %d", 6156 inum, 6157 intrp->msi_intx_cnt, 6158 intrp->intr_added)); 6159 6160 (void) ddi_intr_free(intrp->htable[inum]); 6161 } 6162 } 6163 6164 kmem_free(intrp->htable, intrp->intr_size); 6165 intrp->intr_registered = B_FALSE; 6166 intrp->intr_enabled = B_FALSE; 6167 intrp->msi_intx_cnt = 0; 6168 intrp->intr_added = 0; 6169 6170 (void) nxge_ldgv_uninit(nxgep); 6171 6172 (void) ddi_prop_remove(DDI_DEV_T_NONE, nxgep->dip, 6173 "#msix-request"); 6174 6175 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_remove_intrs")); 6176 } 6177 6178 /*ARGSUSED*/ 6179 static void 6180 nxge_remove_soft_intrs(p_nxge_t nxgep) 6181 { 6182 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_soft_intrs")); 6183 if (nxgep->resched_id) { 6184 ddi_remove_softintr(nxgep->resched_id); 6185 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6186 "==> nxge_remove_soft_intrs: removed")); 6187 nxgep->resched_id = NULL; 6188 } 6189 6190 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_remove_soft_intrs")); 6191 } 6192 6193 /*ARGSUSED*/ 6194 static void 6195 nxge_intrs_enable(p_nxge_t nxgep) 6196 { 
/*ARGSUSED*/
static void
nxge_intrs_enable(p_nxge_t nxgep)
{
    p_nxge_intr_t   intrp;
    int             i;
    int             status;

    NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable"));

    intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;

    if (!intrp->intr_registered) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_intrs_enable: "
            "interrupts are not registered"));
        return;
    }

    if (intrp->intr_enabled) {
        NXGE_DEBUG_MSG((nxgep, INT_CTL,
            "<== nxge_intrs_enable: already enabled"));
        return;
    }

    if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
        status = ddi_intr_block_enable(intrp->htable,
            intrp->intr_added);
        NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable "
            "block enable - status 0x%x total inums #%d\n",
            status, intrp->intr_added));
    } else {
        for (i = 0; i < intrp->intr_added; i++) {
            status = ddi_intr_enable(intrp->htable[i]);
            NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable "
                "ddi_intr_enable: status 0x%x "
                "total inums %d enable inum #%d\n",
                status, intrp->intr_added, i));
            if (status == DDI_SUCCESS) {
                intrp->intr_enabled = B_TRUE;
            }
        }
    }

    NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_enable"));
}

/*ARGSUSED*/
static void
nxge_intrs_disable(p_nxge_t nxgep)
{
    p_nxge_intr_t   intrp;
    int             i;

    NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_disable"));

    intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;

    if (!intrp->intr_registered) {
        NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable: "
            "interrupts are not registered"));
        return;
    }

    if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
        (void) ddi_intr_block_disable(intrp->htable,
            intrp->intr_added);
    } else {
        for (i = 0; i < intrp->intr_added; i++) {
            (void) ddi_intr_disable(intrp->htable[i]);
        }
    }

    intrp->intr_enabled = B_FALSE;
    NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable"));
}

static nxge_status_t
nxge_mac_register(p_nxge_t nxgep)
{
    mac_register_t  *macp;
    int             status;

    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_mac_register"));

    if ((macp = mac_alloc(MAC_VERSION)) == NULL)
        return (NXGE_ERROR);

    macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
    macp->m_driver = nxgep;
    macp->m_dip = nxgep->dip;
    macp->m_src_addr = nxgep->ouraddr.ether_addr_octet;
    macp->m_callbacks = &nxge_m_callbacks;
    macp->m_min_sdu = 0;
    nxgep->mac.default_mtu = nxgep->mac.maxframesize -
        NXGE_EHEADER_VLAN_CRC;
    macp->m_max_sdu = nxgep->mac.default_mtu;
    macp->m_margin = VLAN_TAGSZ;
    macp->m_priv_props = nxge_priv_props;
    macp->m_priv_prop_count = NXGE_MAX_PRIV_PROPS;

    NXGE_DEBUG_MSG((nxgep, MAC_CTL,
        "==> nxge_mac_register: instance %d "
        "max_sdu %d margin %d maxframe %d (header %d)",
        nxgep->instance,
        macp->m_max_sdu, macp->m_margin,
        nxgep->mac.maxframesize,
        NXGE_EHEADER_VLAN_CRC));

    status = mac_register(macp, &nxgep->mach);
    mac_free(macp);

    if (status != 0) {
        cmn_err(CE_WARN,
            "!nxge_mac_register failed (status %d instance %d)",
            status, nxgep->instance);
        return (NXGE_ERROR);
    }

    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_mac_register success "
        "(instance %d)", nxgep->instance));

    return (NXGE_OK);
}
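
/*
 * nxge_err_inject() services the error-injection ioctl: the message
 * block carries an err_inject_t that selects a hardware block
 * (blk_id), an error id, and a channel; the matching per-block
 * inject routine is invoked and the ioctl is acknowledged with
 * miocack().
 */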
void
nxge_err_inject(p_nxge_t nxgep, queue_t *wq, mblk_t *mp)
{
    ssize_t         size;
    mblk_t          *nmp;
    uint8_t         blk_id;
    uint8_t         chan;
    uint32_t        err_id;
    err_inject_t    *eip;

    NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_err_inject"));

    size = 1024;
    nmp = mp->b_cont;
    eip = (err_inject_t *)nmp->b_rptr;
    blk_id = eip->blk_id;
    err_id = eip->err_id;
    chan = eip->chan;
    cmn_err(CE_NOTE, "!blk_id = 0x%x\n", blk_id);
    cmn_err(CE_NOTE, "!err_id = 0x%x\n", err_id);
    cmn_err(CE_NOTE, "!chan = 0x%x\n", chan);
    switch (blk_id) {
    case MAC_BLK_ID:
        break;
    case TXMAC_BLK_ID:
        break;
    case RXMAC_BLK_ID:
        break;
    case MIF_BLK_ID:
        break;
    case IPP_BLK_ID:
        nxge_ipp_inject_err(nxgep, err_id);
        break;
    case TXC_BLK_ID:
        nxge_txc_inject_err(nxgep, err_id);
        break;
    case TXDMA_BLK_ID:
        nxge_txdma_inject_err(nxgep, err_id, chan);
        break;
    case RXDMA_BLK_ID:
        nxge_rxdma_inject_err(nxgep, err_id, chan);
        break;
    case ZCP_BLK_ID:
        nxge_zcp_inject_err(nxgep, err_id);
        break;
    case ESPC_BLK_ID:
        break;
    case FFLP_BLK_ID:
        break;
    case PHY_BLK_ID:
        break;
    case ETHER_SERDES_BLK_ID:
        break;
    case PCIE_SERDES_BLK_ID:
        break;
    case VIR_BLK_ID:
        break;
    }

    nmp->b_wptr = nmp->b_rptr + size;
    NXGE_DEBUG_MSG((nxgep, STR_CTL, "<== nxge_err_inject"));

    miocack(wq, mp, (int)size, 0);
}
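
/*
 * Attach this instance to the per-chip hardware list.  All functions
 * that share the same parent devinfo node (i.e. one Neptune chip)
 * share a single nxge_hw_list_t entry; the first function to attach
 * creates the entry, initializes the shared locks, and scans the
 * ports/PHYs.
 */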
static int
nxge_init_common_dev(p_nxge_t nxgep)
{
    p_nxge_hw_list_t    hw_p;
    dev_info_t          *p_dip;

    NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_init_common_device"));

    p_dip = nxgep->p_dip;
    MUTEX_ENTER(&nxge_common_lock);
    NXGE_DEBUG_MSG((nxgep, MOD_CTL,
        "==> nxge_init_common_dev: func # %d",
        nxgep->function_num));
    /*
     * Loop through the existing per-Neptune hardware list.
     */
    for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) {
        NXGE_DEBUG_MSG((nxgep, MOD_CTL,
            "==> nxge_init_common_device: func # %d "
            "hw_p $%p parent dip $%p",
            nxgep->function_num, hw_p, p_dip));
        if (hw_p->parent_devp == p_dip) {
            nxgep->nxge_hw_p = hw_p;
            hw_p->ndevs++;
            hw_p->nxge_p[nxgep->function_num] = nxgep;
            NXGE_DEBUG_MSG((nxgep, MOD_CTL,
                "==> nxge_init_common_device: func # %d "
                "hw_p $%p parent dip $%p "
                "ndevs %d (found)",
                nxgep->function_num, hw_p, p_dip,
                hw_p->ndevs));
            break;
        }
    }

    if (hw_p == NULL) {
        NXGE_DEBUG_MSG((nxgep, MOD_CTL,
            "==> nxge_init_common_device: func # %d "
            "parent dip $%p (new)",
            nxgep->function_num, p_dip));
        hw_p = kmem_zalloc(sizeof (nxge_hw_list_t), KM_SLEEP);
        hw_p->parent_devp = p_dip;
        hw_p->magic = NXGE_NEPTUNE_MAGIC;
        nxgep->nxge_hw_p = hw_p;
        hw_p->ndevs++;
        hw_p->nxge_p[nxgep->function_num] = nxgep;
        hw_p->next = nxge_hw_list;
        if (nxgep->niu_type == N2_NIU) {
            hw_p->niu_type = N2_NIU;
            hw_p->platform_type = P_NEPTUNE_NIU;
        } else {
            hw_p->niu_type = NIU_TYPE_NONE;
            hw_p->platform_type = P_NEPTUNE_NONE;
        }

        MUTEX_INIT(&hw_p->nxge_cfg_lock, NULL, MUTEX_DRIVER, NULL);
        MUTEX_INIT(&hw_p->nxge_tcam_lock, NULL, MUTEX_DRIVER, NULL);
        MUTEX_INIT(&hw_p->nxge_vlan_lock, NULL, MUTEX_DRIVER, NULL);
        MUTEX_INIT(&hw_p->nxge_mdio_lock, NULL, MUTEX_DRIVER, NULL);

        nxge_hw_list = hw_p;

        (void) nxge_scan_ports_phy(nxgep, nxge_hw_list);
    }

    MUTEX_EXIT(&nxge_common_lock);

    nxgep->platform_type = hw_p->platform_type;
    if (nxgep->niu_type != N2_NIU) {
        nxgep->niu_type = hw_p->niu_type;
    }

    NXGE_DEBUG_MSG((nxgep, MOD_CTL,
        "==> nxge_init_common_device (nxge_hw_list) $%p",
        nxge_hw_list));
    NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<== nxge_init_common_device"));

    return (NXGE_OK);
}
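
/*
 * Detach this instance from the per-chip hardware list; the last
 * function to detach destroys the shared locks and frees the entry.
 */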
"==> nxge_uninit_common_device:" 6522 "remove head func # %d " 6523 "hw_p $%p parent dip $%p " 6524 "ndevs %d (head)", 6525 nxgep->function_num, 6526 hw_p, 6527 p_dip, 6528 hw_p->ndevs)); 6529 nxge_hw_list = hw_p->next; 6530 } else { 6531 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6532 "==> nxge_uninit_common_device:" 6533 "remove middle func # %d " 6534 "hw_p $%p parent dip $%p " 6535 "ndevs %d (middle)", 6536 nxgep->function_num, 6537 hw_p, 6538 p_dip, 6539 hw_p->ndevs)); 6540 h_hw_p->next = hw_p->next; 6541 } 6542 6543 nxgep->nxge_hw_p = NULL; 6544 KMEM_FREE(hw_p, sizeof (nxge_hw_list_t)); 6545 } 6546 break; 6547 } else { 6548 h_hw_p = hw_p; 6549 } 6550 } 6551 6552 MUTEX_EXIT(&nxge_common_lock); 6553 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6554 "==> nxge_uninit_common_device (nxge_hw_list) $%p", 6555 nxge_hw_list)); 6556 6557 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<= nxge_uninit_common_device")); 6558 } 6559 6560 /* 6561 * Determines the number of ports from the niu_type or the platform type. 6562 * Returns the number of ports, or returns zero on failure. 6563 */ 6564 6565 int 6566 nxge_get_nports(p_nxge_t nxgep) 6567 { 6568 int nports = 0; 6569 6570 switch (nxgep->niu_type) { 6571 case N2_NIU: 6572 case NEPTUNE_2_10GF: 6573 nports = 2; 6574 break; 6575 case NEPTUNE_4_1GC: 6576 case NEPTUNE_2_10GF_2_1GC: 6577 case NEPTUNE_1_10GF_3_1GC: 6578 case NEPTUNE_1_1GC_1_10GF_2_1GC: 6579 case NEPTUNE_2_10GF_2_1GRF: 6580 nports = 4; 6581 break; 6582 default: 6583 switch (nxgep->platform_type) { 6584 case P_NEPTUNE_NIU: 6585 case P_NEPTUNE_ATLAS_2PORT: 6586 nports = 2; 6587 break; 6588 case P_NEPTUNE_ATLAS_4PORT: 6589 case P_NEPTUNE_MARAMBA_P0: 6590 case P_NEPTUNE_MARAMBA_P1: 6591 case P_NEPTUNE_ALONSO: 6592 nports = 4; 6593 break; 6594 default: 6595 break; 6596 } 6597 break; 6598 } 6599 6600 return (nports); 6601 } 6602 6603 /* 6604 * The following two functions are to support 6605 * PSARC/2007/453 MSI-X interrupt limit override. 6606 */ 6607 static int 6608 nxge_create_msi_property(p_nxge_t nxgep) 6609 { 6610 int nmsi; 6611 extern int ncpus; 6612 6613 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==>nxge_create_msi_property")); 6614 6615 switch (nxgep->mac.portmode) { 6616 case PORT_10G_COPPER: 6617 case PORT_10G_FIBER: 6618 (void) ddi_prop_create(DDI_DEV_T_NONE, nxgep->dip, 6619 DDI_PROP_CANSLEEP, "#msix-request", NULL, 0); 6620 /* 6621 * The maximum MSI-X requested will be 8. 6622 * If the # of CPUs is less than 8, we will reqeust 6623 * # MSI-X based on the # of CPUs. 
static int
nxge_create_msi_property(p_nxge_t nxgep)
{
    int     nmsi;
    extern  int ncpus;

    NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_create_msi_property"));

    switch (nxgep->mac.portmode) {
    case PORT_10G_COPPER:
    case PORT_10G_FIBER:
        (void) ddi_prop_create(DDI_DEV_T_NONE, nxgep->dip,
            DDI_PROP_CANSLEEP, "#msix-request", NULL, 0);
        /*
         * The maximum number of MSI-X vectors requested is 8.
         * If there are fewer than 8 CPUs, request one MSI-X
         * vector per CPU.
         */
        if (ncpus >= NXGE_MSIX_REQUEST_10G) {
            nmsi = NXGE_MSIX_REQUEST_10G;
        } else {
            nmsi = ncpus;
        }
        NXGE_DEBUG_MSG((nxgep, MOD_CTL,
            "==> nxge_create_msi_property(10G): exists 0x%x (nmsi %d)",
            ddi_prop_exists(DDI_DEV_T_NONE, nxgep->dip,
            DDI_PROP_CANSLEEP, "#msix-request"), nmsi));
        break;

    default:
        nmsi = NXGE_MSIX_REQUEST_1G;
        NXGE_DEBUG_MSG((nxgep, MOD_CTL,
            "==> nxge_create_msi_property(1G): exists 0x%x (nmsi %d)",
            ddi_prop_exists(DDI_DEV_T_NONE, nxgep->dip,
            DDI_PROP_CANSLEEP, "#msix-request"), nmsi));
        break;
    }

    NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<== nxge_create_msi_property"));
    return (nmsi);
}

/* ARGSUSED */
static int
nxge_get_def_val(nxge_t *nxgep, mac_prop_id_t pr_num, uint_t pr_valsize,
    void *pr_val)
{
    int             err = 0;
    link_flowctrl_t fl;

    switch (pr_num) {
    case DLD_PROP_AUTONEG:
        *(uint8_t *)pr_val = 1;
        break;
    case DLD_PROP_FLOWCTRL:
        if (pr_valsize < sizeof (link_flowctrl_t))
            return (EINVAL);
        fl = LINK_FLOWCTRL_RX;
        bcopy(&fl, pr_val, sizeof (fl));
        break;
    case DLD_PROP_ADV_1000FDX_CAP:
    case DLD_PROP_EN_1000FDX_CAP:
        *(uint8_t *)pr_val = 1;
        break;
    case DLD_PROP_ADV_100FDX_CAP:
    case DLD_PROP_EN_100FDX_CAP:
        *(uint8_t *)pr_val = 1;
        break;
    default:
        err = ENOTSUP;
        break;
    }
    return (err);
}


/*
 * The following is a software workaround for the Neptune hardware's
 * interrupt bug: the hardware may generate spurious interrupts after
 * an interrupt handler has been removed.
 */
#define NXGE_PCI_PORT_LOGIC_OFFSET      0x98
#define NXGE_PIM_RESET                  (1ULL << 29)
#define NXGE_GLU_RESET                  (1ULL << 30)
#define NXGE_NIU_RESET                  (1ULL << 31)
#define NXGE_PCI_RESET_ALL              (NXGE_PIM_RESET |       \
                                        NXGE_GLU_RESET |        \
                                        NXGE_NIU_RESET)

#define NXGE_WAIT_QUITE_TIME            200000
#define NXGE_WAIT_QUITE_RETRY           40
#define NXGE_PCI_RESET_WAIT             1000000 /* one second */
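
/*
 * Reset the NIU through the PEU port logic register.  Before the
 * reset bits are written, wait for the other functions on the same
 * chip to quiesce (no active timers); the reset is performed at most
 * once per chip, guarded by the COMMON_RESET_NIU_PCI flag.
 */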
static void
nxge_niu_peu_reset(p_nxge_t nxgep)
{
    uint32_t            rvalue;
    p_nxge_hw_list_t    hw_p;
    p_nxge_t            fnxgep;
    int                 i, j;

    NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_niu_peu_reset"));
    if ((hw_p = nxgep->nxge_hw_p) == NULL) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            "==> nxge_niu_peu_reset: NULL hardware pointer"));
        return;
    }

    NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
        "==> nxge_niu_peu_reset: flags 0x%x link timer id %d timer id %d",
        hw_p->flags, nxgep->nxge_link_poll_timerid,
        nxgep->nxge_timerid));

    MUTEX_ENTER(&hw_p->nxge_cfg_lock);
    /*
     * Make sure the other instances on the same hardware have
     * stopped sending PIOs and are in a quiescent state.
     */
    for (i = 0; i < NXGE_MAX_PORTS; i++) {
        fnxgep = hw_p->nxge_p[i];
        NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
            "==> nxge_niu_peu_reset: checking entry %d "
            "nxgep $%p", i, fnxgep));
#ifdef NXGE_DEBUG
        if (fnxgep) {
            NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
                "==> nxge_niu_peu_reset: entry %d (function %d) "
                "link timer id %d hw timer id %d",
                i, fnxgep->function_num,
                fnxgep->nxge_link_poll_timerid,
                fnxgep->nxge_timerid));
        }
#endif
        if (fnxgep && fnxgep != nxgep &&
            (fnxgep->nxge_timerid || fnxgep->nxge_link_poll_timerid)) {
            NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
                "==> nxge_niu_peu_reset: checking $%p "
                "(function %d) timer ids",
                fnxgep, fnxgep->function_num));
            for (j = 0; j < NXGE_WAIT_QUITE_RETRY; j++) {
                NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
                    "==> nxge_niu_peu_reset: waiting"));
                NXGE_DELAY(NXGE_WAIT_QUITE_TIME);
                if (!fnxgep->nxge_timerid &&
                    !fnxgep->nxge_link_poll_timerid) {
                    break;
                }
            }
            NXGE_DELAY(NXGE_WAIT_QUITE_TIME);
            if (fnxgep->nxge_timerid ||
                fnxgep->nxge_link_poll_timerid) {
                MUTEX_EXIT(&hw_p->nxge_cfg_lock);
                NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
                    "<== nxge_niu_peu_reset: cannot reset "
                    "hardware (devices are still in use)"));
                return;
            }
        }
    }

    if ((hw_p->flags & COMMON_RESET_NIU_PCI) != COMMON_RESET_NIU_PCI) {
        hw_p->flags |= COMMON_RESET_NIU_PCI;
        rvalue = pci_config_get32(nxgep->dev_regs->nxge_pciregh,
            NXGE_PCI_PORT_LOGIC_OFFSET);
        NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
            "nxge_niu_peu_reset: read offset 0x%x (%d) "
            "(data 0x%x)",
            NXGE_PCI_PORT_LOGIC_OFFSET,
            NXGE_PCI_PORT_LOGIC_OFFSET,
            rvalue));

        rvalue |= NXGE_PCI_RESET_ALL;
        pci_config_put32(nxgep->dev_regs->nxge_pciregh,
            NXGE_PCI_PORT_LOGIC_OFFSET, rvalue);
        NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
            "nxge_niu_peu_reset: RESETTING NIU: write NIU reset 0x%x",
            rvalue));

        NXGE_DELAY(NXGE_PCI_RESET_WAIT);
    }

    MUTEX_EXIT(&hw_p->nxge_cfg_lock);
    NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_niu_peu_reset"));
}