/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * SunOS MT STREAMS NIU/Neptune 10Gb Ethernet Device Driver.
 */
#include <sys/nxge/nxge_impl.h>
#include <sys/nxge/nxge_hio.h>
#include <sys/nxge/nxge_rxdma.h>
#include <sys/pcie.h>

uint32_t nxge_use_partition = 0;	/* debug partition flag */
uint32_t nxge_dma_obp_props_only = 1;	/* use OBP-published properties */
uint32_t nxge_use_rdc_intr = 1;		/* debug to assign rdc intr */

/*
 * PSARC/2007/453 MSI-X interrupt limit override
 * (This PSARC case is limited to MSI-X vectors
 * and SPARC platforms only).
 */
#if defined(_BIG_ENDIAN)
uint32_t nxge_msi_enable = 2;
#else
uint32_t nxge_msi_enable = 1;
#endif

/*
 * Software workaround for a Neptune (PCI-E) hardware interrupt bug:
 * the hardware may generate spurious interrupts after the device
 * interrupt handler has been removed. If this flag is enabled, the
 * driver will reset the hardware when devices are being detached.
 */
uint32_t nxge_peu_reset_enable = 0;

/*
 * Software workaround for the hardware checksum bugs that affect
 * packet transmission and reception:
 *
 * Usage of nxge_cksum_offload:
 *
 * (1) nxge_cksum_offload = 0 (default):
 *	- transmitted packets:
 *	  TCP: uses the hardware checksum feature.
 *	  UDP: the driver computes a software checksum based on the
 *	       partial checksum computed by the IP layer.
 *	- received packets:
 *	  TCP: marks packet checksum flags based on the hardware result.
 *	  UDP: does not mark checksum flags.
 *
 * (2) nxge_cksum_offload = 1:
 *	- transmitted packets:
 *	  TCP/UDP: uses the hardware checksum feature.
 *	- received packets:
 *	  TCP/UDP: marks packet checksum flags based on the hardware result.
 *
 * (3) nxge_cksum_offload = 2:
 *	- The driver will not register its checksum capability.
 *	  Checksums for both TCP and UDP will be computed by the stack.
 *	- Software LSO is not allowed in this case.
 *
 * (4) nxge_cksum_offload > 2:
 *	- Treated as if it were set to 2 (the stack computes the checksum).
 *
 * (5) If the hardware bug is fixed, this workaround needs to be
 *     updated accordingly to reflect the new hardware revision.
 */
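/*
 * Example (an illustrative assumption, not part of the original code):
 * full hardware checksumming for both TCP and UDP could be selected
 * from /etc/system with a line such as
 *
 *	set nxge:nxge_cksum_offload = 1
 *
 * which takes effect at the next reboot.
 */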
uint32_t nxge_cksum_offload = 0;

/*
 * Globals: tunable parameters (/etc/system or adb)
 */
uint32_t nxge_rbr_size = NXGE_RBR_RBB_DEFAULT;
uint32_t nxge_rbr_spare_size = 0;
uint32_t nxge_rcr_size = NXGE_RCR_DEFAULT;
uint32_t nxge_tx_ring_size = NXGE_TX_RING_DEFAULT;
boolean_t nxge_no_msg = B_TRUE;		/* control message display */
uint32_t nxge_no_link_notify = 0;	/* control DL_NOTIFY */
uint32_t nxge_bcopy_thresh = TX_BCOPY_MAX;
uint32_t nxge_dvma_thresh = TX_FASTDVMA_MIN;
uint32_t nxge_dma_stream_thresh = TX_STREAM_MIN;
uint32_t nxge_jumbo_mtu = TX_JUMBO_MTU;
boolean_t nxge_jumbo_enable = B_FALSE;
uint16_t nxge_rcr_timeout = NXGE_RDC_RCR_TIMEOUT;
uint16_t nxge_rcr_threshold = NXGE_RDC_RCR_THRESHOLD;
nxge_tx_mode_t nxge_tx_scheme = NXGE_USE_SERIAL;

/* Maximum LSO size */
#define NXGE_LSO_MAXLEN	65535
uint32_t nxge_lso_max = NXGE_LSO_MAXLEN;

/*
 * Debugging flags:
 *	nxge_no_tx_lb:		transmit load balancing
 *	nxge_tx_lb_policy:	0 - TCP port (default)
 *				3 - DEST MAC
 */
uint32_t nxge_no_tx_lb = 0;
uint32_t nxge_tx_lb_policy = NXGE_TX_LB_TCPUDP;

/*
 * Tunable to reduce the amount of time spent in the
 * ISR doing Rx processing.
 */
uint32_t nxge_max_rx_pkts = 1024;

/*
 * Tunables to manage the receive buffer blocks.
 *
 * nxge_rx_threshold_hi:	copy all buffers.
 * nxge_rx_bcopy_size_type:	receive buffer block size type.
 * nxge_rx_threshold_lo:	copy only up to tunable block size type.
 */
nxge_rxbuf_threshold_t nxge_rx_threshold_hi = NXGE_RX_COPY_6;
nxge_rxbuf_type_t nxge_rx_buf_size_type = RCR_PKTBUFSZ_0;
nxge_rxbuf_threshold_t nxge_rx_threshold_lo = NXGE_RX_COPY_3;

/* Use kmem_alloc() to allocate data buffers. */
#if defined(_BIG_ENDIAN)
uint32_t nxge_use_kmem_alloc = 1;
#else
uint32_t nxge_use_kmem_alloc = 0;
#endif

rtrace_t npi_rtracebuf;
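/*
 * Example (hypothetical values, shown for illustration only): the
 * ring-size tunables above could be overridden from /etc/system, e.g.
 *
 *	set nxge:nxge_rbr_size = 2048
 *	set nxge:nxge_rcr_size = 1024
 *	set nxge:nxge_tx_ring_size = 1024
 *
 * The same variables can also be patched at run time with adb or mdb.
 */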
#if defined(sun4v)
/*
 * Hypervisor N2/NIU services information.
 */
static hsvc_info_t niu_hsvc = {
    HSVC_REV_1, NULL, HSVC_GROUP_NIU, NIU_MAJOR_VER,
    NIU_MINOR_VER, "nxge"
};

static int nxge_hsvc_register(p_nxge_t);
#endif

/*
 * Function Prototypes
 */
static int nxge_attach(dev_info_t *, ddi_attach_cmd_t);
static int nxge_detach(dev_info_t *, ddi_detach_cmd_t);
static void nxge_unattach(p_nxge_t);

#if NXGE_PROPERTY
static void nxge_remove_hard_properties(p_nxge_t);
#endif

/*
 * These two functions are required by nxge_hio.c
 */
extern int nxge_m_mmac_add(void *arg, mac_multi_addr_t *maddr);
extern int nxge_m_mmac_remove(void *arg, mac_addr_slot_t slot);

static nxge_status_t nxge_setup_system_dma_pages(p_nxge_t);

static nxge_status_t nxge_setup_mutexes(p_nxge_t);
static void nxge_destroy_mutexes(p_nxge_t);

static nxge_status_t nxge_map_regs(p_nxge_t nxgep);
static void nxge_unmap_regs(p_nxge_t nxgep);
#ifdef NXGE_DEBUG
static void nxge_test_map_regs(p_nxge_t nxgep);
#endif

static nxge_status_t nxge_add_intrs(p_nxge_t nxgep);
static nxge_status_t nxge_add_soft_intrs(p_nxge_t nxgep);
static void nxge_remove_intrs(p_nxge_t nxgep);
static void nxge_remove_soft_intrs(p_nxge_t nxgep);

static nxge_status_t nxge_add_intrs_adv(p_nxge_t nxgep);
static nxge_status_t nxge_add_intrs_adv_type(p_nxge_t, uint32_t);
static nxge_status_t nxge_add_intrs_adv_type_fix(p_nxge_t, uint32_t);
static void nxge_intrs_enable(p_nxge_t nxgep);
static void nxge_intrs_disable(p_nxge_t nxgep);

static void nxge_suspend(p_nxge_t);
static nxge_status_t nxge_resume(p_nxge_t);

static nxge_status_t nxge_setup_dev(p_nxge_t);
static void nxge_destroy_dev(p_nxge_t);

static nxge_status_t nxge_alloc_mem_pool(p_nxge_t);
static void nxge_free_mem_pool(p_nxge_t);

nxge_status_t nxge_alloc_rx_mem_pool(p_nxge_t);
static void nxge_free_rx_mem_pool(p_nxge_t);

nxge_status_t nxge_alloc_tx_mem_pool(p_nxge_t);
static void nxge_free_tx_mem_pool(p_nxge_t);

static nxge_status_t nxge_dma_mem_alloc(p_nxge_t, dma_method_t,
    struct ddi_dma_attr *, size_t, ddi_device_acc_attr_t *, uint_t,
    p_nxge_dma_common_t);

static void nxge_dma_mem_free(p_nxge_dma_common_t);
static void nxge_dma_free_rx_data_buf(p_nxge_dma_common_t);

static nxge_status_t nxge_alloc_rx_buf_dma(p_nxge_t, uint16_t,
    p_nxge_dma_common_t *, size_t, size_t, uint32_t *);
static void nxge_free_rx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t);

static nxge_status_t nxge_alloc_rx_cntl_dma(p_nxge_t, uint16_t,
    p_nxge_dma_common_t *, size_t);
static void nxge_free_rx_cntl_dma(p_nxge_t, p_nxge_dma_common_t);

extern nxge_status_t nxge_alloc_tx_buf_dma(p_nxge_t, uint16_t,
    p_nxge_dma_common_t *, size_t, size_t, uint32_t *);
static void nxge_free_tx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t);

extern nxge_status_t nxge_alloc_tx_cntl_dma(p_nxge_t, uint16_t,
    p_nxge_dma_common_t *, size_t);
static void nxge_free_tx_cntl_dma(p_nxge_t, p_nxge_dma_common_t);

static int nxge_init_common_dev(p_nxge_t);
static void nxge_uninit_common_dev(p_nxge_t);
extern int nxge_param_set_mac(p_nxge_t, queue_t *, mblk_t *,
    char *, caddr_t);

/*
 * The next declarations are for the GLDv3 interface.
 */
static int nxge_m_start(void *);
static void nxge_m_stop(void *);
static int nxge_m_unicst(void *, const uint8_t *);
static int nxge_m_multicst(void *, boolean_t, const uint8_t *);
static int nxge_m_promisc(void *, boolean_t);
static void nxge_m_ioctl(void *, queue_t *, mblk_t *);
static void nxge_m_resources(void *);
mblk_t *nxge_m_tx(void *arg, mblk_t *);
static nxge_status_t nxge_mac_register(p_nxge_t);
static int nxge_altmac_set(p_nxge_t nxgep, uint8_t *mac_addr,
    mac_addr_slot_t slot);
void nxge_mmac_kstat_update(p_nxge_t nxgep, mac_addr_slot_t slot,
    boolean_t factory);
static int nxge_m_mmac_reserve(void *arg, mac_multi_addr_t *maddr);
static int nxge_m_mmac_modify(void *arg, mac_multi_addr_t *maddr);
static int nxge_m_mmac_get(void *arg, mac_multi_addr_t *maddr);
static boolean_t nxge_m_getcapab(void *, mac_capab_t, void *);
static int nxge_m_setprop(void *, const char *, mac_prop_id_t,
    uint_t, const void *);
static int nxge_m_getprop(void *, const char *, mac_prop_id_t,
    uint_t, uint_t, void *);
static int nxge_set_priv_prop(nxge_t *, const char *, uint_t,
    const void *);
static int nxge_get_priv_prop(nxge_t *, const char *, uint_t, uint_t,
    void *);
static int nxge_get_def_val(nxge_t *, mac_prop_id_t, uint_t, void *);

static void nxge_niu_peu_reset(p_nxge_t nxgep);

mac_priv_prop_t nxge_priv_props[] = {
    {"_adv_10gfdx_cap", MAC_PROP_PERM_RW},
    {"_adv_pause_cap", MAC_PROP_PERM_RW},
    {"_function_number", MAC_PROP_PERM_READ},
    {"_fw_version", MAC_PROP_PERM_READ},
    {"_port_mode", MAC_PROP_PERM_READ},
    {"_hot_swap_phy", MAC_PROP_PERM_READ},
    {"_accept_jumbo", MAC_PROP_PERM_RW},
    {"_rxdma_intr_time", MAC_PROP_PERM_RW},
    {"_rxdma_intr_pkts", MAC_PROP_PERM_RW},
    {"_class_opt_ipv4_tcp", MAC_PROP_PERM_RW},
    {"_class_opt_ipv4_udp", MAC_PROP_PERM_RW},
    {"_class_opt_ipv4_ah", MAC_PROP_PERM_RW},
    {"_class_opt_ipv4_sctp", MAC_PROP_PERM_RW},
    {"_class_opt_ipv6_tcp", MAC_PROP_PERM_RW},
    {"_class_opt_ipv6_udp", MAC_PROP_PERM_RW},
    {"_class_opt_ipv6_ah", MAC_PROP_PERM_RW},
    {"_class_opt_ipv6_sctp", MAC_PROP_PERM_RW},
    {"_soft_lso_enable", MAC_PROP_PERM_RW}
};

#define NXGE_MAX_PRIV_PROPS	\
	(sizeof (nxge_priv_props) / sizeof (mac_priv_prop_t))

#define NXGE_NEPTUNE_MAGIC	0x4E584745UL
#define MAX_DUMP_SZ		256

#define NXGE_M_CALLBACK_FLAGS	\
	(MC_RESOURCES | MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP)

mac_callbacks_t nxge_m_callbacks = {
    NXGE_M_CALLBACK_FLAGS,
    nxge_m_stat,
    nxge_m_start,
    nxge_m_stop,
    nxge_m_promisc,
    nxge_m_multicst,
    nxge_m_unicst,
    nxge_m_tx,
    nxge_m_resources,
    nxge_m_ioctl,
    nxge_m_getcapab,
    NULL,
    NULL,
    nxge_m_setprop,
    nxge_m_getprop
};

void nxge_err_inject(p_nxge_t, queue_t *, mblk_t *);

/* PSARC/2007/453 MSI-X interrupt limit override. */
#define NXGE_MSIX_REQUEST_10G	8
#define NXGE_MSIX_REQUEST_1G	2
static int nxge_create_msi_property(p_nxge_t);

/*
 * These global variables control the message output.
 */
out_dbgmsg_t nxge_dbgmsg_out = DBG_CONSOLE | STR_LOG;
uint64_t nxge_debug_level;
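/*
 * Example (a sketch; the actual *_CTL debug bit values live in
 * nxge_impl.h, not here): tracing could be enabled for a debug build
 * by presetting these globals, e.g.
 *
 *	nxge_debug_level = DDI_CTL | MEM_CTL;	(assumed flag names)
 *	nxge_dbgmsg_out = DBG_CONSOLE | STR_LOG;
 *
 * nxge_debug_msg() below compares each message's level against
 * nxge_debug_level; nxge_dbgmsg_out is intended to select console
 * and/or system log output.
 */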
/*
 * This list contains the instance structures for the Neptune
 * devices present in the system. The lock exists to guarantee
 * mutually exclusive access to the list.
 */
void *nxge_list = NULL;

void *nxge_hw_list = NULL;
nxge_os_mutex_t nxge_common_lock;

extern uint64_t npi_debug_level;

extern nxge_status_t nxge_ldgv_init(p_nxge_t, int *, int *);
extern nxge_status_t nxge_ldgv_init_n2(p_nxge_t, int *, int *);
extern nxge_status_t nxge_ldgv_uninit(p_nxge_t);
extern nxge_status_t nxge_intr_ldgv_init(p_nxge_t);
extern void nxge_fm_init(p_nxge_t, ddi_device_acc_attr_t *,
    ddi_device_acc_attr_t *, ddi_dma_attr_t *);
extern void nxge_fm_fini(p_nxge_t);
extern npi_status_t npi_mac_altaddr_disable(npi_handle_t, uint8_t, uint8_t);

/*
 * Count used to maintain the number of buffers being used
 * by Neptune instances and loaned up to the upper layers.
 */
uint32_t nxge_mblks_pending = 0;

/*
 * Device register access attributes for PIO.
 */
static ddi_device_acc_attr_t nxge_dev_reg_acc_attr = {
    DDI_DEVICE_ATTR_V0,
    DDI_STRUCTURE_LE_ACC,
    DDI_STRICTORDER_ACC,
};

/*
 * Device descriptor access attributes for DMA.
 */
static ddi_device_acc_attr_t nxge_dev_desc_dma_acc_attr = {
    DDI_DEVICE_ATTR_V0,
    DDI_STRUCTURE_LE_ACC,
    DDI_STRICTORDER_ACC
};

/*
 * Device buffer access attributes for DMA.
 */
static ddi_device_acc_attr_t nxge_dev_buf_dma_acc_attr = {
    DDI_DEVICE_ATTR_V0,
    DDI_STRUCTURE_BE_ACC,
    DDI_STRICTORDER_ACC
};

ddi_dma_attr_t nxge_desc_dma_attr = {
    DMA_ATTR_V0,		/* version number */
    0,				/* low address */
    0xffffffffffffffff,		/* high address */
    0xffffffffffffffff,		/* address counter max */
#ifndef NIU_PA_WORKAROUND
    0x100000,			/* alignment */
#else
    0x2000,
#endif
    0xfc00fc,			/* dlim_burstsizes */
    0x1,			/* minimum transfer size */
    0xffffffffffffffff,		/* maximum transfer size */
    0xffffffffffffffff,		/* maximum segment size */
    1,				/* scatter/gather list length */
    (unsigned int)1,		/* granularity */
    0				/* attribute flags */
};

ddi_dma_attr_t nxge_tx_dma_attr = {
    DMA_ATTR_V0,		/* version number */
    0,				/* low address */
    0xffffffffffffffff,		/* high address */
    0xffffffffffffffff,		/* address counter max */
#if defined(_BIG_ENDIAN)
    0x2000,			/* alignment */
#else
    0x1000,			/* alignment */
#endif
    0xfc00fc,			/* dlim_burstsizes */
    0x1,			/* minimum transfer size */
    0xffffffffffffffff,		/* maximum transfer size */
    0xffffffffffffffff,		/* maximum segment size */
    5,				/* scatter/gather list length */
    (unsigned int)1,		/* granularity */
    0				/* attribute flags */
};
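/*
 * These ddi_dma_attr_t structures are handed to the DDI DMA routines.
 * A minimal sketch of how one is consumed (the local names are
 * illustrative only):
 *
 *	ddi_dma_handle_t handle;
 *
 *	if (ddi_dma_alloc_handle(nxgep->dip, &nxge_tx_dma_attr,
 *	    DDI_DMA_DONTWAIT, 0, &handle) != DDI_SUCCESS)
 *		return (NXGE_ERROR);
 *
 * nxge_setup_system_dma_pages() later in this file uses exactly this
 * pattern to probe the system burst sizes.
 */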
ddi_dma_attr_t nxge_rx_dma_attr = {
    DMA_ATTR_V0,		/* version number */
    0,				/* low address */
    0xffffffffffffffff,		/* high address */
    0xffffffffffffffff,		/* address counter max */
    0x2000,			/* alignment */
    0xfc00fc,			/* dlim_burstsizes */
    0x1,			/* minimum transfer size */
    0xffffffffffffffff,		/* maximum transfer size */
    0xffffffffffffffff,		/* maximum segment size */
    1,				/* scatter/gather list length */
    (unsigned int)1,		/* granularity */
    DDI_DMA_RELAXED_ORDERING	/* attribute flags */
};

ddi_dma_lim_t nxge_dma_limits = {
    (uint_t)0,			/* dlim_addr_lo */
    (uint_t)0xffffffff,		/* dlim_addr_hi */
    (uint_t)0xffffffff,		/* dlim_cntr_max */
    (uint_t)0xfc00fc,		/* dlim_burstsizes for 32- and 64-bit xfers */
    0x1,			/* dlim_minxfer */
    1024			/* dlim_speed */
};

dma_method_t nxge_force_dma = DVMA;

/*
 * DMA chunk sizes.
 *
 * Try to allocate the largest possible size so that fewer
 * DMA chunks need to be managed.
 */
#ifdef NIU_PA_WORKAROUND
size_t alloc_sizes[] = {0x2000};
#else
size_t alloc_sizes[] = {0x1000, 0x2000, 0x4000, 0x8000,
    0x10000, 0x20000, 0x40000, 0x80000,
    0x100000, 0x200000, 0x400000, 0x800000,
    0x1000000, 0x2000000, 0x4000000};
#endif

/*
 * Translate "dev_t" to a pointer to the associated "dev_info_t".
 */

extern void nxge_get_environs(nxge_t *);

static int
nxge_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
    p_nxge_t nxgep = NULL;
    int instance;
    int status = DDI_SUCCESS;
    uint8_t portn;
    nxge_mmac_t *mmac_info;

    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_attach"));

    /*
     * Get the device instance since we'll need to setup
     * or retrieve a soft state for this instance.
     */
    instance = ddi_get_instance(dip);

    switch (cmd) {
    case DDI_ATTACH:
        NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_ATTACH"));
        break;

    case DDI_RESUME:
        NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_RESUME"));
        nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);
        if (nxgep == NULL) {
            status = DDI_FAILURE;
            break;
        }
        if (nxgep->dip != dip) {
            status = DDI_FAILURE;
            break;
        }
        if (nxgep->suspended == DDI_PM_SUSPEND) {
            status = ddi_dev_is_needed(nxgep->dip, 0, 1);
        } else {
            status = nxge_resume(nxgep);
        }
        goto nxge_attach_exit;

    case DDI_PM_RESUME:
        NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_RESUME"));
        nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);
        if (nxgep == NULL) {
            status = DDI_FAILURE;
            break;
        }
        if (nxgep->dip != dip) {
            status = DDI_FAILURE;
            break;
        }
        status = nxge_resume(nxgep);
        goto nxge_attach_exit;

    default:
        NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing unknown"));
        status = DDI_FAILURE;
        goto nxge_attach_exit;
    }

    if (ddi_soft_state_zalloc(nxge_list, instance) == DDI_FAILURE) {
        status = DDI_FAILURE;
        goto nxge_attach_exit;
    }

    nxgep = ddi_get_soft_state(nxge_list, instance);
    if (nxgep == NULL) {
        status = NXGE_ERROR;
        goto nxge_attach_fail2;
    }

    nxgep->nxge_magic = NXGE_MAGIC;

    nxgep->drv_state = 0;
    nxgep->dip = dip;
    nxgep->instance = instance;
    nxgep->p_dip = ddi_get_parent(dip);
    nxgep->nxge_debug_level = nxge_debug_level;
    npi_debug_level = nxge_debug_level;

    /* Are we a guest running in a Hybrid I/O environment? */
    nxge_get_environs(nxgep);

    status = nxge_map_regs(nxgep);

    if (status != NXGE_OK) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_map_regs failed"));
        goto nxge_attach_fail3;
    }

    nxge_fm_init(nxgep, &nxge_dev_reg_acc_attr,
        &nxge_dev_desc_dma_acc_attr,
        &nxge_rx_dma_attr);

    /*
     * Create and initialize the per-Neptune data structure
     * (even if we're a guest).
     */
    status = nxge_init_common_dev(nxgep);
    if (status != NXGE_OK) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            "nxge_init_common_dev failed"));
        goto nxge_attach_fail4;
    }

#if defined(sun4v)
    /* This is required by nxge_hio_init(), which follows. */
    if ((status = nxge_hsvc_register(nxgep)) != DDI_SUCCESS)
        goto nxge_attach_fail;
#endif

    if ((status = nxge_hio_init(nxgep)) != NXGE_OK) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            "nxge_hio_init failed"));
        goto nxge_attach_fail4;
    }

    if (nxgep->niu_type == NEPTUNE_2_10GF) {
        if (nxgep->function_num > 1) {
            NXGE_DEBUG_MSG((nxgep, DDI_CTL, "Unsupported"
                " function %d. Only functions 0 and 1 are "
                "supported for this card.", nxgep->function_num));
            status = NXGE_ERROR;
            goto nxge_attach_fail4;
        }
    }

    if (isLDOMguest(nxgep)) {
        /*
         * Use the function number here.
         */
        nxgep->mac.portnum = nxgep->function_num;
        nxgep->mac.porttype = PORT_TYPE_LOGICAL;

        /* XXX We'll set the MAC address counts to 1 for now. */
        mmac_info = &nxgep->nxge_mmac_info;
        mmac_info->num_mmac = 1;
        mmac_info->naddrfree = 1;
    } else {
        portn = NXGE_GET_PORT_NUM(nxgep->function_num);
        nxgep->mac.portnum = portn;
        if ((portn == 0) || (portn == 1))
            nxgep->mac.porttype = PORT_TYPE_XMAC;
        else
            nxgep->mac.porttype = PORT_TYPE_BMAC;
        /*
         * Neptune has 4 ports: the first two use XMAC (the 10G MAC)
         * internally, and the other two use BMAC (the 1G "Big" MAC).
         * The two types of MACs have different characteristics.
         */
        mmac_info = &nxgep->nxge_mmac_info;
        if (nxgep->function_num < 2) {
            mmac_info->num_mmac = XMAC_MAX_ALT_ADDR_ENTRY;
            mmac_info->naddrfree = XMAC_MAX_ALT_ADDR_ENTRY;
        } else {
            mmac_info->num_mmac = BMAC_MAX_ALT_ADDR_ENTRY;
            mmac_info->naddrfree = BMAC_MAX_ALT_ADDR_ENTRY;
        }
    }
    /*
     * Set up the ndd parameters for this instance.
     */
    nxge_init_param(nxgep);

    /*
     * Set up the register tracing buffer.
     */
    npi_rtrace_buf_init((rtrace_t *)&npi_rtracebuf);

    /* Initialize the statistics pointer. */
    nxge_init_statsp(nxgep);

    /*
     * Copy the vpd info from eeprom to a local data
     * structure, and then check its validity.
     */
    if (!isLDOMguest(nxgep)) {
        int *regp;
        uint_t reglen;
        int rv;

        nxge_vpd_info_get(nxgep);

        /* Find the NIU config handle. */
        rv = ddi_prop_lookup_int_array(DDI_DEV_T_ANY,
            ddi_get_parent(nxgep->dip), DDI_PROP_DONTPASS,
            "reg", &regp, &reglen);

        if (rv != DDI_PROP_SUCCESS) {
            goto nxge_attach_fail5;
        }
        /*
         * The address_hi (the first int in the "reg" property)
         * contains the config handle, but bits 28-31 are
         * OBP-specific info and must be masked off.
         */
        nxgep->niu_cfg_hdl = (*regp) & 0xFFFFFFF;
        ddi_prop_free(regp);
    }

    if (isLDOMguest(nxgep)) {
        uchar_t *prop_val;
        uint_t prop_len;

        extern void nxge_get_logical_props(p_nxge_t);

        nxgep->statsp->mac_stats.xcvr_inuse = LOGICAL_XCVR;
        nxgep->mac.portmode = PORT_LOGICAL;
        (void) ddi_prop_update_string(DDI_DEV_T_NONE, nxgep->dip,
            "phy-type", "virtual transceiver");

        nxgep->nports = 1;
        nxgep->board_ver = 0;	/* XXX What? */

        /*
         * The local-mac-address property tells us which specific
         * MAC address the Hybrid resource is associated with.
         */
        if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, nxgep->dip, 0,
            "local-mac-address", &prop_val,
            &prop_len) != DDI_PROP_SUCCESS) {
            goto nxge_attach_fail5;
        }
        if (prop_len != ETHERADDRL) {
            ddi_prop_free(prop_val);
            goto nxge_attach_fail5;
        }
        ether_copy(prop_val, nxgep->hio_mac_addr);
        ddi_prop_free(prop_val);
        nxge_get_logical_props(nxgep);

    } else {
        status = nxge_xcvr_find(nxgep);

        if (status != NXGE_OK) {
            NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_attach: "
                " Couldn't determine card type"
                " .... exit "));
            goto nxge_attach_fail5;
        }

        status = nxge_get_config_properties(nxgep);

        if (status != NXGE_OK) {
            NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
                "get_hw create failed"));
            goto nxge_attach_fail;
        }
    }

    /*
     * Set up the kstats for the driver.
     */
    nxge_setup_kstats(nxgep);

    if (!isLDOMguest(nxgep))
        nxge_setup_param(nxgep);

    status = nxge_setup_system_dma_pages(nxgep);
    if (status != NXGE_OK) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "set dma page failed"));
        goto nxge_attach_fail;
    }

    nxge_hw_id_init(nxgep);

    if (!isLDOMguest(nxgep))
        nxge_hw_init_niu_common(nxgep);

    status = nxge_setup_mutexes(nxgep);
    if (status != NXGE_OK) {
        NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set mutex failed"));
        goto nxge_attach_fail;
    }

#if defined(sun4v)
    if (isLDOMguest(nxgep)) {
        /* Find our VR & channel sets. */
        status = nxge_hio_vr_add(nxgep);
        goto nxge_attach_exit;
    }
#endif

    status = nxge_setup_dev(nxgep);
    if (status != DDI_SUCCESS) {
        NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set dev failed"));
        goto nxge_attach_fail;
    }

    status = nxge_add_intrs(nxgep);
    if (status != DDI_SUCCESS) {
        NXGE_DEBUG_MSG((nxgep, DDI_CTL, "add_intr failed"));
        goto nxge_attach_fail;
    }
    status = nxge_add_soft_intrs(nxgep);
    if (status != DDI_SUCCESS) {
        NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
            "add_soft_intr failed"));
        goto nxge_attach_fail;
    }

    /*
     * Enable interrupts.
     */
    nxge_intrs_enable(nxgep);

    /* If a guest, register with vio_net instead. */
    if ((status = nxge_mac_register(nxgep)) != NXGE_OK) {
        NXGE_DEBUG_MSG((nxgep, DDI_CTL,
            "unable to register to mac layer (%d)", status));
        goto nxge_attach_fail;
    }

    mac_link_update(nxgep->mach, LINK_STATE_UNKNOWN);

    NXGE_DEBUG_MSG((nxgep, DDI_CTL,
        "registered to mac (instance %d)", instance));

    /* nxge_link_monitor calls xcvr.check_link recursively */
    (void) nxge_link_monitor(nxgep, LINK_MONITOR_START);

    goto nxge_attach_exit;

nxge_attach_fail:
    nxge_unattach(nxgep);
    goto nxge_attach_fail1;

nxge_attach_fail5:
    /*
     * Tear down the ndd parameter setup.
     */
    nxge_destroy_param(nxgep);

    /*
     * Tear down the kstat setup.
     */
    nxge_destroy_kstats(nxgep);

nxge_attach_fail4:
    if (nxgep->nxge_hw_p) {
        nxge_uninit_common_dev(nxgep);
        nxgep->nxge_hw_p = NULL;
    }

nxge_attach_fail3:
    /*
     * Unmap the register setup.
     */
    nxge_unmap_regs(nxgep);

    nxge_fm_fini(nxgep);

nxge_attach_fail2:
    ddi_soft_state_free(nxge_list, nxgep->instance);

nxge_attach_fail1:
    if (status != NXGE_OK)
        status = (NXGE_ERROR | NXGE_DDI_FAILED);
    nxgep = NULL;

nxge_attach_exit:
    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_attach status = 0x%08x",
        status));

    return (status);
}

static int
nxge_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
    int status = DDI_SUCCESS;
    int instance;
    p_nxge_t nxgep = NULL;

    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_detach"));
    instance = ddi_get_instance(dip);
    nxgep = ddi_get_soft_state(nxge_list, instance);
    if (nxgep == NULL) {
        status = DDI_FAILURE;
        goto nxge_detach_exit;
    }

    switch (cmd) {
    case DDI_DETACH:
        NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_DETACH"));
        break;

    case DDI_PM_SUSPEND:
        NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_SUSPEND"));
        nxgep->suspended = DDI_PM_SUSPEND;
        nxge_suspend(nxgep);
        break;

    case DDI_SUSPEND:
        NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_SUSPEND"));
        if (nxgep->suspended != DDI_PM_SUSPEND) {
            nxgep->suspended = DDI_SUSPEND;
            nxge_suspend(nxgep);
        }
        break;

    default:
        status = DDI_FAILURE;
    }

    if (cmd != DDI_DETACH)
        goto nxge_detach_exit;

    /*
     * Stop the xcvr polling.
     */
    nxgep->suspended = cmd;

    (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);

    if (isLDOMguest(nxgep)) {
        nxge_hio_unregister(nxgep);
    } else if (nxgep->mach && (status = mac_unregister(nxgep->mach)) != 0) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            "<== nxge_detach status = 0x%08X", status));
        return (DDI_FAILURE);
    }

    NXGE_DEBUG_MSG((nxgep, DDI_CTL,
        "<== nxge_detach (mac_unregister) status = 0x%08X", status));

    nxge_unattach(nxgep);
    nxgep = NULL;

nxge_detach_exit:
    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_detach status = 0x%08X",
        status));

    return (status);
}

static void
nxge_unattach(p_nxge_t nxgep)
{
    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unattach"));

    if (nxgep == NULL || nxgep->dev_regs == NULL) {
        return;
    }

    nxgep->nxge_magic = 0;

    if (nxgep->nxge_timerid) {
        nxge_stop_timer(nxgep, nxgep->nxge_timerid);
        nxgep->nxge_timerid = 0;
    }

    /*
     * If this flag is set, it affects Neptune only.
     */
    if ((nxgep->niu_type != N2_NIU) && nxge_peu_reset_enable) {
        nxge_niu_peu_reset(nxgep);
    }

#if defined(sun4v)
    if (isLDOMguest(nxgep)) {
        (void) nxge_hio_vr_release(nxgep);
    }
#endif

    if (nxgep->nxge_hw_p) {
        nxge_uninit_common_dev(nxgep);
        nxgep->nxge_hw_p = NULL;
    }

#if defined(sun4v)
    if (nxgep->niu_type == N2_NIU && nxgep->niu_hsvc_available == B_TRUE) {
        (void) hsvc_unregister(&nxgep->niu_hsvc);
        nxgep->niu_hsvc_available = B_FALSE;
    }
#endif
    /*
     * Stop any further interrupts.
     */
    nxge_remove_intrs(nxgep);

    /* Remove soft interrupts. */
    nxge_remove_soft_intrs(nxgep);

    /*
     * Stop the device and free resources.
     */
    if (!isLDOMguest(nxgep)) {
        nxge_destroy_dev(nxgep);
    }

    /*
     * Tear down the ndd parameter setup.
     */
    nxge_destroy_param(nxgep);

    /*
     * Tear down the kstat setup.
     */
    nxge_destroy_kstats(nxgep);

    /*
     * Destroy all mutexes.
     */
    nxge_destroy_mutexes(nxgep);

    /*
     * Remove the list of ndd parameters which
     * were set up during attach.
     */
    if (nxgep->dip) {
        NXGE_DEBUG_MSG((nxgep, OBP_CTL,
            " nxge_unattach: remove all properties"));

        (void) ddi_prop_remove_all(nxgep->dip);
    }

#if NXGE_PROPERTY
    nxge_remove_hard_properties(nxgep);
#endif

    /*
     * Unmap the register setup.
     */
    nxge_unmap_regs(nxgep);

    nxge_fm_fini(nxgep);

    ddi_soft_state_free(nxge_list, nxgep->instance);

    NXGE_DEBUG_MSG((NULL, DDI_CTL, "<== nxge_unattach"));
}

#if defined(sun4v)
int
nxge_hsvc_register(nxge_t *nxgep)
{
    nxge_status_t status;

    if (nxgep->niu_type == N2_NIU) {
        nxgep->niu_hsvc_available = B_FALSE;
        bcopy(&niu_hsvc, &nxgep->niu_hsvc, sizeof (hsvc_info_t));
        if ((status = hsvc_register(&nxgep->niu_hsvc,
            &nxgep->niu_min_ver)) != 0) {
            NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
                "nxge_attach: %s: cannot negotiate "
                "hypervisor services revision %d group: 0x%lx "
                "major: 0x%lx minor: 0x%lx errno: %d",
                niu_hsvc.hsvc_modname, niu_hsvc.hsvc_rev,
                niu_hsvc.hsvc_group, niu_hsvc.hsvc_major,
                niu_hsvc.hsvc_minor, status));
            return (DDI_FAILURE);
        }
        nxgep->niu_hsvc_available = B_TRUE;
        NXGE_DEBUG_MSG((nxgep, DDI_CTL,
            "NIU Hypervisor service enabled"));
    }

    return (DDI_SUCCESS);
}
#endif

static char n2_siu_name[] = "niu";

static nxge_status_t
nxge_map_regs(p_nxge_t nxgep)
{
    int ddi_status = DDI_SUCCESS;
    p_dev_regs_t dev_regs;
    char buf[MAXPATHLEN + 1];
    char *devname;
#ifdef NXGE_DEBUG
    char *sysname;
#endif
    off_t regsize;
    nxge_status_t status = NXGE_OK;
#if !defined(_BIG_ENDIAN)
    off_t pci_offset;
    uint16_t pcie_devctl;
#endif

    if (isLDOMguest(nxgep)) {
        return (nxge_guest_regs_map(nxgep));
    }

    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_map_regs"));
    nxgep->dev_regs = NULL;
    dev_regs = KMEM_ZALLOC(sizeof (dev_regs_t), KM_SLEEP);
    dev_regs->nxge_regh = NULL;
    dev_regs->nxge_pciregh = NULL;
    dev_regs->nxge_msix_regh = NULL;
    dev_regs->nxge_vir_regh = NULL;
    dev_regs->nxge_vir2_regh = NULL;
    nxgep->niu_type = NIU_TYPE_NONE;

    devname = ddi_pathname(nxgep->dip, buf);
    ASSERT(strlen(devname) > 0);
    NXGE_DEBUG_MSG((nxgep, DDI_CTL,
        "nxge_map_regs: pathname devname %s", devname));

    /*
     * The driver is running on an N2-NIU system if devname is
     * something like "/niu@80/network@0".
     */
    if (strstr(devname, n2_siu_name)) {
        /* N2/NIU */
        nxgep->niu_type = N2_NIU;
        NXGE_DEBUG_MSG((nxgep, DDI_CTL,
            "nxge_map_regs: N2/NIU devname %s", devname));
        /* Get the function number. */
        nxgep->function_num =
            (devname[strlen(devname) - 1] == '1' ? 1 : 0);
        NXGE_DEBUG_MSG((nxgep, DDI_CTL,
            "nxge_map_regs: N2/NIU function number %d",
            nxgep->function_num));
    } else {
        int *prop_val;
        uint_t prop_len;
        uint8_t func_num;

        if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, nxgep->dip,
            0, "reg",
            &prop_val, &prop_len) != DDI_PROP_SUCCESS) {
            NXGE_DEBUG_MSG((nxgep, VPD_CTL,
                "Reg property not found"));
            ddi_status = DDI_FAILURE;
            goto nxge_map_regs_fail0;

        } else {
            /* Bits 8-10 of the PCI address hold the function. */
            func_num = (prop_val[0] >> 8) & 0x7;
            NXGE_DEBUG_MSG((nxgep, DDI_CTL,
                "Reg property found: fun # %d",
                func_num));
            nxgep->function_num = func_num;
            ddi_prop_free(prop_val);
            if (isLDOMguest(nxgep)) {
                nxgep->function_num /= 2;
                return (NXGE_OK);
            }
        }
    }
    switch (nxgep->niu_type) {
    default:
        (void) ddi_dev_regsize(nxgep->dip, 0, &regsize);
        NXGE_DEBUG_MSG((nxgep, DDI_CTL,
            "nxge_map_regs: pci config size 0x%x", regsize));

        ddi_status = ddi_regs_map_setup(nxgep->dip, 0,
            (caddr_t *)&(dev_regs->nxge_pciregp), 0, 0,
            &nxge_dev_reg_acc_attr, &dev_regs->nxge_pciregh);
        if (ddi_status != DDI_SUCCESS) {
            NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
                "ddi_map_regs, nxge bus config regs failed"));
            goto nxge_map_regs_fail0;
        }
        NXGE_DEBUG_MSG((nxgep, DDI_CTL,
            "nxge_map_reg: PCI config addr 0x%0llx "
            " handle 0x%0llx", dev_regs->nxge_pciregp,
            dev_regs->nxge_pciregh));
        /*
         * IMPORTANT: workaround for a hardware bit-swapping bug
         * that ends up with no-snoop = yes, resulting in DMA not
         * being synchronized properly.
         */
#if !defined(_BIG_ENDIAN)
        /* Workarounds for x86 systems. */
        pci_offset = 0x80 + PCIE_DEVCTL;
        /*
         * The value written leaves no-snoop disabled and enables
         * relaxed ordering.
         */
        pcie_devctl = 0x0;
        pcie_devctl &= PCIE_DEVCTL_ENABLE_NO_SNOOP;
        pcie_devctl |= PCIE_DEVCTL_RO_EN;
        pci_config_put16(dev_regs->nxge_pciregh, pci_offset,
            pcie_devctl);
#endif

        (void) ddi_dev_regsize(nxgep->dip, 1, &regsize);
        NXGE_DEBUG_MSG((nxgep, DDI_CTL,
            "nxge_map_regs: pio size 0x%x", regsize));
        /* Set up the device-mapped register. */
        ddi_status = ddi_regs_map_setup(nxgep->dip, 1,
            (caddr_t *)&(dev_regs->nxge_regp), 0, 0,
            &nxge_dev_reg_acc_attr, &dev_regs->nxge_regh);
        if (ddi_status != DDI_SUCCESS) {
            NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
                "ddi_map_regs for Neptune global reg failed"));
            goto nxge_map_regs_fail1;
        }

        /* Set up the MSI/MSI-X mapped register. */
        (void) ddi_dev_regsize(nxgep->dip, 2, &regsize);
        NXGE_DEBUG_MSG((nxgep, DDI_CTL,
            "nxge_map_regs: msix size 0x%x", regsize));
        ddi_status = ddi_regs_map_setup(nxgep->dip, 2,
            (caddr_t *)&(dev_regs->nxge_msix_regp), 0, 0,
            &nxge_dev_reg_acc_attr, &dev_regs->nxge_msix_regh);
        if (ddi_status != DDI_SUCCESS) {
            NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
                "ddi_map_regs for msi reg failed"));
            goto nxge_map_regs_fail2;
        }

        /* Set up the vio region mapped register. */
        (void) ddi_dev_regsize(nxgep->dip, 3, &regsize);
        NXGE_DEBUG_MSG((nxgep, DDI_CTL,
            "nxge_map_regs: vio size 0x%x", regsize));
        ddi_status = ddi_regs_map_setup(nxgep->dip, 3,
            (caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0,
            &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh);

        if (ddi_status != DDI_SUCCESS) {
            NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
                "ddi_map_regs for nxge vio reg failed"));
            goto nxge_map_regs_fail3;
        }
        nxgep->dev_regs = dev_regs;

        NPI_PCI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_pciregh);
        NPI_PCI_ADD_HANDLE_SET(nxgep,
            (npi_reg_ptr_t)dev_regs->nxge_pciregp);
        NPI_MSI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_msix_regh);
        NPI_MSI_ADD_HANDLE_SET(nxgep,
            (npi_reg_ptr_t)dev_regs->nxge_msix_regp);

        NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
        NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp);

        NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
        NPI_REG_ADD_HANDLE_SET(nxgep,
            (npi_reg_ptr_t)dev_regs->nxge_regp);

        NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh);
        NPI_VREG_ADD_HANDLE_SET(nxgep,
            (npi_reg_ptr_t)dev_regs->nxge_vir_regp);

        break;

    case N2_NIU:
        NXGE_DEBUG_MSG((nxgep, DDI_CTL, "ddi_map_regs, NIU"));
        /*
         * Set up the device-mapped register (FWARC 2006/556)
         * (changed back to 1: reg starts at 1!)
         */
        (void) ddi_dev_regsize(nxgep->dip, 1, &regsize);
        NXGE_DEBUG_MSG((nxgep, DDI_CTL,
            "nxge_map_regs: dev size 0x%x", regsize));
        ddi_status = ddi_regs_map_setup(nxgep->dip, 1,
            (caddr_t *)&(dev_regs->nxge_regp), 0, 0,
            &nxge_dev_reg_acc_attr, &dev_regs->nxge_regh);

        if (ddi_status != DDI_SUCCESS) {
            NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
                "ddi_map_regs for N2/NIU, global reg failed "));
            goto nxge_map_regs_fail1;
        }

        /* Set up the first vio region mapped register. */
        (void) ddi_dev_regsize(nxgep->dip, 2, &regsize);
        NXGE_DEBUG_MSG((nxgep, DDI_CTL,
            "nxge_map_regs: vio (1) size 0x%x", regsize));
        ddi_status = ddi_regs_map_setup(nxgep->dip, 2,
            (caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0,
            &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh);

        if (ddi_status != DDI_SUCCESS) {
            NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
                "ddi_map_regs for nxge vio reg failed"));
            goto nxge_map_regs_fail2;
        }
        /* Set up the second vio region mapped register. */
        (void) ddi_dev_regsize(nxgep->dip, 3, &regsize);
        NXGE_DEBUG_MSG((nxgep, DDI_CTL,
            "nxge_map_regs: vio (3) size 0x%x", regsize));
        ddi_status = ddi_regs_map_setup(nxgep->dip, 3,
            (caddr_t *)&(dev_regs->nxge_vir2_regp), 0, 0,
            &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir2_regh);

        if (ddi_status != DDI_SUCCESS) {
            NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
                "ddi_map_regs for nxge vio2 reg failed"));
            goto nxge_map_regs_fail3;
        }
        nxgep->dev_regs = dev_regs;

        NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
        NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp);

        NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
        NPI_REG_ADD_HANDLE_SET(nxgep,
            (npi_reg_ptr_t)dev_regs->nxge_regp);

        NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh);
        NPI_VREG_ADD_HANDLE_SET(nxgep,
            (npi_reg_ptr_t)dev_regs->nxge_vir_regp);

        NPI_V2REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir2_regh);
        NPI_V2REG_ADD_HANDLE_SET(nxgep,
            (npi_reg_ptr_t)dev_regs->nxge_vir2_regp);

        break;
    }

    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "nxge_map_reg: hardware addr 0x%0llx "
        " handle 0x%0llx", dev_regs->nxge_regp, dev_regs->nxge_regh));

    goto nxge_map_regs_exit;

nxge_map_regs_fail3:
    if (dev_regs->nxge_msix_regh) {
        ddi_regs_map_free(&dev_regs->nxge_msix_regh);
    }
    if (dev_regs->nxge_vir_regh) {
        ddi_regs_map_free(&dev_regs->nxge_vir_regh);
    }
nxge_map_regs_fail2:
    if (dev_regs->nxge_regh) {
        ddi_regs_map_free(&dev_regs->nxge_regh);
    }
nxge_map_regs_fail1:
    if (dev_regs->nxge_pciregh) {
        ddi_regs_map_free(&dev_regs->nxge_pciregh);
    }
nxge_map_regs_fail0:
    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "Freeing register set memory"));
    kmem_free(dev_regs, sizeof (dev_regs_t));

nxge_map_regs_exit:
    if (ddi_status != DDI_SUCCESS)
        status |= (NXGE_ERROR | NXGE_DDI_FAILED);
    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_map_regs"));
    return (status);
}

static void
nxge_unmap_regs(p_nxge_t nxgep)
{
    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unmap_regs"));

    if (isLDOMguest(nxgep)) {
        nxge_guest_regs_map_free(nxgep);
        return;
    }

    if (nxgep->dev_regs) {
        if (nxgep->dev_regs->nxge_pciregh) {
            NXGE_DEBUG_MSG((nxgep, DDI_CTL,
                "==> nxge_unmap_regs: bus"));
            ddi_regs_map_free(&nxgep->dev_regs->nxge_pciregh);
            nxgep->dev_regs->nxge_pciregh = NULL;
        }
        if (nxgep->dev_regs->nxge_regh) {
            NXGE_DEBUG_MSG((nxgep, DDI_CTL,
                "==> nxge_unmap_regs: device registers"));
            ddi_regs_map_free(&nxgep->dev_regs->nxge_regh);
            nxgep->dev_regs->nxge_regh = NULL;
        }
        if (nxgep->dev_regs->nxge_msix_regh) {
            NXGE_DEBUG_MSG((nxgep, DDI_CTL,
                "==> nxge_unmap_regs: device interrupts"));
            ddi_regs_map_free(&nxgep->dev_regs->nxge_msix_regh);
            nxgep->dev_regs->nxge_msix_regh = NULL;
        }
        if (nxgep->dev_regs->nxge_vir_regh) {
            NXGE_DEBUG_MSG((nxgep, DDI_CTL,
                "==> nxge_unmap_regs: vio region"));
            ddi_regs_map_free(&nxgep->dev_regs->nxge_vir_regh);
            nxgep->dev_regs->nxge_vir_regh = NULL;
        }
        if (nxgep->dev_regs->nxge_vir2_regh) {
            NXGE_DEBUG_MSG((nxgep, DDI_CTL,
                "==> nxge_unmap_regs: vio2 region"));
            ddi_regs_map_free(&nxgep->dev_regs->nxge_vir2_regh);
            nxgep->dev_regs->nxge_vir2_regh = NULL;
        }

        kmem_free(nxgep->dev_regs, sizeof (dev_regs_t));
        nxgep->dev_regs = NULL;
    }

    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_unmap_regs"));
}

static nxge_status_t
nxge_setup_mutexes(p_nxge_t nxgep)
{
    int ddi_status = DDI_SUCCESS;
    nxge_status_t status = NXGE_OK;
    nxge_classify_t *classify_ptr;
    int partition;

    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_mutexes"));

    /*
     * Get the interrupt cookie so the mutexes can be initialized.
     */
    if (isLDOMguest(nxgep)) {
        nxgep->interrupt_cookie = 0;
    } else {
        ddi_status = ddi_get_iblock_cookie(nxgep->dip, 0,
            &nxgep->interrupt_cookie);

        if (ddi_status != DDI_SUCCESS) {
            NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
                "<== nxge_setup_mutexes: failed 0x%x",
                ddi_status));
            goto nxge_setup_mutexes_exit;
        }
    }

    cv_init(&nxgep->poll_cv, NULL, CV_DRIVER, NULL);
    MUTEX_INIT(&nxgep->poll_lock, NULL,
        MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);

    /*
     * Initialize the mutexes for this device.
     */
    MUTEX_INIT(nxgep->genlock, NULL,
        MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
    MUTEX_INIT(&nxgep->ouraddr_lock, NULL,
        MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
    MUTEX_INIT(&nxgep->mif_lock, NULL,
        MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
    MUTEX_INIT(&nxgep->group_lock, NULL,
        MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
    RW_INIT(&nxgep->filter_lock, NULL,
        RW_DRIVER, (void *)nxgep->interrupt_cookie);

    classify_ptr = &nxgep->classifier;
    /*
     * The FFLP mutexes are never used in interrupt context, since
     * FFLP operations can take a very long time to complete and are
     * therefore not suitable for invocation from interrupt handlers.
     */
    MUTEX_INIT(&classify_ptr->tcam_lock, NULL,
        NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
    if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
        MUTEX_INIT(&classify_ptr->fcram_lock, NULL,
            NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
        for (partition = 0; partition < MAX_PARTITION; partition++) {
            MUTEX_INIT(&classify_ptr->hash_lock[partition], NULL,
                NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
        }
    }

nxge_setup_mutexes_exit:
    NXGE_DEBUG_MSG((nxgep, DDI_CTL,
        "<== nxge_setup_mutexes status = %x", status));

    if (ddi_status != DDI_SUCCESS)
        status |= (NXGE_ERROR | NXGE_DDI_FAILED);

    return (status);
}

static void
nxge_destroy_mutexes(p_nxge_t nxgep)
{
    int partition;
    nxge_classify_t *classify_ptr;

    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_mutexes"));
    RW_DESTROY(&nxgep->filter_lock);
    MUTEX_DESTROY(&nxgep->group_lock);
    MUTEX_DESTROY(&nxgep->mif_lock);
    MUTEX_DESTROY(&nxgep->ouraddr_lock);
    MUTEX_DESTROY(nxgep->genlock);

    classify_ptr = &nxgep->classifier;
    MUTEX_DESTROY(&classify_ptr->tcam_lock);

    /* Destroy all polling resources. */
    MUTEX_DESTROY(&nxgep->poll_lock);
    cv_destroy(&nxgep->poll_cv);

    /* Free data structures, based on HW type. */
    if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
        MUTEX_DESTROY(&classify_ptr->fcram_lock);
        for (partition = 0; partition < MAX_PARTITION; partition++) {
            MUTEX_DESTROY(&classify_ptr->hash_lock[partition]);
        }
    }

    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_mutexes"));
}

nxge_status_t
nxge_init(p_nxge_t nxgep)
{
    nxge_status_t status = NXGE_OK;

    NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_init"));

    if (nxgep->drv_state & STATE_HW_INITIALIZED) {
        return (status);
    }

    /*
     * Allocate system memory for the receive/transmit buffer blocks
     * and receive/transmit descriptor rings.
     */
    status = nxge_alloc_mem_pool(nxgep);
    if (status != NXGE_OK) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "alloc mem failed\n"));
        goto nxge_init_fail1;
    }

    if (!isLDOMguest(nxgep)) {
        /*
         * Initialize and enable the TXC registers.
         * (Globally enable the Tx controller,
         * enable the port, configure the DMA channel bitmap,
         * and configure the max burst size.)
         */
        status = nxge_txc_init(nxgep);
        if (status != NXGE_OK) {
            NXGE_ERROR_MSG((nxgep,
                NXGE_ERR_CTL, "init txc failed\n"));
            goto nxge_init_fail2;
        }
    }

    /*
     * Initialize and enable the TXDMA channels.
     */
    status = nxge_init_txdma_channels(nxgep);
    if (status != NXGE_OK) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init txdma failed\n"));
        goto nxge_init_fail3;
    }

    /*
     * Initialize and enable the RXDMA channels.
     */
    status = nxge_init_rxdma_channels(nxgep);
    if (status != NXGE_OK) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init rxdma failed\n"));
        goto nxge_init_fail4;
    }

    /*
     * The guest domain is now done.
     */
    if (isLDOMguest(nxgep)) {
        nxgep->drv_state |= STATE_HW_INITIALIZED;
        goto nxge_init_exit;
    }

    /*
     * Initialize TCAM and FCRAM (Neptune).
     */
    status = nxge_classify_init(nxgep);
    if (status != NXGE_OK) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            "init classify failed\n"));
        goto nxge_init_fail5;
    }

    /*
     * Initialize the ZCP.
     */
    status = nxge_zcp_init(nxgep);
    if (status != NXGE_OK) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init ZCP failed\n"));
        goto nxge_init_fail5;
    }

    /*
     * Initialize the IPP.
     */
    status = nxge_ipp_init(nxgep);
    if (status != NXGE_OK) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init IPP failed\n"));
        goto nxge_init_fail5;
    }

    /*
     * Initialize the MAC block.
     */
    status = nxge_mac_init(nxgep);
    if (status != NXGE_OK) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init MAC failed\n"));
        goto nxge_init_fail5;
    }

    nxge_intrs_enable(nxgep); /* XXX What changes do I need to make here? */

    /*
     * Enable hardware interrupts.
     */
    nxge_intr_hw_enable(nxgep);
    nxgep->drv_state |= STATE_HW_INITIALIZED;

    goto nxge_init_exit;

nxge_init_fail5:
    nxge_uninit_rxdma_channels(nxgep);
nxge_init_fail4:
    nxge_uninit_txdma_channels(nxgep);
nxge_init_fail3:
    if (!isLDOMguest(nxgep)) {
        (void) nxge_txc_uninit(nxgep);
    }
nxge_init_fail2:
    nxge_free_mem_pool(nxgep);
nxge_init_fail1:
    NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
        "<== nxge_init status (failed) = 0x%08x", status));
    return (status);

nxge_init_exit:
    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_init status = 0x%08x",
        status));
    return (status);
}

timeout_id_t
nxge_start_timer(p_nxge_t nxgep, fptrv_t func, int msec)
{
    if ((nxgep->suspended == 0) || (nxgep->suspended == DDI_RESUME)) {
        return (timeout(func, (caddr_t)nxgep,
            drv_usectohz(1000 * msec)));
    }
    return (NULL);
}

/*ARGSUSED*/
void
nxge_stop_timer(p_nxge_t nxgep, timeout_id_t timerid)
{
    if (timerid) {
        (void) untimeout(timerid);
    }
}
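/*
 * A usage sketch for the timer helpers above (the callback name is
 * illustrative, not from this file):
 *
 *	timeout_id_t tid;
 *
 *	tid = nxge_start_timer(nxgep, some_poll_func, 1000);
 *	...
 *	nxge_stop_timer(nxgep, tid);
 *
 * The timeout is only armed while the instance is not suspended;
 * callers keep the returned id (e.g. nxgep->nxge_timerid) so the
 * callback can be cancelled on detach or uninit.
 */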
void
nxge_uninit(p_nxge_t nxgep)
{
    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_uninit"));

    if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
        NXGE_DEBUG_MSG((nxgep, DDI_CTL,
            "==> nxge_uninit: not initialized"));
        NXGE_DEBUG_MSG((nxgep, DDI_CTL,
            "<== nxge_uninit"));
        return;
    }

    /* Stop the timer. */
    if (nxgep->nxge_timerid) {
        nxge_stop_timer(nxgep, nxgep->nxge_timerid);
        nxgep->nxge_timerid = 0;
    }

    (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
    (void) nxge_intr_hw_disable(nxgep);

    /*
     * Reset the receive MAC side.
     */
    (void) nxge_rx_mac_disable(nxgep);

    /* Disable and soft-reset the IPP. */
    if (!isLDOMguest(nxgep))
        (void) nxge_ipp_disable(nxgep);

    /* Free classification resources. */
    (void) nxge_classify_uninit(nxgep);

    /*
     * Reset the transmit/receive DMA side.
     */
    (void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_STOP);
    (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP);

    nxge_uninit_txdma_channels(nxgep);
    nxge_uninit_rxdma_channels(nxgep);

    /*
     * Reset the transmit MAC side.
     */
    (void) nxge_tx_mac_disable(nxgep);

    nxge_free_mem_pool(nxgep);

    /*
     * Start the timer if the reset flag is not set.
     * If the reset flag is set, the link monitor is not started,
     * in order to stop further bus activity coming from this
     * interface. The driver will start the monitor function
     * if the interface is initialized again later.
     */
    if (!nxge_peu_reset_enable) {
        (void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
    }

    nxgep->drv_state &= ~STATE_HW_INITIALIZED;

    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_uninit: "
        "nxge_mblks_pending %d", nxge_mblks_pending));
}

void
nxge_get64(p_nxge_t nxgep, p_mblk_t mp)
{
#if defined(__i386)
    size_t reg;
#else
    uint64_t reg;
#endif
    uint64_t regdata;
    int i, retry;

    bcopy((char *)mp->b_rptr, (char *)&reg, sizeof (uint64_t));
    regdata = 0;
    retry = 1;

    for (i = 0; i < retry; i++) {
        NXGE_REG_RD64(nxgep->npi_handle, reg, &regdata);
    }
    bcopy((char *)&regdata, (char *)mp->b_rptr, sizeof (uint64_t));
}

void
nxge_put64(p_nxge_t nxgep, p_mblk_t mp)
{
#if defined(__i386)
    size_t reg;
#else
    uint64_t reg;
#endif
    uint64_t buf[2];

    bcopy((char *)mp->b_rptr, (char *)&buf[0], 2 * sizeof (uint64_t));
#if defined(__i386)
    reg = (size_t)buf[0];
#else
    reg = buf[0];
#endif

    NXGE_NPI_PIO_WRITE64(nxgep->npi_handle, reg, buf[1]);
}
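/*
 * Message layout for the two register peek/poke helpers above, as
 * implied by their bcopy() calls: nxge_get64() expects the first
 * 8 bytes of the mblk to hold a register offset and overwrites them
 * with the value read; nxge_put64() expects 16 bytes, the register
 * offset followed by the 64-bit value to write. How the mblk reaches
 * them (presumably a driver-private ioctl) is outside this file.
 */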
nxge_os_mutex_t nxgedebuglock;
int nxge_debug_init = 0;

/*ARGSUSED*/
/*VARARGS*/
void
nxge_debug_msg(p_nxge_t nxgep, uint64_t level, char *fmt, ...)
{
    char msg_buffer[1048];
    char prefix_buffer[32];
    int instance;
    uint64_t debug_level;
    int cmn_level = CE_CONT;
    va_list ap;

    /* In case a developer has changed nxge_debug_level. */
    if (nxgep != NULL && nxgep->nxge_debug_level != nxge_debug_level)
        nxgep->nxge_debug_level = nxge_debug_level;

    debug_level = (nxgep == NULL) ? nxge_debug_level :
        nxgep->nxge_debug_level;

    if ((level & debug_level) ||
        (level == NXGE_NOTE) ||
        (level == NXGE_ERR_CTL)) {
        /* Do the message processing. */
        if (nxge_debug_init == 0) {
            MUTEX_INIT(&nxgedebuglock, NULL, MUTEX_DRIVER, NULL);
            nxge_debug_init = 1;
        }

        MUTEX_ENTER(&nxgedebuglock);

        if ((level & NXGE_NOTE)) {
            cmn_level = CE_NOTE;
        }

        if (level & NXGE_ERR_CTL) {
            cmn_level = CE_WARN;
        }

        va_start(ap, fmt);
        (void) vsprintf(msg_buffer, fmt, ap);
        va_end(ap);
        if (nxgep == NULL) {
            instance = -1;
            (void) sprintf(prefix_buffer, "%s :", "nxge");
        } else {
            instance = nxgep->instance;
            (void) sprintf(prefix_buffer,
                "%s%d :", "nxge", instance);
        }

        MUTEX_EXIT(&nxgedebuglock);
        cmn_err(cmn_level, "!%s %s\n",
            prefix_buffer, msg_buffer);

    }
}

char *
nxge_dump_packet(char *addr, int size)
{
    uchar_t *ap = (uchar_t *)addr;
    int i;
    static char etherbuf[1024];
    char *cp = etherbuf;
    char digits[] = "0123456789abcdef";

    if (!size)
        size = 60;

    if (size > MAX_DUMP_SZ) {
        /* Dump the leading bytes. */
        for (i = 0; i < MAX_DUMP_SZ / 2; i++) {
            if (*ap > 0x0f)
                *cp++ = digits[*ap >> 4];
            *cp++ = digits[*ap++ & 0xf];
            *cp++ = ':';
        }
        for (i = 0; i < 20; i++)
            *cp++ = '.';
        /* Dump the last MAX_DUMP_SZ/2 bytes. */
        ap = (uchar_t *)(addr + (size - MAX_DUMP_SZ / 2));
        for (i = 0; i < MAX_DUMP_SZ / 2; i++) {
            if (*ap > 0x0f)
                *cp++ = digits[*ap >> 4];
            *cp++ = digits[*ap++ & 0xf];
            *cp++ = ':';
        }
    } else {
        for (i = 0; i < size; i++) {
            if (*ap > 0x0f)
                *cp++ = digits[*ap >> 4];
            *cp++ = digits[*ap++ & 0xf];
            *cp++ = ':';
        }
    }
    *--cp = 0;
    return (etherbuf);
}
#ifdef NXGE_DEBUG
static void
nxge_test_map_regs(p_nxge_t nxgep)
{
    ddi_acc_handle_t cfg_handle;
    p_pci_cfg_t cfg_ptr;
    ddi_acc_handle_t dev_handle;
    char *dev_ptr;
    ddi_acc_handle_t pci_config_handle;
    uint32_t regval;
    int i;

    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_test_map_regs"));

    dev_handle = nxgep->dev_regs->nxge_regh;
    dev_ptr = (char *)nxgep->dev_regs->nxge_regp;

    if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
        cfg_handle = nxgep->dev_regs->nxge_pciregh;
        cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp;

        NXGE_DEBUG_MSG((nxgep, DDI_CTL,
            "Neptune PCI regp cfg_ptr 0x%llx", (char *)cfg_ptr));
        NXGE_DEBUG_MSG((nxgep, DDI_CTL,
            "Neptune PCI cfg_ptr vendor id ptr 0x%llx",
            &cfg_ptr->vendorid));
        NXGE_DEBUG_MSG((nxgep, DDI_CTL,
            "\tvendorid 0x%x devid 0x%x",
            NXGE_PIO_READ16(cfg_handle, &cfg_ptr->vendorid, 0),
            NXGE_PIO_READ16(cfg_handle, &cfg_ptr->devid, 0)));
        NXGE_DEBUG_MSG((nxgep, DDI_CTL,
            "PCI BAR: base 0x%x base14 0x%x base 18 0x%x "
            "bar1c 0x%x",
            NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base, 0),
            NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base14, 0),
            NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base18, 0),
            NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base1c, 0)));
        NXGE_DEBUG_MSG((nxgep, DDI_CTL,
            "\nNeptune PCI BAR: base20 0x%x base24 0x%x "
            "base 28 0x%x bar2c 0x%x\n",
            NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base20, 0),
            NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base24, 0),
            NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base28, 0),
            NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base2c, 0)));
        NXGE_DEBUG_MSG((nxgep, DDI_CTL,
            "\nNeptune PCI BAR: base30 0x%x\n",
            NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base30, 0)));

        cfg_handle = nxgep->dev_regs->nxge_pciregh;
        cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp;
        NXGE_DEBUG_MSG((nxgep, DDI_CTL,
            "first 0x%llx second 0x%llx third 0x%llx "
            "last 0x%llx ",
            NXGE_PIO_READ64(dev_handle,
                (uint64_t *)(dev_ptr + 0), 0),
            NXGE_PIO_READ64(dev_handle,
                (uint64_t *)(dev_ptr + 8), 0),
            NXGE_PIO_READ64(dev_handle,
                (uint64_t *)(dev_ptr + 16), 0),
            NXGE_PIO_READ64(dev_handle,
                (uint64_t *)(dev_ptr + 24), 0)));
    }
}
#endif

static void
nxge_suspend(p_nxge_t nxgep)
{
    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_suspend"));

    nxge_intrs_disable(nxgep);
    nxge_destroy_dev(nxgep);

    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_suspend"));
}

static nxge_status_t
nxge_resume(p_nxge_t nxgep)
{
    nxge_status_t status = NXGE_OK;

    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_resume"));

    nxgep->suspended = DDI_RESUME;
    (void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
    (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_START);
    (void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_START);
    (void) nxge_rx_mac_enable(nxgep);
    (void) nxge_tx_mac_enable(nxgep);
    nxge_intrs_enable(nxgep);
    nxgep->suspended = 0;

    NXGE_DEBUG_MSG((nxgep, DDI_CTL,
        "<== nxge_resume status = 0x%x", status));
    return (status);
}

static nxge_status_t
nxge_setup_dev(p_nxge_t nxgep)
{
    nxge_status_t status = NXGE_OK;

    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_dev port %d",
        nxgep->mac.portnum));

    status = nxge_link_init(nxgep);

    if (fm_check_acc_handle(nxgep->dev_regs->nxge_regh) != DDI_FM_OK) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            "port%d Bad register acc handle", nxgep->mac.portnum));
        status = NXGE_ERROR;
    }

    if (status != NXGE_OK) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            " nxge_setup_dev status "
            "(xcvr init 0x%08x)", status));
        goto nxge_setup_dev_exit;
    }

nxge_setup_dev_exit:
    NXGE_DEBUG_MSG((nxgep, DDI_CTL,
        "<== nxge_setup_dev port %d status = 0x%08x",
        nxgep->mac.portnum, status));

    return (status);
}

static void
nxge_destroy_dev(p_nxge_t nxgep)
{
    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_dev"));

    (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);

    (void) nxge_hw_stop(nxgep);

    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_dev"));
}
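/*
 * nxge_setup_system_dma_pages() below reconciles the MMU page size
 * with the IOMMU page size. A worked example with assumed platform
 * values: given 64 KB system pages and an 8 KB IOMMU page size, the
 * two differ, so sys_page_sz drops to the smaller 8 KB; if both were
 * 64 KB (equal and above 0x4000), sys_page_sz would instead be capped
 * at 0x4000 (16 KB).
 */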
iommu_pagesize) { 2008 if (iommu_pagesize > 0x4000) 2009 nxgep->sys_page_sz = 0x4000; 2010 } else { 2011 if (nxgep->sys_page_sz > iommu_pagesize) 2012 nxgep->sys_page_sz = iommu_pagesize; 2013 } 2014 } 2015 } 2016 nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1); 2017 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 2018 "==> nxge_setup_system_dma_pages: page %d (ddi_ptob %d) " 2019 "default_block_size %d page mask %d", 2020 nxgep->sys_page_sz, 2021 ddi_ptob(nxgep->dip, (ulong_t)1), 2022 nxgep->rx_default_block_size, 2023 nxgep->sys_page_mask)); 2024 2025 2026 switch (nxgep->sys_page_sz) { 2027 default: 2028 nxgep->sys_page_sz = 0x1000; 2029 nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1); 2030 nxgep->rx_default_block_size = 0x1000; 2031 nxgep->rx_bksize_code = RBR_BKSIZE_4K; 2032 break; 2033 case 0x1000: 2034 nxgep->rx_default_block_size = 0x1000; 2035 nxgep->rx_bksize_code = RBR_BKSIZE_4K; 2036 break; 2037 case 0x2000: 2038 nxgep->rx_default_block_size = 0x2000; 2039 nxgep->rx_bksize_code = RBR_BKSIZE_8K; 2040 break; 2041 case 0x4000: 2042 nxgep->rx_default_block_size = 0x4000; 2043 nxgep->rx_bksize_code = RBR_BKSIZE_16K; 2044 break; 2045 case 0x8000: 2046 nxgep->rx_default_block_size = 0x8000; 2047 nxgep->rx_bksize_code = RBR_BKSIZE_32K; 2048 break; 2049 } 2050 2051 #ifndef USE_RX_BIG_BUF 2052 nxge_rx_dma_attr.dma_attr_align = nxgep->sys_page_sz; 2053 #else 2054 nxgep->rx_default_block_size = 0x2000; 2055 nxgep->rx_bksize_code = RBR_BKSIZE_8K; 2056 #endif 2057 /* 2058 * Get the system DMA burst size. 2059 */ 2060 ddi_status = ddi_dma_alloc_handle(nxgep->dip, &nxge_tx_dma_attr, 2061 DDI_DMA_DONTWAIT, 0, 2062 &nxgep->dmasparehandle); 2063 if (ddi_status != DDI_SUCCESS) { 2064 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2065 "ddi_dma_alloc_handle: failed " 2066 " status 0x%x", ddi_status)); 2067 goto nxge_get_soft_properties_exit; 2068 } 2069 2070 ddi_status = ddi_dma_addr_bind_handle(nxgep->dmasparehandle, NULL, 2071 (caddr_t)nxgep->dmasparehandle, 2072 sizeof (nxgep->dmasparehandle), 2073 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 2074 DDI_DMA_DONTWAIT, 0, 2075 &cookie, &count); 2076 if (ddi_status != DDI_DMA_MAPPED) { 2077 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2078 "Binding spare handle to find system" 2079 " burstsize failed.")); 2080 ddi_status = DDI_FAILURE; 2081 goto nxge_get_soft_properties_fail1; 2082 } 2083 2084 nxgep->sys_burst_sz = ddi_dma_burstsizes(nxgep->dmasparehandle); 2085 (void) ddi_dma_unbind_handle(nxgep->dmasparehandle); 2086 2087 nxge_get_soft_properties_fail1: 2088 ddi_dma_free_handle(&nxgep->dmasparehandle); 2089 2090 nxge_get_soft_properties_exit: 2091 2092 if (ddi_status != DDI_SUCCESS) 2093 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 2094 2095 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 2096 "<== nxge_setup_system_dma_pages status = 0x%08x", status)); 2097 return (status); 2098 } 2099 2100 static nxge_status_t 2101 nxge_alloc_mem_pool(p_nxge_t nxgep) 2102 { 2103 nxge_status_t status = NXGE_OK; 2104 2105 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_alloc_mem_pool")); 2106 2107 status = nxge_alloc_rx_mem_pool(nxgep); 2108 if (status != NXGE_OK) { 2109 return (NXGE_ERROR); 2110 } 2111 2112 status = nxge_alloc_tx_mem_pool(nxgep); 2113 if (status != NXGE_OK) { 2114 nxge_free_rx_mem_pool(nxgep); 2115 return (NXGE_ERROR); 2116 } 2117 2118 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_alloc_mem_pool")); 2119 return (NXGE_OK); 2120 } 2121 2122 static void 2123 nxge_free_mem_pool(p_nxge_t nxgep) 2124 { 2125 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_mem_pool")); 2126 2127 nxge_free_rx_mem_pool(nxgep); 2128 
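	/*
	 * The RX and TX pools are independent; they are freed in the
	 * same rx-then-tx order used by nxge_alloc_mem_pool() above.
	 */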
nxge_free_tx_mem_pool(nxgep); 2129 2130 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_mem_pool")); 2131 } 2132 2133 nxge_status_t 2134 nxge_alloc_rx_mem_pool(p_nxge_t nxgep) 2135 { 2136 uint32_t rdc_max; 2137 p_nxge_dma_pt_cfg_t p_all_cfgp; 2138 p_nxge_hw_pt_cfg_t p_cfgp; 2139 p_nxge_dma_pool_t dma_poolp; 2140 p_nxge_dma_common_t *dma_buf_p; 2141 p_nxge_dma_pool_t dma_cntl_poolp; 2142 p_nxge_dma_common_t *dma_cntl_p; 2143 uint32_t *num_chunks; /* per dma */ 2144 nxge_status_t status = NXGE_OK; 2145 2146 uint32_t nxge_port_rbr_size; 2147 uint32_t nxge_port_rbr_spare_size; 2148 uint32_t nxge_port_rcr_size; 2149 uint32_t rx_cntl_alloc_size; 2150 2151 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_mem_pool")); 2152 2153 p_all_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config; 2154 p_cfgp = (p_nxge_hw_pt_cfg_t)&p_all_cfgp->hw_config; 2155 rdc_max = NXGE_MAX_RDCS; 2156 2157 /* 2158 * Allocate memory for the common DMA data structures. 2159 */ 2160 dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t), 2161 KM_SLEEP); 2162 dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 2163 sizeof (p_nxge_dma_common_t) * rdc_max, KM_SLEEP); 2164 2165 dma_cntl_poolp = (p_nxge_dma_pool_t) 2166 KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP); 2167 dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 2168 sizeof (p_nxge_dma_common_t) * rdc_max, KM_SLEEP); 2169 2170 num_chunks = (uint32_t *)KMEM_ZALLOC( 2171 sizeof (uint32_t) * rdc_max, KM_SLEEP); 2172 2173 /* 2174 * Assume that each DMA channel will be configured with 2175 * the default block size. 2176 * rbr block counts are modulo the batch count (16). 2177 */ 2178 nxge_port_rbr_size = p_all_cfgp->rbr_size; 2179 nxge_port_rcr_size = p_all_cfgp->rcr_size; 2180 2181 if (!nxge_port_rbr_size) { 2182 nxge_port_rbr_size = NXGE_RBR_RBB_DEFAULT; 2183 } 2184 if (nxge_port_rbr_size % NXGE_RXDMA_POST_BATCH) { 2185 nxge_port_rbr_size = (NXGE_RXDMA_POST_BATCH * 2186 (nxge_port_rbr_size / NXGE_RXDMA_POST_BATCH + 1)); 2187 } 2188 2189 p_all_cfgp->rbr_size = nxge_port_rbr_size; 2190 nxge_port_rbr_spare_size = nxge_rbr_spare_size; 2191 2192 if (nxge_port_rbr_spare_size % NXGE_RXDMA_POST_BATCH) { 2193 nxge_port_rbr_spare_size = (NXGE_RXDMA_POST_BATCH * 2194 (nxge_port_rbr_spare_size / NXGE_RXDMA_POST_BATCH + 1)); 2195 } 2196 if (nxge_port_rbr_size > RBR_DEFAULT_MAX_BLKS) { 2197 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 2198 "nxge_alloc_rx_mem_pool: RBR size too high %d, " 2199 "set to default %d", 2200 nxge_port_rbr_size, RBR_DEFAULT_MAX_BLKS)); 2201 nxge_port_rbr_size = RBR_DEFAULT_MAX_BLKS; 2202 } 2203 if (nxge_port_rcr_size > RCR_DEFAULT_MAX) { 2204 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 2205 "nxge_alloc_rx_mem_pool: RCR too high %d, " 2206 "set to default %d", 2207 nxge_port_rcr_size, RCR_DEFAULT_MAX)); 2208 nxge_port_rcr_size = RCR_DEFAULT_MAX; 2209 } 2210 2211 /* 2212 * N2/NIU has limitation on the descriptor sizes (contiguous 2213 * memory allocation on data buffers to 4M (contig_mem_alloc) 2214 * and little endian for control buffers (must use the ddi/dki mem alloc 2215 * function). 
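 * For example (illustrative numbers only): with a 4K default block
 * size, an RBR of 1024 blocks asks for a 4K * 1024 = 4MB data buffer,
 * which is exactly the contig_mem_alloc() ceiling of (1 << 22) bytes
 * checked below.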
2216 */ 2217 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2218 if (nxgep->niu_type == N2_NIU) { 2219 nxge_port_rbr_spare_size = 0; 2220 if ((nxge_port_rbr_size > NXGE_NIU_CONTIG_RBR_MAX) || 2221 (!ISP2(nxge_port_rbr_size))) { 2222 nxge_port_rbr_size = NXGE_NIU_CONTIG_RBR_MAX; 2223 } 2224 if ((nxge_port_rcr_size > NXGE_NIU_CONTIG_RCR_MAX) || 2225 (!ISP2(nxge_port_rcr_size))) { 2226 nxge_port_rcr_size = NXGE_NIU_CONTIG_RCR_MAX; 2227 } 2228 } 2229 #endif 2230 2231 /* 2232 * Addresses of receive block ring, receive completion ring and the 2233 * mailbox must be all cache-aligned (64 bytes). 2234 */ 2235 rx_cntl_alloc_size = nxge_port_rbr_size + nxge_port_rbr_spare_size; 2236 rx_cntl_alloc_size *= (sizeof (rx_desc_t)); 2237 rx_cntl_alloc_size += (sizeof (rcr_entry_t) * nxge_port_rcr_size); 2238 rx_cntl_alloc_size += sizeof (rxdma_mailbox_t); 2239 2240 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_alloc_rx_mem_pool: " 2241 "nxge_port_rbr_size = %d nxge_port_rbr_spare_size = %d " 2242 "nxge_port_rcr_size = %d " 2243 "rx_cntl_alloc_size = %d", 2244 nxge_port_rbr_size, nxge_port_rbr_spare_size, 2245 nxge_port_rcr_size, 2246 rx_cntl_alloc_size)); 2247 2248 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2249 if (nxgep->niu_type == N2_NIU) { 2250 uint32_t rx_buf_alloc_size = (nxgep->rx_default_block_size * 2251 (nxge_port_rbr_size + nxge_port_rbr_spare_size)); 2252 2253 if (!ISP2(rx_buf_alloc_size)) { 2254 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2255 "==> nxge_alloc_rx_mem_pool: " 2256 " must be power of 2")); 2257 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 2258 goto nxge_alloc_rx_mem_pool_exit; 2259 } 2260 2261 if (rx_buf_alloc_size > (1 << 22)) { 2262 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2263 "==> nxge_alloc_rx_mem_pool: " 2264 " limit size to 4M")); 2265 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 2266 goto nxge_alloc_rx_mem_pool_exit; 2267 } 2268 2269 if (rx_cntl_alloc_size < 0x2000) { 2270 rx_cntl_alloc_size = 0x2000; 2271 } 2272 } 2273 #endif 2274 nxgep->nxge_port_rbr_size = nxge_port_rbr_size; 2275 nxgep->nxge_port_rcr_size = nxge_port_rcr_size; 2276 nxgep->nxge_port_rbr_spare_size = nxge_port_rbr_spare_size; 2277 nxgep->nxge_port_rx_cntl_alloc_size = rx_cntl_alloc_size; 2278 2279 dma_poolp->ndmas = p_cfgp->max_rdcs; 2280 dma_poolp->num_chunks = num_chunks; 2281 dma_poolp->buf_allocated = B_TRUE; 2282 nxgep->rx_buf_pool_p = dma_poolp; 2283 dma_poolp->dma_buf_pool_p = dma_buf_p; 2284 2285 dma_cntl_poolp->ndmas = p_cfgp->max_rdcs; 2286 dma_cntl_poolp->buf_allocated = B_TRUE; 2287 nxgep->rx_cntl_pool_p = dma_cntl_poolp; 2288 dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p; 2289 2290 /* Allocate the receive rings, too. */ 2291 nxgep->rx_rbr_rings = 2292 KMEM_ZALLOC(sizeof (rx_rbr_rings_t), KM_SLEEP); 2293 nxgep->rx_rbr_rings->rbr_rings = 2294 KMEM_ZALLOC(sizeof (p_rx_rbr_ring_t) * rdc_max, KM_SLEEP); 2295 nxgep->rx_rcr_rings = 2296 KMEM_ZALLOC(sizeof (rx_rcr_rings_t), KM_SLEEP); 2297 nxgep->rx_rcr_rings->rcr_rings = 2298 KMEM_ZALLOC(sizeof (p_rx_rcr_ring_t) * rdc_max, KM_SLEEP); 2299 nxgep->rx_mbox_areas_p = 2300 KMEM_ZALLOC(sizeof (rx_mbox_areas_t), KM_SLEEP); 2301 nxgep->rx_mbox_areas_p->rxmbox_areas = 2302 KMEM_ZALLOC(sizeof (p_rx_mbox_t) * rdc_max, KM_SLEEP); 2303 2304 nxgep->rx_rbr_rings->ndmas = nxgep->rx_rcr_rings->ndmas = 2305 p_cfgp->max_rdcs; 2306 2307 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2308 "<== nxge_alloc_rx_mem_pool:status 0x%08x", status)); 2309 2310 nxge_alloc_rx_mem_pool_exit: 2311 return (status); 2312 } 2313 2314 /* 2315 * nxge_alloc_rxb 2316 * 2317 * Allocate buffers for an RDC. 
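 *
 * Sizing sketch (illustrative; mirrors the arithmetic in
 * nxge_alloc_rx_mem_pool() above):
 *	data bytes = rx_default_block_size * (rbr size + rbr spare)
 *	cntl bytes = (rbr size + spare) * sizeof (rx_desc_t)
 *	    + rcr size * sizeof (rcr_entry_t)
 *	    + sizeof (rxdma_mailbox_t)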
2318 * 2319 * Arguments: 2320 * nxgep 2321 * channel The channel to map into our kernel space. 2322 * 2323 * Notes: 2324 * 2325 * NPI function calls: 2326 * 2327 * NXGE function calls: 2328 * 2329 * Registers accessed: 2330 * 2331 * Context: 2332 * 2333 * Taking apart: 2334 * 2335 * Open questions: 2336 * 2337 */ 2338 nxge_status_t 2339 nxge_alloc_rxb( 2340 p_nxge_t nxgep, 2341 int channel) 2342 { 2343 size_t rx_buf_alloc_size; 2344 nxge_status_t status = NXGE_OK; 2345 2346 nxge_dma_common_t **data; 2347 nxge_dma_common_t **control; 2348 uint32_t *num_chunks; 2349 2350 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rbb")); 2351 2352 /* 2353 * Allocate memory for the receive buffers and descriptor rings. 2354 * Replace these allocation functions with the interface functions 2355 * provided by the partition manager if/when they are available. 2356 */ 2357 2358 /* 2359 * Allocate memory for the receive buffer blocks. 2360 */ 2361 rx_buf_alloc_size = (nxgep->rx_default_block_size * 2362 (nxgep->nxge_port_rbr_size + nxgep->nxge_port_rbr_spare_size)); 2363 2364 data = &nxgep->rx_buf_pool_p->dma_buf_pool_p[channel]; 2365 num_chunks = &nxgep->rx_buf_pool_p->num_chunks[channel]; 2366 2367 if ((status = nxge_alloc_rx_buf_dma( 2368 nxgep, channel, data, rx_buf_alloc_size, 2369 nxgep->rx_default_block_size, num_chunks)) != NXGE_OK) { 2370 return (status); 2371 } 2372 2373 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_alloc_rxb(): " 2374 "dma %d dma_buf_p %llx &dma_buf_p %llx", channel, *data, data)); 2375 2376 /* 2377 * Allocate memory for descriptor rings and mailbox. 2378 */ 2379 control = &nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel]; 2380 2381 if ((status = nxge_alloc_rx_cntl_dma( 2382 nxgep, channel, control, nxgep->nxge_port_rx_cntl_alloc_size)) 2383 != NXGE_OK) { 2384 nxge_free_rx_cntl_dma(nxgep, *control); 2385 (*data)->buf_alloc_state |= BUF_ALLOCATED_WAIT_FREE; 2386 nxge_free_rx_buf_dma(nxgep, *data, *num_chunks); 2387 return (status); 2388 } 2389 2390 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2391 "<== nxge_alloc_rx_mem_pool:status 0x%08x", status)); 2392 2393 return (status); 2394 } 2395 2396 void 2397 nxge_free_rxb( 2398 p_nxge_t nxgep, 2399 int channel) 2400 { 2401 nxge_dma_common_t *data; 2402 nxge_dma_common_t *control; 2403 uint32_t num_chunks; 2404 2405 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rbb")); 2406 2407 data = nxgep->rx_buf_pool_p->dma_buf_pool_p[channel]; 2408 num_chunks = nxgep->rx_buf_pool_p->num_chunks[channel]; 2409 nxge_free_rx_buf_dma(nxgep, data, num_chunks); 2410 2411 nxgep->rx_buf_pool_p->dma_buf_pool_p[channel] = 0; 2412 nxgep->rx_buf_pool_p->num_chunks[channel] = 0; 2413 2414 control = nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel]; 2415 nxge_free_rx_cntl_dma(nxgep, control); 2416 2417 nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel] = 0; 2418 2419 KMEM_FREE(data, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 2420 KMEM_FREE(control, sizeof (nxge_dma_common_t)); 2421 2422 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_alloc_rbb")); 2423 } 2424 2425 static void 2426 nxge_free_rx_mem_pool(p_nxge_t nxgep) 2427 { 2428 int rdc_max = NXGE_MAX_RDCS; 2429 2430 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_rx_mem_pool")); 2431 2432 if (!nxgep->rx_buf_pool_p || !nxgep->rx_buf_pool_p->buf_allocated) { 2433 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2434 "<== nxge_free_rx_mem_pool " 2435 "(null rx buf pool or buf not allocated")); 2436 return; 2437 } 2438 if (!nxgep->rx_cntl_pool_p || !nxgep->rx_cntl_pool_p->buf_allocated) { 2439 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2440 "<== nxge_free_rx_mem_pool 
" 2441 "(null rx cntl buf pool or cntl buf not allocated")); 2442 return; 2443 } 2444 2445 KMEM_FREE(nxgep->rx_cntl_pool_p->dma_buf_pool_p, 2446 sizeof (p_nxge_dma_common_t) * rdc_max); 2447 KMEM_FREE(nxgep->rx_cntl_pool_p, sizeof (nxge_dma_pool_t)); 2448 2449 KMEM_FREE(nxgep->rx_buf_pool_p->num_chunks, 2450 sizeof (uint32_t) * rdc_max); 2451 KMEM_FREE(nxgep->rx_buf_pool_p->dma_buf_pool_p, 2452 sizeof (p_nxge_dma_common_t) * rdc_max); 2453 KMEM_FREE(nxgep->rx_buf_pool_p, sizeof (nxge_dma_pool_t)); 2454 2455 nxgep->rx_buf_pool_p = 0; 2456 nxgep->rx_cntl_pool_p = 0; 2457 2458 KMEM_FREE(nxgep->rx_rbr_rings->rbr_rings, 2459 sizeof (p_rx_rbr_ring_t) * rdc_max); 2460 KMEM_FREE(nxgep->rx_rbr_rings, sizeof (rx_rbr_rings_t)); 2461 KMEM_FREE(nxgep->rx_rcr_rings->rcr_rings, 2462 sizeof (p_rx_rcr_ring_t) * rdc_max); 2463 KMEM_FREE(nxgep->rx_rcr_rings, sizeof (rx_rcr_rings_t)); 2464 KMEM_FREE(nxgep->rx_mbox_areas_p->rxmbox_areas, 2465 sizeof (p_rx_mbox_t) * rdc_max); 2466 KMEM_FREE(nxgep->rx_mbox_areas_p, sizeof (rx_mbox_areas_t)); 2467 2468 nxgep->rx_rbr_rings = 0; 2469 nxgep->rx_rcr_rings = 0; 2470 nxgep->rx_mbox_areas_p = 0; 2471 2472 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_rx_mem_pool")); 2473 } 2474 2475 2476 static nxge_status_t 2477 nxge_alloc_rx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel, 2478 p_nxge_dma_common_t *dmap, 2479 size_t alloc_size, size_t block_size, uint32_t *num_chunks) 2480 { 2481 p_nxge_dma_common_t rx_dmap; 2482 nxge_status_t status = NXGE_OK; 2483 size_t total_alloc_size; 2484 size_t allocated = 0; 2485 int i, size_index, array_size; 2486 boolean_t use_kmem_alloc = B_FALSE; 2487 2488 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_buf_dma")); 2489 2490 rx_dmap = (p_nxge_dma_common_t) 2491 KMEM_ZALLOC(sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK, 2492 KM_SLEEP); 2493 2494 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2495 " alloc_rx_buf_dma rdc %d asize %x bsize %x bbuf %llx ", 2496 dma_channel, alloc_size, block_size, dmap)); 2497 2498 total_alloc_size = alloc_size; 2499 2500 #if defined(RX_USE_RECLAIM_POST) 2501 total_alloc_size = alloc_size + alloc_size/4; 2502 #endif 2503 2504 i = 0; 2505 size_index = 0; 2506 array_size = sizeof (alloc_sizes)/sizeof (size_t); 2507 while ((alloc_sizes[size_index] < alloc_size) && 2508 (size_index < array_size)) 2509 size_index++; 2510 if (size_index >= array_size) { 2511 size_index = array_size - 1; 2512 } 2513 2514 /* For Neptune, use kmem_alloc if the kmem flag is set. */ 2515 if (nxgep->niu_type != N2_NIU && nxge_use_kmem_alloc) { 2516 use_kmem_alloc = B_TRUE; 2517 #if defined(__i386) || defined(__amd64) 2518 size_index = 0; 2519 #endif 2520 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2521 "==> nxge_alloc_rx_buf_dma: " 2522 "Neptune use kmem_alloc() - size_index %d", 2523 size_index)); 2524 } 2525 2526 while ((allocated < total_alloc_size) && 2527 (size_index >= 0) && (i < NXGE_DMA_BLOCK)) { 2528 rx_dmap[i].dma_chunk_index = i; 2529 rx_dmap[i].block_size = block_size; 2530 rx_dmap[i].alength = alloc_sizes[size_index]; 2531 rx_dmap[i].orig_alength = rx_dmap[i].alength; 2532 rx_dmap[i].nblocks = alloc_sizes[size_index] / block_size; 2533 rx_dmap[i].dma_channel = dma_channel; 2534 rx_dmap[i].contig_alloc_type = B_FALSE; 2535 rx_dmap[i].kmem_alloc_type = B_FALSE; 2536 rx_dmap[i].buf_alloc_type = DDI_MEM_ALLOC; 2537 2538 /* 2539 * N2/NIU: data buffers must be contiguous as the driver 2540 * needs to call Hypervisor api to set up 2541 * logical pages. 
2542 */ 2543 if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) { 2544 rx_dmap[i].contig_alloc_type = B_TRUE; 2545 rx_dmap[i].buf_alloc_type = CONTIG_MEM_ALLOC; 2546 } else if (use_kmem_alloc) { 2547 /* For Neptune, use kmem_alloc */ 2548 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2549 "==> nxge_alloc_rx_buf_dma: " 2550 "Neptune use kmem_alloc()")); 2551 rx_dmap[i].kmem_alloc_type = B_TRUE; 2552 rx_dmap[i].buf_alloc_type = KMEM_ALLOC; 2553 } 2554 2555 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2556 "alloc_rx_buf_dma rdc %d chunk %d bufp %llx size %x " 2557 "i %d nblocks %d alength %d", 2558 dma_channel, i, &rx_dmap[i], block_size, 2559 i, rx_dmap[i].nblocks, 2560 rx_dmap[i].alength)); 2561 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 2562 &nxge_rx_dma_attr, 2563 rx_dmap[i].alength, 2564 &nxge_dev_buf_dma_acc_attr, 2565 DDI_DMA_READ | DDI_DMA_STREAMING, 2566 (p_nxge_dma_common_t)(&rx_dmap[i])); 2567 if (status != NXGE_OK) { 2568 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2569 "nxge_alloc_rx_buf_dma: Alloc Failed: " 2570 "dma %d size_index %d size requested %d", 2571 dma_channel, 2572 size_index, 2573 rx_dmap[i].alength)); 2574 size_index--; 2575 } else { 2576 rx_dmap[i].buf_alloc_state = BUF_ALLOCATED; 2577 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2578 " nxge_alloc_rx_buf_dma DONE alloc mem: " 2579 "dma %d dma_buf_p $%p kaddrp $%p alength %d " 2580 "buf_alloc_state %d alloc_type %d", 2581 dma_channel, 2582 &rx_dmap[i], 2583 rx_dmap[i].kaddrp, 2584 rx_dmap[i].alength, 2585 rx_dmap[i].buf_alloc_state, 2586 rx_dmap[i].buf_alloc_type)); 2587 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2588 " alloc_rx_buf_dma allocated rdc %d " 2589 "chunk %d size %x dvma %x bufp %llx kaddrp $%p", 2590 dma_channel, i, rx_dmap[i].alength, 2591 rx_dmap[i].ioaddr_pp, &rx_dmap[i], 2592 rx_dmap[i].kaddrp)); 2593 i++; 2594 allocated += alloc_sizes[size_index]; 2595 } 2596 } 2597 2598 if (allocated < total_alloc_size) { 2599 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2600 "==> nxge_alloc_rx_buf_dma: not enough for channel %d " 2601 "allocated 0x%x requested 0x%x", 2602 dma_channel, 2603 allocated, total_alloc_size)); 2604 status = NXGE_ERROR; 2605 goto nxge_alloc_rx_mem_fail1; 2606 } 2607 2608 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2609 "==> nxge_alloc_rx_buf_dma: Allocated for channel %d " 2610 "allocated 0x%x requested 0x%x", 2611 dma_channel, 2612 allocated, total_alloc_size)); 2613 2614 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2615 " alloc_rx_buf_dma rdc %d allocated %d chunks", 2616 dma_channel, i)); 2617 *num_chunks = i; 2618 *dmap = rx_dmap; 2619 2620 goto nxge_alloc_rx_mem_exit; 2621 2622 nxge_alloc_rx_mem_fail1: 2623 KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 2624 2625 nxge_alloc_rx_mem_exit: 2626 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2627 "<== nxge_alloc_rx_buf_dma status 0x%08x", status)); 2628 2629 return (status); 2630 } 2631 2632 /*ARGSUSED*/ 2633 static void 2634 nxge_free_rx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap, 2635 uint32_t num_chunks) 2636 { 2637 int i; 2638 2639 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2640 "==> nxge_free_rx_buf_dma: # of chunks %d", num_chunks)); 2641 2642 if (dmap == 0) 2643 return; 2644 2645 for (i = 0; i < num_chunks; i++) { 2646 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2647 "==> nxge_free_rx_buf_dma: chunk %d dmap 0x%llx", 2648 i, dmap)); 2649 nxge_dma_free_rx_data_buf(dmap++); 2650 } 2651 2652 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_rx_buf_dma")); 2653 } 2654 2655 /*ARGSUSED*/ 2656 static nxge_status_t 2657 nxge_alloc_rx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel, 2658 p_nxge_dma_common_t *dmap, size_t 
size) 2659 { 2660 p_nxge_dma_common_t rx_dmap; 2661 nxge_status_t status = NXGE_OK; 2662 2663 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_cntl_dma")); 2664 2665 rx_dmap = (p_nxge_dma_common_t) 2666 KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP); 2667 2668 rx_dmap->contig_alloc_type = B_FALSE; 2669 rx_dmap->kmem_alloc_type = B_FALSE; 2670 2671 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 2672 &nxge_desc_dma_attr, 2673 size, 2674 &nxge_dev_desc_dma_acc_attr, 2675 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 2676 rx_dmap); 2677 if (status != NXGE_OK) { 2678 goto nxge_alloc_rx_cntl_dma_fail1; 2679 } 2680 2681 *dmap = rx_dmap; 2682 goto nxge_alloc_rx_cntl_dma_exit; 2683 2684 nxge_alloc_rx_cntl_dma_fail1: 2685 KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t)); 2686 2687 nxge_alloc_rx_cntl_dma_exit: 2688 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2689 "<== nxge_alloc_rx_cntl_dma status 0x%08x", status)); 2690 2691 return (status); 2692 } 2693 2694 /*ARGSUSED*/ 2695 static void 2696 nxge_free_rx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap) 2697 { 2698 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_rx_cntl_dma")); 2699 2700 if (dmap == 0) 2701 return; 2702 2703 nxge_dma_mem_free(dmap); 2704 2705 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_rx_cntl_dma")); 2706 } 2707 2708 typedef struct { 2709 size_t tx_size; 2710 size_t cr_size; 2711 size_t threshhold; 2712 } nxge_tdc_sizes_t; 2713 2714 static 2715 nxge_status_t 2716 nxge_tdc_sizes( 2717 nxge_t *nxgep, 2718 nxge_tdc_sizes_t *sizes) 2719 { 2720 uint32_t threshhold; /* The bcopy() threshhold */ 2721 size_t tx_size; /* Transmit buffer size */ 2722 size_t cr_size; /* Completion ring size */ 2723 2724 /* 2725 * Assume that each DMA channel will be configured with the 2726 * default transmit buffer size for copying transmit data. 2727 * (If a packet is bigger than this, it will not be copied.) 2728 */ 2729 if (nxgep->niu_type == N2_NIU) { 2730 threshhold = TX_BCOPY_SIZE; 2731 } else { 2732 threshhold = nxge_bcopy_thresh; 2733 } 2734 tx_size = nxge_tx_ring_size * threshhold; 2735 2736 cr_size = nxge_tx_ring_size * sizeof (tx_desc_t); 2737 cr_size += sizeof (txdma_mailbox_t); 2738 2739 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2740 if (nxgep->niu_type == N2_NIU) { 2741 if (!ISP2(tx_size)) { 2742 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2743 "==> nxge_tdc_sizes: Tx size" 2744 " must be power of 2")); 2745 return (NXGE_ERROR); 2746 } 2747 2748 if (tx_size > (1 << 22)) { 2749 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2750 "==> nxge_tdc_sizes: Tx size" 2751 " limited to 4M")); 2752 return (NXGE_ERROR); 2753 } 2754 2755 if (cr_size < 0x2000) 2756 cr_size = 0x2000; 2757 } 2758 #endif 2759 2760 sizes->threshhold = threshhold; 2761 sizes->tx_size = tx_size; 2762 sizes->cr_size = cr_size; 2763 2764 return (NXGE_OK); 2765 } 2766 /* 2767 * nxge_alloc_txb 2768 * 2769 * Allocate buffers for an TDC. 2770 * 2771 * Arguments: 2772 * nxgep 2773 * channel The channel to map into our kernel space. 
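 *
 * Sizing sketch (illustrative; mirrors nxge_tdc_sizes() above):
 *	tx buffer bytes = nxge_tx_ring_size * bcopy threshold
 *	cntl bytes = nxge_tx_ring_size * sizeof (tx_desc_t)
 *	    + sizeof (txdma_mailbox_t)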
2774 * 2775 * Notes: 2776 * 2777 * NPI function calls: 2778 * 2779 * NXGE function calls: 2780 * 2781 * Registers accessed: 2782 * 2783 * Context: 2784 * 2785 * Taking apart: 2786 * 2787 * Open questions: 2788 * 2789 */ 2790 nxge_status_t 2791 nxge_alloc_txb( 2792 p_nxge_t nxgep, 2793 int channel) 2794 { 2795 nxge_dma_common_t **dma_buf_p; 2796 nxge_dma_common_t **dma_cntl_p; 2797 uint32_t *num_chunks; 2798 nxge_status_t status = NXGE_OK; 2799 2800 nxge_tdc_sizes_t sizes; 2801 2802 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tbb")); 2803 2804 if (nxge_tdc_sizes(nxgep, &sizes) != NXGE_OK) 2805 return (NXGE_ERROR); 2806 2807 /* 2808 * Allocate memory for transmit buffers and descriptor rings. 2809 * Replace these allocation functions with the interface functions 2810 * provided by the partition manager Real Soon Now. 2811 */ 2812 dma_buf_p = &nxgep->tx_buf_pool_p->dma_buf_pool_p[channel]; 2813 num_chunks = &nxgep->tx_buf_pool_p->num_chunks[channel]; 2814 2815 dma_cntl_p = &nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel]; 2816 2817 /* 2818 * Allocate memory for transmit buffers and descriptor rings. 2819 * Replace allocation functions with interface functions provided 2820 * by the partition manager when it is available. 2821 * 2822 * Allocate memory for the transmit buffer pool. 2823 */ 2824 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2825 "sizes: tx: %ld, cr:%ld, th:%ld", 2826 sizes.tx_size, sizes.cr_size, sizes.threshhold)); 2827 2828 *num_chunks = 0; 2829 status = nxge_alloc_tx_buf_dma(nxgep, channel, dma_buf_p, 2830 sizes.tx_size, sizes.threshhold, num_chunks); 2831 if (status != NXGE_OK) { 2832 cmn_err(CE_NOTE, "nxge_alloc_tx_buf_dma failed!"); 2833 return (status); 2834 } 2835 2836 /* 2837 * Allocate memory for descriptor rings and mailbox. 2838 */ 2839 status = nxge_alloc_tx_cntl_dma(nxgep, channel, dma_cntl_p, 2840 sizes.cr_size); 2841 if (status != NXGE_OK) { 2842 nxge_free_tx_buf_dma(nxgep, *dma_buf_p, *num_chunks); 2843 cmn_err(CE_NOTE, "nxge_alloc_tx_cntl_dma failed!"); 2844 return (status); 2845 } 2846 2847 return (NXGE_OK); 2848 } 2849 2850 void 2851 nxge_free_txb( 2852 p_nxge_t nxgep, 2853 int channel) 2854 { 2855 nxge_dma_common_t *data; 2856 nxge_dma_common_t *control; 2857 uint32_t num_chunks; 2858 2859 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_txb")); 2860 2861 data = nxgep->tx_buf_pool_p->dma_buf_pool_p[channel]; 2862 num_chunks = nxgep->tx_buf_pool_p->num_chunks[channel]; 2863 nxge_free_tx_buf_dma(nxgep, data, num_chunks); 2864 2865 nxgep->tx_buf_pool_p->dma_buf_pool_p[channel] = 0; 2866 nxgep->tx_buf_pool_p->num_chunks[channel] = 0; 2867 2868 control = nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel]; 2869 nxge_free_tx_cntl_dma(nxgep, control); 2870 2871 nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel] = 0; 2872 2873 KMEM_FREE(data, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 2874 KMEM_FREE(control, sizeof (nxge_dma_common_t)); 2875 2876 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_txb")); 2877 } 2878 2879 /* 2880 * nxge_alloc_tx_mem_pool 2881 * 2882 * This function allocates all of the per-port TDC control data structures. 2883 * The per-channel (TDC) data structures are allocated when needed. 
2884 * 2885 * Arguments: 2886 * nxgep 2887 * 2888 * Notes: 2889 * 2890 * Context: 2891 * Any domain 2892 */ 2893 nxge_status_t 2894 nxge_alloc_tx_mem_pool(p_nxge_t nxgep) 2895 { 2896 nxge_hw_pt_cfg_t *p_cfgp; 2897 nxge_dma_pool_t *dma_poolp; 2898 nxge_dma_common_t **dma_buf_p; 2899 nxge_dma_pool_t *dma_cntl_poolp; 2900 nxge_dma_common_t **dma_cntl_p; 2901 uint32_t *num_chunks; /* per dma */ 2902 int tdc_max; 2903 2904 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_alloc_tx_mem_pool")); 2905 2906 p_cfgp = &nxgep->pt_config.hw_config; 2907 tdc_max = NXGE_MAX_TDCS; 2908 2909 /* 2910 * Allocate memory for each transmit DMA channel. 2911 */ 2912 dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t), 2913 KM_SLEEP); 2914 dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 2915 sizeof (p_nxge_dma_common_t) * tdc_max, KM_SLEEP); 2916 2917 dma_cntl_poolp = (p_nxge_dma_pool_t) 2918 KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP); 2919 dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 2920 sizeof (p_nxge_dma_common_t) * tdc_max, KM_SLEEP); 2921 2922 if (nxge_tx_ring_size > TDC_DEFAULT_MAX) { 2923 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 2924 "nxge_alloc_tx_mem_pool: TDC too high %d, " 2925 "set to default %d", 2926 nxge_tx_ring_size, TDC_DEFAULT_MAX)); 2927 nxge_tx_ring_size = TDC_DEFAULT_MAX; 2928 } 2929 2930 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2931 /* 2932 * N2/NIU has limitation on the descriptor sizes (contiguous 2933 * memory allocation on data buffers to 4M (contig_mem_alloc) 2934 * and little endian for control buffers (must use the ddi/dki mem alloc 2935 * function). The transmit ring is limited to 8K (includes the 2936 * mailbox). 2937 */ 2938 if (nxgep->niu_type == N2_NIU) { 2939 if ((nxge_tx_ring_size > NXGE_NIU_CONTIG_TX_MAX) || 2940 (!ISP2(nxge_tx_ring_size))) { 2941 nxge_tx_ring_size = NXGE_NIU_CONTIG_TX_MAX; 2942 } 2943 } 2944 #endif 2945 2946 nxgep->nxge_port_tx_ring_size = nxge_tx_ring_size; 2947 2948 num_chunks = (uint32_t *)KMEM_ZALLOC( 2949 sizeof (uint32_t) * tdc_max, KM_SLEEP); 2950 2951 dma_poolp->ndmas = p_cfgp->tdc.owned; 2952 dma_poolp->num_chunks = num_chunks; 2953 dma_poolp->dma_buf_pool_p = dma_buf_p; 2954 nxgep->tx_buf_pool_p = dma_poolp; 2955 2956 dma_poolp->buf_allocated = B_TRUE; 2957 2958 dma_cntl_poolp->ndmas = p_cfgp->tdc.owned; 2959 dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p; 2960 nxgep->tx_cntl_pool_p = dma_cntl_poolp; 2961 2962 dma_cntl_poolp->buf_allocated = B_TRUE; 2963 2964 nxgep->tx_rings = 2965 KMEM_ZALLOC(sizeof (tx_rings_t), KM_SLEEP); 2966 nxgep->tx_rings->rings = 2967 KMEM_ZALLOC(sizeof (p_tx_ring_t) * tdc_max, KM_SLEEP); 2968 nxgep->tx_mbox_areas_p = 2969 KMEM_ZALLOC(sizeof (tx_mbox_areas_t), KM_SLEEP); 2970 nxgep->tx_mbox_areas_p->txmbox_areas_p = 2971 KMEM_ZALLOC(sizeof (p_tx_mbox_t) * tdc_max, KM_SLEEP); 2972 2973 nxgep->tx_rings->ndmas = p_cfgp->tdc.owned; 2974 2975 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 2976 "==> nxge_alloc_tx_mem_pool: ndmas %d poolp->ndmas %d", 2977 tdc_max, dma_poolp->ndmas)); 2978 2979 return (NXGE_OK); 2980 } 2981 2982 nxge_status_t 2983 nxge_alloc_tx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel, 2984 p_nxge_dma_common_t *dmap, size_t alloc_size, 2985 size_t block_size, uint32_t *num_chunks) 2986 { 2987 p_nxge_dma_common_t tx_dmap; 2988 nxge_status_t status = NXGE_OK; 2989 size_t total_alloc_size; 2990 size_t allocated = 0; 2991 int i, size_index, array_size; 2992 2993 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_buf_dma")); 2994 2995 tx_dmap = (p_nxge_dma_common_t) 2996 KMEM_ZALLOC(sizeof (nxge_dma_common_t) * 
NXGE_DMA_BLOCK, 2997 KM_SLEEP); 2998 2999 total_alloc_size = alloc_size; 3000 i = 0; 3001 size_index = 0; 3002 array_size = sizeof (alloc_sizes) / sizeof (size_t); 3003 while ((alloc_sizes[size_index] < alloc_size) && 3004 (size_index < array_size)) 3005 size_index++; 3006 if (size_index >= array_size) { 3007 size_index = array_size - 1; 3008 } 3009 3010 while ((allocated < total_alloc_size) && 3011 (size_index >= 0) && (i < NXGE_DMA_BLOCK)) { 3012 3013 tx_dmap[i].dma_chunk_index = i; 3014 tx_dmap[i].block_size = block_size; 3015 tx_dmap[i].alength = alloc_sizes[size_index]; 3016 tx_dmap[i].orig_alength = tx_dmap[i].alength; 3017 tx_dmap[i].nblocks = alloc_sizes[size_index] / block_size; 3018 tx_dmap[i].dma_channel = dma_channel; 3019 tx_dmap[i].contig_alloc_type = B_FALSE; 3020 tx_dmap[i].kmem_alloc_type = B_FALSE; 3021 3022 /* 3023 * N2/NIU: data buffers must be contiguous as the driver 3024 * needs to call Hypervisor api to set up 3025 * logical pages. 3026 */ 3027 if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) { 3028 tx_dmap[i].contig_alloc_type = B_TRUE; 3029 } 3030 3031 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 3032 &nxge_tx_dma_attr, 3033 tx_dmap[i].alength, 3034 &nxge_dev_buf_dma_acc_attr, 3035 DDI_DMA_WRITE | DDI_DMA_STREAMING, 3036 (p_nxge_dma_common_t)(&tx_dmap[i])); 3037 if (status != NXGE_OK) { 3038 size_index--; 3039 } else { 3040 i++; 3041 allocated += alloc_sizes[size_index]; 3042 } 3043 } 3044 3045 if (allocated < total_alloc_size) { 3046 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3047 "==> nxge_alloc_tx_buf_dma: not enough channel %d: " 3048 "allocated 0x%x requested 0x%x", 3049 dma_channel, 3050 allocated, total_alloc_size)); 3051 status = NXGE_ERROR; 3052 goto nxge_alloc_tx_mem_fail1; 3053 } 3054 3055 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3056 "==> nxge_alloc_tx_buf_dma: Allocated for channel %d: " 3057 "allocated 0x%x requested 0x%x", 3058 dma_channel, 3059 allocated, total_alloc_size)); 3060 3061 *num_chunks = i; 3062 *dmap = tx_dmap; 3063 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3064 "==> nxge_alloc_tx_buf_dma dmap 0x%016llx num chunks %d", 3065 *dmap, i)); 3066 goto nxge_alloc_tx_mem_exit; 3067 3068 nxge_alloc_tx_mem_fail1: 3069 KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 3070 3071 nxge_alloc_tx_mem_exit: 3072 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3073 "<== nxge_alloc_tx_buf_dma status 0x%08x", status)); 3074 3075 return (status); 3076 } 3077 3078 /*ARGSUSED*/ 3079 static void 3080 nxge_free_tx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap, 3081 uint32_t num_chunks) 3082 { 3083 int i; 3084 3085 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_tx_buf_dma")); 3086 3087 if (dmap == 0) 3088 return; 3089 3090 for (i = 0; i < num_chunks; i++) { 3091 nxge_dma_mem_free(dmap++); 3092 } 3093 3094 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_tx_buf_dma")); 3095 } 3096 3097 /*ARGSUSED*/ 3098 nxge_status_t 3099 nxge_alloc_tx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel, 3100 p_nxge_dma_common_t *dmap, size_t size) 3101 { 3102 p_nxge_dma_common_t tx_dmap; 3103 nxge_status_t status = NXGE_OK; 3104 3105 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_cntl_dma")); 3106 tx_dmap = (p_nxge_dma_common_t) 3107 KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP); 3108 3109 tx_dmap->contig_alloc_type = B_FALSE; 3110 tx_dmap->kmem_alloc_type = B_FALSE; 3111 3112 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 3113 &nxge_desc_dma_attr, 3114 size, 3115 &nxge_dev_desc_dma_acc_attr, 3116 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 3117 tx_dmap); 3118 if (status != NXGE_OK) { 3119 
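		/* The fail1 path below undoes the KMEM_ZALLOC() of tx_dmap. */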
goto nxge_alloc_tx_cntl_dma_fail1; 3120 } 3121 3122 *dmap = tx_dmap; 3123 goto nxge_alloc_tx_cntl_dma_exit; 3124 3125 nxge_alloc_tx_cntl_dma_fail1: 3126 KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t)); 3127 3128 nxge_alloc_tx_cntl_dma_exit: 3129 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3130 "<== nxge_alloc_tx_cntl_dma status 0x%08x", status)); 3131 3132 return (status); 3133 } 3134 3135 /*ARGSUSED*/ 3136 static void 3137 nxge_free_tx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap) 3138 { 3139 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_tx_cntl_dma")); 3140 3141 if (dmap == 0) 3142 return; 3143 3144 nxge_dma_mem_free(dmap); 3145 3146 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_tx_cntl_dma")); 3147 } 3148 3149 /* 3150 * nxge_free_tx_mem_pool 3151 * 3152 * This function frees all of the per-port TDC control data structures. 3153 * The per-channel (TDC) data structures are freed when the channel 3154 * is stopped. 3155 * 3156 * Arguments: 3157 * nxgep 3158 * 3159 * Notes: 3160 * 3161 * Context: 3162 * Any domain 3163 */ 3164 static void 3165 nxge_free_tx_mem_pool(p_nxge_t nxgep) 3166 { 3167 int tdc_max = NXGE_MAX_TDCS; 3168 3169 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_tx_mem_pool")); 3170 3171 if (!nxgep->tx_buf_pool_p || !nxgep->tx_buf_pool_p->buf_allocated) { 3172 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3173 "<== nxge_free_tx_mem_pool " 3174 "(null tx buf pool or buf not allocated")); 3175 return; 3176 } 3177 if (!nxgep->tx_cntl_pool_p || !nxgep->tx_cntl_pool_p->buf_allocated) { 3178 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3179 "<== nxge_free_tx_mem_pool " 3180 "(null tx cntl buf pool or cntl buf not allocated")); 3181 return; 3182 } 3183 3184 /* 1. Free the mailboxes. */ 3185 KMEM_FREE(nxgep->tx_mbox_areas_p->txmbox_areas_p, 3186 sizeof (p_tx_mbox_t) * tdc_max); 3187 KMEM_FREE(nxgep->tx_mbox_areas_p, sizeof (tx_mbox_areas_t)); 3188 3189 nxgep->tx_mbox_areas_p = 0; 3190 3191 /* 2. Free the transmit ring arrays. */ 3192 KMEM_FREE(nxgep->tx_rings->rings, 3193 sizeof (p_tx_ring_t) * tdc_max); 3194 KMEM_FREE(nxgep->tx_rings, sizeof (tx_rings_t)); 3195 3196 nxgep->tx_rings = 0; 3197 3198 /* 3. Free the completion ring data structures. */ 3199 KMEM_FREE(nxgep->tx_cntl_pool_p->dma_buf_pool_p, 3200 sizeof (p_nxge_dma_common_t) * tdc_max); 3201 KMEM_FREE(nxgep->tx_cntl_pool_p, sizeof (nxge_dma_pool_t)); 3202 3203 nxgep->tx_cntl_pool_p = 0; 3204 3205 /* 4. Free the data ring data structures. */ 3206 KMEM_FREE(nxgep->tx_buf_pool_p->num_chunks, 3207 sizeof (uint32_t) * tdc_max); 3208 KMEM_FREE(nxgep->tx_buf_pool_p->dma_buf_pool_p, 3209 sizeof (p_nxge_dma_common_t) * tdc_max); 3210 KMEM_FREE(nxgep->tx_buf_pool_p, sizeof (nxge_dma_pool_t)); 3211 3212 nxgep->tx_buf_pool_p = 0; 3213 3214 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_tx_mem_pool")); 3215 } 3216 3217 /*ARGSUSED*/ 3218 static nxge_status_t 3219 nxge_dma_mem_alloc(p_nxge_t nxgep, dma_method_t method, 3220 struct ddi_dma_attr *dma_attrp, 3221 size_t length, ddi_device_acc_attr_t *acc_attr_p, uint_t xfer_flags, 3222 p_nxge_dma_common_t dma_p) 3223 { 3224 caddr_t kaddrp; 3225 int ddi_status = DDI_SUCCESS; 3226 boolean_t contig_alloc_type; 3227 boolean_t kmem_alloc_type; 3228 3229 contig_alloc_type = dma_p->contig_alloc_type; 3230 3231 if (contig_alloc_type && (nxgep->niu_type != N2_NIU)) { 3232 /* 3233 * contig_alloc_type for contiguous memory only allowed 3234 * for N2/NIU. 
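 * The three allocation strategies handled below (sketch):
 *	contig_alloc_type set	-> contig_mem_alloc() (sun4v N2/NIU only)
 *	kmem_alloc_type set	-> KMEM_ALLOC() + ddi_dma_addr_bind_handle()
 *	neither			-> ddi_dma_mem_alloc() + bind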
3235 */ 3236 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3237 "nxge_dma_mem_alloc: alloc type not allowed (%d)", 3238 dma_p->contig_alloc_type)); 3239 return (NXGE_ERROR | NXGE_DDI_FAILED); 3240 } 3241 3242 dma_p->dma_handle = NULL; 3243 dma_p->acc_handle = NULL; 3244 dma_p->kaddrp = dma_p->last_kaddrp = NULL; 3245 dma_p->first_ioaddr_pp = dma_p->last_ioaddr_pp = NULL; 3246 ddi_status = ddi_dma_alloc_handle(nxgep->dip, dma_attrp, 3247 DDI_DMA_DONTWAIT, NULL, &dma_p->dma_handle); 3248 if (ddi_status != DDI_SUCCESS) { 3249 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3250 "nxge_dma_mem_alloc:ddi_dma_alloc_handle failed.")); 3251 return (NXGE_ERROR | NXGE_DDI_FAILED); 3252 } 3253 3254 kmem_alloc_type = dma_p->kmem_alloc_type; 3255 3256 switch (contig_alloc_type) { 3257 case B_FALSE: 3258 switch (kmem_alloc_type) { 3259 case B_FALSE: 3260 ddi_status = ddi_dma_mem_alloc(dma_p->dma_handle, 3261 length, 3262 acc_attr_p, 3263 xfer_flags, 3264 DDI_DMA_DONTWAIT, 0, &kaddrp, &dma_p->alength, 3265 &dma_p->acc_handle); 3266 if (ddi_status != DDI_SUCCESS) { 3267 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3268 "nxge_dma_mem_alloc: " 3269 "ddi_dma_mem_alloc failed")); 3270 ddi_dma_free_handle(&dma_p->dma_handle); 3271 dma_p->dma_handle = NULL; 3272 return (NXGE_ERROR | NXGE_DDI_FAILED); 3273 } 3274 if (dma_p->alength < length) { 3275 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3276 "nxge_dma_mem_alloc:di_dma_mem_alloc " 3277 "< length.")); 3278 ddi_dma_mem_free(&dma_p->acc_handle); 3279 ddi_dma_free_handle(&dma_p->dma_handle); 3280 dma_p->acc_handle = NULL; 3281 dma_p->dma_handle = NULL; 3282 return (NXGE_ERROR); 3283 } 3284 3285 ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, 3286 NULL, 3287 kaddrp, dma_p->alength, xfer_flags, 3288 DDI_DMA_DONTWAIT, 3289 0, &dma_p->dma_cookie, &dma_p->ncookies); 3290 if (ddi_status != DDI_DMA_MAPPED) { 3291 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3292 "nxge_dma_mem_alloc: ddi_dma_addr_bind " 3293 "failed " 3294 "(staus 0x%x ncookies %d.)", ddi_status, 3295 dma_p->ncookies)); 3296 if (dma_p->acc_handle) { 3297 ddi_dma_mem_free(&dma_p->acc_handle); 3298 dma_p->acc_handle = NULL; 3299 } 3300 ddi_dma_free_handle(&dma_p->dma_handle); 3301 dma_p->dma_handle = NULL; 3302 return (NXGE_ERROR | NXGE_DDI_FAILED); 3303 } 3304 3305 if (dma_p->ncookies != 1) { 3306 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3307 "nxge_dma_mem_alloc:ddi_dma_addr_bind " 3308 "> 1 cookie" 3309 "(staus 0x%x ncookies %d.)", ddi_status, 3310 dma_p->ncookies)); 3311 if (dma_p->acc_handle) { 3312 ddi_dma_mem_free(&dma_p->acc_handle); 3313 dma_p->acc_handle = NULL; 3314 } 3315 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 3316 ddi_dma_free_handle(&dma_p->dma_handle); 3317 dma_p->dma_handle = NULL; 3318 return (NXGE_ERROR); 3319 } 3320 break; 3321 3322 case B_TRUE: 3323 kaddrp = KMEM_ALLOC(length, KM_NOSLEEP); 3324 if (kaddrp == NULL) { 3325 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3326 "nxge_dma_mem_alloc:ddi_dma_mem_alloc " 3327 "kmem alloc failed")); 3328 return (NXGE_ERROR); 3329 } 3330 3331 dma_p->alength = length; 3332 ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, 3333 NULL, kaddrp, dma_p->alength, xfer_flags, 3334 DDI_DMA_DONTWAIT, 0, 3335 &dma_p->dma_cookie, &dma_p->ncookies); 3336 if (ddi_status != DDI_DMA_MAPPED) { 3337 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3338 "nxge_dma_mem_alloc:ddi_dma_addr_bind: " 3339 "(kmem_alloc) failed kaddrp $%p length %d " 3340 "(staus 0x%x (%d) ncookies %d.)", 3341 kaddrp, length, 3342 ddi_status, ddi_status, dma_p->ncookies)); 3343 KMEM_FREE(kaddrp, length); 3344 dma_p->acc_handle = NULL; 3345 
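			/*
			 * The bind failed, so the handle was never bound;
			 * it only needs to be freed.
			 */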
ddi_dma_free_handle(&dma_p->dma_handle); 3346 dma_p->dma_handle = NULL; 3347 dma_p->kaddrp = NULL; 3348 return (NXGE_ERROR | NXGE_DDI_FAILED); 3349 } 3350 3351 if (dma_p->ncookies != 1) { 3352 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3353 "nxge_dma_mem_alloc:ddi_dma_addr_bind " 3354 "(kmem_alloc) > 1 cookie" 3355 "(staus 0x%x ncookies %d.)", ddi_status, 3356 dma_p->ncookies)); 3357 KMEM_FREE(kaddrp, length); 3358 dma_p->acc_handle = NULL; 3359 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 3360 ddi_dma_free_handle(&dma_p->dma_handle); 3361 dma_p->dma_handle = NULL; 3362 dma_p->kaddrp = NULL; 3363 return (NXGE_ERROR); 3364 } 3365 3366 dma_p->kaddrp = kaddrp; 3367 3368 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 3369 "nxge_dma_mem_alloc: kmem_alloc dmap $%p " 3370 "kaddr $%p alength %d", 3371 dma_p, 3372 kaddrp, 3373 dma_p->alength)); 3374 break; 3375 } 3376 break; 3377 3378 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 3379 case B_TRUE: 3380 kaddrp = (caddr_t)contig_mem_alloc(length); 3381 if (kaddrp == NULL) { 3382 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3383 "nxge_dma_mem_alloc:contig_mem_alloc failed.")); 3384 ddi_dma_free_handle(&dma_p->dma_handle); 3385 return (NXGE_ERROR | NXGE_DDI_FAILED); 3386 } 3387 3388 dma_p->alength = length; 3389 ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL, 3390 kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0, 3391 &dma_p->dma_cookie, &dma_p->ncookies); 3392 if (ddi_status != DDI_DMA_MAPPED) { 3393 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3394 "nxge_dma_mem_alloc:di_dma_addr_bind failed " 3395 "(status 0x%x ncookies %d.)", ddi_status, 3396 dma_p->ncookies)); 3397 3398 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3399 "==> nxge_dma_mem_alloc: (not mapped)" 3400 "length %lu (0x%x) " 3401 "free contig kaddrp $%p " 3402 "va_to_pa $%p", 3403 length, length, 3404 kaddrp, 3405 va_to_pa(kaddrp))); 3406 3407 3408 contig_mem_free((void *)kaddrp, length); 3409 ddi_dma_free_handle(&dma_p->dma_handle); 3410 3411 dma_p->dma_handle = NULL; 3412 dma_p->acc_handle = NULL; 3413 dma_p->alength = NULL; 3414 dma_p->kaddrp = NULL; 3415 3416 return (NXGE_ERROR | NXGE_DDI_FAILED); 3417 } 3418 3419 if (dma_p->ncookies != 1 || 3420 (dma_p->dma_cookie.dmac_laddress == NULL)) { 3421 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3422 "nxge_dma_mem_alloc:di_dma_addr_bind > 1 " 3423 "cookie or " 3424 "dmac_laddress is NULL $%p size %d " 3425 " (status 0x%x ncookies %d.)", 3426 ddi_status, 3427 dma_p->dma_cookie.dmac_laddress, 3428 dma_p->dma_cookie.dmac_size, 3429 dma_p->ncookies)); 3430 3431 contig_mem_free((void *)kaddrp, length); 3432 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 3433 ddi_dma_free_handle(&dma_p->dma_handle); 3434 3435 dma_p->alength = 0; 3436 dma_p->dma_handle = NULL; 3437 dma_p->acc_handle = NULL; 3438 dma_p->kaddrp = NULL; 3439 3440 return (NXGE_ERROR | NXGE_DDI_FAILED); 3441 } 3442 break; 3443 3444 #else 3445 case B_TRUE: 3446 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3447 "nxge_dma_mem_alloc: invalid alloc type for !sun4v")); 3448 return (NXGE_ERROR | NXGE_DDI_FAILED); 3449 #endif 3450 } 3451 3452 dma_p->kaddrp = kaddrp; 3453 dma_p->last_kaddrp = (unsigned char *)kaddrp + 3454 dma_p->alength - RXBUF_64B_ALIGNED; 3455 #if defined(__i386) 3456 dma_p->ioaddr_pp = 3457 (unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress; 3458 #else 3459 dma_p->ioaddr_pp = (unsigned char *)dma_p->dma_cookie.dmac_laddress; 3460 #endif 3461 dma_p->last_ioaddr_pp = 3462 #if defined(__i386) 3463 (unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress + 3464 #else 3465 (unsigned char 
	    *)dma_p->dma_cookie.dmac_laddress +
#endif
	    dma_p->alength - RXBUF_64B_ALIGNED;

	NPI_DMA_ACC_HANDLE_SET(dma_p, dma_p->acc_handle);

#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
	dma_p->orig_ioaddr_pp =
	    (unsigned char *)dma_p->dma_cookie.dmac_laddress;
	dma_p->orig_alength = length;
	dma_p->orig_kaddrp = kaddrp;
	dma_p->orig_vatopa = (uint64_t)va_to_pa(kaddrp);
#endif

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dma_mem_alloc: "
	    "dma buffer allocated: dma_p $%p "
	    "return dmac_ladress from cookie $%p cookie dmac_size %d "
	    "dma_p->ioaddr_p $%p "
	    "dma_p->orig_ioaddr_p $%p "
	    "orig_vatopa $%p "
	    "alength %d (0x%x) "
	    "kaddrp $%p "
	    "length %d (0x%x)",
	    dma_p,
	    dma_p->dma_cookie.dmac_laddress, dma_p->dma_cookie.dmac_size,
	    dma_p->ioaddr_pp,
	    dma_p->orig_ioaddr_pp,
	    dma_p->orig_vatopa,
	    dma_p->alength, dma_p->alength,
	    kaddrp,
	    length, length));

	return (NXGE_OK);
}

static void
nxge_dma_mem_free(p_nxge_dma_common_t dma_p)
{
	if (dma_p->dma_handle != NULL) {
		if (dma_p->ncookies) {
			(void) ddi_dma_unbind_handle(dma_p->dma_handle);
			dma_p->ncookies = 0;
		}
		ddi_dma_free_handle(&dma_p->dma_handle);
		dma_p->dma_handle = NULL;
	}

	if (dma_p->acc_handle != NULL) {
		ddi_dma_mem_free(&dma_p->acc_handle);
		dma_p->acc_handle = NULL;
		NPI_DMA_ACC_HANDLE_SET(dma_p, NULL);
	}

#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
	if (dma_p->contig_alloc_type &&
	    dma_p->orig_kaddrp && dma_p->orig_alength) {
		NXGE_DEBUG_MSG((NULL, DMA_CTL, "nxge_dma_mem_free: "
		    "kaddrp $%p (orig_kaddrp $%p) "
		    "mem type %d "
		    "orig_alength %d "
		    "alength 0x%x (%d)",
		    dma_p->kaddrp,
		    dma_p->orig_kaddrp,
		    dma_p->contig_alloc_type,
		    dma_p->orig_alength,
		    dma_p->alength, dma_p->alength));

		contig_mem_free(dma_p->orig_kaddrp, dma_p->orig_alength);
		dma_p->orig_alength = 0;
		dma_p->orig_kaddrp = NULL;
		dma_p->contig_alloc_type = B_FALSE;
	}
#endif
	dma_p->kaddrp = NULL;
	dma_p->alength = 0;
}

static void
nxge_dma_free_rx_data_buf(p_nxge_dma_common_t dma_p)
{
	uint64_t kaddr;
	uint32_t buf_size;

	NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_dma_free_rx_data_buf"));

	if (dma_p->dma_handle != NULL) {
		if (dma_p->ncookies) {
			(void) ddi_dma_unbind_handle(dma_p->dma_handle);
			dma_p->ncookies = 0;
		}
		ddi_dma_free_handle(&dma_p->dma_handle);
		dma_p->dma_handle = NULL;
	}

	if (dma_p->acc_handle != NULL) {
		ddi_dma_mem_free(&dma_p->acc_handle);
		dma_p->acc_handle = NULL;
		NPI_DMA_ACC_HANDLE_SET(dma_p, NULL);
	}

	NXGE_DEBUG_MSG((NULL, DMA_CTL,
	    "==> nxge_dma_free_rx_data_buf: dmap $%p buf_alloc_state %d",
	    dma_p,
	    dma_p->buf_alloc_state));

	if (!(dma_p->buf_alloc_state & BUF_ALLOCATED_WAIT_FREE)) {
		NXGE_DEBUG_MSG((NULL, DMA_CTL,
		    "<== nxge_dma_free_rx_data_buf: "
		    "outstanding data buffers"));
		return;
	}

#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
	if (dma_p->contig_alloc_type &&
	    dma_p->orig_kaddrp && dma_p->orig_alength) {
		NXGE_DEBUG_MSG((NULL, DMA_CTL, "nxge_dma_free_rx_data_buf: "
		    "kaddrp $%p (orig_kaddrp $%p) "
		    "mem type %d "
		    "orig_alength %d "
		    "alength 0x%x (%d)",
		    dma_p->kaddrp,
		    dma_p->orig_kaddrp,
dma_p->contig_alloc_type, 3588 dma_p->orig_alength, 3589 dma_p->alength, dma_p->alength)); 3590 3591 kaddr = (uint64_t)dma_p->orig_kaddrp; 3592 buf_size = dma_p->orig_alength; 3593 nxge_free_buf(CONTIG_MEM_ALLOC, kaddr, buf_size); 3594 dma_p->orig_alength = NULL; 3595 dma_p->orig_kaddrp = NULL; 3596 dma_p->contig_alloc_type = B_FALSE; 3597 dma_p->kaddrp = NULL; 3598 dma_p->alength = NULL; 3599 return; 3600 } 3601 #endif 3602 3603 if (dma_p->kmem_alloc_type) { 3604 NXGE_DEBUG_MSG((NULL, DMA_CTL, 3605 "nxge_dma_free_rx_data_buf: free kmem " 3606 "kaddrp $%p (orig_kaddrp $%p)" 3607 "alloc type %d " 3608 "orig_alength %d " 3609 "alength 0x%x (%d)", 3610 dma_p->kaddrp, 3611 dma_p->orig_kaddrp, 3612 dma_p->kmem_alloc_type, 3613 dma_p->orig_alength, 3614 dma_p->alength, dma_p->alength)); 3615 #if defined(__i386) 3616 kaddr = (uint64_t)(uint32_t)dma_p->kaddrp; 3617 #else 3618 kaddr = (uint64_t)dma_p->kaddrp; 3619 #endif 3620 buf_size = dma_p->orig_alength; 3621 NXGE_DEBUG_MSG((NULL, DMA_CTL, 3622 "nxge_dma_free_rx_data_buf: free dmap $%p " 3623 "kaddr $%p buf_size %d", 3624 dma_p, 3625 kaddr, buf_size)); 3626 nxge_free_buf(KMEM_ALLOC, kaddr, buf_size); 3627 dma_p->alength = 0; 3628 dma_p->orig_alength = 0; 3629 dma_p->kaddrp = NULL; 3630 dma_p->kmem_alloc_type = B_FALSE; 3631 } 3632 3633 NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_dma_free_rx_data_buf")); 3634 } 3635 3636 /* 3637 * nxge_m_start() -- start transmitting and receiving. 3638 * 3639 * This function is called by the MAC layer when the first 3640 * stream is open to prepare the hardware ready for sending 3641 * and transmitting packets. 3642 */ 3643 static int 3644 nxge_m_start(void *arg) 3645 { 3646 p_nxge_t nxgep = (p_nxge_t)arg; 3647 3648 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_start")); 3649 3650 if (nxge_peu_reset_enable && !nxgep->nxge_link_poll_timerid) { 3651 (void) nxge_link_monitor(nxgep, LINK_MONITOR_START); 3652 } 3653 3654 MUTEX_ENTER(nxgep->genlock); 3655 if (nxge_init(nxgep) != NXGE_OK) { 3656 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3657 "<== nxge_m_start: initialization failed")); 3658 MUTEX_EXIT(nxgep->genlock); 3659 return (EIO); 3660 } 3661 3662 if (nxgep->nxge_mac_state == NXGE_MAC_STARTED) 3663 goto nxge_m_start_exit; 3664 /* 3665 * Start timer to check the system error and tx hangs 3666 */ 3667 if (!isLDOMguest(nxgep)) 3668 nxgep->nxge_timerid = nxge_start_timer(nxgep, 3669 nxge_check_hw_state, NXGE_CHECK_TIMER); 3670 #if defined(sun4v) 3671 else 3672 nxge_hio_start_timer(nxgep); 3673 #endif 3674 3675 nxgep->link_notify = B_TRUE; 3676 3677 nxgep->nxge_mac_state = NXGE_MAC_STARTED; 3678 3679 nxge_m_start_exit: 3680 MUTEX_EXIT(nxgep->genlock); 3681 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_start")); 3682 return (0); 3683 } 3684 3685 /* 3686 * nxge_m_stop(): stop transmitting and receiving. 
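 *
 * (Counterpart of nxge_m_start(): the state-check timer is stopped
 * first, then the hardware is quiesced via nxge_uninit() under
 * genlock.)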
3687 */ 3688 static void 3689 nxge_m_stop(void *arg) 3690 { 3691 p_nxge_t nxgep = (p_nxge_t)arg; 3692 3693 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_stop")); 3694 3695 if (nxgep->nxge_timerid) { 3696 nxge_stop_timer(nxgep, nxgep->nxge_timerid); 3697 nxgep->nxge_timerid = 0; 3698 } 3699 3700 MUTEX_ENTER(nxgep->genlock); 3701 nxgep->nxge_mac_state = NXGE_MAC_STOPPING; 3702 nxge_uninit(nxgep); 3703 3704 nxgep->nxge_mac_state = NXGE_MAC_STOPPED; 3705 3706 MUTEX_EXIT(nxgep->genlock); 3707 3708 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_stop")); 3709 } 3710 3711 static int 3712 nxge_m_unicst(void *arg, const uint8_t *macaddr) 3713 { 3714 p_nxge_t nxgep = (p_nxge_t)arg; 3715 struct ether_addr addrp; 3716 3717 NXGE_DEBUG_MSG((nxgep, MAC_CTL, "==> nxge_m_unicst")); 3718 3719 bcopy(macaddr, (uint8_t *)&addrp, ETHERADDRL); 3720 if (nxge_set_mac_addr(nxgep, &addrp)) { 3721 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3722 "<== nxge_m_unicst: set unitcast failed")); 3723 return (EINVAL); 3724 } 3725 3726 NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_m_unicst")); 3727 3728 return (0); 3729 } 3730 3731 static int 3732 nxge_m_multicst(void *arg, boolean_t add, const uint8_t *mca) 3733 { 3734 p_nxge_t nxgep = (p_nxge_t)arg; 3735 struct ether_addr addrp; 3736 3737 NXGE_DEBUG_MSG((nxgep, MAC_CTL, 3738 "==> nxge_m_multicst: add %d", add)); 3739 3740 bcopy(mca, (uint8_t *)&addrp, ETHERADDRL); 3741 if (add) { 3742 if (nxge_add_mcast_addr(nxgep, &addrp)) { 3743 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3744 "<== nxge_m_multicst: add multicast failed")); 3745 return (EINVAL); 3746 } 3747 } else { 3748 if (nxge_del_mcast_addr(nxgep, &addrp)) { 3749 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3750 "<== nxge_m_multicst: del multicast failed")); 3751 return (EINVAL); 3752 } 3753 } 3754 3755 NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_m_multicst")); 3756 3757 return (0); 3758 } 3759 3760 static int 3761 nxge_m_promisc(void *arg, boolean_t on) 3762 { 3763 p_nxge_t nxgep = (p_nxge_t)arg; 3764 3765 NXGE_DEBUG_MSG((nxgep, MAC_CTL, 3766 "==> nxge_m_promisc: on %d", on)); 3767 3768 if (nxge_set_promisc(nxgep, on)) { 3769 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3770 "<== nxge_m_promisc: set promisc failed")); 3771 return (EINVAL); 3772 } 3773 3774 NXGE_DEBUG_MSG((nxgep, MAC_CTL, 3775 "<== nxge_m_promisc: on %d", on)); 3776 3777 return (0); 3778 } 3779 3780 static void 3781 nxge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp) 3782 { 3783 p_nxge_t nxgep = (p_nxge_t)arg; 3784 struct iocblk *iocp; 3785 boolean_t need_privilege; 3786 int err; 3787 int cmd; 3788 3789 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl")); 3790 3791 iocp = (struct iocblk *)mp->b_rptr; 3792 iocp->ioc_error = 0; 3793 need_privilege = B_TRUE; 3794 cmd = iocp->ioc_cmd; 3795 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl: cmd 0x%08x", cmd)); 3796 switch (cmd) { 3797 default: 3798 miocnak(wq, mp, 0, EINVAL); 3799 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl: invalid")); 3800 return; 3801 3802 case LB_GET_INFO_SIZE: 3803 case LB_GET_INFO: 3804 case LB_GET_MODE: 3805 need_privilege = B_FALSE; 3806 break; 3807 case LB_SET_MODE: 3808 break; 3809 3810 3811 case NXGE_GET_MII: 3812 case NXGE_PUT_MII: 3813 case NXGE_GET64: 3814 case NXGE_PUT64: 3815 case NXGE_GET_TX_RING_SZ: 3816 case NXGE_GET_TX_DESC: 3817 case NXGE_TX_SIDE_RESET: 3818 case NXGE_RX_SIDE_RESET: 3819 case NXGE_GLOBAL_RESET: 3820 case NXGE_RESET_MAC: 3821 case NXGE_TX_REGS_DUMP: 3822 case NXGE_RX_REGS_DUMP: 3823 case NXGE_INT_REGS_DUMP: 3824 case NXGE_VIR_INT_REGS_DUMP: 3825 case NXGE_PUT_TCAM: 3826 case 
NXGE_GET_TCAM: 3827 case NXGE_RTRACE: 3828 case NXGE_RDUMP: 3829 3830 need_privilege = B_FALSE; 3831 break; 3832 case NXGE_INJECT_ERR: 3833 cmn_err(CE_NOTE, "!nxge_m_ioctl: Inject error\n"); 3834 nxge_err_inject(nxgep, wq, mp); 3835 break; 3836 } 3837 3838 if (need_privilege) { 3839 err = secpolicy_net_config(iocp->ioc_cr, B_FALSE); 3840 if (err != 0) { 3841 miocnak(wq, mp, 0, err); 3842 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3843 "<== nxge_m_ioctl: no priv")); 3844 return; 3845 } 3846 } 3847 3848 switch (cmd) { 3849 3850 case LB_GET_MODE: 3851 case LB_SET_MODE: 3852 case LB_GET_INFO_SIZE: 3853 case LB_GET_INFO: 3854 nxge_loopback_ioctl(nxgep, wq, mp, iocp); 3855 break; 3856 3857 case NXGE_GET_MII: 3858 case NXGE_PUT_MII: 3859 case NXGE_PUT_TCAM: 3860 case NXGE_GET_TCAM: 3861 case NXGE_GET64: 3862 case NXGE_PUT64: 3863 case NXGE_GET_TX_RING_SZ: 3864 case NXGE_GET_TX_DESC: 3865 case NXGE_TX_SIDE_RESET: 3866 case NXGE_RX_SIDE_RESET: 3867 case NXGE_GLOBAL_RESET: 3868 case NXGE_RESET_MAC: 3869 case NXGE_TX_REGS_DUMP: 3870 case NXGE_RX_REGS_DUMP: 3871 case NXGE_INT_REGS_DUMP: 3872 case NXGE_VIR_INT_REGS_DUMP: 3873 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 3874 "==> nxge_m_ioctl: cmd 0x%x", cmd)); 3875 nxge_hw_ioctl(nxgep, wq, mp, iocp); 3876 break; 3877 } 3878 3879 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl")); 3880 } 3881 3882 extern void nxge_rx_hw_blank(void *arg, time_t ticks, uint_t count); 3883 3884 static void 3885 nxge_m_resources(void *arg) 3886 { 3887 p_nxge_t nxgep = arg; 3888 mac_rx_fifo_t mrf; 3889 3890 nxge_grp_set_t *set = &nxgep->rx_set; 3891 uint8_t rdc; 3892 3893 rx_rcr_ring_t *ring; 3894 3895 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_m_resources")); 3896 3897 MUTEX_ENTER(nxgep->genlock); 3898 3899 if (set->owned.map == 0) { 3900 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 3901 "nxge_m_resources: no receive resources")); 3902 goto nxge_m_resources_exit; 3903 } 3904 3905 /* 3906 * CR 6492541 Check to see if the drv_state has been initialized, 3907 * if not * call nxge_init(). 3908 */ 3909 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 3910 if (nxge_init(nxgep) != NXGE_OK) 3911 goto nxge_m_resources_exit; 3912 } 3913 3914 mrf.mrf_type = MAC_RX_FIFO; 3915 mrf.mrf_blank = nxge_rx_hw_blank; 3916 mrf.mrf_arg = (void *)nxgep; 3917 3918 mrf.mrf_normal_blank_time = 128; 3919 mrf.mrf_normal_pkt_count = 8; 3920 3921 /* 3922 * Export our receive resources to the MAC layer. 3923 */ 3924 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 3925 if ((1 << rdc) & set->owned.map) { 3926 ring = nxgep->rx_rcr_rings->rcr_rings[rdc]; 3927 if (ring == 0) { 3928 /* 3929 * This is a big deal only if we are 3930 * *not* in an LDOMs environment. 
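 * (In an LDOMs guest an owned RDC may legitimately have no RCR
 * ring yet, so the complaint below is limited to SOLARIS_DOMAIN.)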
	/*
	 * Export our receive resources to the MAC layer.
	 */
	for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
		if ((1 << rdc) & set->owned.map) {
			ring = nxgep->rx_rcr_rings->rcr_rings[rdc];
			if (ring == 0) {
				/*
				 * This is a big deal only if we are
				 * *not* in an LDOMs environment.
				 */
				if (nxgep->environs == SOLARIS_DOMAIN) {
					cmn_err(CE_NOTE,
					    "==> nxge_m_resources: "
					    "ring %d == 0", rdc);
				}
				continue;
			}
			ring->rcr_mac_handle = mac_resource_add
			    (nxgep->mach, (mac_resource_t *)&mrf);

			NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
			    "==> nxge_m_resources: RDC %d RCR %p "
			    "MAC handle %p",
			    rdc, ring, ring->rcr_mac_handle));
		}
	}

nxge_m_resources_exit:
	MUTEX_EXIT(nxgep->genlock);
	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_m_resources"));
}

void
nxge_mmac_kstat_update(p_nxge_t nxgep, mac_addr_slot_t slot, boolean_t factory)
{
	p_nxge_mmac_stats_t mmac_stats;
	int i;
	nxge_mmac_t *mmac_info;

	mmac_info = &nxgep->nxge_mmac_info;

	mmac_stats = &nxgep->statsp->mmac_stats;
	mmac_stats->mmac_max_cnt = mmac_info->num_mmac;
	mmac_stats->mmac_avail_cnt = mmac_info->naddrfree;

	for (i = 0; i < ETHERADDRL; i++) {
		if (factory) {
			mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i]
			    = mmac_info->factory_mac_pool[slot][
			    (ETHERADDRL-1) - i];
		} else {
			mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i]
			    = mmac_info->mac_pool[slot].addr[
			    (ETHERADDRL - 1) - i];
		}
	}
}

/*
 * nxge_altmac_set() -- Set an alternate MAC address
 */
static int
nxge_altmac_set(p_nxge_t nxgep, uint8_t *maddr, mac_addr_slot_t slot)
{
	uint8_t addrn;
	uint8_t portn;
	npi_mac_addr_t altmac;
	hostinfo_t mac_rdc;
	p_nxge_class_pt_cfg_t clscfgp;

	altmac.w2 = ((uint16_t)maddr[0] << 8) | ((uint16_t)maddr[1] & 0x0ff);
	altmac.w1 = ((uint16_t)maddr[2] << 8) | ((uint16_t)maddr[3] & 0x0ff);
	altmac.w0 = ((uint16_t)maddr[4] << 8) | ((uint16_t)maddr[5] & 0x0ff);

	portn = nxgep->mac.portnum;
	addrn = (uint8_t)slot - 1;

	if (npi_mac_altaddr_entry(nxgep->npi_handle, OP_SET, portn,
	    addrn, &altmac) != NPI_SUCCESS)
		return (EIO);

	/*
	 * Set the rdc table number for the host info entry
	 * for this mac address slot.
	 */
	clscfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
	mac_rdc.value = 0;
	mac_rdc.bits.w0.rdc_tbl_num = clscfgp->mac_host_info[addrn].rdctbl;
	mac_rdc.bits.w0.mac_pref = clscfgp->mac_host_info[addrn].mpr_npr;

	if (npi_mac_hostinfo_entry(nxgep->npi_handle, OP_SET,
	    nxgep->function_num, addrn, &mac_rdc) != NPI_SUCCESS) {
		return (EIO);
	}

	/*
	 * Enable comparison with the alternate MAC address.
	 * On an XMAC port (register XMAC_ADDR_CMPEN) the first alternate
	 * address is enabled by bit 0, while on a BMAC port (register
	 * BMAC_ALTAD_CMPEN) it is enabled by bit 1, so slot must be
	 * converted to addrn accordingly before calling
	 * npi_mac_altaddr_enable.
	 */
	if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1)
		addrn = (uint8_t)slot - 1;
	else
		addrn = (uint8_t)slot;

	if (npi_mac_altaddr_enable(nxgep->npi_handle, portn, addrn)
	    != NPI_SUCCESS)
		return (EIO);

	return (0);
}

/*
 * nxge_m_mmac_add() - find an unused address slot, set the address
 * value to the one specified, enable the port to start filtering on
 * the new MAC address. Returns 0 on success.
4039 */ 4040 int 4041 nxge_m_mmac_add(void *arg, mac_multi_addr_t *maddr) 4042 { 4043 p_nxge_t nxgep = arg; 4044 mac_addr_slot_t slot; 4045 nxge_mmac_t *mmac_info; 4046 int err; 4047 nxge_status_t status; 4048 4049 mutex_enter(nxgep->genlock); 4050 4051 /* 4052 * Make sure that nxge is initialized, if _start() has 4053 * not been called. 4054 */ 4055 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 4056 status = nxge_init(nxgep); 4057 if (status != NXGE_OK) { 4058 mutex_exit(nxgep->genlock); 4059 return (ENXIO); 4060 } 4061 } 4062 4063 mmac_info = &nxgep->nxge_mmac_info; 4064 if (mmac_info->naddrfree == 0) { 4065 mutex_exit(nxgep->genlock); 4066 return (ENOSPC); 4067 } 4068 if (!mac_unicst_verify(nxgep->mach, maddr->mma_addr, 4069 maddr->mma_addrlen)) { 4070 mutex_exit(nxgep->genlock); 4071 return (EINVAL); 4072 } 4073 /* 4074 * Search for the first available slot. Because naddrfree 4075 * is not zero, we are guaranteed to find one. 4076 * Slot 0 is for unique (primary) MAC. The first alternate 4077 * MAC slot is slot 1. 4078 * Each of the first two ports of Neptune has 16 alternate 4079 * MAC slots but only the first 7 (of 15) slots have assigned factory 4080 * MAC addresses. We first search among the slots without bundled 4081 * factory MACs. If we fail to find one in that range, then we 4082 * search the slots with bundled factory MACs. A factory MAC 4083 * will be wasted while the slot is used with a user MAC address. 4084 * But the slot could be used by factory MAC again after calling 4085 * nxge_m_mmac_remove and nxge_m_mmac_reserve. 4086 */ 4087 if (mmac_info->num_factory_mmac < mmac_info->num_mmac) { 4088 for (slot = mmac_info->num_factory_mmac + 1; 4089 slot <= mmac_info->num_mmac; slot++) { 4090 if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED)) 4091 break; 4092 } 4093 if (slot > mmac_info->num_mmac) { 4094 for (slot = 1; slot <= mmac_info->num_factory_mmac; 4095 slot++) { 4096 if (!(mmac_info->mac_pool[slot].flags 4097 & MMAC_SLOT_USED)) 4098 break; 4099 } 4100 } 4101 } else { 4102 for (slot = 1; slot <= mmac_info->num_mmac; slot++) { 4103 if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED)) 4104 break; 4105 } 4106 } 4107 ASSERT(slot <= mmac_info->num_mmac); 4108 if ((err = nxge_altmac_set(nxgep, maddr->mma_addr, slot)) != 0) { 4109 mutex_exit(nxgep->genlock); 4110 return (err); 4111 } 4112 bcopy(maddr->mma_addr, mmac_info->mac_pool[slot].addr, ETHERADDRL); 4113 mmac_info->mac_pool[slot].flags |= MMAC_SLOT_USED; 4114 mmac_info->mac_pool[slot].flags &= ~MMAC_VENDOR_ADDR; 4115 mmac_info->naddrfree--; 4116 nxge_mmac_kstat_update(nxgep, slot, B_FALSE); 4117 4118 maddr->mma_slot = slot; 4119 4120 mutex_exit(nxgep->genlock); 4121 return (0); 4122 } 4123 4124 /* 4125 * This function reserves an unused slot and programs the slot and the HW 4126 * with a factory mac address. 4127 */ 4128 static int 4129 nxge_m_mmac_reserve(void *arg, mac_multi_addr_t *maddr) 4130 { 4131 p_nxge_t nxgep = arg; 4132 mac_addr_slot_t slot; 4133 nxge_mmac_t *mmac_info; 4134 int err; 4135 nxge_status_t status; 4136 4137 mutex_enter(nxgep->genlock); 4138 4139 /* 4140 * Make sure that nxge is initialized, if _start() has 4141 * not been called. 
 */
	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
		status = nxge_init(nxgep);
		if (status != NXGE_OK) {
			mutex_exit(nxgep->genlock);
			return (ENXIO);
		}
	}

	mmac_info = &nxgep->nxge_mmac_info;
	if (mmac_info->naddrfree == 0) {
		mutex_exit(nxgep->genlock);
		return (ENOSPC);
	}

	slot = maddr->mma_slot;
	if (slot == -1) {	/* -1: Take the first available slot */
		for (slot = 1; slot <= mmac_info->num_factory_mmac; slot++) {
			if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED))
				break;
		}
		if (slot > mmac_info->num_factory_mmac) {
			mutex_exit(nxgep->genlock);
			return (ENOSPC);
		}
	}
	if (slot < 1 || slot > mmac_info->num_factory_mmac) {
		/*
		 * Do not support factory MAC at a slot greater than
		 * num_factory_mmac even when there are available factory
		 * MAC addresses, because the alternate MACs are bundled
		 * with slot[1] through slot[num_factory_mmac].
		 */
		mutex_exit(nxgep->genlock);
		return (EINVAL);
	}
	if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) {
		mutex_exit(nxgep->genlock);
		return (EBUSY);
	}
	/* Verify the address to be reserved */
	if (!mac_unicst_verify(nxgep->mach,
	    mmac_info->factory_mac_pool[slot], ETHERADDRL)) {
		mutex_exit(nxgep->genlock);
		return (EINVAL);
	}
	if (err = nxge_altmac_set(nxgep,
	    mmac_info->factory_mac_pool[slot], slot)) {
		mutex_exit(nxgep->genlock);
		return (err);
	}
	bcopy(mmac_info->factory_mac_pool[slot], maddr->mma_addr, ETHERADDRL);
	mmac_info->mac_pool[slot].flags |= MMAC_SLOT_USED | MMAC_VENDOR_ADDR;
	mmac_info->naddrfree--;

	nxge_mmac_kstat_update(nxgep, slot, B_TRUE);
	mutex_exit(nxgep->genlock);

	/* Pass info back to the caller */
	maddr->mma_slot = slot;
	maddr->mma_addrlen = ETHERADDRL;
	maddr->mma_flags = MMAC_SLOT_USED | MMAC_VENDOR_ADDR;

	return (0);
}
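/*
 * Caller's-eye sketch of nxge_m_mmac_reserve() (illustration only, not
 * driver code): to claim the first free factory address, the MAC layer
 * passes mma_slot = -1:
 *
 *	mac_multi_addr_t maddr;
 *
 *	maddr.mma_slot = -1;
 *	if (nxge_m_mmac_reserve(nxgep, &maddr) == 0) {
 *		(void) printf("slot %d flags 0x%x\n",
 *		    maddr.mma_slot, maddr.mma_flags);
 *	}
 *
 * On success maddr.mma_addr holds the factory MAC and mma_flags is
 * MMAC_SLOT_USED | MMAC_VENDOR_ADDR, as set above.
 */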
/*
 * Remove the specified mac address and update the HW not to filter
 * the mac address anymore.
 */
int
nxge_m_mmac_remove(void *arg, mac_addr_slot_t slot)
{
	p_nxge_t nxgep = arg;
	nxge_mmac_t *mmac_info;
	uint8_t addrn;
	uint8_t portn;
	int err = 0;
	nxge_status_t status;

	mutex_enter(nxgep->genlock);

	/*
	 * Make sure that nxge is initialized, if _start() has
	 * not been called.
	 */
	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
		status = nxge_init(nxgep);
		if (status != NXGE_OK) {
			mutex_exit(nxgep->genlock);
			return (ENXIO);
		}
	}

	mmac_info = &nxgep->nxge_mmac_info;
	if (slot < 1 || slot > mmac_info->num_mmac) {
		mutex_exit(nxgep->genlock);
		return (EINVAL);
	}

	portn = nxgep->mac.portnum;
	if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1)
		addrn = (uint8_t)slot - 1;
	else
		addrn = (uint8_t)slot;

	if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) {
		if (npi_mac_altaddr_disable(nxgep->npi_handle, portn, addrn)
		    == NPI_SUCCESS) {
			mmac_info->naddrfree++;
			mmac_info->mac_pool[slot].flags &= ~MMAC_SLOT_USED;
			/*
			 * Regardless of whether the MAC we just stopped
			 * filtering is a user addr or a factory addr, we
			 * must set the MMAC_VENDOR_ADDR flag if this slot
			 * has an associated factory MAC to indicate that
			 * a factory MAC is available.
			 */
			if (slot <= mmac_info->num_factory_mmac) {
				mmac_info->mac_pool[slot].flags
				    |= MMAC_VENDOR_ADDR;
			}
			/*
			 * Clear mac_pool[slot].addr so that kstat shows 0
			 * alternate MAC address if the slot is not used.
			 * (But nxge_m_mmac_get returns the factory MAC even
			 * when the slot is not used!)
			 */
			bzero(mmac_info->mac_pool[slot].addr, ETHERADDRL);
			nxge_mmac_kstat_update(nxgep, slot, B_FALSE);
		} else {
			err = EIO;
		}
	} else {
		err = EINVAL;
	}

	mutex_exit(nxgep->genlock);
	return (err);
}

/*
 * Modify a mac address added by nxge_m_mmac_add() or nxge_m_mmac_reserve().
 */
static int
nxge_m_mmac_modify(void *arg, mac_multi_addr_t *maddr)
{
	p_nxge_t nxgep = arg;
	mac_addr_slot_t slot;
	nxge_mmac_t *mmac_info;
	int err = 0;
	nxge_status_t status;

	if (!mac_unicst_verify(nxgep->mach, maddr->mma_addr,
	    maddr->mma_addrlen))
		return (EINVAL);

	slot = maddr->mma_slot;

	mutex_enter(nxgep->genlock);

	/*
	 * Make sure that nxge is initialized, if _start() has
	 * not been called.
	 */
	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
		status = nxge_init(nxgep);
		if (status != NXGE_OK) {
			mutex_exit(nxgep->genlock);
			return (ENXIO);
		}
	}

	mmac_info = &nxgep->nxge_mmac_info;
	if (slot < 1 || slot > mmac_info->num_mmac) {
		mutex_exit(nxgep->genlock);
		return (EINVAL);
	}
	if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) {
		/* Update the software state only if the HW accepted it. */
		if ((err = nxge_altmac_set(nxgep, maddr->mma_addr, slot))
		    == 0) {
			bcopy(maddr->mma_addr, mmac_info->mac_pool[slot].addr,
			    ETHERADDRL);
			/*
			 * Assume that the MAC passed down from the caller
			 * is not a factory MAC address (the caller should
			 * use mmac_remove followed by mmac_reserve to use
			 * the factory MAC for this slot).
			 */
			mmac_info->mac_pool[slot].flags &= ~MMAC_VENDOR_ADDR;
			nxge_mmac_kstat_update(nxgep, slot, B_FALSE);
		}
	} else {
		err = EINVAL;
	}
	mutex_exit(nxgep->genlock);
	return (err);
}

/*
 * nxge_m_mmac_get() - Get the MAC address and other information
 * related to the slot. mma_flags should be set to 0 in the call.
 * Note: although kstat shows MAC address as zero when a slot is
 * not used, Crossbow expects nxge_m_mmac_get to copy factory MAC
 * to the caller as long as the slot is not using a user MAC address.
 * The following table shows the rules:
 *
 *				     USED   VENDOR	mma_addr
 * ------------------------------------------------------------
 * (1) Slot uses a user MAC:	     yes    no		user MAC
 * (2) Slot uses a factory MAC:	     yes    yes		factory MAC
 * (3) Slot is not used but is
 *     factory MAC capable:	     no	    yes		factory MAC
 * (4) Slot is not used and is
 *     not factory MAC capable:	     no	    no		0
 * ------------------------------------------------------------
 */
static int
nxge_m_mmac_get(void *arg, mac_multi_addr_t *maddr)
{
	nxge_t *nxgep = arg;
	mac_addr_slot_t slot;
	nxge_mmac_t *mmac_info;
	nxge_status_t status;

	slot = maddr->mma_slot;

	mutex_enter(nxgep->genlock);

	/*
	 * Make sure that nxge is initialized, if _start() has
	 * not been called.
4374 */ 4375 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 4376 status = nxge_init(nxgep); 4377 if (status != NXGE_OK) { 4378 mutex_exit(nxgep->genlock); 4379 return (ENXIO); 4380 } 4381 } 4382 4383 mmac_info = &nxgep->nxge_mmac_info; 4384 4385 if (slot < 1 || slot > mmac_info->num_mmac) { 4386 mutex_exit(nxgep->genlock); 4387 return (EINVAL); 4388 } 4389 maddr->mma_flags = 0; 4390 if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) 4391 maddr->mma_flags |= MMAC_SLOT_USED; 4392 4393 if (mmac_info->mac_pool[slot].flags & MMAC_VENDOR_ADDR) { 4394 maddr->mma_flags |= MMAC_VENDOR_ADDR; 4395 bcopy(mmac_info->factory_mac_pool[slot], 4396 maddr->mma_addr, ETHERADDRL); 4397 maddr->mma_addrlen = ETHERADDRL; 4398 } else { 4399 if (maddr->mma_flags & MMAC_SLOT_USED) { 4400 bcopy(mmac_info->mac_pool[slot].addr, 4401 maddr->mma_addr, ETHERADDRL); 4402 maddr->mma_addrlen = ETHERADDRL; 4403 } else { 4404 bzero(maddr->mma_addr, ETHERADDRL); 4405 maddr->mma_addrlen = 0; 4406 } 4407 } 4408 mutex_exit(nxgep->genlock); 4409 return (0); 4410 } 4411 4412 static boolean_t 4413 nxge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data) 4414 { 4415 nxge_t *nxgep = arg; 4416 uint32_t *txflags = cap_data; 4417 multiaddress_capab_t *mmacp = cap_data; 4418 4419 switch (cap) { 4420 case MAC_CAPAB_HCKSUM: 4421 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4422 "==> nxge_m_getcapab: checksum %d", nxge_cksum_offload)); 4423 if (nxge_cksum_offload <= 1) { 4424 *txflags = HCKSUM_INET_PARTIAL; 4425 } 4426 break; 4427 4428 case MAC_CAPAB_POLL: 4429 /* 4430 * There's nothing for us to fill in, simply returning 4431 * B_TRUE stating that we support polling is sufficient. 4432 */ 4433 break; 4434 4435 case MAC_CAPAB_MULTIADDRESS: 4436 mmacp = (multiaddress_capab_t *)cap_data; 4437 mutex_enter(nxgep->genlock); 4438 4439 mmacp->maddr_naddr = nxgep->nxge_mmac_info.num_mmac; 4440 mmacp->maddr_naddrfree = nxgep->nxge_mmac_info.naddrfree; 4441 mmacp->maddr_flag = 0; /* 0 is required by PSARC2006/265 */ 4442 /* 4443 * maddr_handle is driver's private data, passed back to 4444 * entry point functions as arg. 4445 */ 4446 mmacp->maddr_handle = nxgep; 4447 mmacp->maddr_add = nxge_m_mmac_add; 4448 mmacp->maddr_remove = nxge_m_mmac_remove; 4449 mmacp->maddr_modify = nxge_m_mmac_modify; 4450 mmacp->maddr_get = nxge_m_mmac_get; 4451 mmacp->maddr_reserve = nxge_m_mmac_reserve; 4452 4453 mutex_exit(nxgep->genlock); 4454 break; 4455 4456 case MAC_CAPAB_LSO: { 4457 mac_capab_lso_t *cap_lso = cap_data; 4458 4459 if (nxgep->soft_lso_enable) { 4460 if (nxge_cksum_offload <= 1) { 4461 cap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4; 4462 if (nxge_lso_max > NXGE_LSO_MAXLEN) { 4463 nxge_lso_max = NXGE_LSO_MAXLEN; 4464 } 4465 cap_lso->lso_basic_tcp_ipv4.lso_max = 4466 nxge_lso_max; 4467 } 4468 break; 4469 } else { 4470 return (B_FALSE); 4471 } 4472 } 4473 4474 #if defined(sun4v) 4475 case MAC_CAPAB_RINGS: { 4476 mac_capab_rings_t *mrings = (mac_capab_rings_t *)cap_data; 4477 4478 /* 4479 * Only the service domain driver responds to 4480 * this capability request. 4481 */ 4482 if (isLDOMservice(nxgep)) { 4483 mrings->mr_handle = (void *)nxgep; 4484 4485 /* 4486 * No dynamic allocation of groups and 4487 * rings at this time. Shares dictate the 4488 * configuration. 
4489 */ 4490 mrings->mr_gadd_ring = NULL; 4491 mrings->mr_grem_ring = NULL; 4492 mrings->mr_rget = NULL; 4493 mrings->mr_gget = nxge_hio_group_get; 4494 4495 if (mrings->mr_type == MAC_RING_TYPE_RX) { 4496 mrings->mr_rnum = 8; /* XXX */ 4497 mrings->mr_gnum = 6; /* XXX */ 4498 } else { 4499 mrings->mr_rnum = 8; /* XXX */ 4500 mrings->mr_gnum = 0; /* XXX */ 4501 } 4502 } else 4503 return (B_FALSE); 4504 break; 4505 } 4506 4507 case MAC_CAPAB_SHARES: { 4508 mac_capab_share_t *mshares = (mac_capab_share_t *)cap_data; 4509 4510 /* 4511 * Only the service domain driver responds to 4512 * this capability request. 4513 */ 4514 if (isLDOMservice(nxgep)) { 4515 mshares->ms_snum = 3; 4516 mshares->ms_handle = (void *)nxgep; 4517 mshares->ms_salloc = nxge_hio_share_alloc; 4518 mshares->ms_sfree = nxge_hio_share_free; 4519 mshares->ms_sadd = NULL; 4520 mshares->ms_sremove = NULL; 4521 mshares->ms_squery = nxge_hio_share_query; 4522 } else 4523 return (B_FALSE); 4524 break; 4525 } 4526 #endif 4527 default: 4528 return (B_FALSE); 4529 } 4530 return (B_TRUE); 4531 } 4532 4533 static boolean_t 4534 nxge_param_locked(mac_prop_id_t pr_num) 4535 { 4536 /* 4537 * All adv_* parameters are locked (read-only) while 4538 * the device is in any sort of loopback mode ... 4539 */ 4540 switch (pr_num) { 4541 case MAC_PROP_ADV_1000FDX_CAP: 4542 case MAC_PROP_EN_1000FDX_CAP: 4543 case MAC_PROP_ADV_1000HDX_CAP: 4544 case MAC_PROP_EN_1000HDX_CAP: 4545 case MAC_PROP_ADV_100FDX_CAP: 4546 case MAC_PROP_EN_100FDX_CAP: 4547 case MAC_PROP_ADV_100HDX_CAP: 4548 case MAC_PROP_EN_100HDX_CAP: 4549 case MAC_PROP_ADV_10FDX_CAP: 4550 case MAC_PROP_EN_10FDX_CAP: 4551 case MAC_PROP_ADV_10HDX_CAP: 4552 case MAC_PROP_EN_10HDX_CAP: 4553 case MAC_PROP_AUTONEG: 4554 case MAC_PROP_FLOWCTRL: 4555 return (B_TRUE); 4556 } 4557 return (B_FALSE); 4558 } 4559 4560 /* 4561 * callback functions for set/get of properties 4562 */ 4563 static int 4564 nxge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num, 4565 uint_t pr_valsize, const void *pr_val) 4566 { 4567 nxge_t *nxgep = barg; 4568 p_nxge_param_t param_arr; 4569 p_nxge_stats_t statsp; 4570 int err = 0; 4571 uint8_t val; 4572 uint32_t cur_mtu, new_mtu, old_framesize; 4573 link_flowctrl_t fl; 4574 4575 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_setprop")); 4576 param_arr = nxgep->param_arr; 4577 statsp = nxgep->statsp; 4578 mutex_enter(nxgep->genlock); 4579 if (statsp->port_stats.lb_mode != nxge_lb_normal && 4580 nxge_param_locked(pr_num)) { 4581 /* 4582 * All adv_* parameters are locked (read-only) 4583 * while the device is in any sort of loopback mode. 
4584 */ 4585 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4586 "==> nxge_m_setprop: loopback mode: read only")); 4587 mutex_exit(nxgep->genlock); 4588 return (EBUSY); 4589 } 4590 4591 val = *(uint8_t *)pr_val; 4592 switch (pr_num) { 4593 case MAC_PROP_EN_1000FDX_CAP: 4594 nxgep->param_en_1000fdx = val; 4595 param_arr[param_anar_1000fdx].value = val; 4596 4597 goto reprogram; 4598 4599 case MAC_PROP_EN_100FDX_CAP: 4600 nxgep->param_en_100fdx = val; 4601 param_arr[param_anar_100fdx].value = val; 4602 4603 goto reprogram; 4604 4605 case MAC_PROP_EN_10FDX_CAP: 4606 nxgep->param_en_10fdx = val; 4607 param_arr[param_anar_10fdx].value = val; 4608 4609 goto reprogram; 4610 4611 case MAC_PROP_EN_1000HDX_CAP: 4612 case MAC_PROP_EN_100HDX_CAP: 4613 case MAC_PROP_EN_10HDX_CAP: 4614 case MAC_PROP_ADV_1000FDX_CAP: 4615 case MAC_PROP_ADV_1000HDX_CAP: 4616 case MAC_PROP_ADV_100FDX_CAP: 4617 case MAC_PROP_ADV_100HDX_CAP: 4618 case MAC_PROP_ADV_10FDX_CAP: 4619 case MAC_PROP_ADV_10HDX_CAP: 4620 case MAC_PROP_STATUS: 4621 case MAC_PROP_SPEED: 4622 case MAC_PROP_DUPLEX: 4623 err = EINVAL; /* cannot set read-only properties */ 4624 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4625 "==> nxge_m_setprop: read only property %d", 4626 pr_num)); 4627 break; 4628 4629 case MAC_PROP_AUTONEG: 4630 param_arr[param_autoneg].value = val; 4631 4632 goto reprogram; 4633 4634 case MAC_PROP_MTU: 4635 if (nxgep->nxge_mac_state == NXGE_MAC_STARTED) { 4636 err = EBUSY; 4637 break; 4638 } 4639 4640 cur_mtu = nxgep->mac.default_mtu; 4641 bcopy(pr_val, &new_mtu, sizeof (new_mtu)); 4642 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4643 "==> nxge_m_setprop: set MTU: %d is_jumbo %d", 4644 new_mtu, nxgep->mac.is_jumbo)); 4645 4646 if (new_mtu == cur_mtu) { 4647 err = 0; 4648 break; 4649 } 4650 if (new_mtu < NXGE_DEFAULT_MTU || 4651 new_mtu > NXGE_MAXIMUM_MTU) { 4652 err = EINVAL; 4653 break; 4654 } 4655 4656 if ((new_mtu > NXGE_DEFAULT_MTU) && 4657 !nxgep->mac.is_jumbo) { 4658 err = EINVAL; 4659 break; 4660 } 4661 4662 old_framesize = (uint32_t)nxgep->mac.maxframesize; 4663 nxgep->mac.maxframesize = (uint16_t) 4664 (new_mtu + NXGE_EHEADER_VLAN_CRC); 4665 if (nxge_mac_set_framesize(nxgep)) { 4666 nxgep->mac.maxframesize = 4667 (uint16_t)old_framesize; 4668 err = EINVAL; 4669 break; 4670 } 4671 4672 err = mac_maxsdu_update(nxgep->mach, new_mtu); 4673 if (err) { 4674 nxgep->mac.maxframesize = 4675 (uint16_t)old_framesize; 4676 err = EINVAL; 4677 break; 4678 } 4679 4680 nxgep->mac.default_mtu = new_mtu; 4681 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4682 "==> nxge_m_setprop: set MTU: %d maxframe %d", 4683 new_mtu, nxgep->mac.maxframesize)); 4684 break; 4685 4686 case MAC_PROP_FLOWCTRL: 4687 bcopy(pr_val, &fl, sizeof (fl)); 4688 switch (fl) { 4689 default: 4690 err = EINVAL; 4691 break; 4692 4693 case LINK_FLOWCTRL_NONE: 4694 param_arr[param_anar_pause].value = 0; 4695 break; 4696 4697 case LINK_FLOWCTRL_RX: 4698 param_arr[param_anar_pause].value = 1; 4699 break; 4700 4701 case LINK_FLOWCTRL_TX: 4702 case LINK_FLOWCTRL_BI: 4703 err = EINVAL; 4704 break; 4705 } 4706 4707 reprogram: 4708 if (err == 0) { 4709 if (!nxge_param_link_update(nxgep)) { 4710 err = EINVAL; 4711 } 4712 } 4713 break; 4714 case MAC_PROP_PRIVATE: 4715 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4716 "==> nxge_m_setprop: private property")); 4717 err = nxge_set_priv_prop(nxgep, pr_name, pr_valsize, 4718 pr_val); 4719 break; 4720 4721 default: 4722 err = ENOTSUP; 4723 break; 4724 } 4725 4726 mutex_exit(nxgep->genlock); 4727 4728 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4729 "<== nxge_m_setprop (return %d)", err)); 4730 return (err); 4731 
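	/*
	 * Worked example for the MAC_PROP_MTU case above (assuming
	 * NXGE_EHEADER_VLAN_CRC accounts for the 14-byte Ethernet header,
	 * a 4-byte VLAN tag and the 4-byte FCS, i.e. 22 bytes): setting
	 * MTU 9000 on a jumbo-enabled port programs maxframesize to
	 * 9000 + 22 = 9022 before mac_maxsdu_update() is called; on any
	 * failure the old framesize is restored.
	 */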
} 4732 4733 static int 4734 nxge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num, 4735 uint_t pr_flags, uint_t pr_valsize, void *pr_val) 4736 { 4737 nxge_t *nxgep = barg; 4738 p_nxge_param_t param_arr = nxgep->param_arr; 4739 p_nxge_stats_t statsp = nxgep->statsp; 4740 int err = 0; 4741 link_flowctrl_t fl; 4742 uint64_t tmp = 0; 4743 link_state_t ls; 4744 boolean_t is_default = (pr_flags & MAC_PROP_DEFAULT); 4745 4746 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4747 "==> nxge_m_getprop: pr_num %d", pr_num)); 4748 4749 if (pr_valsize == 0) 4750 return (EINVAL); 4751 4752 if ((is_default) && (pr_num != MAC_PROP_PRIVATE)) { 4753 err = nxge_get_def_val(nxgep, pr_num, pr_valsize, pr_val); 4754 return (err); 4755 } 4756 4757 bzero(pr_val, pr_valsize); 4758 switch (pr_num) { 4759 case MAC_PROP_DUPLEX: 4760 *(uint8_t *)pr_val = statsp->mac_stats.link_duplex; 4761 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4762 "==> nxge_m_getprop: duplex mode %d", 4763 *(uint8_t *)pr_val)); 4764 break; 4765 4766 case MAC_PROP_SPEED: 4767 if (pr_valsize < sizeof (uint64_t)) 4768 return (EINVAL); 4769 tmp = statsp->mac_stats.link_speed * 1000000ull; 4770 bcopy(&tmp, pr_val, sizeof (tmp)); 4771 break; 4772 4773 case MAC_PROP_STATUS: 4774 if (pr_valsize < sizeof (link_state_t)) 4775 return (EINVAL); 4776 if (!statsp->mac_stats.link_up) 4777 ls = LINK_STATE_DOWN; 4778 else 4779 ls = LINK_STATE_UP; 4780 bcopy(&ls, pr_val, sizeof (ls)); 4781 break; 4782 4783 case MAC_PROP_AUTONEG: 4784 *(uint8_t *)pr_val = 4785 param_arr[param_autoneg].value; 4786 break; 4787 4788 case MAC_PROP_FLOWCTRL: 4789 if (pr_valsize < sizeof (link_flowctrl_t)) 4790 return (EINVAL); 4791 4792 fl = LINK_FLOWCTRL_NONE; 4793 if (param_arr[param_anar_pause].value) { 4794 fl = LINK_FLOWCTRL_RX; 4795 } 4796 bcopy(&fl, pr_val, sizeof (fl)); 4797 break; 4798 4799 case MAC_PROP_ADV_1000FDX_CAP: 4800 *(uint8_t *)pr_val = 4801 param_arr[param_anar_1000fdx].value; 4802 break; 4803 4804 case MAC_PROP_EN_1000FDX_CAP: 4805 *(uint8_t *)pr_val = nxgep->param_en_1000fdx; 4806 break; 4807 4808 case MAC_PROP_ADV_100FDX_CAP: 4809 *(uint8_t *)pr_val = 4810 param_arr[param_anar_100fdx].value; 4811 break; 4812 4813 case MAC_PROP_EN_100FDX_CAP: 4814 *(uint8_t *)pr_val = nxgep->param_en_100fdx; 4815 break; 4816 4817 case MAC_PROP_ADV_10FDX_CAP: 4818 *(uint8_t *)pr_val = 4819 param_arr[param_anar_10fdx].value; 4820 break; 4821 4822 case MAC_PROP_EN_10FDX_CAP: 4823 *(uint8_t *)pr_val = nxgep->param_en_10fdx; 4824 break; 4825 4826 case MAC_PROP_EN_1000HDX_CAP: 4827 case MAC_PROP_EN_100HDX_CAP: 4828 case MAC_PROP_EN_10HDX_CAP: 4829 case MAC_PROP_ADV_1000HDX_CAP: 4830 case MAC_PROP_ADV_100HDX_CAP: 4831 case MAC_PROP_ADV_10HDX_CAP: 4832 err = ENOTSUP; 4833 break; 4834 4835 case MAC_PROP_PRIVATE: 4836 err = nxge_get_priv_prop(nxgep, pr_name, pr_flags, 4837 pr_valsize, pr_val); 4838 break; 4839 default: 4840 err = EINVAL; 4841 break; 4842 } 4843 4844 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_getprop")); 4845 4846 return (err); 4847 } 4848 4849 /* ARGSUSED */ 4850 static int 4851 nxge_set_priv_prop(p_nxge_t nxgep, const char *pr_name, uint_t pr_valsize, 4852 const void *pr_val) 4853 { 4854 p_nxge_param_t param_arr = nxgep->param_arr; 4855 int err = 0; 4856 long result; 4857 4858 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4859 "==> nxge_set_priv_prop: name %s", pr_name)); 4860 4861 if (strcmp(pr_name, "_accept_jumbo") == 0) { 4862 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4863 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4864 "<== nxge_set_priv_prop: name %s " 4865 "pr_val %s result %d " 4866 
"param %d is_jumbo %d", 4867 pr_name, pr_val, result, 4868 param_arr[param_accept_jumbo].value, 4869 nxgep->mac.is_jumbo)); 4870 4871 if (result > 1 || result < 0) { 4872 err = EINVAL; 4873 } else { 4874 if (nxgep->mac.is_jumbo == 4875 (uint32_t)result) { 4876 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4877 "no change (%d %d)", 4878 nxgep->mac.is_jumbo, 4879 result)); 4880 return (0); 4881 } 4882 } 4883 4884 param_arr[param_accept_jumbo].value = result; 4885 nxgep->mac.is_jumbo = B_FALSE; 4886 if (result) { 4887 nxgep->mac.is_jumbo = B_TRUE; 4888 } 4889 4890 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4891 "<== nxge_set_priv_prop: name %s (value %d) is_jumbo %d", 4892 pr_name, result, nxgep->mac.is_jumbo)); 4893 4894 return (err); 4895 } 4896 4897 /* Blanking */ 4898 if (strcmp(pr_name, "_rxdma_intr_time") == 0) { 4899 err = nxge_param_rx_intr_time(nxgep, NULL, NULL, 4900 (char *)pr_val, 4901 (caddr_t)¶m_arr[param_rxdma_intr_time]); 4902 if (err) { 4903 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4904 "<== nxge_set_priv_prop: " 4905 "unable to set (%s)", pr_name)); 4906 err = EINVAL; 4907 } else { 4908 err = 0; 4909 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4910 "<== nxge_set_priv_prop: " 4911 "set (%s)", pr_name)); 4912 } 4913 4914 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4915 "<== nxge_set_priv_prop: name %s (value %d)", 4916 pr_name, result)); 4917 4918 return (err); 4919 } 4920 4921 if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) { 4922 err = nxge_param_rx_intr_pkts(nxgep, NULL, NULL, 4923 (char *)pr_val, 4924 (caddr_t)¶m_arr[param_rxdma_intr_pkts]); 4925 if (err) { 4926 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4927 "<== nxge_set_priv_prop: " 4928 "unable to set (%s)", pr_name)); 4929 err = EINVAL; 4930 } else { 4931 err = 0; 4932 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4933 "<== nxge_set_priv_prop: " 4934 "set (%s)", pr_name)); 4935 } 4936 4937 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4938 "<== nxge_set_priv_prop: name %s (value %d)", 4939 pr_name, result)); 4940 4941 return (err); 4942 } 4943 4944 /* Classification */ 4945 if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) { 4946 if (pr_val == NULL) { 4947 err = EINVAL; 4948 return (err); 4949 } 4950 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4951 4952 err = nxge_param_set_ip_opt(nxgep, NULL, 4953 NULL, (char *)pr_val, 4954 (caddr_t)¶m_arr[param_class_opt_ipv4_tcp]); 4955 4956 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4957 "<== nxge_set_priv_prop: name %s (value 0x%x)", 4958 pr_name, result)); 4959 4960 return (err); 4961 } 4962 4963 if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) { 4964 if (pr_val == NULL) { 4965 err = EINVAL; 4966 return (err); 4967 } 4968 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4969 4970 err = nxge_param_set_ip_opt(nxgep, NULL, 4971 NULL, (char *)pr_val, 4972 (caddr_t)¶m_arr[param_class_opt_ipv4_udp]); 4973 4974 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4975 "<== nxge_set_priv_prop: name %s (value 0x%x)", 4976 pr_name, result)); 4977 4978 return (err); 4979 } 4980 if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) { 4981 if (pr_val == NULL) { 4982 err = EINVAL; 4983 return (err); 4984 } 4985 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4986 4987 err = nxge_param_set_ip_opt(nxgep, NULL, 4988 NULL, (char *)pr_val, 4989 (caddr_t)¶m_arr[param_class_opt_ipv4_ah]); 4990 4991 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4992 "<== nxge_set_priv_prop: name %s (value 0x%x)", 4993 pr_name, result)); 4994 4995 return (err); 4996 } 4997 if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) { 4998 if (pr_val == NULL) { 4999 err = EINVAL; 5000 return (err); 5001 } 5002 (void) ddi_strtol(pr_val, (char 
	if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) {
		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);

		err = nxge_param_set_ip_opt(nxgep, NULL,
		    NULL, (char *)pr_val,
		    (caddr_t)&param_arr[param_class_opt_ipv4_sctp]);

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "<== nxge_set_priv_prop: name %s (value 0x%x)",
		    pr_name, result));

		return (err);
	}

	if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) {
		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);

		err = nxge_param_set_ip_opt(nxgep, NULL,
		    NULL, (char *)pr_val,
		    (caddr_t)&param_arr[param_class_opt_ipv6_tcp]);

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "<== nxge_set_priv_prop: name %s (value 0x%x)",
		    pr_name, result));

		return (err);
	}

	if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) {
		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);

		err = nxge_param_set_ip_opt(nxgep, NULL,
		    NULL, (char *)pr_val,
		    (caddr_t)&param_arr[param_class_opt_ipv6_udp]);

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "<== nxge_set_priv_prop: name %s (value 0x%x)",
		    pr_name, result));

		return (err);
	}
	if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) {
		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);

		err = nxge_param_set_ip_opt(nxgep, NULL,
		    NULL, (char *)pr_val,
		    (caddr_t)&param_arr[param_class_opt_ipv6_ah]);

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "<== nxge_set_priv_prop: name %s (value 0x%x)",
		    pr_name, result));

		return (err);
	}
	if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) {
		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);

		err = nxge_param_set_ip_opt(nxgep, NULL,
		    NULL, (char *)pr_val,
		    (caddr_t)&param_arr[param_class_opt_ipv6_sctp]);

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "<== nxge_set_priv_prop: name %s (value 0x%x)",
		    pr_name, result));

		return (err);
	}

	if (strcmp(pr_name, "_soft_lso_enable") == 0) {
		if (nxgep->nxge_mac_state == NXGE_MAC_STARTED) {
			NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
			    "==> nxge_set_priv_prop: name %s (busy)",
			    pr_name));
			err = EBUSY;
			return (err);
		}
		if (pr_val == NULL) {
			NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
			    "==> nxge_set_priv_prop: name %s (null)",
			    pr_name));
			err = EINVAL;
			return (err);
		}

		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "<== nxge_set_priv_prop: name %s "
		    "(lso %d pr_val %s value %d)",
		    pr_name, nxgep->soft_lso_enable, pr_val, result));

		if (result > 1 || result < 0) {
			err = EINVAL;
		} else {
			if (nxgep->soft_lso_enable == (uint32_t)result) {
				NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
				    "no change (%d %d)",
				    nxgep->soft_lso_enable, result));
				return (0);
			}
			nxgep->soft_lso_enable = (int)result;
		}

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "<== nxge_set_priv_prop: name %s (value %d)",
		    pr_name, result));

		return (err);
	}
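	/*
	 * Note on _soft_lso_enable: the value set here takes effect the
	 * next time the LSO capability is queried -- see MAC_CAPAB_LSO in
	 * nxge_m_getcapab() above, which advertises LSO_TX_BASIC_TCP_IPV4
	 * (capped at nxge_lso_max) only when soft_lso_enable is set and
	 * nxge_cksum_offload <= 1.
	 */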
	/*
	 * Commands like "ndd -set /dev/nxge0 adv_10gfdx_cap 1" cause the
	 * following code to be executed.
	 */
	if (strcmp(pr_name, "_adv_10gfdx_cap") == 0) {
		err = nxge_param_set_mac(nxgep, NULL, NULL, (char *)pr_val,
		    (caddr_t)&param_arr[param_anar_10gfdx]);
		return (err);
	}
	if (strcmp(pr_name, "_adv_pause_cap") == 0) {
		err = nxge_param_set_mac(nxgep, NULL, NULL, (char *)pr_val,
		    (caddr_t)&param_arr[param_anar_pause]);
		return (err);
	}

	return (EINVAL);
}

static int
nxge_get_priv_prop(p_nxge_t nxgep, const char *pr_name, uint_t pr_flags,
    uint_t pr_valsize, void *pr_val)
{
	p_nxge_param_t param_arr = nxgep->param_arr;
	char valstr[MAXNAMELEN];
	int err = EINVAL;
	uint_t strsize;
	boolean_t is_default = (pr_flags & MAC_PROP_DEFAULT);

	NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
	    "==> nxge_get_priv_prop: property %s", pr_name));

	/* function number */
	if (strcmp(pr_name, "_function_number") == 0) {
		if (is_default)
			return (ENOTSUP);
		(void) snprintf(valstr, sizeof (valstr), "%d",
		    nxgep->function_num);
		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_get_priv_prop: name %s "
		    "(value %d valstr %s)",
		    pr_name, nxgep->function_num, valstr));

		err = 0;
		goto done;
	}

	/* Neptune firmware version */
	if (strcmp(pr_name, "_fw_version") == 0) {
		if (is_default)
			return (ENOTSUP);
		(void) snprintf(valstr, sizeof (valstr), "%s",
		    nxgep->vpd_info.ver);
		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_get_priv_prop: name %s "
		    "(value %s valstr %s)",
		    pr_name, nxgep->vpd_info.ver, valstr));

		err = 0;
		goto done;
	}

	/* port PHY mode */
	if (strcmp(pr_name, "_port_mode") == 0) {
		if (is_default)
			return (ENOTSUP);
		switch (nxgep->mac.portmode) {
		case PORT_1G_COPPER:
			(void) snprintf(valstr, sizeof (valstr),
			    "1G copper %s",
			    nxgep->hot_swappable_phy ?
			    "[hot swappable]" : "");
			break;
		case PORT_1G_FIBER:
			(void) snprintf(valstr, sizeof (valstr), "1G fiber %s",
			    nxgep->hot_swappable_phy ?
			    "[hot swappable]" : "");
			break;
		case PORT_10G_COPPER:
			(void) snprintf(valstr, sizeof (valstr),
			    "10G copper %s",
			    nxgep->hot_swappable_phy ?
			    "[hot swappable]" : "");
			break;
		case PORT_10G_FIBER:
			(void) snprintf(valstr, sizeof (valstr),
			    "10G fiber %s",
			    nxgep->hot_swappable_phy ?
			    "[hot swappable]" : "");
			break;
		case PORT_10G_SERDES:
			(void) snprintf(valstr, sizeof (valstr),
			    "10G serdes %s", nxgep->hot_swappable_phy ?
			    "[hot swappable]" : "");
			break;
		case PORT_1G_SERDES:
			(void) snprintf(valstr, sizeof (valstr),
			    "1G serdes %s",
			    nxgep->hot_swappable_phy ?
			    "[hot swappable]" : "");
			break;
		case PORT_1G_TN1010:
			(void) snprintf(valstr, sizeof (valstr),
			    "1G TN1010 copper %s", nxgep->hot_swappable_phy ?
			    "[hot swappable]" : "");
			break;
		case PORT_10G_TN1010:
			(void) snprintf(valstr, sizeof (valstr),
			    "10G TN1010 copper %s", nxgep->hot_swappable_phy ?
			    "[hot swappable]" : "");
			break;
		case PORT_1G_RGMII_FIBER:
			(void) snprintf(valstr, sizeof (valstr),
			    "1G rgmii fiber %s", nxgep->hot_swappable_phy ?
			    "[hot swappable]" : "");
			break;
		case PORT_HSP_MODE:
			(void) snprintf(valstr, sizeof (valstr),
			    "phy not present[hot swappable]");
			break;
5243 "[hot swappable]" : ""); 5244 break; 5245 } 5246 5247 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5248 "==> nxge_get_priv_prop: name %s (value %s)", 5249 pr_name, valstr)); 5250 5251 err = 0; 5252 goto done; 5253 } 5254 5255 /* Hot swappable PHY */ 5256 if (strcmp(pr_name, "_hot_swap_phy") == 0) { 5257 if (is_default) 5258 return (ENOTSUP); 5259 (void) snprintf(valstr, sizeof (valstr), "%s", 5260 nxgep->hot_swappable_phy ? 5261 "yes" : "no"); 5262 5263 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5264 "==> nxge_get_priv_prop: name %s " 5265 "(value %d valstr %s)", 5266 pr_name, nxgep->hot_swappable_phy, valstr)); 5267 5268 err = 0; 5269 goto done; 5270 } 5271 5272 5273 /* accept jumbo */ 5274 if (strcmp(pr_name, "_accept_jumbo") == 0) { 5275 if (is_default) 5276 (void) snprintf(valstr, sizeof (valstr), "%d", 0); 5277 else 5278 (void) snprintf(valstr, sizeof (valstr), 5279 "%d", nxgep->mac.is_jumbo); 5280 err = 0; 5281 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5282 "==> nxge_get_priv_prop: name %s (value %d (%d, %d))", 5283 pr_name, 5284 (uint32_t)param_arr[param_accept_jumbo].value, 5285 nxgep->mac.is_jumbo, 5286 nxge_jumbo_enable)); 5287 5288 goto done; 5289 } 5290 5291 /* Receive Interrupt Blanking Parameters */ 5292 if (strcmp(pr_name, "_rxdma_intr_time") == 0) { 5293 err = 0; 5294 if (is_default) { 5295 (void) snprintf(valstr, sizeof (valstr), 5296 "%d", RXDMA_RCR_TO_DEFAULT); 5297 goto done; 5298 } 5299 5300 (void) snprintf(valstr, sizeof (valstr), "%d", 5301 nxgep->intr_timeout); 5302 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5303 "==> nxge_get_priv_prop: name %s (value %d)", 5304 pr_name, 5305 (uint32_t)nxgep->intr_timeout)); 5306 goto done; 5307 } 5308 5309 if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) { 5310 err = 0; 5311 if (is_default) { 5312 (void) snprintf(valstr, sizeof (valstr), 5313 "%d", RXDMA_RCR_PTHRES_DEFAULT); 5314 goto done; 5315 } 5316 (void) snprintf(valstr, sizeof (valstr), "%d", 5317 nxgep->intr_threshold); 5318 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5319 "==> nxge_get_priv_prop: name %s (value %d)", 5320 pr_name, (uint32_t)nxgep->intr_threshold)); 5321 5322 goto done; 5323 } 5324 5325 /* Classification and Load Distribution Configuration */ 5326 if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) { 5327 if (is_default) { 5328 (void) snprintf(valstr, sizeof (valstr), "%x", 5329 NXGE_CLASS_FLOW_GEN_SERVER); 5330 err = 0; 5331 goto done; 5332 } 5333 err = nxge_dld_get_ip_opt(nxgep, 5334 (caddr_t)¶m_arr[param_class_opt_ipv4_tcp]); 5335 5336 (void) snprintf(valstr, sizeof (valstr), "%x", 5337 (int)param_arr[param_class_opt_ipv4_tcp].value); 5338 5339 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5340 "==> nxge_get_priv_prop: %s", valstr)); 5341 goto done; 5342 } 5343 5344 if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) { 5345 if (is_default) { 5346 (void) snprintf(valstr, sizeof (valstr), "%x", 5347 NXGE_CLASS_FLOW_GEN_SERVER); 5348 err = 0; 5349 goto done; 5350 } 5351 err = nxge_dld_get_ip_opt(nxgep, 5352 (caddr_t)¶m_arr[param_class_opt_ipv4_udp]); 5353 5354 (void) snprintf(valstr, sizeof (valstr), "%x", 5355 (int)param_arr[param_class_opt_ipv4_udp].value); 5356 5357 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5358 "==> nxge_get_priv_prop: %s", valstr)); 5359 goto done; 5360 } 5361 if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) { 5362 if (is_default) { 5363 (void) snprintf(valstr, sizeof (valstr), "%x", 5364 NXGE_CLASS_FLOW_GEN_SERVER); 5365 err = 0; 5366 goto done; 5367 } 5368 err = nxge_dld_get_ip_opt(nxgep, 5369 (caddr_t)¶m_arr[param_class_opt_ipv4_ah]); 5370 5371 (void) snprintf(valstr, sizeof (valstr), "%x", 5372 
		    (int)param_arr[param_class_opt_ipv4_ah].value);

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_get_priv_prop: %s", valstr));
		goto done;
	}

	if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) {
		if (is_default) {
			(void) snprintf(valstr, sizeof (valstr), "%x",
			    NXGE_CLASS_FLOW_GEN_SERVER);
			err = 0;
			goto done;
		}
		err = nxge_dld_get_ip_opt(nxgep,
		    (caddr_t)&param_arr[param_class_opt_ipv4_sctp]);

		(void) snprintf(valstr, sizeof (valstr), "%x",
		    (int)param_arr[param_class_opt_ipv4_sctp].value);

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_get_priv_prop: %s", valstr));
		goto done;
	}

	if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) {
		if (is_default) {
			(void) snprintf(valstr, sizeof (valstr), "%x",
			    NXGE_CLASS_FLOW_GEN_SERVER);
			err = 0;
			goto done;
		}
		err = nxge_dld_get_ip_opt(nxgep,
		    (caddr_t)&param_arr[param_class_opt_ipv6_tcp]);

		(void) snprintf(valstr, sizeof (valstr), "%x",
		    (int)param_arr[param_class_opt_ipv6_tcp].value);

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_get_priv_prop: %s", valstr));
		goto done;
	}

	if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) {
		if (is_default) {
			(void) snprintf(valstr, sizeof (valstr), "%x",
			    NXGE_CLASS_FLOW_GEN_SERVER);
			err = 0;
			goto done;
		}
		err = nxge_dld_get_ip_opt(nxgep,
		    (caddr_t)&param_arr[param_class_opt_ipv6_udp]);

		(void) snprintf(valstr, sizeof (valstr), "%x",
		    (int)param_arr[param_class_opt_ipv6_udp].value);

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_get_priv_prop: %s", valstr));
		goto done;
	}

	if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) {
		if (is_default) {
			(void) snprintf(valstr, sizeof (valstr), "%x",
			    NXGE_CLASS_FLOW_GEN_SERVER);
			err = 0;
			goto done;
		}
		err = nxge_dld_get_ip_opt(nxgep,
		    (caddr_t)&param_arr[param_class_opt_ipv6_ah]);

		(void) snprintf(valstr, sizeof (valstr), "%x",
		    (int)param_arr[param_class_opt_ipv6_ah].value);

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_get_priv_prop: %s", valstr));
		goto done;
	}

	if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) {
		if (is_default) {
			(void) snprintf(valstr, sizeof (valstr), "%x",
			    NXGE_CLASS_FLOW_GEN_SERVER);
			err = 0;
			goto done;
		}
		err = nxge_dld_get_ip_opt(nxgep,
		    (caddr_t)&param_arr[param_class_opt_ipv6_sctp]);

		(void) snprintf(valstr, sizeof (valstr), "%x",
		    (int)param_arr[param_class_opt_ipv6_sctp].value);

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_get_priv_prop: %s", valstr));
		goto done;
	}

	/* Software LSO */
	if (strcmp(pr_name, "_soft_lso_enable") == 0) {
		if (is_default) {
			(void) snprintf(valstr, sizeof (valstr), "%d", 0);
			err = 0;
			goto done;
		}
		(void) snprintf(valstr, sizeof (valstr),
		    "%d", nxgep->soft_lso_enable);
		err = 0;
		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_get_priv_prop: name %s (value %d)",
		    pr_name, nxgep->soft_lso_enable));

		goto done;
	}
	if (strcmp(pr_name, "_adv_10gfdx_cap") == 0) {
		err = 0;
		if (is_default ||
		    nxgep->param_arr[param_anar_10gfdx].value != 0) {
			(void) snprintf(valstr, sizeof (valstr), "%d", 1);
			goto done;
		} else {
			(void) snprintf(valstr, sizeof (valstr), "%d", 0);
			goto done;
		}
	}
	if (strcmp(pr_name, "_adv_pause_cap") ==
0) { 5497 err = 0; 5498 if (is_default || 5499 nxgep->param_arr[param_anar_pause].value != 0) { 5500 (void) snprintf(valstr, sizeof (valstr), "%d", 1); 5501 goto done; 5502 } else { 5503 (void) snprintf(valstr, sizeof (valstr), "%d", 0); 5504 goto done; 5505 } 5506 } 5507 5508 done: 5509 if (err == 0) { 5510 strsize = (uint_t)strlen(valstr); 5511 if (pr_valsize < strsize) { 5512 err = ENOBUFS; 5513 } else { 5514 (void) strlcpy(pr_val, valstr, pr_valsize); 5515 } 5516 } 5517 5518 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5519 "<== nxge_get_priv_prop: return %d", err)); 5520 return (err); 5521 } 5522 5523 /* 5524 * Module loading and removing entry points. 5525 */ 5526 5527 DDI_DEFINE_STREAM_OPS(nxge_dev_ops, nulldev, nulldev, nxge_attach, nxge_detach, 5528 nodev, NULL, D_MP, NULL); 5529 5530 #define NXGE_DESC_VER "Sun NIU 10Gb Ethernet" 5531 5532 /* 5533 * Module linkage information for the kernel. 5534 */ 5535 static struct modldrv nxge_modldrv = { 5536 &mod_driverops, 5537 NXGE_DESC_VER, 5538 &nxge_dev_ops 5539 }; 5540 5541 static struct modlinkage modlinkage = { 5542 MODREV_1, (void *) &nxge_modldrv, NULL 5543 }; 5544 5545 int 5546 _init(void) 5547 { 5548 int status; 5549 5550 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _init")); 5551 mac_init_ops(&nxge_dev_ops, "nxge"); 5552 status = ddi_soft_state_init(&nxge_list, sizeof (nxge_t), 0); 5553 if (status != 0) { 5554 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 5555 "failed to init device soft state")); 5556 goto _init_exit; 5557 } 5558 status = mod_install(&modlinkage); 5559 if (status != 0) { 5560 ddi_soft_state_fini(&nxge_list); 5561 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "Mod install failed")); 5562 goto _init_exit; 5563 } 5564 5565 MUTEX_INIT(&nxge_common_lock, NULL, MUTEX_DRIVER, NULL); 5566 5567 _init_exit: 5568 NXGE_DEBUG_MSG((NULL, MOD_CTL, "_init status = 0x%X", status)); 5569 5570 return (status); 5571 } 5572 5573 int 5574 _fini(void) 5575 { 5576 int status; 5577 5578 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini")); 5579 5580 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini: mod_remove")); 5581 5582 if (nxge_mblks_pending) 5583 return (EBUSY); 5584 5585 status = mod_remove(&modlinkage); 5586 if (status != DDI_SUCCESS) { 5587 NXGE_DEBUG_MSG((NULL, MOD_CTL, 5588 "Module removal failed 0x%08x", 5589 status)); 5590 goto _fini_exit; 5591 } 5592 5593 mac_fini_ops(&nxge_dev_ops); 5594 5595 ddi_soft_state_fini(&nxge_list); 5596 5597 MUTEX_DESTROY(&nxge_common_lock); 5598 _fini_exit: 5599 NXGE_DEBUG_MSG((NULL, MOD_CTL, "_fini status = 0x%08x", status)); 5600 5601 return (status); 5602 } 5603 5604 int 5605 _info(struct modinfo *modinfop) 5606 { 5607 int status; 5608 5609 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _info")); 5610 status = mod_info(&modlinkage, modinfop); 5611 NXGE_DEBUG_MSG((NULL, MOD_CTL, " _info status = 0x%X", status)); 5612 5613 return (status); 5614 } 5615 5616 /*ARGSUSED*/ 5617 static nxge_status_t 5618 nxge_add_intrs(p_nxge_t nxgep) 5619 { 5620 5621 int intr_types; 5622 int type = 0; 5623 int ddi_status = DDI_SUCCESS; 5624 nxge_status_t status = NXGE_OK; 5625 5626 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs")); 5627 5628 nxgep->nxge_intr_type.intr_registered = B_FALSE; 5629 nxgep->nxge_intr_type.intr_enabled = B_FALSE; 5630 nxgep->nxge_intr_type.msi_intx_cnt = 0; 5631 nxgep->nxge_intr_type.intr_added = 0; 5632 nxgep->nxge_intr_type.niu_msi_enable = B_FALSE; 5633 nxgep->nxge_intr_type.intr_type = 0; 5634 5635 if (nxgep->niu_type == N2_NIU) { 5636 nxgep->nxge_intr_type.niu_msi_enable = B_TRUE; 5637 } else if (nxge_msi_enable) { 5638 
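		/*
		 * nxge_msi_enable is a driver tunable settable from
		 * /etc/system (for example, "set nxge:nxge_msi_enable = 2"
		 * requests MSI-X where supported; an illustrative line,
		 * not required configuration). Any non-zero value takes
		 * this branch and enables the MSI path.
		 */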
		nxgep->nxge_intr_type.niu_msi_enable = B_TRUE;
	}

	/* Get the supported interrupt types */
	if ((ddi_status = ddi_intr_get_supported_types(nxgep->dip,
	    &intr_types)) != DDI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_add_intrs: "
		    "ddi_intr_get_supported_types failed: status 0x%08x",
		    ddi_status));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}
	nxgep->nxge_intr_type.intr_types = intr_types;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
	    "ddi_intr_get_supported_types: 0x%08x", intr_types));

	/*
	 * Solaris MSI-X is not supported yet; use MSI for now.
	 * nxge_msi_enable:
	 *	1 - MSI		2 - MSI-X	others - FIXED
	 */
	switch (nxge_msi_enable) {
	default:
		type = DDI_INTR_TYPE_FIXED;
		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
		    "use fixed (intx emulation) type %08x",
		    type));
		break;

	case 2:
		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
		    "ddi_intr_get_supported_types: 0x%08x", intr_types));
		if (intr_types & DDI_INTR_TYPE_MSIX) {
			type = DDI_INTR_TYPE_MSIX;
			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
			    "ddi_intr_get_supported_types: MSIX 0x%08x",
			    type));
		} else if (intr_types & DDI_INTR_TYPE_MSI) {
			type = DDI_INTR_TYPE_MSI;
			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
			    "ddi_intr_get_supported_types: MSI 0x%08x",
			    type));
		} else if (intr_types & DDI_INTR_TYPE_FIXED) {
			type = DDI_INTR_TYPE_FIXED;
			NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
			    "ddi_intr_get_supported_types: FIXED 0x%08x",
			    type));
		}
		break;

	case 1:
		if (intr_types & DDI_INTR_TYPE_MSI) {
			type = DDI_INTR_TYPE_MSI;
			NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
			    "ddi_intr_get_supported_types: MSI 0x%08x",
			    type));
		} else if (intr_types & DDI_INTR_TYPE_MSIX) {
			type = DDI_INTR_TYPE_MSIX;
			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
			    "ddi_intr_get_supported_types: MSIX 0x%08x",
			    type));
		} else if (intr_types & DDI_INTR_TYPE_FIXED) {
			type = DDI_INTR_TYPE_FIXED;
			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
			    "ddi_intr_get_supported_types: FIXED 0x%08x",
			    type));
		}
	}

	nxgep->nxge_intr_type.intr_type = type;
	if ((type == DDI_INTR_TYPE_MSIX || type == DDI_INTR_TYPE_MSI ||
	    type == DDI_INTR_TYPE_FIXED) &&
	    nxgep->nxge_intr_type.niu_msi_enable) {
		if ((status = nxge_add_intrs_adv(nxgep)) != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    " nxge_add_intrs: "
			    " nxge_add_intrs_adv failed: status 0x%08x",
			    status));
			return (status);
		} else {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
			    "interrupts registered : type %d", type));
			nxgep->nxge_intr_type.intr_registered = B_TRUE;

			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "\nAdded advanced nxge add_intr_adv "
			    "intr type 0x%x\n", type));

			return (status);
		}
	}

	if (!nxgep->nxge_intr_type.intr_registered) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_add_intrs: "
		    "failed to register interrupts"));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_add_intrs"));
	return (status);
}
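/*
 * Summary of the selection policy implemented above (restating the code,
 * not adding new behavior):
 *
 *	nxge_msi_enable == 1:	prefer MSI, then MSI-X, then FIXED
 *	nxge_msi_enable == 2:	prefer MSI-X, then MSI, then FIXED
 *	any other value:	FIXED (INTx emulation)
 *
 * On N2/NIU, niu_msi_enable is forced to B_TRUE regardless of the
 * tunable, so the advanced registration path is always attempted.
 */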
/*ARGSUSED*/
static nxge_status_t
nxge_add_soft_intrs(p_nxge_t nxgep)
{
	int ddi_status = DDI_SUCCESS;
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_soft_intrs"));

	nxgep->resched_id = NULL;
	nxgep->resched_running = B_FALSE;
	ddi_status = ddi_add_softintr(nxgep->dip, DDI_SOFTINT_LOW,
	    &nxgep->resched_id,
	    NULL, NULL, nxge_reschedule, (caddr_t)nxgep);
	if (ddi_status != DDI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "<== nxge_add_soft_intrs: "
		    "ddi_add_softintr failed: status 0x%08x",
		    ddi_status));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_add_soft_intrs"));

	return (status);
}

static nxge_status_t
nxge_add_intrs_adv(p_nxge_t nxgep)
{
	int intr_type;
	p_nxge_intr_t intrp;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv"));

	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
	intr_type = intrp->intr_type;
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv: type 0x%x",
	    intr_type));

	switch (intr_type) {
	case DDI_INTR_TYPE_MSI:		/* 0x2 */
	case DDI_INTR_TYPE_MSIX:	/* 0x4 */
		return (nxge_add_intrs_adv_type(nxgep, intr_type));

	case DDI_INTR_TYPE_FIXED:	/* 0x1 */
		return (nxge_add_intrs_adv_type_fix(nxgep, intr_type));

	default:
		return (NXGE_ERROR);
	}
}


/*ARGSUSED*/
static nxge_status_t
nxge_add_intrs_adv_type(p_nxge_t nxgep, uint32_t int_type)
{
	dev_info_t *dip = nxgep->dip;
	p_nxge_ldg_t ldgp;
	p_nxge_intr_t intrp;
	uint_t *inthandler;
	void *arg1, *arg2;
	int behavior;
	int nintrs, navail, nrequest;
	int nactual, nrequired;
	int inum = 0;
	int x, y;
	int ddi_status = DDI_SUCCESS;
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type"));
	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
	intrp->start_inum = 0;

	ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
	if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "ddi_intr_get_nintrs() failed, status: 0x%x, "
		    "nintrs: %d", ddi_status, nintrs));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
	if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "ddi_intr_get_navail() failed, status: 0x%x, "
		    "navail: %d", ddi_status, navail));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	NXGE_DEBUG_MSG((nxgep, INT_CTL,
	    "ddi_intr_get_navail() returned: nintrs %d, navail %d",
	    nintrs, navail));

	/* PSARC/2007/453 MSI-X interrupt limit override */
	if (int_type == DDI_INTR_TYPE_MSIX) {
		nrequest = nxge_create_msi_property(nxgep);
		if (nrequest < navail) {
			navail = nrequest;
			NXGE_DEBUG_MSG((nxgep, INT_CTL,
			    "nxge_add_intrs_adv_type: nintrs %d "
			    "navail %d (nrequest %d)",
			    nintrs, navail, nrequest));
		}
	}

	if (int_type == DDI_INTR_TYPE_MSI && !ISP2(navail)) {
		/* MSI must be power of 2 */
		if ((navail & 16) == 16) {
			navail = 16;
		} else if ((navail & 8) == 8) {
			navail = 8;
		} else if ((navail & 4) == 4) {
			navail = 4;
		} else if ((navail & 2) == 2) {
			navail = 2;
		} else {
			navail = 1;
		}
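		/*
		 * Example: navail = 20 (binary 10100) is rounded down to
		 * 16, and navail = 13 (binary 01101) to 8; a count that
		 * is already a power of 2 never enters this block (ISP2
		 * check above).
		 */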
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "ddi_intr_get_navail(): (msi power of 2) nintrs %d, "
		    "navail %d", nintrs, navail));
	}

	behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
	    DDI_INTR_ALLOC_NORMAL);
	intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
	intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP);
	ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
	    navail, &nactual, behavior);
	if (ddi_status != DDI_SUCCESS || nactual == 0) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " ddi_intr_alloc() failed: %d",
		    ddi_status));
		kmem_free(intrp->htable, intrp->intr_size);
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
	    (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " ddi_intr_get_pri() failed: %d",
		    ddi_status));
		/* Free already allocated interrupts */
		for (y = 0; y < nactual; y++) {
			(void) ddi_intr_free(intrp->htable[y]);
		}

		kmem_free(intrp->htable, intrp->intr_size);
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	nrequired = 0;
	switch (nxgep->niu_type) {
	default:
		status = nxge_ldgv_init(nxgep, &nactual, &nrequired);
		break;

	case N2_NIU:
		status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired);
		break;
	}

	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_add_intrs_adv_type: nxge_ldgv_init "
		    "failed: 0x%x", status));
		/* Free already allocated interrupts */
		for (y = 0; y < nactual; y++) {
			(void) ddi_intr_free(intrp->htable[y]);
		}

		kmem_free(intrp->htable, intrp->intr_size);
		return (status);
	}

	ldgp = nxgep->ldgvp->ldgp;
	for (x = 0; x < nrequired; x++, ldgp++) {
		ldgp->vector = (uint8_t)x;
		ldgp->intdata = SID_DATA(ldgp->func, x);
		arg1 = ldgp->ldvp;
		arg2 = nxgep;
		if (ldgp->nldvs == 1) {
			inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
			NXGE_DEBUG_MSG((nxgep, INT_CTL,
			    "nxge_add_intrs_adv_type: "
			    "arg1 0x%x arg2 0x%x: "
			    "1-1 int handler (entry %d intdata 0x%x)\n",
			    arg1, arg2,
			    x, ldgp->intdata));
		} else if (ldgp->nldvs > 1) {
			inthandler = (uint_t *)ldgp->sys_intr_handler;
			NXGE_DEBUG_MSG((nxgep, INT_CTL,
			    "nxge_add_intrs_adv_type: "
			    "arg1 0x%x arg2 0x%x: "
			    "nldvs %d int handler "
			    "(entry %d intdata 0x%x)\n",
			    arg1, arg2,
			    ldgp->nldvs, x, ldgp->intdata));
		}

		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "==> nxge_add_intrs_adv_type: ddi_add_intr(inum) #%d "
		    "htable 0x%llx", x, intrp->htable[x]));

		if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
		    (ddi_intr_handler_t *)inthandler, arg1, arg2))
		    != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_add_intrs_adv_type: failed #%d "
			    "status 0x%x", x, ddi_status));
			for (y = 0; y < intrp->intr_added; y++) {
				(void) ddi_intr_remove_handler(
				    intrp->htable[y]);
			}
			/* Free already allocated intr */
			for (y = 0; y < nactual; y++) {
				(void) ddi_intr_free(intrp->htable[y]);
			}
			kmem_free(intrp->htable, intrp->intr_size);

			(void) nxge_ldgv_uninit(nxgep);

			return (NXGE_ERROR | NXGE_DDI_FAILED);
		}
		intrp->intr_added++;
	}

	intrp->msi_intx_cnt = nactual;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "Requested: %d, Allowed: %d msi_intx_cnt %d intr_added %d",
	    navail, nactual,
	    intrp->msi_intx_cnt,
	    intrp->intr_added));
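	/*
	 * The capability word fetched below is consulted at teardown:
	 * nxge_remove_intrs() checks DDI_INTR_FLAG_BLOCK in intr_cap to
	 * decide between ddi_intr_block_disable() and per-vector
	 * ddi_intr_disable().
	 */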
ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap); 5978 5979 (void) nxge_intr_ldgv_init(nxgep); 5980 5981 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type")); 5982 5983 return (status); 5984 } 5985 5986 /*ARGSUSED*/ 5987 static nxge_status_t 5988 nxge_add_intrs_adv_type_fix(p_nxge_t nxgep, uint32_t int_type) 5989 { 5990 dev_info_t *dip = nxgep->dip; 5991 p_nxge_ldg_t ldgp; 5992 p_nxge_intr_t intrp; 5993 uint_t *inthandler; 5994 void *arg1, *arg2; 5995 int behavior; 5996 int nintrs, navail; 5997 int nactual, nrequired; 5998 int inum = 0; 5999 int x, y; 6000 int ddi_status = DDI_SUCCESS; 6001 nxge_status_t status = NXGE_OK; 6002 6003 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type_fix")); 6004 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 6005 intrp->start_inum = 0; 6006 6007 ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs); 6008 if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) { 6009 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 6010 "ddi_intr_get_nintrs() failed, status: 0x%x, " 6011 "nintrs: %d", ddi_status, nintrs)); 6012 return (NXGE_ERROR | NXGE_DDI_FAILED); 6013 } 6014 6015 ddi_status = ddi_intr_get_navail(dip, int_type, &navail); 6016 if ((ddi_status != DDI_SUCCESS) || (navail == 0)) { 6017 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 6018 "ddi_intr_get_navail() failed, status: 0x%x, " 6019 "navail: %d", ddi_status, navail)); 6020 return (NXGE_ERROR | NXGE_DDI_FAILED); 6021 } 6022 6023 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6024 "ddi_intr_get_navail() returned: nintrs %d, navail %d", 6025 nintrs, navail)); 6026 6027 behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT : 6028 DDI_INTR_ALLOC_NORMAL); 6029 intrp->intr_size = navail * sizeof (ddi_intr_handle_t); 6030 intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP); 6031 ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum, 6032 navail, &nactual, behavior); 6033 if (ddi_status != DDI_SUCCESS || nactual == 0) { 6034 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 6035 " ddi_intr_alloc() failed: %d", 6036 ddi_status)); 6037 kmem_free(intrp->htable, intrp->intr_size); 6038 return (NXGE_ERROR | NXGE_DDI_FAILED); 6039 } 6040 6041 if ((ddi_status = ddi_intr_get_pri(intrp->htable[0], 6042 (uint_t *)&intrp->pri)) != DDI_SUCCESS) { 6043 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 6044 " ddi_intr_get_pri() failed: %d", 6045 ddi_status)); 6046 /* Free already allocated interrupts */ 6047 for (y = 0; y < nactual; y++) { 6048 (void) ddi_intr_free(intrp->htable[y]); 6049 } 6050 6051 kmem_free(intrp->htable, intrp->intr_size); 6052 return (NXGE_ERROR | NXGE_DDI_FAILED); 6053 } 6054 6055 nrequired = 0; 6056 switch (nxgep->niu_type) { 6057 default: 6058 status = nxge_ldgv_init(nxgep, &nactual, &nrequired); 6059 break; 6060 6061 case N2_NIU: 6062 status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired); 6063 break; 6064 } 6065 6066 if (status != NXGE_OK) { 6067 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 6068 "nxge_add_intrs_adv_type_fix: nxge_ldgv_init " 6069 "failed: 0x%x", status)); 6070 /* Free already allocated interrupts */ 6071 for (y = 0; y < nactual; y++) { 6072 (void) ddi_intr_free(intrp->htable[y]); 6073 } 6074 6075 kmem_free(intrp->htable, intrp->intr_size); 6076 return (status); 6077 } 6078 6079 ldgp = nxgep->ldgvp->ldgp; 6080 for (x = 0; x < nrequired; x++, ldgp++) { 6081 ldgp->vector = (uint8_t)x; 6082 if (nxgep->niu_type != N2_NIU) { 6083 ldgp->intdata = SID_DATA(ldgp->func, x); 6084 } 6085 6086 arg1 = ldgp->ldvp; 6087 arg2 = nxgep; 6088 if (ldgp->nldvs == 1) { 6089 inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler; 6090
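/* A group with a single logical device dispatches directly to that device's own handler (ldv_intr_handler); groups with several devices use the shared sys_intr_handler below. */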
NXGE_DEBUG_MSG((nxgep, INT_CTL, 6091 "nxge_add_intrs_adv_type_fix: " 6092 "1-1 int handler(%d) ldg %d ldv %d " 6093 "arg1 $%p arg2 $%p\n", 6094 x, ldgp->ldg, ldgp->ldvp->ldv, 6095 arg1, arg2)); 6096 } else if (ldgp->nldvs > 1) { 6097 inthandler = (uint_t *)ldgp->sys_intr_handler; 6098 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6099 "nxge_add_intrs_adv_type_fix: " 6100 "shared int handler(%d) nldvs %d ldg %d ldv %d " 6101 "arg1 0x%016llx arg2 0x%016llx\n", 6102 x, ldgp->nldvs, ldgp->ldg, ldgp->ldvp->ldv, 6103 arg1, arg2)); 6104 } 6105 6106 if ((ddi_status = ddi_intr_add_handler(intrp->htable[x], 6107 (ddi_intr_handler_t *)inthandler, arg1, arg2)) 6108 != DDI_SUCCESS) { 6109 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 6110 "==> nxge_add_intrs_adv_type_fix: failed #%d " 6111 "status 0x%x", x, ddi_status)); 6112 for (y = 0; y < intrp->intr_added; y++) { 6113 (void) ddi_intr_remove_handler( 6114 intrp->htable[y]); 6115 } 6116 /* Free already allocated interrupts */ 6117 for (y = 0; y < nactual; y++) { 6118 (void) ddi_intr_free(intrp->htable[y]); 6119 } 6120 kmem_free(intrp->htable, intrp->intr_size); 6121 6122 (void) nxge_ldgv_uninit(nxgep); 6123 6124 return (NXGE_ERROR | NXGE_DDI_FAILED); 6125 } 6126 intrp->intr_added++; 6127 } 6128 6129 intrp->msi_intx_cnt = nactual; 6130 6131 (void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap); 6132 6133 status = nxge_intr_ldgv_init(nxgep); 6134 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type_fix")); 6135 6136 return (status); 6137 } 6138 6139 static void 6140 nxge_remove_intrs(p_nxge_t nxgep) 6141 { 6142 int i, inum; 6143 p_nxge_intr_t intrp; 6144 6145 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs")); 6146 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 6147 if (!intrp->intr_registered) { 6148 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6149 "<== nxge_remove_intrs: interrupts not registered")); 6150 return; 6151 } 6152 6153 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs: advanced")); 6154 6155 if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) { 6156 (void) ddi_intr_block_disable(intrp->htable, 6157 intrp->intr_added); 6158 } else { 6159 for (i = 0; i < intrp->intr_added; i++) { 6160 (void) ddi_intr_disable(intrp->htable[i]); 6161 } 6162 } 6163 6164 for (inum = 0; inum < intrp->intr_added; inum++) { 6165 if (intrp->htable[inum]) { 6166 (void) ddi_intr_remove_handler(intrp->htable[inum]); 6167 } 6168 } 6169 6170 for (inum = 0; inum < intrp->msi_intx_cnt; inum++) { 6171 if (intrp->htable[inum]) { 6172 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 6173 "nxge_remove_intrs: ddi_intr_free inum %d " 6174 "msi_intx_cnt %d intr_added %d", 6175 inum, 6176 intrp->msi_intx_cnt, 6177 intrp->intr_added)); 6178 6179 (void) ddi_intr_free(intrp->htable[inum]); 6180 } 6181 } 6182 6183 kmem_free(intrp->htable, intrp->intr_size); 6184 intrp->intr_registered = B_FALSE; 6185 intrp->intr_enabled = B_FALSE; 6186 intrp->msi_intx_cnt = 0; 6187 intrp->intr_added = 0; 6188 6189 (void) nxge_ldgv_uninit(nxgep); 6190 6191 (void) ddi_prop_remove(DDI_DEV_T_NONE, nxgep->dip, 6192 "#msix-request"); 6193 6194 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_remove_intrs")); 6195 } 6196 6197 /*ARGSUSED*/ 6198 static void 6199 nxge_remove_soft_intrs(p_nxge_t nxgep) 6200 { 6201 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_soft_intrs")); 6202 if (nxgep->resched_id) { 6203 ddi_remove_softintr(nxgep->resched_id); 6204 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6205 "==> nxge_remove_soft_intrs: removed")); 6206 nxgep->resched_id = NULL; 6207 } 6208 6209 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_remove_soft_intrs")); 6210 } 6211 6212
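/*
 * nxge_intrs_enable: enable all registered interrupt handles. When the
 * allocated vectors advertise the DDI_INTR_FLAG_BLOCK capability they
 * are enabled as a block with ddi_intr_block_enable(); otherwise each
 * handle is enabled individually with ddi_intr_enable().
 */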
/*ARGSUSED*/ 6213 static void 6214 nxge_intrs_enable(p_nxge_t nxgep) 6215 { 6216 p_nxge_intr_t intrp; 6217 int i; 6218 int status; 6219 6220 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable")); 6221 6222 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 6223 6224 if (!intrp->intr_registered) { 6225 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_intrs_enable: " 6226 "interrupts are not registered")); 6227 return; 6228 } 6229 6230 if (intrp->intr_enabled) { 6231 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6232 "<== nxge_intrs_enable: already enabled")); 6233 return; 6234 } 6235 6236 if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) { 6237 status = ddi_intr_block_enable(intrp->htable, 6238 intrp->intr_added); 6239 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable " 6240 "block enable - status 0x%x total inums #%d\n", 6241 status, intrp->intr_added)); 6242 } else { 6243 for (i = 0; i < intrp->intr_added; i++) { 6244 status = ddi_intr_enable(intrp->htable[i]); 6245 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable " 6246 "ddi_intr_enable:enable - status 0x%x " 6247 "total inums %d enable inum #%d\n", 6248 status, intrp->intr_added, i)); 6249 if (status == DDI_SUCCESS) { 6250 intrp->intr_enabled = B_TRUE; 6251 } 6252 } 6253 } 6254 6255 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_enable")); 6256 } 6257 6258 /*ARGSUSED*/ 6259 static void 6260 nxge_intrs_disable(p_nxge_t nxgep) 6261 { 6262 p_nxge_intr_t intrp; 6263 int i; 6264 6265 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_disable")); 6266 6267 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 6268 6269 if (!intrp->intr_registered) { 6270 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable: " 6271 "interrupts are not registered")); 6272 return; 6273 } 6274 6275 if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) { 6276 (void) ddi_intr_block_disable(intrp->htable, 6277 intrp->intr_added); 6278 } else { 6279 for (i = 0; i < intrp->intr_added; i++) { 6280 (void) ddi_intr_disable(intrp->htable[i]); 6281 } 6282 } 6283 6284 intrp->intr_enabled = B_FALSE; 6285 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable")); 6286 } 6287 6288 static nxge_status_t 6289 nxge_mac_register(p_nxge_t nxgep) 6290 { 6291 mac_register_t *macp; 6292 int status; 6293 6294 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_mac_register")); 6295 6296 if ((macp = mac_alloc(MAC_VERSION)) == NULL) 6297 return (NXGE_ERROR); 6298 6299 macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER; 6300 macp->m_driver = nxgep; 6301 macp->m_dip = nxgep->dip; 6302 macp->m_src_addr = nxgep->ouraddr.ether_addr_octet; 6303 macp->m_callbacks = &nxge_m_callbacks; 6304 macp->m_min_sdu = 0; 6305 nxgep->mac.default_mtu = nxgep->mac.maxframesize - 6306 NXGE_EHEADER_VLAN_CRC; 6307 macp->m_max_sdu = nxgep->mac.default_mtu; 6308 macp->m_margin = VLAN_TAGSZ; 6309 macp->m_priv_props = nxge_priv_props; 6310 macp->m_priv_prop_count = NXGE_MAX_PRIV_PROPS; 6311 6312 NXGE_DEBUG_MSG((nxgep, MAC_CTL, 6313 "==> nxge_mac_register: instance %d " 6314 "max_sdu %d margin %d maxframe %d (header %d)", 6315 nxgep->instance, 6316 macp->m_max_sdu, macp->m_margin, 6317 nxgep->mac.maxframesize, 6318 NXGE_EHEADER_VLAN_CRC)); 6319 6320 status = mac_register(macp, &nxgep->mach); 6321 mac_free(macp); 6322 6323 if (status != 0) { 6324 cmn_err(CE_WARN, 6325 "!nxge_mac_register failed (status %d instance %d)", 6326 status, nxgep->instance); 6327 return (NXGE_ERROR); 6328 } 6329 6330 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_mac_register success " 6331 "(instance %d)", nxgep->instance)); 6332 6333 return (NXGE_OK); 6334 } 6335 6336 void 6337 
nxge_err_inject(p_nxge_t nxgep, queue_t *wq, mblk_t *mp) 6338 { 6339 ssize_t size; 6340 mblk_t *nmp; 6341 uint8_t blk_id; 6342 uint8_t chan; 6343 uint32_t err_id; 6344 err_inject_t *eip; 6345 6346 NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_err_inject")); 6347 6348 size = 1024; 6349 nmp = mp->b_cont; 6350 eip = (err_inject_t *)nmp->b_rptr; 6351 blk_id = eip->blk_id; 6352 err_id = eip->err_id; 6353 chan = eip->chan; 6354 cmn_err(CE_NOTE, "!blk_id = 0x%x\n", blk_id); 6355 cmn_err(CE_NOTE, "!err_id = 0x%x\n", err_id); 6356 cmn_err(CE_NOTE, "!chan = 0x%x\n", chan); 6357 switch (blk_id) { 6358 case MAC_BLK_ID: 6359 break; 6360 case TXMAC_BLK_ID: 6361 break; 6362 case RXMAC_BLK_ID: 6363 break; 6364 case MIF_BLK_ID: 6365 break; 6366 case IPP_BLK_ID: 6367 nxge_ipp_inject_err(nxgep, err_id); 6368 break; 6369 case TXC_BLK_ID: 6370 nxge_txc_inject_err(nxgep, err_id); 6371 break; 6372 case TXDMA_BLK_ID: 6373 nxge_txdma_inject_err(nxgep, err_id, chan); 6374 break; 6375 case RXDMA_BLK_ID: 6376 nxge_rxdma_inject_err(nxgep, err_id, chan); 6377 break; 6378 case ZCP_BLK_ID: 6379 nxge_zcp_inject_err(nxgep, err_id); 6380 break; 6381 case ESPC_BLK_ID: 6382 break; 6383 case FFLP_BLK_ID: 6384 break; 6385 case PHY_BLK_ID: 6386 break; 6387 case ETHER_SERDES_BLK_ID: 6388 break; 6389 case PCIE_SERDES_BLK_ID: 6390 break; 6391 case VIR_BLK_ID: 6392 break; 6393 } 6394 6395 nmp->b_wptr = nmp->b_rptr + size; 6396 NXGE_DEBUG_MSG((nxgep, STR_CTL, "<== nxge_err_inject")); 6397 6398 miocack(wq, mp, (int)size, 0); 6399 } 6400 6401 static int 6402 nxge_init_common_dev(p_nxge_t nxgep) 6403 { 6404 p_nxge_hw_list_t hw_p; 6405 dev_info_t *p_dip; 6406 6407 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_init_common_device")); 6408 6409 p_dip = nxgep->p_dip; 6410 MUTEX_ENTER(&nxge_common_lock); 6411 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6412 "==> nxge_init_common_dev:func # %d", 6413 nxgep->function_num)); 6414 /* 6415 * Loop through the existing per-Neptune hardware list.
6416 */ 6417 for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) { 6418 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6419 "==> nxge_init_common_device:func # %d " 6420 "hw_p $%p parent dip $%p", 6421 nxgep->function_num, 6422 hw_p, 6423 p_dip)); 6424 if (hw_p->parent_devp == p_dip) { 6425 nxgep->nxge_hw_p = hw_p; 6426 hw_p->ndevs++; 6427 hw_p->nxge_p[nxgep->function_num] = nxgep; 6428 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6429 "==> nxge_init_common_device:func # %d " 6430 "hw_p $%p parent dip $%p " 6431 "ndevs %d (found)", 6432 nxgep->function_num, 6433 hw_p, 6434 p_dip, 6435 hw_p->ndevs)); 6436 break; 6437 } 6438 } 6439 6440 if (hw_p == NULL) { 6441 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6442 "==> nxge_init_common_device:func # %d " 6443 "parent dip $%p (new)", 6444 nxgep->function_num, 6445 p_dip)); 6446 hw_p = kmem_zalloc(sizeof (nxge_hw_list_t), KM_SLEEP); 6447 hw_p->parent_devp = p_dip; 6448 hw_p->magic = NXGE_NEPTUNE_MAGIC; 6449 nxgep->nxge_hw_p = hw_p; 6450 hw_p->ndevs++; 6451 hw_p->nxge_p[nxgep->function_num] = nxgep; 6452 hw_p->next = nxge_hw_list; 6453 if (nxgep->niu_type == N2_NIU) { 6454 hw_p->niu_type = N2_NIU; 6455 hw_p->platform_type = P_NEPTUNE_NIU; 6456 } else { 6457 hw_p->niu_type = NIU_TYPE_NONE; 6458 hw_p->platform_type = P_NEPTUNE_NONE; 6459 } 6460 6461 MUTEX_INIT(&hw_p->nxge_cfg_lock, NULL, MUTEX_DRIVER, NULL); 6462 MUTEX_INIT(&hw_p->nxge_tcam_lock, NULL, MUTEX_DRIVER, NULL); 6463 MUTEX_INIT(&hw_p->nxge_vlan_lock, NULL, MUTEX_DRIVER, NULL); 6464 MUTEX_INIT(&hw_p->nxge_mdio_lock, NULL, MUTEX_DRIVER, NULL); 6465 6466 nxge_hw_list = hw_p; 6467 6468 (void) nxge_scan_ports_phy(nxgep, nxge_hw_list); 6469 } 6470 6471 MUTEX_EXIT(&nxge_common_lock); 6472 6473 nxgep->platform_type = hw_p->platform_type; 6474 if (nxgep->niu_type != N2_NIU) { 6475 nxgep->niu_type = hw_p->niu_type; 6476 } 6477 6478 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6479 "==> nxge_init_common_device (nxge_hw_list) $%p", 6480 nxge_hw_list)); 6481 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<== nxge_init_common_device")); 6482 6483 return (NXGE_OK); 6484 } 6485 6486 static void 6487 nxge_uninit_common_dev(p_nxge_t nxgep) 6488 { 6489 p_nxge_hw_list_t hw_p, h_hw_p; 6490 p_nxge_dma_pt_cfg_t p_dma_cfgp; 6491 p_nxge_hw_pt_cfg_t p_cfgp; 6492 dev_info_t *p_dip; 6493 6494 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_uninit_common_device")); 6495 if (nxgep->nxge_hw_p == NULL) { 6496 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6497 "<== nxge_uninit_common_device (no common)")); 6498 return; 6499 } 6500 6501 MUTEX_ENTER(&nxge_common_lock); 6502 h_hw_p = nxge_hw_list; 6503 for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) { 6504 p_dip = hw_p->parent_devp; 6505 if (nxgep->nxge_hw_p == hw_p && 6506 p_dip == nxgep->p_dip && 6507 nxgep->nxge_hw_p->magic == NXGE_NEPTUNE_MAGIC && 6508 hw_p->magic == NXGE_NEPTUNE_MAGIC) { 6509 6510 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6511 "==> nxge_uninit_common_device:func # %d " 6512 "hw_p $%p parent dip $%p " 6513 "ndevs %d (found)", 6514 nxgep->function_num, 6515 hw_p, 6516 p_dip, 6517 hw_p->ndevs)); 6518 6519 /* 6520 * Release the RDC table, a shared resource 6521 * of the nxge hardware. The RDC table was 6522 * assigned to this instance of nxge in 6523 * nxge_use_cfg_dma_config().
6524 */ 6525 p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config; 6526 p_cfgp = (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config; 6527 (void) nxge_fzc_rdc_tbl_unbind(nxgep, 6528 p_cfgp->def_mac_rxdma_grpid); 6529 6530 if (hw_p->ndevs) { 6531 hw_p->ndevs--; 6532 } 6533 hw_p->nxge_p[nxgep->function_num] = NULL; 6534 if (!hw_p->ndevs) { 6535 MUTEX_DESTROY(&hw_p->nxge_vlan_lock); 6536 MUTEX_DESTROY(&hw_p->nxge_tcam_lock); 6537 MUTEX_DESTROY(&hw_p->nxge_cfg_lock); 6538 MUTEX_DESTROY(&hw_p->nxge_mdio_lock); 6539 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6540 "==> nxge_uninit_common_device: " 6541 "func # %d " 6542 "hw_p $%p parent dip $%p " 6543 "ndevs %d (last)", 6544 nxgep->function_num, 6545 hw_p, 6546 p_dip, 6547 hw_p->ndevs)); 6548 6549 nxge_hio_uninit(nxgep); 6550 6551 if (hw_p == nxge_hw_list) { 6552 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6553 "==> nxge_uninit_common_device:" 6554 "remove head func # %d " 6555 "hw_p $%p parent dip $%p " 6556 "ndevs %d (head)", 6557 nxgep->function_num, 6558 hw_p, 6559 p_dip, 6560 hw_p->ndevs)); 6561 nxge_hw_list = hw_p->next; 6562 } else { 6563 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6564 "==> nxge_uninit_common_device:" 6565 "remove middle func # %d " 6566 "hw_p $%p parent dip $%p " 6567 "ndevs %d (middle)", 6568 nxgep->function_num, 6569 hw_p, 6570 p_dip, 6571 hw_p->ndevs)); 6572 h_hw_p->next = hw_p->next; 6573 } 6574 6575 nxgep->nxge_hw_p = NULL; 6576 KMEM_FREE(hw_p, sizeof (nxge_hw_list_t)); 6577 } 6578 break; 6579 } else { 6580 h_hw_p = hw_p; 6581 } 6582 } 6583 6584 MUTEX_EXIT(&nxge_common_lock); 6585 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6586 "==> nxge_uninit_common_device (nxge_hw_list) $%p", 6587 nxge_hw_list)); 6588 6589 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<== nxge_uninit_common_device")); 6590 } 6591 6592 /* 6593 * Determines the number of ports from the niu_type or the platform type. 6594 * Returns the number of ports, or returns zero on failure. 6595 */ 6596 6597 int 6598 nxge_get_nports(p_nxge_t nxgep) 6599 { 6600 int nports = 0; 6601 6602 switch (nxgep->niu_type) { 6603 case N2_NIU: 6604 case NEPTUNE_2_10GF: 6605 nports = 2; 6606 break; 6607 case NEPTUNE_4_1GC: 6608 case NEPTUNE_2_10GF_2_1GC: 6609 case NEPTUNE_1_10GF_3_1GC: 6610 case NEPTUNE_1_1GC_1_10GF_2_1GC: 6611 case NEPTUNE_2_10GF_2_1GRF: 6612 nports = 4; 6613 break; 6614 default: 6615 switch (nxgep->platform_type) { 6616 case P_NEPTUNE_NIU: 6617 case P_NEPTUNE_ATLAS_2PORT: 6618 nports = 2; 6619 break; 6620 case P_NEPTUNE_ATLAS_4PORT: 6621 case P_NEPTUNE_MARAMBA_P0: 6622 case P_NEPTUNE_MARAMBA_P1: 6623 case P_NEPTUNE_ALONSO: 6624 nports = 4; 6625 break; 6626 default: 6627 break; 6628 } 6629 break; 6630 } 6631 6632 return (nports); 6633 } 6634 6635 /* 6636 * The following two functions are to support 6637 * PSARC/2007/453 MSI-X interrupt limit override. 6638 */ 6639 static int 6640 nxge_create_msi_property(p_nxge_t nxgep) 6641 { 6642 int nmsi; 6643 extern int ncpus; 6644 6645 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==>nxge_create_msi_property")); 6646 6647 switch (nxgep->mac.portmode) { 6648 case PORT_10G_COPPER: 6649 case PORT_10G_FIBER: 6650 case PORT_10G_TN1010: 6651 (void) ddi_prop_create(DDI_DEV_T_NONE, nxgep->dip, 6652 DDI_PROP_CANSLEEP, "#msix-request", NULL, 0); 6653 /* 6654 * The maximum number of MSI-X vectors requested will be 8. 6655 * If the # of CPUs is less than 8, we will request 6656 * # MSI-X based on the # of CPUs.
6657 */ 6658 if (ncpus >= NXGE_MSIX_REQUEST_10G) { 6659 nmsi = NXGE_MSIX_REQUEST_10G; 6660 } else { 6661 nmsi = ncpus; 6662 } 6663 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6664 "==>nxge_create_msi_property(10G): exists 0x%x (nmsi %d)", 6665 ddi_prop_exists(DDI_DEV_T_NONE, nxgep->dip, 6666 DDI_PROP_CANSLEEP, "#msix-request"), nmsi)); 6667 break; 6668 6669 default: 6670 nmsi = NXGE_MSIX_REQUEST_1G; 6671 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6672 "==>nxge_create_msi_property(1G): exists 0x%x (nmsi %d)", 6673 ddi_prop_exists(DDI_DEV_T_NONE, nxgep->dip, 6674 DDI_PROP_CANSLEEP, "#msix-request"), nmsi)); 6675 break; 6676 } 6677 6678 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<==nxge_create_msi_property")); 6679 return (nmsi); 6680 } 6681 6682 /* ARGSUSED */ 6683 static int 6684 nxge_get_def_val(nxge_t *nxgep, mac_prop_id_t pr_num, uint_t pr_valsize, 6685 void *pr_val) 6686 { 6687 int err = 0; 6688 link_flowctrl_t fl; 6689 6690 switch (pr_num) { 6691 case MAC_PROP_AUTONEG: 6692 *(uint8_t *)pr_val = 1; 6693 break; 6694 case MAC_PROP_FLOWCTRL: 6695 if (pr_valsize < sizeof (link_flowctrl_t)) 6696 return (EINVAL); 6697 fl = LINK_FLOWCTRL_RX; 6698 bcopy(&fl, pr_val, sizeof (fl)); 6699 break; 6700 case MAC_PROP_ADV_1000FDX_CAP: 6701 case MAC_PROP_EN_1000FDX_CAP: 6702 *(uint8_t *)pr_val = 1; 6703 break; 6704 case MAC_PROP_ADV_100FDX_CAP: 6705 case MAC_PROP_EN_100FDX_CAP: 6706 *(uint8_t *)pr_val = 1; 6707 break; 6708 default: 6709 err = ENOTSUP; 6710 break; 6711 } 6712 return (err); 6713 } 6714 6715 6716 /* 6717 * The following is a software workaround for the Neptune hardware's 6718 * interrupt bugs: the hardware may generate spurious interrupts after 6719 * an interrupt handler has been removed. 6720 */ 6721 #define NXGE_PCI_PORT_LOGIC_OFFSET 0x98 6722 #define NXGE_PIM_RESET (1ULL << 29) 6723 #define NXGE_GLU_RESET (1ULL << 30) 6724 #define NXGE_NIU_RESET (1ULL << 31) 6725 #define NXGE_PCI_RESET_ALL (NXGE_PIM_RESET | \ 6726 NXGE_GLU_RESET | \ 6727 NXGE_NIU_RESET) 6728 6729 #define NXGE_WAIT_QUITE_TIME 200000 /* 200 milliseconds */ 6730 #define NXGE_WAIT_QUITE_RETRY 40 6731 #define NXGE_PCI_RESET_WAIT 1000000 /* one second */ 6732 6733 static void 6734 nxge_niu_peu_reset(p_nxge_t nxgep) 6735 { 6736 uint32_t rvalue; 6737 p_nxge_hw_list_t hw_p; 6738 p_nxge_t fnxgep; 6739 int i, j; 6740 6741 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_niu_peu_reset")); 6742 if ((hw_p = nxgep->nxge_hw_p) == NULL) { 6743 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 6744 "==> nxge_niu_peu_reset: NULL hardware pointer")); 6745 return; 6746 } 6747 6748 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 6749 "==> nxge_niu_peu_reset: flags 0x%x link timer id %d timer id %d", 6750 hw_p->flags, nxgep->nxge_link_poll_timerid, 6751 nxgep->nxge_timerid)); 6752 6753 MUTEX_ENTER(&hw_p->nxge_cfg_lock); 6754 /* 6755 * Make sure the other instances on the same hardware have 6756 * stopped sending PIOs and are in a quiescent state.
6757 */ 6758 for (i = 0; i < NXGE_MAX_PORTS; i++) { 6759 fnxgep = hw_p->nxge_p[i]; 6760 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 6761 "==> nxge_niu_peu_reset: checking entry %d " 6762 "nxgep $%p", i, fnxgep)); 6763 #ifdef NXGE_DEBUG 6764 if (fnxgep) { 6765 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 6766 "==> nxge_niu_peu_reset: entry %d (function %d) " 6767 "link timer id %d hw timer id %d", 6768 i, fnxgep->function_num, 6769 fnxgep->nxge_link_poll_timerid, 6770 fnxgep->nxge_timerid)); 6771 } 6772 #endif 6773 if (fnxgep && fnxgep != nxgep && 6774 (fnxgep->nxge_timerid || fnxgep->nxge_link_poll_timerid)) { 6775 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 6776 "==> nxge_niu_peu_reset: checking $%p " 6777 "(function %d) timer ids", 6778 fnxgep, fnxgep->function_num)); 6779 for (j = 0; j < NXGE_WAIT_QUITE_RETRY; j++) { 6780 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 6781 "==> nxge_niu_peu_reset: waiting")); 6782 NXGE_DELAY(NXGE_WAIT_QUITE_TIME); 6783 if (!fnxgep->nxge_timerid && 6784 !fnxgep->nxge_link_poll_timerid) { 6785 break; 6786 } 6787 } 6788 NXGE_DELAY(NXGE_WAIT_QUITE_TIME); 6789 if (fnxgep->nxge_timerid || 6790 fnxgep->nxge_link_poll_timerid) { 6791 MUTEX_EXIT(&hw_p->nxge_cfg_lock); 6792 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 6793 "<== nxge_niu_peu_reset: cannot reset " 6794 "hardware (devices are still in use)")); 6795 return; 6796 } 6797 } 6798 } 6799 6800 if ((hw_p->flags & COMMON_RESET_NIU_PCI) != COMMON_RESET_NIU_PCI) { 6801 hw_p->flags |= COMMON_RESET_NIU_PCI; 6802 rvalue = pci_config_get32(nxgep->dev_regs->nxge_pciregh, 6803 NXGE_PCI_PORT_LOGIC_OFFSET); 6804 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 6805 "nxge_niu_peu_reset: read offset 0x%x (%d) " 6806 "(data 0x%x)", 6807 NXGE_PCI_PORT_LOGIC_OFFSET, 6808 NXGE_PCI_PORT_LOGIC_OFFSET, 6809 rvalue)); 6810 6811 rvalue |= NXGE_PCI_RESET_ALL; 6812 pci_config_put32(nxgep->dev_regs->nxge_pciregh, 6813 NXGE_PCI_PORT_LOGIC_OFFSET, rvalue); 6814 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 6815 "nxge_niu_peu_reset: RESETTING NIU: write NIU reset 0x%x", 6816 rvalue)); 6817 6818 NXGE_DELAY(NXGE_PCI_RESET_WAIT); 6819 } 6820 6821 MUTEX_EXIT(&hw_p->nxge_cfg_lock); 6822 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_niu_peu_reset")); 6823 } 6824
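/*
 * Note on nxge_niu_peu_reset() above: the reset itself is a
 * read-modify-write of the 32-bit port-logic register at PCI config
 * offset 0x98 (NXGE_PCI_PORT_LOGIC_OFFSET), setting the PIM, GLU and
 * NIU reset bits (bits 29, 30 and 31) in a single write, followed by a
 * one-second delay for the reset to take effect. The
 * COMMON_RESET_NIU_PCI flag in the shared hardware list entry ensures
 * that only the first function to reach this code performs the reset.
 */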