1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 #pragma ident "%Z%%M% %I% %E% SMI" 27 28 /* 29 * SunOs MT STREAMS NIU/Neptune 10Gb Ethernet Device Driver. 30 */ 31 #include <sys/nxge/nxge_impl.h> 32 #include <sys/nxge/nxge_hio.h> 33 #include <sys/nxge/nxge_rxdma.h> 34 #include <sys/pcie.h> 35 36 uint32_t nxge_use_partition = 0; /* debug partition flag */ 37 uint32_t nxge_dma_obp_props_only = 1; /* use obp published props */ 38 uint32_t nxge_use_rdc_intr = 1; /* debug to assign rdc intr */ 39 /* 40 * PSARC/2007/453 MSI-X interrupt limit override 41 * (This PSARC case is limited to MSI-X vectors 42 * and SPARC platforms only). 43 */ 44 #if defined(_BIG_ENDIAN) 45 uint32_t nxge_msi_enable = 2; 46 #else 47 uint32_t nxge_msi_enable = 1; 48 #endif 49 50 /* 51 * Software workaround for the hardware 52 * checksum bugs that affect packet transmission 53 * and receive: 54 * 55 * Usage of nxge_cksum_offload: 56 * 57 * (1) nxge_cksum_offload = 0 (default): 58 * - transmits packets: 59 * TCP: uses the hardware checksum feature. 
60 * UDP: driver will compute the software checksum 61 * based on the partial checksum computed 62 * by the IP layer. 63 * - receives packets 64 * TCP: marks packets checksum flags based on hardware result. 65 * UDP: will not mark checksum flags. 66 * 67 * (2) nxge_cksum_offload = 1: 68 * - transmit packets: 69 * TCP/UDP: uses the hardware checksum feature. 70 * - receives packets 71 * TCP/UDP: marks packet checksum flags based on hardware result. 72 * 73 * (3) nxge_cksum_offload = 2: 74 * - The driver will not register its checksum capability. 75 * Checksum for both TCP and UDP will be computed 76 * by the stack. 77 * - The software LSO is not allowed in this case. 78 * 79 * (4) nxge_cksum_offload > 2: 80 * - Will be treated as it is set to 2 81 * (stack will compute the checksum). 82 * 83 * (5) If the hardware bug is fixed, this workaround 84 * needs to be updated accordingly to reflect 85 * the new hardware revision. 86 */ 87 uint32_t nxge_cksum_offload = 0; 88 89 /* 90 * Globals: tunable parameters (/etc/system or adb) 91 * 92 */ 93 uint32_t nxge_rbr_size = NXGE_RBR_RBB_DEFAULT; 94 uint32_t nxge_rbr_spare_size = 0; 95 uint32_t nxge_rcr_size = NXGE_RCR_DEFAULT; 96 uint32_t nxge_tx_ring_size = NXGE_TX_RING_DEFAULT; 97 boolean_t nxge_no_msg = B_TRUE; /* control message display */ 98 uint32_t nxge_no_link_notify = 0; /* control DL_NOTIFY */ 99 uint32_t nxge_bcopy_thresh = TX_BCOPY_MAX; 100 uint32_t nxge_dvma_thresh = TX_FASTDVMA_MIN; 101 uint32_t nxge_dma_stream_thresh = TX_STREAM_MIN; 102 uint32_t nxge_jumbo_mtu = TX_JUMBO_MTU; 103 boolean_t nxge_jumbo_enable = B_FALSE; 104 uint16_t nxge_rcr_timeout = NXGE_RDC_RCR_TIMEOUT; 105 uint16_t nxge_rcr_threshold = NXGE_RDC_RCR_THRESHOLD; 106 nxge_tx_mode_t nxge_tx_scheme = NXGE_USE_SERIAL; 107 108 /* MAX LSO size */ 109 #define NXGE_LSO_MAXLEN 65535 110 uint32_t nxge_lso_max = NXGE_LSO_MAXLEN; 111 112 /* 113 * Debugging flags: 114 * nxge_no_tx_lb : transmit load balancing 115 * nxge_tx_lb_policy: 0 - TCP port (default) 
116 * 3 - DEST MAC 117 */ 118 uint32_t nxge_no_tx_lb = 0; 119 uint32_t nxge_tx_lb_policy = NXGE_TX_LB_TCPUDP; 120 121 /* 122 * Add tunable to reduce the amount of time spent in the 123 * ISR doing Rx Processing. 124 */ 125 uint32_t nxge_max_rx_pkts = 1024; 126 127 /* 128 * Tunables to manage the receive buffer blocks. 129 * 130 * nxge_rx_threshold_hi: copy all buffers. 131 * nxge_rx_bcopy_size_type: receive buffer block size type. 132 * nxge_rx_threshold_lo: copy only up to tunable block size type. 133 */ 134 nxge_rxbuf_threshold_t nxge_rx_threshold_hi = NXGE_RX_COPY_6; 135 nxge_rxbuf_type_t nxge_rx_buf_size_type = RCR_PKTBUFSZ_0; 136 nxge_rxbuf_threshold_t nxge_rx_threshold_lo = NXGE_RX_COPY_3; 137 138 /* Use kmem_alloc() to allocate data buffers. */ 139 #if !defined(__i386) 140 uint32_t nxge_use_kmem_alloc = 1; 141 #else 142 uint32_t nxge_use_kmem_alloc = 0; 143 #endif 144 145 rtrace_t npi_rtracebuf; 146 147 #if defined(sun4v) 148 /* 149 * Hypervisor N2/NIU services information. 150 */ 151 static hsvc_info_t niu_hsvc = { 152 HSVC_REV_1, NULL, HSVC_GROUP_NIU, NIU_MAJOR_VER, 153 NIU_MINOR_VER, "nxge" 154 }; 155 156 static int nxge_hsvc_register(p_nxge_t); 157 #endif 158 159 /* 160 * Function Prototypes 161 */ 162 static int nxge_attach(dev_info_t *, ddi_attach_cmd_t); 163 static int nxge_detach(dev_info_t *, ddi_detach_cmd_t); 164 static void nxge_unattach(p_nxge_t); 165 166 #if NXGE_PROPERTY 167 static void nxge_remove_hard_properties(p_nxge_t); 168 #endif 169 170 /* 171 * These two functions are required by nxge_hio.c 172 */ 173 extern int nxge_m_mmac_add(void *arg, mac_multi_addr_t *maddr); 174 extern int nxge_m_mmac_remove(void *arg, mac_addr_slot_t slot); 175 176 static nxge_status_t nxge_setup_system_dma_pages(p_nxge_t); 177 178 static nxge_status_t nxge_setup_mutexes(p_nxge_t); 179 static void nxge_destroy_mutexes(p_nxge_t); 180 181 static nxge_status_t nxge_map_regs(p_nxge_t nxgep); 182 static void nxge_unmap_regs(p_nxge_t nxgep); 183 #ifdef NXGE_DEBUG 184 
static void nxge_test_map_regs(p_nxge_t nxgep); 185 #endif 186 187 static nxge_status_t nxge_add_intrs(p_nxge_t nxgep); 188 static nxge_status_t nxge_add_soft_intrs(p_nxge_t nxgep); 189 static void nxge_remove_intrs(p_nxge_t nxgep); 190 static void nxge_remove_soft_intrs(p_nxge_t nxgep); 191 192 static nxge_status_t nxge_add_intrs_adv(p_nxge_t nxgep); 193 static nxge_status_t nxge_add_intrs_adv_type(p_nxge_t, uint32_t); 194 static nxge_status_t nxge_add_intrs_adv_type_fix(p_nxge_t, uint32_t); 195 static void nxge_intrs_enable(p_nxge_t nxgep); 196 static void nxge_intrs_disable(p_nxge_t nxgep); 197 198 static void nxge_suspend(p_nxge_t); 199 static nxge_status_t nxge_resume(p_nxge_t); 200 201 static nxge_status_t nxge_setup_dev(p_nxge_t); 202 static void nxge_destroy_dev(p_nxge_t); 203 204 static nxge_status_t nxge_alloc_mem_pool(p_nxge_t); 205 static void nxge_free_mem_pool(p_nxge_t); 206 207 nxge_status_t nxge_alloc_rx_mem_pool(p_nxge_t); 208 static void nxge_free_rx_mem_pool(p_nxge_t); 209 210 nxge_status_t nxge_alloc_tx_mem_pool(p_nxge_t); 211 static void nxge_free_tx_mem_pool(p_nxge_t); 212 213 static nxge_status_t nxge_dma_mem_alloc(p_nxge_t, dma_method_t, 214 struct ddi_dma_attr *, 215 size_t, ddi_device_acc_attr_t *, uint_t, 216 p_nxge_dma_common_t); 217 218 static void nxge_dma_mem_free(p_nxge_dma_common_t); 219 static void nxge_dma_free_rx_data_buf(p_nxge_dma_common_t); 220 221 static nxge_status_t nxge_alloc_rx_buf_dma(p_nxge_t, uint16_t, 222 p_nxge_dma_common_t *, size_t, size_t, uint32_t *); 223 static void nxge_free_rx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t); 224 225 static nxge_status_t nxge_alloc_rx_cntl_dma(p_nxge_t, uint16_t, 226 p_nxge_dma_common_t *, size_t); 227 static void nxge_free_rx_cntl_dma(p_nxge_t, p_nxge_dma_common_t); 228 229 extern nxge_status_t nxge_alloc_tx_buf_dma(p_nxge_t, uint16_t, 230 p_nxge_dma_common_t *, size_t, size_t, uint32_t *); 231 static void nxge_free_tx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t); 232 233 
extern nxge_status_t nxge_alloc_tx_cntl_dma(p_nxge_t, uint16_t, 234 p_nxge_dma_common_t *, 235 size_t); 236 static void nxge_free_tx_cntl_dma(p_nxge_t, p_nxge_dma_common_t); 237 238 static int nxge_init_common_dev(p_nxge_t); 239 static void nxge_uninit_common_dev(p_nxge_t); 240 extern int nxge_param_set_mac(p_nxge_t, queue_t *, mblk_t *, 241 char *, caddr_t); 242 243 /* 244 * The next declarations are for the GLDv3 interface. 245 */ 246 static int nxge_m_start(void *); 247 static void nxge_m_stop(void *); 248 static int nxge_m_unicst(void *, const uint8_t *); 249 static int nxge_m_multicst(void *, boolean_t, const uint8_t *); 250 static int nxge_m_promisc(void *, boolean_t); 251 static void nxge_m_ioctl(void *, queue_t *, mblk_t *); 252 static void nxge_m_resources(void *); 253 mblk_t *nxge_m_tx(void *arg, mblk_t *); 254 static nxge_status_t nxge_mac_register(p_nxge_t); 255 static int nxge_altmac_set(p_nxge_t nxgep, uint8_t *mac_addr, 256 mac_addr_slot_t slot); 257 void nxge_mmac_kstat_update(p_nxge_t nxgep, mac_addr_slot_t slot, 258 boolean_t factory); 259 static int nxge_m_mmac_reserve(void *arg, mac_multi_addr_t *maddr); 260 static int nxge_m_mmac_modify(void *arg, mac_multi_addr_t *maddr); 261 static int nxge_m_mmac_get(void *arg, mac_multi_addr_t *maddr); 262 static boolean_t nxge_m_getcapab(void *, mac_capab_t, void *); 263 static int nxge_m_setprop(void *, const char *, mac_prop_id_t, 264 uint_t, const void *); 265 static int nxge_m_getprop(void *, const char *, mac_prop_id_t, 266 uint_t, uint_t, void *); 267 static int nxge_set_priv_prop(nxge_t *, const char *, uint_t, 268 const void *); 269 static int nxge_get_priv_prop(nxge_t *, const char *, uint_t, uint_t, 270 void *); 271 static int nxge_get_def_val(nxge_t *, mac_prop_id_t, uint_t, void *); 272 273 274 mac_priv_prop_t nxge_priv_props[] = { 275 {"_adv_10gfdx_cap", MAC_PROP_PERM_RW}, 276 {"_adv_pause_cap", MAC_PROP_PERM_RW}, 277 {"_function_number", MAC_PROP_PERM_READ}, 278 {"_fw_version", 
MAC_PROP_PERM_READ}, 279 {"_port_mode", MAC_PROP_PERM_READ}, 280 {"_hot_swap_phy", MAC_PROP_PERM_READ}, 281 {"_accept_jumbo", MAC_PROP_PERM_RW}, 282 {"_rxdma_intr_time", MAC_PROP_PERM_RW}, 283 {"_rxdma_intr_pkts", MAC_PROP_PERM_RW}, 284 {"_class_opt_ipv4_tcp", MAC_PROP_PERM_RW}, 285 {"_class_opt_ipv4_udp", MAC_PROP_PERM_RW}, 286 {"_class_opt_ipv4_ah", MAC_PROP_PERM_RW}, 287 {"_class_opt_ipv4_sctp", MAC_PROP_PERM_RW}, 288 {"_class_opt_ipv6_tcp", MAC_PROP_PERM_RW}, 289 {"_class_opt_ipv6_udp", MAC_PROP_PERM_RW}, 290 {"_class_opt_ipv6_ah", MAC_PROP_PERM_RW}, 291 {"_class_opt_ipv6_sctp", MAC_PROP_PERM_RW}, 292 {"_soft_lso_enable", MAC_PROP_PERM_RW} 293 }; 294 295 #define NXGE_MAX_PRIV_PROPS \ 296 (sizeof (nxge_priv_props)/sizeof (mac_priv_prop_t)) 297 298 #define NXGE_M_CALLBACK_FLAGS\ 299 (MC_RESOURCES | MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP) 300 301 302 #define NXGE_NEPTUNE_MAGIC 0x4E584745UL 303 #define MAX_DUMP_SZ 256 304 305 #define NXGE_M_CALLBACK_FLAGS \ 306 (MC_RESOURCES | MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP) 307 308 mac_callbacks_t nxge_m_callbacks = { 309 NXGE_M_CALLBACK_FLAGS, 310 nxge_m_stat, 311 nxge_m_start, 312 nxge_m_stop, 313 nxge_m_promisc, 314 nxge_m_multicst, 315 nxge_m_unicst, 316 nxge_m_tx, 317 nxge_m_resources, 318 nxge_m_ioctl, 319 nxge_m_getcapab, 320 NULL, 321 NULL, 322 nxge_m_setprop, 323 nxge_m_getprop 324 }; 325 326 void 327 nxge_err_inject(p_nxge_t, queue_t *, mblk_t *); 328 329 /* PSARC/2007/453 MSI-X interrupt limit override. */ 330 #define NXGE_MSIX_REQUEST_10G 8 331 #define NXGE_MSIX_REQUEST_1G 2 332 static int nxge_create_msi_property(p_nxge_t); 333 334 /* 335 * These global variables control the message 336 * output. 337 */ 338 out_dbgmsg_t nxge_dbgmsg_out = DBG_CONSOLE | STR_LOG; 339 uint64_t nxge_debug_level; 340 341 /* 342 * This list contains the instance structures for the Neptune 343 * devices present in the system. The lock exists to guarantee 344 * mutually exclusive access to the list. 
345 */ 346 void *nxge_list = NULL; 347 348 void *nxge_hw_list = NULL; 349 nxge_os_mutex_t nxge_common_lock; 350 351 extern uint64_t npi_debug_level; 352 353 extern nxge_status_t nxge_ldgv_init(p_nxge_t, int *, int *); 354 extern nxge_status_t nxge_ldgv_init_n2(p_nxge_t, int *, int *); 355 extern nxge_status_t nxge_ldgv_uninit(p_nxge_t); 356 extern nxge_status_t nxge_intr_ldgv_init(p_nxge_t); 357 extern void nxge_fm_init(p_nxge_t, 358 ddi_device_acc_attr_t *, 359 ddi_device_acc_attr_t *, 360 ddi_dma_attr_t *); 361 extern void nxge_fm_fini(p_nxge_t); 362 extern npi_status_t npi_mac_altaddr_disable(npi_handle_t, uint8_t, uint8_t); 363 364 /* 365 * Count used to maintain the number of buffers being used 366 * by Neptune instances and loaned up to the upper layers. 367 */ 368 uint32_t nxge_mblks_pending = 0; 369 370 /* 371 * Device register access attributes for PIO. 372 */ 373 static ddi_device_acc_attr_t nxge_dev_reg_acc_attr = { 374 DDI_DEVICE_ATTR_V0, 375 DDI_STRUCTURE_LE_ACC, 376 DDI_STRICTORDER_ACC, 377 }; 378 379 /* 380 * Device descriptor access attributes for DMA. 381 */ 382 static ddi_device_acc_attr_t nxge_dev_desc_dma_acc_attr = { 383 DDI_DEVICE_ATTR_V0, 384 DDI_STRUCTURE_LE_ACC, 385 DDI_STRICTORDER_ACC 386 }; 387 388 /* 389 * Device buffer access attributes for DMA. 390 */ 391 static ddi_device_acc_attr_t nxge_dev_buf_dma_acc_attr = { 392 DDI_DEVICE_ATTR_V0, 393 DDI_STRUCTURE_BE_ACC, 394 DDI_STRICTORDER_ACC 395 }; 396 397 ddi_dma_attr_t nxge_desc_dma_attr = { 398 DMA_ATTR_V0, /* version number. 
*/ 399 0, /* low address */ 400 0xffffffffffffffff, /* high address */ 401 0xffffffffffffffff, /* address counter max */ 402 #ifndef NIU_PA_WORKAROUND 403 0x100000, /* alignment */ 404 #else 405 0x2000, 406 #endif 407 0xfc00fc, /* dlim_burstsizes */ 408 0x1, /* minimum transfer size */ 409 0xffffffffffffffff, /* maximum transfer size */ 410 0xffffffffffffffff, /* maximum segment size */ 411 1, /* scatter/gather list length */ 412 (unsigned int) 1, /* granularity */ 413 0 /* attribute flags */ 414 }; 415 416 ddi_dma_attr_t nxge_tx_dma_attr = { 417 DMA_ATTR_V0, /* version number. */ 418 0, /* low address */ 419 0xffffffffffffffff, /* high address */ 420 0xffffffffffffffff, /* address counter max */ 421 #if defined(_BIG_ENDIAN) 422 0x2000, /* alignment */ 423 #else 424 0x1000, /* alignment */ 425 #endif 426 0xfc00fc, /* dlim_burstsizes */ 427 0x1, /* minimum transfer size */ 428 0xffffffffffffffff, /* maximum transfer size */ 429 0xffffffffffffffff, /* maximum segment size */ 430 5, /* scatter/gather list length */ 431 (unsigned int) 1, /* granularity */ 432 0 /* attribute flags */ 433 }; 434 435 ddi_dma_attr_t nxge_rx_dma_attr = { 436 DMA_ATTR_V0, /* version number. 
*/ 437 0, /* low address */ 438 0xffffffffffffffff, /* high address */ 439 0xffffffffffffffff, /* address counter max */ 440 0x2000, /* alignment */ 441 0xfc00fc, /* dlim_burstsizes */ 442 0x1, /* minimum transfer size */ 443 0xffffffffffffffff, /* maximum transfer size */ 444 0xffffffffffffffff, /* maximum segment size */ 445 1, /* scatter/gather list length */ 446 (unsigned int) 1, /* granularity */ 447 DDI_DMA_RELAXED_ORDERING /* attribute flags */ 448 }; 449 450 ddi_dma_lim_t nxge_dma_limits = { 451 (uint_t)0, /* dlim_addr_lo */ 452 (uint_t)0xffffffff, /* dlim_addr_hi */ 453 (uint_t)0xffffffff, /* dlim_cntr_max */ 454 (uint_t)0xfc00fc, /* dlim_burstsizes for 32 and 64 bit xfers */ 455 0x1, /* dlim_minxfer */ 456 1024 /* dlim_speed */ 457 }; 458 459 dma_method_t nxge_force_dma = DVMA; 460 461 /* 462 * dma chunk sizes. 463 * 464 * Try to allocate the largest possible size 465 * so that fewer number of dma chunks would be managed 466 */ 467 #ifdef NIU_PA_WORKAROUND 468 size_t alloc_sizes [] = {0x2000}; 469 #else 470 size_t alloc_sizes [] = {0x1000, 0x2000, 0x4000, 0x8000, 471 0x10000, 0x20000, 0x40000, 0x80000, 472 0x100000, 0x200000, 0x400000, 0x800000, 473 0x1000000, 0x2000000, 0x4000000}; 474 #endif 475 476 /* 477 * Translate "dev_t" to a pointer to the associated "dev_info_t". 478 */ 479 480 extern void nxge_get_environs(nxge_t *); 481 482 static int 483 nxge_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 484 { 485 p_nxge_t nxgep = NULL; 486 int instance; 487 int status = DDI_SUCCESS; 488 uint8_t portn; 489 nxge_mmac_t *mmac_info; 490 491 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_attach")); 492 493 /* 494 * Get the device instance since we'll need to setup 495 * or retrieve a soft state for this instance. 
496 */ 497 instance = ddi_get_instance(dip); 498 499 switch (cmd) { 500 case DDI_ATTACH: 501 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_ATTACH")); 502 break; 503 504 case DDI_RESUME: 505 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_RESUME")); 506 nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance); 507 if (nxgep == NULL) { 508 status = DDI_FAILURE; 509 break; 510 } 511 if (nxgep->dip != dip) { 512 status = DDI_FAILURE; 513 break; 514 } 515 if (nxgep->suspended == DDI_PM_SUSPEND) { 516 status = ddi_dev_is_needed(nxgep->dip, 0, 1); 517 } else { 518 status = nxge_resume(nxgep); 519 } 520 goto nxge_attach_exit; 521 522 case DDI_PM_RESUME: 523 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_RESUME")); 524 nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance); 525 if (nxgep == NULL) { 526 status = DDI_FAILURE; 527 break; 528 } 529 if (nxgep->dip != dip) { 530 status = DDI_FAILURE; 531 break; 532 } 533 status = nxge_resume(nxgep); 534 goto nxge_attach_exit; 535 536 default: 537 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing unknown")); 538 status = DDI_FAILURE; 539 goto nxge_attach_exit; 540 } 541 542 543 if (ddi_soft_state_zalloc(nxge_list, instance) == DDI_FAILURE) { 544 status = DDI_FAILURE; 545 goto nxge_attach_exit; 546 } 547 548 nxgep = ddi_get_soft_state(nxge_list, instance); 549 if (nxgep == NULL) { 550 status = NXGE_ERROR; 551 goto nxge_attach_fail2; 552 } 553 554 nxgep->nxge_magic = NXGE_MAGIC; 555 556 nxgep->drv_state = 0; 557 nxgep->dip = dip; 558 nxgep->instance = instance; 559 nxgep->p_dip = ddi_get_parent(dip); 560 nxgep->nxge_debug_level = nxge_debug_level; 561 npi_debug_level = nxge_debug_level; 562 563 /* Are we a guest running in a Hybrid I/O environment? 
*/ 564 nxge_get_environs(nxgep); 565 566 status = nxge_map_regs(nxgep); 567 568 if (status != NXGE_OK) { 569 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_map_regs failed")); 570 goto nxge_attach_fail3; 571 } 572 573 nxge_fm_init(nxgep, &nxge_dev_reg_acc_attr, 574 &nxge_dev_desc_dma_acc_attr, 575 &nxge_rx_dma_attr); 576 577 /* Create & initialize the per-Neptune data structure */ 578 /* (even if we're a guest). */ 579 status = nxge_init_common_dev(nxgep); 580 if (status != NXGE_OK) { 581 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 582 "nxge_init_common_dev failed")); 583 goto nxge_attach_fail4; 584 } 585 586 #if defined(sun4v) 587 /* This is required by nxge_hio_init(), which follows. */ 588 if ((status = nxge_hsvc_register(nxgep)) != DDI_SUCCESS) 589 goto nxge_attach_fail; 590 #endif 591 592 if ((status = nxge_hio_init(nxgep)) != NXGE_OK) { 593 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 594 "nxge_hio_init failed")); 595 goto nxge_attach_fail4; 596 } 597 598 if (nxgep->niu_type == NEPTUNE_2_10GF) { 599 if (nxgep->function_num > 1) { 600 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "Unsupported" 601 " function %d. Only functions 0 and 1 are " 602 "supported for this card.", nxgep->function_num)); 603 status = NXGE_ERROR; 604 goto nxge_attach_fail4; 605 } 606 } 607 608 if (isLDOMguest(nxgep)) { 609 /* 610 * Use the function number here. 611 */ 612 nxgep->mac.portnum = nxgep->function_num; 613 nxgep->mac.porttype = PORT_TYPE_LOGICAL; 614 615 /* XXX We'll set the MAC address counts to 1 for now. */ 616 mmac_info = &nxgep->nxge_mmac_info; 617 mmac_info->num_mmac = 1; 618 mmac_info->naddrfree = 1; 619 } else { 620 portn = NXGE_GET_PORT_NUM(nxgep->function_num); 621 nxgep->mac.portnum = portn; 622 if ((portn == 0) || (portn == 1)) 623 nxgep->mac.porttype = PORT_TYPE_XMAC; 624 else 625 nxgep->mac.porttype = PORT_TYPE_BMAC; 626 /* 627 * Neptune has 4 ports, the first 2 ports use XMAC (10G MAC) 628 * internally, the rest 2 ports use BMAC (1G "Big" MAC). 
629 * The two types of MACs have different characterizations. 630 */ 631 mmac_info = &nxgep->nxge_mmac_info; 632 if (nxgep->function_num < 2) { 633 mmac_info->num_mmac = XMAC_MAX_ALT_ADDR_ENTRY; 634 mmac_info->naddrfree = XMAC_MAX_ALT_ADDR_ENTRY; 635 } else { 636 mmac_info->num_mmac = BMAC_MAX_ALT_ADDR_ENTRY; 637 mmac_info->naddrfree = BMAC_MAX_ALT_ADDR_ENTRY; 638 } 639 } 640 /* 641 * Setup the Ndd parameters for the this instance. 642 */ 643 nxge_init_param(nxgep); 644 645 /* 646 * Setup Register Tracing Buffer. 647 */ 648 npi_rtrace_buf_init((rtrace_t *)&npi_rtracebuf); 649 650 /* init stats ptr */ 651 nxge_init_statsp(nxgep); 652 653 /* 654 * Copy the vpd info from eeprom to a local data 655 * structure, and then check its validity. 656 */ 657 if (!isLDOMguest(nxgep)) { 658 int *regp; 659 uint_t reglen; 660 int rv; 661 662 nxge_vpd_info_get(nxgep); 663 664 /* Find the NIU config handle. */ 665 rv = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, 666 ddi_get_parent(nxgep->dip), DDI_PROP_DONTPASS, 667 "reg", ®p, ®len); 668 669 if (rv != DDI_PROP_SUCCESS) { 670 goto nxge_attach_fail5; 671 } 672 /* 673 * The address_hi, that is the first int, in the reg 674 * property consists of config handle, but need to remove 675 * the bits 28-31 which are OBP specific info. 676 */ 677 nxgep->niu_cfg_hdl = (*regp) & 0xFFFFFFF; 678 ddi_prop_free(regp); 679 } 680 681 if (isLDOMguest(nxgep)) { 682 uchar_t *prop_val; 683 uint_t prop_len; 684 685 extern void nxge_get_logical_props(p_nxge_t); 686 687 nxgep->statsp->mac_stats.xcvr_inuse = LOGICAL_XCVR; 688 nxgep->mac.portmode = PORT_LOGICAL; 689 (void) ddi_prop_update_string(DDI_DEV_T_NONE, nxgep->dip, 690 "phy-type", "virtual transceiver"); 691 692 nxgep->nports = 1; 693 nxgep->board_ver = 0; /* XXX What? */ 694 695 /* 696 * local-mac-address property gives us info on which 697 * specific MAC address the Hybrid resource is associated 698 * with. 
699 */ 700 if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, nxgep->dip, 0, 701 "local-mac-address", &prop_val, 702 &prop_len) != DDI_PROP_SUCCESS) { 703 goto nxge_attach_fail5; 704 } 705 if (prop_len != ETHERADDRL) { 706 ddi_prop_free(prop_val); 707 goto nxge_attach_fail5; 708 } 709 ether_copy(prop_val, nxgep->hio_mac_addr); 710 ddi_prop_free(prop_val); 711 nxge_get_logical_props(nxgep); 712 713 } else { 714 status = nxge_xcvr_find(nxgep); 715 716 if (status != NXGE_OK) { 717 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_attach: " 718 " Couldn't determine card type" 719 " .... exit ")); 720 goto nxge_attach_fail5; 721 } 722 723 status = nxge_get_config_properties(nxgep); 724 725 if (status != NXGE_OK) { 726 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 727 "get_hw create failed")); 728 goto nxge_attach_fail; 729 } 730 } 731 732 /* 733 * Setup the Kstats for the driver. 734 */ 735 nxge_setup_kstats(nxgep); 736 737 if (!isLDOMguest(nxgep)) 738 nxge_setup_param(nxgep); 739 740 status = nxge_setup_system_dma_pages(nxgep); 741 if (status != NXGE_OK) { 742 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "set dma page failed")); 743 goto nxge_attach_fail; 744 } 745 746 nxge_hw_id_init(nxgep); 747 748 if (!isLDOMguest(nxgep)) 749 nxge_hw_init_niu_common(nxgep); 750 751 status = nxge_setup_mutexes(nxgep); 752 if (status != NXGE_OK) { 753 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set mutex failed")); 754 goto nxge_attach_fail; 755 } 756 757 #if defined(sun4v) 758 if (isLDOMguest(nxgep)) { 759 /* Find our VR & channel sets. 
*/ 760 status = nxge_hio_vr_add(nxgep); 761 goto nxge_attach_exit; 762 } 763 #endif 764 765 status = nxge_setup_dev(nxgep); 766 if (status != DDI_SUCCESS) { 767 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set dev failed")); 768 goto nxge_attach_fail; 769 } 770 771 status = nxge_add_intrs(nxgep); 772 if (status != DDI_SUCCESS) { 773 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "add_intr failed")); 774 goto nxge_attach_fail; 775 } 776 status = nxge_add_soft_intrs(nxgep); 777 if (status != DDI_SUCCESS) { 778 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 779 "add_soft_intr failed")); 780 goto nxge_attach_fail; 781 } 782 783 /* 784 * Enable interrupts. 785 */ 786 nxge_intrs_enable(nxgep); 787 788 // If a guest, register with vio_net instead. 789 if ((status = nxge_mac_register(nxgep)) != NXGE_OK) { 790 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 791 "unable to register to mac layer (%d)", status)); 792 goto nxge_attach_fail; 793 } 794 795 mac_link_update(nxgep->mach, LINK_STATE_UNKNOWN); 796 797 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 798 "registered to mac (instance %d)", instance)); 799 800 (void) nxge_link_monitor(nxgep, LINK_MONITOR_START); 801 802 goto nxge_attach_exit; 803 804 nxge_attach_fail: 805 nxge_unattach(nxgep); 806 goto nxge_attach_fail1; 807 808 nxge_attach_fail5: 809 /* 810 * Tear down the ndd parameters setup. 811 */ 812 nxge_destroy_param(nxgep); 813 814 /* 815 * Tear down the kstat setup. 816 */ 817 nxge_destroy_kstats(nxgep); 818 819 nxge_attach_fail4: 820 if (nxgep->nxge_hw_p) { 821 nxge_uninit_common_dev(nxgep); 822 nxgep->nxge_hw_p = NULL; 823 } 824 825 nxge_attach_fail3: 826 /* 827 * Unmap the register setup. 
828 */ 829 nxge_unmap_regs(nxgep); 830 831 nxge_fm_fini(nxgep); 832 833 nxge_attach_fail2: 834 ddi_soft_state_free(nxge_list, nxgep->instance); 835 836 nxge_attach_fail1: 837 if (status != NXGE_OK) 838 status = (NXGE_ERROR | NXGE_DDI_FAILED); 839 nxgep = NULL; 840 841 nxge_attach_exit: 842 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_attach status = 0x%08x", 843 status)); 844 845 return (status); 846 } 847 848 static int 849 nxge_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 850 { 851 int status = DDI_SUCCESS; 852 int instance; 853 p_nxge_t nxgep = NULL; 854 855 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_detach")); 856 instance = ddi_get_instance(dip); 857 nxgep = ddi_get_soft_state(nxge_list, instance); 858 if (nxgep == NULL) { 859 status = DDI_FAILURE; 860 goto nxge_detach_exit; 861 } 862 863 switch (cmd) { 864 case DDI_DETACH: 865 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_DETACH")); 866 break; 867 868 case DDI_PM_SUSPEND: 869 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_SUSPEND")); 870 nxgep->suspended = DDI_PM_SUSPEND; 871 nxge_suspend(nxgep); 872 break; 873 874 case DDI_SUSPEND: 875 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_SUSPEND")); 876 if (nxgep->suspended != DDI_PM_SUSPEND) { 877 nxgep->suspended = DDI_SUSPEND; 878 nxge_suspend(nxgep); 879 } 880 break; 881 882 default: 883 status = DDI_FAILURE; 884 } 885 886 if (cmd != DDI_DETACH) 887 goto nxge_detach_exit; 888 889 /* 890 * Stop the xcvr polling. 
 */
	nxgep->suspended = cmd;

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);

	/*
	 * A Hybrid I/O guest unregisters from the hio framework;
	 * otherwise unregister from the GLDv3 mac layer.  A mac_unregister()
	 * failure aborts the detach so the instance stays usable.
	 */
	if (isLDOMguest(nxgep)) {
		nxge_hio_unregister(nxgep);
	} else if (nxgep->mach && (status = mac_unregister(nxgep->mach)) != 0) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "<== nxge_detach status = 0x%08X", status));
		return (DDI_FAILURE);
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "<== nxge_detach (mac_unregister) status = 0x%08X", status));

	nxge_unattach(nxgep);
	nxgep = NULL;

nxge_detach_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_detach status = 0x%08X",
	    status));

	return (status);
}

/*
 * nxge_unattach - tear down everything nxge_attach() set up.
 * Safe to call on a partially attached instance: bails out early
 * if the instance pointer is NULL or its registers were never mapped.
 */
static void
nxge_unattach(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unattach"));

	if (nxgep == NULL || nxgep->dev_regs == NULL) {
		return;
	}

	nxgep->nxge_magic = 0;

	/* Cancel any outstanding driver timer. */
	if (nxgep->nxge_timerid) {
		nxge_stop_timer(nxgep, nxgep->nxge_timerid);
		nxgep->nxge_timerid = 0;
	}

#if	defined(sun4v)
	/* Release the Hybrid I/O virtualization region, if we are a guest. */
	if (isLDOMguest(nxgep)) {
		(void) nxge_hio_vr_release(nxgep);
	}
#endif

	if (nxgep->nxge_hw_p) {
		nxge_uninit_common_dev(nxgep);
		nxgep->nxge_hw_p = NULL;
	}

#if	defined(sun4v)
	/* Undo the hypervisor service registration done in nxge_attach(). */
	if (nxgep->niu_type == N2_NIU && nxgep->niu_hsvc_available == B_TRUE) {
		(void) hsvc_unregister(&nxgep->niu_hsvc);
		nxgep->niu_hsvc_available = B_FALSE;
	}
#endif
	/*
	 * Stop any further interrupts.
	 */
	nxge_remove_intrs(nxgep);

	/* remove soft interrupts */
	nxge_remove_soft_intrs(nxgep);

	/*
	 * Stop the device and free resources.
	 */
	if (!isLDOMguest(nxgep)) {
		nxge_destroy_dev(nxgep);
	}

	/*
	 * Tear down the ndd parameters setup.
	 */
	nxge_destroy_param(nxgep);

	/*
	 * Tear down the kstat setup.
	 */
	nxge_destroy_kstats(nxgep);

	/*
	 * Destroy all mutexes.
 */
	nxge_destroy_mutexes(nxgep);

	/*
	 * Remove the list of ndd parameters which
	 * were setup during attach.
	 */
	if (nxgep->dip) {
		NXGE_DEBUG_MSG((nxgep, OBP_CTL,
		    " nxge_unattach: remove all properties"));

		(void) ddi_prop_remove_all(nxgep->dip);
	}

#if	NXGE_PROPERTY
	nxge_remove_hard_properties(nxgep);
#endif

	/*
	 * Unmap the register setup.
	 */
	nxge_unmap_regs(nxgep);

	nxge_fm_fini(nxgep);

	ddi_soft_state_free(nxge_list, nxgep->instance);

	NXGE_DEBUG_MSG((NULL, DDI_CTL, "<== nxge_unattach"));
}

#if	defined(sun4v)
/*
 * nxge_hsvc_register - negotiate the N2/NIU hypervisor service group
 * (sun4v only).  On success, niu_hsvc_available is set to B_TRUE so
 * that nxge_unattach() knows to call hsvc_unregister().  For devices
 * other than N2_NIU this is a no-op that returns DDI_SUCCESS.
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
int
nxge_hsvc_register(
	nxge_t *nxgep)
{
	nxge_status_t status;

	if (nxgep->niu_type == N2_NIU) {
		nxgep->niu_hsvc_available = B_FALSE;
		bcopy(&niu_hsvc, &nxgep->niu_hsvc, sizeof (hsvc_info_t));
		if ((status = hsvc_register(&nxgep->niu_hsvc,
		    &nxgep->niu_min_ver)) != 0) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "nxge_attach: %s: cannot negotiate "
			    "hypervisor services revision %d group: 0x%lx "
			    "major: 0x%lx minor: 0x%lx errno: %d",
			    niu_hsvc.hsvc_modname, niu_hsvc.hsvc_rev,
			    niu_hsvc.hsvc_group, niu_hsvc.hsvc_major,
			    niu_hsvc.hsvc_minor, status));
			return (DDI_FAILURE);
		}
		nxgep->niu_hsvc_available = B_TRUE;
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "NIU Hypervisor service enabled"));
	}

	return (DDI_SUCCESS);
}
#endif

static char n2_siu_name[] = "niu";

/*
 * nxge_map_regs - map the device register sets and record the access
 * handles in the per-instance soft state.
 */
static nxge_status_t
nxge_map_regs(p_nxge_t nxgep)
{
	int		ddi_status = DDI_SUCCESS;
	p_dev_regs_t	dev_regs;
	char		buf[MAXPATHLEN + 1];
	char		*devname;
#ifdef	NXGE_DEBUG
	char		*sysname;
#endif
	off_t		regsize;
	nxge_status_t	status = NXGE_OK;
#if	!defined(_BIG_ENDIAN)
	off_t		pci_offset;
	uint16_t	pcie_devctl;
#endif

	/* A Hybrid I/O guest maps its registers through the vio layer. */
	if (isLDOMguest(nxgep)) {
		return (nxge_guest_regs_map(nxgep));
1058 } 1059 1060 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_map_regs")); 1061 nxgep->dev_regs = NULL; 1062 dev_regs = KMEM_ZALLOC(sizeof (dev_regs_t), KM_SLEEP); 1063 dev_regs->nxge_regh = NULL; 1064 dev_regs->nxge_pciregh = NULL; 1065 dev_regs->nxge_msix_regh = NULL; 1066 dev_regs->nxge_vir_regh = NULL; 1067 dev_regs->nxge_vir2_regh = NULL; 1068 nxgep->niu_type = NIU_TYPE_NONE; 1069 1070 devname = ddi_pathname(nxgep->dip, buf); 1071 ASSERT(strlen(devname) > 0); 1072 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1073 "nxge_map_regs: pathname devname %s", devname)); 1074 1075 if (strstr(devname, n2_siu_name)) { 1076 /* N2/NIU */ 1077 nxgep->niu_type = N2_NIU; 1078 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1079 "nxge_map_regs: N2/NIU devname %s", devname)); 1080 /* get function number */ 1081 nxgep->function_num = 1082 (devname[strlen(devname) -1] == '1' ? 1 : 0); 1083 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1084 "nxge_map_regs: N2/NIU function number %d", 1085 nxgep->function_num)); 1086 } else { 1087 int *prop_val; 1088 uint_t prop_len; 1089 uint8_t func_num; 1090 1091 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, nxgep->dip, 1092 0, "reg", 1093 &prop_val, &prop_len) != DDI_PROP_SUCCESS) { 1094 NXGE_DEBUG_MSG((nxgep, VPD_CTL, 1095 "Reg property not found")); 1096 ddi_status = DDI_FAILURE; 1097 goto nxge_map_regs_fail0; 1098 1099 } else { 1100 func_num = (prop_val[0] >> 8) & 0x7; 1101 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1102 "Reg property found: fun # %d", 1103 func_num)); 1104 nxgep->function_num = func_num; 1105 if (isLDOMguest(nxgep)) { 1106 nxgep->function_num /= 2; 1107 return (NXGE_OK); 1108 } 1109 ddi_prop_free(prop_val); 1110 } 1111 } 1112 1113 switch (nxgep->niu_type) { 1114 default: 1115 (void) ddi_dev_regsize(nxgep->dip, 0, ®size); 1116 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1117 "nxge_map_regs: pci config size 0x%x", regsize)); 1118 1119 ddi_status = ddi_regs_map_setup(nxgep->dip, 0, 1120 (caddr_t *)&(dev_regs->nxge_pciregp), 0, 0, 1121 &nxge_dev_reg_acc_attr, &dev_regs->nxge_pciregh); 1122 if 
(ddi_status != DDI_SUCCESS) { 1123 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1124 "ddi_map_regs, nxge bus config regs failed")); 1125 goto nxge_map_regs_fail0; 1126 } 1127 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1128 "nxge_map_reg: PCI config addr 0x%0llx " 1129 " handle 0x%0llx", dev_regs->nxge_pciregp, 1130 dev_regs->nxge_pciregh)); 1131 /* 1132 * IMP IMP 1133 * workaround for bit swapping bug in HW 1134 * which ends up in no-snoop = yes 1135 * resulting, in DMA not synched properly 1136 */ 1137 #if !defined(_BIG_ENDIAN) 1138 /* workarounds for x86 systems */ 1139 pci_offset = 0x80 + PCIE_DEVCTL; 1140 pcie_devctl = 0x0; 1141 pcie_devctl &= PCIE_DEVCTL_ENABLE_NO_SNOOP; 1142 pcie_devctl |= PCIE_DEVCTL_RO_EN; 1143 pci_config_put16(dev_regs->nxge_pciregh, pci_offset, 1144 pcie_devctl); 1145 #endif 1146 1147 (void) ddi_dev_regsize(nxgep->dip, 1, ®size); 1148 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1149 "nxge_map_regs: pio size 0x%x", regsize)); 1150 /* set up the device mapped register */ 1151 ddi_status = ddi_regs_map_setup(nxgep->dip, 1, 1152 (caddr_t *)&(dev_regs->nxge_regp), 0, 0, 1153 &nxge_dev_reg_acc_attr, &dev_regs->nxge_regh); 1154 if (ddi_status != DDI_SUCCESS) { 1155 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1156 "ddi_map_regs for Neptune global reg failed")); 1157 goto nxge_map_regs_fail1; 1158 } 1159 1160 /* set up the msi/msi-x mapped register */ 1161 (void) ddi_dev_regsize(nxgep->dip, 2, ®size); 1162 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1163 "nxge_map_regs: msix size 0x%x", regsize)); 1164 ddi_status = ddi_regs_map_setup(nxgep->dip, 2, 1165 (caddr_t *)&(dev_regs->nxge_msix_regp), 0, 0, 1166 &nxge_dev_reg_acc_attr, &dev_regs->nxge_msix_regh); 1167 if (ddi_status != DDI_SUCCESS) { 1168 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1169 "ddi_map_regs for msi reg failed")); 1170 goto nxge_map_regs_fail2; 1171 } 1172 1173 /* set up the vio region mapped register */ 1174 (void) ddi_dev_regsize(nxgep->dip, 3, ®size); 1175 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1176 "nxge_map_regs: vio size 0x%x", 
regsize)); 1177 ddi_status = ddi_regs_map_setup(nxgep->dip, 3, 1178 (caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0, 1179 &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh); 1180 1181 if (ddi_status != DDI_SUCCESS) { 1182 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1183 "ddi_map_regs for nxge vio reg failed")); 1184 goto nxge_map_regs_fail3; 1185 } 1186 nxgep->dev_regs = dev_regs; 1187 1188 NPI_PCI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_pciregh); 1189 NPI_PCI_ADD_HANDLE_SET(nxgep, 1190 (npi_reg_ptr_t)dev_regs->nxge_pciregp); 1191 NPI_MSI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_msix_regh); 1192 NPI_MSI_ADD_HANDLE_SET(nxgep, 1193 (npi_reg_ptr_t)dev_regs->nxge_msix_regp); 1194 1195 NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh); 1196 NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp); 1197 1198 NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh); 1199 NPI_REG_ADD_HANDLE_SET(nxgep, 1200 (npi_reg_ptr_t)dev_regs->nxge_regp); 1201 1202 NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh); 1203 NPI_VREG_ADD_HANDLE_SET(nxgep, 1204 (npi_reg_ptr_t)dev_regs->nxge_vir_regp); 1205 1206 break; 1207 1208 case N2_NIU: 1209 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "ddi_map_regs, NIU")); 1210 /* 1211 * Set up the device mapped register (FWARC 2006/556) 1212 * (changed back to 1: reg starts at 1!) 
1213 */ 1214 (void) ddi_dev_regsize(nxgep->dip, 1, ®size); 1215 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1216 "nxge_map_regs: dev size 0x%x", regsize)); 1217 ddi_status = ddi_regs_map_setup(nxgep->dip, 1, 1218 (caddr_t *)&(dev_regs->nxge_regp), 0, 0, 1219 &nxge_dev_reg_acc_attr, &dev_regs->nxge_regh); 1220 1221 if (ddi_status != DDI_SUCCESS) { 1222 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1223 "ddi_map_regs for N2/NIU, global reg failed ")); 1224 goto nxge_map_regs_fail1; 1225 } 1226 1227 /* set up the first vio region mapped register */ 1228 (void) ddi_dev_regsize(nxgep->dip, 2, ®size); 1229 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1230 "nxge_map_regs: vio (1) size 0x%x", regsize)); 1231 ddi_status = ddi_regs_map_setup(nxgep->dip, 2, 1232 (caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0, 1233 &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh); 1234 1235 if (ddi_status != DDI_SUCCESS) { 1236 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1237 "ddi_map_regs for nxge vio reg failed")); 1238 goto nxge_map_regs_fail2; 1239 } 1240 /* set up the second vio region mapped register */ 1241 (void) ddi_dev_regsize(nxgep->dip, 3, ®size); 1242 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1243 "nxge_map_regs: vio (3) size 0x%x", regsize)); 1244 ddi_status = ddi_regs_map_setup(nxgep->dip, 3, 1245 (caddr_t *)&(dev_regs->nxge_vir2_regp), 0, 0, 1246 &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir2_regh); 1247 1248 if (ddi_status != DDI_SUCCESS) { 1249 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1250 "ddi_map_regs for nxge vio2 reg failed")); 1251 goto nxge_map_regs_fail3; 1252 } 1253 nxgep->dev_regs = dev_regs; 1254 1255 NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh); 1256 NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp); 1257 1258 NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh); 1259 NPI_REG_ADD_HANDLE_SET(nxgep, 1260 (npi_reg_ptr_t)dev_regs->nxge_regp); 1261 1262 NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh); 1263 NPI_VREG_ADD_HANDLE_SET(nxgep, 1264 (npi_reg_ptr_t)dev_regs->nxge_vir_regp); 1265 1266 
NPI_V2REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir2_regh); 1267 NPI_V2REG_ADD_HANDLE_SET(nxgep, 1268 (npi_reg_ptr_t)dev_regs->nxge_vir2_regp); 1269 1270 break; 1271 } 1272 1273 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "nxge_map_reg: hardware addr 0x%0llx " 1274 " handle 0x%0llx", dev_regs->nxge_regp, dev_regs->nxge_regh)); 1275 1276 goto nxge_map_regs_exit; 1277 nxge_map_regs_fail3: 1278 if (dev_regs->nxge_msix_regh) { 1279 ddi_regs_map_free(&dev_regs->nxge_msix_regh); 1280 } 1281 if (dev_regs->nxge_vir_regh) { 1282 ddi_regs_map_free(&dev_regs->nxge_regh); 1283 } 1284 nxge_map_regs_fail2: 1285 if (dev_regs->nxge_regh) { 1286 ddi_regs_map_free(&dev_regs->nxge_regh); 1287 } 1288 nxge_map_regs_fail1: 1289 if (dev_regs->nxge_pciregh) { 1290 ddi_regs_map_free(&dev_regs->nxge_pciregh); 1291 } 1292 nxge_map_regs_fail0: 1293 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "Freeing register set memory")); 1294 kmem_free(dev_regs, sizeof (dev_regs_t)); 1295 1296 nxge_map_regs_exit: 1297 if (ddi_status != DDI_SUCCESS) 1298 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 1299 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_map_regs")); 1300 return (status); 1301 } 1302 1303 static void 1304 nxge_unmap_regs(p_nxge_t nxgep) 1305 { 1306 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unmap_regs")); 1307 1308 if (isLDOMguest(nxgep)) { 1309 nxge_guest_regs_map_free(nxgep); 1310 return; 1311 } 1312 1313 if (nxgep->dev_regs) { 1314 if (nxgep->dev_regs->nxge_pciregh) { 1315 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1316 "==> nxge_unmap_regs: bus")); 1317 ddi_regs_map_free(&nxgep->dev_regs->nxge_pciregh); 1318 nxgep->dev_regs->nxge_pciregh = NULL; 1319 } 1320 if (nxgep->dev_regs->nxge_regh) { 1321 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1322 "==> nxge_unmap_regs: device registers")); 1323 ddi_regs_map_free(&nxgep->dev_regs->nxge_regh); 1324 nxgep->dev_regs->nxge_regh = NULL; 1325 } 1326 if (nxgep->dev_regs->nxge_msix_regh) { 1327 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1328 "==> nxge_unmap_regs: device interrupts")); 1329 
			ddi_regs_map_free(&nxgep->dev_regs->nxge_msix_regh);
			nxgep->dev_regs->nxge_msix_regh = NULL;
		}
		if (nxgep->dev_regs->nxge_vir_regh) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "==> nxge_unmap_regs: vio region"));
			ddi_regs_map_free(&nxgep->dev_regs->nxge_vir_regh);
			nxgep->dev_regs->nxge_vir_regh = NULL;
		}
		if (nxgep->dev_regs->nxge_vir2_regh) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "==> nxge_unmap_regs: vio2 region"));
			ddi_regs_map_free(&nxgep->dev_regs->nxge_vir2_regh);
			nxgep->dev_regs->nxge_vir2_regh = NULL;
		}

		kmem_free(nxgep->dev_regs, sizeof (dev_regs_t));
		nxgep->dev_regs = NULL;
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_unmap_regs"));
}

/*
 * nxge_setup_mutexes
 *
 *	Create the locks and condition variable used by this instance.
 *	The interrupt block cookie is obtained first (0 for LDOMs
 *	guests) so the interrupt-level mutexes are initialized with
 *	the correct cookie.
 *
 * Returns:
 *	NXGE_OK, or NXGE_ERROR | NXGE_DDI_FAILED when the iblock
 *	cookie cannot be obtained.
 */
static nxge_status_t
nxge_setup_mutexes(p_nxge_t nxgep)
{
	int ddi_status = DDI_SUCCESS;
	nxge_status_t status = NXGE_OK;
	nxge_classify_t *classify_ptr;
	int partition;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_mutexes"));

	/*
	 * Get the interrupt cookie so the mutexes can be
	 * Initialized.
	 */
	if (isLDOMguest(nxgep)) {
		nxgep->interrupt_cookie = 0;
	} else {
		ddi_status = ddi_get_iblock_cookie(nxgep->dip, 0,
		    &nxgep->interrupt_cookie);

		if (ddi_status != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "<== nxge_setup_mutexes: failed 0x%x",
			    ddi_status));
			goto nxge_setup_mutexes_exit;
		}
	}

	cv_init(&nxgep->poll_cv, NULL, CV_DRIVER, NULL);
	MUTEX_INIT(&nxgep->poll_lock, NULL,
	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);

	/*
	 * Initialize mutexes for this device.
	 */
	MUTEX_INIT(nxgep->genlock, NULL,
	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	MUTEX_INIT(&nxgep->ouraddr_lock, NULL,
	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	MUTEX_INIT(&nxgep->mif_lock, NULL,
	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	MUTEX_INIT(&nxgep->group_lock, NULL,
	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	RW_INIT(&nxgep->filter_lock, NULL,
	    RW_DRIVER, (void *)nxgep->interrupt_cookie);

	classify_ptr = &nxgep->classifier;
	/*
	 * FFLP Mutexes are never used in interrupt context
	 * as fflp operation can take very long time to
	 * complete and hence not suitable to invoke from interrupt
	 * handlers.
	 */
	MUTEX_INIT(&classify_ptr->tcam_lock, NULL,
	    NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
	/* FCRAM and per-partition hash locks exist on Neptune only. */
	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
		MUTEX_INIT(&classify_ptr->fcram_lock, NULL,
		    NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
		for (partition = 0; partition < MAX_PARTITION; partition++) {
			MUTEX_INIT(&classify_ptr->hash_lock[partition], NULL,
			    NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
		}
	}

nxge_setup_mutexes_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "<== nxge_setup_mutexes status = %x", status));

	if (ddi_status != DDI_SUCCESS)
		status |= (NXGE_ERROR | NXGE_DDI_FAILED);

	return (status);
}

/*
 * nxge_destroy_mutexes
 *
 *	Tear down everything created by nxge_setup_mutexes(), in
 *	roughly the reverse order of initialization.
 */
static void
nxge_destroy_mutexes(p_nxge_t nxgep)
{
	int partition;
	nxge_classify_t *classify_ptr;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_mutexes"));
	RW_DESTROY(&nxgep->filter_lock);
	MUTEX_DESTROY(&nxgep->group_lock);
	MUTEX_DESTROY(&nxgep->mif_lock);
	MUTEX_DESTROY(&nxgep->ouraddr_lock);
	MUTEX_DESTROY(nxgep->genlock);

	classify_ptr = &nxgep->classifier;
	MUTEX_DESTROY(&classify_ptr->tcam_lock);

	/* Destroy all polling resources.
	 */
	MUTEX_DESTROY(&nxgep->poll_lock);
	cv_destroy(&nxgep->poll_cv);

	/* free data structures, based on HW type */
	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
		MUTEX_DESTROY(&classify_ptr->fcram_lock);
		for (partition = 0; partition < MAX_PARTITION; partition++) {
			MUTEX_DESTROY(&classify_ptr->hash_lock[partition]);
		}
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_mutexes"));
}

/*
 * nxge_init
 *
 *	Bring the hardware to an initialized state: allocate the DMA
 *	buffer pools, then initialize TXC and the TX/RX DMA channels,
 *	and (for service domains) the classifier, ZCP, IPP and MAC
 *	blocks, finally enabling interrupts.  Idempotent: returns
 *	immediately if STATE_HW_INITIALIZED is already set.
 *
 * Returns:
 *	NXGE_OK on success; otherwise the failing stage's status,
 *	with all earlier stages unwound via the fail labels.
 */
nxge_status_t
nxge_init(p_nxge_t nxgep)
{
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_init"));

	if (nxgep->drv_state & STATE_HW_INITIALIZED) {
		return (status);
	}

	/*
	 * Allocate system memory for the receive/transmit buffer blocks
	 * and receive/transmit descriptor rings.
	 */
	status = nxge_alloc_mem_pool(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "alloc mem failed\n"));
		goto nxge_init_fail1;
	}

	if (!isLDOMguest(nxgep)) {
		/*
		 * Initialize and enable the TXC registers.
		 * (Globally enable the Tx controller,
		 * enable the port, configure the dma channel bitmap,
		 * configure the max burst size).
		 */
		status = nxge_txc_init(nxgep);
		if (status != NXGE_OK) {
			NXGE_ERROR_MSG((nxgep,
			    NXGE_ERR_CTL, "init txc failed\n"));
			goto nxge_init_fail2;
		}
	}

	/*
	 * Initialize and enable TXDMA channels.
	 */
	status = nxge_init_txdma_channels(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init txdma failed\n"));
		goto nxge_init_fail3;
	}

	/*
	 * Initialize and enable RXDMA channels.
	 */
	status = nxge_init_rxdma_channels(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init rxdma failed\n"));
		goto nxge_init_fail4;
	}

	/*
	 * The guest domain is now done.
	 */
	if (isLDOMguest(nxgep)) {
		nxgep->drv_state |= STATE_HW_INITIALIZED;
		goto nxge_init_exit;
	}

	/*
	 * Initialize TCAM and FCRAM (Neptune).
	 */
	status = nxge_classify_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init classify failed\n"));
		goto nxge_init_fail5;
	}

	/*
	 * Initialize ZCP
	 */
	status = nxge_zcp_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init ZCP failed\n"));
		goto nxge_init_fail5;
	}

	/*
	 * Initialize IPP.
	 */
	status = nxge_ipp_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init IPP failed\n"));
		goto nxge_init_fail5;
	}

	/*
	 * Initialize the MAC block.
	 */
	status = nxge_mac_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init MAC failed\n"));
		goto nxge_init_fail5;
	}

	/* Register/enable the device interrupts with the framework. */
	nxge_intrs_enable(nxgep);

	/*
	 * Enable hardware interrupts.
	 */
	nxge_intr_hw_enable(nxgep);
	nxgep->drv_state |= STATE_HW_INITIALIZED;

	goto nxge_init_exit;

	/* Error unwind: fall through the stages in reverse order. */
nxge_init_fail5:
	nxge_uninit_rxdma_channels(nxgep);
nxge_init_fail4:
	nxge_uninit_txdma_channels(nxgep);
nxge_init_fail3:
	if (!isLDOMguest(nxgep)) {
		(void) nxge_txc_uninit(nxgep);
	}
nxge_init_fail2:
	nxge_free_mem_pool(nxgep);
nxge_init_fail1:
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "<== nxge_init status (failed) = 0x%08x", status));
	return (status);

nxge_init_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_init status = 0x%08x",
	    status));
	return (status);
}


/*
 * nxge_start_timer
 *
 *	Arm a one-shot timeout that calls func(nxgep) after 'msec'
 *	milliseconds.  No timer is armed while a suspend is pending.
 *
 * Returns:
 *	The timeout id, or NULL when the device is suspended.
 */
timeout_id_t
nxge_start_timer(p_nxge_t nxgep, fptrv_t func, int msec)
{
	if ((nxgep->suspended == 0) || (nxgep->suspended == DDI_RESUME)) {
		return (timeout(func, (caddr_t)nxgep,
		    drv_usectohz(1000 * msec)));
	}
	return (NULL);
}

/*
 * nxge_stop_timer
 *
 *	Cancel a timeout previously armed with nxge_start_timer().
 *	A NULL/zero id is ignored.
 */
/*ARGSUSED*/
void
nxge_stop_timer(p_nxge_t nxgep, timeout_id_t timerid)
{
	if (timerid) {
		(void) untimeout(timerid);
	}
}

/*
 * nxge_uninit
 *
 *	Undo nxge_init(): stop the timer and link monitor, disable
 *	interrupts and the MACs, quiesce and tear down the DMA
 *	channels, and release the buffer pools.
 */
void
nxge_uninit(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_uninit"));

	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "==> nxge_uninit: not initialized"));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "<== nxge_uninit"));
		return;
	}

	/* stop timer */
	if (nxgep->nxge_timerid) {
		nxge_stop_timer(nxgep, nxgep->nxge_timerid);
		nxgep->nxge_timerid = 0;
	}

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
	(void) nxge_intr_hw_disable(nxgep);

	/*
	 * Reset the receive MAC side.
	 */
	(void) nxge_rx_mac_disable(nxgep);

	/* Disable and soft reset the IPP */
	if (!isLDOMguest(nxgep))
		(void) nxge_ipp_disable(nxgep);

	/* Free classification resources */
	(void) nxge_classify_uninit(nxgep);

	/*
	 * Reset the transmit/receive DMA side.
	 */
	(void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_STOP);
	(void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP);

	nxge_uninit_txdma_channels(nxgep);
	nxge_uninit_rxdma_channels(nxgep);

	/*
	 * Reset the transmit MAC side.
	 */
	(void) nxge_tx_mac_disable(nxgep);

	nxge_free_mem_pool(nxgep);

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);

	nxgep->drv_state &= ~STATE_HW_INITIALIZED;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_uninit: "
	    "nxge_mblks_pending %d", nxge_mblks_pending));
}

/*
 * nxge_get64
 *
 *	Debug ioctl helper: read the 64-bit register whose offset is
 *	passed in the first 8 bytes at mp->b_rptr, and store the value
 *	read back into the same location.
 */
void
nxge_get64(p_nxge_t nxgep, p_mblk_t mp)
{
#if defined(__i386)
	size_t		reg;
#else
	uint64_t	reg;
#endif
	uint64_t	regdata;
	int		i, retry;

	bcopy((char *)mp->b_rptr, (char *)&reg, sizeof (uint64_t));
	regdata = 0;
	retry = 1;

	/* retry is fixed at 1, so the register is read exactly once. */
	for (i = 0; i < retry; i++) {
		NXGE_REG_RD64(nxgep->npi_handle, reg, &regdata);
	}
	bcopy((char *)&regdata, (char *)mp->b_rptr, sizeof (uint64_t));
}

/*
 * nxge_put64
 *
 *	Debug ioctl helper: mp->b_rptr carries two 64-bit words --
 *	a register offset followed by the value to write there.
 */
void
nxge_put64(p_nxge_t nxgep, p_mblk_t mp)
{
#if defined(__i386)
	size_t		reg;
#else
	uint64_t	reg;
#endif
	uint64_t	buf[2];

	bcopy((char *)mp->b_rptr, (char *)&buf[0], 2 * sizeof (uint64_t));
#if defined(__i386)
	reg = (size_t)buf[0];
#else
	reg = buf[0];
#endif

	NXGE_NPI_PIO_WRITE64(nxgep->npi_handle, reg, buf[1]);
}


/* Serializes message formatting in nxge_debug_msg(); lazily created. */
nxge_os_mutex_t nxgedebuglock;
int nxge_debug_init = 0;

/*
 * nxge_debug_msg
 *
 *	printf-style driver logging.  A message is emitted via
 *	cmn_err() when 'level' is enabled in the per-instance (or
 *	global) debug mask, or unconditionally for NXGE_NOTE and
 *	NXGE_ERR_CTL levels.
 */
/*ARGSUSED*/
/*VARARGS*/
void
nxge_debug_msg(p_nxge_t nxgep, uint64_t level, char *fmt, ...)
1713 { 1714 char msg_buffer[1048]; 1715 char prefix_buffer[32]; 1716 int instance; 1717 uint64_t debug_level; 1718 int cmn_level = CE_CONT; 1719 va_list ap; 1720 1721 if (nxgep && nxgep->nxge_debug_level != nxge_debug_level) { 1722 /* In case a developer has changed nxge_debug_level. */ 1723 if (nxgep->nxge_debug_level != nxge_debug_level) 1724 nxgep->nxge_debug_level = nxge_debug_level; 1725 } 1726 1727 debug_level = (nxgep == NULL) ? nxge_debug_level : 1728 nxgep->nxge_debug_level; 1729 1730 if ((level & debug_level) || 1731 (level == NXGE_NOTE) || 1732 (level == NXGE_ERR_CTL)) { 1733 /* do the msg processing */ 1734 if (nxge_debug_init == 0) { 1735 MUTEX_INIT(&nxgedebuglock, NULL, MUTEX_DRIVER, NULL); 1736 nxge_debug_init = 1; 1737 } 1738 1739 MUTEX_ENTER(&nxgedebuglock); 1740 1741 if ((level & NXGE_NOTE)) { 1742 cmn_level = CE_NOTE; 1743 } 1744 1745 if (level & NXGE_ERR_CTL) { 1746 cmn_level = CE_WARN; 1747 } 1748 1749 va_start(ap, fmt); 1750 (void) vsprintf(msg_buffer, fmt, ap); 1751 va_end(ap); 1752 if (nxgep == NULL) { 1753 instance = -1; 1754 (void) sprintf(prefix_buffer, "%s :", "nxge"); 1755 } else { 1756 instance = nxgep->instance; 1757 (void) sprintf(prefix_buffer, 1758 "%s%d :", "nxge", instance); 1759 } 1760 1761 MUTEX_EXIT(&nxgedebuglock); 1762 cmn_err(cmn_level, "!%s %s\n", 1763 prefix_buffer, msg_buffer); 1764 1765 } 1766 } 1767 1768 char * 1769 nxge_dump_packet(char *addr, int size) 1770 { 1771 uchar_t *ap = (uchar_t *)addr; 1772 int i; 1773 static char etherbuf[1024]; 1774 char *cp = etherbuf; 1775 char digits[] = "0123456789abcdef"; 1776 1777 if (!size) 1778 size = 60; 1779 1780 if (size > MAX_DUMP_SZ) { 1781 /* Dump the leading bytes */ 1782 for (i = 0; i < MAX_DUMP_SZ/2; i++) { 1783 if (*ap > 0x0f) 1784 *cp++ = digits[*ap >> 4]; 1785 *cp++ = digits[*ap++ & 0xf]; 1786 *cp++ = ':'; 1787 } 1788 for (i = 0; i < 20; i++) 1789 *cp++ = '.'; 1790 /* Dump the last MAX_DUMP_SZ/2 bytes */ 1791 ap = (uchar_t *)(addr + (size - MAX_DUMP_SZ/2)); 1792 for (i 
= 0; i < MAX_DUMP_SZ/2; i++) {
			if (*ap > 0x0f)
				*cp++ = digits[*ap >> 4];
			*cp++ = digits[*ap++ & 0xf];
			*cp++ = ':';
		}
	} else {
		for (i = 0; i < size; i++) {
			if (*ap > 0x0f)
				*cp++ = digits[*ap >> 4];
			*cp++ = digits[*ap++ & 0xf];
			*cp++ = ':';
		}
	}
	/* Overwrite the trailing ':' with the string terminator. */
	*--cp = 0;
	return (etherbuf);
}

#ifdef	NXGE_DEBUG
/*
 * nxge_test_map_regs
 *
 *	Debug-only sanity check: read back a few PCI config and PIO
 *	registers through the freshly created mappings and log them.
 */
static void
nxge_test_map_regs(p_nxge_t nxgep)
{
	ddi_acc_handle_t cfg_handle;
	p_pci_cfg_t	cfg_ptr;
	ddi_acc_handle_t dev_handle;
	char		*dev_ptr;
	ddi_acc_handle_t pci_config_handle;
	uint32_t	regval;
	int		i;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_test_map_regs"));

	dev_handle = nxgep->dev_regs->nxge_regh;
	dev_ptr = (char *)nxgep->dev_regs->nxge_regp;

	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
		cfg_handle = nxgep->dev_regs->nxge_pciregh;
		cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp;

		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "Neptune PCI regp cfg_ptr 0x%llx", (char *)cfg_ptr));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "Neptune PCI cfg_ptr vendor id ptr 0x%llx",
		    &cfg_ptr->vendorid));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "\tvendorid 0x%x devid 0x%x",
		    NXGE_PIO_READ16(cfg_handle, &cfg_ptr->vendorid, 0),
		    NXGE_PIO_READ16(cfg_handle, &cfg_ptr->devid, 0)));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "PCI BAR: base 0x%x base14 0x%x base 18 0x%x "
		    "bar1c 0x%x",
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base14, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base18, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base1c, 0)));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "\nNeptune PCI BAR: base20 0x%x base24 0x%x "
		    "base 28 0x%x bar2c 0x%x\n",
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base20, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base24, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base28, 0),
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base2c, 0)));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "\nNeptune PCI BAR: base30 0x%x\n",
		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base30, 0)));

		cfg_handle = nxgep->dev_regs->nxge_pciregh;
		cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp;
		/*
		 * NOTE(review): the fourth read below pairs cfg_handle
		 * with a dev_ptr address -- presumably dev_handle was
		 * intended; debug-only code, left as is.
		 */
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "first 0x%llx second 0x%llx third 0x%llx "
		    "last 0x%llx ",
		    NXGE_PIO_READ64(dev_handle,
		    (uint64_t *)(dev_ptr + 0), 0),
		    NXGE_PIO_READ64(dev_handle,
		    (uint64_t *)(dev_ptr + 8), 0),
		    NXGE_PIO_READ64(dev_handle,
		    (uint64_t *)(dev_ptr + 16), 0),
		    NXGE_PIO_READ64(cfg_handle,
		    (uint64_t *)(dev_ptr + 24), 0)));
	}
}

#endif

/*
 * nxge_suspend
 *
 *	DDI_SUSPEND support: quiesce interrupts and stop the device.
 */
static void
nxge_suspend(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_suspend"));

	nxge_intrs_disable(nxgep);
	nxge_destroy_dev(nxgep);

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_suspend"));
}

/*
 * nxge_resume
 *
 *	DDI_RESUME support: restart the DMA engines, MACs, link
 *	monitor and interrupts, then clear the suspended flag.
 *
 * Returns:
 *	NXGE_OK (status is not altered by the calls below).
 */
static nxge_status_t
nxge_resume(p_nxge_t nxgep)
{
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_resume"));

	nxgep->suspended = DDI_RESUME;
	(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
	(void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_START);
	(void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_START);
	(void) nxge_rx_mac_enable(nxgep);
	(void) nxge_tx_mac_enable(nxgep);
	nxge_intrs_enable(nxgep);
	nxgep->suspended = 0;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "<== nxge_resume status = 0x%x", status));
	return (status);
}

/*
 * nxge_setup_dev
 *
 *	Post-map device bring-up: initialize the link (transceiver)
 *	and verify the register access handle is still healthy.
 *
 * Returns:
 *	NXGE_OK, or NXGE_ERROR on a bad handle / failed link init.
 */
static nxge_status_t
nxge_setup_dev(p_nxge_t nxgep)
{
	nxge_status_t	status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_dev port %d",
	    nxgep->mac.portnum));

	status = nxge_link_init(nxgep);

	if (fm_check_acc_handle(nxgep->dev_regs->nxge_regh) != DDI_FM_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "port%d Bad register acc handle", nxgep->mac.portnum));
		status = NXGE_ERROR;
	}

	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " nxge_setup_dev status "
		    "(xcvr init 0x%08x)", status));
		/* NOTE: the goto falls to the next statement anyway. */
		goto nxge_setup_dev_exit;
	}

nxge_setup_dev_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "<== nxge_setup_dev port %d status = 0x%08x",
	    nxgep->mac.portnum, status));

	return (status);
}

/*
 * nxge_destroy_dev
 *
 *	Stop the link monitor and halt the hardware.
 */
static void
nxge_destroy_dev(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_dev"));

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);

	(void) nxge_hw_stop(nxgep);

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_dev"));
}

/*
 * nxge_setup_system_dma_pages
 *
 *	Determine the system/IOMMU page size, derive the receive
 *	default block size and block-size code from it, and probe the
 *	system DMA burst sizes using a throwaway DMA handle.
 *
 * Returns:
 *	NXGE_OK, or NXGE_ERROR | NXGE_DDI_FAILED on DDI failures.
 */
static nxge_status_t
nxge_setup_system_dma_pages(p_nxge_t nxgep)
{
	int ddi_status = DDI_SUCCESS;
	uint_t count;
	ddi_dma_cookie_t cookie;
	uint_t iommu_pagesize;
	nxge_status_t status = NXGE_OK;

	NXGE_ERROR_MSG((nxgep, DDI_CTL, "==> nxge_setup_system_dma_pages"));
	nxgep->sys_page_sz = ddi_ptob(nxgep->dip, (ulong_t)1);
	if (nxgep->niu_type != N2_NIU) {
		iommu_pagesize = dvma_pagesize(nxgep->dip);
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    " nxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
		    " default_block_size %d iommu_pagesize %d",
		    nxgep->sys_page_sz,
		    ddi_ptob(nxgep->dip, (ulong_t)1),
		    nxgep->rx_default_block_size,
		    iommu_pagesize));

		if (iommu_pagesize != 0) {
			if (nxgep->sys_page_sz == iommu_pagesize) {
				/* Cap the working page size at 16K. */
				if (iommu_pagesize > 0x4000)
					nxgep->sys_page_sz = 0x4000;
			} else {
				if (nxgep->sys_page_sz > iommu_pagesize)
					nxgep->sys_page_sz = iommu_pagesize;
			}
		}
	}
	nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1);
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "==> nxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
	    "default_block_size %d page mask %d",
	    nxgep->sys_page_sz,
	    ddi_ptob(nxgep->dip, (ulong_t)1),
	    nxgep->rx_default_block_size,
	    nxgep->sys_page_mask));


	switch (nxgep->sys_page_sz) {
	default:
		/* Unrecognized page size: fall back to 4K blocks. */
		nxgep->sys_page_sz = 0x1000;
		nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1);
		nxgep->rx_default_block_size = 0x1000;
		nxgep->rx_bksize_code = RBR_BKSIZE_4K;
		break;
	case 0x1000:
		nxgep->rx_default_block_size = 0x1000;
		nxgep->rx_bksize_code = RBR_BKSIZE_4K;
		break;
	case 0x2000:
		nxgep->rx_default_block_size = 0x2000;
		nxgep->rx_bksize_code = RBR_BKSIZE_8K;
		break;
	case 0x4000:
		nxgep->rx_default_block_size = 0x4000;
		nxgep->rx_bksize_code = RBR_BKSIZE_16K;
		break;
	case 0x8000:
		nxgep->rx_default_block_size = 0x8000;
		nxgep->rx_bksize_code = RBR_BKSIZE_32K;
		break;
	}

#ifndef USE_RX_BIG_BUF
	nxge_rx_dma_attr.dma_attr_align = nxgep->sys_page_sz;
#else
	nxgep->rx_default_block_size = 0x2000;
	nxgep->rx_bksize_code = RBR_BKSIZE_8K;
#endif
	/*
	 * Get the system DMA burst size.
	 * (The "nxge_get_soft_properties" labels below predate a
	 * rename of this function; they are local to it.)
	 */
	ddi_status = ddi_dma_alloc_handle(nxgep->dip, &nxge_tx_dma_attr,
	    DDI_DMA_DONTWAIT, 0,
	    &nxgep->dmasparehandle);
	if (ddi_status != DDI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "ddi_dma_alloc_handle: failed "
		    " status 0x%x", ddi_status));
		goto nxge_get_soft_properties_exit;
	}

	/*
	 * Bind the spare handle to an arbitrary kernel address just
	 * to learn which burst sizes the DMA engine supports.
	 */
	ddi_status = ddi_dma_addr_bind_handle(nxgep->dmasparehandle, NULL,
	    (caddr_t)nxgep->dmasparehandle,
	    sizeof (nxgep->dmasparehandle),
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, 0,
	    &cookie, &count);
	if (ddi_status != DDI_DMA_MAPPED) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "Binding spare handle to find system"
		    " burstsize failed."));
		ddi_status = DDI_FAILURE;
		goto nxge_get_soft_properties_fail1;
	}

	nxgep->sys_burst_sz = ddi_dma_burstsizes(nxgep->dmasparehandle);
	(void) ddi_dma_unbind_handle(nxgep->dmasparehandle);

nxge_get_soft_properties_fail1:
	ddi_dma_free_handle(&nxgep->dmasparehandle);

nxge_get_soft_properties_exit:

	if (ddi_status != DDI_SUCCESS)
		status |= (NXGE_ERROR | NXGE_DDI_FAILED);

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "<== nxge_setup_system_dma_pages status = 0x%08x", status));
	return (status);
}

/*
 * nxge_alloc_mem_pool
 *
 *	Allocate both the receive and transmit DMA memory pools.
 *	On a transmit-pool failure the receive pool is released again.
 *
 * Returns:
 *	NXGE_OK or NXGE_ERROR.
 */
static nxge_status_t
nxge_alloc_mem_pool(p_nxge_t nxgep)
{
	nxge_status_t	status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_alloc_mem_pool"));

	status = nxge_alloc_rx_mem_pool(nxgep);
	if (status != NXGE_OK) {
		return (NXGE_ERROR);
	}

	status = nxge_alloc_tx_mem_pool(nxgep);
	if (status != NXGE_OK) {
		nxge_free_rx_mem_pool(nxgep);
		return (NXGE_ERROR);
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_alloc_mem_pool"));
	return (NXGE_OK);
}

/*
 * nxge_free_mem_pool
 *
 *	Release both DMA memory pools.
 */
static void
nxge_free_mem_pool(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_mem_pool"));

nxge_free_rx_mem_pool(nxgep); 2094 nxge_free_tx_mem_pool(nxgep); 2095 2096 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_mem_pool")); 2097 } 2098 2099 nxge_status_t 2100 nxge_alloc_rx_mem_pool(p_nxge_t nxgep) 2101 { 2102 uint32_t rdc_max; 2103 p_nxge_dma_pt_cfg_t p_all_cfgp; 2104 p_nxge_hw_pt_cfg_t p_cfgp; 2105 p_nxge_dma_pool_t dma_poolp; 2106 p_nxge_dma_common_t *dma_buf_p; 2107 p_nxge_dma_pool_t dma_cntl_poolp; 2108 p_nxge_dma_common_t *dma_cntl_p; 2109 uint32_t *num_chunks; /* per dma */ 2110 nxge_status_t status = NXGE_OK; 2111 2112 uint32_t nxge_port_rbr_size; 2113 uint32_t nxge_port_rbr_spare_size; 2114 uint32_t nxge_port_rcr_size; 2115 uint32_t rx_cntl_alloc_size; 2116 2117 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_mem_pool")); 2118 2119 p_all_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config; 2120 p_cfgp = (p_nxge_hw_pt_cfg_t)&p_all_cfgp->hw_config; 2121 rdc_max = NXGE_MAX_RDCS; 2122 2123 /* 2124 * Allocate memory for the common DMA data structures. 2125 */ 2126 dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t), 2127 KM_SLEEP); 2128 dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 2129 sizeof (p_nxge_dma_common_t) * rdc_max, KM_SLEEP); 2130 2131 dma_cntl_poolp = (p_nxge_dma_pool_t) 2132 KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP); 2133 dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 2134 sizeof (p_nxge_dma_common_t) * rdc_max, KM_SLEEP); 2135 2136 num_chunks = (uint32_t *)KMEM_ZALLOC( 2137 sizeof (uint32_t) * rdc_max, KM_SLEEP); 2138 2139 /* 2140 * Assume that each DMA channel will be configured with 2141 * the default block size. 2142 * rbr block counts are modulo the batch count (16). 
2143 */ 2144 nxge_port_rbr_size = p_all_cfgp->rbr_size; 2145 nxge_port_rcr_size = p_all_cfgp->rcr_size; 2146 2147 if (!nxge_port_rbr_size) { 2148 nxge_port_rbr_size = NXGE_RBR_RBB_DEFAULT; 2149 } 2150 if (nxge_port_rbr_size % NXGE_RXDMA_POST_BATCH) { 2151 nxge_port_rbr_size = (NXGE_RXDMA_POST_BATCH * 2152 (nxge_port_rbr_size / NXGE_RXDMA_POST_BATCH + 1)); 2153 } 2154 2155 p_all_cfgp->rbr_size = nxge_port_rbr_size; 2156 nxge_port_rbr_spare_size = nxge_rbr_spare_size; 2157 2158 if (nxge_port_rbr_spare_size % NXGE_RXDMA_POST_BATCH) { 2159 nxge_port_rbr_spare_size = (NXGE_RXDMA_POST_BATCH * 2160 (nxge_port_rbr_spare_size / NXGE_RXDMA_POST_BATCH + 1)); 2161 } 2162 if (nxge_port_rbr_size > RBR_DEFAULT_MAX_BLKS) { 2163 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 2164 "nxge_alloc_rx_mem_pool: RBR size too high %d, " 2165 "set to default %d", 2166 nxge_port_rbr_size, RBR_DEFAULT_MAX_BLKS)); 2167 nxge_port_rbr_size = RBR_DEFAULT_MAX_BLKS; 2168 } 2169 if (nxge_port_rcr_size > RCR_DEFAULT_MAX) { 2170 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 2171 "nxge_alloc_rx_mem_pool: RCR too high %d, " 2172 "set to default %d", 2173 nxge_port_rcr_size, RCR_DEFAULT_MAX)); 2174 nxge_port_rcr_size = RCR_DEFAULT_MAX; 2175 } 2176 2177 /* 2178 * N2/NIU has limitation on the descriptor sizes (contiguous 2179 * memory allocation on data buffers to 4M (contig_mem_alloc) 2180 * and little endian for control buffers (must use the ddi/dki mem alloc 2181 * function). 
2182 */ 2183 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2184 if (nxgep->niu_type == N2_NIU) { 2185 nxge_port_rbr_spare_size = 0; 2186 if ((nxge_port_rbr_size > NXGE_NIU_CONTIG_RBR_MAX) || 2187 (!ISP2(nxge_port_rbr_size))) { 2188 nxge_port_rbr_size = NXGE_NIU_CONTIG_RBR_MAX; 2189 } 2190 if ((nxge_port_rcr_size > NXGE_NIU_CONTIG_RCR_MAX) || 2191 (!ISP2(nxge_port_rcr_size))) { 2192 nxge_port_rcr_size = NXGE_NIU_CONTIG_RCR_MAX; 2193 } 2194 } 2195 #endif 2196 2197 /* 2198 * Addresses of receive block ring, receive completion ring and the 2199 * mailbox must be all cache-aligned (64 bytes). 2200 */ 2201 rx_cntl_alloc_size = nxge_port_rbr_size + nxge_port_rbr_spare_size; 2202 rx_cntl_alloc_size *= (sizeof (rx_desc_t)); 2203 rx_cntl_alloc_size += (sizeof (rcr_entry_t) * nxge_port_rcr_size); 2204 rx_cntl_alloc_size += sizeof (rxdma_mailbox_t); 2205 2206 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_alloc_rx_mem_pool: " 2207 "nxge_port_rbr_size = %d nxge_port_rbr_spare_size = %d " 2208 "nxge_port_rcr_size = %d " 2209 "rx_cntl_alloc_size = %d", 2210 nxge_port_rbr_size, nxge_port_rbr_spare_size, 2211 nxge_port_rcr_size, 2212 rx_cntl_alloc_size)); 2213 2214 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2215 if (nxgep->niu_type == N2_NIU) { 2216 uint32_t rx_buf_alloc_size = (nxgep->rx_default_block_size * 2217 (nxge_port_rbr_size + nxge_port_rbr_spare_size)); 2218 2219 if (!ISP2(rx_buf_alloc_size)) { 2220 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2221 "==> nxge_alloc_rx_mem_pool: " 2222 " must be power of 2")); 2223 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 2224 goto nxge_alloc_rx_mem_pool_exit; 2225 } 2226 2227 if (rx_buf_alloc_size > (1 << 22)) { 2228 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2229 "==> nxge_alloc_rx_mem_pool: " 2230 " limit size to 4M")); 2231 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 2232 goto nxge_alloc_rx_mem_pool_exit; 2233 } 2234 2235 if (rx_cntl_alloc_size < 0x2000) { 2236 rx_cntl_alloc_size = 0x2000; 2237 } 2238 } 2239 #endif 2240 nxgep->nxge_port_rbr_size 
= nxge_port_rbr_size; 2241 nxgep->nxge_port_rcr_size = nxge_port_rcr_size; 2242 nxgep->nxge_port_rbr_spare_size = nxge_port_rbr_spare_size; 2243 nxgep->nxge_port_rx_cntl_alloc_size = rx_cntl_alloc_size; 2244 2245 dma_poolp->ndmas = p_cfgp->max_rdcs; 2246 dma_poolp->num_chunks = num_chunks; 2247 dma_poolp->buf_allocated = B_TRUE; 2248 nxgep->rx_buf_pool_p = dma_poolp; 2249 dma_poolp->dma_buf_pool_p = dma_buf_p; 2250 2251 dma_cntl_poolp->ndmas = p_cfgp->max_rdcs; 2252 dma_cntl_poolp->buf_allocated = B_TRUE; 2253 nxgep->rx_cntl_pool_p = dma_cntl_poolp; 2254 dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p; 2255 2256 /* Allocate the receive rings, too. */ 2257 nxgep->rx_rbr_rings = 2258 KMEM_ZALLOC(sizeof (rx_rbr_rings_t), KM_SLEEP); 2259 nxgep->rx_rbr_rings->rbr_rings = 2260 KMEM_ZALLOC(sizeof (p_rx_rbr_ring_t) * rdc_max, KM_SLEEP); 2261 nxgep->rx_rcr_rings = 2262 KMEM_ZALLOC(sizeof (rx_rcr_rings_t), KM_SLEEP); 2263 nxgep->rx_rcr_rings->rcr_rings = 2264 KMEM_ZALLOC(sizeof (p_rx_rcr_ring_t) * rdc_max, KM_SLEEP); 2265 nxgep->rx_mbox_areas_p = 2266 KMEM_ZALLOC(sizeof (rx_mbox_areas_t), KM_SLEEP); 2267 nxgep->rx_mbox_areas_p->rxmbox_areas = 2268 KMEM_ZALLOC(sizeof (p_rx_mbox_t) * rdc_max, KM_SLEEP); 2269 2270 nxgep->rx_rbr_rings->ndmas = nxgep->rx_rcr_rings->ndmas = 2271 p_cfgp->max_rdcs; 2272 2273 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2274 "<== nxge_alloc_rx_mem_pool:status 0x%08x", status)); 2275 2276 nxge_alloc_rx_mem_pool_exit: 2277 return (status); 2278 } 2279 2280 /* 2281 * nxge_alloc_rxb 2282 * 2283 * Allocate buffers for an RDC. 2284 * 2285 * Arguments: 2286 * nxgep 2287 * channel The channel to map into our kernel space. 
2288 * 2289 * Notes: 2290 * 2291 * NPI function calls: 2292 * 2293 * NXGE function calls: 2294 * 2295 * Registers accessed: 2296 * 2297 * Context: 2298 * 2299 * Taking apart: 2300 * 2301 * Open questions: 2302 * 2303 */ 2304 nxge_status_t 2305 nxge_alloc_rxb( 2306 p_nxge_t nxgep, 2307 int channel) 2308 { 2309 size_t rx_buf_alloc_size; 2310 nxge_status_t status = NXGE_OK; 2311 2312 nxge_dma_common_t **data; 2313 nxge_dma_common_t **control; 2314 uint32_t *num_chunks; 2315 2316 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rbb")); 2317 2318 /* 2319 * Allocate memory for the receive buffers and descriptor rings. 2320 * Replace these allocation functions with the interface functions 2321 * provided by the partition manager if/when they are available. 2322 */ 2323 2324 /* 2325 * Allocate memory for the receive buffer blocks. 2326 */ 2327 rx_buf_alloc_size = (nxgep->rx_default_block_size * 2328 (nxgep->nxge_port_rbr_size + nxgep->nxge_port_rbr_spare_size)); 2329 2330 data = &nxgep->rx_buf_pool_p->dma_buf_pool_p[channel]; 2331 num_chunks = &nxgep->rx_buf_pool_p->num_chunks[channel]; 2332 2333 if ((status = nxge_alloc_rx_buf_dma( 2334 nxgep, channel, data, rx_buf_alloc_size, 2335 nxgep->rx_default_block_size, num_chunks)) != NXGE_OK) { 2336 return (status); 2337 } 2338 2339 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_alloc_rxb(): " 2340 "dma %d dma_buf_p %llx &dma_buf_p %llx", channel, *data, data)); 2341 2342 /* 2343 * Allocate memory for descriptor rings and mailbox. 
2344 */ 2345 control = &nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel]; 2346 2347 if ((status = nxge_alloc_rx_cntl_dma( 2348 nxgep, channel, control, nxgep->nxge_port_rx_cntl_alloc_size)) 2349 != NXGE_OK) { 2350 nxge_free_rx_cntl_dma(nxgep, *control); 2351 (*data)->buf_alloc_state |= BUF_ALLOCATED_WAIT_FREE; 2352 nxge_free_rx_buf_dma(nxgep, *data, *num_chunks); 2353 return (status); 2354 } 2355 2356 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2357 "<== nxge_alloc_rx_mem_pool:status 0x%08x", status)); 2358 2359 return (status); 2360 } 2361 2362 void 2363 nxge_free_rxb( 2364 p_nxge_t nxgep, 2365 int channel) 2366 { 2367 nxge_dma_common_t *data; 2368 nxge_dma_common_t *control; 2369 uint32_t num_chunks; 2370 2371 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rbb")); 2372 2373 data = nxgep->rx_buf_pool_p->dma_buf_pool_p[channel]; 2374 num_chunks = nxgep->rx_buf_pool_p->num_chunks[channel]; 2375 nxge_free_rx_buf_dma(nxgep, data, num_chunks); 2376 2377 nxgep->rx_buf_pool_p->dma_buf_pool_p[channel] = 0; 2378 nxgep->rx_buf_pool_p->num_chunks[channel] = 0; 2379 2380 control = nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel]; 2381 nxge_free_rx_cntl_dma(nxgep, control); 2382 2383 nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel] = 0; 2384 2385 KMEM_FREE(data, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 2386 KMEM_FREE(control, sizeof (nxge_dma_common_t)); 2387 2388 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_alloc_rbb")); 2389 } 2390 2391 static void 2392 nxge_free_rx_mem_pool(p_nxge_t nxgep) 2393 { 2394 int rdc_max = NXGE_MAX_RDCS; 2395 2396 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_rx_mem_pool")); 2397 2398 if (!nxgep->rx_buf_pool_p || !nxgep->rx_buf_pool_p->buf_allocated) { 2399 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2400 "<== nxge_free_rx_mem_pool " 2401 "(null rx buf pool or buf not allocated")); 2402 return; 2403 } 2404 if (!nxgep->rx_cntl_pool_p || !nxgep->rx_cntl_pool_p->buf_allocated) { 2405 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2406 "<== nxge_free_rx_mem_pool " 2407 "(null rx cntl buf 
pool or cntl buf not allocated")); 2408 return; 2409 } 2410 2411 KMEM_FREE(nxgep->rx_cntl_pool_p->dma_buf_pool_p, 2412 sizeof (p_nxge_dma_common_t) * rdc_max); 2413 KMEM_FREE(nxgep->rx_cntl_pool_p, sizeof (nxge_dma_pool_t)); 2414 2415 KMEM_FREE(nxgep->rx_buf_pool_p->num_chunks, 2416 sizeof (uint32_t) * rdc_max); 2417 KMEM_FREE(nxgep->rx_buf_pool_p->dma_buf_pool_p, 2418 sizeof (p_nxge_dma_common_t) * rdc_max); 2419 KMEM_FREE(nxgep->rx_buf_pool_p, sizeof (nxge_dma_pool_t)); 2420 2421 nxgep->rx_buf_pool_p = 0; 2422 nxgep->rx_cntl_pool_p = 0; 2423 2424 KMEM_FREE(nxgep->rx_rbr_rings->rbr_rings, 2425 sizeof (p_rx_rbr_ring_t) * rdc_max); 2426 KMEM_FREE(nxgep->rx_rbr_rings, sizeof (rx_rbr_rings_t)); 2427 KMEM_FREE(nxgep->rx_rcr_rings->rcr_rings, 2428 sizeof (p_rx_rcr_ring_t) * rdc_max); 2429 KMEM_FREE(nxgep->rx_rcr_rings, sizeof (rx_rcr_rings_t)); 2430 KMEM_FREE(nxgep->rx_mbox_areas_p->rxmbox_areas, 2431 sizeof (p_rx_mbox_t) * rdc_max); 2432 KMEM_FREE(nxgep->rx_mbox_areas_p, sizeof (rx_mbox_areas_t)); 2433 2434 nxgep->rx_rbr_rings = 0; 2435 nxgep->rx_rcr_rings = 0; 2436 nxgep->rx_mbox_areas_p = 0; 2437 2438 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_rx_mem_pool")); 2439 } 2440 2441 2442 static nxge_status_t 2443 nxge_alloc_rx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel, 2444 p_nxge_dma_common_t *dmap, 2445 size_t alloc_size, size_t block_size, uint32_t *num_chunks) 2446 { 2447 p_nxge_dma_common_t rx_dmap; 2448 nxge_status_t status = NXGE_OK; 2449 size_t total_alloc_size; 2450 size_t allocated = 0; 2451 int i, size_index, array_size; 2452 boolean_t use_kmem_alloc = B_FALSE; 2453 2454 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_buf_dma")); 2455 2456 rx_dmap = (p_nxge_dma_common_t) 2457 KMEM_ZALLOC(sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK, 2458 KM_SLEEP); 2459 2460 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2461 " alloc_rx_buf_dma rdc %d asize %x bsize %x bbuf %llx ", 2462 dma_channel, alloc_size, block_size, dmap)); 2463 2464 total_alloc_size = alloc_size; 2465 2466 
#if defined(RX_USE_RECLAIM_POST) 2467 total_alloc_size = alloc_size + alloc_size/4; 2468 #endif 2469 2470 i = 0; 2471 size_index = 0; 2472 array_size = sizeof (alloc_sizes)/sizeof (size_t); 2473 while ((alloc_sizes[size_index] < alloc_size) && 2474 (size_index < array_size)) 2475 size_index++; 2476 if (size_index >= array_size) { 2477 size_index = array_size - 1; 2478 } 2479 2480 /* For Neptune, use kmem_alloc if the kmem flag is set. */ 2481 if (nxgep->niu_type != N2_NIU && nxge_use_kmem_alloc) { 2482 use_kmem_alloc = B_TRUE; 2483 #if defined(__i386) || defined(__amd64) 2484 size_index = 0; 2485 #endif 2486 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2487 "==> nxge_alloc_rx_buf_dma: " 2488 "Neptune use kmem_alloc() - size_index %d", 2489 size_index)); 2490 } 2491 2492 while ((allocated < total_alloc_size) && 2493 (size_index >= 0) && (i < NXGE_DMA_BLOCK)) { 2494 rx_dmap[i].dma_chunk_index = i; 2495 rx_dmap[i].block_size = block_size; 2496 rx_dmap[i].alength = alloc_sizes[size_index]; 2497 rx_dmap[i].orig_alength = rx_dmap[i].alength; 2498 rx_dmap[i].nblocks = alloc_sizes[size_index] / block_size; 2499 rx_dmap[i].dma_channel = dma_channel; 2500 rx_dmap[i].contig_alloc_type = B_FALSE; 2501 rx_dmap[i].kmem_alloc_type = B_FALSE; 2502 rx_dmap[i].buf_alloc_type = DDI_MEM_ALLOC; 2503 2504 /* 2505 * N2/NIU: data buffers must be contiguous as the driver 2506 * needs to call Hypervisor api to set up 2507 * logical pages. 
2508 */ 2509 if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) { 2510 rx_dmap[i].contig_alloc_type = B_TRUE; 2511 rx_dmap[i].buf_alloc_type = CONTIG_MEM_ALLOC; 2512 } else if (use_kmem_alloc) { 2513 /* For Neptune, use kmem_alloc */ 2514 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2515 "==> nxge_alloc_rx_buf_dma: " 2516 "Neptune use kmem_alloc()")); 2517 rx_dmap[i].kmem_alloc_type = B_TRUE; 2518 rx_dmap[i].buf_alloc_type = KMEM_ALLOC; 2519 } 2520 2521 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2522 "alloc_rx_buf_dma rdc %d chunk %d bufp %llx size %x " 2523 "i %d nblocks %d alength %d", 2524 dma_channel, i, &rx_dmap[i], block_size, 2525 i, rx_dmap[i].nblocks, 2526 rx_dmap[i].alength)); 2527 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 2528 &nxge_rx_dma_attr, 2529 rx_dmap[i].alength, 2530 &nxge_dev_buf_dma_acc_attr, 2531 DDI_DMA_READ | DDI_DMA_STREAMING, 2532 (p_nxge_dma_common_t)(&rx_dmap[i])); 2533 if (status != NXGE_OK) { 2534 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2535 "nxge_alloc_rx_buf_dma: Alloc Failed: " 2536 "dma %d size_index %d size requested %d", 2537 dma_channel, 2538 size_index, 2539 rx_dmap[i].alength)); 2540 size_index--; 2541 } else { 2542 rx_dmap[i].buf_alloc_state = BUF_ALLOCATED; 2543 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2544 " nxge_alloc_rx_buf_dma DONE alloc mem: " 2545 "dma %d dma_buf_p $%p kaddrp $%p alength %d " 2546 "buf_alloc_state %d alloc_type %d", 2547 dma_channel, 2548 &rx_dmap[i], 2549 rx_dmap[i].kaddrp, 2550 rx_dmap[i].alength, 2551 rx_dmap[i].buf_alloc_state, 2552 rx_dmap[i].buf_alloc_type)); 2553 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2554 " alloc_rx_buf_dma allocated rdc %d " 2555 "chunk %d size %x dvma %x bufp %llx kaddrp $%p", 2556 dma_channel, i, rx_dmap[i].alength, 2557 rx_dmap[i].ioaddr_pp, &rx_dmap[i], 2558 rx_dmap[i].kaddrp)); 2559 i++; 2560 allocated += alloc_sizes[size_index]; 2561 } 2562 } 2563 2564 if (allocated < total_alloc_size) { 2565 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2566 "==> nxge_alloc_rx_buf_dma: not enough for channel %d " 2567 
"allocated 0x%x requested 0x%x", 2568 dma_channel, 2569 allocated, total_alloc_size)); 2570 status = NXGE_ERROR; 2571 goto nxge_alloc_rx_mem_fail1; 2572 } 2573 2574 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2575 "==> nxge_alloc_rx_buf_dma: Allocated for channel %d " 2576 "allocated 0x%x requested 0x%x", 2577 dma_channel, 2578 allocated, total_alloc_size)); 2579 2580 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2581 " alloc_rx_buf_dma rdc %d allocated %d chunks", 2582 dma_channel, i)); 2583 *num_chunks = i; 2584 *dmap = rx_dmap; 2585 2586 goto nxge_alloc_rx_mem_exit; 2587 2588 nxge_alloc_rx_mem_fail1: 2589 KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 2590 2591 nxge_alloc_rx_mem_exit: 2592 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2593 "<== nxge_alloc_rx_buf_dma status 0x%08x", status)); 2594 2595 return (status); 2596 } 2597 2598 /*ARGSUSED*/ 2599 static void 2600 nxge_free_rx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap, 2601 uint32_t num_chunks) 2602 { 2603 int i; 2604 2605 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2606 "==> nxge_free_rx_buf_dma: # of chunks %d", num_chunks)); 2607 2608 if (dmap == 0) 2609 return; 2610 2611 for (i = 0; i < num_chunks; i++) { 2612 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2613 "==> nxge_free_rx_buf_dma: chunk %d dmap 0x%llx", 2614 i, dmap)); 2615 nxge_dma_free_rx_data_buf(dmap++); 2616 } 2617 2618 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_rx_buf_dma")); 2619 } 2620 2621 /*ARGSUSED*/ 2622 static nxge_status_t 2623 nxge_alloc_rx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel, 2624 p_nxge_dma_common_t *dmap, size_t size) 2625 { 2626 p_nxge_dma_common_t rx_dmap; 2627 nxge_status_t status = NXGE_OK; 2628 2629 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_cntl_dma")); 2630 2631 rx_dmap = (p_nxge_dma_common_t) 2632 KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP); 2633 2634 rx_dmap->contig_alloc_type = B_FALSE; 2635 rx_dmap->kmem_alloc_type = B_FALSE; 2636 2637 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 2638 &nxge_desc_dma_attr, 2639 size, 2640 
&nxge_dev_desc_dma_acc_attr, 2641 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 2642 rx_dmap); 2643 if (status != NXGE_OK) { 2644 goto nxge_alloc_rx_cntl_dma_fail1; 2645 } 2646 2647 *dmap = rx_dmap; 2648 goto nxge_alloc_rx_cntl_dma_exit; 2649 2650 nxge_alloc_rx_cntl_dma_fail1: 2651 KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t)); 2652 2653 nxge_alloc_rx_cntl_dma_exit: 2654 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2655 "<== nxge_alloc_rx_cntl_dma status 0x%08x", status)); 2656 2657 return (status); 2658 } 2659 2660 /*ARGSUSED*/ 2661 static void 2662 nxge_free_rx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap) 2663 { 2664 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_rx_cntl_dma")); 2665 2666 if (dmap == 0) 2667 return; 2668 2669 nxge_dma_mem_free(dmap); 2670 2671 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_rx_cntl_dma")); 2672 } 2673 2674 typedef struct { 2675 size_t tx_size; 2676 size_t cr_size; 2677 size_t threshhold; 2678 } nxge_tdc_sizes_t; 2679 2680 static 2681 nxge_status_t 2682 nxge_tdc_sizes( 2683 nxge_t *nxgep, 2684 nxge_tdc_sizes_t *sizes) 2685 { 2686 uint32_t threshhold; /* The bcopy() threshhold */ 2687 size_t tx_size; /* Transmit buffer size */ 2688 size_t cr_size; /* Completion ring size */ 2689 2690 /* 2691 * Assume that each DMA channel will be configured with the 2692 * default transmit buffer size for copying transmit data. 2693 * (If a packet is bigger than this, it will not be copied.) 
2694 */ 2695 if (nxgep->niu_type == N2_NIU) { 2696 threshhold = TX_BCOPY_SIZE; 2697 } else { 2698 threshhold = nxge_bcopy_thresh; 2699 } 2700 tx_size = nxge_tx_ring_size * threshhold; 2701 2702 cr_size = nxge_tx_ring_size * sizeof (tx_desc_t); 2703 cr_size += sizeof (txdma_mailbox_t); 2704 2705 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2706 if (nxgep->niu_type == N2_NIU) { 2707 if (!ISP2(tx_size)) { 2708 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2709 "==> nxge_tdc_sizes: Tx size" 2710 " must be power of 2")); 2711 return (NXGE_ERROR); 2712 } 2713 2714 if (tx_size > (1 << 22)) { 2715 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2716 "==> nxge_tdc_sizes: Tx size" 2717 " limited to 4M")); 2718 return (NXGE_ERROR); 2719 } 2720 2721 if (cr_size < 0x2000) 2722 cr_size = 0x2000; 2723 } 2724 #endif 2725 2726 sizes->threshhold = threshhold; 2727 sizes->tx_size = tx_size; 2728 sizes->cr_size = cr_size; 2729 2730 return (NXGE_OK); 2731 } 2732 /* 2733 * nxge_alloc_txb 2734 * 2735 * Allocate buffers for an TDC. 2736 * 2737 * Arguments: 2738 * nxgep 2739 * channel The channel to map into our kernel space. 2740 * 2741 * Notes: 2742 * 2743 * NPI function calls: 2744 * 2745 * NXGE function calls: 2746 * 2747 * Registers accessed: 2748 * 2749 * Context: 2750 * 2751 * Taking apart: 2752 * 2753 * Open questions: 2754 * 2755 */ 2756 nxge_status_t 2757 nxge_alloc_txb( 2758 p_nxge_t nxgep, 2759 int channel) 2760 { 2761 nxge_dma_common_t **dma_buf_p; 2762 nxge_dma_common_t **dma_cntl_p; 2763 uint32_t *num_chunks; 2764 nxge_status_t status = NXGE_OK; 2765 2766 nxge_tdc_sizes_t sizes; 2767 2768 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tbb")); 2769 2770 if (nxge_tdc_sizes(nxgep, &sizes) != NXGE_OK) 2771 return (NXGE_ERROR); 2772 2773 /* 2774 * Allocate memory for transmit buffers and descriptor rings. 2775 * Replace these allocation functions with the interface functions 2776 * provided by the partition manager Real Soon Now. 
2777 */ 2778 dma_buf_p = &nxgep->tx_buf_pool_p->dma_buf_pool_p[channel]; 2779 num_chunks = &nxgep->tx_buf_pool_p->num_chunks[channel]; 2780 2781 dma_cntl_p = &nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel]; 2782 2783 /* 2784 * Allocate memory for transmit buffers and descriptor rings. 2785 * Replace allocation functions with interface functions provided 2786 * by the partition manager when it is available. 2787 * 2788 * Allocate memory for the transmit buffer pool. 2789 */ 2790 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2791 "sizes: tx: %ld, cr:%ld, th:%ld", 2792 sizes.tx_size, sizes.cr_size, sizes.threshhold)); 2793 2794 *num_chunks = 0; 2795 status = nxge_alloc_tx_buf_dma(nxgep, channel, dma_buf_p, 2796 sizes.tx_size, sizes.threshhold, num_chunks); 2797 if (status != NXGE_OK) { 2798 cmn_err(CE_NOTE, "nxge_alloc_tx_buf_dma failed!"); 2799 return (status); 2800 } 2801 2802 /* 2803 * Allocate memory for descriptor rings and mailbox. 2804 */ 2805 status = nxge_alloc_tx_cntl_dma(nxgep, channel, dma_cntl_p, 2806 sizes.cr_size); 2807 if (status != NXGE_OK) { 2808 nxge_free_tx_buf_dma(nxgep, *dma_buf_p, *num_chunks); 2809 cmn_err(CE_NOTE, "nxge_alloc_tx_cntl_dma failed!"); 2810 return (status); 2811 } 2812 2813 return (NXGE_OK); 2814 } 2815 2816 void 2817 nxge_free_txb( 2818 p_nxge_t nxgep, 2819 int channel) 2820 { 2821 nxge_dma_common_t *data; 2822 nxge_dma_common_t *control; 2823 uint32_t num_chunks; 2824 2825 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_txb")); 2826 2827 data = nxgep->tx_buf_pool_p->dma_buf_pool_p[channel]; 2828 num_chunks = nxgep->tx_buf_pool_p->num_chunks[channel]; 2829 nxge_free_tx_buf_dma(nxgep, data, num_chunks); 2830 2831 nxgep->tx_buf_pool_p->dma_buf_pool_p[channel] = 0; 2832 nxgep->tx_buf_pool_p->num_chunks[channel] = 0; 2833 2834 control = nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel]; 2835 nxge_free_tx_cntl_dma(nxgep, control); 2836 2837 nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel] = 0; 2838 2839 KMEM_FREE(data, sizeof (nxge_dma_common_t) * 
NXGE_DMA_BLOCK); 2840 KMEM_FREE(control, sizeof (nxge_dma_common_t)); 2841 2842 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_txb")); 2843 } 2844 2845 /* 2846 * nxge_alloc_tx_mem_pool 2847 * 2848 * This function allocates all of the per-port TDC control data structures. 2849 * The per-channel (TDC) data structures are allocated when needed. 2850 * 2851 * Arguments: 2852 * nxgep 2853 * 2854 * Notes: 2855 * 2856 * Context: 2857 * Any domain 2858 */ 2859 nxge_status_t 2860 nxge_alloc_tx_mem_pool(p_nxge_t nxgep) 2861 { 2862 nxge_hw_pt_cfg_t *p_cfgp; 2863 nxge_dma_pool_t *dma_poolp; 2864 nxge_dma_common_t **dma_buf_p; 2865 nxge_dma_pool_t *dma_cntl_poolp; 2866 nxge_dma_common_t **dma_cntl_p; 2867 uint32_t *num_chunks; /* per dma */ 2868 int tdc_max; 2869 2870 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_alloc_tx_mem_pool")); 2871 2872 p_cfgp = &nxgep->pt_config.hw_config; 2873 tdc_max = NXGE_MAX_TDCS; 2874 2875 /* 2876 * Allocate memory for each transmit DMA channel. 2877 */ 2878 dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t), 2879 KM_SLEEP); 2880 dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 2881 sizeof (p_nxge_dma_common_t) * tdc_max, KM_SLEEP); 2882 2883 dma_cntl_poolp = (p_nxge_dma_pool_t) 2884 KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP); 2885 dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 2886 sizeof (p_nxge_dma_common_t) * tdc_max, KM_SLEEP); 2887 2888 if (nxge_tx_ring_size > TDC_DEFAULT_MAX) { 2889 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 2890 "nxge_alloc_tx_mem_pool: TDC too high %d, " 2891 "set to default %d", 2892 nxge_tx_ring_size, TDC_DEFAULT_MAX)); 2893 nxge_tx_ring_size = TDC_DEFAULT_MAX; 2894 } 2895 2896 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2897 /* 2898 * N2/NIU has limitation on the descriptor sizes (contiguous 2899 * memory allocation on data buffers to 4M (contig_mem_alloc) 2900 * and little endian for control buffers (must use the ddi/dki mem alloc 2901 * function). 
The transmit ring is limited to 8K (includes the 2902 * mailbox). 2903 */ 2904 if (nxgep->niu_type == N2_NIU) { 2905 if ((nxge_tx_ring_size > NXGE_NIU_CONTIG_TX_MAX) || 2906 (!ISP2(nxge_tx_ring_size))) { 2907 nxge_tx_ring_size = NXGE_NIU_CONTIG_TX_MAX; 2908 } 2909 } 2910 #endif 2911 2912 nxgep->nxge_port_tx_ring_size = nxge_tx_ring_size; 2913 2914 num_chunks = (uint32_t *)KMEM_ZALLOC( 2915 sizeof (uint32_t) * tdc_max, KM_SLEEP); 2916 2917 dma_poolp->ndmas = p_cfgp->tdc.owned; 2918 dma_poolp->num_chunks = num_chunks; 2919 dma_poolp->dma_buf_pool_p = dma_buf_p; 2920 nxgep->tx_buf_pool_p = dma_poolp; 2921 2922 dma_poolp->buf_allocated = B_TRUE; 2923 2924 dma_cntl_poolp->ndmas = p_cfgp->tdc.owned; 2925 dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p; 2926 nxgep->tx_cntl_pool_p = dma_cntl_poolp; 2927 2928 dma_cntl_poolp->buf_allocated = B_TRUE; 2929 2930 nxgep->tx_rings = 2931 KMEM_ZALLOC(sizeof (tx_rings_t), KM_SLEEP); 2932 nxgep->tx_rings->rings = 2933 KMEM_ZALLOC(sizeof (p_tx_ring_t) * tdc_max, KM_SLEEP); 2934 nxgep->tx_mbox_areas_p = 2935 KMEM_ZALLOC(sizeof (tx_mbox_areas_t), KM_SLEEP); 2936 nxgep->tx_mbox_areas_p->txmbox_areas_p = 2937 KMEM_ZALLOC(sizeof (p_tx_mbox_t) * tdc_max, KM_SLEEP); 2938 2939 nxgep->tx_rings->ndmas = p_cfgp->tdc.owned; 2940 2941 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 2942 "==> nxge_alloc_tx_mem_pool: ndmas %d poolp->ndmas %d", 2943 tdc_max, dma_poolp->ndmas)); 2944 2945 return (NXGE_OK); 2946 } 2947 2948 nxge_status_t 2949 nxge_alloc_tx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel, 2950 p_nxge_dma_common_t *dmap, size_t alloc_size, 2951 size_t block_size, uint32_t *num_chunks) 2952 { 2953 p_nxge_dma_common_t tx_dmap; 2954 nxge_status_t status = NXGE_OK; 2955 size_t total_alloc_size; 2956 size_t allocated = 0; 2957 int i, size_index, array_size; 2958 2959 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_buf_dma")); 2960 2961 tx_dmap = (p_nxge_dma_common_t) 2962 KMEM_ZALLOC(sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK, 2963 KM_SLEEP); 2964 2965 
total_alloc_size = alloc_size; 2966 i = 0; 2967 size_index = 0; 2968 array_size = sizeof (alloc_sizes) / sizeof (size_t); 2969 while ((alloc_sizes[size_index] < alloc_size) && 2970 (size_index < array_size)) 2971 size_index++; 2972 if (size_index >= array_size) { 2973 size_index = array_size - 1; 2974 } 2975 2976 while ((allocated < total_alloc_size) && 2977 (size_index >= 0) && (i < NXGE_DMA_BLOCK)) { 2978 2979 tx_dmap[i].dma_chunk_index = i; 2980 tx_dmap[i].block_size = block_size; 2981 tx_dmap[i].alength = alloc_sizes[size_index]; 2982 tx_dmap[i].orig_alength = tx_dmap[i].alength; 2983 tx_dmap[i].nblocks = alloc_sizes[size_index] / block_size; 2984 tx_dmap[i].dma_channel = dma_channel; 2985 tx_dmap[i].contig_alloc_type = B_FALSE; 2986 tx_dmap[i].kmem_alloc_type = B_FALSE; 2987 2988 /* 2989 * N2/NIU: data buffers must be contiguous as the driver 2990 * needs to call Hypervisor api to set up 2991 * logical pages. 2992 */ 2993 if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) { 2994 tx_dmap[i].contig_alloc_type = B_TRUE; 2995 } 2996 2997 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 2998 &nxge_tx_dma_attr, 2999 tx_dmap[i].alength, 3000 &nxge_dev_buf_dma_acc_attr, 3001 DDI_DMA_WRITE | DDI_DMA_STREAMING, 3002 (p_nxge_dma_common_t)(&tx_dmap[i])); 3003 if (status != NXGE_OK) { 3004 size_index--; 3005 } else { 3006 i++; 3007 allocated += alloc_sizes[size_index]; 3008 } 3009 } 3010 3011 if (allocated < total_alloc_size) { 3012 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3013 "==> nxge_alloc_tx_buf_dma: not enough channel %d: " 3014 "allocated 0x%x requested 0x%x", 3015 dma_channel, 3016 allocated, total_alloc_size)); 3017 status = NXGE_ERROR; 3018 goto nxge_alloc_tx_mem_fail1; 3019 } 3020 3021 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3022 "==> nxge_alloc_tx_buf_dma: Allocated for channel %d: " 3023 "allocated 0x%x requested 0x%x", 3024 dma_channel, 3025 allocated, total_alloc_size)); 3026 3027 *num_chunks = i; 3028 *dmap = tx_dmap; 3029 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3030 
"==> nxge_alloc_tx_buf_dma dmap 0x%016llx num chunks %d", 3031 *dmap, i)); 3032 goto nxge_alloc_tx_mem_exit; 3033 3034 nxge_alloc_tx_mem_fail1: 3035 KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 3036 3037 nxge_alloc_tx_mem_exit: 3038 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3039 "<== nxge_alloc_tx_buf_dma status 0x%08x", status)); 3040 3041 return (status); 3042 } 3043 3044 /*ARGSUSED*/ 3045 static void 3046 nxge_free_tx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap, 3047 uint32_t num_chunks) 3048 { 3049 int i; 3050 3051 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_tx_buf_dma")); 3052 3053 if (dmap == 0) 3054 return; 3055 3056 for (i = 0; i < num_chunks; i++) { 3057 nxge_dma_mem_free(dmap++); 3058 } 3059 3060 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_tx_buf_dma")); 3061 } 3062 3063 /*ARGSUSED*/ 3064 nxge_status_t 3065 nxge_alloc_tx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel, 3066 p_nxge_dma_common_t *dmap, size_t size) 3067 { 3068 p_nxge_dma_common_t tx_dmap; 3069 nxge_status_t status = NXGE_OK; 3070 3071 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_cntl_dma")); 3072 tx_dmap = (p_nxge_dma_common_t) 3073 KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP); 3074 3075 tx_dmap->contig_alloc_type = B_FALSE; 3076 tx_dmap->kmem_alloc_type = B_FALSE; 3077 3078 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 3079 &nxge_desc_dma_attr, 3080 size, 3081 &nxge_dev_desc_dma_acc_attr, 3082 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 3083 tx_dmap); 3084 if (status != NXGE_OK) { 3085 goto nxge_alloc_tx_cntl_dma_fail1; 3086 } 3087 3088 *dmap = tx_dmap; 3089 goto nxge_alloc_tx_cntl_dma_exit; 3090 3091 nxge_alloc_tx_cntl_dma_fail1: 3092 KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t)); 3093 3094 nxge_alloc_tx_cntl_dma_exit: 3095 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3096 "<== nxge_alloc_tx_cntl_dma status 0x%08x", status)); 3097 3098 return (status); 3099 } 3100 3101 /*ARGSUSED*/ 3102 static void 3103 nxge_free_tx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap) 3104 { 
3105 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_tx_cntl_dma")); 3106 3107 if (dmap == 0) 3108 return; 3109 3110 nxge_dma_mem_free(dmap); 3111 3112 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_tx_cntl_dma")); 3113 } 3114 3115 /* 3116 * nxge_free_tx_mem_pool 3117 * 3118 * This function frees all of the per-port TDC control data structures. 3119 * The per-channel (TDC) data structures are freed when the channel 3120 * is stopped. 3121 * 3122 * Arguments: 3123 * nxgep 3124 * 3125 * Notes: 3126 * 3127 * Context: 3128 * Any domain 3129 */ 3130 static void 3131 nxge_free_tx_mem_pool(p_nxge_t nxgep) 3132 { 3133 int tdc_max = NXGE_MAX_TDCS; 3134 3135 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_tx_mem_pool")); 3136 3137 if (!nxgep->tx_buf_pool_p || !nxgep->tx_buf_pool_p->buf_allocated) { 3138 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3139 "<== nxge_free_tx_mem_pool " 3140 "(null tx buf pool or buf not allocated")); 3141 return; 3142 } 3143 if (!nxgep->tx_cntl_pool_p || !nxgep->tx_cntl_pool_p->buf_allocated) { 3144 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3145 "<== nxge_free_tx_mem_pool " 3146 "(null tx cntl buf pool or cntl buf not allocated")); 3147 return; 3148 } 3149 3150 /* 1. Free the mailboxes. */ 3151 KMEM_FREE(nxgep->tx_mbox_areas_p->txmbox_areas_p, 3152 sizeof (p_tx_mbox_t) * tdc_max); 3153 KMEM_FREE(nxgep->tx_mbox_areas_p, sizeof (tx_mbox_areas_t)); 3154 3155 nxgep->tx_mbox_areas_p = 0; 3156 3157 /* 2. Free the transmit ring arrays. */ 3158 KMEM_FREE(nxgep->tx_rings->rings, 3159 sizeof (p_tx_ring_t) * tdc_max); 3160 KMEM_FREE(nxgep->tx_rings, sizeof (tx_rings_t)); 3161 3162 nxgep->tx_rings = 0; 3163 3164 /* 3. Free the completion ring data structures. */ 3165 KMEM_FREE(nxgep->tx_cntl_pool_p->dma_buf_pool_p, 3166 sizeof (p_nxge_dma_common_t) * tdc_max); 3167 KMEM_FREE(nxgep->tx_cntl_pool_p, sizeof (nxge_dma_pool_t)); 3168 3169 nxgep->tx_cntl_pool_p = 0; 3170 3171 /* 4. Free the data ring data structures. 
*/ 3172 KMEM_FREE(nxgep->tx_buf_pool_p->num_chunks, 3173 sizeof (uint32_t) * tdc_max); 3174 KMEM_FREE(nxgep->tx_buf_pool_p->dma_buf_pool_p, 3175 sizeof (p_nxge_dma_common_t) * tdc_max); 3176 KMEM_FREE(nxgep->tx_buf_pool_p, sizeof (nxge_dma_pool_t)); 3177 3178 nxgep->tx_buf_pool_p = 0; 3179 3180 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_tx_mem_pool")); 3181 } 3182 3183 /*ARGSUSED*/ 3184 static nxge_status_t 3185 nxge_dma_mem_alloc(p_nxge_t nxgep, dma_method_t method, 3186 struct ddi_dma_attr *dma_attrp, 3187 size_t length, ddi_device_acc_attr_t *acc_attr_p, uint_t xfer_flags, 3188 p_nxge_dma_common_t dma_p) 3189 { 3190 caddr_t kaddrp; 3191 int ddi_status = DDI_SUCCESS; 3192 boolean_t contig_alloc_type; 3193 boolean_t kmem_alloc_type; 3194 3195 contig_alloc_type = dma_p->contig_alloc_type; 3196 3197 if (contig_alloc_type && (nxgep->niu_type != N2_NIU)) { 3198 /* 3199 * contig_alloc_type for contiguous memory only allowed 3200 * for N2/NIU. 3201 */ 3202 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3203 "nxge_dma_mem_alloc: alloc type not allowed (%d)", 3204 dma_p->contig_alloc_type)); 3205 return (NXGE_ERROR | NXGE_DDI_FAILED); 3206 } 3207 3208 dma_p->dma_handle = NULL; 3209 dma_p->acc_handle = NULL; 3210 dma_p->kaddrp = dma_p->last_kaddrp = NULL; 3211 dma_p->first_ioaddr_pp = dma_p->last_ioaddr_pp = NULL; 3212 ddi_status = ddi_dma_alloc_handle(nxgep->dip, dma_attrp, 3213 DDI_DMA_DONTWAIT, NULL, &dma_p->dma_handle); 3214 if (ddi_status != DDI_SUCCESS) { 3215 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3216 "nxge_dma_mem_alloc:ddi_dma_alloc_handle failed.")); 3217 return (NXGE_ERROR | NXGE_DDI_FAILED); 3218 } 3219 3220 kmem_alloc_type = dma_p->kmem_alloc_type; 3221 3222 switch (contig_alloc_type) { 3223 case B_FALSE: 3224 switch (kmem_alloc_type) { 3225 case B_FALSE: 3226 ddi_status = ddi_dma_mem_alloc(dma_p->dma_handle, 3227 length, 3228 acc_attr_p, 3229 xfer_flags, 3230 DDI_DMA_DONTWAIT, 0, &kaddrp, &dma_p->alength, 3231 &dma_p->acc_handle); 3232 if (ddi_status != 
DDI_SUCCESS) { 3233 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3234 "nxge_dma_mem_alloc: " 3235 "ddi_dma_mem_alloc failed")); 3236 ddi_dma_free_handle(&dma_p->dma_handle); 3237 dma_p->dma_handle = NULL; 3238 return (NXGE_ERROR | NXGE_DDI_FAILED); 3239 } 3240 if (dma_p->alength < length) { 3241 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3242 "nxge_dma_mem_alloc:di_dma_mem_alloc " 3243 "< length.")); 3244 ddi_dma_mem_free(&dma_p->acc_handle); 3245 ddi_dma_free_handle(&dma_p->dma_handle); 3246 dma_p->acc_handle = NULL; 3247 dma_p->dma_handle = NULL; 3248 return (NXGE_ERROR); 3249 } 3250 3251 ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, 3252 NULL, 3253 kaddrp, dma_p->alength, xfer_flags, 3254 DDI_DMA_DONTWAIT, 3255 0, &dma_p->dma_cookie, &dma_p->ncookies); 3256 if (ddi_status != DDI_DMA_MAPPED) { 3257 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3258 "nxge_dma_mem_alloc: ddi_dma_addr_bind " 3259 "failed " 3260 "(staus 0x%x ncookies %d.)", ddi_status, 3261 dma_p->ncookies)); 3262 if (dma_p->acc_handle) { 3263 ddi_dma_mem_free(&dma_p->acc_handle); 3264 dma_p->acc_handle = NULL; 3265 } 3266 ddi_dma_free_handle(&dma_p->dma_handle); 3267 dma_p->dma_handle = NULL; 3268 return (NXGE_ERROR | NXGE_DDI_FAILED); 3269 } 3270 3271 if (dma_p->ncookies != 1) { 3272 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3273 "nxge_dma_mem_alloc:ddi_dma_addr_bind " 3274 "> 1 cookie" 3275 "(staus 0x%x ncookies %d.)", ddi_status, 3276 dma_p->ncookies)); 3277 if (dma_p->acc_handle) { 3278 ddi_dma_mem_free(&dma_p->acc_handle); 3279 dma_p->acc_handle = NULL; 3280 } 3281 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 3282 ddi_dma_free_handle(&dma_p->dma_handle); 3283 dma_p->dma_handle = NULL; 3284 return (NXGE_ERROR); 3285 } 3286 break; 3287 3288 case B_TRUE: 3289 kaddrp = KMEM_ALLOC(length, KM_NOSLEEP); 3290 if (kaddrp == NULL) { 3291 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3292 "nxge_dma_mem_alloc:ddi_dma_mem_alloc " 3293 "kmem alloc failed")); 3294 return (NXGE_ERROR); 3295 } 3296 3297 dma_p->alength = length; 3298 
ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, 3299 NULL, kaddrp, dma_p->alength, xfer_flags, 3300 DDI_DMA_DONTWAIT, 0, 3301 &dma_p->dma_cookie, &dma_p->ncookies); 3302 if (ddi_status != DDI_DMA_MAPPED) { 3303 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3304 "nxge_dma_mem_alloc:ddi_dma_addr_bind: " 3305 "(kmem_alloc) failed kaddrp $%p length %d " 3306 "(staus 0x%x (%d) ncookies %d.)", 3307 kaddrp, length, 3308 ddi_status, ddi_status, dma_p->ncookies)); 3309 KMEM_FREE(kaddrp, length); 3310 dma_p->acc_handle = NULL; 3311 ddi_dma_free_handle(&dma_p->dma_handle); 3312 dma_p->dma_handle = NULL; 3313 dma_p->kaddrp = NULL; 3314 return (NXGE_ERROR | NXGE_DDI_FAILED); 3315 } 3316 3317 if (dma_p->ncookies != 1) { 3318 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3319 "nxge_dma_mem_alloc:ddi_dma_addr_bind " 3320 "(kmem_alloc) > 1 cookie" 3321 "(staus 0x%x ncookies %d.)", ddi_status, 3322 dma_p->ncookies)); 3323 KMEM_FREE(kaddrp, length); 3324 dma_p->acc_handle = NULL; 3325 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 3326 ddi_dma_free_handle(&dma_p->dma_handle); 3327 dma_p->dma_handle = NULL; 3328 dma_p->kaddrp = NULL; 3329 return (NXGE_ERROR); 3330 } 3331 3332 dma_p->kaddrp = kaddrp; 3333 3334 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 3335 "nxge_dma_mem_alloc: kmem_alloc dmap $%p " 3336 "kaddr $%p alength %d", 3337 dma_p, 3338 kaddrp, 3339 dma_p->alength)); 3340 break; 3341 } 3342 break; 3343 3344 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 3345 case B_TRUE: 3346 kaddrp = (caddr_t)contig_mem_alloc(length); 3347 if (kaddrp == NULL) { 3348 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3349 "nxge_dma_mem_alloc:contig_mem_alloc failed.")); 3350 ddi_dma_free_handle(&dma_p->dma_handle); 3351 return (NXGE_ERROR | NXGE_DDI_FAILED); 3352 } 3353 3354 dma_p->alength = length; 3355 ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL, 3356 kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0, 3357 &dma_p->dma_cookie, &dma_p->ncookies); 3358 if (ddi_status != DDI_DMA_MAPPED) { 3359 
NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3360 "nxge_dma_mem_alloc:di_dma_addr_bind failed " 3361 "(status 0x%x ncookies %d.)", ddi_status, 3362 dma_p->ncookies)); 3363 3364 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3365 "==> nxge_dma_mem_alloc: (not mapped)" 3366 "length %lu (0x%x) " 3367 "free contig kaddrp $%p " 3368 "va_to_pa $%p", 3369 length, length, 3370 kaddrp, 3371 va_to_pa(kaddrp))); 3372 3373 3374 contig_mem_free((void *)kaddrp, length); 3375 ddi_dma_free_handle(&dma_p->dma_handle); 3376 3377 dma_p->dma_handle = NULL; 3378 dma_p->acc_handle = NULL; 3379 dma_p->alength = NULL; 3380 dma_p->kaddrp = NULL; 3381 3382 return (NXGE_ERROR | NXGE_DDI_FAILED); 3383 } 3384 3385 if (dma_p->ncookies != 1 || 3386 (dma_p->dma_cookie.dmac_laddress == NULL)) { 3387 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3388 "nxge_dma_mem_alloc:di_dma_addr_bind > 1 " 3389 "cookie or " 3390 "dmac_laddress is NULL $%p size %d " 3391 " (status 0x%x ncookies %d.)", 3392 ddi_status, 3393 dma_p->dma_cookie.dmac_laddress, 3394 dma_p->dma_cookie.dmac_size, 3395 dma_p->ncookies)); 3396 3397 contig_mem_free((void *)kaddrp, length); 3398 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 3399 ddi_dma_free_handle(&dma_p->dma_handle); 3400 3401 dma_p->alength = 0; 3402 dma_p->dma_handle = NULL; 3403 dma_p->acc_handle = NULL; 3404 dma_p->kaddrp = NULL; 3405 3406 return (NXGE_ERROR | NXGE_DDI_FAILED); 3407 } 3408 break; 3409 3410 #else 3411 case B_TRUE: 3412 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3413 "nxge_dma_mem_alloc: invalid alloc type for !sun4v")); 3414 return (NXGE_ERROR | NXGE_DDI_FAILED); 3415 #endif 3416 } 3417 3418 dma_p->kaddrp = kaddrp; 3419 dma_p->last_kaddrp = (unsigned char *)kaddrp + 3420 dma_p->alength - RXBUF_64B_ALIGNED; 3421 #if defined(__i386) 3422 dma_p->ioaddr_pp = 3423 (unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress; 3424 #else 3425 dma_p->ioaddr_pp = (unsigned char *)dma_p->dma_cookie.dmac_laddress; 3426 #endif 3427 dma_p->last_ioaddr_pp = 3428 #if defined(__i386) 3429 (unsigned char 
*)(uint32_t)dma_p->dma_cookie.dmac_laddress + 3430 #else 3431 (unsigned char *)dma_p->dma_cookie.dmac_laddress + 3432 #endif 3433 dma_p->alength - RXBUF_64B_ALIGNED; 3434 3435 NPI_DMA_ACC_HANDLE_SET(dma_p, dma_p->acc_handle); 3436 3437 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 3438 dma_p->orig_ioaddr_pp = 3439 (unsigned char *)dma_p->dma_cookie.dmac_laddress; 3440 dma_p->orig_alength = length; 3441 dma_p->orig_kaddrp = kaddrp; 3442 dma_p->orig_vatopa = (uint64_t)va_to_pa(kaddrp); 3443 #endif 3444 3445 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dma_mem_alloc: " 3446 "dma buffer allocated: dma_p $%p " 3447 "return dmac_ladress from cookie $%p cookie dmac_size %d " 3448 "dma_p->ioaddr_p $%p " 3449 "dma_p->orig_ioaddr_p $%p " 3450 "orig_vatopa $%p " 3451 "alength %d (0x%x) " 3452 "kaddrp $%p " 3453 "length %d (0x%x)", 3454 dma_p, 3455 dma_p->dma_cookie.dmac_laddress, dma_p->dma_cookie.dmac_size, 3456 dma_p->ioaddr_pp, 3457 dma_p->orig_ioaddr_pp, 3458 dma_p->orig_vatopa, 3459 dma_p->alength, dma_p->alength, 3460 kaddrp, 3461 length, length)); 3462 3463 return (NXGE_OK); 3464 } 3465 3466 static void 3467 nxge_dma_mem_free(p_nxge_dma_common_t dma_p) 3468 { 3469 if (dma_p->dma_handle != NULL) { 3470 if (dma_p->ncookies) { 3471 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 3472 dma_p->ncookies = 0; 3473 } 3474 ddi_dma_free_handle(&dma_p->dma_handle); 3475 dma_p->dma_handle = NULL; 3476 } 3477 3478 if (dma_p->acc_handle != NULL) { 3479 ddi_dma_mem_free(&dma_p->acc_handle); 3480 dma_p->acc_handle = NULL; 3481 NPI_DMA_ACC_HANDLE_SET(dma_p, NULL); 3482 } 3483 3484 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 3485 if (dma_p->contig_alloc_type && 3486 dma_p->orig_kaddrp && dma_p->orig_alength) { 3487 NXGE_DEBUG_MSG((NULL, DMA_CTL, "nxge_dma_mem_free: " 3488 "kaddrp $%p (orig_kaddrp $%p)" 3489 "mem type %d ", 3490 "orig_alength %d " 3491 "alength 0x%x (%d)", 3492 dma_p->kaddrp, 3493 dma_p->orig_kaddrp, 3494 dma_p->contig_alloc_type, 3495 dma_p->orig_alength, 3496 
dma_p->alength, dma_p->alength)); 3497 3498 contig_mem_free(dma_p->orig_kaddrp, dma_p->orig_alength); 3499 dma_p->orig_alength = NULL; 3500 dma_p->orig_kaddrp = NULL; 3501 dma_p->contig_alloc_type = B_FALSE; 3502 } 3503 #endif 3504 dma_p->kaddrp = NULL; 3505 dma_p->alength = NULL; 3506 } 3507 3508 static void 3509 nxge_dma_free_rx_data_buf(p_nxge_dma_common_t dma_p) 3510 { 3511 uint64_t kaddr; 3512 uint32_t buf_size; 3513 3514 NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_dma_free_rx_data_buf")); 3515 3516 if (dma_p->dma_handle != NULL) { 3517 if (dma_p->ncookies) { 3518 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 3519 dma_p->ncookies = 0; 3520 } 3521 ddi_dma_free_handle(&dma_p->dma_handle); 3522 dma_p->dma_handle = NULL; 3523 } 3524 3525 if (dma_p->acc_handle != NULL) { 3526 ddi_dma_mem_free(&dma_p->acc_handle); 3527 dma_p->acc_handle = NULL; 3528 NPI_DMA_ACC_HANDLE_SET(dma_p, NULL); 3529 } 3530 3531 NXGE_DEBUG_MSG((NULL, DMA_CTL, 3532 "==> nxge_dma_free_rx_data_buf: dmap $%p buf_alloc_state %d", 3533 dma_p, 3534 dma_p->buf_alloc_state)); 3535 3536 if (!(dma_p->buf_alloc_state & BUF_ALLOCATED_WAIT_FREE)) { 3537 NXGE_DEBUG_MSG((NULL, DMA_CTL, 3538 "<== nxge_dma_free_rx_data_buf: " 3539 "outstanding data buffers")); 3540 return; 3541 } 3542 3543 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 3544 if (dma_p->contig_alloc_type && 3545 dma_p->orig_kaddrp && dma_p->orig_alength) { 3546 NXGE_DEBUG_MSG((NULL, DMA_CTL, "nxge_dma_free_rx_data_buf: " 3547 "kaddrp $%p (orig_kaddrp $%p)" 3548 "mem type %d ", 3549 "orig_alength %d " 3550 "alength 0x%x (%d)", 3551 dma_p->kaddrp, 3552 dma_p->orig_kaddrp, 3553 dma_p->contig_alloc_type, 3554 dma_p->orig_alength, 3555 dma_p->alength, dma_p->alength)); 3556 3557 kaddr = (uint64_t)dma_p->orig_kaddrp; 3558 buf_size = dma_p->orig_alength; 3559 nxge_free_buf(CONTIG_MEM_ALLOC, kaddr, buf_size); 3560 dma_p->orig_alength = NULL; 3561 dma_p->orig_kaddrp = NULL; 3562 dma_p->contig_alloc_type = B_FALSE; 3563 dma_p->kaddrp = NULL; 3564 
dma_p->alength = NULL; 3565 return; 3566 } 3567 #endif 3568 3569 if (dma_p->kmem_alloc_type) { 3570 NXGE_DEBUG_MSG((NULL, DMA_CTL, 3571 "nxge_dma_free_rx_data_buf: free kmem " 3572 "kaddrp $%p (orig_kaddrp $%p)" 3573 "alloc type %d " 3574 "orig_alength %d " 3575 "alength 0x%x (%d)", 3576 dma_p->kaddrp, 3577 dma_p->orig_kaddrp, 3578 dma_p->kmem_alloc_type, 3579 dma_p->orig_alength, 3580 dma_p->alength, dma_p->alength)); 3581 #if defined(__i386) 3582 kaddr = (uint64_t)(uint32_t)dma_p->kaddrp; 3583 #else 3584 kaddr = (uint64_t)dma_p->kaddrp; 3585 #endif 3586 buf_size = dma_p->orig_alength; 3587 NXGE_DEBUG_MSG((NULL, DMA_CTL, 3588 "nxge_dma_free_rx_data_buf: free dmap $%p " 3589 "kaddr $%p buf_size %d", 3590 dma_p, 3591 kaddr, buf_size)); 3592 nxge_free_buf(KMEM_ALLOC, kaddr, buf_size); 3593 dma_p->alength = 0; 3594 dma_p->orig_alength = 0; 3595 dma_p->kaddrp = NULL; 3596 dma_p->kmem_alloc_type = B_FALSE; 3597 } 3598 3599 NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_dma_free_rx_data_buf")); 3600 } 3601 3602 /* 3603 * nxge_m_start() -- start transmitting and receiving. 3604 * 3605 * This function is called by the MAC layer when the first 3606 * stream is open to prepare the hardware ready for sending 3607 * and transmitting packets. 
 */
static int
nxge_m_start(void *arg)
{
	p_nxge_t nxgep = (p_nxge_t)arg;

	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_start"));

	MUTEX_ENTER(nxgep->genlock);
	/* (Re)initialize the hardware; EIO tells the MAC layer we failed. */
	if (nxge_init(nxgep) != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "<== nxge_m_start: initialization failed"));
		MUTEX_EXIT(nxgep->genlock);
		return (EIO);
	}

	/* Already started: skip timer setup and state transitions. */
	if (nxgep->nxge_mac_state == NXGE_MAC_STARTED)
		goto nxge_m_start_exit;
	/*
	 * Start timer to check the system error and tx hangs
	 * (LDOM guests use the HIO timer instead on sun4v).
	 */
	if (!isLDOMguest(nxgep))
		nxgep->nxge_timerid = nxge_start_timer(nxgep,
		    nxge_check_hw_state, NXGE_CHECK_TIMER);
#if defined(sun4v)
	else
		nxge_hio_start_timer(nxgep);
#endif

	nxgep->link_notify = B_TRUE;

	nxgep->nxge_mac_state = NXGE_MAC_STARTED;

nxge_m_start_exit:
	MUTEX_EXIT(nxgep->genlock);
	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_start"));
	return (0);
}

/*
 * nxge_m_stop(): stop transmitting and receiving.
 */
static void
nxge_m_stop(void *arg)
{
	p_nxge_t nxgep = (p_nxge_t)arg;

	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_stop"));

	/* Cancel the hardware-check timer before tearing the device down. */
	if (nxgep->nxge_timerid) {
		nxge_stop_timer(nxgep, nxgep->nxge_timerid);
		nxgep->nxge_timerid = 0;
	}

	MUTEX_ENTER(nxgep->genlock);
	nxgep->nxge_mac_state = NXGE_MAC_STOPPING;
	nxge_uninit(nxgep);

	nxgep->nxge_mac_state = NXGE_MAC_STOPPED;

	MUTEX_EXIT(nxgep->genlock);

	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_stop"));
}

/*
 * nxge_m_unicst() -- program the primary unicast MAC address.
 * Returns 0 on success, EINVAL if the hardware update fails.
 */
static int
nxge_m_unicst(void *arg, const uint8_t *macaddr)
{
	p_nxge_t nxgep = (p_nxge_t)arg;
	struct ether_addr addrp;

	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "==> nxge_m_unicst"));

	bcopy(macaddr, (uint8_t *)&addrp, ETHERADDRL);
	if (nxge_set_mac_addr(nxgep, &addrp)) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "<== nxge_m_unicst: set unitcast failed"));
		return (EINVAL);
	}

	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_m_unicst"));

	return (0);
}

/*
 * nxge_m_multicst() -- add (add == B_TRUE) or delete a multicast
 * address from the hardware filter.  Returns 0 on success, EINVAL
 * if the hardware update fails.
 */
static int
nxge_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
{
	p_nxge_t nxgep = (p_nxge_t)arg;
	struct ether_addr addrp;

	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
	    "==> nxge_m_multicst: add %d", add));

	bcopy(mca, (uint8_t *)&addrp, ETHERADDRL);
	if (add) {
		if (nxge_add_mcast_addr(nxgep, &addrp)) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "<== nxge_m_multicst: add multicast failed"));
			return (EINVAL);
		}
	} else {
		if (nxge_del_mcast_addr(nxgep, &addrp)) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "<== nxge_m_multicst: del multicast failed"));
			return (EINVAL);
		}
	}

	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_m_multicst"));

	return (0);
}

/*
 * nxge_m_promisc() -- enable (on == B_TRUE) or disable promiscuous mode.
 */
static int
nxge_m_promisc(void *arg, boolean_t on)
{
	p_nxge_t nxgep = (p_nxge_t)arg;

	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
	    "==> nxge_m_promisc: on %d", on));

	if (nxge_set_promisc(nxgep, on)) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "<== nxge_m_promisc: set promisc failed"));
		return (EINVAL);
	}

	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
	    "<== nxge_m_promisc: on %d", on));

	return (0);
}

/*
 * nxge_m_ioctl() -- STREAMS ioctl entry point.
 *
 * Unknown commands are nak'ed with EINVAL.  The LB_GET_* queries and
 * the driver-private debug/dump commands are exempt from the privilege
 * check; everything else requires network configuration privilege
 * (secpolicy_net_config) before being dispatched to the loopback or
 * hardware ioctl handlers.
 */
static void
nxge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
{
	p_nxge_t nxgep = (p_nxge_t)arg;
	struct iocblk *iocp;
	boolean_t need_privilege;
	int err;
	int cmd;

	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl"));

	iocp = (struct iocblk *)mp->b_rptr;
	iocp->ioc_error = 0;
	need_privilege = B_TRUE;
	cmd = iocp->ioc_cmd;
	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl: cmd 0x%08x", cmd));
	switch (cmd) {
	default:
		miocnak(wq, mp, 0, EINVAL);
		NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl: invalid"));
		return;

	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
	case LB_GET_MODE:
		need_privilege = B_FALSE;
		break;
	case LB_SET_MODE:
		break;


	case NXGE_GET_MII:
	case NXGE_PUT_MII:
	case NXGE_GET64:
	case NXGE_PUT64:
	case NXGE_GET_TX_RING_SZ:
	case NXGE_GET_TX_DESC:
	case NXGE_TX_SIDE_RESET:
	case NXGE_RX_SIDE_RESET:
	case NXGE_GLOBAL_RESET:
	case NXGE_RESET_MAC:
	case NXGE_TX_REGS_DUMP:
	case NXGE_RX_REGS_DUMP:
	case NXGE_INT_REGS_DUMP:
	case NXGE_VIR_INT_REGS_DUMP:
	case NXGE_PUT_TCAM:
	case NXGE_GET_TCAM:
	case NXGE_RTRACE:
	case NXGE_RDUMP:

		need_privilege = B_FALSE;
		break;
	case NXGE_INJECT_ERR:
		/*
		 * NOTE(review): the error is injected here, before the
		 * privilege check below runs (need_privilege is still
		 * B_TRUE for this command) -- confirm this ordering is
		 * intentional.
		 */
		cmn_err(CE_NOTE, "!nxge_m_ioctl: Inject error\n");
		nxge_err_inject(nxgep, wq, mp);
		break;
	}

	if (need_privilege) {
		err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
		if (err != 0) {
			miocnak(wq, mp, 0, err);
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "<== nxge_m_ioctl: no priv"));
			return;
		}
	}

	switch (cmd) {

	case LB_GET_MODE:
	case LB_SET_MODE:
	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
		nxge_loopback_ioctl(nxgep, wq, mp, iocp);
		break;

	case NXGE_GET_MII:
	case NXGE_PUT_MII:
	case NXGE_PUT_TCAM:
	case NXGE_GET_TCAM:
	case NXGE_GET64:
	case NXGE_PUT64:
	case NXGE_GET_TX_RING_SZ:
	case NXGE_GET_TX_DESC:
	case NXGE_TX_SIDE_RESET:
	case NXGE_RX_SIDE_RESET:
	case NXGE_GLOBAL_RESET:
	case NXGE_RESET_MAC:
	case NXGE_TX_REGS_DUMP:
	case NXGE_RX_REGS_DUMP:
	case NXGE_INT_REGS_DUMP:
	case NXGE_VIR_INT_REGS_DUMP:
		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_m_ioctl: cmd 0x%x", cmd));
		nxge_hw_ioctl(nxgep, wq, mp, iocp);
		break;
	}

	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl"));
}

extern void nxge_rx_hw_blank(void *arg, time_t ticks, uint_t count);

/*
 * nxge_m_resources() -- export the receive resources (the RCR rings
 * this instance owns, with their interrupt-blanking parameters) to
 * the MAC layer via mac_resource_add().
 */
static void
nxge_m_resources(void *arg)
{
	p_nxge_t nxgep = arg;
	mac_rx_fifo_t mrf;

	nxge_grp_set_t *set = &nxgep->rx_set;
	uint8_t rdc;

	rx_rcr_ring_t *ring;

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_m_resources"));

	MUTEX_ENTER(nxgep->genlock);

	if (set->owned.map == 0) {
		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
		    "nxge_m_resources: no receive resources"));
		goto nxge_m_resources_exit;
	}

	/*
	 * CR 6492541 Check to see if the drv_state has been initialized,
	 * if not, call nxge_init().
	 */
	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
		if (nxge_init(nxgep) != NXGE_OK)
			goto nxge_m_resources_exit;
	}

	mrf.mrf_type = MAC_RX_FIFO;
	mrf.mrf_blank = nxge_rx_hw_blank;
	mrf.mrf_arg = (void *)nxgep;

	/* Default interrupt blanking: timeout ticks and packet count. */
	mrf.mrf_normal_blank_time = 128;
	mrf.mrf_normal_pkt_count = 8;

	/*
	 * Export our receive resources to the MAC layer.
 */
	for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
		if ((1 << rdc) & set->owned.map) {
			ring = nxgep->rx_rcr_rings->rcr_rings[rdc];
			if (ring == 0) {
				/*
				 * This is a big deal only if we are
				 * *not* in an LDOMs environment.
				 */
				if (nxgep->environs == SOLARIS_DOMAIN) {
					cmn_err(CE_NOTE,
					    "==> nxge_m_resources: "
					    "ring %d == 0", rdc);
				}
				continue;
			}
			ring->rcr_mac_handle = mac_resource_add
			    (nxgep->mach, (mac_resource_t *)&mrf);

			NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
			    "==> nxge_m_resources: RDC %d RCR %p MAC handle %p",
			    rdc, ring, ring->rcr_mac_handle));
		}
	}

nxge_m_resources_exit:
	MUTEX_EXIT(nxgep->genlock);
	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_m_resources"));
}

/*
 * nxge_mmac_kstat_update() -- refresh the alternate-MAC kstats for one
 * slot.  slot is 1-based; the kstat avail_pool array is 0-based, hence
 * the slot-1 index.  The address bytes are copied in reverse order
 * (octet i comes from pool byte ETHERADDRL-1-i).
 */
void
nxge_mmac_kstat_update(p_nxge_t nxgep, mac_addr_slot_t slot, boolean_t factory)
{
	p_nxge_mmac_stats_t mmac_stats;
	int i;
	nxge_mmac_t *mmac_info;

	mmac_info = &nxgep->nxge_mmac_info;

	mmac_stats = &nxgep->statsp->mmac_stats;
	mmac_stats->mmac_max_cnt = mmac_info->num_mmac;
	mmac_stats->mmac_avail_cnt = mmac_info->naddrfree;

	for (i = 0; i < ETHERADDRL; i++) {
		if (factory) {
			mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i]
			    = mmac_info->factory_mac_pool[slot][
			    (ETHERADDRL-1) - i];
		} else {
			mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i]
			    = mmac_info->mac_pool[slot].addr[
			    (ETHERADDRL - 1) - i];
		}
	}
}

/*
 * nxge_altmac_set() -- Set an alternate MAC address.
 * Programs the alternate address entry and its host-info RDC table
 * entry, then enables comparison on that entry.  Returns 0 on
 * success, EIO on any NPI failure.
 */
static int
nxge_altmac_set(p_nxge_t nxgep, uint8_t *maddr, mac_addr_slot_t slot)
{
	uint8_t addrn;
	uint8_t portn;
	npi_mac_addr_t altmac;
	hostinfo_t mac_rdc;
	p_nxge_class_pt_cfg_t clscfgp;

	/* Pack the 6-byte address into three big-endian 16-bit words. */
	altmac.w2 = ((uint16_t)maddr[0] << 8) | ((uint16_t)maddr[1] & 0x0ff);
	altmac.w1 = ((uint16_t)maddr[2] << 8) | ((uint16_t)maddr[3] & 0x0ff);
	altmac.w0 = ((uint16_t)maddr[4] << 8) | ((uint16_t)maddr[5] & 0x0ff);

	portn = nxgep->mac.portnum;
	addrn = (uint8_t)slot - 1;

	if (npi_mac_altaddr_entry(nxgep->npi_handle, OP_SET, portn,
	    addrn, &altmac) != NPI_SUCCESS)
		return (EIO);

	/*
	 * Set the rdc table number for the host info entry
	 * for this mac address slot.
	 */
	clscfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
	mac_rdc.value = 0;
	mac_rdc.bits.w0.rdc_tbl_num = clscfgp->mac_host_info[addrn].rdctbl;
	mac_rdc.bits.w0.mac_pref = clscfgp->mac_host_info[addrn].mpr_npr;

	if (npi_mac_hostinfo_entry(nxgep->npi_handle, OP_SET,
	    nxgep->function_num, addrn, &mac_rdc) != NPI_SUCCESS) {
		return (EIO);
	}

	/*
	 * Enable comparison with the alternate MAC address.
	 * While the first alternate addr is enabled by bit 1 of register
	 * BMAC_ALTAD_CMPEN, it is enabled by bit 0 of register
	 * XMAC_ADDR_CMPEN, so slot needs to be converted to addrn
	 * accordingly before calling npi_mac_altaddr_entry.
	 */
	if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1)
		addrn = (uint8_t)slot - 1;
	else
		addrn = (uint8_t)slot;

	if (npi_mac_altaddr_enable(nxgep->npi_handle, portn, addrn)
	    != NPI_SUCCESS)
		return (EIO);

	return (0);
}

/*
 * nxge_m_mmac_add() - find an unused address slot, set the address
 * value to the one specified, enable the port to start filtering on
 * the new MAC address. Returns 0 on success.
 */
int
nxge_m_mmac_add(void *arg, mac_multi_addr_t *maddr)
{
	p_nxge_t nxgep = arg;
	mac_addr_slot_t slot;
	nxge_mmac_t *mmac_info;
	int err;
	nxge_status_t status;

	mutex_enter(nxgep->genlock);

	/*
	 * Make sure that nxge is initialized, if _start() has
	 * not been called.
 */
	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
		status = nxge_init(nxgep);
		if (status != NXGE_OK) {
			mutex_exit(nxgep->genlock);
			return (ENXIO);
		}
	}

	mmac_info = &nxgep->nxge_mmac_info;
	if (mmac_info->naddrfree == 0) {
		mutex_exit(nxgep->genlock);
		return (ENOSPC);
	}
	if (!mac_unicst_verify(nxgep->mach, maddr->mma_addr,
	    maddr->mma_addrlen)) {
		mutex_exit(nxgep->genlock);
		return (EINVAL);
	}
	/*
	 * Search for the first available slot. Because naddrfree
	 * is not zero, we are guaranteed to find one.
	 * Slot 0 is for unique (primary) MAC. The first alternate
	 * MAC slot is slot 1.
	 * Each of the first two ports of Neptune has 16 alternate
	 * MAC slots but only the first 7 (of 15) slots have assigned factory
	 * MAC addresses. We first search among the slots without bundled
	 * factory MACs. If we fail to find one in that range, then we
	 * search the slots with bundled factory MACs. A factory MAC
	 * will be wasted while the slot is used with a user MAC address.
	 * But the slot could be used by factory MAC again after calling
	 * nxge_m_mmac_remove and nxge_m_mmac_reserve.
	 */
	if (mmac_info->num_factory_mmac < mmac_info->num_mmac) {
		/* Prefer slots with no bundled factory MAC. */
		for (slot = mmac_info->num_factory_mmac + 1;
		    slot <= mmac_info->num_mmac; slot++) {
			if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED))
				break;
		}
		if (slot > mmac_info->num_mmac) {
			/* None free there; fall back to factory-MAC slots. */
			for (slot = 1; slot <= mmac_info->num_factory_mmac;
			    slot++) {
				if (!(mmac_info->mac_pool[slot].flags
				    & MMAC_SLOT_USED))
					break;
			}
		}
	} else {
		for (slot = 1; slot <= mmac_info->num_mmac; slot++) {
			if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED))
				break;
		}
	}
	ASSERT(slot <= mmac_info->num_mmac);
	if ((err = nxge_altmac_set(nxgep, maddr->mma_addr, slot)) != 0) {
		mutex_exit(nxgep->genlock);
		return (err);
	}
	/* Record the user MAC in the pool; it is not a factory address. */
	bcopy(maddr->mma_addr, mmac_info->mac_pool[slot].addr, ETHERADDRL);
	mmac_info->mac_pool[slot].flags |= MMAC_SLOT_USED;
	mmac_info->mac_pool[slot].flags &= ~MMAC_VENDOR_ADDR;
	mmac_info->naddrfree--;
	nxge_mmac_kstat_update(nxgep, slot, B_FALSE);

	maddr->mma_slot = slot;

	mutex_exit(nxgep->genlock);
	return (0);
}

/*
 * This function reserves an unused slot and programs the slot and the HW
 * with a factory mac address.
 */
static int
nxge_m_mmac_reserve(void *arg, mac_multi_addr_t *maddr)
{
	p_nxge_t nxgep = arg;
	mac_addr_slot_t slot;
	nxge_mmac_t *mmac_info;
	int err;
	nxge_status_t status;

	mutex_enter(nxgep->genlock);

	/*
	 * Make sure that nxge is initialized, if _start() has
	 * not been called.
	 */
	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
		status = nxge_init(nxgep);
		if (status != NXGE_OK) {
			mutex_exit(nxgep->genlock);
			return (ENXIO);
		}
	}

	mmac_info = &nxgep->nxge_mmac_info;
	if (mmac_info->naddrfree == 0) {
		mutex_exit(nxgep->genlock);
		return (ENOSPC);
	}

	slot = maddr->mma_slot;
	if (slot == -1) {  /* -1: Take the first available slot */
		for (slot = 1; slot <= mmac_info->num_factory_mmac; slot++) {
			if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED))
				break;
		}
		if (slot > mmac_info->num_factory_mmac) {
			mutex_exit(nxgep->genlock);
			return (ENOSPC);
		}
	}
	if (slot < 1 || slot > mmac_info->num_factory_mmac) {
		/*
		 * Do not support factory MAC at a slot greater than
		 * num_factory_mmac even when there are available factory
		 * MAC addresses because the alternate MACs are bundled with
		 * slot[1] through slot[num_factory_mmac]
		 */
		mutex_exit(nxgep->genlock);
		return (EINVAL);
	}
	if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) {
		mutex_exit(nxgep->genlock);
		return (EBUSY);
	}
	/* Verify the address to be reserved */
	if (!mac_unicst_verify(nxgep->mach,
	    mmac_info->factory_mac_pool[slot], ETHERADDRL)) {
		mutex_exit(nxgep->genlock);
		return (EINVAL);
	}
	if (err = nxge_altmac_set(nxgep,
	    mmac_info->factory_mac_pool[slot], slot)) {
		mutex_exit(nxgep->genlock);
		return (err);
	}
	bcopy(mmac_info->factory_mac_pool[slot], maddr->mma_addr, ETHERADDRL);
	mmac_info->mac_pool[slot].flags |= MMAC_SLOT_USED | MMAC_VENDOR_ADDR;
	mmac_info->naddrfree--;

	nxge_mmac_kstat_update(nxgep, slot, B_TRUE);
	mutex_exit(nxgep->genlock);

	/* Pass info back to the caller */
	maddr->mma_slot = slot;
	maddr->mma_addrlen = ETHERADDRL;
	maddr->mma_flags = MMAC_SLOT_USED | MMAC_VENDOR_ADDR;

	return (0);
}

/*
 * Remove the specified mac address and update the HW not to filter
 * the mac address anymore.
 */
int
nxge_m_mmac_remove(void *arg, mac_addr_slot_t slot)
{
	p_nxge_t nxgep = arg;
	nxge_mmac_t *mmac_info;
	uint8_t addrn;
	uint8_t portn;
	int err = 0;
	nxge_status_t status;

	mutex_enter(nxgep->genlock);

	/*
	 * Make sure that nxge is initialized, if _start() has
	 * not been called.
	 */
	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
		status = nxge_init(nxgep);
		if (status != NXGE_OK) {
			mutex_exit(nxgep->genlock);
			return (ENXIO);
		}
	}

	mmac_info = &nxgep->nxge_mmac_info;
	if (slot < 1 || slot > mmac_info->num_mmac) {
		mutex_exit(nxgep->genlock);
		return (EINVAL);
	}

	/* XMAC ports use a 0-based compare-enable index (see altmac_set). */
	portn = nxgep->mac.portnum;
	if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1)
		addrn = (uint8_t)slot - 1;
	else
		addrn = (uint8_t)slot;

	if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) {
		if (npi_mac_altaddr_disable(nxgep->npi_handle, portn, addrn)
		    == NPI_SUCCESS) {
			mmac_info->naddrfree++;
			mmac_info->mac_pool[slot].flags &= ~MMAC_SLOT_USED;
			/*
			 * Regardless if the MAC we just stopped filtering
			 * is a user addr or a factory addr, we must set
			 * the MMAC_VENDOR_ADDR flag if this slot has an
			 * associated factory MAC to indicate that a factory
			 * MAC is available.
			 */
			if (slot <= mmac_info->num_factory_mmac) {
				mmac_info->mac_pool[slot].flags
				    |= MMAC_VENDOR_ADDR;
			}
			/*
			 * Clear mac_pool[slot].addr so that kstat shows 0
			 * alternate MAC address if the slot is not used.
			 * (But nxge_m_mmac_get returns the factory MAC even
			 * when the slot is not used!)
4231 */ 4232 bzero(mmac_info->mac_pool[slot].addr, ETHERADDRL); 4233 nxge_mmac_kstat_update(nxgep, slot, B_FALSE); 4234 } else { 4235 err = EIO; 4236 } 4237 } else { 4238 err = EINVAL; 4239 } 4240 4241 mutex_exit(nxgep->genlock); 4242 return (err); 4243 } 4244 4245 /* 4246 * Modify a mac address added by nxge_m_mmac_add or nxge_m_mmac_reserve(). 4247 */ 4248 static int 4249 nxge_m_mmac_modify(void *arg, mac_multi_addr_t *maddr) 4250 { 4251 p_nxge_t nxgep = arg; 4252 mac_addr_slot_t slot; 4253 nxge_mmac_t *mmac_info; 4254 int err = 0; 4255 nxge_status_t status; 4256 4257 if (!mac_unicst_verify(nxgep->mach, maddr->mma_addr, 4258 maddr->mma_addrlen)) 4259 return (EINVAL); 4260 4261 slot = maddr->mma_slot; 4262 4263 mutex_enter(nxgep->genlock); 4264 4265 /* 4266 * Make sure that nxge is initialized, if _start() has 4267 * not been called. 4268 */ 4269 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 4270 status = nxge_init(nxgep); 4271 if (status != NXGE_OK) { 4272 mutex_exit(nxgep->genlock); 4273 return (ENXIO); 4274 } 4275 } 4276 4277 mmac_info = &nxgep->nxge_mmac_info; 4278 if (slot < 1 || slot > mmac_info->num_mmac) { 4279 mutex_exit(nxgep->genlock); 4280 return (EINVAL); 4281 } 4282 if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) { 4283 if ((err = nxge_altmac_set(nxgep, maddr->mma_addr, slot)) 4284 != 0) { 4285 bcopy(maddr->mma_addr, mmac_info->mac_pool[slot].addr, 4286 ETHERADDRL); 4287 /* 4288 * Assume that the MAC passed down from the caller 4289 * is not a factory MAC address (The user should 4290 * call mmac_remove followed by mmac_reserve if 4291 * he wants to use the factory MAC for this slot). 4292 */ 4293 mmac_info->mac_pool[slot].flags &= ~MMAC_VENDOR_ADDR; 4294 nxge_mmac_kstat_update(nxgep, slot, B_FALSE); 4295 } 4296 } else { 4297 err = EINVAL; 4298 } 4299 mutex_exit(nxgep->genlock); 4300 return (err); 4301 } 4302 4303 /* 4304 * nxge_m_mmac_get() - Get the MAC address and other information 4305 * related to the slot. 
 * mma_flags should be set to 0 in the call.
 * Note: although kstat shows MAC address as zero when a slot is
 * not used, Crossbow expects nxge_m_mmac_get to copy factory MAC
 * to the caller as long as the slot is not using a user MAC address.
 * The following table shows the rules,
 *
 *                                USED    VENDOR        mma_addr
 * ------------------------------------------------------------
 * (1) Slot uses a user MAC:      yes     no            user MAC
 * (2) Slot uses a factory MAC:   yes     yes           factory MAC
 * (3) Slot is not used but is
 *     factory MAC capable:       no      yes           factory MAC
 * (4) Slot is not used and is
 *     not factory MAC capable:   no      no            0
 * ------------------------------------------------------------
 */
static int
nxge_m_mmac_get(void *arg, mac_multi_addr_t *maddr)
{
	nxge_t *nxgep = arg;
	mac_addr_slot_t slot;
	nxge_mmac_t *mmac_info;
	nxge_status_t status;

	slot = maddr->mma_slot;

	mutex_enter(nxgep->genlock);

	/*
	 * Make sure that nxge is initialized, if _start() has
	 * not been called.
	 */
	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
		status = nxge_init(nxgep);
		if (status != NXGE_OK) {
			mutex_exit(nxgep->genlock);
			return (ENXIO);
		}
	}

	mmac_info = &nxgep->nxge_mmac_info;

	if (slot < 1 || slot > mmac_info->num_mmac) {
		mutex_exit(nxgep->genlock);
		return (EINVAL);
	}
	maddr->mma_flags = 0;
	if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED)
		maddr->mma_flags |= MMAC_SLOT_USED;

	/* See the rules table above: vendor slots return the factory MAC. */
	if (mmac_info->mac_pool[slot].flags & MMAC_VENDOR_ADDR) {
		maddr->mma_flags |= MMAC_VENDOR_ADDR;
		bcopy(mmac_info->factory_mac_pool[slot],
		    maddr->mma_addr, ETHERADDRL);
		maddr->mma_addrlen = ETHERADDRL;
	} else {
		if (maddr->mma_flags & MMAC_SLOT_USED) {
			bcopy(mmac_info->mac_pool[slot].addr,
			    maddr->mma_addr, ETHERADDRL);
			maddr->mma_addrlen = ETHERADDRL;
		} else {
			bzero(maddr->mma_addr, ETHERADDRL);
			maddr->mma_addrlen = 0;
		}
	}
	mutex_exit(nxgep->genlock);
	return (0);
}

/*
 * nxge_m_getcapab() -- report driver capabilities (checksum, polling,
 * multiple MAC addresses, LSO, and -- on sun4v service domains --
 * rings and shares) to the MAC layer.  Returns B_FALSE for any
 * capability not supported in the current configuration.
 */
static boolean_t
nxge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
{
	nxge_t *nxgep = arg;
	uint32_t *txflags = cap_data;
	multiaddress_capab_t *mmacp = cap_data;

	switch (cap) {
	case MAC_CAPAB_HCKSUM:
		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_m_getcapab: checksum %d", nxge_cksum_offload));
		/* See the nxge_cksum_offload workaround notes at file top. */
		if (nxge_cksum_offload <= 1) {
			*txflags = HCKSUM_INET_PARTIAL;
		}
		break;

	case MAC_CAPAB_POLL:
		/*
		 * There's nothing for us to fill in, simply returning
		 * B_TRUE stating that we support polling is sufficient.
		 */
		break;

	case MAC_CAPAB_MULTIADDRESS:
		mmacp = (multiaddress_capab_t *)cap_data;
		mutex_enter(nxgep->genlock);

		mmacp->maddr_naddr = nxgep->nxge_mmac_info.num_mmac;
		mmacp->maddr_naddrfree = nxgep->nxge_mmac_info.naddrfree;
		mmacp->maddr_flag = 0; /* 0 is required by PSARC2006/265 */
		/*
		 * maddr_handle is driver's private data, passed back to
		 * entry point functions as arg.
		 */
		mmacp->maddr_handle = nxgep;
		mmacp->maddr_add = nxge_m_mmac_add;
		mmacp->maddr_remove = nxge_m_mmac_remove;
		mmacp->maddr_modify = nxge_m_mmac_modify;
		mmacp->maddr_get = nxge_m_mmac_get;
		mmacp->maddr_reserve = nxge_m_mmac_reserve;

		mutex_exit(nxgep->genlock);
		break;

	case MAC_CAPAB_LSO: {
		mac_capab_lso_t *cap_lso = cap_data;

		if (nxgep->soft_lso_enable) {
			/* LSO is only advertised with the HW cksum modes. */
			if (nxge_cksum_offload <= 1) {
				cap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4;
				if (nxge_lso_max > NXGE_LSO_MAXLEN) {
					nxge_lso_max = NXGE_LSO_MAXLEN;
				}
				cap_lso->lso_basic_tcp_ipv4.lso_max =
				    nxge_lso_max;
			}
			break;
		} else {
			return (B_FALSE);
		}
	}

#if defined(sun4v)
	case MAC_CAPAB_RINGS: {
		mac_capab_rings_t *mrings = (mac_capab_rings_t *)cap_data;

		/*
		 * Only the service domain driver responds to
		 * this capability request.
		 */
		if (isLDOMservice(nxgep)) {
			mrings->mr_handle = (void *)nxgep;

			/*
			 * No dynamic allocation of groups and
			 * rings at this time. Shares dictate the
			 * configuration.
 */
			mrings->mr_gadd_ring = NULL;
			mrings->mr_grem_ring = NULL;
			mrings->mr_rget = NULL;
			mrings->mr_gget = nxge_hio_group_get;

			if (mrings->mr_type == MAC_RING_TYPE_RX) {
				mrings->mr_rnum = 8; /* XXX */
				mrings->mr_gnum = 6; /* XXX */
			} else {
				mrings->mr_rnum = 8; /* XXX */
				mrings->mr_gnum = 0; /* XXX */
			}
		} else
			return (B_FALSE);
		break;
	}

	case MAC_CAPAB_SHARES: {
		mac_capab_share_t *mshares = (mac_capab_share_t *)cap_data;

		/*
		 * Only the service domain driver responds to
		 * this capability request.
		 */
		if (isLDOMservice(nxgep)) {
			mshares->ms_snum = 3;
			mshares->ms_handle = (void *)nxgep;
			mshares->ms_salloc = nxge_hio_share_alloc;
			mshares->ms_sfree = nxge_hio_share_free;
			mshares->ms_sadd = NULL;
			mshares->ms_sremove = NULL;
			mshares->ms_squery = nxge_hio_share_query;
		} else
			return (B_FALSE);
		break;
	}
#endif
	default:
		return (B_FALSE);
	}
	return (B_TRUE);
}

/*
 * nxge_param_locked() -- return B_TRUE for properties that become
 * read-only while the device is in a loopback mode (checked by
 * nxge_m_setprop before any write is attempted).
 */
static boolean_t
nxge_param_locked(mac_prop_id_t pr_num)
{
	/*
	 * All adv_* parameters are locked (read-only) while
	 * the device is in any sort of loopback mode ...
	 */
	switch (pr_num) {
	case DLD_PROP_ADV_1000FDX_CAP:
	case DLD_PROP_EN_1000FDX_CAP:
	case DLD_PROP_ADV_1000HDX_CAP:
	case DLD_PROP_EN_1000HDX_CAP:
	case DLD_PROP_ADV_100FDX_CAP:
	case DLD_PROP_EN_100FDX_CAP:
	case DLD_PROP_ADV_100HDX_CAP:
	case DLD_PROP_EN_100HDX_CAP:
	case DLD_PROP_ADV_10FDX_CAP:
	case DLD_PROP_EN_10FDX_CAP:
	case DLD_PROP_ADV_10HDX_CAP:
	case DLD_PROP_EN_10HDX_CAP:
	case DLD_PROP_AUTONEG:
	case DLD_PROP_FLOWCTRL:
		return (B_TRUE);
	}
	return (B_FALSE);
}

/*
 * callback functions for set/get of properties
 */
static int
nxge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
    uint_t pr_valsize, const void *pr_val)
{
	nxge_t *nxgep = barg;
	p_nxge_param_t param_arr;
	p_nxge_stats_t statsp;
	int err = 0;
	uint8_t val;
	uint32_t cur_mtu, new_mtu, old_framesize;
	link_flowctrl_t fl;

	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_setprop"));
	param_arr = nxgep->param_arr;
	statsp = nxgep->statsp;
	mutex_enter(nxgep->genlock);
	if (statsp->port_stats.lb_mode != nxge_lb_normal &&
	    nxge_param_locked(pr_num)) {
		/*
		 * All adv_* parameters are locked (read-only)
		 * while the device is in any sort of loopback mode.
		 */
		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_m_setprop: loopback mode: read only"));
		mutex_exit(nxgep->genlock);
		return (EBUSY);
	}

	val = *(uint8_t *)pr_val;
	switch (pr_num) {
	case DLD_PROP_EN_1000FDX_CAP:
		nxgep->param_en_1000fdx = val;
		param_arr[param_anar_1000fdx].value = val;

		goto reprogram;

	case DLD_PROP_EN_100FDX_CAP:
		nxgep->param_en_100fdx = val;
		param_arr[param_anar_100fdx].value = val;

		goto reprogram;

	case DLD_PROP_EN_10FDX_CAP:
		nxgep->param_en_10fdx = val;
		param_arr[param_anar_10fdx].value = val;

		goto reprogram;

	case DLD_PROP_EN_1000HDX_CAP:
	case DLD_PROP_EN_100HDX_CAP:
	case DLD_PROP_EN_10HDX_CAP:
	case DLD_PROP_ADV_1000FDX_CAP:
	case DLD_PROP_ADV_1000HDX_CAP:
	case DLD_PROP_ADV_100FDX_CAP:
	case DLD_PROP_ADV_100HDX_CAP:
	case DLD_PROP_ADV_10FDX_CAP:
	case DLD_PROP_ADV_10HDX_CAP:
	case DLD_PROP_STATUS:
	case DLD_PROP_SPEED:
	case DLD_PROP_DUPLEX:
		err = EINVAL; /* cannot set read-only properties */
		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_m_setprop: read only property %d",
		    pr_num));
		break;

	case DLD_PROP_AUTONEG:
		param_arr[param_autoneg].value = val;

		goto reprogram;

	case DLD_PROP_MTU:
		/* MTU may only change while the MAC is stopped. */
		if (nxgep->nxge_mac_state == NXGE_MAC_STARTED) {
			err = EBUSY;
			break;
		}

		cur_mtu = nxgep->mac.default_mtu;
		bcopy(pr_val, &new_mtu, sizeof (new_mtu));
		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_m_setprop: set MTU: %d is_jumbo %d",
		    new_mtu, nxgep->mac.is_jumbo));

		if (new_mtu == cur_mtu) {
			err = 0;
			break;
		}
		if (new_mtu < NXGE_DEFAULT_MTU ||
		    new_mtu > NXGE_MAXIMUM_MTU) {
			err = EINVAL;
			break;
		}

		/* Above the default MTU requires jumbo support. */
		if ((new_mtu > NXGE_DEFAULT_MTU) &&
		    !nxgep->mac.is_jumbo) {
			err = EINVAL;
			break;
		}

		old_framesize =
(uint32_t)nxgep->mac.maxframesize; 4625 nxgep->mac.maxframesize = (uint16_t) 4626 (new_mtu + NXGE_EHEADER_VLAN_CRC); 4627 if (nxge_mac_set_framesize(nxgep)) { 4628 nxgep->mac.maxframesize = 4629 (uint16_t)old_framesize; 4630 err = EINVAL; 4631 break; 4632 } 4633 4634 err = mac_maxsdu_update(nxgep->mach, new_mtu); 4635 if (err) { 4636 nxgep->mac.maxframesize = 4637 (uint16_t)old_framesize; 4638 err = EINVAL; 4639 break; 4640 } 4641 4642 nxgep->mac.default_mtu = new_mtu; 4643 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4644 "==> nxge_m_setprop: set MTU: %d maxframe %d", 4645 new_mtu, nxgep->mac.maxframesize)); 4646 break; 4647 4648 case DLD_PROP_FLOWCTRL: 4649 bcopy(pr_val, &fl, sizeof (fl)); 4650 switch (fl) { 4651 default: 4652 err = EINVAL; 4653 break; 4654 4655 case LINK_FLOWCTRL_NONE: 4656 param_arr[param_anar_pause].value = 0; 4657 break; 4658 4659 case LINK_FLOWCTRL_RX: 4660 param_arr[param_anar_pause].value = 1; 4661 break; 4662 4663 case LINK_FLOWCTRL_TX: 4664 case LINK_FLOWCTRL_BI: 4665 err = EINVAL; 4666 break; 4667 } 4668 4669 reprogram: 4670 if (err == 0) { 4671 if (!nxge_param_link_update(nxgep)) { 4672 err = EINVAL; 4673 } 4674 } 4675 break; 4676 case DLD_PROP_PRIVATE: 4677 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4678 "==> nxge_m_setprop: private property")); 4679 err = nxge_set_priv_prop(nxgep, pr_name, pr_valsize, 4680 pr_val); 4681 break; 4682 4683 default: 4684 err = ENOTSUP; 4685 break; 4686 } 4687 4688 mutex_exit(nxgep->genlock); 4689 4690 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4691 "<== nxge_m_setprop (return %d)", err)); 4692 return (err); 4693 } 4694 4695 static int 4696 nxge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num, 4697 uint_t pr_flags, uint_t pr_valsize, void *pr_val) 4698 { 4699 nxge_t *nxgep = barg; 4700 p_nxge_param_t param_arr = nxgep->param_arr; 4701 p_nxge_stats_t statsp = nxgep->statsp; 4702 int err = 0; 4703 link_flowctrl_t fl; 4704 uint64_t tmp = 0; 4705 link_state_t ls; 4706 boolean_t is_default = (pr_flags & DLD_DEFAULT); 4707 
4708 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4709 "==> nxge_m_getprop: pr_num %d", pr_num)); 4710 4711 if (pr_valsize == 0) 4712 return (EINVAL); 4713 4714 if ((is_default) && (pr_num != DLD_PROP_PRIVATE)) { 4715 err = nxge_get_def_val(nxgep, pr_num, pr_valsize, pr_val); 4716 return (err); 4717 } 4718 4719 bzero(pr_val, pr_valsize); 4720 switch (pr_num) { 4721 case DLD_PROP_DUPLEX: 4722 *(uint8_t *)pr_val = statsp->mac_stats.link_duplex; 4723 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4724 "==> nxge_m_getprop: duplex mode %d", 4725 *(uint8_t *)pr_val)); 4726 break; 4727 4728 case DLD_PROP_SPEED: 4729 if (pr_valsize < sizeof (uint64_t)) 4730 return (EINVAL); 4731 tmp = statsp->mac_stats.link_speed * 1000000ull; 4732 bcopy(&tmp, pr_val, sizeof (tmp)); 4733 break; 4734 4735 case DLD_PROP_STATUS: 4736 if (pr_valsize < sizeof (link_state_t)) 4737 return (EINVAL); 4738 if (!statsp->mac_stats.link_up) 4739 ls = LINK_STATE_DOWN; 4740 else 4741 ls = LINK_STATE_UP; 4742 bcopy(&ls, pr_val, sizeof (ls)); 4743 break; 4744 4745 case DLD_PROP_AUTONEG: 4746 *(uint8_t *)pr_val = 4747 param_arr[param_autoneg].value; 4748 break; 4749 4750 case DLD_PROP_FLOWCTRL: 4751 if (pr_valsize < sizeof (link_flowctrl_t)) 4752 return (EINVAL); 4753 4754 fl = LINK_FLOWCTRL_NONE; 4755 if (param_arr[param_anar_pause].value) { 4756 fl = LINK_FLOWCTRL_RX; 4757 } 4758 bcopy(&fl, pr_val, sizeof (fl)); 4759 break; 4760 4761 case DLD_PROP_ADV_1000FDX_CAP: 4762 *(uint8_t *)pr_val = 4763 param_arr[param_anar_1000fdx].value; 4764 break; 4765 4766 case DLD_PROP_EN_1000FDX_CAP: 4767 *(uint8_t *)pr_val = nxgep->param_en_1000fdx; 4768 break; 4769 4770 case DLD_PROP_ADV_100FDX_CAP: 4771 *(uint8_t *)pr_val = 4772 param_arr[param_anar_100fdx].value; 4773 break; 4774 4775 case DLD_PROP_EN_100FDX_CAP: 4776 *(uint8_t *)pr_val = nxgep->param_en_100fdx; 4777 break; 4778 4779 case DLD_PROP_ADV_10FDX_CAP: 4780 *(uint8_t *)pr_val = 4781 param_arr[param_anar_10fdx].value; 4782 break; 4783 4784 case DLD_PROP_EN_10FDX_CAP: 4785 *(uint8_t 
*)pr_val = nxgep->param_en_10fdx; 4786 break; 4787 4788 case DLD_PROP_EN_1000HDX_CAP: 4789 case DLD_PROP_EN_100HDX_CAP: 4790 case DLD_PROP_EN_10HDX_CAP: 4791 case DLD_PROP_ADV_1000HDX_CAP: 4792 case DLD_PROP_ADV_100HDX_CAP: 4793 case DLD_PROP_ADV_10HDX_CAP: 4794 err = ENOTSUP; 4795 break; 4796 4797 case DLD_PROP_PRIVATE: 4798 err = nxge_get_priv_prop(nxgep, pr_name, pr_flags, 4799 pr_valsize, pr_val); 4800 break; 4801 default: 4802 err = EINVAL; 4803 break; 4804 } 4805 4806 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_getprop")); 4807 4808 return (err); 4809 } 4810 4811 /* ARGSUSED */ 4812 static int 4813 nxge_set_priv_prop(p_nxge_t nxgep, const char *pr_name, uint_t pr_valsize, 4814 const void *pr_val) 4815 { 4816 p_nxge_param_t param_arr = nxgep->param_arr; 4817 int err = 0; 4818 long result; 4819 4820 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4821 "==> nxge_set_priv_prop: name %s", pr_name)); 4822 4823 if (strcmp(pr_name, "_accept_jumbo") == 0) { 4824 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4825 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4826 "<== nxge_set_priv_prop: name %s " 4827 "pr_val %s result %d " 4828 "param %d is_jumbo %d", 4829 pr_name, pr_val, result, 4830 param_arr[param_accept_jumbo].value, 4831 nxgep->mac.is_jumbo)); 4832 4833 if (result > 1 || result < 0) { 4834 err = EINVAL; 4835 } else { 4836 if (nxgep->mac.is_jumbo == 4837 (uint32_t)result) { 4838 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4839 "no change (%d %d)", 4840 nxgep->mac.is_jumbo, 4841 result)); 4842 return (0); 4843 } 4844 } 4845 4846 param_arr[param_accept_jumbo].value = result; 4847 nxgep->mac.is_jumbo = B_FALSE; 4848 if (result) { 4849 nxgep->mac.is_jumbo = B_TRUE; 4850 } 4851 4852 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4853 "<== nxge_set_priv_prop: name %s (value %d) is_jumbo %d", 4854 pr_name, result, nxgep->mac.is_jumbo)); 4855 4856 return (err); 4857 } 4858 4859 /* Blanking */ 4860 if (strcmp(pr_name, "_rxdma_intr_time") == 0) { 4861 err = nxge_param_rx_intr_time(nxgep, NULL, NULL, 4862 (char 
*)pr_val, 4863 (caddr_t)¶m_arr[param_rxdma_intr_time]); 4864 if (err) { 4865 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4866 "<== nxge_set_priv_prop: " 4867 "unable to set (%s)", pr_name)); 4868 err = EINVAL; 4869 } else { 4870 err = 0; 4871 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4872 "<== nxge_set_priv_prop: " 4873 "set (%s)", pr_name)); 4874 } 4875 4876 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4877 "<== nxge_set_priv_prop: name %s (value %d)", 4878 pr_name, result)); 4879 4880 return (err); 4881 } 4882 4883 if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) { 4884 err = nxge_param_rx_intr_pkts(nxgep, NULL, NULL, 4885 (char *)pr_val, 4886 (caddr_t)¶m_arr[param_rxdma_intr_pkts]); 4887 if (err) { 4888 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4889 "<== nxge_set_priv_prop: " 4890 "unable to set (%s)", pr_name)); 4891 err = EINVAL; 4892 } else { 4893 err = 0; 4894 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4895 "<== nxge_set_priv_prop: " 4896 "set (%s)", pr_name)); 4897 } 4898 4899 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4900 "<== nxge_set_priv_prop: name %s (value %d)", 4901 pr_name, result)); 4902 4903 return (err); 4904 } 4905 4906 /* Classification */ 4907 if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) { 4908 if (pr_val == NULL) { 4909 err = EINVAL; 4910 return (err); 4911 } 4912 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4913 4914 err = nxge_param_set_ip_opt(nxgep, NULL, 4915 NULL, (char *)pr_val, 4916 (caddr_t)¶m_arr[param_class_opt_ipv4_tcp]); 4917 4918 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4919 "<== nxge_set_priv_prop: name %s (value 0x%x)", 4920 pr_name, result)); 4921 4922 return (err); 4923 } 4924 4925 if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) { 4926 if (pr_val == NULL) { 4927 err = EINVAL; 4928 return (err); 4929 } 4930 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4931 4932 err = nxge_param_set_ip_opt(nxgep, NULL, 4933 NULL, (char *)pr_val, 4934 (caddr_t)¶m_arr[param_class_opt_ipv4_udp]); 4935 4936 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4937 "<== nxge_set_priv_prop: name %s (value 0x%x)", 
4938 pr_name, result)); 4939 4940 return (err); 4941 } 4942 if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) { 4943 if (pr_val == NULL) { 4944 err = EINVAL; 4945 return (err); 4946 } 4947 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4948 4949 err = nxge_param_set_ip_opt(nxgep, NULL, 4950 NULL, (char *)pr_val, 4951 (caddr_t)¶m_arr[param_class_opt_ipv4_ah]); 4952 4953 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4954 "<== nxge_set_priv_prop: name %s (value 0x%x)", 4955 pr_name, result)); 4956 4957 return (err); 4958 } 4959 if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) { 4960 if (pr_val == NULL) { 4961 err = EINVAL; 4962 return (err); 4963 } 4964 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4965 4966 err = nxge_param_set_ip_opt(nxgep, NULL, 4967 NULL, (char *)pr_val, 4968 (caddr_t)¶m_arr[param_class_opt_ipv4_sctp]); 4969 4970 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4971 "<== nxge_set_priv_prop: name %s (value 0x%x)", 4972 pr_name, result)); 4973 4974 return (err); 4975 } 4976 4977 if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) { 4978 if (pr_val == NULL) { 4979 err = EINVAL; 4980 return (err); 4981 } 4982 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4983 4984 err = nxge_param_set_ip_opt(nxgep, NULL, 4985 NULL, (char *)pr_val, 4986 (caddr_t)¶m_arr[param_class_opt_ipv6_tcp]); 4987 4988 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4989 "<== nxge_set_priv_prop: name %s (value 0x%x)", 4990 pr_name, result)); 4991 4992 return (err); 4993 } 4994 4995 if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) { 4996 if (pr_val == NULL) { 4997 err = EINVAL; 4998 return (err); 4999 } 5000 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 5001 5002 err = nxge_param_set_ip_opt(nxgep, NULL, 5003 NULL, (char *)pr_val, 5004 (caddr_t)¶m_arr[param_class_opt_ipv6_udp]); 5005 5006 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5007 "<== nxge_set_priv_prop: name %s (value 0x%x)", 5008 pr_name, result)); 5009 5010 return (err); 5011 } 5012 if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) { 5013 if (pr_val == 
NULL) { 5014 err = EINVAL; 5015 return (err); 5016 } 5017 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 5018 5019 err = nxge_param_set_ip_opt(nxgep, NULL, 5020 NULL, (char *)pr_val, 5021 (caddr_t)¶m_arr[param_class_opt_ipv6_ah]); 5022 5023 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5024 "<== nxge_set_priv_prop: name %s (value 0x%x)", 5025 pr_name, result)); 5026 5027 return (err); 5028 } 5029 if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) { 5030 if (pr_val == NULL) { 5031 err = EINVAL; 5032 return (err); 5033 } 5034 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 5035 5036 err = nxge_param_set_ip_opt(nxgep, NULL, 5037 NULL, (char *)pr_val, 5038 (caddr_t)¶m_arr[param_class_opt_ipv6_sctp]); 5039 5040 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5041 "<== nxge_set_priv_prop: name %s (value 0x%x)", 5042 pr_name, result)); 5043 5044 return (err); 5045 } 5046 5047 if (strcmp(pr_name, "_soft_lso_enable") == 0) { 5048 if (nxgep->nxge_mac_state == NXGE_MAC_STARTED) { 5049 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5050 "==> nxge_set_priv_prop: name %s (busy)", pr_name)); 5051 err = EBUSY; 5052 return (err); 5053 } 5054 if (pr_val == NULL) { 5055 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5056 "==> nxge_set_priv_prop: name %s (null)", pr_name)); 5057 err = EINVAL; 5058 return (err); 5059 } 5060 5061 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 5062 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5063 "<== nxge_set_priv_prop: name %s " 5064 "(lso %d pr_val %s value %d)", 5065 pr_name, nxgep->soft_lso_enable, pr_val, result)); 5066 5067 if (result > 1 || result < 0) { 5068 err = EINVAL; 5069 } else { 5070 if (nxgep->soft_lso_enable == (uint32_t)result) { 5071 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5072 "no change (%d %d)", 5073 nxgep->soft_lso_enable, result)); 5074 return (0); 5075 } 5076 } 5077 5078 nxgep->soft_lso_enable = (int)result; 5079 5080 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5081 "<== nxge_set_priv_prop: name %s (value %d)", 5082 pr_name, result)); 5083 5084 return (err); 5085 } 5086 if (strcmp(pr_name, 
"_adv_10gfdx_cap") == 0) { 5087 err = nxge_param_set_mac(nxgep, NULL, NULL, (char *)pr_val, 5088 (caddr_t)¶m_arr[param_anar_10gfdx]); 5089 return (err); 5090 } 5091 if (strcmp(pr_name, "_adv_pause_cap") == 0) { 5092 err = nxge_param_set_mac(nxgep, NULL, NULL, (char *)pr_val, 5093 (caddr_t)¶m_arr[param_anar_pause]); 5094 return (err); 5095 } 5096 5097 return (EINVAL); 5098 } 5099 5100 static int 5101 nxge_get_priv_prop(p_nxge_t nxgep, const char *pr_name, uint_t pr_flags, 5102 uint_t pr_valsize, void *pr_val) 5103 { 5104 p_nxge_param_t param_arr = nxgep->param_arr; 5105 char valstr[MAXNAMELEN]; 5106 int err = EINVAL; 5107 uint_t strsize; 5108 boolean_t is_default = (pr_flags & DLD_DEFAULT); 5109 5110 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5111 "==> nxge_get_priv_prop: property %s", pr_name)); 5112 5113 /* function number */ 5114 if (strcmp(pr_name, "_function_number") == 0) { 5115 if (is_default) 5116 return (ENOTSUP); 5117 (void) snprintf(valstr, sizeof (valstr), "%d", 5118 nxgep->function_num); 5119 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5120 "==> nxge_get_priv_prop: name %s " 5121 "(value %d valstr %s)", 5122 pr_name, nxgep->function_num, valstr)); 5123 5124 err = 0; 5125 goto done; 5126 } 5127 5128 /* Neptune firmware version */ 5129 if (strcmp(pr_name, "_fw_version") == 0) { 5130 if (is_default) 5131 return (ENOTSUP); 5132 (void) snprintf(valstr, sizeof (valstr), "%s", 5133 nxgep->vpd_info.ver); 5134 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5135 "==> nxge_get_priv_prop: name %s " 5136 "(value %d valstr %s)", 5137 pr_name, nxgep->vpd_info.ver, valstr)); 5138 5139 err = 0; 5140 goto done; 5141 } 5142 5143 /* port PHY mode */ 5144 if (strcmp(pr_name, "_port_mode") == 0) { 5145 if (is_default) 5146 return (ENOTSUP); 5147 switch (nxgep->mac.portmode) { 5148 case PORT_1G_COPPER: 5149 (void) snprintf(valstr, sizeof (valstr), "1G copper %s", 5150 nxgep->hot_swappable_phy ? 
5151 "[Hot Swappable]" : ""); 5152 break; 5153 case PORT_1G_FIBER: 5154 (void) snprintf(valstr, sizeof (valstr), "1G fiber %s", 5155 nxgep->hot_swappable_phy ? 5156 "[hot swappable]" : ""); 5157 break; 5158 case PORT_10G_COPPER: 5159 (void) snprintf(valstr, sizeof (valstr), 5160 "10G copper %s", 5161 nxgep->hot_swappable_phy ? 5162 "[hot swappable]" : ""); 5163 break; 5164 case PORT_10G_FIBER: 5165 (void) snprintf(valstr, sizeof (valstr), "10G fiber %s", 5166 nxgep->hot_swappable_phy ? 5167 "[hot swappable]" : ""); 5168 break; 5169 case PORT_10G_SERDES: 5170 (void) snprintf(valstr, sizeof (valstr), 5171 "10G serdes %s", nxgep->hot_swappable_phy ? 5172 "[hot swappable]" : ""); 5173 break; 5174 case PORT_1G_SERDES: 5175 (void) snprintf(valstr, sizeof (valstr), "1G serdes %s", 5176 nxgep->hot_swappable_phy ? 5177 "[hot swappable]" : ""); 5178 break; 5179 case PORT_1G_RGMII_FIBER: 5180 (void) snprintf(valstr, sizeof (valstr), 5181 "1G rgmii fiber %s", nxgep->hot_swappable_phy ? 5182 "[hot swappable]" : ""); 5183 break; 5184 case PORT_HSP_MODE: 5185 (void) snprintf(valstr, sizeof (valstr), 5186 "phy not present[hot swappable]"); 5187 break; 5188 default: 5189 (void) snprintf(valstr, sizeof (valstr), "unknown %s", 5190 nxgep->hot_swappable_phy ? 5191 "[hot swappable]" : ""); 5192 break; 5193 } 5194 5195 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5196 "==> nxge_get_priv_prop: name %s (value %s)", 5197 pr_name, valstr)); 5198 5199 err = 0; 5200 goto done; 5201 } 5202 5203 /* Hot swappable PHY */ 5204 if (strcmp(pr_name, "_hot_swap_phy") == 0) { 5205 if (is_default) 5206 return (ENOTSUP); 5207 (void) snprintf(valstr, sizeof (valstr), "%s", 5208 nxgep->hot_swappable_phy ? 
5209 "yes" : "no"); 5210 5211 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5212 "==> nxge_get_priv_prop: name %s " 5213 "(value %d valstr %s)", 5214 pr_name, nxgep->hot_swappable_phy, valstr)); 5215 5216 err = 0; 5217 goto done; 5218 } 5219 5220 5221 /* accept jumbo */ 5222 if (strcmp(pr_name, "_accept_jumbo") == 0) { 5223 if (is_default) 5224 (void) snprintf(valstr, sizeof (valstr), "%d", 0); 5225 else 5226 (void) snprintf(valstr, sizeof (valstr), 5227 "%d", nxgep->mac.is_jumbo); 5228 err = 0; 5229 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5230 "==> nxge_get_priv_prop: name %s (value %d (%d, %d))", 5231 pr_name, 5232 (uint32_t)param_arr[param_accept_jumbo].value, 5233 nxgep->mac.is_jumbo, 5234 nxge_jumbo_enable)); 5235 5236 goto done; 5237 } 5238 5239 /* Receive Interrupt Blanking Parameters */ 5240 if (strcmp(pr_name, "_rxdma_intr_time") == 0) { 5241 err = 0; 5242 if (is_default) { 5243 (void) snprintf(valstr, sizeof (valstr), 5244 "%d", RXDMA_RCR_TO_DEFAULT); 5245 goto done; 5246 } 5247 5248 (void) snprintf(valstr, sizeof (valstr), "%d", 5249 nxgep->intr_timeout); 5250 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5251 "==> nxge_get_priv_prop: name %s (value %d)", 5252 pr_name, 5253 (uint32_t)nxgep->intr_timeout)); 5254 goto done; 5255 } 5256 5257 if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) { 5258 err = 0; 5259 if (is_default) { 5260 (void) snprintf(valstr, sizeof (valstr), 5261 "%d", RXDMA_RCR_PTHRES_DEFAULT); 5262 goto done; 5263 } 5264 (void) snprintf(valstr, sizeof (valstr), "%d", 5265 nxgep->intr_threshold); 5266 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5267 "==> nxge_get_priv_prop: name %s (value %d)", 5268 pr_name, (uint32_t)nxgep->intr_threshold)); 5269 5270 goto done; 5271 } 5272 5273 /* Classification and Load Distribution Configuration */ 5274 if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) { 5275 if (is_default) { 5276 (void) snprintf(valstr, sizeof (valstr), "%x", 5277 NXGE_CLASS_FLOW_GEN_SERVER); 5278 err = 0; 5279 goto done; 5280 } 5281 err = nxge_dld_get_ip_opt(nxgep, 5282 
(caddr_t)¶m_arr[param_class_opt_ipv4_tcp]); 5283 5284 (void) snprintf(valstr, sizeof (valstr), "%x", 5285 (int)param_arr[param_class_opt_ipv4_tcp].value); 5286 5287 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5288 "==> nxge_get_priv_prop: %s", valstr)); 5289 goto done; 5290 } 5291 5292 if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) { 5293 if (is_default) { 5294 (void) snprintf(valstr, sizeof (valstr), "%x", 5295 NXGE_CLASS_FLOW_GEN_SERVER); 5296 err = 0; 5297 goto done; 5298 } 5299 err = nxge_dld_get_ip_opt(nxgep, 5300 (caddr_t)¶m_arr[param_class_opt_ipv4_udp]); 5301 5302 (void) snprintf(valstr, sizeof (valstr), "%x", 5303 (int)param_arr[param_class_opt_ipv4_udp].value); 5304 5305 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5306 "==> nxge_get_priv_prop: %s", valstr)); 5307 goto done; 5308 } 5309 if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) { 5310 if (is_default) { 5311 (void) snprintf(valstr, sizeof (valstr), "%x", 5312 NXGE_CLASS_FLOW_GEN_SERVER); 5313 err = 0; 5314 goto done; 5315 } 5316 err = nxge_dld_get_ip_opt(nxgep, 5317 (caddr_t)¶m_arr[param_class_opt_ipv4_ah]); 5318 5319 (void) snprintf(valstr, sizeof (valstr), "%x", 5320 (int)param_arr[param_class_opt_ipv4_ah].value); 5321 5322 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5323 "==> nxge_get_priv_prop: %s", valstr)); 5324 goto done; 5325 } 5326 5327 if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) { 5328 if (is_default) { 5329 (void) snprintf(valstr, sizeof (valstr), "%x", 5330 NXGE_CLASS_FLOW_GEN_SERVER); 5331 err = 0; 5332 goto done; 5333 } 5334 err = nxge_dld_get_ip_opt(nxgep, 5335 (caddr_t)¶m_arr[param_class_opt_ipv4_sctp]); 5336 5337 (void) snprintf(valstr, sizeof (valstr), "%x", 5338 (int)param_arr[param_class_opt_ipv4_sctp].value); 5339 5340 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5341 "==> nxge_get_priv_prop: %s", valstr)); 5342 goto done; 5343 } 5344 5345 if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) { 5346 if (is_default) { 5347 (void) snprintf(valstr, sizeof (valstr), "%x", 5348 NXGE_CLASS_FLOW_GEN_SERVER); 5349 err = 0; 5350 
goto done; 5351 } 5352 err = nxge_dld_get_ip_opt(nxgep, 5353 (caddr_t)¶m_arr[param_class_opt_ipv6_tcp]); 5354 5355 (void) snprintf(valstr, sizeof (valstr), "%x", 5356 (int)param_arr[param_class_opt_ipv6_tcp].value); 5357 5358 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5359 "==> nxge_get_priv_prop: %s", valstr)); 5360 goto done; 5361 } 5362 5363 if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) { 5364 if (is_default) { 5365 (void) snprintf(valstr, sizeof (valstr), "%x", 5366 NXGE_CLASS_FLOW_GEN_SERVER); 5367 err = 0; 5368 goto done; 5369 } 5370 err = nxge_dld_get_ip_opt(nxgep, 5371 (caddr_t)¶m_arr[param_class_opt_ipv6_udp]); 5372 5373 (void) snprintf(valstr, sizeof (valstr), "%x", 5374 (int)param_arr[param_class_opt_ipv6_udp].value); 5375 5376 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5377 "==> nxge_get_priv_prop: %s", valstr)); 5378 goto done; 5379 } 5380 5381 if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) { 5382 if (is_default) { 5383 (void) snprintf(valstr, sizeof (valstr), "%x", 5384 NXGE_CLASS_FLOW_GEN_SERVER); 5385 err = 0; 5386 goto done; 5387 } 5388 err = nxge_dld_get_ip_opt(nxgep, 5389 (caddr_t)¶m_arr[param_class_opt_ipv6_ah]); 5390 5391 (void) snprintf(valstr, sizeof (valstr), "%x", 5392 (int)param_arr[param_class_opt_ipv6_ah].value); 5393 5394 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5395 "==> nxge_get_priv_prop: %s", valstr)); 5396 goto done; 5397 } 5398 5399 if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) { 5400 if (is_default) { 5401 (void) snprintf(valstr, sizeof (valstr), "%x", 5402 NXGE_CLASS_FLOW_GEN_SERVER); 5403 err = 0; 5404 goto done; 5405 } 5406 err = nxge_dld_get_ip_opt(nxgep, 5407 (caddr_t)¶m_arr[param_class_opt_ipv6_sctp]); 5408 5409 (void) snprintf(valstr, sizeof (valstr), "%x", 5410 (int)param_arr[param_class_opt_ipv6_sctp].value); 5411 5412 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5413 "==> nxge_get_priv_prop: %s", valstr)); 5414 goto done; 5415 } 5416 5417 /* Software LSO */ 5418 if (strcmp(pr_name, "_soft_lso_enable") == 0) { 5419 if (is_default) { 5420 (void) 
snprintf(valstr, sizeof (valstr), "%d", 0); 5421 err = 0; 5422 goto done; 5423 } 5424 (void) snprintf(valstr, sizeof (valstr), 5425 "%d", nxgep->soft_lso_enable); 5426 err = 0; 5427 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5428 "==> nxge_get_priv_prop: name %s (value %d)", 5429 pr_name, nxgep->soft_lso_enable)); 5430 5431 goto done; 5432 } 5433 if (strcmp(pr_name, "_adv_10gfdx_cap") == 0) { 5434 err = 0; 5435 if (is_default || 5436 nxgep->param_arr[param_anar_10gfdx].value != 0) { 5437 (void) snprintf(valstr, sizeof (valstr), "%d", 1); 5438 goto done; 5439 } else { 5440 (void) snprintf(valstr, sizeof (valstr), "%d", 0); 5441 goto done; 5442 } 5443 } 5444 if (strcmp(pr_name, "_adv_pause_cap") == 0) { 5445 err = 0; 5446 if (is_default || 5447 nxgep->param_arr[param_anar_pause].value != 0) { 5448 (void) snprintf(valstr, sizeof (valstr), "%d", 1); 5449 goto done; 5450 } else { 5451 (void) snprintf(valstr, sizeof (valstr), "%d", 0); 5452 goto done; 5453 } 5454 } 5455 5456 done: 5457 if (err == 0) { 5458 strsize = (uint_t)strlen(valstr); 5459 if (pr_valsize < strsize) { 5460 err = ENOBUFS; 5461 } else { 5462 (void) strlcpy(pr_val, valstr, pr_valsize); 5463 } 5464 } 5465 5466 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5467 "<== nxge_get_priv_prop: return %d", err)); 5468 return (err); 5469 } 5470 5471 /* 5472 * Module loading and removing entry points. 
5473 */ 5474 5475 static struct cb_ops nxge_cb_ops = { 5476 nodev, /* cb_open */ 5477 nodev, /* cb_close */ 5478 nodev, /* cb_strategy */ 5479 nodev, /* cb_print */ 5480 nodev, /* cb_dump */ 5481 nodev, /* cb_read */ 5482 nodev, /* cb_write */ 5483 nodev, /* cb_ioctl */ 5484 nodev, /* cb_devmap */ 5485 nodev, /* cb_mmap */ 5486 nodev, /* cb_segmap */ 5487 nochpoll, /* cb_chpoll */ 5488 ddi_prop_op, /* cb_prop_op */ 5489 NULL, 5490 D_MP, /* cb_flag */ 5491 CB_REV, /* rev */ 5492 nodev, /* int (*cb_aread)() */ 5493 nodev /* int (*cb_awrite)() */ 5494 }; 5495 5496 static struct dev_ops nxge_dev_ops = { 5497 DEVO_REV, /* devo_rev */ 5498 0, /* devo_refcnt */ 5499 nulldev, 5500 nulldev, /* devo_identify */ 5501 nulldev, /* devo_probe */ 5502 nxge_attach, /* devo_attach */ 5503 nxge_detach, /* devo_detach */ 5504 nodev, /* devo_reset */ 5505 &nxge_cb_ops, /* devo_cb_ops */ 5506 (struct bus_ops *)NULL, /* devo_bus_ops */ 5507 ddi_power /* devo_power */ 5508 }; 5509 5510 extern struct mod_ops mod_driverops; 5511 5512 #define NXGE_DESC_VER "Sun NIU 10Gb Ethernet" 5513 5514 /* 5515 * Module linkage information for the kernel. 
5516 */ 5517 static struct modldrv nxge_modldrv = { 5518 &mod_driverops, 5519 NXGE_DESC_VER, 5520 &nxge_dev_ops 5521 }; 5522 5523 static struct modlinkage modlinkage = { 5524 MODREV_1, (void *) &nxge_modldrv, NULL 5525 }; 5526 5527 int 5528 _init(void) 5529 { 5530 int status; 5531 5532 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _init")); 5533 mac_init_ops(&nxge_dev_ops, "nxge"); 5534 status = ddi_soft_state_init(&nxge_list, sizeof (nxge_t), 0); 5535 if (status != 0) { 5536 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 5537 "failed to init device soft state")); 5538 goto _init_exit; 5539 } 5540 status = mod_install(&modlinkage); 5541 if (status != 0) { 5542 ddi_soft_state_fini(&nxge_list); 5543 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "Mod install failed")); 5544 goto _init_exit; 5545 } 5546 5547 MUTEX_INIT(&nxge_common_lock, NULL, MUTEX_DRIVER, NULL); 5548 5549 _init_exit: 5550 NXGE_DEBUG_MSG((NULL, MOD_CTL, "_init status = 0x%X", status)); 5551 5552 return (status); 5553 } 5554 5555 int 5556 _fini(void) 5557 { 5558 int status; 5559 5560 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini")); 5561 5562 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini: mod_remove")); 5563 5564 if (nxge_mblks_pending) 5565 return (EBUSY); 5566 5567 status = mod_remove(&modlinkage); 5568 if (status != DDI_SUCCESS) { 5569 NXGE_DEBUG_MSG((NULL, MOD_CTL, 5570 "Module removal failed 0x%08x", 5571 status)); 5572 goto _fini_exit; 5573 } 5574 5575 mac_fini_ops(&nxge_dev_ops); 5576 5577 ddi_soft_state_fini(&nxge_list); 5578 5579 MUTEX_DESTROY(&nxge_common_lock); 5580 _fini_exit: 5581 NXGE_DEBUG_MSG((NULL, MOD_CTL, "_fini status = 0x%08x", status)); 5582 5583 return (status); 5584 } 5585 5586 int 5587 _info(struct modinfo *modinfop) 5588 { 5589 int status; 5590 5591 NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _info")); 5592 status = mod_info(&modlinkage, modinfop); 5593 NXGE_DEBUG_MSG((NULL, MOD_CTL, " _info status = 0x%X", status)); 5594 5595 return (status); 5596 } 5597 5598 /*ARGSUSED*/ 5599 static nxge_status_t 5600 
nxge_add_intrs(p_nxge_t nxgep) 5601 { 5602 5603 int intr_types; 5604 int type = 0; 5605 int ddi_status = DDI_SUCCESS; 5606 nxge_status_t status = NXGE_OK; 5607 5608 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs")); 5609 5610 nxgep->nxge_intr_type.intr_registered = B_FALSE; 5611 nxgep->nxge_intr_type.intr_enabled = B_FALSE; 5612 nxgep->nxge_intr_type.msi_intx_cnt = 0; 5613 nxgep->nxge_intr_type.intr_added = 0; 5614 nxgep->nxge_intr_type.niu_msi_enable = B_FALSE; 5615 nxgep->nxge_intr_type.intr_type = 0; 5616 5617 if (nxgep->niu_type == N2_NIU) { 5618 nxgep->nxge_intr_type.niu_msi_enable = B_TRUE; 5619 } else if (nxge_msi_enable) { 5620 nxgep->nxge_intr_type.niu_msi_enable = B_TRUE; 5621 } 5622 5623 /* Get the supported interrupt types */ 5624 if ((ddi_status = ddi_intr_get_supported_types(nxgep->dip, &intr_types)) 5625 != DDI_SUCCESS) { 5626 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_add_intrs: " 5627 "ddi_intr_get_supported_types failed: status 0x%08x", 5628 ddi_status)); 5629 return (NXGE_ERROR | NXGE_DDI_FAILED); 5630 } 5631 nxgep->nxge_intr_type.intr_types = intr_types; 5632 5633 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 5634 "ddi_intr_get_supported_types: 0x%08x", intr_types)); 5635 5636 /* 5637 * Solaris MSIX is not supported yet. use MSI for now. 
 * nxge_msi_enable (1):
 *	1 - MSI		2 - MSI-X	others - FIXED
 */
	/*
	 * Map the nxge_msi_enable tunable onto a DDI interrupt type,
	 * constrained by the types the framework reported in intr_types.
	 */
	switch (nxge_msi_enable) {
	default:
		/* Any value other than 1 or 2 forces legacy INTx emulation. */
		type = DDI_INTR_TYPE_FIXED;
		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
		    "use fixed (intx emulation) type %08x",
		    type));
		break;

	case 2:
		/* Preference order: MSI-X, then MSI, then fixed. */
		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
		    "ddi_intr_get_supported_types: 0x%08x", intr_types));
		if (intr_types & DDI_INTR_TYPE_MSIX) {
			type = DDI_INTR_TYPE_MSIX;
			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
			    "ddi_intr_get_supported_types: MSIX 0x%08x",
			    type));
		} else if (intr_types & DDI_INTR_TYPE_MSI) {
			type = DDI_INTR_TYPE_MSI;
			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
			    "ddi_intr_get_supported_types: MSI 0x%08x",
			    type));
		} else if (intr_types & DDI_INTR_TYPE_FIXED) {
			type = DDI_INTR_TYPE_FIXED;
			NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
			    "ddi_intr_get_supported_types: MSXED0x%08x",
			    type));
		}
		break;

	case 1:
		/* Preference order: MSI, then MSI-X, then fixed. */
		if (intr_types & DDI_INTR_TYPE_MSI) {
			type = DDI_INTR_TYPE_MSI;
			NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
			    "ddi_intr_get_supported_types: MSI 0x%08x",
			    type));
		} else if (intr_types & DDI_INTR_TYPE_MSIX) {
			type = DDI_INTR_TYPE_MSIX;
			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
			    "ddi_intr_get_supported_types: MSIX 0x%08x",
			    type));
		} else if (intr_types & DDI_INTR_TYPE_FIXED) {
			type = DDI_INTR_TYPE_FIXED;
			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
			    "ddi_intr_get_supported_types: MSXED0x%08x",
			    type));
		}
	}

	nxgep->nxge_intr_type.intr_type = type;
	/*
	 * Register the chosen interrupt type through the "advanced" DDI
	 * interrupt framework (ddi_intr_alloc() and friends).
	 * NOTE(review): the comparison below treats the nxge_status_t
	 * result of nxge_add_intrs_adv() as a DDI status; both use 0 for
	 * success, so the check works, but the mixed types are confusing.
	 */
	if ((type == DDI_INTR_TYPE_MSIX || type == DDI_INTR_TYPE_MSI ||
	    type == DDI_INTR_TYPE_FIXED) &&
	    nxgep->nxge_intr_type.niu_msi_enable) {
		if ((status = nxge_add_intrs_adv(nxgep)) != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    " nxge_add_intrs: "
			    " nxge_add_intrs_adv failed: status 0x%08x",
			    status));
			return (status);
		} else {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
			    "interrupts registered : type %d", type));
			nxgep->nxge_intr_type.intr_registered = B_TRUE;

			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "\nAdded advanced nxge add_intr_adv "
			    "intr type 0x%x\n", type));

			return (status);
		}
	}

	if (!nxgep->nxge_intr_type.intr_registered) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_add_intrs: "
		    "failed to register interrupts"));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_add_intrs"));
	return (status);
}

/*
 * nxge_add_soft_intrs
 *
 *	Register the low-priority soft interrupt used to reschedule
 *	transmit work (nxge_reschedule).  resched_id is cleared first so
 *	a failed registration leaves a consistent "no softint" state.
 *
 * Returns:
 *	NXGE_OK on success, (NXGE_ERROR | NXGE_DDI_FAILED) otherwise.
 */
/*ARGSUSED*/
static nxge_status_t
nxge_add_soft_intrs(p_nxge_t nxgep)
{

	int		ddi_status = DDI_SUCCESS;
	nxge_status_t	status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_soft_intrs"));

	nxgep->resched_id = NULL;
	nxgep->resched_running = B_FALSE;
	ddi_status = ddi_add_softintr(nxgep->dip, DDI_SOFTINT_LOW,
	    &nxgep->resched_id,
	    NULL, NULL, nxge_reschedule, (caddr_t)nxgep);
	if (ddi_status != DDI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_add_soft_intrs: "
		    "ddi_add_softintrs failed: status 0x%08x",
		    ddi_status));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_ddi_add_soft_intrs"));

	return (status);
}

/*
 * nxge_add_intrs_adv
 *
 *	Dispatch to the MSI/MSI-X or fixed-interrupt registration path
 *	based on the interrupt type previously chosen by nxge_add_intrs().
 */
static nxge_status_t
nxge_add_intrs_adv(p_nxge_t nxgep)
{
	int		intr_type;
	p_nxge_intr_t	intrp;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv"));

	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
	intr_type = intrp->intr_type;
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv: type 0x%x",
	    intr_type));

	switch (intr_type) {
	case DDI_INTR_TYPE_MSI: /* 0x2 */
	case DDI_INTR_TYPE_MSIX: /* 0x4 */
		return (nxge_add_intrs_adv_type(nxgep, intr_type));

	case DDI_INTR_TYPE_FIXED: /* 0x1 */
		return (nxge_add_intrs_adv_type_fix(nxgep, intr_type));

	default:
		return (NXGE_ERROR);
	}
}


/*
 * nxge_add_intrs_adv_type
 *
 *	Allocate and register MSI/MSI-X interrupts:
 *	query counts, clamp the request (PSARC/2007/453 override for MSI-X,
 *	power-of-2 rounding for MSI), allocate handles, bind the logical
 *	device groups (LDGs) to vectors and add one handler per group.
 *	On any failure, every resource acquired so far is released before
 *	returning.
 *
 * Returns:
 *	NXGE_OK on success; (NXGE_ERROR | NXGE_DDI_FAILED) or the
 *	nxge_ldgv_init() status on failure.
 */
/*ARGSUSED*/
static nxge_status_t
nxge_add_intrs_adv_type(p_nxge_t nxgep, uint32_t int_type)
{
	dev_info_t	*dip = nxgep->dip;
	p_nxge_ldg_t	ldgp;
	p_nxge_intr_t	intrp;
	uint_t		*inthandler;
	void		*arg1, *arg2;
	int		behavior;
	int		nintrs, navail, nrequest;
	int		nactual, nrequired;
	int		inum = 0;
	int		x, y;
	int		ddi_status = DDI_SUCCESS;
	nxge_status_t	status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type"));
	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
	intrp->start_inum = 0;

	/*
	 * NOTE(review): "0x%x%" in the two error messages below has a
	 * stray '%' (should be "0x%x,"); debug text only.
	 */
	ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
	if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "ddi_intr_get_nintrs() failed, status: 0x%x%, "
		    "nintrs: %d", ddi_status, nintrs));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
	if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "ddi_intr_get_navail() failed, status: 0x%x%, "
		    "nintrs: %d", ddi_status, navail));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	NXGE_DEBUG_MSG((nxgep, INT_CTL,
	    "ddi_intr_get_navail() returned: nintrs %d, navail %d",
	    nintrs, navail));

	/* PSARC/2007/453 MSI-X interrupt limit override */
	if (int_type == DDI_INTR_TYPE_MSIX) {
		/* Cap the request at the "#msix-request" property value. */
		nrequest = nxge_create_msi_property(nxgep);
		if (nrequest < navail) {
			navail = nrequest;
			NXGE_DEBUG_MSG((nxgep, INT_CTL,
			    "nxge_add_intrs_adv_type: nintrs %d "
			    "navail %d (nrequest %d)",
			    nintrs, navail, nrequest));
		}
	}

	if
	(int_type == DDI_INTR_TYPE_MSI && !ISP2(navail)) {
		/*
		 * MSI must be power of 2: round navail down to the
		 * highest power of 2 it contains.
		 * NOTE(review): this bit-test chain only considers bits
		 * up to 16, so it assumes navail <= 31 -- true for MSI
		 * (max 32 vectors) but worth confirming.
		 */
		if ((navail & 16) == 16) {
			navail = 16;
		} else if ((navail & 8) == 8) {
			navail = 8;
		} else if ((navail & 4) == 4) {
			navail = 4;
		} else if ((navail & 2) == 2) {
			navail = 2;
		} else {
			navail = 1;
		}
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "ddi_intr_get_navail(): (msi power of 2) nintrs %d, "
		    "navail %d", nintrs, navail));
	}

	behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
	    DDI_INTR_ALLOC_NORMAL);
	intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
	intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP);
	ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
	    navail, &nactual, behavior);
	if (ddi_status != DDI_SUCCESS || nactual == 0) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " ddi_intr_alloc() failed: %d",
		    ddi_status));
		kmem_free(intrp->htable, intrp->intr_size);
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
	    (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " ddi_intr_get_pri() failed: %d",
		    ddi_status));
		/* Free already allocated interrupts */
		for (y = 0; y < nactual; y++) {
			(void) ddi_intr_free(intrp->htable[y]);
		}

		kmem_free(intrp->htable, intrp->intr_size);
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	/*
	 * Build the logical device group tables; nrequired comes back as
	 * the number of LDGs (i.e. handlers) actually needed.
	 */
	nrequired = 0;
	switch (nxgep->niu_type) {
	default:
		status = nxge_ldgv_init(nxgep, &nactual, &nrequired);
		break;

	case N2_NIU:
		status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired);
		break;
	}

	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_add_intrs_adv_typ:nxge_ldgv_init "
		    "failed: 0x%x", status));
		/* Free already allocated interrupts */
		for (y = 0; y < nactual; y++) {
			(void) ddi_intr_free(intrp->htable[y]);
		}

		kmem_free(intrp->htable, intrp->intr_size);
		return (status);
	}

	/*
	 * Attach one handler per LDG: a direct 1-1 handler when the group
	 * holds a single logical device, or the shared system handler when
	 * several devices share the group.
	 */
	ldgp = nxgep->ldgvp->ldgp;
	for (x = 0; x < nrequired; x++, ldgp++) {
		ldgp->vector = (uint8_t)x;
		ldgp->intdata = SID_DATA(ldgp->func, x);
		arg1 = ldgp->ldvp;
		arg2 = nxgep;
		if (ldgp->nldvs == 1) {
			inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
			NXGE_DEBUG_MSG((nxgep, INT_CTL,
			    "nxge_add_intrs_adv_type: "
			    "arg1 0x%x arg2 0x%x: "
			    "1-1 int handler (entry %d intdata 0x%x)\n",
			    arg1, arg2,
			    x, ldgp->intdata));
		} else if (ldgp->nldvs > 1) {
			inthandler = (uint_t *)ldgp->sys_intr_handler;
			NXGE_DEBUG_MSG((nxgep, INT_CTL,
			    "nxge_add_intrs_adv_type: "
			    "arg1 0x%x arg2 0x%x: "
			    "nldevs %d int handler "
			    "(entry %d intdata 0x%x)\n",
			    arg1, arg2,
			    ldgp->nldvs, x, ldgp->intdata));
		}

		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "==> nxge_add_intrs_adv_type: ddi_add_intr(inum) #%d "
		    "htable 0x%llx", x, intrp->htable[x]));

		if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
		    (ddi_intr_handler_t *)inthandler, arg1, arg2))
		    != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_add_intrs_adv_type: failed #%d "
			    "status 0x%x", x, ddi_status));
			/* Back out handlers added on earlier iterations. */
			for (y = 0; y < intrp->intr_added; y++) {
				(void) ddi_intr_remove_handler(
				    intrp->htable[y]);
			}
			/* Free already allocated intr */
			for (y = 0; y < nactual; y++) {
				(void) ddi_intr_free(intrp->htable[y]);
			}
			kmem_free(intrp->htable, intrp->intr_size);

			(void) nxge_ldgv_uninit(nxgep);

			return (NXGE_ERROR | NXGE_DDI_FAILED);
		}
		intrp->intr_added++;
	}

	intrp->msi_intx_cnt = nactual;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "Requested: %d, Allowed: %d msi_intx_cnt %d intr_added %d",
	    navail, nactual,
	    intrp->msi_intx_cnt,
	    intrp->intr_added));

	(void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);

	/* Program the hardware LDG registers to match the bindings above. */
	(void) nxge_intr_ldgv_init(nxgep);

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type"));

	return (status);
}

/*
 * nxge_add_intrs_adv_type_fix
 *
 *	Fixed (legacy/INTx) interrupt counterpart of
 *	nxge_add_intrs_adv_type(): same allocate/bind/add-handler sequence,
 *	but with no MSI-X limit override or power-of-2 rounding, and with
 *	intdata programmed only for non-N2 (Neptune) hardware.
 *
 * Returns:
 *	NXGE_OK on success; (NXGE_ERROR | NXGE_DDI_FAILED) or the
 *	nxge_ldgv_init()/nxge_intr_ldgv_init() status on failure.
 */
/*ARGSUSED*/
static nxge_status_t
nxge_add_intrs_adv_type_fix(p_nxge_t nxgep, uint32_t int_type)
{
	dev_info_t	*dip = nxgep->dip;
	p_nxge_ldg_t	ldgp;
	p_nxge_intr_t	intrp;
	uint_t		*inthandler;
	void		*arg1, *arg2;
	int		behavior;
	int		nintrs, navail;
	int		nactual, nrequired;
	int		inum = 0;
	int		x, y;
	int		ddi_status = DDI_SUCCESS;
	nxge_status_t	status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type_fix"));
	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
	intrp->start_inum = 0;

	ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
	if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
		/*
		 * NOTE(review): this message prints "status" (still NXGE_OK
		 * here) rather than ddi_status, and "0x%x%" has a stray '%'.
		 * The sibling nxge_add_intrs_adv_type() prints ddi_status;
		 * debug text only, but should be made consistent.
		 */
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "ddi_intr_get_nintrs() failed, status: 0x%x%, "
		    "nintrs: %d", status, nintrs));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
	if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "ddi_intr_get_navail() failed, status: 0x%x%, "
		    "nintrs: %d", ddi_status, navail));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	/* NOTE(review): "naavail" below is a typo for "navail". */
	NXGE_DEBUG_MSG((nxgep, INT_CTL,
	    "ddi_intr_get_navail() returned: nintrs %d, naavail %d",
	    nintrs, navail));

	/* Fixed interrupts must be allocated strictly; never partially. */
	behavior = ((int_type == DDI_INTR_TYPE_FIXED) ?
	    DDI_INTR_ALLOC_STRICT :
	    DDI_INTR_ALLOC_NORMAL);
	intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
	intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP);
	ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
	    navail, &nactual, behavior);
	if (ddi_status != DDI_SUCCESS || nactual == 0) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " ddi_intr_alloc() failed: %d",
		    ddi_status));
		kmem_free(intrp->htable, intrp->intr_size);
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
	    (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " ddi_intr_get_pri() failed: %d",
		    ddi_status));
		/* Free already allocated interrupts */
		for (y = 0; y < nactual; y++) {
			(void) ddi_intr_free(intrp->htable[y]);
		}

		kmem_free(intrp->htable, intrp->intr_size);
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}

	/* Build LDG tables; nrequired = number of handlers to add. */
	nrequired = 0;
	switch (nxgep->niu_type) {
	default:
		status = nxge_ldgv_init(nxgep, &nactual, &nrequired);
		break;

	case N2_NIU:
		status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired);
		break;
	}

	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_add_intrs_adv_type_fix:nxge_ldgv_init "
		    "failed: 0x%x", status));
		/* Free already allocated interrupts */
		for (y = 0; y < nactual; y++) {
			(void) ddi_intr_free(intrp->htable[y]);
		}

		kmem_free(intrp->htable, intrp->intr_size);
		return (status);
	}

	ldgp = nxgep->ldgvp->ldgp;
	for (x = 0; x < nrequired; x++, ldgp++) {
		ldgp->vector = (uint8_t)x;
		/* N2/NIU programs interrupt data elsewhere (hypervisor). */
		if (nxgep->niu_type != N2_NIU) {
			ldgp->intdata = SID_DATA(ldgp->func, x);
		}

		arg1 = ldgp->ldvp;
		arg2 = nxgep;
		if (ldgp->nldvs == 1) {
			inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
			NXGE_DEBUG_MSG((nxgep, INT_CTL,
			    "nxge_add_intrs_adv_type_fix: "
			    "1-1 int handler(%d) ldg %d ldv %d "
			    "arg1 $%p arg2 $%p\n",
			    x, ldgp->ldg, ldgp->ldvp->ldv,
			    arg1, arg2));
		} else if (ldgp->nldvs > 1) {
			inthandler = (uint_t *)ldgp->sys_intr_handler;
			NXGE_DEBUG_MSG((nxgep, INT_CTL,
			    "nxge_add_intrs_adv_type_fix: "
			    "shared ldv %d int handler(%d) ldv %d ldg %d"
			    "arg1 0x%016llx arg2 0x%016llx\n",
			    x, ldgp->nldvs, ldgp->ldg, ldgp->ldvp->ldv,
			    arg1, arg2));
		}

		if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
		    (ddi_intr_handler_t *)inthandler, arg1, arg2))
		    != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_add_intrs_adv_type_fix: failed #%d "
			    "status 0x%x", x, ddi_status));
			/* Back out handlers added on earlier iterations. */
			for (y = 0; y < intrp->intr_added; y++) {
				(void) ddi_intr_remove_handler(
				    intrp->htable[y]);
			}
			for (y = 0; y < nactual; y++) {
				(void) ddi_intr_free(intrp->htable[y]);
			}
			/* Free already allocated intr */
			kmem_free(intrp->htable, intrp->intr_size);

			(void) nxge_ldgv_uninit(nxgep);

			return (NXGE_ERROR | NXGE_DDI_FAILED);
		}
		intrp->intr_added++;
	}

	intrp->msi_intx_cnt = nactual;

	(void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);

	status = nxge_intr_ldgv_init(nxgep);
	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type_fix"));

	return (status);
}

/*
 * nxge_remove_intrs
 *
 *	Tear down everything nxge_add_intrs() set up: disable, remove
 *	handlers, free handles, free the handle table, uninitialize the
 *	LDG tables and delete the "#msix-request" property.  A no-op if
 *	interrupts were never registered.
 */
static void
nxge_remove_intrs(p_nxge_t nxgep)
{
	int		i, inum;
	p_nxge_intr_t	intrp;

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs"));
	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
	if (!intrp->intr_registered) {
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "<== nxge_remove_intrs: interrupts not registered"));
		return;
	}

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs:advanced"));

	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
		(void)
		    ddi_intr_block_disable(intrp->htable,
		    intrp->intr_added);
	} else {
		for (i = 0; i < intrp->intr_added; i++) {
			(void) ddi_intr_disable(intrp->htable[i]);
		}
	}

	/* Remove every handler that was added... */
	for (inum = 0; inum < intrp->intr_added; inum++) {
		if (intrp->htable[inum]) {
			(void) ddi_intr_remove_handler(intrp->htable[inum]);
		}
	}

	/* ... then free every handle that was allocated. */
	for (inum = 0; inum < intrp->msi_intx_cnt; inum++) {
		if (intrp->htable[inum]) {
			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
			    "nxge_remove_intrs: ddi_intr_free inum %d "
			    "msi_intx_cnt %d intr_added %d",
			    inum,
			    intrp->msi_intx_cnt,
			    intrp->intr_added));

			(void) ddi_intr_free(intrp->htable[inum]);
		}
	}

	kmem_free(intrp->htable, intrp->intr_size);
	intrp->intr_registered = B_FALSE;
	intrp->intr_enabled = B_FALSE;
	intrp->msi_intx_cnt = 0;
	intrp->intr_added = 0;

	(void) nxge_ldgv_uninit(nxgep);

	/* Drop the PSARC/2007/453 MSI-X limit override property. */
	(void) ddi_prop_remove(DDI_DEV_T_NONE, nxgep->dip,
	    "#msix-request");

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_remove_intrs"));
}

/*
 * nxge_remove_soft_intrs
 *
 *	Remove the reschedule soft interrupt registered by
 *	nxge_add_soft_intrs(); safe to call if it was never added.
 */
/*ARGSUSED*/
static void
nxge_remove_soft_intrs(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_soft_intrs"));
	if (nxgep->resched_id) {
		ddi_remove_softintr(nxgep->resched_id);
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "==> nxge_remove_soft_intrs: removed"));
		nxgep->resched_id = NULL;
	}

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_remove_soft_intrs"));
}

/*
 * nxge_intrs_enable
 *
 *	Enable all registered interrupts, using block enable when the
 *	capability flag allows it, otherwise one handle at a time.
 *	No-op if interrupts are unregistered or already enabled.
 */
/*ARGSUSED*/
static void
nxge_intrs_enable(p_nxge_t nxgep)
{
	p_nxge_intr_t	intrp;
	int		i;
	int		status;

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable"));

	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;

	if (!intrp->intr_registered) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_intrs_enable: "
		    "interrupts are not registered"));
		return;
	}

	if (intrp->intr_enabled) {
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "<== nxge_intrs_enable: already enabled"));
		return;
	}

	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
		status = ddi_intr_block_enable(intrp->htable,
		    intrp->intr_added);
		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable "
		    "block enable - status 0x%x total inums #%d\n",
		    status, intrp->intr_added));
	} else {
		/*
		 * NOTE(review): intr_enabled is set if *any* single enable
		 * succeeds; the block-enable path above never sets it.
		 * Looks inconsistent -- confirm intent before changing.
		 */
		for (i = 0; i < intrp->intr_added; i++) {
			status = ddi_intr_enable(intrp->htable[i]);
			NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable "
			    "ddi_intr_enable:enable - status 0x%x "
			    "total inums %d enable inum #%d\n",
			    status, intrp->intr_added, i));
			if (status == DDI_SUCCESS) {
				intrp->intr_enabled = B_TRUE;
			}
		}
	}

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_enable"));
}

/*
 * nxge_intrs_disable
 *
 *	Disable all registered interrupts (block or per-handle) and clear
 *	the enabled flag.  No-op if interrupts are not registered.
 */
/*ARGSUSED*/
static void
nxge_intrs_disable(p_nxge_t nxgep)
{
	p_nxge_intr_t	intrp;
	int		i;

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_disable"));

	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;

	if (!intrp->intr_registered) {
		NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable: "
		    "interrupts are not registered"));
		return;
	}

	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
		(void) ddi_intr_block_disable(intrp->htable,
		    intrp->intr_added);
	} else {
		for (i = 0; i < intrp->intr_added; i++) {
			(void) ddi_intr_disable(intrp->htable[i]);
		}
	}

	intrp->intr_enabled = B_FALSE;
	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable"));
}

/*
 * nxge_mac_register
 *
 *	Register this instance with the GLDv3 MAC layer: fill in a
 *	mac_register_t (callbacks, MAC address, SDU range, private
 *	properties) and call mac_register().
 *
 * Returns:
 *	NXGE_OK on success, NXGE_ERROR on allocation or registration
 *	failure.
 */
static nxge_status_t
nxge_mac_register(p_nxge_t nxgep)
{
	mac_register_t	*macp;
	int		status;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_mac_register"));

	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
		return (NXGE_ERROR);

	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	macp->m_driver = nxgep;
	macp->m_dip = nxgep->dip;
	macp->m_src_addr = nxgep->ouraddr.ether_addr_octet;
	macp->m_callbacks = &nxge_m_callbacks;
	macp->m_min_sdu = 0;
	/* Advertised MTU is the max frame less Ethernet/VLAN/CRC overhead. */
	nxgep->mac.default_mtu = nxgep->mac.maxframesize -
	    NXGE_EHEADER_VLAN_CRC;
	macp->m_max_sdu = nxgep->mac.default_mtu;
	macp->m_margin = VLAN_TAGSZ;
	macp->m_priv_props = nxge_priv_props;
	macp->m_priv_prop_count = NXGE_MAX_PRIV_PROPS;

	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
	    "==> nxge_mac_register: instance %d "
	    "max_sdu %d margin %d maxframe %d (header %d)",
	    nxgep->instance,
	    macp->m_max_sdu, macp->m_margin,
	    nxgep->mac.maxframesize,
	    NXGE_EHEADER_VLAN_CRC));

	/* macp is only needed for the call; free it in either outcome. */
	status = mac_register(macp, &nxgep->mach);
	mac_free(macp);

	if (status != 0) {
		cmn_err(CE_WARN,
		    "!nxge_mac_register failed (status %d instance %d)",
		    status, nxgep->instance);
		return (NXGE_ERROR);
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_mac_register success "
	    "(instance %d)", nxgep->instance));

	return (NXGE_OK);
}

/*
 * nxge_err_inject
 *
 *	Debug/test ioctl handler: decode an err_inject_t from the M_DATA
 *	continuation block and inject the requested error into the named
 *	hardware block (IPP, TXC, TXDMA, RXDMA, ZCP); the remaining block
 *	IDs are accepted but currently do nothing.  Acknowledges the
 *	ioctl via miocack().
 *
 *	NOTE(review): nmp->b_wptr is advanced by a fixed 1024 bytes with
 *	no check that the message block is that large, and the payload
 *	length is not validated before dereferencing eip -- acceptable
 *	only because this is a trusted debug-only path; verify callers.
 */
void
nxge_err_inject(p_nxge_t nxgep, queue_t *wq, mblk_t *mp)
{
	ssize_t		size;
	mblk_t		*nmp;
	uint8_t		blk_id;
	uint8_t		chan;
	uint32_t	err_id;
	err_inject_t	*eip;

	NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_err_inject"));

	size = 1024;
	nmp = mp->b_cont;
	eip = (err_inject_t *)nmp->b_rptr;
	blk_id = eip->blk_id;
	err_id = eip->err_id;
	chan = eip->chan;
	cmn_err(CE_NOTE, "!blk_id = 0x%x\n", blk_id);
	cmn_err(CE_NOTE, "!err_id = 0x%x\n", err_id);
	cmn_err(CE_NOTE, "!chan = 0x%x\n", chan);
	switch (blk_id) {
	case MAC_BLK_ID:
		break;
	case TXMAC_BLK_ID:
		break;
	case RXMAC_BLK_ID:
		break;
	case MIF_BLK_ID:
		break;
	case IPP_BLK_ID:
		nxge_ipp_inject_err(nxgep, err_id);
		break;
	case TXC_BLK_ID:
		nxge_txc_inject_err(nxgep, err_id);
		break;
	case TXDMA_BLK_ID:
		nxge_txdma_inject_err(nxgep, err_id, chan);
		break;
	case RXDMA_BLK_ID:
		nxge_rxdma_inject_err(nxgep, err_id, chan);
		break;
	case ZCP_BLK_ID:
		nxge_zcp_inject_err(nxgep, err_id);
		break;
	case ESPC_BLK_ID:
		break;
	case FFLP_BLK_ID:
		break;
	case PHY_BLK_ID:
		break;
	case ETHER_SERDES_BLK_ID:
		break;
	case PCIE_SERDES_BLK_ID:
		break;
	case VIR_BLK_ID:
		break;
	}

	nmp->b_wptr = nmp->b_rptr + size;
	NXGE_DEBUG_MSG((nxgep, STR_CTL, "<== nxge_err_inject"));

	miocack(wq, mp, (int)size, 0);
}

/*
 * nxge_init_common_dev
 *
 *	Attach this function (port) to the per-chip shared state.  All
 *	functions on one Neptune/NIU chip share a single nxge_hw_list_t
 *	keyed by the parent dev_info; the first function to attach
 *	creates the entry (and its locks) and scans the PHY ports.
 *	Protected by nxge_common_lock.
 *
 * Returns:
 *	NXGE_OK (always).
 */
static int
nxge_init_common_dev(p_nxge_t nxgep)
{
	p_nxge_hw_list_t	hw_p;
	dev_info_t		*p_dip;

	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_init_common_device"));

	p_dip = nxgep->p_dip;
	MUTEX_ENTER(&nxge_common_lock);
	NXGE_DEBUG_MSG((nxgep, MOD_CTL,
	    "==> nxge_init_common_dev:func # %d",
	    nxgep->function_num));
	/*
	 * Loop through existing per neptune hardware list.
	 */
	for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) {
		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "==> nxge_init_common_device:func # %d "
		    "hw_p $%p parent dip $%p",
		    nxgep->function_num,
		    hw_p,
		    p_dip));
		/* Same parent dev_info => same physical chip: join it. */
		if (hw_p->parent_devp == p_dip) {
			nxgep->nxge_hw_p = hw_p;
			hw_p->ndevs++;
			hw_p->nxge_p[nxgep->function_num] = nxgep;
			NXGE_DEBUG_MSG((nxgep, MOD_CTL,
			    "==> nxge_init_common_device:func # %d "
			    "hw_p $%p parent dip $%p "
			    "ndevs %d (found)",
			    nxgep->function_num,
			    hw_p,
			    p_dip,
			    hw_p->ndevs));
			break;
		}
	}

	if (hw_p == NULL) {
		/* First function on this chip: create the shared entry. */
		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "==> nxge_init_common_device:func # %d "
		    "parent dip $%p (new)",
		    nxgep->function_num,
		    p_dip));
		hw_p = kmem_zalloc(sizeof (nxge_hw_list_t), KM_SLEEP);
		hw_p->parent_devp = p_dip;
		hw_p->magic = NXGE_NEPTUNE_MAGIC;
		nxgep->nxge_hw_p = hw_p;
		hw_p->ndevs++;
		hw_p->nxge_p[nxgep->function_num] = nxgep;
		hw_p->next = nxge_hw_list;
		if (nxgep->niu_type == N2_NIU) {
			hw_p->niu_type = N2_NIU;
			hw_p->platform_type = P_NEPTUNE_NIU;
		} else {
			hw_p->niu_type = NIU_TYPE_NONE;
			hw_p->platform_type = P_NEPTUNE_NONE;
		}

		MUTEX_INIT(&hw_p->nxge_cfg_lock, NULL, MUTEX_DRIVER, NULL);
		MUTEX_INIT(&hw_p->nxge_tcam_lock, NULL, MUTEX_DRIVER, NULL);
		MUTEX_INIT(&hw_p->nxge_vlan_lock, NULL, MUTEX_DRIVER, NULL);
		MUTEX_INIT(&hw_p->nxge_mdio_lock, NULL, MUTEX_DRIVER, NULL);

		nxge_hw_list = hw_p;

		/* Discover PHY/port configuration once per chip. */
		(void) nxge_scan_ports_phy(nxgep, nxge_hw_list);
	}

	MUTEX_EXIT(&nxge_common_lock);

	/* Inherit chip-level type info (N2 keeps its own niu_type). */
	nxgep->platform_type = hw_p->platform_type;
	if (nxgep->niu_type != N2_NIU) {
		nxgep->niu_type = hw_p->niu_type;
	}

	NXGE_DEBUG_MSG((nxgep, MOD_CTL,
	    "==> nxge_init_common_device (nxge_hw_list) $%p",
	    nxge_hw_list));
	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<== nxge_init_common_device"));

	return (NXGE_OK);
}

/*
 * nxge_uninit_common_dev
 *
 *	Detach this function from the per-chip shared state created by
 *	nxge_init_common_dev().  When the last function detaches, the
 *	shared entry's locks are destroyed, the entry is unlinked from
 *	nxge_hw_list (head or middle) and freed, and the HIO state is
 *	torn down.  Protected by nxge_common_lock.
 */
static void
nxge_uninit_common_dev(p_nxge_t nxgep)
{
	p_nxge_hw_list_t	hw_p, h_hw_p;
	dev_info_t		*p_dip;

	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_uninit_common_device"));
	if (nxgep->nxge_hw_p == NULL) {
		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "<== nxge_uninit_common_device (no common)"));
		return;
	}

	MUTEX_ENTER(&nxge_common_lock);
	/* h_hw_p trails hw_p so the matched node can be unlinked. */
	h_hw_p = nxge_hw_list;
	for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) {
		p_dip = hw_p->parent_devp;
		if (nxgep->nxge_hw_p == hw_p &&
		    p_dip == nxgep->p_dip &&
		    nxgep->nxge_hw_p->magic == NXGE_NEPTUNE_MAGIC &&
		    hw_p->magic == NXGE_NEPTUNE_MAGIC) {

			NXGE_DEBUG_MSG((nxgep, MOD_CTL,
			    "==> nxge_uninit_common_device:func # %d "
			    "hw_p $%p parent dip $%p "
			    "ndevs %d (found)",
			    nxgep->function_num,
			    hw_p,
			    p_dip,
			    hw_p->ndevs));

			if (hw_p->ndevs) {
				hw_p->ndevs--;
			}
			hw_p->nxge_p[nxgep->function_num] = NULL;
			if (!hw_p->ndevs) {
				/* Last function: destroy the shared entry. */
				MUTEX_DESTROY(&hw_p->nxge_vlan_lock);
				MUTEX_DESTROY(&hw_p->nxge_tcam_lock);
				MUTEX_DESTROY(&hw_p->nxge_cfg_lock);
				MUTEX_DESTROY(&hw_p->nxge_mdio_lock);
				NXGE_DEBUG_MSG((nxgep, MOD_CTL,
				    "==> nxge_uninit_common_device: "
				    "func # %d "
				    "hw_p $%p parent dip $%p "
				    "ndevs %d (last)",
				    nxgep->function_num,
				    hw_p,
				    p_dip,
				    hw_p->ndevs));

				nxge_hio_uninit(nxgep);

				if (hw_p == nxge_hw_list) {
					NXGE_DEBUG_MSG((nxgep, MOD_CTL,
					    "==> nxge_uninit_common_device:"
					    "remove head func # %d "
					    "hw_p $%p parent dip $%p "
					    "ndevs %d (head)",
					    nxgep->function_num,
					    hw_p,
					    p_dip,
					    hw_p->ndevs));
					nxge_hw_list = hw_p->next;
				} else {
					NXGE_DEBUG_MSG((nxgep, MOD_CTL,
					    "==> nxge_uninit_common_device:"
					    "remove middle func # %d "
					    "hw_p $%p parent dip $%p "
					    "ndevs %d (middle)",
					    nxgep->function_num,
					    hw_p,
					    p_dip,
					    hw_p->ndevs));
					h_hw_p->next = hw_p->next;
				}

				nxgep->nxge_hw_p = NULL;
				KMEM_FREE(hw_p, sizeof (nxge_hw_list_t));
			}
			break;
		} else {
			h_hw_p = hw_p;
		}
	}

	MUTEX_EXIT(&nxge_common_lock);
	NXGE_DEBUG_MSG((nxgep, MOD_CTL,
	    "==> nxge_uninit_common_device (nxge_hw_list) $%p",
	    nxge_hw_list));

	/* NOTE(review): "<= " below is a typo for the usual "<== " marker. */
	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<= nxge_uninit_common_device"));
}

/*
 * Determines the number of ports from the niu_type or the platform type.
 * Returns the number of ports, or returns zero on failure (i.e. when
 * neither the NIU type nor the platform type identifies the hardware).
 */

int
nxge_get_nports(p_nxge_t nxgep)
{
	int	nports = 0;

	switch (nxgep->niu_type) {
	case N2_NIU:
	case NEPTUNE_2_10GF:
		nports = 2;
		break;
	case NEPTUNE_4_1GC:
	case NEPTUNE_2_10GF_2_1GC:
	case NEPTUNE_1_10GF_3_1GC:
	case NEPTUNE_1_1GC_1_10GF_2_1GC:
	case NEPTUNE_2_10GF_2_1GRF:
		nports = 4;
		break;
	default:
		/* niu_type unknown: fall back to the board/platform type. */
		switch (nxgep->platform_type) {
		case P_NEPTUNE_NIU:
		case P_NEPTUNE_ATLAS_2PORT:
			nports = 2;
			break;
		case P_NEPTUNE_ATLAS_4PORT:
		case P_NEPTUNE_MARAMBA_P0:
		case P_NEPTUNE_MARAMBA_P1:
		case P_NEPTUNE_ALONSO:
			nports = 4;
			break;
		default:
			break;
		}
		break;
	}

	return (nports);
}

/*
 * The following two functions are to support
 * PSARC/2007/453 MSI-X interrupt limit override.
 */

/*
 * nxge_create_msi_property
 *
 *	Create the "#msix-request" property on this dip (10G ports only)
 *	and compute how many MSI-X vectors this port should request:
 *	min(NXGE_MSIX_REQUEST_10G, ncpus) for 10G ports,
 *	NXGE_MSIX_REQUEST_1G otherwise.
 *
 * Returns:
 *	The number of MSI-X vectors to request.
 */
static int
nxge_create_msi_property(p_nxge_t nxgep)
{
	int	nmsi;
	extern	int ncpus;

	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==>nxge_create_msi_property"));

	switch (nxgep->mac.portmode) {
	case PORT_10G_COPPER:
	case PORT_10G_FIBER:
		(void) ddi_prop_create(DDI_DEV_T_NONE, nxgep->dip,
		    DDI_PROP_CANSLEEP, "#msix-request", NULL, 0);
		/*
		 * The maximum MSI-X requested will be 8.
		 * If the # of CPUs is less than 8, we will request
		 * # MSI-X based on the # of CPUs.
		 */
		if (ncpus >= NXGE_MSIX_REQUEST_10G) {
			nmsi = NXGE_MSIX_REQUEST_10G;
		} else {
			nmsi = ncpus;
		}
		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "==>nxge_create_msi_property(10G): exists 0x%x (nmsi %d)",
		    ddi_prop_exists(DDI_DEV_T_NONE, nxgep->dip,
		    DDI_PROP_CANSLEEP, "#msix-request"), nmsi));
		break;

	default:
		nmsi = NXGE_MSIX_REQUEST_1G;
		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "==>nxge_create_msi_property(1G): exists 0x%x (nmsi %d)",
		    ddi_prop_exists(DDI_DEV_T_NONE, nxgep->dip,
		    DDI_PROP_CANSLEEP, "#msix-request"), nmsi));
		break;
	}

	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<==nxge_create_msi_property"));
	return (nmsi);
}

/*
 * nxge_get_def_val
 *
 *	Return the driver default for the given MAC property into the
 *	caller-supplied pr_val buffer.
 *
 *	NOTE(review): only DLD_PROP_FLOWCTRL checks pr_valsize; the
 *	uint8_t cases assume the caller provided at least one byte --
 *	confirm that the MAC framework guarantees this.
 *
 * Returns:
 *	0 on success, EINVAL on a too-small buffer, ENOTSUP for
 *	properties with no driver default.
 */
/* ARGSUSED */
static int
nxge_get_def_val(nxge_t *nxgep, mac_prop_id_t pr_num, uint_t pr_valsize,
    void *pr_val)
{
	int err = 0;
	link_flowctrl_t fl;

	switch (pr_num) {
	case DLD_PROP_AUTONEG:
		*(uint8_t *)pr_val = 1;
		break;
	case DLD_PROP_FLOWCTRL:
		if (pr_valsize < sizeof (link_flowctrl_t))
			return (EINVAL);
		fl = LINK_FLOWCTRL_RX;
		bcopy(&fl, pr_val, sizeof (fl));
		break;
	case DLD_PROP_ADV_1000FDX_CAP:
	case DLD_PROP_EN_1000FDX_CAP:
		*(uint8_t *)pr_val = 1;
		break;
	case DLD_PROP_ADV_100FDX_CAP:
	case DLD_PROP_EN_100FDX_CAP:
		*(uint8_t *)pr_val = 1;
		break;
	default:
		err = ENOTSUP;
		break;
	}
	return (err);
}