1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 #pragma ident "%Z%%M% %I% %E% SMI" 27 28 /* 29 * SunOs MT STREAMS NIU/Neptune 10Gb Ethernet Device Driver. 30 */ 31 #include <sys/nxge/nxge_impl.h> 32 #include <sys/nxge/nxge_hio.h> 33 #include <sys/nxge/nxge_rxdma.h> 34 #include <sys/pcie.h> 35 36 uint32_t nxge_use_partition = 0; /* debug partition flag */ 37 uint32_t nxge_dma_obp_props_only = 1; /* use obp published props */ 38 uint32_t nxge_use_rdc_intr = 1; /* debug to assign rdc intr */ 39 /* 40 * PSARC/2007/453 MSI-X interrupt limit override 41 * (This PSARC case is limited to MSI-X vectors 42 * and SPARC platforms only). 
43 */ 44 #if defined(_BIG_ENDIAN) 45 uint32_t nxge_msi_enable = 2; 46 #else 47 uint32_t nxge_msi_enable = 1; 48 #endif 49 50 uint32_t nxge_cksum_enable = 0; 51 52 /* 53 * Globals: tunable parameters (/etc/system or adb) 54 * 55 */ 56 uint32_t nxge_rbr_size = NXGE_RBR_RBB_DEFAULT; 57 uint32_t nxge_rbr_spare_size = 0; 58 uint32_t nxge_rcr_size = NXGE_RCR_DEFAULT; 59 uint32_t nxge_tx_ring_size = NXGE_TX_RING_DEFAULT; 60 boolean_t nxge_no_msg = B_TRUE; /* control message display */ 61 uint32_t nxge_no_link_notify = 0; /* control DL_NOTIFY */ 62 uint32_t nxge_bcopy_thresh = TX_BCOPY_MAX; 63 uint32_t nxge_dvma_thresh = TX_FASTDVMA_MIN; 64 uint32_t nxge_dma_stream_thresh = TX_STREAM_MIN; 65 uint32_t nxge_jumbo_mtu = TX_JUMBO_MTU; 66 boolean_t nxge_jumbo_enable = B_FALSE; 67 uint16_t nxge_rcr_timeout = NXGE_RDC_RCR_TIMEOUT; 68 uint16_t nxge_rcr_threshold = NXGE_RDC_RCR_THRESHOLD; 69 nxge_tx_mode_t nxge_tx_scheme = NXGE_USE_SERIAL; 70 71 /* MAX LSO size */ 72 #define NXGE_LSO_MAXLEN 65535 73 uint32_t nxge_lso_max = NXGE_LSO_MAXLEN; 74 75 /* 76 * Debugging flags: 77 * nxge_no_tx_lb : transmit load balancing 78 * nxge_tx_lb_policy: 0 - TCP port (default) 79 * 3 - DEST MAC 80 */ 81 uint32_t nxge_no_tx_lb = 0; 82 uint32_t nxge_tx_lb_policy = NXGE_TX_LB_TCPUDP; 83 84 /* 85 * Add tunable to reduce the amount of time spent in the 86 * ISR doing Rx Processing. 87 */ 88 uint32_t nxge_max_rx_pkts = 1024; 89 90 /* 91 * Tunables to manage the receive buffer blocks. 92 * 93 * nxge_rx_threshold_hi: copy all buffers. 94 * nxge_rx_bcopy_size_type: receive buffer block size type. 95 * nxge_rx_threshold_lo: copy only up to tunable block size type. 96 */ 97 nxge_rxbuf_threshold_t nxge_rx_threshold_hi = NXGE_RX_COPY_6; 98 nxge_rxbuf_type_t nxge_rx_buf_size_type = RCR_PKTBUFSZ_0; 99 nxge_rxbuf_threshold_t nxge_rx_threshold_lo = NXGE_RX_COPY_3; 100 101 /* Use kmem_alloc() to allocate data buffers. 
*/ 102 #if !defined(__i386) 103 uint32_t nxge_use_kmem_alloc = 1; 104 #else 105 uint32_t nxge_use_kmem_alloc = 0; 106 #endif 107 108 rtrace_t npi_rtracebuf; 109 110 #if defined(sun4v) 111 /* 112 * Hypervisor N2/NIU services information. 113 */ 114 static hsvc_info_t niu_hsvc = { 115 HSVC_REV_1, NULL, HSVC_GROUP_NIU, NIU_MAJOR_VER, 116 NIU_MINOR_VER, "nxge" 117 }; 118 119 static int nxge_hsvc_register(p_nxge_t); 120 #endif 121 122 /* 123 * Function Prototypes 124 */ 125 static int nxge_attach(dev_info_t *, ddi_attach_cmd_t); 126 static int nxge_detach(dev_info_t *, ddi_detach_cmd_t); 127 static void nxge_unattach(p_nxge_t); 128 129 #if NXGE_PROPERTY 130 static void nxge_remove_hard_properties(p_nxge_t); 131 #endif 132 133 /* 134 * These two functions are required by nxge_hio.c 135 */ 136 extern int nxge_m_mmac_add(void *arg, mac_multi_addr_t *maddr); 137 extern int nxge_m_mmac_remove(void *arg, mac_addr_slot_t slot); 138 139 static nxge_status_t nxge_setup_system_dma_pages(p_nxge_t); 140 141 static nxge_status_t nxge_setup_mutexes(p_nxge_t); 142 static void nxge_destroy_mutexes(p_nxge_t); 143 144 static nxge_status_t nxge_map_regs(p_nxge_t nxgep); 145 static void nxge_unmap_regs(p_nxge_t nxgep); 146 #ifdef NXGE_DEBUG 147 static void nxge_test_map_regs(p_nxge_t nxgep); 148 #endif 149 150 static nxge_status_t nxge_add_intrs(p_nxge_t nxgep); 151 static nxge_status_t nxge_add_soft_intrs(p_nxge_t nxgep); 152 static void nxge_remove_intrs(p_nxge_t nxgep); 153 static void nxge_remove_soft_intrs(p_nxge_t nxgep); 154 155 static nxge_status_t nxge_add_intrs_adv(p_nxge_t nxgep); 156 static nxge_status_t nxge_add_intrs_adv_type(p_nxge_t, uint32_t); 157 static nxge_status_t nxge_add_intrs_adv_type_fix(p_nxge_t, uint32_t); 158 static void nxge_intrs_enable(p_nxge_t nxgep); 159 static void nxge_intrs_disable(p_nxge_t nxgep); 160 161 static void nxge_suspend(p_nxge_t); 162 static nxge_status_t nxge_resume(p_nxge_t); 163 164 static nxge_status_t nxge_setup_dev(p_nxge_t); 165 static 
void nxge_destroy_dev(p_nxge_t); 166 167 static nxge_status_t nxge_alloc_mem_pool(p_nxge_t); 168 static void nxge_free_mem_pool(p_nxge_t); 169 170 nxge_status_t nxge_alloc_rx_mem_pool(p_nxge_t); 171 static void nxge_free_rx_mem_pool(p_nxge_t); 172 173 nxge_status_t nxge_alloc_tx_mem_pool(p_nxge_t); 174 static void nxge_free_tx_mem_pool(p_nxge_t); 175 176 static nxge_status_t nxge_dma_mem_alloc(p_nxge_t, dma_method_t, 177 struct ddi_dma_attr *, 178 size_t, ddi_device_acc_attr_t *, uint_t, 179 p_nxge_dma_common_t); 180 181 static void nxge_dma_mem_free(p_nxge_dma_common_t); 182 static void nxge_dma_free_rx_data_buf(p_nxge_dma_common_t); 183 184 static nxge_status_t nxge_alloc_rx_buf_dma(p_nxge_t, uint16_t, 185 p_nxge_dma_common_t *, size_t, size_t, uint32_t *); 186 static void nxge_free_rx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t); 187 188 static nxge_status_t nxge_alloc_rx_cntl_dma(p_nxge_t, uint16_t, 189 p_nxge_dma_common_t *, size_t); 190 static void nxge_free_rx_cntl_dma(p_nxge_t, p_nxge_dma_common_t); 191 192 extern nxge_status_t nxge_alloc_tx_buf_dma(p_nxge_t, uint16_t, 193 p_nxge_dma_common_t *, size_t, size_t, uint32_t *); 194 static void nxge_free_tx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t); 195 196 extern nxge_status_t nxge_alloc_tx_cntl_dma(p_nxge_t, uint16_t, 197 p_nxge_dma_common_t *, 198 size_t); 199 static void nxge_free_tx_cntl_dma(p_nxge_t, p_nxge_dma_common_t); 200 201 static int nxge_init_common_dev(p_nxge_t); 202 static void nxge_uninit_common_dev(p_nxge_t); 203 extern int nxge_param_set_mac(p_nxge_t, queue_t *, mblk_t *, 204 char *, caddr_t); 205 206 /* 207 * The next declarations are for the GLDv3 interface. 
208 */ 209 static int nxge_m_start(void *); 210 static void nxge_m_stop(void *); 211 static int nxge_m_unicst(void *, const uint8_t *); 212 static int nxge_m_multicst(void *, boolean_t, const uint8_t *); 213 static int nxge_m_promisc(void *, boolean_t); 214 static void nxge_m_ioctl(void *, queue_t *, mblk_t *); 215 static void nxge_m_resources(void *); 216 mblk_t *nxge_m_tx(void *arg, mblk_t *); 217 static nxge_status_t nxge_mac_register(p_nxge_t); 218 static int nxge_altmac_set(p_nxge_t nxgep, uint8_t *mac_addr, 219 mac_addr_slot_t slot); 220 void nxge_mmac_kstat_update(p_nxge_t nxgep, mac_addr_slot_t slot, 221 boolean_t factory); 222 static int nxge_m_mmac_reserve(void *arg, mac_multi_addr_t *maddr); 223 static int nxge_m_mmac_modify(void *arg, mac_multi_addr_t *maddr); 224 static int nxge_m_mmac_get(void *arg, mac_multi_addr_t *maddr); 225 static boolean_t nxge_m_getcapab(void *, mac_capab_t, void *); 226 static int nxge_m_setprop(void *, const char *, mac_prop_id_t, 227 uint_t, const void *); 228 static int nxge_m_getprop(void *, const char *, mac_prop_id_t, 229 uint_t, uint_t, void *); 230 static int nxge_set_priv_prop(nxge_t *, const char *, uint_t, 231 const void *); 232 static int nxge_get_priv_prop(nxge_t *, const char *, uint_t, uint_t, 233 void *); 234 static int nxge_get_def_val(nxge_t *, mac_prop_id_t, uint_t, void *); 235 236 237 mac_priv_prop_t nxge_priv_props[] = { 238 {"_adv_10gfdx_cap", MAC_PROP_PERM_RW}, 239 {"_adv_pause_cap", MAC_PROP_PERM_RW}, 240 {"_function_number", MAC_PROP_PERM_READ}, 241 {"_fw_version", MAC_PROP_PERM_READ}, 242 {"_port_mode", MAC_PROP_PERM_READ}, 243 {"_hot_swap_phy", MAC_PROP_PERM_READ}, 244 {"_accept_jumbo", MAC_PROP_PERM_RW}, 245 {"_rxdma_intr_time", MAC_PROP_PERM_RW}, 246 {"_rxdma_intr_pkts", MAC_PROP_PERM_RW}, 247 {"_class_opt_ipv4_tcp", MAC_PROP_PERM_RW}, 248 {"_class_opt_ipv4_udp", MAC_PROP_PERM_RW}, 249 {"_class_opt_ipv4_ah", MAC_PROP_PERM_RW}, 250 {"_class_opt_ipv4_sctp", MAC_PROP_PERM_RW}, 251 
{"_class_opt_ipv6_tcp", MAC_PROP_PERM_RW}, 252 {"_class_opt_ipv6_udp", MAC_PROP_PERM_RW}, 253 {"_class_opt_ipv6_ah", MAC_PROP_PERM_RW}, 254 {"_class_opt_ipv6_sctp", MAC_PROP_PERM_RW}, 255 {"_soft_lso_enable", MAC_PROP_PERM_RW} 256 }; 257 258 #define NXGE_MAX_PRIV_PROPS \ 259 (sizeof (nxge_priv_props)/sizeof (mac_priv_prop_t)) 260 261 #define NXGE_M_CALLBACK_FLAGS\ 262 (MC_RESOURCES | MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP) 263 264 265 #define NXGE_NEPTUNE_MAGIC 0x4E584745UL 266 #define MAX_DUMP_SZ 256 267 268 #define NXGE_M_CALLBACK_FLAGS \ 269 (MC_RESOURCES | MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP) 270 271 mac_callbacks_t nxge_m_callbacks = { 272 NXGE_M_CALLBACK_FLAGS, 273 nxge_m_stat, 274 nxge_m_start, 275 nxge_m_stop, 276 nxge_m_promisc, 277 nxge_m_multicst, 278 nxge_m_unicst, 279 nxge_m_tx, 280 nxge_m_resources, 281 nxge_m_ioctl, 282 nxge_m_getcapab, 283 NULL, 284 NULL, 285 nxge_m_setprop, 286 nxge_m_getprop 287 }; 288 289 void 290 nxge_err_inject(p_nxge_t, queue_t *, mblk_t *); 291 292 /* PSARC/2007/453 MSI-X interrupt limit override. */ 293 #define NXGE_MSIX_REQUEST_10G 8 294 #define NXGE_MSIX_REQUEST_1G 2 295 static int nxge_create_msi_property(p_nxge_t); 296 297 /* 298 * These global variables control the message 299 * output. 300 */ 301 out_dbgmsg_t nxge_dbgmsg_out = DBG_CONSOLE | STR_LOG; 302 uint64_t nxge_debug_level; 303 304 /* 305 * This list contains the instance structures for the Neptune 306 * devices present in the system. The lock exists to guarantee 307 * mutually exclusive access to the list. 
308 */ 309 void *nxge_list = NULL; 310 311 void *nxge_hw_list = NULL; 312 nxge_os_mutex_t nxge_common_lock; 313 314 extern uint64_t npi_debug_level; 315 316 extern nxge_status_t nxge_ldgv_init(p_nxge_t, int *, int *); 317 extern nxge_status_t nxge_ldgv_init_n2(p_nxge_t, int *, int *); 318 extern nxge_status_t nxge_ldgv_uninit(p_nxge_t); 319 extern nxge_status_t nxge_intr_ldgv_init(p_nxge_t); 320 extern void nxge_fm_init(p_nxge_t, 321 ddi_device_acc_attr_t *, 322 ddi_device_acc_attr_t *, 323 ddi_dma_attr_t *); 324 extern void nxge_fm_fini(p_nxge_t); 325 extern npi_status_t npi_mac_altaddr_disable(npi_handle_t, uint8_t, uint8_t); 326 327 /* 328 * Count used to maintain the number of buffers being used 329 * by Neptune instances and loaned up to the upper layers. 330 */ 331 uint32_t nxge_mblks_pending = 0; 332 333 /* 334 * Device register access attributes for PIO. 335 */ 336 static ddi_device_acc_attr_t nxge_dev_reg_acc_attr = { 337 DDI_DEVICE_ATTR_V0, 338 DDI_STRUCTURE_LE_ACC, 339 DDI_STRICTORDER_ACC, 340 }; 341 342 /* 343 * Device descriptor access attributes for DMA. 344 */ 345 static ddi_device_acc_attr_t nxge_dev_desc_dma_acc_attr = { 346 DDI_DEVICE_ATTR_V0, 347 DDI_STRUCTURE_LE_ACC, 348 DDI_STRICTORDER_ACC 349 }; 350 351 /* 352 * Device buffer access attributes for DMA. 353 */ 354 static ddi_device_acc_attr_t nxge_dev_buf_dma_acc_attr = { 355 DDI_DEVICE_ATTR_V0, 356 DDI_STRUCTURE_BE_ACC, 357 DDI_STRICTORDER_ACC 358 }; 359 360 ddi_dma_attr_t nxge_desc_dma_attr = { 361 DMA_ATTR_V0, /* version number. 
*/ 362 0, /* low address */ 363 0xffffffffffffffff, /* high address */ 364 0xffffffffffffffff, /* address counter max */ 365 #ifndef NIU_PA_WORKAROUND 366 0x100000, /* alignment */ 367 #else 368 0x2000, 369 #endif 370 0xfc00fc, /* dlim_burstsizes */ 371 0x1, /* minimum transfer size */ 372 0xffffffffffffffff, /* maximum transfer size */ 373 0xffffffffffffffff, /* maximum segment size */ 374 1, /* scatter/gather list length */ 375 (unsigned int) 1, /* granularity */ 376 0 /* attribute flags */ 377 }; 378 379 ddi_dma_attr_t nxge_tx_dma_attr = { 380 DMA_ATTR_V0, /* version number. */ 381 0, /* low address */ 382 0xffffffffffffffff, /* high address */ 383 0xffffffffffffffff, /* address counter max */ 384 #if defined(_BIG_ENDIAN) 385 0x2000, /* alignment */ 386 #else 387 0x1000, /* alignment */ 388 #endif 389 0xfc00fc, /* dlim_burstsizes */ 390 0x1, /* minimum transfer size */ 391 0xffffffffffffffff, /* maximum transfer size */ 392 0xffffffffffffffff, /* maximum segment size */ 393 5, /* scatter/gather list length */ 394 (unsigned int) 1, /* granularity */ 395 0 /* attribute flags */ 396 }; 397 398 ddi_dma_attr_t nxge_rx_dma_attr = { 399 DMA_ATTR_V0, /* version number. 
*/ 400 0, /* low address */ 401 0xffffffffffffffff, /* high address */ 402 0xffffffffffffffff, /* address counter max */ 403 0x2000, /* alignment */ 404 0xfc00fc, /* dlim_burstsizes */ 405 0x1, /* minimum transfer size */ 406 0xffffffffffffffff, /* maximum transfer size */ 407 0xffffffffffffffff, /* maximum segment size */ 408 1, /* scatter/gather list length */ 409 (unsigned int) 1, /* granularity */ 410 DDI_DMA_RELAXED_ORDERING /* attribute flags */ 411 }; 412 413 ddi_dma_lim_t nxge_dma_limits = { 414 (uint_t)0, /* dlim_addr_lo */ 415 (uint_t)0xffffffff, /* dlim_addr_hi */ 416 (uint_t)0xffffffff, /* dlim_cntr_max */ 417 (uint_t)0xfc00fc, /* dlim_burstsizes for 32 and 64 bit xfers */ 418 0x1, /* dlim_minxfer */ 419 1024 /* dlim_speed */ 420 }; 421 422 dma_method_t nxge_force_dma = DVMA; 423 424 /* 425 * dma chunk sizes. 426 * 427 * Try to allocate the largest possible size 428 * so that fewer number of dma chunks would be managed 429 */ 430 #ifdef NIU_PA_WORKAROUND 431 size_t alloc_sizes [] = {0x2000}; 432 #else 433 size_t alloc_sizes [] = {0x1000, 0x2000, 0x4000, 0x8000, 434 0x10000, 0x20000, 0x40000, 0x80000, 435 0x100000, 0x200000, 0x400000, 0x800000, 436 0x1000000, 0x2000000, 0x4000000}; 437 #endif 438 439 /* 440 * Translate "dev_t" to a pointer to the associated "dev_info_t". 441 */ 442 443 extern void nxge_get_environs(nxge_t *); 444 445 static int 446 nxge_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 447 { 448 p_nxge_t nxgep = NULL; 449 int instance; 450 int status = DDI_SUCCESS; 451 uint8_t portn; 452 nxge_mmac_t *mmac_info; 453 454 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_attach")); 455 456 /* 457 * Get the device instance since we'll need to setup 458 * or retrieve a soft state for this instance. 
459 */ 460 instance = ddi_get_instance(dip); 461 462 switch (cmd) { 463 case DDI_ATTACH: 464 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_ATTACH")); 465 break; 466 467 case DDI_RESUME: 468 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_RESUME")); 469 nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance); 470 if (nxgep == NULL) { 471 status = DDI_FAILURE; 472 break; 473 } 474 if (nxgep->dip != dip) { 475 status = DDI_FAILURE; 476 break; 477 } 478 if (nxgep->suspended == DDI_PM_SUSPEND) { 479 status = ddi_dev_is_needed(nxgep->dip, 0, 1); 480 } else { 481 status = nxge_resume(nxgep); 482 } 483 goto nxge_attach_exit; 484 485 case DDI_PM_RESUME: 486 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_RESUME")); 487 nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance); 488 if (nxgep == NULL) { 489 status = DDI_FAILURE; 490 break; 491 } 492 if (nxgep->dip != dip) { 493 status = DDI_FAILURE; 494 break; 495 } 496 status = nxge_resume(nxgep); 497 goto nxge_attach_exit; 498 499 default: 500 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing unknown")); 501 status = DDI_FAILURE; 502 goto nxge_attach_exit; 503 } 504 505 506 if (ddi_soft_state_zalloc(nxge_list, instance) == DDI_FAILURE) { 507 status = DDI_FAILURE; 508 goto nxge_attach_exit; 509 } 510 511 nxgep = ddi_get_soft_state(nxge_list, instance); 512 if (nxgep == NULL) { 513 status = NXGE_ERROR; 514 goto nxge_attach_fail2; 515 } 516 517 nxgep->nxge_magic = NXGE_MAGIC; 518 519 nxgep->drv_state = 0; 520 nxgep->dip = dip; 521 nxgep->instance = instance; 522 nxgep->p_dip = ddi_get_parent(dip); 523 nxgep->nxge_debug_level = nxge_debug_level; 524 npi_debug_level = nxge_debug_level; 525 526 /* Are we a guest running in a Hybrid I/O environment? 
*/ 527 nxge_get_environs(nxgep); 528 529 status = nxge_map_regs(nxgep); 530 531 if (status != NXGE_OK) { 532 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_map_regs failed")); 533 goto nxge_attach_fail3; 534 } 535 536 nxge_fm_init(nxgep, &nxge_dev_reg_acc_attr, 537 &nxge_dev_desc_dma_acc_attr, 538 &nxge_rx_dma_attr); 539 540 /* Create & initialize the per-Neptune data structure */ 541 /* (even if we're a guest). */ 542 status = nxge_init_common_dev(nxgep); 543 if (status != NXGE_OK) { 544 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 545 "nxge_init_common_dev failed")); 546 goto nxge_attach_fail4; 547 } 548 549 #if defined(sun4v) 550 /* This is required by nxge_hio_init(), which follows. */ 551 if ((status = nxge_hsvc_register(nxgep)) != DDI_SUCCESS) 552 goto nxge_attach_fail; 553 #endif 554 555 if ((status = nxge_hio_init(nxgep)) != NXGE_OK) { 556 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 557 "nxge_hio_init failed")); 558 goto nxge_attach_fail4; 559 } 560 561 if (nxgep->niu_type == NEPTUNE_2_10GF) { 562 if (nxgep->function_num > 1) { 563 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "Unsupported" 564 " function %d. Only functions 0 and 1 are " 565 "supported for this card.", nxgep->function_num)); 566 status = NXGE_ERROR; 567 goto nxge_attach_fail4; 568 } 569 } 570 571 if (isLDOMguest(nxgep)) { 572 /* 573 * Use the function number here. 574 */ 575 nxgep->mac.portnum = nxgep->function_num; 576 nxgep->mac.porttype = PORT_TYPE_LOGICAL; 577 578 /* XXX We'll set the MAC address counts to 1 for now. */ 579 mmac_info = &nxgep->nxge_mmac_info; 580 mmac_info->num_mmac = 1; 581 mmac_info->naddrfree = 1; 582 } else { 583 portn = NXGE_GET_PORT_NUM(nxgep->function_num); 584 nxgep->mac.portnum = portn; 585 if ((portn == 0) || (portn == 1)) 586 nxgep->mac.porttype = PORT_TYPE_XMAC; 587 else 588 nxgep->mac.porttype = PORT_TYPE_BMAC; 589 /* 590 * Neptune has 4 ports, the first 2 ports use XMAC (10G MAC) 591 * internally, the rest 2 ports use BMAC (1G "Big" MAC). 
592 * The two types of MACs have different characterizations. 593 */ 594 mmac_info = &nxgep->nxge_mmac_info; 595 if (nxgep->function_num < 2) { 596 mmac_info->num_mmac = XMAC_MAX_ALT_ADDR_ENTRY; 597 mmac_info->naddrfree = XMAC_MAX_ALT_ADDR_ENTRY; 598 } else { 599 mmac_info->num_mmac = BMAC_MAX_ALT_ADDR_ENTRY; 600 mmac_info->naddrfree = BMAC_MAX_ALT_ADDR_ENTRY; 601 } 602 } 603 /* 604 * Setup the Ndd parameters for the this instance. 605 */ 606 nxge_init_param(nxgep); 607 608 /* 609 * Setup Register Tracing Buffer. 610 */ 611 npi_rtrace_buf_init((rtrace_t *)&npi_rtracebuf); 612 613 /* init stats ptr */ 614 nxge_init_statsp(nxgep); 615 616 /* 617 * Copy the vpd info from eeprom to a local data 618 * structure, and then check its validity. 619 */ 620 if (!isLDOMguest(nxgep)) { 621 int *regp; 622 uint_t reglen; 623 int rv; 624 625 nxge_vpd_info_get(nxgep); 626 627 /* Find the NIU config handle. */ 628 rv = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, 629 ddi_get_parent(nxgep->dip), DDI_PROP_DONTPASS, 630 "reg", ®p, ®len); 631 632 if (rv != DDI_PROP_SUCCESS) { 633 goto nxge_attach_fail5; 634 } 635 /* 636 * The address_hi, that is the first int, in the reg 637 * property consists of config handle, but need to remove 638 * the bits 28-31 which are OBP specific info. 639 */ 640 nxgep->niu_cfg_hdl = (*regp) & 0xFFFFFFF; 641 ddi_prop_free(regp); 642 } 643 644 if (isLDOMguest(nxgep)) { 645 uchar_t *prop_val; 646 uint_t prop_len; 647 648 extern void nxge_get_logical_props(p_nxge_t); 649 650 nxgep->statsp->mac_stats.xcvr_inuse = LOGICAL_XCVR; 651 nxgep->mac.portmode = PORT_LOGICAL; 652 (void) ddi_prop_update_string(DDI_DEV_T_NONE, nxgep->dip, 653 "phy-type", "virtual transceiver"); 654 655 nxgep->nports = 1; 656 nxgep->board_ver = 0; /* XXX What? */ 657 658 /* 659 * local-mac-address property gives us info on which 660 * specific MAC address the Hybrid resource is associated 661 * with. 
662 */ 663 if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, nxgep->dip, 0, 664 "local-mac-address", &prop_val, 665 &prop_len) != DDI_PROP_SUCCESS) { 666 goto nxge_attach_fail5; 667 } 668 if (prop_len != ETHERADDRL) { 669 ddi_prop_free(prop_val); 670 goto nxge_attach_fail5; 671 } 672 ether_copy(prop_val, nxgep->hio_mac_addr); 673 ddi_prop_free(prop_val); 674 nxge_get_logical_props(nxgep); 675 676 } else { 677 status = nxge_xcvr_find(nxgep); 678 679 if (status != NXGE_OK) { 680 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_attach: " 681 " Couldn't determine card type" 682 " .... exit ")); 683 goto nxge_attach_fail5; 684 } 685 686 status = nxge_get_config_properties(nxgep); 687 688 if (status != NXGE_OK) { 689 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 690 "get_hw create failed")); 691 goto nxge_attach_fail; 692 } 693 } 694 695 /* 696 * Setup the Kstats for the driver. 697 */ 698 nxge_setup_kstats(nxgep); 699 700 if (!isLDOMguest(nxgep)) 701 nxge_setup_param(nxgep); 702 703 status = nxge_setup_system_dma_pages(nxgep); 704 if (status != NXGE_OK) { 705 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "set dma page failed")); 706 goto nxge_attach_fail; 707 } 708 709 nxge_hw_id_init(nxgep); 710 711 if (!isLDOMguest(nxgep)) 712 nxge_hw_init_niu_common(nxgep); 713 714 status = nxge_setup_mutexes(nxgep); 715 if (status != NXGE_OK) { 716 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set mutex failed")); 717 goto nxge_attach_fail; 718 } 719 720 #if defined(sun4v) 721 if (isLDOMguest(nxgep)) { 722 /* Find our VR & channel sets. 
*/ 723 status = nxge_hio_vr_add(nxgep); 724 goto nxge_attach_exit; 725 } 726 #endif 727 728 status = nxge_setup_dev(nxgep); 729 if (status != DDI_SUCCESS) { 730 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set dev failed")); 731 goto nxge_attach_fail; 732 } 733 734 status = nxge_add_intrs(nxgep); 735 if (status != DDI_SUCCESS) { 736 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "add_intr failed")); 737 goto nxge_attach_fail; 738 } 739 status = nxge_add_soft_intrs(nxgep); 740 if (status != DDI_SUCCESS) { 741 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 742 "add_soft_intr failed")); 743 goto nxge_attach_fail; 744 } 745 746 /* 747 * Enable interrupts. 748 */ 749 nxge_intrs_enable(nxgep); 750 751 // If a guest, register with vio_net instead. 752 if ((status = nxge_mac_register(nxgep)) != NXGE_OK) { 753 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 754 "unable to register to mac layer (%d)", status)); 755 goto nxge_attach_fail; 756 } 757 758 mac_link_update(nxgep->mach, LINK_STATE_UNKNOWN); 759 760 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 761 "registered to mac (instance %d)", instance)); 762 763 (void) nxge_link_monitor(nxgep, LINK_MONITOR_START); 764 765 goto nxge_attach_exit; 766 767 nxge_attach_fail: 768 nxge_unattach(nxgep); 769 goto nxge_attach_fail1; 770 771 nxge_attach_fail5: 772 /* 773 * Tear down the ndd parameters setup. 774 */ 775 nxge_destroy_param(nxgep); 776 777 /* 778 * Tear down the kstat setup. 779 */ 780 nxge_destroy_kstats(nxgep); 781 782 nxge_attach_fail4: 783 if (nxgep->nxge_hw_p) { 784 nxge_uninit_common_dev(nxgep); 785 nxgep->nxge_hw_p = NULL; 786 } 787 788 nxge_attach_fail3: 789 /* 790 * Unmap the register setup. 
791 */ 792 nxge_unmap_regs(nxgep); 793 794 nxge_fm_fini(nxgep); 795 796 nxge_attach_fail2: 797 ddi_soft_state_free(nxge_list, nxgep->instance); 798 799 nxge_attach_fail1: 800 if (status != NXGE_OK) 801 status = (NXGE_ERROR | NXGE_DDI_FAILED); 802 nxgep = NULL; 803 804 nxge_attach_exit: 805 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_attach status = 0x%08x", 806 status)); 807 808 return (status); 809 } 810 811 static int 812 nxge_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 813 { 814 int status = DDI_SUCCESS; 815 int instance; 816 p_nxge_t nxgep = NULL; 817 818 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_detach")); 819 instance = ddi_get_instance(dip); 820 nxgep = ddi_get_soft_state(nxge_list, instance); 821 if (nxgep == NULL) { 822 status = DDI_FAILURE; 823 goto nxge_detach_exit; 824 } 825 826 switch (cmd) { 827 case DDI_DETACH: 828 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_DETACH")); 829 break; 830 831 case DDI_PM_SUSPEND: 832 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_SUSPEND")); 833 nxgep->suspended = DDI_PM_SUSPEND; 834 nxge_suspend(nxgep); 835 break; 836 837 case DDI_SUSPEND: 838 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_SUSPEND")); 839 if (nxgep->suspended != DDI_PM_SUSPEND) { 840 nxgep->suspended = DDI_SUSPEND; 841 nxge_suspend(nxgep); 842 } 843 break; 844 845 default: 846 status = DDI_FAILURE; 847 } 848 849 if (cmd != DDI_DETACH) 850 goto nxge_detach_exit; 851 852 /* 853 * Stop the xcvr polling. 
854 */ 855 nxgep->suspended = cmd; 856 857 (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP); 858 859 if (isLDOMguest(nxgep)) { 860 nxge_hio_unregister(nxgep); 861 } else if (nxgep->mach && (status = mac_unregister(nxgep->mach)) != 0) { 862 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 863 "<== nxge_detach status = 0x%08X", status)); 864 return (DDI_FAILURE); 865 } 866 867 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 868 "<== nxge_detach (mac_unregister) status = 0x%08X", status)); 869 870 nxge_unattach(nxgep); 871 nxgep = NULL; 872 873 nxge_detach_exit: 874 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_detach status = 0x%08X", 875 status)); 876 877 return (status); 878 } 879 880 static void 881 nxge_unattach(p_nxge_t nxgep) 882 { 883 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unattach")); 884 885 if (nxgep == NULL || nxgep->dev_regs == NULL) { 886 return; 887 } 888 889 nxgep->nxge_magic = 0; 890 891 if (nxgep->nxge_timerid) { 892 nxge_stop_timer(nxgep, nxgep->nxge_timerid); 893 nxgep->nxge_timerid = 0; 894 } 895 896 #if defined(sun4v) 897 if (isLDOMguest(nxgep)) { 898 (void) nxge_hio_vr_release(nxgep); 899 } 900 #endif 901 902 if (nxgep->nxge_hw_p) { 903 nxge_uninit_common_dev(nxgep); 904 nxgep->nxge_hw_p = NULL; 905 } 906 907 #if defined(sun4v) 908 if (nxgep->niu_type == N2_NIU && nxgep->niu_hsvc_available == B_TRUE) { 909 (void) hsvc_unregister(&nxgep->niu_hsvc); 910 nxgep->niu_hsvc_available = B_FALSE; 911 } 912 #endif 913 /* 914 * Stop any further interrupts. 915 */ 916 nxge_remove_intrs(nxgep); 917 918 /* remove soft interrups */ 919 nxge_remove_soft_intrs(nxgep); 920 921 /* 922 * Stop the device and free resources. 923 */ 924 if (!isLDOMguest(nxgep)) { 925 nxge_destroy_dev(nxgep); 926 } 927 928 /* 929 * Tear down the ndd parameters setup. 930 */ 931 nxge_destroy_param(nxgep); 932 933 /* 934 * Tear down the kstat setup. 935 */ 936 nxge_destroy_kstats(nxgep); 937 938 /* 939 * Destroy all mutexes. 
940 */ 941 nxge_destroy_mutexes(nxgep); 942 943 /* 944 * Remove the list of ndd parameters which 945 * were setup during attach. 946 */ 947 if (nxgep->dip) { 948 NXGE_DEBUG_MSG((nxgep, OBP_CTL, 949 " nxge_unattach: remove all properties")); 950 951 (void) ddi_prop_remove_all(nxgep->dip); 952 } 953 954 #if NXGE_PROPERTY 955 nxge_remove_hard_properties(nxgep); 956 #endif 957 958 /* 959 * Unmap the register setup. 960 */ 961 nxge_unmap_regs(nxgep); 962 963 nxge_fm_fini(nxgep); 964 965 ddi_soft_state_free(nxge_list, nxgep->instance); 966 967 NXGE_DEBUG_MSG((NULL, DDI_CTL, "<== nxge_unattach")); 968 } 969 970 #if defined(sun4v) 971 int 972 nxge_hsvc_register( 973 nxge_t *nxgep) 974 { 975 nxge_status_t status; 976 977 if (nxgep->niu_type == N2_NIU) { 978 nxgep->niu_hsvc_available = B_FALSE; 979 bcopy(&niu_hsvc, &nxgep->niu_hsvc, sizeof (hsvc_info_t)); 980 if ((status = hsvc_register(&nxgep->niu_hsvc, 981 &nxgep->niu_min_ver)) != 0) { 982 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 983 "nxge_attach: %s: cannot negotiate " 984 "hypervisor services revision %d group: 0x%lx " 985 "major: 0x%lx minor: 0x%lx errno: %d", 986 niu_hsvc.hsvc_modname, niu_hsvc.hsvc_rev, 987 niu_hsvc.hsvc_group, niu_hsvc.hsvc_major, 988 niu_hsvc.hsvc_minor, status)); 989 return (DDI_FAILURE); 990 } 991 nxgep->niu_hsvc_available = B_TRUE; 992 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 993 "NIU Hypervisor service enabled")); 994 } 995 996 return (DDI_SUCCESS); 997 } 998 #endif 999 1000 static char n2_siu_name[] = "niu"; 1001 1002 static nxge_status_t 1003 nxge_map_regs(p_nxge_t nxgep) 1004 { 1005 int ddi_status = DDI_SUCCESS; 1006 p_dev_regs_t dev_regs; 1007 char buf[MAXPATHLEN + 1]; 1008 char *devname; 1009 #ifdef NXGE_DEBUG 1010 char *sysname; 1011 #endif 1012 off_t regsize; 1013 nxge_status_t status = NXGE_OK; 1014 #if !defined(_BIG_ENDIAN) 1015 off_t pci_offset; 1016 uint16_t pcie_devctl; 1017 #endif 1018 1019 if (isLDOMguest(nxgep)) { 1020 return (nxge_guest_regs_map(nxgep)); 1021 } 1022 1023 
NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_map_regs")); 1024 nxgep->dev_regs = NULL; 1025 dev_regs = KMEM_ZALLOC(sizeof (dev_regs_t), KM_SLEEP); 1026 dev_regs->nxge_regh = NULL; 1027 dev_regs->nxge_pciregh = NULL; 1028 dev_regs->nxge_msix_regh = NULL; 1029 dev_regs->nxge_vir_regh = NULL; 1030 dev_regs->nxge_vir2_regh = NULL; 1031 nxgep->niu_type = NIU_TYPE_NONE; 1032 1033 devname = ddi_pathname(nxgep->dip, buf); 1034 ASSERT(strlen(devname) > 0); 1035 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1036 "nxge_map_regs: pathname devname %s", devname)); 1037 1038 if (strstr(devname, n2_siu_name)) { 1039 /* N2/NIU */ 1040 nxgep->niu_type = N2_NIU; 1041 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1042 "nxge_map_regs: N2/NIU devname %s", devname)); 1043 /* get function number */ 1044 nxgep->function_num = 1045 (devname[strlen(devname) -1] == '1' ? 1 : 0); 1046 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1047 "nxge_map_regs: N2/NIU function number %d", 1048 nxgep->function_num)); 1049 } else { 1050 int *prop_val; 1051 uint_t prop_len; 1052 uint8_t func_num; 1053 1054 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, nxgep->dip, 1055 0, "reg", 1056 &prop_val, &prop_len) != DDI_PROP_SUCCESS) { 1057 NXGE_DEBUG_MSG((nxgep, VPD_CTL, 1058 "Reg property not found")); 1059 ddi_status = DDI_FAILURE; 1060 goto nxge_map_regs_fail0; 1061 1062 } else { 1063 func_num = (prop_val[0] >> 8) & 0x7; 1064 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1065 "Reg property found: fun # %d", 1066 func_num)); 1067 nxgep->function_num = func_num; 1068 if (isLDOMguest(nxgep)) { 1069 nxgep->function_num /= 2; 1070 return (NXGE_OK); 1071 } 1072 ddi_prop_free(prop_val); 1073 } 1074 } 1075 1076 switch (nxgep->niu_type) { 1077 default: 1078 (void) ddi_dev_regsize(nxgep->dip, 0, ®size); 1079 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1080 "nxge_map_regs: pci config size 0x%x", regsize)); 1081 1082 ddi_status = ddi_regs_map_setup(nxgep->dip, 0, 1083 (caddr_t *)&(dev_regs->nxge_pciregp), 0, 0, 1084 &nxge_dev_reg_acc_attr, &dev_regs->nxge_pciregh); 1085 if (ddi_status != 
DDI_SUCCESS) { 1086 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1087 "ddi_map_regs, nxge bus config regs failed")); 1088 goto nxge_map_regs_fail0; 1089 } 1090 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1091 "nxge_map_reg: PCI config addr 0x%0llx " 1092 " handle 0x%0llx", dev_regs->nxge_pciregp, 1093 dev_regs->nxge_pciregh)); 1094 /* 1095 * IMP IMP 1096 * workaround for bit swapping bug in HW 1097 * which ends up in no-snoop = yes 1098 * resulting, in DMA not synched properly 1099 */ 1100 #if !defined(_BIG_ENDIAN) 1101 /* workarounds for x86 systems */ 1102 pci_offset = 0x80 + PCIE_DEVCTL; 1103 pcie_devctl = 0x0; 1104 pcie_devctl &= PCIE_DEVCTL_ENABLE_NO_SNOOP; 1105 pcie_devctl |= PCIE_DEVCTL_RO_EN; 1106 pci_config_put16(dev_regs->nxge_pciregh, pci_offset, 1107 pcie_devctl); 1108 #endif 1109 1110 (void) ddi_dev_regsize(nxgep->dip, 1, ®size); 1111 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1112 "nxge_map_regs: pio size 0x%x", regsize)); 1113 /* set up the device mapped register */ 1114 ddi_status = ddi_regs_map_setup(nxgep->dip, 1, 1115 (caddr_t *)&(dev_regs->nxge_regp), 0, 0, 1116 &nxge_dev_reg_acc_attr, &dev_regs->nxge_regh); 1117 if (ddi_status != DDI_SUCCESS) { 1118 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1119 "ddi_map_regs for Neptune global reg failed")); 1120 goto nxge_map_regs_fail1; 1121 } 1122 1123 /* set up the msi/msi-x mapped register */ 1124 (void) ddi_dev_regsize(nxgep->dip, 2, ®size); 1125 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1126 "nxge_map_regs: msix size 0x%x", regsize)); 1127 ddi_status = ddi_regs_map_setup(nxgep->dip, 2, 1128 (caddr_t *)&(dev_regs->nxge_msix_regp), 0, 0, 1129 &nxge_dev_reg_acc_attr, &dev_regs->nxge_msix_regh); 1130 if (ddi_status != DDI_SUCCESS) { 1131 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1132 "ddi_map_regs for msi reg failed")); 1133 goto nxge_map_regs_fail2; 1134 } 1135 1136 /* set up the vio region mapped register */ 1137 (void) ddi_dev_regsize(nxgep->dip, 3, ®size); 1138 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1139 "nxge_map_regs: vio size 0x%x", regsize)); 1140 
ddi_status = ddi_regs_map_setup(nxgep->dip, 3, 1141 (caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0, 1142 &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh); 1143 1144 if (ddi_status != DDI_SUCCESS) { 1145 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1146 "ddi_map_regs for nxge vio reg failed")); 1147 goto nxge_map_regs_fail3; 1148 } 1149 nxgep->dev_regs = dev_regs; 1150 1151 NPI_PCI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_pciregh); 1152 NPI_PCI_ADD_HANDLE_SET(nxgep, 1153 (npi_reg_ptr_t)dev_regs->nxge_pciregp); 1154 NPI_MSI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_msix_regh); 1155 NPI_MSI_ADD_HANDLE_SET(nxgep, 1156 (npi_reg_ptr_t)dev_regs->nxge_msix_regp); 1157 1158 NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh); 1159 NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp); 1160 1161 NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh); 1162 NPI_REG_ADD_HANDLE_SET(nxgep, 1163 (npi_reg_ptr_t)dev_regs->nxge_regp); 1164 1165 NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh); 1166 NPI_VREG_ADD_HANDLE_SET(nxgep, 1167 (npi_reg_ptr_t)dev_regs->nxge_vir_regp); 1168 1169 break; 1170 1171 case N2_NIU: 1172 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "ddi_map_regs, NIU")); 1173 /* 1174 * Set up the device mapped register (FWARC 2006/556) 1175 * (changed back to 1: reg starts at 1!) 
1176 */ 1177 (void) ddi_dev_regsize(nxgep->dip, 1, ®size); 1178 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1179 "nxge_map_regs: dev size 0x%x", regsize)); 1180 ddi_status = ddi_regs_map_setup(nxgep->dip, 1, 1181 (caddr_t *)&(dev_regs->nxge_regp), 0, 0, 1182 &nxge_dev_reg_acc_attr, &dev_regs->nxge_regh); 1183 1184 if (ddi_status != DDI_SUCCESS) { 1185 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1186 "ddi_map_regs for N2/NIU, global reg failed ")); 1187 goto nxge_map_regs_fail1; 1188 } 1189 1190 /* set up the first vio region mapped register */ 1191 (void) ddi_dev_regsize(nxgep->dip, 2, ®size); 1192 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1193 "nxge_map_regs: vio (1) size 0x%x", regsize)); 1194 ddi_status = ddi_regs_map_setup(nxgep->dip, 2, 1195 (caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0, 1196 &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh); 1197 1198 if (ddi_status != DDI_SUCCESS) { 1199 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1200 "ddi_map_regs for nxge vio reg failed")); 1201 goto nxge_map_regs_fail2; 1202 } 1203 /* set up the second vio region mapped register */ 1204 (void) ddi_dev_regsize(nxgep->dip, 3, ®size); 1205 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1206 "nxge_map_regs: vio (3) size 0x%x", regsize)); 1207 ddi_status = ddi_regs_map_setup(nxgep->dip, 3, 1208 (caddr_t *)&(dev_regs->nxge_vir2_regp), 0, 0, 1209 &nxge_dev_reg_acc_attr, &dev_regs->nxge_vir2_regh); 1210 1211 if (ddi_status != DDI_SUCCESS) { 1212 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1213 "ddi_map_regs for nxge vio2 reg failed")); 1214 goto nxge_map_regs_fail3; 1215 } 1216 nxgep->dev_regs = dev_regs; 1217 1218 NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh); 1219 NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp); 1220 1221 NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh); 1222 NPI_REG_ADD_HANDLE_SET(nxgep, 1223 (npi_reg_ptr_t)dev_regs->nxge_regp); 1224 1225 NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh); 1226 NPI_VREG_ADD_HANDLE_SET(nxgep, 1227 (npi_reg_ptr_t)dev_regs->nxge_vir_regp); 1228 1229 
NPI_V2REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir2_regh); 1230 NPI_V2REG_ADD_HANDLE_SET(nxgep, 1231 (npi_reg_ptr_t)dev_regs->nxge_vir2_regp); 1232 1233 break; 1234 } 1235 1236 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "nxge_map_reg: hardware addr 0x%0llx " 1237 " handle 0x%0llx", dev_regs->nxge_regp, dev_regs->nxge_regh)); 1238 1239 goto nxge_map_regs_exit; 1240 nxge_map_regs_fail3: 1241 if (dev_regs->nxge_msix_regh) { 1242 ddi_regs_map_free(&dev_regs->nxge_msix_regh); 1243 } 1244 if (dev_regs->nxge_vir_regh) { 1245 ddi_regs_map_free(&dev_regs->nxge_regh); 1246 } 1247 nxge_map_regs_fail2: 1248 if (dev_regs->nxge_regh) { 1249 ddi_regs_map_free(&dev_regs->nxge_regh); 1250 } 1251 nxge_map_regs_fail1: 1252 if (dev_regs->nxge_pciregh) { 1253 ddi_regs_map_free(&dev_regs->nxge_pciregh); 1254 } 1255 nxge_map_regs_fail0: 1256 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "Freeing register set memory")); 1257 kmem_free(dev_regs, sizeof (dev_regs_t)); 1258 1259 nxge_map_regs_exit: 1260 if (ddi_status != DDI_SUCCESS) 1261 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 1262 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_map_regs")); 1263 return (status); 1264 } 1265 1266 static void 1267 nxge_unmap_regs(p_nxge_t nxgep) 1268 { 1269 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unmap_regs")); 1270 1271 if (isLDOMguest(nxgep)) { 1272 nxge_guest_regs_map_free(nxgep); 1273 return; 1274 } 1275 1276 if (nxgep->dev_regs) { 1277 if (nxgep->dev_regs->nxge_pciregh) { 1278 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1279 "==> nxge_unmap_regs: bus")); 1280 ddi_regs_map_free(&nxgep->dev_regs->nxge_pciregh); 1281 nxgep->dev_regs->nxge_pciregh = NULL; 1282 } 1283 if (nxgep->dev_regs->nxge_regh) { 1284 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1285 "==> nxge_unmap_regs: device registers")); 1286 ddi_regs_map_free(&nxgep->dev_regs->nxge_regh); 1287 nxgep->dev_regs->nxge_regh = NULL; 1288 } 1289 if (nxgep->dev_regs->nxge_msix_regh) { 1290 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1291 "==> nxge_unmap_regs: device interrupts")); 1292 
ddi_regs_map_free(&nxgep->dev_regs->nxge_msix_regh); 1293 nxgep->dev_regs->nxge_msix_regh = NULL; 1294 } 1295 if (nxgep->dev_regs->nxge_vir_regh) { 1296 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1297 "==> nxge_unmap_regs: vio region")); 1298 ddi_regs_map_free(&nxgep->dev_regs->nxge_vir_regh); 1299 nxgep->dev_regs->nxge_vir_regh = NULL; 1300 } 1301 if (nxgep->dev_regs->nxge_vir2_regh) { 1302 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1303 "==> nxge_unmap_regs: vio2 region")); 1304 ddi_regs_map_free(&nxgep->dev_regs->nxge_vir2_regh); 1305 nxgep->dev_regs->nxge_vir2_regh = NULL; 1306 } 1307 1308 kmem_free(nxgep->dev_regs, sizeof (dev_regs_t)); 1309 nxgep->dev_regs = NULL; 1310 } 1311 1312 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_unmap_regs")); 1313 } 1314 1315 static nxge_status_t 1316 nxge_setup_mutexes(p_nxge_t nxgep) 1317 { 1318 int ddi_status = DDI_SUCCESS; 1319 nxge_status_t status = NXGE_OK; 1320 nxge_classify_t *classify_ptr; 1321 int partition; 1322 1323 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_mutexes")); 1324 1325 /* 1326 * Get the interrupt cookie so the mutexes can be 1327 * Initialized. 1328 */ 1329 if (isLDOMguest(nxgep)) { 1330 nxgep->interrupt_cookie = 0; 1331 } else { 1332 ddi_status = ddi_get_iblock_cookie(nxgep->dip, 0, 1333 &nxgep->interrupt_cookie); 1334 1335 if (ddi_status != DDI_SUCCESS) { 1336 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1337 "<== nxge_setup_mutexes: failed 0x%x", 1338 ddi_status)); 1339 goto nxge_setup_mutexes_exit; 1340 } 1341 } 1342 1343 cv_init(&nxgep->poll_cv, NULL, CV_DRIVER, NULL); 1344 MUTEX_INIT(&nxgep->poll_lock, NULL, 1345 MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); 1346 1347 /* 1348 * Initialize mutexes for this device. 
1349 */ 1350 MUTEX_INIT(nxgep->genlock, NULL, 1351 MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); 1352 MUTEX_INIT(&nxgep->ouraddr_lock, NULL, 1353 MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); 1354 MUTEX_INIT(&nxgep->mif_lock, NULL, 1355 MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); 1356 MUTEX_INIT(&nxgep->group_lock, NULL, 1357 MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); 1358 RW_INIT(&nxgep->filter_lock, NULL, 1359 RW_DRIVER, (void *)nxgep->interrupt_cookie); 1360 1361 classify_ptr = &nxgep->classifier; 1362 /* 1363 * FFLP Mutexes are never used in interrupt context 1364 * as fflp operation can take very long time to 1365 * complete and hence not suitable to invoke from interrupt 1366 * handlers. 1367 */ 1368 MUTEX_INIT(&classify_ptr->tcam_lock, NULL, 1369 NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); 1370 if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) { 1371 MUTEX_INIT(&classify_ptr->fcram_lock, NULL, 1372 NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); 1373 for (partition = 0; partition < MAX_PARTITION; partition++) { 1374 MUTEX_INIT(&classify_ptr->hash_lock[partition], NULL, 1375 NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie); 1376 } 1377 } 1378 1379 nxge_setup_mutexes_exit: 1380 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1381 "<== nxge_setup_mutexes status = %x", status)); 1382 1383 if (ddi_status != DDI_SUCCESS) 1384 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 1385 1386 return (status); 1387 } 1388 1389 static void 1390 nxge_destroy_mutexes(p_nxge_t nxgep) 1391 { 1392 int partition; 1393 nxge_classify_t *classify_ptr; 1394 1395 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_mutexes")); 1396 RW_DESTROY(&nxgep->filter_lock); 1397 MUTEX_DESTROY(&nxgep->group_lock); 1398 MUTEX_DESTROY(&nxgep->mif_lock); 1399 MUTEX_DESTROY(&nxgep->ouraddr_lock); 1400 MUTEX_DESTROY(nxgep->genlock); 1401 1402 classify_ptr = &nxgep->classifier; 1403 MUTEX_DESTROY(&classify_ptr->tcam_lock); 1404 1405 /* Destroy all polling resources. 
	 */
	MUTEX_DESTROY(&nxgep->poll_lock);
	cv_destroy(&nxgep->poll_cv);

	/* free data structures, based on HW type */
	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
		MUTEX_DESTROY(&classify_ptr->fcram_lock);
		for (partition = 0; partition < MAX_PARTITION; partition++) {
			MUTEX_DESTROY(&classify_ptr->hash_lock[partition]);
		}
	}

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_mutexes"));
}

/*
 * nxge_init: bring the hardware to an operational state.  Allocates the
 * DMA memory pools, then initializes TXC, the TX/RX DMA channels and --
 * for non-guest domains -- the classifier, ZCP, IPP and MAC blocks,
 * finally enabling interrupts.  Idempotent: returns immediately if
 * STATE_HW_INITIALIZED is already set.  On failure, unwinds in reverse
 * order through the nxge_init_fail* labels.
 */
nxge_status_t
nxge_init(p_nxge_t nxgep)
{
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_init"));

	/* Nothing to do if the hardware is already initialized. */
	if (nxgep->drv_state & STATE_HW_INITIALIZED) {
		return (status);
	}

	/*
	 * Allocate system memory for the receive/transmit buffer blocks
	 * and receive/transmit descriptor rings.
	 */
	status = nxge_alloc_mem_pool(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "alloc mem failed\n"));
		goto nxge_init_fail1;
	}

	if (!isLDOMguest(nxgep)) {
		/*
		 * Initialize and enable the TXC registers.
		 * (Globally enable the Tx controller,
		 * enable the port, configure the dma channel bitmap,
		 * configure the max burst size).
		 */
		status = nxge_txc_init(nxgep);
		if (status != NXGE_OK) {
			NXGE_ERROR_MSG((nxgep,
			    NXGE_ERR_CTL, "init txc failed\n"));
			goto nxge_init_fail2;
		}
	}

	/*
	 * Initialize and enable TXDMA channels.
	 */
	status = nxge_init_txdma_channels(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init txdma failed\n"));
		goto nxge_init_fail3;
	}

	/*
	 * Initialize and enable RXDMA channels.
	 */
	status = nxge_init_rxdma_channels(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init rxdma failed\n"));
		goto nxge_init_fail4;
	}

	/*
	 * The guest domain is now done.
	 */
	if (isLDOMguest(nxgep)) {
		nxgep->drv_state |= STATE_HW_INITIALIZED;
		goto nxge_init_exit;
	}

	/*
	 * Initialize TCAM and FCRAM (Neptune).
	 */
	status = nxge_classify_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init classify failed\n"));
		goto nxge_init_fail5;
	}

	/*
	 * Initialize ZCP
	 */
	status = nxge_zcp_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init ZCP failed\n"));
		goto nxge_init_fail5;
	}

	/*
	 * Initialize IPP.
	 */
	status = nxge_ipp_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init IPP failed\n"));
		goto nxge_init_fail5;
	}

	/*
	 * Initialize the MAC block.
	 */
	status = nxge_mac_init(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init MAC failed\n"));
		goto nxge_init_fail5;
	}

	nxge_intrs_enable(nxgep); /* XXX What changes do I need to make here? */

	/*
	 * Enable hardware interrupts.
	 */
	nxge_intr_hw_enable(nxgep);
	nxgep->drv_state |= STATE_HW_INITIALIZED;

	goto nxge_init_exit;

	/* Error unwind: tear down in reverse order of initialization. */
nxge_init_fail5:
	nxge_uninit_rxdma_channels(nxgep);
nxge_init_fail4:
	nxge_uninit_txdma_channels(nxgep);
nxge_init_fail3:
	if (!isLDOMguest(nxgep)) {
		(void) nxge_txc_uninit(nxgep);
	}
nxge_init_fail2:
	nxge_free_mem_pool(nxgep);
nxge_init_fail1:
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "<== nxge_init status (failed) = 0x%08x", status));
	return (status);

nxge_init_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_init status = 0x%08x",
	    status));
	return (status);
}


/*
 * nxge_start_timer: arm a driver timeout that calls func(nxgep) after
 * msec milliseconds.  Returns the timeout id, or NULL when the device
 * is suspended (no timer is armed in that case).
 */
timeout_id_t
nxge_start_timer(p_nxge_t nxgep, fptrv_t func, int msec)
{
	/* Only arm the timer when the device is running or resumed. */
	if ((nxgep->suspended == 0) || (nxgep->suspended == DDI_RESUME)) {
		return (timeout(func, (caddr_t)nxgep,
		    drv_usectohz(1000 * msec)));
	}
	return (NULL);
}

/*
 * nxge_stop_timer: cancel a timeout previously armed by
 * nxge_start_timer; a zero/NULL timerid is a no-op.
 */
/*ARGSUSED*/
void
nxge_stop_timer(p_nxge_t nxgep, timeout_id_t timerid)
{
	if (timerid) {
		(void) untimeout(timerid);
	}
}

/*
 * nxge_uninit: quiesce and tear down the hardware state established by
 * nxge_init (timers, link monitor, interrupts, MACs, DMA channels and
 * the memory pools), then clear STATE_HW_INITIALIZED.
 */
void
nxge_uninit(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_uninit"));

	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "==> nxge_uninit: not initialized"));
		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
		    "<== nxge_uninit"));
		return;
	}

	/* stop timer */
	if (nxgep->nxge_timerid) {
		nxge_stop_timer(nxgep, nxgep->nxge_timerid);
		nxgep->nxge_timerid = 0;
	}

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
	(void) nxge_intr_hw_disable(nxgep);

	/*
	 * Reset the receive MAC side.
	 */
	(void) nxge_rx_mac_disable(nxgep);

	/* Disable and soft reset the IPP */
	if (!isLDOMguest(nxgep))
		(void) nxge_ipp_disable(nxgep);

	/* Free classification resources */
	(void) nxge_classify_uninit(nxgep);

	/*
	 * Reset the transmit/receive DMA side.
	 */
	(void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_STOP);
	(void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP);

	nxge_uninit_txdma_channels(nxgep);
	nxge_uninit_rxdma_channels(nxgep);

	/*
	 * Reset the transmit MAC side.
	 */
	(void) nxge_tx_mac_disable(nxgep);

	nxge_free_mem_pool(nxgep);

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);

	nxgep->drv_state &= ~STATE_HW_INITIALIZED;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_uninit: "
	    "nxge_mblks_pending %d", nxge_mblks_pending));
}

/*
 * nxge_get64: debug "peek" -- the message block carries a 64-bit
 * register offset in its first 8 bytes; read that register once and
 * overwrite the offset in place with the value read.
 */
void
nxge_get64(p_nxge_t nxgep, p_mblk_t mp)
{
#if defined(__i386)
	size_t reg;
#else
	uint64_t reg;
#endif
	uint64_t regdata;
	int i, retry;

	/* The caller passes the register offset in the first 8 bytes. */
	bcopy((char *)mp->b_rptr, (char *)&reg, sizeof (uint64_t));
	regdata = 0;
	retry = 1;

	/* retry is fixed at 1, so this reads the register exactly once. */
	for (i = 0; i < retry; i++) {
		NXGE_REG_RD64(nxgep->npi_handle, reg, &regdata);
	}
	/* Return the value read in place of the offset. */
	bcopy((char *)&regdata, (char *)mp->b_rptr, sizeof (uint64_t));
}

/*
 * nxge_put64: debug "poke" -- the message block carries two 64-bit
 * words: buf[0] is the register offset, buf[1] the value to write.
 */
void
nxge_put64(p_nxge_t nxgep, p_mblk_t mp)
{
#if defined(__i386)
	size_t reg;
#else
	uint64_t reg;
#endif
	uint64_t buf[2];

	/* buf[0] = register offset, buf[1] = value to write. */
	bcopy((char *)mp->b_rptr, (char *)&buf[0], 2 * sizeof (uint64_t));
#if defined(__i386)
	reg = (size_t)buf[0];
#else
	reg = buf[0];
#endif

	NXGE_NPI_PIO_WRITE64(nxgep->npi_handle, reg, buf[1]);
}


/* Lazily-initialized lock serializing debug message formatting. */
nxge_os_mutex_t nxgedebuglock;
int nxge_debug_init = 0;

/*ARGSUSED*/
/*VARARGS*/
void
nxge_debug_msg(p_nxge_t nxgep, uint64_t level, char *fmt, ...)
1676 { 1677 char msg_buffer[1048]; 1678 char prefix_buffer[32]; 1679 int instance; 1680 uint64_t debug_level; 1681 int cmn_level = CE_CONT; 1682 va_list ap; 1683 1684 if (nxgep && nxgep->nxge_debug_level != nxge_debug_level) { 1685 /* In case a developer has changed nxge_debug_level. */ 1686 if (nxgep->nxge_debug_level != nxge_debug_level) 1687 nxgep->nxge_debug_level = nxge_debug_level; 1688 } 1689 1690 debug_level = (nxgep == NULL) ? nxge_debug_level : 1691 nxgep->nxge_debug_level; 1692 1693 if ((level & debug_level) || 1694 (level == NXGE_NOTE) || 1695 (level == NXGE_ERR_CTL)) { 1696 /* do the msg processing */ 1697 if (nxge_debug_init == 0) { 1698 MUTEX_INIT(&nxgedebuglock, NULL, MUTEX_DRIVER, NULL); 1699 nxge_debug_init = 1; 1700 } 1701 1702 MUTEX_ENTER(&nxgedebuglock); 1703 1704 if ((level & NXGE_NOTE)) { 1705 cmn_level = CE_NOTE; 1706 } 1707 1708 if (level & NXGE_ERR_CTL) { 1709 cmn_level = CE_WARN; 1710 } 1711 1712 va_start(ap, fmt); 1713 (void) vsprintf(msg_buffer, fmt, ap); 1714 va_end(ap); 1715 if (nxgep == NULL) { 1716 instance = -1; 1717 (void) sprintf(prefix_buffer, "%s :", "nxge"); 1718 } else { 1719 instance = nxgep->instance; 1720 (void) sprintf(prefix_buffer, 1721 "%s%d :", "nxge", instance); 1722 } 1723 1724 MUTEX_EXIT(&nxgedebuglock); 1725 cmn_err(cmn_level, "!%s %s\n", 1726 prefix_buffer, msg_buffer); 1727 1728 } 1729 } 1730 1731 char * 1732 nxge_dump_packet(char *addr, int size) 1733 { 1734 uchar_t *ap = (uchar_t *)addr; 1735 int i; 1736 static char etherbuf[1024]; 1737 char *cp = etherbuf; 1738 char digits[] = "0123456789abcdef"; 1739 1740 if (!size) 1741 size = 60; 1742 1743 if (size > MAX_DUMP_SZ) { 1744 /* Dump the leading bytes */ 1745 for (i = 0; i < MAX_DUMP_SZ/2; i++) { 1746 if (*ap > 0x0f) 1747 *cp++ = digits[*ap >> 4]; 1748 *cp++ = digits[*ap++ & 0xf]; 1749 *cp++ = ':'; 1750 } 1751 for (i = 0; i < 20; i++) 1752 *cp++ = '.'; 1753 /* Dump the last MAX_DUMP_SZ/2 bytes */ 1754 ap = (uchar_t *)(addr + (size - MAX_DUMP_SZ/2)); 1755 for (i 
= 0; i < MAX_DUMP_SZ/2; i++) { 1756 if (*ap > 0x0f) 1757 *cp++ = digits[*ap >> 4]; 1758 *cp++ = digits[*ap++ & 0xf]; 1759 *cp++ = ':'; 1760 } 1761 } else { 1762 for (i = 0; i < size; i++) { 1763 if (*ap > 0x0f) 1764 *cp++ = digits[*ap >> 4]; 1765 *cp++ = digits[*ap++ & 0xf]; 1766 *cp++ = ':'; 1767 } 1768 } 1769 *--cp = 0; 1770 return (etherbuf); 1771 } 1772 1773 #ifdef NXGE_DEBUG 1774 static void 1775 nxge_test_map_regs(p_nxge_t nxgep) 1776 { 1777 ddi_acc_handle_t cfg_handle; 1778 p_pci_cfg_t cfg_ptr; 1779 ddi_acc_handle_t dev_handle; 1780 char *dev_ptr; 1781 ddi_acc_handle_t pci_config_handle; 1782 uint32_t regval; 1783 int i; 1784 1785 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_test_map_regs")); 1786 1787 dev_handle = nxgep->dev_regs->nxge_regh; 1788 dev_ptr = (char *)nxgep->dev_regs->nxge_regp; 1789 1790 if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) { 1791 cfg_handle = nxgep->dev_regs->nxge_pciregh; 1792 cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp; 1793 1794 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1795 "Neptune PCI regp cfg_ptr 0x%llx", (char *)cfg_ptr)); 1796 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1797 "Neptune PCI cfg_ptr vendor id ptr 0x%llx", 1798 &cfg_ptr->vendorid)); 1799 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1800 "\tvendorid 0x%x devid 0x%x", 1801 NXGE_PIO_READ16(cfg_handle, &cfg_ptr->vendorid, 0), 1802 NXGE_PIO_READ16(cfg_handle, &cfg_ptr->devid, 0))); 1803 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1804 "PCI BAR: base 0x%x base14 0x%x base 18 0x%x " 1805 "bar1c 0x%x", 1806 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base, 0), 1807 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base14, 0), 1808 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base18, 0), 1809 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base1c, 0))); 1810 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1811 "\nNeptune PCI BAR: base20 0x%x base24 0x%x " 1812 "base 28 0x%x bar2c 0x%x\n", 1813 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base20, 0), 1814 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base24, 0), 1815 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base28, 0), 1816 
NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base2c, 0))); 1817 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1818 "\nNeptune PCI BAR: base30 0x%x\n", 1819 NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base30, 0))); 1820 1821 cfg_handle = nxgep->dev_regs->nxge_pciregh; 1822 cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp; 1823 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1824 "first 0x%llx second 0x%llx third 0x%llx " 1825 "last 0x%llx ", 1826 NXGE_PIO_READ64(dev_handle, 1827 (uint64_t *)(dev_ptr + 0), 0), 1828 NXGE_PIO_READ64(dev_handle, 1829 (uint64_t *)(dev_ptr + 8), 0), 1830 NXGE_PIO_READ64(dev_handle, 1831 (uint64_t *)(dev_ptr + 16), 0), 1832 NXGE_PIO_READ64(cfg_handle, 1833 (uint64_t *)(dev_ptr + 24), 0))); 1834 } 1835 } 1836 1837 #endif 1838 1839 static void 1840 nxge_suspend(p_nxge_t nxgep) 1841 { 1842 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_suspend")); 1843 1844 nxge_intrs_disable(nxgep); 1845 nxge_destroy_dev(nxgep); 1846 1847 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_suspend")); 1848 } 1849 1850 static nxge_status_t 1851 nxge_resume(p_nxge_t nxgep) 1852 { 1853 nxge_status_t status = NXGE_OK; 1854 1855 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_resume")); 1856 1857 nxgep->suspended = DDI_RESUME; 1858 (void) nxge_link_monitor(nxgep, LINK_MONITOR_START); 1859 (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_START); 1860 (void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_START); 1861 (void) nxge_rx_mac_enable(nxgep); 1862 (void) nxge_tx_mac_enable(nxgep); 1863 nxge_intrs_enable(nxgep); 1864 nxgep->suspended = 0; 1865 1866 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1867 "<== nxge_resume status = 0x%x", status)); 1868 return (status); 1869 } 1870 1871 static nxge_status_t 1872 nxge_setup_dev(p_nxge_t nxgep) 1873 { 1874 nxge_status_t status = NXGE_OK; 1875 1876 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_dev port %d", 1877 nxgep->mac.portnum)); 1878 1879 status = nxge_link_init(nxgep); 1880 1881 if (fm_check_acc_handle(nxgep->dev_regs->nxge_regh) != DDI_FM_OK) { 1882 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1883 "port%d Bad 
register acc handle", nxgep->mac.portnum)); 1884 status = NXGE_ERROR; 1885 } 1886 1887 if (status != NXGE_OK) { 1888 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1889 " nxge_setup_dev status " 1890 "(xcvr init 0x%08x)", status)); 1891 goto nxge_setup_dev_exit; 1892 } 1893 1894 nxge_setup_dev_exit: 1895 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1896 "<== nxge_setup_dev port %d status = 0x%08x", 1897 nxgep->mac.portnum, status)); 1898 1899 return (status); 1900 } 1901 1902 static void 1903 nxge_destroy_dev(p_nxge_t nxgep) 1904 { 1905 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_dev")); 1906 1907 (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP); 1908 1909 (void) nxge_hw_stop(nxgep); 1910 1911 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_dev")); 1912 } 1913 1914 static nxge_status_t 1915 nxge_setup_system_dma_pages(p_nxge_t nxgep) 1916 { 1917 int ddi_status = DDI_SUCCESS; 1918 uint_t count; 1919 ddi_dma_cookie_t cookie; 1920 uint_t iommu_pagesize; 1921 nxge_status_t status = NXGE_OK; 1922 1923 NXGE_ERROR_MSG((nxgep, DDI_CTL, "==> nxge_setup_system_dma_pages")); 1924 nxgep->sys_page_sz = ddi_ptob(nxgep->dip, (ulong_t)1); 1925 if (nxgep->niu_type != N2_NIU) { 1926 iommu_pagesize = dvma_pagesize(nxgep->dip); 1927 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1928 " nxge_setup_system_dma_pages: page %d (ddi_ptob %d) " 1929 " default_block_size %d iommu_pagesize %d", 1930 nxgep->sys_page_sz, 1931 ddi_ptob(nxgep->dip, (ulong_t)1), 1932 nxgep->rx_default_block_size, 1933 iommu_pagesize)); 1934 1935 if (iommu_pagesize != 0) { 1936 if (nxgep->sys_page_sz == iommu_pagesize) { 1937 if (iommu_pagesize > 0x4000) 1938 nxgep->sys_page_sz = 0x4000; 1939 } else { 1940 if (nxgep->sys_page_sz > iommu_pagesize) 1941 nxgep->sys_page_sz = iommu_pagesize; 1942 } 1943 } 1944 } 1945 nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1); 1946 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 1947 "==> nxge_setup_system_dma_pages: page %d (ddi_ptob %d) " 1948 "default_block_size %d page mask %d", 1949 nxgep->sys_page_sz, 1950 
ddi_ptob(nxgep->dip, (ulong_t)1), 1951 nxgep->rx_default_block_size, 1952 nxgep->sys_page_mask)); 1953 1954 1955 switch (nxgep->sys_page_sz) { 1956 default: 1957 nxgep->sys_page_sz = 0x1000; 1958 nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1); 1959 nxgep->rx_default_block_size = 0x1000; 1960 nxgep->rx_bksize_code = RBR_BKSIZE_4K; 1961 break; 1962 case 0x1000: 1963 nxgep->rx_default_block_size = 0x1000; 1964 nxgep->rx_bksize_code = RBR_BKSIZE_4K; 1965 break; 1966 case 0x2000: 1967 nxgep->rx_default_block_size = 0x2000; 1968 nxgep->rx_bksize_code = RBR_BKSIZE_8K; 1969 break; 1970 case 0x4000: 1971 nxgep->rx_default_block_size = 0x4000; 1972 nxgep->rx_bksize_code = RBR_BKSIZE_16K; 1973 break; 1974 case 0x8000: 1975 nxgep->rx_default_block_size = 0x8000; 1976 nxgep->rx_bksize_code = RBR_BKSIZE_32K; 1977 break; 1978 } 1979 1980 #ifndef USE_RX_BIG_BUF 1981 nxge_rx_dma_attr.dma_attr_align = nxgep->sys_page_sz; 1982 #else 1983 nxgep->rx_default_block_size = 0x2000; 1984 nxgep->rx_bksize_code = RBR_BKSIZE_8K; 1985 #endif 1986 /* 1987 * Get the system DMA burst size. 
1988 */ 1989 ddi_status = ddi_dma_alloc_handle(nxgep->dip, &nxge_tx_dma_attr, 1990 DDI_DMA_DONTWAIT, 0, 1991 &nxgep->dmasparehandle); 1992 if (ddi_status != DDI_SUCCESS) { 1993 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1994 "ddi_dma_alloc_handle: failed " 1995 " status 0x%x", ddi_status)); 1996 goto nxge_get_soft_properties_exit; 1997 } 1998 1999 ddi_status = ddi_dma_addr_bind_handle(nxgep->dmasparehandle, NULL, 2000 (caddr_t)nxgep->dmasparehandle, 2001 sizeof (nxgep->dmasparehandle), 2002 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 2003 DDI_DMA_DONTWAIT, 0, 2004 &cookie, &count); 2005 if (ddi_status != DDI_DMA_MAPPED) { 2006 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2007 "Binding spare handle to find system" 2008 " burstsize failed.")); 2009 ddi_status = DDI_FAILURE; 2010 goto nxge_get_soft_properties_fail1; 2011 } 2012 2013 nxgep->sys_burst_sz = ddi_dma_burstsizes(nxgep->dmasparehandle); 2014 (void) ddi_dma_unbind_handle(nxgep->dmasparehandle); 2015 2016 nxge_get_soft_properties_fail1: 2017 ddi_dma_free_handle(&nxgep->dmasparehandle); 2018 2019 nxge_get_soft_properties_exit: 2020 2021 if (ddi_status != DDI_SUCCESS) 2022 status |= (NXGE_ERROR | NXGE_DDI_FAILED); 2023 2024 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 2025 "<== nxge_setup_system_dma_pages status = 0x%08x", status)); 2026 return (status); 2027 } 2028 2029 static nxge_status_t 2030 nxge_alloc_mem_pool(p_nxge_t nxgep) 2031 { 2032 nxge_status_t status = NXGE_OK; 2033 2034 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_alloc_mem_pool")); 2035 2036 status = nxge_alloc_rx_mem_pool(nxgep); 2037 if (status != NXGE_OK) { 2038 return (NXGE_ERROR); 2039 } 2040 2041 status = nxge_alloc_tx_mem_pool(nxgep); 2042 if (status != NXGE_OK) { 2043 nxge_free_rx_mem_pool(nxgep); 2044 return (NXGE_ERROR); 2045 } 2046 2047 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_alloc_mem_pool")); 2048 return (NXGE_OK); 2049 } 2050 2051 static void 2052 nxge_free_mem_pool(p_nxge_t nxgep) 2053 { 2054 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_mem_pool")); 2055 2056 
	nxge_free_rx_mem_pool(nxgep);
	nxge_free_tx_mem_pool(nxgep);

	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_mem_pool"));
}

/*
 * nxge_alloc_rx_mem_pool: allocate the bookkeeping structures for the
 * receive side (buffer pool, control pool, per-channel chunk counts and
 * the RBR/RCR/mailbox ring arrays) and compute the per-port RBR/RCR
 * sizes, honoring the N2/NIU contiguous-memory limits where applicable.
 * The actual DMA buffers are allocated later, per channel, by
 * nxge_alloc_rxb().
 */
nxge_status_t
nxge_alloc_rx_mem_pool(p_nxge_t nxgep)
{
	uint32_t rdc_max;
	p_nxge_dma_pt_cfg_t p_all_cfgp;
	p_nxge_hw_pt_cfg_t p_cfgp;
	p_nxge_dma_pool_t dma_poolp;
	p_nxge_dma_common_t *dma_buf_p;
	p_nxge_dma_pool_t dma_cntl_poolp;
	p_nxge_dma_common_t *dma_cntl_p;
	uint32_t *num_chunks; /* per dma */
	nxge_status_t status = NXGE_OK;

	uint32_t nxge_port_rbr_size;
	uint32_t nxge_port_rbr_spare_size;
	uint32_t nxge_port_rcr_size;
	uint32_t rx_cntl_alloc_size;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_mem_pool"));

	p_all_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
	rdc_max = NXGE_MAX_RDCS;

	/*
	 * Allocate memory for the common DMA data structures.
	 */
	dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t),
	    KM_SLEEP);
	dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
	    sizeof (p_nxge_dma_common_t) * rdc_max, KM_SLEEP);

	dma_cntl_poolp = (p_nxge_dma_pool_t)
	    KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP);
	dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
	    sizeof (p_nxge_dma_common_t) * rdc_max, KM_SLEEP);

	num_chunks = (uint32_t *)KMEM_ZALLOC(
	    sizeof (uint32_t) * rdc_max, KM_SLEEP);

	/*
	 * Assume that each DMA channel will be configured with
	 * the default block size.
	 * rbr block counts are modulo the batch count (16).
	 */
	nxge_port_rbr_size = p_all_cfgp->rbr_size;
	nxge_port_rcr_size = p_all_cfgp->rcr_size;

	if (!nxge_port_rbr_size) {
		nxge_port_rbr_size = NXGE_RBR_RBB_DEFAULT;
	}
	/* Round the RBR size up to a multiple of the post batch count. */
	if (nxge_port_rbr_size % NXGE_RXDMA_POST_BATCH) {
		nxge_port_rbr_size = (NXGE_RXDMA_POST_BATCH *
		    (nxge_port_rbr_size / NXGE_RXDMA_POST_BATCH + 1));
	}

	p_all_cfgp->rbr_size = nxge_port_rbr_size;
	nxge_port_rbr_spare_size = nxge_rbr_spare_size;

	/* Likewise round the spare size up to the batch count. */
	if (nxge_port_rbr_spare_size % NXGE_RXDMA_POST_BATCH) {
		nxge_port_rbr_spare_size = (NXGE_RXDMA_POST_BATCH *
		    (nxge_port_rbr_spare_size / NXGE_RXDMA_POST_BATCH + 1));
	}
	if (nxge_port_rbr_size > RBR_DEFAULT_MAX_BLKS) {
		NXGE_DEBUG_MSG((nxgep, MEM_CTL,
		    "nxge_alloc_rx_mem_pool: RBR size too high %d, "
		    "set to default %d",
		    nxge_port_rbr_size, RBR_DEFAULT_MAX_BLKS));
		nxge_port_rbr_size = RBR_DEFAULT_MAX_BLKS;
	}
	if (nxge_port_rcr_size > RCR_DEFAULT_MAX) {
		NXGE_DEBUG_MSG((nxgep, MEM_CTL,
		    "nxge_alloc_rx_mem_pool: RCR too high %d, "
		    "set to default %d",
		    nxge_port_rcr_size, RCR_DEFAULT_MAX));
		nxge_port_rcr_size = RCR_DEFAULT_MAX;
	}

	/*
	 * N2/NIU has limitation on the descriptor sizes (contiguous
	 * memory allocation on data buffers to 4M (contig_mem_alloc)
	 * and little endian for control buffers (must use the ddi/dki mem alloc
	 * function).
	 */
#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
	if (nxgep->niu_type == N2_NIU) {
		nxge_port_rbr_spare_size = 0;
		/* NIU requires power-of-2 ring sizes within its maxima. */
		if ((nxge_port_rbr_size > NXGE_NIU_CONTIG_RBR_MAX) ||
		    (!ISP2(nxge_port_rbr_size))) {
			nxge_port_rbr_size = NXGE_NIU_CONTIG_RBR_MAX;
		}
		if ((nxge_port_rcr_size > NXGE_NIU_CONTIG_RCR_MAX) ||
		    (!ISP2(nxge_port_rcr_size))) {
			nxge_port_rcr_size = NXGE_NIU_CONTIG_RCR_MAX;
		}
	}
#endif

	/*
	 * Addresses of receive block ring, receive completion ring and the
	 * mailbox must be all cache-aligned (64 bytes).
	 */
	rx_cntl_alloc_size = nxge_port_rbr_size + nxge_port_rbr_spare_size;
	rx_cntl_alloc_size *= (sizeof (rx_desc_t));
	rx_cntl_alloc_size += (sizeof (rcr_entry_t) * nxge_port_rcr_size);
	rx_cntl_alloc_size += sizeof (rxdma_mailbox_t);

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_alloc_rx_mem_pool: "
	    "nxge_port_rbr_size = %d nxge_port_rbr_spare_size = %d "
	    "nxge_port_rcr_size = %d "
	    "rx_cntl_alloc_size = %d",
	    nxge_port_rbr_size, nxge_port_rbr_spare_size,
	    nxge_port_rcr_size,
	    rx_cntl_alloc_size));

#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
	if (nxgep->niu_type == N2_NIU) {
		uint32_t rx_buf_alloc_size = (nxgep->rx_default_block_size *
		    (nxge_port_rbr_size + nxge_port_rbr_spare_size));

		if (!ISP2(rx_buf_alloc_size)) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_alloc_rx_mem_pool: "
			    " must be power of 2"));
			status |= (NXGE_ERROR | NXGE_DDI_FAILED);
			/*
			 * NOTE(review): this early exit leaks dma_poolp,
			 * dma_buf_p, dma_cntl_poolp, dma_cntl_p and
			 * num_chunks allocated above -- confirm and fix.
			 */
			goto nxge_alloc_rx_mem_pool_exit;
		}

		/* contig_mem_alloc limits buffers to 4M. */
		if (rx_buf_alloc_size > (1 << 22)) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_alloc_rx_mem_pool: "
			    " limit size to 4M"));
			status |= (NXGE_ERROR | NXGE_DDI_FAILED);
			goto nxge_alloc_rx_mem_pool_exit;
		}

		if (rx_cntl_alloc_size < 0x2000) {
			rx_cntl_alloc_size = 0x2000;
		}
	}
#endif
	/* Record the computed sizes for the per-channel allocators. */
	nxgep->nxge_port_rbr_size = nxge_port_rbr_size;
	nxgep->nxge_port_rcr_size = nxge_port_rcr_size;
	nxgep->nxge_port_rbr_spare_size = nxge_port_rbr_spare_size;
	nxgep->nxge_port_rx_cntl_alloc_size = rx_cntl_alloc_size;

	dma_poolp->ndmas = p_cfgp->max_rdcs;
	dma_poolp->num_chunks = num_chunks;
	dma_poolp->buf_allocated = B_TRUE;
	nxgep->rx_buf_pool_p = dma_poolp;
	dma_poolp->dma_buf_pool_p = dma_buf_p;

	dma_cntl_poolp->ndmas = p_cfgp->max_rdcs;
	dma_cntl_poolp->buf_allocated = B_TRUE;
	nxgep->rx_cntl_pool_p = dma_cntl_poolp;
	dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p;

	/* Allocate the receive rings, too. */
	nxgep->rx_rbr_rings =
	    KMEM_ZALLOC(sizeof (rx_rbr_rings_t), KM_SLEEP);
	nxgep->rx_rbr_rings->rbr_rings =
	    KMEM_ZALLOC(sizeof (p_rx_rbr_ring_t) * rdc_max, KM_SLEEP);
	nxgep->rx_rcr_rings =
	    KMEM_ZALLOC(sizeof (rx_rcr_rings_t), KM_SLEEP);
	nxgep->rx_rcr_rings->rcr_rings =
	    KMEM_ZALLOC(sizeof (p_rx_rcr_ring_t) * rdc_max, KM_SLEEP);
	nxgep->rx_mbox_areas_p =
	    KMEM_ZALLOC(sizeof (rx_mbox_areas_t), KM_SLEEP);
	nxgep->rx_mbox_areas_p->rxmbox_areas =
	    KMEM_ZALLOC(sizeof (p_rx_mbox_t) * rdc_max, KM_SLEEP);

	nxgep->rx_rbr_rings->ndmas = nxgep->rx_rcr_rings->ndmas =
	    p_cfgp->max_rdcs;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
	    "<== nxge_alloc_rx_mem_pool:status 0x%08x", status));

nxge_alloc_rx_mem_pool_exit:
	return (status);
}

/*
 * nxge_alloc_rxb
 *
 *	Allocate the receive buffers and the descriptor-ring/mailbox
 *	control memory for one RDC.
 *
 * Arguments:
 * 	nxgep
 * 	channel		The channel to map into our kernel space.
 *
 * Notes:
 *	Buffer sizing comes from the per-port values computed by
 *	nxge_alloc_rx_mem_pool().  On control-memory failure, the data
 *	buffers allocated here are released again before returning.
 */
nxge_status_t
nxge_alloc_rxb(
	p_nxge_t nxgep,
	int channel)
{
	size_t rx_buf_alloc_size;
	nxge_status_t status = NXGE_OK;

	nxge_dma_common_t **data;
	nxge_dma_common_t **control;
	uint32_t *num_chunks;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rbb"));

	/*
	 * Allocate memory for the receive buffers and descriptor rings.
	 * Replace these allocation functions with the interface functions
	 * provided by the partition manager if/when they are available.
	 */

	/*
	 * Allocate memory for the receive buffer blocks.
	 */
	rx_buf_alloc_size = (nxgep->rx_default_block_size *
	    (nxgep->nxge_port_rbr_size + nxgep->nxge_port_rbr_spare_size));

	data = &nxgep->rx_buf_pool_p->dma_buf_pool_p[channel];
	num_chunks = &nxgep->rx_buf_pool_p->num_chunks[channel];

	if ((status = nxge_alloc_rx_buf_dma(
	    nxgep, channel, data, rx_buf_alloc_size,
	    nxgep->rx_default_block_size, num_chunks)) != NXGE_OK) {
		return (status);
	}

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_alloc_rxb(): "
	    "dma %d dma_buf_p %llx &dma_buf_p %llx", channel, *data, data));

	/*
	 * Allocate memory for descriptor rings and mailbox.
2307 */ 2308 control = &nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel]; 2309 2310 if ((status = nxge_alloc_rx_cntl_dma( 2311 nxgep, channel, control, nxgep->nxge_port_rx_cntl_alloc_size)) 2312 != NXGE_OK) { 2313 nxge_free_rx_cntl_dma(nxgep, *control); 2314 (*data)->buf_alloc_state |= BUF_ALLOCATED_WAIT_FREE; 2315 nxge_free_rx_buf_dma(nxgep, *data, *num_chunks); 2316 return (status); 2317 } 2318 2319 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2320 "<== nxge_alloc_rx_mem_pool:status 0x%08x", status)); 2321 2322 return (status); 2323 } 2324 2325 void 2326 nxge_free_rxb( 2327 p_nxge_t nxgep, 2328 int channel) 2329 { 2330 nxge_dma_common_t *data; 2331 nxge_dma_common_t *control; 2332 uint32_t num_chunks; 2333 2334 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rbb")); 2335 2336 data = nxgep->rx_buf_pool_p->dma_buf_pool_p[channel]; 2337 num_chunks = nxgep->rx_buf_pool_p->num_chunks[channel]; 2338 nxge_free_rx_buf_dma(nxgep, data, num_chunks); 2339 2340 nxgep->rx_buf_pool_p->dma_buf_pool_p[channel] = 0; 2341 nxgep->rx_buf_pool_p->num_chunks[channel] = 0; 2342 2343 control = nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel]; 2344 nxge_free_rx_cntl_dma(nxgep, control); 2345 2346 nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel] = 0; 2347 2348 KMEM_FREE(data, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 2349 KMEM_FREE(control, sizeof (nxge_dma_common_t)); 2350 2351 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_alloc_rbb")); 2352 } 2353 2354 static void 2355 nxge_free_rx_mem_pool(p_nxge_t nxgep) 2356 { 2357 int rdc_max = NXGE_MAX_RDCS; 2358 2359 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_rx_mem_pool")); 2360 2361 if (!nxgep->rx_buf_pool_p || !nxgep->rx_buf_pool_p->buf_allocated) { 2362 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2363 "<== nxge_free_rx_mem_pool " 2364 "(null rx buf pool or buf not allocated")); 2365 return; 2366 } 2367 if (!nxgep->rx_cntl_pool_p || !nxgep->rx_cntl_pool_p->buf_allocated) { 2368 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2369 "<== nxge_free_rx_mem_pool " 2370 "(null rx cntl buf 
/*
 * nxge_free_rx_mem_pool
 *
 *	Free all of the per-port RDC control data structures allocated by
 *	nxge_alloc_rx_mem_pool().  Per-channel buffers are freed separately
 *	(nxge_free_rxb); this only tears down the port-level bookkeeping.
 *
 * Notes:
 *	Every KMEM_FREE size below must stay in lockstep with the matching
 *	KMEM_ZALLOC in nxge_alloc_rx_mem_pool().
 *
 * Context:
 *	Any domain
 */
static void
nxge_free_rx_mem_pool(p_nxge_t nxgep)
{
	int rdc_max = NXGE_MAX_RDCS;

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_rx_mem_pool"));

	/* Nothing to do unless both pools were fully allocated. */
	if (!nxgep->rx_buf_pool_p || !nxgep->rx_buf_pool_p->buf_allocated) {
		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
		    "<== nxge_free_rx_mem_pool "
		    "(null rx buf pool or buf not allocated"));
		return;
	}
	if (!nxgep->rx_cntl_pool_p || !nxgep->rx_cntl_pool_p->buf_allocated) {
		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
		    "<== nxge_free_rx_mem_pool "
		    "(null rx cntl buf pool or cntl buf not allocated"));
		return;
	}

	/* Control pool: per-channel slot array, then the pool itself. */
	KMEM_FREE(nxgep->rx_cntl_pool_p->dma_buf_pool_p,
	    sizeof (p_nxge_dma_common_t) * rdc_max);
	KMEM_FREE(nxgep->rx_cntl_pool_p, sizeof (nxge_dma_pool_t));

	/* Buffer pool: chunk counts, slot array, then the pool itself. */
	KMEM_FREE(nxgep->rx_buf_pool_p->num_chunks,
	    sizeof (uint32_t) * rdc_max);
	KMEM_FREE(nxgep->rx_buf_pool_p->dma_buf_pool_p,
	    sizeof (p_nxge_dma_common_t) * rdc_max);
	KMEM_FREE(nxgep->rx_buf_pool_p, sizeof (nxge_dma_pool_t));

	nxgep->rx_buf_pool_p = 0;
	nxgep->rx_cntl_pool_p = 0;

	/* Receive ring / mailbox pointer arrays and containers. */
	KMEM_FREE(nxgep->rx_rbr_rings->rbr_rings,
	    sizeof (p_rx_rbr_ring_t) * rdc_max);
	KMEM_FREE(nxgep->rx_rbr_rings, sizeof (rx_rbr_rings_t));
	KMEM_FREE(nxgep->rx_rcr_rings->rcr_rings,
	    sizeof (p_rx_rcr_ring_t) * rdc_max);
	KMEM_FREE(nxgep->rx_rcr_rings, sizeof (rx_rcr_rings_t));
	KMEM_FREE(nxgep->rx_mbox_areas_p->rxmbox_areas,
	    sizeof (p_rx_mbox_t) * rdc_max);
	KMEM_FREE(nxgep->rx_mbox_areas_p, sizeof (rx_mbox_areas_t));

	nxgep->rx_rbr_rings = 0;
	nxgep->rx_rcr_rings = 0;
	nxgep->rx_mbox_areas_p = 0;

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_rx_mem_pool"));
}
#if defined(RX_USE_RECLAIM_POST) 2430 total_alloc_size = alloc_size + alloc_size/4; 2431 #endif 2432 2433 i = 0; 2434 size_index = 0; 2435 array_size = sizeof (alloc_sizes)/sizeof (size_t); 2436 while ((alloc_sizes[size_index] < alloc_size) && 2437 (size_index < array_size)) 2438 size_index++; 2439 if (size_index >= array_size) { 2440 size_index = array_size - 1; 2441 } 2442 2443 /* For Neptune, use kmem_alloc if the kmem flag is set. */ 2444 if (nxgep->niu_type != N2_NIU && nxge_use_kmem_alloc) { 2445 use_kmem_alloc = B_TRUE; 2446 #if defined(__i386) || defined(__amd64) 2447 size_index = 0; 2448 #endif 2449 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2450 "==> nxge_alloc_rx_buf_dma: " 2451 "Neptune use kmem_alloc() - size_index %d", 2452 size_index)); 2453 } 2454 2455 while ((allocated < total_alloc_size) && 2456 (size_index >= 0) && (i < NXGE_DMA_BLOCK)) { 2457 rx_dmap[i].dma_chunk_index = i; 2458 rx_dmap[i].block_size = block_size; 2459 rx_dmap[i].alength = alloc_sizes[size_index]; 2460 rx_dmap[i].orig_alength = rx_dmap[i].alength; 2461 rx_dmap[i].nblocks = alloc_sizes[size_index] / block_size; 2462 rx_dmap[i].dma_channel = dma_channel; 2463 rx_dmap[i].contig_alloc_type = B_FALSE; 2464 rx_dmap[i].kmem_alloc_type = B_FALSE; 2465 rx_dmap[i].buf_alloc_type = DDI_MEM_ALLOC; 2466 2467 /* 2468 * N2/NIU: data buffers must be contiguous as the driver 2469 * needs to call Hypervisor api to set up 2470 * logical pages. 
2471 */ 2472 if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) { 2473 rx_dmap[i].contig_alloc_type = B_TRUE; 2474 rx_dmap[i].buf_alloc_type = CONTIG_MEM_ALLOC; 2475 } else if (use_kmem_alloc) { 2476 /* For Neptune, use kmem_alloc */ 2477 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2478 "==> nxge_alloc_rx_buf_dma: " 2479 "Neptune use kmem_alloc()")); 2480 rx_dmap[i].kmem_alloc_type = B_TRUE; 2481 rx_dmap[i].buf_alloc_type = KMEM_ALLOC; 2482 } 2483 2484 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2485 "alloc_rx_buf_dma rdc %d chunk %d bufp %llx size %x " 2486 "i %d nblocks %d alength %d", 2487 dma_channel, i, &rx_dmap[i], block_size, 2488 i, rx_dmap[i].nblocks, 2489 rx_dmap[i].alength)); 2490 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 2491 &nxge_rx_dma_attr, 2492 rx_dmap[i].alength, 2493 &nxge_dev_buf_dma_acc_attr, 2494 DDI_DMA_READ | DDI_DMA_STREAMING, 2495 (p_nxge_dma_common_t)(&rx_dmap[i])); 2496 if (status != NXGE_OK) { 2497 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2498 "nxge_alloc_rx_buf_dma: Alloc Failed: " 2499 "dma %d size_index %d size requested %d", 2500 dma_channel, 2501 size_index, 2502 rx_dmap[i].alength)); 2503 size_index--; 2504 } else { 2505 rx_dmap[i].buf_alloc_state = BUF_ALLOCATED; 2506 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2507 " nxge_alloc_rx_buf_dma DONE alloc mem: " 2508 "dma %d dma_buf_p $%p kaddrp $%p alength %d " 2509 "buf_alloc_state %d alloc_type %d", 2510 dma_channel, 2511 &rx_dmap[i], 2512 rx_dmap[i].kaddrp, 2513 rx_dmap[i].alength, 2514 rx_dmap[i].buf_alloc_state, 2515 rx_dmap[i].buf_alloc_type)); 2516 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2517 " alloc_rx_buf_dma allocated rdc %d " 2518 "chunk %d size %x dvma %x bufp %llx kaddrp $%p", 2519 dma_channel, i, rx_dmap[i].alength, 2520 rx_dmap[i].ioaddr_pp, &rx_dmap[i], 2521 rx_dmap[i].kaddrp)); 2522 i++; 2523 allocated += alloc_sizes[size_index]; 2524 } 2525 } 2526 2527 if (allocated < total_alloc_size) { 2528 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2529 "==> nxge_alloc_rx_buf_dma: not enough for channel %d " 2530 
"allocated 0x%x requested 0x%x", 2531 dma_channel, 2532 allocated, total_alloc_size)); 2533 status = NXGE_ERROR; 2534 goto nxge_alloc_rx_mem_fail1; 2535 } 2536 2537 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2538 "==> nxge_alloc_rx_buf_dma: Allocated for channel %d " 2539 "allocated 0x%x requested 0x%x", 2540 dma_channel, 2541 allocated, total_alloc_size)); 2542 2543 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2544 " alloc_rx_buf_dma rdc %d allocated %d chunks", 2545 dma_channel, i)); 2546 *num_chunks = i; 2547 *dmap = rx_dmap; 2548 2549 goto nxge_alloc_rx_mem_exit; 2550 2551 nxge_alloc_rx_mem_fail1: 2552 KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 2553 2554 nxge_alloc_rx_mem_exit: 2555 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2556 "<== nxge_alloc_rx_buf_dma status 0x%08x", status)); 2557 2558 return (status); 2559 } 2560 2561 /*ARGSUSED*/ 2562 static void 2563 nxge_free_rx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap, 2564 uint32_t num_chunks) 2565 { 2566 int i; 2567 2568 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2569 "==> nxge_free_rx_buf_dma: # of chunks %d", num_chunks)); 2570 2571 if (dmap == 0) 2572 return; 2573 2574 for (i = 0; i < num_chunks; i++) { 2575 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2576 "==> nxge_free_rx_buf_dma: chunk %d dmap 0x%llx", 2577 i, dmap)); 2578 nxge_dma_free_rx_data_buf(dmap++); 2579 } 2580 2581 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_rx_buf_dma")); 2582 } 2583 2584 /*ARGSUSED*/ 2585 static nxge_status_t 2586 nxge_alloc_rx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel, 2587 p_nxge_dma_common_t *dmap, size_t size) 2588 { 2589 p_nxge_dma_common_t rx_dmap; 2590 nxge_status_t status = NXGE_OK; 2591 2592 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_cntl_dma")); 2593 2594 rx_dmap = (p_nxge_dma_common_t) 2595 KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP); 2596 2597 rx_dmap->contig_alloc_type = B_FALSE; 2598 rx_dmap->kmem_alloc_type = B_FALSE; 2599 2600 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 2601 &nxge_desc_dma_attr, 2602 size, 2603 
&nxge_dev_desc_dma_acc_attr, 2604 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 2605 rx_dmap); 2606 if (status != NXGE_OK) { 2607 goto nxge_alloc_rx_cntl_dma_fail1; 2608 } 2609 2610 *dmap = rx_dmap; 2611 goto nxge_alloc_rx_cntl_dma_exit; 2612 2613 nxge_alloc_rx_cntl_dma_fail1: 2614 KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t)); 2615 2616 nxge_alloc_rx_cntl_dma_exit: 2617 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2618 "<== nxge_alloc_rx_cntl_dma status 0x%08x", status)); 2619 2620 return (status); 2621 } 2622 2623 /*ARGSUSED*/ 2624 static void 2625 nxge_free_rx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap) 2626 { 2627 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_rx_cntl_dma")); 2628 2629 if (dmap == 0) 2630 return; 2631 2632 nxge_dma_mem_free(dmap); 2633 2634 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_rx_cntl_dma")); 2635 } 2636 2637 typedef struct { 2638 size_t tx_size; 2639 size_t cr_size; 2640 size_t threshhold; 2641 } nxge_tdc_sizes_t; 2642 2643 static 2644 nxge_status_t 2645 nxge_tdc_sizes( 2646 nxge_t *nxgep, 2647 nxge_tdc_sizes_t *sizes) 2648 { 2649 uint32_t threshhold; /* The bcopy() threshhold */ 2650 size_t tx_size; /* Transmit buffer size */ 2651 size_t cr_size; /* Completion ring size */ 2652 2653 /* 2654 * Assume that each DMA channel will be configured with the 2655 * default transmit buffer size for copying transmit data. 2656 * (If a packet is bigger than this, it will not be copied.) 
2657 */ 2658 if (nxgep->niu_type == N2_NIU) { 2659 threshhold = TX_BCOPY_SIZE; 2660 } else { 2661 threshhold = nxge_bcopy_thresh; 2662 } 2663 tx_size = nxge_tx_ring_size * threshhold; 2664 2665 cr_size = nxge_tx_ring_size * sizeof (tx_desc_t); 2666 cr_size += sizeof (txdma_mailbox_t); 2667 2668 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2669 if (nxgep->niu_type == N2_NIU) { 2670 if (!ISP2(tx_size)) { 2671 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2672 "==> nxge_tdc_sizes: Tx size" 2673 " must be power of 2")); 2674 return (NXGE_ERROR); 2675 } 2676 2677 if (tx_size > (1 << 22)) { 2678 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2679 "==> nxge_tdc_sizes: Tx size" 2680 " limited to 4M")); 2681 return (NXGE_ERROR); 2682 } 2683 2684 if (cr_size < 0x2000) 2685 cr_size = 0x2000; 2686 } 2687 #endif 2688 2689 sizes->threshhold = threshhold; 2690 sizes->tx_size = tx_size; 2691 sizes->cr_size = cr_size; 2692 2693 return (NXGE_OK); 2694 } 2695 /* 2696 * nxge_alloc_txb 2697 * 2698 * Allocate buffers for an TDC. 2699 * 2700 * Arguments: 2701 * nxgep 2702 * channel The channel to map into our kernel space. 2703 * 2704 * Notes: 2705 * 2706 * NPI function calls: 2707 * 2708 * NXGE function calls: 2709 * 2710 * Registers accessed: 2711 * 2712 * Context: 2713 * 2714 * Taking apart: 2715 * 2716 * Open questions: 2717 * 2718 */ 2719 nxge_status_t 2720 nxge_alloc_txb( 2721 p_nxge_t nxgep, 2722 int channel) 2723 { 2724 nxge_dma_common_t **dma_buf_p; 2725 nxge_dma_common_t **dma_cntl_p; 2726 uint32_t *num_chunks; 2727 nxge_status_t status = NXGE_OK; 2728 2729 nxge_tdc_sizes_t sizes; 2730 2731 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tbb")); 2732 2733 if (nxge_tdc_sizes(nxgep, &sizes) != NXGE_OK) 2734 return (NXGE_ERROR); 2735 2736 /* 2737 * Allocate memory for transmit buffers and descriptor rings. 2738 * Replace these allocation functions with the interface functions 2739 * provided by the partition manager Real Soon Now. 
2740 */ 2741 dma_buf_p = &nxgep->tx_buf_pool_p->dma_buf_pool_p[channel]; 2742 num_chunks = &nxgep->tx_buf_pool_p->num_chunks[channel]; 2743 2744 dma_cntl_p = &nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel]; 2745 2746 /* 2747 * Allocate memory for transmit buffers and descriptor rings. 2748 * Replace allocation functions with interface functions provided 2749 * by the partition manager when it is available. 2750 * 2751 * Allocate memory for the transmit buffer pool. 2752 */ 2753 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2754 "sizes: tx: %ld, cr:%ld, th:%ld", 2755 sizes.tx_size, sizes.cr_size, sizes.threshhold)); 2756 2757 *num_chunks = 0; 2758 status = nxge_alloc_tx_buf_dma(nxgep, channel, dma_buf_p, 2759 sizes.tx_size, sizes.threshhold, num_chunks); 2760 if (status != NXGE_OK) { 2761 cmn_err(CE_NOTE, "nxge_alloc_tx_buf_dma failed!"); 2762 return (status); 2763 } 2764 2765 /* 2766 * Allocate memory for descriptor rings and mailbox. 2767 */ 2768 status = nxge_alloc_tx_cntl_dma(nxgep, channel, dma_cntl_p, 2769 sizes.cr_size); 2770 if (status != NXGE_OK) { 2771 nxge_free_tx_buf_dma(nxgep, *dma_buf_p, *num_chunks); 2772 cmn_err(CE_NOTE, "nxge_alloc_tx_cntl_dma failed!"); 2773 return (status); 2774 } 2775 2776 return (NXGE_OK); 2777 } 2778 2779 void 2780 nxge_free_txb( 2781 p_nxge_t nxgep, 2782 int channel) 2783 { 2784 nxge_dma_common_t *data; 2785 nxge_dma_common_t *control; 2786 uint32_t num_chunks; 2787 2788 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_txb")); 2789 2790 data = nxgep->tx_buf_pool_p->dma_buf_pool_p[channel]; 2791 num_chunks = nxgep->tx_buf_pool_p->num_chunks[channel]; 2792 nxge_free_tx_buf_dma(nxgep, data, num_chunks); 2793 2794 nxgep->tx_buf_pool_p->dma_buf_pool_p[channel] = 0; 2795 nxgep->tx_buf_pool_p->num_chunks[channel] = 0; 2796 2797 control = nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel]; 2798 nxge_free_tx_cntl_dma(nxgep, control); 2799 2800 nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel] = 0; 2801 2802 KMEM_FREE(data, sizeof (nxge_dma_common_t) * 
NXGE_DMA_BLOCK); 2803 KMEM_FREE(control, sizeof (nxge_dma_common_t)); 2804 2805 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_txb")); 2806 } 2807 2808 /* 2809 * nxge_alloc_tx_mem_pool 2810 * 2811 * This function allocates all of the per-port TDC control data structures. 2812 * The per-channel (TDC) data structures are allocated when needed. 2813 * 2814 * Arguments: 2815 * nxgep 2816 * 2817 * Notes: 2818 * 2819 * Context: 2820 * Any domain 2821 */ 2822 nxge_status_t 2823 nxge_alloc_tx_mem_pool(p_nxge_t nxgep) 2824 { 2825 nxge_hw_pt_cfg_t *p_cfgp; 2826 nxge_dma_pool_t *dma_poolp; 2827 nxge_dma_common_t **dma_buf_p; 2828 nxge_dma_pool_t *dma_cntl_poolp; 2829 nxge_dma_common_t **dma_cntl_p; 2830 uint32_t *num_chunks; /* per dma */ 2831 int tdc_max; 2832 2833 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_alloc_tx_mem_pool")); 2834 2835 p_cfgp = &nxgep->pt_config.hw_config; 2836 tdc_max = NXGE_MAX_TDCS; 2837 2838 /* 2839 * Allocate memory for each transmit DMA channel. 2840 */ 2841 dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t), 2842 KM_SLEEP); 2843 dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 2844 sizeof (p_nxge_dma_common_t) * tdc_max, KM_SLEEP); 2845 2846 dma_cntl_poolp = (p_nxge_dma_pool_t) 2847 KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP); 2848 dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC( 2849 sizeof (p_nxge_dma_common_t) * tdc_max, KM_SLEEP); 2850 2851 if (nxge_tx_ring_size > TDC_DEFAULT_MAX) { 2852 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 2853 "nxge_alloc_tx_mem_pool: TDC too high %d, " 2854 "set to default %d", 2855 nxge_tx_ring_size, TDC_DEFAULT_MAX)); 2856 nxge_tx_ring_size = TDC_DEFAULT_MAX; 2857 } 2858 2859 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2860 /* 2861 * N2/NIU has limitation on the descriptor sizes (contiguous 2862 * memory allocation on data buffers to 4M (contig_mem_alloc) 2863 * and little endian for control buffers (must use the ddi/dki mem alloc 2864 * function). 
The transmit ring is limited to 8K (includes the 2865 * mailbox). 2866 */ 2867 if (nxgep->niu_type == N2_NIU) { 2868 if ((nxge_tx_ring_size > NXGE_NIU_CONTIG_TX_MAX) || 2869 (!ISP2(nxge_tx_ring_size))) { 2870 nxge_tx_ring_size = NXGE_NIU_CONTIG_TX_MAX; 2871 } 2872 } 2873 #endif 2874 2875 nxgep->nxge_port_tx_ring_size = nxge_tx_ring_size; 2876 2877 num_chunks = (uint32_t *)KMEM_ZALLOC( 2878 sizeof (uint32_t) * tdc_max, KM_SLEEP); 2879 2880 dma_poolp->ndmas = p_cfgp->tdc.owned; 2881 dma_poolp->num_chunks = num_chunks; 2882 dma_poolp->dma_buf_pool_p = dma_buf_p; 2883 nxgep->tx_buf_pool_p = dma_poolp; 2884 2885 dma_poolp->buf_allocated = B_TRUE; 2886 2887 dma_cntl_poolp->ndmas = p_cfgp->tdc.owned; 2888 dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p; 2889 nxgep->tx_cntl_pool_p = dma_cntl_poolp; 2890 2891 dma_cntl_poolp->buf_allocated = B_TRUE; 2892 2893 nxgep->tx_rings = 2894 KMEM_ZALLOC(sizeof (tx_rings_t), KM_SLEEP); 2895 nxgep->tx_rings->rings = 2896 KMEM_ZALLOC(sizeof (p_tx_ring_t) * tdc_max, KM_SLEEP); 2897 nxgep->tx_mbox_areas_p = 2898 KMEM_ZALLOC(sizeof (tx_mbox_areas_t), KM_SLEEP); 2899 nxgep->tx_mbox_areas_p->txmbox_areas_p = 2900 KMEM_ZALLOC(sizeof (p_tx_mbox_t) * tdc_max, KM_SLEEP); 2901 2902 nxgep->tx_rings->ndmas = p_cfgp->tdc.owned; 2903 2904 NXGE_DEBUG_MSG((nxgep, MEM_CTL, 2905 "==> nxge_alloc_tx_mem_pool: ndmas %d poolp->ndmas %d", 2906 tdc_max, dma_poolp->ndmas)); 2907 2908 return (NXGE_OK); 2909 } 2910 2911 nxge_status_t 2912 nxge_alloc_tx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel, 2913 p_nxge_dma_common_t *dmap, size_t alloc_size, 2914 size_t block_size, uint32_t *num_chunks) 2915 { 2916 p_nxge_dma_common_t tx_dmap; 2917 nxge_status_t status = NXGE_OK; 2918 size_t total_alloc_size; 2919 size_t allocated = 0; 2920 int i, size_index, array_size; 2921 2922 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_buf_dma")); 2923 2924 tx_dmap = (p_nxge_dma_common_t) 2925 KMEM_ZALLOC(sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK, 2926 KM_SLEEP); 2927 2928 
total_alloc_size = alloc_size; 2929 i = 0; 2930 size_index = 0; 2931 array_size = sizeof (alloc_sizes) / sizeof (size_t); 2932 while ((alloc_sizes[size_index] < alloc_size) && 2933 (size_index < array_size)) 2934 size_index++; 2935 if (size_index >= array_size) { 2936 size_index = array_size - 1; 2937 } 2938 2939 while ((allocated < total_alloc_size) && 2940 (size_index >= 0) && (i < NXGE_DMA_BLOCK)) { 2941 2942 tx_dmap[i].dma_chunk_index = i; 2943 tx_dmap[i].block_size = block_size; 2944 tx_dmap[i].alength = alloc_sizes[size_index]; 2945 tx_dmap[i].orig_alength = tx_dmap[i].alength; 2946 tx_dmap[i].nblocks = alloc_sizes[size_index] / block_size; 2947 tx_dmap[i].dma_channel = dma_channel; 2948 tx_dmap[i].contig_alloc_type = B_FALSE; 2949 tx_dmap[i].kmem_alloc_type = B_FALSE; 2950 2951 /* 2952 * N2/NIU: data buffers must be contiguous as the driver 2953 * needs to call Hypervisor api to set up 2954 * logical pages. 2955 */ 2956 if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) { 2957 tx_dmap[i].contig_alloc_type = B_TRUE; 2958 } 2959 2960 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 2961 &nxge_tx_dma_attr, 2962 tx_dmap[i].alength, 2963 &nxge_dev_buf_dma_acc_attr, 2964 DDI_DMA_WRITE | DDI_DMA_STREAMING, 2965 (p_nxge_dma_common_t)(&tx_dmap[i])); 2966 if (status != NXGE_OK) { 2967 size_index--; 2968 } else { 2969 i++; 2970 allocated += alloc_sizes[size_index]; 2971 } 2972 } 2973 2974 if (allocated < total_alloc_size) { 2975 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2976 "==> nxge_alloc_tx_buf_dma: not enough channel %d: " 2977 "allocated 0x%x requested 0x%x", 2978 dma_channel, 2979 allocated, total_alloc_size)); 2980 status = NXGE_ERROR; 2981 goto nxge_alloc_tx_mem_fail1; 2982 } 2983 2984 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2985 "==> nxge_alloc_tx_buf_dma: Allocated for channel %d: " 2986 "allocated 0x%x requested 0x%x", 2987 dma_channel, 2988 allocated, total_alloc_size)); 2989 2990 *num_chunks = i; 2991 *dmap = tx_dmap; 2992 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2993 
"==> nxge_alloc_tx_buf_dma dmap 0x%016llx num chunks %d", 2994 *dmap, i)); 2995 goto nxge_alloc_tx_mem_exit; 2996 2997 nxge_alloc_tx_mem_fail1: 2998 KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK); 2999 3000 nxge_alloc_tx_mem_exit: 3001 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3002 "<== nxge_alloc_tx_buf_dma status 0x%08x", status)); 3003 3004 return (status); 3005 } 3006 3007 /*ARGSUSED*/ 3008 static void 3009 nxge_free_tx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap, 3010 uint32_t num_chunks) 3011 { 3012 int i; 3013 3014 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_tx_buf_dma")); 3015 3016 if (dmap == 0) 3017 return; 3018 3019 for (i = 0; i < num_chunks; i++) { 3020 nxge_dma_mem_free(dmap++); 3021 } 3022 3023 NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_tx_buf_dma")); 3024 } 3025 3026 /*ARGSUSED*/ 3027 nxge_status_t 3028 nxge_alloc_tx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel, 3029 p_nxge_dma_common_t *dmap, size_t size) 3030 { 3031 p_nxge_dma_common_t tx_dmap; 3032 nxge_status_t status = NXGE_OK; 3033 3034 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_cntl_dma")); 3035 tx_dmap = (p_nxge_dma_common_t) 3036 KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP); 3037 3038 tx_dmap->contig_alloc_type = B_FALSE; 3039 tx_dmap->kmem_alloc_type = B_FALSE; 3040 3041 status = nxge_dma_mem_alloc(nxgep, nxge_force_dma, 3042 &nxge_desc_dma_attr, 3043 size, 3044 &nxge_dev_desc_dma_acc_attr, 3045 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 3046 tx_dmap); 3047 if (status != NXGE_OK) { 3048 goto nxge_alloc_tx_cntl_dma_fail1; 3049 } 3050 3051 *dmap = tx_dmap; 3052 goto nxge_alloc_tx_cntl_dma_exit; 3053 3054 nxge_alloc_tx_cntl_dma_fail1: 3055 KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t)); 3056 3057 nxge_alloc_tx_cntl_dma_exit: 3058 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3059 "<== nxge_alloc_tx_cntl_dma status 0x%08x", status)); 3060 3061 return (status); 3062 } 3063 3064 /*ARGSUSED*/ 3065 static void 3066 nxge_free_tx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap) 3067 { 
/*
 * nxge_free_tx_cntl_dma
 *
 *	Release control-area DMA memory previously set up by
 *	nxge_alloc_tx_cntl_dma().  A NULL dmap is a silent no-op.
 */
/*ARGSUSED*/
static void
nxge_free_tx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap)
{
	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_tx_cntl_dma"));

	if (dmap == 0)
		return;

	nxge_dma_mem_free(dmap);

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_tx_cntl_dma"));
}

/*
 * nxge_free_tx_mem_pool
 *
 *	This function frees all of the per-port TDC control data
 *	structures.  The per-channel (TDC) data structures are freed when
 *	the channel is stopped.
 *
 * Arguments:
 * 	nxgep
 *
 * Notes:
 *	Every KMEM_FREE size below must stay in lockstep with the matching
 *	KMEM_ZALLOC in nxge_alloc_tx_mem_pool().
 *
 * Context:
 *	Any domain
 */
static void
nxge_free_tx_mem_pool(p_nxge_t nxgep)
{
	int tdc_max = NXGE_MAX_TDCS;

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_tx_mem_pool"));

	/* Nothing to do unless both pools were fully allocated. */
	if (!nxgep->tx_buf_pool_p || !nxgep->tx_buf_pool_p->buf_allocated) {
		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
		    "<== nxge_free_tx_mem_pool "
		    "(null tx buf pool or buf not allocated"));
		return;
	}
	if (!nxgep->tx_cntl_pool_p || !nxgep->tx_cntl_pool_p->buf_allocated) {
		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
		    "<== nxge_free_tx_mem_pool "
		    "(null tx cntl buf pool or cntl buf not allocated"));
		return;
	}

	/* 1. Free the mailboxes. */
	KMEM_FREE(nxgep->tx_mbox_areas_p->txmbox_areas_p,
	    sizeof (p_tx_mbox_t) * tdc_max);
	KMEM_FREE(nxgep->tx_mbox_areas_p, sizeof (tx_mbox_areas_t));

	nxgep->tx_mbox_areas_p = 0;

	/* 2. Free the transmit ring arrays. */
	KMEM_FREE(nxgep->tx_rings->rings,
	    sizeof (p_tx_ring_t) * tdc_max);
	KMEM_FREE(nxgep->tx_rings, sizeof (tx_rings_t));

	nxgep->tx_rings = 0;

	/* 3. Free the completion ring data structures. */
	KMEM_FREE(nxgep->tx_cntl_pool_p->dma_buf_pool_p,
	    sizeof (p_nxge_dma_common_t) * tdc_max);
	KMEM_FREE(nxgep->tx_cntl_pool_p, sizeof (nxge_dma_pool_t));

	nxgep->tx_cntl_pool_p = 0;

	/* 4. Free the data ring data structures. */
	KMEM_FREE(nxgep->tx_buf_pool_p->num_chunks,
	    sizeof (uint32_t) * tdc_max);
	KMEM_FREE(nxgep->tx_buf_pool_p->dma_buf_pool_p,
	    sizeof (p_nxge_dma_common_t) * tdc_max);
	KMEM_FREE(nxgep->tx_buf_pool_p, sizeof (nxge_dma_pool_t));

	nxgep->tx_buf_pool_p = 0;

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_tx_mem_pool"));
}
*/ 3135 KMEM_FREE(nxgep->tx_buf_pool_p->num_chunks, 3136 sizeof (uint32_t) * tdc_max); 3137 KMEM_FREE(nxgep->tx_buf_pool_p->dma_buf_pool_p, 3138 sizeof (p_nxge_dma_common_t) * tdc_max); 3139 KMEM_FREE(nxgep->tx_buf_pool_p, sizeof (nxge_dma_pool_t)); 3140 3141 nxgep->tx_buf_pool_p = 0; 3142 3143 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_tx_mem_pool")); 3144 } 3145 3146 /*ARGSUSED*/ 3147 static nxge_status_t 3148 nxge_dma_mem_alloc(p_nxge_t nxgep, dma_method_t method, 3149 struct ddi_dma_attr *dma_attrp, 3150 size_t length, ddi_device_acc_attr_t *acc_attr_p, uint_t xfer_flags, 3151 p_nxge_dma_common_t dma_p) 3152 { 3153 caddr_t kaddrp; 3154 int ddi_status = DDI_SUCCESS; 3155 boolean_t contig_alloc_type; 3156 boolean_t kmem_alloc_type; 3157 3158 contig_alloc_type = dma_p->contig_alloc_type; 3159 3160 if (contig_alloc_type && (nxgep->niu_type != N2_NIU)) { 3161 /* 3162 * contig_alloc_type for contiguous memory only allowed 3163 * for N2/NIU. 3164 */ 3165 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3166 "nxge_dma_mem_alloc: alloc type not allowed (%d)", 3167 dma_p->contig_alloc_type)); 3168 return (NXGE_ERROR | NXGE_DDI_FAILED); 3169 } 3170 3171 dma_p->dma_handle = NULL; 3172 dma_p->acc_handle = NULL; 3173 dma_p->kaddrp = dma_p->last_kaddrp = NULL; 3174 dma_p->first_ioaddr_pp = dma_p->last_ioaddr_pp = NULL; 3175 ddi_status = ddi_dma_alloc_handle(nxgep->dip, dma_attrp, 3176 DDI_DMA_DONTWAIT, NULL, &dma_p->dma_handle); 3177 if (ddi_status != DDI_SUCCESS) { 3178 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3179 "nxge_dma_mem_alloc:ddi_dma_alloc_handle failed.")); 3180 return (NXGE_ERROR | NXGE_DDI_FAILED); 3181 } 3182 3183 kmem_alloc_type = dma_p->kmem_alloc_type; 3184 3185 switch (contig_alloc_type) { 3186 case B_FALSE: 3187 switch (kmem_alloc_type) { 3188 case B_FALSE: 3189 ddi_status = ddi_dma_mem_alloc(dma_p->dma_handle, 3190 length, 3191 acc_attr_p, 3192 xfer_flags, 3193 DDI_DMA_DONTWAIT, 0, &kaddrp, &dma_p->alength, 3194 &dma_p->acc_handle); 3195 if (ddi_status != 
DDI_SUCCESS) { 3196 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3197 "nxge_dma_mem_alloc: " 3198 "ddi_dma_mem_alloc failed")); 3199 ddi_dma_free_handle(&dma_p->dma_handle); 3200 dma_p->dma_handle = NULL; 3201 return (NXGE_ERROR | NXGE_DDI_FAILED); 3202 } 3203 if (dma_p->alength < length) { 3204 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3205 "nxge_dma_mem_alloc:di_dma_mem_alloc " 3206 "< length.")); 3207 ddi_dma_mem_free(&dma_p->acc_handle); 3208 ddi_dma_free_handle(&dma_p->dma_handle); 3209 dma_p->acc_handle = NULL; 3210 dma_p->dma_handle = NULL; 3211 return (NXGE_ERROR); 3212 } 3213 3214 ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, 3215 NULL, 3216 kaddrp, dma_p->alength, xfer_flags, 3217 DDI_DMA_DONTWAIT, 3218 0, &dma_p->dma_cookie, &dma_p->ncookies); 3219 if (ddi_status != DDI_DMA_MAPPED) { 3220 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3221 "nxge_dma_mem_alloc: ddi_dma_addr_bind " 3222 "failed " 3223 "(staus 0x%x ncookies %d.)", ddi_status, 3224 dma_p->ncookies)); 3225 if (dma_p->acc_handle) { 3226 ddi_dma_mem_free(&dma_p->acc_handle); 3227 dma_p->acc_handle = NULL; 3228 } 3229 ddi_dma_free_handle(&dma_p->dma_handle); 3230 dma_p->dma_handle = NULL; 3231 return (NXGE_ERROR | NXGE_DDI_FAILED); 3232 } 3233 3234 if (dma_p->ncookies != 1) { 3235 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3236 "nxge_dma_mem_alloc:ddi_dma_addr_bind " 3237 "> 1 cookie" 3238 "(staus 0x%x ncookies %d.)", ddi_status, 3239 dma_p->ncookies)); 3240 if (dma_p->acc_handle) { 3241 ddi_dma_mem_free(&dma_p->acc_handle); 3242 dma_p->acc_handle = NULL; 3243 } 3244 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 3245 ddi_dma_free_handle(&dma_p->dma_handle); 3246 dma_p->dma_handle = NULL; 3247 return (NXGE_ERROR); 3248 } 3249 break; 3250 3251 case B_TRUE: 3252 kaddrp = KMEM_ALLOC(length, KM_NOSLEEP); 3253 if (kaddrp == NULL) { 3254 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3255 "nxge_dma_mem_alloc:ddi_dma_mem_alloc " 3256 "kmem alloc failed")); 3257 return (NXGE_ERROR); 3258 } 3259 3260 dma_p->alength = length; 3261 
ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, 3262 NULL, kaddrp, dma_p->alength, xfer_flags, 3263 DDI_DMA_DONTWAIT, 0, 3264 &dma_p->dma_cookie, &dma_p->ncookies); 3265 if (ddi_status != DDI_DMA_MAPPED) { 3266 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3267 "nxge_dma_mem_alloc:ddi_dma_addr_bind: " 3268 "(kmem_alloc) failed kaddrp $%p length %d " 3269 "(staus 0x%x (%d) ncookies %d.)", 3270 kaddrp, length, 3271 ddi_status, ddi_status, dma_p->ncookies)); 3272 KMEM_FREE(kaddrp, length); 3273 dma_p->acc_handle = NULL; 3274 ddi_dma_free_handle(&dma_p->dma_handle); 3275 dma_p->dma_handle = NULL; 3276 dma_p->kaddrp = NULL; 3277 return (NXGE_ERROR | NXGE_DDI_FAILED); 3278 } 3279 3280 if (dma_p->ncookies != 1) { 3281 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3282 "nxge_dma_mem_alloc:ddi_dma_addr_bind " 3283 "(kmem_alloc) > 1 cookie" 3284 "(staus 0x%x ncookies %d.)", ddi_status, 3285 dma_p->ncookies)); 3286 KMEM_FREE(kaddrp, length); 3287 dma_p->acc_handle = NULL; 3288 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 3289 ddi_dma_free_handle(&dma_p->dma_handle); 3290 dma_p->dma_handle = NULL; 3291 dma_p->kaddrp = NULL; 3292 return (NXGE_ERROR); 3293 } 3294 3295 dma_p->kaddrp = kaddrp; 3296 3297 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 3298 "nxge_dma_mem_alloc: kmem_alloc dmap $%p " 3299 "kaddr $%p alength %d", 3300 dma_p, 3301 kaddrp, 3302 dma_p->alength)); 3303 break; 3304 } 3305 break; 3306 3307 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 3308 case B_TRUE: 3309 kaddrp = (caddr_t)contig_mem_alloc(length); 3310 if (kaddrp == NULL) { 3311 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3312 "nxge_dma_mem_alloc:contig_mem_alloc failed.")); 3313 ddi_dma_free_handle(&dma_p->dma_handle); 3314 return (NXGE_ERROR | NXGE_DDI_FAILED); 3315 } 3316 3317 dma_p->alength = length; 3318 ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL, 3319 kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0, 3320 &dma_p->dma_cookie, &dma_p->ncookies); 3321 if (ddi_status != DDI_DMA_MAPPED) { 3322 
NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3323 "nxge_dma_mem_alloc:di_dma_addr_bind failed " 3324 "(status 0x%x ncookies %d.)", ddi_status, 3325 dma_p->ncookies)); 3326 3327 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3328 "==> nxge_dma_mem_alloc: (not mapped)" 3329 "length %lu (0x%x) " 3330 "free contig kaddrp $%p " 3331 "va_to_pa $%p", 3332 length, length, 3333 kaddrp, 3334 va_to_pa(kaddrp))); 3335 3336 3337 contig_mem_free((void *)kaddrp, length); 3338 ddi_dma_free_handle(&dma_p->dma_handle); 3339 3340 dma_p->dma_handle = NULL; 3341 dma_p->acc_handle = NULL; 3342 dma_p->alength = NULL; 3343 dma_p->kaddrp = NULL; 3344 3345 return (NXGE_ERROR | NXGE_DDI_FAILED); 3346 } 3347 3348 if (dma_p->ncookies != 1 || 3349 (dma_p->dma_cookie.dmac_laddress == NULL)) { 3350 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3351 "nxge_dma_mem_alloc:di_dma_addr_bind > 1 " 3352 "cookie or " 3353 "dmac_laddress is NULL $%p size %d " 3354 " (status 0x%x ncookies %d.)", 3355 ddi_status, 3356 dma_p->dma_cookie.dmac_laddress, 3357 dma_p->dma_cookie.dmac_size, 3358 dma_p->ncookies)); 3359 3360 contig_mem_free((void *)kaddrp, length); 3361 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 3362 ddi_dma_free_handle(&dma_p->dma_handle); 3363 3364 dma_p->alength = 0; 3365 dma_p->dma_handle = NULL; 3366 dma_p->acc_handle = NULL; 3367 dma_p->kaddrp = NULL; 3368 3369 return (NXGE_ERROR | NXGE_DDI_FAILED); 3370 } 3371 break; 3372 3373 #else 3374 case B_TRUE: 3375 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3376 "nxge_dma_mem_alloc: invalid alloc type for !sun4v")); 3377 return (NXGE_ERROR | NXGE_DDI_FAILED); 3378 #endif 3379 } 3380 3381 dma_p->kaddrp = kaddrp; 3382 dma_p->last_kaddrp = (unsigned char *)kaddrp + 3383 dma_p->alength - RXBUF_64B_ALIGNED; 3384 #if defined(__i386) 3385 dma_p->ioaddr_pp = 3386 (unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress; 3387 #else 3388 dma_p->ioaddr_pp = (unsigned char *)dma_p->dma_cookie.dmac_laddress; 3389 #endif 3390 dma_p->last_ioaddr_pp = 3391 #if defined(__i386) 3392 (unsigned char 
*)(uint32_t)dma_p->dma_cookie.dmac_laddress +
#else
	    (unsigned char *)dma_p->dma_cookie.dmac_laddress +
#endif
	    dma_p->alength - RXBUF_64B_ALIGNED;

	NPI_DMA_ACC_HANDLE_SET(dma_p, dma_p->acc_handle);

#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
	/*
	 * Remember the original contig allocation so the teardown
	 * paths can release it even after alength/kaddrp are reused.
	 */
	dma_p->orig_ioaddr_pp =
	    (unsigned char *)dma_p->dma_cookie.dmac_laddress;
	dma_p->orig_alength = length;
	dma_p->orig_kaddrp = kaddrp;
	dma_p->orig_vatopa = (uint64_t)va_to_pa(kaddrp);
#endif

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dma_mem_alloc: "
	    "dma buffer allocated: dma_p $%p "
	    "return dmac_ladress from cookie $%p cookie dmac_size %d "
	    "dma_p->ioaddr_p $%p "
	    "dma_p->orig_ioaddr_p $%p "
	    "orig_vatopa $%p "
	    "alength %d (0x%x) "
	    "kaddrp $%p "
	    "length %d (0x%x)",
	    dma_p,
	    dma_p->dma_cookie.dmac_laddress, dma_p->dma_cookie.dmac_size,
	    dma_p->ioaddr_pp,
	    dma_p->orig_ioaddr_pp,
	    dma_p->orig_vatopa,
	    dma_p->alength, dma_p->alength,
	    kaddrp,
	    length, length));

	return (NXGE_OK);
}

/*
 * nxge_dma_mem_free() -- release a DMA area set up by nxge_dma_mem_alloc():
 * unbind and free the DMA handle, free the access handle, and, on sun4v
 * with the NIU workaround, free the original contig allocation.
 */
static void
nxge_dma_mem_free(p_nxge_dma_common_t dma_p)
{
	/* Unbind before freeing the handle; ncookies != 0 means bound. */
	if (dma_p->dma_handle != NULL) {
		if (dma_p->ncookies) {
			(void) ddi_dma_unbind_handle(dma_p->dma_handle);
			dma_p->ncookies = 0;
		}
		ddi_dma_free_handle(&dma_p->dma_handle);
		dma_p->dma_handle = NULL;
	}

	if (dma_p->acc_handle != NULL) {
		ddi_dma_mem_free(&dma_p->acc_handle);
		dma_p->acc_handle = NULL;
		NPI_DMA_ACC_HANDLE_SET(dma_p, NULL);
	}

#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
	if (dma_p->contig_alloc_type &&
	    dma_p->orig_kaddrp && dma_p->orig_alength) {
		/*
		 * NOTE(review): the stray comma after "mem type %d "
		 * splits the format string, so the arguments that follow
		 * are shifted relative to the conversions — confirm and
		 * fix upstream (debug-only path).
		 */
		NXGE_DEBUG_MSG((NULL, DMA_CTL, "nxge_dma_mem_free: "
		    "kaddrp $%p (orig_kaddrp $%p)"
		    "mem type %d ",
		    "orig_alength %d "
		    "alength 0x%x (%d)",
		    dma_p->kaddrp,
		    dma_p->orig_kaddrp,
		    dma_p->contig_alloc_type,
		    dma_p->orig_alength,
		    dma_p->alength, dma_p->alength));

		contig_mem_free(dma_p->orig_kaddrp, dma_p->orig_alength);
		/* NOTE(review): NULL used as 0 for the integer alength. */
		dma_p->orig_alength = NULL;
		dma_p->orig_kaddrp = NULL;
		dma_p->contig_alloc_type = B_FALSE;
	}
#endif
	dma_p->kaddrp = NULL;
	dma_p->alength = NULL;
}

/*
 * nxge_dma_free_rx_data_buf() -- tear down an RX data-buffer DMA area.
 * Always releases the DMA/access handles; the backing buffer itself is
 * freed only once BUF_ALLOCATED_WAIT_FREE indicates no receive buffers
 * are still outstanding (loaned upstream).
 */
static void
nxge_dma_free_rx_data_buf(p_nxge_dma_common_t dma_p)
{
	uint64_t kaddr;
	uint32_t buf_size;

	NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_dma_free_rx_data_buf"));

	if (dma_p->dma_handle != NULL) {
		if (dma_p->ncookies) {
			(void) ddi_dma_unbind_handle(dma_p->dma_handle);
			dma_p->ncookies = 0;
		}
		ddi_dma_free_handle(&dma_p->dma_handle);
		dma_p->dma_handle = NULL;
	}

	if (dma_p->acc_handle != NULL) {
		ddi_dma_mem_free(&dma_p->acc_handle);
		dma_p->acc_handle = NULL;
		NPI_DMA_ACC_HANDLE_SET(dma_p, NULL);
	}

	NXGE_DEBUG_MSG((NULL, DMA_CTL,
	    "==> nxge_dma_free_rx_data_buf: dmap $%p buf_alloc_state %d",
	    dma_p,
	    dma_p->buf_alloc_state));

	/* Defer freeing the data buffer while packets still reference it. */
	if (!(dma_p->buf_alloc_state & BUF_ALLOCATED_WAIT_FREE)) {
		NXGE_DEBUG_MSG((NULL, DMA_CTL,
		    "<== nxge_dma_free_rx_data_buf: "
		    "outstanding data buffers"));
		return;
	}

#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
	if (dma_p->contig_alloc_type &&
	    dma_p->orig_kaddrp && dma_p->orig_alength) {
		/*
		 * NOTE(review): same split-format-string issue as in
		 * nxge_dma_mem_free (stray comma after "mem type %d ").
		 */
		NXGE_DEBUG_MSG((NULL, DMA_CTL, "nxge_dma_free_rx_data_buf: "
		    "kaddrp $%p (orig_kaddrp $%p)"
		    "mem type %d ",
		    "orig_alength %d "
		    "alength 0x%x (%d)",
		    dma_p->kaddrp,
		    dma_p->orig_kaddrp,
		    dma_p->contig_alloc_type,
		    dma_p->orig_alength,
		    dma_p->alength, dma_p->alength));

		kaddr = (uint64_t)dma_p->orig_kaddrp;
		buf_size = dma_p->orig_alength;
		nxge_free_buf(CONTIG_MEM_ALLOC, kaddr, buf_size);
		dma_p->orig_alength = NULL;
		dma_p->orig_kaddrp = NULL;
		dma_p->contig_alloc_type = B_FALSE;
		dma_p->kaddrp = NULL;
		dma_p->alength = NULL;
		return;
	}
#endif

	if (dma_p->kmem_alloc_type) {
		NXGE_DEBUG_MSG((NULL, DMA_CTL,
		    "nxge_dma_free_rx_data_buf: free kmem "
		    "kaddrp $%p (orig_kaddrp $%p)"
		    "alloc type %d "
		    "orig_alength %d "
		    "alength 0x%x (%d)",
		    dma_p->kaddrp,
		    dma_p->orig_kaddrp,
		    dma_p->kmem_alloc_type,
		    dma_p->orig_alength,
		    dma_p->alength, dma_p->alength));
#if defined(__i386)
		/* 32-bit: go through uint32_t to avoid sign extension. */
		kaddr = (uint64_t)(uint32_t)dma_p->kaddrp;
#else
		kaddr = (uint64_t)dma_p->kaddrp;
#endif
		buf_size = dma_p->orig_alength;
		NXGE_DEBUG_MSG((NULL, DMA_CTL,
		    "nxge_dma_free_rx_data_buf: free dmap $%p "
		    "kaddr $%p buf_size %d",
		    dma_p,
		    kaddr, buf_size));
		nxge_free_buf(KMEM_ALLOC, kaddr, buf_size);
		dma_p->alength = 0;
		dma_p->orig_alength = 0;
		dma_p->kaddrp = NULL;
		dma_p->kmem_alloc_type = B_FALSE;
	}

	NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_dma_free_rx_data_buf"));
}

/*
 * nxge_m_start() -- start transmitting and receiving.
 *
 * This function is called by the MAC layer when the first
 * stream is open to prepare the hardware ready for sending
 * and receiving packets.
 */
static int
nxge_m_start(void *arg)
{
	p_nxge_t nxgep = (p_nxge_t)arg;	/* mc_start private arg */

	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_start"));

	MUTEX_ENTER(nxgep->genlock);
	if (nxge_init(nxgep) != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "<== nxge_m_start: initialization failed"));
		MUTEX_EXIT(nxgep->genlock);
		return (EIO);
	}

	/* Already running: nothing more to set up. */
	if (nxgep->nxge_mac_state == NXGE_MAC_STARTED)
		goto nxge_m_start_exit;
	/*
	 * Start timer to check the system error and tx hangs
	 */
	if (!isLDOMguest(nxgep))
		nxgep->nxge_timerid = nxge_start_timer(nxgep,
		    nxge_check_hw_state, NXGE_CHECK_TIMER);
#if defined(sun4v)
	else
		/* LDOMs guest domains use the HIO-specific timer. */
		nxge_hio_start_timer(nxgep);
#endif

	nxgep->link_notify = B_TRUE;

	nxgep->nxge_mac_state = NXGE_MAC_STARTED;

nxge_m_start_exit:
	MUTEX_EXIT(nxgep->genlock);
	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_start"));
	return (0);
}

/*
 * nxge_m_stop(): stop transmitting and receiving.
 */
static void
nxge_m_stop(void *arg)
{
	p_nxge_t nxgep = (p_nxge_t)arg;

	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_stop"));

	/* Stop the hardware-check timer before tearing down the device. */
	if (nxgep->nxge_timerid) {
		nxge_stop_timer(nxgep, nxgep->nxge_timerid);
		nxgep->nxge_timerid = 0;
	}

	MUTEX_ENTER(nxgep->genlock);
	nxgep->nxge_mac_state = NXGE_MAC_STOPPING;
	nxge_uninit(nxgep);

	nxgep->nxge_mac_state = NXGE_MAC_STOPPED;

	MUTEX_EXIT(nxgep->genlock);

	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_stop"));
}

/*
 * nxge_m_unicst() -- set the primary unicast MAC address.
 * Returns 0 on success, EINVAL if the hardware update fails.
 */
static int
nxge_m_unicst(void *arg, const uint8_t *macaddr)
{
	p_nxge_t nxgep = (p_nxge_t)arg;
	struct ether_addr addrp;

	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "==> nxge_m_unicst"));

	bcopy(macaddr, (uint8_t *)&addrp, ETHERADDRL);
	if (nxge_set_mac_addr(nxgep, &addrp)) {
		/* NOTE(review): "unitcast" typo preserved (runtime string). */
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "<== nxge_m_unicst: set unitcast failed"));
		return (EINVAL);
	}

	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_m_unicst"));

	return (0);
}

/*
 * nxge_m_multicst() -- add (add == B_TRUE) or delete a multicast address.
 * Returns 0 on success, EINVAL on failure.
 */
static int
nxge_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
{
	p_nxge_t nxgep = (p_nxge_t)arg;
	struct ether_addr addrp;

	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
	    "==> nxge_m_multicst: add %d", add));

	bcopy(mca, (uint8_t *)&addrp, ETHERADDRL);
	if (add) {
		if (nxge_add_mcast_addr(nxgep, &addrp)) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "<== nxge_m_multicst: add multicast failed"));
			return (EINVAL);
		}
	} else {
		if (nxge_del_mcast_addr(nxgep, &addrp)) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "<== nxge_m_multicst: del multicast failed"));
			return (EINVAL);
		}
	}

	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_m_multicst"));

	return (0);
}

/*
 * nxge_m_promisc() -- enable/disable promiscuous mode.
 */
static int
nxge_m_promisc(void *arg, boolean_t on)
{
	p_nxge_t nxgep = (p_nxge_t)arg;

	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
	    "==> nxge_m_promisc: on %d", on));

	if (nxge_set_promisc(nxgep, on)) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "<== nxge_m_promisc: set promisc failed"));
		return (EINVAL);
	}

	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
	    "<== nxge_m_promisc: on %d", on));

	return (0);
}

/*
 * nxge_m_ioctl() -- driver ioctl entry point.
 *
 * Two passes over the command: the first decides whether the caller
 * needs network-configuration privilege (loopback get/info and the
 * driver debug commands do not); the second dispatches to the
 * loopback or hardware ioctl handler.  Unknown commands are NAKed.
 */
static void
nxge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
{
	p_nxge_t nxgep = (p_nxge_t)arg;
	struct iocblk *iocp;
	boolean_t need_privilege;
	int err;
	int cmd;

	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl"));

	iocp = (struct iocblk *)mp->b_rptr;
	iocp->ioc_error = 0;
	need_privilege = B_TRUE;
	cmd = iocp->ioc_cmd;
	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl: cmd 0x%08x", cmd));
	switch (cmd) {
	default:
		miocnak(wq, mp, 0, EINVAL);
		NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl: invalid"));
		return;

	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
	case LB_GET_MODE:
		need_privilege = B_FALSE;
		break;
	case LB_SET_MODE:
		/* Setting loopback mode keeps need_privilege == B_TRUE. */
		break;


	case NXGE_GET_MII:
	case NXGE_PUT_MII:
	case NXGE_GET64:
	case NXGE_PUT64:
	case NXGE_GET_TX_RING_SZ:
	case NXGE_GET_TX_DESC:
	case NXGE_TX_SIDE_RESET:
	case NXGE_RX_SIDE_RESET:
	case NXGE_GLOBAL_RESET:
	case NXGE_RESET_MAC:
	case NXGE_TX_REGS_DUMP:
	case NXGE_RX_REGS_DUMP:
	case NXGE_INT_REGS_DUMP:
	case NXGE_VIR_INT_REGS_DUMP:
	case NXGE_PUT_TCAM:
	case NXGE_GET_TCAM:
	case NXGE_RTRACE:
	case NXGE_RDUMP:

		need_privilege = B_FALSE;
		break;
	case NXGE_INJECT_ERR:
		cmn_err(CE_NOTE, "!nxge_m_ioctl: Inject error\n");
		nxge_err_inject(nxgep, wq, mp);
		break;
	}

	if (need_privilege) {
		err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
		if (err != 0) {
			miocnak(wq, mp, 0, err);
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "<== nxge_m_ioctl: no priv"));
			return;
		}
	}

	switch (cmd) {

	case LB_GET_MODE:
	case LB_SET_MODE:
	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
		nxge_loopback_ioctl(nxgep, wq, mp, iocp);
		break;

	case NXGE_GET_MII:
	case NXGE_PUT_MII:
	case NXGE_PUT_TCAM:
	case NXGE_GET_TCAM:
	case NXGE_GET64:
	case NXGE_PUT64:
	case NXGE_GET_TX_RING_SZ:
	case NXGE_GET_TX_DESC:
	case NXGE_TX_SIDE_RESET:
	case NXGE_RX_SIDE_RESET:
	case NXGE_GLOBAL_RESET:
	case NXGE_RESET_MAC:
	case NXGE_TX_REGS_DUMP:
	case NXGE_RX_REGS_DUMP:
	case NXGE_INT_REGS_DUMP:
	case NXGE_VIR_INT_REGS_DUMP:
		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_m_ioctl: cmd 0x%x", cmd));
		nxge_hw_ioctl(nxgep, wq, mp, iocp);
		break;
	}

	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl"));
}

extern void nxge_rx_hw_blank(void *arg, time_t ticks, uint_t count);

/*
 * nxge_m_resources() -- export the receive descriptor rings this
 * instance owns to the MAC layer as MAC_RX_FIFO resources.
 */
static void
nxge_m_resources(void *arg)
{
	p_nxge_t nxgep = arg;
	mac_rx_fifo_t mrf;

	nxge_grp_set_t *set = &nxgep->rx_set;
	uint8_t rdc;

	rx_rcr_ring_t *ring;

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_m_resources"));

	MUTEX_ENTER(nxgep->genlock);

	if (set->owned.map == 0) {
		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
		    "nxge_m_resources: no receive resources"));
		goto nxge_m_resources_exit;
	}

	/*
	 * CR 6492541 Check to see if the drv_state has been initialized,
	 * if not * call nxge_init().
	 */
	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
		if (nxge_init(nxgep) != NXGE_OK)
			goto nxge_m_resources_exit;
	}

	mrf.mrf_type = MAC_RX_FIFO;
	mrf.mrf_blank = nxge_rx_hw_blank;
	mrf.mrf_arg = (void *)nxgep;

	/* Default interrupt blanking parameters for the RX FIFO. */
	mrf.mrf_normal_blank_time = 128;
	mrf.mrf_normal_pkt_count = 8;

	/*
	 * Export our receive resources to the MAC layer.
	 */
	for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
		/* Only register rings this instance actually owns. */
		if ((1 << rdc) & set->owned.map) {
			ring = nxgep->rx_rcr_rings->rcr_rings[rdc];
			if (ring == 0) {
				/*
				 * This is a big deal only if we are
				 * *not* in an LDOMs environment.
				 */
				if (nxgep->environs == SOLARIS_DOMAIN) {
					cmn_err(CE_NOTE,
					    "==> nxge_m_resources: "
					    "ring %d == 0", rdc);
				}
				continue;
			}
			ring->rcr_mac_handle = mac_resource_add
			    (nxgep->mach, (mac_resource_t *)&mrf);

			NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
			    "==> nxge_m_resources: RDC %d RCR %p MAC handle %p",
			    rdc, ring, ring->rcr_mac_handle));
		}
	}

nxge_m_resources_exit:
	MUTEX_EXIT(nxgep->genlock);
	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_m_resources"));
}

/*
 * nxge_mmac_kstat_update() -- refresh the alternate-MAC kstats for the
 * given slot.  The address is copied into the kstat pool in reverse
 * byte order; the kstat pool is indexed by slot-1 while the driver
 * pools are indexed by slot.
 */
void
nxge_mmac_kstat_update(p_nxge_t nxgep, mac_addr_slot_t slot, boolean_t factory)
{
	p_nxge_mmac_stats_t mmac_stats;
	int i;
	nxge_mmac_t *mmac_info;

	mmac_info = &nxgep->nxge_mmac_info;

	mmac_stats = &nxgep->statsp->mmac_stats;
	mmac_stats->mmac_max_cnt = mmac_info->num_mmac;
	mmac_stats->mmac_avail_cnt = mmac_info->naddrfree;

	for (i = 0; i < ETHERADDRL; i++) {
		if (factory) {
			mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i]
			    = mmac_info->factory_mac_pool[slot][
			    (ETHERADDRL-1) - i];
		} else {
			mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i]
			    = mmac_info->mac_pool[slot].addr[
			    (ETHERADDRL - 1) - i];
		}
	}
}

/*
 * nxge_altmac_set() -- Set an alternate MAC address
 *
 * Programs the alternate address registers for the given slot, sets
 * the host info (RDC table) entry for it, and enables comparison.
 * Returns 0 on success, EIO if any NPI call fails.
 */
static int
nxge_altmac_set(p_nxge_t nxgep, uint8_t *maddr, mac_addr_slot_t slot)
{
	uint8_t addrn;
	uint8_t portn;
	npi_mac_addr_t altmac;
	hostinfo_t mac_rdc;
	p_nxge_class_pt_cfg_t clscfgp;

	/* Pack the 6-byte address into three big-endian 16-bit words. */
	altmac.w2 = ((uint16_t)maddr[0] << 8) | ((uint16_t)maddr[1] & 0x0ff);
	altmac.w1 = ((uint16_t)maddr[2] << 8) | ((uint16_t)maddr[3] &
	    0x0ff);
	altmac.w0 = ((uint16_t)maddr[4] << 8) | ((uint16_t)maddr[5] & 0x0ff);

	portn = nxgep->mac.portnum;
	addrn = (uint8_t)slot - 1;

	if (npi_mac_altaddr_entry(nxgep->npi_handle, OP_SET, portn,
	    addrn, &altmac) != NPI_SUCCESS)
		return (EIO);

	/*
	 * Set the rdc table number for the host info entry
	 * for this mac address slot.
	 */
	clscfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
	mac_rdc.value = 0;
	mac_rdc.bits.w0.rdc_tbl_num = clscfgp->mac_host_info[addrn].rdctbl;
	mac_rdc.bits.w0.mac_pref = clscfgp->mac_host_info[addrn].mpr_npr;

	if (npi_mac_hostinfo_entry(nxgep->npi_handle, OP_SET,
	    nxgep->function_num, addrn, &mac_rdc) != NPI_SUCCESS) {
		return (EIO);
	}

	/*
	 * Enable comparison with the alternate MAC address.
	 * While the first alternate addr is enabled by bit 1 of register
	 * BMAC_ALTAD_CMPEN, it is enabled by bit 0 of register
	 * XMAC_ADDR_CMPEN, so slot needs to be converted to addrn
	 * accordingly before calling npi_mac_altaddr_entry.
	 */
	if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1)
		addrn = (uint8_t)slot - 1;
	else
		addrn = (uint8_t)slot;

	if (npi_mac_altaddr_enable(nxgep->npi_handle, portn, addrn)
	    != NPI_SUCCESS)
		return (EIO);

	return (0);
}

/*
 * nxeg_m_mmac_add() - find an unused address slot, set the address
 * value to the one specified, enable the port to start filtering on
 * the new MAC address. Returns 0 on success.
 */
int
nxge_m_mmac_add(void *arg, mac_multi_addr_t *maddr)
{
	p_nxge_t nxgep = arg;
	mac_addr_slot_t slot;
	nxge_mmac_t *mmac_info;
	int err;
	nxge_status_t status;

	mutex_enter(nxgep->genlock);

	/*
	 * Make sure that nxge is initialized, if _start() has
	 * not been called.
	 */
	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
		status = nxge_init(nxgep);
		if (status != NXGE_OK) {
			mutex_exit(nxgep->genlock);
			return (ENXIO);
		}
	}

	mmac_info = &nxgep->nxge_mmac_info;
	if (mmac_info->naddrfree == 0) {
		mutex_exit(nxgep->genlock);
		return (ENOSPC);
	}
	if (!mac_unicst_verify(nxgep->mach, maddr->mma_addr,
	    maddr->mma_addrlen)) {
		mutex_exit(nxgep->genlock);
		return (EINVAL);
	}
	/*
	 * Search for the first available slot. Because naddrfree
	 * is not zero, we are guaranteed to find one.
	 * Slot 0 is for unique (primary) MAC. The first alternate
	 * MAC slot is slot 1.
	 * Each of the first two ports of Neptune has 16 alternate
	 * MAC slots but only the first 7 (of 15) slots have assigned factory
	 * MAC addresses. We first search among the slots without bundled
	 * factory MACs. If we fail to find one in that range, then we
	 * search the slots with bundled factory MACs. A factory MAC
	 * will be wasted while the slot is used with a user MAC address.
	 * But the slot could be used by factory MAC again after calling
	 * nxge_m_mmac_remove and nxge_m_mmac_reserve.
	 */
	if (mmac_info->num_factory_mmac < mmac_info->num_mmac) {
		for (slot = mmac_info->num_factory_mmac + 1;
		    slot <= mmac_info->num_mmac; slot++) {
			if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED))
				break;
		}
		if (slot > mmac_info->num_mmac) {
			/* Fall back to the factory-MAC-capable slots. */
			for (slot = 1; slot <= mmac_info->num_factory_mmac;
			    slot++) {
				if (!(mmac_info->mac_pool[slot].flags
				    & MMAC_SLOT_USED))
					break;
			}
		}
	} else {
		for (slot = 1; slot <= mmac_info->num_mmac; slot++) {
			if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED))
				break;
		}
	}
	ASSERT(slot <= mmac_info->num_mmac);
	if ((err = nxge_altmac_set(nxgep, maddr->mma_addr, slot)) != 0) {
		mutex_exit(nxgep->genlock);
		return (err);
	}
	/* Hardware programmed; record the address in the software pool. */
	bcopy(maddr->mma_addr, mmac_info->mac_pool[slot].addr, ETHERADDRL);
	mmac_info->mac_pool[slot].flags |= MMAC_SLOT_USED;
	mmac_info->mac_pool[slot].flags &= ~MMAC_VENDOR_ADDR;
	mmac_info->naddrfree--;
	nxge_mmac_kstat_update(nxgep, slot, B_FALSE);

	maddr->mma_slot = slot;

	mutex_exit(nxgep->genlock);
	return (0);
}

/*
 * This function reserves an unused slot and programs the slot and the HW
 * with a factory mac address.
 */
static int
nxge_m_mmac_reserve(void *arg, mac_multi_addr_t *maddr)
{
	p_nxge_t nxgep = arg;
	mac_addr_slot_t slot;
	nxge_mmac_t *mmac_info;
	int err;
	nxge_status_t status;

	mutex_enter(nxgep->genlock);

	/*
	 * Make sure that nxge is initialized, if _start() has
	 * not been called.
	 */
	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
		status = nxge_init(nxgep);
		if (status != NXGE_OK) {
			mutex_exit(nxgep->genlock);
			return (ENXIO);
		}
	}

	mmac_info = &nxgep->nxge_mmac_info;
	if (mmac_info->naddrfree == 0) {
		mutex_exit(nxgep->genlock);
		return (ENOSPC);
	}

	slot = maddr->mma_slot;
	if (slot == -1) {	/* -1: Take the first available slot */
		for (slot = 1; slot <= mmac_info->num_factory_mmac; slot++) {
			if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED))
				break;
		}
		if (slot > mmac_info->num_factory_mmac) {
			mutex_exit(nxgep->genlock);
			return (ENOSPC);
		}
	}
	if (slot < 1 || slot > mmac_info->num_factory_mmac) {
		/*
		 * Do not support factory MAC at a slot greater than
		 * num_factory_mmac even when there are available factory
		 * MAC addresses because the alternate MACs are bundled with
		 * slot[1] through slot[num_factory_mmac]
		 */
		mutex_exit(nxgep->genlock);
		return (EINVAL);
	}
	if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) {
		mutex_exit(nxgep->genlock);
		return (EBUSY);
	}
	/* Verify the address to be reserved */
	if (!mac_unicst_verify(nxgep->mach,
	    mmac_info->factory_mac_pool[slot], ETHERADDRL)) {
		mutex_exit(nxgep->genlock);
		return (EINVAL);
	}
	if (err = nxge_altmac_set(nxgep,
	    mmac_info->factory_mac_pool[slot], slot)) {
		mutex_exit(nxgep->genlock);
		return (err);
	}
	bcopy(mmac_info->factory_mac_pool[slot], maddr->mma_addr, ETHERADDRL);
	mmac_info->mac_pool[slot].flags |= MMAC_SLOT_USED | MMAC_VENDOR_ADDR;
	mmac_info->naddrfree--;

	nxge_mmac_kstat_update(nxgep, slot, B_TRUE);
	mutex_exit(nxgep->genlock);

	/* Pass info back to the caller */
	maddr->mma_slot = slot;
	maddr->mma_addrlen = ETHERADDRL;
	maddr->mma_flags = MMAC_SLOT_USED | MMAC_VENDOR_ADDR;

	return (0);
}

/*
 * Remove the specified mac address and update the HW not to filter
 * the mac address anymore.
 */
int
nxge_m_mmac_remove(void *arg, mac_addr_slot_t slot)
{
	p_nxge_t nxgep = arg;
	nxge_mmac_t *mmac_info;
	uint8_t addrn;
	uint8_t portn;
	int err = 0;
	nxge_status_t status;

	mutex_enter(nxgep->genlock);

	/*
	 * Make sure that nxge is initialized, if _start() has
	 * not been called.
	 */
	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
		status = nxge_init(nxgep);
		if (status != NXGE_OK) {
			mutex_exit(nxgep->genlock);
			return (ENXIO);
		}
	}

	mmac_info = &nxgep->nxge_mmac_info;
	if (slot < 1 || slot > mmac_info->num_mmac) {
		mutex_exit(nxgep->genlock);
		return (EINVAL);
	}

	/* XMAC and BMAC number the compare-enable bits differently. */
	portn = nxgep->mac.portnum;
	if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1)
		addrn = (uint8_t)slot - 1;
	else
		addrn = (uint8_t)slot;

	if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) {
		if (npi_mac_altaddr_disable(nxgep->npi_handle, portn, addrn)
		    == NPI_SUCCESS) {
			mmac_info->naddrfree++;
			mmac_info->mac_pool[slot].flags &= ~MMAC_SLOT_USED;
			/*
			 * Regardless if the MAC we just stopped filtering
			 * is a user addr or a facory addr, we must set
			 * the MMAC_VENDOR_ADDR flag if this slot has an
			 * associated factory MAC to indicate that a factory
			 * MAC is available.
			 */
			if (slot <= mmac_info->num_factory_mmac) {
				mmac_info->mac_pool[slot].flags
				    |= MMAC_VENDOR_ADDR;
			}
			/*
			 * Clear mac_pool[slot].addr so that kstat shows 0
			 * alternate MAC address if the slot is not used.
			 * (But nxge_m_mmac_get returns the factory MAC even
			 * when the slot is not used!)
4194 */ 4195 bzero(mmac_info->mac_pool[slot].addr, ETHERADDRL); 4196 nxge_mmac_kstat_update(nxgep, slot, B_FALSE); 4197 } else { 4198 err = EIO; 4199 } 4200 } else { 4201 err = EINVAL; 4202 } 4203 4204 mutex_exit(nxgep->genlock); 4205 return (err); 4206 } 4207 4208 /* 4209 * Modify a mac address added by nxge_m_mmac_add or nxge_m_mmac_reserve(). 4210 */ 4211 static int 4212 nxge_m_mmac_modify(void *arg, mac_multi_addr_t *maddr) 4213 { 4214 p_nxge_t nxgep = arg; 4215 mac_addr_slot_t slot; 4216 nxge_mmac_t *mmac_info; 4217 int err = 0; 4218 nxge_status_t status; 4219 4220 if (!mac_unicst_verify(nxgep->mach, maddr->mma_addr, 4221 maddr->mma_addrlen)) 4222 return (EINVAL); 4223 4224 slot = maddr->mma_slot; 4225 4226 mutex_enter(nxgep->genlock); 4227 4228 /* 4229 * Make sure that nxge is initialized, if _start() has 4230 * not been called. 4231 */ 4232 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 4233 status = nxge_init(nxgep); 4234 if (status != NXGE_OK) { 4235 mutex_exit(nxgep->genlock); 4236 return (ENXIO); 4237 } 4238 } 4239 4240 mmac_info = &nxgep->nxge_mmac_info; 4241 if (slot < 1 || slot > mmac_info->num_mmac) { 4242 mutex_exit(nxgep->genlock); 4243 return (EINVAL); 4244 } 4245 if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) { 4246 if ((err = nxge_altmac_set(nxgep, maddr->mma_addr, slot)) 4247 != 0) { 4248 bcopy(maddr->mma_addr, mmac_info->mac_pool[slot].addr, 4249 ETHERADDRL); 4250 /* 4251 * Assume that the MAC passed down from the caller 4252 * is not a factory MAC address (The user should 4253 * call mmac_remove followed by mmac_reserve if 4254 * he wants to use the factory MAC for this slot). 4255 */ 4256 mmac_info->mac_pool[slot].flags &= ~MMAC_VENDOR_ADDR; 4257 nxge_mmac_kstat_update(nxgep, slot, B_FALSE); 4258 } 4259 } else { 4260 err = EINVAL; 4261 } 4262 mutex_exit(nxgep->genlock); 4263 return (err); 4264 } 4265 4266 /* 4267 * nxge_m_mmac_get() - Get the MAC address and other information 4268 * related to the slot. 
mma_flags should be set to 0 in the call.
 * Note: although kstat shows MAC address as zero when a slot is
 * not used, Crossbow expects nxge_m_mmac_get to copy factory MAC
 * to the caller as long as the slot is not using a user MAC address.
 * The following table shows the rules,
 *
 *				USED	VENDOR	mma_addr
 * ------------------------------------------------------------
 * (1) Slot uses a user MAC:	yes	no	user MAC
 * (2) Slot uses a factory MAC:	yes	yes	factory MAC
 * (3) Slot is not used but is
 *     factory MAC capable:	no	yes	factory MAC
 * (4) Slot is not used and is
 *     not factory MAC capable:	no	no	0
 * ------------------------------------------------------------
 */
static int
nxge_m_mmac_get(void *arg, mac_multi_addr_t *maddr)
{
	nxge_t *nxgep = arg;
	mac_addr_slot_t slot;
	nxge_mmac_t *mmac_info;
	nxge_status_t status;

	slot = maddr->mma_slot;

	mutex_enter(nxgep->genlock);

	/*
	 * Make sure that nxge is initialized, if _start() has
	 * not been called.
	 */
	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
		status = nxge_init(nxgep);
		if (status != NXGE_OK) {
			mutex_exit(nxgep->genlock);
			return (ENXIO);
		}
	}

	mmac_info = &nxgep->nxge_mmac_info;

	if (slot < 1 || slot > mmac_info->num_mmac) {
		mutex_exit(nxgep->genlock);
		return (EINVAL);
	}
	maddr->mma_flags = 0;
	if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED)
		maddr->mma_flags |= MMAC_SLOT_USED;

	/* See the table above: VENDOR slots return the factory MAC. */
	if (mmac_info->mac_pool[slot].flags & MMAC_VENDOR_ADDR) {
		maddr->mma_flags |= MMAC_VENDOR_ADDR;
		bcopy(mmac_info->factory_mac_pool[slot],
		    maddr->mma_addr, ETHERADDRL);
		maddr->mma_addrlen = ETHERADDRL;
	} else {
		if (maddr->mma_flags & MMAC_SLOT_USED) {
			bcopy(mmac_info->mac_pool[slot].addr,
			    maddr->mma_addr, ETHERADDRL);
			maddr->mma_addrlen = ETHERADDRL;
		} else {
			bzero(maddr->mma_addr, ETHERADDRL);
			maddr->mma_addrlen = 0;
		}
	}
	mutex_exit(nxgep->genlock);
	return (0);
}

/*
 * nxge_m_getcapab() -- GLDv3 capability query entry point.
 * Fills in cap_data for the supported capability and returns B_TRUE,
 * or returns B_FALSE for unsupported (or disabled) capabilities.
 */
static boolean_t
nxge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
{
	nxge_t *nxgep = arg;
	uint32_t *txflags = cap_data;
	multiaddress_capab_t *mmacp = cap_data;

	switch (cap) {
	case MAC_CAPAB_HCKSUM:
		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_m_getcapab: checksum %d", nxge_cksum_enable));
		if (nxge_cksum_enable) {
			*txflags = HCKSUM_INET_PARTIAL;
		}
		break;

	case MAC_CAPAB_POLL:
		/*
		 * There's nothing for us to fill in, simply returning
		 * B_TRUE stating that we support polling is sufficient.
		 */
		break;

	case MAC_CAPAB_MULTIADDRESS:
		mmacp = (multiaddress_capab_t *)cap_data;
		mutex_enter(nxgep->genlock);

		mmacp->maddr_naddr = nxgep->nxge_mmac_info.num_mmac;
		mmacp->maddr_naddrfree = nxgep->nxge_mmac_info.naddrfree;
		mmacp->maddr_flag = 0;	/* 0 is requried by PSARC2006/265 */
		/*
		 * maddr_handle is driver's private data, passed back to
		 * entry point functions as arg.
		 */
		mmacp->maddr_handle = nxgep;
		mmacp->maddr_add = nxge_m_mmac_add;
		mmacp->maddr_remove = nxge_m_mmac_remove;
		mmacp->maddr_modify = nxge_m_mmac_modify;
		mmacp->maddr_get = nxge_m_mmac_get;
		mmacp->maddr_reserve = nxge_m_mmac_reserve;

		mutex_exit(nxgep->genlock);
		break;

	case MAC_CAPAB_LSO: {
		mac_capab_lso_t *cap_lso = cap_data;

		if (nxgep->soft_lso_enable) {
			cap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4;
			/* Clamp the tunable to the hardware maximum. */
			if (nxge_lso_max > NXGE_LSO_MAXLEN) {
				nxge_lso_max = NXGE_LSO_MAXLEN;
			}
			cap_lso->lso_basic_tcp_ipv4.lso_max = nxge_lso_max;
			break;
		} else {
			return (B_FALSE);
		}
	}

#if defined(sun4v)
	case MAC_CAPAB_RINGS: {
		mac_capab_rings_t *mrings = (mac_capab_rings_t *)cap_data;

		/*
		 * Only the service domain driver responds to
		 * this capability request.
		 */
		if (isLDOMservice(nxgep)) {
			mrings->mr_handle = (void *)nxgep;

			/*
			 * No dynamic allocation of groups and
			 * rings at this time. Shares dictate the
			 * configurartion.
			 */
			mrings->mr_gadd_ring = NULL;
			mrings->mr_grem_ring = NULL;
			mrings->mr_rget = NULL;
			mrings->mr_gget = nxge_hio_group_get;

			if (mrings->mr_type == MAC_RING_TYPE_RX) {
				mrings->mr_rnum = 8; /* XXX */
				mrings->mr_gnum = 6; /* XXX */
			} else {
				mrings->mr_rnum = 8; /* XXX */
				mrings->mr_gnum = 0; /* XXX */
			}
		} else
			return (B_FALSE);
		break;
	}

	case MAC_CAPAB_SHARES: {
		mac_capab_share_t *mshares = (mac_capab_share_t *)cap_data;

		/*
		 * Only the service domain driver responds to
		 * this capability request.
		 */
		if (isLDOMservice(nxgep)) {
			mshares->ms_snum = 3;
			mshares->ms_handle = (void *)nxgep;
			mshares->ms_salloc = nxge_hio_share_alloc;
			mshares->ms_sfree = nxge_hio_share_free;
			mshares->ms_sadd = NULL;
			mshares->ms_sremove = NULL;
			mshares->ms_squery = nxge_hio_share_query;
		} else
			return (B_FALSE);
		break;
	}
#endif
	default:
		return (B_FALSE);
	}
	return (B_TRUE);
}

/*
 * nxge_param_locked() -- return B_TRUE if the given link property is
 * read-only while the device is in a loopback mode.
 */
static boolean_t
nxge_param_locked(mac_prop_id_t pr_num)
{
	/*
	 * All adv_* parameters are locked (read-only) while
	 * the device is in any sort of loopback mode ...
	 */
	switch (pr_num) {
	case DLD_PROP_ADV_1000FDX_CAP:
	case DLD_PROP_EN_1000FDX_CAP:
	case DLD_PROP_ADV_1000HDX_CAP:
	case DLD_PROP_EN_1000HDX_CAP:
	case DLD_PROP_ADV_100FDX_CAP:
	case DLD_PROP_EN_100FDX_CAP:
	case DLD_PROP_ADV_100HDX_CAP:
	case DLD_PROP_EN_100HDX_CAP:
	case DLD_PROP_ADV_10FDX_CAP:
	case DLD_PROP_EN_10FDX_CAP:
	case DLD_PROP_ADV_10HDX_CAP:
	case DLD_PROP_EN_10HDX_CAP:
	case DLD_PROP_AUTONEG:
	case DLD_PROP_FLOWCTRL:
		return (B_TRUE);
	}
	return (B_FALSE);
}

/*
 * callback functions for set/get of properties
 */
static int
nxge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
    uint_t pr_valsize, const void *pr_val)
{
	nxge_t *nxgep = barg;
	p_nxge_param_t param_arr;
	p_nxge_stats_t statsp;
	int err = 0;
	uint8_t val;
	uint32_t cur_mtu, new_mtu, old_framesize;
	link_flowctrl_t fl;

	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_setprop"));
	param_arr = nxgep->param_arr;
	statsp = nxgep->statsp;
	mutex_enter(nxgep->genlock);
	if (statsp->port_stats.lb_mode != nxge_lb_normal &&
	    nxge_param_locked(pr_num)) {
		/*
		 * All adv_* parameters are locked (read-only)
		 * while the device is in any sort of loopback mode.
		 */
		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_m_setprop: loopback mode: read only"));
		mutex_exit(nxgep->genlock);
		return (EBUSY);
	}

	/*
	 * NOTE(review): pr_val is dereferenced as a uint8_t before
	 * pr_num is examined; harmless for the cases below that ignore
	 * "val", but assumes pr_val is non-NULL -- confirm callers.
	 */
	val = *(uint8_t *)pr_val;
	switch (pr_num) {
	case DLD_PROP_EN_1000FDX_CAP:
		nxgep->param_en_1000fdx = val;
		param_arr[param_anar_1000fdx].value = val;

		goto reprogram;

	case DLD_PROP_EN_100FDX_CAP:
		nxgep->param_en_100fdx = val;
		param_arr[param_anar_100fdx].value = val;

		goto reprogram;

	case DLD_PROP_EN_10FDX_CAP:
		nxgep->param_en_10fdx = val;
		param_arr[param_anar_10fdx].value = val;

		goto reprogram;

	case DLD_PROP_EN_1000HDX_CAP:
	case DLD_PROP_EN_100HDX_CAP:
	case DLD_PROP_EN_10HDX_CAP:
	case DLD_PROP_ADV_1000FDX_CAP:
	case DLD_PROP_ADV_1000HDX_CAP:
	case DLD_PROP_ADV_100FDX_CAP:
	case DLD_PROP_ADV_100HDX_CAP:
	case DLD_PROP_ADV_10FDX_CAP:
	case DLD_PROP_ADV_10HDX_CAP:
	case DLD_PROP_STATUS:
	case DLD_PROP_SPEED:
	case DLD_PROP_DUPLEX:
		err = EINVAL; /* cannot set read-only properties */
		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_m_setprop: read only property %d",
		    pr_num));
		break;

	case DLD_PROP_AUTONEG:
		param_arr[param_autoneg].value = val;

		goto reprogram;

	case DLD_PROP_MTU:
		/* MTU may only change while the MAC is stopped */
		if (nxgep->nxge_mac_state == NXGE_MAC_STARTED) {
			err = EBUSY;
			break;
		}

		cur_mtu = nxgep->mac.default_mtu;
		bcopy(pr_val, &new_mtu, sizeof (new_mtu));
		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_m_setprop: set MTU: %d is_jumbo %d",
		    new_mtu, nxgep->mac.is_jumbo));

		if (new_mtu == cur_mtu) {
			err = 0;
			break;
		}
		if (new_mtu < NXGE_DEFAULT_MTU ||
		    new_mtu > NXGE_MAXIMUM_MTU) {
			err = EINVAL;
			break;
		}

		/* jumbo sizes require the jumbo feature to be enabled */
		if ((new_mtu > NXGE_DEFAULT_MTU) &&
		    !nxgep->mac.is_jumbo) {
			err = EINVAL;
			break;
		}

		/* save the old frame size so we can roll back on failure */
		old_framesize = (uint32_t)nxgep->mac.maxframesize;
		nxgep->mac.maxframesize = (uint16_t)
		    (new_mtu + NXGE_EHEADER_VLAN_CRC);
		if (nxge_mac_set_framesize(nxgep)) {
			nxgep->mac.maxframesize =
			    (uint16_t)old_framesize;
			err = EINVAL;
			break;
		}

		err = mac_maxsdu_update(nxgep->mach, new_mtu);
		if (err) {
			nxgep->mac.maxframesize =
			    (uint16_t)old_framesize;
			err = EINVAL;
			break;
		}

		nxgep->mac.default_mtu = new_mtu;
		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_m_setprop: set MTU: %d maxframe %d",
		    new_mtu, nxgep->mac.maxframesize));
		break;

	case DLD_PROP_FLOWCTRL:
		bcopy(pr_val, &fl, sizeof (fl));
		switch (fl) {
		default:
			err = EINVAL;
			break;

		case LINK_FLOWCTRL_NONE:
			param_arr[param_anar_pause].value = 0;
			break;

		case LINK_FLOWCTRL_RX:
			param_arr[param_anar_pause].value = 1;
			break;

		/* TX and bidirectional pause are not supported */
		case LINK_FLOWCTRL_TX:
		case LINK_FLOWCTRL_BI:
			err = EINVAL;
			break;
		}

	/*
	 * Shared tail for all cases above that use "goto reprogram";
	 * FLOWCTRL deliberately falls through into it as well.
	 */
reprogram:
		if (err == 0) {
			if (!nxge_param_link_update(nxgep)) {
				err = EINVAL;
			}
		}
		break;
	case DLD_PROP_PRIVATE:
		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_m_setprop: private property"));
		err = nxge_set_priv_prop(nxgep, pr_name, pr_valsize,
		    pr_val);
		break;

	default:
		err = ENOTSUP;
		break;
	}

	mutex_exit(nxgep->genlock);

	NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
	    "<== nxge_m_setprop (return %d)", err));
	return (err);
}

/*
 * nxge_m_getprop() -- MAC property get callback.  Copies the requested
 * link property (duplex, speed, status, autoneg, flow control, the
 * adv_*/en_* capabilities, or a private property) into pr_val.
 * Returns 0 on success, EINVAL for unknown/undersized requests,
 * ENOTSUP for unsupported half-duplex capabilities.
 */
static int
nxge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
	uint_t pr_flags, uint_t pr_valsize, void *pr_val)
{
	nxge_t *nxgep = barg;
	p_nxge_param_t param_arr = nxgep->param_arr;
	p_nxge_stats_t statsp = nxgep->statsp;
	int err = 0;
	link_flowctrl_t fl;
	uint64_t tmp = 0;
	link_state_t ls;
	boolean_t is_default = (pr_flags & DLD_DEFAULT);

4668 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4669 "==> nxge_m_getprop: pr_num %d", pr_num)); 4670 4671 if (pr_valsize == 0) 4672 return (EINVAL); 4673 4674 if ((is_default) && (pr_num != DLD_PROP_PRIVATE)) { 4675 err = nxge_get_def_val(nxgep, pr_num, pr_valsize, pr_val); 4676 return (err); 4677 } 4678 4679 bzero(pr_val, pr_valsize); 4680 switch (pr_num) { 4681 case DLD_PROP_DUPLEX: 4682 *(uint8_t *)pr_val = statsp->mac_stats.link_duplex; 4683 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4684 "==> nxge_m_getprop: duplex mode %d", 4685 *(uint8_t *)pr_val)); 4686 break; 4687 4688 case DLD_PROP_SPEED: 4689 if (pr_valsize < sizeof (uint64_t)) 4690 return (EINVAL); 4691 tmp = statsp->mac_stats.link_speed * 1000000ull; 4692 bcopy(&tmp, pr_val, sizeof (tmp)); 4693 break; 4694 4695 case DLD_PROP_STATUS: 4696 if (pr_valsize < sizeof (link_state_t)) 4697 return (EINVAL); 4698 if (!statsp->mac_stats.link_up) 4699 ls = LINK_STATE_DOWN; 4700 else 4701 ls = LINK_STATE_UP; 4702 bcopy(&ls, pr_val, sizeof (ls)); 4703 break; 4704 4705 case DLD_PROP_AUTONEG: 4706 *(uint8_t *)pr_val = 4707 param_arr[param_autoneg].value; 4708 break; 4709 4710 case DLD_PROP_FLOWCTRL: 4711 if (pr_valsize < sizeof (link_flowctrl_t)) 4712 return (EINVAL); 4713 4714 fl = LINK_FLOWCTRL_NONE; 4715 if (param_arr[param_anar_pause].value) { 4716 fl = LINK_FLOWCTRL_RX; 4717 } 4718 bcopy(&fl, pr_val, sizeof (fl)); 4719 break; 4720 4721 case DLD_PROP_ADV_1000FDX_CAP: 4722 *(uint8_t *)pr_val = 4723 param_arr[param_anar_1000fdx].value; 4724 break; 4725 4726 case DLD_PROP_EN_1000FDX_CAP: 4727 *(uint8_t *)pr_val = nxgep->param_en_1000fdx; 4728 break; 4729 4730 case DLD_PROP_ADV_100FDX_CAP: 4731 *(uint8_t *)pr_val = 4732 param_arr[param_anar_100fdx].value; 4733 break; 4734 4735 case DLD_PROP_EN_100FDX_CAP: 4736 *(uint8_t *)pr_val = nxgep->param_en_100fdx; 4737 break; 4738 4739 case DLD_PROP_ADV_10FDX_CAP: 4740 *(uint8_t *)pr_val = 4741 param_arr[param_anar_10fdx].value; 4742 break; 4743 4744 case DLD_PROP_EN_10FDX_CAP: 4745 *(uint8_t 
*)pr_val = nxgep->param_en_10fdx; 4746 break; 4747 4748 case DLD_PROP_EN_1000HDX_CAP: 4749 case DLD_PROP_EN_100HDX_CAP: 4750 case DLD_PROP_EN_10HDX_CAP: 4751 case DLD_PROP_ADV_1000HDX_CAP: 4752 case DLD_PROP_ADV_100HDX_CAP: 4753 case DLD_PROP_ADV_10HDX_CAP: 4754 err = ENOTSUP; 4755 break; 4756 4757 case DLD_PROP_PRIVATE: 4758 err = nxge_get_priv_prop(nxgep, pr_name, pr_flags, 4759 pr_valsize, pr_val); 4760 break; 4761 default: 4762 err = EINVAL; 4763 break; 4764 } 4765 4766 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_getprop")); 4767 4768 return (err); 4769 } 4770 4771 /* ARGSUSED */ 4772 static int 4773 nxge_set_priv_prop(p_nxge_t nxgep, const char *pr_name, uint_t pr_valsize, 4774 const void *pr_val) 4775 { 4776 p_nxge_param_t param_arr = nxgep->param_arr; 4777 int err = 0; 4778 long result; 4779 4780 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4781 "==> nxge_set_priv_prop: name %s", pr_name)); 4782 4783 if (strcmp(pr_name, "_accept_jumbo") == 0) { 4784 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4785 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4786 "<== nxge_set_priv_prop: name %s " 4787 "pr_val %s result %d " 4788 "param %d is_jumbo %d", 4789 pr_name, pr_val, result, 4790 param_arr[param_accept_jumbo].value, 4791 nxgep->mac.is_jumbo)); 4792 4793 if (result > 1 || result < 0) { 4794 err = EINVAL; 4795 } else { 4796 if (nxgep->mac.is_jumbo == 4797 (uint32_t)result) { 4798 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4799 "no change (%d %d)", 4800 nxgep->mac.is_jumbo, 4801 result)); 4802 return (0); 4803 } 4804 } 4805 4806 param_arr[param_accept_jumbo].value = result; 4807 nxgep->mac.is_jumbo = B_FALSE; 4808 if (result) { 4809 nxgep->mac.is_jumbo = B_TRUE; 4810 } 4811 4812 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4813 "<== nxge_set_priv_prop: name %s (value %d) is_jumbo %d", 4814 pr_name, result, nxgep->mac.is_jumbo)); 4815 4816 return (err); 4817 } 4818 4819 /* Blanking */ 4820 if (strcmp(pr_name, "_rxdma_intr_time") == 0) { 4821 err = nxge_param_rx_intr_time(nxgep, NULL, NULL, 4822 (char 
*)pr_val, 4823 (caddr_t)¶m_arr[param_rxdma_intr_time]); 4824 if (err) { 4825 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4826 "<== nxge_set_priv_prop: " 4827 "unable to set (%s)", pr_name)); 4828 err = EINVAL; 4829 } else { 4830 err = 0; 4831 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4832 "<== nxge_set_priv_prop: " 4833 "set (%s)", pr_name)); 4834 } 4835 4836 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4837 "<== nxge_set_priv_prop: name %s (value %d)", 4838 pr_name, result)); 4839 4840 return (err); 4841 } 4842 4843 if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) { 4844 err = nxge_param_rx_intr_pkts(nxgep, NULL, NULL, 4845 (char *)pr_val, 4846 (caddr_t)¶m_arr[param_rxdma_intr_pkts]); 4847 if (err) { 4848 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4849 "<== nxge_set_priv_prop: " 4850 "unable to set (%s)", pr_name)); 4851 err = EINVAL; 4852 } else { 4853 err = 0; 4854 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4855 "<== nxge_set_priv_prop: " 4856 "set (%s)", pr_name)); 4857 } 4858 4859 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4860 "<== nxge_set_priv_prop: name %s (value %d)", 4861 pr_name, result)); 4862 4863 return (err); 4864 } 4865 4866 /* Classification */ 4867 if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) { 4868 if (pr_val == NULL) { 4869 err = EINVAL; 4870 return (err); 4871 } 4872 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4873 4874 err = nxge_param_set_ip_opt(nxgep, NULL, 4875 NULL, (char *)pr_val, 4876 (caddr_t)¶m_arr[param_class_opt_ipv4_tcp]); 4877 4878 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4879 "<== nxge_set_priv_prop: name %s (value 0x%x)", 4880 pr_name, result)); 4881 4882 return (err); 4883 } 4884 4885 if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) { 4886 if (pr_val == NULL) { 4887 err = EINVAL; 4888 return (err); 4889 } 4890 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4891 4892 err = nxge_param_set_ip_opt(nxgep, NULL, 4893 NULL, (char *)pr_val, 4894 (caddr_t)¶m_arr[param_class_opt_ipv4_udp]); 4895 4896 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4897 "<== nxge_set_priv_prop: name %s (value 0x%x)", 
4898 pr_name, result)); 4899 4900 return (err); 4901 } 4902 if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) { 4903 if (pr_val == NULL) { 4904 err = EINVAL; 4905 return (err); 4906 } 4907 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4908 4909 err = nxge_param_set_ip_opt(nxgep, NULL, 4910 NULL, (char *)pr_val, 4911 (caddr_t)¶m_arr[param_class_opt_ipv4_ah]); 4912 4913 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4914 "<== nxge_set_priv_prop: name %s (value 0x%x)", 4915 pr_name, result)); 4916 4917 return (err); 4918 } 4919 if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) { 4920 if (pr_val == NULL) { 4921 err = EINVAL; 4922 return (err); 4923 } 4924 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4925 4926 err = nxge_param_set_ip_opt(nxgep, NULL, 4927 NULL, (char *)pr_val, 4928 (caddr_t)¶m_arr[param_class_opt_ipv4_sctp]); 4929 4930 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4931 "<== nxge_set_priv_prop: name %s (value 0x%x)", 4932 pr_name, result)); 4933 4934 return (err); 4935 } 4936 4937 if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) { 4938 if (pr_val == NULL) { 4939 err = EINVAL; 4940 return (err); 4941 } 4942 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4943 4944 err = nxge_param_set_ip_opt(nxgep, NULL, 4945 NULL, (char *)pr_val, 4946 (caddr_t)¶m_arr[param_class_opt_ipv6_tcp]); 4947 4948 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4949 "<== nxge_set_priv_prop: name %s (value 0x%x)", 4950 pr_name, result)); 4951 4952 return (err); 4953 } 4954 4955 if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) { 4956 if (pr_val == NULL) { 4957 err = EINVAL; 4958 return (err); 4959 } 4960 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4961 4962 err = nxge_param_set_ip_opt(nxgep, NULL, 4963 NULL, (char *)pr_val, 4964 (caddr_t)¶m_arr[param_class_opt_ipv6_udp]); 4965 4966 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4967 "<== nxge_set_priv_prop: name %s (value 0x%x)", 4968 pr_name, result)); 4969 4970 return (err); 4971 } 4972 if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) { 4973 if (pr_val == 
NULL) { 4974 err = EINVAL; 4975 return (err); 4976 } 4977 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4978 4979 err = nxge_param_set_ip_opt(nxgep, NULL, 4980 NULL, (char *)pr_val, 4981 (caddr_t)¶m_arr[param_class_opt_ipv6_ah]); 4982 4983 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 4984 "<== nxge_set_priv_prop: name %s (value 0x%x)", 4985 pr_name, result)); 4986 4987 return (err); 4988 } 4989 if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) { 4990 if (pr_val == NULL) { 4991 err = EINVAL; 4992 return (err); 4993 } 4994 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4995 4996 err = nxge_param_set_ip_opt(nxgep, NULL, 4997 NULL, (char *)pr_val, 4998 (caddr_t)¶m_arr[param_class_opt_ipv6_sctp]); 4999 5000 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5001 "<== nxge_set_priv_prop: name %s (value 0x%x)", 5002 pr_name, result)); 5003 5004 return (err); 5005 } 5006 5007 if (strcmp(pr_name, "_soft_lso_enable") == 0) { 5008 if (nxgep->nxge_mac_state == NXGE_MAC_STARTED) { 5009 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5010 "==> nxge_set_priv_prop: name %s (busy)", pr_name)); 5011 err = EBUSY; 5012 return (err); 5013 } 5014 if (pr_val == NULL) { 5015 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5016 "==> nxge_set_priv_prop: name %s (null)", pr_name)); 5017 err = EINVAL; 5018 return (err); 5019 } 5020 5021 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 5022 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5023 "<== nxge_set_priv_prop: name %s " 5024 "(lso %d pr_val %s value %d)", 5025 pr_name, nxgep->soft_lso_enable, pr_val, result)); 5026 5027 if (result > 1 || result < 0) { 5028 err = EINVAL; 5029 } else { 5030 if (nxgep->soft_lso_enable == (uint32_t)result) { 5031 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5032 "no change (%d %d)", 5033 nxgep->soft_lso_enable, result)); 5034 return (0); 5035 } 5036 } 5037 5038 nxgep->soft_lso_enable = (int)result; 5039 5040 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5041 "<== nxge_set_priv_prop: name %s (value %d)", 5042 pr_name, result)); 5043 5044 return (err); 5045 } 5046 if (strcmp(pr_name, 
"_adv_10gfdx_cap") == 0) { 5047 err = nxge_param_set_mac(nxgep, NULL, NULL, (char *)pr_val, 5048 (caddr_t)¶m_arr[param_anar_10gfdx]); 5049 return (err); 5050 } 5051 if (strcmp(pr_name, "_adv_pause_cap") == 0) { 5052 err = nxge_param_set_mac(nxgep, NULL, NULL, (char *)pr_val, 5053 (caddr_t)¶m_arr[param_anar_pause]); 5054 return (err); 5055 } 5056 5057 return (EINVAL); 5058 } 5059 5060 static int 5061 nxge_get_priv_prop(p_nxge_t nxgep, const char *pr_name, uint_t pr_flags, 5062 uint_t pr_valsize, void *pr_val) 5063 { 5064 p_nxge_param_t param_arr = nxgep->param_arr; 5065 char valstr[MAXNAMELEN]; 5066 int err = EINVAL; 5067 uint_t strsize; 5068 boolean_t is_default = (pr_flags & DLD_DEFAULT); 5069 5070 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5071 "==> nxge_get_priv_prop: property %s", pr_name)); 5072 5073 /* function number */ 5074 if (strcmp(pr_name, "_function_number") == 0) { 5075 if (is_default) 5076 return (ENOTSUP); 5077 (void) snprintf(valstr, sizeof (valstr), "%d", 5078 nxgep->function_num); 5079 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5080 "==> nxge_get_priv_prop: name %s " 5081 "(value %d valstr %s)", 5082 pr_name, nxgep->function_num, valstr)); 5083 5084 err = 0; 5085 goto done; 5086 } 5087 5088 /* Neptune firmware version */ 5089 if (strcmp(pr_name, "_fw_version") == 0) { 5090 if (is_default) 5091 return (ENOTSUP); 5092 (void) snprintf(valstr, sizeof (valstr), "%s", 5093 nxgep->vpd_info.ver); 5094 NXGE_DEBUG_MSG((nxgep, NXGE_CTL, 5095 "==> nxge_get_priv_prop: name %s " 5096 "(value %d valstr %s)", 5097 pr_name, nxgep->vpd_info.ver, valstr)); 5098 5099 err = 0; 5100 goto done; 5101 } 5102 5103 /* port PHY mode */ 5104 if (strcmp(pr_name, "_port_mode") == 0) { 5105 if (is_default) 5106 return (ENOTSUP); 5107 switch (nxgep->mac.portmode) { 5108 case PORT_1G_COPPER: 5109 (void) snprintf(valstr, sizeof (valstr), "1G copper %s", 5110 nxgep->hot_swappable_phy ? 
			    "[Hot Swappable]" : "");
			break;
		case PORT_1G_FIBER:
			(void) snprintf(valstr, sizeof (valstr), "1G fiber %s",
			    nxgep->hot_swappable_phy ?
			    "[hot swappable]" : "");
			break;
		case PORT_10G_COPPER:
			(void) snprintf(valstr, sizeof (valstr),
			    "10G copper %s",
			    nxgep->hot_swappable_phy ?
			    "[hot swappable]" : "");
			break;
		case PORT_10G_FIBER:
			(void) snprintf(valstr, sizeof (valstr), "10G fiber %s",
			    nxgep->hot_swappable_phy ?
			    "[hot swappable]" : "");
			break;
		case PORT_10G_SERDES:
			(void) snprintf(valstr, sizeof (valstr),
			    "10G serdes %s", nxgep->hot_swappable_phy ?
			    "[hot swappable]" : "");
			break;
		case PORT_1G_SERDES:
			(void) snprintf(valstr, sizeof (valstr), "1G serdes %s",
			    nxgep->hot_swappable_phy ?
			    "[hot swappable]" : "");
			break;
		case PORT_1G_RGMII_FIBER:
			(void) snprintf(valstr, sizeof (valstr),
			    "1G rgmii fiber %s", nxgep->hot_swappable_phy ?
			    "[hot swappable]" : "");
			break;
		case PORT_HSP_MODE:
			(void) snprintf(valstr, sizeof (valstr),
			    "phy not present[hot swappable]");
			break;
		default:
			(void) snprintf(valstr, sizeof (valstr), "unknown %s",
			    nxgep->hot_swappable_phy ?
			    "[hot swappable]" : "");
			break;
		}

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_get_priv_prop: name %s (value %s)",
		    pr_name, valstr));

		err = 0;
		goto done;
	}

	/* Hot swappable PHY */
	if (strcmp(pr_name, "_hot_swap_phy") == 0) {
		if (is_default)
			return (ENOTSUP);
		(void) snprintf(valstr, sizeof (valstr), "%s",
		    nxgep->hot_swappable_phy ?
		    "yes" : "no");

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_get_priv_prop: name %s "
		    "(value %d valstr %s)",
		    pr_name, nxgep->hot_swappable_phy, valstr));

		err = 0;
		goto done;
	}


	/* accept jumbo */
	if (strcmp(pr_name, "_accept_jumbo") == 0) {
		if (is_default)
			(void) snprintf(valstr, sizeof (valstr), "%d", 0);
		else
			(void) snprintf(valstr, sizeof (valstr),
			    "%d", nxgep->mac.is_jumbo);
		err = 0;
		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_get_priv_prop: name %s (value %d (%d, %d))",
		    pr_name,
		    (uint32_t)param_arr[param_accept_jumbo].value,
		    nxgep->mac.is_jumbo,
		    nxge_jumbo_enable));

		goto done;
	}

	/* Receive Interrupt Blanking Parameters */
	if (strcmp(pr_name, "_rxdma_intr_time") == 0) {
		err = 0;
		if (is_default) {
			(void) snprintf(valstr, sizeof (valstr),
			    "%d", RXDMA_RCR_TO_DEFAULT);
			goto done;
		}

		(void) snprintf(valstr, sizeof (valstr), "%d",
		    nxgep->intr_timeout);
		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_get_priv_prop: name %s (value %d)",
		    pr_name,
		    (uint32_t)nxgep->intr_timeout));
		goto done;
	}

	if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) {
		err = 0;
		if (is_default) {
			(void) snprintf(valstr, sizeof (valstr),
			    "%d", RXDMA_RCR_PTHRES_DEFAULT);
			goto done;
		}
		(void) snprintf(valstr, sizeof (valstr), "%d",
		    nxgep->intr_threshold);
		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_get_priv_prop: name %s (value %d)",
		    pr_name, (uint32_t)nxgep->intr_threshold));

		goto done;
	}

	/* Classification and Load Distribution Configuration */
	if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) {
		if (is_default) {
			(void) snprintf(valstr, sizeof (valstr), "%x",
			    NXGE_CLASS_FLOW_GEN_SERVER);
			err = 0;
			goto done;
		}
		err = nxge_dld_get_ip_opt(nxgep,
		    (caddr_t)&param_arr[param_class_opt_ipv4_tcp]);

		(void) snprintf(valstr, sizeof (valstr), "%x",
		    (int)param_arr[param_class_opt_ipv4_tcp].value);

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_get_priv_prop: %s", valstr));
		goto done;
	}

	if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) {
		if (is_default) {
			(void) snprintf(valstr, sizeof (valstr), "%x",
			    NXGE_CLASS_FLOW_GEN_SERVER);
			err = 0;
			goto done;
		}
		err = nxge_dld_get_ip_opt(nxgep,
		    (caddr_t)&param_arr[param_class_opt_ipv4_udp]);

		(void) snprintf(valstr, sizeof (valstr), "%x",
		    (int)param_arr[param_class_opt_ipv4_udp].value);

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_get_priv_prop: %s", valstr));
		goto done;
	}
	if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) {
		if (is_default) {
			(void) snprintf(valstr, sizeof (valstr), "%x",
			    NXGE_CLASS_FLOW_GEN_SERVER);
			err = 0;
			goto done;
		}
		err = nxge_dld_get_ip_opt(nxgep,
		    (caddr_t)&param_arr[param_class_opt_ipv4_ah]);

		(void) snprintf(valstr, sizeof (valstr), "%x",
		    (int)param_arr[param_class_opt_ipv4_ah].value);

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_get_priv_prop: %s", valstr));
		goto done;
	}

	if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) {
		if (is_default) {
			(void) snprintf(valstr, sizeof (valstr), "%x",
			    NXGE_CLASS_FLOW_GEN_SERVER);
			err = 0;
			goto done;
		}
		err = nxge_dld_get_ip_opt(nxgep,
		    (caddr_t)&param_arr[param_class_opt_ipv4_sctp]);

		(void) snprintf(valstr, sizeof (valstr), "%x",
		    (int)param_arr[param_class_opt_ipv4_sctp].value);

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_get_priv_prop: %s", valstr));
		goto done;
	}

	if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) {
		if (is_default) {
			(void) snprintf(valstr, sizeof (valstr), "%x",
			    NXGE_CLASS_FLOW_GEN_SERVER);
			err = 0;
			goto done;
		}
		err = nxge_dld_get_ip_opt(nxgep,
		    (caddr_t)&param_arr[param_class_opt_ipv6_tcp]);

		(void) snprintf(valstr, sizeof (valstr), "%x",
		    (int)param_arr[param_class_opt_ipv6_tcp].value);

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_get_priv_prop: %s", valstr));
		goto done;
	}

	if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) {
		if (is_default) {
			(void) snprintf(valstr, sizeof (valstr), "%x",
			    NXGE_CLASS_FLOW_GEN_SERVER);
			err = 0;
			goto done;
		}
		err = nxge_dld_get_ip_opt(nxgep,
		    (caddr_t)&param_arr[param_class_opt_ipv6_udp]);

		(void) snprintf(valstr, sizeof (valstr), "%x",
		    (int)param_arr[param_class_opt_ipv6_udp].value);

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_get_priv_prop: %s", valstr));
		goto done;
	}

	if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) {
		if (is_default) {
			(void) snprintf(valstr, sizeof (valstr), "%x",
			    NXGE_CLASS_FLOW_GEN_SERVER);
			err = 0;
			goto done;
		}
		err = nxge_dld_get_ip_opt(nxgep,
		    (caddr_t)&param_arr[param_class_opt_ipv6_ah]);

		(void) snprintf(valstr, sizeof (valstr), "%x",
		    (int)param_arr[param_class_opt_ipv6_ah].value);

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_get_priv_prop: %s", valstr));
		goto done;
	}

	if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) {
		if (is_default) {
			(void) snprintf(valstr, sizeof (valstr), "%x",
			    NXGE_CLASS_FLOW_GEN_SERVER);
			err = 0;
			goto done;
		}
		err = nxge_dld_get_ip_opt(nxgep,
		    (caddr_t)&param_arr[param_class_opt_ipv6_sctp]);

		(void) snprintf(valstr, sizeof (valstr), "%x",
		    (int)param_arr[param_class_opt_ipv6_sctp].value);

		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_get_priv_prop: %s", valstr));
		goto done;
	}

	/* Software LSO */
	if (strcmp(pr_name, "_soft_lso_enable") == 0) {
		if (is_default) {
			(void) snprintf(valstr, sizeof (valstr), "%d", 0);
			err = 0;
			goto done;
		}
		(void) snprintf(valstr, sizeof (valstr),
		    "%d", nxgep->soft_lso_enable);
		err = 0;
		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
		    "==> nxge_get_priv_prop: name %s (value %d)",
		    pr_name, nxgep->soft_lso_enable));

		goto done;
	}
	if (strcmp(pr_name, "_adv_10gfdx_cap") == 0) {
		err = 0;
		if (is_default ||
		    nxgep->param_arr[param_anar_10gfdx].value != 0) {
			(void) snprintf(valstr, sizeof (valstr), "%d", 1);
			goto done;
		} else {
			(void) snprintf(valstr, sizeof (valstr), "%d", 0);
			goto done;
		}
	}
	if (strcmp(pr_name, "_adv_pause_cap") == 0) {
		err = 0;
		if (is_default ||
		    nxgep->param_arr[param_anar_pause].value != 0) {
			(void) snprintf(valstr, sizeof (valstr), "%d", 1);
			goto done;
		} else {
			(void) snprintf(valstr, sizeof (valstr), "%d", 0);
			goto done;
		}
	}

	/* Common exit: copy the formatted string out to the caller. */
done:
	if (err == 0) {
		strsize = (uint_t)strlen(valstr);
		if (pr_valsize < strsize) {
			err = ENOBUFS;
		} else {
			(void) strlcpy(pr_val, valstr, pr_valsize);
		}
	}

	NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
	    "<== nxge_get_priv_prop: return %d", err));
	return (err);
}

/*
 * Module loading and removing entry points.
 */

/*
 * Character/block entry points.  All are stubbed out (nodev): this
 * driver is accessed through the GLDv3/MAC framework, not directly
 * through /dev character operations.
 */
static struct cb_ops nxge_cb_ops = {
	nodev,			/* cb_open */
	nodev,			/* cb_close */
	nodev,			/* cb_strategy */
	nodev,			/* cb_print */
	nodev,			/* cb_dump */
	nodev,			/* cb_read */
	nodev,			/* cb_write */
	nodev,			/* cb_ioctl */
	nodev,			/* cb_devmap */
	nodev,			/* cb_mmap */
	nodev,			/* cb_segmap */
	nochpoll,		/* cb_chpoll */
	ddi_prop_op,		/* cb_prop_op */
	NULL,			/* cb_str (STREAMS table, unused) */
	D_MP,			/* cb_flag: driver is MT-safe */
	CB_REV,			/* rev */
	nodev,			/* int (*cb_aread)() */
	nodev			/* int (*cb_awrite)() */
};

static struct dev_ops nxge_dev_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* devo_refcnt */
	nulldev,		/* devo_getinfo */
	nulldev,		/* devo_identify */
	nulldev,		/* devo_probe */
	nxge_attach,		/* devo_attach */
	nxge_detach,		/* devo_detach */
	nodev,			/* devo_reset */
	&nxge_cb_ops,		/* devo_cb_ops */
	(struct bus_ops *)NULL,	/* devo_bus_ops */
	ddi_power		/* devo_power */
};

extern struct mod_ops mod_driverops;

#define	NXGE_DESC_VER		"Sun NIU 10Gb Ethernet"

/*
 * Module linkage information for the kernel.
 */
static struct modldrv nxge_modldrv = {
	&mod_driverops,		/* this is a device driver module */
	NXGE_DESC_VER,		/* module description string */
	&nxge_dev_ops		/* driver entry points */
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *) &nxge_modldrv, NULL
};

/*
 * _init() -- module load entry point.  Registers the MAC entry points,
 * initializes the per-instance soft-state list, installs the module,
 * and initializes the common lock.  Returns 0 or the failing DDI/mod
 * error code; on failure all partial state is torn down.
 */
int
_init(void)
{
	int status;

	NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _init"));
	mac_init_ops(&nxge_dev_ops, "nxge");
	status = ddi_soft_state_init(&nxge_list, sizeof (nxge_t), 0);
	if (status != 0) {
		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
		    "failed to init device soft state"));
		goto _init_exit;
	}
	status = mod_install(&modlinkage);
	if (status != 0) {
		/* undo the soft-state init; mod_install did not take */
		ddi_soft_state_fini(&nxge_list);
		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "Mod install failed"));
		goto _init_exit;
	}

	MUTEX_INIT(&nxge_common_lock, NULL, MUTEX_DRIVER, NULL);

_init_exit:
	NXGE_DEBUG_MSG((NULL, MOD_CTL, "_init status = 0x%X", status));

	return (status);
}

/*
 * _fini() -- module unload entry point.  Refuses to unload (EBUSY)
 * while any driver-owned mblks are still outstanding, then removes
 * the module and tears down MAC ops, soft state, and the common lock.
 */
int
_fini(void)
{
	int status;

	NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini"));

	NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini: mod_remove"));

	/* loaned-up receive buffers still in flight: cannot unload */
	if (nxge_mblks_pending)
		return (EBUSY);

	status = mod_remove(&modlinkage);
	if (status != DDI_SUCCESS) {
		NXGE_DEBUG_MSG((NULL, MOD_CTL,
		    "Module removal failed 0x%08x",
		    status));
		goto _fini_exit;
	}

	mac_fini_ops(&nxge_dev_ops);

	ddi_soft_state_fini(&nxge_list);

	MUTEX_DESTROY(&nxge_common_lock);
_fini_exit:
	NXGE_DEBUG_MSG((NULL, MOD_CTL, "_fini status = 0x%08x", status));

	return (status);
}

/*
 * _info() -- module information entry point; defers to mod_info().
 */
int
_info(struct modinfo *modinfop)
{
	int status;

	NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _info"));
	status = mod_info(&modlinkage, modinfop);
	NXGE_DEBUG_MSG((NULL, MOD_CTL, " _info status = 0x%X", status));

	return (status);
}

/*ARGSUSED*/
static nxge_status_t
nxge_add_intrs(p_nxge_t nxgep)
{

	int		intr_types;
	int		type = 0;
	int		ddi_status = DDI_SUCCESS;
	nxge_status_t	status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs"));

	/* reset all cached interrupt bookkeeping before negotiating */
	nxgep->nxge_intr_type.intr_registered = B_FALSE;
	nxgep->nxge_intr_type.intr_enabled = B_FALSE;
	nxgep->nxge_intr_type.msi_intx_cnt = 0;
	nxgep->nxge_intr_type.intr_added = 0;
	nxgep->nxge_intr_type.niu_msi_enable = B_FALSE;
	nxgep->nxge_intr_type.intr_type = 0;

	/* N2/NIU always uses the advanced (MSI-capable) path */
	if (nxgep->niu_type == N2_NIU) {
		nxgep->nxge_intr_type.niu_msi_enable = B_TRUE;
	} else if (nxge_msi_enable) {
		nxgep->nxge_intr_type.niu_msi_enable = B_TRUE;
	}

	/* Get the supported interrupt types */
	if ((ddi_status = ddi_intr_get_supported_types(nxgep->dip, &intr_types))
	    != DDI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_add_intrs: "
		    "ddi_intr_get_supported_types failed: status 0x%08x",
		    ddi_status));
		return (NXGE_ERROR | NXGE_DDI_FAILED);
	}
	nxgep->nxge_intr_type.intr_types = intr_types;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
	    "ddi_intr_get_supported_types: 0x%08x", intr_types));

	/*
	 * Solaris MSIX is not supported yet. use MSI for now.
	 * nxge_msi_enable (1):
	 *	1 - MSI		2 - MSI-X	others - FIXED
	 */
	switch (nxge_msi_enable) {
	default:
		type = DDI_INTR_TYPE_FIXED;
		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
		    "use fixed (intx emulation) type %08x",
		    type));
		break;

	case 2:
		/* prefer MSI-X, then MSI, then fixed */
		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
		    "ddi_intr_get_supported_types: 0x%08x", intr_types));
		if (intr_types & DDI_INTR_TYPE_MSIX) {
			type = DDI_INTR_TYPE_MSIX;
			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
			    "ddi_intr_get_supported_types: MSIX 0x%08x",
			    type));
		} else if (intr_types & DDI_INTR_TYPE_MSI) {
			type = DDI_INTR_TYPE_MSI;
			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
			    "ddi_intr_get_supported_types: MSI 0x%08x",
			    type));
		} else if (intr_types & DDI_INTR_TYPE_FIXED) {
			type = DDI_INTR_TYPE_FIXED;
			NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
			    "ddi_intr_get_supported_types: MSXED0x%08x",
			    type));
		}
		break;

	case 1:
		/* prefer MSI, then MSI-X, then fixed */
		if (intr_types & DDI_INTR_TYPE_MSI) {
			type = DDI_INTR_TYPE_MSI;
			NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
			    "ddi_intr_get_supported_types: MSI 0x%08x",
			    type));
		} else if (intr_types & DDI_INTR_TYPE_MSIX) {
			type = DDI_INTR_TYPE_MSIX;
			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
			    "ddi_intr_get_supported_types: MSIX 0x%08x",
			    type));
		} else if (intr_types & DDI_INTR_TYPE_FIXED) {
			type = DDI_INTR_TYPE_FIXED;
			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
			    "ddi_intr_get_supported_types: MSXED0x%08x",
			    type));
		}
	}

	nxgep->nxge_intr_type.intr_type = type;
	if ((type == DDI_INTR_TYPE_MSIX || type == DDI_INTR_TYPE_MSI ||
	    type == DDI_INTR_TYPE_FIXED) &&
	    nxgep->nxge_intr_type.niu_msi_enable) {
		if ((status = nxge_add_intrs_adv(nxgep)) != DDI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
5655 " nxge_add_intrs: " 5656 " nxge_add_intrs_adv failed: status 0x%08x", 5657 status)); 5658 return (status); 5659 } else { 5660 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: " 5661 "interrupts registered : type %d", type)); 5662 nxgep->nxge_intr_type.intr_registered = B_TRUE; 5663 5664 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 5665 "\nAdded advanced nxge add_intr_adv " 5666 "intr type 0x%x\n", type)); 5667 5668 return (status); 5669 } 5670 } 5671 5672 if (!nxgep->nxge_intr_type.intr_registered) { 5673 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_add_intrs: " 5674 "failed to register interrupts")); 5675 return (NXGE_ERROR | NXGE_DDI_FAILED); 5676 } 5677 5678 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_add_intrs")); 5679 return (status); 5680 } 5681 5682 /*ARGSUSED*/ 5683 static nxge_status_t 5684 nxge_add_soft_intrs(p_nxge_t nxgep) 5685 { 5686 5687 int ddi_status = DDI_SUCCESS; 5688 nxge_status_t status = NXGE_OK; 5689 5690 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_soft_intrs")); 5691 5692 nxgep->resched_id = NULL; 5693 nxgep->resched_running = B_FALSE; 5694 ddi_status = ddi_add_softintr(nxgep->dip, DDI_SOFTINT_LOW, 5695 &nxgep->resched_id, 5696 NULL, NULL, nxge_reschedule, (caddr_t)nxgep); 5697 if (ddi_status != DDI_SUCCESS) { 5698 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_add_soft_intrs: " 5699 "ddi_add_softintrs failed: status 0x%08x", 5700 ddi_status)); 5701 return (NXGE_ERROR | NXGE_DDI_FAILED); 5702 } 5703 5704 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_ddi_add_soft_intrs")); 5705 5706 return (status); 5707 } 5708 5709 static nxge_status_t 5710 nxge_add_intrs_adv(p_nxge_t nxgep) 5711 { 5712 int intr_type; 5713 p_nxge_intr_t intrp; 5714 5715 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv")); 5716 5717 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 5718 intr_type = intrp->intr_type; 5719 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv: type 0x%x", 5720 intr_type)); 5721 5722 switch (intr_type) { 5723 case DDI_INTR_TYPE_MSI: /* 0x2 */ 
5724 case DDI_INTR_TYPE_MSIX: /* 0x4 */ 5725 return (nxge_add_intrs_adv_type(nxgep, intr_type)); 5726 5727 case DDI_INTR_TYPE_FIXED: /* 0x1 */ 5728 return (nxge_add_intrs_adv_type_fix(nxgep, intr_type)); 5729 5730 default: 5731 return (NXGE_ERROR); 5732 } 5733 } 5734 5735 5736 /*ARGSUSED*/ 5737 static nxge_status_t 5738 nxge_add_intrs_adv_type(p_nxge_t nxgep, uint32_t int_type) 5739 { 5740 dev_info_t *dip = nxgep->dip; 5741 p_nxge_ldg_t ldgp; 5742 p_nxge_intr_t intrp; 5743 uint_t *inthandler; 5744 void *arg1, *arg2; 5745 int behavior; 5746 int nintrs, navail, nrequest; 5747 int nactual, nrequired; 5748 int inum = 0; 5749 int x, y; 5750 int ddi_status = DDI_SUCCESS; 5751 nxge_status_t status = NXGE_OK; 5752 5753 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type")); 5754 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 5755 intrp->start_inum = 0; 5756 5757 ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs); 5758 if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) { 5759 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 5760 "ddi_intr_get_nintrs() failed, status: 0x%x%, " 5761 "nintrs: %d", ddi_status, nintrs)); 5762 return (NXGE_ERROR | NXGE_DDI_FAILED); 5763 } 5764 5765 ddi_status = ddi_intr_get_navail(dip, int_type, &navail); 5766 if ((ddi_status != DDI_SUCCESS) || (navail == 0)) { 5767 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 5768 "ddi_intr_get_navail() failed, status: 0x%x%, " 5769 "nintrs: %d", ddi_status, navail)); 5770 return (NXGE_ERROR | NXGE_DDI_FAILED); 5771 } 5772 5773 NXGE_DEBUG_MSG((nxgep, INT_CTL, 5774 "ddi_intr_get_navail() returned: nintrs %d, navail %d", 5775 nintrs, navail)); 5776 5777 /* PSARC/2007/453 MSI-X interrupt limit override */ 5778 if (int_type == DDI_INTR_TYPE_MSIX) { 5779 nrequest = nxge_create_msi_property(nxgep); 5780 if (nrequest < navail) { 5781 navail = nrequest; 5782 NXGE_DEBUG_MSG((nxgep, INT_CTL, 5783 "nxge_add_intrs_adv_type: nintrs %d " 5784 "navail %d (nrequest %d)", 5785 nintrs, navail, nrequest)); 5786 } 5787 } 5788 5789 if 
(int_type == DDI_INTR_TYPE_MSI && !ISP2(navail)) { 5790 /* MSI must be power of 2 */ 5791 if ((navail & 16) == 16) { 5792 navail = 16; 5793 } else if ((navail & 8) == 8) { 5794 navail = 8; 5795 } else if ((navail & 4) == 4) { 5796 navail = 4; 5797 } else if ((navail & 2) == 2) { 5798 navail = 2; 5799 } else { 5800 navail = 1; 5801 } 5802 NXGE_DEBUG_MSG((nxgep, INT_CTL, 5803 "ddi_intr_get_navail(): (msi power of 2) nintrs %d, " 5804 "navail %d", nintrs, navail)); 5805 } 5806 5807 behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT : 5808 DDI_INTR_ALLOC_NORMAL); 5809 intrp->intr_size = navail * sizeof (ddi_intr_handle_t); 5810 intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP); 5811 ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum, 5812 navail, &nactual, behavior); 5813 if (ddi_status != DDI_SUCCESS || nactual == 0) { 5814 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 5815 " ddi_intr_alloc() failed: %d", 5816 ddi_status)); 5817 kmem_free(intrp->htable, intrp->intr_size); 5818 return (NXGE_ERROR | NXGE_DDI_FAILED); 5819 } 5820 5821 if ((ddi_status = ddi_intr_get_pri(intrp->htable[0], 5822 (uint_t *)&intrp->pri)) != DDI_SUCCESS) { 5823 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 5824 " ddi_intr_get_pri() failed: %d", 5825 ddi_status)); 5826 /* Free already allocated interrupts */ 5827 for (y = 0; y < nactual; y++) { 5828 (void) ddi_intr_free(intrp->htable[y]); 5829 } 5830 5831 kmem_free(intrp->htable, intrp->intr_size); 5832 return (NXGE_ERROR | NXGE_DDI_FAILED); 5833 } 5834 5835 nrequired = 0; 5836 switch (nxgep->niu_type) { 5837 default: 5838 status = nxge_ldgv_init(nxgep, &nactual, &nrequired); 5839 break; 5840 5841 case N2_NIU: 5842 status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired); 5843 break; 5844 } 5845 5846 if (status != NXGE_OK) { 5847 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 5848 "nxge_add_intrs_adv_typ:nxge_ldgv_init " 5849 "failed: 0x%x", status)); 5850 /* Free already allocated interrupts */ 5851 for (y = 0; y < nactual; y++) { 5852 
(void) ddi_intr_free(intrp->htable[y]); 5853 } 5854 5855 kmem_free(intrp->htable, intrp->intr_size); 5856 return (status); 5857 } 5858 5859 ldgp = nxgep->ldgvp->ldgp; 5860 for (x = 0; x < nrequired; x++, ldgp++) { 5861 ldgp->vector = (uint8_t)x; 5862 ldgp->intdata = SID_DATA(ldgp->func, x); 5863 arg1 = ldgp->ldvp; 5864 arg2 = nxgep; 5865 if (ldgp->nldvs == 1) { 5866 inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler; 5867 NXGE_DEBUG_MSG((nxgep, INT_CTL, 5868 "nxge_add_intrs_adv_type: " 5869 "arg1 0x%x arg2 0x%x: " 5870 "1-1 int handler (entry %d intdata 0x%x)\n", 5871 arg1, arg2, 5872 x, ldgp->intdata)); 5873 } else if (ldgp->nldvs > 1) { 5874 inthandler = (uint_t *)ldgp->sys_intr_handler; 5875 NXGE_DEBUG_MSG((nxgep, INT_CTL, 5876 "nxge_add_intrs_adv_type: " 5877 "arg1 0x%x arg2 0x%x: " 5878 "nldevs %d int handler " 5879 "(entry %d intdata 0x%x)\n", 5880 arg1, arg2, 5881 ldgp->nldvs, x, ldgp->intdata)); 5882 } 5883 5884 NXGE_DEBUG_MSG((nxgep, INT_CTL, 5885 "==> nxge_add_intrs_adv_type: ddi_add_intr(inum) #%d " 5886 "htable 0x%llx", x, intrp->htable[x])); 5887 5888 if ((ddi_status = ddi_intr_add_handler(intrp->htable[x], 5889 (ddi_intr_handler_t *)inthandler, arg1, arg2)) 5890 != DDI_SUCCESS) { 5891 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 5892 "==> nxge_add_intrs_adv_type: failed #%d " 5893 "status 0x%x", x, ddi_status)); 5894 for (y = 0; y < intrp->intr_added; y++) { 5895 (void) ddi_intr_remove_handler( 5896 intrp->htable[y]); 5897 } 5898 /* Free already allocated intr */ 5899 for (y = 0; y < nactual; y++) { 5900 (void) ddi_intr_free(intrp->htable[y]); 5901 } 5902 kmem_free(intrp->htable, intrp->intr_size); 5903 5904 (void) nxge_ldgv_uninit(nxgep); 5905 5906 return (NXGE_ERROR | NXGE_DDI_FAILED); 5907 } 5908 intrp->intr_added++; 5909 } 5910 5911 intrp->msi_intx_cnt = nactual; 5912 5913 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 5914 "Requested: %d, Allowed: %d msi_intx_cnt %d intr_added %d", 5915 navail, nactual, 5916 intrp->msi_intx_cnt, 5917 intrp->intr_added)); 5918 5919 
(void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap); 5920 5921 (void) nxge_intr_ldgv_init(nxgep); 5922 5923 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type")); 5924 5925 return (status); 5926 } 5927 5928 /*ARGSUSED*/ 5929 static nxge_status_t 5930 nxge_add_intrs_adv_type_fix(p_nxge_t nxgep, uint32_t int_type) 5931 { 5932 dev_info_t *dip = nxgep->dip; 5933 p_nxge_ldg_t ldgp; 5934 p_nxge_intr_t intrp; 5935 uint_t *inthandler; 5936 void *arg1, *arg2; 5937 int behavior; 5938 int nintrs, navail; 5939 int nactual, nrequired; 5940 int inum = 0; 5941 int x, y; 5942 int ddi_status = DDI_SUCCESS; 5943 nxge_status_t status = NXGE_OK; 5944 5945 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type_fix")); 5946 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 5947 intrp->start_inum = 0; 5948 5949 ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs); 5950 if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) { 5951 NXGE_DEBUG_MSG((nxgep, INT_CTL, 5952 "ddi_intr_get_nintrs() failed, status: 0x%x%, " 5953 "nintrs: %d", status, nintrs)); 5954 return (NXGE_ERROR | NXGE_DDI_FAILED); 5955 } 5956 5957 ddi_status = ddi_intr_get_navail(dip, int_type, &navail); 5958 if ((ddi_status != DDI_SUCCESS) || (navail == 0)) { 5959 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 5960 "ddi_intr_get_navail() failed, status: 0x%x%, " 5961 "nintrs: %d", ddi_status, navail)); 5962 return (NXGE_ERROR | NXGE_DDI_FAILED); 5963 } 5964 5965 NXGE_DEBUG_MSG((nxgep, INT_CTL, 5966 "ddi_intr_get_navail() returned: nintrs %d, naavail %d", 5967 nintrs, navail)); 5968 5969 behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? 
DDI_INTR_ALLOC_STRICT : 5970 DDI_INTR_ALLOC_NORMAL); 5971 intrp->intr_size = navail * sizeof (ddi_intr_handle_t); 5972 intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP); 5973 ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum, 5974 navail, &nactual, behavior); 5975 if (ddi_status != DDI_SUCCESS || nactual == 0) { 5976 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 5977 " ddi_intr_alloc() failed: %d", 5978 ddi_status)); 5979 kmem_free(intrp->htable, intrp->intr_size); 5980 return (NXGE_ERROR | NXGE_DDI_FAILED); 5981 } 5982 5983 if ((ddi_status = ddi_intr_get_pri(intrp->htable[0], 5984 (uint_t *)&intrp->pri)) != DDI_SUCCESS) { 5985 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 5986 " ddi_intr_get_pri() failed: %d", 5987 ddi_status)); 5988 /* Free already allocated interrupts */ 5989 for (y = 0; y < nactual; y++) { 5990 (void) ddi_intr_free(intrp->htable[y]); 5991 } 5992 5993 kmem_free(intrp->htable, intrp->intr_size); 5994 return (NXGE_ERROR | NXGE_DDI_FAILED); 5995 } 5996 5997 nrequired = 0; 5998 switch (nxgep->niu_type) { 5999 default: 6000 status = nxge_ldgv_init(nxgep, &nactual, &nrequired); 6001 break; 6002 6003 case N2_NIU: 6004 status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired); 6005 break; 6006 } 6007 6008 if (status != NXGE_OK) { 6009 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 6010 "nxge_add_intrs_adv_type_fix:nxge_ldgv_init " 6011 "failed: 0x%x", status)); 6012 /* Free already allocated interrupts */ 6013 for (y = 0; y < nactual; y++) { 6014 (void) ddi_intr_free(intrp->htable[y]); 6015 } 6016 6017 kmem_free(intrp->htable, intrp->intr_size); 6018 return (status); 6019 } 6020 6021 ldgp = nxgep->ldgvp->ldgp; 6022 for (x = 0; x < nrequired; x++, ldgp++) { 6023 ldgp->vector = (uint8_t)x; 6024 if (nxgep->niu_type != N2_NIU) { 6025 ldgp->intdata = SID_DATA(ldgp->func, x); 6026 } 6027 6028 arg1 = ldgp->ldvp; 6029 arg2 = nxgep; 6030 if (ldgp->nldvs == 1) { 6031 inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler; 6032 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6033 
"nxge_add_intrs_adv_type_fix: " 6034 "1-1 int handler(%d) ldg %d ldv %d " 6035 "arg1 $%p arg2 $%p\n", 6036 x, ldgp->ldg, ldgp->ldvp->ldv, 6037 arg1, arg2)); 6038 } else if (ldgp->nldvs > 1) { 6039 inthandler = (uint_t *)ldgp->sys_intr_handler; 6040 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6041 "nxge_add_intrs_adv_type_fix: " 6042 "shared ldv %d int handler(%d) ldv %d ldg %d" 6043 "arg1 0x%016llx arg2 0x%016llx\n", 6044 x, ldgp->nldvs, ldgp->ldg, ldgp->ldvp->ldv, 6045 arg1, arg2)); 6046 } 6047 6048 if ((ddi_status = ddi_intr_add_handler(intrp->htable[x], 6049 (ddi_intr_handler_t *)inthandler, arg1, arg2)) 6050 != DDI_SUCCESS) { 6051 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 6052 "==> nxge_add_intrs_adv_type_fix: failed #%d " 6053 "status 0x%x", x, ddi_status)); 6054 for (y = 0; y < intrp->intr_added; y++) { 6055 (void) ddi_intr_remove_handler( 6056 intrp->htable[y]); 6057 } 6058 for (y = 0; y < nactual; y++) { 6059 (void) ddi_intr_free(intrp->htable[y]); 6060 } 6061 /* Free already allocated intr */ 6062 kmem_free(intrp->htable, intrp->intr_size); 6063 6064 (void) nxge_ldgv_uninit(nxgep); 6065 6066 return (NXGE_ERROR | NXGE_DDI_FAILED); 6067 } 6068 intrp->intr_added++; 6069 } 6070 6071 intrp->msi_intx_cnt = nactual; 6072 6073 (void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap); 6074 6075 status = nxge_intr_ldgv_init(nxgep); 6076 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type_fix")); 6077 6078 return (status); 6079 } 6080 6081 static void 6082 nxge_remove_intrs(p_nxge_t nxgep) 6083 { 6084 int i, inum; 6085 p_nxge_intr_t intrp; 6086 6087 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs")); 6088 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 6089 if (!intrp->intr_registered) { 6090 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6091 "<== nxge_remove_intrs: interrupts not registered")); 6092 return; 6093 } 6094 6095 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs:advanced")); 6096 6097 if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) { 6098 (void) 
ddi_intr_block_disable(intrp->htable, 6099 intrp->intr_added); 6100 } else { 6101 for (i = 0; i < intrp->intr_added; i++) { 6102 (void) ddi_intr_disable(intrp->htable[i]); 6103 } 6104 } 6105 6106 for (inum = 0; inum < intrp->intr_added; inum++) { 6107 if (intrp->htable[inum]) { 6108 (void) ddi_intr_remove_handler(intrp->htable[inum]); 6109 } 6110 } 6111 6112 for (inum = 0; inum < intrp->msi_intx_cnt; inum++) { 6113 if (intrp->htable[inum]) { 6114 NXGE_DEBUG_MSG((nxgep, DDI_CTL, 6115 "nxge_remove_intrs: ddi_intr_free inum %d " 6116 "msi_intx_cnt %d intr_added %d", 6117 inum, 6118 intrp->msi_intx_cnt, 6119 intrp->intr_added)); 6120 6121 (void) ddi_intr_free(intrp->htable[inum]); 6122 } 6123 } 6124 6125 kmem_free(intrp->htable, intrp->intr_size); 6126 intrp->intr_registered = B_FALSE; 6127 intrp->intr_enabled = B_FALSE; 6128 intrp->msi_intx_cnt = 0; 6129 intrp->intr_added = 0; 6130 6131 (void) nxge_ldgv_uninit(nxgep); 6132 6133 (void) ddi_prop_remove(DDI_DEV_T_NONE, nxgep->dip, 6134 "#msix-request"); 6135 6136 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_remove_intrs")); 6137 } 6138 6139 /*ARGSUSED*/ 6140 static void 6141 nxge_remove_soft_intrs(p_nxge_t nxgep) 6142 { 6143 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_soft_intrs")); 6144 if (nxgep->resched_id) { 6145 ddi_remove_softintr(nxgep->resched_id); 6146 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6147 "==> nxge_remove_soft_intrs: removed")); 6148 nxgep->resched_id = NULL; 6149 } 6150 6151 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_remove_soft_intrs")); 6152 } 6153 6154 /*ARGSUSED*/ 6155 static void 6156 nxge_intrs_enable(p_nxge_t nxgep) 6157 { 6158 p_nxge_intr_t intrp; 6159 int i; 6160 int status; 6161 6162 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable")); 6163 6164 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 6165 6166 if (!intrp->intr_registered) { 6167 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_intrs_enable: " 6168 "interrupts are not registered")); 6169 return; 6170 } 6171 6172 if (intrp->intr_enabled) { 
6173 NXGE_DEBUG_MSG((nxgep, INT_CTL, 6174 "<== nxge_intrs_enable: already enabled")); 6175 return; 6176 } 6177 6178 if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) { 6179 status = ddi_intr_block_enable(intrp->htable, 6180 intrp->intr_added); 6181 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable " 6182 "block enable - status 0x%x total inums #%d\n", 6183 status, intrp->intr_added)); 6184 } else { 6185 for (i = 0; i < intrp->intr_added; i++) { 6186 status = ddi_intr_enable(intrp->htable[i]); 6187 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable " 6188 "ddi_intr_enable:enable - status 0x%x " 6189 "total inums %d enable inum #%d\n", 6190 status, intrp->intr_added, i)); 6191 if (status == DDI_SUCCESS) { 6192 intrp->intr_enabled = B_TRUE; 6193 } 6194 } 6195 } 6196 6197 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_enable")); 6198 } 6199 6200 /*ARGSUSED*/ 6201 static void 6202 nxge_intrs_disable(p_nxge_t nxgep) 6203 { 6204 p_nxge_intr_t intrp; 6205 int i; 6206 6207 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_disable")); 6208 6209 intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type; 6210 6211 if (!intrp->intr_registered) { 6212 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable: " 6213 "interrupts are not registered")); 6214 return; 6215 } 6216 6217 if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) { 6218 (void) ddi_intr_block_disable(intrp->htable, 6219 intrp->intr_added); 6220 } else { 6221 for (i = 0; i < intrp->intr_added; i++) { 6222 (void) ddi_intr_disable(intrp->htable[i]); 6223 } 6224 } 6225 6226 intrp->intr_enabled = B_FALSE; 6227 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable")); 6228 } 6229 6230 static nxge_status_t 6231 nxge_mac_register(p_nxge_t nxgep) 6232 { 6233 mac_register_t *macp; 6234 int status; 6235 6236 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_mac_register")); 6237 6238 if ((macp = mac_alloc(MAC_VERSION)) == NULL) 6239 return (NXGE_ERROR); 6240 6241 macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER; 6242 macp->m_driver = nxgep; 6243 
macp->m_dip = nxgep->dip; 6244 macp->m_src_addr = nxgep->ouraddr.ether_addr_octet; 6245 macp->m_callbacks = &nxge_m_callbacks; 6246 macp->m_min_sdu = 0; 6247 nxgep->mac.default_mtu = nxgep->mac.maxframesize - 6248 NXGE_EHEADER_VLAN_CRC; 6249 macp->m_max_sdu = nxgep->mac.default_mtu; 6250 macp->m_margin = VLAN_TAGSZ; 6251 macp->m_priv_props = nxge_priv_props; 6252 macp->m_priv_prop_count = NXGE_MAX_PRIV_PROPS; 6253 6254 NXGE_DEBUG_MSG((nxgep, MAC_CTL, 6255 "==> nxge_mac_register: instance %d " 6256 "max_sdu %d margin %d maxframe %d (header %d)", 6257 nxgep->instance, 6258 macp->m_max_sdu, macp->m_margin, 6259 nxgep->mac.maxframesize, 6260 NXGE_EHEADER_VLAN_CRC)); 6261 6262 status = mac_register(macp, &nxgep->mach); 6263 mac_free(macp); 6264 6265 if (status != 0) { 6266 cmn_err(CE_WARN, 6267 "!nxge_mac_register failed (status %d instance %d)", 6268 status, nxgep->instance); 6269 return (NXGE_ERROR); 6270 } 6271 6272 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_mac_register success " 6273 "(instance %d)", nxgep->instance)); 6274 6275 return (NXGE_OK); 6276 } 6277 6278 void 6279 nxge_err_inject(p_nxge_t nxgep, queue_t *wq, mblk_t *mp) 6280 { 6281 ssize_t size; 6282 mblk_t *nmp; 6283 uint8_t blk_id; 6284 uint8_t chan; 6285 uint32_t err_id; 6286 err_inject_t *eip; 6287 6288 NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_err_inject")); 6289 6290 size = 1024; 6291 nmp = mp->b_cont; 6292 eip = (err_inject_t *)nmp->b_rptr; 6293 blk_id = eip->blk_id; 6294 err_id = eip->err_id; 6295 chan = eip->chan; 6296 cmn_err(CE_NOTE, "!blk_id = 0x%x\n", blk_id); 6297 cmn_err(CE_NOTE, "!err_id = 0x%x\n", err_id); 6298 cmn_err(CE_NOTE, "!chan = 0x%x\n", chan); 6299 switch (blk_id) { 6300 case MAC_BLK_ID: 6301 break; 6302 case TXMAC_BLK_ID: 6303 break; 6304 case RXMAC_BLK_ID: 6305 break; 6306 case MIF_BLK_ID: 6307 break; 6308 case IPP_BLK_ID: 6309 nxge_ipp_inject_err(nxgep, err_id); 6310 break; 6311 case TXC_BLK_ID: 6312 nxge_txc_inject_err(nxgep, err_id); 6313 break; 6314 case TXDMA_BLK_ID: 6315 
nxge_txdma_inject_err(nxgep, err_id, chan); 6316 break; 6317 case RXDMA_BLK_ID: 6318 nxge_rxdma_inject_err(nxgep, err_id, chan); 6319 break; 6320 case ZCP_BLK_ID: 6321 nxge_zcp_inject_err(nxgep, err_id); 6322 break; 6323 case ESPC_BLK_ID: 6324 break; 6325 case FFLP_BLK_ID: 6326 break; 6327 case PHY_BLK_ID: 6328 break; 6329 case ETHER_SERDES_BLK_ID: 6330 break; 6331 case PCIE_SERDES_BLK_ID: 6332 break; 6333 case VIR_BLK_ID: 6334 break; 6335 } 6336 6337 nmp->b_wptr = nmp->b_rptr + size; 6338 NXGE_DEBUG_MSG((nxgep, STR_CTL, "<== nxge_err_inject")); 6339 6340 miocack(wq, mp, (int)size, 0); 6341 } 6342 6343 static int 6344 nxge_init_common_dev(p_nxge_t nxgep) 6345 { 6346 p_nxge_hw_list_t hw_p; 6347 dev_info_t *p_dip; 6348 6349 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_init_common_device")); 6350 6351 p_dip = nxgep->p_dip; 6352 MUTEX_ENTER(&nxge_common_lock); 6353 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6354 "==> nxge_init_common_dev:func # %d", 6355 nxgep->function_num)); 6356 /* 6357 * Loop through existing per neptune hardware list. 
6358 */ 6359 for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) { 6360 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6361 "==> nxge_init_common_device:func # %d " 6362 "hw_p $%p parent dip $%p", 6363 nxgep->function_num, 6364 hw_p, 6365 p_dip)); 6366 if (hw_p->parent_devp == p_dip) { 6367 nxgep->nxge_hw_p = hw_p; 6368 hw_p->ndevs++; 6369 hw_p->nxge_p[nxgep->function_num] = nxgep; 6370 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6371 "==> nxge_init_common_device:func # %d " 6372 "hw_p $%p parent dip $%p " 6373 "ndevs %d (found)", 6374 nxgep->function_num, 6375 hw_p, 6376 p_dip, 6377 hw_p->ndevs)); 6378 break; 6379 } 6380 } 6381 6382 if (hw_p == NULL) { 6383 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6384 "==> nxge_init_common_device:func # %d " 6385 "parent dip $%p (new)", 6386 nxgep->function_num, 6387 p_dip)); 6388 hw_p = kmem_zalloc(sizeof (nxge_hw_list_t), KM_SLEEP); 6389 hw_p->parent_devp = p_dip; 6390 hw_p->magic = NXGE_NEPTUNE_MAGIC; 6391 nxgep->nxge_hw_p = hw_p; 6392 hw_p->ndevs++; 6393 hw_p->nxge_p[nxgep->function_num] = nxgep; 6394 hw_p->next = nxge_hw_list; 6395 if (nxgep->niu_type == N2_NIU) { 6396 hw_p->niu_type = N2_NIU; 6397 hw_p->platform_type = P_NEPTUNE_NIU; 6398 } else { 6399 hw_p->niu_type = NIU_TYPE_NONE; 6400 hw_p->platform_type = P_NEPTUNE_NONE; 6401 } 6402 6403 MUTEX_INIT(&hw_p->nxge_cfg_lock, NULL, MUTEX_DRIVER, NULL); 6404 MUTEX_INIT(&hw_p->nxge_tcam_lock, NULL, MUTEX_DRIVER, NULL); 6405 MUTEX_INIT(&hw_p->nxge_vlan_lock, NULL, MUTEX_DRIVER, NULL); 6406 MUTEX_INIT(&hw_p->nxge_mdio_lock, NULL, MUTEX_DRIVER, NULL); 6407 6408 nxge_hw_list = hw_p; 6409 6410 (void) nxge_scan_ports_phy(nxgep, nxge_hw_list); 6411 } 6412 6413 MUTEX_EXIT(&nxge_common_lock); 6414 6415 nxgep->platform_type = hw_p->platform_type; 6416 if (nxgep->niu_type != N2_NIU) { 6417 nxgep->niu_type = hw_p->niu_type; 6418 } 6419 6420 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6421 "==> nxge_init_common_device (nxge_hw_list) $%p", 6422 nxge_hw_list)); 6423 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<== nxge_init_common_device")); 6424 6425 
return (NXGE_OK); 6426 } 6427 6428 static void 6429 nxge_uninit_common_dev(p_nxge_t nxgep) 6430 { 6431 p_nxge_hw_list_t hw_p, h_hw_p; 6432 dev_info_t *p_dip; 6433 6434 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_uninit_common_device")); 6435 if (nxgep->nxge_hw_p == NULL) { 6436 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6437 "<== nxge_uninit_common_device (no common)")); 6438 return; 6439 } 6440 6441 MUTEX_ENTER(&nxge_common_lock); 6442 h_hw_p = nxge_hw_list; 6443 for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) { 6444 p_dip = hw_p->parent_devp; 6445 if (nxgep->nxge_hw_p == hw_p && 6446 p_dip == nxgep->p_dip && 6447 nxgep->nxge_hw_p->magic == NXGE_NEPTUNE_MAGIC && 6448 hw_p->magic == NXGE_NEPTUNE_MAGIC) { 6449 6450 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6451 "==> nxge_uninit_common_device:func # %d " 6452 "hw_p $%p parent dip $%p " 6453 "ndevs %d (found)", 6454 nxgep->function_num, 6455 hw_p, 6456 p_dip, 6457 hw_p->ndevs)); 6458 6459 if (hw_p->ndevs) { 6460 hw_p->ndevs--; 6461 } 6462 hw_p->nxge_p[nxgep->function_num] = NULL; 6463 if (!hw_p->ndevs) { 6464 MUTEX_DESTROY(&hw_p->nxge_vlan_lock); 6465 MUTEX_DESTROY(&hw_p->nxge_tcam_lock); 6466 MUTEX_DESTROY(&hw_p->nxge_cfg_lock); 6467 MUTEX_DESTROY(&hw_p->nxge_mdio_lock); 6468 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6469 "==> nxge_uninit_common_device: " 6470 "func # %d " 6471 "hw_p $%p parent dip $%p " 6472 "ndevs %d (last)", 6473 nxgep->function_num, 6474 hw_p, 6475 p_dip, 6476 hw_p->ndevs)); 6477 6478 nxge_hio_uninit(nxgep); 6479 6480 if (hw_p == nxge_hw_list) { 6481 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6482 "==> nxge_uninit_common_device:" 6483 "remove head func # %d " 6484 "hw_p $%p parent dip $%p " 6485 "ndevs %d (head)", 6486 nxgep->function_num, 6487 hw_p, 6488 p_dip, 6489 hw_p->ndevs)); 6490 nxge_hw_list = hw_p->next; 6491 } else { 6492 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6493 "==> nxge_uninit_common_device:" 6494 "remove middle func # %d " 6495 "hw_p $%p parent dip $%p " 6496 "ndevs %d (middle)", 6497 nxgep->function_num, 6498 hw_p, 6499 p_dip, 
6500 hw_p->ndevs)); 6501 h_hw_p->next = hw_p->next; 6502 } 6503 6504 nxgep->nxge_hw_p = NULL; 6505 KMEM_FREE(hw_p, sizeof (nxge_hw_list_t)); 6506 } 6507 break; 6508 } else { 6509 h_hw_p = hw_p; 6510 } 6511 } 6512 6513 MUTEX_EXIT(&nxge_common_lock); 6514 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6515 "==> nxge_uninit_common_device (nxge_hw_list) $%p", 6516 nxge_hw_list)); 6517 6518 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<= nxge_uninit_common_device")); 6519 } 6520 6521 /* 6522 * Determines the number of ports from the niu_type or the platform type. 6523 * Returns the number of ports, or returns zero on failure. 6524 */ 6525 6526 int 6527 nxge_get_nports(p_nxge_t nxgep) 6528 { 6529 int nports = 0; 6530 6531 switch (nxgep->niu_type) { 6532 case N2_NIU: 6533 case NEPTUNE_2_10GF: 6534 nports = 2; 6535 break; 6536 case NEPTUNE_4_1GC: 6537 case NEPTUNE_2_10GF_2_1GC: 6538 case NEPTUNE_1_10GF_3_1GC: 6539 case NEPTUNE_1_1GC_1_10GF_2_1GC: 6540 case NEPTUNE_2_10GF_2_1GRF: 6541 nports = 4; 6542 break; 6543 default: 6544 switch (nxgep->platform_type) { 6545 case P_NEPTUNE_NIU: 6546 case P_NEPTUNE_ATLAS_2PORT: 6547 nports = 2; 6548 break; 6549 case P_NEPTUNE_ATLAS_4PORT: 6550 case P_NEPTUNE_MARAMBA_P0: 6551 case P_NEPTUNE_MARAMBA_P1: 6552 case P_NEPTUNE_ALONSO: 6553 nports = 4; 6554 break; 6555 default: 6556 break; 6557 } 6558 break; 6559 } 6560 6561 return (nports); 6562 } 6563 6564 /* 6565 * The following two functions are to support 6566 * PSARC/2007/453 MSI-X interrupt limit override. 6567 */ 6568 static int 6569 nxge_create_msi_property(p_nxge_t nxgep) 6570 { 6571 int nmsi; 6572 extern int ncpus; 6573 6574 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==>nxge_create_msi_property")); 6575 6576 switch (nxgep->mac.portmode) { 6577 case PORT_10G_COPPER: 6578 case PORT_10G_FIBER: 6579 (void) ddi_prop_create(DDI_DEV_T_NONE, nxgep->dip, 6580 DDI_PROP_CANSLEEP, "#msix-request", NULL, 0); 6581 /* 6582 * The maximum MSI-X requested will be 8. 
6583 * If the # of CPUs is less than 8, we will reqeust 6584 * # MSI-X based on the # of CPUs. 6585 */ 6586 if (ncpus >= NXGE_MSIX_REQUEST_10G) { 6587 nmsi = NXGE_MSIX_REQUEST_10G; 6588 } else { 6589 nmsi = ncpus; 6590 } 6591 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6592 "==>nxge_create_msi_property(10G): exists 0x%x (nmsi %d)", 6593 ddi_prop_exists(DDI_DEV_T_NONE, nxgep->dip, 6594 DDI_PROP_CANSLEEP, "#msix-request"), nmsi)); 6595 break; 6596 6597 default: 6598 nmsi = NXGE_MSIX_REQUEST_1G; 6599 NXGE_DEBUG_MSG((nxgep, MOD_CTL, 6600 "==>nxge_create_msi_property(1G): exists 0x%x (nmsi %d)", 6601 ddi_prop_exists(DDI_DEV_T_NONE, nxgep->dip, 6602 DDI_PROP_CANSLEEP, "#msix-request"), nmsi)); 6603 break; 6604 } 6605 6606 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<==nxge_create_msi_property")); 6607 return (nmsi); 6608 } 6609 6610 /* ARGSUSED */ 6611 static int 6612 nxge_get_def_val(nxge_t *nxgep, mac_prop_id_t pr_num, uint_t pr_valsize, 6613 void *pr_val) 6614 { 6615 int err = 0; 6616 link_flowctrl_t fl; 6617 6618 switch (pr_num) { 6619 case DLD_PROP_AUTONEG: 6620 *(uint8_t *)pr_val = 1; 6621 break; 6622 case DLD_PROP_FLOWCTRL: 6623 if (pr_valsize < sizeof (link_flowctrl_t)) 6624 return (EINVAL); 6625 fl = LINK_FLOWCTRL_RX; 6626 bcopy(&fl, pr_val, sizeof (fl)); 6627 break; 6628 case DLD_PROP_ADV_1000FDX_CAP: 6629 case DLD_PROP_EN_1000FDX_CAP: 6630 *(uint8_t *)pr_val = 1; 6631 break; 6632 case DLD_PROP_ADV_100FDX_CAP: 6633 case DLD_PROP_EN_100FDX_CAP: 6634 *(uint8_t *)pr_val = 1; 6635 break; 6636 default: 6637 err = ENOTSUP; 6638 break; 6639 } 6640 return (err); 6641 } 6642