1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 #pragma ident "%Z%%M% %I% %E% SMI" 27 28 /* 29 * SunOs MT STREAMS Hydra 10Gb Ethernet Device Driver. 30 */ 31 #include <hxge_impl.h> 32 #include <hxge_pfc.h> 33 34 /* 35 * PSARC/2007/453 MSI-X interrupt limit override 36 * (This PSARC case is limited to MSI-X vectors 37 * and SPARC platforms only). 38 */ 39 #if defined(_BIG_ENDIAN) 40 uint32_t hxge_msi_enable = 2; 41 #else 42 uint32_t hxge_msi_enable = 1; 43 #endif 44 45 /* 46 * Globals: tunable parameters (/etc/system or adb) 47 * 48 */ 49 uint32_t hxge_rbr_size = HXGE_RBR_RBB_DEFAULT; 50 uint32_t hxge_rbr_spare_size = 0; 51 uint32_t hxge_rcr_size = HXGE_RCR_DEFAULT; 52 uint32_t hxge_tx_ring_size = HXGE_TX_RING_DEFAULT; 53 uint32_t hxge_bcopy_thresh = TX_BCOPY_MAX; 54 uint32_t hxge_dvma_thresh = TX_FASTDVMA_MIN; 55 uint32_t hxge_dma_stream_thresh = TX_STREAM_MIN; 56 uint32_t hxge_jumbo_mtu = TX_JUMBO_MTU; 57 boolean_t hxge_jumbo_enable = B_FALSE; 58 59 static hxge_os_mutex_t hxgedebuglock; 60 static int hxge_debug_init = 0; 61 62 /* 63 * Debugging flags: 64 * hxge_no_tx_lb : transmit load balancing 65 * hxge_tx_lb_policy: 0 - TCP/UDP port (default) 66 * 1 - From the Stack 67 * 2 - Destination IP Address 68 */ 69 uint32_t hxge_no_tx_lb = 0; 70 uint32_t hxge_tx_lb_policy = HXGE_TX_LB_TCPUDP; 71 72 /* 73 * Add tunable to reduce the amount of time spent in the 74 * ISR doing Rx Processing. 75 */ 76 uint32_t hxge_max_rx_pkts = 1024; 77 78 /* 79 * Tunables to manage the receive buffer blocks. 80 * 81 * hxge_rx_threshold_hi: copy all buffers. 82 * hxge_rx_bcopy_size_type: receive buffer block size type. 83 * hxge_rx_threshold_lo: copy only up to tunable block size type. 
84 */ 85 hxge_rxbuf_threshold_t hxge_rx_threshold_hi = HXGE_RX_COPY_6; 86 hxge_rxbuf_type_t hxge_rx_buf_size_type = RCR_PKTBUFSZ_0; 87 hxge_rxbuf_threshold_t hxge_rx_threshold_lo = HXGE_RX_COPY_3; 88 89 rtrace_t hpi_rtracebuf; 90 91 /* 92 * Function Prototypes 93 */ 94 static int hxge_attach(dev_info_t *, ddi_attach_cmd_t); 95 static int hxge_detach(dev_info_t *, ddi_detach_cmd_t); 96 static void hxge_unattach(p_hxge_t); 97 98 static hxge_status_t hxge_setup_system_dma_pages(p_hxge_t); 99 100 static hxge_status_t hxge_setup_mutexes(p_hxge_t); 101 static void hxge_destroy_mutexes(p_hxge_t); 102 103 static hxge_status_t hxge_map_regs(p_hxge_t hxgep); 104 static void hxge_unmap_regs(p_hxge_t hxgep); 105 106 hxge_status_t hxge_add_intrs(p_hxge_t hxgep); 107 static hxge_status_t hxge_add_soft_intrs(p_hxge_t hxgep); 108 static void hxge_remove_intrs(p_hxge_t hxgep); 109 static void hxge_remove_soft_intrs(p_hxge_t hxgep); 110 static hxge_status_t hxge_add_intrs_adv(p_hxge_t hxgep); 111 static hxge_status_t hxge_add_intrs_adv_type(p_hxge_t, uint32_t); 112 static hxge_status_t hxge_add_intrs_adv_type_fix(p_hxge_t, uint32_t); 113 void hxge_intrs_enable(p_hxge_t hxgep); 114 static void hxge_intrs_disable(p_hxge_t hxgep); 115 static void hxge_suspend(p_hxge_t); 116 static hxge_status_t hxge_resume(p_hxge_t); 117 hxge_status_t hxge_setup_dev(p_hxge_t); 118 static void hxge_destroy_dev(p_hxge_t); 119 hxge_status_t hxge_alloc_mem_pool(p_hxge_t); 120 static void hxge_free_mem_pool(p_hxge_t); 121 static hxge_status_t hxge_alloc_rx_mem_pool(p_hxge_t); 122 static void hxge_free_rx_mem_pool(p_hxge_t); 123 static hxge_status_t hxge_alloc_tx_mem_pool(p_hxge_t); 124 static void hxge_free_tx_mem_pool(p_hxge_t); 125 static hxge_status_t hxge_dma_mem_alloc(p_hxge_t, dma_method_t, 126 struct ddi_dma_attr *, size_t, ddi_device_acc_attr_t *, uint_t, 127 p_hxge_dma_common_t); 128 static void hxge_dma_mem_free(p_hxge_dma_common_t); 129 static hxge_status_t hxge_alloc_rx_buf_dma(p_hxge_t, uint16_t, 130 p_hxge_dma_common_t *, size_t, size_t, uint32_t *); 131 static void hxge_free_rx_buf_dma(p_hxge_t, p_hxge_dma_common_t, uint32_t); 132 static hxge_status_t hxge_alloc_rx_cntl_dma(p_hxge_t, uint16_t, 133 p_hxge_dma_common_t *, size_t); 134 static void hxge_free_rx_cntl_dma(p_hxge_t, p_hxge_dma_common_t); 135 static hxge_status_t hxge_alloc_tx_buf_dma(p_hxge_t, uint16_t, 136 p_hxge_dma_common_t *, size_t, size_t, uint32_t *); 137 static void hxge_free_tx_buf_dma(p_hxge_t, p_hxge_dma_common_t, uint32_t); 138 static hxge_status_t hxge_alloc_tx_cntl_dma(p_hxge_t, uint16_t, 139 p_hxge_dma_common_t *, size_t); 140 static void hxge_free_tx_cntl_dma(p_hxge_t, p_hxge_dma_common_t); 141 static int hxge_init_common_dev(p_hxge_t); 142 static void hxge_uninit_common_dev(p_hxge_t); 143 144 /* 145 * The next declarations are for the GLDv3 interface. 
146 */ 147 static int hxge_m_start(void *); 148 static void hxge_m_stop(void *); 149 static int hxge_m_unicst(void *, const uint8_t *); 150 static int hxge_m_multicst(void *, boolean_t, const uint8_t *); 151 static int hxge_m_promisc(void *, boolean_t); 152 static void hxge_m_ioctl(void *, queue_t *, mblk_t *); 153 static void hxge_m_resources(void *); 154 static hxge_status_t hxge_mac_register(p_hxge_t hxgep); 155 156 static int hxge_m_mmac_add(void *arg, mac_multi_addr_t *maddr); 157 static int hxge_m_mmac_remove(void *arg, mac_addr_slot_t slot); 158 static int hxge_m_mmac_modify(void *arg, mac_multi_addr_t *maddr); 159 static int hxge_m_mmac_get(void *arg, mac_multi_addr_t *maddr); 160 static boolean_t hxge_m_getcapab(void *, mac_capab_t, void *); 161 162 #define HXGE_MAGIC 0x4E584745UL 163 #define MAX_DUMP_SZ 256 164 165 #define HXGE_M_CALLBACK_FLAGS (MC_RESOURCES | MC_IOCTL | MC_GETCAPAB) 166 167 extern mblk_t *hxge_m_tx(void *arg, mblk_t *mp); 168 extern hxge_status_t hxge_pfc_set_default_mac_addr(p_hxge_t hxgep); 169 170 static mac_callbacks_t hxge_m_callbacks = { 171 HXGE_M_CALLBACK_FLAGS, 172 hxge_m_stat, 173 hxge_m_start, 174 hxge_m_stop, 175 hxge_m_promisc, 176 hxge_m_multicst, 177 hxge_m_unicst, 178 hxge_m_tx, 179 hxge_m_resources, 180 hxge_m_ioctl, 181 hxge_m_getcapab 182 }; 183 184 /* Enable debug messages as necessary. */ 185 uint64_t hxge_debug_level = 0x0; 186 187 /* 188 * This list contains the instance structures for the Hydra 189 * devices present in the system. The lock exists to guarantee 190 * mutually exclusive access to the list. 191 */ 192 void *hxge_list = NULL; 193 void *hxge_hw_list = NULL; 194 hxge_os_mutex_t hxge_common_lock; 195 196 extern uint64_t hpi_debug_level; 197 198 extern hxge_status_t hxge_ldgv_init(); 199 extern hxge_status_t hxge_ldgv_uninit(); 200 extern hxge_status_t hxge_intr_ldgv_init(); 201 extern void hxge_fm_init(p_hxge_t hxgep, ddi_device_acc_attr_t *reg_attr, 202 ddi_device_acc_attr_t *desc_attr, ddi_dma_attr_t *dma_attr); 203 extern void hxge_fm_fini(p_hxge_t hxgep); 204 205 /* 206 * Count used to maintain the number of buffers being used 207 * by Hydra instances and loaned up to the upper layers. 208 */ 209 uint32_t hxge_mblks_pending = 0; 210 211 /* 212 * Device register access attributes for PIO. 213 */ 214 static ddi_device_acc_attr_t hxge_dev_reg_acc_attr = { 215 DDI_DEVICE_ATTR_V0, 216 DDI_STRUCTURE_LE_ACC, 217 DDI_STRICTORDER_ACC, 218 }; 219 220 /* 221 * Device descriptor access attributes for DMA. 222 */ 223 static ddi_device_acc_attr_t hxge_dev_desc_dma_acc_attr = { 224 DDI_DEVICE_ATTR_V0, 225 DDI_STRUCTURE_LE_ACC, 226 DDI_STRICTORDER_ACC 227 }; 228 229 /* 230 * Device buffer access attributes for DMA. 231 */ 232 static ddi_device_acc_attr_t hxge_dev_buf_dma_acc_attr = { 233 DDI_DEVICE_ATTR_V0, 234 DDI_STRUCTURE_BE_ACC, 235 DDI_STRICTORDER_ACC 236 }; 237 238 ddi_dma_attr_t hxge_desc_dma_attr = { 239 DMA_ATTR_V0, /* version number. */ 240 0, /* low address */ 241 0xffffffffffffffff, /* high address */ 242 0xffffffffffffffff, /* address counter max */ 243 0x100000, /* alignment */ 244 0xfc00fc, /* dlim_burstsizes */ 245 0x1, /* minimum transfer size */ 246 0xffffffffffffffff, /* maximum transfer size */ 247 0xffffffffffffffff, /* maximum segment size */ 248 1, /* scatter/gather list length */ 249 (unsigned int)1, /* granularity */ 250 0 /* attribute flags */ 251 }; 252 253 ddi_dma_attr_t hxge_tx_dma_attr = { 254 DMA_ATTR_V0, /* version number. 
*/ 255 0, /* low address */ 256 0xffffffffffffffff, /* high address */ 257 0xffffffffffffffff, /* address counter max */ 258 #if defined(_BIG_ENDIAN) 259 0x2000, /* alignment */ 260 #else 261 0x1000, /* alignment */ 262 #endif 263 0xfc00fc, /* dlim_burstsizes */ 264 0x1, /* minimum transfer size */ 265 0xffffffffffffffff, /* maximum transfer size */ 266 0xffffffffffffffff, /* maximum segment size */ 267 5, /* scatter/gather list length */ 268 (unsigned int)1, /* granularity */ 269 0 /* attribute flags */ 270 }; 271 272 ddi_dma_attr_t hxge_rx_dma_attr = { 273 DMA_ATTR_V0, /* version number. */ 274 0, /* low address */ 275 0xffffffffffffffff, /* high address */ 276 0xffffffffffffffff, /* address counter max */ 277 0x10000, /* alignment */ 278 0xfc00fc, /* dlim_burstsizes */ 279 0x1, /* minimum transfer size */ 280 0xffffffffffffffff, /* maximum transfer size */ 281 0xffffffffffffffff, /* maximum segment size */ 282 1, /* scatter/gather list length */ 283 (unsigned int)1, /* granularity */ 284 DDI_DMA_RELAXED_ORDERING /* attribute flags */ 285 }; 286 287 ddi_dma_lim_t hxge_dma_limits = { 288 (uint_t)0, /* dlim_addr_lo */ 289 (uint_t)0xffffffff, /* dlim_addr_hi */ 290 (uint_t)0xffffffff, /* dlim_cntr_max */ 291 (uint_t)0xfc00fc, /* dlim_burstsizes for 32 and 64 bit xfers */ 292 0x1, /* dlim_minxfer */ 293 1024 /* dlim_speed */ 294 }; 295 296 dma_method_t hxge_force_dma = DVMA; 297 298 /* 299 * dma chunk sizes. 300 * 301 * Try to allocate the largest possible size 302 * so that fewer number of dma chunks would be managed 303 */ 304 size_t alloc_sizes[] = { 305 0x1000, 0x2000, 0x4000, 0x8000, 306 0x10000, 0x20000, 0x40000, 0x80000, 307 0x100000, 0x200000, 0x400000, 0x800000, 0x1000000 308 }; 309 310 /* 311 * Translate "dev_t" to a pointer to the associated "dev_info_t". 312 */ 313 static int 314 hxge_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 315 { 316 p_hxge_t hxgep = NULL; 317 int instance; 318 int status = DDI_SUCCESS; 319 320 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_attach")); 321 322 /* 323 * Get the device instance since we'll need to setup or retrieve a soft 324 * state for this instance. 
325 */ 326 instance = ddi_get_instance(dip); 327 328 switch (cmd) { 329 case DDI_ATTACH: 330 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_ATTACH")); 331 break; 332 333 case DDI_RESUME: 334 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_RESUME")); 335 hxgep = (p_hxge_t)ddi_get_soft_state(hxge_list, instance); 336 if (hxgep == NULL) { 337 status = DDI_FAILURE; 338 break; 339 } 340 if (hxgep->dip != dip) { 341 status = DDI_FAILURE; 342 break; 343 } 344 if (hxgep->suspended == DDI_PM_SUSPEND) { 345 status = ddi_dev_is_needed(hxgep->dip, 0, 1); 346 } else { 347 (void) hxge_resume(hxgep); 348 } 349 goto hxge_attach_exit; 350 351 case DDI_PM_RESUME: 352 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_PM_RESUME")); 353 hxgep = (p_hxge_t)ddi_get_soft_state(hxge_list, instance); 354 if (hxgep == NULL) { 355 status = DDI_FAILURE; 356 break; 357 } 358 if (hxgep->dip != dip) { 359 status = DDI_FAILURE; 360 break; 361 } 362 (void) hxge_resume(hxgep); 363 goto hxge_attach_exit; 364 365 default: 366 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing unknown")); 367 status = DDI_FAILURE; 368 goto hxge_attach_exit; 369 } 370 371 if (ddi_soft_state_zalloc(hxge_list, instance) == DDI_FAILURE) { 372 status = DDI_FAILURE; 373 HXGE_ERROR_MSG((hxgep, DDI_CTL, 374 "ddi_soft_state_zalloc failed")); 375 goto hxge_attach_exit; 376 } 377 378 hxgep = ddi_get_soft_state(hxge_list, instance); 379 if (hxgep == NULL) { 380 status = HXGE_ERROR; 381 HXGE_ERROR_MSG((hxgep, DDI_CTL, 382 "ddi_get_soft_state failed")); 383 goto hxge_attach_fail2; 384 } 385 386 hxgep->drv_state = 0; 387 hxgep->dip = dip; 388 hxgep->instance = instance; 389 hxgep->p_dip = ddi_get_parent(dip); 390 hxgep->hxge_debug_level = hxge_debug_level; 391 hpi_debug_level = hxge_debug_level; 392 393 hxge_fm_init(hxgep, &hxge_dev_reg_acc_attr, &hxge_dev_desc_dma_acc_attr, 394 &hxge_rx_dma_attr); 395 396 status = hxge_map_regs(hxgep); 397 if (status != HXGE_OK) { 398 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "hxge_map_regs failed")); 399 goto hxge_attach_fail3; 400 } 401 402 status = hxge_init_common_dev(hxgep); 403 if (status != HXGE_OK) { 404 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 405 "hxge_init_common_dev failed")); 406 goto hxge_attach_fail4; 407 } 408 409 /* 410 * Setup the Ndd parameters for this instance. 411 */ 412 hxge_init_param(hxgep); 413 414 /* 415 * Setup Register Tracing Buffer. 416 */ 417 hpi_rtrace_buf_init((rtrace_t *)&hpi_rtracebuf); 418 419 /* init stats ptr */ 420 hxge_init_statsp(hxgep); 421 422 status = hxge_get_config_properties(hxgep); 423 if (status != HXGE_OK) { 424 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "get_hw create failed")); 425 goto hxge_attach_fail; 426 } 427 428 /* 429 * Setup the Kstats for the driver. 
430 */ 431 hxge_setup_kstats(hxgep); 432 hxge_setup_param(hxgep); 433 434 status = hxge_setup_system_dma_pages(hxgep); 435 if (status != HXGE_OK) { 436 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "set dma page failed")); 437 goto hxge_attach_fail; 438 } 439 440 hxge_hw_id_init(hxgep); 441 hxge_hw_init_niu_common(hxgep); 442 443 status = hxge_setup_mutexes(hxgep); 444 if (status != HXGE_OK) { 445 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "set mutex failed")); 446 goto hxge_attach_fail; 447 } 448 449 status = hxge_setup_dev(hxgep); 450 if (status != DDI_SUCCESS) { 451 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "set dev failed")); 452 goto hxge_attach_fail; 453 } 454 455 status = hxge_add_intrs(hxgep); 456 if (status != DDI_SUCCESS) { 457 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "add_intr failed")); 458 goto hxge_attach_fail; 459 } 460 461 status = hxge_add_soft_intrs(hxgep); 462 if (status != DDI_SUCCESS) { 463 HXGE_DEBUG_MSG((hxgep, HXGE_ERR_CTL, "add_soft_intr failed")); 464 goto hxge_attach_fail; 465 } 466 467 /* 468 * Enable interrupts. 469 */ 470 hxge_intrs_enable(hxgep); 471 472 if ((status = hxge_mac_register(hxgep)) != HXGE_OK) { 473 HXGE_DEBUG_MSG((hxgep, DDI_CTL, 474 "unable to register to mac layer (%d)", status)); 475 goto hxge_attach_fail; 476 } 477 mac_link_update(hxgep->mach, LINK_STATE_UP); 478 479 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "registered to mac (instance %d)", 480 instance)); 481 482 goto hxge_attach_exit; 483 484 hxge_attach_fail: 485 hxge_unattach(hxgep); 486 goto hxge_attach_fail1; 487 488 hxge_attach_fail5: 489 /* 490 * Tear down the ndd parameters setup. 491 */ 492 hxge_destroy_param(hxgep); 493 494 /* 495 * Tear down the kstat setup. 496 */ 497 hxge_destroy_kstats(hxgep); 498 499 hxge_attach_fail4: 500 if (hxgep->hxge_hw_p) { 501 hxge_uninit_common_dev(hxgep); 502 hxgep->hxge_hw_p = NULL; 503 } 504 hxge_attach_fail3: 505 /* 506 * Unmap the register setup. 507 */ 508 hxge_unmap_regs(hxgep); 509 510 hxge_fm_fini(hxgep); 511 512 hxge_attach_fail2: 513 ddi_soft_state_free(hxge_list, hxgep->instance); 514 515 hxge_attach_fail1: 516 if (status != HXGE_OK) 517 status = (HXGE_ERROR | HXGE_DDI_FAILED); 518 hxgep = NULL; 519 520 hxge_attach_exit: 521 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_attach status = 0x%08x", 522 status)); 523 524 return (status); 525 } 526 527 static int 528 hxge_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 529 { 530 int status = DDI_SUCCESS; 531 int instance; 532 p_hxge_t hxgep = NULL; 533 534 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_detach")); 535 instance = ddi_get_instance(dip); 536 hxgep = ddi_get_soft_state(hxge_list, instance); 537 if (hxgep == NULL) { 538 status = DDI_FAILURE; 539 goto hxge_detach_exit; 540 } 541 542 switch (cmd) { 543 case DDI_DETACH: 544 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_DETACH")); 545 break; 546 547 case DDI_PM_SUSPEND: 548 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_PM_SUSPEND")); 549 hxgep->suspended = DDI_PM_SUSPEND; 550 hxge_suspend(hxgep); 551 break; 552 553 case DDI_SUSPEND: 554 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_SUSPEND")); 555 if (hxgep->suspended != DDI_PM_SUSPEND) { 556 hxgep->suspended = DDI_SUSPEND; 557 hxge_suspend(hxgep); 558 } 559 break; 560 561 default: 562 status = DDI_FAILURE; 563 break; 564 } 565 566 if (cmd != DDI_DETACH) 567 goto hxge_detach_exit; 568 569 /* 570 * Stop the xcvr polling. 
 */
	hxgep->suspended = cmd;

	if (hxgep->mach && (status = mac_unregister(hxgep->mach)) != 0) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "<== hxge_detach status = 0x%08X", status));
		return (DDI_FAILURE);
	}
	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
	    "<== hxge_detach (mac_unregister) status = 0x%08X", status));

	hxge_unattach(hxgep);
	hxgep = NULL;

hxge_detach_exit:
	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_detach status = 0x%08X",
	    status));

	return (status);
}

static void
hxge_unattach(p_hxge_t hxgep)
{
	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_unattach"));

	if (hxgep == NULL || hxgep->dev_regs == NULL) {
		return;
	}

	if (hxgep->hxge_hw_p) {
		hxge_uninit_common_dev(hxgep);
		hxgep->hxge_hw_p = NULL;
	}

	if (hxgep->hxge_timerid) {
		hxge_stop_timer(hxgep, hxgep->hxge_timerid);
		hxgep->hxge_timerid = 0;
	}

	/* Stop any further interrupts. */
	hxge_remove_intrs(hxgep);

	/* Remove soft interrupts. */
	hxge_remove_soft_intrs(hxgep);

	/* Stop the device and free resources. */
	hxge_destroy_dev(hxgep);

	/* Tear down the ndd parameters setup. */
	hxge_destroy_param(hxgep);

	/* Tear down the kstat setup. */
	hxge_destroy_kstats(hxgep);

	/*
	 * Remove the list of ndd parameters which were set up during attach.
	 */
	if (hxgep->dip) {
		HXGE_DEBUG_MSG((hxgep, OBP_CTL,
		    " hxge_unattach: remove all properties"));
		(void) ddi_prop_remove_all(hxgep->dip);
	}

	/*
	 * Reset RDC, TDC, PFC, and VMAC blocks from PEU to clear any
	 * previous state before unmapping the registers.
	 */
	HXGE_REG_WR32(hxgep->hpi_handle, BLOCK_RESET, 0x0000001E);
	HXGE_DELAY(1000);

	/*
	 * Unmap the register setup.
	 */
	hxge_unmap_regs(hxgep);

	hxge_fm_fini(hxgep);

	/*
	 * Free the soft state data structures allocated with this instance.
	 */
	ddi_soft_state_free(hxge_list, hxgep->instance);

	/* Destroy all mutexes.
 */
	hxge_destroy_mutexes(hxgep);

	HXGE_DEBUG_MSG((NULL, DDI_CTL, "<== hxge_unattach"));
}

static hxge_status_t
hxge_map_regs(p_hxge_t hxgep)
{
	int		ddi_status = DDI_SUCCESS;
	p_dev_regs_t	dev_regs;

#ifdef	HXGE_DEBUG
	char		*sysname;
#endif

	off_t		regsize;
	hxge_status_t	status = HXGE_OK;
	int		nregs;

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_map_regs"));

	if (ddi_dev_nregs(hxgep->dip, &nregs) != DDI_SUCCESS)
		return (HXGE_ERROR);

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "hxge_map_regs: nregs: %d", nregs));

	hxgep->dev_regs = NULL;
	dev_regs = KMEM_ZALLOC(sizeof (dev_regs_t), KM_SLEEP);
	dev_regs->hxge_regh = NULL;
	dev_regs->hxge_pciregh = NULL;
	dev_regs->hxge_msix_regh = NULL;

	(void) ddi_dev_regsize(hxgep->dip, 0, &regsize);
	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
	    "hxge_map_regs: pci config size 0x%x", regsize));

	ddi_status = ddi_regs_map_setup(hxgep->dip, 0,
	    (caddr_t *)&(dev_regs->hxge_pciregp), 0, 0,
	    &hxge_dev_reg_acc_attr, &dev_regs->hxge_pciregh);
	if (ddi_status != DDI_SUCCESS) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "ddi_map_regs, hxge bus config regs failed"));
		goto hxge_map_regs_fail0;
	}

	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
	    "hxge_map_reg: PCI config addr 0x%0llx handle 0x%0llx",
	    dev_regs->hxge_pciregp,
	    dev_regs->hxge_pciregh));

	(void) ddi_dev_regsize(hxgep->dip, 1, &regsize);
	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
	    "hxge_map_regs: pio size 0x%x", regsize));

	/* set up the device mapped register */
	ddi_status = ddi_regs_map_setup(hxgep->dip, 1,
	    (caddr_t *)&(dev_regs->hxge_regp), 0, 0,
	    &hxge_dev_reg_acc_attr, &dev_regs->hxge_regh);

	if (ddi_status != DDI_SUCCESS) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "ddi_map_regs for Hydra global reg failed"));
		goto hxge_map_regs_fail1;
	}

	/* set up the msi/msi-x mapped register */
	(void) ddi_dev_regsize(hxgep->dip, 2, &regsize);
	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
	    "hxge_map_regs: msix size 0x%x", regsize));

	ddi_status = ddi_regs_map_setup(hxgep->dip, 2,
	    (caddr_t *)&(dev_regs->hxge_msix_regp), 0, 0,
	    &hxge_dev_reg_acc_attr, &dev_regs->hxge_msix_regh);

	if (ddi_status != DDI_SUCCESS) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "ddi_map_regs for msi reg failed"));
		goto hxge_map_regs_fail2;
	}

	hxgep->dev_regs = dev_regs;

	HPI_PCI_ACC_HANDLE_SET(hxgep, dev_regs->hxge_pciregh);
	HPI_PCI_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_pciregp);
	HPI_MSI_ACC_HANDLE_SET(hxgep, dev_regs->hxge_msix_regh);
	HPI_MSI_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_msix_regp);

	HPI_ACC_HANDLE_SET(hxgep, dev_regs->hxge_regh);
	HPI_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_regp);

	HPI_REG_ACC_HANDLE_SET(hxgep, dev_regs->hxge_regh);
	HPI_REG_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_regp);

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "hxge_map_reg: hardware addr 0x%0llx "
	    " handle 0x%0llx", dev_regs->hxge_regp, dev_regs->hxge_regh));

	goto hxge_map_regs_exit;

hxge_map_regs_fail3:
	if (dev_regs->hxge_msix_regh) {
		ddi_regs_map_free(&dev_regs->hxge_msix_regh);
	}

hxge_map_regs_fail2:
	if (dev_regs->hxge_regh) {
		ddi_regs_map_free(&dev_regs->hxge_regh);
	}

hxge_map_regs_fail1:
	if (dev_regs->hxge_pciregh) {
		ddi_regs_map_free(&dev_regs->hxge_pciregh);
	}

hxge_map_regs_fail0:
HXGE_DEBUG_MSG((hxgep, DDI_CTL, "Freeing register set memory")); 770 kmem_free(dev_regs, sizeof (dev_regs_t)); 771 772 hxge_map_regs_exit: 773 if (ddi_status != DDI_SUCCESS) 774 status |= (HXGE_ERROR | HXGE_DDI_FAILED); 775 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_map_regs")); 776 return (status); 777 } 778 779 static void 780 hxge_unmap_regs(p_hxge_t hxgep) 781 { 782 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_unmap_regs")); 783 if (hxgep->dev_regs) { 784 if (hxgep->dev_regs->hxge_pciregh) { 785 HXGE_DEBUG_MSG((hxgep, DDI_CTL, 786 "==> hxge_unmap_regs: bus")); 787 ddi_regs_map_free(&hxgep->dev_regs->hxge_pciregh); 788 hxgep->dev_regs->hxge_pciregh = NULL; 789 } 790 791 if (hxgep->dev_regs->hxge_regh) { 792 HXGE_DEBUG_MSG((hxgep, DDI_CTL, 793 "==> hxge_unmap_regs: device registers")); 794 ddi_regs_map_free(&hxgep->dev_regs->hxge_regh); 795 hxgep->dev_regs->hxge_regh = NULL; 796 } 797 798 if (hxgep->dev_regs->hxge_msix_regh) { 799 HXGE_DEBUG_MSG((hxgep, DDI_CTL, 800 "==> hxge_unmap_regs: device interrupts")); 801 ddi_regs_map_free(&hxgep->dev_regs->hxge_msix_regh); 802 hxgep->dev_regs->hxge_msix_regh = NULL; 803 } 804 kmem_free(hxgep->dev_regs, sizeof (dev_regs_t)); 805 hxgep->dev_regs = NULL; 806 } 807 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_unmap_regs")); 808 } 809 810 static hxge_status_t 811 hxge_setup_mutexes(p_hxge_t hxgep) 812 { 813 int ddi_status = DDI_SUCCESS; 814 hxge_status_t status = HXGE_OK; 815 816 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_setup_mutexes")); 817 818 /* 819 * Get the interrupt cookie so the mutexes can be Initialised. 820 */ 821 ddi_status = ddi_get_iblock_cookie(hxgep->dip, 0, 822 &hxgep->interrupt_cookie); 823 824 if (ddi_status != DDI_SUCCESS) { 825 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 826 "<== hxge_setup_mutexes: failed 0x%x", ddi_status)); 827 goto hxge_setup_mutexes_exit; 828 } 829 830 /* 831 * Initialize mutex's for this device. 832 */ 833 MUTEX_INIT(hxgep->genlock, NULL, 834 MUTEX_DRIVER, (void *) hxgep->interrupt_cookie); 835 MUTEX_INIT(&hxgep->ouraddr_lock, NULL, 836 MUTEX_DRIVER, (void *) hxgep->interrupt_cookie); 837 RW_INIT(&hxgep->filter_lock, NULL, 838 RW_DRIVER, (void *) hxgep->interrupt_cookie); 839 MUTEX_INIT(&hxgep->pio_lock, NULL, 840 MUTEX_DRIVER, (void *) hxgep->interrupt_cookie); 841 842 hxge_setup_mutexes_exit: 843 HXGE_DEBUG_MSG((hxgep, DDI_CTL, 844 "<== hxge_setup_mutexes status = %x", status)); 845 846 if (ddi_status != DDI_SUCCESS) 847 status |= (HXGE_ERROR | HXGE_DDI_FAILED); 848 849 return (status); 850 } 851 852 static void 853 hxge_destroy_mutexes(p_hxge_t hxgep) 854 { 855 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_destroy_mutexes")); 856 RW_DESTROY(&hxgep->filter_lock); 857 MUTEX_DESTROY(&hxgep->ouraddr_lock); 858 MUTEX_DESTROY(hxgep->genlock); 859 MUTEX_DESTROY(&hxgep->pio_lock); 860 861 if (hxge_debug_init == 1) { 862 MUTEX_DESTROY(&hxgedebuglock); 863 hxge_debug_init = 0; 864 } 865 866 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_destroy_mutexes")); 867 } 868 869 hxge_status_t 870 hxge_init(p_hxge_t hxgep) 871 { 872 hxge_status_t status = HXGE_OK; 873 874 HXGE_DEBUG_MSG((hxgep, STR_CTL, "==> hxge_init")); 875 876 if (hxgep->drv_state & STATE_HW_INITIALIZED) { 877 return (status); 878 } 879 880 /* 881 * Allocate system memory for the receive/transmit buffer blocks and 882 * receive/transmit descriptor rings. 
883 */ 884 status = hxge_alloc_mem_pool(hxgep); 885 if (status != HXGE_OK) { 886 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "alloc mem failed\n")); 887 goto hxge_init_fail1; 888 } 889 890 /* 891 * Initialize and enable TXDMA channels. 892 */ 893 status = hxge_init_txdma_channels(hxgep); 894 if (status != HXGE_OK) { 895 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init txdma failed\n")); 896 goto hxge_init_fail3; 897 } 898 899 /* 900 * Initialize and enable RXDMA channels. 901 */ 902 status = hxge_init_rxdma_channels(hxgep); 903 if (status != HXGE_OK) { 904 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init rxdma failed\n")); 905 goto hxge_init_fail4; 906 } 907 908 /* 909 * Initialize TCAM 910 */ 911 status = hxge_classify_init(hxgep); 912 if (status != HXGE_OK) { 913 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init classify failed\n")); 914 goto hxge_init_fail5; 915 } 916 917 /* 918 * Initialize the VMAC block. 919 */ 920 status = hxge_vmac_init(hxgep); 921 if (status != HXGE_OK) { 922 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init MAC failed\n")); 923 goto hxge_init_fail5; 924 } 925 926 /* Bringup - this may be unnecessary when PXE and FCODE available */ 927 status = hxge_pfc_set_default_mac_addr(hxgep); 928 if (status != HXGE_OK) { 929 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 930 "Default Address Failure\n")); 931 goto hxge_init_fail5; 932 } 933 934 hxge_intrs_enable(hxgep); 935 936 /* 937 * Enable hardware interrupts. 938 */ 939 hxge_intr_hw_enable(hxgep); 940 hxgep->drv_state |= STATE_HW_INITIALIZED; 941 942 goto hxge_init_exit; 943 944 hxge_init_fail5: 945 hxge_uninit_rxdma_channels(hxgep); 946 hxge_init_fail4: 947 hxge_uninit_txdma_channels(hxgep); 948 hxge_init_fail3: 949 hxge_free_mem_pool(hxgep); 950 hxge_init_fail1: 951 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 952 "<== hxge_init status (failed) = 0x%08x", status)); 953 return (status); 954 955 hxge_init_exit: 956 957 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_init status = 0x%08x", 958 status)); 959 960 return (status); 961 } 962 963 timeout_id_t 964 hxge_start_timer(p_hxge_t hxgep, fptrv_t func, int msec) 965 { 966 if ((hxgep->suspended == 0) || (hxgep->suspended == DDI_RESUME)) { 967 return (timeout(func, (caddr_t)hxgep, 968 drv_usectohz(1000 * msec))); 969 } 970 return (NULL); 971 } 972 973 /*ARGSUSED*/ 974 void 975 hxge_stop_timer(p_hxge_t hxgep, timeout_id_t timerid) 976 { 977 if (timerid) { 978 (void) untimeout(timerid); 979 } 980 } 981 982 void 983 hxge_uninit(p_hxge_t hxgep) 984 { 985 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_uninit")); 986 987 if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) { 988 HXGE_DEBUG_MSG((hxgep, DDI_CTL, 989 "==> hxge_uninit: not initialized")); 990 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_uninit")); 991 return; 992 } 993 994 /* Stop timer */ 995 if (hxgep->hxge_timerid) { 996 hxge_stop_timer(hxgep, hxgep->hxge_timerid); 997 hxgep->hxge_timerid = 0; 998 } 999 1000 (void) hxge_intr_hw_disable(hxgep); 1001 1002 /* Reset the receive VMAC side. */ 1003 (void) hxge_rx_vmac_disable(hxgep); 1004 1005 /* Free classification resources */ 1006 (void) hxge_classify_uninit(hxgep); 1007 1008 /* Reset the transmit/receive DMA side. */ 1009 (void) hxge_txdma_hw_mode(hxgep, HXGE_DMA_STOP); 1010 (void) hxge_rxdma_hw_mode(hxgep, HXGE_DMA_STOP); 1011 1012 hxge_uninit_txdma_channels(hxgep); 1013 hxge_uninit_rxdma_channels(hxgep); 1014 1015 /* Reset the transmit VMAC side. 
*/ 1016 (void) hxge_tx_vmac_disable(hxgep); 1017 1018 hxge_free_mem_pool(hxgep); 1019 1020 hxgep->drv_state &= ~STATE_HW_INITIALIZED; 1021 1022 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_uninit")); 1023 } 1024 1025 void 1026 hxge_get64(p_hxge_t hxgep, p_mblk_t mp) 1027 { 1028 #if defined(__i386) 1029 size_t reg; 1030 #else 1031 uint64_t reg; 1032 #endif 1033 uint64_t regdata; 1034 int i, retry; 1035 1036 bcopy((char *)mp->b_rptr, (char *)®, sizeof (uint64_t)); 1037 regdata = 0; 1038 retry = 1; 1039 1040 for (i = 0; i < retry; i++) { 1041 HXGE_REG_RD64(hxgep->hpi_handle, reg, ®data); 1042 } 1043 bcopy((char *)®data, (char *)mp->b_rptr, sizeof (uint64_t)); 1044 } 1045 1046 void 1047 hxge_put64(p_hxge_t hxgep, p_mblk_t mp) 1048 { 1049 #if defined(__i386) 1050 size_t reg; 1051 #else 1052 uint64_t reg; 1053 #endif 1054 uint64_t buf[2]; 1055 1056 bcopy((char *)mp->b_rptr, (char *)&buf[0], 2 * sizeof (uint64_t)); 1057 #if defined(__i386) 1058 reg = (size_t)buf[0]; 1059 #else 1060 reg = buf[0]; 1061 #endif 1062 1063 HXGE_HPI_PIO_WRITE64(hxgep->hpi_handle, reg, buf[1]); 1064 } 1065 1066 /*ARGSUSED*/ 1067 /*VARARGS*/ 1068 void 1069 hxge_debug_msg(p_hxge_t hxgep, uint64_t level, char *fmt, ...) 1070 { 1071 char msg_buffer[1048]; 1072 char prefix_buffer[32]; 1073 int instance; 1074 uint64_t debug_level; 1075 int cmn_level = CE_CONT; 1076 va_list ap; 1077 1078 debug_level = (hxgep == NULL) ? hxge_debug_level : 1079 hxgep->hxge_debug_level; 1080 1081 if ((level & debug_level) || (level == HXGE_NOTE) || 1082 (level == HXGE_ERR_CTL)) { 1083 /* do the msg processing */ 1084 if (hxge_debug_init == 0) { 1085 MUTEX_INIT(&hxgedebuglock, NULL, MUTEX_DRIVER, NULL); 1086 hxge_debug_init = 1; 1087 } 1088 1089 MUTEX_ENTER(&hxgedebuglock); 1090 1091 if ((level & HXGE_NOTE)) { 1092 cmn_level = CE_NOTE; 1093 } 1094 1095 if (level & HXGE_ERR_CTL) { 1096 cmn_level = CE_WARN; 1097 } 1098 1099 va_start(ap, fmt); 1100 (void) vsprintf(msg_buffer, fmt, ap); 1101 va_end(ap); 1102 1103 if (hxgep == NULL) { 1104 instance = -1; 1105 (void) sprintf(prefix_buffer, "%s :", "hxge"); 1106 } else { 1107 instance = hxgep->instance; 1108 (void) sprintf(prefix_buffer, 1109 "%s%d :", "hxge", instance); 1110 } 1111 1112 MUTEX_EXIT(&hxgedebuglock); 1113 cmn_err(cmn_level, "%s %s\n", prefix_buffer, msg_buffer); 1114 } 1115 } 1116 1117 char * 1118 hxge_dump_packet(char *addr, int size) 1119 { 1120 uchar_t *ap = (uchar_t *)addr; 1121 int i; 1122 static char etherbuf[1024]; 1123 char *cp = etherbuf; 1124 char digits[] = "0123456789abcdef"; 1125 1126 if (!size) 1127 size = 60; 1128 1129 if (size > MAX_DUMP_SZ) { 1130 /* Dump the leading bytes */ 1131 for (i = 0; i < MAX_DUMP_SZ / 2; i++) { 1132 if (*ap > 0x0f) 1133 *cp++ = digits[*ap >> 4]; 1134 *cp++ = digits[*ap++ & 0xf]; 1135 *cp++ = ':'; 1136 } 1137 for (i = 0; i < 20; i++) 1138 *cp++ = '.'; 1139 /* Dump the last MAX_DUMP_SZ/2 bytes */ 1140 ap = (uchar_t *)(addr + (size - MAX_DUMP_SZ / 2)); 1141 for (i = 0; i < MAX_DUMP_SZ / 2; i++) { 1142 if (*ap > 0x0f) 1143 *cp++ = digits[*ap >> 4]; 1144 *cp++ = digits[*ap++ & 0xf]; 1145 *cp++ = ':'; 1146 } 1147 } else { 1148 for (i = 0; i < size; i++) { 1149 if (*ap > 0x0f) 1150 *cp++ = digits[*ap >> 4]; 1151 *cp++ = digits[*ap++ & 0xf]; 1152 *cp++ = ':'; 1153 } 1154 } 1155 *--cp = 0; 1156 return (etherbuf); 1157 } 1158 1159 static void 1160 hxge_suspend(p_hxge_t hxgep) 1161 { 1162 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_suspend")); 1163 1164 hxge_intrs_disable(hxgep); 1165 hxge_destroy_dev(hxgep); 1166 1167 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== 
hxge_suspend")); 1168 } 1169 1170 static hxge_status_t 1171 hxge_resume(p_hxge_t hxgep) 1172 { 1173 hxge_status_t status = HXGE_OK; 1174 1175 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_resume")); 1176 hxgep->suspended = DDI_RESUME; 1177 1178 (void) hxge_rxdma_hw_mode(hxgep, HXGE_DMA_START); 1179 (void) hxge_txdma_hw_mode(hxgep, HXGE_DMA_START); 1180 1181 (void) hxge_rx_vmac_enable(hxgep); 1182 (void) hxge_tx_vmac_enable(hxgep); 1183 1184 hxge_intrs_enable(hxgep); 1185 1186 hxgep->suspended = 0; 1187 1188 HXGE_DEBUG_MSG((hxgep, DDI_CTL, 1189 "<== hxge_resume status = 0x%x", status)); 1190 1191 return (status); 1192 } 1193 1194 hxge_status_t 1195 hxge_setup_dev(p_hxge_t hxgep) 1196 { 1197 hxge_status_t status = HXGE_OK; 1198 1199 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_setup_dev")); 1200 1201 status = hxge_link_init(hxgep); 1202 if (fm_check_acc_handle(hxgep->dev_regs->hxge_regh) != DDI_FM_OK) { 1203 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 1204 "Bad register acc handle")); 1205 status = HXGE_ERROR; 1206 } 1207 1208 if (status != HXGE_OK) { 1209 HXGE_DEBUG_MSG((hxgep, MAC_CTL, 1210 " hxge_setup_dev status (link init 0x%08x)", status)); 1211 goto hxge_setup_dev_exit; 1212 } 1213 1214 hxge_setup_dev_exit: 1215 HXGE_DEBUG_MSG((hxgep, DDI_CTL, 1216 "<== hxge_setup_dev status = 0x%08x", status)); 1217 1218 return (status); 1219 } 1220 1221 static void 1222 hxge_destroy_dev(p_hxge_t hxgep) 1223 { 1224 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_destroy_dev")); 1225 1226 (void) hxge_hw_stop(hxgep); 1227 1228 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_destroy_dev")); 1229 } 1230 1231 static hxge_status_t 1232 hxge_setup_system_dma_pages(p_hxge_t hxgep) 1233 { 1234 int ddi_status = DDI_SUCCESS; 1235 uint_t count; 1236 ddi_dma_cookie_t cookie; 1237 uint_t iommu_pagesize; 1238 hxge_status_t status = HXGE_OK; 1239 1240 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_setup_system_dma_pages")); 1241 1242 hxgep->sys_page_sz = ddi_ptob(hxgep->dip, (ulong_t)1); 1243 iommu_pagesize = dvma_pagesize(hxgep->dip); 1244 1245 HXGE_DEBUG_MSG((hxgep, DDI_CTL, 1246 " hxge_setup_system_dma_pages: page %d (ddi_ptob %d) " 1247 " default_block_size %d iommu_pagesize %d", 1248 hxgep->sys_page_sz, ddi_ptob(hxgep->dip, (ulong_t)1), 1249 hxgep->rx_default_block_size, iommu_pagesize)); 1250 1251 if (iommu_pagesize != 0) { 1252 if (hxgep->sys_page_sz == iommu_pagesize) { 1253 /* Hydra support up to 8K pages */ 1254 if (iommu_pagesize > 0x2000) 1255 hxgep->sys_page_sz = 0x2000; 1256 } else { 1257 if (hxgep->sys_page_sz > iommu_pagesize) 1258 hxgep->sys_page_sz = iommu_pagesize; 1259 } 1260 } 1261 1262 hxgep->sys_page_mask = ~(hxgep->sys_page_sz - 1); 1263 1264 HXGE_DEBUG_MSG((hxgep, DDI_CTL, 1265 "==> hxge_setup_system_dma_pages: page %d (ddi_ptob %d) " 1266 "default_block_size %d page mask %d", 1267 hxgep->sys_page_sz, ddi_ptob(hxgep->dip, (ulong_t)1), 1268 hxgep->rx_default_block_size, hxgep->sys_page_mask)); 1269 1270 switch (hxgep->sys_page_sz) { 1271 default: 1272 hxgep->sys_page_sz = 0x1000; 1273 hxgep->sys_page_mask = ~(hxgep->sys_page_sz - 1); 1274 hxgep->rx_default_block_size = 0x1000; 1275 hxgep->rx_bksize_code = RBR_BKSIZE_4K; 1276 break; 1277 case 0x1000: 1278 hxgep->rx_default_block_size = 0x1000; 1279 hxgep->rx_bksize_code = RBR_BKSIZE_4K; 1280 break; 1281 case 0x2000: 1282 hxgep->rx_default_block_size = 0x2000; 1283 hxgep->rx_bksize_code = RBR_BKSIZE_8K; 1284 break; 1285 } 1286 1287 hxge_rx_dma_attr.dma_attr_align = hxgep->sys_page_sz; 1288 hxge_tx_dma_attr.dma_attr_align = hxgep->sys_page_sz; 1289 
hxge_desc_dma_attr.dma_attr_align = hxgep->sys_page_sz; 1290 1291 /* 1292 * Get the system DMA burst size. 1293 */ 1294 ddi_status = ddi_dma_alloc_handle(hxgep->dip, &hxge_tx_dma_attr, 1295 DDI_DMA_DONTWAIT, 0, &hxgep->dmasparehandle); 1296 if (ddi_status != DDI_SUCCESS) { 1297 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 1298 "ddi_dma_alloc_handle: failed status 0x%x", ddi_status)); 1299 goto hxge_get_soft_properties_exit; 1300 } 1301 1302 ddi_status = ddi_dma_addr_bind_handle(hxgep->dmasparehandle, NULL, 1303 (caddr_t)hxgep->dmasparehandle, sizeof (hxgep->dmasparehandle), 1304 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, 0, 1305 &cookie, &count); 1306 if (ddi_status != DDI_DMA_MAPPED) { 1307 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 1308 "Binding spare handle to find system burstsize failed.")); 1309 ddi_status = DDI_FAILURE; 1310 goto hxge_get_soft_properties_fail1; 1311 } 1312 1313 hxgep->sys_burst_sz = ddi_dma_burstsizes(hxgep->dmasparehandle); 1314 (void) ddi_dma_unbind_handle(hxgep->dmasparehandle); 1315 1316 hxge_get_soft_properties_fail1: 1317 ddi_dma_free_handle(&hxgep->dmasparehandle); 1318 1319 hxge_get_soft_properties_exit: 1320 1321 if (ddi_status != DDI_SUCCESS) 1322 status |= (HXGE_ERROR | HXGE_DDI_FAILED); 1323 1324 HXGE_DEBUG_MSG((hxgep, DDI_CTL, 1325 "<== hxge_setup_system_dma_pages status = 0x%08x", status)); 1326 1327 return (status); 1328 } 1329 1330 hxge_status_t 1331 hxge_alloc_mem_pool(p_hxge_t hxgep) 1332 { 1333 hxge_status_t status = HXGE_OK; 1334 1335 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_alloc_mem_pool")); 1336 1337 status = hxge_alloc_rx_mem_pool(hxgep); 1338 if (status != HXGE_OK) { 1339 return (HXGE_ERROR); 1340 } 1341 1342 status = hxge_alloc_tx_mem_pool(hxgep); 1343 if (status != HXGE_OK) { 1344 hxge_free_rx_mem_pool(hxgep); 1345 return (HXGE_ERROR); 1346 } 1347 1348 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_alloc_mem_pool")); 1349 return (HXGE_OK); 1350 } 1351 1352 static void 1353 hxge_free_mem_pool(p_hxge_t hxgep) 1354 { 1355 HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_free_mem_pool")); 1356 1357 hxge_free_rx_mem_pool(hxgep); 1358 hxge_free_tx_mem_pool(hxgep); 1359 1360 HXGE_DEBUG_MSG((hxgep, MEM_CTL, "<== hxge_free_mem_pool")); 1361 } 1362 1363 static hxge_status_t 1364 hxge_alloc_rx_mem_pool(p_hxge_t hxgep) 1365 { 1366 int i, j; 1367 uint32_t ndmas, st_rdc; 1368 p_hxge_dma_pt_cfg_t p_all_cfgp; 1369 p_hxge_hw_pt_cfg_t p_cfgp; 1370 p_hxge_dma_pool_t dma_poolp; 1371 p_hxge_dma_common_t *dma_buf_p; 1372 p_hxge_dma_pool_t dma_cntl_poolp; 1373 p_hxge_dma_common_t *dma_cntl_p; 1374 size_t rx_buf_alloc_size; 1375 size_t rx_cntl_alloc_size; 1376 uint32_t *num_chunks; /* per dma */ 1377 hxge_status_t status = HXGE_OK; 1378 1379 uint32_t hxge_port_rbr_size; 1380 uint32_t hxge_port_rbr_spare_size; 1381 uint32_t hxge_port_rcr_size; 1382 1383 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_rx_mem_pool")); 1384 1385 p_all_cfgp = (p_hxge_dma_pt_cfg_t)&hxgep->pt_config; 1386 p_cfgp = (p_hxge_hw_pt_cfg_t)&p_all_cfgp->hw_config; 1387 st_rdc = p_cfgp->start_rdc; 1388 ndmas = p_cfgp->max_rdcs; 1389 1390 HXGE_DEBUG_MSG((hxgep, DMA_CTL, 1391 " hxge_alloc_rx_mem_pool st_rdc %d ndmas %d", st_rdc, ndmas)); 1392 1393 /* 1394 * Allocate memory for each receive DMA channel. 
1395 */ 1396 dma_poolp = (p_hxge_dma_pool_t)KMEM_ZALLOC(sizeof (hxge_dma_pool_t), 1397 KM_SLEEP); 1398 dma_buf_p = (p_hxge_dma_common_t *)KMEM_ZALLOC( 1399 sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP); 1400 1401 dma_cntl_poolp = (p_hxge_dma_pool_t) 1402 KMEM_ZALLOC(sizeof (hxge_dma_pool_t), KM_SLEEP); 1403 dma_cntl_p = (p_hxge_dma_common_t *)KMEM_ZALLOC( 1404 sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP); 1405 1406 num_chunks = (uint32_t *)KMEM_ZALLOC(sizeof (uint32_t) * ndmas, 1407 KM_SLEEP); 1408 1409 /* 1410 * Assume that each DMA channel will be configured with default block 1411 * size. rbr block counts are mod of batch count (16). 1412 */ 1413 hxge_port_rbr_size = p_all_cfgp->rbr_size; 1414 hxge_port_rcr_size = p_all_cfgp->rcr_size; 1415 1416 if (!hxge_port_rbr_size) { 1417 hxge_port_rbr_size = HXGE_RBR_RBB_DEFAULT; 1418 } 1419 1420 if (hxge_port_rbr_size % HXGE_RXDMA_POST_BATCH) { 1421 hxge_port_rbr_size = (HXGE_RXDMA_POST_BATCH * 1422 (hxge_port_rbr_size / HXGE_RXDMA_POST_BATCH + 1)); 1423 } 1424 1425 p_all_cfgp->rbr_size = hxge_port_rbr_size; 1426 hxge_port_rbr_spare_size = hxge_rbr_spare_size; 1427 1428 if (hxge_port_rbr_spare_size % HXGE_RXDMA_POST_BATCH) { 1429 hxge_port_rbr_spare_size = (HXGE_RXDMA_POST_BATCH * 1430 (hxge_port_rbr_spare_size / HXGE_RXDMA_POST_BATCH + 1)); 1431 } 1432 1433 rx_buf_alloc_size = (hxgep->rx_default_block_size * 1434 (hxge_port_rbr_size + hxge_port_rbr_spare_size)); 1435 1436 /* 1437 * Addresses of receive block ring, receive completion ring and the 1438 * mailbox must be all cache-aligned (64 bytes). 1439 */ 1440 rx_cntl_alloc_size = hxge_port_rbr_size + hxge_port_rbr_spare_size; 1441 rx_cntl_alloc_size *= (sizeof (rx_desc_t)); 1442 rx_cntl_alloc_size += (sizeof (rcr_entry_t) * hxge_port_rcr_size); 1443 rx_cntl_alloc_size += sizeof (rxdma_mailbox_t); 1444 1445 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_alloc_rx_mem_pool: " 1446 "hxge_port_rbr_size = %d hxge_port_rbr_spare_size = %d " 1447 "hxge_port_rcr_size = %d rx_cntl_alloc_size = %d", 1448 hxge_port_rbr_size, hxge_port_rbr_spare_size, 1449 hxge_port_rcr_size, rx_cntl_alloc_size)); 1450 1451 hxgep->hxge_port_rbr_size = hxge_port_rbr_size; 1452 hxgep->hxge_port_rcr_size = hxge_port_rcr_size; 1453 1454 /* 1455 * Allocate memory for receive buffers and descriptor rings. Replace 1456 * allocation functions with interface functions provided by the 1457 * partition manager when it is available. 1458 */ 1459 /* 1460 * Allocate memory for the receive buffer blocks. 1461 */ 1462 for (i = 0; i < ndmas; i++) { 1463 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 1464 " hxge_alloc_rx_mem_pool to alloc mem: " 1465 " dma %d dma_buf_p %llx &dma_buf_p %llx", 1466 i, dma_buf_p[i], &dma_buf_p[i])); 1467 1468 num_chunks[i] = 0; 1469 1470 status = hxge_alloc_rx_buf_dma(hxgep, st_rdc, &dma_buf_p[i], 1471 rx_buf_alloc_size, hxgep->rx_default_block_size, 1472 &num_chunks[i]); 1473 if (status != HXGE_OK) { 1474 break; 1475 } 1476 1477 st_rdc++; 1478 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 1479 " hxge_alloc_rx_mem_pool DONE alloc mem: " 1480 "dma %d dma_buf_p %llx &dma_buf_p %llx", i, 1481 dma_buf_p[i], &dma_buf_p[i])); 1482 } 1483 1484 if (i < ndmas) { 1485 goto hxge_alloc_rx_mem_fail1; 1486 } 1487 1488 /* 1489 * Allocate memory for descriptor rings and mailbox. 
1490 */ 1491 st_rdc = p_cfgp->start_rdc; 1492 for (j = 0; j < ndmas; j++) { 1493 status = hxge_alloc_rx_cntl_dma(hxgep, st_rdc, &dma_cntl_p[j], 1494 rx_cntl_alloc_size); 1495 if (status != HXGE_OK) { 1496 break; 1497 } 1498 st_rdc++; 1499 } 1500 1501 if (j < ndmas) { 1502 goto hxge_alloc_rx_mem_fail2; 1503 } 1504 1505 dma_poolp->ndmas = ndmas; 1506 dma_poolp->num_chunks = num_chunks; 1507 dma_poolp->buf_allocated = B_TRUE; 1508 hxgep->rx_buf_pool_p = dma_poolp; 1509 dma_poolp->dma_buf_pool_p = dma_buf_p; 1510 1511 dma_cntl_poolp->ndmas = ndmas; 1512 dma_cntl_poolp->buf_allocated = B_TRUE; 1513 hxgep->rx_cntl_pool_p = dma_cntl_poolp; 1514 dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p; 1515 1516 goto hxge_alloc_rx_mem_pool_exit; 1517 1518 hxge_alloc_rx_mem_fail2: 1519 /* Free control buffers */ 1520 j--; 1521 HXGE_DEBUG_MSG((hxgep, DMA_CTL, 1522 "==> hxge_alloc_rx_mem_pool: freeing control bufs (%d)", j)); 1523 for (; j >= 0; j--) { 1524 hxge_free_rx_cntl_dma(hxgep, 1525 (p_hxge_dma_common_t)dma_cntl_p[j]); 1526 HXGE_DEBUG_MSG((hxgep, DMA_CTL, 1527 "==> hxge_alloc_rx_mem_pool: control bufs freed (%d)", j)); 1528 } 1529 HXGE_DEBUG_MSG((hxgep, DMA_CTL, 1530 "==> hxge_alloc_rx_mem_pool: control bufs freed (%d)", j)); 1531 1532 hxge_alloc_rx_mem_fail1: 1533 /* Free data buffers */ 1534 i--; 1535 HXGE_DEBUG_MSG((hxgep, DMA_CTL, 1536 "==> hxge_alloc_rx_mem_pool: freeing data bufs (%d)", i)); 1537 for (; i >= 0; i--) { 1538 hxge_free_rx_buf_dma(hxgep, (p_hxge_dma_common_t)dma_buf_p[i], 1539 num_chunks[i]); 1540 } 1541 HXGE_DEBUG_MSG((hxgep, DMA_CTL, 1542 "==> hxge_alloc_rx_mem_pool: data bufs freed (%d)", i)); 1543 1544 KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas); 1545 KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t)); 1546 KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t)); 1547 KMEM_FREE(dma_cntl_poolp, sizeof (hxge_dma_pool_t)); 1548 KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_hxge_dma_common_t)); 1549 1550 hxge_alloc_rx_mem_pool_exit: 1551 HXGE_DEBUG_MSG((hxgep, DMA_CTL, 1552 "<== hxge_alloc_rx_mem_pool:status 0x%08x", status)); 1553 1554 return (status); 1555 } 1556 1557 static void 1558 hxge_free_rx_mem_pool(p_hxge_t hxgep) 1559 { 1560 uint32_t i, ndmas; 1561 p_hxge_dma_pool_t dma_poolp; 1562 p_hxge_dma_common_t *dma_buf_p; 1563 p_hxge_dma_pool_t dma_cntl_poolp; 1564 p_hxge_dma_common_t *dma_cntl_p; 1565 uint32_t *num_chunks; 1566 1567 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_free_rx_mem_pool")); 1568 1569 dma_poolp = hxgep->rx_buf_pool_p; 1570 if (dma_poolp == NULL || (!dma_poolp->buf_allocated)) { 1571 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_free_rx_mem_pool " 1572 "(null rx buf pool or buf not allocated")); 1573 return; 1574 } 1575 1576 dma_cntl_poolp = hxgep->rx_cntl_pool_p; 1577 if (dma_cntl_poolp == NULL || (!dma_cntl_poolp->buf_allocated)) { 1578 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 1579 "<== hxge_free_rx_mem_pool " 1580 "(null rx cntl buf pool or cntl buf not allocated")); 1581 return; 1582 } 1583 1584 dma_buf_p = dma_poolp->dma_buf_pool_p; 1585 num_chunks = dma_poolp->num_chunks; 1586 1587 dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p; 1588 ndmas = dma_cntl_poolp->ndmas; 1589 1590 for (i = 0; i < ndmas; i++) { 1591 hxge_free_rx_buf_dma(hxgep, dma_buf_p[i], num_chunks[i]); 1592 } 1593 1594 for (i = 0; i < ndmas; i++) { 1595 hxge_free_rx_cntl_dma(hxgep, dma_cntl_p[i]); 1596 } 1597 1598 for (i = 0; i < ndmas; i++) { 1599 KMEM_FREE(dma_buf_p[i], 1600 sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK); 1601 KMEM_FREE(dma_cntl_p[i], sizeof (hxge_dma_common_t)); 1602 } 1603 1604 
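	/*
	 * At this point the per-channel DMA buffers, the control/descriptor
	 * DMA memory, and the per-channel chunk arrays have all been
	 * released; what remains is the bookkeeping allocated in
	 * hxge_alloc_rx_mem_pool(): the chunk-count array, the two arrays of
	 * per-channel pointers, and the pool descriptors themselves.
	 */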
KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas); 1605 KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_hxge_dma_common_t)); 1606 KMEM_FREE(dma_cntl_poolp, sizeof (hxge_dma_pool_t)); 1607 KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t)); 1608 KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t)); 1609 1610 hxgep->rx_buf_pool_p = NULL; 1611 hxgep->rx_cntl_pool_p = NULL; 1612 1613 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_free_rx_mem_pool")); 1614 } 1615 1616 static hxge_status_t 1617 hxge_alloc_rx_buf_dma(p_hxge_t hxgep, uint16_t dma_channel, 1618 p_hxge_dma_common_t *dmap, 1619 size_t alloc_size, size_t block_size, uint32_t *num_chunks) 1620 { 1621 p_hxge_dma_common_t rx_dmap; 1622 hxge_status_t status = HXGE_OK; 1623 size_t total_alloc_size; 1624 size_t allocated = 0; 1625 int i, size_index, array_size; 1626 1627 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_rx_buf_dma")); 1628 1629 rx_dmap = (p_hxge_dma_common_t) 1630 KMEM_ZALLOC(sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK, KM_SLEEP); 1631 1632 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 1633 " alloc_rx_buf_dma rdc %d asize %x bsize %x bbuf %llx ", 1634 dma_channel, alloc_size, block_size, dmap)); 1635 1636 total_alloc_size = alloc_size; 1637 1638 i = 0; 1639 size_index = 0; 1640 array_size = sizeof (alloc_sizes) / sizeof (size_t); 1641 while ((alloc_sizes[size_index] < alloc_size) && 1642 (size_index < array_size)) 1643 size_index++; 1644 if (size_index >= array_size) { 1645 size_index = array_size - 1; 1646 } 1647 1648 while ((allocated < total_alloc_size) && 1649 (size_index >= 0) && (i < HXGE_DMA_BLOCK)) { 1650 rx_dmap[i].dma_chunk_index = i; 1651 rx_dmap[i].block_size = block_size; 1652 rx_dmap[i].alength = alloc_sizes[size_index]; 1653 rx_dmap[i].orig_alength = rx_dmap[i].alength; 1654 rx_dmap[i].nblocks = alloc_sizes[size_index] / block_size; 1655 rx_dmap[i].dma_channel = dma_channel; 1656 rx_dmap[i].contig_alloc_type = B_FALSE; 1657 1658 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 1659 "alloc_rx_buf_dma rdc %d chunk %d bufp %llx size %x " 1660 "i %d nblocks %d alength %d", 1661 dma_channel, i, &rx_dmap[i], block_size, 1662 i, rx_dmap[i].nblocks, rx_dmap[i].alength)); 1663 status = hxge_dma_mem_alloc(hxgep, hxge_force_dma, 1664 &hxge_rx_dma_attr, rx_dmap[i].alength, 1665 &hxge_dev_buf_dma_acc_attr, 1666 DDI_DMA_READ | DDI_DMA_STREAMING, 1667 (p_hxge_dma_common_t)(&rx_dmap[i])); 1668 if (status != HXGE_OK) { 1669 HXGE_DEBUG_MSG((hxgep, DMA_CTL, 1670 " hxge_alloc_rx_buf_dma: Alloc Failed: " 1671 " for size: %d", alloc_sizes[size_index])); 1672 size_index--; 1673 } else { 1674 HXGE_DEBUG_MSG((hxgep, DMA_CTL, 1675 " alloc_rx_buf_dma allocated rdc %d " 1676 "chunk %d size %x dvma %x bufp %llx ", 1677 dma_channel, i, rx_dmap[i].alength, 1678 rx_dmap[i].ioaddr_pp, &rx_dmap[i])); 1679 i++; 1680 allocated += alloc_sizes[size_index]; 1681 } 1682 } 1683 1684 if (allocated < total_alloc_size) { 1685 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 1686 " hxge_alloc_rx_buf_dma failed due to" 1687 " allocated(%d) < required(%d)", 1688 allocated, total_alloc_size)); 1689 goto hxge_alloc_rx_mem_fail1; 1690 } 1691 1692 HXGE_DEBUG_MSG((hxgep, DMA_CTL, 1693 " alloc_rx_buf_dma rdc %d allocated %d chunks", dma_channel, i)); 1694 1695 *num_chunks = i; 1696 *dmap = rx_dmap; 1697 1698 goto hxge_alloc_rx_mem_exit; 1699 1700 hxge_alloc_rx_mem_fail1: 1701 KMEM_FREE(rx_dmap, sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK); 1702 1703 hxge_alloc_rx_mem_exit: 1704 HXGE_DEBUG_MSG((hxgep, DMA_CTL, 1705 "<== hxge_alloc_rx_buf_dma status 0x%08x", status)); 1706 1707 return (status); 1708 } 1709 1710 
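/*
 * Editor's note (illustrative sketch only, not part of the driver build):
 * hxge_alloc_rx_buf_dma() above satisfies a buffer request by walking the
 * alloc_sizes[] table -- it starts at the smallest entry that covers the
 * request (clamped to the largest entry), and whenever hxge_dma_mem_alloc()
 * fails it drops to the next smaller size, accumulating up to HXGE_DMA_BLOCK
 * chunks until the requested total is covered.  The stand-alone sketch below
 * shows only that size-selection policy; the names (demo_alloc_chunks,
 * demo_sizes, DEMO_MAX_CHUNKS) are invented for illustration and plain
 * malloc() stands in for hxge_dma_mem_alloc().
 */
#if 0	/* illustrative only -- never compiled into the driver */
#include <stdlib.h>

#define	DEMO_MAX_CHUNKS	8	/* stands in for HXGE_DMA_BLOCK */

static size_t demo_sizes[] = {
	0x1000, 0x2000, 0x4000, 0x8000, 0x10000
};

/*
 * Fill chunks[]/lens[] (each DEMO_MAX_CHUNKS long) until 'request' bytes
 * are covered.  Returns the number of chunks obtained, or -1 on failure.
 */
static int
demo_alloc_chunks(size_t request, void *chunks[], size_t lens[])
{
	int	nsizes = sizeof (demo_sizes) / sizeof (size_t);
	int	idx = 0, i = 0;
	size_t	allocated = 0;

	/* Smallest table entry that covers the request, else the largest. */
	while (idx < nsizes - 1 && demo_sizes[idx] < request)
		idx++;

	while (allocated < request && idx >= 0 && i < DEMO_MAX_CHUNKS) {
		void *p = malloc(demo_sizes[idx]);

		if (p == NULL) {
			idx--;		/* retry with a smaller chunk size */
		} else {
			chunks[i] = p;
			lens[i] = demo_sizes[idx];
			allocated += demo_sizes[idx];
			i++;
		}
	}
	return ((allocated < request) ? -1 : i);
}
#endif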
/*ARGSUSED*/ 1711 static void 1712 hxge_free_rx_buf_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap, 1713 uint32_t num_chunks) 1714 { 1715 int i; 1716 1717 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 1718 "==> hxge_free_rx_buf_dma: # of chunks %d", num_chunks)); 1719 1720 for (i = 0; i < num_chunks; i++) { 1721 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 1722 "==> hxge_free_rx_buf_dma: chunk %d dmap 0x%llx", i, dmap)); 1723 hxge_dma_mem_free(dmap++); 1724 } 1725 1726 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_free_rx_buf_dma")); 1727 } 1728 1729 /*ARGSUSED*/ 1730 static hxge_status_t 1731 hxge_alloc_rx_cntl_dma(p_hxge_t hxgep, uint16_t dma_channel, 1732 p_hxge_dma_common_t *dmap, size_t size) 1733 { 1734 p_hxge_dma_common_t rx_dmap; 1735 hxge_status_t status = HXGE_OK; 1736 1737 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_rx_cntl_dma")); 1738 1739 rx_dmap = (p_hxge_dma_common_t) 1740 KMEM_ZALLOC(sizeof (hxge_dma_common_t), KM_SLEEP); 1741 1742 rx_dmap->contig_alloc_type = B_FALSE; 1743 1744 status = hxge_dma_mem_alloc(hxgep, hxge_force_dma, 1745 &hxge_desc_dma_attr, size, &hxge_dev_desc_dma_acc_attr, 1746 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, rx_dmap); 1747 if (status != HXGE_OK) { 1748 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 1749 " hxge_alloc_rx_cntl_dma: Alloc Failed: " 1750 " for size: %d", size)); 1751 goto hxge_alloc_rx_cntl_dma_fail1; 1752 } 1753 1754 *dmap = rx_dmap; 1755 1756 goto hxge_alloc_rx_cntl_dma_exit; 1757 1758 hxge_alloc_rx_cntl_dma_fail1: 1759 KMEM_FREE(rx_dmap, sizeof (hxge_dma_common_t)); 1760 1761 hxge_alloc_rx_cntl_dma_exit: 1762 HXGE_DEBUG_MSG((hxgep, DMA_CTL, 1763 "<== hxge_alloc_rx_cntl_dma status 0x%08x", status)); 1764 1765 return (status); 1766 } 1767 1768 /*ARGSUSED*/ 1769 static void 1770 hxge_free_rx_cntl_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap) 1771 { 1772 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_free_rx_cntl_dma")); 1773 1774 hxge_dma_mem_free(dmap); 1775 1776 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_free_rx_cntl_dma")); 1777 } 1778 1779 static hxge_status_t 1780 hxge_alloc_tx_mem_pool(p_hxge_t hxgep) 1781 { 1782 hxge_status_t status = HXGE_OK; 1783 int i, j; 1784 uint32_t ndmas, st_tdc; 1785 p_hxge_dma_pt_cfg_t p_all_cfgp; 1786 p_hxge_hw_pt_cfg_t p_cfgp; 1787 p_hxge_dma_pool_t dma_poolp; 1788 p_hxge_dma_common_t *dma_buf_p; 1789 p_hxge_dma_pool_t dma_cntl_poolp; 1790 p_hxge_dma_common_t *dma_cntl_p; 1791 size_t tx_buf_alloc_size; 1792 size_t tx_cntl_alloc_size; 1793 uint32_t *num_chunks; /* per dma */ 1794 1795 HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_alloc_tx_mem_pool")); 1796 1797 p_all_cfgp = (p_hxge_dma_pt_cfg_t)&hxgep->pt_config; 1798 p_cfgp = (p_hxge_hw_pt_cfg_t)&p_all_cfgp->hw_config; 1799 st_tdc = p_cfgp->start_tdc; 1800 ndmas = p_cfgp->max_tdcs; 1801 1802 HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_alloc_tx_mem_pool: " 1803 "p_cfgp 0x%016llx start_tdc %d ndmas %d hxgep->max_tdcs %d", 1804 p_cfgp, p_cfgp->start_tdc, p_cfgp->max_tdcs, hxgep->max_tdcs)); 1805 /* 1806 * Allocate memory for each transmit DMA channel. 
 */
	dma_poolp = (p_hxge_dma_pool_t)KMEM_ZALLOC(sizeof (hxge_dma_pool_t),
	    KM_SLEEP);
	dma_buf_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
	    sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);

	dma_cntl_poolp = (p_hxge_dma_pool_t)
	    KMEM_ZALLOC(sizeof (hxge_dma_pool_t), KM_SLEEP);
	dma_cntl_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
	    sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);

	hxgep->hxge_port_tx_ring_size = hxge_tx_ring_size;

	/*
	 * Assume that each DMA channel will be configured with the default
	 * transmit buffer size for copying transmit data. (For packet payload
	 * over this limit, packets will not be copied.)
	 */
	tx_buf_alloc_size = (hxge_bcopy_thresh * hxge_tx_ring_size);

	/*
	 * Addresses of transmit descriptor ring and the mailbox must be all
	 * cache-aligned (64 bytes).
	 */
	tx_cntl_alloc_size = hxge_tx_ring_size;
	tx_cntl_alloc_size *= (sizeof (tx_desc_t));
	tx_cntl_alloc_size += sizeof (txdma_mailbox_t);

	num_chunks = (uint32_t *)KMEM_ZALLOC(sizeof (uint32_t) * ndmas,
	    KM_SLEEP);

	/*
	 * Allocate memory for transmit buffers and descriptor rings. Replace
	 * allocation functions with interface functions provided by the
	 * partition manager when it is available.
	 *
	 * Allocate memory for the transmit buffer pool.
	 */
	for (i = 0; i < ndmas; i++) {
		num_chunks[i] = 0;
		status = hxge_alloc_tx_buf_dma(hxgep, st_tdc, &dma_buf_p[i],
		    tx_buf_alloc_size, hxge_bcopy_thresh, &num_chunks[i]);
		if (status != HXGE_OK) {
			break;
		}
		st_tdc++;
	}

	if (i < ndmas) {
		goto hxge_alloc_tx_mem_pool_fail1;
	}

	st_tdc = p_cfgp->start_tdc;

	/*
	 * Allocate memory for descriptor rings and mailbox.
1863 */ 1864 for (j = 0; j < ndmas; j++) { 1865 status = hxge_alloc_tx_cntl_dma(hxgep, st_tdc, &dma_cntl_p[j], 1866 tx_cntl_alloc_size); 1867 if (status != HXGE_OK) { 1868 break; 1869 } 1870 st_tdc++; 1871 } 1872 1873 if (j < ndmas) { 1874 goto hxge_alloc_tx_mem_pool_fail2; 1875 } 1876 1877 dma_poolp->ndmas = ndmas; 1878 dma_poolp->num_chunks = num_chunks; 1879 dma_poolp->buf_allocated = B_TRUE; 1880 dma_poolp->dma_buf_pool_p = dma_buf_p; 1881 hxgep->tx_buf_pool_p = dma_poolp; 1882 1883 dma_cntl_poolp->ndmas = ndmas; 1884 dma_cntl_poolp->buf_allocated = B_TRUE; 1885 dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p; 1886 hxgep->tx_cntl_pool_p = dma_cntl_poolp; 1887 1888 HXGE_DEBUG_MSG((hxgep, MEM_CTL, 1889 "==> hxge_alloc_tx_mem_pool: start_tdc %d " 1890 "ndmas %d poolp->ndmas %d", st_tdc, ndmas, dma_poolp->ndmas)); 1891 1892 goto hxge_alloc_tx_mem_pool_exit; 1893 1894 hxge_alloc_tx_mem_pool_fail2: 1895 /* Free control buffers */ 1896 j--; 1897 for (; j >= 0; j--) { 1898 hxge_free_tx_cntl_dma(hxgep, 1899 (p_hxge_dma_common_t)dma_cntl_p[j]); 1900 } 1901 1902 hxge_alloc_tx_mem_pool_fail1: 1903 /* Free data buffers */ 1904 i--; 1905 for (; i >= 0; i--) { 1906 hxge_free_tx_buf_dma(hxgep, (p_hxge_dma_common_t)dma_buf_p[i], 1907 num_chunks[i]); 1908 } 1909 1910 KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t)); 1911 KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t)); 1912 KMEM_FREE(dma_cntl_poolp, sizeof (hxge_dma_pool_t)); 1913 KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_hxge_dma_common_t)); 1914 KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas); 1915 1916 hxge_alloc_tx_mem_pool_exit: 1917 HXGE_DEBUG_MSG((hxgep, MEM_CTL, 1918 "<== hxge_alloc_tx_mem_pool:status 0x%08x", status)); 1919 1920 return (status); 1921 } 1922 1923 static hxge_status_t 1924 hxge_alloc_tx_buf_dma(p_hxge_t hxgep, uint16_t dma_channel, 1925 p_hxge_dma_common_t *dmap, size_t alloc_size, 1926 size_t block_size, uint32_t *num_chunks) 1927 { 1928 p_hxge_dma_common_t tx_dmap; 1929 hxge_status_t status = HXGE_OK; 1930 size_t total_alloc_size; 1931 size_t allocated = 0; 1932 int i, size_index, array_size; 1933 1934 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_tx_buf_dma")); 1935 1936 tx_dmap = (p_hxge_dma_common_t) 1937 KMEM_ZALLOC(sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK, KM_SLEEP); 1938 1939 total_alloc_size = alloc_size; 1940 i = 0; 1941 size_index = 0; 1942 array_size = sizeof (alloc_sizes) / sizeof (size_t); 1943 while ((alloc_sizes[size_index] < alloc_size) && 1944 (size_index < array_size)) 1945 size_index++; 1946 if (size_index >= array_size) { 1947 size_index = array_size - 1; 1948 } 1949 1950 while ((allocated < total_alloc_size) && 1951 (size_index >= 0) && (i < HXGE_DMA_BLOCK)) { 1952 tx_dmap[i].dma_chunk_index = i; 1953 tx_dmap[i].block_size = block_size; 1954 tx_dmap[i].alength = alloc_sizes[size_index]; 1955 tx_dmap[i].orig_alength = tx_dmap[i].alength; 1956 tx_dmap[i].nblocks = alloc_sizes[size_index] / block_size; 1957 tx_dmap[i].dma_channel = dma_channel; 1958 tx_dmap[i].contig_alloc_type = B_FALSE; 1959 1960 status = hxge_dma_mem_alloc(hxgep, hxge_force_dma, 1961 &hxge_tx_dma_attr, tx_dmap[i].alength, 1962 &hxge_dev_buf_dma_acc_attr, 1963 DDI_DMA_WRITE | DDI_DMA_STREAMING, 1964 (p_hxge_dma_common_t)(&tx_dmap[i])); 1965 if (status != HXGE_OK) { 1966 HXGE_DEBUG_MSG((hxgep, DMA_CTL, 1967 " hxge_alloc_tx_buf_dma: Alloc Failed: " 1968 " for size: %d", alloc_sizes[size_index])); 1969 size_index--; 1970 } else { 1971 i++; 1972 allocated += alloc_sizes[size_index]; 1973 } 1974 } 1975 1976 if (allocated < 
total_alloc_size) { 1977 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 1978 " hxge_alloc_tx_buf_dma: failed due to" 1979 " allocated(%d) < required(%d)", 1980 allocated, total_alloc_size)); 1981 goto hxge_alloc_tx_mem_fail1; 1982 } 1983 1984 *num_chunks = i; 1985 *dmap = tx_dmap; 1986 HXGE_DEBUG_MSG((hxgep, DMA_CTL, 1987 "==> hxge_alloc_tx_buf_dma dmap 0x%016llx num chunks %d", 1988 *dmap, i)); 1989 goto hxge_alloc_tx_mem_exit; 1990 1991 hxge_alloc_tx_mem_fail1: 1992 KMEM_FREE(tx_dmap, sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK); 1993 1994 hxge_alloc_tx_mem_exit: 1995 HXGE_DEBUG_MSG((hxgep, DMA_CTL, 1996 "<== hxge_alloc_tx_buf_dma status 0x%08x", status)); 1997 1998 return (status); 1999 } 2000 2001 /*ARGSUSED*/ 2002 static void 2003 hxge_free_tx_buf_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap, 2004 uint32_t num_chunks) 2005 { 2006 int i; 2007 2008 HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_free_tx_buf_dma")); 2009 2010 for (i = 0; i < num_chunks; i++) { 2011 hxge_dma_mem_free(dmap++); 2012 } 2013 2014 HXGE_DEBUG_MSG((hxgep, MEM_CTL, "<== hxge_free_tx_buf_dma")); 2015 } 2016 2017 /*ARGSUSED*/ 2018 static hxge_status_t 2019 hxge_alloc_tx_cntl_dma(p_hxge_t hxgep, uint16_t dma_channel, 2020 p_hxge_dma_common_t *dmap, size_t size) 2021 { 2022 p_hxge_dma_common_t tx_dmap; 2023 hxge_status_t status = HXGE_OK; 2024 2025 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_tx_cntl_dma")); 2026 2027 tx_dmap = (p_hxge_dma_common_t)KMEM_ZALLOC(sizeof (hxge_dma_common_t), 2028 KM_SLEEP); 2029 2030 tx_dmap->contig_alloc_type = B_FALSE; 2031 2032 status = hxge_dma_mem_alloc(hxgep, hxge_force_dma, 2033 &hxge_desc_dma_attr, size, &hxge_dev_desc_dma_acc_attr, 2034 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, tx_dmap); 2035 if (status != HXGE_OK) { 2036 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2037 " hxge_alloc_tx_cntl_dma: Alloc Failed: " 2038 " for size: %d", size)); 2039 goto hxge_alloc_tx_cntl_dma_fail1; 2040 } 2041 2042 *dmap = tx_dmap; 2043 2044 goto hxge_alloc_tx_cntl_dma_exit; 2045 2046 hxge_alloc_tx_cntl_dma_fail1: 2047 KMEM_FREE(tx_dmap, sizeof (hxge_dma_common_t)); 2048 2049 hxge_alloc_tx_cntl_dma_exit: 2050 HXGE_DEBUG_MSG((hxgep, DMA_CTL, 2051 "<== hxge_alloc_tx_cntl_dma status 0x%08x", status)); 2052 2053 return (status); 2054 } 2055 2056 /*ARGSUSED*/ 2057 static void 2058 hxge_free_tx_cntl_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap) 2059 { 2060 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_free_tx_cntl_dma")); 2061 2062 hxge_dma_mem_free(dmap); 2063 2064 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_free_tx_cntl_dma")); 2065 } 2066 2067 static void 2068 hxge_free_tx_mem_pool(p_hxge_t hxgep) 2069 { 2070 uint32_t i, ndmas; 2071 p_hxge_dma_pool_t dma_poolp; 2072 p_hxge_dma_common_t *dma_buf_p; 2073 p_hxge_dma_pool_t dma_cntl_poolp; 2074 p_hxge_dma_common_t *dma_cntl_p; 2075 uint32_t *num_chunks; 2076 2077 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_free_tx_mem_pool")); 2078 2079 dma_poolp = hxgep->tx_buf_pool_p; 2080 if (dma_poolp == NULL || (!dma_poolp->buf_allocated)) { 2081 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 2082 "<== hxge_free_tx_mem_pool " 2083 "(null tx buf pool or buf not allocated)")); 2084 return; 2085 } 2086 2087 dma_cntl_poolp = hxgep->tx_cntl_pool_p; 2088 if (dma_cntl_poolp == NULL || (!dma_cntl_poolp->buf_allocated)) { 2089 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 2090 "<== hxge_free_tx_mem_pool " 2091 "(null tx cntl buf pool or cntl buf not allocated)")); 2092 return; 2093 } 2094 2095 dma_buf_p = dma_poolp->dma_buf_pool_p; 2096 num_chunks = dma_poolp->num_chunks; 2097 2098 dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p; 2099 ndmas
= dma_cntl_poolp->ndmas; 2100 2101 for (i = 0; i < ndmas; i++) { 2102 hxge_free_tx_buf_dma(hxgep, dma_buf_p[i], num_chunks[i]); 2103 } 2104 2105 for (i = 0; i < ndmas; i++) { 2106 hxge_free_tx_cntl_dma(hxgep, dma_cntl_p[i]); 2107 } 2108 2109 for (i = 0; i < ndmas; i++) { 2110 KMEM_FREE(dma_buf_p[i], 2111 sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK); 2112 KMEM_FREE(dma_cntl_p[i], sizeof (hxge_dma_common_t)); 2113 } 2114 2115 KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas); 2116 KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_hxge_dma_common_t)); 2117 KMEM_FREE(dma_cntl_poolp, sizeof (hxge_dma_pool_t)); 2118 KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t)); 2119 KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t)); 2120 2121 hxgep->tx_buf_pool_p = NULL; 2122 hxgep->tx_cntl_pool_p = NULL; 2123 2124 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "<== hxge_free_tx_mem_pool")); 2125 } 2126 2127 /*ARGSUSED*/ 2128 static hxge_status_t 2129 hxge_dma_mem_alloc(p_hxge_t hxgep, dma_method_t method, 2130 struct ddi_dma_attr *dma_attrp, 2131 size_t length, ddi_device_acc_attr_t *acc_attr_p, uint_t xfer_flags, 2132 p_hxge_dma_common_t dma_p) 2133 { 2134 caddr_t kaddrp; 2135 int ddi_status = DDI_SUCCESS; 2136 2137 dma_p->dma_handle = NULL; 2138 dma_p->acc_handle = NULL; 2139 dma_p->kaddrp = NULL; 2140 2141 ddi_status = ddi_dma_alloc_handle(hxgep->dip, dma_attrp, 2142 DDI_DMA_DONTWAIT, NULL, &dma_p->dma_handle); 2143 if (ddi_status != DDI_SUCCESS) { 2144 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2145 "hxge_dma_mem_alloc:ddi_dma_alloc_handle failed.")); 2146 return (HXGE_ERROR | HXGE_DDI_FAILED); 2147 } 2148 2149 ddi_status = ddi_dma_mem_alloc(dma_p->dma_handle, length, acc_attr_p, 2150 xfer_flags, DDI_DMA_DONTWAIT, 0, &kaddrp, &dma_p->alength, 2151 &dma_p->acc_handle); 2152 if (ddi_status != DDI_SUCCESS) { 2153 /* The caller will decide whether it is fatal */ 2154 HXGE_DEBUG_MSG((hxgep, DMA_CTL, 2155 "hxge_dma_mem_alloc:ddi_dma_mem_alloc failed")); 2156 ddi_dma_free_handle(&dma_p->dma_handle); 2157 dma_p->dma_handle = NULL; 2158 return (HXGE_ERROR | HXGE_DDI_FAILED); 2159 } 2160 2161 if (dma_p->alength < length) { 2162 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2163 "hxge_dma_mem_alloc:ddi_dma_mem_alloc < length.")); 2164 ddi_dma_mem_free(&dma_p->acc_handle); 2165 ddi_dma_free_handle(&dma_p->dma_handle); 2166 dma_p->acc_handle = NULL; 2167 dma_p->dma_handle = NULL; 2168 return (HXGE_ERROR); 2169 } 2170 2171 ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL, 2172 kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0, 2173 &dma_p->dma_cookie, &dma_p->ncookies); 2174 if (ddi_status != DDI_DMA_MAPPED) { 2175 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2176 "hxge_dma_mem_alloc:ddi_dma_addr_bind failed " 2177 "(status 0x%x ncookies %d.)", ddi_status, dma_p->ncookies)); 2178 if (dma_p->acc_handle) { 2179 ddi_dma_mem_free(&dma_p->acc_handle); 2180 dma_p->acc_handle = NULL; 2181 } 2182 ddi_dma_free_handle(&dma_p->dma_handle); 2183 dma_p->dma_handle = NULL; 2184 return (HXGE_ERROR | HXGE_DDI_FAILED); 2185 } 2186 2187 if (dma_p->ncookies != 1) { 2188 HXGE_DEBUG_MSG((hxgep, DMA_CTL, 2189 "hxge_dma_mem_alloc:ddi_dma_addr_bind > 1 cookie" 2190 "(status 0x%x ncookies %d.)", ddi_status, dma_p->ncookies)); 2191 if (dma_p->acc_handle) { 2192 ddi_dma_mem_free(&dma_p->acc_handle); 2193 dma_p->acc_handle = NULL; 2194 } 2195 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 2196 ddi_dma_free_handle(&dma_p->dma_handle); 2197 dma_p->dma_handle = NULL; 2198 return (HXGE_ERROR); 2199 } 2200 2201 dma_p->kaddrp = kaddrp; 2202 #if defined(__i386) 2203
dma_p->ioaddr_pp = 2204 (unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress; 2205 #else 2206 dma_p->ioaddr_pp = (unsigned char *) dma_p->dma_cookie.dmac_laddress; 2207 #endif 2208 2209 HPI_DMA_ACC_HANDLE_SET(dma_p, dma_p->acc_handle); 2210 2211 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_dma_mem_alloc: " 2212 "dma buffer allocated: dma_p $%p " 2213 "return dmac_laddress from cookie $%p dmac_size %d " 2214 "dma_p->ioaddr_p $%p " 2215 "dma_p->orig_ioaddr_p $%p " 2216 "orig_vatopa $%p " 2217 "alength %d (0x%x) " 2218 "kaddrp $%p " 2219 "length %d (0x%x)", 2220 dma_p, 2221 dma_p->dma_cookie.dmac_laddress, 2222 dma_p->dma_cookie.dmac_size, 2223 dma_p->ioaddr_pp, 2224 dma_p->orig_ioaddr_pp, 2225 dma_p->orig_vatopa, 2226 dma_p->alength, dma_p->alength, 2227 kaddrp, 2228 length, length)); 2229 2230 return (HXGE_OK); 2231 } 2232 2233 static void 2234 hxge_dma_mem_free(p_hxge_dma_common_t dma_p) 2235 { 2236 if (dma_p->dma_handle != NULL) { 2237 if (dma_p->ncookies) { 2238 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 2239 dma_p->ncookies = 0; 2240 } 2241 ddi_dma_free_handle(&dma_p->dma_handle); 2242 dma_p->dma_handle = NULL; 2243 } 2244 if (dma_p->acc_handle != NULL) { 2245 ddi_dma_mem_free(&dma_p->acc_handle); 2246 dma_p->acc_handle = NULL; 2247 HPI_DMA_ACC_HANDLE_SET(dma_p, NULL); 2248 } 2249 dma_p->kaddrp = NULL; 2250 dma_p->alength = 0; 2251 } 2252 2253 /* 2254 * hxge_m_start() -- start transmitting and receiving. 2255 * 2256 * This function is called by the MAC layer when the first 2257 * stream is opened to prepare the hardware for sending 2258 * and receiving packets. 2259 */ 2260 static int 2261 hxge_m_start(void *arg) 2262 { 2263 p_hxge_t hxgep = (p_hxge_t)arg; 2264 2265 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_start")); 2266 2267 MUTEX_ENTER(hxgep->genlock); 2268 2269 if (hxge_init(hxgep) != DDI_SUCCESS) { 2270 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2271 "<== hxge_m_start: initialization failed")); 2272 MUTEX_EXIT(hxgep->genlock); 2273 return (EIO); 2274 } 2275 2276 if (hxgep->hxge_mac_state != HXGE_MAC_STARTED) { 2277 /* 2278 * Start timer to check the system error and tx hangs 2279 */ 2280 hxgep->hxge_timerid = hxge_start_timer(hxgep, 2281 hxge_check_hw_state, HXGE_CHECK_TIMER); 2282 2283 hxgep->hxge_mac_state = HXGE_MAC_STARTED; 2284 } 2285 2286 MUTEX_EXIT(hxgep->genlock); 2287 2288 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_start")); 2289 2290 return (0); 2291 } 2292 2293 /* 2294 * hxge_m_stop(): stop transmitting and receiving.
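*
* This stops the hardware state-check timer started by hxge_m_start()
* and shuts the hardware down (hxge_uninit) under genlock.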
2295 */ 2296 static void 2297 hxge_m_stop(void *arg) 2298 { 2299 p_hxge_t hxgep = (p_hxge_t)arg; 2300 2301 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_stop")); 2302 2303 if (hxgep->hxge_timerid) { 2304 hxge_stop_timer(hxgep, hxgep->hxge_timerid); 2305 hxgep->hxge_timerid = 0; 2306 } 2307 2308 MUTEX_ENTER(hxgep->genlock); 2309 2310 hxge_uninit(hxgep); 2311 2312 hxgep->hxge_mac_state = HXGE_MAC_STOPPED; 2313 2314 MUTEX_EXIT(hxgep->genlock); 2315 2316 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_stop")); 2317 } 2318 2319 static int 2320 hxge_m_unicst(void *arg, const uint8_t *macaddr) 2321 { 2322 p_hxge_t hxgep = (p_hxge_t)arg; 2323 struct ether_addr addrp; 2324 hxge_status_t status; 2325 2326 HXGE_DEBUG_MSG((hxgep, MAC_CTL, "==> hxge_m_unicst")); 2327 2328 bcopy(macaddr, (uint8_t *)&addrp, ETHERADDRL); 2329 2330 status = hxge_set_mac_addr(hxgep, &addrp); 2331 if (status != HXGE_OK) { 2332 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2333 "<== hxge_m_unicst: set unitcast failed")); 2334 return (EINVAL); 2335 } 2336 2337 HXGE_DEBUG_MSG((hxgep, MAC_CTL, "<== hxge_m_unicst")); 2338 2339 return (0); 2340 } 2341 2342 static int 2343 hxge_m_multicst(void *arg, boolean_t add, const uint8_t *mca) 2344 { 2345 p_hxge_t hxgep = (p_hxge_t)arg; 2346 struct ether_addr addrp; 2347 2348 HXGE_DEBUG_MSG((hxgep, MAC_CTL, "==> hxge_m_multicst: add %d", add)); 2349 2350 bcopy(mca, (uint8_t *)&addrp, ETHERADDRL); 2351 2352 if (add) { 2353 if (hxge_add_mcast_addr(hxgep, &addrp)) { 2354 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2355 "<== hxge_m_multicst: add multicast failed")); 2356 return (EINVAL); 2357 } 2358 } else { 2359 if (hxge_del_mcast_addr(hxgep, &addrp)) { 2360 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2361 "<== hxge_m_multicst: del multicast failed")); 2362 return (EINVAL); 2363 } 2364 } 2365 2366 HXGE_DEBUG_MSG((hxgep, MAC_CTL, "<== hxge_m_multicst")); 2367 2368 return (0); 2369 } 2370 2371 static int 2372 hxge_m_promisc(void *arg, boolean_t on) 2373 { 2374 p_hxge_t hxgep = (p_hxge_t)arg; 2375 2376 HXGE_DEBUG_MSG((hxgep, MAC_CTL, "==> hxge_m_promisc: on %d", on)); 2377 2378 if (hxge_set_promisc(hxgep, on)) { 2379 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2380 "<== hxge_m_promisc: set promisc failed")); 2381 return (EINVAL); 2382 } 2383 2384 HXGE_DEBUG_MSG((hxgep, MAC_CTL, "<== hxge_m_promisc: on %d", on)); 2385 2386 return (0); 2387 } 2388 2389 static void 2390 hxge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp) 2391 { 2392 p_hxge_t hxgep = (p_hxge_t)arg; 2393 struct iocblk *iocp = (struct iocblk *)mp->b_rptr; 2394 boolean_t need_privilege; 2395 int err; 2396 int cmd; 2397 2398 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_ioctl")); 2399 2400 iocp = (struct iocblk *)mp->b_rptr; 2401 iocp->ioc_error = 0; 2402 need_privilege = B_TRUE; 2403 cmd = iocp->ioc_cmd; 2404 2405 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_ioctl: cmd 0x%08x", cmd)); 2406 switch (cmd) { 2407 default: 2408 miocnak(wq, mp, 0, EINVAL); 2409 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_ioctl: invalid")); 2410 return; 2411 2412 case LB_GET_INFO_SIZE: 2413 case LB_GET_INFO: 2414 case LB_GET_MODE: 2415 need_privilege = B_FALSE; 2416 break; 2417 2418 case LB_SET_MODE: 2419 break; 2420 2421 case ND_GET: 2422 need_privilege = B_FALSE; 2423 break; 2424 case ND_SET: 2425 break; 2426 2427 case HXGE_GET64: 2428 case HXGE_PUT64: 2429 case HXGE_GET_TX_RING_SZ: 2430 case HXGE_GET_TX_DESC: 2431 case HXGE_TX_SIDE_RESET: 2432 case HXGE_RX_SIDE_RESET: 2433 case HXGE_GLOBAL_RESET: 2434 case HXGE_RESET_MAC: 2435 case HXGE_PUT_TCAM: 2436 case HXGE_GET_TCAM: 2437 case HXGE_RTRACE: 
2438 2439 need_privilege = B_FALSE; 2440 break; 2441 } 2442 2443 if (need_privilege) { 2444 err = secpolicy_net_config(iocp->ioc_cr, B_FALSE); 2445 if (err != 0) { 2446 miocnak(wq, mp, 0, err); 2447 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2448 "<== hxge_m_ioctl: no priv")); 2449 return; 2450 } 2451 } 2452 2453 switch (cmd) { 2454 case ND_GET: 2455 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "ND_GET command")); 2456 case ND_SET: 2457 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "ND_SET command")); 2458 hxge_param_ioctl(hxgep, wq, mp, iocp); 2459 break; 2460 2461 case LB_GET_MODE: 2462 case LB_SET_MODE: 2463 case LB_GET_INFO_SIZE: 2464 case LB_GET_INFO: 2465 hxge_loopback_ioctl(hxgep, wq, mp, iocp); 2466 break; 2467 2468 case HXGE_PUT_TCAM: 2469 case HXGE_GET_TCAM: 2470 case HXGE_GET64: 2471 case HXGE_PUT64: 2472 case HXGE_GET_TX_RING_SZ: 2473 case HXGE_GET_TX_DESC: 2474 case HXGE_TX_SIDE_RESET: 2475 case HXGE_RX_SIDE_RESET: 2476 case HXGE_GLOBAL_RESET: 2477 case HXGE_RESET_MAC: 2478 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, 2479 "==> hxge_m_ioctl: cmd 0x%x", cmd)); 2480 hxge_hw_ioctl(hxgep, wq, mp, iocp); 2481 break; 2482 } 2483 2484 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_ioctl")); 2485 } 2486 2487 extern void hxge_rx_hw_blank(void *arg, time_t ticks, uint_t count); 2488 2489 static void 2490 hxge_m_resources(void *arg) 2491 { 2492 p_hxge_t hxgep = arg; 2493 mac_rx_fifo_t mrf; 2494 p_rx_rcr_rings_t rcr_rings; 2495 p_rx_rcr_ring_t *rcr_p; 2496 p_rx_rcr_ring_t rcrp; 2497 uint32_t i, ndmas; 2498 int status; 2499 2500 HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_m_resources")); 2501 2502 MUTEX_ENTER(hxgep->genlock); 2503 2504 if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) { 2505 status = hxge_init(hxgep); 2506 if (status != HXGE_OK) { 2507 HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_m_resources: " 2508 "hxge_init failed")); 2509 MUTEX_EXIT(hxgep->genlock); 2510 return; 2511 } 2512 } 2513 2514 mrf.mrf_type = MAC_RX_FIFO; 2515 mrf.mrf_blank = hxge_rx_hw_blank; 2516 mrf.mrf_arg = (void *)hxgep; 2517 2518 mrf.mrf_normal_blank_time = RXDMA_RCR_TO_DEFAULT; 2519 mrf.mrf_normal_pkt_count = RXDMA_RCR_PTHRES_DEFAULT; 2520 2521 rcr_rings = hxgep->rx_rcr_rings; 2522 rcr_p = rcr_rings->rcr_rings; 2523 ndmas = rcr_rings->ndmas; 2524 2525 /* 2526 * Export our receive resources to the MAC layer. 2527 */ 2528 for (i = 0; i < ndmas; i++) { 2529 rcrp = (void *)(p_rx_rcr_ring_t)rcr_p[i]; 2530 rcrp->rcr_mac_handle = 2531 mac_resource_add(hxgep->mach, (mac_resource_t *)&mrf); 2532 2533 HXGE_DEBUG_MSG((hxgep, RX_CTL, 2534 "==> hxge_m_resources: vdma %d dma %d " 2535 "rcrptr 0x%016llx mac_handle 0x%016llx", 2536 i, rcrp->rdc, rcr_p[i], rcrp->rcr_mac_handle)); 2537 } 2538 2539 MUTEX_EXIT(hxgep->genlock); 2540 2541 HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_m_resources")); 2542 } 2543 2544 /* 2545 * Set an alternate MAC address 2546 */ 2547 static int 2548 hxge_altmac_set(p_hxge_t hxgep, uint8_t *maddr, mac_addr_slot_t slot) 2549 { 2550 uint64_t address; 2551 uint64_t tmp; 2552 hpi_status_t status; 2553 uint8_t addrn; 2554 int i; 2555 2556 /* 2557 * Convert a byte array to a 48 bit value. 
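* The bytes are assembled MSB first; for example, the (hypothetical)
* address 00:14:4f:a8:39:01 becomes the value 0x00144fa83901.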
2558 * Need to check endianess if in doubt 2559 */ 2560 address = 0; 2561 for (i = 0; i < ETHERADDRL; i++) { 2562 tmp = maddr[i]; 2563 address <<= 8; 2564 address |= tmp; 2565 } 2566 2567 addrn = (uint8_t)slot; 2568 status = hpi_pfc_set_mac_address(hxgep->hpi_handle, addrn, address); 2569 if (status != HPI_SUCCESS) 2570 return (EIO); 2571 2572 return (0); 2573 } 2574 2575 static void 2576 hxge_mmac_kstat_update(p_hxge_t hxgep, mac_addr_slot_t slot) 2577 { 2578 p_hxge_mmac_stats_t mmac_stats; 2579 int i; 2580 hxge_mmac_t *mmac_info; 2581 2582 mmac_info = &hxgep->hxge_mmac_info; 2583 mmac_stats = &hxgep->statsp->mmac_stats; 2584 mmac_stats->mmac_max_cnt = mmac_info->num_mmac; 2585 mmac_stats->mmac_avail_cnt = mmac_info->naddrfree; 2586 2587 for (i = 0; i < ETHERADDRL; i++) { 2588 mmac_stats->mmac_avail_pool[slot].ether_addr_octet[i] = 2589 mmac_info->mac_pool[slot].addr[(ETHERADDRL - 1) - i]; 2590 } 2591 } 2592 2593 /* 2594 * Find an unused address slot, set the address value to the one specified, 2595 * enable the port to start filtering on the new MAC address. 2596 * Returns: 0 on success. 2597 */ 2598 int 2599 hxge_m_mmac_add(void *arg, mac_multi_addr_t *maddr) 2600 { 2601 p_hxge_t hxgep = arg; 2602 mac_addr_slot_t slot; 2603 hxge_mmac_t *mmac_info; 2604 int err; 2605 hxge_status_t status; 2606 2607 mutex_enter(hxgep->genlock); 2608 2609 /* 2610 * Make sure that hxge is initialized, if _start() has 2611 * not been called. 2612 */ 2613 if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) { 2614 status = hxge_init(hxgep); 2615 if (status != HXGE_OK) { 2616 mutex_exit(hxgep->genlock); 2617 return (ENXIO); 2618 } 2619 } 2620 2621 mmac_info = &hxgep->hxge_mmac_info; 2622 if (mmac_info->naddrfree == 0) { 2623 mutex_exit(hxgep->genlock); 2624 return (ENOSPC); 2625 } 2626 2627 if (!mac_unicst_verify(hxgep->mach, maddr->mma_addr, 2628 maddr->mma_addrlen)) { 2629 mutex_exit(hxgep->genlock); 2630 return (EINVAL); 2631 } 2632 2633 /* 2634 * Search for the first available slot. Because naddrfree 2635 * is not zero, we are guaranteed to find one. 2636 * Slot 0 is for unique (primary) MAC. The first alternate 2637 * MAC slot is slot 1. 2638 */ 2639 for (slot = 1; slot < mmac_info->num_mmac; slot++) { 2640 if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED)) 2641 break; 2642 } 2643 2644 ASSERT(slot < mmac_info->num_mmac); 2645 if ((err = hxge_altmac_set(hxgep, maddr->mma_addr, slot)) != 0) { 2646 mutex_exit(hxgep->genlock); 2647 return (err); 2648 } 2649 bcopy(maddr->mma_addr, mmac_info->mac_pool[slot].addr, ETHERADDRL); 2650 mmac_info->mac_pool[slot].flags |= MMAC_SLOT_USED; 2651 mmac_info->naddrfree--; 2652 hxge_mmac_kstat_update(hxgep, slot); 2653 2654 maddr->mma_slot = slot; 2655 2656 mutex_exit(hxgep->genlock); 2657 return (0); 2658 } 2659 2660 /* 2661 * Remove the specified mac address and update 2662 * the h/w not to filter the mac address anymore. 2663 * Returns: 0, on success. 2664 */ 2665 int 2666 hxge_m_mmac_remove(void *arg, mac_addr_slot_t slot) 2667 { 2668 p_hxge_t hxgep = arg; 2669 hxge_mmac_t *mmac_info; 2670 int err = 0; 2671 hxge_status_t status; 2672 2673 mutex_enter(hxgep->genlock); 2674 2675 /* 2676 * Make sure that hxge is initialized, if _start() has 2677 * not been called. 
2678 */ 2679 if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) { 2680 status = hxge_init(hxgep); 2681 if (status != HXGE_OK) { 2682 mutex_exit(hxgep->genlock); 2683 return (ENXIO); 2684 } 2685 } 2686 2687 mmac_info = &hxgep->hxge_mmac_info; 2688 if (slot <= 0 || slot >= mmac_info->num_mmac) { 2689 mutex_exit(hxgep->genlock); 2690 return (EINVAL); 2691 } 2692 2693 if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) { 2694 if (hpi_pfc_mac_addr_disable(hxgep->hpi_handle, slot) == 2695 HPI_SUCCESS) { 2696 mmac_info->mac_pool[slot].flags &= ~MMAC_SLOT_USED; 2697 mmac_info->naddrfree++; 2698 /* 2699 * Clear mac_pool[slot].addr so that kstat shows 0 2700 * alternate MAC address if the slot is not used. 2701 */ 2702 bzero(mmac_info->mac_pool[slot].addr, ETHERADDRL); 2703 hxge_mmac_kstat_update(hxgep, slot); 2704 } else { 2705 err = EIO; 2706 } 2707 } else { 2708 err = EINVAL; 2709 } 2710 2711 mutex_exit(hxgep->genlock); 2712 return (err); 2713 } 2714 2715 /* 2716 * Modify a mac address added by hxge_mmac_add(). 2717 * Returns: 0, on success. 2718 */ 2719 int 2720 hxge_m_mmac_modify(void *arg, mac_multi_addr_t *maddr) 2721 { 2722 p_hxge_t hxgep = arg; 2723 mac_addr_slot_t slot; 2724 hxge_mmac_t *mmac_info; 2725 int err = 0; 2726 hxge_status_t status; 2727 2728 if (!mac_unicst_verify(hxgep->mach, maddr->mma_addr, 2729 maddr->mma_addrlen)) 2730 return (EINVAL); 2731 2732 slot = maddr->mma_slot; 2733 2734 mutex_enter(hxgep->genlock); 2735 2736 /* 2737 * Make sure that hxge is initialized, if _start() has 2738 * not been called. 2739 */ 2740 if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) { 2741 status = hxge_init(hxgep); 2742 if (status != HXGE_OK) { 2743 mutex_exit(hxgep->genlock); 2744 return (ENXIO); 2745 } 2746 } 2747 2748 mmac_info = &hxgep->hxge_mmac_info; 2749 if (slot <= 0 || slot >= mmac_info->num_mmac) { 2750 mutex_exit(hxgep->genlock); 2751 return (EINVAL); 2752 } 2753 2754 if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) { 2755 if ((err = hxge_altmac_set(hxgep, maddr->mma_addr, 2756 slot)) == 0) { 2757 bcopy(maddr->mma_addr, mmac_info->mac_pool[slot].addr, 2758 ETHERADDRL); 2759 hxge_mmac_kstat_update(hxgep, slot); 2760 } 2761 } else { 2762 err = EINVAL; 2763 } 2764 2765 mutex_exit(hxgep->genlock); 2766 return (err); 2767 } 2768 2769 /* 2770 * static int 2771 * hxge_m_mmac_get() - Get the MAC address and other information 2772 * related to the slot. mma_flags should be set to 0 in the call. 2773 * Note: although kstat shows MAC address as zero when a slot is 2774 * not used, Crossbow expects hxge_m_mmac_get to copy factory MAC 2775 * to the caller as long as the slot is not using a user MAC address. 2776 * The following table shows the rules, 2777 * 2778 * USED VENDOR mma_addr 2779 * ------------------------------------------------------------ 2780 * (1) Slot uses a user MAC: yes no user MAC 2781 * (2) Slot uses a factory MAC: yes yes factory MAC 2782 * (3) Slot is not used but is 2783 * factory MAC capable: no yes factory MAC 2784 * (4) Slot is not used and is 2785 * not factory MAC capable: no no 0 2786 * ------------------------------------------------------------ 2787 */ 2788 int 2789 hxge_m_mmac_get(void *arg, mac_multi_addr_t *maddr) 2790 { 2791 hxge_t *hxgep = arg; 2792 mac_addr_slot_t slot; 2793 hxge_mmac_t *mmac_info; 2794 hxge_status_t status; 2795 2796 slot = maddr->mma_slot; 2797 2798 mutex_enter(hxgep->genlock); 2799 2800 /* 2801 * Make sure that hxge is initialized, if _start() has 2802 * not been called. 
2803 */ 2804 if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) { 2805 status = hxge_init(hxgep); 2806 if (status != HXGE_OK) { 2807 mutex_exit(hxgep->genlock); 2808 return (ENXIO); 2809 } 2810 } 2811 2812 mmac_info = &hxgep->hxge_mmac_info; 2813 if (slot <= 0 || slot >= mmac_info->num_mmac) { 2814 mutex_exit(hxgep->genlock); 2815 return (EINVAL); 2816 } 2817 2818 maddr->mma_flags = 0; 2819 if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) { 2820 maddr->mma_flags |= MMAC_SLOT_USED; 2821 bcopy(mmac_info->mac_pool[slot].addr, 2822 maddr->mma_addr, ETHERADDRL); 2823 maddr->mma_addrlen = ETHERADDRL; 2824 } 2825 2826 mutex_exit(hxgep->genlock); 2827 return (0); 2828 } 2829 2830 /*ARGSUSED*/ 2831 boolean_t 2832 hxge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data) 2833 { 2834 p_hxge_t hxgep = (p_hxge_t)arg; 2835 uint32_t *txflags = cap_data; 2836 multiaddress_capab_t *mmacp = cap_data; 2837 2838 switch (cap) { 2839 case MAC_CAPAB_HCKSUM: 2840 *txflags = HCKSUM_INET_PARTIAL; 2841 break; 2842 2843 case MAC_CAPAB_POLL: 2844 /* 2845 * There's nothing for us to fill in, simply returning B_TRUE 2846 * stating that we support polling is sufficient. 2847 */ 2848 break; 2849 2850 case MAC_CAPAB_MULTIADDRESS: 2851 /* 2852 * The number of MAC addresses made available by 2853 * this capability is one less than the total as 2854 * the primary address in slot 0 is counted in 2855 * the total. 2856 */ 2857 mmacp->maddr_naddr = PFC_N_MAC_ADDRESSES - 1; 2858 mmacp->maddr_naddrfree = hxgep->hxge_mmac_info.naddrfree; 2859 mmacp->maddr_flag = 0; /* No multiple factory macs */ 2860 mmacp->maddr_handle = hxgep; 2861 mmacp->maddr_add = hxge_m_mmac_add; 2862 mmacp->maddr_remove = hxge_m_mmac_remove; 2863 mmacp->maddr_modify = hxge_m_mmac_modify; 2864 mmacp->maddr_get = hxge_m_mmac_get; 2865 mmacp->maddr_reserve = NULL; /* No multiple factory macs */ 2866 break; 2867 default: 2868 return (B_FALSE); 2869 } 2870 return (B_TRUE); 2871 } 2872 2873 /* 2874 * Module loading and removing entry points. 2875 */ 2876 DDI_DEFINE_STREAM_OPS(hxge_dev_ops, nulldev, nulldev, hxge_attach, hxge_detach, 2877 nodev, NULL, D_MP, NULL); 2878 2879 extern struct mod_ops mod_driverops; 2880 2881 #define HXGE_DESC_VER "HXGE 10Gb Ethernet Driver" 2882 2883 /* 2884 * Module linkage information for the kernel. 
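*
* hxge_modldrv/modlinkage below are registered with mod_install() in
* _init() and removed with mod_remove() in _fini().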
2885 */ 2886 static struct modldrv hxge_modldrv = { 2887 &mod_driverops, 2888 HXGE_DESC_VER, 2889 &hxge_dev_ops 2890 }; 2891 2892 static struct modlinkage modlinkage = { 2893 MODREV_1, (void *) &hxge_modldrv, NULL 2894 }; 2895 2896 int 2897 _init(void) 2898 { 2899 int status; 2900 2901 HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _init")); 2902 mac_init_ops(&hxge_dev_ops, "hxge"); 2903 status = ddi_soft_state_init(&hxge_list, sizeof (hxge_t), 0); 2904 if (status != 0) { 2905 HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL, 2906 "failed to init device soft state")); 2907 mac_fini_ops(&hxge_dev_ops); 2908 goto _init_exit; 2909 } 2910 2911 status = mod_install(&modlinkage); 2912 if (status != 0) { 2913 ddi_soft_state_fini(&hxge_list); 2914 HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL, "Mod install failed")); 2915 goto _init_exit; 2916 } 2917 2918 MUTEX_INIT(&hxge_common_lock, NULL, MUTEX_DRIVER, NULL); 2919 2920 _init_exit: 2921 HXGE_DEBUG_MSG((NULL, MOD_CTL, "_init status = 0x%X", status)); 2922 2923 return (status); 2924 } 2925 2926 int 2927 _fini(void) 2928 { 2929 int status; 2930 2931 HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini")); 2932 2933 HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini: mod_remove")); 2934 2935 if (hxge_mblks_pending) 2936 return (EBUSY); 2937 2938 status = mod_remove(&modlinkage); 2939 if (status != DDI_SUCCESS) { 2940 HXGE_DEBUG_MSG((NULL, MOD_CTL, 2941 "Module removal failed 0x%08x", status)); 2942 goto _fini_exit; 2943 } 2944 2945 mac_fini_ops(&hxge_dev_ops); 2946 2947 ddi_soft_state_fini(&hxge_list); 2948 2949 MUTEX_DESTROY(&hxge_common_lock); 2950 2951 _fini_exit: 2952 HXGE_DEBUG_MSG((NULL, MOD_CTL, "_fini status = 0x%08x", status)); 2953 2954 return (status); 2955 } 2956 2957 int 2958 _info(struct modinfo *modinfop) 2959 { 2960 int status; 2961 2962 HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _info")); 2963 status = mod_info(&modlinkage, modinfop); 2964 HXGE_DEBUG_MSG((NULL, MOD_CTL, " _info status = 0x%X", status)); 2965 2966 return (status); 2967 } 2968 2969 /*ARGSUSED*/ 2970 hxge_status_t 2971 hxge_add_intrs(p_hxge_t hxgep) 2972 { 2973 int intr_types; 2974 int type = 0; 2975 int ddi_status = DDI_SUCCESS; 2976 hxge_status_t status = HXGE_OK; 2977 2978 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs")); 2979 2980 hxgep->hxge_intr_type.intr_registered = B_FALSE; 2981 hxgep->hxge_intr_type.intr_enabled = B_FALSE; 2982 hxgep->hxge_intr_type.msi_intx_cnt = 0; 2983 hxgep->hxge_intr_type.intr_added = 0; 2984 hxgep->hxge_intr_type.niu_msi_enable = B_FALSE; 2985 hxgep->hxge_intr_type.intr_type = 0; 2986 2987 if (hxge_msi_enable) { 2988 hxgep->hxge_intr_type.niu_msi_enable = B_TRUE; 2989 } 2990 2991 /* Get the supported interrupt types */ 2992 if ((ddi_status = ddi_intr_get_supported_types(hxgep->dip, &intr_types)) 2993 != DDI_SUCCESS) { 2994 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "<== hxge_add_intrs: " 2995 "ddi_intr_get_supported_types failed: status 0x%08x", 2996 ddi_status)); 2997 return (HXGE_ERROR | HXGE_DDI_FAILED); 2998 } 2999 3000 hxgep->hxge_intr_type.intr_types = intr_types; 3001 3002 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: " 3003 "ddi_intr_get_supported_types: 0x%08x", intr_types)); 3004 3005 /* 3006 * Pick the interrupt type to use MSIX, MSI, INTX hxge_msi_enable: 3007 * (1): 1 - MSI 3008 * (2): 2 - MSI-X 3009 * others - FIXED 3010 */ 3011 switch (hxge_msi_enable) { 3012 default: 3013 type = DDI_INTR_TYPE_FIXED; 3014 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: " 3015 "use fixed (intx emulation) type %08x", type)); 3016 break; 3017 3018 case 2: 3019 HXGE_DEBUG_MSG((hxgep, INT_CTL, 
"==> hxge_add_intrs: " 3020 "ddi_intr_get_supported_types: 0x%08x", intr_types)); 3021 if (intr_types & DDI_INTR_TYPE_MSIX) { 3022 type = DDI_INTR_TYPE_MSIX; 3023 HXGE_DEBUG_MSG((hxgep, INT_CTL, 3024 "==> hxge_add_intrs: " 3025 "ddi_intr_get_supported_types: MSIX 0x%08x", type)); 3026 } else if (intr_types & DDI_INTR_TYPE_MSI) { 3027 type = DDI_INTR_TYPE_MSI; 3028 HXGE_DEBUG_MSG((hxgep, INT_CTL, 3029 "==> hxge_add_intrs: " 3030 "ddi_intr_get_supported_types: MSI 0x%08x", type)); 3031 } else if (intr_types & DDI_INTR_TYPE_FIXED) { 3032 type = DDI_INTR_TYPE_FIXED; 3033 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: " 3034 "ddi_intr_get_supported_types: MSXED0x%08x", type)); 3035 } 3036 break; 3037 3038 case 1: 3039 if (intr_types & DDI_INTR_TYPE_MSI) { 3040 type = DDI_INTR_TYPE_MSI; 3041 HXGE_DEBUG_MSG((hxgep, INT_CTL, 3042 "==> hxge_add_intrs: " 3043 "ddi_intr_get_supported_types: MSI 0x%08x", type)); 3044 } else if (intr_types & DDI_INTR_TYPE_MSIX) { 3045 type = DDI_INTR_TYPE_MSIX; 3046 HXGE_DEBUG_MSG((hxgep, INT_CTL, 3047 "==> hxge_add_intrs: " 3048 "ddi_intr_get_supported_types: MSIX 0x%08x", type)); 3049 } else if (intr_types & DDI_INTR_TYPE_FIXED) { 3050 type = DDI_INTR_TYPE_FIXED; 3051 HXGE_DEBUG_MSG((hxgep, INT_CTL, 3052 "==> hxge_add_intrs: " 3053 "ddi_intr_get_supported_types: MSXED0x%08x", type)); 3054 } 3055 } 3056 3057 hxgep->hxge_intr_type.intr_type = type; 3058 if ((type == DDI_INTR_TYPE_MSIX || type == DDI_INTR_TYPE_MSI || 3059 type == DDI_INTR_TYPE_FIXED) && 3060 hxgep->hxge_intr_type.niu_msi_enable) { 3061 if ((status = hxge_add_intrs_adv(hxgep)) != DDI_SUCCESS) { 3062 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3063 " hxge_add_intrs: " 3064 " hxge_add_intrs_adv failed: status 0x%08x", 3065 status)); 3066 return (status); 3067 } else { 3068 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_add_intrs: " 3069 "interrupts registered : type %d", type)); 3070 hxgep->hxge_intr_type.intr_registered = B_TRUE; 3071 3072 HXGE_DEBUG_MSG((hxgep, DDI_CTL, 3073 "\nAdded advanced hxge add_intr_adv " 3074 "intr type 0x%x\n", type)); 3075 3076 return (status); 3077 } 3078 } 3079 3080 if (!hxgep->hxge_intr_type.intr_registered) { 3081 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3082 "==> hxge_add_intrs: failed to register interrupts")); 3083 return (HXGE_ERROR | HXGE_DDI_FAILED); 3084 } 3085 3086 HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs")); 3087 3088 return (status); 3089 } 3090 3091 /*ARGSUSED*/ 3092 static hxge_status_t 3093 hxge_add_soft_intrs(p_hxge_t hxgep) 3094 { 3095 int ddi_status = DDI_SUCCESS; 3096 hxge_status_t status = HXGE_OK; 3097 3098 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_add_soft_intrs")); 3099 3100 hxgep->resched_id = NULL; 3101 hxgep->resched_running = B_FALSE; 3102 ddi_status = ddi_add_softintr(hxgep->dip, DDI_SOFTINT_LOW, 3103 &hxgep->resched_id, NULL, NULL, hxge_reschedule, (caddr_t)hxgep); 3104 if (ddi_status != DDI_SUCCESS) { 3105 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "<== hxge_add_soft_intrs: " 3106 "ddi_add_softintrs failed: status 0x%08x", ddi_status)); 3107 return (HXGE_ERROR | HXGE_DDI_FAILED); 3108 } 3109 3110 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_ddi_add_soft_intrs")); 3111 3112 return (status); 3113 } 3114 3115 /*ARGSUSED*/ 3116 static hxge_status_t 3117 hxge_add_intrs_adv(p_hxge_t hxgep) 3118 { 3119 int intr_type; 3120 p_hxge_intr_t intrp; 3121 hxge_status_t status; 3122 3123 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv")); 3124 3125 intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type; 3126 intr_type = intrp->intr_type; 3127 3128 HXGE_DEBUG_MSG((hxgep, 
INT_CTL, "==> hxge_add_intrs_adv: type 0x%x", 3129 intr_type)); 3130 3131 switch (intr_type) { 3132 case DDI_INTR_TYPE_MSI: /* 0x2 */ 3133 case DDI_INTR_TYPE_MSIX: /* 0x4 */ 3134 status = hxge_add_intrs_adv_type(hxgep, intr_type); 3135 break; 3136 3137 case DDI_INTR_TYPE_FIXED: /* 0x1 */ 3138 status = hxge_add_intrs_adv_type_fix(hxgep, intr_type); 3139 break; 3140 3141 default: 3142 status = HXGE_ERROR; 3143 break; 3144 } 3145 3146 HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs_adv")); 3147 3148 return (status); 3149 } 3150 3151 /*ARGSUSED*/ 3152 static hxge_status_t 3153 hxge_add_intrs_adv_type(p_hxge_t hxgep, uint32_t int_type) 3154 { 3155 dev_info_t *dip = hxgep->dip; 3156 p_hxge_ldg_t ldgp; 3157 p_hxge_intr_t intrp; 3158 uint_t *inthandler; 3159 void *arg1, *arg2; 3160 int behavior; 3161 int nintrs, navail; 3162 int nactual, nrequired; 3163 int inum = 0; 3164 int loop = 0; 3165 int x, y; 3166 int ddi_status = DDI_SUCCESS; 3167 hxge_status_t status = HXGE_OK; 3168 3169 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv_type")); 3170 3171 intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type; 3172 3173 ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs); 3174 if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) { 3175 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3176 "ddi_intr_get_nintrs() failed, status: 0x%x%, " 3177 "nintrs: %d", ddi_status, nintrs)); 3178 return (HXGE_ERROR | HXGE_DDI_FAILED); 3179 } 3180 3181 ddi_status = ddi_intr_get_navail(dip, int_type, &navail); 3182 if ((ddi_status != DDI_SUCCESS) || (navail == 0)) { 3183 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3184 "ddi_intr_get_navail() failed, status: 0x%x%, " 3185 "nintrs: %d", ddi_status, navail)); 3186 return (HXGE_ERROR | HXGE_DDI_FAILED); 3187 } 3188 3189 HXGE_DEBUG_MSG((hxgep, INT_CTL, 3190 "ddi_intr_get_navail() returned: intr type %d nintrs %d, navail %d", 3191 int_type, nintrs, navail)); 3192 3193 if (int_type == DDI_INTR_TYPE_MSI && !ISP2(navail)) { 3194 /* MSI must be power of 2 */ 3195 if ((navail & 16) == 16) { 3196 navail = 16; 3197 } else if ((navail & 8) == 8) { 3198 navail = 8; 3199 } else if ((navail & 4) == 4) { 3200 navail = 4; 3201 } else if ((navail & 2) == 2) { 3202 navail = 2; 3203 } else { 3204 navail = 1; 3205 } 3206 HXGE_DEBUG_MSG((hxgep, INT_CTL, 3207 "ddi_intr_get_navail(): (msi power of 2) nintrs %d, " 3208 "navail %d", nintrs, navail)); 3209 } 3210 3211 HXGE_DEBUG_MSG((hxgep, INT_CTL, 3212 "requesting: intr type %d nintrs %d, navail %d", 3213 int_type, nintrs, navail)); 3214 3215 behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? 
DDI_INTR_ALLOC_STRICT : 3216 DDI_INTR_ALLOC_NORMAL); 3217 intrp->intr_size = navail * sizeof (ddi_intr_handle_t); 3218 intrp->htable = kmem_zalloc(intrp->intr_size, KM_SLEEP); 3219 3220 ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum, 3221 navail, &nactual, behavior); 3222 if (ddi_status != DDI_SUCCESS || nactual == 0) { 3223 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3224 " ddi_intr_alloc() failed: %d", ddi_status)); 3225 kmem_free(intrp->htable, intrp->intr_size); 3226 return (HXGE_ERROR | HXGE_DDI_FAILED); 3227 } 3228 3229 HXGE_DEBUG_MSG((hxgep, INT_CTL, 3230 "ddi_intr_alloc() returned: navail %d nactual %d", 3231 navail, nactual)); 3232 3233 if ((ddi_status = ddi_intr_get_pri(intrp->htable[0], 3234 (uint_t *)&intrp->pri)) != DDI_SUCCESS) { 3235 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3236 " ddi_intr_get_pri() failed: %d", ddi_status)); 3237 /* Free already allocated interrupts */ 3238 for (y = 0; y < nactual; y++) { 3239 (void) ddi_intr_free(intrp->htable[y]); 3240 } 3241 3242 kmem_free(intrp->htable, intrp->intr_size); 3243 return (HXGE_ERROR | HXGE_DDI_FAILED); 3244 } 3245 3246 nrequired = 0; 3247 status = hxge_ldgv_init(hxgep, &nactual, &nrequired); 3248 if (status != HXGE_OK) { 3249 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3250 "hxge_add_intrs_adv_typ:hxge_ldgv_init " 3251 "failed: 0x%x", status)); 3252 /* Free already allocated interrupts */ 3253 for (y = 0; y < nactual; y++) { 3254 (void) ddi_intr_free(intrp->htable[y]); 3255 } 3256 3257 kmem_free(intrp->htable, intrp->intr_size); 3258 return (status); 3259 } 3260 3261 ldgp = hxgep->ldgvp->ldgp; 3262 HXGE_DEBUG_MSG((hxgep, INT_CTL, 3263 "After hxge_ldgv_init(): nreq %d nactual %d", nrequired, nactual)); 3264 3265 if (nactual < nrequired) 3266 loop = nactual; 3267 else 3268 loop = nrequired; 3269 3270 for (x = 0; x < loop; x++, ldgp++) { 3271 ldgp->vector = (uint8_t)x; 3272 arg1 = ldgp->ldvp; 3273 arg2 = hxgep; 3274 if (ldgp->nldvs == 1) { 3275 inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler; 3276 HXGE_DEBUG_MSG((hxgep, INT_CTL, 3277 "hxge_add_intrs_adv_type: arg1 0x%x arg2 0x%x: " 3278 "1-1 int handler (entry %d)\n", 3279 arg1, arg2, x)); 3280 } else if (ldgp->nldvs > 1) { 3281 inthandler = (uint_t *)ldgp->sys_intr_handler; 3282 HXGE_DEBUG_MSG((hxgep, INT_CTL, 3283 "hxge_add_intrs_adv_type: arg1 0x%x arg2 0x%x: " 3284 "nldevs %d int handler (entry %d)\n", 3285 arg1, arg2, ldgp->nldvs, x)); 3286 } 3287 HXGE_DEBUG_MSG((hxgep, INT_CTL, 3288 "==> hxge_add_intrs_adv_type: ddi_add_intr(inum) #%d " 3289 "htable 0x%llx", x, intrp->htable[x])); 3290 3291 if ((ddi_status = ddi_intr_add_handler(intrp->htable[x], 3292 (ddi_intr_handler_t *)inthandler, arg1, arg2)) != 3293 DDI_SUCCESS) { 3294 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3295 "==> hxge_add_intrs_adv_type: failed #%d " 3296 "status 0x%x", x, ddi_status)); 3297 for (y = 0; y < intrp->intr_added; y++) { 3298 (void) ddi_intr_remove_handler( 3299 intrp->htable[y]); 3300 } 3301 3302 /* Free already allocated intr */ 3303 for (y = 0; y < nactual; y++) { 3304 (void) ddi_intr_free(intrp->htable[y]); 3305 } 3306 kmem_free(intrp->htable, intrp->intr_size); 3307 3308 (void) hxge_ldgv_uninit(hxgep); 3309 3310 return (HXGE_ERROR | HXGE_DDI_FAILED); 3311 } 3312 3313 intrp->intr_added++; 3314 } 3315 intrp->msi_intx_cnt = nactual; 3316 3317 HXGE_DEBUG_MSG((hxgep, INT_CTL, 3318 "Requested: %d, Allowed: %d msi_intx_cnt %d intr_added %d", 3319 navail, nactual, intrp->msi_intx_cnt, intrp->intr_added)); 3320 3321 (void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap); 3322 (void) 
hxge_intr_ldgv_init(hxgep); 3323 3324 HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs_adv_type")); 3325 3326 return (status); 3327 } 3328 3329 /*ARGSUSED*/ 3330 static hxge_status_t 3331 hxge_add_intrs_adv_type_fix(p_hxge_t hxgep, uint32_t int_type) 3332 { 3333 dev_info_t *dip = hxgep->dip; 3334 p_hxge_ldg_t ldgp; 3335 p_hxge_intr_t intrp; 3336 uint_t *inthandler; 3337 void *arg1, *arg2; 3338 int behavior; 3339 int nintrs, navail; 3340 int nactual, nrequired; 3341 int inum = 0; 3342 int x, y; 3343 int ddi_status = DDI_SUCCESS; 3344 hxge_status_t status = HXGE_OK; 3345 3346 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv_type_fix")); 3347 intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type; 3348 3349 ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs); 3350 if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) { 3351 HXGE_DEBUG_MSG((hxgep, INT_CTL, 3352 "ddi_intr_get_nintrs() failed, status: 0x%x, " 3353 "nintrs: %d", ddi_status, nintrs)); 3354 return (HXGE_ERROR | HXGE_DDI_FAILED); 3355 } 3356 3357 ddi_status = ddi_intr_get_navail(dip, int_type, &navail); 3358 if ((ddi_status != DDI_SUCCESS) || (navail == 0)) { 3359 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3360 "ddi_intr_get_navail() failed, status: 0x%x, " 3361 "navail: %d", ddi_status, navail)); 3362 return (HXGE_ERROR | HXGE_DDI_FAILED); 3363 } 3364 3365 HXGE_DEBUG_MSG((hxgep, INT_CTL, 3366 "ddi_intr_get_navail() returned: nintrs %d, navail %d", 3367 nintrs, navail)); 3368 3369 behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT : 3370 DDI_INTR_ALLOC_NORMAL); 3371 intrp->intr_size = navail * sizeof (ddi_intr_handle_t); 3372 intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP); 3373 ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum, 3374 navail, &nactual, behavior); 3375 if (ddi_status != DDI_SUCCESS || nactual == 0) { 3376 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3377 " ddi_intr_alloc() failed: %d", ddi_status)); 3378 kmem_free(intrp->htable, intrp->intr_size); 3379 return (HXGE_ERROR | HXGE_DDI_FAILED); 3380 } 3381 3382 if ((ddi_status = ddi_intr_get_pri(intrp->htable[0], 3383 (uint_t *)&intrp->pri)) != DDI_SUCCESS) { 3384 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3385 " ddi_intr_get_pri() failed: %d", ddi_status)); 3386 /* Free already allocated interrupts */ 3387 for (y = 0; y < nactual; y++) { 3388 (void) ddi_intr_free(intrp->htable[y]); 3389 } 3390 3391 kmem_free(intrp->htable, intrp->intr_size); 3392 return (HXGE_ERROR | HXGE_DDI_FAILED); 3393 } 3394 3395 nrequired = 0; 3396 status = hxge_ldgv_init(hxgep, &nactual, &nrequired); 3397 if (status != HXGE_OK) { 3398 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3399 "hxge_add_intrs_adv_type_fix:hxge_ldgv_init " 3400 "failed: 0x%x", status)); 3401 /* Free already allocated interrupts */ 3402 for (y = 0; y < nactual; y++) { 3403 (void) ddi_intr_free(intrp->htable[y]); 3404 } 3405 3406 kmem_free(intrp->htable, intrp->intr_size); 3407 return (status); 3408 } 3409 3410 ldgp = hxgep->ldgvp->ldgp; 3411 for (x = 0; x < nrequired; x++, ldgp++) { 3412 ldgp->vector = (uint8_t)x; 3413 arg1 = ldgp->ldvp; 3414 arg2 = hxgep; 3415 if (ldgp->nldvs == 1) { 3416 inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler; 3417 HXGE_DEBUG_MSG((hxgep, INT_CTL, 3418 "hxge_add_intrs_adv_type_fix: " 3419 "1-1 int handler(%d) ldg %d ldv %d " 3420 "arg1 $%p arg2 $%p\n", 3421 x, ldgp->ldg, ldgp->ldvp->ldv, arg1, arg2)); 3422 } else if (ldgp->nldvs > 1) { 3423 inthandler = (uint_t *)ldgp->sys_intr_handler; 3424 HXGE_DEBUG_MSG((hxgep, INT_CTL, 3425 "hxge_add_intrs_adv_type_fix: " 3426 "shared ldv %d int
handler(%d) ldv %d ldg %d" 3427 "arg1 0x%016llx arg2 0x%016llx\n", 3428 x, ldgp->nldvs, ldgp->ldg, ldgp->ldvp->ldv, 3429 arg1, arg2)); 3430 } 3431 3432 if ((ddi_status = ddi_intr_add_handler(intrp->htable[x], 3433 (ddi_intr_handler_t *)inthandler, arg1, arg2)) != 3434 DDI_SUCCESS) { 3435 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3436 "==> hxge_add_intrs_adv_type_fix: failed #%d " 3437 "status 0x%x", x, ddi_status)); 3438 for (y = 0; y < intrp->intr_added; y++) { 3439 (void) ddi_intr_remove_handler( 3440 intrp->htable[y]); 3441 } 3442 for (y = 0; y < nactual; y++) { 3443 (void) ddi_intr_free(intrp->htable[y]); 3444 } 3445 /* Free already allocated intr */ 3446 kmem_free(intrp->htable, intrp->intr_size); 3447 3448 (void) hxge_ldgv_uninit(hxgep); 3449 3450 return (HXGE_ERROR | HXGE_DDI_FAILED); 3451 } 3452 intrp->intr_added++; 3453 } 3454 3455 intrp->msi_intx_cnt = nactual; 3456 3457 (void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap); 3458 3459 status = hxge_intr_ldgv_init(hxgep); 3460 3461 HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs_adv_type_fix")); 3462 3463 return (status); 3464 } 3465 3466 /*ARGSUSED*/ 3467 static void 3468 hxge_remove_intrs(p_hxge_t hxgep) 3469 { 3470 int i, inum; 3471 p_hxge_intr_t intrp; 3472 3473 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_remove_intrs")); 3474 intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type; 3475 if (!intrp->intr_registered) { 3476 HXGE_DEBUG_MSG((hxgep, INT_CTL, 3477 "<== hxge_remove_intrs: interrupts not registered")); 3478 return; 3479 } 3480 3481 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_remove_intrs:advanced")); 3482 3483 if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) { 3484 (void) ddi_intr_block_disable(intrp->htable, 3485 intrp->intr_added); 3486 } else { 3487 for (i = 0; i < intrp->intr_added; i++) { 3488 (void) ddi_intr_disable(intrp->htable[i]); 3489 } 3490 } 3491 3492 for (inum = 0; inum < intrp->intr_added; inum++) { 3493 if (intrp->htable[inum]) { 3494 (void) ddi_intr_remove_handler(intrp->htable[inum]); 3495 } 3496 } 3497 3498 for (inum = 0; inum < intrp->msi_intx_cnt; inum++) { 3499 if (intrp->htable[inum]) { 3500 HXGE_DEBUG_MSG((hxgep, DDI_CTL, 3501 "hxge_remove_intrs: ddi_intr_free inum %d " 3502 "msi_intx_cnt %d intr_added %d", 3503 inum, intrp->msi_intx_cnt, intrp->intr_added)); 3504 3505 (void) ddi_intr_free(intrp->htable[inum]); 3506 } 3507 } 3508 3509 kmem_free(intrp->htable, intrp->intr_size); 3510 intrp->intr_registered = B_FALSE; 3511 intrp->intr_enabled = B_FALSE; 3512 intrp->msi_intx_cnt = 0; 3513 intrp->intr_added = 0; 3514 3515 (void) hxge_ldgv_uninit(hxgep); 3516 3517 HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_remove_intrs")); 3518 } 3519 3520 /*ARGSUSED*/ 3521 static void 3522 hxge_remove_soft_intrs(p_hxge_t hxgep) 3523 { 3524 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_remove_soft_intrs")); 3525 3526 if (hxgep->resched_id) { 3527 ddi_remove_softintr(hxgep->resched_id); 3528 HXGE_DEBUG_MSG((hxgep, INT_CTL, 3529 "==> hxge_remove_soft_intrs: removed")); 3530 hxgep->resched_id = NULL; 3531 } 3532 3533 HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_remove_soft_intrs")); 3534 } 3535 3536 /*ARGSUSED*/ 3537 void 3538 hxge_intrs_enable(p_hxge_t hxgep) 3539 { 3540 p_hxge_intr_t intrp; 3541 int i; 3542 int status; 3543 3544 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_enable")); 3545 3546 intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type; 3547 3548 if (!intrp->intr_registered) { 3549 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "<== hxge_intrs_enable: " 3550 "interrupts are not registered")); 3551 return; 3552 } 3553 3554 if 
(intrp->intr_enabled) { 3555 HXGE_DEBUG_MSG((hxgep, INT_CTL, 3556 "<== hxge_intrs_enable: already enabled")); 3557 return; 3558 } 3559 3560 if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) { 3561 status = ddi_intr_block_enable(intrp->htable, 3562 intrp->intr_added); 3563 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_enable " 3564 "block enable - status 0x%x total inums #%d\n", 3565 status, intrp->intr_added)); 3566 } else { 3567 for (i = 0; i < intrp->intr_added; i++) { 3568 status = ddi_intr_enable(intrp->htable[i]); 3569 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_enable " 3570 "ddi_intr_enable:enable - status 0x%x " 3571 "total inums %d enable inum #%d\n", 3572 status, intrp->intr_added, i)); 3573 if (status == DDI_SUCCESS) { 3574 intrp->intr_enabled = B_TRUE; 3575 } 3576 } 3577 } 3578 3579 HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intrs_enable")); 3580 } 3581 3582 /*ARGSUSED*/ 3583 static void 3584 hxge_intrs_disable(p_hxge_t hxgep) 3585 { 3586 p_hxge_intr_t intrp; 3587 int i; 3588 3589 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_disable")); 3590 3591 intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type; 3592 3593 if (!intrp->intr_registered) { 3594 HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intrs_disable: " 3595 "interrupts are not registered")); 3596 return; 3597 } 3598 3599 if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) { 3600 (void) ddi_intr_block_disable(intrp->htable, 3601 intrp->intr_added); 3602 } else { 3603 for (i = 0; i < intrp->intr_added; i++) { 3604 (void) ddi_intr_disable(intrp->htable[i]); 3605 } 3606 } 3607 3608 intrp->intr_enabled = B_FALSE; 3609 HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intrs_disable")); 3610 } 3611 3612 static hxge_status_t 3613 hxge_mac_register(p_hxge_t hxgep) 3614 { 3615 mac_register_t *macp; 3616 int status; 3617 3618 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_mac_register")); 3619 3620 if ((macp = mac_alloc(MAC_VERSION)) == NULL) 3621 return (HXGE_ERROR); 3622 3623 macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER; 3624 macp->m_driver = hxgep; 3625 macp->m_dip = hxgep->dip; 3626 macp->m_src_addr = hxgep->ouraddr.ether_addr_octet; 3627 3628 HXGE_DEBUG_MSG((hxgep, DDI_CTL, 3629 "hxge_mac_register: ether addr is %x:%x:%x:%x:%x:%x", 3630 macp->m_src_addr[0], 3631 macp->m_src_addr[1], 3632 macp->m_src_addr[2], 3633 macp->m_src_addr[3], 3634 macp->m_src_addr[4], 3635 macp->m_src_addr[5])); 3636 3637 macp->m_callbacks = &hxge_m_callbacks; 3638 macp->m_min_sdu = 0; 3639 macp->m_max_sdu = hxgep->vmac.maxframesize - 3640 sizeof (struct ether_header) - ETHERFCSL - 4 - TX_PKT_HEADER_SIZE; 3641 3642 status = mac_register(macp, &hxgep->mach); 3643 mac_free(macp); 3644 3645 if (status != 0) { 3646 cmn_err(CE_WARN, 3647 "hxge_mac_register failed (status %d instance %d)", 3648 status, hxgep->instance); 3649 return (HXGE_ERROR); 3650 } 3651 3652 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_mac_register success " 3653 "(instance %d)", hxgep->instance)); 3654 3655 return (HXGE_OK); 3656 } 3657 3658 static int 3659 hxge_init_common_dev(p_hxge_t hxgep) 3660 { 3661 p_hxge_hw_list_t hw_p; 3662 dev_info_t *p_dip; 3663 3664 HXGE_DEBUG_MSG((hxgep, MOD_CTL, "==> hxge_init_common_dev")); 3665 3666 p_dip = hxgep->p_dip; 3667 MUTEX_ENTER(&hxge_common_lock); 3668 3669 /* 3670 * Loop through existing per Hydra hardware list. 
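* Each entry corresponds to one physical device (parent dip); instances
* attached under the same parent share the entry and its cfg/tcam/vlan locks.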
3671 */ 3672 for (hw_p = hxge_hw_list; hw_p; hw_p = hw_p->next) { 3673 HXGE_DEBUG_MSG((hxgep, MOD_CTL, 3674 "==> hxge_init_common_dev: hw_p $%p parent dip $%p", 3675 hw_p, p_dip)); 3676 if (hw_p->parent_devp == p_dip) { 3677 hxgep->hxge_hw_p = hw_p; 3678 hw_p->ndevs++; 3679 hw_p->hxge_p = hxgep; 3680 HXGE_DEBUG_MSG((hxgep, MOD_CTL, 3681 "==> hxge_init_common_device: " 3682 "hw_p $%p parent dip $%p ndevs %d (found)", 3683 hw_p, p_dip, hw_p->ndevs)); 3684 break; 3685 } 3686 } 3687 3688 if (hw_p == NULL) { 3689 HXGE_DEBUG_MSG((hxgep, MOD_CTL, 3690 "==> hxge_init_common_dev: parent dip $%p (new)", p_dip)); 3691 hw_p = kmem_zalloc(sizeof (hxge_hw_list_t), KM_SLEEP); 3692 hw_p->parent_devp = p_dip; 3693 hw_p->magic = HXGE_MAGIC; 3694 hxgep->hxge_hw_p = hw_p; 3695 hw_p->ndevs++; 3696 hw_p->hxge_p = hxgep; 3697 hw_p->next = hxge_hw_list; 3698 3699 MUTEX_INIT(&hw_p->hxge_cfg_lock, NULL, MUTEX_DRIVER, NULL); 3700 MUTEX_INIT(&hw_p->hxge_tcam_lock, NULL, MUTEX_DRIVER, NULL); 3701 MUTEX_INIT(&hw_p->hxge_vlan_lock, NULL, MUTEX_DRIVER, NULL); 3702 3703 hxge_hw_list = hw_p; 3704 } 3705 MUTEX_EXIT(&hxge_common_lock); 3706 HXGE_DEBUG_MSG((hxgep, MOD_CTL, 3707 "==> hxge_init_common_dev (hxge_hw_list) $%p", hxge_hw_list)); 3708 HXGE_DEBUG_MSG((hxgep, MOD_CTL, "<== hxge_init_common_dev")); 3709 3710 return (HXGE_OK); 3711 } 3712 3713 static void 3714 hxge_uninit_common_dev(p_hxge_t hxgep) 3715 { 3716 p_hxge_hw_list_t hw_p, h_hw_p; 3717 dev_info_t *p_dip; 3718 3719 HXGE_DEBUG_MSG((hxgep, MOD_CTL, "==> hxge_uninit_common_dev")); 3720 if (hxgep->hxge_hw_p == NULL) { 3721 HXGE_DEBUG_MSG((hxgep, MOD_CTL, 3722 "<== hxge_uninit_common_dev (no common)")); 3723 return; 3724 } 3725 3726 MUTEX_ENTER(&hxge_common_lock); 3727 h_hw_p = hxge_hw_list; 3728 for (hw_p = hxge_hw_list; hw_p; hw_p = hw_p->next) { 3729 p_dip = hw_p->parent_devp; 3730 if (hxgep->hxge_hw_p == hw_p && p_dip == hxgep->p_dip && 3731 hxgep->hxge_hw_p->magic == HXGE_MAGIC && 3732 hw_p->magic == HXGE_MAGIC) { 3733 HXGE_DEBUG_MSG((hxgep, MOD_CTL, 3734 "==> hxge_uninit_common_dev: " 3735 "hw_p $%p parent dip $%p ndevs %d (found)", 3736 hw_p, p_dip, hw_p->ndevs)); 3737 3738 hxgep->hxge_hw_p = NULL; 3739 if (hw_p->ndevs) { 3740 hw_p->ndevs--; 3741 } 3742 hw_p->hxge_p = NULL; 3743 if (!hw_p->ndevs) { 3744 MUTEX_DESTROY(&hw_p->hxge_vlan_lock); 3745 MUTEX_DESTROY(&hw_p->hxge_tcam_lock); 3746 MUTEX_DESTROY(&hw_p->hxge_cfg_lock); 3747 HXGE_DEBUG_MSG((hxgep, MOD_CTL, 3748 "==> hxge_uninit_common_dev: " 3749 "hw_p $%p parent dip $%p ndevs %d (last)", 3750 hw_p, p_dip, hw_p->ndevs)); 3751 3752 if (hw_p == hxge_hw_list) { 3753 HXGE_DEBUG_MSG((hxgep, MOD_CTL, 3754 "==> hxge_uninit_common_dev:" 3755 "remove head " 3756 "hw_p $%p parent dip $%p " 3757 "ndevs %d (head)", 3758 hw_p, p_dip, hw_p->ndevs)); 3759 hxge_hw_list = hw_p->next; 3760 } else { 3761 HXGE_DEBUG_MSG((hxgep, MOD_CTL, 3762 "==> hxge_uninit_common_dev:" 3763 "remove middle " 3764 "hw_p $%p parent dip $%p " 3765 "ndevs %d (middle)", 3766 hw_p, p_dip, hw_p->ndevs)); 3767 h_hw_p->next = hw_p->next; 3768 } 3769 3770 KMEM_FREE(hw_p, sizeof (hxge_hw_list_t)); 3771 } 3772 break; 3773 } else { 3774 h_hw_p = hw_p; 3775 } 3776 } 3777 3778 MUTEX_EXIT(&hxge_common_lock); 3779 HXGE_DEBUG_MSG((hxgep, MOD_CTL, 3780 "==> hxge_uninit_common_dev (hxge_hw_list) $%p", hxge_hw_list)); 3781 3782 HXGE_DEBUG_MSG((hxgep, MOD_CTL, "<= hxge_uninit_common_dev")); 3783 } 3784