/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * SunOS MT STREAMS Hydra 10Gb Ethernet Device Driver.
 */
#include <hxge_impl.h>
#include <hxge_pfc.h>

/*
 * PSARC/2007/453 MSI-X interrupt limit override
 * (This PSARC case is limited to MSI-X vectors
 * and SPARC platforms only).
 */
#if defined(_BIG_ENDIAN)
uint32_t hxge_msi_enable = 2;
#else
uint32_t hxge_msi_enable = 1;
#endif

/*
 * Globals: tunable parameters (/etc/system or adb)
 */
uint32_t hxge_rbr_size = HXGE_RBR_RBB_DEFAULT;
uint32_t hxge_rbr_spare_size = 0;
uint32_t hxge_rcr_size = HXGE_RCR_DEFAULT;
uint32_t hxge_tx_ring_size = HXGE_TX_RING_DEFAULT;
uint32_t hxge_bcopy_thresh = TX_BCOPY_MAX;
uint32_t hxge_dvma_thresh = TX_FASTDVMA_MIN;
uint32_t hxge_dma_stream_thresh = TX_STREAM_MIN;
uint32_t hxge_jumbo_frame_size = MAX_FRAME_SIZE;

static hxge_os_mutex_t hxgedebuglock;
static int hxge_debug_init = 0;

/*
 * Debugging flags:
 * hxge_no_tx_lb : transmit load balancing
 * hxge_tx_lb_policy: 0 - TCP/UDP port (default)
 *                    1 - From the Stack
 *                    2 - Destination IP Address
 */
uint32_t hxge_no_tx_lb = 0;
uint32_t hxge_tx_lb_policy = HXGE_TX_LB_TCPUDP;

/*
 * Tunable to reduce the amount of time spent in the
 * ISR doing Rx processing.
 */
#if defined(__sparc)
uint32_t hxge_max_rx_pkts = 512;
#else
uint32_t hxge_max_rx_pkts = 1024;
#endif

/*
 * Tunables to manage the receive buffer blocks.
 *
 * hxge_rx_threshold_hi: copy all buffers.
 * hxge_rx_bcopy_size_type: receive buffer block size type.
 * hxge_rx_threshold_lo: copy only up to tunable block size type.
 */
hxge_rxbuf_threshold_t hxge_rx_threshold_hi = HXGE_RX_COPY_7;
hxge_rxbuf_type_t hxge_rx_buf_size_type = RCR_PKTBUFSZ_0;
hxge_rxbuf_threshold_t hxge_rx_threshold_lo = HXGE_RX_COPY_3;
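
/*
 * A hedged reading of the three tunables above (the actual copy-vs-loan
 * policy lives in the RX data path, not in this file): once ring usage
 * crosses hxge_rx_threshold_lo, packets that fit the block size selected
 * by hxge_rx_buf_size_type are bcopied rather than loaned upstream, and
 * once usage crosses hxge_rx_threshold_hi every packet is bcopied:
 *
 *	if (usage >= hxge_rx_threshold_hi)
 *		bcopy the packet;
 *	else if (usage >= hxge_rx_threshold_lo &&
 *	    pktsize <= bufsz(hxge_rx_buf_size_type))
 *		bcopy the packet;
 *	else
 *		loan the receive buffer upstream;
 */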

rtrace_t hpi_rtracebuf;

/*
 * Function Prototypes
 */
static int hxge_attach(dev_info_t *, ddi_attach_cmd_t);
static int hxge_detach(dev_info_t *, ddi_detach_cmd_t);
static void hxge_unattach(p_hxge_t);

static hxge_status_t hxge_setup_system_dma_pages(p_hxge_t);

static hxge_status_t hxge_setup_mutexes(p_hxge_t);
static void hxge_destroy_mutexes(p_hxge_t);

static hxge_status_t hxge_map_regs(p_hxge_t hxgep);
static void hxge_unmap_regs(p_hxge_t hxgep);

hxge_status_t hxge_add_intrs(p_hxge_t hxgep);
static hxge_status_t hxge_add_soft_intrs(p_hxge_t hxgep);
static void hxge_remove_intrs(p_hxge_t hxgep);
static void hxge_remove_soft_intrs(p_hxge_t hxgep);
static hxge_status_t hxge_add_intrs_adv(p_hxge_t hxgep);
static hxge_status_t hxge_add_intrs_adv_type(p_hxge_t, uint32_t);
static hxge_status_t hxge_add_intrs_adv_type_fix(p_hxge_t, uint32_t);
void hxge_intrs_enable(p_hxge_t hxgep);
static void hxge_intrs_disable(p_hxge_t hxgep);
static void hxge_suspend(p_hxge_t);
static hxge_status_t hxge_resume(p_hxge_t);
hxge_status_t hxge_setup_dev(p_hxge_t);
static void hxge_destroy_dev(p_hxge_t);
hxge_status_t hxge_alloc_mem_pool(p_hxge_t);
static void hxge_free_mem_pool(p_hxge_t);
static hxge_status_t hxge_alloc_rx_mem_pool(p_hxge_t);
static void hxge_free_rx_mem_pool(p_hxge_t);
static hxge_status_t hxge_alloc_tx_mem_pool(p_hxge_t);
static void hxge_free_tx_mem_pool(p_hxge_t);
static hxge_status_t hxge_dma_mem_alloc(p_hxge_t, dma_method_t,
    struct ddi_dma_attr *, size_t, ddi_device_acc_attr_t *, uint_t,
    p_hxge_dma_common_t);
static void hxge_dma_mem_free(p_hxge_dma_common_t);
static hxge_status_t hxge_alloc_rx_buf_dma(p_hxge_t, uint16_t,
    p_hxge_dma_common_t *, size_t, size_t, uint32_t *);
static void hxge_free_rx_buf_dma(p_hxge_t, p_hxge_dma_common_t, uint32_t);
static hxge_status_t hxge_alloc_rx_cntl_dma(p_hxge_t, uint16_t,
    p_hxge_dma_common_t *, struct ddi_dma_attr *, size_t);
static void hxge_free_rx_cntl_dma(p_hxge_t, p_hxge_dma_common_t);
static hxge_status_t hxge_alloc_tx_buf_dma(p_hxge_t, uint16_t,
    p_hxge_dma_common_t *, size_t, size_t, uint32_t *);
static void hxge_free_tx_buf_dma(p_hxge_t, p_hxge_dma_common_t, uint32_t);
static hxge_status_t hxge_alloc_tx_cntl_dma(p_hxge_t, uint16_t,
    p_hxge_dma_common_t *, size_t);
static void hxge_free_tx_cntl_dma(p_hxge_t, p_hxge_dma_common_t);
static int hxge_init_common_dev(p_hxge_t);
static void hxge_uninit_common_dev(p_hxge_t);

/*
 * The next declarations are for the GLDv3 interface.
 */
static int hxge_m_start(void *);
static void hxge_m_stop(void *);
static int hxge_m_unicst(void *, const uint8_t *);
static int hxge_m_multicst(void *, boolean_t, const uint8_t *);
static int hxge_m_promisc(void *, boolean_t);
static void hxge_m_ioctl(void *, queue_t *, mblk_t *);
static hxge_status_t hxge_mac_register(p_hxge_t hxgep);

static boolean_t hxge_m_getcapab(void *, mac_capab_t, void *);
static boolean_t hxge_param_locked(mac_prop_id_t pr_num);
static int hxge_m_setprop(void *barg, const char *pr_name,
    mac_prop_id_t pr_num, uint_t pr_valsize, const void *pr_val);
static int hxge_m_getprop(void *barg, const char *pr_name,
    mac_prop_id_t pr_num, uint_t pr_flags, uint_t pr_valsize,
    void *pr_val, uint_t *);
static int hxge_get_def_val(hxge_t *hxgep, mac_prop_id_t pr_num,
    uint_t pr_valsize, void *pr_val);
static int hxge_set_priv_prop(p_hxge_t hxgep, const char *pr_name,
    uint_t pr_valsize, const void *pr_val);
static int hxge_get_priv_prop(p_hxge_t hxgep, const char *pr_name,
    uint_t pr_flags, uint_t pr_valsize, void *pr_val);
static void hxge_link_poll(void *arg);
static void hxge_link_update(p_hxge_t hxge, link_state_t state);
static void hxge_msix_init(p_hxge_t hxgep);
void hxge_check_msix_parity_err(p_hxge_t hxgep);
static uint8_t gen_32bit_parity(uint32_t data, boolean_t odd_parity);

mac_priv_prop_t hxge_priv_props[] = {
	{"_rxdma_intr_time", MAC_PROP_PERM_RW},
	{"_rxdma_intr_pkts", MAC_PROP_PERM_RW},
	{"_class_opt_ipv4_tcp", MAC_PROP_PERM_RW},
	{"_class_opt_ipv4_udp", MAC_PROP_PERM_RW},
	{"_class_opt_ipv4_ah", MAC_PROP_PERM_RW},
	{"_class_opt_ipv4_sctp", MAC_PROP_PERM_RW},
	{"_class_opt_ipv6_tcp", MAC_PROP_PERM_RW},
	{"_class_opt_ipv6_udp", MAC_PROP_PERM_RW},
	{"_class_opt_ipv6_ah", MAC_PROP_PERM_RW},
	{"_class_opt_ipv6_sctp", MAC_PROP_PERM_RW}
};

#define	HXGE_MAX_PRIV_PROPS	\
	(sizeof (hxge_priv_props)/sizeof (mac_priv_prop_t))

#define	HXGE_MAGIC		0x4E584745UL
#define	MAX_DUMP_SZ		256

#define	HXGE_M_CALLBACK_FLAGS	\
	(MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP)

extern mblk_t *hxge_m_tx(void *arg, mblk_t *mp);
extern hxge_status_t hxge_pfc_set_default_mac_addr(p_hxge_t hxgep);

static mac_callbacks_t hxge_m_callbacks = {
	HXGE_M_CALLBACK_FLAGS,
	hxge_m_stat,
	hxge_m_start,
	hxge_m_stop,
	hxge_m_promisc,
	hxge_m_multicst,
	hxge_m_unicst,
	hxge_m_tx,
	hxge_m_ioctl,
	hxge_m_getcapab,
	NULL,
	NULL,
	hxge_m_setprop,
	hxge_m_getprop
};

/* Enable debug messages as necessary. */
uint64_t hxge_debug_level = 0;

/*
 * This list contains the instance structures for the Hydra
 * devices present in the system. The lock exists to guarantee
 * mutually exclusive access to the list.
 */
void *hxge_list = NULL;
void *hxge_hw_list = NULL;
hxge_os_mutex_t hxge_common_lock;

extern uint64_t hpi_debug_level;

extern hxge_status_t hxge_ldgv_init();
extern hxge_status_t hxge_ldgv_uninit();
extern hxge_status_t hxge_intr_ldgv_init();
extern void hxge_fm_init(p_hxge_t hxgep, ddi_device_acc_attr_t *reg_attr,
    ddi_device_acc_attr_t *desc_attr, ddi_dma_attr_t *dma_attr);
extern void hxge_fm_fini(p_hxge_t hxgep);

/*
 * Count used to maintain the number of buffers being used
 * by Hydra instances and loaned up to the upper layers.
 */
uint32_t hxge_mblks_pending = 0;
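
/*
 * Hedged note on hxge_mblks_pending: in drivers of this family the count
 * is typically incremented when a receive buffer is loaned upstream
 * (e.g. via desballoc(9F)) and decremented in the free callback when the
 * stack returns the mblk; a nonzero count is what keeps the module from
 * unloading while buffers are still outstanding. The actual bookkeeping
 * is in the RX buffer code, not in this file.
 */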

/*
 * Device register access attributes for PIO.
 */
static ddi_device_acc_attr_t hxge_dev_reg_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
};

/*
 * Device descriptor access attributes for DMA.
 */
static ddi_device_acc_attr_t hxge_dev_desc_dma_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};

/*
 * Device buffer access attributes for DMA.
 */
static ddi_device_acc_attr_t hxge_dev_buf_dma_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_BE_ACC,
	DDI_STRICTORDER_ACC
};

ddi_dma_attr_t hxge_rx_rcr_desc_dma_attr = {
	DMA_ATTR_V0,		/* version number. */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
	0x80000,		/* alignment */
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	1,			/* scatter/gather list length */
	(unsigned int)1,	/* granularity */
	0			/* attribute flags */
};

ddi_dma_attr_t hxge_tx_desc_dma_attr = {
	DMA_ATTR_V0,		/* version number. */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
	0x100000,		/* alignment */
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	1,			/* scatter/gather list length */
	(unsigned int)1,	/* granularity */
	0			/* attribute flags */
};

ddi_dma_attr_t hxge_rx_rbr_desc_dma_attr = {
	DMA_ATTR_V0,		/* version number. */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
	0x40000,		/* alignment */
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	1,			/* scatter/gather list length */
	(unsigned int)1,	/* granularity */
	0			/* attribute flags */
};

ddi_dma_attr_t hxge_rx_mbox_dma_attr = {
	DMA_ATTR_V0,		/* version number. */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
#if defined(_BIG_ENDIAN)
	0x2000,			/* alignment */
#else
	0x1000,			/* alignment */
#endif
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	5,			/* scatter/gather list length */
	(unsigned int)1,	/* granularity */
	0			/* attribute flags */
};

ddi_dma_attr_t hxge_tx_dma_attr = {
	DMA_ATTR_V0,		/* version number. */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
#if defined(_BIG_ENDIAN)
	0x2000,			/* alignment */
#else
	0x1000,			/* alignment */
#endif
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	5,			/* scatter/gather list length */
	(unsigned int)1,	/* granularity */
	0			/* attribute flags */
};
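
/*
 * A note on the DMA attribute choices above and below (inferred from the
 * values, not from the Hydra PRM): each descriptor-ring attribute uses a
 * single scatter/gather segment (sgllen 1), so a ring is always
 * physically contiguous, with large alignments (0x80000 for the RCR,
 * 0x100000 for the TX ring, 0x40000 for the RBR), presumably so a ring
 * never straddles such a boundary. The mailbox and TX data attributes
 * allow up to 5 segments, and the RX data attribute enables
 * DDI_DMA_RELAXED_ORDERING for streaming packet buffers.
 */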

ddi_dma_attr_t hxge_rx_dma_attr = {
	DMA_ATTR_V0,		/* version number. */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
	0x10000,		/* alignment */
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	1,			/* scatter/gather list length */
	(unsigned int)1,	/* granularity */
	DDI_DMA_RELAXED_ORDERING /* attribute flags */
};

ddi_dma_lim_t hxge_dma_limits = {
	(uint_t)0,		/* dlim_addr_lo */
	(uint_t)0xffffffff,	/* dlim_addr_hi */
	(uint_t)0xffffffff,	/* dlim_cntr_max */
	(uint_t)0xfc00fc,	/* dlim_burstsizes for 32 and 64 bit xfers */
	0x1,			/* dlim_minxfer */
	1024			/* dlim_speed */
};

dma_method_t hxge_force_dma = DVMA;

/*
 * dma chunk sizes.
 *
 * Try to allocate the largest possible size
 * so that fewer DMA chunks need to be managed.
 */
size_t alloc_sizes[] = {
	0x1000, 0x2000, 0x4000, 0x8000,
	0x10000, 0x20000, 0x40000, 0x80000,
	0x100000, 0x200000, 0x400000, 0x800000, 0x1000000
};

/*
 * hxge_attach - attach(9E) entry point. Handles DDI_ATTACH as well as
 * the DDI_RESUME and DDI_PM_RESUME commands.
 */
static int
hxge_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	p_hxge_t	hxgep = NULL;
	int		instance;
	int		status = DDI_SUCCESS;

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_attach"));

	/*
	 * Get the device instance since we'll need to setup or retrieve a soft
	 * state for this instance.
	 */
	instance = ddi_get_instance(dip);

	switch (cmd) {
	case DDI_ATTACH:
		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_ATTACH"));
		break;

	case DDI_RESUME:
		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_RESUME"));
		hxgep = (p_hxge_t)ddi_get_soft_state(hxge_list, instance);
		if (hxgep == NULL) {
			status = DDI_FAILURE;
			break;
		}
		if (hxgep->dip != dip) {
			status = DDI_FAILURE;
			break;
		}
		if (hxgep->suspended == DDI_PM_SUSPEND) {
			status = ddi_dev_is_needed(hxgep->dip, 0, 1);
		} else {
			(void) hxge_resume(hxgep);
		}
		goto hxge_attach_exit;

	case DDI_PM_RESUME:
		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_PM_RESUME"));
		hxgep = (p_hxge_t)ddi_get_soft_state(hxge_list, instance);
		if (hxgep == NULL) {
			status = DDI_FAILURE;
			break;
		}
		if (hxgep->dip != dip) {
			status = DDI_FAILURE;
			break;
		}
		(void) hxge_resume(hxgep);
		goto hxge_attach_exit;

	default:
		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing unknown"));
		status = DDI_FAILURE;
		goto hxge_attach_exit;
	}

	if (ddi_soft_state_zalloc(hxge_list, instance) == DDI_FAILURE) {
		status = DDI_FAILURE;
		HXGE_ERROR_MSG((hxgep, DDI_CTL,
		    "ddi_soft_state_zalloc failed"));
		goto hxge_attach_exit;
	}

	hxgep = ddi_get_soft_state(hxge_list, instance);
	if (hxgep == NULL) {
		status = HXGE_ERROR;
		HXGE_ERROR_MSG((hxgep, DDI_CTL,
		    "ddi_get_soft_state failed"));
		goto hxge_attach_fail2;
	}

	hxgep->drv_state = 0;
	hxgep->dip = dip;
	hxgep->instance = instance;
	hxgep->p_dip = ddi_get_parent(dip);
	hxgep->hxge_debug_level = hxge_debug_level;
	hpi_debug_level = hxge_debug_level;

	hxge_fm_init(hxgep, &hxge_dev_reg_acc_attr, &hxge_dev_desc_dma_acc_attr,
	    &hxge_rx_dma_attr);

	status = hxge_map_regs(hxgep);
	if (status != HXGE_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "hxge_map_regs failed"));
		goto hxge_attach_fail3;
	}

	/* Scrub the MSI-X memory */
	hxge_msix_init(hxgep);

	status = hxge_init_common_dev(hxgep);
	if (status != HXGE_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "hxge_init_common_dev failed"));
		goto hxge_attach_fail4;
	}

	/*
	 * Setup the Ndd parameters for this instance.
	 */
	hxge_init_param(hxgep);

	/*
	 * Setup Register Tracing Buffer.
	 */
	hpi_rtrace_buf_init((rtrace_t *)&hpi_rtracebuf);

	/* init stats ptr */
	hxge_init_statsp(hxgep);

	status = hxge_setup_mutexes(hxgep);
	if (status != HXGE_OK) {
		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "set mutex failed"));
		goto hxge_attach_fail;
	}

	status = hxge_get_config_properties(hxgep);
	if (status != HXGE_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "get_hw create failed"));
		goto hxge_attach_fail;
	}

	/*
	 * Setup the Kstats for the driver.
	 */
	hxge_setup_kstats(hxgep);
	hxge_setup_param(hxgep);

	status = hxge_setup_system_dma_pages(hxgep);
	if (status != HXGE_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "set dma page failed"));
		goto hxge_attach_fail;
	}

	hxge_hw_id_init(hxgep);
	hxge_hw_init_niu_common(hxgep);

	status = hxge_setup_dev(hxgep);
	if (status != DDI_SUCCESS) {
		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "set dev failed"));
		goto hxge_attach_fail;
	}

	status = hxge_add_intrs(hxgep);
	if (status != DDI_SUCCESS) {
		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "add_intr failed"));
		goto hxge_attach_fail;
	}

	status = hxge_add_soft_intrs(hxgep);
	if (status != DDI_SUCCESS) {
		HXGE_DEBUG_MSG((hxgep, HXGE_ERR_CTL, "add_soft_intr failed"));
		goto hxge_attach_fail;
	}

	/*
	 * Enable interrupts.
	 */
	hxge_intrs_enable(hxgep);

	if ((status = hxge_mac_register(hxgep)) != HXGE_OK) {
		HXGE_DEBUG_MSG((hxgep, DDI_CTL,
		    "unable to register to mac layer (%d)", status));
		goto hxge_attach_fail;
	}
	mac_link_update(hxgep->mach, LINK_STATE_UNKNOWN);

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "registered to mac (instance %d)",
	    instance));

	goto hxge_attach_exit;
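
/*
 * Failure unwinding: the labels below run in reverse order of the setup
 * steps above and fall through one another, so a jump to any label undoes
 * that stage and every earlier one. hxge_attach_fail is the catch-all used
 * once enough state exists for hxge_unattach() to tear everything down.
 */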
hxge_attach_fail:
	hxge_unattach(hxgep);
	goto hxge_attach_fail1;

hxge_attach_fail5:
	/*
	 * Tear down the ndd parameters setup.
	 */
	hxge_destroy_param(hxgep);

	/*
	 * Tear down the kstat setup.
	 */
	hxge_destroy_kstats(hxgep);

hxge_attach_fail4:
	if (hxgep->hxge_hw_p) {
		hxge_uninit_common_dev(hxgep);
		hxgep->hxge_hw_p = NULL;
	}
hxge_attach_fail3:
	/*
	 * Unmap the register setup.
	 */
	hxge_unmap_regs(hxgep);

	hxge_fm_fini(hxgep);

hxge_attach_fail2:
	ddi_soft_state_free(hxge_list, instance);

hxge_attach_fail1:
	if (status != HXGE_OK)
		status = (HXGE_ERROR | HXGE_DDI_FAILED);
	hxgep = NULL;

hxge_attach_exit:
	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_attach status = 0x%08x",
	    status));

	return (status);
}

static int
hxge_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int		status = DDI_SUCCESS;
	int		instance;
	p_hxge_t	hxgep = NULL;

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_detach"));
	instance = ddi_get_instance(dip);
	hxgep = ddi_get_soft_state(hxge_list, instance);
	if (hxgep == NULL) {
		status = DDI_FAILURE;
		goto hxge_detach_exit;
	}

	switch (cmd) {
	case DDI_DETACH:
		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_DETACH"));
		break;

	case DDI_PM_SUSPEND:
		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_PM_SUSPEND"));
		hxgep->suspended = DDI_PM_SUSPEND;
		hxge_suspend(hxgep);
		break;

	case DDI_SUSPEND:
		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_SUSPEND"));
		if (hxgep->suspended != DDI_PM_SUSPEND) {
			hxgep->suspended = DDI_SUSPEND;
			hxge_suspend(hxgep);
		}
		break;

	default:
		status = DDI_FAILURE;
		break;
	}

	if (cmd != DDI_DETACH)
		goto hxge_detach_exit;

	/*
	 * Stop the xcvr polling.
	 */
	hxgep->suspended = cmd;

	if (hxgep->mach && (status = mac_unregister(hxgep->mach)) != 0) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "<== hxge_detach status = 0x%08X", status));
		return (DDI_FAILURE);
	}
	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
	    "<== hxge_detach (mac_unregister) status = 0x%08X", status));

	hxge_unattach(hxgep);
	hxgep = NULL;

hxge_detach_exit:
	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_detach status = 0x%08X",
	    status));

	return (status);
}

static void
hxge_unattach(p_hxge_t hxgep)
{
	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_unattach"));

	if (hxgep == NULL || hxgep->dev_regs == NULL) {
		return;
	}

	if (hxgep->hxge_hw_p) {
		hxge_uninit_common_dev(hxgep);
		hxgep->hxge_hw_p = NULL;
	}

	if (hxgep->hxge_timerid) {
		hxge_stop_timer(hxgep, hxgep->hxge_timerid);
		hxgep->hxge_timerid = 0;
	}

	/* Stop any further interrupts. */
	hxge_remove_intrs(hxgep);

	/* Remove soft interrupts */
	hxge_remove_soft_intrs(hxgep);

	/* Stop the device and free resources. */
	hxge_destroy_dev(hxgep);

	/* Tear down the ndd parameters setup. */
	hxge_destroy_param(hxgep);

	/* Tear down the kstat setup. */
	hxge_destroy_kstats(hxgep);

	/*
	 * Remove the list of ndd parameters which were setup during attach.
	 */
	if (hxgep->dip) {
		HXGE_DEBUG_MSG((hxgep, OBP_CTL,
		    " hxge_unattach: remove all properties"));
		(void) ddi_prop_remove_all(hxgep->dip);
	}

	/*
	 * Reset RDC, TDC, PFC, and VMAC blocks from PEU to clear any
	 * previous state before unmapping the registers.
	 */
	HXGE_REG_WR32(hxgep->hpi_handle, BLOCK_RESET, 0x0000001E);
	HXGE_DELAY(1000);

	/*
	 * Unmap the register setup.
	 */
	hxge_unmap_regs(hxgep);

	hxge_fm_fini(hxgep);

	/* Destroy all mutexes. */
	hxge_destroy_mutexes(hxgep);

	/*
	 * Free the soft state data structures allocated with this instance.
	 */
	ddi_soft_state_free(hxge_list, hxgep->instance);

	HXGE_DEBUG_MSG((NULL, DDI_CTL, "<== hxge_unattach"));
}
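
/*
 * Note on the mappings below: the device exports three register sets
 * (visible in the ddi_regs_map_setup(9F) calls): reg set 0 is PCI
 * configuration space, reg set 1 is the Hydra device register space,
 * and reg set 2 is the MSI/MSI-X register space.
 */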
static hxge_status_t
hxge_map_regs(p_hxge_t hxgep)
{
	int		ddi_status = DDI_SUCCESS;
	p_dev_regs_t	dev_regs;

#ifdef	HXGE_DEBUG
	char		*sysname;
#endif

	off_t		regsize;
	hxge_status_t	status = HXGE_OK;
	int		nregs;

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_map_regs"));

	if (ddi_dev_nregs(hxgep->dip, &nregs) != DDI_SUCCESS)
		return (HXGE_ERROR);

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "hxge_map_regs: nregs: %d", nregs));

	hxgep->dev_regs = NULL;
	dev_regs = KMEM_ZALLOC(sizeof (dev_regs_t), KM_SLEEP);
	dev_regs->hxge_regh = NULL;
	dev_regs->hxge_pciregh = NULL;
	dev_regs->hxge_msix_regh = NULL;

	(void) ddi_dev_regsize(hxgep->dip, 0, &regsize);
	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
	    "hxge_map_regs: pci config size 0x%x", regsize));

	ddi_status = ddi_regs_map_setup(hxgep->dip, 0,
	    (caddr_t *)&(dev_regs->hxge_pciregp), 0, 0,
	    &hxge_dev_reg_acc_attr, &dev_regs->hxge_pciregh);
	if (ddi_status != DDI_SUCCESS) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "ddi_map_regs, hxge bus config regs failed"));
		goto hxge_map_regs_fail0;
	}

	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
	    "hxge_map_reg: PCI config addr 0x%0llx handle 0x%0llx",
	    dev_regs->hxge_pciregp,
	    dev_regs->hxge_pciregh));

	(void) ddi_dev_regsize(hxgep->dip, 1, &regsize);
	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
	    "hxge_map_regs: pio size 0x%x", regsize));

	/* set up the device mapped register */
	ddi_status = ddi_regs_map_setup(hxgep->dip, 1,
	    (caddr_t *)&(dev_regs->hxge_regp), 0, 0,
	    &hxge_dev_reg_acc_attr, &dev_regs->hxge_regh);

	if (ddi_status != DDI_SUCCESS) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "ddi_map_regs for Hydra global reg failed"));
		goto hxge_map_regs_fail1;
	}

	/* set up the msi/msi-x mapped register */
	(void) ddi_dev_regsize(hxgep->dip, 2, &regsize);
	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
	    "hxge_map_regs: msix size 0x%x", regsize));

	ddi_status = ddi_regs_map_setup(hxgep->dip, 2,
	    (caddr_t *)&(dev_regs->hxge_msix_regp), 0, 0,
	    &hxge_dev_reg_acc_attr, &dev_regs->hxge_msix_regh);

	if (ddi_status != DDI_SUCCESS) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "ddi_map_regs for msi reg failed"));
		goto hxge_map_regs_fail2;
	}

	hxgep->dev_regs = dev_regs;

	HPI_PCI_ACC_HANDLE_SET(hxgep, dev_regs->hxge_pciregh);
	HPI_PCI_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_pciregp);
	HPI_MSI_ACC_HANDLE_SET(hxgep, dev_regs->hxge_msix_regh);
	HPI_MSI_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_msix_regp);

	HPI_ACC_HANDLE_SET(hxgep, dev_regs->hxge_regh);
	HPI_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_regp);

	HPI_REG_ACC_HANDLE_SET(hxgep, dev_regs->hxge_regh);
	HPI_REG_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_regp);

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "hxge_map_reg: hardware addr 0x%0llx "
	    " handle 0x%0llx", dev_regs->hxge_regp, dev_regs->hxge_regh));

	goto hxge_map_regs_exit;

hxge_map_regs_fail3:
	if (dev_regs->hxge_msix_regh) {
		ddi_regs_map_free(&dev_regs->hxge_msix_regh);
	}

hxge_map_regs_fail2:
	if (dev_regs->hxge_regh) {
		ddi_regs_map_free(&dev_regs->hxge_regh);
	}

hxge_map_regs_fail1:
	if (dev_regs->hxge_pciregh) {
		ddi_regs_map_free(&dev_regs->hxge_pciregh);
	}

hxge_map_regs_fail0:
	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "Freeing register set memory"));
	kmem_free(dev_regs, sizeof (dev_regs_t));

hxge_map_regs_exit:
	if (ddi_status != DDI_SUCCESS)
		status |= (HXGE_ERROR | HXGE_DDI_FAILED);
	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_map_regs"));
	return (status);
}

static void
hxge_unmap_regs(p_hxge_t hxgep)
{
	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_unmap_regs"));
	if (hxgep->dev_regs) {
		if (hxgep->dev_regs->hxge_pciregh) {
			HXGE_DEBUG_MSG((hxgep, DDI_CTL,
			    "==> hxge_unmap_regs: bus"));
			ddi_regs_map_free(&hxgep->dev_regs->hxge_pciregh);
			hxgep->dev_regs->hxge_pciregh = NULL;
		}

		if (hxgep->dev_regs->hxge_regh) {
			HXGE_DEBUG_MSG((hxgep, DDI_CTL,
			    "==> hxge_unmap_regs: device registers"));
			ddi_regs_map_free(&hxgep->dev_regs->hxge_regh);
			hxgep->dev_regs->hxge_regh = NULL;
		}

		if (hxgep->dev_regs->hxge_msix_regh) {
			HXGE_DEBUG_MSG((hxgep, DDI_CTL,
			    "==> hxge_unmap_regs: device interrupts"));
			ddi_regs_map_free(&hxgep->dev_regs->hxge_msix_regh);
			hxgep->dev_regs->hxge_msix_regh = NULL;
		}
		kmem_free(hxgep->dev_regs, sizeof (dev_regs_t));
		hxgep->dev_regs = NULL;
	}
	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_unmap_regs"));
}

static hxge_status_t
hxge_setup_mutexes(p_hxge_t hxgep)
{
	int		ddi_status = DDI_SUCCESS;
	hxge_status_t	status = HXGE_OK;

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_setup_mutexes"));

	/*
	 * Get the interrupt cookie so the mutexes can be initialized.
	 */
	ddi_status = ddi_get_iblock_cookie(hxgep->dip, 0,
	    &hxgep->interrupt_cookie);

	if (ddi_status != DDI_SUCCESS) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "<== hxge_setup_mutexes: failed 0x%x", ddi_status));
		goto hxge_setup_mutexes_exit;
	}

	/*
	 * Initialize mutexes for this device.
	 */
	MUTEX_INIT(hxgep->genlock, NULL,
	    MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);
	MUTEX_INIT(&hxgep->ouraddr_lock, NULL,
	    MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);
	RW_INIT(&hxgep->filter_lock, NULL,
	    RW_DRIVER, (void *) hxgep->interrupt_cookie);
	MUTEX_INIT(&hxgep->pio_lock, NULL,
	    MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);
	MUTEX_INIT(&hxgep->timeout.lock, NULL,
	    MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);

hxge_setup_mutexes_exit:
	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
	    "<== hxge_setup_mutexes status = %x", status));

	if (ddi_status != DDI_SUCCESS)
		status |= (HXGE_ERROR | HXGE_DDI_FAILED);

	return (status);
}

static void
hxge_destroy_mutexes(p_hxge_t hxgep)
{
	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_destroy_mutexes"));
	RW_DESTROY(&hxgep->filter_lock);
	MUTEX_DESTROY(&hxgep->ouraddr_lock);
	MUTEX_DESTROY(hxgep->genlock);
	MUTEX_DESTROY(&hxgep->pio_lock);
	MUTEX_DESTROY(&hxgep->timeout.lock);

	if (hxge_debug_init == 1) {
		MUTEX_DESTROY(&hxgedebuglock);
		hxge_debug_init = 0;
	}

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_destroy_mutexes"));
}

hxge_status_t
hxge_init(p_hxge_t hxgep)
{
	hxge_status_t status = HXGE_OK;

	HXGE_DEBUG_MSG((hxgep, STR_CTL, "==> hxge_init"));

	if (hxgep->drv_state & STATE_HW_INITIALIZED) {
		return (status);
	}

	/*
	 * Allocate system memory for the receive/transmit buffer blocks and
	 * receive/transmit descriptor rings.
	 */
	status = hxge_alloc_mem_pool(hxgep);
	if (status != HXGE_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "alloc mem failed\n"));
		goto hxge_init_fail1;
	}

	/*
	 * Initialize and enable TXDMA channels.
	 */
	status = hxge_init_txdma_channels(hxgep);
	if (status != HXGE_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init txdma failed\n"));
		goto hxge_init_fail3;
	}

	/*
	 * Initialize and enable RXDMA channels.
	 */
	status = hxge_init_rxdma_channels(hxgep);
	if (status != HXGE_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init rxdma failed\n"));
		goto hxge_init_fail4;
	}

	/*
	 * Initialize TCAM
	 */
	status = hxge_classify_init(hxgep);
	if (status != HXGE_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init classify failed\n"));
		goto hxge_init_fail5;
	}

	/*
	 * Initialize the VMAC block.
	 */
	status = hxge_vmac_init(hxgep);
	if (status != HXGE_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init MAC failed\n"));
		goto hxge_init_fail5;
	}

	/* Bringup - this may be unnecessary when PXE and FCODE are available */
	status = hxge_pfc_set_default_mac_addr(hxgep);
	if (status != HXGE_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "Default Address Failure\n"));
		goto hxge_init_fail5;
	}

	hxge_intrs_enable(hxgep);

	/*
	 * Enable hardware interrupts.
	 */
	hxge_intr_hw_enable(hxgep);
	hxgep->drv_state |= STATE_HW_INITIALIZED;

	goto hxge_init_exit;

hxge_init_fail5:
	hxge_uninit_rxdma_channels(hxgep);
hxge_init_fail4:
	hxge_uninit_txdma_channels(hxgep);
hxge_init_fail3:
	hxge_free_mem_pool(hxgep);
hxge_init_fail1:
	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
	    "<== hxge_init status (failed) = 0x%08x", status));
	return (status);

hxge_init_exit:

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_init status = 0x%08x",
	    status));

	return (status);
}

timeout_id_t
hxge_start_timer(p_hxge_t hxgep, fptrv_t func, int msec)
{
	if ((hxgep->suspended == 0) || (hxgep->suspended == DDI_RESUME)) {
		return (timeout(func, (caddr_t)hxgep,
		    drv_usectohz(1000 * msec)));
	}
	return (NULL);
}

/*ARGSUSED*/
void
hxge_stop_timer(p_hxge_t hxgep, timeout_id_t timerid)
{
	if (timerid) {
		(void) untimeout(timerid);
	}
}
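
/*
 * Usage sketch for the timer helpers above (hypothetical caller shown;
 * the real callers live elsewhere in the driver):
 *
 *	timeout_id_t tid = hxge_start_timer(hxgep, some_func, 1000);
 *	...
 *	hxge_stop_timer(hxgep, tid);
 *
 * hxge_start_timer only arms a timeout(9F) when the instance is not
 * suspended (msec is converted with drv_usectohz) and returns NULL
 * otherwise; hxge_stop_timer tolerates a NULL timer id.
 */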

void
hxge_uninit(p_hxge_t hxgep)
{
	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_uninit"));

	if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
		HXGE_DEBUG_MSG((hxgep, DDI_CTL,
		    "==> hxge_uninit: not initialized"));
		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_uninit"));
		return;
	}

	/* Stop timer */
	if (hxgep->hxge_timerid) {
		hxge_stop_timer(hxgep, hxgep->hxge_timerid);
		hxgep->hxge_timerid = 0;
	}

	(void) hxge_intr_hw_disable(hxgep);

	/* Reset the receive VMAC side. */
	(void) hxge_rx_vmac_disable(hxgep);

	/* Free classification resources */
	(void) hxge_classify_uninit(hxgep);

	/* Reset the transmit/receive DMA side. */
	(void) hxge_txdma_hw_mode(hxgep, HXGE_DMA_STOP);
	(void) hxge_rxdma_hw_mode(hxgep, HXGE_DMA_STOP);

	hxge_uninit_txdma_channels(hxgep);
	hxge_uninit_rxdma_channels(hxgep);

	/* Reset the transmit VMAC side. */
	(void) hxge_tx_vmac_disable(hxgep);

	hxge_free_mem_pool(hxgep);

	hxgep->drv_state &= ~STATE_HW_INITIALIZED;

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_uninit"));
}

void
hxge_get64(p_hxge_t hxgep, p_mblk_t mp)
{
#if defined(__i386)
	size_t		reg;
#else
	uint64_t	reg;
#endif
	uint64_t	regdata;
	int		i, retry;

	bcopy((char *)mp->b_rptr, (char *)&reg, sizeof (uint64_t));
	regdata = 0;
	retry = 1;

	for (i = 0; i < retry; i++) {
		HXGE_REG_RD64(hxgep->hpi_handle, reg, &regdata);
	}
	bcopy((char *)&regdata, (char *)mp->b_rptr, sizeof (uint64_t));
}

void
hxge_put64(p_hxge_t hxgep, p_mblk_t mp)
{
#if defined(__i386)
	size_t		reg;
#else
	uint64_t	reg;
#endif
	uint64_t	buf[2];

	bcopy((char *)mp->b_rptr, (char *)&buf[0], 2 * sizeof (uint64_t));
#if defined(__i386)
	reg = (size_t)buf[0];
#else
	reg = buf[0];
#endif

	HXGE_HPI_PIO_WRITE64(hxgep->hpi_handle, reg, buf[1]);
}

/*ARGSUSED*/
/*VARARGS*/
void
hxge_debug_msg(p_hxge_t hxgep, uint64_t level, char *fmt, ...)
{
	char		msg_buffer[1048];
	char		prefix_buffer[32];
	int		instance;
	uint64_t	debug_level;
	int		cmn_level = CE_CONT;
	va_list		ap;

	debug_level = (hxgep == NULL) ? hxge_debug_level :
	    hxgep->hxge_debug_level;

	if ((level & debug_level) || (level == HXGE_NOTE) ||
	    (level == HXGE_ERR_CTL)) {
		/* do the msg processing */
		if (hxge_debug_init == 0) {
			MUTEX_INIT(&hxgedebuglock, NULL, MUTEX_DRIVER, NULL);
			hxge_debug_init = 1;
		}

		MUTEX_ENTER(&hxgedebuglock);

		if ((level & HXGE_NOTE)) {
			cmn_level = CE_NOTE;
		}

		if (level & HXGE_ERR_CTL) {
			cmn_level = CE_WARN;
		}

		va_start(ap, fmt);
		(void) vsprintf(msg_buffer, fmt, ap);
		va_end(ap);

		if (hxgep == NULL) {
			instance = -1;
			(void) sprintf(prefix_buffer, "%s :", "hxge");
		} else {
			instance = hxgep->instance;
			(void) sprintf(prefix_buffer,
			    "%s%d :", "hxge", instance);
		}

		MUTEX_EXIT(&hxgedebuglock);
		cmn_err(cmn_level, "%s %s\n", prefix_buffer, msg_buffer);
	}
}

char *
hxge_dump_packet(char *addr, int size)
{
	uchar_t		*ap = (uchar_t *)addr;
	int		i;
	static char	etherbuf[1024];
	char		*cp = etherbuf;
	char		digits[] = "0123456789abcdef";

	if (!size)
		size = 60;

	if (size > MAX_DUMP_SZ) {
		/* Dump the leading bytes */
		for (i = 0; i < MAX_DUMP_SZ / 2; i++) {
			if (*ap > 0x0f)
				*cp++ = digits[*ap >> 4];
			*cp++ = digits[*ap++ & 0xf];
			*cp++ = ':';
		}
		for (i = 0; i < 20; i++)
			*cp++ = '.';
		/* Dump the last MAX_DUMP_SZ/2 bytes */
		ap = (uchar_t *)(addr + (size - MAX_DUMP_SZ / 2));
		for (i = 0; i < MAX_DUMP_SZ / 2; i++) {
			if (*ap > 0x0f)
				*cp++ = digits[*ap >> 4];
			*cp++ = digits[*ap++ & 0xf];
			*cp++ = ':';
		}
	} else {
		for (i = 0; i < size; i++) {
			if (*ap > 0x0f)
				*cp++ = digits[*ap >> 4];
			*cp++ = digits[*ap++ & 0xf];
			*cp++ = ':';
		}
	}
	*--cp = 0;
	return (etherbuf);
}
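
/*
 * hxge_dump_packet formats a buffer as colon-separated hex bytes with
 * leading zero nibbles suppressed, e.g. "1:23:ab:...". Buffers larger
 * than MAX_DUMP_SZ are elided in the middle: the first and last
 * MAX_DUMP_SZ/2 bytes are printed around a run of dots. The result is
 * returned in a static buffer, so the routine is not reentrant.
 */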

static void
hxge_suspend(p_hxge_t hxgep)
{
	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_suspend"));

	/*
	 * Stop the link status timer before hxge_intrs_disable() to avoid
	 * accessing the MSI-X table simultaneously. Note that the timer
	 * routine polls for MSI-X parity errors.
	 */
	MUTEX_ENTER(&hxgep->timeout.lock);
	if (hxgep->timeout.id)
		(void) untimeout(hxgep->timeout.id);
	MUTEX_EXIT(&hxgep->timeout.lock);

	hxge_intrs_disable(hxgep);
	hxge_destroy_dev(hxgep);

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_suspend"));
}

static hxge_status_t
hxge_resume(p_hxge_t hxgep)
{
	hxge_status_t status = HXGE_OK;

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_resume"));
	hxgep->suspended = DDI_RESUME;

	(void) hxge_rxdma_hw_mode(hxgep, HXGE_DMA_START);
	(void) hxge_txdma_hw_mode(hxgep, HXGE_DMA_START);

	(void) hxge_rx_vmac_enable(hxgep);
	(void) hxge_tx_vmac_enable(hxgep);

	hxge_intrs_enable(hxgep);

	hxgep->suspended = 0;

	/*
	 * Resume the link status timer after hxge_intrs_enable to avoid
	 * accessing the MSI-X table simultaneously.
	 */
	MUTEX_ENTER(&hxgep->timeout.lock);
	hxgep->timeout.id = timeout(hxge_link_poll, (void *)hxgep,
	    hxgep->timeout.ticks);
	MUTEX_EXIT(&hxgep->timeout.lock);

	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
	    "<== hxge_resume status = 0x%x", status));

	return (status);
}

hxge_status_t
hxge_setup_dev(p_hxge_t hxgep)
{
	hxge_status_t status = HXGE_OK;

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_setup_dev"));

	status = hxge_link_init(hxgep);
	if (fm_check_acc_handle(hxgep->dev_regs->hxge_regh) != DDI_FM_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "Bad register acc handle"));
		status = HXGE_ERROR;
	}

	if (status != HXGE_OK) {
		HXGE_DEBUG_MSG((hxgep, MAC_CTL,
		    " hxge_setup_dev status (link init 0x%08x)", status));
		goto hxge_setup_dev_exit;
	}

hxge_setup_dev_exit:
	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
	    "<== hxge_setup_dev status = 0x%08x", status));

	return (status);
}

static void
hxge_destroy_dev(p_hxge_t hxgep)
{
	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_destroy_dev"));

	(void) hxge_hw_stop(hxgep);

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_destroy_dev"));
}

static hxge_status_t
hxge_setup_system_dma_pages(p_hxge_t hxgep)
{
	int			ddi_status = DDI_SUCCESS;
	uint_t			count;
	ddi_dma_cookie_t	cookie;
	uint_t			iommu_pagesize;
	hxge_status_t		status = HXGE_OK;

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_setup_system_dma_pages"));

	hxgep->sys_page_sz = ddi_ptob(hxgep->dip, (ulong_t)1);
	iommu_pagesize = dvma_pagesize(hxgep->dip);

	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
	    " hxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
	    " default_block_size %d iommu_pagesize %d",
	    hxgep->sys_page_sz, ddi_ptob(hxgep->dip, (ulong_t)1),
	    hxgep->rx_default_block_size, iommu_pagesize));

	if (iommu_pagesize != 0) {
		if (hxgep->sys_page_sz == iommu_pagesize) {
			/* Hydra supports up to 8K pages */
			if (iommu_pagesize > 0x2000)
				hxgep->sys_page_sz = 0x2000;
		} else {
			if (hxgep->sys_page_sz > iommu_pagesize)
				hxgep->sys_page_sz = iommu_pagesize;
		}
	}

	hxgep->sys_page_mask = ~(hxgep->sys_page_sz - 1);

	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
	    "==> hxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
	    "default_block_size %d page mask %d",
	    hxgep->sys_page_sz, ddi_ptob(hxgep->dip, (ulong_t)1),
	    hxgep->rx_default_block_size, hxgep->sys_page_mask));

	switch (hxgep->sys_page_sz) {
	default:
		hxgep->sys_page_sz = 0x1000;
		hxgep->sys_page_mask = ~(hxgep->sys_page_sz - 1);
		hxgep->rx_default_block_size = 0x1000;
		hxgep->rx_bksize_code = RBR_BKSIZE_4K;
		break;
	case 0x1000:
		hxgep->rx_default_block_size = 0x1000;
		hxgep->rx_bksize_code = RBR_BKSIZE_4K;
		break;
	case 0x2000:
		hxgep->rx_default_block_size = 0x2000;
		hxgep->rx_bksize_code = RBR_BKSIZE_8K;
		break;
	}

	hxge_rx_dma_attr.dma_attr_align = hxgep->sys_page_sz;
	hxge_tx_dma_attr.dma_attr_align = hxgep->sys_page_sz;
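
	/*
	 * The burst-size probe below is indirect: it allocates a spare
	 * DMA handle and binds a throwaway kernel address (the handle
	 * pointer itself serves as the buffer) purely so that
	 * ddi_dma_burstsizes(9F) can be queried, then unbinds and frees
	 * the handle. No data is ever transferred through this binding.
	 */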
	/*
	 * Get the system DMA burst size.
	 */
	ddi_status = ddi_dma_alloc_handle(hxgep->dip, &hxge_tx_dma_attr,
	    DDI_DMA_DONTWAIT, 0, &hxgep->dmasparehandle);
	if (ddi_status != DDI_SUCCESS) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "ddi_dma_alloc_handle: failed status 0x%x", ddi_status));
		goto hxge_get_soft_properties_exit;
	}

	ddi_status = ddi_dma_addr_bind_handle(hxgep->dmasparehandle, NULL,
	    (caddr_t)hxgep->dmasparehandle, sizeof (hxgep->dmasparehandle),
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, 0,
	    &cookie, &count);
	if (ddi_status != DDI_DMA_MAPPED) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "Binding spare handle to find system burstsize failed."));
		ddi_status = DDI_FAILURE;
		goto hxge_get_soft_properties_fail1;
	}

	hxgep->sys_burst_sz = ddi_dma_burstsizes(hxgep->dmasparehandle);
	(void) ddi_dma_unbind_handle(hxgep->dmasparehandle);

hxge_get_soft_properties_fail1:
	ddi_dma_free_handle(&hxgep->dmasparehandle);

hxge_get_soft_properties_exit:

	if (ddi_status != DDI_SUCCESS)
		status |= (HXGE_ERROR | HXGE_DDI_FAILED);

	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
	    "<== hxge_setup_system_dma_pages status = 0x%08x", status));

	return (status);
}

hxge_status_t
hxge_alloc_mem_pool(p_hxge_t hxgep)
{
	hxge_status_t status = HXGE_OK;

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_alloc_mem_pool"));

	status = hxge_alloc_rx_mem_pool(hxgep);
	if (status != HXGE_OK) {
		return (HXGE_ERROR);
	}

	status = hxge_alloc_tx_mem_pool(hxgep);
	if (status != HXGE_OK) {
		hxge_free_rx_mem_pool(hxgep);
		return (HXGE_ERROR);
	}

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_alloc_mem_pool"));
	return (HXGE_OK);
}

static void
hxge_free_mem_pool(p_hxge_t hxgep)
{
	HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_free_mem_pool"));

	hxge_free_rx_mem_pool(hxgep);
	hxge_free_tx_mem_pool(hxgep);

	HXGE_DEBUG_MSG((hxgep, MEM_CTL, "<== hxge_free_mem_pool"));
}

static hxge_status_t
hxge_alloc_rx_mem_pool(p_hxge_t hxgep)
{
	int			i, j;
	uint32_t		ndmas, st_rdc;
	p_hxge_dma_pt_cfg_t	p_all_cfgp;
	p_hxge_hw_pt_cfg_t	p_cfgp;
	p_hxge_dma_pool_t	dma_poolp;
	p_hxge_dma_common_t	*dma_buf_p;
	p_hxge_dma_pool_t	dma_rbr_cntl_poolp;
	p_hxge_dma_common_t	*dma_rbr_cntl_p;
	p_hxge_dma_pool_t	dma_rcr_cntl_poolp;
	p_hxge_dma_common_t	*dma_rcr_cntl_p;
	p_hxge_dma_pool_t	dma_mbox_cntl_poolp;
	p_hxge_dma_common_t	*dma_mbox_cntl_p;
	size_t			rx_buf_alloc_size;
	size_t			rx_rbr_cntl_alloc_size;
	size_t			rx_rcr_cntl_alloc_size;
	size_t			rx_mbox_cntl_alloc_size;
	uint32_t		*num_chunks;	/* per dma */
	hxge_status_t		status = HXGE_OK;

	uint32_t		hxge_port_rbr_size;
	uint32_t		hxge_port_rbr_spare_size;
	uint32_t		hxge_port_rcr_size;

	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_rx_mem_pool"));

	p_all_cfgp = (p_hxge_dma_pt_cfg_t)&hxgep->pt_config;
	p_cfgp = (p_hxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
	st_rdc = p_cfgp->start_rdc;
	ndmas = p_cfgp->max_rdcs;

	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
	    " hxge_alloc_rx_mem_pool st_rdc %d ndmas %d", st_rdc, ndmas));

	/*
	 * Allocate memory for each receive DMA channel.
	 */
	dma_poolp = (p_hxge_dma_pool_t)KMEM_ZALLOC(sizeof (hxge_dma_pool_t),
	    KM_SLEEP);
	dma_buf_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
	    sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);

	dma_rbr_cntl_poolp = (p_hxge_dma_pool_t)
	    KMEM_ZALLOC(sizeof (hxge_dma_pool_t), KM_SLEEP);
	dma_rbr_cntl_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
	    sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
	dma_rcr_cntl_poolp = (p_hxge_dma_pool_t)
	    KMEM_ZALLOC(sizeof (hxge_dma_pool_t), KM_SLEEP);
	dma_rcr_cntl_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
	    sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
	dma_mbox_cntl_poolp = (p_hxge_dma_pool_t)
	    KMEM_ZALLOC(sizeof (hxge_dma_pool_t), KM_SLEEP);
	dma_mbox_cntl_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
	    sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);

	num_chunks = (uint32_t *)KMEM_ZALLOC(sizeof (uint32_t) * ndmas,
	    KM_SLEEP);

	/*
	 * Assume that each DMA channel will be configured with the default
	 * block size. RBR block counts must be a multiple of the post
	 * batch count (16).
	 */
	hxge_port_rbr_size = p_all_cfgp->rbr_size;
	hxge_port_rcr_size = p_all_cfgp->rcr_size;

	if (!hxge_port_rbr_size) {
		hxge_port_rbr_size = HXGE_RBR_RBB_DEFAULT;
	}

	if (hxge_port_rbr_size % HXGE_RXDMA_POST_BATCH) {
		hxge_port_rbr_size = (HXGE_RXDMA_POST_BATCH *
		    (hxge_port_rbr_size / HXGE_RXDMA_POST_BATCH + 1));
	}

	p_all_cfgp->rbr_size = hxge_port_rbr_size;
	hxge_port_rbr_spare_size = hxge_rbr_spare_size;

	if (hxge_port_rbr_spare_size % HXGE_RXDMA_POST_BATCH) {
		hxge_port_rbr_spare_size = (HXGE_RXDMA_POST_BATCH *
		    (hxge_port_rbr_spare_size / HXGE_RXDMA_POST_BATCH + 1));
	}

	rx_buf_alloc_size = (hxgep->rx_default_block_size *
	    (hxge_port_rbr_size + hxge_port_rbr_spare_size));

	/*
	 * Addresses of receive block ring, receive completion ring and the
	 * mailbox must be all cache-aligned (64 bytes).
	 */
	rx_rbr_cntl_alloc_size = hxge_port_rbr_size + hxge_port_rbr_spare_size;
	rx_rbr_cntl_alloc_size *= sizeof (rx_desc_t);
	rx_rcr_cntl_alloc_size = sizeof (rcr_entry_t) * hxge_port_rcr_size;
	rx_mbox_cntl_alloc_size = sizeof (rxdma_mailbox_t);

	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_alloc_rx_mem_pool: "
	    "hxge_port_rbr_size = %d hxge_port_rbr_spare_size = %d "
	    "hxge_port_rcr_size = %d rx_rbr_cntl_alloc_size = %d",
	    hxge_port_rbr_size, hxge_port_rbr_spare_size,
	    hxge_port_rcr_size, rx_rbr_cntl_alloc_size));

	hxgep->hxge_port_rbr_size = hxge_port_rbr_size;
	hxgep->hxge_port_rcr_size = hxge_port_rcr_size;
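
	/*
	 * Worked example of the sizing above (illustrative numbers): with
	 * rbr_size = 1000 and no spare, 1000 % 16 != 0, so the ring is
	 * rounded up to 16 * (1000 / 16 + 1) = 1008 blocks. With a 4 KB
	 * default block size the buffer pool for the channel is then
	 * 0x1000 * 1008 bytes, the RBR control area is
	 * 1008 * sizeof (rx_desc_t) bytes, and the RCR area is
	 * hxge_port_rcr_size * sizeof (rcr_entry_t) bytes; all three
	 * control areas must be 64-byte cache aligned.
	 */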

	/*
	 * Allocate memory for receive buffers and descriptor rings. Replace
	 * allocation functions with interface functions provided by the
	 * partition manager when it is available.
	 */

	/*
	 * Allocate memory for the receive buffer blocks.
	 */
	for (i = 0; i < ndmas; i++) {
		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
		    " hxge_alloc_rx_mem_pool to alloc mem: "
		    " dma %d dma_buf_p %llx &dma_buf_p %llx",
		    i, dma_buf_p[i], &dma_buf_p[i]));

		num_chunks[i] = 0;

		status = hxge_alloc_rx_buf_dma(hxgep, st_rdc, &dma_buf_p[i],
		    rx_buf_alloc_size, hxgep->rx_default_block_size,
		    &num_chunks[i]);
		if (status != HXGE_OK) {
			break;
		}

		st_rdc++;
		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
		    " hxge_alloc_rx_mem_pool DONE alloc mem: "
		    "dma %d dma_buf_p %llx &dma_buf_p %llx", i,
		    dma_buf_p[i], &dma_buf_p[i]));
	}

	if (i < ndmas) {
		goto hxge_alloc_rx_mem_fail1;
	}

	/*
	 * Allocate memory for descriptor rings and mailbox.
	 */
	st_rdc = p_cfgp->start_rdc;
	for (j = 0; j < ndmas; j++) {
		if ((status = hxge_alloc_rx_cntl_dma(hxgep, st_rdc,
		    &dma_rbr_cntl_p[j], &hxge_rx_rbr_desc_dma_attr,
		    rx_rbr_cntl_alloc_size)) != HXGE_OK) {
			break;
		}

		if ((status = hxge_alloc_rx_cntl_dma(hxgep, st_rdc,
		    &dma_rcr_cntl_p[j], &hxge_rx_rcr_desc_dma_attr,
		    rx_rcr_cntl_alloc_size)) != HXGE_OK) {
			break;
		}

		if ((status = hxge_alloc_rx_cntl_dma(hxgep, st_rdc,
		    &dma_mbox_cntl_p[j], &hxge_rx_mbox_dma_attr,
		    rx_mbox_cntl_alloc_size)) != HXGE_OK) {
			break;
		}
		st_rdc++;
	}

	if (j < ndmas) {
		goto hxge_alloc_rx_mem_fail2;
	}

	dma_poolp->ndmas = ndmas;
	dma_poolp->num_chunks = num_chunks;
	dma_poolp->buf_allocated = B_TRUE;
	hxgep->rx_buf_pool_p = dma_poolp;
	dma_poolp->dma_buf_pool_p = dma_buf_p;

	dma_rbr_cntl_poolp->ndmas = ndmas;
	dma_rbr_cntl_poolp->buf_allocated = B_TRUE;
	hxgep->rx_rbr_cntl_pool_p = dma_rbr_cntl_poolp;
	dma_rbr_cntl_poolp->dma_buf_pool_p = dma_rbr_cntl_p;

	dma_rcr_cntl_poolp->ndmas = ndmas;
	dma_rcr_cntl_poolp->buf_allocated = B_TRUE;
	hxgep->rx_rcr_cntl_pool_p = dma_rcr_cntl_poolp;
	dma_rcr_cntl_poolp->dma_buf_pool_p = dma_rcr_cntl_p;

	dma_mbox_cntl_poolp->ndmas = ndmas;
	dma_mbox_cntl_poolp->buf_allocated = B_TRUE;
	hxgep->rx_mbox_cntl_pool_p = dma_mbox_cntl_poolp;
	dma_mbox_cntl_poolp->dma_buf_pool_p = dma_mbox_cntl_p;

	goto hxge_alloc_rx_mem_pool_exit;

hxge_alloc_rx_mem_fail2:
	/* Free control buffers */
	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
	    "==> hxge_alloc_rx_mem_pool: freeing control bufs (%d)", j));
	for (; j >= 0; j--) {
		hxge_free_rx_cntl_dma(hxgep,
		    (p_hxge_dma_common_t)dma_rbr_cntl_p[j]);
		hxge_free_rx_cntl_dma(hxgep,
		    (p_hxge_dma_common_t)dma_rcr_cntl_p[j]);
		hxge_free_rx_cntl_dma(hxgep,
		    (p_hxge_dma_common_t)dma_mbox_cntl_p[j]);
		HXGE_DEBUG_MSG((hxgep, DMA_CTL,
		    "==> hxge_alloc_rx_mem_pool: control bufs freed (%d)", j));
	}
	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
	    "==> hxge_alloc_rx_mem_pool: control bufs freed (%d)", j));

hxge_alloc_rx_mem_fail1:
	/* Free data buffers */
	i--;
	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
	    "==> hxge_alloc_rx_mem_pool: freeing data bufs (%d)", i));
	for (; i >= 0; i--) {
		hxge_free_rx_buf_dma(hxgep, (p_hxge_dma_common_t)dma_buf_p[i],
		    num_chunks[i]);
	}
	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
	    "==> hxge_alloc_rx_mem_pool: data bufs freed (%d)", i));

	KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
	KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t));
	KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t));
	KMEM_FREE(dma_rbr_cntl_poolp, sizeof (hxge_dma_pool_t));
	KMEM_FREE(dma_rbr_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
	KMEM_FREE(dma_rcr_cntl_poolp, sizeof (hxge_dma_pool_t));
	KMEM_FREE(dma_rcr_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
	KMEM_FREE(dma_mbox_cntl_poolp, sizeof (hxge_dma_pool_t));
	KMEM_FREE(dma_mbox_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));

hxge_alloc_rx_mem_pool_exit:
	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
	    "<== hxge_alloc_rx_mem_pool:status 0x%08x", status));

	return (status);
}

static void
hxge_free_rx_mem_pool(p_hxge_t hxgep)
{
	uint32_t		i, ndmas;
	p_hxge_dma_pool_t	dma_poolp;
	p_hxge_dma_common_t	*dma_buf_p;
	p_hxge_dma_pool_t	dma_rbr_cntl_poolp;
	p_hxge_dma_common_t	*dma_rbr_cntl_p;
	p_hxge_dma_pool_t	dma_rcr_cntl_poolp;
	p_hxge_dma_common_t	*dma_rcr_cntl_p;
	p_hxge_dma_pool_t	dma_mbox_cntl_poolp;
	p_hxge_dma_common_t	*dma_mbox_cntl_p;
	uint32_t		*num_chunks;

	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_free_rx_mem_pool"));

	dma_poolp = hxgep->rx_buf_pool_p;
	if (dma_poolp == NULL || (!dma_poolp->buf_allocated)) {
		HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_free_rx_mem_pool "
		    "(null rx buf pool or buf not allocated"));
		return;
	}

	dma_rbr_cntl_poolp = hxgep->rx_rbr_cntl_pool_p;
	if (dma_rbr_cntl_poolp == NULL ||
	    (!dma_rbr_cntl_poolp->buf_allocated)) {
		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
		    "<== hxge_free_rx_mem_pool "
		    "(null rbr cntl buf pool or rbr cntl buf not allocated"));
		return;
	}

	dma_rcr_cntl_poolp = hxgep->rx_rcr_cntl_pool_p;
	if (dma_rcr_cntl_poolp == NULL ||
	    (!dma_rcr_cntl_poolp->buf_allocated)) {
		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
		    "<== hxge_free_rx_mem_pool "
		    "(null rcr cntl buf pool or rcr cntl buf not allocated"));
		return;
	}

	dma_mbox_cntl_poolp = hxgep->rx_mbox_cntl_pool_p;
	if (dma_mbox_cntl_poolp == NULL ||
	    (!dma_mbox_cntl_poolp->buf_allocated)) {
		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
		    "<== hxge_free_rx_mem_pool "
		    "(null mbox cntl buf pool or mbox cntl buf not allocated"));
		return;
	}

	dma_buf_p = dma_poolp->dma_buf_pool_p;
	num_chunks = dma_poolp->num_chunks;

	dma_rbr_cntl_p = dma_rbr_cntl_poolp->dma_buf_pool_p;
	dma_rcr_cntl_p = dma_rcr_cntl_poolp->dma_buf_pool_p;
	dma_mbox_cntl_p = dma_mbox_cntl_poolp->dma_buf_pool_p;
	ndmas = dma_rbr_cntl_poolp->ndmas;

	for (i = 0; i < ndmas; i++) {
		hxge_free_rx_buf_dma(hxgep, dma_buf_p[i], num_chunks[i]);
	}

	for (i = 0; i < ndmas; i++) {
		hxge_free_rx_cntl_dma(hxgep, dma_rbr_cntl_p[i]);
		hxge_free_rx_cntl_dma(hxgep, dma_rcr_cntl_p[i]);
		hxge_free_rx_cntl_dma(hxgep, dma_mbox_cntl_p[i]);
	}

	for (i = 0; i < ndmas; i++) {
		KMEM_FREE(dma_buf_p[i],
		    sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK);
		KMEM_FREE(dma_rbr_cntl_p[i], sizeof (hxge_dma_common_t));
		KMEM_FREE(dma_rcr_cntl_p[i], sizeof (hxge_dma_common_t));
		KMEM_FREE(dma_mbox_cntl_p[i], sizeof (hxge_dma_common_t));
	}

	KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
	KMEM_FREE(dma_rbr_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
	KMEM_FREE(dma_rbr_cntl_poolp, sizeof (hxge_dma_pool_t));
	KMEM_FREE(dma_rcr_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
	KMEM_FREE(dma_rcr_cntl_poolp, sizeof (hxge_dma_pool_t));
	KMEM_FREE(dma_mbox_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
	KMEM_FREE(dma_mbox_cntl_poolp, sizeof (hxge_dma_pool_t));
	KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t));
	KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t));

	hxgep->rx_buf_pool_p = NULL;
	hxgep->rx_rbr_cntl_pool_p = NULL;
	hxgep->rx_rcr_cntl_pool_p = NULL;
	hxgep->rx_mbox_cntl_pool_p = NULL;

	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_free_rx_mem_pool"));
}

static hxge_status_t
hxge_alloc_rx_buf_dma(p_hxge_t hxgep, uint16_t dma_channel,
    p_hxge_dma_common_t *dmap,
    size_t alloc_size, size_t block_size, uint32_t *num_chunks)
{
	p_hxge_dma_common_t	rx_dmap;
	hxge_status_t		status = HXGE_OK;
	size_t			total_alloc_size;
	size_t			allocated = 0;
	int			i, size_index, array_size;

	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_rx_buf_dma"));

	rx_dmap = (p_hxge_dma_common_t)
	    KMEM_ZALLOC(sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK, KM_SLEEP);

	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
	    " alloc_rx_buf_dma rdc %d asize %x bsize %x bbuf %llx ",
	    dma_channel, alloc_size, block_size, dmap));

	total_alloc_size = alloc_size;

	i = 0;
	size_index = 0;
	array_size = sizeof (alloc_sizes) / sizeof (size_t);
	while ((size_index < array_size) &&
	    (alloc_sizes[size_index] < alloc_size))
		size_index++;
	if (size_index >= array_size) {
		size_index = array_size - 1;
	}

	while ((allocated < total_alloc_size) &&
	    (size_index >= 0) && (i < HXGE_DMA_BLOCK)) {
		rx_dmap[i].dma_chunk_index = i;
		rx_dmap[i].block_size = block_size;
		rx_dmap[i].alength = alloc_sizes[size_index];
		rx_dmap[i].orig_alength = rx_dmap[i].alength;
		rx_dmap[i].nblocks = alloc_sizes[size_index] / block_size;
		rx_dmap[i].dma_channel = dma_channel;
		rx_dmap[i].contig_alloc_type = B_FALSE;

		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
		    "alloc_rx_buf_dma rdc %d chunk %d bufp %llx size %x "
		    "i %d nblocks %d alength %d",
		    dma_channel, i, &rx_dmap[i], block_size,
		    i, rx_dmap[i].nblocks, rx_dmap[i].alength));
		status = hxge_dma_mem_alloc(hxgep, hxge_force_dma,
		    &hxge_rx_dma_attr, rx_dmap[i].alength,
		    &hxge_dev_buf_dma_acc_attr,
		    DDI_DMA_READ | DDI_DMA_STREAMING,
		    (p_hxge_dma_common_t)(&rx_dmap[i]));
		if (status != HXGE_OK) {
			HXGE_DEBUG_MSG((hxgep, DMA_CTL,
			    " hxge_alloc_rx_buf_dma: Alloc Failed: "
			    " for size: %d", alloc_sizes[size_index]));
			size_index--;
		} else {
			HXGE_DEBUG_MSG((hxgep, DMA_CTL,
			    " alloc_rx_buf_dma allocated rdc %d "
			    "chunk %d size %x dvma %x bufp %llx ",
			    dma_channel, i, rx_dmap[i].alength,
			    rx_dmap[i].ioaddr_pp, &rx_dmap[i]));
			i++;
			allocated += alloc_sizes[size_index];
		}
	}

	if (allocated < total_alloc_size) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    " hxge_alloc_rx_buf_dma failed due to"
		    " allocated(%d) < required(%d)",
		    allocated, total_alloc_size));
		goto hxge_alloc_rx_mem_fail1;
	}

	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
	    " alloc_rx_buf_dma rdc %d allocated %d chunks", dma_channel, i));

	*num_chunks = i;
	*dmap = rx_dmap;

	goto hxge_alloc_rx_mem_exit;

hxge_alloc_rx_mem_fail1:
	KMEM_FREE(rx_dmap, sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK);

hxge_alloc_rx_mem_exit:
	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
	    "<== hxge_alloc_rx_buf_dma status 0x%08x", status));

	return (status);
}
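
/*
 * Illustrative walk-through of the chunk strategy in hxge_alloc_rx_buf_dma
 * above: for a 4 MB request, size_index settles on the smallest
 * alloc_sizes[] entry that covers the request (0x400000 here, or the
 * largest entry if none is big enough). Each successful
 * hxge_dma_mem_alloc() adds one chunk of that size; each failure steps
 * size_index down so the remainder is satisfied with smaller chunks. The
 * loop ends when the total is covered, the sizes are exhausted, or
 * HXGE_DMA_BLOCK chunks are in use, and anything short of the full
 * request is treated as failure.
 */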
return (status); 1891 } 1892 1893 /*ARGSUSED*/ 1894 static void 1895 hxge_free_rx_buf_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap, 1896 uint32_t num_chunks) 1897 { 1898 int i; 1899 1900 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 1901 "==> hxge_free_rx_buf_dma: # of chunks %d", num_chunks)); 1902 1903 for (i = 0; i < num_chunks; i++) { 1904 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 1905 "==> hxge_free_rx_buf_dma: chunk %d dmap 0x%llx", i, dmap)); 1906 hxge_dma_mem_free(dmap++); 1907 } 1908 1909 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_free_rx_buf_dma")); 1910 } 1911 1912 /*ARGSUSED*/ 1913 static hxge_status_t 1914 hxge_alloc_rx_cntl_dma(p_hxge_t hxgep, uint16_t dma_channel, 1915 p_hxge_dma_common_t *dmap, struct ddi_dma_attr *attr, size_t size) 1916 { 1917 p_hxge_dma_common_t rx_dmap; 1918 hxge_status_t status = HXGE_OK; 1919 1920 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_rx_cntl_dma")); 1921 1922 rx_dmap = (p_hxge_dma_common_t) 1923 KMEM_ZALLOC(sizeof (hxge_dma_common_t), KM_SLEEP); 1924 1925 rx_dmap->contig_alloc_type = B_FALSE; 1926 1927 status = hxge_dma_mem_alloc(hxgep, hxge_force_dma, 1928 attr, size, &hxge_dev_desc_dma_acc_attr, 1929 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, rx_dmap); 1930 if (status != HXGE_OK) { 1931 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 1932 " hxge_alloc_rx_cntl_dma: Alloc Failed: " 1933 " for size: %d", size)); 1934 goto hxge_alloc_rx_cntl_dma_fail1; 1935 } 1936 1937 *dmap = rx_dmap; 1938 1939 goto hxge_alloc_rx_cntl_dma_exit; 1940 1941 hxge_alloc_rx_cntl_dma_fail1: 1942 KMEM_FREE(rx_dmap, sizeof (hxge_dma_common_t)); 1943 1944 hxge_alloc_rx_cntl_dma_exit: 1945 HXGE_DEBUG_MSG((hxgep, DMA_CTL, 1946 "<== hxge_alloc_rx_cntl_dma status 0x%08x", status)); 1947 1948 return (status); 1949 } 1950 1951 /*ARGSUSED*/ 1952 static void 1953 hxge_free_rx_cntl_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap) 1954 { 1955 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_free_rx_cntl_dma")); 1956 1957 hxge_dma_mem_free(dmap); 1958 1959 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_free_rx_cntl_dma")); 1960 } 1961 1962 static hxge_status_t 1963 hxge_alloc_tx_mem_pool(p_hxge_t hxgep) 1964 { 1965 hxge_status_t status = HXGE_OK; 1966 int i, j; 1967 uint32_t ndmas, st_tdc; 1968 p_hxge_dma_pt_cfg_t p_all_cfgp; 1969 p_hxge_hw_pt_cfg_t p_cfgp; 1970 p_hxge_dma_pool_t dma_poolp; 1971 p_hxge_dma_common_t *dma_buf_p; 1972 p_hxge_dma_pool_t dma_cntl_poolp; 1973 p_hxge_dma_common_t *dma_cntl_p; 1974 size_t tx_buf_alloc_size; 1975 size_t tx_cntl_alloc_size; 1976 uint32_t *num_chunks; /* per dma */ 1977 1978 HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_alloc_tx_mem_pool")); 1979 1980 p_all_cfgp = (p_hxge_dma_pt_cfg_t)&hxgep->pt_config; 1981 p_cfgp = (p_hxge_hw_pt_cfg_t)&p_all_cfgp->hw_config; 1982 st_tdc = p_cfgp->start_tdc; 1983 ndmas = p_cfgp->max_tdcs; 1984 1985 HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_alloc_tx_mem_pool: " 1986 "p_cfgp 0x%016llx start_tdc %d ndmas %d hxgep->max_tdcs %d", 1987 p_cfgp, p_cfgp->start_tdc, p_cfgp->max_tdcs, hxgep->max_tdcs)); 1988 /* 1989 * Allocate memory for each transmit DMA channel. 
1990 */ 1991 dma_poolp = (p_hxge_dma_pool_t)KMEM_ZALLOC(sizeof (hxge_dma_pool_t), 1992 KM_SLEEP); 1993 dma_buf_p = (p_hxge_dma_common_t *)KMEM_ZALLOC( 1994 sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP); 1995 1996 dma_cntl_poolp = (p_hxge_dma_pool_t) 1997 KMEM_ZALLOC(sizeof (hxge_dma_pool_t), KM_SLEEP); 1998 dma_cntl_p = (p_hxge_dma_common_t *)KMEM_ZALLOC( 1999 sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP); 2000 2001 hxgep->hxge_port_tx_ring_size = hxge_tx_ring_size; 2002 2003 /* 2004 * Assume that each DMA channel will be configured with the default 2005 * transmit buffer size for copying transmit data. (Packets with a 2006 * payload over this limit are not copied.) 2007 */ 2008 tx_buf_alloc_size = (hxge_bcopy_thresh * hxge_tx_ring_size); 2009 2010 /* 2011 * Addresses of the transmit descriptor ring and the mailbox must all 2012 * be cache-aligned (64 bytes). 2013 */ 2014 tx_cntl_alloc_size = hxge_tx_ring_size; 2015 tx_cntl_alloc_size *= (sizeof (tx_desc_t)); 2016 tx_cntl_alloc_size += sizeof (txdma_mailbox_t); 2017 2018 num_chunks = (uint32_t *)KMEM_ZALLOC(sizeof (uint32_t) * ndmas, 2019 KM_SLEEP); 2020 2021 /* 2022 * Allocate memory for transmit buffers and descriptor rings. Replace 2023 * allocation functions with interface functions provided by the 2024 * partition manager when it is available. 2025 * 2026 * Allocate memory for the transmit buffer pool. 2027 */ 2028 for (i = 0; i < ndmas; i++) { 2029 num_chunks[i] = 0; 2030 status = hxge_alloc_tx_buf_dma(hxgep, st_tdc, &dma_buf_p[i], 2031 tx_buf_alloc_size, hxge_bcopy_thresh, &num_chunks[i]); 2032 if (status != HXGE_OK) { 2033 break; 2034 } 2035 st_tdc++; 2036 } 2037 2038 if (i < ndmas) { 2039 goto hxge_alloc_tx_mem_pool_fail1; 2040 } 2041 2042 st_tdc = p_cfgp->start_tdc; 2043 2044 /* 2045 * Allocate memory for descriptor rings and mailbox.
2046 */ 2047 for (j = 0; j < ndmas; j++) { 2048 status = hxge_alloc_tx_cntl_dma(hxgep, st_tdc, &dma_cntl_p[j], 2049 tx_cntl_alloc_size); 2050 if (status != HXGE_OK) { 2051 break; 2052 } 2053 st_tdc++; 2054 } 2055 2056 if (j < ndmas) { 2057 goto hxge_alloc_tx_mem_pool_fail2; 2058 } 2059 2060 dma_poolp->ndmas = ndmas; 2061 dma_poolp->num_chunks = num_chunks; 2062 dma_poolp->buf_allocated = B_TRUE; 2063 dma_poolp->dma_buf_pool_p = dma_buf_p; 2064 hxgep->tx_buf_pool_p = dma_poolp; 2065 2066 dma_cntl_poolp->ndmas = ndmas; 2067 dma_cntl_poolp->buf_allocated = B_TRUE; 2068 dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p; 2069 hxgep->tx_cntl_pool_p = dma_cntl_poolp; 2070 2071 HXGE_DEBUG_MSG((hxgep, MEM_CTL, 2072 "==> hxge_alloc_tx_mem_pool: start_tdc %d " 2073 "ndmas %d poolp->ndmas %d", st_tdc, ndmas, dma_poolp->ndmas)); 2074 2075 goto hxge_alloc_tx_mem_pool_exit; 2076 2077 hxge_alloc_tx_mem_pool_fail2: 2078 /* Free control buffers */ 2079 j--; 2080 for (; j >= 0; j--) { 2081 hxge_free_tx_cntl_dma(hxgep, 2082 (p_hxge_dma_common_t)dma_cntl_p[j]); 2083 } 2084 2085 hxge_alloc_tx_mem_pool_fail1: 2086 /* Free data buffers */ 2087 i--; 2088 for (; i >= 0; i--) { 2089 hxge_free_tx_buf_dma(hxgep, (p_hxge_dma_common_t)dma_buf_p[i], 2090 num_chunks[i]); 2091 } 2092 2093 KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t)); 2094 KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t)); 2095 KMEM_FREE(dma_cntl_poolp, sizeof (hxge_dma_pool_t)); 2096 KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_hxge_dma_common_t)); 2097 KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas); 2098 2099 hxge_alloc_tx_mem_pool_exit: 2100 HXGE_DEBUG_MSG((hxgep, MEM_CTL, 2101 "<== hxge_alloc_tx_mem_pool:status 0x%08x", status)); 2102 2103 return (status); 2104 } 2105 2106 static hxge_status_t 2107 hxge_alloc_tx_buf_dma(p_hxge_t hxgep, uint16_t dma_channel, 2108 p_hxge_dma_common_t *dmap, size_t alloc_size, 2109 size_t block_size, uint32_t *num_chunks) 2110 { 2111 p_hxge_dma_common_t tx_dmap; 2112 hxge_status_t status = HXGE_OK; 2113 size_t total_alloc_size; 2114 size_t allocated = 0; 2115 int i, size_index, array_size; 2116 2117 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_tx_buf_dma")); 2118 2119 tx_dmap = (p_hxge_dma_common_t) 2120 KMEM_ZALLOC(sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK, KM_SLEEP); 2121 2122 total_alloc_size = alloc_size; 2123 i = 0; 2124 size_index = 0; 2125 array_size = sizeof (alloc_sizes) / sizeof (size_t); 2126 while ((size_index < array_size) && 2127 (alloc_sizes[size_index] < alloc_size)) 2128 size_index++; 2129 if (size_index >= array_size) { 2130 size_index = array_size - 1; 2131 } 2132 2133 while ((allocated < total_alloc_size) && 2134 (size_index >= 0) && (i < HXGE_DMA_BLOCK)) { 2135 tx_dmap[i].dma_chunk_index = i; 2136 tx_dmap[i].block_size = block_size; 2137 tx_dmap[i].alength = alloc_sizes[size_index]; 2138 tx_dmap[i].orig_alength = tx_dmap[i].alength; 2139 tx_dmap[i].nblocks = alloc_sizes[size_index] / block_size; 2140 tx_dmap[i].dma_channel = dma_channel; 2141 tx_dmap[i].contig_alloc_type = B_FALSE; 2142 2143 status = hxge_dma_mem_alloc(hxgep, hxge_force_dma, 2144 &hxge_tx_dma_attr, tx_dmap[i].alength, 2145 &hxge_dev_buf_dma_acc_attr, 2146 DDI_DMA_WRITE | DDI_DMA_STREAMING, 2147 (p_hxge_dma_common_t)(&tx_dmap[i])); 2148 if (status != HXGE_OK) { 2149 HXGE_DEBUG_MSG((hxgep, DMA_CTL, 2150 " hxge_alloc_tx_buf_dma: Alloc Failed: " 2151 " for size: %d", alloc_sizes[size_index])); 2152 size_index--; 2153 } else { 2154 i++; 2155 allocated += alloc_sizes[size_index]; 2156 } 2157 } 2158 2159 if (allocated < 
total_alloc_size) { 2160 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2161 " hxge_alloc_tx_buf_dma: failed due to" 2162 " allocated(%d) < required(%d)", 2163 allocated, total_alloc_size)); 2164 goto hxge_alloc_tx_mem_fail1; 2165 } 2166 2167 *num_chunks = i; 2168 *dmap = tx_dmap; 2169 HXGE_DEBUG_MSG((hxgep, DMA_CTL, 2170 "==> hxge_alloc_tx_buf_dma dmap 0x%016llx num chunks %d", 2171 *dmap, i)); 2172 goto hxge_alloc_tx_mem_exit; 2173 2174 hxge_alloc_tx_mem_fail1: 2175 KMEM_FREE(tx_dmap, sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK); 2176 2177 hxge_alloc_tx_mem_exit: 2178 HXGE_DEBUG_MSG((hxgep, DMA_CTL, 2179 "<== hxge_alloc_tx_buf_dma status 0x%08x", status)); 2180 2181 return (status); 2182 } 2183 2184 /*ARGSUSED*/ 2185 static void 2186 hxge_free_tx_buf_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap, 2187 uint32_t num_chunks) 2188 { 2189 int i; 2190 2191 HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_free_tx_buf_dma")); 2192 2193 for (i = 0; i < num_chunks; i++) { 2194 hxge_dma_mem_free(dmap++); 2195 } 2196 2197 HXGE_DEBUG_MSG((hxgep, MEM_CTL, "<== hxge_free_tx_buf_dma")); 2198 } 2199 2200 /*ARGSUSED*/ 2201 static hxge_status_t 2202 hxge_alloc_tx_cntl_dma(p_hxge_t hxgep, uint16_t dma_channel, 2203 p_hxge_dma_common_t *dmap, size_t size) 2204 { 2205 p_hxge_dma_common_t tx_dmap; 2206 hxge_status_t status = HXGE_OK; 2207 2208 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_tx_cntl_dma")); 2209 2210 tx_dmap = (p_hxge_dma_common_t)KMEM_ZALLOC(sizeof (hxge_dma_common_t), 2211 KM_SLEEP); 2212 2213 tx_dmap->contig_alloc_type = B_FALSE; 2214 2215 status = hxge_dma_mem_alloc(hxgep, hxge_force_dma, 2216 &hxge_tx_desc_dma_attr, size, &hxge_dev_desc_dma_acc_attr, 2217 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, tx_dmap); 2218 if (status != HXGE_OK) { 2219 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2220 " hxge_alloc_tx_cntl_dma: Alloc Failed: " 2221 " for size: %d", size)); 2222 goto hxge_alloc_tx_cntl_dma_fail1; 2223 } 2224 2225 *dmap = tx_dmap; 2226 2227 goto hxge_alloc_tx_cntl_dma_exit; 2228 2229 hxge_alloc_tx_cntl_dma_fail1: 2230 KMEM_FREE(tx_dmap, sizeof (hxge_dma_common_t)); 2231 2232 hxge_alloc_tx_cntl_dma_exit: 2233 HXGE_DEBUG_MSG((hxgep, DMA_CTL, 2234 "<== hxge_alloc_tx_cntl_dma status 0x%08x", status)); 2235 2236 return (status); 2237 } 2238 2239 /*ARGSUSED*/ 2240 static void 2241 hxge_free_tx_cntl_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap) 2242 { 2243 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_free_tx_cntl_dma")); 2244 2245 hxge_dma_mem_free(dmap); 2246 2247 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_free_tx_cntl_dma")); 2248 } 2249 2250 static void 2251 hxge_free_tx_mem_pool(p_hxge_t hxgep) 2252 { 2253 uint32_t i, ndmas; 2254 p_hxge_dma_pool_t dma_poolp; 2255 p_hxge_dma_common_t *dma_buf_p; 2256 p_hxge_dma_pool_t dma_cntl_poolp; 2257 p_hxge_dma_common_t *dma_cntl_p; 2258 uint32_t *num_chunks; 2259 2260 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_free_tx_mem_pool")); 2261 2262 dma_poolp = hxgep->tx_buf_pool_p; 2263 if (dma_poolp == NULL || (!dma_poolp->buf_allocated)) { 2264 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 2265 "<== hxge_free_tx_mem_pool " 2266 "(null tx buf pool or buf not allocated)")); 2267 return; 2268 } 2269 2270 dma_cntl_poolp = hxgep->tx_cntl_pool_p; 2271 if (dma_cntl_poolp == NULL || (!dma_cntl_poolp->buf_allocated)) { 2272 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 2273 "<== hxge_free_tx_mem_pool " 2274 "(null tx cntl buf pool or cntl buf not allocated)")); 2275 return; 2276 } 2277 2278 dma_buf_p = dma_poolp->dma_buf_pool_p; 2279 num_chunks = dma_poolp->num_chunks; 2280 2281 dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p; 2282
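/* Teardown mirrors hxge_alloc_tx_mem_pool(): free the per-channel data buffers first, then the descriptor/mailbox control memory, and finally the chunk-count array and the pool bookkeeping structures. */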
ndmas = dma_cntl_poolp->ndmas; 2283 2284 for (i = 0; i < ndmas; i++) { 2285 hxge_free_tx_buf_dma(hxgep, dma_buf_p[i], num_chunks[i]); 2286 } 2287 2288 for (i = 0; i < ndmas; i++) { 2289 hxge_free_tx_cntl_dma(hxgep, dma_cntl_p[i]); 2290 } 2291 2292 for (i = 0; i < ndmas; i++) { 2293 KMEM_FREE(dma_buf_p[i], 2294 sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK); 2295 KMEM_FREE(dma_cntl_p[i], sizeof (hxge_dma_common_t)); 2296 } 2297 2298 KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas); 2299 KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_hxge_dma_common_t)); 2300 KMEM_FREE(dma_cntl_poolp, sizeof (hxge_dma_pool_t)); 2301 KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t)); 2302 KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t)); 2303 2304 hxgep->tx_buf_pool_p = NULL; 2305 hxgep->tx_cntl_pool_p = NULL; 2306 2307 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "<== hxge_free_tx_mem_pool")); 2308 } 2309 2310 /*ARGSUSED*/ 2311 static hxge_status_t 2312 hxge_dma_mem_alloc(p_hxge_t hxgep, dma_method_t method, 2313 struct ddi_dma_attr *dma_attrp, 2314 size_t length, ddi_device_acc_attr_t *acc_attr_p, uint_t xfer_flags, 2315 p_hxge_dma_common_t dma_p) 2316 { 2317 caddr_t kaddrp; 2318 int ddi_status = DDI_SUCCESS; 2319 2320 dma_p->dma_handle = NULL; 2321 dma_p->acc_handle = NULL; 2322 dma_p->kaddrp = NULL; 2323 2324 ddi_status = ddi_dma_alloc_handle(hxgep->dip, dma_attrp, 2325 DDI_DMA_DONTWAIT, NULL, &dma_p->dma_handle); 2326 if (ddi_status != DDI_SUCCESS) { 2327 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2328 "hxge_dma_mem_alloc:ddi_dma_alloc_handle failed.")); 2329 return (HXGE_ERROR | HXGE_DDI_FAILED); 2330 } 2331 2332 ddi_status = ddi_dma_mem_alloc(dma_p->dma_handle, length, acc_attr_p, 2333 xfer_flags, DDI_DMA_DONTWAIT, 0, &kaddrp, &dma_p->alength, 2334 &dma_p->acc_handle); 2335 if (ddi_status != DDI_SUCCESS) { 2336 /* The caller will decide whether it is fatal */ 2337 HXGE_DEBUG_MSG((hxgep, DMA_CTL, 2338 "hxge_dma_mem_alloc:ddi_dma_mem_alloc failed")); 2339 ddi_dma_free_handle(&dma_p->dma_handle); 2340 dma_p->dma_handle = NULL; 2341 return (HXGE_ERROR | HXGE_DDI_FAILED); 2342 } 2343 2344 if (dma_p->alength < length) { 2345 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2346 "hxge_dma_mem_alloc:ddi_dma_mem_alloc < length.")); 2347 ddi_dma_mem_free(&dma_p->acc_handle); 2348 ddi_dma_free_handle(&dma_p->dma_handle); 2349 dma_p->acc_handle = NULL; 2350 dma_p->dma_handle = NULL; 2351 return (HXGE_ERROR); 2352 } 2353 2354 ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL, 2355 kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0, 2356 &dma_p->dma_cookie, &dma_p->ncookies); 2357 if (ddi_status != DDI_DMA_MAPPED) { 2358 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2359 "hxge_dma_mem_alloc:ddi_dma_addr_bind failed " 2360 "(status 0x%x ncookies %d.)", ddi_status, dma_p->ncookies)); 2361 if (dma_p->acc_handle) { 2362 ddi_dma_mem_free(&dma_p->acc_handle); 2363 dma_p->acc_handle = NULL; 2364 } 2365 ddi_dma_free_handle(&dma_p->dma_handle); 2366 dma_p->dma_handle = NULL; 2367 return (HXGE_ERROR | HXGE_DDI_FAILED); 2368 } 2369 2370 if (dma_p->ncookies != 1) { 2371 HXGE_DEBUG_MSG((hxgep, DMA_CTL, 2372 "hxge_dma_mem_alloc:ddi_dma_addr_bind > 1 cookie" 2373 "(status 0x%x ncookies %d.)", ddi_status, dma_p->ncookies)); 2374 if (dma_p->acc_handle) { 2375 ddi_dma_mem_free(&dma_p->acc_handle); 2376 dma_p->acc_handle = NULL; 2377 } 2378 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 2379 ddi_dma_free_handle(&dma_p->dma_handle); 2380 dma_p->dma_handle = NULL; 2381 return (HXGE_ERROR); 2382 } 2383 2384 dma_p->kaddrp = kaddrp; 2385 #if defined(__i386) 2386
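/* On 32-bit x86 the 64-bit dmac_laddress is narrowed through uint32_t before the pointer cast; with a single-cookie binding the device address is presumably known to fit in 32 bits here, and the intermediate cast avoids a direct 64-to-32 pointer conversion. */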
dma_p->ioaddr_pp = 2387 (unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress; 2388 #else 2389 dma_p->ioaddr_pp = (unsigned char *) dma_p->dma_cookie.dmac_laddress; 2390 #endif 2391 2392 HPI_DMA_ACC_HANDLE_SET(dma_p, dma_p->acc_handle); 2393 2394 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_dma_mem_alloc: " 2395 "dma buffer allocated: dma_p $%p " 2396 "return dmac_laddress from cookie $%p dmac_size %d " 2397 "dma_p->ioaddr_p $%p " 2398 "dma_p->orig_ioaddr_p $%p " 2399 "orig_vatopa $%p " 2400 "alength %d (0x%x) " 2401 "kaddrp $%p " 2402 "length %d (0x%x)", 2403 dma_p, 2404 dma_p->dma_cookie.dmac_laddress, 2405 dma_p->dma_cookie.dmac_size, 2406 dma_p->ioaddr_pp, 2407 dma_p->orig_ioaddr_pp, 2408 dma_p->orig_vatopa, 2409 dma_p->alength, dma_p->alength, 2410 kaddrp, 2411 length, length)); 2412 2413 return (HXGE_OK); 2414 } 2415 2416 static void 2417 hxge_dma_mem_free(p_hxge_dma_common_t dma_p) 2418 { 2419 if (dma_p == NULL) 2420 return; 2421 2422 if (dma_p->dma_handle != NULL) { 2423 if (dma_p->ncookies) { 2424 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 2425 dma_p->ncookies = 0; 2426 } 2427 ddi_dma_free_handle(&dma_p->dma_handle); 2428 dma_p->dma_handle = NULL; 2429 } 2430 2431 if (dma_p->acc_handle != NULL) { 2432 ddi_dma_mem_free(&dma_p->acc_handle); 2433 dma_p->acc_handle = NULL; 2434 HPI_DMA_ACC_HANDLE_SET(dma_p, NULL); 2435 } 2436 2437 dma_p->kaddrp = NULL; 2438 dma_p->alength = 0; 2439 } 2440 2441 /* 2442 * hxge_m_start() -- start transmitting and receiving. 2443 * 2444 * This function is called by the MAC layer when the first 2445 * stream is opened to prepare the hardware for sending 2446 * and receiving packets. 2447 */ 2448 static int 2449 hxge_m_start(void *arg) 2450 { 2451 p_hxge_t hxgep = (p_hxge_t)arg; 2452 2453 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_start")); 2454 2455 MUTEX_ENTER(hxgep->genlock); 2456 2457 if (hxge_init(hxgep) != DDI_SUCCESS) { 2458 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2459 "<== hxge_m_start: initialization failed")); 2460 MUTEX_EXIT(hxgep->genlock); 2461 return (EIO); 2462 } 2463 2464 if (hxgep->hxge_mac_state != HXGE_MAC_STARTED) { 2465 /* 2466 * Start the timer that checks for system errors and tx hangs 2467 */ 2468 hxgep->hxge_timerid = hxge_start_timer(hxgep, 2469 hxge_check_hw_state, HXGE_CHECK_TIMER); 2470 2471 hxgep->hxge_mac_state = HXGE_MAC_STARTED; 2472 2473 hxgep->timeout.link_status = 0; 2474 hxgep->timeout.report_link_status = B_TRUE; 2475 hxgep->timeout.ticks = drv_usectohz(2 * 1000000); 2476 2477 /* Start the link status timer to check the link status */ 2478 MUTEX_ENTER(&hxgep->timeout.lock); 2479 hxgep->timeout.id = timeout(hxge_link_poll, (void *)hxgep, 2480 hxgep->timeout.ticks); 2481 MUTEX_EXIT(&hxgep->timeout.lock); 2482 } 2483 2484 MUTEX_EXIT(hxgep->genlock); 2485 2486 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_start")); 2487 2488 return (0); 2489 } 2490 2491 /* 2492 * hxge_m_stop(): stop transmitting and receiving.
2493 */ 2494 static void 2495 hxge_m_stop(void *arg) 2496 { 2497 p_hxge_t hxgep = (p_hxge_t)arg; 2498 2499 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_stop")); 2500 2501 if (hxgep->hxge_timerid) { 2502 hxge_stop_timer(hxgep, hxgep->hxge_timerid); 2503 hxgep->hxge_timerid = 0; 2504 } 2505 2506 /* Stop the link status timer before unregistering */ 2507 MUTEX_ENTER(&hxgep->timeout.lock); 2508 if (hxgep->timeout.id) { 2509 (void) untimeout(hxgep->timeout.id); 2510 hxgep->timeout.id = 0; 2511 } 2512 hxge_link_update(hxgep, LINK_STATE_DOWN); 2513 MUTEX_EXIT(&hxgep->timeout.lock); 2514 2515 MUTEX_ENTER(hxgep->genlock); 2516 2517 hxge_uninit(hxgep); 2518 2519 hxgep->hxge_mac_state = HXGE_MAC_STOPPED; 2520 2521 MUTEX_EXIT(hxgep->genlock); 2522 2523 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_stop")); 2524 } 2525 2526 static int 2527 hxge_m_unicst(void *arg, const uint8_t *macaddr) 2528 { 2529 p_hxge_t hxgep = (p_hxge_t)arg; 2530 struct ether_addr addrp; 2531 hxge_status_t status; 2532 2533 HXGE_DEBUG_MSG((hxgep, MAC_CTL, "==> hxge_m_unicst")); 2534 2535 bcopy(macaddr, (uint8_t *)&addrp, ETHERADDRL); 2536 2537 status = hxge_set_mac_addr(hxgep, &addrp); 2538 if (status != HXGE_OK) { 2539 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2540 "<== hxge_m_unicst: set unicast failed")); 2541 return (EINVAL); 2542 } 2543 2544 HXGE_DEBUG_MSG((hxgep, MAC_CTL, "<== hxge_m_unicst")); 2545 2546 return (0); 2547 } 2548 2549 static int 2550 hxge_m_multicst(void *arg, boolean_t add, const uint8_t *mca) 2551 { 2552 p_hxge_t hxgep = (p_hxge_t)arg; 2553 struct ether_addr addrp; 2554 2555 HXGE_DEBUG_MSG((hxgep, MAC_CTL, "==> hxge_m_multicst: add %d", add)); 2556 2557 bcopy(mca, (uint8_t *)&addrp, ETHERADDRL); 2558 2559 if (add) { 2560 if (hxge_add_mcast_addr(hxgep, &addrp)) { 2561 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2562 "<== hxge_m_multicst: add multicast failed")); 2563 return (EINVAL); 2564 } 2565 } else { 2566 if (hxge_del_mcast_addr(hxgep, &addrp)) { 2567 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2568 "<== hxge_m_multicst: del multicast failed")); 2569 return (EINVAL); 2570 } 2571 } 2572 2573 HXGE_DEBUG_MSG((hxgep, MAC_CTL, "<== hxge_m_multicst")); 2574 2575 return (0); 2576 } 2577 2578 static int 2579 hxge_m_promisc(void *arg, boolean_t on) 2580 { 2581 p_hxge_t hxgep = (p_hxge_t)arg; 2582 2583 HXGE_DEBUG_MSG((hxgep, MAC_CTL, "==> hxge_m_promisc: on %d", on)); 2584 2585 if (hxge_set_promisc(hxgep, on)) { 2586 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2587 "<== hxge_m_promisc: set promisc failed")); 2588 return (EINVAL); 2589 } 2590 2591 HXGE_DEBUG_MSG((hxgep, MAC_CTL, "<== hxge_m_promisc: on %d", on)); 2592 2593 return (0); 2594 } 2595 2596 static void 2597 hxge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp) 2598 { 2599 p_hxge_t hxgep = (p_hxge_t)arg; 2600 struct iocblk *iocp = (struct iocblk *)mp->b_rptr; 2601 boolean_t need_privilege; 2602 int err; 2603 int cmd; 2604 2605 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_ioctl")); 2606 2607 iocp = (struct iocblk *)mp->b_rptr; 2608 iocp->ioc_error = 0; 2609 need_privilege = B_TRUE; 2610 cmd = iocp->ioc_cmd; 2611 2612 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_ioctl: cmd 0x%08x", cmd)); 2613 switch (cmd) { 2614 default: 2615 miocnak(wq, mp, 0, EINVAL); 2616 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_ioctl: invalid")); 2617 return; 2618 2619 case LB_GET_INFO_SIZE: 2620 case LB_GET_INFO: 2621 case LB_GET_MODE: 2622 need_privilege = B_FALSE; 2623 break; 2624 2625 case LB_SET_MODE: 2626 break; 2627 2628 case ND_GET: 2629 need_privilege = B_FALSE; 2630 break; 2631 case ND_SET: 2632 break;
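/* The hxge-private debug ioctls below are exempted from the privilege check; only commands that leave need_privilege set (LB_SET_MODE and ND_SET) go through secpolicy_net_config() before being dispatched. */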
2633 2634 case HXGE_GET64: 2635 case HXGE_PUT64: 2636 case HXGE_GET_TX_RING_SZ: 2637 case HXGE_GET_TX_DESC: 2638 case HXGE_TX_SIDE_RESET: 2639 case HXGE_RX_SIDE_RESET: 2640 case HXGE_GLOBAL_RESET: 2641 case HXGE_RESET_MAC: 2642 case HXGE_PUT_TCAM: 2643 case HXGE_GET_TCAM: 2644 case HXGE_RTRACE: 2645 2646 need_privilege = B_FALSE; 2647 break; 2648 } 2649 2650 if (need_privilege) { 2651 err = secpolicy_net_config(iocp->ioc_cr, B_FALSE); 2652 if (err != 0) { 2653 miocnak(wq, mp, 0, err); 2654 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2655 "<== hxge_m_ioctl: no priv")); 2656 return; 2657 } 2658 } 2659 2660 switch (cmd) { 2661 case ND_GET: 2662 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "ND_GET command")); 2663 /* FALLTHROUGH */ case ND_SET: 2664 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "ND_SET command")); 2665 hxge_param_ioctl(hxgep, wq, mp, iocp); 2666 break; 2667 2668 case LB_GET_MODE: 2669 case LB_SET_MODE: 2670 case LB_GET_INFO_SIZE: 2671 case LB_GET_INFO: 2672 hxge_loopback_ioctl(hxgep, wq, mp, iocp); 2673 break; 2674 2675 case HXGE_PUT_TCAM: 2676 case HXGE_GET_TCAM: 2677 case HXGE_GET64: 2678 case HXGE_PUT64: 2679 case HXGE_GET_TX_RING_SZ: 2680 case HXGE_GET_TX_DESC: 2681 case HXGE_TX_SIDE_RESET: 2682 case HXGE_RX_SIDE_RESET: 2683 case HXGE_GLOBAL_RESET: 2684 case HXGE_RESET_MAC: 2685 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, 2686 "==> hxge_m_ioctl: cmd 0x%x", cmd)); 2687 hxge_hw_ioctl(hxgep, wq, mp, iocp); 2688 break; 2689 } 2690 2691 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_ioctl")); 2692 } 2693 2694 /*ARGSUSED*/ 2695 boolean_t 2696 hxge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data) 2697 { 2698 uint32_t *txflags = cap_data; 2699 2700 switch (cap) { 2701 case MAC_CAPAB_HCKSUM: 2702 *txflags = HCKSUM_INET_PARTIAL; 2703 break; 2704 2705 default: 2706 return (B_FALSE); 2707 } 2708 return (B_TRUE); 2709 } 2710 2711 static boolean_t 2712 hxge_param_locked(mac_prop_id_t pr_num) 2713 { 2714 /* 2715 * All adv_* parameters are locked (read-only) while 2716 * the device is in any sort of loopback mode ... 2717 */ 2718 switch (pr_num) { 2719 case MAC_PROP_ADV_1000FDX_CAP: 2720 case MAC_PROP_EN_1000FDX_CAP: 2721 case MAC_PROP_ADV_1000HDX_CAP: 2722 case MAC_PROP_EN_1000HDX_CAP: 2723 case MAC_PROP_ADV_100FDX_CAP: 2724 case MAC_PROP_EN_100FDX_CAP: 2725 case MAC_PROP_ADV_100HDX_CAP: 2726 case MAC_PROP_EN_100HDX_CAP: 2727 case MAC_PROP_ADV_10FDX_CAP: 2728 case MAC_PROP_EN_10FDX_CAP: 2729 case MAC_PROP_ADV_10HDX_CAP: 2730 case MAC_PROP_EN_10HDX_CAP: 2731 case MAC_PROP_AUTONEG: 2732 case MAC_PROP_FLOWCTRL: 2733 return (B_TRUE); 2734 } 2735 return (B_FALSE); 2736 } 2737 2738 /* 2739 * callback functions for set/get of properties 2740 */ 2741 static int 2742 hxge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num, 2743 uint_t pr_valsize, const void *pr_val) 2744 { 2745 hxge_t *hxgep = barg; 2746 p_hxge_stats_t statsp; 2747 int err = 0; 2748 uint32_t new_mtu, old_framesize, new_framesize; 2749 2750 HXGE_DEBUG_MSG((hxgep, DLADM_CTL, "==> hxge_m_setprop")); 2751 2752 statsp = hxgep->statsp; 2753 mutex_enter(hxgep->genlock); 2754 if (statsp->port_stats.lb_mode != hxge_lb_normal && 2755 hxge_param_locked(pr_num)) { 2756 /* 2757 * All adv_* parameters are locked (read-only) 2758 * while the device is in any sort of loopback mode.
2759 */ 2760 HXGE_DEBUG_MSG((hxgep, DLADM_CTL, 2761 "==> hxge_m_setprop: loopback mode: read only")); 2762 mutex_exit(hxgep->genlock); 2763 return (EBUSY); 2764 } 2765 2766 switch (pr_num) { 2767 /* 2768 * These properties either do not exist or are read-only. 2769 */ 2770 case MAC_PROP_EN_1000FDX_CAP: 2771 case MAC_PROP_EN_100FDX_CAP: 2772 case MAC_PROP_EN_10FDX_CAP: 2773 case MAC_PROP_EN_1000HDX_CAP: 2774 case MAC_PROP_EN_100HDX_CAP: 2775 case MAC_PROP_EN_10HDX_CAP: 2776 case MAC_PROP_ADV_1000FDX_CAP: 2777 case MAC_PROP_ADV_1000HDX_CAP: 2778 case MAC_PROP_ADV_100FDX_CAP: 2779 case MAC_PROP_ADV_100HDX_CAP: 2780 case MAC_PROP_ADV_10FDX_CAP: 2781 case MAC_PROP_ADV_10HDX_CAP: 2782 case MAC_PROP_STATUS: 2783 case MAC_PROP_SPEED: 2784 case MAC_PROP_DUPLEX: 2785 case MAC_PROP_AUTONEG: 2786 /* 2787 * Flow control is handled in the shared domain and 2788 * it is read-only here. 2789 */ 2790 case MAC_PROP_FLOWCTRL: 2791 err = EINVAL; 2792 HXGE_DEBUG_MSG((hxgep, DLADM_CTL, 2793 "==> hxge_m_setprop: read only property %d", 2794 pr_num)); 2795 break; 2796 2797 case MAC_PROP_MTU: 2798 bcopy(pr_val, &new_mtu, sizeof (new_mtu)); 2799 HXGE_DEBUG_MSG((hxgep, DLADM_CTL, 2800 "==> hxge_m_setprop: set MTU: %d", new_mtu)); 2801 2802 new_framesize = new_mtu + MTU_TO_FRAME_SIZE; 2803 if (new_framesize == hxgep->vmac.maxframesize) { 2804 err = 0; 2805 break; 2806 } 2807 2808 if (hxgep->hxge_mac_state == HXGE_MAC_STARTED) { 2809 err = EBUSY; 2810 break; 2811 } 2812 2813 if (new_framesize < MIN_FRAME_SIZE || 2814 new_framesize > MAX_FRAME_SIZE) { 2815 err = EINVAL; 2816 break; 2817 } 2818 2819 old_framesize = hxgep->vmac.maxframesize; 2820 hxgep->vmac.maxframesize = (uint16_t)new_framesize; 2821 2822 if (hxge_vmac_set_framesize(hxgep)) { 2823 hxgep->vmac.maxframesize = 2824 (uint16_t)old_framesize; 2825 err = EINVAL; 2826 break; 2827 } 2828 2829 err = mac_maxsdu_update(hxgep->mach, new_mtu); 2830 if (err) { 2831 hxgep->vmac.maxframesize = 2832 (uint16_t)old_framesize; 2833 (void) hxge_vmac_set_framesize(hxgep); 2834 } 2835 2836 HXGE_DEBUG_MSG((hxgep, DLADM_CTL, 2837 "==> hxge_m_setprop: set MTU: %d maxframe %d", 2838 new_mtu, hxgep->vmac.maxframesize)); 2839 break; 2840 2841 case MAC_PROP_PRIVATE: 2842 HXGE_DEBUG_MSG((hxgep, DLADM_CTL, 2843 "==> hxge_m_setprop: private property")); 2844 err = hxge_set_priv_prop(hxgep, pr_name, pr_valsize, 2845 pr_val); 2846 break; 2847 2848 default: 2849 err = ENOTSUP; 2850 break; 2851 } 2852 2853 mutex_exit(hxgep->genlock); 2854 2855 HXGE_DEBUG_MSG((hxgep, DLADM_CTL, 2856 "<== hxge_m_setprop (return %d)", err)); 2857 2858 return (err); 2859 } 2860 2861 /* ARGSUSED */ 2862 static int 2863 hxge_get_def_val(hxge_t *hxgep, mac_prop_id_t pr_num, uint_t pr_valsize, 2864 void *pr_val) 2865 { 2866 int err = 0; 2867 link_flowctrl_t fl; 2868 2869 switch (pr_num) { 2870 case MAC_PROP_DUPLEX: 2871 *(uint8_t *)pr_val = 2; 2872 break; 2873 case MAC_PROP_AUTONEG: 2874 *(uint8_t *)pr_val = 0; 2875 break; 2876 case MAC_PROP_FLOWCTRL: 2877 if (pr_valsize < sizeof (link_flowctrl_t)) 2878 return (EINVAL); 2879 fl = LINK_FLOWCTRL_TX; 2880 bcopy(&fl, pr_val, sizeof (fl)); 2881 break; 2882 default: 2883 err = ENOTSUP; 2884 break; 2885 } 2886 return (err); 2887 } 2888 2889 static int 2890 hxge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num, 2891 uint_t pr_flags, uint_t pr_valsize, void *pr_val, uint_t *perm) 2892 { 2893 hxge_t *hxgep = barg; 2894 p_hxge_stats_t statsp = hxgep->statsp; 2895 int err = 0; 2896 link_flowctrl_t fl; 2897 uint64_t tmp = 0; 2898 link_state_t ls; 2899 2900
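/* Reads are served in three groups below: MAC_PROP_DEFAULT requests (other than private properties) are answered from hxge_get_def_val(), read-only link properties report MAC_PROP_PERM_READ through *perm, and "_"-prefixed private properties are handed to hxge_get_priv_prop(). */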
HXGE_DEBUG_MSG((hxgep, DLADM_CTL, 2901 "==> hxge_m_getprop: pr_num %d", pr_num)); 2902 2903 if (pr_valsize == 0) 2904 return (EINVAL); 2905 2906 *perm = MAC_PROP_PERM_RW; 2907 2908 if ((pr_flags & MAC_PROP_DEFAULT) && (pr_num != MAC_PROP_PRIVATE)) { 2909 err = hxge_get_def_val(hxgep, pr_num, pr_valsize, pr_val); 2910 return (err); 2911 } 2912 2913 bzero(pr_val, pr_valsize); 2914 switch (pr_num) { 2915 case MAC_PROP_DUPLEX: 2916 *perm = MAC_PROP_PERM_READ; 2917 *(uint8_t *)pr_val = statsp->mac_stats.link_duplex; 2918 HXGE_DEBUG_MSG((hxgep, DLADM_CTL, 2919 "==> hxge_m_getprop: duplex mode %d", 2920 *(uint8_t *)pr_val)); 2921 break; 2922 2923 case MAC_PROP_SPEED: 2924 *perm = MAC_PROP_PERM_READ; 2925 if (pr_valsize < sizeof (uint64_t)) 2926 return (EINVAL); 2927 tmp = statsp->mac_stats.link_speed * 1000000ull; 2928 bcopy(&tmp, pr_val, sizeof (tmp)); 2929 break; 2930 2931 case MAC_PROP_STATUS: 2932 *perm = MAC_PROP_PERM_READ; 2933 if (pr_valsize < sizeof (link_state_t)) 2934 return (EINVAL); 2935 if (!statsp->mac_stats.link_up) 2936 ls = LINK_STATE_DOWN; 2937 else 2938 ls = LINK_STATE_UP; 2939 bcopy(&ls, pr_val, sizeof (ls)); 2940 break; 2941 2942 case MAC_PROP_FLOWCTRL: 2943 /* 2944 * Flow control is supported by the shared domain and 2945 * it is currently transmit only 2946 */ 2947 *perm = MAC_PROP_PERM_READ; 2948 if (pr_valsize < sizeof (link_flowctrl_t)) 2949 return (EINVAL); 2950 fl = LINK_FLOWCTRL_TX; 2951 bcopy(&fl, pr_val, sizeof (fl)); 2952 break; 2953 case MAC_PROP_AUTONEG: 2954 /* 10G link only and it is not negotiable */ 2955 *perm = MAC_PROP_PERM_READ; 2956 *(uint8_t *)pr_val = 0; 2957 break; 2958 case MAC_PROP_ADV_1000FDX_CAP: 2959 case MAC_PROP_ADV_100FDX_CAP: 2960 case MAC_PROP_ADV_10FDX_CAP: 2961 case MAC_PROP_ADV_1000HDX_CAP: 2962 case MAC_PROP_ADV_100HDX_CAP: 2963 case MAC_PROP_ADV_10HDX_CAP: 2964 case MAC_PROP_EN_1000FDX_CAP: 2965 case MAC_PROP_EN_100FDX_CAP: 2966 case MAC_PROP_EN_10FDX_CAP: 2967 case MAC_PROP_EN_1000HDX_CAP: 2968 case MAC_PROP_EN_100HDX_CAP: 2969 case MAC_PROP_EN_10HDX_CAP: 2970 err = ENOTSUP; 2971 break; 2972 2973 case MAC_PROP_PRIVATE: 2974 err = hxge_get_priv_prop(hxgep, pr_name, pr_flags, 2975 pr_valsize, pr_val); 2976 break; 2977 default: 2978 err = EINVAL; 2979 break; 2980 } 2981 2982 HXGE_DEBUG_MSG((hxgep, DLADM_CTL, "<== hxge_m_getprop")); 2983 2984 return (err); 2985 } 2986 2987 /* ARGSUSED */ 2988 static int 2989 hxge_set_priv_prop(p_hxge_t hxgep, const char *pr_name, uint_t pr_valsize, 2990 const void *pr_val) 2991 { 2992 p_hxge_param_t param_arr = hxgep->param_arr; 2993 int err = 0; 2994 2995 HXGE_DEBUG_MSG((hxgep, DLADM_CTL, 2996 "==> hxge_set_priv_prop: name %s (value %s)", pr_name, pr_val)); 2997 2998 if (pr_val == NULL) { 2999 return (EINVAL); 3000 } 3001 3002 /* Blanking */ 3003 if (strcmp(pr_name, "_rxdma_intr_time") == 0) { 3004 err = hxge_param_rx_intr_time(hxgep, NULL, NULL, 3005 (char *)pr_val, (caddr_t)&param_arr[param_rxdma_intr_time]); 3006 } else if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) { 3007 err = hxge_param_rx_intr_pkts(hxgep, NULL, NULL, 3008 (char *)pr_val, (caddr_t)&param_arr[param_rxdma_intr_pkts]); 3009 3010 /* Classification */ 3011 } else if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) { 3012 err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val, 3013 (caddr_t)&param_arr[param_class_opt_ipv4_tcp]); 3014 } else if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) { 3015 err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val, 3016 (caddr_t)&param_arr[param_class_opt_ipv4_udp]); 3017 } else if (strcmp(pr_name,
"_class_opt_ipv4_ah") == 0) { 3018 err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val, 3019 (caddr_t)&param_arr[param_class_opt_ipv4_ah]); 3020 } else if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) { 3021 err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val, 3022 (caddr_t)&param_arr[param_class_opt_ipv4_sctp]); 3023 } else if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) { 3024 err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val, 3025 (caddr_t)&param_arr[param_class_opt_ipv6_tcp]); 3026 } else if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) { 3027 err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val, 3028 (caddr_t)&param_arr[param_class_opt_ipv6_udp]); 3029 } else if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) { 3030 err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val, 3031 (caddr_t)&param_arr[param_class_opt_ipv6_ah]); 3032 } else if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) { 3033 err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val, 3034 (caddr_t)&param_arr[param_class_opt_ipv6_sctp]); 3035 } else { 3036 err = EINVAL; 3037 } 3038 3039 HXGE_DEBUG_MSG((hxgep, DLADM_CTL, 3040 "<== hxge_set_priv_prop: err %d", err)); 3041 3042 return (err); 3043 } 3044 3045 static int 3046 hxge_get_priv_prop(p_hxge_t hxgep, const char *pr_name, uint_t pr_flags, 3047 uint_t pr_valsize, void *pr_val) 3048 { 3049 p_hxge_param_t param_arr = hxgep->param_arr; 3050 char valstr[MAXNAMELEN]; 3051 int err = 0; 3052 uint_t strsize; 3053 int value = 0; 3054 3055 HXGE_DEBUG_MSG((hxgep, DLADM_CTL, 3056 "==> hxge_get_priv_prop: property %s", pr_name)); 3057 3058 if (pr_flags & MAC_PROP_DEFAULT) { 3059 /* Receive Interrupt Blanking Parameters */ 3060 if (strcmp(pr_name, "_rxdma_intr_time") == 0) { 3061 value = RXDMA_RCR_TO_DEFAULT; 3062 } else if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) { 3063 value = RXDMA_RCR_PTHRES_DEFAULT; 3064 3065 /* Classification and Load Distribution Configuration */ 3066 } else if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0 || 3067 strcmp(pr_name, "_class_opt_ipv4_udp") == 0 || 3068 strcmp(pr_name, "_class_opt_ipv4_ah") == 0 || 3069 strcmp(pr_name, "_class_opt_ipv4_sctp") == 0 || 3070 strcmp(pr_name, "_class_opt_ipv6_tcp") == 0 || 3071 strcmp(pr_name, "_class_opt_ipv6_udp") == 0 || 3072 strcmp(pr_name, "_class_opt_ipv6_ah") == 0 || 3073 strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) { 3074 value = HXGE_CLASS_TCAM_LOOKUP; 3075 } else { 3076 err = EINVAL; 3077 } 3078 } else { 3079 /* Receive Interrupt Blanking Parameters */ 3080 if (strcmp(pr_name, "_rxdma_intr_time") == 0) { 3081 value = hxgep->intr_timeout; 3082 } else if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) { 3083 value = hxgep->intr_threshold; 3084 3085 /* Classification and Load Distribution Configuration */ 3086 } else if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) { 3087 err = hxge_param_get_ip_opt(hxgep, NULL, NULL, 3088 (caddr_t)&param_arr[param_class_opt_ipv4_tcp]); 3089 3090 value = (int)param_arr[param_class_opt_ipv4_tcp].value; 3091 } else if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) { 3092 err = hxge_param_get_ip_opt(hxgep, NULL, NULL, 3093 (caddr_t)&param_arr[param_class_opt_ipv4_udp]); 3094 3095 value = (int)param_arr[param_class_opt_ipv4_udp].value; 3096 } else if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) { 3097 err = hxge_param_get_ip_opt(hxgep, NULL, NULL, 3098 (caddr_t)&param_arr[param_class_opt_ipv4_ah]); 3099 3100 value = (int)param_arr[param_class_opt_ipv4_ah].value; 3101 } else if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) { 3102 err = hxge_param_get_ip_opt(hxgep, NULL,
NULL, 3103 (caddr_t)&param_arr[param_class_opt_ipv4_sctp]); 3104 3105 value = (int)param_arr[param_class_opt_ipv4_sctp].value; 3106 } else if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) { 3107 err = hxge_param_get_ip_opt(hxgep, NULL, NULL, 3108 (caddr_t)&param_arr[param_class_opt_ipv6_tcp]); 3109 3110 value = (int)param_arr[param_class_opt_ipv6_tcp].value; 3111 } else if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) { 3112 err = hxge_param_get_ip_opt(hxgep, NULL, NULL, 3113 (caddr_t)&param_arr[param_class_opt_ipv6_udp]); 3114 3115 value = (int)param_arr[param_class_opt_ipv6_udp].value; 3116 } else if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) { 3117 err = hxge_param_get_ip_opt(hxgep, NULL, NULL, 3118 (caddr_t)&param_arr[param_class_opt_ipv6_ah]); 3119 3120 value = (int)param_arr[param_class_opt_ipv6_ah].value; 3121 } else if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) { 3122 err = hxge_param_get_ip_opt(hxgep, NULL, NULL, 3123 (caddr_t)&param_arr[param_class_opt_ipv6_sctp]); 3124 3125 value = (int)param_arr[param_class_opt_ipv6_sctp].value; 3126 } else { 3127 err = EINVAL; 3128 } 3129 } 3130 3131 if (err == 0) { 3132 (void) snprintf(valstr, sizeof (valstr), "0x%x", value); 3133 3134 strsize = (uint_t)strlen(valstr); 3135 if (pr_valsize < strsize) { 3136 err = ENOBUFS; 3137 } else { 3138 (void) strlcpy(pr_val, valstr, pr_valsize); 3139 } 3140 } 3141 3142 HXGE_DEBUG_MSG((hxgep, DLADM_CTL, 3143 "<== hxge_get_priv_prop: return %d", err)); 3144 3145 return (err); 3146 } 3147 /* 3148 * Module loading and removing entry points. 3149 */ 3150 DDI_DEFINE_STREAM_OPS(hxge_dev_ops, nulldev, nulldev, hxge_attach, hxge_detach, 3151 nodev, NULL, D_MP, NULL, NULL); 3152 3153 extern struct mod_ops mod_driverops; 3154 3155 #define HXGE_DESC_VER "HXGE 10Gb Ethernet Driver" 3156 3157 /* 3158 * Module linkage information for the kernel.
3159 */ 3160 static struct modldrv hxge_modldrv = { 3161 &mod_driverops, 3162 HXGE_DESC_VER, 3163 &hxge_dev_ops 3164 }; 3165 3166 static struct modlinkage modlinkage = { 3167 MODREV_1, (void *) &hxge_modldrv, NULL 3168 }; 3169 3170 int 3171 _init(void) 3172 { 3173 int status; 3174 3175 HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _init")); 3176 mac_init_ops(&hxge_dev_ops, "hxge"); 3177 status = ddi_soft_state_init(&hxge_list, sizeof (hxge_t), 0); 3178 if (status != 0) { 3179 HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL, 3180 "failed to init device soft state")); 3181 mac_fini_ops(&hxge_dev_ops); 3182 goto _init_exit; 3183 } 3184 3185 status = mod_install(&modlinkage); 3186 if (status != 0) { 3187 ddi_soft_state_fini(&hxge_list); 3188 HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL, "Mod install failed")); 3189 goto _init_exit; 3190 } 3191 3192 MUTEX_INIT(&hxge_common_lock, NULL, MUTEX_DRIVER, NULL); 3193 3194 _init_exit: 3195 HXGE_DEBUG_MSG((NULL, MOD_CTL, "_init status = 0x%X", status)); 3196 3197 return (status); 3198 } 3199 3200 int 3201 _fini(void) 3202 { 3203 int status; 3204 3205 HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini")); 3206 3207 HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini: mod_remove")); 3208 3209 if (hxge_mblks_pending) 3210 return (EBUSY); 3211 3212 status = mod_remove(&modlinkage); 3213 if (status != DDI_SUCCESS) { 3214 HXGE_DEBUG_MSG((NULL, MOD_CTL, 3215 "Module removal failed 0x%08x", status)); 3216 goto _fini_exit; 3217 } 3218 3219 mac_fini_ops(&hxge_dev_ops); 3220 3221 ddi_soft_state_fini(&hxge_list); 3222 3223 MUTEX_DESTROY(&hxge_common_lock); 3224 3225 _fini_exit: 3226 HXGE_DEBUG_MSG((NULL, MOD_CTL, "_fini status = 0x%08x", status)); 3227 3228 return (status); 3229 } 3230 3231 int 3232 _info(struct modinfo *modinfop) 3233 { 3234 int status; 3235 3236 HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _info")); 3237 status = mod_info(&modlinkage, modinfop); 3238 HXGE_DEBUG_MSG((NULL, MOD_CTL, " _info status = 0x%X", status)); 3239 3240 return (status); 3241 } 3242 3243 /*ARGSUSED*/ 3244 hxge_status_t 3245 hxge_add_intrs(p_hxge_t hxgep) 3246 { 3247 int intr_types; 3248 int type = 0; 3249 int ddi_status = DDI_SUCCESS; 3250 hxge_status_t status = HXGE_OK; 3251 3252 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs")); 3253 3254 hxgep->hxge_intr_type.intr_registered = B_FALSE; 3255 hxgep->hxge_intr_type.intr_enabled = B_FALSE; 3256 hxgep->hxge_intr_type.msi_intx_cnt = 0; 3257 hxgep->hxge_intr_type.intr_added = 0; 3258 hxgep->hxge_intr_type.niu_msi_enable = B_FALSE; 3259 hxgep->hxge_intr_type.intr_type = 0; 3260 3261 if (hxge_msi_enable) { 3262 hxgep->hxge_intr_type.niu_msi_enable = B_TRUE; 3263 } 3264 3265 /* Get the supported interrupt types */ 3266 if ((ddi_status = ddi_intr_get_supported_types(hxgep->dip, &intr_types)) 3267 != DDI_SUCCESS) { 3268 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "<== hxge_add_intrs: " 3269 "ddi_intr_get_supported_types failed: status 0x%08x", 3270 ddi_status)); 3271 return (HXGE_ERROR | HXGE_DDI_FAILED); 3272 } 3273 3274 hxgep->hxge_intr_type.intr_types = intr_types; 3275 3276 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: " 3277 "ddi_intr_get_supported_types: 0x%08x", intr_types)); 3278 3279 /* 3280 * Pick the interrupt type to use (MSI-X, MSI, or INTx), based on 3281 * hxge_msi_enable: 1 - MSI 3282 * 2 - MSI-X 3283 * others - FIXED 3284 */ 3285 switch (hxge_msi_enable) { 3286 default: 3287 type = DDI_INTR_TYPE_FIXED; 3288 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: " 3289 "use fixed (intx emulation) type %08x", type)); 3290 break; 3291 3292 case 2: 3293 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: " 3294 "ddi_intr_get_supported_types: 0x%08x", intr_types)); 3295 if (intr_types & DDI_INTR_TYPE_MSIX) { 3296 type = DDI_INTR_TYPE_MSIX; 3297 HXGE_DEBUG_MSG((hxgep, INT_CTL, 3298 "==> hxge_add_intrs: " 3299 "ddi_intr_get_supported_types: MSIX 0x%08x", type)); 3300 } else if (intr_types & DDI_INTR_TYPE_MSI) { 3301 type = DDI_INTR_TYPE_MSI; 3302 HXGE_DEBUG_MSG((hxgep, INT_CTL, 3303 "==> hxge_add_intrs: " 3304 "ddi_intr_get_supported_types: MSI 0x%08x", type)); 3305 } else if (intr_types & DDI_INTR_TYPE_FIXED) { 3306 type = DDI_INTR_TYPE_FIXED; 3307 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: " 3308 "ddi_intr_get_supported_types: FIXED 0x%08x", type)); 3309 } 3310 break; 3311 3312 case 1: 3313 if (intr_types & DDI_INTR_TYPE_MSI) { 3314 type = DDI_INTR_TYPE_MSI; 3315 HXGE_DEBUG_MSG((hxgep, INT_CTL, 3316 "==> hxge_add_intrs: " 3317 "ddi_intr_get_supported_types: MSI 0x%08x", type)); 3318 } else if (intr_types & DDI_INTR_TYPE_MSIX) { 3319 type = DDI_INTR_TYPE_MSIX; 3320 HXGE_DEBUG_MSG((hxgep, INT_CTL, 3321 "==> hxge_add_intrs: " 3322 "ddi_intr_get_supported_types: MSIX 0x%08x", type)); 3323 } else if (intr_types & DDI_INTR_TYPE_FIXED) { 3324 type = DDI_INTR_TYPE_FIXED; 3325 HXGE_DEBUG_MSG((hxgep, INT_CTL, 3326 "==> hxge_add_intrs: " 3327 "ddi_intr_get_supported_types: FIXED 0x%08x", type)); 3328 } 3329 } 3330 3331 hxgep->hxge_intr_type.intr_type = type; 3332 if ((type == DDI_INTR_TYPE_MSIX || type == DDI_INTR_TYPE_MSI || 3333 type == DDI_INTR_TYPE_FIXED) && 3334 hxgep->hxge_intr_type.niu_msi_enable) { 3335 if ((status = hxge_add_intrs_adv(hxgep)) != DDI_SUCCESS) { 3336 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3337 " hxge_add_intrs: " 3338 " hxge_add_intrs_adv failed: status 0x%08x", 3339 status)); 3340 return (status); 3341 } else { 3342 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_add_intrs: " 3343 "interrupts registered : type %d", type)); 3344 hxgep->hxge_intr_type.intr_registered = B_TRUE; 3345 3346 HXGE_DEBUG_MSG((hxgep, DDI_CTL, 3347 "\nAdded advanced hxge add_intr_adv " 3348 "intr type 0x%x\n", type)); 3349 3350 return (status); 3351 } 3352 } 3353 3354 if (!hxgep->hxge_intr_type.intr_registered) { 3355 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3356 "==> hxge_add_intrs: failed to register interrupts")); 3357 return (HXGE_ERROR | HXGE_DDI_FAILED); 3358 } 3359 3360 HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs")); 3361 3362 return (status); 3363 } 3364 3365 /*ARGSUSED*/ 3366 static hxge_status_t 3367 hxge_add_soft_intrs(p_hxge_t hxgep) 3368 { 3369 int ddi_status = DDI_SUCCESS; 3370 hxge_status_t status = HXGE_OK; 3371 3372 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_add_soft_intrs")); 3373 3374 hxgep->resched_id = NULL; 3375 hxgep->resched_running = B_FALSE; 3376 ddi_status = ddi_add_softintr(hxgep->dip, DDI_SOFTINT_LOW, 3377 &hxgep->resched_id, NULL, NULL, hxge_reschedule, (caddr_t)hxgep); 3378 if (ddi_status != DDI_SUCCESS) { 3379 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "<== hxge_add_soft_intrs: " 3380 "ddi_add_softintr failed: status 0x%08x", ddi_status)); 3381 return (HXGE_ERROR | HXGE_DDI_FAILED); 3382 } 3383 3384 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_add_soft_intrs")); 3385 3386 return (status); 3387 } 3388 3389 /*ARGSUSED*/ 3390 static hxge_status_t 3391 hxge_add_intrs_adv(p_hxge_t hxgep) 3392 { 3393 int intr_type; 3394 p_hxge_intr_t intrp; 3395 hxge_status_t status; 3396 3397 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv")); 3398 3399 intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type; 3400 intr_type = intrp->intr_type; 3401 3402 HXGE_DEBUG_MSG((hxgep,
INT_CTL, "==> hxge_add_intrs_adv: type 0x%x", 3403 intr_type)); 3404 3405 switch (intr_type) { 3406 case DDI_INTR_TYPE_MSI: /* 0x2 */ 3407 case DDI_INTR_TYPE_MSIX: /* 0x4 */ 3408 status = hxge_add_intrs_adv_type(hxgep, intr_type); 3409 break; 3410 3411 case DDI_INTR_TYPE_FIXED: /* 0x1 */ 3412 status = hxge_add_intrs_adv_type_fix(hxgep, intr_type); 3413 break; 3414 3415 default: 3416 status = HXGE_ERROR; 3417 break; 3418 } 3419 3420 HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs_adv")); 3421 3422 return (status); 3423 } 3424 3425 /*ARGSUSED*/ 3426 static hxge_status_t 3427 hxge_add_intrs_adv_type(p_hxge_t hxgep, uint32_t int_type) 3428 { 3429 dev_info_t *dip = hxgep->dip; 3430 p_hxge_ldg_t ldgp; 3431 p_hxge_intr_t intrp; 3432 uint_t *inthandler; 3433 void *arg1, *arg2; 3434 int behavior; 3435 int nintrs, navail; 3436 int nactual, nrequired; 3437 int inum = 0; 3438 int loop = 0; 3439 int x, y; 3440 int ddi_status = DDI_SUCCESS; 3441 hxge_status_t status = HXGE_OK; 3442 3443 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv_type")); 3444 3445 intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type; 3446 3447 ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs); 3448 if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) { 3449 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3450 "ddi_intr_get_nintrs() failed, status: 0x%x, " 3451 "nintrs: %d", ddi_status, nintrs)); 3452 return (HXGE_ERROR | HXGE_DDI_FAILED); 3453 } 3454 3455 ddi_status = ddi_intr_get_navail(dip, int_type, &navail); 3456 if ((ddi_status != DDI_SUCCESS) || (navail == 0)) { 3457 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3458 "ddi_intr_get_navail() failed, status: 0x%x, " 3459 "navail: %d", ddi_status, navail)); 3460 return (HXGE_ERROR | HXGE_DDI_FAILED); 3461 } 3462 3463 HXGE_DEBUG_MSG((hxgep, INT_CTL, 3464 "ddi_intr_get_navail() returned: intr type %d nintrs %d, navail %d", 3465 int_type, nintrs, navail)); 3466 3467 if (int_type == DDI_INTR_TYPE_MSI && !ISP2(navail)) { 3468 /* MSI must be power of 2 */ 3469 if ((navail & 16) == 16) { 3470 navail = 16; 3471 } else if ((navail & 8) == 8) { 3472 navail = 8; 3473 } else if ((navail & 4) == 4) { 3474 navail = 4; 3475 } else if ((navail & 2) == 2) { 3476 navail = 2; 3477 } else { 3478 navail = 1; 3479 } 3480 HXGE_DEBUG_MSG((hxgep, INT_CTL, 3481 "ddi_intr_get_navail(): (msi power of 2) nintrs %d, " 3482 "navail %d", nintrs, navail)); 3483 } 3484 3485 HXGE_DEBUG_MSG((hxgep, INT_CTL, 3486 "requesting: intr type %d nintrs %d, navail %d", 3487 int_type, nintrs, navail)); 3488 3489 behavior = ((int_type == DDI_INTR_TYPE_FIXED) ?
DDI_INTR_ALLOC_STRICT : 3490 DDI_INTR_ALLOC_NORMAL); 3491 intrp->intr_size = navail * sizeof (ddi_intr_handle_t); 3492 intrp->htable = kmem_zalloc(intrp->intr_size, KM_SLEEP); 3493 3494 ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum, 3495 navail, &nactual, behavior); 3496 if (ddi_status != DDI_SUCCESS || nactual == 0) { 3497 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3498 " ddi_intr_alloc() failed: %d", ddi_status)); 3499 kmem_free(intrp->htable, intrp->intr_size); 3500 return (HXGE_ERROR | HXGE_DDI_FAILED); 3501 } 3502 3503 HXGE_DEBUG_MSG((hxgep, INT_CTL, 3504 "ddi_intr_alloc() returned: navail %d nactual %d", 3505 navail, nactual)); 3506 3507 if ((ddi_status = ddi_intr_get_pri(intrp->htable[0], 3508 (uint_t *)&intrp->pri)) != DDI_SUCCESS) { 3509 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3510 " ddi_intr_get_pri() failed: %d", ddi_status)); 3511 /* Free already allocated interrupts */ 3512 for (y = 0; y < nactual; y++) { 3513 (void) ddi_intr_free(intrp->htable[y]); 3514 } 3515 3516 kmem_free(intrp->htable, intrp->intr_size); 3517 return (HXGE_ERROR | HXGE_DDI_FAILED); 3518 } 3519 3520 nrequired = 0; 3521 status = hxge_ldgv_init(hxgep, &nactual, &nrequired); 3522 if (status != HXGE_OK) { 3523 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3524 "hxge_add_intrs_adv_type:hxge_ldgv_init " 3525 "failed: 0x%x", status)); 3526 /* Free already allocated interrupts */ 3527 for (y = 0; y < nactual; y++) { 3528 (void) ddi_intr_free(intrp->htable[y]); 3529 } 3530 3531 kmem_free(intrp->htable, intrp->intr_size); 3532 return (status); 3533 } 3534 3535 ldgp = hxgep->ldgvp->ldgp; 3536 HXGE_DEBUG_MSG((hxgep, INT_CTL, 3537 "After hxge_ldgv_init(): nreq %d nactual %d", nrequired, nactual)); 3538 3539 if (nactual < nrequired) 3540 loop = nactual; 3541 else 3542 loop = nrequired; 3543 3544 for (x = 0; x < loop; x++, ldgp++) { 3545 ldgp->vector = (uint8_t)x; 3546 arg1 = ldgp->ldvp; 3547 arg2 = hxgep; 3548 if (ldgp->nldvs == 1) { 3549 inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler; 3550 HXGE_DEBUG_MSG((hxgep, INT_CTL, 3551 "hxge_add_intrs_adv_type: arg1 0x%x arg2 0x%x: " 3552 "1-1 int handler (entry %d)\n", 3553 arg1, arg2, x)); 3554 } else if (ldgp->nldvs > 1) { 3555 inthandler = (uint_t *)ldgp->sys_intr_handler; 3556 HXGE_DEBUG_MSG((hxgep, INT_CTL, 3557 "hxge_add_intrs_adv_type: arg1 0x%x arg2 0x%x: " 3558 "nldevs %d int handler (entry %d)\n", 3559 arg1, arg2, ldgp->nldvs, x)); 3560 } 3561 HXGE_DEBUG_MSG((hxgep, INT_CTL, 3562 "==> hxge_add_intrs_adv_type: ddi_add_intr(inum) #%d " 3563 "htable 0x%llx", x, intrp->htable[x])); 3564 3565 if ((ddi_status = ddi_intr_add_handler(intrp->htable[x], 3566 (ddi_intr_handler_t *)inthandler, arg1, arg2)) != 3567 DDI_SUCCESS) { 3568 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3569 "==> hxge_add_intrs_adv_type: failed #%d " 3570 "status 0x%x", x, ddi_status)); 3571 for (y = 0; y < intrp->intr_added; y++) { 3572 (void) ddi_intr_remove_handler( 3573 intrp->htable[y]); 3574 } 3575 3576 /* Free already allocated intr */ 3577 for (y = 0; y < nactual; y++) { 3578 (void) ddi_intr_free(intrp->htable[y]); 3579 } 3580 kmem_free(intrp->htable, intrp->intr_size); 3581 3582 (void) hxge_ldgv_uninit(hxgep); 3583 3584 return (HXGE_ERROR | HXGE_DDI_FAILED); 3585 } 3586 3587 intrp->intr_added++; 3588 } 3589 intrp->msi_intx_cnt = nactual; 3590 3591 HXGE_DEBUG_MSG((hxgep, INT_CTL, 3592 "Requested: %d, Allowed: %d msi_intx_cnt %d intr_added %d", 3593 navail, nactual, intrp->msi_intx_cnt, intrp->intr_added)); 3594 3595 (void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap); 3596 (void)
hxge_intr_ldgv_init(hxgep); 3597 3598 HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs_adv_type")); 3599 3600 return (status); 3601 } 3602 3603 /*ARGSUSED*/ 3604 static hxge_status_t 3605 hxge_add_intrs_adv_type_fix(p_hxge_t hxgep, uint32_t int_type) 3606 { 3607 dev_info_t *dip = hxgep->dip; 3608 p_hxge_ldg_t ldgp; 3609 p_hxge_intr_t intrp; 3610 uint_t *inthandler; 3611 void *arg1, *arg2; 3612 int behavior; 3613 int nintrs, navail; 3614 int nactual, nrequired; 3615 int inum = 0; 3616 int x, y; 3617 int ddi_status = DDI_SUCCESS; 3618 hxge_status_t status = HXGE_OK; 3619 3620 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv_type_fix")); 3621 intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type; 3622 3623 ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs); 3624 if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) { 3625 HXGE_DEBUG_MSG((hxgep, INT_CTL, 3626 "ddi_intr_get_nintrs() failed, status: 0x%x, " 3627 "nintrs: %d", ddi_status, nintrs)); 3628 return (HXGE_ERROR | HXGE_DDI_FAILED); 3629 } 3630 3631 ddi_status = ddi_intr_get_navail(dip, int_type, &navail); 3632 if ((ddi_status != DDI_SUCCESS) || (navail == 0)) { 3633 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3634 "ddi_intr_get_navail() failed, status: 0x%x, " 3635 "navail: %d", ddi_status, navail)); 3636 return (HXGE_ERROR | HXGE_DDI_FAILED); 3637 } 3638 3639 HXGE_DEBUG_MSG((hxgep, INT_CTL, 3640 "ddi_intr_get_navail() returned: nintrs %d, navail %d", 3641 nintrs, navail)); 3642 3643 behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT : 3644 DDI_INTR_ALLOC_NORMAL); 3645 intrp->intr_size = navail * sizeof (ddi_intr_handle_t); 3646 intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP); 3647 ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum, 3648 navail, &nactual, behavior); 3649 if (ddi_status != DDI_SUCCESS || nactual == 0) { 3650 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3651 " ddi_intr_alloc() failed: %d", ddi_status)); 3652 kmem_free(intrp->htable, intrp->intr_size); 3653 return (HXGE_ERROR | HXGE_DDI_FAILED); 3654 } 3655 3656 if ((ddi_status = ddi_intr_get_pri(intrp->htable[0], 3657 (uint_t *)&intrp->pri)) != DDI_SUCCESS) { 3658 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3659 " ddi_intr_get_pri() failed: %d", ddi_status)); 3660 /* Free already allocated interrupts */ 3661 for (y = 0; y < nactual; y++) { 3662 (void) ddi_intr_free(intrp->htable[y]); 3663 } 3664 3665 kmem_free(intrp->htable, intrp->intr_size); 3666 return (HXGE_ERROR | HXGE_DDI_FAILED); 3667 } 3668 3669 nrequired = 0; 3670 status = hxge_ldgv_init(hxgep, &nactual, &nrequired); 3671 if (status != HXGE_OK) { 3672 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3673 "hxge_add_intrs_adv_type_fix:hxge_ldgv_init " 3674 "failed: 0x%x", status)); 3675 /* Free already allocated interrupts */ 3676 for (y = 0; y < nactual; y++) { 3677 (void) ddi_intr_free(intrp->htable[y]); 3678 } 3679 3680 kmem_free(intrp->htable, intrp->intr_size); 3681 return (status); 3682 } 3683 3684 ldgp = hxgep->ldgvp->ldgp; 3685 for (x = 0; x < nrequired; x++, ldgp++) { 3686 ldgp->vector = (uint8_t)x; 3687 arg1 = ldgp->ldvp; 3688 arg2 = hxgep; 3689 if (ldgp->nldvs == 1) { 3690 inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler; 3691 HXGE_DEBUG_MSG((hxgep, INT_CTL, 3692 "hxge_add_intrs_adv_type_fix: " 3693 "1-1 int handler(%d) ldg %d ldv %d " 3694 "arg1 $%p arg2 $%p\n", 3695 x, ldgp->ldg, ldgp->ldvp->ldv, arg1, arg2)); 3696 } else if (ldgp->nldvs > 1) { 3697 inthandler = (uint_t *)ldgp->sys_intr_handler; 3698 HXGE_DEBUG_MSG((hxgep, INT_CTL, 3699 "hxge_add_intrs_adv_type_fix: " 3700 "shared ldv %d int
handler(%d) ldv %d ldg %d " 3701 "arg1 0x%016llx arg2 0x%016llx\n", 3702 x, ldgp->nldvs, ldgp->ldg, ldgp->ldvp->ldv, 3703 arg1, arg2)); 3704 } 3705 3706 if ((ddi_status = ddi_intr_add_handler(intrp->htable[x], 3707 (ddi_intr_handler_t *)inthandler, arg1, arg2)) != 3708 DDI_SUCCESS) { 3709 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3710 "==> hxge_add_intrs_adv_type_fix: failed #%d " 3711 "status 0x%x", x, ddi_status)); 3712 for (y = 0; y < intrp->intr_added; y++) { 3713 (void) ddi_intr_remove_handler( 3714 intrp->htable[y]); 3715 } 3716 for (y = 0; y < nactual; y++) { 3717 (void) ddi_intr_free(intrp->htable[y]); 3718 } 3719 /* Free already allocated intr */ 3720 kmem_free(intrp->htable, intrp->intr_size); 3721 3722 (void) hxge_ldgv_uninit(hxgep); 3723 3724 return (HXGE_ERROR | HXGE_DDI_FAILED); 3725 } 3726 intrp->intr_added++; 3727 } 3728 3729 intrp->msi_intx_cnt = nactual; 3730 3731 (void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap); 3732 3733 status = hxge_intr_ldgv_init(hxgep); 3734 3735 HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs_adv_type_fix")); 3736 3737 return (status); 3738 } 3739 3740 /*ARGSUSED*/ 3741 static void 3742 hxge_remove_intrs(p_hxge_t hxgep) 3743 { 3744 int i, inum; 3745 p_hxge_intr_t intrp; 3746 3747 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_remove_intrs")); 3748 intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type; 3749 if (!intrp->intr_registered) { 3750 HXGE_DEBUG_MSG((hxgep, INT_CTL, 3751 "<== hxge_remove_intrs: interrupts not registered")); 3752 return; 3753 } 3754 3755 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_remove_intrs:advanced")); 3756 3757 if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) { 3758 (void) ddi_intr_block_disable(intrp->htable, 3759 intrp->intr_added); 3760 } else { 3761 for (i = 0; i < intrp->intr_added; i++) { 3762 (void) ddi_intr_disable(intrp->htable[i]); 3763 } 3764 } 3765 3766 for (inum = 0; inum < intrp->intr_added; inum++) { 3767 if (intrp->htable[inum]) { 3768 (void) ddi_intr_remove_handler(intrp->htable[inum]); 3769 } 3770 } 3771 3772 for (inum = 0; inum < intrp->msi_intx_cnt; inum++) { 3773 if (intrp->htable[inum]) { 3774 HXGE_DEBUG_MSG((hxgep, DDI_CTL, 3775 "hxge_remove_intrs: ddi_intr_free inum %d " 3776 "msi_intx_cnt %d intr_added %d", 3777 inum, intrp->msi_intx_cnt, intrp->intr_added)); 3778 3779 (void) ddi_intr_free(intrp->htable[inum]); 3780 } 3781 } 3782 3783 kmem_free(intrp->htable, intrp->intr_size); 3784 intrp->intr_registered = B_FALSE; 3785 intrp->intr_enabled = B_FALSE; 3786 intrp->msi_intx_cnt = 0; 3787 intrp->intr_added = 0; 3788 3789 (void) hxge_ldgv_uninit(hxgep); 3790 3791 HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_remove_intrs")); 3792 } 3793 3794 /*ARGSUSED*/ 3795 static void 3796 hxge_remove_soft_intrs(p_hxge_t hxgep) 3797 { 3798 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_remove_soft_intrs")); 3799 3800 if (hxgep->resched_id) { 3801 ddi_remove_softintr(hxgep->resched_id); 3802 HXGE_DEBUG_MSG((hxgep, INT_CTL, 3803 "==> hxge_remove_soft_intrs: removed")); 3804 hxgep->resched_id = NULL; 3805 } 3806 3807 HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_remove_soft_intrs")); 3808 } 3809 3810 /*ARGSUSED*/ 3811 void 3812 hxge_intrs_enable(p_hxge_t hxgep) 3813 { 3814 p_hxge_intr_t intrp; 3815 int i; 3816 int status; 3817 3818 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_enable")); 3819 3820 intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type; 3821 3822 if (!intrp->intr_registered) { 3823 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "<== hxge_intrs_enable: " 3824 "interrupts are not registered")); 3825 return; 3826 } 3827 3828 if
/*ARGSUSED*/
static void
hxge_remove_intrs(p_hxge_t hxgep)
{
	int		i, inum;
	p_hxge_intr_t	intrp;

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_remove_intrs"));
	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
	if (!intrp->intr_registered) {
		HXGE_DEBUG_MSG((hxgep, INT_CTL,
		    "<== hxge_remove_intrs: interrupts not registered"));
		return;
	}

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_remove_intrs: advanced"));

	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
		(void) ddi_intr_block_disable(intrp->htable,
		    intrp->intr_added);
	} else {
		for (i = 0; i < intrp->intr_added; i++) {
			(void) ddi_intr_disable(intrp->htable[i]);
		}
	}

	for (inum = 0; inum < intrp->intr_added; inum++) {
		if (intrp->htable[inum]) {
			(void) ddi_intr_remove_handler(intrp->htable[inum]);
		}
	}

	for (inum = 0; inum < intrp->msi_intx_cnt; inum++) {
		if (intrp->htable[inum]) {
			HXGE_DEBUG_MSG((hxgep, DDI_CTL,
			    "hxge_remove_intrs: ddi_intr_free inum %d "
			    "msi_intx_cnt %d intr_added %d",
			    inum, intrp->msi_intx_cnt, intrp->intr_added));

			(void) ddi_intr_free(intrp->htable[inum]);
		}
	}

	kmem_free(intrp->htable, intrp->intr_size);
	intrp->intr_registered = B_FALSE;
	intrp->intr_enabled = B_FALSE;
	intrp->msi_intx_cnt = 0;
	intrp->intr_added = 0;

	(void) hxge_ldgv_uninit(hxgep);

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_remove_intrs"));
}

/*ARGSUSED*/
static void
hxge_remove_soft_intrs(p_hxge_t hxgep)
{
	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_remove_soft_intrs"));

	if (hxgep->resched_id) {
		ddi_remove_softintr(hxgep->resched_id);
		HXGE_DEBUG_MSG((hxgep, INT_CTL,
		    "==> hxge_remove_soft_intrs: removed"));
		hxgep->resched_id = NULL;
	}

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_remove_soft_intrs"));
}

/*ARGSUSED*/
void
hxge_intrs_enable(p_hxge_t hxgep)
{
	p_hxge_intr_t	intrp;
	int		i;
	int		status;

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_enable"));

	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;

	if (!intrp->intr_registered) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "<== hxge_intrs_enable: "
		    "interrupts are not registered"));
		return;
	}

	if (intrp->intr_enabled) {
		HXGE_DEBUG_MSG((hxgep, INT_CTL,
		    "<== hxge_intrs_enable: already enabled"));
		return;
	}

	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
		status = ddi_intr_block_enable(intrp->htable,
		    intrp->intr_added);
		HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_enable "
		    "block enable - status 0x%x total inums #%d\n",
		    status, intrp->intr_added));
		/* record success so a later call short-circuits above */
		if (status == DDI_SUCCESS) {
			intrp->intr_enabled = B_TRUE;
		}
	} else {
		for (i = 0; i < intrp->intr_added; i++) {
			status = ddi_intr_enable(intrp->htable[i]);
			HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_enable "
			    "ddi_intr_enable: enable - status 0x%x "
			    "total inums %d enable inum #%d\n",
			    status, intrp->intr_added, i));
			if (status == DDI_SUCCESS) {
				intrp->intr_enabled = B_TRUE;
			}
		}
	}

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intrs_enable"));
}

/*ARGSUSED*/
static void
hxge_intrs_disable(p_hxge_t hxgep)
{
	p_hxge_intr_t	intrp;
	int		i;

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_disable"));

	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;

	if (!intrp->intr_registered) {
		HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intrs_disable: "
		    "interrupts are not registered"));
		return;
	}

	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
		(void) ddi_intr_block_disable(intrp->htable,
		    intrp->intr_added);
	} else {
		for (i = 0; i < intrp->intr_added; i++) {
			(void) ddi_intr_disable(intrp->htable[i]);
		}
	}

	intrp->intr_enabled = B_FALSE;
	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intrs_disable"));
}
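/*
 * Register this instance with the GLDv3 MAC layer: fill in the
 * mac_register_t with the driver handle, devinfo node, station MAC
 * address, callback vector, SDU limits, and private properties, then
 * hand it to mac_register().  The template is freed whether or not
 * registration succeeds.
 */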
static hxge_status_t
hxge_mac_register(p_hxge_t hxgep)
{
	mac_register_t	*macp;
	int		status;

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_mac_register"));

	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
		return (HXGE_ERROR);

	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	macp->m_driver = hxgep;
	macp->m_dip = hxgep->dip;
	macp->m_src_addr = hxgep->ouraddr.ether_addr_octet;

	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
	    "hxge_mac_register: ether addr is %x:%x:%x:%x:%x:%x",
	    macp->m_src_addr[0],
	    macp->m_src_addr[1],
	    macp->m_src_addr[2],
	    macp->m_src_addr[3],
	    macp->m_src_addr[4],
	    macp->m_src_addr[5]));

	macp->m_callbacks = &hxge_m_callbacks;
	macp->m_min_sdu = 0;
	macp->m_max_sdu = hxgep->vmac.maxframesize - MTU_TO_FRAME_SIZE;
	macp->m_margin = VLAN_TAGSZ;
	macp->m_priv_props = hxge_priv_props;
	macp->m_priv_prop_count = HXGE_MAX_PRIV_PROPS;

	status = mac_register(macp, &hxgep->mach);
	mac_free(macp);

	if (status != 0) {
		cmn_err(CE_WARN,
		    "hxge_mac_register failed (status %d instance %d)",
		    status, hxgep->instance);
		return (HXGE_ERROR);
	}

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_mac_register success "
	    "(instance %d)", hxgep->instance));

	return (HXGE_OK);
}
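/*
 * Hydra instances that share a parent devinfo node also share one
 * hxge_hw_list_t of common hardware state (configuration, TCAM and
 * VLAN locks).  Attach either joins the existing list entry for the
 * parent or allocates a new one under hxge_common_lock; detach (in
 * hxge_uninit_common_dev() below) drops the reference and frees the
 * entry when the last device goes away.
 */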
static int
hxge_init_common_dev(p_hxge_t hxgep)
{
	p_hxge_hw_list_t	hw_p;
	dev_info_t		*p_dip;

	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "==> hxge_init_common_dev"));

	p_dip = hxgep->p_dip;
	MUTEX_ENTER(&hxge_common_lock);

	/*
	 * Loop through the existing per-Hydra hardware list.
	 */
	for (hw_p = hxge_hw_list; hw_p; hw_p = hw_p->next) {
		HXGE_DEBUG_MSG((hxgep, MOD_CTL,
		    "==> hxge_init_common_dev: hw_p $%p parent dip $%p",
		    hw_p, p_dip));
		if (hw_p->parent_devp == p_dip) {
			hxgep->hxge_hw_p = hw_p;
			hw_p->ndevs++;
			hw_p->hxge_p = hxgep;
			HXGE_DEBUG_MSG((hxgep, MOD_CTL,
			    "==> hxge_init_common_dev: "
			    "hw_p $%p parent dip $%p ndevs %d (found)",
			    hw_p, p_dip, hw_p->ndevs));
			break;
		}
	}

	if (hw_p == NULL) {
		HXGE_DEBUG_MSG((hxgep, MOD_CTL,
		    "==> hxge_init_common_dev: parent dip $%p (new)", p_dip));
		hw_p = kmem_zalloc(sizeof (hxge_hw_list_t), KM_SLEEP);
		hw_p->parent_devp = p_dip;
		hw_p->magic = HXGE_MAGIC;
		hxgep->hxge_hw_p = hw_p;
		hw_p->ndevs++;
		hw_p->hxge_p = hxgep;
		hw_p->next = hxge_hw_list;

		MUTEX_INIT(&hw_p->hxge_cfg_lock, NULL, MUTEX_DRIVER, NULL);
		MUTEX_INIT(&hw_p->hxge_tcam_lock, NULL, MUTEX_DRIVER, NULL);
		MUTEX_INIT(&hw_p->hxge_vlan_lock, NULL, MUTEX_DRIVER, NULL);

		hxge_hw_list = hw_p;
	}
	MUTEX_EXIT(&hxge_common_lock);
	HXGE_DEBUG_MSG((hxgep, MOD_CTL,
	    "==> hxge_init_common_dev (hxge_hw_list) $%p", hxge_hw_list));
	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "<== hxge_init_common_dev"));

	return (HXGE_OK);
}

static void
hxge_uninit_common_dev(p_hxge_t hxgep)
{
	p_hxge_hw_list_t	hw_p, h_hw_p;
	dev_info_t		*p_dip;

	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "==> hxge_uninit_common_dev"));
	if (hxgep->hxge_hw_p == NULL) {
		HXGE_DEBUG_MSG((hxgep, MOD_CTL,
		    "<== hxge_uninit_common_dev (no common)"));
		return;
	}

	MUTEX_ENTER(&hxge_common_lock);
	h_hw_p = hxge_hw_list;
	for (hw_p = hxge_hw_list; hw_p; hw_p = hw_p->next) {
		p_dip = hw_p->parent_devp;
		if (hxgep->hxge_hw_p == hw_p && p_dip == hxgep->p_dip &&
		    hxgep->hxge_hw_p->magic == HXGE_MAGIC &&
		    hw_p->magic == HXGE_MAGIC) {
			HXGE_DEBUG_MSG((hxgep, MOD_CTL,
			    "==> hxge_uninit_common_dev: "
			    "hw_p $%p parent dip $%p ndevs %d (found)",
			    hw_p, p_dip, hw_p->ndevs));

			hxgep->hxge_hw_p = NULL;
			if (hw_p->ndevs) {
				hw_p->ndevs--;
			}
			hw_p->hxge_p = NULL;
			if (!hw_p->ndevs) {
				MUTEX_DESTROY(&hw_p->hxge_vlan_lock);
				MUTEX_DESTROY(&hw_p->hxge_tcam_lock);
				MUTEX_DESTROY(&hw_p->hxge_cfg_lock);
				HXGE_DEBUG_MSG((hxgep, MOD_CTL,
				    "==> hxge_uninit_common_dev: "
				    "hw_p $%p parent dip $%p ndevs %d (last)",
				    hw_p, p_dip, hw_p->ndevs));

				if (hw_p == hxge_hw_list) {
					HXGE_DEBUG_MSG((hxgep, MOD_CTL,
					    "==> hxge_uninit_common_dev: "
					    "remove head "
					    "hw_p $%p parent dip $%p "
					    "ndevs %d (head)",
					    hw_p, p_dip, hw_p->ndevs));
					hxge_hw_list = hw_p->next;
				} else {
					HXGE_DEBUG_MSG((hxgep, MOD_CTL,
					    "==> hxge_uninit_common_dev: "
					    "remove middle "
					    "hw_p $%p parent dip $%p "
					    "ndevs %d (middle)",
					    hw_p, p_dip, hw_p->ndevs));
					h_hw_p->next = hw_p->next;
				}

				KMEM_FREE(hw_p, sizeof (hxge_hw_list_t));
			}
			break;
		} else {
			h_hw_p = hw_p;
		}
	}

	MUTEX_EXIT(&hxge_common_lock);
	HXGE_DEBUG_MSG((hxgep, MOD_CTL,
	    "==> hxge_uninit_common_dev (hxge_hw_list) $%p", hxge_hw_list));

	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "<== hxge_uninit_common_dev"));
}

#define	HXGE_MSIX_ENTRIES		32
#define	HXGE_MSIX_WAIT_COUNT		10
#define	HXGE_MSIX_PARITY_CHECK_COUNT	30
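/*
 * Link state is polled rather than interrupt driven: each timer tick
 * reads CIP_LINK_STAT, reports any change in xpcs0_link_up to the
 * MAC layer through hxge_link_update(), and re-arms itself via
 * timeout(9F).
 */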
static void
hxge_link_poll(void *arg)
{
	p_hxge_t	hxgep = (p_hxge_t)arg;
	hpi_handle_t	handle;
	cip_link_stat_t	link_stat;
	hxge_timeout	*to = &hxgep->timeout;

	handle = HXGE_DEV_HPI_HANDLE(hxgep);
	HXGE_REG_RD32(handle, CIP_LINK_STAT, &link_stat.value);

	if (to->report_link_status ||
	    (to->link_status != link_stat.bits.xpcs0_link_up)) {
		to->link_status = link_stat.bits.xpcs0_link_up;
		to->report_link_status = B_FALSE;

		if (link_stat.bits.xpcs0_link_up) {
			hxge_link_update(hxgep, LINK_STATE_UP);
		} else {
			hxge_link_update(hxgep, LINK_STATE_DOWN);
		}
	}

	/* Restart the link status timer to check the link status */
	MUTEX_ENTER(&to->lock);
	to->id = timeout(hxge_link_poll, arg, to->ticks);
	MUTEX_EXIT(&to->lock);
}

static void
hxge_link_update(p_hxge_t hxgep, link_state_t state)
{
	p_hxge_stats_t statsp = (p_hxge_stats_t)hxgep->statsp;

	mac_link_update(hxgep->mach, state);
	if (state == LINK_STATE_UP) {
		statsp->mac_stats.link_speed = 10000;
		statsp->mac_stats.link_duplex = 2;
		statsp->mac_stats.link_up = 1;
	} else {
		statsp->mac_stats.link_speed = 0;
		statsp->mac_stats.link_duplex = 0;
		statsp->mac_stats.link_up = 0;
	}
}
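/*
 * MSI-X table parity handling.  hxge_msix_init() walks all
 * HXGE_MSIX_ENTRIES entries through the INDACC_MEM1 diagnostic
 * window, writing a known pattern and reading it back (presumably to
 * seed the stored parity bits for each entry).  Later,
 * hxge_check_msix_parity_err() reads each entry the same way,
 * recomputes the per-byte even parity with gen_32bit_parity(), and
 * on the first mismatch against the hardware's stored parity bumps
 * eic_msix_parerr and posts a HXGE_FM_EREPORT_PEU_ERR ereport.
 */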
static void
hxge_msix_init(p_hxge_t hxgep)
{
	indacc_mem1_ctrl_t	indacc_mem1_ctrl;
	indacc_mem1_data0_t	data0;
	indacc_mem1_data1_t	data1;
	indacc_mem1_data2_t	data2;
	indacc_mem1_prty_t	prty;
	int			count;
	int			i;

	for (i = 0; i < HXGE_MSIX_ENTRIES; i++) {
		indacc_mem1_ctrl.value = 0;
		HXGE_REG_RD32(hxgep->hpi_handle, INDACC_MEM1_CTRL,
		    &indacc_mem1_ctrl.value);

		data0.value = 0xffffffff - i;
		HXGE_REG_WR32(hxgep->hpi_handle, INDACC_MEM1_DATA0,
		    data0.value);
		data1.value = 0xffffffff - i - 1;
		HXGE_REG_WR32(hxgep->hpi_handle, INDACC_MEM1_DATA1,
		    data1.value);
		data2.value = 0xffffffff - i - 2;
		HXGE_REG_WR32(hxgep->hpi_handle, INDACC_MEM1_DATA2,
		    data2.value);

		/* issue a diagnostic-mode write to entry i */
		indacc_mem1_ctrl.value = 0;
		indacc_mem1_ctrl.bits.mem1_addr = i;
		indacc_mem1_ctrl.bits.mem1_sel = 2;
		indacc_mem1_ctrl.bits.mem1_prty_wen = 0;
		indacc_mem1_ctrl.bits.mem1_command = 0;
		indacc_mem1_ctrl.bits.mem1_diagen = 1;
		HXGE_REG_WR32(hxgep->hpi_handle, INDACC_MEM1_CTRL,
		    indacc_mem1_ctrl.value);

		/* check that the write operation completed */
		HXGE_REG_RD32(hxgep->hpi_handle, INDACC_MEM1_CTRL,
		    &indacc_mem1_ctrl.value);

		count = 0;
		while (indacc_mem1_ctrl.bits.mem1_access_status != 1 &&
		    count++ < HXGE_MSIX_WAIT_COUNT) {
			HXGE_DELAY(1);
			indacc_mem1_ctrl.value = 0;
			HXGE_REG_RD32(hxgep->hpi_handle, INDACC_MEM1_CTRL,
			    &indacc_mem1_ctrl.value);
		}
	}

	for (i = 0; i < HXGE_MSIX_ENTRIES; i++) {
		indacc_mem1_ctrl.value = 0;
		indacc_mem1_ctrl.bits.mem1_addr = i;
		indacc_mem1_ctrl.bits.mem1_sel = 2;
		indacc_mem1_ctrl.bits.mem1_prty_wen = 0;
		indacc_mem1_ctrl.bits.mem1_command = 1;
		indacc_mem1_ctrl.bits.mem1_diagen = 1;

		/* issue read command */
		HXGE_REG_WR32(hxgep->hpi_handle, INDACC_MEM1_CTRL,
		    indacc_mem1_ctrl.value);

		/* wait for read operation to complete */
		count = 0;
		HXGE_REG_RD32(hxgep->hpi_handle, INDACC_MEM1_CTRL,
		    &indacc_mem1_ctrl.value);
		while (indacc_mem1_ctrl.bits.mem1_access_status != 1 &&
		    count++ < HXGE_MSIX_WAIT_COUNT) {
			HXGE_DELAY(1);
			HXGE_REG_RD32(hxgep->hpi_handle, INDACC_MEM1_CTRL,
			    &indacc_mem1_ctrl.value);
		}

		/* read back the data and parity for entry i */
		data0.value = data1.value = data2.value = prty.value = 0;
		HXGE_REG_RD32(hxgep->hpi_handle, INDACC_MEM1_DATA0,
		    &data0.value);
		HXGE_REG_RD32(hxgep->hpi_handle, INDACC_MEM1_DATA1,
		    &data1.value);
		HXGE_REG_RD32(hxgep->hpi_handle, INDACC_MEM1_DATA2,
		    &data2.value);
		HXGE_REG_RD32(hxgep->hpi_handle, INDACC_MEM1_PRTY,
		    &prty.value);
	}

	/* Turn off diagnostic mode */
	indacc_mem1_ctrl.value = 0;
	indacc_mem1_ctrl.bits.mem1_addr = 0;
	indacc_mem1_ctrl.bits.mem1_sel = 0;
	indacc_mem1_ctrl.bits.mem1_prty_wen = 0;
	indacc_mem1_ctrl.bits.mem1_command = 0;
	indacc_mem1_ctrl.bits.mem1_diagen = 0;
	HXGE_REG_WR32(hxgep->hpi_handle, INDACC_MEM1_CTRL,
	    indacc_mem1_ctrl.value);
}

void
hxge_check_msix_parity_err(p_hxge_t hxgep)
{
	indacc_mem1_ctrl_t	indacc_mem1_ctrl;
	indacc_mem1_data0_t	data0;
	indacc_mem1_data1_t	data1;
	indacc_mem1_data2_t	data2;
	indacc_mem1_prty_t	prty;
	uint32_t		parity = 0;
	int			count;
	int			i;

	hpi_handle_t		handle;
	p_hxge_peu_sys_stats_t	statsp;

	handle = hxgep->hpi_handle;
	statsp = (p_hxge_peu_sys_stats_t)&hxgep->statsp->peu_sys_stats;

	for (i = 0; i < HXGE_MSIX_ENTRIES; i++) {
		indacc_mem1_ctrl.value = 0;
		indacc_mem1_ctrl.bits.mem1_addr = i;
		indacc_mem1_ctrl.bits.mem1_sel = 2;
		indacc_mem1_ctrl.bits.mem1_prty_wen = 0;
		indacc_mem1_ctrl.bits.mem1_command = 1;
		indacc_mem1_ctrl.bits.mem1_diagen = 1;

		/* issue read command */
		HXGE_REG_WR32(handle, INDACC_MEM1_CTRL,
		    indacc_mem1_ctrl.value);

		/* wait for read operation to complete */
		count = 0;
		HXGE_REG_RD32(handle, INDACC_MEM1_CTRL,
		    &indacc_mem1_ctrl.value);
		while (indacc_mem1_ctrl.bits.mem1_access_status != 1 &&
		    count++ < HXGE_MSIX_WAIT_COUNT) {
			HXGE_DELAY(1);
			HXGE_REG_RD32(handle, INDACC_MEM1_CTRL,
			    &indacc_mem1_ctrl.value);
		}

		data0.value = data1.value = data2.value = prty.value = 0;
		HXGE_REG_RD32(handle, INDACC_MEM1_DATA0, &data0.value);
		HXGE_REG_RD32(handle, INDACC_MEM1_DATA1, &data1.value);
		HXGE_REG_RD32(handle, INDACC_MEM1_DATA2, &data2.value);
		HXGE_REG_RD32(handle, INDACC_MEM1_PRTY, &prty.value);

		/* recompute even parity: 4 bits per 32-bit data word */
		parity = gen_32bit_parity(data0.value, B_FALSE) |
		    (gen_32bit_parity(data1.value, B_FALSE) << 4) |
		    (gen_32bit_parity(data2.value, B_FALSE) << 8);

		if (parity != prty.bits.mem1_parity) {
			statsp->eic_msix_parerr++;
			if (statsp->eic_msix_parerr == 1) {
				HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
				    "==> hxge_check_msix_parity_err: "
				    "eic_msix_parerr"));
				HXGE_FM_REPORT_ERROR(hxgep, NULL,
				    HXGE_FM_EREPORT_PEU_ERR);
			}
		}
	}

	/* Turn off diagnostic mode */
	indacc_mem1_ctrl.value = 0;
	indacc_mem1_ctrl.bits.mem1_addr = 0;
	indacc_mem1_ctrl.bits.mem1_sel = 0;
	indacc_mem1_ctrl.bits.mem1_prty_wen = 0;
	indacc_mem1_ctrl.bits.mem1_command = 0;
	indacc_mem1_ctrl.bits.mem1_diagen = 0;
	HXGE_REG_WR32(handle, INDACC_MEM1_CTRL, indacc_mem1_ctrl.value);
}

/*
 * Compute one parity bit per byte of the 32-bit word: even parity
 * (XOR of the byte's bits) when odd_parity is B_FALSE, odd parity
 * otherwise.  The four parity bits are packed into the low nibble
 * of the result, bit i covering byte i.
 */
static uint8_t
gen_32bit_parity(uint32_t data, boolean_t odd_parity)
{
	uint8_t		parity = 0;
	uint8_t		data_byte = 0;
	uint8_t		parity_bit = 0;
	uint32_t	i = 0, j = 0;

	for (i = 0; i < 4; i++) {
		data_byte = (data >> (i * 8)) & 0xff;
		parity_bit = odd_parity ? 1 : 0;
		for (j = 0; j < 8; j++) {
			parity_bit ^= (data_byte >> j) & 0x1;
		}
		parity |= (parity_bit << i);
	}

	return (parity);
}