/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * SunOS MT STREAMS Hydra 10Gb Ethernet Device Driver.
 */
#include <hxge_impl.h>
#include <hxge_pfc.h>

/*
 * PSARC/2007/453 MSI-X interrupt limit override
 * (This PSARC case is limited to MSI-X vectors
 *  and SPARC platforms only).
 */
#if defined(_BIG_ENDIAN)
uint32_t hxge_msi_enable = 2;
#else
uint32_t hxge_msi_enable = 1;
#endif

/*
 * Globals: tunable parameters (/etc/system or adb)
 */
uint32_t hxge_rbr_size = HXGE_RBR_RBB_DEFAULT;
uint32_t hxge_rbr_spare_size = 0;
uint32_t hxge_rcr_size = HXGE_RCR_DEFAULT;
uint32_t hxge_tx_ring_size = HXGE_TX_RING_DEFAULT;
uint32_t hxge_bcopy_thresh = TX_BCOPY_MAX;
uint32_t hxge_dvma_thresh = TX_FASTDVMA_MIN;
uint32_t hxge_dma_stream_thresh = TX_STREAM_MIN;
uint32_t hxge_jumbo_frame_size = MAX_FRAME_SIZE;

static hxge_os_mutex_t hxgedebuglock;
static int hxge_debug_init = 0;

/*
 * Debugging flags:
 *	hxge_no_tx_lb:		transmit load balancing
 *	hxge_tx_lb_policy:	0 - TCP/UDP port (default)
 *				1 - From the Stack
 *				2 - Destination IP Address
 */
uint32_t hxge_no_tx_lb = 0;
uint32_t hxge_tx_lb_policy = HXGE_TX_LB_TCPUDP;

/*
 * Tunable to reduce the amount of time spent in the
 * ISR doing Rx Processing.
 */
#if defined(__sparc)
uint32_t hxge_max_rx_pkts = 512;
#else
uint32_t hxge_max_rx_pkts = 1024;
#endif
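/*
 * Illustrative only: like other Solaris driver globals, the tunables above
 * can be overridden at boot time from /etc/system, for example:
 *
 *	set hxge:hxge_rbr_size = 2048
 *
 * or patched on a live system with mdb -kw (hypothetical value shown):
 *
 *	hxge_max_rx_pkts/W 0t256
 */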
/*
 * Tunables to manage the receive buffer blocks.
 *
 * hxge_rx_threshold_hi:	copy all buffers.
 * hxge_rx_buf_size_type:	receive buffer block size type.
 * hxge_rx_threshold_lo:	copy only up to tunable block size type.
 */
hxge_rxbuf_threshold_t hxge_rx_threshold_hi = HXGE_RX_COPY_NONE;
hxge_rxbuf_type_t hxge_rx_buf_size_type = RCR_PKTBUFSZ_0;
hxge_rxbuf_threshold_t hxge_rx_threshold_lo = HXGE_RX_COPY_NONE;

rtrace_t hpi_rtracebuf;

/*
 * Function Prototypes
 */
static int hxge_attach(dev_info_t *, ddi_attach_cmd_t);
static int hxge_detach(dev_info_t *, ddi_detach_cmd_t);
static void hxge_unattach(p_hxge_t);

static hxge_status_t hxge_setup_system_dma_pages(p_hxge_t);

static hxge_status_t hxge_setup_mutexes(p_hxge_t);
static void hxge_destroy_mutexes(p_hxge_t);

static hxge_status_t hxge_map_regs(p_hxge_t hxgep);
static void hxge_unmap_regs(p_hxge_t hxgep);

hxge_status_t hxge_add_intrs(p_hxge_t hxgep);
static hxge_status_t hxge_add_soft_intrs(p_hxge_t hxgep);
static void hxge_remove_intrs(p_hxge_t hxgep);
static void hxge_remove_soft_intrs(p_hxge_t hxgep);
static hxge_status_t hxge_add_intrs_adv(p_hxge_t hxgep);
static hxge_status_t hxge_add_intrs_adv_type(p_hxge_t, uint32_t);
static hxge_status_t hxge_add_intrs_adv_type_fix(p_hxge_t, uint32_t);
void hxge_intrs_enable(p_hxge_t hxgep);
static void hxge_intrs_disable(p_hxge_t hxgep);
static void hxge_suspend(p_hxge_t);
static hxge_status_t hxge_resume(p_hxge_t);
hxge_status_t hxge_setup_dev(p_hxge_t);
static void hxge_destroy_dev(p_hxge_t);
hxge_status_t hxge_alloc_mem_pool(p_hxge_t);
static void hxge_free_mem_pool(p_hxge_t);
static hxge_status_t hxge_alloc_rx_mem_pool(p_hxge_t);
static void hxge_free_rx_mem_pool(p_hxge_t);
static hxge_status_t hxge_alloc_tx_mem_pool(p_hxge_t);
static void hxge_free_tx_mem_pool(p_hxge_t);
static hxge_status_t hxge_dma_mem_alloc(p_hxge_t, dma_method_t,
    struct ddi_dma_attr *, size_t, ddi_device_acc_attr_t *, uint_t,
    p_hxge_dma_common_t);
static void hxge_dma_mem_free(p_hxge_dma_common_t);
static hxge_status_t hxge_alloc_rx_buf_dma(p_hxge_t, uint16_t,
    p_hxge_dma_common_t *, size_t, size_t, uint32_t *);
static void hxge_free_rx_buf_dma(p_hxge_t, p_hxge_dma_common_t, uint32_t);
static hxge_status_t hxge_alloc_rx_cntl_dma(p_hxge_t, uint16_t,
    p_hxge_dma_common_t *, struct ddi_dma_attr *, size_t);
static void hxge_free_rx_cntl_dma(p_hxge_t, p_hxge_dma_common_t);
static hxge_status_t hxge_alloc_tx_buf_dma(p_hxge_t, uint16_t,
    p_hxge_dma_common_t *, size_t, size_t, uint32_t *);
static void hxge_free_tx_buf_dma(p_hxge_t, p_hxge_dma_common_t, uint32_t);
static hxge_status_t hxge_alloc_tx_cntl_dma(p_hxge_t, uint16_t,
    p_hxge_dma_common_t *, size_t);
static void hxge_free_tx_cntl_dma(p_hxge_t, p_hxge_dma_common_t);
static int hxge_init_common_dev(p_hxge_t);
static void hxge_uninit_common_dev(p_hxge_t);
/*
 * The next declarations are for the GLDv3 interface.
 */
static int hxge_m_start(void *);
static void hxge_m_stop(void *);
static int hxge_m_unicst(void *, const uint8_t *);
static int hxge_m_multicst(void *, boolean_t, const uint8_t *);
static int hxge_m_promisc(void *, boolean_t);
static void hxge_m_ioctl(void *, queue_t *, mblk_t *);
static hxge_status_t hxge_mac_register(p_hxge_t hxgep);

static boolean_t hxge_m_getcapab(void *, mac_capab_t, void *);
static boolean_t hxge_param_locked(mac_prop_id_t pr_num);
static int hxge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
    uint_t pr_valsize, const void *pr_val);
static int hxge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
    uint_t pr_flags, uint_t pr_valsize, void *pr_val, uint_t *);
static int hxge_get_def_val(hxge_t *hxgep, mac_prop_id_t pr_num,
    uint_t pr_valsize, void *pr_val);
static int hxge_set_priv_prop(p_hxge_t hxgep, const char *pr_name,
    uint_t pr_valsize, const void *pr_val);
static int hxge_get_priv_prop(p_hxge_t hxgep, const char *pr_name,
    uint_t pr_flags, uint_t pr_valsize, void *pr_val);
static void hxge_link_poll(void *arg);
static void hxge_link_update(p_hxge_t hxge, link_state_t state);
static void hxge_msix_init(p_hxge_t hxgep);
static void hxge_store_msix_table(p_hxge_t hxgep);
static void hxge_check_1entry_msix_table(p_hxge_t hxgep, int msix_index);

mac_priv_prop_t hxge_priv_props[] = {
	{"_rxdma_intr_time", MAC_PROP_PERM_RW},
	{"_rxdma_intr_pkts", MAC_PROP_PERM_RW},
	{"_class_opt_ipv4_tcp", MAC_PROP_PERM_RW},
	{"_class_opt_ipv4_udp", MAC_PROP_PERM_RW},
	{"_class_opt_ipv4_ah", MAC_PROP_PERM_RW},
	{"_class_opt_ipv4_sctp", MAC_PROP_PERM_RW},
	{"_class_opt_ipv6_tcp", MAC_PROP_PERM_RW},
	{"_class_opt_ipv6_udp", MAC_PROP_PERM_RW},
	{"_class_opt_ipv6_ah", MAC_PROP_PERM_RW},
	{"_class_opt_ipv6_sctp", MAC_PROP_PERM_RW}
};

#define	HXGE_MAX_PRIV_PROPS	\
	(sizeof (hxge_priv_props)/sizeof (mac_priv_prop_t))

#define	HXGE_MAGIC	0x4E584745UL
#define	MAX_DUMP_SZ	256

#define	HXGE_M_CALLBACK_FLAGS	\
	(MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP)

extern mblk_t *hxge_m_tx(void *arg, mblk_t *mp);
extern hxge_status_t hxge_pfc_set_default_mac_addr(p_hxge_t hxgep);

static mac_callbacks_t hxge_m_callbacks = {
	HXGE_M_CALLBACK_FLAGS,
	hxge_m_stat,
	hxge_m_start,
	hxge_m_stop,
	hxge_m_promisc,
	hxge_m_multicst,
	hxge_m_unicst,
	hxge_m_tx,
	hxge_m_ioctl,
	hxge_m_getcapab,
	NULL,
	NULL,
	hxge_m_setprop,
	hxge_m_getprop
};

/* PSARC/2007/453 MSI-X interrupt limit override. */
#define	HXGE_MSIX_REQUEST_10G	8
static int hxge_create_msi_property(p_hxge_t);

/* Enable debug messages as necessary. */
uint64_t hxge_debug_level = 0;
/*
 * This list contains the instance structures for the Hydra
 * devices present in the system. The lock exists to guarantee
 * mutually exclusive access to the list.
 */
void *hxge_list = NULL;
void *hxge_hw_list = NULL;
hxge_os_mutex_t hxge_common_lock;

extern uint64_t hpi_debug_level;

extern hxge_status_t hxge_ldgv_init();
extern hxge_status_t hxge_ldgv_uninit();
extern hxge_status_t hxge_intr_ldgv_init();
extern void hxge_fm_init(p_hxge_t hxgep, ddi_device_acc_attr_t *reg_attr,
    ddi_device_acc_attr_t *desc_attr, ddi_dma_attr_t *dma_attr);
extern void hxge_fm_fini(p_hxge_t hxgep);

/*
 * Count used to maintain the number of buffers being used
 * by Hydra instances and loaned up to the upper layers.
 */
uint32_t hxge_mblks_pending = 0;

/*
 * Device register access attributes for PIO.
 */
static ddi_device_acc_attr_t hxge_dev_reg_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
};

/*
 * Device descriptor access attributes for DMA.
 */
static ddi_device_acc_attr_t hxge_dev_desc_dma_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};

/*
 * Device buffer access attributes for DMA.
 */
static ddi_device_acc_attr_t hxge_dev_buf_dma_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_BE_ACC,
	DDI_STRICTORDER_ACC
};

ddi_dma_attr_t hxge_rx_rcr_desc_dma_attr = {
	DMA_ATTR_V0,		/* version number */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
	0x80000,		/* alignment */
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	1,			/* scatter/gather list length */
	(unsigned int)1,	/* granularity */
	0			/* attribute flags */
};

ddi_dma_attr_t hxge_tx_desc_dma_attr = {
	DMA_ATTR_V0,		/* version number */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
	0x100000,		/* alignment */
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	1,			/* scatter/gather list length */
	(unsigned int)1,	/* granularity */
	0			/* attribute flags */
};

ddi_dma_attr_t hxge_rx_rbr_desc_dma_attr = {
	DMA_ATTR_V0,		/* version number */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
	0x40000,		/* alignment */
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	1,			/* scatter/gather list length */
	(unsigned int)1,	/* granularity */
	0			/* attribute flags */
};

ddi_dma_attr_t hxge_rx_mbox_dma_attr = {
	DMA_ATTR_V0,		/* version number */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
#if defined(_BIG_ENDIAN)
	0x2000,			/* alignment */
#else
	0x1000,			/* alignment */
#endif
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	5,			/* scatter/gather list length */
	(unsigned int)1,	/* granularity */
	0			/* attribute flags */
};
ddi_dma_attr_t hxge_tx_dma_attr = {
	DMA_ATTR_V0,		/* version number */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
#if defined(_BIG_ENDIAN)
	0x2000,			/* alignment */
#else
	0x1000,			/* alignment */
#endif
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	5,			/* scatter/gather list length */
	(unsigned int)1,	/* granularity */
	0			/* attribute flags */
};

ddi_dma_attr_t hxge_rx_dma_attr = {
	DMA_ATTR_V0,		/* version number */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
	0x10000,		/* alignment */
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	1,			/* scatter/gather list length */
	(unsigned int)1,	/* granularity */
	DDI_DMA_RELAXED_ORDERING /* attribute flags */
};

ddi_dma_lim_t hxge_dma_limits = {
	(uint_t)0,		/* dlim_addr_lo */
	(uint_t)0xffffffff,	/* dlim_addr_hi */
	(uint_t)0xffffffff,	/* dlim_cntr_max */
	(uint_t)0xfc00fc,	/* dlim_burstsizes for 32 and 64 bit xfers */
	0x1,			/* dlim_minxfer */
	1024			/* dlim_speed */
};

dma_method_t hxge_force_dma = DVMA;

/*
 * DMA chunk sizes.
 *
 * Try to allocate the largest possible size so that fewer
 * DMA chunks need to be managed.
 */
size_t alloc_sizes[] = {
	0x1000, 0x2000, 0x4000, 0x8000,
	0x10000, 0x20000, 0x40000, 0x80000,
	0x100000, 0x200000, 0x400000, 0x800000, 0x1000000
};
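/*
 * For example (derived from the chunk selection loop in
 * hxge_alloc_rx_buf_dma() below): a 3 MB buffer request is satisfied with a
 * single 4 MB (0x400000) chunk when that allocation succeeds; only if it
 * fails does the code fall back to smaller chunk sizes such as 2 MB, at the
 * cost of managing more chunks.
 */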
/*
 * hxge_attach - attach an instance of the Hydra device, or resume a
 * previously suspended instance.
 */
static int
hxge_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	p_hxge_t	hxgep = NULL;
	int		instance;
	int		status = DDI_SUCCESS;

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_attach"));

	/*
	 * Get the device instance since we'll need to setup or retrieve a
	 * soft state for this instance.
	 */
	instance = ddi_get_instance(dip);

	switch (cmd) {
	case DDI_ATTACH:
		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_ATTACH"));
		break;

	case DDI_RESUME:
		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_RESUME"));
		hxgep = (p_hxge_t)ddi_get_soft_state(hxge_list, instance);
		if (hxgep == NULL) {
			status = DDI_FAILURE;
			break;
		}
		if (hxgep->dip != dip) {
			status = DDI_FAILURE;
			break;
		}
		if (hxgep->suspended == DDI_PM_SUSPEND) {
			status = ddi_dev_is_needed(hxgep->dip, 0, 1);
		} else {
			(void) hxge_resume(hxgep);
		}
		goto hxge_attach_exit;

	case DDI_PM_RESUME:
		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_PM_RESUME"));
		hxgep = (p_hxge_t)ddi_get_soft_state(hxge_list, instance);
		if (hxgep == NULL) {
			status = DDI_FAILURE;
			break;
		}
		if (hxgep->dip != dip) {
			status = DDI_FAILURE;
			break;
		}
		(void) hxge_resume(hxgep);
		goto hxge_attach_exit;

	default:
		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing unknown"));
		status = DDI_FAILURE;
		goto hxge_attach_exit;
	}

	if (ddi_soft_state_zalloc(hxge_list, instance) == DDI_FAILURE) {
		status = DDI_FAILURE;
		HXGE_ERROR_MSG((hxgep, DDI_CTL,
		    "ddi_soft_state_zalloc failed"));
		goto hxge_attach_exit;
	}

	hxgep = ddi_get_soft_state(hxge_list, instance);
	if (hxgep == NULL) {
		status = HXGE_ERROR;
		HXGE_ERROR_MSG((hxgep, DDI_CTL,
		    "ddi_get_soft_state failed"));
		goto hxge_attach_fail2;
	}

	hxgep->drv_state = 0;
	hxgep->dip = dip;
	hxgep->instance = instance;
	hxgep->p_dip = ddi_get_parent(dip);
	hxgep->hxge_debug_level = hxge_debug_level;
	hpi_debug_level = hxge_debug_level;

	hxge_fm_init(hxgep, &hxge_dev_reg_acc_attr, &hxge_dev_desc_dma_acc_attr,
	    &hxge_rx_dma_attr);

	status = hxge_map_regs(hxgep);
	if (status != HXGE_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "hxge_map_regs failed"));
		goto hxge_attach_fail3;
	}

	/* Scrub the MSI-X memory */
	hxge_msix_init(hxgep);

	status = hxge_init_common_dev(hxgep);
	if (status != HXGE_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "hxge_init_common_dev failed"));
		goto hxge_attach_fail4;
	}

	/*
	 * Setup the Ndd parameters for this instance.
	 */
	hxge_init_param(hxgep);

	/*
	 * Setup Register Tracing Buffer.
	 */
	hpi_rtrace_buf_init((rtrace_t *)&hpi_rtracebuf);

	/* init stats ptr */
	hxge_init_statsp(hxgep);

	status = hxge_setup_mutexes(hxgep);
	if (status != HXGE_OK) {
		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "set mutex failed"));
		goto hxge_attach_fail;
	}

	status = hxge_get_config_properties(hxgep);
	if (status != HXGE_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "get_hw create failed"));
		goto hxge_attach_fail;
	}
	/*
	 * Setup the Kstats for the driver.
	 */
	hxge_setup_kstats(hxgep);
	hxge_setup_param(hxgep);

	status = hxge_setup_system_dma_pages(hxgep);
	if (status != HXGE_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "set dma page failed"));
		goto hxge_attach_fail;
	}

	hxge_hw_id_init(hxgep);
	hxge_hw_init_niu_common(hxgep);

	status = hxge_setup_dev(hxgep);
	if (status != DDI_SUCCESS) {
		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "set dev failed"));
		goto hxge_attach_fail;
	}

	status = hxge_add_intrs(hxgep);
	if (status != DDI_SUCCESS) {
		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "add_intr failed"));
		goto hxge_attach_fail;
	}

	status = hxge_add_soft_intrs(hxgep);
	if (status != DDI_SUCCESS) {
		HXGE_DEBUG_MSG((hxgep, HXGE_ERR_CTL, "add_soft_intr failed"));
		goto hxge_attach_fail;
	}

	/*
	 * Enable interrupts.
	 */
	hxge_intrs_enable(hxgep);

	/* Keep copy of MSIx table written */
	hxge_store_msix_table(hxgep);

	if ((status = hxge_mac_register(hxgep)) != HXGE_OK) {
		HXGE_DEBUG_MSG((hxgep, DDI_CTL,
		    "unable to register to mac layer (%d)", status));
		goto hxge_attach_fail;
	}
	mac_link_update(hxgep->mach, LINK_STATE_UNKNOWN);

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "registered to mac (instance %d)",
	    instance));

	goto hxge_attach_exit;

hxge_attach_fail:
	hxge_unattach(hxgep);
	goto hxge_attach_fail1;

hxge_attach_fail5:
	/*
	 * Tear down the ndd parameters setup.
	 */
	hxge_destroy_param(hxgep);

	/*
	 * Tear down the kstat setup.
	 */
	hxge_destroy_kstats(hxgep);

hxge_attach_fail4:
	if (hxgep->hxge_hw_p) {
		hxge_uninit_common_dev(hxgep);
		hxgep->hxge_hw_p = NULL;
	}

hxge_attach_fail3:
	/*
	 * Unmap the register setup.
	 */
	hxge_unmap_regs(hxgep);

	hxge_fm_fini(hxgep);

hxge_attach_fail2:
	ddi_soft_state_free(hxge_list, instance);

hxge_attach_fail1:
	if (status != HXGE_OK)
		status = (HXGE_ERROR | HXGE_DDI_FAILED);
	hxgep = NULL;

hxge_attach_exit:
	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_attach status = 0x%08x",
	    status));

	return (status);
}
static int
hxge_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int		status = DDI_SUCCESS;
	int		instance;
	p_hxge_t	hxgep = NULL;

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_detach"));
	instance = ddi_get_instance(dip);
	hxgep = ddi_get_soft_state(hxge_list, instance);
	if (hxgep == NULL) {
		status = DDI_FAILURE;
		goto hxge_detach_exit;
	}

	switch (cmd) {
	case DDI_DETACH:
		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_DETACH"));
		break;

	case DDI_PM_SUSPEND:
		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_PM_SUSPEND"));
		hxgep->suspended = DDI_PM_SUSPEND;
		hxge_suspend(hxgep);
		break;

	case DDI_SUSPEND:
		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_SUSPEND"));
		if (hxgep->suspended != DDI_PM_SUSPEND) {
			hxgep->suspended = DDI_SUSPEND;
			hxge_suspend(hxgep);
		}
		break;

	default:
		status = DDI_FAILURE;
		break;
	}

	if (cmd != DDI_DETACH)
		goto hxge_detach_exit;

	/*
	 * Stop the xcvr polling.
	 */
	hxgep->suspended = cmd;

	if (hxgep->mach && (status = mac_unregister(hxgep->mach)) != 0) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "<== hxge_detach status = 0x%08X", status));
		return (DDI_FAILURE);
	}
	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
	    "<== hxge_detach (mac_unregister) status = 0x%08X", status));

	hxge_unattach(hxgep);
	hxgep = NULL;

hxge_detach_exit:
	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_detach status = 0x%08X",
	    status));

	return (status);
}

static void
hxge_unattach(p_hxge_t hxgep)
{
	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_unattach"));

	if (hxgep == NULL || hxgep->dev_regs == NULL) {
		return;
	}

	if (hxgep->hxge_hw_p) {
		hxge_uninit_common_dev(hxgep);
		hxgep->hxge_hw_p = NULL;
	}

	if (hxgep->hxge_timerid) {
		hxge_stop_timer(hxgep, hxgep->hxge_timerid);
		hxgep->hxge_timerid = 0;
	}

	/* Stop any further interrupts. */
	hxge_remove_intrs(hxgep);

	/* Remove soft interrupts */
	hxge_remove_soft_intrs(hxgep);

	/* Stop the device and free resources. */
	hxge_destroy_dev(hxgep);

	/* Tear down the ndd parameters setup. */
	hxge_destroy_param(hxgep);

	/* Tear down the kstat setup. */
	hxge_destroy_kstats(hxgep);

	/*
	 * Remove the list of ndd parameters which were setup during attach.
	 */
	if (hxgep->dip) {
		HXGE_DEBUG_MSG((hxgep, OBP_CTL,
		    " hxge_unattach: remove all properties"));
		(void) ddi_prop_remove_all(hxgep->dip);
	}

	/*
	 * Reset RDC, TDC, PFC, and VMAC blocks from PEU to clear any
	 * previous state before unmapping the registers.
	 */
	HXGE_REG_WR32(hxgep->hpi_handle, BLOCK_RESET, 0x0000001E);
	HXGE_DELAY(1000);

	/*
	 * Unmap the register setup.
	 */
	hxge_unmap_regs(hxgep);

	hxge_fm_fini(hxgep);

	/* Destroy all mutexes. */
	hxge_destroy_mutexes(hxgep);

	/*
	 * Free the soft state data structures allocated with this instance.
	 */
	ddi_soft_state_free(hxge_list, hxgep->instance);

	HXGE_DEBUG_MSG((NULL, DDI_CTL, "<== hxge_unattach"));
}
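/*
 * hxge_map_regs() below maps the three register sets exported by the device
 * node (as the code indicates): reg set 0 is the PCI configuration space,
 * reg set 1 is the Hydra device (PIO) register space, and reg set 2 is the
 * MSI/MSI-X register space.
 */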
static hxge_status_t
hxge_map_regs(p_hxge_t hxgep)
{
	int		ddi_status = DDI_SUCCESS;
	p_dev_regs_t	dev_regs;

#ifdef HXGE_DEBUG
	char		*sysname;
#endif

	off_t		regsize;
	hxge_status_t	status = HXGE_OK;
	int		nregs;

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_map_regs"));

	if (ddi_dev_nregs(hxgep->dip, &nregs) != DDI_SUCCESS)
		return (HXGE_ERROR);

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "hxge_map_regs: nregs: %d", nregs));

	hxgep->dev_regs = NULL;
	dev_regs = KMEM_ZALLOC(sizeof (dev_regs_t), KM_SLEEP);
	dev_regs->hxge_regh = NULL;
	dev_regs->hxge_pciregh = NULL;
	dev_regs->hxge_msix_regh = NULL;

	(void) ddi_dev_regsize(hxgep->dip, 0, &regsize);
	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
	    "hxge_map_regs: pci config size 0x%x", regsize));

	ddi_status = ddi_regs_map_setup(hxgep->dip, 0,
	    (caddr_t *)&(dev_regs->hxge_pciregp), 0, 0,
	    &hxge_dev_reg_acc_attr, &dev_regs->hxge_pciregh);
	if (ddi_status != DDI_SUCCESS) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "ddi_map_regs, hxge bus config regs failed"));
		goto hxge_map_regs_fail0;
	}

	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
	    "hxge_map_reg: PCI config addr 0x%0llx handle 0x%0llx",
	    dev_regs->hxge_pciregp,
	    dev_regs->hxge_pciregh));

	(void) ddi_dev_regsize(hxgep->dip, 1, &regsize);
	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
	    "hxge_map_regs: pio size 0x%x", regsize));

	/* set up the device mapped register */
	ddi_status = ddi_regs_map_setup(hxgep->dip, 1,
	    (caddr_t *)&(dev_regs->hxge_regp), 0, 0,
	    &hxge_dev_reg_acc_attr, &dev_regs->hxge_regh);

	if (ddi_status != DDI_SUCCESS) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "ddi_map_regs for Hydra global reg failed"));
		goto hxge_map_regs_fail1;
	}

	/* set up the msi/msi-x mapped register */
	(void) ddi_dev_regsize(hxgep->dip, 2, &regsize);
	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
	    "hxge_map_regs: msix size 0x%x", regsize));

	ddi_status = ddi_regs_map_setup(hxgep->dip, 2,
	    (caddr_t *)&(dev_regs->hxge_msix_regp), 0, 0,
	    &hxge_dev_reg_acc_attr, &dev_regs->hxge_msix_regh);

	if (ddi_status != DDI_SUCCESS) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "ddi_map_regs for msi reg failed"));
		goto hxge_map_regs_fail2;
	}

	hxgep->dev_regs = dev_regs;

	HPI_PCI_ACC_HANDLE_SET(hxgep, dev_regs->hxge_pciregh);
	HPI_PCI_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_pciregp);
	HPI_MSI_ACC_HANDLE_SET(hxgep, dev_regs->hxge_msix_regh);
	HPI_MSI_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_msix_regp);

	HPI_ACC_HANDLE_SET(hxgep, dev_regs->hxge_regh);
	HPI_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_regp);

	HPI_REG_ACC_HANDLE_SET(hxgep, dev_regs->hxge_regh);
	HPI_REG_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_regp);

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "hxge_map_reg: hardware addr 0x%0llx "
	    " handle 0x%0llx", dev_regs->hxge_regp, dev_regs->hxge_regh));

	goto hxge_map_regs_exit;

hxge_map_regs_fail3:
	if (dev_regs->hxge_msix_regh) {
		ddi_regs_map_free(&dev_regs->hxge_msix_regh);
	}

hxge_map_regs_fail2:
	if (dev_regs->hxge_regh) {
		ddi_regs_map_free(&dev_regs->hxge_regh);
	}

hxge_map_regs_fail1:
	if (dev_regs->hxge_pciregh) {
		ddi_regs_map_free(&dev_regs->hxge_pciregh);
	}
hxge_map_regs_fail0:
	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "Freeing register set memory"));
	kmem_free(dev_regs, sizeof (dev_regs_t));

hxge_map_regs_exit:
	if (ddi_status != DDI_SUCCESS)
		status |= (HXGE_ERROR | HXGE_DDI_FAILED);
	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_map_regs"));
	return (status);
}

static void
hxge_unmap_regs(p_hxge_t hxgep)
{
	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_unmap_regs"));
	if (hxgep->dev_regs) {
		if (hxgep->dev_regs->hxge_pciregh) {
			HXGE_DEBUG_MSG((hxgep, DDI_CTL,
			    "==> hxge_unmap_regs: bus"));
			ddi_regs_map_free(&hxgep->dev_regs->hxge_pciregh);
			hxgep->dev_regs->hxge_pciregh = NULL;
		}

		if (hxgep->dev_regs->hxge_regh) {
			HXGE_DEBUG_MSG((hxgep, DDI_CTL,
			    "==> hxge_unmap_regs: device registers"));
			ddi_regs_map_free(&hxgep->dev_regs->hxge_regh);
			hxgep->dev_regs->hxge_regh = NULL;
		}

		if (hxgep->dev_regs->hxge_msix_regh) {
			HXGE_DEBUG_MSG((hxgep, DDI_CTL,
			    "==> hxge_unmap_regs: device interrupts"));
			ddi_regs_map_free(&hxgep->dev_regs->hxge_msix_regh);
			hxgep->dev_regs->hxge_msix_regh = NULL;
		}
		kmem_free(hxgep->dev_regs, sizeof (dev_regs_t));
		hxgep->dev_regs = NULL;
	}
	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_unmap_regs"));
}

static hxge_status_t
hxge_setup_mutexes(p_hxge_t hxgep)
{
	int		ddi_status = DDI_SUCCESS;
	hxge_status_t	status = HXGE_OK;

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_setup_mutexes"));

	/*
	 * Get the interrupt cookie so the mutexes can be initialized.
	 */
	ddi_status = ddi_get_iblock_cookie(hxgep->dip, 0,
	    &hxgep->interrupt_cookie);

	if (ddi_status != DDI_SUCCESS) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "<== hxge_setup_mutexes: failed 0x%x", ddi_status));
		goto hxge_setup_mutexes_exit;
	}

	/*
	 * Initialize mutexes for this device.
	 */
	MUTEX_INIT(hxgep->genlock, NULL,
	    MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);
	MUTEX_INIT(&hxgep->ouraddr_lock, NULL,
	    MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);
	RW_INIT(&hxgep->filter_lock, NULL,
	    RW_DRIVER, (void *) hxgep->interrupt_cookie);
	MUTEX_INIT(&hxgep->pio_lock, NULL,
	    MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);
	MUTEX_INIT(&hxgep->timeout.lock, NULL,
	    MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);

hxge_setup_mutexes_exit:
	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
	    "<== hxge_setup_mutexes status = %x", status));

	if (ddi_status != DDI_SUCCESS)
		status |= (HXGE_ERROR | HXGE_DDI_FAILED);

	return (status);
}

static void
hxge_destroy_mutexes(p_hxge_t hxgep)
{
	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_destroy_mutexes"));
	RW_DESTROY(&hxgep->filter_lock);
	MUTEX_DESTROY(&hxgep->ouraddr_lock);
	MUTEX_DESTROY(hxgep->genlock);
	MUTEX_DESTROY(&hxgep->pio_lock);
	MUTEX_DESTROY(&hxgep->timeout.lock);

	if (hxge_debug_init == 1) {
		MUTEX_DESTROY(&hxgedebuglock);
		hxge_debug_init = 0;
	}

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_destroy_mutexes"));
}
hxge_status_t
hxge_init(p_hxge_t hxgep)
{
	hxge_status_t status = HXGE_OK;

	HXGE_DEBUG_MSG((hxgep, STR_CTL, "==> hxge_init"));

	if (hxgep->drv_state & STATE_HW_INITIALIZED) {
		return (status);
	}

	/*
	 * Allocate system memory for the receive/transmit buffer blocks and
	 * receive/transmit descriptor rings.
	 */
	status = hxge_alloc_mem_pool(hxgep);
	if (status != HXGE_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "alloc mem failed\n"));
		goto hxge_init_fail1;
	}

	/*
	 * Initialize and enable TXDMA channels.
	 */
	status = hxge_init_txdma_channels(hxgep);
	if (status != HXGE_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init txdma failed\n"));
		goto hxge_init_fail3;
	}

	/*
	 * Initialize and enable RXDMA channels.
	 */
	status = hxge_init_rxdma_channels(hxgep);
	if (status != HXGE_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init rxdma failed\n"));
		goto hxge_init_fail4;
	}

	/*
	 * Initialize TCAM
	 */
	status = hxge_classify_init(hxgep);
	if (status != HXGE_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init classify failed\n"));
		goto hxge_init_fail5;
	}

	/*
	 * Initialize the VMAC block.
	 */
	status = hxge_vmac_init(hxgep);
	if (status != HXGE_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init MAC failed\n"));
		goto hxge_init_fail5;
	}

	/* Bringup - this may be unnecessary when PXE and FCODE available */
	status = hxge_pfc_set_default_mac_addr(hxgep);
	if (status != HXGE_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "Default Address Failure\n"));
		goto hxge_init_fail5;
	}

	hxge_intrs_enable(hxgep);

	/* Keep copy of MSIx table written */
	hxge_store_msix_table(hxgep);

	/*
	 * Enable hardware interrupts.
	 */
	hxge_intr_hw_enable(hxgep);
	hxgep->drv_state |= STATE_HW_INITIALIZED;

	goto hxge_init_exit;

hxge_init_fail5:
	hxge_uninit_rxdma_channels(hxgep);
hxge_init_fail4:
	hxge_uninit_txdma_channels(hxgep);
hxge_init_fail3:
	hxge_free_mem_pool(hxgep);
hxge_init_fail1:
	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
	    "<== hxge_init status (failed) = 0x%08x", status));
	return (status);

hxge_init_exit:

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_init status = 0x%08x",
	    status));

	return (status);
}

timeout_id_t
hxge_start_timer(p_hxge_t hxgep, fptrv_t func, int msec)
{
	if ((hxgep->suspended == 0) || (hxgep->suspended == DDI_RESUME)) {
		return (timeout(func, (caddr_t)hxgep,
		    drv_usectohz(1000 * msec)));
	}
	return (NULL);
}

/*ARGSUSED*/
void
hxge_stop_timer(p_hxge_t hxgep, timeout_id_t timerid)
{
	if (timerid) {
		(void) untimeout(timerid);
	}
}

void
hxge_uninit(p_hxge_t hxgep)
{
	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_uninit"));

	if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
		HXGE_DEBUG_MSG((hxgep, DDI_CTL,
		    "==> hxge_uninit: not initialized"));
		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_uninit"));
		return;
	}

	/* Stop timer */
	if (hxgep->hxge_timerid) {
		hxge_stop_timer(hxgep, hxgep->hxge_timerid);
		hxgep->hxge_timerid = 0;
	}

	(void) hxge_intr_hw_disable(hxgep);

	/* Reset the receive VMAC side. */
	(void) hxge_rx_vmac_disable(hxgep);

	/* Free classification resources */
	(void) hxge_classify_uninit(hxgep);

	/* Reset the transmit/receive DMA side. */
	(void) hxge_txdma_hw_mode(hxgep, HXGE_DMA_STOP);
	(void) hxge_rxdma_hw_mode(hxgep, HXGE_DMA_STOP);

	hxge_uninit_txdma_channels(hxgep);
	hxge_uninit_rxdma_channels(hxgep);

	/* Reset the transmit VMAC side. */
	(void) hxge_tx_vmac_disable(hxgep);

	hxge_free_mem_pool(hxgep);

	hxgep->drv_state &= ~STATE_HW_INITIALIZED;

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_uninit"));
}
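/*
 * The two helpers below implement a simple register peek/poke facility:
 * hxge_get64() reads the 64-bit register whose offset is passed in the
 * message block and returns the value in the same block, while hxge_put64()
 * takes an offset/value pair from the message block and writes the value to
 * that register.
 */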
void
hxge_get64(p_hxge_t hxgep, p_mblk_t mp)
{
#if defined(__i386)
	size_t		reg;
#else
	uint64_t	reg;
#endif
	uint64_t	regdata;
	int		i, retry;

	bcopy((char *)mp->b_rptr, (char *)&reg, sizeof (uint64_t));
	regdata = 0;
	retry = 1;

	for (i = 0; i < retry; i++) {
		HXGE_REG_RD64(hxgep->hpi_handle, reg, &regdata);
	}
	bcopy((char *)&regdata, (char *)mp->b_rptr, sizeof (uint64_t));
}

void
hxge_put64(p_hxge_t hxgep, p_mblk_t mp)
{
#if defined(__i386)
	size_t		reg;
#else
	uint64_t	reg;
#endif
	uint64_t	buf[2];

	bcopy((char *)mp->b_rptr, (char *)&buf[0], 2 * sizeof (uint64_t));
#if defined(__i386)
	reg = (size_t)buf[0];
#else
	reg = buf[0];
#endif

	HXGE_HPI_PIO_WRITE64(hxgep->hpi_handle, reg, buf[1]);
}

/*ARGSUSED*/
/*VARARGS*/
void
hxge_debug_msg(p_hxge_t hxgep, uint64_t level, char *fmt, ...)
{
	char		msg_buffer[1048];
	char		prefix_buffer[32];
	int		instance;
	uint64_t	debug_level;
	int		cmn_level = CE_CONT;
	va_list		ap;

	debug_level = (hxgep == NULL) ? hxge_debug_level :
	    hxgep->hxge_debug_level;

	if ((level & debug_level) || (level == HXGE_NOTE) ||
	    (level == HXGE_ERR_CTL)) {
		/* do the msg processing */
		if (hxge_debug_init == 0) {
			MUTEX_INIT(&hxgedebuglock, NULL, MUTEX_DRIVER, NULL);
			hxge_debug_init = 1;
		}

		MUTEX_ENTER(&hxgedebuglock);

		if ((level & HXGE_NOTE)) {
			cmn_level = CE_NOTE;
		}

		if (level & HXGE_ERR_CTL) {
			cmn_level = CE_WARN;
		}

		va_start(ap, fmt);
		(void) vsprintf(msg_buffer, fmt, ap);
		va_end(ap);

		if (hxgep == NULL) {
			instance = -1;
			(void) sprintf(prefix_buffer, "%s :", "hxge");
		} else {
			instance = hxgep->instance;
			(void) sprintf(prefix_buffer,
			    "%s%d :", "hxge", instance);
		}

		MUTEX_EXIT(&hxgedebuglock);
		cmn_err(cmn_level, "%s %s\n", prefix_buffer, msg_buffer);
	}
}

char *
hxge_dump_packet(char *addr, int size)
{
	uchar_t		*ap = (uchar_t *)addr;
	int		i;
	static char	etherbuf[1024];
	char		*cp = etherbuf;
	char		digits[] = "0123456789abcdef";

	if (!size)
		size = 60;

	if (size > MAX_DUMP_SZ) {
		/* Dump the leading bytes */
		for (i = 0; i < MAX_DUMP_SZ / 2; i++) {
			if (*ap > 0x0f)
				*cp++ = digits[*ap >> 4];
			*cp++ = digits[*ap++ & 0xf];
			*cp++ = ':';
		}
		for (i = 0; i < 20; i++)
			*cp++ = '.';
		/* Dump the last MAX_DUMP_SZ/2 bytes */
		ap = (uchar_t *)(addr + (size - MAX_DUMP_SZ / 2));
		for (i = 0; i < MAX_DUMP_SZ / 2; i++) {
			if (*ap > 0x0f)
				*cp++ = digits[*ap >> 4];
			*cp++ = digits[*ap++ & 0xf];
			*cp++ = ':';
		}
	} else {
		for (i = 0; i < size; i++) {
			if (*ap > 0x0f)
				*cp++ = digits[*ap >> 4];
			*cp++ = digits[*ap++ & 0xf];
			*cp++ = ':';
		}
	}
	*--cp = 0;
	return (etherbuf);
}
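/*
 * Illustrative note on hxge_dump_packet() above: packet bytes are rendered as
 * colon-separated hex, e.g. a buffer beginning 0x00 0x14 0xa5 comes out as
 * "0:14:a5:...", and the middle of packets longer than MAX_DUMP_SZ bytes is
 * elided with a run of '.' characters.
 */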
static void
hxge_suspend(p_hxge_t hxgep)
{
	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_suspend"));

	/*
	 * Stop the link status timer before hxge_intrs_disable() to avoid
	 * accessing the MSI-X table simultaneously. Note that the timer
	 * routine polls for MSI-X parity errors.
	 */
	MUTEX_ENTER(&hxgep->timeout.lock);
	if (hxgep->timeout.id)
		(void) untimeout(hxgep->timeout.id);
	MUTEX_EXIT(&hxgep->timeout.lock);

	hxge_intrs_disable(hxgep);
	hxge_destroy_dev(hxgep);

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_suspend"));
}

static hxge_status_t
hxge_resume(p_hxge_t hxgep)
{
	hxge_status_t status = HXGE_OK;

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_resume"));
	hxgep->suspended = DDI_RESUME;

	(void) hxge_rxdma_hw_mode(hxgep, HXGE_DMA_START);
	(void) hxge_txdma_hw_mode(hxgep, HXGE_DMA_START);

	(void) hxge_rx_vmac_enable(hxgep);
	(void) hxge_tx_vmac_enable(hxgep);

	hxge_intrs_enable(hxgep);

	/* Keep copy of MSIx table written */
	hxge_store_msix_table(hxgep);

	hxgep->suspended = 0;

	/*
	 * Resume the link status timer after hxge_intrs_enable to avoid
	 * accessing the MSI-X table simultaneously.
	 */
	MUTEX_ENTER(&hxgep->timeout.lock);
	hxgep->timeout.id = timeout(hxge_link_poll, (void *)hxgep,
	    hxgep->timeout.ticks);
	MUTEX_EXIT(&hxgep->timeout.lock);

	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
	    "<== hxge_resume status = 0x%x", status));

	return (status);
}

hxge_status_t
hxge_setup_dev(p_hxge_t hxgep)
{
	hxge_status_t status = HXGE_OK;

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_setup_dev"));

	status = hxge_link_init(hxgep);
	if (fm_check_acc_handle(hxgep->dev_regs->hxge_regh) != DDI_FM_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "Bad register acc handle"));
		status = HXGE_ERROR;
	}

	if (status != HXGE_OK) {
		HXGE_DEBUG_MSG((hxgep, MAC_CTL,
		    " hxge_setup_dev status (link init 0x%08x)", status));
		goto hxge_setup_dev_exit;
	}

hxge_setup_dev_exit:
	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
	    "<== hxge_setup_dev status = 0x%08x", status));

	return (status);
}

static void
hxge_destroy_dev(p_hxge_t hxgep)
{
	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_destroy_dev"));

	(void) hxge_hw_stop(hxgep);

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_destroy_dev"));
}
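/*
 * hxge_setup_system_dma_pages() below determines the system page size (capped
 * at the 8 KB maximum the Hydra supports), derives the default receive block
 * size and block size code from it, and probes the system DMA burst sizes by
 * binding a scratch DMA handle.
 */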
static hxge_status_t
hxge_setup_system_dma_pages(p_hxge_t hxgep)
{
	int		ddi_status = DDI_SUCCESS;
	uint_t		count;
	ddi_dma_cookie_t cookie;
	uint_t		iommu_pagesize;
	hxge_status_t	status = HXGE_OK;

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_setup_system_dma_pages"));

	hxgep->sys_page_sz = ddi_ptob(hxgep->dip, (ulong_t)1);
	iommu_pagesize = dvma_pagesize(hxgep->dip);

	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
	    " hxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
	    " default_block_size %d iommu_pagesize %d",
	    hxgep->sys_page_sz, ddi_ptob(hxgep->dip, (ulong_t)1),
	    hxgep->rx_default_block_size, iommu_pagesize));

	if (iommu_pagesize != 0) {
		if (hxgep->sys_page_sz == iommu_pagesize) {
			/* Hydra supports up to 8K pages */
			if (iommu_pagesize > 0x2000)
				hxgep->sys_page_sz = 0x2000;
		} else {
			if (hxgep->sys_page_sz > iommu_pagesize)
				hxgep->sys_page_sz = iommu_pagesize;
		}
	}

	hxgep->sys_page_mask = ~(hxgep->sys_page_sz - 1);

	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
	    "==> hxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
	    "default_block_size %d page mask %d",
	    hxgep->sys_page_sz, ddi_ptob(hxgep->dip, (ulong_t)1),
	    hxgep->rx_default_block_size, hxgep->sys_page_mask));

	switch (hxgep->sys_page_sz) {
	default:
		hxgep->sys_page_sz = 0x1000;
		hxgep->sys_page_mask = ~(hxgep->sys_page_sz - 1);
		hxgep->rx_default_block_size = 0x1000;
		hxgep->rx_bksize_code = RBR_BKSIZE_4K;
		break;
	case 0x1000:
		hxgep->rx_default_block_size = 0x1000;
		hxgep->rx_bksize_code = RBR_BKSIZE_4K;
		break;
	case 0x2000:
		hxgep->rx_default_block_size = 0x2000;
		hxgep->rx_bksize_code = RBR_BKSIZE_8K;
		break;
	}

	hxge_rx_dma_attr.dma_attr_align = hxgep->sys_page_sz;
	hxge_tx_dma_attr.dma_attr_align = hxgep->sys_page_sz;

	/*
	 * Get the system DMA burst size.
	 */
	ddi_status = ddi_dma_alloc_handle(hxgep->dip, &hxge_tx_dma_attr,
	    DDI_DMA_DONTWAIT, 0, &hxgep->dmasparehandle);
	if (ddi_status != DDI_SUCCESS) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "ddi_dma_alloc_handle: failed status 0x%x", ddi_status));
		goto hxge_get_soft_properties_exit;
	}

	ddi_status = ddi_dma_addr_bind_handle(hxgep->dmasparehandle, NULL,
	    (caddr_t)hxgep->dmasparehandle, sizeof (hxgep->dmasparehandle),
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, 0,
	    &cookie, &count);
	if (ddi_status != DDI_DMA_MAPPED) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "Binding spare handle to find system burstsize failed."));
		ddi_status = DDI_FAILURE;
		goto hxge_get_soft_properties_fail1;
	}

	hxgep->sys_burst_sz = ddi_dma_burstsizes(hxgep->dmasparehandle);
	(void) ddi_dma_unbind_handle(hxgep->dmasparehandle);

hxge_get_soft_properties_fail1:
	ddi_dma_free_handle(&hxgep->dmasparehandle);

hxge_get_soft_properties_exit:

	if (ddi_status != DDI_SUCCESS)
		status |= (HXGE_ERROR | HXGE_DDI_FAILED);

	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
	    "<== hxge_setup_system_dma_pages status = 0x%08x", status));

	return (status);
}

hxge_status_t
hxge_alloc_mem_pool(p_hxge_t hxgep)
{
	hxge_status_t status = HXGE_OK;

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_alloc_mem_pool"));

	status = hxge_alloc_rx_mem_pool(hxgep);
	if (status != HXGE_OK) {
		return (HXGE_ERROR);
	}

	status = hxge_alloc_tx_mem_pool(hxgep);
	if (status != HXGE_OK) {
		hxge_free_rx_mem_pool(hxgep);
		return (HXGE_ERROR);
	}

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_alloc_mem_pool"));
	return (HXGE_OK);
}

static void
hxge_free_mem_pool(p_hxge_t hxgep)
{
	HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_free_mem_pool"));

	hxge_free_rx_mem_pool(hxgep);
	hxge_free_tx_mem_pool(hxgep);

	HXGE_DEBUG_MSG((hxgep, MEM_CTL, "<== hxge_free_mem_pool"));
}
static hxge_status_t
hxge_alloc_rx_mem_pool(p_hxge_t hxgep)
{
	int			i, j;
	uint32_t		ndmas, st_rdc;
	p_hxge_dma_pt_cfg_t	p_all_cfgp;
	p_hxge_hw_pt_cfg_t	p_cfgp;
	p_hxge_dma_pool_t	dma_poolp;
	p_hxge_dma_common_t	*dma_buf_p;
	p_hxge_dma_pool_t	dma_rbr_cntl_poolp;
	p_hxge_dma_common_t	*dma_rbr_cntl_p;
	p_hxge_dma_pool_t	dma_rcr_cntl_poolp;
	p_hxge_dma_common_t	*dma_rcr_cntl_p;
	p_hxge_dma_pool_t	dma_mbox_cntl_poolp;
	p_hxge_dma_common_t	*dma_mbox_cntl_p;
	size_t			rx_buf_alloc_size;
	size_t			rx_rbr_cntl_alloc_size;
	size_t			rx_rcr_cntl_alloc_size;
	size_t			rx_mbox_cntl_alloc_size;
	uint32_t		*num_chunks;	/* per dma */
	hxge_status_t		status = HXGE_OK;

	uint32_t		hxge_port_rbr_size;
	uint32_t		hxge_port_rbr_spare_size;
	uint32_t		hxge_port_rcr_size;

	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_rx_mem_pool"));

	p_all_cfgp = (p_hxge_dma_pt_cfg_t)&hxgep->pt_config;
	p_cfgp = (p_hxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
	st_rdc = p_cfgp->start_rdc;
	ndmas = p_cfgp->max_rdcs;

	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
	    " hxge_alloc_rx_mem_pool st_rdc %d ndmas %d", st_rdc, ndmas));

	/*
	 * Allocate memory for each receive DMA channel.
	 */
	dma_poolp = (p_hxge_dma_pool_t)KMEM_ZALLOC(sizeof (hxge_dma_pool_t),
	    KM_SLEEP);
	dma_buf_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
	    sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);

	dma_rbr_cntl_poolp = (p_hxge_dma_pool_t)
	    KMEM_ZALLOC(sizeof (hxge_dma_pool_t), KM_SLEEP);
	dma_rbr_cntl_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
	    sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
	dma_rcr_cntl_poolp = (p_hxge_dma_pool_t)
	    KMEM_ZALLOC(sizeof (hxge_dma_pool_t), KM_SLEEP);
	dma_rcr_cntl_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
	    sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
	dma_mbox_cntl_poolp = (p_hxge_dma_pool_t)
	    KMEM_ZALLOC(sizeof (hxge_dma_pool_t), KM_SLEEP);
	dma_mbox_cntl_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
	    sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);

	num_chunks = (uint32_t *)KMEM_ZALLOC(sizeof (uint32_t) * ndmas,
	    KM_SLEEP);

	/*
	 * Assume that each DMA channel will be configured with the default
	 * block size. RBR block counts are rounded up to a multiple of the
	 * post batch count (16).
	 */
	hxge_port_rbr_size = p_all_cfgp->rbr_size;
	hxge_port_rcr_size = p_all_cfgp->rcr_size;

	if (!hxge_port_rbr_size) {
		hxge_port_rbr_size = HXGE_RBR_RBB_DEFAULT;
	}

	if (hxge_port_rbr_size % HXGE_RXDMA_POST_BATCH) {
		hxge_port_rbr_size = (HXGE_RXDMA_POST_BATCH *
		    (hxge_port_rbr_size / HXGE_RXDMA_POST_BATCH + 1));
	}

	p_all_cfgp->rbr_size = hxge_port_rbr_size;
	hxge_port_rbr_spare_size = hxge_rbr_spare_size;

	if (hxge_port_rbr_spare_size % HXGE_RXDMA_POST_BATCH) {
		hxge_port_rbr_spare_size = (HXGE_RXDMA_POST_BATCH *
		    (hxge_port_rbr_spare_size / HXGE_RXDMA_POST_BATCH + 1));
	}
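	/*
	 * Worked example (assuming HXGE_RXDMA_POST_BATCH is 16, per the
	 * comment above): a requested rbr_size of 1000 is not a multiple of
	 * 16, so it is rounded up to 16 * (1000 / 16 + 1) = 16 * 63 = 1008.
	 */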
	rx_buf_alloc_size = (hxgep->rx_default_block_size *
	    (hxge_port_rbr_size + hxge_port_rbr_spare_size));

	/*
	 * Addresses of receive block ring, receive completion ring and the
	 * mailbox must be all cache-aligned (64 bytes).
	 */
	rx_rbr_cntl_alloc_size = hxge_port_rbr_size + hxge_port_rbr_spare_size;
	rx_rbr_cntl_alloc_size *= sizeof (rx_desc_t);
	rx_rcr_cntl_alloc_size = sizeof (rcr_entry_t) * hxge_port_rcr_size;
	rx_mbox_cntl_alloc_size = sizeof (rxdma_mailbox_t);

	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_alloc_rx_mem_pool: "
	    "hxge_port_rbr_size = %d hxge_port_rbr_spare_size = %d "
	    "hxge_port_rcr_size = %d rx_rbr_cntl_alloc_size = %d",
	    hxge_port_rbr_size, hxge_port_rbr_spare_size,
	    hxge_port_rcr_size, rx_rbr_cntl_alloc_size));

	hxgep->hxge_port_rbr_size = hxge_port_rbr_size;
	hxgep->hxge_port_rcr_size = hxge_port_rcr_size;

	/*
	 * Allocate memory for receive buffers and descriptor rings. Replace
	 * allocation functions with interface functions provided by the
	 * partition manager when it is available.
	 */
	/*
	 * Allocate memory for the receive buffer blocks.
	 */
	for (i = 0; i < ndmas; i++) {
		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
		    " hxge_alloc_rx_mem_pool to alloc mem: "
		    " dma %d dma_buf_p %llx &dma_buf_p %llx",
		    i, dma_buf_p[i], &dma_buf_p[i]));

		num_chunks[i] = 0;

		status = hxge_alloc_rx_buf_dma(hxgep, st_rdc, &dma_buf_p[i],
		    rx_buf_alloc_size, hxgep->rx_default_block_size,
		    &num_chunks[i]);
		if (status != HXGE_OK) {
			break;
		}

		st_rdc++;
		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
		    " hxge_alloc_rx_mem_pool DONE alloc mem: "
		    "dma %d dma_buf_p %llx &dma_buf_p %llx", i,
		    dma_buf_p[i], &dma_buf_p[i]));
	}

	if (i < ndmas) {
		goto hxge_alloc_rx_mem_fail1;
	}

	/*
	 * Allocate memory for descriptor rings and mailbox.
	 */
	st_rdc = p_cfgp->start_rdc;
	for (j = 0; j < ndmas; j++) {
		if ((status = hxge_alloc_rx_cntl_dma(hxgep, st_rdc,
		    &dma_rbr_cntl_p[j], &hxge_rx_rbr_desc_dma_attr,
		    rx_rbr_cntl_alloc_size)) != HXGE_OK) {
			break;
		}

		if ((status = hxge_alloc_rx_cntl_dma(hxgep, st_rdc,
		    &dma_rcr_cntl_p[j], &hxge_rx_rcr_desc_dma_attr,
		    rx_rcr_cntl_alloc_size)) != HXGE_OK) {
			break;
		}

		if ((status = hxge_alloc_rx_cntl_dma(hxgep, st_rdc,
		    &dma_mbox_cntl_p[j], &hxge_rx_mbox_dma_attr,
		    rx_mbox_cntl_alloc_size)) != HXGE_OK) {
			break;
		}
		st_rdc++;
	}

	if (j < ndmas) {
		goto hxge_alloc_rx_mem_fail2;
	}

	dma_poolp->ndmas = ndmas;
	dma_poolp->num_chunks = num_chunks;
	dma_poolp->buf_allocated = B_TRUE;
	hxgep->rx_buf_pool_p = dma_poolp;
	dma_poolp->dma_buf_pool_p = dma_buf_p;

	dma_rbr_cntl_poolp->ndmas = ndmas;
	dma_rbr_cntl_poolp->buf_allocated = B_TRUE;
	hxgep->rx_rbr_cntl_pool_p = dma_rbr_cntl_poolp;
	dma_rbr_cntl_poolp->dma_buf_pool_p = dma_rbr_cntl_p;

	dma_rcr_cntl_poolp->ndmas = ndmas;
	dma_rcr_cntl_poolp->buf_allocated = B_TRUE;
	hxgep->rx_rcr_cntl_pool_p = dma_rcr_cntl_poolp;
	dma_rcr_cntl_poolp->dma_buf_pool_p = dma_rcr_cntl_p;

	dma_mbox_cntl_poolp->ndmas = ndmas;
	dma_mbox_cntl_poolp->buf_allocated = B_TRUE;
	hxgep->rx_mbox_cntl_pool_p = dma_mbox_cntl_poolp;
	dma_mbox_cntl_poolp->dma_buf_pool_p = dma_mbox_cntl_p;

	goto hxge_alloc_rx_mem_pool_exit;

hxge_alloc_rx_mem_fail2:
	/* Free control buffers */
	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
	    "==> hxge_alloc_rx_mem_pool: freeing control bufs (%d)", j));
	for (; j >= 0; j--) {
		hxge_free_rx_cntl_dma(hxgep,
		    (p_hxge_dma_common_t)dma_rbr_cntl_p[j]);
		hxge_free_rx_cntl_dma(hxgep,
		    (p_hxge_dma_common_t)dma_rcr_cntl_p[j]);
		hxge_free_rx_cntl_dma(hxgep,
		    (p_hxge_dma_common_t)dma_mbox_cntl_p[j]);
		HXGE_DEBUG_MSG((hxgep, DMA_CTL,
		    "==> hxge_alloc_rx_mem_pool: control bufs freed (%d)", j));
	}
	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
	    "==> hxge_alloc_rx_mem_pool: control bufs freed (%d)", j));

hxge_alloc_rx_mem_fail1:
	/* Free data buffers */
	i--;
	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
	    "==> hxge_alloc_rx_mem_pool: freeing data bufs (%d)", i));
	for (; i >= 0; i--) {
		hxge_free_rx_buf_dma(hxgep, (p_hxge_dma_common_t)dma_buf_p[i],
		    num_chunks[i]);
	}
	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
	    "==> hxge_alloc_rx_mem_pool: data bufs freed (%d)", i));
	KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
	KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t));
	KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t));
	KMEM_FREE(dma_rbr_cntl_poolp, sizeof (hxge_dma_pool_t));
	KMEM_FREE(dma_rbr_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
	KMEM_FREE(dma_rcr_cntl_poolp, sizeof (hxge_dma_pool_t));
	KMEM_FREE(dma_rcr_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
	KMEM_FREE(dma_mbox_cntl_poolp, sizeof (hxge_dma_pool_t));
	KMEM_FREE(dma_mbox_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));

hxge_alloc_rx_mem_pool_exit:
	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
	    "<== hxge_alloc_rx_mem_pool:status 0x%08x", status));

	return (status);
}

static void
hxge_free_rx_mem_pool(p_hxge_t hxgep)
{
	uint32_t		i, ndmas;
	p_hxge_dma_pool_t	dma_poolp;
	p_hxge_dma_common_t	*dma_buf_p;
	p_hxge_dma_pool_t	dma_rbr_cntl_poolp;
	p_hxge_dma_common_t	*dma_rbr_cntl_p;
	p_hxge_dma_pool_t	dma_rcr_cntl_poolp;
	p_hxge_dma_common_t	*dma_rcr_cntl_p;
	p_hxge_dma_pool_t	dma_mbox_cntl_poolp;
	p_hxge_dma_common_t	*dma_mbox_cntl_p;
	uint32_t		*num_chunks;

	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_free_rx_mem_pool"));

	dma_poolp = hxgep->rx_buf_pool_p;
	if (dma_poolp == NULL || (!dma_poolp->buf_allocated)) {
		HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_free_rx_mem_pool "
		    "(null rx buf pool or buf not allocated"));
		return;
	}

	dma_rbr_cntl_poolp = hxgep->rx_rbr_cntl_pool_p;
	if (dma_rbr_cntl_poolp == NULL ||
	    (!dma_rbr_cntl_poolp->buf_allocated)) {
		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
		    "<== hxge_free_rx_mem_pool "
		    "(null rbr cntl buf pool or rbr cntl buf not allocated"));
		return;
	}

	dma_rcr_cntl_poolp = hxgep->rx_rcr_cntl_pool_p;
	if (dma_rcr_cntl_poolp == NULL ||
	    (!dma_rcr_cntl_poolp->buf_allocated)) {
		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
		    "<== hxge_free_rx_mem_pool "
		    "(null rcr cntl buf pool or rcr cntl buf not allocated"));
		return;
	}

	dma_mbox_cntl_poolp = hxgep->rx_mbox_cntl_pool_p;
	if (dma_mbox_cntl_poolp == NULL ||
	    (!dma_mbox_cntl_poolp->buf_allocated)) {
		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
		    "<== hxge_free_rx_mem_pool "
		    "(null mbox cntl buf pool or mbox cntl buf not allocated"));
		return;
	}

	dma_buf_p = dma_poolp->dma_buf_pool_p;
	num_chunks = dma_poolp->num_chunks;

	dma_rbr_cntl_p = dma_rbr_cntl_poolp->dma_buf_pool_p;
	dma_rcr_cntl_p = dma_rcr_cntl_poolp->dma_buf_pool_p;
	dma_mbox_cntl_p = dma_mbox_cntl_poolp->dma_buf_pool_p;
	ndmas = dma_rbr_cntl_poolp->ndmas;

	for (i = 0; i < ndmas; i++) {
		hxge_free_rx_buf_dma(hxgep, dma_buf_p[i], num_chunks[i]);
	}

	for (i = 0; i < ndmas; i++) {
		hxge_free_rx_cntl_dma(hxgep, dma_rbr_cntl_p[i]);
		hxge_free_rx_cntl_dma(hxgep, dma_rcr_cntl_p[i]);
		hxge_free_rx_cntl_dma(hxgep, dma_mbox_cntl_p[i]);
	}

	for (i = 0; i < ndmas; i++) {
		KMEM_FREE(dma_buf_p[i],
		    sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK);
		KMEM_FREE(dma_rbr_cntl_p[i], sizeof (hxge_dma_common_t));
		KMEM_FREE(dma_rcr_cntl_p[i], sizeof (hxge_dma_common_t));
		KMEM_FREE(dma_mbox_cntl_p[i], sizeof (hxge_dma_common_t));
	}

	KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
	KMEM_FREE(dma_rbr_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
	KMEM_FREE(dma_rbr_cntl_poolp, sizeof (hxge_dma_pool_t));
	KMEM_FREE(dma_rcr_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
	KMEM_FREE(dma_rcr_cntl_poolp, sizeof (hxge_dma_pool_t));
	KMEM_FREE(dma_mbox_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
	KMEM_FREE(dma_mbox_cntl_poolp, sizeof (hxge_dma_pool_t));
	KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t));
	KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t));

	hxgep->rx_buf_pool_p = NULL;
	hxgep->rx_rbr_cntl_pool_p = NULL;
	hxgep->rx_rcr_cntl_pool_p = NULL;
	hxgep->rx_mbox_cntl_pool_p = NULL;

	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_free_rx_mem_pool"));
}

static hxge_status_t
hxge_alloc_rx_buf_dma(p_hxge_t hxgep, uint16_t dma_channel,
    p_hxge_dma_common_t *dmap,
    size_t alloc_size, size_t block_size, uint32_t *num_chunks)
{
	p_hxge_dma_common_t	rx_dmap;
	hxge_status_t		status = HXGE_OK;
	size_t			total_alloc_size;
	size_t			allocated = 0;
	int			i, size_index, array_size;

	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_rx_buf_dma"));

	rx_dmap = (p_hxge_dma_common_t)
	    KMEM_ZALLOC(sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK, KM_SLEEP);

	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
	    " alloc_rx_buf_dma rdc %d asize %x bsize %x bbuf %llx ",
	    dma_channel, alloc_size, block_size, dmap));

	total_alloc_size = alloc_size;

	i = 0;
	size_index = 0;
	array_size = sizeof (alloc_sizes) / sizeof (size_t);
	while ((size_index < array_size) &&
	    (alloc_sizes[size_index] < alloc_size))
		size_index++;
	if (size_index >= array_size) {
		size_index = array_size - 1;
	}

	while ((allocated < total_alloc_size) &&
	    (size_index >= 0) && (i < HXGE_DMA_BLOCK)) {
		rx_dmap[i].dma_chunk_index = i;
		rx_dmap[i].block_size = block_size;
		rx_dmap[i].alength = alloc_sizes[size_index];
		rx_dmap[i].orig_alength = rx_dmap[i].alength;
		rx_dmap[i].nblocks = alloc_sizes[size_index] / block_size;
		rx_dmap[i].dma_channel = dma_channel;
		rx_dmap[i].contig_alloc_type = B_FALSE;

		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
		    "alloc_rx_buf_dma rdc %d chunk %d bufp %llx size %x "
		    "i %d nblocks %d alength %d",
		    dma_channel, i, &rx_dmap[i], block_size,
		    i, rx_dmap[i].nblocks, rx_dmap[i].alength));
		status = hxge_dma_mem_alloc(hxgep, hxge_force_dma,
		    &hxge_rx_dma_attr, rx_dmap[i].alength,
		    &hxge_dev_buf_dma_acc_attr,
		    DDI_DMA_READ | DDI_DMA_STREAMING,
		    (p_hxge_dma_common_t)(&rx_dmap[i]));
		if (status != HXGE_OK) {
			HXGE_DEBUG_MSG((hxgep, DMA_CTL,
			    " hxge_alloc_rx_buf_dma: Alloc Failed: "
			    " for size: %d", alloc_sizes[size_index]));
			size_index--;
		} else {
			HXGE_DEBUG_MSG((hxgep, DMA_CTL,
			    " alloc_rx_buf_dma allocated rdc %d "
			    "chunk %d size %x dvma %x bufp %llx ",
			    dma_channel, i, rx_dmap[i].alength,
			    rx_dmap[i].ioaddr_pp, &rx_dmap[i]));
			i++;
			allocated += alloc_sizes[size_index];
		}
	}

	if (allocated < total_alloc_size) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    " hxge_alloc_rx_buf_dma failed due to"
		    " allocated(%d) < required(%d)",
		    allocated, total_alloc_size));
		goto hxge_alloc_rx_mem_fail1;
	}

	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
	    " alloc_rx_buf_dma rdc %d allocated %d chunks", dma_channel, i));

	*num_chunks = i;
	*dmap = rx_dmap;

	goto hxge_alloc_rx_mem_exit;

hxge_alloc_rx_mem_fail1:
	KMEM_FREE(rx_dmap, sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK);

hxge_alloc_rx_mem_exit:
	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
	    "<== hxge_alloc_rx_buf_dma status 0x%08x", status));

	return (status);
}
"<== hxge_alloc_rx_buf_dma status 0x%08x", status)); 1902 1903 return (status); 1904 } 1905 1906 /*ARGSUSED*/ 1907 static void 1908 hxge_free_rx_buf_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap, 1909 uint32_t num_chunks) 1910 { 1911 int i; 1912 1913 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 1914 "==> hxge_free_rx_buf_dma: # of chunks %d", num_chunks)); 1915 1916 for (i = 0; i < num_chunks; i++) { 1917 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 1918 "==> hxge_free_rx_buf_dma: chunk %d dmap 0x%llx", i, dmap)); 1919 hxge_dma_mem_free(dmap++); 1920 } 1921 1922 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_free_rx_buf_dma")); 1923 } 1924 1925 /*ARGSUSED*/ 1926 static hxge_status_t 1927 hxge_alloc_rx_cntl_dma(p_hxge_t hxgep, uint16_t dma_channel, 1928 p_hxge_dma_common_t *dmap, struct ddi_dma_attr *attr, size_t size) 1929 { 1930 p_hxge_dma_common_t rx_dmap; 1931 hxge_status_t status = HXGE_OK; 1932 1933 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_rx_cntl_dma")); 1934 1935 rx_dmap = (p_hxge_dma_common_t) 1936 KMEM_ZALLOC(sizeof (hxge_dma_common_t), KM_SLEEP); 1937 1938 rx_dmap->contig_alloc_type = B_FALSE; 1939 1940 status = hxge_dma_mem_alloc(hxgep, hxge_force_dma, 1941 attr, size, &hxge_dev_desc_dma_acc_attr, 1942 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, rx_dmap); 1943 if (status != HXGE_OK) { 1944 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 1945 " hxge_alloc_rx_cntl_dma: Alloc Failed: " 1946 " for size: %d", size)); 1947 goto hxge_alloc_rx_cntl_dma_fail1; 1948 } 1949 1950 *dmap = rx_dmap; 1951 1952 goto hxge_alloc_rx_cntl_dma_exit; 1953 1954 hxge_alloc_rx_cntl_dma_fail1: 1955 KMEM_FREE(rx_dmap, sizeof (hxge_dma_common_t)); 1956 1957 hxge_alloc_rx_cntl_dma_exit: 1958 HXGE_DEBUG_MSG((hxgep, DMA_CTL, 1959 "<== hxge_alloc_rx_cntl_dma status 0x%08x", status)); 1960 1961 return (status); 1962 } 1963 1964 /*ARGSUSED*/ 1965 static void 1966 hxge_free_rx_cntl_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap) 1967 { 1968 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_free_rx_cntl_dma")); 1969 1970 hxge_dma_mem_free(dmap); 1971 1972 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_free_rx_cntl_dma")); 1973 } 1974 1975 static hxge_status_t 1976 hxge_alloc_tx_mem_pool(p_hxge_t hxgep) 1977 { 1978 hxge_status_t status = HXGE_OK; 1979 int i, j; 1980 uint32_t ndmas, st_tdc; 1981 p_hxge_dma_pt_cfg_t p_all_cfgp; 1982 p_hxge_hw_pt_cfg_t p_cfgp; 1983 p_hxge_dma_pool_t dma_poolp; 1984 p_hxge_dma_common_t *dma_buf_p; 1985 p_hxge_dma_pool_t dma_cntl_poolp; 1986 p_hxge_dma_common_t *dma_cntl_p; 1987 size_t tx_buf_alloc_size; 1988 size_t tx_cntl_alloc_size; 1989 uint32_t *num_chunks; /* per dma */ 1990 1991 HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_alloc_tx_mem_pool")); 1992 1993 p_all_cfgp = (p_hxge_dma_pt_cfg_t)&hxgep->pt_config; 1994 p_cfgp = (p_hxge_hw_pt_cfg_t)&p_all_cfgp->hw_config; 1995 st_tdc = p_cfgp->start_tdc; 1996 ndmas = p_cfgp->max_tdcs; 1997 1998 HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_alloc_tx_mem_pool: " 1999 "p_cfgp 0x%016llx start_tdc %d ndmas %d hxgep->max_tdcs %d", 2000 p_cfgp, p_cfgp->start_tdc, p_cfgp->max_tdcs, hxgep->max_tdcs)); 2001 /* 2002 * Allocate memory for each transmit DMA channel. 
	 */
	dma_poolp = (p_hxge_dma_pool_t)KMEM_ZALLOC(sizeof (hxge_dma_pool_t),
	    KM_SLEEP);
	dma_buf_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
	    sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);

	dma_cntl_poolp = (p_hxge_dma_pool_t)
	    KMEM_ZALLOC(sizeof (hxge_dma_pool_t), KM_SLEEP);
	dma_cntl_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
	    sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);

	hxgep->hxge_port_tx_ring_size = hxge_tx_ring_size;

	/*
	 * Assume that each DMA channel will be configured with the default
	 * transmit buffer size for copying transmit data. (Packets with a
	 * payload over this limit are not copied.)
	 */
	tx_buf_alloc_size = (hxge_bcopy_thresh * hxge_tx_ring_size);

	/*
	 * Addresses of the transmit descriptor ring and the mailbox must all
	 * be cache-aligned (64 bytes).
	 */
	tx_cntl_alloc_size = hxge_tx_ring_size;
	tx_cntl_alloc_size *= (sizeof (tx_desc_t));
	tx_cntl_alloc_size += sizeof (txdma_mailbox_t);

	num_chunks = (uint32_t *)KMEM_ZALLOC(sizeof (uint32_t) * ndmas,
	    KM_SLEEP);

	/*
	 * Allocate memory for transmit buffers and descriptor rings. Replace
	 * these allocation functions with the interface functions provided
	 * by the partition manager when it becomes available.
	 *
	 * Allocate memory for the transmit buffer pool.
	 */
	for (i = 0; i < ndmas; i++) {
		num_chunks[i] = 0;
		status = hxge_alloc_tx_buf_dma(hxgep, st_tdc, &dma_buf_p[i],
		    tx_buf_alloc_size, hxge_bcopy_thresh, &num_chunks[i]);
		if (status != HXGE_OK) {
			break;
		}
		st_tdc++;
	}

	if (i < ndmas) {
		goto hxge_alloc_tx_mem_pool_fail1;
	}

	st_tdc = p_cfgp->start_tdc;

	/*
	 * Allocate memory for descriptor rings and mailbox.
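	 *
	 * Worked example (illustrative only; the real sizes come from the
	 * structure definitions): with a ring of, say, 1024 descriptors and
	 * an assumed descriptor size of 8 bytes,
	 *
	 *	tx_cntl_alloc_size = 1024 * 8 + sizeof (txdma_mailbox_t)
	 *
	 * i.e. an 8 KB descriptor ring followed by the mailbox area, all
	 * carved from one cache-aligned control allocation per channel.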
2059 */ 2060 for (j = 0; j < ndmas; j++) { 2061 status = hxge_alloc_tx_cntl_dma(hxgep, st_tdc, &dma_cntl_p[j], 2062 tx_cntl_alloc_size); 2063 if (status != HXGE_OK) { 2064 break; 2065 } 2066 st_tdc++; 2067 } 2068 2069 if (j < ndmas) { 2070 goto hxge_alloc_tx_mem_pool_fail2; 2071 } 2072 2073 dma_poolp->ndmas = ndmas; 2074 dma_poolp->num_chunks = num_chunks; 2075 dma_poolp->buf_allocated = B_TRUE; 2076 dma_poolp->dma_buf_pool_p = dma_buf_p; 2077 hxgep->tx_buf_pool_p = dma_poolp; 2078 2079 dma_cntl_poolp->ndmas = ndmas; 2080 dma_cntl_poolp->buf_allocated = B_TRUE; 2081 dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p; 2082 hxgep->tx_cntl_pool_p = dma_cntl_poolp; 2083 2084 HXGE_DEBUG_MSG((hxgep, MEM_CTL, 2085 "==> hxge_alloc_tx_mem_pool: start_tdc %d " 2086 "ndmas %d poolp->ndmas %d", st_tdc, ndmas, dma_poolp->ndmas)); 2087 2088 goto hxge_alloc_tx_mem_pool_exit; 2089 2090 hxge_alloc_tx_mem_pool_fail2: 2091 /* Free control buffers */ 2092 j--; 2093 for (; j >= 0; j--) { 2094 hxge_free_tx_cntl_dma(hxgep, 2095 (p_hxge_dma_common_t)dma_cntl_p[j]); 2096 } 2097 2098 hxge_alloc_tx_mem_pool_fail1: 2099 /* Free data buffers */ 2100 i--; 2101 for (; i >= 0; i--) { 2102 hxge_free_tx_buf_dma(hxgep, (p_hxge_dma_common_t)dma_buf_p[i], 2103 num_chunks[i]); 2104 } 2105 2106 KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t)); 2107 KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t)); 2108 KMEM_FREE(dma_cntl_poolp, sizeof (hxge_dma_pool_t)); 2109 KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_hxge_dma_common_t)); 2110 KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas); 2111 2112 hxge_alloc_tx_mem_pool_exit: 2113 HXGE_DEBUG_MSG((hxgep, MEM_CTL, 2114 "<== hxge_alloc_tx_mem_pool:status 0x%08x", status)); 2115 2116 return (status); 2117 } 2118 2119 static hxge_status_t 2120 hxge_alloc_tx_buf_dma(p_hxge_t hxgep, uint16_t dma_channel, 2121 p_hxge_dma_common_t *dmap, size_t alloc_size, 2122 size_t block_size, uint32_t *num_chunks) 2123 { 2124 p_hxge_dma_common_t tx_dmap; 2125 hxge_status_t status = HXGE_OK; 2126 size_t total_alloc_size; 2127 size_t allocated = 0; 2128 int i, size_index, array_size; 2129 2130 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_tx_buf_dma")); 2131 2132 tx_dmap = (p_hxge_dma_common_t) 2133 KMEM_ZALLOC(sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK, KM_SLEEP); 2134 2135 total_alloc_size = alloc_size; 2136 i = 0; 2137 size_index = 0; 2138 array_size = sizeof (alloc_sizes) / sizeof (size_t); 2139 while ((size_index < array_size) && 2140 (alloc_sizes[size_index] < alloc_size)) 2141 size_index++; 2142 if (size_index >= array_size) { 2143 size_index = array_size - 1; 2144 } 2145 2146 while ((allocated < total_alloc_size) && 2147 (size_index >= 0) && (i < HXGE_DMA_BLOCK)) { 2148 tx_dmap[i].dma_chunk_index = i; 2149 tx_dmap[i].block_size = block_size; 2150 tx_dmap[i].alength = alloc_sizes[size_index]; 2151 tx_dmap[i].orig_alength = tx_dmap[i].alength; 2152 tx_dmap[i].nblocks = alloc_sizes[size_index] / block_size; 2153 tx_dmap[i].dma_channel = dma_channel; 2154 tx_dmap[i].contig_alloc_type = B_FALSE; 2155 2156 status = hxge_dma_mem_alloc(hxgep, hxge_force_dma, 2157 &hxge_tx_dma_attr, tx_dmap[i].alength, 2158 &hxge_dev_buf_dma_acc_attr, 2159 DDI_DMA_WRITE | DDI_DMA_STREAMING, 2160 (p_hxge_dma_common_t)(&tx_dmap[i])); 2161 if (status != HXGE_OK) { 2162 HXGE_DEBUG_MSG((hxgep, DMA_CTL, 2163 " hxge_alloc_tx_buf_dma: Alloc Failed: " 2164 " for size: %d", alloc_sizes[size_index])); 2165 size_index--; 2166 } else { 2167 i++; 2168 allocated += alloc_sizes[size_index]; 2169 } 2170 } 2171 2172 if (allocated < 
total_alloc_size) { 2173 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2174 " hxge_alloc_tx_buf_dma: failed due to" 2175 " allocated(%d) < required(%d)", 2176 allocated, total_alloc_size)); 2177 goto hxge_alloc_tx_mem_fail1; 2178 } 2179 2180 *num_chunks = i; 2181 *dmap = tx_dmap; 2182 HXGE_DEBUG_MSG((hxgep, DMA_CTL, 2183 "==> hxge_alloc_tx_buf_dma dmap 0x%016llx num chunks %d", 2184 *dmap, i)); 2185 goto hxge_alloc_tx_mem_exit; 2186 2187 hxge_alloc_tx_mem_fail1: 2188 KMEM_FREE(tx_dmap, sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK); 2189 2190 hxge_alloc_tx_mem_exit: 2191 HXGE_DEBUG_MSG((hxgep, DMA_CTL, 2192 "<== hxge_alloc_tx_buf_dma status 0x%08x", status)); 2193 2194 return (status); 2195 } 2196 2197 /*ARGSUSED*/ 2198 static void 2199 hxge_free_tx_buf_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap, 2200 uint32_t num_chunks) 2201 { 2202 int i; 2203 2204 HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_free_tx_buf_dma")); 2205 2206 for (i = 0; i < num_chunks; i++) { 2207 hxge_dma_mem_free(dmap++); 2208 } 2209 2210 HXGE_DEBUG_MSG((hxgep, MEM_CTL, "<== hxge_free_tx_buf_dma")); 2211 } 2212 2213 /*ARGSUSED*/ 2214 static hxge_status_t 2215 hxge_alloc_tx_cntl_dma(p_hxge_t hxgep, uint16_t dma_channel, 2216 p_hxge_dma_common_t *dmap, size_t size) 2217 { 2218 p_hxge_dma_common_t tx_dmap; 2219 hxge_status_t status = HXGE_OK; 2220 2221 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_tx_cntl_dma")); 2222 2223 tx_dmap = (p_hxge_dma_common_t)KMEM_ZALLOC(sizeof (hxge_dma_common_t), 2224 KM_SLEEP); 2225 2226 tx_dmap->contig_alloc_type = B_FALSE; 2227 2228 status = hxge_dma_mem_alloc(hxgep, hxge_force_dma, 2229 &hxge_tx_desc_dma_attr, size, &hxge_dev_desc_dma_acc_attr, 2230 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, tx_dmap); 2231 if (status != HXGE_OK) { 2232 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2233 " hxge_alloc_tx_cntl_dma: Alloc Failed: " 2234 " for size: %d", size)); 2235 goto hxge_alloc_tx_cntl_dma_fail1; 2236 } 2237 2238 *dmap = tx_dmap; 2239 2240 goto hxge_alloc_tx_cntl_dma_exit; 2241 2242 hxge_alloc_tx_cntl_dma_fail1: 2243 KMEM_FREE(tx_dmap, sizeof (hxge_dma_common_t)); 2244 2245 hxge_alloc_tx_cntl_dma_exit: 2246 HXGE_DEBUG_MSG((hxgep, DMA_CTL, 2247 "<== hxge_alloc_tx_cntl_dma status 0x%08x", status)); 2248 2249 return (status); 2250 } 2251 2252 /*ARGSUSED*/ 2253 static void 2254 hxge_free_tx_cntl_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap) 2255 { 2256 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_free_tx_cntl_dma")); 2257 2258 hxge_dma_mem_free(dmap); 2259 2260 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_free_tx_cntl_dma")); 2261 } 2262 2263 static void 2264 hxge_free_tx_mem_pool(p_hxge_t hxgep) 2265 { 2266 uint32_t i, ndmas; 2267 p_hxge_dma_pool_t dma_poolp; 2268 p_hxge_dma_common_t *dma_buf_p; 2269 p_hxge_dma_pool_t dma_cntl_poolp; 2270 p_hxge_dma_common_t *dma_cntl_p; 2271 uint32_t *num_chunks; 2272 2273 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_free_tx_mem_pool")); 2274 2275 dma_poolp = hxgep->tx_buf_pool_p; 2276 if (dma_poolp == NULL || (!dma_poolp->buf_allocated)) { 2277 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 2278 "<== hxge_free_tx_mem_pool " 2279 "(null rx buf pool or buf not allocated")); 2280 return; 2281 } 2282 2283 dma_cntl_poolp = hxgep->tx_cntl_pool_p; 2284 if (dma_cntl_poolp == NULL || (!dma_cntl_poolp->buf_allocated)) { 2285 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 2286 "<== hxge_free_tx_mem_pool " 2287 "(null tx cntl buf pool or cntl buf not allocated")); 2288 return; 2289 } 2290 2291 dma_buf_p = dma_poolp->dma_buf_pool_p; 2292 num_chunks = dma_poolp->num_chunks; 2293 2294 dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p; 2295 
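	/*
	 * Descriptive note: the teardown below mirrors
	 * hxge_alloc_tx_mem_pool() -- each channel's buffer chunks and
	 * control area are freed first, then the per-channel descriptor
	 * arrays, and finally the pool headers and the num_chunks
	 * bookkeeping array.
	 */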
ndmas = dma_cntl_poolp->ndmas; 2296 2297 for (i = 0; i < ndmas; i++) { 2298 hxge_free_tx_buf_dma(hxgep, dma_buf_p[i], num_chunks[i]); 2299 } 2300 2301 for (i = 0; i < ndmas; i++) { 2302 hxge_free_tx_cntl_dma(hxgep, dma_cntl_p[i]); 2303 } 2304 2305 for (i = 0; i < ndmas; i++) { 2306 KMEM_FREE(dma_buf_p[i], 2307 sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK); 2308 KMEM_FREE(dma_cntl_p[i], sizeof (hxge_dma_common_t)); 2309 } 2310 2311 KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas); 2312 KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_hxge_dma_common_t)); 2313 KMEM_FREE(dma_cntl_poolp, sizeof (hxge_dma_pool_t)); 2314 KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t)); 2315 KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t)); 2316 2317 hxgep->tx_buf_pool_p = NULL; 2318 hxgep->tx_cntl_pool_p = NULL; 2319 2320 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "<== hxge_free_tx_mem_pool")); 2321 } 2322 2323 /*ARGSUSED*/ 2324 static hxge_status_t 2325 hxge_dma_mem_alloc(p_hxge_t hxgep, dma_method_t method, 2326 struct ddi_dma_attr *dma_attrp, 2327 size_t length, ddi_device_acc_attr_t *acc_attr_p, uint_t xfer_flags, 2328 p_hxge_dma_common_t dma_p) 2329 { 2330 caddr_t kaddrp; 2331 int ddi_status = DDI_SUCCESS; 2332 2333 dma_p->dma_handle = NULL; 2334 dma_p->acc_handle = NULL; 2335 dma_p->kaddrp = NULL; 2336 2337 ddi_status = ddi_dma_alloc_handle(hxgep->dip, dma_attrp, 2338 DDI_DMA_DONTWAIT, NULL, &dma_p->dma_handle); 2339 if (ddi_status != DDI_SUCCESS) { 2340 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2341 "hxge_dma_mem_alloc:ddi_dma_alloc_handle failed.")); 2342 return (HXGE_ERROR | HXGE_DDI_FAILED); 2343 } 2344 2345 ddi_status = ddi_dma_mem_alloc(dma_p->dma_handle, length, acc_attr_p, 2346 xfer_flags, DDI_DMA_DONTWAIT, 0, &kaddrp, &dma_p->alength, 2347 &dma_p->acc_handle); 2348 if (ddi_status != DDI_SUCCESS) { 2349 /* The caller will decide whether it is fatal */ 2350 HXGE_DEBUG_MSG((hxgep, DMA_CTL, 2351 "hxge_dma_mem_alloc:ddi_dma_mem_alloc failed")); 2352 ddi_dma_free_handle(&dma_p->dma_handle); 2353 dma_p->dma_handle = NULL; 2354 return (HXGE_ERROR | HXGE_DDI_FAILED); 2355 } 2356 2357 if (dma_p->alength < length) { 2358 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2359 "hxge_dma_mem_alloc:ddi_dma_mem_alloc < length.")); 2360 ddi_dma_mem_free(&dma_p->acc_handle); 2361 ddi_dma_free_handle(&dma_p->dma_handle); 2362 dma_p->acc_handle = NULL; 2363 dma_p->dma_handle = NULL; 2364 return (HXGE_ERROR); 2365 } 2366 2367 ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL, 2368 kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0, 2369 &dma_p->dma_cookie, &dma_p->ncookies); 2370 if (ddi_status != DDI_DMA_MAPPED) { 2371 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2372 "hxge_dma_mem_alloc:di_dma_addr_bind failed " 2373 "(staus 0x%x ncookies %d.)", ddi_status, dma_p->ncookies)); 2374 if (dma_p->acc_handle) { 2375 ddi_dma_mem_free(&dma_p->acc_handle); 2376 dma_p->acc_handle = NULL; 2377 } 2378 ddi_dma_free_handle(&dma_p->dma_handle); 2379 dma_p->dma_handle = NULL; 2380 return (HXGE_ERROR | HXGE_DDI_FAILED); 2381 } 2382 2383 if (dma_p->ncookies != 1) { 2384 HXGE_DEBUG_MSG((hxgep, DMA_CTL, 2385 "hxge_dma_mem_alloc:ddi_dma_addr_bind > 1 cookie" 2386 "(staus 0x%x ncookies %d.)", ddi_status, dma_p->ncookies)); 2387 if (dma_p->acc_handle) { 2388 ddi_dma_mem_free(&dma_p->acc_handle); 2389 dma_p->acc_handle = NULL; 2390 } 2391 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 2392 ddi_dma_free_handle(&dma_p->dma_handle); 2393 dma_p->dma_handle = NULL; 2394 return (HXGE_ERROR); 2395 } 2396 2397 dma_p->kaddrp = kaddrp; 2398 #if defined(__i386) 2399 
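	/*
	 * Explanatory note (not in the original sources): on 32-bit x86 the
	 * 64-bit dmac_laddress cookie field is deliberately narrowed through
	 * uint32_t before being stored as a pointer, presumably to avoid a
	 * pointer-truncation warning on ILP32 builds.
	 */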
	dma_p->ioaddr_pp =
	    (unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress;
#else
	dma_p->ioaddr_pp = (unsigned char *)dma_p->dma_cookie.dmac_laddress;
#endif

	HPI_DMA_ACC_HANDLE_SET(dma_p, dma_p->acc_handle);

	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_dma_mem_alloc: "
	    "dma buffer allocated: dma_p $%p "
	    "return dmac_laddress from cookie $%p dmac_size %d "
	    "dma_p->ioaddr_p $%p "
	    "dma_p->orig_ioaddr_p $%p "
	    "orig_vatopa $%p "
	    "alength %d (0x%x) "
	    "kaddrp $%p "
	    "length %d (0x%x)",
	    dma_p,
	    dma_p->dma_cookie.dmac_laddress,
	    dma_p->dma_cookie.dmac_size,
	    dma_p->ioaddr_pp,
	    dma_p->orig_ioaddr_pp,
	    dma_p->orig_vatopa,
	    dma_p->alength, dma_p->alength,
	    kaddrp,
	    length, length));

	return (HXGE_OK);
}

static void
hxge_dma_mem_free(p_hxge_dma_common_t dma_p)
{
	if (dma_p == NULL)
		return;

	if (dma_p->dma_handle != NULL) {
		if (dma_p->ncookies) {
			(void) ddi_dma_unbind_handle(dma_p->dma_handle);
			dma_p->ncookies = 0;
		}
		ddi_dma_free_handle(&dma_p->dma_handle);
		dma_p->dma_handle = NULL;
	}

	if (dma_p->acc_handle != NULL) {
		ddi_dma_mem_free(&dma_p->acc_handle);
		dma_p->acc_handle = NULL;
		HPI_DMA_ACC_HANDLE_SET(dma_p, NULL);
	}

	dma_p->kaddrp = NULL;
	dma_p->alength = 0;
}

/*
 * hxge_m_start() -- start transmitting and receiving.
 *
 * This function is called by the MAC layer when the first
 * stream is opened, to prepare the hardware for sending
 * and receiving packets.
 */
static int
hxge_m_start(void *arg)
{
	p_hxge_t	hxgep = (p_hxge_t)arg;

	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_start"));

	MUTEX_ENTER(hxgep->genlock);

	if (hxge_init(hxgep) != DDI_SUCCESS) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "<== hxge_m_start: initialization failed"));
		MUTEX_EXIT(hxgep->genlock);
		return (EIO);
	}

	if (hxgep->hxge_mac_state != HXGE_MAC_STARTED) {
		/*
		 * Start the timer that checks for system errors and
		 * tx hangs.
		 */
		hxgep->hxge_timerid = hxge_start_timer(hxgep,
		    hxge_check_hw_state, HXGE_CHECK_TIMER);

		hxgep->hxge_mac_state = HXGE_MAC_STARTED;

		hxgep->timeout.link_status = 0;
		hxgep->timeout.report_link_status = B_TRUE;
		hxgep->timeout.ticks = drv_usectohz(2 * 1000000);

		/* Start the link status polling timer */
		MUTEX_ENTER(&hxgep->timeout.lock);
		hxgep->timeout.id = timeout(hxge_link_poll, (void *)hxgep,
		    hxgep->timeout.ticks);
		MUTEX_EXIT(&hxgep->timeout.lock);
	}

	MUTEX_EXIT(hxgep->genlock);

	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_start"));

	return (0);
}

/*
 * hxge_m_stop(): stop transmitting and receiving.
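 *
 * Sketch of the sequence, derived from the code below: the hardware-check
 * and link-poll timers are cancelled first, the link is reported down, and
 * then hxge_uninit() tears the device down under genlock before the state
 * moves to HXGE_MAC_STOPPED.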
2506 */ 2507 static void 2508 hxge_m_stop(void *arg) 2509 { 2510 p_hxge_t hxgep = (p_hxge_t)arg; 2511 2512 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_stop")); 2513 2514 if (hxgep->hxge_timerid) { 2515 hxge_stop_timer(hxgep, hxgep->hxge_timerid); 2516 hxgep->hxge_timerid = 0; 2517 } 2518 2519 /* Stop the link status timer before unregistering */ 2520 MUTEX_ENTER(&hxgep->timeout.lock); 2521 if (hxgep->timeout.id) { 2522 (void) untimeout(hxgep->timeout.id); 2523 hxgep->timeout.id = 0; 2524 } 2525 hxge_link_update(hxgep, LINK_STATE_DOWN); 2526 MUTEX_EXIT(&hxgep->timeout.lock); 2527 2528 MUTEX_ENTER(hxgep->genlock); 2529 2530 hxge_uninit(hxgep); 2531 2532 hxgep->hxge_mac_state = HXGE_MAC_STOPPED; 2533 2534 MUTEX_EXIT(hxgep->genlock); 2535 2536 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_stop")); 2537 } 2538 2539 static int 2540 hxge_m_unicst(void *arg, const uint8_t *macaddr) 2541 { 2542 p_hxge_t hxgep = (p_hxge_t)arg; 2543 struct ether_addr addrp; 2544 hxge_status_t status; 2545 2546 HXGE_DEBUG_MSG((hxgep, MAC_CTL, "==> hxge_m_unicst")); 2547 2548 bcopy(macaddr, (uint8_t *)&addrp, ETHERADDRL); 2549 2550 status = hxge_set_mac_addr(hxgep, &addrp); 2551 if (status != HXGE_OK) { 2552 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2553 "<== hxge_m_unicst: set unitcast failed")); 2554 return (EINVAL); 2555 } 2556 2557 HXGE_DEBUG_MSG((hxgep, MAC_CTL, "<== hxge_m_unicst")); 2558 2559 return (0); 2560 } 2561 2562 static int 2563 hxge_m_multicst(void *arg, boolean_t add, const uint8_t *mca) 2564 { 2565 p_hxge_t hxgep = (p_hxge_t)arg; 2566 struct ether_addr addrp; 2567 2568 HXGE_DEBUG_MSG((hxgep, MAC_CTL, "==> hxge_m_multicst: add %d", add)); 2569 2570 bcopy(mca, (uint8_t *)&addrp, ETHERADDRL); 2571 2572 if (add) { 2573 if (hxge_add_mcast_addr(hxgep, &addrp)) { 2574 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2575 "<== hxge_m_multicst: add multicast failed")); 2576 return (EINVAL); 2577 } 2578 } else { 2579 if (hxge_del_mcast_addr(hxgep, &addrp)) { 2580 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2581 "<== hxge_m_multicst: del multicast failed")); 2582 return (EINVAL); 2583 } 2584 } 2585 2586 HXGE_DEBUG_MSG((hxgep, MAC_CTL, "<== hxge_m_multicst")); 2587 2588 return (0); 2589 } 2590 2591 static int 2592 hxge_m_promisc(void *arg, boolean_t on) 2593 { 2594 p_hxge_t hxgep = (p_hxge_t)arg; 2595 2596 HXGE_DEBUG_MSG((hxgep, MAC_CTL, "==> hxge_m_promisc: on %d", on)); 2597 2598 if (hxge_set_promisc(hxgep, on)) { 2599 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2600 "<== hxge_m_promisc: set promisc failed")); 2601 return (EINVAL); 2602 } 2603 2604 HXGE_DEBUG_MSG((hxgep, MAC_CTL, "<== hxge_m_promisc: on %d", on)); 2605 2606 return (0); 2607 } 2608 2609 static void 2610 hxge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp) 2611 { 2612 p_hxge_t hxgep = (p_hxge_t)arg; 2613 struct iocblk *iocp = (struct iocblk *)mp->b_rptr; 2614 boolean_t need_privilege; 2615 int err; 2616 int cmd; 2617 2618 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_ioctl")); 2619 2620 iocp = (struct iocblk *)mp->b_rptr; 2621 iocp->ioc_error = 0; 2622 need_privilege = B_TRUE; 2623 cmd = iocp->ioc_cmd; 2624 2625 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_ioctl: cmd 0x%08x", cmd)); 2626 switch (cmd) { 2627 default: 2628 miocnak(wq, mp, 0, EINVAL); 2629 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_ioctl: invalid")); 2630 return; 2631 2632 case LB_GET_INFO_SIZE: 2633 case LB_GET_INFO: 2634 case LB_GET_MODE: 2635 need_privilege = B_FALSE; 2636 break; 2637 2638 case LB_SET_MODE: 2639 break; 2640 2641 case ND_GET: 2642 need_privilege = B_FALSE; 2643 break; 2644 case ND_SET: 2645 break; 
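	/*
	 * Descriptive note: the driver-private HXGE_* diagnostic ioctls
	 * below are accepted without the secpolicy_net_config() privilege
	 * check that guards LB_SET_MODE and ND_SET.
	 */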
2646 2647 case HXGE_GET64: 2648 case HXGE_PUT64: 2649 case HXGE_GET_TX_RING_SZ: 2650 case HXGE_GET_TX_DESC: 2651 case HXGE_TX_SIDE_RESET: 2652 case HXGE_RX_SIDE_RESET: 2653 case HXGE_GLOBAL_RESET: 2654 case HXGE_RESET_MAC: 2655 case HXGE_PUT_TCAM: 2656 case HXGE_GET_TCAM: 2657 case HXGE_RTRACE: 2658 2659 need_privilege = B_FALSE; 2660 break; 2661 } 2662 2663 if (need_privilege) { 2664 err = secpolicy_net_config(iocp->ioc_cr, B_FALSE); 2665 if (err != 0) { 2666 miocnak(wq, mp, 0, err); 2667 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2668 "<== hxge_m_ioctl: no priv")); 2669 return; 2670 } 2671 } 2672 2673 switch (cmd) { 2674 case ND_GET: 2675 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "ND_GET command")); 2676 case ND_SET: 2677 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "ND_SET command")); 2678 hxge_param_ioctl(hxgep, wq, mp, iocp); 2679 break; 2680 2681 case LB_GET_MODE: 2682 case LB_SET_MODE: 2683 case LB_GET_INFO_SIZE: 2684 case LB_GET_INFO: 2685 hxge_loopback_ioctl(hxgep, wq, mp, iocp); 2686 break; 2687 2688 case HXGE_PUT_TCAM: 2689 case HXGE_GET_TCAM: 2690 case HXGE_GET64: 2691 case HXGE_PUT64: 2692 case HXGE_GET_TX_RING_SZ: 2693 case HXGE_GET_TX_DESC: 2694 case HXGE_TX_SIDE_RESET: 2695 case HXGE_RX_SIDE_RESET: 2696 case HXGE_GLOBAL_RESET: 2697 case HXGE_RESET_MAC: 2698 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, 2699 "==> hxge_m_ioctl: cmd 0x%x", cmd)); 2700 hxge_hw_ioctl(hxgep, wq, mp, iocp); 2701 break; 2702 } 2703 2704 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_ioctl")); 2705 } 2706 2707 /*ARGSUSED*/ 2708 boolean_t 2709 hxge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data) 2710 { 2711 uint32_t *txflags = cap_data; 2712 2713 switch (cap) { 2714 case MAC_CAPAB_HCKSUM: 2715 *txflags = HCKSUM_INET_PARTIAL; 2716 break; 2717 2718 default: 2719 return (B_FALSE); 2720 } 2721 return (B_TRUE); 2722 } 2723 2724 static boolean_t 2725 hxge_param_locked(mac_prop_id_t pr_num) 2726 { 2727 /* 2728 * All adv_* parameters are locked (read-only) while 2729 * the device is in any sort of loopback mode ... 2730 */ 2731 switch (pr_num) { 2732 case MAC_PROP_ADV_1000FDX_CAP: 2733 case MAC_PROP_EN_1000FDX_CAP: 2734 case MAC_PROP_ADV_1000HDX_CAP: 2735 case MAC_PROP_EN_1000HDX_CAP: 2736 case MAC_PROP_ADV_100FDX_CAP: 2737 case MAC_PROP_EN_100FDX_CAP: 2738 case MAC_PROP_ADV_100HDX_CAP: 2739 case MAC_PROP_EN_100HDX_CAP: 2740 case MAC_PROP_ADV_10FDX_CAP: 2741 case MAC_PROP_EN_10FDX_CAP: 2742 case MAC_PROP_ADV_10HDX_CAP: 2743 case MAC_PROP_EN_10HDX_CAP: 2744 case MAC_PROP_AUTONEG: 2745 case MAC_PROP_FLOWCTRL: 2746 return (B_TRUE); 2747 } 2748 return (B_FALSE); 2749 } 2750 2751 /* 2752 * callback functions for set/get of properties 2753 */ 2754 static int 2755 hxge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num, 2756 uint_t pr_valsize, const void *pr_val) 2757 { 2758 hxge_t *hxgep = barg; 2759 p_hxge_stats_t statsp; 2760 int err = 0; 2761 uint32_t new_mtu, old_framesize, new_framesize; 2762 2763 HXGE_DEBUG_MSG((hxgep, DLADM_CTL, "==> hxge_m_setprop")); 2764 2765 statsp = hxgep->statsp; 2766 mutex_enter(hxgep->genlock); 2767 if (statsp->port_stats.lb_mode != hxge_lb_normal && 2768 hxge_param_locked(pr_num)) { 2769 /* 2770 * All adv_* parameters are locked (read-only) 2771 * while the device is in any sort of loopback mode. 
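		 *
		 * For example (illustrative only), a
		 * "dladm set-linkprop -p en_1000fdx_cap=1 hxgeN" issued
		 * while a loopback test is active would be rejected here
		 * with EBUSY.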
2772 */ 2773 HXGE_DEBUG_MSG((hxgep, DLADM_CTL, 2774 "==> hxge_m_setprop: loopback mode: read only")); 2775 mutex_exit(hxgep->genlock); 2776 return (EBUSY); 2777 } 2778 2779 switch (pr_num) { 2780 /* 2781 * These properties are either not exist or read only 2782 */ 2783 case MAC_PROP_EN_1000FDX_CAP: 2784 case MAC_PROP_EN_100FDX_CAP: 2785 case MAC_PROP_EN_10FDX_CAP: 2786 case MAC_PROP_EN_1000HDX_CAP: 2787 case MAC_PROP_EN_100HDX_CAP: 2788 case MAC_PROP_EN_10HDX_CAP: 2789 case MAC_PROP_ADV_1000FDX_CAP: 2790 case MAC_PROP_ADV_1000HDX_CAP: 2791 case MAC_PROP_ADV_100FDX_CAP: 2792 case MAC_PROP_ADV_100HDX_CAP: 2793 case MAC_PROP_ADV_10FDX_CAP: 2794 case MAC_PROP_ADV_10HDX_CAP: 2795 case MAC_PROP_STATUS: 2796 case MAC_PROP_SPEED: 2797 case MAC_PROP_DUPLEX: 2798 case MAC_PROP_AUTONEG: 2799 /* 2800 * Flow control is handled in the shared domain and 2801 * it is readonly here. 2802 */ 2803 case MAC_PROP_FLOWCTRL: 2804 err = EINVAL; 2805 HXGE_DEBUG_MSG((hxgep, DLADM_CTL, 2806 "==> hxge_m_setprop: read only property %d", 2807 pr_num)); 2808 break; 2809 2810 case MAC_PROP_MTU: 2811 bcopy(pr_val, &new_mtu, sizeof (new_mtu)); 2812 HXGE_DEBUG_MSG((hxgep, DLADM_CTL, 2813 "==> hxge_m_setprop: set MTU: %d", new_mtu)); 2814 2815 new_framesize = new_mtu + MTU_TO_FRAME_SIZE; 2816 if (new_framesize == hxgep->vmac.maxframesize) { 2817 err = 0; 2818 break; 2819 } 2820 2821 if (hxgep->hxge_mac_state == HXGE_MAC_STARTED) { 2822 err = EBUSY; 2823 break; 2824 } 2825 2826 if (new_framesize < MIN_FRAME_SIZE || 2827 new_framesize > MAX_FRAME_SIZE) { 2828 err = EINVAL; 2829 break; 2830 } 2831 2832 old_framesize = hxgep->vmac.maxframesize; 2833 hxgep->vmac.maxframesize = (uint16_t)new_framesize; 2834 2835 if (hxge_vmac_set_framesize(hxgep)) { 2836 hxgep->vmac.maxframesize = 2837 (uint16_t)old_framesize; 2838 err = EINVAL; 2839 break; 2840 } 2841 2842 err = mac_maxsdu_update(hxgep->mach, new_mtu); 2843 if (err) { 2844 hxgep->vmac.maxframesize = 2845 (uint16_t)old_framesize; 2846 (void) hxge_vmac_set_framesize(hxgep); 2847 } 2848 2849 HXGE_DEBUG_MSG((hxgep, DLADM_CTL, 2850 "==> hxge_m_setprop: set MTU: %d maxframe %d", 2851 new_mtu, hxgep->vmac.maxframesize)); 2852 break; 2853 2854 case MAC_PROP_PRIVATE: 2855 HXGE_DEBUG_MSG((hxgep, DLADM_CTL, 2856 "==> hxge_m_setprop: private property")); 2857 err = hxge_set_priv_prop(hxgep, pr_name, pr_valsize, 2858 pr_val); 2859 break; 2860 2861 default: 2862 err = ENOTSUP; 2863 break; 2864 } 2865 2866 mutex_exit(hxgep->genlock); 2867 2868 HXGE_DEBUG_MSG((hxgep, DLADM_CTL, 2869 "<== hxge_m_setprop (return %d)", err)); 2870 2871 return (err); 2872 } 2873 2874 /* ARGSUSED */ 2875 static int 2876 hxge_get_def_val(hxge_t *hxgep, mac_prop_id_t pr_num, uint_t pr_valsize, 2877 void *pr_val) 2878 { 2879 int err = 0; 2880 link_flowctrl_t fl; 2881 2882 switch (pr_num) { 2883 case MAC_PROP_DUPLEX: 2884 *(uint8_t *)pr_val = 2; 2885 break; 2886 case MAC_PROP_AUTONEG: 2887 *(uint8_t *)pr_val = 0; 2888 break; 2889 case MAC_PROP_FLOWCTRL: 2890 if (pr_valsize < sizeof (link_flowctrl_t)) 2891 return (EINVAL); 2892 fl = LINK_FLOWCTRL_TX; 2893 bcopy(&fl, pr_val, sizeof (fl)); 2894 break; 2895 default: 2896 err = ENOTSUP; 2897 break; 2898 } 2899 return (err); 2900 } 2901 2902 static int 2903 hxge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num, 2904 uint_t pr_flags, uint_t pr_valsize, void *pr_val, uint_t *perm) 2905 { 2906 hxge_t *hxgep = barg; 2907 p_hxge_stats_t statsp = hxgep->statsp; 2908 int err = 0; 2909 link_flowctrl_t fl; 2910 uint64_t tmp = 0; 2911 link_state_t ls; 2912 2913 
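	/*
	 * Summary (descriptive note, not from the original sources): default
	 * values requested via MAC_PROP_DEFAULT are answered from
	 * hxge_get_def_val(), current values come from the MAC statistics,
	 * and *perm is downgraded to MAC_PROP_PERM_READ for the properties
	 * this device cannot change.
	 */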
HXGE_DEBUG_MSG((hxgep, DLADM_CTL, 2914 "==> hxge_m_getprop: pr_num %d", pr_num)); 2915 2916 if (pr_valsize == 0) 2917 return (EINVAL); 2918 2919 *perm = MAC_PROP_PERM_RW; 2920 2921 if ((pr_flags & MAC_PROP_DEFAULT) && (pr_num != MAC_PROP_PRIVATE)) { 2922 err = hxge_get_def_val(hxgep, pr_num, pr_valsize, pr_val); 2923 return (err); 2924 } 2925 2926 bzero(pr_val, pr_valsize); 2927 switch (pr_num) { 2928 case MAC_PROP_DUPLEX: 2929 *perm = MAC_PROP_PERM_READ; 2930 *(uint8_t *)pr_val = statsp->mac_stats.link_duplex; 2931 HXGE_DEBUG_MSG((hxgep, DLADM_CTL, 2932 "==> hxge_m_getprop: duplex mode %d", 2933 *(uint8_t *)pr_val)); 2934 break; 2935 2936 case MAC_PROP_SPEED: 2937 *perm = MAC_PROP_PERM_READ; 2938 if (pr_valsize < sizeof (uint64_t)) 2939 return (EINVAL); 2940 tmp = statsp->mac_stats.link_speed * 1000000ull; 2941 bcopy(&tmp, pr_val, sizeof (tmp)); 2942 break; 2943 2944 case MAC_PROP_STATUS: 2945 *perm = MAC_PROP_PERM_READ; 2946 if (pr_valsize < sizeof (link_state_t)) 2947 return (EINVAL); 2948 if (!statsp->mac_stats.link_up) 2949 ls = LINK_STATE_DOWN; 2950 else 2951 ls = LINK_STATE_UP; 2952 bcopy(&ls, pr_val, sizeof (ls)); 2953 break; 2954 2955 case MAC_PROP_FLOWCTRL: 2956 /* 2957 * Flow control is supported by the shared domain and 2958 * it is currently transmit only 2959 */ 2960 *perm = MAC_PROP_PERM_READ; 2961 if (pr_valsize < sizeof (link_flowctrl_t)) 2962 return (EINVAL); 2963 fl = LINK_FLOWCTRL_TX; 2964 bcopy(&fl, pr_val, sizeof (fl)); 2965 break; 2966 case MAC_PROP_AUTONEG: 2967 /* 10G link only and it is not negotiable */ 2968 *perm = MAC_PROP_PERM_READ; 2969 *(uint8_t *)pr_val = 0; 2970 break; 2971 case MAC_PROP_ADV_1000FDX_CAP: 2972 case MAC_PROP_ADV_100FDX_CAP: 2973 case MAC_PROP_ADV_10FDX_CAP: 2974 case MAC_PROP_ADV_1000HDX_CAP: 2975 case MAC_PROP_ADV_100HDX_CAP: 2976 case MAC_PROP_ADV_10HDX_CAP: 2977 case MAC_PROP_EN_1000FDX_CAP: 2978 case MAC_PROP_EN_100FDX_CAP: 2979 case MAC_PROP_EN_10FDX_CAP: 2980 case MAC_PROP_EN_1000HDX_CAP: 2981 case MAC_PROP_EN_100HDX_CAP: 2982 case MAC_PROP_EN_10HDX_CAP: 2983 err = ENOTSUP; 2984 break; 2985 2986 case MAC_PROP_PRIVATE: 2987 err = hxge_get_priv_prop(hxgep, pr_name, pr_flags, 2988 pr_valsize, pr_val); 2989 break; 2990 default: 2991 err = EINVAL; 2992 break; 2993 } 2994 2995 HXGE_DEBUG_MSG((hxgep, DLADM_CTL, "<== hxge_m_getprop")); 2996 2997 return (err); 2998 } 2999 3000 /* ARGSUSED */ 3001 static int 3002 hxge_set_priv_prop(p_hxge_t hxgep, const char *pr_name, uint_t pr_valsize, 3003 const void *pr_val) 3004 { 3005 p_hxge_param_t param_arr = hxgep->param_arr; 3006 int err = 0; 3007 3008 HXGE_DEBUG_MSG((hxgep, DLADM_CTL, 3009 "==> hxge_set_priv_prop: name %s (value %s)", pr_name, pr_val)); 3010 3011 if (pr_val == NULL) { 3012 return (EINVAL); 3013 } 3014 3015 /* Blanking */ 3016 if (strcmp(pr_name, "_rxdma_intr_time") == 0) { 3017 err = hxge_param_rx_intr_time(hxgep, NULL, NULL, 3018 (char *)pr_val, (caddr_t)¶m_arr[param_rxdma_intr_time]); 3019 } else if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) { 3020 err = hxge_param_rx_intr_pkts(hxgep, NULL, NULL, 3021 (char *)pr_val, (caddr_t)¶m_arr[param_rxdma_intr_pkts]); 3022 3023 /* Classification */ 3024 } else if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) { 3025 err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val, 3026 (caddr_t)¶m_arr[param_class_opt_ipv4_tcp]); 3027 } else if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) { 3028 err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val, 3029 (caddr_t)¶m_arr[param_class_opt_ipv4_udp]); 3030 } else if (strcmp(pr_name, 
"_class_opt_ipv4_ah") == 0) { 3031 err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val, 3032 (caddr_t)¶m_arr[param_class_opt_ipv4_ah]); 3033 } else if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) { 3034 err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val, 3035 (caddr_t)¶m_arr[param_class_opt_ipv4_sctp]); 3036 } else if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) { 3037 err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val, 3038 (caddr_t)¶m_arr[param_class_opt_ipv6_tcp]); 3039 } else if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) { 3040 err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val, 3041 (caddr_t)¶m_arr[param_class_opt_ipv6_udp]); 3042 } else if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) { 3043 err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val, 3044 (caddr_t)¶m_arr[param_class_opt_ipv6_ah]); 3045 } else if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) { 3046 err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val, 3047 (caddr_t)¶m_arr[param_class_opt_ipv6_sctp]); 3048 } else { 3049 err = EINVAL; 3050 } 3051 3052 HXGE_DEBUG_MSG((hxgep, DLADM_CTL, 3053 "<== hxge_set_priv_prop: err %d", err)); 3054 3055 return (err); 3056 } 3057 3058 static int 3059 hxge_get_priv_prop(p_hxge_t hxgep, const char *pr_name, uint_t pr_flags, 3060 uint_t pr_valsize, void *pr_val) 3061 { 3062 p_hxge_param_t param_arr = hxgep->param_arr; 3063 char valstr[MAXNAMELEN]; 3064 int err = 0; 3065 uint_t strsize; 3066 int value = 0; 3067 3068 HXGE_DEBUG_MSG((hxgep, DLADM_CTL, 3069 "==> hxge_get_priv_prop: property %s", pr_name)); 3070 3071 if (pr_flags & MAC_PROP_DEFAULT) { 3072 /* Receive Interrupt Blanking Parameters */ 3073 if (strcmp(pr_name, "_rxdma_intr_time") == 0) { 3074 value = RXDMA_RCR_TO_DEFAULT; 3075 } else if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) { 3076 value = RXDMA_RCR_PTHRES_DEFAULT; 3077 3078 /* Classification and Load Distribution Configuration */ 3079 } else if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0 || 3080 strcmp(pr_name, "_class_opt_ipv4_udp") == 0 || 3081 strcmp(pr_name, "_class_opt_ipv4_ah") == 0 || 3082 strcmp(pr_name, "_class_opt_ipv4_sctp") == 0 || 3083 strcmp(pr_name, "_class_opt_ipv6_tcp") == 0 || 3084 strcmp(pr_name, "_class_opt_ipv6_udp") == 0 || 3085 strcmp(pr_name, "_class_opt_ipv6_ah") == 0 || 3086 strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) { 3087 value = HXGE_CLASS_TCAM_LOOKUP; 3088 } else { 3089 err = EINVAL; 3090 } 3091 } else { 3092 /* Receive Interrupt Blanking Parameters */ 3093 if (strcmp(pr_name, "_rxdma_intr_time") == 0) { 3094 value = hxgep->intr_timeout; 3095 } else if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) { 3096 value = hxgep->intr_threshold; 3097 3098 /* Classification and Load Distribution Configuration */ 3099 } else if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) { 3100 err = hxge_param_get_ip_opt(hxgep, NULL, NULL, 3101 (caddr_t)¶m_arr[param_class_opt_ipv4_tcp]); 3102 3103 value = (int)param_arr[param_class_opt_ipv4_tcp].value; 3104 } else if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) { 3105 err = hxge_param_get_ip_opt(hxgep, NULL, NULL, 3106 (caddr_t)¶m_arr[param_class_opt_ipv4_udp]); 3107 3108 value = (int)param_arr[param_class_opt_ipv4_udp].value; 3109 } else if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) { 3110 err = hxge_param_get_ip_opt(hxgep, NULL, NULL, 3111 (caddr_t)¶m_arr[param_class_opt_ipv4_ah]); 3112 3113 value = (int)param_arr[param_class_opt_ipv4_ah].value; 3114 } else if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) { 3115 err = hxge_param_get_ip_opt(hxgep, NULL, 
NULL, 3116 (caddr_t)¶m_arr[param_class_opt_ipv4_sctp]); 3117 3118 value = (int)param_arr[param_class_opt_ipv4_sctp].value; 3119 } else if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) { 3120 err = hxge_param_get_ip_opt(hxgep, NULL, NULL, 3121 (caddr_t)¶m_arr[param_class_opt_ipv6_tcp]); 3122 3123 value = (int)param_arr[param_class_opt_ipv6_tcp].value; 3124 } else if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) { 3125 err = hxge_param_get_ip_opt(hxgep, NULL, NULL, 3126 (caddr_t)¶m_arr[param_class_opt_ipv6_udp]); 3127 3128 value = (int)param_arr[param_class_opt_ipv6_udp].value; 3129 } else if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) { 3130 err = hxge_param_get_ip_opt(hxgep, NULL, NULL, 3131 (caddr_t)¶m_arr[param_class_opt_ipv6_ah]); 3132 3133 value = (int)param_arr[param_class_opt_ipv6_ah].value; 3134 } else if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) { 3135 err = hxge_param_get_ip_opt(hxgep, NULL, NULL, 3136 (caddr_t)¶m_arr[param_class_opt_ipv6_sctp]); 3137 3138 value = (int)param_arr[param_class_opt_ipv6_sctp].value; 3139 } else { 3140 err = EINVAL; 3141 } 3142 } 3143 3144 if (err == 0) { 3145 (void) snprintf(valstr, sizeof (valstr), "0x%x", value); 3146 3147 strsize = (uint_t)strlen(valstr); 3148 if (pr_valsize < strsize) { 3149 err = ENOBUFS; 3150 } else { 3151 (void) strlcpy(pr_val, valstr, pr_valsize); 3152 } 3153 } 3154 3155 HXGE_DEBUG_MSG((hxgep, DLADM_CTL, 3156 "<== hxge_get_priv_prop: return %d", err)); 3157 3158 return (err); 3159 } 3160 /* 3161 * Module loading and removing entry points. 3162 */ 3163 DDI_DEFINE_STREAM_OPS(hxge_dev_ops, nulldev, nulldev, hxge_attach, hxge_detach, 3164 nodev, NULL, D_MP, NULL, NULL); 3165 3166 extern struct mod_ops mod_driverops; 3167 3168 #define HXGE_DESC_VER "HXGE 10Gb Ethernet Driver" 3169 3170 /* 3171 * Module linkage information for the kernel. 
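 *
 * Illustrative check only: once the module is installed, something like
 * "modinfo | grep hxge" should list it with the HXGE_DESC_VER string
 * defined above.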
3172 */ 3173 static struct modldrv hxge_modldrv = { 3174 &mod_driverops, 3175 HXGE_DESC_VER, 3176 &hxge_dev_ops 3177 }; 3178 3179 static struct modlinkage modlinkage = { 3180 MODREV_1, (void *) &hxge_modldrv, NULL 3181 }; 3182 3183 int 3184 _init(void) 3185 { 3186 int status; 3187 3188 HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _init")); 3189 mac_init_ops(&hxge_dev_ops, "hxge"); 3190 status = ddi_soft_state_init(&hxge_list, sizeof (hxge_t), 0); 3191 if (status != 0) { 3192 HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL, 3193 "failed to init device soft state")); 3194 mac_fini_ops(&hxge_dev_ops); 3195 goto _init_exit; 3196 } 3197 3198 status = mod_install(&modlinkage); 3199 if (status != 0) { 3200 ddi_soft_state_fini(&hxge_list); 3201 HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL, "Mod install failed")); 3202 goto _init_exit; 3203 } 3204 3205 MUTEX_INIT(&hxge_common_lock, NULL, MUTEX_DRIVER, NULL); 3206 3207 _init_exit: 3208 HXGE_DEBUG_MSG((NULL, MOD_CTL, "_init status = 0x%X", status)); 3209 3210 return (status); 3211 } 3212 3213 int 3214 _fini(void) 3215 { 3216 int status; 3217 3218 HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini")); 3219 3220 HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini: mod_remove")); 3221 3222 if (hxge_mblks_pending) 3223 return (EBUSY); 3224 3225 status = mod_remove(&modlinkage); 3226 if (status != DDI_SUCCESS) { 3227 HXGE_DEBUG_MSG((NULL, MOD_CTL, 3228 "Module removal failed 0x%08x", status)); 3229 goto _fini_exit; 3230 } 3231 3232 mac_fini_ops(&hxge_dev_ops); 3233 3234 ddi_soft_state_fini(&hxge_list); 3235 3236 MUTEX_DESTROY(&hxge_common_lock); 3237 3238 _fini_exit: 3239 HXGE_DEBUG_MSG((NULL, MOD_CTL, "_fini status = 0x%08x", status)); 3240 3241 return (status); 3242 } 3243 3244 int 3245 _info(struct modinfo *modinfop) 3246 { 3247 int status; 3248 3249 HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _info")); 3250 status = mod_info(&modlinkage, modinfop); 3251 HXGE_DEBUG_MSG((NULL, MOD_CTL, " _info status = 0x%X", status)); 3252 3253 return (status); 3254 } 3255 3256 /*ARGSUSED*/ 3257 hxge_status_t 3258 hxge_add_intrs(p_hxge_t hxgep) 3259 { 3260 int intr_types; 3261 int type = 0; 3262 int ddi_status = DDI_SUCCESS; 3263 hxge_status_t status = HXGE_OK; 3264 3265 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs")); 3266 3267 hxgep->hxge_intr_type.intr_registered = B_FALSE; 3268 hxgep->hxge_intr_type.intr_enabled = B_FALSE; 3269 hxgep->hxge_intr_type.msi_intx_cnt = 0; 3270 hxgep->hxge_intr_type.intr_added = 0; 3271 hxgep->hxge_intr_type.niu_msi_enable = B_FALSE; 3272 hxgep->hxge_intr_type.intr_type = 0; 3273 3274 if (hxge_msi_enable) { 3275 hxgep->hxge_intr_type.niu_msi_enable = B_TRUE; 3276 } 3277 3278 /* Get the supported interrupt types */ 3279 if ((ddi_status = ddi_intr_get_supported_types(hxgep->dip, &intr_types)) 3280 != DDI_SUCCESS) { 3281 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "<== hxge_add_intrs: " 3282 "ddi_intr_get_supported_types failed: status 0x%08x", 3283 ddi_status)); 3284 return (HXGE_ERROR | HXGE_DDI_FAILED); 3285 } 3286 3287 hxgep->hxge_intr_type.intr_types = intr_types; 3288 3289 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: " 3290 "ddi_intr_get_supported_types: 0x%08x", intr_types)); 3291 3292 /* 3293 * Pick the interrupt type to use MSIX, MSI, INTX hxge_msi_enable: 3294 * (1): 1 - MSI 3295 * (2): 2 - MSI-X 3296 * others - FIXED 3297 */ 3298 switch (hxge_msi_enable) { 3299 default: 3300 type = DDI_INTR_TYPE_FIXED; 3301 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: " 3302 "use fixed (intx emulation) type %08x", type)); 3303 break; 3304 3305 case 2: 3306 HXGE_DEBUG_MSG((hxgep, INT_CTL, 
"==> hxge_add_intrs: " 3307 "ddi_intr_get_supported_types: 0x%08x", intr_types)); 3308 if (intr_types & DDI_INTR_TYPE_MSIX) { 3309 type = DDI_INTR_TYPE_MSIX; 3310 HXGE_DEBUG_MSG((hxgep, INT_CTL, 3311 "==> hxge_add_intrs: " 3312 "ddi_intr_get_supported_types: MSIX 0x%08x", type)); 3313 } else if (intr_types & DDI_INTR_TYPE_MSI) { 3314 type = DDI_INTR_TYPE_MSI; 3315 HXGE_DEBUG_MSG((hxgep, INT_CTL, 3316 "==> hxge_add_intrs: " 3317 "ddi_intr_get_supported_types: MSI 0x%08x", type)); 3318 } else if (intr_types & DDI_INTR_TYPE_FIXED) { 3319 type = DDI_INTR_TYPE_FIXED; 3320 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: " 3321 "ddi_intr_get_supported_types: MSXED0x%08x", type)); 3322 } 3323 break; 3324 3325 case 1: 3326 if (intr_types & DDI_INTR_TYPE_MSI) { 3327 type = DDI_INTR_TYPE_MSI; 3328 HXGE_DEBUG_MSG((hxgep, INT_CTL, 3329 "==> hxge_add_intrs: " 3330 "ddi_intr_get_supported_types: MSI 0x%08x", type)); 3331 } else if (intr_types & DDI_INTR_TYPE_MSIX) { 3332 type = DDI_INTR_TYPE_MSIX; 3333 HXGE_DEBUG_MSG((hxgep, INT_CTL, 3334 "==> hxge_add_intrs: " 3335 "ddi_intr_get_supported_types: MSIX 0x%08x", type)); 3336 } else if (intr_types & DDI_INTR_TYPE_FIXED) { 3337 type = DDI_INTR_TYPE_FIXED; 3338 HXGE_DEBUG_MSG((hxgep, INT_CTL, 3339 "==> hxge_add_intrs: " 3340 "ddi_intr_get_supported_types: MSXED0x%08x", type)); 3341 } 3342 } 3343 3344 hxgep->hxge_intr_type.intr_type = type; 3345 if ((type == DDI_INTR_TYPE_MSIX || type == DDI_INTR_TYPE_MSI || 3346 type == DDI_INTR_TYPE_FIXED) && 3347 hxgep->hxge_intr_type.niu_msi_enable) { 3348 if ((status = hxge_add_intrs_adv(hxgep)) != DDI_SUCCESS) { 3349 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3350 " hxge_add_intrs: " 3351 " hxge_add_intrs_adv failed: status 0x%08x", 3352 status)); 3353 return (status); 3354 } else { 3355 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_add_intrs: " 3356 "interrupts registered : type %d", type)); 3357 hxgep->hxge_intr_type.intr_registered = B_TRUE; 3358 3359 HXGE_DEBUG_MSG((hxgep, DDI_CTL, 3360 "\nAdded advanced hxge add_intr_adv " 3361 "intr type 0x%x\n", type)); 3362 3363 return (status); 3364 } 3365 } 3366 3367 if (!hxgep->hxge_intr_type.intr_registered) { 3368 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3369 "==> hxge_add_intrs: failed to register interrupts")); 3370 return (HXGE_ERROR | HXGE_DDI_FAILED); 3371 } 3372 3373 HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs")); 3374 3375 return (status); 3376 } 3377 3378 /*ARGSUSED*/ 3379 static hxge_status_t 3380 hxge_add_soft_intrs(p_hxge_t hxgep) 3381 { 3382 int ddi_status = DDI_SUCCESS; 3383 hxge_status_t status = HXGE_OK; 3384 3385 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_add_soft_intrs")); 3386 3387 hxgep->resched_id = NULL; 3388 hxgep->resched_running = B_FALSE; 3389 ddi_status = ddi_add_softintr(hxgep->dip, DDI_SOFTINT_LOW, 3390 &hxgep->resched_id, NULL, NULL, hxge_reschedule, (caddr_t)hxgep); 3391 if (ddi_status != DDI_SUCCESS) { 3392 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "<== hxge_add_soft_intrs: " 3393 "ddi_add_softintrs failed: status 0x%08x", ddi_status)); 3394 return (HXGE_ERROR | HXGE_DDI_FAILED); 3395 } 3396 3397 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_ddi_add_soft_intrs")); 3398 3399 return (status); 3400 } 3401 3402 /*ARGSUSED*/ 3403 static hxge_status_t 3404 hxge_add_intrs_adv(p_hxge_t hxgep) 3405 { 3406 int intr_type; 3407 p_hxge_intr_t intrp; 3408 hxge_status_t status; 3409 3410 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv")); 3411 3412 intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type; 3413 intr_type = intrp->intr_type; 3414 3415 HXGE_DEBUG_MSG((hxgep, 
INT_CTL, "==> hxge_add_intrs_adv: type 0x%x", 3416 intr_type)); 3417 3418 switch (intr_type) { 3419 case DDI_INTR_TYPE_MSI: /* 0x2 */ 3420 case DDI_INTR_TYPE_MSIX: /* 0x4 */ 3421 status = hxge_add_intrs_adv_type(hxgep, intr_type); 3422 break; 3423 3424 case DDI_INTR_TYPE_FIXED: /* 0x1 */ 3425 status = hxge_add_intrs_adv_type_fix(hxgep, intr_type); 3426 break; 3427 3428 default: 3429 status = HXGE_ERROR; 3430 break; 3431 } 3432 3433 HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs_adv")); 3434 3435 return (status); 3436 } 3437 3438 /*ARGSUSED*/ 3439 static hxge_status_t 3440 hxge_add_intrs_adv_type(p_hxge_t hxgep, uint32_t int_type) 3441 { 3442 dev_info_t *dip = hxgep->dip; 3443 p_hxge_ldg_t ldgp; 3444 p_hxge_intr_t intrp; 3445 uint_t *inthandler; 3446 void *arg1, *arg2; 3447 int behavior; 3448 int nintrs, navail; 3449 int nactual, nrequired, nrequest; 3450 int inum = 0; 3451 int loop = 0; 3452 int x, y; 3453 int ddi_status = DDI_SUCCESS; 3454 hxge_status_t status = HXGE_OK; 3455 3456 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv_type")); 3457 3458 intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type; 3459 3460 ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs); 3461 if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) { 3462 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3463 "ddi_intr_get_nintrs() failed, status: 0x%x%, " 3464 "nintrs: %d", ddi_status, nintrs)); 3465 return (HXGE_ERROR | HXGE_DDI_FAILED); 3466 } 3467 3468 ddi_status = ddi_intr_get_navail(dip, int_type, &navail); 3469 if ((ddi_status != DDI_SUCCESS) || (navail == 0)) { 3470 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3471 "ddi_intr_get_navail() failed, status: 0x%x%, " 3472 "nintrs: %d", ddi_status, navail)); 3473 return (HXGE_ERROR | HXGE_DDI_FAILED); 3474 } 3475 3476 HXGE_DEBUG_MSG((hxgep, INT_CTL, 3477 "ddi_intr_get_navail() returned: intr type %d nintrs %d, navail %d", 3478 int_type, nintrs, navail)); 3479 3480 /* PSARC/2007/453 MSI-X interrupt limit override */ 3481 if (int_type == DDI_INTR_TYPE_MSIX) { 3482 nrequest = hxge_create_msi_property(hxgep); 3483 if (nrequest < navail) { 3484 navail = nrequest; 3485 HXGE_DEBUG_MSG((hxgep, INT_CTL, 3486 "hxge_add_intrs_adv_type: nintrs %d " 3487 "navail %d (nrequest %d)", 3488 nintrs, navail, nrequest)); 3489 } 3490 } 3491 3492 if (int_type == DDI_INTR_TYPE_MSI && !ISP2(navail)) { 3493 /* MSI must be power of 2 */ 3494 if ((navail & 16) == 16) { 3495 navail = 16; 3496 } else if ((navail & 8) == 8) { 3497 navail = 8; 3498 } else if ((navail & 4) == 4) { 3499 navail = 4; 3500 } else if ((navail & 2) == 2) { 3501 navail = 2; 3502 } else { 3503 navail = 1; 3504 } 3505 HXGE_DEBUG_MSG((hxgep, INT_CTL, 3506 "ddi_intr_get_navail(): (msi power of 2) nintrs %d, " 3507 "navail %d", nintrs, navail)); 3508 } 3509 3510 HXGE_DEBUG_MSG((hxgep, INT_CTL, 3511 "requesting: intr type %d nintrs %d, navail %d", 3512 int_type, nintrs, navail)); 3513 3514 behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? 
DDI_INTR_ALLOC_STRICT : 3515 DDI_INTR_ALLOC_NORMAL); 3516 intrp->intr_size = navail * sizeof (ddi_intr_handle_t); 3517 intrp->htable = kmem_zalloc(intrp->intr_size, KM_SLEEP); 3518 3519 ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum, 3520 navail, &nactual, behavior); 3521 if (ddi_status != DDI_SUCCESS || nactual == 0) { 3522 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3523 " ddi_intr_alloc() failed: %d", ddi_status)); 3524 kmem_free(intrp->htable, intrp->intr_size); 3525 return (HXGE_ERROR | HXGE_DDI_FAILED); 3526 } 3527 3528 HXGE_DEBUG_MSG((hxgep, INT_CTL, 3529 "ddi_intr_alloc() returned: navail %d nactual %d", 3530 navail, nactual)); 3531 3532 if ((ddi_status = ddi_intr_get_pri(intrp->htable[0], 3533 (uint_t *)&intrp->pri)) != DDI_SUCCESS) { 3534 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3535 " ddi_intr_get_pri() failed: %d", ddi_status)); 3536 /* Free already allocated interrupts */ 3537 for (y = 0; y < nactual; y++) { 3538 (void) ddi_intr_free(intrp->htable[y]); 3539 } 3540 3541 kmem_free(intrp->htable, intrp->intr_size); 3542 return (HXGE_ERROR | HXGE_DDI_FAILED); 3543 } 3544 3545 nrequired = 0; 3546 status = hxge_ldgv_init(hxgep, &nactual, &nrequired); 3547 if (status != HXGE_OK) { 3548 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3549 "hxge_add_intrs_adv_typ:hxge_ldgv_init " 3550 "failed: 0x%x", status)); 3551 /* Free already allocated interrupts */ 3552 for (y = 0; y < nactual; y++) { 3553 (void) ddi_intr_free(intrp->htable[y]); 3554 } 3555 3556 kmem_free(intrp->htable, intrp->intr_size); 3557 return (status); 3558 } 3559 3560 ldgp = hxgep->ldgvp->ldgp; 3561 HXGE_DEBUG_MSG((hxgep, INT_CTL, 3562 "After hxge_ldgv_init(): nreq %d nactual %d", nrequired, nactual)); 3563 3564 if (nactual < nrequired) 3565 loop = nactual; 3566 else 3567 loop = nrequired; 3568 3569 for (x = 0; x < loop; x++, ldgp++) { 3570 ldgp->vector = (uint8_t)x; 3571 arg1 = ldgp->ldvp; 3572 arg2 = hxgep; 3573 if (ldgp->nldvs == 1) { 3574 inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler; 3575 HXGE_DEBUG_MSG((hxgep, INT_CTL, 3576 "hxge_add_intrs_adv_type: arg1 0x%x arg2 0x%x: " 3577 "1-1 int handler (entry %d)\n", 3578 arg1, arg2, x)); 3579 } else if (ldgp->nldvs > 1) { 3580 inthandler = (uint_t *)ldgp->sys_intr_handler; 3581 HXGE_DEBUG_MSG((hxgep, INT_CTL, 3582 "hxge_add_intrs_adv_type: arg1 0x%x arg2 0x%x: " 3583 "nldevs %d int handler (entry %d)\n", 3584 arg1, arg2, ldgp->nldvs, x)); 3585 } 3586 HXGE_DEBUG_MSG((hxgep, INT_CTL, 3587 "==> hxge_add_intrs_adv_type: ddi_add_intr(inum) #%d " 3588 "htable 0x%llx", x, intrp->htable[x])); 3589 3590 if ((ddi_status = ddi_intr_add_handler(intrp->htable[x], 3591 (ddi_intr_handler_t *)inthandler, arg1, arg2)) != 3592 DDI_SUCCESS) { 3593 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3594 "==> hxge_add_intrs_adv_type: failed #%d " 3595 "status 0x%x", x, ddi_status)); 3596 for (y = 0; y < intrp->intr_added; y++) { 3597 (void) ddi_intr_remove_handler( 3598 intrp->htable[y]); 3599 } 3600 3601 /* Free already allocated intr */ 3602 for (y = 0; y < nactual; y++) { 3603 (void) ddi_intr_free(intrp->htable[y]); 3604 } 3605 kmem_free(intrp->htable, intrp->intr_size); 3606 3607 (void) hxge_ldgv_uninit(hxgep); 3608 3609 return (HXGE_ERROR | HXGE_DDI_FAILED); 3610 } 3611 3612 intrp->intr_added++; 3613 } 3614 intrp->msi_intx_cnt = nactual; 3615 3616 HXGE_DEBUG_MSG((hxgep, INT_CTL, 3617 "Requested: %d, Allowed: %d msi_intx_cnt %d intr_added %d", 3618 navail, nactual, intrp->msi_intx_cnt, intrp->intr_added)); 3619 3620 (void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap); 3621 (void) 
hxge_intr_ldgv_init(hxgep); 3622 3623 HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs_adv_type")); 3624 3625 return (status); 3626 } 3627 3628 /*ARGSUSED*/ 3629 static hxge_status_t 3630 hxge_add_intrs_adv_type_fix(p_hxge_t hxgep, uint32_t int_type) 3631 { 3632 dev_info_t *dip = hxgep->dip; 3633 p_hxge_ldg_t ldgp; 3634 p_hxge_intr_t intrp; 3635 uint_t *inthandler; 3636 void *arg1, *arg2; 3637 int behavior; 3638 int nintrs, navail; 3639 int nactual, nrequired; 3640 int inum = 0; 3641 int x, y; 3642 int ddi_status = DDI_SUCCESS; 3643 hxge_status_t status = HXGE_OK; 3644 3645 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv_type_fix")); 3646 intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type; 3647 3648 ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs); 3649 if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) { 3650 HXGE_DEBUG_MSG((hxgep, INT_CTL, 3651 "ddi_intr_get_nintrs() failed, status: 0x%x%, " 3652 "nintrs: %d", status, nintrs)); 3653 return (HXGE_ERROR | HXGE_DDI_FAILED); 3654 } 3655 3656 ddi_status = ddi_intr_get_navail(dip, int_type, &navail); 3657 if ((ddi_status != DDI_SUCCESS) || (navail == 0)) { 3658 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3659 "ddi_intr_get_navail() failed, status: 0x%x%, " 3660 "nintrs: %d", ddi_status, navail)); 3661 return (HXGE_ERROR | HXGE_DDI_FAILED); 3662 } 3663 3664 HXGE_DEBUG_MSG((hxgep, INT_CTL, 3665 "ddi_intr_get_navail() returned: nintrs %d, naavail %d", 3666 nintrs, navail)); 3667 3668 behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT : 3669 DDI_INTR_ALLOC_NORMAL); 3670 intrp->intr_size = navail * sizeof (ddi_intr_handle_t); 3671 intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP); 3672 ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum, 3673 navail, &nactual, behavior); 3674 if (ddi_status != DDI_SUCCESS || nactual == 0) { 3675 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3676 " ddi_intr_alloc() failed: %d", ddi_status)); 3677 kmem_free(intrp->htable, intrp->intr_size); 3678 return (HXGE_ERROR | HXGE_DDI_FAILED); 3679 } 3680 3681 if ((ddi_status = ddi_intr_get_pri(intrp->htable[0], 3682 (uint_t *)&intrp->pri)) != DDI_SUCCESS) { 3683 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3684 " ddi_intr_get_pri() failed: %d", ddi_status)); 3685 /* Free already allocated interrupts */ 3686 for (y = 0; y < nactual; y++) { 3687 (void) ddi_intr_free(intrp->htable[y]); 3688 } 3689 3690 kmem_free(intrp->htable, intrp->intr_size); 3691 return (HXGE_ERROR | HXGE_DDI_FAILED); 3692 } 3693 3694 nrequired = 0; 3695 status = hxge_ldgv_init(hxgep, &nactual, &nrequired); 3696 if (status != HXGE_OK) { 3697 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3698 "hxge_add_intrs_adv_type_fix:hxge_ldgv_init " 3699 "failed: 0x%x", status)); 3700 /* Free already allocated interrupts */ 3701 for (y = 0; y < nactual; y++) { 3702 (void) ddi_intr_free(intrp->htable[y]); 3703 } 3704 3705 kmem_free(intrp->htable, intrp->intr_size); 3706 return (status); 3707 } 3708 3709 ldgp = hxgep->ldgvp->ldgp; 3710 for (x = 0; x < nrequired; x++, ldgp++) { 3711 ldgp->vector = (uint8_t)x; 3712 arg1 = ldgp->ldvp; 3713 arg2 = hxgep; 3714 if (ldgp->nldvs == 1) { 3715 inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler; 3716 HXGE_DEBUG_MSG((hxgep, INT_CTL, 3717 "hxge_add_intrs_adv_type_fix: " 3718 "1-1 int handler(%d) ldg %d ldv %d " 3719 "arg1 $%p arg2 $%p\n", 3720 x, ldgp->ldg, ldgp->ldvp->ldv, arg1, arg2)); 3721 } else if (ldgp->nldvs > 1) { 3722 inthandler = (uint_t *)ldgp->sys_intr_handler; 3723 HXGE_DEBUG_MSG((hxgep, INT_CTL, 3724 "hxge_add_intrs_adv_type_fix: " 3725 "shared ldv %d int 
handler(%d) ldv %d ldg %d" 3726 "arg1 0x%016llx arg2 0x%016llx\n", 3727 x, ldgp->nldvs, ldgp->ldg, ldgp->ldvp->ldv, 3728 arg1, arg2)); 3729 } 3730 3731 if ((ddi_status = ddi_intr_add_handler(intrp->htable[x], 3732 (ddi_intr_handler_t *)inthandler, arg1, arg2)) != 3733 DDI_SUCCESS) { 3734 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3735 "==> hxge_add_intrs_adv_type_fix: failed #%d " 3736 "status 0x%x", x, ddi_status)); 3737 for (y = 0; y < intrp->intr_added; y++) { 3738 (void) ddi_intr_remove_handler( 3739 intrp->htable[y]); 3740 } 3741 for (y = 0; y < nactual; y++) { 3742 (void) ddi_intr_free(intrp->htable[y]); 3743 } 3744 /* Free already allocated intr */ 3745 kmem_free(intrp->htable, intrp->intr_size); 3746 3747 (void) hxge_ldgv_uninit(hxgep); 3748 3749 return (HXGE_ERROR | HXGE_DDI_FAILED); 3750 } 3751 intrp->intr_added++; 3752 } 3753 3754 intrp->msi_intx_cnt = nactual; 3755 3756 (void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap); 3757 3758 status = hxge_intr_ldgv_init(hxgep); 3759 3760 HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs_adv_type_fix")); 3761 3762 return (status); 3763 } 3764 3765 /*ARGSUSED*/ 3766 static void 3767 hxge_remove_intrs(p_hxge_t hxgep) 3768 { 3769 int i, inum; 3770 p_hxge_intr_t intrp; 3771 3772 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_remove_intrs")); 3773 intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type; 3774 if (!intrp->intr_registered) { 3775 HXGE_DEBUG_MSG((hxgep, INT_CTL, 3776 "<== hxge_remove_intrs: interrupts not registered")); 3777 return; 3778 } 3779 3780 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_remove_intrs:advanced")); 3781 3782 if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) { 3783 (void) ddi_intr_block_disable(intrp->htable, 3784 intrp->intr_added); 3785 } else { 3786 for (i = 0; i < intrp->intr_added; i++) { 3787 (void) ddi_intr_disable(intrp->htable[i]); 3788 } 3789 } 3790 3791 for (inum = 0; inum < intrp->intr_added; inum++) { 3792 if (intrp->htable[inum]) { 3793 (void) ddi_intr_remove_handler(intrp->htable[inum]); 3794 } 3795 } 3796 3797 for (inum = 0; inum < intrp->msi_intx_cnt; inum++) { 3798 if (intrp->htable[inum]) { 3799 HXGE_DEBUG_MSG((hxgep, DDI_CTL, 3800 "hxge_remove_intrs: ddi_intr_free inum %d " 3801 "msi_intx_cnt %d intr_added %d", 3802 inum, intrp->msi_intx_cnt, intrp->intr_added)); 3803 3804 (void) ddi_intr_free(intrp->htable[inum]); 3805 } 3806 } 3807 3808 kmem_free(intrp->htable, intrp->intr_size); 3809 intrp->intr_registered = B_FALSE; 3810 intrp->intr_enabled = B_FALSE; 3811 intrp->msi_intx_cnt = 0; 3812 intrp->intr_added = 0; 3813 3814 (void) hxge_ldgv_uninit(hxgep); 3815 3816 HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_remove_intrs")); 3817 } 3818 3819 /*ARGSUSED*/ 3820 static void 3821 hxge_remove_soft_intrs(p_hxge_t hxgep) 3822 { 3823 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_remove_soft_intrs")); 3824 3825 if (hxgep->resched_id) { 3826 ddi_remove_softintr(hxgep->resched_id); 3827 HXGE_DEBUG_MSG((hxgep, INT_CTL, 3828 "==> hxge_remove_soft_intrs: removed")); 3829 hxgep->resched_id = NULL; 3830 } 3831 3832 HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_remove_soft_intrs")); 3833 } 3834 3835 /*ARGSUSED*/ 3836 void 3837 hxge_intrs_enable(p_hxge_t hxgep) 3838 { 3839 p_hxge_intr_t intrp; 3840 int i; 3841 int status; 3842 3843 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_enable")); 3844 3845 intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type; 3846 3847 if (!intrp->intr_registered) { 3848 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "<== hxge_intrs_enable: " 3849 "interrupts are not registered")); 3850 return; 3851 } 3852 3853 if 
/*ARGSUSED*/
void
hxge_intrs_enable(p_hxge_t hxgep)
{
	p_hxge_intr_t	intrp;
	int		i;
	int		status;

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_enable"));

	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;

	if (!intrp->intr_registered) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "<== hxge_intrs_enable: "
		    "interrupts are not registered"));
		return;
	}

	if (intrp->intr_enabled) {
		HXGE_DEBUG_MSG((hxgep, INT_CTL,
		    "<== hxge_intrs_enable: already enabled"));
		return;
	}

	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
		status = ddi_intr_block_enable(intrp->htable,
		    intrp->intr_added);
		HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_enable "
		    "block enable - status 0x%x total inums #%d\n",
		    status, intrp->intr_added));
		/* Mark the block path enabled as well on success */
		if (status == DDI_SUCCESS) {
			intrp->intr_enabled = B_TRUE;
		}
	} else {
		for (i = 0; i < intrp->intr_added; i++) {
			status = ddi_intr_enable(intrp->htable[i]);
			HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_enable "
			    "ddi_intr_enable:enable - status 0x%x "
			    "total inums %d enable inum #%d\n",
			    status, intrp->intr_added, i));
			if (status == DDI_SUCCESS) {
				intrp->intr_enabled = B_TRUE;
			}
		}
	}

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intrs_enable"));
}

/*ARGSUSED*/
static void
hxge_intrs_disable(p_hxge_t hxgep)
{
	p_hxge_intr_t	intrp;
	int		i;

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_disable"));

	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;

	if (!intrp->intr_registered) {
		HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intrs_disable: "
		    "interrupts are not registered"));
		return;
	}

	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
		(void) ddi_intr_block_disable(intrp->htable,
		    intrp->intr_added);
	} else {
		for (i = 0; i < intrp->intr_added; i++) {
			(void) ddi_intr_disable(intrp->htable[i]);
		}
	}

	intrp->intr_enabled = B_FALSE;
	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intrs_disable"));
}

static hxge_status_t
hxge_mac_register(p_hxge_t hxgep)
{
	mac_register_t	*macp;
	int		status;

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_mac_register"));

	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
		return (HXGE_ERROR);

	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	macp->m_driver = hxgep;
	macp->m_dip = hxgep->dip;
	macp->m_src_addr = hxgep->ouraddr.ether_addr_octet;

	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
	    "hxge_mac_register: ether addr is %x:%x:%x:%x:%x:%x",
	    macp->m_src_addr[0],
	    macp->m_src_addr[1],
	    macp->m_src_addr[2],
	    macp->m_src_addr[3],
	    macp->m_src_addr[4],
	    macp->m_src_addr[5]));

	macp->m_callbacks = &hxge_m_callbacks;
	macp->m_min_sdu = 0;
	macp->m_max_sdu = hxgep->vmac.maxframesize - MTU_TO_FRAME_SIZE;
	macp->m_margin = VLAN_TAGSZ;
	macp->m_priv_props = hxge_priv_props;
	macp->m_priv_prop_count = HXGE_MAX_PRIV_PROPS;

	status = mac_register(macp, &hxgep->mach);
	mac_free(macp);

	if (status != 0) {
		cmn_err(CE_WARN,
		    "hxge_mac_register failed (status %d instance %d)",
		    status, hxgep->instance);
		return (HXGE_ERROR);
	}

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_mac_register success "
	    "(instance %d)", hxgep->instance));

	return (HXGE_OK);
}
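
/*
 * hxge_init_common_dev() and hxge_uninit_common_dev() manage the global
 * hxge_hw_list.  Each list element tracks the hardware state shared by
 * all hxge instances attached under the same parent dev_info node,
 * along with the mutexes protecting the common configuration, TCAM and
 * VLAN resources.
 */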
static int
hxge_init_common_dev(p_hxge_t hxgep)
{
	p_hxge_hw_list_t	hw_p;
	dev_info_t		*p_dip;

	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "==> hxge_init_common_dev"));

	p_dip = hxgep->p_dip;
	MUTEX_ENTER(&hxge_common_lock);

	/*
	 * Loop through existing per Hydra hardware list.
	 */
	for (hw_p = hxge_hw_list; hw_p; hw_p = hw_p->next) {
		HXGE_DEBUG_MSG((hxgep, MOD_CTL,
		    "==> hxge_init_common_dev: hw_p $%p parent dip $%p",
		    hw_p, p_dip));
		if (hw_p->parent_devp == p_dip) {
			hxgep->hxge_hw_p = hw_p;
			hw_p->ndevs++;
			hw_p->hxge_p = hxgep;
			HXGE_DEBUG_MSG((hxgep, MOD_CTL,
			    "==> hxge_init_common_dev: "
			    "hw_p $%p parent dip $%p ndevs %d (found)",
			    hw_p, p_dip, hw_p->ndevs));
			break;
		}
	}

	if (hw_p == NULL) {
		HXGE_DEBUG_MSG((hxgep, MOD_CTL,
		    "==> hxge_init_common_dev: parent dip $%p (new)", p_dip));
		hw_p = kmem_zalloc(sizeof (hxge_hw_list_t), KM_SLEEP);
		hw_p->parent_devp = p_dip;
		hw_p->magic = HXGE_MAGIC;
		hxgep->hxge_hw_p = hw_p;
		hw_p->ndevs++;
		hw_p->hxge_p = hxgep;
		hw_p->next = hxge_hw_list;

		MUTEX_INIT(&hw_p->hxge_cfg_lock, NULL, MUTEX_DRIVER, NULL);
		MUTEX_INIT(&hw_p->hxge_tcam_lock, NULL, MUTEX_DRIVER, NULL);
		MUTEX_INIT(&hw_p->hxge_vlan_lock, NULL, MUTEX_DRIVER, NULL);

		hxge_hw_list = hw_p;
	}
	MUTEX_EXIT(&hxge_common_lock);
	HXGE_DEBUG_MSG((hxgep, MOD_CTL,
	    "==> hxge_init_common_dev (hxge_hw_list) $%p", hxge_hw_list));
	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "<== hxge_init_common_dev"));

	return (HXGE_OK);
}

static void
hxge_uninit_common_dev(p_hxge_t hxgep)
{
	p_hxge_hw_list_t	hw_p, h_hw_p;
	dev_info_t		*p_dip;

	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "==> hxge_uninit_common_dev"));
	if (hxgep->hxge_hw_p == NULL) {
		HXGE_DEBUG_MSG((hxgep, MOD_CTL,
		    "<== hxge_uninit_common_dev (no common)"));
		return;
	}

	MUTEX_ENTER(&hxge_common_lock);
	h_hw_p = hxge_hw_list;
	for (hw_p = hxge_hw_list; hw_p; hw_p = hw_p->next) {
		p_dip = hw_p->parent_devp;
		if (hxgep->hxge_hw_p == hw_p && p_dip == hxgep->p_dip &&
		    hxgep->hxge_hw_p->magic == HXGE_MAGIC &&
		    hw_p->magic == HXGE_MAGIC) {
			HXGE_DEBUG_MSG((hxgep, MOD_CTL,
			    "==> hxge_uninit_common_dev: "
			    "hw_p $%p parent dip $%p ndevs %d (found)",
			    hw_p, p_dip, hw_p->ndevs));

			hxgep->hxge_hw_p = NULL;
			if (hw_p->ndevs) {
				hw_p->ndevs--;
			}
			hw_p->hxge_p = NULL;
			if (!hw_p->ndevs) {
				MUTEX_DESTROY(&hw_p->hxge_vlan_lock);
				MUTEX_DESTROY(&hw_p->hxge_tcam_lock);
				MUTEX_DESTROY(&hw_p->hxge_cfg_lock);
				HXGE_DEBUG_MSG((hxgep, MOD_CTL,
				    "==> hxge_uninit_common_dev: "
				    "hw_p $%p parent dip $%p ndevs %d (last)",
				    hw_p, p_dip, hw_p->ndevs));

				if (hw_p == hxge_hw_list) {
					HXGE_DEBUG_MSG((hxgep, MOD_CTL,
					    "==> hxge_uninit_common_dev: "
					    "remove head "
					    "hw_p $%p parent dip $%p "
					    "ndevs %d (head)",
					    hw_p, p_dip, hw_p->ndevs));
					hxge_hw_list = hw_p->next;
				} else {
					HXGE_DEBUG_MSG((hxgep, MOD_CTL,
					    "==> hxge_uninit_common_dev: "
					    "remove middle "
					    "hw_p $%p parent dip $%p "
					    "ndevs %d (middle)",
					    hw_p, p_dip, hw_p->ndevs));
					h_hw_p->next = hw_p->next;
				}

				KMEM_FREE(hw_p, sizeof (hxge_hw_list_t));
			}
			break;
		} else {
			h_hw_p = hw_p;
		}
	}

	MUTEX_EXIT(&hxge_common_lock);
	HXGE_DEBUG_MSG((hxgep, MOD_CTL,
	    "==> hxge_uninit_common_dev (hxge_hw_list) $%p", hxge_hw_list));

	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "<== hxge_uninit_common_dev"));
}

#define	HXGE_MSIX_ENTRIES		32
#define	HXGE_MSIX_WAIT_COUNT		10
#define	HXGE_MSIX_PARITY_CHECK_COUNT	30
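
/*
 * hxge_link_poll() runs as a timeout(9F) callback.  On every expiration
 * it reads the CIP link status register, pushes any link state change up
 * to the MAC layer through hxge_link_update(), periodically verifies one
 * MSI-X table entry against the stored copy, and then re-arms the timer.
 */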
static void
hxge_link_poll(void *arg)
{
	p_hxge_t	hxgep = (p_hxge_t)arg;
	hpi_handle_t	handle;
	cip_link_stat_t	link_stat;
	hxge_timeout	*to = &hxgep->timeout;

	handle = HXGE_DEV_HPI_HANDLE(hxgep);
	HXGE_REG_RD32(handle, CIP_LINK_STAT, &link_stat.value);

	if (to->report_link_status ||
	    (to->link_status != link_stat.bits.xpcs0_link_up)) {
		to->link_status = link_stat.bits.xpcs0_link_up;
		to->report_link_status = B_FALSE;

		if (link_stat.bits.xpcs0_link_up) {
			hxge_link_update(hxgep, LINK_STATE_UP);
		} else {
			hxge_link_update(hxgep, LINK_STATE_DOWN);
		}
	}

	if (hxgep->msix_count++ >= HXGE_MSIX_PARITY_CHECK_COUNT) {
		hxgep->msix_count = 0;
		hxgep->msix_index++;
		if (hxgep->msix_index >= HXGE_MSIX_ENTRIES)
			hxgep->msix_index = 0;
		hxge_check_1entry_msix_table(hxgep, hxgep->msix_index);
	}

	/* Restart the link status timer to check the link status */
	MUTEX_ENTER(&to->lock);
	to->id = timeout(hxge_link_poll, arg, to->ticks);
	MUTEX_EXIT(&to->lock);
}

static void
hxge_link_update(p_hxge_t hxgep, link_state_t state)
{
	p_hxge_stats_t	statsp = (p_hxge_stats_t)hxgep->statsp;

	mac_link_update(hxgep->mach, state);
	if (state == LINK_STATE_UP) {
		statsp->mac_stats.link_speed = 10000;
		statsp->mac_stats.link_duplex = 2;
		statsp->mac_stats.link_up = 1;
	} else {
		statsp->mac_stats.link_speed = 0;
		statsp->mac_stats.link_duplex = 0;
		statsp->mac_stats.link_up = 0;
	}
}
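
/*
 * The next three routines manage the HXGE_MSIX_ENTRIES-entry MSI-X table
 * mapped through hpi_msi_handle: hxge_msix_init() writes and reads back
 * every entry, hxge_store_msix_table() snapshots the table, and
 * hxge_check_1entry_msix_table() compares a single entry against that
 * snapshot, counting a PEU parity error and posting an FMA ereport on
 * the first mismatch.
 */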
static void
hxge_msix_init(p_hxge_t hxgep)
{
	uint32_t	data0;
	uint32_t	data1;
	uint32_t	data2;
	int		i;
	uint32_t	msix_entry0;
	uint32_t	msix_entry1;
	uint32_t	msix_entry2;
	uint32_t	msix_entry3;

	/* Change to use MSIx bar instead of indirect access */
	for (i = 0; i < HXGE_MSIX_ENTRIES; i++) {
		data0 = 0xffffffff - i;
		data1 = 0xffffffff - i - 1;
		data2 = 0xffffffff - i - 2;

		HXGE_REG_WR32(hxgep->hpi_msi_handle, i * 16, data0);
		HXGE_REG_WR32(hxgep->hpi_msi_handle, i * 16 + 4, data1);
		HXGE_REG_WR32(hxgep->hpi_msi_handle, i * 16 + 8, data2);
	}

	/* Initialize ram data out buffer. */
	for (i = 0; i < HXGE_MSIX_ENTRIES; i++) {
		HXGE_REG_RD32(hxgep->hpi_msi_handle, i * 16, &msix_entry0);
		HXGE_REG_RD32(hxgep->hpi_msi_handle, i * 16 + 4, &msix_entry1);
		HXGE_REG_RD32(hxgep->hpi_msi_handle, i * 16 + 8, &msix_entry2);
		HXGE_REG_RD32(hxgep->hpi_msi_handle, i * 16 + 12,
		    &msix_entry3);
	}
}

static void
hxge_store_msix_table(p_hxge_t hxgep)
{
	int		i;
	uint32_t	msix_entry0;
	uint32_t	msix_entry1;
	uint32_t	msix_entry2;

	for (i = 0; i < HXGE_MSIX_ENTRIES; i++) {
		HXGE_REG_RD32(hxgep->hpi_msi_handle, i * 16, &msix_entry0);
		HXGE_REG_RD32(hxgep->hpi_msi_handle, i * 16 + 4,
		    &msix_entry1);
		HXGE_REG_RD32(hxgep->hpi_msi_handle, i * 16 + 8,
		    &msix_entry2);

		hxgep->msix_table[i][0] = msix_entry0;
		hxgep->msix_table[i][1] = msix_entry1;
		hxgep->msix_table[i][2] = msix_entry2;
	}
}

static void
hxge_check_1entry_msix_table(p_hxge_t hxgep, int i)
{
	uint32_t		msix_entry0;
	uint32_t		msix_entry1;
	uint32_t		msix_entry2;
	p_hxge_peu_sys_stats_t	statsp;

	statsp = (p_hxge_peu_sys_stats_t)&hxgep->statsp->peu_sys_stats;

	HXGE_REG_RD32(hxgep->hpi_msi_handle, i * 16, &msix_entry0);
	HXGE_REG_RD32(hxgep->hpi_msi_handle, i * 16 + 4, &msix_entry1);
	HXGE_REG_RD32(hxgep->hpi_msi_handle, i * 16 + 8, &msix_entry2);

	hxgep->msix_table_check[i][0] = msix_entry0;
	hxgep->msix_table_check[i][1] = msix_entry1;
	hxgep->msix_table_check[i][2] = msix_entry2;

	if ((hxgep->msix_table[i][0] != hxgep->msix_table_check[i][0]) ||
	    (hxgep->msix_table[i][1] != hxgep->msix_table_check[i][1]) ||
	    (hxgep->msix_table[i][2] != hxgep->msix_table_check[i][2])) {
		statsp->eic_msix_parerr++;
		if (statsp->eic_msix_parerr == 1) {
			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
			    "==> hxge_check_1entry_msix_table: "
			    "eic_msix_parerr at index: %d", i));
			HXGE_FM_REPORT_ERROR(hxgep, NULL,
			    HXGE_FM_EREPORT_PEU_ERR);
		}
	}
}

/*
 * The following function is to support
 * PSARC/2007/453 MSI-X interrupt limit override.
 */
static int
hxge_create_msi_property(p_hxge_t hxgep)
{
	int	nmsi;
	extern int ncpus;

	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "==> hxge_create_msi_property"));

	(void) ddi_prop_create(DDI_DEV_T_NONE, hxgep->dip,
	    DDI_PROP_CANSLEEP, "#msix-request", NULL, 0);
	/*
	 * The maximum MSI-X requested will be 8.
	 * If the # of CPUs is less than 8, we will request
	 * # MSI-X based on the # of CPUs.
	 */
	if (ncpus >= HXGE_MSIX_REQUEST_10G) {
		nmsi = HXGE_MSIX_REQUEST_10G;
	} else {
		nmsi = ncpus;
	}

	HXGE_DEBUG_MSG((hxgep, MOD_CTL,
	    "==> hxge_create_msi_property(10G): exists 0x%x (nmsi %d)",
	    ddi_prop_exists(DDI_DEV_T_NONE, hxgep->dip,
	    DDI_PROP_CANSLEEP, "#msix-request"), nmsi));

	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "<== hxge_create_msi_property"));
	return (nmsi);
}
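
/*
 * Note on the "#msix-request" property created above: it is the hook
 * defined by PSARC/2007/453, and on SPARC platforms the parent nexus
 * presumably checks for this zero-length property when deciding whether
 * to grant the device more MSI-X vectors than the platform default.
 * The nmsi value returned here is what the driver itself requests,
 * capped at HXGE_MSIX_REQUEST_10G or at ncpus, whichever is smaller.
 */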