1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 /* 27 * SunOs MT STREAMS Hydra 10Gb Ethernet Device Driver. 28 */ 29 #include <hxge_impl.h> 30 #include <hxge_pfc.h> 31 32 /* 33 * PSARC/2007/453 MSI-X interrupt limit override 34 * (This PSARC case is limited to MSI-X vectors 35 * and SPARC platforms only). 36 */ 37 #if defined(_BIG_ENDIAN) 38 uint32_t hxge_msi_enable = 2; 39 #else 40 uint32_t hxge_msi_enable = 1; 41 #endif 42 43 /* 44 * Globals: tunable parameters (/etc/system or adb) 45 * 46 */ 47 uint32_t hxge_rbr_size = HXGE_RBR_RBB_DEFAULT; 48 uint32_t hxge_rbr_spare_size = 0; 49 uint32_t hxge_rcr_size = HXGE_RCR_DEFAULT; 50 uint32_t hxge_tx_ring_size = HXGE_TX_RING_DEFAULT; 51 uint32_t hxge_bcopy_thresh = TX_BCOPY_MAX; 52 uint32_t hxge_dvma_thresh = TX_FASTDVMA_MIN; 53 uint32_t hxge_dma_stream_thresh = TX_STREAM_MIN; 54 uint32_t hxge_jumbo_frame_size = MAX_FRAME_SIZE; 55 56 static hxge_os_mutex_t hxgedebuglock; 57 static int hxge_debug_init = 0; 58 59 /* 60 * Debugging flags: 61 * hxge_no_tx_lb : transmit load balancing 62 * hxge_tx_lb_policy: 0 - TCP/UDP port (default) 63 * 1 - From the Stack 64 * 2 - Destination IP Address 65 */ 66 uint32_t hxge_no_tx_lb = 0; 67 uint32_t hxge_tx_lb_policy = HXGE_TX_LB_TCPUDP; 68 69 /* 70 * Add tunable to reduce the amount of time spent in the 71 * ISR doing Rx Processing. 72 */ 73 uint32_t hxge_max_rx_pkts = 1024; 74 75 /* 76 * Tunables to manage the receive buffer blocks. 77 * 78 * hxge_rx_threshold_hi: copy all buffers. 79 * hxge_rx_bcopy_size_type: receive buffer block size type. 80 * hxge_rx_threshold_lo: copy only up to tunable block size type. 
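 *
 * Like the other tunables above, these can be overridden at boot time
 * from /etc/system.  A minimal illustrative example (the values shown
 * are examples, not recommendations):
 *
 *	set hxge:hxge_rbr_size = 2048
 *	set hxge:hxge_max_rx_pkts = 512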
81 */ 82 hxge_rxbuf_threshold_t hxge_rx_threshold_hi = HXGE_RX_COPY_6; 83 hxge_rxbuf_type_t hxge_rx_buf_size_type = RCR_PKTBUFSZ_0; 84 hxge_rxbuf_threshold_t hxge_rx_threshold_lo = HXGE_RX_COPY_3; 85 86 rtrace_t hpi_rtracebuf; 87 88 /* 89 * Function Prototypes 90 */ 91 static int hxge_attach(dev_info_t *, ddi_attach_cmd_t); 92 static int hxge_detach(dev_info_t *, ddi_detach_cmd_t); 93 static void hxge_unattach(p_hxge_t); 94 95 static hxge_status_t hxge_setup_system_dma_pages(p_hxge_t); 96 97 static hxge_status_t hxge_setup_mutexes(p_hxge_t); 98 static void hxge_destroy_mutexes(p_hxge_t); 99 100 static hxge_status_t hxge_map_regs(p_hxge_t hxgep); 101 static void hxge_unmap_regs(p_hxge_t hxgep); 102 103 hxge_status_t hxge_add_intrs(p_hxge_t hxgep); 104 static hxge_status_t hxge_add_soft_intrs(p_hxge_t hxgep); 105 static void hxge_remove_intrs(p_hxge_t hxgep); 106 static void hxge_remove_soft_intrs(p_hxge_t hxgep); 107 static hxge_status_t hxge_add_intrs_adv(p_hxge_t hxgep); 108 static hxge_status_t hxge_add_intrs_adv_type(p_hxge_t, uint32_t); 109 static hxge_status_t hxge_add_intrs_adv_type_fix(p_hxge_t, uint32_t); 110 void hxge_intrs_enable(p_hxge_t hxgep); 111 static void hxge_intrs_disable(p_hxge_t hxgep); 112 static void hxge_suspend(p_hxge_t); 113 static hxge_status_t hxge_resume(p_hxge_t); 114 hxge_status_t hxge_setup_dev(p_hxge_t); 115 static void hxge_destroy_dev(p_hxge_t); 116 hxge_status_t hxge_alloc_mem_pool(p_hxge_t); 117 static void hxge_free_mem_pool(p_hxge_t); 118 static hxge_status_t hxge_alloc_rx_mem_pool(p_hxge_t); 119 static void hxge_free_rx_mem_pool(p_hxge_t); 120 static hxge_status_t hxge_alloc_tx_mem_pool(p_hxge_t); 121 static void hxge_free_tx_mem_pool(p_hxge_t); 122 static hxge_status_t hxge_dma_mem_alloc(p_hxge_t, dma_method_t, 123 struct ddi_dma_attr *, size_t, ddi_device_acc_attr_t *, uint_t, 124 p_hxge_dma_common_t); 125 static void hxge_dma_mem_free(p_hxge_dma_common_t); 126 static hxge_status_t hxge_alloc_rx_buf_dma(p_hxge_t, uint16_t, 127 p_hxge_dma_common_t *, size_t, size_t, uint32_t *); 128 static void hxge_free_rx_buf_dma(p_hxge_t, p_hxge_dma_common_t, uint32_t); 129 static hxge_status_t hxge_alloc_rx_cntl_dma(p_hxge_t, uint16_t, 130 p_hxge_dma_common_t *, struct ddi_dma_attr *, size_t); 131 static void hxge_free_rx_cntl_dma(p_hxge_t, p_hxge_dma_common_t); 132 static hxge_status_t hxge_alloc_tx_buf_dma(p_hxge_t, uint16_t, 133 p_hxge_dma_common_t *, size_t, size_t, uint32_t *); 134 static void hxge_free_tx_buf_dma(p_hxge_t, p_hxge_dma_common_t, uint32_t); 135 static hxge_status_t hxge_alloc_tx_cntl_dma(p_hxge_t, uint16_t, 136 p_hxge_dma_common_t *, size_t); 137 static void hxge_free_tx_cntl_dma(p_hxge_t, p_hxge_dma_common_t); 138 static int hxge_init_common_dev(p_hxge_t); 139 static void hxge_uninit_common_dev(p_hxge_t); 140 141 /* 142 * The next declarations are for the GLDv3 interface. 
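 *
 * These entry points are published to the MAC layer through the
 * hxge_m_callbacks table below.  hxge_mac_register(), later in this
 * file, is the driver's actual registration routine; the fragment
 * below is only a rough sketch of the usual GLDv3 pattern (macp and
 * mtu are placeholders, not driver symbols):
 *
 *	mac_register_t *macp = mac_alloc(MAC_VERSION);
 *	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
 *	macp->m_driver = hxgep;
 *	macp->m_dip = hxgep->dip;
 *	macp->m_callbacks = &hxge_m_callbacks;
 *	macp->m_min_sdu = 0;
 *	macp->m_max_sdu = mtu;		(driver-specific maximum SDU)
 *	status = mac_register(macp, &hxgep->mach);
 *	mac_free(macp);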
143 */ 144 static int hxge_m_start(void *); 145 static void hxge_m_stop(void *); 146 static int hxge_m_unicst(void *, const uint8_t *); 147 static int hxge_m_multicst(void *, boolean_t, const uint8_t *); 148 static int hxge_m_promisc(void *, boolean_t); 149 static void hxge_m_ioctl(void *, queue_t *, mblk_t *); 150 static void hxge_m_resources(void *); 151 static hxge_status_t hxge_mac_register(p_hxge_t hxgep); 152 153 static int hxge_m_mmac_add(void *arg, mac_multi_addr_t *maddr); 154 static int hxge_m_mmac_remove(void *arg, mac_addr_slot_t slot); 155 static int hxge_m_mmac_modify(void *arg, mac_multi_addr_t *maddr); 156 static int hxge_m_mmac_get(void *arg, mac_multi_addr_t *maddr); 157 static boolean_t hxge_m_getcapab(void *, mac_capab_t, void *); 158 static boolean_t hxge_param_locked(mac_prop_id_t pr_num); 159 static int hxge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num, 160 uint_t pr_valsize, const void *pr_val); 161 static int hxge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num, 162 uint_t pr_flags, uint_t pr_valsize, void *pr_val, uint_t *); 163 static int hxge_get_def_val(hxge_t *hxgep, mac_prop_id_t pr_num, 164 uint_t pr_valsize, void *pr_val); 165 static int hxge_set_priv_prop(p_hxge_t hxgep, const char *pr_name, 166 uint_t pr_valsize, const void *pr_val); 167 static int hxge_get_priv_prop(p_hxge_t hxgep, const char *pr_name, 168 uint_t pr_flags, uint_t pr_valsize, void *pr_val); 169 static void hxge_link_poll(void *arg); 170 static void hxge_link_update(p_hxge_t hxge, link_state_t state); 171 172 mac_priv_prop_t hxge_priv_props[] = { 173 {"_rxdma_intr_time", MAC_PROP_PERM_RW}, 174 {"_rxdma_intr_pkts", MAC_PROP_PERM_RW}, 175 {"_class_opt_ipv4_tcp", MAC_PROP_PERM_RW}, 176 {"_class_opt_ipv4_udp", MAC_PROP_PERM_RW}, 177 {"_class_opt_ipv4_ah", MAC_PROP_PERM_RW}, 178 {"_class_opt_ipv4_sctp", MAC_PROP_PERM_RW}, 179 {"_class_opt_ipv6_tcp", MAC_PROP_PERM_RW}, 180 {"_class_opt_ipv6_udp", MAC_PROP_PERM_RW}, 181 {"_class_opt_ipv6_ah", MAC_PROP_PERM_RW}, 182 {"_class_opt_ipv6_sctp", MAC_PROP_PERM_RW} 183 }; 184 185 #define HXGE_MAX_PRIV_PROPS \ 186 (sizeof (hxge_priv_props)/sizeof (mac_priv_prop_t)) 187 188 #define HXGE_MAGIC 0x4E584745UL 189 #define MAX_DUMP_SZ 256 190 191 #define HXGE_M_CALLBACK_FLAGS \ 192 (MC_RESOURCES | MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP) 193 194 extern mblk_t *hxge_m_tx(void *arg, mblk_t *mp); 195 extern hxge_status_t hxge_pfc_set_default_mac_addr(p_hxge_t hxgep); 196 197 static mac_callbacks_t hxge_m_callbacks = { 198 HXGE_M_CALLBACK_FLAGS, 199 hxge_m_stat, 200 hxge_m_start, 201 hxge_m_stop, 202 hxge_m_promisc, 203 hxge_m_multicst, 204 hxge_m_unicst, 205 hxge_m_tx, 206 hxge_m_resources, 207 hxge_m_ioctl, 208 hxge_m_getcapab, 209 NULL, 210 NULL, 211 hxge_m_setprop, 212 hxge_m_getprop 213 }; 214 215 /* Enable debug messages as necessary. */ 216 uint64_t hxge_debug_level = 0; 217 218 /* 219 * This list contains the instance structures for the Hydra 220 * devices present in the system. The lock exists to guarantee 221 * mutually exclusive access to the list. 
222 */ 223 void *hxge_list = NULL; 224 void *hxge_hw_list = NULL; 225 hxge_os_mutex_t hxge_common_lock; 226 227 extern uint64_t hpi_debug_level; 228 229 extern hxge_status_t hxge_ldgv_init(); 230 extern hxge_status_t hxge_ldgv_uninit(); 231 extern hxge_status_t hxge_intr_ldgv_init(); 232 extern void hxge_fm_init(p_hxge_t hxgep, ddi_device_acc_attr_t *reg_attr, 233 ddi_device_acc_attr_t *desc_attr, ddi_dma_attr_t *dma_attr); 234 extern void hxge_fm_fini(p_hxge_t hxgep); 235 236 /* 237 * Count used to maintain the number of buffers being used 238 * by Hydra instances and loaned up to the upper layers. 239 */ 240 uint32_t hxge_mblks_pending = 0; 241 242 /* 243 * Device register access attributes for PIO. 244 */ 245 static ddi_device_acc_attr_t hxge_dev_reg_acc_attr = { 246 DDI_DEVICE_ATTR_V0, 247 DDI_STRUCTURE_LE_ACC, 248 DDI_STRICTORDER_ACC, 249 }; 250 251 /* 252 * Device descriptor access attributes for DMA. 253 */ 254 static ddi_device_acc_attr_t hxge_dev_desc_dma_acc_attr = { 255 DDI_DEVICE_ATTR_V0, 256 DDI_STRUCTURE_LE_ACC, 257 DDI_STRICTORDER_ACC 258 }; 259 260 /* 261 * Device buffer access attributes for DMA. 262 */ 263 static ddi_device_acc_attr_t hxge_dev_buf_dma_acc_attr = { 264 DDI_DEVICE_ATTR_V0, 265 DDI_STRUCTURE_BE_ACC, 266 DDI_STRICTORDER_ACC 267 }; 268 269 ddi_dma_attr_t hxge_rx_rcr_desc_dma_attr = { 270 DMA_ATTR_V0, /* version number. */ 271 0, /* low address */ 272 0xffffffffffffffff, /* high address */ 273 0xffffffffffffffff, /* address counter max */ 274 0x80000, /* alignment */ 275 0xfc00fc, /* dlim_burstsizes */ 276 0x1, /* minimum transfer size */ 277 0xffffffffffffffff, /* maximum transfer size */ 278 0xffffffffffffffff, /* maximum segment size */ 279 1, /* scatter/gather list length */ 280 (unsigned int)1, /* granularity */ 281 0 /* attribute flags */ 282 }; 283 284 ddi_dma_attr_t hxge_tx_desc_dma_attr = { 285 DMA_ATTR_V0, /* version number. */ 286 0, /* low address */ 287 0xffffffffffffffff, /* high address */ 288 0xffffffffffffffff, /* address counter max */ 289 0x100000, /* alignment */ 290 0xfc00fc, /* dlim_burstsizes */ 291 0x1, /* minimum transfer size */ 292 0xffffffffffffffff, /* maximum transfer size */ 293 0xffffffffffffffff, /* maximum segment size */ 294 1, /* scatter/gather list length */ 295 (unsigned int)1, /* granularity */ 296 0 /* attribute flags */ 297 }; 298 299 ddi_dma_attr_t hxge_rx_rbr_desc_dma_attr = { 300 DMA_ATTR_V0, /* version number. */ 301 0, /* low address */ 302 0xffffffffffffffff, /* high address */ 303 0xffffffffffffffff, /* address counter max */ 304 0x40000, /* alignment */ 305 0xfc00fc, /* dlim_burstsizes */ 306 0x1, /* minimum transfer size */ 307 0xffffffffffffffff, /* maximum transfer size */ 308 0xffffffffffffffff, /* maximum segment size */ 309 1, /* scatter/gather list length */ 310 (unsigned int)1, /* granularity */ 311 0 /* attribute flags */ 312 }; 313 314 ddi_dma_attr_t hxge_rx_mbox_dma_attr = { 315 DMA_ATTR_V0, /* version number. */ 316 0, /* low address */ 317 0xffffffffffffffff, /* high address */ 318 0xffffffffffffffff, /* address counter max */ 319 #if defined(_BIG_ENDIAN) 320 0x2000, /* alignment */ 321 #else 322 0x1000, /* alignment */ 323 #endif 324 0xfc00fc, /* dlim_burstsizes */ 325 0x1, /* minimum transfer size */ 326 0xffffffffffffffff, /* maximum transfer size */ 327 0xffffffffffffffff, /* maximum segment size */ 328 5, /* scatter/gather list length */ 329 (unsigned int)1, /* granularity */ 330 0 /* attribute flags */ 331 }; 332 333 ddi_dma_attr_t hxge_tx_dma_attr = { 334 DMA_ATTR_V0, /* version number. 
*/ 335 0, /* low address */ 336 0xffffffffffffffff, /* high address */ 337 0xffffffffffffffff, /* address counter max */ 338 #if defined(_BIG_ENDIAN) 339 0x2000, /* alignment */ 340 #else 341 0x1000, /* alignment */ 342 #endif 343 0xfc00fc, /* dlim_burstsizes */ 344 0x1, /* minimum transfer size */ 345 0xffffffffffffffff, /* maximum transfer size */ 346 0xffffffffffffffff, /* maximum segment size */ 347 5, /* scatter/gather list length */ 348 (unsigned int)1, /* granularity */ 349 0 /* attribute flags */ 350 }; 351 352 ddi_dma_attr_t hxge_rx_dma_attr = { 353 DMA_ATTR_V0, /* version number. */ 354 0, /* low address */ 355 0xffffffffffffffff, /* high address */ 356 0xffffffffffffffff, /* address counter max */ 357 0x10000, /* alignment */ 358 0xfc00fc, /* dlim_burstsizes */ 359 0x1, /* minimum transfer size */ 360 0xffffffffffffffff, /* maximum transfer size */ 361 0xffffffffffffffff, /* maximum segment size */ 362 1, /* scatter/gather list length */ 363 (unsigned int)1, /* granularity */ 364 DDI_DMA_RELAXED_ORDERING /* attribute flags */ 365 }; 366 367 ddi_dma_lim_t hxge_dma_limits = { 368 (uint_t)0, /* dlim_addr_lo */ 369 (uint_t)0xffffffff, /* dlim_addr_hi */ 370 (uint_t)0xffffffff, /* dlim_cntr_max */ 371 (uint_t)0xfc00fc, /* dlim_burstsizes for 32 and 64 bit xfers */ 372 0x1, /* dlim_minxfer */ 373 1024 /* dlim_speed */ 374 }; 375 376 dma_method_t hxge_force_dma = DVMA; 377 378 /* 379 * dma chunk sizes. 380 * 381 * Try to allocate the largest possible size 382 * so that fewer number of dma chunks would be managed 383 */ 384 size_t alloc_sizes[] = { 385 0x1000, 0x2000, 0x4000, 0x8000, 386 0x10000, 0x20000, 0x40000, 0x80000, 387 0x100000, 0x200000, 0x400000, 0x800000, 0x1000000 388 }; 389 390 /* 391 * Translate "dev_t" to a pointer to the associated "dev_info_t". 392 */ 393 static int 394 hxge_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 395 { 396 p_hxge_t hxgep = NULL; 397 int instance; 398 int status = DDI_SUCCESS; 399 400 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_attach")); 401 402 /* 403 * Get the device instance since we'll need to setup or retrieve a soft 404 * state for this instance. 
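 *
 * (The instance number is the key into the hxge_list soft-state anchor:
 * the DDI_ATTACH path below allocates a new p_hxge_t with
 * ddi_soft_state_zalloc(), while the DDI_RESUME and DDI_PM_RESUME paths
 * look up the existing one with ddi_get_soft_state().)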
405 */ 406 instance = ddi_get_instance(dip); 407 408 switch (cmd) { 409 case DDI_ATTACH: 410 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_ATTACH")); 411 break; 412 413 case DDI_RESUME: 414 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_RESUME")); 415 hxgep = (p_hxge_t)ddi_get_soft_state(hxge_list, instance); 416 if (hxgep == NULL) { 417 status = DDI_FAILURE; 418 break; 419 } 420 if (hxgep->dip != dip) { 421 status = DDI_FAILURE; 422 break; 423 } 424 if (hxgep->suspended == DDI_PM_SUSPEND) { 425 status = ddi_dev_is_needed(hxgep->dip, 0, 1); 426 } else { 427 (void) hxge_resume(hxgep); 428 } 429 goto hxge_attach_exit; 430 431 case DDI_PM_RESUME: 432 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_PM_RESUME")); 433 hxgep = (p_hxge_t)ddi_get_soft_state(hxge_list, instance); 434 if (hxgep == NULL) { 435 status = DDI_FAILURE; 436 break; 437 } 438 if (hxgep->dip != dip) { 439 status = DDI_FAILURE; 440 break; 441 } 442 (void) hxge_resume(hxgep); 443 goto hxge_attach_exit; 444 445 default: 446 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing unknown")); 447 status = DDI_FAILURE; 448 goto hxge_attach_exit; 449 } 450 451 if (ddi_soft_state_zalloc(hxge_list, instance) == DDI_FAILURE) { 452 status = DDI_FAILURE; 453 HXGE_ERROR_MSG((hxgep, DDI_CTL, 454 "ddi_soft_state_zalloc failed")); 455 goto hxge_attach_exit; 456 } 457 458 hxgep = ddi_get_soft_state(hxge_list, instance); 459 if (hxgep == NULL) { 460 status = HXGE_ERROR; 461 HXGE_ERROR_MSG((hxgep, DDI_CTL, 462 "ddi_get_soft_state failed")); 463 goto hxge_attach_fail2; 464 } 465 466 hxgep->drv_state = 0; 467 hxgep->dip = dip; 468 hxgep->instance = instance; 469 hxgep->p_dip = ddi_get_parent(dip); 470 hxgep->hxge_debug_level = hxge_debug_level; 471 hpi_debug_level = hxge_debug_level; 472 473 hxge_fm_init(hxgep, &hxge_dev_reg_acc_attr, &hxge_dev_desc_dma_acc_attr, 474 &hxge_rx_dma_attr); 475 476 status = hxge_map_regs(hxgep); 477 if (status != HXGE_OK) { 478 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "hxge_map_regs failed")); 479 goto hxge_attach_fail3; 480 } 481 482 status = hxge_init_common_dev(hxgep); 483 if (status != HXGE_OK) { 484 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 485 "hxge_init_common_dev failed")); 486 goto hxge_attach_fail4; 487 } 488 489 /* 490 * Setup the Ndd parameters for this instance. 491 */ 492 hxge_init_param(hxgep); 493 494 /* 495 * Setup Register Tracing Buffer. 496 */ 497 hpi_rtrace_buf_init((rtrace_t *)&hpi_rtracebuf); 498 499 /* init stats ptr */ 500 hxge_init_statsp(hxgep); 501 502 status = hxge_setup_mutexes(hxgep); 503 if (status != HXGE_OK) { 504 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "set mutex failed")); 505 goto hxge_attach_fail; 506 } 507 508 status = hxge_get_config_properties(hxgep); 509 if (status != HXGE_OK) { 510 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "get_hw create failed")); 511 goto hxge_attach_fail; 512 } 513 514 /* 515 * Setup the Kstats for the driver. 
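 *
 * (The kstats and parameters set up here are torn down on the
 * hxge_attach_fail5 error path and again in hxge_unattach(), via
 * hxge_destroy_kstats() and hxge_destroy_param().)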
516 */ 517 hxge_setup_kstats(hxgep); 518 hxge_setup_param(hxgep); 519 520 status = hxge_setup_system_dma_pages(hxgep); 521 if (status != HXGE_OK) { 522 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "set dma page failed")); 523 goto hxge_attach_fail; 524 } 525 526 hxge_hw_id_init(hxgep); 527 hxge_hw_init_niu_common(hxgep); 528 529 status = hxge_setup_dev(hxgep); 530 if (status != DDI_SUCCESS) { 531 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "set dev failed")); 532 goto hxge_attach_fail; 533 } 534 535 status = hxge_add_intrs(hxgep); 536 if (status != DDI_SUCCESS) { 537 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "add_intr failed")); 538 goto hxge_attach_fail; 539 } 540 541 status = hxge_add_soft_intrs(hxgep); 542 if (status != DDI_SUCCESS) { 543 HXGE_DEBUG_MSG((hxgep, HXGE_ERR_CTL, "add_soft_intr failed")); 544 goto hxge_attach_fail; 545 } 546 547 /* 548 * Enable interrupts. 549 */ 550 hxge_intrs_enable(hxgep); 551 552 /* 553 * Take off all peu parity error mask here after ddi_intr_enable 554 * is called 555 */ 556 HXGE_REG_WR32(hxgep->hpi_handle, PEU_INTR_MASK, 0x0); 557 558 if ((status = hxge_mac_register(hxgep)) != HXGE_OK) { 559 HXGE_DEBUG_MSG((hxgep, DDI_CTL, 560 "unable to register to mac layer (%d)", status)); 561 goto hxge_attach_fail; 562 } 563 mac_link_update(hxgep->mach, LINK_STATE_UNKNOWN); 564 565 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "registered to mac (instance %d)", 566 instance)); 567 568 goto hxge_attach_exit; 569 570 hxge_attach_fail: 571 hxge_unattach(hxgep); 572 goto hxge_attach_fail1; 573 574 hxge_attach_fail5: 575 /* 576 * Tear down the ndd parameters setup. 577 */ 578 hxge_destroy_param(hxgep); 579 580 /* 581 * Tear down the kstat setup. 582 */ 583 hxge_destroy_kstats(hxgep); 584 585 hxge_attach_fail4: 586 if (hxgep->hxge_hw_p) { 587 hxge_uninit_common_dev(hxgep); 588 hxgep->hxge_hw_p = NULL; 589 } 590 hxge_attach_fail3: 591 /* 592 * Unmap the register setup. 593 */ 594 hxge_unmap_regs(hxgep); 595 596 hxge_fm_fini(hxgep); 597 598 hxge_attach_fail2: 599 ddi_soft_state_free(hxge_list, hxgep->instance); 600 601 hxge_attach_fail1: 602 if (status != HXGE_OK) 603 status = (HXGE_ERROR | HXGE_DDI_FAILED); 604 hxgep = NULL; 605 606 hxge_attach_exit: 607 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_attach status = 0x%08x", 608 status)); 609 610 return (status); 611 } 612 613 static int 614 hxge_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 615 { 616 int status = DDI_SUCCESS; 617 int instance; 618 p_hxge_t hxgep = NULL; 619 620 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_detach")); 621 instance = ddi_get_instance(dip); 622 hxgep = ddi_get_soft_state(hxge_list, instance); 623 if (hxgep == NULL) { 624 status = DDI_FAILURE; 625 goto hxge_detach_exit; 626 } 627 628 switch (cmd) { 629 case DDI_DETACH: 630 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_DETACH")); 631 break; 632 633 case DDI_PM_SUSPEND: 634 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_PM_SUSPEND")); 635 hxgep->suspended = DDI_PM_SUSPEND; 636 hxge_suspend(hxgep); 637 break; 638 639 case DDI_SUSPEND: 640 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_SUSPEND")); 641 if (hxgep->suspended != DDI_PM_SUSPEND) { 642 hxgep->suspended = DDI_SUSPEND; 643 hxge_suspend(hxgep); 644 } 645 break; 646 647 default: 648 status = DDI_FAILURE; 649 break; 650 } 651 652 if (cmd != DDI_DETACH) 653 goto hxge_detach_exit; 654 655 /* 656 * Stop the xcvr polling. 
657 */ 658 hxgep->suspended = cmd; 659 660 if (hxgep->mach && (status = mac_unregister(hxgep->mach)) != 0) { 661 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 662 "<== hxge_detach status = 0x%08X", status)); 663 return (DDI_FAILURE); 664 } 665 HXGE_DEBUG_MSG((hxgep, DDI_CTL, 666 "<== hxge_detach (mac_unregister) status = 0x%08X", status)); 667 668 hxge_unattach(hxgep); 669 hxgep = NULL; 670 671 hxge_detach_exit: 672 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_detach status = 0x%08X", 673 status)); 674 675 return (status); 676 } 677 678 static void 679 hxge_unattach(p_hxge_t hxgep) 680 { 681 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_unattach")); 682 683 if (hxgep == NULL || hxgep->dev_regs == NULL) { 684 return; 685 } 686 687 if (hxgep->hxge_hw_p) { 688 hxge_uninit_common_dev(hxgep); 689 hxgep->hxge_hw_p = NULL; 690 } 691 692 if (hxgep->hxge_timerid) { 693 hxge_stop_timer(hxgep, hxgep->hxge_timerid); 694 hxgep->hxge_timerid = 0; 695 } 696 697 /* Stop any further interrupts. */ 698 hxge_remove_intrs(hxgep); 699 700 /* Remove soft interrups */ 701 hxge_remove_soft_intrs(hxgep); 702 703 /* Stop the device and free resources. */ 704 hxge_destroy_dev(hxgep); 705 706 /* Tear down the ndd parameters setup. */ 707 hxge_destroy_param(hxgep); 708 709 /* Tear down the kstat setup. */ 710 hxge_destroy_kstats(hxgep); 711 712 /* 713 * Remove the list of ndd parameters which were setup during attach. 714 */ 715 if (hxgep->dip) { 716 HXGE_DEBUG_MSG((hxgep, OBP_CTL, 717 " hxge_unattach: remove all properties")); 718 (void) ddi_prop_remove_all(hxgep->dip); 719 } 720 721 /* 722 * Reset RDC, TDC, PFC, and VMAC blocks from PEU to clear any 723 * previous state before unmapping the registers. 724 */ 725 HXGE_REG_WR32(hxgep->hpi_handle, BLOCK_RESET, 0x0000001E); 726 HXGE_DELAY(1000); 727 728 /* 729 * Unmap the register setup. 730 */ 731 hxge_unmap_regs(hxgep); 732 733 hxge_fm_fini(hxgep); 734 735 /* Destroy all mutexes. */ 736 hxge_destroy_mutexes(hxgep); 737 738 /* 739 * Free the soft state data structures allocated with this instance. 
740 */ 741 ddi_soft_state_free(hxge_list, hxgep->instance); 742 743 HXGE_DEBUG_MSG((NULL, DDI_CTL, "<== hxge_unattach")); 744 } 745 746 static hxge_status_t 747 hxge_map_regs(p_hxge_t hxgep) 748 { 749 int ddi_status = DDI_SUCCESS; 750 p_dev_regs_t dev_regs; 751 752 #ifdef HXGE_DEBUG 753 char *sysname; 754 #endif 755 756 off_t regsize; 757 hxge_status_t status = HXGE_OK; 758 int nregs; 759 760 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_map_regs")); 761 762 if (ddi_dev_nregs(hxgep->dip, &nregs) != DDI_SUCCESS) 763 return (HXGE_ERROR); 764 765 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "hxge_map_regs: nregs: %d", nregs)); 766 767 hxgep->dev_regs = NULL; 768 dev_regs = KMEM_ZALLOC(sizeof (dev_regs_t), KM_SLEEP); 769 dev_regs->hxge_regh = NULL; 770 dev_regs->hxge_pciregh = NULL; 771 dev_regs->hxge_msix_regh = NULL; 772 773 (void) ddi_dev_regsize(hxgep->dip, 0, ®size); 774 HXGE_DEBUG_MSG((hxgep, DDI_CTL, 775 "hxge_map_regs: pci config size 0x%x", regsize)); 776 777 ddi_status = ddi_regs_map_setup(hxgep->dip, 0, 778 (caddr_t *)&(dev_regs->hxge_pciregp), 0, 0, 779 &hxge_dev_reg_acc_attr, &dev_regs->hxge_pciregh); 780 if (ddi_status != DDI_SUCCESS) { 781 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 782 "ddi_map_regs, hxge bus config regs failed")); 783 goto hxge_map_regs_fail0; 784 } 785 786 HXGE_DEBUG_MSG((hxgep, DDI_CTL, 787 "hxge_map_reg: PCI config addr 0x%0llx handle 0x%0llx", 788 dev_regs->hxge_pciregp, 789 dev_regs->hxge_pciregh)); 790 791 (void) ddi_dev_regsize(hxgep->dip, 1, ®size); 792 HXGE_DEBUG_MSG((hxgep, DDI_CTL, 793 "hxge_map_regs: pio size 0x%x", regsize)); 794 795 /* set up the device mapped register */ 796 ddi_status = ddi_regs_map_setup(hxgep->dip, 1, 797 (caddr_t *)&(dev_regs->hxge_regp), 0, 0, 798 &hxge_dev_reg_acc_attr, &dev_regs->hxge_regh); 799 800 if (ddi_status != DDI_SUCCESS) { 801 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 802 "ddi_map_regs for Hydra global reg failed")); 803 goto hxge_map_regs_fail1; 804 } 805 806 /* set up the msi/msi-x mapped register */ 807 (void) ddi_dev_regsize(hxgep->dip, 2, ®size); 808 HXGE_DEBUG_MSG((hxgep, DDI_CTL, 809 "hxge_map_regs: msix size 0x%x", regsize)); 810 811 ddi_status = ddi_regs_map_setup(hxgep->dip, 2, 812 (caddr_t *)&(dev_regs->hxge_msix_regp), 0, 0, 813 &hxge_dev_reg_acc_attr, &dev_regs->hxge_msix_regh); 814 815 if (ddi_status != DDI_SUCCESS) { 816 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 817 "ddi_map_regs for msi reg failed")); 818 goto hxge_map_regs_fail2; 819 } 820 821 hxgep->dev_regs = dev_regs; 822 823 HPI_PCI_ACC_HANDLE_SET(hxgep, dev_regs->hxge_pciregh); 824 HPI_PCI_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_pciregp); 825 HPI_MSI_ACC_HANDLE_SET(hxgep, dev_regs->hxge_msix_regh); 826 HPI_MSI_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_msix_regp); 827 828 HPI_ACC_HANDLE_SET(hxgep, dev_regs->hxge_regh); 829 HPI_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_regp); 830 831 HPI_REG_ACC_HANDLE_SET(hxgep, dev_regs->hxge_regh); 832 HPI_REG_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_regp); 833 834 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "hxge_map_reg: hardware addr 0x%0llx " 835 " handle 0x%0llx", dev_regs->hxge_regp, dev_regs->hxge_regh)); 836 837 goto hxge_map_regs_exit; 838 839 hxge_map_regs_fail3: 840 if (dev_regs->hxge_msix_regh) { 841 ddi_regs_map_free(&dev_regs->hxge_msix_regh); 842 } 843 844 hxge_map_regs_fail2: 845 if (dev_regs->hxge_regh) { 846 ddi_regs_map_free(&dev_regs->hxge_regh); 847 } 848 849 hxge_map_regs_fail1: 850 if (dev_regs->hxge_pciregh) { 851 ddi_regs_map_free(&dev_regs->hxge_pciregh); 852 } 853 854 
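	/*
	 * The hxge_map_regs_fail* labels unwind in reverse order of setup:
	 * the MSI-X BAR mapping first, then the device register BAR, then
	 * PCI config space, and finally (at hxge_map_regs_fail0 below) the
	 * dev_regs structure itself.
	 */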
hxge_map_regs_fail0: 855 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "Freeing register set memory")); 856 kmem_free(dev_regs, sizeof (dev_regs_t)); 857 858 hxge_map_regs_exit: 859 if (ddi_status != DDI_SUCCESS) 860 status |= (HXGE_ERROR | HXGE_DDI_FAILED); 861 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_map_regs")); 862 return (status); 863 } 864 865 static void 866 hxge_unmap_regs(p_hxge_t hxgep) 867 { 868 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_unmap_regs")); 869 if (hxgep->dev_regs) { 870 if (hxgep->dev_regs->hxge_pciregh) { 871 HXGE_DEBUG_MSG((hxgep, DDI_CTL, 872 "==> hxge_unmap_regs: bus")); 873 ddi_regs_map_free(&hxgep->dev_regs->hxge_pciregh); 874 hxgep->dev_regs->hxge_pciregh = NULL; 875 } 876 877 if (hxgep->dev_regs->hxge_regh) { 878 HXGE_DEBUG_MSG((hxgep, DDI_CTL, 879 "==> hxge_unmap_regs: device registers")); 880 ddi_regs_map_free(&hxgep->dev_regs->hxge_regh); 881 hxgep->dev_regs->hxge_regh = NULL; 882 } 883 884 if (hxgep->dev_regs->hxge_msix_regh) { 885 HXGE_DEBUG_MSG((hxgep, DDI_CTL, 886 "==> hxge_unmap_regs: device interrupts")); 887 ddi_regs_map_free(&hxgep->dev_regs->hxge_msix_regh); 888 hxgep->dev_regs->hxge_msix_regh = NULL; 889 } 890 kmem_free(hxgep->dev_regs, sizeof (dev_regs_t)); 891 hxgep->dev_regs = NULL; 892 } 893 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_unmap_regs")); 894 } 895 896 static hxge_status_t 897 hxge_setup_mutexes(p_hxge_t hxgep) 898 { 899 int ddi_status = DDI_SUCCESS; 900 hxge_status_t status = HXGE_OK; 901 902 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_setup_mutexes")); 903 904 /* 905 * Get the interrupt cookie so the mutexes can be Initialised. 906 */ 907 ddi_status = ddi_get_iblock_cookie(hxgep->dip, 0, 908 &hxgep->interrupt_cookie); 909 910 if (ddi_status != DDI_SUCCESS) { 911 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 912 "<== hxge_setup_mutexes: failed 0x%x", ddi_status)); 913 goto hxge_setup_mutexes_exit; 914 } 915 916 /* 917 * Initialize mutex's for this device. 918 */ 919 MUTEX_INIT(hxgep->genlock, NULL, 920 MUTEX_DRIVER, (void *) hxgep->interrupt_cookie); 921 MUTEX_INIT(&hxgep->ouraddr_lock, NULL, 922 MUTEX_DRIVER, (void *) hxgep->interrupt_cookie); 923 RW_INIT(&hxgep->filter_lock, NULL, 924 RW_DRIVER, (void *) hxgep->interrupt_cookie); 925 MUTEX_INIT(&hxgep->pio_lock, NULL, 926 MUTEX_DRIVER, (void *) hxgep->interrupt_cookie); 927 MUTEX_INIT(&hxgep->timeout.lock, NULL, 928 MUTEX_DRIVER, (void *) hxgep->interrupt_cookie); 929 930 hxge_setup_mutexes_exit: 931 HXGE_DEBUG_MSG((hxgep, DDI_CTL, 932 "<== hxge_setup_mutexes status = %x", status)); 933 934 if (ddi_status != DDI_SUCCESS) 935 status |= (HXGE_ERROR | HXGE_DDI_FAILED); 936 937 return (status); 938 } 939 940 static void 941 hxge_destroy_mutexes(p_hxge_t hxgep) 942 { 943 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_destroy_mutexes")); 944 RW_DESTROY(&hxgep->filter_lock); 945 MUTEX_DESTROY(&hxgep->ouraddr_lock); 946 MUTEX_DESTROY(hxgep->genlock); 947 MUTEX_DESTROY(&hxgep->pio_lock); 948 MUTEX_DESTROY(&hxgep->timeout.lock); 949 950 if (hxge_debug_init == 1) { 951 MUTEX_DESTROY(&hxgedebuglock); 952 hxge_debug_init = 0; 953 } 954 955 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_destroy_mutexes")); 956 } 957 958 hxge_status_t 959 hxge_init(p_hxge_t hxgep) 960 { 961 hxge_status_t status = HXGE_OK; 962 963 HXGE_DEBUG_MSG((hxgep, STR_CTL, "==> hxge_init")); 964 965 if (hxgep->drv_state & STATE_HW_INITIALIZED) { 966 return (status); 967 } 968 969 /* 970 * Allocate system memory for the receive/transmit buffer blocks and 971 * receive/transmit descriptor rings. 
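 *
 * The work is split between hxge_alloc_rx_mem_pool() and
 * hxge_alloc_tx_mem_pool() (both defined later in this file), each of
 * which allocates per-channel data buffers plus control areas for the
 * descriptor rings and mailboxes.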
972 */ 973 status = hxge_alloc_mem_pool(hxgep); 974 if (status != HXGE_OK) { 975 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "alloc mem failed\n")); 976 goto hxge_init_fail1; 977 } 978 979 /* 980 * Initialize and enable TXDMA channels. 981 */ 982 status = hxge_init_txdma_channels(hxgep); 983 if (status != HXGE_OK) { 984 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init txdma failed\n")); 985 goto hxge_init_fail3; 986 } 987 988 /* 989 * Initialize and enable RXDMA channels. 990 */ 991 status = hxge_init_rxdma_channels(hxgep); 992 if (status != HXGE_OK) { 993 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init rxdma failed\n")); 994 goto hxge_init_fail4; 995 } 996 997 /* 998 * Initialize TCAM 999 */ 1000 status = hxge_classify_init(hxgep); 1001 if (status != HXGE_OK) { 1002 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init classify failed\n")); 1003 goto hxge_init_fail5; 1004 } 1005 1006 /* 1007 * Initialize the VMAC block. 1008 */ 1009 status = hxge_vmac_init(hxgep); 1010 if (status != HXGE_OK) { 1011 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init MAC failed\n")); 1012 goto hxge_init_fail5; 1013 } 1014 1015 /* Bringup - this may be unnecessary when PXE and FCODE available */ 1016 status = hxge_pfc_set_default_mac_addr(hxgep); 1017 if (status != HXGE_OK) { 1018 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 1019 "Default Address Failure\n")); 1020 goto hxge_init_fail5; 1021 } 1022 1023 hxge_intrs_enable(hxgep); 1024 1025 /* 1026 * Enable hardware interrupts. 1027 */ 1028 hxge_intr_hw_enable(hxgep); 1029 hxgep->drv_state |= STATE_HW_INITIALIZED; 1030 1031 goto hxge_init_exit; 1032 1033 hxge_init_fail5: 1034 hxge_uninit_rxdma_channels(hxgep); 1035 hxge_init_fail4: 1036 hxge_uninit_txdma_channels(hxgep); 1037 hxge_init_fail3: 1038 hxge_free_mem_pool(hxgep); 1039 hxge_init_fail1: 1040 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 1041 "<== hxge_init status (failed) = 0x%08x", status)); 1042 return (status); 1043 1044 hxge_init_exit: 1045 1046 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_init status = 0x%08x", 1047 status)); 1048 1049 return (status); 1050 } 1051 1052 timeout_id_t 1053 hxge_start_timer(p_hxge_t hxgep, fptrv_t func, int msec) 1054 { 1055 if ((hxgep->suspended == 0) || (hxgep->suspended == DDI_RESUME)) { 1056 return (timeout(func, (caddr_t)hxgep, 1057 drv_usectohz(1000 * msec))); 1058 } 1059 return (NULL); 1060 } 1061 1062 /*ARGSUSED*/ 1063 void 1064 hxge_stop_timer(p_hxge_t hxgep, timeout_id_t timerid) 1065 { 1066 if (timerid) { 1067 (void) untimeout(timerid); 1068 } 1069 } 1070 1071 void 1072 hxge_uninit(p_hxge_t hxgep) 1073 { 1074 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_uninit")); 1075 1076 if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) { 1077 HXGE_DEBUG_MSG((hxgep, DDI_CTL, 1078 "==> hxge_uninit: not initialized")); 1079 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_uninit")); 1080 return; 1081 } 1082 1083 /* Stop timer */ 1084 if (hxgep->hxge_timerid) { 1085 hxge_stop_timer(hxgep, hxgep->hxge_timerid); 1086 hxgep->hxge_timerid = 0; 1087 } 1088 1089 (void) hxge_intr_hw_disable(hxgep); 1090 1091 /* Reset the receive VMAC side. */ 1092 (void) hxge_rx_vmac_disable(hxgep); 1093 1094 /* Free classification resources */ 1095 (void) hxge_classify_uninit(hxgep); 1096 1097 /* Reset the transmit/receive DMA side. */ 1098 (void) hxge_txdma_hw_mode(hxgep, HXGE_DMA_STOP); 1099 (void) hxge_rxdma_hw_mode(hxgep, HXGE_DMA_STOP); 1100 1101 hxge_uninit_txdma_channels(hxgep); 1102 hxge_uninit_rxdma_channels(hxgep); 1103 1104 /* Reset the transmit VMAC side. 
*/ 1105 (void) hxge_tx_vmac_disable(hxgep); 1106 1107 hxge_free_mem_pool(hxgep); 1108 1109 hxgep->drv_state &= ~STATE_HW_INITIALIZED; 1110 1111 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_uninit")); 1112 } 1113 1114 void 1115 hxge_get64(p_hxge_t hxgep, p_mblk_t mp) 1116 { 1117 #if defined(__i386) 1118 size_t reg; 1119 #else 1120 uint64_t reg; 1121 #endif 1122 uint64_t regdata; 1123 int i, retry; 1124 1125 bcopy((char *)mp->b_rptr, (char *)®, sizeof (uint64_t)); 1126 regdata = 0; 1127 retry = 1; 1128 1129 for (i = 0; i < retry; i++) { 1130 HXGE_REG_RD64(hxgep->hpi_handle, reg, ®data); 1131 } 1132 bcopy((char *)®data, (char *)mp->b_rptr, sizeof (uint64_t)); 1133 } 1134 1135 void 1136 hxge_put64(p_hxge_t hxgep, p_mblk_t mp) 1137 { 1138 #if defined(__i386) 1139 size_t reg; 1140 #else 1141 uint64_t reg; 1142 #endif 1143 uint64_t buf[2]; 1144 1145 bcopy((char *)mp->b_rptr, (char *)&buf[0], 2 * sizeof (uint64_t)); 1146 #if defined(__i386) 1147 reg = (size_t)buf[0]; 1148 #else 1149 reg = buf[0]; 1150 #endif 1151 1152 HXGE_HPI_PIO_WRITE64(hxgep->hpi_handle, reg, buf[1]); 1153 } 1154 1155 /*ARGSUSED*/ 1156 /*VARARGS*/ 1157 void 1158 hxge_debug_msg(p_hxge_t hxgep, uint64_t level, char *fmt, ...) 1159 { 1160 char msg_buffer[1048]; 1161 char prefix_buffer[32]; 1162 int instance; 1163 uint64_t debug_level; 1164 int cmn_level = CE_CONT; 1165 va_list ap; 1166 1167 debug_level = (hxgep == NULL) ? hxge_debug_level : 1168 hxgep->hxge_debug_level; 1169 1170 if ((level & debug_level) || (level == HXGE_NOTE) || 1171 (level == HXGE_ERR_CTL)) { 1172 /* do the msg processing */ 1173 if (hxge_debug_init == 0) { 1174 MUTEX_INIT(&hxgedebuglock, NULL, MUTEX_DRIVER, NULL); 1175 hxge_debug_init = 1; 1176 } 1177 1178 MUTEX_ENTER(&hxgedebuglock); 1179 1180 if ((level & HXGE_NOTE)) { 1181 cmn_level = CE_NOTE; 1182 } 1183 1184 if (level & HXGE_ERR_CTL) { 1185 cmn_level = CE_WARN; 1186 } 1187 1188 va_start(ap, fmt); 1189 (void) vsprintf(msg_buffer, fmt, ap); 1190 va_end(ap); 1191 1192 if (hxgep == NULL) { 1193 instance = -1; 1194 (void) sprintf(prefix_buffer, "%s :", "hxge"); 1195 } else { 1196 instance = hxgep->instance; 1197 (void) sprintf(prefix_buffer, 1198 "%s%d :", "hxge", instance); 1199 } 1200 1201 MUTEX_EXIT(&hxgedebuglock); 1202 cmn_err(cmn_level, "%s %s\n", prefix_buffer, msg_buffer); 1203 } 1204 } 1205 1206 char * 1207 hxge_dump_packet(char *addr, int size) 1208 { 1209 uchar_t *ap = (uchar_t *)addr; 1210 int i; 1211 static char etherbuf[1024]; 1212 char *cp = etherbuf; 1213 char digits[] = "0123456789abcdef"; 1214 1215 if (!size) 1216 size = 60; 1217 1218 if (size > MAX_DUMP_SZ) { 1219 /* Dump the leading bytes */ 1220 for (i = 0; i < MAX_DUMP_SZ / 2; i++) { 1221 if (*ap > 0x0f) 1222 *cp++ = digits[*ap >> 4]; 1223 *cp++ = digits[*ap++ & 0xf]; 1224 *cp++ = ':'; 1225 } 1226 for (i = 0; i < 20; i++) 1227 *cp++ = '.'; 1228 /* Dump the last MAX_DUMP_SZ/2 bytes */ 1229 ap = (uchar_t *)(addr + (size - MAX_DUMP_SZ / 2)); 1230 for (i = 0; i < MAX_DUMP_SZ / 2; i++) { 1231 if (*ap > 0x0f) 1232 *cp++ = digits[*ap >> 4]; 1233 *cp++ = digits[*ap++ & 0xf]; 1234 *cp++ = ':'; 1235 } 1236 } else { 1237 for (i = 0; i < size; i++) { 1238 if (*ap > 0x0f) 1239 *cp++ = digits[*ap >> 4]; 1240 *cp++ = digits[*ap++ & 0xf]; 1241 *cp++ = ':'; 1242 } 1243 } 1244 *--cp = 0; 1245 return (etherbuf); 1246 } 1247 1248 static void 1249 hxge_suspend(p_hxge_t hxgep) 1250 { 1251 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_suspend")); 1252 1253 hxge_intrs_disable(hxgep); 1254 hxge_destroy_dev(hxgep); 1255 1256 /* Stop the link status timer */ 1257 
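	/*
	 * The timer id is checked and cancelled while holding timeout.lock
	 * so the cancellation cannot race with hxge_link_poll(), which runs
	 * as the timeout callback; the timer is re-armed in hxge_resume().
	 */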
MUTEX_ENTER(&hxgep->timeout.lock); 1258 if (hxgep->timeout.id) 1259 (void) untimeout(hxgep->timeout.id); 1260 MUTEX_EXIT(&hxgep->timeout.lock); 1261 1262 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_suspend")); 1263 } 1264 1265 static hxge_status_t 1266 hxge_resume(p_hxge_t hxgep) 1267 { 1268 hxge_status_t status = HXGE_OK; 1269 1270 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_resume")); 1271 hxgep->suspended = DDI_RESUME; 1272 1273 (void) hxge_rxdma_hw_mode(hxgep, HXGE_DMA_START); 1274 (void) hxge_txdma_hw_mode(hxgep, HXGE_DMA_START); 1275 1276 (void) hxge_rx_vmac_enable(hxgep); 1277 (void) hxge_tx_vmac_enable(hxgep); 1278 1279 hxge_intrs_enable(hxgep); 1280 1281 hxgep->suspended = 0; 1282 1283 /* Resume the link status timer */ 1284 MUTEX_ENTER(&hxgep->timeout.lock); 1285 hxgep->timeout.id = timeout(hxge_link_poll, (void *)hxgep, 1286 hxgep->timeout.ticks); 1287 MUTEX_EXIT(&hxgep->timeout.lock); 1288 1289 HXGE_DEBUG_MSG((hxgep, DDI_CTL, 1290 "<== hxge_resume status = 0x%x", status)); 1291 1292 return (status); 1293 } 1294 1295 hxge_status_t 1296 hxge_setup_dev(p_hxge_t hxgep) 1297 { 1298 hxge_status_t status = HXGE_OK; 1299 1300 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_setup_dev")); 1301 1302 status = hxge_link_init(hxgep); 1303 if (fm_check_acc_handle(hxgep->dev_regs->hxge_regh) != DDI_FM_OK) { 1304 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 1305 "Bad register acc handle")); 1306 status = HXGE_ERROR; 1307 } 1308 1309 if (status != HXGE_OK) { 1310 HXGE_DEBUG_MSG((hxgep, MAC_CTL, 1311 " hxge_setup_dev status (link init 0x%08x)", status)); 1312 goto hxge_setup_dev_exit; 1313 } 1314 1315 hxge_setup_dev_exit: 1316 HXGE_DEBUG_MSG((hxgep, DDI_CTL, 1317 "<== hxge_setup_dev status = 0x%08x", status)); 1318 1319 return (status); 1320 } 1321 1322 static void 1323 hxge_destroy_dev(p_hxge_t hxgep) 1324 { 1325 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_destroy_dev")); 1326 1327 (void) hxge_hw_stop(hxgep); 1328 1329 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_destroy_dev")); 1330 } 1331 1332 static hxge_status_t 1333 hxge_setup_system_dma_pages(p_hxge_t hxgep) 1334 { 1335 int ddi_status = DDI_SUCCESS; 1336 uint_t count; 1337 ddi_dma_cookie_t cookie; 1338 uint_t iommu_pagesize; 1339 hxge_status_t status = HXGE_OK; 1340 1341 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_setup_system_dma_pages")); 1342 1343 hxgep->sys_page_sz = ddi_ptob(hxgep->dip, (ulong_t)1); 1344 iommu_pagesize = dvma_pagesize(hxgep->dip); 1345 1346 HXGE_DEBUG_MSG((hxgep, DDI_CTL, 1347 " hxge_setup_system_dma_pages: page %d (ddi_ptob %d) " 1348 " default_block_size %d iommu_pagesize %d", 1349 hxgep->sys_page_sz, ddi_ptob(hxgep->dip, (ulong_t)1), 1350 hxgep->rx_default_block_size, iommu_pagesize)); 1351 1352 if (iommu_pagesize != 0) { 1353 if (hxgep->sys_page_sz == iommu_pagesize) { 1354 /* Hydra support up to 8K pages */ 1355 if (iommu_pagesize > 0x2000) 1356 hxgep->sys_page_sz = 0x2000; 1357 } else { 1358 if (hxgep->sys_page_sz > iommu_pagesize) 1359 hxgep->sys_page_sz = iommu_pagesize; 1360 } 1361 } 1362 1363 hxgep->sys_page_mask = ~(hxgep->sys_page_sz - 1); 1364 1365 HXGE_DEBUG_MSG((hxgep, DDI_CTL, 1366 "==> hxge_setup_system_dma_pages: page %d (ddi_ptob %d) " 1367 "default_block_size %d page mask %d", 1368 hxgep->sys_page_sz, ddi_ptob(hxgep->dip, (ulong_t)1), 1369 hxgep->rx_default_block_size, hxgep->sys_page_mask)); 1370 1371 switch (hxgep->sys_page_sz) { 1372 default: 1373 hxgep->sys_page_sz = 0x1000; 1374 hxgep->sys_page_mask = ~(hxgep->sys_page_sz - 1); 1375 hxgep->rx_default_block_size = 0x1000; 1376 hxgep->rx_bksize_code = RBR_BKSIZE_4K; 
1377 break; 1378 case 0x1000: 1379 hxgep->rx_default_block_size = 0x1000; 1380 hxgep->rx_bksize_code = RBR_BKSIZE_4K; 1381 break; 1382 case 0x2000: 1383 hxgep->rx_default_block_size = 0x2000; 1384 hxgep->rx_bksize_code = RBR_BKSIZE_8K; 1385 break; 1386 } 1387 1388 hxge_rx_dma_attr.dma_attr_align = hxgep->sys_page_sz; 1389 hxge_tx_dma_attr.dma_attr_align = hxgep->sys_page_sz; 1390 1391 /* 1392 * Get the system DMA burst size. 1393 */ 1394 ddi_status = ddi_dma_alloc_handle(hxgep->dip, &hxge_tx_dma_attr, 1395 DDI_DMA_DONTWAIT, 0, &hxgep->dmasparehandle); 1396 if (ddi_status != DDI_SUCCESS) { 1397 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 1398 "ddi_dma_alloc_handle: failed status 0x%x", ddi_status)); 1399 goto hxge_get_soft_properties_exit; 1400 } 1401 1402 ddi_status = ddi_dma_addr_bind_handle(hxgep->dmasparehandle, NULL, 1403 (caddr_t)hxgep->dmasparehandle, sizeof (hxgep->dmasparehandle), 1404 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, 0, 1405 &cookie, &count); 1406 if (ddi_status != DDI_DMA_MAPPED) { 1407 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 1408 "Binding spare handle to find system burstsize failed.")); 1409 ddi_status = DDI_FAILURE; 1410 goto hxge_get_soft_properties_fail1; 1411 } 1412 1413 hxgep->sys_burst_sz = ddi_dma_burstsizes(hxgep->dmasparehandle); 1414 (void) ddi_dma_unbind_handle(hxgep->dmasparehandle); 1415 1416 hxge_get_soft_properties_fail1: 1417 ddi_dma_free_handle(&hxgep->dmasparehandle); 1418 1419 hxge_get_soft_properties_exit: 1420 1421 if (ddi_status != DDI_SUCCESS) 1422 status |= (HXGE_ERROR | HXGE_DDI_FAILED); 1423 1424 HXGE_DEBUG_MSG((hxgep, DDI_CTL, 1425 "<== hxge_setup_system_dma_pages status = 0x%08x", status)); 1426 1427 return (status); 1428 } 1429 1430 hxge_status_t 1431 hxge_alloc_mem_pool(p_hxge_t hxgep) 1432 { 1433 hxge_status_t status = HXGE_OK; 1434 1435 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_alloc_mem_pool")); 1436 1437 status = hxge_alloc_rx_mem_pool(hxgep); 1438 if (status != HXGE_OK) { 1439 return (HXGE_ERROR); 1440 } 1441 1442 status = hxge_alloc_tx_mem_pool(hxgep); 1443 if (status != HXGE_OK) { 1444 hxge_free_rx_mem_pool(hxgep); 1445 return (HXGE_ERROR); 1446 } 1447 1448 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_alloc_mem_pool")); 1449 return (HXGE_OK); 1450 } 1451 1452 static void 1453 hxge_free_mem_pool(p_hxge_t hxgep) 1454 { 1455 HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_free_mem_pool")); 1456 1457 hxge_free_rx_mem_pool(hxgep); 1458 hxge_free_tx_mem_pool(hxgep); 1459 1460 HXGE_DEBUG_MSG((hxgep, MEM_CTL, "<== hxge_free_mem_pool")); 1461 } 1462 1463 static hxge_status_t 1464 hxge_alloc_rx_mem_pool(p_hxge_t hxgep) 1465 { 1466 int i, j; 1467 uint32_t ndmas, st_rdc; 1468 p_hxge_dma_pt_cfg_t p_all_cfgp; 1469 p_hxge_hw_pt_cfg_t p_cfgp; 1470 p_hxge_dma_pool_t dma_poolp; 1471 p_hxge_dma_common_t *dma_buf_p; 1472 p_hxge_dma_pool_t dma_rbr_cntl_poolp; 1473 p_hxge_dma_common_t *dma_rbr_cntl_p; 1474 p_hxge_dma_pool_t dma_rcr_cntl_poolp; 1475 p_hxge_dma_common_t *dma_rcr_cntl_p; 1476 p_hxge_dma_pool_t dma_mbox_cntl_poolp; 1477 p_hxge_dma_common_t *dma_mbox_cntl_p; 1478 size_t rx_buf_alloc_size; 1479 size_t rx_rbr_cntl_alloc_size; 1480 size_t rx_rcr_cntl_alloc_size; 1481 size_t rx_mbox_cntl_alloc_size; 1482 uint32_t *num_chunks; /* per dma */ 1483 hxge_status_t status = HXGE_OK; 1484 1485 uint32_t hxge_port_rbr_size; 1486 uint32_t hxge_port_rbr_spare_size; 1487 uint32_t hxge_port_rcr_size; 1488 1489 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_rx_mem_pool")); 1490 1491 p_all_cfgp = (p_hxge_dma_pt_cfg_t)&hxgep->pt_config; 1492 p_cfgp = 
(p_hxge_hw_pt_cfg_t)&p_all_cfgp->hw_config; 1493 st_rdc = p_cfgp->start_rdc; 1494 ndmas = p_cfgp->max_rdcs; 1495 1496 HXGE_DEBUG_MSG((hxgep, DMA_CTL, 1497 " hxge_alloc_rx_mem_pool st_rdc %d ndmas %d", st_rdc, ndmas)); 1498 1499 /* 1500 * Allocate memory for each receive DMA channel. 1501 */ 1502 dma_poolp = (p_hxge_dma_pool_t)KMEM_ZALLOC(sizeof (hxge_dma_pool_t), 1503 KM_SLEEP); 1504 dma_buf_p = (p_hxge_dma_common_t *)KMEM_ZALLOC( 1505 sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP); 1506 1507 dma_rbr_cntl_poolp = (p_hxge_dma_pool_t) 1508 KMEM_ZALLOC(sizeof (hxge_dma_pool_t), KM_SLEEP); 1509 dma_rbr_cntl_p = (p_hxge_dma_common_t *)KMEM_ZALLOC( 1510 sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP); 1511 dma_rcr_cntl_poolp = (p_hxge_dma_pool_t) 1512 KMEM_ZALLOC(sizeof (hxge_dma_pool_t), KM_SLEEP); 1513 dma_rcr_cntl_p = (p_hxge_dma_common_t *)KMEM_ZALLOC( 1514 sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP); 1515 dma_mbox_cntl_poolp = (p_hxge_dma_pool_t) 1516 KMEM_ZALLOC(sizeof (hxge_dma_pool_t), KM_SLEEP); 1517 dma_mbox_cntl_p = (p_hxge_dma_common_t *)KMEM_ZALLOC( 1518 sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP); 1519 1520 num_chunks = (uint32_t *)KMEM_ZALLOC(sizeof (uint32_t) * ndmas, 1521 KM_SLEEP); 1522 1523 /* 1524 * Assume that each DMA channel will be configured with default block 1525 * size. rbr block counts are mod of batch count (16). 1526 */ 1527 hxge_port_rbr_size = p_all_cfgp->rbr_size; 1528 hxge_port_rcr_size = p_all_cfgp->rcr_size; 1529 1530 if (!hxge_port_rbr_size) { 1531 hxge_port_rbr_size = HXGE_RBR_RBB_DEFAULT; 1532 } 1533 1534 if (hxge_port_rbr_size % HXGE_RXDMA_POST_BATCH) { 1535 hxge_port_rbr_size = (HXGE_RXDMA_POST_BATCH * 1536 (hxge_port_rbr_size / HXGE_RXDMA_POST_BATCH + 1)); 1537 } 1538 1539 p_all_cfgp->rbr_size = hxge_port_rbr_size; 1540 hxge_port_rbr_spare_size = hxge_rbr_spare_size; 1541 1542 if (hxge_port_rbr_spare_size % HXGE_RXDMA_POST_BATCH) { 1543 hxge_port_rbr_spare_size = (HXGE_RXDMA_POST_BATCH * 1544 (hxge_port_rbr_spare_size / HXGE_RXDMA_POST_BATCH + 1)); 1545 } 1546 1547 rx_buf_alloc_size = (hxgep->rx_default_block_size * 1548 (hxge_port_rbr_size + hxge_port_rbr_spare_size)); 1549 1550 /* 1551 * Addresses of receive block ring, receive completion ring and the 1552 * mailbox must be all cache-aligned (64 bytes). 1553 */ 1554 rx_rbr_cntl_alloc_size = hxge_port_rbr_size + hxge_port_rbr_spare_size; 1555 rx_rbr_cntl_alloc_size *= sizeof (rx_desc_t); 1556 rx_rcr_cntl_alloc_size = sizeof (rcr_entry_t) * hxge_port_rcr_size; 1557 rx_mbox_cntl_alloc_size = sizeof (rxdma_mailbox_t); 1558 1559 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_alloc_rx_mem_pool: " 1560 "hxge_port_rbr_size = %d hxge_port_rbr_spare_size = %d " 1561 "hxge_port_rcr_size = %d rx_cntl_alloc_size = %d", 1562 hxge_port_rbr_size, hxge_port_rbr_spare_size, 1563 hxge_port_rcr_size, rx_cntl_alloc_size)); 1564 1565 hxgep->hxge_port_rbr_size = hxge_port_rbr_size; 1566 hxgep->hxge_port_rcr_size = hxge_port_rcr_size; 1567 1568 /* 1569 * Allocate memory for receive buffers and descriptor rings. Replace 1570 * allocation functions with interface functions provided by the 1571 * partition manager when it is available. 1572 */ 1573 /* 1574 * Allocate memory for the receive buffer blocks. 
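 *
 * Sizing example (illustrative numbers only): the RBR entry count is
 * first rounded up to a multiple of HXGE_RXDMA_POST_BATCH (16), so a
 * configured rbr_size of 1000 becomes 1008.  With a 4 KB default block
 * size and no spare entries, each channel then needs
 * rx_buf_alloc_size = 4096 * 1008 bytes (a little under 4 MB), which
 * the loop below satisfies through hxge_alloc_rx_buf_dma() using one
 * or more chunks taken from alloc_sizes[].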
1575 */ 1576 for (i = 0; i < ndmas; i++) { 1577 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 1578 " hxge_alloc_rx_mem_pool to alloc mem: " 1579 " dma %d dma_buf_p %llx &dma_buf_p %llx", 1580 i, dma_buf_p[i], &dma_buf_p[i])); 1581 1582 num_chunks[i] = 0; 1583 1584 status = hxge_alloc_rx_buf_dma(hxgep, st_rdc, &dma_buf_p[i], 1585 rx_buf_alloc_size, hxgep->rx_default_block_size, 1586 &num_chunks[i]); 1587 if (status != HXGE_OK) { 1588 break; 1589 } 1590 1591 st_rdc++; 1592 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 1593 " hxge_alloc_rx_mem_pool DONE alloc mem: " 1594 "dma %d dma_buf_p %llx &dma_buf_p %llx", i, 1595 dma_buf_p[i], &dma_buf_p[i])); 1596 } 1597 1598 if (i < ndmas) { 1599 goto hxge_alloc_rx_mem_fail1; 1600 } 1601 1602 /* 1603 * Allocate memory for descriptor rings and mailbox. 1604 */ 1605 st_rdc = p_cfgp->start_rdc; 1606 for (j = 0; j < ndmas; j++) { 1607 if ((status = hxge_alloc_rx_cntl_dma(hxgep, st_rdc, 1608 &dma_rbr_cntl_p[j], &hxge_rx_rbr_desc_dma_attr, 1609 rx_rbr_cntl_alloc_size)) != HXGE_OK) { 1610 break; 1611 } 1612 1613 if ((status = hxge_alloc_rx_cntl_dma(hxgep, st_rdc, 1614 &dma_rcr_cntl_p[j], &hxge_rx_rcr_desc_dma_attr, 1615 rx_rcr_cntl_alloc_size)) != HXGE_OK) { 1616 break; 1617 } 1618 1619 if ((status = hxge_alloc_rx_cntl_dma(hxgep, st_rdc, 1620 &dma_mbox_cntl_p[j], &hxge_rx_mbox_dma_attr, 1621 rx_mbox_cntl_alloc_size)) != HXGE_OK) { 1622 break; 1623 } 1624 st_rdc++; 1625 } 1626 1627 if (j < ndmas) { 1628 goto hxge_alloc_rx_mem_fail2; 1629 } 1630 1631 dma_poolp->ndmas = ndmas; 1632 dma_poolp->num_chunks = num_chunks; 1633 dma_poolp->buf_allocated = B_TRUE; 1634 hxgep->rx_buf_pool_p = dma_poolp; 1635 dma_poolp->dma_buf_pool_p = dma_buf_p; 1636 1637 dma_rbr_cntl_poolp->ndmas = ndmas; 1638 dma_rbr_cntl_poolp->buf_allocated = B_TRUE; 1639 hxgep->rx_rbr_cntl_pool_p = dma_rbr_cntl_poolp; 1640 dma_rbr_cntl_poolp->dma_buf_pool_p = dma_rbr_cntl_p; 1641 1642 dma_rcr_cntl_poolp->ndmas = ndmas; 1643 dma_rcr_cntl_poolp->buf_allocated = B_TRUE; 1644 hxgep->rx_rcr_cntl_pool_p = dma_rcr_cntl_poolp; 1645 dma_rcr_cntl_poolp->dma_buf_pool_p = dma_rcr_cntl_p; 1646 1647 dma_mbox_cntl_poolp->ndmas = ndmas; 1648 dma_mbox_cntl_poolp->buf_allocated = B_TRUE; 1649 hxgep->rx_mbox_cntl_pool_p = dma_mbox_cntl_poolp; 1650 dma_mbox_cntl_poolp->dma_buf_pool_p = dma_mbox_cntl_p; 1651 1652 goto hxge_alloc_rx_mem_pool_exit; 1653 1654 hxge_alloc_rx_mem_fail2: 1655 /* Free control buffers */ 1656 HXGE_DEBUG_MSG((hxgep, DMA_CTL, 1657 "==> hxge_alloc_rx_mem_pool: freeing control bufs (%d)", j)); 1658 for (; j >= 0; j--) { 1659 hxge_free_rx_cntl_dma(hxgep, 1660 (p_hxge_dma_common_t)dma_rbr_cntl_p[j]); 1661 hxge_free_rx_cntl_dma(hxgep, 1662 (p_hxge_dma_common_t)dma_rcr_cntl_p[j]); 1663 hxge_free_rx_cntl_dma(hxgep, 1664 (p_hxge_dma_common_t)dma_mbox_cntl_p[j]); 1665 HXGE_DEBUG_MSG((hxgep, DMA_CTL, 1666 "==> hxge_alloc_rx_mem_pool: control bufs freed (%d)", j)); 1667 } 1668 HXGE_DEBUG_MSG((hxgep, DMA_CTL, 1669 "==> hxge_alloc_rx_mem_pool: control bufs freed (%d)", j)); 1670 1671 hxge_alloc_rx_mem_fail1: 1672 /* Free data buffers */ 1673 i--; 1674 HXGE_DEBUG_MSG((hxgep, DMA_CTL, 1675 "==> hxge_alloc_rx_mem_pool: freeing data bufs (%d)", i)); 1676 for (; i >= 0; i--) { 1677 hxge_free_rx_buf_dma(hxgep, (p_hxge_dma_common_t)dma_buf_p[i], 1678 num_chunks[i]); 1679 } 1680 HXGE_DEBUG_MSG((hxgep, DMA_CTL, 1681 "==> hxge_alloc_rx_mem_pool: data bufs freed (%d)", i)); 1682 1683 KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas); 1684 KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t)); 1685 KMEM_FREE(dma_buf_p, ndmas * sizeof 
(p_hxge_dma_common_t)); 1686 KMEM_FREE(dma_rbr_cntl_poolp, sizeof (hxge_dma_pool_t)); 1687 KMEM_FREE(dma_rbr_cntl_p, ndmas * sizeof (p_hxge_dma_common_t)); 1688 KMEM_FREE(dma_rcr_cntl_poolp, sizeof (hxge_dma_pool_t)); 1689 KMEM_FREE(dma_rcr_cntl_p, ndmas * sizeof (p_hxge_dma_common_t)); 1690 KMEM_FREE(dma_mbox_cntl_poolp, sizeof (hxge_dma_pool_t)); 1691 KMEM_FREE(dma_mbox_cntl_p, ndmas * sizeof (p_hxge_dma_common_t)); 1692 1693 hxge_alloc_rx_mem_pool_exit: 1694 HXGE_DEBUG_MSG((hxgep, DMA_CTL, 1695 "<== hxge_alloc_rx_mem_pool:status 0x%08x", status)); 1696 1697 return (status); 1698 } 1699 1700 static void 1701 hxge_free_rx_mem_pool(p_hxge_t hxgep) 1702 { 1703 uint32_t i, ndmas; 1704 p_hxge_dma_pool_t dma_poolp; 1705 p_hxge_dma_common_t *dma_buf_p; 1706 p_hxge_dma_pool_t dma_rbr_cntl_poolp; 1707 p_hxge_dma_common_t *dma_rbr_cntl_p; 1708 p_hxge_dma_pool_t dma_rcr_cntl_poolp; 1709 p_hxge_dma_common_t *dma_rcr_cntl_p; 1710 p_hxge_dma_pool_t dma_mbox_cntl_poolp; 1711 p_hxge_dma_common_t *dma_mbox_cntl_p; 1712 uint32_t *num_chunks; 1713 1714 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_free_rx_mem_pool")); 1715 1716 dma_poolp = hxgep->rx_buf_pool_p; 1717 if (dma_poolp == NULL || (!dma_poolp->buf_allocated)) { 1718 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_free_rx_mem_pool " 1719 "(null rx buf pool or buf not allocated")); 1720 return; 1721 } 1722 1723 dma_rbr_cntl_poolp = hxgep->rx_rbr_cntl_pool_p; 1724 if (dma_rbr_cntl_poolp == NULL || 1725 (!dma_rbr_cntl_poolp->buf_allocated)) { 1726 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 1727 "<== hxge_free_rx_mem_pool " 1728 "(null rbr cntl buf pool or rbr cntl buf not allocated")); 1729 return; 1730 } 1731 1732 dma_rcr_cntl_poolp = hxgep->rx_rcr_cntl_pool_p; 1733 if (dma_rcr_cntl_poolp == NULL || 1734 (!dma_rcr_cntl_poolp->buf_allocated)) { 1735 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 1736 "<== hxge_free_rx_mem_pool " 1737 "(null rcr cntl buf pool or rcr cntl buf not allocated")); 1738 return; 1739 } 1740 1741 dma_mbox_cntl_poolp = hxgep->rx_mbox_cntl_pool_p; 1742 if (dma_mbox_cntl_poolp == NULL || 1743 (!dma_mbox_cntl_poolp->buf_allocated)) { 1744 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 1745 "<== hxge_free_rx_mem_pool " 1746 "(null mbox cntl buf pool or mbox cntl buf not allocated")); 1747 return; 1748 } 1749 1750 dma_buf_p = dma_poolp->dma_buf_pool_p; 1751 num_chunks = dma_poolp->num_chunks; 1752 1753 dma_rbr_cntl_p = dma_rbr_cntl_poolp->dma_buf_pool_p; 1754 dma_rcr_cntl_p = dma_rcr_cntl_poolp->dma_buf_pool_p; 1755 dma_mbox_cntl_p = dma_mbox_cntl_poolp->dma_buf_pool_p; 1756 ndmas = dma_rbr_cntl_poolp->ndmas; 1757 1758 for (i = 0; i < ndmas; i++) { 1759 hxge_free_rx_buf_dma(hxgep, dma_buf_p[i], num_chunks[i]); 1760 } 1761 1762 for (i = 0; i < ndmas; i++) { 1763 hxge_free_rx_cntl_dma(hxgep, dma_rbr_cntl_p[i]); 1764 hxge_free_rx_cntl_dma(hxgep, dma_rcr_cntl_p[i]); 1765 hxge_free_rx_cntl_dma(hxgep, dma_mbox_cntl_p[i]); 1766 } 1767 1768 for (i = 0; i < ndmas; i++) { 1769 KMEM_FREE(dma_buf_p[i], 1770 sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK); 1771 KMEM_FREE(dma_rbr_cntl_p[i], sizeof (hxge_dma_common_t)); 1772 KMEM_FREE(dma_rcr_cntl_p[i], sizeof (hxge_dma_common_t)); 1773 KMEM_FREE(dma_mbox_cntl_p[i], sizeof (hxge_dma_common_t)); 1774 } 1775 1776 KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas); 1777 KMEM_FREE(dma_rbr_cntl_p, ndmas * sizeof (p_hxge_dma_common_t)); 1778 KMEM_FREE(dma_rbr_cntl_poolp, sizeof (hxge_dma_pool_t)); 1779 KMEM_FREE(dma_rcr_cntl_p, ndmas * sizeof (p_hxge_dma_common_t)); 1780 KMEM_FREE(dma_rcr_cntl_poolp, sizeof (hxge_dma_pool_t)); 1781 
KMEM_FREE(dma_mbox_cntl_p, ndmas * sizeof (p_hxge_dma_common_t)); 1782 KMEM_FREE(dma_mbox_cntl_poolp, sizeof (hxge_dma_pool_t)); 1783 KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t)); 1784 KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t)); 1785 1786 hxgep->rx_buf_pool_p = NULL; 1787 hxgep->rx_rbr_cntl_pool_p = NULL; 1788 hxgep->rx_rcr_cntl_pool_p = NULL; 1789 hxgep->rx_mbox_cntl_pool_p = NULL; 1790 1791 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_free_rx_mem_pool")); 1792 } 1793 1794 static hxge_status_t 1795 hxge_alloc_rx_buf_dma(p_hxge_t hxgep, uint16_t dma_channel, 1796 p_hxge_dma_common_t *dmap, 1797 size_t alloc_size, size_t block_size, uint32_t *num_chunks) 1798 { 1799 p_hxge_dma_common_t rx_dmap; 1800 hxge_status_t status = HXGE_OK; 1801 size_t total_alloc_size; 1802 size_t allocated = 0; 1803 int i, size_index, array_size; 1804 1805 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_rx_buf_dma")); 1806 1807 rx_dmap = (p_hxge_dma_common_t) 1808 KMEM_ZALLOC(sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK, KM_SLEEP); 1809 1810 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 1811 " alloc_rx_buf_dma rdc %d asize %x bsize %x bbuf %llx ", 1812 dma_channel, alloc_size, block_size, dmap)); 1813 1814 total_alloc_size = alloc_size; 1815 1816 i = 0; 1817 size_index = 0; 1818 array_size = sizeof (alloc_sizes) / sizeof (size_t); 1819 while ((alloc_sizes[size_index] < alloc_size) && 1820 (size_index < array_size)) 1821 size_index++; 1822 if (size_index >= array_size) { 1823 size_index = array_size - 1; 1824 } 1825 1826 while ((allocated < total_alloc_size) && 1827 (size_index >= 0) && (i < HXGE_DMA_BLOCK)) { 1828 rx_dmap[i].dma_chunk_index = i; 1829 rx_dmap[i].block_size = block_size; 1830 rx_dmap[i].alength = alloc_sizes[size_index]; 1831 rx_dmap[i].orig_alength = rx_dmap[i].alength; 1832 rx_dmap[i].nblocks = alloc_sizes[size_index] / block_size; 1833 rx_dmap[i].dma_channel = dma_channel; 1834 rx_dmap[i].contig_alloc_type = B_FALSE; 1835 1836 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 1837 "alloc_rx_buf_dma rdc %d chunk %d bufp %llx size %x " 1838 "i %d nblocks %d alength %d", 1839 dma_channel, i, &rx_dmap[i], block_size, 1840 i, rx_dmap[i].nblocks, rx_dmap[i].alength)); 1841 status = hxge_dma_mem_alloc(hxgep, hxge_force_dma, 1842 &hxge_rx_dma_attr, rx_dmap[i].alength, 1843 &hxge_dev_buf_dma_acc_attr, 1844 DDI_DMA_READ | DDI_DMA_STREAMING, 1845 (p_hxge_dma_common_t)(&rx_dmap[i])); 1846 if (status != HXGE_OK) { 1847 HXGE_DEBUG_MSG((hxgep, DMA_CTL, 1848 " hxge_alloc_rx_buf_dma: Alloc Failed: " 1849 " for size: %d", alloc_sizes[size_index])); 1850 size_index--; 1851 } else { 1852 HXGE_DEBUG_MSG((hxgep, DMA_CTL, 1853 " alloc_rx_buf_dma allocated rdc %d " 1854 "chunk %d size %x dvma %x bufp %llx ", 1855 dma_channel, i, rx_dmap[i].alength, 1856 rx_dmap[i].ioaddr_pp, &rx_dmap[i])); 1857 i++; 1858 allocated += alloc_sizes[size_index]; 1859 } 1860 } 1861 1862 if (allocated < total_alloc_size) { 1863 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 1864 " hxge_alloc_rx_buf_dma failed due to" 1865 " allocated(%d) < required(%d)", 1866 allocated, total_alloc_size)); 1867 goto hxge_alloc_rx_mem_fail1; 1868 } 1869 1870 HXGE_DEBUG_MSG((hxgep, DMA_CTL, 1871 " alloc_rx_buf_dma rdc %d allocated %d chunks", dma_channel, i)); 1872 1873 *num_chunks = i; 1874 *dmap = rx_dmap; 1875 1876 goto hxge_alloc_rx_mem_exit; 1877 1878 hxge_alloc_rx_mem_fail1: 1879 KMEM_FREE(rx_dmap, sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK); 1880 1881 hxge_alloc_rx_mem_exit: 1882 HXGE_DEBUG_MSG((hxgep, DMA_CTL, 1883 "<== hxge_alloc_rx_buf_dma status 0x%08x", status)); 1884 1885 
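	/*
	 * Allocation strategy recap (illustrative): the loop above starts at
	 * the smallest alloc_sizes[] entry that covers the request and falls
	 * back to the next smaller size whenever hxge_dma_mem_alloc() fails.
	 * An 8 MB (0x800000) request, for example, is ideally met by a single
	 * 0x800000 chunk, or by two 0x400000 chunks if the larger allocation
	 * cannot be satisfied, up to HXGE_DMA_BLOCK chunks in total.
	 */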
return (status); 1886 } 1887 1888 /*ARGSUSED*/ 1889 static void 1890 hxge_free_rx_buf_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap, 1891 uint32_t num_chunks) 1892 { 1893 int i; 1894 1895 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 1896 "==> hxge_free_rx_buf_dma: # of chunks %d", num_chunks)); 1897 1898 for (i = 0; i < num_chunks; i++) { 1899 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 1900 "==> hxge_free_rx_buf_dma: chunk %d dmap 0x%llx", i, dmap)); 1901 hxge_dma_mem_free(dmap++); 1902 } 1903 1904 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_free_rx_buf_dma")); 1905 } 1906 1907 /*ARGSUSED*/ 1908 static hxge_status_t 1909 hxge_alloc_rx_cntl_dma(p_hxge_t hxgep, uint16_t dma_channel, 1910 p_hxge_dma_common_t *dmap, struct ddi_dma_attr *attr, size_t size) 1911 { 1912 p_hxge_dma_common_t rx_dmap; 1913 hxge_status_t status = HXGE_OK; 1914 1915 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_rx_cntl_dma")); 1916 1917 rx_dmap = (p_hxge_dma_common_t) 1918 KMEM_ZALLOC(sizeof (hxge_dma_common_t), KM_SLEEP); 1919 1920 rx_dmap->contig_alloc_type = B_FALSE; 1921 1922 status = hxge_dma_mem_alloc(hxgep, hxge_force_dma, 1923 attr, size, &hxge_dev_desc_dma_acc_attr, 1924 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, rx_dmap); 1925 if (status != HXGE_OK) { 1926 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 1927 " hxge_alloc_rx_cntl_dma: Alloc Failed: " 1928 " for size: %d", size)); 1929 goto hxge_alloc_rx_cntl_dma_fail1; 1930 } 1931 1932 *dmap = rx_dmap; 1933 1934 goto hxge_alloc_rx_cntl_dma_exit; 1935 1936 hxge_alloc_rx_cntl_dma_fail1: 1937 KMEM_FREE(rx_dmap, sizeof (hxge_dma_common_t)); 1938 1939 hxge_alloc_rx_cntl_dma_exit: 1940 HXGE_DEBUG_MSG((hxgep, DMA_CTL, 1941 "<== hxge_alloc_rx_cntl_dma status 0x%08x", status)); 1942 1943 return (status); 1944 } 1945 1946 /*ARGSUSED*/ 1947 static void 1948 hxge_free_rx_cntl_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap) 1949 { 1950 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_free_rx_cntl_dma")); 1951 1952 hxge_dma_mem_free(dmap); 1953 1954 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_free_rx_cntl_dma")); 1955 } 1956 1957 static hxge_status_t 1958 hxge_alloc_tx_mem_pool(p_hxge_t hxgep) 1959 { 1960 hxge_status_t status = HXGE_OK; 1961 int i, j; 1962 uint32_t ndmas, st_tdc; 1963 p_hxge_dma_pt_cfg_t p_all_cfgp; 1964 p_hxge_hw_pt_cfg_t p_cfgp; 1965 p_hxge_dma_pool_t dma_poolp; 1966 p_hxge_dma_common_t *dma_buf_p; 1967 p_hxge_dma_pool_t dma_cntl_poolp; 1968 p_hxge_dma_common_t *dma_cntl_p; 1969 size_t tx_buf_alloc_size; 1970 size_t tx_cntl_alloc_size; 1971 uint32_t *num_chunks; /* per dma */ 1972 1973 HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_alloc_tx_mem_pool")); 1974 1975 p_all_cfgp = (p_hxge_dma_pt_cfg_t)&hxgep->pt_config; 1976 p_cfgp = (p_hxge_hw_pt_cfg_t)&p_all_cfgp->hw_config; 1977 st_tdc = p_cfgp->start_tdc; 1978 ndmas = p_cfgp->max_tdcs; 1979 1980 HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_alloc_tx_mem_pool: " 1981 "p_cfgp 0x%016llx start_tdc %d ndmas %d hxgep->max_tdcs %d", 1982 p_cfgp, p_cfgp->start_tdc, p_cfgp->max_tdcs, hxgep->max_tdcs)); 1983 /* 1984 * Allocate memory for each transmit DMA channel. 
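 * The data buffer pool and the control (descriptor/mailbox) pool are sized and allocated separately below.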
1985 */ 1986 dma_poolp = (p_hxge_dma_pool_t)KMEM_ZALLOC(sizeof (hxge_dma_pool_t), 1987 KM_SLEEP); 1988 dma_buf_p = (p_hxge_dma_common_t *)KMEM_ZALLOC( 1989 sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP); 1990 1991 dma_cntl_poolp = (p_hxge_dma_pool_t) 1992 KMEM_ZALLOC(sizeof (hxge_dma_pool_t), KM_SLEEP); 1993 dma_cntl_p = (p_hxge_dma_common_t *)KMEM_ZALLOC( 1994 sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP); 1995 1996 hxgep->hxge_port_tx_ring_size = hxge_tx_ring_size; 1997 1998 /* 1999 * Assume that each DMA channel will be configured with the default 2000 * transmit buffer size for copying transmit data. (For packet payloads 2001 * over this limit, packets will not be copied.) 2002 */ 2003 tx_buf_alloc_size = (hxge_bcopy_thresh * hxge_tx_ring_size); 2004 2005 /* 2006 * Addresses of the transmit descriptor ring and the mailbox must all 2007 * be cache-aligned (64 bytes). 2008 */ 2009 tx_cntl_alloc_size = hxge_tx_ring_size; 2010 tx_cntl_alloc_size *= (sizeof (tx_desc_t)); 2011 tx_cntl_alloc_size += sizeof (txdma_mailbox_t); 2012 2013 num_chunks = (uint32_t *)KMEM_ZALLOC(sizeof (uint32_t) * ndmas, 2014 KM_SLEEP); 2015 2016 /* 2017 * Allocate memory for transmit buffers and descriptor rings. Replace 2018 * these allocation functions with interface functions provided by the 2019 * partition manager when it is available. 2020 * 2021 * Allocate memory for the transmit buffer pool. 2022 */ 2023 for (i = 0; i < ndmas; i++) { 2024 num_chunks[i] = 0; 2025 status = hxge_alloc_tx_buf_dma(hxgep, st_tdc, &dma_buf_p[i], 2026 tx_buf_alloc_size, hxge_bcopy_thresh, &num_chunks[i]); 2027 if (status != HXGE_OK) { 2028 break; 2029 } 2030 st_tdc++; 2031 } 2032 2033 if (i < ndmas) { 2034 goto hxge_alloc_tx_mem_pool_fail1; 2035 } 2036 2037 st_tdc = p_cfgp->start_tdc; 2038 2039 /* 2040 * Allocate memory for descriptor rings and the mailbox.
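 * Each channel needs hxge_tx_ring_size descriptors plus one mailbox, i.e. the tx_cntl_alloc_size computed above.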
2041 */ 2042 for (j = 0; j < ndmas; j++) { 2043 status = hxge_alloc_tx_cntl_dma(hxgep, st_tdc, &dma_cntl_p[j], 2044 tx_cntl_alloc_size); 2045 if (status != HXGE_OK) { 2046 break; 2047 } 2048 st_tdc++; 2049 } 2050 2051 if (j < ndmas) { 2052 goto hxge_alloc_tx_mem_pool_fail2; 2053 } 2054 2055 dma_poolp->ndmas = ndmas; 2056 dma_poolp->num_chunks = num_chunks; 2057 dma_poolp->buf_allocated = B_TRUE; 2058 dma_poolp->dma_buf_pool_p = dma_buf_p; 2059 hxgep->tx_buf_pool_p = dma_poolp; 2060 2061 dma_cntl_poolp->ndmas = ndmas; 2062 dma_cntl_poolp->buf_allocated = B_TRUE; 2063 dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p; 2064 hxgep->tx_cntl_pool_p = dma_cntl_poolp; 2065 2066 HXGE_DEBUG_MSG((hxgep, MEM_CTL, 2067 "==> hxge_alloc_tx_mem_pool: start_tdc %d " 2068 "ndmas %d poolp->ndmas %d", st_tdc, ndmas, dma_poolp->ndmas)); 2069 2070 goto hxge_alloc_tx_mem_pool_exit; 2071 2072 hxge_alloc_tx_mem_pool_fail2: 2073 /* Free control buffers */ 2074 j--; 2075 for (; j >= 0; j--) { 2076 hxge_free_tx_cntl_dma(hxgep, 2077 (p_hxge_dma_common_t)dma_cntl_p[j]); 2078 } 2079 2080 hxge_alloc_tx_mem_pool_fail1: 2081 /* Free data buffers */ 2082 i--; 2083 for (; i >= 0; i--) { 2084 hxge_free_tx_buf_dma(hxgep, (p_hxge_dma_common_t)dma_buf_p[i], 2085 num_chunks[i]); 2086 } 2087 2088 KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t)); 2089 KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t)); 2090 KMEM_FREE(dma_cntl_poolp, sizeof (hxge_dma_pool_t)); 2091 KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_hxge_dma_common_t)); 2092 KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas); 2093 2094 hxge_alloc_tx_mem_pool_exit: 2095 HXGE_DEBUG_MSG((hxgep, MEM_CTL, 2096 "<== hxge_alloc_tx_mem_pool:status 0x%08x", status)); 2097 2098 return (status); 2099 } 2100 2101 static hxge_status_t 2102 hxge_alloc_tx_buf_dma(p_hxge_t hxgep, uint16_t dma_channel, 2103 p_hxge_dma_common_t *dmap, size_t alloc_size, 2104 size_t block_size, uint32_t *num_chunks) 2105 { 2106 p_hxge_dma_common_t tx_dmap; 2107 hxge_status_t status = HXGE_OK; 2108 size_t total_alloc_size; 2109 size_t allocated = 0; 2110 int i, size_index, array_size; 2111 2112 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_tx_buf_dma")); 2113 2114 tx_dmap = (p_hxge_dma_common_t) 2115 KMEM_ZALLOC(sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK, KM_SLEEP); 2116 2117 total_alloc_size = alloc_size; 2118 i = 0; 2119 size_index = 0; 2120 array_size = sizeof (alloc_sizes) / sizeof (size_t); 2121 while ((alloc_sizes[size_index] < alloc_size) && 2122 (size_index < array_size)) 2123 size_index++; 2124 if (size_index >= array_size) { 2125 size_index = array_size - 1; 2126 } 2127 2128 while ((allocated < total_alloc_size) && 2129 (size_index >= 0) && (i < HXGE_DMA_BLOCK)) { 2130 tx_dmap[i].dma_chunk_index = i; 2131 tx_dmap[i].block_size = block_size; 2132 tx_dmap[i].alength = alloc_sizes[size_index]; 2133 tx_dmap[i].orig_alength = tx_dmap[i].alength; 2134 tx_dmap[i].nblocks = alloc_sizes[size_index] / block_size; 2135 tx_dmap[i].dma_channel = dma_channel; 2136 tx_dmap[i].contig_alloc_type = B_FALSE; 2137 2138 status = hxge_dma_mem_alloc(hxgep, hxge_force_dma, 2139 &hxge_tx_dma_attr, tx_dmap[i].alength, 2140 &hxge_dev_buf_dma_acc_attr, 2141 DDI_DMA_WRITE | DDI_DMA_STREAMING, 2142 (p_hxge_dma_common_t)(&tx_dmap[i])); 2143 if (status != HXGE_OK) { 2144 HXGE_DEBUG_MSG((hxgep, DMA_CTL, 2145 " hxge_alloc_tx_buf_dma: Alloc Failed: " 2146 " for size: %d", alloc_sizes[size_index])); 2147 size_index--; 2148 } else { 2149 i++; 2150 allocated += alloc_sizes[size_index]; 2151 } 2152 } 2153 2154 if (allocated < 
total_alloc_size) { 2155 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2156 " hxge_alloc_tx_buf_dma: failed due to" 2157 " allocated(%d) < required(%d)", 2158 allocated, total_alloc_size)); 2159 goto hxge_alloc_tx_mem_fail1; 2160 } 2161 2162 *num_chunks = i; 2163 *dmap = tx_dmap; 2164 HXGE_DEBUG_MSG((hxgep, DMA_CTL, 2165 "==> hxge_alloc_tx_buf_dma dmap 0x%016llx num chunks %d", 2166 *dmap, i)); 2167 goto hxge_alloc_tx_mem_exit; 2168 2169 hxge_alloc_tx_mem_fail1: 2170 KMEM_FREE(tx_dmap, sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK); 2171 2172 hxge_alloc_tx_mem_exit: 2173 HXGE_DEBUG_MSG((hxgep, DMA_CTL, 2174 "<== hxge_alloc_tx_buf_dma status 0x%08x", status)); 2175 2176 return (status); 2177 } 2178 2179 /*ARGSUSED*/ 2180 static void 2181 hxge_free_tx_buf_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap, 2182 uint32_t num_chunks) 2183 { 2184 int i; 2185 2186 HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_free_tx_buf_dma")); 2187 2188 for (i = 0; i < num_chunks; i++) { 2189 hxge_dma_mem_free(dmap++); 2190 } 2191 2192 HXGE_DEBUG_MSG((hxgep, MEM_CTL, "<== hxge_free_tx_buf_dma")); 2193 } 2194 2195 /*ARGSUSED*/ 2196 static hxge_status_t 2197 hxge_alloc_tx_cntl_dma(p_hxge_t hxgep, uint16_t dma_channel, 2198 p_hxge_dma_common_t *dmap, size_t size) 2199 { 2200 p_hxge_dma_common_t tx_dmap; 2201 hxge_status_t status = HXGE_OK; 2202 2203 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_tx_cntl_dma")); 2204 2205 tx_dmap = (p_hxge_dma_common_t)KMEM_ZALLOC(sizeof (hxge_dma_common_t), 2206 KM_SLEEP); 2207 2208 tx_dmap->contig_alloc_type = B_FALSE; 2209 2210 status = hxge_dma_mem_alloc(hxgep, hxge_force_dma, 2211 &hxge_tx_desc_dma_attr, size, &hxge_dev_desc_dma_acc_attr, 2212 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, tx_dmap); 2213 if (status != HXGE_OK) { 2214 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2215 " hxge_alloc_tx_cntl_dma: Alloc Failed: " 2216 " for size: %d", size)); 2217 goto hxge_alloc_tx_cntl_dma_fail1; 2218 } 2219 2220 *dmap = tx_dmap; 2221 2222 goto hxge_alloc_tx_cntl_dma_exit; 2223 2224 hxge_alloc_tx_cntl_dma_fail1: 2225 KMEM_FREE(tx_dmap, sizeof (hxge_dma_common_t)); 2226 2227 hxge_alloc_tx_cntl_dma_exit: 2228 HXGE_DEBUG_MSG((hxgep, DMA_CTL, 2229 "<== hxge_alloc_tx_cntl_dma status 0x%08x", status)); 2230 2231 return (status); 2232 } 2233 2234 /*ARGSUSED*/ 2235 static void 2236 hxge_free_tx_cntl_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap) 2237 { 2238 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_free_tx_cntl_dma")); 2239 2240 hxge_dma_mem_free(dmap); 2241 2242 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_free_tx_cntl_dma")); 2243 } 2244 2245 static void 2246 hxge_free_tx_mem_pool(p_hxge_t hxgep) 2247 { 2248 uint32_t i, ndmas; 2249 p_hxge_dma_pool_t dma_poolp; 2250 p_hxge_dma_common_t *dma_buf_p; 2251 p_hxge_dma_pool_t dma_cntl_poolp; 2252 p_hxge_dma_common_t *dma_cntl_p; 2253 uint32_t *num_chunks; 2254 2255 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_free_tx_mem_pool")); 2256 2257 dma_poolp = hxgep->tx_buf_pool_p; 2258 if (dma_poolp == NULL || (!dma_poolp->buf_allocated)) { 2259 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 2260 "<== hxge_free_tx_mem_pool " 2261 "(null tx buf pool or buf not allocated)")); 2262 return; 2263 } 2264 2265 dma_cntl_poolp = hxgep->tx_cntl_pool_p; 2266 if (dma_cntl_poolp == NULL || (!dma_cntl_poolp->buf_allocated)) { 2267 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 2268 "<== hxge_free_tx_mem_pool " 2269 "(null tx cntl buf pool or cntl buf not allocated)")); 2270 return; 2271 } 2272 2273 dma_buf_p = dma_poolp->dma_buf_pool_p; 2274 num_chunks = dma_poolp->num_chunks; 2275 2276 dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p; 2277
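/* Free the per-channel data buffer chunks first, then the control (descriptor/mailbox) areas, and finally the bookkeeping arrays and pool structures. */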
ndmas = dma_cntl_poolp->ndmas; 2278 2279 for (i = 0; i < ndmas; i++) { 2280 hxge_free_tx_buf_dma(hxgep, dma_buf_p[i], num_chunks[i]); 2281 } 2282 2283 for (i = 0; i < ndmas; i++) { 2284 hxge_free_tx_cntl_dma(hxgep, dma_cntl_p[i]); 2285 } 2286 2287 for (i = 0; i < ndmas; i++) { 2288 KMEM_FREE(dma_buf_p[i], 2289 sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK); 2290 KMEM_FREE(dma_cntl_p[i], sizeof (hxge_dma_common_t)); 2291 } 2292 2293 KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas); 2294 KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_hxge_dma_common_t)); 2295 KMEM_FREE(dma_cntl_poolp, sizeof (hxge_dma_pool_t)); 2296 KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t)); 2297 KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t)); 2298 2299 hxgep->tx_buf_pool_p = NULL; 2300 hxgep->tx_cntl_pool_p = NULL; 2301 2302 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "<== hxge_free_tx_mem_pool")); 2303 } 2304 2305 /*ARGSUSED*/ 2306 static hxge_status_t 2307 hxge_dma_mem_alloc(p_hxge_t hxgep, dma_method_t method, 2308 struct ddi_dma_attr *dma_attrp, 2309 size_t length, ddi_device_acc_attr_t *acc_attr_p, uint_t xfer_flags, 2310 p_hxge_dma_common_t dma_p) 2311 { 2312 caddr_t kaddrp; 2313 int ddi_status = DDI_SUCCESS; 2314 2315 dma_p->dma_handle = NULL; 2316 dma_p->acc_handle = NULL; 2317 dma_p->kaddrp = NULL; 2318 2319 ddi_status = ddi_dma_alloc_handle(hxgep->dip, dma_attrp, 2320 DDI_DMA_DONTWAIT, NULL, &dma_p->dma_handle); 2321 if (ddi_status != DDI_SUCCESS) { 2322 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2323 "hxge_dma_mem_alloc:ddi_dma_alloc_handle failed.")); 2324 return (HXGE_ERROR | HXGE_DDI_FAILED); 2325 } 2326 2327 ddi_status = ddi_dma_mem_alloc(dma_p->dma_handle, length, acc_attr_p, 2328 xfer_flags, DDI_DMA_DONTWAIT, 0, &kaddrp, &dma_p->alength, 2329 &dma_p->acc_handle); 2330 if (ddi_status != DDI_SUCCESS) { 2331 /* The caller will decide whether it is fatal */ 2332 HXGE_DEBUG_MSG((hxgep, DMA_CTL, 2333 "hxge_dma_mem_alloc:ddi_dma_mem_alloc failed")); 2334 ddi_dma_free_handle(&dma_p->dma_handle); 2335 dma_p->dma_handle = NULL; 2336 return (HXGE_ERROR | HXGE_DDI_FAILED); 2337 } 2338 2339 if (dma_p->alength < length) { 2340 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2341 "hxge_dma_mem_alloc:ddi_dma_mem_alloc < length.")); 2342 ddi_dma_mem_free(&dma_p->acc_handle); 2343 ddi_dma_free_handle(&dma_p->dma_handle); 2344 dma_p->acc_handle = NULL; 2345 dma_p->dma_handle = NULL; 2346 return (HXGE_ERROR); 2347 } 2348 2349 ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL, 2350 kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0, 2351 &dma_p->dma_cookie, &dma_p->ncookies); 2352 if (ddi_status != DDI_DMA_MAPPED) { 2353 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2354 "hxge_dma_mem_alloc:ddi_dma_addr_bind failed " 2355 "(status 0x%x ncookies %d.)", ddi_status, dma_p->ncookies)); 2356 if (dma_p->acc_handle) { 2357 ddi_dma_mem_free(&dma_p->acc_handle); 2358 dma_p->acc_handle = NULL; 2359 } 2360 ddi_dma_free_handle(&dma_p->dma_handle); 2361 dma_p->dma_handle = NULL; 2362 return (HXGE_ERROR | HXGE_DDI_FAILED); 2363 } 2364 2365 if (dma_p->ncookies != 1) { 2366 HXGE_DEBUG_MSG((hxgep, DMA_CTL, 2367 "hxge_dma_mem_alloc:ddi_dma_addr_bind > 1 cookie" 2368 "(status 0x%x ncookies %d.)", ddi_status, dma_p->ncookies)); 2369 if (dma_p->acc_handle) { 2370 ddi_dma_mem_free(&dma_p->acc_handle); 2371 dma_p->acc_handle = NULL; 2372 } 2373 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 2374 ddi_dma_free_handle(&dma_p->dma_handle); 2375 dma_p->dma_handle = NULL; 2376 return (HXGE_ERROR); 2377 } 2378 2379 dma_p->kaddrp = kaddrp; 2380 #if defined(__i386) 2381
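/* On 32-bit x86 the 64-bit DMA cookie address is narrowed through a uint32_t cast before being stored as the device I/O address. */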
dma_p->ioaddr_pp = 2382 (unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress; 2383 #else 2384 dma_p->ioaddr_pp = (unsigned char *) dma_p->dma_cookie.dmac_laddress; 2385 #endif 2386 2387 HPI_DMA_ACC_HANDLE_SET(dma_p, dma_p->acc_handle); 2388 2389 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_dma_mem_alloc: " 2390 "dma buffer allocated: dma_p $%p " 2391 "return dmac_laddress from cookie $%p dmac_size %d " 2392 "dma_p->ioaddr_p $%p " 2393 "dma_p->orig_ioaddr_p $%p " 2394 "orig_vatopa $%p " 2395 "alength %d (0x%x) " 2396 "kaddrp $%p " 2397 "length %d (0x%x)", 2398 dma_p, 2399 dma_p->dma_cookie.dmac_laddress, 2400 dma_p->dma_cookie.dmac_size, 2401 dma_p->ioaddr_pp, 2402 dma_p->orig_ioaddr_pp, 2403 dma_p->orig_vatopa, 2404 dma_p->alength, dma_p->alength, 2405 kaddrp, 2406 length, length)); 2407 2408 return (HXGE_OK); 2409 } 2410 2411 static void 2412 hxge_dma_mem_free(p_hxge_dma_common_t dma_p) 2413 { 2414 if (dma_p == NULL) 2415 return; 2416 2417 if (dma_p->dma_handle != NULL) { 2418 if (dma_p->ncookies) { 2419 (void) ddi_dma_unbind_handle(dma_p->dma_handle); 2420 dma_p->ncookies = 0; 2421 } 2422 ddi_dma_free_handle(&dma_p->dma_handle); 2423 dma_p->dma_handle = NULL; 2424 } 2425 2426 if (dma_p->acc_handle != NULL) { 2427 ddi_dma_mem_free(&dma_p->acc_handle); 2428 dma_p->acc_handle = NULL; 2429 HPI_DMA_ACC_HANDLE_SET(dma_p, NULL); 2430 } 2431 2432 dma_p->kaddrp = NULL; 2433 dma_p->alength = 0; 2434 } 2435 2436 /* 2437 * hxge_m_start() -- start transmitting and receiving. 2438 * 2439 * This function is called by the MAC layer when the first 2440 * stream is opened to prepare the hardware for sending 2441 * and receiving packets. 2442 */ 2443 static int 2444 hxge_m_start(void *arg) 2445 { 2446 p_hxge_t hxgep = (p_hxge_t)arg; 2447 2448 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_start")); 2449 2450 MUTEX_ENTER(hxgep->genlock); 2451 2452 if (hxge_init(hxgep) != DDI_SUCCESS) { 2453 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2454 "<== hxge_m_start: initialization failed")); 2455 MUTEX_EXIT(hxgep->genlock); 2456 return (EIO); 2457 } 2458 2459 if (hxgep->hxge_mac_state != HXGE_MAC_STARTED) { 2460 /* 2461 * Start a timer to check for system errors and tx hangs 2462 */ 2463 hxgep->hxge_timerid = hxge_start_timer(hxgep, 2464 hxge_check_hw_state, HXGE_CHECK_TIMER); 2465 2466 hxgep->hxge_mac_state = HXGE_MAC_STARTED; 2467 2468 hxgep->timeout.link_status = 0; 2469 hxgep->timeout.report_link_status = B_TRUE; 2470 hxgep->timeout.ticks = drv_usectohz(2 * 1000000); 2471 2472 /* Start the link status timer to check the link status */ 2473 MUTEX_ENTER(&hxgep->timeout.lock); 2474 hxgep->timeout.id = timeout(hxge_link_poll, (void *)hxgep, 2475 hxgep->timeout.ticks); 2476 MUTEX_EXIT(&hxgep->timeout.lock); 2477 } 2478 2479 MUTEX_EXIT(hxgep->genlock); 2480 2481 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_start")); 2482 2483 return (0); 2484 } 2485 2486 /* 2487 * hxge_m_stop(): stop transmitting and receiving.
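 * Stops the hardware-check and link-status timers and then uninitializes the device under genlock.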
2488 */ 2489 static void 2490 hxge_m_stop(void *arg) 2491 { 2492 p_hxge_t hxgep = (p_hxge_t)arg; 2493 2494 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_stop")); 2495 2496 if (hxgep->hxge_timerid) { 2497 hxge_stop_timer(hxgep, hxgep->hxge_timerid); 2498 hxgep->hxge_timerid = 0; 2499 } 2500 2501 /* Stop the link status timer before unregistering */ 2502 MUTEX_ENTER(&hxgep->timeout.lock); 2503 if (hxgep->timeout.id) { 2504 (void) untimeout(hxgep->timeout.id); 2505 hxgep->timeout.id = 0; 2506 } 2507 hxge_link_update(hxgep, LINK_STATE_DOWN); 2508 MUTEX_EXIT(&hxgep->timeout.lock); 2509 2510 MUTEX_ENTER(hxgep->genlock); 2511 2512 hxge_uninit(hxgep); 2513 2514 hxgep->hxge_mac_state = HXGE_MAC_STOPPED; 2515 2516 MUTEX_EXIT(hxgep->genlock); 2517 2518 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_stop")); 2519 } 2520 2521 static int 2522 hxge_m_unicst(void *arg, const uint8_t *macaddr) 2523 { 2524 p_hxge_t hxgep = (p_hxge_t)arg; 2525 struct ether_addr addrp; 2526 hxge_status_t status; 2527 2528 HXGE_DEBUG_MSG((hxgep, MAC_CTL, "==> hxge_m_unicst")); 2529 2530 bcopy(macaddr, (uint8_t *)&addrp, ETHERADDRL); 2531 2532 status = hxge_set_mac_addr(hxgep, &addrp); 2533 if (status != HXGE_OK) { 2534 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2535 "<== hxge_m_unicst: set unicast failed")); 2536 return (EINVAL); 2537 } 2538 2539 HXGE_DEBUG_MSG((hxgep, MAC_CTL, "<== hxge_m_unicst")); 2540 2541 return (0); 2542 } 2543 2544 static int 2545 hxge_m_multicst(void *arg, boolean_t add, const uint8_t *mca) 2546 { 2547 p_hxge_t hxgep = (p_hxge_t)arg; 2548 struct ether_addr addrp; 2549 2550 HXGE_DEBUG_MSG((hxgep, MAC_CTL, "==> hxge_m_multicst: add %d", add)); 2551 2552 bcopy(mca, (uint8_t *)&addrp, ETHERADDRL); 2553 2554 if (add) { 2555 if (hxge_add_mcast_addr(hxgep, &addrp)) { 2556 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2557 "<== hxge_m_multicst: add multicast failed")); 2558 return (EINVAL); 2559 } 2560 } else { 2561 if (hxge_del_mcast_addr(hxgep, &addrp)) { 2562 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2563 "<== hxge_m_multicst: del multicast failed")); 2564 return (EINVAL); 2565 } 2566 } 2567 2568 HXGE_DEBUG_MSG((hxgep, MAC_CTL, "<== hxge_m_multicst")); 2569 2570 return (0); 2571 } 2572 2573 static int 2574 hxge_m_promisc(void *arg, boolean_t on) 2575 { 2576 p_hxge_t hxgep = (p_hxge_t)arg; 2577 2578 HXGE_DEBUG_MSG((hxgep, MAC_CTL, "==> hxge_m_promisc: on %d", on)); 2579 2580 if (hxge_set_promisc(hxgep, on)) { 2581 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2582 "<== hxge_m_promisc: set promisc failed")); 2583 return (EINVAL); 2584 } 2585 2586 HXGE_DEBUG_MSG((hxgep, MAC_CTL, "<== hxge_m_promisc: on %d", on)); 2587 2588 return (0); 2589 } 2590 2591 static void 2592 hxge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp) 2593 { 2594 p_hxge_t hxgep = (p_hxge_t)arg; 2595 struct iocblk *iocp; 2596 boolean_t need_privilege; 2597 int err; 2598 int cmd; 2599 2600 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_ioctl")); 2601 2602 iocp = (struct iocblk *)mp->b_rptr; 2603 iocp->ioc_error = 0; 2604 need_privilege = B_TRUE; 2605 cmd = iocp->ioc_cmd; 2606 2607 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_ioctl: cmd 0x%08x", cmd)); 2608 switch (cmd) { 2609 default: 2610 miocnak(wq, mp, 0, EINVAL); 2611 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_ioctl: invalid")); 2612 return; 2613 2614 case LB_GET_INFO_SIZE: 2615 case LB_GET_INFO: 2616 case LB_GET_MODE: 2617 need_privilege = B_FALSE; 2618 break; 2619 2620 case LB_SET_MODE: 2621 break; 2622 2623 case ND_GET: 2624 need_privilege = B_FALSE; 2625 break; 2626 case ND_SET: 2627 break;
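/* Driver-private debug and diagnostic ioctls; no privilege required. */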
2628 2629 case HXGE_GET64: 2630 case HXGE_PUT64: 2631 case HXGE_GET_TX_RING_SZ: 2632 case HXGE_GET_TX_DESC: 2633 case HXGE_TX_SIDE_RESET: 2634 case HXGE_RX_SIDE_RESET: 2635 case HXGE_GLOBAL_RESET: 2636 case HXGE_RESET_MAC: 2637 case HXGE_PUT_TCAM: 2638 case HXGE_GET_TCAM: 2639 case HXGE_RTRACE: 2640 2641 need_privilege = B_FALSE; 2642 break; 2643 } 2644 2645 if (need_privilege) { 2646 err = secpolicy_net_config(iocp->ioc_cr, B_FALSE); 2647 if (err != 0) { 2648 miocnak(wq, mp, 0, err); 2649 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2650 "<== hxge_m_ioctl: no priv")); 2651 return; 2652 } 2653 } 2654 2655 switch (cmd) { 2656 case ND_GET: 2657 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "ND_GET command")); 2658 case ND_SET: 2659 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "ND_SET command")); 2660 hxge_param_ioctl(hxgep, wq, mp, iocp); 2661 break; 2662 2663 case LB_GET_MODE: 2664 case LB_SET_MODE: 2665 case LB_GET_INFO_SIZE: 2666 case LB_GET_INFO: 2667 hxge_loopback_ioctl(hxgep, wq, mp, iocp); 2668 break; 2669 2670 case HXGE_PUT_TCAM: 2671 case HXGE_GET_TCAM: 2672 case HXGE_GET64: 2673 case HXGE_PUT64: 2674 case HXGE_GET_TX_RING_SZ: 2675 case HXGE_GET_TX_DESC: 2676 case HXGE_TX_SIDE_RESET: 2677 case HXGE_RX_SIDE_RESET: 2678 case HXGE_GLOBAL_RESET: 2679 case HXGE_RESET_MAC: 2680 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, 2681 "==> hxge_m_ioctl: cmd 0x%x", cmd)); 2682 hxge_hw_ioctl(hxgep, wq, mp, iocp); 2683 break; 2684 } 2685 2686 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_ioctl")); 2687 } 2688 2689 extern void hxge_rx_hw_blank(void *arg, time_t ticks, uint_t count); 2690 2691 static void 2692 hxge_m_resources(void *arg) 2693 { 2694 p_hxge_t hxgep = arg; 2695 mac_rx_fifo_t mrf; 2696 p_rx_rcr_rings_t rcr_rings; 2697 p_rx_rcr_ring_t *rcr_p; 2698 p_rx_rcr_ring_t rcrp; 2699 uint32_t i, ndmas; 2700 int status; 2701 2702 HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_m_resources")); 2703 2704 MUTEX_ENTER(hxgep->genlock); 2705 2706 if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) { 2707 status = hxge_init(hxgep); 2708 if (status != HXGE_OK) { 2709 HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_m_resources: " 2710 "hxge_init failed")); 2711 MUTEX_EXIT(hxgep->genlock); 2712 return; 2713 } 2714 } 2715 2716 mrf.mrf_type = MAC_RX_FIFO; 2717 mrf.mrf_blank = hxge_rx_hw_blank; 2718 mrf.mrf_arg = (void *)hxgep; 2719 2720 mrf.mrf_normal_blank_time = RXDMA_RCR_TO_DEFAULT; 2721 mrf.mrf_normal_pkt_count = RXDMA_RCR_PTHRES_DEFAULT; 2722 2723 rcr_rings = hxgep->rx_rcr_rings; 2724 rcr_p = rcr_rings->rcr_rings; 2725 ndmas = rcr_rings->ndmas; 2726 2727 /* 2728 * Export our receive resources to the MAC layer. 2729 */ 2730 for (i = 0; i < ndmas; i++) { 2731 rcrp = (void *)(p_rx_rcr_ring_t)rcr_p[i]; 2732 rcrp->rcr_mac_handle = 2733 mac_resource_add(hxgep->mach, (mac_resource_t *)&mrf); 2734 2735 HXGE_DEBUG_MSG((hxgep, RX_CTL, 2736 "==> hxge_m_resources: vdma %d dma %d " 2737 "rcrptr 0x%016llx mac_handle 0x%016llx", 2738 i, rcrp->rdc, rcr_p[i], rcrp->rcr_mac_handle)); 2739 } 2740 2741 MUTEX_EXIT(hxgep->genlock); 2742 2743 HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_m_resources")); 2744 } 2745 2746 /* 2747 * Set an alternate MAC address 2748 */ 2749 static int 2750 hxge_altmac_set(p_hxge_t hxgep, uint8_t *maddr, mac_addr_slot_t slot) 2751 { 2752 uint64_t address; 2753 uint64_t tmp; 2754 hpi_status_t status; 2755 uint8_t addrn; 2756 int i; 2757 2758 /* 2759 * Convert a byte array to a 48 bit value. 
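 * e.g. {0x00, 0x14, 0x4f, 0x01, 0x02, 0x03} becomes 0x00144f010203.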
2760 * Need to check endianess if in doubt 2761 */ 2762 address = 0; 2763 for (i = 0; i < ETHERADDRL; i++) { 2764 tmp = maddr[i]; 2765 address <<= 8; 2766 address |= tmp; 2767 } 2768 2769 addrn = (uint8_t)slot; 2770 status = hpi_pfc_set_mac_address(hxgep->hpi_handle, addrn, address); 2771 if (status != HPI_SUCCESS) 2772 return (EIO); 2773 2774 return (0); 2775 } 2776 2777 static void 2778 hxge_mmac_kstat_update(p_hxge_t hxgep, mac_addr_slot_t slot) 2779 { 2780 p_hxge_mmac_stats_t mmac_stats; 2781 int i; 2782 hxge_mmac_t *mmac_info; 2783 2784 mmac_info = &hxgep->hxge_mmac_info; 2785 mmac_stats = &hxgep->statsp->mmac_stats; 2786 mmac_stats->mmac_max_cnt = mmac_info->num_mmac; 2787 mmac_stats->mmac_avail_cnt = mmac_info->naddrfree; 2788 2789 for (i = 0; i < ETHERADDRL; i++) { 2790 mmac_stats->mmac_avail_pool[slot].ether_addr_octet[i] = 2791 mmac_info->mac_pool[slot].addr[(ETHERADDRL - 1) - i]; 2792 } 2793 } 2794 2795 /* 2796 * Find an unused address slot, set the address value to the one specified, 2797 * enable the port to start filtering on the new MAC address. 2798 * Returns: 0 on success. 2799 */ 2800 int 2801 hxge_m_mmac_add(void *arg, mac_multi_addr_t *maddr) 2802 { 2803 p_hxge_t hxgep = arg; 2804 mac_addr_slot_t slot; 2805 hxge_mmac_t *mmac_info; 2806 int err; 2807 hxge_status_t status; 2808 2809 mutex_enter(hxgep->genlock); 2810 2811 /* 2812 * Make sure that hxge is initialized, if _start() has 2813 * not been called. 2814 */ 2815 if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) { 2816 status = hxge_init(hxgep); 2817 if (status != HXGE_OK) { 2818 mutex_exit(hxgep->genlock); 2819 return (ENXIO); 2820 } 2821 } 2822 2823 mmac_info = &hxgep->hxge_mmac_info; 2824 if (mmac_info->naddrfree == 0) { 2825 mutex_exit(hxgep->genlock); 2826 return (ENOSPC); 2827 } 2828 2829 if (!mac_unicst_verify(hxgep->mach, maddr->mma_addr, 2830 maddr->mma_addrlen)) { 2831 mutex_exit(hxgep->genlock); 2832 return (EINVAL); 2833 } 2834 2835 /* 2836 * Search for the first available slot. Because naddrfree 2837 * is not zero, we are guaranteed to find one. 2838 * Slot 0 is for unique (primary) MAC. The first alternate 2839 * MAC slot is slot 1. 2840 */ 2841 for (slot = 1; slot < mmac_info->num_mmac; slot++) { 2842 if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED)) 2843 break; 2844 } 2845 2846 ASSERT(slot < mmac_info->num_mmac); 2847 if ((err = hxge_altmac_set(hxgep, maddr->mma_addr, slot)) != 0) { 2848 mutex_exit(hxgep->genlock); 2849 return (err); 2850 } 2851 bcopy(maddr->mma_addr, mmac_info->mac_pool[slot].addr, ETHERADDRL); 2852 mmac_info->mac_pool[slot].flags |= MMAC_SLOT_USED; 2853 mmac_info->naddrfree--; 2854 hxge_mmac_kstat_update(hxgep, slot); 2855 2856 maddr->mma_slot = slot; 2857 2858 mutex_exit(hxgep->genlock); 2859 return (0); 2860 } 2861 2862 /* 2863 * Remove the specified mac address and update 2864 * the h/w not to filter the mac address anymore. 2865 * Returns: 0, on success. 2866 */ 2867 int 2868 hxge_m_mmac_remove(void *arg, mac_addr_slot_t slot) 2869 { 2870 p_hxge_t hxgep = arg; 2871 hxge_mmac_t *mmac_info; 2872 int err = 0; 2873 hxge_status_t status; 2874 2875 mutex_enter(hxgep->genlock); 2876 2877 /* 2878 * Make sure that hxge is initialized, if _start() has 2879 * not been called. 
2880 */ 2881 if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) { 2882 status = hxge_init(hxgep); 2883 if (status != HXGE_OK) { 2884 mutex_exit(hxgep->genlock); 2885 return (ENXIO); 2886 } 2887 } 2888 2889 mmac_info = &hxgep->hxge_mmac_info; 2890 if (slot <= 0 || slot >= mmac_info->num_mmac) { 2891 mutex_exit(hxgep->genlock); 2892 return (EINVAL); 2893 } 2894 2895 if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) { 2896 if (hpi_pfc_mac_addr_disable(hxgep->hpi_handle, slot) == 2897 HPI_SUCCESS) { 2898 mmac_info->mac_pool[slot].flags &= ~MMAC_SLOT_USED; 2899 mmac_info->naddrfree++; 2900 /* 2901 * Clear mac_pool[slot].addr so that kstat shows 0 2902 * alternate MAC address if the slot is not used. 2903 */ 2904 bzero(mmac_info->mac_pool[slot].addr, ETHERADDRL); 2905 hxge_mmac_kstat_update(hxgep, slot); 2906 } else { 2907 err = EIO; 2908 } 2909 } else { 2910 err = EINVAL; 2911 } 2912 2913 mutex_exit(hxgep->genlock); 2914 return (err); 2915 } 2916 2917 /* 2918 * Modify a mac address added by hxge_mmac_add(). 2919 * Returns: 0, on success. 2920 */ 2921 int 2922 hxge_m_mmac_modify(void *arg, mac_multi_addr_t *maddr) 2923 { 2924 p_hxge_t hxgep = arg; 2925 mac_addr_slot_t slot; 2926 hxge_mmac_t *mmac_info; 2927 int err = 0; 2928 hxge_status_t status; 2929 2930 if (!mac_unicst_verify(hxgep->mach, maddr->mma_addr, 2931 maddr->mma_addrlen)) 2932 return (EINVAL); 2933 2934 slot = maddr->mma_slot; 2935 2936 mutex_enter(hxgep->genlock); 2937 2938 /* 2939 * Make sure that hxge is initialized, if _start() has 2940 * not been called. 2941 */ 2942 if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) { 2943 status = hxge_init(hxgep); 2944 if (status != HXGE_OK) { 2945 mutex_exit(hxgep->genlock); 2946 return (ENXIO); 2947 } 2948 } 2949 2950 mmac_info = &hxgep->hxge_mmac_info; 2951 if (slot <= 0 || slot >= mmac_info->num_mmac) { 2952 mutex_exit(hxgep->genlock); 2953 return (EINVAL); 2954 } 2955 2956 if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) { 2957 if ((err = hxge_altmac_set(hxgep, maddr->mma_addr, 2958 slot)) == 0) { 2959 bcopy(maddr->mma_addr, mmac_info->mac_pool[slot].addr, 2960 ETHERADDRL); 2961 hxge_mmac_kstat_update(hxgep, slot); 2962 } 2963 } else { 2964 err = EINVAL; 2965 } 2966 2967 mutex_exit(hxgep->genlock); 2968 return (err); 2969 } 2970 2971 /* 2972 * static int 2973 * hxge_m_mmac_get() - Get the MAC address and other information 2974 * related to the slot. mma_flags should be set to 0 in the call. 2975 * Note: although kstat shows MAC address as zero when a slot is 2976 * not used, Crossbow expects hxge_m_mmac_get to copy factory MAC 2977 * to the caller as long as the slot is not using a user MAC address. 2978 * The following table shows the rules, 2979 * 2980 * USED VENDOR mma_addr 2981 * ------------------------------------------------------------ 2982 * (1) Slot uses a user MAC: yes no user MAC 2983 * (2) Slot uses a factory MAC: yes yes factory MAC 2984 * (3) Slot is not used but is 2985 * factory MAC capable: no yes factory MAC 2986 * (4) Slot is not used and is 2987 * not factory MAC capable: no no 0 2988 * ------------------------------------------------------------ 2989 */ 2990 int 2991 hxge_m_mmac_get(void *arg, mac_multi_addr_t *maddr) 2992 { 2993 hxge_t *hxgep = arg; 2994 mac_addr_slot_t slot; 2995 hxge_mmac_t *mmac_info; 2996 hxge_status_t status; 2997 2998 slot = maddr->mma_slot; 2999 3000 mutex_enter(hxgep->genlock); 3001 3002 /* 3003 * Make sure that hxge is initialized, if _start() has 3004 * not been called. 
3005 */ 3006 if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) { 3007 status = hxge_init(hxgep); 3008 if (status != HXGE_OK) { 3009 mutex_exit(hxgep->genlock); 3010 return (ENXIO); 3011 } 3012 } 3013 3014 mmac_info = &hxgep->hxge_mmac_info; 3015 if (slot <= 0 || slot >= mmac_info->num_mmac) { 3016 mutex_exit(hxgep->genlock); 3017 return (EINVAL); 3018 } 3019 3020 maddr->mma_flags = 0; 3021 if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) { 3022 maddr->mma_flags |= MMAC_SLOT_USED; 3023 bcopy(mmac_info->mac_pool[slot].addr, 3024 maddr->mma_addr, ETHERADDRL); 3025 maddr->mma_addrlen = ETHERADDRL; 3026 } 3027 3028 mutex_exit(hxgep->genlock); 3029 return (0); 3030 } 3031 3032 /*ARGSUSED*/ 3033 boolean_t 3034 hxge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data) 3035 { 3036 p_hxge_t hxgep = (p_hxge_t)arg; 3037 uint32_t *txflags = cap_data; 3038 multiaddress_capab_t *mmacp = cap_data; 3039 3040 switch (cap) { 3041 case MAC_CAPAB_HCKSUM: 3042 *txflags = HCKSUM_INET_PARTIAL; 3043 break; 3044 3045 case MAC_CAPAB_POLL: 3046 /* 3047 * There's nothing for us to fill in, simply returning B_TRUE 3048 * stating that we support polling is sufficient. 3049 */ 3050 break; 3051 3052 case MAC_CAPAB_MULTIADDRESS: 3053 /* 3054 * The number of MAC addresses made available by 3055 * this capability is one less than the total as 3056 * the primary address in slot 0 is counted in 3057 * the total. 3058 */ 3059 mmacp->maddr_naddr = PFC_N_MAC_ADDRESSES - 1; 3060 mmacp->maddr_naddrfree = hxgep->hxge_mmac_info.naddrfree; 3061 mmacp->maddr_flag = 0; /* No multiple factory macs */ 3062 mmacp->maddr_handle = hxgep; 3063 mmacp->maddr_add = hxge_m_mmac_add; 3064 mmacp->maddr_remove = hxge_m_mmac_remove; 3065 mmacp->maddr_modify = hxge_m_mmac_modify; 3066 mmacp->maddr_get = hxge_m_mmac_get; 3067 mmacp->maddr_reserve = NULL; /* No multiple factory macs */ 3068 break; 3069 default: 3070 return (B_FALSE); 3071 } 3072 return (B_TRUE); 3073 } 3074 3075 static boolean_t 3076 hxge_param_locked(mac_prop_id_t pr_num) 3077 { 3078 /* 3079 * All adv_* parameters are locked (read-only) while 3080 * the device is in any sort of loopback mode ... 3081 */ 3082 switch (pr_num) { 3083 case MAC_PROP_ADV_1000FDX_CAP: 3084 case MAC_PROP_EN_1000FDX_CAP: 3085 case MAC_PROP_ADV_1000HDX_CAP: 3086 case MAC_PROP_EN_1000HDX_CAP: 3087 case MAC_PROP_ADV_100FDX_CAP: 3088 case MAC_PROP_EN_100FDX_CAP: 3089 case MAC_PROP_ADV_100HDX_CAP: 3090 case MAC_PROP_EN_100HDX_CAP: 3091 case MAC_PROP_ADV_10FDX_CAP: 3092 case MAC_PROP_EN_10FDX_CAP: 3093 case MAC_PROP_ADV_10HDX_CAP: 3094 case MAC_PROP_EN_10HDX_CAP: 3095 case MAC_PROP_AUTONEG: 3096 case MAC_PROP_FLOWCTRL: 3097 return (B_TRUE); 3098 } 3099 return (B_FALSE); 3100 } 3101 3102 /* 3103 * callback functions for set/get of properties 3104 */ 3105 static int 3106 hxge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num, 3107 uint_t pr_valsize, const void *pr_val) 3108 { 3109 hxge_t *hxgep = barg; 3110 p_hxge_stats_t statsp; 3111 int err = 0; 3112 uint32_t new_mtu, old_framesize, new_framesize; 3113 3114 HXGE_DEBUG_MSG((hxgep, DLADM_CTL, "==> hxge_m_setprop")); 3115 3116 statsp = hxgep->statsp; 3117 mutex_enter(hxgep->genlock); 3118 if (statsp->port_stats.lb_mode != hxge_lb_normal && 3119 hxge_param_locked(pr_num)) { 3120 /* 3121 * All adv_* parameters are locked (read-only) 3122 * while the device is in any sort of loopback mode. 
3123 */ 3124 HXGE_DEBUG_MSG((hxgep, DLADM_CTL, 3125 "==> hxge_m_setprop: loopback mode: read only")); 3126 mutex_exit(hxgep->genlock); 3127 return (EBUSY); 3128 } 3129 3130 switch (pr_num) { 3131 /* 3132 * These properties are either not exist or read only 3133 */ 3134 case MAC_PROP_EN_1000FDX_CAP: 3135 case MAC_PROP_EN_100FDX_CAP: 3136 case MAC_PROP_EN_10FDX_CAP: 3137 case MAC_PROP_EN_1000HDX_CAP: 3138 case MAC_PROP_EN_100HDX_CAP: 3139 case MAC_PROP_EN_10HDX_CAP: 3140 case MAC_PROP_ADV_1000FDX_CAP: 3141 case MAC_PROP_ADV_1000HDX_CAP: 3142 case MAC_PROP_ADV_100FDX_CAP: 3143 case MAC_PROP_ADV_100HDX_CAP: 3144 case MAC_PROP_ADV_10FDX_CAP: 3145 case MAC_PROP_ADV_10HDX_CAP: 3146 case MAC_PROP_STATUS: 3147 case MAC_PROP_SPEED: 3148 case MAC_PROP_DUPLEX: 3149 case MAC_PROP_AUTONEG: 3150 /* 3151 * Flow control is handled in the shared domain and 3152 * it is readonly here. 3153 */ 3154 case MAC_PROP_FLOWCTRL: 3155 err = EINVAL; 3156 HXGE_DEBUG_MSG((hxgep, DLADM_CTL, 3157 "==> hxge_m_setprop: read only property %d", 3158 pr_num)); 3159 break; 3160 3161 case MAC_PROP_MTU: 3162 bcopy(pr_val, &new_mtu, sizeof (new_mtu)); 3163 HXGE_DEBUG_MSG((hxgep, DLADM_CTL, 3164 "==> hxge_m_setprop: set MTU: %d", new_mtu)); 3165 3166 new_framesize = new_mtu + MTU_TO_FRAME_SIZE; 3167 if (new_framesize == hxgep->vmac.maxframesize) { 3168 err = 0; 3169 break; 3170 } 3171 3172 if (hxgep->hxge_mac_state == HXGE_MAC_STARTED) { 3173 err = EBUSY; 3174 break; 3175 } 3176 3177 if (new_framesize < MIN_FRAME_SIZE || 3178 new_framesize > MAX_FRAME_SIZE) { 3179 err = EINVAL; 3180 break; 3181 } 3182 3183 old_framesize = hxgep->vmac.maxframesize; 3184 hxgep->vmac.maxframesize = (uint16_t)new_framesize; 3185 3186 if (hxge_vmac_set_framesize(hxgep)) { 3187 hxgep->vmac.maxframesize = 3188 (uint16_t)old_framesize; 3189 err = EINVAL; 3190 break; 3191 } 3192 3193 err = mac_maxsdu_update(hxgep->mach, new_mtu); 3194 if (err) { 3195 hxgep->vmac.maxframesize = 3196 (uint16_t)old_framesize; 3197 (void) hxge_vmac_set_framesize(hxgep); 3198 } 3199 3200 HXGE_DEBUG_MSG((hxgep, DLADM_CTL, 3201 "==> hxge_m_setprop: set MTU: %d maxframe %d", 3202 new_mtu, hxgep->vmac.maxframesize)); 3203 break; 3204 3205 case MAC_PROP_PRIVATE: 3206 HXGE_DEBUG_MSG((hxgep, DLADM_CTL, 3207 "==> hxge_m_setprop: private property")); 3208 err = hxge_set_priv_prop(hxgep, pr_name, pr_valsize, 3209 pr_val); 3210 break; 3211 3212 default: 3213 err = ENOTSUP; 3214 break; 3215 } 3216 3217 mutex_exit(hxgep->genlock); 3218 3219 HXGE_DEBUG_MSG((hxgep, DLADM_CTL, 3220 "<== hxge_m_setprop (return %d)", err)); 3221 3222 return (err); 3223 } 3224 3225 /* ARGSUSED */ 3226 static int 3227 hxge_get_def_val(hxge_t *hxgep, mac_prop_id_t pr_num, uint_t pr_valsize, 3228 void *pr_val) 3229 { 3230 int err = 0; 3231 link_flowctrl_t fl; 3232 3233 switch (pr_num) { 3234 case MAC_PROP_DUPLEX: 3235 *(uint8_t *)pr_val = 2; 3236 break; 3237 case MAC_PROP_AUTONEG: 3238 *(uint8_t *)pr_val = 0; 3239 break; 3240 case MAC_PROP_FLOWCTRL: 3241 if (pr_valsize < sizeof (link_flowctrl_t)) 3242 return (EINVAL); 3243 fl = LINK_FLOWCTRL_TX; 3244 bcopy(&fl, pr_val, sizeof (fl)); 3245 break; 3246 default: 3247 err = ENOTSUP; 3248 break; 3249 } 3250 return (err); 3251 } 3252 3253 static int 3254 hxge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num, 3255 uint_t pr_flags, uint_t pr_valsize, void *pr_val, uint_t *perm) 3256 { 3257 hxge_t *hxgep = barg; 3258 p_hxge_stats_t statsp = hxgep->statsp; 3259 int err = 0; 3260 link_flowctrl_t fl; 3261 uint64_t tmp = 0; 3262 link_state_t ls; 3263 3264 
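/* Properties are reported as read/write by default; the read-only ones override *perm below. */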
HXGE_DEBUG_MSG((hxgep, DLADM_CTL, 3265 "==> hxge_m_getprop: pr_num %d", pr_num)); 3266 3267 if (pr_valsize == 0) 3268 return (EINVAL); 3269 3270 *perm = MAC_PROP_PERM_RW; 3271 3272 if ((pr_flags & MAC_PROP_DEFAULT) && (pr_num != MAC_PROP_PRIVATE)) { 3273 err = hxge_get_def_val(hxgep, pr_num, pr_valsize, pr_val); 3274 return (err); 3275 } 3276 3277 bzero(pr_val, pr_valsize); 3278 switch (pr_num) { 3279 case MAC_PROP_DUPLEX: 3280 *perm = MAC_PROP_PERM_READ; 3281 *(uint8_t *)pr_val = statsp->mac_stats.link_duplex; 3282 HXGE_DEBUG_MSG((hxgep, DLADM_CTL, 3283 "==> hxge_m_getprop: duplex mode %d", 3284 *(uint8_t *)pr_val)); 3285 break; 3286 3287 case MAC_PROP_SPEED: 3288 *perm = MAC_PROP_PERM_READ; 3289 if (pr_valsize < sizeof (uint64_t)) 3290 return (EINVAL); 3291 tmp = statsp->mac_stats.link_speed * 1000000ull; 3292 bcopy(&tmp, pr_val, sizeof (tmp)); 3293 break; 3294 3295 case MAC_PROP_STATUS: 3296 *perm = MAC_PROP_PERM_READ; 3297 if (pr_valsize < sizeof (link_state_t)) 3298 return (EINVAL); 3299 if (!statsp->mac_stats.link_up) 3300 ls = LINK_STATE_DOWN; 3301 else 3302 ls = LINK_STATE_UP; 3303 bcopy(&ls, pr_val, sizeof (ls)); 3304 break; 3305 3306 case MAC_PROP_FLOWCTRL: 3307 /* 3308 * Flow control is supported by the shared domain and 3309 * it is currently transmit only 3310 */ 3311 *perm = MAC_PROP_PERM_READ; 3312 if (pr_valsize < sizeof (link_flowctrl_t)) 3313 return (EINVAL); 3314 fl = LINK_FLOWCTRL_TX; 3315 bcopy(&fl, pr_val, sizeof (fl)); 3316 break; 3317 case MAC_PROP_AUTONEG: 3318 /* 10G link only and it is not negotiable */ 3319 *perm = MAC_PROP_PERM_READ; 3320 *(uint8_t *)pr_val = 0; 3321 break; 3322 case MAC_PROP_ADV_1000FDX_CAP: 3323 case MAC_PROP_ADV_100FDX_CAP: 3324 case MAC_PROP_ADV_10FDX_CAP: 3325 case MAC_PROP_ADV_1000HDX_CAP: 3326 case MAC_PROP_ADV_100HDX_CAP: 3327 case MAC_PROP_ADV_10HDX_CAP: 3328 case MAC_PROP_EN_1000FDX_CAP: 3329 case MAC_PROP_EN_100FDX_CAP: 3330 case MAC_PROP_EN_10FDX_CAP: 3331 case MAC_PROP_EN_1000HDX_CAP: 3332 case MAC_PROP_EN_100HDX_CAP: 3333 case MAC_PROP_EN_10HDX_CAP: 3334 err = ENOTSUP; 3335 break; 3336 3337 case MAC_PROP_PRIVATE: 3338 err = hxge_get_priv_prop(hxgep, pr_name, pr_flags, 3339 pr_valsize, pr_val); 3340 break; 3341 default: 3342 err = EINVAL; 3343 break; 3344 } 3345 3346 HXGE_DEBUG_MSG((hxgep, DLADM_CTL, "<== hxge_m_getprop")); 3347 3348 return (err); 3349 } 3350 3351 /* ARGSUSED */ 3352 static int 3353 hxge_set_priv_prop(p_hxge_t hxgep, const char *pr_name, uint_t pr_valsize, 3354 const void *pr_val) 3355 { 3356 p_hxge_param_t param_arr = hxgep->param_arr; 3357 int err = 0; 3358 3359 HXGE_DEBUG_MSG((hxgep, DLADM_CTL, 3360 "==> hxge_set_priv_prop: name %s (value %s)", pr_name, pr_val)); 3361 3362 if (pr_val == NULL) { 3363 return (EINVAL); 3364 } 3365 3366 /* Blanking */ 3367 if (strcmp(pr_name, "_rxdma_intr_time") == 0) { 3368 err = hxge_param_rx_intr_time(hxgep, NULL, NULL, 3369 (char *)pr_val, (caddr_t)¶m_arr[param_rxdma_intr_time]); 3370 } else if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) { 3371 err = hxge_param_rx_intr_pkts(hxgep, NULL, NULL, 3372 (char *)pr_val, (caddr_t)¶m_arr[param_rxdma_intr_pkts]); 3373 3374 /* Classification */ 3375 } else if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) { 3376 err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val, 3377 (caddr_t)¶m_arr[param_class_opt_ipv4_tcp]); 3378 } else if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) { 3379 err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val, 3380 (caddr_t)¶m_arr[param_class_opt_ipv4_udp]); 3381 } else if (strcmp(pr_name, 
"_class_opt_ipv4_ah") == 0) { 3382 err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val, 3383 (caddr_t)¶m_arr[param_class_opt_ipv4_ah]); 3384 } else if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) { 3385 err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val, 3386 (caddr_t)¶m_arr[param_class_opt_ipv4_sctp]); 3387 } else if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) { 3388 err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val, 3389 (caddr_t)¶m_arr[param_class_opt_ipv6_tcp]); 3390 } else if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) { 3391 err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val, 3392 (caddr_t)¶m_arr[param_class_opt_ipv6_udp]); 3393 } else if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) { 3394 err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val, 3395 (caddr_t)¶m_arr[param_class_opt_ipv6_ah]); 3396 } else if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) { 3397 err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val, 3398 (caddr_t)¶m_arr[param_class_opt_ipv6_sctp]); 3399 } else { 3400 err = EINVAL; 3401 } 3402 3403 HXGE_DEBUG_MSG((hxgep, DLADM_CTL, 3404 "<== hxge_set_priv_prop: err %d", err)); 3405 3406 return (err); 3407 } 3408 3409 static int 3410 hxge_get_priv_prop(p_hxge_t hxgep, const char *pr_name, uint_t pr_flags, 3411 uint_t pr_valsize, void *pr_val) 3412 { 3413 p_hxge_param_t param_arr = hxgep->param_arr; 3414 char valstr[MAXNAMELEN]; 3415 int err = 0; 3416 uint_t strsize; 3417 int value = 0; 3418 3419 HXGE_DEBUG_MSG((hxgep, DLADM_CTL, 3420 "==> hxge_get_priv_prop: property %s", pr_name)); 3421 3422 if (pr_flags & MAC_PROP_DEFAULT) { 3423 /* Receive Interrupt Blanking Parameters */ 3424 if (strcmp(pr_name, "_rxdma_intr_time") == 0) { 3425 value = RXDMA_RCR_TO_DEFAULT; 3426 } else if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) { 3427 value = RXDMA_RCR_PTHRES_DEFAULT; 3428 3429 /* Classification and Load Distribution Configuration */ 3430 } else if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0 || 3431 strcmp(pr_name, "_class_opt_ipv4_udp") == 0 || 3432 strcmp(pr_name, "_class_opt_ipv4_ah") == 0 || 3433 strcmp(pr_name, "_class_opt_ipv4_sctp") == 0 || 3434 strcmp(pr_name, "_class_opt_ipv6_tcp") == 0 || 3435 strcmp(pr_name, "_class_opt_ipv6_udp") == 0 || 3436 strcmp(pr_name, "_class_opt_ipv6_ah") == 0 || 3437 strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) { 3438 value = HXGE_CLASS_TCAM_LOOKUP; 3439 } else { 3440 err = EINVAL; 3441 } 3442 } else { 3443 /* Receive Interrupt Blanking Parameters */ 3444 if (strcmp(pr_name, "_rxdma_intr_time") == 0) { 3445 value = hxgep->intr_timeout; 3446 } else if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) { 3447 value = hxgep->intr_threshold; 3448 3449 /* Classification and Load Distribution Configuration */ 3450 } else if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) { 3451 err = hxge_param_get_ip_opt(hxgep, NULL, NULL, 3452 (caddr_t)¶m_arr[param_class_opt_ipv4_tcp]); 3453 3454 value = (int)param_arr[param_class_opt_ipv4_tcp].value; 3455 } else if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) { 3456 err = hxge_param_get_ip_opt(hxgep, NULL, NULL, 3457 (caddr_t)¶m_arr[param_class_opt_ipv4_udp]); 3458 3459 value = (int)param_arr[param_class_opt_ipv4_udp].value; 3460 } else if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) { 3461 err = hxge_param_get_ip_opt(hxgep, NULL, NULL, 3462 (caddr_t)¶m_arr[param_class_opt_ipv4_ah]); 3463 3464 value = (int)param_arr[param_class_opt_ipv4_ah].value; 3465 } else if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) { 3466 err = hxge_param_get_ip_opt(hxgep, NULL, 
NULL, 3467 (caddr_t)¶m_arr[param_class_opt_ipv4_sctp]); 3468 3469 value = (int)param_arr[param_class_opt_ipv4_sctp].value; 3470 } else if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) { 3471 err = hxge_param_get_ip_opt(hxgep, NULL, NULL, 3472 (caddr_t)¶m_arr[param_class_opt_ipv6_tcp]); 3473 3474 value = (int)param_arr[param_class_opt_ipv6_tcp].value; 3475 } else if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) { 3476 err = hxge_param_get_ip_opt(hxgep, NULL, NULL, 3477 (caddr_t)¶m_arr[param_class_opt_ipv6_udp]); 3478 3479 value = (int)param_arr[param_class_opt_ipv6_udp].value; 3480 } else if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) { 3481 err = hxge_param_get_ip_opt(hxgep, NULL, NULL, 3482 (caddr_t)¶m_arr[param_class_opt_ipv6_ah]); 3483 3484 value = (int)param_arr[param_class_opt_ipv6_ah].value; 3485 } else if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) { 3486 err = hxge_param_get_ip_opt(hxgep, NULL, NULL, 3487 (caddr_t)¶m_arr[param_class_opt_ipv6_sctp]); 3488 3489 value = (int)param_arr[param_class_opt_ipv6_sctp].value; 3490 } else { 3491 err = EINVAL; 3492 } 3493 } 3494 3495 if (err == 0) { 3496 (void) snprintf(valstr, sizeof (valstr), "0x%x", value); 3497 3498 strsize = (uint_t)strlen(valstr); 3499 if (pr_valsize < strsize) { 3500 err = ENOBUFS; 3501 } else { 3502 (void) strlcpy(pr_val, valstr, pr_valsize); 3503 } 3504 } 3505 3506 HXGE_DEBUG_MSG((hxgep, DLADM_CTL, 3507 "<== hxge_get_priv_prop: return %d", err)); 3508 3509 return (err); 3510 } 3511 /* 3512 * Module loading and removing entry points. 3513 */ 3514 DDI_DEFINE_STREAM_OPS(hxge_dev_ops, nulldev, nulldev, hxge_attach, hxge_detach, 3515 nodev, NULL, D_MP, NULL, NULL); 3516 3517 extern struct mod_ops mod_driverops; 3518 3519 #define HXGE_DESC_VER "HXGE 10Gb Ethernet Driver" 3520 3521 /* 3522 * Module linkage information for the kernel. 
3523 */ 3524 static struct modldrv hxge_modldrv = { 3525 &mod_driverops, 3526 HXGE_DESC_VER, 3527 &hxge_dev_ops 3528 }; 3529 3530 static struct modlinkage modlinkage = { 3531 MODREV_1, (void *) &hxge_modldrv, NULL 3532 }; 3533 3534 int 3535 _init(void) 3536 { 3537 int status; 3538 3539 HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _init")); 3540 mac_init_ops(&hxge_dev_ops, "hxge"); 3541 status = ddi_soft_state_init(&hxge_list, sizeof (hxge_t), 0); 3542 if (status != 0) { 3543 HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL, 3544 "failed to init device soft state")); 3545 mac_fini_ops(&hxge_dev_ops); 3546 goto _init_exit; 3547 } 3548 3549 status = mod_install(&modlinkage); 3550 if (status != 0) { 3551 ddi_soft_state_fini(&hxge_list); 3552 HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL, "Mod install failed")); 3553 goto _init_exit; 3554 } 3555 3556 MUTEX_INIT(&hxge_common_lock, NULL, MUTEX_DRIVER, NULL); 3557 3558 _init_exit: 3559 HXGE_DEBUG_MSG((NULL, MOD_CTL, "_init status = 0x%X", status)); 3560 3561 return (status); 3562 } 3563 3564 int 3565 _fini(void) 3566 { 3567 int status; 3568 3569 HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini")); 3570 3571 HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini: mod_remove")); 3572 3573 if (hxge_mblks_pending) 3574 return (EBUSY); 3575 3576 status = mod_remove(&modlinkage); 3577 if (status != DDI_SUCCESS) { 3578 HXGE_DEBUG_MSG((NULL, MOD_CTL, 3579 "Module removal failed 0x%08x", status)); 3580 goto _fini_exit; 3581 } 3582 3583 mac_fini_ops(&hxge_dev_ops); 3584 3585 ddi_soft_state_fini(&hxge_list); 3586 3587 MUTEX_DESTROY(&hxge_common_lock); 3588 3589 _fini_exit: 3590 HXGE_DEBUG_MSG((NULL, MOD_CTL, "_fini status = 0x%08x", status)); 3591 3592 return (status); 3593 } 3594 3595 int 3596 _info(struct modinfo *modinfop) 3597 { 3598 int status; 3599 3600 HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _info")); 3601 status = mod_info(&modlinkage, modinfop); 3602 HXGE_DEBUG_MSG((NULL, MOD_CTL, " _info status = 0x%X", status)); 3603 3604 return (status); 3605 } 3606 3607 /*ARGSUSED*/ 3608 hxge_status_t 3609 hxge_add_intrs(p_hxge_t hxgep) 3610 { 3611 int intr_types; 3612 int type = 0; 3613 int ddi_status = DDI_SUCCESS; 3614 hxge_status_t status = HXGE_OK; 3615 3616 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs")); 3617 3618 hxgep->hxge_intr_type.intr_registered = B_FALSE; 3619 hxgep->hxge_intr_type.intr_enabled = B_FALSE; 3620 hxgep->hxge_intr_type.msi_intx_cnt = 0; 3621 hxgep->hxge_intr_type.intr_added = 0; 3622 hxgep->hxge_intr_type.niu_msi_enable = B_FALSE; 3623 hxgep->hxge_intr_type.intr_type = 0; 3624 3625 if (hxge_msi_enable) { 3626 hxgep->hxge_intr_type.niu_msi_enable = B_TRUE; 3627 } 3628 3629 /* Get the supported interrupt types */ 3630 if ((ddi_status = ddi_intr_get_supported_types(hxgep->dip, &intr_types)) 3631 != DDI_SUCCESS) { 3632 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "<== hxge_add_intrs: " 3633 "ddi_intr_get_supported_types failed: status 0x%08x", 3634 ddi_status)); 3635 return (HXGE_ERROR | HXGE_DDI_FAILED); 3636 } 3637 3638 hxgep->hxge_intr_type.intr_types = intr_types; 3639 3640 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: " 3641 "ddi_intr_get_supported_types: 0x%08x", intr_types)); 3642 3643 /* 3644 * Pick the interrupt type to use (MSI-X, MSI, or INTx) based on hxge_msi_enable: 3645 * (1): 1 - MSI 3646 * (2): 2 - MSI-X 3647 * others - FIXED (INTx) 3648 */ 3649 switch (hxge_msi_enable) { 3650 default: 3651 type = DDI_INTR_TYPE_FIXED; 3652 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: " 3653 "use fixed (intx emulation) type %08x", type)); 3654 break; 3655 3656 case 2: 3657 HXGE_DEBUG_MSG((hxgep, INT_CTL,
"==> hxge_add_intrs: " 3658 "ddi_intr_get_supported_types: 0x%08x", intr_types)); 3659 if (intr_types & DDI_INTR_TYPE_MSIX) { 3660 type = DDI_INTR_TYPE_MSIX; 3661 HXGE_DEBUG_MSG((hxgep, INT_CTL, 3662 "==> hxge_add_intrs: " 3663 "ddi_intr_get_supported_types: MSIX 0x%08x", type)); 3664 } else if (intr_types & DDI_INTR_TYPE_MSI) { 3665 type = DDI_INTR_TYPE_MSI; 3666 HXGE_DEBUG_MSG((hxgep, INT_CTL, 3667 "==> hxge_add_intrs: " 3668 "ddi_intr_get_supported_types: MSI 0x%08x", type)); 3669 } else if (intr_types & DDI_INTR_TYPE_FIXED) { 3670 type = DDI_INTR_TYPE_FIXED; 3671 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: " 3672 "ddi_intr_get_supported_types: FIXED 0x%08x", type)); 3673 } 3674 break; 3675 3676 case 1: 3677 if (intr_types & DDI_INTR_TYPE_MSI) { 3678 type = DDI_INTR_TYPE_MSI; 3679 HXGE_DEBUG_MSG((hxgep, INT_CTL, 3680 "==> hxge_add_intrs: " 3681 "ddi_intr_get_supported_types: MSI 0x%08x", type)); 3682 } else if (intr_types & DDI_INTR_TYPE_MSIX) { 3683 type = DDI_INTR_TYPE_MSIX; 3684 HXGE_DEBUG_MSG((hxgep, INT_CTL, 3685 "==> hxge_add_intrs: " 3686 "ddi_intr_get_supported_types: MSIX 0x%08x", type)); 3687 } else if (intr_types & DDI_INTR_TYPE_FIXED) { 3688 type = DDI_INTR_TYPE_FIXED; 3689 HXGE_DEBUG_MSG((hxgep, INT_CTL, 3690 "==> hxge_add_intrs: " 3691 "ddi_intr_get_supported_types: FIXED 0x%08x", type)); 3692 } 3693 } 3694 3695 hxgep->hxge_intr_type.intr_type = type; 3696 if ((type == DDI_INTR_TYPE_MSIX || type == DDI_INTR_TYPE_MSI || 3697 type == DDI_INTR_TYPE_FIXED) && 3698 hxgep->hxge_intr_type.niu_msi_enable) { 3699 if ((status = hxge_add_intrs_adv(hxgep)) != DDI_SUCCESS) { 3700 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3701 " hxge_add_intrs: " 3702 " hxge_add_intrs_adv failed: status 0x%08x", 3703 status)); 3704 return (status); 3705 } else { 3706 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_add_intrs: " 3707 "interrupts registered : type %d", type)); 3708 hxgep->hxge_intr_type.intr_registered = B_TRUE; 3709 3710 HXGE_DEBUG_MSG((hxgep, DDI_CTL, 3711 "\nAdded advanced hxge add_intr_adv " 3712 "intr type 0x%x\n", type)); 3713 3714 return (status); 3715 } 3716 } 3717 3718 if (!hxgep->hxge_intr_type.intr_registered) { 3719 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3720 "==> hxge_add_intrs: failed to register interrupts")); 3721 return (HXGE_ERROR | HXGE_DDI_FAILED); 3722 } 3723 3724 HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs")); 3725 3726 return (status); 3727 } 3728 3729 /*ARGSUSED*/ 3730 static hxge_status_t 3731 hxge_add_soft_intrs(p_hxge_t hxgep) 3732 { 3733 int ddi_status = DDI_SUCCESS; 3734 hxge_status_t status = HXGE_OK; 3735 3736 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_add_soft_intrs")); 3737 3738 hxgep->resched_id = NULL; 3739 hxgep->resched_running = B_FALSE; 3740 ddi_status = ddi_add_softintr(hxgep->dip, DDI_SOFTINT_LOW, 3741 &hxgep->resched_id, NULL, NULL, hxge_reschedule, (caddr_t)hxgep); 3742 if (ddi_status != DDI_SUCCESS) { 3743 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "<== hxge_add_soft_intrs: " 3744 "ddi_add_softintr failed: status 0x%08x", ddi_status)); 3745 return (HXGE_ERROR | HXGE_DDI_FAILED); 3746 } 3747 3748 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_add_soft_intrs")); 3749 3750 return (status); 3751 } 3752 3753 /*ARGSUSED*/ 3754 static hxge_status_t 3755 hxge_add_intrs_adv(p_hxge_t hxgep) 3756 { 3757 int intr_type; 3758 p_hxge_intr_t intrp; 3759 hxge_status_t status; 3760 3761 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv")); 3762 3763 intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type; 3764 intr_type = intrp->intr_type; 3765 3766 HXGE_DEBUG_MSG((hxgep,
INT_CTL, "==> hxge_add_intrs_adv: type 0x%x", 3767 intr_type)); 3768 3769 switch (intr_type) { 3770 case DDI_INTR_TYPE_MSI: /* 0x2 */ 3771 case DDI_INTR_TYPE_MSIX: /* 0x4 */ 3772 status = hxge_add_intrs_adv_type(hxgep, intr_type); 3773 break; 3774 3775 case DDI_INTR_TYPE_FIXED: /* 0x1 */ 3776 status = hxge_add_intrs_adv_type_fix(hxgep, intr_type); 3777 break; 3778 3779 default: 3780 status = HXGE_ERROR; 3781 break; 3782 } 3783 3784 HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs_adv")); 3785 3786 return (status); 3787 } 3788 3789 /*ARGSUSED*/ 3790 static hxge_status_t 3791 hxge_add_intrs_adv_type(p_hxge_t hxgep, uint32_t int_type) 3792 { 3793 dev_info_t *dip = hxgep->dip; 3794 p_hxge_ldg_t ldgp; 3795 p_hxge_intr_t intrp; 3796 uint_t *inthandler; 3797 void *arg1, *arg2; 3798 int behavior; 3799 int nintrs, navail; 3800 int nactual, nrequired; 3801 int inum = 0; 3802 int loop = 0; 3803 int x, y; 3804 int ddi_status = DDI_SUCCESS; 3805 hxge_status_t status = HXGE_OK; 3806 3807 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv_type")); 3808 3809 intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type; 3810 3811 ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs); 3812 if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) { 3813 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3814 "ddi_intr_get_nintrs() failed, status: 0x%x%, " 3815 "nintrs: %d", ddi_status, nintrs)); 3816 return (HXGE_ERROR | HXGE_DDI_FAILED); 3817 } 3818 3819 ddi_status = ddi_intr_get_navail(dip, int_type, &navail); 3820 if ((ddi_status != DDI_SUCCESS) || (navail == 0)) { 3821 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3822 "ddi_intr_get_navail() failed, status: 0x%x%, " 3823 "nintrs: %d", ddi_status, navail)); 3824 return (HXGE_ERROR | HXGE_DDI_FAILED); 3825 } 3826 3827 HXGE_DEBUG_MSG((hxgep, INT_CTL, 3828 "ddi_intr_get_navail() returned: intr type %d nintrs %d, navail %d", 3829 int_type, nintrs, navail)); 3830 3831 if (int_type == DDI_INTR_TYPE_MSI && !ISP2(navail)) { 3832 /* MSI must be power of 2 */ 3833 if ((navail & 16) == 16) { 3834 navail = 16; 3835 } else if ((navail & 8) == 8) { 3836 navail = 8; 3837 } else if ((navail & 4) == 4) { 3838 navail = 4; 3839 } else if ((navail & 2) == 2) { 3840 navail = 2; 3841 } else { 3842 navail = 1; 3843 } 3844 HXGE_DEBUG_MSG((hxgep, INT_CTL, 3845 "ddi_intr_get_navail(): (msi power of 2) nintrs %d, " 3846 "navail %d", nintrs, navail)); 3847 } 3848 3849 HXGE_DEBUG_MSG((hxgep, INT_CTL, 3850 "requesting: intr type %d nintrs %d, navail %d", 3851 int_type, nintrs, navail)); 3852 3853 behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? 
/*ARGSUSED*/
static hxge_status_t
hxge_add_intrs_adv_type(p_hxge_t hxgep, uint32_t int_type)
{
	dev_info_t	*dip = hxgep->dip;
	p_hxge_ldg_t	ldgp;
	p_hxge_intr_t	intrp;
	uint_t		*inthandler;
	void		*arg1, *arg2;
	int		behavior;
	int		nintrs, navail;
	int		nactual, nrequired;
	int		inum = 0;
	int		loop = 0;
	int		x, y;
	int		ddi_status = DDI_SUCCESS;
	hxge_status_t	status = HXGE_OK;

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv_type"));

	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;

	ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
	if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "ddi_intr_get_nintrs() failed, status: 0x%x, "
		    "nintrs: %d", ddi_status, nintrs));
		return (HXGE_ERROR | HXGE_DDI_FAILED);
	}

	ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
	if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "ddi_intr_get_navail() failed, status: 0x%x, "
		    "navail: %d", ddi_status, navail));
		return (HXGE_ERROR | HXGE_DDI_FAILED);
	}

	HXGE_DEBUG_MSG((hxgep, INT_CTL,
	    "ddi_intr_get_navail() returned: intr type %d nintrs %d, navail %d",
	    int_type, nintrs, navail));

	if (int_type == DDI_INTR_TYPE_MSI && !ISP2(navail)) {
		/* MSI must be power of 2 */
		if ((navail & 16) == 16) {
			navail = 16;
		} else if ((navail & 8) == 8) {
			navail = 8;
		} else if ((navail & 4) == 4) {
			navail = 4;
		} else if ((navail & 2) == 2) {
			navail = 2;
		} else {
			navail = 1;
		}
		HXGE_DEBUG_MSG((hxgep, INT_CTL,
		    "ddi_intr_get_navail(): (msi power of 2) nintrs %d, "
		    "navail %d", nintrs, navail));
	}

	HXGE_DEBUG_MSG((hxgep, INT_CTL,
	    "requesting: intr type %d nintrs %d, navail %d",
	    int_type, nintrs, navail));

	behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
	    DDI_INTR_ALLOC_NORMAL);
	intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
	intrp->htable = kmem_zalloc(intrp->intr_size, KM_SLEEP);

	ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
	    navail, &nactual, behavior);
	if (ddi_status != DDI_SUCCESS || nactual == 0) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    " ddi_intr_alloc() failed: %d", ddi_status));
		kmem_free(intrp->htable, intrp->intr_size);
		return (HXGE_ERROR | HXGE_DDI_FAILED);
	}

	HXGE_DEBUG_MSG((hxgep, INT_CTL,
	    "ddi_intr_alloc() returned: navail %d nactual %d",
	    navail, nactual));

	if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
	    (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    " ddi_intr_get_pri() failed: %d", ddi_status));
		/* Free already allocated interrupts */
		for (y = 0; y < nactual; y++) {
			(void) ddi_intr_free(intrp->htable[y]);
		}

		kmem_free(intrp->htable, intrp->intr_size);
		return (HXGE_ERROR | HXGE_DDI_FAILED);
	}

	nrequired = 0;
	status = hxge_ldgv_init(hxgep, &nactual, &nrequired);
	if (status != HXGE_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "hxge_add_intrs_adv_type: hxge_ldgv_init "
		    "failed: 0x%x", status));
		/* Free already allocated interrupts */
		for (y = 0; y < nactual; y++) {
			(void) ddi_intr_free(intrp->htable[y]);
		}

		kmem_free(intrp->htable, intrp->intr_size);
		return (status);
	}

	ldgp = hxgep->ldgvp->ldgp;
	HXGE_DEBUG_MSG((hxgep, INT_CTL,
	    "After hxge_ldgv_init(): nreq %d nactual %d", nrequired, nactual));

	if (nactual < nrequired)
		loop = nactual;
	else
		loop = nrequired;

	for (x = 0; x < loop; x++, ldgp++) {
		ldgp->vector = (uint8_t)x;
		arg1 = ldgp->ldvp;
		arg2 = hxgep;
		if (ldgp->nldvs == 1) {
			inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
			HXGE_DEBUG_MSG((hxgep, INT_CTL,
			    "hxge_add_intrs_adv_type: arg1 0x%x arg2 0x%x: "
			    "1-1 int handler (entry %d)\n",
			    arg1, arg2, x));
		} else if (ldgp->nldvs > 1) {
			inthandler = (uint_t *)ldgp->sys_intr_handler;
			HXGE_DEBUG_MSG((hxgep, INT_CTL,
			    "hxge_add_intrs_adv_type: arg1 0x%x arg2 0x%x: "
			    "nldevs %d int handler (entry %d)\n",
			    arg1, arg2, ldgp->nldvs, x));
		}
		HXGE_DEBUG_MSG((hxgep, INT_CTL,
		    "==> hxge_add_intrs_adv_type: ddi_add_intr(inum) #%d "
		    "htable 0x%llx", x, intrp->htable[x]));

		if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
		    (ddi_intr_handler_t *)inthandler, arg1, arg2)) !=
		    DDI_SUCCESS) {
			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
			    "==> hxge_add_intrs_adv_type: failed #%d "
			    "status 0x%x", x, ddi_status));
			for (y = 0; y < intrp->intr_added; y++) {
				(void) ddi_intr_remove_handler(
				    intrp->htable[y]);
			}

			/* Free already allocated interrupts */
			for (y = 0; y < nactual; y++) {
				(void) ddi_intr_free(intrp->htable[y]);
			}
			kmem_free(intrp->htable, intrp->intr_size);

			(void) hxge_ldgv_uninit(hxgep);

			return (HXGE_ERROR | HXGE_DDI_FAILED);
		}

		intrp->intr_added++;
	}
	intrp->msi_intx_cnt = nactual;

	HXGE_DEBUG_MSG((hxgep, INT_CTL,
	    "Requested: %d, Allowed: %d msi_intx_cnt %d intr_added %d",
	    navail, nactual, intrp->msi_intx_cnt, intrp->intr_added));

	(void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);
	(void) hxge_intr_ldgv_init(hxgep);

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs_adv_type"));

	return (status);
}
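/*
 * The MSI rounding above relies on navail being small (at most 31); a
 * more general way to round a vector count down to a power of two,
 * shown only as a hedged sketch (not used by this driver), would be:
 *
 *	static int
 *	round_down_pow2(int n)
 *	{
 *		int p = 1;
 *
 *		while ((p << 1) <= n)
 *			p <<= 1;
 *		return (p);
 *	}
 *
 * With navail == 6, for example, both approaches yield 4.
 */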
/*ARGSUSED*/
static hxge_status_t
hxge_add_intrs_adv_type_fix(p_hxge_t hxgep, uint32_t int_type)
{
	dev_info_t	*dip = hxgep->dip;
	p_hxge_ldg_t	ldgp;
	p_hxge_intr_t	intrp;
	uint_t		*inthandler;
	void		*arg1, *arg2;
	int		behavior;
	int		nintrs, navail;
	int		nactual, nrequired;
	int		inum = 0;
	int		x, y;
	int		ddi_status = DDI_SUCCESS;
	hxge_status_t	status = HXGE_OK;

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv_type_fix"));
	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;

	ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
	if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
		HXGE_DEBUG_MSG((hxgep, INT_CTL,
		    "ddi_intr_get_nintrs() failed, status: 0x%x, "
		    "nintrs: %d", ddi_status, nintrs));
		return (HXGE_ERROR | HXGE_DDI_FAILED);
	}

	ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
	if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "ddi_intr_get_navail() failed, status: 0x%x, "
		    "navail: %d", ddi_status, navail));
		return (HXGE_ERROR | HXGE_DDI_FAILED);
	}

	HXGE_DEBUG_MSG((hxgep, INT_CTL,
	    "ddi_intr_get_navail() returned: nintrs %d, navail %d",
	    nintrs, navail));

	behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
	    DDI_INTR_ALLOC_NORMAL);
	intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
	intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP);
	ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
	    navail, &nactual, behavior);
	if (ddi_status != DDI_SUCCESS || nactual == 0) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    " ddi_intr_alloc() failed: %d", ddi_status));
		kmem_free(intrp->htable, intrp->intr_size);
		return (HXGE_ERROR | HXGE_DDI_FAILED);
	}

	if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
	    (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    " ddi_intr_get_pri() failed: %d", ddi_status));
		/* Free already allocated interrupts */
		for (y = 0; y < nactual; y++) {
			(void) ddi_intr_free(intrp->htable[y]);
		}

		kmem_free(intrp->htable, intrp->intr_size);
		return (HXGE_ERROR | HXGE_DDI_FAILED);
	}

	nrequired = 0;
	status = hxge_ldgv_init(hxgep, &nactual, &nrequired);
	if (status != HXGE_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "hxge_add_intrs_adv_type_fix: hxge_ldgv_init "
		    "failed: 0x%x", status));
		/* Free already allocated interrupts */
		for (y = 0; y < nactual; y++) {
			(void) ddi_intr_free(intrp->htable[y]);
		}

		kmem_free(intrp->htable, intrp->intr_size);
		return (status);
	}

	ldgp = hxgep->ldgvp->ldgp;
	for (x = 0; x < nrequired; x++, ldgp++) {
		ldgp->vector = (uint8_t)x;
		arg1 = ldgp->ldvp;
		arg2 = hxgep;
		if (ldgp->nldvs == 1) {
			inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
			HXGE_DEBUG_MSG((hxgep, INT_CTL,
			    "hxge_add_intrs_adv_type_fix: "
			    "1-1 int handler(%d) ldg %d ldv %d "
			    "arg1 $%p arg2 $%p\n",
			    x, ldgp->ldg, ldgp->ldvp->ldv, arg1, arg2));
		} else if (ldgp->nldvs > 1) {
			inthandler = (uint_t *)ldgp->sys_intr_handler;
			HXGE_DEBUG_MSG((hxgep, INT_CTL,
			    "hxge_add_intrs_adv_type_fix: "
			    "shared ldv %d int handler(%d) ldv %d ldg %d "
			    "arg1 0x%016llx arg2 0x%016llx\n",
			    x, ldgp->nldvs, ldgp->ldg, ldgp->ldvp->ldv,
			    arg1, arg2));
		}

		if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
		    (ddi_intr_handler_t *)inthandler, arg1, arg2)) !=
		    DDI_SUCCESS) {
			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
			    "==> hxge_add_intrs_adv_type_fix: failed #%d "
			    "status 0x%x", x, ddi_status));
			for (y = 0; y < intrp->intr_added; y++) {
				(void) ddi_intr_remove_handler(
				    intrp->htable[y]);
			}

			/* Free already allocated interrupts */
			for (y = 0; y < nactual; y++) {
				(void) ddi_intr_free(intrp->htable[y]);
			}
			kmem_free(intrp->htable, intrp->intr_size);

			(void) hxge_ldgv_uninit(hxgep);

			return (HXGE_ERROR | HXGE_DDI_FAILED);
		}
		intrp->intr_added++;
	}

	intrp->msi_intx_cnt = nactual;

	(void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);

	status = hxge_intr_ldgv_init(hxgep);

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs_adv_type_fix"));

	return (status);
}
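/*
 * The error paths in the two functions above repeat the same unwind
 * sequence.  A hedged sketch of a hypothetical helper (not present in
 * this driver) that captures that order -- remove handlers already
 * added, free allocated handles, free the handle table:
 *
 *	static void
 *	hxge_intr_unwind(p_hxge_intr_t intrp, int nactual)
 *	{
 *		int i;
 *
 *		for (i = 0; i < intrp->intr_added; i++)
 *			(void) ddi_intr_remove_handler(intrp->htable[i]);
 *		for (i = 0; i < nactual; i++)
 *			(void) ddi_intr_free(intrp->htable[i]);
 *		kmem_free(intrp->htable, intrp->intr_size);
 *	}
 */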
/*ARGSUSED*/
static void
hxge_remove_intrs(p_hxge_t hxgep)
{
	int		i, inum;
	p_hxge_intr_t	intrp;

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_remove_intrs"));
	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
	if (!intrp->intr_registered) {
		HXGE_DEBUG_MSG((hxgep, INT_CTL,
		    "<== hxge_remove_intrs: interrupts not registered"));
		return;
	}

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_remove_intrs:advanced"));

	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
		(void) ddi_intr_block_disable(intrp->htable,
		    intrp->intr_added);
	} else {
		for (i = 0; i < intrp->intr_added; i++) {
			(void) ddi_intr_disable(intrp->htable[i]);
		}
	}

	for (inum = 0; inum < intrp->intr_added; inum++) {
		if (intrp->htable[inum]) {
			(void) ddi_intr_remove_handler(intrp->htable[inum]);
		}
	}

	for (inum = 0; inum < intrp->msi_intx_cnt; inum++) {
		if (intrp->htable[inum]) {
			HXGE_DEBUG_MSG((hxgep, DDI_CTL,
			    "hxge_remove_intrs: ddi_intr_free inum %d "
			    "msi_intx_cnt %d intr_added %d",
			    inum, intrp->msi_intx_cnt, intrp->intr_added));

			(void) ddi_intr_free(intrp->htable[inum]);
		}
	}

	kmem_free(intrp->htable, intrp->intr_size);
	intrp->intr_registered = B_FALSE;
	intrp->intr_enabled = B_FALSE;
	intrp->msi_intx_cnt = 0;
	intrp->intr_added = 0;

	(void) hxge_ldgv_uninit(hxgep);

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_remove_intrs"));
}

/*ARGSUSED*/
static void
hxge_remove_soft_intrs(p_hxge_t hxgep)
{
	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_remove_soft_intrs"));

	if (hxgep->resched_id) {
		ddi_remove_softintr(hxgep->resched_id);
		HXGE_DEBUG_MSG((hxgep, INT_CTL,
		    "==> hxge_remove_soft_intrs: removed"));
		hxgep->resched_id = NULL;
	}

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_remove_soft_intrs"));
}
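/*
 * Teardown mirrors setup in reverse: disable (block or per-vector),
 * remove the handlers, free the handles, then free the handle table and
 * the logical device group state.  A hedged sketch of how a detach path
 * would typically invoke these helpers (the real ordering lives in
 * hxge_unattach()):
 *
 *	hxge_remove_intrs(hxgep);
 *	hxge_remove_soft_intrs(hxgep);
 */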
/*ARGSUSED*/
void
hxge_intrs_enable(p_hxge_t hxgep)
{
	p_hxge_intr_t	intrp;
	int		i;
	int		status;

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_enable"));

	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;

	if (!intrp->intr_registered) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "<== hxge_intrs_enable: "
		    "interrupts are not registered"));
		return;
	}

	if (intrp->intr_enabled) {
		HXGE_DEBUG_MSG((hxgep, INT_CTL,
		    "<== hxge_intrs_enable: already enabled"));
		return;
	}

	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
		status = ddi_intr_block_enable(intrp->htable,
		    intrp->intr_added);
		HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_enable "
		    "block enable - status 0x%x total inums #%d\n",
		    status, intrp->intr_added));
		/* Record success so the enabled-state check stays accurate */
		if (status == DDI_SUCCESS) {
			intrp->intr_enabled = B_TRUE;
		}
	} else {
		for (i = 0; i < intrp->intr_added; i++) {
			status = ddi_intr_enable(intrp->htable[i]);
			HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_enable "
			    "ddi_intr_enable:enable - status 0x%x "
			    "total inums %d enable inum #%d\n",
			    status, intrp->intr_added, i));
			if (status == DDI_SUCCESS) {
				intrp->intr_enabled = B_TRUE;
			}
		}
	}

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intrs_enable"));
}

/*ARGSUSED*/
static void
hxge_intrs_disable(p_hxge_t hxgep)
{
	p_hxge_intr_t	intrp;
	int		i;

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_disable"));

	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;

	if (!intrp->intr_registered) {
		HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intrs_disable: "
		    "interrupts are not registered"));
		return;
	}

	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
		(void) ddi_intr_block_disable(intrp->htable,
		    intrp->intr_added);
	} else {
		for (i = 0; i < intrp->intr_added; i++) {
			(void) ddi_intr_disable(intrp->htable[i]);
		}
	}

	intrp->intr_enabled = B_FALSE;
	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intrs_disable"));
}
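/*
 * Whether the block enable/disable path is taken depends on the
 * DDI_INTR_FLAG_BLOCK capability reported for the allocated handles.
 * A hedged sketch of the usual capability probe (this driver caches the
 * result in intrp->intr_cap during hxge_add_intrs_adv_type*()):
 *
 *	int cap;
 *
 *	if (ddi_intr_get_cap(htable[0], &cap) == DDI_SUCCESS &&
 *	    (cap & DDI_INTR_FLAG_BLOCK)) {
 *		(void) ddi_intr_block_enable(htable, count);
 *	} else {
 *		for (i = 0; i < count; i++)
 *			(void) ddi_intr_enable(htable[i]);
 *	}
 */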
static hxge_status_t
hxge_mac_register(p_hxge_t hxgep)
{
	mac_register_t	*macp;
	int		status;

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_mac_register"));

	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
		return (HXGE_ERROR);

	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	macp->m_driver = hxgep;
	macp->m_dip = hxgep->dip;
	macp->m_src_addr = hxgep->ouraddr.ether_addr_octet;

	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
	    "hxge_mac_register: ether addr is %x:%x:%x:%x:%x:%x",
	    macp->m_src_addr[0],
	    macp->m_src_addr[1],
	    macp->m_src_addr[2],
	    macp->m_src_addr[3],
	    macp->m_src_addr[4],
	    macp->m_src_addr[5]));

	macp->m_callbacks = &hxge_m_callbacks;
	macp->m_min_sdu = 0;
	macp->m_max_sdu = hxgep->vmac.maxframesize - MTU_TO_FRAME_SIZE;
	macp->m_margin = VLAN_TAGSZ;
	macp->m_priv_props = hxge_priv_props;
	macp->m_priv_prop_count = HXGE_MAX_PRIV_PROPS;

	status = mac_register(macp, &hxgep->mach);
	mac_free(macp);

	if (status != 0) {
		cmn_err(CE_WARN,
		    "hxge_mac_register failed (status %d instance %d)",
		    status, hxgep->instance);
		return (HXGE_ERROR);
	}

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_mac_register success "
	    "(instance %d)", hxgep->instance));

	return (HXGE_OK);
}

static int
hxge_init_common_dev(p_hxge_t hxgep)
{
	p_hxge_hw_list_t	hw_p;
	dev_info_t		*p_dip;

	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "==> hxge_init_common_dev"));

	p_dip = hxgep->p_dip;
	MUTEX_ENTER(&hxge_common_lock);

	/*
	 * Loop through the existing per-Hydra hardware list.
	 */
	for (hw_p = hxge_hw_list; hw_p; hw_p = hw_p->next) {
		HXGE_DEBUG_MSG((hxgep, MOD_CTL,
		    "==> hxge_init_common_dev: hw_p $%p parent dip $%p",
		    hw_p, p_dip));
		if (hw_p->parent_devp == p_dip) {
			hxgep->hxge_hw_p = hw_p;
			hw_p->ndevs++;
			hw_p->hxge_p = hxgep;
			HXGE_DEBUG_MSG((hxgep, MOD_CTL,
			    "==> hxge_init_common_dev: "
			    "hw_p $%p parent dip $%p ndevs %d (found)",
			    hw_p, p_dip, hw_p->ndevs));
			break;
		}
	}

	if (hw_p == NULL) {
		HXGE_DEBUG_MSG((hxgep, MOD_CTL,
		    "==> hxge_init_common_dev: parent dip $%p (new)", p_dip));
		hw_p = kmem_zalloc(sizeof (hxge_hw_list_t), KM_SLEEP);
		hw_p->parent_devp = p_dip;
		hw_p->magic = HXGE_MAGIC;
		hxgep->hxge_hw_p = hw_p;
		hw_p->ndevs++;
		hw_p->hxge_p = hxgep;
		hw_p->next = hxge_hw_list;

		MUTEX_INIT(&hw_p->hxge_cfg_lock, NULL, MUTEX_DRIVER, NULL);
		MUTEX_INIT(&hw_p->hxge_tcam_lock, NULL, MUTEX_DRIVER, NULL);
		MUTEX_INIT(&hw_p->hxge_vlan_lock, NULL, MUTEX_DRIVER, NULL);

		hxge_hw_list = hw_p;
	}
	MUTEX_EXIT(&hxge_common_lock);
	HXGE_DEBUG_MSG((hxgep, MOD_CTL,
	    "==> hxge_init_common_dev (hxge_hw_list) $%p", hxge_hw_list));
	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "<== hxge_init_common_dev"));

	return (HXGE_OK);
}
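/*
 * Other parts of the driver reach the shared per-Hydra state through the
 * hxgep->hxge_hw_p pointer that hxge_init_common_dev() sets up.  A
 * hedged sketch of the typical access pattern (the lock names are the
 * ones initialized above):
 *
 *	p_hxge_hw_list_t hw_p = hxgep->hxge_hw_p;
 *
 *	MUTEX_ENTER(&hw_p->hxge_cfg_lock);
 *	... touch shared configuration ...
 *	MUTEX_EXIT(&hw_p->hxge_cfg_lock);
 */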
static void
hxge_uninit_common_dev(p_hxge_t hxgep)
{
	p_hxge_hw_list_t	hw_p, h_hw_p;
	dev_info_t		*p_dip;

	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "==> hxge_uninit_common_dev"));
	if (hxgep->hxge_hw_p == NULL) {
		HXGE_DEBUG_MSG((hxgep, MOD_CTL,
		    "<== hxge_uninit_common_dev (no common)"));
		return;
	}

	MUTEX_ENTER(&hxge_common_lock);
	h_hw_p = hxge_hw_list;
	for (hw_p = hxge_hw_list; hw_p; hw_p = hw_p->next) {
		p_dip = hw_p->parent_devp;
		if (hxgep->hxge_hw_p == hw_p && p_dip == hxgep->p_dip &&
		    hxgep->hxge_hw_p->magic == HXGE_MAGIC &&
		    hw_p->magic == HXGE_MAGIC) {
			HXGE_DEBUG_MSG((hxgep, MOD_CTL,
			    "==> hxge_uninit_common_dev: "
			    "hw_p $%p parent dip $%p ndevs %d (found)",
			    hw_p, p_dip, hw_p->ndevs));

			hxgep->hxge_hw_p = NULL;
			if (hw_p->ndevs) {
				hw_p->ndevs--;
			}
			hw_p->hxge_p = NULL;
			if (!hw_p->ndevs) {
				MUTEX_DESTROY(&hw_p->hxge_vlan_lock);
				MUTEX_DESTROY(&hw_p->hxge_tcam_lock);
				MUTEX_DESTROY(&hw_p->hxge_cfg_lock);
				HXGE_DEBUG_MSG((hxgep, MOD_CTL,
				    "==> hxge_uninit_common_dev: "
				    "hw_p $%p parent dip $%p ndevs %d (last)",
				    hw_p, p_dip, hw_p->ndevs));

				if (hw_p == hxge_hw_list) {
					HXGE_DEBUG_MSG((hxgep, MOD_CTL,
					    "==> hxge_uninit_common_dev: "
					    "remove head "
					    "hw_p $%p parent dip $%p "
					    "ndevs %d (head)",
					    hw_p, p_dip, hw_p->ndevs));
					hxge_hw_list = hw_p->next;
				} else {
					HXGE_DEBUG_MSG((hxgep, MOD_CTL,
					    "==> hxge_uninit_common_dev: "
					    "remove middle "
					    "hw_p $%p parent dip $%p "
					    "ndevs %d (middle)",
					    hw_p, p_dip, hw_p->ndevs));
					h_hw_p->next = hw_p->next;
				}

				KMEM_FREE(hw_p, sizeof (hxge_hw_list_t));
			}
			break;
		} else {
			h_hw_p = hw_p;
		}
	}

	MUTEX_EXIT(&hxge_common_lock);
	HXGE_DEBUG_MSG((hxgep, MOD_CTL,
	    "==> hxge_uninit_common_dev (hxge_hw_list) $%p", hxge_hw_list));

	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "<== hxge_uninit_common_dev"));
}

static void
hxge_link_poll(void *arg)
{
	p_hxge_t	hxgep = (p_hxge_t)arg;
	hpi_handle_t	handle;
	cip_link_stat_t	link_stat;
	hxge_timeout	*to = &hxgep->timeout;

	handle = HXGE_DEV_HPI_HANDLE(hxgep);
	HXGE_REG_RD32(handle, CIP_LINK_STAT, &link_stat.value);

	if (to->report_link_status ||
	    (to->link_status != link_stat.bits.xpcs0_link_up)) {
		to->link_status = link_stat.bits.xpcs0_link_up;
		to->report_link_status = B_FALSE;

		if (link_stat.bits.xpcs0_link_up) {
			hxge_link_update(hxgep, LINK_STATE_UP);
		} else {
			hxge_link_update(hxgep, LINK_STATE_DOWN);
		}
	}

	/* Restart the link status timer to check the link status */
	MUTEX_ENTER(&to->lock);
	to->id = timeout(hxge_link_poll, arg, to->ticks);
	MUTEX_EXIT(&to->lock);
}

static void
hxge_link_update(p_hxge_t hxgep, link_state_t state)
{
	p_hxge_stats_t	statsp = (p_hxge_stats_t)hxgep->statsp;

	mac_link_update(hxgep->mach, state);
	if (state == LINK_STATE_UP) {
		statsp->mac_stats.link_speed = 10000;
		statsp->mac_stats.link_duplex = 2;
		statsp->mac_stats.link_up = 1;
	} else {
		statsp->mac_stats.link_speed = 0;
		statsp->mac_stats.link_duplex = 0;
		statsp->mac_stats.link_up = 0;
	}
}
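/*
 * hxge_link_poll() re-arms itself with timeout(9F) each time it runs.
 * A hedged sketch of how such a self-rearming poller is normally
 * stopped at suspend or detach time (the driver keeps the timeout id in
 * hxgep->timeout.id, protected by the same lock used above):
 *
 *	timeout_id_t tid;
 *
 *	MUTEX_ENTER(&to->lock);
 *	tid = to->id;
 *	to->id = 0;
 *	MUTEX_EXIT(&to->lock);
 *	if (tid != 0)
 *		(void) untimeout(tid);
 */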