/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include "bge_impl.h"
#include <sys/sdt.h>
#include <sys/mac.h>

/*
 * This is the string displayed by modinfo, etc.
 * Make sure you keep the version ID up to date!
 */
static char bge_ident[] = "Broadcom Gb Ethernet v1.01";

/*
 * Property names
 */
static char debug_propname[] = "bge-debug-flags";
static char clsize_propname[] = "cache-line-size";
static char latency_propname[] = "latency-timer";
static char localmac_boolname[] = "local-mac-address?";
static char localmac_propname[] = "local-mac-address";
static char macaddr_propname[] = "mac-address";
static char subdev_propname[] = "subsystem-id";
static char subven_propname[] = "subsystem-vendor-id";
static char rxrings_propname[] = "bge-rx-rings";
static char txrings_propname[] = "bge-tx-rings";
static char fm_cap[] = "fm-capable";
static char default_mtu[] = "default_mtu";

static int bge_add_intrs(bge_t *, int);
static void bge_rem_intrs(bge_t *);

/*
 * Describes the chip's DMA engine
 */
static ddi_dma_attr_t dma_attr = {
	DMA_ATTR_V0,			/* dma_attr version	*/
	0x0000000000000000ull,		/* dma_attr_addr_lo	*/
	0xFFFFFFFFFFFFFFFFull,		/* dma_attr_addr_hi	*/
	0x00000000FFFFFFFFull,		/* dma_attr_count_max	*/
	0x0000000000000001ull,		/* dma_attr_align	*/
	0x00000FFF,			/* dma_attr_burstsizes	*/
	0x00000001,			/* dma_attr_minxfer	*/
	0x000000000000FFFFull,		/* dma_attr_maxxfer	*/
	0xFFFFFFFFFFFFFFFFull,		/* dma_attr_seg		*/
	1,				/* dma_attr_sgllen	*/
	0x00000001,			/* dma_attr_granular	*/
	DDI_DMA_FLAGERR			/* dma_attr_flags	*/
};

/*
 * PIO access attributes for registers
 */
static ddi_device_acc_attr_t bge_reg_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC,
	DDI_FLAGERR_ACC
};

/*
 * DMA access attributes for descriptors: NOT to be byte swapped.
 */
static ddi_device_acc_attr_t bge_desc_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC,
	DDI_FLAGERR_ACC
};

/*
 * DMA access attributes for data: NOT to be byte swapped.
 */
static ddi_device_acc_attr_t bge_data_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC
};

static int		bge_m_start(void *);
static void		bge_m_stop(void *);
static int		bge_m_promisc(void *, boolean_t);
static int		bge_m_multicst(void *, boolean_t, const uint8_t *);
static int		bge_m_unicst(void *, const uint8_t *);
static void		bge_m_resources(void *);
static void		bge_m_ioctl(void *, queue_t *, mblk_t *);
static boolean_t	bge_m_getcapab(void *, mac_capab_t, void *);
static int		bge_unicst_set(void *, const uint8_t *,
    mac_addr_slot_t);
static int		bge_m_unicst_add(void *, mac_multi_addr_t *);
static int		bge_m_unicst_remove(void *, mac_addr_slot_t);
static int		bge_m_unicst_modify(void *, mac_multi_addr_t *);
static int		bge_m_unicst_get(void *, mac_multi_addr_t *);
static int		bge_m_setprop(void *, const char *, mac_prop_id_t,
    uint_t, const void *);
static int		bge_m_getprop(void *, const char *, mac_prop_id_t,
    uint_t, uint_t, void *, uint_t *);
static int		bge_set_priv_prop(bge_t *, const char *, uint_t,
    const void *);
static int		bge_get_priv_prop(bge_t *, const char *, uint_t,
    uint_t, void *);

#define	BGE_M_CALLBACK_FLAGS\
	(MC_RESOURCES | MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP)

static mac_callbacks_t bge_m_callbacks = {
	BGE_M_CALLBACK_FLAGS,
	bge_m_stat,
	bge_m_start,
	bge_m_stop,
	bge_m_promisc,
	bge_m_multicst,
	bge_m_unicst,
	bge_m_tx,
	bge_m_resources,
	bge_m_ioctl,
	bge_m_getcapab,
	NULL,
	NULL,
	bge_m_setprop,
	bge_m_getprop
};
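
/*
 * Note: the two NULL entries above fill the optional open/close slots
 * of mac_callbacks_t, which bge does not implement; bge_m_setprop and
 * bge_m_getprop are picked up because MC_SETPROP and MC_GETPROP are
 * set in BGE_M_CALLBACK_FLAGS.
 */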

mac_priv_prop_t bge_priv_prop[] = {
	{"_adv_asym_pause_cap", MAC_PROP_PERM_RW},
	{"_adv_pause_cap", MAC_PROP_PERM_RW}
};

#define	BGE_MAX_PRIV_PROPS \
	(sizeof (bge_priv_prop) / sizeof (mac_priv_prop_t))

/*
 * ========== Transmit and receive ring reinitialisation ==========
 */

/*
 * These <reinit> routines each reset the specified ring to an initial
 * state, assuming that the corresponding <init> routine has already
 * been called exactly once.
 */

static void
bge_reinit_send_ring(send_ring_t *srp)
{
	bge_queue_t *txbuf_queue;
	bge_queue_item_t *txbuf_head;
	sw_txbuf_t *txbuf;
	sw_sbd_t *ssbdp;
	uint32_t slot;

	/*
	 * Reinitialise control variables ...
	 */
	srp->tx_flow = 0;
	srp->tx_next = 0;
	srp->txfill_next = 0;
	srp->tx_free = srp->desc.nslots;
	ASSERT(mutex_owned(srp->tc_lock));
	srp->tc_next = 0;
	srp->txpkt_next = 0;
	srp->tx_block = 0;
	srp->tx_nobd = 0;
	srp->tx_nobuf = 0;

	/*
	 * Initialize the tx buffer push queue
	 */
	mutex_enter(srp->freetxbuf_lock);
	mutex_enter(srp->txbuf_lock);
	txbuf_queue = &srp->freetxbuf_queue;
	txbuf_queue->head = NULL;
	txbuf_queue->count = 0;
	txbuf_queue->lock = srp->freetxbuf_lock;
	srp->txbuf_push_queue = txbuf_queue;

	/*
	 * Initialize the tx buffer pop queue
	 */
	txbuf_queue = &srp->txbuf_queue;
	txbuf_queue->head = NULL;
	txbuf_queue->count = 0;
	txbuf_queue->lock = srp->txbuf_lock;
	srp->txbuf_pop_queue = txbuf_queue;
	txbuf_head = srp->txbuf_head;
	txbuf = srp->txbuf;
	for (slot = 0; slot < srp->tx_buffers; ++slot) {
		txbuf_head->item = txbuf;
		txbuf_head->next = txbuf_queue->head;
		txbuf_queue->head = txbuf_head;
		txbuf_queue->count++;
		txbuf++;
		txbuf_head++;
	}
	mutex_exit(srp->txbuf_lock);
	mutex_exit(srp->freetxbuf_lock);

	/*
	 * Zero and sync all the h/w Send Buffer Descriptors
	 */
	DMA_ZERO(srp->desc);
	DMA_SYNC(srp->desc, DDI_DMA_SYNC_FORDEV);
	bzero(srp->pktp, BGE_SEND_BUF_MAX * sizeof (*srp->pktp));
	ssbdp = srp->sw_sbds;
	for (slot = 0; slot < srp->desc.nslots; ++ssbdp, ++slot)
		ssbdp->pbuf = NULL;
}
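
/*
 * Note that after reinitialisation every tx buffer sits on the pop
 * queue (srp->txbuf_queue) and the push queue starts empty; the send
 * path presumably swaps the two roles once the pop queue drains.
 */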

static void
bge_reinit_recv_ring(recv_ring_t *rrp)
{
	/*
	 * Reinitialise control variables ...
	 */
	rrp->rx_next = 0;
}

static void
bge_reinit_buff_ring(buff_ring_t *brp, uint32_t ring)
{
	bge_rbd_t *hw_rbd_p;
	sw_rbd_t *srbdp;
	uint32_t bufsize;
	uint32_t nslots;
	uint32_t slot;

	static uint16_t ring_type_flag[BGE_BUFF_RINGS_MAX] = {
		RBD_FLAG_STD_RING,
		RBD_FLAG_JUMBO_RING,
		RBD_FLAG_MINI_RING
	};

	/*
	 * Zero, initialise and sync all the h/w Receive Buffer Descriptors
	 * Note: all the remaining fields (<type>, <flags>, <ip_cksum>,
	 * <tcp_udp_cksum>, <error_flag>, <vlan_tag>, and <reserved>)
	 * should be zeroed, and so don't need to be set up specifically
	 * once the whole area has been cleared.
	 */
	DMA_ZERO(brp->desc);

	hw_rbd_p = DMA_VPTR(brp->desc);
	nslots = brp->desc.nslots;
	ASSERT(brp->buf[0].nslots == nslots/BGE_SPLIT);
	bufsize = brp->buf[0].size;
	srbdp = brp->sw_rbds;
	for (slot = 0; slot < nslots; ++hw_rbd_p, ++srbdp, ++slot) {
		hw_rbd_p->host_buf_addr = srbdp->pbuf.cookie.dmac_laddress;
		hw_rbd_p->index = (uint16_t)slot;
		hw_rbd_p->len = (uint16_t)bufsize;
		hw_rbd_p->opaque = srbdp->pbuf.token;
		hw_rbd_p->flags |= ring_type_flag[ring];
	}

	DMA_SYNC(brp->desc, DDI_DMA_SYNC_FORDEV);

	/*
	 * Finally, reinitialise the ring control variables ...
	 */
	brp->rf_next = (nslots != 0) ? (nslots-1) : 0;
}

/*
 * Reinitialize all rings
 */
static void
bge_reinit_rings(bge_t *bgep)
{
	uint32_t ring;

	ASSERT(mutex_owned(bgep->genlock));

	/*
	 * Send Rings ...
	 */
	for (ring = 0; ring < bgep->chipid.tx_rings; ++ring)
		bge_reinit_send_ring(&bgep->send[ring]);

	/*
	 * Receive Return Rings ...
	 */
	for (ring = 0; ring < bgep->chipid.rx_rings; ++ring)
		bge_reinit_recv_ring(&bgep->recv[ring]);

	/*
	 * Receive Producer Rings ...
	 */
	for (ring = 0; ring < BGE_BUFF_RINGS_USED; ++ring)
		bge_reinit_buff_ring(&bgep->buff[ring], ring);
}

/*
 * ========== Internal state management entry points ==========
 */

#undef	BGE_DBG
#define	BGE_DBG		BGE_DBG_NEMO	/* debug flag for this code	*/

/*
 * These routines provide all the functionality required by the
 * corresponding GLD entry points, but don't update the GLD state
 * so they can be called internally without disturbing our record
 * of what GLD thinks we should be doing ...
 */

/*
 *	bge_reset() -- reset h/w & rings to initial state
 */
static int
#ifdef BGE_IPMI_ASF
bge_reset(bge_t *bgep, uint_t asf_mode)
#else
bge_reset(bge_t *bgep)
#endif
{
	uint32_t ring;
	int retval;

	BGE_TRACE(("bge_reset($%p)", (void *)bgep));

	ASSERT(mutex_owned(bgep->genlock));

	/*
	 * Grab all the other mutexes in the world (this should
	 * ensure no other threads are manipulating driver state)
	 */
	for (ring = 0; ring < BGE_RECV_RINGS_MAX; ++ring)
		mutex_enter(bgep->recv[ring].rx_lock);
	for (ring = 0; ring < BGE_BUFF_RINGS_MAX; ++ring)
		mutex_enter(bgep->buff[ring].rf_lock);
	rw_enter(bgep->errlock, RW_WRITER);
	for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring)
		mutex_enter(bgep->send[ring].tx_lock);
	for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring)
		mutex_enter(bgep->send[ring].tc_lock);

#ifdef BGE_IPMI_ASF
	retval = bge_chip_reset(bgep, B_TRUE, asf_mode);
#else
	retval = bge_chip_reset(bgep, B_TRUE);
#endif
	bge_reinit_rings(bgep);

	/*
	 * Free the world ...
	 */
	for (ring = BGE_SEND_RINGS_MAX; ring-- > 0; )
		mutex_exit(bgep->send[ring].tc_lock);
	for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring)
		mutex_exit(bgep->send[ring].tx_lock);
	rw_exit(bgep->errlock);
	for (ring = BGE_BUFF_RINGS_MAX; ring-- > 0; )
		mutex_exit(bgep->buff[ring].rf_lock);
	for (ring = BGE_RECV_RINGS_MAX; ring-- > 0; )
		mutex_exit(bgep->recv[ring].rx_lock);

	BGE_DEBUG(("bge_reset($%p) done", (void *)bgep));
	return (retval);
}

/*
 *	bge_stop() -- stop processing, don't reset h/w or rings
 */
static void
bge_stop(bge_t *bgep)
{
	BGE_TRACE(("bge_stop($%p)", (void *)bgep));

	ASSERT(mutex_owned(bgep->genlock));

#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled) {
		bgep->asf_pseudostop = B_TRUE;
	} else {
#endif
		bge_chip_stop(bgep, B_FALSE);
#ifdef BGE_IPMI_ASF
	}
#endif

	BGE_DEBUG(("bge_stop($%p) done", (void *)bgep));
}

/*
 *	bge_start() -- start transmitting/receiving
 */
static int
bge_start(bge_t *bgep, boolean_t reset_phys)
{
	int retval;

	BGE_TRACE(("bge_start($%p, %d)", (void *)bgep, reset_phys));

	ASSERT(mutex_owned(bgep->genlock));

	/*
	 * Start chip processing, including enabling interrupts
	 */
	retval = bge_chip_start(bgep, reset_phys);

	BGE_DEBUG(("bge_start($%p, %d) done", (void *)bgep, reset_phys));
	return (retval);
}

/*
 * bge_restart - restart transmitting/receiving after error or suspend
 */
int
bge_restart(bge_t *bgep, boolean_t reset_phys)
{
	int retval = DDI_SUCCESS;
	ASSERT(mutex_owned(bgep->genlock));

#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled) {
		if (bge_reset(bgep, ASF_MODE_POST_INIT) != DDI_SUCCESS)
			retval = DDI_FAILURE;
	} else
		if (bge_reset(bgep, ASF_MODE_NONE) != DDI_SUCCESS)
			retval = DDI_FAILURE;
#else
	if (bge_reset(bgep) != DDI_SUCCESS)
		retval = DDI_FAILURE;
#endif
	if (bgep->bge_mac_state == BGE_MAC_STARTED) {
		if (bge_start(bgep, reset_phys) != DDI_SUCCESS)
			retval = DDI_FAILURE;
		bgep->watchdog = 0;
		ddi_trigger_softintr(bgep->drain_id);
	}

	BGE_DEBUG(("bge_restart($%p, %d) done", (void *)bgep, reset_phys));
	return (retval);
}

/*
 * ========== Nemo-required management entry points ==========
 */

#undef	BGE_DBG
#define	BGE_DBG		BGE_DBG_NEMO	/* debug flag for this code	*/

/*
 *	bge_m_stop() -- stop transmitting/receiving
 */
static void
bge_m_stop(void *arg)
{
	bge_t *bgep = arg;		/* private device info	*/
	send_ring_t *srp;
	uint32_t ring;

	BGE_TRACE(("bge_m_stop($%p)", arg));

	/*
	 * Just stop processing, then record new GLD state
	 */
	mutex_enter(bgep->genlock);
	if (!(bgep->progress & PROGRESS_INTR)) {
		/* can happen during autorecovery */
		mutex_exit(bgep->genlock);
		return;
	}
	bge_stop(bgep);

	bgep->link_update_timer = 0;
	bgep->link_state = LINK_STATE_UNKNOWN;
	mac_link_update(bgep->mh, bgep->link_state);

	/*
	 * Free any extra tx buffer arrays allocated during transmission.
	 */
#ifdef BGE_IPMI_ASF
	if (!bgep->asf_pseudostop)
#endif
	{
		rw_enter(bgep->errlock, RW_WRITER);
		for (ring = 0; ring < bgep->chipid.tx_rings; ++ring) {
			srp = &bgep->send[ring];
			mutex_enter(srp->tx_lock);
			if (srp->tx_array > 1)
				bge_free_txbuf_arrays(srp);
			mutex_exit(srp->tx_lock);
		}
		rw_exit(bgep->errlock);
	}
	bgep->bge_mac_state = BGE_MAC_STOPPED;
	BGE_DEBUG(("bge_m_stop($%p) done", arg));
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK)
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_UNAFFECTED);
	mutex_exit(bgep->genlock);
}

/*
 *	bge_m_start() -- start transmitting/receiving
 */
static int
bge_m_start(void *arg)
{
	bge_t *bgep = arg;		/* private device info	*/

	BGE_TRACE(("bge_m_start($%p)", arg));

	/*
	 * Start processing and record new GLD state
	 */
	mutex_enter(bgep->genlock);
	if (!(bgep->progress & PROGRESS_INTR)) {
		/* can happen during autorecovery */
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled) {
		if ((bgep->asf_status == ASF_STAT_RUN) &&
		    (bgep->asf_pseudostop)) {
			bgep->bge_mac_state = BGE_MAC_STARTED;
			mutex_exit(bgep->genlock);
			return (0);
		}
	}
	if (bge_reset(bgep, ASF_MODE_INIT) != DDI_SUCCESS) {
#else
	if (bge_reset(bgep) != DDI_SUCCESS) {
#endif
		(void) bge_check_acc_handle(bgep, bgep->cfg_handle);
		(void) bge_check_acc_handle(bgep, bgep->io_handle);
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	if (bge_start(bgep, B_TRUE) != DDI_SUCCESS) {
		(void) bge_check_acc_handle(bgep, bgep->cfg_handle);
		(void) bge_check_acc_handle(bgep, bgep->io_handle);
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	bgep->bge_mac_state = BGE_MAC_STARTED;
	BGE_DEBUG(("bge_m_start($%p) done", arg));

	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled) {
		if (bgep->asf_status != ASF_STAT_RUN) {
			/* start ASF heart beat */
			bgep->asf_timeout_id = timeout(bge_asf_heartbeat,
			    (void *)bgep,
			    drv_usectohz(BGE_ASF_HEARTBEAT_INTERVAL));
			bgep->asf_status = ASF_STAT_RUN;
		}
	}
#endif
	mutex_exit(bgep->genlock);

	return (0);
}

/*
 *	bge_m_unicst() -- set the physical network address
 */
static int
bge_m_unicst(void *arg, const uint8_t *macaddr)
{
	/*
	 * Request to set address in
	 * address slot 0, i.e., default address
	 */
	return (bge_unicst_set(arg, macaddr, 0));
}

/*
 *	bge_unicst_set() -- set the physical network address
 */
static int
bge_unicst_set(void *arg, const uint8_t *macaddr, mac_addr_slot_t slot)
{
	bge_t *bgep = arg;		/* private device info	*/

	BGE_TRACE(("bge_m_unicst_set($%p, %s)", arg,
	    ether_sprintf((void *)macaddr)));
	/*
	 * Remember the new current address in the driver state
	 * Sync the chip's idea of the address too ...
	 */
	mutex_enter(bgep->genlock);
	if (!(bgep->progress & PROGRESS_INTR)) {
		/* can happen during autorecovery */
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	ethaddr_copy(macaddr, bgep->curr_addr[slot].addr);
#ifdef BGE_IPMI_ASF
	if (bge_chip_sync(bgep, B_FALSE) == DDI_FAILURE) {
#else
	if (bge_chip_sync(bgep) == DDI_FAILURE) {
#endif
		(void) bge_check_acc_handle(bgep, bgep->cfg_handle);
		(void) bge_check_acc_handle(bgep, bgep->io_handle);
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled) {
		/*
		 * The above bge_chip_sync() function wrote the ethernet MAC
		 * address registers, which destroyed the IPMI/ASF sideband.
		 * Here, we have to reset the chip to make the IPMI/ASF
		 * sideband work again.
		 */
		if (bgep->asf_status == ASF_STAT_RUN) {
			/*
			 * We must stop ASF heart beat before bge_chip_stop(),
			 * otherwise some computers (ex. IBM HS20 blade server)
			 * may crash.
			 */
			bge_asf_update_status(bgep);
			bge_asf_stop_timer(bgep);
			bgep->asf_status = ASF_STAT_STOP;

			bge_asf_pre_reset_operations(bgep, BGE_INIT_RESET);
		}
		bge_chip_stop(bgep, B_FALSE);

		if (bge_restart(bgep, B_FALSE) == DDI_FAILURE) {
			(void) bge_check_acc_handle(bgep, bgep->cfg_handle);
			(void) bge_check_acc_handle(bgep, bgep->io_handle);
			ddi_fm_service_impact(bgep->devinfo,
			    DDI_SERVICE_DEGRADED);
			mutex_exit(bgep->genlock);
			return (EIO);
		}

		/*
		 * Start our ASF heartbeat counter as soon as possible.
		 */
		if (bgep->asf_status != ASF_STAT_RUN) {
			/* start ASF heart beat */
			bgep->asf_timeout_id = timeout(bge_asf_heartbeat,
			    (void *)bgep,
			    drv_usectohz(BGE_ASF_HEARTBEAT_INTERVAL));
			bgep->asf_status = ASF_STAT_RUN;
		}
	}
#endif
	BGE_DEBUG(("bge_m_unicst_set($%p) done", arg));
	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	mutex_exit(bgep->genlock);

	return (0);
}

/*
 * The following four routines are used as callbacks for multiple MAC
 * address support:
 *    -  bge_m_unicst_add(void *, mac_multi_addr_t *);
 *    -  bge_m_unicst_remove(void *, mac_addr_slot_t);
 *    -  bge_m_unicst_modify(void *, mac_multi_addr_t *);
 *    -  bge_m_unicst_get(void *, mac_multi_addr_t *);
 */

/*
 * bge_m_unicst_add() - will find an unused address slot, set the
 * address value to the one specified, reserve that slot and enable
 * the NIC to start filtering on the new MAC address.
 * Returns 0 on success.
 */
static int
bge_m_unicst_add(void *arg, mac_multi_addr_t *maddr)
{
	bge_t *bgep = arg;		/* private device info	*/
	mac_addr_slot_t slot;
	int err;

	if (mac_unicst_verify(bgep->mh,
	    maddr->mma_addr, maddr->mma_addrlen) == B_FALSE)
		return (EINVAL);

	mutex_enter(bgep->genlock);
	if (bgep->unicst_addr_avail == 0) {
		/* no slots available */
		mutex_exit(bgep->genlock);
		return (ENOSPC);
	}

	/*
	 * Primary/default address is in slot 0. The next three
	 * addresses are the multiple MAC addresses. So multiple
	 * MAC address 0 is in slot 1, 1 in slot 2, and so on.
	 * So the first multiple MAC address resides in slot 1.
	 */
	for (slot = 1; slot < bgep->unicst_addr_total; slot++) {
		if (bgep->curr_addr[slot].set == B_FALSE) {
			bgep->curr_addr[slot].set = B_TRUE;
			break;
		}
	}

	ASSERT(slot < bgep->unicst_addr_total);
	bgep->unicst_addr_avail--;
	mutex_exit(bgep->genlock);
	maddr->mma_slot = slot;

	if ((err = bge_unicst_set(bgep, maddr->mma_addr, slot)) != 0) {
		mutex_enter(bgep->genlock);
		bgep->curr_addr[slot].set = B_FALSE;
		bgep->unicst_addr_avail++;
		mutex_exit(bgep->genlock);
	}
	return (err);
}

/*
 * bge_m_unicst_remove() - removes a MAC address that was added by a
 * call to bge_m_unicst_add(). The slot number that was returned in
 * add() is passed in the call to remove the address.
 * Returns 0 on success.
 */
static int
bge_m_unicst_remove(void *arg, mac_addr_slot_t slot)
{
	bge_t *bgep = arg;		/* private device info	*/

	if (slot <= 0 || slot >= bgep->unicst_addr_total)
		return (EINVAL);

	mutex_enter(bgep->genlock);
	if (bgep->curr_addr[slot].set == B_TRUE) {
		bgep->curr_addr[slot].set = B_FALSE;
		bgep->unicst_addr_avail++;
		mutex_exit(bgep->genlock);
		/*
		 * Copy the default address to the passed slot
		 */
		return (bge_unicst_set(bgep, bgep->curr_addr[0].addr, slot));
	}
	mutex_exit(bgep->genlock);
	return (EINVAL);
}

/*
 * bge_m_unicst_modify() - modifies the value of an address that
 * has been added by bge_m_unicst_add(). The new address, address
 * length and the slot number that was returned in the call to add
 * should be passed to bge_m_unicst_modify(). mma_flags should be
 * set to 0. Returns 0 on success.
 */
static int
bge_m_unicst_modify(void *arg, mac_multi_addr_t *maddr)
{
	bge_t *bgep = arg;		/* private device info	*/
	mac_addr_slot_t slot;

	if (mac_unicst_verify(bgep->mh,
	    maddr->mma_addr, maddr->mma_addrlen) == B_FALSE)
		return (EINVAL);

	slot = maddr->mma_slot;

	if (slot <= 0 || slot >= bgep->unicst_addr_total)
		return (EINVAL);

	mutex_enter(bgep->genlock);
	if (bgep->curr_addr[slot].set == B_TRUE) {
		mutex_exit(bgep->genlock);
		return (bge_unicst_set(bgep, maddr->mma_addr, slot));
	}
	mutex_exit(bgep->genlock);

	return (EINVAL);
}

/*
 * bge_m_unicst_get() - will get the MAC address and all other
 * information related to the address slot passed in mac_multi_addr_t.
 * mma_flags should be set to 0 in the call.
 * On return, mma_flags can take the following values:
 * 1) MMAC_SLOT_UNUSED
 * 2) MMAC_SLOT_USED | MMAC_VENDOR_ADDR
 * 3) MMAC_SLOT_UNUSED | MMAC_VENDOR_ADDR
 * 4) MMAC_SLOT_USED
 */
static int
bge_m_unicst_get(void *arg, mac_multi_addr_t *maddr)
{
	bge_t *bgep = arg;		/* private device info	*/
	mac_addr_slot_t slot;

	slot = maddr->mma_slot;

	if (slot <= 0 || slot >= bgep->unicst_addr_total)
		return (EINVAL);

	mutex_enter(bgep->genlock);
	if (bgep->curr_addr[slot].set == B_TRUE) {
		ethaddr_copy(bgep->curr_addr[slot].addr,
		    maddr->mma_addr);
		maddr->mma_flags = MMAC_SLOT_USED;
	} else {
		maddr->mma_flags = MMAC_SLOT_UNUSED;
	}
	mutex_exit(bgep->genlock);

	return (0);
}

extern void bge_wake_factotum(bge_t *);

static boolean_t
bge_param_locked(mac_prop_id_t pr_num)
{
	/*
	 * All adv_* parameters are locked (read-only) while
	 * the device is in any sort of loopback mode ...
	 */
	switch (pr_num) {
	case MAC_PROP_ADV_1000FDX_CAP:
	case MAC_PROP_EN_1000FDX_CAP:
	case MAC_PROP_ADV_1000HDX_CAP:
	case MAC_PROP_EN_1000HDX_CAP:
	case MAC_PROP_ADV_100FDX_CAP:
	case MAC_PROP_EN_100FDX_CAP:
	case MAC_PROP_ADV_100HDX_CAP:
	case MAC_PROP_EN_100HDX_CAP:
	case MAC_PROP_ADV_10FDX_CAP:
	case MAC_PROP_EN_10FDX_CAP:
	case MAC_PROP_ADV_10HDX_CAP:
	case MAC_PROP_EN_10HDX_CAP:
	case MAC_PROP_AUTONEG:
	case MAC_PROP_FLOWCTRL:
		return (B_TRUE);
	}
	return (B_FALSE);
}

/*
 * callback functions for set/get of properties
 */
static int
bge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
    uint_t pr_valsize, const void *pr_val)
{
	bge_t *bgep = barg;
	int err = 0;
	uint32_t cur_mtu, new_mtu;
	uint_t maxsdu;
	link_flowctrl_t fl;

	mutex_enter(bgep->genlock);
	if (bgep->param_loop_mode != BGE_LOOP_NONE &&
	    bge_param_locked(pr_num)) {
		/*
		 * All adv_* parameters are locked (read-only)
		 * while the device is in any sort of loopback mode.
		 */
		mutex_exit(bgep->genlock);
		return (EBUSY);
	}
	if ((bgep->chipid.flags & CHIP_FLAG_SERDES) &&
	    ((pr_num == MAC_PROP_EN_100FDX_CAP) ||
	    (pr_num == MAC_PROP_EN_100HDX_CAP) ||
	    (pr_num == MAC_PROP_EN_10FDX_CAP) ||
	    (pr_num == MAC_PROP_EN_10HDX_CAP))) {
		/*
		 * these properties are read/write on copper,
		 * read-only and 0 on serdes
		 */
		mutex_exit(bgep->genlock);
		return (ENOTSUP);
	}
	if (DEVICE_5906_SERIES_CHIPSETS(bgep) &&
	    ((pr_num == MAC_PROP_EN_1000FDX_CAP) ||
	    (pr_num == MAC_PROP_EN_1000HDX_CAP))) {
		mutex_exit(bgep->genlock);
		return (ENOTSUP);
	}

	switch (pr_num) {
	case MAC_PROP_EN_1000FDX_CAP:
		bgep->param_en_1000fdx = *(uint8_t *)pr_val;
		bgep->param_adv_1000fdx = *(uint8_t *)pr_val;
		goto reprogram;
	case MAC_PROP_EN_1000HDX_CAP:
		bgep->param_en_1000hdx = *(uint8_t *)pr_val;
		bgep->param_adv_1000hdx = *(uint8_t *)pr_val;
		goto reprogram;
	case MAC_PROP_EN_100FDX_CAP:
		bgep->param_en_100fdx = *(uint8_t *)pr_val;
		bgep->param_adv_100fdx = *(uint8_t *)pr_val;
		goto reprogram;
	case MAC_PROP_EN_100HDX_CAP:
		bgep->param_en_100hdx = *(uint8_t *)pr_val;
		bgep->param_adv_100hdx = *(uint8_t *)pr_val;
		goto reprogram;
	case MAC_PROP_EN_10FDX_CAP:
		bgep->param_en_10fdx = *(uint8_t *)pr_val;
		bgep->param_adv_10fdx = *(uint8_t *)pr_val;
		goto reprogram;
	case MAC_PROP_EN_10HDX_CAP:
		bgep->param_en_10hdx = *(uint8_t *)pr_val;
		bgep->param_adv_10hdx = *(uint8_t *)pr_val;
reprogram:
		if (err == 0 && bge_reprogram(bgep) == IOC_INVAL)
			err = EINVAL;
		break;
	case MAC_PROP_ADV_1000FDX_CAP:
	case MAC_PROP_ADV_1000HDX_CAP:
	case MAC_PROP_ADV_100FDX_CAP:
	case MAC_PROP_ADV_100HDX_CAP:
	case MAC_PROP_ADV_10FDX_CAP:
	case MAC_PROP_ADV_10HDX_CAP:
	case MAC_PROP_STATUS:
	case MAC_PROP_SPEED:
	case MAC_PROP_DUPLEX:
		err = ENOTSUP; /* read-only prop. Can't set this */
		break;
	case MAC_PROP_AUTONEG:
		bgep->param_adv_autoneg = *(uint8_t *)pr_val;
		if (bge_reprogram(bgep) == IOC_INVAL)
			err = EINVAL;
		break;
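	/*
	 * Changing the MTU below requires the interface to be stopped;
	 * the new size is folded into the chip parameters via
	 * bge_chip_id_init(), and the deferred reset is handled by the
	 * factotum (note the manual_reset / bge_wake_factotum() calls).
	 */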
	case MAC_PROP_MTU:
		cur_mtu = bgep->chipid.default_mtu;
		bcopy(pr_val, &new_mtu, sizeof (new_mtu));

		if (new_mtu == cur_mtu) {
			err = 0;
			break;
		}
		if (new_mtu < BGE_DEFAULT_MTU ||
		    new_mtu > BGE_MAXIMUM_MTU) {
			err = EINVAL;
			break;
		}
		if ((new_mtu > BGE_DEFAULT_MTU) &&
		    (bgep->chipid.flags & CHIP_FLAG_NO_JUMBO)) {
			err = EINVAL;
			break;
		}
		if (bgep->bge_mac_state == BGE_MAC_STARTED) {
			err = EBUSY;
			break;
		}
		bgep->chipid.default_mtu = new_mtu;
		if (bge_chip_id_init(bgep)) {
			err = EINVAL;
			break;
		}
		maxsdu = bgep->chipid.ethmax_size -
		    sizeof (struct ether_header);
		err = mac_maxsdu_update(bgep->mh, maxsdu);
		if (err == 0) {
			bgep->bge_dma_error = B_TRUE;
			bgep->manual_reset = B_TRUE;
			bge_chip_stop(bgep, B_TRUE);
			bge_wake_factotum(bgep);
			err = 0;
		}
		break;
	case MAC_PROP_FLOWCTRL:
		bcopy(pr_val, &fl, sizeof (fl));
		switch (fl) {
		default:
			err = ENOTSUP;
			break;
		case LINK_FLOWCTRL_NONE:
			bgep->param_adv_pause = 0;
			bgep->param_adv_asym_pause = 0;

			bgep->param_link_rx_pause = B_FALSE;
			bgep->param_link_tx_pause = B_FALSE;
			break;
		case LINK_FLOWCTRL_RX:
			if (!((bgep->param_lp_pause == 0) &&
			    (bgep->param_lp_asym_pause == 1))) {
				err = EINVAL;
				break;
			}
			bgep->param_adv_pause = 1;
			bgep->param_adv_asym_pause = 1;

			bgep->param_link_rx_pause = B_TRUE;
			bgep->param_link_tx_pause = B_FALSE;
			break;
		case LINK_FLOWCTRL_TX:
			if (!((bgep->param_lp_pause == 1) &&
			    (bgep->param_lp_asym_pause == 1))) {
				err = EINVAL;
				break;
			}
			bgep->param_adv_pause = 0;
			bgep->param_adv_asym_pause = 1;

			bgep->param_link_rx_pause = B_FALSE;
			bgep->param_link_tx_pause = B_TRUE;
			break;
		case LINK_FLOWCTRL_BI:
			if (bgep->param_lp_pause != 1) {
				err = EINVAL;
				break;
			}
			bgep->param_adv_pause = 1;

			bgep->param_link_rx_pause = B_TRUE;
			bgep->param_link_tx_pause = B_TRUE;
			break;
		}

		if (err == 0) {
			if (bge_reprogram(bgep) == IOC_INVAL)
				err = EINVAL;
		}

		break;
	case MAC_PROP_PRIVATE:
		err = bge_set_priv_prop(bgep, pr_name, pr_valsize,
		    pr_val);
		break;
	default:
		err = ENOTSUP;
		break;
	}
	mutex_exit(bgep->genlock);
	return (err);
}

/* ARGSUSED */
static int
bge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
    uint_t pr_flags, uint_t pr_valsize, void *pr_val, uint_t *perm)
{
	bge_t *bgep = barg;
	int err = 0;
	link_flowctrl_t fl;
	uint64_t speed;
	int flags = bgep->chipid.flags;
	boolean_t is_default = (pr_flags & MAC_PROP_DEFAULT);

	if (pr_valsize == 0)
		return (EINVAL);
	bzero(pr_val, pr_valsize);

	*perm = MAC_PROP_PERM_RW;

	mutex_enter(bgep->genlock);
	if ((bgep->param_loop_mode != BGE_LOOP_NONE &&
	    bge_param_locked(pr_num)) ||
	    ((bgep->chipid.flags & CHIP_FLAG_SERDES) &&
	    ((pr_num == MAC_PROP_EN_100FDX_CAP) ||
	    (pr_num == MAC_PROP_EN_100HDX_CAP) ||
	    (pr_num == MAC_PROP_EN_10FDX_CAP) ||
	    (pr_num == MAC_PROP_EN_10HDX_CAP))) ||
	    (DEVICE_5906_SERIES_CHIPSETS(bgep) &&
	    ((pr_num == MAC_PROP_EN_1000FDX_CAP) ||
	    (pr_num == MAC_PROP_EN_1000HDX_CAP))))
		*perm = MAC_PROP_PERM_READ;
	mutex_exit(bgep->genlock);

	switch (pr_num) {
	case MAC_PROP_DUPLEX:
		*perm = MAC_PROP_PERM_READ;
		if (pr_valsize < sizeof (link_duplex_t))
			return (EINVAL);
		bcopy(&bgep->param_link_duplex, pr_val,
		    sizeof (link_duplex_t));
		break;
	case MAC_PROP_SPEED:
		*perm = MAC_PROP_PERM_READ;
		if (pr_valsize < sizeof (speed))
			return (EINVAL);
		speed = bgep->param_link_speed * 1000000ull;
		bcopy(&speed, pr_val, sizeof (speed));
		break;
	case MAC_PROP_STATUS:
		*perm = MAC_PROP_PERM_READ;
		if (pr_valsize < sizeof (link_state_t))
			return (EINVAL);
		bcopy(&bgep->link_state, pr_val,
		    sizeof (link_state_t));
		break;
	case MAC_PROP_AUTONEG:
		if (is_default)
			*(uint8_t *)pr_val = 1;
		else
			*(uint8_t *)pr_val = bgep->param_adv_autoneg;
		break;
	case MAC_PROP_FLOWCTRL:
		if (pr_valsize < sizeof (fl))
			return (EINVAL);
		if (is_default) {
			fl = LINK_FLOWCTRL_BI;
			bcopy(&fl, pr_val, sizeof (fl));
			break;
		}

		if (bgep->param_link_rx_pause &&
		    !bgep->param_link_tx_pause)
			fl = LINK_FLOWCTRL_RX;

		if (!bgep->param_link_rx_pause &&
		    !bgep->param_link_tx_pause)
			fl = LINK_FLOWCTRL_NONE;

		if (!bgep->param_link_rx_pause &&
		    bgep->param_link_tx_pause)
			fl = LINK_FLOWCTRL_TX;

		if (bgep->param_link_rx_pause &&
		    bgep->param_link_tx_pause)
			fl = LINK_FLOWCTRL_BI;
		bcopy(&fl, pr_val, sizeof (fl));
		break;
	case MAC_PROP_ADV_1000FDX_CAP:
		*perm = MAC_PROP_PERM_READ;
		if (is_default) {
			if (DEVICE_5906_SERIES_CHIPSETS(bgep))
				*(uint8_t *)pr_val = 0;
			else
				*(uint8_t *)pr_val = 1;
		} else
			*(uint8_t *)pr_val = bgep->param_adv_1000fdx;
		break;
	case MAC_PROP_EN_1000FDX_CAP:
		if (is_default) {
			if (DEVICE_5906_SERIES_CHIPSETS(bgep))
				*(uint8_t *)pr_val = 0;
			else
				*(uint8_t *)pr_val = 1;
		} else
			*(uint8_t *)pr_val = bgep->param_en_1000fdx;
		break;
	case MAC_PROP_ADV_1000HDX_CAP:
		*perm = MAC_PROP_PERM_READ;
		if (is_default) {
			if (DEVICE_5906_SERIES_CHIPSETS(bgep))
				*(uint8_t *)pr_val = 0;
			else
				*(uint8_t *)pr_val = 1;
		} else
			*(uint8_t *)pr_val = bgep->param_adv_1000hdx;
		break;
	case MAC_PROP_EN_1000HDX_CAP:
		if (is_default) {
			if (DEVICE_5906_SERIES_CHIPSETS(bgep))
				*(uint8_t *)pr_val = 0;
			else
				*(uint8_t *)pr_val = 1;
		} else
			*(uint8_t *)pr_val = bgep->param_en_1000hdx;
		break;
	case MAC_PROP_ADV_100FDX_CAP:
		*perm = MAC_PROP_PERM_READ;
		if (is_default) {
			*(uint8_t *)pr_val =
			    ((flags & CHIP_FLAG_SERDES) ? 0 : 1);
		} else {
			*(uint8_t *)pr_val = bgep->param_adv_100fdx;
		}
		break;
	case MAC_PROP_EN_100FDX_CAP:
		if (is_default) {
			*(uint8_t *)pr_val =
			    ((flags & CHIP_FLAG_SERDES) ? 0 : 1);
		} else {
			*(uint8_t *)pr_val = bgep->param_en_100fdx;
		}
		break;
	case MAC_PROP_ADV_100HDX_CAP:
		*perm = MAC_PROP_PERM_READ;
		if (is_default) {
			*(uint8_t *)pr_val =
			    ((flags & CHIP_FLAG_SERDES) ? 0 : 1);
		} else {
			*(uint8_t *)pr_val = bgep->param_adv_100hdx;
		}
		break;
	case MAC_PROP_EN_100HDX_CAP:
		if (is_default) {
			*(uint8_t *)pr_val =
			    ((flags & CHIP_FLAG_SERDES) ? 0 : 1);
		} else {
			*(uint8_t *)pr_val = bgep->param_en_100hdx;
		}
		break;
	case MAC_PROP_ADV_10FDX_CAP:
		*perm = MAC_PROP_PERM_READ;
		if (is_default) {
			*(uint8_t *)pr_val =
			    ((flags & CHIP_FLAG_SERDES) ? 0 : 1);
		} else {
			*(uint8_t *)pr_val = bgep->param_adv_10fdx;
		}
		break;
	case MAC_PROP_EN_10FDX_CAP:
		if (is_default) {
			*(uint8_t *)pr_val =
			    ((flags & CHIP_FLAG_SERDES) ? 0 : 1);
		} else {
			*(uint8_t *)pr_val = bgep->param_en_10fdx;
		}
		break;
	case MAC_PROP_ADV_10HDX_CAP:
		*perm = MAC_PROP_PERM_READ;
		if (is_default) {
			*(uint8_t *)pr_val =
			    ((flags & CHIP_FLAG_SERDES) ? 0 : 1);
		} else {
			*(uint8_t *)pr_val = bgep->param_adv_10hdx;
		}
		break;
	case MAC_PROP_EN_10HDX_CAP:
		if (is_default) {
			*(uint8_t *)pr_val =
			    ((flags & CHIP_FLAG_SERDES) ? 0 : 1);
		} else {
			*(uint8_t *)pr_val = bgep->param_en_10hdx;
		}
		break;
	case MAC_PROP_ADV_100T4_CAP:
	case MAC_PROP_EN_100T4_CAP:
		*perm = MAC_PROP_PERM_READ;
		*(uint8_t *)pr_val = 0;
		break;
	case MAC_PROP_PRIVATE:
		err = bge_get_priv_prop(bgep, pr_name, pr_flags,
		    pr_valsize, pr_val);
		return (err);
	default:
		return (ENOTSUP);
	}
	return (0);
}

/* ARGSUSED */
static int
bge_set_priv_prop(bge_t *bgep, const char *pr_name, uint_t pr_valsize,
    const void *pr_val)
{
	int err = 0;
	long result;

	if (strcmp(pr_name, "_adv_pause_cap") == 0) {
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
		if (result > 1 || result < 0) {
			err = EINVAL;
		} else {
			bgep->param_adv_pause = (uint32_t)result;
			if (bge_reprogram(bgep) == IOC_INVAL)
				err = EINVAL;
		}
		return (err);
	}
	if (strcmp(pr_name, "_adv_asym_pause_cap") == 0) {
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
		if (result > 1 || result < 0) {
			err = EINVAL;
		} else {
			bgep->param_adv_asym_pause = (uint32_t)result;
			if (bge_reprogram(bgep) == IOC_INVAL)
				err = EINVAL;
		}
		return (err);
	}
	if (strcmp(pr_name, "_drain_max") == 0) {

		/*
		 * on the Tx side, we need to update the h/w register for
		 * real packet transmission per packet. The drain_max
		 * parameter is used to reduce the register access. This
		 * parameter controls the max number of packets that we
		 * will hold before updating the bge h/w to trigger h/w
		 * transmit. The bge chipset usually has a max of 512 Tx
		 * descriptors, thus the upper bound on drain_max is 512.
		 */
		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
		if (result > 512 || result < 1)
			err = EINVAL;
		else {
			bgep->param_drain_max = (uint32_t)result;
			if (bge_reprogram(bgep) == IOC_INVAL)
				err = EINVAL;
		}
		return (err);
	}
	if (strcmp(pr_name, "_msi_cnt") == 0) {

		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
		if (result > 7 || result < 0)
			err = EINVAL;
		else {
			bgep->param_msi_cnt = (uint32_t)result;
			if (bge_reprogram(bgep) == IOC_INVAL)
				err = EINVAL;
		}
		return (err);
	}
	if (strcmp(pr_name, "_intr_coalesce_blank_time") == 0) {
		if (ddi_strtol(pr_val, (char **)NULL, 0, &result) != 0)
			return (EINVAL);

		bgep->chipid.rx_ticks_norm = (uint32_t)result;
		return (0);
	}

	if (strcmp(pr_name, "_intr_coalesce_pkt_cnt") == 0) {
		if (ddi_strtol(pr_val, (char **)NULL, 0, &result) != 0)
			return (EINVAL);

		bgep->chipid.rx_count_norm = (uint32_t)result;
		return (0);
	}
	return (ENOTSUP);
}

static int
bge_get_priv_prop(bge_t *bge, const char *pr_name, uint_t pr_flags,
    uint_t pr_valsize, void *pr_val)
{
	int err = ENOTSUP;
	boolean_t is_default = (pr_flags & MAC_PROP_DEFAULT);
	int value;

	if (strcmp(pr_name, "_adv_pause_cap") == 0) {
		value = (is_default? 1 : bge->param_adv_pause);
		err = 0;
		goto done;
	}
	if (strcmp(pr_name, "_adv_asym_pause_cap") == 0) {
		value = (is_default? 1 : bge->param_adv_asym_pause);
		err = 0;
		goto done;
	}
	if (strcmp(pr_name, "_drain_max") == 0) {
		value = (is_default? 64 : bge->param_drain_max);
		err = 0;
		goto done;
	}
	if (strcmp(pr_name, "_msi_cnt") == 0) {
		value = (is_default? 0 : bge->param_msi_cnt);
		err = 0;
		goto done;
	}

	if (strcmp(pr_name, "_intr_coalesce_blank_time") == 0) {
		value = (is_default? bge_rx_ticks_norm :
		    bge->chipid.rx_ticks_norm);
		err = 0;
		goto done;
	}

	if (strcmp(pr_name, "_intr_coalesce_pkt_cnt") == 0) {
		value = (is_default? bge_rx_count_norm :
		    bge->chipid.rx_count_norm);
		err = 0;
		goto done;
	}

done:
	if (err == 0) {
		(void) snprintf(pr_val, pr_valsize, "%d", value);
	}
	return (err);
}

/*
 * Compute the index of the required bit in the multicast hash map.
 * This must mirror the way the hardware actually does it!
 * See Broadcom document 570X-PG102-R page 125.
 */
static uint32_t
bge_hash_index(const uint8_t *mca)
{
	uint32_t hash;

	CRC32(hash, mca, ETHERADDRL, -1U, crc32_table);

	return (hash);
}
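
/*
 * Note for callers: the full 32-bit CRC is returned; bge_m_multicst()
 * below reduces it modulo BGE_HASH_TABLE_SIZE and then splits the
 * result into a word index and a bit mask within bgep->mcast_hash[].
 */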

/*
 * bge_m_multicst() -- enable/disable a multicast address
 */
static int
bge_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
{
	bge_t *bgep = arg;		/* private device info	*/
	uint32_t hash;
	uint32_t index;
	uint32_t word;
	uint32_t bit;
	uint8_t *refp;

	BGE_TRACE(("bge_m_multicst($%p, %s, %s)", arg,
	    (add) ? "add" : "remove", ether_sprintf((void *)mca)));

	/*
	 * Precalculate all required masks, pointers etc ...
	 */
	hash = bge_hash_index(mca);
	index = hash % BGE_HASH_TABLE_SIZE;
	word = index/32u;
	bit = 1 << (index % 32u);
	refp = &bgep->mcast_refs[index];

	BGE_DEBUG(("bge_m_multicst: hash 0x%x index %d (%d:0x%x) = %d",
	    hash, index, word, bit, *refp));

	/*
	 * We must set the appropriate bit in the hash map (and the
	 * corresponding h/w register) when the refcount goes from 0
	 * to >0, and clear it when the last ref goes away (refcount
	 * goes from >0 back to 0).  If we change the hash map, we
	 * must also update the chip's hardware map registers.
	 */
	mutex_enter(bgep->genlock);
	if (!(bgep->progress & PROGRESS_INTR)) {
		/* can happen during autorecovery */
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	if (add) {
		if ((*refp)++ == 0) {
			bgep->mcast_hash[word] |= bit;
#ifdef BGE_IPMI_ASF
			if (bge_chip_sync(bgep, B_TRUE) == DDI_FAILURE) {
#else
			if (bge_chip_sync(bgep) == DDI_FAILURE) {
#endif
				(void) bge_check_acc_handle(bgep,
				    bgep->cfg_handle);
				(void) bge_check_acc_handle(bgep,
				    bgep->io_handle);
				ddi_fm_service_impact(bgep->devinfo,
				    DDI_SERVICE_DEGRADED);
				mutex_exit(bgep->genlock);
				return (EIO);
			}
		}
	} else {
		if (--(*refp) == 0) {
			bgep->mcast_hash[word] &= ~bit;
#ifdef BGE_IPMI_ASF
			if (bge_chip_sync(bgep, B_TRUE) == DDI_FAILURE) {
#else
			if (bge_chip_sync(bgep) == DDI_FAILURE) {
#endif
				(void) bge_check_acc_handle(bgep,
				    bgep->cfg_handle);
				(void) bge_check_acc_handle(bgep,
				    bgep->io_handle);
				ddi_fm_service_impact(bgep->devinfo,
				    DDI_SERVICE_DEGRADED);
				mutex_exit(bgep->genlock);
				return (EIO);
			}
		}
	}
	BGE_DEBUG(("bge_m_multicst($%p) done", arg));
	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	mutex_exit(bgep->genlock);

	return (0);
}

/*
 * bge_m_promisc() -- set or reset promiscuous mode on the board
 *
 *	Program the hardware to enable/disable promiscuous and/or
 *	receive-all-multicast modes.
 */
static int
bge_m_promisc(void *arg, boolean_t on)
{
	bge_t *bgep = arg;

	BGE_TRACE(("bge_m_promisc_set($%p, %d)", arg, on));

	/*
	 * Store MAC layer specified mode and pass to chip layer to update h/w
	 */
	mutex_enter(bgep->genlock);
	if (!(bgep->progress & PROGRESS_INTR)) {
		/* can happen during autorecovery */
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	bgep->promisc = on;
#ifdef BGE_IPMI_ASF
	if (bge_chip_sync(bgep, B_TRUE) == DDI_FAILURE) {
#else
	if (bge_chip_sync(bgep) == DDI_FAILURE) {
#endif
		(void) bge_check_acc_handle(bgep, bgep->cfg_handle);
		(void) bge_check_acc_handle(bgep, bgep->io_handle);
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	BGE_DEBUG(("bge_m_promisc_set($%p) done", arg));
	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	mutex_exit(bgep->genlock);
	return (0);
}
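
/*
 * bge_m_getcapab() -- report the capabilities this driver supports
 *
 * Here we advertise full hardware checksumming for IPv4 TCP/UDP
 * (HCKSUM_INET_FULL_V4) plus IP header checksum offload
 * (HCKSUM_IPHDRCKSUM), polling support, and the multiple unicast
 * address callbacks defined above.
 */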

/*ARGSUSED*/
static boolean_t
bge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
{
	bge_t *bgep = arg;

	switch (cap) {
	case MAC_CAPAB_HCKSUM: {
		uint32_t *txflags = cap_data;

		*txflags = HCKSUM_INET_FULL_V4 | HCKSUM_IPHDRCKSUM;
		break;
	}

	case MAC_CAPAB_POLL:
		/*
		 * There's nothing for us to fill in, simply returning
		 * B_TRUE stating that we support polling is sufficient.
		 */
		break;

	case MAC_CAPAB_MULTIADDRESS: {
		multiaddress_capab_t *mmacp = cap_data;

		mutex_enter(bgep->genlock);
		/*
		 * The number of MAC addresses made available by
		 * this capability is one less than the total as
		 * the primary address in slot 0 is counted in
		 * the total.
		 */
		mmacp->maddr_naddr = bgep->unicst_addr_total - 1;
		mmacp->maddr_naddrfree = bgep->unicst_addr_avail;
		/* No multiple factory addresses, set mma_flag to 0 */
		mmacp->maddr_flag = 0;
		mmacp->maddr_handle = bgep;
		mmacp->maddr_add = bge_m_unicst_add;
		mmacp->maddr_remove = bge_m_unicst_remove;
		mmacp->maddr_modify = bge_m_unicst_modify;
		mmacp->maddr_get = bge_m_unicst_get;
		mmacp->maddr_reserve = NULL;
		mutex_exit(bgep->genlock);
		break;
	}

	default:
		return (B_FALSE);
	}
	return (B_TRUE);
}

/*
 * Loopback ioctl code
 */

static lb_property_t loopmodes[] = {
	{ normal,	"normal",	BGE_LOOP_NONE		},
	{ external,	"1000Mbps",	BGE_LOOP_EXTERNAL_1000	},
	{ external,	"100Mbps",	BGE_LOOP_EXTERNAL_100	},
	{ external,	"10Mbps",	BGE_LOOP_EXTERNAL_10	},
	{ internal,	"PHY",		BGE_LOOP_INTERNAL_PHY	},
	{ internal,	"MAC",		BGE_LOOP_INTERNAL_MAC	}
};

static enum ioc_reply
bge_set_loop_mode(bge_t *bgep, uint32_t mode)
{
	/*
	 * If the mode isn't being changed, there's nothing to do ...
	 */
	if (mode == bgep->param_loop_mode)
		return (IOC_ACK);

	/*
	 * Validate the requested mode and prepare a suitable message
	 * to explain the link down/up cycle that the change will
	 * probably induce ...
	 */
	switch (mode) {
	default:
		return (IOC_INVAL);

	case BGE_LOOP_NONE:
	case BGE_LOOP_EXTERNAL_1000:
	case BGE_LOOP_EXTERNAL_100:
	case BGE_LOOP_EXTERNAL_10:
	case BGE_LOOP_INTERNAL_PHY:
	case BGE_LOOP_INTERNAL_MAC:
		break;
	}

	/*
	 * All OK; tell the caller to reprogram
	 * the PHY and/or MAC for the new mode ...
	 */
	bgep->param_loop_mode = mode;
	return (IOC_RESTART_ACK);
}
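
/*
 * Note: IOC_RESTART_ACK (rather than plain IOC_ACK) tells the caller,
 * bge_m_ioctl(), to call bge_reprogram() before acknowledging the
 * ioctl, so the PHY/MAC are actually switched into the new mode while
 * genlock is still held.
 */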

static enum ioc_reply
bge_loop_ioctl(bge_t *bgep, queue_t *wq, mblk_t *mp, struct iocblk *iocp)
{
	lb_info_sz_t *lbsp;
	lb_property_t *lbpp;
	uint32_t *lbmp;
	int cmd;

	_NOTE(ARGUNUSED(wq))

	/*
	 * Validate format of ioctl
	 */
	if (mp->b_cont == NULL)
		return (IOC_INVAL);

	cmd = iocp->ioc_cmd;
	switch (cmd) {
	default:
		/* NOTREACHED */
		bge_error(bgep, "bge_loop_ioctl: invalid cmd 0x%x", cmd);
		return (IOC_INVAL);

	case LB_GET_INFO_SIZE:
		if (iocp->ioc_count != sizeof (lb_info_sz_t))
			return (IOC_INVAL);
		lbsp = (void *)mp->b_cont->b_rptr;
		*lbsp = sizeof (loopmodes);
		return (IOC_REPLY);

	case LB_GET_INFO:
		if (iocp->ioc_count != sizeof (loopmodes))
			return (IOC_INVAL);
		lbpp = (void *)mp->b_cont->b_rptr;
		bcopy(loopmodes, lbpp, sizeof (loopmodes));
		return (IOC_REPLY);

	case LB_GET_MODE:
		if (iocp->ioc_count != sizeof (uint32_t))
			return (IOC_INVAL);
		lbmp = (void *)mp->b_cont->b_rptr;
		*lbmp = bgep->param_loop_mode;
		return (IOC_REPLY);

	case LB_SET_MODE:
		if (iocp->ioc_count != sizeof (uint32_t))
			return (IOC_INVAL);
		lbmp = (void *)mp->b_cont->b_rptr;
		return (bge_set_loop_mode(bgep, *lbmp));
	}
}

/*
 * Specific bge IOCTLs, the gld module handles the generic ones.
 */
static void
bge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
{
	bge_t *bgep = arg;
	struct iocblk *iocp;
	enum ioc_reply status;
	boolean_t need_privilege;
	int err;
	int cmd;

	/*
	 * Validate the command before bothering with the mutex ...
	 */
	iocp = (void *)mp->b_rptr;
	iocp->ioc_error = 0;
	need_privilege = B_TRUE;
	cmd = iocp->ioc_cmd;
	switch (cmd) {
	default:
		miocnak(wq, mp, 0, EINVAL);
		return;

	case BGE_MII_READ:
	case BGE_MII_WRITE:
	case BGE_SEE_READ:
	case BGE_SEE_WRITE:
	case BGE_FLASH_READ:
	case BGE_FLASH_WRITE:
	case BGE_DIAG:
	case BGE_PEEK:
	case BGE_POKE:
	case BGE_PHY_RESET:
	case BGE_SOFT_RESET:
	case BGE_HARD_RESET:
		break;

	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
	case LB_GET_MODE:
		need_privilege = B_FALSE;
		/* FALLTHRU */
	case LB_SET_MODE:
		break;

	}

	if (need_privilege) {
		/*
		 * Check for specific net_config privilege on Solaris 10+.
		 */
		err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
		if (err != 0) {
			miocnak(wq, mp, 0, err);
			return;
		}
	}

	mutex_enter(bgep->genlock);
	if (!(bgep->progress & PROGRESS_INTR)) {
		/* can happen during autorecovery */
		mutex_exit(bgep->genlock);
		miocnak(wq, mp, 0, EIO);
		return;
	}

	switch (cmd) {
	default:
		_NOTE(NOTREACHED)
		status = IOC_INVAL;
		break;

	case BGE_MII_READ:
	case BGE_MII_WRITE:
	case BGE_SEE_READ:
	case BGE_SEE_WRITE:
	case BGE_FLASH_READ:
	case BGE_FLASH_WRITE:
	case BGE_DIAG:
	case BGE_PEEK:
	case BGE_POKE:
	case BGE_PHY_RESET:
	case BGE_SOFT_RESET:
	case BGE_HARD_RESET:
		status = bge_chip_ioctl(bgep, wq, mp, iocp);
		break;

	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
	case LB_GET_MODE:
	case LB_SET_MODE:
		status = bge_loop_ioctl(bgep, wq, mp, iocp);
		break;

	}

	/*
	 * Do we need to reprogram the PHY and/or the MAC?
	 * Do it now, while we still have the mutex.
	 *
	 * Note: update the PHY first, 'cos it controls the
	 * speed/duplex parameters that the MAC code uses.
	 */
	switch (status) {
	case IOC_RESTART_REPLY:
	case IOC_RESTART_ACK:
		if (bge_reprogram(bgep) == IOC_INVAL)
			status = IOC_INVAL;
		break;
	}

	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		status = IOC_INVAL;
	}
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		status = IOC_INVAL;
	}
	mutex_exit(bgep->genlock);

	/*
	 * Finally, decide how to reply
	 */
	switch (status) {
	default:
	case IOC_INVAL:
		/*
		 * Error, reply with a NAK and EINVAL or the specified error
		 */
		miocnak(wq, mp, 0, iocp->ioc_error == 0 ?
		    EINVAL : iocp->ioc_error);
		break;

	case IOC_DONE:
		/*
		 * OK, reply already sent
		 */
		break;

	case IOC_RESTART_ACK:
	case IOC_ACK:
		/*
		 * OK, reply with an ACK
		 */
		miocack(wq, mp, 0, 0);
		break;

	case IOC_RESTART_REPLY:
	case IOC_REPLY:
		/*
		 * OK, send prepared reply as ACK or NAK
		 */
		mp->b_datap->db_type = iocp->ioc_error == 0 ?
		    M_IOCACK : M_IOCNAK;
		qreply(wq, mp);
		break;
	}
}

static void
bge_resources_add(bge_t *bgep, time_t time, uint_t pkt_cnt)
{
	recv_ring_t *rrp;
	mac_rx_fifo_t mrf;
	int ring;

	/*
	 * Register Rx rings as resources and save mac
	 * resource id for future reference
	 */
	mrf.mrf_type = MAC_RX_FIFO;
	mrf.mrf_blank = bge_chip_blank;
	mrf.mrf_arg = (void *)bgep;
	mrf.mrf_normal_blank_time = time;
	mrf.mrf_normal_pkt_count = pkt_cnt;

	for (ring = 0; ring < bgep->chipid.rx_rings; ring++) {
		rrp = &bgep->recv[ring];
		rrp->handle = mac_resource_add(bgep->mh,
		    (mac_resource_t *)&mrf);
	}
}
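
/*
 * The blank time and packet count registered above come from
 * chipid.rx_ticks_norm and chipid.rx_count_norm (see bge_m_resources()
 * below); these are the same receive interrupt coalescing parameters
 * that the _intr_coalesce_blank_time and _intr_coalesce_pkt_cnt
 * private properties adjust.
 */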

static void
bge_m_resources(void *arg)
{
	bge_t *bgep = arg;

	mutex_enter(bgep->genlock);

	bge_resources_add(bgep, bgep->chipid.rx_ticks_norm,
	    bgep->chipid.rx_count_norm);
	mutex_exit(bgep->genlock);
}

/*
 * ========== Per-instance setup/teardown code ==========
 */

#undef	BGE_DBG
#define	BGE_DBG		BGE_DBG_INIT	/* debug flag for this code	*/

/*
 * Allocate an area of memory and a DMA handle for accessing it
 */
static int
bge_alloc_dma_mem(bge_t *bgep, size_t memsize, ddi_device_acc_attr_t *attr_p,
    uint_t dma_flags, dma_area_t *dma_p)
{
	caddr_t va;
	int err;

	BGE_TRACE(("bge_alloc_dma_mem($%p, %ld, $%p, 0x%x, $%p)",
	    (void *)bgep, memsize, attr_p, dma_flags, dma_p));

	/*
	 * Allocate handle
	 */
	err = ddi_dma_alloc_handle(bgep->devinfo, &dma_attr,
	    DDI_DMA_DONTWAIT, NULL, &dma_p->dma_hdl);
	if (err != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * Allocate memory
	 */
	err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize, attr_p,
	    dma_flags, DDI_DMA_DONTWAIT, NULL, &va, &dma_p->alength,
	    &dma_p->acc_hdl);
	if (err != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * Bind the two together
	 */
	dma_p->mem_va = va;
	err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL,
	    va, dma_p->alength, dma_flags, DDI_DMA_DONTWAIT, NULL,
	    &dma_p->cookie, &dma_p->ncookies);

	BGE_DEBUG(("bge_alloc_dma_mem(): bind %d bytes; err %d, %d cookies",
	    dma_p->alength, err, dma_p->ncookies));

	if (err != DDI_DMA_MAPPED || dma_p->ncookies != 1)
		return (DDI_FAILURE);

	dma_p->nslots = ~0U;
	dma_p->size = ~0U;
	dma_p->token = ~0U;
	dma_p->offset = 0;
	return (DDI_SUCCESS);
}
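
/*
 * A minimal usage sketch (the size and flags here are illustrative
 * assumptions, not values the driver necessarily uses):
 *
 *	dma_area_t area;
 *
 *	if (bge_alloc_dma_mem(bgep, 4096, &bge_desc_accattr,
 *	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &area) != DDI_SUCCESS)
 *		return (DDI_FAILURE);
 *	...
 *	bge_free_dma_mem(&area);
 */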

/*
 * Free one allocated area of DMAable memory
 */
static void
bge_free_dma_mem(dma_area_t *dma_p)
{
	if (dma_p->dma_hdl != NULL) {
		if (dma_p->ncookies) {
			(void) ddi_dma_unbind_handle(dma_p->dma_hdl);
			dma_p->ncookies = 0;
		}
		ddi_dma_free_handle(&dma_p->dma_hdl);
		dma_p->dma_hdl = NULL;
	}

	if (dma_p->acc_hdl != NULL) {
		ddi_dma_mem_free(&dma_p->acc_hdl);
		dma_p->acc_hdl = NULL;
	}
}

/*
 * Utility routine to carve a slice off a chunk of allocated memory,
 * updating the chunk descriptor accordingly.  The size of the slice
 * is given by the product of the <qty> and <size> parameters.
 */
static void
bge_slice_chunk(dma_area_t *slice, dma_area_t *chunk,
    uint32_t qty, uint32_t size)
{
	static uint32_t sequence = 0xbcd5704a;
	size_t totsize;

	totsize = qty*size;
	ASSERT(totsize <= chunk->alength);

	*slice = *chunk;
	slice->nslots = qty;
	slice->size = size;
	slice->alength = totsize;
	slice->token = ++sequence;

	chunk->mem_va = (caddr_t)chunk->mem_va + totsize;
	chunk->alength -= totsize;
	chunk->offset += totsize;
	chunk->cookie.dmac_laddress += totsize;
	chunk->cookie.dmac_size -= totsize;
}
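
/*
 * For example, bge_init_send_ring() below carves one descriptor per
 * slot off a ring's DMA area:
 *
 *	bge_slice_chunk(&ssbdp->desc, &desc, 1, sizeof (bge_sbd_t));
 *
 * Each call advances <desc> past the slice and shrinks its remaining
 * length, so repeated calls consume the chunk until alength is zero.
 */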
0 : RCB_FLAG_RING_DISABLED; 2086 brp->hw_rcb.nic_ring_addr = nic_ring_addrs[ring]; 2087 2088 /* 2089 * Other one-off initialisation of per-ring data 2090 */ 2091 brp->bgep = bgep; 2092 bsp = DMA_VPTR(bgep->status_block); 2093 brp->cons_index_p = &bsp->buff_cons_index[buff_cons_xref[ring]]; 2094 brp->chip_mbx_reg = mailbox_regs[ring]; 2095 mutex_init(brp->rf_lock, NULL, MUTEX_DRIVER, 2096 DDI_INTR_PRI(bgep->intr_pri)); 2097 2098 /* 2099 * Allocate the array of s/w Receive Buffer Descriptors 2100 */ 2101 srbdp = kmem_zalloc(nslots*sizeof (*srbdp), KM_SLEEP); 2102 brp->sw_rbds = srbdp; 2103 2104 /* 2105 * Now initialise each array element once and for all 2106 */ 2107 for (split = 0; split < BGE_SPLIT; ++split) { 2108 pbuf = brp->buf[split]; 2109 for (slot = 0; slot < nslots/BGE_SPLIT; ++srbdp, ++slot) 2110 bge_slice_chunk(&srbdp->pbuf, &pbuf, 1, bufsize); 2111 ASSERT(pbuf.alength == 0); 2112 } 2113 } 2114 2115 /* 2116 * Clean up initialisation done above before the memory is freed 2117 */ 2118 static void 2119 bge_fini_buff_ring(bge_t *bgep, uint64_t ring) 2120 { 2121 buff_ring_t *brp; 2122 sw_rbd_t *srbdp; 2123 2124 BGE_TRACE(("bge_fini_buff_ring($%p, %d)", 2125 (void *)bgep, ring)); 2126 2127 brp = &bgep->buff[ring]; 2128 srbdp = brp->sw_rbds; 2129 kmem_free(srbdp, brp->desc.nslots*sizeof (*srbdp)); 2130 2131 mutex_destroy(brp->rf_lock); 2132 } 2133 2134 /* 2135 * Initialise the specified Receive (Return) Ring, using the 2136 * information in the <dma_area> descriptors that it contains 2137 * to set up all the other fields. This routine should be called 2138 * only once for each ring. 2139 */ 2140 static void 2141 bge_init_recv_ring(bge_t *bgep, uint64_t ring) 2142 { 2143 recv_ring_t *rrp; 2144 bge_status_t *bsp; 2145 uint32_t nslots; 2146 2147 BGE_TRACE(("bge_init_recv_ring($%p, %d)", 2148 (void *)bgep, ring)); 2149 2150 /* 2151 * The chip architecture requires that receive return rings have 2152 * 512 or 1024 or 2048 elements per ring. See 570X-PG108-R page 103. 2153 */ 2154 rrp = &bgep->recv[ring]; 2155 nslots = rrp->desc.nslots; 2156 ASSERT(nslots == 0 || nslots == 512 || 2157 nslots == 1024 || nslots == 2048); 2158 2159 /* 2160 * Set up the copy of the h/w RCB 2161 */ 2162 rrp->hw_rcb.host_ring_addr = rrp->desc.cookie.dmac_laddress; 2163 rrp->hw_rcb.max_len = (uint16_t)nslots; 2164 rrp->hw_rcb.flags = nslots > 0 ? 0 : RCB_FLAG_RING_DISABLED; 2165 rrp->hw_rcb.nic_ring_addr = 0; 2166 2167 /* 2168 * Other one-off initialisation of per-ring data 2169 */ 2170 rrp->bgep = bgep; 2171 bsp = DMA_VPTR(bgep->status_block); 2172 rrp->prod_index_p = RECV_INDEX_P(bsp, ring); 2173 rrp->chip_mbx_reg = RECV_RING_CONS_INDEX_REG(ring); 2174 mutex_init(rrp->rx_lock, NULL, MUTEX_DRIVER, 2175 DDI_INTR_PRI(bgep->intr_pri)); 2176 } 2177 2178 2179 /* 2180 * Clean up initialisation done above before the memory is freed 2181 */ 2182 static void 2183 bge_fini_recv_ring(bge_t *bgep, uint64_t ring) 2184 { 2185 recv_ring_t *rrp; 2186 2187 BGE_TRACE(("bge_fini_recv_ring($%p, %d)", 2188 (void *)bgep, ring)); 2189 2190 rrp = &bgep->recv[ring]; 2191 if (rrp->rx_softint) 2192 ddi_remove_softintr(rrp->rx_softint); 2193 mutex_destroy(rrp->rx_lock); 2194 } 2195 2196 /* 2197 * Initialise the specified Send Ring, using the information in the 2198 * <dma_area> descriptors that it contains to set up all the other 2199 * fields. This routine should be called only once for each ring. 
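 *
 * Note (summarising the code below): in addition to its 512
 * descriptors, each host-based send ring owns an initial array of
 * BGE_SEND_BUF_NUM pre-mapped transmit buffers of snd_buff_size bytes
 * each; bge_alloc_txbuf_array() can later grow this, up to
 * tx_array_max arrays, if the ring runs short of buffers.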
2200 */ 2201 static void 2202 bge_init_send_ring(bge_t *bgep, uint64_t ring) 2203 { 2204 send_ring_t *srp; 2205 bge_status_t *bsp; 2206 sw_sbd_t *ssbdp; 2207 dma_area_t desc; 2208 dma_area_t pbuf; 2209 uint32_t nslots; 2210 uint32_t slot; 2211 uint32_t split; 2212 sw_txbuf_t *txbuf; 2213 2214 BGE_TRACE(("bge_init_send_ring($%p, %d)", 2215 (void *)bgep, ring)); 2216 2217 /* 2218 * The chip architecture requires that host-based send rings 2219 * have 512 elements per ring. See 570X-PG102-R page 56. 2220 */ 2221 srp = &bgep->send[ring]; 2222 nslots = srp->desc.nslots; 2223 ASSERT(nslots == 0 || nslots == 512); 2224 2225 /* 2226 * Set up the copy of the h/w RCB 2227 */ 2228 srp->hw_rcb.host_ring_addr = srp->desc.cookie.dmac_laddress; 2229 srp->hw_rcb.max_len = (uint16_t)nslots; 2230 srp->hw_rcb.flags = nslots > 0 ? 0 : RCB_FLAG_RING_DISABLED; 2231 srp->hw_rcb.nic_ring_addr = NIC_MEM_SHADOW_SEND_RING(ring, nslots); 2232 2233 /* 2234 * Other one-off initialisation of per-ring data 2235 */ 2236 srp->bgep = bgep; 2237 bsp = DMA_VPTR(bgep->status_block); 2238 srp->cons_index_p = SEND_INDEX_P(bsp, ring); 2239 srp->chip_mbx_reg = SEND_RING_HOST_INDEX_REG(ring); 2240 mutex_init(srp->tx_lock, NULL, MUTEX_DRIVER, 2241 DDI_INTR_PRI(bgep->intr_pri)); 2242 mutex_init(srp->txbuf_lock, NULL, MUTEX_DRIVER, 2243 DDI_INTR_PRI(bgep->intr_pri)); 2244 mutex_init(srp->freetxbuf_lock, NULL, MUTEX_DRIVER, 2245 DDI_INTR_PRI(bgep->intr_pri)); 2246 mutex_init(srp->tc_lock, NULL, MUTEX_DRIVER, 2247 DDI_INTR_PRI(bgep->intr_pri)); 2248 if (nslots == 0) 2249 return; 2250 2251 /* 2252 * Allocate the array of s/w Send Buffer Descriptors 2253 */ 2254 ssbdp = kmem_zalloc(nslots*sizeof (*ssbdp), KM_SLEEP); 2255 txbuf = kmem_zalloc(BGE_SEND_BUF_MAX*sizeof (*txbuf), KM_SLEEP); 2256 srp->txbuf_head = 2257 kmem_zalloc(BGE_SEND_BUF_MAX*sizeof (bge_queue_item_t), KM_SLEEP); 2258 srp->pktp = kmem_zalloc(BGE_SEND_BUF_MAX*sizeof (send_pkt_t), KM_SLEEP); 2259 srp->sw_sbds = ssbdp; 2260 srp->txbuf = txbuf; 2261 srp->tx_buffers = BGE_SEND_BUF_NUM; 2262 srp->tx_buffers_low = srp->tx_buffers / 4; 2263 if (bgep->chipid.snd_buff_size > BGE_SEND_BUFF_SIZE_DEFAULT) 2264 srp->tx_array_max = BGE_SEND_BUF_ARRAY_JUMBO; 2265 else 2266 srp->tx_array_max = BGE_SEND_BUF_ARRAY; 2267 srp->tx_array = 1; 2268 2269 /* 2270 * Chunk tx desc area 2271 */ 2272 desc = srp->desc; 2273 for (slot = 0; slot < nslots; ++ssbdp, ++slot) { 2274 bge_slice_chunk(&ssbdp->desc, &desc, 1, 2275 sizeof (bge_sbd_t)); 2276 } 2277 ASSERT(desc.alength == 0); 2278 2279 /* 2280 * Chunk tx buffer area 2281 */ 2282 for (split = 0; split < BGE_SPLIT; ++split) { 2283 pbuf = srp->buf[0][split]; 2284 for (slot = 0; slot < BGE_SEND_BUF_NUM/BGE_SPLIT; ++slot) { 2285 bge_slice_chunk(&txbuf->buf, &pbuf, 1, 2286 bgep->chipid.snd_buff_size); 2287 txbuf++; 2288 } 2289 ASSERT(pbuf.alength == 0); 2290 } 2291 } 2292 2293 /* 2294 * Clean up initialisation done above before the memory is freed 2295 */ 2296 static void 2297 bge_fini_send_ring(bge_t *bgep, uint64_t ring) 2298 { 2299 send_ring_t *srp; 2300 uint32_t array; 2301 uint32_t split; 2302 uint32_t nslots; 2303 2304 BGE_TRACE(("bge_fini_send_ring($%p, %d)", 2305 (void *)bgep, ring)); 2306 2307 srp = &bgep->send[ring]; 2308 mutex_destroy(srp->tc_lock); 2309 mutex_destroy(srp->freetxbuf_lock); 2310 mutex_destroy(srp->txbuf_lock); 2311 mutex_destroy(srp->tx_lock); 2312 nslots = srp->desc.nslots; 2313 if (nslots == 0) 2314 return; 2315 2316 for (array = 1; array < srp->tx_array; ++array) 2317 for (split = 0; split < BGE_SPLIT; ++split) 2318 
bge_free_dma_mem(&srp->buf[array][split]); 2319 kmem_free(srp->sw_sbds, nslots*sizeof (*srp->sw_sbds)); 2320 kmem_free(srp->txbuf_head, BGE_SEND_BUF_MAX*sizeof (*srp->txbuf_head)); 2321 kmem_free(srp->txbuf, BGE_SEND_BUF_MAX*sizeof (*srp->txbuf)); 2322 kmem_free(srp->pktp, BGE_SEND_BUF_MAX*sizeof (*srp->pktp)); 2323 srp->sw_sbds = NULL; 2324 srp->txbuf_head = NULL; 2325 srp->txbuf = NULL; 2326 srp->pktp = NULL; 2327 } 2328 2329 /* 2330 * Initialise all transmit, receive, and buffer rings. 2331 */ 2332 void 2333 bge_init_rings(bge_t *bgep) 2334 { 2335 uint32_t ring; 2336 2337 BGE_TRACE(("bge_init_rings($%p)", (void *)bgep)); 2338 2339 /* 2340 * Perform one-off initialisation of each ring ... 2341 */ 2342 for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring) 2343 bge_init_send_ring(bgep, ring); 2344 for (ring = 0; ring < BGE_RECV_RINGS_MAX; ++ring) 2345 bge_init_recv_ring(bgep, ring); 2346 for (ring = 0; ring < BGE_BUFF_RINGS_MAX; ++ring) 2347 bge_init_buff_ring(bgep, ring); 2348 } 2349 2350 /* 2351 * Undo the work of bge_init_rings() above before the memory is freed 2352 */ 2353 void 2354 bge_fini_rings(bge_t *bgep) 2355 { 2356 uint32_t ring; 2357 2358 BGE_TRACE(("bge_fini_rings($%p)", (void *)bgep)); 2359 2360 for (ring = 0; ring < BGE_BUFF_RINGS_MAX; ++ring) 2361 bge_fini_buff_ring(bgep, ring); 2362 for (ring = 0; ring < BGE_RECV_RINGS_MAX; ++ring) 2363 bge_fini_recv_ring(bgep, ring); 2364 for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring) 2365 bge_fini_send_ring(bgep, ring); 2366 } 2367 2368 /* 2369 * Called from the bge_m_stop() to free the tx buffers which are 2370 * allocated from the tx process. 2371 */ 2372 void 2373 bge_free_txbuf_arrays(send_ring_t *srp) 2374 { 2375 uint32_t array; 2376 uint32_t split; 2377 2378 ASSERT(mutex_owned(srp->tx_lock)); 2379 2380 /* 2381 * Free the extra tx buffer DMA area 2382 */ 2383 for (array = 1; array < srp->tx_array; ++array) 2384 for (split = 0; split < BGE_SPLIT; ++split) 2385 bge_free_dma_mem(&srp->buf[array][split]); 2386 2387 /* 2388 * Restore initial tx buffer numbers 2389 */ 2390 srp->tx_array = 1; 2391 srp->tx_buffers = BGE_SEND_BUF_NUM; 2392 srp->tx_buffers_low = srp->tx_buffers / 4; 2393 srp->tx_flow = 0; 2394 bzero(srp->pktp, BGE_SEND_BUF_MAX * sizeof (*srp->pktp)); 2395 } 2396 2397 /* 2398 * Called from tx process to allocate more tx buffers 2399 */ 2400 bge_queue_item_t * 2401 bge_alloc_txbuf_array(bge_t *bgep, send_ring_t *srp) 2402 { 2403 bge_queue_t *txbuf_queue; 2404 bge_queue_item_t *txbuf_item_last; 2405 bge_queue_item_t *txbuf_item; 2406 bge_queue_item_t *txbuf_item_rtn; 2407 sw_txbuf_t *txbuf; 2408 dma_area_t area; 2409 size_t txbuffsize; 2410 uint32_t slot; 2411 uint32_t array; 2412 uint32_t split; 2413 uint32_t err; 2414 2415 ASSERT(mutex_owned(srp->tx_lock)); 2416 2417 array = srp->tx_array; 2418 if (array >= srp->tx_array_max) 2419 return (NULL); 2420 2421 /* 2422 * Allocate memory & handles for TX buffers 2423 */ 2424 txbuffsize = BGE_SEND_BUF_NUM*bgep->chipid.snd_buff_size; 2425 ASSERT((txbuffsize % BGE_SPLIT) == 0); 2426 for (split = 0; split < BGE_SPLIT; ++split) { 2427 err = bge_alloc_dma_mem(bgep, txbuffsize/BGE_SPLIT, 2428 &bge_data_accattr, DDI_DMA_WRITE | BGE_DMA_MODE, 2429 &srp->buf[array][split]); 2430 if (err != DDI_SUCCESS) { 2431 /* Free the last already allocated OK chunks */ 2432 for (slot = 0; slot <= split; ++slot) 2433 bge_free_dma_mem(&srp->buf[array][slot]); 2434 srp->tx_alloc_fail++; 2435 return (NULL); 2436 } 2437 } 2438 2439 /* 2440 * Chunk tx buffer area 2441 */ 2442 txbuf = srp->txbuf + 
array*BGE_SEND_BUF_NUM; 2443 for (split = 0; split < BGE_SPLIT; ++split) { 2444 area = srp->buf[array][split]; 2445 for (slot = 0; slot < BGE_SEND_BUF_NUM/BGE_SPLIT; ++slot) { 2446 bge_slice_chunk(&txbuf->buf, &area, 1, 2447 bgep->chipid.snd_buff_size); 2448 txbuf++; 2449 } 2450 } 2451 2452 /* 2453 * Add above buffers to the tx buffer pop queue 2454 */ 2455 txbuf_item = srp->txbuf_head + array*BGE_SEND_BUF_NUM; 2456 txbuf = srp->txbuf + array*BGE_SEND_BUF_NUM; 2457 txbuf_item_last = NULL; 2458 for (slot = 0; slot < BGE_SEND_BUF_NUM; ++slot) { 2459 txbuf_item->item = txbuf; 2460 txbuf_item->next = txbuf_item_last; 2461 txbuf_item_last = txbuf_item; 2462 txbuf++; 2463 txbuf_item++; 2464 } 2465 txbuf_item = srp->txbuf_head + array*BGE_SEND_BUF_NUM; 2466 txbuf_item_rtn = txbuf_item; 2467 txbuf_item++; 2468 txbuf_queue = srp->txbuf_pop_queue; 2469 mutex_enter(txbuf_queue->lock); 2470 txbuf_item->next = txbuf_queue->head; 2471 txbuf_queue->head = txbuf_item_last; 2472 txbuf_queue->count += BGE_SEND_BUF_NUM - 1; 2473 mutex_exit(txbuf_queue->lock); 2474 2475 srp->tx_array++; 2476 srp->tx_buffers += BGE_SEND_BUF_NUM; 2477 srp->tx_buffers_low = srp->tx_buffers / 4; 2478 2479 return (txbuf_item_rtn); 2480 } 2481 2482 /* 2483 * This function allocates all the transmit and receive buffers 2484 * and descriptors, in four chunks. 2485 */ 2486 int 2487 bge_alloc_bufs(bge_t *bgep) 2488 { 2489 dma_area_t area; 2490 size_t rxbuffsize; 2491 size_t txbuffsize; 2492 size_t rxbuffdescsize; 2493 size_t rxdescsize; 2494 size_t txdescsize; 2495 uint32_t ring; 2496 uint32_t rx_rings = bgep->chipid.rx_rings; 2497 uint32_t tx_rings = bgep->chipid.tx_rings; 2498 int split; 2499 int err; 2500 2501 BGE_TRACE(("bge_alloc_bufs($%p)", 2502 (void *)bgep)); 2503 2504 rxbuffsize = BGE_STD_SLOTS_USED*bgep->chipid.std_buf_size; 2505 rxbuffsize += bgep->chipid.jumbo_slots*bgep->chipid.recv_jumbo_size; 2506 rxbuffsize += BGE_MINI_SLOTS_USED*BGE_MINI_BUFF_SIZE; 2507 2508 txbuffsize = BGE_SEND_BUF_NUM*bgep->chipid.snd_buff_size; 2509 txbuffsize *= tx_rings; 2510 2511 rxdescsize = rx_rings*bgep->chipid.recv_slots; 2512 rxdescsize *= sizeof (bge_rbd_t); 2513 2514 rxbuffdescsize = BGE_STD_SLOTS_USED; 2515 rxbuffdescsize += bgep->chipid.jumbo_slots; 2516 rxbuffdescsize += BGE_MINI_SLOTS_USED; 2517 rxbuffdescsize *= sizeof (bge_rbd_t); 2518 2519 txdescsize = tx_rings*BGE_SEND_SLOTS_USED; 2520 txdescsize *= sizeof (bge_sbd_t); 2521 txdescsize += sizeof (bge_statistics_t); 2522 txdescsize += sizeof (bge_status_t); 2523 txdescsize += BGE_STATUS_PADDING; 2524 2525 /* 2526 * Enable PCI relaxed ordering only for RX/TX data buffers 2527 */ 2528 if (bge_relaxed_ordering) 2529 dma_attr.dma_attr_flags |= DDI_DMA_RELAXED_ORDERING; 2530 2531 /* 2532 * Allocate memory & handles for RX buffers 2533 */ 2534 ASSERT((rxbuffsize % BGE_SPLIT) == 0); 2535 for (split = 0; split < BGE_SPLIT; ++split) { 2536 err = bge_alloc_dma_mem(bgep, rxbuffsize/BGE_SPLIT, 2537 &bge_data_accattr, DDI_DMA_READ | BGE_DMA_MODE, 2538 &bgep->rx_buff[split]); 2539 if (err != DDI_SUCCESS) 2540 return (DDI_FAILURE); 2541 } 2542 2543 /* 2544 * Allocate memory & handles for TX buffers 2545 */ 2546 ASSERT((txbuffsize % BGE_SPLIT) == 0); 2547 for (split = 0; split < BGE_SPLIT; ++split) { 2548 err = bge_alloc_dma_mem(bgep, txbuffsize/BGE_SPLIT, 2549 &bge_data_accattr, DDI_DMA_WRITE | BGE_DMA_MODE, 2550 &bgep->tx_buff[split]); 2551 if (err != DDI_SUCCESS) 2552 return (DDI_FAILURE); 2553 } 2554 2555 dma_attr.dma_attr_flags &= ~DDI_DMA_RELAXED_ORDERING; 2556 2557 /* 2558 * Allocate memory 
& handles for receive return rings 2559 */ 2560 ASSERT((rxdescsize % rx_rings) == 0); 2561 for (split = 0; split < rx_rings; ++split) { 2562 err = bge_alloc_dma_mem(bgep, rxdescsize/rx_rings, 2563 &bge_desc_accattr, DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 2564 &bgep->rx_desc[split]); 2565 if (err != DDI_SUCCESS) 2566 return (DDI_FAILURE); 2567 } 2568 2569 /* 2570 * Allocate memory & handles for buffer (producer) descriptor rings 2571 */ 2572 err = bge_alloc_dma_mem(bgep, rxbuffdescsize, &bge_desc_accattr, 2573 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &bgep->rx_desc[split]); 2574 if (err != DDI_SUCCESS) 2575 return (DDI_FAILURE); 2576 2577 /* 2578 * Allocate memory & handles for TX descriptor rings, 2579 * status block, and statistics area 2580 */ 2581 err = bge_alloc_dma_mem(bgep, txdescsize, &bge_desc_accattr, 2582 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &bgep->tx_desc); 2583 if (err != DDI_SUCCESS) 2584 return (DDI_FAILURE); 2585 2586 /* 2587 * Now carve up each of the allocated areas ... 2588 */ 2589 for (split = 0; split < BGE_SPLIT; ++split) { 2590 area = bgep->rx_buff[split]; 2591 bge_slice_chunk(&bgep->buff[BGE_STD_BUFF_RING].buf[split], 2592 &area, BGE_STD_SLOTS_USED/BGE_SPLIT, 2593 bgep->chipid.std_buf_size); 2594 bge_slice_chunk(&bgep->buff[BGE_JUMBO_BUFF_RING].buf[split], 2595 &area, bgep->chipid.jumbo_slots/BGE_SPLIT, 2596 bgep->chipid.recv_jumbo_size); 2597 bge_slice_chunk(&bgep->buff[BGE_MINI_BUFF_RING].buf[split], 2598 &area, BGE_MINI_SLOTS_USED/BGE_SPLIT, 2599 BGE_MINI_BUFF_SIZE); 2600 } 2601 2602 for (split = 0; split < BGE_SPLIT; ++split) { 2603 area = bgep->tx_buff[split]; 2604 for (ring = 0; ring < tx_rings; ++ring) 2605 bge_slice_chunk(&bgep->send[ring].buf[0][split], 2606 &area, BGE_SEND_BUF_NUM/BGE_SPLIT, 2607 bgep->chipid.snd_buff_size); 2608 for (; ring < BGE_SEND_RINGS_MAX; ++ring) 2609 bge_slice_chunk(&bgep->send[ring].buf[0][split], 2610 &area, 0, bgep->chipid.snd_buff_size); 2611 } 2612 2613 for (ring = 0; ring < rx_rings; ++ring) 2614 bge_slice_chunk(&bgep->recv[ring].desc, &bgep->rx_desc[ring], 2615 bgep->chipid.recv_slots, sizeof (bge_rbd_t)); 2616 2617 area = bgep->rx_desc[rx_rings]; 2618 for (; ring < BGE_RECV_RINGS_MAX; ++ring) 2619 bge_slice_chunk(&bgep->recv[ring].desc, &area, 2620 0, sizeof (bge_rbd_t)); 2621 bge_slice_chunk(&bgep->buff[BGE_STD_BUFF_RING].desc, &area, 2622 BGE_STD_SLOTS_USED, sizeof (bge_rbd_t)); 2623 bge_slice_chunk(&bgep->buff[BGE_JUMBO_BUFF_RING].desc, &area, 2624 bgep->chipid.jumbo_slots, sizeof (bge_rbd_t)); 2625 bge_slice_chunk(&bgep->buff[BGE_MINI_BUFF_RING].desc, &area, 2626 BGE_MINI_SLOTS_USED, sizeof (bge_rbd_t)); 2627 ASSERT(area.alength == 0); 2628 2629 area = bgep->tx_desc; 2630 for (ring = 0; ring < tx_rings; ++ring) 2631 bge_slice_chunk(&bgep->send[ring].desc, &area, 2632 BGE_SEND_SLOTS_USED, sizeof (bge_sbd_t)); 2633 for (; ring < BGE_SEND_RINGS_MAX; ++ring) 2634 bge_slice_chunk(&bgep->send[ring].desc, &area, 2635 0, sizeof (bge_sbd_t)); 2636 bge_slice_chunk(&bgep->statistics, &area, 1, sizeof (bge_statistics_t)); 2637 bge_slice_chunk(&bgep->status_block, &area, 1, sizeof (bge_status_t)); 2638 ASSERT(area.alength == BGE_STATUS_PADDING); 2639 DMA_ZERO(bgep->status_block); 2640 2641 return (DDI_SUCCESS); 2642 } 2643 2644 /* 2645 * This routine frees the transmit and receive buffers and descriptors. 2646 * Make sure the chip is stopped before calling it! 
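 *
 * Note that rx_desc[] is freed over BGE_RECV_RINGS_SPLIT entries:
 * one per receive (return) ring plus a final entry holding the
 * buffer (producer) descriptor rings, matching the layout set up
 * by bge_alloc_bufs() above.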
2647 */ 2648 void 2649 bge_free_bufs(bge_t *bgep) 2650 { 2651 int split; 2652 2653 BGE_TRACE(("bge_free_bufs($%p)", 2654 (void *)bgep)); 2655 2656 bge_free_dma_mem(&bgep->tx_desc); 2657 for (split = 0; split < BGE_RECV_RINGS_SPLIT; ++split) 2658 bge_free_dma_mem(&bgep->rx_desc[split]); 2659 for (split = 0; split < BGE_SPLIT; ++split) 2660 bge_free_dma_mem(&bgep->tx_buff[split]); 2661 for (split = 0; split < BGE_SPLIT; ++split) 2662 bge_free_dma_mem(&bgep->rx_buff[split]); 2663 } 2664 2665 /* 2666 * Determine (initial) MAC address ("BIA") to use for this interface 2667 */ 2668 2669 static void 2670 bge_find_mac_address(bge_t *bgep, chip_id_t *cidp) 2671 { 2672 struct ether_addr sysaddr; 2673 char propbuf[8]; /* "true" or "false", plus NUL */ 2674 uchar_t *bytes; 2675 int *ints; 2676 uint_t nelts; 2677 int err; 2678 2679 BGE_TRACE(("bge_find_mac_address($%p)", 2680 (void *)bgep)); 2681 2682 BGE_DEBUG(("bge_find_mac_address: hw_mac_addr %012llx, => %s (%sset)", 2683 cidp->hw_mac_addr, 2684 ether_sprintf((void *)cidp->vendor_addr.addr), 2685 cidp->vendor_addr.set ? "" : "not ")); 2686 2687 /* 2688 * The "vendor's factory-set address" may already have 2689 * been extracted from the chip, but if the property 2690 * "local-mac-address" is set we use that instead. It 2691 * will normally be set by OBP, but it could also be 2692 * specified in a .conf file(!) 2693 * 2694 * There doesn't seem to be a way to define byte-array 2695 * properties in a .conf, so we check whether it looks 2696 * like an array of 6 ints instead. 2697 * 2698 * Then, we check whether it looks like an array of 6 2699 * bytes (which it should, if OBP set it). If we can't 2700 * make sense of it either way, we'll ignore it. 2701 */ 2702 err = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, bgep->devinfo, 2703 DDI_PROP_DONTPASS, localmac_propname, &ints, &nelts); 2704 if (err == DDI_PROP_SUCCESS) { 2705 if (nelts == ETHERADDRL) { 2706 while (nelts--) 2707 cidp->vendor_addr.addr[nelts] = ints[nelts]; 2708 cidp->vendor_addr.set = B_TRUE; 2709 } 2710 ddi_prop_free(ints); 2711 } 2712 2713 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, bgep->devinfo, 2714 DDI_PROP_DONTPASS, localmac_propname, &bytes, &nelts); 2715 if (err == DDI_PROP_SUCCESS) { 2716 if (nelts == ETHERADDRL) { 2717 while (nelts--) 2718 cidp->vendor_addr.addr[nelts] = bytes[nelts]; 2719 cidp->vendor_addr.set = B_TRUE; 2720 } 2721 ddi_prop_free(bytes); 2722 } 2723 2724 BGE_DEBUG(("bge_find_mac_address: +local %s (%sset)", 2725 ether_sprintf((void *)cidp->vendor_addr.addr), 2726 cidp->vendor_addr.set ? "" : "not ")); 2727 2728 /* 2729 * Look up the OBP property "local-mac-address?". Note that even 2730 * though its value is a string (which should be "true" or "false"), 2731 * it can't be decoded by ddi_prop_lookup_string(9F). So, we zero 2732 * the buffer first and then fetch the property as an untyped array; 2733 * this may or may not include a final NUL, but since there will 2734 * always be one left at the end of the buffer we can now treat it 2735 * as a string anyway. 2736 */ 2737 nelts = sizeof (propbuf); 2738 bzero(propbuf, nelts--); 2739 err = ddi_getlongprop_buf(DDI_DEV_T_ANY, bgep->devinfo, 2740 DDI_PROP_CANSLEEP, localmac_boolname, propbuf, (int *)&nelts); 2741 2742 /* 2743 * Now, if the address still isn't set from the hardware (SEEPROM) 2744 * or the OBP or .conf property, OR if the user has foolishly set 2745 * 'local-mac-address? = false', use "the system address" instead 2746 * (but only if it's non-null i.e. has been set from the IDPROM). 
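	 *
	 * To summarise, the sources are consulted in increasing order
	 * of precedence: the chip's factory-set address, then the
	 * "local-mac-address" property (as ints or as bytes), then
	 * the system address from the IDPROM (if nothing is set yet,
	 * or if "local-mac-address?" is "false"), and finally the
	 * "mac-address" property, which overrides all of the above.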
2747 */ 2748 if (cidp->vendor_addr.set == B_FALSE || strcmp(propbuf, "false") == 0) 2749 if (localetheraddr(NULL, &sysaddr) != 0) { 2750 ethaddr_copy(&sysaddr, cidp->vendor_addr.addr); 2751 cidp->vendor_addr.set = B_TRUE; 2752 } 2753 2754 BGE_DEBUG(("bge_find_mac_address: +system %s (%sset)", 2755 ether_sprintf((void *)cidp->vendor_addr.addr), 2756 cidp->vendor_addr.set ? "" : "not ")); 2757 2758 /* 2759 * Finally(!), if there's a valid "mac-address" property (created 2760 * if we netbooted from this interface), we must use this instead 2761 * of any of the above to ensure that the NFS/install server doesn't 2762 * get confused by the address changing as Solaris takes over! 2763 */ 2764 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, bgep->devinfo, 2765 DDI_PROP_DONTPASS, macaddr_propname, &bytes, &nelts); 2766 if (err == DDI_PROP_SUCCESS) { 2767 if (nelts == ETHERADDRL) { 2768 while (nelts--) 2769 cidp->vendor_addr.addr[nelts] = bytes[nelts]; 2770 cidp->vendor_addr.set = B_TRUE; 2771 } 2772 ddi_prop_free(bytes); 2773 } 2774 2775 BGE_DEBUG(("bge_find_mac_address: =final %s (%sset)", 2776 ether_sprintf((void *)cidp->vendor_addr.addr), 2777 cidp->vendor_addr.set ? "" : "not ")); 2778 } 2779 2780 2781 /*ARGSUSED*/ 2782 int 2783 bge_check_acc_handle(bge_t *bgep, ddi_acc_handle_t handle) 2784 { 2785 ddi_fm_error_t de; 2786 2787 ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION); 2788 ddi_fm_acc_err_clear(handle, DDI_FME_VERSION); 2789 return (de.fme_status); 2790 } 2791 2792 /*ARGSUSED*/ 2793 int 2794 bge_check_dma_handle(bge_t *bgep, ddi_dma_handle_t handle) 2795 { 2796 ddi_fm_error_t de; 2797 2798 ASSERT(bgep->progress & PROGRESS_BUFS); 2799 ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION); 2800 return (de.fme_status); 2801 } 2802 2803 /* 2804 * The IO fault service error handling callback function 2805 */ 2806 /*ARGSUSED*/ 2807 static int 2808 bge_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data) 2809 { 2810 /* 2811 * as the driver can always deal with an error in any dma or 2812 * access handle, we can just return the fme_status value. 2813 */ 2814 pci_ereport_post(dip, err, NULL); 2815 return (err->fme_status); 2816 } 2817 2818 static void 2819 bge_fm_init(bge_t *bgep) 2820 { 2821 ddi_iblock_cookie_t iblk; 2822 2823 /* Only register with IO Fault Services if we have some capability */ 2824 if (bgep->fm_capabilities) { 2825 bge_reg_accattr.devacc_attr_access = DDI_FLAGERR_ACC; 2826 bge_desc_accattr.devacc_attr_access = DDI_FLAGERR_ACC; 2827 dma_attr.dma_attr_flags = DDI_DMA_FLAGERR; 2828 2829 /* Register capabilities with IO Fault Services */ 2830 ddi_fm_init(bgep->devinfo, &bgep->fm_capabilities, &iblk); 2831 2832 /* 2833 * Initialize pci ereport capabilities if ereport capable 2834 */ 2835 if (DDI_FM_EREPORT_CAP(bgep->fm_capabilities) || 2836 DDI_FM_ERRCB_CAP(bgep->fm_capabilities)) 2837 pci_ereport_setup(bgep->devinfo); 2838 2839 /* 2840 * Register error callback if error callback capable 2841 */ 2842 if (DDI_FM_ERRCB_CAP(bgep->fm_capabilities)) 2843 ddi_fm_handler_register(bgep->devinfo, 2844 bge_fm_error_cb, (void*) bgep); 2845 } else { 2846 /* 2847 * These fields have to be cleared of FMA if there are no 2848 * FMA capabilities at runtime. 
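		 *
		 * (That is, the access attributes fall back to
		 * DDI_DEFAULT_ACC and dma_attr_flags loses
		 * DDI_DMA_FLAGERR, so no error-reporting hooks remain
		 * installed on the handles.)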
2849 */ 2850 bge_reg_accattr.devacc_attr_access = DDI_DEFAULT_ACC; 2851 bge_desc_accattr.devacc_attr_access = DDI_DEFAULT_ACC; 2852 dma_attr.dma_attr_flags = 0; 2853 } 2854 } 2855 2856 static void 2857 bge_fm_fini(bge_t *bgep) 2858 { 2859 /* Only unregister FMA capabilities if we registered some */ 2860 if (bgep->fm_capabilities) { 2861 2862 /* 2863 * Release any resources allocated by pci_ereport_setup() 2864 */ 2865 if (DDI_FM_EREPORT_CAP(bgep->fm_capabilities) || 2866 DDI_FM_ERRCB_CAP(bgep->fm_capabilities)) 2867 pci_ereport_teardown(bgep->devinfo); 2868 2869 /* 2870 * Un-register error callback if error callback capable 2871 */ 2872 if (DDI_FM_ERRCB_CAP(bgep->fm_capabilities)) 2873 ddi_fm_handler_unregister(bgep->devinfo); 2874 2875 /* Unregister from IO Fault Services */ 2876 ddi_fm_fini(bgep->devinfo); 2877 } 2878 } 2879 2880 static void 2881 #ifdef BGE_IPMI_ASF 2882 bge_unattach(bge_t *bgep, uint_t asf_mode) 2883 #else 2884 bge_unattach(bge_t *bgep) 2885 #endif 2886 { 2887 BGE_TRACE(("bge_unattach($%p)", 2888 (void *)bgep)); 2889 2890 /* 2891 * Flag that no more activity may be initiated 2892 */ 2893 bgep->progress &= ~PROGRESS_READY; 2894 2895 /* 2896 * Quiesce the PHY and MAC (leave it reset but still powered). 2897 * Clean up and free all BGE data structures 2898 */ 2899 if (bgep->periodic_id != NULL) { 2900 ddi_periodic_delete(bgep->periodic_id); 2901 bgep->periodic_id = NULL; 2902 } 2903 if (bgep->progress & PROGRESS_KSTATS) 2904 bge_fini_kstats(bgep); 2905 if (bgep->progress & PROGRESS_PHY) 2906 bge_phys_reset(bgep); 2907 if (bgep->progress & PROGRESS_HWINT) { 2908 mutex_enter(bgep->genlock); 2909 #ifdef BGE_IPMI_ASF 2910 if (bge_chip_reset(bgep, B_FALSE, asf_mode) != DDI_SUCCESS) 2911 #else 2912 if (bge_chip_reset(bgep, B_FALSE) != DDI_SUCCESS) 2913 #endif 2914 ddi_fm_service_impact(bgep->devinfo, 2915 DDI_SERVICE_UNAFFECTED); 2916 #ifdef BGE_IPMI_ASF 2917 if (bgep->asf_enabled) { 2918 /* 2919 * This register has been overlaid. We restore its 2920 * initial value here. 
			 */
			bge_nic_put32(bgep, BGE_NIC_DATA_SIG_ADDR,
			    BGE_NIC_DATA_SIG);
		}
#endif
		if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK)
			ddi_fm_service_impact(bgep->devinfo,
			    DDI_SERVICE_UNAFFECTED);
		if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK)
			ddi_fm_service_impact(bgep->devinfo,
			    DDI_SERVICE_UNAFFECTED);
		mutex_exit(bgep->genlock);
	}
	if (bgep->progress & PROGRESS_INTR) {
		bge_intr_disable(bgep);
		bge_fini_rings(bgep);
	}
	if (bgep->progress & PROGRESS_HWINT) {
		bge_rem_intrs(bgep);
		rw_destroy(bgep->errlock);
		mutex_destroy(bgep->softintrlock);
		mutex_destroy(bgep->genlock);
	}
	if (bgep->progress & PROGRESS_FACTOTUM)
		ddi_remove_softintr(bgep->factotum_id);
	if (bgep->progress & PROGRESS_RESCHED)
		ddi_remove_softintr(bgep->drain_id);
	if (bgep->progress & PROGRESS_BUFS)
		bge_free_bufs(bgep);
	if (bgep->progress & PROGRESS_REGS)
		ddi_regs_map_free(&bgep->io_handle);
	if (bgep->progress & PROGRESS_CFG)
		pci_config_teardown(&bgep->cfg_handle);

	bge_fm_fini(bgep);

	ddi_remove_minor_node(bgep->devinfo, NULL);
	kmem_free(bgep->pstats, sizeof (bge_statistics_reg_t));
	kmem_free(bgep, sizeof (*bgep));
}

static int
bge_resume(dev_info_t *devinfo)
{
	bge_t *bgep;			/* Our private data */
	chip_id_t *cidp;
	chip_id_t chipid;

	bgep = ddi_get_driver_private(devinfo);
	if (bgep == NULL)
		return (DDI_FAILURE);

	/*
	 * Refuse to resume if the data structures aren't consistent
	 */
	if (bgep->devinfo != devinfo)
		return (DDI_FAILURE);

#ifdef BGE_IPMI_ASF
	/*
	 * Power management is not currently supported by this driver.
	 * If you want to implement it, add the ASF/IPMI-related code
	 * here.
	 */
#endif

	/*
	 * Read chip ID & set up config space command register(s)
	 * Refuse to resume if the chip has changed its identity!
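	 *
	 * (The identity could differ if, for example, the card was
	 * replaced while the system was suspended; the cached per-chip
	 * state would then be stale, so we simply fail the DDI_RESUME.)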
2991 */ 2992 cidp = &bgep->chipid; 2993 mutex_enter(bgep->genlock); 2994 bge_chip_cfg_init(bgep, &chipid, B_FALSE); 2995 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) { 2996 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 2997 mutex_exit(bgep->genlock); 2998 return (DDI_FAILURE); 2999 } 3000 mutex_exit(bgep->genlock); 3001 if (chipid.vendor != cidp->vendor) 3002 return (DDI_FAILURE); 3003 if (chipid.device != cidp->device) 3004 return (DDI_FAILURE); 3005 if (chipid.revision != cidp->revision) 3006 return (DDI_FAILURE); 3007 if (chipid.asic_rev != cidp->asic_rev) 3008 return (DDI_FAILURE); 3009 3010 /* 3011 * All OK, reinitialise h/w & kick off GLD scheduling 3012 */ 3013 mutex_enter(bgep->genlock); 3014 if (bge_restart(bgep, B_TRUE) != DDI_SUCCESS) { 3015 (void) bge_check_acc_handle(bgep, bgep->cfg_handle); 3016 (void) bge_check_acc_handle(bgep, bgep->io_handle); 3017 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 3018 mutex_exit(bgep->genlock); 3019 return (DDI_FAILURE); 3020 } 3021 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) { 3022 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 3023 mutex_exit(bgep->genlock); 3024 return (DDI_FAILURE); 3025 } 3026 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) { 3027 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 3028 mutex_exit(bgep->genlock); 3029 return (DDI_FAILURE); 3030 } 3031 mutex_exit(bgep->genlock); 3032 return (DDI_SUCCESS); 3033 } 3034 3035 /* 3036 * attach(9E) -- Attach a device to the system 3037 * 3038 * Called once for each board successfully probed. 3039 */ 3040 static int 3041 bge_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd) 3042 { 3043 bge_t *bgep; /* Our private data */ 3044 mac_register_t *macp; 3045 chip_id_t *cidp; 3046 caddr_t regs; 3047 int instance; 3048 int err; 3049 int intr_types; 3050 #ifdef BGE_IPMI_ASF 3051 uint32_t mhcrValue; 3052 #ifdef __sparc 3053 uint16_t value16; 3054 #endif 3055 #ifdef BGE_NETCONSOLE 3056 int retval; 3057 #endif 3058 #endif 3059 3060 instance = ddi_get_instance(devinfo); 3061 3062 BGE_GTRACE(("bge_attach($%p, %d) instance %d", 3063 (void *)devinfo, cmd, instance)); 3064 BGE_BRKPT(NULL, "bge_attach"); 3065 3066 switch (cmd) { 3067 default: 3068 return (DDI_FAILURE); 3069 3070 case DDI_RESUME: 3071 return (bge_resume(devinfo)); 3072 3073 case DDI_ATTACH: 3074 break; 3075 } 3076 3077 bgep = kmem_zalloc(sizeof (*bgep), KM_SLEEP); 3078 bgep->pstats = kmem_zalloc(sizeof (bge_statistics_reg_t), KM_SLEEP); 3079 ddi_set_driver_private(devinfo, bgep); 3080 bgep->bge_guard = BGE_GUARD; 3081 bgep->devinfo = devinfo; 3082 bgep->param_drain_max = 64; 3083 bgep->param_msi_cnt = 0; 3084 bgep->param_loop_mode = 0; 3085 3086 /* 3087 * Initialize more fields in BGE private data 3088 */ 3089 bgep->debug = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 3090 DDI_PROP_DONTPASS, debug_propname, bge_debug); 3091 (void) snprintf(bgep->ifname, sizeof (bgep->ifname), "%s%d", 3092 BGE_DRIVER_NAME, instance); 3093 3094 /* 3095 * Initialize for fma support 3096 */ 3097 bgep->fm_capabilities = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 3098 DDI_PROP_DONTPASS, fm_cap, 3099 DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE | 3100 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE); 3101 BGE_DEBUG(("bgep->fm_capabilities = %d", bgep->fm_capabilities)); 3102 bge_fm_init(bgep); 3103 3104 /* 3105 * Look up the IOMMU's page size for DVMA mappings (must be 3106 * a power of 2) and convert to a mask. 
This can be used to
	 * determine whether a message buffer crosses a page boundary.
	 * Note: in 2s complement binary notation, if X is a power of
	 * 2, then -X has the representation "11...1100...00".
	 * For example, a page size of 0x2000 gives the mask -0x2000,
	 * which is the same as ~(0x2000 - 1).
	 */
	bgep->pagemask = dvma_pagesize(devinfo);
	ASSERT(ddi_ffs(bgep->pagemask) == ddi_fls(bgep->pagemask));
	bgep->pagemask = -bgep->pagemask;

	/*
	 * Map config space registers
	 * Read chip ID & set up config space command register(s)
	 *
	 * Note: this leaves the chip accessible by Memory Space
	 * accesses, but with interrupts and Bus Mastering off.
	 * This should ensure that nothing untoward will happen
	 * if it has been left active by the (net-)bootloader.
	 * We'll re-enable Bus Mastering once we've reset the chip,
	 * and allow interrupts only when everything else is set up.
	 */
	err = pci_config_setup(devinfo, &bgep->cfg_handle);
#ifdef BGE_IPMI_ASF
#ifdef __sparc
	value16 = pci_config_get16(bgep->cfg_handle, PCI_CONF_COMM);
	value16 = value16 | (PCI_COMM_MAE | PCI_COMM_ME);
	pci_config_put16(bgep->cfg_handle, PCI_CONF_COMM, value16);
	mhcrValue = MHCR_ENABLE_INDIRECT_ACCESS |
	    MHCR_ENABLE_TAGGED_STATUS_MODE |
	    MHCR_MASK_INTERRUPT_MODE |
	    MHCR_MASK_PCI_INT_OUTPUT |
	    MHCR_CLEAR_INTERRUPT_INTA |
	    MHCR_ENABLE_ENDIAN_WORD_SWAP |
	    MHCR_ENABLE_ENDIAN_BYTE_SWAP;
	pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_MHCR, mhcrValue);
	bge_ind_put32(bgep, MEMORY_ARBITER_MODE_REG,
	    bge_ind_get32(bgep, MEMORY_ARBITER_MODE_REG) |
	    MEMORY_ARBITER_ENABLE);
#else
	mhcrValue = pci_config_get32(bgep->cfg_handle, PCI_CONF_BGE_MHCR);
#endif
	if (mhcrValue & MHCR_ENABLE_ENDIAN_WORD_SWAP) {
		bgep->asf_wordswapped = B_TRUE;
	} else {
		bgep->asf_wordswapped = B_FALSE;
	}
	bge_asf_get_config(bgep);
#endif
	if (err != DDI_SUCCESS) {
		bge_problem(bgep, "pci_config_setup() failed");
		goto attach_fail;
	}
	bgep->progress |= PROGRESS_CFG;
	cidp = &bgep->chipid;
	bzero(cidp, sizeof (*cidp));
	bge_chip_cfg_init(bgep, cidp, B_FALSE);
	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
		goto attach_fail;
	}

#ifdef BGE_IPMI_ASF
	if (DEVICE_5721_SERIES_CHIPSETS(bgep) ||
	    DEVICE_5714_SERIES_CHIPSETS(bgep)) {
		bgep->asf_newhandshake = B_TRUE;
	} else {
		bgep->asf_newhandshake = B_FALSE;
	}
#endif

	/*
	 * Update those parts of the chip ID derived from volatile
	 * registers with the values seen by OBP (in case the chip
	 * has been reset externally and therefore lost them).
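	 *
	 * (These are the properties named by subven_propname,
	 * subdev_propname, clsize_propname, latency_propname,
	 * rxrings_propname and txrings_propname, looked up just below;
	 * the value previously derived from the chip is used as the
	 * default wherever a property is absent.)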
3179 */ 3180 cidp->subven = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 3181 DDI_PROP_DONTPASS, subven_propname, cidp->subven); 3182 cidp->subdev = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 3183 DDI_PROP_DONTPASS, subdev_propname, cidp->subdev); 3184 cidp->clsize = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 3185 DDI_PROP_DONTPASS, clsize_propname, cidp->clsize); 3186 cidp->latency = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 3187 DDI_PROP_DONTPASS, latency_propname, cidp->latency); 3188 cidp->rx_rings = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 3189 DDI_PROP_DONTPASS, rxrings_propname, cidp->rx_rings); 3190 cidp->tx_rings = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 3191 DDI_PROP_DONTPASS, txrings_propname, cidp->tx_rings); 3192 3193 if (bge_jumbo_enable == B_TRUE) { 3194 cidp->default_mtu = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 3195 DDI_PROP_DONTPASS, default_mtu, BGE_DEFAULT_MTU); 3196 if ((cidp->default_mtu < BGE_DEFAULT_MTU)|| 3197 (cidp->default_mtu > BGE_MAXIMUM_MTU)) { 3198 cidp->default_mtu = BGE_DEFAULT_MTU; 3199 } 3200 } 3201 /* 3202 * Map operating registers 3203 */ 3204 err = ddi_regs_map_setup(devinfo, BGE_PCI_OPREGS_RNUMBER, 3205 ®s, 0, 0, &bge_reg_accattr, &bgep->io_handle); 3206 if (err != DDI_SUCCESS) { 3207 bge_problem(bgep, "ddi_regs_map_setup() failed"); 3208 goto attach_fail; 3209 } 3210 bgep->io_regs = regs; 3211 bgep->progress |= PROGRESS_REGS; 3212 3213 /* 3214 * Characterise the device, so we know its requirements. 3215 * Then allocate the appropriate TX and RX descriptors & buffers. 3216 */ 3217 if (bge_chip_id_init(bgep) == EIO) { 3218 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 3219 goto attach_fail; 3220 } 3221 3222 3223 err = bge_alloc_bufs(bgep); 3224 if (err != DDI_SUCCESS) { 3225 bge_problem(bgep, "DMA buffer allocation failed"); 3226 goto attach_fail; 3227 } 3228 bgep->progress |= PROGRESS_BUFS; 3229 3230 /* 3231 * Add the softint handlers: 3232 * 3233 * Both of these handlers are used to avoid restrictions on the 3234 * context and/or mutexes required for some operations. In 3235 * particular, the hardware interrupt handler and its subfunctions 3236 * can detect a number of conditions that we don't want to handle 3237 * in that context or with that set of mutexes held. So, these 3238 * softints are triggered instead: 3239 * 3240 * the <resched> softint is triggered if we have previously 3241 * had to refuse to send a packet because of resource shortage 3242 * (we've run out of transmit buffers), but the send completion 3243 * interrupt handler has now detected that more buffers have 3244 * become available. 3245 * 3246 * the <factotum> is triggered if the h/w interrupt handler 3247 * sees the <link state changed> or <error> bits in the status 3248 * block. It's also triggered periodically to poll the link 3249 * state, just in case we aren't getting link status change 3250 * interrupts ... 
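	 *
	 * (The periodic trigger is provided by bge_chip_cyclic(),
	 * which is registered further below with ddi_periodic_add(9F).)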
3251 */ 3252 err = ddi_add_softintr(devinfo, DDI_SOFTINT_LOW, &bgep->drain_id, 3253 NULL, NULL, bge_send_drain, (caddr_t)bgep); 3254 if (err != DDI_SUCCESS) { 3255 bge_problem(bgep, "ddi_add_softintr() failed"); 3256 goto attach_fail; 3257 } 3258 bgep->progress |= PROGRESS_RESCHED; 3259 err = ddi_add_softintr(devinfo, DDI_SOFTINT_LOW, &bgep->factotum_id, 3260 NULL, NULL, bge_chip_factotum, (caddr_t)bgep); 3261 if (err != DDI_SUCCESS) { 3262 bge_problem(bgep, "ddi_add_softintr() failed"); 3263 goto attach_fail; 3264 } 3265 bgep->progress |= PROGRESS_FACTOTUM; 3266 3267 /* Get supported interrupt types */ 3268 if (ddi_intr_get_supported_types(devinfo, &intr_types) != DDI_SUCCESS) { 3269 bge_error(bgep, "ddi_intr_get_supported_types failed\n"); 3270 3271 goto attach_fail; 3272 } 3273 3274 BGE_DEBUG(("%s: ddi_intr_get_supported_types() returned: %x", 3275 bgep->ifname, intr_types)); 3276 3277 if ((intr_types & DDI_INTR_TYPE_MSI) && bgep->chipid.msi_enabled) { 3278 if (bge_add_intrs(bgep, DDI_INTR_TYPE_MSI) != DDI_SUCCESS) { 3279 bge_error(bgep, "MSI registration failed, " 3280 "trying FIXED interrupt type\n"); 3281 } else { 3282 BGE_DEBUG(("%s: Using MSI interrupt type", 3283 bgep->ifname)); 3284 bgep->intr_type = DDI_INTR_TYPE_MSI; 3285 bgep->progress |= PROGRESS_HWINT; 3286 } 3287 } 3288 3289 if (!(bgep->progress & PROGRESS_HWINT) && 3290 (intr_types & DDI_INTR_TYPE_FIXED)) { 3291 if (bge_add_intrs(bgep, DDI_INTR_TYPE_FIXED) != DDI_SUCCESS) { 3292 bge_error(bgep, "FIXED interrupt " 3293 "registration failed\n"); 3294 goto attach_fail; 3295 } 3296 3297 BGE_DEBUG(("%s: Using FIXED interrupt type", bgep->ifname)); 3298 3299 bgep->intr_type = DDI_INTR_TYPE_FIXED; 3300 bgep->progress |= PROGRESS_HWINT; 3301 } 3302 3303 if (!(bgep->progress & PROGRESS_HWINT)) { 3304 bge_error(bgep, "No interrupts registered\n"); 3305 goto attach_fail; 3306 } 3307 3308 /* 3309 * Note that interrupts are not enabled yet as 3310 * mutex locks are not initialized. Initialize mutex locks. 3311 */ 3312 mutex_init(bgep->genlock, NULL, MUTEX_DRIVER, 3313 DDI_INTR_PRI(bgep->intr_pri)); 3314 mutex_init(bgep->softintrlock, NULL, MUTEX_DRIVER, 3315 DDI_INTR_PRI(bgep->intr_pri)); 3316 rw_init(bgep->errlock, NULL, RW_DRIVER, 3317 DDI_INTR_PRI(bgep->intr_pri)); 3318 3319 /* 3320 * Initialize rings. 3321 */ 3322 bge_init_rings(bgep); 3323 3324 /* 3325 * Now that mutex locks are initialized, enable interrupts. 3326 */ 3327 bge_intr_enable(bgep); 3328 bgep->progress |= PROGRESS_INTR; 3329 3330 /* 3331 * Initialise link state variables 3332 * Stop, reset & reinitialise the chip. 3333 * Initialise the (internal) PHY. 3334 */ 3335 bgep->link_state = LINK_STATE_UNKNOWN; 3336 3337 mutex_enter(bgep->genlock); 3338 3339 /* 3340 * Reset chip & rings to initial state; also reset address 3341 * filtering, promiscuity, loopback mode. 
	 */
#ifdef BGE_IPMI_ASF
#ifdef BGE_NETCONSOLE
	if (bge_reset(bgep, ASF_MODE_INIT) != DDI_SUCCESS) {
#else
	if (bge_reset(bgep, ASF_MODE_SHUTDOWN) != DDI_SUCCESS) {
#endif
#else
	if (bge_reset(bgep) != DDI_SUCCESS) {
#endif
		(void) bge_check_acc_handle(bgep, bgep->cfg_handle);
		(void) bge_check_acc_handle(bgep, bgep->io_handle);
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
		mutex_exit(bgep->genlock);
		goto attach_fail;
	}

#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled) {
		bgep->asf_status = ASF_STAT_RUN_INIT;
	}
#endif

	bzero(bgep->mcast_hash, sizeof (bgep->mcast_hash));
	bzero(bgep->mcast_refs, sizeof (bgep->mcast_refs));
	bgep->promisc = B_FALSE;
	bgep->param_loop_mode = BGE_LOOP_NONE;
	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
		mutex_exit(bgep->genlock);
		goto attach_fail;
	}
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
		mutex_exit(bgep->genlock);
		goto attach_fail;
	}

	mutex_exit(bgep->genlock);

	if (bge_phys_init(bgep) == EIO) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
		goto attach_fail;
	}
	bgep->progress |= PROGRESS_PHY;

	/*
	 * Initialize NDD-tweakable parameters
	 */
	if (bge_nd_init(bgep)) {
		bge_problem(bgep, "bge_nd_init() failed");
		goto attach_fail;
	}
	bgep->progress |= PROGRESS_NDD;

	/*
	 * Create & initialise named kstats
	 */
	bge_init_kstats(bgep, instance);
	bgep->progress |= PROGRESS_KSTATS;

	/*
	 * Determine whether to override the chip's own MAC address
	 */
	bge_find_mac_address(bgep, cidp);
	ethaddr_copy(cidp->vendor_addr.addr, bgep->curr_addr[0].addr);
	bgep->curr_addr[0].set = B_TRUE;

	bgep->unicst_addr_total = MAC_ADDRESS_REGS_MAX;
	/*
	 * One fewer address is available than the MAX, because the
	 * primary address is not advertised as one of the multiple
	 * MAC addresses.
	 */
	bgep->unicst_addr_avail = MAC_ADDRESS_REGS_MAX - 1;

	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
		goto attach_fail;
	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	macp->m_driver = bgep;
	macp->m_dip = devinfo;
	macp->m_src_addr = bgep->curr_addr[0].addr;
	macp->m_callbacks = &bge_m_callbacks;
	macp->m_min_sdu = 0;
	macp->m_max_sdu = cidp->ethmax_size - sizeof (struct ether_header);
	macp->m_margin = VLAN_TAGSZ;
	macp->m_priv_props = bge_priv_prop;
	macp->m_priv_prop_count = BGE_MAX_PRIV_PROPS;

	/*
	 * Finally, we're ready to register ourselves with the MAC layer
	 * interface; if this succeeds, we're all ready to start()
	 */
	err = mac_register(macp, &bgep->mh);
	mac_free(macp);
	if (err != 0)
		goto attach_fail;

	/*
	 * Register a periodic handler.
	 * bge_chip_cyclic() is invoked in kernel context.
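	 *
	 * (Passing DDI_IPL_0 to ddi_periodic_add(9F) requests kernel
	 * context rather than interrupt context, so the callback may
	 * acquire adaptive mutexes such as genlock; the interval,
	 * BGE_CYCLIC_PERIOD, is expressed in nanoseconds.)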
	 */
	bgep->periodic_id = ddi_periodic_add(bge_chip_cyclic, bgep,
	    BGE_CYCLIC_PERIOD, DDI_IPL_0);

	bgep->progress |= PROGRESS_READY;
	ASSERT(bgep->bge_guard == BGE_GUARD);
#ifdef BGE_IPMI_ASF
#ifdef BGE_NETCONSOLE
	if (bgep->asf_enabled) {
		mutex_enter(bgep->genlock);
		retval = bge_chip_start(bgep, B_TRUE);
		mutex_exit(bgep->genlock);
		if (retval != DDI_SUCCESS)
			goto attach_fail;
	}
#endif
#endif

	ddi_report_dev(devinfo);
	return (DDI_SUCCESS);

attach_fail:
#ifdef BGE_IPMI_ASF
	bge_unattach(bgep, ASF_MODE_SHUTDOWN);
#else
	bge_unattach(bgep);
#endif
	return (DDI_FAILURE);
}

/*
 * bge_suspend() -- suspend transmit/receive for powerdown
 */
static int
bge_suspend(bge_t *bgep)
{
	/*
	 * Stop processing and idle (powerdown) the PHY ...
	 */
	mutex_enter(bgep->genlock);
#ifdef BGE_IPMI_ASF
	/*
	 * Power management is not currently supported by this driver.
	 * If you want to implement it, add the ASF/IPMI-related code
	 * here.
	 */
#endif
	bge_stop(bgep);
	if (bge_phys_idle(bgep) != DDI_SUCCESS) {
		(void) bge_check_acc_handle(bgep, bgep->io_handle);
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (DDI_FAILURE);
	}
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (DDI_FAILURE);
	}
	mutex_exit(bgep->genlock);

	return (DDI_SUCCESS);
}

/*
 * quiesce(9E) entry point.
 *
 * This function is called when the system is single-threaded at high
 * PIL with preemption disabled.  Therefore, this function must not
 * block.
 *
 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
 * DDI_FAILURE indicates an error condition and should almost never happen.
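 *
 * (Accordingly, the implementation below only masks interrupts -- via
 * the MHCR for FIXED interrupts, or the MSI enable bit for MSI -- and
 * then stops the chip with the non-blocking
 * bge_chip_stop_nonblocking(); on SPARC, quiesce(9E) is not supported
 * and ddi_quiesce_not_supported(9F) is used instead.)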
3516 */ 3517 #ifdef __sparc 3518 #define bge_quiesce ddi_quiesce_not_supported 3519 #else 3520 static int 3521 bge_quiesce(dev_info_t *devinfo) 3522 { 3523 bge_t *bgep = ddi_get_driver_private(devinfo); 3524 3525 if (bgep == NULL) 3526 return (DDI_FAILURE); 3527 3528 if (bgep->intr_type == DDI_INTR_TYPE_FIXED) { 3529 bge_reg_set32(bgep, PCI_CONF_BGE_MHCR, 3530 MHCR_MASK_PCI_INT_OUTPUT); 3531 } else { 3532 bge_reg_clr32(bgep, MSI_MODE_REG, MSI_MSI_ENABLE); 3533 } 3534 3535 /* Stop the chip */ 3536 bge_chip_stop_nonblocking(bgep); 3537 3538 return (DDI_SUCCESS); 3539 } 3540 #endif 3541 3542 /* 3543 * detach(9E) -- Detach a device from the system 3544 */ 3545 static int 3546 bge_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd) 3547 { 3548 bge_t *bgep; 3549 #ifdef BGE_IPMI_ASF 3550 uint_t asf_mode; 3551 asf_mode = ASF_MODE_NONE; 3552 #endif 3553 3554 BGE_GTRACE(("bge_detach($%p, %d)", (void *)devinfo, cmd)); 3555 3556 bgep = ddi_get_driver_private(devinfo); 3557 3558 switch (cmd) { 3559 default: 3560 return (DDI_FAILURE); 3561 3562 case DDI_SUSPEND: 3563 return (bge_suspend(bgep)); 3564 3565 case DDI_DETACH: 3566 break; 3567 } 3568 3569 #ifdef BGE_IPMI_ASF 3570 mutex_enter(bgep->genlock); 3571 if (bgep->asf_enabled && ((bgep->asf_status == ASF_STAT_RUN) || 3572 (bgep->asf_status == ASF_STAT_RUN_INIT))) { 3573 3574 bge_asf_update_status(bgep); 3575 if (bgep->asf_status == ASF_STAT_RUN) { 3576 bge_asf_stop_timer(bgep); 3577 } 3578 bgep->asf_status = ASF_STAT_STOP; 3579 3580 bge_asf_pre_reset_operations(bgep, BGE_SHUTDOWN_RESET); 3581 3582 if (bgep->asf_pseudostop) { 3583 bge_chip_stop(bgep, B_FALSE); 3584 bgep->bge_mac_state = BGE_MAC_STOPPED; 3585 bgep->asf_pseudostop = B_FALSE; 3586 } 3587 3588 asf_mode = ASF_MODE_POST_SHUTDOWN; 3589 3590 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) 3591 ddi_fm_service_impact(bgep->devinfo, 3592 DDI_SERVICE_UNAFFECTED); 3593 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) 3594 ddi_fm_service_impact(bgep->devinfo, 3595 DDI_SERVICE_UNAFFECTED); 3596 } 3597 mutex_exit(bgep->genlock); 3598 #endif 3599 3600 /* 3601 * Unregister from the GLD subsystem. This can fail, in 3602 * particular if there are DLPI style-2 streams still open - 3603 * in which case we just return failure without shutting 3604 * down chip operations. 3605 */ 3606 if (mac_unregister(bgep->mh) != 0) 3607 return (DDI_FAILURE); 3608 3609 /* 3610 * All activity stopped, so we can clean up & exit 3611 */ 3612 #ifdef BGE_IPMI_ASF 3613 bge_unattach(bgep, asf_mode); 3614 #else 3615 bge_unattach(bgep); 3616 #endif 3617 return (DDI_SUCCESS); 3618 } 3619 3620 3621 /* 3622 * ========== Module Loading Data & Entry Points ========== 3623 */ 3624 3625 #undef BGE_DBG 3626 #define BGE_DBG BGE_DBG_INIT /* debug flag for this code */ 3627 3628 DDI_DEFINE_STREAM_OPS(bge_dev_ops, 3629 nulldev, /* identify */ 3630 nulldev, /* probe */ 3631 bge_attach, /* attach */ 3632 bge_detach, /* detach */ 3633 nodev, /* reset */ 3634 NULL, /* cb_ops */ 3635 D_MP, /* bus_ops */ 3636 NULL, /* power */ 3637 bge_quiesce /* quiesce */ 3638 ); 3639 3640 static struct modldrv bge_modldrv = { 3641 &mod_driverops, /* Type of module. 
This one is a driver */ 3642 bge_ident, /* short description */ 3643 &bge_dev_ops /* driver specific ops */ 3644 }; 3645 3646 static struct modlinkage modlinkage = { 3647 MODREV_1, (void *)&bge_modldrv, NULL 3648 }; 3649 3650 3651 int 3652 _info(struct modinfo *modinfop) 3653 { 3654 return (mod_info(&modlinkage, modinfop)); 3655 } 3656 3657 int 3658 _init(void) 3659 { 3660 int status; 3661 3662 mac_init_ops(&bge_dev_ops, "bge"); 3663 status = mod_install(&modlinkage); 3664 if (status == DDI_SUCCESS) 3665 mutex_init(bge_log_mutex, NULL, MUTEX_DRIVER, NULL); 3666 else 3667 mac_fini_ops(&bge_dev_ops); 3668 return (status); 3669 } 3670 3671 int 3672 _fini(void) 3673 { 3674 int status; 3675 3676 status = mod_remove(&modlinkage); 3677 if (status == DDI_SUCCESS) { 3678 mac_fini_ops(&bge_dev_ops); 3679 mutex_destroy(bge_log_mutex); 3680 } 3681 return (status); 3682 } 3683 3684 3685 /* 3686 * bge_add_intrs: 3687 * 3688 * Register FIXED or MSI interrupts. 3689 */ 3690 static int 3691 bge_add_intrs(bge_t *bgep, int intr_type) 3692 { 3693 dev_info_t *dip = bgep->devinfo; 3694 int avail, actual, intr_size, count = 0; 3695 int i, flag, ret; 3696 3697 BGE_DEBUG(("bge_add_intrs($%p, 0x%x)", (void *)bgep, intr_type)); 3698 3699 /* Get number of interrupts */ 3700 ret = ddi_intr_get_nintrs(dip, intr_type, &count); 3701 if ((ret != DDI_SUCCESS) || (count == 0)) { 3702 bge_error(bgep, "ddi_intr_get_nintrs() failure, ret: %d, " 3703 "count: %d", ret, count); 3704 3705 return (DDI_FAILURE); 3706 } 3707 3708 /* Get number of available interrupts */ 3709 ret = ddi_intr_get_navail(dip, intr_type, &avail); 3710 if ((ret != DDI_SUCCESS) || (avail == 0)) { 3711 bge_error(bgep, "ddi_intr_get_navail() failure, " 3712 "ret: %d, avail: %d\n", ret, avail); 3713 3714 return (DDI_FAILURE); 3715 } 3716 3717 if (avail < count) { 3718 BGE_DEBUG(("%s: nintrs() returned %d, navail returned %d", 3719 bgep->ifname, count, avail)); 3720 } 3721 3722 /* 3723 * BGE hardware generates only single MSI even though it claims 3724 * to support multiple MSIs. So, hard code MSI count value to 1. 
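	 *
	 * (With DDI_INTR_ALLOC_STRICT, ddi_intr_alloc(9F) must allocate
	 * exactly the requested count or fail; DDI_INTR_ALLOC_NORMAL
	 * allows it to return fewer interrupts than requested.)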
	 */
	if (intr_type == DDI_INTR_TYPE_MSI) {
		count = 1;
		flag = DDI_INTR_ALLOC_STRICT;
	} else {
		flag = DDI_INTR_ALLOC_NORMAL;
	}

	/* Allocate an array of interrupt handles */
	intr_size = count * sizeof (ddi_intr_handle_t);
	bgep->htable = kmem_alloc(intr_size, KM_SLEEP);

	/* Call ddi_intr_alloc() */
	ret = ddi_intr_alloc(dip, bgep->htable, intr_type, 0,
	    count, &actual, flag);

	if ((ret != DDI_SUCCESS) || (actual == 0)) {
		bge_error(bgep, "ddi_intr_alloc() failed %d\n", ret);

		kmem_free(bgep->htable, intr_size);
		return (DDI_FAILURE);
	}

	if (actual < count) {
		BGE_DEBUG(("%s: Requested: %d, Received: %d",
		    bgep->ifname, count, actual));
	}

	bgep->intr_cnt = actual;

	/*
	 * Get priority for first msi, assume remaining are all the same
	 */
	if ((ret = ddi_intr_get_pri(bgep->htable[0], &bgep->intr_pri)) !=
	    DDI_SUCCESS) {
		bge_error(bgep, "ddi_intr_get_pri() failed %d\n", ret);

		/* Free already allocated intr */
		for (i = 0; i < actual; i++) {
			(void) ddi_intr_free(bgep->htable[i]);
		}

		kmem_free(bgep->htable, intr_size);
		return (DDI_FAILURE);
	}

	/* Call ddi_intr_add_handler() */
	for (i = 0; i < actual; i++) {
		if ((ret = ddi_intr_add_handler(bgep->htable[i], bge_intr,
		    (caddr_t)bgep, (caddr_t)(uintptr_t)i)) != DDI_SUCCESS) {
			bge_error(bgep, "ddi_intr_add_handler() "
			    "failed %d\n", ret);

			/*
			 * Remove any handlers already added before
			 * freeing the allocated interrupts.
			 */
			while (i-- > 0)
				(void) ddi_intr_remove_handler(
				    bgep->htable[i]);

			/* Free already allocated intr */
			for (i = 0; i < actual; i++) {
				(void) ddi_intr_free(bgep->htable[i]);
			}

			kmem_free(bgep->htable, intr_size);
			return (DDI_FAILURE);
		}
	}

	if ((ret = ddi_intr_get_cap(bgep->htable[0], &bgep->intr_cap))
	    != DDI_SUCCESS) {
		bge_error(bgep, "ddi_intr_get_cap() failed %d\n", ret);

		for (i = 0; i < actual; i++) {
			(void) ddi_intr_remove_handler(bgep->htable[i]);
			(void) ddi_intr_free(bgep->htable[i]);
		}

		kmem_free(bgep->htable, intr_size);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*
 * bge_rem_intrs:
 *
 * Unregister FIXED or MSI interrupts
 */
static void
bge_rem_intrs(bge_t *bgep)
{
	int i;

	BGE_DEBUG(("bge_rem_intrs($%p)", (void *)bgep));

	/* Call ddi_intr_remove_handler() */
	for (i = 0; i < bgep->intr_cnt; i++) {
		(void) ddi_intr_remove_handler(bgep->htable[i]);
		(void) ddi_intr_free(bgep->htable[i]);
	}

	kmem_free(bgep->htable, bgep->intr_cnt * sizeof (ddi_intr_handle_t));
}

void
bge_intr_enable(bge_t *bgep)
{
	int i;

	if (bgep->intr_cap & DDI_INTR_FLAG_BLOCK) {
		/* Call ddi_intr_block_enable() for MSI interrupts */
		(void) ddi_intr_block_enable(bgep->htable, bgep->intr_cnt);
	} else {
		/* Call ddi_intr_enable for MSI or FIXED interrupts */
		for (i = 0; i < bgep->intr_cnt; i++) {
			(void) ddi_intr_enable(bgep->htable[i]);
		}
	}
}

void
bge_intr_disable(bge_t *bgep)
{
	int i;

	if (bgep->intr_cap & DDI_INTR_FLAG_BLOCK) {
		/* Call ddi_intr_block_disable() */
		(void) ddi_intr_block_disable(bgep->htable, bgep->intr_cnt);
	} else {
		for (i = 0; i < bgep->intr_cnt; i++) {
			(void) ddi_intr_disable(bgep->htable[i]);
		}
	}
}

int
bge_reprogram(bge_t *bgep)
{
	int status = 0;

ASSERT(mutex_owned(bgep->genlock)); 3864 3865 if (bge_phys_update(bgep) != DDI_SUCCESS) { 3866 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 3867 status = IOC_INVAL; 3868 } 3869 #ifdef BGE_IPMI_ASF 3870 if (bge_chip_sync(bgep, B_TRUE) == DDI_FAILURE) { 3871 #else 3872 if (bge_chip_sync(bgep) == DDI_FAILURE) { 3873 #endif 3874 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 3875 status = IOC_INVAL; 3876 } 3877 if (bgep->intr_type == DDI_INTR_TYPE_MSI) 3878 bge_chip_msi_trig(bgep); 3879 return (status); 3880 } 3881