/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident "%Z%%M% %I% %E% SMI"

#include "bge_impl.h"
#include <sys/sdt.h>
#include <sys/dld.h>

/*
 * This is the string displayed by modinfo, etc.
 * Make sure you keep the version ID up to date!
 */
static char bge_ident[] = "Broadcom Gb Ethernet v0.60";

/*
 * Property names
 */
static char debug_propname[] = "bge-debug-flags";
static char clsize_propname[] = "cache-line-size";
static char latency_propname[] = "latency-timer";
static char localmac_boolname[] = "local-mac-address?";
static char localmac_propname[] = "local-mac-address";
static char macaddr_propname[] = "mac-address";
static char subdev_propname[] = "subsystem-id";
static char subven_propname[] = "subsystem-vendor-id";
static char rxrings_propname[] = "bge-rx-rings";
static char txrings_propname[] = "bge-tx-rings";
static char fm_cap[] = "fm-capable";
static char default_mtu[] = "default_mtu";

static int bge_add_intrs(bge_t *, int);
static void bge_rem_intrs(bge_t *);

/*
 * Describes the chip's DMA engine
 */
static ddi_dma_attr_t dma_attr = {
    DMA_ATTR_V0,            /* dma_attr_version     */
    0x0000000000000000ull,  /* dma_attr_addr_lo     */
    0xFFFFFFFFFFFFFFFFull,  /* dma_attr_addr_hi     */
    0x00000000FFFFFFFFull,  /* dma_attr_count_max   */
    0x0000000000000001ull,  /* dma_attr_align       */
    0x00000FFF,             /* dma_attr_burstsizes  */
    0x00000001,             /* dma_attr_minxfer     */
    0x000000000000FFFFull,  /* dma_attr_maxxfer     */
    0xFFFFFFFFFFFFFFFFull,  /* dma_attr_seg         */
    1,                      /* dma_attr_sgllen      */
    0x00000001,             /* dma_attr_granular    */
    DDI_DMA_FLAGERR         /* dma_attr_flags       */
};

/*
 * PIO access attributes for registers
 */
static ddi_device_acc_attr_t bge_reg_accattr = {
    DDI_DEVICE_ATTR_V0,
    DDI_NEVERSWAP_ACC,
    DDI_STRICTORDER_ACC,
    DDI_FLAGERR_ACC
};

/*
 * DMA access attributes for descriptors: NOT to be byte swapped.
 */
static ddi_device_acc_attr_t bge_desc_accattr = {
    DDI_DEVICE_ATTR_V0,
    DDI_NEVERSWAP_ACC,
    DDI_STRICTORDER_ACC,
    DDI_FLAGERR_ACC
};

/*
 * DMA access attributes for data: NOT to be byte swapped.
 */
static ddi_device_acc_attr_t bge_data_accattr = {
    DDI_DEVICE_ATTR_V0,
    DDI_NEVERSWAP_ACC,
    DDI_STRICTORDER_ACC
};

/*
 * Versions of the O/S up to Solaris 8 didn't support network booting
 * from any network interface except the first (NET0).  Patching this
 * flag to a non-zero value will tell the driver to work around this
 * limitation by creating an extra (internal) pathname node.  To do
 * this, just add a line like the following to the CLIENT'S etc/system
 * file ON THE ROOT FILESYSTEM SERVER before booting the client:
 *
 *    set bge:bge_net1_boot_support = 1;
 */
static uint32_t bge_net1_boot_support = 1;
static int      bge_m_start(void *);
static void     bge_m_stop(void *);
static int      bge_m_promisc(void *, boolean_t);
static int      bge_m_multicst(void *, boolean_t, const uint8_t *);
static int      bge_m_unicst(void *, const uint8_t *);
static void     bge_m_resources(void *);
static void     bge_m_ioctl(void *, queue_t *, mblk_t *);
static boolean_t bge_m_getcapab(void *, mac_capab_t, void *);
static int      bge_unicst_set(void *, const uint8_t *,
                    mac_addr_slot_t);
static int      bge_m_unicst_add(void *, mac_multi_addr_t *);
static int      bge_m_unicst_remove(void *, mac_addr_slot_t);
static int      bge_m_unicst_modify(void *, mac_multi_addr_t *);
static int      bge_m_unicst_get(void *, mac_multi_addr_t *);
static int      bge_m_setprop(void *, const char *, mac_prop_id_t,
                    uint_t, const void *);
static int      bge_m_getprop(void *, const char *, mac_prop_id_t,
                    uint_t, void *);
static int      bge_set_priv_prop(bge_t *, const char *, uint_t,
                    const void *);
static int      bge_get_priv_prop(bge_t *, const char *, uint_t,
                    void *);

#define BGE_M_CALLBACK_FLAGS \
    (MC_RESOURCES | MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP)

static mac_callbacks_t bge_m_callbacks = {
    BGE_M_CALLBACK_FLAGS,
    bge_m_stat,
    bge_m_start,
    bge_m_stop,
    bge_m_promisc,
    bge_m_multicst,
    bge_m_unicst,
    bge_m_tx,
    bge_m_resources,
    bge_m_ioctl,
    bge_m_getcapab,
    NULL,
    NULL,
    bge_m_setprop,
    bge_m_getprop
};
/*
 * ========== Transmit and receive ring reinitialisation ==========
 */

/*
 * These <reinit> routines each reset the specified ring to an initial
 * state, assuming that the corresponding <init> routine has already
 * been called exactly once.
 */

static void
bge_reinit_send_ring(send_ring_t *srp)
{
    bge_queue_t *txbuf_queue;
    bge_queue_item_t *txbuf_head;
    sw_txbuf_t *txbuf;
    sw_sbd_t *ssbdp;
    uint32_t slot;

    /*
     * Reinitialise control variables ...
     */
    srp->tx_flow = 0;
    srp->tx_next = 0;
    srp->txfill_next = 0;
    srp->tx_free = srp->desc.nslots;
    ASSERT(mutex_owned(srp->tc_lock));
    srp->tc_next = 0;
    srp->txpkt_next = 0;
    srp->tx_block = 0;
    srp->tx_nobd = 0;
    srp->tx_nobuf = 0;

    /*
     * Initialize the tx buffer push queue
     */
    mutex_enter(srp->freetxbuf_lock);
    mutex_enter(srp->txbuf_lock);
    txbuf_queue = &srp->freetxbuf_queue;
    txbuf_queue->head = NULL;
    txbuf_queue->count = 0;
    txbuf_queue->lock = srp->freetxbuf_lock;
    srp->txbuf_push_queue = txbuf_queue;

    /*
     * Initialize the tx buffer pop queue
     */
    txbuf_queue = &srp->txbuf_queue;
    txbuf_queue->head = NULL;
    txbuf_queue->count = 0;
    txbuf_queue->lock = srp->txbuf_lock;
    srp->txbuf_pop_queue = txbuf_queue;
    txbuf_head = srp->txbuf_head;
    txbuf = srp->txbuf;
    for (slot = 0; slot < srp->tx_buffers; ++slot) {
        txbuf_head->item = txbuf;
        txbuf_head->next = txbuf_queue->head;
        txbuf_queue->head = txbuf_head;
        txbuf_queue->count++;
        txbuf++;
        txbuf_head++;
    }
    mutex_exit(srp->txbuf_lock);
    mutex_exit(srp->freetxbuf_lock);

    /*
     * Zero and sync all the h/w Send Buffer Descriptors
     */
    DMA_ZERO(srp->desc);
    DMA_SYNC(srp->desc, DDI_DMA_SYNC_FORDEV);
    bzero(srp->pktp, BGE_SEND_BUF_MAX * sizeof (*srp->pktp));
    ssbdp = srp->sw_sbds;
    for (slot = 0; slot < srp->desc.nslots; ++ssbdp, ++slot)
        ssbdp->pbuf = NULL;
}

static void
bge_reinit_recv_ring(recv_ring_t *rrp)
{
    /*
     * Reinitialise control variables ...
     */
    rrp->rx_next = 0;
}

static void
bge_reinit_buff_ring(buff_ring_t *brp, uint32_t ring)
{
    bge_rbd_t *hw_rbd_p;
    sw_rbd_t *srbdp;
    uint32_t bufsize;
    uint32_t nslots;
    uint32_t slot;

    static uint16_t ring_type_flag[BGE_BUFF_RINGS_MAX] = {
        RBD_FLAG_STD_RING,
        RBD_FLAG_JUMBO_RING,
        RBD_FLAG_MINI_RING
    };

    /*
     * Zero, initialise and sync all the h/w Receive Buffer Descriptors
     * Note: all the remaining fields (<type>, <flags>, <ip_cksum>,
     * <tcp_udp_cksum>, <error_flag>, <vlan_tag>, and <reserved>)
     * should be zeroed, and so don't need to be set up specifically
     * once the whole area has been cleared.
     */
    DMA_ZERO(brp->desc);

    hw_rbd_p = DMA_VPTR(brp->desc);
    nslots = brp->desc.nslots;
    ASSERT(brp->buf[0].nslots == nslots/BGE_SPLIT);
    bufsize = brp->buf[0].size;
    srbdp = brp->sw_rbds;
    for (slot = 0; slot < nslots; ++hw_rbd_p, ++srbdp, ++slot) {
        hw_rbd_p->host_buf_addr = srbdp->pbuf.cookie.dmac_laddress;
        hw_rbd_p->index = slot;
        hw_rbd_p->len = bufsize;
        hw_rbd_p->opaque = srbdp->pbuf.token;
        hw_rbd_p->flags |= ring_type_flag[ring];
    }

    DMA_SYNC(brp->desc, DDI_DMA_SYNC_FORDEV);

    /*
     * Finally, reinitialise the ring control variables ...
     */
    brp->rf_next = (nslots != 0) ? (nslots-1) : 0;
}
/*
 * Reinitialize all rings
 */
static void
bge_reinit_rings(bge_t *bgep)
{
    uint32_t ring;

    ASSERT(mutex_owned(bgep->genlock));

    /*
     * Send Rings ...
     */
    for (ring = 0; ring < bgep->chipid.tx_rings; ++ring)
        bge_reinit_send_ring(&bgep->send[ring]);

    /*
     * Receive Return Rings ...
     */
    for (ring = 0; ring < bgep->chipid.rx_rings; ++ring)
        bge_reinit_recv_ring(&bgep->recv[ring]);

    /*
     * Receive Producer Rings ...
     */
    for (ring = 0; ring < BGE_BUFF_RINGS_USED; ++ring)
        bge_reinit_buff_ring(&bgep->buff[ring], ring);
}

/*
 * ========== Internal state management entry points ==========
 */

#undef  BGE_DBG
#define BGE_DBG     BGE_DBG_NEMO    /* debug flag for this code */

/*
 * These routines provide all the functionality required by the
 * corresponding GLD entry points, but don't update the GLD state
 * so they can be called internally without disturbing our record
 * of what GLD thinks we should be doing ...
 */

/*
 * bge_reset() -- reset h/w & rings to initial state
 */
static int
#ifdef BGE_IPMI_ASF
bge_reset(bge_t *bgep, uint_t asf_mode)
#else
bge_reset(bge_t *bgep)
#endif
{
    uint32_t ring;
    int retval;

    BGE_TRACE(("bge_reset($%p)", (void *)bgep));

    ASSERT(mutex_owned(bgep->genlock));

    /*
     * Grab all the other mutexes in the world (this should
     * ensure no other threads are manipulating driver state)
     */
    for (ring = 0; ring < BGE_RECV_RINGS_MAX; ++ring)
        mutex_enter(bgep->recv[ring].rx_lock);
    for (ring = 0; ring < BGE_BUFF_RINGS_MAX; ++ring)
        mutex_enter(bgep->buff[ring].rf_lock);
    rw_enter(bgep->errlock, RW_WRITER);
    for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring)
        mutex_enter(bgep->send[ring].tx_lock);
    for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring)
        mutex_enter(bgep->send[ring].tc_lock);

#ifdef BGE_IPMI_ASF
    retval = bge_chip_reset(bgep, B_TRUE, asf_mode);
#else
    retval = bge_chip_reset(bgep, B_TRUE);
#endif
    bge_reinit_rings(bgep);

    /*
     * Free the world ...
     */
    for (ring = BGE_SEND_RINGS_MAX; ring-- > 0; )
        mutex_exit(bgep->send[ring].tc_lock);
    for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring)
        mutex_exit(bgep->send[ring].tx_lock);
    rw_exit(bgep->errlock);
    for (ring = BGE_BUFF_RINGS_MAX; ring-- > 0; )
        mutex_exit(bgep->buff[ring].rf_lock);
    for (ring = BGE_RECV_RINGS_MAX; ring-- > 0; )
        mutex_exit(bgep->recv[ring].rx_lock);

    BGE_DEBUG(("bge_reset($%p) done", (void *)bgep));
    return (retval);
}
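/*
 * Note on lock ordering: bge_reset() above takes genlock first
 * (asserted on entry), then every rx_lock, every rf_lock, the
 * error lock, every tx_lock and finally every tc_lock, and it
 * releases them in the reverse order.  Any code path that ever
 * holds more than one of these locks should follow the same
 * acquisition order to avoid deadlocking against a reset.
 */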
/*
 * bge_stop() -- stop processing, don't reset h/w or rings
 */
static void
bge_stop(bge_t *bgep)
{
    BGE_TRACE(("bge_stop($%p)", (void *)bgep));

    ASSERT(mutex_owned(bgep->genlock));

#ifdef BGE_IPMI_ASF
    if (bgep->asf_enabled) {
        bgep->asf_pseudostop = B_TRUE;
    } else {
#endif
        bge_chip_stop(bgep, B_FALSE);
#ifdef BGE_IPMI_ASF
    }
#endif

    BGE_DEBUG(("bge_stop($%p) done", (void *)bgep));
}

/*
 * bge_start() -- start transmitting/receiving
 */
static int
bge_start(bge_t *bgep, boolean_t reset_phys)
{
    int retval;

    BGE_TRACE(("bge_start($%p, %d)", (void *)bgep, reset_phys));

    ASSERT(mutex_owned(bgep->genlock));

    /*
     * Start chip processing, including enabling interrupts
     */
    retval = bge_chip_start(bgep, reset_phys);

    BGE_DEBUG(("bge_start($%p, %d) done", (void *)bgep, reset_phys));
    return (retval);
}

/*
 * bge_restart() -- restart transmitting/receiving after error or suspend
 */
int
bge_restart(bge_t *bgep, boolean_t reset_phys)
{
    int retval = DDI_SUCCESS;

    ASSERT(mutex_owned(bgep->genlock));

#ifdef BGE_IPMI_ASF
    if (bgep->asf_enabled) {
        if (bge_reset(bgep, ASF_MODE_POST_INIT) != DDI_SUCCESS)
            retval = DDI_FAILURE;
    } else
        if (bge_reset(bgep, ASF_MODE_NONE) != DDI_SUCCESS)
            retval = DDI_FAILURE;
#else
    if (bge_reset(bgep) != DDI_SUCCESS)
        retval = DDI_FAILURE;
#endif
    if (bgep->bge_mac_state == BGE_MAC_STARTED) {
        if (bge_start(bgep, reset_phys) != DDI_SUCCESS)
            retval = DDI_FAILURE;
        bgep->watchdog = 0;
        ddi_trigger_softintr(bgep->drain_id);
    }

    BGE_DEBUG(("bge_restart($%p, %d) done", (void *)bgep, reset_phys));
    return (retval);
}


/*
 * ========== Nemo-required management entry points ==========
 */

#undef  BGE_DBG
#define BGE_DBG     BGE_DBG_NEMO    /* debug flag for this code */

/*
 * bge_m_stop() -- stop transmitting/receiving
 */
static void
bge_m_stop(void *arg)
{
    bge_t *bgep = arg;        /* private device info */
    send_ring_t *srp;
    uint32_t ring;

    BGE_TRACE(("bge_m_stop($%p)", arg));

    /*
     * Just stop processing, then record new GLD state
     */
    mutex_enter(bgep->genlock);
    if (!(bgep->progress & PROGRESS_INTR)) {
        /* can happen during autorecovery */
        mutex_exit(bgep->genlock);
        return;
    }
    bge_stop(bgep);
    /*
     * Free any extra tx buffers allocated by the tx process.
     */
#ifdef BGE_IPMI_ASF
    if (!bgep->asf_pseudostop)
#endif
    {
        rw_enter(bgep->errlock, RW_WRITER);
        for (ring = 0; ring < bgep->chipid.tx_rings; ++ring) {
            srp = &bgep->send[ring];
            mutex_enter(srp->tx_lock);
            if (srp->tx_array > 1)
                bge_free_txbuf_arrays(srp);
            mutex_exit(srp->tx_lock);
        }
        rw_exit(bgep->errlock);
    }
    bgep->bge_mac_state = BGE_MAC_STOPPED;
    BGE_DEBUG(("bge_m_stop($%p) done", arg));
    if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK)
        ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_UNAFFECTED);
    mutex_exit(bgep->genlock);
}
/*
 * bge_m_start() -- start transmitting/receiving
 */
static int
bge_m_start(void *arg)
{
    bge_t *bgep = arg;        /* private device info */

    BGE_TRACE(("bge_m_start($%p)", arg));

    /*
     * Start processing and record new GLD state
     */
    mutex_enter(bgep->genlock);
    if (!(bgep->progress & PROGRESS_INTR)) {
        /* can happen during autorecovery */
        ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
        mutex_exit(bgep->genlock);
        return (EIO);
    }
#ifdef BGE_IPMI_ASF
    if (bgep->asf_enabled) {
        if ((bgep->asf_status == ASF_STAT_RUN) &&
            (bgep->asf_pseudostop)) {
            bgep->bge_mac_state = BGE_MAC_STARTED;
            mutex_exit(bgep->genlock);
            return (0);
        }
    }
    if (bge_reset(bgep, ASF_MODE_INIT) != DDI_SUCCESS) {
#else
    if (bge_reset(bgep) != DDI_SUCCESS) {
#endif
        (void) bge_check_acc_handle(bgep, bgep->cfg_handle);
        (void) bge_check_acc_handle(bgep, bgep->io_handle);
        ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
        mutex_exit(bgep->genlock);
        return (EIO);
    }
    if (bge_start(bgep, B_TRUE) != DDI_SUCCESS) {
        (void) bge_check_acc_handle(bgep, bgep->cfg_handle);
        (void) bge_check_acc_handle(bgep, bgep->io_handle);
        ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
        mutex_exit(bgep->genlock);
        return (EIO);
    }
    bgep->bge_mac_state = BGE_MAC_STARTED;
    BGE_DEBUG(("bge_m_start($%p) done", arg));

    if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
        ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
        mutex_exit(bgep->genlock);
        return (EIO);
    }
    if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
        ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
        mutex_exit(bgep->genlock);
        return (EIO);
    }
#ifdef BGE_IPMI_ASF
    if (bgep->asf_enabled) {
        if (bgep->asf_status != ASF_STAT_RUN) {
            /* start ASF heart beat */
            bgep->asf_timeout_id = timeout(bge_asf_heartbeat,
                (void *)bgep,
                drv_usectohz(BGE_ASF_HEARTBEAT_INTERVAL));
            bgep->asf_status = ASF_STAT_RUN;
        }
    }
#endif
    mutex_exit(bgep->genlock);

    return (0);
}

/*
 * bge_m_unicst() -- set the physical network address
 */
static int
bge_m_unicst(void *arg, const uint8_t *macaddr)
{
    /*
     * Request to set address in
     * address slot 0, i.e., default address
     */
    return (bge_unicst_set(arg, macaddr, 0));
}
/*
 * bge_unicst_set() -- set the physical network address
 */
static int
bge_unicst_set(void *arg, const uint8_t *macaddr, mac_addr_slot_t slot)
{
    bge_t *bgep = arg;        /* private device info */

    BGE_TRACE(("bge_m_unicst_set($%p, %s)", arg,
        ether_sprintf((void *)macaddr)));
    /*
     * Remember the new current address in the driver state
     * Sync the chip's idea of the address too ...
     */
    mutex_enter(bgep->genlock);
    if (!(bgep->progress & PROGRESS_INTR)) {
        /* can happen during autorecovery */
        ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
        mutex_exit(bgep->genlock);
        return (EIO);
    }
    ethaddr_copy(macaddr, bgep->curr_addr[slot].addr);
#ifdef BGE_IPMI_ASF
    if (bge_chip_sync(bgep, B_FALSE) == DDI_FAILURE) {
#else
    if (bge_chip_sync(bgep) == DDI_FAILURE) {
#endif
        (void) bge_check_acc_handle(bgep, bgep->cfg_handle);
        (void) bge_check_acc_handle(bgep, bgep->io_handle);
        ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
        mutex_exit(bgep->genlock);
        return (EIO);
    }
#ifdef BGE_IPMI_ASF
    if (bgep->asf_enabled) {
        /*
         * The above bge_chip_sync() function wrote the ethernet MAC
         * address registers, which destroyed the IPMI/ASF sideband.
         * Here, we have to reset the chip to make the IPMI/ASF
         * sideband work again.
         */
        if (bgep->asf_status == ASF_STAT_RUN) {
            /*
             * We must stop the ASF heart beat before
             * bge_chip_stop(), otherwise some computers (e.g.
             * the IBM HS20 blade server) may crash.
             */
            bge_asf_update_status(bgep);
            bge_asf_stop_timer(bgep);
            bgep->asf_status = ASF_STAT_STOP;

            bge_asf_pre_reset_operations(bgep, BGE_INIT_RESET);
        }
        bge_chip_stop(bgep, B_FALSE);

        if (bge_restart(bgep, B_FALSE) == DDI_FAILURE) {
            (void) bge_check_acc_handle(bgep, bgep->cfg_handle);
            (void) bge_check_acc_handle(bgep, bgep->io_handle);
            ddi_fm_service_impact(bgep->devinfo,
                DDI_SERVICE_DEGRADED);
            mutex_exit(bgep->genlock);
            return (EIO);
        }

        /*
         * Start our ASF heartbeat counter as soon as possible.
         */
        if (bgep->asf_status != ASF_STAT_RUN) {
            /* start ASF heart beat */
            bgep->asf_timeout_id = timeout(bge_asf_heartbeat,
                (void *)bgep,
                drv_usectohz(BGE_ASF_HEARTBEAT_INTERVAL));
            bgep->asf_status = ASF_STAT_RUN;
        }
    }
#endif
    BGE_DEBUG(("bge_m_unicst_set($%p) done", arg));
    if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
        ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
        mutex_exit(bgep->genlock);
        return (EIO);
    }
    if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
        ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
        mutex_exit(bgep->genlock);
        return (EIO);
    }
    mutex_exit(bgep->genlock);

    return (0);
}

/*
 * The following four routines are used as callbacks for multiple MAC
 * address support:
 *    - bge_m_unicst_add(void *, mac_multi_addr_t *);
 *    - bge_m_unicst_remove(void *, mac_addr_slot_t);
 *    - bge_m_unicst_modify(void *, mac_multi_addr_t *);
 *    - bge_m_unicst_get(void *, mac_multi_addr_t *);
 */

/*
 * bge_m_unicst_add() - will find an unused address slot, set the
 * address value to the one specified, reserve that slot and enable
 * the NIC to start filtering on the new MAC address.
 * Returns 0 on success.
 */
static int
bge_m_unicst_add(void *arg, mac_multi_addr_t *maddr)
{
    bge_t *bgep = arg;        /* private device info */
    mac_addr_slot_t slot;
    int err;

    if (mac_unicst_verify(bgep->mh,
        maddr->mma_addr, maddr->mma_addrlen) == B_FALSE)
        return (EINVAL);

    mutex_enter(bgep->genlock);
    if (bgep->unicst_addr_avail == 0) {
        /* no slots available */
        mutex_exit(bgep->genlock);
        return (ENOSPC);
    }

    /*
     * The primary/default address is in slot 0.  The next three
     * slots hold the multiple MAC addresses: multiple MAC address
     * 0 is in slot 1, address 1 in slot 2, and so on.  So the
     * first multiple MAC address resides in slot 1.
     */
    for (slot = 1; slot < bgep->unicst_addr_total; slot++) {
        if (bgep->curr_addr[slot].set == B_FALSE) {
            bgep->curr_addr[slot].set = B_TRUE;
            break;
        }
    }

    ASSERT(slot < bgep->unicst_addr_total);
    bgep->unicst_addr_avail--;
    mutex_exit(bgep->genlock);
    maddr->mma_slot = slot;

    if ((err = bge_unicst_set(bgep, maddr->mma_addr, slot)) != 0) {
        mutex_enter(bgep->genlock);
        bgep->curr_addr[slot].set = B_FALSE;
        bgep->unicst_addr_avail++;
        mutex_exit(bgep->genlock);
    }
    return (err);
}
/*
 * bge_m_unicst_remove() - removes a MAC address that was added by a
 * call to bge_m_unicst_add().  The slot number that was returned in
 * add() is passed in the call to remove the address.
 * Returns 0 on success.
 */
static int
bge_m_unicst_remove(void *arg, mac_addr_slot_t slot)
{
    bge_t *bgep = arg;        /* private device info */

    if (slot <= 0 || slot >= bgep->unicst_addr_total)
        return (EINVAL);

    mutex_enter(bgep->genlock);
    if (bgep->curr_addr[slot].set == B_TRUE) {
        bgep->curr_addr[slot].set = B_FALSE;
        bgep->unicst_addr_avail++;
        mutex_exit(bgep->genlock);
        /*
         * Copy the default address to the passed slot
         */
        return (bge_unicst_set(bgep, bgep->curr_addr[0].addr, slot));
    }
    mutex_exit(bgep->genlock);
    return (EINVAL);
}

/*
 * bge_m_unicst_modify() - modifies the value of an address that
 * has been added by bge_m_unicst_add().  The new address, address
 * length and the slot number that was returned in the call to add
 * should be passed to bge_m_unicst_modify().  mma_flags should be
 * set to 0.  Returns 0 on success.
 */
static int
bge_m_unicst_modify(void *arg, mac_multi_addr_t *maddr)
{
    bge_t *bgep = arg;        /* private device info */
    mac_addr_slot_t slot;

    if (mac_unicst_verify(bgep->mh,
        maddr->mma_addr, maddr->mma_addrlen) == B_FALSE)
        return (EINVAL);

    slot = maddr->mma_slot;

    if (slot <= 0 || slot >= bgep->unicst_addr_total)
        return (EINVAL);

    mutex_enter(bgep->genlock);
    if (bgep->curr_addr[slot].set == B_TRUE) {
        mutex_exit(bgep->genlock);
        return (bge_unicst_set(bgep, maddr->mma_addr, slot));
    }
    mutex_exit(bgep->genlock);

    return (EINVAL);
}

/*
 * bge_m_unicst_get() - will get the MAC address and all other
 * information related to the address slot passed in mac_multi_addr_t.
 * mma_flags should be set to 0 in the call.
 * On return, mma_flags can take the following values:
 *    1) MMAC_SLOT_UNUSED
 *    2) MMAC_SLOT_USED | MMAC_VENDOR_ADDR
 *    3) MMAC_SLOT_UNUSED | MMAC_VENDOR_ADDR
 *    4) MMAC_SLOT_USED
 */
static int
bge_m_unicst_get(void *arg, mac_multi_addr_t *maddr)
{
    bge_t *bgep = arg;        /* private device info */
    mac_addr_slot_t slot;

    slot = maddr->mma_slot;

    if (slot <= 0 || slot >= bgep->unicst_addr_total)
        return (EINVAL);

    mutex_enter(bgep->genlock);
    if (bgep->curr_addr[slot].set == B_TRUE) {
        ethaddr_copy(bgep->curr_addr[slot].addr,
            maddr->mma_addr);
        maddr->mma_flags = MMAC_SLOT_USED;
    } else {
        maddr->mma_flags = MMAC_SLOT_UNUSED;
    }
    mutex_exit(bgep->genlock);

    return (0);
}
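/*
 * A minimal usage sketch for the multiple-MAC-address callbacks
 * above (illustrative only; "my_mac" is a hypothetical six-byte
 * address):
 *
 *    mac_multi_addr_t maddr;
 *
 *    maddr.mma_addrlen = ETHERADDRL;
 *    bcopy(my_mac, maddr.mma_addr, ETHERADDRL);
 *    if (bge_m_unicst_add(bgep, &maddr) == 0) {
 *        ... traffic to my_mac is now accepted; the chosen
 *        ... slot is returned in maddr.mma_slot
 *        (void) bge_m_unicst_remove(bgep, maddr.mma_slot);
 *    }
 */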
extern void bge_wake_factotum(bge_t *);

static boolean_t
bge_param_locked(mac_prop_id_t pr_num)
{
    /*
     * All adv_* parameters are locked (read-only) while
     * the device is in any sort of loopback mode ...
     */
    switch (pr_num) {
    case DLD_PROP_ADV_1000FDX_CAP:
    case DLD_PROP_EN_1000FDX_CAP:
    case DLD_PROP_ADV_1000HDX_CAP:
    case DLD_PROP_EN_1000HDX_CAP:
    case DLD_PROP_ADV_100FDX_CAP:
    case DLD_PROP_EN_100FDX_CAP:
    case DLD_PROP_ADV_100HDX_CAP:
    case DLD_PROP_EN_100HDX_CAP:
    case DLD_PROP_ADV_10FDX_CAP:
    case DLD_PROP_EN_10FDX_CAP:
    case DLD_PROP_ADV_10HDX_CAP:
    case DLD_PROP_EN_10HDX_CAP:
    case DLD_PROP_AUTONEG:
    case DLD_PROP_FLOWCTRL:
        return (B_TRUE);
    }
    return (B_FALSE);
}

/*
 * Callback functions for set/get of properties
 */
static int
bge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
    uint_t pr_valsize, const void *pr_val)
{
    bge_t *bgep = barg;
    int err = 0;
    uint64_t cur_mtu, new_mtu;
    uint_t maxsdu;
    link_flowctrl_t fl;

    mutex_enter(bgep->genlock);
    if (bgep->param_loop_mode != BGE_LOOP_NONE &&
        bge_param_locked(pr_num)) {
        /*
         * All adv_* parameters are locked (read-only)
         * while the device is in any sort of loopback mode.
         */
        mutex_exit(bgep->genlock);
        return (EBUSY);
    }
    switch (pr_num) {
    case DLD_PROP_EN_1000FDX_CAP:
        bgep->param_en_1000fdx = *(uint8_t *)pr_val;
        bgep->param_adv_1000fdx = *(uint8_t *)pr_val;
        goto reprogram;
    case DLD_PROP_EN_1000HDX_CAP:
        bgep->param_en_1000hdx = *(uint8_t *)pr_val;
        bgep->param_adv_1000hdx = *(uint8_t *)pr_val;
        goto reprogram;
    case DLD_PROP_EN_100FDX_CAP:
        bgep->param_en_100fdx = *(uint8_t *)pr_val;
        bgep->param_adv_100fdx = *(uint8_t *)pr_val;
        goto reprogram;
    case DLD_PROP_EN_100HDX_CAP:
        bgep->param_en_100hdx = *(uint8_t *)pr_val;
        bgep->param_adv_100hdx = *(uint8_t *)pr_val;
        goto reprogram;
    case DLD_PROP_EN_10FDX_CAP:
        bgep->param_en_10fdx = *(uint8_t *)pr_val;
        bgep->param_adv_10fdx = *(uint8_t *)pr_val;
        goto reprogram;
    case DLD_PROP_EN_10HDX_CAP:
        bgep->param_en_10hdx = *(uint8_t *)pr_val;
        bgep->param_adv_10hdx = *(uint8_t *)pr_val;
reprogram:
        if (err == 0 && bge_reprogram(bgep) == IOC_INVAL)
            err = EINVAL;
        break;
    case DLD_PROP_ADV_1000FDX_CAP:
    case DLD_PROP_ADV_1000HDX_CAP:
    case DLD_PROP_ADV_100FDX_CAP:
    case DLD_PROP_ADV_100HDX_CAP:
    case DLD_PROP_ADV_10FDX_CAP:
    case DLD_PROP_ADV_10HDX_CAP:
    case DLD_PROP_STATUS:
    case DLD_PROP_SPEED:
    case DLD_PROP_DUPLEX:
        err = EINVAL;    /* read-only prop. Can't set this */
        break;
    case DLD_PROP_AUTONEG:
        bgep->param_adv_autoneg = *(uint8_t *)pr_val;
        if (bge_reprogram(bgep) == IOC_INVAL)
            err = EINVAL;
        break;
    case DLD_PROP_DEFMTU:
        cur_mtu = bgep->chipid.default_mtu;
        bcopy(pr_val, &new_mtu, sizeof (new_mtu));
        if (new_mtu == cur_mtu) {
            err = 0;
            break;
        }
        if (new_mtu < BGE_DEFAULT_MTU ||
            new_mtu > BGE_MAXIMUM_MTU) {
            err = EINVAL;
            break;
        }
        if ((new_mtu > BGE_DEFAULT_MTU) &&
            (bgep->chipid.flags & CHIP_FLAG_NO_JUMBO)) {
            err = EINVAL;
            break;
        }
        if (bgep->bge_mac_state == BGE_MAC_STARTED) {
            err = EBUSY;
            break;
        }
        bgep->chipid.default_mtu = new_mtu;
        if (bge_chip_id_init(bgep)) {
            err = EINVAL;
            break;
        }
        maxsdu = bgep->chipid.ethmax_size -
            sizeof (struct ether_header);
        err = mac_maxsdu_update(bgep->mh, maxsdu);
        if (err == 0) {
            bgep->bge_dma_error = B_TRUE;
            bgep->manual_reset = B_TRUE;
            bge_chip_stop(bgep, B_TRUE);
            bge_wake_factotum(bgep);
            err = 0;
        }
        break;
    case DLD_PROP_FLOWCTRL:
        bcopy(pr_val, &fl, sizeof (fl));
        switch (fl) {
        default:
            err = EINVAL;
            break;
        case LINK_FLOWCTRL_NONE:
            bgep->param_adv_pause = 0;
            bgep->param_adv_asym_pause = 0;

            bgep->param_link_rx_pause = B_FALSE;
            bgep->param_link_tx_pause = B_FALSE;
            break;
        case LINK_FLOWCTRL_RX:
            if (!((bgep->param_lp_pause == 0) &&
                (bgep->param_lp_asym_pause == 1))) {
                err = EINVAL;
                break;
            }
            bgep->param_adv_pause = 1;
            bgep->param_adv_asym_pause = 1;

            bgep->param_link_rx_pause = B_TRUE;
            bgep->param_link_tx_pause = B_FALSE;
            break;
        case LINK_FLOWCTRL_TX:
            if (!((bgep->param_lp_pause == 1) &&
                (bgep->param_lp_asym_pause == 1))) {
                err = EINVAL;
                break;
            }
            bgep->param_adv_pause = 0;
            bgep->param_adv_asym_pause = 1;

            bgep->param_link_rx_pause = B_FALSE;
            bgep->param_link_tx_pause = B_TRUE;
            break;
        case LINK_FLOWCTRL_BI:
            if (bgep->param_lp_pause != 1) {
                err = EINVAL;
                break;
            }
            bgep->param_adv_pause = 1;

            bgep->param_link_rx_pause = B_TRUE;
            bgep->param_link_tx_pause = B_TRUE;
            break;
        }

        if (err == 0) {
            if (bge_reprogram(bgep) == IOC_INVAL)
                err = EINVAL;
        }

        break;
    default:
        err = bge_set_priv_prop(bgep, pr_name, pr_valsize,
            pr_val);
        break;
    }
    mutex_exit(bgep->genlock);
    return (err);
}

static int
bge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
    uint_t pr_valsize, void *pr_val)
{
    bge_t *bgep = barg;
    int err = 0;
    link_flowctrl_t fl;

    bzero(pr_val, pr_valsize);
    switch (pr_num) {
    case DLD_PROP_DUPLEX:
        if (pr_valsize < sizeof (uint8_t))
            return (EINVAL);
        *(uint8_t *)pr_val = bgep->param_link_duplex;
        break;
    case DLD_PROP_SPEED:
        if (pr_valsize < sizeof (uint_t))
            return (EINVAL);
        bcopy(&(bgep->param_link_speed), pr_val,
            sizeof (bgep->param_link_speed));
        break;
    case DLD_PROP_STATUS:
        if (pr_valsize < sizeof (uint8_t))
            return (EINVAL);
        *(uint8_t *)pr_val = bgep->param_link_up;
        break;
    case DLD_PROP_AUTONEG:
        if (pr_valsize < sizeof (uint8_t))
            return (EINVAL);
        *(uint8_t *)pr_val = bgep->param_adv_autoneg;
        break;
    case DLD_PROP_DEFMTU: {
        uint64_t tmp = 0;

        if (pr_valsize < sizeof (uint64_t))
            return (EINVAL);
        tmp = bgep->chipid.default_mtu;
        bcopy(&tmp, pr_val, sizeof (tmp));
        break;
    }
    case DLD_PROP_FLOWCTRL:
        if (pr_valsize < sizeof (link_flowctrl_t))
            return (EINVAL);
        if (bgep->param_link_rx_pause &&
            !bgep->param_link_tx_pause)
            fl = LINK_FLOWCTRL_RX;

        if (!bgep->param_link_rx_pause &&
            !bgep->param_link_tx_pause)
            fl = LINK_FLOWCTRL_NONE;

        if (!bgep->param_link_rx_pause &&
            bgep->param_link_tx_pause)
            fl = LINK_FLOWCTRL_TX;

        if (bgep->param_link_rx_pause &&
            bgep->param_link_tx_pause)
            fl = LINK_FLOWCTRL_BI;
        bcopy(&fl, pr_val, sizeof (fl));
        break;
    case DLD_PROP_ADV_1000FDX_CAP:
        if (pr_valsize < sizeof (uint8_t))
            return (EINVAL);
        *(uint8_t *)pr_val = bgep->param_adv_1000fdx;
        break;
    case DLD_PROP_EN_1000FDX_CAP:
        if (pr_valsize < sizeof (uint8_t))
            return (EINVAL);
        *(uint8_t *)pr_val = bgep->param_en_1000fdx;
        break;
    case DLD_PROP_ADV_1000HDX_CAP:
        if (pr_valsize < sizeof (uint8_t))
            return (EINVAL);
        *(uint8_t *)pr_val = bgep->param_adv_1000hdx;
        break;
    case DLD_PROP_EN_1000HDX_CAP:
        if (pr_valsize < sizeof (uint8_t))
            return (EINVAL);
        *(uint8_t *)pr_val = bgep->param_en_1000hdx;
        break;
    case DLD_PROP_ADV_100FDX_CAP:
        if (pr_valsize < sizeof (uint8_t))
            return (EINVAL);
        *(uint8_t *)pr_val = bgep->param_adv_100fdx;
        break;
    case DLD_PROP_EN_100FDX_CAP:
        if (pr_valsize < sizeof (uint8_t))
            return (EINVAL);
        *(uint8_t *)pr_val = bgep->param_en_100fdx;
        break;
    case DLD_PROP_ADV_100HDX_CAP:
        if (pr_valsize < sizeof (uint8_t))
            return (EINVAL);
        *(uint8_t *)pr_val = bgep->param_adv_100hdx;
        break;
    case DLD_PROP_EN_100HDX_CAP:
        if (pr_valsize < sizeof (uint8_t))
            return (EINVAL);
        *(uint8_t *)pr_val = bgep->param_en_100hdx;
        break;
    case DLD_PROP_ADV_10FDX_CAP:
        if (pr_valsize < sizeof (uint8_t))
            return (EINVAL);
        *(uint8_t *)pr_val = bgep->param_adv_10fdx;
        break;
    case DLD_PROP_EN_10FDX_CAP:
        if (pr_valsize < sizeof (uint8_t))
            return (EINVAL);
        *(uint8_t *)pr_val = bgep->param_en_10fdx;
        break;
    case DLD_PROP_ADV_10HDX_CAP:
        if (pr_valsize < sizeof (uint8_t))
            return (EINVAL);
        *(uint8_t *)pr_val = bgep->param_adv_10hdx;
        break;
    case DLD_PROP_EN_10HDX_CAP:
        if (pr_valsize < sizeof (uint8_t))
            return (EINVAL);
        *(uint8_t *)pr_val = bgep->param_en_10hdx;
        break;
    default:
        err = bge_get_priv_prop(bgep, pr_name, pr_valsize,
            pr_val);
        return (err);
    }
    return (0);
}

/* ARGSUSED */
static int
bge_set_priv_prop(bge_t *bgep, const char *pr_name, uint_t pr_valsize,
    const void *pr_val)
{
    int err = 0;
    long result;

    if (strcmp(pr_name, "_drain_max") == 0) {

        /*
         * On the Tx side, we need to update the h/w register for
         * real packet transmission per packet.  The drain_max
         * parameter is used to reduce the register accesses.  It
         * controls the max number of packets that we will hold
         * before updating the bge h/w to trigger a h/w transmit.
         * The bge chipset usually has a max of 512 Tx descriptors,
         * thus the upper bound on drain_max is 512.
         */
        if (pr_val == NULL) {
            err = EINVAL;
            return (err);
        }
        (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
        if (result > 512 || result < 1)
            err = EINVAL;
        else {
            bgep->param_drain_max = (uint32_t)result;
            if (bge_reprogram(bgep) == IOC_INVAL)
                err = EINVAL;
        }
        return (err);
    }
    if (strcmp(pr_name, "_msi_cnt") == 0) {

        if (pr_val == NULL) {
            err = EINVAL;
            return (err);
        }
        (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
        if (result > 7 || result < 0)
            err = EINVAL;
        else {
            bgep->param_msi_cnt = (uint32_t)result;
            if (bge_reprogram(bgep) == IOC_INVAL)
                err = EINVAL;
        }
        return (err);
    }
    if (strcmp(pr_name, "_intr_coalesce_blank_time") == 0) {
        if (ddi_strtol(pr_val, (char **)NULL, 0, &result) != 0) {
            return (EINVAL);
        }

        bgep->chipid.rx_ticks_norm = result;
        return (0);
    }

    if (strcmp(pr_name, "_intr_coalesce_pkt_cnt") == 0) {
        if (ddi_strtol(pr_val, (char **)NULL, 0, &result) != 0)
            return (EINVAL);

        bgep->chipid.rx_count_norm = result;
        return (0);
    }
    return (EINVAL);
}
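/*
 * The driver-private properties handled above (_drain_max, _msi_cnt,
 * _intr_coalesce_blank_time, _intr_coalesce_pkt_cnt) would normally
 * be tuned from userland, e.g. (assuming the usual dladm(1M) syntax
 * for driver-private link properties; the link name is hypothetical):
 *
 *    # dladm set-linkprop -p _drain_max=64 bge0
 *    # dladm show-linkprop -p _intr_coalesce_blank_time bge0
 */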
static int
bge_get_priv_prop(bge_t *bge, const char *pr_name, uint_t pr_valsize,
    void *pr_val)
{
    char valstr[MAXNAMELEN];
    int err = EINVAL;
    uint_t strsize;

    if (strcmp(pr_name, "_drain_max") == 0) {
        (void) sprintf(valstr, "%d", bge->param_drain_max);
        err = 0;
        goto done;
    }
    if (strcmp(pr_name, "_msi_cnt") == 0) {
        (void) sprintf(valstr, "%d", bge->param_msi_cnt);
        err = 0;
        goto done;
    }

    if (strcmp(pr_name, "_intr_coalesce_blank_time") == 0) {
        (void) sprintf(valstr, "%d", bge->chipid.rx_ticks_norm);
        err = 0;
        goto done;
    }

    if (strcmp(pr_name, "_intr_coalesce_pkt_cnt") == 0) {
        (void) sprintf(valstr, "%d", bge->chipid.rx_count_norm);
        err = 0;
        goto done;
    }

    /*
     * Unknown property: return EINVAL here rather than falling
     * through, where valstr would be used uninitialised.
     */
    return (err);

done:
    strsize = (uint_t)strlen(valstr);
    if (pr_valsize < strsize) {
        err = ENOBUFS;
    } else {
        (void) strlcpy(pr_val, valstr, pr_valsize);
    }
    return (err);
}

/*
 * Compute the index of the required bit in the multicast hash map.
 * This must mirror the way the hardware actually does it!
 * See Broadcom document 570X-PG102-R page 125.
 */
static uint32_t
bge_hash_index(const uint8_t *mca)
{
    uint32_t hash;

    CRC32(hash, mca, ETHERADDRL, -1U, crc32_table);

    return (hash);
}
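/*
 * Sketch of how the hash value is consumed (this mirrors the code
 * in bge_m_multicst() below): the 32-bit CRC is reduced modulo
 * BGE_HASH_TABLE_SIZE to give a bit index into the hash map, which
 * is then split into a word number and a bit within that word:
 *
 *    index = bge_hash_index(mca) % BGE_HASH_TABLE_SIZE;
 *    word  = index / 32;
 *    bit   = 1 << (index % 32);
 */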
/*
 * bge_m_multicst() -- enable/disable a multicast address
 */
static int
bge_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
{
    bge_t *bgep = arg;        /* private device info */
    uint32_t hash;
    uint32_t index;
    uint32_t word;
    uint32_t bit;
    uint8_t *refp;

    BGE_TRACE(("bge_m_multicst($%p, %s, %s)", arg,
        (add) ? "add" : "remove", ether_sprintf((void *)mca)));

    /*
     * Precalculate all required masks, pointers etc ...
     */
    hash = bge_hash_index(mca);
    index = hash % BGE_HASH_TABLE_SIZE;
    word = index/32u;
    bit = 1 << (index % 32u);
    refp = &bgep->mcast_refs[index];

    BGE_DEBUG(("bge_m_multicst: hash 0x%x index %d (%d:0x%x) = %d",
        hash, index, word, bit, *refp));

    /*
     * We must set the appropriate bit in the hash map (and the
     * corresponding h/w register) when the refcount goes from 0
     * to >0, and clear it when the last ref goes away (refcount
     * goes from >0 back to 0).  If we change the hash map, we
     * must also update the chip's hardware map registers.
     */
    mutex_enter(bgep->genlock);
    if (!(bgep->progress & PROGRESS_INTR)) {
        /* can happen during autorecovery */
        ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
        mutex_exit(bgep->genlock);
        return (EIO);
    }
    if (add) {
        if ((*refp)++ == 0) {
            bgep->mcast_hash[word] |= bit;
#ifdef BGE_IPMI_ASF
            if (bge_chip_sync(bgep, B_TRUE) == DDI_FAILURE) {
#else
            if (bge_chip_sync(bgep) == DDI_FAILURE) {
#endif
                (void) bge_check_acc_handle(bgep,
                    bgep->cfg_handle);
                (void) bge_check_acc_handle(bgep,
                    bgep->io_handle);
                ddi_fm_service_impact(bgep->devinfo,
                    DDI_SERVICE_DEGRADED);
                mutex_exit(bgep->genlock);
                return (EIO);
            }
        }
    } else {
        if (--(*refp) == 0) {
            bgep->mcast_hash[word] &= ~bit;
#ifdef BGE_IPMI_ASF
            if (bge_chip_sync(bgep, B_TRUE) == DDI_FAILURE) {
#else
            if (bge_chip_sync(bgep) == DDI_FAILURE) {
#endif
                (void) bge_check_acc_handle(bgep,
                    bgep->cfg_handle);
                (void) bge_check_acc_handle(bgep,
                    bgep->io_handle);
                ddi_fm_service_impact(bgep->devinfo,
                    DDI_SERVICE_DEGRADED);
                mutex_exit(bgep->genlock);
                return (EIO);
            }
        }
    }
    BGE_DEBUG(("bge_m_multicst($%p) done", arg));
    if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
        ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
        mutex_exit(bgep->genlock);
        return (EIO);
    }
    if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
        ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
        mutex_exit(bgep->genlock);
        return (EIO);
    }
    mutex_exit(bgep->genlock);

    return (0);
}
/*
 * bge_m_promisc() -- set or reset promiscuous mode on the board
 *
 * Program the hardware to enable/disable promiscuous and/or
 * receive-all-multicast modes.
 */
static int
bge_m_promisc(void *arg, boolean_t on)
{
    bge_t *bgep = arg;

    BGE_TRACE(("bge_m_promisc_set($%p, %d)", arg, on));

    /*
     * Store MAC layer specified mode and pass to chip layer to update h/w
     */
    mutex_enter(bgep->genlock);
    if (!(bgep->progress & PROGRESS_INTR)) {
        /* can happen during autorecovery */
        ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
        mutex_exit(bgep->genlock);
        return (EIO);
    }
    bgep->promisc = on;
#ifdef BGE_IPMI_ASF
    if (bge_chip_sync(bgep, B_TRUE) == DDI_FAILURE) {
#else
    if (bge_chip_sync(bgep) == DDI_FAILURE) {
#endif
        (void) bge_check_acc_handle(bgep, bgep->cfg_handle);
        (void) bge_check_acc_handle(bgep, bgep->io_handle);
        ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
        mutex_exit(bgep->genlock);
        return (EIO);
    }
    BGE_DEBUG(("bge_m_promisc_set($%p) done", arg));
    if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
        ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
        mutex_exit(bgep->genlock);
        return (EIO);
    }
    if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
        ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
        mutex_exit(bgep->genlock);
        return (EIO);
    }
    mutex_exit(bgep->genlock);
    return (0);
}

/*ARGSUSED*/
static boolean_t
bge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
{
    bge_t *bgep = arg;

    switch (cap) {
    case MAC_CAPAB_HCKSUM: {
        uint32_t *txflags = cap_data;

        *txflags = HCKSUM_INET_FULL_V4 | HCKSUM_IPHDRCKSUM;
        break;
    }

    case MAC_CAPAB_POLL:
        /*
         * There's nothing for us to fill in, simply returning
         * B_TRUE stating that we support polling is sufficient.
         */
        break;

    case MAC_CAPAB_MULTIADDRESS: {
        multiaddress_capab_t *mmacp = cap_data;

        mutex_enter(bgep->genlock);
        /*
         * The number of MAC addresses made available by
         * this capability is one less than the total as
         * the primary address in slot 0 is counted in
         * the total.
         */
        mmacp->maddr_naddr = bgep->unicst_addr_total - 1;
        mmacp->maddr_naddrfree = bgep->unicst_addr_avail;
        /* No multiple factory addresses, set mma_flag to 0 */
        mmacp->maddr_flag = 0;
        mmacp->maddr_handle = bgep;
        mmacp->maddr_add = bge_m_unicst_add;
        mmacp->maddr_remove = bge_m_unicst_remove;
        mmacp->maddr_modify = bge_m_unicst_modify;
        mmacp->maddr_get = bge_m_unicst_get;
        mmacp->maddr_reserve = NULL;
        mutex_exit(bgep->genlock);
        break;
    }

    default:
        return (B_FALSE);
    }
    return (B_TRUE);
}
/*
 * Loopback ioctl code
 */

static lb_property_t loopmodes[] = {
    { normal,   "normal",   BGE_LOOP_NONE           },
    { external, "1000Mbps", BGE_LOOP_EXTERNAL_1000  },
    { external, "100Mbps",  BGE_LOOP_EXTERNAL_100   },
    { external, "10Mbps",   BGE_LOOP_EXTERNAL_10    },
    { internal, "PHY",      BGE_LOOP_INTERNAL_PHY   },
    { internal, "MAC",      BGE_LOOP_INTERNAL_MAC   }
};

static enum ioc_reply
bge_set_loop_mode(bge_t *bgep, uint32_t mode)
{
    /*
     * If the mode isn't being changed, there's nothing to do ...
     */
    if (mode == bgep->param_loop_mode)
        return (IOC_ACK);

    /*
     * Validate the requested mode and prepare a suitable message
     * to explain the link down/up cycle that the change will
     * probably induce ...
     */
    switch (mode) {
    default:
        return (IOC_INVAL);

    case BGE_LOOP_NONE:
    case BGE_LOOP_EXTERNAL_1000:
    case BGE_LOOP_EXTERNAL_100:
    case BGE_LOOP_EXTERNAL_10:
    case BGE_LOOP_INTERNAL_PHY:
    case BGE_LOOP_INTERNAL_MAC:
        break;
    }

    /*
     * All OK; tell the caller to reprogram
     * the PHY and/or MAC for the new mode ...
     */
    bgep->param_loop_mode = mode;
    return (IOC_RESTART_ACK);
}

static enum ioc_reply
bge_loop_ioctl(bge_t *bgep, queue_t *wq, mblk_t *mp, struct iocblk *iocp)
{
    lb_info_sz_t *lbsp;
    lb_property_t *lbpp;
    uint32_t *lbmp;
    int cmd;

    _NOTE(ARGUNUSED(wq))

    /*
     * Validate format of ioctl
     */
    if (mp->b_cont == NULL)
        return (IOC_INVAL);

    cmd = iocp->ioc_cmd;
    switch (cmd) {
    default:
        /* NOTREACHED */
        bge_error(bgep, "bge_loop_ioctl: invalid cmd 0x%x", cmd);
        return (IOC_INVAL);

    case LB_GET_INFO_SIZE:
        if (iocp->ioc_count != sizeof (lb_info_sz_t))
            return (IOC_INVAL);
        lbsp = (lb_info_sz_t *)mp->b_cont->b_rptr;
        *lbsp = sizeof (loopmodes);
        return (IOC_REPLY);

    case LB_GET_INFO:
        if (iocp->ioc_count != sizeof (loopmodes))
            return (IOC_INVAL);
        lbpp = (lb_property_t *)mp->b_cont->b_rptr;
        bcopy(loopmodes, lbpp, sizeof (loopmodes));
        return (IOC_REPLY);

    case LB_GET_MODE:
        if (iocp->ioc_count != sizeof (uint32_t))
            return (IOC_INVAL);
        lbmp = (uint32_t *)mp->b_cont->b_rptr;
        *lbmp = bgep->param_loop_mode;
        return (IOC_REPLY);

    case LB_SET_MODE:
        if (iocp->ioc_count != sizeof (uint32_t))
            return (IOC_INVAL);
        lbmp = (uint32_t *)mp->b_cont->b_rptr;
        return (bge_set_loop_mode(bgep, *lbmp));
    }
}
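/*
 * A typical caller sequence for the loopback ioctls above (a sketch
 * of the standard lb_* protocol): LB_GET_INFO_SIZE to learn the size
 * of the mode table, LB_GET_INFO to fetch the lb_property_t entries,
 * then LB_SET_MODE with the chosen value.  A successful LB_SET_MODE
 * returns IOC_RESTART_ACK, which causes bge_m_ioctl() below to
 * reprogram the PHY/MAC for the new mode.
 */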
/*
 * Specific bge IOCTLs, the gld module handles the generic ones.
 */
static void
bge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
{
    bge_t *bgep = arg;
    struct iocblk *iocp;
    enum ioc_reply status;
    boolean_t need_privilege;
    int err;
    int cmd;

    /*
     * Validate the command before bothering with the mutex ...
     */
    iocp = (struct iocblk *)mp->b_rptr;
    iocp->ioc_error = 0;
    need_privilege = B_TRUE;
    cmd = iocp->ioc_cmd;
    switch (cmd) {
    default:
        miocnak(wq, mp, 0, EINVAL);
        return;

    case BGE_MII_READ:
    case BGE_MII_WRITE:
    case BGE_SEE_READ:
    case BGE_SEE_WRITE:
    case BGE_FLASH_READ:
    case BGE_FLASH_WRITE:
    case BGE_DIAG:
    case BGE_PEEK:
    case BGE_POKE:
    case BGE_PHY_RESET:
    case BGE_SOFT_RESET:
    case BGE_HARD_RESET:
        break;

    case LB_GET_INFO_SIZE:
    case LB_GET_INFO:
    case LB_GET_MODE:
        need_privilege = B_FALSE;
        /* FALLTHRU */
    case LB_SET_MODE:
        break;

    case ND_GET:
        need_privilege = B_FALSE;
        /* FALLTHRU */
    case ND_SET:
        break;
    }

    if (need_privilege) {
        /*
         * Check for specific net_config privilege on Solaris 10+.
         */
        err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
        if (err != 0) {
            miocnak(wq, mp, 0, err);
            return;
        }
    }

    mutex_enter(bgep->genlock);
    if (!(bgep->progress & PROGRESS_INTR)) {
        /* can happen during autorecovery */
        mutex_exit(bgep->genlock);
        miocnak(wq, mp, 0, EIO);
        return;
    }

    switch (cmd) {
    default:
        _NOTE(NOTREACHED)
        status = IOC_INVAL;
        break;

    case BGE_MII_READ:
    case BGE_MII_WRITE:
    case BGE_SEE_READ:
    case BGE_SEE_WRITE:
    case BGE_FLASH_READ:
    case BGE_FLASH_WRITE:
    case BGE_DIAG:
    case BGE_PEEK:
    case BGE_POKE:
    case BGE_PHY_RESET:
    case BGE_SOFT_RESET:
    case BGE_HARD_RESET:
        status = bge_chip_ioctl(bgep, wq, mp, iocp);
        break;

    case LB_GET_INFO_SIZE:
    case LB_GET_INFO:
    case LB_GET_MODE:
    case LB_SET_MODE:
        status = bge_loop_ioctl(bgep, wq, mp, iocp);
        break;

    case ND_GET:
    case ND_SET:
        status = bge_nd_ioctl(bgep, wq, mp, iocp);
        break;
    }

    /*
     * Do we need to reprogram the PHY and/or the MAC?
     * Do it now, while we still have the mutex.
     *
     * Note: update the PHY first, 'cos it controls the
     * speed/duplex parameters that the MAC code uses.
     */
    switch (status) {
    case IOC_RESTART_REPLY:
    case IOC_RESTART_ACK:
        if (bge_reprogram(bgep) == IOC_INVAL)
            status = IOC_INVAL;
        break;
    }

    if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
        ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
        status = IOC_INVAL;
    }
    if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
        ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
        status = IOC_INVAL;
    }
    mutex_exit(bgep->genlock);

    /*
     * Finally, decide how to reply
     */
    switch (status) {
    default:
    case IOC_INVAL:
        /*
         * Error, reply with a NAK and EINVAL or the specified error
         */
        miocnak(wq, mp, 0, iocp->ioc_error == 0 ?
            EINVAL : iocp->ioc_error);
        break;

    case IOC_DONE:
        /*
         * OK, reply already sent
         */
        break;

    case IOC_RESTART_ACK:
    case IOC_ACK:
        /*
         * OK, reply with an ACK
         */
        miocack(wq, mp, 0, 0);
        break;

    case IOC_RESTART_REPLY:
    case IOC_REPLY:
        /*
         * OK, send prepared reply as ACK or NAK
         */
        mp->b_datap->db_type = iocp->ioc_error == 0 ?
            M_IOCACK : M_IOCNAK;
        qreply(wq, mp);
        break;
    }
}

static void
bge_resources_add(bge_t *bgep, time_t time, uint_t pkt_cnt)
{
    recv_ring_t *rrp;
    mac_rx_fifo_t mrf;
    int ring;

    /*
     * Register Rx rings as resources and save mac
     * resource id for future reference
     */
    mrf.mrf_type = MAC_RX_FIFO;
    mrf.mrf_blank = bge_chip_blank;
    mrf.mrf_arg = (void *)bgep;
    mrf.mrf_normal_blank_time = time;
    mrf.mrf_normal_pkt_count = pkt_cnt;

    for (ring = 0; ring < bgep->chipid.rx_rings; ring++) {
        rrp = &bgep->recv[ring];
        rrp->handle = mac_resource_add(bgep->mh,
            (mac_resource_t *)&mrf);
    }
}

static void
bge_m_resources(void *arg)
{
    bge_t *bgep = arg;

    mutex_enter(bgep->genlock);

    bge_resources_add(bgep, bgep->chipid.rx_ticks_norm,
        bgep->chipid.rx_count_norm);
    mutex_exit(bgep->genlock);
}
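/*
 * Note that the blanking time and packet count registered above are
 * the same chipid.rx_ticks_norm and chipid.rx_count_norm values that
 * can be tuned through the _intr_coalesce_blank_time and
 * _intr_coalesce_pkt_cnt private properties earlier in this file, so
 * rx interrupt coalescing and resource registration stay consistent.
 */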
/*
 * ========== Per-instance setup/teardown code ==========
 */

#undef  BGE_DBG
#define BGE_DBG     BGE_DBG_INIT    /* debug flag for this code */

/*
 * Allocate an area of memory and a DMA handle for accessing it
 */
static int
bge_alloc_dma_mem(bge_t *bgep, size_t memsize, ddi_device_acc_attr_t *attr_p,
    uint_t dma_flags, dma_area_t *dma_p)
{
    caddr_t va;
    int err;

    BGE_TRACE(("bge_alloc_dma_mem($%p, %ld, $%p, 0x%x, $%p)",
        (void *)bgep, memsize, attr_p, dma_flags, dma_p));

    /*
     * Allocate handle
     */
    err = ddi_dma_alloc_handle(bgep->devinfo, &dma_attr,
        DDI_DMA_DONTWAIT, NULL, &dma_p->dma_hdl);
    if (err != DDI_SUCCESS)
        return (DDI_FAILURE);

    /*
     * Allocate memory
     */
    err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize, attr_p,
        dma_flags, DDI_DMA_DONTWAIT, NULL, &va, &dma_p->alength,
        &dma_p->acc_hdl);
    if (err != DDI_SUCCESS)
        return (DDI_FAILURE);

    /*
     * Bind the two together
     */
    dma_p->mem_va = va;
    err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL,
        va, dma_p->alength, dma_flags, DDI_DMA_DONTWAIT, NULL,
        &dma_p->cookie, &dma_p->ncookies);

    BGE_DEBUG(("bge_alloc_dma_mem(): bind %d bytes; err %d, %d cookies",
        dma_p->alength, err, dma_p->ncookies));

    if (err != DDI_DMA_MAPPED || dma_p->ncookies != 1)
        return (DDI_FAILURE);

    dma_p->nslots = ~0U;
    dma_p->size = ~0U;
    dma_p->token = ~0U;
    dma_p->offset = 0;
    return (DDI_SUCCESS);
}

/*
 * Free one allocated area of DMAable memory
 */
static void
bge_free_dma_mem(dma_area_t *dma_p)
{
    if (dma_p->dma_hdl != NULL) {
        if (dma_p->ncookies) {
            (void) ddi_dma_unbind_handle(dma_p->dma_hdl);
            dma_p->ncookies = 0;
        }
        ddi_dma_free_handle(&dma_p->dma_hdl);
        dma_p->dma_hdl = NULL;
    }

    if (dma_p->acc_hdl != NULL) {
        ddi_dma_mem_free(&dma_p->acc_hdl);
        dma_p->acc_hdl = NULL;
    }
}
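/*
 * Note: bge_alloc_dma_mem() insists on a single DMA cookie
 * (ncookies == 1), i.e. each allocation must bind as one physically
 * contiguous region, so callers always get memory that the chip can
 * address with a single base/length pair.
 */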
/*
 * Utility routine to carve a slice off a chunk of allocated memory,
 * updating the chunk descriptor accordingly.  The size of the slice
 * is given by the product of the <qty> and <size> parameters.
 */
static void
bge_slice_chunk(dma_area_t *slice, dma_area_t *chunk,
    uint32_t qty, uint32_t size)
{
    static uint32_t sequence = 0xbcd5704a;
    size_t totsize;

    totsize = qty*size;
    ASSERT(size >= 0);
    ASSERT(totsize <= chunk->alength);

    *slice = *chunk;
    slice->nslots = qty;
    slice->size = size;
    slice->alength = totsize;
    slice->token = ++sequence;

    chunk->mem_va = (caddr_t)chunk->mem_va + totsize;
    chunk->alength -= totsize;
    chunk->offset += totsize;
    chunk->cookie.dmac_laddress += totsize;
    chunk->cookie.dmac_size -= totsize;
}
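/*
 * Typical use of bge_slice_chunk() (a sketch matching the ring setup
 * code below): carve a one-buffer slice per slot out of a big
 * pre-allocated chunk until the chunk is exhausted:
 *
 *    for (slot = 0; slot < nslots; ++srbdp, ++slot)
 *        bge_slice_chunk(&srbdp->pbuf, &pbuf, 1, bufsize);
 *    ASSERT(pbuf.alength == 0);
 */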
2075 */ 2076 static void 2077 bge_init_send_ring(bge_t *bgep, uint64_t ring) 2078 { 2079 send_ring_t *srp; 2080 bge_status_t *bsp; 2081 sw_sbd_t *ssbdp; 2082 dma_area_t desc; 2083 dma_area_t pbuf; 2084 uint32_t nslots; 2085 uint32_t slot; 2086 uint32_t split; 2087 sw_txbuf_t *txbuf; 2088 2089 BGE_TRACE(("bge_init_send_ring($%p, %d)", 2090 (void *)bgep, ring)); 2091 2092 /* 2093 * The chip architecture requires that host-based send rings 2094 * have 512 elements per ring. See 570X-PG102-R page 56. 2095 */ 2096 srp = &bgep->send[ring]; 2097 nslots = srp->desc.nslots; 2098 ASSERT(nslots == 0 || nslots == 512); 2099 2100 /* 2101 * Set up the copy of the h/w RCB 2102 */ 2103 srp->hw_rcb.host_ring_addr = srp->desc.cookie.dmac_laddress; 2104 srp->hw_rcb.max_len = nslots; 2105 srp->hw_rcb.flags = nslots > 0 ? 0 : RCB_FLAG_RING_DISABLED; 2106 srp->hw_rcb.nic_ring_addr = NIC_MEM_SHADOW_SEND_RING(ring, nslots); 2107 2108 /* 2109 * Other one-off initialisation of per-ring data 2110 */ 2111 srp->bgep = bgep; 2112 bsp = DMA_VPTR(bgep->status_block); 2113 srp->cons_index_p = SEND_INDEX_P(bsp, ring); 2114 srp->chip_mbx_reg = SEND_RING_HOST_INDEX_REG(ring); 2115 mutex_init(srp->tx_lock, NULL, MUTEX_DRIVER, 2116 DDI_INTR_PRI(bgep->intr_pri)); 2117 mutex_init(srp->txbuf_lock, NULL, MUTEX_DRIVER, 2118 DDI_INTR_PRI(bgep->intr_pri)); 2119 mutex_init(srp->freetxbuf_lock, NULL, MUTEX_DRIVER, 2120 DDI_INTR_PRI(bgep->intr_pri)); 2121 mutex_init(srp->tc_lock, NULL, MUTEX_DRIVER, 2122 DDI_INTR_PRI(bgep->intr_pri)); 2123 if (nslots == 0) 2124 return; 2125 2126 /* 2127 * Allocate the array of s/w Send Buffer Descriptors 2128 */ 2129 ssbdp = kmem_zalloc(nslots*sizeof (*ssbdp), KM_SLEEP); 2130 txbuf = kmem_zalloc(BGE_SEND_BUF_MAX*sizeof (*txbuf), KM_SLEEP); 2131 srp->txbuf_head = 2132 kmem_zalloc(BGE_SEND_BUF_MAX*sizeof (bge_queue_item_t), KM_SLEEP); 2133 srp->pktp = kmem_zalloc(BGE_SEND_BUF_MAX*sizeof (send_pkt_t), KM_SLEEP); 2134 srp->sw_sbds = ssbdp; 2135 srp->txbuf = txbuf; 2136 srp->tx_buffers = BGE_SEND_BUF_NUM; 2137 srp->tx_buffers_low = srp->tx_buffers / 4; 2138 if (bgep->chipid.snd_buff_size > BGE_SEND_BUFF_SIZE_DEFAULT) 2139 srp->tx_array_max = BGE_SEND_BUF_ARRAY_JUMBO; 2140 else 2141 srp->tx_array_max = BGE_SEND_BUF_ARRAY; 2142 srp->tx_array = 1; 2143 2144 /* 2145 * Chunk tx desc area 2146 */ 2147 desc = srp->desc; 2148 for (slot = 0; slot < nslots; ++ssbdp, ++slot) { 2149 bge_slice_chunk(&ssbdp->desc, &desc, 1, 2150 sizeof (bge_sbd_t)); 2151 } 2152 ASSERT(desc.alength == 0); 2153 2154 /* 2155 * Chunk tx buffer area 2156 */ 2157 for (split = 0; split < BGE_SPLIT; ++split) { 2158 pbuf = srp->buf[0][split]; 2159 for (slot = 0; slot < BGE_SEND_BUF_NUM/BGE_SPLIT; ++slot) { 2160 bge_slice_chunk(&txbuf->buf, &pbuf, 1, 2161 bgep->chipid.snd_buff_size); 2162 txbuf++; 2163 } 2164 ASSERT(pbuf.alength == 0); 2165 } 2166 } 2167 2168 /* 2169 * Clean up initialisation done above before the memory is freed 2170 */ 2171 static void 2172 bge_fini_send_ring(bge_t *bgep, uint64_t ring) 2173 { 2174 send_ring_t *srp; 2175 uint32_t array; 2176 uint32_t split; 2177 uint32_t nslots; 2178 2179 BGE_TRACE(("bge_fini_send_ring($%p, %d)", 2180 (void *)bgep, ring)); 2181 2182 srp = &bgep->send[ring]; 2183 mutex_destroy(srp->tc_lock); 2184 mutex_destroy(srp->freetxbuf_lock); 2185 mutex_destroy(srp->txbuf_lock); 2186 mutex_destroy(srp->tx_lock); 2187 nslots = srp->desc.nslots; 2188 if (nslots == 0) 2189 return; 2190 2191 for (array = 1; array < srp->tx_array; ++array) 2192 for (split = 0; split < BGE_SPLIT; ++split) 2193 
bge_free_dma_mem(&srp->buf[array][split]);
2194 kmem_free(srp->sw_sbds, nslots*sizeof (*srp->sw_sbds));
2195 kmem_free(srp->txbuf_head, BGE_SEND_BUF_MAX*sizeof (*srp->txbuf_head));
2196 kmem_free(srp->txbuf, BGE_SEND_BUF_MAX*sizeof (*srp->txbuf));
2197 kmem_free(srp->pktp, BGE_SEND_BUF_MAX*sizeof (*srp->pktp));
2198 srp->sw_sbds = NULL;
2199 srp->txbuf_head = NULL;
2200 srp->txbuf = NULL;
2201 srp->pktp = NULL;
2202 }
2203
2204 /*
2205 * Initialise all transmit, receive, and buffer rings.
2206 */
2207 void
2208 bge_init_rings(bge_t *bgep)
2209 {
2210 uint32_t ring;
2211
2212 BGE_TRACE(("bge_init_rings($%p)", (void *)bgep));
2213
2214 /*
2215 * Perform one-off initialisation of each ring ...
2216 */
2217 for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring)
2218 bge_init_send_ring(bgep, ring);
2219 for (ring = 0; ring < BGE_RECV_RINGS_MAX; ++ring)
2220 bge_init_recv_ring(bgep, ring);
2221 for (ring = 0; ring < BGE_BUFF_RINGS_MAX; ++ring)
2222 bge_init_buff_ring(bgep, ring);
2223 }
2224
2225 /*
2226 * Undo the work of bge_init_rings() above before the memory is freed
2227 */
2228 void
2229 bge_fini_rings(bge_t *bgep)
2230 {
2231 uint32_t ring;
2232
2233 BGE_TRACE(("bge_fini_rings($%p)", (void *)bgep));
2234
2235 for (ring = 0; ring < BGE_BUFF_RINGS_MAX; ++ring)
2236 bge_fini_buff_ring(bgep, ring);
2237 for (ring = 0; ring < BGE_RECV_RINGS_MAX; ++ring)
2238 bge_fini_recv_ring(bgep, ring);
2239 for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring)
2240 bge_fini_send_ring(bgep, ring);
2241 }
2242
2243 /*
2244 * Called from bge_m_stop() to free the tx buffers that were
2245 * allocated by the tx process.
2246 */
2247 void
2248 bge_free_txbuf_arrays(send_ring_t *srp)
2249 {
2250 uint32_t array;
2251 uint32_t split;
2252
2253 ASSERT(mutex_owned(srp->tx_lock));
2254
2255 /*
2256 * Free the extra tx buffer DMA area
2257 */
2258 for (array = 1; array < srp->tx_array; ++array)
2259 for (split = 0; split < BGE_SPLIT; ++split)
2260 bge_free_dma_mem(&srp->buf[array][split]);
2261
2262 /*
2263 * Restore initial tx buffer numbers
2264 */
2265 srp->tx_array = 1;
2266 srp->tx_buffers = BGE_SEND_BUF_NUM;
2267 srp->tx_buffers_low = srp->tx_buffers / 4;
2268 srp->tx_flow = 0;
2269 bzero(srp->pktp, BGE_SEND_BUF_MAX * sizeof (*srp->pktp));
2270 }
2271
2272 /*
2273 * Called from the tx process to allocate more tx buffers
2274 */
2275 bge_queue_item_t *
2276 bge_alloc_txbuf_array(bge_t *bgep, send_ring_t *srp)
2277 {
2278 bge_queue_t *txbuf_queue;
2279 bge_queue_item_t *txbuf_item_last;
2280 bge_queue_item_t *txbuf_item;
2281 bge_queue_item_t *txbuf_item_rtn;
2282 sw_txbuf_t *txbuf;
2283 dma_area_t area;
2284 size_t txbuffsize;
2285 uint32_t slot;
2286 uint32_t array;
2287 uint32_t split;
2288 uint32_t err;
2289
2290 ASSERT(mutex_owned(srp->tx_lock));
2291
2292 array = srp->tx_array;
2293 if (array >= srp->tx_array_max)
2294 return (NULL);
2295
2296 /*
2297 * Allocate memory & handles for TX buffers
2298 */
2299 txbuffsize = BGE_SEND_BUF_NUM*bgep->chipid.snd_buff_size;
2300 ASSERT((txbuffsize % BGE_SPLIT) == 0);
2301 for (split = 0; split < BGE_SPLIT; ++split) {
2302 err = bge_alloc_dma_mem(bgep, txbuffsize/BGE_SPLIT,
2303 &bge_data_accattr, DDI_DMA_WRITE | BGE_DMA_MODE,
2304 &srp->buf[array][split]);
2305 if (err != DDI_SUCCESS) {
2306 /* Free the chunks already allocated, including the failed one */
2307 for (slot = 0; slot <= split; ++slot)
2308 bge_free_dma_mem(&srp->buf[array][slot]);
2309 srp->tx_alloc_fail++;
2310 return (NULL);
2311 }
2312 }
2313
2314 /*
2315 * Chunk tx buffer area
2316 */
2317 txbuf = srp->txbuf +
array*BGE_SEND_BUF_NUM; 2318 for (split = 0; split < BGE_SPLIT; ++split) { 2319 area = srp->buf[array][split]; 2320 for (slot = 0; slot < BGE_SEND_BUF_NUM/BGE_SPLIT; ++slot) { 2321 bge_slice_chunk(&txbuf->buf, &area, 1, 2322 bgep->chipid.snd_buff_size); 2323 txbuf++; 2324 } 2325 } 2326 2327 /* 2328 * Add above buffers to the tx buffer pop queue 2329 */ 2330 txbuf_item = srp->txbuf_head + array*BGE_SEND_BUF_NUM; 2331 txbuf = srp->txbuf + array*BGE_SEND_BUF_NUM; 2332 txbuf_item_last = NULL; 2333 for (slot = 0; slot < BGE_SEND_BUF_NUM; ++slot) { 2334 txbuf_item->item = txbuf; 2335 txbuf_item->next = txbuf_item_last; 2336 txbuf_item_last = txbuf_item; 2337 txbuf++; 2338 txbuf_item++; 2339 } 2340 txbuf_item = srp->txbuf_head + array*BGE_SEND_BUF_NUM; 2341 txbuf_item_rtn = txbuf_item; 2342 txbuf_item++; 2343 txbuf_queue = srp->txbuf_pop_queue; 2344 mutex_enter(txbuf_queue->lock); 2345 txbuf_item->next = txbuf_queue->head; 2346 txbuf_queue->head = txbuf_item_last; 2347 txbuf_queue->count += BGE_SEND_BUF_NUM - 1; 2348 mutex_exit(txbuf_queue->lock); 2349 2350 srp->tx_array++; 2351 srp->tx_buffers += BGE_SEND_BUF_NUM; 2352 srp->tx_buffers_low = srp->tx_buffers / 4; 2353 2354 return (txbuf_item_rtn); 2355 } 2356 2357 /* 2358 * This function allocates all the transmit and receive buffers 2359 * and descriptors, in four chunks. 2360 */ 2361 int 2362 bge_alloc_bufs(bge_t *bgep) 2363 { 2364 dma_area_t area; 2365 size_t rxbuffsize; 2366 size_t txbuffsize; 2367 size_t rxbuffdescsize; 2368 size_t rxdescsize; 2369 size_t txdescsize; 2370 uint32_t ring; 2371 uint32_t rx_rings = bgep->chipid.rx_rings; 2372 uint32_t tx_rings = bgep->chipid.tx_rings; 2373 int split; 2374 int err; 2375 2376 BGE_TRACE(("bge_alloc_bufs($%p)", 2377 (void *)bgep)); 2378 2379 rxbuffsize = BGE_STD_SLOTS_USED*bgep->chipid.std_buf_size; 2380 rxbuffsize += bgep->chipid.jumbo_slots*bgep->chipid.recv_jumbo_size; 2381 rxbuffsize += BGE_MINI_SLOTS_USED*BGE_MINI_BUFF_SIZE; 2382 2383 txbuffsize = BGE_SEND_BUF_NUM*bgep->chipid.snd_buff_size; 2384 txbuffsize *= tx_rings; 2385 2386 rxdescsize = rx_rings*bgep->chipid.recv_slots; 2387 rxdescsize *= sizeof (bge_rbd_t); 2388 2389 rxbuffdescsize = BGE_STD_SLOTS_USED; 2390 rxbuffdescsize += bgep->chipid.jumbo_slots; 2391 rxbuffdescsize += BGE_MINI_SLOTS_USED; 2392 rxbuffdescsize *= sizeof (bge_rbd_t); 2393 2394 txdescsize = tx_rings*BGE_SEND_SLOTS_USED; 2395 txdescsize *= sizeof (bge_sbd_t); 2396 txdescsize += sizeof (bge_statistics_t); 2397 txdescsize += sizeof (bge_status_t); 2398 txdescsize += BGE_STATUS_PADDING; 2399 2400 /* 2401 * Enable PCI relaxed ordering only for RX/TX data buffers 2402 */ 2403 if (bge_relaxed_ordering) 2404 dma_attr.dma_attr_flags |= DDI_DMA_RELAXED_ORDERING; 2405 2406 /* 2407 * Allocate memory & handles for RX buffers 2408 */ 2409 ASSERT((rxbuffsize % BGE_SPLIT) == 0); 2410 for (split = 0; split < BGE_SPLIT; ++split) { 2411 err = bge_alloc_dma_mem(bgep, rxbuffsize/BGE_SPLIT, 2412 &bge_data_accattr, DDI_DMA_READ | BGE_DMA_MODE, 2413 &bgep->rx_buff[split]); 2414 if (err != DDI_SUCCESS) 2415 return (DDI_FAILURE); 2416 } 2417 2418 /* 2419 * Allocate memory & handles for TX buffers 2420 */ 2421 ASSERT((txbuffsize % BGE_SPLIT) == 0); 2422 for (split = 0; split < BGE_SPLIT; ++split) { 2423 err = bge_alloc_dma_mem(bgep, txbuffsize/BGE_SPLIT, 2424 &bge_data_accattr, DDI_DMA_WRITE | BGE_DMA_MODE, 2425 &bgep->tx_buff[split]); 2426 if (err != DDI_SUCCESS) 2427 return (DDI_FAILURE); 2428 } 2429 2430 dma_attr.dma_attr_flags &= ~DDI_DMA_RELAXED_ORDERING; 2431 2432 /* 2433 * Allocate memory 
& handles for receive return rings
2434 */
2435 ASSERT((rxdescsize % rx_rings) == 0);
2436 for (split = 0; split < rx_rings; ++split) {
2437 err = bge_alloc_dma_mem(bgep, rxdescsize/rx_rings,
2438 &bge_desc_accattr, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2439 &bgep->rx_desc[split]);
2440 if (err != DDI_SUCCESS)
2441 return (DDI_FAILURE);
2442 }
2443
2444 /*
2445 * Allocate memory & handles for buffer (producer) descriptor rings
 * (split == rx_rings after the loop above, so this area occupies
 * the next free element of rx_desc[])
2446 */
2447 err = bge_alloc_dma_mem(bgep, rxbuffdescsize, &bge_desc_accattr,
2448 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &bgep->rx_desc[split]);
2449 if (err != DDI_SUCCESS)
2450 return (DDI_FAILURE);
2451
2452 /*
2453 * Allocate memory & handles for TX descriptor rings,
2454 * status block, and statistics area
2455 */
2456 err = bge_alloc_dma_mem(bgep, txdescsize, &bge_desc_accattr,
2457 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &bgep->tx_desc);
2458 if (err != DDI_SUCCESS)
2459 return (DDI_FAILURE);
2460
2461 /*
2462 * Now carve up each of the allocated areas ...
2463 */
2464 for (split = 0; split < BGE_SPLIT; ++split) {
2465 area = bgep->rx_buff[split];
2466 bge_slice_chunk(&bgep->buff[BGE_STD_BUFF_RING].buf[split],
2467 &area, BGE_STD_SLOTS_USED/BGE_SPLIT,
2468 bgep->chipid.std_buf_size);
2469 bge_slice_chunk(&bgep->buff[BGE_JUMBO_BUFF_RING].buf[split],
2470 &area, bgep->chipid.jumbo_slots/BGE_SPLIT,
2471 bgep->chipid.recv_jumbo_size);
2472 bge_slice_chunk(&bgep->buff[BGE_MINI_BUFF_RING].buf[split],
2473 &area, BGE_MINI_SLOTS_USED/BGE_SPLIT,
2474 BGE_MINI_BUFF_SIZE);
2476 }
2477
2478 for (split = 0; split < BGE_SPLIT; ++split) {
2479 area = bgep->tx_buff[split];
2480 for (ring = 0; ring < tx_rings; ++ring)
2481 bge_slice_chunk(&bgep->send[ring].buf[0][split],
2482 &area, BGE_SEND_BUF_NUM/BGE_SPLIT,
2483 bgep->chipid.snd_buff_size);
2484 for (; ring < BGE_SEND_RINGS_MAX; ++ring)
2485 bge_slice_chunk(&bgep->send[ring].buf[0][split],
2486 &area, 0, bgep->chipid.snd_buff_size);
2488 }
2489
2490 for (ring = 0; ring < rx_rings; ++ring)
2491 bge_slice_chunk(&bgep->recv[ring].desc, &bgep->rx_desc[ring],
2492 bgep->chipid.recv_slots, sizeof (bge_rbd_t));
2493
2494 area = bgep->rx_desc[rx_rings];
2495 for (; ring < BGE_RECV_RINGS_MAX; ++ring)
2496 bge_slice_chunk(&bgep->recv[ring].desc, &area,
2497 0, sizeof (bge_rbd_t));
2498 bge_slice_chunk(&bgep->buff[BGE_STD_BUFF_RING].desc, &area,
2499 BGE_STD_SLOTS_USED, sizeof (bge_rbd_t));
2500 bge_slice_chunk(&bgep->buff[BGE_JUMBO_BUFF_RING].desc, &area,
2501 bgep->chipid.jumbo_slots, sizeof (bge_rbd_t));
2502 bge_slice_chunk(&bgep->buff[BGE_MINI_BUFF_RING].desc, &area,
2503 BGE_MINI_SLOTS_USED, sizeof (bge_rbd_t));
2504 ASSERT(area.alength == 0);
2505
2506 area = bgep->tx_desc;
2507 for (ring = 0; ring < tx_rings; ++ring)
2508 bge_slice_chunk(&bgep->send[ring].desc, &area,
2509 BGE_SEND_SLOTS_USED, sizeof (bge_sbd_t));
2510 for (; ring < BGE_SEND_RINGS_MAX; ++ring)
2511 bge_slice_chunk(&bgep->send[ring].desc, &area,
2512 0, sizeof (bge_sbd_t));
2513 bge_slice_chunk(&bgep->statistics, &area, 1, sizeof (bge_statistics_t));
2514 bge_slice_chunk(&bgep->status_block, &area, 1, sizeof (bge_status_t));
2515 ASSERT(area.alength == BGE_STATUS_PADDING);
2516 DMA_ZERO(bgep->status_block);
2517
2518 return (DDI_SUCCESS);
2519 }
2520
2521 /*
2522 * This routine frees the transmit and receive buffers and descriptors.
2523 * Make sure the chip is stopped before calling it!
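 *
 * The DMA areas are released in the reverse of the order in which
 * bge_alloc_bufs() above acquired them: the TX descriptor area first,
 * then the receive descriptor areas, then the TX and RX buffer splits.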
2524 */ 2525 void 2526 bge_free_bufs(bge_t *bgep) 2527 { 2528 int split; 2529 2530 BGE_TRACE(("bge_free_bufs($%p)", 2531 (void *)bgep)); 2532 2533 bge_free_dma_mem(&bgep->tx_desc); 2534 for (split = 0; split < BGE_RECV_RINGS_SPLIT; ++split) 2535 bge_free_dma_mem(&bgep->rx_desc[split]); 2536 for (split = 0; split < BGE_SPLIT; ++split) 2537 bge_free_dma_mem(&bgep->tx_buff[split]); 2538 for (split = 0; split < BGE_SPLIT; ++split) 2539 bge_free_dma_mem(&bgep->rx_buff[split]); 2540 } 2541 2542 /* 2543 * Determine (initial) MAC address ("BIA") to use for this interface 2544 */ 2545 2546 static void 2547 bge_find_mac_address(bge_t *bgep, chip_id_t *cidp) 2548 { 2549 struct ether_addr sysaddr; 2550 char propbuf[8]; /* "true" or "false", plus NUL */ 2551 uchar_t *bytes; 2552 int *ints; 2553 uint_t nelts; 2554 int err; 2555 2556 BGE_TRACE(("bge_find_mac_address($%p)", 2557 (void *)bgep)); 2558 2559 BGE_DEBUG(("bge_find_mac_address: hw_mac_addr %012llx, => %s (%sset)", 2560 cidp->hw_mac_addr, 2561 ether_sprintf((void *)cidp->vendor_addr.addr), 2562 cidp->vendor_addr.set ? "" : "not ")); 2563 2564 /* 2565 * The "vendor's factory-set address" may already have 2566 * been extracted from the chip, but if the property 2567 * "local-mac-address" is set we use that instead. It 2568 * will normally be set by OBP, but it could also be 2569 * specified in a .conf file(!) 2570 * 2571 * There doesn't seem to be a way to define byte-array 2572 * properties in a .conf, so we check whether it looks 2573 * like an array of 6 ints instead. 2574 * 2575 * Then, we check whether it looks like an array of 6 2576 * bytes (which it should, if OBP set it). If we can't 2577 * make sense of it either way, we'll ignore it. 2578 */ 2579 err = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, bgep->devinfo, 2580 DDI_PROP_DONTPASS, localmac_propname, &ints, &nelts); 2581 if (err == DDI_PROP_SUCCESS) { 2582 if (nelts == ETHERADDRL) { 2583 while (nelts--) 2584 cidp->vendor_addr.addr[nelts] = ints[nelts]; 2585 cidp->vendor_addr.set = B_TRUE; 2586 } 2587 ddi_prop_free(ints); 2588 } 2589 2590 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, bgep->devinfo, 2591 DDI_PROP_DONTPASS, localmac_propname, &bytes, &nelts); 2592 if (err == DDI_PROP_SUCCESS) { 2593 if (nelts == ETHERADDRL) { 2594 while (nelts--) 2595 cidp->vendor_addr.addr[nelts] = bytes[nelts]; 2596 cidp->vendor_addr.set = B_TRUE; 2597 } 2598 ddi_prop_free(bytes); 2599 } 2600 2601 BGE_DEBUG(("bge_find_mac_address: +local %s (%sset)", 2602 ether_sprintf((void *)cidp->vendor_addr.addr), 2603 cidp->vendor_addr.set ? "" : "not ")); 2604 2605 /* 2606 * Look up the OBP property "local-mac-address?". Note that even 2607 * though its value is a string (which should be "true" or "false"), 2608 * it can't be decoded by ddi_prop_lookup_string(9F). So, we zero 2609 * the buffer first and then fetch the property as an untyped array; 2610 * this may or may not include a final NUL, but since there will 2611 * always be one left at the end of the buffer we can now treat it 2612 * as a string anyway. 2613 */ 2614 nelts = sizeof (propbuf); 2615 bzero(propbuf, nelts--); 2616 err = ddi_getlongprop_buf(DDI_DEV_T_ANY, bgep->devinfo, 2617 DDI_PROP_CANSLEEP, localmac_boolname, propbuf, (int *)&nelts); 2618 2619 /* 2620 * Now, if the address still isn't set from the hardware (SEEPROM) 2621 * or the OBP or .conf property, OR if the user has foolishly set 2622 * 'local-mac-address? = false', use "the system address" instead 2623 * (but only if it's non-null i.e. has been set from the IDPROM). 
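 *
 * To summarise, the candidate sources are consulted in increasing
 * order of precedence: the address read from the chip/SEEPROM, then
 * the "local-mac-address" property (OBP or .conf), then the system
 * IDPROM address (only in the cases described above), and finally
 * the "mac-address" property checked below.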
2624 */
2625 if (cidp->vendor_addr.set == B_FALSE || strcmp(propbuf, "false") == 0)
2626 if (localetheraddr(NULL, &sysaddr) != 0) {
2627 ethaddr_copy(&sysaddr, cidp->vendor_addr.addr);
2628 cidp->vendor_addr.set = B_TRUE;
2629 }
2630
2631 BGE_DEBUG(("bge_find_mac_address: +system %s (%sset)",
2632 ether_sprintf((void *)cidp->vendor_addr.addr),
2633 cidp->vendor_addr.set ? "" : "not "));
2634
2635 /*
2636 * Finally(!), if there's a valid "mac-address" property (created
2637 * if we netbooted from this interface), we must use this instead
2638 * of any of the above to ensure that the NFS/install server doesn't
2639 * get confused by the address changing as Solaris takes over!
2640 */
2641 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, bgep->devinfo,
2642 DDI_PROP_DONTPASS, macaddr_propname, &bytes, &nelts);
2643 if (err == DDI_PROP_SUCCESS) {
2644 if (nelts == ETHERADDRL) {
2645 while (nelts--)
2646 cidp->vendor_addr.addr[nelts] = bytes[nelts];
2647 cidp->vendor_addr.set = B_TRUE;
2648 }
2649 ddi_prop_free(bytes);
2650 }
2651
2652 BGE_DEBUG(("bge_find_mac_address: =final %s (%sset)",
2653 ether_sprintf((void *)cidp->vendor_addr.addr),
2654 cidp->vendor_addr.set ? "" : "not "));
2655 }
2656
2657
2658 /*ARGSUSED*/
2659 int
2660 bge_check_acc_handle(bge_t *bgep, ddi_acc_handle_t handle)
2661 {
2662 ddi_fm_error_t de;
2663
2664 ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION);
2665 ddi_fm_acc_err_clear(handle, DDI_FME_VERSION);
2666 return (de.fme_status);
2667 }
2668
2669 /*ARGSUSED*/
2670 int
2671 bge_check_dma_handle(bge_t *bgep, ddi_dma_handle_t handle)
2672 {
2673 ddi_fm_error_t de;
2674
2675 ASSERT(bgep->progress & PROGRESS_BUFS);
2676 ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION);
2677 return (de.fme_status);
2678 }
2679
2680 /*
2681 * The IO fault service error handling callback function
2682 */
2683 /*ARGSUSED*/
2684 static int
2685 bge_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
2686 {
2687 /*
2688 * As the driver can always deal with an error in any DMA or
2689 * access handle, we can just return the fme_status value.
2690 */
2691 pci_ereport_post(dip, err, NULL);
2692 return (err->fme_status);
2693 }
2694
2695 static void
2696 bge_fm_init(bge_t *bgep)
2697 {
2698 ddi_iblock_cookie_t iblk;
2699
2700 /* Only register with IO Fault Services if we have some capability */
2701 if (bgep->fm_capabilities) {
2702 bge_reg_accattr.devacc_attr_access = DDI_FLAGERR_ACC;
2703 bge_desc_accattr.devacc_attr_access = DDI_FLAGERR_ACC;
2704 dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
2705
2706 /* Register capabilities with IO Fault Services */
2707 ddi_fm_init(bgep->devinfo, &bgep->fm_capabilities, &iblk);
2708
2709 /*
2710 * Initialize pci ereport capabilities if ereport capable
2711 */
2712 if (DDI_FM_EREPORT_CAP(bgep->fm_capabilities) ||
2713 DDI_FM_ERRCB_CAP(bgep->fm_capabilities))
2714 pci_ereport_setup(bgep->devinfo);
2715
2716 /*
2717 * Register error callback if error callback capable
2718 */
2719 if (DDI_FM_ERRCB_CAP(bgep->fm_capabilities))
2720 ddi_fm_handler_register(bgep->devinfo,
2721 bge_fm_error_cb, (void*) bgep);
2722 } else {
2723 /*
2724 * Reset these fields to their non-FMA defaults when no
2725 * FMA capabilities are available at runtime.
2726 */ 2727 bge_reg_accattr.devacc_attr_access = DDI_DEFAULT_ACC; 2728 bge_desc_accattr.devacc_attr_access = DDI_DEFAULT_ACC; 2729 dma_attr.dma_attr_flags = 0; 2730 } 2731 } 2732 2733 static void 2734 bge_fm_fini(bge_t *bgep) 2735 { 2736 /* Only unregister FMA capabilities if we registered some */ 2737 if (bgep->fm_capabilities) { 2738 2739 /* 2740 * Release any resources allocated by pci_ereport_setup() 2741 */ 2742 if (DDI_FM_EREPORT_CAP(bgep->fm_capabilities) || 2743 DDI_FM_ERRCB_CAP(bgep->fm_capabilities)) 2744 pci_ereport_teardown(bgep->devinfo); 2745 2746 /* 2747 * Un-register error callback if error callback capable 2748 */ 2749 if (DDI_FM_ERRCB_CAP(bgep->fm_capabilities)) 2750 ddi_fm_handler_unregister(bgep->devinfo); 2751 2752 /* Unregister from IO Fault Services */ 2753 ddi_fm_fini(bgep->devinfo); 2754 } 2755 } 2756 2757 static void 2758 #ifdef BGE_IPMI_ASF 2759 bge_unattach(bge_t *bgep, uint_t asf_mode) 2760 #else 2761 bge_unattach(bge_t *bgep) 2762 #endif 2763 { 2764 BGE_TRACE(("bge_unattach($%p)", 2765 (void *)bgep)); 2766 2767 /* 2768 * Flag that no more activity may be initiated 2769 */ 2770 bgep->progress &= ~PROGRESS_READY; 2771 2772 /* 2773 * Quiesce the PHY and MAC (leave it reset but still powered). 2774 * Clean up and free all BGE data structures 2775 */ 2776 if (bgep->periodic_id != NULL) { 2777 ddi_periodic_delete(bgep->periodic_id); 2778 bgep->periodic_id = NULL; 2779 } 2780 if (bgep->progress & PROGRESS_KSTATS) 2781 bge_fini_kstats(bgep); 2782 if (bgep->progress & PROGRESS_NDD) 2783 bge_nd_cleanup(bgep); 2784 if (bgep->progress & PROGRESS_PHY) 2785 bge_phys_reset(bgep); 2786 if (bgep->progress & PROGRESS_HWINT) { 2787 mutex_enter(bgep->genlock); 2788 #ifdef BGE_IPMI_ASF 2789 if (bge_chip_reset(bgep, B_FALSE, asf_mode) != DDI_SUCCESS) 2790 #else 2791 if (bge_chip_reset(bgep, B_FALSE) != DDI_SUCCESS) 2792 #endif 2793 ddi_fm_service_impact(bgep->devinfo, 2794 DDI_SERVICE_UNAFFECTED); 2795 #ifdef BGE_IPMI_ASF 2796 if (bgep->asf_enabled) { 2797 /* 2798 * This register has been overlaid. We restore its 2799 * initial value here. 
2800 */
2801 bge_nic_put32(bgep, BGE_NIC_DATA_SIG_ADDR,
2802 BGE_NIC_DATA_SIG);
2803 }
2804 #endif
2805 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK)
2806 ddi_fm_service_impact(bgep->devinfo,
2807 DDI_SERVICE_UNAFFECTED);
2808 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK)
2809 ddi_fm_service_impact(bgep->devinfo,
2810 DDI_SERVICE_UNAFFECTED);
2811 mutex_exit(bgep->genlock);
2812 }
2813 if (bgep->progress & PROGRESS_INTR) {
2814 bge_intr_disable(bgep);
2815 bge_fini_rings(bgep);
2816 }
2817 if (bgep->progress & PROGRESS_HWINT) {
2818 bge_rem_intrs(bgep);
2819 rw_destroy(bgep->errlock);
2820 mutex_destroy(bgep->softintrlock);
2821 mutex_destroy(bgep->genlock);
2822 }
2823 if (bgep->progress & PROGRESS_FACTOTUM)
2824 ddi_remove_softintr(bgep->factotum_id);
2825 if (bgep->progress & PROGRESS_RESCHED)
2826 ddi_remove_softintr(bgep->drain_id);
2827 if (bgep->progress & PROGRESS_BUFS)
2828 bge_free_bufs(bgep);
2829 if (bgep->progress & PROGRESS_REGS)
2830 ddi_regs_map_free(&bgep->io_handle);
2831 if (bgep->progress & PROGRESS_CFG)
2832 pci_config_teardown(&bgep->cfg_handle);
2833
2834 bge_fm_fini(bgep);
2835
2836 ddi_remove_minor_node(bgep->devinfo, NULL);
2837 kmem_free(bgep->pstats, sizeof (bge_statistics_reg_t));
2838 kmem_free(bgep->nd_params, PARAM_COUNT * sizeof (nd_param_t));
2839 kmem_free(bgep, sizeof (*bgep));
2840 }
2841
2842 static int
2843 bge_resume(dev_info_t *devinfo)
2844 {
2845 bge_t *bgep; /* Our private data */
2846 chip_id_t *cidp;
2847 chip_id_t chipid;
2848
2849 bgep = ddi_get_driver_private(devinfo);
2850 if (bgep == NULL)
2851 return (DDI_FAILURE);
2852
2853 /*
2854 * Refuse to resume if the data structures aren't consistent
2855 */
2856 if (bgep->devinfo != devinfo)
2857 return (DDI_FAILURE);
2858
2859 #ifdef BGE_IPMI_ASF
2860 /*
2861 * Power management is not currently supported in BGE.
2862 * To implement it, please add the ASF/IPMI related
2863 * code here.
2864 */
2865
2866 #endif
2867
2868 /*
2869 * Read chip ID & set up config space command register(s)
2870 * Refuse to resume if the chip has changed its identity!
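 *
 * "Identity" here means the PCI vendor, device, and revision IDs
 * plus the ASIC revision: if any of these differs from the value
 * recorded at attach time, the resume attempt is rejected.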
2871 */ 2872 cidp = &bgep->chipid; 2873 mutex_enter(bgep->genlock); 2874 bge_chip_cfg_init(bgep, &chipid, B_FALSE); 2875 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) { 2876 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 2877 mutex_exit(bgep->genlock); 2878 return (DDI_FAILURE); 2879 } 2880 mutex_exit(bgep->genlock); 2881 if (chipid.vendor != cidp->vendor) 2882 return (DDI_FAILURE); 2883 if (chipid.device != cidp->device) 2884 return (DDI_FAILURE); 2885 if (chipid.revision != cidp->revision) 2886 return (DDI_FAILURE); 2887 if (chipid.asic_rev != cidp->asic_rev) 2888 return (DDI_FAILURE); 2889 2890 /* 2891 * All OK, reinitialise h/w & kick off GLD scheduling 2892 */ 2893 mutex_enter(bgep->genlock); 2894 if (bge_restart(bgep, B_TRUE) != DDI_SUCCESS) { 2895 (void) bge_check_acc_handle(bgep, bgep->cfg_handle); 2896 (void) bge_check_acc_handle(bgep, bgep->io_handle); 2897 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 2898 mutex_exit(bgep->genlock); 2899 return (DDI_FAILURE); 2900 } 2901 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) { 2902 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 2903 mutex_exit(bgep->genlock); 2904 return (DDI_FAILURE); 2905 } 2906 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) { 2907 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 2908 mutex_exit(bgep->genlock); 2909 return (DDI_FAILURE); 2910 } 2911 mutex_exit(bgep->genlock); 2912 return (DDI_SUCCESS); 2913 } 2914 2915 /* 2916 * attach(9E) -- Attach a device to the system 2917 * 2918 * Called once for each board successfully probed. 2919 */ 2920 static int 2921 bge_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd) 2922 { 2923 bge_t *bgep; /* Our private data */ 2924 mac_register_t *macp; 2925 chip_id_t *cidp; 2926 caddr_t regs; 2927 int instance; 2928 int err; 2929 int intr_types; 2930 #ifdef BGE_IPMI_ASF 2931 uint32_t mhcrValue; 2932 #ifdef __sparc 2933 uint16_t value16; 2934 #endif 2935 #ifdef BGE_NETCONSOLE 2936 int retval; 2937 #endif 2938 #endif 2939 2940 instance = ddi_get_instance(devinfo); 2941 2942 BGE_GTRACE(("bge_attach($%p, %d) instance %d", 2943 (void *)devinfo, cmd, instance)); 2944 BGE_BRKPT(NULL, "bge_attach"); 2945 2946 switch (cmd) { 2947 default: 2948 return (DDI_FAILURE); 2949 2950 case DDI_RESUME: 2951 return (bge_resume(devinfo)); 2952 2953 case DDI_ATTACH: 2954 break; 2955 } 2956 2957 bgep = kmem_zalloc(sizeof (*bgep), KM_SLEEP); 2958 bgep->pstats = kmem_zalloc(sizeof (bge_statistics_reg_t), KM_SLEEP); 2959 bgep->nd_params = 2960 kmem_zalloc(PARAM_COUNT * sizeof (nd_param_t), KM_SLEEP); 2961 ddi_set_driver_private(devinfo, bgep); 2962 bgep->bge_guard = BGE_GUARD; 2963 bgep->devinfo = devinfo; 2964 bgep->param_drain_max = 64; 2965 bgep->param_msi_cnt = 0; 2966 bgep->param_loop_mode = 0; 2967 2968 /* 2969 * Initialize more fields in BGE private data 2970 */ 2971 bgep->debug = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 2972 DDI_PROP_DONTPASS, debug_propname, bge_debug); 2973 (void) snprintf(bgep->ifname, sizeof (bgep->ifname), "%s%d", 2974 BGE_DRIVER_NAME, instance); 2975 2976 /* 2977 * Initialize for fma support 2978 */ 2979 bgep->fm_capabilities = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 2980 DDI_PROP_DONTPASS, fm_cap, 2981 DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE | 2982 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE); 2983 BGE_DEBUG(("bgep->fm_capabilities = %d", bgep->fm_capabilities)); 2984 bge_fm_init(bgep); 2985 2986 /* 2987 * Look up the IOMMU's page size for DVMA mappings (must be 2988 * a power of 2) and 
convert to a mask. This can be used to
2989 * determine whether a message buffer crosses a page boundary.
2990 * Note: in 2s complement binary notation, if X is a power of
2991 * 2, then -X has the representation "11...1100...00".
 * For example, an 8KB page size gives a pagemask of -0x2000, which
 * keeps all but the low 13 address bits; a buffer then crosses a
 * page boundary iff its first and last bytes differ under that mask.
2992 */
2993 bgep->pagemask = dvma_pagesize(devinfo);
2994 ASSERT(ddi_ffs(bgep->pagemask) == ddi_fls(bgep->pagemask));
2995 bgep->pagemask = -bgep->pagemask;
2996
2997 /*
2998 * Map config space registers
2999 * Read chip ID & set up config space command register(s)
3000 *
3001 * Note: this leaves the chip accessible by Memory Space
3002 * accesses, but with interrupts and Bus Mastering off.
3003 * This should ensure that nothing untoward will happen
3004 * if it has been left active by the (net-)bootloader.
3005 * We'll re-enable Bus Mastering once we've reset the chip,
3006 * and allow interrupts only when everything else is set up.
3007 */
3008 err = pci_config_setup(devinfo, &bgep->cfg_handle);
3009 #ifdef BGE_IPMI_ASF
3010 #ifdef __sparc
3011 value16 = pci_config_get16(bgep->cfg_handle, PCI_CONF_COMM);
3012 value16 = value16 | (PCI_COMM_MAE | PCI_COMM_ME);
3013 pci_config_put16(bgep->cfg_handle, PCI_CONF_COMM, value16);
3014 mhcrValue = MHCR_ENABLE_INDIRECT_ACCESS |
3015 MHCR_ENABLE_TAGGED_STATUS_MODE |
3016 MHCR_MASK_INTERRUPT_MODE |
3017 MHCR_MASK_PCI_INT_OUTPUT |
3018 MHCR_CLEAR_INTERRUPT_INTA |
3019 MHCR_ENABLE_ENDIAN_WORD_SWAP |
3020 MHCR_ENABLE_ENDIAN_BYTE_SWAP;
3021 pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_MHCR, mhcrValue);
3022 bge_ind_put32(bgep, MEMORY_ARBITER_MODE_REG,
3023 bge_ind_get32(bgep, MEMORY_ARBITER_MODE_REG) |
3024 MEMORY_ARBITER_ENABLE);
3025 #else
3026 mhcrValue = pci_config_get32(bgep->cfg_handle, PCI_CONF_BGE_MHCR);
3027 #endif
3028 if (mhcrValue & MHCR_ENABLE_ENDIAN_WORD_SWAP) {
3029 bgep->asf_wordswapped = B_TRUE;
3030 } else {
3031 bgep->asf_wordswapped = B_FALSE;
3032 }
3033 bge_asf_get_config(bgep);
3034 #endif
3035 if (err != DDI_SUCCESS) {
3036 bge_problem(bgep, "pci_config_setup() failed");
3037 goto attach_fail;
3038 }
3039 bgep->progress |= PROGRESS_CFG;
3040 cidp = &bgep->chipid;
3041 bzero(cidp, sizeof (*cidp));
3042 bge_chip_cfg_init(bgep, cidp, B_FALSE);
3043 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
3044 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
3045 goto attach_fail;
3046 }
3047
3048 #ifdef BGE_IPMI_ASF
3049 if (DEVICE_5721_SERIES_CHIPSETS(bgep) ||
3050 DEVICE_5714_SERIES_CHIPSETS(bgep)) {
3051 bgep->asf_newhandshake = B_TRUE;
3052 } else {
3053 bgep->asf_newhandshake = B_FALSE;
3054 }
3055 #endif
3056
3057 /*
3058 * Update those parts of the chip ID derived from volatile
3059 * registers with the values seen by OBP (in case the chip
3060 * has been reset externally and therefore lost them).
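 *
 * Each ddi_prop_get_int() call below passes the value already read
 * from the chip as the default, so a property only overrides the
 * corresponding field when OBP or a .conf file actually supplies one.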
3061 */
3062 cidp->subven = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
3063 DDI_PROP_DONTPASS, subven_propname, cidp->subven);
3064 cidp->subdev = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
3065 DDI_PROP_DONTPASS, subdev_propname, cidp->subdev);
3066 cidp->clsize = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
3067 DDI_PROP_DONTPASS, clsize_propname, cidp->clsize);
3068 cidp->latency = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
3069 DDI_PROP_DONTPASS, latency_propname, cidp->latency);
3070 cidp->rx_rings = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
3071 DDI_PROP_DONTPASS, rxrings_propname, cidp->rx_rings);
3072 cidp->tx_rings = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
3073 DDI_PROP_DONTPASS, txrings_propname, cidp->tx_rings);
3074
3075 if (bge_jumbo_enable == B_TRUE) {
3076 cidp->default_mtu = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
3077 DDI_PROP_DONTPASS, default_mtu, BGE_DEFAULT_MTU);
3078 if ((cidp->default_mtu < BGE_DEFAULT_MTU) ||
3079 (cidp->default_mtu > BGE_MAXIMUM_MTU)) {
3080 cidp->default_mtu = BGE_DEFAULT_MTU;
3081 }
3082 }
3083 /*
3084 * Map operating registers
3085 */
3086 err = ddi_regs_map_setup(devinfo, BGE_PCI_OPREGS_RNUMBER,
3087 &regs, 0, 0, &bge_reg_accattr, &bgep->io_handle);
3088 if (err != DDI_SUCCESS) {
3089 bge_problem(bgep, "ddi_regs_map_setup() failed");
3090 goto attach_fail;
3091 }
3092 bgep->io_regs = regs;
3093 bgep->progress |= PROGRESS_REGS;
3094
3095 /*
3096 * Characterise the device, so we know its requirements.
3097 * Then allocate the appropriate TX and RX descriptors & buffers.
3098 */
3099 if (bge_chip_id_init(bgep) == EIO) {
3100 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
3101 goto attach_fail;
3102 }
3103 err = bge_alloc_bufs(bgep);
3104 if (err != DDI_SUCCESS) {
3105 bge_problem(bgep, "DMA buffer allocation failed");
3106 goto attach_fail;
3107 }
3108 bgep->progress |= PROGRESS_BUFS;
3109
3110 /*
3111 * Add the softint handlers:
3112 *
3113 * Both of these handlers are used to avoid restrictions on the
3114 * context and/or mutexes required for some operations. In
3115 * particular, the hardware interrupt handler and its subfunctions
3116 * can detect a number of conditions that we don't want to handle
3117 * in that context or with that set of mutexes held. So, these
3118 * softints are triggered instead:
3119 *
3120 * the <resched> softint is triggered if we have previously
3121 * had to refuse to send a packet because of resource shortage
3122 * (we've run out of transmit buffers), but the send completion
3123 * interrupt handler has now detected that more buffers have
3124 * become available.
3125 *
3126 * the <factotum> is triggered if the h/w interrupt handler
3127 * sees the <link state changed> or <error> bits in the status
3128 * block. It's also triggered periodically to poll the link
3129 * state, just in case we aren't getting link status change
3130 * interrupts ...
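 *
 * (The periodic trigger is provided by bge_chip_cyclic(), the
 * handler registered via ddi_periodic_add() near the end of this
 * routine, which prods the <factotum> softint as required.)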
3131 */ 3132 err = ddi_add_softintr(devinfo, DDI_SOFTINT_LOW, &bgep->drain_id, 3133 NULL, NULL, bge_send_drain, (caddr_t)bgep); 3134 if (err != DDI_SUCCESS) { 3135 bge_problem(bgep, "ddi_add_softintr() failed"); 3136 goto attach_fail; 3137 } 3138 bgep->progress |= PROGRESS_RESCHED; 3139 err = ddi_add_softintr(devinfo, DDI_SOFTINT_LOW, &bgep->factotum_id, 3140 NULL, NULL, bge_chip_factotum, (caddr_t)bgep); 3141 if (err != DDI_SUCCESS) { 3142 bge_problem(bgep, "ddi_add_softintr() failed"); 3143 goto attach_fail; 3144 } 3145 bgep->progress |= PROGRESS_FACTOTUM; 3146 3147 /* Get supported interrupt types */ 3148 if (ddi_intr_get_supported_types(devinfo, &intr_types) != DDI_SUCCESS) { 3149 bge_error(bgep, "ddi_intr_get_supported_types failed\n"); 3150 3151 goto attach_fail; 3152 } 3153 3154 BGE_DEBUG(("%s: ddi_intr_get_supported_types() returned: %x", 3155 bgep->ifname, intr_types)); 3156 3157 if ((intr_types & DDI_INTR_TYPE_MSI) && bgep->chipid.msi_enabled) { 3158 if (bge_add_intrs(bgep, DDI_INTR_TYPE_MSI) != DDI_SUCCESS) { 3159 bge_error(bgep, "MSI registration failed, " 3160 "trying FIXED interrupt type\n"); 3161 } else { 3162 BGE_DEBUG(("%s: Using MSI interrupt type", 3163 bgep->ifname)); 3164 bgep->intr_type = DDI_INTR_TYPE_MSI; 3165 bgep->progress |= PROGRESS_HWINT; 3166 } 3167 } 3168 3169 if (!(bgep->progress & PROGRESS_HWINT) && 3170 (intr_types & DDI_INTR_TYPE_FIXED)) { 3171 if (bge_add_intrs(bgep, DDI_INTR_TYPE_FIXED) != DDI_SUCCESS) { 3172 bge_error(bgep, "FIXED interrupt " 3173 "registration failed\n"); 3174 goto attach_fail; 3175 } 3176 3177 BGE_DEBUG(("%s: Using FIXED interrupt type", bgep->ifname)); 3178 3179 bgep->intr_type = DDI_INTR_TYPE_FIXED; 3180 bgep->progress |= PROGRESS_HWINT; 3181 } 3182 3183 if (!(bgep->progress & PROGRESS_HWINT)) { 3184 bge_error(bgep, "No interrupts registered\n"); 3185 goto attach_fail; 3186 } 3187 3188 /* 3189 * Note that interrupts are not enabled yet as 3190 * mutex locks are not initialized. Initialize mutex locks. 3191 */ 3192 mutex_init(bgep->genlock, NULL, MUTEX_DRIVER, 3193 DDI_INTR_PRI(bgep->intr_pri)); 3194 mutex_init(bgep->softintrlock, NULL, MUTEX_DRIVER, 3195 DDI_INTR_PRI(bgep->intr_pri)); 3196 rw_init(bgep->errlock, NULL, RW_DRIVER, 3197 DDI_INTR_PRI(bgep->intr_pri)); 3198 3199 /* 3200 * Initialize rings. 3201 */ 3202 bge_init_rings(bgep); 3203 3204 /* 3205 * Now that mutex locks are initialized, enable interrupts. 3206 */ 3207 bge_intr_enable(bgep); 3208 bgep->progress |= PROGRESS_INTR; 3209 3210 /* 3211 * Initialise link state variables 3212 * Stop, reset & reinitialise the chip. 3213 * Initialise the (internal) PHY. 3214 */ 3215 bgep->link_state = LINK_STATE_UNKNOWN; 3216 3217 mutex_enter(bgep->genlock); 3218 3219 /* 3220 * Reset chip & rings to initial state; also reset address 3221 * filtering, promiscuity, loopback mode. 
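 *
 * When BGE_IPMI_ASF is built in, the reset mode passed to
 * bge_reset() below depends on whether netconsole support is also
 * compiled in: ASF_MODE_INIT under BGE_NETCONSOLE, ASF_MODE_SHUTDOWN
 * otherwise.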
3222 */
3223 #ifdef BGE_IPMI_ASF
3224 #ifdef BGE_NETCONSOLE
3225 if (bge_reset(bgep, ASF_MODE_INIT) != DDI_SUCCESS) {
3226 #else
3227 if (bge_reset(bgep, ASF_MODE_SHUTDOWN) != DDI_SUCCESS) {
3228 #endif
3229 #else
3230 if (bge_reset(bgep) != DDI_SUCCESS) {
3231 #endif
3232 (void) bge_check_acc_handle(bgep, bgep->cfg_handle);
3233 (void) bge_check_acc_handle(bgep, bgep->io_handle);
3234 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
3235 mutex_exit(bgep->genlock);
3236 goto attach_fail;
3237 }
3238
3239 #ifdef BGE_IPMI_ASF
3240 if (bgep->asf_enabled) {
3241 bgep->asf_status = ASF_STAT_RUN_INIT;
3242 }
3243 #endif
3244
3245 bzero(bgep->mcast_hash, sizeof (bgep->mcast_hash));
3246 bzero(bgep->mcast_refs, sizeof (bgep->mcast_refs));
3247 bgep->promisc = B_FALSE;
3248 bgep->param_loop_mode = BGE_LOOP_NONE;
3249 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
3250 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
3251 mutex_exit(bgep->genlock);
3252 goto attach_fail;
3253 }
3254 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
3255 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
3256 mutex_exit(bgep->genlock);
3257 goto attach_fail;
3258 }
3259
3260 mutex_exit(bgep->genlock);
3261
3262 if (bge_phys_init(bgep) == EIO) {
3263 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
3264 goto attach_fail;
3265 }
3266 bgep->progress |= PROGRESS_PHY;
3267
3268 /*
3269 * Register NDD-tweakable parameters
3270 */
3271 if (bge_nd_init(bgep)) {
3272 bge_problem(bgep, "bge_nd_init() failed");
3273 goto attach_fail;
3274 }
3275 bgep->progress |= PROGRESS_NDD;
3276
3277 /*
3278 * Create & initialise named kstats
3279 */
3280 bge_init_kstats(bgep, instance);
3281 bgep->progress |= PROGRESS_KSTATS;
3282
3283 /*
3284 * Determine whether to override the chip's own MAC address
3285 */
3286 bge_find_mac_address(bgep, cidp);
3287 ethaddr_copy(cidp->vendor_addr.addr, bgep->curr_addr[0].addr);
3288 bgep->curr_addr[0].set = B_TRUE;
3289
3290 bgep->unicst_addr_total = MAC_ADDRESS_REGS_MAX;
3291 /*
3292 * The number of available addresses is one less than the
3293 * maximum, as the primary address is not advertised as one
3294 * of the multiple MAC addresses.
3295 */
3296 bgep->unicst_addr_avail = MAC_ADDRESS_REGS_MAX - 1;
3297
3298 if ((macp = mac_alloc(MAC_VERSION)) == NULL)
3299 goto attach_fail;
3300 macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
3301 macp->m_driver = bgep;
3302 macp->m_dip = devinfo;
3303 macp->m_src_addr = bgep->curr_addr[0].addr;
3304 macp->m_callbacks = &bge_m_callbacks;
3305 macp->m_min_sdu = 0;
3306 macp->m_max_sdu = cidp->ethmax_size - sizeof (struct ether_header);
3307 macp->m_margin = VLAN_TAGSZ;
3308 /*
3309 * Finally, we're ready to register ourselves with the MAC layer
3310 * interface; if this succeeds, we're all ready to start()
3311 */
3312 err = mac_register(macp, &bgep->mh);
3313 mac_free(macp);
3314 if (err != 0)
3315 goto attach_fail;
3316
3317 /*
3318 * Register a periodic handler.
3319 * bge_chip_cyclic() is invoked in kernel context.
3320 */
3321 bgep->periodic_id = ddi_periodic_add(bge_chip_cyclic, bgep,
3322 BGE_CYCLIC_PERIOD, DDI_IPL_0);
3323
3324 bgep->progress |= PROGRESS_READY;
3325 ASSERT(bgep->bge_guard == BGE_GUARD);
3326 #ifdef BGE_IPMI_ASF
3327 #ifdef BGE_NETCONSOLE
3328 if (bgep->asf_enabled) {
3329 mutex_enter(bgep->genlock);
3330 retval = bge_chip_start(bgep, B_TRUE);
3331 mutex_exit(bgep->genlock);
3332 if (retval != DDI_SUCCESS)
3333 goto attach_fail;
3334 }
3335 #endif
3336 #endif
3337 return (DDI_SUCCESS);
3338
3339 attach_fail:
3340 #ifdef BGE_IPMI_ASF
3341 bge_unattach(bgep, ASF_MODE_SHUTDOWN);
3342 #else
3343 bge_unattach(bgep);
3344 #endif
3345 return (DDI_FAILURE);
3346 }
3347
3348 /*
3349 * bge_suspend() -- suspend transmit/receive for powerdown
3350 */
3351 static int
3352 bge_suspend(bge_t *bgep)
3353 {
3354 /*
3355 * Stop processing and idle (powerdown) the PHY ...
3356 */
3357 mutex_enter(bgep->genlock);
3358 #ifdef BGE_IPMI_ASF
3359 /*
3360 * Power management is not currently supported in BGE.
3361 * To implement it, please add the ASF/IPMI related
3362 * code here.
3363 */
3364 #endif
3365 bge_stop(bgep);
3366 if (bge_phys_idle(bgep) != DDI_SUCCESS) {
3367 (void) bge_check_acc_handle(bgep, bgep->io_handle);
3368 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
3369 mutex_exit(bgep->genlock);
3370 return (DDI_FAILURE);
3371 }
3372 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
3373 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
3374 mutex_exit(bgep->genlock);
3375 return (DDI_FAILURE);
3376 }
3377 mutex_exit(bgep->genlock);
3378
3379 return (DDI_SUCCESS);
3380 }
3381
3382 /*
3383 * detach(9E) -- Detach a device from the system
3384 */
3385 static int
3386 bge_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
3387 {
3388 bge_t *bgep;
3389 #ifdef BGE_IPMI_ASF
3390 uint_t asf_mode;
3391 asf_mode = ASF_MODE_NONE;
3392 #endif
3393
3394 BGE_GTRACE(("bge_detach($%p, %d)", (void *)devinfo, cmd));
3395
3396 bgep = ddi_get_driver_private(devinfo);
3397
3398 switch (cmd) {
3399 default:
3400 return (DDI_FAILURE);
3401
3402 case DDI_SUSPEND:
3403 return (bge_suspend(bgep));
3404
3405 case DDI_DETACH:
3406 break;
3407 }
3408
3409 #ifdef BGE_IPMI_ASF
3410 mutex_enter(bgep->genlock);
3411 if (bgep->asf_enabled && ((bgep->asf_status == ASF_STAT_RUN) ||
3412 (bgep->asf_status == ASF_STAT_RUN_INIT))) {
3413
3414 bge_asf_update_status(bgep);
3415 if (bgep->asf_status == ASF_STAT_RUN) {
3416 bge_asf_stop_timer(bgep);
3417 }
3418 bgep->asf_status = ASF_STAT_STOP;
3419
3420 bge_asf_pre_reset_operations(bgep, BGE_SHUTDOWN_RESET);
3421
3422 if (bgep->asf_pseudostop) {
3423 bge_chip_stop(bgep, B_FALSE);
3424 bgep->bge_mac_state = BGE_MAC_STOPPED;
3425 bgep->asf_pseudostop = B_FALSE;
3426 }
3427
3428 asf_mode = ASF_MODE_POST_SHUTDOWN;
3429
3430 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK)
3431 ddi_fm_service_impact(bgep->devinfo,
3432 DDI_SERVICE_UNAFFECTED);
3433 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK)
3434 ddi_fm_service_impact(bgep->devinfo,
3435 DDI_SERVICE_UNAFFECTED);
3436 }
3437 mutex_exit(bgep->genlock);
3438 #endif
3439
3440 /*
3441 * Unregister from the GLD subsystem. This can fail, in
3442 * particular if there are DLPI style-2 streams still open -
3443 * in which case we just return failure without shutting
3444 * down chip operations.
3445 */
3446 if (mac_unregister(bgep->mh) != 0)
3447 return (DDI_FAILURE);
3448
3449 /*
3450 * All activity stopped, so we can clean up & exit
3451 */
3452 #ifdef BGE_IPMI_ASF
3453 bge_unattach(bgep, asf_mode);
3454 #else
3455 bge_unattach(bgep);
3456 #endif
3457 return (DDI_SUCCESS);
3458 }
3459
3460
3461 /*
3462 * ========== Module Loading Data & Entry Points ==========
3463 */
3464
3465 #undef BGE_DBG
3466 #define BGE_DBG BGE_DBG_INIT /* debug flag for this code */
3467
3468 DDI_DEFINE_STREAM_OPS(bge_dev_ops, nulldev, nulldev, bge_attach, bge_detach,
3469 nodev, NULL, D_MP, NULL);
3470
3471 static struct modldrv bge_modldrv = {
3472 &mod_driverops, /* Type of module. This one is a driver */
3473 bge_ident, /* short description */
3474 &bge_dev_ops /* driver specific ops */
3475 };
3476
3477 static struct modlinkage modlinkage = {
3478 MODREV_1, (void *)&bge_modldrv, NULL
3479 };
3480
3481
3482 int
3483 _info(struct modinfo *modinfop)
3484 {
3485 return (mod_info(&modlinkage, modinfop));
3486 }
3487
3488 int
3489 _init(void)
3490 {
3491 int status;
3492
3493 mac_init_ops(&bge_dev_ops, "bge");
3494 status = mod_install(&modlinkage);
3495 if (status == DDI_SUCCESS)
3496 mutex_init(bge_log_mutex, NULL, MUTEX_DRIVER, NULL);
3497 else
3498 mac_fini_ops(&bge_dev_ops);
3499 return (status);
3500 }
3501
3502 int
3503 _fini(void)
3504 {
3505 int status;
3506
3507 status = mod_remove(&modlinkage);
3508 if (status == DDI_SUCCESS) {
3509 mac_fini_ops(&bge_dev_ops);
3510 mutex_destroy(bge_log_mutex);
3511 }
3512 return (status);
3513 }
3514
3515
3516 /*
3517 * bge_add_intrs:
3518 *
3519 * Register FIXED or MSI interrupts.
3520 */
3521 static int
3522 bge_add_intrs(bge_t *bgep, int intr_type)
3523 {
3524 dev_info_t *dip = bgep->devinfo;
3525 int avail, actual, intr_size, count = 0;
3526 int i, flag, ret;
3527
3528 BGE_DEBUG(("bge_add_intrs($%p, 0x%x)", (void *)bgep, intr_type));
3529
3530 /* Get number of interrupts */
3531 ret = ddi_intr_get_nintrs(dip, intr_type, &count);
3532 if ((ret != DDI_SUCCESS) || (count == 0)) {
3533 bge_error(bgep, "ddi_intr_get_nintrs() failure, ret: %d, "
3534 "count: %d", ret, count);
3535
3536 return (DDI_FAILURE);
3537 }
3538
3539 /* Get number of available interrupts */
3540 ret = ddi_intr_get_navail(dip, intr_type, &avail);
3541 if ((ret != DDI_SUCCESS) || (avail == 0)) {
3542 bge_error(bgep, "ddi_intr_get_navail() failure, "
3543 "ret: %d, avail: %d\n", ret, avail);
3544
3545 return (DDI_FAILURE);
3546 }
3547
3548 if (avail < count) {
3549 BGE_DEBUG(("%s: nintrs() returned %d, navail returned %d",
3550 bgep->ifname, count, avail));
3551 }
3552
3553 /*
3554 * BGE hardware generates only a single MSI even though it claims
3555 * to support multiple MSIs, so hard-code the MSI count to 1.
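 *
 * DDI_INTR_ALLOC_STRICT below makes ddi_intr_alloc(9F) fail outright
 * unless it can supply exactly the requested count (one, here),
 * whereas DDI_INTR_ALLOC_NORMAL permits it to return fewer vectors
 * than were requested.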
3556 */ 3557 if (intr_type == DDI_INTR_TYPE_MSI) { 3558 count = 1; 3559 flag = DDI_INTR_ALLOC_STRICT; 3560 } else { 3561 flag = DDI_INTR_ALLOC_NORMAL; 3562 } 3563 3564 /* Allocate an array of interrupt handles */ 3565 intr_size = count * sizeof (ddi_intr_handle_t); 3566 bgep->htable = kmem_alloc(intr_size, KM_SLEEP); 3567 3568 /* Call ddi_intr_alloc() */ 3569 ret = ddi_intr_alloc(dip, bgep->htable, intr_type, 0, 3570 count, &actual, flag); 3571 3572 if ((ret != DDI_SUCCESS) || (actual == 0)) { 3573 bge_error(bgep, "ddi_intr_alloc() failed %d\n", ret); 3574 3575 kmem_free(bgep->htable, intr_size); 3576 return (DDI_FAILURE); 3577 } 3578 3579 if (actual < count) { 3580 BGE_DEBUG(("%s: Requested: %d, Received: %d", 3581 bgep->ifname, count, actual)); 3582 } 3583 3584 bgep->intr_cnt = actual; 3585 3586 /* 3587 * Get priority for first msi, assume remaining are all the same 3588 */ 3589 if ((ret = ddi_intr_get_pri(bgep->htable[0], &bgep->intr_pri)) != 3590 DDI_SUCCESS) { 3591 bge_error(bgep, "ddi_intr_get_pri() failed %d\n", ret); 3592 3593 /* Free already allocated intr */ 3594 for (i = 0; i < actual; i++) { 3595 (void) ddi_intr_free(bgep->htable[i]); 3596 } 3597 3598 kmem_free(bgep->htable, intr_size); 3599 return (DDI_FAILURE); 3600 } 3601 3602 /* Call ddi_intr_add_handler() */ 3603 for (i = 0; i < actual; i++) { 3604 if ((ret = ddi_intr_add_handler(bgep->htable[i], bge_intr, 3605 (caddr_t)bgep, (caddr_t)(uintptr_t)i)) != DDI_SUCCESS) { 3606 bge_error(bgep, "ddi_intr_add_handler() " 3607 "failed %d\n", ret); 3608 3609 /* Free already allocated intr */ 3610 for (i = 0; i < actual; i++) { 3611 (void) ddi_intr_free(bgep->htable[i]); 3612 } 3613 3614 kmem_free(bgep->htable, intr_size); 3615 return (DDI_FAILURE); 3616 } 3617 } 3618 3619 if ((ret = ddi_intr_get_cap(bgep->htable[0], &bgep->intr_cap)) 3620 != DDI_SUCCESS) { 3621 bge_error(bgep, "ddi_intr_get_cap() failed %d\n", ret); 3622 3623 for (i = 0; i < actual; i++) { 3624 (void) ddi_intr_remove_handler(bgep->htable[i]); 3625 (void) ddi_intr_free(bgep->htable[i]); 3626 } 3627 3628 kmem_free(bgep->htable, intr_size); 3629 return (DDI_FAILURE); 3630 } 3631 3632 return (DDI_SUCCESS); 3633 } 3634 3635 /* 3636 * bge_rem_intrs: 3637 * 3638 * Unregister FIXED or MSI interrupts 3639 */ 3640 static void 3641 bge_rem_intrs(bge_t *bgep) 3642 { 3643 int i; 3644 3645 BGE_DEBUG(("bge_rem_intrs($%p)", (void *)bgep)); 3646 3647 /* Call ddi_intr_remove_handler() */ 3648 for (i = 0; i < bgep->intr_cnt; i++) { 3649 (void) ddi_intr_remove_handler(bgep->htable[i]); 3650 (void) ddi_intr_free(bgep->htable[i]); 3651 } 3652 3653 kmem_free(bgep->htable, bgep->intr_cnt * sizeof (ddi_intr_handle_t)); 3654 } 3655 3656 3657 void 3658 bge_intr_enable(bge_t *bgep) 3659 { 3660 int i; 3661 3662 if (bgep->intr_cap & DDI_INTR_FLAG_BLOCK) { 3663 /* Call ddi_intr_block_enable() for MSI interrupts */ 3664 (void) ddi_intr_block_enable(bgep->htable, bgep->intr_cnt); 3665 } else { 3666 /* Call ddi_intr_enable for MSI or FIXED interrupts */ 3667 for (i = 0; i < bgep->intr_cnt; i++) { 3668 (void) ddi_intr_enable(bgep->htable[i]); 3669 } 3670 } 3671 } 3672 3673 3674 void 3675 bge_intr_disable(bge_t *bgep) 3676 { 3677 int i; 3678 3679 if (bgep->intr_cap & DDI_INTR_FLAG_BLOCK) { 3680 /* Call ddi_intr_block_disable() */ 3681 (void) ddi_intr_block_disable(bgep->htable, bgep->intr_cnt); 3682 } else { 3683 for (i = 0; i < bgep->intr_cnt; i++) { 3684 (void) ddi_intr_disable(bgep->htable[i]); 3685 } 3686 } 3687 } 3688 3689 int 3690 bge_reprogram(bge_t *bgep) 3691 { 3692 int status = 0; 3693 3694 
ASSERT(mutex_owned(bgep->genlock)); 3695 3696 if (bge_phys_update(bgep) != DDI_SUCCESS) { 3697 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 3698 status = IOC_INVAL; 3699 } 3700 #ifdef BGE_IPMI_ASF 3701 if (bge_chip_sync(bgep, B_TRUE) == DDI_FAILURE) { 3702 #else 3703 if (bge_chip_sync(bgep) == DDI_FAILURE) { 3704 #endif 3705 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 3706 status = IOC_INVAL; 3707 } 3708 if (bgep->intr_type == DDI_INTR_TYPE_MSI) 3709 bge_chip_msi_trig(bgep); 3710 return (status); 3711 } 3712