/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include "bge_impl.h"
#include <sys/sdt.h>
#include <sys/dld.h>

/*
 * This is the string displayed by modinfo, etc.
 * Make sure you keep the version ID up to date!
 */
static char bge_ident[] = "Broadcom Gb Ethernet v0.61";

/*
 * Property names
 */
static char debug_propname[] = "bge-debug-flags";
static char clsize_propname[] = "cache-line-size";
static char latency_propname[] = "latency-timer";
static char localmac_boolname[] = "local-mac-address?";
static char localmac_propname[] = "local-mac-address";
static char macaddr_propname[] = "mac-address";
static char subdev_propname[] = "subsystem-id";
static char subven_propname[] = "subsystem-vendor-id";
static char rxrings_propname[] = "bge-rx-rings";
static char txrings_propname[] = "bge-tx-rings";
static char fm_cap[] = "fm-capable";
static char default_mtu[] = "default_mtu";

static int bge_add_intrs(bge_t *, int);
static void bge_rem_intrs(bge_t *);

/*
 * Describes the chip's DMA engine
 */
static ddi_dma_attr_t dma_attr = {
	DMA_ATTR_V0,			/* dma_attr version	*/
	0x0000000000000000ull,		/* dma_attr_addr_lo	*/
	0xFFFFFFFFFFFFFFFFull,		/* dma_attr_addr_hi	*/
	0x00000000FFFFFFFFull,		/* dma_attr_count_max	*/
	0x0000000000000001ull,		/* dma_attr_align	*/
	0x00000FFF,			/* dma_attr_burstsizes	*/
	0x00000001,			/* dma_attr_minxfer	*/
	0x000000000000FFFFull,		/* dma_attr_maxxfer	*/
	0xFFFFFFFFFFFFFFFFull,		/* dma_attr_seg		*/
	1,				/* dma_attr_sgllen	*/
	0x00000001,			/* dma_attr_granular	*/
	DDI_DMA_FLAGERR			/* dma_attr_flags	*/
};
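/*
 * Note: dma_attr_sgllen is 1, so every binding done against these
 * attributes must resolve to a single, physically contiguous DMA
 * cookie; bge_alloc_dma_mem() below relies on this when it checks
 * for ncookies == 1 after ddi_dma_addr_bind_handle().
 */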
/*
 * PIO access attributes for registers
 */
static ddi_device_acc_attr_t bge_reg_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC,
	DDI_FLAGERR_ACC
};

/*
 * DMA access attributes for descriptors: NOT to be byte swapped.
 */
static ddi_device_acc_attr_t bge_desc_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC,
	DDI_FLAGERR_ACC
};

/*
 * DMA access attributes for data: NOT to be byte swapped.
 */
static ddi_device_acc_attr_t bge_data_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC
};

/*
 * Versions of the O/S up to Solaris 8 didn't support network booting
 * from any network interface except the first (NET0).  Patching this
 * flag to a non-zero value will tell the driver to work around this
 * limitation by creating an extra (internal) pathname node.  To do
 * this, just add a line like the following to the CLIENT'S etc/system
 * file ON THE ROOT FILESYSTEM SERVER before booting the client:
 *
 *	set bge:bge_net1_boot_support = 1;
 */
static uint32_t bge_net1_boot_support = 1;

static int		bge_m_start(void *);
static void		bge_m_stop(void *);
static int		bge_m_promisc(void *, boolean_t);
static int		bge_m_multicst(void *, boolean_t, const uint8_t *);
static int		bge_m_unicst(void *, const uint8_t *);
static void		bge_m_resources(void *);
static void		bge_m_ioctl(void *, queue_t *, mblk_t *);
static boolean_t	bge_m_getcapab(void *, mac_capab_t, void *);
static int		bge_unicst_set(void *, const uint8_t *,
    mac_addr_slot_t);
static int		bge_m_unicst_add(void *, mac_multi_addr_t *);
static int		bge_m_unicst_remove(void *, mac_addr_slot_t);
static int		bge_m_unicst_modify(void *, mac_multi_addr_t *);
static int		bge_m_unicst_get(void *, mac_multi_addr_t *);
static int		bge_m_setprop(void *, const char *, mac_prop_id_t,
    uint_t, const void *);
static int		bge_m_getprop(void *, const char *, mac_prop_id_t,
    uint_t, void *);
static int		bge_set_priv_prop(bge_t *, const char *, uint_t,
    const void *);
static int		bge_get_priv_prop(bge_t *, const char *, uint_t,
    void *);

#define	BGE_M_CALLBACK_FLAGS\
	(MC_RESOURCES | MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP)

static mac_callbacks_t bge_m_callbacks = {
	BGE_M_CALLBACK_FLAGS,
	bge_m_stat,
	bge_m_start,
	bge_m_stop,
	bge_m_promisc,
	bge_m_multicst,
	bge_m_unicst,
	bge_m_tx,
	bge_m_resources,
	bge_m_ioctl,
	bge_m_getcapab,
	NULL,
	NULL,
	bge_m_setprop,
	bge_m_getprop
};
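/*
 * Each MC_* bit in BGE_M_CALLBACK_FLAGS advertises one of the optional
 * entry points filled in above (m_resources, m_ioctl, m_getcapab,
 * m_setprop, m_getprop); the two NULL slots are optional entry points
 * this driver does not implement.  Keep the flags and the callback
 * vector in step if either is changed.
 */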
/*
 * ========== Transmit and receive ring reinitialisation ==========
 */

/*
 * These <reinit> routines each reset the specified ring to an initial
 * state, assuming that the corresponding <init> routine has already
 * been called exactly once.
 */

static void
bge_reinit_send_ring(send_ring_t *srp)
{
	bge_queue_t *txbuf_queue;
	bge_queue_item_t *txbuf_head;
	sw_txbuf_t *txbuf;
	sw_sbd_t *ssbdp;
	uint32_t slot;

	/*
	 * Reinitialise control variables ...
	 */
	srp->tx_flow = 0;
	srp->tx_next = 0;
	srp->txfill_next = 0;
	srp->tx_free = srp->desc.nslots;
	ASSERT(mutex_owned(srp->tc_lock));
	srp->tc_next = 0;
	srp->txpkt_next = 0;
	srp->tx_block = 0;
	srp->tx_nobd = 0;
	srp->tx_nobuf = 0;

	/*
	 * Initialize the tx buffer push queue
	 */
	mutex_enter(srp->freetxbuf_lock);
	mutex_enter(srp->txbuf_lock);
	txbuf_queue = &srp->freetxbuf_queue;
	txbuf_queue->head = NULL;
	txbuf_queue->count = 0;
	txbuf_queue->lock = srp->freetxbuf_lock;
	srp->txbuf_push_queue = txbuf_queue;

	/*
	 * Initialize the tx buffer pop queue
	 */
	txbuf_queue = &srp->txbuf_queue;
	txbuf_queue->head = NULL;
	txbuf_queue->count = 0;
	txbuf_queue->lock = srp->txbuf_lock;
	srp->txbuf_pop_queue = txbuf_queue;
	txbuf_head = srp->txbuf_head;
	txbuf = srp->txbuf;
	for (slot = 0; slot < srp->tx_buffers; ++slot) {
		txbuf_head->item = txbuf;
		txbuf_head->next = txbuf_queue->head;
		txbuf_queue->head = txbuf_head;
		txbuf_queue->count++;
		txbuf++;
		txbuf_head++;
	}
	mutex_exit(srp->txbuf_lock);
	mutex_exit(srp->freetxbuf_lock);

	/*
	 * Zero and sync all the h/w Send Buffer Descriptors
	 */
	DMA_ZERO(srp->desc);
	DMA_SYNC(srp->desc, DDI_DMA_SYNC_FORDEV);
	bzero(srp->pktp, BGE_SEND_BUF_MAX * sizeof (*srp->pktp));
	ssbdp = srp->sw_sbds;
	for (slot = 0; slot < srp->desc.nslots; ++ssbdp, ++slot)
		ssbdp->pbuf = NULL;
}

static void
bge_reinit_recv_ring(recv_ring_t *rrp)
{
	/*
	 * Reinitialise control variables ...
	 */
	rrp->rx_next = 0;
}

static void
bge_reinit_buff_ring(buff_ring_t *brp, uint32_t ring)
{
	bge_rbd_t *hw_rbd_p;
	sw_rbd_t *srbdp;
	uint32_t bufsize;
	uint32_t nslots;
	uint32_t slot;

	static uint16_t ring_type_flag[BGE_BUFF_RINGS_MAX] = {
		RBD_FLAG_STD_RING,
		RBD_FLAG_JUMBO_RING,
		RBD_FLAG_MINI_RING
	};

	/*
	 * Zero, initialise and sync all the h/w Receive Buffer Descriptors
	 * Note: all the remaining fields (<type>, <flags>, <ip_cksum>,
	 * <tcp_udp_cksum>, <error_flag>, <vlan_tag>, and <reserved>)
	 * should be zeroed, and so don't need to be set up specifically
	 * once the whole area has been cleared.
	 */
	DMA_ZERO(brp->desc);

	hw_rbd_p = DMA_VPTR(brp->desc);
	nslots = brp->desc.nslots;
	ASSERT(brp->buf[0].nslots == nslots/BGE_SPLIT);
	bufsize = brp->buf[0].size;
	srbdp = brp->sw_rbds;
	for (slot = 0; slot < nslots; ++hw_rbd_p, ++srbdp, ++slot) {
		hw_rbd_p->host_buf_addr = srbdp->pbuf.cookie.dmac_laddress;
		hw_rbd_p->index = slot;
		hw_rbd_p->len = bufsize;
		hw_rbd_p->opaque = srbdp->pbuf.token;
		hw_rbd_p->flags |= ring_type_flag[ring];
	}

	DMA_SYNC(brp->desc, DDI_DMA_SYNC_FORDEV);

	/*
	 * Finally, reinitialise the ring control variables ...
	 */
	brp->rf_next = (nslots != 0) ? (nslots-1) : 0;
}

/*
 * Reinitialize all rings
 */
static void
bge_reinit_rings(bge_t *bgep)
{
	uint32_t ring;

	ASSERT(mutex_owned(bgep->genlock));

	/*
	 * Send Rings ...
	 */
	for (ring = 0; ring < bgep->chipid.tx_rings; ++ring)
		bge_reinit_send_ring(&bgep->send[ring]);

	/*
	 * Receive Return Rings ...
	 */
	for (ring = 0; ring < bgep->chipid.rx_rings; ++ring)
		bge_reinit_recv_ring(&bgep->recv[ring]);

	/*
	 * Receive Producer Rings ...
	 */
	for (ring = 0; ring < BGE_BUFF_RINGS_USED; ++ring)
		bge_reinit_buff_ring(&bgep->buff[ring], ring);
}

/*
 * ========== Internal state management entry points ==========
 */

#undef	BGE_DBG
#define	BGE_DBG		BGE_DBG_NEMO	/* debug flag for this code	*/

/*
 * These routines provide all the functionality required by the
 * corresponding GLD entry points, but don't update the GLD state
 * so they can be called internally without disturbing our record
 * of what GLD thinks we should be doing ...
 */

/*
 *	bge_reset() -- reset h/w & rings to initial state
 */
static int
#ifdef BGE_IPMI_ASF
bge_reset(bge_t *bgep, uint_t asf_mode)
#else
bge_reset(bge_t *bgep)
#endif
{
	uint32_t ring;
	int retval;

	BGE_TRACE(("bge_reset($%p)", (void *)bgep));

	ASSERT(mutex_owned(bgep->genlock));

	/*
	 * Grab all the other mutexes in the world (this should
	 * ensure no other threads are manipulating driver state)
	 */
	for (ring = 0; ring < BGE_RECV_RINGS_MAX; ++ring)
		mutex_enter(bgep->recv[ring].rx_lock);
	for (ring = 0; ring < BGE_BUFF_RINGS_MAX; ++ring)
		mutex_enter(bgep->buff[ring].rf_lock);
	rw_enter(bgep->errlock, RW_WRITER);
	for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring)
		mutex_enter(bgep->send[ring].tx_lock);
	for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring)
		mutex_enter(bgep->send[ring].tc_lock);
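	/*
	 * The acquisition order above (rx_lock, rf_lock, errlock,
	 * tx_lock, tc_lock, all under genlock) defines the lock
	 * ordering for the driver; any code path that takes more
	 * than one of these locks must take them in this order to
	 * avoid deadlocking against a reset.
	 */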
#ifdef BGE_IPMI_ASF
	retval = bge_chip_reset(bgep, B_TRUE, asf_mode);
#else
	retval = bge_chip_reset(bgep, B_TRUE);
#endif
	bge_reinit_rings(bgep);

	/*
	 * Free the world ...
	 */
	for (ring = BGE_SEND_RINGS_MAX; ring-- > 0; )
		mutex_exit(bgep->send[ring].tc_lock);
	for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring)
		mutex_exit(bgep->send[ring].tx_lock);
	rw_exit(bgep->errlock);
	for (ring = BGE_BUFF_RINGS_MAX; ring-- > 0; )
		mutex_exit(bgep->buff[ring].rf_lock);
	for (ring = BGE_RECV_RINGS_MAX; ring-- > 0; )
		mutex_exit(bgep->recv[ring].rx_lock);

	BGE_DEBUG(("bge_reset($%p) done", (void *)bgep));
	return (retval);
}

/*
 *	bge_stop() -- stop processing, don't reset h/w or rings
 */
static void
bge_stop(bge_t *bgep)
{
	BGE_TRACE(("bge_stop($%p)", (void *)bgep));

	ASSERT(mutex_owned(bgep->genlock));

#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled) {
		bgep->asf_pseudostop = B_TRUE;
	} else {
#endif
		bge_chip_stop(bgep, B_FALSE);
#ifdef BGE_IPMI_ASF
	}
#endif

	BGE_DEBUG(("bge_stop($%p) done", (void *)bgep));
}
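/*
 * Note: when ASF is enabled the chip is deliberately left running so
 * that the IPMI/ASF sideband traffic can continue; asf_pseudostop
 * records this "pretend" stop, and bge_m_start() uses it to skip the
 * full reset when the interface is brought straight back up.
 */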
/*
 *	bge_start() -- start transmitting/receiving
 */
static int
bge_start(bge_t *bgep, boolean_t reset_phys)
{
	int retval;

	BGE_TRACE(("bge_start($%p, %d)", (void *)bgep, reset_phys));

	ASSERT(mutex_owned(bgep->genlock));

	/*
	 * Start chip processing, including enabling interrupts
	 */
	retval = bge_chip_start(bgep, reset_phys);

	BGE_DEBUG(("bge_start($%p, %d) done", (void *)bgep, reset_phys));
	return (retval);
}

/*
 * bge_restart - restart transmitting/receiving after error or suspend
 */
int
bge_restart(bge_t *bgep, boolean_t reset_phys)
{
	int retval = DDI_SUCCESS;
	ASSERT(mutex_owned(bgep->genlock));

#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled) {
		if (bge_reset(bgep, ASF_MODE_POST_INIT) != DDI_SUCCESS)
			retval = DDI_FAILURE;
	} else
		if (bge_reset(bgep, ASF_MODE_NONE) != DDI_SUCCESS)
			retval = DDI_FAILURE;
#else
	if (bge_reset(bgep) != DDI_SUCCESS)
		retval = DDI_FAILURE;
#endif
	if (bgep->bge_mac_state == BGE_MAC_STARTED) {
		if (bge_start(bgep, reset_phys) != DDI_SUCCESS)
			retval = DDI_FAILURE;
		bgep->watchdog = 0;
		ddi_trigger_softintr(bgep->drain_id);
	}

	BGE_DEBUG(("bge_restart($%p, %d) done", (void *)bgep, reset_phys));
	return (retval);
}


/*
 * ========== Nemo-required management entry points ==========
 */

#undef	BGE_DBG
#define	BGE_DBG		BGE_DBG_NEMO	/* debug flag for this code	*/

/*
 *	bge_m_stop() -- stop transmitting/receiving
 */
static void
bge_m_stop(void *arg)
{
	bge_t *bgep = arg;		/* private device info	*/
	send_ring_t *srp;
	uint32_t ring;

	BGE_TRACE(("bge_m_stop($%p)", arg));

	/*
	 * Just stop processing, then record new GLD state
	 */
	mutex_enter(bgep->genlock);
	if (!(bgep->progress & PROGRESS_INTR)) {
		/* can happen during autorecovery */
		mutex_exit(bgep->genlock);
		return;
	}
	bge_stop(bgep);
	/*
	 * Free any extra tx buffer arrays allocated in the tx process
	 */
#ifdef BGE_IPMI_ASF
	if (!bgep->asf_pseudostop)
#endif
	{
		rw_enter(bgep->errlock, RW_WRITER);
		for (ring = 0; ring < bgep->chipid.tx_rings; ++ring) {
			srp = &bgep->send[ring];
			mutex_enter(srp->tx_lock);
			if (srp->tx_array > 1)
				bge_free_txbuf_arrays(srp);
			mutex_exit(srp->tx_lock);
		}
		rw_exit(bgep->errlock);
	}
	bgep->bge_mac_state = BGE_MAC_STOPPED;
	BGE_DEBUG(("bge_m_stop($%p) done", arg));
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK)
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_UNAFFECTED);
	mutex_exit(bgep->genlock);
}

/*
 *	bge_m_start() -- start transmitting/receiving
 */
static int
bge_m_start(void *arg)
{
	bge_t *bgep = arg;		/* private device info	*/

	BGE_TRACE(("bge_m_start($%p)", arg));

	/*
	 * Start processing and record new GLD state
	 */
	mutex_enter(bgep->genlock);
	if (!(bgep->progress & PROGRESS_INTR)) {
		/* can happen during autorecovery */
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled) {
		if ((bgep->asf_status == ASF_STAT_RUN) &&
		    (bgep->asf_pseudostop)) {
			bgep->bge_mac_state = BGE_MAC_STARTED;
			mutex_exit(bgep->genlock);
			return (0);
		}
	}
	if (bge_reset(bgep, ASF_MODE_INIT) != DDI_SUCCESS) {
#else
	if (bge_reset(bgep) != DDI_SUCCESS) {
#endif
		(void) bge_check_acc_handle(bgep, bgep->cfg_handle);
		(void) bge_check_acc_handle(bgep, bgep->io_handle);
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	if (bge_start(bgep, B_TRUE) != DDI_SUCCESS) {
		(void) bge_check_acc_handle(bgep, bgep->cfg_handle);
		(void) bge_check_acc_handle(bgep, bgep->io_handle);
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	bgep->bge_mac_state = BGE_MAC_STARTED;
	BGE_DEBUG(("bge_m_start($%p) done", arg));

	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled) {
		if (bgep->asf_status != ASF_STAT_RUN) {
			/* start ASF heart beat */
			bgep->asf_timeout_id = timeout(bge_asf_heartbeat,
			    (void *)bgep,
			    drv_usectohz(BGE_ASF_HEARTBEAT_INTERVAL));
			bgep->asf_status = ASF_STAT_RUN;
		}
	}
#endif
	mutex_exit(bgep->genlock);

	return (0);
}

/*
 *	bge_m_unicst() -- set the physical network address
 */
static int
bge_m_unicst(void *arg, const uint8_t *macaddr)
{
	/*
	 * Request to set address in
	 * address slot 0, i.e., default address
	 */
	return (bge_unicst_set(arg, macaddr, 0));
}

/*
 *	bge_unicst_set() -- set the physical network address
 */
static int
bge_unicst_set(void *arg, const uint8_t *macaddr, mac_addr_slot_t slot)
{
	bge_t *bgep = arg;		/* private device info	*/

	BGE_TRACE(("bge_m_unicst_set($%p, %s)", arg,
	    ether_sprintf((void *)macaddr)));
	/*
	 * Remember the new current address in the driver state
	 * Sync the chip's idea of the address too ...
	 */
	mutex_enter(bgep->genlock);
	if (!(bgep->progress & PROGRESS_INTR)) {
		/* can happen during autorecovery */
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	ethaddr_copy(macaddr, bgep->curr_addr[slot].addr);
#ifdef BGE_IPMI_ASF
	if (bge_chip_sync(bgep, B_FALSE) == DDI_FAILURE) {
#else
	if (bge_chip_sync(bgep) == DDI_FAILURE) {
#endif
		(void) bge_check_acc_handle(bgep, bgep->cfg_handle);
		(void) bge_check_acc_handle(bgep, bgep->io_handle);
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled) {
		/*
		 * The above bge_chip_sync() function wrote the ethernet MAC
		 * addresses registers which destroyed the IPMI/ASF sideband.
		 * Here, we have to reset chip to make IPMI/ASF sideband work.
		 */
		if (bgep->asf_status == ASF_STAT_RUN) {
			/*
			 * We must stop ASF heart beat before bge_chip_stop(),
			 * otherwise some computers (ex. IBM HS20 blade
			 * server) may crash.
			 */
			bge_asf_update_status(bgep);
			bge_asf_stop_timer(bgep);
			bgep->asf_status = ASF_STAT_STOP;

			bge_asf_pre_reset_operations(bgep, BGE_INIT_RESET);
		}
		bge_chip_stop(bgep, B_FALSE);

		if (bge_restart(bgep, B_FALSE) == DDI_FAILURE) {
			(void) bge_check_acc_handle(bgep, bgep->cfg_handle);
			(void) bge_check_acc_handle(bgep, bgep->io_handle);
			ddi_fm_service_impact(bgep->devinfo,
			    DDI_SERVICE_DEGRADED);
			mutex_exit(bgep->genlock);
			return (EIO);
		}

		/*
		 * Start our ASF heartbeat counter as soon as possible.
		 */
		if (bgep->asf_status != ASF_STAT_RUN) {
			/* start ASF heart beat */
			bgep->asf_timeout_id = timeout(bge_asf_heartbeat,
			    (void *)bgep,
			    drv_usectohz(BGE_ASF_HEARTBEAT_INTERVAL));
			bgep->asf_status = ASF_STAT_RUN;
		}
	}
#endif
	BGE_DEBUG(("bge_m_unicst_set($%p) done", arg));
	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	mutex_exit(bgep->genlock);

	return (0);
}

/*
 * The following four routines are used as callbacks for multiple MAC
 * address support:
 *    -  bge_m_unicst_add(void *, mac_multi_addr_t *);
 *    -  bge_m_unicst_remove(void *, mac_addr_slot_t);
 *    -  bge_m_unicst_modify(void *, mac_multi_addr_t *);
 *    -  bge_m_unicst_get(void *, mac_multi_addr_t *);
 */

/*
 * bge_m_unicst_add() - will find an unused address slot, set the
 * address value to the one specified, reserve that slot and enable
 * the NIC to start filtering on the new MAC address.
 * Returns 0 on success.
 */
static int
bge_m_unicst_add(void *arg, mac_multi_addr_t *maddr)
{
	bge_t *bgep = arg;		/* private device info	*/
	mac_addr_slot_t slot;
	int err;

	if (mac_unicst_verify(bgep->mh,
	    maddr->mma_addr, maddr->mma_addrlen) == B_FALSE)
		return (EINVAL);

	mutex_enter(bgep->genlock);
	if (bgep->unicst_addr_avail == 0) {
		/* no slots available */
		mutex_exit(bgep->genlock);
		return (ENOSPC);
	}

	/*
	 * Primary/default address is in slot 0. The next three
	 * addresses are the multiple MAC addresses. So multiple
	 * MAC address 0 is in slot 1, 1 in slot 2, and so on.
	 * So the first multiple MAC address resides in slot 1.
	 */
	for (slot = 1; slot < bgep->unicst_addr_total; slot++) {
		if (bgep->curr_addr[slot].set == B_FALSE) {
			bgep->curr_addr[slot].set = B_TRUE;
			break;
		}
	}

	ASSERT(slot < bgep->unicst_addr_total);
	bgep->unicst_addr_avail--;
	mutex_exit(bgep->genlock);
	maddr->mma_slot = slot;

	if ((err = bge_unicst_set(bgep, maddr->mma_addr, slot)) != 0) {
		mutex_enter(bgep->genlock);
		bgep->curr_addr[slot].set = B_FALSE;
		bgep->unicst_addr_avail++;
		mutex_exit(bgep->genlock);
	}
	return (err);
}

/*
 * bge_m_unicst_remove() - removes a MAC address that was added by a
 * call to bge_m_unicst_add(). The slot number that was returned in
 * add() is passed in the call to remove the address.
 * Returns 0 on success.
 */
static int
bge_m_unicst_remove(void *arg, mac_addr_slot_t slot)
{
	bge_t *bgep = arg;		/* private device info	*/

	if (slot <= 0 || slot >= bgep->unicst_addr_total)
		return (EINVAL);

	mutex_enter(bgep->genlock);
	if (bgep->curr_addr[slot].set == B_TRUE) {
		bgep->curr_addr[slot].set = B_FALSE;
		bgep->unicst_addr_avail++;
		mutex_exit(bgep->genlock);
		/*
		 * Copy the default address to the passed slot
		 */
		return (bge_unicst_set(bgep, bgep->curr_addr[0].addr, slot));
	}
	mutex_exit(bgep->genlock);
	return (EINVAL);
}
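/*
 * Note: rather than disabling the slot in hardware, remove() rewrites
 * the freed slot with the primary address from slot 0; the slot then
 * merely duplicates the default filter until add() reuses it, so no
 * stale unicast address is ever left programmed into the chip.
 */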
/*
 * bge_m_unicst_modify() - modifies the value of an address that
 * has been added by bge_m_unicst_add().  The new address, address
 * length and the slot number that was returned in the call to add
 * should be passed to bge_m_unicst_modify().  mma_flags should be
 * set to 0.  Returns 0 on success.
 */
static int
bge_m_unicst_modify(void *arg, mac_multi_addr_t *maddr)
{
	bge_t *bgep = arg;		/* private device info	*/
	mac_addr_slot_t slot;

	if (mac_unicst_verify(bgep->mh,
	    maddr->mma_addr, maddr->mma_addrlen) == B_FALSE)
		return (EINVAL);

	slot = maddr->mma_slot;

	if (slot <= 0 || slot >= bgep->unicst_addr_total)
		return (EINVAL);

	mutex_enter(bgep->genlock);
	if (bgep->curr_addr[slot].set == B_TRUE) {
		mutex_exit(bgep->genlock);
		return (bge_unicst_set(bgep, maddr->mma_addr, slot));
	}
	mutex_exit(bgep->genlock);

	return (EINVAL);
}

/*
 * bge_m_unicst_get() - will get the MAC address and all other
 * information related to the address slot passed in mac_multi_addr_t.
 * mma_flags should be set to 0 in the call.
 * On return, mma_flags can take the following values:
 * 1) MMAC_SLOT_UNUSED
 * 2) MMAC_SLOT_USED | MMAC_VENDOR_ADDR
 * 3) MMAC_SLOT_UNUSED | MMAC_VENDOR_ADDR
 * 4) MMAC_SLOT_USED
 */
static int
bge_m_unicst_get(void *arg, mac_multi_addr_t *maddr)
{
	bge_t *bgep = arg;		/* private device info	*/
	mac_addr_slot_t slot;

	slot = maddr->mma_slot;

	if (slot <= 0 || slot >= bgep->unicst_addr_total)
		return (EINVAL);

	mutex_enter(bgep->genlock);
	if (bgep->curr_addr[slot].set == B_TRUE) {
		ethaddr_copy(bgep->curr_addr[slot].addr,
		    maddr->mma_addr);
		maddr->mma_flags = MMAC_SLOT_USED;
	} else {
		maddr->mma_flags = MMAC_SLOT_UNUSED;
	}
	mutex_exit(bgep->genlock);

	return (0);
}

extern void bge_wake_factotum(bge_t *);

static boolean_t
bge_param_locked(mac_prop_id_t pr_num)
{
	/*
	 * All adv_* parameters are locked (read-only) while
	 * the device is in any sort of loopback mode ...
	 */
	switch (pr_num) {
	case DLD_PROP_ADV_1000FDX_CAP:
	case DLD_PROP_EN_1000FDX_CAP:
	case DLD_PROP_ADV_1000HDX_CAP:
	case DLD_PROP_EN_1000HDX_CAP:
	case DLD_PROP_ADV_100FDX_CAP:
	case DLD_PROP_EN_100FDX_CAP:
	case DLD_PROP_ADV_100HDX_CAP:
	case DLD_PROP_EN_100HDX_CAP:
	case DLD_PROP_ADV_10FDX_CAP:
	case DLD_PROP_EN_10FDX_CAP:
	case DLD_PROP_ADV_10HDX_CAP:
	case DLD_PROP_EN_10HDX_CAP:
	case DLD_PROP_AUTONEG:
	case DLD_PROP_FLOWCTRL:
		return (B_TRUE);
	}
	return (B_FALSE);
}

/*
 * callback functions for set/get of properties
 */
static int
bge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
    uint_t pr_valsize, const void *pr_val)
{
	bge_t *bgep = barg;
	int err = 0;
	uint64_t cur_mtu, new_mtu;
	uint_t maxsdu;
	link_flowctrl_t fl;

	mutex_enter(bgep->genlock);
	if (bgep->param_loop_mode != BGE_LOOP_NONE &&
	    bge_param_locked(pr_num)) {
		/*
		 * All adv_* parameters are locked (read-only)
		 * while the device is in any sort of loopback mode.
		 */
		mutex_exit(bgep->genlock);
		return (EBUSY);
	}
	switch (pr_num) {
	case DLD_PROP_EN_1000FDX_CAP:
		bgep->param_en_1000fdx = *(uint8_t *)pr_val;
		bgep->param_adv_1000fdx = *(uint8_t *)pr_val;
		goto reprogram;
	case DLD_PROP_EN_1000HDX_CAP:
		bgep->param_en_1000hdx = *(uint8_t *)pr_val;
		bgep->param_adv_1000hdx = *(uint8_t *)pr_val;
		goto reprogram;
	case DLD_PROP_EN_100FDX_CAP:
		bgep->param_en_100fdx = *(uint8_t *)pr_val;
		bgep->param_adv_100fdx = *(uint8_t *)pr_val;
		goto reprogram;
	case DLD_PROP_EN_100HDX_CAP:
		bgep->param_en_100hdx = *(uint8_t *)pr_val;
		bgep->param_adv_100hdx = *(uint8_t *)pr_val;
		goto reprogram;
	case DLD_PROP_EN_10FDX_CAP:
		bgep->param_en_10fdx = *(uint8_t *)pr_val;
		bgep->param_adv_10fdx = *(uint8_t *)pr_val;
		goto reprogram;
	case DLD_PROP_EN_10HDX_CAP:
		bgep->param_en_10hdx = *(uint8_t *)pr_val;
		bgep->param_adv_10hdx = *(uint8_t *)pr_val;
reprogram:
		if (err == 0 && bge_reprogram(bgep) == IOC_INVAL)
			err = EINVAL;
		break;
	case DLD_PROP_ADV_1000FDX_CAP:
	case DLD_PROP_ADV_1000HDX_CAP:
	case DLD_PROP_ADV_100FDX_CAP:
	case DLD_PROP_ADV_100HDX_CAP:
	case DLD_PROP_ADV_10FDX_CAP:
	case DLD_PROP_ADV_10HDX_CAP:
	case DLD_PROP_STATUS:
	case DLD_PROP_SPEED:
	case DLD_PROP_DUPLEX:
		err = EINVAL; /* read-only prop. Can't set this */
		break;
	case DLD_PROP_AUTONEG:
		bgep->param_adv_autoneg = *(uint8_t *)pr_val;
		if (bge_reprogram(bgep) == IOC_INVAL)
			err = EINVAL;
		break;
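	/*
	 * An MTU change is only legal while the interface is down:
	 * the new value must lie between BGE_DEFAULT_MTU and
	 * BGE_MAXIMUM_MTU (and must not require jumbo frames on a
	 * chip flagged CHIP_FLAG_NO_JUMBO).  Accepting it means
	 * recomputing the chip parameters and scheduling the factotum
	 * to tear down and rebuild the DMA areas, as the case below
	 * does.
	 */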
	case DLD_PROP_DEFMTU:
		cur_mtu = bgep->chipid.default_mtu;
		bcopy(pr_val, &new_mtu, sizeof (new_mtu));
		if (new_mtu == cur_mtu) {
			err = 0;
			break;
		}
		if (new_mtu < BGE_DEFAULT_MTU ||
		    new_mtu > BGE_MAXIMUM_MTU) {
			err = EINVAL;
			break;
		}
		if ((new_mtu > BGE_DEFAULT_MTU) &&
		    (bgep->chipid.flags & CHIP_FLAG_NO_JUMBO)) {
			err = EINVAL;
			break;
		}
		if (bgep->bge_mac_state == BGE_MAC_STARTED) {
			err = EBUSY;
			break;
		}
		bgep->chipid.default_mtu = new_mtu;
		if (bge_chip_id_init(bgep)) {
			err = EINVAL;
			break;
		}
		maxsdu = bgep->chipid.ethmax_size -
		    sizeof (struct ether_header);
		err = mac_maxsdu_update(bgep->mh, maxsdu);
		if (err == 0) {
			bgep->bge_dma_error = B_TRUE;
			bgep->manual_reset = B_TRUE;
			bge_chip_stop(bgep, B_TRUE);
			bge_wake_factotum(bgep);
			err = 0;
		}
		break;
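	/*
	 * The requested flow-control mode maps onto the 802.3x
	 * pause/asym_pause advertisement bits as follows:
	 *
	 *	NONE:	adv_pause = 0, adv_asym_pause = 0
	 *	RX:	adv_pause = 1, adv_asym_pause = 1
	 *	TX:	adv_pause = 0, adv_asym_pause = 1
	 *	BI:	adv_pause = 1
	 *
	 * The RX and TX cases are additionally rejected unless the
	 * link partner's advertisement (param_lp_*) is compatible
	 * with the asymmetric direction requested.
	 */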
	case DLD_PROP_FLOWCTRL:
		bcopy(pr_val, &fl, sizeof (fl));
		switch (fl) {
		default:
			err = EINVAL;
			break;
		case LINK_FLOWCTRL_NONE:
			bgep->param_adv_pause = 0;
			bgep->param_adv_asym_pause = 0;

			bgep->param_link_rx_pause = B_FALSE;
			bgep->param_link_tx_pause = B_FALSE;
			break;
		case LINK_FLOWCTRL_RX:
			if (!((bgep->param_lp_pause == 0) &&
			    (bgep->param_lp_asym_pause == 1))) {
				err = EINVAL;
				break;
			}
			bgep->param_adv_pause = 1;
			bgep->param_adv_asym_pause = 1;

			bgep->param_link_rx_pause = B_TRUE;
			bgep->param_link_tx_pause = B_FALSE;
			break;
		case LINK_FLOWCTRL_TX:
			if (!((bgep->param_lp_pause == 1) &&
			    (bgep->param_lp_asym_pause == 1))) {
				err = EINVAL;
				break;
			}
			bgep->param_adv_pause = 0;
			bgep->param_adv_asym_pause = 1;

			bgep->param_link_rx_pause = B_FALSE;
			bgep->param_link_tx_pause = B_TRUE;
			break;
		case LINK_FLOWCTRL_BI:
			if (bgep->param_lp_pause != 1) {
				err = EINVAL;
				break;
			}
			bgep->param_adv_pause = 1;

			bgep->param_link_rx_pause = B_TRUE;
			bgep->param_link_tx_pause = B_TRUE;
			break;
		}

		if (err == 0) {
			if (bge_reprogram(bgep) == IOC_INVAL)
				err = EINVAL;
		}

		break;
	default:
		err = bge_set_priv_prop(bgep, pr_name, pr_valsize,
		    pr_val);
		break;
	}
	mutex_exit(bgep->genlock);
	return (err);
}

static int
bge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
    uint_t pr_valsize, void *pr_val)
{
	bge_t *bgep = barg;
	int err = 0;
	link_flowctrl_t fl;
	uint64_t tmp = 0;

	bzero(pr_val, pr_valsize);
	switch (pr_num) {
	case DLD_PROP_DUPLEX:
		if (pr_valsize < sizeof (uint8_t))
			return (EINVAL);
		*(uint8_t *)pr_val = bgep->param_link_duplex;
		break;
	case DLD_PROP_SPEED:
		if (pr_valsize < sizeof (uint64_t))
			return (EINVAL);
		tmp = bgep->param_link_speed * 1000000ull;
		bcopy(&tmp, pr_val, sizeof (tmp));
		break;
	case DLD_PROP_STATUS:
		if (pr_valsize < sizeof (uint8_t))
			return (EINVAL);
		*(uint8_t *)pr_val = bgep->param_link_up;
		break;
	case DLD_PROP_AUTONEG:
		if (pr_valsize < sizeof (uint8_t))
			return (EINVAL);
		*(uint8_t *)pr_val = bgep->param_adv_autoneg;
		break;
	case DLD_PROP_DEFMTU: {
		if (pr_valsize < sizeof (uint64_t))
			return (EINVAL);
		tmp = bgep->chipid.default_mtu;
		bcopy(&tmp, pr_val, sizeof (tmp));
		break;
	}
	case DLD_PROP_FLOWCTRL:
		if (pr_valsize < sizeof (link_flowctrl_t))
			return (EINVAL);
		if (bgep->param_link_rx_pause &&
		    !bgep->param_link_tx_pause)
			fl = LINK_FLOWCTRL_RX;

		if (!bgep->param_link_rx_pause &&
		    !bgep->param_link_tx_pause)
			fl = LINK_FLOWCTRL_NONE;

		if (!bgep->param_link_rx_pause &&
		    bgep->param_link_tx_pause)
			fl = LINK_FLOWCTRL_TX;

		if (bgep->param_link_rx_pause &&
		    bgep->param_link_tx_pause)
			fl = LINK_FLOWCTRL_BI;
		bcopy(&fl, pr_val, sizeof (fl));
		break;
	case DLD_PROP_ADV_1000FDX_CAP:
		if (pr_valsize < sizeof (uint8_t))
			return (EINVAL);
		*(uint8_t *)pr_val = bgep->param_adv_1000fdx;
		break;
	case DLD_PROP_EN_1000FDX_CAP:
		if (pr_valsize < sizeof (uint8_t))
			return (EINVAL);
		*(uint8_t *)pr_val = bgep->param_en_1000fdx;
		break;
	case DLD_PROP_ADV_1000HDX_CAP:
		if (pr_valsize < sizeof (uint8_t))
			return (EINVAL);
		*(uint8_t *)pr_val = bgep->param_adv_1000hdx;
		break;
	case DLD_PROP_EN_1000HDX_CAP:
		if (pr_valsize < sizeof (uint8_t))
			return (EINVAL);
		*(uint8_t *)pr_val = bgep->param_en_1000hdx;
		break;
	case DLD_PROP_ADV_100FDX_CAP:
		if (pr_valsize < sizeof (uint8_t))
			return (EINVAL);
		*(uint8_t *)pr_val = bgep->param_adv_100fdx;
		break;
	case DLD_PROP_EN_100FDX_CAP:
		if (pr_valsize < sizeof (uint8_t))
			return (EINVAL);
		*(uint8_t *)pr_val = bgep->param_en_100fdx;
		break;
	case DLD_PROP_ADV_100HDX_CAP:
		if (pr_valsize < sizeof (uint8_t))
			return (EINVAL);
		*(uint8_t *)pr_val = bgep->param_adv_100hdx;
		break;
	case DLD_PROP_EN_100HDX_CAP:
		if (pr_valsize < sizeof (uint8_t))
			return (EINVAL);
		*(uint8_t *)pr_val = bgep->param_en_100hdx;
		break;
	case DLD_PROP_ADV_10FDX_CAP:
		if (pr_valsize < sizeof (uint8_t))
			return (EINVAL);
		*(uint8_t *)pr_val = bgep->param_adv_10fdx;
		break;
	case DLD_PROP_EN_10FDX_CAP:
		if (pr_valsize < sizeof (uint8_t))
			return (EINVAL);
		*(uint8_t *)pr_val = bgep->param_en_10fdx;
		break;
	case DLD_PROP_ADV_10HDX_CAP:
		if (pr_valsize < sizeof (uint8_t))
			return (EINVAL);
		*(uint8_t *)pr_val = bgep->param_adv_10hdx;
		break;
	case DLD_PROP_EN_10HDX_CAP:
		if (pr_valsize < sizeof (uint8_t))
			return (EINVAL);
		*(uint8_t *)pr_val = bgep->param_en_10hdx;
		break;
	default:
		err = bge_get_priv_prop(bgep, pr_name, pr_valsize,
		    pr_val);
		return (err);
	}
	return (0);
}

/* ARGSUSED */
static int
bge_set_priv_prop(bge_t *bgep, const char *pr_name, uint_t pr_valsize,
    const void *pr_val)
{
	int err = 0;
	long result;

	if (strcmp(pr_name, "_drain_max") == 0) {

		/*
		 * On the Tx side, we normally update the h/w register
		 * that triggers transmission once per packet.  The
		 * drain_max parameter is used to reduce that register
		 * traffic: it controls the maximum number of packets
		 * we will hold before updating the bge h/w to trigger
		 * a h/w transmit.  The bge chipset usually has a max
		 * of 512 Tx descriptors, hence the upper bound of 512
		 * on drain_max.
		 */
		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
		if (result > 512 || result < 1)
			err = EINVAL;
		else {
			bgep->param_drain_max = (uint32_t)result;
			if (bge_reprogram(bgep) == IOC_INVAL)
				err = EINVAL;
		}
		return (err);
	}
	if (strcmp(pr_name, "_msi_cnt") == 0) {

		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
		if (result > 7 || result < 0)
			err = EINVAL;
		else {
			bgep->param_msi_cnt = (uint32_t)result;
			if (bge_reprogram(bgep) == IOC_INVAL)
				err = EINVAL;
		}
		return (err);
	}
	if (strcmp(pr_name, "_intr_coalesce_blank_time") == 0) {
		if (ddi_strtol(pr_val, (char **)NULL, 0, &result) != 0) {
			return (EINVAL);
		}

		bgep->chipid.rx_ticks_norm = result;
		return (0);
	}

	if (strcmp(pr_name, "_intr_coalesce_pkt_cnt") == 0) {
		if (ddi_strtol(pr_val, (char **)NULL, 0, &result) != 0)
			return (EINVAL);

		bgep->chipid.rx_count_norm = result;
		return (0);
	}
	return (EINVAL);
}

static int
bge_get_priv_prop(bge_t *bge, const char *pr_name, uint_t pr_valsize,
    void *pr_val)
{
	char valstr[MAXNAMELEN];
	int err = EINVAL;
	uint_t strsize;


	if (strcmp(pr_name, "_drain_max") == 0) {
		(void) sprintf(valstr, "%d", bge->param_drain_max);
		err = 0;
		goto done;
	}
	if (strcmp(pr_name, "_msi_cnt") == 0) {
		(void) sprintf(valstr, "%d", bge->param_msi_cnt);
		err = 0;
		goto done;
	}

	if (strcmp(pr_name, "_intr_coalesce_blank_time") == 0) {
		(void) sprintf(valstr, "%d", bge->chipid.rx_ticks_norm);
		err = 0;
		goto done;
	}

	if (strcmp(pr_name, "_intr_coalesce_pkt_cnt") == 0) {
		(void) sprintf(valstr, "%d", bge->chipid.rx_count_norm);
		err = 0;
		goto done;
	}

done:
	if (err == 0) {
		strsize = (uint_t)strlen(valstr);
		if (pr_valsize < strsize) {
			err = ENOBUFS;
		} else {
			(void) strlcpy(pr_val, valstr, pr_valsize);
		}
	}
	return (err);
}

/*
 * Compute the index of the required bit in the multicast hash map.
 * This must mirror the way the hardware actually does it!
 * See Broadcom document 570X-PG102-R page 125.
 */
static uint32_t
bge_hash_index(const uint8_t *mca)
{
	uint32_t hash;

	CRC32(hash, mca, ETHERADDRL, -1U, crc32_table);

	return (hash);
}
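/*
 * In bge_m_multicst() below, the CRC is folded into a bit position
 * as index = hash % BGE_HASH_TABLE_SIZE, word = index / 32 and
 * bit = 1 << (index % 32).  For example, an index of 75 selects
 * bit 11 (75 % 32) of word 2 (75 / 32) in the hash map.
 */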
/*
 * bge_m_multicst() -- enable/disable a multicast address
 */
static int
bge_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
{
	bge_t *bgep = arg;		/* private device info	*/
	uint32_t hash;
	uint32_t index;
	uint32_t word;
	uint32_t bit;
	uint8_t *refp;

	BGE_TRACE(("bge_m_multicst($%p, %s, %s)", arg,
	    (add) ? "add" : "remove", ether_sprintf((void *)mca)));

	/*
	 * Precalculate all required masks, pointers etc ...
	 */
	hash = bge_hash_index(mca);
	index = hash % BGE_HASH_TABLE_SIZE;
	word = index/32u;
	bit = 1 << (index % 32u);
	refp = &bgep->mcast_refs[index];

	BGE_DEBUG(("bge_m_multicst: hash 0x%x index %d (%d:0x%x) = %d",
	    hash, index, word, bit, *refp));

	/*
	 * We must set the appropriate bit in the hash map (and the
	 * corresponding h/w register) when the refcount goes from 0
	 * to >0, and clear it when the last ref goes away (refcount
	 * goes from >0 back to 0).  If we change the hash map, we
	 * must also update the chip's hardware map registers.
	 */
	mutex_enter(bgep->genlock);
	if (!(bgep->progress & PROGRESS_INTR)) {
		/* can happen during autorecovery */
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	if (add) {
		if ((*refp)++ == 0) {
			bgep->mcast_hash[word] |= bit;
#ifdef BGE_IPMI_ASF
			if (bge_chip_sync(bgep, B_TRUE) == DDI_FAILURE) {
#else
			if (bge_chip_sync(bgep) == DDI_FAILURE) {
#endif
				(void) bge_check_acc_handle(bgep,
				    bgep->cfg_handle);
				(void) bge_check_acc_handle(bgep,
				    bgep->io_handle);
				ddi_fm_service_impact(bgep->devinfo,
				    DDI_SERVICE_DEGRADED);
				mutex_exit(bgep->genlock);
				return (EIO);
			}
		}
	} else {
		if (--(*refp) == 0) {
			bgep->mcast_hash[word] &= ~bit;
#ifdef BGE_IPMI_ASF
			if (bge_chip_sync(bgep, B_TRUE) == DDI_FAILURE) {
#else
			if (bge_chip_sync(bgep) == DDI_FAILURE) {
#endif
				(void) bge_check_acc_handle(bgep,
				    bgep->cfg_handle);
				(void) bge_check_acc_handle(bgep,
				    bgep->io_handle);
				ddi_fm_service_impact(bgep->devinfo,
				    DDI_SERVICE_DEGRADED);
				mutex_exit(bgep->genlock);
				return (EIO);
			}
		}
	}
	BGE_DEBUG(("bge_m_multicst($%p) done", arg));
	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	mutex_exit(bgep->genlock);

	return (0);
}

/*
 * bge_m_promisc() -- set or reset promiscuous mode on the board
 *
 *	Program the hardware to enable/disable promiscuous and/or
 *	receive-all-multicast modes.
 */
static int
bge_m_promisc(void *arg, boolean_t on)
{
	bge_t *bgep = arg;

	BGE_TRACE(("bge_m_promisc_set($%p, %d)", arg, on));

	/*
	 * Store MAC layer specified mode and pass to chip layer to update h/w
	 */
	mutex_enter(bgep->genlock);
	if (!(bgep->progress & PROGRESS_INTR)) {
		/* can happen during autorecovery */
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	bgep->promisc = on;
#ifdef BGE_IPMI_ASF
	if (bge_chip_sync(bgep, B_TRUE) == DDI_FAILURE) {
#else
	if (bge_chip_sync(bgep) == DDI_FAILURE) {
#endif
		(void) bge_check_acc_handle(bgep, bgep->cfg_handle);
		(void) bge_check_acc_handle(bgep, bgep->io_handle);
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	BGE_DEBUG(("bge_m_promisc_set($%p) done", arg));
	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	mutex_exit(bgep->genlock);
	return (0);
}

/*ARGSUSED*/
static boolean_t
bge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
{
	bge_t *bgep = arg;

	switch (cap) {
	case MAC_CAPAB_HCKSUM: {
		uint32_t *txflags = cap_data;

		*txflags = HCKSUM_INET_FULL_V4 | HCKSUM_IPHDRCKSUM;
		break;
	}

	case MAC_CAPAB_POLL:
		/*
		 * There's nothing for us to fill in, simply returning
		 * B_TRUE stating that we support polling is sufficient.
		 */
		break;

	case MAC_CAPAB_MULTIADDRESS: {
		multiaddress_capab_t *mmacp = cap_data;

		mutex_enter(bgep->genlock);
		/*
		 * The number of MAC addresses made available by
		 * this capability is one less than the total as
		 * the primary address in slot 0 is counted in
		 * the total.
		 */
		mmacp->maddr_naddr = bgep->unicst_addr_total - 1;
		mmacp->maddr_naddrfree = bgep->unicst_addr_avail;
		/* No multiple factory addresses, set mma_flag to 0 */
		mmacp->maddr_flag = 0;
		mmacp->maddr_handle = bgep;
		mmacp->maddr_add = bge_m_unicst_add;
		mmacp->maddr_remove = bge_m_unicst_remove;
		mmacp->maddr_modify = bge_m_unicst_modify;
		mmacp->maddr_get = bge_m_unicst_get;
		mmacp->maddr_reserve = NULL;
		mutex_exit(bgep->genlock);
		break;
	}

	default:
		return (B_FALSE);
	}
	return (B_TRUE);
}

/*
 * Loopback ioctl code
 */

static lb_property_t loopmodes[] = {
	{ normal,	"normal",	BGE_LOOP_NONE		},
	{ external,	"1000Mbps",	BGE_LOOP_EXTERNAL_1000	},
	{ external,	"100Mbps",	BGE_LOOP_EXTERNAL_100	},
	{ external,	"10Mbps",	BGE_LOOP_EXTERNAL_10	},
	{ internal,	"PHY",		BGE_LOOP_INTERNAL_PHY	},
	{ internal,	"MAC",		BGE_LOOP_INTERNAL_MAC	}
};
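/*
 * The loopback ioctls below work against this table: LB_GET_INFO_SIZE
 * reports sizeof (loopmodes) so the caller can size its buffer,
 * LB_GET_INFO copies the table out wholesale, and LB_SET_MODE accepts
 * any of the BGE_LOOP_* values listed here.  Adding a new loopback
 * mode therefore means extending both this table and the switch in
 * bge_set_loop_mode().
 */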
static enum ioc_reply
bge_set_loop_mode(bge_t *bgep, uint32_t mode)
{
	/*
	 * If the mode isn't being changed, there's nothing to do ...
	 */
	if (mode == bgep->param_loop_mode)
		return (IOC_ACK);

	/*
	 * Validate the requested mode and prepare a suitable message
	 * to explain the link down/up cycle that the change will
	 * probably induce ...
	 */
	switch (mode) {
	default:
		return (IOC_INVAL);

	case BGE_LOOP_NONE:
	case BGE_LOOP_EXTERNAL_1000:
	case BGE_LOOP_EXTERNAL_100:
	case BGE_LOOP_EXTERNAL_10:
	case BGE_LOOP_INTERNAL_PHY:
	case BGE_LOOP_INTERNAL_MAC:
		break;
	}

	/*
	 * All OK; tell the caller to reprogram
	 * the PHY and/or MAC for the new mode ...
	 */
	bgep->param_loop_mode = mode;
	return (IOC_RESTART_ACK);
}

static enum ioc_reply
bge_loop_ioctl(bge_t *bgep, queue_t *wq, mblk_t *mp, struct iocblk *iocp)
{
	lb_info_sz_t *lbsp;
	lb_property_t *lbpp;
	uint32_t *lbmp;
	int cmd;

	_NOTE(ARGUNUSED(wq))

	/*
	 * Validate format of ioctl
	 */
	if (mp->b_cont == NULL)
		return (IOC_INVAL);

	cmd = iocp->ioc_cmd;
	switch (cmd) {
	default:
		/* NOTREACHED */
		bge_error(bgep, "bge_loop_ioctl: invalid cmd 0x%x", cmd);
		return (IOC_INVAL);

	case LB_GET_INFO_SIZE:
		if (iocp->ioc_count != sizeof (lb_info_sz_t))
			return (IOC_INVAL);
		lbsp = (lb_info_sz_t *)mp->b_cont->b_rptr;
		*lbsp = sizeof (loopmodes);
		return (IOC_REPLY);

	case LB_GET_INFO:
		if (iocp->ioc_count != sizeof (loopmodes))
			return (IOC_INVAL);
		lbpp = (lb_property_t *)mp->b_cont->b_rptr;
		bcopy(loopmodes, lbpp, sizeof (loopmodes));
		return (IOC_REPLY);

	case LB_GET_MODE:
		if (iocp->ioc_count != sizeof (uint32_t))
			return (IOC_INVAL);
		lbmp = (uint32_t *)mp->b_cont->b_rptr;
		*lbmp = bgep->param_loop_mode;
		return (IOC_REPLY);

	case LB_SET_MODE:
		if (iocp->ioc_count != sizeof (uint32_t))
			return (IOC_INVAL);
		lbmp = (uint32_t *)mp->b_cont->b_rptr;
		return (bge_set_loop_mode(bgep, *lbmp));
	}
}

/*
 * Specific bge IOCTLs, the gld module handles the generic ones.
 */
static void
bge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
{
	bge_t *bgep = arg;
	struct iocblk *iocp;
	enum ioc_reply status;
	boolean_t need_privilege;
	int err;
	int cmd;

	/*
	 * Validate the command before bothering with the mutex ...
	 */
	iocp = (struct iocblk *)mp->b_rptr;
	iocp->ioc_error = 0;
	need_privilege = B_TRUE;
	cmd = iocp->ioc_cmd;
	switch (cmd) {
	default:
		miocnak(wq, mp, 0, EINVAL);
		return;

	case BGE_MII_READ:
	case BGE_MII_WRITE:
	case BGE_SEE_READ:
	case BGE_SEE_WRITE:
	case BGE_FLASH_READ:
	case BGE_FLASH_WRITE:
	case BGE_DIAG:
	case BGE_PEEK:
	case BGE_POKE:
	case BGE_PHY_RESET:
	case BGE_SOFT_RESET:
	case BGE_HARD_RESET:
		break;

	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
	case LB_GET_MODE:
		need_privilege = B_FALSE;
		/* FALLTHRU */
	case LB_SET_MODE:
		break;

	case ND_GET:
		need_privilege = B_FALSE;
		/* FALLTHRU */
	case ND_SET:
		break;
	}

	if (need_privilege) {
		/*
		 * Check for specific net_config privilege on Solaris 10+.
		 */
		err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
		if (err != 0) {
			miocnak(wq, mp, 0, err);
			return;
		}
	}

	mutex_enter(bgep->genlock);
	if (!(bgep->progress & PROGRESS_INTR)) {
		/* can happen during autorecovery */
		mutex_exit(bgep->genlock);
		miocnak(wq, mp, 0, EIO);
		return;
	}

	switch (cmd) {
	default:
		_NOTE(NOTREACHED)
		status = IOC_INVAL;
		break;

	case BGE_MII_READ:
	case BGE_MII_WRITE:
	case BGE_SEE_READ:
	case BGE_SEE_WRITE:
	case BGE_FLASH_READ:
	case BGE_FLASH_WRITE:
	case BGE_DIAG:
	case BGE_PEEK:
	case BGE_POKE:
	case BGE_PHY_RESET:
	case BGE_SOFT_RESET:
	case BGE_HARD_RESET:
		status = bge_chip_ioctl(bgep, wq, mp, iocp);
		break;

	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
	case LB_GET_MODE:
	case LB_SET_MODE:
		status = bge_loop_ioctl(bgep, wq, mp, iocp);
		break;

	case ND_GET:
	case ND_SET:
		status = bge_nd_ioctl(bgep, wq, mp, iocp);
		break;
	}

	/*
	 * Do we need to reprogram the PHY and/or the MAC?
	 * Do it now, while we still have the mutex.
	 *
	 * Note: update the PHY first, 'cos it controls the
	 * speed/duplex parameters that the MAC code uses.
	 */
	switch (status) {
	case IOC_RESTART_REPLY:
	case IOC_RESTART_ACK:
		if (bge_reprogram(bgep) == IOC_INVAL)
			status = IOC_INVAL;
		break;
	}

	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		status = IOC_INVAL;
	}
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		status = IOC_INVAL;
	}
	mutex_exit(bgep->genlock);

	/*
	 * Finally, decide how to reply
	 */
	switch (status) {
	default:
	case IOC_INVAL:
		/*
		 * Error, reply with a NAK and EINVAL or the specified error
		 */
		miocnak(wq, mp, 0, iocp->ioc_error == 0 ?
		    EINVAL : iocp->ioc_error);
		break;

	case IOC_DONE:
		/*
		 * OK, reply already sent
		 */
		break;

	case IOC_RESTART_ACK:
	case IOC_ACK:
		/*
		 * OK, reply with an ACK
		 */
		miocack(wq, mp, 0, 0);
		break;

	case IOC_RESTART_REPLY:
	case IOC_REPLY:
		/*
		 * OK, send prepared reply as ACK or NAK
		 */
		mp->b_datap->db_type = iocp->ioc_error == 0 ?
		    M_IOCACK : M_IOCNAK;
		qreply(wq, mp);
		break;
	}
}

static void
bge_resources_add(bge_t *bgep, time_t time, uint_t pkt_cnt)
{

	recv_ring_t *rrp;
	mac_rx_fifo_t mrf;
	int ring;

	/*
	 * Register Rx rings as resources and save mac
	 * resource id for future reference
	 */
	mrf.mrf_type = MAC_RX_FIFO;
	mrf.mrf_blank = bge_chip_blank;
	mrf.mrf_arg = (void *)bgep;
	mrf.mrf_normal_blank_time = time;
	mrf.mrf_normal_pkt_count = pkt_cnt;

	for (ring = 0; ring < bgep->chipid.rx_rings; ring++) {
		rrp = &bgep->recv[ring];
		rrp->handle = mac_resource_add(bgep->mh,
		    (mac_resource_t *)&mrf);
	}
}

static void
bge_m_resources(void *arg)
{
	bge_t *bgep = arg;

	mutex_enter(bgep->genlock);

	bge_resources_add(bgep, bgep->chipid.rx_ticks_norm,
	    bgep->chipid.rx_count_norm);
	mutex_exit(bgep->genlock);
}

/*
 * ========== Per-instance setup/teardown code ==========
 */

#undef	BGE_DBG
#define	BGE_DBG		BGE_DBG_INIT	/* debug flag for this code	*/

/*
 * Allocate an area of memory and a DMA handle for accessing it
 */
static int
bge_alloc_dma_mem(bge_t *bgep, size_t memsize, ddi_device_acc_attr_t *attr_p,
    uint_t dma_flags, dma_area_t *dma_p)
{
	caddr_t va;
	int err;

	BGE_TRACE(("bge_alloc_dma_mem($%p, %ld, $%p, 0x%x, $%p)",
	    (void *)bgep, memsize, attr_p, dma_flags, dma_p));

	/*
	 * Allocate handle
	 */
	err = ddi_dma_alloc_handle(bgep->devinfo, &dma_attr,
	    DDI_DMA_DONTWAIT, NULL, &dma_p->dma_hdl);
	if (err != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * Allocate memory
	 */
	err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize, attr_p,
	    dma_flags, DDI_DMA_DONTWAIT, NULL, &va, &dma_p->alength,
	    &dma_p->acc_hdl);
	if (err != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * Bind the two together
	 */
	dma_p->mem_va = va;
	err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL,
	    va, dma_p->alength, dma_flags, DDI_DMA_DONTWAIT, NULL,
	    &dma_p->cookie, &dma_p->ncookies);

	BGE_DEBUG(("bge_alloc_dma_mem(): bind %d bytes; err %d, %d cookies",
	    dma_p->alength, err, dma_p->ncookies));

	if (err != DDI_DMA_MAPPED || dma_p->ncookies != 1)
		return (DDI_FAILURE);

	dma_p->nslots = ~0U;
	dma_p->size = ~0U;
	dma_p->token = ~0U;
	dma_p->offset = 0;
	return (DDI_SUCCESS);
}
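/*
 * Note: the ~0U values written to <nslots>, <size> and <token> above
 * appear to be "not yet carved" sentinels; bge_slice_chunk() below
 * overwrites them with real values as slices are taken from the area.
 * A binding that needs more than one cookie is treated as a failure,
 * consistent with dma_attr_sgllen == 1 in the DMA attributes.
 */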
/*
 * Free one allocated area of DMAable memory
 */
static void
bge_free_dma_mem(dma_area_t *dma_p)
{
	if (dma_p->dma_hdl != NULL) {
		if (dma_p->ncookies) {
			(void) ddi_dma_unbind_handle(dma_p->dma_hdl);
			dma_p->ncookies = 0;
		}
		ddi_dma_free_handle(&dma_p->dma_hdl);
		dma_p->dma_hdl = NULL;
	}

	if (dma_p->acc_hdl != NULL) {
		ddi_dma_mem_free(&dma_p->acc_hdl);
		dma_p->acc_hdl = NULL;
	}
}

/*
 * Utility routine to carve a slice off a chunk of allocated memory,
 * updating the chunk descriptor accordingly.  The size of the slice
 * is given by the product of the <qty> and <size> parameters.
 */
static void
bge_slice_chunk(dma_area_t *slice, dma_area_t *chunk,
    uint32_t qty, uint32_t size)
{
	static uint32_t sequence = 0xbcd5704a;
	size_t totsize;

	totsize = qty*size;
	ASSERT(size >= 0);
	ASSERT(totsize <= chunk->alength);

	*slice = *chunk;
	slice->nslots = qty;
	slice->size = size;
	slice->alength = totsize;
	slice->token = ++sequence;

	chunk->mem_va = (caddr_t)chunk->mem_va + totsize;
	chunk->alength -= totsize;
	chunk->offset += totsize;
	chunk->cookie.dmac_laddress += totsize;
	chunk->cookie.dmac_size -= totsize;
}
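/*
 * For example, carving <qty> = 512 slots of <size> = 16 bytes takes
 * an 8KB slice: the slice descriptor keeps the chunk's original
 * virtual and DMA addresses, while the chunk's own descriptor is
 * advanced by 8KB, so successive calls hand out consecutive,
 * non-overlapping slices until alength reaches zero.
 */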
/*
 * Initialise the specified Receive Producer (Buffer) Ring, using
 * the information in the <dma_area> descriptors that it contains
 * to set up all the other fields.  This routine should be called
 * only once for each ring.
 */
static void
bge_init_buff_ring(bge_t *bgep, uint64_t ring)
{
	buff_ring_t *brp;
	bge_status_t *bsp;
	sw_rbd_t *srbdp;
	dma_area_t pbuf;
	uint32_t bufsize;
	uint32_t nslots;
	uint32_t slot;
	uint32_t split;

	static bge_regno_t nic_ring_addrs[BGE_BUFF_RINGS_MAX] = {
		NIC_MEM_SHADOW_BUFF_STD,
		NIC_MEM_SHADOW_BUFF_JUMBO,
		NIC_MEM_SHADOW_BUFF_MINI
	};
	static bge_regno_t mailbox_regs[BGE_BUFF_RINGS_MAX] = {
		RECV_STD_PROD_INDEX_REG,
		RECV_JUMBO_PROD_INDEX_REG,
		RECV_MINI_PROD_INDEX_REG
	};
	static bge_regno_t buff_cons_xref[BGE_BUFF_RINGS_MAX] = {
		STATUS_STD_BUFF_CONS_INDEX,
		STATUS_JUMBO_BUFF_CONS_INDEX,
		STATUS_MINI_BUFF_CONS_INDEX
	};

	BGE_TRACE(("bge_init_buff_ring($%p, %d)",
	    (void *)bgep, ring));

	brp = &bgep->buff[ring];
	nslots = brp->desc.nslots;
	ASSERT(brp->buf[0].nslots == nslots/BGE_SPLIT);
	bufsize = brp->buf[0].size;

	/*
	 * Set up the copy of the h/w RCB
	 *
	 * Note: unlike Send & Receive Return Rings, (where the max_len
	 * field holds the number of slots), in a Receive Buffer Ring
	 * this field indicates the size of each buffer in the ring.
	 */
	brp->hw_rcb.host_ring_addr = brp->desc.cookie.dmac_laddress;
	brp->hw_rcb.max_len = bufsize;
	brp->hw_rcb.flags = nslots > 0 ? 0 : RCB_FLAG_RING_DISABLED;
	brp->hw_rcb.nic_ring_addr = nic_ring_addrs[ring];

	/*
	 * Other one-off initialisation of per-ring data
	 */
	brp->bgep = bgep;
	bsp = DMA_VPTR(bgep->status_block);
	brp->cons_index_p = &bsp->buff_cons_index[buff_cons_xref[ring]];
	brp->chip_mbx_reg = mailbox_regs[ring];
	mutex_init(brp->rf_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(bgep->intr_pri));

	/*
	 * Allocate the array of s/w Receive Buffer Descriptors
	 */
	srbdp = kmem_zalloc(nslots*sizeof (*srbdp), KM_SLEEP);
	brp->sw_rbds = srbdp;

	/*
	 * Now initialise each array element once and for all
	 */
	for (split = 0; split < BGE_SPLIT; ++split) {
		pbuf = brp->buf[split];
		for (slot = 0; slot < nslots/BGE_SPLIT; ++srbdp, ++slot)
			bge_slice_chunk(&srbdp->pbuf, &pbuf, 1, bufsize);
		ASSERT(pbuf.alength == 0);
	}
}

/*
 * Clean up initialisation done above before the memory is freed
 */
static void
bge_fini_buff_ring(bge_t *bgep, uint64_t ring)
{
	buff_ring_t *brp;
	sw_rbd_t *srbdp;

	BGE_TRACE(("bge_fini_buff_ring($%p, %d)",
	    (void *)bgep, ring));

	brp = &bgep->buff[ring];
	srbdp = brp->sw_rbds;
	kmem_free(srbdp, brp->desc.nslots*sizeof (*srbdp));

	mutex_destroy(brp->rf_lock);
}

/*
 * Initialise the specified Receive (Return) Ring, using the
 * information in the <dma_area> descriptors that it contains
 * to set up all the other fields.  This routine should be called
 * only once for each ring.
 */
static void
bge_init_recv_ring(bge_t *bgep, uint64_t ring)
{
	recv_ring_t *rrp;
	bge_status_t *bsp;
	uint32_t nslots;

	BGE_TRACE(("bge_init_recv_ring($%p, %d)",
	    (void *)bgep, ring));

	/*
	 * The chip architecture requires that receive return rings have
	 * 512 or 1024 or 2048 elements per ring.  See 570X-PG108-R page 103.
	 */
	rrp = &bgep->recv[ring];
	nslots = rrp->desc.nslots;
	ASSERT(nslots == 0 || nslots == 512 ||
	    nslots == 1024 || nslots == 2048);

	/*
	 * Set up the copy of the h/w RCB
	 */
	rrp->hw_rcb.host_ring_addr = rrp->desc.cookie.dmac_laddress;
	rrp->hw_rcb.max_len = nslots;
	rrp->hw_rcb.flags = nslots > 0 ? 0 : RCB_FLAG_RING_DISABLED;
	rrp->hw_rcb.nic_ring_addr = 0;

	/*
	 * Other one-off initialisation of per-ring data
	 */
	rrp->bgep = bgep;
	bsp = DMA_VPTR(bgep->status_block);
	rrp->prod_index_p = RECV_INDEX_P(bsp, ring);
	rrp->chip_mbx_reg = RECV_RING_CONS_INDEX_REG(ring);
	mutex_init(rrp->rx_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(bgep->intr_pri));
}


/*
 * Clean up initialisation done above before the memory is freed
 */
static void
bge_fini_recv_ring(bge_t *bgep, uint64_t ring)
{
	recv_ring_t *rrp;

	BGE_TRACE(("bge_fini_recv_ring($%p, %d)",
	    (void *)bgep, ring));

	rrp = &bgep->recv[ring];
	if (rrp->rx_softint)
		ddi_remove_softintr(rrp->rx_softint);
	mutex_destroy(rrp->rx_lock);
}

/*
 * Initialise the specified Send Ring, using the information in the
 * <dma_area> descriptors that it contains to set up all the other
 * fields.  This routine should be called only once for each ring.
 */
2076 */ 2077 static void 2078 bge_init_send_ring(bge_t *bgep, uint64_t ring) 2079 { 2080 send_ring_t *srp; 2081 bge_status_t *bsp; 2082 sw_sbd_t *ssbdp; 2083 dma_area_t desc; 2084 dma_area_t pbuf; 2085 uint32_t nslots; 2086 uint32_t slot; 2087 uint32_t split; 2088 sw_txbuf_t *txbuf; 2089 2090 BGE_TRACE(("bge_init_send_ring($%p, %d)", 2091 (void *)bgep, ring)); 2092 2093 /* 2094 * The chip architecture requires that host-based send rings 2095 * have 512 elements per ring. See 570X-PG102-R page 56. 2096 */ 2097 srp = &bgep->send[ring]; 2098 nslots = srp->desc.nslots; 2099 ASSERT(nslots == 0 || nslots == 512); 2100 2101 /* 2102 * Set up the copy of the h/w RCB 2103 */ 2104 srp->hw_rcb.host_ring_addr = srp->desc.cookie.dmac_laddress; 2105 srp->hw_rcb.max_len = nslots; 2106 srp->hw_rcb.flags = nslots > 0 ? 0 : RCB_FLAG_RING_DISABLED; 2107 srp->hw_rcb.nic_ring_addr = NIC_MEM_SHADOW_SEND_RING(ring, nslots); 2108 2109 /* 2110 * Other one-off initialisation of per-ring data 2111 */ 2112 srp->bgep = bgep; 2113 bsp = DMA_VPTR(bgep->status_block); 2114 srp->cons_index_p = SEND_INDEX_P(bsp, ring); 2115 srp->chip_mbx_reg = SEND_RING_HOST_INDEX_REG(ring); 2116 mutex_init(srp->tx_lock, NULL, MUTEX_DRIVER, 2117 DDI_INTR_PRI(bgep->intr_pri)); 2118 mutex_init(srp->txbuf_lock, NULL, MUTEX_DRIVER, 2119 DDI_INTR_PRI(bgep->intr_pri)); 2120 mutex_init(srp->freetxbuf_lock, NULL, MUTEX_DRIVER, 2121 DDI_INTR_PRI(bgep->intr_pri)); 2122 mutex_init(srp->tc_lock, NULL, MUTEX_DRIVER, 2123 DDI_INTR_PRI(bgep->intr_pri)); 2124 if (nslots == 0) 2125 return; 2126 2127 /* 2128 * Allocate the array of s/w Send Buffer Descriptors 2129 */ 2130 ssbdp = kmem_zalloc(nslots*sizeof (*ssbdp), KM_SLEEP); 2131 txbuf = kmem_zalloc(BGE_SEND_BUF_MAX*sizeof (*txbuf), KM_SLEEP); 2132 srp->txbuf_head = 2133 kmem_zalloc(BGE_SEND_BUF_MAX*sizeof (bge_queue_item_t), KM_SLEEP); 2134 srp->pktp = kmem_zalloc(BGE_SEND_BUF_MAX*sizeof (send_pkt_t), KM_SLEEP); 2135 srp->sw_sbds = ssbdp; 2136 srp->txbuf = txbuf; 2137 srp->tx_buffers = BGE_SEND_BUF_NUM; 2138 srp->tx_buffers_low = srp->tx_buffers / 4; 2139 if (bgep->chipid.snd_buff_size > BGE_SEND_BUFF_SIZE_DEFAULT) 2140 srp->tx_array_max = BGE_SEND_BUF_ARRAY_JUMBO; 2141 else 2142 srp->tx_array_max = BGE_SEND_BUF_ARRAY; 2143 srp->tx_array = 1; 2144 2145 /* 2146 * Chunk tx desc area 2147 */ 2148 desc = srp->desc; 2149 for (slot = 0; slot < nslots; ++ssbdp, ++slot) { 2150 bge_slice_chunk(&ssbdp->desc, &desc, 1, 2151 sizeof (bge_sbd_t)); 2152 } 2153 ASSERT(desc.alength == 0); 2154 2155 /* 2156 * Chunk tx buffer area 2157 */ 2158 for (split = 0; split < BGE_SPLIT; ++split) { 2159 pbuf = srp->buf[0][split]; 2160 for (slot = 0; slot < BGE_SEND_BUF_NUM/BGE_SPLIT; ++slot) { 2161 bge_slice_chunk(&txbuf->buf, &pbuf, 1, 2162 bgep->chipid.snd_buff_size); 2163 txbuf++; 2164 } 2165 ASSERT(pbuf.alength == 0); 2166 } 2167 } 2168 2169 /* 2170 * Clean up initialisation done above before the memory is freed 2171 */ 2172 static void 2173 bge_fini_send_ring(bge_t *bgep, uint64_t ring) 2174 { 2175 send_ring_t *srp; 2176 uint32_t array; 2177 uint32_t split; 2178 uint32_t nslots; 2179 2180 BGE_TRACE(("bge_fini_send_ring($%p, %d)", 2181 (void *)bgep, ring)); 2182 2183 srp = &bgep->send[ring]; 2184 mutex_destroy(srp->tc_lock); 2185 mutex_destroy(srp->freetxbuf_lock); 2186 mutex_destroy(srp->txbuf_lock); 2187 mutex_destroy(srp->tx_lock); 2188 nslots = srp->desc.nslots; 2189 if (nslots == 0) 2190 return; 2191 2192 for (array = 1; array < srp->tx_array; ++array) 2193 for (split = 0; split < BGE_SPLIT; ++split) 2194 
bge_free_dma_mem(&srp->buf[array][split]);
2195 kmem_free(srp->sw_sbds, nslots*sizeof (*srp->sw_sbds));
2196 kmem_free(srp->txbuf_head, BGE_SEND_BUF_MAX*sizeof (*srp->txbuf_head));
2197 kmem_free(srp->txbuf, BGE_SEND_BUF_MAX*sizeof (*srp->txbuf));
2198 kmem_free(srp->pktp, BGE_SEND_BUF_MAX*sizeof (*srp->pktp));
2199 srp->sw_sbds = NULL;
2200 srp->txbuf_head = NULL;
2201 srp->txbuf = NULL;
2202 srp->pktp = NULL;
2203 }
2204
2205 /*
2206 * Initialise all transmit, receive, and buffer rings.
2207 */
2208 void
2209 bge_init_rings(bge_t *bgep)
2210 {
2211 uint32_t ring;
2212
2213 BGE_TRACE(("bge_init_rings($%p)", (void *)bgep));
2214
2215 /*
2216 * Perform one-off initialisation of each ring ...
2217 */
2218 for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring)
2219 bge_init_send_ring(bgep, ring);
2220 for (ring = 0; ring < BGE_RECV_RINGS_MAX; ++ring)
2221 bge_init_recv_ring(bgep, ring);
2222 for (ring = 0; ring < BGE_BUFF_RINGS_MAX; ++ring)
2223 bge_init_buff_ring(bgep, ring);
2224 }
2225
2226 /*
2227 * Undo the work of bge_init_rings() above before the memory is freed
2228 */
2229 void
2230 bge_fini_rings(bge_t *bgep)
2231 {
2232 uint32_t ring;
2233
2234 BGE_TRACE(("bge_fini_rings($%p)", (void *)bgep));
2235
2236 for (ring = 0; ring < BGE_BUFF_RINGS_MAX; ++ring)
2237 bge_fini_buff_ring(bgep, ring);
2238 for (ring = 0; ring < BGE_RECV_RINGS_MAX; ++ring)
2239 bge_fini_recv_ring(bgep, ring);
2240 for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring)
2241 bge_fini_send_ring(bgep, ring);
2242 }
2243
2244 /*
2245 * Called from bge_m_stop() to free the tx buffers that were
2246 * allocated by the tx process.
2247 */
2248 void
2249 bge_free_txbuf_arrays(send_ring_t *srp)
2250 {
2251 uint32_t array;
2252 uint32_t split;
2253
2254 ASSERT(mutex_owned(srp->tx_lock));
2255
2256 /*
2257 * Free the extra tx buffer DMA area
2258 */
2259 for (array = 1; array < srp->tx_array; ++array)
2260 for (split = 0; split < BGE_SPLIT; ++split)
2261 bge_free_dma_mem(&srp->buf[array][split]);
2262
2263 /*
2264 * Restore initial tx buffer numbers
2265 */
2266 srp->tx_array = 1;
2267 srp->tx_buffers = BGE_SEND_BUF_NUM;
2268 srp->tx_buffers_low = srp->tx_buffers / 4;
2269 srp->tx_flow = 0;
2270 bzero(srp->pktp, BGE_SEND_BUF_MAX * sizeof (*srp->pktp));
2271 }
2272
2273 /*
2274 * Called from the tx process to allocate more tx buffers
2275 */
2276 bge_queue_item_t *
2277 bge_alloc_txbuf_array(bge_t *bgep, send_ring_t *srp)
2278 {
2279 bge_queue_t *txbuf_queue;
2280 bge_queue_item_t *txbuf_item_last;
2281 bge_queue_item_t *txbuf_item;
2282 bge_queue_item_t *txbuf_item_rtn;
2283 sw_txbuf_t *txbuf;
2284 dma_area_t area;
2285 size_t txbuffsize;
2286 uint32_t slot;
2287 uint32_t array;
2288 uint32_t split;
2289 uint32_t err;
2290
2291 ASSERT(mutex_owned(srp->tx_lock));
2292
2293 array = srp->tx_array;
2294 if (array >= srp->tx_array_max)
2295 return (NULL);
2296
2297 /*
2298 * Allocate memory & handles for TX buffers
2299 */
2300 txbuffsize = BGE_SEND_BUF_NUM*bgep->chipid.snd_buff_size;
2301 ASSERT((txbuffsize % BGE_SPLIT) == 0);
2302 for (split = 0; split < BGE_SPLIT; ++split) {
2303 err = bge_alloc_dma_mem(bgep, txbuffsize/BGE_SPLIT,
2304 &bge_data_accattr, DDI_DMA_WRITE | BGE_DMA_MODE,
2305 &srp->buf[array][split]);
2306 if (err != DDI_SUCCESS) {
2307 /* Free all chunks allocated so far, including the failed one */
2308 for (slot = 0; slot <= split; ++slot)
2309 bge_free_dma_mem(&srp->buf[array][slot]);
2310 srp->tx_alloc_fail++;
2311 return (NULL);
2312 }
2313 }
2314
2315 /*
2316 * Chunk tx buffer area
2317 */
2318 txbuf = srp->txbuf +
array*BGE_SEND_BUF_NUM; 2319 for (split = 0; split < BGE_SPLIT; ++split) { 2320 area = srp->buf[array][split]; 2321 for (slot = 0; slot < BGE_SEND_BUF_NUM/BGE_SPLIT; ++slot) { 2322 bge_slice_chunk(&txbuf->buf, &area, 1, 2323 bgep->chipid.snd_buff_size); 2324 txbuf++; 2325 } 2326 } 2327 2328 /* 2329 * Add above buffers to the tx buffer pop queue 2330 */ 2331 txbuf_item = srp->txbuf_head + array*BGE_SEND_BUF_NUM; 2332 txbuf = srp->txbuf + array*BGE_SEND_BUF_NUM; 2333 txbuf_item_last = NULL; 2334 for (slot = 0; slot < BGE_SEND_BUF_NUM; ++slot) { 2335 txbuf_item->item = txbuf; 2336 txbuf_item->next = txbuf_item_last; 2337 txbuf_item_last = txbuf_item; 2338 txbuf++; 2339 txbuf_item++; 2340 } 2341 txbuf_item = srp->txbuf_head + array*BGE_SEND_BUF_NUM; 2342 txbuf_item_rtn = txbuf_item; 2343 txbuf_item++; 2344 txbuf_queue = srp->txbuf_pop_queue; 2345 mutex_enter(txbuf_queue->lock); 2346 txbuf_item->next = txbuf_queue->head; 2347 txbuf_queue->head = txbuf_item_last; 2348 txbuf_queue->count += BGE_SEND_BUF_NUM - 1; 2349 mutex_exit(txbuf_queue->lock); 2350 2351 srp->tx_array++; 2352 srp->tx_buffers += BGE_SEND_BUF_NUM; 2353 srp->tx_buffers_low = srp->tx_buffers / 4; 2354 2355 return (txbuf_item_rtn); 2356 } 2357 2358 /* 2359 * This function allocates all the transmit and receive buffers 2360 * and descriptors, in four chunks. 2361 */ 2362 int 2363 bge_alloc_bufs(bge_t *bgep) 2364 { 2365 dma_area_t area; 2366 size_t rxbuffsize; 2367 size_t txbuffsize; 2368 size_t rxbuffdescsize; 2369 size_t rxdescsize; 2370 size_t txdescsize; 2371 uint32_t ring; 2372 uint32_t rx_rings = bgep->chipid.rx_rings; 2373 uint32_t tx_rings = bgep->chipid.tx_rings; 2374 int split; 2375 int err; 2376 2377 BGE_TRACE(("bge_alloc_bufs($%p)", 2378 (void *)bgep)); 2379 2380 rxbuffsize = BGE_STD_SLOTS_USED*bgep->chipid.std_buf_size; 2381 rxbuffsize += bgep->chipid.jumbo_slots*bgep->chipid.recv_jumbo_size; 2382 rxbuffsize += BGE_MINI_SLOTS_USED*BGE_MINI_BUFF_SIZE; 2383 2384 txbuffsize = BGE_SEND_BUF_NUM*bgep->chipid.snd_buff_size; 2385 txbuffsize *= tx_rings; 2386 2387 rxdescsize = rx_rings*bgep->chipid.recv_slots; 2388 rxdescsize *= sizeof (bge_rbd_t); 2389 2390 rxbuffdescsize = BGE_STD_SLOTS_USED; 2391 rxbuffdescsize += bgep->chipid.jumbo_slots; 2392 rxbuffdescsize += BGE_MINI_SLOTS_USED; 2393 rxbuffdescsize *= sizeof (bge_rbd_t); 2394 2395 txdescsize = tx_rings*BGE_SEND_SLOTS_USED; 2396 txdescsize *= sizeof (bge_sbd_t); 2397 txdescsize += sizeof (bge_statistics_t); 2398 txdescsize += sizeof (bge_status_t); 2399 txdescsize += BGE_STATUS_PADDING; 2400 2401 /* 2402 * Enable PCI relaxed ordering only for RX/TX data buffers 2403 */ 2404 if (bge_relaxed_ordering) 2405 dma_attr.dma_attr_flags |= DDI_DMA_RELAXED_ORDERING; 2406 2407 /* 2408 * Allocate memory & handles for RX buffers 2409 */ 2410 ASSERT((rxbuffsize % BGE_SPLIT) == 0); 2411 for (split = 0; split < BGE_SPLIT; ++split) { 2412 err = bge_alloc_dma_mem(bgep, rxbuffsize/BGE_SPLIT, 2413 &bge_data_accattr, DDI_DMA_READ | BGE_DMA_MODE, 2414 &bgep->rx_buff[split]); 2415 if (err != DDI_SUCCESS) 2416 return (DDI_FAILURE); 2417 } 2418 2419 /* 2420 * Allocate memory & handles for TX buffers 2421 */ 2422 ASSERT((txbuffsize % BGE_SPLIT) == 0); 2423 for (split = 0; split < BGE_SPLIT; ++split) { 2424 err = bge_alloc_dma_mem(bgep, txbuffsize/BGE_SPLIT, 2425 &bge_data_accattr, DDI_DMA_WRITE | BGE_DMA_MODE, 2426 &bgep->tx_buff[split]); 2427 if (err != DDI_SUCCESS) 2428 return (DDI_FAILURE); 2429 } 2430 2431 dma_attr.dma_attr_flags &= ~DDI_DMA_RELAXED_ORDERING; 2432 2433 /* 2434 * Allocate memory 
& handles for receive return rings
2435 */
2436 ASSERT((rxdescsize % rx_rings) == 0);
2437 for (split = 0; split < rx_rings; ++split) {
2438 err = bge_alloc_dma_mem(bgep, rxdescsize/rx_rings,
2439 &bge_desc_accattr, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2440 &bgep->rx_desc[split]);
2441 if (err != DDI_SUCCESS)
2442 return (DDI_FAILURE);
2443 }
2444
2445 /*
2446 * Allocate memory & handles for buffer (producer) descriptor rings
2447 */
2448 err = bge_alloc_dma_mem(bgep, rxbuffdescsize, &bge_desc_accattr,
2449 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &bgep->rx_desc[split]);
2450 if (err != DDI_SUCCESS)
2451 return (DDI_FAILURE);
2452
2453 /*
2454 * Allocate memory & handles for TX descriptor rings,
2455 * status block, and statistics area
2456 */
2457 err = bge_alloc_dma_mem(bgep, txdescsize, &bge_desc_accattr,
2458 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &bgep->tx_desc);
2459 if (err != DDI_SUCCESS)
2460 return (DDI_FAILURE);
2461
2462 /*
2463 * Now carve up each of the allocated areas ...
2464 */
2465 for (split = 0; split < BGE_SPLIT; ++split) {
2466 area = bgep->rx_buff[split];
2467 bge_slice_chunk(&bgep->buff[BGE_STD_BUFF_RING].buf[split],
2468 &area, BGE_STD_SLOTS_USED/BGE_SPLIT,
2469 bgep->chipid.std_buf_size);
2470 bge_slice_chunk(&bgep->buff[BGE_JUMBO_BUFF_RING].buf[split],
2471 &area, bgep->chipid.jumbo_slots/BGE_SPLIT,
2472 bgep->chipid.recv_jumbo_size);
2473 bge_slice_chunk(&bgep->buff[BGE_MINI_BUFF_RING].buf[split],
2474 &area, BGE_MINI_SLOTS_USED/BGE_SPLIT,
2475 BGE_MINI_BUFF_SIZE);
2476 ASSERT(area.alength == 0);
2477 }
2478
2479 for (split = 0; split < BGE_SPLIT; ++split) {
2480 area = bgep->tx_buff[split];
2481 for (ring = 0; ring < tx_rings; ++ring)
2482 bge_slice_chunk(&bgep->send[ring].buf[0][split],
2483 &area, BGE_SEND_BUF_NUM/BGE_SPLIT,
2484 bgep->chipid.snd_buff_size);
2485 for (; ring < BGE_SEND_RINGS_MAX; ++ring)
2486 bge_slice_chunk(&bgep->send[ring].buf[0][split],
2487 &area, 0, bgep->chipid.snd_buff_size);
2488 ASSERT(area.alength == 0);
2489 }
2490
2491 for (ring = 0; ring < rx_rings; ++ring)
2492 bge_slice_chunk(&bgep->recv[ring].desc, &bgep->rx_desc[ring],
2493 bgep->chipid.recv_slots, sizeof (bge_rbd_t));
2494
2495 area = bgep->rx_desc[rx_rings];
2496 for (; ring < BGE_RECV_RINGS_MAX; ++ring)
2497 bge_slice_chunk(&bgep->recv[ring].desc, &area,
2498 0, sizeof (bge_rbd_t));
2499 bge_slice_chunk(&bgep->buff[BGE_STD_BUFF_RING].desc, &area,
2500 BGE_STD_SLOTS_USED, sizeof (bge_rbd_t));
2501 bge_slice_chunk(&bgep->buff[BGE_JUMBO_BUFF_RING].desc, &area,
2502 bgep->chipid.jumbo_slots, sizeof (bge_rbd_t));
2503 bge_slice_chunk(&bgep->buff[BGE_MINI_BUFF_RING].desc, &area,
2504 BGE_MINI_SLOTS_USED, sizeof (bge_rbd_t));
2505 ASSERT(area.alength == 0);
2506
2507 area = bgep->tx_desc;
2508 for (ring = 0; ring < tx_rings; ++ring)
2509 bge_slice_chunk(&bgep->send[ring].desc, &area,
2510 BGE_SEND_SLOTS_USED, sizeof (bge_sbd_t));
2511 for (; ring < BGE_SEND_RINGS_MAX; ++ring)
2512 bge_slice_chunk(&bgep->send[ring].desc, &area,
2513 0, sizeof (bge_sbd_t));
2514 bge_slice_chunk(&bgep->statistics, &area, 1, sizeof (bge_statistics_t));
2515 bge_slice_chunk(&bgep->status_block, &area, 1, sizeof (bge_status_t));
2516 ASSERT(area.alength == BGE_STATUS_PADDING);
2517 DMA_ZERO(bgep->status_block);
2518
2519 return (DDI_SUCCESS);
2520 }
2521
2522 /*
2523 * This routine frees the transmit and receive buffers and descriptors.
2524 * Make sure the chip is stopped before calling it!
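 *
 * Editorial note: the areas are released in the reverse of the order
 * in which bge_alloc_bufs() acquired them (TX descriptors, then RX
 * descriptors, then TX buffers, then RX buffers), and since
 * bge_free_dma_mem() is expected to tolerate areas that were never
 * successfully allocated, a failure part-way through bge_alloc_bufs()
 * can be unwound by this same routine.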
2525 */ 2526 void 2527 bge_free_bufs(bge_t *bgep) 2528 { 2529 int split; 2530 2531 BGE_TRACE(("bge_free_bufs($%p)", 2532 (void *)bgep)); 2533 2534 bge_free_dma_mem(&bgep->tx_desc); 2535 for (split = 0; split < BGE_RECV_RINGS_SPLIT; ++split) 2536 bge_free_dma_mem(&bgep->rx_desc[split]); 2537 for (split = 0; split < BGE_SPLIT; ++split) 2538 bge_free_dma_mem(&bgep->tx_buff[split]); 2539 for (split = 0; split < BGE_SPLIT; ++split) 2540 bge_free_dma_mem(&bgep->rx_buff[split]); 2541 } 2542 2543 /* 2544 * Determine (initial) MAC address ("BIA") to use for this interface 2545 */ 2546 2547 static void 2548 bge_find_mac_address(bge_t *bgep, chip_id_t *cidp) 2549 { 2550 struct ether_addr sysaddr; 2551 char propbuf[8]; /* "true" or "false", plus NUL */ 2552 uchar_t *bytes; 2553 int *ints; 2554 uint_t nelts; 2555 int err; 2556 2557 BGE_TRACE(("bge_find_mac_address($%p)", 2558 (void *)bgep)); 2559 2560 BGE_DEBUG(("bge_find_mac_address: hw_mac_addr %012llx, => %s (%sset)", 2561 cidp->hw_mac_addr, 2562 ether_sprintf((void *)cidp->vendor_addr.addr), 2563 cidp->vendor_addr.set ? "" : "not ")); 2564 2565 /* 2566 * The "vendor's factory-set address" may already have 2567 * been extracted from the chip, but if the property 2568 * "local-mac-address" is set we use that instead. It 2569 * will normally be set by OBP, but it could also be 2570 * specified in a .conf file(!) 2571 * 2572 * There doesn't seem to be a way to define byte-array 2573 * properties in a .conf, so we check whether it looks 2574 * like an array of 6 ints instead. 2575 * 2576 * Then, we check whether it looks like an array of 6 2577 * bytes (which it should, if OBP set it). If we can't 2578 * make sense of it either way, we'll ignore it. 2579 */ 2580 err = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, bgep->devinfo, 2581 DDI_PROP_DONTPASS, localmac_propname, &ints, &nelts); 2582 if (err == DDI_PROP_SUCCESS) { 2583 if (nelts == ETHERADDRL) { 2584 while (nelts--) 2585 cidp->vendor_addr.addr[nelts] = ints[nelts]; 2586 cidp->vendor_addr.set = B_TRUE; 2587 } 2588 ddi_prop_free(ints); 2589 } 2590 2591 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, bgep->devinfo, 2592 DDI_PROP_DONTPASS, localmac_propname, &bytes, &nelts); 2593 if (err == DDI_PROP_SUCCESS) { 2594 if (nelts == ETHERADDRL) { 2595 while (nelts--) 2596 cidp->vendor_addr.addr[nelts] = bytes[nelts]; 2597 cidp->vendor_addr.set = B_TRUE; 2598 } 2599 ddi_prop_free(bytes); 2600 } 2601 2602 BGE_DEBUG(("bge_find_mac_address: +local %s (%sset)", 2603 ether_sprintf((void *)cidp->vendor_addr.addr), 2604 cidp->vendor_addr.set ? "" : "not ")); 2605 2606 /* 2607 * Look up the OBP property "local-mac-address?". Note that even 2608 * though its value is a string (which should be "true" or "false"), 2609 * it can't be decoded by ddi_prop_lookup_string(9F). So, we zero 2610 * the buffer first and then fetch the property as an untyped array; 2611 * this may or may not include a final NUL, but since there will 2612 * always be one left at the end of the buffer we can now treat it 2613 * as a string anyway. 2614 */ 2615 nelts = sizeof (propbuf); 2616 bzero(propbuf, nelts--); 2617 err = ddi_getlongprop_buf(DDI_DEV_T_ANY, bgep->devinfo, 2618 DDI_PROP_CANSLEEP, localmac_boolname, propbuf, (int *)&nelts); 2619 2620 /* 2621 * Now, if the address still isn't set from the hardware (SEEPROM) 2622 * or the OBP or .conf property, OR if the user has foolishly set 2623 * 'local-mac-address? = false', use "the system address" instead 2624 * (but only if it's non-null i.e. has been set from the IDPROM). 
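 *
 * In outline, the precedence implemented by this routine (highest
 * first) is:
 *
 *	1) the "mac-address" property (created when net-booted from
 *	   this interface; checked last, below, and wins over all)
 *	2) the system IDPROM address, when nothing else is set or when
 *	   'local-mac-address?' is "false"
 *	3) the "local-mac-address" property (OBP or .conf override)
 *	4) the factory address already extracted from the chip/SEEPROM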
2625 */ 2626 if (cidp->vendor_addr.set == B_FALSE || strcmp(propbuf, "false") == 0) 2627 if (localetheraddr(NULL, &sysaddr) != 0) { 2628 ethaddr_copy(&sysaddr, cidp->vendor_addr.addr); 2629 cidp->vendor_addr.set = B_TRUE; 2630 } 2631 2632 BGE_DEBUG(("bge_find_mac_address: +system %s (%sset)", 2633 ether_sprintf((void *)cidp->vendor_addr.addr), 2634 cidp->vendor_addr.set ? "" : "not ")); 2635 2636 /* 2637 * Finally(!), if there's a valid "mac-address" property (created 2638 * if we netbooted from this interface), we must use this instead 2639 * of any of the above to ensure that the NFS/install server doesn't 2640 * get confused by the address changing as Solaris takes over! 2641 */ 2642 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, bgep->devinfo, 2643 DDI_PROP_DONTPASS, macaddr_propname, &bytes, &nelts); 2644 if (err == DDI_PROP_SUCCESS) { 2645 if (nelts == ETHERADDRL) { 2646 while (nelts--) 2647 cidp->vendor_addr.addr[nelts] = bytes[nelts]; 2648 cidp->vendor_addr.set = B_TRUE; 2649 } 2650 ddi_prop_free(bytes); 2651 } 2652 2653 BGE_DEBUG(("bge_find_mac_address: =final %s (%sset)", 2654 ether_sprintf((void *)cidp->vendor_addr.addr), 2655 cidp->vendor_addr.set ? "" : "not ")); 2656 } 2657 2658 2659 /*ARGSUSED*/ 2660 int 2661 bge_check_acc_handle(bge_t *bgep, ddi_acc_handle_t handle) 2662 { 2663 ddi_fm_error_t de; 2664 2665 ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION); 2666 ddi_fm_acc_err_clear(handle, DDI_FME_VERSION); 2667 return (de.fme_status); 2668 } 2669 2670 /*ARGSUSED*/ 2671 int 2672 bge_check_dma_handle(bge_t *bgep, ddi_dma_handle_t handle) 2673 { 2674 ddi_fm_error_t de; 2675 2676 ASSERT(bgep->progress & PROGRESS_BUFS); 2677 ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION); 2678 return (de.fme_status); 2679 } 2680 2681 /* 2682 * The IO fault service error handling callback function 2683 */ 2684 /*ARGSUSED*/ 2685 static int 2686 bge_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data) 2687 { 2688 /* 2689 * as the driver can always deal with an error in any dma or 2690 * access handle, we can just return the fme_status value. 2691 */ 2692 pci_ereport_post(dip, err, NULL); 2693 return (err->fme_status); 2694 } 2695 2696 static void 2697 bge_fm_init(bge_t *bgep) 2698 { 2699 ddi_iblock_cookie_t iblk; 2700 2701 /* Only register with IO Fault Services if we have some capability */ 2702 if (bgep->fm_capabilities) { 2703 bge_reg_accattr.devacc_attr_access = DDI_FLAGERR_ACC; 2704 bge_desc_accattr.devacc_attr_access = DDI_FLAGERR_ACC; 2705 dma_attr.dma_attr_flags = DDI_DMA_FLAGERR; 2706 2707 /* Register capabilities with IO Fault Services */ 2708 ddi_fm_init(bgep->devinfo, &bgep->fm_capabilities, &iblk); 2709 2710 /* 2711 * Initialize pci ereport capabilities if ereport capable 2712 */ 2713 if (DDI_FM_EREPORT_CAP(bgep->fm_capabilities) || 2714 DDI_FM_ERRCB_CAP(bgep->fm_capabilities)) 2715 pci_ereport_setup(bgep->devinfo); 2716 2717 /* 2718 * Register error callback if error callback capable 2719 */ 2720 if (DDI_FM_ERRCB_CAP(bgep->fm_capabilities)) 2721 ddi_fm_handler_register(bgep->devinfo, 2722 bge_fm_error_cb, (void*) bgep); 2723 } else { 2724 /* 2725 * These fields have to be cleared of FMA if there are no 2726 * FMA capabilities at runtime. 
2727 */ 2728 bge_reg_accattr.devacc_attr_access = DDI_DEFAULT_ACC; 2729 bge_desc_accattr.devacc_attr_access = DDI_DEFAULT_ACC; 2730 dma_attr.dma_attr_flags = 0; 2731 } 2732 } 2733 2734 static void 2735 bge_fm_fini(bge_t *bgep) 2736 { 2737 /* Only unregister FMA capabilities if we registered some */ 2738 if (bgep->fm_capabilities) { 2739 2740 /* 2741 * Release any resources allocated by pci_ereport_setup() 2742 */ 2743 if (DDI_FM_EREPORT_CAP(bgep->fm_capabilities) || 2744 DDI_FM_ERRCB_CAP(bgep->fm_capabilities)) 2745 pci_ereport_teardown(bgep->devinfo); 2746 2747 /* 2748 * Un-register error callback if error callback capable 2749 */ 2750 if (DDI_FM_ERRCB_CAP(bgep->fm_capabilities)) 2751 ddi_fm_handler_unregister(bgep->devinfo); 2752 2753 /* Unregister from IO Fault Services */ 2754 ddi_fm_fini(bgep->devinfo); 2755 } 2756 } 2757 2758 static void 2759 #ifdef BGE_IPMI_ASF 2760 bge_unattach(bge_t *bgep, uint_t asf_mode) 2761 #else 2762 bge_unattach(bge_t *bgep) 2763 #endif 2764 { 2765 BGE_TRACE(("bge_unattach($%p)", 2766 (void *)bgep)); 2767 2768 /* 2769 * Flag that no more activity may be initiated 2770 */ 2771 bgep->progress &= ~PROGRESS_READY; 2772 2773 /* 2774 * Quiesce the PHY and MAC (leave it reset but still powered). 2775 * Clean up and free all BGE data structures 2776 */ 2777 if (bgep->periodic_id != NULL) { 2778 ddi_periodic_delete(bgep->periodic_id); 2779 bgep->periodic_id = NULL; 2780 } 2781 if (bgep->progress & PROGRESS_KSTATS) 2782 bge_fini_kstats(bgep); 2783 if (bgep->progress & PROGRESS_NDD) 2784 bge_nd_cleanup(bgep); 2785 if (bgep->progress & PROGRESS_PHY) 2786 bge_phys_reset(bgep); 2787 if (bgep->progress & PROGRESS_HWINT) { 2788 mutex_enter(bgep->genlock); 2789 #ifdef BGE_IPMI_ASF 2790 if (bge_chip_reset(bgep, B_FALSE, asf_mode) != DDI_SUCCESS) 2791 #else 2792 if (bge_chip_reset(bgep, B_FALSE) != DDI_SUCCESS) 2793 #endif 2794 ddi_fm_service_impact(bgep->devinfo, 2795 DDI_SERVICE_UNAFFECTED); 2796 #ifdef BGE_IPMI_ASF 2797 if (bgep->asf_enabled) { 2798 /* 2799 * This register has been overlaid. We restore its 2800 * initial value here. 
2801 */
2802 bge_nic_put32(bgep, BGE_NIC_DATA_SIG_ADDR,
2803 BGE_NIC_DATA_SIG);
2804 }
2805 #endif
2806 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK)
2807 ddi_fm_service_impact(bgep->devinfo,
2808 DDI_SERVICE_UNAFFECTED);
2809 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK)
2810 ddi_fm_service_impact(bgep->devinfo,
2811 DDI_SERVICE_UNAFFECTED);
2812 mutex_exit(bgep->genlock);
2813 }
2814 if (bgep->progress & PROGRESS_INTR) {
2815 bge_intr_disable(bgep);
2816 bge_fini_rings(bgep);
2817 }
2818 if (bgep->progress & PROGRESS_HWINT) {
2819 bge_rem_intrs(bgep);
2820 rw_destroy(bgep->errlock);
2821 mutex_destroy(bgep->softintrlock);
2822 mutex_destroy(bgep->genlock);
2823 }
2824 if (bgep->progress & PROGRESS_FACTOTUM)
2825 ddi_remove_softintr(bgep->factotum_id);
2826 if (bgep->progress & PROGRESS_RESCHED)
2827 ddi_remove_softintr(bgep->drain_id);
2828 if (bgep->progress & PROGRESS_BUFS)
2829 bge_free_bufs(bgep);
2830 if (bgep->progress & PROGRESS_REGS)
2831 ddi_regs_map_free(&bgep->io_handle);
2832 if (bgep->progress & PROGRESS_CFG)
2833 pci_config_teardown(&bgep->cfg_handle);
2834
2835 bge_fm_fini(bgep);
2836
2837 ddi_remove_minor_node(bgep->devinfo, NULL);
2838 kmem_free(bgep->pstats, sizeof (bge_statistics_reg_t));
2839 kmem_free(bgep->nd_params, PARAM_COUNT * sizeof (nd_param_t));
2840 kmem_free(bgep, sizeof (*bgep));
2841 }
2842
2843 static int
2844 bge_resume(dev_info_t *devinfo)
2845 {
2846 bge_t *bgep; /* Our private data */
2847 chip_id_t *cidp;
2848 chip_id_t chipid;
2849
2850 bgep = ddi_get_driver_private(devinfo);
2851 if (bgep == NULL)
2852 return (DDI_FAILURE);
2853
2854 /*
2855 * Refuse to resume if the data structures aren't consistent
2856 */
2857 if (bgep->devinfo != devinfo)
2858 return (DDI_FAILURE);
2859
2860 #ifdef BGE_IPMI_ASF
2861 /*
2862 * Power management is not currently supported in BGE. If you
2863 * want to implement it, please add the ASF/IPMI-related
2864 * code here.
2865 */
2866
2867 #endif
2868
2869 /*
2870 * Read chip ID & set up config space command register(s)
2871 * Refuse to resume if the chip has changed its identity!
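 *
 * (Editorial note: the checks below compare the chip ID re-read
 * after the power cycle against the copy captured at attach time;
 * any mismatch in vendor, device, revision or asic_rev fails the
 * resume rather than risk driving different hardware.)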
2872 */ 2873 cidp = &bgep->chipid; 2874 mutex_enter(bgep->genlock); 2875 bge_chip_cfg_init(bgep, &chipid, B_FALSE); 2876 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) { 2877 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 2878 mutex_exit(bgep->genlock); 2879 return (DDI_FAILURE); 2880 } 2881 mutex_exit(bgep->genlock); 2882 if (chipid.vendor != cidp->vendor) 2883 return (DDI_FAILURE); 2884 if (chipid.device != cidp->device) 2885 return (DDI_FAILURE); 2886 if (chipid.revision != cidp->revision) 2887 return (DDI_FAILURE); 2888 if (chipid.asic_rev != cidp->asic_rev) 2889 return (DDI_FAILURE); 2890 2891 /* 2892 * All OK, reinitialise h/w & kick off GLD scheduling 2893 */ 2894 mutex_enter(bgep->genlock); 2895 if (bge_restart(bgep, B_TRUE) != DDI_SUCCESS) { 2896 (void) bge_check_acc_handle(bgep, bgep->cfg_handle); 2897 (void) bge_check_acc_handle(bgep, bgep->io_handle); 2898 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 2899 mutex_exit(bgep->genlock); 2900 return (DDI_FAILURE); 2901 } 2902 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) { 2903 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 2904 mutex_exit(bgep->genlock); 2905 return (DDI_FAILURE); 2906 } 2907 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) { 2908 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 2909 mutex_exit(bgep->genlock); 2910 return (DDI_FAILURE); 2911 } 2912 mutex_exit(bgep->genlock); 2913 return (DDI_SUCCESS); 2914 } 2915 2916 /* 2917 * attach(9E) -- Attach a device to the system 2918 * 2919 * Called once for each board successfully probed. 2920 */ 2921 static int 2922 bge_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd) 2923 { 2924 bge_t *bgep; /* Our private data */ 2925 mac_register_t *macp; 2926 chip_id_t *cidp; 2927 caddr_t regs; 2928 int instance; 2929 int err; 2930 int intr_types; 2931 #ifdef BGE_IPMI_ASF 2932 uint32_t mhcrValue; 2933 #ifdef __sparc 2934 uint16_t value16; 2935 #endif 2936 #ifdef BGE_NETCONSOLE 2937 int retval; 2938 #endif 2939 #endif 2940 2941 instance = ddi_get_instance(devinfo); 2942 2943 BGE_GTRACE(("bge_attach($%p, %d) instance %d", 2944 (void *)devinfo, cmd, instance)); 2945 BGE_BRKPT(NULL, "bge_attach"); 2946 2947 switch (cmd) { 2948 default: 2949 return (DDI_FAILURE); 2950 2951 case DDI_RESUME: 2952 return (bge_resume(devinfo)); 2953 2954 case DDI_ATTACH: 2955 break; 2956 } 2957 2958 bgep = kmem_zalloc(sizeof (*bgep), KM_SLEEP); 2959 bgep->pstats = kmem_zalloc(sizeof (bge_statistics_reg_t), KM_SLEEP); 2960 bgep->nd_params = 2961 kmem_zalloc(PARAM_COUNT * sizeof (nd_param_t), KM_SLEEP); 2962 ddi_set_driver_private(devinfo, bgep); 2963 bgep->bge_guard = BGE_GUARD; 2964 bgep->devinfo = devinfo; 2965 bgep->param_drain_max = 64; 2966 bgep->param_msi_cnt = 0; 2967 bgep->param_loop_mode = 0; 2968 2969 /* 2970 * Initialize more fields in BGE private data 2971 */ 2972 bgep->debug = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 2973 DDI_PROP_DONTPASS, debug_propname, bge_debug); 2974 (void) snprintf(bgep->ifname, sizeof (bgep->ifname), "%s%d", 2975 BGE_DRIVER_NAME, instance); 2976 2977 /* 2978 * Initialize for fma support 2979 */ 2980 bgep->fm_capabilities = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 2981 DDI_PROP_DONTPASS, fm_cap, 2982 DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE | 2983 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE); 2984 BGE_DEBUG(("bgep->fm_capabilities = %d", bgep->fm_capabilities)); 2985 bge_fm_init(bgep); 2986 2987 /* 2988 * Look up the IOMMU's page size for DVMA mappings (must be 2989 * a power of 2) and 
convert to a mask. This can be used to 2990 * determine whether a message buffer crosses a page boundary. 2991 * Note: in 2s complement binary notation, if X is a power of 2992 * 2, then -X has the representation "11...1100...00". 2993 */ 2994 bgep->pagemask = dvma_pagesize(devinfo); 2995 ASSERT(ddi_ffs(bgep->pagemask) == ddi_fls(bgep->pagemask)); 2996 bgep->pagemask = -bgep->pagemask; 2997 2998 /* 2999 * Map config space registers 3000 * Read chip ID & set up config space command register(s) 3001 * 3002 * Note: this leaves the chip accessible by Memory Space 3003 * accesses, but with interrupts and Bus Mastering off. 3004 * This should ensure that nothing untoward will happen 3005 * if it has been left active by the (net-)bootloader. 3006 * We'll re-enable Bus Mastering once we've reset the chip, 3007 * and allow interrupts only when everything else is set up. 3008 */ 3009 err = pci_config_setup(devinfo, &bgep->cfg_handle); 3010 #ifdef BGE_IPMI_ASF 3011 #ifdef __sparc 3012 value16 = pci_config_get16(bgep->cfg_handle, PCI_CONF_COMM); 3013 value16 = value16 | (PCI_COMM_MAE | PCI_COMM_ME); 3014 pci_config_put16(bgep->cfg_handle, PCI_CONF_COMM, value16); 3015 mhcrValue = MHCR_ENABLE_INDIRECT_ACCESS | 3016 MHCR_ENABLE_TAGGED_STATUS_MODE | 3017 MHCR_MASK_INTERRUPT_MODE | 3018 MHCR_MASK_PCI_INT_OUTPUT | 3019 MHCR_CLEAR_INTERRUPT_INTA | 3020 MHCR_ENABLE_ENDIAN_WORD_SWAP | 3021 MHCR_ENABLE_ENDIAN_BYTE_SWAP; 3022 pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_MHCR, mhcrValue); 3023 bge_ind_put32(bgep, MEMORY_ARBITER_MODE_REG, 3024 bge_ind_get32(bgep, MEMORY_ARBITER_MODE_REG) | 3025 MEMORY_ARBITER_ENABLE); 3026 #else 3027 mhcrValue = pci_config_get32(bgep->cfg_handle, PCI_CONF_BGE_MHCR); 3028 #endif 3029 if (mhcrValue & MHCR_ENABLE_ENDIAN_WORD_SWAP) { 3030 bgep->asf_wordswapped = B_TRUE; 3031 } else { 3032 bgep->asf_wordswapped = B_FALSE; 3033 } 3034 bge_asf_get_config(bgep); 3035 #endif 3036 if (err != DDI_SUCCESS) { 3037 bge_problem(bgep, "pci_config_setup() failed"); 3038 goto attach_fail; 3039 } 3040 bgep->progress |= PROGRESS_CFG; 3041 cidp = &bgep->chipid; 3042 bzero(cidp, sizeof (*cidp)); 3043 bge_chip_cfg_init(bgep, cidp, B_FALSE); 3044 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) { 3045 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 3046 goto attach_fail; 3047 } 3048 3049 #ifdef BGE_IPMI_ASF 3050 if (DEVICE_5721_SERIES_CHIPSETS(bgep) || 3051 DEVICE_5714_SERIES_CHIPSETS(bgep)) { 3052 bgep->asf_newhandshake = B_TRUE; 3053 } else { 3054 bgep->asf_newhandshake = B_FALSE; 3055 } 3056 #endif 3057 3058 /* 3059 * Update those parts of the chip ID derived from volatile 3060 * registers with the values seen by OBP (in case the chip 3061 * has been reset externally and therefore lost them). 
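 *
 * For illustration, a (hypothetical) bge.conf entry such as
 *
 *	bge-rx-rings=1;
 *
 * would be picked up by the ddi_prop_get_int() calls below and cap
 * the number of receive (return) rings regardless of the value
 * probed from the chip.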
3062 */
3063 cidp->subven = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
3064 DDI_PROP_DONTPASS, subven_propname, cidp->subven);
3065 cidp->subdev = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
3066 DDI_PROP_DONTPASS, subdev_propname, cidp->subdev);
3067 cidp->clsize = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
3068 DDI_PROP_DONTPASS, clsize_propname, cidp->clsize);
3069 cidp->latency = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
3070 DDI_PROP_DONTPASS, latency_propname, cidp->latency);
3071 cidp->rx_rings = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
3072 DDI_PROP_DONTPASS, rxrings_propname, cidp->rx_rings);
3073 cidp->tx_rings = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
3074 DDI_PROP_DONTPASS, txrings_propname, cidp->tx_rings);
3075
3076 if (bge_jumbo_enable == B_TRUE) {
3077 cidp->default_mtu = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
3078 DDI_PROP_DONTPASS, default_mtu, BGE_DEFAULT_MTU);
3079 if ((cidp->default_mtu < BGE_DEFAULT_MTU) ||
3080 (cidp->default_mtu > BGE_MAXIMUM_MTU)) {
3081 cidp->default_mtu = BGE_DEFAULT_MTU;
3082 }
3083 }
3084 /*
3085 * Map operating registers
3086 */
3087 err = ddi_regs_map_setup(devinfo, BGE_PCI_OPREGS_RNUMBER,
3088 &regs, 0, 0, &bge_reg_accattr, &bgep->io_handle);
3089 if (err != DDI_SUCCESS) {
3090 bge_problem(bgep, "ddi_regs_map_setup() failed");
3091 goto attach_fail;
3092 }
3093 bgep->io_regs = regs;
3094 bgep->progress |= PROGRESS_REGS;
3095
3096 /*
3097 * Characterise the device, so we know its requirements.
3098 * Then allocate the appropriate TX and RX descriptors & buffers.
3099 */
3100 if (bge_chip_id_init(bgep) == EIO) {
3101 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
3102 goto attach_fail;
3103 }
3104 err = bge_alloc_bufs(bgep);
3105 if (err != DDI_SUCCESS) {
3106 bge_problem(bgep, "DMA buffer allocation failed");
3107 goto attach_fail;
3108 }
3109 bgep->progress |= PROGRESS_BUFS;
3110
3111 /*
3112 * Add the softint handlers:
3113 *
3114 * Both of these handlers are used to avoid restrictions on the
3115 * context and/or mutexes required for some operations. In
3116 * particular, the hardware interrupt handler and its subfunctions
3117 * can detect a number of conditions that we don't want to handle
3118 * in that context or with that set of mutexes held. So, these
3119 * softints are triggered instead:
3120 *
3121 * the <resched> softint is triggered if we have previously
3122 * had to refuse to send a packet because of resource shortage
3123 * (we've run out of transmit buffers), but the send completion
3124 * interrupt handler has now detected that more buffers have
3125 * become available.
3126 *
3127 * the <factotum> is triggered if the h/w interrupt handler
3128 * sees the <link state changed> or <error> bits in the status
3129 * block. It's also triggered periodically to poll the link
3130 * state, just in case we aren't getting link status change
3131 * interrupts ...
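 *
 * As a sketch of the first case: the send path declines the packet
 * and notes the shortage; when transmit completions later release
 * buffers, the interrupt code triggers <drain_id> (presumably via
 * ddi_trigger_softintr(9F)) so that bge_send_drain() can restart
 * the flow in a friendlier context.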
3132 */ 3133 err = ddi_add_softintr(devinfo, DDI_SOFTINT_LOW, &bgep->drain_id, 3134 NULL, NULL, bge_send_drain, (caddr_t)bgep); 3135 if (err != DDI_SUCCESS) { 3136 bge_problem(bgep, "ddi_add_softintr() failed"); 3137 goto attach_fail; 3138 } 3139 bgep->progress |= PROGRESS_RESCHED; 3140 err = ddi_add_softintr(devinfo, DDI_SOFTINT_LOW, &bgep->factotum_id, 3141 NULL, NULL, bge_chip_factotum, (caddr_t)bgep); 3142 if (err != DDI_SUCCESS) { 3143 bge_problem(bgep, "ddi_add_softintr() failed"); 3144 goto attach_fail; 3145 } 3146 bgep->progress |= PROGRESS_FACTOTUM; 3147 3148 /* Get supported interrupt types */ 3149 if (ddi_intr_get_supported_types(devinfo, &intr_types) != DDI_SUCCESS) { 3150 bge_error(bgep, "ddi_intr_get_supported_types failed\n"); 3151 3152 goto attach_fail; 3153 } 3154 3155 BGE_DEBUG(("%s: ddi_intr_get_supported_types() returned: %x", 3156 bgep->ifname, intr_types)); 3157 3158 if ((intr_types & DDI_INTR_TYPE_MSI) && bgep->chipid.msi_enabled) { 3159 if (bge_add_intrs(bgep, DDI_INTR_TYPE_MSI) != DDI_SUCCESS) { 3160 bge_error(bgep, "MSI registration failed, " 3161 "trying FIXED interrupt type\n"); 3162 } else { 3163 BGE_DEBUG(("%s: Using MSI interrupt type", 3164 bgep->ifname)); 3165 bgep->intr_type = DDI_INTR_TYPE_MSI; 3166 bgep->progress |= PROGRESS_HWINT; 3167 } 3168 } 3169 3170 if (!(bgep->progress & PROGRESS_HWINT) && 3171 (intr_types & DDI_INTR_TYPE_FIXED)) { 3172 if (bge_add_intrs(bgep, DDI_INTR_TYPE_FIXED) != DDI_SUCCESS) { 3173 bge_error(bgep, "FIXED interrupt " 3174 "registration failed\n"); 3175 goto attach_fail; 3176 } 3177 3178 BGE_DEBUG(("%s: Using FIXED interrupt type", bgep->ifname)); 3179 3180 bgep->intr_type = DDI_INTR_TYPE_FIXED; 3181 bgep->progress |= PROGRESS_HWINT; 3182 } 3183 3184 if (!(bgep->progress & PROGRESS_HWINT)) { 3185 bge_error(bgep, "No interrupts registered\n"); 3186 goto attach_fail; 3187 } 3188 3189 /* 3190 * Note that interrupts are not enabled yet as 3191 * mutex locks are not initialized. Initialize mutex locks. 3192 */ 3193 mutex_init(bgep->genlock, NULL, MUTEX_DRIVER, 3194 DDI_INTR_PRI(bgep->intr_pri)); 3195 mutex_init(bgep->softintrlock, NULL, MUTEX_DRIVER, 3196 DDI_INTR_PRI(bgep->intr_pri)); 3197 rw_init(bgep->errlock, NULL, RW_DRIVER, 3198 DDI_INTR_PRI(bgep->intr_pri)); 3199 3200 /* 3201 * Initialize rings. 3202 */ 3203 bge_init_rings(bgep); 3204 3205 /* 3206 * Now that mutex locks are initialized, enable interrupts. 3207 */ 3208 bge_intr_enable(bgep); 3209 bgep->progress |= PROGRESS_INTR; 3210 3211 /* 3212 * Initialise link state variables 3213 * Stop, reset & reinitialise the chip. 3214 * Initialise the (internal) PHY. 3215 */ 3216 bgep->link_state = LINK_STATE_UNKNOWN; 3217 3218 mutex_enter(bgep->genlock); 3219 3220 /* 3221 * Reset chip & rings to initial state; also reset address 3222 * filtering, promiscuity, loopback mode. 
3223 */
3224 #ifdef BGE_IPMI_ASF
3225 #ifdef BGE_NETCONSOLE
3226 if (bge_reset(bgep, ASF_MODE_INIT) != DDI_SUCCESS) {
3227 #else
3228 if (bge_reset(bgep, ASF_MODE_SHUTDOWN) != DDI_SUCCESS) {
3229 #endif
3230 #else
3231 if (bge_reset(bgep) != DDI_SUCCESS) {
3232 #endif
3233 (void) bge_check_acc_handle(bgep, bgep->cfg_handle);
3234 (void) bge_check_acc_handle(bgep, bgep->io_handle);
3235 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
3236 mutex_exit(bgep->genlock);
3237 goto attach_fail;
3238 }
3239
3240 #ifdef BGE_IPMI_ASF
3241 if (bgep->asf_enabled) {
3242 bgep->asf_status = ASF_STAT_RUN_INIT;
3243 }
3244 #endif
3245
3246 bzero(bgep->mcast_hash, sizeof (bgep->mcast_hash));
3247 bzero(bgep->mcast_refs, sizeof (bgep->mcast_refs));
3248 bgep->promisc = B_FALSE;
3249 bgep->param_loop_mode = BGE_LOOP_NONE;
3250 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
3251 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
3252 mutex_exit(bgep->genlock);
3253 goto attach_fail;
3254 }
3255 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
3256 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
3257 mutex_exit(bgep->genlock);
3258 goto attach_fail;
3259 }
3260
3261 mutex_exit(bgep->genlock);
3262
3263 if (bge_phys_init(bgep) == EIO) {
3264 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
3265 goto attach_fail;
3266 }
3267 bgep->progress |= PROGRESS_PHY;
3268
3269 /*
3270 * Register NDD-tweakable parameters
3271 */
3272 if (bge_nd_init(bgep)) {
3273 bge_problem(bgep, "bge_nd_init() failed");
3274 goto attach_fail;
3275 }
3276 bgep->progress |= PROGRESS_NDD;
3277
3278 /*
3279 * Create & initialise named kstats
3280 */
3281 bge_init_kstats(bgep, instance);
3282 bgep->progress |= PROGRESS_KSTATS;
3283
3284 /*
3285 * Determine whether to override the chip's own MAC address
3286 */
3287 bge_find_mac_address(bgep, cidp);
3288 ethaddr_copy(cidp->vendor_addr.addr, bgep->curr_addr[0].addr);
3289 bgep->curr_addr[0].set = B_TRUE;
3290
3291 bgep->unicst_addr_total = MAC_ADDRESS_REGS_MAX;
3292 /*
3293 * The number of available addresses is one less than the
3294 * maximum, since the primary address is not advertised as
3295 * one of the multiple MAC addresses.
3296 */
3297 bgep->unicst_addr_avail = MAC_ADDRESS_REGS_MAX - 1;
3298
3299 if ((macp = mac_alloc(MAC_VERSION)) == NULL)
3300 goto attach_fail;
3301 macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
3302 macp->m_driver = bgep;
3303 macp->m_dip = devinfo;
3304 macp->m_src_addr = bgep->curr_addr[0].addr;
3305 macp->m_callbacks = &bge_m_callbacks;
3306 macp->m_min_sdu = 0;
3307 macp->m_max_sdu = cidp->ethmax_size - sizeof (struct ether_header);
3308 macp->m_margin = VLAN_TAGSZ;
3309 /*
3310 * Finally, we're ready to register ourselves with the MAC layer
3311 * interface; if this succeeds, we're all ready to start()
3312 */
3313 err = mac_register(macp, &bgep->mh);
3314 mac_free(macp);
3315 if (err != 0)
3316 goto attach_fail;
3317
3318 /*
3319 * Register a periodic handler.
3320 * bge_chip_cyclic() is invoked in kernel context.
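 *
 * For reference: the interval passed to ddi_periodic_add(9F) below
 * is expressed in nanoseconds, and DDI_IPL_0 requests ordinary
 * kernel context rather than interrupt level, which is what allows
 * bge_chip_cyclic() to take the same locks as the rest of the driver.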
3321 */
3322 bgep->periodic_id = ddi_periodic_add(bge_chip_cyclic, bgep,
3323 BGE_CYCLIC_PERIOD, DDI_IPL_0);
3324
3325 bgep->progress |= PROGRESS_READY;
3326 ASSERT(bgep->bge_guard == BGE_GUARD);
3327 #ifdef BGE_IPMI_ASF
3328 #ifdef BGE_NETCONSOLE
3329 if (bgep->asf_enabled) {
3330 mutex_enter(bgep->genlock);
3331 retval = bge_chip_start(bgep, B_TRUE);
3332 mutex_exit(bgep->genlock);
3333 if (retval != DDI_SUCCESS)
3334 goto attach_fail;
3335 }
3336 #endif
3337 #endif
3338 return (DDI_SUCCESS);
3339
3340 attach_fail:
3341 #ifdef BGE_IPMI_ASF
3342 bge_unattach(bgep, ASF_MODE_SHUTDOWN);
3343 #else
3344 bge_unattach(bgep);
3345 #endif
3346 return (DDI_FAILURE);
3347 }
3348
3349 /*
3350 * bge_suspend() -- suspend transmit/receive for powerdown
3351 */
3352 static int
3353 bge_suspend(bge_t *bgep)
3354 {
3355 /*
3356 * Stop processing and idle (powerdown) the PHY ...
3357 */
3358 mutex_enter(bgep->genlock);
3359 #ifdef BGE_IPMI_ASF
3360 /*
3361 * Power management is not currently supported in BGE. If you
3362 * want to implement it, please add the ASF/IPMI-related
3363 * code here.
3364 */
3365 #endif
3366 bge_stop(bgep);
3367 if (bge_phys_idle(bgep) != DDI_SUCCESS) {
3368 (void) bge_check_acc_handle(bgep, bgep->io_handle);
3369 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
3370 mutex_exit(bgep->genlock);
3371 return (DDI_FAILURE);
3372 }
3373 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
3374 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
3375 mutex_exit(bgep->genlock);
3376 return (DDI_FAILURE);
3377 }
3378 mutex_exit(bgep->genlock);
3379
3380 return (DDI_SUCCESS);
3381 }
3382
3383 /*
3384 * detach(9E) -- Detach a device from the system
3385 */
3386 static int
3387 bge_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
3388 {
3389 bge_t *bgep;
3390 #ifdef BGE_IPMI_ASF
3391 uint_t asf_mode;
3392 asf_mode = ASF_MODE_NONE;
3393 #endif
3394
3395 BGE_GTRACE(("bge_detach($%p, %d)", (void *)devinfo, cmd));
3396
3397 bgep = ddi_get_driver_private(devinfo);
3398
3399 switch (cmd) {
3400 default:
3401 return (DDI_FAILURE);
3402
3403 case DDI_SUSPEND:
3404 return (bge_suspend(bgep));
3405
3406 case DDI_DETACH:
3407 break;
3408 }
3409
3410 #ifdef BGE_IPMI_ASF
3411 mutex_enter(bgep->genlock);
3412 if (bgep->asf_enabled && ((bgep->asf_status == ASF_STAT_RUN) ||
3413 (bgep->asf_status == ASF_STAT_RUN_INIT))) {
3414
3415 bge_asf_update_status(bgep);
3416 if (bgep->asf_status == ASF_STAT_RUN) {
3417 bge_asf_stop_timer(bgep);
3418 }
3419 bgep->asf_status = ASF_STAT_STOP;
3420
3421 bge_asf_pre_reset_operations(bgep, BGE_SHUTDOWN_RESET);
3422
3423 if (bgep->asf_pseudostop) {
3424 bge_chip_stop(bgep, B_FALSE);
3425 bgep->bge_mac_state = BGE_MAC_STOPPED;
3426 bgep->asf_pseudostop = B_FALSE;
3427 }
3428
3429 asf_mode = ASF_MODE_POST_SHUTDOWN;
3430
3431 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK)
3432 ddi_fm_service_impact(bgep->devinfo,
3433 DDI_SERVICE_UNAFFECTED);
3434 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK)
3435 ddi_fm_service_impact(bgep->devinfo,
3436 DDI_SERVICE_UNAFFECTED);
3437 }
3438 mutex_exit(bgep->genlock);
3439 #endif
3440
3441 /*
3442 * Unregister from the GLD subsystem. This can fail, in
3443 * particular if there are DLPI style-2 streams still open -
3444 * in which case we just return failure without shutting
3445 * down chip operations.
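 *
 * (If mac_unregister() fails, the detach is abandoned before any
 * teardown has been started, so the instance remains fully
 * operational.)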
3446 */ 3447 if (mac_unregister(bgep->mh) != 0) 3448 return (DDI_FAILURE); 3449 3450 /* 3451 * All activity stopped, so we can clean up & exit 3452 */ 3453 #ifdef BGE_IPMI_ASF 3454 bge_unattach(bgep, asf_mode); 3455 #else 3456 bge_unattach(bgep); 3457 #endif 3458 return (DDI_SUCCESS); 3459 } 3460 3461 3462 /* 3463 * ========== Module Loading Data & Entry Points ========== 3464 */ 3465 3466 #undef BGE_DBG 3467 #define BGE_DBG BGE_DBG_INIT /* debug flag for this code */ 3468 3469 DDI_DEFINE_STREAM_OPS(bge_dev_ops, nulldev, nulldev, bge_attach, bge_detach, 3470 nodev, NULL, D_MP, NULL); 3471 3472 static struct modldrv bge_modldrv = { 3473 &mod_driverops, /* Type of module. This one is a driver */ 3474 bge_ident, /* short description */ 3475 &bge_dev_ops /* driver specific ops */ 3476 }; 3477 3478 static struct modlinkage modlinkage = { 3479 MODREV_1, (void *)&bge_modldrv, NULL 3480 }; 3481 3482 3483 int 3484 _info(struct modinfo *modinfop) 3485 { 3486 return (mod_info(&modlinkage, modinfop)); 3487 } 3488 3489 int 3490 _init(void) 3491 { 3492 int status; 3493 3494 mac_init_ops(&bge_dev_ops, "bge"); 3495 status = mod_install(&modlinkage); 3496 if (status == DDI_SUCCESS) 3497 mutex_init(bge_log_mutex, NULL, MUTEX_DRIVER, NULL); 3498 else 3499 mac_fini_ops(&bge_dev_ops); 3500 return (status); 3501 } 3502 3503 int 3504 _fini(void) 3505 { 3506 int status; 3507 3508 status = mod_remove(&modlinkage); 3509 if (status == DDI_SUCCESS) { 3510 mac_fini_ops(&bge_dev_ops); 3511 mutex_destroy(bge_log_mutex); 3512 } 3513 return (status); 3514 } 3515 3516 3517 /* 3518 * bge_add_intrs: 3519 * 3520 * Register FIXED or MSI interrupts. 3521 */ 3522 static int 3523 bge_add_intrs(bge_t *bgep, int intr_type) 3524 { 3525 dev_info_t *dip = bgep->devinfo; 3526 int avail, actual, intr_size, count = 0; 3527 int i, flag, ret; 3528 3529 BGE_DEBUG(("bge_add_intrs($%p, 0x%x)", (void *)bgep, intr_type)); 3530 3531 /* Get number of interrupts */ 3532 ret = ddi_intr_get_nintrs(dip, intr_type, &count); 3533 if ((ret != DDI_SUCCESS) || (count == 0)) { 3534 bge_error(bgep, "ddi_intr_get_nintrs() failure, ret: %d, " 3535 "count: %d", ret, count); 3536 3537 return (DDI_FAILURE); 3538 } 3539 3540 /* Get number of available interrupts */ 3541 ret = ddi_intr_get_navail(dip, intr_type, &avail); 3542 if ((ret != DDI_SUCCESS) || (avail == 0)) { 3543 bge_error(bgep, "ddi_intr_get_navail() failure, " 3544 "ret: %d, avail: %d\n", ret, avail); 3545 3546 return (DDI_FAILURE); 3547 } 3548 3549 if (avail < count) { 3550 BGE_DEBUG(("%s: nintrs() returned %d, navail returned %d", 3551 bgep->ifname, count, avail)); 3552 } 3553 3554 /* 3555 * BGE hardware generates only single MSI even though it claims 3556 * to support multiple MSIs. So, hard code MSI count value to 1. 
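 *
 * A sketch of the effect: with count forced to 1 and
 * DDI_INTR_ALLOC_STRICT, the ddi_intr_alloc() call below either
 * yields exactly one MSI or fails outright; in the failure case
 * bge_attach() falls back to registering a FIXED interrupt instead.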
3557 */ 3558 if (intr_type == DDI_INTR_TYPE_MSI) { 3559 count = 1; 3560 flag = DDI_INTR_ALLOC_STRICT; 3561 } else { 3562 flag = DDI_INTR_ALLOC_NORMAL; 3563 } 3564 3565 /* Allocate an array of interrupt handles */ 3566 intr_size = count * sizeof (ddi_intr_handle_t); 3567 bgep->htable = kmem_alloc(intr_size, KM_SLEEP); 3568 3569 /* Call ddi_intr_alloc() */ 3570 ret = ddi_intr_alloc(dip, bgep->htable, intr_type, 0, 3571 count, &actual, flag); 3572 3573 if ((ret != DDI_SUCCESS) || (actual == 0)) { 3574 bge_error(bgep, "ddi_intr_alloc() failed %d\n", ret); 3575 3576 kmem_free(bgep->htable, intr_size); 3577 return (DDI_FAILURE); 3578 } 3579 3580 if (actual < count) { 3581 BGE_DEBUG(("%s: Requested: %d, Received: %d", 3582 bgep->ifname, count, actual)); 3583 } 3584 3585 bgep->intr_cnt = actual; 3586 3587 /* 3588 * Get priority for first msi, assume remaining are all the same 3589 */ 3590 if ((ret = ddi_intr_get_pri(bgep->htable[0], &bgep->intr_pri)) != 3591 DDI_SUCCESS) { 3592 bge_error(bgep, "ddi_intr_get_pri() failed %d\n", ret); 3593 3594 /* Free already allocated intr */ 3595 for (i = 0; i < actual; i++) { 3596 (void) ddi_intr_free(bgep->htable[i]); 3597 } 3598 3599 kmem_free(bgep->htable, intr_size); 3600 return (DDI_FAILURE); 3601 } 3602 3603 /* Call ddi_intr_add_handler() */ 3604 for (i = 0; i < actual; i++) { 3605 if ((ret = ddi_intr_add_handler(bgep->htable[i], bge_intr, 3606 (caddr_t)bgep, (caddr_t)(uintptr_t)i)) != DDI_SUCCESS) { 3607 bge_error(bgep, "ddi_intr_add_handler() " 3608 "failed %d\n", ret); 3609 3610 /* Free already allocated intr */ 3611 for (i = 0; i < actual; i++) { 3612 (void) ddi_intr_free(bgep->htable[i]); 3613 } 3614 3615 kmem_free(bgep->htable, intr_size); 3616 return (DDI_FAILURE); 3617 } 3618 } 3619 3620 if ((ret = ddi_intr_get_cap(bgep->htable[0], &bgep->intr_cap)) 3621 != DDI_SUCCESS) { 3622 bge_error(bgep, "ddi_intr_get_cap() failed %d\n", ret); 3623 3624 for (i = 0; i < actual; i++) { 3625 (void) ddi_intr_remove_handler(bgep->htable[i]); 3626 (void) ddi_intr_free(bgep->htable[i]); 3627 } 3628 3629 kmem_free(bgep->htable, intr_size); 3630 return (DDI_FAILURE); 3631 } 3632 3633 return (DDI_SUCCESS); 3634 } 3635 3636 /* 3637 * bge_rem_intrs: 3638 * 3639 * Unregister FIXED or MSI interrupts 3640 */ 3641 static void 3642 bge_rem_intrs(bge_t *bgep) 3643 { 3644 int i; 3645 3646 BGE_DEBUG(("bge_rem_intrs($%p)", (void *)bgep)); 3647 3648 /* Call ddi_intr_remove_handler() */ 3649 for (i = 0; i < bgep->intr_cnt; i++) { 3650 (void) ddi_intr_remove_handler(bgep->htable[i]); 3651 (void) ddi_intr_free(bgep->htable[i]); 3652 } 3653 3654 kmem_free(bgep->htable, bgep->intr_cnt * sizeof (ddi_intr_handle_t)); 3655 } 3656 3657 3658 void 3659 bge_intr_enable(bge_t *bgep) 3660 { 3661 int i; 3662 3663 if (bgep->intr_cap & DDI_INTR_FLAG_BLOCK) { 3664 /* Call ddi_intr_block_enable() for MSI interrupts */ 3665 (void) ddi_intr_block_enable(bgep->htable, bgep->intr_cnt); 3666 } else { 3667 /* Call ddi_intr_enable for MSI or FIXED interrupts */ 3668 for (i = 0; i < bgep->intr_cnt; i++) { 3669 (void) ddi_intr_enable(bgep->htable[i]); 3670 } 3671 } 3672 } 3673 3674 3675 void 3676 bge_intr_disable(bge_t *bgep) 3677 { 3678 int i; 3679 3680 if (bgep->intr_cap & DDI_INTR_FLAG_BLOCK) { 3681 /* Call ddi_intr_block_disable() */ 3682 (void) ddi_intr_block_disable(bgep->htable, bgep->intr_cnt); 3683 } else { 3684 for (i = 0; i < bgep->intr_cnt; i++) { 3685 (void) ddi_intr_disable(bgep->htable[i]); 3686 } 3687 } 3688 } 3689 3690 int 3691 bge_reprogram(bge_t *bgep) 3692 { 3693 int status = 0; 3694 3695 
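	/*
	 * Editorial note: the caller must already hold genlock (see the
	 * ASSERT below); this routine pushes the current PHY and chip
	 * parameter settings back to the hardware and re-triggers the
	 * MSI when one is in use.
	 */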
ASSERT(mutex_owned(bgep->genlock)); 3696 3697 if (bge_phys_update(bgep) != DDI_SUCCESS) { 3698 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 3699 status = IOC_INVAL; 3700 } 3701 #ifdef BGE_IPMI_ASF 3702 if (bge_chip_sync(bgep, B_TRUE) == DDI_FAILURE) { 3703 #else 3704 if (bge_chip_sync(bgep) == DDI_FAILURE) { 3705 #endif 3706 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 3707 status = IOC_INVAL; 3708 } 3709 if (bgep->intr_type == DDI_INTR_TYPE_MSI) 3710 bge_chip_msi_trig(bgep); 3711 return (status); 3712 } 3713
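
/*
 * Editorial appendix (a sketch, not part of the driver): bge_attach()
 * and bge_unattach() above cooperate through the <progress> bitmask,
 * with each setup step recorded only after it succeeds:
 *
 *	err = pci_config_setup(devinfo, &bgep->cfg_handle);
 *	if (err != DDI_SUCCESS)
 *		goto attach_fail;
 *	bgep->progress |= PROGRESS_CFG;
 *
 * and teardown testing each flag before undoing that step:
 *
 *	if (bgep->progress & PROGRESS_CFG)
 *		pci_config_teardown(&bgep->cfg_handle);
 *
 * so a failure at any point unwinds exactly the steps that completed.
 */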