1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright 2009 Sun Microsystems, Inc. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27 #include "bge_impl.h" 28 #include <sys/sdt.h> 29 #include <sys/mac_provider.h> 30 #include <sys/mac.h> 31 #include <sys/mac_flow.h> 32 33 /* 34 * This is the string displayed by modinfo, etc. 35 */ 36 static char bge_ident[] = "Broadcom Gb Ethernet"; 37 /* 38 * Make sure you keep the version ID up to date! 39 */ 40 static char bge_version[] = "Broadcom Gb Ethernet v1.02"; 41 42 /* 43 * Property names 44 */ 45 static char debug_propname[] = "bge-debug-flags"; 46 static char clsize_propname[] = "cache-line-size"; 47 static char latency_propname[] = "latency-timer"; 48 static char localmac_boolname[] = "local-mac-address?"; 49 static char localmac_propname[] = "local-mac-address"; 50 static char macaddr_propname[] = "mac-address"; 51 static char subdev_propname[] = "subsystem-id"; 52 static char subven_propname[] = "subsystem-vendor-id"; 53 static char rxrings_propname[] = "bge-rx-rings"; 54 static char txrings_propname[] = "bge-tx-rings"; 55 static char fm_cap[] = "fm-capable"; 56 static char default_mtu[] = "default_mtu"; 57 58 static int bge_add_intrs(bge_t *, int); 59 static void bge_rem_intrs(bge_t *); 60 static int bge_unicst_set(void *, const uint8_t *, int); 61 62 /* 63 * Describes the chip's DMA engine 64 */ 65 static ddi_dma_attr_t dma_attr = { 66 DMA_ATTR_V0, /* dma_attr version */ 67 0x0000000000000000ull, /* dma_attr_addr_lo */ 68 0xFFFFFFFFFFFFFFFFull, /* dma_attr_addr_hi */ 69 0x00000000FFFFFFFFull, /* dma_attr_count_max */ 70 0x0000000000000001ull, /* dma_attr_align */ 71 0x00000FFF, /* dma_attr_burstsizes */ 72 0x00000001, /* dma_attr_minxfer */ 73 0x000000000000FFFFull, /* dma_attr_maxxfer */ 74 0xFFFFFFFFFFFFFFFFull, /* dma_attr_seg */ 75 1, /* dma_attr_sgllen */ 76 0x00000001, /* dma_attr_granular */ 77 DDI_DMA_FLAGERR /* dma_attr_flags */ 78 }; 79 80 /* 81 * PIO access attributes for registers 82 */ 83 static ddi_device_acc_attr_t bge_reg_accattr = { 84 DDI_DEVICE_ATTR_V0, 85 DDI_NEVERSWAP_ACC, 86 DDI_STRICTORDER_ACC, 87 DDI_FLAGERR_ACC 88 }; 89 90 /* 91 * DMA access attributes for descriptors: NOT to be byte swapped. 92 */ 93 static ddi_device_acc_attr_t bge_desc_accattr = { 94 DDI_DEVICE_ATTR_V0, 95 DDI_NEVERSWAP_ACC, 96 DDI_STRICTORDER_ACC, 97 DDI_FLAGERR_ACC 98 }; 99 100 /* 101 * DMA access attributes for data: NOT to be byte swapped. 102 */ 103 static ddi_device_acc_attr_t bge_data_accattr = { 104 DDI_DEVICE_ATTR_V0, 105 DDI_NEVERSWAP_ACC, 106 DDI_STRICTORDER_ACC 107 }; 108 109 static int bge_m_start(void *); 110 static void bge_m_stop(void *); 111 static int bge_m_promisc(void *, boolean_t); 112 static int bge_m_multicst(void *, boolean_t, const uint8_t *); 113 static void bge_m_ioctl(void *, queue_t *, mblk_t *); 114 static boolean_t bge_m_getcapab(void *, mac_capab_t, void *); 115 static int bge_unicst_set(void *, const uint8_t *, 116 int); 117 static int bge_m_setprop(void *, const char *, mac_prop_id_t, 118 uint_t, const void *); 119 static int bge_m_getprop(void *, const char *, mac_prop_id_t, 120 uint_t, uint_t, void *, uint_t *); 121 static int bge_set_priv_prop(bge_t *, const char *, uint_t, 122 const void *); 123 static int bge_get_priv_prop(bge_t *, const char *, uint_t, 124 uint_t, void *); 125 126 #define BGE_M_CALLBACK_FLAGS (MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP) 127 128 static mac_callbacks_t bge_m_callbacks = { 129 BGE_M_CALLBACK_FLAGS, 130 bge_m_stat, 131 bge_m_start, 132 bge_m_stop, 133 bge_m_promisc, 134 bge_m_multicst, 135 NULL, 136 bge_m_tx, 137 bge_m_ioctl, 138 bge_m_getcapab, 139 NULL, 140 NULL, 141 bge_m_setprop, 142 bge_m_getprop 143 }; 144 145 mac_priv_prop_t bge_priv_prop[] = { 146 {"_adv_asym_pause_cap", MAC_PROP_PERM_RW}, 147 {"_adv_pause_cap", MAC_PROP_PERM_RW} 148 }; 149 150 #define BGE_MAX_PRIV_PROPS \ 151 (sizeof (bge_priv_prop) / sizeof (mac_priv_prop_t)) 152 153 uint8_t zero_addr[6] = {0, 0, 0, 0, 0, 0}; 154 /* 155 * ========== Transmit and receive ring reinitialisation ========== 156 */ 157 158 /* 159 * These <reinit> routines each reset the specified ring to an initial 160 * state, assuming that the corresponding <init> routine has already 161 * been called exactly once. 162 */ 163 164 static void 165 bge_reinit_send_ring(send_ring_t *srp) 166 { 167 bge_queue_t *txbuf_queue; 168 bge_queue_item_t *txbuf_head; 169 sw_txbuf_t *txbuf; 170 sw_sbd_t *ssbdp; 171 uint32_t slot; 172 173 /* 174 * Reinitialise control variables ... 175 */ 176 srp->tx_flow = 0; 177 srp->tx_next = 0; 178 srp->txfill_next = 0; 179 srp->tx_free = srp->desc.nslots; 180 ASSERT(mutex_owned(srp->tc_lock)); 181 srp->tc_next = 0; 182 srp->txpkt_next = 0; 183 srp->tx_block = 0; 184 srp->tx_nobd = 0; 185 srp->tx_nobuf = 0; 186 187 /* 188 * Initialize the tx buffer push queue 189 */ 190 mutex_enter(srp->freetxbuf_lock); 191 mutex_enter(srp->txbuf_lock); 192 txbuf_queue = &srp->freetxbuf_queue; 193 txbuf_queue->head = NULL; 194 txbuf_queue->count = 0; 195 txbuf_queue->lock = srp->freetxbuf_lock; 196 srp->txbuf_push_queue = txbuf_queue; 197 198 /* 199 * Initialize the tx buffer pop queue 200 */ 201 txbuf_queue = &srp->txbuf_queue; 202 txbuf_queue->head = NULL; 203 txbuf_queue->count = 0; 204 txbuf_queue->lock = srp->txbuf_lock; 205 srp->txbuf_pop_queue = txbuf_queue; 206 txbuf_head = srp->txbuf_head; 207 txbuf = srp->txbuf; 208 for (slot = 0; slot < srp->tx_buffers; ++slot) { 209 txbuf_head->item = txbuf; 210 txbuf_head->next = txbuf_queue->head; 211 txbuf_queue->head = txbuf_head; 212 txbuf_queue->count++; 213 txbuf++; 214 txbuf_head++; 215 } 216 mutex_exit(srp->txbuf_lock); 217 mutex_exit(srp->freetxbuf_lock); 218 219 /* 220 * Zero and sync all the h/w Send Buffer Descriptors 221 */ 222 DMA_ZERO(srp->desc); 223 DMA_SYNC(srp->desc, DDI_DMA_SYNC_FORDEV); 224 bzero(srp->pktp, BGE_SEND_BUF_MAX * sizeof (*srp->pktp)); 225 ssbdp = srp->sw_sbds; 226 for (slot = 0; slot < srp->desc.nslots; ++ssbdp, ++slot) 227 ssbdp->pbuf = NULL; 228 } 229 230 static void 231 bge_reinit_recv_ring(recv_ring_t *rrp) 232 { 233 /* 234 * Reinitialise control variables ... 235 */ 236 rrp->rx_next = 0; 237 } 238 239 static void 240 bge_reinit_buff_ring(buff_ring_t *brp, uint32_t ring) 241 { 242 bge_rbd_t *hw_rbd_p; 243 sw_rbd_t *srbdp; 244 uint32_t bufsize; 245 uint32_t nslots; 246 uint32_t slot; 247 248 static uint16_t ring_type_flag[BGE_BUFF_RINGS_MAX] = { 249 RBD_FLAG_STD_RING, 250 RBD_FLAG_JUMBO_RING, 251 RBD_FLAG_MINI_RING 252 }; 253 254 /* 255 * Zero, initialise and sync all the h/w Receive Buffer Descriptors 256 * Note: all the remaining fields (<type>, <flags>, <ip_cksum>, 257 * <tcp_udp_cksum>, <error_flag>, <vlan_tag>, and <reserved>) 258 * should be zeroed, and so don't need to be set up specifically 259 * once the whole area has been cleared. 260 */ 261 DMA_ZERO(brp->desc); 262 263 hw_rbd_p = DMA_VPTR(brp->desc); 264 nslots = brp->desc.nslots; 265 ASSERT(brp->buf[0].nslots == nslots/BGE_SPLIT); 266 bufsize = brp->buf[0].size; 267 srbdp = brp->sw_rbds; 268 for (slot = 0; slot < nslots; ++hw_rbd_p, ++srbdp, ++slot) { 269 hw_rbd_p->host_buf_addr = srbdp->pbuf.cookie.dmac_laddress; 270 hw_rbd_p->index = (uint16_t)slot; 271 hw_rbd_p->len = (uint16_t)bufsize; 272 hw_rbd_p->opaque = srbdp->pbuf.token; 273 hw_rbd_p->flags |= ring_type_flag[ring]; 274 } 275 276 DMA_SYNC(brp->desc, DDI_DMA_SYNC_FORDEV); 277 278 /* 279 * Finally, reinitialise the ring control variables ... 280 */ 281 brp->rf_next = (nslots != 0) ? (nslots-1) : 0; 282 } 283 284 /* 285 * Reinitialize all rings 286 */ 287 static void 288 bge_reinit_rings(bge_t *bgep) 289 { 290 uint32_t ring; 291 292 ASSERT(mutex_owned(bgep->genlock)); 293 294 /* 295 * Send Rings ... 296 */ 297 for (ring = 0; ring < bgep->chipid.tx_rings; ++ring) 298 bge_reinit_send_ring(&bgep->send[ring]); 299 300 /* 301 * Receive Return Rings ... 302 */ 303 for (ring = 0; ring < bgep->chipid.rx_rings; ++ring) 304 bge_reinit_recv_ring(&bgep->recv[ring]); 305 306 /* 307 * Receive Producer Rings ... 308 */ 309 for (ring = 0; ring < BGE_BUFF_RINGS_USED; ++ring) 310 bge_reinit_buff_ring(&bgep->buff[ring], ring); 311 } 312 313 /* 314 * ========== Internal state management entry points ========== 315 */ 316 317 #undef BGE_DBG 318 #define BGE_DBG BGE_DBG_NEMO /* debug flag for this code */ 319 320 /* 321 * These routines provide all the functionality required by the 322 * corresponding GLD entry points, but don't update the GLD state 323 * so they can be called internally without disturbing our record 324 * of what GLD thinks we should be doing ... 325 */ 326 327 /* 328 * bge_reset() -- reset h/w & rings to initial state 329 */ 330 static int 331 #ifdef BGE_IPMI_ASF 332 bge_reset(bge_t *bgep, uint_t asf_mode) 333 #else 334 bge_reset(bge_t *bgep) 335 #endif 336 { 337 uint32_t ring; 338 int retval; 339 340 BGE_TRACE(("bge_reset($%p)", (void *)bgep)); 341 342 ASSERT(mutex_owned(bgep->genlock)); 343 344 /* 345 * Grab all the other mutexes in the world (this should 346 * ensure no other threads are manipulating driver state) 347 */ 348 for (ring = 0; ring < BGE_RECV_RINGS_MAX; ++ring) 349 mutex_enter(bgep->recv[ring].rx_lock); 350 for (ring = 0; ring < BGE_BUFF_RINGS_MAX; ++ring) 351 mutex_enter(bgep->buff[ring].rf_lock); 352 rw_enter(bgep->errlock, RW_WRITER); 353 for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring) 354 mutex_enter(bgep->send[ring].tx_lock); 355 for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring) 356 mutex_enter(bgep->send[ring].tc_lock); 357 358 #ifdef BGE_IPMI_ASF 359 retval = bge_chip_reset(bgep, B_TRUE, asf_mode); 360 #else 361 retval = bge_chip_reset(bgep, B_TRUE); 362 #endif 363 bge_reinit_rings(bgep); 364 365 /* 366 * Free the world ... 367 */ 368 for (ring = BGE_SEND_RINGS_MAX; ring-- > 0; ) 369 mutex_exit(bgep->send[ring].tc_lock); 370 for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring) 371 mutex_exit(bgep->send[ring].tx_lock); 372 rw_exit(bgep->errlock); 373 for (ring = BGE_BUFF_RINGS_MAX; ring-- > 0; ) 374 mutex_exit(bgep->buff[ring].rf_lock); 375 for (ring = BGE_RECV_RINGS_MAX; ring-- > 0; ) 376 mutex_exit(bgep->recv[ring].rx_lock); 377 378 BGE_DEBUG(("bge_reset($%p) done", (void *)bgep)); 379 return (retval); 380 } 381 382 /* 383 * bge_stop() -- stop processing, don't reset h/w or rings 384 */ 385 static void 386 bge_stop(bge_t *bgep) 387 { 388 BGE_TRACE(("bge_stop($%p)", (void *)bgep)); 389 390 ASSERT(mutex_owned(bgep->genlock)); 391 392 #ifdef BGE_IPMI_ASF 393 if (bgep->asf_enabled) { 394 bgep->asf_pseudostop = B_TRUE; 395 } else { 396 #endif 397 bge_chip_stop(bgep, B_FALSE); 398 #ifdef BGE_IPMI_ASF 399 } 400 #endif 401 402 BGE_DEBUG(("bge_stop($%p) done", (void *)bgep)); 403 } 404 405 /* 406 * bge_start() -- start transmitting/receiving 407 */ 408 static int 409 bge_start(bge_t *bgep, boolean_t reset_phys) 410 { 411 int retval; 412 413 BGE_TRACE(("bge_start($%p, %d)", (void *)bgep, reset_phys)); 414 415 ASSERT(mutex_owned(bgep->genlock)); 416 417 /* 418 * Start chip processing, including enabling interrupts 419 */ 420 retval = bge_chip_start(bgep, reset_phys); 421 422 BGE_DEBUG(("bge_start($%p, %d) done", (void *)bgep, reset_phys)); 423 return (retval); 424 } 425 426 /* 427 * bge_restart - restart transmitting/receiving after error or suspend 428 */ 429 int 430 bge_restart(bge_t *bgep, boolean_t reset_phys) 431 { 432 int retval = DDI_SUCCESS; 433 ASSERT(mutex_owned(bgep->genlock)); 434 435 #ifdef BGE_IPMI_ASF 436 if (bgep->asf_enabled) { 437 if (bge_reset(bgep, ASF_MODE_POST_INIT) != DDI_SUCCESS) 438 retval = DDI_FAILURE; 439 } else 440 if (bge_reset(bgep, ASF_MODE_NONE) != DDI_SUCCESS) 441 retval = DDI_FAILURE; 442 #else 443 if (bge_reset(bgep) != DDI_SUCCESS) 444 retval = DDI_FAILURE; 445 #endif 446 if (bgep->bge_mac_state == BGE_MAC_STARTED) { 447 if (bge_start(bgep, reset_phys) != DDI_SUCCESS) 448 retval = DDI_FAILURE; 449 bgep->watchdog = 0; 450 ddi_trigger_softintr(bgep->drain_id); 451 } 452 453 BGE_DEBUG(("bge_restart($%p, %d) done", (void *)bgep, reset_phys)); 454 return (retval); 455 } 456 457 458 /* 459 * ========== Nemo-required management entry points ========== 460 */ 461 462 #undef BGE_DBG 463 #define BGE_DBG BGE_DBG_NEMO /* debug flag for this code */ 464 465 /* 466 * bge_m_stop() -- stop transmitting/receiving 467 */ 468 static void 469 bge_m_stop(void *arg) 470 { 471 bge_t *bgep = arg; /* private device info */ 472 send_ring_t *srp; 473 uint32_t ring; 474 475 BGE_TRACE(("bge_m_stop($%p)", arg)); 476 477 /* 478 * Just stop processing, then record new GLD state 479 */ 480 mutex_enter(bgep->genlock); 481 if (!(bgep->progress & PROGRESS_INTR)) { 482 /* can happen during autorecovery */ 483 bgep->bge_chip_state = BGE_CHIP_STOPPED; 484 } else 485 bge_stop(bgep); 486 487 bgep->link_update_timer = 0; 488 bgep->link_state = LINK_STATE_UNKNOWN; 489 mac_link_update(bgep->mh, bgep->link_state); 490 491 /* 492 * Free the possible tx buffers allocated in tx process. 493 */ 494 #ifdef BGE_IPMI_ASF 495 if (!bgep->asf_pseudostop) 496 #endif 497 { 498 rw_enter(bgep->errlock, RW_WRITER); 499 for (ring = 0; ring < bgep->chipid.tx_rings; ++ring) { 500 srp = &bgep->send[ring]; 501 mutex_enter(srp->tx_lock); 502 if (srp->tx_array > 1) 503 bge_free_txbuf_arrays(srp); 504 mutex_exit(srp->tx_lock); 505 } 506 rw_exit(bgep->errlock); 507 } 508 bgep->bge_mac_state = BGE_MAC_STOPPED; 509 BGE_DEBUG(("bge_m_stop($%p) done", arg)); 510 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) 511 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_UNAFFECTED); 512 mutex_exit(bgep->genlock); 513 } 514 515 /* 516 * bge_m_start() -- start transmitting/receiving 517 */ 518 static int 519 bge_m_start(void *arg) 520 { 521 bge_t *bgep = arg; /* private device info */ 522 523 BGE_TRACE(("bge_m_start($%p)", arg)); 524 525 /* 526 * Start processing and record new GLD state 527 */ 528 mutex_enter(bgep->genlock); 529 if (!(bgep->progress & PROGRESS_INTR)) { 530 /* can happen during autorecovery */ 531 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 532 mutex_exit(bgep->genlock); 533 return (EIO); 534 } 535 #ifdef BGE_IPMI_ASF 536 if (bgep->asf_enabled) { 537 if ((bgep->asf_status == ASF_STAT_RUN) && 538 (bgep->asf_pseudostop)) { 539 bgep->bge_mac_state = BGE_MAC_STARTED; 540 mutex_exit(bgep->genlock); 541 return (0); 542 } 543 } 544 if (bge_reset(bgep, ASF_MODE_INIT) != DDI_SUCCESS) { 545 #else 546 if (bge_reset(bgep) != DDI_SUCCESS) { 547 #endif 548 (void) bge_check_acc_handle(bgep, bgep->cfg_handle); 549 (void) bge_check_acc_handle(bgep, bgep->io_handle); 550 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 551 mutex_exit(bgep->genlock); 552 return (EIO); 553 } 554 if (bge_start(bgep, B_TRUE) != DDI_SUCCESS) { 555 (void) bge_check_acc_handle(bgep, bgep->cfg_handle); 556 (void) bge_check_acc_handle(bgep, bgep->io_handle); 557 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 558 mutex_exit(bgep->genlock); 559 return (EIO); 560 } 561 bgep->bge_mac_state = BGE_MAC_STARTED; 562 BGE_DEBUG(("bge_m_start($%p) done", arg)); 563 564 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) { 565 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 566 mutex_exit(bgep->genlock); 567 return (EIO); 568 } 569 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) { 570 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 571 mutex_exit(bgep->genlock); 572 return (EIO); 573 } 574 #ifdef BGE_IPMI_ASF 575 if (bgep->asf_enabled) { 576 if (bgep->asf_status != ASF_STAT_RUN) { 577 /* start ASF heart beat */ 578 bgep->asf_timeout_id = timeout(bge_asf_heartbeat, 579 (void *)bgep, 580 drv_usectohz(BGE_ASF_HEARTBEAT_INTERVAL)); 581 bgep->asf_status = ASF_STAT_RUN; 582 } 583 } 584 #endif 585 mutex_exit(bgep->genlock); 586 587 return (0); 588 } 589 590 /* 591 * bge_unicst_set() -- set the physical network address 592 */ 593 static int 594 bge_unicst_set(void *arg, const uint8_t *macaddr, int slot) 595 { 596 bge_t *bgep = arg; /* private device info */ 597 598 BGE_TRACE(("bge_m_unicst_set($%p, %s)", arg, 599 ether_sprintf((void *)macaddr))); 600 /* 601 * Remember the new current address in the driver state 602 * Sync the chip's idea of the address too ... 603 */ 604 mutex_enter(bgep->genlock); 605 if (!(bgep->progress & PROGRESS_INTR)) { 606 /* can happen during autorecovery */ 607 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 608 mutex_exit(bgep->genlock); 609 return (EIO); 610 } 611 ethaddr_copy(macaddr, bgep->curr_addr[slot].addr); 612 #ifdef BGE_IPMI_ASF 613 if (bge_chip_sync(bgep, B_FALSE) == DDI_FAILURE) { 614 #else 615 if (bge_chip_sync(bgep) == DDI_FAILURE) { 616 #endif 617 (void) bge_check_acc_handle(bgep, bgep->cfg_handle); 618 (void) bge_check_acc_handle(bgep, bgep->io_handle); 619 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 620 mutex_exit(bgep->genlock); 621 return (EIO); 622 } 623 #ifdef BGE_IPMI_ASF 624 if (bgep->asf_enabled) { 625 /* 626 * The above bge_chip_sync() function wrote the ethernet MAC 627 * addresses registers which destroyed the IPMI/ASF sideband. 628 * Here, we have to reset chip to make IPMI/ASF sideband work. 629 */ 630 if (bgep->asf_status == ASF_STAT_RUN) { 631 /* 632 * We must stop ASF heart beat before bge_chip_stop(), 633 * otherwise some computers (ex. IBM HS20 blade server) 634 * may crash. 635 */ 636 bge_asf_update_status(bgep); 637 bge_asf_stop_timer(bgep); 638 bgep->asf_status = ASF_STAT_STOP; 639 640 bge_asf_pre_reset_operations(bgep, BGE_INIT_RESET); 641 } 642 bge_chip_stop(bgep, B_FALSE); 643 644 if (bge_restart(bgep, B_FALSE) == DDI_FAILURE) { 645 (void) bge_check_acc_handle(bgep, bgep->cfg_handle); 646 (void) bge_check_acc_handle(bgep, bgep->io_handle); 647 ddi_fm_service_impact(bgep->devinfo, 648 DDI_SERVICE_DEGRADED); 649 mutex_exit(bgep->genlock); 650 return (EIO); 651 } 652 653 /* 654 * Start our ASF heartbeat counter as soon as possible. 655 */ 656 if (bgep->asf_status != ASF_STAT_RUN) { 657 /* start ASF heart beat */ 658 bgep->asf_timeout_id = timeout(bge_asf_heartbeat, 659 (void *)bgep, 660 drv_usectohz(BGE_ASF_HEARTBEAT_INTERVAL)); 661 bgep->asf_status = ASF_STAT_RUN; 662 } 663 } 664 #endif 665 BGE_DEBUG(("bge_m_unicst_set($%p) done", arg)); 666 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) { 667 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 668 mutex_exit(bgep->genlock); 669 return (EIO); 670 } 671 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) { 672 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 673 mutex_exit(bgep->genlock); 674 return (EIO); 675 } 676 mutex_exit(bgep->genlock); 677 678 return (0); 679 } 680 681 extern void bge_wake_factotum(bge_t *); 682 683 static boolean_t 684 bge_param_locked(mac_prop_id_t pr_num) 685 { 686 /* 687 * All adv_* parameters are locked (read-only) while 688 * the device is in any sort of loopback mode ... 689 */ 690 switch (pr_num) { 691 case MAC_PROP_ADV_1000FDX_CAP: 692 case MAC_PROP_EN_1000FDX_CAP: 693 case MAC_PROP_ADV_1000HDX_CAP: 694 case MAC_PROP_EN_1000HDX_CAP: 695 case MAC_PROP_ADV_100FDX_CAP: 696 case MAC_PROP_EN_100FDX_CAP: 697 case MAC_PROP_ADV_100HDX_CAP: 698 case MAC_PROP_EN_100HDX_CAP: 699 case MAC_PROP_ADV_10FDX_CAP: 700 case MAC_PROP_EN_10FDX_CAP: 701 case MAC_PROP_ADV_10HDX_CAP: 702 case MAC_PROP_EN_10HDX_CAP: 703 case MAC_PROP_AUTONEG: 704 case MAC_PROP_FLOWCTRL: 705 return (B_TRUE); 706 } 707 return (B_FALSE); 708 } 709 /* 710 * callback functions for set/get of properties 711 */ 712 static int 713 bge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num, 714 uint_t pr_valsize, const void *pr_val) 715 { 716 bge_t *bgep = barg; 717 int err = 0; 718 uint32_t cur_mtu, new_mtu; 719 uint_t maxsdu; 720 link_flowctrl_t fl; 721 722 mutex_enter(bgep->genlock); 723 if (bgep->param_loop_mode != BGE_LOOP_NONE && 724 bge_param_locked(pr_num)) { 725 /* 726 * All adv_* parameters are locked (read-only) 727 * while the device is in any sort of loopback mode. 728 */ 729 mutex_exit(bgep->genlock); 730 return (EBUSY); 731 } 732 if ((bgep->chipid.flags & CHIP_FLAG_SERDES) && 733 ((pr_num == MAC_PROP_EN_100FDX_CAP) || 734 (pr_num == MAC_PROP_EN_100HDX_CAP) || 735 (pr_num == MAC_PROP_EN_10FDX_CAP) || 736 (pr_num == MAC_PROP_EN_10HDX_CAP))) { 737 /* 738 * these properties are read/write on copper, 739 * read-only and 0 on serdes 740 */ 741 mutex_exit(bgep->genlock); 742 return (ENOTSUP); 743 } 744 if (DEVICE_5906_SERIES_CHIPSETS(bgep) && 745 ((pr_num == MAC_PROP_EN_1000FDX_CAP) || 746 (pr_num == MAC_PROP_EN_1000HDX_CAP))) { 747 mutex_exit(bgep->genlock); 748 return (ENOTSUP); 749 } 750 751 switch (pr_num) { 752 case MAC_PROP_EN_1000FDX_CAP: 753 bgep->param_en_1000fdx = *(uint8_t *)pr_val; 754 bgep->param_adv_1000fdx = *(uint8_t *)pr_val; 755 goto reprogram; 756 case MAC_PROP_EN_1000HDX_CAP: 757 bgep->param_en_1000hdx = *(uint8_t *)pr_val; 758 bgep->param_adv_1000hdx = *(uint8_t *)pr_val; 759 goto reprogram; 760 case MAC_PROP_EN_100FDX_CAP: 761 bgep->param_en_100fdx = *(uint8_t *)pr_val; 762 bgep->param_adv_100fdx = *(uint8_t *)pr_val; 763 goto reprogram; 764 case MAC_PROP_EN_100HDX_CAP: 765 bgep->param_en_100hdx = *(uint8_t *)pr_val; 766 bgep->param_adv_100hdx = *(uint8_t *)pr_val; 767 goto reprogram; 768 case MAC_PROP_EN_10FDX_CAP: 769 bgep->param_en_10fdx = *(uint8_t *)pr_val; 770 bgep->param_adv_10fdx = *(uint8_t *)pr_val; 771 goto reprogram; 772 case MAC_PROP_EN_10HDX_CAP: 773 bgep->param_en_10hdx = *(uint8_t *)pr_val; 774 bgep->param_adv_10hdx = *(uint8_t *)pr_val; 775 reprogram: 776 if (err == 0 && bge_reprogram(bgep) == IOC_INVAL) 777 err = EINVAL; 778 break; 779 case MAC_PROP_ADV_1000FDX_CAP: 780 case MAC_PROP_ADV_1000HDX_CAP: 781 case MAC_PROP_ADV_100FDX_CAP: 782 case MAC_PROP_ADV_100HDX_CAP: 783 case MAC_PROP_ADV_10FDX_CAP: 784 case MAC_PROP_ADV_10HDX_CAP: 785 case MAC_PROP_STATUS: 786 case MAC_PROP_SPEED: 787 case MAC_PROP_DUPLEX: 788 err = ENOTSUP; /* read-only prop. Can't set this */ 789 break; 790 case MAC_PROP_AUTONEG: 791 bgep->param_adv_autoneg = *(uint8_t *)pr_val; 792 if (bge_reprogram(bgep) == IOC_INVAL) 793 err = EINVAL; 794 break; 795 case MAC_PROP_MTU: 796 cur_mtu = bgep->chipid.default_mtu; 797 bcopy(pr_val, &new_mtu, sizeof (new_mtu)); 798 799 if (new_mtu == cur_mtu) { 800 err = 0; 801 break; 802 } 803 if (new_mtu < BGE_DEFAULT_MTU || 804 new_mtu > BGE_MAXIMUM_MTU) { 805 err = EINVAL; 806 break; 807 } 808 if ((new_mtu > BGE_DEFAULT_MTU) && 809 (bgep->chipid.flags & CHIP_FLAG_NO_JUMBO)) { 810 err = EINVAL; 811 break; 812 } 813 if (bgep->bge_mac_state == BGE_MAC_STARTED) { 814 err = EBUSY; 815 break; 816 } 817 bgep->chipid.default_mtu = new_mtu; 818 if (bge_chip_id_init(bgep)) { 819 err = EINVAL; 820 break; 821 } 822 maxsdu = bgep->chipid.ethmax_size - 823 sizeof (struct ether_header); 824 err = mac_maxsdu_update(bgep->mh, maxsdu); 825 if (err == 0) { 826 bgep->bge_dma_error = B_TRUE; 827 bgep->manual_reset = B_TRUE; 828 bge_chip_stop(bgep, B_TRUE); 829 bge_wake_factotum(bgep); 830 err = 0; 831 } 832 break; 833 case MAC_PROP_FLOWCTRL: 834 bcopy(pr_val, &fl, sizeof (fl)); 835 switch (fl) { 836 default: 837 err = ENOTSUP; 838 break; 839 case LINK_FLOWCTRL_NONE: 840 bgep->param_adv_pause = 0; 841 bgep->param_adv_asym_pause = 0; 842 843 bgep->param_link_rx_pause = B_FALSE; 844 bgep->param_link_tx_pause = B_FALSE; 845 break; 846 case LINK_FLOWCTRL_RX: 847 bgep->param_adv_pause = 1; 848 bgep->param_adv_asym_pause = 1; 849 850 bgep->param_link_rx_pause = B_TRUE; 851 bgep->param_link_tx_pause = B_FALSE; 852 break; 853 case LINK_FLOWCTRL_TX: 854 bgep->param_adv_pause = 0; 855 bgep->param_adv_asym_pause = 1; 856 857 bgep->param_link_rx_pause = B_FALSE; 858 bgep->param_link_tx_pause = B_TRUE; 859 break; 860 case LINK_FLOWCTRL_BI: 861 bgep->param_adv_pause = 1; 862 bgep->param_adv_asym_pause = 0; 863 864 bgep->param_link_rx_pause = B_TRUE; 865 bgep->param_link_tx_pause = B_TRUE; 866 break; 867 } 868 869 if (err == 0) { 870 if (bge_reprogram(bgep) == IOC_INVAL) 871 err = EINVAL; 872 } 873 874 break; 875 case MAC_PROP_PRIVATE: 876 err = bge_set_priv_prop(bgep, pr_name, pr_valsize, 877 pr_val); 878 break; 879 default: 880 err = ENOTSUP; 881 break; 882 } 883 mutex_exit(bgep->genlock); 884 return (err); 885 } 886 887 /* ARGSUSED */ 888 static int 889 bge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num, 890 uint_t pr_flags, uint_t pr_valsize, void *pr_val, uint_t *perm) 891 { 892 bge_t *bgep = barg; 893 int err = 0; 894 link_flowctrl_t fl; 895 uint64_t speed; 896 int flags = bgep->chipid.flags; 897 boolean_t is_default = (pr_flags & MAC_PROP_DEFAULT); 898 899 if (pr_valsize == 0) 900 return (EINVAL); 901 bzero(pr_val, pr_valsize); 902 903 *perm = MAC_PROP_PERM_RW; 904 905 mutex_enter(bgep->genlock); 906 if ((bgep->param_loop_mode != BGE_LOOP_NONE && 907 bge_param_locked(pr_num)) || 908 ((bgep->chipid.flags & CHIP_FLAG_SERDES) && 909 ((pr_num == MAC_PROP_EN_100FDX_CAP) || 910 (pr_num == MAC_PROP_EN_100HDX_CAP) || 911 (pr_num == MAC_PROP_EN_10FDX_CAP) || 912 (pr_num == MAC_PROP_EN_10HDX_CAP))) || 913 (DEVICE_5906_SERIES_CHIPSETS(bgep) && 914 ((pr_num == MAC_PROP_EN_1000FDX_CAP) || 915 (pr_num == MAC_PROP_EN_1000HDX_CAP)))) 916 *perm = MAC_PROP_PERM_READ; 917 mutex_exit(bgep->genlock); 918 919 switch (pr_num) { 920 case MAC_PROP_DUPLEX: 921 *perm = MAC_PROP_PERM_READ; 922 if (pr_valsize < sizeof (link_duplex_t)) 923 return (EINVAL); 924 bcopy(&bgep->param_link_duplex, pr_val, 925 sizeof (link_duplex_t)); 926 break; 927 case MAC_PROP_SPEED: 928 *perm = MAC_PROP_PERM_READ; 929 if (pr_valsize < sizeof (speed)) 930 return (EINVAL); 931 speed = bgep->param_link_speed * 1000000ull; 932 bcopy(&speed, pr_val, sizeof (speed)); 933 break; 934 case MAC_PROP_STATUS: 935 *perm = MAC_PROP_PERM_READ; 936 if (pr_valsize < sizeof (link_state_t)) 937 return (EINVAL); 938 bcopy(&bgep->link_state, pr_val, 939 sizeof (link_state_t)); 940 break; 941 case MAC_PROP_AUTONEG: 942 if (is_default) 943 *(uint8_t *)pr_val = 1; 944 else 945 *(uint8_t *)pr_val = bgep->param_adv_autoneg; 946 break; 947 case MAC_PROP_FLOWCTRL: 948 if (pr_valsize < sizeof (fl)) 949 return (EINVAL); 950 if (is_default) { 951 fl = LINK_FLOWCTRL_BI; 952 bcopy(&fl, pr_val, sizeof (fl)); 953 break; 954 } 955 956 if (bgep->param_link_rx_pause && 957 !bgep->param_link_tx_pause) 958 fl = LINK_FLOWCTRL_RX; 959 960 if (!bgep->param_link_rx_pause && 961 !bgep->param_link_tx_pause) 962 fl = LINK_FLOWCTRL_NONE; 963 964 if (!bgep->param_link_rx_pause && 965 bgep->param_link_tx_pause) 966 fl = LINK_FLOWCTRL_TX; 967 968 if (bgep->param_link_rx_pause && 969 bgep->param_link_tx_pause) 970 fl = LINK_FLOWCTRL_BI; 971 bcopy(&fl, pr_val, sizeof (fl)); 972 break; 973 case MAC_PROP_ADV_1000FDX_CAP: 974 *perm = MAC_PROP_PERM_READ; 975 if (is_default) { 976 if (DEVICE_5906_SERIES_CHIPSETS(bgep)) 977 *(uint8_t *)pr_val = 0; 978 else 979 *(uint8_t *)pr_val = 1; 980 } 981 else 982 *(uint8_t *)pr_val = bgep->param_adv_1000fdx; 983 break; 984 case MAC_PROP_EN_1000FDX_CAP: 985 if (is_default) { 986 if (DEVICE_5906_SERIES_CHIPSETS(bgep)) 987 *(uint8_t *)pr_val = 0; 988 else 989 *(uint8_t *)pr_val = 1; 990 } 991 else 992 *(uint8_t *)pr_val = bgep->param_en_1000fdx; 993 break; 994 case MAC_PROP_ADV_1000HDX_CAP: 995 *perm = MAC_PROP_PERM_READ; 996 if (is_default) { 997 if (DEVICE_5906_SERIES_CHIPSETS(bgep)) 998 *(uint8_t *)pr_val = 0; 999 else 1000 *(uint8_t *)pr_val = 1; 1001 } 1002 else 1003 *(uint8_t *)pr_val = bgep->param_adv_1000hdx; 1004 break; 1005 case MAC_PROP_EN_1000HDX_CAP: 1006 if (is_default) { 1007 if (DEVICE_5906_SERIES_CHIPSETS(bgep)) 1008 *(uint8_t *)pr_val = 0; 1009 else 1010 *(uint8_t *)pr_val = 1; 1011 } 1012 else 1013 *(uint8_t *)pr_val = bgep->param_en_1000hdx; 1014 break; 1015 case MAC_PROP_ADV_100FDX_CAP: 1016 *perm = MAC_PROP_PERM_READ; 1017 if (is_default) { 1018 *(uint8_t *)pr_val = 1019 ((flags & CHIP_FLAG_SERDES) ? 0 : 1); 1020 } else { 1021 *(uint8_t *)pr_val = bgep->param_adv_100fdx; 1022 } 1023 break; 1024 case MAC_PROP_EN_100FDX_CAP: 1025 if (is_default) { 1026 *(uint8_t *)pr_val = 1027 ((flags & CHIP_FLAG_SERDES) ? 0 : 1); 1028 } else { 1029 *(uint8_t *)pr_val = bgep->param_en_100fdx; 1030 } 1031 break; 1032 case MAC_PROP_ADV_100HDX_CAP: 1033 *perm = MAC_PROP_PERM_READ; 1034 if (is_default) { 1035 *(uint8_t *)pr_val = 1036 ((flags & CHIP_FLAG_SERDES) ? 0 : 1); 1037 } else { 1038 *(uint8_t *)pr_val = bgep->param_adv_100hdx; 1039 } 1040 break; 1041 case MAC_PROP_EN_100HDX_CAP: 1042 if (is_default) { 1043 *(uint8_t *)pr_val = 1044 ((flags & CHIP_FLAG_SERDES) ? 0 : 1); 1045 } else { 1046 *(uint8_t *)pr_val = bgep->param_en_100hdx; 1047 } 1048 break; 1049 case MAC_PROP_ADV_10FDX_CAP: 1050 *perm = MAC_PROP_PERM_READ; 1051 if (is_default) { 1052 *(uint8_t *)pr_val = 1053 ((flags & CHIP_FLAG_SERDES) ? 0 : 1); 1054 } else { 1055 *(uint8_t *)pr_val = bgep->param_adv_10fdx; 1056 } 1057 break; 1058 case MAC_PROP_EN_10FDX_CAP: 1059 if (is_default) { 1060 *(uint8_t *)pr_val = 1061 ((flags & CHIP_FLAG_SERDES) ? 0 : 1); 1062 } else { 1063 *(uint8_t *)pr_val = bgep->param_en_10fdx; 1064 } 1065 break; 1066 case MAC_PROP_ADV_10HDX_CAP: 1067 *perm = MAC_PROP_PERM_READ; 1068 if (is_default) { 1069 *(uint8_t *)pr_val = 1070 ((flags & CHIP_FLAG_SERDES) ? 0 : 1); 1071 } else { 1072 *(uint8_t *)pr_val = bgep->param_adv_10hdx; 1073 } 1074 break; 1075 case MAC_PROP_EN_10HDX_CAP: 1076 if (is_default) { 1077 *(uint8_t *)pr_val = 1078 ((flags & CHIP_FLAG_SERDES) ? 0 : 1); 1079 } else { 1080 *(uint8_t *)pr_val = bgep->param_en_10hdx; 1081 } 1082 break; 1083 case MAC_PROP_ADV_100T4_CAP: 1084 case MAC_PROP_EN_100T4_CAP: 1085 *perm = MAC_PROP_PERM_READ; 1086 *(uint8_t *)pr_val = 0; 1087 break; 1088 case MAC_PROP_PRIVATE: 1089 err = bge_get_priv_prop(bgep, pr_name, pr_flags, 1090 pr_valsize, pr_val); 1091 return (err); 1092 default: 1093 return (ENOTSUP); 1094 } 1095 return (0); 1096 } 1097 1098 /* ARGSUSED */ 1099 static int 1100 bge_set_priv_prop(bge_t *bgep, const char *pr_name, uint_t pr_valsize, 1101 const void *pr_val) 1102 { 1103 int err = 0; 1104 long result; 1105 1106 if (strcmp(pr_name, "_adv_pause_cap") == 0) { 1107 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 1108 if (result > 1 || result < 0) { 1109 err = EINVAL; 1110 } else { 1111 bgep->param_adv_pause = (uint32_t)result; 1112 if (bge_reprogram(bgep) == IOC_INVAL) 1113 err = EINVAL; 1114 } 1115 return (err); 1116 } 1117 if (strcmp(pr_name, "_adv_asym_pause_cap") == 0) { 1118 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 1119 if (result > 1 || result < 0) { 1120 err = EINVAL; 1121 } else { 1122 bgep->param_adv_asym_pause = (uint32_t)result; 1123 if (bge_reprogram(bgep) == IOC_INVAL) 1124 err = EINVAL; 1125 } 1126 return (err); 1127 } 1128 if (strcmp(pr_name, "_drain_max") == 0) { 1129 1130 /* 1131 * on the Tx side, we need to update the h/w register for 1132 * real packet transmission per packet. The drain_max parameter 1133 * is used to reduce the register access. This parameter 1134 * controls the max number of packets that we will hold before 1135 * updating the bge h/w to trigger h/w transmit. The bge 1136 * chipset usually has a max of 512 Tx descriptors, thus 1137 * the upper bound on drain_max is 512. 1138 */ 1139 if (pr_val == NULL) { 1140 err = EINVAL; 1141 return (err); 1142 } 1143 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 1144 if (result > 512 || result < 1) 1145 err = EINVAL; 1146 else { 1147 bgep->param_drain_max = (uint32_t)result; 1148 if (bge_reprogram(bgep) == IOC_INVAL) 1149 err = EINVAL; 1150 } 1151 return (err); 1152 } 1153 if (strcmp(pr_name, "_msi_cnt") == 0) { 1154 1155 if (pr_val == NULL) { 1156 err = EINVAL; 1157 return (err); 1158 } 1159 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 1160 if (result > 7 || result < 0) 1161 err = EINVAL; 1162 else { 1163 bgep->param_msi_cnt = (uint32_t)result; 1164 if (bge_reprogram(bgep) == IOC_INVAL) 1165 err = EINVAL; 1166 } 1167 return (err); 1168 } 1169 if (strcmp(pr_name, "_intr_coalesce_blank_time") == 0) { 1170 if (ddi_strtol(pr_val, (char **)NULL, 0, &result) != 0) 1171 return (EINVAL); 1172 1173 bgep->chipid.rx_ticks_norm = (uint32_t)result; 1174 return (0); 1175 } 1176 1177 if (strcmp(pr_name, "_intr_coalesce_pkt_cnt") == 0) { 1178 if (ddi_strtol(pr_val, (char **)NULL, 0, &result) != 0) 1179 return (EINVAL); 1180 1181 bgep->chipid.rx_count_norm = (uint32_t)result; 1182 return (0); 1183 } 1184 return (ENOTSUP); 1185 } 1186 1187 static int 1188 bge_get_priv_prop(bge_t *bge, const char *pr_name, uint_t pr_flags, 1189 uint_t pr_valsize, void *pr_val) 1190 { 1191 int err = ENOTSUP; 1192 boolean_t is_default = (pr_flags & MAC_PROP_DEFAULT); 1193 int value; 1194 1195 if (strcmp(pr_name, "_adv_pause_cap") == 0) { 1196 value = (is_default? 1 : bge->param_adv_pause); 1197 err = 0; 1198 goto done; 1199 } 1200 if (strcmp(pr_name, "_adv_asym_pause_cap") == 0) { 1201 value = (is_default? 1 : bge->param_adv_asym_pause); 1202 err = 0; 1203 goto done; 1204 } 1205 if (strcmp(pr_name, "_drain_max") == 0) { 1206 value = (is_default? 64 : bge->param_drain_max); 1207 err = 0; 1208 goto done; 1209 } 1210 if (strcmp(pr_name, "_msi_cnt") == 0) { 1211 value = (is_default? 0 : bge->param_msi_cnt); 1212 err = 0; 1213 goto done; 1214 } 1215 1216 if (strcmp(pr_name, "_intr_coalesce_blank_time") == 0) { 1217 value = (is_default? bge_rx_ticks_norm : 1218 bge->chipid.rx_ticks_norm); 1219 err = 0; 1220 goto done; 1221 } 1222 1223 if (strcmp(pr_name, "_intr_coalesce_pkt_cnt") == 0) { 1224 value = (is_default? bge_rx_count_norm : 1225 bge->chipid.rx_count_norm); 1226 err = 0; 1227 goto done; 1228 } 1229 1230 done: 1231 if (err == 0) { 1232 (void) snprintf(pr_val, pr_valsize, "%d", value); 1233 } 1234 return (err); 1235 } 1236 1237 /* 1238 * Compute the index of the required bit in the multicast hash map. 1239 * This must mirror the way the hardware actually does it! 1240 * See Broadcom document 570X-PG102-R page 125. 1241 */ 1242 static uint32_t 1243 bge_hash_index(const uint8_t *mca) 1244 { 1245 uint32_t hash; 1246 1247 CRC32(hash, mca, ETHERADDRL, -1U, crc32_table); 1248 1249 return (hash); 1250 } 1251 1252 /* 1253 * bge_m_multicst_add() -- enable/disable a multicast address 1254 */ 1255 static int 1256 bge_m_multicst(void *arg, boolean_t add, const uint8_t *mca) 1257 { 1258 bge_t *bgep = arg; /* private device info */ 1259 uint32_t hash; 1260 uint32_t index; 1261 uint32_t word; 1262 uint32_t bit; 1263 uint8_t *refp; 1264 1265 BGE_TRACE(("bge_m_multicst($%p, %s, %s)", arg, 1266 (add) ? "add" : "remove", ether_sprintf((void *)mca))); 1267 1268 /* 1269 * Precalculate all required masks, pointers etc ... 1270 */ 1271 hash = bge_hash_index(mca); 1272 index = hash % BGE_HASH_TABLE_SIZE; 1273 word = index/32u; 1274 bit = 1 << (index % 32u); 1275 refp = &bgep->mcast_refs[index]; 1276 1277 BGE_DEBUG(("bge_m_multicst: hash 0x%x index %d (%d:0x%x) = %d", 1278 hash, index, word, bit, *refp)); 1279 1280 /* 1281 * We must set the appropriate bit in the hash map (and the 1282 * corresponding h/w register) when the refcount goes from 0 1283 * to >0, and clear it when the last ref goes away (refcount 1284 * goes from >0 back to 0). If we change the hash map, we 1285 * must also update the chip's hardware map registers. 1286 */ 1287 mutex_enter(bgep->genlock); 1288 if (!(bgep->progress & PROGRESS_INTR)) { 1289 /* can happen during autorecovery */ 1290 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 1291 mutex_exit(bgep->genlock); 1292 return (EIO); 1293 } 1294 if (add) { 1295 if ((*refp)++ == 0) { 1296 bgep->mcast_hash[word] |= bit; 1297 #ifdef BGE_IPMI_ASF 1298 if (bge_chip_sync(bgep, B_TRUE) == DDI_FAILURE) { 1299 #else 1300 if (bge_chip_sync(bgep) == DDI_FAILURE) { 1301 #endif 1302 (void) bge_check_acc_handle(bgep, 1303 bgep->cfg_handle); 1304 (void) bge_check_acc_handle(bgep, 1305 bgep->io_handle); 1306 ddi_fm_service_impact(bgep->devinfo, 1307 DDI_SERVICE_DEGRADED); 1308 mutex_exit(bgep->genlock); 1309 return (EIO); 1310 } 1311 } 1312 } else { 1313 if (--(*refp) == 0) { 1314 bgep->mcast_hash[word] &= ~bit; 1315 #ifdef BGE_IPMI_ASF 1316 if (bge_chip_sync(bgep, B_TRUE) == DDI_FAILURE) { 1317 #else 1318 if (bge_chip_sync(bgep) == DDI_FAILURE) { 1319 #endif 1320 (void) bge_check_acc_handle(bgep, 1321 bgep->cfg_handle); 1322 (void) bge_check_acc_handle(bgep, 1323 bgep->io_handle); 1324 ddi_fm_service_impact(bgep->devinfo, 1325 DDI_SERVICE_DEGRADED); 1326 mutex_exit(bgep->genlock); 1327 return (EIO); 1328 } 1329 } 1330 } 1331 BGE_DEBUG(("bge_m_multicst($%p) done", arg)); 1332 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) { 1333 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 1334 mutex_exit(bgep->genlock); 1335 return (EIO); 1336 } 1337 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) { 1338 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 1339 mutex_exit(bgep->genlock); 1340 return (EIO); 1341 } 1342 mutex_exit(bgep->genlock); 1343 1344 return (0); 1345 } 1346 1347 /* 1348 * bge_m_promisc() -- set or reset promiscuous mode on the board 1349 * 1350 * Program the hardware to enable/disable promiscuous and/or 1351 * receive-all-multicast modes. 1352 */ 1353 static int 1354 bge_m_promisc(void *arg, boolean_t on) 1355 { 1356 bge_t *bgep = arg; 1357 1358 BGE_TRACE(("bge_m_promisc_set($%p, %d)", arg, on)); 1359 1360 /* 1361 * Store MAC layer specified mode and pass to chip layer to update h/w 1362 */ 1363 mutex_enter(bgep->genlock); 1364 if (!(bgep->progress & PROGRESS_INTR)) { 1365 /* can happen during autorecovery */ 1366 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 1367 mutex_exit(bgep->genlock); 1368 return (EIO); 1369 } 1370 bgep->promisc = on; 1371 #ifdef BGE_IPMI_ASF 1372 if (bge_chip_sync(bgep, B_TRUE) == DDI_FAILURE) { 1373 #else 1374 if (bge_chip_sync(bgep) == DDI_FAILURE) { 1375 #endif 1376 (void) bge_check_acc_handle(bgep, bgep->cfg_handle); 1377 (void) bge_check_acc_handle(bgep, bgep->io_handle); 1378 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 1379 mutex_exit(bgep->genlock); 1380 return (EIO); 1381 } 1382 BGE_DEBUG(("bge_m_promisc_set($%p) done", arg)); 1383 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) { 1384 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 1385 mutex_exit(bgep->genlock); 1386 return (EIO); 1387 } 1388 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) { 1389 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 1390 mutex_exit(bgep->genlock); 1391 return (EIO); 1392 } 1393 mutex_exit(bgep->genlock); 1394 return (0); 1395 } 1396 1397 /* 1398 * Find the slot for the specified unicast address 1399 */ 1400 int 1401 bge_unicst_find(bge_t *bgep, const uint8_t *mac_addr) 1402 { 1403 int slot; 1404 1405 ASSERT(mutex_owned(bgep->genlock)); 1406 1407 for (slot = 0; slot < bgep->unicst_addr_total; slot++) { 1408 if (bcmp(bgep->curr_addr[slot].addr, mac_addr, ETHERADDRL) == 0) 1409 return (slot); 1410 } 1411 1412 return (-1); 1413 } 1414 1415 /* 1416 * Programs the classifier to start steering packets matching 'mac_addr' to the 1417 * specified ring 'arg'. 1418 */ 1419 static int 1420 bge_addmac(void *arg, const uint8_t *mac_addr) 1421 { 1422 recv_ring_t *rrp = (recv_ring_t *)arg; 1423 bge_t *bgep = rrp->bgep; 1424 bge_recv_rule_t *rulep = bgep->recv_rules; 1425 bge_rule_info_t *rinfop = NULL; 1426 uint8_t ring = (uint8_t)(rrp - bgep->recv) + 1; 1427 int i; 1428 uint16_t tmp16; 1429 uint32_t tmp32; 1430 int slot; 1431 int err; 1432 1433 mutex_enter(bgep->genlock); 1434 if (bgep->unicst_addr_avail == 0) { 1435 mutex_exit(bgep->genlock); 1436 return (ENOSPC); 1437 } 1438 1439 /* 1440 * First add the unicast address to a available slot. 1441 */ 1442 slot = bge_unicst_find(bgep, mac_addr); 1443 ASSERT(slot == -1); 1444 1445 for (slot = 0; slot < bgep->unicst_addr_total; slot++) { 1446 if (!bgep->curr_addr[slot].set) { 1447 bgep->curr_addr[slot].set = B_TRUE; 1448 break; 1449 } 1450 } 1451 1452 ASSERT(slot < bgep->unicst_addr_total); 1453 bgep->unicst_addr_avail--; 1454 mutex_exit(bgep->genlock); 1455 1456 if ((err = bge_unicst_set(bgep, mac_addr, slot)) != 0) 1457 goto fail; 1458 1459 /* A rule is already here. Deny this. */ 1460 if (rrp->mac_addr_rule != NULL) { 1461 err = ether_cmp(mac_addr, rrp->mac_addr_val) ? EEXIST : EBUSY; 1462 goto fail; 1463 } 1464 1465 /* 1466 * Allocate a bge_rule_info_t to keep track of which rule slots 1467 * are being used. 1468 */ 1469 rinfop = kmem_zalloc(sizeof (bge_rule_info_t), KM_NOSLEEP); 1470 if (rinfop == NULL) { 1471 err = ENOMEM; 1472 goto fail; 1473 } 1474 1475 /* 1476 * Look for the starting slot to place the rules. 1477 * The two slots we reserve must be contiguous. 1478 */ 1479 for (i = 0; i + 1 < RECV_RULES_NUM_MAX; i++) 1480 if ((rulep[i].control & RECV_RULE_CTL_ENABLE) == 0 && 1481 (rulep[i+1].control & RECV_RULE_CTL_ENABLE) == 0) 1482 break; 1483 1484 ASSERT(i + 1 < RECV_RULES_NUM_MAX); 1485 1486 bcopy(mac_addr, &tmp32, sizeof (tmp32)); 1487 rulep[i].mask_value = ntohl(tmp32); 1488 rulep[i].control = RULE_DEST_MAC_1(ring) | RECV_RULE_CTL_AND; 1489 bge_reg_put32(bgep, RECV_RULE_MASK_REG(i), rulep[i].mask_value); 1490 bge_reg_put32(bgep, RECV_RULE_CONTROL_REG(i), rulep[i].control); 1491 1492 bcopy(mac_addr + 4, &tmp16, sizeof (tmp16)); 1493 rulep[i+1].mask_value = 0xffff0000 | ntohs(tmp16); 1494 rulep[i+1].control = RULE_DEST_MAC_2(ring); 1495 bge_reg_put32(bgep, RECV_RULE_MASK_REG(i+1), rulep[i+1].mask_value); 1496 bge_reg_put32(bgep, RECV_RULE_CONTROL_REG(i+1), rulep[i+1].control); 1497 rinfop->start = i; 1498 rinfop->count = 2; 1499 1500 rrp->mac_addr_rule = rinfop; 1501 bcopy(mac_addr, rrp->mac_addr_val, ETHERADDRL); 1502 1503 return (0); 1504 1505 fail: 1506 /* Clear the address just set */ 1507 (void) bge_unicst_set(bgep, zero_addr, slot); 1508 mutex_enter(bgep->genlock); 1509 bgep->curr_addr[slot].set = B_FALSE; 1510 bgep->unicst_addr_avail++; 1511 mutex_exit(bgep->genlock); 1512 1513 return (err); 1514 } 1515 1516 /* 1517 * Stop classifying packets matching the MAC address to the specified ring. 1518 */ 1519 static int 1520 bge_remmac(void *arg, const uint8_t *mac_addr) 1521 { 1522 recv_ring_t *rrp = (recv_ring_t *)arg; 1523 bge_t *bgep = rrp->bgep; 1524 bge_recv_rule_t *rulep = bgep->recv_rules; 1525 bge_rule_info_t *rinfop = rrp->mac_addr_rule; 1526 int start; 1527 int slot; 1528 int err; 1529 1530 /* 1531 * Remove the MAC address from its slot. 1532 */ 1533 mutex_enter(bgep->genlock); 1534 slot = bge_unicst_find(bgep, mac_addr); 1535 if (slot == -1) { 1536 mutex_exit(bgep->genlock); 1537 return (EINVAL); 1538 } 1539 1540 ASSERT(bgep->curr_addr[slot].set); 1541 mutex_exit(bgep->genlock); 1542 1543 if ((err = bge_unicst_set(bgep, zero_addr, slot)) != 0) 1544 return (err); 1545 1546 if (rinfop == NULL || ether_cmp(mac_addr, rrp->mac_addr_val) != 0) 1547 return (EINVAL); 1548 1549 start = rinfop->start; 1550 rulep[start].mask_value = 0; 1551 rulep[start].control = 0; 1552 bge_reg_put32(bgep, RECV_RULE_MASK_REG(start), rulep[start].mask_value); 1553 bge_reg_put32(bgep, RECV_RULE_CONTROL_REG(start), rulep[start].control); 1554 start++; 1555 rulep[start].mask_value = 0; 1556 rulep[start].control = 0; 1557 bge_reg_put32(bgep, RECV_RULE_MASK_REG(start), rulep[start].mask_value); 1558 bge_reg_put32(bgep, RECV_RULE_CONTROL_REG(start), rulep[start].control); 1559 1560 kmem_free(rinfop, sizeof (bge_rule_info_t)); 1561 rrp->mac_addr_rule = NULL; 1562 bzero(rrp->mac_addr_val, ETHERADDRL); 1563 1564 mutex_enter(bgep->genlock); 1565 bgep->curr_addr[slot].set = B_FALSE; 1566 bgep->unicst_addr_avail++; 1567 mutex_exit(bgep->genlock); 1568 1569 return (0); 1570 } 1571 1572 static int 1573 bge_flag_intr_enable(mac_intr_handle_t ih) 1574 { 1575 recv_ring_t *rrp = (recv_ring_t *)ih; 1576 bge_t *bgep = rrp->bgep; 1577 1578 mutex_enter(bgep->genlock); 1579 rrp->poll_flag = 0; 1580 mutex_exit(bgep->genlock); 1581 1582 return (0); 1583 } 1584 1585 static int 1586 bge_flag_intr_disable(mac_intr_handle_t ih) 1587 { 1588 recv_ring_t *rrp = (recv_ring_t *)ih; 1589 bge_t *bgep = rrp->bgep; 1590 1591 mutex_enter(bgep->genlock); 1592 rrp->poll_flag = 1; 1593 mutex_exit(bgep->genlock); 1594 1595 return (0); 1596 } 1597 1598 static int 1599 bge_ring_start(mac_ring_driver_t rh, uint64_t mr_gen_num) 1600 { 1601 recv_ring_t *rx_ring; 1602 1603 rx_ring = (recv_ring_t *)rh; 1604 mutex_enter(rx_ring->rx_lock); 1605 rx_ring->ring_gen_num = mr_gen_num; 1606 mutex_exit(rx_ring->rx_lock); 1607 return (0); 1608 } 1609 1610 1611 /* 1612 * Callback funtion for MAC layer to register all rings 1613 * for given ring_group, noted by rg_index. 1614 */ 1615 void 1616 bge_fill_ring(void *arg, mac_ring_type_t rtype, const int rg_index, 1617 const int index, mac_ring_info_t *infop, mac_ring_handle_t rh) 1618 { 1619 bge_t *bgep = arg; 1620 mac_intr_t *mintr; 1621 1622 switch (rtype) { 1623 case MAC_RING_TYPE_RX: { 1624 recv_ring_t *rx_ring; 1625 ASSERT(rg_index >= 0 && rg_index < MIN(bgep->chipid.rx_rings, 1626 MAC_ADDRESS_REGS_MAX) && index == 0); 1627 1628 rx_ring = &bgep->recv[rg_index]; 1629 rx_ring->ring_handle = rh; 1630 1631 infop->mri_driver = (mac_ring_driver_t)rx_ring; 1632 infop->mri_start = bge_ring_start; 1633 infop->mri_stop = NULL; 1634 infop->mri_poll = bge_poll_ring; 1635 1636 mintr = &infop->mri_intr; 1637 mintr->mi_handle = (mac_intr_handle_t)rx_ring; 1638 mintr->mi_enable = bge_flag_intr_enable; 1639 mintr->mi_disable = bge_flag_intr_disable; 1640 1641 break; 1642 } 1643 case MAC_RING_TYPE_TX: 1644 default: 1645 ASSERT(0); 1646 break; 1647 } 1648 } 1649 1650 /* 1651 * Fill infop passed as argument 1652 * fill in respective ring_group info 1653 * Each group has a single ring in it. We keep it simple 1654 * and use the same internal handle for rings and groups. 1655 */ 1656 void 1657 bge_fill_group(void *arg, mac_ring_type_t rtype, const int rg_index, 1658 mac_group_info_t *infop, mac_group_handle_t gh) 1659 { 1660 bge_t *bgep = arg; 1661 1662 switch (rtype) { 1663 case MAC_RING_TYPE_RX: { 1664 recv_ring_t *rx_ring; 1665 1666 ASSERT(rg_index >= 0 && rg_index < MIN(bgep->chipid.rx_rings, 1667 MAC_ADDRESS_REGS_MAX)); 1668 rx_ring = &bgep->recv[rg_index]; 1669 rx_ring->ring_group_handle = gh; 1670 1671 infop->mgi_driver = (mac_group_driver_t)rx_ring; 1672 infop->mgi_start = NULL; 1673 infop->mgi_stop = NULL; 1674 infop->mgi_addmac = bge_addmac; 1675 infop->mgi_remmac = bge_remmac; 1676 infop->mgi_count = 1; 1677 break; 1678 } 1679 case MAC_RING_TYPE_TX: 1680 default: 1681 ASSERT(0); 1682 break; 1683 } 1684 } 1685 1686 /*ARGSUSED*/ 1687 static boolean_t 1688 bge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data) 1689 { 1690 bge_t *bgep = arg; 1691 1692 switch (cap) { 1693 case MAC_CAPAB_HCKSUM: { 1694 uint32_t *txflags = cap_data; 1695 1696 *txflags = HCKSUM_INET_FULL_V4 | HCKSUM_IPHDRCKSUM; 1697 break; 1698 } 1699 case MAC_CAPAB_RINGS: { 1700 mac_capab_rings_t *cap_rings = cap_data; 1701 1702 /* Temporarily disable multiple tx rings. */ 1703 if (cap_rings->mr_type != MAC_RING_TYPE_RX) 1704 return (B_FALSE); 1705 1706 cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC; 1707 cap_rings->mr_rnum = cap_rings->mr_gnum = 1708 MIN(bgep->chipid.rx_rings, MAC_ADDRESS_REGS_MAX); 1709 cap_rings->mr_rget = bge_fill_ring; 1710 cap_rings->mr_gget = bge_fill_group; 1711 break; 1712 } 1713 default: 1714 return (B_FALSE); 1715 } 1716 return (B_TRUE); 1717 } 1718 1719 /* 1720 * Loopback ioctl code 1721 */ 1722 1723 static lb_property_t loopmodes[] = { 1724 { normal, "normal", BGE_LOOP_NONE }, 1725 { external, "1000Mbps", BGE_LOOP_EXTERNAL_1000 }, 1726 { external, "100Mbps", BGE_LOOP_EXTERNAL_100 }, 1727 { external, "10Mbps", BGE_LOOP_EXTERNAL_10 }, 1728 { internal, "PHY", BGE_LOOP_INTERNAL_PHY }, 1729 { internal, "MAC", BGE_LOOP_INTERNAL_MAC } 1730 }; 1731 1732 static enum ioc_reply 1733 bge_set_loop_mode(bge_t *bgep, uint32_t mode) 1734 { 1735 /* 1736 * If the mode isn't being changed, there's nothing to do ... 1737 */ 1738 if (mode == bgep->param_loop_mode) 1739 return (IOC_ACK); 1740 1741 /* 1742 * Validate the requested mode and prepare a suitable message 1743 * to explain the link down/up cycle that the change will 1744 * probably induce ... 1745 */ 1746 switch (mode) { 1747 default: 1748 return (IOC_INVAL); 1749 1750 case BGE_LOOP_NONE: 1751 case BGE_LOOP_EXTERNAL_1000: 1752 case BGE_LOOP_EXTERNAL_100: 1753 case BGE_LOOP_EXTERNAL_10: 1754 case BGE_LOOP_INTERNAL_PHY: 1755 case BGE_LOOP_INTERNAL_MAC: 1756 break; 1757 } 1758 1759 /* 1760 * All OK; tell the caller to reprogram 1761 * the PHY and/or MAC for the new mode ... 1762 */ 1763 bgep->param_loop_mode = mode; 1764 return (IOC_RESTART_ACK); 1765 } 1766 1767 static enum ioc_reply 1768 bge_loop_ioctl(bge_t *bgep, queue_t *wq, mblk_t *mp, struct iocblk *iocp) 1769 { 1770 lb_info_sz_t *lbsp; 1771 lb_property_t *lbpp; 1772 uint32_t *lbmp; 1773 int cmd; 1774 1775 _NOTE(ARGUNUSED(wq)) 1776 1777 /* 1778 * Validate format of ioctl 1779 */ 1780 if (mp->b_cont == NULL) 1781 return (IOC_INVAL); 1782 1783 cmd = iocp->ioc_cmd; 1784 switch (cmd) { 1785 default: 1786 /* NOTREACHED */ 1787 bge_error(bgep, "bge_loop_ioctl: invalid cmd 0x%x", cmd); 1788 return (IOC_INVAL); 1789 1790 case LB_GET_INFO_SIZE: 1791 if (iocp->ioc_count != sizeof (lb_info_sz_t)) 1792 return (IOC_INVAL); 1793 lbsp = (void *)mp->b_cont->b_rptr; 1794 *lbsp = sizeof (loopmodes); 1795 return (IOC_REPLY); 1796 1797 case LB_GET_INFO: 1798 if (iocp->ioc_count != sizeof (loopmodes)) 1799 return (IOC_INVAL); 1800 lbpp = (void *)mp->b_cont->b_rptr; 1801 bcopy(loopmodes, lbpp, sizeof (loopmodes)); 1802 return (IOC_REPLY); 1803 1804 case LB_GET_MODE: 1805 if (iocp->ioc_count != sizeof (uint32_t)) 1806 return (IOC_INVAL); 1807 lbmp = (void *)mp->b_cont->b_rptr; 1808 *lbmp = bgep->param_loop_mode; 1809 return (IOC_REPLY); 1810 1811 case LB_SET_MODE: 1812 if (iocp->ioc_count != sizeof (uint32_t)) 1813 return (IOC_INVAL); 1814 lbmp = (void *)mp->b_cont->b_rptr; 1815 return (bge_set_loop_mode(bgep, *lbmp)); 1816 } 1817 } 1818 1819 /* 1820 * Specific bge IOCTLs, the gld module handles the generic ones. 1821 */ 1822 static void 1823 bge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp) 1824 { 1825 bge_t *bgep = arg; 1826 struct iocblk *iocp; 1827 enum ioc_reply status; 1828 boolean_t need_privilege; 1829 int err; 1830 int cmd; 1831 1832 /* 1833 * Validate the command before bothering with the mutex ... 1834 */ 1835 iocp = (void *)mp->b_rptr; 1836 iocp->ioc_error = 0; 1837 need_privilege = B_TRUE; 1838 cmd = iocp->ioc_cmd; 1839 switch (cmd) { 1840 default: 1841 miocnak(wq, mp, 0, EINVAL); 1842 return; 1843 1844 case BGE_MII_READ: 1845 case BGE_MII_WRITE: 1846 case BGE_SEE_READ: 1847 case BGE_SEE_WRITE: 1848 case BGE_FLASH_READ: 1849 case BGE_FLASH_WRITE: 1850 case BGE_DIAG: 1851 case BGE_PEEK: 1852 case BGE_POKE: 1853 case BGE_PHY_RESET: 1854 case BGE_SOFT_RESET: 1855 case BGE_HARD_RESET: 1856 break; 1857 1858 case LB_GET_INFO_SIZE: 1859 case LB_GET_INFO: 1860 case LB_GET_MODE: 1861 need_privilege = B_FALSE; 1862 /* FALLTHRU */ 1863 case LB_SET_MODE: 1864 break; 1865 1866 } 1867 1868 if (need_privilege) { 1869 /* 1870 * Check for specific net_config privilege on Solaris 10+. 1871 */ 1872 err = secpolicy_net_config(iocp->ioc_cr, B_FALSE); 1873 if (err != 0) { 1874 miocnak(wq, mp, 0, err); 1875 return; 1876 } 1877 } 1878 1879 mutex_enter(bgep->genlock); 1880 if (!(bgep->progress & PROGRESS_INTR)) { 1881 /* can happen during autorecovery */ 1882 mutex_exit(bgep->genlock); 1883 miocnak(wq, mp, 0, EIO); 1884 return; 1885 } 1886 1887 switch (cmd) { 1888 default: 1889 _NOTE(NOTREACHED) 1890 status = IOC_INVAL; 1891 break; 1892 1893 case BGE_MII_READ: 1894 case BGE_MII_WRITE: 1895 case BGE_SEE_READ: 1896 case BGE_SEE_WRITE: 1897 case BGE_FLASH_READ: 1898 case BGE_FLASH_WRITE: 1899 case BGE_DIAG: 1900 case BGE_PEEK: 1901 case BGE_POKE: 1902 case BGE_PHY_RESET: 1903 case BGE_SOFT_RESET: 1904 case BGE_HARD_RESET: 1905 status = bge_chip_ioctl(bgep, wq, mp, iocp); 1906 break; 1907 1908 case LB_GET_INFO_SIZE: 1909 case LB_GET_INFO: 1910 case LB_GET_MODE: 1911 case LB_SET_MODE: 1912 status = bge_loop_ioctl(bgep, wq, mp, iocp); 1913 break; 1914 1915 } 1916 1917 /* 1918 * Do we need to reprogram the PHY and/or the MAC? 1919 * Do it now, while we still have the mutex. 1920 * 1921 * Note: update the PHY first, 'cos it controls the 1922 * speed/duplex parameters that the MAC code uses. 1923 */ 1924 switch (status) { 1925 case IOC_RESTART_REPLY: 1926 case IOC_RESTART_ACK: 1927 if (bge_reprogram(bgep) == IOC_INVAL) 1928 status = IOC_INVAL; 1929 break; 1930 } 1931 1932 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) { 1933 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 1934 status = IOC_INVAL; 1935 } 1936 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) { 1937 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 1938 status = IOC_INVAL; 1939 } 1940 mutex_exit(bgep->genlock); 1941 1942 /* 1943 * Finally, decide how to reply 1944 */ 1945 switch (status) { 1946 default: 1947 case IOC_INVAL: 1948 /* 1949 * Error, reply with a NAK and EINVAL or the specified error 1950 */ 1951 miocnak(wq, mp, 0, iocp->ioc_error == 0 ? 1952 EINVAL : iocp->ioc_error); 1953 break; 1954 1955 case IOC_DONE: 1956 /* 1957 * OK, reply already sent 1958 */ 1959 break; 1960 1961 case IOC_RESTART_ACK: 1962 case IOC_ACK: 1963 /* 1964 * OK, reply with an ACK 1965 */ 1966 miocack(wq, mp, 0, 0); 1967 break; 1968 1969 case IOC_RESTART_REPLY: 1970 case IOC_REPLY: 1971 /* 1972 * OK, send prepared reply as ACK or NAK 1973 */ 1974 mp->b_datap->db_type = iocp->ioc_error == 0 ? 1975 M_IOCACK : M_IOCNAK; 1976 qreply(wq, mp); 1977 break; 1978 } 1979 } 1980 1981 /* 1982 * ========== Per-instance setup/teardown code ========== 1983 */ 1984 1985 #undef BGE_DBG 1986 #define BGE_DBG BGE_DBG_INIT /* debug flag for this code */ 1987 /* 1988 * Allocate an area of memory and a DMA handle for accessing it 1989 */ 1990 static int 1991 bge_alloc_dma_mem(bge_t *bgep, size_t memsize, ddi_device_acc_attr_t *attr_p, 1992 uint_t dma_flags, dma_area_t *dma_p) 1993 { 1994 caddr_t va; 1995 int err; 1996 1997 BGE_TRACE(("bge_alloc_dma_mem($%p, %ld, $%p, 0x%x, $%p)", 1998 (void *)bgep, memsize, attr_p, dma_flags, dma_p)); 1999 2000 /* 2001 * Allocate handle 2002 */ 2003 err = ddi_dma_alloc_handle(bgep->devinfo, &dma_attr, 2004 DDI_DMA_DONTWAIT, NULL, &dma_p->dma_hdl); 2005 if (err != DDI_SUCCESS) 2006 return (DDI_FAILURE); 2007 2008 /* 2009 * Allocate memory 2010 */ 2011 err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize, attr_p, 2012 dma_flags, DDI_DMA_DONTWAIT, NULL, &va, &dma_p->alength, 2013 &dma_p->acc_hdl); 2014 if (err != DDI_SUCCESS) 2015 return (DDI_FAILURE); 2016 2017 /* 2018 * Bind the two together 2019 */ 2020 dma_p->mem_va = va; 2021 err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL, 2022 va, dma_p->alength, dma_flags, DDI_DMA_DONTWAIT, NULL, 2023 &dma_p->cookie, &dma_p->ncookies); 2024 2025 BGE_DEBUG(("bge_alloc_dma_mem(): bind %d bytes; err %d, %d cookies", 2026 dma_p->alength, err, dma_p->ncookies)); 2027 2028 if (err != DDI_DMA_MAPPED || dma_p->ncookies != 1) 2029 return (DDI_FAILURE); 2030 2031 dma_p->nslots = ~0U; 2032 dma_p->size = ~0U; 2033 dma_p->token = ~0U; 2034 dma_p->offset = 0; 2035 return (DDI_SUCCESS); 2036 } 2037 2038 /* 2039 * Free one allocated area of DMAable memory 2040 */ 2041 static void 2042 bge_free_dma_mem(dma_area_t *dma_p) 2043 { 2044 if (dma_p->dma_hdl != NULL) { 2045 if (dma_p->ncookies) { 2046 (void) ddi_dma_unbind_handle(dma_p->dma_hdl); 2047 dma_p->ncookies = 0; 2048 } 2049 ddi_dma_free_handle(&dma_p->dma_hdl); 2050 dma_p->dma_hdl = NULL; 2051 } 2052 2053 if (dma_p->acc_hdl != NULL) { 2054 ddi_dma_mem_free(&dma_p->acc_hdl); 2055 dma_p->acc_hdl = NULL; 2056 } 2057 } 2058 /* 2059 * Utility routine to carve a slice off a chunk of allocated memory, 2060 * updating the chunk descriptor accordingly. The size of the slice 2061 * is given by the product of the <qty> and <size> parameters. 2062 */ 2063 static void 2064 bge_slice_chunk(dma_area_t *slice, dma_area_t *chunk, 2065 uint32_t qty, uint32_t size) 2066 { 2067 static uint32_t sequence = 0xbcd5704a; 2068 size_t totsize; 2069 2070 totsize = qty*size; 2071 ASSERT(totsize <= chunk->alength); 2072 2073 *slice = *chunk; 2074 slice->nslots = qty; 2075 slice->size = size; 2076 slice->alength = totsize; 2077 slice->token = ++sequence; 2078 2079 chunk->mem_va = (caddr_t)chunk->mem_va + totsize; 2080 chunk->alength -= totsize; 2081 chunk->offset += totsize; 2082 chunk->cookie.dmac_laddress += totsize; 2083 chunk->cookie.dmac_size -= totsize; 2084 } 2085 2086 /* 2087 * Initialise the specified Receive Producer (Buffer) Ring, using 2088 * the information in the <dma_area> descriptors that it contains 2089 * to set up all the other fields. This routine should be called 2090 * only once for each ring. 2091 */ 2092 static void 2093 bge_init_buff_ring(bge_t *bgep, uint64_t ring) 2094 { 2095 buff_ring_t *brp; 2096 bge_status_t *bsp; 2097 sw_rbd_t *srbdp; 2098 dma_area_t pbuf; 2099 uint32_t bufsize; 2100 uint32_t nslots; 2101 uint32_t slot; 2102 uint32_t split; 2103 2104 static bge_regno_t nic_ring_addrs[BGE_BUFF_RINGS_MAX] = { 2105 NIC_MEM_SHADOW_BUFF_STD, 2106 NIC_MEM_SHADOW_BUFF_JUMBO, 2107 NIC_MEM_SHADOW_BUFF_MINI 2108 }; 2109 static bge_regno_t mailbox_regs[BGE_BUFF_RINGS_MAX] = { 2110 RECV_STD_PROD_INDEX_REG, 2111 RECV_JUMBO_PROD_INDEX_REG, 2112 RECV_MINI_PROD_INDEX_REG 2113 }; 2114 static bge_regno_t buff_cons_xref[BGE_BUFF_RINGS_MAX] = { 2115 STATUS_STD_BUFF_CONS_INDEX, 2116 STATUS_JUMBO_BUFF_CONS_INDEX, 2117 STATUS_MINI_BUFF_CONS_INDEX 2118 }; 2119 2120 BGE_TRACE(("bge_init_buff_ring($%p, %d)", 2121 (void *)bgep, ring)); 2122 2123 brp = &bgep->buff[ring]; 2124 nslots = brp->desc.nslots; 2125 ASSERT(brp->buf[0].nslots == nslots/BGE_SPLIT); 2126 bufsize = brp->buf[0].size; 2127 2128 /* 2129 * Set up the copy of the h/w RCB 2130 * 2131 * Note: unlike Send & Receive Return Rings, (where the max_len 2132 * field holds the number of slots), in a Receive Buffer Ring 2133 * this field indicates the size of each buffer in the ring. 2134 */ 2135 brp->hw_rcb.host_ring_addr = brp->desc.cookie.dmac_laddress; 2136 brp->hw_rcb.max_len = (uint16_t)bufsize; 2137 brp->hw_rcb.flags = nslots > 0 ? 0 : RCB_FLAG_RING_DISABLED; 2138 brp->hw_rcb.nic_ring_addr = nic_ring_addrs[ring]; 2139 2140 /* 2141 * Other one-off initialisation of per-ring data 2142 */ 2143 brp->bgep = bgep; 2144 bsp = DMA_VPTR(bgep->status_block); 2145 brp->cons_index_p = &bsp->buff_cons_index[buff_cons_xref[ring]]; 2146 brp->chip_mbx_reg = mailbox_regs[ring]; 2147 mutex_init(brp->rf_lock, NULL, MUTEX_DRIVER, 2148 DDI_INTR_PRI(bgep->intr_pri)); 2149 2150 /* 2151 * Allocate the array of s/w Receive Buffer Descriptors 2152 */ 2153 srbdp = kmem_zalloc(nslots*sizeof (*srbdp), KM_SLEEP); 2154 brp->sw_rbds = srbdp; 2155 2156 /* 2157 * Now initialise each array element once and for all 2158 */ 2159 for (split = 0; split < BGE_SPLIT; ++split) { 2160 pbuf = brp->buf[split]; 2161 for (slot = 0; slot < nslots/BGE_SPLIT; ++srbdp, ++slot) 2162 bge_slice_chunk(&srbdp->pbuf, &pbuf, 1, bufsize); 2163 ASSERT(pbuf.alength == 0); 2164 } 2165 } 2166 2167 /* 2168 * Clean up initialisation done above before the memory is freed 2169 */ 2170 static void 2171 bge_fini_buff_ring(bge_t *bgep, uint64_t ring) 2172 { 2173 buff_ring_t *brp; 2174 sw_rbd_t *srbdp; 2175 2176 BGE_TRACE(("bge_fini_buff_ring($%p, %d)", 2177 (void *)bgep, ring)); 2178 2179 brp = &bgep->buff[ring]; 2180 srbdp = brp->sw_rbds; 2181 kmem_free(srbdp, brp->desc.nslots*sizeof (*srbdp)); 2182 2183 mutex_destroy(brp->rf_lock); 2184 } 2185 2186 /* 2187 * Initialise the specified Receive (Return) Ring, using the 2188 * information in the <dma_area> descriptors that it contains 2189 * to set up all the other fields. This routine should be called 2190 * only once for each ring. 2191 */ 2192 static void 2193 bge_init_recv_ring(bge_t *bgep, uint64_t ring) 2194 { 2195 recv_ring_t *rrp; 2196 bge_status_t *bsp; 2197 uint32_t nslots; 2198 2199 BGE_TRACE(("bge_init_recv_ring($%p, %d)", 2200 (void *)bgep, ring)); 2201 2202 /* 2203 * The chip architecture requires that receive return rings have 2204 * 512 or 1024 or 2048 elements per ring. See 570X-PG108-R page 103. 2205 */ 2206 rrp = &bgep->recv[ring]; 2207 nslots = rrp->desc.nslots; 2208 ASSERT(nslots == 0 || nslots == 512 || 2209 nslots == 1024 || nslots == 2048); 2210 2211 /* 2212 * Set up the copy of the h/w RCB 2213 */ 2214 rrp->hw_rcb.host_ring_addr = rrp->desc.cookie.dmac_laddress; 2215 rrp->hw_rcb.max_len = (uint16_t)nslots; 2216 rrp->hw_rcb.flags = nslots > 0 ? 0 : RCB_FLAG_RING_DISABLED; 2217 rrp->hw_rcb.nic_ring_addr = 0; 2218 2219 /* 2220 * Other one-off initialisation of per-ring data 2221 */ 2222 rrp->bgep = bgep; 2223 bsp = DMA_VPTR(bgep->status_block); 2224 rrp->prod_index_p = RECV_INDEX_P(bsp, ring); 2225 rrp->chip_mbx_reg = RECV_RING_CONS_INDEX_REG(ring); 2226 mutex_init(rrp->rx_lock, NULL, MUTEX_DRIVER, 2227 DDI_INTR_PRI(bgep->intr_pri)); 2228 } 2229 2230 2231 /* 2232 * Clean up initialisation done above before the memory is freed 2233 */ 2234 static void 2235 bge_fini_recv_ring(bge_t *bgep, uint64_t ring) 2236 { 2237 recv_ring_t *rrp; 2238 2239 BGE_TRACE(("bge_fini_recv_ring($%p, %d)", 2240 (void *)bgep, ring)); 2241 2242 rrp = &bgep->recv[ring]; 2243 if (rrp->rx_softint) 2244 ddi_remove_softintr(rrp->rx_softint); 2245 mutex_destroy(rrp->rx_lock); 2246 } 2247 2248 /* 2249 * Initialise the specified Send Ring, using the information in the 2250 * <dma_area> descriptors that it contains to set up all the other 2251 * fields. This routine should be called only once for each ring. 2252 */ 2253 static void 2254 bge_init_send_ring(bge_t *bgep, uint64_t ring) 2255 { 2256 send_ring_t *srp; 2257 bge_status_t *bsp; 2258 sw_sbd_t *ssbdp; 2259 dma_area_t desc; 2260 dma_area_t pbuf; 2261 uint32_t nslots; 2262 uint32_t slot; 2263 uint32_t split; 2264 sw_txbuf_t *txbuf; 2265 2266 BGE_TRACE(("bge_init_send_ring($%p, %d)", 2267 (void *)bgep, ring)); 2268 2269 /* 2270 * The chip architecture requires that host-based send rings 2271 * have 512 elements per ring. See 570X-PG102-R page 56. 2272 */ 2273 srp = &bgep->send[ring]; 2274 nslots = srp->desc.nslots; 2275 ASSERT(nslots == 0 || nslots == 512); 2276 2277 /* 2278 * Set up the copy of the h/w RCB 2279 */ 2280 srp->hw_rcb.host_ring_addr = srp->desc.cookie.dmac_laddress; 2281 srp->hw_rcb.max_len = (uint16_t)nslots; 2282 srp->hw_rcb.flags = nslots > 0 ? 0 : RCB_FLAG_RING_DISABLED; 2283 srp->hw_rcb.nic_ring_addr = NIC_MEM_SHADOW_SEND_RING(ring, nslots); 2284 2285 /* 2286 * Other one-off initialisation of per-ring data 2287 */ 2288 srp->bgep = bgep; 2289 bsp = DMA_VPTR(bgep->status_block); 2290 srp->cons_index_p = SEND_INDEX_P(bsp, ring); 2291 srp->chip_mbx_reg = SEND_RING_HOST_INDEX_REG(ring); 2292 mutex_init(srp->tx_lock, NULL, MUTEX_DRIVER, 2293 DDI_INTR_PRI(bgep->intr_pri)); 2294 mutex_init(srp->txbuf_lock, NULL, MUTEX_DRIVER, 2295 DDI_INTR_PRI(bgep->intr_pri)); 2296 mutex_init(srp->freetxbuf_lock, NULL, MUTEX_DRIVER, 2297 DDI_INTR_PRI(bgep->intr_pri)); 2298 mutex_init(srp->tc_lock, NULL, MUTEX_DRIVER, 2299 DDI_INTR_PRI(bgep->intr_pri)); 2300 if (nslots == 0) 2301 return; 2302 2303 /* 2304 * Allocate the array of s/w Send Buffer Descriptors 2305 */ 2306 ssbdp = kmem_zalloc(nslots*sizeof (*ssbdp), KM_SLEEP); 2307 txbuf = kmem_zalloc(BGE_SEND_BUF_MAX*sizeof (*txbuf), KM_SLEEP); 2308 srp->txbuf_head = 2309 kmem_zalloc(BGE_SEND_BUF_MAX*sizeof (bge_queue_item_t), KM_SLEEP); 2310 srp->pktp = kmem_zalloc(BGE_SEND_BUF_MAX*sizeof (send_pkt_t), KM_SLEEP); 2311 srp->sw_sbds = ssbdp; 2312 srp->txbuf = txbuf; 2313 srp->tx_buffers = BGE_SEND_BUF_NUM; 2314 srp->tx_buffers_low = srp->tx_buffers / 4; 2315 if (bgep->chipid.snd_buff_size > BGE_SEND_BUFF_SIZE_DEFAULT) 2316 srp->tx_array_max = BGE_SEND_BUF_ARRAY_JUMBO; 2317 else 2318 srp->tx_array_max = BGE_SEND_BUF_ARRAY; 2319 srp->tx_array = 1; 2320 2321 /* 2322 * Chunk tx desc area 2323 */ 2324 desc = srp->desc; 2325 for (slot = 0; slot < nslots; ++ssbdp, ++slot) { 2326 bge_slice_chunk(&ssbdp->desc, &desc, 1, 2327 sizeof (bge_sbd_t)); 2328 } 2329 ASSERT(desc.alength == 0); 2330 2331 /* 2332 * Chunk tx buffer area 2333 */ 2334 for (split = 0; split < BGE_SPLIT; ++split) { 2335 pbuf = srp->buf[0][split]; 2336 for (slot = 0; slot < BGE_SEND_BUF_NUM/BGE_SPLIT; ++slot) { 2337 bge_slice_chunk(&txbuf->buf, &pbuf, 1, 2338 bgep->chipid.snd_buff_size); 2339 txbuf++; 2340 } 2341 ASSERT(pbuf.alength == 0); 2342 } 2343 } 2344 2345 /* 2346 * Clean up initialisation done above before the memory is freed 2347 */ 2348 static void 2349 bge_fini_send_ring(bge_t *bgep, uint64_t ring) 2350 { 2351 send_ring_t *srp; 2352 uint32_t array; 2353 uint32_t split; 2354 uint32_t nslots; 2355 2356 BGE_TRACE(("bge_fini_send_ring($%p, %d)", 2357 (void *)bgep, ring)); 2358 2359 srp = &bgep->send[ring]; 2360 mutex_destroy(srp->tc_lock); 2361 mutex_destroy(srp->freetxbuf_lock); 2362 mutex_destroy(srp->txbuf_lock); 2363 mutex_destroy(srp->tx_lock); 2364 nslots = srp->desc.nslots; 2365 if (nslots == 0) 2366 return; 2367 2368 for (array = 1; array < srp->tx_array; ++array) 2369 for (split = 0; split < BGE_SPLIT; ++split) 2370 bge_free_dma_mem(&srp->buf[array][split]); 2371 kmem_free(srp->sw_sbds, nslots*sizeof (*srp->sw_sbds)); 2372 kmem_free(srp->txbuf_head, BGE_SEND_BUF_MAX*sizeof (*srp->txbuf_head)); 2373 kmem_free(srp->txbuf, BGE_SEND_BUF_MAX*sizeof (*srp->txbuf)); 2374 kmem_free(srp->pktp, BGE_SEND_BUF_MAX*sizeof (*srp->pktp)); 2375 srp->sw_sbds = NULL; 2376 srp->txbuf_head = NULL; 2377 srp->txbuf = NULL; 2378 srp->pktp = NULL; 2379 } 2380 2381 /* 2382 * Initialise all transmit, receive, and buffer rings. 2383 */ 2384 void 2385 bge_init_rings(bge_t *bgep) 2386 { 2387 uint32_t ring; 2388 2389 BGE_TRACE(("bge_init_rings($%p)", (void *)bgep)); 2390 2391 /* 2392 * Perform one-off initialisation of each ring ... 2393 */ 2394 for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring) 2395 bge_init_send_ring(bgep, ring); 2396 for (ring = 0; ring < BGE_RECV_RINGS_MAX; ++ring) 2397 bge_init_recv_ring(bgep, ring); 2398 for (ring = 0; ring < BGE_BUFF_RINGS_MAX; ++ring) 2399 bge_init_buff_ring(bgep, ring); 2400 } 2401 2402 /* 2403 * Undo the work of bge_init_rings() above before the memory is freed 2404 */ 2405 void 2406 bge_fini_rings(bge_t *bgep) 2407 { 2408 uint32_t ring; 2409 2410 BGE_TRACE(("bge_fini_rings($%p)", (void *)bgep)); 2411 2412 for (ring = 0; ring < BGE_BUFF_RINGS_MAX; ++ring) 2413 bge_fini_buff_ring(bgep, ring); 2414 for (ring = 0; ring < BGE_RECV_RINGS_MAX; ++ring) 2415 bge_fini_recv_ring(bgep, ring); 2416 for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring) 2417 bge_fini_send_ring(bgep, ring); 2418 } 2419 2420 /* 2421 * Called from the bge_m_stop() to free the tx buffers which are 2422 * allocated from the tx process. 2423 */ 2424 void 2425 bge_free_txbuf_arrays(send_ring_t *srp) 2426 { 2427 uint32_t array; 2428 uint32_t split; 2429 2430 ASSERT(mutex_owned(srp->tx_lock)); 2431 2432 /* 2433 * Free the extra tx buffer DMA area 2434 */ 2435 for (array = 1; array < srp->tx_array; ++array) 2436 for (split = 0; split < BGE_SPLIT; ++split) 2437 bge_free_dma_mem(&srp->buf[array][split]); 2438 2439 /* 2440 * Restore initial tx buffer numbers 2441 */ 2442 srp->tx_array = 1; 2443 srp->tx_buffers = BGE_SEND_BUF_NUM; 2444 srp->tx_buffers_low = srp->tx_buffers / 4; 2445 srp->tx_flow = 0; 2446 bzero(srp->pktp, BGE_SEND_BUF_MAX * sizeof (*srp->pktp)); 2447 } 2448 2449 /* 2450 * Called from tx process to allocate more tx buffers 2451 */ 2452 bge_queue_item_t * 2453 bge_alloc_txbuf_array(bge_t *bgep, send_ring_t *srp) 2454 { 2455 bge_queue_t *txbuf_queue; 2456 bge_queue_item_t *txbuf_item_last; 2457 bge_queue_item_t *txbuf_item; 2458 bge_queue_item_t *txbuf_item_rtn; 2459 sw_txbuf_t *txbuf; 2460 dma_area_t area; 2461 size_t txbuffsize; 2462 uint32_t slot; 2463 uint32_t array; 2464 uint32_t split; 2465 uint32_t err; 2466 2467 ASSERT(mutex_owned(srp->tx_lock)); 2468 2469 array = srp->tx_array; 2470 if (array >= srp->tx_array_max) 2471 return (NULL); 2472 2473 /* 2474 * Allocate memory & handles for TX buffers 2475 */ 2476 txbuffsize = BGE_SEND_BUF_NUM*bgep->chipid.snd_buff_size; 2477 ASSERT((txbuffsize % BGE_SPLIT) == 0); 2478 for (split = 0; split < BGE_SPLIT; ++split) { 2479 err = bge_alloc_dma_mem(bgep, txbuffsize/BGE_SPLIT, 2480 &bge_data_accattr, DDI_DMA_WRITE | BGE_DMA_MODE, 2481 &srp->buf[array][split]); 2482 if (err != DDI_SUCCESS) { 2483 /* Free the last already allocated OK chunks */ 2484 for (slot = 0; slot <= split; ++slot) 2485 bge_free_dma_mem(&srp->buf[array][slot]); 2486 srp->tx_alloc_fail++; 2487 return (NULL); 2488 } 2489 } 2490 2491 /* 2492 * Chunk tx buffer area 2493 */ 2494 txbuf = srp->txbuf + array*BGE_SEND_BUF_NUM; 2495 for (split = 0; split < BGE_SPLIT; ++split) { 2496 area = srp->buf[array][split]; 2497 for (slot = 0; slot < BGE_SEND_BUF_NUM/BGE_SPLIT; ++slot) { 2498 bge_slice_chunk(&txbuf->buf, &area, 1, 2499 bgep->chipid.snd_buff_size); 2500 txbuf++; 2501 } 2502 } 2503 2504 /* 2505 * Add above buffers to the tx buffer pop queue 2506 */ 2507 txbuf_item = srp->txbuf_head + array*BGE_SEND_BUF_NUM; 2508 txbuf = srp->txbuf + array*BGE_SEND_BUF_NUM; 2509 txbuf_item_last = NULL; 2510 for (slot = 0; slot < BGE_SEND_BUF_NUM; ++slot) { 2511 txbuf_item->item = txbuf; 2512 txbuf_item->next = txbuf_item_last; 2513 txbuf_item_last = txbuf_item; 2514 txbuf++; 2515 txbuf_item++; 2516 } 2517 txbuf_item = srp->txbuf_head + array*BGE_SEND_BUF_NUM; 2518 txbuf_item_rtn = txbuf_item; 2519 txbuf_item++; 2520 txbuf_queue = srp->txbuf_pop_queue; 2521 mutex_enter(txbuf_queue->lock); 2522 txbuf_item->next = txbuf_queue->head; 2523 txbuf_queue->head = txbuf_item_last; 2524 txbuf_queue->count += BGE_SEND_BUF_NUM - 1; 2525 mutex_exit(txbuf_queue->lock); 2526 2527 srp->tx_array++; 2528 srp->tx_buffers += BGE_SEND_BUF_NUM; 2529 srp->tx_buffers_low = srp->tx_buffers / 4; 2530 2531 return (txbuf_item_rtn); 2532 } 2533 2534 /* 2535 * This function allocates all the transmit and receive buffers 2536 * and descriptors, in four chunks. 2537 */ 2538 int 2539 bge_alloc_bufs(bge_t *bgep) 2540 { 2541 dma_area_t area; 2542 size_t rxbuffsize; 2543 size_t txbuffsize; 2544 size_t rxbuffdescsize; 2545 size_t rxdescsize; 2546 size_t txdescsize; 2547 uint32_t ring; 2548 uint32_t rx_rings = bgep->chipid.rx_rings; 2549 uint32_t tx_rings = bgep->chipid.tx_rings; 2550 int split; 2551 int err; 2552 2553 BGE_TRACE(("bge_alloc_bufs($%p)", 2554 (void *)bgep)); 2555 2556 rxbuffsize = BGE_STD_SLOTS_USED*bgep->chipid.std_buf_size; 2557 rxbuffsize += bgep->chipid.jumbo_slots*bgep->chipid.recv_jumbo_size; 2558 rxbuffsize += BGE_MINI_SLOTS_USED*BGE_MINI_BUFF_SIZE; 2559 2560 txbuffsize = BGE_SEND_BUF_NUM*bgep->chipid.snd_buff_size; 2561 txbuffsize *= tx_rings; 2562 2563 rxdescsize = rx_rings*bgep->chipid.recv_slots; 2564 rxdescsize *= sizeof (bge_rbd_t); 2565 2566 rxbuffdescsize = BGE_STD_SLOTS_USED; 2567 rxbuffdescsize += bgep->chipid.jumbo_slots; 2568 rxbuffdescsize += BGE_MINI_SLOTS_USED; 2569 rxbuffdescsize *= sizeof (bge_rbd_t); 2570 2571 txdescsize = tx_rings*BGE_SEND_SLOTS_USED; 2572 txdescsize *= sizeof (bge_sbd_t); 2573 txdescsize += sizeof (bge_statistics_t); 2574 txdescsize += sizeof (bge_status_t); 2575 txdescsize += BGE_STATUS_PADDING; 2576 2577 /* 2578 * Enable PCI relaxed ordering only for RX/TX data buffers 2579 */ 2580 if (bge_relaxed_ordering) 2581 dma_attr.dma_attr_flags |= DDI_DMA_RELAXED_ORDERING; 2582 2583 /* 2584 * Allocate memory & handles for RX buffers 2585 */ 2586 ASSERT((rxbuffsize % BGE_SPLIT) == 0); 2587 for (split = 0; split < BGE_SPLIT; ++split) { 2588 err = bge_alloc_dma_mem(bgep, rxbuffsize/BGE_SPLIT, 2589 &bge_data_accattr, DDI_DMA_READ | BGE_DMA_MODE, 2590 &bgep->rx_buff[split]); 2591 if (err != DDI_SUCCESS) 2592 return (DDI_FAILURE); 2593 } 2594 2595 /* 2596 * Allocate memory & handles for TX buffers 2597 */ 2598 ASSERT((txbuffsize % BGE_SPLIT) == 0); 2599 for (split = 0; split < BGE_SPLIT; ++split) { 2600 err = bge_alloc_dma_mem(bgep, txbuffsize/BGE_SPLIT, 2601 &bge_data_accattr, DDI_DMA_WRITE | BGE_DMA_MODE, 2602 &bgep->tx_buff[split]); 2603 if (err != DDI_SUCCESS) 2604 return (DDI_FAILURE); 2605 } 2606 2607 dma_attr.dma_attr_flags &= ~DDI_DMA_RELAXED_ORDERING; 2608 2609 /* 2610 * Allocate memory & handles for receive return rings 2611 */ 2612 ASSERT((rxdescsize % rx_rings) == 0); 2613 for (split = 0; split < rx_rings; ++split) { 2614 err = bge_alloc_dma_mem(bgep, rxdescsize/rx_rings, 2615 &bge_desc_accattr, DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 2616 &bgep->rx_desc[split]); 2617 if (err != DDI_SUCCESS) 2618 return (DDI_FAILURE); 2619 } 2620 2621 /* 2622 * Allocate memory & handles for buffer (producer) descriptor rings 2623 */ 2624 err = bge_alloc_dma_mem(bgep, rxbuffdescsize, &bge_desc_accattr, 2625 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &bgep->rx_desc[split]); 2626 if (err != DDI_SUCCESS) 2627 return (DDI_FAILURE); 2628 2629 /* 2630 * Allocate memory & handles for TX descriptor rings, 2631 * status block, and statistics area 2632 */ 2633 err = bge_alloc_dma_mem(bgep, txdescsize, &bge_desc_accattr, 2634 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &bgep->tx_desc); 2635 if (err != DDI_SUCCESS) 2636 return (DDI_FAILURE); 2637 2638 /* 2639 * Now carve up each of the allocated areas ... 2640 */ 2641 for (split = 0; split < BGE_SPLIT; ++split) { 2642 area = bgep->rx_buff[split]; 2643 bge_slice_chunk(&bgep->buff[BGE_STD_BUFF_RING].buf[split], 2644 &area, BGE_STD_SLOTS_USED/BGE_SPLIT, 2645 bgep->chipid.std_buf_size); 2646 bge_slice_chunk(&bgep->buff[BGE_JUMBO_BUFF_RING].buf[split], 2647 &area, bgep->chipid.jumbo_slots/BGE_SPLIT, 2648 bgep->chipid.recv_jumbo_size); 2649 bge_slice_chunk(&bgep->buff[BGE_MINI_BUFF_RING].buf[split], 2650 &area, BGE_MINI_SLOTS_USED/BGE_SPLIT, 2651 BGE_MINI_BUFF_SIZE); 2652 } 2653 2654 for (split = 0; split < BGE_SPLIT; ++split) { 2655 area = bgep->tx_buff[split]; 2656 for (ring = 0; ring < tx_rings; ++ring) 2657 bge_slice_chunk(&bgep->send[ring].buf[0][split], 2658 &area, BGE_SEND_BUF_NUM/BGE_SPLIT, 2659 bgep->chipid.snd_buff_size); 2660 for (; ring < BGE_SEND_RINGS_MAX; ++ring) 2661 bge_slice_chunk(&bgep->send[ring].buf[0][split], 2662 &area, 0, bgep->chipid.snd_buff_size); 2663 } 2664 2665 for (ring = 0; ring < rx_rings; ++ring) 2666 bge_slice_chunk(&bgep->recv[ring].desc, &bgep->rx_desc[ring], 2667 bgep->chipid.recv_slots, sizeof (bge_rbd_t)); 2668 2669 area = bgep->rx_desc[rx_rings]; 2670 for (; ring < BGE_RECV_RINGS_MAX; ++ring) 2671 bge_slice_chunk(&bgep->recv[ring].desc, &area, 2672 0, sizeof (bge_rbd_t)); 2673 bge_slice_chunk(&bgep->buff[BGE_STD_BUFF_RING].desc, &area, 2674 BGE_STD_SLOTS_USED, sizeof (bge_rbd_t)); 2675 bge_slice_chunk(&bgep->buff[BGE_JUMBO_BUFF_RING].desc, &area, 2676 bgep->chipid.jumbo_slots, sizeof (bge_rbd_t)); 2677 bge_slice_chunk(&bgep->buff[BGE_MINI_BUFF_RING].desc, &area, 2678 BGE_MINI_SLOTS_USED, sizeof (bge_rbd_t)); 2679 ASSERT(area.alength == 0); 2680 2681 area = bgep->tx_desc; 2682 for (ring = 0; ring < tx_rings; ++ring) 2683 bge_slice_chunk(&bgep->send[ring].desc, &area, 2684 BGE_SEND_SLOTS_USED, sizeof (bge_sbd_t)); 2685 for (; ring < BGE_SEND_RINGS_MAX; ++ring) 2686 bge_slice_chunk(&bgep->send[ring].desc, &area, 2687 0, sizeof (bge_sbd_t)); 2688 bge_slice_chunk(&bgep->statistics, &area, 1, sizeof (bge_statistics_t)); 2689 bge_slice_chunk(&bgep->status_block, &area, 1, sizeof (bge_status_t)); 2690 ASSERT(area.alength == BGE_STATUS_PADDING); 2691 DMA_ZERO(bgep->status_block); 2692 2693 return (DDI_SUCCESS); 2694 } 2695 2696 /* 2697 * This routine frees the transmit and receive buffers and descriptors. 2698 * Make sure the chip is stopped before calling it! 2699 */ 2700 void 2701 bge_free_bufs(bge_t *bgep) 2702 { 2703 int split; 2704 2705 BGE_TRACE(("bge_free_bufs($%p)", 2706 (void *)bgep)); 2707 2708 bge_free_dma_mem(&bgep->tx_desc); 2709 for (split = 0; split < BGE_RECV_RINGS_SPLIT; ++split) 2710 bge_free_dma_mem(&bgep->rx_desc[split]); 2711 for (split = 0; split < BGE_SPLIT; ++split) 2712 bge_free_dma_mem(&bgep->tx_buff[split]); 2713 for (split = 0; split < BGE_SPLIT; ++split) 2714 bge_free_dma_mem(&bgep->rx_buff[split]); 2715 } 2716 2717 /* 2718 * Determine (initial) MAC address ("BIA") to use for this interface 2719 */ 2720 2721 static void 2722 bge_find_mac_address(bge_t *bgep, chip_id_t *cidp) 2723 { 2724 struct ether_addr sysaddr; 2725 char propbuf[8]; /* "true" or "false", plus NUL */ 2726 uchar_t *bytes; 2727 int *ints; 2728 uint_t nelts; 2729 int err; 2730 2731 BGE_TRACE(("bge_find_mac_address($%p)", 2732 (void *)bgep)); 2733 2734 BGE_DEBUG(("bge_find_mac_address: hw_mac_addr %012llx, => %s (%sset)", 2735 cidp->hw_mac_addr, 2736 ether_sprintf((void *)cidp->vendor_addr.addr), 2737 cidp->vendor_addr.set ? "" : "not ")); 2738 2739 /* 2740 * The "vendor's factory-set address" may already have 2741 * been extracted from the chip, but if the property 2742 * "local-mac-address" is set we use that instead. It 2743 * will normally be set by OBP, but it could also be 2744 * specified in a .conf file(!) 2745 * 2746 * There doesn't seem to be a way to define byte-array 2747 * properties in a .conf, so we check whether it looks 2748 * like an array of 6 ints instead. 2749 * 2750 * Then, we check whether it looks like an array of 6 2751 * bytes (which it should, if OBP set it). If we can't 2752 * make sense of it either way, we'll ignore it. 2753 */ 2754 err = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, bgep->devinfo, 2755 DDI_PROP_DONTPASS, localmac_propname, &ints, &nelts); 2756 if (err == DDI_PROP_SUCCESS) { 2757 if (nelts == ETHERADDRL) { 2758 while (nelts--) 2759 cidp->vendor_addr.addr[nelts] = ints[nelts]; 2760 cidp->vendor_addr.set = B_TRUE; 2761 } 2762 ddi_prop_free(ints); 2763 } 2764 2765 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, bgep->devinfo, 2766 DDI_PROP_DONTPASS, localmac_propname, &bytes, &nelts); 2767 if (err == DDI_PROP_SUCCESS) { 2768 if (nelts == ETHERADDRL) { 2769 while (nelts--) 2770 cidp->vendor_addr.addr[nelts] = bytes[nelts]; 2771 cidp->vendor_addr.set = B_TRUE; 2772 } 2773 ddi_prop_free(bytes); 2774 } 2775 2776 BGE_DEBUG(("bge_find_mac_address: +local %s (%sset)", 2777 ether_sprintf((void *)cidp->vendor_addr.addr), 2778 cidp->vendor_addr.set ? "" : "not ")); 2779 2780 /* 2781 * Look up the OBP property "local-mac-address?". Note that even 2782 * though its value is a string (which should be "true" or "false"), 2783 * it can't be decoded by ddi_prop_lookup_string(9F). So, we zero 2784 * the buffer first and then fetch the property as an untyped array; 2785 * this may or may not include a final NUL, but since there will 2786 * always be one left at the end of the buffer we can now treat it 2787 * as a string anyway. 2788 */ 2789 nelts = sizeof (propbuf); 2790 bzero(propbuf, nelts--); 2791 err = ddi_getlongprop_buf(DDI_DEV_T_ANY, bgep->devinfo, 2792 DDI_PROP_CANSLEEP, localmac_boolname, propbuf, (int *)&nelts); 2793 2794 /* 2795 * Now, if the address still isn't set from the hardware (SEEPROM) 2796 * or the OBP or .conf property, OR if the user has foolishly set 2797 * 'local-mac-address? = false', use "the system address" instead 2798 * (but only if it's non-null i.e. has been set from the IDPROM). 2799 */ 2800 if (cidp->vendor_addr.set == B_FALSE || strcmp(propbuf, "false") == 0) 2801 if (localetheraddr(NULL, &sysaddr) != 0) { 2802 ethaddr_copy(&sysaddr, cidp->vendor_addr.addr); 2803 cidp->vendor_addr.set = B_TRUE; 2804 } 2805 2806 BGE_DEBUG(("bge_find_mac_address: +system %s (%sset)", 2807 ether_sprintf((void *)cidp->vendor_addr.addr), 2808 cidp->vendor_addr.set ? "" : "not ")); 2809 2810 /* 2811 * Finally(!), if there's a valid "mac-address" property (created 2812 * if we netbooted from this interface), we must use this instead 2813 * of any of the above to ensure that the NFS/install server doesn't 2814 * get confused by the address changing as Solaris takes over! 2815 */ 2816 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, bgep->devinfo, 2817 DDI_PROP_DONTPASS, macaddr_propname, &bytes, &nelts); 2818 if (err == DDI_PROP_SUCCESS) { 2819 if (nelts == ETHERADDRL) { 2820 while (nelts--) 2821 cidp->vendor_addr.addr[nelts] = bytes[nelts]; 2822 cidp->vendor_addr.set = B_TRUE; 2823 } 2824 ddi_prop_free(bytes); 2825 } 2826 2827 BGE_DEBUG(("bge_find_mac_address: =final %s (%sset)", 2828 ether_sprintf((void *)cidp->vendor_addr.addr), 2829 cidp->vendor_addr.set ? "" : "not ")); 2830 } 2831 2832 2833 /*ARGSUSED*/ 2834 int 2835 bge_check_acc_handle(bge_t *bgep, ddi_acc_handle_t handle) 2836 { 2837 ddi_fm_error_t de; 2838 2839 ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION); 2840 ddi_fm_acc_err_clear(handle, DDI_FME_VERSION); 2841 return (de.fme_status); 2842 } 2843 2844 /*ARGSUSED*/ 2845 int 2846 bge_check_dma_handle(bge_t *bgep, ddi_dma_handle_t handle) 2847 { 2848 ddi_fm_error_t de; 2849 2850 ASSERT(bgep->progress & PROGRESS_BUFS); 2851 ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION); 2852 return (de.fme_status); 2853 } 2854 2855 /* 2856 * The IO fault service error handling callback function 2857 */ 2858 /*ARGSUSED*/ 2859 static int 2860 bge_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data) 2861 { 2862 /* 2863 * as the driver can always deal with an error in any dma or 2864 * access handle, we can just return the fme_status value. 2865 */ 2866 pci_ereport_post(dip, err, NULL); 2867 return (err->fme_status); 2868 } 2869 2870 static void 2871 bge_fm_init(bge_t *bgep) 2872 { 2873 ddi_iblock_cookie_t iblk; 2874 2875 /* Only register with IO Fault Services if we have some capability */ 2876 if (bgep->fm_capabilities) { 2877 bge_reg_accattr.devacc_attr_access = DDI_FLAGERR_ACC; 2878 bge_desc_accattr.devacc_attr_access = DDI_FLAGERR_ACC; 2879 dma_attr.dma_attr_flags = DDI_DMA_FLAGERR; 2880 2881 /* Register capabilities with IO Fault Services */ 2882 ddi_fm_init(bgep->devinfo, &bgep->fm_capabilities, &iblk); 2883 2884 /* 2885 * Initialize pci ereport capabilities if ereport capable 2886 */ 2887 if (DDI_FM_EREPORT_CAP(bgep->fm_capabilities) || 2888 DDI_FM_ERRCB_CAP(bgep->fm_capabilities)) 2889 pci_ereport_setup(bgep->devinfo); 2890 2891 /* 2892 * Register error callback if error callback capable 2893 */ 2894 if (DDI_FM_ERRCB_CAP(bgep->fm_capabilities)) 2895 ddi_fm_handler_register(bgep->devinfo, 2896 bge_fm_error_cb, (void*) bgep); 2897 } else { 2898 /* 2899 * These fields have to be cleared of FMA if there are no 2900 * FMA capabilities at runtime. 2901 */ 2902 bge_reg_accattr.devacc_attr_access = DDI_DEFAULT_ACC; 2903 bge_desc_accattr.devacc_attr_access = DDI_DEFAULT_ACC; 2904 dma_attr.dma_attr_flags = 0; 2905 } 2906 } 2907 2908 static void 2909 bge_fm_fini(bge_t *bgep) 2910 { 2911 /* Only unregister FMA capabilities if we registered some */ 2912 if (bgep->fm_capabilities) { 2913 2914 /* 2915 * Release any resources allocated by pci_ereport_setup() 2916 */ 2917 if (DDI_FM_EREPORT_CAP(bgep->fm_capabilities) || 2918 DDI_FM_ERRCB_CAP(bgep->fm_capabilities)) 2919 pci_ereport_teardown(bgep->devinfo); 2920 2921 /* 2922 * Un-register error callback if error callback capable 2923 */ 2924 if (DDI_FM_ERRCB_CAP(bgep->fm_capabilities)) 2925 ddi_fm_handler_unregister(bgep->devinfo); 2926 2927 /* Unregister from IO Fault Services */ 2928 ddi_fm_fini(bgep->devinfo); 2929 } 2930 } 2931 2932 static void 2933 #ifdef BGE_IPMI_ASF 2934 bge_unattach(bge_t *bgep, uint_t asf_mode) 2935 #else 2936 bge_unattach(bge_t *bgep) 2937 #endif 2938 { 2939 BGE_TRACE(("bge_unattach($%p)", 2940 (void *)bgep)); 2941 2942 /* 2943 * Flag that no more activity may be initiated 2944 */ 2945 bgep->progress &= ~PROGRESS_READY; 2946 2947 /* 2948 * Quiesce the PHY and MAC (leave it reset but still powered). 2949 * Clean up and free all BGE data structures 2950 */ 2951 if (bgep->periodic_id != NULL) { 2952 ddi_periodic_delete(bgep->periodic_id); 2953 bgep->periodic_id = NULL; 2954 } 2955 if (bgep->progress & PROGRESS_KSTATS) 2956 bge_fini_kstats(bgep); 2957 if (bgep->progress & PROGRESS_PHY) 2958 bge_phys_reset(bgep); 2959 if (bgep->progress & PROGRESS_HWINT) { 2960 mutex_enter(bgep->genlock); 2961 #ifdef BGE_IPMI_ASF 2962 if (bge_chip_reset(bgep, B_FALSE, asf_mode) != DDI_SUCCESS) 2963 #else 2964 if (bge_chip_reset(bgep, B_FALSE) != DDI_SUCCESS) 2965 #endif 2966 ddi_fm_service_impact(bgep->devinfo, 2967 DDI_SERVICE_UNAFFECTED); 2968 #ifdef BGE_IPMI_ASF 2969 if (bgep->asf_enabled) { 2970 /* 2971 * This register has been overlaid. We restore its 2972 * initial value here. 2973 */ 2974 bge_nic_put32(bgep, BGE_NIC_DATA_SIG_ADDR, 2975 BGE_NIC_DATA_SIG); 2976 } 2977 #endif 2978 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) 2979 ddi_fm_service_impact(bgep->devinfo, 2980 DDI_SERVICE_UNAFFECTED); 2981 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) 2982 ddi_fm_service_impact(bgep->devinfo, 2983 DDI_SERVICE_UNAFFECTED); 2984 mutex_exit(bgep->genlock); 2985 } 2986 if (bgep->progress & PROGRESS_INTR) { 2987 bge_intr_disable(bgep); 2988 bge_fini_rings(bgep); 2989 } 2990 if (bgep->progress & PROGRESS_HWINT) { 2991 bge_rem_intrs(bgep); 2992 rw_destroy(bgep->errlock); 2993 mutex_destroy(bgep->softintrlock); 2994 mutex_destroy(bgep->genlock); 2995 } 2996 if (bgep->progress & PROGRESS_FACTOTUM) 2997 ddi_remove_softintr(bgep->factotum_id); 2998 if (bgep->progress & PROGRESS_RESCHED) 2999 ddi_remove_softintr(bgep->drain_id); 3000 if (bgep->progress & PROGRESS_BUFS) 3001 bge_free_bufs(bgep); 3002 if (bgep->progress & PROGRESS_REGS) 3003 ddi_regs_map_free(&bgep->io_handle); 3004 if (bgep->progress & PROGRESS_CFG) 3005 pci_config_teardown(&bgep->cfg_handle); 3006 3007 bge_fm_fini(bgep); 3008 3009 ddi_remove_minor_node(bgep->devinfo, NULL); 3010 kmem_free(bgep->pstats, sizeof (bge_statistics_reg_t)); 3011 kmem_free(bgep, sizeof (*bgep)); 3012 } 3013 3014 static int 3015 bge_resume(dev_info_t *devinfo) 3016 { 3017 bge_t *bgep; /* Our private data */ 3018 chip_id_t *cidp; 3019 chip_id_t chipid; 3020 3021 bgep = ddi_get_driver_private(devinfo); 3022 if (bgep == NULL) 3023 return (DDI_FAILURE); 3024 3025 /* 3026 * Refuse to resume if the data structures aren't consistent 3027 */ 3028 if (bgep->devinfo != devinfo) 3029 return (DDI_FAILURE); 3030 3031 #ifdef BGE_IPMI_ASF 3032 /* 3033 * Power management hasn't been supported in BGE now. If you 3034 * want to implement it, please add the ASF/IPMI related 3035 * code here. 3036 */ 3037 3038 #endif 3039 3040 /* 3041 * Read chip ID & set up config space command register(s) 3042 * Refuse to resume if the chip has changed its identity! 3043 */ 3044 cidp = &bgep->chipid; 3045 mutex_enter(bgep->genlock); 3046 bge_chip_cfg_init(bgep, &chipid, B_FALSE); 3047 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) { 3048 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 3049 mutex_exit(bgep->genlock); 3050 return (DDI_FAILURE); 3051 } 3052 mutex_exit(bgep->genlock); 3053 if (chipid.vendor != cidp->vendor) 3054 return (DDI_FAILURE); 3055 if (chipid.device != cidp->device) 3056 return (DDI_FAILURE); 3057 if (chipid.revision != cidp->revision) 3058 return (DDI_FAILURE); 3059 if (chipid.asic_rev != cidp->asic_rev) 3060 return (DDI_FAILURE); 3061 3062 /* 3063 * All OK, reinitialise h/w & kick off GLD scheduling 3064 */ 3065 mutex_enter(bgep->genlock); 3066 if (bge_restart(bgep, B_TRUE) != DDI_SUCCESS) { 3067 (void) bge_check_acc_handle(bgep, bgep->cfg_handle); 3068 (void) bge_check_acc_handle(bgep, bgep->io_handle); 3069 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 3070 mutex_exit(bgep->genlock); 3071 return (DDI_FAILURE); 3072 } 3073 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) { 3074 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 3075 mutex_exit(bgep->genlock); 3076 return (DDI_FAILURE); 3077 } 3078 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) { 3079 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 3080 mutex_exit(bgep->genlock); 3081 return (DDI_FAILURE); 3082 } 3083 mutex_exit(bgep->genlock); 3084 return (DDI_SUCCESS); 3085 } 3086 3087 /* 3088 * attach(9E) -- Attach a device to the system 3089 * 3090 * Called once for each board successfully probed. 3091 */ 3092 static int 3093 bge_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd) 3094 { 3095 bge_t *bgep; /* Our private data */ 3096 mac_register_t *macp; 3097 chip_id_t *cidp; 3098 caddr_t regs; 3099 int instance; 3100 int err; 3101 int intr_types; 3102 #ifdef BGE_IPMI_ASF 3103 uint32_t mhcrValue; 3104 #ifdef __sparc 3105 uint16_t value16; 3106 #endif 3107 #ifdef BGE_NETCONSOLE 3108 int retval; 3109 #endif 3110 #endif 3111 3112 instance = ddi_get_instance(devinfo); 3113 3114 BGE_GTRACE(("bge_attach($%p, %d) instance %d", 3115 (void *)devinfo, cmd, instance)); 3116 BGE_BRKPT(NULL, "bge_attach"); 3117 3118 switch (cmd) { 3119 default: 3120 return (DDI_FAILURE); 3121 3122 case DDI_RESUME: 3123 return (bge_resume(devinfo)); 3124 3125 case DDI_ATTACH: 3126 break; 3127 } 3128 3129 bgep = kmem_zalloc(sizeof (*bgep), KM_SLEEP); 3130 bgep->pstats = kmem_zalloc(sizeof (bge_statistics_reg_t), KM_SLEEP); 3131 ddi_set_driver_private(devinfo, bgep); 3132 bgep->bge_guard = BGE_GUARD; 3133 bgep->devinfo = devinfo; 3134 bgep->param_drain_max = 64; 3135 bgep->param_msi_cnt = 0; 3136 bgep->param_loop_mode = 0; 3137 3138 /* 3139 * Initialize more fields in BGE private data 3140 */ 3141 bgep->debug = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 3142 DDI_PROP_DONTPASS, debug_propname, bge_debug); 3143 (void) snprintf(bgep->ifname, sizeof (bgep->ifname), "%s%d", 3144 BGE_DRIVER_NAME, instance); 3145 3146 /* 3147 * Initialize for fma support 3148 */ 3149 bgep->fm_capabilities = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 3150 DDI_PROP_DONTPASS, fm_cap, 3151 DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE | 3152 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE); 3153 BGE_DEBUG(("bgep->fm_capabilities = %d", bgep->fm_capabilities)); 3154 bge_fm_init(bgep); 3155 3156 /* 3157 * Look up the IOMMU's page size for DVMA mappings (must be 3158 * a power of 2) and convert to a mask. This can be used to 3159 * determine whether a message buffer crosses a page boundary. 3160 * Note: in 2s complement binary notation, if X is a power of 3161 * 2, then -X has the representation "11...1100...00". 3162 */ 3163 bgep->pagemask = dvma_pagesize(devinfo); 3164 ASSERT(ddi_ffs(bgep->pagemask) == ddi_fls(bgep->pagemask)); 3165 bgep->pagemask = -bgep->pagemask; 3166 3167 /* 3168 * Map config space registers 3169 * Read chip ID & set up config space command register(s) 3170 * 3171 * Note: this leaves the chip accessible by Memory Space 3172 * accesses, but with interrupts and Bus Mastering off. 3173 * This should ensure that nothing untoward will happen 3174 * if it has been left active by the (net-)bootloader. 3175 * We'll re-enable Bus Mastering once we've reset the chip, 3176 * and allow interrupts only when everything else is set up. 3177 */ 3178 err = pci_config_setup(devinfo, &bgep->cfg_handle); 3179 #ifdef BGE_IPMI_ASF 3180 #ifdef __sparc 3181 value16 = pci_config_get16(bgep->cfg_handle, PCI_CONF_COMM); 3182 value16 = value16 | (PCI_COMM_MAE | PCI_COMM_ME); 3183 pci_config_put16(bgep->cfg_handle, PCI_CONF_COMM, value16); 3184 mhcrValue = MHCR_ENABLE_INDIRECT_ACCESS | 3185 MHCR_ENABLE_TAGGED_STATUS_MODE | 3186 MHCR_MASK_INTERRUPT_MODE | 3187 MHCR_MASK_PCI_INT_OUTPUT | 3188 MHCR_CLEAR_INTERRUPT_INTA | 3189 MHCR_ENABLE_ENDIAN_WORD_SWAP | 3190 MHCR_ENABLE_ENDIAN_BYTE_SWAP; 3191 pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_MHCR, mhcrValue); 3192 bge_ind_put32(bgep, MEMORY_ARBITER_MODE_REG, 3193 bge_ind_get32(bgep, MEMORY_ARBITER_MODE_REG) | 3194 MEMORY_ARBITER_ENABLE); 3195 #else 3196 mhcrValue = pci_config_get32(bgep->cfg_handle, PCI_CONF_BGE_MHCR); 3197 #endif 3198 if (mhcrValue & MHCR_ENABLE_ENDIAN_WORD_SWAP) { 3199 bgep->asf_wordswapped = B_TRUE; 3200 } else { 3201 bgep->asf_wordswapped = B_FALSE; 3202 } 3203 bge_asf_get_config(bgep); 3204 #endif 3205 if (err != DDI_SUCCESS) { 3206 bge_problem(bgep, "pci_config_setup() failed"); 3207 goto attach_fail; 3208 } 3209 bgep->progress |= PROGRESS_CFG; 3210 cidp = &bgep->chipid; 3211 bzero(cidp, sizeof (*cidp)); 3212 bge_chip_cfg_init(bgep, cidp, B_FALSE); 3213 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) { 3214 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 3215 goto attach_fail; 3216 } 3217 3218 #ifdef BGE_IPMI_ASF 3219 if (DEVICE_5721_SERIES_CHIPSETS(bgep) || 3220 DEVICE_5714_SERIES_CHIPSETS(bgep)) { 3221 bgep->asf_newhandshake = B_TRUE; 3222 } else { 3223 bgep->asf_newhandshake = B_FALSE; 3224 } 3225 #endif 3226 3227 /* 3228 * Update those parts of the chip ID derived from volatile 3229 * registers with the values seen by OBP (in case the chip 3230 * has been reset externally and therefore lost them). 3231 */ 3232 cidp->subven = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 3233 DDI_PROP_DONTPASS, subven_propname, cidp->subven); 3234 cidp->subdev = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 3235 DDI_PROP_DONTPASS, subdev_propname, cidp->subdev); 3236 cidp->clsize = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 3237 DDI_PROP_DONTPASS, clsize_propname, cidp->clsize); 3238 cidp->latency = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 3239 DDI_PROP_DONTPASS, latency_propname, cidp->latency); 3240 cidp->rx_rings = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 3241 DDI_PROP_DONTPASS, rxrings_propname, cidp->rx_rings); 3242 cidp->tx_rings = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 3243 DDI_PROP_DONTPASS, txrings_propname, cidp->tx_rings); 3244 3245 if (bge_jumbo_enable == B_TRUE) { 3246 cidp->default_mtu = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 3247 DDI_PROP_DONTPASS, default_mtu, BGE_DEFAULT_MTU); 3248 if ((cidp->default_mtu < BGE_DEFAULT_MTU)|| 3249 (cidp->default_mtu > BGE_MAXIMUM_MTU)) { 3250 cidp->default_mtu = BGE_DEFAULT_MTU; 3251 } 3252 } 3253 /* 3254 * Map operating registers 3255 */ 3256 err = ddi_regs_map_setup(devinfo, BGE_PCI_OPREGS_RNUMBER, 3257 ®s, 0, 0, &bge_reg_accattr, &bgep->io_handle); 3258 if (err != DDI_SUCCESS) { 3259 bge_problem(bgep, "ddi_regs_map_setup() failed"); 3260 goto attach_fail; 3261 } 3262 bgep->io_regs = regs; 3263 bgep->progress |= PROGRESS_REGS; 3264 3265 /* 3266 * Characterise the device, so we know its requirements. 3267 * Then allocate the appropriate TX and RX descriptors & buffers. 3268 */ 3269 if (bge_chip_id_init(bgep) == EIO) { 3270 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 3271 goto attach_fail; 3272 } 3273 3274 3275 err = bge_alloc_bufs(bgep); 3276 if (err != DDI_SUCCESS) { 3277 bge_problem(bgep, "DMA buffer allocation failed"); 3278 goto attach_fail; 3279 } 3280 bgep->progress |= PROGRESS_BUFS; 3281 3282 /* 3283 * Add the softint handlers: 3284 * 3285 * Both of these handlers are used to avoid restrictions on the 3286 * context and/or mutexes required for some operations. In 3287 * particular, the hardware interrupt handler and its subfunctions 3288 * can detect a number of conditions that we don't want to handle 3289 * in that context or with that set of mutexes held. So, these 3290 * softints are triggered instead: 3291 * 3292 * the <resched> softint is triggered if we have previously 3293 * had to refuse to send a packet because of resource shortage 3294 * (we've run out of transmit buffers), but the send completion 3295 * interrupt handler has now detected that more buffers have 3296 * become available. 3297 * 3298 * the <factotum> is triggered if the h/w interrupt handler 3299 * sees the <link state changed> or <error> bits in the status 3300 * block. It's also triggered periodically to poll the link 3301 * state, just in case we aren't getting link status change 3302 * interrupts ... 3303 */ 3304 err = ddi_add_softintr(devinfo, DDI_SOFTINT_LOW, &bgep->drain_id, 3305 NULL, NULL, bge_send_drain, (caddr_t)bgep); 3306 if (err != DDI_SUCCESS) { 3307 bge_problem(bgep, "ddi_add_softintr() failed"); 3308 goto attach_fail; 3309 } 3310 bgep->progress |= PROGRESS_RESCHED; 3311 err = ddi_add_softintr(devinfo, DDI_SOFTINT_LOW, &bgep->factotum_id, 3312 NULL, NULL, bge_chip_factotum, (caddr_t)bgep); 3313 if (err != DDI_SUCCESS) { 3314 bge_problem(bgep, "ddi_add_softintr() failed"); 3315 goto attach_fail; 3316 } 3317 bgep->progress |= PROGRESS_FACTOTUM; 3318 3319 /* Get supported interrupt types */ 3320 if (ddi_intr_get_supported_types(devinfo, &intr_types) != DDI_SUCCESS) { 3321 bge_error(bgep, "ddi_intr_get_supported_types failed\n"); 3322 3323 goto attach_fail; 3324 } 3325 3326 BGE_DEBUG(("%s: ddi_intr_get_supported_types() returned: %x", 3327 bgep->ifname, intr_types)); 3328 3329 if ((intr_types & DDI_INTR_TYPE_MSI) && bgep->chipid.msi_enabled) { 3330 if (bge_add_intrs(bgep, DDI_INTR_TYPE_MSI) != DDI_SUCCESS) { 3331 bge_error(bgep, "MSI registration failed, " 3332 "trying FIXED interrupt type\n"); 3333 } else { 3334 BGE_DEBUG(("%s: Using MSI interrupt type", 3335 bgep->ifname)); 3336 bgep->intr_type = DDI_INTR_TYPE_MSI; 3337 bgep->progress |= PROGRESS_HWINT; 3338 } 3339 } 3340 3341 if (!(bgep->progress & PROGRESS_HWINT) && 3342 (intr_types & DDI_INTR_TYPE_FIXED)) { 3343 if (bge_add_intrs(bgep, DDI_INTR_TYPE_FIXED) != DDI_SUCCESS) { 3344 bge_error(bgep, "FIXED interrupt " 3345 "registration failed\n"); 3346 goto attach_fail; 3347 } 3348 3349 BGE_DEBUG(("%s: Using FIXED interrupt type", bgep->ifname)); 3350 3351 bgep->intr_type = DDI_INTR_TYPE_FIXED; 3352 bgep->progress |= PROGRESS_HWINT; 3353 } 3354 3355 if (!(bgep->progress & PROGRESS_HWINT)) { 3356 bge_error(bgep, "No interrupts registered\n"); 3357 goto attach_fail; 3358 } 3359 3360 /* 3361 * Note that interrupts are not enabled yet as 3362 * mutex locks are not initialized. Initialize mutex locks. 3363 */ 3364 mutex_init(bgep->genlock, NULL, MUTEX_DRIVER, 3365 DDI_INTR_PRI(bgep->intr_pri)); 3366 mutex_init(bgep->softintrlock, NULL, MUTEX_DRIVER, 3367 DDI_INTR_PRI(bgep->intr_pri)); 3368 rw_init(bgep->errlock, NULL, RW_DRIVER, 3369 DDI_INTR_PRI(bgep->intr_pri)); 3370 3371 /* 3372 * Initialize rings. 3373 */ 3374 bge_init_rings(bgep); 3375 3376 /* 3377 * Now that mutex locks are initialized, enable interrupts. 3378 */ 3379 bge_intr_enable(bgep); 3380 bgep->progress |= PROGRESS_INTR; 3381 3382 /* 3383 * Initialise link state variables 3384 * Stop, reset & reinitialise the chip. 3385 * Initialise the (internal) PHY. 3386 */ 3387 bgep->link_state = LINK_STATE_UNKNOWN; 3388 3389 mutex_enter(bgep->genlock); 3390 3391 /* 3392 * Reset chip & rings to initial state; also reset address 3393 * filtering, promiscuity, loopback mode. 3394 */ 3395 #ifdef BGE_IPMI_ASF 3396 #ifdef BGE_NETCONSOLE 3397 if (bge_reset(bgep, ASF_MODE_INIT) != DDI_SUCCESS) { 3398 #else 3399 if (bge_reset(bgep, ASF_MODE_SHUTDOWN) != DDI_SUCCESS) { 3400 #endif 3401 #else 3402 if (bge_reset(bgep) != DDI_SUCCESS) { 3403 #endif 3404 (void) bge_check_acc_handle(bgep, bgep->cfg_handle); 3405 (void) bge_check_acc_handle(bgep, bgep->io_handle); 3406 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 3407 mutex_exit(bgep->genlock); 3408 goto attach_fail; 3409 } 3410 3411 #ifdef BGE_IPMI_ASF 3412 if (bgep->asf_enabled) { 3413 bgep->asf_status = ASF_STAT_RUN_INIT; 3414 } 3415 #endif 3416 3417 bzero(bgep->mcast_hash, sizeof (bgep->mcast_hash)); 3418 bzero(bgep->mcast_refs, sizeof (bgep->mcast_refs)); 3419 bgep->promisc = B_FALSE; 3420 bgep->param_loop_mode = BGE_LOOP_NONE; 3421 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) { 3422 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 3423 mutex_exit(bgep->genlock); 3424 goto attach_fail; 3425 } 3426 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) { 3427 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 3428 mutex_exit(bgep->genlock); 3429 goto attach_fail; 3430 } 3431 3432 mutex_exit(bgep->genlock); 3433 3434 if (bge_phys_init(bgep) == EIO) { 3435 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 3436 goto attach_fail; 3437 } 3438 bgep->progress |= PROGRESS_PHY; 3439 3440 /* 3441 * initialize NDD-tweakable parameters 3442 */ 3443 if (bge_nd_init(bgep)) { 3444 bge_problem(bgep, "bge_nd_init() failed"); 3445 goto attach_fail; 3446 } 3447 bgep->progress |= PROGRESS_NDD; 3448 3449 /* 3450 * Create & initialise named kstats 3451 */ 3452 bge_init_kstats(bgep, instance); 3453 bgep->progress |= PROGRESS_KSTATS; 3454 3455 /* 3456 * Determine whether to override the chip's own MAC address 3457 */ 3458 bge_find_mac_address(bgep, cidp); 3459 3460 bgep->unicst_addr_total = MAC_ADDRESS_REGS_MAX; 3461 bgep->unicst_addr_avail = MAC_ADDRESS_REGS_MAX; 3462 3463 if ((macp = mac_alloc(MAC_VERSION)) == NULL) 3464 goto attach_fail; 3465 macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER; 3466 macp->m_driver = bgep; 3467 macp->m_dip = devinfo; 3468 macp->m_src_addr = cidp->vendor_addr.addr; 3469 macp->m_callbacks = &bge_m_callbacks; 3470 macp->m_min_sdu = 0; 3471 macp->m_max_sdu = cidp->ethmax_size - sizeof (struct ether_header); 3472 macp->m_margin = VLAN_TAGSZ; 3473 macp->m_priv_props = bge_priv_prop; 3474 macp->m_priv_prop_count = BGE_MAX_PRIV_PROPS; 3475 macp->m_v12n = MAC_VIRT_LEVEL1; 3476 3477 /* 3478 * Finally, we're ready to register ourselves with the MAC layer 3479 * interface; if this succeeds, we're all ready to start() 3480 */ 3481 err = mac_register(macp, &bgep->mh); 3482 mac_free(macp); 3483 if (err != 0) 3484 goto attach_fail; 3485 3486 /* 3487 * Register a periodical handler. 3488 * bge_chip_cyclic() is invoked in kernel context. 3489 */ 3490 bgep->periodic_id = ddi_periodic_add(bge_chip_cyclic, bgep, 3491 BGE_CYCLIC_PERIOD, DDI_IPL_0); 3492 3493 bgep->progress |= PROGRESS_READY; 3494 ASSERT(bgep->bge_guard == BGE_GUARD); 3495 #ifdef BGE_IPMI_ASF 3496 #ifdef BGE_NETCONSOLE 3497 if (bgep->asf_enabled) { 3498 mutex_enter(bgep->genlock); 3499 retval = bge_chip_start(bgep, B_TRUE); 3500 mutex_exit(bgep->genlock); 3501 if (retval != DDI_SUCCESS) 3502 goto attach_fail; 3503 } 3504 #endif 3505 #endif 3506 3507 ddi_report_dev(devinfo); 3508 BGE_REPORT((bgep, "bge version: %s", bge_version)); 3509 3510 return (DDI_SUCCESS); 3511 3512 attach_fail: 3513 #ifdef BGE_IPMI_ASF 3514 bge_unattach(bgep, ASF_MODE_SHUTDOWN); 3515 #else 3516 bge_unattach(bgep); 3517 #endif 3518 return (DDI_FAILURE); 3519 } 3520 3521 /* 3522 * bge_suspend() -- suspend transmit/receive for powerdown 3523 */ 3524 static int 3525 bge_suspend(bge_t *bgep) 3526 { 3527 /* 3528 * Stop processing and idle (powerdown) the PHY ... 3529 */ 3530 mutex_enter(bgep->genlock); 3531 #ifdef BGE_IPMI_ASF 3532 /* 3533 * Power management hasn't been supported in BGE now. If you 3534 * want to implement it, please add the ASF/IPMI related 3535 * code here. 3536 */ 3537 #endif 3538 bge_stop(bgep); 3539 if (bge_phys_idle(bgep) != DDI_SUCCESS) { 3540 (void) bge_check_acc_handle(bgep, bgep->io_handle); 3541 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 3542 mutex_exit(bgep->genlock); 3543 return (DDI_FAILURE); 3544 } 3545 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) { 3546 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 3547 mutex_exit(bgep->genlock); 3548 return (DDI_FAILURE); 3549 } 3550 mutex_exit(bgep->genlock); 3551 3552 return (DDI_SUCCESS); 3553 } 3554 3555 /* 3556 * quiesce(9E) entry point. 3557 * 3558 * This function is called when the system is single-threaded at high 3559 * PIL with preemption disabled. Therefore, this function must not be 3560 * blocked. 3561 * 3562 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure. 3563 * DDI_FAILURE indicates an error condition and should almost never happen. 3564 */ 3565 #ifdef __sparc 3566 #define bge_quiesce ddi_quiesce_not_supported 3567 #else 3568 static int 3569 bge_quiesce(dev_info_t *devinfo) 3570 { 3571 bge_t *bgep = ddi_get_driver_private(devinfo); 3572 3573 if (bgep == NULL) 3574 return (DDI_FAILURE); 3575 3576 if (bgep->intr_type == DDI_INTR_TYPE_FIXED) { 3577 bge_reg_set32(bgep, PCI_CONF_BGE_MHCR, 3578 MHCR_MASK_PCI_INT_OUTPUT); 3579 } else { 3580 bge_reg_clr32(bgep, MSI_MODE_REG, MSI_MSI_ENABLE); 3581 } 3582 3583 /* Stop the chip */ 3584 bge_chip_stop_nonblocking(bgep); 3585 3586 return (DDI_SUCCESS); 3587 } 3588 #endif 3589 3590 /* 3591 * detach(9E) -- Detach a device from the system 3592 */ 3593 static int 3594 bge_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd) 3595 { 3596 bge_t *bgep; 3597 #ifdef BGE_IPMI_ASF 3598 uint_t asf_mode; 3599 asf_mode = ASF_MODE_NONE; 3600 #endif 3601 3602 BGE_GTRACE(("bge_detach($%p, %d)", (void *)devinfo, cmd)); 3603 3604 bgep = ddi_get_driver_private(devinfo); 3605 3606 switch (cmd) { 3607 default: 3608 return (DDI_FAILURE); 3609 3610 case DDI_SUSPEND: 3611 return (bge_suspend(bgep)); 3612 3613 case DDI_DETACH: 3614 break; 3615 } 3616 3617 #ifdef BGE_IPMI_ASF 3618 mutex_enter(bgep->genlock); 3619 if (bgep->asf_enabled && ((bgep->asf_status == ASF_STAT_RUN) || 3620 (bgep->asf_status == ASF_STAT_RUN_INIT))) { 3621 3622 bge_asf_update_status(bgep); 3623 if (bgep->asf_status == ASF_STAT_RUN) { 3624 bge_asf_stop_timer(bgep); 3625 } 3626 bgep->asf_status = ASF_STAT_STOP; 3627 3628 bge_asf_pre_reset_operations(bgep, BGE_SHUTDOWN_RESET); 3629 3630 if (bgep->asf_pseudostop) { 3631 bge_chip_stop(bgep, B_FALSE); 3632 bgep->bge_mac_state = BGE_MAC_STOPPED; 3633 bgep->asf_pseudostop = B_FALSE; 3634 } 3635 3636 asf_mode = ASF_MODE_POST_SHUTDOWN; 3637 3638 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) 3639 ddi_fm_service_impact(bgep->devinfo, 3640 DDI_SERVICE_UNAFFECTED); 3641 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) 3642 ddi_fm_service_impact(bgep->devinfo, 3643 DDI_SERVICE_UNAFFECTED); 3644 } 3645 mutex_exit(bgep->genlock); 3646 #endif 3647 3648 /* 3649 * Unregister from the GLD subsystem. This can fail, in 3650 * particular if there are DLPI style-2 streams still open - 3651 * in which case we just return failure without shutting 3652 * down chip operations. 3653 */ 3654 if (mac_unregister(bgep->mh) != 0) 3655 return (DDI_FAILURE); 3656 3657 /* 3658 * All activity stopped, so we can clean up & exit 3659 */ 3660 #ifdef BGE_IPMI_ASF 3661 bge_unattach(bgep, asf_mode); 3662 #else 3663 bge_unattach(bgep); 3664 #endif 3665 return (DDI_SUCCESS); 3666 } 3667 3668 3669 /* 3670 * ========== Module Loading Data & Entry Points ========== 3671 */ 3672 3673 #undef BGE_DBG 3674 #define BGE_DBG BGE_DBG_INIT /* debug flag for this code */ 3675 3676 DDI_DEFINE_STREAM_OPS(bge_dev_ops, 3677 nulldev, /* identify */ 3678 nulldev, /* probe */ 3679 bge_attach, /* attach */ 3680 bge_detach, /* detach */ 3681 nodev, /* reset */ 3682 NULL, /* cb_ops */ 3683 D_MP, /* bus_ops */ 3684 NULL, /* power */ 3685 bge_quiesce /* quiesce */ 3686 ); 3687 3688 static struct modldrv bge_modldrv = { 3689 &mod_driverops, /* Type of module. This one is a driver */ 3690 bge_ident, /* short description */ 3691 &bge_dev_ops /* driver specific ops */ 3692 }; 3693 3694 static struct modlinkage modlinkage = { 3695 MODREV_1, (void *)&bge_modldrv, NULL 3696 }; 3697 3698 3699 int 3700 _info(struct modinfo *modinfop) 3701 { 3702 return (mod_info(&modlinkage, modinfop)); 3703 } 3704 3705 int 3706 _init(void) 3707 { 3708 int status; 3709 3710 mac_init_ops(&bge_dev_ops, "bge"); 3711 status = mod_install(&modlinkage); 3712 if (status == DDI_SUCCESS) 3713 mutex_init(bge_log_mutex, NULL, MUTEX_DRIVER, NULL); 3714 else 3715 mac_fini_ops(&bge_dev_ops); 3716 return (status); 3717 } 3718 3719 int 3720 _fini(void) 3721 { 3722 int status; 3723 3724 status = mod_remove(&modlinkage); 3725 if (status == DDI_SUCCESS) { 3726 mac_fini_ops(&bge_dev_ops); 3727 mutex_destroy(bge_log_mutex); 3728 } 3729 return (status); 3730 } 3731 3732 3733 /* 3734 * bge_add_intrs: 3735 * 3736 * Register FIXED or MSI interrupts. 3737 */ 3738 static int 3739 bge_add_intrs(bge_t *bgep, int intr_type) 3740 { 3741 dev_info_t *dip = bgep->devinfo; 3742 int avail, actual, intr_size, count = 0; 3743 int i, flag, ret; 3744 3745 BGE_DEBUG(("bge_add_intrs($%p, 0x%x)", (void *)bgep, intr_type)); 3746 3747 /* Get number of interrupts */ 3748 ret = ddi_intr_get_nintrs(dip, intr_type, &count); 3749 if ((ret != DDI_SUCCESS) || (count == 0)) { 3750 bge_error(bgep, "ddi_intr_get_nintrs() failure, ret: %d, " 3751 "count: %d", ret, count); 3752 3753 return (DDI_FAILURE); 3754 } 3755 3756 /* Get number of available interrupts */ 3757 ret = ddi_intr_get_navail(dip, intr_type, &avail); 3758 if ((ret != DDI_SUCCESS) || (avail == 0)) { 3759 bge_error(bgep, "ddi_intr_get_navail() failure, " 3760 "ret: %d, avail: %d\n", ret, avail); 3761 3762 return (DDI_FAILURE); 3763 } 3764 3765 if (avail < count) { 3766 BGE_DEBUG(("%s: nintrs() returned %d, navail returned %d", 3767 bgep->ifname, count, avail)); 3768 } 3769 3770 /* 3771 * BGE hardware generates only single MSI even though it claims 3772 * to support multiple MSIs. So, hard code MSI count value to 1. 3773 */ 3774 if (intr_type == DDI_INTR_TYPE_MSI) { 3775 count = 1; 3776 flag = DDI_INTR_ALLOC_STRICT; 3777 } else { 3778 flag = DDI_INTR_ALLOC_NORMAL; 3779 } 3780 3781 /* Allocate an array of interrupt handles */ 3782 intr_size = count * sizeof (ddi_intr_handle_t); 3783 bgep->htable = kmem_alloc(intr_size, KM_SLEEP); 3784 3785 /* Call ddi_intr_alloc() */ 3786 ret = ddi_intr_alloc(dip, bgep->htable, intr_type, 0, 3787 count, &actual, flag); 3788 3789 if ((ret != DDI_SUCCESS) || (actual == 0)) { 3790 bge_error(bgep, "ddi_intr_alloc() failed %d\n", ret); 3791 3792 kmem_free(bgep->htable, intr_size); 3793 return (DDI_FAILURE); 3794 } 3795 3796 if (actual < count) { 3797 BGE_DEBUG(("%s: Requested: %d, Received: %d", 3798 bgep->ifname, count, actual)); 3799 } 3800 3801 bgep->intr_cnt = actual; 3802 3803 /* 3804 * Get priority for first msi, assume remaining are all the same 3805 */ 3806 if ((ret = ddi_intr_get_pri(bgep->htable[0], &bgep->intr_pri)) != 3807 DDI_SUCCESS) { 3808 bge_error(bgep, "ddi_intr_get_pri() failed %d\n", ret); 3809 3810 /* Free already allocated intr */ 3811 for (i = 0; i < actual; i++) { 3812 (void) ddi_intr_free(bgep->htable[i]); 3813 } 3814 3815 kmem_free(bgep->htable, intr_size); 3816 return (DDI_FAILURE); 3817 } 3818 3819 /* Call ddi_intr_add_handler() */ 3820 for (i = 0; i < actual; i++) { 3821 if ((ret = ddi_intr_add_handler(bgep->htable[i], bge_intr, 3822 (caddr_t)bgep, (caddr_t)(uintptr_t)i)) != DDI_SUCCESS) { 3823 bge_error(bgep, "ddi_intr_add_handler() " 3824 "failed %d\n", ret); 3825 3826 /* Free already allocated intr */ 3827 for (i = 0; i < actual; i++) { 3828 (void) ddi_intr_free(bgep->htable[i]); 3829 } 3830 3831 kmem_free(bgep->htable, intr_size); 3832 return (DDI_FAILURE); 3833 } 3834 } 3835 3836 if ((ret = ddi_intr_get_cap(bgep->htable[0], &bgep->intr_cap)) 3837 != DDI_SUCCESS) { 3838 bge_error(bgep, "ddi_intr_get_cap() failed %d\n", ret); 3839 3840 for (i = 0; i < actual; i++) { 3841 (void) ddi_intr_remove_handler(bgep->htable[i]); 3842 (void) ddi_intr_free(bgep->htable[i]); 3843 } 3844 3845 kmem_free(bgep->htable, intr_size); 3846 return (DDI_FAILURE); 3847 } 3848 3849 return (DDI_SUCCESS); 3850 } 3851 3852 /* 3853 * bge_rem_intrs: 3854 * 3855 * Unregister FIXED or MSI interrupts 3856 */ 3857 static void 3858 bge_rem_intrs(bge_t *bgep) 3859 { 3860 int i; 3861 3862 BGE_DEBUG(("bge_rem_intrs($%p)", (void *)bgep)); 3863 3864 /* Call ddi_intr_remove_handler() */ 3865 for (i = 0; i < bgep->intr_cnt; i++) { 3866 (void) ddi_intr_remove_handler(bgep->htable[i]); 3867 (void) ddi_intr_free(bgep->htable[i]); 3868 } 3869 3870 kmem_free(bgep->htable, bgep->intr_cnt * sizeof (ddi_intr_handle_t)); 3871 } 3872 3873 3874 void 3875 bge_intr_enable(bge_t *bgep) 3876 { 3877 int i; 3878 3879 if (bgep->intr_cap & DDI_INTR_FLAG_BLOCK) { 3880 /* Call ddi_intr_block_enable() for MSI interrupts */ 3881 (void) ddi_intr_block_enable(bgep->htable, bgep->intr_cnt); 3882 } else { 3883 /* Call ddi_intr_enable for MSI or FIXED interrupts */ 3884 for (i = 0; i < bgep->intr_cnt; i++) { 3885 (void) ddi_intr_enable(bgep->htable[i]); 3886 } 3887 } 3888 } 3889 3890 3891 void 3892 bge_intr_disable(bge_t *bgep) 3893 { 3894 int i; 3895 3896 if (bgep->intr_cap & DDI_INTR_FLAG_BLOCK) { 3897 /* Call ddi_intr_block_disable() */ 3898 (void) ddi_intr_block_disable(bgep->htable, bgep->intr_cnt); 3899 } else { 3900 for (i = 0; i < bgep->intr_cnt; i++) { 3901 (void) ddi_intr_disable(bgep->htable[i]); 3902 } 3903 } 3904 } 3905 3906 int 3907 bge_reprogram(bge_t *bgep) 3908 { 3909 int status = 0; 3910 3911 ASSERT(mutex_owned(bgep->genlock)); 3912 3913 if (bge_phys_update(bgep) != DDI_SUCCESS) { 3914 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 3915 status = IOC_INVAL; 3916 } 3917 #ifdef BGE_IPMI_ASF 3918 if (bge_chip_sync(bgep, B_TRUE) == DDI_FAILURE) { 3919 #else 3920 if (bge_chip_sync(bgep) == DDI_FAILURE) { 3921 #endif 3922 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 3923 status = IOC_INVAL; 3924 } 3925 if (bgep->intr_type == DDI_INTR_TYPE_MSI) 3926 bge_chip_msi_trig(bgep); 3927 return (status); 3928 } 3929