/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#include "bge_impl.h"
#include <sys/sdt.h>
#include <sys/mac_provider.h>
#include <sys/mac.h>
#include <sys/mac_flow.h>

/*
 * This is the string displayed by modinfo, etc.
 * Make sure you keep the version ID up to date!
 */
static char bge_ident[] = "Broadcom Gb Ethernet v1.01";

/*
 * Property names
 */
static char debug_propname[] = "bge-debug-flags";
static char clsize_propname[] = "cache-line-size";
static char latency_propname[] = "latency-timer";
static char localmac_boolname[] = "local-mac-address?";
static char localmac_propname[] = "local-mac-address";
static char macaddr_propname[] = "mac-address";
static char subdev_propname[] = "subsystem-id";
static char subven_propname[] = "subsystem-vendor-id";
static char rxrings_propname[] = "bge-rx-rings";
static char txrings_propname[] = "bge-tx-rings";
static char fm_cap[] = "fm-capable";
static char default_mtu[] = "default_mtu";

static int	bge_add_intrs(bge_t *, int);
static void	bge_rem_intrs(bge_t *);
static int	bge_unicst_set(void *, const uint8_t *, int);

/*
 * Describes the chip's DMA engine
 */
static ddi_dma_attr_t dma_attr = {
	DMA_ATTR_V0,			/* dma_attr version	*/
	0x0000000000000000ull,		/* dma_attr_addr_lo	*/
	0xFFFFFFFFFFFFFFFFull,		/* dma_attr_addr_hi	*/
	0x00000000FFFFFFFFull,		/* dma_attr_count_max	*/
	0x0000000000000001ull,		/* dma_attr_align	*/
	0x00000FFF,			/* dma_attr_burstsizes	*/
	0x00000001,			/* dma_attr_minxfer	*/
	0x000000000000FFFFull,		/* dma_attr_maxxfer	*/
	0xFFFFFFFFFFFFFFFFull,		/* dma_attr_seg		*/
	1,				/* dma_attr_sgllen	*/
	0x00000001,			/* dma_attr_granular	*/
	DDI_DMA_FLAGERR			/* dma_attr_flags	*/
};
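/*
 * Note: dma_attr_sgllen is 1, so any bind using these attributes must
 * resolve to a single DMA cookie; bge_alloc_dma_mem() below depends on
 * this when it rejects binds where ncookies != 1.
 */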
/*
 * PIO access attributes for registers
 */
static ddi_device_acc_attr_t bge_reg_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC,
	DDI_FLAGERR_ACC
};

/*
 * DMA access attributes for descriptors: NOT to be byte swapped.
 */
static ddi_device_acc_attr_t bge_desc_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC,
	DDI_FLAGERR_ACC
};

/*
 * DMA access attributes for data: NOT to be byte swapped.
 */
static ddi_device_acc_attr_t bge_data_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC
};

static int		bge_m_start(void *);
static void		bge_m_stop(void *);
static int		bge_m_promisc(void *, boolean_t);
static int		bge_m_multicst(void *, boolean_t, const uint8_t *);
static void		bge_m_ioctl(void *, queue_t *, mblk_t *);
static boolean_t	bge_m_getcapab(void *, mac_capab_t, void *);
static int		bge_unicst_set(void *, const uint8_t *, int);
static int		bge_m_setprop(void *, const char *, mac_prop_id_t,
    uint_t, const void *);
static int		bge_m_getprop(void *, const char *, mac_prop_id_t,
    uint_t, uint_t, void *, uint_t *);
static int		bge_set_priv_prop(bge_t *, const char *, uint_t,
    const void *);
static int		bge_get_priv_prop(bge_t *, const char *, uint_t,
    uint_t, void *);

#define	BGE_M_CALLBACK_FLAGS (MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP)

static mac_callbacks_t bge_m_callbacks = {
	BGE_M_CALLBACK_FLAGS,
	bge_m_stat,
	bge_m_start,
	bge_m_stop,
	bge_m_promisc,
	bge_m_multicst,
	NULL,
	bge_m_tx,
	bge_m_ioctl,
	bge_m_getcapab,
	NULL,
	NULL,
	bge_m_setprop,
	bge_m_getprop
};

mac_priv_prop_t bge_priv_prop[] = {
	{"_adv_asym_pause_cap", MAC_PROP_PERM_RW},
	{"_adv_pause_cap", MAC_PROP_PERM_RW}
};

#define	BGE_MAX_PRIV_PROPS \
	(sizeof (bge_priv_prop) / sizeof (mac_priv_prop_t))

uint8_t zero_addr[6] = {0, 0, 0, 0, 0, 0};
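/*
 * These callbacks are what the MAC (GLDv3/Nemo) framework invokes on
 * this driver.  Roughly, the attach path (later in this file) hands
 * them over along these lines (sketch only; see the real registration
 * code for the full set of fields that must be filled in):
 *
 *	mac_register_t *macp = mac_alloc(MAC_VERSION);
 *	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
 *	macp->m_driver = bgep;
 *	macp->m_callbacks = &bge_m_callbacks;
 *	...
 *	err = mac_register(macp, &bgep->mh);
 */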
/*
 * ========== Transmit and receive ring reinitialisation ==========
 */

/*
 * These <reinit> routines each reset the specified ring to an initial
 * state, assuming that the corresponding <init> routine has already
 * been called exactly once.
 */

static void
bge_reinit_send_ring(send_ring_t *srp)
{
	bge_queue_t *txbuf_queue;
	bge_queue_item_t *txbuf_head;
	sw_txbuf_t *txbuf;
	sw_sbd_t *ssbdp;
	uint32_t slot;

	/*
	 * Reinitialise control variables ...
	 */
	srp->tx_flow = 0;
	srp->tx_next = 0;
	srp->txfill_next = 0;
	srp->tx_free = srp->desc.nslots;
	ASSERT(mutex_owned(srp->tc_lock));
	srp->tc_next = 0;
	srp->txpkt_next = 0;
	srp->tx_block = 0;
	srp->tx_nobd = 0;
	srp->tx_nobuf = 0;

	/*
	 * Initialize the tx buffer push queue
	 */
	mutex_enter(srp->freetxbuf_lock);
	mutex_enter(srp->txbuf_lock);
	txbuf_queue = &srp->freetxbuf_queue;
	txbuf_queue->head = NULL;
	txbuf_queue->count = 0;
	txbuf_queue->lock = srp->freetxbuf_lock;
	srp->txbuf_push_queue = txbuf_queue;

	/*
	 * Initialize the tx buffer pop queue
	 */
	txbuf_queue = &srp->txbuf_queue;
	txbuf_queue->head = NULL;
	txbuf_queue->count = 0;
	txbuf_queue->lock = srp->txbuf_lock;
	srp->txbuf_pop_queue = txbuf_queue;
	txbuf_head = srp->txbuf_head;
	txbuf = srp->txbuf;
	for (slot = 0; slot < srp->tx_buffers; ++slot) {
		txbuf_head->item = txbuf;
		txbuf_head->next = txbuf_queue->head;
		txbuf_queue->head = txbuf_head;
		txbuf_queue->count++;
		txbuf++;
		txbuf_head++;
	}
	mutex_exit(srp->txbuf_lock);
	mutex_exit(srp->freetxbuf_lock);

	/*
	 * Zero and sync all the h/w Send Buffer Descriptors
	 */
	DMA_ZERO(srp->desc);
	DMA_SYNC(srp->desc, DDI_DMA_SYNC_FORDEV);
	bzero(srp->pktp, BGE_SEND_BUF_MAX * sizeof (*srp->pktp));
	ssbdp = srp->sw_sbds;
	for (slot = 0; slot < srp->desc.nslots; ++ssbdp, ++slot)
		ssbdp->pbuf = NULL;
}

static void
bge_reinit_recv_ring(recv_ring_t *rrp)
{
	/*
	 * Reinitialise control variables ...
	 */
	rrp->rx_next = 0;
}

static void
bge_reinit_buff_ring(buff_ring_t *brp, uint32_t ring)
{
	bge_rbd_t *hw_rbd_p;
	sw_rbd_t *srbdp;
	uint32_t bufsize;
	uint32_t nslots;
	uint32_t slot;

	static uint16_t ring_type_flag[BGE_BUFF_RINGS_MAX] = {
		RBD_FLAG_STD_RING,
		RBD_FLAG_JUMBO_RING,
		RBD_FLAG_MINI_RING
	};

	/*
	 * Zero, initialise and sync all the h/w Receive Buffer Descriptors
	 * Note: all the remaining fields (<type>, <flags>, <ip_cksum>,
	 * <tcp_udp_cksum>, <error_flag>, <vlan_tag>, and <reserved>)
	 * should be zeroed, and so don't need to be set up specifically
	 * once the whole area has been cleared.
	 */
	DMA_ZERO(brp->desc);

	hw_rbd_p = DMA_VPTR(brp->desc);
	nslots = brp->desc.nslots;
	ASSERT(brp->buf[0].nslots == nslots/BGE_SPLIT);
	bufsize = brp->buf[0].size;
	srbdp = brp->sw_rbds;
	for (slot = 0; slot < nslots; ++hw_rbd_p, ++srbdp, ++slot) {
		hw_rbd_p->host_buf_addr = srbdp->pbuf.cookie.dmac_laddress;
		hw_rbd_p->index = (uint16_t)slot;
		hw_rbd_p->len = (uint16_t)bufsize;
		hw_rbd_p->opaque = srbdp->pbuf.token;
		hw_rbd_p->flags |= ring_type_flag[ring];
	}

	DMA_SYNC(brp->desc, DDI_DMA_SYNC_FORDEV);

	/*
	 * Finally, reinitialise the ring control variables ...
	 */
	brp->rf_next = (nslots != 0) ? (nslots-1) : 0;
}
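/*
 * Note: the loop above stocks every slot, so the ring starts out full
 * and rf_next is left pointing at the last valid slot.  The <opaque>
 * token written into each h/w descriptor is the s/w buffer's token,
 * which gives the receive path a way to sanity-check that the buffer
 * the chip hands back matches the sw_rbd it expects.
 */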
/*
 * Reinitialize all rings
 */
static void
bge_reinit_rings(bge_t *bgep)
{
	uint32_t ring;

	ASSERT(mutex_owned(bgep->genlock));

	/*
	 * Send Rings ...
	 */
	for (ring = 0; ring < bgep->chipid.tx_rings; ++ring)
		bge_reinit_send_ring(&bgep->send[ring]);

	/*
	 * Receive Return Rings ...
	 */
	for (ring = 0; ring < bgep->chipid.rx_rings; ++ring)
		bge_reinit_recv_ring(&bgep->recv[ring]);

	/*
	 * Receive Producer Rings ...
	 */
	for (ring = 0; ring < BGE_BUFF_RINGS_USED; ++ring)
		bge_reinit_buff_ring(&bgep->buff[ring], ring);
}

/*
 * ========== Internal state management entry points ==========
 */

#undef	BGE_DBG
#define	BGE_DBG		BGE_DBG_NEMO	/* debug flag for this code	*/

/*
 * These routines provide all the functionality required by the
 * corresponding GLD entry points, but don't update the GLD state
 * so they can be called internally without disturbing our record
 * of what GLD thinks we should be doing ...
 */

/*
 *	bge_reset() -- reset h/w & rings to initial state
 */
static int
#ifdef BGE_IPMI_ASF
bge_reset(bge_t *bgep, uint_t asf_mode)
#else
bge_reset(bge_t *bgep)
#endif
{
	uint32_t ring;
	int retval;

	BGE_TRACE(("bge_reset($%p)", (void *)bgep));

	ASSERT(mutex_owned(bgep->genlock));

	/*
	 * Grab all the other mutexes in the world (this should
	 * ensure no other threads are manipulating driver state)
	 */
	for (ring = 0; ring < BGE_RECV_RINGS_MAX; ++ring)
		mutex_enter(bgep->recv[ring].rx_lock);
	for (ring = 0; ring < BGE_BUFF_RINGS_MAX; ++ring)
		mutex_enter(bgep->buff[ring].rf_lock);
	rw_enter(bgep->errlock, RW_WRITER);
	for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring)
		mutex_enter(bgep->send[ring].tx_lock);
	for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring)
		mutex_enter(bgep->send[ring].tc_lock);

#ifdef BGE_IPMI_ASF
	retval = bge_chip_reset(bgep, B_TRUE, asf_mode);
#else
	retval = bge_chip_reset(bgep, B_TRUE);
#endif
	bge_reinit_rings(bgep);

	/*
	 * Free the world ...
	 */
	for (ring = BGE_SEND_RINGS_MAX; ring-- > 0; )
		mutex_exit(bgep->send[ring].tc_lock);
	for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring)
		mutex_exit(bgep->send[ring].tx_lock);
	rw_exit(bgep->errlock);
	for (ring = BGE_BUFF_RINGS_MAX; ring-- > 0; )
		mutex_exit(bgep->buff[ring].rf_lock);
	for (ring = BGE_RECV_RINGS_MAX; ring-- > 0; )
		mutex_exit(bgep->recv[ring].rx_lock);

	BGE_DEBUG(("bge_reset($%p) done", (void *)bgep));
	return (retval);
}
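/*
 * Note: bge_reset() takes the per-ring locks in one fixed global order
 * (rx_lock, then rf_lock, then errlock, then tx_lock, then tc_lock,
 * all under genlock) and drops them again afterwards.  Any other code
 * path that ever holds more than one of these at once presumably has
 * to follow the same ordering, or it could deadlock against a reset.
 */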
/*
 *	bge_stop() -- stop processing, don't reset h/w or rings
 */
static void
bge_stop(bge_t *bgep)
{
	BGE_TRACE(("bge_stop($%p)", (void *)bgep));

	ASSERT(mutex_owned(bgep->genlock));

#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled) {
		bgep->asf_pseudostop = B_TRUE;
	} else {
#endif
		bge_chip_stop(bgep, B_FALSE);
#ifdef BGE_IPMI_ASF
	}
#endif

	BGE_DEBUG(("bge_stop($%p) done", (void *)bgep));
}

/*
 *	bge_start() -- start transmitting/receiving
 */
static int
bge_start(bge_t *bgep, boolean_t reset_phys)
{
	int retval;

	BGE_TRACE(("bge_start($%p, %d)", (void *)bgep, reset_phys));

	ASSERT(mutex_owned(bgep->genlock));

	/*
	 * Start chip processing, including enabling interrupts
	 */
	retval = bge_chip_start(bgep, reset_phys);

	BGE_DEBUG(("bge_start($%p, %d) done", (void *)bgep, reset_phys));
	return (retval);
}

/*
 * bge_restart - restart transmitting/receiving after error or suspend
 */
int
bge_restart(bge_t *bgep, boolean_t reset_phys)
{
	int retval = DDI_SUCCESS;
	ASSERT(mutex_owned(bgep->genlock));

#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled) {
		if (bge_reset(bgep, ASF_MODE_POST_INIT) != DDI_SUCCESS)
			retval = DDI_FAILURE;
	} else
		if (bge_reset(bgep, ASF_MODE_NONE) != DDI_SUCCESS)
			retval = DDI_FAILURE;
#else
	if (bge_reset(bgep) != DDI_SUCCESS)
		retval = DDI_FAILURE;
#endif
	if (bgep->bge_mac_state == BGE_MAC_STARTED) {
		if (bge_start(bgep, reset_phys) != DDI_SUCCESS)
			retval = DDI_FAILURE;
		bgep->watchdog = 0;
		ddi_trigger_softintr(bgep->drain_id);
	}

	BGE_DEBUG(("bge_restart($%p, %d) done", (void *)bgep, reset_phys));
	return (retval);
}


/*
 * ========== Nemo-required management entry points ==========
 */

#undef	BGE_DBG
#define	BGE_DBG		BGE_DBG_NEMO	/* debug flag for this code	*/

/*
 *	bge_m_stop() -- stop transmitting/receiving
 */
static void
bge_m_stop(void *arg)
{
	bge_t *bgep = arg;		/* private device info	*/
	send_ring_t *srp;
	uint32_t ring;

	BGE_TRACE(("bge_m_stop($%p)", arg));

	/*
	 * Just stop processing, then record new GLD state
	 */
	mutex_enter(bgep->genlock);
	if (!(bgep->progress & PROGRESS_INTR)) {
		/* can happen during autorecovery */
		mutex_exit(bgep->genlock);
		return;
	}
	bge_stop(bgep);

	bgep->link_update_timer = 0;
	bgep->link_state = LINK_STATE_UNKNOWN;
	mac_link_update(bgep->mh, bgep->link_state);

	/*
	 * Free any extra tx buffer arrays that were allocated
	 * while transmitting.
	 */
#ifdef BGE_IPMI_ASF
	if (!bgep->asf_pseudostop)
#endif
	{
		rw_enter(bgep->errlock, RW_WRITER);
		for (ring = 0; ring < bgep->chipid.tx_rings; ++ring) {
			srp = &bgep->send[ring];
			mutex_enter(srp->tx_lock);
			if (srp->tx_array > 1)
				bge_free_txbuf_arrays(srp);
			mutex_exit(srp->tx_lock);
		}
		rw_exit(bgep->errlock);
	}
	bgep->bge_mac_state = BGE_MAC_STOPPED;
	BGE_DEBUG(("bge_m_stop($%p) done", arg));
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK)
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_UNAFFECTED);
	mutex_exit(bgep->genlock);
}
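/*
 * Note: when IPMI/ASF is enabled, bge_stop() only sets asf_pseudostop
 * rather than actually stopping the chip, so that the IPMI/ASF
 * sideband traffic keeps flowing while the link is administratively
 * down; bge_m_start() below recognises this pseudo-stopped state and
 * simply marks the MAC started again without a full reset.
 */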
/*
 *	bge_m_start() -- start transmitting/receiving
 */
static int
bge_m_start(void *arg)
{
	bge_t *bgep = arg;		/* private device info	*/

	BGE_TRACE(("bge_m_start($%p)", arg));

	/*
	 * Start processing and record new GLD state
	 */
	mutex_enter(bgep->genlock);
	if (!(bgep->progress & PROGRESS_INTR)) {
		/* can happen during autorecovery */
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled) {
		if ((bgep->asf_status == ASF_STAT_RUN) &&
		    (bgep->asf_pseudostop)) {
			bgep->bge_mac_state = BGE_MAC_STARTED;
			mutex_exit(bgep->genlock);
			return (0);
		}
	}
	if (bge_reset(bgep, ASF_MODE_INIT) != DDI_SUCCESS) {
#else
	if (bge_reset(bgep) != DDI_SUCCESS) {
#endif
		(void) bge_check_acc_handle(bgep, bgep->cfg_handle);
		(void) bge_check_acc_handle(bgep, bgep->io_handle);
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	if (bge_start(bgep, B_TRUE) != DDI_SUCCESS) {
		(void) bge_check_acc_handle(bgep, bgep->cfg_handle);
		(void) bge_check_acc_handle(bgep, bgep->io_handle);
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	bgep->bge_mac_state = BGE_MAC_STARTED;
	BGE_DEBUG(("bge_m_start($%p) done", arg));

	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled) {
		if (bgep->asf_status != ASF_STAT_RUN) {
			/* start ASF heart beat */
			bgep->asf_timeout_id = timeout(bge_asf_heartbeat,
			    (void *)bgep,
			    drv_usectohz(BGE_ASF_HEARTBEAT_INTERVAL));
			bgep->asf_status = ASF_STAT_RUN;
		}
	}
#endif
	mutex_exit(bgep->genlock);

	return (0);
}
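/*
 * Note: the recurring bge_check_acc_handle()/ddi_fm_service_impact()
 * pairs in these entry points are the driver's FMA (fault management)
 * hooks: if a config- or register-access handle has logged an error,
 * the framework is told the service is degraded and the caller sees
 * EIO.
 */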
/*
 *	bge_unicst_set() -- set the physical network address
 */
static int
bge_unicst_set(void *arg, const uint8_t *macaddr, int slot)
{
	bge_t *bgep = arg;		/* private device info	*/

	BGE_TRACE(("bge_m_unicst_set($%p, %s)", arg,
	    ether_sprintf((void *)macaddr)));
	/*
	 * Remember the new current address in the driver state
	 * Sync the chip's idea of the address too ...
	 */
	mutex_enter(bgep->genlock);
	if (!(bgep->progress & PROGRESS_INTR)) {
		/* can happen during autorecovery */
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	ethaddr_copy(macaddr, bgep->curr_addr[slot].addr);
#ifdef BGE_IPMI_ASF
	if (bge_chip_sync(bgep, B_FALSE) == DDI_FAILURE) {
#else
	if (bge_chip_sync(bgep) == DDI_FAILURE) {
#endif
		(void) bge_check_acc_handle(bgep, bgep->cfg_handle);
		(void) bge_check_acc_handle(bgep, bgep->io_handle);
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled) {
		/*
		 * The above bge_chip_sync() function wrote the ethernet MAC
		 * address registers, which destroyed the IPMI/ASF sideband.
		 * Here, we have to reset the chip to make the IPMI/ASF
		 * sideband work again.
		 */
		if (bgep->asf_status == ASF_STAT_RUN) {
			/*
			 * We must stop the ASF heart beat before
			 * bge_chip_stop(), otherwise some computers (ex.
			 * IBM HS20 blade server) may crash.
			 */
			bge_asf_update_status(bgep);
			bge_asf_stop_timer(bgep);
			bgep->asf_status = ASF_STAT_STOP;

			bge_asf_pre_reset_operations(bgep, BGE_INIT_RESET);
		}
		bge_chip_stop(bgep, B_FALSE);

		if (bge_restart(bgep, B_FALSE) == DDI_FAILURE) {
			(void) bge_check_acc_handle(bgep, bgep->cfg_handle);
			(void) bge_check_acc_handle(bgep, bgep->io_handle);
			ddi_fm_service_impact(bgep->devinfo,
			    DDI_SERVICE_DEGRADED);
			mutex_exit(bgep->genlock);
			return (EIO);
		}

		/*
		 * Start our ASF heartbeat counter as soon as possible.
		 */
		if (bgep->asf_status != ASF_STAT_RUN) {
			/* start ASF heart beat */
			bgep->asf_timeout_id = timeout(bge_asf_heartbeat,
			    (void *)bgep,
			    drv_usectohz(BGE_ASF_HEARTBEAT_INTERVAL));
			bgep->asf_status = ASF_STAT_RUN;
		}
	}
#endif
	BGE_DEBUG(("bge_m_unicst_set($%p) done", arg));
	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	mutex_exit(bgep->genlock);

	return (0);
}

extern void bge_wake_factotum(bge_t *);

static boolean_t
bge_param_locked(mac_prop_id_t pr_num)
{
	/*
	 * All adv_* parameters are locked (read-only) while
	 * the device is in any sort of loopback mode ...
	 */
	switch (pr_num) {
	case MAC_PROP_ADV_1000FDX_CAP:
	case MAC_PROP_EN_1000FDX_CAP:
	case MAC_PROP_ADV_1000HDX_CAP:
	case MAC_PROP_EN_1000HDX_CAP:
	case MAC_PROP_ADV_100FDX_CAP:
	case MAC_PROP_EN_100FDX_CAP:
	case MAC_PROP_ADV_100HDX_CAP:
	case MAC_PROP_EN_100HDX_CAP:
	case MAC_PROP_ADV_10FDX_CAP:
	case MAC_PROP_EN_10FDX_CAP:
	case MAC_PROP_ADV_10HDX_CAP:
	case MAC_PROP_EN_10HDX_CAP:
	case MAC_PROP_AUTONEG:
	case MAC_PROP_FLOWCTRL:
		return (B_TRUE);
	}
	return (B_FALSE);
}
/*
 * callback functions for set/get of properties
 */
static int
bge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
    uint_t pr_valsize, const void *pr_val)
{
	bge_t *bgep = barg;
	int err = 0;
	uint32_t cur_mtu, new_mtu;
	uint_t maxsdu;
	link_flowctrl_t fl;

	mutex_enter(bgep->genlock);
	if (bgep->param_loop_mode != BGE_LOOP_NONE &&
	    bge_param_locked(pr_num)) {
		/*
		 * All adv_* parameters are locked (read-only)
		 * while the device is in any sort of loopback mode.
		 */
		mutex_exit(bgep->genlock);
		return (EBUSY);
	}
	if ((bgep->chipid.flags & CHIP_FLAG_SERDES) &&
	    ((pr_num == MAC_PROP_EN_100FDX_CAP) ||
	    (pr_num == MAC_PROP_EN_100HDX_CAP) ||
	    (pr_num == MAC_PROP_EN_10FDX_CAP) ||
	    (pr_num == MAC_PROP_EN_10HDX_CAP))) {
		/*
		 * these properties are read/write on copper,
		 * read-only and 0 on serdes
		 */
		mutex_exit(bgep->genlock);
		return (ENOTSUP);
	}
	if (DEVICE_5906_SERIES_CHIPSETS(bgep) &&
	    ((pr_num == MAC_PROP_EN_1000FDX_CAP) ||
	    (pr_num == MAC_PROP_EN_1000HDX_CAP))) {
		mutex_exit(bgep->genlock);
		return (ENOTSUP);
	}

	switch (pr_num) {
	case MAC_PROP_EN_1000FDX_CAP:
		bgep->param_en_1000fdx = *(uint8_t *)pr_val;
		bgep->param_adv_1000fdx = *(uint8_t *)pr_val;
		goto reprogram;
	case MAC_PROP_EN_1000HDX_CAP:
		bgep->param_en_1000hdx = *(uint8_t *)pr_val;
		bgep->param_adv_1000hdx = *(uint8_t *)pr_val;
		goto reprogram;
	case MAC_PROP_EN_100FDX_CAP:
		bgep->param_en_100fdx = *(uint8_t *)pr_val;
		bgep->param_adv_100fdx = *(uint8_t *)pr_val;
		goto reprogram;
	case MAC_PROP_EN_100HDX_CAP:
		bgep->param_en_100hdx = *(uint8_t *)pr_val;
		bgep->param_adv_100hdx = *(uint8_t *)pr_val;
		goto reprogram;
	case MAC_PROP_EN_10FDX_CAP:
		bgep->param_en_10fdx = *(uint8_t *)pr_val;
		bgep->param_adv_10fdx = *(uint8_t *)pr_val;
		goto reprogram;
	case MAC_PROP_EN_10HDX_CAP:
		bgep->param_en_10hdx = *(uint8_t *)pr_val;
		bgep->param_adv_10hdx = *(uint8_t *)pr_val;
reprogram:
		if (err == 0 && bge_reprogram(bgep) == IOC_INVAL)
			err = EINVAL;
		break;
	case MAC_PROP_ADV_1000FDX_CAP:
	case MAC_PROP_ADV_1000HDX_CAP:
	case MAC_PROP_ADV_100FDX_CAP:
	case MAC_PROP_ADV_100HDX_CAP:
	case MAC_PROP_ADV_10FDX_CAP:
	case MAC_PROP_ADV_10HDX_CAP:
	case MAC_PROP_STATUS:
	case MAC_PROP_SPEED:
	case MAC_PROP_DUPLEX:
		err = ENOTSUP;	/* read-only prop. Can't set this */
		break;
	case MAC_PROP_AUTONEG:
		bgep->param_adv_autoneg = *(uint8_t *)pr_val;
		if (bge_reprogram(bgep) == IOC_INVAL)
			err = EINVAL;
		break;
	case MAC_PROP_MTU:
		cur_mtu = bgep->chipid.default_mtu;
		bcopy(pr_val, &new_mtu, sizeof (new_mtu));

		if (new_mtu == cur_mtu) {
			err = 0;
			break;
		}
		if (new_mtu < BGE_DEFAULT_MTU ||
		    new_mtu > BGE_MAXIMUM_MTU) {
			err = EINVAL;
			break;
		}
		if ((new_mtu > BGE_DEFAULT_MTU) &&
		    (bgep->chipid.flags & CHIP_FLAG_NO_JUMBO)) {
			err = EINVAL;
			break;
		}
		if (bgep->bge_mac_state == BGE_MAC_STARTED) {
			err = EBUSY;
			break;
		}
		bgep->chipid.default_mtu = new_mtu;
		if (bge_chip_id_init(bgep)) {
			err = EINVAL;
			break;
		}
		maxsdu = bgep->chipid.ethmax_size -
		    sizeof (struct ether_header);
		err = mac_maxsdu_update(bgep->mh, maxsdu);
		if (err == 0) {
			bgep->bge_dma_error = B_TRUE;
			bgep->manual_reset = B_TRUE;
			bge_chip_stop(bgep, B_TRUE);
			bge_wake_factotum(bgep);
			err = 0;
		}
		break;
	case MAC_PROP_FLOWCTRL:
		bcopy(pr_val, &fl, sizeof (fl));
		switch (fl) {
		default:
			err = ENOTSUP;
			break;
		case LINK_FLOWCTRL_NONE:
			bgep->param_adv_pause = 0;
			bgep->param_adv_asym_pause = 0;

			bgep->param_link_rx_pause = B_FALSE;
			bgep->param_link_tx_pause = B_FALSE;
			break;
		case LINK_FLOWCTRL_RX:
			if (!((bgep->param_lp_pause == 0) &&
			    (bgep->param_lp_asym_pause == 1))) {
				err = EINVAL;
				break;
			}
			bgep->param_adv_pause = 1;
			bgep->param_adv_asym_pause = 1;

			bgep->param_link_rx_pause = B_TRUE;
			bgep->param_link_tx_pause = B_FALSE;
			break;
		case LINK_FLOWCTRL_TX:
			if (!((bgep->param_lp_pause == 1) &&
			    (bgep->param_lp_asym_pause == 1))) {
				err = EINVAL;
				break;
			}
			bgep->param_adv_pause = 0;
			bgep->param_adv_asym_pause = 1;

			bgep->param_link_rx_pause = B_FALSE;
			bgep->param_link_tx_pause = B_TRUE;
			break;
		case LINK_FLOWCTRL_BI:
			if (bgep->param_lp_pause != 1) {
				err = EINVAL;
				break;
			}
			bgep->param_adv_pause = 1;

			bgep->param_link_rx_pause = B_TRUE;
			bgep->param_link_tx_pause = B_TRUE;
			break;
		}

		if (err == 0) {
			if (bge_reprogram(bgep) == IOC_INVAL)
				err = EINVAL;
		}

		break;
	case MAC_PROP_PRIVATE:
		err = bge_set_priv_prop(bgep, pr_name, pr_valsize,
		    pr_val);
		break;
	default:
		err = ENOTSUP;
		break;
	}
	mutex_exit(bgep->genlock);
	return (err);
}
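/*
 * For reference, the PAUSE advertisement bits programmed above map to
 * the requested link_flowctrl_t values as follows (the usual IEEE
 * 802.3 conventions):
 *
 *	mode			adv_pause	adv_asym_pause
 *	LINK_FLOWCTRL_NONE	0		0
 *	LINK_FLOWCTRL_RX	1		1
 *	LINK_FLOWCTRL_TX	0		1
 *	LINK_FLOWCTRL_BI	1		(left as-is)
 *
 * RX-only and TX-only additionally require that the link partner's
 * advertisement (param_lp_*) be compatible, as checked above.
 */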
/* ARGSUSED */
static int
bge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
    uint_t pr_flags, uint_t pr_valsize, void *pr_val, uint_t *perm)
{
	bge_t *bgep = barg;
	int err = 0;
	link_flowctrl_t fl;
	uint64_t speed;
	int flags = bgep->chipid.flags;
	boolean_t is_default = (pr_flags & MAC_PROP_DEFAULT);

	if (pr_valsize == 0)
		return (EINVAL);
	bzero(pr_val, pr_valsize);

	*perm = MAC_PROP_PERM_RW;

	mutex_enter(bgep->genlock);
	if ((bgep->param_loop_mode != BGE_LOOP_NONE &&
	    bge_param_locked(pr_num)) ||
	    ((bgep->chipid.flags & CHIP_FLAG_SERDES) &&
	    ((pr_num == MAC_PROP_EN_100FDX_CAP) ||
	    (pr_num == MAC_PROP_EN_100HDX_CAP) ||
	    (pr_num == MAC_PROP_EN_10FDX_CAP) ||
	    (pr_num == MAC_PROP_EN_10HDX_CAP))) ||
	    (DEVICE_5906_SERIES_CHIPSETS(bgep) &&
	    ((pr_num == MAC_PROP_EN_1000FDX_CAP) ||
	    (pr_num == MAC_PROP_EN_1000HDX_CAP))))
		*perm = MAC_PROP_PERM_READ;
	mutex_exit(bgep->genlock);

	switch (pr_num) {
	case MAC_PROP_DUPLEX:
		*perm = MAC_PROP_PERM_READ;
		if (pr_valsize < sizeof (link_duplex_t))
			return (EINVAL);
		bcopy(&bgep->param_link_duplex, pr_val,
		    sizeof (link_duplex_t));
		break;
	case MAC_PROP_SPEED:
		*perm = MAC_PROP_PERM_READ;
		if (pr_valsize < sizeof (speed))
			return (EINVAL);
		speed = bgep->param_link_speed * 1000000ull;
		bcopy(&speed, pr_val, sizeof (speed));
		break;
	case MAC_PROP_STATUS:
		*perm = MAC_PROP_PERM_READ;
		if (pr_valsize < sizeof (link_state_t))
			return (EINVAL);
		bcopy(&bgep->link_state, pr_val,
		    sizeof (link_state_t));
		break;
	case MAC_PROP_AUTONEG:
		if (is_default)
			*(uint8_t *)pr_val = 1;
		else
			*(uint8_t *)pr_val = bgep->param_adv_autoneg;
		break;
	case MAC_PROP_FLOWCTRL:
		if (pr_valsize < sizeof (fl))
			return (EINVAL);
		if (is_default) {
			fl = LINK_FLOWCTRL_BI;
			bcopy(&fl, pr_val, sizeof (fl));
			break;
		}

		if (bgep->param_link_rx_pause &&
		    !bgep->param_link_tx_pause)
			fl = LINK_FLOWCTRL_RX;

		if (!bgep->param_link_rx_pause &&
		    !bgep->param_link_tx_pause)
			fl = LINK_FLOWCTRL_NONE;

		if (!bgep->param_link_rx_pause &&
		    bgep->param_link_tx_pause)
			fl = LINK_FLOWCTRL_TX;

		if (bgep->param_link_rx_pause &&
		    bgep->param_link_tx_pause)
			fl = LINK_FLOWCTRL_BI;
		bcopy(&fl, pr_val, sizeof (fl));
		break;
	case MAC_PROP_ADV_1000FDX_CAP:
		*perm = MAC_PROP_PERM_READ;
		if (is_default) {
			if (DEVICE_5906_SERIES_CHIPSETS(bgep))
				*(uint8_t *)pr_val = 0;
			else
				*(uint8_t *)pr_val = 1;
		} else
			*(uint8_t *)pr_val = bgep->param_adv_1000fdx;
		break;
	case MAC_PROP_EN_1000FDX_CAP:
		if (is_default) {
			if (DEVICE_5906_SERIES_CHIPSETS(bgep))
				*(uint8_t *)pr_val = 0;
			else
				*(uint8_t *)pr_val = 1;
		} else
			*(uint8_t *)pr_val = bgep->param_en_1000fdx;
		break;
	case MAC_PROP_ADV_1000HDX_CAP:
		*perm = MAC_PROP_PERM_READ;
		if (is_default) {
			if (DEVICE_5906_SERIES_CHIPSETS(bgep))
				*(uint8_t *)pr_val = 0;
			else
				*(uint8_t *)pr_val = 1;
		} else
			*(uint8_t *)pr_val = bgep->param_adv_1000hdx;
		break;
	case MAC_PROP_EN_1000HDX_CAP:
		if (is_default) {
			if (DEVICE_5906_SERIES_CHIPSETS(bgep))
				*(uint8_t *)pr_val = 0;
			else
				*(uint8_t *)pr_val = 1;
		} else
			*(uint8_t *)pr_val = bgep->param_en_1000hdx;
		break;
	case MAC_PROP_ADV_100FDX_CAP:
		*perm = MAC_PROP_PERM_READ;
		if (is_default) {
			*(uint8_t *)pr_val =
			    ((flags & CHIP_FLAG_SERDES) ? 0 : 1);
		} else {
			*(uint8_t *)pr_val = bgep->param_adv_100fdx;
		}
		break;
	case MAC_PROP_EN_100FDX_CAP:
		if (is_default) {
			*(uint8_t *)pr_val =
			    ((flags & CHIP_FLAG_SERDES) ? 0 : 1);
		} else {
			*(uint8_t *)pr_val = bgep->param_en_100fdx;
		}
		break;
	case MAC_PROP_ADV_100HDX_CAP:
		*perm = MAC_PROP_PERM_READ;
		if (is_default) {
			*(uint8_t *)pr_val =
			    ((flags & CHIP_FLAG_SERDES) ? 0 : 1);
		} else {
			*(uint8_t *)pr_val = bgep->param_adv_100hdx;
		}
		break;
	case MAC_PROP_EN_100HDX_CAP:
		if (is_default) {
			*(uint8_t *)pr_val =
			    ((flags & CHIP_FLAG_SERDES) ? 0 : 1);
		} else {
			*(uint8_t *)pr_val = bgep->param_en_100hdx;
		}
		break;
	case MAC_PROP_ADV_10FDX_CAP:
		*perm = MAC_PROP_PERM_READ;
		if (is_default) {
			*(uint8_t *)pr_val =
			    ((flags & CHIP_FLAG_SERDES) ? 0 : 1);
		} else {
			*(uint8_t *)pr_val = bgep->param_adv_10fdx;
		}
		break;
	case MAC_PROP_EN_10FDX_CAP:
		if (is_default) {
			*(uint8_t *)pr_val =
			    ((flags & CHIP_FLAG_SERDES) ? 0 : 1);
		} else {
			*(uint8_t *)pr_val = bgep->param_en_10fdx;
		}
		break;
	case MAC_PROP_ADV_10HDX_CAP:
		*perm = MAC_PROP_PERM_READ;
		if (is_default) {
			*(uint8_t *)pr_val =
			    ((flags & CHIP_FLAG_SERDES) ? 0 : 1);
		} else {
			*(uint8_t *)pr_val = bgep->param_adv_10hdx;
		}
		break;
	case MAC_PROP_EN_10HDX_CAP:
		if (is_default) {
			*(uint8_t *)pr_val =
			    ((flags & CHIP_FLAG_SERDES) ? 0 : 1);
		} else {
			*(uint8_t *)pr_val = bgep->param_en_10hdx;
		}
		break;
	case MAC_PROP_ADV_100T4_CAP:
	case MAC_PROP_EN_100T4_CAP:
		*perm = MAC_PROP_PERM_READ;
		*(uint8_t *)pr_val = 0;
		break;
	case MAC_PROP_PRIVATE:
		err = bge_get_priv_prop(bgep, pr_name, pr_flags,
		    pr_valsize, pr_val);
		return (err);
	default:
		return (ENOTSUP);
	}
	return (0);
}

/* ARGSUSED */
static int
bge_set_priv_prop(bge_t *bgep, const char *pr_name, uint_t pr_valsize,
    const void *pr_val)
{
	int err = 0;
	long result;

	if (strcmp(pr_name, "_adv_pause_cap") == 0) {
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
		if (result > 1 || result < 0) {
			err = EINVAL;
		} else {
			bgep->param_adv_pause = (uint32_t)result;
			if (bge_reprogram(bgep) == IOC_INVAL)
				err = EINVAL;
		}
		return (err);
	}
	if (strcmp(pr_name, "_adv_asym_pause_cap") == 0) {
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
		if (result > 1 || result < 0) {
			err = EINVAL;
		} else {
			bgep->param_adv_asym_pause = (uint32_t)result;
			if (bge_reprogram(bgep) == IOC_INVAL)
				err = EINVAL;
		}
		return (err);
	}
	if (strcmp(pr_name, "_drain_max") == 0) {

		/*
		 * On the Tx side, the h/w register that triggers a real
		 * packet transmission would otherwise have to be updated
		 * for every packet.  The drain_max parameter is used to
		 * reduce these register accesses: it controls the maximum
		 * number of packets we will hold before updating the bge
		 * h/w to trigger a h/w transmit.  The bge chipset usually
		 * has a maximum of 512 Tx descriptors, so the upper bound
		 * on drain_max is 512.
		 */
		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
		if (result > 512 || result < 1)
			err = EINVAL;
		else {
			bgep->param_drain_max = (uint32_t)result;
			if (bge_reprogram(bgep) == IOC_INVAL)
				err = EINVAL;
		}
		return (err);
	}
	if (strcmp(pr_name, "_msi_cnt") == 0) {

		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
		if (result > 7 || result < 0)
			err = EINVAL;
		else {
			bgep->param_msi_cnt = (uint32_t)result;
			if (bge_reprogram(bgep) == IOC_INVAL)
				err = EINVAL;
		}
		return (err);
	}
	if (strcmp(pr_name, "_intr_coalesce_blank_time") == 0) {
		if (ddi_strtol(pr_val, (char **)NULL, 0, &result) != 0)
			return (EINVAL);

		bgep->chipid.rx_ticks_norm = (uint32_t)result;
		return (0);
	}

	if (strcmp(pr_name, "_intr_coalesce_pkt_cnt") == 0) {
		if (ddi_strtol(pr_val, (char **)NULL, 0, &result) != 0)
			return (EINVAL);

		bgep->chipid.rx_count_norm = (uint32_t)result;
		return (0);
	}
	return (ENOTSUP);
}

static int
bge_get_priv_prop(bge_t *bge, const char *pr_name, uint_t pr_flags,
    uint_t pr_valsize, void *pr_val)
{
	int err = ENOTSUP;
	boolean_t is_default = (pr_flags & MAC_PROP_DEFAULT);
	int value;

	if (strcmp(pr_name, "_adv_pause_cap") == 0) {
		value = (is_default? 1 : bge->param_adv_pause);
		err = 0;
		goto done;
	}
	if (strcmp(pr_name, "_adv_asym_pause_cap") == 0) {
		value = (is_default? 1 : bge->param_adv_asym_pause);
		err = 0;
		goto done;
	}
	if (strcmp(pr_name, "_drain_max") == 0) {
		value = (is_default? 64 : bge->param_drain_max);
		err = 0;
		goto done;
	}
	if (strcmp(pr_name, "_msi_cnt") == 0) {
		value = (is_default? 0 : bge->param_msi_cnt);
		err = 0;
		goto done;
	}

	if (strcmp(pr_name, "_intr_coalesce_blank_time") == 0) {
		value = (is_default? bge_rx_ticks_norm :
		    bge->chipid.rx_ticks_norm);
		err = 0;
		goto done;
	}

	if (strcmp(pr_name, "_intr_coalesce_pkt_cnt") == 0) {
		value = (is_default? bge_rx_count_norm :
		    bge->chipid.rx_count_norm);
		err = 0;
		goto done;
	}

done:
	if (err == 0) {
		(void) snprintf(pr_val, pr_valsize, "%d", value);
	}
	return (err);
}
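/*
 * Private properties travel as strings: bge_set_priv_prop() parses
 * them with ddi_strtol() and bge_get_priv_prop() renders them with
 * snprintf("%d").  From userland they would typically be driven
 * through dladm, e.g. (illustrative):
 *
 *	# dladm set-linkprop -p _drain_max=256 bge0
 *	# dladm show-linkprop -p _drain_max bge0
 */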
/*
 * Compute the index of the required bit in the multicast hash map.
 * This must mirror the way the hardware actually does it!
 * See Broadcom document 570X-PG102-R page 125.
 */
static uint32_t
bge_hash_index(const uint8_t *mca)
{
	uint32_t hash;

	CRC32(hash, mca, ETHERADDRL, -1U, crc32_table);

	return (hash);
}
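/*
 * The caller reduces this CRC to a table index and then to a
 * (word, bit) pair, as in bge_m_multicst() below.  For example, a
 * hash that reduces to index 173 lands in word 173/32 == 5 under bit
 * mask 1 << (173%32) == 1 << 13.  (Index chosen purely for
 * illustration.)
 */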
/*
 * bge_m_multicst() -- enable/disable a multicast address
 */
static int
bge_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
{
	bge_t *bgep = arg;		/* private device info	*/
	uint32_t hash;
	uint32_t index;
	uint32_t word;
	uint32_t bit;
	uint8_t *refp;

	BGE_TRACE(("bge_m_multicst($%p, %s, %s)", arg,
	    (add) ? "add" : "remove", ether_sprintf((void *)mca)));

	/*
	 * Precalculate all required masks, pointers etc ...
	 */
	hash = bge_hash_index(mca);
	index = hash % BGE_HASH_TABLE_SIZE;
	word = index/32u;
	bit = 1 << (index % 32u);
	refp = &bgep->mcast_refs[index];

	BGE_DEBUG(("bge_m_multicst: hash 0x%x index %d (%d:0x%x) = %d",
	    hash, index, word, bit, *refp));

	/*
	 * We must set the appropriate bit in the hash map (and the
	 * corresponding h/w register) when the refcount goes from 0
	 * to >0, and clear it when the last ref goes away (refcount
	 * goes from >0 back to 0).  If we change the hash map, we
	 * must also update the chip's hardware map registers.
	 */
	mutex_enter(bgep->genlock);
	if (!(bgep->progress & PROGRESS_INTR)) {
		/* can happen during autorecovery */
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	if (add) {
		if ((*refp)++ == 0) {
			bgep->mcast_hash[word] |= bit;
#ifdef BGE_IPMI_ASF
			if (bge_chip_sync(bgep, B_TRUE) == DDI_FAILURE) {
#else
			if (bge_chip_sync(bgep) == DDI_FAILURE) {
#endif
				(void) bge_check_acc_handle(bgep,
				    bgep->cfg_handle);
				(void) bge_check_acc_handle(bgep,
				    bgep->io_handle);
				ddi_fm_service_impact(bgep->devinfo,
				    DDI_SERVICE_DEGRADED);
				mutex_exit(bgep->genlock);
				return (EIO);
			}
		}
	} else {
		if (--(*refp) == 0) {
			bgep->mcast_hash[word] &= ~bit;
#ifdef BGE_IPMI_ASF
			if (bge_chip_sync(bgep, B_TRUE) == DDI_FAILURE) {
#else
			if (bge_chip_sync(bgep) == DDI_FAILURE) {
#endif
				(void) bge_check_acc_handle(bgep,
				    bgep->cfg_handle);
				(void) bge_check_acc_handle(bgep,
				    bgep->io_handle);
				ddi_fm_service_impact(bgep->devinfo,
				    DDI_SERVICE_DEGRADED);
				mutex_exit(bgep->genlock);
				return (EIO);
			}
		}
	}
	BGE_DEBUG(("bge_m_multicst($%p) done", arg));
	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	mutex_exit(bgep->genlock);

	return (0);
}

/*
 * bge_m_promisc() -- set or reset promiscuous mode on the board
 *
 *	Program the hardware to enable/disable promiscuous and/or
 *	receive-all-multicast modes.
 */
static int
bge_m_promisc(void *arg, boolean_t on)
{
	bge_t *bgep = arg;

	BGE_TRACE(("bge_m_promisc_set($%p, %d)", arg, on));

	/*
	 * Store MAC layer specified mode and pass to chip layer to update h/w
	 */
	mutex_enter(bgep->genlock);
	if (!(bgep->progress & PROGRESS_INTR)) {
		/* can happen during autorecovery */
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	bgep->promisc = on;
#ifdef BGE_IPMI_ASF
	if (bge_chip_sync(bgep, B_TRUE) == DDI_FAILURE) {
#else
	if (bge_chip_sync(bgep) == DDI_FAILURE) {
#endif
		(void) bge_check_acc_handle(bgep, bgep->cfg_handle);
		(void) bge_check_acc_handle(bgep, bgep->io_handle);
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	BGE_DEBUG(("bge_m_promisc_set($%p) done", arg));
	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	mutex_exit(bgep->genlock);
	return (0);
}

/*
 * Find the slot for the specified unicast address
 */
int
bge_unicst_find(bge_t *bgep, const uint8_t *mac_addr)
{
	int slot;

	ASSERT(mutex_owned(bgep->genlock));

	for (slot = 0; slot < bgep->unicst_addr_total; slot++) {
		if (bcmp(bgep->curr_addr[slot].addr, mac_addr, ETHERADDRL) == 0)
			return (slot);
	}

	return (-1);
}
/*
 * Programs the classifier to start steering packets matching 'mac_addr' to the
 * specified ring 'arg'.
 */
static int
bge_addmac(void *arg, const uint8_t *mac_addr)
{
	recv_ring_t *rrp = (recv_ring_t *)arg;
	bge_t *bgep = rrp->bgep;
	bge_recv_rule_t *rulep = bgep->recv_rules;
	bge_rule_info_t *rinfop = NULL;
	uint8_t ring = (uint8_t)(rrp - bgep->recv) + 1;
	int i;
	uint16_t tmp16;
	uint32_t tmp32;
	int slot;
	int err;

	mutex_enter(bgep->genlock);
	if (bgep->unicst_addr_avail == 0) {
		mutex_exit(bgep->genlock);
		return (ENOSPC);
	}

	/*
	 * First add the unicast address to an available slot.
	 */
	slot = bge_unicst_find(bgep, mac_addr);
	ASSERT(slot == -1);

	for (slot = 0; slot < bgep->unicst_addr_total; slot++) {
		if (!bgep->curr_addr[slot].set) {
			bgep->curr_addr[slot].set = B_TRUE;
			break;
		}
	}

	ASSERT(slot < bgep->unicst_addr_total);
	bgep->unicst_addr_avail--;
	mutex_exit(bgep->genlock);

	if ((err = bge_unicst_set(bgep, mac_addr, slot)) != 0)
		goto fail;

	/* A rule is already here. Deny this. */
	if (rrp->mac_addr_rule != NULL) {
		err = ether_cmp(mac_addr, rrp->mac_addr_val) ? EEXIST : EBUSY;
		goto fail;
	}

	/*
	 * Allocate a bge_rule_info_t to keep track of which rule slots
	 * are being used.
	 */
	rinfop = kmem_zalloc(sizeof (bge_rule_info_t), KM_NOSLEEP);
	if (rinfop == NULL) {
		err = ENOMEM;
		goto fail;
	}

	/*
	 * Look for the starting slot to place the rules.
	 * The two slots we reserve must be contiguous.
	 */
	for (i = 0; i + 1 < RECV_RULES_NUM_MAX; i++)
		if ((rulep[i].control & RECV_RULE_CTL_ENABLE) == 0 &&
		    (rulep[i+1].control & RECV_RULE_CTL_ENABLE) == 0)
			break;

	ASSERT(i + 1 < RECV_RULES_NUM_MAX);

	bcopy(mac_addr, &tmp32, sizeof (tmp32));
	rulep[i].mask_value = ntohl(tmp32);
	rulep[i].control = RULE_DEST_MAC_1(ring) | RECV_RULE_CTL_AND;
	bge_reg_put32(bgep, RECV_RULE_MASK_REG(i), rulep[i].mask_value);
	bge_reg_put32(bgep, RECV_RULE_CONTROL_REG(i), rulep[i].control);

	bcopy(mac_addr + 4, &tmp16, sizeof (tmp16));
	rulep[i+1].mask_value = 0xffff0000 | ntohs(tmp16);
	rulep[i+1].control = RULE_DEST_MAC_2(ring);
	bge_reg_put32(bgep, RECV_RULE_MASK_REG(i+1), rulep[i+1].mask_value);
	bge_reg_put32(bgep, RECV_RULE_CONTROL_REG(i+1), rulep[i+1].control);
	rinfop->start = i;
	rinfop->count = 2;

	rrp->mac_addr_rule = rinfop;
	bcopy(mac_addr, rrp->mac_addr_val, ETHERADDRL);

	return (0);

fail:
	/* Clear the address just set */
	(void) bge_unicst_set(bgep, zero_addr, slot);
	mutex_enter(bgep->genlock);
	bgep->curr_addr[slot].set = B_FALSE;
	bgep->unicst_addr_avail++;
	mutex_exit(bgep->genlock);

	return (err);
}

/*
 * Stop classifying packets matching the MAC address to the specified ring.
 */
static int
bge_remmac(void *arg, const uint8_t *mac_addr)
{
	recv_ring_t *rrp = (recv_ring_t *)arg;
	bge_t *bgep = rrp->bgep;
	bge_recv_rule_t *rulep = bgep->recv_rules;
	bge_rule_info_t *rinfop = rrp->mac_addr_rule;
	int start;
	int slot;
	int err;

	/*
	 * Remove the MAC address from its slot.
	 */
	mutex_enter(bgep->genlock);
	slot = bge_unicst_find(bgep, mac_addr);
	if (slot == -1) {
		mutex_exit(bgep->genlock);
		return (EINVAL);
	}

	ASSERT(bgep->curr_addr[slot].set);
	mutex_exit(bgep->genlock);

	if ((err = bge_unicst_set(bgep, zero_addr, slot)) != 0)
		return (err);

	if (rinfop == NULL || ether_cmp(mac_addr, rrp->mac_addr_val) != 0)
		return (EINVAL);

	start = rinfop->start;
	rulep[start].mask_value = 0;
	rulep[start].control = 0;
	bge_reg_put32(bgep, RECV_RULE_MASK_REG(start), rulep[start].mask_value);
	bge_reg_put32(bgep, RECV_RULE_CONTROL_REG(start), rulep[start].control);
	start++;
	rulep[start].mask_value = 0;
	rulep[start].control = 0;
	bge_reg_put32(bgep, RECV_RULE_MASK_REG(start), rulep[start].mask_value);
	bge_reg_put32(bgep, RECV_RULE_CONTROL_REG(start), rulep[start].control);

	kmem_free(rinfop, sizeof (bge_rule_info_t));
	rrp->mac_addr_rule = NULL;
	bzero(rrp->mac_addr_val, ETHERADDRL);

	mutex_enter(bgep->genlock);
	bgep->curr_addr[slot].set = B_FALSE;
	bgep->unicst_addr_avail++;
	mutex_exit(bgep->genlock);

	return (0);
}
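/*
 * Worked example for the rule encoding in bge_addmac() above: for the
 * (made-up) address 00:11:22:33:44:55, the first rule slot is loaded
 * with the high four bytes, mask_value 0x00112233, ANDed with the
 * second slot, which matches the low two bytes under a 16-bit mask:
 * 0xffff0000 | 0x4455 == 0xffff4455.  Together the contiguous pair
 * steers only exact matches of the full 6-byte address to the chosen
 * ring.
 */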
static int
bge_flag_intr_enable(mac_intr_handle_t ih)
{
	recv_ring_t *rrp = (recv_ring_t *)ih;
	bge_t *bgep = rrp->bgep;

	mutex_enter(bgep->genlock);
	rrp->poll_flag = 0;
	mutex_exit(bgep->genlock);

	return (0);
}

static int
bge_flag_intr_disable(mac_intr_handle_t ih)
{
	recv_ring_t *rrp = (recv_ring_t *)ih;
	bge_t *bgep = rrp->bgep;

	mutex_enter(bgep->genlock);
	rrp->poll_flag = 1;
	mutex_exit(bgep->genlock);

	return (0);
}

static int
bge_ring_start(mac_ring_driver_t rh, uint64_t mr_gen_num)
{
	recv_ring_t *rx_ring;

	rx_ring = (recv_ring_t *)rh;
	mutex_enter(rx_ring->rx_lock);
	rx_ring->ring_gen_num = mr_gen_num;
	mutex_exit(rx_ring->rx_lock);
	return (0);
}


/*
 * Callback function for the MAC layer to register all rings
 * for a given ring_group, denoted by rg_index.
 */
void
bge_fill_ring(void *arg, mac_ring_type_t rtype, const int rg_index,
    const int index, mac_ring_info_t *infop, mac_ring_handle_t rh)
{
	bge_t *bgep = arg;
	mac_intr_t *mintr;

	switch (rtype) {
	case MAC_RING_TYPE_RX: {
		recv_ring_t *rx_ring;
		ASSERT(rg_index >= 0 && rg_index < MIN(bgep->chipid.rx_rings,
		    MAC_ADDRESS_REGS_MAX) && index == 0);

		rx_ring = &bgep->recv[rg_index];
		rx_ring->ring_handle = rh;

		infop->mri_driver = (mac_ring_driver_t)rx_ring;
		infop->mri_start = bge_ring_start;
		infop->mri_stop = NULL;
		infop->mri_poll = bge_poll_ring;

		mintr = &infop->mri_intr;
		mintr->mi_handle = (mac_intr_handle_t)rx_ring;
		mintr->mi_enable = bge_flag_intr_enable;
		mintr->mi_disable = bge_flag_intr_disable;

		break;
	}
	case MAC_RING_TYPE_TX:
	default:
		ASSERT(0);
		break;
	}
}

/*
 * Fill in the infop passed as argument with the info for the
 * respective ring_group.  Each group has a single ring in it.  We
 * keep it simple and use the same internal handle for rings and
 * groups.
 */
void
bge_fill_group(void *arg, mac_ring_type_t rtype, const int rg_index,
    mac_group_info_t *infop, mac_group_handle_t gh)
{
	bge_t *bgep = arg;

	switch (rtype) {
	case MAC_RING_TYPE_RX: {
		recv_ring_t *rx_ring;

		ASSERT(rg_index >= 0 && rg_index < MIN(bgep->chipid.rx_rings,
		    MAC_ADDRESS_REGS_MAX));
		rx_ring = &bgep->recv[rg_index];
		rx_ring->ring_group_handle = gh;

		infop->mgi_driver = (mac_group_driver_t)rx_ring;
		infop->mgi_start = NULL;
		infop->mgi_stop = NULL;
		infop->mgi_addmac = bge_addmac;
		infop->mgi_remmac = bge_remmac;
		infop->mgi_count = 1;
		break;
	}
	case MAC_RING_TYPE_TX:
	default:
		ASSERT(0);
		break;
	}
}
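/*
 * Note: bge_flag_intr_enable()/bge_flag_intr_disable() above are how
 * the MAC layer switches a receive ring between interrupt-driven and
 * polled operation: poll_flag is consulted by the receive path, and
 * while it is set the framework pulls packets itself through the
 * mri_poll entry point (bge_poll_ring).
 */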
/*ARGSUSED*/
static boolean_t
bge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
{
	bge_t *bgep = arg;

	switch (cap) {
	case MAC_CAPAB_HCKSUM: {
		uint32_t *txflags = cap_data;

		*txflags = HCKSUM_INET_FULL_V4 | HCKSUM_IPHDRCKSUM;
		break;
	}
	case MAC_CAPAB_RINGS: {
		mac_capab_rings_t *cap_rings = cap_data;

		/* Temporarily disable multiple tx rings. */
		if (cap_rings->mr_type != MAC_RING_TYPE_RX)
			return (B_FALSE);

		cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
		cap_rings->mr_rnum = cap_rings->mr_gnum =
		    MIN(bgep->chipid.rx_rings, MAC_ADDRESS_REGS_MAX);
		cap_rings->mr_rget = bge_fill_ring;
		cap_rings->mr_gget = bge_fill_group;
		break;
	}
	default:
		return (B_FALSE);
	}
	return (B_TRUE);
}

/*
 * Loopback ioctl code
 */

static lb_property_t loopmodes[] = {
	{ normal,	"normal",	BGE_LOOP_NONE		},
	{ external,	"1000Mbps",	BGE_LOOP_EXTERNAL_1000	},
	{ external,	"100Mbps",	BGE_LOOP_EXTERNAL_100	},
	{ external,	"10Mbps",	BGE_LOOP_EXTERNAL_10	},
	{ internal,	"PHY",		BGE_LOOP_INTERNAL_PHY	},
	{ internal,	"MAC",		BGE_LOOP_INTERNAL_MAC	}
};

static enum ioc_reply
bge_set_loop_mode(bge_t *bgep, uint32_t mode)
{
	/*
	 * If the mode isn't being changed, there's nothing to do ...
	 */
	if (mode == bgep->param_loop_mode)
		return (IOC_ACK);

	/*
	 * Validate the requested mode and prepare a suitable message
	 * to explain the link down/up cycle that the change will
	 * probably induce ...
	 */
	switch (mode) {
	default:
		return (IOC_INVAL);

	case BGE_LOOP_NONE:
	case BGE_LOOP_EXTERNAL_1000:
	case BGE_LOOP_EXTERNAL_100:
	case BGE_LOOP_EXTERNAL_10:
	case BGE_LOOP_INTERNAL_PHY:
	case BGE_LOOP_INTERNAL_MAC:
		break;
	}

	/*
	 * All OK; tell the caller to reprogram
	 * the PHY and/or MAC for the new mode ...
	 */
	bgep->param_loop_mode = mode;
	return (IOC_RESTART_ACK);
}

static enum ioc_reply
bge_loop_ioctl(bge_t *bgep, queue_t *wq, mblk_t *mp, struct iocblk *iocp)
{
	lb_info_sz_t *lbsp;
	lb_property_t *lbpp;
	uint32_t *lbmp;
	int cmd;

	_NOTE(ARGUNUSED(wq))

	/*
	 * Validate format of ioctl
	 */
	if (mp->b_cont == NULL)
		return (IOC_INVAL);

	cmd = iocp->ioc_cmd;
	switch (cmd) {
	default:
		/* NOTREACHED */
		bge_error(bgep, "bge_loop_ioctl: invalid cmd 0x%x", cmd);
		return (IOC_INVAL);

	case LB_GET_INFO_SIZE:
		if (iocp->ioc_count != sizeof (lb_info_sz_t))
			return (IOC_INVAL);
		lbsp = (void *)mp->b_cont->b_rptr;
		*lbsp = sizeof (loopmodes);
		return (IOC_REPLY);

	case LB_GET_INFO:
		if (iocp->ioc_count != sizeof (loopmodes))
			return (IOC_INVAL);
		lbpp = (void *)mp->b_cont->b_rptr;
		bcopy(loopmodes, lbpp, sizeof (loopmodes));
		return (IOC_REPLY);

	case LB_GET_MODE:
		if (iocp->ioc_count != sizeof (uint32_t))
			return (IOC_INVAL);
		lbmp = (void *)mp->b_cont->b_rptr;
		*lbmp = bgep->param_loop_mode;
		return (IOC_REPLY);

	case LB_SET_MODE:
		if (iocp->ioc_count != sizeof (uint32_t))
			return (IOC_INVAL);
		lbmp = (void *)mp->b_cont->b_rptr;
		return (bge_set_loop_mode(bgep, *lbmp));
	}
}
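/*
 * A userland test harness would typically drive this in three steps
 * (sketch, using the generic loopback ioctls): LB_GET_INFO_SIZE to
 * learn how big the mode table is, LB_GET_INFO to fetch the
 * lb_property_t array above, then LB_SET_MODE with the chosen mode
 * value (e.g. BGE_LOOP_INTERNAL_MAC); the link is expected to cycle
 * down and up as the PHY/MAC are reprogrammed.
 */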
/*
 * Specific bge IOCTLs; the gld module handles the generic ones.
 */
static void
bge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
{
	bge_t *bgep = arg;
	struct iocblk *iocp;
	enum ioc_reply status;
	boolean_t need_privilege;
	int err;
	int cmd;

	/*
	 * Validate the command before bothering with the mutex ...
	 */
	iocp = (void *)mp->b_rptr;
	iocp->ioc_error = 0;
	need_privilege = B_TRUE;
	cmd = iocp->ioc_cmd;
	switch (cmd) {
	default:
		miocnak(wq, mp, 0, EINVAL);
		return;

	case BGE_MII_READ:
	case BGE_MII_WRITE:
	case BGE_SEE_READ:
	case BGE_SEE_WRITE:
	case BGE_FLASH_READ:
	case BGE_FLASH_WRITE:
	case BGE_DIAG:
	case BGE_PEEK:
	case BGE_POKE:
	case BGE_PHY_RESET:
	case BGE_SOFT_RESET:
	case BGE_HARD_RESET:
		break;

	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
	case LB_GET_MODE:
		need_privilege = B_FALSE;
		/* FALLTHRU */
	case LB_SET_MODE:
		break;

	}

	if (need_privilege) {
		/*
		 * Check for specific net_config privilege on Solaris 10+.
		 */
		err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
		if (err != 0) {
			miocnak(wq, mp, 0, err);
			return;
		}
	}

	mutex_enter(bgep->genlock);
	if (!(bgep->progress & PROGRESS_INTR)) {
		/* can happen during autorecovery */
		mutex_exit(bgep->genlock);
		miocnak(wq, mp, 0, EIO);
		return;
	}

	switch (cmd) {
	default:
		_NOTE(NOTREACHED)
		status = IOC_INVAL;
		break;

	case BGE_MII_READ:
	case BGE_MII_WRITE:
	case BGE_SEE_READ:
	case BGE_SEE_WRITE:
	case BGE_FLASH_READ:
	case BGE_FLASH_WRITE:
	case BGE_DIAG:
	case BGE_PEEK:
	case BGE_POKE:
	case BGE_PHY_RESET:
	case BGE_SOFT_RESET:
	case BGE_HARD_RESET:
		status = bge_chip_ioctl(bgep, wq, mp, iocp);
		break;

	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
	case LB_GET_MODE:
	case LB_SET_MODE:
		status = bge_loop_ioctl(bgep, wq, mp, iocp);
		break;

	}

	/*
	 * Do we need to reprogram the PHY and/or the MAC?
	 * Do it now, while we still have the mutex.
	 *
	 * Note: update the PHY first, 'cos it controls the
	 * speed/duplex parameters that the MAC code uses.
	 */
	switch (status) {
	case IOC_RESTART_REPLY:
	case IOC_RESTART_ACK:
		if (bge_reprogram(bgep) == IOC_INVAL)
			status = IOC_INVAL;
		break;
	}

	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		status = IOC_INVAL;
	}
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		status = IOC_INVAL;
	}
	mutex_exit(bgep->genlock);

	/*
	 * Finally, decide how to reply
	 */
	switch (status) {
	default:
	case IOC_INVAL:
		/*
		 * Error, reply with a NAK and EINVAL or the specified error
		 */
		miocnak(wq, mp, 0, iocp->ioc_error == 0 ?
		    EINVAL : iocp->ioc_error);
		break;

	case IOC_DONE:
		/*
		 * OK, reply already sent
		 */
		break;

	case IOC_RESTART_ACK:
	case IOC_ACK:
		/*
		 * OK, reply with an ACK
		 */
		miocack(wq, mp, 0, 0);
		break;

	case IOC_RESTART_REPLY:
	case IOC_REPLY:
		/*
		 * OK, send prepared reply as ACK or NAK
		 */
		mp->b_datap->db_type = iocp->ioc_error == 0 ?
		    M_IOCACK : M_IOCNAK;
		qreply(wq, mp);
		break;
	}
}

/*
 * ========== Per-instance setup/teardown code ==========
 */

#undef	BGE_DBG
#define	BGE_DBG		BGE_DBG_INIT	/* debug flag for this code	*/
/*
 * Allocate an area of memory and a DMA handle for accessing it
 */
static int
bge_alloc_dma_mem(bge_t *bgep, size_t memsize, ddi_device_acc_attr_t *attr_p,
    uint_t dma_flags, dma_area_t *dma_p)
{
	caddr_t va;
	int err;

	BGE_TRACE(("bge_alloc_dma_mem($%p, %ld, $%p, 0x%x, $%p)",
	    (void *)bgep, memsize, attr_p, dma_flags, dma_p));

	/*
	 * Allocate handle
	 */
	err = ddi_dma_alloc_handle(bgep->devinfo, &dma_attr,
	    DDI_DMA_DONTWAIT, NULL, &dma_p->dma_hdl);
	if (err != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * Allocate memory
	 */
	err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize, attr_p,
	    dma_flags, DDI_DMA_DONTWAIT, NULL, &va, &dma_p->alength,
	    &dma_p->acc_hdl);
	if (err != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * Bind the two together
	 */
	dma_p->mem_va = va;
	err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL,
	    va, dma_p->alength, dma_flags, DDI_DMA_DONTWAIT, NULL,
	    &dma_p->cookie, &dma_p->ncookies);

	BGE_DEBUG(("bge_alloc_dma_mem(): bind %d bytes; err %d, %d cookies",
	    dma_p->alength, err, dma_p->ncookies));

	if (err != DDI_DMA_MAPPED || dma_p->ncookies != 1)
		return (DDI_FAILURE);

	dma_p->nslots = ~0U;
	dma_p->size = ~0U;
	dma_p->token = ~0U;
	dma_p->offset = 0;
	return (DDI_SUCCESS);
}

/*
 * Free one allocated area of DMAable memory
 */
static void
bge_free_dma_mem(dma_area_t *dma_p)
{
	if (dma_p->dma_hdl != NULL) {
		if (dma_p->ncookies) {
			(void) ddi_dma_unbind_handle(dma_p->dma_hdl);
			dma_p->ncookies = 0;
		}
		ddi_dma_free_handle(&dma_p->dma_hdl);
		dma_p->dma_hdl = NULL;
	}

	if (dma_p->acc_hdl != NULL) {
		ddi_dma_mem_free(&dma_p->acc_hdl);
		dma_p->acc_hdl = NULL;
	}
}
/*
 * Utility routine to carve a slice off a chunk of allocated memory,
 * updating the chunk descriptor accordingly.  The size of the slice
 * is given by the product of the <qty> and <size> parameters.
 */
static void
bge_slice_chunk(dma_area_t *slice, dma_area_t *chunk,
    uint32_t qty, uint32_t size)
{
	static uint32_t sequence = 0xbcd5704a;
	size_t totsize;

	totsize = qty*size;
	ASSERT(totsize <= chunk->alength);

	*slice = *chunk;
	slice->nslots = qty;
	slice->size = size;
	slice->alength = totsize;
	slice->token = ++sequence;

	chunk->mem_va = (caddr_t)chunk->mem_va + totsize;
	chunk->alength -= totsize;
	chunk->offset += totsize;
	chunk->cookie.dmac_laddress += totsize;
	chunk->cookie.dmac_size -= totsize;
}
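/*
 * Example: given a freshly bound 64KiB chunk,
 * bge_slice_chunk(&slice, &chunk, 4, 2048) hands back an 8192-byte
 * slice (nslots 4, each 2048 bytes) carrying a fresh token, and
 * advances the chunk's virtual address, DMA address and offset by
 * 8192 while shrinking its remaining length to 57344 bytes.
 * (Sizes chosen purely for illustration.)
 */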
2102 */ 2103 static void 2104 bge_init_buff_ring(bge_t *bgep, uint64_t ring) 2105 { 2106 buff_ring_t *brp; 2107 bge_status_t *bsp; 2108 sw_rbd_t *srbdp; 2109 dma_area_t pbuf; 2110 uint32_t bufsize; 2111 uint32_t nslots; 2112 uint32_t slot; 2113 uint32_t split; 2114 2115 static bge_regno_t nic_ring_addrs[BGE_BUFF_RINGS_MAX] = { 2116 NIC_MEM_SHADOW_BUFF_STD, 2117 NIC_MEM_SHADOW_BUFF_JUMBO, 2118 NIC_MEM_SHADOW_BUFF_MINI 2119 }; 2120 static bge_regno_t mailbox_regs[BGE_BUFF_RINGS_MAX] = { 2121 RECV_STD_PROD_INDEX_REG, 2122 RECV_JUMBO_PROD_INDEX_REG, 2123 RECV_MINI_PROD_INDEX_REG 2124 }; 2125 static bge_regno_t buff_cons_xref[BGE_BUFF_RINGS_MAX] = { 2126 STATUS_STD_BUFF_CONS_INDEX, 2127 STATUS_JUMBO_BUFF_CONS_INDEX, 2128 STATUS_MINI_BUFF_CONS_INDEX 2129 }; 2130 2131 BGE_TRACE(("bge_init_buff_ring($%p, %d)", 2132 (void *)bgep, ring)); 2133 2134 brp = &bgep->buff[ring]; 2135 nslots = brp->desc.nslots; 2136 ASSERT(brp->buf[0].nslots == nslots/BGE_SPLIT); 2137 bufsize = brp->buf[0].size; 2138 2139 /* 2140 * Set up the copy of the h/w RCB 2141 * 2142 * Note: unlike Send & Receive Return Rings, (where the max_len 2143 * field holds the number of slots), in a Receive Buffer Ring 2144 * this field indicates the size of each buffer in the ring. 2145 */ 2146 brp->hw_rcb.host_ring_addr = brp->desc.cookie.dmac_laddress; 2147 brp->hw_rcb.max_len = (uint16_t)bufsize; 2148 brp->hw_rcb.flags = nslots > 0 ? 0 : RCB_FLAG_RING_DISABLED; 2149 brp->hw_rcb.nic_ring_addr = nic_ring_addrs[ring]; 2150 2151 /* 2152 * Other one-off initialisation of per-ring data 2153 */ 2154 brp->bgep = bgep; 2155 bsp = DMA_VPTR(bgep->status_block); 2156 brp->cons_index_p = &bsp->buff_cons_index[buff_cons_xref[ring]]; 2157 brp->chip_mbx_reg = mailbox_regs[ring]; 2158 mutex_init(brp->rf_lock, NULL, MUTEX_DRIVER, 2159 DDI_INTR_PRI(bgep->intr_pri)); 2160 2161 /* 2162 * Allocate the array of s/w Receive Buffer Descriptors 2163 */ 2164 srbdp = kmem_zalloc(nslots*sizeof (*srbdp), KM_SLEEP); 2165 brp->sw_rbds = srbdp; 2166 2167 /* 2168 * Now initialise each array element once and for all 2169 */ 2170 for (split = 0; split < BGE_SPLIT; ++split) { 2171 pbuf = brp->buf[split]; 2172 for (slot = 0; slot < nslots/BGE_SPLIT; ++srbdp, ++slot) 2173 bge_slice_chunk(&srbdp->pbuf, &pbuf, 1, bufsize); 2174 ASSERT(pbuf.alength == 0); 2175 } 2176 } 2177 2178 /* 2179 * Clean up initialisation done above before the memory is freed 2180 */ 2181 static void 2182 bge_fini_buff_ring(bge_t *bgep, uint64_t ring) 2183 { 2184 buff_ring_t *brp; 2185 sw_rbd_t *srbdp; 2186 2187 BGE_TRACE(("bge_fini_buff_ring($%p, %d)", 2188 (void *)bgep, ring)); 2189 2190 brp = &bgep->buff[ring]; 2191 srbdp = brp->sw_rbds; 2192 kmem_free(srbdp, brp->desc.nslots*sizeof (*srbdp)); 2193 2194 mutex_destroy(brp->rf_lock); 2195 } 2196 2197 /* 2198 * Initialise the specified Receive (Return) Ring, using the 2199 * information in the <dma_area> descriptors that it contains 2200 * to set up all the other fields. This routine should be called 2201 * only once for each ring. 2202 */ 2203 static void 2204 bge_init_recv_ring(bge_t *bgep, uint64_t ring) 2205 { 2206 recv_ring_t *rrp; 2207 bge_status_t *bsp; 2208 uint32_t nslots; 2209 2210 BGE_TRACE(("bge_init_recv_ring($%p, %d)", 2211 (void *)bgep, ring)); 2212 2213 /* 2214 * The chip architecture requires that receive return rings have 2215 * 512 or 1024 or 2048 elements per ring. See 570X-PG108-R page 103. 
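 * (A ring with nslots == 0 is also permitted by the ASSERT below; it is
 * set up with RCB_FLAG_RING_DISABLED, so unused return rings stay
 * safely disabled.)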
2216 */ 2217 rrp = &bgep->recv[ring]; 2218 nslots = rrp->desc.nslots; 2219 ASSERT(nslots == 0 || nslots == 512 || 2220 nslots == 1024 || nslots == 2048); 2221 2222 /* 2223 * Set up the copy of the h/w RCB 2224 */ 2225 rrp->hw_rcb.host_ring_addr = rrp->desc.cookie.dmac_laddress; 2226 rrp->hw_rcb.max_len = (uint16_t)nslots; 2227 rrp->hw_rcb.flags = nslots > 0 ? 0 : RCB_FLAG_RING_DISABLED; 2228 rrp->hw_rcb.nic_ring_addr = 0; 2229 2230 /* 2231 * Other one-off initialisation of per-ring data 2232 */ 2233 rrp->bgep = bgep; 2234 bsp = DMA_VPTR(bgep->status_block); 2235 rrp->prod_index_p = RECV_INDEX_P(bsp, ring); 2236 rrp->chip_mbx_reg = RECV_RING_CONS_INDEX_REG(ring); 2237 mutex_init(rrp->rx_lock, NULL, MUTEX_DRIVER, 2238 DDI_INTR_PRI(bgep->intr_pri)); 2239 } 2240 2241 2242 /* 2243 * Clean up initialisation done above before the memory is freed 2244 */ 2245 static void 2246 bge_fini_recv_ring(bge_t *bgep, uint64_t ring) 2247 { 2248 recv_ring_t *rrp; 2249 2250 BGE_TRACE(("bge_fini_recv_ring($%p, %d)", 2251 (void *)bgep, ring)); 2252 2253 rrp = &bgep->recv[ring]; 2254 if (rrp->rx_softint) 2255 ddi_remove_softintr(rrp->rx_softint); 2256 mutex_destroy(rrp->rx_lock); 2257 } 2258 2259 /* 2260 * Initialise the specified Send Ring, using the information in the 2261 * <dma_area> descriptors that it contains to set up all the other 2262 * fields. This routine should be called only once for each ring. 2263 */ 2264 static void 2265 bge_init_send_ring(bge_t *bgep, uint64_t ring) 2266 { 2267 send_ring_t *srp; 2268 bge_status_t *bsp; 2269 sw_sbd_t *ssbdp; 2270 dma_area_t desc; 2271 dma_area_t pbuf; 2272 uint32_t nslots; 2273 uint32_t slot; 2274 uint32_t split; 2275 sw_txbuf_t *txbuf; 2276 2277 BGE_TRACE(("bge_init_send_ring($%p, %d)", 2278 (void *)bgep, ring)); 2279 2280 /* 2281 * The chip architecture requires that host-based send rings 2282 * have 512 elements per ring. See 570X-PG102-R page 56. 2283 */ 2284 srp = &bgep->send[ring]; 2285 nslots = srp->desc.nslots; 2286 ASSERT(nslots == 0 || nslots == 512); 2287 2288 /* 2289 * Set up the copy of the h/w RCB 2290 */ 2291 srp->hw_rcb.host_ring_addr = srp->desc.cookie.dmac_laddress; 2292 srp->hw_rcb.max_len = (uint16_t)nslots; 2293 srp->hw_rcb.flags = nslots > 0 ? 
0 : RCB_FLAG_RING_DISABLED; 2294 srp->hw_rcb.nic_ring_addr = NIC_MEM_SHADOW_SEND_RING(ring, nslots); 2295 2296 /* 2297 * Other one-off initialisation of per-ring data 2298 */ 2299 srp->bgep = bgep; 2300 bsp = DMA_VPTR(bgep->status_block); 2301 srp->cons_index_p = SEND_INDEX_P(bsp, ring); 2302 srp->chip_mbx_reg = SEND_RING_HOST_INDEX_REG(ring); 2303 mutex_init(srp->tx_lock, NULL, MUTEX_DRIVER, 2304 DDI_INTR_PRI(bgep->intr_pri)); 2305 mutex_init(srp->txbuf_lock, NULL, MUTEX_DRIVER, 2306 DDI_INTR_PRI(bgep->intr_pri)); 2307 mutex_init(srp->freetxbuf_lock, NULL, MUTEX_DRIVER, 2308 DDI_INTR_PRI(bgep->intr_pri)); 2309 mutex_init(srp->tc_lock, NULL, MUTEX_DRIVER, 2310 DDI_INTR_PRI(bgep->intr_pri)); 2311 if (nslots == 0) 2312 return; 2313 2314 /* 2315 * Allocate the array of s/w Send Buffer Descriptors 2316 */ 2317 ssbdp = kmem_zalloc(nslots*sizeof (*ssbdp), KM_SLEEP); 2318 txbuf = kmem_zalloc(BGE_SEND_BUF_MAX*sizeof (*txbuf), KM_SLEEP); 2319 srp->txbuf_head = 2320 kmem_zalloc(BGE_SEND_BUF_MAX*sizeof (bge_queue_item_t), KM_SLEEP); 2321 srp->pktp = kmem_zalloc(BGE_SEND_BUF_MAX*sizeof (send_pkt_t), KM_SLEEP); 2322 srp->sw_sbds = ssbdp; 2323 srp->txbuf = txbuf; 2324 srp->tx_buffers = BGE_SEND_BUF_NUM; 2325 srp->tx_buffers_low = srp->tx_buffers / 4; 2326 if (bgep->chipid.snd_buff_size > BGE_SEND_BUFF_SIZE_DEFAULT) 2327 srp->tx_array_max = BGE_SEND_BUF_ARRAY_JUMBO; 2328 else 2329 srp->tx_array_max = BGE_SEND_BUF_ARRAY; 2330 srp->tx_array = 1; 2331 2332 /* 2333 * Chunk tx desc area 2334 */ 2335 desc = srp->desc; 2336 for (slot = 0; slot < nslots; ++ssbdp, ++slot) { 2337 bge_slice_chunk(&ssbdp->desc, &desc, 1, 2338 sizeof (bge_sbd_t)); 2339 } 2340 ASSERT(desc.alength == 0); 2341 2342 /* 2343 * Chunk tx buffer area 2344 */ 2345 for (split = 0; split < BGE_SPLIT; ++split) { 2346 pbuf = srp->buf[0][split]; 2347 for (slot = 0; slot < BGE_SEND_BUF_NUM/BGE_SPLIT; ++slot) { 2348 bge_slice_chunk(&txbuf->buf, &pbuf, 1, 2349 bgep->chipid.snd_buff_size); 2350 txbuf++; 2351 } 2352 ASSERT(pbuf.alength == 0); 2353 } 2354 } 2355 2356 /* 2357 * Clean up initialisation done above before the memory is freed 2358 */ 2359 static void 2360 bge_fini_send_ring(bge_t *bgep, uint64_t ring) 2361 { 2362 send_ring_t *srp; 2363 uint32_t array; 2364 uint32_t split; 2365 uint32_t nslots; 2366 2367 BGE_TRACE(("bge_fini_send_ring($%p, %d)", 2368 (void *)bgep, ring)); 2369 2370 srp = &bgep->send[ring]; 2371 mutex_destroy(srp->tc_lock); 2372 mutex_destroy(srp->freetxbuf_lock); 2373 mutex_destroy(srp->txbuf_lock); 2374 mutex_destroy(srp->tx_lock); 2375 nslots = srp->desc.nslots; 2376 if (nslots == 0) 2377 return; 2378 2379 for (array = 1; array < srp->tx_array; ++array) 2380 for (split = 0; split < BGE_SPLIT; ++split) 2381 bge_free_dma_mem(&srp->buf[array][split]); 2382 kmem_free(srp->sw_sbds, nslots*sizeof (*srp->sw_sbds)); 2383 kmem_free(srp->txbuf_head, BGE_SEND_BUF_MAX*sizeof (*srp->txbuf_head)); 2384 kmem_free(srp->txbuf, BGE_SEND_BUF_MAX*sizeof (*srp->txbuf)); 2385 kmem_free(srp->pktp, BGE_SEND_BUF_MAX*sizeof (*srp->pktp)); 2386 srp->sw_sbds = NULL; 2387 srp->txbuf_head = NULL; 2388 srp->txbuf = NULL; 2389 srp->pktp = NULL; 2390 } 2391 2392 /* 2393 * Initialise all transmit, receive, and buffer rings. 2394 */ 2395 void 2396 bge_init_rings(bge_t *bgep) 2397 { 2398 uint32_t ring; 2399 2400 BGE_TRACE(("bge_init_rings($%p)", (void *)bgep)); 2401 2402 /* 2403 * Perform one-off initialisation of each ring ... 
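 * in the order send, receive, buffer; bge_fini_rings() below undoes
 * the work in the reverse order.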
2404 */ 2405 for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring) 2406 bge_init_send_ring(bgep, ring); 2407 for (ring = 0; ring < BGE_RECV_RINGS_MAX; ++ring) 2408 bge_init_recv_ring(bgep, ring); 2409 for (ring = 0; ring < BGE_BUFF_RINGS_MAX; ++ring) 2410 bge_init_buff_ring(bgep, ring); 2411 } 2412 2413 /* 2414 * Undo the work of bge_init_rings() above before the memory is freed 2415 */ 2416 void 2417 bge_fini_rings(bge_t *bgep) 2418 { 2419 uint32_t ring; 2420 2421 BGE_TRACE(("bge_fini_rings($%p)", (void *)bgep)); 2422 2423 for (ring = 0; ring < BGE_BUFF_RINGS_MAX; ++ring) 2424 bge_fini_buff_ring(bgep, ring); 2425 for (ring = 0; ring < BGE_RECV_RINGS_MAX; ++ring) 2426 bge_fini_recv_ring(bgep, ring); 2427 for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring) 2428 bge_fini_send_ring(bgep, ring); 2429 } 2430 2431 /* 2432 * Called from bge_m_stop() to free the tx buffers that were 2433 * allocated by the tx process. 2434 */ 2435 void 2436 bge_free_txbuf_arrays(send_ring_t *srp) 2437 { 2438 uint32_t array; 2439 uint32_t split; 2440 2441 ASSERT(mutex_owned(srp->tx_lock)); 2442 2443 /* 2444 * Free the extra tx buffer DMA area 2445 */ 2446 for (array = 1; array < srp->tx_array; ++array) 2447 for (split = 0; split < BGE_SPLIT; ++split) 2448 bge_free_dma_mem(&srp->buf[array][split]); 2449 2450 /* 2451 * Restore initial tx buffer numbers 2452 */ 2453 srp->tx_array = 1; 2454 srp->tx_buffers = BGE_SEND_BUF_NUM; 2455 srp->tx_buffers_low = srp->tx_buffers / 4; 2456 srp->tx_flow = 0; 2457 bzero(srp->pktp, BGE_SEND_BUF_MAX * sizeof (*srp->pktp)); 2458 } 2459 2460 /* 2461 * Called from tx process to allocate more tx buffers 2462 */ 2463 bge_queue_item_t * 2464 bge_alloc_txbuf_array(bge_t *bgep, send_ring_t *srp) 2465 { 2466 bge_queue_t *txbuf_queue; 2467 bge_queue_item_t *txbuf_item_last; 2468 bge_queue_item_t *txbuf_item; 2469 bge_queue_item_t *txbuf_item_rtn; 2470 sw_txbuf_t *txbuf; 2471 dma_area_t area; 2472 size_t txbuffsize; 2473 uint32_t slot; 2474 uint32_t array; 2475 uint32_t split; 2476 uint32_t err; 2477 2478 ASSERT(mutex_owned(srp->tx_lock)); 2479 2480 array = srp->tx_array; 2481 if (array >= srp->tx_array_max) 2482 return (NULL); 2483 2484 /* 2485 * Allocate memory & handles for TX buffers 2486 */ 2487 txbuffsize = BGE_SEND_BUF_NUM*bgep->chipid.snd_buff_size; 2488 ASSERT((txbuffsize % BGE_SPLIT) == 0); 2489 for (split = 0; split < BGE_SPLIT; ++split) { 2490 err = bge_alloc_dma_mem(bgep, txbuffsize/BGE_SPLIT, 2491 &bge_data_accattr, DDI_DMA_WRITE | BGE_DMA_MODE, 2492 &srp->buf[array][split]); 2493 if (err != DDI_SUCCESS) { 2494 /* Free the chunks already allocated, including the partly-set-up one */ 2495 for (slot = 0; slot <= split; ++slot) 2496 bge_free_dma_mem(&srp->buf[array][slot]); 2497 srp->tx_alloc_fail++; 2498 return (NULL); 2499 } 2500 } 2501 2502 /* 2503 * Chunk tx buffer area 2504 */ 2505 txbuf = srp->txbuf + array*BGE_SEND_BUF_NUM; 2506 for (split = 0; split < BGE_SPLIT; ++split) { 2507 area = srp->buf[array][split]; 2508 for (slot = 0; slot < BGE_SEND_BUF_NUM/BGE_SPLIT; ++slot) { 2509 bge_slice_chunk(&txbuf->buf, &area, 1, 2510 bgep->chipid.snd_buff_size); 2511 txbuf++; 2512 } 2513 } 2514 2515 /* 2516 * Add above buffers to the tx buffer pop queue 2517 */ 2518 txbuf_item = srp->txbuf_head + array*BGE_SEND_BUF_NUM; 2519 txbuf = srp->txbuf + array*BGE_SEND_BUF_NUM; 2520 txbuf_item_last = NULL; 2521 for (slot = 0; slot < BGE_SEND_BUF_NUM; ++slot) { 2522 txbuf_item->item = txbuf; 2523 txbuf_item->next = txbuf_item_last; 2524 txbuf_item_last = txbuf_item; 2525 txbuf++; 2526 txbuf_item++; 2527 } 2528 txbuf_item =
srp->txbuf_head + array*BGE_SEND_BUF_NUM; 2529 txbuf_item_rtn = txbuf_item; 2530 txbuf_item++; 2531 txbuf_queue = srp->txbuf_pop_queue; 2532 mutex_enter(txbuf_queue->lock); 2533 txbuf_item->next = txbuf_queue->head; 2534 txbuf_queue->head = txbuf_item_last; 2535 txbuf_queue->count += BGE_SEND_BUF_NUM - 1; 2536 mutex_exit(txbuf_queue->lock); 2537 2538 srp->tx_array++; 2539 srp->tx_buffers += BGE_SEND_BUF_NUM; 2540 srp->tx_buffers_low = srp->tx_buffers / 4; 2541 2542 return (txbuf_item_rtn); 2543 } 2544 2545 /* 2546 * This function allocates all the transmit and receive buffers 2547 * and descriptors, in four chunks. 2548 */ 2549 int 2550 bge_alloc_bufs(bge_t *bgep) 2551 { 2552 dma_area_t area; 2553 size_t rxbuffsize; 2554 size_t txbuffsize; 2555 size_t rxbuffdescsize; 2556 size_t rxdescsize; 2557 size_t txdescsize; 2558 uint32_t ring; 2559 uint32_t rx_rings = bgep->chipid.rx_rings; 2560 uint32_t tx_rings = bgep->chipid.tx_rings; 2561 int split; 2562 int err; 2563 2564 BGE_TRACE(("bge_alloc_bufs($%p)", 2565 (void *)bgep)); 2566 2567 rxbuffsize = BGE_STD_SLOTS_USED*bgep->chipid.std_buf_size; 2568 rxbuffsize += bgep->chipid.jumbo_slots*bgep->chipid.recv_jumbo_size; 2569 rxbuffsize += BGE_MINI_SLOTS_USED*BGE_MINI_BUFF_SIZE; 2570 2571 txbuffsize = BGE_SEND_BUF_NUM*bgep->chipid.snd_buff_size; 2572 txbuffsize *= tx_rings; 2573 2574 rxdescsize = rx_rings*bgep->chipid.recv_slots; 2575 rxdescsize *= sizeof (bge_rbd_t); 2576 2577 rxbuffdescsize = BGE_STD_SLOTS_USED; 2578 rxbuffdescsize += bgep->chipid.jumbo_slots; 2579 rxbuffdescsize += BGE_MINI_SLOTS_USED; 2580 rxbuffdescsize *= sizeof (bge_rbd_t); 2581 2582 txdescsize = tx_rings*BGE_SEND_SLOTS_USED; 2583 txdescsize *= sizeof (bge_sbd_t); 2584 txdescsize += sizeof (bge_statistics_t); 2585 txdescsize += sizeof (bge_status_t); 2586 txdescsize += BGE_STATUS_PADDING; 2587 2588 /* 2589 * Enable PCI relaxed ordering only for RX/TX data buffers 2590 */ 2591 if (bge_relaxed_ordering) 2592 dma_attr.dma_attr_flags |= DDI_DMA_RELAXED_ORDERING; 2593 2594 /* 2595 * Allocate memory & handles for RX buffers 2596 */ 2597 ASSERT((rxbuffsize % BGE_SPLIT) == 0); 2598 for (split = 0; split < BGE_SPLIT; ++split) { 2599 err = bge_alloc_dma_mem(bgep, rxbuffsize/BGE_SPLIT, 2600 &bge_data_accattr, DDI_DMA_READ | BGE_DMA_MODE, 2601 &bgep->rx_buff[split]); 2602 if (err != DDI_SUCCESS) 2603 return (DDI_FAILURE); 2604 } 2605 2606 /* 2607 * Allocate memory & handles for TX buffers 2608 */ 2609 ASSERT((txbuffsize % BGE_SPLIT) == 0); 2610 for (split = 0; split < BGE_SPLIT; ++split) { 2611 err = bge_alloc_dma_mem(bgep, txbuffsize/BGE_SPLIT, 2612 &bge_data_accattr, DDI_DMA_WRITE | BGE_DMA_MODE, 2613 &bgep->tx_buff[split]); 2614 if (err != DDI_SUCCESS) 2615 return (DDI_FAILURE); 2616 } 2617 2618 dma_attr.dma_attr_flags &= ~DDI_DMA_RELAXED_ORDERING; 2619 2620 /* 2621 * Allocate memory & handles for receive return rings 2622 */ 2623 ASSERT((rxdescsize % rx_rings) == 0); 2624 for (split = 0; split < rx_rings; ++split) { 2625 err = bge_alloc_dma_mem(bgep, rxdescsize/rx_rings, 2626 &bge_desc_accattr, DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 2627 &bgep->rx_desc[split]); 2628 if (err != DDI_SUCCESS) 2629 return (DDI_FAILURE); 2630 } 2631 2632 /* 2633 * Allocate memory & handles for buffer (producer) descriptor rings 2634 */ 2635 err = bge_alloc_dma_mem(bgep, rxbuffdescsize, &bge_desc_accattr, 2636 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &bgep->rx_desc[split]); 2637 if (err != DDI_SUCCESS) 2638 return (DDI_FAILURE); 2639 2640 /* 2641 * Allocate memory & handles for TX descriptor rings, 2642 * status 
block, and statistics area 2643 */ 2644 err = bge_alloc_dma_mem(bgep, txdescsize, &bge_desc_accattr, 2645 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &bgep->tx_desc); 2646 if (err != DDI_SUCCESS) 2647 return (DDI_FAILURE); 2648 2649 /* 2650 * Now carve up each of the allocated areas ... 2651 */ 2652 for (split = 0; split < BGE_SPLIT; ++split) { 2653 area = bgep->rx_buff[split]; 2654 bge_slice_chunk(&bgep->buff[BGE_STD_BUFF_RING].buf[split], 2655 &area, BGE_STD_SLOTS_USED/BGE_SPLIT, 2656 bgep->chipid.std_buf_size); 2657 bge_slice_chunk(&bgep->buff[BGE_JUMBO_BUFF_RING].buf[split], 2658 &area, bgep->chipid.jumbo_slots/BGE_SPLIT, 2659 bgep->chipid.recv_jumbo_size); 2660 bge_slice_chunk(&bgep->buff[BGE_MINI_BUFF_RING].buf[split], 2661 &area, BGE_MINI_SLOTS_USED/BGE_SPLIT, 2662 BGE_MINI_BUFF_SIZE); 2663 } 2664 2665 for (split = 0; split < BGE_SPLIT; ++split) { 2666 area = bgep->tx_buff[split]; 2667 for (ring = 0; ring < tx_rings; ++ring) 2668 bge_slice_chunk(&bgep->send[ring].buf[0][split], 2669 &area, BGE_SEND_BUF_NUM/BGE_SPLIT, 2670 bgep->chipid.snd_buff_size); 2671 for (; ring < BGE_SEND_RINGS_MAX; ++ring) 2672 bge_slice_chunk(&bgep->send[ring].buf[0][split], 2673 &area, 0, bgep->chipid.snd_buff_size); 2674 } 2675 2676 for (ring = 0; ring < rx_rings; ++ring) 2677 bge_slice_chunk(&bgep->recv[ring].desc, &bgep->rx_desc[ring], 2678 bgep->chipid.recv_slots, sizeof (bge_rbd_t)); 2679 2680 area = bgep->rx_desc[rx_rings]; 2681 for (; ring < BGE_RECV_RINGS_MAX; ++ring) 2682 bge_slice_chunk(&bgep->recv[ring].desc, &area, 2683 0, sizeof (bge_rbd_t)); 2684 bge_slice_chunk(&bgep->buff[BGE_STD_BUFF_RING].desc, &area, 2685 BGE_STD_SLOTS_USED, sizeof (bge_rbd_t)); 2686 bge_slice_chunk(&bgep->buff[BGE_JUMBO_BUFF_RING].desc, &area, 2687 bgep->chipid.jumbo_slots, sizeof (bge_rbd_t)); 2688 bge_slice_chunk(&bgep->buff[BGE_MINI_BUFF_RING].desc, &area, 2689 BGE_MINI_SLOTS_USED, sizeof (bge_rbd_t)); 2690 ASSERT(area.alength == 0); 2691 2692 area = bgep->tx_desc; 2693 for (ring = 0; ring < tx_rings; ++ring) 2694 bge_slice_chunk(&bgep->send[ring].desc, &area, 2695 BGE_SEND_SLOTS_USED, sizeof (bge_sbd_t)); 2696 for (; ring < BGE_SEND_RINGS_MAX; ++ring) 2697 bge_slice_chunk(&bgep->send[ring].desc, &area, 2698 0, sizeof (bge_sbd_t)); 2699 bge_slice_chunk(&bgep->statistics, &area, 1, sizeof (bge_statistics_t)); 2700 bge_slice_chunk(&bgep->status_block, &area, 1, sizeof (bge_status_t)); 2701 ASSERT(area.alength == BGE_STATUS_PADDING); 2702 DMA_ZERO(bgep->status_block); 2703 2704 return (DDI_SUCCESS); 2705 } 2706 2707 /* 2708 * This routine frees the transmit and receive buffers and descriptors. 2709 * Make sure the chip is stopped before calling it! 
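 * (bge_unattach() below honours this: the chip is reset and interrupts
 * are disabled before bge_fini_rings() and bge_free_bufs() are run.)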
2710 */ 2711 void 2712 bge_free_bufs(bge_t *bgep) 2713 { 2714 int split; 2715 2716 BGE_TRACE(("bge_free_bufs($%p)", 2717 (void *)bgep)); 2718 2719 bge_free_dma_mem(&bgep->tx_desc); 2720 for (split = 0; split < BGE_RECV_RINGS_SPLIT; ++split) 2721 bge_free_dma_mem(&bgep->rx_desc[split]); 2722 for (split = 0; split < BGE_SPLIT; ++split) 2723 bge_free_dma_mem(&bgep->tx_buff[split]); 2724 for (split = 0; split < BGE_SPLIT; ++split) 2725 bge_free_dma_mem(&bgep->rx_buff[split]); 2726 } 2727 2728 /* 2729 * Determine (initial) MAC address ("BIA") to use for this interface 2730 */ 2731 2732 static void 2733 bge_find_mac_address(bge_t *bgep, chip_id_t *cidp) 2734 { 2735 struct ether_addr sysaddr; 2736 char propbuf[8]; /* "true" or "false", plus NUL */ 2737 uchar_t *bytes; 2738 int *ints; 2739 uint_t nelts; 2740 int err; 2741 2742 BGE_TRACE(("bge_find_mac_address($%p)", 2743 (void *)bgep)); 2744 2745 BGE_DEBUG(("bge_find_mac_address: hw_mac_addr %012llx, => %s (%sset)", 2746 cidp->hw_mac_addr, 2747 ether_sprintf((void *)cidp->vendor_addr.addr), 2748 cidp->vendor_addr.set ? "" : "not ")); 2749 2750 /* 2751 * The "vendor's factory-set address" may already have 2752 * been extracted from the chip, but if the property 2753 * "local-mac-address" is set we use that instead. It 2754 * will normally be set by OBP, but it could also be 2755 * specified in a .conf file(!) 2756 * 2757 * There doesn't seem to be a way to define byte-array 2758 * properties in a .conf, so we check whether it looks 2759 * like an array of 6 ints instead. 2760 * 2761 * Then, we check whether it looks like an array of 6 2762 * bytes (which it should, if OBP set it). If we can't 2763 * make sense of it either way, we'll ignore it. 2764 */ 2765 err = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, bgep->devinfo, 2766 DDI_PROP_DONTPASS, localmac_propname, &ints, &nelts); 2767 if (err == DDI_PROP_SUCCESS) { 2768 if (nelts == ETHERADDRL) { 2769 while (nelts--) 2770 cidp->vendor_addr.addr[nelts] = ints[nelts]; 2771 cidp->vendor_addr.set = B_TRUE; 2772 } 2773 ddi_prop_free(ints); 2774 } 2775 2776 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, bgep->devinfo, 2777 DDI_PROP_DONTPASS, localmac_propname, &bytes, &nelts); 2778 if (err == DDI_PROP_SUCCESS) { 2779 if (nelts == ETHERADDRL) { 2780 while (nelts--) 2781 cidp->vendor_addr.addr[nelts] = bytes[nelts]; 2782 cidp->vendor_addr.set = B_TRUE; 2783 } 2784 ddi_prop_free(bytes); 2785 } 2786 2787 BGE_DEBUG(("bge_find_mac_address: +local %s (%sset)", 2788 ether_sprintf((void *)cidp->vendor_addr.addr), 2789 cidp->vendor_addr.set ? "" : "not ")); 2790 2791 /* 2792 * Look up the OBP property "local-mac-address?". Note that even 2793 * though its value is a string (which should be "true" or "false"), 2794 * it can't be decoded by ddi_prop_lookup_string(9F). So, we zero 2795 * the buffer first and then fetch the property as an untyped array; 2796 * this may or may not include a final NUL, but since there will 2797 * always be one left at the end of the buffer we can now treat it 2798 * as a string anyway. 2799 */ 2800 nelts = sizeof (propbuf); 2801 bzero(propbuf, nelts--); 2802 err = ddi_getlongprop_buf(DDI_DEV_T_ANY, bgep->devinfo, 2803 DDI_PROP_CANSLEEP, localmac_boolname, propbuf, (int *)&nelts); 2804 2805 /* 2806 * Now, if the address still isn't set from the hardware (SEEPROM) 2807 * or the OBP or .conf property, OR if the user has foolishly set 2808 * 'local-mac-address? = false', use "the system address" instead 2809 * (but only if it's non-null i.e. has been set from the IDPROM). 
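 * To summarise the precedence implemented by this routine, from lowest
 * to highest: the chip's SEEPROM address, the "local-mac-address"
 * property (.conf ints or OBP bytes), the system IDPROM address (only
 * in the cases described above), and finally any netboot-created
 * "mac-address" property.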
2810 */ 2811 if (cidp->vendor_addr.set == B_FALSE || strcmp(propbuf, "false") == 0) 2812 if (localetheraddr(NULL, &sysaddr) != 0) { 2813 ethaddr_copy(&sysaddr, cidp->vendor_addr.addr); 2814 cidp->vendor_addr.set = B_TRUE; 2815 } 2816 2817 BGE_DEBUG(("bge_find_mac_address: +system %s (%sset)", 2818 ether_sprintf((void *)cidp->vendor_addr.addr), 2819 cidp->vendor_addr.set ? "" : "not ")); 2820 2821 /* 2822 * Finally(!), if there's a valid "mac-address" property (created 2823 * if we netbooted from this interface), we must use this instead 2824 * of any of the above to ensure that the NFS/install server doesn't 2825 * get confused by the address changing as Solaris takes over! 2826 */ 2827 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, bgep->devinfo, 2828 DDI_PROP_DONTPASS, macaddr_propname, &bytes, &nelts); 2829 if (err == DDI_PROP_SUCCESS) { 2830 if (nelts == ETHERADDRL) { 2831 while (nelts--) 2832 cidp->vendor_addr.addr[nelts] = bytes[nelts]; 2833 cidp->vendor_addr.set = B_TRUE; 2834 } 2835 ddi_prop_free(bytes); 2836 } 2837 2838 BGE_DEBUG(("bge_find_mac_address: =final %s (%sset)", 2839 ether_sprintf((void *)cidp->vendor_addr.addr), 2840 cidp->vendor_addr.set ? "" : "not ")); 2841 } 2842 2843 2844 /*ARGSUSED*/ 2845 int 2846 bge_check_acc_handle(bge_t *bgep, ddi_acc_handle_t handle) 2847 { 2848 ddi_fm_error_t de; 2849 2850 ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION); 2851 ddi_fm_acc_err_clear(handle, DDI_FME_VERSION); 2852 return (de.fme_status); 2853 } 2854 2855 /*ARGSUSED*/ 2856 int 2857 bge_check_dma_handle(bge_t *bgep, ddi_dma_handle_t handle) 2858 { 2859 ddi_fm_error_t de; 2860 2861 ASSERT(bgep->progress & PROGRESS_BUFS); 2862 ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION); 2863 return (de.fme_status); 2864 } 2865 2866 /* 2867 * The IO fault service error handling callback function 2868 */ 2869 /*ARGSUSED*/ 2870 static int 2871 bge_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data) 2872 { 2873 /* 2874 * as the driver can always deal with an error in any dma or 2875 * access handle, we can just return the fme_status value. 2876 */ 2877 pci_ereport_post(dip, err, NULL); 2878 return (err->fme_status); 2879 } 2880 2881 static void 2882 bge_fm_init(bge_t *bgep) 2883 { 2884 ddi_iblock_cookie_t iblk; 2885 2886 /* Only register with IO Fault Services if we have some capability */ 2887 if (bgep->fm_capabilities) { 2888 bge_reg_accattr.devacc_attr_access = DDI_FLAGERR_ACC; 2889 bge_desc_accattr.devacc_attr_access = DDI_FLAGERR_ACC; 2890 dma_attr.dma_attr_flags = DDI_DMA_FLAGERR; 2891 2892 /* Register capabilities with IO Fault Services */ 2893 ddi_fm_init(bgep->devinfo, &bgep->fm_capabilities, &iblk); 2894 2895 /* 2896 * Initialize pci ereport capabilities if ereport capable 2897 */ 2898 if (DDI_FM_EREPORT_CAP(bgep->fm_capabilities) || 2899 DDI_FM_ERRCB_CAP(bgep->fm_capabilities)) 2900 pci_ereport_setup(bgep->devinfo); 2901 2902 /* 2903 * Register error callback if error callback capable 2904 */ 2905 if (DDI_FM_ERRCB_CAP(bgep->fm_capabilities)) 2906 ddi_fm_handler_register(bgep->devinfo, 2907 bge_fm_error_cb, (void*) bgep); 2908 } else { 2909 /* 2910 * These fields have to be cleared of FMA if there are no 2911 * FMA capabilities at runtime. 
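 * A driver.conf sketch that would exercise this branch (hypothetical
 * setting; the property name comes from fm_cap above):
 *
 *	fm-capable=0;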
2912 */ 2913 bge_reg_accattr.devacc_attr_access = DDI_DEFAULT_ACC; 2914 bge_desc_accattr.devacc_attr_access = DDI_DEFAULT_ACC; 2915 dma_attr.dma_attr_flags = 0; 2916 } 2917 } 2918 2919 static void 2920 bge_fm_fini(bge_t *bgep) 2921 { 2922 /* Only unregister FMA capabilities if we registered some */ 2923 if (bgep->fm_capabilities) { 2924 2925 /* 2926 * Release any resources allocated by pci_ereport_setup() 2927 */ 2928 if (DDI_FM_EREPORT_CAP(bgep->fm_capabilities) || 2929 DDI_FM_ERRCB_CAP(bgep->fm_capabilities)) 2930 pci_ereport_teardown(bgep->devinfo); 2931 2932 /* 2933 * Un-register error callback if error callback capable 2934 */ 2935 if (DDI_FM_ERRCB_CAP(bgep->fm_capabilities)) 2936 ddi_fm_handler_unregister(bgep->devinfo); 2937 2938 /* Unregister from IO Fault Services */ 2939 ddi_fm_fini(bgep->devinfo); 2940 } 2941 } 2942 2943 static void 2944 #ifdef BGE_IPMI_ASF 2945 bge_unattach(bge_t *bgep, uint_t asf_mode) 2946 #else 2947 bge_unattach(bge_t *bgep) 2948 #endif 2949 { 2950 BGE_TRACE(("bge_unattach($%p)", 2951 (void *)bgep)); 2952 2953 /* 2954 * Flag that no more activity may be initiated 2955 */ 2956 bgep->progress &= ~PROGRESS_READY; 2957 2958 /* 2959 * Quiesce the PHY and MAC (leave it reset but still powered). 2960 * Clean up and free all BGE data structures 2961 */ 2962 if (bgep->periodic_id != NULL) { 2963 ddi_periodic_delete(bgep->periodic_id); 2964 bgep->periodic_id = NULL; 2965 } 2966 if (bgep->progress & PROGRESS_KSTATS) 2967 bge_fini_kstats(bgep); 2968 if (bgep->progress & PROGRESS_PHY) 2969 bge_phys_reset(bgep); 2970 if (bgep->progress & PROGRESS_HWINT) { 2971 mutex_enter(bgep->genlock); 2972 #ifdef BGE_IPMI_ASF 2973 if (bge_chip_reset(bgep, B_FALSE, asf_mode) != DDI_SUCCESS) 2974 #else 2975 if (bge_chip_reset(bgep, B_FALSE) != DDI_SUCCESS) 2976 #endif 2977 ddi_fm_service_impact(bgep->devinfo, 2978 DDI_SERVICE_UNAFFECTED); 2979 #ifdef BGE_IPMI_ASF 2980 if (bgep->asf_enabled) { 2981 /* 2982 * This register has been overlaid. We restore its 2983 * initial value here. 
2984 */ 2985 bge_nic_put32(bgep, BGE_NIC_DATA_SIG_ADDR, 2986 BGE_NIC_DATA_SIG); 2987 } 2988 #endif 2989 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) 2990 ddi_fm_service_impact(bgep->devinfo, 2991 DDI_SERVICE_UNAFFECTED); 2992 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) 2993 ddi_fm_service_impact(bgep->devinfo, 2994 DDI_SERVICE_UNAFFECTED); 2995 mutex_exit(bgep->genlock); 2996 } 2997 if (bgep->progress & PROGRESS_INTR) { 2998 bge_intr_disable(bgep); 2999 bge_fini_rings(bgep); 3000 } 3001 if (bgep->progress & PROGRESS_HWINT) { 3002 bge_rem_intrs(bgep); 3003 rw_destroy(bgep->errlock); 3004 mutex_destroy(bgep->softintrlock); 3005 mutex_destroy(bgep->genlock); 3006 } 3007 if (bgep->progress & PROGRESS_FACTOTUM) 3008 ddi_remove_softintr(bgep->factotum_id); 3009 if (bgep->progress & PROGRESS_RESCHED) 3010 ddi_remove_softintr(bgep->drain_id); 3011 if (bgep->progress & PROGRESS_BUFS) 3012 bge_free_bufs(bgep); 3013 if (bgep->progress & PROGRESS_REGS) 3014 ddi_regs_map_free(&bgep->io_handle); 3015 if (bgep->progress & PROGRESS_CFG) 3016 pci_config_teardown(&bgep->cfg_handle); 3017 3018 bge_fm_fini(bgep); 3019 3020 ddi_remove_minor_node(bgep->devinfo, NULL); 3021 kmem_free(bgep->pstats, sizeof (bge_statistics_reg_t)); 3022 kmem_free(bgep, sizeof (*bgep)); 3023 } 3024 3025 static int 3026 bge_resume(dev_info_t *devinfo) 3027 { 3028 bge_t *bgep; /* Our private data */ 3029 chip_id_t *cidp; 3030 chip_id_t chipid; 3031 3032 bgep = ddi_get_driver_private(devinfo); 3033 if (bgep == NULL) 3034 return (DDI_FAILURE); 3035 3036 /* 3037 * Refuse to resume if the data structures aren't consistent 3038 */ 3039 if (bgep->devinfo != devinfo) 3040 return (DDI_FAILURE); 3041 3042 #ifdef BGE_IPMI_ASF 3043 /* 3044 * Power management is not currently supported in BGE. To 3045 * implement it, add the ASF/IPMI-related 3046 * code here. 3047 */ 3048 3049 #endif 3050 3051 /* 3052 * Read chip ID & set up config space command register(s) 3053 * Refuse to resume if the chip has changed its identity!
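 * (vendor, device, revision and asic_rev must all match the values
 * saved at attach time; any mismatch fails the resume below).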
3054 */ 3055 cidp = &bgep->chipid; 3056 mutex_enter(bgep->genlock); 3057 bge_chip_cfg_init(bgep, &chipid, B_FALSE); 3058 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) { 3059 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 3060 mutex_exit(bgep->genlock); 3061 return (DDI_FAILURE); 3062 } 3063 mutex_exit(bgep->genlock); 3064 if (chipid.vendor != cidp->vendor) 3065 return (DDI_FAILURE); 3066 if (chipid.device != cidp->device) 3067 return (DDI_FAILURE); 3068 if (chipid.revision != cidp->revision) 3069 return (DDI_FAILURE); 3070 if (chipid.asic_rev != cidp->asic_rev) 3071 return (DDI_FAILURE); 3072 3073 /* 3074 * All OK, reinitialise h/w & kick off GLD scheduling 3075 */ 3076 mutex_enter(bgep->genlock); 3077 if (bge_restart(bgep, B_TRUE) != DDI_SUCCESS) { 3078 (void) bge_check_acc_handle(bgep, bgep->cfg_handle); 3079 (void) bge_check_acc_handle(bgep, bgep->io_handle); 3080 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 3081 mutex_exit(bgep->genlock); 3082 return (DDI_FAILURE); 3083 } 3084 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) { 3085 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 3086 mutex_exit(bgep->genlock); 3087 return (DDI_FAILURE); 3088 } 3089 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) { 3090 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 3091 mutex_exit(bgep->genlock); 3092 return (DDI_FAILURE); 3093 } 3094 mutex_exit(bgep->genlock); 3095 return (DDI_SUCCESS); 3096 } 3097 3098 /* 3099 * attach(9E) -- Attach a device to the system 3100 * 3101 * Called once for each board successfully probed. 3102 */ 3103 static int 3104 bge_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd) 3105 { 3106 bge_t *bgep; /* Our private data */ 3107 mac_register_t *macp; 3108 chip_id_t *cidp; 3109 caddr_t regs; 3110 int instance; 3111 int err; 3112 int intr_types; 3113 #ifdef BGE_IPMI_ASF 3114 uint32_t mhcrValue; 3115 #ifdef __sparc 3116 uint16_t value16; 3117 #endif 3118 #ifdef BGE_NETCONSOLE 3119 int retval; 3120 #endif 3121 #endif 3122 3123 instance = ddi_get_instance(devinfo); 3124 3125 BGE_GTRACE(("bge_attach($%p, %d) instance %d", 3126 (void *)devinfo, cmd, instance)); 3127 BGE_BRKPT(NULL, "bge_attach"); 3128 3129 switch (cmd) { 3130 default: 3131 return (DDI_FAILURE); 3132 3133 case DDI_RESUME: 3134 return (bge_resume(devinfo)); 3135 3136 case DDI_ATTACH: 3137 break; 3138 } 3139 3140 bgep = kmem_zalloc(sizeof (*bgep), KM_SLEEP); 3141 bgep->pstats = kmem_zalloc(sizeof (bge_statistics_reg_t), KM_SLEEP); 3142 ddi_set_driver_private(devinfo, bgep); 3143 bgep->bge_guard = BGE_GUARD; 3144 bgep->devinfo = devinfo; 3145 bgep->param_drain_max = 64; 3146 bgep->param_msi_cnt = 0; 3147 bgep->param_loop_mode = 0; 3148 3149 /* 3150 * Initialize more fields in BGE private data 3151 */ 3152 bgep->debug = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 3153 DDI_PROP_DONTPASS, debug_propname, bge_debug); 3154 (void) snprintf(bgep->ifname, sizeof (bgep->ifname), "%s%d", 3155 BGE_DRIVER_NAME, instance); 3156 3157 /* 3158 * Initialize for fma support 3159 */ 3160 bgep->fm_capabilities = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 3161 DDI_PROP_DONTPASS, fm_cap, 3162 DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE | 3163 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE); 3164 BGE_DEBUG(("bgep->fm_capabilities = %d", bgep->fm_capabilities)); 3165 bge_fm_init(bgep); 3166 3167 /* 3168 * Look up the IOMMU's page size for DVMA mappings (must be 3169 * a power of 2) and convert to a mask. 
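 * For example (hypothetical 8K IOMMU page), dvma_pagesize() would
 * return 0x2000, and negating it below yields the 64-bit mask
 * 0xFFFFFFFFFFFFE000.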
This can be used to 3170 * determine whether a message buffer crosses a page boundary. 3171 * Note: in 2s complement binary notation, if X is a power of 3172 * 2, then -X has the representation "11...1100...00". 3173 */ 3174 bgep->pagemask = dvma_pagesize(devinfo); 3175 ASSERT(ddi_ffs(bgep->pagemask) == ddi_fls(bgep->pagemask)); 3176 bgep->pagemask = -bgep->pagemask; 3177 3178 /* 3179 * Map config space registers 3180 * Read chip ID & set up config space command register(s) 3181 * 3182 * Note: this leaves the chip accessible by Memory Space 3183 * accesses, but with interrupts and Bus Mastering off. 3184 * This should ensure that nothing untoward will happen 3185 * if it has been left active by the (net-)bootloader. 3186 * We'll re-enable Bus Mastering once we've reset the chip, 3187 * and allow interrupts only when everything else is set up. 3188 */ 3189 err = pci_config_setup(devinfo, &bgep->cfg_handle); 3190 #ifdef BGE_IPMI_ASF 3191 #ifdef __sparc 3192 value16 = pci_config_get16(bgep->cfg_handle, PCI_CONF_COMM); 3193 value16 = value16 | (PCI_COMM_MAE | PCI_COMM_ME); 3194 pci_config_put16(bgep->cfg_handle, PCI_CONF_COMM, value16); 3195 mhcrValue = MHCR_ENABLE_INDIRECT_ACCESS | 3196 MHCR_ENABLE_TAGGED_STATUS_MODE | 3197 MHCR_MASK_INTERRUPT_MODE | 3198 MHCR_MASK_PCI_INT_OUTPUT | 3199 MHCR_CLEAR_INTERRUPT_INTA | 3200 MHCR_ENABLE_ENDIAN_WORD_SWAP | 3201 MHCR_ENABLE_ENDIAN_BYTE_SWAP; 3202 pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_MHCR, mhcrValue); 3203 bge_ind_put32(bgep, MEMORY_ARBITER_MODE_REG, 3204 bge_ind_get32(bgep, MEMORY_ARBITER_MODE_REG) | 3205 MEMORY_ARBITER_ENABLE); 3206 #else 3207 mhcrValue = pci_config_get32(bgep->cfg_handle, PCI_CONF_BGE_MHCR); 3208 #endif 3209 if (mhcrValue & MHCR_ENABLE_ENDIAN_WORD_SWAP) { 3210 bgep->asf_wordswapped = B_TRUE; 3211 } else { 3212 bgep->asf_wordswapped = B_FALSE; 3213 } 3214 bge_asf_get_config(bgep); 3215 #endif 3216 if (err != DDI_SUCCESS) { 3217 bge_problem(bgep, "pci_config_setup() failed"); 3218 goto attach_fail; 3219 } 3220 bgep->progress |= PROGRESS_CFG; 3221 cidp = &bgep->chipid; 3222 bzero(cidp, sizeof (*cidp)); 3223 bge_chip_cfg_init(bgep, cidp, B_FALSE); 3224 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) { 3225 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 3226 goto attach_fail; 3227 } 3228 3229 #ifdef BGE_IPMI_ASF 3230 if (DEVICE_5721_SERIES_CHIPSETS(bgep) || 3231 DEVICE_5714_SERIES_CHIPSETS(bgep)) { 3232 bgep->asf_newhandshake = B_TRUE; 3233 } else { 3234 bgep->asf_newhandshake = B_FALSE; 3235 } 3236 #endif 3237 3238 /* 3239 * Update those parts of the chip ID derived from volatile 3240 * registers with the values seen by OBP (in case the chip 3241 * has been reset externally and therefore lost them). 
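 * (The subsystem IDs, cache-line-size and latency-timer are refreshed
 * below from the properties named at the top of this file, along with
 * the bge-rx-rings/bge-tx-rings tuning properties.)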
3242 */ 3243 cidp->subven = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 3244 DDI_PROP_DONTPASS, subven_propname, cidp->subven); 3245 cidp->subdev = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 3246 DDI_PROP_DONTPASS, subdev_propname, cidp->subdev); 3247 cidp->clsize = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 3248 DDI_PROP_DONTPASS, clsize_propname, cidp->clsize); 3249 cidp->latency = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 3250 DDI_PROP_DONTPASS, latency_propname, cidp->latency); 3251 cidp->rx_rings = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 3252 DDI_PROP_DONTPASS, rxrings_propname, cidp->rx_rings); 3253 cidp->tx_rings = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 3254 DDI_PROP_DONTPASS, txrings_propname, cidp->tx_rings); 3255 3256 if (bge_jumbo_enable == B_TRUE) { 3257 cidp->default_mtu = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 3258 DDI_PROP_DONTPASS, default_mtu, BGE_DEFAULT_MTU); 3259 if ((cidp->default_mtu < BGE_DEFAULT_MTU)|| 3260 (cidp->default_mtu > BGE_MAXIMUM_MTU)) { 3261 cidp->default_mtu = BGE_DEFAULT_MTU; 3262 } 3263 } 3264 /* 3265 * Map operating registers 3266 */ 3267 err = ddi_regs_map_setup(devinfo, BGE_PCI_OPREGS_RNUMBER, 3268 ®s, 0, 0, &bge_reg_accattr, &bgep->io_handle); 3269 if (err != DDI_SUCCESS) { 3270 bge_problem(bgep, "ddi_regs_map_setup() failed"); 3271 goto attach_fail; 3272 } 3273 bgep->io_regs = regs; 3274 bgep->progress |= PROGRESS_REGS; 3275 3276 /* 3277 * Characterise the device, so we know its requirements. 3278 * Then allocate the appropriate TX and RX descriptors & buffers. 3279 */ 3280 if (bge_chip_id_init(bgep) == EIO) { 3281 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 3282 goto attach_fail; 3283 } 3284 3285 3286 err = bge_alloc_bufs(bgep); 3287 if (err != DDI_SUCCESS) { 3288 bge_problem(bgep, "DMA buffer allocation failed"); 3289 goto attach_fail; 3290 } 3291 bgep->progress |= PROGRESS_BUFS; 3292 3293 /* 3294 * Add the softint handlers: 3295 * 3296 * Both of these handlers are used to avoid restrictions on the 3297 * context and/or mutexes required for some operations. In 3298 * particular, the hardware interrupt handler and its subfunctions 3299 * can detect a number of conditions that we don't want to handle 3300 * in that context or with that set of mutexes held. So, these 3301 * softints are triggered instead: 3302 * 3303 * the <resched> softint is triggered if we have previously 3304 * had to refuse to send a packet because of resource shortage 3305 * (we've run out of transmit buffers), but the send completion 3306 * interrupt handler has now detected that more buffers have 3307 * become available. 3308 * 3309 * the <factotum> is triggered if the h/w interrupt handler 3310 * sees the <link state changed> or <error> bits in the status 3311 * block. It's also triggered periodically to poll the link 3312 * state, just in case we aren't getting link status change 3313 * interrupts ... 
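 * Both are fired elsewhere in the driver with ddi_trigger_softintr(),
 * e.g. (sketch) ddi_trigger_softintr(bgep->factotum_id).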
3314 */ 3315 err = ddi_add_softintr(devinfo, DDI_SOFTINT_LOW, &bgep->drain_id, 3316 NULL, NULL, bge_send_drain, (caddr_t)bgep); 3317 if (err != DDI_SUCCESS) { 3318 bge_problem(bgep, "ddi_add_softintr() failed"); 3319 goto attach_fail; 3320 } 3321 bgep->progress |= PROGRESS_RESCHED; 3322 err = ddi_add_softintr(devinfo, DDI_SOFTINT_LOW, &bgep->factotum_id, 3323 NULL, NULL, bge_chip_factotum, (caddr_t)bgep); 3324 if (err != DDI_SUCCESS) { 3325 bge_problem(bgep, "ddi_add_softintr() failed"); 3326 goto attach_fail; 3327 } 3328 bgep->progress |= PROGRESS_FACTOTUM; 3329 3330 /* Get supported interrupt types */ 3331 if (ddi_intr_get_supported_types(devinfo, &intr_types) != DDI_SUCCESS) { 3332 bge_error(bgep, "ddi_intr_get_supported_types failed\n"); 3333 3334 goto attach_fail; 3335 } 3336 3337 BGE_DEBUG(("%s: ddi_intr_get_supported_types() returned: %x", 3338 bgep->ifname, intr_types)); 3339 3340 if ((intr_types & DDI_INTR_TYPE_MSI) && bgep->chipid.msi_enabled) { 3341 if (bge_add_intrs(bgep, DDI_INTR_TYPE_MSI) != DDI_SUCCESS) { 3342 bge_error(bgep, "MSI registration failed, " 3343 "trying FIXED interrupt type\n"); 3344 } else { 3345 BGE_DEBUG(("%s: Using MSI interrupt type", 3346 bgep->ifname)); 3347 bgep->intr_type = DDI_INTR_TYPE_MSI; 3348 bgep->progress |= PROGRESS_HWINT; 3349 } 3350 } 3351 3352 if (!(bgep->progress & PROGRESS_HWINT) && 3353 (intr_types & DDI_INTR_TYPE_FIXED)) { 3354 if (bge_add_intrs(bgep, DDI_INTR_TYPE_FIXED) != DDI_SUCCESS) { 3355 bge_error(bgep, "FIXED interrupt " 3356 "registration failed\n"); 3357 goto attach_fail; 3358 } 3359 3360 BGE_DEBUG(("%s: Using FIXED interrupt type", bgep->ifname)); 3361 3362 bgep->intr_type = DDI_INTR_TYPE_FIXED; 3363 bgep->progress |= PROGRESS_HWINT; 3364 } 3365 3366 if (!(bgep->progress & PROGRESS_HWINT)) { 3367 bge_error(bgep, "No interrupts registered\n"); 3368 goto attach_fail; 3369 } 3370 3371 /* 3372 * Note that interrupts are not enabled yet as 3373 * mutex locks are not initialized. Initialize mutex locks. 3374 */ 3375 mutex_init(bgep->genlock, NULL, MUTEX_DRIVER, 3376 DDI_INTR_PRI(bgep->intr_pri)); 3377 mutex_init(bgep->softintrlock, NULL, MUTEX_DRIVER, 3378 DDI_INTR_PRI(bgep->intr_pri)); 3379 rw_init(bgep->errlock, NULL, RW_DRIVER, 3380 DDI_INTR_PRI(bgep->intr_pri)); 3381 3382 /* 3383 * Initialize rings. 3384 */ 3385 bge_init_rings(bgep); 3386 3387 /* 3388 * Now that mutex locks are initialized, enable interrupts. 3389 */ 3390 bge_intr_enable(bgep); 3391 bgep->progress |= PROGRESS_INTR; 3392 3393 /* 3394 * Initialise link state variables 3395 * Stop, reset & reinitialise the chip. 3396 * Initialise the (internal) PHY. 3397 */ 3398 bgep->link_state = LINK_STATE_UNKNOWN; 3399 3400 mutex_enter(bgep->genlock); 3401 3402 /* 3403 * Reset chip & rings to initial state; also reset address 3404 * filtering, promiscuity, loopback mode. 
3405 */ 3406 #ifdef BGE_IPMI_ASF 3407 #ifdef BGE_NETCONSOLE 3408 if (bge_reset(bgep, ASF_MODE_INIT) != DDI_SUCCESS) { 3409 #else 3410 if (bge_reset(bgep, ASF_MODE_SHUTDOWN) != DDI_SUCCESS) { 3411 #endif 3412 #else 3413 if (bge_reset(bgep) != DDI_SUCCESS) { 3414 #endif 3415 (void) bge_check_acc_handle(bgep, bgep->cfg_handle); 3416 (void) bge_check_acc_handle(bgep, bgep->io_handle); 3417 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 3418 mutex_exit(bgep->genlock); 3419 goto attach_fail; 3420 } 3421 3422 #ifdef BGE_IPMI_ASF 3423 if (bgep->asf_enabled) { 3424 bgep->asf_status = ASF_STAT_RUN_INIT; 3425 } 3426 #endif 3427 3428 bzero(bgep->mcast_hash, sizeof (bgep->mcast_hash)); 3429 bzero(bgep->mcast_refs, sizeof (bgep->mcast_refs)); 3430 bgep->promisc = B_FALSE; 3431 bgep->param_loop_mode = BGE_LOOP_NONE; 3432 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) { 3433 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 3434 mutex_exit(bgep->genlock); 3435 goto attach_fail; 3436 } 3437 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) { 3438 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 3439 mutex_exit(bgep->genlock); 3440 goto attach_fail; 3441 } 3442 3443 mutex_exit(bgep->genlock); 3444 3445 if (bge_phys_init(bgep) == EIO) { 3446 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 3447 goto attach_fail; 3448 } 3449 bgep->progress |= PROGRESS_PHY; 3450 3451 /* 3452 * Initialise NDD-tweakable parameters 3453 */ 3454 if (bge_nd_init(bgep)) { 3455 bge_problem(bgep, "bge_nd_init() failed"); 3456 goto attach_fail; 3457 } 3458 bgep->progress |= PROGRESS_NDD; 3459 3460 /* 3461 * Create & initialise named kstats 3462 */ 3463 bge_init_kstats(bgep, instance); 3464 bgep->progress |= PROGRESS_KSTATS; 3465 3466 /* 3467 * Determine whether to override the chip's own MAC address 3468 */ 3469 bge_find_mac_address(bgep, cidp); 3470 3471 bgep->unicst_addr_total = MAC_ADDRESS_REGS_MAX; 3472 bgep->unicst_addr_avail = MAC_ADDRESS_REGS_MAX; 3473 3474 if ((macp = mac_alloc(MAC_VERSION)) == NULL) 3475 goto attach_fail; 3476 macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER; 3477 macp->m_driver = bgep; 3478 macp->m_dip = devinfo; 3479 macp->m_src_addr = cidp->vendor_addr.addr; 3480 macp->m_callbacks = &bge_m_callbacks; 3481 macp->m_min_sdu = 0; 3482 macp->m_max_sdu = cidp->ethmax_size - sizeof (struct ether_header); 3483 macp->m_margin = VLAN_TAGSZ; 3484 macp->m_priv_props = bge_priv_prop; 3485 macp->m_priv_prop_count = BGE_MAX_PRIV_PROPS; 3486 macp->m_v12n = MAC_VIRT_LEVEL1; 3487 3488 /* 3489 * Finally, we're ready to register ourselves with the MAC layer 3490 * interface; if this succeeds, we're all ready to start() 3491 */ 3492 err = mac_register(macp, &bgep->mh); 3493 mac_free(macp); 3494 if (err != 0) 3495 goto attach_fail; 3496 3497 /* 3498 * Register a periodic handler. 3499 * bge_chip_cyclic() is invoked in kernel context.
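 * The interval below, BGE_CYCLIC_PERIOD, is in nanoseconds (see
 * ddi_periodic_add(9F)); the handler is cancelled again in
 * bge_unattach() via ddi_periodic_delete().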
3500 */ 3501 bgep->periodic_id = ddi_periodic_add(bge_chip_cyclic, bgep, 3502 BGE_CYCLIC_PERIOD, DDI_IPL_0); 3503 3504 bgep->progress |= PROGRESS_READY; 3505 ASSERT(bgep->bge_guard == BGE_GUARD); 3506 #ifdef BGE_IPMI_ASF 3507 #ifdef BGE_NETCONSOLE 3508 if (bgep->asf_enabled) { 3509 mutex_enter(bgep->genlock); 3510 retval = bge_chip_start(bgep, B_TRUE); 3511 mutex_exit(bgep->genlock); 3512 if (retval != DDI_SUCCESS) 3513 goto attach_fail; 3514 } 3515 #endif 3516 #endif 3517 3518 ddi_report_dev(devinfo); 3519 return (DDI_SUCCESS); 3520 3521 attach_fail: 3522 #ifdef BGE_IPMI_ASF 3523 bge_unattach(bgep, ASF_MODE_SHUTDOWN); 3524 #else 3525 bge_unattach(bgep); 3526 #endif 3527 return (DDI_FAILURE); 3528 } 3529 3530 /* 3531 * bge_suspend() -- suspend transmit/receive for powerdown 3532 */ 3533 static int 3534 bge_suspend(bge_t *bgep) 3535 { 3536 /* 3537 * Stop processing and idle (powerdown) the PHY ... 3538 */ 3539 mutex_enter(bgep->genlock); 3540 #ifdef BGE_IPMI_ASF 3541 /* 3542 * Power management is not currently supported in BGE. To 3543 * implement it, add the ASF/IPMI-related 3544 * code here. 3545 */ 3546 #endif 3547 bge_stop(bgep); 3548 if (bge_phys_idle(bgep) != DDI_SUCCESS) { 3549 (void) bge_check_acc_handle(bgep, bgep->io_handle); 3550 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 3551 mutex_exit(bgep->genlock); 3552 return (DDI_FAILURE); 3553 } 3554 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) { 3555 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 3556 mutex_exit(bgep->genlock); 3557 return (DDI_FAILURE); 3558 } 3559 mutex_exit(bgep->genlock); 3560 3561 return (DDI_SUCCESS); 3562 } 3563 3564 /* 3565 * quiesce(9E) entry point. 3566 * 3567 * This function is called when the system is single-threaded at high 3568 * PIL with preemption disabled. Therefore, this function must not 3569 * block. 3570 * 3571 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure. 3572 * DDI_FAILURE indicates an error condition and should almost never happen.
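 * (The no-blocking rule is why the implementation below masks the
 * interrupt and then uses bge_chip_stop_nonblocking() instead of the
 * normal, lock-taking stop path.)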
3573 */ 3574 #ifdef __sparc 3575 #define bge_quiesce ddi_quiesce_not_supported 3576 #else 3577 static int 3578 bge_quiesce(dev_info_t *devinfo) 3579 { 3580 bge_t *bgep = ddi_get_driver_private(devinfo); 3581 3582 if (bgep == NULL) 3583 return (DDI_FAILURE); 3584 3585 if (bgep->intr_type == DDI_INTR_TYPE_FIXED) { 3586 bge_reg_set32(bgep, PCI_CONF_BGE_MHCR, 3587 MHCR_MASK_PCI_INT_OUTPUT); 3588 } else { 3589 bge_reg_clr32(bgep, MSI_MODE_REG, MSI_MSI_ENABLE); 3590 } 3591 3592 /* Stop the chip */ 3593 bge_chip_stop_nonblocking(bgep); 3594 3595 return (DDI_SUCCESS); 3596 } 3597 #endif 3598 3599 /* 3600 * detach(9E) -- Detach a device from the system 3601 */ 3602 static int 3603 bge_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd) 3604 { 3605 bge_t *bgep; 3606 #ifdef BGE_IPMI_ASF 3607 uint_t asf_mode; 3608 asf_mode = ASF_MODE_NONE; 3609 #endif 3610 3611 BGE_GTRACE(("bge_detach($%p, %d)", (void *)devinfo, cmd)); 3612 3613 bgep = ddi_get_driver_private(devinfo); 3614 3615 switch (cmd) { 3616 default: 3617 return (DDI_FAILURE); 3618 3619 case DDI_SUSPEND: 3620 return (bge_suspend(bgep)); 3621 3622 case DDI_DETACH: 3623 break; 3624 } 3625 3626 #ifdef BGE_IPMI_ASF 3627 mutex_enter(bgep->genlock); 3628 if (bgep->asf_enabled && ((bgep->asf_status == ASF_STAT_RUN) || 3629 (bgep->asf_status == ASF_STAT_RUN_INIT))) { 3630 3631 bge_asf_update_status(bgep); 3632 if (bgep->asf_status == ASF_STAT_RUN) { 3633 bge_asf_stop_timer(bgep); 3634 } 3635 bgep->asf_status = ASF_STAT_STOP; 3636 3637 bge_asf_pre_reset_operations(bgep, BGE_SHUTDOWN_RESET); 3638 3639 if (bgep->asf_pseudostop) { 3640 bge_chip_stop(bgep, B_FALSE); 3641 bgep->bge_mac_state = BGE_MAC_STOPPED; 3642 bgep->asf_pseudostop = B_FALSE; 3643 } 3644 3645 asf_mode = ASF_MODE_POST_SHUTDOWN; 3646 3647 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) 3648 ddi_fm_service_impact(bgep->devinfo, 3649 DDI_SERVICE_UNAFFECTED); 3650 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) 3651 ddi_fm_service_impact(bgep->devinfo, 3652 DDI_SERVICE_UNAFFECTED); 3653 } 3654 mutex_exit(bgep->genlock); 3655 #endif 3656 3657 /* 3658 * Unregister from the GLD subsystem. This can fail, in 3659 * particular if there are DLPI style-2 streams still open - 3660 * in which case we just return failure without shutting 3661 * down chip operations. 3662 */ 3663 if (mac_unregister(bgep->mh) != 0) 3664 return (DDI_FAILURE); 3665 3666 /* 3667 * All activity stopped, so we can clean up & exit 3668 */ 3669 #ifdef BGE_IPMI_ASF 3670 bge_unattach(bgep, asf_mode); 3671 #else 3672 bge_unattach(bgep); 3673 #endif 3674 return (DDI_SUCCESS); 3675 } 3676 3677 3678 /* 3679 * ========== Module Loading Data & Entry Points ========== 3680 */ 3681 3682 #undef BGE_DBG 3683 #define BGE_DBG BGE_DBG_INIT /* debug flag for this code */ 3684 3685 DDI_DEFINE_STREAM_OPS(bge_dev_ops, 3686 nulldev, /* identify */ 3687 nulldev, /* probe */ 3688 bge_attach, /* attach */ 3689 bge_detach, /* detach */ 3690 nodev, /* reset */ 3691 NULL, /* cb_ops */ 3692 D_MP, /* bus_ops */ 3693 NULL, /* power */ 3694 bge_quiesce /* quiesce */ 3695 ); 3696 3697 static struct modldrv bge_modldrv = { 3698 &mod_driverops, /* Type of module. 
This one is a driver */ 3699 bge_ident, /* short description */ 3700 &bge_dev_ops /* driver specific ops */ 3701 }; 3702 3703 static struct modlinkage modlinkage = { 3704 MODREV_1, (void *)&bge_modldrv, NULL 3705 }; 3706 3707 3708 int 3709 _info(struct modinfo *modinfop) 3710 { 3711 return (mod_info(&modlinkage, modinfop)); 3712 } 3713 3714 int 3715 _init(void) 3716 { 3717 int status; 3718 3719 mac_init_ops(&bge_dev_ops, "bge"); 3720 status = mod_install(&modlinkage); 3721 if (status == DDI_SUCCESS) 3722 mutex_init(bge_log_mutex, NULL, MUTEX_DRIVER, NULL); 3723 else 3724 mac_fini_ops(&bge_dev_ops); 3725 return (status); 3726 } 3727 3728 int 3729 _fini(void) 3730 { 3731 int status; 3732 3733 status = mod_remove(&modlinkage); 3734 if (status == DDI_SUCCESS) { 3735 mac_fini_ops(&bge_dev_ops); 3736 mutex_destroy(bge_log_mutex); 3737 } 3738 return (status); 3739 } 3740 3741 3742 /* 3743 * bge_add_intrs: 3744 * 3745 * Register FIXED or MSI interrupts. 3746 */ 3747 static int 3748 bge_add_intrs(bge_t *bgep, int intr_type) 3749 { 3750 dev_info_t *dip = bgep->devinfo; 3751 int avail, actual, intr_size, count = 0; 3752 int i, flag, ret; 3753 3754 BGE_DEBUG(("bge_add_intrs($%p, 0x%x)", (void *)bgep, intr_type)); 3755 3756 /* Get number of interrupts */ 3757 ret = ddi_intr_get_nintrs(dip, intr_type, &count); 3758 if ((ret != DDI_SUCCESS) || (count == 0)) { 3759 bge_error(bgep, "ddi_intr_get_nintrs() failure, ret: %d, " 3760 "count: %d", ret, count); 3761 3762 return (DDI_FAILURE); 3763 } 3764 3765 /* Get number of available interrupts */ 3766 ret = ddi_intr_get_navail(dip, intr_type, &avail); 3767 if ((ret != DDI_SUCCESS) || (avail == 0)) { 3768 bge_error(bgep, "ddi_intr_get_navail() failure, " 3769 "ret: %d, avail: %d\n", ret, avail); 3770 3771 return (DDI_FAILURE); 3772 } 3773 3774 if (avail < count) { 3775 BGE_DEBUG(("%s: nintrs() returned %d, navail returned %d", 3776 bgep->ifname, count, avail)); 3777 } 3778 3779 /* 3780 * BGE hardware generates only single MSI even though it claims 3781 * to support multiple MSIs. So, hard code MSI count value to 1. 
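 * With count fixed at 1, DDI_INTR_ALLOC_STRICT below makes
 * ddi_intr_alloc() an all-or-nothing request: we get exactly one MSI
 * or the allocation fails.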
3782 */ 3783 if (intr_type == DDI_INTR_TYPE_MSI) { 3784 count = 1; 3785 flag = DDI_INTR_ALLOC_STRICT; 3786 } else { 3787 flag = DDI_INTR_ALLOC_NORMAL; 3788 } 3789 3790 /* Allocate an array of interrupt handles */ 3791 intr_size = count * sizeof (ddi_intr_handle_t); 3792 bgep->htable = kmem_alloc(intr_size, KM_SLEEP); 3793 3794 /* Call ddi_intr_alloc() */ 3795 ret = ddi_intr_alloc(dip, bgep->htable, intr_type, 0, 3796 count, &actual, flag); 3797 3798 if ((ret != DDI_SUCCESS) || (actual == 0)) { 3799 bge_error(bgep, "ddi_intr_alloc() failed %d\n", ret); 3800 3801 kmem_free(bgep->htable, intr_size); 3802 return (DDI_FAILURE); 3803 } 3804 3805 if (actual < count) { 3806 BGE_DEBUG(("%s: Requested: %d, Received: %d", 3807 bgep->ifname, count, actual)); 3808 } 3809 3810 bgep->intr_cnt = actual; 3811 3812 /* 3813 * Get priority for first msi, assume remaining are all the same 3814 */ 3815 if ((ret = ddi_intr_get_pri(bgep->htable[0], &bgep->intr_pri)) != 3816 DDI_SUCCESS) { 3817 bge_error(bgep, "ddi_intr_get_pri() failed %d\n", ret); 3818 3819 /* Free already allocated intr */ 3820 for (i = 0; i < actual; i++) { 3821 (void) ddi_intr_free(bgep->htable[i]); 3822 } 3823 3824 kmem_free(bgep->htable, intr_size); 3825 return (DDI_FAILURE); 3826 } 3827 3828 /* Call ddi_intr_add_handler() */ 3829 for (i = 0; i < actual; i++) { 3830 if ((ret = ddi_intr_add_handler(bgep->htable[i], bge_intr, 3831 (caddr_t)bgep, (caddr_t)(uintptr_t)i)) != DDI_SUCCESS) { 3832 bge_error(bgep, "ddi_intr_add_handler() " 3833 "failed %d\n", ret); 3834 3835 /* Free already allocated intr */ 3836 for (i = 0; i < actual; i++) { 3837 (void) ddi_intr_free(bgep->htable[i]); 3838 } 3839 3840 kmem_free(bgep->htable, intr_size); 3841 return (DDI_FAILURE); 3842 } 3843 } 3844 3845 if ((ret = ddi_intr_get_cap(bgep->htable[0], &bgep->intr_cap)) 3846 != DDI_SUCCESS) { 3847 bge_error(bgep, "ddi_intr_get_cap() failed %d\n", ret); 3848 3849 for (i = 0; i < actual; i++) { 3850 (void) ddi_intr_remove_handler(bgep->htable[i]); 3851 (void) ddi_intr_free(bgep->htable[i]); 3852 } 3853 3854 kmem_free(bgep->htable, intr_size); 3855 return (DDI_FAILURE); 3856 } 3857 3858 return (DDI_SUCCESS); 3859 } 3860 3861 /* 3862 * bge_rem_intrs: 3863 * 3864 * Unregister FIXED or MSI interrupts 3865 */ 3866 static void 3867 bge_rem_intrs(bge_t *bgep) 3868 { 3869 int i; 3870 3871 BGE_DEBUG(("bge_rem_intrs($%p)", (void *)bgep)); 3872 3873 /* Call ddi_intr_remove_handler() */ 3874 for (i = 0; i < bgep->intr_cnt; i++) { 3875 (void) ddi_intr_remove_handler(bgep->htable[i]); 3876 (void) ddi_intr_free(bgep->htable[i]); 3877 } 3878 3879 kmem_free(bgep->htable, bgep->intr_cnt * sizeof (ddi_intr_handle_t)); 3880 } 3881 3882 3883 void 3884 bge_intr_enable(bge_t *bgep) 3885 { 3886 int i; 3887 3888 if (bgep->intr_cap & DDI_INTR_FLAG_BLOCK) { 3889 /* Call ddi_intr_block_enable() for MSI interrupts */ 3890 (void) ddi_intr_block_enable(bgep->htable, bgep->intr_cnt); 3891 } else { 3892 /* Call ddi_intr_enable for MSI or FIXED interrupts */ 3893 for (i = 0; i < bgep->intr_cnt; i++) { 3894 (void) ddi_intr_enable(bgep->htable[i]); 3895 } 3896 } 3897 } 3898 3899 3900 void 3901 bge_intr_disable(bge_t *bgep) 3902 { 3903 int i; 3904 3905 if (bgep->intr_cap & DDI_INTR_FLAG_BLOCK) { 3906 /* Call ddi_intr_block_disable() */ 3907 (void) ddi_intr_block_disable(bgep->htable, bgep->intr_cnt); 3908 } else { 3909 for (i = 0; i < bgep->intr_cnt; i++) { 3910 (void) ddi_intr_disable(bgep->htable[i]); 3911 } 3912 } 3913 } 3914 3915 int 3916 bge_reprogram(bge_t *bgep) 3917 { 3918 int status = 0; 3919 3920 
ASSERT(mutex_owned(bgep->genlock)); 3921 3922 if (bge_phys_update(bgep) != DDI_SUCCESS) { 3923 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 3924 status = IOC_INVAL; 3925 } 3926 #ifdef BGE_IPMI_ASF 3927 if (bge_chip_sync(bgep, B_TRUE) == DDI_FAILURE) { 3928 #else 3929 if (bge_chip_sync(bgep) == DDI_FAILURE) { 3930 #endif 3931 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 3932 status = IOC_INVAL; 3933 } 3934 if (bgep->intr_type == DDI_INTR_TYPE_MSI) 3935 bge_chip_msi_trig(bgep); 3936 return (status); 3937 } 3938
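
/*
 * A sketch of the expected calling pattern for bge_reprogram(); as the
 * ASSERT above requires, the caller must hold genlock (cf. the
 * IOC_RESTART_* handling in bge_m_ioctl()):
 *
 *	mutex_enter(bgep->genlock);
 *	if (bge_reprogram(bgep) == IOC_INVAL)
 *		status = IOC_INVAL;
 *	mutex_exit(bgep->genlock);
 */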