1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright 2009 Sun Microsystems, Inc. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27 #include "bge_impl.h" 28 #include <sys/sdt.h> 29 #include <sys/mac_provider.h> 30 #include <sys/mac.h> 31 #include <sys/mac_flow.h> 32 33 /* 34 * This is the string displayed by modinfo, etc. 35 */ 36 static char bge_ident[] = "Broadcom Gb Ethernet"; 37 /* 38 * Make sure you keep the version ID up to date! 39 */ 40 static char bge_version[] = "Broadcom Gb Ethernet v1.06"; 41 42 /* 43 * Property names 44 */ 45 static char debug_propname[] = "bge-debug-flags"; 46 static char clsize_propname[] = "cache-line-size"; 47 static char latency_propname[] = "latency-timer"; 48 static char localmac_boolname[] = "local-mac-address?"; 49 static char localmac_propname[] = "local-mac-address"; 50 static char macaddr_propname[] = "mac-address"; 51 static char subdev_propname[] = "subsystem-id"; 52 static char subven_propname[] = "subsystem-vendor-id"; 53 static char rxrings_propname[] = "bge-rx-rings"; 54 static char txrings_propname[] = "bge-tx-rings"; 55 static char fm_cap[] = "fm-capable"; 56 static char default_mtu[] = "default_mtu"; 57 58 static int bge_add_intrs(bge_t *, int); 59 static void bge_rem_intrs(bge_t *); 60 static int bge_unicst_set(void *, const uint8_t *, int); 61 62 /* 63 * Describes the chip's DMA engine 64 */ 65 static ddi_dma_attr_t dma_attr = { 66 DMA_ATTR_V0, /* dma_attr version */ 67 0x0000000000000000ull, /* dma_attr_addr_lo */ 68 0xFFFFFFFFFFFFFFFFull, /* dma_attr_addr_hi */ 69 0x00000000FFFFFFFFull, /* dma_attr_count_max */ 70 0x0000000000000001ull, /* dma_attr_align */ 71 0x00000FFF, /* dma_attr_burstsizes */ 72 0x00000001, /* dma_attr_minxfer */ 73 0x000000000000FFFFull, /* dma_attr_maxxfer */ 74 0xFFFFFFFFFFFFFFFFull, /* dma_attr_seg */ 75 1, /* dma_attr_sgllen */ 76 0x00000001, /* dma_attr_granular */ 77 DDI_DMA_FLAGERR /* dma_attr_flags */ 78 }; 79 80 /* 81 * PIO access attributes for registers 82 */ 83 static ddi_device_acc_attr_t bge_reg_accattr = { 84 DDI_DEVICE_ATTR_V0, 85 DDI_NEVERSWAP_ACC, 86 DDI_STRICTORDER_ACC, 87 DDI_FLAGERR_ACC 88 }; 89 90 /* 91 * DMA access attributes for descriptors: NOT to be byte swapped. 92 */ 93 static ddi_device_acc_attr_t bge_desc_accattr = { 94 DDI_DEVICE_ATTR_V0, 95 DDI_NEVERSWAP_ACC, 96 DDI_STRICTORDER_ACC, 97 DDI_FLAGERR_ACC 98 }; 99 100 /* 101 * DMA access attributes for data: NOT to be byte swapped. 102 */ 103 static ddi_device_acc_attr_t bge_data_accattr = { 104 DDI_DEVICE_ATTR_V0, 105 DDI_NEVERSWAP_ACC, 106 DDI_STRICTORDER_ACC 107 }; 108 109 static int bge_m_start(void *); 110 static void bge_m_stop(void *); 111 static int bge_m_promisc(void *, boolean_t); 112 static int bge_m_multicst(void *, boolean_t, const uint8_t *); 113 static void bge_m_ioctl(void *, queue_t *, mblk_t *); 114 static boolean_t bge_m_getcapab(void *, mac_capab_t, void *); 115 static int bge_unicst_set(void *, const uint8_t *, 116 int); 117 static int bge_m_setprop(void *, const char *, mac_prop_id_t, 118 uint_t, const void *); 119 static int bge_m_getprop(void *, const char *, mac_prop_id_t, 120 uint_t, uint_t, void *, uint_t *); 121 static int bge_set_priv_prop(bge_t *, const char *, uint_t, 122 const void *); 123 static int bge_get_priv_prop(bge_t *, const char *, uint_t, 124 uint_t, void *); 125 126 #define BGE_M_CALLBACK_FLAGS (MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP) 127 128 static mac_callbacks_t bge_m_callbacks = { 129 BGE_M_CALLBACK_FLAGS, 130 bge_m_stat, 131 bge_m_start, 132 bge_m_stop, 133 bge_m_promisc, 134 bge_m_multicst, 135 NULL, 136 bge_m_tx, 137 bge_m_ioctl, 138 bge_m_getcapab, 139 NULL, 140 NULL, 141 bge_m_setprop, 142 bge_m_getprop 143 }; 144 145 mac_priv_prop_t bge_priv_prop[] = { 146 {"_adv_asym_pause_cap", MAC_PROP_PERM_RW}, 147 {"_adv_pause_cap", MAC_PROP_PERM_RW} 148 }; 149 150 #define BGE_MAX_PRIV_PROPS \ 151 (sizeof (bge_priv_prop) / sizeof (mac_priv_prop_t)) 152 153 uint8_t zero_addr[6] = {0, 0, 0, 0, 0, 0}; 154 /* 155 * ========== Transmit and receive ring reinitialisation ========== 156 */ 157 158 /* 159 * These <reinit> routines each reset the specified ring to an initial 160 * state, assuming that the corresponding <init> routine has already 161 * been called exactly once. 162 */ 163 164 static void 165 bge_reinit_send_ring(send_ring_t *srp) 166 { 167 bge_queue_t *txbuf_queue; 168 bge_queue_item_t *txbuf_head; 169 sw_txbuf_t *txbuf; 170 sw_sbd_t *ssbdp; 171 uint32_t slot; 172 173 /* 174 * Reinitialise control variables ... 175 */ 176 srp->tx_flow = 0; 177 srp->tx_next = 0; 178 srp->txfill_next = 0; 179 srp->tx_free = srp->desc.nslots; 180 ASSERT(mutex_owned(srp->tc_lock)); 181 srp->tc_next = 0; 182 srp->txpkt_next = 0; 183 srp->tx_block = 0; 184 srp->tx_nobd = 0; 185 srp->tx_nobuf = 0; 186 187 /* 188 * Initialize the tx buffer push queue 189 */ 190 mutex_enter(srp->freetxbuf_lock); 191 mutex_enter(srp->txbuf_lock); 192 txbuf_queue = &srp->freetxbuf_queue; 193 txbuf_queue->head = NULL; 194 txbuf_queue->count = 0; 195 txbuf_queue->lock = srp->freetxbuf_lock; 196 srp->txbuf_push_queue = txbuf_queue; 197 198 /* 199 * Initialize the tx buffer pop queue 200 */ 201 txbuf_queue = &srp->txbuf_queue; 202 txbuf_queue->head = NULL; 203 txbuf_queue->count = 0; 204 txbuf_queue->lock = srp->txbuf_lock; 205 srp->txbuf_pop_queue = txbuf_queue; 206 txbuf_head = srp->txbuf_head; 207 txbuf = srp->txbuf; 208 for (slot = 0; slot < srp->tx_buffers; ++slot) { 209 txbuf_head->item = txbuf; 210 txbuf_head->next = txbuf_queue->head; 211 txbuf_queue->head = txbuf_head; 212 txbuf_queue->count++; 213 txbuf++; 214 txbuf_head++; 215 } 216 mutex_exit(srp->txbuf_lock); 217 mutex_exit(srp->freetxbuf_lock); 218 219 /* 220 * Zero and sync all the h/w Send Buffer Descriptors 221 */ 222 DMA_ZERO(srp->desc); 223 DMA_SYNC(srp->desc, DDI_DMA_SYNC_FORDEV); 224 bzero(srp->pktp, BGE_SEND_BUF_MAX * sizeof (*srp->pktp)); 225 ssbdp = srp->sw_sbds; 226 for (slot = 0; slot < srp->desc.nslots; ++ssbdp, ++slot) 227 ssbdp->pbuf = NULL; 228 } 229 230 static void 231 bge_reinit_recv_ring(recv_ring_t *rrp) 232 { 233 /* 234 * Reinitialise control variables ... 235 */ 236 rrp->rx_next = 0; 237 } 238 239 static void 240 bge_reinit_buff_ring(buff_ring_t *brp, uint32_t ring) 241 { 242 bge_rbd_t *hw_rbd_p; 243 sw_rbd_t *srbdp; 244 uint32_t bufsize; 245 uint32_t nslots; 246 uint32_t slot; 247 248 static uint16_t ring_type_flag[BGE_BUFF_RINGS_MAX] = { 249 RBD_FLAG_STD_RING, 250 RBD_FLAG_JUMBO_RING, 251 RBD_FLAG_MINI_RING 252 }; 253 254 /* 255 * Zero, initialise and sync all the h/w Receive Buffer Descriptors 256 * Note: all the remaining fields (<type>, <flags>, <ip_cksum>, 257 * <tcp_udp_cksum>, <error_flag>, <vlan_tag>, and <reserved>) 258 * should be zeroed, and so don't need to be set up specifically 259 * once the whole area has been cleared. 260 */ 261 DMA_ZERO(brp->desc); 262 263 hw_rbd_p = DMA_VPTR(brp->desc); 264 nslots = brp->desc.nslots; 265 ASSERT(brp->buf[0].nslots == nslots/BGE_SPLIT); 266 bufsize = brp->buf[0].size; 267 srbdp = brp->sw_rbds; 268 for (slot = 0; slot < nslots; ++hw_rbd_p, ++srbdp, ++slot) { 269 hw_rbd_p->host_buf_addr = srbdp->pbuf.cookie.dmac_laddress; 270 hw_rbd_p->index = (uint16_t)slot; 271 hw_rbd_p->len = (uint16_t)bufsize; 272 hw_rbd_p->opaque = srbdp->pbuf.token; 273 hw_rbd_p->flags |= ring_type_flag[ring]; 274 } 275 276 DMA_SYNC(brp->desc, DDI_DMA_SYNC_FORDEV); 277 278 /* 279 * Finally, reinitialise the ring control variables ... 280 */ 281 brp->rf_next = (nslots != 0) ? (nslots-1) : 0; 282 } 283 284 /* 285 * Reinitialize all rings 286 */ 287 static void 288 bge_reinit_rings(bge_t *bgep) 289 { 290 uint32_t ring; 291 292 ASSERT(mutex_owned(bgep->genlock)); 293 294 /* 295 * Send Rings ... 296 */ 297 for (ring = 0; ring < bgep->chipid.tx_rings; ++ring) 298 bge_reinit_send_ring(&bgep->send[ring]); 299 300 /* 301 * Receive Return Rings ... 302 */ 303 for (ring = 0; ring < bgep->chipid.rx_rings; ++ring) 304 bge_reinit_recv_ring(&bgep->recv[ring]); 305 306 /* 307 * Receive Producer Rings ... 308 */ 309 for (ring = 0; ring < BGE_BUFF_RINGS_USED; ++ring) 310 bge_reinit_buff_ring(&bgep->buff[ring], ring); 311 } 312 313 /* 314 * ========== Internal state management entry points ========== 315 */ 316 317 #undef BGE_DBG 318 #define BGE_DBG BGE_DBG_NEMO /* debug flag for this code */ 319 320 /* 321 * These routines provide all the functionality required by the 322 * corresponding GLD entry points, but don't update the GLD state 323 * so they can be called internally without disturbing our record 324 * of what GLD thinks we should be doing ... 325 */ 326 327 /* 328 * bge_reset() -- reset h/w & rings to initial state 329 */ 330 static int 331 #ifdef BGE_IPMI_ASF 332 bge_reset(bge_t *bgep, uint_t asf_mode) 333 #else 334 bge_reset(bge_t *bgep) 335 #endif 336 { 337 uint32_t ring; 338 int retval; 339 340 BGE_TRACE(("bge_reset($%p)", (void *)bgep)); 341 342 ASSERT(mutex_owned(bgep->genlock)); 343 344 /* 345 * Grab all the other mutexes in the world (this should 346 * ensure no other threads are manipulating driver state) 347 */ 348 for (ring = 0; ring < BGE_RECV_RINGS_MAX; ++ring) 349 mutex_enter(bgep->recv[ring].rx_lock); 350 for (ring = 0; ring < BGE_BUFF_RINGS_MAX; ++ring) 351 mutex_enter(bgep->buff[ring].rf_lock); 352 rw_enter(bgep->errlock, RW_WRITER); 353 for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring) 354 mutex_enter(bgep->send[ring].tx_lock); 355 for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring) 356 mutex_enter(bgep->send[ring].tc_lock); 357 358 #ifdef BGE_IPMI_ASF 359 retval = bge_chip_reset(bgep, B_TRUE, asf_mode); 360 #else 361 retval = bge_chip_reset(bgep, B_TRUE); 362 #endif 363 bge_reinit_rings(bgep); 364 365 /* 366 * Free the world ... 367 */ 368 for (ring = BGE_SEND_RINGS_MAX; ring-- > 0; ) 369 mutex_exit(bgep->send[ring].tc_lock); 370 for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring) 371 mutex_exit(bgep->send[ring].tx_lock); 372 rw_exit(bgep->errlock); 373 for (ring = BGE_BUFF_RINGS_MAX; ring-- > 0; ) 374 mutex_exit(bgep->buff[ring].rf_lock); 375 for (ring = BGE_RECV_RINGS_MAX; ring-- > 0; ) 376 mutex_exit(bgep->recv[ring].rx_lock); 377 378 BGE_DEBUG(("bge_reset($%p) done", (void *)bgep)); 379 return (retval); 380 } 381 382 /* 383 * bge_stop() -- stop processing, don't reset h/w or rings 384 */ 385 static void 386 bge_stop(bge_t *bgep) 387 { 388 BGE_TRACE(("bge_stop($%p)", (void *)bgep)); 389 390 ASSERT(mutex_owned(bgep->genlock)); 391 392 #ifdef BGE_IPMI_ASF 393 if (bgep->asf_enabled) { 394 bgep->asf_pseudostop = B_TRUE; 395 } else { 396 #endif 397 bge_chip_stop(bgep, B_FALSE); 398 #ifdef BGE_IPMI_ASF 399 } 400 #endif 401 402 BGE_DEBUG(("bge_stop($%p) done", (void *)bgep)); 403 } 404 405 /* 406 * bge_start() -- start transmitting/receiving 407 */ 408 static int 409 bge_start(bge_t *bgep, boolean_t reset_phys) 410 { 411 int retval; 412 413 BGE_TRACE(("bge_start($%p, %d)", (void *)bgep, reset_phys)); 414 415 ASSERT(mutex_owned(bgep->genlock)); 416 417 /* 418 * Start chip processing, including enabling interrupts 419 */ 420 retval = bge_chip_start(bgep, reset_phys); 421 422 BGE_DEBUG(("bge_start($%p, %d) done", (void *)bgep, reset_phys)); 423 return (retval); 424 } 425 426 /* 427 * bge_restart - restart transmitting/receiving after error or suspend 428 */ 429 int 430 bge_restart(bge_t *bgep, boolean_t reset_phys) 431 { 432 int retval = DDI_SUCCESS; 433 ASSERT(mutex_owned(bgep->genlock)); 434 435 #ifdef BGE_IPMI_ASF 436 if (bgep->asf_enabled) { 437 if (bge_reset(bgep, ASF_MODE_POST_INIT) != DDI_SUCCESS) 438 retval = DDI_FAILURE; 439 } else 440 if (bge_reset(bgep, ASF_MODE_NONE) != DDI_SUCCESS) 441 retval = DDI_FAILURE; 442 #else 443 if (bge_reset(bgep) != DDI_SUCCESS) 444 retval = DDI_FAILURE; 445 #endif 446 if (bgep->bge_mac_state == BGE_MAC_STARTED) { 447 if (bge_start(bgep, reset_phys) != DDI_SUCCESS) 448 retval = DDI_FAILURE; 449 bgep->watchdog = 0; 450 ddi_trigger_softintr(bgep->drain_id); 451 } 452 453 BGE_DEBUG(("bge_restart($%p, %d) done", (void *)bgep, reset_phys)); 454 return (retval); 455 } 456 457 458 /* 459 * ========== Nemo-required management entry points ========== 460 */ 461 462 #undef BGE_DBG 463 #define BGE_DBG BGE_DBG_NEMO /* debug flag for this code */ 464 465 /* 466 * bge_m_stop() -- stop transmitting/receiving 467 */ 468 static void 469 bge_m_stop(void *arg) 470 { 471 bge_t *bgep = arg; /* private device info */ 472 send_ring_t *srp; 473 uint32_t ring; 474 475 BGE_TRACE(("bge_m_stop($%p)", arg)); 476 477 /* 478 * Just stop processing, then record new GLD state 479 */ 480 mutex_enter(bgep->genlock); 481 if (!(bgep->progress & PROGRESS_INTR)) { 482 /* can happen during autorecovery */ 483 bgep->bge_chip_state = BGE_CHIP_STOPPED; 484 } else 485 bge_stop(bgep); 486 487 bgep->link_update_timer = 0; 488 bgep->link_state = LINK_STATE_UNKNOWN; 489 mac_link_update(bgep->mh, bgep->link_state); 490 491 /* 492 * Free the possible tx buffers allocated in tx process. 493 */ 494 #ifdef BGE_IPMI_ASF 495 if (!bgep->asf_pseudostop) 496 #endif 497 { 498 rw_enter(bgep->errlock, RW_WRITER); 499 for (ring = 0; ring < bgep->chipid.tx_rings; ++ring) { 500 srp = &bgep->send[ring]; 501 mutex_enter(srp->tx_lock); 502 if (srp->tx_array > 1) 503 bge_free_txbuf_arrays(srp); 504 mutex_exit(srp->tx_lock); 505 } 506 rw_exit(bgep->errlock); 507 } 508 bgep->bge_mac_state = BGE_MAC_STOPPED; 509 BGE_DEBUG(("bge_m_stop($%p) done", arg)); 510 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) 511 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_UNAFFECTED); 512 mutex_exit(bgep->genlock); 513 } 514 515 /* 516 * bge_m_start() -- start transmitting/receiving 517 */ 518 static int 519 bge_m_start(void *arg) 520 { 521 bge_t *bgep = arg; /* private device info */ 522 523 BGE_TRACE(("bge_m_start($%p)", arg)); 524 525 /* 526 * Start processing and record new GLD state 527 */ 528 mutex_enter(bgep->genlock); 529 if (!(bgep->progress & PROGRESS_INTR)) { 530 /* can happen during autorecovery */ 531 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 532 mutex_exit(bgep->genlock); 533 return (EIO); 534 } 535 #ifdef BGE_IPMI_ASF 536 if (bgep->asf_enabled) { 537 if ((bgep->asf_status == ASF_STAT_RUN) && 538 (bgep->asf_pseudostop)) { 539 bgep->bge_mac_state = BGE_MAC_STARTED; 540 mutex_exit(bgep->genlock); 541 return (0); 542 } 543 } 544 if (bge_reset(bgep, ASF_MODE_INIT) != DDI_SUCCESS) { 545 #else 546 if (bge_reset(bgep) != DDI_SUCCESS) { 547 #endif 548 (void) bge_check_acc_handle(bgep, bgep->cfg_handle); 549 (void) bge_check_acc_handle(bgep, bgep->io_handle); 550 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 551 mutex_exit(bgep->genlock); 552 return (EIO); 553 } 554 if (bge_start(bgep, B_TRUE) != DDI_SUCCESS) { 555 (void) bge_check_acc_handle(bgep, bgep->cfg_handle); 556 (void) bge_check_acc_handle(bgep, bgep->io_handle); 557 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 558 mutex_exit(bgep->genlock); 559 return (EIO); 560 } 561 bgep->bge_mac_state = BGE_MAC_STARTED; 562 BGE_DEBUG(("bge_m_start($%p) done", arg)); 563 564 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) { 565 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 566 mutex_exit(bgep->genlock); 567 return (EIO); 568 } 569 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) { 570 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 571 mutex_exit(bgep->genlock); 572 return (EIO); 573 } 574 #ifdef BGE_IPMI_ASF 575 if (bgep->asf_enabled) { 576 if (bgep->asf_status != ASF_STAT_RUN) { 577 /* start ASF heart beat */ 578 bgep->asf_timeout_id = timeout(bge_asf_heartbeat, 579 (void *)bgep, 580 drv_usectohz(BGE_ASF_HEARTBEAT_INTERVAL)); 581 bgep->asf_status = ASF_STAT_RUN; 582 } 583 } 584 #endif 585 mutex_exit(bgep->genlock); 586 587 return (0); 588 } 589 590 /* 591 * bge_unicst_set() -- set the physical network address 592 */ 593 static int 594 bge_unicst_set(void *arg, const uint8_t *macaddr, int slot) 595 { 596 bge_t *bgep = arg; /* private device info */ 597 598 BGE_TRACE(("bge_m_unicst_set($%p, %s)", arg, 599 ether_sprintf((void *)macaddr))); 600 /* 601 * Remember the new current address in the driver state 602 * Sync the chip's idea of the address too ... 603 */ 604 mutex_enter(bgep->genlock); 605 if (!(bgep->progress & PROGRESS_INTR)) { 606 /* can happen during autorecovery */ 607 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 608 mutex_exit(bgep->genlock); 609 return (EIO); 610 } 611 ethaddr_copy(macaddr, bgep->curr_addr[slot].addr); 612 #ifdef BGE_IPMI_ASF 613 if (bge_chip_sync(bgep, B_FALSE) == DDI_FAILURE) { 614 #else 615 if (bge_chip_sync(bgep) == DDI_FAILURE) { 616 #endif 617 (void) bge_check_acc_handle(bgep, bgep->cfg_handle); 618 (void) bge_check_acc_handle(bgep, bgep->io_handle); 619 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 620 mutex_exit(bgep->genlock); 621 return (EIO); 622 } 623 #ifdef BGE_IPMI_ASF 624 if (bgep->asf_enabled) { 625 /* 626 * The above bge_chip_sync() function wrote the ethernet MAC 627 * addresses registers which destroyed the IPMI/ASF sideband. 628 * Here, we have to reset chip to make IPMI/ASF sideband work. 629 */ 630 if (bgep->asf_status == ASF_STAT_RUN) { 631 /* 632 * We must stop ASF heart beat before bge_chip_stop(), 633 * otherwise some computers (ex. IBM HS20 blade server) 634 * may crash. 635 */ 636 bge_asf_update_status(bgep); 637 bge_asf_stop_timer(bgep); 638 bgep->asf_status = ASF_STAT_STOP; 639 640 bge_asf_pre_reset_operations(bgep, BGE_INIT_RESET); 641 } 642 bge_chip_stop(bgep, B_FALSE); 643 644 if (bge_restart(bgep, B_FALSE) == DDI_FAILURE) { 645 (void) bge_check_acc_handle(bgep, bgep->cfg_handle); 646 (void) bge_check_acc_handle(bgep, bgep->io_handle); 647 ddi_fm_service_impact(bgep->devinfo, 648 DDI_SERVICE_DEGRADED); 649 mutex_exit(bgep->genlock); 650 return (EIO); 651 } 652 653 /* 654 * Start our ASF heartbeat counter as soon as possible. 655 */ 656 if (bgep->asf_status != ASF_STAT_RUN) { 657 /* start ASF heart beat */ 658 bgep->asf_timeout_id = timeout(bge_asf_heartbeat, 659 (void *)bgep, 660 drv_usectohz(BGE_ASF_HEARTBEAT_INTERVAL)); 661 bgep->asf_status = ASF_STAT_RUN; 662 } 663 } 664 #endif 665 BGE_DEBUG(("bge_m_unicst_set($%p) done", arg)); 666 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) { 667 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 668 mutex_exit(bgep->genlock); 669 return (EIO); 670 } 671 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) { 672 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 673 mutex_exit(bgep->genlock); 674 return (EIO); 675 } 676 mutex_exit(bgep->genlock); 677 678 return (0); 679 } 680 681 extern void bge_wake_factotum(bge_t *); 682 683 static boolean_t 684 bge_param_locked(mac_prop_id_t pr_num) 685 { 686 /* 687 * All adv_* parameters are locked (read-only) while 688 * the device is in any sort of loopback mode ... 689 */ 690 switch (pr_num) { 691 case MAC_PROP_ADV_1000FDX_CAP: 692 case MAC_PROP_EN_1000FDX_CAP: 693 case MAC_PROP_ADV_1000HDX_CAP: 694 case MAC_PROP_EN_1000HDX_CAP: 695 case MAC_PROP_ADV_100FDX_CAP: 696 case MAC_PROP_EN_100FDX_CAP: 697 case MAC_PROP_ADV_100HDX_CAP: 698 case MAC_PROP_EN_100HDX_CAP: 699 case MAC_PROP_ADV_10FDX_CAP: 700 case MAC_PROP_EN_10FDX_CAP: 701 case MAC_PROP_ADV_10HDX_CAP: 702 case MAC_PROP_EN_10HDX_CAP: 703 case MAC_PROP_AUTONEG: 704 case MAC_PROP_FLOWCTRL: 705 return (B_TRUE); 706 } 707 return (B_FALSE); 708 } 709 /* 710 * callback functions for set/get of properties 711 */ 712 static int 713 bge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num, 714 uint_t pr_valsize, const void *pr_val) 715 { 716 bge_t *bgep = barg; 717 int err = 0; 718 uint32_t cur_mtu, new_mtu; 719 uint_t maxsdu; 720 link_flowctrl_t fl; 721 722 mutex_enter(bgep->genlock); 723 if (bgep->param_loop_mode != BGE_LOOP_NONE && 724 bge_param_locked(pr_num)) { 725 /* 726 * All adv_* parameters are locked (read-only) 727 * while the device is in any sort of loopback mode. 728 */ 729 mutex_exit(bgep->genlock); 730 return (EBUSY); 731 } 732 if ((bgep->chipid.flags & CHIP_FLAG_SERDES) && 733 ((pr_num == MAC_PROP_EN_100FDX_CAP) || 734 (pr_num == MAC_PROP_EN_100HDX_CAP) || 735 (pr_num == MAC_PROP_EN_10FDX_CAP) || 736 (pr_num == MAC_PROP_EN_10HDX_CAP))) { 737 /* 738 * these properties are read/write on copper, 739 * read-only and 0 on serdes 740 */ 741 mutex_exit(bgep->genlock); 742 return (ENOTSUP); 743 } 744 if (DEVICE_5906_SERIES_CHIPSETS(bgep) && 745 ((pr_num == MAC_PROP_EN_1000FDX_CAP) || 746 (pr_num == MAC_PROP_EN_1000HDX_CAP))) { 747 mutex_exit(bgep->genlock); 748 return (ENOTSUP); 749 } 750 751 switch (pr_num) { 752 case MAC_PROP_EN_1000FDX_CAP: 753 bgep->param_en_1000fdx = *(uint8_t *)pr_val; 754 bgep->param_adv_1000fdx = *(uint8_t *)pr_val; 755 goto reprogram; 756 case MAC_PROP_EN_1000HDX_CAP: 757 bgep->param_en_1000hdx = *(uint8_t *)pr_val; 758 bgep->param_adv_1000hdx = *(uint8_t *)pr_val; 759 goto reprogram; 760 case MAC_PROP_EN_100FDX_CAP: 761 bgep->param_en_100fdx = *(uint8_t *)pr_val; 762 bgep->param_adv_100fdx = *(uint8_t *)pr_val; 763 goto reprogram; 764 case MAC_PROP_EN_100HDX_CAP: 765 bgep->param_en_100hdx = *(uint8_t *)pr_val; 766 bgep->param_adv_100hdx = *(uint8_t *)pr_val; 767 goto reprogram; 768 case MAC_PROP_EN_10FDX_CAP: 769 bgep->param_en_10fdx = *(uint8_t *)pr_val; 770 bgep->param_adv_10fdx = *(uint8_t *)pr_val; 771 goto reprogram; 772 case MAC_PROP_EN_10HDX_CAP: 773 bgep->param_en_10hdx = *(uint8_t *)pr_val; 774 bgep->param_adv_10hdx = *(uint8_t *)pr_val; 775 reprogram: 776 if (err == 0 && bge_reprogram(bgep) == IOC_INVAL) 777 err = EINVAL; 778 break; 779 case MAC_PROP_ADV_1000FDX_CAP: 780 case MAC_PROP_ADV_1000HDX_CAP: 781 case MAC_PROP_ADV_100FDX_CAP: 782 case MAC_PROP_ADV_100HDX_CAP: 783 case MAC_PROP_ADV_10FDX_CAP: 784 case MAC_PROP_ADV_10HDX_CAP: 785 case MAC_PROP_STATUS: 786 case MAC_PROP_SPEED: 787 case MAC_PROP_DUPLEX: 788 err = ENOTSUP; /* read-only prop. Can't set this */ 789 break; 790 case MAC_PROP_AUTONEG: 791 bgep->param_adv_autoneg = *(uint8_t *)pr_val; 792 if (bge_reprogram(bgep) == IOC_INVAL) 793 err = EINVAL; 794 break; 795 case MAC_PROP_MTU: 796 cur_mtu = bgep->chipid.default_mtu; 797 bcopy(pr_val, &new_mtu, sizeof (new_mtu)); 798 799 if (new_mtu == cur_mtu) { 800 err = 0; 801 break; 802 } 803 if (new_mtu < BGE_DEFAULT_MTU || 804 new_mtu > BGE_MAXIMUM_MTU) { 805 err = EINVAL; 806 break; 807 } 808 if ((new_mtu > BGE_DEFAULT_MTU) && 809 (bgep->chipid.flags & CHIP_FLAG_NO_JUMBO)) { 810 err = EINVAL; 811 break; 812 } 813 if (bgep->bge_mac_state == BGE_MAC_STARTED) { 814 err = EBUSY; 815 break; 816 } 817 bgep->chipid.default_mtu = new_mtu; 818 if (bge_chip_id_init(bgep)) { 819 err = EINVAL; 820 break; 821 } 822 maxsdu = bgep->chipid.ethmax_size - 823 sizeof (struct ether_header); 824 err = mac_maxsdu_update(bgep->mh, maxsdu); 825 if (err == 0) { 826 bgep->bge_dma_error = B_TRUE; 827 bgep->manual_reset = B_TRUE; 828 bge_chip_stop(bgep, B_TRUE); 829 bge_wake_factotum(bgep); 830 err = 0; 831 } 832 break; 833 case MAC_PROP_FLOWCTRL: 834 bcopy(pr_val, &fl, sizeof (fl)); 835 switch (fl) { 836 default: 837 err = ENOTSUP; 838 break; 839 case LINK_FLOWCTRL_NONE: 840 bgep->param_adv_pause = 0; 841 bgep->param_adv_asym_pause = 0; 842 843 bgep->param_link_rx_pause = B_FALSE; 844 bgep->param_link_tx_pause = B_FALSE; 845 break; 846 case LINK_FLOWCTRL_RX: 847 bgep->param_adv_pause = 1; 848 bgep->param_adv_asym_pause = 1; 849 850 bgep->param_link_rx_pause = B_TRUE; 851 bgep->param_link_tx_pause = B_FALSE; 852 break; 853 case LINK_FLOWCTRL_TX: 854 bgep->param_adv_pause = 0; 855 bgep->param_adv_asym_pause = 1; 856 857 bgep->param_link_rx_pause = B_FALSE; 858 bgep->param_link_tx_pause = B_TRUE; 859 break; 860 case LINK_FLOWCTRL_BI: 861 bgep->param_adv_pause = 1; 862 bgep->param_adv_asym_pause = 0; 863 864 bgep->param_link_rx_pause = B_TRUE; 865 bgep->param_link_tx_pause = B_TRUE; 866 break; 867 } 868 869 if (err == 0) { 870 if (bge_reprogram(bgep) == IOC_INVAL) 871 err = EINVAL; 872 } 873 874 break; 875 case MAC_PROP_PRIVATE: 876 err = bge_set_priv_prop(bgep, pr_name, pr_valsize, 877 pr_val); 878 break; 879 default: 880 err = ENOTSUP; 881 break; 882 } 883 mutex_exit(bgep->genlock); 884 return (err); 885 } 886 887 /* ARGSUSED */ 888 static int 889 bge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num, 890 uint_t pr_flags, uint_t pr_valsize, void *pr_val, uint_t *perm) 891 { 892 bge_t *bgep = barg; 893 int err = 0; 894 link_flowctrl_t fl; 895 uint64_t speed; 896 int flags = bgep->chipid.flags; 897 boolean_t is_default = (pr_flags & MAC_PROP_DEFAULT); 898 899 if (pr_valsize == 0) 900 return (EINVAL); 901 bzero(pr_val, pr_valsize); 902 903 *perm = MAC_PROP_PERM_RW; 904 905 mutex_enter(bgep->genlock); 906 if ((bgep->param_loop_mode != BGE_LOOP_NONE && 907 bge_param_locked(pr_num)) || 908 ((bgep->chipid.flags & CHIP_FLAG_SERDES) && 909 ((pr_num == MAC_PROP_EN_100FDX_CAP) || 910 (pr_num == MAC_PROP_EN_100HDX_CAP) || 911 (pr_num == MAC_PROP_EN_10FDX_CAP) || 912 (pr_num == MAC_PROP_EN_10HDX_CAP))) || 913 (DEVICE_5906_SERIES_CHIPSETS(bgep) && 914 ((pr_num == MAC_PROP_EN_1000FDX_CAP) || 915 (pr_num == MAC_PROP_EN_1000HDX_CAP)))) 916 *perm = MAC_PROP_PERM_READ; 917 mutex_exit(bgep->genlock); 918 919 switch (pr_num) { 920 case MAC_PROP_DUPLEX: 921 *perm = MAC_PROP_PERM_READ; 922 if (pr_valsize < sizeof (link_duplex_t)) 923 return (EINVAL); 924 bcopy(&bgep->param_link_duplex, pr_val, 925 sizeof (link_duplex_t)); 926 break; 927 case MAC_PROP_SPEED: 928 *perm = MAC_PROP_PERM_READ; 929 if (pr_valsize < sizeof (speed)) 930 return (EINVAL); 931 speed = bgep->param_link_speed * 1000000ull; 932 bcopy(&speed, pr_val, sizeof (speed)); 933 break; 934 case MAC_PROP_STATUS: 935 *perm = MAC_PROP_PERM_READ; 936 if (pr_valsize < sizeof (link_state_t)) 937 return (EINVAL); 938 bcopy(&bgep->link_state, pr_val, 939 sizeof (link_state_t)); 940 break; 941 case MAC_PROP_AUTONEG: 942 if (is_default) 943 *(uint8_t *)pr_val = 1; 944 else 945 *(uint8_t *)pr_val = bgep->param_adv_autoneg; 946 break; 947 case MAC_PROP_FLOWCTRL: 948 if (pr_valsize < sizeof (fl)) 949 return (EINVAL); 950 if (is_default) { 951 fl = LINK_FLOWCTRL_BI; 952 bcopy(&fl, pr_val, sizeof (fl)); 953 break; 954 } 955 956 if (bgep->param_link_rx_pause && 957 !bgep->param_link_tx_pause) 958 fl = LINK_FLOWCTRL_RX; 959 960 if (!bgep->param_link_rx_pause && 961 !bgep->param_link_tx_pause) 962 fl = LINK_FLOWCTRL_NONE; 963 964 if (!bgep->param_link_rx_pause && 965 bgep->param_link_tx_pause) 966 fl = LINK_FLOWCTRL_TX; 967 968 if (bgep->param_link_rx_pause && 969 bgep->param_link_tx_pause) 970 fl = LINK_FLOWCTRL_BI; 971 bcopy(&fl, pr_val, sizeof (fl)); 972 break; 973 case MAC_PROP_ADV_1000FDX_CAP: 974 *perm = MAC_PROP_PERM_READ; 975 if (is_default) { 976 if (DEVICE_5906_SERIES_CHIPSETS(bgep)) 977 *(uint8_t *)pr_val = 0; 978 else 979 *(uint8_t *)pr_val = 1; 980 } 981 else 982 *(uint8_t *)pr_val = bgep->param_adv_1000fdx; 983 break; 984 case MAC_PROP_EN_1000FDX_CAP: 985 if (is_default) { 986 if (DEVICE_5906_SERIES_CHIPSETS(bgep)) 987 *(uint8_t *)pr_val = 0; 988 else 989 *(uint8_t *)pr_val = 1; 990 } 991 else 992 *(uint8_t *)pr_val = bgep->param_en_1000fdx; 993 break; 994 case MAC_PROP_ADV_1000HDX_CAP: 995 *perm = MAC_PROP_PERM_READ; 996 if (is_default) { 997 if (DEVICE_5906_SERIES_CHIPSETS(bgep)) 998 *(uint8_t *)pr_val = 0; 999 else 1000 *(uint8_t *)pr_val = 1; 1001 } 1002 else 1003 *(uint8_t *)pr_val = bgep->param_adv_1000hdx; 1004 break; 1005 case MAC_PROP_EN_1000HDX_CAP: 1006 if (is_default) { 1007 if (DEVICE_5906_SERIES_CHIPSETS(bgep)) 1008 *(uint8_t *)pr_val = 0; 1009 else 1010 *(uint8_t *)pr_val = 1; 1011 } 1012 else 1013 *(uint8_t *)pr_val = bgep->param_en_1000hdx; 1014 break; 1015 case MAC_PROP_ADV_100FDX_CAP: 1016 *perm = MAC_PROP_PERM_READ; 1017 if (is_default) { 1018 *(uint8_t *)pr_val = 1019 ((flags & CHIP_FLAG_SERDES) ? 0 : 1); 1020 } else { 1021 *(uint8_t *)pr_val = bgep->param_adv_100fdx; 1022 } 1023 break; 1024 case MAC_PROP_EN_100FDX_CAP: 1025 if (is_default) { 1026 *(uint8_t *)pr_val = 1027 ((flags & CHIP_FLAG_SERDES) ? 0 : 1); 1028 } else { 1029 *(uint8_t *)pr_val = bgep->param_en_100fdx; 1030 } 1031 break; 1032 case MAC_PROP_ADV_100HDX_CAP: 1033 *perm = MAC_PROP_PERM_READ; 1034 if (is_default) { 1035 *(uint8_t *)pr_val = 1036 ((flags & CHIP_FLAG_SERDES) ? 0 : 1); 1037 } else { 1038 *(uint8_t *)pr_val = bgep->param_adv_100hdx; 1039 } 1040 break; 1041 case MAC_PROP_EN_100HDX_CAP: 1042 if (is_default) { 1043 *(uint8_t *)pr_val = 1044 ((flags & CHIP_FLAG_SERDES) ? 0 : 1); 1045 } else { 1046 *(uint8_t *)pr_val = bgep->param_en_100hdx; 1047 } 1048 break; 1049 case MAC_PROP_ADV_10FDX_CAP: 1050 *perm = MAC_PROP_PERM_READ; 1051 if (is_default) { 1052 *(uint8_t *)pr_val = 1053 ((flags & CHIP_FLAG_SERDES) ? 0 : 1); 1054 } else { 1055 *(uint8_t *)pr_val = bgep->param_adv_10fdx; 1056 } 1057 break; 1058 case MAC_PROP_EN_10FDX_CAP: 1059 if (is_default) { 1060 *(uint8_t *)pr_val = 1061 ((flags & CHIP_FLAG_SERDES) ? 0 : 1); 1062 } else { 1063 *(uint8_t *)pr_val = bgep->param_en_10fdx; 1064 } 1065 break; 1066 case MAC_PROP_ADV_10HDX_CAP: 1067 *perm = MAC_PROP_PERM_READ; 1068 if (is_default) { 1069 *(uint8_t *)pr_val = 1070 ((flags & CHIP_FLAG_SERDES) ? 0 : 1); 1071 } else { 1072 *(uint8_t *)pr_val = bgep->param_adv_10hdx; 1073 } 1074 break; 1075 case MAC_PROP_EN_10HDX_CAP: 1076 if (is_default) { 1077 *(uint8_t *)pr_val = 1078 ((flags & CHIP_FLAG_SERDES) ? 0 : 1); 1079 } else { 1080 *(uint8_t *)pr_val = bgep->param_en_10hdx; 1081 } 1082 break; 1083 case MAC_PROP_ADV_100T4_CAP: 1084 case MAC_PROP_EN_100T4_CAP: 1085 *perm = MAC_PROP_PERM_READ; 1086 *(uint8_t *)pr_val = 0; 1087 break; 1088 case MAC_PROP_PRIVATE: 1089 err = bge_get_priv_prop(bgep, pr_name, pr_flags, 1090 pr_valsize, pr_val); 1091 return (err); 1092 case MAC_PROP_MTU: { 1093 mac_propval_range_t range; 1094 1095 if (!(pr_flags & MAC_PROP_POSSIBLE)) 1096 return (ENOTSUP); 1097 if (pr_valsize < sizeof (mac_propval_range_t)) 1098 return (EINVAL); 1099 range.mpr_count = 1; 1100 range.mpr_type = MAC_PROPVAL_UINT32; 1101 range.range_uint32[0].mpur_min = 1102 range.range_uint32[0].mpur_max = BGE_DEFAULT_MTU; 1103 if (bge_jumbo_enable && !(flags & CHIP_FLAG_NO_JUMBO)) 1104 range.range_uint32[0].mpur_max = 1105 BGE_MAXIMUM_MTU; 1106 bcopy(&range, pr_val, sizeof (range)); 1107 break; 1108 } 1109 default: 1110 return (ENOTSUP); 1111 } 1112 return (0); 1113 } 1114 1115 /* ARGSUSED */ 1116 static int 1117 bge_set_priv_prop(bge_t *bgep, const char *pr_name, uint_t pr_valsize, 1118 const void *pr_val) 1119 { 1120 int err = 0; 1121 long result; 1122 1123 if (strcmp(pr_name, "_adv_pause_cap") == 0) { 1124 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 1125 if (result > 1 || result < 0) { 1126 err = EINVAL; 1127 } else { 1128 bgep->param_adv_pause = (uint32_t)result; 1129 if (bge_reprogram(bgep) == IOC_INVAL) 1130 err = EINVAL; 1131 } 1132 return (err); 1133 } 1134 if (strcmp(pr_name, "_adv_asym_pause_cap") == 0) { 1135 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 1136 if (result > 1 || result < 0) { 1137 err = EINVAL; 1138 } else { 1139 bgep->param_adv_asym_pause = (uint32_t)result; 1140 if (bge_reprogram(bgep) == IOC_INVAL) 1141 err = EINVAL; 1142 } 1143 return (err); 1144 } 1145 if (strcmp(pr_name, "_drain_max") == 0) { 1146 1147 /* 1148 * on the Tx side, we need to update the h/w register for 1149 * real packet transmission per packet. The drain_max parameter 1150 * is used to reduce the register access. This parameter 1151 * controls the max number of packets that we will hold before 1152 * updating the bge h/w to trigger h/w transmit. The bge 1153 * chipset usually has a max of 512 Tx descriptors, thus 1154 * the upper bound on drain_max is 512. 1155 */ 1156 if (pr_val == NULL) { 1157 err = EINVAL; 1158 return (err); 1159 } 1160 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 1161 if (result > 512 || result < 1) 1162 err = EINVAL; 1163 else { 1164 bgep->param_drain_max = (uint32_t)result; 1165 if (bge_reprogram(bgep) == IOC_INVAL) 1166 err = EINVAL; 1167 } 1168 return (err); 1169 } 1170 if (strcmp(pr_name, "_msi_cnt") == 0) { 1171 1172 if (pr_val == NULL) { 1173 err = EINVAL; 1174 return (err); 1175 } 1176 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 1177 if (result > 7 || result < 0) 1178 err = EINVAL; 1179 else { 1180 bgep->param_msi_cnt = (uint32_t)result; 1181 if (bge_reprogram(bgep) == IOC_INVAL) 1182 err = EINVAL; 1183 } 1184 return (err); 1185 } 1186 if (strcmp(pr_name, "_intr_coalesce_blank_time") == 0) { 1187 if (ddi_strtol(pr_val, (char **)NULL, 0, &result) != 0) 1188 return (EINVAL); 1189 1190 bgep->chipid.rx_ticks_norm = (uint32_t)result; 1191 return (0); 1192 } 1193 1194 if (strcmp(pr_name, "_intr_coalesce_pkt_cnt") == 0) { 1195 if (ddi_strtol(pr_val, (char **)NULL, 0, &result) != 0) 1196 return (EINVAL); 1197 1198 bgep->chipid.rx_count_norm = (uint32_t)result; 1199 return (0); 1200 } 1201 return (ENOTSUP); 1202 } 1203 1204 static int 1205 bge_get_priv_prop(bge_t *bge, const char *pr_name, uint_t pr_flags, 1206 uint_t pr_valsize, void *pr_val) 1207 { 1208 int err = ENOTSUP; 1209 boolean_t is_default = (pr_flags & MAC_PROP_DEFAULT); 1210 int value; 1211 1212 if (strcmp(pr_name, "_adv_pause_cap") == 0) { 1213 value = (is_default? 1 : bge->param_adv_pause); 1214 err = 0; 1215 goto done; 1216 } 1217 if (strcmp(pr_name, "_adv_asym_pause_cap") == 0) { 1218 value = (is_default? 1 : bge->param_adv_asym_pause); 1219 err = 0; 1220 goto done; 1221 } 1222 if (strcmp(pr_name, "_drain_max") == 0) { 1223 value = (is_default? 64 : bge->param_drain_max); 1224 err = 0; 1225 goto done; 1226 } 1227 if (strcmp(pr_name, "_msi_cnt") == 0) { 1228 value = (is_default? 0 : bge->param_msi_cnt); 1229 err = 0; 1230 goto done; 1231 } 1232 1233 if (strcmp(pr_name, "_intr_coalesce_blank_time") == 0) { 1234 value = (is_default? bge_rx_ticks_norm : 1235 bge->chipid.rx_ticks_norm); 1236 err = 0; 1237 goto done; 1238 } 1239 1240 if (strcmp(pr_name, "_intr_coalesce_pkt_cnt") == 0) { 1241 value = (is_default? bge_rx_count_norm : 1242 bge->chipid.rx_count_norm); 1243 err = 0; 1244 goto done; 1245 } 1246 1247 done: 1248 if (err == 0) { 1249 (void) snprintf(pr_val, pr_valsize, "%d", value); 1250 } 1251 return (err); 1252 } 1253 1254 /* 1255 * Compute the index of the required bit in the multicast hash map. 1256 * This must mirror the way the hardware actually does it! 1257 * See Broadcom document 570X-PG102-R page 125. 1258 */ 1259 static uint32_t 1260 bge_hash_index(const uint8_t *mca) 1261 { 1262 uint32_t hash; 1263 1264 CRC32(hash, mca, ETHERADDRL, -1U, crc32_table); 1265 1266 return (hash); 1267 } 1268 1269 /* 1270 * bge_m_multicst_add() -- enable/disable a multicast address 1271 */ 1272 static int 1273 bge_m_multicst(void *arg, boolean_t add, const uint8_t *mca) 1274 { 1275 bge_t *bgep = arg; /* private device info */ 1276 uint32_t hash; 1277 uint32_t index; 1278 uint32_t word; 1279 uint32_t bit; 1280 uint8_t *refp; 1281 1282 BGE_TRACE(("bge_m_multicst($%p, %s, %s)", arg, 1283 (add) ? "add" : "remove", ether_sprintf((void *)mca))); 1284 1285 /* 1286 * Precalculate all required masks, pointers etc ... 1287 */ 1288 hash = bge_hash_index(mca); 1289 index = hash % BGE_HASH_TABLE_SIZE; 1290 word = index/32u; 1291 bit = 1 << (index % 32u); 1292 refp = &bgep->mcast_refs[index]; 1293 1294 BGE_DEBUG(("bge_m_multicst: hash 0x%x index %d (%d:0x%x) = %d", 1295 hash, index, word, bit, *refp)); 1296 1297 /* 1298 * We must set the appropriate bit in the hash map (and the 1299 * corresponding h/w register) when the refcount goes from 0 1300 * to >0, and clear it when the last ref goes away (refcount 1301 * goes from >0 back to 0). If we change the hash map, we 1302 * must also update the chip's hardware map registers. 1303 */ 1304 mutex_enter(bgep->genlock); 1305 if (!(bgep->progress & PROGRESS_INTR)) { 1306 /* can happen during autorecovery */ 1307 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 1308 mutex_exit(bgep->genlock); 1309 return (EIO); 1310 } 1311 if (add) { 1312 if ((*refp)++ == 0) { 1313 bgep->mcast_hash[word] |= bit; 1314 #ifdef BGE_IPMI_ASF 1315 if (bge_chip_sync(bgep, B_TRUE) == DDI_FAILURE) { 1316 #else 1317 if (bge_chip_sync(bgep) == DDI_FAILURE) { 1318 #endif 1319 (void) bge_check_acc_handle(bgep, 1320 bgep->cfg_handle); 1321 (void) bge_check_acc_handle(bgep, 1322 bgep->io_handle); 1323 ddi_fm_service_impact(bgep->devinfo, 1324 DDI_SERVICE_DEGRADED); 1325 mutex_exit(bgep->genlock); 1326 return (EIO); 1327 } 1328 } 1329 } else { 1330 if (--(*refp) == 0) { 1331 bgep->mcast_hash[word] &= ~bit; 1332 #ifdef BGE_IPMI_ASF 1333 if (bge_chip_sync(bgep, B_TRUE) == DDI_FAILURE) { 1334 #else 1335 if (bge_chip_sync(bgep) == DDI_FAILURE) { 1336 #endif 1337 (void) bge_check_acc_handle(bgep, 1338 bgep->cfg_handle); 1339 (void) bge_check_acc_handle(bgep, 1340 bgep->io_handle); 1341 ddi_fm_service_impact(bgep->devinfo, 1342 DDI_SERVICE_DEGRADED); 1343 mutex_exit(bgep->genlock); 1344 return (EIO); 1345 } 1346 } 1347 } 1348 BGE_DEBUG(("bge_m_multicst($%p) done", arg)); 1349 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) { 1350 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 1351 mutex_exit(bgep->genlock); 1352 return (EIO); 1353 } 1354 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) { 1355 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 1356 mutex_exit(bgep->genlock); 1357 return (EIO); 1358 } 1359 mutex_exit(bgep->genlock); 1360 1361 return (0); 1362 } 1363 1364 /* 1365 * bge_m_promisc() -- set or reset promiscuous mode on the board 1366 * 1367 * Program the hardware to enable/disable promiscuous and/or 1368 * receive-all-multicast modes. 1369 */ 1370 static int 1371 bge_m_promisc(void *arg, boolean_t on) 1372 { 1373 bge_t *bgep = arg; 1374 1375 BGE_TRACE(("bge_m_promisc_set($%p, %d)", arg, on)); 1376 1377 /* 1378 * Store MAC layer specified mode and pass to chip layer to update h/w 1379 */ 1380 mutex_enter(bgep->genlock); 1381 if (!(bgep->progress & PROGRESS_INTR)) { 1382 /* can happen during autorecovery */ 1383 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 1384 mutex_exit(bgep->genlock); 1385 return (EIO); 1386 } 1387 bgep->promisc = on; 1388 #ifdef BGE_IPMI_ASF 1389 if (bge_chip_sync(bgep, B_TRUE) == DDI_FAILURE) { 1390 #else 1391 if (bge_chip_sync(bgep) == DDI_FAILURE) { 1392 #endif 1393 (void) bge_check_acc_handle(bgep, bgep->cfg_handle); 1394 (void) bge_check_acc_handle(bgep, bgep->io_handle); 1395 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 1396 mutex_exit(bgep->genlock); 1397 return (EIO); 1398 } 1399 BGE_DEBUG(("bge_m_promisc_set($%p) done", arg)); 1400 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) { 1401 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 1402 mutex_exit(bgep->genlock); 1403 return (EIO); 1404 } 1405 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) { 1406 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 1407 mutex_exit(bgep->genlock); 1408 return (EIO); 1409 } 1410 mutex_exit(bgep->genlock); 1411 return (0); 1412 } 1413 1414 /* 1415 * Find the slot for the specified unicast address 1416 */ 1417 int 1418 bge_unicst_find(bge_t *bgep, const uint8_t *mac_addr) 1419 { 1420 int slot; 1421 1422 ASSERT(mutex_owned(bgep->genlock)); 1423 1424 for (slot = 0; slot < bgep->unicst_addr_total; slot++) { 1425 if (bcmp(bgep->curr_addr[slot].addr, mac_addr, ETHERADDRL) == 0) 1426 return (slot); 1427 } 1428 1429 return (-1); 1430 } 1431 1432 /* 1433 * Programs the classifier to start steering packets matching 'mac_addr' to the 1434 * specified ring 'arg'. 1435 */ 1436 static int 1437 bge_addmac(void *arg, const uint8_t *mac_addr) 1438 { 1439 recv_ring_t *rrp = (recv_ring_t *)arg; 1440 bge_t *bgep = rrp->bgep; 1441 bge_recv_rule_t *rulep = bgep->recv_rules; 1442 bge_rule_info_t *rinfop = NULL; 1443 uint8_t ring = (uint8_t)(rrp - bgep->recv) + 1; 1444 int i; 1445 uint16_t tmp16; 1446 uint32_t tmp32; 1447 int slot; 1448 int err; 1449 1450 mutex_enter(bgep->genlock); 1451 if (bgep->unicst_addr_avail == 0) { 1452 mutex_exit(bgep->genlock); 1453 return (ENOSPC); 1454 } 1455 1456 /* 1457 * First add the unicast address to a available slot. 1458 */ 1459 slot = bge_unicst_find(bgep, mac_addr); 1460 ASSERT(slot == -1); 1461 1462 for (slot = 0; slot < bgep->unicst_addr_total; slot++) { 1463 if (!bgep->curr_addr[slot].set) { 1464 bgep->curr_addr[slot].set = B_TRUE; 1465 break; 1466 } 1467 } 1468 1469 ASSERT(slot < bgep->unicst_addr_total); 1470 bgep->unicst_addr_avail--; 1471 mutex_exit(bgep->genlock); 1472 1473 if ((err = bge_unicst_set(bgep, mac_addr, slot)) != 0) 1474 goto fail; 1475 1476 /* A rule is already here. Deny this. */ 1477 if (rrp->mac_addr_rule != NULL) { 1478 err = ether_cmp(mac_addr, rrp->mac_addr_val) ? EEXIST : EBUSY; 1479 goto fail; 1480 } 1481 1482 /* 1483 * Allocate a bge_rule_info_t to keep track of which rule slots 1484 * are being used. 1485 */ 1486 rinfop = kmem_zalloc(sizeof (bge_rule_info_t), KM_NOSLEEP); 1487 if (rinfop == NULL) { 1488 err = ENOMEM; 1489 goto fail; 1490 } 1491 1492 /* 1493 * Look for the starting slot to place the rules. 1494 * The two slots we reserve must be contiguous. 1495 */ 1496 for (i = 0; i + 1 < RECV_RULES_NUM_MAX; i++) 1497 if ((rulep[i].control & RECV_RULE_CTL_ENABLE) == 0 && 1498 (rulep[i+1].control & RECV_RULE_CTL_ENABLE) == 0) 1499 break; 1500 1501 ASSERT(i + 1 < RECV_RULES_NUM_MAX); 1502 1503 bcopy(mac_addr, &tmp32, sizeof (tmp32)); 1504 rulep[i].mask_value = ntohl(tmp32); 1505 rulep[i].control = RULE_DEST_MAC_1(ring) | RECV_RULE_CTL_AND; 1506 bge_reg_put32(bgep, RECV_RULE_MASK_REG(i), rulep[i].mask_value); 1507 bge_reg_put32(bgep, RECV_RULE_CONTROL_REG(i), rulep[i].control); 1508 1509 bcopy(mac_addr + 4, &tmp16, sizeof (tmp16)); 1510 rulep[i+1].mask_value = 0xffff0000 | ntohs(tmp16); 1511 rulep[i+1].control = RULE_DEST_MAC_2(ring); 1512 bge_reg_put32(bgep, RECV_RULE_MASK_REG(i+1), rulep[i+1].mask_value); 1513 bge_reg_put32(bgep, RECV_RULE_CONTROL_REG(i+1), rulep[i+1].control); 1514 rinfop->start = i; 1515 rinfop->count = 2; 1516 1517 rrp->mac_addr_rule = rinfop; 1518 bcopy(mac_addr, rrp->mac_addr_val, ETHERADDRL); 1519 1520 return (0); 1521 1522 fail: 1523 /* Clear the address just set */ 1524 (void) bge_unicst_set(bgep, zero_addr, slot); 1525 mutex_enter(bgep->genlock); 1526 bgep->curr_addr[slot].set = B_FALSE; 1527 bgep->unicst_addr_avail++; 1528 mutex_exit(bgep->genlock); 1529 1530 return (err); 1531 } 1532 1533 /* 1534 * Stop classifying packets matching the MAC address to the specified ring. 1535 */ 1536 static int 1537 bge_remmac(void *arg, const uint8_t *mac_addr) 1538 { 1539 recv_ring_t *rrp = (recv_ring_t *)arg; 1540 bge_t *bgep = rrp->bgep; 1541 bge_recv_rule_t *rulep = bgep->recv_rules; 1542 bge_rule_info_t *rinfop = rrp->mac_addr_rule; 1543 int start; 1544 int slot; 1545 int err; 1546 1547 /* 1548 * Remove the MAC address from its slot. 1549 */ 1550 mutex_enter(bgep->genlock); 1551 slot = bge_unicst_find(bgep, mac_addr); 1552 if (slot == -1) { 1553 mutex_exit(bgep->genlock); 1554 return (EINVAL); 1555 } 1556 1557 ASSERT(bgep->curr_addr[slot].set); 1558 mutex_exit(bgep->genlock); 1559 1560 if ((err = bge_unicst_set(bgep, zero_addr, slot)) != 0) 1561 return (err); 1562 1563 if (rinfop == NULL || ether_cmp(mac_addr, rrp->mac_addr_val) != 0) 1564 return (EINVAL); 1565 1566 start = rinfop->start; 1567 rulep[start].mask_value = 0; 1568 rulep[start].control = 0; 1569 bge_reg_put32(bgep, RECV_RULE_MASK_REG(start), rulep[start].mask_value); 1570 bge_reg_put32(bgep, RECV_RULE_CONTROL_REG(start), rulep[start].control); 1571 start++; 1572 rulep[start].mask_value = 0; 1573 rulep[start].control = 0; 1574 bge_reg_put32(bgep, RECV_RULE_MASK_REG(start), rulep[start].mask_value); 1575 bge_reg_put32(bgep, RECV_RULE_CONTROL_REG(start), rulep[start].control); 1576 1577 kmem_free(rinfop, sizeof (bge_rule_info_t)); 1578 rrp->mac_addr_rule = NULL; 1579 bzero(rrp->mac_addr_val, ETHERADDRL); 1580 1581 mutex_enter(bgep->genlock); 1582 bgep->curr_addr[slot].set = B_FALSE; 1583 bgep->unicst_addr_avail++; 1584 mutex_exit(bgep->genlock); 1585 1586 return (0); 1587 } 1588 1589 static int 1590 bge_flag_intr_enable(mac_intr_handle_t ih) 1591 { 1592 recv_ring_t *rrp = (recv_ring_t *)ih; 1593 bge_t *bgep = rrp->bgep; 1594 1595 mutex_enter(bgep->genlock); 1596 rrp->poll_flag = 0; 1597 mutex_exit(bgep->genlock); 1598 1599 return (0); 1600 } 1601 1602 static int 1603 bge_flag_intr_disable(mac_intr_handle_t ih) 1604 { 1605 recv_ring_t *rrp = (recv_ring_t *)ih; 1606 bge_t *bgep = rrp->bgep; 1607 1608 mutex_enter(bgep->genlock); 1609 rrp->poll_flag = 1; 1610 mutex_exit(bgep->genlock); 1611 1612 return (0); 1613 } 1614 1615 static int 1616 bge_ring_start(mac_ring_driver_t rh, uint64_t mr_gen_num) 1617 { 1618 recv_ring_t *rx_ring; 1619 1620 rx_ring = (recv_ring_t *)rh; 1621 mutex_enter(rx_ring->rx_lock); 1622 rx_ring->ring_gen_num = mr_gen_num; 1623 mutex_exit(rx_ring->rx_lock); 1624 return (0); 1625 } 1626 1627 1628 /* 1629 * Callback funtion for MAC layer to register all rings 1630 * for given ring_group, noted by rg_index. 1631 */ 1632 void 1633 bge_fill_ring(void *arg, mac_ring_type_t rtype, const int rg_index, 1634 const int index, mac_ring_info_t *infop, mac_ring_handle_t rh) 1635 { 1636 bge_t *bgep = arg; 1637 mac_intr_t *mintr; 1638 1639 switch (rtype) { 1640 case MAC_RING_TYPE_RX: { 1641 recv_ring_t *rx_ring; 1642 ASSERT(rg_index >= 0 && rg_index < MIN(bgep->chipid.rx_rings, 1643 MAC_ADDRESS_REGS_MAX) && index == 0); 1644 1645 rx_ring = &bgep->recv[rg_index]; 1646 rx_ring->ring_handle = rh; 1647 1648 infop->mri_driver = (mac_ring_driver_t)rx_ring; 1649 infop->mri_start = bge_ring_start; 1650 infop->mri_stop = NULL; 1651 infop->mri_poll = bge_poll_ring; 1652 1653 mintr = &infop->mri_intr; 1654 mintr->mi_handle = (mac_intr_handle_t)rx_ring; 1655 mintr->mi_enable = bge_flag_intr_enable; 1656 mintr->mi_disable = bge_flag_intr_disable; 1657 1658 break; 1659 } 1660 case MAC_RING_TYPE_TX: 1661 default: 1662 ASSERT(0); 1663 break; 1664 } 1665 } 1666 1667 /* 1668 * Fill infop passed as argument 1669 * fill in respective ring_group info 1670 * Each group has a single ring in it. We keep it simple 1671 * and use the same internal handle for rings and groups. 1672 */ 1673 void 1674 bge_fill_group(void *arg, mac_ring_type_t rtype, const int rg_index, 1675 mac_group_info_t *infop, mac_group_handle_t gh) 1676 { 1677 bge_t *bgep = arg; 1678 1679 switch (rtype) { 1680 case MAC_RING_TYPE_RX: { 1681 recv_ring_t *rx_ring; 1682 1683 ASSERT(rg_index >= 0 && rg_index < MIN(bgep->chipid.rx_rings, 1684 MAC_ADDRESS_REGS_MAX)); 1685 rx_ring = &bgep->recv[rg_index]; 1686 rx_ring->ring_group_handle = gh; 1687 1688 infop->mgi_driver = (mac_group_driver_t)rx_ring; 1689 infop->mgi_start = NULL; 1690 infop->mgi_stop = NULL; 1691 infop->mgi_addmac = bge_addmac; 1692 infop->mgi_remmac = bge_remmac; 1693 infop->mgi_count = 1; 1694 break; 1695 } 1696 case MAC_RING_TYPE_TX: 1697 default: 1698 ASSERT(0); 1699 break; 1700 } 1701 } 1702 1703 /*ARGSUSED*/ 1704 static boolean_t 1705 bge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data) 1706 { 1707 bge_t *bgep = arg; 1708 1709 switch (cap) { 1710 case MAC_CAPAB_HCKSUM: { 1711 uint32_t *txflags = cap_data; 1712 1713 *txflags = HCKSUM_INET_FULL_V4 | HCKSUM_IPHDRCKSUM; 1714 break; 1715 } 1716 case MAC_CAPAB_RINGS: { 1717 mac_capab_rings_t *cap_rings = cap_data; 1718 1719 /* Temporarily disable multiple tx rings. */ 1720 if (cap_rings->mr_type != MAC_RING_TYPE_RX) 1721 return (B_FALSE); 1722 1723 cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC; 1724 cap_rings->mr_rnum = cap_rings->mr_gnum = 1725 MIN(bgep->chipid.rx_rings, MAC_ADDRESS_REGS_MAX); 1726 cap_rings->mr_rget = bge_fill_ring; 1727 cap_rings->mr_gget = bge_fill_group; 1728 break; 1729 } 1730 default: 1731 return (B_FALSE); 1732 } 1733 return (B_TRUE); 1734 } 1735 1736 /* 1737 * Loopback ioctl code 1738 */ 1739 1740 static lb_property_t loopmodes[] = { 1741 { normal, "normal", BGE_LOOP_NONE }, 1742 { external, "1000Mbps", BGE_LOOP_EXTERNAL_1000 }, 1743 { external, "100Mbps", BGE_LOOP_EXTERNAL_100 }, 1744 { external, "10Mbps", BGE_LOOP_EXTERNAL_10 }, 1745 { internal, "PHY", BGE_LOOP_INTERNAL_PHY }, 1746 { internal, "MAC", BGE_LOOP_INTERNAL_MAC } 1747 }; 1748 1749 static enum ioc_reply 1750 bge_set_loop_mode(bge_t *bgep, uint32_t mode) 1751 { 1752 /* 1753 * If the mode isn't being changed, there's nothing to do ... 1754 */ 1755 if (mode == bgep->param_loop_mode) 1756 return (IOC_ACK); 1757 1758 /* 1759 * Validate the requested mode and prepare a suitable message 1760 * to explain the link down/up cycle that the change will 1761 * probably induce ... 1762 */ 1763 switch (mode) { 1764 default: 1765 return (IOC_INVAL); 1766 1767 case BGE_LOOP_NONE: 1768 case BGE_LOOP_EXTERNAL_1000: 1769 case BGE_LOOP_EXTERNAL_100: 1770 case BGE_LOOP_EXTERNAL_10: 1771 case BGE_LOOP_INTERNAL_PHY: 1772 case BGE_LOOP_INTERNAL_MAC: 1773 break; 1774 } 1775 1776 /* 1777 * All OK; tell the caller to reprogram 1778 * the PHY and/or MAC for the new mode ... 1779 */ 1780 bgep->param_loop_mode = mode; 1781 return (IOC_RESTART_ACK); 1782 } 1783 1784 static enum ioc_reply 1785 bge_loop_ioctl(bge_t *bgep, queue_t *wq, mblk_t *mp, struct iocblk *iocp) 1786 { 1787 lb_info_sz_t *lbsp; 1788 lb_property_t *lbpp; 1789 uint32_t *lbmp; 1790 int cmd; 1791 1792 _NOTE(ARGUNUSED(wq)) 1793 1794 /* 1795 * Validate format of ioctl 1796 */ 1797 if (mp->b_cont == NULL) 1798 return (IOC_INVAL); 1799 1800 cmd = iocp->ioc_cmd; 1801 switch (cmd) { 1802 default: 1803 /* NOTREACHED */ 1804 bge_error(bgep, "bge_loop_ioctl: invalid cmd 0x%x", cmd); 1805 return (IOC_INVAL); 1806 1807 case LB_GET_INFO_SIZE: 1808 if (iocp->ioc_count != sizeof (lb_info_sz_t)) 1809 return (IOC_INVAL); 1810 lbsp = (void *)mp->b_cont->b_rptr; 1811 *lbsp = sizeof (loopmodes); 1812 return (IOC_REPLY); 1813 1814 case LB_GET_INFO: 1815 if (iocp->ioc_count != sizeof (loopmodes)) 1816 return (IOC_INVAL); 1817 lbpp = (void *)mp->b_cont->b_rptr; 1818 bcopy(loopmodes, lbpp, sizeof (loopmodes)); 1819 return (IOC_REPLY); 1820 1821 case LB_GET_MODE: 1822 if (iocp->ioc_count != sizeof (uint32_t)) 1823 return (IOC_INVAL); 1824 lbmp = (void *)mp->b_cont->b_rptr; 1825 *lbmp = bgep->param_loop_mode; 1826 return (IOC_REPLY); 1827 1828 case LB_SET_MODE: 1829 if (iocp->ioc_count != sizeof (uint32_t)) 1830 return (IOC_INVAL); 1831 lbmp = (void *)mp->b_cont->b_rptr; 1832 return (bge_set_loop_mode(bgep, *lbmp)); 1833 } 1834 } 1835 1836 /* 1837 * Specific bge IOCTLs, the gld module handles the generic ones. 1838 */ 1839 static void 1840 bge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp) 1841 { 1842 bge_t *bgep = arg; 1843 struct iocblk *iocp; 1844 enum ioc_reply status; 1845 boolean_t need_privilege; 1846 int err; 1847 int cmd; 1848 1849 /* 1850 * Validate the command before bothering with the mutex ... 1851 */ 1852 iocp = (void *)mp->b_rptr; 1853 iocp->ioc_error = 0; 1854 need_privilege = B_TRUE; 1855 cmd = iocp->ioc_cmd; 1856 switch (cmd) { 1857 default: 1858 miocnak(wq, mp, 0, EINVAL); 1859 return; 1860 1861 case BGE_MII_READ: 1862 case BGE_MII_WRITE: 1863 case BGE_SEE_READ: 1864 case BGE_SEE_WRITE: 1865 case BGE_FLASH_READ: 1866 case BGE_FLASH_WRITE: 1867 case BGE_DIAG: 1868 case BGE_PEEK: 1869 case BGE_POKE: 1870 case BGE_PHY_RESET: 1871 case BGE_SOFT_RESET: 1872 case BGE_HARD_RESET: 1873 break; 1874 1875 case LB_GET_INFO_SIZE: 1876 case LB_GET_INFO: 1877 case LB_GET_MODE: 1878 need_privilege = B_FALSE; 1879 /* FALLTHRU */ 1880 case LB_SET_MODE: 1881 break; 1882 1883 } 1884 1885 if (need_privilege) { 1886 /* 1887 * Check for specific net_config privilege on Solaris 10+. 1888 */ 1889 err = secpolicy_net_config(iocp->ioc_cr, B_FALSE); 1890 if (err != 0) { 1891 miocnak(wq, mp, 0, err); 1892 return; 1893 } 1894 } 1895 1896 mutex_enter(bgep->genlock); 1897 if (!(bgep->progress & PROGRESS_INTR)) { 1898 /* can happen during autorecovery */ 1899 mutex_exit(bgep->genlock); 1900 miocnak(wq, mp, 0, EIO); 1901 return; 1902 } 1903 1904 switch (cmd) { 1905 default: 1906 _NOTE(NOTREACHED) 1907 status = IOC_INVAL; 1908 break; 1909 1910 case BGE_MII_READ: 1911 case BGE_MII_WRITE: 1912 case BGE_SEE_READ: 1913 case BGE_SEE_WRITE: 1914 case BGE_FLASH_READ: 1915 case BGE_FLASH_WRITE: 1916 case BGE_DIAG: 1917 case BGE_PEEK: 1918 case BGE_POKE: 1919 case BGE_PHY_RESET: 1920 case BGE_SOFT_RESET: 1921 case BGE_HARD_RESET: 1922 status = bge_chip_ioctl(bgep, wq, mp, iocp); 1923 break; 1924 1925 case LB_GET_INFO_SIZE: 1926 case LB_GET_INFO: 1927 case LB_GET_MODE: 1928 case LB_SET_MODE: 1929 status = bge_loop_ioctl(bgep, wq, mp, iocp); 1930 break; 1931 1932 } 1933 1934 /* 1935 * Do we need to reprogram the PHY and/or the MAC? 1936 * Do it now, while we still have the mutex. 1937 * 1938 * Note: update the PHY first, 'cos it controls the 1939 * speed/duplex parameters that the MAC code uses. 1940 */ 1941 switch (status) { 1942 case IOC_RESTART_REPLY: 1943 case IOC_RESTART_ACK: 1944 if (bge_reprogram(bgep) == IOC_INVAL) 1945 status = IOC_INVAL; 1946 break; 1947 } 1948 1949 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) { 1950 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 1951 status = IOC_INVAL; 1952 } 1953 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) { 1954 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 1955 status = IOC_INVAL; 1956 } 1957 mutex_exit(bgep->genlock); 1958 1959 /* 1960 * Finally, decide how to reply 1961 */ 1962 switch (status) { 1963 default: 1964 case IOC_INVAL: 1965 /* 1966 * Error, reply with a NAK and EINVAL or the specified error 1967 */ 1968 miocnak(wq, mp, 0, iocp->ioc_error == 0 ? 1969 EINVAL : iocp->ioc_error); 1970 break; 1971 1972 case IOC_DONE: 1973 /* 1974 * OK, reply already sent 1975 */ 1976 break; 1977 1978 case IOC_RESTART_ACK: 1979 case IOC_ACK: 1980 /* 1981 * OK, reply with an ACK 1982 */ 1983 miocack(wq, mp, 0, 0); 1984 break; 1985 1986 case IOC_RESTART_REPLY: 1987 case IOC_REPLY: 1988 /* 1989 * OK, send prepared reply as ACK or NAK 1990 */ 1991 mp->b_datap->db_type = iocp->ioc_error == 0 ? 1992 M_IOCACK : M_IOCNAK; 1993 qreply(wq, mp); 1994 break; 1995 } 1996 } 1997 1998 /* 1999 * ========== Per-instance setup/teardown code ========== 2000 */ 2001 2002 #undef BGE_DBG 2003 #define BGE_DBG BGE_DBG_INIT /* debug flag for this code */ 2004 /* 2005 * Allocate an area of memory and a DMA handle for accessing it 2006 */ 2007 static int 2008 bge_alloc_dma_mem(bge_t *bgep, size_t memsize, ddi_device_acc_attr_t *attr_p, 2009 uint_t dma_flags, dma_area_t *dma_p) 2010 { 2011 caddr_t va; 2012 int err; 2013 2014 BGE_TRACE(("bge_alloc_dma_mem($%p, %ld, $%p, 0x%x, $%p)", 2015 (void *)bgep, memsize, attr_p, dma_flags, dma_p)); 2016 2017 /* 2018 * Allocate handle 2019 */ 2020 err = ddi_dma_alloc_handle(bgep->devinfo, &dma_attr, 2021 DDI_DMA_DONTWAIT, NULL, &dma_p->dma_hdl); 2022 if (err != DDI_SUCCESS) 2023 return (DDI_FAILURE); 2024 2025 /* 2026 * Allocate memory 2027 */ 2028 err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize, attr_p, 2029 dma_flags, DDI_DMA_DONTWAIT, NULL, &va, &dma_p->alength, 2030 &dma_p->acc_hdl); 2031 if (err != DDI_SUCCESS) 2032 return (DDI_FAILURE); 2033 2034 /* 2035 * Bind the two together 2036 */ 2037 dma_p->mem_va = va; 2038 err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL, 2039 va, dma_p->alength, dma_flags, DDI_DMA_DONTWAIT, NULL, 2040 &dma_p->cookie, &dma_p->ncookies); 2041 2042 BGE_DEBUG(("bge_alloc_dma_mem(): bind %d bytes; err %d, %d cookies", 2043 dma_p->alength, err, dma_p->ncookies)); 2044 2045 if (err != DDI_DMA_MAPPED || dma_p->ncookies != 1) 2046 return (DDI_FAILURE); 2047 2048 dma_p->nslots = ~0U; 2049 dma_p->size = ~0U; 2050 dma_p->token = ~0U; 2051 dma_p->offset = 0; 2052 return (DDI_SUCCESS); 2053 } 2054 2055 /* 2056 * Free one allocated area of DMAable memory 2057 */ 2058 static void 2059 bge_free_dma_mem(dma_area_t *dma_p) 2060 { 2061 if (dma_p->dma_hdl != NULL) { 2062 if (dma_p->ncookies) { 2063 (void) ddi_dma_unbind_handle(dma_p->dma_hdl); 2064 dma_p->ncookies = 0; 2065 } 2066 ddi_dma_free_handle(&dma_p->dma_hdl); 2067 dma_p->dma_hdl = NULL; 2068 } 2069 2070 if (dma_p->acc_hdl != NULL) { 2071 ddi_dma_mem_free(&dma_p->acc_hdl); 2072 dma_p->acc_hdl = NULL; 2073 } 2074 } 2075 /* 2076 * Utility routine to carve a slice off a chunk of allocated memory, 2077 * updating the chunk descriptor accordingly. The size of the slice 2078 * is given by the product of the <qty> and <size> parameters. 2079 */ 2080 static void 2081 bge_slice_chunk(dma_area_t *slice, dma_area_t *chunk, 2082 uint32_t qty, uint32_t size) 2083 { 2084 static uint32_t sequence = 0xbcd5704a; 2085 size_t totsize; 2086 2087 totsize = qty*size; 2088 ASSERT(totsize <= chunk->alength); 2089 2090 *slice = *chunk; 2091 slice->nslots = qty; 2092 slice->size = size; 2093 slice->alength = totsize; 2094 slice->token = ++sequence; 2095 2096 chunk->mem_va = (caddr_t)chunk->mem_va + totsize; 2097 chunk->alength -= totsize; 2098 chunk->offset += totsize; 2099 chunk->cookie.dmac_laddress += totsize; 2100 chunk->cookie.dmac_size -= totsize; 2101 } 2102 2103 /* 2104 * Initialise the specified Receive Producer (Buffer) Ring, using 2105 * the information in the <dma_area> descriptors that it contains 2106 * to set up all the other fields. This routine should be called 2107 * only once for each ring. 2108 */ 2109 static void 2110 bge_init_buff_ring(bge_t *bgep, uint64_t ring) 2111 { 2112 buff_ring_t *brp; 2113 bge_status_t *bsp; 2114 sw_rbd_t *srbdp; 2115 dma_area_t pbuf; 2116 uint32_t bufsize; 2117 uint32_t nslots; 2118 uint32_t slot; 2119 uint32_t split; 2120 2121 static bge_regno_t nic_ring_addrs[BGE_BUFF_RINGS_MAX] = { 2122 NIC_MEM_SHADOW_BUFF_STD, 2123 NIC_MEM_SHADOW_BUFF_JUMBO, 2124 NIC_MEM_SHADOW_BUFF_MINI 2125 }; 2126 static bge_regno_t mailbox_regs[BGE_BUFF_RINGS_MAX] = { 2127 RECV_STD_PROD_INDEX_REG, 2128 RECV_JUMBO_PROD_INDEX_REG, 2129 RECV_MINI_PROD_INDEX_REG 2130 }; 2131 static bge_regno_t buff_cons_xref[BGE_BUFF_RINGS_MAX] = { 2132 STATUS_STD_BUFF_CONS_INDEX, 2133 STATUS_JUMBO_BUFF_CONS_INDEX, 2134 STATUS_MINI_BUFF_CONS_INDEX 2135 }; 2136 2137 BGE_TRACE(("bge_init_buff_ring($%p, %d)", 2138 (void *)bgep, ring)); 2139 2140 brp = &bgep->buff[ring]; 2141 nslots = brp->desc.nslots; 2142 ASSERT(brp->buf[0].nslots == nslots/BGE_SPLIT); 2143 bufsize = brp->buf[0].size; 2144 2145 /* 2146 * Set up the copy of the h/w RCB 2147 * 2148 * Note: unlike Send & Receive Return Rings, (where the max_len 2149 * field holds the number of slots), in a Receive Buffer Ring 2150 * this field indicates the size of each buffer in the ring. 2151 */ 2152 brp->hw_rcb.host_ring_addr = brp->desc.cookie.dmac_laddress; 2153 brp->hw_rcb.max_len = (uint16_t)bufsize; 2154 brp->hw_rcb.flags = nslots > 0 ? 0 : RCB_FLAG_RING_DISABLED; 2155 brp->hw_rcb.nic_ring_addr = nic_ring_addrs[ring]; 2156 2157 /* 2158 * Other one-off initialisation of per-ring data 2159 */ 2160 brp->bgep = bgep; 2161 bsp = DMA_VPTR(bgep->status_block); 2162 brp->cons_index_p = &bsp->buff_cons_index[buff_cons_xref[ring]]; 2163 brp->chip_mbx_reg = mailbox_regs[ring]; 2164 mutex_init(brp->rf_lock, NULL, MUTEX_DRIVER, 2165 DDI_INTR_PRI(bgep->intr_pri)); 2166 2167 /* 2168 * Allocate the array of s/w Receive Buffer Descriptors 2169 */ 2170 srbdp = kmem_zalloc(nslots*sizeof (*srbdp), KM_SLEEP); 2171 brp->sw_rbds = srbdp; 2172 2173 /* 2174 * Now initialise each array element once and for all 2175 */ 2176 for (split = 0; split < BGE_SPLIT; ++split) { 2177 pbuf = brp->buf[split]; 2178 for (slot = 0; slot < nslots/BGE_SPLIT; ++srbdp, ++slot) 2179 bge_slice_chunk(&srbdp->pbuf, &pbuf, 1, bufsize); 2180 ASSERT(pbuf.alength == 0); 2181 } 2182 } 2183 2184 /* 2185 * Clean up initialisation done above before the memory is freed 2186 */ 2187 static void 2188 bge_fini_buff_ring(bge_t *bgep, uint64_t ring) 2189 { 2190 buff_ring_t *brp; 2191 sw_rbd_t *srbdp; 2192 2193 BGE_TRACE(("bge_fini_buff_ring($%p, %d)", 2194 (void *)bgep, ring)); 2195 2196 brp = &bgep->buff[ring]; 2197 srbdp = brp->sw_rbds; 2198 kmem_free(srbdp, brp->desc.nslots*sizeof (*srbdp)); 2199 2200 mutex_destroy(brp->rf_lock); 2201 } 2202 2203 /* 2204 * Initialise the specified Receive (Return) Ring, using the 2205 * information in the <dma_area> descriptors that it contains 2206 * to set up all the other fields. This routine should be called 2207 * only once for each ring. 2208 */ 2209 static void 2210 bge_init_recv_ring(bge_t *bgep, uint64_t ring) 2211 { 2212 recv_ring_t *rrp; 2213 bge_status_t *bsp; 2214 uint32_t nslots; 2215 2216 BGE_TRACE(("bge_init_recv_ring($%p, %d)", 2217 (void *)bgep, ring)); 2218 2219 /* 2220 * The chip architecture requires that receive return rings have 2221 * 512 or 1024 or 2048 elements per ring. See 570X-PG108-R page 103. 2222 */ 2223 rrp = &bgep->recv[ring]; 2224 nslots = rrp->desc.nslots; 2225 ASSERT(nslots == 0 || nslots == 512 || 2226 nslots == 1024 || nslots == 2048); 2227 2228 /* 2229 * Set up the copy of the h/w RCB 2230 */ 2231 rrp->hw_rcb.host_ring_addr = rrp->desc.cookie.dmac_laddress; 2232 rrp->hw_rcb.max_len = (uint16_t)nslots; 2233 rrp->hw_rcb.flags = nslots > 0 ? 0 : RCB_FLAG_RING_DISABLED; 2234 rrp->hw_rcb.nic_ring_addr = 0; 2235 2236 /* 2237 * Other one-off initialisation of per-ring data 2238 */ 2239 rrp->bgep = bgep; 2240 bsp = DMA_VPTR(bgep->status_block); 2241 rrp->prod_index_p = RECV_INDEX_P(bsp, ring); 2242 rrp->chip_mbx_reg = RECV_RING_CONS_INDEX_REG(ring); 2243 mutex_init(rrp->rx_lock, NULL, MUTEX_DRIVER, 2244 DDI_INTR_PRI(bgep->intr_pri)); 2245 } 2246 2247 2248 /* 2249 * Clean up initialisation done above before the memory is freed 2250 */ 2251 static void 2252 bge_fini_recv_ring(bge_t *bgep, uint64_t ring) 2253 { 2254 recv_ring_t *rrp; 2255 2256 BGE_TRACE(("bge_fini_recv_ring($%p, %d)", 2257 (void *)bgep, ring)); 2258 2259 rrp = &bgep->recv[ring]; 2260 if (rrp->rx_softint) 2261 ddi_remove_softintr(rrp->rx_softint); 2262 mutex_destroy(rrp->rx_lock); 2263 } 2264 2265 /* 2266 * Initialise the specified Send Ring, using the information in the 2267 * <dma_area> descriptors that it contains to set up all the other 2268 * fields. This routine should be called only once for each ring. 2269 */ 2270 static void 2271 bge_init_send_ring(bge_t *bgep, uint64_t ring) 2272 { 2273 send_ring_t *srp; 2274 bge_status_t *bsp; 2275 sw_sbd_t *ssbdp; 2276 dma_area_t desc; 2277 dma_area_t pbuf; 2278 uint32_t nslots; 2279 uint32_t slot; 2280 uint32_t split; 2281 sw_txbuf_t *txbuf; 2282 2283 BGE_TRACE(("bge_init_send_ring($%p, %d)", 2284 (void *)bgep, ring)); 2285 2286 /* 2287 * The chip architecture requires that host-based send rings 2288 * have 512 elements per ring. See 570X-PG102-R page 56. 2289 */ 2290 srp = &bgep->send[ring]; 2291 nslots = srp->desc.nslots; 2292 ASSERT(nslots == 0 || nslots == 512); 2293 2294 /* 2295 * Set up the copy of the h/w RCB 2296 */ 2297 srp->hw_rcb.host_ring_addr = srp->desc.cookie.dmac_laddress; 2298 srp->hw_rcb.max_len = (uint16_t)nslots; 2299 srp->hw_rcb.flags = nslots > 0 ? 0 : RCB_FLAG_RING_DISABLED; 2300 srp->hw_rcb.nic_ring_addr = NIC_MEM_SHADOW_SEND_RING(ring, nslots); 2301 2302 /* 2303 * Other one-off initialisation of per-ring data 2304 */ 2305 srp->bgep = bgep; 2306 bsp = DMA_VPTR(bgep->status_block); 2307 srp->cons_index_p = SEND_INDEX_P(bsp, ring); 2308 srp->chip_mbx_reg = SEND_RING_HOST_INDEX_REG(ring); 2309 mutex_init(srp->tx_lock, NULL, MUTEX_DRIVER, 2310 DDI_INTR_PRI(bgep->intr_pri)); 2311 mutex_init(srp->txbuf_lock, NULL, MUTEX_DRIVER, 2312 DDI_INTR_PRI(bgep->intr_pri)); 2313 mutex_init(srp->freetxbuf_lock, NULL, MUTEX_DRIVER, 2314 DDI_INTR_PRI(bgep->intr_pri)); 2315 mutex_init(srp->tc_lock, NULL, MUTEX_DRIVER, 2316 DDI_INTR_PRI(bgep->intr_pri)); 2317 if (nslots == 0) 2318 return; 2319 2320 /* 2321 * Allocate the array of s/w Send Buffer Descriptors 2322 */ 2323 ssbdp = kmem_zalloc(nslots*sizeof (*ssbdp), KM_SLEEP); 2324 txbuf = kmem_zalloc(BGE_SEND_BUF_MAX*sizeof (*txbuf), KM_SLEEP); 2325 srp->txbuf_head = 2326 kmem_zalloc(BGE_SEND_BUF_MAX*sizeof (bge_queue_item_t), KM_SLEEP); 2327 srp->pktp = kmem_zalloc(BGE_SEND_BUF_MAX*sizeof (send_pkt_t), KM_SLEEP); 2328 srp->sw_sbds = ssbdp; 2329 srp->txbuf = txbuf; 2330 srp->tx_buffers = BGE_SEND_BUF_NUM; 2331 srp->tx_buffers_low = srp->tx_buffers / 4; 2332 if (bgep->chipid.snd_buff_size > BGE_SEND_BUFF_SIZE_DEFAULT) 2333 srp->tx_array_max = BGE_SEND_BUF_ARRAY_JUMBO; 2334 else 2335 srp->tx_array_max = BGE_SEND_BUF_ARRAY; 2336 srp->tx_array = 1; 2337 2338 /* 2339 * Chunk tx desc area 2340 */ 2341 desc = srp->desc; 2342 for (slot = 0; slot < nslots; ++ssbdp, ++slot) { 2343 bge_slice_chunk(&ssbdp->desc, &desc, 1, 2344 sizeof (bge_sbd_t)); 2345 } 2346 ASSERT(desc.alength == 0); 2347 2348 /* 2349 * Chunk tx buffer area 2350 */ 2351 for (split = 0; split < BGE_SPLIT; ++split) { 2352 pbuf = srp->buf[0][split]; 2353 for (slot = 0; slot < BGE_SEND_BUF_NUM/BGE_SPLIT; ++slot) { 2354 bge_slice_chunk(&txbuf->buf, &pbuf, 1, 2355 bgep->chipid.snd_buff_size); 2356 txbuf++; 2357 } 2358 ASSERT(pbuf.alength == 0); 2359 } 2360 } 2361 2362 /* 2363 * Clean up initialisation done above before the memory is freed 2364 */ 2365 static void 2366 bge_fini_send_ring(bge_t *bgep, uint64_t ring) 2367 { 2368 send_ring_t *srp; 2369 uint32_t array; 2370 uint32_t split; 2371 uint32_t nslots; 2372 2373 BGE_TRACE(("bge_fini_send_ring($%p, %d)", 2374 (void *)bgep, ring)); 2375 2376 srp = &bgep->send[ring]; 2377 mutex_destroy(srp->tc_lock); 2378 mutex_destroy(srp->freetxbuf_lock); 2379 mutex_destroy(srp->txbuf_lock); 2380 mutex_destroy(srp->tx_lock); 2381 nslots = srp->desc.nslots; 2382 if (nslots == 0) 2383 return; 2384 2385 for (array = 1; array < srp->tx_array; ++array) 2386 for (split = 0; split < BGE_SPLIT; ++split) 2387 bge_free_dma_mem(&srp->buf[array][split]); 2388 kmem_free(srp->sw_sbds, nslots*sizeof (*srp->sw_sbds)); 2389 kmem_free(srp->txbuf_head, BGE_SEND_BUF_MAX*sizeof (*srp->txbuf_head)); 2390 kmem_free(srp->txbuf, BGE_SEND_BUF_MAX*sizeof (*srp->txbuf)); 2391 kmem_free(srp->pktp, BGE_SEND_BUF_MAX*sizeof (*srp->pktp)); 2392 srp->sw_sbds = NULL; 2393 srp->txbuf_head = NULL; 2394 srp->txbuf = NULL; 2395 srp->pktp = NULL; 2396 } 2397 2398 /* 2399 * Initialise all transmit, receive, and buffer rings. 2400 */ 2401 void 2402 bge_init_rings(bge_t *bgep) 2403 { 2404 uint32_t ring; 2405 2406 BGE_TRACE(("bge_init_rings($%p)", (void *)bgep)); 2407 2408 /* 2409 * Perform one-off initialisation of each ring ... 2410 */ 2411 for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring) 2412 bge_init_send_ring(bgep, ring); 2413 for (ring = 0; ring < BGE_RECV_RINGS_MAX; ++ring) 2414 bge_init_recv_ring(bgep, ring); 2415 for (ring = 0; ring < BGE_BUFF_RINGS_MAX; ++ring) 2416 bge_init_buff_ring(bgep, ring); 2417 } 2418 2419 /* 2420 * Undo the work of bge_init_rings() above before the memory is freed 2421 */ 2422 void 2423 bge_fini_rings(bge_t *bgep) 2424 { 2425 uint32_t ring; 2426 2427 BGE_TRACE(("bge_fini_rings($%p)", (void *)bgep)); 2428 2429 for (ring = 0; ring < BGE_BUFF_RINGS_MAX; ++ring) 2430 bge_fini_buff_ring(bgep, ring); 2431 for (ring = 0; ring < BGE_RECV_RINGS_MAX; ++ring) 2432 bge_fini_recv_ring(bgep, ring); 2433 for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring) 2434 bge_fini_send_ring(bgep, ring); 2435 } 2436 2437 /* 2438 * Called from the bge_m_stop() to free the tx buffers which are 2439 * allocated from the tx process. 2440 */ 2441 void 2442 bge_free_txbuf_arrays(send_ring_t *srp) 2443 { 2444 uint32_t array; 2445 uint32_t split; 2446 2447 ASSERT(mutex_owned(srp->tx_lock)); 2448 2449 /* 2450 * Free the extra tx buffer DMA area 2451 */ 2452 for (array = 1; array < srp->tx_array; ++array) 2453 for (split = 0; split < BGE_SPLIT; ++split) 2454 bge_free_dma_mem(&srp->buf[array][split]); 2455 2456 /* 2457 * Restore initial tx buffer numbers 2458 */ 2459 srp->tx_array = 1; 2460 srp->tx_buffers = BGE_SEND_BUF_NUM; 2461 srp->tx_buffers_low = srp->tx_buffers / 4; 2462 srp->tx_flow = 0; 2463 bzero(srp->pktp, BGE_SEND_BUF_MAX * sizeof (*srp->pktp)); 2464 } 2465 2466 /* 2467 * Called from tx process to allocate more tx buffers 2468 */ 2469 bge_queue_item_t * 2470 bge_alloc_txbuf_array(bge_t *bgep, send_ring_t *srp) 2471 { 2472 bge_queue_t *txbuf_queue; 2473 bge_queue_item_t *txbuf_item_last; 2474 bge_queue_item_t *txbuf_item; 2475 bge_queue_item_t *txbuf_item_rtn; 2476 sw_txbuf_t *txbuf; 2477 dma_area_t area; 2478 size_t txbuffsize; 2479 uint32_t slot; 2480 uint32_t array; 2481 uint32_t split; 2482 uint32_t err; 2483 2484 ASSERT(mutex_owned(srp->tx_lock)); 2485 2486 array = srp->tx_array; 2487 if (array >= srp->tx_array_max) 2488 return (NULL); 2489 2490 /* 2491 * Allocate memory & handles for TX buffers 2492 */ 2493 txbuffsize = BGE_SEND_BUF_NUM*bgep->chipid.snd_buff_size; 2494 ASSERT((txbuffsize % BGE_SPLIT) == 0); 2495 for (split = 0; split < BGE_SPLIT; ++split) { 2496 err = bge_alloc_dma_mem(bgep, txbuffsize/BGE_SPLIT, 2497 &bge_data_accattr, DDI_DMA_WRITE | BGE_DMA_MODE, 2498 &srp->buf[array][split]); 2499 if (err != DDI_SUCCESS) { 2500 /* Free the last already allocated OK chunks */ 2501 for (slot = 0; slot <= split; ++slot) 2502 bge_free_dma_mem(&srp->buf[array][slot]); 2503 srp->tx_alloc_fail++; 2504 return (NULL); 2505 } 2506 } 2507 2508 /* 2509 * Chunk tx buffer area 2510 */ 2511 txbuf = srp->txbuf + array*BGE_SEND_BUF_NUM; 2512 for (split = 0; split < BGE_SPLIT; ++split) { 2513 area = srp->buf[array][split]; 2514 for (slot = 0; slot < BGE_SEND_BUF_NUM/BGE_SPLIT; ++slot) { 2515 bge_slice_chunk(&txbuf->buf, &area, 1, 2516 bgep->chipid.snd_buff_size); 2517 txbuf++; 2518 } 2519 } 2520 2521 /* 2522 * Add above buffers to the tx buffer pop queue 2523 */ 2524 txbuf_item = srp->txbuf_head + array*BGE_SEND_BUF_NUM; 2525 txbuf = srp->txbuf + array*BGE_SEND_BUF_NUM; 2526 txbuf_item_last = NULL; 2527 for (slot = 0; slot < BGE_SEND_BUF_NUM; ++slot) { 2528 txbuf_item->item = txbuf; 2529 txbuf_item->next = txbuf_item_last; 2530 txbuf_item_last = txbuf_item; 2531 txbuf++; 2532 txbuf_item++; 2533 } 2534 txbuf_item = srp->txbuf_head + array*BGE_SEND_BUF_NUM; 2535 txbuf_item_rtn = txbuf_item; 2536 txbuf_item++; 2537 txbuf_queue = srp->txbuf_pop_queue; 2538 mutex_enter(txbuf_queue->lock); 2539 txbuf_item->next = txbuf_queue->head; 2540 txbuf_queue->head = txbuf_item_last; 2541 txbuf_queue->count += BGE_SEND_BUF_NUM - 1; 2542 mutex_exit(txbuf_queue->lock); 2543 2544 srp->tx_array++; 2545 srp->tx_buffers += BGE_SEND_BUF_NUM; 2546 srp->tx_buffers_low = srp->tx_buffers / 4; 2547 2548 return (txbuf_item_rtn); 2549 } 2550 2551 /* 2552 * This function allocates all the transmit and receive buffers 2553 * and descriptors, in four chunks. 2554 */ 2555 int 2556 bge_alloc_bufs(bge_t *bgep) 2557 { 2558 dma_area_t area; 2559 size_t rxbuffsize; 2560 size_t txbuffsize; 2561 size_t rxbuffdescsize; 2562 size_t rxdescsize; 2563 size_t txdescsize; 2564 uint32_t ring; 2565 uint32_t rx_rings = bgep->chipid.rx_rings; 2566 uint32_t tx_rings = bgep->chipid.tx_rings; 2567 int split; 2568 int err; 2569 2570 BGE_TRACE(("bge_alloc_bufs($%p)", 2571 (void *)bgep)); 2572 2573 rxbuffsize = BGE_STD_SLOTS_USED*bgep->chipid.std_buf_size; 2574 rxbuffsize += bgep->chipid.jumbo_slots*bgep->chipid.recv_jumbo_size; 2575 rxbuffsize += BGE_MINI_SLOTS_USED*BGE_MINI_BUFF_SIZE; 2576 2577 txbuffsize = BGE_SEND_BUF_NUM*bgep->chipid.snd_buff_size; 2578 txbuffsize *= tx_rings; 2579 2580 rxdescsize = rx_rings*bgep->chipid.recv_slots; 2581 rxdescsize *= sizeof (bge_rbd_t); 2582 2583 rxbuffdescsize = BGE_STD_SLOTS_USED; 2584 rxbuffdescsize += bgep->chipid.jumbo_slots; 2585 rxbuffdescsize += BGE_MINI_SLOTS_USED; 2586 rxbuffdescsize *= sizeof (bge_rbd_t); 2587 2588 txdescsize = tx_rings*BGE_SEND_SLOTS_USED; 2589 txdescsize *= sizeof (bge_sbd_t); 2590 txdescsize += sizeof (bge_statistics_t); 2591 txdescsize += sizeof (bge_status_t); 2592 txdescsize += BGE_STATUS_PADDING; 2593 2594 /* 2595 * Enable PCI relaxed ordering only for RX/TX data buffers 2596 */ 2597 if (bge_relaxed_ordering) 2598 dma_attr.dma_attr_flags |= DDI_DMA_RELAXED_ORDERING; 2599 2600 /* 2601 * Allocate memory & handles for RX buffers 2602 */ 2603 ASSERT((rxbuffsize % BGE_SPLIT) == 0); 2604 for (split = 0; split < BGE_SPLIT; ++split) { 2605 err = bge_alloc_dma_mem(bgep, rxbuffsize/BGE_SPLIT, 2606 &bge_data_accattr, DDI_DMA_READ | BGE_DMA_MODE, 2607 &bgep->rx_buff[split]); 2608 if (err != DDI_SUCCESS) 2609 return (DDI_FAILURE); 2610 } 2611 2612 /* 2613 * Allocate memory & handles for TX buffers 2614 */ 2615 ASSERT((txbuffsize % BGE_SPLIT) == 0); 2616 for (split = 0; split < BGE_SPLIT; ++split) { 2617 err = bge_alloc_dma_mem(bgep, txbuffsize/BGE_SPLIT, 2618 &bge_data_accattr, DDI_DMA_WRITE | BGE_DMA_MODE, 2619 &bgep->tx_buff[split]); 2620 if (err != DDI_SUCCESS) 2621 return (DDI_FAILURE); 2622 } 2623 2624 dma_attr.dma_attr_flags &= ~DDI_DMA_RELAXED_ORDERING; 2625 2626 /* 2627 * Allocate memory & handles for receive return rings 2628 */ 2629 ASSERT((rxdescsize % rx_rings) == 0); 2630 for (split = 0; split < rx_rings; ++split) { 2631 err = bge_alloc_dma_mem(bgep, rxdescsize/rx_rings, 2632 &bge_desc_accattr, DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 2633 &bgep->rx_desc[split]); 2634 if (err != DDI_SUCCESS) 2635 return (DDI_FAILURE); 2636 } 2637 2638 /* 2639 * Allocate memory & handles for buffer (producer) descriptor rings 2640 */ 2641 err = bge_alloc_dma_mem(bgep, rxbuffdescsize, &bge_desc_accattr, 2642 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &bgep->rx_desc[split]); 2643 if (err != DDI_SUCCESS) 2644 return (DDI_FAILURE); 2645 2646 /* 2647 * Allocate memory & handles for TX descriptor rings, 2648 * status block, and statistics area 2649 */ 2650 err = bge_alloc_dma_mem(bgep, txdescsize, &bge_desc_accattr, 2651 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &bgep->tx_desc); 2652 if (err != DDI_SUCCESS) 2653 return (DDI_FAILURE); 2654 2655 /* 2656 * Now carve up each of the allocated areas ... 2657 */ 2658 for (split = 0; split < BGE_SPLIT; ++split) { 2659 area = bgep->rx_buff[split]; 2660 bge_slice_chunk(&bgep->buff[BGE_STD_BUFF_RING].buf[split], 2661 &area, BGE_STD_SLOTS_USED/BGE_SPLIT, 2662 bgep->chipid.std_buf_size); 2663 bge_slice_chunk(&bgep->buff[BGE_JUMBO_BUFF_RING].buf[split], 2664 &area, bgep->chipid.jumbo_slots/BGE_SPLIT, 2665 bgep->chipid.recv_jumbo_size); 2666 bge_slice_chunk(&bgep->buff[BGE_MINI_BUFF_RING].buf[split], 2667 &area, BGE_MINI_SLOTS_USED/BGE_SPLIT, 2668 BGE_MINI_BUFF_SIZE); 2669 } 2670 2671 for (split = 0; split < BGE_SPLIT; ++split) { 2672 area = bgep->tx_buff[split]; 2673 for (ring = 0; ring < tx_rings; ++ring) 2674 bge_slice_chunk(&bgep->send[ring].buf[0][split], 2675 &area, BGE_SEND_BUF_NUM/BGE_SPLIT, 2676 bgep->chipid.snd_buff_size); 2677 for (; ring < BGE_SEND_RINGS_MAX; ++ring) 2678 bge_slice_chunk(&bgep->send[ring].buf[0][split], 2679 &area, 0, bgep->chipid.snd_buff_size); 2680 } 2681 2682 for (ring = 0; ring < rx_rings; ++ring) 2683 bge_slice_chunk(&bgep->recv[ring].desc, &bgep->rx_desc[ring], 2684 bgep->chipid.recv_slots, sizeof (bge_rbd_t)); 2685 2686 area = bgep->rx_desc[rx_rings]; 2687 for (; ring < BGE_RECV_RINGS_MAX; ++ring) 2688 bge_slice_chunk(&bgep->recv[ring].desc, &area, 2689 0, sizeof (bge_rbd_t)); 2690 bge_slice_chunk(&bgep->buff[BGE_STD_BUFF_RING].desc, &area, 2691 BGE_STD_SLOTS_USED, sizeof (bge_rbd_t)); 2692 bge_slice_chunk(&bgep->buff[BGE_JUMBO_BUFF_RING].desc, &area, 2693 bgep->chipid.jumbo_slots, sizeof (bge_rbd_t)); 2694 bge_slice_chunk(&bgep->buff[BGE_MINI_BUFF_RING].desc, &area, 2695 BGE_MINI_SLOTS_USED, sizeof (bge_rbd_t)); 2696 ASSERT(area.alength == 0); 2697 2698 area = bgep->tx_desc; 2699 for (ring = 0; ring < tx_rings; ++ring) 2700 bge_slice_chunk(&bgep->send[ring].desc, &area, 2701 BGE_SEND_SLOTS_USED, sizeof (bge_sbd_t)); 2702 for (; ring < BGE_SEND_RINGS_MAX; ++ring) 2703 bge_slice_chunk(&bgep->send[ring].desc, &area, 2704 0, sizeof (bge_sbd_t)); 2705 bge_slice_chunk(&bgep->statistics, &area, 1, sizeof (bge_statistics_t)); 2706 bge_slice_chunk(&bgep->status_block, &area, 1, sizeof (bge_status_t)); 2707 ASSERT(area.alength == BGE_STATUS_PADDING); 2708 DMA_ZERO(bgep->status_block); 2709 2710 return (DDI_SUCCESS); 2711 } 2712 2713 /* 2714 * This routine frees the transmit and receive buffers and descriptors. 2715 * Make sure the chip is stopped before calling it! 2716 */ 2717 void 2718 bge_free_bufs(bge_t *bgep) 2719 { 2720 int split; 2721 2722 BGE_TRACE(("bge_free_bufs($%p)", 2723 (void *)bgep)); 2724 2725 bge_free_dma_mem(&bgep->tx_desc); 2726 for (split = 0; split < BGE_RECV_RINGS_SPLIT; ++split) 2727 bge_free_dma_mem(&bgep->rx_desc[split]); 2728 for (split = 0; split < BGE_SPLIT; ++split) 2729 bge_free_dma_mem(&bgep->tx_buff[split]); 2730 for (split = 0; split < BGE_SPLIT; ++split) 2731 bge_free_dma_mem(&bgep->rx_buff[split]); 2732 } 2733 2734 /* 2735 * Determine (initial) MAC address ("BIA") to use for this interface 2736 */ 2737 2738 static void 2739 bge_find_mac_address(bge_t *bgep, chip_id_t *cidp) 2740 { 2741 struct ether_addr sysaddr; 2742 char propbuf[8]; /* "true" or "false", plus NUL */ 2743 uchar_t *bytes; 2744 int *ints; 2745 uint_t nelts; 2746 int err; 2747 2748 BGE_TRACE(("bge_find_mac_address($%p)", 2749 (void *)bgep)); 2750 2751 BGE_DEBUG(("bge_find_mac_address: hw_mac_addr %012llx, => %s (%sset)", 2752 cidp->hw_mac_addr, 2753 ether_sprintf((void *)cidp->vendor_addr.addr), 2754 cidp->vendor_addr.set ? "" : "not ")); 2755 2756 /* 2757 * The "vendor's factory-set address" may already have 2758 * been extracted from the chip, but if the property 2759 * "local-mac-address" is set we use that instead. It 2760 * will normally be set by OBP, but it could also be 2761 * specified in a .conf file(!) 2762 * 2763 * There doesn't seem to be a way to define byte-array 2764 * properties in a .conf, so we check whether it looks 2765 * like an array of 6 ints instead. 2766 * 2767 * Then, we check whether it looks like an array of 6 2768 * bytes (which it should, if OBP set it). If we can't 2769 * make sense of it either way, we'll ignore it. 2770 */ 2771 err = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, bgep->devinfo, 2772 DDI_PROP_DONTPASS, localmac_propname, &ints, &nelts); 2773 if (err == DDI_PROP_SUCCESS) { 2774 if (nelts == ETHERADDRL) { 2775 while (nelts--) 2776 cidp->vendor_addr.addr[nelts] = ints[nelts]; 2777 cidp->vendor_addr.set = B_TRUE; 2778 } 2779 ddi_prop_free(ints); 2780 } 2781 2782 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, bgep->devinfo, 2783 DDI_PROP_DONTPASS, localmac_propname, &bytes, &nelts); 2784 if (err == DDI_PROP_SUCCESS) { 2785 if (nelts == ETHERADDRL) { 2786 while (nelts--) 2787 cidp->vendor_addr.addr[nelts] = bytes[nelts]; 2788 cidp->vendor_addr.set = B_TRUE; 2789 } 2790 ddi_prop_free(bytes); 2791 } 2792 2793 BGE_DEBUG(("bge_find_mac_address: +local %s (%sset)", 2794 ether_sprintf((void *)cidp->vendor_addr.addr), 2795 cidp->vendor_addr.set ? "" : "not ")); 2796 2797 /* 2798 * Look up the OBP property "local-mac-address?". Note that even 2799 * though its value is a string (which should be "true" or "false"), 2800 * it can't be decoded by ddi_prop_lookup_string(9F). So, we zero 2801 * the buffer first and then fetch the property as an untyped array; 2802 * this may or may not include a final NUL, but since there will 2803 * always be one left at the end of the buffer we can now treat it 2804 * as a string anyway. 2805 */ 2806 nelts = sizeof (propbuf); 2807 bzero(propbuf, nelts--); 2808 err = ddi_getlongprop_buf(DDI_DEV_T_ANY, bgep->devinfo, 2809 DDI_PROP_CANSLEEP, localmac_boolname, propbuf, (int *)&nelts); 2810 2811 /* 2812 * Now, if the address still isn't set from the hardware (SEEPROM) 2813 * or the OBP or .conf property, OR if the user has foolishly set 2814 * 'local-mac-address? = false', use "the system address" instead 2815 * (but only if it's non-null i.e. has been set from the IDPROM). 2816 */ 2817 if (cidp->vendor_addr.set == B_FALSE || strcmp(propbuf, "false") == 0) 2818 if (localetheraddr(NULL, &sysaddr) != 0) { 2819 ethaddr_copy(&sysaddr, cidp->vendor_addr.addr); 2820 cidp->vendor_addr.set = B_TRUE; 2821 } 2822 2823 BGE_DEBUG(("bge_find_mac_address: +system %s (%sset)", 2824 ether_sprintf((void *)cidp->vendor_addr.addr), 2825 cidp->vendor_addr.set ? "" : "not ")); 2826 2827 /* 2828 * Finally(!), if there's a valid "mac-address" property (created 2829 * if we netbooted from this interface), we must use this instead 2830 * of any of the above to ensure that the NFS/install server doesn't 2831 * get confused by the address changing as Solaris takes over! 2832 */ 2833 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, bgep->devinfo, 2834 DDI_PROP_DONTPASS, macaddr_propname, &bytes, &nelts); 2835 if (err == DDI_PROP_SUCCESS) { 2836 if (nelts == ETHERADDRL) { 2837 while (nelts--) 2838 cidp->vendor_addr.addr[nelts] = bytes[nelts]; 2839 cidp->vendor_addr.set = B_TRUE; 2840 } 2841 ddi_prop_free(bytes); 2842 } 2843 2844 BGE_DEBUG(("bge_find_mac_address: =final %s (%sset)", 2845 ether_sprintf((void *)cidp->vendor_addr.addr), 2846 cidp->vendor_addr.set ? "" : "not ")); 2847 } 2848 2849 2850 /*ARGSUSED*/ 2851 int 2852 bge_check_acc_handle(bge_t *bgep, ddi_acc_handle_t handle) 2853 { 2854 ddi_fm_error_t de; 2855 2856 ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION); 2857 ddi_fm_acc_err_clear(handle, DDI_FME_VERSION); 2858 return (de.fme_status); 2859 } 2860 2861 /*ARGSUSED*/ 2862 int 2863 bge_check_dma_handle(bge_t *bgep, ddi_dma_handle_t handle) 2864 { 2865 ddi_fm_error_t de; 2866 2867 ASSERT(bgep->progress & PROGRESS_BUFS); 2868 ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION); 2869 return (de.fme_status); 2870 } 2871 2872 /* 2873 * The IO fault service error handling callback function 2874 */ 2875 /*ARGSUSED*/ 2876 static int 2877 bge_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data) 2878 { 2879 /* 2880 * as the driver can always deal with an error in any dma or 2881 * access handle, we can just return the fme_status value. 2882 */ 2883 pci_ereport_post(dip, err, NULL); 2884 return (err->fme_status); 2885 } 2886 2887 static void 2888 bge_fm_init(bge_t *bgep) 2889 { 2890 ddi_iblock_cookie_t iblk; 2891 2892 /* Only register with IO Fault Services if we have some capability */ 2893 if (bgep->fm_capabilities) { 2894 bge_reg_accattr.devacc_attr_access = DDI_FLAGERR_ACC; 2895 bge_desc_accattr.devacc_attr_access = DDI_FLAGERR_ACC; 2896 dma_attr.dma_attr_flags = DDI_DMA_FLAGERR; 2897 2898 /* Register capabilities with IO Fault Services */ 2899 ddi_fm_init(bgep->devinfo, &bgep->fm_capabilities, &iblk); 2900 2901 /* 2902 * Initialize pci ereport capabilities if ereport capable 2903 */ 2904 if (DDI_FM_EREPORT_CAP(bgep->fm_capabilities) || 2905 DDI_FM_ERRCB_CAP(bgep->fm_capabilities)) 2906 pci_ereport_setup(bgep->devinfo); 2907 2908 /* 2909 * Register error callback if error callback capable 2910 */ 2911 if (DDI_FM_ERRCB_CAP(bgep->fm_capabilities)) 2912 ddi_fm_handler_register(bgep->devinfo, 2913 bge_fm_error_cb, (void*) bgep); 2914 } else { 2915 /* 2916 * These fields have to be cleared of FMA if there are no 2917 * FMA capabilities at runtime. 2918 */ 2919 bge_reg_accattr.devacc_attr_access = DDI_DEFAULT_ACC; 2920 bge_desc_accattr.devacc_attr_access = DDI_DEFAULT_ACC; 2921 dma_attr.dma_attr_flags = 0; 2922 } 2923 } 2924 2925 static void 2926 bge_fm_fini(bge_t *bgep) 2927 { 2928 /* Only unregister FMA capabilities if we registered some */ 2929 if (bgep->fm_capabilities) { 2930 2931 /* 2932 * Release any resources allocated by pci_ereport_setup() 2933 */ 2934 if (DDI_FM_EREPORT_CAP(bgep->fm_capabilities) || 2935 DDI_FM_ERRCB_CAP(bgep->fm_capabilities)) 2936 pci_ereport_teardown(bgep->devinfo); 2937 2938 /* 2939 * Un-register error callback if error callback capable 2940 */ 2941 if (DDI_FM_ERRCB_CAP(bgep->fm_capabilities)) 2942 ddi_fm_handler_unregister(bgep->devinfo); 2943 2944 /* Unregister from IO Fault Services */ 2945 ddi_fm_fini(bgep->devinfo); 2946 } 2947 } 2948 2949 static void 2950 #ifdef BGE_IPMI_ASF 2951 bge_unattach(bge_t *bgep, uint_t asf_mode) 2952 #else 2953 bge_unattach(bge_t *bgep) 2954 #endif 2955 { 2956 BGE_TRACE(("bge_unattach($%p)", 2957 (void *)bgep)); 2958 2959 /* 2960 * Flag that no more activity may be initiated 2961 */ 2962 bgep->progress &= ~PROGRESS_READY; 2963 2964 /* 2965 * Quiesce the PHY and MAC (leave it reset but still powered). 2966 * Clean up and free all BGE data structures 2967 */ 2968 if (bgep->periodic_id != NULL) { 2969 ddi_periodic_delete(bgep->periodic_id); 2970 bgep->periodic_id = NULL; 2971 } 2972 if (bgep->progress & PROGRESS_KSTATS) 2973 bge_fini_kstats(bgep); 2974 if (bgep->progress & PROGRESS_PHY) 2975 bge_phys_reset(bgep); 2976 if (bgep->progress & PROGRESS_HWINT) { 2977 mutex_enter(bgep->genlock); 2978 #ifdef BGE_IPMI_ASF 2979 if (bge_chip_reset(bgep, B_FALSE, asf_mode) != DDI_SUCCESS) 2980 #else 2981 if (bge_chip_reset(bgep, B_FALSE) != DDI_SUCCESS) 2982 #endif 2983 ddi_fm_service_impact(bgep->devinfo, 2984 DDI_SERVICE_UNAFFECTED); 2985 #ifdef BGE_IPMI_ASF 2986 if (bgep->asf_enabled) { 2987 /* 2988 * This register has been overlaid. We restore its 2989 * initial value here. 2990 */ 2991 bge_nic_put32(bgep, BGE_NIC_DATA_SIG_ADDR, 2992 BGE_NIC_DATA_SIG); 2993 } 2994 #endif 2995 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) 2996 ddi_fm_service_impact(bgep->devinfo, 2997 DDI_SERVICE_UNAFFECTED); 2998 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) 2999 ddi_fm_service_impact(bgep->devinfo, 3000 DDI_SERVICE_UNAFFECTED); 3001 mutex_exit(bgep->genlock); 3002 } 3003 if (bgep->progress & PROGRESS_INTR) { 3004 bge_intr_disable(bgep); 3005 bge_fini_rings(bgep); 3006 } 3007 if (bgep->progress & PROGRESS_HWINT) { 3008 bge_rem_intrs(bgep); 3009 rw_destroy(bgep->errlock); 3010 mutex_destroy(bgep->softintrlock); 3011 mutex_destroy(bgep->genlock); 3012 } 3013 if (bgep->progress & PROGRESS_FACTOTUM) 3014 ddi_remove_softintr(bgep->factotum_id); 3015 if (bgep->progress & PROGRESS_RESCHED) 3016 ddi_remove_softintr(bgep->drain_id); 3017 if (bgep->progress & PROGRESS_BUFS) 3018 bge_free_bufs(bgep); 3019 if (bgep->progress & PROGRESS_REGS) 3020 ddi_regs_map_free(&bgep->io_handle); 3021 if (bgep->progress & PROGRESS_CFG) 3022 pci_config_teardown(&bgep->cfg_handle); 3023 3024 bge_fm_fini(bgep); 3025 3026 ddi_remove_minor_node(bgep->devinfo, NULL); 3027 kmem_free(bgep->pstats, sizeof (bge_statistics_reg_t)); 3028 kmem_free(bgep, sizeof (*bgep)); 3029 } 3030 3031 static int 3032 bge_resume(dev_info_t *devinfo) 3033 { 3034 bge_t *bgep; /* Our private data */ 3035 chip_id_t *cidp; 3036 chip_id_t chipid; 3037 3038 bgep = ddi_get_driver_private(devinfo); 3039 if (bgep == NULL) 3040 return (DDI_FAILURE); 3041 3042 /* 3043 * Refuse to resume if the data structures aren't consistent 3044 */ 3045 if (bgep->devinfo != devinfo) 3046 return (DDI_FAILURE); 3047 3048 #ifdef BGE_IPMI_ASF 3049 /* 3050 * Power management hasn't been supported in BGE now. If you 3051 * want to implement it, please add the ASF/IPMI related 3052 * code here. 3053 */ 3054 3055 #endif 3056 3057 /* 3058 * Read chip ID & set up config space command register(s) 3059 * Refuse to resume if the chip has changed its identity! 3060 */ 3061 cidp = &bgep->chipid; 3062 mutex_enter(bgep->genlock); 3063 bge_chip_cfg_init(bgep, &chipid, B_FALSE); 3064 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) { 3065 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 3066 mutex_exit(bgep->genlock); 3067 return (DDI_FAILURE); 3068 } 3069 mutex_exit(bgep->genlock); 3070 if (chipid.vendor != cidp->vendor) 3071 return (DDI_FAILURE); 3072 if (chipid.device != cidp->device) 3073 return (DDI_FAILURE); 3074 if (chipid.revision != cidp->revision) 3075 return (DDI_FAILURE); 3076 if (chipid.asic_rev != cidp->asic_rev) 3077 return (DDI_FAILURE); 3078 3079 /* 3080 * All OK, reinitialise h/w & kick off GLD scheduling 3081 */ 3082 mutex_enter(bgep->genlock); 3083 if (bge_restart(bgep, B_TRUE) != DDI_SUCCESS) { 3084 (void) bge_check_acc_handle(bgep, bgep->cfg_handle); 3085 (void) bge_check_acc_handle(bgep, bgep->io_handle); 3086 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 3087 mutex_exit(bgep->genlock); 3088 return (DDI_FAILURE); 3089 } 3090 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) { 3091 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 3092 mutex_exit(bgep->genlock); 3093 return (DDI_FAILURE); 3094 } 3095 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) { 3096 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 3097 mutex_exit(bgep->genlock); 3098 return (DDI_FAILURE); 3099 } 3100 mutex_exit(bgep->genlock); 3101 return (DDI_SUCCESS); 3102 } 3103 3104 /* 3105 * attach(9E) -- Attach a device to the system 3106 * 3107 * Called once for each board successfully probed. 3108 */ 3109 static int 3110 bge_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd) 3111 { 3112 bge_t *bgep; /* Our private data */ 3113 mac_register_t *macp; 3114 chip_id_t *cidp; 3115 caddr_t regs; 3116 int instance; 3117 int err; 3118 int intr_types; 3119 #ifdef BGE_IPMI_ASF 3120 uint32_t mhcrValue; 3121 #ifdef __sparc 3122 uint16_t value16; 3123 #endif 3124 #ifdef BGE_NETCONSOLE 3125 int retval; 3126 #endif 3127 #endif 3128 3129 instance = ddi_get_instance(devinfo); 3130 3131 BGE_GTRACE(("bge_attach($%p, %d) instance %d", 3132 (void *)devinfo, cmd, instance)); 3133 BGE_BRKPT(NULL, "bge_attach"); 3134 3135 switch (cmd) { 3136 default: 3137 return (DDI_FAILURE); 3138 3139 case DDI_RESUME: 3140 return (bge_resume(devinfo)); 3141 3142 case DDI_ATTACH: 3143 break; 3144 } 3145 3146 bgep = kmem_zalloc(sizeof (*bgep), KM_SLEEP); 3147 bgep->pstats = kmem_zalloc(sizeof (bge_statistics_reg_t), KM_SLEEP); 3148 ddi_set_driver_private(devinfo, bgep); 3149 bgep->bge_guard = BGE_GUARD; 3150 bgep->devinfo = devinfo; 3151 bgep->param_drain_max = 64; 3152 bgep->param_msi_cnt = 0; 3153 bgep->param_loop_mode = 0; 3154 3155 /* 3156 * Initialize more fields in BGE private data 3157 */ 3158 bgep->debug = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 3159 DDI_PROP_DONTPASS, debug_propname, bge_debug); 3160 (void) snprintf(bgep->ifname, sizeof (bgep->ifname), "%s%d", 3161 BGE_DRIVER_NAME, instance); 3162 3163 /* 3164 * Initialize for fma support 3165 */ 3166 bgep->fm_capabilities = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 3167 DDI_PROP_DONTPASS, fm_cap, 3168 DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE | 3169 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE); 3170 BGE_DEBUG(("bgep->fm_capabilities = %d", bgep->fm_capabilities)); 3171 bge_fm_init(bgep); 3172 3173 /* 3174 * Look up the IOMMU's page size for DVMA mappings (must be 3175 * a power of 2) and convert to a mask. This can be used to 3176 * determine whether a message buffer crosses a page boundary. 3177 * Note: in 2s complement binary notation, if X is a power of 3178 * 2, then -X has the representation "11...1100...00". 3179 */ 3180 bgep->pagemask = dvma_pagesize(devinfo); 3181 ASSERT(ddi_ffs(bgep->pagemask) == ddi_fls(bgep->pagemask)); 3182 bgep->pagemask = -bgep->pagemask; 3183 3184 /* 3185 * Map config space registers 3186 * Read chip ID & set up config space command register(s) 3187 * 3188 * Note: this leaves the chip accessible by Memory Space 3189 * accesses, but with interrupts and Bus Mastering off. 3190 * This should ensure that nothing untoward will happen 3191 * if it has been left active by the (net-)bootloader. 3192 * We'll re-enable Bus Mastering once we've reset the chip, 3193 * and allow interrupts only when everything else is set up. 3194 */ 3195 err = pci_config_setup(devinfo, &bgep->cfg_handle); 3196 #ifdef BGE_IPMI_ASF 3197 #ifdef __sparc 3198 value16 = pci_config_get16(bgep->cfg_handle, PCI_CONF_COMM); 3199 value16 = value16 | (PCI_COMM_MAE | PCI_COMM_ME); 3200 pci_config_put16(bgep->cfg_handle, PCI_CONF_COMM, value16); 3201 mhcrValue = MHCR_ENABLE_INDIRECT_ACCESS | 3202 MHCR_ENABLE_TAGGED_STATUS_MODE | 3203 MHCR_MASK_INTERRUPT_MODE | 3204 MHCR_MASK_PCI_INT_OUTPUT | 3205 MHCR_CLEAR_INTERRUPT_INTA | 3206 MHCR_ENABLE_ENDIAN_WORD_SWAP | 3207 MHCR_ENABLE_ENDIAN_BYTE_SWAP; 3208 pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_MHCR, mhcrValue); 3209 bge_ind_put32(bgep, MEMORY_ARBITER_MODE_REG, 3210 bge_ind_get32(bgep, MEMORY_ARBITER_MODE_REG) | 3211 MEMORY_ARBITER_ENABLE); 3212 #else 3213 mhcrValue = pci_config_get32(bgep->cfg_handle, PCI_CONF_BGE_MHCR); 3214 #endif 3215 if (mhcrValue & MHCR_ENABLE_ENDIAN_WORD_SWAP) { 3216 bgep->asf_wordswapped = B_TRUE; 3217 } else { 3218 bgep->asf_wordswapped = B_FALSE; 3219 } 3220 bge_asf_get_config(bgep); 3221 #endif 3222 if (err != DDI_SUCCESS) { 3223 bge_problem(bgep, "pci_config_setup() failed"); 3224 goto attach_fail; 3225 } 3226 bgep->progress |= PROGRESS_CFG; 3227 cidp = &bgep->chipid; 3228 bzero(cidp, sizeof (*cidp)); 3229 bge_chip_cfg_init(bgep, cidp, B_FALSE); 3230 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) { 3231 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 3232 goto attach_fail; 3233 } 3234 3235 #ifdef BGE_IPMI_ASF 3236 if (DEVICE_5721_SERIES_CHIPSETS(bgep) || 3237 DEVICE_5714_SERIES_CHIPSETS(bgep)) { 3238 bgep->asf_newhandshake = B_TRUE; 3239 } else { 3240 bgep->asf_newhandshake = B_FALSE; 3241 } 3242 #endif 3243 3244 /* 3245 * Update those parts of the chip ID derived from volatile 3246 * registers with the values seen by OBP (in case the chip 3247 * has been reset externally and therefore lost them). 3248 */ 3249 cidp->subven = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 3250 DDI_PROP_DONTPASS, subven_propname, cidp->subven); 3251 cidp->subdev = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 3252 DDI_PROP_DONTPASS, subdev_propname, cidp->subdev); 3253 cidp->clsize = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 3254 DDI_PROP_DONTPASS, clsize_propname, cidp->clsize); 3255 cidp->latency = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 3256 DDI_PROP_DONTPASS, latency_propname, cidp->latency); 3257 cidp->rx_rings = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 3258 DDI_PROP_DONTPASS, rxrings_propname, cidp->rx_rings); 3259 cidp->tx_rings = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 3260 DDI_PROP_DONTPASS, txrings_propname, cidp->tx_rings); 3261 3262 if (bge_jumbo_enable == B_TRUE) { 3263 cidp->default_mtu = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 3264 DDI_PROP_DONTPASS, default_mtu, BGE_DEFAULT_MTU); 3265 if ((cidp->default_mtu < BGE_DEFAULT_MTU)|| 3266 (cidp->default_mtu > BGE_MAXIMUM_MTU)) { 3267 cidp->default_mtu = BGE_DEFAULT_MTU; 3268 } 3269 } 3270 /* 3271 * Map operating registers 3272 */ 3273 err = ddi_regs_map_setup(devinfo, BGE_PCI_OPREGS_RNUMBER, 3274 ®s, 0, 0, &bge_reg_accattr, &bgep->io_handle); 3275 if (err != DDI_SUCCESS) { 3276 bge_problem(bgep, "ddi_regs_map_setup() failed"); 3277 goto attach_fail; 3278 } 3279 bgep->io_regs = regs; 3280 bgep->progress |= PROGRESS_REGS; 3281 3282 /* 3283 * Characterise the device, so we know its requirements. 3284 * Then allocate the appropriate TX and RX descriptors & buffers. 3285 */ 3286 if (bge_chip_id_init(bgep) == EIO) { 3287 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 3288 goto attach_fail; 3289 } 3290 3291 3292 err = bge_alloc_bufs(bgep); 3293 if (err != DDI_SUCCESS) { 3294 bge_problem(bgep, "DMA buffer allocation failed"); 3295 goto attach_fail; 3296 } 3297 bgep->progress |= PROGRESS_BUFS; 3298 3299 /* 3300 * Add the softint handlers: 3301 * 3302 * Both of these handlers are used to avoid restrictions on the 3303 * context and/or mutexes required for some operations. In 3304 * particular, the hardware interrupt handler and its subfunctions 3305 * can detect a number of conditions that we don't want to handle 3306 * in that context or with that set of mutexes held. So, these 3307 * softints are triggered instead: 3308 * 3309 * the <resched> softint is triggered if we have previously 3310 * had to refuse to send a packet because of resource shortage 3311 * (we've run out of transmit buffers), but the send completion 3312 * interrupt handler has now detected that more buffers have 3313 * become available. 3314 * 3315 * the <factotum> is triggered if the h/w interrupt handler 3316 * sees the <link state changed> or <error> bits in the status 3317 * block. It's also triggered periodically to poll the link 3318 * state, just in case we aren't getting link status change 3319 * interrupts ... 3320 */ 3321 err = ddi_add_softintr(devinfo, DDI_SOFTINT_LOW, &bgep->drain_id, 3322 NULL, NULL, bge_send_drain, (caddr_t)bgep); 3323 if (err != DDI_SUCCESS) { 3324 bge_problem(bgep, "ddi_add_softintr() failed"); 3325 goto attach_fail; 3326 } 3327 bgep->progress |= PROGRESS_RESCHED; 3328 err = ddi_add_softintr(devinfo, DDI_SOFTINT_LOW, &bgep->factotum_id, 3329 NULL, NULL, bge_chip_factotum, (caddr_t)bgep); 3330 if (err != DDI_SUCCESS) { 3331 bge_problem(bgep, "ddi_add_softintr() failed"); 3332 goto attach_fail; 3333 } 3334 bgep->progress |= PROGRESS_FACTOTUM; 3335 3336 /* Get supported interrupt types */ 3337 if (ddi_intr_get_supported_types(devinfo, &intr_types) != DDI_SUCCESS) { 3338 bge_error(bgep, "ddi_intr_get_supported_types failed\n"); 3339 3340 goto attach_fail; 3341 } 3342 3343 BGE_DEBUG(("%s: ddi_intr_get_supported_types() returned: %x", 3344 bgep->ifname, intr_types)); 3345 3346 if ((intr_types & DDI_INTR_TYPE_MSI) && bgep->chipid.msi_enabled) { 3347 if (bge_add_intrs(bgep, DDI_INTR_TYPE_MSI) != DDI_SUCCESS) { 3348 bge_error(bgep, "MSI registration failed, " 3349 "trying FIXED interrupt type\n"); 3350 } else { 3351 BGE_DEBUG(("%s: Using MSI interrupt type", 3352 bgep->ifname)); 3353 bgep->intr_type = DDI_INTR_TYPE_MSI; 3354 bgep->progress |= PROGRESS_HWINT; 3355 } 3356 } 3357 3358 if (!(bgep->progress & PROGRESS_HWINT) && 3359 (intr_types & DDI_INTR_TYPE_FIXED)) { 3360 if (bge_add_intrs(bgep, DDI_INTR_TYPE_FIXED) != DDI_SUCCESS) { 3361 bge_error(bgep, "FIXED interrupt " 3362 "registration failed\n"); 3363 goto attach_fail; 3364 } 3365 3366 BGE_DEBUG(("%s: Using FIXED interrupt type", bgep->ifname)); 3367 3368 bgep->intr_type = DDI_INTR_TYPE_FIXED; 3369 bgep->progress |= PROGRESS_HWINT; 3370 } 3371 3372 if (!(bgep->progress & PROGRESS_HWINT)) { 3373 bge_error(bgep, "No interrupts registered\n"); 3374 goto attach_fail; 3375 } 3376 3377 /* 3378 * Note that interrupts are not enabled yet as 3379 * mutex locks are not initialized. Initialize mutex locks. 3380 */ 3381 mutex_init(bgep->genlock, NULL, MUTEX_DRIVER, 3382 DDI_INTR_PRI(bgep->intr_pri)); 3383 mutex_init(bgep->softintrlock, NULL, MUTEX_DRIVER, 3384 DDI_INTR_PRI(bgep->intr_pri)); 3385 rw_init(bgep->errlock, NULL, RW_DRIVER, 3386 DDI_INTR_PRI(bgep->intr_pri)); 3387 3388 /* 3389 * Initialize rings. 3390 */ 3391 bge_init_rings(bgep); 3392 3393 /* 3394 * Now that mutex locks are initialized, enable interrupts. 3395 */ 3396 bge_intr_enable(bgep); 3397 bgep->progress |= PROGRESS_INTR; 3398 3399 /* 3400 * Initialise link state variables 3401 * Stop, reset & reinitialise the chip. 3402 * Initialise the (internal) PHY. 3403 */ 3404 bgep->link_state = LINK_STATE_UNKNOWN; 3405 3406 mutex_enter(bgep->genlock); 3407 3408 /* 3409 * Reset chip & rings to initial state; also reset address 3410 * filtering, promiscuity, loopback mode. 3411 */ 3412 #ifdef BGE_IPMI_ASF 3413 #ifdef BGE_NETCONSOLE 3414 if (bge_reset(bgep, ASF_MODE_INIT) != DDI_SUCCESS) { 3415 #else 3416 if (bge_reset(bgep, ASF_MODE_SHUTDOWN) != DDI_SUCCESS) { 3417 #endif 3418 #else 3419 if (bge_reset(bgep) != DDI_SUCCESS) { 3420 #endif 3421 (void) bge_check_acc_handle(bgep, bgep->cfg_handle); 3422 (void) bge_check_acc_handle(bgep, bgep->io_handle); 3423 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 3424 mutex_exit(bgep->genlock); 3425 goto attach_fail; 3426 } 3427 3428 #ifdef BGE_IPMI_ASF 3429 if (bgep->asf_enabled) { 3430 bgep->asf_status = ASF_STAT_RUN_INIT; 3431 } 3432 #endif 3433 3434 bzero(bgep->mcast_hash, sizeof (bgep->mcast_hash)); 3435 bzero(bgep->mcast_refs, sizeof (bgep->mcast_refs)); 3436 bgep->promisc = B_FALSE; 3437 bgep->param_loop_mode = BGE_LOOP_NONE; 3438 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) { 3439 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 3440 mutex_exit(bgep->genlock); 3441 goto attach_fail; 3442 } 3443 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) { 3444 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 3445 mutex_exit(bgep->genlock); 3446 goto attach_fail; 3447 } 3448 3449 mutex_exit(bgep->genlock); 3450 3451 if (bge_phys_init(bgep) == EIO) { 3452 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 3453 goto attach_fail; 3454 } 3455 bgep->progress |= PROGRESS_PHY; 3456 3457 /* 3458 * initialize NDD-tweakable parameters 3459 */ 3460 if (bge_nd_init(bgep)) { 3461 bge_problem(bgep, "bge_nd_init() failed"); 3462 goto attach_fail; 3463 } 3464 bgep->progress |= PROGRESS_NDD; 3465 3466 /* 3467 * Create & initialise named kstats 3468 */ 3469 bge_init_kstats(bgep, instance); 3470 bgep->progress |= PROGRESS_KSTATS; 3471 3472 /* 3473 * Determine whether to override the chip's own MAC address 3474 */ 3475 bge_find_mac_address(bgep, cidp); 3476 3477 bgep->unicst_addr_total = MAC_ADDRESS_REGS_MAX; 3478 bgep->unicst_addr_avail = MAC_ADDRESS_REGS_MAX; 3479 3480 if ((macp = mac_alloc(MAC_VERSION)) == NULL) 3481 goto attach_fail; 3482 macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER; 3483 macp->m_driver = bgep; 3484 macp->m_dip = devinfo; 3485 macp->m_src_addr = cidp->vendor_addr.addr; 3486 macp->m_callbacks = &bge_m_callbacks; 3487 macp->m_min_sdu = 0; 3488 macp->m_max_sdu = cidp->ethmax_size - sizeof (struct ether_header); 3489 macp->m_margin = VLAN_TAGSZ; 3490 macp->m_priv_props = bge_priv_prop; 3491 macp->m_priv_prop_count = BGE_MAX_PRIV_PROPS; 3492 macp->m_v12n = MAC_VIRT_LEVEL1; 3493 3494 /* 3495 * Finally, we're ready to register ourselves with the MAC layer 3496 * interface; if this succeeds, we're all ready to start() 3497 */ 3498 err = mac_register(macp, &bgep->mh); 3499 mac_free(macp); 3500 if (err != 0) 3501 goto attach_fail; 3502 3503 /* 3504 * Register a periodical handler. 3505 * bge_chip_cyclic() is invoked in kernel context. 3506 */ 3507 bgep->periodic_id = ddi_periodic_add(bge_chip_cyclic, bgep, 3508 BGE_CYCLIC_PERIOD, DDI_IPL_0); 3509 3510 bgep->progress |= PROGRESS_READY; 3511 ASSERT(bgep->bge_guard == BGE_GUARD); 3512 #ifdef BGE_IPMI_ASF 3513 #ifdef BGE_NETCONSOLE 3514 if (bgep->asf_enabled) { 3515 mutex_enter(bgep->genlock); 3516 retval = bge_chip_start(bgep, B_TRUE); 3517 mutex_exit(bgep->genlock); 3518 if (retval != DDI_SUCCESS) 3519 goto attach_fail; 3520 } 3521 #endif 3522 #endif 3523 3524 ddi_report_dev(devinfo); 3525 BGE_REPORT((bgep, "bge version: %s", bge_version)); 3526 3527 return (DDI_SUCCESS); 3528 3529 attach_fail: 3530 #ifdef BGE_IPMI_ASF 3531 bge_unattach(bgep, ASF_MODE_SHUTDOWN); 3532 #else 3533 bge_unattach(bgep); 3534 #endif 3535 return (DDI_FAILURE); 3536 } 3537 3538 /* 3539 * bge_suspend() -- suspend transmit/receive for powerdown 3540 */ 3541 static int 3542 bge_suspend(bge_t *bgep) 3543 { 3544 /* 3545 * Stop processing and idle (powerdown) the PHY ... 3546 */ 3547 mutex_enter(bgep->genlock); 3548 #ifdef BGE_IPMI_ASF 3549 /* 3550 * Power management hasn't been supported in BGE now. If you 3551 * want to implement it, please add the ASF/IPMI related 3552 * code here. 3553 */ 3554 #endif 3555 bge_stop(bgep); 3556 if (bge_phys_idle(bgep) != DDI_SUCCESS) { 3557 (void) bge_check_acc_handle(bgep, bgep->io_handle); 3558 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 3559 mutex_exit(bgep->genlock); 3560 return (DDI_FAILURE); 3561 } 3562 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) { 3563 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 3564 mutex_exit(bgep->genlock); 3565 return (DDI_FAILURE); 3566 } 3567 mutex_exit(bgep->genlock); 3568 3569 return (DDI_SUCCESS); 3570 } 3571 3572 /* 3573 * quiesce(9E) entry point. 3574 * 3575 * This function is called when the system is single-threaded at high 3576 * PIL with preemption disabled. Therefore, this function must not be 3577 * blocked. 3578 * 3579 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure. 3580 * DDI_FAILURE indicates an error condition and should almost never happen. 3581 */ 3582 #ifdef __sparc 3583 #define bge_quiesce ddi_quiesce_not_supported 3584 #else 3585 static int 3586 bge_quiesce(dev_info_t *devinfo) 3587 { 3588 bge_t *bgep = ddi_get_driver_private(devinfo); 3589 3590 if (bgep == NULL) 3591 return (DDI_FAILURE); 3592 3593 if (bgep->intr_type == DDI_INTR_TYPE_FIXED) { 3594 bge_reg_set32(bgep, PCI_CONF_BGE_MHCR, 3595 MHCR_MASK_PCI_INT_OUTPUT); 3596 } else { 3597 bge_reg_clr32(bgep, MSI_MODE_REG, MSI_MSI_ENABLE); 3598 } 3599 3600 /* Stop the chip */ 3601 bge_chip_stop_nonblocking(bgep); 3602 3603 return (DDI_SUCCESS); 3604 } 3605 #endif 3606 3607 /* 3608 * detach(9E) -- Detach a device from the system 3609 */ 3610 static int 3611 bge_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd) 3612 { 3613 bge_t *bgep; 3614 #ifdef BGE_IPMI_ASF 3615 uint_t asf_mode; 3616 asf_mode = ASF_MODE_NONE; 3617 #endif 3618 3619 BGE_GTRACE(("bge_detach($%p, %d)", (void *)devinfo, cmd)); 3620 3621 bgep = ddi_get_driver_private(devinfo); 3622 3623 switch (cmd) { 3624 default: 3625 return (DDI_FAILURE); 3626 3627 case DDI_SUSPEND: 3628 return (bge_suspend(bgep)); 3629 3630 case DDI_DETACH: 3631 break; 3632 } 3633 3634 #ifdef BGE_IPMI_ASF 3635 mutex_enter(bgep->genlock); 3636 if (bgep->asf_enabled && ((bgep->asf_status == ASF_STAT_RUN) || 3637 (bgep->asf_status == ASF_STAT_RUN_INIT))) { 3638 3639 bge_asf_update_status(bgep); 3640 if (bgep->asf_status == ASF_STAT_RUN) { 3641 bge_asf_stop_timer(bgep); 3642 } 3643 bgep->asf_status = ASF_STAT_STOP; 3644 3645 bge_asf_pre_reset_operations(bgep, BGE_SHUTDOWN_RESET); 3646 3647 if (bgep->asf_pseudostop) { 3648 bge_chip_stop(bgep, B_FALSE); 3649 bgep->bge_mac_state = BGE_MAC_STOPPED; 3650 bgep->asf_pseudostop = B_FALSE; 3651 } 3652 3653 asf_mode = ASF_MODE_POST_SHUTDOWN; 3654 3655 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) 3656 ddi_fm_service_impact(bgep->devinfo, 3657 DDI_SERVICE_UNAFFECTED); 3658 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) 3659 ddi_fm_service_impact(bgep->devinfo, 3660 DDI_SERVICE_UNAFFECTED); 3661 } 3662 mutex_exit(bgep->genlock); 3663 #endif 3664 3665 /* 3666 * Unregister from the GLD subsystem. This can fail, in 3667 * particular if there are DLPI style-2 streams still open - 3668 * in which case we just return failure without shutting 3669 * down chip operations. 3670 */ 3671 if (mac_unregister(bgep->mh) != 0) 3672 return (DDI_FAILURE); 3673 3674 /* 3675 * All activity stopped, so we can clean up & exit 3676 */ 3677 #ifdef BGE_IPMI_ASF 3678 bge_unattach(bgep, asf_mode); 3679 #else 3680 bge_unattach(bgep); 3681 #endif 3682 return (DDI_SUCCESS); 3683 } 3684 3685 3686 /* 3687 * ========== Module Loading Data & Entry Points ========== 3688 */ 3689 3690 #undef BGE_DBG 3691 #define BGE_DBG BGE_DBG_INIT /* debug flag for this code */ 3692 3693 DDI_DEFINE_STREAM_OPS(bge_dev_ops, 3694 nulldev, /* identify */ 3695 nulldev, /* probe */ 3696 bge_attach, /* attach */ 3697 bge_detach, /* detach */ 3698 nodev, /* reset */ 3699 NULL, /* cb_ops */ 3700 D_MP, /* bus_ops */ 3701 NULL, /* power */ 3702 bge_quiesce /* quiesce */ 3703 ); 3704 3705 static struct modldrv bge_modldrv = { 3706 &mod_driverops, /* Type of module. This one is a driver */ 3707 bge_ident, /* short description */ 3708 &bge_dev_ops /* driver specific ops */ 3709 }; 3710 3711 static struct modlinkage modlinkage = { 3712 MODREV_1, (void *)&bge_modldrv, NULL 3713 }; 3714 3715 3716 int 3717 _info(struct modinfo *modinfop) 3718 { 3719 return (mod_info(&modlinkage, modinfop)); 3720 } 3721 3722 int 3723 _init(void) 3724 { 3725 int status; 3726 3727 mac_init_ops(&bge_dev_ops, "bge"); 3728 status = mod_install(&modlinkage); 3729 if (status == DDI_SUCCESS) 3730 mutex_init(bge_log_mutex, NULL, MUTEX_DRIVER, NULL); 3731 else 3732 mac_fini_ops(&bge_dev_ops); 3733 return (status); 3734 } 3735 3736 int 3737 _fini(void) 3738 { 3739 int status; 3740 3741 status = mod_remove(&modlinkage); 3742 if (status == DDI_SUCCESS) { 3743 mac_fini_ops(&bge_dev_ops); 3744 mutex_destroy(bge_log_mutex); 3745 } 3746 return (status); 3747 } 3748 3749 3750 /* 3751 * bge_add_intrs: 3752 * 3753 * Register FIXED or MSI interrupts. 3754 */ 3755 static int 3756 bge_add_intrs(bge_t *bgep, int intr_type) 3757 { 3758 dev_info_t *dip = bgep->devinfo; 3759 int avail, actual, intr_size, count = 0; 3760 int i, flag, ret; 3761 3762 BGE_DEBUG(("bge_add_intrs($%p, 0x%x)", (void *)bgep, intr_type)); 3763 3764 /* Get number of interrupts */ 3765 ret = ddi_intr_get_nintrs(dip, intr_type, &count); 3766 if ((ret != DDI_SUCCESS) || (count == 0)) { 3767 bge_error(bgep, "ddi_intr_get_nintrs() failure, ret: %d, " 3768 "count: %d", ret, count); 3769 3770 return (DDI_FAILURE); 3771 } 3772 3773 /* Get number of available interrupts */ 3774 ret = ddi_intr_get_navail(dip, intr_type, &avail); 3775 if ((ret != DDI_SUCCESS) || (avail == 0)) { 3776 bge_error(bgep, "ddi_intr_get_navail() failure, " 3777 "ret: %d, avail: %d\n", ret, avail); 3778 3779 return (DDI_FAILURE); 3780 } 3781 3782 if (avail < count) { 3783 BGE_DEBUG(("%s: nintrs() returned %d, navail returned %d", 3784 bgep->ifname, count, avail)); 3785 } 3786 3787 /* 3788 * BGE hardware generates only single MSI even though it claims 3789 * to support multiple MSIs. So, hard code MSI count value to 1. 3790 */ 3791 if (intr_type == DDI_INTR_TYPE_MSI) { 3792 count = 1; 3793 flag = DDI_INTR_ALLOC_STRICT; 3794 } else { 3795 flag = DDI_INTR_ALLOC_NORMAL; 3796 } 3797 3798 /* Allocate an array of interrupt handles */ 3799 intr_size = count * sizeof (ddi_intr_handle_t); 3800 bgep->htable = kmem_alloc(intr_size, KM_SLEEP); 3801 3802 /* Call ddi_intr_alloc() */ 3803 ret = ddi_intr_alloc(dip, bgep->htable, intr_type, 0, 3804 count, &actual, flag); 3805 3806 if ((ret != DDI_SUCCESS) || (actual == 0)) { 3807 bge_error(bgep, "ddi_intr_alloc() failed %d\n", ret); 3808 3809 kmem_free(bgep->htable, intr_size); 3810 return (DDI_FAILURE); 3811 } 3812 3813 if (actual < count) { 3814 BGE_DEBUG(("%s: Requested: %d, Received: %d", 3815 bgep->ifname, count, actual)); 3816 } 3817 3818 bgep->intr_cnt = actual; 3819 3820 /* 3821 * Get priority for first msi, assume remaining are all the same 3822 */ 3823 if ((ret = ddi_intr_get_pri(bgep->htable[0], &bgep->intr_pri)) != 3824 DDI_SUCCESS) { 3825 bge_error(bgep, "ddi_intr_get_pri() failed %d\n", ret); 3826 3827 /* Free already allocated intr */ 3828 for (i = 0; i < actual; i++) { 3829 (void) ddi_intr_free(bgep->htable[i]); 3830 } 3831 3832 kmem_free(bgep->htable, intr_size); 3833 return (DDI_FAILURE); 3834 } 3835 3836 /* Call ddi_intr_add_handler() */ 3837 for (i = 0; i < actual; i++) { 3838 if ((ret = ddi_intr_add_handler(bgep->htable[i], bge_intr, 3839 (caddr_t)bgep, (caddr_t)(uintptr_t)i)) != DDI_SUCCESS) { 3840 bge_error(bgep, "ddi_intr_add_handler() " 3841 "failed %d\n", ret); 3842 3843 /* Free already allocated intr */ 3844 for (i = 0; i < actual; i++) { 3845 (void) ddi_intr_free(bgep->htable[i]); 3846 } 3847 3848 kmem_free(bgep->htable, intr_size); 3849 return (DDI_FAILURE); 3850 } 3851 } 3852 3853 if ((ret = ddi_intr_get_cap(bgep->htable[0], &bgep->intr_cap)) 3854 != DDI_SUCCESS) { 3855 bge_error(bgep, "ddi_intr_get_cap() failed %d\n", ret); 3856 3857 for (i = 0; i < actual; i++) { 3858 (void) ddi_intr_remove_handler(bgep->htable[i]); 3859 (void) ddi_intr_free(bgep->htable[i]); 3860 } 3861 3862 kmem_free(bgep->htable, intr_size); 3863 return (DDI_FAILURE); 3864 } 3865 3866 return (DDI_SUCCESS); 3867 } 3868 3869 /* 3870 * bge_rem_intrs: 3871 * 3872 * Unregister FIXED or MSI interrupts 3873 */ 3874 static void 3875 bge_rem_intrs(bge_t *bgep) 3876 { 3877 int i; 3878 3879 BGE_DEBUG(("bge_rem_intrs($%p)", (void *)bgep)); 3880 3881 /* Call ddi_intr_remove_handler() */ 3882 for (i = 0; i < bgep->intr_cnt; i++) { 3883 (void) ddi_intr_remove_handler(bgep->htable[i]); 3884 (void) ddi_intr_free(bgep->htable[i]); 3885 } 3886 3887 kmem_free(bgep->htable, bgep->intr_cnt * sizeof (ddi_intr_handle_t)); 3888 } 3889 3890 3891 void 3892 bge_intr_enable(bge_t *bgep) 3893 { 3894 int i; 3895 3896 if (bgep->intr_cap & DDI_INTR_FLAG_BLOCK) { 3897 /* Call ddi_intr_block_enable() for MSI interrupts */ 3898 (void) ddi_intr_block_enable(bgep->htable, bgep->intr_cnt); 3899 } else { 3900 /* Call ddi_intr_enable for MSI or FIXED interrupts */ 3901 for (i = 0; i < bgep->intr_cnt; i++) { 3902 (void) ddi_intr_enable(bgep->htable[i]); 3903 } 3904 } 3905 } 3906 3907 3908 void 3909 bge_intr_disable(bge_t *bgep) 3910 { 3911 int i; 3912 3913 if (bgep->intr_cap & DDI_INTR_FLAG_BLOCK) { 3914 /* Call ddi_intr_block_disable() */ 3915 (void) ddi_intr_block_disable(bgep->htable, bgep->intr_cnt); 3916 } else { 3917 for (i = 0; i < bgep->intr_cnt; i++) { 3918 (void) ddi_intr_disable(bgep->htable[i]); 3919 } 3920 } 3921 } 3922 3923 int 3924 bge_reprogram(bge_t *bgep) 3925 { 3926 int status = 0; 3927 3928 ASSERT(mutex_owned(bgep->genlock)); 3929 3930 if (bge_phys_update(bgep) != DDI_SUCCESS) { 3931 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 3932 status = IOC_INVAL; 3933 } 3934 #ifdef BGE_IPMI_ASF 3935 if (bge_chip_sync(bgep, B_TRUE) == DDI_FAILURE) { 3936 #else 3937 if (bge_chip_sync(bgep) == DDI_FAILURE) { 3938 #endif 3939 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 3940 status = IOC_INVAL; 3941 } 3942 if (bgep->intr_type == DDI_INTR_TYPE_MSI) 3943 bge_chip_msi_trig(bgep); 3944 return (status); 3945 } 3946