/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include "bge_impl.h"
#include <sys/sdt.h>
#include <sys/mac_provider.h>
#include <sys/mac.h>
#include <sys/mac_flow.h>

/*
 * This is the string displayed by modinfo, etc.
 */
static char bge_ident[] = "Broadcom Gb Ethernet";
/*
 * Make sure you keep the version ID up to date!
 */
static char bge_version[] = "Broadcom Gb Ethernet v1.13";

/*
 * Property names
 */
static char debug_propname[] = "bge-debug-flags";
static char clsize_propname[] = "cache-line-size";
static char latency_propname[] = "latency-timer";
static char localmac_boolname[] = "local-mac-address?";
static char localmac_propname[] = "local-mac-address";
static char macaddr_propname[] = "mac-address";
static char subdev_propname[] = "subsystem-id";
static char subven_propname[] = "subsystem-vendor-id";
static char rxrings_propname[] = "bge-rx-rings";
static char txrings_propname[] = "bge-tx-rings";
static char fm_cap[] = "fm-capable";
static char default_mtu[] = "default_mtu";

static int bge_add_intrs(bge_t *, int);
static void bge_rem_intrs(bge_t *);
static int bge_unicst_set(void *, const uint8_t *, int);

/*
 * Describes the chip's DMA engine
 */
static ddi_dma_attr_t dma_attr = {
	DMA_ATTR_V0,			/* dma_attr version	*/
	0x0000000000000000ull,		/* dma_attr_addr_lo	*/
	0xFFFFFFFFFFFFFFFFull,		/* dma_attr_addr_hi	*/
	0x00000000FFFFFFFFull,		/* dma_attr_count_max	*/
	0x0000000000000001ull,		/* dma_attr_align	*/
	0x00000FFF,			/* dma_attr_burstsizes	*/
	0x00000001,			/* dma_attr_minxfer	*/
	0x000000000000FFFFull,		/* dma_attr_maxxfer	*/
	0xFFFFFFFFFFFFFFFFull,		/* dma_attr_seg		*/
	1,				/* dma_attr_sgllen	*/
	0x00000001,			/* dma_attr_granular	*/
	DDI_DMA_FLAGERR			/* dma_attr_flags	*/
};
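/*
 * Note: dma_attr_sgllen is 1, so every DMA binding made with these
 * attributes must resolve to a single cookie; bge_alloc_dma_mem()
 * below relies on this, failing the allocation if
 * ddi_dma_addr_bind_handle() returns more than one cookie.
 */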
/*
 * PIO access attributes for registers
 */
static ddi_device_acc_attr_t bge_reg_accattr = {
	DDI_DEVICE_ATTR_V1,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC,
	DDI_FLAGERR_ACC
};

/*
 * DMA access attributes for descriptors: NOT to be byte swapped.
 */
static ddi_device_acc_attr_t bge_desc_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC
};

/*
 * DMA access attributes for data: NOT to be byte swapped.
 */
static ddi_device_acc_attr_t bge_data_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC
};

static int		bge_m_start(void *);
static void		bge_m_stop(void *);
static int		bge_m_promisc(void *, boolean_t);
static int		bge_m_multicst(void *, boolean_t, const uint8_t *);
static void		bge_m_ioctl(void *, queue_t *, mblk_t *);
static boolean_t	bge_m_getcapab(void *, mac_capab_t, void *);
static int		bge_unicst_set(void *, const uint8_t *, int);
static int		bge_m_setprop(void *, const char *, mac_prop_id_t,
			    uint_t, const void *);
static int		bge_m_getprop(void *, const char *, mac_prop_id_t,
			    uint_t, uint_t, void *, uint_t *);
static int		bge_set_priv_prop(bge_t *, const char *, uint_t,
			    const void *);
static int		bge_get_priv_prop(bge_t *, const char *, uint_t,
			    uint_t, void *);

#define	BGE_M_CALLBACK_FLAGS (MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP)

static mac_callbacks_t bge_m_callbacks = {
	BGE_M_CALLBACK_FLAGS,
	bge_m_stat,
	bge_m_start,
	bge_m_stop,
	bge_m_promisc,
	bge_m_multicst,
	NULL,
	bge_m_tx,
	bge_m_ioctl,
	bge_m_getcapab,
	NULL,
	NULL,
	bge_m_setprop,
	bge_m_getprop
};

mac_priv_prop_t bge_priv_prop[] = {
	{"_adv_asym_pause_cap", MAC_PROP_PERM_RW},
	{"_adv_pause_cap", MAC_PROP_PERM_RW}
};

#define	BGE_MAX_PRIV_PROPS \
	(sizeof (bge_priv_prop) / sizeof (mac_priv_prop_t))

uint8_t zero_addr[6] = {0, 0, 0, 0, 0, 0};
/*
 * ========== Transmit and receive ring reinitialisation ==========
 */

/*
 * These <reinit> routines each reset the specified ring to an initial
 * state, assuming that the corresponding <init> routine has already
 * been called exactly once.
 */
static void
bge_reinit_send_ring(send_ring_t *srp)
{
	bge_queue_t *txbuf_queue;
	bge_queue_item_t *txbuf_head;
	sw_txbuf_t *txbuf;
	sw_sbd_t *ssbdp;
	uint32_t slot;

	/*
	 * Reinitialise control variables ...
	 */
	srp->tx_flow = 0;
	srp->tx_next = 0;
	srp->txfill_next = 0;
	srp->tx_free = srp->desc.nslots;
	ASSERT(mutex_owned(srp->tc_lock));
	srp->tc_next = 0;
	srp->txpkt_next = 0;
	srp->tx_block = 0;
	srp->tx_nobd = 0;
	srp->tx_nobuf = 0;

	/*
	 * Initialize the tx buffer push queue
	 */
	mutex_enter(srp->freetxbuf_lock);
	mutex_enter(srp->txbuf_lock);
	txbuf_queue = &srp->freetxbuf_queue;
	txbuf_queue->head = NULL;
	txbuf_queue->count = 0;
	txbuf_queue->lock = srp->freetxbuf_lock;
	srp->txbuf_push_queue = txbuf_queue;

	/*
	 * Initialize the tx buffer pop queue
	 */
	txbuf_queue = &srp->txbuf_queue;
	txbuf_queue->head = NULL;
	txbuf_queue->count = 0;
	txbuf_queue->lock = srp->txbuf_lock;
	srp->txbuf_pop_queue = txbuf_queue;
	txbuf_head = srp->txbuf_head;
	txbuf = srp->txbuf;
	for (slot = 0; slot < srp->tx_buffers; ++slot) {
		txbuf_head->item = txbuf;
		txbuf_head->next = txbuf_queue->head;
		txbuf_queue->head = txbuf_head;
		txbuf_queue->count++;
		txbuf++;
		txbuf_head++;
	}
	mutex_exit(srp->txbuf_lock);
	mutex_exit(srp->freetxbuf_lock);

	/*
	 * Zero and sync all the h/w Send Buffer Descriptors
	 */
	DMA_ZERO(srp->desc);
	DMA_SYNC(srp->desc, DDI_DMA_SYNC_FORDEV);
	bzero(srp->pktp, BGE_SEND_BUF_MAX * sizeof (*srp->pktp));
	ssbdp = srp->sw_sbds;
	for (slot = 0; slot < srp->desc.nslots; ++ssbdp, ++slot)
		ssbdp->pbuf = NULL;
}

static void
bge_reinit_recv_ring(recv_ring_t *rrp)
{
	/*
	 * Reinitialise control variables ...
	 */
	rrp->rx_next = 0;
}

static void
bge_reinit_buff_ring(buff_ring_t *brp, uint32_t ring)
{
	bge_rbd_t *hw_rbd_p;
	sw_rbd_t *srbdp;
	uint32_t bufsize;
	uint32_t nslots;
	uint32_t slot;

	static uint16_t ring_type_flag[BGE_BUFF_RINGS_MAX] = {
		RBD_FLAG_STD_RING,
		RBD_FLAG_JUMBO_RING,
		RBD_FLAG_MINI_RING
	};

	/*
	 * Zero, initialise and sync all the h/w Receive Buffer Descriptors
	 * Note: all the remaining fields (<type>, <flags>, <ip_cksum>,
	 * <tcp_udp_cksum>, <error_flag>, <vlan_tag>, and <reserved>)
	 * should be zeroed, and so don't need to be set up specifically
	 * once the whole area has been cleared.
	 */
	DMA_ZERO(brp->desc);

	hw_rbd_p = DMA_VPTR(brp->desc);
	nslots = brp->desc.nslots;
	ASSERT(brp->buf[0].nslots == nslots/BGE_SPLIT);
	bufsize = brp->buf[0].size;
	srbdp = brp->sw_rbds;
	for (slot = 0; slot < nslots; ++hw_rbd_p, ++srbdp, ++slot) {
		hw_rbd_p->host_buf_addr = srbdp->pbuf.cookie.dmac_laddress;
		hw_rbd_p->index = (uint16_t)slot;
		hw_rbd_p->len = (uint16_t)bufsize;
		hw_rbd_p->opaque = srbdp->pbuf.token;
		hw_rbd_p->flags |= ring_type_flag[ring];
	}

	DMA_SYNC(brp->desc, DDI_DMA_SYNC_FORDEV);

	/*
	 * Finally, reinitialise the ring control variables ...
	 */
	brp->rf_next = (nslots != 0) ? (nslots-1) : 0;
}
/*
 * Reinitialize all rings
 */
static void
bge_reinit_rings(bge_t *bgep)
{
	uint32_t ring;

	ASSERT(mutex_owned(bgep->genlock));

	/*
	 * Send Rings ...
	 */
	for (ring = 0; ring < bgep->chipid.tx_rings; ++ring)
		bge_reinit_send_ring(&bgep->send[ring]);

	/*
	 * Receive Return Rings ...
	 */
	for (ring = 0; ring < bgep->chipid.rx_rings; ++ring)
		bge_reinit_recv_ring(&bgep->recv[ring]);

	/*
	 * Receive Producer Rings ...
	 */
	for (ring = 0; ring < BGE_BUFF_RINGS_USED; ++ring)
		bge_reinit_buff_ring(&bgep->buff[ring], ring);
}

/*
 * ========== Internal state management entry points ==========
 */

#undef	BGE_DBG
#define	BGE_DBG		BGE_DBG_NEMO	/* debug flag for this code	*/

/*
 * These routines provide all the functionality required by the
 * corresponding GLD entry points, but don't update the GLD state
 * so they can be called internally without disturbing our record
 * of what GLD thinks we should be doing ...
 */

/*
 *	bge_reset() -- reset h/w & rings to initial state
 */
static int
#ifdef BGE_IPMI_ASF
bge_reset(bge_t *bgep, uint_t asf_mode)
#else
bge_reset(bge_t *bgep)
#endif
{
	uint32_t ring;
	int retval;

	BGE_TRACE(("bge_reset($%p)", (void *)bgep));

	ASSERT(mutex_owned(bgep->genlock));

	/*
	 * Grab all the other mutexes in the world (this should
	 * ensure no other threads are manipulating driver state)
	 */
	for (ring = 0; ring < BGE_RECV_RINGS_MAX; ++ring)
		mutex_enter(bgep->recv[ring].rx_lock);
	for (ring = 0; ring < BGE_BUFF_RINGS_MAX; ++ring)
		mutex_enter(bgep->buff[ring].rf_lock);
	rw_enter(bgep->errlock, RW_WRITER);
	for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring)
		mutex_enter(bgep->send[ring].tx_lock);
	for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring)
		mutex_enter(bgep->send[ring].tc_lock);

#ifdef BGE_IPMI_ASF
	retval = bge_chip_reset(bgep, B_TRUE, asf_mode);
#else
	retval = bge_chip_reset(bgep, B_TRUE);
#endif
	bge_reinit_rings(bgep);

	/*
	 * Free the world ...
	 */
	for (ring = BGE_SEND_RINGS_MAX; ring-- > 0; )
		mutex_exit(bgep->send[ring].tc_lock);
	for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring)
		mutex_exit(bgep->send[ring].tx_lock);
	rw_exit(bgep->errlock);
	for (ring = BGE_BUFF_RINGS_MAX; ring-- > 0; )
		mutex_exit(bgep->buff[ring].rf_lock);
	for (ring = BGE_RECV_RINGS_MAX; ring-- > 0; )
		mutex_exit(bgep->recv[ring].rx_lock);

	BGE_DEBUG(("bge_reset($%p) done", (void *)bgep));
	return (retval);
}
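/*
 * Note: bge_reset() above acquires the per-ring and error locks in a
 * fixed order -- rx_lock(s), then rf_lock(s), errlock, tx_lock(s) and
 * finally tc_lock(s), all under genlock -- and releases them in roughly
 * the reverse order.  Code that takes more than one of these locks
 * should presumably follow the same ordering to avoid deadlock.
 */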
/*
 *	bge_stop() -- stop processing, don't reset h/w or rings
 */
static void
bge_stop(bge_t *bgep)
{
	BGE_TRACE(("bge_stop($%p)", (void *)bgep));

	ASSERT(mutex_owned(bgep->genlock));

#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled) {
		bgep->asf_pseudostop = B_TRUE;
	} else {
#endif
		bge_chip_stop(bgep, B_FALSE);
#ifdef BGE_IPMI_ASF
	}
#endif

	BGE_DEBUG(("bge_stop($%p) done", (void *)bgep));
}

/*
 *	bge_start() -- start transmitting/receiving
 */
static int
bge_start(bge_t *bgep, boolean_t reset_phys)
{
	int retval;

	BGE_TRACE(("bge_start($%p, %d)", (void *)bgep, reset_phys));

	ASSERT(mutex_owned(bgep->genlock));

	/*
	 * Start chip processing, including enabling interrupts
	 */
	retval = bge_chip_start(bgep, reset_phys);

	BGE_DEBUG(("bge_start($%p, %d) done", (void *)bgep, reset_phys));
	return (retval);
}

/*
 * bge_restart - restart transmitting/receiving after error or suspend
 */
int
bge_restart(bge_t *bgep, boolean_t reset_phys)
{
	int retval = DDI_SUCCESS;
	ASSERT(mutex_owned(bgep->genlock));

#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled) {
		if (bge_reset(bgep, ASF_MODE_POST_INIT) != DDI_SUCCESS)
			retval = DDI_FAILURE;
	} else
		if (bge_reset(bgep, ASF_MODE_NONE) != DDI_SUCCESS)
			retval = DDI_FAILURE;
#else
	if (bge_reset(bgep) != DDI_SUCCESS)
		retval = DDI_FAILURE;
#endif
	if (bgep->bge_mac_state == BGE_MAC_STARTED) {
		if (bge_start(bgep, reset_phys) != DDI_SUCCESS)
			retval = DDI_FAILURE;
		bgep->watchdog = 0;
		ddi_trigger_softintr(bgep->drain_id);
	}

	BGE_DEBUG(("bge_restart($%p, %d) done", (void *)bgep, reset_phys));
	return (retval);
}
/*
 * ========== Nemo-required management entry points ==========
 */

#undef	BGE_DBG
#define	BGE_DBG		BGE_DBG_NEMO	/* debug flag for this code	*/

/*
 *	bge_m_stop() -- stop transmitting/receiving
 */
static void
bge_m_stop(void *arg)
{
	bge_t *bgep = arg;		/* private device info	*/
	send_ring_t *srp;
	uint32_t ring;

	BGE_TRACE(("bge_m_stop($%p)", arg));

	/*
	 * Just stop processing, then record new GLD state
	 */
	mutex_enter(bgep->genlock);
	if (!(bgep->progress & PROGRESS_INTR)) {
		/* can happen during autorecovery */
		bgep->bge_chip_state = BGE_CHIP_STOPPED;
	} else
		bge_stop(bgep);

	bgep->link_update_timer = 0;
	bgep->link_state = LINK_STATE_UNKNOWN;
	mac_link_update(bgep->mh, bgep->link_state);

	/*
	 * Free the possible tx buffers allocated in tx process.
	 */
#ifdef BGE_IPMI_ASF
	if (!bgep->asf_pseudostop)
#endif
	{
		rw_enter(bgep->errlock, RW_WRITER);
		for (ring = 0; ring < bgep->chipid.tx_rings; ++ring) {
			srp = &bgep->send[ring];
			mutex_enter(srp->tx_lock);
			if (srp->tx_array > 1)
				bge_free_txbuf_arrays(srp);
			mutex_exit(srp->tx_lock);
		}
		rw_exit(bgep->errlock);
	}
	bgep->bge_mac_state = BGE_MAC_STOPPED;
	BGE_DEBUG(("bge_m_stop($%p) done", arg));
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK)
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_UNAFFECTED);
	mutex_exit(bgep->genlock);
}

/*
 *	bge_m_start() -- start transmitting/receiving
 */
static int
bge_m_start(void *arg)
{
	bge_t *bgep = arg;		/* private device info	*/

	BGE_TRACE(("bge_m_start($%p)", arg));

	/*
	 * Start processing and record new GLD state
	 */
	mutex_enter(bgep->genlock);
	if (!(bgep->progress & PROGRESS_INTR)) {
		/* can happen during autorecovery */
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled) {
		if ((bgep->asf_status == ASF_STAT_RUN) &&
		    (bgep->asf_pseudostop)) {
			bgep->bge_mac_state = BGE_MAC_STARTED;
			mutex_exit(bgep->genlock);
			return (0);
		}
	}
	if (bge_reset(bgep, ASF_MODE_INIT) != DDI_SUCCESS) {
#else
	if (bge_reset(bgep) != DDI_SUCCESS) {
#endif
		(void) bge_check_acc_handle(bgep, bgep->cfg_handle);
		(void) bge_check_acc_handle(bgep, bgep->io_handle);
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	if (bge_start(bgep, B_TRUE) != DDI_SUCCESS) {
		(void) bge_check_acc_handle(bgep, bgep->cfg_handle);
		(void) bge_check_acc_handle(bgep, bgep->io_handle);
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	bgep->watchdog = 0;
	bgep->bge_mac_state = BGE_MAC_STARTED;
	BGE_DEBUG(("bge_m_start($%p) done", arg));

	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled) {
		if (bgep->asf_status != ASF_STAT_RUN) {
			/* start ASF heart beat */
			bgep->asf_timeout_id = timeout(bge_asf_heartbeat,
			    (void *)bgep,
			    drv_usectohz(BGE_ASF_HEARTBEAT_INTERVAL));
			bgep->asf_status = ASF_STAT_RUN;
		}
	}
#endif
	mutex_exit(bgep->genlock);

	return (0);
}
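/*
 * Note: the GLD entry points below follow the same FMA pattern seen in
 * bge_m_start() above: on completion they re-check the config and I/O
 * register access handles with bge_check_acc_handle() and, if either
 * has faulted, report DDI_SERVICE_DEGRADED via ddi_fm_service_impact()
 * and fail the call with EIO.
 */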
/*
 *	bge_unicst_set() -- set the physical network address
 */
static int
bge_unicst_set(void *arg, const uint8_t *macaddr, int slot)
{
	bge_t *bgep = arg;		/* private device info	*/

	BGE_TRACE(("bge_m_unicst_set($%p, %s)", arg,
	    ether_sprintf((void *)macaddr)));
	/*
	 * Remember the new current address in the driver state
	 * Sync the chip's idea of the address too ...
	 */
	mutex_enter(bgep->genlock);
	if (!(bgep->progress & PROGRESS_INTR)) {
		/* can happen during autorecovery */
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	ethaddr_copy(macaddr, bgep->curr_addr[slot].addr);
#ifdef BGE_IPMI_ASF
	if (bge_chip_sync(bgep, B_FALSE) == DDI_FAILURE) {
#else
	if (bge_chip_sync(bgep) == DDI_FAILURE) {
#endif
		(void) bge_check_acc_handle(bgep, bgep->cfg_handle);
		(void) bge_check_acc_handle(bgep, bgep->io_handle);
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled) {
		/*
		 * The above bge_chip_sync() function wrote the ethernet MAC
		 * addresses registers which destroyed the IPMI/ASF sideband.
		 * Here, we have to reset chip to make IPMI/ASF sideband work.
		 */
		if (bgep->asf_status == ASF_STAT_RUN) {
			/*
			 * We must stop ASF heart beat before bge_chip_stop(),
			 * otherwise some computers (ex. IBM HS20 blade
			 * server) may crash.
			 */
			bge_asf_update_status(bgep);
			bge_asf_stop_timer(bgep);
			bgep->asf_status = ASF_STAT_STOP;

			bge_asf_pre_reset_operations(bgep, BGE_INIT_RESET);
		}
		bge_chip_stop(bgep, B_FALSE);

		if (bge_restart(bgep, B_FALSE) == DDI_FAILURE) {
			(void) bge_check_acc_handle(bgep, bgep->cfg_handle);
			(void) bge_check_acc_handle(bgep, bgep->io_handle);
			ddi_fm_service_impact(bgep->devinfo,
			    DDI_SERVICE_DEGRADED);
			mutex_exit(bgep->genlock);
			return (EIO);
		}

		/*
		 * Start our ASF heartbeat counter as soon as possible.
		 */
		if (bgep->asf_status != ASF_STAT_RUN) {
			/* start ASF heart beat */
			bgep->asf_timeout_id = timeout(bge_asf_heartbeat,
			    (void *)bgep,
			    drv_usectohz(BGE_ASF_HEARTBEAT_INTERVAL));
			bgep->asf_status = ASF_STAT_RUN;
		}
	}
#endif
	BGE_DEBUG(("bge_m_unicst_set($%p) done", arg));
	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	mutex_exit(bgep->genlock);

	return (0);
}

extern void bge_wake_factotum(bge_t *);

static boolean_t
bge_param_locked(mac_prop_id_t pr_num)
{
	/*
	 * All adv_* parameters are locked (read-only) while
	 * the device is in any sort of loopback mode ...
	 */
	switch (pr_num) {
		case MAC_PROP_ADV_1000FDX_CAP:
		case MAC_PROP_EN_1000FDX_CAP:
		case MAC_PROP_ADV_1000HDX_CAP:
		case MAC_PROP_EN_1000HDX_CAP:
		case MAC_PROP_ADV_100FDX_CAP:
		case MAC_PROP_EN_100FDX_CAP:
		case MAC_PROP_ADV_100HDX_CAP:
		case MAC_PROP_EN_100HDX_CAP:
		case MAC_PROP_ADV_10FDX_CAP:
		case MAC_PROP_EN_10FDX_CAP:
		case MAC_PROP_ADV_10HDX_CAP:
		case MAC_PROP_EN_10HDX_CAP:
		case MAC_PROP_AUTONEG:
		case MAC_PROP_FLOWCTRL:
			return (B_TRUE);
	}
	return (B_FALSE);
}
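/*
 * The property callbacks below back the dladm(1M) link properties;
 * commands such as the following (illustrative only, not taken from
 * this source) are delivered here as MAC_PROP_* set/get requests:
 *
 *	# dladm set-linkprop -p en_1000fdx_cap=0 bge0
 *	# dladm show-linkprop -p flowctrl bge0
 */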
/*
 * callback functions for set/get of properties
 */
static int
bge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
    uint_t pr_valsize, const void *pr_val)
{
	bge_t *bgep = barg;
	int err = 0;
	uint32_t cur_mtu, new_mtu;
	uint_t maxsdu;
	link_flowctrl_t fl;

	mutex_enter(bgep->genlock);
	if (bgep->param_loop_mode != BGE_LOOP_NONE &&
	    bge_param_locked(pr_num)) {
		/*
		 * All adv_* parameters are locked (read-only)
		 * while the device is in any sort of loopback mode.
		 */
		mutex_exit(bgep->genlock);
		return (EBUSY);
	}
	if ((bgep->chipid.flags & CHIP_FLAG_SERDES) &&
	    ((pr_num == MAC_PROP_EN_100FDX_CAP) ||
	    (pr_num == MAC_PROP_EN_100HDX_CAP) ||
	    (pr_num == MAC_PROP_EN_10FDX_CAP) ||
	    (pr_num == MAC_PROP_EN_10HDX_CAP))) {
		/*
		 * these properties are read/write on copper,
		 * read-only and 0 on serdes
		 */
		mutex_exit(bgep->genlock);
		return (ENOTSUP);
	}
	if (DEVICE_5906_SERIES_CHIPSETS(bgep) &&
	    ((pr_num == MAC_PROP_EN_1000FDX_CAP) ||
	    (pr_num == MAC_PROP_EN_1000HDX_CAP))) {
		mutex_exit(bgep->genlock);
		return (ENOTSUP);
	}

	switch (pr_num) {
		case MAC_PROP_EN_1000FDX_CAP:
			bgep->param_en_1000fdx = *(uint8_t *)pr_val;
			bgep->param_adv_1000fdx = *(uint8_t *)pr_val;
			goto reprogram;
		case MAC_PROP_EN_1000HDX_CAP:
			bgep->param_en_1000hdx = *(uint8_t *)pr_val;
			bgep->param_adv_1000hdx = *(uint8_t *)pr_val;
			goto reprogram;
		case MAC_PROP_EN_100FDX_CAP:
			bgep->param_en_100fdx = *(uint8_t *)pr_val;
			bgep->param_adv_100fdx = *(uint8_t *)pr_val;
			goto reprogram;
		case MAC_PROP_EN_100HDX_CAP:
			bgep->param_en_100hdx = *(uint8_t *)pr_val;
			bgep->param_adv_100hdx = *(uint8_t *)pr_val;
			goto reprogram;
		case MAC_PROP_EN_10FDX_CAP:
			bgep->param_en_10fdx = *(uint8_t *)pr_val;
			bgep->param_adv_10fdx = *(uint8_t *)pr_val;
			goto reprogram;
		case MAC_PROP_EN_10HDX_CAP:
			bgep->param_en_10hdx = *(uint8_t *)pr_val;
			bgep->param_adv_10hdx = *(uint8_t *)pr_val;
reprogram:
			if (err == 0 && bge_reprogram(bgep) == IOC_INVAL)
				err = EINVAL;
			break;
		case MAC_PROP_ADV_1000FDX_CAP:
		case MAC_PROP_ADV_1000HDX_CAP:
		case MAC_PROP_ADV_100FDX_CAP:
		case MAC_PROP_ADV_100HDX_CAP:
		case MAC_PROP_ADV_10FDX_CAP:
		case MAC_PROP_ADV_10HDX_CAP:
		case MAC_PROP_STATUS:
		case MAC_PROP_SPEED:
		case MAC_PROP_DUPLEX:
			err = ENOTSUP; /* read-only prop. Can't set this */
			break;
		case MAC_PROP_AUTONEG:
			bgep->param_adv_autoneg = *(uint8_t *)pr_val;
			if (bge_reprogram(bgep) == IOC_INVAL)
				err = EINVAL;
			break;
		case MAC_PROP_MTU:
			cur_mtu = bgep->chipid.default_mtu;
			bcopy(pr_val, &new_mtu, sizeof (new_mtu));

			if (new_mtu == cur_mtu) {
				err = 0;
				break;
			}
			if (new_mtu < BGE_DEFAULT_MTU ||
			    new_mtu > BGE_MAXIMUM_MTU) {
				err = EINVAL;
				break;
			}
			if ((new_mtu > BGE_DEFAULT_MTU) &&
			    (bgep->chipid.flags & CHIP_FLAG_NO_JUMBO)) {
				err = EINVAL;
				break;
			}
			if (bgep->bge_mac_state == BGE_MAC_STARTED) {
				err = EBUSY;
				break;
			}
			bgep->chipid.default_mtu = new_mtu;
			if (bge_chip_id_init(bgep)) {
				err = EINVAL;
				break;
			}
			maxsdu = bgep->chipid.ethmax_size -
			    sizeof (struct ether_header);
			err = mac_maxsdu_update(bgep->mh, maxsdu);
			if (err == 0) {
				bgep->bge_dma_error = B_TRUE;
				bgep->manual_reset = B_TRUE;
				bge_chip_stop(bgep, B_TRUE);
				bge_wake_factotum(bgep);
				err = 0;
			}
			break;
		case MAC_PROP_FLOWCTRL:
			bcopy(pr_val, &fl, sizeof (fl));
			switch (fl) {
			default:
				err = ENOTSUP;
				break;
			case LINK_FLOWCTRL_NONE:
				bgep->param_adv_pause = 0;
				bgep->param_adv_asym_pause = 0;

				bgep->param_link_rx_pause = B_FALSE;
				bgep->param_link_tx_pause = B_FALSE;
				break;
			case LINK_FLOWCTRL_RX:
				bgep->param_adv_pause = 1;
				bgep->param_adv_asym_pause = 1;

				bgep->param_link_rx_pause = B_TRUE;
				bgep->param_link_tx_pause = B_FALSE;
				break;
			case LINK_FLOWCTRL_TX:
				bgep->param_adv_pause = 0;
				bgep->param_adv_asym_pause = 1;

				bgep->param_link_rx_pause = B_FALSE;
				bgep->param_link_tx_pause = B_TRUE;
				break;
			case LINK_FLOWCTRL_BI:
				bgep->param_adv_pause = 1;
				bgep->param_adv_asym_pause = 0;

				bgep->param_link_rx_pause = B_TRUE;
				bgep->param_link_tx_pause = B_TRUE;
				break;
			}

			if (err == 0) {
				if (bge_reprogram(bgep) == IOC_INVAL)
					err = EINVAL;
			}

			break;
		case MAC_PROP_PRIVATE:
			err = bge_set_priv_prop(bgep, pr_name, pr_valsize,
			    pr_val);
			break;
		default:
			err = ENOTSUP;
			break;
	}
	mutex_exit(bgep->genlock);
	return (err);
}

/* ARGSUSED */
static int
bge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
    uint_t pr_flags, uint_t pr_valsize, void *pr_val, uint_t *perm)
{
	bge_t *bgep = barg;
	int err = 0;
	link_flowctrl_t fl;
	uint64_t speed;
	int flags = bgep->chipid.flags;
	boolean_t is_default = (pr_flags & MAC_PROP_DEFAULT);

	if (pr_valsize == 0)
		return (EINVAL);
	bzero(pr_val, pr_valsize);

	*perm = MAC_PROP_PERM_RW;

	mutex_enter(bgep->genlock);
	if ((bgep->param_loop_mode != BGE_LOOP_NONE &&
	    bge_param_locked(pr_num)) ||
	    ((bgep->chipid.flags & CHIP_FLAG_SERDES) &&
	    ((pr_num == MAC_PROP_EN_100FDX_CAP) ||
	    (pr_num == MAC_PROP_EN_100HDX_CAP) ||
	    (pr_num == MAC_PROP_EN_10FDX_CAP) ||
	    (pr_num == MAC_PROP_EN_10HDX_CAP))) ||
	    (DEVICE_5906_SERIES_CHIPSETS(bgep) &&
	    ((pr_num == MAC_PROP_EN_1000FDX_CAP) ||
	    (pr_num == MAC_PROP_EN_1000HDX_CAP))))
		*perm = MAC_PROP_PERM_READ;
	mutex_exit(bgep->genlock);

	switch (pr_num) {
		case MAC_PROP_DUPLEX:
			*perm = MAC_PROP_PERM_READ;
			if (pr_valsize < sizeof (link_duplex_t))
				return (EINVAL);
			bcopy(&bgep->param_link_duplex, pr_val,
			    sizeof (link_duplex_t));
			break;
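		/*
		 * Note: param_link_speed is kept in Mb/s; MAC_PROP_SPEED
		 * is reported to the MAC layer in bits per second, hence
		 * the factor of 1000000 below.
		 */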
		case MAC_PROP_SPEED:
			*perm = MAC_PROP_PERM_READ;
			if (pr_valsize < sizeof (speed))
				return (EINVAL);
			speed = bgep->param_link_speed * 1000000ull;
			bcopy(&speed, pr_val, sizeof (speed));
			break;
		case MAC_PROP_STATUS:
			*perm = MAC_PROP_PERM_READ;
			if (pr_valsize < sizeof (link_state_t))
				return (EINVAL);
			bcopy(&bgep->link_state, pr_val,
			    sizeof (link_state_t));
			break;
		case MAC_PROP_AUTONEG:
			if (is_default)
				*(uint8_t *)pr_val = 1;
			else
				*(uint8_t *)pr_val = bgep->param_adv_autoneg;
			break;
		case MAC_PROP_FLOWCTRL:
			if (pr_valsize < sizeof (fl))
				return (EINVAL);
			if (is_default) {
				fl = LINK_FLOWCTRL_BI;
				bcopy(&fl, pr_val, sizeof (fl));
				break;
			}

			if (bgep->param_link_rx_pause &&
			    !bgep->param_link_tx_pause)
				fl = LINK_FLOWCTRL_RX;

			if (!bgep->param_link_rx_pause &&
			    !bgep->param_link_tx_pause)
				fl = LINK_FLOWCTRL_NONE;

			if (!bgep->param_link_rx_pause &&
			    bgep->param_link_tx_pause)
				fl = LINK_FLOWCTRL_TX;

			if (bgep->param_link_rx_pause &&
			    bgep->param_link_tx_pause)
				fl = LINK_FLOWCTRL_BI;
			bcopy(&fl, pr_val, sizeof (fl));
			break;
		case MAC_PROP_ADV_1000FDX_CAP:
			*perm = MAC_PROP_PERM_READ;
			if (is_default) {
				if (DEVICE_5906_SERIES_CHIPSETS(bgep))
					*(uint8_t *)pr_val = 0;
				else
					*(uint8_t *)pr_val = 1;
			} else
				*(uint8_t *)pr_val = bgep->param_adv_1000fdx;
			break;
		case MAC_PROP_EN_1000FDX_CAP:
			if (is_default) {
				if (DEVICE_5906_SERIES_CHIPSETS(bgep))
					*(uint8_t *)pr_val = 0;
				else
					*(uint8_t *)pr_val = 1;
			} else
				*(uint8_t *)pr_val = bgep->param_en_1000fdx;
			break;
		case MAC_PROP_ADV_1000HDX_CAP:
			*perm = MAC_PROP_PERM_READ;
			if (is_default) {
				if (DEVICE_5906_SERIES_CHIPSETS(bgep))
					*(uint8_t *)pr_val = 0;
				else
					*(uint8_t *)pr_val = 1;
			} else
				*(uint8_t *)pr_val = bgep->param_adv_1000hdx;
			break;
		case MAC_PROP_EN_1000HDX_CAP:
			if (is_default) {
				if (DEVICE_5906_SERIES_CHIPSETS(bgep))
					*(uint8_t *)pr_val = 0;
				else
					*(uint8_t *)pr_val = 1;
			} else
				*(uint8_t *)pr_val = bgep->param_en_1000hdx;
			break;
		case MAC_PROP_ADV_100FDX_CAP:
			*perm = MAC_PROP_PERM_READ;
			if (is_default) {
				*(uint8_t *)pr_val =
				    ((flags & CHIP_FLAG_SERDES) ? 0 : 1);
			} else {
				*(uint8_t *)pr_val = bgep->param_adv_100fdx;
			}
			break;
		case MAC_PROP_EN_100FDX_CAP:
			if (is_default) {
				*(uint8_t *)pr_val =
				    ((flags & CHIP_FLAG_SERDES) ? 0 : 1);
			} else {
				*(uint8_t *)pr_val = bgep->param_en_100fdx;
			}
			break;
		case MAC_PROP_ADV_100HDX_CAP:
			*perm = MAC_PROP_PERM_READ;
			if (is_default) {
				*(uint8_t *)pr_val =
				    ((flags & CHIP_FLAG_SERDES) ? 0 : 1);
			} else {
				*(uint8_t *)pr_val = bgep->param_adv_100hdx;
			}
			break;
		case MAC_PROP_EN_100HDX_CAP:
			if (is_default) {
				*(uint8_t *)pr_val =
				    ((flags & CHIP_FLAG_SERDES) ? 0 : 1);
			} else {
				*(uint8_t *)pr_val = bgep->param_en_100hdx;
			}
			break;
		case MAC_PROP_ADV_10FDX_CAP:
			*perm = MAC_PROP_PERM_READ;
			if (is_default) {
				*(uint8_t *)pr_val =
				    ((flags & CHIP_FLAG_SERDES) ? 0 : 1);
			} else {
				*(uint8_t *)pr_val = bgep->param_adv_10fdx;
			}
			break;
		case MAC_PROP_EN_10FDX_CAP:
			if (is_default) {
				*(uint8_t *)pr_val =
				    ((flags & CHIP_FLAG_SERDES) ? 0 : 1);
			} else {
				*(uint8_t *)pr_val = bgep->param_en_10fdx;
			}
			break;
		case MAC_PROP_ADV_10HDX_CAP:
			*perm = MAC_PROP_PERM_READ;
			if (is_default) {
				*(uint8_t *)pr_val =
				    ((flags & CHIP_FLAG_SERDES) ? 0 : 1);
			} else {
				*(uint8_t *)pr_val = bgep->param_adv_10hdx;
			}
			break;
		case MAC_PROP_EN_10HDX_CAP:
			if (is_default) {
				*(uint8_t *)pr_val =
				    ((flags & CHIP_FLAG_SERDES) ? 0 : 1);
			} else {
				*(uint8_t *)pr_val = bgep->param_en_10hdx;
			}
			break;
		case MAC_PROP_ADV_100T4_CAP:
		case MAC_PROP_EN_100T4_CAP:
			*perm = MAC_PROP_PERM_READ;
			*(uint8_t *)pr_val = 0;
			break;
		case MAC_PROP_PRIVATE:
			err = bge_get_priv_prop(bgep, pr_name, pr_flags,
			    pr_valsize, pr_val);
			return (err);
		case MAC_PROP_MTU: {
			mac_propval_range_t range;

			if (!(pr_flags & MAC_PROP_POSSIBLE))
				return (ENOTSUP);
			if (pr_valsize < sizeof (mac_propval_range_t))
				return (EINVAL);
			range.mpr_count = 1;
			range.mpr_type = MAC_PROPVAL_UINT32;
			range.range_uint32[0].mpur_min =
			    range.range_uint32[0].mpur_max = BGE_DEFAULT_MTU;
			if (!(flags & CHIP_FLAG_NO_JUMBO))
				range.range_uint32[0].mpur_max =
				    BGE_MAXIMUM_MTU;
			bcopy(&range, pr_val, sizeof (range));
			break;
		}
		default:
			return (ENOTSUP);
	}
	return (0);
}

/* ARGSUSED */
static int
bge_set_priv_prop(bge_t *bgep, const char *pr_name, uint_t pr_valsize,
    const void *pr_val)
{
	int err = 0;
	long result;

	if (strcmp(pr_name, "_adv_pause_cap") == 0) {
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
		if (result > 1 || result < 0) {
			err = EINVAL;
		} else {
			bgep->param_adv_pause = (uint32_t)result;
			if (bge_reprogram(bgep) == IOC_INVAL)
				err = EINVAL;
		}
		return (err);
	}
	if (strcmp(pr_name, "_adv_asym_pause_cap") == 0) {
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
		if (result > 1 || result < 0) {
			err = EINVAL;
		} else {
			bgep->param_adv_asym_pause = (uint32_t)result;
			if (bge_reprogram(bgep) == IOC_INVAL)
				err = EINVAL;
		}
		return (err);
	}
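	/*
	 * Note: only _adv_pause_cap and _adv_asym_pause_cap are advertised
	 * in bge_priv_prop[] above; the remaining names handled below
	 * (_drain_max, _msi_cnt and the interrupt-coalescing knobs) are
	 * nevertheless accepted here when passed in as private properties.
	 */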
	if (strcmp(pr_name, "_drain_max") == 0) {
		/*
		 * on the Tx side, we need to update the h/w register for
		 * real packet transmission per packet. The drain_max
		 * parameter is used to reduce the register access. This
		 * parameter controls the max number of packets that we
		 * will hold before updating the bge h/w to trigger h/w
		 * transmit. The bge chipset usually has a max of 512 Tx
		 * descriptors, thus the upper bound on drain_max is 512.
		 */
		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
		if (result > 512 || result < 1)
			err = EINVAL;
		else {
			bgep->param_drain_max = (uint32_t)result;
			if (bge_reprogram(bgep) == IOC_INVAL)
				err = EINVAL;
		}
		return (err);
	}
	if (strcmp(pr_name, "_msi_cnt") == 0) {
		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
		if (result > 7 || result < 0)
			err = EINVAL;
		else {
			bgep->param_msi_cnt = (uint32_t)result;
			if (bge_reprogram(bgep) == IOC_INVAL)
				err = EINVAL;
		}
		return (err);
	}
	if (strcmp(pr_name, "_rx_intr_coalesce_blank_time") == 0) {
		if (ddi_strtol(pr_val, (char **)NULL, 0, &result) != 0)
			return (EINVAL);
		if (result < 0)
			err = EINVAL;
		else {
			bgep->chipid.rx_ticks_norm = (uint32_t)result;
			bge_chip_coalesce_update(bgep);
		}
		return (err);
	}

	if (strcmp(pr_name, "_rx_intr_coalesce_pkt_cnt") == 0) {
		if (ddi_strtol(pr_val, (char **)NULL, 0, &result) != 0)
			return (EINVAL);

		if (result < 0)
			err = EINVAL;
		else {
			bgep->chipid.rx_count_norm = (uint32_t)result;
			bge_chip_coalesce_update(bgep);
		}
		return (err);
	}
	if (strcmp(pr_name, "_tx_intr_coalesce_blank_time") == 0) {
		if (ddi_strtol(pr_val, (char **)NULL, 0, &result) != 0)
			return (EINVAL);
		if (result < 0)
			err = EINVAL;
		else {
			bgep->chipid.tx_ticks_norm = (uint32_t)result;
			bge_chip_coalesce_update(bgep);
		}
		return (err);
	}

	if (strcmp(pr_name, "_tx_intr_coalesce_pkt_cnt") == 0) {
		if (ddi_strtol(pr_val, (char **)NULL, 0, &result) != 0)
			return (EINVAL);

		if (result < 0)
			err = EINVAL;
		else {
			bgep->chipid.tx_count_norm = (uint32_t)result;
			bge_chip_coalesce_update(bgep);
		}
		return (err);
	}
	return (ENOTSUP);
}
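/*
 * Note: the get side below answers to "_intr_coalesce_blank_time" and
 * "_intr_coalesce_pkt_cnt" (reporting the receive-side values), whereas
 * the set side above uses the separate _rx_-/_tx_-prefixed names; the
 * _rx_/_tx_ names and the tx-side values are not readable here.
 */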
static int
bge_get_priv_prop(bge_t *bge, const char *pr_name, uint_t pr_flags,
    uint_t pr_valsize, void *pr_val)
{
	int err = ENOTSUP;
	boolean_t is_default = (pr_flags & MAC_PROP_DEFAULT);
	int value;

	if (strcmp(pr_name, "_adv_pause_cap") == 0) {
		value = (is_default? 1 : bge->param_adv_pause);
		err = 0;
		goto done;
	}
	if (strcmp(pr_name, "_adv_asym_pause_cap") == 0) {
		value = (is_default? 1 : bge->param_adv_asym_pause);
		err = 0;
		goto done;
	}
	if (strcmp(pr_name, "_drain_max") == 0) {
		value = (is_default? 64 : bge->param_drain_max);
		err = 0;
		goto done;
	}
	if (strcmp(pr_name, "_msi_cnt") == 0) {
		value = (is_default? 0 : bge->param_msi_cnt);
		err = 0;
		goto done;
	}

	if (strcmp(pr_name, "_intr_coalesce_blank_time") == 0) {
		value = (is_default? bge_rx_ticks_norm :
		    bge->chipid.rx_ticks_norm);
		err = 0;
		goto done;
	}

	if (strcmp(pr_name, "_intr_coalesce_pkt_cnt") == 0) {
		value = (is_default? bge_rx_count_norm :
		    bge->chipid.rx_count_norm);
		err = 0;
		goto done;
	}

done:
	if (err == 0) {
		(void) snprintf(pr_val, pr_valsize, "%d", value);
	}
	return (err);
}

/*
 * Compute the index of the required bit in the multicast hash map.
 * This must mirror the way the hardware actually does it!
 * See Broadcom document 570X-PG102-R page 125.
 */
static uint32_t
bge_hash_index(const uint8_t *mca)
{
	uint32_t hash;

	CRC32(hash, mca, ETHERADDRL, -1U, crc32_table);

	return (hash);
}
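/*
 * For illustration: bge_m_multicst() below reduces the CRC to
 * index = hash % BGE_HASH_TABLE_SIZE, then locates the map bit as
 * word = index / 32 and bit = 1 << (index % 32).  So an index of 75,
 * say, lands in mcast_hash[2] at bit mask 0x800 (75 = 2*32 + 11).
 */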
/*
 * bge_m_multicst_add() -- enable/disable a multicast address
 */
static int
bge_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
{
	bge_t *bgep = arg;		/* private device info	*/
	uint32_t hash;
	uint32_t index;
	uint32_t word;
	uint32_t bit;
	uint8_t *refp;

	BGE_TRACE(("bge_m_multicst($%p, %s, %s)", arg,
	    (add) ? "add" : "remove", ether_sprintf((void *)mca)));

	/*
	 * Precalculate all required masks, pointers etc ...
	 */
	hash = bge_hash_index(mca);
	index = hash % BGE_HASH_TABLE_SIZE;
	word = index/32u;
	bit = 1 << (index % 32u);
	refp = &bgep->mcast_refs[index];

	BGE_DEBUG(("bge_m_multicst: hash 0x%x index %d (%d:0x%x) = %d",
	    hash, index, word, bit, *refp));

	/*
	 * We must set the appropriate bit in the hash map (and the
	 * corresponding h/w register) when the refcount goes from 0
	 * to >0, and clear it when the last ref goes away (refcount
	 * goes from >0 back to 0).  If we change the hash map, we
	 * must also update the chip's hardware map registers.
	 */
	mutex_enter(bgep->genlock);
	if (!(bgep->progress & PROGRESS_INTR)) {
		/* can happen during autorecovery */
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	if (add) {
		if ((*refp)++ == 0) {
			bgep->mcast_hash[word] |= bit;
#ifdef BGE_IPMI_ASF
			if (bge_chip_sync(bgep, B_TRUE) == DDI_FAILURE) {
#else
			if (bge_chip_sync(bgep) == DDI_FAILURE) {
#endif
				(void) bge_check_acc_handle(bgep,
				    bgep->cfg_handle);
				(void) bge_check_acc_handle(bgep,
				    bgep->io_handle);
				ddi_fm_service_impact(bgep->devinfo,
				    DDI_SERVICE_DEGRADED);
				mutex_exit(bgep->genlock);
				return (EIO);
			}
		}
	} else {
		if (--(*refp) == 0) {
			bgep->mcast_hash[word] &= ~bit;
#ifdef BGE_IPMI_ASF
			if (bge_chip_sync(bgep, B_TRUE) == DDI_FAILURE) {
#else
			if (bge_chip_sync(bgep) == DDI_FAILURE) {
#endif
				(void) bge_check_acc_handle(bgep,
				    bgep->cfg_handle);
				(void) bge_check_acc_handle(bgep,
				    bgep->io_handle);
				ddi_fm_service_impact(bgep->devinfo,
				    DDI_SERVICE_DEGRADED);
				mutex_exit(bgep->genlock);
				return (EIO);
			}
		}
	}
	BGE_DEBUG(("bge_m_multicst($%p) done", arg));
	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	mutex_exit(bgep->genlock);

	return (0);
}

/*
 * bge_m_promisc() -- set or reset promiscuous mode on the board
 *
 *	Program the hardware to enable/disable promiscuous and/or
 *	receive-all-multicast modes.
 */
static int
bge_m_promisc(void *arg, boolean_t on)
{
	bge_t *bgep = arg;

	BGE_TRACE(("bge_m_promisc_set($%p, %d)", arg, on));

	/*
	 * Store MAC layer specified mode and pass to chip layer to update h/w
	 */
	mutex_enter(bgep->genlock);
	if (!(bgep->progress & PROGRESS_INTR)) {
		/* can happen during autorecovery */
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	bgep->promisc = on;
#ifdef BGE_IPMI_ASF
	if (bge_chip_sync(bgep, B_TRUE) == DDI_FAILURE) {
#else
	if (bge_chip_sync(bgep) == DDI_FAILURE) {
#endif
		(void) bge_check_acc_handle(bgep, bgep->cfg_handle);
		(void) bge_check_acc_handle(bgep, bgep->io_handle);
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	BGE_DEBUG(("bge_m_promisc_set($%p) done", arg));
	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	mutex_exit(bgep->genlock);
	return (0);
}

/*
 * Find the slot for the specified unicast address
 */
int
bge_unicst_find(bge_t *bgep, const uint8_t *mac_addr)
{
	int slot;

	ASSERT(mutex_owned(bgep->genlock));

	for (slot = 0; slot < bgep->unicst_addr_total; slot++) {
		if (bcmp(bgep->curr_addr[slot].addr, mac_addr,
		    ETHERADDRL) == 0)
			return (slot);
	}

	return (-1);
}

/*
 * Programs the classifier to start steering packets matching 'mac_addr' to
 * the specified ring 'arg'.
 */
static int
bge_addmac(void *arg, const uint8_t *mac_addr)
{
	recv_ring_t *rrp = (recv_ring_t *)arg;
	bge_t *bgep = rrp->bgep;
	bge_recv_rule_t *rulep = bgep->recv_rules;
	bge_rule_info_t *rinfop = NULL;
	uint8_t ring = (uint8_t)(rrp - bgep->recv) + 1;
	int i;
	uint16_t tmp16;
	uint32_t tmp32;
	int slot;
	int err;

	mutex_enter(bgep->genlock);
	if (bgep->unicst_addr_avail == 0) {
		mutex_exit(bgep->genlock);
		return (ENOSPC);
	}

	/*
	 * First add the unicast address to an available slot.
	 */
	slot = bge_unicst_find(bgep, mac_addr);
	ASSERT(slot == -1);

	for (slot = 0; slot < bgep->unicst_addr_total; slot++) {
		if (!bgep->curr_addr[slot].set) {
			bgep->curr_addr[slot].set = B_TRUE;
			break;
		}
	}

	ASSERT(slot < bgep->unicst_addr_total);
	bgep->unicst_addr_avail--;
	mutex_exit(bgep->genlock);

	if ((err = bge_unicst_set(bgep, mac_addr, slot)) != 0)
		goto fail;

	/* A rule is already here. Deny this. */
	if (rrp->mac_addr_rule != NULL) {
		err = ether_cmp(mac_addr, rrp->mac_addr_val) ? EEXIST : EBUSY;
		goto fail;
	}

	/*
	 * Allocate a bge_rule_info_t to keep track of which rule slots
	 * are being used.
	 */
	rinfop = kmem_zalloc(sizeof (bge_rule_info_t), KM_NOSLEEP);
	if (rinfop == NULL) {
		err = ENOMEM;
		goto fail;
	}
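	/*
	 * The classifier matches a 6-byte MAC address with a pair of
	 * adjacent rule slots: the first matches the leading four bytes,
	 * the second the trailing two (masked with 0xffff0000).  For
	 * example, aa:bb:cc:dd:ee:ff would be programmed (roughly) as
	 * mask_value 0xaabbccdd in slot i and 0xffff0000 | 0xeeff in
	 * slot i+1, as set up below.
	 */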
	/*
	 * Look for the starting slot to place the rules.
	 * The two slots we reserve must be contiguous.
	 */
	for (i = 0; i + 1 < RECV_RULES_NUM_MAX; i++)
		if ((rulep[i].control & RECV_RULE_CTL_ENABLE) == 0 &&
		    (rulep[i+1].control & RECV_RULE_CTL_ENABLE) == 0)
			break;

	ASSERT(i + 1 < RECV_RULES_NUM_MAX);

	bcopy(mac_addr, &tmp32, sizeof (tmp32));
	rulep[i].mask_value = ntohl(tmp32);
	rulep[i].control = RULE_DEST_MAC_1(ring) | RECV_RULE_CTL_AND;
	bge_reg_put32(bgep, RECV_RULE_MASK_REG(i), rulep[i].mask_value);
	bge_reg_put32(bgep, RECV_RULE_CONTROL_REG(i), rulep[i].control);

	bcopy(mac_addr + 4, &tmp16, sizeof (tmp16));
	rulep[i+1].mask_value = 0xffff0000 | ntohs(tmp16);
	rulep[i+1].control = RULE_DEST_MAC_2(ring);
	bge_reg_put32(bgep, RECV_RULE_MASK_REG(i+1), rulep[i+1].mask_value);
	bge_reg_put32(bgep, RECV_RULE_CONTROL_REG(i+1), rulep[i+1].control);
	rinfop->start = i;
	rinfop->count = 2;

	rrp->mac_addr_rule = rinfop;
	bcopy(mac_addr, rrp->mac_addr_val, ETHERADDRL);

	return (0);

fail:
	/* Clear the address just set */
	(void) bge_unicst_set(bgep, zero_addr, slot);
	mutex_enter(bgep->genlock);
	bgep->curr_addr[slot].set = B_FALSE;
	bgep->unicst_addr_avail++;
	mutex_exit(bgep->genlock);

	return (err);
}

/*
 * Stop classifying packets matching the MAC address to the specified ring.
 */
static int
bge_remmac(void *arg, const uint8_t *mac_addr)
{
	recv_ring_t *rrp = (recv_ring_t *)arg;
	bge_t *bgep = rrp->bgep;
	bge_recv_rule_t *rulep = bgep->recv_rules;
	bge_rule_info_t *rinfop = rrp->mac_addr_rule;
	int start;
	int slot;
	int err;

	/*
	 * Remove the MAC address from its slot.
	 */
	mutex_enter(bgep->genlock);
	slot = bge_unicst_find(bgep, mac_addr);
	if (slot == -1) {
		mutex_exit(bgep->genlock);
		return (EINVAL);
	}

	ASSERT(bgep->curr_addr[slot].set);
	mutex_exit(bgep->genlock);

	if ((err = bge_unicst_set(bgep, zero_addr, slot)) != 0)
		return (err);

	if (rinfop == NULL || ether_cmp(mac_addr, rrp->mac_addr_val) != 0)
		return (EINVAL);

	start = rinfop->start;
	rulep[start].mask_value = 0;
	rulep[start].control = 0;
	bge_reg_put32(bgep, RECV_RULE_MASK_REG(start),
	    rulep[start].mask_value);
	bge_reg_put32(bgep, RECV_RULE_CONTROL_REG(start),
	    rulep[start].control);
	start++;
	rulep[start].mask_value = 0;
	rulep[start].control = 0;
	bge_reg_put32(bgep, RECV_RULE_MASK_REG(start),
	    rulep[start].mask_value);
	bge_reg_put32(bgep, RECV_RULE_CONTROL_REG(start),
	    rulep[start].control);

	kmem_free(rinfop, sizeof (bge_rule_info_t));
	rrp->mac_addr_rule = NULL;
	bzero(rrp->mac_addr_val, ETHERADDRL);

	mutex_enter(bgep->genlock);
	bgep->curr_addr[slot].set = B_FALSE;
	bgep->unicst_addr_avail++;
	mutex_exit(bgep->genlock);

	return (0);
}

static int
bge_flag_intr_enable(mac_intr_handle_t ih)
{
	recv_ring_t *rrp = (recv_ring_t *)ih;
	bge_t *bgep = rrp->bgep;

	mutex_enter(bgep->genlock);
	rrp->poll_flag = 0;
	mutex_exit(bgep->genlock);

	return (0);
}

static int
bge_flag_intr_disable(mac_intr_handle_t ih)
{
	recv_ring_t *rrp = (recv_ring_t *)ih;
	bge_t *bgep = rrp->bgep;

	mutex_enter(bgep->genlock);
	rrp->poll_flag = 1;
	mutex_exit(bgep->genlock);

	return (0);
}
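/*
 * These two callbacks implement the MAC layer's per-ring polling switch:
 * mi_disable (bge_flag_intr_disable) sets poll_flag so that the receive
 * path is presumably driven via mri_poll (bge_poll_ring, registered
 * below) rather than by interrupt delivery, and mi_enable reverses that.
 */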
static int
bge_ring_start(mac_ring_driver_t rh, uint64_t mr_gen_num)
{
	recv_ring_t *rx_ring;

	rx_ring = (recv_ring_t *)rh;
	mutex_enter(rx_ring->rx_lock);
	rx_ring->ring_gen_num = mr_gen_num;
	mutex_exit(rx_ring->rx_lock);
	return (0);
}

/*
 * Callback function for MAC layer to register all rings
 * for given ring_group, noted by rg_index.
 */
void
bge_fill_ring(void *arg, mac_ring_type_t rtype, const int rg_index,
    const int index, mac_ring_info_t *infop, mac_ring_handle_t rh)
{
	bge_t *bgep = arg;
	mac_intr_t *mintr;

	switch (rtype) {
	case MAC_RING_TYPE_RX: {
		recv_ring_t *rx_ring;
		ASSERT(rg_index >= 0 && rg_index < MIN(bgep->chipid.rx_rings,
		    MAC_ADDRESS_REGS_MAX) && index == 0);

		rx_ring = &bgep->recv[rg_index];
		rx_ring->ring_handle = rh;

		infop->mri_driver = (mac_ring_driver_t)rx_ring;
		infop->mri_start = bge_ring_start;
		infop->mri_stop = NULL;
		infop->mri_poll = bge_poll_ring;

		mintr = &infop->mri_intr;
		mintr->mi_handle = (mac_intr_handle_t)rx_ring;
		mintr->mi_enable = bge_flag_intr_enable;
		mintr->mi_disable = bge_flag_intr_disable;

		break;
	}
	case MAC_RING_TYPE_TX:
	default:
		ASSERT(0);
		break;
	}
}

/*
 * Fill infop passed as argument
 * fill in respective ring_group info
 * Each group has a single ring in it. We keep it simple
 * and use the same internal handle for rings and groups.
 */
void
bge_fill_group(void *arg, mac_ring_type_t rtype, const int rg_index,
    mac_group_info_t *infop, mac_group_handle_t gh)
{
	bge_t *bgep = arg;

	switch (rtype) {
	case MAC_RING_TYPE_RX: {
		recv_ring_t *rx_ring;

		ASSERT(rg_index >= 0 && rg_index < MIN(bgep->chipid.rx_rings,
		    MAC_ADDRESS_REGS_MAX));
		rx_ring = &bgep->recv[rg_index];
		rx_ring->ring_group_handle = gh;

		infop->mgi_driver = (mac_group_driver_t)rx_ring;
		infop->mgi_start = NULL;
		infop->mgi_stop = NULL;
		infop->mgi_addmac = bge_addmac;
		infop->mgi_remmac = bge_remmac;
		infop->mgi_count = 1;
		break;
	}
	case MAC_RING_TYPE_TX:
	default:
		ASSERT(0);
		break;
	}
}
/*ARGSUSED*/
static boolean_t
bge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
{
	bge_t *bgep = arg;

	switch (cap) {
	case MAC_CAPAB_HCKSUM: {
		uint32_t *txflags = cap_data;

		*txflags = HCKSUM_INET_FULL_V4 | HCKSUM_IPHDRCKSUM;
		break;
	}
	case MAC_CAPAB_RINGS: {
		mac_capab_rings_t *cap_rings = cap_data;

		/* Temporarily disable multiple tx rings. */
		if (cap_rings->mr_type != MAC_RING_TYPE_RX)
			return (B_FALSE);

		cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
		cap_rings->mr_rnum = cap_rings->mr_gnum =
		    MIN(bgep->chipid.rx_rings, MAC_ADDRESS_REGS_MAX);
		cap_rings->mr_rget = bge_fill_ring;
		cap_rings->mr_gget = bge_fill_group;
		break;
	}
	default:
		return (B_FALSE);
	}
	return (B_TRUE);
}

/*
 * Loopback ioctl code
 */

static lb_property_t loopmodes[] = {
	{ normal,	"normal",	BGE_LOOP_NONE		},
	{ external,	"1000Mbps",	BGE_LOOP_EXTERNAL_1000	},
	{ external,	"100Mbps",	BGE_LOOP_EXTERNAL_100	},
	{ external,	"10Mbps",	BGE_LOOP_EXTERNAL_10	},
	{ internal,	"PHY",		BGE_LOOP_INTERNAL_PHY	},
	{ internal,	"MAC",		BGE_LOOP_INTERNAL_MAC	}
};

static enum ioc_reply
bge_set_loop_mode(bge_t *bgep, uint32_t mode)
{
	/*
	 * If the mode isn't being changed, there's nothing to do ...
	 */
	if (mode == bgep->param_loop_mode)
		return (IOC_ACK);

	/*
	 * Validate the requested mode and prepare a suitable message
	 * to explain the link down/up cycle that the change will
	 * probably induce ...
	 */
	switch (mode) {
	default:
		return (IOC_INVAL);

	case BGE_LOOP_NONE:
	case BGE_LOOP_EXTERNAL_1000:
	case BGE_LOOP_EXTERNAL_100:
	case BGE_LOOP_EXTERNAL_10:
	case BGE_LOOP_INTERNAL_PHY:
	case BGE_LOOP_INTERNAL_MAC:
		break;
	}

	/*
	 * All OK; tell the caller to reprogram
	 * the PHY and/or MAC for the new mode ...
	 */
	bgep->param_loop_mode = mode;
	return (IOC_RESTART_ACK);
}

static enum ioc_reply
bge_loop_ioctl(bge_t *bgep, queue_t *wq, mblk_t *mp, struct iocblk *iocp)
{
	lb_info_sz_t *lbsp;
	lb_property_t *lbpp;
	uint32_t *lbmp;
	int cmd;

	_NOTE(ARGUNUSED(wq))

	/*
	 * Validate format of ioctl
	 */
	if (mp->b_cont == NULL)
		return (IOC_INVAL);

	cmd = iocp->ioc_cmd;
	switch (cmd) {
	default:
		/* NOTREACHED */
		bge_error(bgep, "bge_loop_ioctl: invalid cmd 0x%x", cmd);
		return (IOC_INVAL);

	case LB_GET_INFO_SIZE:
		if (iocp->ioc_count != sizeof (lb_info_sz_t))
			return (IOC_INVAL);
		lbsp = (void *)mp->b_cont->b_rptr;
		*lbsp = sizeof (loopmodes);
		return (IOC_REPLY);

	case LB_GET_INFO:
		if (iocp->ioc_count != sizeof (loopmodes))
			return (IOC_INVAL);
		lbpp = (void *)mp->b_cont->b_rptr;
		bcopy(loopmodes, lbpp, sizeof (loopmodes));
		return (IOC_REPLY);

	case LB_GET_MODE:
		if (iocp->ioc_count != sizeof (uint32_t))
			return (IOC_INVAL);
		lbmp = (void *)mp->b_cont->b_rptr;
		*lbmp = bgep->param_loop_mode;
		return (IOC_REPLY);

	case LB_SET_MODE:
		if (iocp->ioc_count != sizeof (uint32_t))
			return (IOC_INVAL);
		lbmp = (void *)mp->b_cont->b_rptr;
		return (bge_set_loop_mode(bgep, *lbmp));
	}
}

/*
 * Specific bge IOCTLs, the gld module handles the generic ones.
 */
static void
bge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
{
	bge_t *bgep = arg;
	struct iocblk *iocp;
	enum ioc_reply status;
	boolean_t need_privilege;
	int err;
	int cmd;

	/*
	 * Validate the command before bothering with the mutex ...
	 */
	iocp = (void *)mp->b_rptr;
	iocp->ioc_error = 0;
	need_privilege = B_TRUE;
	cmd = iocp->ioc_cmd;
	switch (cmd) {
	default:
		miocnak(wq, mp, 0, EINVAL);
		return;

	case BGE_MII_READ:
	case BGE_MII_WRITE:
	case BGE_SEE_READ:
	case BGE_SEE_WRITE:
	case BGE_FLASH_READ:
	case BGE_FLASH_WRITE:
	case BGE_DIAG:
	case BGE_PEEK:
	case BGE_POKE:
	case BGE_PHY_RESET:
	case BGE_SOFT_RESET:
	case BGE_HARD_RESET:
		break;

	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
	case LB_GET_MODE:
		need_privilege = B_FALSE;
		/* FALLTHRU */
	case LB_SET_MODE:
		break;

	}

	if (need_privilege) {
		/*
		 * Check for specific net_config privilege on Solaris 10+.
		 */
		err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
		if (err != 0) {
			miocnak(wq, mp, 0, err);
			return;
		}
	}

	mutex_enter(bgep->genlock);
	if (!(bgep->progress & PROGRESS_INTR)) {
		/* can happen during autorecovery */
		mutex_exit(bgep->genlock);
		miocnak(wq, mp, 0, EIO);
		return;
	}

	switch (cmd) {
	default:
		_NOTE(NOTREACHED)
		status = IOC_INVAL;
		break;

	case BGE_MII_READ:
	case BGE_MII_WRITE:
	case BGE_SEE_READ:
	case BGE_SEE_WRITE:
	case BGE_FLASH_READ:
	case BGE_FLASH_WRITE:
	case BGE_DIAG:
	case BGE_PEEK:
	case BGE_POKE:
	case BGE_PHY_RESET:
	case BGE_SOFT_RESET:
	case BGE_HARD_RESET:
		status = bge_chip_ioctl(bgep, wq, mp, iocp);
		break;

	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
	case LB_GET_MODE:
	case LB_SET_MODE:
		status = bge_loop_ioctl(bgep, wq, mp, iocp);
		break;

	}

	/*
	 * Do we need to reprogram the PHY and/or the MAC?
	 * Do it now, while we still have the mutex.
	 *
	 * Note: update the PHY first, 'cos it controls the
	 * speed/duplex parameters that the MAC code uses.
	 */
	switch (status) {
	case IOC_RESTART_REPLY:
	case IOC_RESTART_ACK:
		if (bge_reprogram(bgep) == IOC_INVAL)
			status = IOC_INVAL;
		break;
	}

	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		status = IOC_INVAL;
	}
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		status = IOC_INVAL;
	}
	mutex_exit(bgep->genlock);

	/*
	 * Finally, decide how to reply
	 */
	switch (status) {
	default:
	case IOC_INVAL:
		/*
		 * Error, reply with a NAK and EINVAL or the specified error
		 */
		miocnak(wq, mp, 0, iocp->ioc_error == 0 ?
		    EINVAL : iocp->ioc_error);
		break;

	case IOC_DONE:
		/*
		 * OK, reply already sent
		 */
		break;

	case IOC_RESTART_ACK:
	case IOC_ACK:
		/*
		 * OK, reply with an ACK
		 */
		miocack(wq, mp, 0, 0);
		break;

	case IOC_RESTART_REPLY:
	case IOC_REPLY:
		/*
		 * OK, send prepared reply as ACK or NAK
		 */
		mp->b_datap->db_type = iocp->ioc_error == 0 ?
		    M_IOCACK : M_IOCNAK;
		qreply(wq, mp);
		break;
	}
}

/*
 * ========== Per-instance setup/teardown code ==========
 */

#undef	BGE_DBG
#define	BGE_DBG		BGE_DBG_INIT	/* debug flag for this code	*/
/*
 * Allocate an area of memory and a DMA handle for accessing it
 */
static int
bge_alloc_dma_mem(bge_t *bgep, size_t memsize, ddi_device_acc_attr_t *attr_p,
    uint_t dma_flags, dma_area_t *dma_p)
{
	caddr_t va;
	int err;

	BGE_TRACE(("bge_alloc_dma_mem($%p, %ld, $%p, 0x%x, $%p)",
	    (void *)bgep, memsize, attr_p, dma_flags, dma_p));

	/*
	 * Allocate handle
	 */
	err = ddi_dma_alloc_handle(bgep->devinfo, &dma_attr,
	    DDI_DMA_DONTWAIT, NULL, &dma_p->dma_hdl);
	if (err != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * Allocate memory
	 */
	err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize, attr_p,
	    dma_flags, DDI_DMA_DONTWAIT, NULL, &va, &dma_p->alength,
	    &dma_p->acc_hdl);
	if (err != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * Bind the two together
	 */
	dma_p->mem_va = va;
	err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL,
	    va, dma_p->alength, dma_flags, DDI_DMA_DONTWAIT, NULL,
	    &dma_p->cookie, &dma_p->ncookies);

	BGE_DEBUG(("bge_alloc_dma_mem(): bind %d bytes; err %d, %d cookies",
	    dma_p->alength, err, dma_p->ncookies));

	if (err != DDI_DMA_MAPPED || dma_p->ncookies != 1)
		return (DDI_FAILURE);

	dma_p->nslots = ~0U;
	dma_p->size = ~0U;
	dma_p->token = ~0U;
	dma_p->offset = 0;
	return (DDI_SUCCESS);
}

/*
 * Free one allocated area of DMAable memory
 */
static void
bge_free_dma_mem(dma_area_t *dma_p)
{
	if (dma_p->dma_hdl != NULL) {
		if (dma_p->ncookies) {
			(void) ddi_dma_unbind_handle(dma_p->dma_hdl);
			dma_p->ncookies = 0;
		}
		ddi_dma_free_handle(&dma_p->dma_hdl);
		dma_p->dma_hdl = NULL;
	}

	if (dma_p->acc_hdl != NULL) {
		ddi_dma_mem_free(&dma_p->acc_hdl);
		dma_p->acc_hdl = NULL;
	}
}
/*
 * Utility routine to carve a slice off a chunk of allocated memory,
 * updating the chunk descriptor accordingly.  The size of the slice
 * is given by the product of the <qty> and <size> parameters.
 */
static void
bge_slice_chunk(dma_area_t *slice, dma_area_t *chunk,
    uint32_t qty, uint32_t size)
{
	static uint32_t sequence = 0xbcd5704a;
	size_t totsize;

	totsize = qty*size;
	ASSERT(totsize <= chunk->alength);

	*slice = *chunk;
	slice->nslots = qty;
	slice->size = size;
	slice->alength = totsize;
	slice->token = ++sequence;

	chunk->mem_va = (caddr_t)chunk->mem_va + totsize;
	chunk->alength -= totsize;
	chunk->offset += totsize;
	chunk->cookie.dmac_laddress += totsize;
	chunk->cookie.dmac_size -= totsize;
}
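/*
 * A sketch of the intended usage (illustrative only; the sizes here are
 * made up, and the real callers are the ring init routines below):
 *
 *	dma_area_t chunk, ring, bufs;
 *
 *	(void) bge_alloc_dma_mem(bgep, 512*sizeof (bge_sbd_t) + 512*1536,
 *	    &bge_desc_accattr, DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &chunk);
 *	bge_slice_chunk(&ring, &chunk, 512, sizeof (bge_sbd_t));
 *	bge_slice_chunk(&bufs, &chunk, 512, 1536);
 *
 * Each slice inherits the chunk's handles and cookie, offset by the
 * space already carved off, and gets a fresh <token> for matching h/w
 * descriptors back to their s/w state.
 */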
/*
 * Initialise the specified Receive Producer (Buffer) Ring, using
 * the information in the <dma_area> descriptors that it contains
 * to set up all the other fields.  This routine should be called
 * only once for each ring.
 */
static void
bge_init_buff_ring(bge_t *bgep, uint64_t ring)
{
	buff_ring_t *brp;
	bge_status_t *bsp;
	sw_rbd_t *srbdp;
	dma_area_t pbuf;
	uint32_t bufsize;
	uint32_t nslots;
	uint32_t slot;
	uint32_t split;

	static bge_regno_t nic_ring_addrs[BGE_BUFF_RINGS_MAX] = {
		NIC_MEM_SHADOW_BUFF_STD,
		NIC_MEM_SHADOW_BUFF_JUMBO,
		NIC_MEM_SHADOW_BUFF_MINI
	};
	static bge_regno_t mailbox_regs[BGE_BUFF_RINGS_MAX] = {
		RECV_STD_PROD_INDEX_REG,
		RECV_JUMBO_PROD_INDEX_REG,
		RECV_MINI_PROD_INDEX_REG
	};
	static bge_regno_t buff_cons_xref[BGE_BUFF_RINGS_MAX] = {
		STATUS_STD_BUFF_CONS_INDEX,
		STATUS_JUMBO_BUFF_CONS_INDEX,
		STATUS_MINI_BUFF_CONS_INDEX
	};

	BGE_TRACE(("bge_init_buff_ring($%p, %d)",
	    (void *)bgep, ring));

	brp = &bgep->buff[ring];
	nslots = brp->desc.nslots;
	ASSERT(brp->buf[0].nslots == nslots/BGE_SPLIT);
	bufsize = brp->buf[0].size;

	/*
	 * Set up the copy of the h/w RCB
	 *
	 * Note: unlike Send & Receive Return Rings, (where the max_len
	 * field holds the number of slots), in a Receive Buffer Ring
	 * this field indicates the size of each buffer in the ring.
	 */
	brp->hw_rcb.host_ring_addr = brp->desc.cookie.dmac_laddress;
	brp->hw_rcb.max_len = (uint16_t)bufsize;
	brp->hw_rcb.flags = nslots > 0 ? 0 : RCB_FLAG_RING_DISABLED;
	brp->hw_rcb.nic_ring_addr = nic_ring_addrs[ring];

	/*
	 * Other one-off initialisation of per-ring data
	 */
	brp->bgep = bgep;
	bsp = DMA_VPTR(bgep->status_block);
	brp->cons_index_p = &bsp->buff_cons_index[buff_cons_xref[ring]];
	brp->chip_mbx_reg = mailbox_regs[ring];
	mutex_init(brp->rf_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(bgep->intr_pri));

	/*
	 * Allocate the array of s/w Receive Buffer Descriptors
	 */
	srbdp = kmem_zalloc(nslots*sizeof (*srbdp), KM_SLEEP);
	brp->sw_rbds = srbdp;

	/*
	 * Now initialise each array element once and for all
	 */
	for (split = 0; split < BGE_SPLIT; ++split) {
		pbuf = brp->buf[split];
		for (slot = 0; slot < nslots/BGE_SPLIT; ++srbdp, ++slot)
			bge_slice_chunk(&srbdp->pbuf, &pbuf, 1, bufsize);
		ASSERT(pbuf.alength == 0);
	}
}
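/*
 * Note (added): the nic_ring_addrs[], mailbox_regs[] and
 * buff_cons_xref[] tables in bge_init_buff_ring() above are indexed
 * in parallel by <ring>, mapping each of the {std, jumbo, mini}
 * producer rings to its NIC-memory shadow area, its producer-index
 * mailbox register, and its consumer-index slot in the status block.
 */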
/*
 * Clean up initialisation done above before the memory is freed
 */
static void
bge_fini_buff_ring(bge_t *bgep, uint64_t ring)
{
	buff_ring_t *brp;
	sw_rbd_t *srbdp;

	BGE_TRACE(("bge_fini_buff_ring($%p, %d)",
	    (void *)bgep, ring));

	brp = &bgep->buff[ring];
	srbdp = brp->sw_rbds;
	kmem_free(srbdp, brp->desc.nslots*sizeof (*srbdp));

	mutex_destroy(brp->rf_lock);
}

/*
 * Initialise the specified Receive (Return) Ring, using the
 * information in the <dma_area> descriptors that it contains
 * to set up all the other fields.  This routine should be called
 * only once for each ring.
 */
static void
bge_init_recv_ring(bge_t *bgep, uint64_t ring)
{
	recv_ring_t *rrp;
	bge_status_t *bsp;
	uint32_t nslots;

	BGE_TRACE(("bge_init_recv_ring($%p, %d)",
	    (void *)bgep, ring));

	/*
	 * The chip architecture requires that receive return rings have
	 * 512 or 1024 or 2048 elements per ring.  See 570X-PG108-R page 103.
	 */
	rrp = &bgep->recv[ring];
	nslots = rrp->desc.nslots;
	ASSERT(nslots == 0 || nslots == 512 ||
	    nslots == 1024 || nslots == 2048);

	/*
	 * Set up the copy of the h/w RCB
	 */
	rrp->hw_rcb.host_ring_addr = rrp->desc.cookie.dmac_laddress;
	rrp->hw_rcb.max_len = (uint16_t)nslots;
	rrp->hw_rcb.flags = nslots > 0 ? 0 : RCB_FLAG_RING_DISABLED;
	rrp->hw_rcb.nic_ring_addr = 0;

	/*
	 * Other one-off initialisation of per-ring data
	 */
	rrp->bgep = bgep;
	bsp = DMA_VPTR(bgep->status_block);
	rrp->prod_index_p = RECV_INDEX_P(bsp, ring);
	rrp->chip_mbx_reg = RECV_RING_CONS_INDEX_REG(ring);
	mutex_init(rrp->rx_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(bgep->intr_pri));
}


/*
 * Clean up initialisation done above before the memory is freed
 */
static void
bge_fini_recv_ring(bge_t *bgep, uint64_t ring)
{
	recv_ring_t *rrp;

	BGE_TRACE(("bge_fini_recv_ring($%p, %d)",
	    (void *)bgep, ring));

	rrp = &bgep->recv[ring];
	if (rrp->rx_softint)
		ddi_remove_softintr(rrp->rx_softint);
	mutex_destroy(rrp->rx_lock);
}
/*
 * Initialise the specified Send Ring, using the information in the
 * <dma_area> descriptors that it contains to set up all the other
 * fields.  This routine should be called only once for each ring.
 */
static void
bge_init_send_ring(bge_t *bgep, uint64_t ring)
{
	send_ring_t *srp;
	bge_status_t *bsp;
	sw_sbd_t *ssbdp;
	dma_area_t desc;
	dma_area_t pbuf;
	uint32_t nslots;
	uint32_t slot;
	uint32_t split;
	sw_txbuf_t *txbuf;

	BGE_TRACE(("bge_init_send_ring($%p, %d)",
	    (void *)bgep, ring));

	/*
	 * The chip architecture requires that host-based send rings
	 * have 512 elements per ring.  See 570X-PG102-R page 56.
	 */
	srp = &bgep->send[ring];
	nslots = srp->desc.nslots;
	ASSERT(nslots == 0 || nslots == 512);

	/*
	 * Set up the copy of the h/w RCB
	 */
	srp->hw_rcb.host_ring_addr = srp->desc.cookie.dmac_laddress;
	srp->hw_rcb.max_len = (uint16_t)nslots;
	srp->hw_rcb.flags = nslots > 0 ? 0 : RCB_FLAG_RING_DISABLED;
	srp->hw_rcb.nic_ring_addr = NIC_MEM_SHADOW_SEND_RING(ring, nslots);

	/*
	 * Other one-off initialisation of per-ring data
	 */
	srp->bgep = bgep;
	bsp = DMA_VPTR(bgep->status_block);
	srp->cons_index_p = SEND_INDEX_P(bsp, ring);
	srp->chip_mbx_reg = SEND_RING_HOST_INDEX_REG(ring);
	mutex_init(srp->tx_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(bgep->intr_pri));
	mutex_init(srp->txbuf_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(bgep->intr_pri));
	mutex_init(srp->freetxbuf_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(bgep->intr_pri));
	mutex_init(srp->tc_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(bgep->intr_pri));
	if (nslots == 0)
		return;

	/*
	 * Allocate the array of s/w Send Buffer Descriptors
	 */
	ssbdp = kmem_zalloc(nslots*sizeof (*ssbdp), KM_SLEEP);
	txbuf = kmem_zalloc(BGE_SEND_BUF_MAX*sizeof (*txbuf), KM_SLEEP);
	srp->txbuf_head =
	    kmem_zalloc(BGE_SEND_BUF_MAX*sizeof (bge_queue_item_t), KM_SLEEP);
	srp->pktp = kmem_zalloc(BGE_SEND_BUF_MAX*sizeof (send_pkt_t), KM_SLEEP);
	srp->sw_sbds = ssbdp;
	srp->txbuf = txbuf;
	srp->tx_buffers = BGE_SEND_BUF_NUM;
	srp->tx_buffers_low = srp->tx_buffers / 4;
	if (bgep->chipid.snd_buff_size > BGE_SEND_BUFF_SIZE_DEFAULT)
		srp->tx_array_max = BGE_SEND_BUF_ARRAY_JUMBO;
	else
		srp->tx_array_max = BGE_SEND_BUF_ARRAY;
	srp->tx_array = 1;

	/*
	 * Chunk tx desc area
	 */
	desc = srp->desc;
	for (slot = 0; slot < nslots; ++ssbdp, ++slot) {
		bge_slice_chunk(&ssbdp->desc, &desc, 1,
		    sizeof (bge_sbd_t));
	}
	ASSERT(desc.alength == 0);

	/*
	 * Chunk tx buffer area
	 */
	for (split = 0; split < BGE_SPLIT; ++split) {
		pbuf = srp->buf[0][split];
		for (slot = 0; slot < BGE_SEND_BUF_NUM/BGE_SPLIT; ++slot) {
			bge_slice_chunk(&txbuf->buf, &pbuf, 1,
			    bgep->chipid.snd_buff_size);
			txbuf++;
		}
		ASSERT(pbuf.alength == 0);
	}
}
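/*
 * Note (added): bge_init_send_ring() above starts each ring with a
 * single buffer array (tx_array = 1); when a ring runs short,
 * bge_alloc_txbuf_array() below grows it one array of
 * BGE_SEND_BUF_NUM buffers at a time, up to tx_array_max.
 */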
/*
 * Clean up initialisation done above before the memory is freed
 */
static void
bge_fini_send_ring(bge_t *bgep, uint64_t ring)
{
	send_ring_t *srp;
	uint32_t array;
	uint32_t split;
	uint32_t nslots;

	BGE_TRACE(("bge_fini_send_ring($%p, %d)",
	    (void *)bgep, ring));

	srp = &bgep->send[ring];
	mutex_destroy(srp->tc_lock);
	mutex_destroy(srp->freetxbuf_lock);
	mutex_destroy(srp->txbuf_lock);
	mutex_destroy(srp->tx_lock);
	nslots = srp->desc.nslots;
	if (nslots == 0)
		return;

	for (array = 1; array < srp->tx_array; ++array)
		for (split = 0; split < BGE_SPLIT; ++split)
			bge_free_dma_mem(&srp->buf[array][split]);
	kmem_free(srp->sw_sbds, nslots*sizeof (*srp->sw_sbds));
	kmem_free(srp->txbuf_head, BGE_SEND_BUF_MAX*sizeof (*srp->txbuf_head));
	kmem_free(srp->txbuf, BGE_SEND_BUF_MAX*sizeof (*srp->txbuf));
	kmem_free(srp->pktp, BGE_SEND_BUF_MAX*sizeof (*srp->pktp));
	srp->sw_sbds = NULL;
	srp->txbuf_head = NULL;
	srp->txbuf = NULL;
	srp->pktp = NULL;
}

/*
 * Initialise all transmit, receive, and buffer rings.
 */
void
bge_init_rings(bge_t *bgep)
{
	uint32_t ring;

	BGE_TRACE(("bge_init_rings($%p)", (void *)bgep));

	/*
	 * Perform one-off initialisation of each ring ...
	 */
	for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring)
		bge_init_send_ring(bgep, ring);
	for (ring = 0; ring < BGE_RECV_RINGS_MAX; ++ring)
		bge_init_recv_ring(bgep, ring);
	for (ring = 0; ring < BGE_BUFF_RINGS_MAX; ++ring)
		bge_init_buff_ring(bgep, ring);
}

/*
 * Undo the work of bge_init_rings() above before the memory is freed
 */
void
bge_fini_rings(bge_t *bgep)
{
	uint32_t ring;

	BGE_TRACE(("bge_fini_rings($%p)", (void *)bgep));

	for (ring = 0; ring < BGE_BUFF_RINGS_MAX; ++ring)
		bge_fini_buff_ring(bgep, ring);
	for (ring = 0; ring < BGE_RECV_RINGS_MAX; ++ring)
		bge_fini_recv_ring(bgep, ring);
	for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring)
		bge_fini_send_ring(bgep, ring);
}

/*
 * Called from bge_m_stop() to free the tx buffers that were
 * allocated by the tx process.
 */
void
bge_free_txbuf_arrays(send_ring_t *srp)
{
	uint32_t array;
	uint32_t split;

	ASSERT(mutex_owned(srp->tx_lock));

	/*
	 * Free the extra tx buffer DMA area
	 */
	for (array = 1; array < srp->tx_array; ++array)
		for (split = 0; split < BGE_SPLIT; ++split)
			bge_free_dma_mem(&srp->buf[array][split]);

	/*
	 * Restore initial tx buffer numbers
	 */
	srp->tx_array = 1;
	srp->tx_buffers = BGE_SEND_BUF_NUM;
	srp->tx_buffers_low = srp->tx_buffers / 4;
	srp->tx_flow = 0;
	bzero(srp->pktp, BGE_SEND_BUF_MAX * sizeof (*srp->pktp));
}
/*
 * Called from the tx process to allocate more tx buffers
 */
bge_queue_item_t *
bge_alloc_txbuf_array(bge_t *bgep, send_ring_t *srp)
{
	bge_queue_t *txbuf_queue;
	bge_queue_item_t *txbuf_item_last;
	bge_queue_item_t *txbuf_item;
	bge_queue_item_t *txbuf_item_rtn;
	sw_txbuf_t *txbuf;
	dma_area_t area;
	size_t txbuffsize;
	uint32_t slot;
	uint32_t array;
	uint32_t split;
	uint32_t err;

	ASSERT(mutex_owned(srp->tx_lock));

	array = srp->tx_array;
	if (array >= srp->tx_array_max)
		return (NULL);

	/*
	 * Allocate memory & handles for TX buffers
	 */
	txbuffsize = BGE_SEND_BUF_NUM*bgep->chipid.snd_buff_size;
	ASSERT((txbuffsize % BGE_SPLIT) == 0);
	for (split = 0; split < BGE_SPLIT; ++split) {
		err = bge_alloc_dma_mem(bgep, txbuffsize/BGE_SPLIT,
		    &bge_data_accattr, DDI_DMA_WRITE | BGE_DMA_MODE,
		    &srp->buf[array][split]);
		if (err != DDI_SUCCESS) {
			/* Free the chunks that were already allocated OK */
			for (slot = 0; slot <= split; ++slot)
				bge_free_dma_mem(&srp->buf[array][slot]);
			srp->tx_alloc_fail++;
			return (NULL);
		}
	}

	/*
	 * Chunk tx buffer area
	 */
	txbuf = srp->txbuf + array*BGE_SEND_BUF_NUM;
	for (split = 0; split < BGE_SPLIT; ++split) {
		area = srp->buf[array][split];
		for (slot = 0; slot < BGE_SEND_BUF_NUM/BGE_SPLIT; ++slot) {
			bge_slice_chunk(&txbuf->buf, &area, 1,
			    bgep->chipid.snd_buff_size);
			txbuf++;
		}
	}

	/*
	 * Add the above buffers to the tx buffer pop queue
	 */
	txbuf_item = srp->txbuf_head + array*BGE_SEND_BUF_NUM;
	txbuf = srp->txbuf + array*BGE_SEND_BUF_NUM;
	txbuf_item_last = NULL;
	for (slot = 0; slot < BGE_SEND_BUF_NUM; ++slot) {
		txbuf_item->item = txbuf;
		txbuf_item->next = txbuf_item_last;
		txbuf_item_last = txbuf_item;
		txbuf++;
		txbuf_item++;
	}
	txbuf_item = srp->txbuf_head + array*BGE_SEND_BUF_NUM;
	txbuf_item_rtn = txbuf_item;
	txbuf_item++;
	txbuf_queue = srp->txbuf_pop_queue;
	mutex_enter(txbuf_queue->lock);
	txbuf_item->next = txbuf_queue->head;
	txbuf_queue->head = txbuf_item_last;
	txbuf_queue->count += BGE_SEND_BUF_NUM - 1;
	mutex_exit(txbuf_queue->lock);

	srp->tx_array++;
	srp->tx_buffers += BGE_SEND_BUF_NUM;
	srp->tx_buffers_low = srp->tx_buffers / 4;

	return (txbuf_item_rtn);
}
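/*
 * Note (added): the loop above links the new items in reverse order,
 * so <txbuf_item_last> ends up heading the new chain.  The first item
 * (<txbuf_item_rtn>) is handed straight back to the caller rather than
 * queued, which is why the pop queue's count grows by
 * BGE_SEND_BUF_NUM - 1 rather than BGE_SEND_BUF_NUM.
 */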
/*
 * This function allocates all the transmit and receive buffers
 * and descriptors, in four chunks.
 */
int
bge_alloc_bufs(bge_t *bgep)
{
	dma_area_t area;
	size_t rxbuffsize;
	size_t txbuffsize;
	size_t rxbuffdescsize;
	size_t rxdescsize;
	size_t txdescsize;
	uint32_t ring;
	uint32_t rx_rings = bgep->chipid.rx_rings;
	uint32_t tx_rings = bgep->chipid.tx_rings;
	int split;
	int err;

	BGE_TRACE(("bge_alloc_bufs($%p)",
	    (void *)bgep));

	rxbuffsize = BGE_STD_SLOTS_USED*bgep->chipid.std_buf_size;
	rxbuffsize += bgep->chipid.jumbo_slots*bgep->chipid.recv_jumbo_size;
	rxbuffsize += BGE_MINI_SLOTS_USED*BGE_MINI_BUFF_SIZE;

	txbuffsize = BGE_SEND_BUF_NUM*bgep->chipid.snd_buff_size;
	txbuffsize *= tx_rings;

	rxdescsize = rx_rings*bgep->chipid.recv_slots;
	rxdescsize *= sizeof (bge_rbd_t);

	rxbuffdescsize = BGE_STD_SLOTS_USED;
	rxbuffdescsize += bgep->chipid.jumbo_slots;
	rxbuffdescsize += BGE_MINI_SLOTS_USED;
	rxbuffdescsize *= sizeof (bge_rbd_t);

	txdescsize = tx_rings*BGE_SEND_SLOTS_USED;
	txdescsize *= sizeof (bge_sbd_t);
	txdescsize += sizeof (bge_statistics_t);
	txdescsize += sizeof (bge_status_t);
	txdescsize += BGE_STATUS_PADDING;

	/*
	 * Enable PCI relaxed ordering only for RX/TX data buffers
	 */
	if (bge_relaxed_ordering)
		dma_attr.dma_attr_flags |= DDI_DMA_RELAXED_ORDERING;

	/*
	 * Allocate memory & handles for RX buffers
	 */
	ASSERT((rxbuffsize % BGE_SPLIT) == 0);
	for (split = 0; split < BGE_SPLIT; ++split) {
		err = bge_alloc_dma_mem(bgep, rxbuffsize/BGE_SPLIT,
		    &bge_data_accattr, DDI_DMA_READ | BGE_DMA_MODE,
		    &bgep->rx_buff[split]);
		if (err != DDI_SUCCESS)
			return (DDI_FAILURE);
	}

	/*
	 * Allocate memory & handles for TX buffers
	 */
	ASSERT((txbuffsize % BGE_SPLIT) == 0);
	for (split = 0; split < BGE_SPLIT; ++split) {
		err = bge_alloc_dma_mem(bgep, txbuffsize/BGE_SPLIT,
		    &bge_data_accattr, DDI_DMA_WRITE | BGE_DMA_MODE,
		    &bgep->tx_buff[split]);
		if (err != DDI_SUCCESS)
			return (DDI_FAILURE);
	}

	dma_attr.dma_attr_flags &= ~DDI_DMA_RELAXED_ORDERING;

	/*
	 * Allocate memory & handles for receive return rings
	 */
	ASSERT((rxdescsize % rx_rings) == 0);
	for (split = 0; split < rx_rings; ++split) {
		err = bge_alloc_dma_mem(bgep, rxdescsize/rx_rings,
		    &bge_desc_accattr, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
		    &bgep->rx_desc[split]);
		if (err != DDI_SUCCESS)
			return (DDI_FAILURE);
	}

	/*
	 * Allocate memory & handles for buffer (producer) descriptor rings
	 */
	err = bge_alloc_dma_mem(bgep, rxbuffdescsize, &bge_desc_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &bgep->rx_desc[split]);
	if (err != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * Allocate memory & handles for TX descriptor rings,
	 * status block, and statistics area
	 */
	err = bge_alloc_dma_mem(bgep, txdescsize, &bge_desc_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &bgep->tx_desc);
	if (err != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * Now carve up each of the allocated areas ...
	 */
	for (split = 0; split < BGE_SPLIT; ++split) {
		area = bgep->rx_buff[split];
		bge_slice_chunk(&bgep->buff[BGE_STD_BUFF_RING].buf[split],
		    &area, BGE_STD_SLOTS_USED/BGE_SPLIT,
		    bgep->chipid.std_buf_size);
		bge_slice_chunk(&bgep->buff[BGE_JUMBO_BUFF_RING].buf[split],
		    &area, bgep->chipid.jumbo_slots/BGE_SPLIT,
		    bgep->chipid.recv_jumbo_size);
		bge_slice_chunk(&bgep->buff[BGE_MINI_BUFF_RING].buf[split],
		    &area, BGE_MINI_SLOTS_USED/BGE_SPLIT,
		    BGE_MINI_BUFF_SIZE);
	}

	for (split = 0; split < BGE_SPLIT; ++split) {
		area = bgep->tx_buff[split];
		for (ring = 0; ring < tx_rings; ++ring)
			bge_slice_chunk(&bgep->send[ring].buf[0][split],
			    &area, BGE_SEND_BUF_NUM/BGE_SPLIT,
			    bgep->chipid.snd_buff_size);
		for (; ring < BGE_SEND_RINGS_MAX; ++ring)
			bge_slice_chunk(&bgep->send[ring].buf[0][split],
			    &area, 0, bgep->chipid.snd_buff_size);
	}

	for (ring = 0; ring < rx_rings; ++ring)
		bge_slice_chunk(&bgep->recv[ring].desc, &bgep->rx_desc[ring],
		    bgep->chipid.recv_slots, sizeof (bge_rbd_t));

	area = bgep->rx_desc[rx_rings];
	for (; ring < BGE_RECV_RINGS_MAX; ++ring)
		bge_slice_chunk(&bgep->recv[ring].desc, &area,
		    0, sizeof (bge_rbd_t));
	bge_slice_chunk(&bgep->buff[BGE_STD_BUFF_RING].desc, &area,
	    BGE_STD_SLOTS_USED, sizeof (bge_rbd_t));
	bge_slice_chunk(&bgep->buff[BGE_JUMBO_BUFF_RING].desc, &area,
	    bgep->chipid.jumbo_slots, sizeof (bge_rbd_t));
	bge_slice_chunk(&bgep->buff[BGE_MINI_BUFF_RING].desc, &area,
	    BGE_MINI_SLOTS_USED, sizeof (bge_rbd_t));
	ASSERT(area.alength == 0);

	area = bgep->tx_desc;
	for (ring = 0; ring < tx_rings; ++ring)
		bge_slice_chunk(&bgep->send[ring].desc, &area,
		    BGE_SEND_SLOTS_USED, sizeof (bge_sbd_t));
	for (; ring < BGE_SEND_RINGS_MAX; ++ring)
		bge_slice_chunk(&bgep->send[ring].desc, &area,
		    0, sizeof (bge_sbd_t));
	bge_slice_chunk(&bgep->statistics, &area, 1, sizeof (bge_statistics_t));
	bge_slice_chunk(&bgep->status_block, &area, 1, sizeof (bge_status_t));
	ASSERT(area.alength == BGE_STATUS_PADDING);
	DMA_ZERO(bgep->status_block);

	return (DDI_SUCCESS);
}
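/*
 * Layout sketch (added, derived from the slicing above) of the
 * <tx_desc> area after bge_alloc_bufs():
 *
 *	send ring descriptors	tx_rings * BGE_SEND_SLOTS_USED slots
 *	statistics area		sizeof (bge_statistics_t)
 *	status block		sizeof (bge_status_t)
 *	padding			BGE_STATUS_PADDING bytes
 */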
/*
 * This routine frees the transmit and receive buffers and descriptors.
 * Make sure the chip is stopped before calling it!
 */
void
bge_free_bufs(bge_t *bgep)
{
	int split;

	BGE_TRACE(("bge_free_bufs($%p)",
	    (void *)bgep));

	bge_free_dma_mem(&bgep->tx_desc);
	for (split = 0; split < BGE_RECV_RINGS_SPLIT; ++split)
		bge_free_dma_mem(&bgep->rx_desc[split]);
	for (split = 0; split < BGE_SPLIT; ++split)
		bge_free_dma_mem(&bgep->tx_buff[split]);
	for (split = 0; split < BGE_SPLIT; ++split)
		bge_free_dma_mem(&bgep->rx_buff[split]);
}

/*
 * Determine (initial) MAC address ("BIA") to use for this interface
 */

static void
bge_find_mac_address(bge_t *bgep, chip_id_t *cidp)
{
	struct ether_addr sysaddr;
	char propbuf[8];		/* "true" or "false", plus NUL	*/
	uchar_t *bytes;
	int *ints;
	uint_t nelts;
	int err;

	BGE_TRACE(("bge_find_mac_address($%p)",
	    (void *)bgep));

	BGE_DEBUG(("bge_find_mac_address: hw_mac_addr %012llx, => %s (%sset)",
	    cidp->hw_mac_addr,
	    ether_sprintf((void *)cidp->vendor_addr.addr),
	    cidp->vendor_addr.set ? "" : "not "));

	/*
	 * The "vendor's factory-set address" may already have
	 * been extracted from the chip, but if the property
	 * "local-mac-address" is set we use that instead.  It
	 * will normally be set by OBP, but it could also be
	 * specified in a .conf file(!)
	 *
	 * There doesn't seem to be a way to define byte-array
	 * properties in a .conf, so we check whether it looks
	 * like an array of 6 ints instead.
	 *
	 * Then, we check whether it looks like an array of 6
	 * bytes (which it should, if OBP set it).  If we can't
	 * make sense of it either way, we'll ignore it.
	 */
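	/*
	 * Hypothetical .conf fragment (added for illustration only;
	 * the address values are made up):
	 *
	 *	local-mac-address=0x00,0x03,0xba,0x12,0x34,0x56;
	 *
	 * The int-array lookup below would accept this as a 6-element
	 * array and copy it into cidp->vendor_addr.
	 */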
	err = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, bgep->devinfo,
	    DDI_PROP_DONTPASS, localmac_propname, &ints, &nelts);
	if (err == DDI_PROP_SUCCESS) {
		if (nelts == ETHERADDRL) {
			while (nelts--)
				cidp->vendor_addr.addr[nelts] = ints[nelts];
			cidp->vendor_addr.set = B_TRUE;
		}
		ddi_prop_free(ints);
	}

	err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, bgep->devinfo,
	    DDI_PROP_DONTPASS, localmac_propname, &bytes, &nelts);
	if (err == DDI_PROP_SUCCESS) {
		if (nelts == ETHERADDRL) {
			while (nelts--)
				cidp->vendor_addr.addr[nelts] = bytes[nelts];
			cidp->vendor_addr.set = B_TRUE;
		}
		ddi_prop_free(bytes);
	}

	BGE_DEBUG(("bge_find_mac_address: +local %s (%sset)",
	    ether_sprintf((void *)cidp->vendor_addr.addr),
	    cidp->vendor_addr.set ? "" : "not "));

	/*
	 * Look up the OBP property "local-mac-address?".  Note that even
	 * though its value is a string (which should be "true" or "false"),
	 * it can't be decoded by ddi_prop_lookup_string(9F).  So, we zero
	 * the buffer first and then fetch the property as an untyped array;
	 * this may or may not include a final NUL, but since there will
	 * always be one left at the end of the buffer we can now treat it
	 * as a string anyway.
	 */
	nelts = sizeof (propbuf);
	bzero(propbuf, nelts--);
	err = ddi_getlongprop_buf(DDI_DEV_T_ANY, bgep->devinfo,
	    DDI_PROP_CANSLEEP, localmac_boolname, propbuf, (int *)&nelts);

	/*
	 * Now, if the address still isn't set from the hardware (SEEPROM)
	 * or the OBP or .conf property, OR if the user has foolishly set
	 * 'local-mac-address? = false', use "the system address" instead
	 * (but only if it's non-null i.e. has been set from the IDPROM).
	 */
	if (cidp->vendor_addr.set == B_FALSE || strcmp(propbuf, "false") == 0)
		if (localetheraddr(NULL, &sysaddr) != 0) {
			ethaddr_copy(&sysaddr, cidp->vendor_addr.addr);
			cidp->vendor_addr.set = B_TRUE;
		}

	BGE_DEBUG(("bge_find_mac_address: +system %s (%sset)",
	    ether_sprintf((void *)cidp->vendor_addr.addr),
	    cidp->vendor_addr.set ? "" : "not "));

	/*
	 * Finally(!), if there's a valid "mac-address" property (created
	 * if we netbooted from this interface), we must use this instead
	 * of any of the above to ensure that the NFS/install server doesn't
	 * get confused by the address changing as Solaris takes over!
	 */
	err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, bgep->devinfo,
	    DDI_PROP_DONTPASS, macaddr_propname, &bytes, &nelts);
	if (err == DDI_PROP_SUCCESS) {
		if (nelts == ETHERADDRL) {
			while (nelts--)
				cidp->vendor_addr.addr[nelts] = bytes[nelts];
			cidp->vendor_addr.set = B_TRUE;
		}
		ddi_prop_free(bytes);
	}

	BGE_DEBUG(("bge_find_mac_address: =final %s (%sset)",
	    ether_sprintf((void *)cidp->vendor_addr.addr),
	    cidp->vendor_addr.set ? "" : "not "));
}


/*ARGSUSED*/
int
bge_check_acc_handle(bge_t *bgep, ddi_acc_handle_t handle)
{
	ddi_fm_error_t de;

	ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION);
	ddi_fm_acc_err_clear(handle, DDI_FME_VERSION);
	return (de.fme_status);
}

/*ARGSUSED*/
int
bge_check_dma_handle(bge_t *bgep, ddi_dma_handle_t handle)
{
	ddi_fm_error_t de;

	ASSERT(bgep->progress & PROGRESS_BUFS);
	ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION);
	return (de.fme_status);
}

/*
 * The IO fault service error handling callback function
 */
/*ARGSUSED*/
static int
bge_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
{
	/*
	 * As the driver can always deal with an error in any dma or
	 * access handle, we can just return the fme_status value.
	 */
	pci_ereport_post(dip, err, NULL);
	return (err->fme_status);
}

static void
bge_fm_init(bge_t *bgep)
{
	ddi_iblock_cookie_t iblk;

	/* Only register with IO Fault Services if we have some capability */
	if (bgep->fm_capabilities) {
		bge_reg_accattr.devacc_attr_access = DDI_FLAGERR_ACC;
		dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;

		/* Register capabilities with IO Fault Services */
		ddi_fm_init(bgep->devinfo, &bgep->fm_capabilities, &iblk);

		/*
		 * Initialize pci ereport capabilities if ereport capable
		 */
		if (DDI_FM_EREPORT_CAP(bgep->fm_capabilities) ||
		    DDI_FM_ERRCB_CAP(bgep->fm_capabilities))
			pci_ereport_setup(bgep->devinfo);

		/*
		 * Register error callback if error callback capable
		 */
		if (DDI_FM_ERRCB_CAP(bgep->fm_capabilities))
			ddi_fm_handler_register(bgep->devinfo,
			    bge_fm_error_cb, (void*) bgep);
	} else {
		/*
		 * These fields have to be cleared if there are no
		 * FMA capabilities at runtime.
		 */
		bge_reg_accattr.devacc_attr_access = DDI_DEFAULT_ACC;
		dma_attr.dma_attr_flags = 0;
	}
}

static void
bge_fm_fini(bge_t *bgep)
{
	/* Only unregister FMA capabilities if we registered some */
	if (bgep->fm_capabilities) {

		/*
		 * Release any resources allocated by pci_ereport_setup()
		 */
		if (DDI_FM_EREPORT_CAP(bgep->fm_capabilities) ||
		    DDI_FM_ERRCB_CAP(bgep->fm_capabilities))
			pci_ereport_teardown(bgep->devinfo);

		/*
		 * Un-register error callback if error callback capable
		 */
		if (DDI_FM_ERRCB_CAP(bgep->fm_capabilities))
			ddi_fm_handler_unregister(bgep->devinfo);

		/* Unregister from IO Fault Services */
		ddi_fm_fini(bgep->devinfo);
	}
}

static void
#ifdef BGE_IPMI_ASF
bge_unattach(bge_t *bgep, uint_t asf_mode)
#else
bge_unattach(bge_t *bgep)
#endif
{
	BGE_TRACE(("bge_unattach($%p)",
	    (void *)bgep));

	/*
	 * Flag that no more activity may be initiated
	 */
	bgep->progress &= ~PROGRESS_READY;

	/*
	 * Quiesce the PHY and MAC (leave it reset but still powered).
	 * Clean up and free all BGE data structures
	 */
	if (bgep->periodic_id != NULL) {
		ddi_periodic_delete(bgep->periodic_id);
		bgep->periodic_id = NULL;
	}
	if (bgep->progress & PROGRESS_KSTATS)
		bge_fini_kstats(bgep);
	if (bgep->progress & PROGRESS_PHY)
		bge_phys_reset(bgep);
	if (bgep->progress & PROGRESS_HWINT) {
		mutex_enter(bgep->genlock);
#ifdef BGE_IPMI_ASF
		if (bge_chip_reset(bgep, B_FALSE, asf_mode) != DDI_SUCCESS)
#else
		if (bge_chip_reset(bgep, B_FALSE) != DDI_SUCCESS)
#endif
			ddi_fm_service_impact(bgep->devinfo,
			    DDI_SERVICE_UNAFFECTED);
#ifdef BGE_IPMI_ASF
		if (bgep->asf_enabled) {
			/*
			 * This register has been overlaid.  We restore its
			 * initial value here.
			 */
			bge_nic_put32(bgep, BGE_NIC_DATA_SIG_ADDR,
			    BGE_NIC_DATA_SIG);
		}
#endif
		if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK)
			ddi_fm_service_impact(bgep->devinfo,
			    DDI_SERVICE_UNAFFECTED);
		if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK)
			ddi_fm_service_impact(bgep->devinfo,
			    DDI_SERVICE_UNAFFECTED);
		mutex_exit(bgep->genlock);
	}
	if (bgep->progress & PROGRESS_INTR) {
		bge_intr_disable(bgep);
		bge_fini_rings(bgep);
	}
	if (bgep->progress & PROGRESS_HWINT) {
		bge_rem_intrs(bgep);
		rw_destroy(bgep->errlock);
		mutex_destroy(bgep->softintrlock);
		mutex_destroy(bgep->genlock);
	}
	if (bgep->progress & PROGRESS_FACTOTUM)
		ddi_remove_softintr(bgep->factotum_id);
	if (bgep->progress & PROGRESS_RESCHED)
		ddi_remove_softintr(bgep->drain_id);
	if (bgep->progress & PROGRESS_BUFS)
		bge_free_bufs(bgep);
	if (bgep->progress & PROGRESS_REGS)
		ddi_regs_map_free(&bgep->io_handle);
	if (bgep->progress & PROGRESS_CFG)
		pci_config_teardown(&bgep->cfg_handle);

	bge_fm_fini(bgep);

	ddi_remove_minor_node(bgep->devinfo, NULL);
	kmem_free(bgep->pstats, sizeof (bge_statistics_reg_t));
	kmem_free(bgep, sizeof (*bgep));
}
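/*
 * Note (added): bge_unattach() relies on the PROGRESS_* bits that
 * bge_attach() sets as each resource is acquired, so it can be called
 * from the attach_fail: path as well as from bge_detach() and will
 * only tear down what was actually set up.
 */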
static int
bge_resume(dev_info_t *devinfo)
{
	bge_t *bgep;				/* Our private data	*/
	chip_id_t *cidp;
	chip_id_t chipid;

	bgep = ddi_get_driver_private(devinfo);
	if (bgep == NULL)
		return (DDI_FAILURE);

	/*
	 * Refuse to resume if the data structures aren't consistent
	 */
	if (bgep->devinfo != devinfo)
		return (DDI_FAILURE);

#ifdef BGE_IPMI_ASF
	/*
	 * Power management is not currently supported in BGE.  If you
	 * want to implement it, please add the ASF/IPMI-related
	 * code here.
	 */

#endif

	/*
	 * Read chip ID & set up config space command register(s)
	 * Refuse to resume if the chip has changed its identity!
	 */
	cidp = &bgep->chipid;
	mutex_enter(bgep->genlock);
	bge_chip_cfg_init(bgep, &chipid, B_FALSE);
	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
		mutex_exit(bgep->genlock);
		return (DDI_FAILURE);
	}
	mutex_exit(bgep->genlock);
	if (chipid.vendor != cidp->vendor)
		return (DDI_FAILURE);
	if (chipid.device != cidp->device)
		return (DDI_FAILURE);
	if (chipid.revision != cidp->revision)
		return (DDI_FAILURE);
	if (chipid.asic_rev != cidp->asic_rev)
		return (DDI_FAILURE);

	/*
	 * All OK, reinitialise h/w & kick off GLD scheduling
	 */
	mutex_enter(bgep->genlock);
	if (bge_restart(bgep, B_TRUE) != DDI_SUCCESS) {
		(void) bge_check_acc_handle(bgep, bgep->cfg_handle);
		(void) bge_check_acc_handle(bgep, bgep->io_handle);
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
		mutex_exit(bgep->genlock);
		return (DDI_FAILURE);
	}
	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
		mutex_exit(bgep->genlock);
		return (DDI_FAILURE);
	}
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
		mutex_exit(bgep->genlock);
		return (DDI_FAILURE);
	}
	mutex_exit(bgep->genlock);
	return (DDI_SUCCESS);
}

/*
 * attach(9E) -- Attach a device to the system
 *
 * Called once for each board successfully probed.
 */
static int
bge_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
{
	bge_t *bgep;				/* Our private data	*/
	mac_register_t *macp;
	chip_id_t *cidp;
	caddr_t regs;
	int instance;
	int err;
	int intr_types;
#ifdef BGE_IPMI_ASF
	uint32_t mhcrValue;
#ifdef __sparc
	uint16_t value16;
#endif
#ifdef BGE_NETCONSOLE
	int retval;
#endif
#endif

	instance = ddi_get_instance(devinfo);

	BGE_GTRACE(("bge_attach($%p, %d) instance %d",
	    (void *)devinfo, cmd, instance));
	BGE_BRKPT(NULL, "bge_attach");

	switch (cmd) {
	default:
		return (DDI_FAILURE);

	case DDI_RESUME:
		return (bge_resume(devinfo));

	case DDI_ATTACH:
		break;
	}

	bgep = kmem_zalloc(sizeof (*bgep), KM_SLEEP);
	bgep->pstats = kmem_zalloc(sizeof (bge_statistics_reg_t), KM_SLEEP);
	ddi_set_driver_private(devinfo, bgep);
	bgep->bge_guard = BGE_GUARD;
	bgep->devinfo = devinfo;
	bgep->param_drain_max = 64;
	bgep->param_msi_cnt = 0;
	bgep->param_loop_mode = 0;

	/*
	 * Initialize more fields in BGE private data
	 */
	bgep->debug = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, debug_propname, bge_debug);
	(void) snprintf(bgep->ifname, sizeof (bgep->ifname), "%s%d",
	    BGE_DRIVER_NAME, instance);

	/*
	 * Initialize for fma support
	 */
	bgep->fm_capabilities = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, fm_cap,
	    DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
	    DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
	BGE_DEBUG(("bgep->fm_capabilities = %d", bgep->fm_capabilities));
	bge_fm_init(bgep);
	/*
	 * Look up the IOMMU's page size for DVMA mappings (must be
	 * a power of 2) and convert to a mask.  This can be used to
	 * determine whether a message buffer crosses a page boundary.
	 * Note: in 2s complement binary notation, if X is a power of
	 * 2, then -X has the representation "11...1100...00".
	 */
	bgep->pagemask = dvma_pagesize(devinfo);
	ASSERT(ddi_ffs(bgep->pagemask) == ddi_fls(bgep->pagemask));
	bgep->pagemask = -bgep->pagemask;
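	/*
	 * Worked example (added): with an 8KB IOMMU page size,
	 * pagemask = -0x2000 = 0xffffe000 (32-bit view), and a buffer
	 * [start, start + len) crosses a page boundary exactly when
	 *
	 *	(start & pagemask) != ((start + len - 1) & pagemask)
	 */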
	/*
	 * Map config space registers
	 * Read chip ID & set up config space command register(s)
	 *
	 * Note: this leaves the chip accessible by Memory Space
	 * accesses, but with interrupts and Bus Mastering off.
	 * This should ensure that nothing untoward will happen
	 * if it has been left active by the (net-)bootloader.
	 * We'll re-enable Bus Mastering once we've reset the chip,
	 * and allow interrupts only when everything else is set up.
	 */
	err = pci_config_setup(devinfo, &bgep->cfg_handle);
#ifdef BGE_IPMI_ASF
#ifdef __sparc
	value16 = pci_config_get16(bgep->cfg_handle, PCI_CONF_COMM);
	value16 = value16 | (PCI_COMM_MAE | PCI_COMM_ME);
	pci_config_put16(bgep->cfg_handle, PCI_CONF_COMM, value16);
	mhcrValue = MHCR_ENABLE_INDIRECT_ACCESS |
	    MHCR_ENABLE_TAGGED_STATUS_MODE |
	    MHCR_MASK_INTERRUPT_MODE |
	    MHCR_MASK_PCI_INT_OUTPUT |
	    MHCR_CLEAR_INTERRUPT_INTA |
	    MHCR_ENABLE_ENDIAN_WORD_SWAP |
	    MHCR_ENABLE_ENDIAN_BYTE_SWAP;
	pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_MHCR, mhcrValue);
	bge_ind_put32(bgep, MEMORY_ARBITER_MODE_REG,
	    bge_ind_get32(bgep, MEMORY_ARBITER_MODE_REG) |
	    MEMORY_ARBITER_ENABLE);
#else
	mhcrValue = pci_config_get32(bgep->cfg_handle, PCI_CONF_BGE_MHCR);
#endif
	if (mhcrValue & MHCR_ENABLE_ENDIAN_WORD_SWAP) {
		bgep->asf_wordswapped = B_TRUE;
	} else {
		bgep->asf_wordswapped = B_FALSE;
	}
	bge_asf_get_config(bgep);
#endif
	if (err != DDI_SUCCESS) {
		bge_problem(bgep, "pci_config_setup() failed");
		goto attach_fail;
	}
	bgep->progress |= PROGRESS_CFG;
	cidp = &bgep->chipid;
	bzero(cidp, sizeof (*cidp));
	bge_chip_cfg_init(bgep, cidp, B_FALSE);
	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
		goto attach_fail;
	}

#ifdef BGE_IPMI_ASF
	if (DEVICE_5721_SERIES_CHIPSETS(bgep) ||
	    DEVICE_5714_SERIES_CHIPSETS(bgep)) {
		bgep->asf_newhandshake = B_TRUE;
	} else {
		bgep->asf_newhandshake = B_FALSE;
	}
#endif

	/*
	 * Update those parts of the chip ID derived from volatile
	 * registers with the values seen by OBP (in case the chip
	 * has been reset externally and therefore lost them).
	 */
	cidp->subven = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, subven_propname, cidp->subven);
	cidp->subdev = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, subdev_propname, cidp->subdev);
	cidp->clsize = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, clsize_propname, cidp->clsize);
	cidp->latency = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, latency_propname, cidp->latency);
	cidp->rx_rings = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, rxrings_propname, cidp->rx_rings);
	cidp->tx_rings = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, txrings_propname, cidp->tx_rings);

	cidp->default_mtu = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, default_mtu, BGE_DEFAULT_MTU);
	if ((cidp->default_mtu < BGE_DEFAULT_MTU) ||
	    (cidp->default_mtu > BGE_MAXIMUM_MTU)) {
		cidp->default_mtu = BGE_DEFAULT_MTU;
	}

	/*
	 * Map operating registers
	 */
	err = ddi_regs_map_setup(devinfo, BGE_PCI_OPREGS_RNUMBER,
	    &regs, 0, 0, &bge_reg_accattr, &bgep->io_handle);
	if (err != DDI_SUCCESS) {
		bge_problem(bgep, "ddi_regs_map_setup() failed");
		goto attach_fail;
	}
	bgep->io_regs = regs;
	bgep->progress |= PROGRESS_REGS;

	/*
	 * Characterise the device, so we know its requirements.
	 * Then allocate the appropriate TX and RX descriptors & buffers.
	 */
	if (bge_chip_id_init(bgep) == EIO) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
		goto attach_fail;
	}

	err = bge_alloc_bufs(bgep);
	if (err != DDI_SUCCESS) {
		bge_problem(bgep, "DMA buffer allocation failed");
		goto attach_fail;
	}
	bgep->progress |= PROGRESS_BUFS;

	/*
	 * Add the softint handlers:
	 *
	 * Both of these handlers are used to avoid restrictions on the
	 * context and/or mutexes required for some operations.  In
	 * particular, the hardware interrupt handler and its subfunctions
	 * can detect a number of conditions that we don't want to handle
	 * in that context or with that set of mutexes held.  So, these
	 * softints are triggered instead:
	 *
	 * the <resched> softint is triggered if we have previously
	 * had to refuse to send a packet because of resource shortage
	 * (we've run out of transmit buffers), but the send completion
	 * interrupt handler has now detected that more buffers have
	 * become available.
	 *
	 * the <factotum> is triggered if the h/w interrupt handler
	 * sees the <link state changed> or <error> bits in the status
	 * block.  It's also triggered periodically to poll the link
	 * state, just in case we aren't getting link status change
	 * interrupts ...
	 */
	err = ddi_add_softintr(devinfo, DDI_SOFTINT_LOW, &bgep->drain_id,
	    NULL, NULL, bge_send_drain, (caddr_t)bgep);
	if (err != DDI_SUCCESS) {
		bge_problem(bgep, "ddi_add_softintr() failed");
		goto attach_fail;
	}
	bgep->progress |= PROGRESS_RESCHED;
	err = ddi_add_softintr(devinfo, DDI_SOFTINT_LOW, &bgep->factotum_id,
	    NULL, NULL, bge_chip_factotum, (caddr_t)bgep);
	if (err != DDI_SUCCESS) {
		bge_problem(bgep, "ddi_add_softintr() failed");
		goto attach_fail;
	}
	bgep->progress |= PROGRESS_FACTOTUM;

	/* Get supported interrupt types */
	if (ddi_intr_get_supported_types(devinfo, &intr_types) != DDI_SUCCESS) {
		bge_error(bgep, "ddi_intr_get_supported_types failed\n");

		goto attach_fail;
	}

	BGE_DEBUG(("%s: ddi_intr_get_supported_types() returned: %x",
	    bgep->ifname, intr_types));

	if ((intr_types & DDI_INTR_TYPE_MSI) && bgep->chipid.msi_enabled) {
		if (bge_add_intrs(bgep, DDI_INTR_TYPE_MSI) != DDI_SUCCESS) {
			bge_error(bgep, "MSI registration failed, "
			    "trying FIXED interrupt type\n");
		} else {
			BGE_DEBUG(("%s: Using MSI interrupt type",
			    bgep->ifname));
			bgep->intr_type = DDI_INTR_TYPE_MSI;
			bgep->progress |= PROGRESS_HWINT;
		}
	}

	if (!(bgep->progress & PROGRESS_HWINT) &&
	    (intr_types & DDI_INTR_TYPE_FIXED)) {
		if (bge_add_intrs(bgep, DDI_INTR_TYPE_FIXED) != DDI_SUCCESS) {
			bge_error(bgep, "FIXED interrupt "
			    "registration failed\n");
			goto attach_fail;
		}

		BGE_DEBUG(("%s: Using FIXED interrupt type", bgep->ifname));

		bgep->intr_type = DDI_INTR_TYPE_FIXED;
		bgep->progress |= PROGRESS_HWINT;
	}

	if (!(bgep->progress & PROGRESS_HWINT)) {
		bge_error(bgep, "No interrupts registered\n");
		goto attach_fail;
	}

	/*
	 * Note that interrupts are not enabled yet as
	 * mutex locks are not initialized.  Initialize mutex locks.
	 */
	mutex_init(bgep->genlock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(bgep->intr_pri));
	mutex_init(bgep->softintrlock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(bgep->intr_pri));
	rw_init(bgep->errlock, NULL, RW_DRIVER,
	    DDI_INTR_PRI(bgep->intr_pri));

	/*
	 * Initialize rings.
	 */
	bge_init_rings(bgep);

	/*
	 * Now that mutex locks are initialized, enable interrupts.
	 */
	bge_intr_enable(bgep);
	bgep->progress |= PROGRESS_INTR;

	/*
	 * Initialise link state variables
	 * Stop, reset & reinitialise the chip.
	 * Initialise the (internal) PHY.
	 */
	bgep->link_state = LINK_STATE_UNKNOWN;

	mutex_enter(bgep->genlock);

	/*
	 * Reset chip & rings to initial state; also reset address
	 * filtering, promiscuity, loopback mode.
	 */
#ifdef BGE_IPMI_ASF
#ifdef BGE_NETCONSOLE
	if (bge_reset(bgep, ASF_MODE_INIT) != DDI_SUCCESS) {
#else
	if (bge_reset(bgep, ASF_MODE_SHUTDOWN) != DDI_SUCCESS) {
#endif
#else
	if (bge_reset(bgep) != DDI_SUCCESS) {
#endif
		(void) bge_check_acc_handle(bgep, bgep->cfg_handle);
		(void) bge_check_acc_handle(bgep, bgep->io_handle);
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
		mutex_exit(bgep->genlock);
		goto attach_fail;
	}

#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled) {
		bgep->asf_status = ASF_STAT_RUN_INIT;
	}
#endif

	bzero(bgep->mcast_hash, sizeof (bgep->mcast_hash));
	bzero(bgep->mcast_refs, sizeof (bgep->mcast_refs));
	bgep->promisc = B_FALSE;
	bgep->param_loop_mode = BGE_LOOP_NONE;
	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
		mutex_exit(bgep->genlock);
		goto attach_fail;
	}
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
		mutex_exit(bgep->genlock);
		goto attach_fail;
	}

	mutex_exit(bgep->genlock);

	if (bge_phys_init(bgep) == EIO) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
		goto attach_fail;
	}
	bgep->progress |= PROGRESS_PHY;

	/*
	 * initialize NDD-tweakable parameters
	 */
	if (bge_nd_init(bgep)) {
		bge_problem(bgep, "bge_nd_init() failed");
		goto attach_fail;
	}
	bgep->progress |= PROGRESS_NDD;

	/*
	 * Create & initialise named kstats
	 */
	bge_init_kstats(bgep, instance);
	bgep->progress |= PROGRESS_KSTATS;

	/*
	 * Determine whether to override the chip's own MAC address
	 */
	bge_find_mac_address(bgep, cidp);

	bgep->unicst_addr_total = MAC_ADDRESS_REGS_MAX;
	bgep->unicst_addr_avail = MAC_ADDRESS_REGS_MAX;

	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
		goto attach_fail;
	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	macp->m_driver = bgep;
	macp->m_dip = devinfo;
	macp->m_src_addr = cidp->vendor_addr.addr;
	macp->m_callbacks = &bge_m_callbacks;
	macp->m_min_sdu = 0;
	macp->m_max_sdu = cidp->ethmax_size - sizeof (struct ether_header);
	macp->m_margin = VLAN_TAGSZ;
	macp->m_priv_props = bge_priv_prop;
	macp->m_priv_prop_count = BGE_MAX_PRIV_PROPS;
	macp->m_v12n = MAC_VIRT_LEVEL1;

	/*
	 * Finally, we're ready to register ourselves with the MAC layer
	 * interface; if this succeeds, we're all ready to start()
	 */
	err = mac_register(macp, &bgep->mh);
	mac_free(macp);
	if (err != 0)
		goto attach_fail;

	/*
	 * Register a periodic handler.
	 * bge_chip_cyclic() is invoked in kernel context.
	 */
	bgep->periodic_id = ddi_periodic_add(bge_chip_cyclic, bgep,
	    BGE_CYCLIC_PERIOD, DDI_IPL_0);

	bgep->progress |= PROGRESS_READY;
	ASSERT(bgep->bge_guard == BGE_GUARD);
#ifdef BGE_IPMI_ASF
#ifdef BGE_NETCONSOLE
	if (bgep->asf_enabled) {
		mutex_enter(bgep->genlock);
		retval = bge_chip_start(bgep, B_TRUE);
		mutex_exit(bgep->genlock);
		if (retval != DDI_SUCCESS)
			goto attach_fail;
	}
#endif
#endif

	ddi_report_dev(devinfo);
	BGE_REPORT((bgep, "bge version: %s", bge_version));

	return (DDI_SUCCESS);

attach_fail:
#ifdef BGE_IPMI_ASF
	bge_unattach(bgep, ASF_MODE_SHUTDOWN);
#else
	bge_unattach(bgep);
#endif
	return (DDI_FAILURE);
}

/*
 * bge_suspend() -- suspend transmit/receive for powerdown
 */
static int
bge_suspend(bge_t *bgep)
{
	/*
	 * Stop processing and idle (powerdown) the PHY ...
	 */
	mutex_enter(bgep->genlock);
#ifdef BGE_IPMI_ASF
	/*
	 * Power management is not currently supported in BGE.  If you
	 * want to implement it, please add the ASF/IPMI-related
	 * code here.
	 */
#endif
	bge_stop(bgep);
	if (bge_phys_idle(bgep) != DDI_SUCCESS) {
		(void) bge_check_acc_handle(bgep, bgep->io_handle);
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (DDI_FAILURE);
	}
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (DDI_FAILURE);
	}
	mutex_exit(bgep->genlock);

	return (DDI_SUCCESS);
}

/*
 * quiesce(9E) entry point.
 *
 * This function is called when the system is single-threaded at high
 * PIL with preemption disabled.  Therefore, this function must not
 * block.
 *
 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
 * DDI_FAILURE indicates an error condition and should almost never happen.
 */
#ifdef __sparc
#define	bge_quiesce	ddi_quiesce_not_supported
#else
static int
bge_quiesce(dev_info_t *devinfo)
{
	bge_t *bgep = ddi_get_driver_private(devinfo);

	if (bgep == NULL)
		return (DDI_FAILURE);

	if (bgep->intr_type == DDI_INTR_TYPE_FIXED) {
		bge_reg_set32(bgep, PCI_CONF_BGE_MHCR,
		    MHCR_MASK_PCI_INT_OUTPUT);
	} else {
		bge_reg_clr32(bgep, MSI_MODE_REG, MSI_MSI_ENABLE);
	}

	/* Stop the chip */
	bge_chip_stop_nonblocking(bgep);

	return (DDI_SUCCESS);
}
#endif

/*
 * detach(9E) -- Detach a device from the system
 */
static int
bge_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
{
	bge_t *bgep;
#ifdef BGE_IPMI_ASF
	uint_t asf_mode;
	asf_mode = ASF_MODE_NONE;
#endif

	BGE_GTRACE(("bge_detach($%p, %d)", (void *)devinfo, cmd));

	bgep = ddi_get_driver_private(devinfo);

	switch (cmd) {
	default:
		return (DDI_FAILURE);

	case DDI_SUSPEND:
		return (bge_suspend(bgep));

	case DDI_DETACH:
		break;
	}

#ifdef BGE_IPMI_ASF
	mutex_enter(bgep->genlock);
	if (bgep->asf_enabled && ((bgep->asf_status == ASF_STAT_RUN) ||
	    (bgep->asf_status == ASF_STAT_RUN_INIT))) {

		bge_asf_update_status(bgep);
		if (bgep->asf_status == ASF_STAT_RUN) {
			bge_asf_stop_timer(bgep);
		}
		bgep->asf_status = ASF_STAT_STOP;

		bge_asf_pre_reset_operations(bgep, BGE_SHUTDOWN_RESET);

		if (bgep->asf_pseudostop) {
			bge_chip_stop(bgep, B_FALSE);
			bgep->bge_mac_state = BGE_MAC_STOPPED;
			bgep->asf_pseudostop = B_FALSE;
		}

		asf_mode = ASF_MODE_POST_SHUTDOWN;

		if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK)
			ddi_fm_service_impact(bgep->devinfo,
			    DDI_SERVICE_UNAFFECTED);
		if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK)
			ddi_fm_service_impact(bgep->devinfo,
			    DDI_SERVICE_UNAFFECTED);
	}
	mutex_exit(bgep->genlock);
#endif

	/*
	 * Unregister from the GLD subsystem.  This can fail, in
	 * particular if there are DLPI style-2 streams still open -
	 * in which case we just return failure without shutting
	 * down chip operations.
	 */
	if (mac_unregister(bgep->mh) != 0)
		return (DDI_FAILURE);

	/*
	 * All activity stopped, so we can clean up & exit
	 */
#ifdef BGE_IPMI_ASF
	bge_unattach(bgep, asf_mode);
#else
	bge_unattach(bgep);
#endif
	return (DDI_SUCCESS);
}


/*
 * ========== Module Loading Data & Entry Points ==========
 */

#undef	BGE_DBG
#define	BGE_DBG		BGE_DBG_INIT	/* debug flag for this code	*/

DDI_DEFINE_STREAM_OPS(bge_dev_ops,
	nulldev,	/* identify */
	nulldev,	/* probe */
	bge_attach,	/* attach */
	bge_detach,	/* detach */
	nodev,		/* reset */
	NULL,		/* cb_ops */
	D_MP,		/* bus_ops */
	NULL,		/* power */
	bge_quiesce	/* quiesce */
);

static struct modldrv bge_modldrv = {
	&mod_driverops,		/* Type of module.  This one is a driver */
	bge_ident,		/* short description */
	&bge_dev_ops		/* driver specific ops */
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *)&bge_modldrv, NULL
};


int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

int
_init(void)
{
	int status;

	mac_init_ops(&bge_dev_ops, "bge");
	status = mod_install(&modlinkage);
	if (status == DDI_SUCCESS)
		mutex_init(bge_log_mutex, NULL, MUTEX_DRIVER, NULL);
	else
		mac_fini_ops(&bge_dev_ops);
	return (status);
}

int
_fini(void)
{
	int status;

	status = mod_remove(&modlinkage);
	if (status == DDI_SUCCESS) {
		mac_fini_ops(&bge_dev_ops);
		mutex_destroy(bge_log_mutex);
	}
	return (status);
}


/*
 * bge_add_intrs:
 *
 * Register FIXED or MSI interrupts.
 */
static int
bge_add_intrs(bge_t *bgep, int intr_type)
{
	dev_info_t *dip = bgep->devinfo;
	int avail, actual, intr_size, count = 0;
	int i, flag, ret;

	BGE_DEBUG(("bge_add_intrs($%p, 0x%x)", (void *)bgep, intr_type));

	/* Get number of interrupts */
	ret = ddi_intr_get_nintrs(dip, intr_type, &count);
	if ((ret != DDI_SUCCESS) || (count == 0)) {
		bge_error(bgep, "ddi_intr_get_nintrs() failure, ret: %d, "
		    "count: %d", ret, count);

		return (DDI_FAILURE);
	}

	/* Get number of available interrupts */
	ret = ddi_intr_get_navail(dip, intr_type, &avail);
	if ((ret != DDI_SUCCESS) || (avail == 0)) {
		bge_error(bgep, "ddi_intr_get_navail() failure, "
		    "ret: %d, avail: %d\n", ret, avail);

		return (DDI_FAILURE);
	}

	if (avail < count) {
		BGE_DEBUG(("%s: nintrs() returned %d, navail returned %d",
		    bgep->ifname, count, avail));
	}

	/*
	 * BGE hardware generates only a single MSI even though it claims
	 * to support multiple MSIs, so hard-code the MSI count to 1.
	 */
	if (intr_type == DDI_INTR_TYPE_MSI) {
		count = 1;
		flag = DDI_INTR_ALLOC_STRICT;
	} else {
		flag = DDI_INTR_ALLOC_NORMAL;
	}

	/* Allocate an array of interrupt handles */
	intr_size = count * sizeof (ddi_intr_handle_t);
	bgep->htable = kmem_alloc(intr_size, KM_SLEEP);

	/* Call ddi_intr_alloc() */
	ret = ddi_intr_alloc(dip, bgep->htable, intr_type, 0,
	    count, &actual, flag);

	if ((ret != DDI_SUCCESS) || (actual == 0)) {
		bge_error(bgep, "ddi_intr_alloc() failed %d\n", ret);

		kmem_free(bgep->htable, intr_size);
		return (DDI_FAILURE);
	}

	if (actual < count) {
		BGE_DEBUG(("%s: Requested: %d, Received: %d",
		    bgep->ifname, count, actual));
	}

	bgep->intr_cnt = actual;

	/*
	 * Get the priority for the first interrupt; assume the
	 * remainder are all the same.
	 */
	if ((ret = ddi_intr_get_pri(bgep->htable[0], &bgep->intr_pri)) !=
	    DDI_SUCCESS) {
		bge_error(bgep, "ddi_intr_get_pri() failed %d\n", ret);

		/* Free already allocated intr */
		for (i = 0; i < actual; i++) {
			(void) ddi_intr_free(bgep->htable[i]);
		}

		kmem_free(bgep->htable, intr_size);
		return (DDI_FAILURE);
	}

	/* Call ddi_intr_add_handler() */
	for (i = 0; i < actual; i++) {
		if ((ret = ddi_intr_add_handler(bgep->htable[i], bge_intr,
		    (caddr_t)bgep, (caddr_t)(uintptr_t)i)) != DDI_SUCCESS) {
			bge_error(bgep, "ddi_intr_add_handler() "
			    "failed %d\n", ret);

			/* Free already allocated intr */
			for (i = 0; i < actual; i++) {
				(void) ddi_intr_free(bgep->htable[i]);
			}

			kmem_free(bgep->htable, intr_size);
			return (DDI_FAILURE);
		}
	}

	if ((ret = ddi_intr_get_cap(bgep->htable[0], &bgep->intr_cap))
	    != DDI_SUCCESS) {
		bge_error(bgep, "ddi_intr_get_cap() failed %d\n", ret);

		for (i = 0; i < actual; i++) {
			(void) ddi_intr_remove_handler(bgep->htable[i]);
			(void) ddi_intr_free(bgep->htable[i]);
		}

		kmem_free(bgep->htable, intr_size);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*
 * bge_rem_intrs:
 *
 * Unregister FIXED or MSI interrupts
 */
static void
bge_rem_intrs(bge_t *bgep)
{
	int i;

	BGE_DEBUG(("bge_rem_intrs($%p)", (void *)bgep));

	/* Call ddi_intr_remove_handler() */
	for (i = 0; i < bgep->intr_cnt; i++) {
		(void) ddi_intr_remove_handler(bgep->htable[i]);
		(void) ddi_intr_free(bgep->htable[i]);
	}

	kmem_free(bgep->htable, bgep->intr_cnt * sizeof (ddi_intr_handle_t));
}


void
bge_intr_enable(bge_t *bgep)
{
	int i;

	if (bgep->intr_cap & DDI_INTR_FLAG_BLOCK) {
		/* Call ddi_intr_block_enable() for MSI interrupts */
		(void) ddi_intr_block_enable(bgep->htable, bgep->intr_cnt);
	} else {
		/* Call ddi_intr_enable for MSI or FIXED interrupts */
		for (i = 0; i < bgep->intr_cnt; i++) {
			(void) ddi_intr_enable(bgep->htable[i]);
		}
	}
}


void
bge_intr_disable(bge_t *bgep)
{
	int i;

	if (bgep->intr_cap & DDI_INTR_FLAG_BLOCK) {
		/* Call ddi_intr_block_disable() */
		(void) ddi_intr_block_disable(bgep->htable, bgep->intr_cnt);
	} else {
		for (i = 0; i < bgep->intr_cnt; i++) {
			(void) ddi_intr_disable(bgep->htable[i]);
		}
	}
}

int
bge_reprogram(bge_t *bgep)
{
	int status = 0;

	ASSERT(mutex_owned(bgep->genlock));

	if (bge_phys_update(bgep) != DDI_SUCCESS) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		status = IOC_INVAL;
	}
#ifdef BGE_IPMI_ASF
	if (bge_chip_sync(bgep, B_TRUE) == DDI_FAILURE) {
#else
	if (bge_chip_sync(bgep) == DDI_FAILURE) {
#endif
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		status = IOC_INVAL;
	}
	if (bgep->intr_type == DDI_INTR_TYPE_MSI)
		bge_chip_msi_trig(bgep);
	return (status);
}