1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright (c) 2010-2013, by Broadcom, Inc. 24 * All Rights Reserved. 25 */ 26 27 /* 28 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. 29 * All rights reserved. 30 * Copyright 2016 Nexenta Systems, Inc. All rights reserved. 31 */ 32 33 #include "bge_impl.h" 34 #include <sys/sdt.h> 35 #include <sys/mac_provider.h> 36 #include <sys/mac.h> 37 #include <sys/mac_flow.h> 38 39 40 #ifndef STRINGIFY 41 #define XSTRINGIFY(x) #x 42 #define STRINGIFY(x) XSTRINGIFY(x) 43 #endif 44 45 /* 46 * This is the string displayed by modinfo, etc. 
47 */ 48 static char bge_ident[] = "Broadcom Gb Ethernet"; 49 50 /* 51 * Property names 52 */ 53 static char debug_propname[] = "bge-debug-flags"; 54 static char clsize_propname[] = "cache-line-size"; 55 static char latency_propname[] = "latency-timer"; 56 static char localmac_boolname[] = "local-mac-address?"; 57 static char localmac_propname[] = "local-mac-address"; 58 static char macaddr_propname[] = "mac-address"; 59 static char subdev_propname[] = "subsystem-id"; 60 static char subven_propname[] = "subsystem-vendor-id"; 61 static char rxrings_propname[] = "bge-rx-rings"; 62 static char txrings_propname[] = "bge-tx-rings"; 63 static char eee_propname[] = "bge-eee"; 64 static char fm_cap[] = "fm-capable"; 65 static char default_mtu[] = "default_mtu"; 66 67 static int bge_add_intrs(bge_t *, int); 68 static void bge_rem_intrs(bge_t *); 69 static int bge_unicst_set(void *, const uint8_t *, int); 70 static int bge_addmac(void *, const uint8_t *); 71 static int bge_remmac(void *, const uint8_t *); 72 73 /* 74 * Describes the chip's DMA engine 75 */ 76 static ddi_dma_attr_t dma_attr = { 77 DMA_ATTR_V0, /* dma_attr_version */ 78 0x0000000000000000ull, /* dma_attr_addr_lo */ 79 0xFFFFFFFFFFFFFFFFull, /* dma_attr_addr_hi */ 80 0x00000000FFFFFFFFull, /* dma_attr_count_max */ 81 0x0000000000000001ull, /* dma_attr_align */ 82 0x00000FFF, /* dma_attr_burstsizes */ 83 0x00000001, /* dma_attr_minxfer */ 84 0x000000000000FFFFull, /* dma_attr_maxxfer */ 85 0x00000000FFFFFFFFull, /* dma_attr_seg */ 86 1, /* dma_attr_sgllen */ 87 0x00000001, /* dma_attr_granular */ 88 DDI_DMA_FLAGERR /* dma_attr_flags */ 89 }; 90 91 /* 92 * PIO access attributes for registers 93 */ 94 static ddi_device_acc_attr_t bge_reg_accattr = { 95 DDI_DEVICE_ATTR_V1, 96 DDI_NEVERSWAP_ACC, 97 DDI_STRICTORDER_ACC, 98 DDI_FLAGERR_ACC 99 }; 100 101 /* 102 * DMA access attributes for descriptors: NOT to be byte swapped. 
103 */ 104 static ddi_device_acc_attr_t bge_desc_accattr = { 105 DDI_DEVICE_ATTR_V0, 106 DDI_NEVERSWAP_ACC, 107 DDI_STRICTORDER_ACC 108 }; 109 110 /* 111 * DMA access attributes for data: NOT to be byte swapped. 112 */ 113 static ddi_device_acc_attr_t bge_data_accattr = { 114 DDI_DEVICE_ATTR_V0, 115 DDI_NEVERSWAP_ACC, 116 DDI_STRICTORDER_ACC 117 }; 118 119 static int bge_m_start(void *); 120 static void bge_m_stop(void *); 121 static int bge_m_promisc(void *, boolean_t); 122 static int bge_m_unicst(void * pArg, const uint8_t *); 123 static int bge_m_multicst(void *, boolean_t, const uint8_t *); 124 static void bge_m_resources(void * arg); 125 static void bge_m_ioctl(void *, queue_t *, mblk_t *); 126 static boolean_t bge_m_getcapab(void *, mac_capab_t, void *); 127 static int bge_unicst_set(void *, const uint8_t *, 128 int); 129 static int bge_m_setprop(void *, const char *, mac_prop_id_t, 130 uint_t, const void *); 131 static int bge_m_getprop(void *, const char *, mac_prop_id_t, 132 uint_t, void *); 133 static void bge_m_propinfo(void *, const char *, mac_prop_id_t, 134 mac_prop_info_handle_t); 135 static int bge_set_priv_prop(bge_t *, const char *, uint_t, 136 const void *); 137 static int bge_get_priv_prop(bge_t *, const char *, uint_t, 138 void *); 139 static void bge_priv_propinfo(const char *, 140 mac_prop_info_handle_t); 141 142 static mac_callbacks_t bge_m_callbacks = { 143 MC_IOCTL 144 #ifdef MC_RESOURCES 145 | MC_RESOURCES 146 #endif 147 #ifdef MC_SETPROP 148 | MC_SETPROP 149 #endif 150 #ifdef MC_GETPROP 151 | MC_GETPROP 152 #endif 153 #ifdef MC_PROPINFO 154 | MC_PROPINFO 155 #endif 156 | MC_GETCAPAB, 157 bge_m_stat, 158 bge_m_start, 159 bge_m_stop, 160 bge_m_promisc, 161 bge_m_multicst, 162 bge_m_unicst, 163 bge_m_tx, 164 #ifdef MC_RESOURCES 165 bge_m_resources, 166 #else 167 NULL, 168 #endif 169 bge_m_ioctl, 170 bge_m_getcapab, 171 #ifdef MC_OPEN 172 NULL, 173 NULL, 174 #endif 175 #ifdef MC_SETPROP 176 bge_m_setprop, 177 #endif 178 #ifdef MC_GETPROP 179 
bge_m_getprop, 180 #endif 181 #ifdef MC_PROPINFO 182 bge_m_propinfo 183 #endif 184 }; 185 186 char *bge_priv_prop[] = { 187 "_adv_asym_pause_cap", 188 "_adv_pause_cap", 189 "_drain_max", 190 "_msi_cnt", 191 "_rx_intr_coalesce_blank_time", 192 "_tx_intr_coalesce_blank_time", 193 "_rx_intr_coalesce_pkt_cnt", 194 "_tx_intr_coalesce_pkt_cnt", 195 NULL 196 }; 197 198 uint8_t zero_addr[6] = {0, 0, 0, 0, 0, 0}; 199 /* 200 * ========== Transmit and receive ring reinitialisation ========== 201 */ 202 203 /* 204 * These <reinit> routines each reset the specified ring to an initial 205 * state, assuming that the corresponding <init> routine has already 206 * been called exactly once. 207 */ 208 209 static void 210 bge_reinit_send_ring(send_ring_t *srp) 211 { 212 bge_queue_t *txbuf_queue; 213 bge_queue_item_t *txbuf_head; 214 sw_txbuf_t *txbuf; 215 sw_sbd_t *ssbdp; 216 uint32_t slot; 217 218 /* 219 * Reinitialise control variables ... 220 */ 221 srp->tx_flow = 0; 222 srp->tx_next = 0; 223 srp->txfill_next = 0; 224 srp->tx_free = srp->desc.nslots; 225 ASSERT(mutex_owned(srp->tc_lock)); 226 srp->tc_next = 0; 227 srp->txpkt_next = 0; 228 srp->tx_block = 0; 229 srp->tx_nobd = 0; 230 srp->tx_nobuf = 0; 231 232 /* 233 * Initialize the tx buffer push queue 234 */ 235 mutex_enter(srp->freetxbuf_lock); 236 mutex_enter(srp->txbuf_lock); 237 txbuf_queue = &srp->freetxbuf_queue; 238 txbuf_queue->head = NULL; 239 txbuf_queue->count = 0; 240 txbuf_queue->lock = srp->freetxbuf_lock; 241 srp->txbuf_push_queue = txbuf_queue; 242 243 /* 244 * Initialize the tx buffer pop queue 245 */ 246 txbuf_queue = &srp->txbuf_queue; 247 txbuf_queue->head = NULL; 248 txbuf_queue->count = 0; 249 txbuf_queue->lock = srp->txbuf_lock; 250 srp->txbuf_pop_queue = txbuf_queue; 251 txbuf_head = srp->txbuf_head; 252 txbuf = srp->txbuf; 253 for (slot = 0; slot < srp->tx_buffers; ++slot) { 254 txbuf_head->item = txbuf; 255 txbuf_head->next = txbuf_queue->head; 256 txbuf_queue->head = txbuf_head; 257 
txbuf_queue->count++; 258 txbuf++; 259 txbuf_head++; 260 } 261 mutex_exit(srp->txbuf_lock); 262 mutex_exit(srp->freetxbuf_lock); 263 264 /* 265 * Zero and sync all the h/w Send Buffer Descriptors 266 */ 267 DMA_ZERO(srp->desc); 268 DMA_SYNC(srp->desc, DDI_DMA_SYNC_FORDEV); 269 bzero(srp->pktp, BGE_SEND_BUF_MAX * sizeof (*srp->pktp)); 270 ssbdp = srp->sw_sbds; 271 for (slot = 0; slot < srp->desc.nslots; ++ssbdp, ++slot) 272 ssbdp->pbuf = NULL; 273 } 274 275 static void 276 bge_reinit_recv_ring(recv_ring_t *rrp) 277 { 278 /* 279 * Reinitialise control variables ... 280 */ 281 rrp->rx_next = 0; 282 } 283 284 static void 285 bge_reinit_buff_ring(buff_ring_t *brp, uint32_t ring) 286 { 287 bge_rbd_t *hw_rbd_p; 288 sw_rbd_t *srbdp; 289 uint32_t bufsize; 290 uint32_t nslots; 291 uint32_t slot; 292 293 static uint16_t ring_type_flag[BGE_BUFF_RINGS_MAX] = { 294 RBD_FLAG_STD_RING, 295 RBD_FLAG_JUMBO_RING, 296 RBD_FLAG_MINI_RING 297 }; 298 299 /* 300 * Zero, initialise and sync all the h/w Receive Buffer Descriptors 301 * Note: all the remaining fields (<type>, <flags>, <ip_cksum>, 302 * <tcp_udp_cksum>, <error_flag>, <vlan_tag>, and <reserved>) 303 * should be zeroed, and so don't need to be set up specifically 304 * once the whole area has been cleared. 305 */ 306 DMA_ZERO(brp->desc); 307 308 hw_rbd_p = DMA_VPTR(brp->desc); 309 nslots = brp->desc.nslots; 310 ASSERT(brp->buf[0].nslots == nslots/BGE_SPLIT); 311 bufsize = brp->buf[0].size; 312 srbdp = brp->sw_rbds; 313 for (slot = 0; slot < nslots; ++hw_rbd_p, ++srbdp, ++slot) { 314 hw_rbd_p->host_buf_addr = srbdp->pbuf.cookie.dmac_laddress; 315 hw_rbd_p->index = (uint16_t)slot; 316 hw_rbd_p->len = (uint16_t)bufsize; 317 hw_rbd_p->opaque = srbdp->pbuf.token; 318 hw_rbd_p->flags |= ring_type_flag[ring]; 319 } 320 321 DMA_SYNC(brp->desc, DDI_DMA_SYNC_FORDEV); 322 323 /* 324 * Finally, reinitialise the ring control variables ... 325 */ 326 brp->rf_next = (nslots != 0) ? 
(nslots-1) : 0; 327 } 328 329 /* 330 * Reinitialize all rings 331 */ 332 static void 333 bge_reinit_rings(bge_t *bgep) 334 { 335 uint32_t ring; 336 337 ASSERT(mutex_owned(bgep->genlock)); 338 339 /* 340 * Send Rings ... 341 */ 342 for (ring = 0; ring < bgep->chipid.tx_rings; ++ring) 343 bge_reinit_send_ring(&bgep->send[ring]); 344 345 /* 346 * Receive Return Rings ... 347 */ 348 for (ring = 0; ring < bgep->chipid.rx_rings; ++ring) 349 bge_reinit_recv_ring(&bgep->recv[ring]); 350 351 /* 352 * Receive Producer Rings ... 353 */ 354 for (ring = 0; ring < BGE_BUFF_RINGS_USED; ++ring) 355 bge_reinit_buff_ring(&bgep->buff[ring], ring); 356 } 357 358 /* 359 * ========== Internal state management entry points ========== 360 */ 361 362 #undef BGE_DBG 363 #define BGE_DBG BGE_DBG_NEMO /* debug flag for this code */ 364 365 /* 366 * These routines provide all the functionality required by the 367 * corresponding GLD entry points, but don't update the GLD state 368 * so they can be called internally without disturbing our record 369 * of what GLD thinks we should be doing ... 
370 */ 371 372 /* 373 * bge_reset() -- reset h/w & rings to initial state 374 */ 375 static int 376 #ifdef BGE_IPMI_ASF 377 bge_reset(bge_t *bgep, uint_t asf_mode) 378 #else 379 bge_reset(bge_t *bgep) 380 #endif 381 { 382 uint32_t ring; 383 int retval; 384 385 BGE_TRACE(("bge_reset($%p)", (void *)bgep)); 386 387 ASSERT(mutex_owned(bgep->genlock)); 388 389 /* 390 * Grab all the other mutexes in the world (this should 391 * ensure no other threads are manipulating driver state) 392 */ 393 for (ring = 0; ring < BGE_RECV_RINGS_MAX; ++ring) 394 mutex_enter(bgep->recv[ring].rx_lock); 395 for (ring = 0; ring < BGE_BUFF_RINGS_MAX; ++ring) 396 mutex_enter(bgep->buff[ring].rf_lock); 397 rw_enter(bgep->errlock, RW_WRITER); 398 for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring) 399 mutex_enter(bgep->send[ring].tx_lock); 400 for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring) 401 mutex_enter(bgep->send[ring].tc_lock); 402 403 #ifdef BGE_IPMI_ASF 404 retval = bge_chip_reset(bgep, B_TRUE, asf_mode); 405 #else 406 retval = bge_chip_reset(bgep, B_TRUE); 407 #endif 408 bge_reinit_rings(bgep); 409 410 /* 411 * Free the world ... 
412 */ 413 for (ring = BGE_SEND_RINGS_MAX; ring-- > 0; ) 414 mutex_exit(bgep->send[ring].tc_lock); 415 for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring) 416 mutex_exit(bgep->send[ring].tx_lock); 417 rw_exit(bgep->errlock); 418 for (ring = BGE_BUFF_RINGS_MAX; ring-- > 0; ) 419 mutex_exit(bgep->buff[ring].rf_lock); 420 for (ring = BGE_RECV_RINGS_MAX; ring-- > 0; ) 421 mutex_exit(bgep->recv[ring].rx_lock); 422 423 BGE_DEBUG(("bge_reset($%p) done", (void *)bgep)); 424 return (retval); 425 } 426 427 /* 428 * bge_stop() -- stop processing, don't reset h/w or rings 429 */ 430 static void 431 bge_stop(bge_t *bgep) 432 { 433 BGE_TRACE(("bge_stop($%p)", (void *)bgep)); 434 435 ASSERT(mutex_owned(bgep->genlock)); 436 437 #ifdef BGE_IPMI_ASF 438 if (bgep->asf_enabled) { 439 bgep->asf_pseudostop = B_TRUE; 440 } else { 441 #endif 442 bge_chip_stop(bgep, B_FALSE); 443 #ifdef BGE_IPMI_ASF 444 } 445 #endif 446 447 BGE_DEBUG(("bge_stop($%p) done", (void *)bgep)); 448 } 449 450 /* 451 * bge_start() -- start transmitting/receiving 452 */ 453 static int 454 bge_start(bge_t *bgep, boolean_t reset_phys) 455 { 456 int retval; 457 458 BGE_TRACE(("bge_start($%p, %d)", (void *)bgep, reset_phys)); 459 460 ASSERT(mutex_owned(bgep->genlock)); 461 462 /* 463 * Start chip processing, including enabling interrupts 464 */ 465 retval = bge_chip_start(bgep, reset_phys); 466 467 BGE_DEBUG(("bge_start($%p, %d) done", (void *)bgep, reset_phys)); 468 return (retval); 469 } 470 471 /* 472 * bge_restart - restart transmitting/receiving after error or suspend 473 */ 474 int 475 bge_restart(bge_t *bgep, boolean_t reset_phys) 476 { 477 int retval = DDI_SUCCESS; 478 ASSERT(mutex_owned(bgep->genlock)); 479 480 #ifdef BGE_IPMI_ASF 481 if (bgep->asf_enabled) { 482 if (bge_reset(bgep, ASF_MODE_POST_INIT) != DDI_SUCCESS) 483 retval = DDI_FAILURE; 484 } else 485 if (bge_reset(bgep, ASF_MODE_NONE) != DDI_SUCCESS) 486 retval = DDI_FAILURE; 487 #else 488 if (bge_reset(bgep) != DDI_SUCCESS) 489 retval = DDI_FAILURE; 490 
#endif 491 if (bgep->bge_mac_state == BGE_MAC_STARTED) { 492 if (bge_start(bgep, reset_phys) != DDI_SUCCESS) 493 retval = DDI_FAILURE; 494 bgep->watchdog = 0; 495 ddi_trigger_softintr(bgep->drain_id); 496 } 497 498 BGE_DEBUG(("bge_restart($%p, %d) done", (void *)bgep, reset_phys)); 499 return (retval); 500 } 501 502 503 /* 504 * ========== Nemo-required management entry points ========== 505 */ 506 507 #undef BGE_DBG 508 #define BGE_DBG BGE_DBG_NEMO /* debug flag for this code */ 509 510 /* 511 * bge_m_stop() -- stop transmitting/receiving 512 */ 513 static void 514 bge_m_stop(void *arg) 515 { 516 bge_t *bgep = arg; /* private device info */ 517 send_ring_t *srp; 518 uint32_t ring; 519 520 BGE_TRACE(("bge_m_stop($%p)", arg)); 521 522 /* 523 * Just stop processing, then record new GLD state 524 */ 525 mutex_enter(bgep->genlock); 526 if (!(bgep->progress & PROGRESS_INTR)) { 527 /* can happen during autorecovery */ 528 bgep->bge_chip_state = BGE_CHIP_STOPPED; 529 } else 530 bge_stop(bgep); 531 532 bgep->link_state = LINK_STATE_UNKNOWN; 533 mac_link_update(bgep->mh, bgep->link_state); 534 535 /* 536 * Free the possible tx buffers allocated in tx process. 
537 */ 538 #ifdef BGE_IPMI_ASF 539 if (!bgep->asf_pseudostop) 540 #endif 541 { 542 rw_enter(bgep->errlock, RW_WRITER); 543 for (ring = 0; ring < bgep->chipid.tx_rings; ++ring) { 544 srp = &bgep->send[ring]; 545 mutex_enter(srp->tx_lock); 546 if (srp->tx_array > 1) 547 bge_free_txbuf_arrays(srp); 548 mutex_exit(srp->tx_lock); 549 } 550 rw_exit(bgep->errlock); 551 } 552 bgep->bge_mac_state = BGE_MAC_STOPPED; 553 BGE_DEBUG(("bge_m_stop($%p) done", arg)); 554 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) 555 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_UNAFFECTED); 556 mutex_exit(bgep->genlock); 557 } 558 559 /* 560 * bge_m_start() -- start transmitting/receiving 561 */ 562 static int 563 bge_m_start(void *arg) 564 { 565 bge_t *bgep = arg; /* private device info */ 566 567 BGE_TRACE(("bge_m_start($%p)", arg)); 568 569 /* 570 * Start processing and record new GLD state 571 */ 572 mutex_enter(bgep->genlock); 573 if (!(bgep->progress & PROGRESS_INTR)) { 574 /* can happen during autorecovery */ 575 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 576 mutex_exit(bgep->genlock); 577 return (EIO); 578 } 579 #ifdef BGE_IPMI_ASF 580 if (bgep->asf_enabled) { 581 if ((bgep->asf_status == ASF_STAT_RUN) && 582 (bgep->asf_pseudostop)) { 583 bgep->bge_mac_state = BGE_MAC_STARTED; 584 /* forcing a mac link update here */ 585 bge_phys_check(bgep); 586 bgep->link_state = (bgep->param_link_up) ? 
LINK_STATE_UP : 587 LINK_STATE_DOWN; 588 mac_link_update(bgep->mh, bgep->link_state); 589 mutex_exit(bgep->genlock); 590 return (0); 591 } 592 } 593 if (bge_reset(bgep, ASF_MODE_INIT) != DDI_SUCCESS) { 594 #else 595 if (bge_reset(bgep) != DDI_SUCCESS) { 596 #endif 597 (void) bge_check_acc_handle(bgep, bgep->cfg_handle); 598 (void) bge_check_acc_handle(bgep, bgep->io_handle); 599 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 600 mutex_exit(bgep->genlock); 601 return (EIO); 602 } 603 if (bge_start(bgep, B_TRUE) != DDI_SUCCESS) { 604 (void) bge_check_acc_handle(bgep, bgep->cfg_handle); 605 (void) bge_check_acc_handle(bgep, bgep->io_handle); 606 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 607 mutex_exit(bgep->genlock); 608 return (EIO); 609 } 610 bgep->watchdog = 0; 611 bgep->bge_mac_state = BGE_MAC_STARTED; 612 BGE_DEBUG(("bge_m_start($%p) done", arg)); 613 614 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) { 615 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 616 mutex_exit(bgep->genlock); 617 return (EIO); 618 } 619 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) { 620 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 621 mutex_exit(bgep->genlock); 622 return (EIO); 623 } 624 #ifdef BGE_IPMI_ASF 625 if (bgep->asf_enabled) { 626 if (bgep->asf_status != ASF_STAT_RUN) { 627 /* start ASF heart beat */ 628 bgep->asf_timeout_id = timeout(bge_asf_heartbeat, 629 (void *)bgep, 630 drv_usectohz(BGE_ASF_HEARTBEAT_INTERVAL)); 631 bgep->asf_status = ASF_STAT_RUN; 632 } 633 } 634 #endif 635 mutex_exit(bgep->genlock); 636 637 return (0); 638 } 639 640 /* 641 * bge_unicst_set() -- set the physical network address 642 */ 643 static int 644 bge_unicst_set(void *arg, const uint8_t *macaddr, int slot) 645 { 646 bge_t *bgep = arg; /* private device info */ 647 648 BGE_TRACE(("bge_unicst_set($%p, %s)", arg, 649 ether_sprintf((void *)macaddr))); 650 /* 651 * Remember the new current address in the driver 
state 652 * Sync the chip's idea of the address too ... 653 */ 654 mutex_enter(bgep->genlock); 655 if (!(bgep->progress & PROGRESS_INTR)) { 656 /* can happen during autorecovery */ 657 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 658 mutex_exit(bgep->genlock); 659 return (EIO); 660 } 661 ethaddr_copy(macaddr, bgep->curr_addr[slot].addr); 662 #ifdef BGE_IPMI_ASF 663 if (bge_chip_sync(bgep, B_FALSE) == DDI_FAILURE) { 664 #else 665 if (bge_chip_sync(bgep) == DDI_FAILURE) { 666 #endif 667 (void) bge_check_acc_handle(bgep, bgep->cfg_handle); 668 (void) bge_check_acc_handle(bgep, bgep->io_handle); 669 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 670 mutex_exit(bgep->genlock); 671 return (EIO); 672 } 673 #ifdef BGE_IPMI_ASF 674 if (bgep->asf_enabled) { 675 /* 676 * The above bge_chip_sync() function wrote the ethernet MAC 677 * addresses registers which destroyed the IPMI/ASF sideband. 678 * Here, we have to reset chip to make IPMI/ASF sideband work. 679 */ 680 if (bgep->asf_status == ASF_STAT_RUN) { 681 /* 682 * We must stop ASF heart beat before bge_chip_stop(), 683 * otherwise some computers (ex. IBM HS20 blade server) 684 * may crash. 685 */ 686 bge_asf_update_status(bgep); 687 bge_asf_stop_timer(bgep); 688 bgep->asf_status = ASF_STAT_STOP; 689 690 bge_asf_pre_reset_operations(bgep, BGE_INIT_RESET); 691 } 692 bge_chip_stop(bgep, B_FALSE); 693 694 if (bge_restart(bgep, B_FALSE) == DDI_FAILURE) { 695 (void) bge_check_acc_handle(bgep, bgep->cfg_handle); 696 (void) bge_check_acc_handle(bgep, bgep->io_handle); 697 ddi_fm_service_impact(bgep->devinfo, 698 DDI_SERVICE_DEGRADED); 699 mutex_exit(bgep->genlock); 700 return (EIO); 701 } 702 703 /* 704 * Start our ASF heartbeat counter as soon as possible. 
705 */ 706 if (bgep->asf_status != ASF_STAT_RUN) { 707 /* start ASF heart beat */ 708 bgep->asf_timeout_id = timeout(bge_asf_heartbeat, 709 (void *)bgep, 710 drv_usectohz(BGE_ASF_HEARTBEAT_INTERVAL)); 711 bgep->asf_status = ASF_STAT_RUN; 712 } 713 } 714 #endif 715 BGE_DEBUG(("bge_unicst_set($%p) done", arg)); 716 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) { 717 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 718 mutex_exit(bgep->genlock); 719 return (EIO); 720 } 721 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) { 722 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 723 mutex_exit(bgep->genlock); 724 return (EIO); 725 } 726 mutex_exit(bgep->genlock); 727 728 return (0); 729 } 730 731 extern void bge_wake_factotum(bge_t *); 732 733 static boolean_t 734 bge_param_locked(mac_prop_id_t pr_num) 735 { 736 /* 737 * All adv_* parameters are locked (read-only) while 738 * the device is in any sort of loopback mode ... 739 */ 740 switch (pr_num) { 741 case MAC_PROP_ADV_1000FDX_CAP: 742 case MAC_PROP_EN_1000FDX_CAP: 743 case MAC_PROP_ADV_1000HDX_CAP: 744 case MAC_PROP_EN_1000HDX_CAP: 745 case MAC_PROP_ADV_100FDX_CAP: 746 case MAC_PROP_EN_100FDX_CAP: 747 case MAC_PROP_ADV_100HDX_CAP: 748 case MAC_PROP_EN_100HDX_CAP: 749 case MAC_PROP_ADV_10FDX_CAP: 750 case MAC_PROP_EN_10FDX_CAP: 751 case MAC_PROP_ADV_10HDX_CAP: 752 case MAC_PROP_EN_10HDX_CAP: 753 case MAC_PROP_AUTONEG: 754 case MAC_PROP_FLOWCTRL: 755 return (B_TRUE); 756 } 757 return (B_FALSE); 758 } 759 /* 760 * callback functions for set/get of properties 761 */ 762 static int 763 bge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num, 764 uint_t pr_valsize, const void *pr_val) 765 { 766 bge_t *bgep = barg; 767 int err = 0; 768 uint32_t cur_mtu, new_mtu; 769 link_flowctrl_t fl; 770 771 mutex_enter(bgep->genlock); 772 if (bgep->param_loop_mode != BGE_LOOP_NONE && 773 bge_param_locked(pr_num)) { 774 /* 775 * All adv_* parameters are locked (read-only) 776 
* while the device is in any sort of loopback mode. 777 */ 778 mutex_exit(bgep->genlock); 779 return (EBUSY); 780 } 781 if ((bgep->chipid.flags & CHIP_FLAG_SERDES) && 782 ((pr_num == MAC_PROP_EN_100FDX_CAP) || 783 (pr_num == MAC_PROP_EN_100HDX_CAP) || 784 (pr_num == MAC_PROP_EN_10FDX_CAP) || 785 (pr_num == MAC_PROP_EN_10HDX_CAP))) { 786 /* 787 * these properties are read/write on copper, 788 * read-only and 0 on serdes 789 */ 790 mutex_exit(bgep->genlock); 791 return (ENOTSUP); 792 } 793 if (DEVICE_5906_SERIES_CHIPSETS(bgep) && 794 ((pr_num == MAC_PROP_EN_1000FDX_CAP) || 795 (pr_num == MAC_PROP_EN_1000HDX_CAP))) { 796 mutex_exit(bgep->genlock); 797 return (ENOTSUP); 798 } 799 800 switch (pr_num) { 801 case MAC_PROP_EN_1000FDX_CAP: 802 bgep->param_en_1000fdx = *(uint8_t *)pr_val; 803 bgep->param_adv_1000fdx = *(uint8_t *)pr_val; 804 goto reprogram; 805 case MAC_PROP_EN_1000HDX_CAP: 806 bgep->param_en_1000hdx = *(uint8_t *)pr_val; 807 bgep->param_adv_1000hdx = *(uint8_t *)pr_val; 808 goto reprogram; 809 case MAC_PROP_EN_100FDX_CAP: 810 bgep->param_en_100fdx = *(uint8_t *)pr_val; 811 bgep->param_adv_100fdx = *(uint8_t *)pr_val; 812 goto reprogram; 813 case MAC_PROP_EN_100HDX_CAP: 814 bgep->param_en_100hdx = *(uint8_t *)pr_val; 815 bgep->param_adv_100hdx = *(uint8_t *)pr_val; 816 goto reprogram; 817 case MAC_PROP_EN_10FDX_CAP: 818 bgep->param_en_10fdx = *(uint8_t *)pr_val; 819 bgep->param_adv_10fdx = *(uint8_t *)pr_val; 820 goto reprogram; 821 case MAC_PROP_EN_10HDX_CAP: 822 bgep->param_en_10hdx = *(uint8_t *)pr_val; 823 bgep->param_adv_10hdx = *(uint8_t *)pr_val; 824 reprogram: 825 if (err == 0 && bge_reprogram(bgep) == IOC_INVAL) 826 err = EINVAL; 827 break; 828 case MAC_PROP_ADV_1000FDX_CAP: 829 case MAC_PROP_ADV_1000HDX_CAP: 830 case MAC_PROP_ADV_100FDX_CAP: 831 case MAC_PROP_ADV_100HDX_CAP: 832 case MAC_PROP_ADV_10FDX_CAP: 833 case MAC_PROP_ADV_10HDX_CAP: 834 case MAC_PROP_STATUS: 835 case MAC_PROP_SPEED: 836 case MAC_PROP_DUPLEX: 837 err = ENOTSUP; /* read-only 
prop. Can't set this */ 838 break; 839 case MAC_PROP_AUTONEG: 840 bgep->param_adv_autoneg = *(uint8_t *)pr_val; 841 if (bge_reprogram(bgep) == IOC_INVAL) 842 err = EINVAL; 843 break; 844 case MAC_PROP_MTU: 845 cur_mtu = bgep->chipid.default_mtu; 846 bcopy(pr_val, &new_mtu, sizeof (new_mtu)); 847 848 if (new_mtu == cur_mtu) { 849 err = 0; 850 break; 851 } 852 if (new_mtu < BGE_DEFAULT_MTU || 853 new_mtu > BGE_MAXIMUM_MTU) { 854 err = EINVAL; 855 break; 856 } 857 if ((new_mtu > BGE_DEFAULT_MTU) && 858 (bgep->chipid.flags & CHIP_FLAG_NO_JUMBO)) { 859 err = EINVAL; 860 break; 861 } 862 if (bgep->bge_mac_state == BGE_MAC_STARTED) { 863 err = EBUSY; 864 break; 865 } 866 bgep->chipid.default_mtu = new_mtu; 867 if (bge_chip_id_init(bgep)) { 868 err = EINVAL; 869 break; 870 } 871 bgep->bge_dma_error = B_TRUE; 872 bgep->manual_reset = B_TRUE; 873 bge_chip_stop(bgep, B_TRUE); 874 bge_wake_factotum(bgep); 875 err = 0; 876 break; 877 case MAC_PROP_FLOWCTRL: 878 bcopy(pr_val, &fl, sizeof (fl)); 879 switch (fl) { 880 default: 881 err = ENOTSUP; 882 break; 883 case LINK_FLOWCTRL_NONE: 884 bgep->param_adv_pause = 0; 885 bgep->param_adv_asym_pause = 0; 886 887 bgep->param_link_rx_pause = B_FALSE; 888 bgep->param_link_tx_pause = B_FALSE; 889 break; 890 case LINK_FLOWCTRL_RX: 891 bgep->param_adv_pause = 1; 892 bgep->param_adv_asym_pause = 1; 893 894 bgep->param_link_rx_pause = B_TRUE; 895 bgep->param_link_tx_pause = B_FALSE; 896 break; 897 case LINK_FLOWCTRL_TX: 898 bgep->param_adv_pause = 0; 899 bgep->param_adv_asym_pause = 1; 900 901 bgep->param_link_rx_pause = B_FALSE; 902 bgep->param_link_tx_pause = B_TRUE; 903 break; 904 case LINK_FLOWCTRL_BI: 905 bgep->param_adv_pause = 1; 906 bgep->param_adv_asym_pause = 0; 907 908 bgep->param_link_rx_pause = B_TRUE; 909 bgep->param_link_tx_pause = B_TRUE; 910 break; 911 } 912 913 if (err == 0) { 914 if (bge_reprogram(bgep) == IOC_INVAL) 915 err = EINVAL; 916 } 917 918 break; 919 case MAC_PROP_PRIVATE: 920 err = bge_set_priv_prop(bgep, pr_name, 
pr_valsize, 921 pr_val); 922 break; 923 default: 924 err = ENOTSUP; 925 break; 926 } 927 mutex_exit(bgep->genlock); 928 return (err); 929 } 930 931 /* ARGSUSED */ 932 static int 933 bge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num, 934 uint_t pr_valsize, void *pr_val) 935 { 936 bge_t *bgep = barg; 937 int err = 0; 938 939 switch (pr_num) { 940 case MAC_PROP_DUPLEX: 941 ASSERT(pr_valsize >= sizeof (link_duplex_t)); 942 bcopy(&bgep->param_link_duplex, pr_val, 943 sizeof (link_duplex_t)); 944 break; 945 case MAC_PROP_SPEED: { 946 uint64_t speed = bgep->param_link_speed * 1000000ull; 947 948 ASSERT(pr_valsize >= sizeof (speed)); 949 bcopy(&speed, pr_val, sizeof (speed)); 950 break; 951 } 952 case MAC_PROP_STATUS: 953 ASSERT(pr_valsize >= sizeof (link_state_t)); 954 bcopy(&bgep->link_state, pr_val, 955 sizeof (link_state_t)); 956 break; 957 case MAC_PROP_AUTONEG: 958 *(uint8_t *)pr_val = bgep->param_adv_autoneg; 959 break; 960 case MAC_PROP_FLOWCTRL: { 961 link_flowctrl_t fl; 962 963 ASSERT(pr_valsize >= sizeof (fl)); 964 965 if (bgep->param_link_rx_pause && 966 !bgep->param_link_tx_pause) 967 fl = LINK_FLOWCTRL_RX; 968 969 if (!bgep->param_link_rx_pause && 970 !bgep->param_link_tx_pause) 971 fl = LINK_FLOWCTRL_NONE; 972 973 if (!bgep->param_link_rx_pause && 974 bgep->param_link_tx_pause) 975 fl = LINK_FLOWCTRL_TX; 976 977 if (bgep->param_link_rx_pause && 978 bgep->param_link_tx_pause) 979 fl = LINK_FLOWCTRL_BI; 980 bcopy(&fl, pr_val, sizeof (fl)); 981 break; 982 } 983 case MAC_PROP_ADV_1000FDX_CAP: 984 *(uint8_t *)pr_val = bgep->param_adv_1000fdx; 985 break; 986 case MAC_PROP_EN_1000FDX_CAP: 987 *(uint8_t *)pr_val = bgep->param_en_1000fdx; 988 break; 989 case MAC_PROP_ADV_1000HDX_CAP: 990 *(uint8_t *)pr_val = bgep->param_adv_1000hdx; 991 break; 992 case MAC_PROP_EN_1000HDX_CAP: 993 *(uint8_t *)pr_val = bgep->param_en_1000hdx; 994 break; 995 case MAC_PROP_ADV_100FDX_CAP: 996 *(uint8_t *)pr_val = bgep->param_adv_100fdx; 997 break; 998 case 
MAC_PROP_EN_100FDX_CAP: 999 *(uint8_t *)pr_val = bgep->param_en_100fdx; 1000 break; 1001 case MAC_PROP_ADV_100HDX_CAP: 1002 *(uint8_t *)pr_val = bgep->param_adv_100hdx; 1003 break; 1004 case MAC_PROP_EN_100HDX_CAP: 1005 *(uint8_t *)pr_val = bgep->param_en_100hdx; 1006 break; 1007 case MAC_PROP_ADV_10FDX_CAP: 1008 *(uint8_t *)pr_val = bgep->param_adv_10fdx; 1009 break; 1010 case MAC_PROP_EN_10FDX_CAP: 1011 *(uint8_t *)pr_val = bgep->param_en_10fdx; 1012 break; 1013 case MAC_PROP_ADV_10HDX_CAP: 1014 *(uint8_t *)pr_val = bgep->param_adv_10hdx; 1015 break; 1016 case MAC_PROP_EN_10HDX_CAP: 1017 *(uint8_t *)pr_val = bgep->param_en_10hdx; 1018 break; 1019 case MAC_PROP_ADV_100T4_CAP: 1020 case MAC_PROP_EN_100T4_CAP: 1021 *(uint8_t *)pr_val = 0; 1022 break; 1023 case MAC_PROP_PRIVATE: 1024 err = bge_get_priv_prop(bgep, pr_name, 1025 pr_valsize, pr_val); 1026 return (err); 1027 default: 1028 return (ENOTSUP); 1029 } 1030 return (0); 1031 } 1032 1033 static void 1034 bge_m_propinfo(void *barg, const char *pr_name, mac_prop_id_t pr_num, 1035 mac_prop_info_handle_t prh) 1036 { 1037 bge_t *bgep = barg; 1038 int flags = bgep->chipid.flags; 1039 1040 /* 1041 * By default permissions are read/write unless specified 1042 * otherwise by the driver. 
1043 */ 1044 1045 switch (pr_num) { 1046 case MAC_PROP_DUPLEX: 1047 case MAC_PROP_SPEED: 1048 case MAC_PROP_STATUS: 1049 case MAC_PROP_ADV_1000FDX_CAP: 1050 case MAC_PROP_ADV_1000HDX_CAP: 1051 case MAC_PROP_ADV_100FDX_CAP: 1052 case MAC_PROP_ADV_100HDX_CAP: 1053 case MAC_PROP_ADV_10FDX_CAP: 1054 case MAC_PROP_ADV_10HDX_CAP: 1055 case MAC_PROP_ADV_100T4_CAP: 1056 case MAC_PROP_EN_100T4_CAP: 1057 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ); 1058 break; 1059 1060 case MAC_PROP_EN_1000FDX_CAP: 1061 case MAC_PROP_EN_1000HDX_CAP: 1062 if (DEVICE_5906_SERIES_CHIPSETS(bgep)) 1063 mac_prop_info_set_default_uint8(prh, 0); 1064 else 1065 mac_prop_info_set_default_uint8(prh, 1); 1066 break; 1067 1068 case MAC_PROP_EN_100FDX_CAP: 1069 case MAC_PROP_EN_100HDX_CAP: 1070 case MAC_PROP_EN_10FDX_CAP: 1071 case MAC_PROP_EN_10HDX_CAP: 1072 mac_prop_info_set_default_uint8(prh, 1073 (flags & CHIP_FLAG_SERDES) ? 0 : 1); 1074 break; 1075 1076 case MAC_PROP_AUTONEG: 1077 mac_prop_info_set_default_uint8(prh, 1); 1078 break; 1079 1080 case MAC_PROP_FLOWCTRL: 1081 mac_prop_info_set_default_link_flowctrl(prh, 1082 LINK_FLOWCTRL_BI); 1083 break; 1084 1085 case MAC_PROP_MTU: 1086 mac_prop_info_set_range_uint32(prh, BGE_DEFAULT_MTU, 1087 (flags & CHIP_FLAG_NO_JUMBO) ? 
1088 BGE_DEFAULT_MTU : BGE_MAXIMUM_MTU); 1089 break; 1090 1091 case MAC_PROP_PRIVATE: 1092 bge_priv_propinfo(pr_name, prh); 1093 break; 1094 } 1095 1096 mutex_enter(bgep->genlock); 1097 if ((bgep->param_loop_mode != BGE_LOOP_NONE && 1098 bge_param_locked(pr_num)) || 1099 ((bgep->chipid.flags & CHIP_FLAG_SERDES) && 1100 ((pr_num == MAC_PROP_EN_100FDX_CAP) || 1101 (pr_num == MAC_PROP_EN_100HDX_CAP) || 1102 (pr_num == MAC_PROP_EN_10FDX_CAP) || 1103 (pr_num == MAC_PROP_EN_10HDX_CAP))) || 1104 (DEVICE_5906_SERIES_CHIPSETS(bgep) && 1105 ((pr_num == MAC_PROP_EN_1000FDX_CAP) || 1106 (pr_num == MAC_PROP_EN_1000HDX_CAP)))) 1107 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ); 1108 mutex_exit(bgep->genlock); 1109 } 1110 1111 /* ARGSUSED */ 1112 static int 1113 bge_set_priv_prop(bge_t *bgep, const char *pr_name, uint_t pr_valsize, 1114 const void *pr_val) 1115 { 1116 int err = 0; 1117 long result; 1118 1119 if (strcmp(pr_name, "_adv_pause_cap") == 0) { 1120 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 1121 if (result > 1 || result < 0) { 1122 err = EINVAL; 1123 } else { 1124 bgep->param_adv_pause = (uint32_t)result; 1125 if (bge_reprogram(bgep) == IOC_INVAL) 1126 err = EINVAL; 1127 } 1128 return (err); 1129 } 1130 if (strcmp(pr_name, "_adv_asym_pause_cap") == 0) { 1131 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 1132 if (result > 1 || result < 0) { 1133 err = EINVAL; 1134 } else { 1135 bgep->param_adv_asym_pause = (uint32_t)result; 1136 if (bge_reprogram(bgep) == IOC_INVAL) 1137 err = EINVAL; 1138 } 1139 return (err); 1140 } 1141 if (strcmp(pr_name, "_drain_max") == 0) { 1142 1143 /* 1144 * on the Tx side, we need to update the h/w register for 1145 * real packet transmission per packet. The drain_max parameter 1146 * is used to reduce the register access. This parameter 1147 * controls the max number of packets that we will hold before 1148 * updating the bge h/w to trigger h/w transmit. 
The bge 1149 * chipset usually has a max of 512 Tx descriptors, thus 1150 * the upper bound on drain_max is 512. 1151 */ 1152 if (pr_val == NULL) { 1153 err = EINVAL; 1154 return (err); 1155 } 1156 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 1157 if (result > 512 || result < 1) 1158 err = EINVAL; 1159 else { 1160 bgep->param_drain_max = (uint32_t)result; 1161 if (bge_reprogram(bgep) == IOC_INVAL) 1162 err = EINVAL; 1163 } 1164 return (err); 1165 } 1166 if (strcmp(pr_name, "_msi_cnt") == 0) { 1167 1168 if (pr_val == NULL) { 1169 err = EINVAL; 1170 return (err); 1171 } 1172 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 1173 if (result > 7 || result < 0) 1174 err = EINVAL; 1175 else { 1176 bgep->param_msi_cnt = (uint32_t)result; 1177 if (bge_reprogram(bgep) == IOC_INVAL) 1178 err = EINVAL; 1179 } 1180 return (err); 1181 } 1182 if (strcmp(pr_name, "_rx_intr_coalesce_blank_time") == 0) { 1183 if (ddi_strtol(pr_val, (char **)NULL, 0, &result) != 0) 1184 return (EINVAL); 1185 if (result < 0) 1186 err = EINVAL; 1187 else { 1188 bgep->chipid.rx_ticks_norm = (uint32_t)result; 1189 bge_chip_coalesce_update(bgep); 1190 } 1191 return (err); 1192 } 1193 1194 if (strcmp(pr_name, "_rx_intr_coalesce_pkt_cnt") == 0) { 1195 if (ddi_strtol(pr_val, (char **)NULL, 0, &result) != 0) 1196 return (EINVAL); 1197 1198 if (result < 0) 1199 err = EINVAL; 1200 else { 1201 bgep->chipid.rx_count_norm = (uint32_t)result; 1202 bge_chip_coalesce_update(bgep); 1203 } 1204 return (err); 1205 } 1206 if (strcmp(pr_name, "_tx_intr_coalesce_blank_time") == 0) { 1207 if (ddi_strtol(pr_val, (char **)NULL, 0, &result) != 0) 1208 return (EINVAL); 1209 if (result < 0) 1210 err = EINVAL; 1211 else { 1212 bgep->chipid.tx_ticks_norm = (uint32_t)result; 1213 bge_chip_coalesce_update(bgep); 1214 } 1215 return (err); 1216 } 1217 1218 if (strcmp(pr_name, "_tx_intr_coalesce_pkt_cnt") == 0) { 1219 if (ddi_strtol(pr_val, (char **)NULL, 0, &result) != 0) 1220 return (EINVAL); 1221 1222 if (result < 0) 
1223 err = EINVAL; 1224 else { 1225 bgep->chipid.tx_count_norm = (uint32_t)result; 1226 bge_chip_coalesce_update(bgep); 1227 } 1228 return (err); 1229 } 1230 return (ENOTSUP); 1231 } 1232 1233 static int 1234 bge_get_priv_prop(bge_t *bge, const char *pr_name, uint_t pr_valsize, 1235 void *pr_val) 1236 { 1237 int value; 1238 1239 if (strcmp(pr_name, "_adv_pause_cap") == 0) 1240 value = bge->param_adv_pause; 1241 else if (strcmp(pr_name, "_adv_asym_pause_cap") == 0) 1242 value = bge->param_adv_asym_pause; 1243 else if (strcmp(pr_name, "_drain_max") == 0) 1244 value = bge->param_drain_max; 1245 else if (strcmp(pr_name, "_msi_cnt") == 0) 1246 value = bge->param_msi_cnt; 1247 else if (strcmp(pr_name, "_rx_intr_coalesce_blank_time") == 0) 1248 value = bge->chipid.rx_ticks_norm; 1249 else if (strcmp(pr_name, "_tx_intr_coalesce_blank_time") == 0) 1250 value = bge->chipid.tx_ticks_norm; 1251 else if (strcmp(pr_name, "_rx_intr_coalesce_pkt_cnt") == 0) 1252 value = bge->chipid.rx_count_norm; 1253 else if (strcmp(pr_name, "_tx_intr_coalesce_pkt_cnt") == 0) 1254 value = bge->chipid.tx_count_norm; 1255 else 1256 return (ENOTSUP); 1257 1258 (void) snprintf(pr_val, pr_valsize, "%d", value); 1259 return (0); 1260 } 1261 1262 static void 1263 bge_priv_propinfo(const char *pr_name, mac_prop_info_handle_t mph) 1264 { 1265 char valstr[64]; 1266 int value; 1267 1268 if (strcmp(pr_name, "_adv_pause_cap") == 0) 1269 value = 1; 1270 else if (strcmp(pr_name, "_adv_asym_pause_cap") == 0) 1271 value = 1; 1272 else if (strcmp(pr_name, "_drain_max") == 0) 1273 value = 64; 1274 else if (strcmp(pr_name, "_msi_cnt") == 0) 1275 value = 0; 1276 else if (strcmp(pr_name, "_rx_intr_coalesce_blank_time") == 0) 1277 value = bge_rx_ticks_norm; 1278 else if (strcmp(pr_name, "_tx_intr_coalesce_blank_time") == 0) 1279 value = bge_tx_ticks_norm; 1280 else if (strcmp(pr_name, "_rx_intr_coalesce_pkt_cnt") == 0) 1281 value = bge_rx_count_norm; 1282 else if (strcmp(pr_name, "_tx_intr_coalesce_pkt_cnt") == 0) 1283 
value = bge_tx_count_norm;
	else
		return;

	(void) snprintf(valstr, sizeof (valstr), "%d", value);
	mac_prop_info_set_default_str(mph, valstr);
}


/*
 * bge_m_unicst() -- set the primary unicast (MAC) address.
 *
 * Installs the address into every rx ring/group slot via bge_addmac().
 * NOTE(review): the return value of bge_addmac() is discarded, so a
 * failure on one ring is silently ignored -- confirm this is intended.
 */
static int
bge_m_unicst(void * arg, const uint8_t * mac_addr)
{
	bge_t *bgep = arg;
	int i;

	/* XXX sets the mac address for all ring slots... OK? */
	for (i = 0; i < MIN(bgep->chipid.rx_rings, MAC_ADDRESS_REGS_MAX); i++)
		bge_addmac(&bgep->recv[i], mac_addr);

	return (0);
}


/*
 * Compute the index of the required bit in the multicast hash map.
 * This must mirror the way the hardware actually does it!
 * See Broadcom document 570X-PG102-R page 125.
 */
static uint32_t
bge_hash_index(const uint8_t *mca)
{
	uint32_t hash;

	/* CRC-32 over the 6-byte ethernet address, seeded with all-ones */
	CRC32(hash, mca, ETHERADDRL, -1U, crc32_table);

	return (hash);
}

/*
 * bge_m_multicst_add() -- enable/disable a multicast address
 */
static int
bge_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
{
	bge_t *bgep = arg;		/* private device info	*/
	uint32_t hash;
	uint32_t index;			/* bit index in the hash map	*/
	uint32_t word;			/* 32-bit word holding that bit	*/
	uint32_t bit;			/* mask for the bit within word	*/
	uint8_t *refp;			/* per-index reference count	*/

	BGE_TRACE(("bge_m_multicst($%p, %s, %s)", arg,
	    (add) ? "add" : "remove", ether_sprintf((void *)mca)));

	/*
	 * Precalculate all required masks, pointers etc ...
	 */
	hash = bge_hash_index(mca);
	index = hash % BGE_HASH_TABLE_SIZE;
	word = index/32u;
	bit = 1 << (index % 32u);
	refp = &bgep->mcast_refs[index];

	BGE_DEBUG(("bge_m_multicst: hash 0x%x index %d (%d:0x%x) = %d",
	    hash, index, word, bit, *refp));

	/*
	 * We must set the appropriate bit in the hash map (and the
	 * corresponding h/w register) when the refcount goes from 0
	 * to >0, and clear it when the last ref goes away (refcount
	 * goes from >0 back to 0).
 If we change the hash map, we
	 * must also update the chip's hardware map registers.
	 */
	mutex_enter(bgep->genlock);
	if (!(bgep->progress & PROGRESS_INTR)) {
		/* can happen during autorecovery */
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	if (add) {
		if ((*refp)++ == 0) {
			bgep->mcast_hash[word] |= bit;
#ifdef BGE_IPMI_ASF
			if (bge_chip_sync(bgep, B_TRUE) == DDI_FAILURE) {
#else
			if (bge_chip_sync(bgep) == DDI_FAILURE) {
#endif
				(void) bge_check_acc_handle(bgep,
				    bgep->cfg_handle);
				(void) bge_check_acc_handle(bgep,
				    bgep->io_handle);
				ddi_fm_service_impact(bgep->devinfo,
				    DDI_SERVICE_DEGRADED);
				mutex_exit(bgep->genlock);
				return (EIO);
			}
		}
	} else {
		if (--(*refp) == 0) {
			bgep->mcast_hash[word] &= ~bit;
#ifdef BGE_IPMI_ASF
			if (bge_chip_sync(bgep, B_TRUE) == DDI_FAILURE) {
#else
			if (bge_chip_sync(bgep) == DDI_FAILURE) {
#endif
				(void) bge_check_acc_handle(bgep,
				    bgep->cfg_handle);
				(void) bge_check_acc_handle(bgep,
				    bgep->io_handle);
				ddi_fm_service_impact(bgep->devinfo,
				    DDI_SERVICE_DEGRADED);
				mutex_exit(bgep->genlock);
				return (EIO);
			}
		}
	}
	BGE_DEBUG(("bge_m_multicst($%p) done", arg));
	/* Post-update FMA checks on both register access handles */
	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	mutex_exit(bgep->genlock);

	return (0);
}

/*
 * bge_m_promisc() -- set or reset promiscuous mode on the board
 *
 * Program the hardware to enable/disable promiscuous and/or
 *
receive-all-multicast modes.
 */
static int
bge_m_promisc(void *arg, boolean_t on)
{
	bge_t *bgep = arg;

	BGE_TRACE(("bge_m_promisc_set($%p, %d)", arg, on));

	/*
	 * Store MAC layer specified mode and pass to chip layer to update h/w
	 */
	mutex_enter(bgep->genlock);
	if (!(bgep->progress & PROGRESS_INTR)) {
		/* can happen during autorecovery */
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	bgep->promisc = on;
#ifdef BGE_IPMI_ASF
	if (bge_chip_sync(bgep, B_TRUE) == DDI_FAILURE) {
#else
	if (bge_chip_sync(bgep) == DDI_FAILURE) {
#endif
		(void) bge_check_acc_handle(bgep, bgep->cfg_handle);
		(void) bge_check_acc_handle(bgep, bgep->io_handle);
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	BGE_DEBUG(("bge_m_promisc_set($%p) done", arg));
	/* Post-update FMA checks on both register access handles */
	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	mutex_exit(bgep->genlock);
	return (0);
}

#ifdef MC_RESOURCES

/* No-op blanking callback registered with each MAC rx fifo resource */
static void
bge_blank(void * arg, time_t tick_cnt, uint_t pkt_cnt)
{
	(void)arg;
	(void)tick_cnt;
	(void)pkt_cnt;
}

/*
 * Register one MAC_RX_FIFO resource per receive ring with the MAC layer
 * (legacy mac_resource_add() interface).
 */
static void
bge_m_resources(void * arg)
{
	bge_t *bgep = arg;
	mac_rx_fifo_t mrf;
	int i;

	mrf.mrf_type = MAC_RX_FIFO;
	mrf.mrf_blank = bge_blank;
	mrf.mrf_arg = (void *)bgep;
	mrf.mrf_normal_blank_time = 25;
	mrf.mrf_normal_pkt_count = 8;

	for (i = 0; i < BGE_RECV_RINGS_MAX; i++) {
		bgep->macRxResourceHandles[i] =
		    mac_resource_add(bgep->mh, (mac_resource_t *)&mrf);
	}
}

#endif /* MC_RESOURCES */

/*
 * Find the slot for the specified unicast address.
 * Returns the slot index, or -1 if the address is not programmed.
 * Caller must hold genlock.
 */
int
bge_unicst_find(bge_t *bgep, const uint8_t *mac_addr)
{
	int slot;

	ASSERT(mutex_owned(bgep->genlock));

	for (slot = 0; slot < bgep->unicst_addr_total; slot++) {
		if (bcmp(bgep->curr_addr[slot].addr, mac_addr, ETHERADDRL) == 0)
			return (slot);
	}

	return (-1);
}

/*
 * Programs the classifier to start steering packets matching 'mac_addr' to the
 * specified ring 'arg'.
 * Returns 0, ENOSPC (no address slot free), ENOMEM, EEXIST/EBUSY (ring
 * already has a rule), or an error from bge_unicst_set().
 */
static int
bge_addmac(void *arg, const uint8_t * mac_addr)
{
	recv_ring_t *rrp = (recv_ring_t *)arg;
	bge_t *bgep = rrp->bgep;
	bge_recv_rule_t *rulep = bgep->recv_rules;
	bge_rule_info_t *rinfop = NULL;
	/* rule destinations are 1-based ring numbers */
	uint8_t ring = (uint8_t)(rrp - bgep->recv) + 1;
	int i;
	uint16_t tmp16;
	uint32_t tmp32;
	int slot;
	int err;

	mutex_enter(bgep->genlock);
	if (bgep->unicst_addr_avail == 0) {
		mutex_exit(bgep->genlock);
		return (ENOSPC);
	}

	/*
	 * First add the unicast address to a available slot.
	 */
	slot = bge_unicst_find(bgep, mac_addr);
	ASSERT(slot == -1);

	for (slot = 0; slot < bgep->unicst_addr_total; slot++) {
		if (!bgep->curr_addr[slot].set) {
			bgep->curr_addr[slot].set = B_TRUE;
			break;
		}
	}

	ASSERT(slot < bgep->unicst_addr_total);
	bgep->unicst_addr_avail--;
	mutex_exit(bgep->genlock);

	if ((err = bge_unicst_set(bgep, mac_addr, slot)) != 0)
		goto fail;

	/* A rule is already here. Deny this. */
	if (rrp->mac_addr_rule != NULL) {
		err = ether_cmp(mac_addr, rrp->mac_addr_val) ? EEXIST : EBUSY;
		goto fail;
	}

	/*
	 * Allocate a bge_rule_info_t to keep track of which rule slots
	 * are being used.
 */
	rinfop = kmem_zalloc(sizeof (bge_rule_info_t), KM_NOSLEEP);
	if (rinfop == NULL) {
		err = ENOMEM;
		goto fail;
	}

	/*
	 * Look for the starting slot to place the rules.
	 * The two slots we reserve must be contiguous.
	 */
	for (i = 0; i + 1 < RECV_RULES_NUM_MAX; i++)
		if ((rulep[i].control & RECV_RULE_CTL_ENABLE) == 0 &&
		    (rulep[i+1].control & RECV_RULE_CTL_ENABLE) == 0)
			break;

	ASSERT(i + 1 < RECV_RULES_NUM_MAX);

	/* First rule: match the high 4 bytes of the MAC address (AND) */
	bcopy(mac_addr, &tmp32, sizeof (tmp32));
	rulep[i].mask_value = ntohl(tmp32);
	rulep[i].control = RULE_DEST_MAC_1(ring) | RECV_RULE_CTL_AND;
	bge_reg_put32(bgep, RECV_RULE_MASK_REG(i), rulep[i].mask_value);
	bge_reg_put32(bgep, RECV_RULE_CONTROL_REG(i), rulep[i].control);

	/* Second rule: match the low 2 bytes (upper half-word masked) */
	bcopy(mac_addr + 4, &tmp16, sizeof (tmp16));
	rulep[i+1].mask_value = 0xffff0000 | ntohs(tmp16);
	rulep[i+1].control = RULE_DEST_MAC_2(ring);
	bge_reg_put32(bgep, RECV_RULE_MASK_REG(i+1), rulep[i+1].mask_value);
	bge_reg_put32(bgep, RECV_RULE_CONTROL_REG(i+1), rulep[i+1].control);
	rinfop->start = i;
	rinfop->count = 2;

	rrp->mac_addr_rule = rinfop;
	bcopy(mac_addr, rrp->mac_addr_val, ETHERADDRL);

	return (0);

fail:
	/* Clear the address just set */
	(void) bge_unicst_set(bgep, zero_addr, slot);
	mutex_enter(bgep->genlock);
	bgep->curr_addr[slot].set = B_FALSE;
	bgep->unicst_addr_avail++;
	mutex_exit(bgep->genlock);

	return (err);
}

/*
 * Stop classifying packets matching the MAC address to the specified ring.
 * Clears the address slot, disables the two classifier rules reserved by
 * bge_addmac(), and releases the rule bookkeeping.
 */
static int
bge_remmac(void *arg, const uint8_t *mac_addr)
{
	recv_ring_t *rrp = (recv_ring_t *)arg;
	bge_t *bgep = rrp->bgep;
	bge_recv_rule_t *rulep = bgep->recv_rules;
	bge_rule_info_t *rinfop = rrp->mac_addr_rule;
	int start;
	int slot;
	int err;

	/*
	 * Remove the MAC address from its slot.
	 */
	mutex_enter(bgep->genlock);
	slot = bge_unicst_find(bgep, mac_addr);
	if (slot == -1) {
		mutex_exit(bgep->genlock);
		return (EINVAL);
	}

	ASSERT(bgep->curr_addr[slot].set);
	mutex_exit(bgep->genlock);

	if ((err = bge_unicst_set(bgep, zero_addr, slot)) != 0)
		return (err);

	if (rinfop == NULL || ether_cmp(mac_addr, rrp->mac_addr_val) != 0)
		return (EINVAL);

	/* Disable both rule slots reserved for this address */
	start = rinfop->start;
	rulep[start].mask_value = 0;
	rulep[start].control = 0;
	bge_reg_put32(bgep, RECV_RULE_MASK_REG(start), rulep[start].mask_value);
	bge_reg_put32(bgep, RECV_RULE_CONTROL_REG(start), rulep[start].control);
	start++;
	rulep[start].mask_value = 0;
	rulep[start].control = 0;
	bge_reg_put32(bgep, RECV_RULE_MASK_REG(start), rulep[start].mask_value);
	bge_reg_put32(bgep, RECV_RULE_CONTROL_REG(start), rulep[start].control);

	kmem_free(rinfop, sizeof (bge_rule_info_t));
	rrp->mac_addr_rule = NULL;
	bzero(rrp->mac_addr_val, ETHERADDRL);

	mutex_enter(bgep->genlock);
	bgep->curr_addr[slot].set = B_FALSE;
	bgep->unicst_addr_avail++;
	mutex_exit(bgep->genlock);

	return (0);
}


/*
 * MAC ring-level interrupt control: re-enable interrupt delivery for
 * this ring (leave polling mode).
 */
static int
bge_flag_intr_enable(mac_ring_driver_t ih)
{
	recv_ring_t *rrp = (recv_ring_t *)ih;
	bge_t *bgep = rrp->bgep;

	mutex_enter(bgep->genlock);
	rrp->poll_flag = 0;
	mutex_exit(bgep->genlock);

	return (0);
}

/*
 * MAC ring-level interrupt control: disable interrupt delivery for this
 * ring (enter polling mode).
 */
static int
bge_flag_intr_disable(mac_ring_driver_t ih)
{
	recv_ring_t *rrp = (recv_ring_t *)ih;
	bge_t *bgep = rrp->bgep;

	mutex_enter(bgep->genlock);
	rrp->poll_flag = 1;
	mutex_exit(bgep->genlock);

	return (0);
}

/*
 * mri_start callback: record the MAC layer's generation number for this
 * receive ring.
 */
static int
bge_ring_start(mac_ring_driver_t rh, uint64_t mr_gen_num)
{
	recv_ring_t *rx_ring;

	rx_ring = (recv_ring_t *)rh;
	mutex_enter(rx_ring->rx_lock);
	rx_ring->ring_gen_num =
mr_gen_num;
	mutex_exit(rx_ring->rx_lock);
	return (0);
}


/*
 * Callback funtion for MAC layer to register all rings
 * for given ring_group, noted by rg_index.
 */
void
bge_fill_ring(void *arg, mac_ring_type_t rtype, const int rg_index,
    const int index, mac_ring_info_t *infop, mac_ring_handle_t rh)
{
	bge_t *bgep = arg;
	mac_intr_t *mintr;

	switch (rtype) {
	case MAC_RING_TYPE_RX: {
		recv_ring_t *rx_ring;
		/* one ring per group, so index within the group is always 0 */
		ASSERT(rg_index >= 0 && rg_index < MIN(bgep->chipid.rx_rings,
		    MAC_ADDRESS_REGS_MAX) && index == 0);

		rx_ring = &bgep->recv[rg_index];
		rx_ring->ring_handle = rh;

		infop->mri_driver = (mac_ring_driver_t)rx_ring;
		infop->mri_start = bge_ring_start;
		infop->mri_stop = NULL;
		infop->mri_poll = bge_poll_ring;
		infop->mri_stat = bge_rx_ring_stat;

		mintr = &infop->mri_intr;
		mintr->mi_enable = (mac_intr_enable_t)bge_flag_intr_enable;
		mintr->mi_disable = (mac_intr_disable_t)bge_flag_intr_disable;

		break;
	}
	case MAC_RING_TYPE_TX:
	default:
		/* tx rings are not exposed to the MAC layer */
		ASSERT(0);
		break;
	}
}

/*
 * Fill infop passed as argument
 * fill in respective ring_group info
 * Each group has a single ring in it. We keep it simple
 * and use the same internal handle for rings and groups.
 */
void
bge_fill_group(void *arg, mac_ring_type_t rtype, const int rg_index,
    mac_group_info_t * infop, mac_group_handle_t gh)
{
	bge_t *bgep = arg;

	switch (rtype) {
	case MAC_RING_TYPE_RX: {
		recv_ring_t *rx_ring;

		ASSERT(rg_index >= 0 && rg_index < MIN(bgep->chipid.rx_rings,
		    MAC_ADDRESS_REGS_MAX));
		rx_ring = &bgep->recv[rg_index];
		rx_ring->ring_group_handle = gh;

		infop->mgi_driver = (mac_group_driver_t)rx_ring;
		infop->mgi_start = NULL;
		infop->mgi_stop = NULL;
		infop->mgi_addmac = bge_addmac;
		infop->mgi_remmac = bge_remmac;
		infop->mgi_count = 1;
		break;
	}
	case MAC_RING_TYPE_TX:
	default:
		ASSERT(0);
		break;
	}
}


/*
 * bge_m_getcapab() -- report device capabilities (h/w checksum offload
 * and rx ring groups) to the MAC layer.
 */
/*ARGSUSED*/
static boolean_t
bge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
{
	bge_t *bgep = arg;
	mac_capab_rings_t *cap_rings;

	switch (cap) {
	case MAC_CAPAB_HCKSUM: {
		uint32_t *txflags = cap_data;

		*txflags = HCKSUM_INET_FULL_V4 | HCKSUM_IPHDRCKSUM;
		break;
	}

	case MAC_CAPAB_RINGS:
		cap_rings = (mac_capab_rings_t *)cap_data;

		/* Temporarily disable multiple tx rings.
 */
		if (cap_rings->mr_type != MAC_RING_TYPE_RX)
			return (B_FALSE);

		/* one group per ring; same count for both */
		cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
		cap_rings->mr_rnum =
		    cap_rings->mr_gnum =
		    MIN(bgep->chipid.rx_rings, MAC_ADDRESS_REGS_MAX);
		cap_rings->mr_rget = bge_fill_ring;
		cap_rings->mr_gget = bge_fill_group;
		break;

	default:
		return (B_FALSE);
	}
	return (B_TRUE);
}

#ifdef NOT_SUPPORTED_XXX

/*
 * Loopback ioctl code
 */

static lb_property_t loopmodes[] = {
	{ normal,	"normal",	BGE_LOOP_NONE		},
	{ external,	"1000Mbps",	BGE_LOOP_EXTERNAL_1000	},
	{ external,	"100Mbps",	BGE_LOOP_EXTERNAL_100	},
	{ external,	"10Mbps",	BGE_LOOP_EXTERNAL_10	},
	{ internal,	"PHY",		BGE_LOOP_INTERNAL_PHY	},
	{ internal,	"MAC",		BGE_LOOP_INTERNAL_MAC	}
};

static enum ioc_reply
bge_set_loop_mode(bge_t *bgep, uint32_t mode)
{
	/*
	 * If the mode isn't being changed, there's nothing to do ...
	 */
	if (mode == bgep->param_loop_mode)
		return (IOC_ACK);

	/*
	 * Validate the requested mode and prepare a suitable message
	 * to explain the link down/up cycle that the change will
	 * probably induce ...
	 */
	switch (mode) {
	default:
		return (IOC_INVAL);

	case BGE_LOOP_NONE:
	case BGE_LOOP_EXTERNAL_1000:
	case BGE_LOOP_EXTERNAL_100:
	case BGE_LOOP_EXTERNAL_10:
	case BGE_LOOP_INTERNAL_PHY:
	case BGE_LOOP_INTERNAL_MAC:
		break;
	}

	/*
	 * All OK; tell the caller to reprogram
	 * the PHY and/or MAC for the new mode ...
	 */
	bgep->param_loop_mode = mode;
	return (IOC_RESTART_ACK);
}

static enum ioc_reply
bge_loop_ioctl(bge_t *bgep, queue_t *wq, mblk_t *mp, struct iocblk *iocp)
{
	lb_info_sz_t *lbsp;
	lb_property_t *lbpp;
	uint32_t *lbmp;
	int cmd;

	_NOTE(ARGUNUSED(wq))

	/*
	 * Validate format of ioctl
	 */
	if (mp->b_cont == NULL)
		return (IOC_INVAL);

	cmd = iocp->ioc_cmd;
	switch (cmd) {
	default:
		/* NOTREACHED */
		bge_error(bgep, "bge_loop_ioctl: invalid cmd 0x%x", cmd);
		return (IOC_INVAL);

	case LB_GET_INFO_SIZE:
		if (iocp->ioc_count != sizeof (lb_info_sz_t))
			return (IOC_INVAL);
		lbsp = (void *)mp->b_cont->b_rptr;
		*lbsp = sizeof (loopmodes);
		return (IOC_REPLY);

	case LB_GET_INFO:
		if (iocp->ioc_count != sizeof (loopmodes))
			return (IOC_INVAL);
		lbpp = (void *)mp->b_cont->b_rptr;
		bcopy(loopmodes, lbpp, sizeof (loopmodes));
		return (IOC_REPLY);

	case LB_GET_MODE:
		if (iocp->ioc_count != sizeof (uint32_t))
			return (IOC_INVAL);
		lbmp = (void *)mp->b_cont->b_rptr;
		*lbmp = bgep->param_loop_mode;
		return (IOC_REPLY);

	case LB_SET_MODE:
		if (iocp->ioc_count != sizeof (uint32_t))
			return (IOC_INVAL);
		lbmp = (void *)mp->b_cont->b_rptr;
		return (bge_set_loop_mode(bgep, *lbmp));
	}
}

#endif /* NOT_SUPPORTED_XXX */

/*
 * Specific bge IOCTLs, the gld module handles the generic ones.
 */
static void
bge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
{
	bge_t *bgep = arg;
	struct iocblk *iocp;
	enum ioc_reply status;
	boolean_t need_privilege;
	int err;
	int cmd;

	/*
	 * Validate the command before bothering with the mutex ...
 */
	iocp = (void *)mp->b_rptr;
	iocp->ioc_error = 0;
	need_privilege = B_TRUE;
	cmd = iocp->ioc_cmd;
	switch (cmd) {
	default:
		miocnak(wq, mp, 0, EINVAL);
		return;

	case BGE_MII_READ:
	case BGE_MII_WRITE:
	case BGE_SEE_READ:
	case BGE_SEE_WRITE:
	case BGE_FLASH_READ:
	case BGE_FLASH_WRITE:
	case BGE_DIAG:
	case BGE_PEEK:
	case BGE_POKE:
	case BGE_PHY_RESET:
	case BGE_SOFT_RESET:
	case BGE_HARD_RESET:
		break;

#ifdef NOT_SUPPORTED_XXX
	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
	case LB_GET_MODE:
		/* read-only loopback queries need no privilege */
		need_privilege = B_FALSE;
		/* FALLTHRU */
	case LB_SET_MODE:
		break;
#endif

	}

	if (need_privilege) {
		/*
		 * Check for specific net_config privilege on Solaris 10+.
		 */
		err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
		if (err != 0) {
			miocnak(wq, mp, 0, err);
			return;
		}
	}

	mutex_enter(bgep->genlock);
	if (!(bgep->progress & PROGRESS_INTR)) {
		/* can happen during autorecovery */
		mutex_exit(bgep->genlock);
		miocnak(wq, mp, 0, EIO);
		return;
	}

	/* Dispatch to the chip-level or loopback ioctl handler */
	switch (cmd) {
	default:
		_NOTE(NOTREACHED)
		status = IOC_INVAL;
		break;

	case BGE_MII_READ:
	case BGE_MII_WRITE:
	case BGE_SEE_READ:
	case BGE_SEE_WRITE:
	case BGE_FLASH_READ:
	case BGE_FLASH_WRITE:
	case BGE_DIAG:
	case BGE_PEEK:
	case BGE_POKE:
	case BGE_PHY_RESET:
	case BGE_SOFT_RESET:
	case BGE_HARD_RESET:
		status = bge_chip_ioctl(bgep, wq, mp, iocp);
		break;

#ifdef NOT_SUPPORTED_XXX
	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
	case LB_GET_MODE:
	case LB_SET_MODE:
		status = bge_loop_ioctl(bgep, wq, mp, iocp);
		break;
#endif

	}

	/*
	 * Do we need to reprogram the PHY and/or the MAC?
	 * Do it now, while we still have the mutex.
	 *
	 * Note: update the PHY first, 'cos it controls the
	 * speed/duplex parameters that the MAC code uses.
	 */
	switch (status) {
	case IOC_RESTART_REPLY:
	case IOC_RESTART_ACK:
		if (bge_reprogram(bgep) == IOC_INVAL)
			status = IOC_INVAL;
		break;
	}

	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		status = IOC_INVAL;
	}
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		status = IOC_INVAL;
	}
	mutex_exit(bgep->genlock);

	/*
	 * Finally, decide how to reply
	 */
	switch (status) {
	default:
	case IOC_INVAL:
		/*
		 * Error, reply with a NAK and EINVAL or the specified error
		 */
		miocnak(wq, mp, 0, iocp->ioc_error == 0 ?
		    EINVAL : iocp->ioc_error);
		break;

	case IOC_DONE:
		/*
		 * OK, reply already sent
		 */
		break;

	case IOC_RESTART_ACK:
	case IOC_ACK:
		/*
		 * OK, reply with an ACK
		 */
		miocack(wq, mp, 0, 0);
		break;

	case IOC_RESTART_REPLY:
	case IOC_REPLY:
		/*
		 * OK, send prepared reply as ACK or NAK
		 */
		mp->b_datap->db_type = iocp->ioc_error == 0 ?
		    M_IOCACK : M_IOCNAK;
		qreply(wq, mp);
		break;
	}
}

/*
 * ========== Per-instance setup/teardown code ==========
 */

#undef	BGE_DBG
#define	BGE_DBG		BGE_DBG_MEM	/* debug flag for this code	*/
/*
 * Allocate an area of memory and a DMA handle for accessing it.
 *
 * On success, fills in <dma_p> (handle, access handle, kernel VA,
 * single DMA cookie) and returns DDI_SUCCESS.  On failure returns
 * DDI_FAILURE; any handles already allocated remain recorded in <dma_p>
 * so bge_free_dma_mem() can release them.
 */
static int
bge_alloc_dma_mem(bge_t *bgep, size_t memsize, ddi_device_acc_attr_t *attr_p,
    uint_t dma_flags, dma_area_t *dma_p)
{
	caddr_t va;
	int err;

	BGE_TRACE(("bge_alloc_dma_mem($%p, %ld, $%p, 0x%x, $%p)",
	    (void *)bgep, memsize, attr_p, dma_flags, dma_p));

	/*
	 * Allocate handle
	 */
	err = ddi_dma_alloc_handle(bgep->devinfo, &dma_attr,
	    DDI_DMA_DONTWAIT, NULL, &dma_p->dma_hdl);
	if (err != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * Allocate memory
	 */
	err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize, attr_p,
	    dma_flags, DDI_DMA_DONTWAIT, NULL, &va, &dma_p->alength,
	    &dma_p->acc_hdl);
	if (err != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * Bind the two together
	 */
	dma_p->mem_va = va;
	err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL,
	    va, dma_p->alength, dma_flags, DDI_DMA_DONTWAIT, NULL,
	    &dma_p->cookie, &dma_p->ncookies);

	BGE_DEBUG(("bge_alloc_dma_mem(): bind %d bytes; err %d, %d cookies",
	    dma_p->alength, err, dma_p->ncookies));

	/* dma_attr_sgllen is 1, so exactly one cookie is expected */
	if (err != DDI_DMA_MAPPED || dma_p->ncookies != 1)
		return (DDI_FAILURE);

	/* poison the slicing fields; bge_slice_chunk() sets real values */
	dma_p->nslots = ~0U;
	dma_p->size = ~0U;
	dma_p->token = ~0U;
	dma_p->offset = 0;
	return (DDI_SUCCESS);
}

/*
 * Free one allocated area of DMAable memory
 */
static void
bge_free_dma_mem(dma_area_t *dma_p)
{
	if (dma_p->dma_hdl != NULL) {
		if (dma_p->ncookies) {
			(void) ddi_dma_unbind_handle(dma_p->dma_hdl);
			dma_p->ncookies = 0;
		}
		ddi_dma_free_handle(&dma_p->dma_hdl);
		dma_p->dma_hdl = NULL;
	}

	if (dma_p->acc_hdl != NULL) {
		ddi_dma_mem_free(&dma_p->acc_hdl);
		dma_p->acc_hdl = NULL;
	}
}
/*
 * Utility routine to carve a slice off a chunk of allocated memory,
 * updating the chunk descriptor accordingly.  The size of the slice
 * is given by the product of the <qty> and <size> parameters.
 */
static void
bge_slice_chunk(dma_area_t *slice, dma_area_t *chunk,
    uint32_t qty, uint32_t size)
{
	/* monotonically increasing tag so every slice gets a unique token */
	static uint32_t sequence = 0xbcd5704a;
	size_t totsize;

	totsize = qty*size;
	ASSERT(totsize <= chunk->alength);

	*slice = *chunk;
	slice->nslots = qty;
	slice->size = size;
	slice->alength = totsize;
	slice->token = ++sequence;

	/* advance the chunk descriptor past the carved-off slice */
	chunk->mem_va = (caddr_t)chunk->mem_va + totsize;
	chunk->alength -= totsize;
	chunk->offset += totsize;
	chunk->cookie.dmac_laddress += totsize;
	chunk->cookie.dmac_size -= totsize;
}

/*
 * Initialise the specified Receive Producer (Buffer) Ring, using
 * the information in the <dma_area> descriptors that it contains
 * to set up all the other fields. This routine should be called
 * only once for each ring.
 */
static void
bge_init_buff_ring(bge_t *bgep, uint64_t ring)
{
	buff_ring_t *brp;
	bge_status_t *bsp;
	sw_rbd_t *srbdp;
	dma_area_t pbuf;
	uint32_t bufsize;
	uint32_t nslots;
	uint32_t slot;
	uint32_t split;

	/* per-ring-type register/offset lookup tables (std/jumbo/mini) */
	static bge_regno_t nic_ring_addrs[BGE_BUFF_RINGS_MAX] = {
		NIC_MEM_SHADOW_BUFF_STD,
		NIC_MEM_SHADOW_BUFF_JUMBO,
		NIC_MEM_SHADOW_BUFF_MINI
	};
	static bge_regno_t mailbox_regs[BGE_BUFF_RINGS_MAX] = {
		RECV_STD_PROD_INDEX_REG,
		RECV_JUMBO_PROD_INDEX_REG,
		RECV_MINI_PROD_INDEX_REG
	};
	static bge_regno_t buff_cons_xref[BGE_BUFF_RINGS_MAX] = {
		STATUS_STD_BUFF_CONS_INDEX,
		STATUS_JUMBO_BUFF_CONS_INDEX,
		STATUS_MINI_BUFF_CONS_INDEX
	};

	BGE_TRACE(("bge_init_buff_ring($%p, %d)",
	    (void *)bgep, ring));

	brp = &bgep->buff[ring];
	nslots = brp->desc.nslots;
	ASSERT(brp->buf[0].nslots == nslots/BGE_SPLIT);
	bufsize = brp->buf[0].size;

	/*
	 * Set up the copy of the h/w RCB
	 *
	 * Note: unlike Send & Receive Return Rings, (where the max_len
	 * field holds the number of slots), in a Receive Buffer Ring
	 * this field indicates the size of each buffer in the ring.
	 */
	brp->hw_rcb.host_ring_addr = brp->desc.cookie.dmac_laddress;
	brp->hw_rcb.max_len = (uint16_t)bufsize;
	brp->hw_rcb.flags = nslots > 0 ? 0 : RCB_FLAG_RING_DISABLED;
	brp->hw_rcb.nic_ring_addr = nic_ring_addrs[ring];

	/*
	 * Other one-off initialisation of per-ring data
	 */
	brp->bgep = bgep;
	bsp = DMA_VPTR(bgep->status_block);
	brp->cons_index_p = &bsp->buff_cons_index[buff_cons_xref[ring]];
	brp->chip_mbx_reg = mailbox_regs[ring];
	mutex_init(brp->rf_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(bgep->intr_pri));

	/*
	 * Allocate the array of s/w Receive Buffer Descriptors
	 */
	srbdp = kmem_zalloc(nslots*sizeof (*srbdp), KM_SLEEP);
	brp->sw_rbds = srbdp;

	/*
	 * Now initialise each array element once and for all
	 */
	for (split = 0; split < BGE_SPLIT; ++split) {
		pbuf = brp->buf[split];
		for (slot = 0; slot < nslots/BGE_SPLIT; ++srbdp, ++slot)
			bge_slice_chunk(&srbdp->pbuf, &pbuf, 1, bufsize);
		ASSERT(pbuf.alength == 0);
	}
}

/*
 * Clean up initialisation done above before the memory is freed
 */
static void
bge_fini_buff_ring(bge_t *bgep, uint64_t ring)
{
	buff_ring_t *brp;
	sw_rbd_t *srbdp;

	BGE_TRACE(("bge_fini_buff_ring($%p, %d)",
	    (void *)bgep, ring));

	brp = &bgep->buff[ring];
	srbdp = brp->sw_rbds;
	kmem_free(srbdp, brp->desc.nslots*sizeof (*srbdp));

	mutex_destroy(brp->rf_lock);
}

/*
 * Initialise the specified Receive (Return) Ring, using the
 * information in the <dma_area> descriptors that it contains
 * to set up all the other fields. This routine should be called
 * only once for each ring.
 */
static void
bge_init_recv_ring(bge_t *bgep, uint64_t ring)
{
	recv_ring_t *rrp;
	bge_status_t *bsp;
	uint32_t nslots;

	BGE_TRACE(("bge_init_recv_ring($%p, %d)",
	    (void *)bgep, ring));

	/*
	 * The chip architecture requires that receive return rings have
	 * 512 or 1024 or 2048 elements per ring. See 570X-PG108-R page 103.
	 */
	rrp = &bgep->recv[ring];
	nslots = rrp->desc.nslots;
	ASSERT(nslots == 0 || nslots == 512 ||
	    nslots == 1024 || nslots == 2048);

	/*
	 * Set up the copy of the h/w RCB
	 */
	rrp->hw_rcb.host_ring_addr = rrp->desc.cookie.dmac_laddress;
	rrp->hw_rcb.max_len = (uint16_t)nslots;
	rrp->hw_rcb.flags = nslots > 0 ? 0 : RCB_FLAG_RING_DISABLED;
	rrp->hw_rcb.nic_ring_addr = 0;

	/*
	 * Other one-off initialisation of per-ring data
	 */
	rrp->bgep = bgep;
	bsp = DMA_VPTR(bgep->status_block);
	rrp->prod_index_p = RECV_INDEX_P(bsp, ring);
	rrp->chip_mbx_reg = RECV_RING_CONS_INDEX_REG(ring);
	mutex_init(rrp->rx_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(bgep->intr_pri));
}


/*
 * Clean up initialisation done above before the memory is freed
 */
static void
bge_fini_recv_ring(bge_t *bgep, uint64_t ring)
{
	recv_ring_t *rrp;

	BGE_TRACE(("bge_fini_recv_ring($%p, %d)",
	    (void *)bgep, ring));

	rrp = &bgep->recv[ring];
	if (rrp->rx_softint)
		ddi_remove_softintr(rrp->rx_softint);
	mutex_destroy(rrp->rx_lock);
}

/*
 * Initialise the specified Send Ring, using the information in the
 * <dma_area> descriptors that it contains to set up all the other
 * fields. This routine should be called only once for each ring.
 */
static void
bge_init_send_ring(bge_t *bgep, uint64_t ring)
{
	send_ring_t *srp;
	bge_status_t *bsp;
	sw_sbd_t *ssbdp;
	dma_area_t desc;
	dma_area_t pbuf;
	uint32_t nslots;
	uint32_t slot;
	uint32_t split;
	sw_txbuf_t *txbuf;

	BGE_TRACE(("bge_init_send_ring($%p, %d)",
	    (void *)bgep, ring));

	/*
	 * The chip architecture requires that host-based send rings
	 * have 512 elements per ring. See 570X-PG102-R page 56.
	 */
	srp = &bgep->send[ring];
	nslots = srp->desc.nslots;
	ASSERT(nslots == 0 || nslots == 512);

	/*
	 * Set up the copy of the h/w RCB
	 */
	srp->hw_rcb.host_ring_addr = srp->desc.cookie.dmac_laddress;
	srp->hw_rcb.max_len = (uint16_t)nslots;
	srp->hw_rcb.flags = nslots > 0 ? 0 : RCB_FLAG_RING_DISABLED;
	srp->hw_rcb.nic_ring_addr = NIC_MEM_SHADOW_SEND_RING(ring, nslots);

	/*
	 * Other one-off initialisation of per-ring data
	 */
	srp->bgep = bgep;
	bsp = DMA_VPTR(bgep->status_block);
	srp->cons_index_p = SEND_INDEX_P(bsp, ring);
	srp->chip_mbx_reg = SEND_RING_HOST_INDEX_REG(ring);
	mutex_init(srp->tx_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(bgep->intr_pri));
	mutex_init(srp->txbuf_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(bgep->intr_pri));
	mutex_init(srp->freetxbuf_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(bgep->intr_pri));
	mutex_init(srp->tc_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(bgep->intr_pri));
	/* a zero-slot ring is disabled: no buffers to set up */
	if (nslots == 0)
		return;

	/*
	 * Allocate the array of s/w Send Buffer Descriptors
	 */
	ssbdp = kmem_zalloc(nslots*sizeof (*ssbdp), KM_SLEEP);
	txbuf = kmem_zalloc(BGE_SEND_BUF_MAX*sizeof (*txbuf), KM_SLEEP);
	srp->txbuf_head =
	    kmem_zalloc(BGE_SEND_BUF_MAX*sizeof (bge_queue_item_t), KM_SLEEP);
	srp->pktp = kmem_zalloc(BGE_SEND_BUF_MAX*sizeof (send_pkt_t), KM_SLEEP);
	srp->sw_sbds = ssbdp;
	srp->txbuf = txbuf;
	srp->tx_buffers = BGE_SEND_BUF_NUM;
	srp->tx_buffers_low = srp->tx_buffers / 4;
	if (bgep->chipid.snd_buff_size > BGE_SEND_BUFF_SIZE_DEFAULT)
		srp->tx_array_max = BGE_SEND_BUF_ARRAY_JUMBO;
	else
		srp->tx_array_max = BGE_SEND_BUF_ARRAY;
	srp->tx_array = 1;

	/*
	 * Chunk tx desc area
	 */
	desc = srp->desc;
	for (slot = 0; slot < nslots; ++ssbdp, ++slot) {
		bge_slice_chunk(&ssbdp->desc, &desc, 1,
		    sizeof (bge_sbd_t));
	}
	ASSERT(desc.alength == 0);

	/*
	 * Chunk tx buffer area
	 */
	for (split = 0; split < BGE_SPLIT; ++split) {
		pbuf = srp->buf[0][split];
		for (slot = 0; slot < BGE_SEND_BUF_NUM/BGE_SPLIT; ++slot) {
			bge_slice_chunk(&txbuf->buf, &pbuf, 1,
			    bgep->chipid.snd_buff_size);
			txbuf++;
		}
		ASSERT(pbuf.alength == 0);
	}
}

/*
 * Clean up initialisation done above before the memory is freed
 */
static void
bge_fini_send_ring(bge_t *bgep, uint64_t ring)
{
	send_ring_t *srp;
	uint32_t array;
	uint32_t split;
	uint32_t nslots;

	BGE_TRACE(("bge_fini_send_ring($%p, %d)",
	    (void *)bgep, ring));

	srp = &bgep->send[ring];
	mutex_destroy(srp->tc_lock);
	mutex_destroy(srp->freetxbuf_lock);
	mutex_destroy(srp->txbuf_lock);
	mutex_destroy(srp->tx_lock);
	nslots = srp->desc.nslots;
	if (nslots == 0)
		return;

	/* arrays [1..tx_array) were allocated later; array 0 is freed
	 * with the main ring memory elsewhere */
	for (array = 1; array < srp->tx_array; ++array)
		for (split = 0; split < BGE_SPLIT; ++split)
			bge_free_dma_mem(&srp->buf[array][split]);
	kmem_free(srp->sw_sbds, nslots*sizeof (*srp->sw_sbds));
	kmem_free(srp->txbuf_head, BGE_SEND_BUF_MAX*sizeof (*srp->txbuf_head));
	kmem_free(srp->txbuf, BGE_SEND_BUF_MAX*sizeof (*srp->txbuf));
	kmem_free(srp->pktp, BGE_SEND_BUF_MAX*sizeof (*srp->pktp));
	srp->sw_sbds = NULL;
	srp->txbuf_head = NULL;
	srp->txbuf = NULL;
	srp->pktp = NULL;
}

/*
 * Initialise all transmit, receive, and buffer rings.
 */
void
bge_init_rings(bge_t *bgep)
{
	uint32_t ring;

	BGE_TRACE(("bge_init_rings($%p)", (void *)bgep));

	/*
	 * Perform one-off initialisation of each ring ...
2506 */ 2507 for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring) 2508 bge_init_send_ring(bgep, ring); 2509 for (ring = 0; ring < BGE_RECV_RINGS_MAX; ++ring) 2510 bge_init_recv_ring(bgep, ring); 2511 for (ring = 0; ring < BGE_BUFF_RINGS_MAX; ++ring) 2512 bge_init_buff_ring(bgep, ring); 2513 } 2514 2515 /* 2516 * Undo the work of bge_init_rings() above before the memory is freed 2517 */ 2518 void 2519 bge_fini_rings(bge_t *bgep) 2520 { 2521 uint32_t ring; 2522 2523 BGE_TRACE(("bge_fini_rings($%p)", (void *)bgep)); 2524 2525 for (ring = 0; ring < BGE_BUFF_RINGS_MAX; ++ring) 2526 bge_fini_buff_ring(bgep, ring); 2527 for (ring = 0; ring < BGE_RECV_RINGS_MAX; ++ring) 2528 bge_fini_recv_ring(bgep, ring); 2529 for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring) 2530 bge_fini_send_ring(bgep, ring); 2531 } 2532 2533 /* 2534 * Called from the bge_m_stop() to free the tx buffers which are 2535 * allocated from the tx process. 2536 */ 2537 void 2538 bge_free_txbuf_arrays(send_ring_t *srp) 2539 { 2540 uint32_t array; 2541 uint32_t split; 2542 2543 ASSERT(mutex_owned(srp->tx_lock)); 2544 2545 /* 2546 * Free the extra tx buffer DMA area 2547 */ 2548 for (array = 1; array < srp->tx_array; ++array) 2549 for (split = 0; split < BGE_SPLIT; ++split) 2550 bge_free_dma_mem(&srp->buf[array][split]); 2551 2552 /* 2553 * Restore initial tx buffer numbers 2554 */ 2555 srp->tx_array = 1; 2556 srp->tx_buffers = BGE_SEND_BUF_NUM; 2557 srp->tx_buffers_low = srp->tx_buffers / 4; 2558 srp->tx_flow = 0; 2559 bzero(srp->pktp, BGE_SEND_BUF_MAX * sizeof (*srp->pktp)); 2560 } 2561 2562 /* 2563 * Called from tx process to allocate more tx buffers 2564 */ 2565 bge_queue_item_t * 2566 bge_alloc_txbuf_array(bge_t *bgep, send_ring_t *srp) 2567 { 2568 bge_queue_t *txbuf_queue; 2569 bge_queue_item_t *txbuf_item_last; 2570 bge_queue_item_t *txbuf_item; 2571 bge_queue_item_t *txbuf_item_rtn; 2572 sw_txbuf_t *txbuf; 2573 dma_area_t area; 2574 size_t txbuffsize; 2575 uint32_t slot; 2576 uint32_t array; 2577 uint32_t 
split; 2578 uint32_t err; 2579 2580 ASSERT(mutex_owned(srp->tx_lock)); 2581 2582 array = srp->tx_array; 2583 if (array >= srp->tx_array_max) 2584 return (NULL); 2585 2586 /* 2587 * Allocate memory & handles for TX buffers 2588 */ 2589 txbuffsize = BGE_SEND_BUF_NUM*bgep->chipid.snd_buff_size; 2590 ASSERT((txbuffsize % BGE_SPLIT) == 0); 2591 for (split = 0; split < BGE_SPLIT; ++split) { 2592 err = bge_alloc_dma_mem(bgep, txbuffsize/BGE_SPLIT, 2593 &bge_data_accattr, DDI_DMA_WRITE | BGE_DMA_MODE, 2594 &srp->buf[array][split]); 2595 if (err != DDI_SUCCESS) { 2596 /* Free the last already allocated OK chunks */ 2597 for (slot = 0; slot <= split; ++slot) 2598 bge_free_dma_mem(&srp->buf[array][slot]); 2599 srp->tx_alloc_fail++; 2600 return (NULL); 2601 } 2602 } 2603 2604 /* 2605 * Chunk tx buffer area 2606 */ 2607 txbuf = srp->txbuf + array*BGE_SEND_BUF_NUM; 2608 for (split = 0; split < BGE_SPLIT; ++split) { 2609 area = srp->buf[array][split]; 2610 for (slot = 0; slot < BGE_SEND_BUF_NUM/BGE_SPLIT; ++slot) { 2611 bge_slice_chunk(&txbuf->buf, &area, 1, 2612 bgep->chipid.snd_buff_size); 2613 txbuf++; 2614 } 2615 } 2616 2617 /* 2618 * Add above buffers to the tx buffer pop queue 2619 */ 2620 txbuf_item = srp->txbuf_head + array*BGE_SEND_BUF_NUM; 2621 txbuf = srp->txbuf + array*BGE_SEND_BUF_NUM; 2622 txbuf_item_last = NULL; 2623 for (slot = 0; slot < BGE_SEND_BUF_NUM; ++slot) { 2624 txbuf_item->item = txbuf; 2625 txbuf_item->next = txbuf_item_last; 2626 txbuf_item_last = txbuf_item; 2627 txbuf++; 2628 txbuf_item++; 2629 } 2630 txbuf_item = srp->txbuf_head + array*BGE_SEND_BUF_NUM; 2631 txbuf_item_rtn = txbuf_item; 2632 txbuf_item++; 2633 txbuf_queue = srp->txbuf_pop_queue; 2634 mutex_enter(txbuf_queue->lock); 2635 txbuf_item->next = txbuf_queue->head; 2636 txbuf_queue->head = txbuf_item_last; 2637 txbuf_queue->count += BGE_SEND_BUF_NUM - 1; 2638 mutex_exit(txbuf_queue->lock); 2639 2640 srp->tx_array++; 2641 srp->tx_buffers += BGE_SEND_BUF_NUM; 2642 srp->tx_buffers_low = 
srp->tx_buffers / 4; 2643 2644 return (txbuf_item_rtn); 2645 } 2646 2647 /* 2648 * This function allocates all the transmit and receive buffers 2649 * and descriptors, in four chunks. 2650 */ 2651 int 2652 bge_alloc_bufs(bge_t *bgep) 2653 { 2654 dma_area_t area; 2655 size_t rxbuffsize; 2656 size_t txbuffsize; 2657 size_t rxbuffdescsize; 2658 size_t rxdescsize; 2659 size_t txdescsize; 2660 uint32_t ring; 2661 uint32_t rx_rings = bgep->chipid.rx_rings; 2662 uint32_t tx_rings = bgep->chipid.tx_rings; 2663 int split; 2664 int err; 2665 2666 BGE_TRACE(("bge_alloc_bufs($%p)", 2667 (void *)bgep)); 2668 2669 rxbuffsize = BGE_STD_SLOTS_USED*bgep->chipid.std_buf_size; 2670 rxbuffsize += bgep->chipid.jumbo_slots*bgep->chipid.recv_jumbo_size; 2671 rxbuffsize += BGE_MINI_SLOTS_USED*BGE_MINI_BUFF_SIZE; 2672 2673 txbuffsize = BGE_SEND_BUF_NUM*bgep->chipid.snd_buff_size; 2674 txbuffsize *= tx_rings; 2675 2676 rxdescsize = rx_rings*bgep->chipid.recv_slots; 2677 rxdescsize *= sizeof (bge_rbd_t); 2678 2679 rxbuffdescsize = BGE_STD_SLOTS_USED; 2680 rxbuffdescsize += bgep->chipid.jumbo_slots; 2681 rxbuffdescsize += BGE_MINI_SLOTS_USED; 2682 rxbuffdescsize *= sizeof (bge_rbd_t); 2683 2684 txdescsize = tx_rings*BGE_SEND_SLOTS_USED; 2685 txdescsize *= sizeof (bge_sbd_t); 2686 txdescsize += sizeof (bge_statistics_t); 2687 txdescsize += sizeof (bge_status_t); 2688 txdescsize += BGE_STATUS_PADDING; 2689 2690 /* 2691 * Enable PCI relaxed ordering only for RX/TX data buffers 2692 */ 2693 if (!(DEVICE_5717_SERIES_CHIPSETS(bgep) || 2694 DEVICE_5725_SERIES_CHIPSETS(bgep))) { 2695 if (bge_relaxed_ordering) 2696 dma_attr.dma_attr_flags |= DDI_DMA_RELAXED_ORDERING; 2697 } 2698 2699 /* 2700 * Allocate memory & handles for RX buffers 2701 */ 2702 ASSERT((rxbuffsize % BGE_SPLIT) == 0); 2703 for (split = 0; split < BGE_SPLIT; ++split) { 2704 err = bge_alloc_dma_mem(bgep, rxbuffsize/BGE_SPLIT, 2705 &bge_data_accattr, DDI_DMA_READ | BGE_DMA_MODE, 2706 &bgep->rx_buff[split]); 2707 if (err != DDI_SUCCESS) 
2708 return (DDI_FAILURE); 2709 } 2710 BGE_DEBUG(("DMA ALLOC: allocated %d chunks for Rx Buffers (rxbuffsize = %d)", 2711 rxbuffsize/BGE_SPLIT, 2712 rxbuffsize)); 2713 2714 /* 2715 * Allocate memory & handles for TX buffers 2716 */ 2717 ASSERT((txbuffsize % BGE_SPLIT) == 0); 2718 for (split = 0; split < BGE_SPLIT; ++split) { 2719 err = bge_alloc_dma_mem(bgep, txbuffsize/BGE_SPLIT, 2720 &bge_data_accattr, DDI_DMA_WRITE | BGE_DMA_MODE, 2721 &bgep->tx_buff[split]); 2722 if (err != DDI_SUCCESS) 2723 return (DDI_FAILURE); 2724 } 2725 BGE_DEBUG(("DMA ALLOC: allocated %d chunks for Tx Buffers (txbuffsize = %d)", 2726 txbuffsize/BGE_SPLIT, 2727 txbuffsize)); 2728 2729 if (!(DEVICE_5717_SERIES_CHIPSETS(bgep) || 2730 DEVICE_5725_SERIES_CHIPSETS(bgep))) { 2731 /* no relaxed ordering for descriptors rings? */ 2732 dma_attr.dma_attr_flags &= ~DDI_DMA_RELAXED_ORDERING; 2733 } 2734 2735 /* 2736 * Allocate memory & handles for receive return rings 2737 */ 2738 ASSERT((rxdescsize % rx_rings) == 0); 2739 for (split = 0; split < rx_rings; ++split) { 2740 err = bge_alloc_dma_mem(bgep, rxdescsize/rx_rings, 2741 &bge_desc_accattr, DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 2742 &bgep->rx_desc[split]); 2743 if (err != DDI_SUCCESS) 2744 return (DDI_FAILURE); 2745 } 2746 BGE_DEBUG(("DMA ALLOC: allocated %d chunks for Rx Descs cons (rx_rings = %d, rxdescsize = %d)", 2747 rxdescsize/rx_rings, 2748 rx_rings, 2749 rxdescsize)); 2750 2751 /* 2752 * Allocate memory & handles for buffer (producer) descriptor rings. 2753 * Note that split=rx_rings. 
2754 */ 2755 err = bge_alloc_dma_mem(bgep, rxbuffdescsize, &bge_desc_accattr, 2756 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &bgep->rx_desc[split]); 2757 if (err != DDI_SUCCESS) 2758 return (DDI_FAILURE); 2759 BGE_DEBUG(("DMA ALLOC: allocated 1 chunks for Rx Descs prod (rxbuffdescsize = %d)", 2760 rxdescsize)); 2761 2762 /* 2763 * Allocate memory & handles for TX descriptor rings, 2764 * status block, and statistics area 2765 */ 2766 err = bge_alloc_dma_mem(bgep, txdescsize, &bge_desc_accattr, 2767 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &bgep->tx_desc); 2768 if (err != DDI_SUCCESS) 2769 return (DDI_FAILURE); 2770 BGE_DEBUG(("DMA ALLOC: allocated 1 chunks for Tx Descs / Status Block / Stats (txdescdize = %d)", 2771 txdescsize)); 2772 2773 /* 2774 * Now carve up each of the allocated areas ... 2775 */ 2776 2777 /* rx buffers */ 2778 for (split = 0; split < BGE_SPLIT; ++split) { 2779 area = bgep->rx_buff[split]; 2780 2781 BGE_DEBUG(("RXB CHNK %d INIT: va=%p alen=%d off=%d pa=%llx psz=%d", 2782 split, 2783 area.mem_va, 2784 area.alength, 2785 area.offset, 2786 area.cookie.dmac_laddress, 2787 area.cookie.dmac_size)); 2788 2789 bge_slice_chunk(&bgep->buff[BGE_STD_BUFF_RING].buf[split], 2790 &area, BGE_STD_SLOTS_USED/BGE_SPLIT, 2791 bgep->chipid.std_buf_size); 2792 2793 BGE_DEBUG(("RXB SLCE %d STND: va=%p alen=%d off=%d pa=%llx psz=%d (nslots=%d slotlen=%d)", 2794 split, 2795 bgep->buff[BGE_STD_BUFF_RING].buf[split].mem_va, 2796 bgep->buff[BGE_STD_BUFF_RING].buf[split].alength, 2797 bgep->buff[BGE_STD_BUFF_RING].buf[split].offset, 2798 bgep->buff[BGE_STD_BUFF_RING].buf[split].cookie.dmac_laddress, 2799 bgep->buff[BGE_STD_BUFF_RING].buf[split].cookie.dmac_size, 2800 BGE_STD_SLOTS_USED/BGE_SPLIT, 2801 bgep->chipid.std_buf_size)); 2802 2803 bge_slice_chunk(&bgep->buff[BGE_JUMBO_BUFF_RING].buf[split], 2804 &area, bgep->chipid.jumbo_slots/BGE_SPLIT, 2805 bgep->chipid.recv_jumbo_size); 2806 2807 if ((bgep->chipid.jumbo_slots / BGE_SPLIT) > 0) 2808 { 2809 BGE_DEBUG(("RXB SLCE %d JUMB: 
va=%p alen=%d off=%d pa=%llx psz=%d (nslots=%d slotlen=%d)", 2810 split, 2811 bgep->buff[BGE_JUMBO_BUFF_RING].buf[split].mem_va, 2812 bgep->buff[BGE_JUMBO_BUFF_RING].buf[split].alength, 2813 bgep->buff[BGE_JUMBO_BUFF_RING].buf[split].offset, 2814 bgep->buff[BGE_JUMBO_BUFF_RING].buf[split].cookie.dmac_laddress, 2815 bgep->buff[BGE_JUMBO_BUFF_RING].buf[split].cookie.dmac_size, 2816 bgep->chipid.jumbo_slots/BGE_SPLIT, 2817 bgep->chipid.recv_jumbo_size)); 2818 } 2819 2820 bge_slice_chunk(&bgep->buff[BGE_MINI_BUFF_RING].buf[split], 2821 &area, BGE_MINI_SLOTS_USED/BGE_SPLIT, 2822 BGE_MINI_BUFF_SIZE); 2823 2824 if ((BGE_MINI_SLOTS_USED / BGE_SPLIT) > 0) 2825 { 2826 BGE_DEBUG(("RXB SLCE %d MINI: va=%p alen=%d off=%d pa=%llx psz=%d (nslots=%d slotlen=%d)", 2827 split, 2828 bgep->buff[BGE_MINI_BUFF_RING].buf[split].mem_va, 2829 bgep->buff[BGE_MINI_BUFF_RING].buf[split].alength, 2830 bgep->buff[BGE_MINI_BUFF_RING].buf[split].offset, 2831 bgep->buff[BGE_MINI_BUFF_RING].buf[split].cookie.dmac_laddress, 2832 bgep->buff[BGE_MINI_BUFF_RING].buf[split].cookie.dmac_size, 2833 BGE_MINI_SLOTS_USED/BGE_SPLIT, 2834 BGE_MINI_BUFF_SIZE)); 2835 } 2836 2837 BGE_DEBUG(("RXB CHNK %d DONE: va=%p alen=%d off=%d pa=%llx psz=%d", 2838 split, 2839 area.mem_va, 2840 area.alength, 2841 area.offset, 2842 area.cookie.dmac_laddress, 2843 area.cookie.dmac_size)); 2844 } 2845 2846 /* tx buffers */ 2847 for (split = 0; split < BGE_SPLIT; ++split) { 2848 area = bgep->tx_buff[split]; 2849 2850 BGE_DEBUG(("TXB CHNK %d INIT: va=%p alen=%d off=%d pa=%llx psz=%d", 2851 split, 2852 area.mem_va, 2853 area.alength, 2854 area.offset, 2855 area.cookie.dmac_laddress, 2856 area.cookie.dmac_size)); 2857 2858 for (ring = 0; ring < tx_rings; ++ring) { 2859 bge_slice_chunk(&bgep->send[ring].buf[0][split], 2860 &area, BGE_SEND_BUF_NUM/BGE_SPLIT, 2861 bgep->chipid.snd_buff_size); 2862 2863 BGE_DEBUG(("TXB SLCE %d RING %d: va=%p alen=%d off=%d pa=%llx psz=%d (nslots=%d slotlen=%d)", 2864 split, ring, 2865 
bgep->send[ring].buf[0][split].mem_va, 2866 bgep->send[ring].buf[0][split].alength, 2867 bgep->send[ring].buf[0][split].offset, 2868 bgep->send[ring].buf[0][split].cookie.dmac_laddress, 2869 bgep->send[ring].buf[0][split].cookie.dmac_size, 2870 BGE_SEND_BUF_NUM/BGE_SPLIT, 2871 bgep->chipid.snd_buff_size)); 2872 } 2873 2874 for (; ring < BGE_SEND_RINGS_MAX; ++ring) { 2875 bge_slice_chunk(&bgep->send[ring].buf[0][split], 2876 &area, 0, bgep->chipid.snd_buff_size); 2877 } 2878 2879 BGE_DEBUG(("TXB CHNK %d DONE: va=%p alen=%d off=%d pa=%llx psz=%d", 2880 split, 2881 area.mem_va, 2882 area.alength, 2883 area.offset, 2884 area.cookie.dmac_laddress, 2885 area.cookie.dmac_size)); 2886 } 2887 2888 for (ring = 0; ring < rx_rings; ++ring) { 2889 bge_slice_chunk(&bgep->recv[ring].desc, &bgep->rx_desc[ring], 2890 bgep->chipid.recv_slots, sizeof (bge_rbd_t)); 2891 2892 BGE_DEBUG(("RXD CONS RING %d: va=%p alen=%d off=%d pa=%llx psz=%d (nslots=%d slotlen=%d)", 2893 ring, 2894 bgep->recv[ring].desc.mem_va, 2895 bgep->recv[ring].desc.alength, 2896 bgep->recv[ring].desc.offset, 2897 bgep->recv[ring].desc.cookie.dmac_laddress, 2898 bgep->recv[ring].desc.cookie.dmac_size, 2899 bgep->chipid.recv_slots, 2900 sizeof(bge_rbd_t))); 2901 } 2902 2903 /* dma alloc for rxbuffdescsize is located at bgep->rx_desc[#rings] */ 2904 area = bgep->rx_desc[rx_rings]; /* note rx_rings = one beyond rings */ 2905 2906 for (; ring < BGE_RECV_RINGS_MAX; ++ring) /* skip unused rings */ 2907 bge_slice_chunk(&bgep->recv[ring].desc, &area, 2908 0, sizeof (bge_rbd_t)); 2909 2910 BGE_DEBUG(("RXD PROD INIT: va=%p alen=%d off=%d pa=%llx psz=%d", 2911 area.mem_va, 2912 area.alength, 2913 area.offset, 2914 area.cookie.dmac_laddress, 2915 area.cookie.dmac_size)); 2916 2917 bge_slice_chunk(&bgep->buff[BGE_STD_BUFF_RING].desc, &area, 2918 BGE_STD_SLOTS_USED, sizeof (bge_rbd_t)); 2919 BGE_DEBUG(("RXD PROD STND: va=%p alen=%d off=%d pa=%llx psz=%d (nslots=%d slotlen=%d)", 2920 bgep->buff[BGE_STD_BUFF_RING].desc.mem_va, 
2921 bgep->buff[BGE_STD_BUFF_RING].desc.alength, 2922 bgep->buff[BGE_STD_BUFF_RING].desc.offset, 2923 bgep->buff[BGE_STD_BUFF_RING].desc.cookie.dmac_laddress, 2924 bgep->buff[BGE_STD_BUFF_RING].desc.cookie.dmac_size, 2925 BGE_STD_SLOTS_USED, 2926 sizeof(bge_rbd_t))); 2927 2928 bge_slice_chunk(&bgep->buff[BGE_JUMBO_BUFF_RING].desc, &area, 2929 bgep->chipid.jumbo_slots, sizeof (bge_rbd_t)); 2930 BGE_DEBUG(("RXD PROD JUMB: va=%p alen=%d off=%d pa=%llx psz=%d (nslots=%d slotlen=%d)", 2931 bgep->buff[BGE_JUMBO_BUFF_RING].desc.mem_va, 2932 bgep->buff[BGE_JUMBO_BUFF_RING].desc.alength, 2933 bgep->buff[BGE_JUMBO_BUFF_RING].desc.offset, 2934 bgep->buff[BGE_JUMBO_BUFF_RING].desc.cookie.dmac_laddress, 2935 bgep->buff[BGE_JUMBO_BUFF_RING].desc.cookie.dmac_size, 2936 bgep->chipid.jumbo_slots, 2937 sizeof(bge_rbd_t))); 2938 2939 bge_slice_chunk(&bgep->buff[BGE_MINI_BUFF_RING].desc, &area, 2940 BGE_MINI_SLOTS_USED, sizeof (bge_rbd_t)); 2941 BGE_DEBUG(("RXD PROD MINI: va=%p alen=%d off=%d pa=%llx psz=%d (nslots=%d slotlen=%d)", 2942 bgep->buff[BGE_MINI_BUFF_RING].desc.mem_va, 2943 bgep->buff[BGE_MINI_BUFF_RING].desc.alength, 2944 bgep->buff[BGE_MINI_BUFF_RING].desc.offset, 2945 bgep->buff[BGE_MINI_BUFF_RING].desc.cookie.dmac_laddress, 2946 bgep->buff[BGE_MINI_BUFF_RING].desc.cookie.dmac_size, 2947 BGE_MINI_SLOTS_USED, 2948 sizeof(bge_rbd_t))); 2949 2950 BGE_DEBUG(("RXD PROD DONE: va=%p alen=%d off=%d pa=%llx psz=%d", 2951 area.mem_va, 2952 area.alength, 2953 area.offset, 2954 area.cookie.dmac_laddress, 2955 area.cookie.dmac_size)); 2956 2957 ASSERT(area.alength == 0); 2958 2959 area = bgep->tx_desc; 2960 2961 BGE_DEBUG(("TXD INIT: va=%p alen=%d off=%d pa=%llx psz=%d", 2962 area.mem_va, 2963 area.alength, 2964 area.offset, 2965 area.cookie.dmac_laddress, 2966 area.cookie.dmac_size)); 2967 2968 for (ring = 0; ring < tx_rings; ++ring) { 2969 bge_slice_chunk(&bgep->send[ring].desc, &area, 2970 BGE_SEND_SLOTS_USED, sizeof (bge_sbd_t)); 2971 2972 BGE_DEBUG(("TXD RING %d: va=%p alen=%d 
off=%d pa=%llx psz=%d (nslots=%d slotlen=%d)", 2973 ring, 2974 bgep->send[ring].desc.mem_va, 2975 bgep->send[ring].desc.alength, 2976 bgep->send[ring].desc.offset, 2977 bgep->send[ring].desc.cookie.dmac_laddress, 2978 bgep->send[ring].desc.cookie.dmac_size, 2979 BGE_SEND_SLOTS_USED, 2980 sizeof(bge_sbd_t))); 2981 } 2982 2983 for (; ring < BGE_SEND_RINGS_MAX; ++ring) /* skip unused rings */ 2984 bge_slice_chunk(&bgep->send[ring].desc, &area, 2985 0, sizeof (bge_sbd_t)); 2986 2987 bge_slice_chunk(&bgep->statistics, &area, 1, sizeof (bge_statistics_t)); 2988 BGE_DEBUG(("TXD STATISTICS: va=%p alen=%d off=%d pa=%llx psz=%d (nslots=%d slotlen=%d)", 2989 bgep->statistics.mem_va, 2990 bgep->statistics.alength, 2991 bgep->statistics.offset, 2992 bgep->statistics.cookie.dmac_laddress, 2993 bgep->statistics.cookie.dmac_size, 2994 1, 2995 sizeof(bge_statistics_t))); 2996 2997 bge_slice_chunk(&bgep->status_block, &area, 1, sizeof (bge_status_t)); 2998 BGE_DEBUG(("TXD STATUS BLOCK: va=%p alen=%d off=%d pa=%llx psz=%d (nslots=%d slotlen=%d)", 2999 bgep->status_block.mem_va, 3000 bgep->status_block.alength, 3001 bgep->status_block.offset, 3002 bgep->status_block.cookie.dmac_laddress, 3003 bgep->status_block.cookie.dmac_size, 3004 1, 3005 sizeof(bge_status_t))); 3006 3007 BGE_DEBUG(("TXD DONE: va=%p alen=%d off=%d pa=%llx psz=%d", 3008 area.mem_va, 3009 area.alength, 3010 area.offset, 3011 area.cookie.dmac_laddress, 3012 area.cookie.dmac_size)); 3013 3014 ASSERT(area.alength == BGE_STATUS_PADDING); 3015 3016 DMA_ZERO(bgep->status_block); 3017 3018 return (DDI_SUCCESS); 3019 } 3020 3021 #undef BGE_DBG 3022 #define BGE_DBG BGE_DBG_INIT /* debug flag for this code */ 3023 3024 /* 3025 * This routine frees the transmit and receive buffers and descriptors. 3026 * Make sure the chip is stopped before calling it! 
3027 */ 3028 void 3029 bge_free_bufs(bge_t *bgep) 3030 { 3031 int split; 3032 3033 BGE_TRACE(("bge_free_bufs($%p)", 3034 (void *)bgep)); 3035 3036 bge_free_dma_mem(&bgep->tx_desc); 3037 for (split = 0; split < BGE_RECV_RINGS_SPLIT; ++split) 3038 bge_free_dma_mem(&bgep->rx_desc[split]); 3039 for (split = 0; split < BGE_SPLIT; ++split) 3040 bge_free_dma_mem(&bgep->tx_buff[split]); 3041 for (split = 0; split < BGE_SPLIT; ++split) 3042 bge_free_dma_mem(&bgep->rx_buff[split]); 3043 } 3044 3045 /* 3046 * Determine (initial) MAC address ("BIA") to use for this interface 3047 */ 3048 3049 static void 3050 bge_find_mac_address(bge_t *bgep, chip_id_t *cidp) 3051 { 3052 struct ether_addr sysaddr; 3053 char propbuf[8]; /* "true" or "false", plus NUL */ 3054 uchar_t *bytes; 3055 int *ints; 3056 uint_t nelts; 3057 int err; 3058 3059 BGE_TRACE(("bge_find_mac_address($%p)", 3060 (void *)bgep)); 3061 3062 BGE_DEBUG(("bge_find_mac_address: hw_mac_addr %012llx, => %s (%sset)", 3063 cidp->hw_mac_addr, 3064 ether_sprintf((void *)cidp->vendor_addr.addr), 3065 cidp->vendor_addr.set ? "" : "not ")); 3066 3067 /* 3068 * The "vendor's factory-set address" may already have 3069 * been extracted from the chip, but if the property 3070 * "local-mac-address" is set we use that instead. It 3071 * will normally be set by OBP, but it could also be 3072 * specified in a .conf file(!) 3073 * 3074 * There doesn't seem to be a way to define byte-array 3075 * properties in a .conf, so we check whether it looks 3076 * like an array of 6 ints instead. 3077 * 3078 * Then, we check whether it looks like an array of 6 3079 * bytes (which it should, if OBP set it). If we can't 3080 * make sense of it either way, we'll ignore it. 
3081 */ 3082 err = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, bgep->devinfo, 3083 DDI_PROP_DONTPASS, localmac_propname, &ints, &nelts); 3084 if (err == DDI_PROP_SUCCESS) { 3085 if (nelts == ETHERADDRL) { 3086 while (nelts--) 3087 cidp->vendor_addr.addr[nelts] = ints[nelts]; 3088 cidp->vendor_addr.set = B_TRUE; 3089 } 3090 ddi_prop_free(ints); 3091 } 3092 3093 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, bgep->devinfo, 3094 DDI_PROP_DONTPASS, localmac_propname, &bytes, &nelts); 3095 if (err == DDI_PROP_SUCCESS) { 3096 if (nelts == ETHERADDRL) { 3097 while (nelts--) 3098 cidp->vendor_addr.addr[nelts] = bytes[nelts]; 3099 cidp->vendor_addr.set = B_TRUE; 3100 } 3101 ddi_prop_free(bytes); 3102 } 3103 3104 BGE_DEBUG(("bge_find_mac_address: +local %s (%sset)", 3105 ether_sprintf((void *)cidp->vendor_addr.addr), 3106 cidp->vendor_addr.set ? "" : "not ")); 3107 3108 /* 3109 * Look up the OBP property "local-mac-address?". Note that even 3110 * though its value is a string (which should be "true" or "false"), 3111 * it can't be decoded by ddi_prop_lookup_string(9F). So, we zero 3112 * the buffer first and then fetch the property as an untyped array; 3113 * this may or may not include a final NUL, but since there will 3114 * always be one left at the end of the buffer we can now treat it 3115 * as a string anyway. 3116 */ 3117 nelts = sizeof (propbuf); 3118 bzero(propbuf, nelts--); 3119 err = ddi_getlongprop_buf(DDI_DEV_T_ANY, bgep->devinfo, 3120 DDI_PROP_CANSLEEP, localmac_boolname, propbuf, (int *)&nelts); 3121 3122 /* 3123 * Now, if the address still isn't set from the hardware (SEEPROM) 3124 * or the OBP or .conf property, OR if the user has foolishly set 3125 * 'local-mac-address? = false', use "the system address" instead 3126 * (but only if it's non-null i.e. has been set from the IDPROM). 
3127 */ 3128 if (cidp->vendor_addr.set == B_FALSE || strcmp(propbuf, "false") == 0) 3129 if (localetheraddr(NULL, &sysaddr) != 0) { 3130 ethaddr_copy(&sysaddr, cidp->vendor_addr.addr); 3131 cidp->vendor_addr.set = B_TRUE; 3132 } 3133 3134 BGE_DEBUG(("bge_find_mac_address: +system %s (%sset)", 3135 ether_sprintf((void *)cidp->vendor_addr.addr), 3136 cidp->vendor_addr.set ? "" : "not ")); 3137 3138 /* 3139 * Finally(!), if there's a valid "mac-address" property (created 3140 * if we netbooted from this interface), we must use this instead 3141 * of any of the above to ensure that the NFS/install server doesn't 3142 * get confused by the address changing as Solaris takes over! 3143 */ 3144 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, bgep->devinfo, 3145 DDI_PROP_DONTPASS, macaddr_propname, &bytes, &nelts); 3146 if (err == DDI_PROP_SUCCESS) { 3147 if (nelts == ETHERADDRL) { 3148 while (nelts--) 3149 cidp->vendor_addr.addr[nelts] = bytes[nelts]; 3150 cidp->vendor_addr.set = B_TRUE; 3151 } 3152 ddi_prop_free(bytes); 3153 } 3154 3155 BGE_DEBUG(("bge_find_mac_address: =final %s (%sset)", 3156 ether_sprintf((void *)cidp->vendor_addr.addr), 3157 cidp->vendor_addr.set ? 
"" : "not ")); 3158 } 3159 3160 /*ARGSUSED*/ 3161 int 3162 bge_check_acc_handle(bge_t *bgep, ddi_acc_handle_t handle) 3163 { 3164 ddi_fm_error_t de; 3165 3166 ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION); 3167 ddi_fm_acc_err_clear(handle, DDI_FME_VERSION); 3168 return (de.fme_status); 3169 } 3170 3171 /*ARGSUSED*/ 3172 int 3173 bge_check_dma_handle(bge_t *bgep, ddi_dma_handle_t handle) 3174 { 3175 ddi_fm_error_t de; 3176 3177 ASSERT(bgep->progress & PROGRESS_BUFS); 3178 ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION); 3179 return (de.fme_status); 3180 } 3181 3182 /* 3183 * The IO fault service error handling callback function 3184 */ 3185 /*ARGSUSED*/ 3186 static int 3187 bge_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data) 3188 { 3189 /* 3190 * as the driver can always deal with an error in any dma or 3191 * access handle, we can just return the fme_status value. 3192 */ 3193 pci_ereport_post(dip, err, NULL); 3194 return (err->fme_status); 3195 } 3196 3197 static void 3198 bge_fm_init(bge_t *bgep) 3199 { 3200 ddi_iblock_cookie_t iblk; 3201 3202 /* Only register with IO Fault Services if we have some capability */ 3203 if (bgep->fm_capabilities) { 3204 bge_reg_accattr.devacc_attr_access = DDI_FLAGERR_ACC; 3205 dma_attr.dma_attr_flags = DDI_DMA_FLAGERR; 3206 3207 /* Register capabilities with IO Fault Services */ 3208 ddi_fm_init(bgep->devinfo, &bgep->fm_capabilities, &iblk); 3209 3210 /* 3211 * Initialize pci ereport capabilities if ereport capable 3212 */ 3213 if (DDI_FM_EREPORT_CAP(bgep->fm_capabilities) || 3214 DDI_FM_ERRCB_CAP(bgep->fm_capabilities)) 3215 pci_ereport_setup(bgep->devinfo); 3216 3217 /* 3218 * Register error callback if error callback capable 3219 */ 3220 if (DDI_FM_ERRCB_CAP(bgep->fm_capabilities)) 3221 ddi_fm_handler_register(bgep->devinfo, 3222 bge_fm_error_cb, (void*) bgep); 3223 } else { 3224 /* 3225 * These fields have to be cleared of FMA if there are no 3226 * FMA capabilities at runtime. 
3227 */ 3228 bge_reg_accattr.devacc_attr_access = DDI_DEFAULT_ACC; 3229 dma_attr.dma_attr_flags = 0; 3230 } 3231 } 3232 3233 static void 3234 bge_fm_fini(bge_t *bgep) 3235 { 3236 /* Only unregister FMA capabilities if we registered some */ 3237 if (bgep->fm_capabilities) { 3238 3239 /* 3240 * Release any resources allocated by pci_ereport_setup() 3241 */ 3242 if (DDI_FM_EREPORT_CAP(bgep->fm_capabilities) || 3243 DDI_FM_ERRCB_CAP(bgep->fm_capabilities)) 3244 pci_ereport_teardown(bgep->devinfo); 3245 3246 /* 3247 * Un-register error callback if error callback capable 3248 */ 3249 if (DDI_FM_ERRCB_CAP(bgep->fm_capabilities)) 3250 ddi_fm_handler_unregister(bgep->devinfo); 3251 3252 /* Unregister from IO Fault Services */ 3253 ddi_fm_fini(bgep->devinfo); 3254 } 3255 } 3256 3257 static void 3258 #ifdef BGE_IPMI_ASF 3259 bge_unattach(bge_t *bgep, uint_t asf_mode) 3260 #else 3261 bge_unattach(bge_t *bgep) 3262 #endif 3263 { 3264 BGE_TRACE(("bge_unattach($%p)", 3265 (void *)bgep)); 3266 3267 /* 3268 * Flag that no more activity may be initiated 3269 */ 3270 bgep->progress &= ~PROGRESS_READY; 3271 3272 /* 3273 * Quiesce the PHY and MAC (leave it reset but still powered). 3274 * Clean up and free all BGE data structures 3275 */ 3276 if (bgep->periodic_id != NULL) { 3277 ddi_periodic_delete(bgep->periodic_id); 3278 bgep->periodic_id = NULL; 3279 } 3280 3281 if (bgep->progress & PROGRESS_KSTATS) 3282 bge_fini_kstats(bgep); 3283 if (bgep->progress & PROGRESS_PHY) 3284 bge_phys_reset(bgep); 3285 if (bgep->progress & PROGRESS_HWINT) { 3286 mutex_enter(bgep->genlock); 3287 #ifdef BGE_IPMI_ASF 3288 if (bge_chip_reset(bgep, B_FALSE, asf_mode) != DDI_SUCCESS) 3289 #else 3290 if (bge_chip_reset(bgep, B_FALSE) != DDI_SUCCESS) 3291 #endif 3292 ddi_fm_service_impact(bgep->devinfo, 3293 DDI_SERVICE_UNAFFECTED); 3294 #ifdef BGE_IPMI_ASF 3295 if (bgep->asf_enabled) { 3296 /* 3297 * This register has been overlaid. We restore its 3298 * initial value here. 
3299 */ 3300 bge_nic_put32(bgep, BGE_NIC_DATA_SIG_ADDR, 3301 BGE_NIC_DATA_SIG); 3302 } 3303 #endif 3304 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) 3305 ddi_fm_service_impact(bgep->devinfo, 3306 DDI_SERVICE_UNAFFECTED); 3307 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) 3308 ddi_fm_service_impact(bgep->devinfo, 3309 DDI_SERVICE_UNAFFECTED); 3310 mutex_exit(bgep->genlock); 3311 } 3312 if (bgep->progress & PROGRESS_INTR) { 3313 bge_intr_disable(bgep); 3314 bge_fini_rings(bgep); 3315 } 3316 if (bgep->progress & PROGRESS_HWINT) { 3317 bge_rem_intrs(bgep); 3318 rw_destroy(bgep->errlock); 3319 mutex_destroy(bgep->softintrlock); 3320 mutex_destroy(bgep->genlock); 3321 } 3322 if (bgep->progress & PROGRESS_FACTOTUM) 3323 ddi_remove_softintr(bgep->factotum_id); 3324 if (bgep->progress & PROGRESS_RESCHED) 3325 ddi_remove_softintr(bgep->drain_id); 3326 if (bgep->progress & PROGRESS_BUFS) 3327 bge_free_bufs(bgep); 3328 if (bgep->progress & PROGRESS_REGS) { 3329 ddi_regs_map_free(&bgep->io_handle); 3330 if (bgep->ape_enabled) 3331 ddi_regs_map_free(&bgep->ape_handle); 3332 } 3333 if (bgep->progress & PROGRESS_CFG) 3334 pci_config_teardown(&bgep->cfg_handle); 3335 3336 bge_fm_fini(bgep); 3337 3338 ddi_remove_minor_node(bgep->devinfo, NULL); 3339 kmem_free(bgep->pstats, sizeof (bge_statistics_reg_t)); 3340 kmem_free(bgep, sizeof (*bgep)); 3341 } 3342 3343 static int 3344 bge_resume(dev_info_t *devinfo) 3345 { 3346 bge_t *bgep; /* Our private data */ 3347 chip_id_t *cidp; 3348 chip_id_t chipid; 3349 3350 bgep = ddi_get_driver_private(devinfo); 3351 if (bgep == NULL) 3352 return (DDI_FAILURE); 3353 3354 /* 3355 * Refuse to resume if the data structures aren't consistent 3356 */ 3357 if (bgep->devinfo != devinfo) 3358 return (DDI_FAILURE); 3359 3360 #ifdef BGE_IPMI_ASF 3361 /* 3362 * Power management hasn't been supported in BGE now. If you 3363 * want to implement it, please add the ASF/IPMI related 3364 * code here. 
 */

#endif

	/*
	 * Read chip ID & set up config space command register(s)
	 * Refuse to resume if the chip has changed its identity!
	 */
	cidp = &bgep->chipid;
	mutex_enter(bgep->genlock);
	bge_chip_cfg_init(bgep, &chipid, B_FALSE);
	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
		/* config-space access fault: report to FMA and fail resume */
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
		mutex_exit(bgep->genlock);
		return (DDI_FAILURE);
	}
	mutex_exit(bgep->genlock);

	/* Any identity mismatch vs. the chip seen at attach => no resume */
	if (chipid.vendor != cidp->vendor)
		return (DDI_FAILURE);
	if (chipid.device != cidp->device)
		return (DDI_FAILURE);
	if (chipid.revision != cidp->revision)
		return (DDI_FAILURE);
	if (chipid.asic_rev != cidp->asic_rev)
		return (DDI_FAILURE);

	/*
	 * All OK, reinitialise h/w & kick off GLD scheduling
	 */
	mutex_enter(bgep->genlock);
	if (bge_restart(bgep, B_TRUE) != DDI_SUCCESS) {
		(void) bge_check_acc_handle(bgep, bgep->cfg_handle);
		(void) bge_check_acc_handle(bgep, bgep->io_handle);
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
		mutex_exit(bgep->genlock);
		return (DDI_FAILURE);
	}
	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
		mutex_exit(bgep->genlock);
		return (DDI_FAILURE);
	}
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
		mutex_exit(bgep->genlock);
		return (DDI_FAILURE);
	}
	mutex_exit(bgep->genlock);
	return (DDI_SUCCESS);
}

/*
 * Check whether the NVRAM image at <offset> looks like valid firmware:
 * the first word must carry the 0x0c000000 signature (after masking
 * with 0xfc000000) and the second word must be zero.
 * Returns 1 if valid, 0 on signature mismatch or NVRAM read failure.
 */
static int
bge_fw_img_is_valid(bge_t *bgep, uint32_t offset)
{
	uint32_t val;

	if (bge_nvmem_read32(bgep, offset, &val) ||
	    (val & 0xfc000000) != 0x0c000000 ||
	    bge_nvmem_read32(bgep, offset + 4, &val) ||
	    val != 0)
		return (0);

	return (1);
}

/*
 * Append the management (ASF) firmware version to bgep->fw_version.
 * Scans the NVRAM directory for an ASFINI entry, validates the image
 * it points at, then copies up to 16 bytes of big-endian version text.
 * Returns silently on any NVRAM read failure.
 */
static void
bge_read_mgmtfw_ver(bge_t *bgep)
{
	uint32_t val;
	uint32_t offset;
	uint32_t start;
	int i, vlen;

	/* Locate the ASFINI directory entry in NVRAM */
	for (offset = NVM_DIR_START;
	    offset < NVM_DIR_END;
	    offset += NVM_DIRENT_SIZE) {
		if (bge_nvmem_read32(bgep, offset, &val))
			return;

		if ((val >> NVM_DIRTYPE_SHIFT) == NVM_DIRTYPE_ASFINI)
			break;
	}

	if (offset == NVM_DIR_END)
		return;

	if (bge_nvmem_read32(bgep, offset - 4, &start))
		return;

	/* NB: <offset> is re-used here as the image pointer */
	if (bge_nvmem_read32(bgep, offset + 4, &offset) ||
	    !bge_fw_img_is_valid(bgep, offset) ||
	    bge_nvmem_read32(bgep, offset + 8, &val))
		return;

	offset += val - start;

	vlen = strlen(bgep->fw_version);

	bgep->fw_version[vlen++] = ',';
	bgep->fw_version[vlen++] = ' ';

	/* Copy up to four 32-bit words of version text */
	for (i = 0; i < 4; i++) {
		uint32_t v;

		if (bge_nvmem_read32(bgep, offset, &v))
			return;

		/* version text is stored big-endian in NVRAM */
		v = BE_32(v);

		offset += sizeof(v);

		/*
		 * NOTE(review): sizeof() makes the RHS unsigned, so
		 * <vlen> is promoted for this comparison; harmless
		 * while vlen >= 0 — confirm BGE_FW_VER_SIZE >= 4.
		 */
		if (vlen > BGE_FW_VER_SIZE - sizeof(v)) {
			/* no room for a whole word: copy the tail only */
			memcpy(&bgep->fw_version[vlen], &v, BGE_FW_VER_SIZE - vlen);
			break;
		}

		memcpy(&bgep->fw_version[vlen], &v, sizeof(v));
		vlen += sizeof(v);
	}
}

/*
 * Append the APE (DASH/SMASH/NCSI) firmware version to
 * bgep->fw_version.  Only acts when both APE and ASF are enabled and
 * the APE segment signature and READY status check out.
 */
static void
bge_read_dash_ver(bge_t *bgep)
{
	int vlen;
	uint32_t apedata;
	char *fwtype;

	if (!bgep->ape_enabled || !bgep->asf_enabled)
		return;

	apedata = bge_ape_get32(bgep, BGE_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = bge_ape_get32(bgep, BGE_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return;

	apedata = bge_ape_get32(bgep, BGE_APE_FW_VERSION);

	/* Pick the firmware flavour name for the version string */
	if (bge_ape_get32(bgep, BGE_APE_FW_FEATURES) &
	    BGE_APE_FW_FEATURE_NCSI) {
		bgep->ape_has_ncsi = B_TRUE;
		fwtype = "NCSI";
	} else if ((bgep->chipid.device == DEVICE_ID_5725) ||
	    (bgep->chipid.device == DEVICE_ID_5727)) {
		fwtype = "SMASH";
	} else {
		fwtype = "DASH";
3515 } 3516 3517 vlen = strlen(bgep->fw_version); 3518 3519 snprintf(&bgep->fw_version[vlen], BGE_FW_VER_SIZE - vlen, 3520 " %s v%d.%d.%d.%d", fwtype, 3521 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT, 3522 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT, 3523 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT, 3524 (apedata & APE_FW_VERSION_BLDMSK)); 3525 } 3526 3527 static void 3528 bge_read_bc_ver(bge_t *bgep) 3529 { 3530 uint32_t val; 3531 uint32_t offset; 3532 uint32_t start; 3533 uint32_t ver_offset; 3534 int i, dst_off; 3535 uint32_t major; 3536 uint32_t minor; 3537 boolean_t newver = B_FALSE; 3538 3539 if (bge_nvmem_read32(bgep, 0xc, &offset) || 3540 bge_nvmem_read32(bgep, 0x4, &start)) 3541 return; 3542 3543 if (bge_nvmem_read32(bgep, offset, &val)) 3544 return; 3545 3546 if ((val & 0xfc000000) == 0x0c000000) { 3547 if (bge_nvmem_read32(bgep, offset + 4, &val)) 3548 return; 3549 3550 if (val == 0) 3551 newver = B_TRUE; 3552 } 3553 3554 dst_off = strlen(bgep->fw_version); 3555 3556 if (newver) { 3557 if (((BGE_FW_VER_SIZE - dst_off) < 16) || 3558 bge_nvmem_read32(bgep, offset + 8, &ver_offset)) 3559 return; 3560 3561 offset = offset + ver_offset - start; 3562 for (i = 0; i < 16; i += 4) { 3563 if (bge_nvmem_read32(bgep, offset + i, &val)) 3564 return; 3565 val = BE_32(val); 3566 memcpy(bgep->fw_version + dst_off + i, &val, 3567 sizeof(val)); 3568 } 3569 } else { 3570 if (bge_nvmem_read32(bgep, NVM_PTREV_BCVER, &ver_offset)) 3571 return; 3572 3573 major = (ver_offset & NVM_BCVER_MAJMSK) >> NVM_BCVER_MAJSFT; 3574 minor = ver_offset & NVM_BCVER_MINMSK; 3575 snprintf(&bgep->fw_version[dst_off], BGE_FW_VER_SIZE - dst_off, 3576 "v%d.%02d", major, minor); 3577 } 3578 } 3579 3580 static void 3581 bge_read_fw_ver(bge_t *bgep) 3582 { 3583 uint32_t val; 3584 uint32_t magic; 3585 3586 *bgep->fw_version = 0; 3587 3588 if ((bgep->chipid.nvtype == BGE_NVTYPE_NONE) || 3589 (bgep->chipid.nvtype == BGE_NVTYPE_UNKNOWN)) { 3590 
		snprintf(bgep->fw_version, sizeof(bgep->fw_version), "sb");
		return;
	}

	mutex_enter(bgep->genlock);

	bge_nvmem_read32(bgep, 0, &magic);

	if (magic == EEPROM_MAGIC) {
		bge_read_bc_ver(bgep);
	} else {
		/* ignore other configs for now */
		mutex_exit(bgep->genlock);
		return;
	}

	/* APE firmware takes precedence over plain management firmware */
	if (bgep->ape_enabled) {
		if (bgep->asf_enabled) {
			bge_read_dash_ver(bgep);
		}
	} else if (bgep->asf_enabled) {
		bge_read_mgmtfw_ver(bgep);
	}

	mutex_exit(bgep->genlock);

	bgep->fw_version[BGE_FW_VER_SIZE - 1] = 0; /* safety */
}

/*
 * attach(9E) -- Attach a device to the system
 *
 * Called once for each board successfully probed.
 */
static int
bge_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
{
	bge_t *bgep;				/* Our private data	*/
	mac_register_t *macp;
	chip_id_t *cidp;
	caddr_t regs;
	int instance;
	int err;
	int intr_types;
	int *props = NULL;
	uint_t numProps;
	uint32_t regval;
	uint32_t pci_state_reg;
#ifdef BGE_IPMI_ASF
	uint32_t mhcrValue;
#ifdef __sparc
	uint16_t value16;
#endif
#ifdef BGE_NETCONSOLE
	int retval;
#endif
#endif

	instance = ddi_get_instance(devinfo);

	BGE_GTRACE(("bge_attach($%p, %d) instance %d",
	    (void *)devinfo, cmd, instance));
	BGE_BRKPT(NULL, "bge_attach");

	switch (cmd) {
	default:
		return (DDI_FAILURE);

	case DDI_RESUME:
		return (bge_resume(devinfo));

	case DDI_ATTACH:
		break;
	}

	/* Allocate and initialise the per-instance soft state */
	bgep = kmem_zalloc(sizeof (*bgep), KM_SLEEP);
	bgep->pstats = kmem_zalloc(sizeof (bge_statistics_reg_t), KM_SLEEP);
	ddi_set_driver_private(devinfo, bgep);
	bgep->bge_guard = BGE_GUARD;
	bgep->devinfo = devinfo;
	bgep->param_drain_max = 64;
	bgep->param_msi_cnt = 0;
	bgep->param_loop_mode = 0;

	/*
	 * Initialize more fields in BGE private data
	 */
	bgep->debug = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, debug_propname, bge_debug);
	(void) snprintf(bgep->ifname, sizeof (bgep->ifname), "%s%d",
	    BGE_DRIVER_NAME, instance);

	/*
	 * Initialize for fma support
	 */
	bgep->fm_capabilities = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, fm_cap,
	    DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
	    DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
	BGE_DEBUG(("bgep->fm_capabilities = %d", bgep->fm_capabilities));
	bge_fm_init(bgep);

	/*
	 * Look up the IOMMU's page size for DVMA mappings (must be
	 * a power of 2) and convert to a mask.  This can be used to
	 * determine whether a message buffer crosses a page boundary.
	 * Note: in 2s complement binary notation, if X is a power of
	 * 2, then -X has the representation "11...1100...00".
	 */
	bgep->pagemask = dvma_pagesize(devinfo);
	ASSERT(ddi_ffs(bgep->pagemask) == ddi_fls(bgep->pagemask));
	bgep->pagemask = -bgep->pagemask;

	/*
	 * Map config space registers
	 * Read chip ID & set up config space command register(s)
	 *
	 * Note: this leaves the chip accessible by Memory Space
	 * accesses, but with interrupts and Bus Mastering off.
	 * This should ensure that nothing untoward will happen
	 * if it has been left active by the (net-)bootloader.
	 * We'll re-enable Bus Mastering once we've reset the chip,
	 * and allow interrupts only when everything else is set up.
	 *
	 * NOTE(review): <err> from pci_config_setup() is only tested
	 * after the ASF block below, so config-space accesses in
	 * between run against a possibly-unset handle — confirm.
	 */
	err = pci_config_setup(devinfo, &bgep->cfg_handle);

	bgep->ape_enabled = B_FALSE;
	bgep->ape_regs = NULL;

	/*
	 * NOTE(review): DEVICE_5717/5725_SERIES_CHIPSETS() are
	 * evaluated here before chipid is populated by
	 * bge_chip_cfg_init() below (chipid.device is set earlier
	 * only in the sparc/ASF path) — verify the macro's inputs.
	 */
	if (DEVICE_5717_SERIES_CHIPSETS(bgep) ||
	    DEVICE_5725_SERIES_CHIPSETS(bgep)) {
		err = ddi_regs_map_setup(devinfo, BGE_PCI_APEREGS_RNUMBER,
		    &regs, 0, 0, &bge_reg_accattr, &bgep->ape_handle);
		if (err != DDI_SUCCESS) {
			/*
			 * NOTE(review): io_handle has not been mapped
			 * yet (that happens further down); this
			 * ddi_regs_map_free() looks misplaced — confirm.
			 */
			ddi_regs_map_free(&bgep->io_handle);
			bge_problem(bgep, "ddi_regs_map_setup() failed");
			goto attach_fail;
		}
		bgep->ape_regs = regs;
		bgep->ape_enabled = B_TRUE;

		/*
		 * Allow reads and writes to the
		 * APE register and memory space.
		 */
		pci_state_reg = pci_config_get32(bgep->cfg_handle,
		    PCI_CONF_BGE_PCISTATE);
		pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		    PCISTATE_ALLOW_APE_SHMEM_WR | PCISTATE_ALLOW_APE_PSPACE_WR;
		pci_config_put32(bgep->cfg_handle,
		    PCI_CONF_BGE_PCISTATE, pci_state_reg);
		bge_ape_lock_init(bgep);
	}

#ifdef BGE_IPMI_ASF
#ifdef __sparc
	/*
	 * We need to determine the type of chipset for accessing some configure
	 * registers. (This information will be used by bge_ind_put32,
	 * bge_ind_get32 and bge_nic_read32)
	 */
	bgep->chipid.device = pci_config_get16(bgep->cfg_handle,
	    PCI_CONF_DEVID);
	value16 = pci_config_get16(bgep->cfg_handle, PCI_CONF_COMM);
	value16 = value16 | (PCI_COMM_MAE | PCI_COMM_ME);
	pci_config_put16(bgep->cfg_handle, PCI_CONF_COMM, value16);
	mhcrValue = MHCR_ENABLE_INDIRECT_ACCESS |
	    MHCR_ENABLE_TAGGED_STATUS_MODE |
	    MHCR_MASK_INTERRUPT_MODE |
	    MHCR_MASK_PCI_INT_OUTPUT |
	    MHCR_CLEAR_INTERRUPT_INTA |
	    MHCR_ENABLE_ENDIAN_WORD_SWAP |
	    MHCR_ENABLE_ENDIAN_BYTE_SWAP;
	/*
	 * For some chipsets (e.g., BCM5718), if MHCR_ENABLE_ENDIAN_BYTE_SWAP
	 * has been set in PCI_CONF_COMM already, we need to write the
	 * byte-swapped value to it. So we just write zero first for simplicity.
	 */
	if (DEVICE_5717_SERIES_CHIPSETS(bgep) ||
	    DEVICE_5725_SERIES_CHIPSETS(bgep))
		pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_MHCR, 0);
#else
	mhcrValue = MHCR_ENABLE_INDIRECT_ACCESS |
	    MHCR_ENABLE_TAGGED_STATUS_MODE |
	    MHCR_MASK_INTERRUPT_MODE |
	    MHCR_MASK_PCI_INT_OUTPUT |
	    MHCR_CLEAR_INTERRUPT_INTA;
#endif
	pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_MHCR, mhcrValue);
	bge_ind_put32(bgep, MEMORY_ARBITER_MODE_REG,
	    bge_ind_get32(bgep, MEMORY_ARBITER_MODE_REG) |
	    MEMORY_ARBITER_ENABLE);
	if (mhcrValue & MHCR_ENABLE_ENDIAN_WORD_SWAP) {
		bgep->asf_wordswapped = B_TRUE;
	} else {
		bgep->asf_wordswapped = B_FALSE;
	}
	bge_asf_get_config(bgep);
#endif
	if (err != DDI_SUCCESS) {
		bge_problem(bgep, "pci_config_setup() failed");
		goto attach_fail;
	}
	bgep->progress |= PROGRESS_CFG;
	cidp = &bgep->chipid;
	bzero(cidp, sizeof(*cidp));
	bge_chip_cfg_init(bgep, cidp, B_FALSE);
	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
		goto attach_fail;
	}

#ifdef BGE_IPMI_ASF
	/* Newer families use the new ASF handshake protocol */
	if (DEVICE_5721_SERIES_CHIPSETS(bgep) ||
	    DEVICE_5714_SERIES_CHIPSETS(bgep)) {
		bgep->asf_newhandshake = B_TRUE;
	} else {
		bgep->asf_newhandshake = B_FALSE;
	}
#endif

	/*
	 * Update those parts of the chip ID derived from volatile
	 * registers with the values seen by OBP (in case the chip
	 * has been reset externally and therefore lost them).
	 */
	cidp->subven = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, subven_propname, cidp->subven);
	cidp->subdev = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, subdev_propname, cidp->subdev);
	cidp->clsize = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, clsize_propname, cidp->clsize);
	cidp->latency = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, latency_propname, cidp->latency);
	cidp->rx_rings = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, rxrings_propname, cidp->rx_rings);
	cidp->tx_rings = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, txrings_propname, cidp->tx_rings);
	cidp->eee = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, eee_propname, cidp->eee);

	/* Clamp the configured MTU into [BGE_DEFAULT_MTU, BGE_MAXIMUM_MTU] */
	cidp->default_mtu = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, default_mtu, BGE_DEFAULT_MTU);
	if ((cidp->default_mtu < BGE_DEFAULT_MTU) ||
	    (cidp->default_mtu > BGE_MAXIMUM_MTU)) {
		cidp->default_mtu = BGE_DEFAULT_MTU;
	}

	/*
	 * Map operating registers
	 */
	err = ddi_regs_map_setup(devinfo, BGE_PCI_OPREGS_RNUMBER,
	    &regs, 0, 0, &bge_reg_accattr, &bgep->io_handle);
	if (err != DDI_SUCCESS) {
		bge_problem(bgep, "ddi_regs_map_setup() failed");
		goto attach_fail;
	}
	bgep->io_regs = regs;

	bgep->progress |= PROGRESS_REGS;

	/*
	 * Characterise the device, so we know its requirements.
	 * Then allocate the appropriate TX and RX descriptors & buffers.
	 */
	if (bge_chip_id_init(bgep) == EIO) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
		goto attach_fail;
	}

	/* Extract bus/device/function from the first "reg" entry */
	err = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, bgep->devinfo,
	    0, "reg", &props, &numProps);
	if ((err == DDI_PROP_SUCCESS) && (numProps > 0)) {
		bgep->pci_bus = PCI_REG_BUS_G(props[0]);
		bgep->pci_dev = PCI_REG_DEV_G(props[0]);
		bgep->pci_func = PCI_REG_FUNC_G(props[0]);
		ddi_prop_free(props);
	}

	/* On 5717/5725 families the real function number comes from CPMU */
	if (DEVICE_5717_SERIES_CHIPSETS(bgep) ||
	    DEVICE_5725_SERIES_CHIPSETS(bgep)) {
		regval = bge_reg_get32(bgep, CPMU_STATUS_REG);
		if ((bgep->chipid.device == DEVICE_ID_5719) ||
		    (bgep->chipid.device == DEVICE_ID_5720)) {
			bgep->pci_func =
			    ((regval & CPMU_STATUS_FUNC_NUM_5719) >>
			    CPMU_STATUS_FUNC_NUM_5719_SHIFT);
		} else {
			bgep->pci_func = ((regval & CPMU_STATUS_FUNC_NUM) >>
			    CPMU_STATUS_FUNC_NUM_SHIFT);
		}
	}

	err = bge_alloc_bufs(bgep);
	if (err != DDI_SUCCESS) {
		bge_problem(bgep, "DMA buffer allocation failed");
		goto attach_fail;
	}
	bgep->progress |= PROGRESS_BUFS;

	/*
	 * Add the softint handlers:
	 *
	 * Both of these handlers are used to avoid restrictions on the
	 * context and/or mutexes required for some operations.  In
	 * particular, the hardware interrupt handler and its subfunctions
	 * can detect a number of conditions that we don't want to handle
	 * in that context or with that set of mutexes held.  So, these
	 * softints are triggered instead:
	 *
	 * the <resched> softint is triggered if we have previously
	 * had to refuse to send a packet because of resource shortage
	 * (we've run out of transmit buffers), but the send completion
	 * interrupt handler has now detected that more buffers have
	 * become available.
	 *
	 * the <factotum> is triggered if the h/w interrupt handler
	 * sees the <link state changed> or <error> bits in the status
	 * block.  It's also triggered periodically to poll the link
	 * state, just in case we aren't getting link status change
	 * interrupts ...
	 */
	err = ddi_add_softintr(devinfo, DDI_SOFTINT_LOW, &bgep->drain_id,
	    NULL, NULL, bge_send_drain, (caddr_t)bgep);
	if (err != DDI_SUCCESS) {
		bge_problem(bgep, "ddi_add_softintr() failed");
		goto attach_fail;
	}
	bgep->progress |= PROGRESS_RESCHED;
	err = ddi_add_softintr(devinfo, DDI_SOFTINT_LOW, &bgep->factotum_id,
	    NULL, NULL, bge_chip_factotum, (caddr_t)bgep);
	if (err != DDI_SUCCESS) {
		bge_problem(bgep, "ddi_add_softintr() failed");
		goto attach_fail;
	}
	bgep->progress |= PROGRESS_FACTOTUM;

	/* Get supported interrupt types */
	if (ddi_intr_get_supported_types(devinfo, &intr_types) != DDI_SUCCESS) {
		bge_error(bgep, "ddi_intr_get_supported_types failed\n");

		goto attach_fail;
	}

	BGE_DEBUG(("%s: ddi_intr_get_supported_types() returned: %x",
	    bgep->ifname, intr_types));

	/* Prefer MSI when available and enabled; fall back to FIXED */
	if ((intr_types & DDI_INTR_TYPE_MSI) && bgep->chipid.msi_enabled) {
		if (bge_add_intrs(bgep, DDI_INTR_TYPE_MSI) != DDI_SUCCESS) {
			bge_error(bgep, "MSI registration failed, "
			    "trying FIXED interrupt type\n");
		} else {
			BGE_DEBUG(("%s: Using MSI interrupt type",
			    bgep->ifname));
			bgep->intr_type = DDI_INTR_TYPE_MSI;
			bgep->progress |= PROGRESS_HWINT;
		}
	}

	if (!(bgep->progress & PROGRESS_HWINT) &&
	    (intr_types & DDI_INTR_TYPE_FIXED)) {
		if (bge_add_intrs(bgep, DDI_INTR_TYPE_FIXED) != DDI_SUCCESS) {
			bge_error(bgep, "FIXED interrupt "
			    "registration failed\n");
			goto attach_fail;
		}

		BGE_DEBUG(("%s: Using FIXED interrupt type", bgep->ifname));

		bgep->intr_type = DDI_INTR_TYPE_FIXED;
		bgep->progress |= PROGRESS_HWINT;
	}

	if (!(bgep->progress & PROGRESS_HWINT)) {
		bge_error(bgep, "No interrupts registered\n");
		goto attach_fail;
	}

	/*
	 * Note that interrupts are not enabled yet as
	 * mutex locks are not initialized.  Initialize mutex locks.
	 * All are initialized at the interrupt priority obtained by
	 * bge_add_intrs() so the h/w handler can take them.
	 */
	mutex_init(bgep->genlock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(bgep->intr_pri));
	mutex_init(bgep->softintrlock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(bgep->intr_pri));
	rw_init(bgep->errlock, NULL, RW_DRIVER,
	    DDI_INTR_PRI(bgep->intr_pri));

	/*
	 * Initialize rings.
	 */
	bge_init_rings(bgep);

	/*
	 * Now that mutex locks are initialized, enable interrupts.
	 */
	bge_intr_enable(bgep);
	bgep->progress |= PROGRESS_INTR;

	/*
	 * Initialise link state variables
	 * Stop, reset & reinitialise the chip.
	 * Initialise the (internal) PHY.
	 */
	bgep->link_state = LINK_STATE_UNKNOWN;

	mutex_enter(bgep->genlock);

	/*
	 * Reset chip & rings to initial state; also reset address
	 * filtering, promiscuity, loopback mode.
	 */
#ifdef BGE_IPMI_ASF
#ifdef BGE_NETCONSOLE
	if (bge_reset(bgep, ASF_MODE_INIT) != DDI_SUCCESS) {
#else
	if (bge_reset(bgep, ASF_MODE_SHUTDOWN) != DDI_SUCCESS) {
#endif
#else
	if (bge_reset(bgep) != DDI_SUCCESS) {
#endif
		(void) bge_check_acc_handle(bgep, bgep->cfg_handle);
		(void) bge_check_acc_handle(bgep, bgep->io_handle);
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
		mutex_exit(bgep->genlock);
		goto attach_fail;
	}

#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled) {
		bgep->asf_status = ASF_STAT_RUN_INIT;
	}
#endif

	/* Clear multicast state and start with filtering defaults */
	bzero(bgep->mcast_hash, sizeof (bgep->mcast_hash));
	bzero(bgep->mcast_refs, sizeof (bgep->mcast_refs));
	bgep->promisc = B_FALSE;
	bgep->param_loop_mode = BGE_LOOP_NONE;
	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
		mutex_exit(bgep->genlock);
		goto attach_fail;
	}
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
		mutex_exit(bgep->genlock);
		goto attach_fail;
	}

	mutex_exit(bgep->genlock);

	if (bge_phys_init(bgep) == EIO) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
		goto attach_fail;
	}
	bgep->progress |= PROGRESS_PHY;

	/*
	 * initialize NDD-tweakable parameters
	 */
	if (bge_nd_init(bgep)) {
		bge_problem(bgep, "bge_nd_init() failed");
		goto attach_fail;
	}
	bgep->progress |= PROGRESS_NDD;

	/*
	 * Create & initialise named kstats
	 */
	bge_init_kstats(bgep, instance);
	bgep->progress |= PROGRESS_KSTATS;

	/*
	 * Determine whether to override the chip's own MAC address
	 */
	bge_find_mac_address(bgep, cidp);
	{
		/* Seed every unicast slot with the primary address */
		int slot;
		for (slot = 0; slot < MAC_ADDRESS_REGS_MAX; slot++) {
			ethaddr_copy(cidp->vendor_addr.addr,
			    bgep->curr_addr[slot].addr);
			bgep->curr_addr[slot].set = 1;
		}
	}

	bge_read_fw_ver(bgep);

	bgep->unicst_addr_total = MAC_ADDRESS_REGS_MAX;
	bgep->unicst_addr_avail = MAC_ADDRESS_REGS_MAX;

	/* Describe ourselves to the MAC layer */
	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
		goto attach_fail;
	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	macp->m_driver = bgep;
	macp->m_dip = devinfo;
	macp->m_src_addr = cidp->vendor_addr.addr;
	macp->m_callbacks = &bge_m_callbacks;
	macp->m_min_sdu = 0;
	macp->m_max_sdu = cidp->ethmax_size - sizeof (struct ether_header);
	macp->m_margin = VLAN_TAGSZ;
	macp->m_priv_props = bge_priv_prop;

#if defined(ILLUMOS)
	bge_m_unicst(bgep, cidp->vendor_addr.addr);
#endif

	/*
	 * Finally, we're ready to register ourselves with the MAC layer
	 * interface; if this succeeds, we're all ready to start()
	 */
	err = mac_register(macp, &bgep->mh);
	mac_free(macp);
	if (err != 0)
		goto attach_fail;

	mac_link_update(bgep->mh, LINK_STATE_UNKNOWN);

	/*
	 * Register a periodical handler.
	 * bge_chip_cyclic() is invoked in kernel context.
	 */
	bgep->periodic_id = ddi_periodic_add(bge_chip_cyclic, bgep,
	    BGE_CYCLIC_PERIOD, DDI_IPL_0);

	bgep->progress |= PROGRESS_READY;
	ASSERT(bgep->bge_guard == BGE_GUARD);
#ifdef BGE_IPMI_ASF
#ifdef BGE_NETCONSOLE
	if (bgep->asf_enabled) {
		mutex_enter(bgep->genlock);
		retval = bge_chip_start(bgep, B_TRUE);
		mutex_exit(bgep->genlock);
		if (retval != DDI_SUCCESS)
			goto attach_fail;
	}
#endif
#endif

	ddi_report_dev(devinfo);

	return (DDI_SUCCESS);

attach_fail:
	/* bge_unattach() tears down exactly what <progress> records */
#ifdef BGE_IPMI_ASF
	bge_unattach(bgep, ASF_MODE_SHUTDOWN);
#else
	bge_unattach(bgep);
#endif
	return (DDI_FAILURE);
}

/*
 * bge_suspend() -- suspend transmit/receive for powerdown
 */
static int
bge_suspend(bge_t *bgep)
{
	/*
	 * Stop processing and idle (powerdown) the PHY ...
	 */
	mutex_enter(bgep->genlock);
#ifdef BGE_IPMI_ASF
	/*
	 * Power management hasn't been supported in BGE now. If you
	 * want to implement it, please add the ASF/IPMI related
	 * code here.
	 */
#endif
	bge_stop(bgep);
	if (bge_phys_idle(bgep) != DDI_SUCCESS) {
		(void) bge_check_acc_handle(bgep, bgep->io_handle);
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (DDI_FAILURE);
	}
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (DDI_FAILURE);
	}
	mutex_exit(bgep->genlock);

	return (DDI_SUCCESS);
}

/*
 * quiesce(9E) entry point.
 *
 * This function is called when the system is single-threaded at high
 * PIL with preemption disabled. Therefore, this function must not be
 * blocked.
 *
 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
 * DDI_FAILURE indicates an error condition and should almost never happen.
 */
#ifdef __sparc
#define	bge_quiesce	ddi_quiesce_not_supported
#else
static int
bge_quiesce(dev_info_t *devinfo)
{
	bge_t *bgep = ddi_get_driver_private(devinfo);

	if (bgep == NULL)
		return (DDI_FAILURE);

	/* Mask further interrupts without taking any locks */
	if (bgep->intr_type == DDI_INTR_TYPE_FIXED) {
		bge_reg_set32(bgep, PCI_CONF_BGE_MHCR,
		    MHCR_MASK_PCI_INT_OUTPUT);
	} else {
		bge_reg_clr32(bgep, MSI_MODE_REG, MSI_MSI_ENABLE);
	}

	/* Stop the chip */
	bge_chip_stop_nonblocking(bgep);

	return (DDI_SUCCESS);
}
#endif

/*
 * detach(9E) -- Detach a device from the system
 */
static int
bge_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
{
	bge_t *bgep;
#ifdef BGE_IPMI_ASF
	uint_t asf_mode;
	asf_mode = ASF_MODE_NONE;
#endif

	BGE_GTRACE(("bge_detach($%p, %d)", (void *)devinfo, cmd));

	bgep = ddi_get_driver_private(devinfo);

	switch (cmd) {
	default:
		return (DDI_FAILURE);

	case DDI_SUSPEND:
		return (bge_suspend(bgep));

	case DDI_DETACH:
		break;
	}

#ifdef BGE_IPMI_ASF
	/* Wind down any running ASF state machine before teardown */
	mutex_enter(bgep->genlock);
	if (bgep->asf_enabled && ((bgep->asf_status == ASF_STAT_RUN) ||
	    (bgep->asf_status == ASF_STAT_RUN_INIT))) {

		bge_asf_update_status(bgep);
		if (bgep->asf_status == ASF_STAT_RUN) {
			bge_asf_stop_timer(bgep);
		}
		bgep->asf_status = ASF_STAT_STOP;

		bge_asf_pre_reset_operations(bgep, BGE_SHUTDOWN_RESET);

		if (bgep->asf_pseudostop) {
			bge_chip_stop(bgep, B_FALSE);
			bgep->bge_mac_state = BGE_MAC_STOPPED;
			bgep->asf_pseudostop = B_FALSE;
		}

		asf_mode = ASF_MODE_POST_SHUTDOWN;

		if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK)
			ddi_fm_service_impact(bgep->devinfo,
			    DDI_SERVICE_UNAFFECTED);
		if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK)
			ddi_fm_service_impact(bgep->devinfo,
			    DDI_SERVICE_UNAFFECTED);
	}
	mutex_exit(bgep->genlock);
#endif

	/*
	 * Unregister from the GLD subsystem.  This can fail, in
	 * particular if there are DLPI style-2 streams still open -
	 * in which case we just return failure without shutting
	 * down chip operations.
	 */
	if (mac_unregister(bgep->mh) != 0)
		return (DDI_FAILURE);

	/*
	 * All activity stopped, so we can clean up & exit
	 */
#ifdef BGE_IPMI_ASF
	bge_unattach(bgep, asf_mode);
#else
	bge_unattach(bgep);
#endif
	return (DDI_SUCCESS);
}


/*
 * ========== Module Loading Data & Entry Points ==========
 */

#undef	BGE_DBG
#define	BGE_DBG		BGE_DBG_INIT	/* debug flag for this code	*/

DDI_DEFINE_STREAM_OPS(bge_dev_ops,
	nulldev,	/* identify */
	nulldev,	/* probe */
	bge_attach,	/* attach */
	bge_detach,	/* detach */
	nodev,		/* reset */
	NULL,		/* cb_ops */
	D_MP,		/* bus_ops */
	NULL,		/* power */
	bge_quiesce	/* quiesce */
);

static struct modldrv bge_modldrv = {
	&mod_driverops,		/* Type of module.
				   This one is a driver */
	bge_ident,		/* short description */
	&bge_dev_ops		/* driver specific ops */
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *)&bge_modldrv, NULL
};


int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

int
_init(void)
{
	int status;

	/* Register with the MAC framework, then install the module */
	mac_init_ops(&bge_dev_ops, "bge");
	status = mod_install(&modlinkage);
	if (status == DDI_SUCCESS)
		mutex_init(bge_log_mutex, NULL, MUTEX_DRIVER, NULL);
	else
		mac_fini_ops(&bge_dev_ops);
	return (status);
}

int
_fini(void)
{
	int status;

	status = mod_remove(&modlinkage);
	if (status == DDI_SUCCESS) {
		mac_fini_ops(&bge_dev_ops);
		mutex_destroy(bge_log_mutex);
	}
	return (status);
}


/*
 * bge_add_intrs:
 *
 * Register FIXED or MSI interrupts.  On success sets bgep->htable,
 * intr_cnt, intr_pri and intr_cap; on failure all allocations made
 * here are released and DDI_FAILURE is returned.
 */
static int
bge_add_intrs(bge_t *bgep, int intr_type)
{
	dev_info_t *dip = bgep->devinfo;
	int avail, actual, intr_size, count = 0;
	int i, flag, ret;

	BGE_DEBUG(("bge_add_intrs($%p, 0x%x)", (void *)bgep, intr_type));

	/* Get number of interrupts */
	ret = ddi_intr_get_nintrs(dip, intr_type, &count);
	if ((ret != DDI_SUCCESS) || (count == 0)) {
		bge_error(bgep, "ddi_intr_get_nintrs() failure, ret: %d, "
		    "count: %d", ret, count);

		return (DDI_FAILURE);
	}

	/* Get number of available interrupts */
	ret = ddi_intr_get_navail(dip, intr_type, &avail);
	if ((ret != DDI_SUCCESS) || (avail == 0)) {
		bge_error(bgep, "ddi_intr_get_navail() failure, "
		    "ret: %d, avail: %d\n", ret, avail);

		return (DDI_FAILURE);
	}

	if (avail < count) {
		BGE_DEBUG(("%s: nintrs() returned %d, navail returned %d",
		    bgep->ifname, count, avail));
	}

	/*
	 * BGE hardware generates only single MSI even though it claims
	 * to support multiple MSIs. So, hard code MSI count value to 1.
	 */
	if (intr_type == DDI_INTR_TYPE_MSI) {
		count = 1;
		flag = DDI_INTR_ALLOC_STRICT;
	} else {
		flag = DDI_INTR_ALLOC_NORMAL;
	}

	/* Allocate an array of interrupt handles */
	intr_size = count * sizeof (ddi_intr_handle_t);
	bgep->htable = kmem_alloc(intr_size, KM_SLEEP);

	/* Call ddi_intr_alloc() */
	ret = ddi_intr_alloc(dip, bgep->htable, intr_type, 0,
	    count, &actual, flag);

	if ((ret != DDI_SUCCESS) || (actual == 0)) {
		bge_error(bgep, "ddi_intr_alloc() failed %d\n", ret);

		kmem_free(bgep->htable, intr_size);
		return (DDI_FAILURE);
	}

	if (actual < count) {
		BGE_DEBUG(("%s: Requested: %d, Received: %d",
		    bgep->ifname, count, actual));
	}

	bgep->intr_cnt = actual;

	/*
	 * Get priority for first msi, assume remaining are all the same
	 */
	if ((ret = ddi_intr_get_pri(bgep->htable[0], &bgep->intr_pri)) !=
	    DDI_SUCCESS) {
		bge_error(bgep, "ddi_intr_get_pri() failed %d\n", ret);

		/* Free already allocated intr */
		for (i = 0; i < actual; i++) {
			(void) ddi_intr_free(bgep->htable[i]);
		}

		kmem_free(bgep->htable, intr_size);
		return (DDI_FAILURE);
	}

	/* Call ddi_intr_add_handler() */
	for (i = 0; i < actual; i++) {
		if ((ret = ddi_intr_add_handler(bgep->htable[i], bge_intr,
		    (caddr_t)bgep, (caddr_t)(uintptr_t)i)) != DDI_SUCCESS) {
			bge_error(bgep, "ddi_intr_add_handler() "
			    "failed %d\n", ret);

			/*
			 * Free already allocated intr
			 *
			 * NOTE(review): handlers registered on earlier
			 * iterations are not removed via
			 * ddi_intr_remove_handler() before the free,
			 * unlike the get_cap failure path below —
			 * confirm whether that leaks registrations.
			 * (Cleanup also reuses the outer loop's <i>,
			 * which is fine only because we return here.)
			 */
			for (i = 0; i < actual; i++) {
				(void) ddi_intr_free(bgep->htable[i]);
			}

			kmem_free(bgep->htable, intr_size);
			return (DDI_FAILURE);
		}
	}

	if ((ret = ddi_intr_get_cap(bgep->htable[0], &bgep->intr_cap))
	    != DDI_SUCCESS) {
		bge_error(bgep, "ddi_intr_get_cap() failed %d\n", ret);

		/* Remove handlers and free handles before bailing */
		for (i = 0; i < actual; i++) {
			(void) ddi_intr_remove_handler(bgep->htable[i]);
			(void) ddi_intr_free(bgep->htable[i]);
		}

		kmem_free(bgep->htable, intr_size);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*
 * bge_rem_intrs:
 *
 * Unregister FIXED or MSI interrupts
 */
static void
bge_rem_intrs(bge_t *bgep)
{
	int i;

	BGE_DEBUG(("bge_rem_intrs($%p)", (void *)bgep));

	/* Call ddi_intr_remove_handler() */
	for (i = 0; i < bgep->intr_cnt; i++) {
		(void) ddi_intr_remove_handler(bgep->htable[i]);
		(void) ddi_intr_free(bgep->htable[i]);
	}

	kmem_free(bgep->htable, bgep->intr_cnt * sizeof (ddi_intr_handle_t));
}


/*
 * Enable all registered interrupts, using block-enable when the
 * capability flags say it is supported.
 */
void
bge_intr_enable(bge_t *bgep)
{
	int i;

	if (bgep->intr_cap & DDI_INTR_FLAG_BLOCK) {
		/* Call ddi_intr_block_enable() for MSI interrupts */
		(void) ddi_intr_block_enable(bgep->htable, bgep->intr_cnt);
	} else {
		/* Call ddi_intr_enable for MSI or FIXED interrupts */
		for (i = 0; i < bgep->intr_cnt; i++) {
			(void) ddi_intr_enable(bgep->htable[i]);
		}
	}
}


/*
 * Disable all registered interrupts; mirror image of
 * bge_intr_enable() above.
 */
void
bge_intr_disable(bge_t *bgep)
{
	int i;

	if (bgep->intr_cap & DDI_INTR_FLAG_BLOCK) {
		/* Call ddi_intr_block_disable() */
		(void) ddi_intr_block_disable(bgep->htable, bgep->intr_cnt);
	} else {
		for (i = 0; i < bgep->intr_cnt; i++) {
			(void) ddi_intr_disable(bgep->htable[i]);
		}
	}
}

/*
 * Re-push the current PHY and chip settings to the hardware.
 * Caller must hold genlock.  Returns 0 on success or IOC_INVAL if
 * either the PHY update or chip sync failed (service impact is
 * reported to FMA in that case).
 */
int
bge_reprogram(bge_t *bgep)
{
	int status = 0;

	ASSERT(mutex_owned(bgep->genlock));

	if (bge_phys_update(bgep) != DDI_SUCCESS) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		status = IOC_INVAL;
	}
#ifdef BGE_IPMI_ASF
	if (bge_chip_sync(bgep, B_TRUE) == DDI_FAILURE) {
#else
	if (bge_chip_sync(bgep) == DDI_FAILURE) {
#endif
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		status = IOC_INVAL;
	}
	/* MSI needs a manual trigger to kick the status block update */
	if (bgep->intr_type == DDI_INTR_TYPE_MSI)
		bge_chip_msi_trig(bgep);
	return (status);
}