1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright (c) 2010-2013, by Broadcom, Inc. 24 * All Rights Reserved. 25 */ 26 27 /* 28 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. 29 * All rights reserved. 30 * Copyright 2016 Nexenta Systems, Inc. All rights reserved. 31 */ 32 33 #include "bge_impl.h" 34 #include <sys/sdt.h> 35 #include <sys/mac_provider.h> 36 #include <sys/mac.h> 37 #include <sys/mac_flow.h> 38 39 40 #ifndef STRINGIFY 41 #define XSTRINGIFY(x) #x 42 #define STRINGIFY(x) XSTRINGIFY(x) 43 #endif 44 45 /* 46 * This is the string displayed by modinfo, etc. 47 */ 48 static char bge_ident[] = "Broadcom Gb Ethernet"; 49 50 /* 51 * Property names 52 */ 53 static char debug_propname[] = "bge-debug-flags"; 54 static char clsize_propname[] = "cache-line-size"; 55 static char latency_propname[] = "latency-timer"; 56 static char localmac_boolname[] = "local-mac-address?"; 57 static char localmac_propname[] = "local-mac-address"; 58 static char macaddr_propname[] = "mac-address"; 59 static char subdev_propname[] = "subsystem-id"; 60 static char subven_propname[] = "subsystem-vendor-id"; 61 static char rxrings_propname[] = "bge-rx-rings"; 62 static char txrings_propname[] = "bge-tx-rings"; 63 static char eee_propname[] = "bge-eee"; 64 static char fm_cap[] = "fm-capable"; 65 static char default_mtu[] = "default_mtu"; 66 67 static int bge_add_intrs(bge_t *, int); 68 static void bge_rem_intrs(bge_t *); 69 static int bge_unicst_set(void *, const uint8_t *, int); 70 static int bge_addmac(void *, const uint8_t *); 71 static int bge_remmac(void *, const uint8_t *); 72 73 /* 74 * Describes the chip's DMA engine 75 */ 76 static ddi_dma_attr_t dma_attr = { 77 DMA_ATTR_V0, /* dma_attr_version */ 78 0x0000000000000000ull, /* dma_attr_addr_lo */ 79 0xFFFFFFFFFFFFFFFFull, /* dma_attr_addr_hi */ 80 0x00000000FFFFFFFFull, /* dma_attr_count_max */ 81 0x0000000000000001ull, /* dma_attr_align */ 82 0x00000FFF, /* dma_attr_burstsizes */ 83 0x00000001, /* dma_attr_minxfer */ 84 0x000000000000FFFFull, /* dma_attr_maxxfer */ 85 0x00000000FFFFFFFFull, /* dma_attr_seg */ 86 1, /* dma_attr_sgllen */ 87 0x00000001, /* dma_attr_granular */ 88 DDI_DMA_FLAGERR /* dma_attr_flags */ 89 }; 90 91 /* 92 * PIO access attributes for registers 93 */ 94 static ddi_device_acc_attr_t bge_reg_accattr = { 95 DDI_DEVICE_ATTR_V1, 96 DDI_NEVERSWAP_ACC, 97 DDI_STRICTORDER_ACC, 98 DDI_FLAGERR_ACC 99 }; 100 101 /* 102 * DMA access attributes for descriptors: NOT to be byte swapped. 103 */ 104 static ddi_device_acc_attr_t bge_desc_accattr = { 105 DDI_DEVICE_ATTR_V0, 106 DDI_NEVERSWAP_ACC, 107 DDI_STRICTORDER_ACC 108 }; 109 110 /* 111 * DMA access attributes for data: NOT to be byte swapped. 112 */ 113 static ddi_device_acc_attr_t bge_data_accattr = { 114 DDI_DEVICE_ATTR_V0, 115 DDI_NEVERSWAP_ACC, 116 DDI_STRICTORDER_ACC 117 }; 118 119 static int bge_m_start(void *); 120 static void bge_m_stop(void *); 121 static int bge_m_promisc(void *, boolean_t); 122 static int bge_m_multicst(void *, boolean_t, const uint8_t *); 123 static void bge_m_ioctl(void *, queue_t *, mblk_t *); 124 static boolean_t bge_m_getcapab(void *, mac_capab_t, void *); 125 static int bge_unicst_set(void *, const uint8_t *, 126 int); 127 static int bge_m_setprop(void *, const char *, mac_prop_id_t, 128 uint_t, const void *); 129 static int bge_m_getprop(void *, const char *, mac_prop_id_t, 130 uint_t, void *); 131 static void bge_m_propinfo(void *, const char *, mac_prop_id_t, 132 mac_prop_info_handle_t); 133 static int bge_set_priv_prop(bge_t *, const char *, uint_t, 134 const void *); 135 static int bge_get_priv_prop(bge_t *, const char *, uint_t, 136 void *); 137 static void bge_priv_propinfo(const char *, 138 mac_prop_info_handle_t); 139 140 static mac_callbacks_t bge_m_callbacks = { 141 .mc_callbacks = MC_IOCTL | MC_SETPROP | MC_GETPROP | MC_PROPINFO | 142 MC_GETCAPAB, 143 .mc_getstat = bge_m_stat, 144 .mc_start = bge_m_start, 145 .mc_stop = bge_m_stop, 146 .mc_setpromisc = bge_m_promisc, 147 .mc_multicst = bge_m_multicst, 148 .mc_tx = bge_m_tx, 149 .mc_ioctl = bge_m_ioctl, 150 .mc_getcapab = bge_m_getcapab, 151 .mc_setprop = bge_m_setprop, 152 .mc_getprop = bge_m_getprop, 153 .mc_propinfo = bge_m_propinfo 154 }; 155 156 char *bge_priv_prop[] = { 157 "_adv_asym_pause_cap", 158 "_adv_pause_cap", 159 "_drain_max", 160 "_msi_cnt", 161 "_rx_intr_coalesce_blank_time", 162 "_tx_intr_coalesce_blank_time", 163 "_rx_intr_coalesce_pkt_cnt", 164 "_tx_intr_coalesce_pkt_cnt", 165 NULL 166 }; 167 168 uint8_t zero_addr[6] = {0, 0, 0, 0, 0, 0}; 169 /* 170 * ========== Transmit and receive ring reinitialisation ========== 171 */ 172 173 /* 174 * These <reinit> routines each reset the specified ring to an initial 175 * state, assuming that the corresponding <init> routine has already 176 * been called exactly once. 177 */ 178 179 static void 180 bge_reinit_send_ring(send_ring_t *srp) 181 { 182 bge_queue_t *txbuf_queue; 183 bge_queue_item_t *txbuf_head; 184 sw_txbuf_t *txbuf; 185 sw_sbd_t *ssbdp; 186 uint32_t slot; 187 188 /* 189 * Reinitialise control variables ... 190 */ 191 srp->tx_flow = 0; 192 srp->tx_next = 0; 193 srp->txfill_next = 0; 194 srp->tx_free = srp->desc.nslots; 195 ASSERT(mutex_owned(srp->tc_lock)); 196 srp->tc_next = 0; 197 srp->txpkt_next = 0; 198 srp->tx_block = 0; 199 srp->tx_nobd = 0; 200 srp->tx_nobuf = 0; 201 202 /* 203 * Initialize the tx buffer push queue 204 */ 205 mutex_enter(srp->freetxbuf_lock); 206 mutex_enter(srp->txbuf_lock); 207 txbuf_queue = &srp->freetxbuf_queue; 208 txbuf_queue->head = NULL; 209 txbuf_queue->count = 0; 210 txbuf_queue->lock = srp->freetxbuf_lock; 211 srp->txbuf_push_queue = txbuf_queue; 212 213 /* 214 * Initialize the tx buffer pop queue 215 */ 216 txbuf_queue = &srp->txbuf_queue; 217 txbuf_queue->head = NULL; 218 txbuf_queue->count = 0; 219 txbuf_queue->lock = srp->txbuf_lock; 220 srp->txbuf_pop_queue = txbuf_queue; 221 txbuf_head = srp->txbuf_head; 222 txbuf = srp->txbuf; 223 for (slot = 0; slot < srp->tx_buffers; ++slot) { 224 txbuf_head->item = txbuf; 225 txbuf_head->next = txbuf_queue->head; 226 txbuf_queue->head = txbuf_head; 227 txbuf_queue->count++; 228 txbuf++; 229 txbuf_head++; 230 } 231 mutex_exit(srp->txbuf_lock); 232 mutex_exit(srp->freetxbuf_lock); 233 234 /* 235 * Zero and sync all the h/w Send Buffer Descriptors 236 */ 237 DMA_ZERO(srp->desc); 238 DMA_SYNC(srp->desc, DDI_DMA_SYNC_FORDEV); 239 bzero(srp->pktp, BGE_SEND_BUF_MAX * sizeof (*srp->pktp)); 240 ssbdp = srp->sw_sbds; 241 for (slot = 0; slot < srp->desc.nslots; ++ssbdp, ++slot) 242 ssbdp->pbuf = NULL; 243 } 244 245 static void 246 bge_reinit_recv_ring(recv_ring_t *rrp) 247 { 248 /* 249 * Reinitialise control variables ... 250 */ 251 rrp->rx_next = 0; 252 } 253 254 static void 255 bge_reinit_buff_ring(buff_ring_t *brp, uint32_t ring) 256 { 257 bge_rbd_t *hw_rbd_p; 258 sw_rbd_t *srbdp; 259 uint32_t bufsize; 260 uint32_t nslots; 261 uint32_t slot; 262 263 static uint16_t ring_type_flag[BGE_BUFF_RINGS_MAX] = { 264 RBD_FLAG_STD_RING, 265 RBD_FLAG_JUMBO_RING, 266 RBD_FLAG_MINI_RING 267 }; 268 269 /* 270 * Zero, initialise and sync all the h/w Receive Buffer Descriptors 271 * Note: all the remaining fields (<type>, <flags>, <ip_cksum>, 272 * <tcp_udp_cksum>, <error_flag>, <vlan_tag>, and <reserved>) 273 * should be zeroed, and so don't need to be set up specifically 274 * once the whole area has been cleared. 275 */ 276 DMA_ZERO(brp->desc); 277 278 hw_rbd_p = DMA_VPTR(brp->desc); 279 nslots = brp->desc.nslots; 280 ASSERT(brp->buf[0].nslots == nslots/BGE_SPLIT); 281 bufsize = brp->buf[0].size; 282 srbdp = brp->sw_rbds; 283 for (slot = 0; slot < nslots; ++hw_rbd_p, ++srbdp, ++slot) { 284 hw_rbd_p->host_buf_addr = srbdp->pbuf.cookie.dmac_laddress; 285 hw_rbd_p->index = (uint16_t)slot; 286 hw_rbd_p->len = (uint16_t)bufsize; 287 hw_rbd_p->opaque = srbdp->pbuf.token; 288 hw_rbd_p->flags |= ring_type_flag[ring]; 289 } 290 291 DMA_SYNC(brp->desc, DDI_DMA_SYNC_FORDEV); 292 293 /* 294 * Finally, reinitialise the ring control variables ... 295 */ 296 brp->rf_next = (nslots != 0) ? (nslots-1) : 0; 297 } 298 299 /* 300 * Reinitialize all rings 301 */ 302 static void 303 bge_reinit_rings(bge_t *bgep) 304 { 305 uint32_t ring; 306 307 ASSERT(mutex_owned(bgep->genlock)); 308 309 /* 310 * Send Rings ... 311 */ 312 for (ring = 0; ring < bgep->chipid.tx_rings; ++ring) 313 bge_reinit_send_ring(&bgep->send[ring]); 314 315 /* 316 * Receive Return Rings ... 317 */ 318 for (ring = 0; ring < bgep->chipid.rx_rings; ++ring) 319 bge_reinit_recv_ring(&bgep->recv[ring]); 320 321 /* 322 * Receive Producer Rings ... 323 */ 324 for (ring = 0; ring < BGE_BUFF_RINGS_USED; ++ring) 325 bge_reinit_buff_ring(&bgep->buff[ring], ring); 326 } 327 328 /* 329 * ========== Internal state management entry points ========== 330 */ 331 332 #undef BGE_DBG 333 #define BGE_DBG BGE_DBG_NEMO /* debug flag for this code */ 334 335 /* 336 * These routines provide all the functionality required by the 337 * corresponding GLD entry points, but don't update the GLD state 338 * so they can be called internally without disturbing our record 339 * of what GLD thinks we should be doing ... 340 */ 341 342 /* 343 * bge_reset() -- reset h/w & rings to initial state 344 */ 345 static int 346 #ifdef BGE_IPMI_ASF 347 bge_reset(bge_t *bgep, uint_t asf_mode) 348 #else 349 bge_reset(bge_t *bgep) 350 #endif 351 { 352 uint32_t ring; 353 int retval; 354 355 BGE_TRACE(("bge_reset($%p)", (void *)bgep)); 356 357 ASSERT(mutex_owned(bgep->genlock)); 358 359 /* 360 * Grab all the other mutexes in the world (this should 361 * ensure no other threads are manipulating driver state) 362 */ 363 for (ring = 0; ring < BGE_RECV_RINGS_MAX; ++ring) 364 mutex_enter(bgep->recv[ring].rx_lock); 365 for (ring = 0; ring < BGE_BUFF_RINGS_MAX; ++ring) 366 mutex_enter(bgep->buff[ring].rf_lock); 367 rw_enter(bgep->errlock, RW_WRITER); 368 for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring) 369 mutex_enter(bgep->send[ring].tx_lock); 370 for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring) 371 mutex_enter(bgep->send[ring].tc_lock); 372 373 #ifdef BGE_IPMI_ASF 374 retval = bge_chip_reset(bgep, B_TRUE, asf_mode); 375 #else 376 retval = bge_chip_reset(bgep, B_TRUE); 377 #endif 378 bge_reinit_rings(bgep); 379 380 /* 381 * Free the world ... 382 */ 383 for (ring = BGE_SEND_RINGS_MAX; ring-- > 0; ) 384 mutex_exit(bgep->send[ring].tc_lock); 385 for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring) 386 mutex_exit(bgep->send[ring].tx_lock); 387 rw_exit(bgep->errlock); 388 for (ring = BGE_BUFF_RINGS_MAX; ring-- > 0; ) 389 mutex_exit(bgep->buff[ring].rf_lock); 390 for (ring = BGE_RECV_RINGS_MAX; ring-- > 0; ) 391 mutex_exit(bgep->recv[ring].rx_lock); 392 393 BGE_DEBUG(("bge_reset($%p) done", (void *)bgep)); 394 return (retval); 395 } 396 397 /* 398 * bge_stop() -- stop processing, don't reset h/w or rings 399 */ 400 static void 401 bge_stop(bge_t *bgep) 402 { 403 BGE_TRACE(("bge_stop($%p)", (void *)bgep)); 404 405 ASSERT(mutex_owned(bgep->genlock)); 406 407 #ifdef BGE_IPMI_ASF 408 if (bgep->asf_enabled) { 409 bgep->asf_pseudostop = B_TRUE; 410 } else { 411 #endif 412 bge_chip_stop(bgep, B_FALSE); 413 #ifdef BGE_IPMI_ASF 414 } 415 #endif 416 417 BGE_DEBUG(("bge_stop($%p) done", (void *)bgep)); 418 } 419 420 /* 421 * bge_start() -- start transmitting/receiving 422 */ 423 static int 424 bge_start(bge_t *bgep, boolean_t reset_phys) 425 { 426 int retval; 427 428 BGE_TRACE(("bge_start($%p, %d)", (void *)bgep, reset_phys)); 429 430 ASSERT(mutex_owned(bgep->genlock)); 431 432 /* 433 * Start chip processing, including enabling interrupts 434 */ 435 retval = bge_chip_start(bgep, reset_phys); 436 437 BGE_DEBUG(("bge_start($%p, %d) done", (void *)bgep, reset_phys)); 438 return (retval); 439 } 440 441 /* 442 * bge_restart - restart transmitting/receiving after error or suspend 443 */ 444 int 445 bge_restart(bge_t *bgep, boolean_t reset_phys) 446 { 447 int retval = DDI_SUCCESS; 448 ASSERT(mutex_owned(bgep->genlock)); 449 450 #ifdef BGE_IPMI_ASF 451 if (bgep->asf_enabled) { 452 if (bge_reset(bgep, ASF_MODE_POST_INIT) != DDI_SUCCESS) 453 retval = DDI_FAILURE; 454 } else 455 if (bge_reset(bgep, ASF_MODE_NONE) != DDI_SUCCESS) 456 retval = DDI_FAILURE; 457 #else 458 if (bge_reset(bgep) != DDI_SUCCESS) 459 retval = DDI_FAILURE; 460 #endif 461 if (bgep->bge_mac_state == BGE_MAC_STARTED) { 462 if (bge_start(bgep, reset_phys) != DDI_SUCCESS) 463 retval = DDI_FAILURE; 464 bgep->watchdog = 0; 465 ddi_trigger_softintr(bgep->drain_id); 466 } 467 468 BGE_DEBUG(("bge_restart($%p, %d) done", (void *)bgep, reset_phys)); 469 return (retval); 470 } 471 472 473 /* 474 * ========== Nemo-required management entry points ========== 475 */ 476 477 #undef BGE_DBG 478 #define BGE_DBG BGE_DBG_NEMO /* debug flag for this code */ 479 480 /* 481 * bge_m_stop() -- stop transmitting/receiving 482 */ 483 static void 484 bge_m_stop(void *arg) 485 { 486 bge_t *bgep = arg; /* private device info */ 487 send_ring_t *srp; 488 uint32_t ring; 489 490 BGE_TRACE(("bge_m_stop($%p)", arg)); 491 492 /* 493 * Just stop processing, then record new GLD state 494 */ 495 mutex_enter(bgep->genlock); 496 if (!(bgep->progress & PROGRESS_INTR)) { 497 /* can happen during autorecovery */ 498 bgep->bge_chip_state = BGE_CHIP_STOPPED; 499 } else 500 bge_stop(bgep); 501 502 bgep->link_state = LINK_STATE_UNKNOWN; 503 mac_link_update(bgep->mh, bgep->link_state); 504 505 /* 506 * Free the possible tx buffers allocated in tx process. 507 */ 508 #ifdef BGE_IPMI_ASF 509 if (!bgep->asf_pseudostop) 510 #endif 511 { 512 rw_enter(bgep->errlock, RW_WRITER); 513 for (ring = 0; ring < bgep->chipid.tx_rings; ++ring) { 514 srp = &bgep->send[ring]; 515 mutex_enter(srp->tx_lock); 516 if (srp->tx_array > 1) 517 bge_free_txbuf_arrays(srp); 518 mutex_exit(srp->tx_lock); 519 } 520 rw_exit(bgep->errlock); 521 } 522 bgep->bge_mac_state = BGE_MAC_STOPPED; 523 BGE_DEBUG(("bge_m_stop($%p) done", arg)); 524 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) 525 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_UNAFFECTED); 526 mutex_exit(bgep->genlock); 527 } 528 529 /* 530 * bge_m_start() -- start transmitting/receiving 531 */ 532 static int 533 bge_m_start(void *arg) 534 { 535 bge_t *bgep = arg; /* private device info */ 536 537 BGE_TRACE(("bge_m_start($%p)", arg)); 538 539 /* 540 * Start processing and record new GLD state 541 */ 542 mutex_enter(bgep->genlock); 543 if (!(bgep->progress & PROGRESS_INTR)) { 544 /* can happen during autorecovery */ 545 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 546 mutex_exit(bgep->genlock); 547 return (EIO); 548 } 549 #ifdef BGE_IPMI_ASF 550 if (bgep->asf_enabled) { 551 if ((bgep->asf_status == ASF_STAT_RUN) && 552 (bgep->asf_pseudostop)) { 553 bgep->bge_mac_state = BGE_MAC_STARTED; 554 /* forcing a mac link update here */ 555 bge_phys_check(bgep); 556 bgep->link_state = (bgep->param_link_up) ? LINK_STATE_UP : 557 LINK_STATE_DOWN; 558 mac_link_update(bgep->mh, bgep->link_state); 559 mutex_exit(bgep->genlock); 560 return (0); 561 } 562 } 563 if (bge_reset(bgep, ASF_MODE_INIT) != DDI_SUCCESS) { 564 #else 565 if (bge_reset(bgep) != DDI_SUCCESS) { 566 #endif 567 (void) bge_check_acc_handle(bgep, bgep->cfg_handle); 568 (void) bge_check_acc_handle(bgep, bgep->io_handle); 569 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 570 mutex_exit(bgep->genlock); 571 return (EIO); 572 } 573 if (bge_start(bgep, B_TRUE) != DDI_SUCCESS) { 574 (void) bge_check_acc_handle(bgep, bgep->cfg_handle); 575 (void) bge_check_acc_handle(bgep, bgep->io_handle); 576 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 577 mutex_exit(bgep->genlock); 578 return (EIO); 579 } 580 bgep->watchdog = 0; 581 bgep->bge_mac_state = BGE_MAC_STARTED; 582 BGE_DEBUG(("bge_m_start($%p) done", arg)); 583 584 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) { 585 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 586 mutex_exit(bgep->genlock); 587 return (EIO); 588 } 589 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) { 590 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 591 mutex_exit(bgep->genlock); 592 return (EIO); 593 } 594 #ifdef BGE_IPMI_ASF 595 if (bgep->asf_enabled) { 596 if (bgep->asf_status != ASF_STAT_RUN) { 597 /* start ASF heart beat */ 598 bgep->asf_timeout_id = timeout(bge_asf_heartbeat, 599 (void *)bgep, 600 drv_usectohz(BGE_ASF_HEARTBEAT_INTERVAL)); 601 bgep->asf_status = ASF_STAT_RUN; 602 } 603 } 604 #endif 605 mutex_exit(bgep->genlock); 606 607 return (0); 608 } 609 610 /* 611 * bge_unicst_set() -- set the physical network address 612 */ 613 static int 614 bge_unicst_set(void *arg, const uint8_t *macaddr, int slot) 615 { 616 bge_t *bgep = arg; /* private device info */ 617 618 BGE_TRACE(("bge_unicst_set($%p, %s)", arg, 619 ether_sprintf((void *)macaddr))); 620 /* 621 * Remember the new current address in the driver state 622 * Sync the chip's idea of the address too ... 623 */ 624 mutex_enter(bgep->genlock); 625 if (!(bgep->progress & PROGRESS_INTR)) { 626 /* can happen during autorecovery */ 627 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 628 mutex_exit(bgep->genlock); 629 return (EIO); 630 } 631 ethaddr_copy(macaddr, bgep->curr_addr[slot].addr); 632 #ifdef BGE_IPMI_ASF 633 if (bge_chip_sync(bgep, B_FALSE) == DDI_FAILURE) { 634 #else 635 if (bge_chip_sync(bgep) == DDI_FAILURE) { 636 #endif 637 (void) bge_check_acc_handle(bgep, bgep->cfg_handle); 638 (void) bge_check_acc_handle(bgep, bgep->io_handle); 639 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 640 mutex_exit(bgep->genlock); 641 return (EIO); 642 } 643 #ifdef BGE_IPMI_ASF 644 if (bgep->asf_enabled) { 645 /* 646 * The above bge_chip_sync() function wrote the ethernet MAC 647 * addresses registers which destroyed the IPMI/ASF sideband. 648 * Here, we have to reset chip to make IPMI/ASF sideband work. 649 */ 650 if (bgep->asf_status == ASF_STAT_RUN) { 651 /* 652 * We must stop ASF heart beat before bge_chip_stop(), 653 * otherwise some computers (ex. IBM HS20 blade server) 654 * may crash. 655 */ 656 bge_asf_update_status(bgep); 657 bge_asf_stop_timer(bgep); 658 bgep->asf_status = ASF_STAT_STOP; 659 660 bge_asf_pre_reset_operations(bgep, BGE_INIT_RESET); 661 } 662 bge_chip_stop(bgep, B_FALSE); 663 664 if (bge_restart(bgep, B_FALSE) == DDI_FAILURE) { 665 (void) bge_check_acc_handle(bgep, bgep->cfg_handle); 666 (void) bge_check_acc_handle(bgep, bgep->io_handle); 667 ddi_fm_service_impact(bgep->devinfo, 668 DDI_SERVICE_DEGRADED); 669 mutex_exit(bgep->genlock); 670 return (EIO); 671 } 672 673 /* 674 * Start our ASF heartbeat counter as soon as possible. 675 */ 676 if (bgep->asf_status != ASF_STAT_RUN) { 677 /* start ASF heart beat */ 678 bgep->asf_timeout_id = timeout(bge_asf_heartbeat, 679 (void *)bgep, 680 drv_usectohz(BGE_ASF_HEARTBEAT_INTERVAL)); 681 bgep->asf_status = ASF_STAT_RUN; 682 } 683 } 684 #endif 685 BGE_DEBUG(("bge_unicst_set($%p) done", arg)); 686 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) { 687 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 688 mutex_exit(bgep->genlock); 689 return (EIO); 690 } 691 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) { 692 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 693 mutex_exit(bgep->genlock); 694 return (EIO); 695 } 696 mutex_exit(bgep->genlock); 697 698 return (0); 699 } 700 701 extern void bge_wake_factotum(bge_t *); 702 703 static boolean_t 704 bge_param_locked(mac_prop_id_t pr_num) 705 { 706 /* 707 * All adv_* parameters are locked (read-only) while 708 * the device is in any sort of loopback mode ... 709 */ 710 switch (pr_num) { 711 case MAC_PROP_ADV_1000FDX_CAP: 712 case MAC_PROP_EN_1000FDX_CAP: 713 case MAC_PROP_ADV_1000HDX_CAP: 714 case MAC_PROP_EN_1000HDX_CAP: 715 case MAC_PROP_ADV_100FDX_CAP: 716 case MAC_PROP_EN_100FDX_CAP: 717 case MAC_PROP_ADV_100HDX_CAP: 718 case MAC_PROP_EN_100HDX_CAP: 719 case MAC_PROP_ADV_10FDX_CAP: 720 case MAC_PROP_EN_10FDX_CAP: 721 case MAC_PROP_ADV_10HDX_CAP: 722 case MAC_PROP_EN_10HDX_CAP: 723 case MAC_PROP_AUTONEG: 724 case MAC_PROP_FLOWCTRL: 725 return (B_TRUE); 726 } 727 return (B_FALSE); 728 } 729 /* 730 * callback functions for set/get of properties 731 */ 732 static int 733 bge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num, 734 uint_t pr_valsize, const void *pr_val) 735 { 736 bge_t *bgep = barg; 737 int err = 0; 738 uint32_t cur_mtu, new_mtu; 739 link_flowctrl_t fl; 740 741 mutex_enter(bgep->genlock); 742 if (bgep->param_loop_mode != BGE_LOOP_NONE && 743 bge_param_locked(pr_num)) { 744 /* 745 * All adv_* parameters are locked (read-only) 746 * while the device is in any sort of loopback mode. 747 */ 748 mutex_exit(bgep->genlock); 749 return (EBUSY); 750 } 751 if ((bgep->chipid.flags & CHIP_FLAG_SERDES) && 752 ((pr_num == MAC_PROP_EN_100FDX_CAP) || 753 (pr_num == MAC_PROP_EN_100HDX_CAP) || 754 (pr_num == MAC_PROP_EN_10FDX_CAP) || 755 (pr_num == MAC_PROP_EN_10HDX_CAP))) { 756 /* 757 * these properties are read/write on copper, 758 * read-only and 0 on serdes 759 */ 760 mutex_exit(bgep->genlock); 761 return (ENOTSUP); 762 } 763 if (DEVICE_5906_SERIES_CHIPSETS(bgep) && 764 ((pr_num == MAC_PROP_EN_1000FDX_CAP) || 765 (pr_num == MAC_PROP_EN_1000HDX_CAP))) { 766 mutex_exit(bgep->genlock); 767 return (ENOTSUP); 768 } 769 770 switch (pr_num) { 771 case MAC_PROP_EN_1000FDX_CAP: 772 bgep->param_en_1000fdx = *(uint8_t *)pr_val; 773 bgep->param_adv_1000fdx = *(uint8_t *)pr_val; 774 goto reprogram; 775 case MAC_PROP_EN_1000HDX_CAP: 776 bgep->param_en_1000hdx = *(uint8_t *)pr_val; 777 bgep->param_adv_1000hdx = *(uint8_t *)pr_val; 778 goto reprogram; 779 case MAC_PROP_EN_100FDX_CAP: 780 bgep->param_en_100fdx = *(uint8_t *)pr_val; 781 bgep->param_adv_100fdx = *(uint8_t *)pr_val; 782 goto reprogram; 783 case MAC_PROP_EN_100HDX_CAP: 784 bgep->param_en_100hdx = *(uint8_t *)pr_val; 785 bgep->param_adv_100hdx = *(uint8_t *)pr_val; 786 goto reprogram; 787 case MAC_PROP_EN_10FDX_CAP: 788 bgep->param_en_10fdx = *(uint8_t *)pr_val; 789 bgep->param_adv_10fdx = *(uint8_t *)pr_val; 790 goto reprogram; 791 case MAC_PROP_EN_10HDX_CAP: 792 bgep->param_en_10hdx = *(uint8_t *)pr_val; 793 bgep->param_adv_10hdx = *(uint8_t *)pr_val; 794 reprogram: 795 if (err == 0 && bge_reprogram(bgep) == IOC_INVAL) 796 err = EINVAL; 797 break; 798 case MAC_PROP_ADV_1000FDX_CAP: 799 case MAC_PROP_ADV_1000HDX_CAP: 800 case MAC_PROP_ADV_100FDX_CAP: 801 case MAC_PROP_ADV_100HDX_CAP: 802 case MAC_PROP_ADV_10FDX_CAP: 803 case MAC_PROP_ADV_10HDX_CAP: 804 case MAC_PROP_STATUS: 805 case MAC_PROP_SPEED: 806 case MAC_PROP_DUPLEX: 807 err = ENOTSUP; /* read-only prop. Can't set this */ 808 break; 809 case MAC_PROP_AUTONEG: 810 bgep->param_adv_autoneg = *(uint8_t *)pr_val; 811 if (bge_reprogram(bgep) == IOC_INVAL) 812 err = EINVAL; 813 break; 814 case MAC_PROP_MTU: 815 cur_mtu = bgep->chipid.default_mtu; 816 bcopy(pr_val, &new_mtu, sizeof (new_mtu)); 817 818 if (new_mtu == cur_mtu) { 819 err = 0; 820 break; 821 } 822 if (new_mtu < BGE_DEFAULT_MTU || 823 new_mtu > BGE_MAXIMUM_MTU) { 824 err = EINVAL; 825 break; 826 } 827 if ((new_mtu > BGE_DEFAULT_MTU) && 828 (bgep->chipid.flags & CHIP_FLAG_NO_JUMBO)) { 829 err = EINVAL; 830 break; 831 } 832 if (bgep->bge_mac_state == BGE_MAC_STARTED) { 833 err = EBUSY; 834 break; 835 } 836 bgep->chipid.default_mtu = new_mtu; 837 if (bge_chip_id_init(bgep)) { 838 err = EINVAL; 839 break; 840 } 841 bgep->bge_dma_error = B_TRUE; 842 bgep->manual_reset = B_TRUE; 843 bge_chip_stop(bgep, B_TRUE); 844 bge_wake_factotum(bgep); 845 err = 0; 846 break; 847 case MAC_PROP_FLOWCTRL: 848 bcopy(pr_val, &fl, sizeof (fl)); 849 switch (fl) { 850 default: 851 err = ENOTSUP; 852 break; 853 case LINK_FLOWCTRL_NONE: 854 bgep->param_adv_pause = 0; 855 bgep->param_adv_asym_pause = 0; 856 857 bgep->param_link_rx_pause = B_FALSE; 858 bgep->param_link_tx_pause = B_FALSE; 859 break; 860 case LINK_FLOWCTRL_RX: 861 bgep->param_adv_pause = 1; 862 bgep->param_adv_asym_pause = 1; 863 864 bgep->param_link_rx_pause = B_TRUE; 865 bgep->param_link_tx_pause = B_FALSE; 866 break; 867 case LINK_FLOWCTRL_TX: 868 bgep->param_adv_pause = 0; 869 bgep->param_adv_asym_pause = 1; 870 871 bgep->param_link_rx_pause = B_FALSE; 872 bgep->param_link_tx_pause = B_TRUE; 873 break; 874 case LINK_FLOWCTRL_BI: 875 bgep->param_adv_pause = 1; 876 bgep->param_adv_asym_pause = 0; 877 878 bgep->param_link_rx_pause = B_TRUE; 879 bgep->param_link_tx_pause = B_TRUE; 880 break; 881 } 882 883 if (err == 0) { 884 if (bge_reprogram(bgep) == IOC_INVAL) 885 err = EINVAL; 886 } 887 888 break; 889 case MAC_PROP_PRIVATE: 890 err = bge_set_priv_prop(bgep, pr_name, pr_valsize, 891 pr_val); 892 break; 893 default: 894 err = ENOTSUP; 895 break; 896 } 897 mutex_exit(bgep->genlock); 898 return (err); 899 } 900 901 /* ARGSUSED */ 902 static int 903 bge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num, 904 uint_t pr_valsize, void *pr_val) 905 { 906 bge_t *bgep = barg; 907 int err = 0; 908 909 switch (pr_num) { 910 case MAC_PROP_DUPLEX: 911 ASSERT(pr_valsize >= sizeof (link_duplex_t)); 912 bcopy(&bgep->param_link_duplex, pr_val, 913 sizeof (link_duplex_t)); 914 break; 915 case MAC_PROP_SPEED: { 916 uint64_t speed = bgep->param_link_speed * 1000000ull; 917 918 ASSERT(pr_valsize >= sizeof (speed)); 919 bcopy(&speed, pr_val, sizeof (speed)); 920 break; 921 } 922 case MAC_PROP_STATUS: 923 ASSERT(pr_valsize >= sizeof (link_state_t)); 924 bcopy(&bgep->link_state, pr_val, 925 sizeof (link_state_t)); 926 break; 927 case MAC_PROP_AUTONEG: 928 *(uint8_t *)pr_val = bgep->param_adv_autoneg; 929 break; 930 case MAC_PROP_FLOWCTRL: { 931 link_flowctrl_t fl; 932 933 ASSERT(pr_valsize >= sizeof (fl)); 934 935 if (bgep->param_link_rx_pause && 936 !bgep->param_link_tx_pause) 937 fl = LINK_FLOWCTRL_RX; 938 939 if (!bgep->param_link_rx_pause && 940 !bgep->param_link_tx_pause) 941 fl = LINK_FLOWCTRL_NONE; 942 943 if (!bgep->param_link_rx_pause && 944 bgep->param_link_tx_pause) 945 fl = LINK_FLOWCTRL_TX; 946 947 if (bgep->param_link_rx_pause && 948 bgep->param_link_tx_pause) 949 fl = LINK_FLOWCTRL_BI; 950 bcopy(&fl, pr_val, sizeof (fl)); 951 break; 952 } 953 case MAC_PROP_ADV_1000FDX_CAP: 954 *(uint8_t *)pr_val = bgep->param_adv_1000fdx; 955 break; 956 case MAC_PROP_EN_1000FDX_CAP: 957 *(uint8_t *)pr_val = bgep->param_en_1000fdx; 958 break; 959 case MAC_PROP_ADV_1000HDX_CAP: 960 *(uint8_t *)pr_val = bgep->param_adv_1000hdx; 961 break; 962 case MAC_PROP_EN_1000HDX_CAP: 963 *(uint8_t *)pr_val = bgep->param_en_1000hdx; 964 break; 965 case MAC_PROP_ADV_100FDX_CAP: 966 *(uint8_t *)pr_val = bgep->param_adv_100fdx; 967 break; 968 case MAC_PROP_EN_100FDX_CAP: 969 *(uint8_t *)pr_val = bgep->param_en_100fdx; 970 break; 971 case MAC_PROP_ADV_100HDX_CAP: 972 *(uint8_t *)pr_val = bgep->param_adv_100hdx; 973 break; 974 case MAC_PROP_EN_100HDX_CAP: 975 *(uint8_t *)pr_val = bgep->param_en_100hdx; 976 break; 977 case MAC_PROP_ADV_10FDX_CAP: 978 *(uint8_t *)pr_val = bgep->param_adv_10fdx; 979 break; 980 case MAC_PROP_EN_10FDX_CAP: 981 *(uint8_t *)pr_val = bgep->param_en_10fdx; 982 break; 983 case MAC_PROP_ADV_10HDX_CAP: 984 *(uint8_t *)pr_val = bgep->param_adv_10hdx; 985 break; 986 case MAC_PROP_EN_10HDX_CAP: 987 *(uint8_t *)pr_val = bgep->param_en_10hdx; 988 break; 989 case MAC_PROP_ADV_100T4_CAP: 990 case MAC_PROP_EN_100T4_CAP: 991 *(uint8_t *)pr_val = 0; 992 break; 993 case MAC_PROP_PRIVATE: 994 err = bge_get_priv_prop(bgep, pr_name, 995 pr_valsize, pr_val); 996 return (err); 997 default: 998 return (ENOTSUP); 999 } 1000 return (0); 1001 } 1002 1003 static void 1004 bge_m_propinfo(void *barg, const char *pr_name, mac_prop_id_t pr_num, 1005 mac_prop_info_handle_t prh) 1006 { 1007 bge_t *bgep = barg; 1008 int flags = bgep->chipid.flags; 1009 1010 /* 1011 * By default permissions are read/write unless specified 1012 * otherwise by the driver. 1013 */ 1014 1015 switch (pr_num) { 1016 case MAC_PROP_DUPLEX: 1017 case MAC_PROP_SPEED: 1018 case MAC_PROP_STATUS: 1019 case MAC_PROP_ADV_1000FDX_CAP: 1020 case MAC_PROP_ADV_1000HDX_CAP: 1021 case MAC_PROP_ADV_100FDX_CAP: 1022 case MAC_PROP_ADV_100HDX_CAP: 1023 case MAC_PROP_ADV_10FDX_CAP: 1024 case MAC_PROP_ADV_10HDX_CAP: 1025 case MAC_PROP_ADV_100T4_CAP: 1026 case MAC_PROP_EN_100T4_CAP: 1027 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ); 1028 break; 1029 1030 case MAC_PROP_EN_1000FDX_CAP: 1031 case MAC_PROP_EN_1000HDX_CAP: 1032 if (DEVICE_5906_SERIES_CHIPSETS(bgep)) 1033 mac_prop_info_set_default_uint8(prh, 0); 1034 else 1035 mac_prop_info_set_default_uint8(prh, 1); 1036 break; 1037 1038 case MAC_PROP_EN_100FDX_CAP: 1039 case MAC_PROP_EN_100HDX_CAP: 1040 case MAC_PROP_EN_10FDX_CAP: 1041 case MAC_PROP_EN_10HDX_CAP: 1042 mac_prop_info_set_default_uint8(prh, 1043 (flags & CHIP_FLAG_SERDES) ? 0 : 1); 1044 break; 1045 1046 case MAC_PROP_AUTONEG: 1047 mac_prop_info_set_default_uint8(prh, 1); 1048 break; 1049 1050 case MAC_PROP_FLOWCTRL: 1051 mac_prop_info_set_default_link_flowctrl(prh, 1052 LINK_FLOWCTRL_BI); 1053 break; 1054 1055 case MAC_PROP_MTU: 1056 mac_prop_info_set_range_uint32(prh, BGE_DEFAULT_MTU, 1057 (flags & CHIP_FLAG_NO_JUMBO) ? 1058 BGE_DEFAULT_MTU : BGE_MAXIMUM_MTU); 1059 break; 1060 1061 case MAC_PROP_PRIVATE: 1062 bge_priv_propinfo(pr_name, prh); 1063 break; 1064 } 1065 1066 mutex_enter(bgep->genlock); 1067 if ((bgep->param_loop_mode != BGE_LOOP_NONE && 1068 bge_param_locked(pr_num)) || 1069 ((bgep->chipid.flags & CHIP_FLAG_SERDES) && 1070 ((pr_num == MAC_PROP_EN_100FDX_CAP) || 1071 (pr_num == MAC_PROP_EN_100HDX_CAP) || 1072 (pr_num == MAC_PROP_EN_10FDX_CAP) || 1073 (pr_num == MAC_PROP_EN_10HDX_CAP))) || 1074 (DEVICE_5906_SERIES_CHIPSETS(bgep) && 1075 ((pr_num == MAC_PROP_EN_1000FDX_CAP) || 1076 (pr_num == MAC_PROP_EN_1000HDX_CAP)))) 1077 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ); 1078 mutex_exit(bgep->genlock); 1079 } 1080 1081 /* ARGSUSED */ 1082 static int 1083 bge_set_priv_prop(bge_t *bgep, const char *pr_name, uint_t pr_valsize, 1084 const void *pr_val) 1085 { 1086 int err = 0; 1087 long result; 1088 1089 if (strcmp(pr_name, "_adv_pause_cap") == 0) { 1090 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 1091 if (result > 1 || result < 0) { 1092 err = EINVAL; 1093 } else { 1094 bgep->param_adv_pause = (uint32_t)result; 1095 if (bge_reprogram(bgep) == IOC_INVAL) 1096 err = EINVAL; 1097 } 1098 return (err); 1099 } 1100 if (strcmp(pr_name, "_adv_asym_pause_cap") == 0) { 1101 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 1102 if (result > 1 || result < 0) { 1103 err = EINVAL; 1104 } else { 1105 bgep->param_adv_asym_pause = (uint32_t)result; 1106 if (bge_reprogram(bgep) == IOC_INVAL) 1107 err = EINVAL; 1108 } 1109 return (err); 1110 } 1111 if (strcmp(pr_name, "_drain_max") == 0) { 1112 1113 /* 1114 * on the Tx side, we need to update the h/w register for 1115 * real packet transmission per packet. The drain_max parameter 1116 * is used to reduce the register access. This parameter 1117 * controls the max number of packets that we will hold before 1118 * updating the bge h/w to trigger h/w transmit. The bge 1119 * chipset usually has a max of 512 Tx descriptors, thus 1120 * the upper bound on drain_max is 512. 1121 */ 1122 if (pr_val == NULL) { 1123 err = EINVAL; 1124 return (err); 1125 } 1126 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 1127 if (result > 512 || result < 1) 1128 err = EINVAL; 1129 else { 1130 bgep->param_drain_max = (uint32_t)result; 1131 if (bge_reprogram(bgep) == IOC_INVAL) 1132 err = EINVAL; 1133 } 1134 return (err); 1135 } 1136 if (strcmp(pr_name, "_msi_cnt") == 0) { 1137 1138 if (pr_val == NULL) { 1139 err = EINVAL; 1140 return (err); 1141 } 1142 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 1143 if (result > 7 || result < 0) 1144 err = EINVAL; 1145 else { 1146 bgep->param_msi_cnt = (uint32_t)result; 1147 if (bge_reprogram(bgep) == IOC_INVAL) 1148 err = EINVAL; 1149 } 1150 return (err); 1151 } 1152 if (strcmp(pr_name, "_rx_intr_coalesce_blank_time") == 0) { 1153 if (ddi_strtol(pr_val, (char **)NULL, 0, &result) != 0) 1154 return (EINVAL); 1155 if (result < 0) 1156 err = EINVAL; 1157 else { 1158 bgep->chipid.rx_ticks_norm = (uint32_t)result; 1159 bge_chip_coalesce_update(bgep); 1160 } 1161 return (err); 1162 } 1163 1164 if (strcmp(pr_name, "_rx_intr_coalesce_pkt_cnt") == 0) { 1165 if (ddi_strtol(pr_val, (char **)NULL, 0, &result) != 0) 1166 return (EINVAL); 1167 1168 if (result < 0) 1169 err = EINVAL; 1170 else { 1171 bgep->chipid.rx_count_norm = (uint32_t)result; 1172 bge_chip_coalesce_update(bgep); 1173 } 1174 return (err); 1175 } 1176 if (strcmp(pr_name, "_tx_intr_coalesce_blank_time") == 0) { 1177 if (ddi_strtol(pr_val, (char **)NULL, 0, &result) != 0) 1178 return (EINVAL); 1179 if (result < 0) 1180 err = EINVAL; 1181 else { 1182 bgep->chipid.tx_ticks_norm = (uint32_t)result; 1183 bge_chip_coalesce_update(bgep); 1184 } 1185 return (err); 1186 } 1187 1188 if (strcmp(pr_name, "_tx_intr_coalesce_pkt_cnt") == 0) { 1189 if (ddi_strtol(pr_val, (char **)NULL, 0, &result) != 0) 1190 return (EINVAL); 1191 1192 if (result < 0) 1193 err = EINVAL; 1194 else { 1195 bgep->chipid.tx_count_norm = (uint32_t)result; 1196 bge_chip_coalesce_update(bgep); 1197 } 1198 return (err); 1199 } 1200 return (ENOTSUP); 1201 } 1202 1203 static int 1204 bge_get_priv_prop(bge_t *bge, const char *pr_name, uint_t pr_valsize, 1205 void *pr_val) 1206 { 1207 int value; 1208 1209 if (strcmp(pr_name, "_adv_pause_cap") == 0) 1210 value = bge->param_adv_pause; 1211 else if (strcmp(pr_name, "_adv_asym_pause_cap") == 0) 1212 value = bge->param_adv_asym_pause; 1213 else if (strcmp(pr_name, "_drain_max") == 0) 1214 value = bge->param_drain_max; 1215 else if (strcmp(pr_name, "_msi_cnt") == 0) 1216 value = bge->param_msi_cnt; 1217 else if (strcmp(pr_name, "_rx_intr_coalesce_blank_time") == 0) 1218 value = bge->chipid.rx_ticks_norm; 1219 else if (strcmp(pr_name, "_tx_intr_coalesce_blank_time") == 0) 1220 value = bge->chipid.tx_ticks_norm; 1221 else if (strcmp(pr_name, "_rx_intr_coalesce_pkt_cnt") == 0) 1222 value = bge->chipid.rx_count_norm; 1223 else if (strcmp(pr_name, "_tx_intr_coalesce_pkt_cnt") == 0) 1224 value = bge->chipid.tx_count_norm; 1225 else 1226 return (ENOTSUP); 1227 1228 (void) snprintf(pr_val, pr_valsize, "%d", value); 1229 return (0); 1230 } 1231 1232 static void 1233 bge_priv_propinfo(const char *pr_name, mac_prop_info_handle_t mph) 1234 { 1235 char valstr[64]; 1236 int value; 1237 1238 if (strcmp(pr_name, "_adv_pause_cap") == 0) 1239 value = 1; 1240 else if (strcmp(pr_name, "_adv_asym_pause_cap") == 0) 1241 value = 1; 1242 else if (strcmp(pr_name, "_drain_max") == 0) 1243 value = 64; 1244 else if (strcmp(pr_name, "_msi_cnt") == 0) 1245 value = 0; 1246 else if (strcmp(pr_name, "_rx_intr_coalesce_blank_time") == 0) 1247 value = bge_rx_ticks_norm; 1248 else if (strcmp(pr_name, "_tx_intr_coalesce_blank_time") == 0) 1249 value = bge_tx_ticks_norm; 1250 else if (strcmp(pr_name, "_rx_intr_coalesce_pkt_cnt") == 0) 1251 value = bge_rx_count_norm; 1252 else if (strcmp(pr_name, "_tx_intr_coalesce_pkt_cnt") == 0) 1253 value = bge_tx_count_norm; 1254 else 1255 return; 1256 1257 (void) snprintf(valstr, sizeof (valstr), "%d", value); 1258 mac_prop_info_set_default_str(mph, valstr); 1259 } 1260 1261 /* 1262 * Compute the index of the required bit in the multicast hash map. 1263 * This must mirror the way the hardware actually does it! 1264 * See Broadcom document 570X-PG102-R page 125. 1265 */ 1266 static uint32_t 1267 bge_hash_index(const uint8_t *mca) 1268 { 1269 uint32_t hash; 1270 1271 CRC32(hash, mca, ETHERADDRL, -1U, crc32_table); 1272 1273 return (hash); 1274 } 1275 1276 /* 1277 * bge_m_multicst_add() -- enable/disable a multicast address 1278 */ 1279 static int 1280 bge_m_multicst(void *arg, boolean_t add, const uint8_t *mca) 1281 { 1282 bge_t *bgep = arg; /* private device info */ 1283 uint32_t hash; 1284 uint32_t index; 1285 uint32_t word; 1286 uint32_t bit; 1287 uint8_t *refp; 1288 1289 BGE_TRACE(("bge_m_multicst($%p, %s, %s)", arg, 1290 (add) ? "add" : "remove", ether_sprintf((void *)mca))); 1291 1292 /* 1293 * Precalculate all required masks, pointers etc ... 1294 */ 1295 hash = bge_hash_index(mca); 1296 index = hash % BGE_HASH_TABLE_SIZE; 1297 word = index/32u; 1298 bit = 1 << (index % 32u); 1299 refp = &bgep->mcast_refs[index]; 1300 1301 BGE_DEBUG(("bge_m_multicst: hash 0x%x index %d (%d:0x%x) = %d", 1302 hash, index, word, bit, *refp)); 1303 1304 /* 1305 * We must set the appropriate bit in the hash map (and the 1306 * corresponding h/w register) when the refcount goes from 0 1307 * to >0, and clear it when the last ref goes away (refcount 1308 * goes from >0 back to 0). If we change the hash map, we 1309 * must also update the chip's hardware map registers. 1310 */ 1311 mutex_enter(bgep->genlock); 1312 if (!(bgep->progress & PROGRESS_INTR)) { 1313 /* can happen during autorecovery */ 1314 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 1315 mutex_exit(bgep->genlock); 1316 return (EIO); 1317 } 1318 if (add) { 1319 if ((*refp)++ == 0) { 1320 bgep->mcast_hash[word] |= bit; 1321 #ifdef BGE_IPMI_ASF 1322 if (bge_chip_sync(bgep, B_TRUE) == DDI_FAILURE) { 1323 #else 1324 if (bge_chip_sync(bgep) == DDI_FAILURE) { 1325 #endif 1326 (void) bge_check_acc_handle(bgep, 1327 bgep->cfg_handle); 1328 (void) bge_check_acc_handle(bgep, 1329 bgep->io_handle); 1330 ddi_fm_service_impact(bgep->devinfo, 1331 DDI_SERVICE_DEGRADED); 1332 mutex_exit(bgep->genlock); 1333 return (EIO); 1334 } 1335 } 1336 } else { 1337 if (--(*refp) == 0) { 1338 bgep->mcast_hash[word] &= ~bit; 1339 #ifdef BGE_IPMI_ASF 1340 if (bge_chip_sync(bgep, B_TRUE) == DDI_FAILURE) { 1341 #else 1342 if (bge_chip_sync(bgep) == DDI_FAILURE) { 1343 #endif 1344 (void) bge_check_acc_handle(bgep, 1345 bgep->cfg_handle); 1346 (void) bge_check_acc_handle(bgep, 1347 bgep->io_handle); 1348 ddi_fm_service_impact(bgep->devinfo, 1349 DDI_SERVICE_DEGRADED); 1350 mutex_exit(bgep->genlock); 1351 return (EIO); 1352 } 1353 } 1354 } 1355 BGE_DEBUG(("bge_m_multicst($%p) done", arg)); 1356 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) { 1357 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 1358 mutex_exit(bgep->genlock); 1359 return (EIO); 1360 } 1361 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) { 1362 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 1363 mutex_exit(bgep->genlock); 1364 return (EIO); 1365 } 1366 mutex_exit(bgep->genlock); 1367 1368 return (0); 1369 } 1370 1371 /* 1372 * bge_m_promisc() -- set or reset promiscuous mode on the board 1373 * 1374 * Program the hardware to enable/disable promiscuous and/or 1375 * receive-all-multicast modes. 1376 */ 1377 static int 1378 bge_m_promisc(void *arg, boolean_t on) 1379 { 1380 bge_t *bgep = arg; 1381 1382 BGE_TRACE(("bge_m_promisc_set($%p, %d)", arg, on)); 1383 1384 /* 1385 * Store MAC layer specified mode and pass to chip layer to update h/w 1386 */ 1387 mutex_enter(bgep->genlock); 1388 if (!(bgep->progress & PROGRESS_INTR)) { 1389 /* can happen during autorecovery */ 1390 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 1391 mutex_exit(bgep->genlock); 1392 return (EIO); 1393 } 1394 bgep->promisc = on; 1395 #ifdef BGE_IPMI_ASF 1396 if (bge_chip_sync(bgep, B_TRUE) == DDI_FAILURE) { 1397 #else 1398 if (bge_chip_sync(bgep) == DDI_FAILURE) { 1399 #endif 1400 (void) bge_check_acc_handle(bgep, bgep->cfg_handle); 1401 (void) bge_check_acc_handle(bgep, bgep->io_handle); 1402 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 1403 mutex_exit(bgep->genlock); 1404 return (EIO); 1405 } 1406 BGE_DEBUG(("bge_m_promisc_set($%p) done", arg)); 1407 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) { 1408 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 1409 mutex_exit(bgep->genlock); 1410 return (EIO); 1411 } 1412 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) { 1413 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 1414 mutex_exit(bgep->genlock); 1415 return (EIO); 1416 } 1417 mutex_exit(bgep->genlock); 1418 return (0); 1419 } 1420 1421 /* 1422 * Find the slot for the specified unicast address 1423 */ 1424 int 1425 bge_unicst_find(bge_t *bgep, const uint8_t *mac_addr) 1426 { 1427 int slot; 1428 1429 ASSERT(mutex_owned(bgep->genlock)); 1430 1431 for (slot = 0; slot < bgep->unicst_addr_total; slot++) { 1432 if (bcmp(bgep->curr_addr[slot].addr, mac_addr, ETHERADDRL) == 0) 1433 return (slot); 1434 } 1435 1436 return (-1); 1437 } 1438 1439 /* 1440 * The job of bge_addmac() is to set up everything in hardware for the mac 1441 * address indicated to map to the specified group. 1442 * 1443 * For this to make sense, we need to first understand how most of the bge chips 1444 * work. A given packet reaches a ring in two distinct logical steps: 1445 * 1446 * 1) The device must accept the packet. 1447 * 2) The device must steer an accepted packet to a specific ring. 1448 * 1449 * For step 1, the device has four global MAC address filtering registers. We 1450 * must either add the address here or put the device in promiscuous mode. 1451 * Because there are only four of these and up to four groups, each group is 1452 * only allowed to program a single entry. Note, this is not explicitly done in 1453 * the driver. Rather, it is implicitly done by how we implement step 2. These 1454 * registers start at 0x410 and are referred to as the 'EMAC MAC Addresses' in 1455 * the manuals. 1456 * 1457 * For step 2, the device has eight sets of rule registers that are used to 1458 * control how a packet in step 1 is mapped to a specific ring. Each set is 1459 * comprised of a control register and a mask register. These start at 0x480 and 1460 * are referred to as the 'Receive Rules Control Registers' and 'Receive Rules 1461 * Value/Mask Registers'. These can be used to check for a 16-bit or 32-bit 1462 * value at an offset in the packet. In addition, two sets can be combined to 1463 * create a single conditional rule. 1464 * 1465 * For our purposes, we need to use this mechanism to steer a mac address to a 1466 * specific ring. This requires that we use two of the sets of registers per MAC 1467 * address that comes in here. The data about this is stored in 'mac_addr_rule' 1468 * member of the 'recv_ring_t'. 1469 * 1470 * A reasonable question to ask is why are we storing this on the ring, when it 1471 * relates to the group. The answer is that the current implementation of the 1472 * driver assumes that each group is comprised of a single ring. While some 1473 * parts may support additional rings, the driver doesn't take advantage of 1474 * that. 1475 * 1476 * A result of all this is that the driver will support up to 4 groups today. 1477 * Each group has a single ring. We want to make sure that each group can have a 1478 * single MAC address programmed into it. This results in the check for a rule 1479 * being assigned in the 'mac_addr_rule' member of the recv_ring_t below. If a 1480 * future part were to support more global MAC address filters in part 1 and 1481 * more rule registers needed for part 2, then we could relax this constraint 1482 * and allow a group to have more than one MAC address assigned to it. 1483 */ 1484 static int 1485 bge_addmac(void *arg, const uint8_t * mac_addr) 1486 { 1487 recv_ring_t *rrp = (recv_ring_t *)arg; 1488 bge_t *bgep = rrp->bgep; 1489 bge_recv_rule_t *rulep = bgep->recv_rules; 1490 bge_rule_info_t *rinfop = NULL; 1491 uint8_t ring = (uint8_t)(rrp - bgep->recv) + 1; 1492 int i; 1493 uint16_t tmp16; 1494 uint32_t tmp32; 1495 int slot; 1496 int err; 1497 1498 mutex_enter(bgep->genlock); 1499 if (bgep->unicst_addr_avail == 0) { 1500 mutex_exit(bgep->genlock); 1501 return (ENOSPC); 1502 } 1503 1504 /* 1505 * The driver only supports a MAC address being programmed to be 1506 * received by one ring in step 2. We check the global table of MAC 1507 * addresses to see if this address has already been claimed by another 1508 * group as a way to determine that. 1509 */ 1510 slot = bge_unicst_find(bgep, mac_addr); 1511 if (slot != -1) { 1512 mutex_exit(bgep->genlock); 1513 return (EEXIST); 1514 } 1515 1516 /* 1517 * Check to see if this group has already used its hardware resources 1518 * for step 2. If so, we have to return ENOSPC to MAC to indicate that 1519 * this group cannot handle an additional MAC address and that MAC will 1520 * need to use software classification on the default group. 1521 */ 1522 if (rrp->mac_addr_rule != NULL) { 1523 mutex_exit(bgep->genlock); 1524 return (ENOSPC); 1525 } 1526 1527 for (slot = 0; slot < bgep->unicst_addr_total; slot++) { 1528 if (!bgep->curr_addr[slot].set) { 1529 bgep->curr_addr[slot].set = B_TRUE; 1530 break; 1531 } 1532 } 1533 1534 VERIFY3S(slot, <, bgep->unicst_addr_total); 1535 bgep->unicst_addr_avail--; 1536 mutex_exit(bgep->genlock); 1537 1538 if ((err = bge_unicst_set(bgep, mac_addr, slot)) != 0) 1539 goto fail; 1540 1541 /* 1542 * Allocate a bge_rule_info_t to keep track of which rule slots 1543 * are being used. 1544 */ 1545 rinfop = kmem_zalloc(sizeof (bge_rule_info_t), KM_NOSLEEP); 1546 if (rinfop == NULL) { 1547 err = ENOMEM; 1548 goto fail; 1549 } 1550 1551 /* 1552 * Look for the starting slot to place the rules. 1553 * The two slots we reserve must be contiguous. 1554 */ 1555 for (i = 0; i + 1 < RECV_RULES_NUM_MAX; i++) 1556 if ((rulep[i].control & RECV_RULE_CTL_ENABLE) == 0 && 1557 (rulep[i+1].control & RECV_RULE_CTL_ENABLE) == 0) 1558 break; 1559 1560 ASSERT(i + 1 < RECV_RULES_NUM_MAX); 1561 1562 bcopy(mac_addr, &tmp32, sizeof (tmp32)); 1563 rulep[i].mask_value = ntohl(tmp32); 1564 rulep[i].control = RULE_DEST_MAC_1(ring) | RECV_RULE_CTL_AND; 1565 bge_reg_put32(bgep, RECV_RULE_MASK_REG(i), rulep[i].mask_value); 1566 bge_reg_put32(bgep, RECV_RULE_CONTROL_REG(i), rulep[i].control); 1567 1568 bcopy(mac_addr + 4, &tmp16, sizeof (tmp16)); 1569 rulep[i+1].mask_value = 0xffff0000 | ntohs(tmp16); 1570 rulep[i+1].control = RULE_DEST_MAC_2(ring); 1571 bge_reg_put32(bgep, RECV_RULE_MASK_REG(i+1), rulep[i+1].mask_value); 1572 bge_reg_put32(bgep, RECV_RULE_CONTROL_REG(i+1), rulep[i+1].control); 1573 rinfop->start = i; 1574 rinfop->count = 2; 1575 1576 rrp->mac_addr_rule = rinfop; 1577 bcopy(mac_addr, rrp->mac_addr_val, ETHERADDRL); 1578 1579 return (0); 1580 1581 fail: 1582 /* Clear the address just set */ 1583 (void) bge_unicst_set(bgep, zero_addr, slot); 1584 mutex_enter(bgep->genlock); 1585 bgep->curr_addr[slot].set = B_FALSE; 1586 bgep->unicst_addr_avail++; 1587 mutex_exit(bgep->genlock); 1588 1589 return (err); 1590 } 1591 1592 /* 1593 * Stop classifying packets matching the MAC address to the specified ring. 1594 */ 1595 static int 1596 bge_remmac(void *arg, const uint8_t *mac_addr) 1597 { 1598 recv_ring_t *rrp = (recv_ring_t *)arg; 1599 bge_t *bgep = rrp->bgep; 1600 bge_recv_rule_t *rulep = bgep->recv_rules; 1601 bge_rule_info_t *rinfop = rrp->mac_addr_rule; 1602 int start; 1603 int slot; 1604 int err; 1605 1606 /* 1607 * Remove the MAC address from its slot. 1608 */ 1609 mutex_enter(bgep->genlock); 1610 slot = bge_unicst_find(bgep, mac_addr); 1611 if (slot == -1) { 1612 mutex_exit(bgep->genlock); 1613 return (EINVAL); 1614 } 1615 1616 ASSERT(bgep->curr_addr[slot].set); 1617 mutex_exit(bgep->genlock); 1618 1619 if ((err = bge_unicst_set(bgep, zero_addr, slot)) != 0) 1620 return (err); 1621 1622 if (rinfop == NULL || ether_cmp(mac_addr, rrp->mac_addr_val) != 0) 1623 return (EINVAL); 1624 1625 start = rinfop->start; 1626 rulep[start].mask_value = 0; 1627 rulep[start].control = 0; 1628 bge_reg_put32(bgep, RECV_RULE_MASK_REG(start), rulep[start].mask_value); 1629 bge_reg_put32(bgep, RECV_RULE_CONTROL_REG(start), rulep[start].control); 1630 start++; 1631 rulep[start].mask_value = 0; 1632 rulep[start].control = 0; 1633 bge_reg_put32(bgep, RECV_RULE_MASK_REG(start), rulep[start].mask_value); 1634 bge_reg_put32(bgep, RECV_RULE_CONTROL_REG(start), rulep[start].control); 1635 1636 kmem_free(rinfop, sizeof (bge_rule_info_t)); 1637 rrp->mac_addr_rule = NULL; 1638 bzero(rrp->mac_addr_val, ETHERADDRL); 1639 1640 mutex_enter(bgep->genlock); 1641 bgep->curr_addr[slot].set = B_FALSE; 1642 bgep->unicst_addr_avail++; 1643 mutex_exit(bgep->genlock); 1644 1645 return (0); 1646 } 1647 1648 1649 static int 1650 bge_flag_intr_enable(mac_intr_handle_t ih) 1651 { 1652 recv_ring_t *rrp = (recv_ring_t *)ih; 1653 bge_t *bgep = rrp->bgep; 1654 1655 mutex_enter(bgep->genlock); 1656 rrp->poll_flag = 0; 1657 mutex_exit(bgep->genlock); 1658 1659 return (0); 1660 } 1661 1662 static int 1663 bge_flag_intr_disable(mac_intr_handle_t ih) 1664 { 1665 recv_ring_t *rrp = (recv_ring_t *)ih; 1666 bge_t *bgep = rrp->bgep; 1667 1668 mutex_enter(bgep->genlock); 1669 rrp->poll_flag = 1; 1670 mutex_exit(bgep->genlock); 1671 1672 return (0); 1673 } 1674 1675 static int 1676 bge_ring_start(mac_ring_driver_t rh, uint64_t mr_gen_num) 1677 { 1678 recv_ring_t *rx_ring; 1679 1680 rx_ring = (recv_ring_t *)rh; 1681 mutex_enter(rx_ring->rx_lock); 1682 rx_ring->ring_gen_num = mr_gen_num; 1683 mutex_exit(rx_ring->rx_lock); 1684 return (0); 1685 } 1686 1687 1688 /* 1689 * Callback funtion for MAC layer to register all rings 1690 * for given ring_group, noted by rg_index. 1691 */ 1692 void 1693 bge_fill_ring(void *arg, mac_ring_type_t rtype, const int rg_index, 1694 const int index, mac_ring_info_t *infop, mac_ring_handle_t rh) 1695 { 1696 bge_t *bgep = arg; 1697 mac_intr_t *mintr; 1698 1699 switch (rtype) { 1700 case MAC_RING_TYPE_RX: { 1701 recv_ring_t *rx_ring; 1702 ASSERT(rg_index >= 0 && rg_index < MIN(bgep->chipid.rx_rings, 1703 MAC_ADDRESS_REGS_MAX) && index == 0); 1704 1705 rx_ring = &bgep->recv[rg_index]; 1706 rx_ring->ring_handle = rh; 1707 1708 infop->mri_driver = (mac_ring_driver_t)rx_ring; 1709 infop->mri_start = bge_ring_start; 1710 infop->mri_stop = NULL; 1711 infop->mri_poll = bge_poll_ring; 1712 infop->mri_stat = bge_rx_ring_stat; 1713 1714 mintr = &infop->mri_intr; 1715 mintr->mi_handle = (mac_intr_handle_t)rx_ring; 1716 mintr->mi_enable = bge_flag_intr_enable; 1717 mintr->mi_disable = bge_flag_intr_disable; 1718 1719 break; 1720 } 1721 case MAC_RING_TYPE_TX: 1722 default: 1723 ASSERT(0); 1724 break; 1725 } 1726 } 1727 1728 /* 1729 * Fill infop passed as argument 1730 * fill in respective ring_group info 1731 * Each group has a single ring in it. We keep it simple 1732 * and use the same internal handle for rings and groups. 1733 */ 1734 void 1735 bge_fill_group(void *arg, mac_ring_type_t rtype, const int rg_index, 1736 mac_group_info_t * infop, mac_group_handle_t gh) 1737 { 1738 bge_t *bgep = arg; 1739 1740 switch (rtype) { 1741 case MAC_RING_TYPE_RX: { 1742 recv_ring_t *rx_ring; 1743 1744 ASSERT(rg_index >= 0 && rg_index < MIN(bgep->chipid.rx_rings, 1745 MAC_ADDRESS_REGS_MAX)); 1746 rx_ring = &bgep->recv[rg_index]; 1747 rx_ring->ring_group_handle = gh; 1748 1749 infop->mgi_driver = (mac_group_driver_t)rx_ring; 1750 infop->mgi_start = NULL; 1751 infop->mgi_stop = NULL; 1752 infop->mgi_addmac = bge_addmac; 1753 infop->mgi_remmac = bge_remmac; 1754 infop->mgi_count = 1; 1755 break; 1756 } 1757 case MAC_RING_TYPE_TX: 1758 default: 1759 ASSERT(0); 1760 break; 1761 } 1762 } 1763 1764 1765 /*ARGSUSED*/ 1766 static boolean_t 1767 bge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data) 1768 { 1769 bge_t *bgep = arg; 1770 mac_capab_rings_t *cap_rings; 1771 1772 switch (cap) { 1773 case MAC_CAPAB_HCKSUM: { 1774 uint32_t *txflags = cap_data; 1775 1776 *txflags = HCKSUM_INET_FULL_V4 | HCKSUM_IPHDRCKSUM; 1777 break; 1778 } 1779 1780 case MAC_CAPAB_RINGS: 1781 cap_rings = (mac_capab_rings_t *)cap_data; 1782 1783 /* Temporarily disable multiple tx rings. */ 1784 if (cap_rings->mr_type != MAC_RING_TYPE_RX) 1785 return (B_FALSE); 1786 1787 cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC; 1788 cap_rings->mr_rnum = 1789 cap_rings->mr_gnum = 1790 MIN(bgep->chipid.rx_rings, MAC_ADDRESS_REGS_MAX); 1791 cap_rings->mr_rget = bge_fill_ring; 1792 cap_rings->mr_gget = bge_fill_group; 1793 break; 1794 1795 default: 1796 return (B_FALSE); 1797 } 1798 return (B_TRUE); 1799 } 1800 1801 #ifdef NOT_SUPPORTED_XXX 1802 1803 /* 1804 * Loopback ioctl code 1805 */ 1806 1807 static lb_property_t loopmodes[] = { 1808 { normal, "normal", BGE_LOOP_NONE }, 1809 { external, "1000Mbps", BGE_LOOP_EXTERNAL_1000 }, 1810 { external, "100Mbps", BGE_LOOP_EXTERNAL_100 }, 1811 { external, "10Mbps", BGE_LOOP_EXTERNAL_10 }, 1812 { internal, "PHY", BGE_LOOP_INTERNAL_PHY }, 1813 { internal, "MAC", BGE_LOOP_INTERNAL_MAC } 1814 }; 1815 1816 static enum ioc_reply 1817 bge_set_loop_mode(bge_t *bgep, uint32_t mode) 1818 { 1819 /* 1820 * If the mode isn't being changed, there's nothing to do ... 1821 */ 1822 if (mode == bgep->param_loop_mode) 1823 return (IOC_ACK); 1824 1825 /* 1826 * Validate the requested mode and prepare a suitable message 1827 * to explain the link down/up cycle that the change will 1828 * probably induce ... 1829 */ 1830 switch (mode) { 1831 default: 1832 return (IOC_INVAL); 1833 1834 case BGE_LOOP_NONE: 1835 case BGE_LOOP_EXTERNAL_1000: 1836 case BGE_LOOP_EXTERNAL_100: 1837 case BGE_LOOP_EXTERNAL_10: 1838 case BGE_LOOP_INTERNAL_PHY: 1839 case BGE_LOOP_INTERNAL_MAC: 1840 break; 1841 } 1842 1843 /* 1844 * All OK; tell the caller to reprogram 1845 * the PHY and/or MAC for the new mode ... 1846 */ 1847 bgep->param_loop_mode = mode; 1848 return (IOC_RESTART_ACK); 1849 } 1850 1851 static enum ioc_reply 1852 bge_loop_ioctl(bge_t *bgep, queue_t *wq, mblk_t *mp, struct iocblk *iocp) 1853 { 1854 lb_info_sz_t *lbsp; 1855 lb_property_t *lbpp; 1856 uint32_t *lbmp; 1857 int cmd; 1858 1859 _NOTE(ARGUNUSED(wq)) 1860 1861 /* 1862 * Validate format of ioctl 1863 */ 1864 if (mp->b_cont == NULL) 1865 return (IOC_INVAL); 1866 1867 cmd = iocp->ioc_cmd; 1868 switch (cmd) { 1869 default: 1870 /* NOTREACHED */ 1871 bge_error(bgep, "bge_loop_ioctl: invalid cmd 0x%x", cmd); 1872 return (IOC_INVAL); 1873 1874 case LB_GET_INFO_SIZE: 1875 if (iocp->ioc_count != sizeof (lb_info_sz_t)) 1876 return (IOC_INVAL); 1877 lbsp = (void *)mp->b_cont->b_rptr; 1878 *lbsp = sizeof (loopmodes); 1879 return (IOC_REPLY); 1880 1881 case LB_GET_INFO: 1882 if (iocp->ioc_count != sizeof (loopmodes)) 1883 return (IOC_INVAL); 1884 lbpp = (void *)mp->b_cont->b_rptr; 1885 bcopy(loopmodes, lbpp, sizeof (loopmodes)); 1886 return (IOC_REPLY); 1887 1888 case LB_GET_MODE: 1889 if (iocp->ioc_count != sizeof (uint32_t)) 1890 return (IOC_INVAL); 1891 lbmp = (void *)mp->b_cont->b_rptr; 1892 *lbmp = bgep->param_loop_mode; 1893 return (IOC_REPLY); 1894 1895 case LB_SET_MODE: 1896 if (iocp->ioc_count != sizeof (uint32_t)) 1897 return (IOC_INVAL); 1898 lbmp = (void *)mp->b_cont->b_rptr; 1899 return (bge_set_loop_mode(bgep, *lbmp)); 1900 } 1901 } 1902 1903 #endif /* NOT_SUPPORTED_XXX */ 1904 1905 /* 1906 * Specific bge IOCTLs, the gld module handles the generic ones. 1907 */ 1908 static void 1909 bge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp) 1910 { 1911 bge_t *bgep = arg; 1912 struct iocblk *iocp; 1913 enum ioc_reply status; 1914 boolean_t need_privilege; 1915 int err; 1916 int cmd; 1917 1918 /* 1919 * Validate the command before bothering with the mutex ... 1920 */ 1921 iocp = (void *)mp->b_rptr; 1922 iocp->ioc_error = 0; 1923 need_privilege = B_TRUE; 1924 cmd = iocp->ioc_cmd; 1925 switch (cmd) { 1926 default: 1927 miocnak(wq, mp, 0, EINVAL); 1928 return; 1929 1930 case BGE_MII_READ: 1931 case BGE_MII_WRITE: 1932 case BGE_SEE_READ: 1933 case BGE_SEE_WRITE: 1934 case BGE_FLASH_READ: 1935 case BGE_FLASH_WRITE: 1936 case BGE_DIAG: 1937 case BGE_PEEK: 1938 case BGE_POKE: 1939 case BGE_PHY_RESET: 1940 case BGE_SOFT_RESET: 1941 case BGE_HARD_RESET: 1942 break; 1943 1944 #ifdef NOT_SUPPORTED_XXX 1945 case LB_GET_INFO_SIZE: 1946 case LB_GET_INFO: 1947 case LB_GET_MODE: 1948 need_privilege = B_FALSE; 1949 /* FALLTHRU */ 1950 case LB_SET_MODE: 1951 break; 1952 #endif 1953 1954 } 1955 1956 if (need_privilege) { 1957 /* 1958 * Check for specific net_config privilege on Solaris 10+. 1959 */ 1960 err = secpolicy_net_config(iocp->ioc_cr, B_FALSE); 1961 if (err != 0) { 1962 miocnak(wq, mp, 0, err); 1963 return; 1964 } 1965 } 1966 1967 mutex_enter(bgep->genlock); 1968 if (!(bgep->progress & PROGRESS_INTR)) { 1969 /* can happen during autorecovery */ 1970 mutex_exit(bgep->genlock); 1971 miocnak(wq, mp, 0, EIO); 1972 return; 1973 } 1974 1975 switch (cmd) { 1976 default: 1977 _NOTE(NOTREACHED) 1978 status = IOC_INVAL; 1979 break; 1980 1981 case BGE_MII_READ: 1982 case BGE_MII_WRITE: 1983 case BGE_SEE_READ: 1984 case BGE_SEE_WRITE: 1985 case BGE_FLASH_READ: 1986 case BGE_FLASH_WRITE: 1987 case BGE_DIAG: 1988 case BGE_PEEK: 1989 case BGE_POKE: 1990 case BGE_PHY_RESET: 1991 case BGE_SOFT_RESET: 1992 case BGE_HARD_RESET: 1993 status = bge_chip_ioctl(bgep, wq, mp, iocp); 1994 break; 1995 1996 #ifdef NOT_SUPPORTED_XXX 1997 case LB_GET_INFO_SIZE: 1998 case LB_GET_INFO: 1999 case LB_GET_MODE: 2000 case LB_SET_MODE: 2001 status = bge_loop_ioctl(bgep, wq, mp, iocp); 2002 break; 2003 #endif 2004 2005 } 2006 2007 /* 2008 * Do we need to reprogram the PHY and/or the MAC? 2009 * Do it now, while we still have the mutex. 2010 * 2011 * Note: update the PHY first, 'cos it controls the 2012 * speed/duplex parameters that the MAC code uses. 2013 */ 2014 switch (status) { 2015 case IOC_RESTART_REPLY: 2016 case IOC_RESTART_ACK: 2017 if (bge_reprogram(bgep) == IOC_INVAL) 2018 status = IOC_INVAL; 2019 break; 2020 } 2021 2022 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) { 2023 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 2024 status = IOC_INVAL; 2025 } 2026 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) { 2027 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 2028 status = IOC_INVAL; 2029 } 2030 mutex_exit(bgep->genlock); 2031 2032 /* 2033 * Finally, decide how to reply 2034 */ 2035 switch (status) { 2036 default: 2037 case IOC_INVAL: 2038 /* 2039 * Error, reply with a NAK and EINVAL or the specified error 2040 */ 2041 miocnak(wq, mp, 0, iocp->ioc_error == 0 ? 2042 EINVAL : iocp->ioc_error); 2043 break; 2044 2045 case IOC_DONE: 2046 /* 2047 * OK, reply already sent 2048 */ 2049 break; 2050 2051 case IOC_RESTART_ACK: 2052 case IOC_ACK: 2053 /* 2054 * OK, reply with an ACK 2055 */ 2056 miocack(wq, mp, 0, 0); 2057 break; 2058 2059 case IOC_RESTART_REPLY: 2060 case IOC_REPLY: 2061 /* 2062 * OK, send prepared reply as ACK or NAK 2063 */ 2064 mp->b_datap->db_type = iocp->ioc_error == 0 ? 2065 M_IOCACK : M_IOCNAK; 2066 qreply(wq, mp); 2067 break; 2068 } 2069 } 2070 2071 /* 2072 * ========== Per-instance setup/teardown code ========== 2073 */ 2074 2075 #undef BGE_DBG 2076 #define BGE_DBG BGE_DBG_MEM /* debug flag for this code */ 2077 /* 2078 * Allocate an area of memory and a DMA handle for accessing it 2079 */ 2080 static int 2081 bge_alloc_dma_mem(bge_t *bgep, size_t memsize, ddi_device_acc_attr_t *attr_p, 2082 uint_t dma_flags, dma_area_t *dma_p) 2083 { 2084 caddr_t va; 2085 int err; 2086 2087 BGE_TRACE(("bge_alloc_dma_mem($%p, %ld, $%p, 0x%x, $%p)", 2088 (void *)bgep, memsize, attr_p, dma_flags, dma_p)); 2089 2090 /* 2091 * Allocate handle 2092 */ 2093 err = ddi_dma_alloc_handle(bgep->devinfo, &dma_attr, 2094 DDI_DMA_DONTWAIT, NULL, &dma_p->dma_hdl); 2095 if (err != DDI_SUCCESS) 2096 return (DDI_FAILURE); 2097 2098 /* 2099 * Allocate memory 2100 */ 2101 err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize, attr_p, 2102 dma_flags, DDI_DMA_DONTWAIT, NULL, &va, &dma_p->alength, 2103 &dma_p->acc_hdl); 2104 if (err != DDI_SUCCESS) 2105 return (DDI_FAILURE); 2106 2107 /* 2108 * Bind the two together 2109 */ 2110 dma_p->mem_va = va; 2111 err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL, 2112 va, dma_p->alength, dma_flags, DDI_DMA_DONTWAIT, NULL, 2113 &dma_p->cookie, &dma_p->ncookies); 2114 2115 BGE_DEBUG(("bge_alloc_dma_mem(): bind %d bytes; err %d, %d cookies", 2116 dma_p->alength, err, dma_p->ncookies)); 2117 2118 if (err != DDI_DMA_MAPPED || dma_p->ncookies != 1) 2119 return (DDI_FAILURE); 2120 2121 dma_p->nslots = ~0U; 2122 dma_p->size = ~0U; 2123 dma_p->token = ~0U; 2124 dma_p->offset = 0; 2125 return (DDI_SUCCESS); 2126 } 2127 2128 /* 2129 * Free one allocated area of DMAable memory 2130 */ 2131 static void 2132 bge_free_dma_mem(dma_area_t *dma_p) 2133 { 2134 if (dma_p->dma_hdl != NULL) { 2135 if (dma_p->ncookies) { 2136 (void) ddi_dma_unbind_handle(dma_p->dma_hdl); 2137 dma_p->ncookies = 0; 2138 } 2139 ddi_dma_free_handle(&dma_p->dma_hdl); 2140 dma_p->dma_hdl = NULL; 2141 } 2142 2143 if (dma_p->acc_hdl != NULL) { 2144 ddi_dma_mem_free(&dma_p->acc_hdl); 2145 dma_p->acc_hdl = NULL; 2146 } 2147 } 2148 /* 2149 * Utility routine to carve a slice off a chunk of allocated memory, 2150 * updating the chunk descriptor accordingly. The size of the slice 2151 * is given by the product of the <qty> and <size> parameters. 2152 */ 2153 static void 2154 bge_slice_chunk(dma_area_t *slice, dma_area_t *chunk, 2155 uint32_t qty, uint32_t size) 2156 { 2157 static uint32_t sequence = 0xbcd5704a; 2158 size_t totsize; 2159 2160 totsize = qty*size; 2161 ASSERT(totsize <= chunk->alength); 2162 2163 *slice = *chunk; 2164 slice->nslots = qty; 2165 slice->size = size; 2166 slice->alength = totsize; 2167 slice->token = ++sequence; 2168 2169 chunk->mem_va = (caddr_t)chunk->mem_va + totsize; 2170 chunk->alength -= totsize; 2171 chunk->offset += totsize; 2172 chunk->cookie.dmac_laddress += totsize; 2173 chunk->cookie.dmac_size -= totsize; 2174 } 2175 2176 /* 2177 * Initialise the specified Receive Producer (Buffer) Ring, using 2178 * the information in the <dma_area> descriptors that it contains 2179 * to set up all the other fields. This routine should be called 2180 * only once for each ring. 2181 */ 2182 static void 2183 bge_init_buff_ring(bge_t *bgep, uint64_t ring) 2184 { 2185 buff_ring_t *brp; 2186 bge_status_t *bsp; 2187 sw_rbd_t *srbdp; 2188 dma_area_t pbuf; 2189 uint32_t bufsize; 2190 uint32_t nslots; 2191 uint32_t slot; 2192 uint32_t split; 2193 2194 static bge_regno_t nic_ring_addrs[BGE_BUFF_RINGS_MAX] = { 2195 NIC_MEM_SHADOW_BUFF_STD, 2196 NIC_MEM_SHADOW_BUFF_JUMBO, 2197 NIC_MEM_SHADOW_BUFF_MINI 2198 }; 2199 static bge_regno_t mailbox_regs[BGE_BUFF_RINGS_MAX] = { 2200 RECV_STD_PROD_INDEX_REG, 2201 RECV_JUMBO_PROD_INDEX_REG, 2202 RECV_MINI_PROD_INDEX_REG 2203 }; 2204 static bge_regno_t buff_cons_xref[BGE_BUFF_RINGS_MAX] = { 2205 STATUS_STD_BUFF_CONS_INDEX, 2206 STATUS_JUMBO_BUFF_CONS_INDEX, 2207 STATUS_MINI_BUFF_CONS_INDEX 2208 }; 2209 2210 BGE_TRACE(("bge_init_buff_ring($%p, %d)", 2211 (void *)bgep, ring)); 2212 2213 brp = &bgep->buff[ring]; 2214 nslots = brp->desc.nslots; 2215 ASSERT(brp->buf[0].nslots == nslots/BGE_SPLIT); 2216 bufsize = brp->buf[0].size; 2217 2218 /* 2219 * Set up the copy of the h/w RCB 2220 * 2221 * Note: unlike Send & Receive Return Rings, (where the max_len 2222 * field holds the number of slots), in a Receive Buffer Ring 2223 * this field indicates the size of each buffer in the ring. 2224 */ 2225 brp->hw_rcb.host_ring_addr = brp->desc.cookie.dmac_laddress; 2226 brp->hw_rcb.max_len = (uint16_t)bufsize; 2227 brp->hw_rcb.flags = nslots > 0 ? 0 : RCB_FLAG_RING_DISABLED; 2228 brp->hw_rcb.nic_ring_addr = nic_ring_addrs[ring]; 2229 2230 /* 2231 * Other one-off initialisation of per-ring data 2232 */ 2233 brp->bgep = bgep; 2234 bsp = DMA_VPTR(bgep->status_block); 2235 brp->cons_index_p = &bsp->buff_cons_index[buff_cons_xref[ring]]; 2236 brp->chip_mbx_reg = mailbox_regs[ring]; 2237 mutex_init(brp->rf_lock, NULL, MUTEX_DRIVER, 2238 DDI_INTR_PRI(bgep->intr_pri)); 2239 2240 /* 2241 * Allocate the array of s/w Receive Buffer Descriptors 2242 */ 2243 srbdp = kmem_zalloc(nslots*sizeof (*srbdp), KM_SLEEP); 2244 brp->sw_rbds = srbdp; 2245 2246 /* 2247 * Now initialise each array element once and for all 2248 */ 2249 for (split = 0; split < BGE_SPLIT; ++split) { 2250 pbuf = brp->buf[split]; 2251 for (slot = 0; slot < nslots/BGE_SPLIT; ++srbdp, ++slot) 2252 bge_slice_chunk(&srbdp->pbuf, &pbuf, 1, bufsize); 2253 ASSERT(pbuf.alength == 0); 2254 } 2255 } 2256 2257 /* 2258 * Clean up initialisation done above before the memory is freed 2259 */ 2260 static void 2261 bge_fini_buff_ring(bge_t *bgep, uint64_t ring) 2262 { 2263 buff_ring_t *brp; 2264 sw_rbd_t *srbdp; 2265 2266 BGE_TRACE(("bge_fini_buff_ring($%p, %d)", 2267 (void *)bgep, ring)); 2268 2269 brp = &bgep->buff[ring]; 2270 srbdp = brp->sw_rbds; 2271 kmem_free(srbdp, brp->desc.nslots*sizeof (*srbdp)); 2272 2273 mutex_destroy(brp->rf_lock); 2274 } 2275 2276 /* 2277 * Initialise the specified Receive (Return) Ring, using the 2278 * information in the <dma_area> descriptors that it contains 2279 * to set up all the other fields. This routine should be called 2280 * only once for each ring. 2281 */ 2282 static void 2283 bge_init_recv_ring(bge_t *bgep, uint64_t ring) 2284 { 2285 recv_ring_t *rrp; 2286 bge_status_t *bsp; 2287 uint32_t nslots; 2288 2289 BGE_TRACE(("bge_init_recv_ring($%p, %d)", 2290 (void *)bgep, ring)); 2291 2292 /* 2293 * The chip architecture requires that receive return rings have 2294 * 512 or 1024 or 2048 elements per ring. See 570X-PG108-R page 103. 2295 */ 2296 rrp = &bgep->recv[ring]; 2297 nslots = rrp->desc.nslots; 2298 ASSERT(nslots == 0 || nslots == 512 || 2299 nslots == 1024 || nslots == 2048); 2300 2301 /* 2302 * Set up the copy of the h/w RCB 2303 */ 2304 rrp->hw_rcb.host_ring_addr = rrp->desc.cookie.dmac_laddress; 2305 rrp->hw_rcb.max_len = (uint16_t)nslots; 2306 rrp->hw_rcb.flags = nslots > 0 ? 0 : RCB_FLAG_RING_DISABLED; 2307 rrp->hw_rcb.nic_ring_addr = 0; 2308 2309 /* 2310 * Other one-off initialisation of per-ring data 2311 */ 2312 rrp->bgep = bgep; 2313 bsp = DMA_VPTR(bgep->status_block); 2314 rrp->prod_index_p = RECV_INDEX_P(bsp, ring); 2315 rrp->chip_mbx_reg = RECV_RING_CONS_INDEX_REG(ring); 2316 mutex_init(rrp->rx_lock, NULL, MUTEX_DRIVER, 2317 DDI_INTR_PRI(bgep->intr_pri)); 2318 } 2319 2320 2321 /* 2322 * Clean up initialisation done above before the memory is freed 2323 */ 2324 static void 2325 bge_fini_recv_ring(bge_t *bgep, uint64_t ring) 2326 { 2327 recv_ring_t *rrp; 2328 2329 BGE_TRACE(("bge_fini_recv_ring($%p, %d)", 2330 (void *)bgep, ring)); 2331 2332 rrp = &bgep->recv[ring]; 2333 if (rrp->rx_softint) 2334 ddi_remove_softintr(rrp->rx_softint); 2335 mutex_destroy(rrp->rx_lock); 2336 } 2337 2338 /* 2339 * Initialise the specified Send Ring, using the information in the 2340 * <dma_area> descriptors that it contains to set up all the other 2341 * fields. This routine should be called only once for each ring. 2342 */ 2343 static void 2344 bge_init_send_ring(bge_t *bgep, uint64_t ring) 2345 { 2346 send_ring_t *srp; 2347 bge_status_t *bsp; 2348 sw_sbd_t *ssbdp; 2349 dma_area_t desc; 2350 dma_area_t pbuf; 2351 uint32_t nslots; 2352 uint32_t slot; 2353 uint32_t split; 2354 sw_txbuf_t *txbuf; 2355 2356 BGE_TRACE(("bge_init_send_ring($%p, %d)", 2357 (void *)bgep, ring)); 2358 2359 /* 2360 * The chip architecture requires that host-based send rings 2361 * have 512 elements per ring. See 570X-PG102-R page 56. 2362 */ 2363 srp = &bgep->send[ring]; 2364 nslots = srp->desc.nslots; 2365 ASSERT(nslots == 0 || nslots == 512); 2366 2367 /* 2368 * Set up the copy of the h/w RCB 2369 */ 2370 srp->hw_rcb.host_ring_addr = srp->desc.cookie.dmac_laddress; 2371 srp->hw_rcb.max_len = (uint16_t)nslots; 2372 srp->hw_rcb.flags = nslots > 0 ? 0 : RCB_FLAG_RING_DISABLED; 2373 srp->hw_rcb.nic_ring_addr = NIC_MEM_SHADOW_SEND_RING(ring, nslots); 2374 2375 /* 2376 * Other one-off initialisation of per-ring data 2377 */ 2378 srp->bgep = bgep; 2379 bsp = DMA_VPTR(bgep->status_block); 2380 srp->cons_index_p = SEND_INDEX_P(bsp, ring); 2381 srp->chip_mbx_reg = SEND_RING_HOST_INDEX_REG(ring); 2382 mutex_init(srp->tx_lock, NULL, MUTEX_DRIVER, 2383 DDI_INTR_PRI(bgep->intr_pri)); 2384 mutex_init(srp->txbuf_lock, NULL, MUTEX_DRIVER, 2385 DDI_INTR_PRI(bgep->intr_pri)); 2386 mutex_init(srp->freetxbuf_lock, NULL, MUTEX_DRIVER, 2387 DDI_INTR_PRI(bgep->intr_pri)); 2388 mutex_init(srp->tc_lock, NULL, MUTEX_DRIVER, 2389 DDI_INTR_PRI(bgep->intr_pri)); 2390 if (nslots == 0) 2391 return; 2392 2393 /* 2394 * Allocate the array of s/w Send Buffer Descriptors 2395 */ 2396 ssbdp = kmem_zalloc(nslots*sizeof (*ssbdp), KM_SLEEP); 2397 txbuf = kmem_zalloc(BGE_SEND_BUF_MAX*sizeof (*txbuf), KM_SLEEP); 2398 srp->txbuf_head = 2399 kmem_zalloc(BGE_SEND_BUF_MAX*sizeof (bge_queue_item_t), KM_SLEEP); 2400 srp->pktp = kmem_zalloc(BGE_SEND_BUF_MAX*sizeof (send_pkt_t), KM_SLEEP); 2401 srp->sw_sbds = ssbdp; 2402 srp->txbuf = txbuf; 2403 srp->tx_buffers = BGE_SEND_BUF_NUM; 2404 srp->tx_buffers_low = srp->tx_buffers / 4; 2405 if (bgep->chipid.snd_buff_size > BGE_SEND_BUFF_SIZE_DEFAULT) 2406 srp->tx_array_max = BGE_SEND_BUF_ARRAY_JUMBO; 2407 else 2408 srp->tx_array_max = BGE_SEND_BUF_ARRAY; 2409 srp->tx_array = 1; 2410 2411 /* 2412 * Chunk tx desc area 2413 */ 2414 desc = srp->desc; 2415 for (slot = 0; slot < nslots; ++ssbdp, ++slot) { 2416 bge_slice_chunk(&ssbdp->desc, &desc, 1, 2417 sizeof (bge_sbd_t)); 2418 } 2419 ASSERT(desc.alength == 0); 2420 2421 /* 2422 * Chunk tx buffer area 2423 */ 2424 for (split = 0; split < BGE_SPLIT; ++split) { 2425 pbuf = srp->buf[0][split]; 2426 for (slot = 0; slot < BGE_SEND_BUF_NUM/BGE_SPLIT; ++slot) { 2427 bge_slice_chunk(&txbuf->buf, &pbuf, 1, 2428 bgep->chipid.snd_buff_size); 2429 txbuf++; 2430 } 2431 ASSERT(pbuf.alength == 0); 2432 } 2433 } 2434 2435 /* 2436 * Clean up initialisation done above before the memory is freed 2437 */ 2438 static void 2439 bge_fini_send_ring(bge_t *bgep, uint64_t ring) 2440 { 2441 send_ring_t *srp; 2442 uint32_t array; 2443 uint32_t split; 2444 uint32_t nslots; 2445 2446 BGE_TRACE(("bge_fini_send_ring($%p, %d)", 2447 (void *)bgep, ring)); 2448 2449 srp = &bgep->send[ring]; 2450 mutex_destroy(srp->tc_lock); 2451 mutex_destroy(srp->freetxbuf_lock); 2452 mutex_destroy(srp->txbuf_lock); 2453 mutex_destroy(srp->tx_lock); 2454 nslots = srp->desc.nslots; 2455 if (nslots == 0) 2456 return; 2457 2458 for (array = 1; array < srp->tx_array; ++array) 2459 for (split = 0; split < BGE_SPLIT; ++split) 2460 bge_free_dma_mem(&srp->buf[array][split]); 2461 kmem_free(srp->sw_sbds, nslots*sizeof (*srp->sw_sbds)); 2462 kmem_free(srp->txbuf_head, BGE_SEND_BUF_MAX*sizeof (*srp->txbuf_head)); 2463 kmem_free(srp->txbuf, BGE_SEND_BUF_MAX*sizeof (*srp->txbuf)); 2464 kmem_free(srp->pktp, BGE_SEND_BUF_MAX*sizeof (*srp->pktp)); 2465 srp->sw_sbds = NULL; 2466 srp->txbuf_head = NULL; 2467 srp->txbuf = NULL; 2468 srp->pktp = NULL; 2469 } 2470 2471 /* 2472 * Initialise all transmit, receive, and buffer rings. 2473 */ 2474 void 2475 bge_init_rings(bge_t *bgep) 2476 { 2477 uint32_t ring; 2478 2479 BGE_TRACE(("bge_init_rings($%p)", (void *)bgep)); 2480 2481 /* 2482 * Perform one-off initialisation of each ring ... 2483 */ 2484 for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring) 2485 bge_init_send_ring(bgep, ring); 2486 for (ring = 0; ring < BGE_RECV_RINGS_MAX; ++ring) 2487 bge_init_recv_ring(bgep, ring); 2488 for (ring = 0; ring < BGE_BUFF_RINGS_MAX; ++ring) 2489 bge_init_buff_ring(bgep, ring); 2490 } 2491 2492 /* 2493 * Undo the work of bge_init_rings() above before the memory is freed 2494 */ 2495 void 2496 bge_fini_rings(bge_t *bgep) 2497 { 2498 uint32_t ring; 2499 2500 BGE_TRACE(("bge_fini_rings($%p)", (void *)bgep)); 2501 2502 for (ring = 0; ring < BGE_BUFF_RINGS_MAX; ++ring) 2503 bge_fini_buff_ring(bgep, ring); 2504 for (ring = 0; ring < BGE_RECV_RINGS_MAX; ++ring) 2505 bge_fini_recv_ring(bgep, ring); 2506 for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring) 2507 bge_fini_send_ring(bgep, ring); 2508 } 2509 2510 /* 2511 * Called from the bge_m_stop() to free the tx buffers which are 2512 * allocated from the tx process. 2513 */ 2514 void 2515 bge_free_txbuf_arrays(send_ring_t *srp) 2516 { 2517 uint32_t array; 2518 uint32_t split; 2519 2520 ASSERT(mutex_owned(srp->tx_lock)); 2521 2522 /* 2523 * Free the extra tx buffer DMA area 2524 */ 2525 for (array = 1; array < srp->tx_array; ++array) 2526 for (split = 0; split < BGE_SPLIT; ++split) 2527 bge_free_dma_mem(&srp->buf[array][split]); 2528 2529 /* 2530 * Restore initial tx buffer numbers 2531 */ 2532 srp->tx_array = 1; 2533 srp->tx_buffers = BGE_SEND_BUF_NUM; 2534 srp->tx_buffers_low = srp->tx_buffers / 4; 2535 srp->tx_flow = 0; 2536 bzero(srp->pktp, BGE_SEND_BUF_MAX * sizeof (*srp->pktp)); 2537 } 2538 2539 /* 2540 * Called from tx process to allocate more tx buffers 2541 */ 2542 bge_queue_item_t * 2543 bge_alloc_txbuf_array(bge_t *bgep, send_ring_t *srp) 2544 { 2545 bge_queue_t *txbuf_queue; 2546 bge_queue_item_t *txbuf_item_last; 2547 bge_queue_item_t *txbuf_item; 2548 bge_queue_item_t *txbuf_item_rtn; 2549 sw_txbuf_t *txbuf; 2550 dma_area_t area; 2551 size_t txbuffsize; 2552 uint32_t slot; 2553 uint32_t array; 2554 uint32_t split; 2555 uint32_t err; 2556 2557 ASSERT(mutex_owned(srp->tx_lock)); 2558 2559 array = srp->tx_array; 2560 if (array >= srp->tx_array_max) 2561 return (NULL); 2562 2563 /* 2564 * Allocate memory & handles for TX buffers 2565 */ 2566 txbuffsize = BGE_SEND_BUF_NUM*bgep->chipid.snd_buff_size; 2567 ASSERT((txbuffsize % BGE_SPLIT) == 0); 2568 for (split = 0; split < BGE_SPLIT; ++split) { 2569 err = bge_alloc_dma_mem(bgep, txbuffsize/BGE_SPLIT, 2570 &bge_data_accattr, DDI_DMA_WRITE | BGE_DMA_MODE, 2571 &srp->buf[array][split]); 2572 if (err != DDI_SUCCESS) { 2573 /* Free the last already allocated OK chunks */ 2574 for (slot = 0; slot <= split; ++slot) 2575 bge_free_dma_mem(&srp->buf[array][slot]); 2576 srp->tx_alloc_fail++; 2577 return (NULL); 2578 } 2579 } 2580 2581 /* 2582 * Chunk tx buffer area 2583 */ 2584 txbuf = srp->txbuf + array*BGE_SEND_BUF_NUM; 2585 for (split = 0; split < BGE_SPLIT; ++split) { 2586 area = srp->buf[array][split]; 2587 for (slot = 0; slot < BGE_SEND_BUF_NUM/BGE_SPLIT; ++slot) { 2588 bge_slice_chunk(&txbuf->buf, &area, 1, 2589 bgep->chipid.snd_buff_size); 2590 txbuf++; 2591 } 2592 } 2593 2594 /* 2595 * Add above buffers to the tx buffer pop queue 2596 */ 2597 txbuf_item = srp->txbuf_head + array*BGE_SEND_BUF_NUM; 2598 txbuf = srp->txbuf + array*BGE_SEND_BUF_NUM; 2599 txbuf_item_last = NULL; 2600 for (slot = 0; slot < BGE_SEND_BUF_NUM; ++slot) { 2601 txbuf_item->item = txbuf; 2602 txbuf_item->next = txbuf_item_last; 2603 txbuf_item_last = txbuf_item; 2604 txbuf++; 2605 txbuf_item++; 2606 } 2607 txbuf_item = srp->txbuf_head + array*BGE_SEND_BUF_NUM; 2608 txbuf_item_rtn = txbuf_item; 2609 txbuf_item++; 2610 txbuf_queue = srp->txbuf_pop_queue; 2611 mutex_enter(txbuf_queue->lock); 2612 txbuf_item->next = txbuf_queue->head; 2613 txbuf_queue->head = txbuf_item_last; 2614 txbuf_queue->count += BGE_SEND_BUF_NUM - 1; 2615 mutex_exit(txbuf_queue->lock); 2616 2617 srp->tx_array++; 2618 srp->tx_buffers += BGE_SEND_BUF_NUM; 2619 srp->tx_buffers_low = srp->tx_buffers / 4; 2620 2621 return (txbuf_item_rtn); 2622 } 2623 2624 /* 2625 * This function allocates all the transmit and receive buffers 2626 * and descriptors, in four chunks. 2627 */ 2628 int 2629 bge_alloc_bufs(bge_t *bgep) 2630 { 2631 dma_area_t area; 2632 size_t rxbuffsize; 2633 size_t txbuffsize; 2634 size_t rxbuffdescsize; 2635 size_t rxdescsize; 2636 size_t txdescsize; 2637 uint32_t ring; 2638 uint32_t rx_rings = bgep->chipid.rx_rings; 2639 uint32_t tx_rings = bgep->chipid.tx_rings; 2640 int split; 2641 int err; 2642 2643 BGE_TRACE(("bge_alloc_bufs($%p)", 2644 (void *)bgep)); 2645 2646 rxbuffsize = BGE_STD_SLOTS_USED*bgep->chipid.std_buf_size; 2647 rxbuffsize += bgep->chipid.jumbo_slots*bgep->chipid.recv_jumbo_size; 2648 rxbuffsize += BGE_MINI_SLOTS_USED*BGE_MINI_BUFF_SIZE; 2649 2650 txbuffsize = BGE_SEND_BUF_NUM*bgep->chipid.snd_buff_size; 2651 txbuffsize *= tx_rings; 2652 2653 rxdescsize = rx_rings*bgep->chipid.recv_slots; 2654 rxdescsize *= sizeof (bge_rbd_t); 2655 2656 rxbuffdescsize = BGE_STD_SLOTS_USED; 2657 rxbuffdescsize += bgep->chipid.jumbo_slots; 2658 rxbuffdescsize += BGE_MINI_SLOTS_USED; 2659 rxbuffdescsize *= sizeof (bge_rbd_t); 2660 2661 txdescsize = tx_rings*BGE_SEND_SLOTS_USED; 2662 txdescsize *= sizeof (bge_sbd_t); 2663 txdescsize += sizeof (bge_statistics_t); 2664 txdescsize += sizeof (bge_status_t); 2665 txdescsize += BGE_STATUS_PADDING; 2666 2667 /* 2668 * Enable PCI relaxed ordering only for RX/TX data buffers 2669 */ 2670 if (!(DEVICE_5717_SERIES_CHIPSETS(bgep) || 2671 DEVICE_5725_SERIES_CHIPSETS(bgep) || 2672 DEVICE_57765_SERIES_CHIPSETS(bgep))) { 2673 if (bge_relaxed_ordering) 2674 dma_attr.dma_attr_flags |= DDI_DMA_RELAXED_ORDERING; 2675 } 2676 2677 /* 2678 * Allocate memory & handles for RX buffers 2679 */ 2680 ASSERT((rxbuffsize % BGE_SPLIT) == 0); 2681 for (split = 0; split < BGE_SPLIT; ++split) { 2682 err = bge_alloc_dma_mem(bgep, rxbuffsize/BGE_SPLIT, 2683 &bge_data_accattr, DDI_DMA_READ | BGE_DMA_MODE, 2684 &bgep->rx_buff[split]); 2685 if (err != DDI_SUCCESS) 2686 return (DDI_FAILURE); 2687 } 2688 BGE_DEBUG(("DMA ALLOC: allocated %d chunks for Rx Buffers (rxbuffsize = %d)", 2689 rxbuffsize/BGE_SPLIT, 2690 rxbuffsize)); 2691 2692 /* 2693 * Allocate memory & handles for TX buffers 2694 */ 2695 ASSERT((txbuffsize % BGE_SPLIT) == 0); 2696 for (split = 0; split < BGE_SPLIT; ++split) { 2697 err = bge_alloc_dma_mem(bgep, txbuffsize/BGE_SPLIT, 2698 &bge_data_accattr, DDI_DMA_WRITE | BGE_DMA_MODE, 2699 &bgep->tx_buff[split]); 2700 if (err != DDI_SUCCESS) 2701 return (DDI_FAILURE); 2702 } 2703 BGE_DEBUG(("DMA ALLOC: allocated %d chunks for Tx Buffers (txbuffsize = %d)", 2704 txbuffsize/BGE_SPLIT, 2705 txbuffsize)); 2706 2707 if (!(DEVICE_5717_SERIES_CHIPSETS(bgep) || 2708 DEVICE_5725_SERIES_CHIPSETS(bgep) || 2709 DEVICE_57765_SERIES_CHIPSETS(bgep))) { 2710 /* no relaxed ordering for descriptors rings? */ 2711 dma_attr.dma_attr_flags &= ~DDI_DMA_RELAXED_ORDERING; 2712 } 2713 2714 /* 2715 * Allocate memory & handles for receive return rings 2716 */ 2717 ASSERT((rxdescsize % rx_rings) == 0); 2718 for (split = 0; split < rx_rings; ++split) { 2719 err = bge_alloc_dma_mem(bgep, rxdescsize/rx_rings, 2720 &bge_desc_accattr, DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 2721 &bgep->rx_desc[split]); 2722 if (err != DDI_SUCCESS) 2723 return (DDI_FAILURE); 2724 } 2725 BGE_DEBUG(("DMA ALLOC: allocated %d chunks for Rx Descs cons (rx_rings = %d, rxdescsize = %d)", 2726 rxdescsize/rx_rings, 2727 rx_rings, 2728 rxdescsize)); 2729 2730 /* 2731 * Allocate memory & handles for buffer (producer) descriptor rings. 2732 * Note that split=rx_rings. 2733 */ 2734 err = bge_alloc_dma_mem(bgep, rxbuffdescsize, &bge_desc_accattr, 2735 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &bgep->rx_desc[split]); 2736 if (err != DDI_SUCCESS) 2737 return (DDI_FAILURE); 2738 BGE_DEBUG(("DMA ALLOC: allocated 1 chunks for Rx Descs prod (rxbuffdescsize = %d)", 2739 rxdescsize)); 2740 2741 /* 2742 * Allocate memory & handles for TX descriptor rings, 2743 * status block, and statistics area 2744 */ 2745 err = bge_alloc_dma_mem(bgep, txdescsize, &bge_desc_accattr, 2746 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &bgep->tx_desc); 2747 if (err != DDI_SUCCESS) 2748 return (DDI_FAILURE); 2749 BGE_DEBUG(("DMA ALLOC: allocated 1 chunks for Tx Descs / Status Block / Stats (txdescdize = %d)", 2750 txdescsize)); 2751 2752 /* 2753 * Now carve up each of the allocated areas ... 2754 */ 2755 2756 /* rx buffers */ 2757 for (split = 0; split < BGE_SPLIT; ++split) { 2758 area = bgep->rx_buff[split]; 2759 2760 BGE_DEBUG(("RXB CHNK %d INIT: va=%p alen=%d off=%d pa=%llx psz=%d", 2761 split, 2762 area.mem_va, 2763 area.alength, 2764 area.offset, 2765 area.cookie.dmac_laddress, 2766 area.cookie.dmac_size)); 2767 2768 bge_slice_chunk(&bgep->buff[BGE_STD_BUFF_RING].buf[split], 2769 &area, BGE_STD_SLOTS_USED/BGE_SPLIT, 2770 bgep->chipid.std_buf_size); 2771 2772 BGE_DEBUG(("RXB SLCE %d STND: va=%p alen=%d off=%d pa=%llx psz=%d (nslots=%d slotlen=%d)", 2773 split, 2774 bgep->buff[BGE_STD_BUFF_RING].buf[split].mem_va, 2775 bgep->buff[BGE_STD_BUFF_RING].buf[split].alength, 2776 bgep->buff[BGE_STD_BUFF_RING].buf[split].offset, 2777 bgep->buff[BGE_STD_BUFF_RING].buf[split].cookie.dmac_laddress, 2778 bgep->buff[BGE_STD_BUFF_RING].buf[split].cookie.dmac_size, 2779 BGE_STD_SLOTS_USED/BGE_SPLIT, 2780 bgep->chipid.std_buf_size)); 2781 2782 bge_slice_chunk(&bgep->buff[BGE_JUMBO_BUFF_RING].buf[split], 2783 &area, bgep->chipid.jumbo_slots/BGE_SPLIT, 2784 bgep->chipid.recv_jumbo_size); 2785 2786 if ((bgep->chipid.jumbo_slots / BGE_SPLIT) > 0) 2787 { 2788 BGE_DEBUG(("RXB SLCE %d JUMB: va=%p alen=%d off=%d pa=%llx psz=%d (nslots=%d slotlen=%d)", 2789 split, 2790 bgep->buff[BGE_JUMBO_BUFF_RING].buf[split].mem_va, 2791 bgep->buff[BGE_JUMBO_BUFF_RING].buf[split].alength, 2792 bgep->buff[BGE_JUMBO_BUFF_RING].buf[split].offset, 2793 bgep->buff[BGE_JUMBO_BUFF_RING].buf[split].cookie.dmac_laddress, 2794 bgep->buff[BGE_JUMBO_BUFF_RING].buf[split].cookie.dmac_size, 2795 bgep->chipid.jumbo_slots/BGE_SPLIT, 2796 bgep->chipid.recv_jumbo_size)); 2797 } 2798 2799 bge_slice_chunk(&bgep->buff[BGE_MINI_BUFF_RING].buf[split], 2800 &area, BGE_MINI_SLOTS_USED/BGE_SPLIT, 2801 BGE_MINI_BUFF_SIZE); 2802 2803 if ((BGE_MINI_SLOTS_USED / BGE_SPLIT) > 0) 2804 { 2805 BGE_DEBUG(("RXB SLCE %d MINI: va=%p alen=%d off=%d pa=%llx psz=%d (nslots=%d slotlen=%d)", 2806 split, 2807 bgep->buff[BGE_MINI_BUFF_RING].buf[split].mem_va, 2808 bgep->buff[BGE_MINI_BUFF_RING].buf[split].alength, 2809 bgep->buff[BGE_MINI_BUFF_RING].buf[split].offset, 2810 bgep->buff[BGE_MINI_BUFF_RING].buf[split].cookie.dmac_laddress, 2811 bgep->buff[BGE_MINI_BUFF_RING].buf[split].cookie.dmac_size, 2812 BGE_MINI_SLOTS_USED/BGE_SPLIT, 2813 BGE_MINI_BUFF_SIZE)); 2814 } 2815 2816 BGE_DEBUG(("RXB CHNK %d DONE: va=%p alen=%d off=%d pa=%llx psz=%d", 2817 split, 2818 area.mem_va, 2819 area.alength, 2820 area.offset, 2821 area.cookie.dmac_laddress, 2822 area.cookie.dmac_size)); 2823 } 2824 2825 /* tx buffers */ 2826 for (split = 0; split < BGE_SPLIT; ++split) { 2827 area = bgep->tx_buff[split]; 2828 2829 BGE_DEBUG(("TXB CHNK %d INIT: va=%p alen=%d off=%d pa=%llx psz=%d", 2830 split, 2831 area.mem_va, 2832 area.alength, 2833 area.offset, 2834 area.cookie.dmac_laddress, 2835 area.cookie.dmac_size)); 2836 2837 for (ring = 0; ring < tx_rings; ++ring) { 2838 bge_slice_chunk(&bgep->send[ring].buf[0][split], 2839 &area, BGE_SEND_BUF_NUM/BGE_SPLIT, 2840 bgep->chipid.snd_buff_size); 2841 2842 BGE_DEBUG(("TXB SLCE %d RING %d: va=%p alen=%d off=%d pa=%llx psz=%d (nslots=%d slotlen=%d)", 2843 split, ring, 2844 bgep->send[ring].buf[0][split].mem_va, 2845 bgep->send[ring].buf[0][split].alength, 2846 bgep->send[ring].buf[0][split].offset, 2847 bgep->send[ring].buf[0][split].cookie.dmac_laddress, 2848 bgep->send[ring].buf[0][split].cookie.dmac_size, 2849 BGE_SEND_BUF_NUM/BGE_SPLIT, 2850 bgep->chipid.snd_buff_size)); 2851 } 2852 2853 for (; ring < BGE_SEND_RINGS_MAX; ++ring) { 2854 bge_slice_chunk(&bgep->send[ring].buf[0][split], 2855 &area, 0, bgep->chipid.snd_buff_size); 2856 } 2857 2858 BGE_DEBUG(("TXB CHNK %d DONE: va=%p alen=%d off=%d pa=%llx psz=%d", 2859 split, 2860 area.mem_va, 2861 area.alength, 2862 area.offset, 2863 area.cookie.dmac_laddress, 2864 area.cookie.dmac_size)); 2865 } 2866 2867 for (ring = 0; ring < rx_rings; ++ring) { 2868 bge_slice_chunk(&bgep->recv[ring].desc, &bgep->rx_desc[ring], 2869 bgep->chipid.recv_slots, sizeof (bge_rbd_t)); 2870 2871 BGE_DEBUG(("RXD CONS RING %d: va=%p alen=%d off=%d pa=%llx psz=%d (nslots=%d slotlen=%d)", 2872 ring, 2873 bgep->recv[ring].desc.mem_va, 2874 bgep->recv[ring].desc.alength, 2875 bgep->recv[ring].desc.offset, 2876 bgep->recv[ring].desc.cookie.dmac_laddress, 2877 bgep->recv[ring].desc.cookie.dmac_size, 2878 bgep->chipid.recv_slots, 2879 sizeof(bge_rbd_t))); 2880 } 2881 2882 /* dma alloc for rxbuffdescsize is located at bgep->rx_desc[#rings] */ 2883 area = bgep->rx_desc[rx_rings]; /* note rx_rings = one beyond rings */ 2884 2885 for (; ring < BGE_RECV_RINGS_MAX; ++ring) /* skip unused rings */ 2886 bge_slice_chunk(&bgep->recv[ring].desc, &area, 2887 0, sizeof (bge_rbd_t)); 2888 2889 BGE_DEBUG(("RXD PROD INIT: va=%p alen=%d off=%d pa=%llx psz=%d", 2890 area.mem_va, 2891 area.alength, 2892 area.offset, 2893 area.cookie.dmac_laddress, 2894 area.cookie.dmac_size)); 2895 2896 bge_slice_chunk(&bgep->buff[BGE_STD_BUFF_RING].desc, &area, 2897 BGE_STD_SLOTS_USED, sizeof (bge_rbd_t)); 2898 BGE_DEBUG(("RXD PROD STND: va=%p alen=%d off=%d pa=%llx psz=%d (nslots=%d slotlen=%d)", 2899 bgep->buff[BGE_STD_BUFF_RING].desc.mem_va, 2900 bgep->buff[BGE_STD_BUFF_RING].desc.alength, 2901 bgep->buff[BGE_STD_BUFF_RING].desc.offset, 2902 bgep->buff[BGE_STD_BUFF_RING].desc.cookie.dmac_laddress, 2903 bgep->buff[BGE_STD_BUFF_RING].desc.cookie.dmac_size, 2904 BGE_STD_SLOTS_USED, 2905 sizeof(bge_rbd_t))); 2906 2907 bge_slice_chunk(&bgep->buff[BGE_JUMBO_BUFF_RING].desc, &area, 2908 bgep->chipid.jumbo_slots, sizeof (bge_rbd_t)); 2909 BGE_DEBUG(("RXD PROD JUMB: va=%p alen=%d off=%d pa=%llx psz=%d (nslots=%d slotlen=%d)", 2910 bgep->buff[BGE_JUMBO_BUFF_RING].desc.mem_va, 2911 bgep->buff[BGE_JUMBO_BUFF_RING].desc.alength, 2912 bgep->buff[BGE_JUMBO_BUFF_RING].desc.offset, 2913 bgep->buff[BGE_JUMBO_BUFF_RING].desc.cookie.dmac_laddress, 2914 bgep->buff[BGE_JUMBO_BUFF_RING].desc.cookie.dmac_size, 2915 bgep->chipid.jumbo_slots, 2916 sizeof(bge_rbd_t))); 2917 2918 bge_slice_chunk(&bgep->buff[BGE_MINI_BUFF_RING].desc, &area, 2919 BGE_MINI_SLOTS_USED, sizeof (bge_rbd_t)); 2920 BGE_DEBUG(("RXD PROD MINI: va=%p alen=%d off=%d pa=%llx psz=%d (nslots=%d slotlen=%d)", 2921 bgep->buff[BGE_MINI_BUFF_RING].desc.mem_va, 2922 bgep->buff[BGE_MINI_BUFF_RING].desc.alength, 2923 bgep->buff[BGE_MINI_BUFF_RING].desc.offset, 2924 bgep->buff[BGE_MINI_BUFF_RING].desc.cookie.dmac_laddress, 2925 bgep->buff[BGE_MINI_BUFF_RING].desc.cookie.dmac_size, 2926 BGE_MINI_SLOTS_USED, 2927 sizeof(bge_rbd_t))); 2928 2929 BGE_DEBUG(("RXD PROD DONE: va=%p alen=%d off=%d pa=%llx psz=%d", 2930 area.mem_va, 2931 area.alength, 2932 area.offset, 2933 area.cookie.dmac_laddress, 2934 area.cookie.dmac_size)); 2935 2936 ASSERT(area.alength == 0); 2937 2938 area = bgep->tx_desc; 2939 2940 BGE_DEBUG(("TXD INIT: va=%p alen=%d off=%d pa=%llx psz=%d", 2941 area.mem_va, 2942 area.alength, 2943 area.offset, 2944 area.cookie.dmac_laddress, 2945 area.cookie.dmac_size)); 2946 2947 for (ring = 0; ring < tx_rings; ++ring) { 2948 bge_slice_chunk(&bgep->send[ring].desc, &area, 2949 BGE_SEND_SLOTS_USED, sizeof (bge_sbd_t)); 2950 2951 BGE_DEBUG(("TXD RING %d: va=%p alen=%d off=%d pa=%llx psz=%d (nslots=%d slotlen=%d)", 2952 ring, 2953 bgep->send[ring].desc.mem_va, 2954 bgep->send[ring].desc.alength, 2955 bgep->send[ring].desc.offset, 2956 bgep->send[ring].desc.cookie.dmac_laddress, 2957 bgep->send[ring].desc.cookie.dmac_size, 2958 BGE_SEND_SLOTS_USED, 2959 sizeof(bge_sbd_t))); 2960 } 2961 2962 for (; ring < BGE_SEND_RINGS_MAX; ++ring) /* skip unused rings */ 2963 bge_slice_chunk(&bgep->send[ring].desc, &area, 2964 0, sizeof (bge_sbd_t)); 2965 2966 bge_slice_chunk(&bgep->statistics, &area, 1, sizeof (bge_statistics_t)); 2967 BGE_DEBUG(("TXD STATISTICS: va=%p alen=%d off=%d pa=%llx psz=%d (nslots=%d slotlen=%d)", 2968 bgep->statistics.mem_va, 2969 bgep->statistics.alength, 2970 bgep->statistics.offset, 2971 bgep->statistics.cookie.dmac_laddress, 2972 bgep->statistics.cookie.dmac_size, 2973 1, 2974 sizeof(bge_statistics_t))); 2975 2976 bge_slice_chunk(&bgep->status_block, &area, 1, sizeof (bge_status_t)); 2977 BGE_DEBUG(("TXD STATUS BLOCK: va=%p alen=%d off=%d pa=%llx psz=%d (nslots=%d slotlen=%d)", 2978 bgep->status_block.mem_va, 2979 bgep->status_block.alength, 2980 bgep->status_block.offset, 2981 bgep->status_block.cookie.dmac_laddress, 2982 bgep->status_block.cookie.dmac_size, 2983 1, 2984 sizeof(bge_status_t))); 2985 2986 BGE_DEBUG(("TXD DONE: va=%p alen=%d off=%d pa=%llx psz=%d", 2987 area.mem_va, 2988 area.alength, 2989 area.offset, 2990 area.cookie.dmac_laddress, 2991 area.cookie.dmac_size)); 2992 2993 ASSERT(area.alength == BGE_STATUS_PADDING); 2994 2995 DMA_ZERO(bgep->status_block); 2996 2997 return (DDI_SUCCESS); 2998 } 2999 3000 #undef BGE_DBG 3001 #define BGE_DBG BGE_DBG_INIT /* debug flag for this code */ 3002 3003 /* 3004 * This routine frees the transmit and receive buffers and descriptors. 3005 * Make sure the chip is stopped before calling it! 3006 */ 3007 void 3008 bge_free_bufs(bge_t *bgep) 3009 { 3010 int split; 3011 3012 BGE_TRACE(("bge_free_bufs($%p)", 3013 (void *)bgep)); 3014 3015 bge_free_dma_mem(&bgep->tx_desc); 3016 for (split = 0; split < BGE_RECV_RINGS_SPLIT; ++split) 3017 bge_free_dma_mem(&bgep->rx_desc[split]); 3018 for (split = 0; split < BGE_SPLIT; ++split) 3019 bge_free_dma_mem(&bgep->tx_buff[split]); 3020 for (split = 0; split < BGE_SPLIT; ++split) 3021 bge_free_dma_mem(&bgep->rx_buff[split]); 3022 } 3023 3024 /* 3025 * Determine (initial) MAC address ("BIA") to use for this interface 3026 */ 3027 3028 static void 3029 bge_find_mac_address(bge_t *bgep, chip_id_t *cidp) 3030 { 3031 struct ether_addr sysaddr; 3032 char propbuf[8]; /* "true" or "false", plus NUL */ 3033 uchar_t *bytes; 3034 int *ints; 3035 uint_t nelts; 3036 int err; 3037 3038 BGE_TRACE(("bge_find_mac_address($%p)", 3039 (void *)bgep)); 3040 3041 BGE_DEBUG(("bge_find_mac_address: hw_mac_addr %012llx, => %s (%sset)", 3042 cidp->hw_mac_addr, 3043 ether_sprintf((void *)cidp->vendor_addr.addr), 3044 cidp->vendor_addr.set ? "" : "not ")); 3045 3046 /* 3047 * The "vendor's factory-set address" may already have 3048 * been extracted from the chip, but if the property 3049 * "local-mac-address" is set we use that instead. It 3050 * will normally be set by OBP, but it could also be 3051 * specified in a .conf file(!) 3052 * 3053 * There doesn't seem to be a way to define byte-array 3054 * properties in a .conf, so we check whether it looks 3055 * like an array of 6 ints instead. 3056 * 3057 * Then, we check whether it looks like an array of 6 3058 * bytes (which it should, if OBP set it). If we can't 3059 * make sense of it either way, we'll ignore it. 3060 */ 3061 err = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, bgep->devinfo, 3062 DDI_PROP_DONTPASS, localmac_propname, &ints, &nelts); 3063 if (err == DDI_PROP_SUCCESS) { 3064 if (nelts == ETHERADDRL) { 3065 while (nelts--) 3066 cidp->vendor_addr.addr[nelts] = ints[nelts]; 3067 cidp->vendor_addr.set = B_TRUE; 3068 } 3069 ddi_prop_free(ints); 3070 } 3071 3072 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, bgep->devinfo, 3073 DDI_PROP_DONTPASS, localmac_propname, &bytes, &nelts); 3074 if (err == DDI_PROP_SUCCESS) { 3075 if (nelts == ETHERADDRL) { 3076 while (nelts--) 3077 cidp->vendor_addr.addr[nelts] = bytes[nelts]; 3078 cidp->vendor_addr.set = B_TRUE; 3079 } 3080 ddi_prop_free(bytes); 3081 } 3082 3083 BGE_DEBUG(("bge_find_mac_address: +local %s (%sset)", 3084 ether_sprintf((void *)cidp->vendor_addr.addr), 3085 cidp->vendor_addr.set ? "" : "not ")); 3086 3087 /* 3088 * Look up the OBP property "local-mac-address?". Note that even 3089 * though its value is a string (which should be "true" or "false"), 3090 * it can't be decoded by ddi_prop_lookup_string(9F). So, we zero 3091 * the buffer first and then fetch the property as an untyped array; 3092 * this may or may not include a final NUL, but since there will 3093 * always be one left at the end of the buffer we can now treat it 3094 * as a string anyway. 3095 */ 3096 nelts = sizeof (propbuf); 3097 bzero(propbuf, nelts--); 3098 err = ddi_getlongprop_buf(DDI_DEV_T_ANY, bgep->devinfo, 3099 DDI_PROP_CANSLEEP, localmac_boolname, propbuf, (int *)&nelts); 3100 3101 /* 3102 * Now, if the address still isn't set from the hardware (SEEPROM) 3103 * or the OBP or .conf property, OR if the user has foolishly set 3104 * 'local-mac-address? = false', use "the system address" instead 3105 * (but only if it's non-null i.e. has been set from the IDPROM). 3106 */ 3107 if (cidp->vendor_addr.set == B_FALSE || strcmp(propbuf, "false") == 0) 3108 if (localetheraddr(NULL, &sysaddr) != 0) { 3109 ethaddr_copy(&sysaddr, cidp->vendor_addr.addr); 3110 cidp->vendor_addr.set = B_TRUE; 3111 } 3112 3113 BGE_DEBUG(("bge_find_mac_address: +system %s (%sset)", 3114 ether_sprintf((void *)cidp->vendor_addr.addr), 3115 cidp->vendor_addr.set ? "" : "not ")); 3116 3117 /* 3118 * Finally(!), if there's a valid "mac-address" property (created 3119 * if we netbooted from this interface), we must use this instead 3120 * of any of the above to ensure that the NFS/install server doesn't 3121 * get confused by the address changing as Solaris takes over! 3122 */ 3123 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, bgep->devinfo, 3124 DDI_PROP_DONTPASS, macaddr_propname, &bytes, &nelts); 3125 if (err == DDI_PROP_SUCCESS) { 3126 if (nelts == ETHERADDRL) { 3127 while (nelts--) 3128 cidp->vendor_addr.addr[nelts] = bytes[nelts]; 3129 cidp->vendor_addr.set = B_TRUE; 3130 } 3131 ddi_prop_free(bytes); 3132 } 3133 3134 BGE_DEBUG(("bge_find_mac_address: =final %s (%sset)", 3135 ether_sprintf((void *)cidp->vendor_addr.addr), 3136 cidp->vendor_addr.set ? "" : "not ")); 3137 } 3138 3139 /*ARGSUSED*/ 3140 int 3141 bge_check_acc_handle(bge_t *bgep, ddi_acc_handle_t handle) 3142 { 3143 ddi_fm_error_t de; 3144 3145 ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION); 3146 ddi_fm_acc_err_clear(handle, DDI_FME_VERSION); 3147 return (de.fme_status); 3148 } 3149 3150 /*ARGSUSED*/ 3151 int 3152 bge_check_dma_handle(bge_t *bgep, ddi_dma_handle_t handle) 3153 { 3154 ddi_fm_error_t de; 3155 3156 ASSERT(bgep->progress & PROGRESS_BUFS); 3157 ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION); 3158 return (de.fme_status); 3159 } 3160 3161 /* 3162 * The IO fault service error handling callback function 3163 */ 3164 /*ARGSUSED*/ 3165 static int 3166 bge_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data) 3167 { 3168 /* 3169 * as the driver can always deal with an error in any dma or 3170 * access handle, we can just return the fme_status value. 3171 */ 3172 pci_ereport_post(dip, err, NULL); 3173 return (err->fme_status); 3174 } 3175 3176 static void 3177 bge_fm_init(bge_t *bgep) 3178 { 3179 ddi_iblock_cookie_t iblk; 3180 3181 /* Only register with IO Fault Services if we have some capability */ 3182 if (bgep->fm_capabilities) { 3183 bge_reg_accattr.devacc_attr_access = DDI_FLAGERR_ACC; 3184 dma_attr.dma_attr_flags = DDI_DMA_FLAGERR; 3185 3186 /* Register capabilities with IO Fault Services */ 3187 ddi_fm_init(bgep->devinfo, &bgep->fm_capabilities, &iblk); 3188 3189 /* 3190 * Initialize pci ereport capabilities if ereport capable 3191 */ 3192 if (DDI_FM_EREPORT_CAP(bgep->fm_capabilities) || 3193 DDI_FM_ERRCB_CAP(bgep->fm_capabilities)) 3194 pci_ereport_setup(bgep->devinfo); 3195 3196 /* 3197 * Register error callback if error callback capable 3198 */ 3199 if (DDI_FM_ERRCB_CAP(bgep->fm_capabilities)) 3200 ddi_fm_handler_register(bgep->devinfo, 3201 bge_fm_error_cb, (void*) bgep); 3202 } else { 3203 /* 3204 * These fields have to be cleared of FMA if there are no 3205 * FMA capabilities at runtime. 3206 */ 3207 bge_reg_accattr.devacc_attr_access = DDI_DEFAULT_ACC; 3208 dma_attr.dma_attr_flags = 0; 3209 } 3210 } 3211 3212 static void 3213 bge_fm_fini(bge_t *bgep) 3214 { 3215 /* Only unregister FMA capabilities if we registered some */ 3216 if (bgep->fm_capabilities) { 3217 3218 /* 3219 * Release any resources allocated by pci_ereport_setup() 3220 */ 3221 if (DDI_FM_EREPORT_CAP(bgep->fm_capabilities) || 3222 DDI_FM_ERRCB_CAP(bgep->fm_capabilities)) 3223 pci_ereport_teardown(bgep->devinfo); 3224 3225 /* 3226 * Un-register error callback if error callback capable 3227 */ 3228 if (DDI_FM_ERRCB_CAP(bgep->fm_capabilities)) 3229 ddi_fm_handler_unregister(bgep->devinfo); 3230 3231 /* Unregister from IO Fault Services */ 3232 ddi_fm_fini(bgep->devinfo); 3233 } 3234 } 3235 3236 static void 3237 #ifdef BGE_IPMI_ASF 3238 bge_unattach(bge_t *bgep, uint_t asf_mode) 3239 #else 3240 bge_unattach(bge_t *bgep) 3241 #endif 3242 { 3243 BGE_TRACE(("bge_unattach($%p)", 3244 (void *)bgep)); 3245 3246 /* 3247 * Flag that no more activity may be initiated 3248 */ 3249 bgep->progress &= ~PROGRESS_READY; 3250 3251 /* 3252 * Quiesce the PHY and MAC (leave it reset but still powered). 3253 * Clean up and free all BGE data structures 3254 */ 3255 if (bgep->periodic_id != NULL) { 3256 ddi_periodic_delete(bgep->periodic_id); 3257 bgep->periodic_id = NULL; 3258 } 3259 3260 if (bgep->progress & PROGRESS_KSTATS) 3261 bge_fini_kstats(bgep); 3262 if (bgep->progress & PROGRESS_PHY) 3263 bge_phys_reset(bgep); 3264 if (bgep->progress & PROGRESS_HWINT) { 3265 mutex_enter(bgep->genlock); 3266 #ifdef BGE_IPMI_ASF 3267 if (bge_chip_reset(bgep, B_FALSE, asf_mode) != DDI_SUCCESS) 3268 #else 3269 if (bge_chip_reset(bgep, B_FALSE) != DDI_SUCCESS) 3270 #endif 3271 ddi_fm_service_impact(bgep->devinfo, 3272 DDI_SERVICE_UNAFFECTED); 3273 #ifdef BGE_IPMI_ASF 3274 if (bgep->asf_enabled) { 3275 /* 3276 * This register has been overlaid. We restore its 3277 * initial value here. 3278 */ 3279 bge_nic_put32(bgep, BGE_NIC_DATA_SIG_ADDR, 3280 BGE_NIC_DATA_SIG); 3281 } 3282 #endif 3283 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) 3284 ddi_fm_service_impact(bgep->devinfo, 3285 DDI_SERVICE_UNAFFECTED); 3286 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) 3287 ddi_fm_service_impact(bgep->devinfo, 3288 DDI_SERVICE_UNAFFECTED); 3289 mutex_exit(bgep->genlock); 3290 } 3291 if (bgep->progress & PROGRESS_INTR) { 3292 bge_intr_disable(bgep); 3293 bge_fini_rings(bgep); 3294 } 3295 if (bgep->progress & PROGRESS_HWINT) { 3296 bge_rem_intrs(bgep); 3297 rw_destroy(bgep->errlock); 3298 mutex_destroy(bgep->softintrlock); 3299 mutex_destroy(bgep->genlock); 3300 } 3301 if (bgep->progress & PROGRESS_FACTOTUM) 3302 ddi_remove_softintr(bgep->factotum_id); 3303 if (bgep->progress & PROGRESS_RESCHED) 3304 ddi_remove_softintr(bgep->drain_id); 3305 if (bgep->progress & PROGRESS_BUFS) 3306 bge_free_bufs(bgep); 3307 if (bgep->progress & PROGRESS_REGS) { 3308 ddi_regs_map_free(&bgep->io_handle); 3309 if (bgep->ape_enabled) 3310 ddi_regs_map_free(&bgep->ape_handle); 3311 } 3312 if (bgep->progress & PROGRESS_CFG) 3313 pci_config_teardown(&bgep->cfg_handle); 3314 3315 bge_fm_fini(bgep); 3316 3317 ddi_remove_minor_node(bgep->devinfo, NULL); 3318 kmem_free(bgep->pstats, sizeof (bge_statistics_reg_t)); 3319 kmem_free(bgep, sizeof (*bgep)); 3320 } 3321 3322 static int 3323 bge_resume(dev_info_t *devinfo) 3324 { 3325 bge_t *bgep; /* Our private data */ 3326 chip_id_t *cidp; 3327 chip_id_t chipid; 3328 3329 bgep = ddi_get_driver_private(devinfo); 3330 if (bgep == NULL) 3331 return (DDI_FAILURE); 3332 3333 /* 3334 * Refuse to resume if the data structures aren't consistent 3335 */ 3336 if (bgep->devinfo != devinfo) 3337 return (DDI_FAILURE); 3338 3339 #ifdef BGE_IPMI_ASF 3340 /* 3341 * Power management hasn't been supported in BGE now. If you 3342 * want to implement it, please add the ASF/IPMI related 3343 * code here. 3344 */ 3345 3346 #endif 3347 3348 /* 3349 * Read chip ID & set up config space command register(s) 3350 * Refuse to resume if the chip has changed its identity! 3351 */ 3352 cidp = &bgep->chipid; 3353 mutex_enter(bgep->genlock); 3354 bge_chip_cfg_init(bgep, &chipid, B_FALSE); 3355 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) { 3356 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 3357 mutex_exit(bgep->genlock); 3358 return (DDI_FAILURE); 3359 } 3360 mutex_exit(bgep->genlock); 3361 if (chipid.vendor != cidp->vendor) 3362 return (DDI_FAILURE); 3363 if (chipid.device != cidp->device) 3364 return (DDI_FAILURE); 3365 if (chipid.revision != cidp->revision) 3366 return (DDI_FAILURE); 3367 if (chipid.asic_rev != cidp->asic_rev) 3368 return (DDI_FAILURE); 3369 3370 /* 3371 * All OK, reinitialise h/w & kick off GLD scheduling 3372 */ 3373 mutex_enter(bgep->genlock); 3374 if (bge_restart(bgep, B_TRUE) != DDI_SUCCESS) { 3375 (void) bge_check_acc_handle(bgep, bgep->cfg_handle); 3376 (void) bge_check_acc_handle(bgep, bgep->io_handle); 3377 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 3378 mutex_exit(bgep->genlock); 3379 return (DDI_FAILURE); 3380 } 3381 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) { 3382 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 3383 mutex_exit(bgep->genlock); 3384 return (DDI_FAILURE); 3385 } 3386 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) { 3387 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 3388 mutex_exit(bgep->genlock); 3389 return (DDI_FAILURE); 3390 } 3391 mutex_exit(bgep->genlock); 3392 return (DDI_SUCCESS); 3393 } 3394 3395 static int 3396 bge_fw_img_is_valid(bge_t *bgep, uint32_t offset) 3397 { 3398 uint32_t val; 3399 3400 if (bge_nvmem_read32(bgep, offset, &val) || 3401 (val & 0xfc000000) != 0x0c000000 || 3402 bge_nvmem_read32(bgep, offset + 4, &val) || 3403 val != 0) 3404 return (0); 3405 3406 return (1); 3407 } 3408 3409 static void 3410 bge_read_mgmtfw_ver(bge_t *bgep) 3411 { 3412 uint32_t val; 3413 uint32_t offset; 3414 uint32_t start; 3415 int i, vlen; 3416 3417 for (offset = NVM_DIR_START; 3418 offset < NVM_DIR_END; 3419 offset += NVM_DIRENT_SIZE) { 3420 if (bge_nvmem_read32(bgep, offset, &val)) 3421 return; 3422 3423 if ((val >> NVM_DIRTYPE_SHIFT) == NVM_DIRTYPE_ASFINI) 3424 break; 3425 } 3426 3427 if (offset == NVM_DIR_END) 3428 return; 3429 3430 if (bge_nvmem_read32(bgep, offset - 4, &start)) 3431 return; 3432 3433 if (bge_nvmem_read32(bgep, offset + 4, &offset) || 3434 !bge_fw_img_is_valid(bgep, offset) || 3435 bge_nvmem_read32(bgep, offset + 8, &val)) 3436 return; 3437 3438 offset += val - start; 3439 3440 vlen = strlen(bgep->fw_version); 3441 3442 bgep->fw_version[vlen++] = ','; 3443 bgep->fw_version[vlen++] = ' '; 3444 3445 for (i = 0; i < 4; i++) { 3446 uint32_t v; 3447 3448 if (bge_nvmem_read32(bgep, offset, &v)) 3449 return; 3450 3451 v = BE_32(v); 3452 3453 offset += sizeof(v); 3454 3455 if (vlen > BGE_FW_VER_SIZE - sizeof(v)) { 3456 memcpy(&bgep->fw_version[vlen], &v, BGE_FW_VER_SIZE - vlen); 3457 break; 3458 } 3459 3460 memcpy(&bgep->fw_version[vlen], &v, sizeof(v)); 3461 vlen += sizeof(v); 3462 } 3463 } 3464 3465 static void 3466 bge_read_dash_ver(bge_t *bgep) 3467 { 3468 int vlen; 3469 uint32_t apedata; 3470 char *fwtype; 3471 3472 if (!bgep->ape_enabled || !bgep->asf_enabled) 3473 return; 3474 3475 apedata = bge_ape_get32(bgep, BGE_APE_SEG_SIG); 3476 if (apedata != APE_SEG_SIG_MAGIC) 3477 return; 3478 3479 apedata = bge_ape_get32(bgep, BGE_APE_FW_STATUS); 3480 if (!(apedata & APE_FW_STATUS_READY)) 3481 return; 3482 3483 apedata = bge_ape_get32(bgep, BGE_APE_FW_VERSION); 3484 3485 if (bge_ape_get32(bgep, BGE_APE_FW_FEATURES) & 3486 BGE_APE_FW_FEATURE_NCSI) { 3487 bgep->ape_has_ncsi = B_TRUE; 3488 fwtype = "NCSI"; 3489 } else if ((bgep->chipid.device == DEVICE_ID_5725) || 3490 (bgep->chipid.device == DEVICE_ID_5727)) { 3491 fwtype = "SMASH"; 3492 } else { 3493 fwtype = "DASH"; 3494 } 3495 3496 vlen = strlen(bgep->fw_version); 3497 3498 snprintf(&bgep->fw_version[vlen], BGE_FW_VER_SIZE - vlen, 3499 " %s v%d.%d.%d.%d", fwtype, 3500 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT, 3501 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT, 3502 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT, 3503 (apedata & APE_FW_VERSION_BLDMSK)); 3504 } 3505 3506 static void 3507 bge_read_bc_ver(bge_t *bgep) 3508 { 3509 uint32_t val; 3510 uint32_t offset; 3511 uint32_t start; 3512 uint32_t ver_offset; 3513 int i, dst_off; 3514 uint32_t major; 3515 uint32_t minor; 3516 boolean_t newver = B_FALSE; 3517 3518 if (bge_nvmem_read32(bgep, 0xc, &offset) || 3519 bge_nvmem_read32(bgep, 0x4, &start)) 3520 return; 3521 3522 if (bge_nvmem_read32(bgep, offset, &val)) 3523 return; 3524 3525 if ((val & 0xfc000000) == 0x0c000000) { 3526 if (bge_nvmem_read32(bgep, offset + 4, &val)) 3527 return; 3528 3529 if (val == 0) 3530 newver = B_TRUE; 3531 } 3532 3533 dst_off = strlen(bgep->fw_version); 3534 3535 if (newver) { 3536 if (((BGE_FW_VER_SIZE - dst_off) < 16) || 3537 bge_nvmem_read32(bgep, offset + 8, &ver_offset)) 3538 return; 3539 3540 offset = offset + ver_offset - start; 3541 for (i = 0; i < 16; i += 4) { 3542 if (bge_nvmem_read32(bgep, offset + i, &val)) 3543 return; 3544 val = BE_32(val); 3545 memcpy(bgep->fw_version + dst_off + i, &val, 3546 sizeof(val)); 3547 } 3548 } else { 3549 if (bge_nvmem_read32(bgep, NVM_PTREV_BCVER, &ver_offset)) 3550 return; 3551 3552 major = (ver_offset & NVM_BCVER_MAJMSK) >> NVM_BCVER_MAJSFT; 3553 minor = ver_offset & NVM_BCVER_MINMSK; 3554 snprintf(&bgep->fw_version[dst_off], BGE_FW_VER_SIZE - dst_off, 3555 "v%d.%02d", major, minor); 3556 } 3557 } 3558 3559 static void 3560 bge_read_fw_ver(bge_t *bgep) 3561 { 3562 uint32_t val; 3563 uint32_t magic; 3564 3565 *bgep->fw_version = 0; 3566 3567 if ((bgep->chipid.nvtype == BGE_NVTYPE_NONE) || 3568 (bgep->chipid.nvtype == BGE_NVTYPE_UNKNOWN)) { 3569 snprintf(bgep->fw_version, sizeof(bgep->fw_version), "sb"); 3570 return; 3571 } 3572 3573 mutex_enter(bgep->genlock); 3574 3575 bge_nvmem_read32(bgep, 0, &magic); 3576 3577 if (magic == EEPROM_MAGIC) { 3578 bge_read_bc_ver(bgep); 3579 } else { 3580 /* ignore other configs for now */ 3581 mutex_exit(bgep->genlock); 3582 return; 3583 } 3584 3585 if (bgep->ape_enabled) { 3586 if (bgep->asf_enabled) { 3587 bge_read_dash_ver(bgep); 3588 } 3589 } else if (bgep->asf_enabled) { 3590 bge_read_mgmtfw_ver(bgep); 3591 } 3592 3593 mutex_exit(bgep->genlock); 3594 3595 bgep->fw_version[BGE_FW_VER_SIZE - 1] = 0; /* safety */ 3596 } 3597 3598 /* 3599 * attach(9E) -- Attach a device to the system 3600 * 3601 * Called once for each board successfully probed. 3602 */ 3603 static int 3604 bge_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd) 3605 { 3606 bge_t *bgep; /* Our private data */ 3607 mac_register_t *macp; 3608 chip_id_t *cidp; 3609 caddr_t regs; 3610 int instance; 3611 int err; 3612 int intr_types; 3613 int *props = NULL; 3614 uint_t numProps; 3615 uint32_t regval; 3616 uint32_t pci_state_reg; 3617 #ifdef BGE_IPMI_ASF 3618 uint32_t mhcrValue; 3619 #ifdef __sparc 3620 uint16_t value16; 3621 #endif 3622 #ifdef BGE_NETCONSOLE 3623 int retval; 3624 #endif 3625 #endif 3626 3627 instance = ddi_get_instance(devinfo); 3628 3629 BGE_GTRACE(("bge_attach($%p, %d) instance %d", 3630 (void *)devinfo, cmd, instance)); 3631 BGE_BRKPT(NULL, "bge_attach"); 3632 3633 switch (cmd) { 3634 default: 3635 return (DDI_FAILURE); 3636 3637 case DDI_RESUME: 3638 return (bge_resume(devinfo)); 3639 3640 case DDI_ATTACH: 3641 break; 3642 } 3643 3644 bgep = kmem_zalloc(sizeof (*bgep), KM_SLEEP); 3645 bgep->pstats = kmem_zalloc(sizeof (bge_statistics_reg_t), KM_SLEEP); 3646 ddi_set_driver_private(devinfo, bgep); 3647 bgep->bge_guard = BGE_GUARD; 3648 bgep->devinfo = devinfo; 3649 bgep->param_drain_max = 64; 3650 bgep->param_msi_cnt = 0; 3651 bgep->param_loop_mode = 0; 3652 3653 /* 3654 * Initialize more fields in BGE private data 3655 */ 3656 bgep->debug = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 3657 DDI_PROP_DONTPASS, debug_propname, bge_debug); 3658 (void) snprintf(bgep->ifname, sizeof (bgep->ifname), "%s%d", 3659 BGE_DRIVER_NAME, instance); 3660 3661 /* 3662 * Initialize for fma support 3663 */ 3664 bgep->fm_capabilities = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 3665 DDI_PROP_DONTPASS, fm_cap, 3666 DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE | 3667 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE); 3668 BGE_DEBUG(("bgep->fm_capabilities = %d", bgep->fm_capabilities)); 3669 bge_fm_init(bgep); 3670 3671 /* 3672 * Look up the IOMMU's page size for DVMA mappings (must be 3673 * a power of 2) and convert to a mask. This can be used to 3674 * determine whether a message buffer crosses a page boundary. 3675 * Note: in 2s complement binary notation, if X is a power of 3676 * 2, then -X has the representation "11...1100...00". 3677 */ 3678 bgep->pagemask = dvma_pagesize(devinfo); 3679 ASSERT(ddi_ffs(bgep->pagemask) == ddi_fls(bgep->pagemask)); 3680 bgep->pagemask = -bgep->pagemask; 3681 3682 /* 3683 * Map config space registers 3684 * Read chip ID & set up config space command register(s) 3685 * 3686 * Note: this leaves the chip accessible by Memory Space 3687 * accesses, but with interrupts and Bus Mastering off. 3688 * This should ensure that nothing untoward will happen 3689 * if it has been left active by the (net-)bootloader. 3690 * We'll re-enable Bus Mastering once we've reset the chip, 3691 * and allow interrupts only when everything else is set up. 3692 */ 3693 err = pci_config_setup(devinfo, &bgep->cfg_handle); 3694 3695 bgep->ape_enabled = B_FALSE; 3696 bgep->ape_regs = NULL; 3697 3698 cidp = &bgep->chipid; 3699 cidp->device = pci_config_get16(bgep->cfg_handle, PCI_CONF_DEVID); 3700 if (DEVICE_5717_SERIES_CHIPSETS(bgep) || 3701 DEVICE_5725_SERIES_CHIPSETS(bgep)) { 3702 err = ddi_regs_map_setup(devinfo, BGE_PCI_APEREGS_RNUMBER, 3703 ®s, 0, 0, &bge_reg_accattr, &bgep->ape_handle); 3704 if (err != DDI_SUCCESS) { 3705 ddi_regs_map_free(&bgep->io_handle); 3706 bge_problem(bgep, "ddi_regs_map_setup() failed"); 3707 goto attach_fail; 3708 } 3709 bgep->ape_regs = regs; 3710 bgep->ape_enabled = B_TRUE; 3711 3712 /* 3713 * Allow reads and writes to the 3714 * APE register and memory space. 3715 */ 3716 3717 pci_state_reg = pci_config_get32(bgep->cfg_handle, 3718 PCI_CONF_BGE_PCISTATE); 3719 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR | 3720 PCISTATE_ALLOW_APE_SHMEM_WR | PCISTATE_ALLOW_APE_PSPACE_WR; 3721 pci_config_put32(bgep->cfg_handle, 3722 PCI_CONF_BGE_PCISTATE, pci_state_reg); 3723 bge_ape_lock_init(bgep); 3724 } 3725 3726 #ifdef BGE_IPMI_ASF 3727 #ifdef __sparc 3728 /* 3729 * We need to determine the type of chipset for accessing some configure 3730 * registers. (This information will be used by bge_ind_put32, 3731 * bge_ind_get32 and bge_nic_read32) 3732 */ 3733 value16 = pci_config_get16(bgep->cfg_handle, PCI_CONF_COMM); 3734 value16 = value16 | (PCI_COMM_MAE | PCI_COMM_ME); 3735 pci_config_put16(bgep->cfg_handle, PCI_CONF_COMM, value16); 3736 mhcrValue = MHCR_ENABLE_INDIRECT_ACCESS | 3737 MHCR_ENABLE_TAGGED_STATUS_MODE | 3738 MHCR_MASK_INTERRUPT_MODE | 3739 MHCR_MASK_PCI_INT_OUTPUT | 3740 MHCR_CLEAR_INTERRUPT_INTA | 3741 MHCR_ENABLE_ENDIAN_WORD_SWAP | 3742 MHCR_ENABLE_ENDIAN_BYTE_SWAP; 3743 /* 3744 * For some chipsets (e.g., BCM5718), if MHCR_ENABLE_ENDIAN_BYTE_SWAP 3745 * has been set in PCI_CONF_COMM already, we need to write the 3746 * byte-swapped value to it. So we just write zero first for simplicity. 3747 */ 3748 if (DEVICE_5717_SERIES_CHIPSETS(bgep) || 3749 DEVICE_5725_SERIES_CHIPSETS(bgep) || 3750 DEVICE_57765_SERIES_CHIPSETS(bgep)) 3751 pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_MHCR, 0); 3752 #else 3753 mhcrValue = MHCR_ENABLE_INDIRECT_ACCESS | 3754 MHCR_ENABLE_TAGGED_STATUS_MODE | 3755 MHCR_MASK_INTERRUPT_MODE | 3756 MHCR_MASK_PCI_INT_OUTPUT | 3757 MHCR_CLEAR_INTERRUPT_INTA; 3758 #endif 3759 pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_MHCR, mhcrValue); 3760 bge_ind_put32(bgep, MEMORY_ARBITER_MODE_REG, 3761 bge_ind_get32(bgep, MEMORY_ARBITER_MODE_REG) | 3762 MEMORY_ARBITER_ENABLE); 3763 if (mhcrValue & MHCR_ENABLE_ENDIAN_WORD_SWAP) { 3764 bgep->asf_wordswapped = B_TRUE; 3765 } else { 3766 bgep->asf_wordswapped = B_FALSE; 3767 } 3768 bge_asf_get_config(bgep); 3769 #endif 3770 if (err != DDI_SUCCESS) { 3771 bge_problem(bgep, "pci_config_setup() failed"); 3772 goto attach_fail; 3773 } 3774 bgep->progress |= PROGRESS_CFG; 3775 bge_chip_cfg_init(bgep, cidp, B_FALSE); 3776 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) { 3777 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 3778 goto attach_fail; 3779 } 3780 3781 #ifdef BGE_IPMI_ASF 3782 if (DEVICE_5721_SERIES_CHIPSETS(bgep) || 3783 DEVICE_5714_SERIES_CHIPSETS(bgep)) { 3784 bgep->asf_newhandshake = B_TRUE; 3785 } else { 3786 bgep->asf_newhandshake = B_FALSE; 3787 } 3788 #endif 3789 3790 /* 3791 * Update those parts of the chip ID derived from volatile 3792 * registers with the values seen by OBP (in case the chip 3793 * has been reset externally and therefore lost them). 3794 */ 3795 cidp->subven = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 3796 DDI_PROP_DONTPASS, subven_propname, cidp->subven); 3797 cidp->subdev = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 3798 DDI_PROP_DONTPASS, subdev_propname, cidp->subdev); 3799 cidp->clsize = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 3800 DDI_PROP_DONTPASS, clsize_propname, cidp->clsize); 3801 cidp->latency = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 3802 DDI_PROP_DONTPASS, latency_propname, cidp->latency); 3803 cidp->rx_rings = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 3804 DDI_PROP_DONTPASS, rxrings_propname, cidp->rx_rings); 3805 cidp->tx_rings = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 3806 DDI_PROP_DONTPASS, txrings_propname, cidp->tx_rings); 3807 cidp->eee = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 3808 DDI_PROP_DONTPASS, eee_propname, cidp->eee); 3809 3810 cidp->default_mtu = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 3811 DDI_PROP_DONTPASS, default_mtu, BGE_DEFAULT_MTU); 3812 if ((cidp->default_mtu < BGE_DEFAULT_MTU) || 3813 (cidp->default_mtu > BGE_MAXIMUM_MTU)) { 3814 cidp->default_mtu = BGE_DEFAULT_MTU; 3815 } 3816 3817 /* 3818 * Map operating registers 3819 */ 3820 err = ddi_regs_map_setup(devinfo, BGE_PCI_OPREGS_RNUMBER, 3821 ®s, 0, 0, &bge_reg_accattr, &bgep->io_handle); 3822 if (err != DDI_SUCCESS) { 3823 bge_problem(bgep, "ddi_regs_map_setup() failed"); 3824 goto attach_fail; 3825 } 3826 bgep->io_regs = regs; 3827 3828 bgep->progress |= PROGRESS_REGS; 3829 3830 /* 3831 * Characterise the device, so we know its requirements. 3832 * Then allocate the appropriate TX and RX descriptors & buffers. 3833 */ 3834 if (bge_chip_id_init(bgep) == EIO) { 3835 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 3836 goto attach_fail; 3837 } 3838 3839 err = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, bgep->devinfo, 3840 0, "reg", &props, &numProps); 3841 if ((err == DDI_PROP_SUCCESS) && (numProps > 0)) { 3842 bgep->pci_bus = PCI_REG_BUS_G(props[0]); 3843 bgep->pci_dev = PCI_REG_DEV_G(props[0]); 3844 bgep->pci_func = PCI_REG_FUNC_G(props[0]); 3845 ddi_prop_free(props); 3846 } 3847 3848 if (DEVICE_5717_SERIES_CHIPSETS(bgep) || 3849 DEVICE_5725_SERIES_CHIPSETS(bgep)) { 3850 regval = bge_reg_get32(bgep, CPMU_STATUS_REG); 3851 if ((bgep->chipid.device == DEVICE_ID_5719) || 3852 (bgep->chipid.device == DEVICE_ID_5720)) { 3853 bgep->pci_func = 3854 ((regval & CPMU_STATUS_FUNC_NUM_5719) >> 3855 CPMU_STATUS_FUNC_NUM_5719_SHIFT); 3856 } else { 3857 bgep->pci_func = ((regval & CPMU_STATUS_FUNC_NUM) >> 3858 CPMU_STATUS_FUNC_NUM_SHIFT); 3859 } 3860 } 3861 3862 err = bge_alloc_bufs(bgep); 3863 if (err != DDI_SUCCESS) { 3864 bge_problem(bgep, "DMA buffer allocation failed"); 3865 goto attach_fail; 3866 } 3867 bgep->progress |= PROGRESS_BUFS; 3868 3869 /* 3870 * Add the softint handlers: 3871 * 3872 * Both of these handlers are used to avoid restrictions on the 3873 * context and/or mutexes required for some operations. In 3874 * particular, the hardware interrupt handler and its subfunctions 3875 * can detect a number of conditions that we don't want to handle 3876 * in that context or with that set of mutexes held. So, these 3877 * softints are triggered instead: 3878 * 3879 * the <resched> softint is triggered if we have previously 3880 * had to refuse to send a packet because of resource shortage 3881 * (we've run out of transmit buffers), but the send completion 3882 * interrupt handler has now detected that more buffers have 3883 * become available. 3884 * 3885 * the <factotum> is triggered if the h/w interrupt handler 3886 * sees the <link state changed> or <error> bits in the status 3887 * block. It's also triggered periodically to poll the link 3888 * state, just in case we aren't getting link status change 3889 * interrupts ... 3890 */ 3891 err = ddi_add_softintr(devinfo, DDI_SOFTINT_LOW, &bgep->drain_id, 3892 NULL, NULL, bge_send_drain, (caddr_t)bgep); 3893 if (err != DDI_SUCCESS) { 3894 bge_problem(bgep, "ddi_add_softintr() failed"); 3895 goto attach_fail; 3896 } 3897 bgep->progress |= PROGRESS_RESCHED; 3898 err = ddi_add_softintr(devinfo, DDI_SOFTINT_LOW, &bgep->factotum_id, 3899 NULL, NULL, bge_chip_factotum, (caddr_t)bgep); 3900 if (err != DDI_SUCCESS) { 3901 bge_problem(bgep, "ddi_add_softintr() failed"); 3902 goto attach_fail; 3903 } 3904 bgep->progress |= PROGRESS_FACTOTUM; 3905 3906 /* Get supported interrupt types */ 3907 if (ddi_intr_get_supported_types(devinfo, &intr_types) != DDI_SUCCESS) { 3908 bge_error(bgep, "ddi_intr_get_supported_types failed\n"); 3909 3910 goto attach_fail; 3911 } 3912 3913 BGE_DEBUG(("%s: ddi_intr_get_supported_types() returned: %x", 3914 bgep->ifname, intr_types)); 3915 3916 if ((intr_types & DDI_INTR_TYPE_MSI) && bgep->chipid.msi_enabled) { 3917 if (bge_add_intrs(bgep, DDI_INTR_TYPE_MSI) != DDI_SUCCESS) { 3918 bge_error(bgep, "MSI registration failed, " 3919 "trying FIXED interrupt type\n"); 3920 } else { 3921 BGE_DEBUG(("%s: Using MSI interrupt type", 3922 bgep->ifname)); 3923 bgep->intr_type = DDI_INTR_TYPE_MSI; 3924 bgep->progress |= PROGRESS_HWINT; 3925 } 3926 } 3927 3928 if (!(bgep->progress & PROGRESS_HWINT) && 3929 (intr_types & DDI_INTR_TYPE_FIXED)) { 3930 if (bge_add_intrs(bgep, DDI_INTR_TYPE_FIXED) != DDI_SUCCESS) { 3931 bge_error(bgep, "FIXED interrupt " 3932 "registration failed\n"); 3933 goto attach_fail; 3934 } 3935 3936 BGE_DEBUG(("%s: Using FIXED interrupt type", bgep->ifname)); 3937 3938 bgep->intr_type = DDI_INTR_TYPE_FIXED; 3939 bgep->progress |= PROGRESS_HWINT; 3940 } 3941 3942 if (!(bgep->progress & PROGRESS_HWINT)) { 3943 bge_error(bgep, "No interrupts registered\n"); 3944 goto attach_fail; 3945 } 3946 3947 /* 3948 * Note that interrupts are not enabled yet as 3949 * mutex locks are not initialized. Initialize mutex locks. 3950 */ 3951 mutex_init(bgep->genlock, NULL, MUTEX_DRIVER, 3952 DDI_INTR_PRI(bgep->intr_pri)); 3953 mutex_init(bgep->softintrlock, NULL, MUTEX_DRIVER, 3954 DDI_INTR_PRI(bgep->intr_pri)); 3955 rw_init(bgep->errlock, NULL, RW_DRIVER, 3956 DDI_INTR_PRI(bgep->intr_pri)); 3957 3958 /* 3959 * Initialize rings. 3960 */ 3961 bge_init_rings(bgep); 3962 3963 /* 3964 * Now that mutex locks are initialized, enable interrupts. 3965 */ 3966 bge_intr_enable(bgep); 3967 bgep->progress |= PROGRESS_INTR; 3968 3969 /* 3970 * Initialise link state variables 3971 * Stop, reset & reinitialise the chip. 3972 * Initialise the (internal) PHY. 3973 */ 3974 bgep->link_state = LINK_STATE_UNKNOWN; 3975 3976 mutex_enter(bgep->genlock); 3977 3978 /* 3979 * Reset chip & rings to initial state; also reset address 3980 * filtering, promiscuity, loopback mode. 3981 */ 3982 #ifdef BGE_IPMI_ASF 3983 #ifdef BGE_NETCONSOLE 3984 if (bge_reset(bgep, ASF_MODE_INIT) != DDI_SUCCESS) { 3985 #else 3986 if (bge_reset(bgep, ASF_MODE_SHUTDOWN) != DDI_SUCCESS) { 3987 #endif 3988 #else 3989 if (bge_reset(bgep) != DDI_SUCCESS) { 3990 #endif 3991 (void) bge_check_acc_handle(bgep, bgep->cfg_handle); 3992 (void) bge_check_acc_handle(bgep, bgep->io_handle); 3993 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 3994 mutex_exit(bgep->genlock); 3995 goto attach_fail; 3996 } 3997 3998 #ifdef BGE_IPMI_ASF 3999 if (bgep->asf_enabled) { 4000 bgep->asf_status = ASF_STAT_RUN_INIT; 4001 } 4002 #endif 4003 4004 bzero(bgep->mcast_hash, sizeof (bgep->mcast_hash)); 4005 bzero(bgep->mcast_refs, sizeof (bgep->mcast_refs)); 4006 bgep->promisc = B_FALSE; 4007 bgep->param_loop_mode = BGE_LOOP_NONE; 4008 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) { 4009 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 4010 mutex_exit(bgep->genlock); 4011 goto attach_fail; 4012 } 4013 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) { 4014 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 4015 mutex_exit(bgep->genlock); 4016 goto attach_fail; 4017 } 4018 4019 mutex_exit(bgep->genlock); 4020 4021 if (bge_phys_init(bgep) == EIO) { 4022 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 4023 goto attach_fail; 4024 } 4025 bgep->progress |= PROGRESS_PHY; 4026 4027 /* 4028 * initialize NDD-tweakable parameters 4029 */ 4030 if (bge_nd_init(bgep)) { 4031 bge_problem(bgep, "bge_nd_init() failed"); 4032 goto attach_fail; 4033 } 4034 bgep->progress |= PROGRESS_NDD; 4035 4036 /* 4037 * Create & initialise named kstats 4038 */ 4039 bge_init_kstats(bgep, instance); 4040 bgep->progress |= PROGRESS_KSTATS; 4041 4042 /* 4043 * Determine whether to override the chip's own MAC address 4044 */ 4045 bge_find_mac_address(bgep, cidp); 4046 4047 bge_read_fw_ver(bgep); 4048 4049 bgep->unicst_addr_total = MAC_ADDRESS_REGS_MAX; 4050 bgep->unicst_addr_avail = MAC_ADDRESS_REGS_MAX; 4051 4052 if ((macp = mac_alloc(MAC_VERSION)) == NULL) 4053 goto attach_fail; 4054 macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER; 4055 macp->m_driver = bgep; 4056 macp->m_dip = devinfo; 4057 macp->m_src_addr = cidp->vendor_addr.addr; 4058 macp->m_callbacks = &bge_m_callbacks; 4059 macp->m_min_sdu = 0; 4060 macp->m_max_sdu = cidp->ethmax_size - sizeof (struct ether_header); 4061 macp->m_margin = VLAN_TAGSZ; 4062 macp->m_priv_props = bge_priv_prop; 4063 macp->m_v12n = MAC_VIRT_LEVEL1; 4064 4065 /* 4066 * Finally, we're ready to register ourselves with the MAC layer 4067 * interface; if this succeeds, we're all ready to start() 4068 */ 4069 err = mac_register(macp, &bgep->mh); 4070 mac_free(macp); 4071 if (err != 0) 4072 goto attach_fail; 4073 4074 mac_link_update(bgep->mh, LINK_STATE_UNKNOWN); 4075 4076 /* 4077 * Register a periodical handler. 4078 * bge_chip_cyclic() is invoked in kernel context. 4079 */ 4080 bgep->periodic_id = ddi_periodic_add(bge_chip_cyclic, bgep, 4081 BGE_CYCLIC_PERIOD, DDI_IPL_0); 4082 4083 bgep->progress |= PROGRESS_READY; 4084 ASSERT(bgep->bge_guard == BGE_GUARD); 4085 #ifdef BGE_IPMI_ASF 4086 #ifdef BGE_NETCONSOLE 4087 if (bgep->asf_enabled) { 4088 mutex_enter(bgep->genlock); 4089 retval = bge_chip_start(bgep, B_TRUE); 4090 mutex_exit(bgep->genlock); 4091 if (retval != DDI_SUCCESS) 4092 goto attach_fail; 4093 } 4094 #endif 4095 #endif 4096 4097 ddi_report_dev(devinfo); 4098 4099 return (DDI_SUCCESS); 4100 4101 attach_fail: 4102 #ifdef BGE_IPMI_ASF 4103 bge_unattach(bgep, ASF_MODE_SHUTDOWN); 4104 #else 4105 bge_unattach(bgep); 4106 #endif 4107 return (DDI_FAILURE); 4108 } 4109 4110 /* 4111 * bge_suspend() -- suspend transmit/receive for powerdown 4112 */ 4113 static int 4114 bge_suspend(bge_t *bgep) 4115 { 4116 /* 4117 * Stop processing and idle (powerdown) the PHY ... 4118 */ 4119 mutex_enter(bgep->genlock); 4120 #ifdef BGE_IPMI_ASF 4121 /* 4122 * Power management hasn't been supported in BGE now. If you 4123 * want to implement it, please add the ASF/IPMI related 4124 * code here. 4125 */ 4126 #endif 4127 bge_stop(bgep); 4128 if (bge_phys_idle(bgep) != DDI_SUCCESS) { 4129 (void) bge_check_acc_handle(bgep, bgep->io_handle); 4130 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 4131 mutex_exit(bgep->genlock); 4132 return (DDI_FAILURE); 4133 } 4134 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) { 4135 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 4136 mutex_exit(bgep->genlock); 4137 return (DDI_FAILURE); 4138 } 4139 mutex_exit(bgep->genlock); 4140 4141 return (DDI_SUCCESS); 4142 } 4143 4144 /* 4145 * quiesce(9E) entry point. 4146 * 4147 * This function is called when the system is single-threaded at high 4148 * PIL with preemption disabled. Therefore, this function must not be 4149 * blocked. 4150 * 4151 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure. 4152 * DDI_FAILURE indicates an error condition and should almost never happen. 4153 */ 4154 #ifdef __sparc 4155 #define bge_quiesce ddi_quiesce_not_supported 4156 #else 4157 static int 4158 bge_quiesce(dev_info_t *devinfo) 4159 { 4160 bge_t *bgep = ddi_get_driver_private(devinfo); 4161 4162 if (bgep == NULL) 4163 return (DDI_FAILURE); 4164 4165 if (bgep->intr_type == DDI_INTR_TYPE_FIXED) { 4166 bge_reg_set32(bgep, PCI_CONF_BGE_MHCR, 4167 MHCR_MASK_PCI_INT_OUTPUT); 4168 } else { 4169 bge_reg_clr32(bgep, MSI_MODE_REG, MSI_MSI_ENABLE); 4170 } 4171 4172 /* Stop the chip */ 4173 bge_chip_stop_nonblocking(bgep); 4174 4175 return (DDI_SUCCESS); 4176 } 4177 #endif 4178 4179 /* 4180 * detach(9E) -- Detach a device from the system 4181 */ 4182 static int 4183 bge_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd) 4184 { 4185 bge_t *bgep; 4186 #ifdef BGE_IPMI_ASF 4187 uint_t asf_mode; 4188 asf_mode = ASF_MODE_NONE; 4189 #endif 4190 4191 BGE_GTRACE(("bge_detach($%p, %d)", (void *)devinfo, cmd)); 4192 4193 bgep = ddi_get_driver_private(devinfo); 4194 4195 switch (cmd) { 4196 default: 4197 return (DDI_FAILURE); 4198 4199 case DDI_SUSPEND: 4200 return (bge_suspend(bgep)); 4201 4202 case DDI_DETACH: 4203 break; 4204 } 4205 4206 #ifdef BGE_IPMI_ASF 4207 mutex_enter(bgep->genlock); 4208 if (bgep->asf_enabled && ((bgep->asf_status == ASF_STAT_RUN) || 4209 (bgep->asf_status == ASF_STAT_RUN_INIT))) { 4210 4211 bge_asf_update_status(bgep); 4212 if (bgep->asf_status == ASF_STAT_RUN) { 4213 bge_asf_stop_timer(bgep); 4214 } 4215 bgep->asf_status = ASF_STAT_STOP; 4216 4217 bge_asf_pre_reset_operations(bgep, BGE_SHUTDOWN_RESET); 4218 4219 if (bgep->asf_pseudostop) { 4220 bge_chip_stop(bgep, B_FALSE); 4221 bgep->bge_mac_state = BGE_MAC_STOPPED; 4222 bgep->asf_pseudostop = B_FALSE; 4223 } 4224 4225 asf_mode = ASF_MODE_POST_SHUTDOWN; 4226 4227 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) 4228 ddi_fm_service_impact(bgep->devinfo, 4229 DDI_SERVICE_UNAFFECTED); 4230 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) 4231 ddi_fm_service_impact(bgep->devinfo, 4232 DDI_SERVICE_UNAFFECTED); 4233 } 4234 mutex_exit(bgep->genlock); 4235 #endif 4236 4237 /* 4238 * Unregister from the GLD subsystem. This can fail, in 4239 * particular if there are DLPI style-2 streams still open - 4240 * in which case we just return failure without shutting 4241 * down chip operations. 4242 */ 4243 if (mac_unregister(bgep->mh) != 0) 4244 return (DDI_FAILURE); 4245 4246 /* 4247 * All activity stopped, so we can clean up & exit 4248 */ 4249 #ifdef BGE_IPMI_ASF 4250 bge_unattach(bgep, asf_mode); 4251 #else 4252 bge_unattach(bgep); 4253 #endif 4254 return (DDI_SUCCESS); 4255 } 4256 4257 4258 /* 4259 * ========== Module Loading Data & Entry Points ========== 4260 */ 4261 4262 #undef BGE_DBG 4263 #define BGE_DBG BGE_DBG_INIT /* debug flag for this code */ 4264 4265 DDI_DEFINE_STREAM_OPS(bge_dev_ops, 4266 nulldev, /* identify */ 4267 nulldev, /* probe */ 4268 bge_attach, /* attach */ 4269 bge_detach, /* detach */ 4270 nodev, /* reset */ 4271 NULL, /* cb_ops */ 4272 D_MP, /* bus_ops */ 4273 NULL, /* power */ 4274 bge_quiesce /* quiesce */ 4275 ); 4276 4277 static struct modldrv bge_modldrv = { 4278 &mod_driverops, /* Type of module. This one is a driver */ 4279 bge_ident, /* short description */ 4280 &bge_dev_ops /* driver specific ops */ 4281 }; 4282 4283 static struct modlinkage modlinkage = { 4284 MODREV_1, (void *)&bge_modldrv, NULL 4285 }; 4286 4287 4288 int 4289 _info(struct modinfo *modinfop) 4290 { 4291 return (mod_info(&modlinkage, modinfop)); 4292 } 4293 4294 int 4295 _init(void) 4296 { 4297 int status; 4298 4299 mac_init_ops(&bge_dev_ops, "bge"); 4300 status = mod_install(&modlinkage); 4301 if (status == DDI_SUCCESS) 4302 mutex_init(bge_log_mutex, NULL, MUTEX_DRIVER, NULL); 4303 else 4304 mac_fini_ops(&bge_dev_ops); 4305 return (status); 4306 } 4307 4308 int 4309 _fini(void) 4310 { 4311 int status; 4312 4313 status = mod_remove(&modlinkage); 4314 if (status == DDI_SUCCESS) { 4315 mac_fini_ops(&bge_dev_ops); 4316 mutex_destroy(bge_log_mutex); 4317 } 4318 return (status); 4319 } 4320 4321 4322 /* 4323 * bge_add_intrs: 4324 * 4325 * Register FIXED or MSI interrupts. 4326 */ 4327 static int 4328 bge_add_intrs(bge_t *bgep, int intr_type) 4329 { 4330 dev_info_t *dip = bgep->devinfo; 4331 int avail, actual, intr_size, count = 0; 4332 int i, flag, ret; 4333 4334 BGE_DEBUG(("bge_add_intrs($%p, 0x%x)", (void *)bgep, intr_type)); 4335 4336 /* Get number of interrupts */ 4337 ret = ddi_intr_get_nintrs(dip, intr_type, &count); 4338 if ((ret != DDI_SUCCESS) || (count == 0)) { 4339 bge_error(bgep, "ddi_intr_get_nintrs() failure, ret: %d, " 4340 "count: %d", ret, count); 4341 4342 return (DDI_FAILURE); 4343 } 4344 4345 /* Get number of available interrupts */ 4346 ret = ddi_intr_get_navail(dip, intr_type, &avail); 4347 if ((ret != DDI_SUCCESS) || (avail == 0)) { 4348 bge_error(bgep, "ddi_intr_get_navail() failure, " 4349 "ret: %d, avail: %d\n", ret, avail); 4350 4351 return (DDI_FAILURE); 4352 } 4353 4354 if (avail < count) { 4355 BGE_DEBUG(("%s: nintrs() returned %d, navail returned %d", 4356 bgep->ifname, count, avail)); 4357 } 4358 4359 /* 4360 * BGE hardware generates only single MSI even though it claims 4361 * to support multiple MSIs. So, hard code MSI count value to 1. 4362 */ 4363 if (intr_type == DDI_INTR_TYPE_MSI) { 4364 count = 1; 4365 flag = DDI_INTR_ALLOC_STRICT; 4366 } else { 4367 flag = DDI_INTR_ALLOC_NORMAL; 4368 } 4369 4370 /* Allocate an array of interrupt handles */ 4371 intr_size = count * sizeof (ddi_intr_handle_t); 4372 bgep->htable = kmem_alloc(intr_size, KM_SLEEP); 4373 4374 /* Call ddi_intr_alloc() */ 4375 ret = ddi_intr_alloc(dip, bgep->htable, intr_type, 0, 4376 count, &actual, flag); 4377 4378 if ((ret != DDI_SUCCESS) || (actual == 0)) { 4379 bge_error(bgep, "ddi_intr_alloc() failed %d\n", ret); 4380 4381 kmem_free(bgep->htable, intr_size); 4382 return (DDI_FAILURE); 4383 } 4384 4385 if (actual < count) { 4386 BGE_DEBUG(("%s: Requested: %d, Received: %d", 4387 bgep->ifname, count, actual)); 4388 } 4389 4390 bgep->intr_cnt = actual; 4391 4392 /* 4393 * Get priority for first msi, assume remaining are all the same 4394 */ 4395 if ((ret = ddi_intr_get_pri(bgep->htable[0], &bgep->intr_pri)) != 4396 DDI_SUCCESS) { 4397 bge_error(bgep, "ddi_intr_get_pri() failed %d\n", ret); 4398 4399 /* Free already allocated intr */ 4400 for (i = 0; i < actual; i++) { 4401 (void) ddi_intr_free(bgep->htable[i]); 4402 } 4403 4404 kmem_free(bgep->htable, intr_size); 4405 return (DDI_FAILURE); 4406 } 4407 4408 /* Call ddi_intr_add_handler() */ 4409 for (i = 0; i < actual; i++) { 4410 if ((ret = ddi_intr_add_handler(bgep->htable[i], bge_intr, 4411 (caddr_t)bgep, (caddr_t)(uintptr_t)i)) != DDI_SUCCESS) { 4412 bge_error(bgep, "ddi_intr_add_handler() " 4413 "failed %d\n", ret); 4414 4415 /* Free already allocated intr */ 4416 for (i = 0; i < actual; i++) { 4417 (void) ddi_intr_free(bgep->htable[i]); 4418 } 4419 4420 kmem_free(bgep->htable, intr_size); 4421 return (DDI_FAILURE); 4422 } 4423 } 4424 4425 if ((ret = ddi_intr_get_cap(bgep->htable[0], &bgep->intr_cap)) 4426 != DDI_SUCCESS) { 4427 bge_error(bgep, "ddi_intr_get_cap() failed %d\n", ret); 4428 4429 for (i = 0; i < actual; i++) { 4430 (void) ddi_intr_remove_handler(bgep->htable[i]); 4431 (void) ddi_intr_free(bgep->htable[i]); 4432 } 4433 4434 kmem_free(bgep->htable, intr_size); 4435 return (DDI_FAILURE); 4436 } 4437 4438 return (DDI_SUCCESS); 4439 } 4440 4441 /* 4442 * bge_rem_intrs: 4443 * 4444 * Unregister FIXED or MSI interrupts 4445 */ 4446 static void 4447 bge_rem_intrs(bge_t *bgep) 4448 { 4449 int i; 4450 4451 BGE_DEBUG(("bge_rem_intrs($%p)", (void *)bgep)); 4452 4453 /* Call ddi_intr_remove_handler() */ 4454 for (i = 0; i < bgep->intr_cnt; i++) { 4455 (void) ddi_intr_remove_handler(bgep->htable[i]); 4456 (void) ddi_intr_free(bgep->htable[i]); 4457 } 4458 4459 kmem_free(bgep->htable, bgep->intr_cnt * sizeof (ddi_intr_handle_t)); 4460 } 4461 4462 4463 void 4464 bge_intr_enable(bge_t *bgep) 4465 { 4466 int i; 4467 4468 if (bgep->intr_cap & DDI_INTR_FLAG_BLOCK) { 4469 /* Call ddi_intr_block_enable() for MSI interrupts */ 4470 (void) ddi_intr_block_enable(bgep->htable, bgep->intr_cnt); 4471 } else { 4472 /* Call ddi_intr_enable for MSI or FIXED interrupts */ 4473 for (i = 0; i < bgep->intr_cnt; i++) { 4474 (void) ddi_intr_enable(bgep->htable[i]); 4475 } 4476 } 4477 } 4478 4479 4480 void 4481 bge_intr_disable(bge_t *bgep) 4482 { 4483 int i; 4484 4485 if (bgep->intr_cap & DDI_INTR_FLAG_BLOCK) { 4486 /* Call ddi_intr_block_disable() */ 4487 (void) ddi_intr_block_disable(bgep->htable, bgep->intr_cnt); 4488 } else { 4489 for (i = 0; i < bgep->intr_cnt; i++) { 4490 (void) ddi_intr_disable(bgep->htable[i]); 4491 } 4492 } 4493 } 4494 4495 int 4496 bge_reprogram(bge_t *bgep) 4497 { 4498 int status = 0; 4499 4500 ASSERT(mutex_owned(bgep->genlock)); 4501 4502 if (bge_phys_update(bgep) != DDI_SUCCESS) { 4503 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 4504 status = IOC_INVAL; 4505 } 4506 #ifdef BGE_IPMI_ASF 4507 if (bge_chip_sync(bgep, B_TRUE) == DDI_FAILURE) { 4508 #else 4509 if (bge_chip_sync(bgep) == DDI_FAILURE) { 4510 #endif 4511 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 4512 status = IOC_INVAL; 4513 } 4514 if (bgep->intr_type == DDI_INTR_TYPE_MSI) 4515 bge_chip_msi_trig(bgep); 4516 return (status); 4517 } 4518