/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#include "bge_impl.h"
#include <sys/sdt.h>
#include <sys/mac_provider.h>
#include <sys/mac.h>
#include <sys/mac_flow.h>

/*
 * This is the string displayed by modinfo, etc.
 */
static char bge_ident[] = "Broadcom Gb Ethernet";

/*
 * Property names
 */
static char debug_propname[] = "bge-debug-flags";
static char clsize_propname[] = "cache-line-size";
static char latency_propname[] = "latency-timer";
static char localmac_boolname[] = "local-mac-address?";
static char localmac_propname[] = "local-mac-address";
static char macaddr_propname[] = "mac-address";
static char subdev_propname[] = "subsystem-id";
static char subven_propname[] = "subsystem-vendor-id";
static char rxrings_propname[] = "bge-rx-rings";
static char txrings_propname[] = "bge-tx-rings";
static char fm_cap[] = "fm-capable";
static char default_mtu[] = "default_mtu";

static int bge_add_intrs(bge_t *, int);
static void bge_rem_intrs(bge_t *);
static int bge_unicst_set(void *, const uint8_t *, int);

/*
 * Describes the chip's DMA engine
 */
static ddi_dma_attr_t dma_attr = {
    DMA_ATTR_V0,                /* dma_attr version     */
    0x0000000000000000ull,      /* dma_attr_addr_lo     */
    0xFFFFFFFFFFFFFFFFull,      /* dma_attr_addr_hi     */
    0x00000000FFFFFFFFull,      /* dma_attr_count_max   */
    0x0000000000000001ull,      /* dma_attr_align       */
    0x00000FFF,                 /* dma_attr_burstsizes  */
    0x00000001,                 /* dma_attr_minxfer     */
    0x000000000000FFFFull,      /* dma_attr_maxxfer     */
    0xFFFFFFFFFFFFFFFFull,      /* dma_attr_seg         */
    1,                          /* dma_attr_sgllen      */
    0x00000001,                 /* dma_attr_granular    */
    DDI_DMA_FLAGERR             /* dma_attr_flags       */
};
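
/*
 * Editorial note on dma_attr_burstsizes above (descriptive only): per
 * ddi_dma_attr(9S), each set bit i in the burstsizes mask permits a
 * burst of 2^i bytes, so 0x00000FFF allows bursts of 1, 2, 4, ... up
 * to 2048 bytes.
 */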

/*
 * PIO access attributes for registers
 */
static ddi_device_acc_attr_t bge_reg_accattr = {
    DDI_DEVICE_ATTR_V1,
    DDI_NEVERSWAP_ACC,
    DDI_STRICTORDER_ACC,
    DDI_FLAGERR_ACC
};

/*
 * DMA access attributes for descriptors: NOT to be byte swapped.
 */
static ddi_device_acc_attr_t bge_desc_accattr = {
    DDI_DEVICE_ATTR_V0,
    DDI_NEVERSWAP_ACC,
    DDI_STRICTORDER_ACC
};

/*
 * DMA access attributes for data: NOT to be byte swapped.
 */
static ddi_device_acc_attr_t bge_data_accattr = {
    DDI_DEVICE_ATTR_V0,
    DDI_NEVERSWAP_ACC,
    DDI_STRICTORDER_ACC
};

static int bge_m_start(void *);
static void bge_m_stop(void *);
static int bge_m_promisc(void *, boolean_t);
static int bge_m_multicst(void *, boolean_t, const uint8_t *);
static void bge_m_ioctl(void *, queue_t *, mblk_t *);
static boolean_t bge_m_getcapab(void *, mac_capab_t, void *);
static int bge_unicst_set(void *, const uint8_t *, int);
static int bge_m_setprop(void *, const char *, mac_prop_id_t,
    uint_t, const void *);
static int bge_m_getprop(void *, const char *, mac_prop_id_t,
    uint_t, void *);
static void bge_m_propinfo(void *, const char *, mac_prop_id_t,
    mac_prop_info_handle_t);
static int bge_set_priv_prop(bge_t *, const char *, uint_t,
    const void *);
static int bge_get_priv_prop(bge_t *, const char *, uint_t,
    void *);
static void bge_priv_propinfo(const char *,
    mac_prop_info_handle_t);

#define BGE_M_CALLBACK_FLAGS    (MC_IOCTL | MC_GETCAPAB | MC_SETPROP | \
    MC_GETPROP | MC_PROPINFO)

static mac_callbacks_t bge_m_callbacks = {
    BGE_M_CALLBACK_FLAGS,
    bge_m_stat,
    bge_m_start,
    bge_m_stop,
    bge_m_promisc,
    bge_m_multicst,
    NULL,
    bge_m_tx,
    NULL,
    bge_m_ioctl,
    bge_m_getcapab,
    NULL,
    NULL,
    bge_m_setprop,
    bge_m_getprop,
    bge_m_propinfo
};

char *bge_priv_prop[] = {
    "_adv_asym_pause_cap",
    "_adv_pause_cap",
    "_drain_max",
    "_msi_cnt",
    "_rx_intr_coalesce_blank_time",
    "_tx_intr_coalesce_blank_time",
    "_rx_intr_coalesce_pkt_cnt",
    "_tx_intr_coalesce_pkt_cnt",
    NULL
};
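
/*
 * Illustrative sketch only (not compiled): roughly how bge_m_callbacks
 * and bge_priv_prop above get handed to the MAC layer from attach(9E).
 * The variable names (bgep, devinfo, cidp, err) are assumptions for
 * illustration, not definitions from this file.
 */
#if 0
    mac_register_t *macp;

    if ((macp = mac_alloc(MAC_VERSION)) == NULL)
        return (DDI_FAILURE);
    macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
    macp->m_driver = bgep;
    macp->m_dip = devinfo;
    macp->m_src_addr = cidp->vendor_addr.addr;
    macp->m_callbacks = &bge_m_callbacks;
    macp->m_min_sdu = 0;
    macp->m_max_sdu = cidp->ethmax_size - sizeof (struct ether_header);
    macp->m_margin = VLAN_TAGSZ;
    macp->m_priv_props = bge_priv_prop;

    err = mac_register(macp, &bgep->mh);
    mac_free(macp);
#endif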

uint8_t zero_addr[6] = {0, 0, 0, 0, 0, 0};

/*
 * ========== Transmit and receive ring reinitialisation ==========
 */

/*
 * These <reinit> routines each reset the specified ring to an initial
 * state, assuming that the corresponding <init> routine has already
 * been called exactly once.
 */

static void
bge_reinit_send_ring(send_ring_t *srp)
{
    bge_queue_t *txbuf_queue;
    bge_queue_item_t *txbuf_head;
    sw_txbuf_t *txbuf;
    sw_sbd_t *ssbdp;
    uint32_t slot;

    /*
     * Reinitialise control variables ...
     */
    srp->tx_flow = 0;
    srp->tx_next = 0;
    srp->txfill_next = 0;
    srp->tx_free = srp->desc.nslots;
    ASSERT(mutex_owned(srp->tc_lock));
    srp->tc_next = 0;
    srp->txpkt_next = 0;
    srp->tx_block = 0;
    srp->tx_nobd = 0;
    srp->tx_nobuf = 0;

    /*
     * Initialize the tx buffer push queue
     */
    mutex_enter(srp->freetxbuf_lock);
    mutex_enter(srp->txbuf_lock);
    txbuf_queue = &srp->freetxbuf_queue;
    txbuf_queue->head = NULL;
    txbuf_queue->count = 0;
    txbuf_queue->lock = srp->freetxbuf_lock;
    srp->txbuf_push_queue = txbuf_queue;

    /*
     * Initialize the tx buffer pop queue
     */
    txbuf_queue = &srp->txbuf_queue;
    txbuf_queue->head = NULL;
    txbuf_queue->count = 0;
    txbuf_queue->lock = srp->txbuf_lock;
    srp->txbuf_pop_queue = txbuf_queue;
    txbuf_head = srp->txbuf_head;
    txbuf = srp->txbuf;
    for (slot = 0; slot < srp->tx_buffers; ++slot) {
        txbuf_head->item = txbuf;
        txbuf_head->next = txbuf_queue->head;
        txbuf_queue->head = txbuf_head;
        txbuf_queue->count++;
        txbuf++;
        txbuf_head++;
    }
    mutex_exit(srp->txbuf_lock);
    mutex_exit(srp->freetxbuf_lock);

    /*
     * Zero and sync all the h/w Send Buffer Descriptors
     */
    DMA_ZERO(srp->desc);
    DMA_SYNC(srp->desc, DDI_DMA_SYNC_FORDEV);
    bzero(srp->pktp, BGE_SEND_BUF_MAX * sizeof (*srp->pktp));
    ssbdp = srp->sw_sbds;
    for (slot = 0; slot < srp->desc.nslots; ++ssbdp, ++slot)
        ssbdp->pbuf = NULL;
}

static void
bge_reinit_recv_ring(recv_ring_t *rrp)
{
    /*
     * Reinitialise control variables ...
     */
    rrp->rx_next = 0;
}

static void
bge_reinit_buff_ring(buff_ring_t *brp, uint32_t ring)
{
    bge_rbd_t *hw_rbd_p;
    sw_rbd_t *srbdp;
    uint32_t bufsize;
    uint32_t nslots;
    uint32_t slot;

    static uint16_t ring_type_flag[BGE_BUFF_RINGS_MAX] = {
        RBD_FLAG_STD_RING,
        RBD_FLAG_JUMBO_RING,
        RBD_FLAG_MINI_RING
    };

    /*
     * Zero, initialise and sync all the h/w Receive Buffer Descriptors
     * Note: all the remaining fields (<type>, <flags>, <ip_cksum>,
     * <tcp_udp_cksum>, <error_flag>, <vlan_tag>, and <reserved>)
     * should be zeroed, and so don't need to be set up specifically
     * once the whole area has been cleared.
     */
    DMA_ZERO(brp->desc);

    hw_rbd_p = DMA_VPTR(brp->desc);
    nslots = brp->desc.nslots;
    ASSERT(brp->buf[0].nslots == nslots/BGE_SPLIT);
    bufsize = brp->buf[0].size;
    srbdp = brp->sw_rbds;
    for (slot = 0; slot < nslots; ++hw_rbd_p, ++srbdp, ++slot) {
        hw_rbd_p->host_buf_addr = srbdp->pbuf.cookie.dmac_laddress;
        hw_rbd_p->index = (uint16_t)slot;
        hw_rbd_p->len = (uint16_t)bufsize;
        hw_rbd_p->opaque = srbdp->pbuf.token;
        hw_rbd_p->flags |= ring_type_flag[ring];
    }

    DMA_SYNC(brp->desc, DDI_DMA_SYNC_FORDEV);

    /*
     * Finally, reinitialise the ring control variables ...
     */
    brp->rf_next = (nslots != 0) ? (nslots-1) : 0;
}

/*
 * Reinitialize all rings
 */
static void
bge_reinit_rings(bge_t *bgep)
{
    uint32_t ring;

    ASSERT(mutex_owned(bgep->genlock));

    /*
     * Send Rings ...
     */
    for (ring = 0; ring < bgep->chipid.tx_rings; ++ring)
        bge_reinit_send_ring(&bgep->send[ring]);

    /*
     * Receive Return Rings ...
     */
    for (ring = 0; ring < bgep->chipid.rx_rings; ++ring)
        bge_reinit_recv_ring(&bgep->recv[ring]);

    /*
     * Receive Producer Rings ...
     */
    for (ring = 0; ring < BGE_BUFF_RINGS_USED; ++ring)
        bge_reinit_buff_ring(&bgep->buff[ring], ring);
}

/*
 * ========== Internal state management entry points ==========
 */

#undef  BGE_DBG
#define BGE_DBG     BGE_DBG_NEMO    /* debug flag for this code */

/*
 * These routines provide all the functionality required by the
 * corresponding GLD entry points, but don't update the GLD state
 * so they can be called internally without disturbing our record
 * of what GLD thinks we should be doing ...
 */

/*
 *  bge_reset() -- reset h/w & rings to initial state
 */
static int
#ifdef BGE_IPMI_ASF
bge_reset(bge_t *bgep, uint_t asf_mode)
#else
bge_reset(bge_t *bgep)
#endif
{
    uint32_t ring;
    int retval;

    BGE_TRACE(("bge_reset($%p)", (void *)bgep));

    ASSERT(mutex_owned(bgep->genlock));

    /*
     * Grab all the other mutexes in the world (this should
     * ensure no other threads are manipulating driver state)
     */
    for (ring = 0; ring < BGE_RECV_RINGS_MAX; ++ring)
        mutex_enter(bgep->recv[ring].rx_lock);
    for (ring = 0; ring < BGE_BUFF_RINGS_MAX; ++ring)
        mutex_enter(bgep->buff[ring].rf_lock);
    rw_enter(bgep->errlock, RW_WRITER);
    for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring)
        mutex_enter(bgep->send[ring].tx_lock);
    for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring)
        mutex_enter(bgep->send[ring].tc_lock);

#ifdef BGE_IPMI_ASF
    retval = bge_chip_reset(bgep, B_TRUE, asf_mode);
#else
    retval = bge_chip_reset(bgep, B_TRUE);
#endif
    bge_reinit_rings(bgep);

    /*
     * Free the world ...
     */
    for (ring = BGE_SEND_RINGS_MAX; ring-- > 0; )
        mutex_exit(bgep->send[ring].tc_lock);
    for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring)
        mutex_exit(bgep->send[ring].tx_lock);
    rw_exit(bgep->errlock);
    for (ring = BGE_BUFF_RINGS_MAX; ring-- > 0; )
        mutex_exit(bgep->buff[ring].rf_lock);
    for (ring = BGE_RECV_RINGS_MAX; ring-- > 0; )
        mutex_exit(bgep->recv[ring].rx_lock);

    BGE_DEBUG(("bge_reset($%p) done", (void *)bgep));
    return (retval);
}
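
/*
 * Editorial note on lock ordering (descriptive only): bge_reset() above
 * acquires, in order, every recv rx_lock, every buff rf_lock, errlock
 * (as writer), every send tx_lock and finally every send tc_lock, and
 * drops them again before returning.  Any path that needs more than one
 * of these locks must take them in the same order to avoid deadlock.
 */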

/*
 *  bge_stop() -- stop processing, don't reset h/w or rings
 */
static void
bge_stop(bge_t *bgep)
{
    BGE_TRACE(("bge_stop($%p)", (void *)bgep));

    ASSERT(mutex_owned(bgep->genlock));

#ifdef BGE_IPMI_ASF
    if (bgep->asf_enabled) {
        bgep->asf_pseudostop = B_TRUE;
    } else {
#endif
        bge_chip_stop(bgep, B_FALSE);
#ifdef BGE_IPMI_ASF
    }
#endif

    BGE_DEBUG(("bge_stop($%p) done", (void *)bgep));
}

/*
 *  bge_start() -- start transmitting/receiving
 */
static int
bge_start(bge_t *bgep, boolean_t reset_phys)
{
    int retval;

    BGE_TRACE(("bge_start($%p, %d)", (void *)bgep, reset_phys));

    ASSERT(mutex_owned(bgep->genlock));

    /*
     * Start chip processing, including enabling interrupts
     */
    retval = bge_chip_start(bgep, reset_phys);

    BGE_DEBUG(("bge_start($%p, %d) done", (void *)bgep, reset_phys));
    return (retval);
}

/*
 * bge_restart - restart transmitting/receiving after error or suspend
 */
int
bge_restart(bge_t *bgep, boolean_t reset_phys)
{
    int retval = DDI_SUCCESS;

    ASSERT(mutex_owned(bgep->genlock));

#ifdef BGE_IPMI_ASF
    if (bgep->asf_enabled) {
        if (bge_reset(bgep, ASF_MODE_POST_INIT) != DDI_SUCCESS)
            retval = DDI_FAILURE;
    } else
        if (bge_reset(bgep, ASF_MODE_NONE) != DDI_SUCCESS)
            retval = DDI_FAILURE;
#else
    if (bge_reset(bgep) != DDI_SUCCESS)
        retval = DDI_FAILURE;
#endif
    if (bgep->bge_mac_state == BGE_MAC_STARTED) {
        if (bge_start(bgep, reset_phys) != DDI_SUCCESS)
            retval = DDI_FAILURE;
        bgep->watchdog = 0;
        ddi_trigger_softintr(bgep->drain_id);
    }

    BGE_DEBUG(("bge_restart($%p, %d) done", (void *)bgep, reset_phys));
    return (retval);
}


/*
 * ========== Nemo-required management entry points ==========
 */

#undef  BGE_DBG
#define BGE_DBG     BGE_DBG_NEMO    /* debug flag for this code */

/*
 *  bge_m_stop() -- stop transmitting/receiving
 */
static void
bge_m_stop(void *arg)
{
    bge_t *bgep = arg;      /* private device info  */
    send_ring_t *srp;
    uint32_t ring;

    BGE_TRACE(("bge_m_stop($%p)", arg));

    /*
     * Just stop processing, then record new GLD state
     */
    mutex_enter(bgep->genlock);
    if (!(bgep->progress & PROGRESS_INTR)) {
        /* can happen during autorecovery */
        bgep->bge_chip_state = BGE_CHIP_STOPPED;
    } else
        bge_stop(bgep);

    bgep->link_update_timer = 0;
    bgep->link_state = LINK_STATE_UNKNOWN;
    mac_link_update(bgep->mh, bgep->link_state);

    /*
     * Free the possible tx buffers allocated in tx process.
     */
#ifdef BGE_IPMI_ASF
    if (!bgep->asf_pseudostop)
#endif
    {
        rw_enter(bgep->errlock, RW_WRITER);
        for (ring = 0; ring < bgep->chipid.tx_rings; ++ring) {
            srp = &bgep->send[ring];
            mutex_enter(srp->tx_lock);
            if (srp->tx_array > 1)
                bge_free_txbuf_arrays(srp);
            mutex_exit(srp->tx_lock);
        }
        rw_exit(bgep->errlock);
    }
    bgep->bge_mac_state = BGE_MAC_STOPPED;
    BGE_DEBUG(("bge_m_stop($%p) done", arg));
    if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK)
        ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_UNAFFECTED);
    mutex_exit(bgep->genlock);
}

/*
 *  bge_m_start() -- start transmitting/receiving
 */
static int
bge_m_start(void *arg)
{
    bge_t *bgep = arg;      /* private device info  */

    BGE_TRACE(("bge_m_start($%p)", arg));

    /*
     * Start processing and record new GLD state
     */
    mutex_enter(bgep->genlock);
    if (!(bgep->progress & PROGRESS_INTR)) {
        /* can happen during autorecovery */
        ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
        mutex_exit(bgep->genlock);
        return (EIO);
    }
#ifdef BGE_IPMI_ASF
    if (bgep->asf_enabled) {
        if ((bgep->asf_status == ASF_STAT_RUN) &&
            (bgep->asf_pseudostop)) {
            bgep->bge_mac_state = BGE_MAC_STARTED;
            mutex_exit(bgep->genlock);
            return (0);
        }
    }
    if (bge_reset(bgep, ASF_MODE_INIT) != DDI_SUCCESS) {
#else
    if (bge_reset(bgep) != DDI_SUCCESS) {
#endif
        (void) bge_check_acc_handle(bgep, bgep->cfg_handle);
        (void) bge_check_acc_handle(bgep, bgep->io_handle);
        ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
        mutex_exit(bgep->genlock);
        return (EIO);
    }
    if (bge_start(bgep, B_TRUE) != DDI_SUCCESS) {
        (void) bge_check_acc_handle(bgep, bgep->cfg_handle);
        (void) bge_check_acc_handle(bgep, bgep->io_handle);
        ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
        mutex_exit(bgep->genlock);
        return (EIO);
    }
    bgep->watchdog = 0;
    bgep->bge_mac_state = BGE_MAC_STARTED;
    BGE_DEBUG(("bge_m_start($%p) done", arg));

    if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
        ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
        mutex_exit(bgep->genlock);
        return (EIO);
    }
    if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
        ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
        mutex_exit(bgep->genlock);
        return (EIO);
    }
#ifdef BGE_IPMI_ASF
    if (bgep->asf_enabled) {
        if (bgep->asf_status != ASF_STAT_RUN) {
            /* start ASF heart beat */
            bgep->asf_timeout_id = timeout(bge_asf_heartbeat,
                (void *)bgep,
                drv_usectohz(BGE_ASF_HEARTBEAT_INTERVAL));
            bgep->asf_status = ASF_STAT_RUN;
        }
    }
#endif
    mutex_exit(bgep->genlock);

    return (0);
}

/*
 *  bge_unicst_set() -- set the physical network address
 */
static int
bge_unicst_set(void *arg, const uint8_t *macaddr, int slot)
{
    bge_t *bgep = arg;      /* private device info  */

    BGE_TRACE(("bge_unicst_set($%p, %s)", arg,
        ether_sprintf((void *)macaddr)));
    /*
     * Remember the new current address in the driver state
     * Sync the chip's idea of the address too ...
     */
    mutex_enter(bgep->genlock);
    if (!(bgep->progress & PROGRESS_INTR)) {
        /* can happen during autorecovery */
        ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
        mutex_exit(bgep->genlock);
        return (EIO);
    }
    ethaddr_copy(macaddr, bgep->curr_addr[slot].addr);
#ifdef BGE_IPMI_ASF
    if (bge_chip_sync(bgep, B_FALSE) == DDI_FAILURE) {
#else
    if (bge_chip_sync(bgep) == DDI_FAILURE) {
#endif
        (void) bge_check_acc_handle(bgep, bgep->cfg_handle);
        (void) bge_check_acc_handle(bgep, bgep->io_handle);
        ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
        mutex_exit(bgep->genlock);
        return (EIO);
    }
#ifdef BGE_IPMI_ASF
    if (bgep->asf_enabled) {
        /*
         * The above bge_chip_sync() function wrote the ethernet MAC
         * address registers, which destroyed the IPMI/ASF sideband.
         * Here we have to reset the chip to make the IPMI/ASF
         * sideband work again.
         */
        if (bgep->asf_status == ASF_STAT_RUN) {
            /*
             * We must stop the ASF heart beat before
             * bge_chip_stop(), otherwise some computers (e.g.
             * IBM HS20 blade servers) may crash.
             */
            bge_asf_update_status(bgep);
            bge_asf_stop_timer(bgep);
            bgep->asf_status = ASF_STAT_STOP;

            bge_asf_pre_reset_operations(bgep, BGE_INIT_RESET);
        }
        bge_chip_stop(bgep, B_FALSE);

        if (bge_restart(bgep, B_FALSE) == DDI_FAILURE) {
            (void) bge_check_acc_handle(bgep, bgep->cfg_handle);
            (void) bge_check_acc_handle(bgep, bgep->io_handle);
            ddi_fm_service_impact(bgep->devinfo,
                DDI_SERVICE_DEGRADED);
            mutex_exit(bgep->genlock);
            return (EIO);
        }

        /*
         * Start our ASF heartbeat counter as soon as possible.
         */
        if (bgep->asf_status != ASF_STAT_RUN) {
            /* start ASF heart beat */
            bgep->asf_timeout_id = timeout(bge_asf_heartbeat,
                (void *)bgep,
                drv_usectohz(BGE_ASF_HEARTBEAT_INTERVAL));
            bgep->asf_status = ASF_STAT_RUN;
        }
    }
#endif
    BGE_DEBUG(("bge_unicst_set($%p) done", arg));
    if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
        ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
        mutex_exit(bgep->genlock);
        return (EIO);
    }
    if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
        ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
        mutex_exit(bgep->genlock);
        return (EIO);
    }
    mutex_exit(bgep->genlock);

    return (0);
}

extern void bge_wake_factotum(bge_t *);

static boolean_t
bge_param_locked(mac_prop_id_t pr_num)
{
    /*
     * All adv_* parameters are locked (read-only) while
     * the device is in any sort of loopback mode ...
     */
    switch (pr_num) {
    case MAC_PROP_ADV_1000FDX_CAP:
    case MAC_PROP_EN_1000FDX_CAP:
    case MAC_PROP_ADV_1000HDX_CAP:
    case MAC_PROP_EN_1000HDX_CAP:
    case MAC_PROP_ADV_100FDX_CAP:
    case MAC_PROP_EN_100FDX_CAP:
    case MAC_PROP_ADV_100HDX_CAP:
    case MAC_PROP_EN_100HDX_CAP:
    case MAC_PROP_ADV_10FDX_CAP:
    case MAC_PROP_EN_10FDX_CAP:
    case MAC_PROP_ADV_10HDX_CAP:
    case MAC_PROP_EN_10HDX_CAP:
    case MAC_PROP_AUTONEG:
    case MAC_PROP_FLOWCTRL:
        return (B_TRUE);
    }
    return (B_FALSE);
}

/*
 * callback functions for set/get of properties
 */
static int
bge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
    uint_t pr_valsize, const void *pr_val)
{
    bge_t *bgep = barg;
    int err = 0;
    uint32_t cur_mtu, new_mtu;
    link_flowctrl_t fl;

    mutex_enter(bgep->genlock);
    if (bgep->param_loop_mode != BGE_LOOP_NONE &&
        bge_param_locked(pr_num)) {
        /*
         * All adv_* parameters are locked (read-only)
         * while the device is in any sort of loopback mode.
         */
        mutex_exit(bgep->genlock);
        return (EBUSY);
    }
    if ((bgep->chipid.flags & CHIP_FLAG_SERDES) &&
        ((pr_num == MAC_PROP_EN_100FDX_CAP) ||
        (pr_num == MAC_PROP_EN_100HDX_CAP) ||
        (pr_num == MAC_PROP_EN_10FDX_CAP) ||
        (pr_num == MAC_PROP_EN_10HDX_CAP))) {
        /*
         * These properties are read/write on copper,
         * read-only and 0 on serdes.
         */
        mutex_exit(bgep->genlock);
        return (ENOTSUP);
    }
    if (DEVICE_5906_SERIES_CHIPSETS(bgep) &&
        ((pr_num == MAC_PROP_EN_1000FDX_CAP) ||
        (pr_num == MAC_PROP_EN_1000HDX_CAP))) {
        mutex_exit(bgep->genlock);
        return (ENOTSUP);
    }

    switch (pr_num) {
    case MAC_PROP_EN_1000FDX_CAP:
        bgep->param_en_1000fdx = *(uint8_t *)pr_val;
        bgep->param_adv_1000fdx = *(uint8_t *)pr_val;
        goto reprogram;
    case MAC_PROP_EN_1000HDX_CAP:
        bgep->param_en_1000hdx = *(uint8_t *)pr_val;
        bgep->param_adv_1000hdx = *(uint8_t *)pr_val;
        goto reprogram;
    case MAC_PROP_EN_100FDX_CAP:
        bgep->param_en_100fdx = *(uint8_t *)pr_val;
        bgep->param_adv_100fdx = *(uint8_t *)pr_val;
        goto reprogram;
    case MAC_PROP_EN_100HDX_CAP:
        bgep->param_en_100hdx = *(uint8_t *)pr_val;
        bgep->param_adv_100hdx = *(uint8_t *)pr_val;
        goto reprogram;
    case MAC_PROP_EN_10FDX_CAP:
        bgep->param_en_10fdx = *(uint8_t *)pr_val;
        bgep->param_adv_10fdx = *(uint8_t *)pr_val;
        goto reprogram;
    case MAC_PROP_EN_10HDX_CAP:
        bgep->param_en_10hdx = *(uint8_t *)pr_val;
        bgep->param_adv_10hdx = *(uint8_t *)pr_val;
    reprogram:
        if (err == 0 && bge_reprogram(bgep) == IOC_INVAL)
            err = EINVAL;
        break;
    case MAC_PROP_ADV_1000FDX_CAP:
    case MAC_PROP_ADV_1000HDX_CAP:
    case MAC_PROP_ADV_100FDX_CAP:
    case MAC_PROP_ADV_100HDX_CAP:
    case MAC_PROP_ADV_10FDX_CAP:
    case MAC_PROP_ADV_10HDX_CAP:
    case MAC_PROP_STATUS:
    case MAC_PROP_SPEED:
    case MAC_PROP_DUPLEX:
        err = ENOTSUP;  /* read-only prop. Can't set this */
        break;
    case MAC_PROP_AUTONEG:
        bgep->param_adv_autoneg = *(uint8_t *)pr_val;
        if (bge_reprogram(bgep) == IOC_INVAL)
            err = EINVAL;
        break;
    case MAC_PROP_MTU:
        cur_mtu = bgep->chipid.default_mtu;
        bcopy(pr_val, &new_mtu, sizeof (new_mtu));

        if (new_mtu == cur_mtu) {
            err = 0;
            break;
        }
        if (new_mtu < BGE_DEFAULT_MTU ||
            new_mtu > BGE_MAXIMUM_MTU) {
            err = EINVAL;
            break;
        }
        if ((new_mtu > BGE_DEFAULT_MTU) &&
            (bgep->chipid.flags & CHIP_FLAG_NO_JUMBO)) {
            err = EINVAL;
            break;
        }
        if (bgep->bge_mac_state == BGE_MAC_STARTED) {
            err = EBUSY;
            break;
        }
        bgep->chipid.default_mtu = new_mtu;
        if (bge_chip_id_init(bgep)) {
            err = EINVAL;
            break;
        }
        bgep->bge_dma_error = B_TRUE;
        bgep->manual_reset = B_TRUE;
        bge_chip_stop(bgep, B_TRUE);
        bge_wake_factotum(bgep);
        err = 0;
        break;
    case MAC_PROP_FLOWCTRL:
        bcopy(pr_val, &fl, sizeof (fl));
        switch (fl) {
        default:
            err = ENOTSUP;
            break;
        case LINK_FLOWCTRL_NONE:
            bgep->param_adv_pause = 0;
            bgep->param_adv_asym_pause = 0;

            bgep->param_link_rx_pause = B_FALSE;
            bgep->param_link_tx_pause = B_FALSE;
            break;
        case LINK_FLOWCTRL_RX:
            bgep->param_adv_pause = 1;
            bgep->param_adv_asym_pause = 1;

            bgep->param_link_rx_pause = B_TRUE;
            bgep->param_link_tx_pause = B_FALSE;
            break;
        case LINK_FLOWCTRL_TX:
            bgep->param_adv_pause = 0;
            bgep->param_adv_asym_pause = 1;

            bgep->param_link_rx_pause = B_FALSE;
            bgep->param_link_tx_pause = B_TRUE;
            break;
        case LINK_FLOWCTRL_BI:
            bgep->param_adv_pause = 1;
            bgep->param_adv_asym_pause = 0;

            bgep->param_link_rx_pause = B_TRUE;
            bgep->param_link_tx_pause = B_TRUE;
            break;
        }

        if (err == 0) {
            if (bge_reprogram(bgep) == IOC_INVAL)
                err = EINVAL;
        }

        break;
    case MAC_PROP_PRIVATE:
        err = bge_set_priv_prop(bgep, pr_name, pr_valsize,
            pr_val);
        break;
    default:
        err = ENOTSUP;
        break;
    }
    mutex_exit(bgep->genlock);
    return (err);
}
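
/*
 * For illustration only: these MAC properties surface as dladm(1M) link
 * properties, so (for example)
 *
 *	# dladm set-linkprop -p en_1000fdx_cap=0 bge0
 *
 * arrives here as bge_m_setprop(..., MAC_PROP_EN_1000FDX_CAP, ...), and
 * an MTU change ("-p mtu=<n>") takes the MAC_PROP_MTU path above (it is
 * rejected with EBUSY while the MAC is started, and with EINVAL on
 * chips flagged CHIP_FLAG_NO_JUMBO for values above BGE_DEFAULT_MTU).
 */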

/* ARGSUSED */
static int
bge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
    uint_t pr_valsize, void *pr_val)
{
    bge_t *bgep = barg;
    int err = 0;

    switch (pr_num) {
    case MAC_PROP_DUPLEX:
        ASSERT(pr_valsize >= sizeof (link_duplex_t));
        bcopy(&bgep->param_link_duplex, pr_val,
            sizeof (link_duplex_t));
        break;
    case MAC_PROP_SPEED: {
        uint64_t speed = bgep->param_link_speed * 1000000ull;

        ASSERT(pr_valsize >= sizeof (speed));
        bcopy(&speed, pr_val, sizeof (speed));
        break;
    }
    case MAC_PROP_STATUS:
        ASSERT(pr_valsize >= sizeof (link_state_t));
        bcopy(&bgep->link_state, pr_val,
            sizeof (link_state_t));
        break;
    case MAC_PROP_AUTONEG:
        *(uint8_t *)pr_val = bgep->param_adv_autoneg;
        break;
    case MAC_PROP_FLOWCTRL: {
        link_flowctrl_t fl;

        ASSERT(pr_valsize >= sizeof (fl));

        if (bgep->param_link_rx_pause &&
            !bgep->param_link_tx_pause)
            fl = LINK_FLOWCTRL_RX;

        if (!bgep->param_link_rx_pause &&
            !bgep->param_link_tx_pause)
            fl = LINK_FLOWCTRL_NONE;

        if (!bgep->param_link_rx_pause &&
            bgep->param_link_tx_pause)
            fl = LINK_FLOWCTRL_TX;

        if (bgep->param_link_rx_pause &&
            bgep->param_link_tx_pause)
            fl = LINK_FLOWCTRL_BI;
        bcopy(&fl, pr_val, sizeof (fl));
        break;
    }
    case MAC_PROP_ADV_1000FDX_CAP:
        *(uint8_t *)pr_val = bgep->param_adv_1000fdx;
        break;
    case MAC_PROP_EN_1000FDX_CAP:
        *(uint8_t *)pr_val = bgep->param_en_1000fdx;
        break;
    case MAC_PROP_ADV_1000HDX_CAP:
        *(uint8_t *)pr_val = bgep->param_adv_1000hdx;
        break;
    case MAC_PROP_EN_1000HDX_CAP:
        *(uint8_t *)pr_val = bgep->param_en_1000hdx;
        break;
    case MAC_PROP_ADV_100FDX_CAP:
        *(uint8_t *)pr_val = bgep->param_adv_100fdx;
        break;
    case MAC_PROP_EN_100FDX_CAP:
        *(uint8_t *)pr_val = bgep->param_en_100fdx;
        break;
    case MAC_PROP_ADV_100HDX_CAP:
        *(uint8_t *)pr_val = bgep->param_adv_100hdx;
        break;
    case MAC_PROP_EN_100HDX_CAP:
        *(uint8_t *)pr_val = bgep->param_en_100hdx;
        break;
    case MAC_PROP_ADV_10FDX_CAP:
        *(uint8_t *)pr_val = bgep->param_adv_10fdx;
        break;
    case MAC_PROP_EN_10FDX_CAP:
        *(uint8_t *)pr_val = bgep->param_en_10fdx;
        break;
    case MAC_PROP_ADV_10HDX_CAP:
        *(uint8_t *)pr_val = bgep->param_adv_10hdx;
        break;
    case MAC_PROP_EN_10HDX_CAP:
        *(uint8_t *)pr_val = bgep->param_en_10hdx;
        break;
    case MAC_PROP_ADV_100T4_CAP:
    case MAC_PROP_EN_100T4_CAP:
        *(uint8_t *)pr_val = 0;
        break;
    case MAC_PROP_PRIVATE:
        err = bge_get_priv_prop(bgep, pr_name,
            pr_valsize, pr_val);
        return (err);
    default:
        return (ENOTSUP);
    }
    return (0);
}

static void
bge_m_propinfo(void *barg, const char *pr_name, mac_prop_id_t pr_num,
    mac_prop_info_handle_t prh)
{
    bge_t *bgep = barg;
    int flags = bgep->chipid.flags;

    /*
     * By default permissions are read/write unless specified
     * otherwise by the driver.
     */

    switch (pr_num) {
    case MAC_PROP_DUPLEX:
    case MAC_PROP_SPEED:
    case MAC_PROP_STATUS:
    case MAC_PROP_ADV_1000FDX_CAP:
    case MAC_PROP_ADV_1000HDX_CAP:
    case MAC_PROP_ADV_100FDX_CAP:
    case MAC_PROP_ADV_100HDX_CAP:
    case MAC_PROP_ADV_10FDX_CAP:
    case MAC_PROP_ADV_10HDX_CAP:
    case MAC_PROP_ADV_100T4_CAP:
    case MAC_PROP_EN_100T4_CAP:
        mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
        break;

    case MAC_PROP_EN_1000FDX_CAP:
    case MAC_PROP_EN_1000HDX_CAP:
        if (DEVICE_5906_SERIES_CHIPSETS(bgep))
            mac_prop_info_set_default_uint8(prh, 0);
        else
            mac_prop_info_set_default_uint8(prh, 1);
        break;

    case MAC_PROP_EN_100FDX_CAP:
    case MAC_PROP_EN_100HDX_CAP:
    case MAC_PROP_EN_10FDX_CAP:
    case MAC_PROP_EN_10HDX_CAP:
        mac_prop_info_set_default_uint8(prh,
            (flags & CHIP_FLAG_SERDES) ? 0 : 1);
        break;

    case MAC_PROP_AUTONEG:
        mac_prop_info_set_default_uint8(prh, 1);
        break;

    case MAC_PROP_FLOWCTRL:
        mac_prop_info_set_default_link_flowctrl(prh,
            LINK_FLOWCTRL_BI);
        break;

    case MAC_PROP_MTU:
        mac_prop_info_set_range_uint32(prh, BGE_DEFAULT_MTU,
            (flags & CHIP_FLAG_NO_JUMBO) ?
            BGE_DEFAULT_MTU : BGE_MAXIMUM_MTU);
        break;

    case MAC_PROP_PRIVATE:
        bge_priv_propinfo(pr_name, prh);
        break;
    }

    mutex_enter(bgep->genlock);
    if ((bgep->param_loop_mode != BGE_LOOP_NONE &&
        bge_param_locked(pr_num)) ||
        ((bgep->chipid.flags & CHIP_FLAG_SERDES) &&
        ((pr_num == MAC_PROP_EN_100FDX_CAP) ||
        (pr_num == MAC_PROP_EN_100HDX_CAP) ||
        (pr_num == MAC_PROP_EN_10FDX_CAP) ||
        (pr_num == MAC_PROP_EN_10HDX_CAP))) ||
        (DEVICE_5906_SERIES_CHIPSETS(bgep) &&
        ((pr_num == MAC_PROP_EN_1000FDX_CAP) ||
        (pr_num == MAC_PROP_EN_1000HDX_CAP))))
        mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
    mutex_exit(bgep->genlock);
}
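
/*
 * For illustration only: the private properties listed in bge_priv_prop
 * are set the same way as standard link properties, e.g.
 *
 *	# dladm set-linkprop -p _drain_max=128 bge0
 *
 * The value reaches bge_set_priv_prop() below as a decimal string and
 * is parsed with ddi_strtol().
 */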

/* ARGSUSED */
static int
bge_set_priv_prop(bge_t *bgep, const char *pr_name, uint_t pr_valsize,
    const void *pr_val)
{
    int err = 0;
    long result;

    if (strcmp(pr_name, "_adv_pause_cap") == 0) {
        (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
        if (result > 1 || result < 0) {
            err = EINVAL;
        } else {
            bgep->param_adv_pause = (uint32_t)result;
            if (bge_reprogram(bgep) == IOC_INVAL)
                err = EINVAL;
        }
        return (err);
    }
    if (strcmp(pr_name, "_adv_asym_pause_cap") == 0) {
        (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
        if (result > 1 || result < 0) {
            err = EINVAL;
        } else {
            bgep->param_adv_asym_pause = (uint32_t)result;
            if (bge_reprogram(bgep) == IOC_INVAL)
                err = EINVAL;
        }
        return (err);
    }
    if (strcmp(pr_name, "_drain_max") == 0) {
        /*
         * On the Tx side, the h/w register normally has to be
         * updated once per packet to trigger transmission.  The
         * drain_max parameter reduces that register traffic: it
         * controls the maximum number of packets we will hold
         * before updating the bge h/w to trigger the transmit.
         * The bge chipset usually has a maximum of 512 Tx
         * descriptors, so the upper bound on drain_max is 512.
         */
        if (pr_val == NULL) {
            err = EINVAL;
            return (err);
        }
        (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
        if (result > 512 || result < 1)
            err = EINVAL;
        else {
            bgep->param_drain_max = (uint32_t)result;
            if (bge_reprogram(bgep) == IOC_INVAL)
                err = EINVAL;
        }
        return (err);
    }
    if (strcmp(pr_name, "_msi_cnt") == 0) {
        if (pr_val == NULL) {
            err = EINVAL;
            return (err);
        }
        (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
        if (result > 7 || result < 0)
            err = EINVAL;
        else {
            bgep->param_msi_cnt = (uint32_t)result;
            if (bge_reprogram(bgep) == IOC_INVAL)
                err = EINVAL;
        }
        return (err);
    }
    if (strcmp(pr_name, "_rx_intr_coalesce_blank_time") == 0) {
        if (ddi_strtol(pr_val, (char **)NULL, 0, &result) != 0)
            return (EINVAL);
        if (result < 0)
            err = EINVAL;
        else {
            bgep->chipid.rx_ticks_norm = (uint32_t)result;
            bge_chip_coalesce_update(bgep);
        }
        return (err);
    }

    if (strcmp(pr_name, "_rx_intr_coalesce_pkt_cnt") == 0) {
        if (ddi_strtol(pr_val, (char **)NULL, 0, &result) != 0)
            return (EINVAL);

        if (result < 0)
            err = EINVAL;
        else {
            bgep->chipid.rx_count_norm = (uint32_t)result;
            bge_chip_coalesce_update(bgep);
        }
        return (err);
    }
    if (strcmp(pr_name, "_tx_intr_coalesce_blank_time") == 0) {
        if (ddi_strtol(pr_val, (char **)NULL, 0, &result) != 0)
            return (EINVAL);
        if (result < 0)
            err = EINVAL;
        else {
            bgep->chipid.tx_ticks_norm = (uint32_t)result;
            bge_chip_coalesce_update(bgep);
        }
        return (err);
    }

    if (strcmp(pr_name, "_tx_intr_coalesce_pkt_cnt") == 0) {
        if (ddi_strtol(pr_val, (char **)NULL, 0, &result) != 0)
            return (EINVAL);

        if (result < 0)
            err = EINVAL;
        else {
            bgep->chipid.tx_count_norm = (uint32_t)result;
            bge_chip_coalesce_update(bgep);
        }
        return (err);
    }
    return (ENOTSUP);
}

static int
bge_get_priv_prop(bge_t *bge, const char *pr_name, uint_t pr_valsize,
    void *pr_val)
{
    int value;

    if (strcmp(pr_name, "_adv_pause_cap") == 0)
        value = bge->param_adv_pause;
    else if (strcmp(pr_name, "_adv_asym_pause_cap") == 0)
        value = bge->param_adv_asym_pause;
    else if (strcmp(pr_name, "_drain_max") == 0)
        value = bge->param_drain_max;
    else if (strcmp(pr_name, "_msi_cnt") == 0)
        value = bge->param_msi_cnt;
    else if (strcmp(pr_name, "_rx_intr_coalesce_blank_time") == 0)
        value = bge->chipid.rx_ticks_norm;
    else if (strcmp(pr_name, "_tx_intr_coalesce_blank_time") == 0)
        value = bge->chipid.tx_ticks_norm;
    else if (strcmp(pr_name, "_rx_intr_coalesce_pkt_cnt") == 0)
        value = bge->chipid.rx_count_norm;
    else if (strcmp(pr_name, "_tx_intr_coalesce_pkt_cnt") == 0)
        value = bge->chipid.tx_count_norm;
    else
        return (ENOTSUP);

    (void) snprintf(pr_val, pr_valsize, "%d", value);
    return (0);
}
"_rx_intr_coalesce_blank_time") == 0) 1234 value = bge_rx_ticks_norm; 1235 else if (strcmp(pr_name, "_tx_intr_coalesce_blank_time") == 0) 1236 value = bge_tx_ticks_norm; 1237 else if (strcmp(pr_name, "_rx_intr_coalesce_pkt_cnt") == 0) 1238 value = bge_rx_count_norm; 1239 else if (strcmp(pr_name, "_tx_intr_coalesce_pkt_cnt") == 0) 1240 value = bge_tx_count_norm; 1241 else 1242 return; 1243 1244 (void) snprintf(valstr, sizeof (valstr), "%d", value); 1245 mac_prop_info_set_default_str(mph, valstr); 1246 } 1247 1248 /* 1249 * Compute the index of the required bit in the multicast hash map. 1250 * This must mirror the way the hardware actually does it! 1251 * See Broadcom document 570X-PG102-R page 125. 1252 */ 1253 static uint32_t 1254 bge_hash_index(const uint8_t *mca) 1255 { 1256 uint32_t hash; 1257 1258 CRC32(hash, mca, ETHERADDRL, -1U, crc32_table); 1259 1260 return (hash); 1261 } 1262 1263 /* 1264 * bge_m_multicst_add() -- enable/disable a multicast address 1265 */ 1266 static int 1267 bge_m_multicst(void *arg, boolean_t add, const uint8_t *mca) 1268 { 1269 bge_t *bgep = arg; /* private device info */ 1270 uint32_t hash; 1271 uint32_t index; 1272 uint32_t word; 1273 uint32_t bit; 1274 uint8_t *refp; 1275 1276 BGE_TRACE(("bge_m_multicst($%p, %s, %s)", arg, 1277 (add) ? "add" : "remove", ether_sprintf((void *)mca))); 1278 1279 /* 1280 * Precalculate all required masks, pointers etc ... 1281 */ 1282 hash = bge_hash_index(mca); 1283 index = hash % BGE_HASH_TABLE_SIZE; 1284 word = index/32u; 1285 bit = 1 << (index % 32u); 1286 refp = &bgep->mcast_refs[index]; 1287 1288 BGE_DEBUG(("bge_m_multicst: hash 0x%x index %d (%d:0x%x) = %d", 1289 hash, index, word, bit, *refp)); 1290 1291 /* 1292 * We must set the appropriate bit in the hash map (and the 1293 * corresponding h/w register) when the refcount goes from 0 1294 * to >0, and clear it when the last ref goes away (refcount 1295 * goes from >0 back to 0). If we change the hash map, we 1296 * must also update the chip's hardware map registers. 

/*
 *  bge_m_multicst() -- enable/disable a multicast address
 */
static int
bge_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
{
    bge_t *bgep = arg;      /* private device info  */
    uint32_t hash;
    uint32_t index;
    uint32_t word;
    uint32_t bit;
    uint8_t *refp;

    BGE_TRACE(("bge_m_multicst($%p, %s, %s)", arg,
        (add) ? "add" : "remove", ether_sprintf((void *)mca)));

    /*
     * Precalculate all required masks, pointers etc ...
     */
    hash = bge_hash_index(mca);
    index = hash % BGE_HASH_TABLE_SIZE;
    word = index/32u;
    bit = 1 << (index % 32u);
    refp = &bgep->mcast_refs[index];

    BGE_DEBUG(("bge_m_multicst: hash 0x%x index %d (%d:0x%x) = %d",
        hash, index, word, bit, *refp));

    /*
     * We must set the appropriate bit in the hash map (and the
     * corresponding h/w register) when the refcount goes from 0
     * to >0, and clear it when the last ref goes away (refcount
     * goes from >0 back to 0).  If we change the hash map, we
     * must also update the chip's hardware map registers.
     */
    mutex_enter(bgep->genlock);
    if (!(bgep->progress & PROGRESS_INTR)) {
        /* can happen during autorecovery */
        ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
        mutex_exit(bgep->genlock);
        return (EIO);
    }
    if (add) {
        if ((*refp)++ == 0) {
            bgep->mcast_hash[word] |= bit;
#ifdef BGE_IPMI_ASF
            if (bge_chip_sync(bgep, B_TRUE) == DDI_FAILURE) {
#else
            if (bge_chip_sync(bgep) == DDI_FAILURE) {
#endif
                (void) bge_check_acc_handle(bgep,
                    bgep->cfg_handle);
                (void) bge_check_acc_handle(bgep,
                    bgep->io_handle);
                ddi_fm_service_impact(bgep->devinfo,
                    DDI_SERVICE_DEGRADED);
                mutex_exit(bgep->genlock);
                return (EIO);
            }
        }
    } else {
        if (--(*refp) == 0) {
            bgep->mcast_hash[word] &= ~bit;
#ifdef BGE_IPMI_ASF
            if (bge_chip_sync(bgep, B_TRUE) == DDI_FAILURE) {
#else
            if (bge_chip_sync(bgep) == DDI_FAILURE) {
#endif
                (void) bge_check_acc_handle(bgep,
                    bgep->cfg_handle);
                (void) bge_check_acc_handle(bgep,
                    bgep->io_handle);
                ddi_fm_service_impact(bgep->devinfo,
                    DDI_SERVICE_DEGRADED);
                mutex_exit(bgep->genlock);
                return (EIO);
            }
        }
    }
    BGE_DEBUG(("bge_m_multicst($%p) done", arg));
    if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
        ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
        mutex_exit(bgep->genlock);
        return (EIO);
    }
    if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
        ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
        mutex_exit(bgep->genlock);
        return (EIO);
    }
    mutex_exit(bgep->genlock);

    return (0);
}

/*
 *  bge_m_promisc() -- set or reset promiscuous mode on the board
 *
 *  Program the hardware to enable/disable promiscuous and/or
 *  receive-all-multicast modes.
 */
static int
bge_m_promisc(void *arg, boolean_t on)
{
    bge_t *bgep = arg;

    BGE_TRACE(("bge_m_promisc_set($%p, %d)", arg, on));

    /*
     * Store MAC layer specified mode and pass to chip layer to update h/w
     */
    mutex_enter(bgep->genlock);
    if (!(bgep->progress & PROGRESS_INTR)) {
        /* can happen during autorecovery */
        ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
        mutex_exit(bgep->genlock);
        return (EIO);
    }
    bgep->promisc = on;
#ifdef BGE_IPMI_ASF
    if (bge_chip_sync(bgep, B_TRUE) == DDI_FAILURE) {
#else
    if (bge_chip_sync(bgep) == DDI_FAILURE) {
#endif
        (void) bge_check_acc_handle(bgep, bgep->cfg_handle);
        (void) bge_check_acc_handle(bgep, bgep->io_handle);
        ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
        mutex_exit(bgep->genlock);
        return (EIO);
    }
    BGE_DEBUG(("bge_m_promisc_set($%p) done", arg));
    if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
        ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
        mutex_exit(bgep->genlock);
        return (EIO);
    }
    if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
        ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
        mutex_exit(bgep->genlock);
        return (EIO);
    }
    mutex_exit(bgep->genlock);
    return (0);
}

/*
 * Find the slot for the specified unicast address
 */
int
bge_unicst_find(bge_t *bgep, const uint8_t *mac_addr)
{
    int slot;

    ASSERT(mutex_owned(bgep->genlock));

    for (slot = 0; slot < bgep->unicst_addr_total; slot++) {
        if (bcmp(bgep->curr_addr[slot].addr, mac_addr,
            ETHERADDRL) == 0)
            return (slot);
    }

    return (-1);
}
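
/*
 * Editorial note (descriptive only): bge_addmac() below consumes one
 * unicast address slot plus a contiguous pair of receive-rule slots
 * per steering rule, so at most RECV_RULES_NUM_MAX / 2 such rules can
 * be active at once, independent of unicst_addr_total.
 */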

/*
 * Programs the classifier to start steering packets matching 'mac_addr'
 * to the specified ring 'arg'.
 */
static int
bge_addmac(void *arg, const uint8_t *mac_addr)
{
    recv_ring_t *rrp = (recv_ring_t *)arg;
    bge_t *bgep = rrp->bgep;
    bge_recv_rule_t *rulep = bgep->recv_rules;
    bge_rule_info_t *rinfop = NULL;
    uint8_t ring = (uint8_t)(rrp - bgep->recv) + 1;
    int i;
    uint16_t tmp16;
    uint32_t tmp32;
    int slot;
    int err;

    mutex_enter(bgep->genlock);
    if (bgep->unicst_addr_avail == 0) {
        mutex_exit(bgep->genlock);
        return (ENOSPC);
    }

    /*
     * First add the unicast address to an available slot.
     */
    slot = bge_unicst_find(bgep, mac_addr);
    ASSERT(slot == -1);

    for (slot = 0; slot < bgep->unicst_addr_total; slot++) {
        if (!bgep->curr_addr[slot].set) {
            bgep->curr_addr[slot].set = B_TRUE;
            break;
        }
    }

    ASSERT(slot < bgep->unicst_addr_total);
    bgep->unicst_addr_avail--;
    mutex_exit(bgep->genlock);

    if ((err = bge_unicst_set(bgep, mac_addr, slot)) != 0)
        goto fail;

    /* A rule is already here. Deny this. */
    if (rrp->mac_addr_rule != NULL) {
        err = ether_cmp(mac_addr, rrp->mac_addr_val) ? EEXIST : EBUSY;
        goto fail;
    }

    /*
     * Allocate a bge_rule_info_t to keep track of which rule slots
     * are being used.
     */
    rinfop = kmem_zalloc(sizeof (bge_rule_info_t), KM_NOSLEEP);
    if (rinfop == NULL) {
        err = ENOMEM;
        goto fail;
    }

    /*
     * Look for the starting slot to place the rules.
     * The two slots we reserve must be contiguous.
     */
    for (i = 0; i + 1 < RECV_RULES_NUM_MAX; i++)
        if ((rulep[i].control & RECV_RULE_CTL_ENABLE) == 0 &&
            (rulep[i+1].control & RECV_RULE_CTL_ENABLE) == 0)
            break;

    ASSERT(i + 1 < RECV_RULES_NUM_MAX);

    bcopy(mac_addr, &tmp32, sizeof (tmp32));
    rulep[i].mask_value = ntohl(tmp32);
    rulep[i].control = RULE_DEST_MAC_1(ring) | RECV_RULE_CTL_AND;
    bge_reg_put32(bgep, RECV_RULE_MASK_REG(i), rulep[i].mask_value);
    bge_reg_put32(bgep, RECV_RULE_CONTROL_REG(i), rulep[i].control);

    bcopy(mac_addr + 4, &tmp16, sizeof (tmp16));
    rulep[i+1].mask_value = 0xffff0000 | ntohs(tmp16);
    rulep[i+1].control = RULE_DEST_MAC_2(ring);
    bge_reg_put32(bgep, RECV_RULE_MASK_REG(i+1), rulep[i+1].mask_value);
    bge_reg_put32(bgep, RECV_RULE_CONTROL_REG(i+1), rulep[i+1].control);
    rinfop->start = i;
    rinfop->count = 2;

    rrp->mac_addr_rule = rinfop;
    bcopy(mac_addr, rrp->mac_addr_val, ETHERADDRL);

    return (0);

fail:
    /* Clear the address just set */
    (void) bge_unicst_set(bgep, zero_addr, slot);
    mutex_enter(bgep->genlock);
    bgep->curr_addr[slot].set = B_FALSE;
    bgep->unicst_addr_avail++;
    mutex_exit(bgep->genlock);

    return (err);
}

/*
 * Stop classifying packets matching the MAC address to the specified ring.
 */
static int
bge_remmac(void *arg, const uint8_t *mac_addr)
{
    recv_ring_t *rrp = (recv_ring_t *)arg;
    bge_t *bgep = rrp->bgep;
    bge_recv_rule_t *rulep = bgep->recv_rules;
    bge_rule_info_t *rinfop = rrp->mac_addr_rule;
    int start;
    int slot;
    int err;

    /*
     * Remove the MAC address from its slot.
     */
    mutex_enter(bgep->genlock);
    slot = bge_unicst_find(bgep, mac_addr);
    if (slot == -1) {
        mutex_exit(bgep->genlock);
        return (EINVAL);
    }

    ASSERT(bgep->curr_addr[slot].set);
    mutex_exit(bgep->genlock);

    if ((err = bge_unicst_set(bgep, zero_addr, slot)) != 0)
        return (err);

    if (rinfop == NULL || ether_cmp(mac_addr, rrp->mac_addr_val) != 0)
        return (EINVAL);

    start = rinfop->start;
    rulep[start].mask_value = 0;
    rulep[start].control = 0;
    bge_reg_put32(bgep, RECV_RULE_MASK_REG(start),
        rulep[start].mask_value);
    bge_reg_put32(bgep, RECV_RULE_CONTROL_REG(start),
        rulep[start].control);
    start++;
    rulep[start].mask_value = 0;
    rulep[start].control = 0;
    bge_reg_put32(bgep, RECV_RULE_MASK_REG(start),
        rulep[start].mask_value);
    bge_reg_put32(bgep, RECV_RULE_CONTROL_REG(start),
        rulep[start].control);

    kmem_free(rinfop, sizeof (bge_rule_info_t));
    rrp->mac_addr_rule = NULL;
    bzero(rrp->mac_addr_val, ETHERADDRL);

    mutex_enter(bgep->genlock);
    bgep->curr_addr[slot].set = B_FALSE;
    bgep->unicst_addr_avail++;
    mutex_exit(bgep->genlock);

    return (0);
}

static int
bge_flag_intr_enable(mac_intr_handle_t ih)
{
    recv_ring_t *rrp = (recv_ring_t *)ih;
    bge_t *bgep = rrp->bgep;

    mutex_enter(bgep->genlock);
    rrp->poll_flag = 0;
    mutex_exit(bgep->genlock);

    return (0);
}

static int
bge_flag_intr_disable(mac_intr_handle_t ih)
{
    recv_ring_t *rrp = (recv_ring_t *)ih;
    bge_t *bgep = rrp->bgep;

    mutex_enter(bgep->genlock);
    rrp->poll_flag = 1;
    mutex_exit(bgep->genlock);

    return (0);
}
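
/*
 * Editorial note (descriptive only): the MAC layer is expected to call
 * mi_disable (bge_flag_intr_disable above) before switching a ring to
 * polled receive via mri_poll (bge_poll_ring), and mi_enable to return
 * the ring to interrupt-driven delivery; poll_flag is the per-ring
 * record of that state.
 */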

static int
bge_ring_start(mac_ring_driver_t rh, uint64_t mr_gen_num)
{
    recv_ring_t *rx_ring;

    rx_ring = (recv_ring_t *)rh;
    mutex_enter(rx_ring->rx_lock);
    rx_ring->ring_gen_num = mr_gen_num;
    mutex_exit(rx_ring->rx_lock);
    return (0);
}


/*
 * Callback function for the MAC layer to register all rings
 * for a given ring_group, denoted by rg_index.
 */
void
bge_fill_ring(void *arg, mac_ring_type_t rtype, const int rg_index,
    const int index, mac_ring_info_t *infop, mac_ring_handle_t rh)
{
    bge_t *bgep = arg;
    mac_intr_t *mintr;

    switch (rtype) {
    case MAC_RING_TYPE_RX: {
        recv_ring_t *rx_ring;

        ASSERT(rg_index >= 0 && rg_index < MIN(bgep->chipid.rx_rings,
            MAC_ADDRESS_REGS_MAX) && index == 0);

        rx_ring = &bgep->recv[rg_index];
        rx_ring->ring_handle = rh;

        infop->mri_driver = (mac_ring_driver_t)rx_ring;
        infop->mri_start = bge_ring_start;
        infop->mri_stop = NULL;
        infop->mri_poll = bge_poll_ring;
        infop->mri_stat = bge_rx_ring_stat;

        mintr = &infop->mri_intr;
        mintr->mi_handle = (mac_intr_handle_t)rx_ring;
        mintr->mi_enable = bge_flag_intr_enable;
        mintr->mi_disable = bge_flag_intr_disable;

        break;
    }
    case MAC_RING_TYPE_TX:
    default:
        ASSERT(0);
        break;
    }
}

/*
 * Fill in the infop passed as an argument with the respective
 * ring_group info.  Each group has a single ring in it.  We keep it
 * simple and use the same internal handle for rings and groups.
 */
void
bge_fill_group(void *arg, mac_ring_type_t rtype, const int rg_index,
    mac_group_info_t *infop, mac_group_handle_t gh)
{
    bge_t *bgep = arg;

    switch (rtype) {
    case MAC_RING_TYPE_RX: {
        recv_ring_t *rx_ring;

        ASSERT(rg_index >= 0 && rg_index < MIN(bgep->chipid.rx_rings,
            MAC_ADDRESS_REGS_MAX));
        rx_ring = &bgep->recv[rg_index];
        rx_ring->ring_group_handle = gh;

        infop->mgi_driver = (mac_group_driver_t)rx_ring;
        infop->mgi_start = NULL;
        infop->mgi_stop = NULL;
        infop->mgi_addmac = bge_addmac;
        infop->mgi_remmac = bge_remmac;
        infop->mgi_count = 1;
        break;
    }
    case MAC_RING_TYPE_TX:
    default:
        ASSERT(0);
        break;
    }
}

/*ARGSUSED*/
static boolean_t
bge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
{
    bge_t *bgep = arg;

    switch (cap) {
    case MAC_CAPAB_HCKSUM: {
        uint32_t *txflags = cap_data;

        *txflags = HCKSUM_INET_FULL_V4 | HCKSUM_IPHDRCKSUM;
        break;
    }
    case MAC_CAPAB_RINGS: {
        mac_capab_rings_t *cap_rings = cap_data;

        /* Temporarily disable multiple tx rings. */
        if (cap_rings->mr_type != MAC_RING_TYPE_RX)
            return (B_FALSE);

        cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
        cap_rings->mr_rnum = cap_rings->mr_gnum =
            MIN(bgep->chipid.rx_rings, MAC_ADDRESS_REGS_MAX);
        cap_rings->mr_rget = bge_fill_ring;
        cap_rings->mr_gget = bge_fill_group;
        break;
    }
    default:
        return (B_FALSE);
    }
    return (B_TRUE);
}

/*
 * Loopback ioctl code
 */

static lb_property_t loopmodes[] = {
    { normal,   "normal",   BGE_LOOP_NONE           },
    { external, "1000Mbps", BGE_LOOP_EXTERNAL_1000  },
    { external, "100Mbps",  BGE_LOOP_EXTERNAL_100   },
    { external, "10Mbps",   BGE_LOOP_EXTERNAL_10    },
    { internal, "PHY",      BGE_LOOP_INTERNAL_PHY   },
    { internal, "MAC",      BGE_LOOP_INTERNAL_MAC   }
};

static enum ioc_reply
bge_set_loop_mode(bge_t *bgep, uint32_t mode)
{
    /*
     * If the mode isn't being changed, there's nothing to do ...
     */
    if (mode == bgep->param_loop_mode)
        return (IOC_ACK);

    /*
     * Validate the requested mode and prepare a suitable message
     * to explain the link down/up cycle that the change will
     * probably induce ...
     */
    switch (mode) {
    default:
        return (IOC_INVAL);

    case BGE_LOOP_NONE:
    case BGE_LOOP_EXTERNAL_1000:
    case BGE_LOOP_EXTERNAL_100:
    case BGE_LOOP_EXTERNAL_10:
    case BGE_LOOP_INTERNAL_PHY:
    case BGE_LOOP_INTERNAL_MAC:
        break;
    }

    /*
     * All OK; tell the caller to reprogram
     * the PHY and/or MAC for the new mode ...
     */
    bgep->param_loop_mode = mode;
    return (IOC_RESTART_ACK);
}

static enum ioc_reply
bge_loop_ioctl(bge_t *bgep, queue_t *wq, mblk_t *mp, struct iocblk *iocp)
{
    lb_info_sz_t *lbsp;
    lb_property_t *lbpp;
    uint32_t *lbmp;
    int cmd;

    _NOTE(ARGUNUSED(wq))

    /*
     * Validate format of ioctl
     */
    if (mp->b_cont == NULL)
        return (IOC_INVAL);

    cmd = iocp->ioc_cmd;
    switch (cmd) {
    default:
        /* NOTREACHED */
        bge_error(bgep, "bge_loop_ioctl: invalid cmd 0x%x", cmd);
        return (IOC_INVAL);

    case LB_GET_INFO_SIZE:
        if (iocp->ioc_count != sizeof (lb_info_sz_t))
            return (IOC_INVAL);
        lbsp = (void *)mp->b_cont->b_rptr;
        *lbsp = sizeof (loopmodes);
        return (IOC_REPLY);

    case LB_GET_INFO:
        if (iocp->ioc_count != sizeof (loopmodes))
            return (IOC_INVAL);
        lbpp = (void *)mp->b_cont->b_rptr;
        bcopy(loopmodes, lbpp, sizeof (loopmodes));
        return (IOC_REPLY);

    case LB_GET_MODE:
        if (iocp->ioc_count != sizeof (uint32_t))
            return (IOC_INVAL);
        lbmp = (void *)mp->b_cont->b_rptr;
        *lbmp = bgep->param_loop_mode;
        return (IOC_REPLY);

    case LB_SET_MODE:
        if (iocp->ioc_count != sizeof (uint32_t))
            return (IOC_INVAL);
        lbmp = (void *)mp->b_cont->b_rptr;
        return (bge_set_loop_mode(bgep, *lbmp));
    }
}
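
/*
 * Illustrative sketch only (not compiled): a user-level diagnostic
 * would typically drive the loopback ioctls above through an I_STR
 * STREAMS ioctl; "fd" is assumed to be an open descriptor for the bge
 * node, and error handling is omitted.
 */
#if 0
    struct strioctl sioc;
    uint32_t mode = BGE_LOOP_INTERNAL_MAC;

    sioc.ic_cmd = LB_SET_MODE;
    sioc.ic_timout = 0;
    sioc.ic_len = sizeof (mode);
    sioc.ic_dp = (char *)&mode;
    (void) ioctl(fd, I_STR, &sioc);
#endif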

/*
 * Specific bge IOCTLs; the gld module handles the generic ones.
 */
static void
bge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
{
    bge_t *bgep = arg;
    struct iocblk *iocp;
    enum ioc_reply status;
    boolean_t need_privilege;
    int err;
    int cmd;

    /*
     * Validate the command before bothering with the mutex ...
     */
    iocp = (void *)mp->b_rptr;
    iocp->ioc_error = 0;
    need_privilege = B_TRUE;
    cmd = iocp->ioc_cmd;
    switch (cmd) {
    default:
        miocnak(wq, mp, 0, EINVAL);
        return;

    case BGE_MII_READ:
    case BGE_MII_WRITE:
    case BGE_SEE_READ:
    case BGE_SEE_WRITE:
    case BGE_FLASH_READ:
    case BGE_FLASH_WRITE:
    case BGE_DIAG:
    case BGE_PEEK:
    case BGE_POKE:
    case BGE_PHY_RESET:
    case BGE_SOFT_RESET:
    case BGE_HARD_RESET:
        break;

    case LB_GET_INFO_SIZE:
    case LB_GET_INFO:
    case LB_GET_MODE:
        need_privilege = B_FALSE;
        /* FALLTHRU */
    case LB_SET_MODE:
        break;

    }

    if (need_privilege) {
        /*
         * Check for specific net_config privilege on Solaris 10+.
         */
        err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
        if (err != 0) {
            miocnak(wq, mp, 0, err);
            return;
        }
    }

    mutex_enter(bgep->genlock);
    if (!(bgep->progress & PROGRESS_INTR)) {
        /* can happen during autorecovery */
        mutex_exit(bgep->genlock);
        miocnak(wq, mp, 0, EIO);
        return;
    }

    switch (cmd) {
    default:
        _NOTE(NOTREACHED)
        status = IOC_INVAL;
        break;

    case BGE_MII_READ:
    case BGE_MII_WRITE:
    case BGE_SEE_READ:
    case BGE_SEE_WRITE:
    case BGE_FLASH_READ:
    case BGE_FLASH_WRITE:
    case BGE_DIAG:
    case BGE_PEEK:
    case BGE_POKE:
    case BGE_PHY_RESET:
    case BGE_SOFT_RESET:
    case BGE_HARD_RESET:
        status = bge_chip_ioctl(bgep, wq, mp, iocp);
        break;

    case LB_GET_INFO_SIZE:
    case LB_GET_INFO:
    case LB_GET_MODE:
    case LB_SET_MODE:
        status = bge_loop_ioctl(bgep, wq, mp, iocp);
        break;

    }

    /*
     * Do we need to reprogram the PHY and/or the MAC?
     * Do it now, while we still have the mutex.
     *
     * Note: update the PHY first, 'cos it controls the
     * speed/duplex parameters that the MAC code uses.
     */
    switch (status) {
    case IOC_RESTART_REPLY:
    case IOC_RESTART_ACK:
        if (bge_reprogram(bgep) == IOC_INVAL)
            status = IOC_INVAL;
        break;
    }

    if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
        ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
        status = IOC_INVAL;
    }
    if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
        ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
        status = IOC_INVAL;
    }
    mutex_exit(bgep->genlock);

    /*
     * Finally, decide how to reply
     */
    switch (status) {
    default:
    case IOC_INVAL:
        /*
         * Error, reply with a NAK and EINVAL or the specified error
         */
        miocnak(wq, mp, 0, iocp->ioc_error == 0 ?
            EINVAL : iocp->ioc_error);
        break;

    case IOC_DONE:
        /*
         * OK, reply already sent
         */
        break;

    case IOC_RESTART_ACK:
    case IOC_ACK:
        /*
         * OK, reply with an ACK
         */
        miocack(wq, mp, 0, 0);
        break;

    case IOC_RESTART_REPLY:
    case IOC_REPLY:
        /*
         * OK, send prepared reply as ACK or NAK
         */
        mp->b_datap->db_type = iocp->ioc_error == 0 ?
            M_IOCACK : M_IOCNAK;
        qreply(wq, mp);
        break;
    }
}

/*
 * ========== Per-instance setup/teardown code ==========
 */

#undef  BGE_DBG
#define BGE_DBG     BGE_DBG_INIT    /* debug flag for this code */

/*
 * Allocate an area of memory and a DMA handle for accessing it
 */
static int
bge_alloc_dma_mem(bge_t *bgep, size_t memsize, ddi_device_acc_attr_t *attr_p,
    uint_t dma_flags, dma_area_t *dma_p)
{
    caddr_t va;
    int err;

    BGE_TRACE(("bge_alloc_dma_mem($%p, %ld, $%p, 0x%x, $%p)",
        (void *)bgep, memsize, attr_p, dma_flags, dma_p));

    /*
     * Allocate handle
     */
    err = ddi_dma_alloc_handle(bgep->devinfo, &dma_attr,
        DDI_DMA_DONTWAIT, NULL, &dma_p->dma_hdl);
    if (err != DDI_SUCCESS)
        return (DDI_FAILURE);

    /*
     * Allocate memory
     */
    err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize, attr_p,
        dma_flags, DDI_DMA_DONTWAIT, NULL, &va, &dma_p->alength,
        &dma_p->acc_hdl);
    if (err != DDI_SUCCESS)
        return (DDI_FAILURE);

    /*
     * Bind the two together
     */
    dma_p->mem_va = va;
    err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL,
        va, dma_p->alength, dma_flags, DDI_DMA_DONTWAIT, NULL,
        &dma_p->cookie, &dma_p->ncookies);

    BGE_DEBUG(("bge_alloc_dma_mem(): bind %d bytes; err %d, %d cookies",
        dma_p->alength, err, dma_p->ncookies));

    if (err != DDI_DMA_MAPPED || dma_p->ncookies != 1)
        return (DDI_FAILURE);

    dma_p->nslots = ~0U;
    dma_p->size = ~0U;
    dma_p->token = ~0U;
    dma_p->offset = 0;
    return (DDI_SUCCESS);
}

/*
 * Free one allocated area of DMAable memory
 */
static void
bge_free_dma_mem(dma_area_t *dma_p)
{
    if (dma_p->dma_hdl != NULL) {
        if (dma_p->ncookies) {
            (void) ddi_dma_unbind_handle(dma_p->dma_hdl);
            dma_p->ncookies = 0;
        }
        ddi_dma_free_handle(&dma_p->dma_hdl);
        dma_p->dma_hdl = NULL;
    }

    if (dma_p->acc_hdl != NULL) {
        ddi_dma_mem_free(&dma_p->acc_hdl);
        dma_p->acc_hdl = NULL;
    }
}

/*
 * Utility routine to carve a slice off a chunk of allocated memory,
 * updating the chunk descriptor accordingly.  The size of the slice
 * is given by the product of the <qty> and <size> parameters.
 */
static void
bge_slice_chunk(dma_area_t *slice, dma_area_t *chunk,
    uint32_t qty, uint32_t size)
{
    static uint32_t sequence = 0xbcd5704a;
    size_t totsize;

    totsize = qty*size;
    ASSERT(totsize <= chunk->alength);

    *slice = *chunk;
    slice->nslots = qty;
    slice->size = size;
    slice->alength = totsize;
    slice->token = ++sequence;

    chunk->mem_va = (caddr_t)chunk->mem_va + totsize;
    chunk->alength -= totsize;
    chunk->offset += totsize;
    chunk->cookie.dmac_laddress += totsize;
    chunk->cookie.dmac_size -= totsize;
}
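
/*
 * Illustrative sketch only (not compiled, hypothetical values): carving
 * a 512-slot descriptor slice off a chunk previously bound by
 * bge_alloc_dma_mem().  After the call, <desc> covers the first
 * 512 * sizeof (bge_sbd_t) bytes and <area> has been advanced past
 * them, ready for the next slice.
 */
#if 0
    dma_area_t area;    /* assumed: bound by bge_alloc_dma_mem() */
    dma_area_t desc;

    bge_slice_chunk(&desc, &area, 512, sizeof (bge_sbd_t));
    /* desc.nslots == 512, desc.alength == 512 * sizeof (bge_sbd_t) */
#endif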
2103 */ 2104 static void 2105 bge_init_buff_ring(bge_t *bgep, uint64_t ring) 2106 { 2107 buff_ring_t *brp; 2108 bge_status_t *bsp; 2109 sw_rbd_t *srbdp; 2110 dma_area_t pbuf; 2111 uint32_t bufsize; 2112 uint32_t nslots; 2113 uint32_t slot; 2114 uint32_t split; 2115 2116 static bge_regno_t nic_ring_addrs[BGE_BUFF_RINGS_MAX] = { 2117 NIC_MEM_SHADOW_BUFF_STD, 2118 NIC_MEM_SHADOW_BUFF_JUMBO, 2119 NIC_MEM_SHADOW_BUFF_MINI 2120 }; 2121 static bge_regno_t mailbox_regs[BGE_BUFF_RINGS_MAX] = { 2122 RECV_STD_PROD_INDEX_REG, 2123 RECV_JUMBO_PROD_INDEX_REG, 2124 RECV_MINI_PROD_INDEX_REG 2125 }; 2126 static bge_regno_t buff_cons_xref[BGE_BUFF_RINGS_MAX] = { 2127 STATUS_STD_BUFF_CONS_INDEX, 2128 STATUS_JUMBO_BUFF_CONS_INDEX, 2129 STATUS_MINI_BUFF_CONS_INDEX 2130 }; 2131 2132 BGE_TRACE(("bge_init_buff_ring($%p, %d)", 2133 (void *)bgep, ring)); 2134 2135 brp = &bgep->buff[ring]; 2136 nslots = brp->desc.nslots; 2137 ASSERT(brp->buf[0].nslots == nslots/BGE_SPLIT); 2138 bufsize = brp->buf[0].size; 2139 2140 /* 2141 * Set up the copy of the h/w RCB 2142 * 2143 * Note: unlike Send & Receive Return Rings, (where the max_len 2144 * field holds the number of slots), in a Receive Buffer Ring 2145 * this field indicates the size of each buffer in the ring. 2146 */ 2147 brp->hw_rcb.host_ring_addr = brp->desc.cookie.dmac_laddress; 2148 brp->hw_rcb.max_len = (uint16_t)bufsize; 2149 brp->hw_rcb.flags = nslots > 0 ? 0 : RCB_FLAG_RING_DISABLED; 2150 brp->hw_rcb.nic_ring_addr = nic_ring_addrs[ring]; 2151 2152 /* 2153 * Other one-off initialisation of per-ring data 2154 */ 2155 brp->bgep = bgep; 2156 bsp = DMA_VPTR(bgep->status_block); 2157 brp->cons_index_p = &bsp->buff_cons_index[buff_cons_xref[ring]]; 2158 brp->chip_mbx_reg = mailbox_regs[ring]; 2159 mutex_init(brp->rf_lock, NULL, MUTEX_DRIVER, 2160 DDI_INTR_PRI(bgep->intr_pri)); 2161 2162 /* 2163 * Allocate the array of s/w Receive Buffer Descriptors 2164 */ 2165 srbdp = kmem_zalloc(nslots*sizeof (*srbdp), KM_SLEEP); 2166 brp->sw_rbds = srbdp; 2167 2168 /* 2169 * Now initialise each array element once and for all 2170 */ 2171 for (split = 0; split < BGE_SPLIT; ++split) { 2172 pbuf = brp->buf[split]; 2173 for (slot = 0; slot < nslots/BGE_SPLIT; ++srbdp, ++slot) 2174 bge_slice_chunk(&srbdp->pbuf, &pbuf, 1, bufsize); 2175 ASSERT(pbuf.alength == 0); 2176 } 2177 } 2178 2179 /* 2180 * Clean up initialisation done above before the memory is freed 2181 */ 2182 static void 2183 bge_fini_buff_ring(bge_t *bgep, uint64_t ring) 2184 { 2185 buff_ring_t *brp; 2186 sw_rbd_t *srbdp; 2187 2188 BGE_TRACE(("bge_fini_buff_ring($%p, %d)", 2189 (void *)bgep, ring)); 2190 2191 brp = &bgep->buff[ring]; 2192 srbdp = brp->sw_rbds; 2193 kmem_free(srbdp, brp->desc.nslots*sizeof (*srbdp)); 2194 2195 mutex_destroy(brp->rf_lock); 2196 } 2197 2198 /* 2199 * Initialise the specified Receive (Return) Ring, using the 2200 * information in the <dma_area> descriptors that it contains 2201 * to set up all the other fields. This routine should be called 2202 * only once for each ring. 2203 */ 2204 static void 2205 bge_init_recv_ring(bge_t *bgep, uint64_t ring) 2206 { 2207 recv_ring_t *rrp; 2208 bge_status_t *bsp; 2209 uint32_t nslots; 2210 2211 BGE_TRACE(("bge_init_recv_ring($%p, %d)", 2212 (void *)bgep, ring)); 2213 2214 /* 2215 * The chip architecture requires that receive return rings have 2216 * 512 or 1024 or 2048 elements per ring. See 570X-PG108-R page 103. 
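 *
 * A ring with nslots == 0 is permitted too: it is simply marked
 * disabled by setting RCB_FLAG_RING_DISABLED in its RCB below.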
2217 */ 2218 rrp = &bgep->recv[ring]; 2219 nslots = rrp->desc.nslots; 2220 ASSERT(nslots == 0 || nslots == 512 || 2221 nslots == 1024 || nslots == 2048); 2222 2223 /* 2224 * Set up the copy of the h/w RCB 2225 */ 2226 rrp->hw_rcb.host_ring_addr = rrp->desc.cookie.dmac_laddress; 2227 rrp->hw_rcb.max_len = (uint16_t)nslots; 2228 rrp->hw_rcb.flags = nslots > 0 ? 0 : RCB_FLAG_RING_DISABLED; 2229 rrp->hw_rcb.nic_ring_addr = 0; 2230 2231 /* 2232 * Other one-off initialisation of per-ring data 2233 */ 2234 rrp->bgep = bgep; 2235 bsp = DMA_VPTR(bgep->status_block); 2236 rrp->prod_index_p = RECV_INDEX_P(bsp, ring); 2237 rrp->chip_mbx_reg = RECV_RING_CONS_INDEX_REG(ring); 2238 mutex_init(rrp->rx_lock, NULL, MUTEX_DRIVER, 2239 DDI_INTR_PRI(bgep->intr_pri)); 2240 } 2241 2242 2243 /* 2244 * Clean up initialisation done above before the memory is freed 2245 */ 2246 static void 2247 bge_fini_recv_ring(bge_t *bgep, uint64_t ring) 2248 { 2249 recv_ring_t *rrp; 2250 2251 BGE_TRACE(("bge_fini_recv_ring($%p, %d)", 2252 (void *)bgep, ring)); 2253 2254 rrp = &bgep->recv[ring]; 2255 if (rrp->rx_softint) 2256 ddi_remove_softintr(rrp->rx_softint); 2257 mutex_destroy(rrp->rx_lock); 2258 } 2259 2260 /* 2261 * Initialise the specified Send Ring, using the information in the 2262 * <dma_area> descriptors that it contains to set up all the other 2263 * fields. This routine should be called only once for each ring. 2264 */ 2265 static void 2266 bge_init_send_ring(bge_t *bgep, uint64_t ring) 2267 { 2268 send_ring_t *srp; 2269 bge_status_t *bsp; 2270 sw_sbd_t *ssbdp; 2271 dma_area_t desc; 2272 dma_area_t pbuf; 2273 uint32_t nslots; 2274 uint32_t slot; 2275 uint32_t split; 2276 sw_txbuf_t *txbuf; 2277 2278 BGE_TRACE(("bge_init_send_ring($%p, %d)", 2279 (void *)bgep, ring)); 2280 2281 /* 2282 * The chip architecture requires that host-based send rings 2283 * have 512 elements per ring. See 570X-PG102-R page 56. 2284 */ 2285 srp = &bgep->send[ring]; 2286 nslots = srp->desc.nslots; 2287 ASSERT(nslots == 0 || nslots == 512); 2288 2289 /* 2290 * Set up the copy of the h/w RCB 2291 */ 2292 srp->hw_rcb.host_ring_addr = srp->desc.cookie.dmac_laddress; 2293 srp->hw_rcb.max_len = (uint16_t)nslots; 2294 srp->hw_rcb.flags = nslots > 0 ? 
0 : RCB_FLAG_RING_DISABLED; 2295 srp->hw_rcb.nic_ring_addr = NIC_MEM_SHADOW_SEND_RING(ring, nslots); 2296 2297 /* 2298 * Other one-off initialisation of per-ring data 2299 */ 2300 srp->bgep = bgep; 2301 bsp = DMA_VPTR(bgep->status_block); 2302 srp->cons_index_p = SEND_INDEX_P(bsp, ring); 2303 srp->chip_mbx_reg = SEND_RING_HOST_INDEX_REG(ring); 2304 mutex_init(srp->tx_lock, NULL, MUTEX_DRIVER, 2305 DDI_INTR_PRI(bgep->intr_pri)); 2306 mutex_init(srp->txbuf_lock, NULL, MUTEX_DRIVER, 2307 DDI_INTR_PRI(bgep->intr_pri)); 2308 mutex_init(srp->freetxbuf_lock, NULL, MUTEX_DRIVER, 2309 DDI_INTR_PRI(bgep->intr_pri)); 2310 mutex_init(srp->tc_lock, NULL, MUTEX_DRIVER, 2311 DDI_INTR_PRI(bgep->intr_pri)); 2312 if (nslots == 0) 2313 return; 2314 2315 /* 2316 * Allocate the array of s/w Send Buffer Descriptors 2317 */ 2318 ssbdp = kmem_zalloc(nslots*sizeof (*ssbdp), KM_SLEEP); 2319 txbuf = kmem_zalloc(BGE_SEND_BUF_MAX*sizeof (*txbuf), KM_SLEEP); 2320 srp->txbuf_head = 2321 kmem_zalloc(BGE_SEND_BUF_MAX*sizeof (bge_queue_item_t), KM_SLEEP); 2322 srp->pktp = kmem_zalloc(BGE_SEND_BUF_MAX*sizeof (send_pkt_t), KM_SLEEP); 2323 srp->sw_sbds = ssbdp; 2324 srp->txbuf = txbuf; 2325 srp->tx_buffers = BGE_SEND_BUF_NUM; 2326 srp->tx_buffers_low = srp->tx_buffers / 4; 2327 if (bgep->chipid.snd_buff_size > BGE_SEND_BUFF_SIZE_DEFAULT) 2328 srp->tx_array_max = BGE_SEND_BUF_ARRAY_JUMBO; 2329 else 2330 srp->tx_array_max = BGE_SEND_BUF_ARRAY; 2331 srp->tx_array = 1; 2332 2333 /* 2334 * Chunk tx desc area 2335 */ 2336 desc = srp->desc; 2337 for (slot = 0; slot < nslots; ++ssbdp, ++slot) { 2338 bge_slice_chunk(&ssbdp->desc, &desc, 1, 2339 sizeof (bge_sbd_t)); 2340 } 2341 ASSERT(desc.alength == 0); 2342 2343 /* 2344 * Chunk tx buffer area 2345 */ 2346 for (split = 0; split < BGE_SPLIT; ++split) { 2347 pbuf = srp->buf[0][split]; 2348 for (slot = 0; slot < BGE_SEND_BUF_NUM/BGE_SPLIT; ++slot) { 2349 bge_slice_chunk(&txbuf->buf, &pbuf, 1, 2350 bgep->chipid.snd_buff_size); 2351 txbuf++; 2352 } 2353 ASSERT(pbuf.alength == 0); 2354 } 2355 } 2356 2357 /* 2358 * Clean up initialisation done above before the memory is freed 2359 */ 2360 static void 2361 bge_fini_send_ring(bge_t *bgep, uint64_t ring) 2362 { 2363 send_ring_t *srp; 2364 uint32_t array; 2365 uint32_t split; 2366 uint32_t nslots; 2367 2368 BGE_TRACE(("bge_fini_send_ring($%p, %d)", 2369 (void *)bgep, ring)); 2370 2371 srp = &bgep->send[ring]; 2372 mutex_destroy(srp->tc_lock); 2373 mutex_destroy(srp->freetxbuf_lock); 2374 mutex_destroy(srp->txbuf_lock); 2375 mutex_destroy(srp->tx_lock); 2376 nslots = srp->desc.nslots; 2377 if (nslots == 0) 2378 return; 2379 2380 for (array = 1; array < srp->tx_array; ++array) 2381 for (split = 0; split < BGE_SPLIT; ++split) 2382 bge_free_dma_mem(&srp->buf[array][split]); 2383 kmem_free(srp->sw_sbds, nslots*sizeof (*srp->sw_sbds)); 2384 kmem_free(srp->txbuf_head, BGE_SEND_BUF_MAX*sizeof (*srp->txbuf_head)); 2385 kmem_free(srp->txbuf, BGE_SEND_BUF_MAX*sizeof (*srp->txbuf)); 2386 kmem_free(srp->pktp, BGE_SEND_BUF_MAX*sizeof (*srp->pktp)); 2387 srp->sw_sbds = NULL; 2388 srp->txbuf_head = NULL; 2389 srp->txbuf = NULL; 2390 srp->pktp = NULL; 2391 } 2392 2393 /* 2394 * Initialise all transmit, receive, and buffer rings. 2395 */ 2396 void 2397 bge_init_rings(bge_t *bgep) 2398 { 2399 uint32_t ring; 2400 2401 BGE_TRACE(("bge_init_rings($%p)", (void *)bgep)); 2402 2403 /* 2404 * Perform one-off initialisation of each ring ... 
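 * (bge_fini_rings() below tears these down in the reverse order:
 * buffer rings first, then receive, then send)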
2405 */ 2406 for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring) 2407 bge_init_send_ring(bgep, ring); 2408 for (ring = 0; ring < BGE_RECV_RINGS_MAX; ++ring) 2409 bge_init_recv_ring(bgep, ring); 2410 for (ring = 0; ring < BGE_BUFF_RINGS_MAX; ++ring) 2411 bge_init_buff_ring(bgep, ring); 2412 } 2413 2414 /* 2415 * Undo the work of bge_init_rings() above before the memory is freed 2416 */ 2417 void 2418 bge_fini_rings(bge_t *bgep) 2419 { 2420 uint32_t ring; 2421 2422 BGE_TRACE(("bge_fini_rings($%p)", (void *)bgep)); 2423 2424 for (ring = 0; ring < BGE_BUFF_RINGS_MAX; ++ring) 2425 bge_fini_buff_ring(bgep, ring); 2426 for (ring = 0; ring < BGE_RECV_RINGS_MAX; ++ring) 2427 bge_fini_recv_ring(bgep, ring); 2428 for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring) 2429 bge_fini_send_ring(bgep, ring); 2430 } 2431 2432 /* 2433 * Called from bge_m_stop() to free the tx buffers that were 2434 * allocated by the tx process. 2435 */ 2436 void 2437 bge_free_txbuf_arrays(send_ring_t *srp) 2438 { 2439 uint32_t array; 2440 uint32_t split; 2441 2442 ASSERT(mutex_owned(srp->tx_lock)); 2443 2444 /* 2445 * Free the extra tx buffer DMA area 2446 */ 2447 for (array = 1; array < srp->tx_array; ++array) 2448 for (split = 0; split < BGE_SPLIT; ++split) 2449 bge_free_dma_mem(&srp->buf[array][split]); 2450 2451 /* 2452 * Restore initial tx buffer numbers 2453 */ 2454 srp->tx_array = 1; 2455 srp->tx_buffers = BGE_SEND_BUF_NUM; 2456 srp->tx_buffers_low = srp->tx_buffers / 4; 2457 srp->tx_flow = 0; 2458 bzero(srp->pktp, BGE_SEND_BUF_MAX * sizeof (*srp->pktp)); 2459 } 2460 2461 /* 2462 * Called from the tx process to allocate more tx buffers 2463 */ 2464 bge_queue_item_t * 2465 bge_alloc_txbuf_array(bge_t *bgep, send_ring_t *srp) 2466 { 2467 bge_queue_t *txbuf_queue; 2468 bge_queue_item_t *txbuf_item_last; 2469 bge_queue_item_t *txbuf_item; 2470 bge_queue_item_t *txbuf_item_rtn; 2471 sw_txbuf_t *txbuf; 2472 dma_area_t area; 2473 size_t txbuffsize; 2474 uint32_t slot; 2475 uint32_t array; 2476 uint32_t split; 2477 uint32_t err; 2478 2479 ASSERT(mutex_owned(srp->tx_lock)); 2480 2481 array = srp->tx_array; 2482 if (array >= srp->tx_array_max) 2483 return (NULL); 2484 2485 /* 2486 * Allocate memory & handles for TX buffers 2487 */ 2488 txbuffsize = BGE_SEND_BUF_NUM*bgep->chipid.snd_buff_size; 2489 ASSERT((txbuffsize % BGE_SPLIT) == 0); 2490 for (split = 0; split < BGE_SPLIT; ++split) { 2491 err = bge_alloc_dma_mem(bgep, txbuffsize/BGE_SPLIT, 2492 &bge_data_accattr, DDI_DMA_WRITE | BGE_DMA_MODE, 2493 &srp->buf[array][split]); 2494 if (err != DDI_SUCCESS) { 2495 /* Free the chunks allocated so far (including the one that just failed) */ 2496 for (slot = 0; slot <= split; ++slot) 2497 bge_free_dma_mem(&srp->buf[array][slot]); 2498 srp->tx_alloc_fail++; 2499 return (NULL); 2500 } 2501 } 2502 2503 /* 2504 * Chunk tx buffer area 2505 */ 2506 txbuf = srp->txbuf + array*BGE_SEND_BUF_NUM; 2507 for (split = 0; split < BGE_SPLIT; ++split) { 2508 area = srp->buf[array][split]; 2509 for (slot = 0; slot < BGE_SEND_BUF_NUM/BGE_SPLIT; ++slot) { 2510 bge_slice_chunk(&txbuf->buf, &area, 1, 2511 bgep->chipid.snd_buff_size); 2512 txbuf++; 2513 } 2514 } 2515 2516 /* 2517 * Add above buffers to the tx buffer pop queue 2518 */ 2519 txbuf_item = srp->txbuf_head + array*BGE_SEND_BUF_NUM; 2520 txbuf = srp->txbuf + array*BGE_SEND_BUF_NUM; 2521 txbuf_item_last = NULL; 2522 for (slot = 0; slot < BGE_SEND_BUF_NUM; ++slot) { 2523 txbuf_item->item = txbuf; 2524 txbuf_item->next = txbuf_item_last; 2525 txbuf_item_last = txbuf_item; 2526 txbuf++; 2527 txbuf_item++; 2528 } 2529 txbuf_item =
srp->txbuf_head + array*BGE_SEND_BUF_NUM; 2530 txbuf_item_rtn = txbuf_item; 2531 txbuf_item++; 2532 txbuf_queue = srp->txbuf_pop_queue; 2533 mutex_enter(txbuf_queue->lock); 2534 txbuf_item->next = txbuf_queue->head; 2535 txbuf_queue->head = txbuf_item_last; 2536 txbuf_queue->count += BGE_SEND_BUF_NUM - 1; 2537 mutex_exit(txbuf_queue->lock); 2538 2539 srp->tx_array++; 2540 srp->tx_buffers += BGE_SEND_BUF_NUM; 2541 srp->tx_buffers_low = srp->tx_buffers / 4; 2542 2543 return (txbuf_item_rtn); 2544 } 2545 2546 /* 2547 * This function allocates all the transmit and receive buffers 2548 * and descriptors, in four chunks. 2549 */ 2550 int 2551 bge_alloc_bufs(bge_t *bgep) 2552 { 2553 dma_area_t area; 2554 size_t rxbuffsize; 2555 size_t txbuffsize; 2556 size_t rxbuffdescsize; 2557 size_t rxdescsize; 2558 size_t txdescsize; 2559 uint32_t ring; 2560 uint32_t rx_rings = bgep->chipid.rx_rings; 2561 uint32_t tx_rings = bgep->chipid.tx_rings; 2562 int split; 2563 int err; 2564 2565 BGE_TRACE(("bge_alloc_bufs($%p)", 2566 (void *)bgep)); 2567 2568 rxbuffsize = BGE_STD_SLOTS_USED*bgep->chipid.std_buf_size; 2569 rxbuffsize += bgep->chipid.jumbo_slots*bgep->chipid.recv_jumbo_size; 2570 rxbuffsize += BGE_MINI_SLOTS_USED*BGE_MINI_BUFF_SIZE; 2571 2572 txbuffsize = BGE_SEND_BUF_NUM*bgep->chipid.snd_buff_size; 2573 txbuffsize *= tx_rings; 2574 2575 rxdescsize = rx_rings*bgep->chipid.recv_slots; 2576 rxdescsize *= sizeof (bge_rbd_t); 2577 2578 rxbuffdescsize = BGE_STD_SLOTS_USED; 2579 rxbuffdescsize += bgep->chipid.jumbo_slots; 2580 rxbuffdescsize += BGE_MINI_SLOTS_USED; 2581 rxbuffdescsize *= sizeof (bge_rbd_t); 2582 2583 txdescsize = tx_rings*BGE_SEND_SLOTS_USED; 2584 txdescsize *= sizeof (bge_sbd_t); 2585 txdescsize += sizeof (bge_statistics_t); 2586 txdescsize += sizeof (bge_status_t); 2587 txdescsize += BGE_STATUS_PADDING; 2588 2589 /* 2590 * Enable PCI relaxed ordering only for RX/TX data buffers 2591 */ 2592 if (bge_relaxed_ordering) 2593 dma_attr.dma_attr_flags |= DDI_DMA_RELAXED_ORDERING; 2594 2595 /* 2596 * Allocate memory & handles for RX buffers 2597 */ 2598 ASSERT((rxbuffsize % BGE_SPLIT) == 0); 2599 for (split = 0; split < BGE_SPLIT; ++split) { 2600 err = bge_alloc_dma_mem(bgep, rxbuffsize/BGE_SPLIT, 2601 &bge_data_accattr, DDI_DMA_READ | BGE_DMA_MODE, 2602 &bgep->rx_buff[split]); 2603 if (err != DDI_SUCCESS) 2604 return (DDI_FAILURE); 2605 } 2606 2607 /* 2608 * Allocate memory & handles for TX buffers 2609 */ 2610 ASSERT((txbuffsize % BGE_SPLIT) == 0); 2611 for (split = 0; split < BGE_SPLIT; ++split) { 2612 err = bge_alloc_dma_mem(bgep, txbuffsize/BGE_SPLIT, 2613 &bge_data_accattr, DDI_DMA_WRITE | BGE_DMA_MODE, 2614 &bgep->tx_buff[split]); 2615 if (err != DDI_SUCCESS) 2616 return (DDI_FAILURE); 2617 } 2618 2619 dma_attr.dma_attr_flags &= ~DDI_DMA_RELAXED_ORDERING; 2620 2621 /* 2622 * Allocate memory & handles for receive return rings 2623 */ 2624 ASSERT((rxdescsize % rx_rings) == 0); 2625 for (split = 0; split < rx_rings; ++split) { 2626 err = bge_alloc_dma_mem(bgep, rxdescsize/rx_rings, 2627 &bge_desc_accattr, DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 2628 &bgep->rx_desc[split]); 2629 if (err != DDI_SUCCESS) 2630 return (DDI_FAILURE); 2631 } 2632 2633 /* 2634 * Allocate memory & handles for buffer (producer) descriptor rings 2635 */ 2636 err = bge_alloc_dma_mem(bgep, rxbuffdescsize, &bge_desc_accattr, 2637 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &bgep->rx_desc[split]); 2638 if (err != DDI_SUCCESS) 2639 return (DDI_FAILURE); 2640 2641 /* 2642 * Allocate memory & handles for TX descriptor rings, 2643 * status 
block, and statistics area 2644 */ 2645 err = bge_alloc_dma_mem(bgep, txdescsize, &bge_desc_accattr, 2646 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &bgep->tx_desc); 2647 if (err != DDI_SUCCESS) 2648 return (DDI_FAILURE); 2649 2650 /* 2651 * Now carve up each of the allocated areas ... 2652 */ 2653 for (split = 0; split < BGE_SPLIT; ++split) { 2654 area = bgep->rx_buff[split]; 2655 bge_slice_chunk(&bgep->buff[BGE_STD_BUFF_RING].buf[split], 2656 &area, BGE_STD_SLOTS_USED/BGE_SPLIT, 2657 bgep->chipid.std_buf_size); 2658 bge_slice_chunk(&bgep->buff[BGE_JUMBO_BUFF_RING].buf[split], 2659 &area, bgep->chipid.jumbo_slots/BGE_SPLIT, 2660 bgep->chipid.recv_jumbo_size); 2661 bge_slice_chunk(&bgep->buff[BGE_MINI_BUFF_RING].buf[split], 2662 &area, BGE_MINI_SLOTS_USED/BGE_SPLIT, 2663 BGE_MINI_BUFF_SIZE); 2664 } 2665 2666 for (split = 0; split < BGE_SPLIT; ++split) { 2667 area = bgep->tx_buff[split]; 2668 for (ring = 0; ring < tx_rings; ++ring) 2669 bge_slice_chunk(&bgep->send[ring].buf[0][split], 2670 &area, BGE_SEND_BUF_NUM/BGE_SPLIT, 2671 bgep->chipid.snd_buff_size); 2672 for (; ring < BGE_SEND_RINGS_MAX; ++ring) 2673 bge_slice_chunk(&bgep->send[ring].buf[0][split], 2674 &area, 0, bgep->chipid.snd_buff_size); 2675 } 2676 2677 for (ring = 0; ring < rx_rings; ++ring) 2678 bge_slice_chunk(&bgep->recv[ring].desc, &bgep->rx_desc[ring], 2679 bgep->chipid.recv_slots, sizeof (bge_rbd_t)); 2680 2681 area = bgep->rx_desc[rx_rings]; 2682 for (; ring < BGE_RECV_RINGS_MAX; ++ring) 2683 bge_slice_chunk(&bgep->recv[ring].desc, &area, 2684 0, sizeof (bge_rbd_t)); 2685 bge_slice_chunk(&bgep->buff[BGE_STD_BUFF_RING].desc, &area, 2686 BGE_STD_SLOTS_USED, sizeof (bge_rbd_t)); 2687 bge_slice_chunk(&bgep->buff[BGE_JUMBO_BUFF_RING].desc, &area, 2688 bgep->chipid.jumbo_slots, sizeof (bge_rbd_t)); 2689 bge_slice_chunk(&bgep->buff[BGE_MINI_BUFF_RING].desc, &area, 2690 BGE_MINI_SLOTS_USED, sizeof (bge_rbd_t)); 2691 ASSERT(area.alength == 0); 2692 2693 area = bgep->tx_desc; 2694 for (ring = 0; ring < tx_rings; ++ring) 2695 bge_slice_chunk(&bgep->send[ring].desc, &area, 2696 BGE_SEND_SLOTS_USED, sizeof (bge_sbd_t)); 2697 for (; ring < BGE_SEND_RINGS_MAX; ++ring) 2698 bge_slice_chunk(&bgep->send[ring].desc, &area, 2699 0, sizeof (bge_sbd_t)); 2700 bge_slice_chunk(&bgep->statistics, &area, 1, sizeof (bge_statistics_t)); 2701 bge_slice_chunk(&bgep->status_block, &area, 1, sizeof (bge_status_t)); 2702 ASSERT(area.alength == BGE_STATUS_PADDING); 2703 DMA_ZERO(bgep->status_block); 2704 2705 return (DDI_SUCCESS); 2706 } 2707 2708 /* 2709 * This routine frees the transmit and receive buffers and descriptors. 2710 * Make sure the chip is stopped before calling it! 
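 * Note that the areas are freed in the reverse of the order in which
 * bge_alloc_bufs() above allocated them.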
2711 */ 2712 void 2713 bge_free_bufs(bge_t *bgep) 2714 { 2715 int split; 2716 2717 BGE_TRACE(("bge_free_bufs($%p)", 2718 (void *)bgep)); 2719 2720 bge_free_dma_mem(&bgep->tx_desc); 2721 for (split = 0; split < BGE_RECV_RINGS_SPLIT; ++split) 2722 bge_free_dma_mem(&bgep->rx_desc[split]); 2723 for (split = 0; split < BGE_SPLIT; ++split) 2724 bge_free_dma_mem(&bgep->tx_buff[split]); 2725 for (split = 0; split < BGE_SPLIT; ++split) 2726 bge_free_dma_mem(&bgep->rx_buff[split]); 2727 } 2728 2729 /* 2730 * Determine (initial) MAC address ("BIA") to use for this interface 2731 */ 2732 2733 static void 2734 bge_find_mac_address(bge_t *bgep, chip_id_t *cidp) 2735 { 2736 struct ether_addr sysaddr; 2737 char propbuf[8]; /* "true" or "false", plus NUL */ 2738 uchar_t *bytes; 2739 int *ints; 2740 uint_t nelts; 2741 int err; 2742 2743 BGE_TRACE(("bge_find_mac_address($%p)", 2744 (void *)bgep)); 2745 2746 BGE_DEBUG(("bge_find_mac_address: hw_mac_addr %012llx, => %s (%sset)", 2747 cidp->hw_mac_addr, 2748 ether_sprintf((void *)cidp->vendor_addr.addr), 2749 cidp->vendor_addr.set ? "" : "not ")); 2750 2751 /* 2752 * The "vendor's factory-set address" may already have 2753 * been extracted from the chip, but if the property 2754 * "local-mac-address" is set we use that instead. It 2755 * will normally be set by OBP, but it could also be 2756 * specified in a .conf file(!) 2757 * 2758 * There doesn't seem to be a way to define byte-array 2759 * properties in a .conf, so we check whether it looks 2760 * like an array of 6 ints instead. 2761 * 2762 * Then, we check whether it looks like an array of 6 2763 * bytes (which it should, if OBP set it). If we can't 2764 * make sense of it either way, we'll ignore it. 2765 */ 2766 err = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, bgep->devinfo, 2767 DDI_PROP_DONTPASS, localmac_propname, &ints, &nelts); 2768 if (err == DDI_PROP_SUCCESS) { 2769 if (nelts == ETHERADDRL) { 2770 while (nelts--) 2771 cidp->vendor_addr.addr[nelts] = ints[nelts]; 2772 cidp->vendor_addr.set = B_TRUE; 2773 } 2774 ddi_prop_free(ints); 2775 } 2776 2777 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, bgep->devinfo, 2778 DDI_PROP_DONTPASS, localmac_propname, &bytes, &nelts); 2779 if (err == DDI_PROP_SUCCESS) { 2780 if (nelts == ETHERADDRL) { 2781 while (nelts--) 2782 cidp->vendor_addr.addr[nelts] = bytes[nelts]; 2783 cidp->vendor_addr.set = B_TRUE; 2784 } 2785 ddi_prop_free(bytes); 2786 } 2787 2788 BGE_DEBUG(("bge_find_mac_address: +local %s (%sset)", 2789 ether_sprintf((void *)cidp->vendor_addr.addr), 2790 cidp->vendor_addr.set ? "" : "not ")); 2791 2792 /* 2793 * Look up the OBP property "local-mac-address?". Note that even 2794 * though its value is a string (which should be "true" or "false"), 2795 * it can't be decoded by ddi_prop_lookup_string(9F). So, we zero 2796 * the buffer first and then fetch the property as an untyped array; 2797 * this may or may not include a final NUL, but since there will 2798 * always be one left at the end of the buffer we can now treat it 2799 * as a string anyway. 2800 */ 2801 nelts = sizeof (propbuf); 2802 bzero(propbuf, nelts--); 2803 err = ddi_getlongprop_buf(DDI_DEV_T_ANY, bgep->devinfo, 2804 DDI_PROP_CANSLEEP, localmac_boolname, propbuf, (int *)&nelts); 2805 2806 /* 2807 * Now, if the address still isn't set from the hardware (SEEPROM) 2808 * or the OBP or .conf property, OR if the user has foolishly set 2809 * 'local-mac-address? = false', use "the system address" instead 2810 * (but only if it's non-null i.e. has been set from the IDPROM). 
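 *
 * (For illustration only: a hypothetical .conf override of the kind
 * described above would be written as an array of 6 ints, e.g.
 *
 *	local-mac-address=0x0,0x3,0xba,0x12,0x34,0x56;
 *
 * whereas OBP supplies the same property as a 6-byte array.)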
2811 */ 2812 if (cidp->vendor_addr.set == B_FALSE || strcmp(propbuf, "false") == 0) 2813 if (localetheraddr(NULL, &sysaddr) != 0) { 2814 ethaddr_copy(&sysaddr, cidp->vendor_addr.addr); 2815 cidp->vendor_addr.set = B_TRUE; 2816 } 2817 2818 BGE_DEBUG(("bge_find_mac_address: +system %s (%sset)", 2819 ether_sprintf((void *)cidp->vendor_addr.addr), 2820 cidp->vendor_addr.set ? "" : "not ")); 2821 2822 /* 2823 * Finally(!), if there's a valid "mac-address" property (created 2824 * if we netbooted from this interface), we must use this instead 2825 * of any of the above to ensure that the NFS/install server doesn't 2826 * get confused by the address changing as Solaris takes over! 2827 */ 2828 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, bgep->devinfo, 2829 DDI_PROP_DONTPASS, macaddr_propname, &bytes, &nelts); 2830 if (err == DDI_PROP_SUCCESS) { 2831 if (nelts == ETHERADDRL) { 2832 while (nelts--) 2833 cidp->vendor_addr.addr[nelts] = bytes[nelts]; 2834 cidp->vendor_addr.set = B_TRUE; 2835 } 2836 ddi_prop_free(bytes); 2837 } 2838 2839 BGE_DEBUG(("bge_find_mac_address: =final %s (%sset)", 2840 ether_sprintf((void *)cidp->vendor_addr.addr), 2841 cidp->vendor_addr.set ? "" : "not ")); 2842 } 2843 2844 2845 /*ARGSUSED*/ 2846 int 2847 bge_check_acc_handle(bge_t *bgep, ddi_acc_handle_t handle) 2848 { 2849 ddi_fm_error_t de; 2850 2851 ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION); 2852 ddi_fm_acc_err_clear(handle, DDI_FME_VERSION); 2853 return (de.fme_status); 2854 } 2855 2856 /*ARGSUSED*/ 2857 int 2858 bge_check_dma_handle(bge_t *bgep, ddi_dma_handle_t handle) 2859 { 2860 ddi_fm_error_t de; 2861 2862 ASSERT(bgep->progress & PROGRESS_BUFS); 2863 ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION); 2864 return (de.fme_status); 2865 } 2866 2867 /* 2868 * The IO fault service error handling callback function 2869 */ 2870 /*ARGSUSED*/ 2871 static int 2872 bge_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data) 2873 { 2874 /* 2875 * as the driver can always deal with an error in any dma or 2876 * access handle, we can just return the fme_status value. 2877 */ 2878 pci_ereport_post(dip, err, NULL); 2879 return (err->fme_status); 2880 } 2881 2882 static void 2883 bge_fm_init(bge_t *bgep) 2884 { 2885 ddi_iblock_cookie_t iblk; 2886 2887 /* Only register with IO Fault Services if we have some capability */ 2888 if (bgep->fm_capabilities) { 2889 bge_reg_accattr.devacc_attr_access = DDI_FLAGERR_ACC; 2890 dma_attr.dma_attr_flags = DDI_DMA_FLAGERR; 2891 2892 /* Register capabilities with IO Fault Services */ 2893 ddi_fm_init(bgep->devinfo, &bgep->fm_capabilities, &iblk); 2894 2895 /* 2896 * Initialize pci ereport capabilities if ereport capable 2897 */ 2898 if (DDI_FM_EREPORT_CAP(bgep->fm_capabilities) || 2899 DDI_FM_ERRCB_CAP(bgep->fm_capabilities)) 2900 pci_ereport_setup(bgep->devinfo); 2901 2902 /* 2903 * Register error callback if error callback capable 2904 */ 2905 if (DDI_FM_ERRCB_CAP(bgep->fm_capabilities)) 2906 ddi_fm_handler_register(bgep->devinfo, 2907 bge_fm_error_cb, (void*) bgep); 2908 } else { 2909 /* 2910 * These fields have to be cleared of FMA if there are no 2911 * FMA capabilities at runtime. 
2912 */ 2913 bge_reg_accattr.devacc_attr_access = DDI_DEFAULT_ACC; 2914 dma_attr.dma_attr_flags = 0; 2915 } 2916 } 2917 2918 static void 2919 bge_fm_fini(bge_t *bgep) 2920 { 2921 /* Only unregister FMA capabilities if we registered some */ 2922 if (bgep->fm_capabilities) { 2923 2924 /* 2925 * Release any resources allocated by pci_ereport_setup() 2926 */ 2927 if (DDI_FM_EREPORT_CAP(bgep->fm_capabilities) || 2928 DDI_FM_ERRCB_CAP(bgep->fm_capabilities)) 2929 pci_ereport_teardown(bgep->devinfo); 2930 2931 /* 2932 * Un-register error callback if error callback capable 2933 */ 2934 if (DDI_FM_ERRCB_CAP(bgep->fm_capabilities)) 2935 ddi_fm_handler_unregister(bgep->devinfo); 2936 2937 /* Unregister from IO Fault Services */ 2938 ddi_fm_fini(bgep->devinfo); 2939 } 2940 } 2941 2942 static void 2943 #ifdef BGE_IPMI_ASF 2944 bge_unattach(bge_t *bgep, uint_t asf_mode) 2945 #else 2946 bge_unattach(bge_t *bgep) 2947 #endif 2948 { 2949 BGE_TRACE(("bge_unattach($%p)", 2950 (void *)bgep)); 2951 2952 /* 2953 * Flag that no more activity may be initiated 2954 */ 2955 bgep->progress &= ~PROGRESS_READY; 2956 2957 /* 2958 * Quiesce the PHY and MAC (leave it reset but still powered). 2959 * Clean up and free all BGE data structures 2960 */ 2961 if (bgep->periodic_id != NULL) { 2962 ddi_periodic_delete(bgep->periodic_id); 2963 bgep->periodic_id = NULL; 2964 } 2965 if (bgep->progress & PROGRESS_KSTATS) 2966 bge_fini_kstats(bgep); 2967 if (bgep->progress & PROGRESS_PHY) 2968 bge_phys_reset(bgep); 2969 if (bgep->progress & PROGRESS_HWINT) { 2970 mutex_enter(bgep->genlock); 2971 #ifdef BGE_IPMI_ASF 2972 if (bge_chip_reset(bgep, B_FALSE, asf_mode) != DDI_SUCCESS) 2973 #else 2974 if (bge_chip_reset(bgep, B_FALSE) != DDI_SUCCESS) 2975 #endif 2976 ddi_fm_service_impact(bgep->devinfo, 2977 DDI_SERVICE_UNAFFECTED); 2978 #ifdef BGE_IPMI_ASF 2979 if (bgep->asf_enabled) { 2980 /* 2981 * This register has been overlaid. We restore its 2982 * initial value here. 
2983 */ 2984 bge_nic_put32(bgep, BGE_NIC_DATA_SIG_ADDR, 2985 BGE_NIC_DATA_SIG); 2986 } 2987 #endif 2988 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) 2989 ddi_fm_service_impact(bgep->devinfo, 2990 DDI_SERVICE_UNAFFECTED); 2991 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) 2992 ddi_fm_service_impact(bgep->devinfo, 2993 DDI_SERVICE_UNAFFECTED); 2994 mutex_exit(bgep->genlock); 2995 } 2996 if (bgep->progress & PROGRESS_INTR) { 2997 bge_intr_disable(bgep); 2998 bge_fini_rings(bgep); 2999 } 3000 if (bgep->progress & PROGRESS_HWINT) { 3001 bge_rem_intrs(bgep); 3002 rw_destroy(bgep->errlock); 3003 mutex_destroy(bgep->softintrlock); 3004 mutex_destroy(bgep->genlock); 3005 } 3006 if (bgep->progress & PROGRESS_FACTOTUM) 3007 ddi_remove_softintr(bgep->factotum_id); 3008 if (bgep->progress & PROGRESS_RESCHED) 3009 ddi_remove_softintr(bgep->drain_id); 3010 if (bgep->progress & PROGRESS_BUFS) 3011 bge_free_bufs(bgep); 3012 if (bgep->progress & PROGRESS_REGS) 3013 ddi_regs_map_free(&bgep->io_handle); 3014 if (bgep->progress & PROGRESS_CFG) 3015 pci_config_teardown(&bgep->cfg_handle); 3016 3017 bge_fm_fini(bgep); 3018 3019 ddi_remove_minor_node(bgep->devinfo, NULL); 3020 kmem_free(bgep->pstats, sizeof (bge_statistics_reg_t)); 3021 kmem_free(bgep, sizeof (*bgep)); 3022 } 3023 3024 static int 3025 bge_resume(dev_info_t *devinfo) 3026 { 3027 bge_t *bgep; /* Our private data */ 3028 chip_id_t *cidp; 3029 chip_id_t chipid; 3030 3031 bgep = ddi_get_driver_private(devinfo); 3032 if (bgep == NULL) 3033 return (DDI_FAILURE); 3034 3035 /* 3036 * Refuse to resume if the data structures aren't consistent 3037 */ 3038 if (bgep->devinfo != devinfo) 3039 return (DDI_FAILURE); 3040 3041 #ifdef BGE_IPMI_ASF 3042 /* 3043 * Power management is not currently supported in BGE. If you 3044 * want to implement it, add the ASF/IPMI-related 3045 * code here. 3046 */ 3047 3048 #endif 3049 3050 /* 3051 * Read chip ID & set up config space command register(s) 3052 * Refuse to resume if the chip has changed its identity!
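 * ("identity" here means the vendor/device IDs plus the revision and
 * ASIC revision fields, compared below)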
3053 */ 3054 cidp = &bgep->chipid; 3055 mutex_enter(bgep->genlock); 3056 bge_chip_cfg_init(bgep, &chipid, B_FALSE); 3057 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) { 3058 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 3059 mutex_exit(bgep->genlock); 3060 return (DDI_FAILURE); 3061 } 3062 mutex_exit(bgep->genlock); 3063 if (chipid.vendor != cidp->vendor) 3064 return (DDI_FAILURE); 3065 if (chipid.device != cidp->device) 3066 return (DDI_FAILURE); 3067 if (chipid.revision != cidp->revision) 3068 return (DDI_FAILURE); 3069 if (chipid.asic_rev != cidp->asic_rev) 3070 return (DDI_FAILURE); 3071 3072 /* 3073 * All OK, reinitialise h/w & kick off GLD scheduling 3074 */ 3075 mutex_enter(bgep->genlock); 3076 if (bge_restart(bgep, B_TRUE) != DDI_SUCCESS) { 3077 (void) bge_check_acc_handle(bgep, bgep->cfg_handle); 3078 (void) bge_check_acc_handle(bgep, bgep->io_handle); 3079 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 3080 mutex_exit(bgep->genlock); 3081 return (DDI_FAILURE); 3082 } 3083 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) { 3084 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 3085 mutex_exit(bgep->genlock); 3086 return (DDI_FAILURE); 3087 } 3088 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) { 3089 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 3090 mutex_exit(bgep->genlock); 3091 return (DDI_FAILURE); 3092 } 3093 mutex_exit(bgep->genlock); 3094 return (DDI_SUCCESS); 3095 } 3096 3097 /* 3098 * attach(9E) -- Attach a device to the system 3099 * 3100 * Called once for each board successfully probed. 3101 */ 3102 static int 3103 bge_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd) 3104 { 3105 bge_t *bgep; /* Our private data */ 3106 mac_register_t *macp; 3107 chip_id_t *cidp; 3108 caddr_t regs; 3109 int instance; 3110 int err; 3111 int intr_types; 3112 #ifdef BGE_IPMI_ASF 3113 uint32_t mhcrValue; 3114 #ifdef __sparc 3115 uint16_t value16; 3116 #endif 3117 #ifdef BGE_NETCONSOLE 3118 int retval; 3119 #endif 3120 #endif 3121 3122 instance = ddi_get_instance(devinfo); 3123 3124 BGE_GTRACE(("bge_attach($%p, %d) instance %d", 3125 (void *)devinfo, cmd, instance)); 3126 BGE_BRKPT(NULL, "bge_attach"); 3127 3128 switch (cmd) { 3129 default: 3130 return (DDI_FAILURE); 3131 3132 case DDI_RESUME: 3133 return (bge_resume(devinfo)); 3134 3135 case DDI_ATTACH: 3136 break; 3137 } 3138 3139 bgep = kmem_zalloc(sizeof (*bgep), KM_SLEEP); 3140 bgep->pstats = kmem_zalloc(sizeof (bge_statistics_reg_t), KM_SLEEP); 3141 ddi_set_driver_private(devinfo, bgep); 3142 bgep->bge_guard = BGE_GUARD; 3143 bgep->devinfo = devinfo; 3144 bgep->param_drain_max = 64; 3145 bgep->param_msi_cnt = 0; 3146 bgep->param_loop_mode = 0; 3147 3148 /* 3149 * Initialize more fields in BGE private data 3150 */ 3151 bgep->debug = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 3152 DDI_PROP_DONTPASS, debug_propname, bge_debug); 3153 (void) snprintf(bgep->ifname, sizeof (bgep->ifname), "%s%d", 3154 BGE_DRIVER_NAME, instance); 3155 3156 /* 3157 * Initialize for fma support 3158 */ 3159 bgep->fm_capabilities = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 3160 DDI_PROP_DONTPASS, fm_cap, 3161 DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE | 3162 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE); 3163 BGE_DEBUG(("bgep->fm_capabilities = %d", bgep->fm_capabilities)); 3164 bge_fm_init(bgep); 3165 3166 /* 3167 * Look up the IOMMU's page size for DVMA mappings (must be 3168 * a power of 2) and convert to a mask. 
This can be used to 3169 * determine whether a message buffer crosses a page boundary. 3170 * Note: in 2s complement binary notation, if X is a power of 3171 * 2, then -X has the representation "11...1100...00". 3172 */ 3173 bgep->pagemask = dvma_pagesize(devinfo); 3174 ASSERT(ddi_ffs(bgep->pagemask) == ddi_fls(bgep->pagemask)); 3175 bgep->pagemask = -bgep->pagemask; 3176 3177 /* 3178 * Map config space registers 3179 * Read chip ID & set up config space command register(s) 3180 * 3181 * Note: this leaves the chip accessible by Memory Space 3182 * accesses, but with interrupts and Bus Mastering off. 3183 * This should ensure that nothing untoward will happen 3184 * if it has been left active by the (net-)bootloader. 3185 * We'll re-enable Bus Mastering once we've reset the chip, 3186 * and allow interrupts only when everything else is set up. 3187 */ 3188 err = pci_config_setup(devinfo, &bgep->cfg_handle); if (err != DDI_SUCCESS) { /* fail now, before the handle is used below */ bge_problem(bgep, "pci_config_setup() failed"); goto attach_fail; } 3189 #ifdef BGE_IPMI_ASF 3190 #ifdef __sparc 3191 /* 3192 * We need to determine the type of chipset for accessing some 3193 * configuration registers. (This information will be used by 3194 * bge_ind_put32, bge_ind_get32 and bge_nic_read32) 3195 */ 3196 bgep->chipid.device = pci_config_get16(bgep->cfg_handle, 3197 PCI_CONF_DEVID); 3198 value16 = pci_config_get16(bgep->cfg_handle, PCI_CONF_COMM); 3199 value16 = value16 | (PCI_COMM_MAE | PCI_COMM_ME); 3200 pci_config_put16(bgep->cfg_handle, PCI_CONF_COMM, value16); 3201 mhcrValue = MHCR_ENABLE_INDIRECT_ACCESS | 3202 MHCR_ENABLE_TAGGED_STATUS_MODE | 3203 MHCR_MASK_INTERRUPT_MODE | 3204 MHCR_MASK_PCI_INT_OUTPUT | 3205 MHCR_CLEAR_INTERRUPT_INTA | 3206 MHCR_ENABLE_ENDIAN_WORD_SWAP | 3207 MHCR_ENABLE_ENDIAN_BYTE_SWAP; 3208 /* 3209 * For some chipsets (e.g., BCM5718), if MHCR_ENABLE_ENDIAN_BYTE_SWAP 3210 * has been set in PCI_CONF_COMM already, we need to write the 3211 * byte-swapped value to it. So we just write zero first for simplicity. 3212 */ 3213 if (DEVICE_5717_SERIES_CHIPSETS(bgep)) 3214 pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_MHCR, 0); 3215 pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_MHCR, mhcrValue); 3216 bge_ind_put32(bgep, MEMORY_ARBITER_MODE_REG, 3217 bge_ind_get32(bgep, MEMORY_ARBITER_MODE_REG) | 3218 MEMORY_ARBITER_ENABLE); 3219 #else 3220 mhcrValue = pci_config_get32(bgep->cfg_handle, PCI_CONF_BGE_MHCR); 3221 #endif 3222 if (mhcrValue & MHCR_ENABLE_ENDIAN_WORD_SWAP) { 3223 bgep->asf_wordswapped = B_TRUE; 3224 } else { 3225 bgep->asf_wordswapped = B_FALSE; 3226 } 3227 bge_asf_get_config(bgep); 3228 #endif 3233 bgep->progress |= PROGRESS_CFG; 3234 cidp = &bgep->chipid; 3235 bzero(cidp, sizeof (*cidp)); 3236 bge_chip_cfg_init(bgep, cidp, B_FALSE); 3237 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) { 3238 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 3239 goto attach_fail; 3240 } 3241 3242 #ifdef BGE_IPMI_ASF 3243 if (DEVICE_5721_SERIES_CHIPSETS(bgep) || 3244 DEVICE_5714_SERIES_CHIPSETS(bgep)) { 3245 bgep->asf_newhandshake = B_TRUE; 3246 } else { 3247 bgep->asf_newhandshake = B_FALSE; 3248 } 3249 #endif 3250 3251 /* 3252 * Update those parts of the chip ID derived from volatile 3253 * registers with the values seen by OBP (in case the chip 3254 * has been reset externally and therefore lost them).
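 *
 * Each lookup passes the previously-read value as the default, so a
 * missing property leaves the chip ID unchanged, e.g.
 *
 *	cidp->latency = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
 *	    DDI_PROP_DONTPASS, latency_propname, cidp->latency);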
3255 */ 3256 cidp->subven = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 3257 DDI_PROP_DONTPASS, subven_propname, cidp->subven); 3258 cidp->subdev = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 3259 DDI_PROP_DONTPASS, subdev_propname, cidp->subdev); 3260 cidp->clsize = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 3261 DDI_PROP_DONTPASS, clsize_propname, cidp->clsize); 3262 cidp->latency = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 3263 DDI_PROP_DONTPASS, latency_propname, cidp->latency); 3264 cidp->rx_rings = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 3265 DDI_PROP_DONTPASS, rxrings_propname, cidp->rx_rings); 3266 cidp->tx_rings = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 3267 DDI_PROP_DONTPASS, txrings_propname, cidp->tx_rings); 3268 3269 cidp->default_mtu = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 3270 DDI_PROP_DONTPASS, default_mtu, BGE_DEFAULT_MTU); 3271 if ((cidp->default_mtu < BGE_DEFAULT_MTU) || 3272 (cidp->default_mtu > BGE_MAXIMUM_MTU)) { 3273 cidp->default_mtu = BGE_DEFAULT_MTU; 3274 } 3275 3276 /* 3277 * Map operating registers 3278 */ 3279 err = ddi_regs_map_setup(devinfo, BGE_PCI_OPREGS_RNUMBER, 3280 ®s, 0, 0, &bge_reg_accattr, &bgep->io_handle); 3281 if (err != DDI_SUCCESS) { 3282 bge_problem(bgep, "ddi_regs_map_setup() failed"); 3283 goto attach_fail; 3284 } 3285 bgep->io_regs = regs; 3286 bgep->progress |= PROGRESS_REGS; 3287 3288 /* 3289 * Characterise the device, so we know its requirements. 3290 * Then allocate the appropriate TX and RX descriptors & buffers. 3291 */ 3292 if (bge_chip_id_init(bgep) == EIO) { 3293 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 3294 goto attach_fail; 3295 } 3296 3297 err = bge_alloc_bufs(bgep); 3298 if (err != DDI_SUCCESS) { 3299 bge_problem(bgep, "DMA buffer allocation failed"); 3300 goto attach_fail; 3301 } 3302 bgep->progress |= PROGRESS_BUFS; 3303 3304 /* 3305 * Add the softint handlers: 3306 * 3307 * Both of these handlers are used to avoid restrictions on the 3308 * context and/or mutexes required for some operations. In 3309 * particular, the hardware interrupt handler and its subfunctions 3310 * can detect a number of conditions that we don't want to handle 3311 * in that context or with that set of mutexes held. So, these 3312 * softints are triggered instead: 3313 * 3314 * the <resched> softint is triggered if we have previously 3315 * had to refuse to send a packet because of resource shortage 3316 * (we've run out of transmit buffers), but the send completion 3317 * interrupt handler has now detected that more buffers have 3318 * become available. 3319 * 3320 * the <factotum> is triggered if the h/w interrupt handler 3321 * sees the <link state changed> or <error> bits in the status 3322 * block. It's also triggered periodically to poll the link 3323 * state, just in case we aren't getting link status change 3324 * interrupts ... 
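 *
 * Either softint is raised from the interrupt-handling code with,
 * for example,
 *
 *	ddi_trigger_softintr(bgep->factotum_id);
 *
 * (the actual triggering sites are in the chip-level code, not here)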
3325 */ 3326 err = ddi_add_softintr(devinfo, DDI_SOFTINT_LOW, &bgep->drain_id, 3327 NULL, NULL, bge_send_drain, (caddr_t)bgep); 3328 if (err != DDI_SUCCESS) { 3329 bge_problem(bgep, "ddi_add_softintr() failed"); 3330 goto attach_fail; 3331 } 3332 bgep->progress |= PROGRESS_RESCHED; 3333 err = ddi_add_softintr(devinfo, DDI_SOFTINT_LOW, &bgep->factotum_id, 3334 NULL, NULL, bge_chip_factotum, (caddr_t)bgep); 3335 if (err != DDI_SUCCESS) { 3336 bge_problem(bgep, "ddi_add_softintr() failed"); 3337 goto attach_fail; 3338 } 3339 bgep->progress |= PROGRESS_FACTOTUM; 3340 3341 /* Get supported interrupt types */ 3342 if (ddi_intr_get_supported_types(devinfo, &intr_types) != DDI_SUCCESS) { 3343 bge_error(bgep, "ddi_intr_get_supported_types failed\n"); 3344 3345 goto attach_fail; 3346 } 3347 3348 BGE_DEBUG(("%s: ddi_intr_get_supported_types() returned: %x", 3349 bgep->ifname, intr_types)); 3350 3351 if ((intr_types & DDI_INTR_TYPE_MSI) && bgep->chipid.msi_enabled) { 3352 if (bge_add_intrs(bgep, DDI_INTR_TYPE_MSI) != DDI_SUCCESS) { 3353 bge_error(bgep, "MSI registration failed, " 3354 "trying FIXED interrupt type\n"); 3355 } else { 3356 BGE_DEBUG(("%s: Using MSI interrupt type", 3357 bgep->ifname)); 3358 bgep->intr_type = DDI_INTR_TYPE_MSI; 3359 bgep->progress |= PROGRESS_HWINT; 3360 } 3361 } 3362 3363 if (!(bgep->progress & PROGRESS_HWINT) && 3364 (intr_types & DDI_INTR_TYPE_FIXED)) { 3365 if (bge_add_intrs(bgep, DDI_INTR_TYPE_FIXED) != DDI_SUCCESS) { 3366 bge_error(bgep, "FIXED interrupt " 3367 "registration failed\n"); 3368 goto attach_fail; 3369 } 3370 3371 BGE_DEBUG(("%s: Using FIXED interrupt type", bgep->ifname)); 3372 3373 bgep->intr_type = DDI_INTR_TYPE_FIXED; 3374 bgep->progress |= PROGRESS_HWINT; 3375 } 3376 3377 if (!(bgep->progress & PROGRESS_HWINT)) { 3378 bge_error(bgep, "No interrupts registered\n"); 3379 goto attach_fail; 3380 } 3381 3382 /* 3383 * Note that interrupts are not enabled yet as 3384 * mutex locks are not initialized. Initialize mutex locks. 3385 */ 3386 mutex_init(bgep->genlock, NULL, MUTEX_DRIVER, 3387 DDI_INTR_PRI(bgep->intr_pri)); 3388 mutex_init(bgep->softintrlock, NULL, MUTEX_DRIVER, 3389 DDI_INTR_PRI(bgep->intr_pri)); 3390 rw_init(bgep->errlock, NULL, RW_DRIVER, 3391 DDI_INTR_PRI(bgep->intr_pri)); 3392 3393 /* 3394 * Initialize rings. 3395 */ 3396 bge_init_rings(bgep); 3397 3398 /* 3399 * Now that mutex locks are initialized, enable interrupts. 3400 */ 3401 bge_intr_enable(bgep); 3402 bgep->progress |= PROGRESS_INTR; 3403 3404 /* 3405 * Initialise link state variables 3406 * Stop, reset & reinitialise the chip. 3407 * Initialise the (internal) PHY. 3408 */ 3409 bgep->link_state = LINK_STATE_UNKNOWN; 3410 3411 mutex_enter(bgep->genlock); 3412 3413 /* 3414 * Reset chip & rings to initial state; also reset address 3415 * filtering, promiscuity, loopback mode. 
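 * (when IPMI/ASF support is compiled in, bge_reset() takes an extra
 * ASF mode argument; hence the #ifdef variants just below)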
3416 */ 3417 #ifdef BGE_IPMI_ASF 3418 #ifdef BGE_NETCONSOLE 3419 if (bge_reset(bgep, ASF_MODE_INIT) != DDI_SUCCESS) { 3420 #else 3421 if (bge_reset(bgep, ASF_MODE_SHUTDOWN) != DDI_SUCCESS) { 3422 #endif 3423 #else 3424 if (bge_reset(bgep) != DDI_SUCCESS) { 3425 #endif 3426 (void) bge_check_acc_handle(bgep, bgep->cfg_handle); 3427 (void) bge_check_acc_handle(bgep, bgep->io_handle); 3428 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 3429 mutex_exit(bgep->genlock); 3430 goto attach_fail; 3431 } 3432 3433 #ifdef BGE_IPMI_ASF 3434 if (bgep->asf_enabled) { 3435 bgep->asf_status = ASF_STAT_RUN_INIT; 3436 } 3437 #endif 3438 3439 bzero(bgep->mcast_hash, sizeof (bgep->mcast_hash)); 3440 bzero(bgep->mcast_refs, sizeof (bgep->mcast_refs)); 3441 bgep->promisc = B_FALSE; 3442 bgep->param_loop_mode = BGE_LOOP_NONE; 3443 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) { 3444 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 3445 mutex_exit(bgep->genlock); 3446 goto attach_fail; 3447 } 3448 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) { 3449 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 3450 mutex_exit(bgep->genlock); 3451 goto attach_fail; 3452 } 3453 3454 mutex_exit(bgep->genlock); 3455 3456 if (bge_phys_init(bgep) == EIO) { 3457 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 3458 goto attach_fail; 3459 } 3460 bgep->progress |= PROGRESS_PHY; 3461 3462 /* 3463 * initialize NDD-tweakable parameters 3464 */ 3465 if (bge_nd_init(bgep)) { 3466 bge_problem(bgep, "bge_nd_init() failed"); 3467 goto attach_fail; 3468 } 3469 bgep->progress |= PROGRESS_NDD; 3470 3471 /* 3472 * Create & initialise named kstats 3473 */ 3474 bge_init_kstats(bgep, instance); 3475 bgep->progress |= PROGRESS_KSTATS; 3476 3477 /* 3478 * Determine whether to override the chip's own MAC address 3479 */ 3480 bge_find_mac_address(bgep, cidp); 3481 3482 bgep->unicst_addr_total = MAC_ADDRESS_REGS_MAX; 3483 bgep->unicst_addr_avail = MAC_ADDRESS_REGS_MAX; 3484 3485 if ((macp = mac_alloc(MAC_VERSION)) == NULL) 3486 goto attach_fail; 3487 macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER; 3488 macp->m_driver = bgep; 3489 macp->m_dip = devinfo; 3490 macp->m_src_addr = cidp->vendor_addr.addr; 3491 macp->m_callbacks = &bge_m_callbacks; 3492 macp->m_min_sdu = 0; 3493 macp->m_max_sdu = cidp->ethmax_size - sizeof (struct ether_header); 3494 macp->m_margin = VLAN_TAGSZ; 3495 macp->m_priv_props = bge_priv_prop; 3496 macp->m_v12n = MAC_VIRT_LEVEL1; 3497 3498 /* 3499 * Finally, we're ready to register ourselves with the MAC layer 3500 * interface; if this succeeds, we're all ready to start() 3501 */ 3502 err = mac_register(macp, &bgep->mh); 3503 mac_free(macp); 3504 if (err != 0) 3505 goto attach_fail; 3506 3507 /* 3508 * Register a periodical handler. 3509 * bge_chip_cyclic() is invoked in kernel context. 
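 * The interval passed to ddi_periodic_add(9F) below is expressed in
 * nanoseconds (BGE_CYCLIC_PERIOD), and the handler runs at DDI_IPL_0.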
3510 */ 3511 bgep->periodic_id = ddi_periodic_add(bge_chip_cyclic, bgep, 3512 BGE_CYCLIC_PERIOD, DDI_IPL_0); 3513 3514 bgep->progress |= PROGRESS_READY; 3515 ASSERT(bgep->bge_guard == BGE_GUARD); 3516 #ifdef BGE_IPMI_ASF 3517 #ifdef BGE_NETCONSOLE 3518 if (bgep->asf_enabled) { 3519 mutex_enter(bgep->genlock); 3520 retval = bge_chip_start(bgep, B_TRUE); 3521 mutex_exit(bgep->genlock); 3522 if (retval != DDI_SUCCESS) 3523 goto attach_fail; 3524 } 3525 #endif 3526 #endif 3527 3528 ddi_report_dev(devinfo); 3529 3530 return (DDI_SUCCESS); 3531 3532 attach_fail: 3533 #ifdef BGE_IPMI_ASF 3534 bge_unattach(bgep, ASF_MODE_SHUTDOWN); 3535 #else 3536 bge_unattach(bgep); 3537 #endif 3538 return (DDI_FAILURE); 3539 } 3540 3541 /* 3542 * bge_suspend() -- suspend transmit/receive for powerdown 3543 */ 3544 static int 3545 bge_suspend(bge_t *bgep) 3546 { 3547 /* 3548 * Stop processing and idle (powerdown) the PHY ... 3549 */ 3550 mutex_enter(bgep->genlock); 3551 #ifdef BGE_IPMI_ASF 3552 /* 3553 * Power management is not currently supported in BGE. If you 3554 * want to implement it, add the ASF/IPMI-related 3555 * code here. 3556 */ 3557 #endif 3558 bge_stop(bgep); 3559 if (bge_phys_idle(bgep) != DDI_SUCCESS) { 3560 (void) bge_check_acc_handle(bgep, bgep->io_handle); 3561 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 3562 mutex_exit(bgep->genlock); 3563 return (DDI_FAILURE); 3564 } 3565 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) { 3566 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 3567 mutex_exit(bgep->genlock); 3568 return (DDI_FAILURE); 3569 } 3570 mutex_exit(bgep->genlock); 3571 3572 return (DDI_SUCCESS); 3573 } 3574 3575 /* 3576 * quiesce(9E) entry point. 3577 * 3578 * This function is called when the system is single-threaded at high 3579 * PIL with preemption disabled. Therefore, this function must not 3580 * block. 3581 * 3582 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure. 3583 * DDI_FAILURE indicates an error condition and should almost never happen.
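 *
 * (On SPARC, quiesce is not implemented; bge_quiesce is simply mapped
 * to ddi_quiesce_not_supported below.)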
3584 */ 3585 #ifdef __sparc 3586 #define bge_quiesce ddi_quiesce_not_supported 3587 #else 3588 static int 3589 bge_quiesce(dev_info_t *devinfo) 3590 { 3591 bge_t *bgep = ddi_get_driver_private(devinfo); 3592 3593 if (bgep == NULL) 3594 return (DDI_FAILURE); 3595 3596 if (bgep->intr_type == DDI_INTR_TYPE_FIXED) { 3597 bge_reg_set32(bgep, PCI_CONF_BGE_MHCR, 3598 MHCR_MASK_PCI_INT_OUTPUT); 3599 } else { 3600 bge_reg_clr32(bgep, MSI_MODE_REG, MSI_MSI_ENABLE); 3601 } 3602 3603 /* Stop the chip */ 3604 bge_chip_stop_nonblocking(bgep); 3605 3606 return (DDI_SUCCESS); 3607 } 3608 #endif 3609 3610 /* 3611 * detach(9E) -- Detach a device from the system 3612 */ 3613 static int 3614 bge_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd) 3615 { 3616 bge_t *bgep; 3617 #ifdef BGE_IPMI_ASF 3618 uint_t asf_mode; 3619 asf_mode = ASF_MODE_NONE; 3620 #endif 3621 3622 BGE_GTRACE(("bge_detach($%p, %d)", (void *)devinfo, cmd)); 3623 3624 bgep = ddi_get_driver_private(devinfo); 3625 3626 switch (cmd) { 3627 default: 3628 return (DDI_FAILURE); 3629 3630 case DDI_SUSPEND: 3631 return (bge_suspend(bgep)); 3632 3633 case DDI_DETACH: 3634 break; 3635 } 3636 3637 #ifdef BGE_IPMI_ASF 3638 mutex_enter(bgep->genlock); 3639 if (bgep->asf_enabled && ((bgep->asf_status == ASF_STAT_RUN) || 3640 (bgep->asf_status == ASF_STAT_RUN_INIT))) { 3641 3642 bge_asf_update_status(bgep); 3643 if (bgep->asf_status == ASF_STAT_RUN) { 3644 bge_asf_stop_timer(bgep); 3645 } 3646 bgep->asf_status = ASF_STAT_STOP; 3647 3648 bge_asf_pre_reset_operations(bgep, BGE_SHUTDOWN_RESET); 3649 3650 if (bgep->asf_pseudostop) { 3651 bge_chip_stop(bgep, B_FALSE); 3652 bgep->bge_mac_state = BGE_MAC_STOPPED; 3653 bgep->asf_pseudostop = B_FALSE; 3654 } 3655 3656 asf_mode = ASF_MODE_POST_SHUTDOWN; 3657 3658 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) 3659 ddi_fm_service_impact(bgep->devinfo, 3660 DDI_SERVICE_UNAFFECTED); 3661 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) 3662 ddi_fm_service_impact(bgep->devinfo, 3663 DDI_SERVICE_UNAFFECTED); 3664 } 3665 mutex_exit(bgep->genlock); 3666 #endif 3667 3668 /* 3669 * Unregister from the GLD subsystem. This can fail, in 3670 * particular if there are DLPI style-2 streams still open - 3671 * in which case we just return failure without shutting 3672 * down chip operations. 3673 */ 3674 if (mac_unregister(bgep->mh) != 0) 3675 return (DDI_FAILURE); 3676 3677 /* 3678 * All activity stopped, so we can clean up & exit 3679 */ 3680 #ifdef BGE_IPMI_ASF 3681 bge_unattach(bgep, asf_mode); 3682 #else 3683 bge_unattach(bgep); 3684 #endif 3685 return (DDI_SUCCESS); 3686 } 3687 3688 3689 /* 3690 * ========== Module Loading Data & Entry Points ========== 3691 */ 3692 3693 #undef BGE_DBG 3694 #define BGE_DBG BGE_DBG_INIT /* debug flag for this code */ 3695 3696 DDI_DEFINE_STREAM_OPS(bge_dev_ops, 3697 nulldev, /* identify */ 3698 nulldev, /* probe */ 3699 bge_attach, /* attach */ 3700 bge_detach, /* detach */ 3701 nodev, /* reset */ 3702 NULL, /* getinfo */ 3703 D_MP, /* flag */ 3704 NULL, /* stream_tab */ 3705 bge_quiesce /* quiesce */ 3706 ); 3707 3708 static struct modldrv bge_modldrv = { 3709 &mod_driverops, /* Type of module.
This one is a driver */ 3710 bge_ident, /* short description */ 3711 &bge_dev_ops /* driver specific ops */ 3712 }; 3713 3714 static struct modlinkage modlinkage = { 3715 MODREV_1, (void *)&bge_modldrv, NULL 3716 }; 3717 3718 3719 int 3720 _info(struct modinfo *modinfop) 3721 { 3722 return (mod_info(&modlinkage, modinfop)); 3723 } 3724 3725 int 3726 _init(void) 3727 { 3728 int status; 3729 3730 mac_init_ops(&bge_dev_ops, "bge"); 3731 status = mod_install(&modlinkage); 3732 if (status == DDI_SUCCESS) 3733 mutex_init(bge_log_mutex, NULL, MUTEX_DRIVER, NULL); 3734 else 3735 mac_fini_ops(&bge_dev_ops); 3736 return (status); 3737 } 3738 3739 int 3740 _fini(void) 3741 { 3742 int status; 3743 3744 status = mod_remove(&modlinkage); 3745 if (status == DDI_SUCCESS) { 3746 mac_fini_ops(&bge_dev_ops); 3747 mutex_destroy(bge_log_mutex); 3748 } 3749 return (status); 3750 } 3751 3752 3753 /* 3754 * bge_add_intrs: 3755 * 3756 * Register FIXED or MSI interrupts. 3757 */ 3758 static int 3759 bge_add_intrs(bge_t *bgep, int intr_type) 3760 { 3761 dev_info_t *dip = bgep->devinfo; 3762 int avail, actual, intr_size, count = 0; 3763 int i, flag, ret; 3764 3765 BGE_DEBUG(("bge_add_intrs($%p, 0x%x)", (void *)bgep, intr_type)); 3766 3767 /* Get number of interrupts */ 3768 ret = ddi_intr_get_nintrs(dip, intr_type, &count); 3769 if ((ret != DDI_SUCCESS) || (count == 0)) { 3770 bge_error(bgep, "ddi_intr_get_nintrs() failure, ret: %d, " 3771 "count: %d\n", ret, count); 3772 3773 return (DDI_FAILURE); 3774 } 3775 3776 /* Get number of available interrupts */ 3777 ret = ddi_intr_get_navail(dip, intr_type, &avail); 3778 if ((ret != DDI_SUCCESS) || (avail == 0)) { 3779 bge_error(bgep, "ddi_intr_get_navail() failure, " 3780 "ret: %d, avail: %d\n", ret, avail); 3781 3782 return (DDI_FAILURE); 3783 } 3784 3785 if (avail < count) { 3786 BGE_DEBUG(("%s: nintrs() returned %d, navail returned %d", 3787 bgep->ifname, count, avail)); 3788 } 3789 3790 /* 3791 * BGE hardware generates only a single MSI even though it claims 3792 * to support multiple MSIs, so hard-code the MSI count to 1.
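 * (DDI_INTR_ALLOC_STRICT below then makes ddi_intr_alloc() fail
 * outright rather than allocate fewer than the requested count.)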
3793 */ 3794 if (intr_type == DDI_INTR_TYPE_MSI) { 3795 count = 1; 3796 flag = DDI_INTR_ALLOC_STRICT; 3797 } else { 3798 flag = DDI_INTR_ALLOC_NORMAL; 3799 } 3800 3801 /* Allocate an array of interrupt handles */ 3802 intr_size = count * sizeof (ddi_intr_handle_t); 3803 bgep->htable = kmem_alloc(intr_size, KM_SLEEP); 3804 3805 /* Call ddi_intr_alloc() */ 3806 ret = ddi_intr_alloc(dip, bgep->htable, intr_type, 0, 3807 count, &actual, flag); 3808 3809 if ((ret != DDI_SUCCESS) || (actual == 0)) { 3810 bge_error(bgep, "ddi_intr_alloc() failed %d\n", ret); 3811 3812 kmem_free(bgep->htable, intr_size); 3813 return (DDI_FAILURE); 3814 } 3815 3816 if (actual < count) { 3817 BGE_DEBUG(("%s: Requested: %d, Received: %d", 3818 bgep->ifname, count, actual)); 3819 } 3820 3821 bgep->intr_cnt = actual; 3822 3823 /* 3824 * Get the priority of the first interrupt; assume the rest are the same 3825 */ 3826 if ((ret = ddi_intr_get_pri(bgep->htable[0], &bgep->intr_pri)) != 3827 DDI_SUCCESS) { 3828 bge_error(bgep, "ddi_intr_get_pri() failed %d\n", ret); 3829 3830 /* Free already allocated intrs */ 3831 for (i = 0; i < actual; i++) { 3832 (void) ddi_intr_free(bgep->htable[i]); 3833 } 3834 3835 kmem_free(bgep->htable, intr_size); 3836 return (DDI_FAILURE); 3837 } 3838 3839 /* Call ddi_intr_add_handler() */ 3840 for (i = 0; i < actual; i++) { 3841 if ((ret = ddi_intr_add_handler(bgep->htable[i], bge_intr, 3842 (caddr_t)bgep, (caddr_t)(uintptr_t)i)) != DDI_SUCCESS) { 3843 bge_error(bgep, "ddi_intr_add_handler() " 3844 "failed %d\n", ret); 3845 3846 /* Remove the handlers added so far, then free all the intrs */ while (i-- > 0) (void) ddi_intr_remove_handler(bgep->htable[i]); 3847 for (i = 0; i < actual; i++) { 3848 (void) ddi_intr_free(bgep->htable[i]); 3849 } 3850 3851 kmem_free(bgep->htable, intr_size); 3852 return (DDI_FAILURE); 3853 } 3854 } 3855 3856 if ((ret = ddi_intr_get_cap(bgep->htable[0], &bgep->intr_cap)) 3857 != DDI_SUCCESS) { 3858 bge_error(bgep, "ddi_intr_get_cap() failed %d\n", ret); 3859 3860 for (i = 0; i < actual; i++) { 3861 (void) ddi_intr_remove_handler(bgep->htable[i]); 3862 (void) ddi_intr_free(bgep->htable[i]); 3863 } 3864 3865 kmem_free(bgep->htable, intr_size); 3866 return (DDI_FAILURE); 3867 } 3868 3869 return (DDI_SUCCESS); 3870 } 3871 3872 /* 3873 * bge_rem_intrs: 3874 * 3875 * Unregister FIXED or MSI interrupts 3876 */ 3877 static void 3878 bge_rem_intrs(bge_t *bgep) 3879 { 3880 int i; 3881 3882 BGE_DEBUG(("bge_rem_intrs($%p)", (void *)bgep)); 3883 3884 /* Call ddi_intr_remove_handler() */ 3885 for (i = 0; i < bgep->intr_cnt; i++) { 3886 (void) ddi_intr_remove_handler(bgep->htable[i]); 3887 (void) ddi_intr_free(bgep->htable[i]); 3888 } 3889 3890 kmem_free(bgep->htable, bgep->intr_cnt * sizeof (ddi_intr_handle_t)); 3891 } 3892 3893 3894 void 3895 bge_intr_enable(bge_t *bgep) 3896 { 3897 int i; 3898 3899 if (bgep->intr_cap & DDI_INTR_FLAG_BLOCK) { 3900 /* Call ddi_intr_block_enable() for MSI interrupts */ 3901 (void) ddi_intr_block_enable(bgep->htable, bgep->intr_cnt); 3902 } else { 3903 /* Call ddi_intr_enable for MSI or FIXED interrupts */ 3904 for (i = 0; i < bgep->intr_cnt; i++) { 3905 (void) ddi_intr_enable(bgep->htable[i]); 3906 } 3907 } 3908 } 3909 3910 3911 void 3912 bge_intr_disable(bge_t *bgep) 3913 { 3914 int i; 3915 3916 if (bgep->intr_cap & DDI_INTR_FLAG_BLOCK) { 3917 /* Call ddi_intr_block_disable() */ 3918 (void) ddi_intr_block_disable(bgep->htable, bgep->intr_cnt); 3919 } else { 3920 for (i = 0; i < bgep->intr_cnt; i++) { 3921 (void) ddi_intr_disable(bgep->htable[i]); 3922 } 3923 } 3924 } 3925 3926 int 3927 bge_reprogram(bge_t *bgep) 3928 { 3929 int status = 0; 3930 3931
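	/*
	 * Caller must hold genlock (asserted below). The PHY is
	 * reprogrammed first, since it owns the speed/duplex settings
	 * that the subsequent chip sync depends on; finally the MSI is
	 * re-triggered if MSIs are in use.
	 */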
ASSERT(mutex_owned(bgep->genlock)); 3932 3933 if (bge_phys_update(bgep) != DDI_SUCCESS) { 3934 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 3935 status = IOC_INVAL; 3936 } 3937 #ifdef BGE_IPMI_ASF 3938 if (bge_chip_sync(bgep, B_TRUE) == DDI_FAILURE) { 3939 #else 3940 if (bge_chip_sync(bgep) == DDI_FAILURE) { 3941 #endif 3942 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 3943 status = IOC_INVAL; 3944 } 3945 if (bgep->intr_type == DDI_INTR_TYPE_MSI) 3946 bge_chip_msi_trig(bgep); 3947 return (status); 3948 } 3949