/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include "bge_impl.h"
#include <sys/sdt.h>
#include <sys/mac_provider.h>
#include <sys/mac.h>
#include <sys/mac_flow.h>

/*
 * This is the string displayed by modinfo, etc.
 */
static char bge_ident[] = "Broadcom Gb Ethernet";
/*
 * Make sure you keep the version ID up to date!
 */
static char bge_version[] = "Broadcom Gb Ethernet v1.07";

/*
 * Property names
 */
static char debug_propname[] = "bge-debug-flags";
static char clsize_propname[] = "cache-line-size";
static char latency_propname[] = "latency-timer";
static char localmac_boolname[] = "local-mac-address?";
static char localmac_propname[] = "local-mac-address";
static char macaddr_propname[] = "mac-address";
static char subdev_propname[] = "subsystem-id";
static char subven_propname[] = "subsystem-vendor-id";
static char rxrings_propname[] = "bge-rx-rings";
static char txrings_propname[] = "bge-tx-rings";
static char fm_cap[] = "fm-capable";
static char default_mtu[] = "default_mtu";

static int	bge_add_intrs(bge_t *, int);
static void	bge_rem_intrs(bge_t *);
static int	bge_unicst_set(void *, const uint8_t *, int);

/*
 * Describes the chip's DMA engine
 */
static ddi_dma_attr_t dma_attr = {
	DMA_ATTR_V0,			/* dma_attr version	*/
	0x0000000000000000ull,		/* dma_attr_addr_lo	*/
	0xFFFFFFFFFFFFFFFFull,		/* dma_attr_addr_hi	*/
	0x00000000FFFFFFFFull,		/* dma_attr_count_max	*/
	0x0000000000000001ull,		/* dma_attr_align	*/
	0x00000FFF,			/* dma_attr_burstsizes	*/
	0x00000001,			/* dma_attr_minxfer	*/
	0x000000000000FFFFull,		/* dma_attr_maxxfer	*/
	0xFFFFFFFFFFFFFFFFull,		/* dma_attr_seg		*/
	1,				/* dma_attr_sgllen	*/
	0x00000001,			/* dma_attr_granular	*/
	DDI_DMA_FLAGERR			/* dma_attr_flags	*/
};
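
/*
 * Note: dma_attr_sgllen is 1, so every DMA binding must resolve to a
 * single cookie; bge_alloc_dma_mem() below fails any bind that the
 * DDI splits into more than one cookie.
 */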

/*
 * PIO access attributes for registers
 */
static ddi_device_acc_attr_t bge_reg_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC,
	DDI_FLAGERR_ACC
};

/*
 * DMA access attributes for descriptors: NOT to be byte swapped.
 */
static ddi_device_acc_attr_t bge_desc_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC,
	DDI_FLAGERR_ACC
};

/*
 * DMA access attributes for data: NOT to be byte swapped.
 */
static ddi_device_acc_attr_t bge_data_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC
};

static int		bge_m_start(void *);
static void		bge_m_stop(void *);
static int		bge_m_promisc(void *, boolean_t);
static int		bge_m_multicst(void *, boolean_t, const uint8_t *);
static void		bge_m_ioctl(void *, queue_t *, mblk_t *);
static boolean_t	bge_m_getcapab(void *, mac_capab_t, void *);
static int		bge_unicst_set(void *, const uint8_t *, int);
static int		bge_m_setprop(void *, const char *, mac_prop_id_t,
			    uint_t, const void *);
static int		bge_m_getprop(void *, const char *, mac_prop_id_t,
			    uint_t, uint_t, void *, uint_t *);
static int		bge_set_priv_prop(bge_t *, const char *, uint_t,
			    const void *);
static int		bge_get_priv_prop(bge_t *, const char *, uint_t,
			    uint_t, void *);

#define	BGE_M_CALLBACK_FLAGS (MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP)

static mac_callbacks_t bge_m_callbacks = {
	BGE_M_CALLBACK_FLAGS,
	bge_m_stat,
	bge_m_start,
	bge_m_stop,
	bge_m_promisc,
	bge_m_multicst,
	NULL,
	bge_m_tx,
	bge_m_ioctl,
	bge_m_getcapab,
	NULL,
	NULL,
	bge_m_setprop,
	bge_m_getprop
};
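
/*
 * The NULL mc_unicst slot above is deliberate: unicast addresses are
 * managed per ring group through the mgi_addmac/mgi_remmac entry
 * points registered in bge_fill_group() below.
 */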

mac_priv_prop_t bge_priv_prop[] = {
	{"_adv_asym_pause_cap", MAC_PROP_PERM_RW},
	{"_adv_pause_cap", MAC_PROP_PERM_RW}
};

#define	BGE_MAX_PRIV_PROPS \
	(sizeof (bge_priv_prop) / sizeof (mac_priv_prop_t))

uint8_t zero_addr[6] = {0, 0, 0, 0, 0, 0};
/*
 * ========== Transmit and receive ring reinitialisation ==========
 */

/*
 * These <reinit> routines each reset the specified ring to an initial
 * state, assuming that the corresponding <init> routine has already
 * been called exactly once.
 */

static void
bge_reinit_send_ring(send_ring_t *srp)
{
	bge_queue_t *txbuf_queue;
	bge_queue_item_t *txbuf_head;
	sw_txbuf_t *txbuf;
	sw_sbd_t *ssbdp;
	uint32_t slot;

	/*
	 * Reinitialise control variables ...
	 */
	srp->tx_flow = 0;
	srp->tx_next = 0;
	srp->txfill_next = 0;
	srp->tx_free = srp->desc.nslots;
	ASSERT(mutex_owned(srp->tc_lock));
	srp->tc_next = 0;
	srp->txpkt_next = 0;
	srp->tx_block = 0;
	srp->tx_nobd = 0;
	srp->tx_nobuf = 0;

	/*
	 * Initialize the tx buffer push queue
	 */
	mutex_enter(srp->freetxbuf_lock);
	mutex_enter(srp->txbuf_lock);
	txbuf_queue = &srp->freetxbuf_queue;
	txbuf_queue->head = NULL;
	txbuf_queue->count = 0;
	txbuf_queue->lock = srp->freetxbuf_lock;
	srp->txbuf_push_queue = txbuf_queue;

	/*
	 * Initialize the tx buffer pop queue
	 */
	txbuf_queue = &srp->txbuf_queue;
	txbuf_queue->head = NULL;
	txbuf_queue->count = 0;
	txbuf_queue->lock = srp->txbuf_lock;
	srp->txbuf_pop_queue = txbuf_queue;
	txbuf_head = srp->txbuf_head;
	txbuf = srp->txbuf;
	for (slot = 0; slot < srp->tx_buffers; ++slot) {
		txbuf_head->item = txbuf;
		txbuf_head->next = txbuf_queue->head;
		txbuf_queue->head = txbuf_head;
		txbuf_queue->count++;
		txbuf++;
		txbuf_head++;
	}
	mutex_exit(srp->txbuf_lock);
	mutex_exit(srp->freetxbuf_lock);

	/*
	 * Zero and sync all the h/w Send Buffer Descriptors
	 */
	DMA_ZERO(srp->desc);
	DMA_SYNC(srp->desc, DDI_DMA_SYNC_FORDEV);
	bzero(srp->pktp, BGE_SEND_BUF_MAX * sizeof (*srp->pktp));
	ssbdp = srp->sw_sbds;
	for (slot = 0; slot < srp->desc.nslots; ++ssbdp, ++slot)
		ssbdp->pbuf = NULL;
}

static void
bge_reinit_recv_ring(recv_ring_t *rrp)
{
	/*
	 * Reinitialise control variables ...
	 */
	rrp->rx_next = 0;
}

static void
bge_reinit_buff_ring(buff_ring_t *brp, uint32_t ring)
{
	bge_rbd_t *hw_rbd_p;
	sw_rbd_t *srbdp;
	uint32_t bufsize;
	uint32_t nslots;
	uint32_t slot;

	static uint16_t ring_type_flag[BGE_BUFF_RINGS_MAX] = {
		RBD_FLAG_STD_RING,
		RBD_FLAG_JUMBO_RING,
		RBD_FLAG_MINI_RING
	};

	/*
	 * Zero, initialise and sync all the h/w Receive Buffer Descriptors
	 * Note: all the remaining fields (<type>, <flags>, <ip_cksum>,
	 * <tcp_udp_cksum>, <error_flag>, <vlan_tag>, and <reserved>)
	 * should be zeroed, and so don't need to be set up specifically
	 * once the whole area has been cleared.
	 */
	DMA_ZERO(brp->desc);

	hw_rbd_p = DMA_VPTR(brp->desc);
	nslots = brp->desc.nslots;
	ASSERT(brp->buf[0].nslots == nslots/BGE_SPLIT);
	bufsize = brp->buf[0].size;
	srbdp = brp->sw_rbds;
	for (slot = 0; slot < nslots; ++hw_rbd_p, ++srbdp, ++slot) {
		hw_rbd_p->host_buf_addr = srbdp->pbuf.cookie.dmac_laddress;
		hw_rbd_p->index = (uint16_t)slot;
		hw_rbd_p->len = (uint16_t)bufsize;
		hw_rbd_p->opaque = srbdp->pbuf.token;
		hw_rbd_p->flags |= ring_type_flag[ring];
	}

	DMA_SYNC(brp->desc, DDI_DMA_SYNC_FORDEV);

	/*
	 * Finally, reinitialise the ring control variables ...
	 */
	brp->rf_next = (nslots != 0) ? (nslots-1) : 0;
}

/*
 * Reinitialize all rings
 */
static void
bge_reinit_rings(bge_t *bgep)
{
	uint32_t ring;

	ASSERT(mutex_owned(bgep->genlock));

	/*
	 * Send Rings ...
	 */
	for (ring = 0; ring < bgep->chipid.tx_rings; ++ring)
		bge_reinit_send_ring(&bgep->send[ring]);

	/*
	 * Receive Return Rings ...
	 */
	for (ring = 0; ring < bgep->chipid.rx_rings; ++ring)
		bge_reinit_recv_ring(&bgep->recv[ring]);

	/*
	 * Receive Producer Rings ...
	 */
	for (ring = 0; ring < BGE_BUFF_RINGS_USED; ++ring)
		bge_reinit_buff_ring(&bgep->buff[ring], ring);
}

/*
 * ========== Internal state management entry points ==========
 */

#undef	BGE_DBG
#define	BGE_DBG		BGE_DBG_NEMO	/* debug flag for this code	*/

/*
 * These routines provide all the functionality required by the
 * corresponding GLD entry points, but don't update the GLD state
 * so they can be called internally without disturbing our record
 * of what GLD thinks we should be doing ...
 */
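
/*
 * Locking note: bge_reset() below acquires the per-ring locks in one
 * fixed global order (rx_lock, rf_lock, errlock, tx_lock, tc_lock);
 * it is this single acquisition order that prevents deadlock against
 * the send and receive data paths.
 */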

/*
 *	bge_reset() -- reset h/w & rings to initial state
 */
static int
#ifdef BGE_IPMI_ASF
bge_reset(bge_t *bgep, uint_t asf_mode)
#else
bge_reset(bge_t *bgep)
#endif
{
	uint32_t ring;
	int retval;

	BGE_TRACE(("bge_reset($%p)", (void *)bgep));

	ASSERT(mutex_owned(bgep->genlock));

	/*
	 * Grab all the other mutexes in the world (this should
	 * ensure no other threads are manipulating driver state)
	 */
	for (ring = 0; ring < BGE_RECV_RINGS_MAX; ++ring)
		mutex_enter(bgep->recv[ring].rx_lock);
	for (ring = 0; ring < BGE_BUFF_RINGS_MAX; ++ring)
		mutex_enter(bgep->buff[ring].rf_lock);
	rw_enter(bgep->errlock, RW_WRITER);
	for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring)
		mutex_enter(bgep->send[ring].tx_lock);
	for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring)
		mutex_enter(bgep->send[ring].tc_lock);

#ifdef BGE_IPMI_ASF
	retval = bge_chip_reset(bgep, B_TRUE, asf_mode);
#else
	retval = bge_chip_reset(bgep, B_TRUE);
#endif
	bge_reinit_rings(bgep);

	/*
	 * Free the world ...
	 */
	for (ring = BGE_SEND_RINGS_MAX; ring-- > 0; )
		mutex_exit(bgep->send[ring].tc_lock);
	for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring)
		mutex_exit(bgep->send[ring].tx_lock);
	rw_exit(bgep->errlock);
	for (ring = BGE_BUFF_RINGS_MAX; ring-- > 0; )
		mutex_exit(bgep->buff[ring].rf_lock);
	for (ring = BGE_RECV_RINGS_MAX; ring-- > 0; )
		mutex_exit(bgep->recv[ring].rx_lock);

	BGE_DEBUG(("bge_reset($%p) done", (void *)bgep));
	return (retval);
}

/*
 *	bge_stop() -- stop processing, don't reset h/w or rings
 */
static void
bge_stop(bge_t *bgep)
{
	BGE_TRACE(("bge_stop($%p)", (void *)bgep));

	ASSERT(mutex_owned(bgep->genlock));

#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled) {
		bgep->asf_pseudostop = B_TRUE;
	} else {
#endif
		bge_chip_stop(bgep, B_FALSE);
#ifdef BGE_IPMI_ASF
	}
#endif

	BGE_DEBUG(("bge_stop($%p) done", (void *)bgep));
}

/*
 *	bge_start() -- start transmitting/receiving
 */
static int
bge_start(bge_t *bgep, boolean_t reset_phys)
{
	int retval;

	BGE_TRACE(("bge_start($%p, %d)", (void *)bgep, reset_phys));

	ASSERT(mutex_owned(bgep->genlock));

	/*
	 * Start chip processing, including enabling interrupts
	 */
	retval = bge_chip_start(bgep, reset_phys);

	BGE_DEBUG(("bge_start($%p, %d) done", (void *)bgep, reset_phys));
	return (retval);
}

/*
 * bge_restart - restart transmitting/receiving after error or suspend
 */
int
bge_restart(bge_t *bgep, boolean_t reset_phys)
{
	int retval = DDI_SUCCESS;
	ASSERT(mutex_owned(bgep->genlock));

#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled) {
		if (bge_reset(bgep, ASF_MODE_POST_INIT) != DDI_SUCCESS)
			retval = DDI_FAILURE;
	} else
		if (bge_reset(bgep, ASF_MODE_NONE) != DDI_SUCCESS)
			retval = DDI_FAILURE;
#else
	if (bge_reset(bgep) != DDI_SUCCESS)
		retval = DDI_FAILURE;
#endif
	if (bgep->bge_mac_state == BGE_MAC_STARTED) {
		if (bge_start(bgep, reset_phys) != DDI_SUCCESS)
			retval = DDI_FAILURE;
		bgep->watchdog = 0;
		ddi_trigger_softintr(bgep->drain_id);
	}

	BGE_DEBUG(("bge_restart($%p, %d) done", (void *)bgep, reset_phys));
	return (retval);
}


/*
 * ========== Nemo-required management entry points ==========
 */

#undef	BGE_DBG
#define	BGE_DBG		BGE_DBG_NEMO	/* debug flag for this code	*/

/*
 *	bge_m_stop() -- stop transmitting/receiving
 */
static void
bge_m_stop(void *arg)
{
	bge_t *bgep = arg;		/* private device info	*/
	send_ring_t *srp;
	uint32_t ring;

	BGE_TRACE(("bge_m_stop($%p)", arg));

	/*
	 * Just stop processing, then record new GLD state
	 */
	mutex_enter(bgep->genlock);
	if (!(bgep->progress & PROGRESS_INTR)) {
		/* can happen during autorecovery */
		bgep->bge_chip_state = BGE_CHIP_STOPPED;
	} else
		bge_stop(bgep);

	bgep->link_update_timer = 0;
	bgep->link_state = LINK_STATE_UNKNOWN;
	mac_link_update(bgep->mh, bgep->link_state);

	/*
	 * Free the possible tx buffers allocated in tx process.
	 */
#ifdef BGE_IPMI_ASF
	if (!bgep->asf_pseudostop)
#endif
	{
		rw_enter(bgep->errlock, RW_WRITER);
		for (ring = 0; ring < bgep->chipid.tx_rings; ++ring) {
			srp = &bgep->send[ring];
			mutex_enter(srp->tx_lock);
			if (srp->tx_array > 1)
				bge_free_txbuf_arrays(srp);
			mutex_exit(srp->tx_lock);
		}
		rw_exit(bgep->errlock);
	}
	bgep->bge_mac_state = BGE_MAC_STOPPED;
	BGE_DEBUG(("bge_m_stop($%p) done", arg));
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK)
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_UNAFFECTED);
	mutex_exit(bgep->genlock);
}

/*
 *	bge_m_start() -- start transmitting/receiving
 */
static int
bge_m_start(void *arg)
{
	bge_t *bgep = arg;		/* private device info	*/

	BGE_TRACE(("bge_m_start($%p)", arg));

	/*
	 * Start processing and record new GLD state
	 */
	mutex_enter(bgep->genlock);
	if (!(bgep->progress & PROGRESS_INTR)) {
		/* can happen during autorecovery */
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled) {
		if ((bgep->asf_status == ASF_STAT_RUN) &&
		    (bgep->asf_pseudostop)) {
			bgep->bge_mac_state = BGE_MAC_STARTED;
			mutex_exit(bgep->genlock);
			return (0);
		}
	}
	if (bge_reset(bgep, ASF_MODE_INIT) != DDI_SUCCESS) {
#else
	if (bge_reset(bgep) != DDI_SUCCESS) {
#endif
		(void) bge_check_acc_handle(bgep, bgep->cfg_handle);
		(void) bge_check_acc_handle(bgep, bgep->io_handle);
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	if (bge_start(bgep, B_TRUE) != DDI_SUCCESS) {
		(void) bge_check_acc_handle(bgep, bgep->cfg_handle);
		(void) bge_check_acc_handle(bgep, bgep->io_handle);
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	bgep->bge_mac_state = BGE_MAC_STARTED;
	BGE_DEBUG(("bge_m_start($%p) done", arg));

	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled) {
		if (bgep->asf_status != ASF_STAT_RUN) {
			/* start ASF heart beat */
			bgep->asf_timeout_id = timeout(bge_asf_heartbeat,
			    (void *)bgep,
			    drv_usectohz(BGE_ASF_HEARTBEAT_INTERVAL));
			bgep->asf_status = ASF_STAT_RUN;
		}
	}
#endif
	mutex_exit(bgep->genlock);

	return (0);
}
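
/*
 * Note the recurring pattern above: after any operation that touches
 * the chip, both the config-space and I/O access handles are checked
 * with bge_check_acc_handle(), and any detected fault is reported via
 * ddi_fm_service_impact() before genlock is dropped.
 */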

/*
 *	bge_unicst_set() -- set the physical network address
 */
static int
bge_unicst_set(void *arg, const uint8_t *macaddr, int slot)
{
	bge_t *bgep = arg;		/* private device info	*/

	BGE_TRACE(("bge_m_unicst_set($%p, %s)", arg,
	    ether_sprintf((void *)macaddr)));
	/*
	 * Remember the new current address in the driver state
	 * Sync the chip's idea of the address too ...
	 */
	mutex_enter(bgep->genlock);
	if (!(bgep->progress & PROGRESS_INTR)) {
		/* can happen during autorecovery */
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	ethaddr_copy(macaddr, bgep->curr_addr[slot].addr);
#ifdef BGE_IPMI_ASF
	if (bge_chip_sync(bgep, B_FALSE) == DDI_FAILURE) {
#else
	if (bge_chip_sync(bgep) == DDI_FAILURE) {
#endif
		(void) bge_check_acc_handle(bgep, bgep->cfg_handle);
		(void) bge_check_acc_handle(bgep, bgep->io_handle);
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled) {
		/*
		 * The above bge_chip_sync() function wrote the ethernet MAC
		 * addresses registers which destroyed the IPMI/ASF sideband.
		 * Here, we have to reset chip to make IPMI/ASF sideband work.
		 */
		if (bgep->asf_status == ASF_STAT_RUN) {
			/*
			 * We must stop ASF heart beat before bge_chip_stop(),
			 * otherwise some computers (ex. IBM HS20 blade
			 * server) may crash.
			 */
			bge_asf_update_status(bgep);
			bge_asf_stop_timer(bgep);
			bgep->asf_status = ASF_STAT_STOP;

			bge_asf_pre_reset_operations(bgep, BGE_INIT_RESET);
		}
		bge_chip_stop(bgep, B_FALSE);

		if (bge_restart(bgep, B_FALSE) == DDI_FAILURE) {
			(void) bge_check_acc_handle(bgep, bgep->cfg_handle);
			(void) bge_check_acc_handle(bgep, bgep->io_handle);
			ddi_fm_service_impact(bgep->devinfo,
			    DDI_SERVICE_DEGRADED);
			mutex_exit(bgep->genlock);
			return (EIO);
		}

		/*
		 * Start our ASF heartbeat counter as soon as possible.
		 */
		if (bgep->asf_status != ASF_STAT_RUN) {
			/* start ASF heart beat */
			bgep->asf_timeout_id = timeout(bge_asf_heartbeat,
			    (void *)bgep,
			    drv_usectohz(BGE_ASF_HEARTBEAT_INTERVAL));
			bgep->asf_status = ASF_STAT_RUN;
		}
	}
#endif
	BGE_DEBUG(("bge_m_unicst_set($%p) done", arg));
	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	mutex_exit(bgep->genlock);

	return (0);
}

extern void bge_wake_factotum(bge_t *);

static boolean_t
bge_param_locked(mac_prop_id_t pr_num)
{
	/*
	 * All adv_* parameters are locked (read-only) while
	 * the device is in any sort of loopback mode ...
	 */
	switch (pr_num) {
	case MAC_PROP_ADV_1000FDX_CAP:
	case MAC_PROP_EN_1000FDX_CAP:
	case MAC_PROP_ADV_1000HDX_CAP:
	case MAC_PROP_EN_1000HDX_CAP:
	case MAC_PROP_ADV_100FDX_CAP:
	case MAC_PROP_EN_100FDX_CAP:
	case MAC_PROP_ADV_100HDX_CAP:
	case MAC_PROP_EN_100HDX_CAP:
	case MAC_PROP_ADV_10FDX_CAP:
	case MAC_PROP_EN_10FDX_CAP:
	case MAC_PROP_ADV_10HDX_CAP:
	case MAC_PROP_EN_10HDX_CAP:
	case MAC_PROP_AUTONEG:
	case MAC_PROP_FLOWCTRL:
		return (B_TRUE);
	}
	return (B_FALSE);
}

/*
 * callback functions for set/get of properties
 */
static int
bge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
    uint_t pr_valsize, const void *pr_val)
{
	bge_t *bgep = barg;
	int err = 0;
	uint32_t cur_mtu, new_mtu;
	uint_t maxsdu;
	link_flowctrl_t fl;

	mutex_enter(bgep->genlock);
	if (bgep->param_loop_mode != BGE_LOOP_NONE &&
	    bge_param_locked(pr_num)) {
		/*
		 * All adv_* parameters are locked (read-only)
		 * while the device is in any sort of loopback mode.
		 */
		mutex_exit(bgep->genlock);
		return (EBUSY);
	}
	if ((bgep->chipid.flags & CHIP_FLAG_SERDES) &&
	    ((pr_num == MAC_PROP_EN_100FDX_CAP) ||
	    (pr_num == MAC_PROP_EN_100HDX_CAP) ||
	    (pr_num == MAC_PROP_EN_10FDX_CAP) ||
	    (pr_num == MAC_PROP_EN_10HDX_CAP))) {
		/*
		 * these properties are read/write on copper,
		 * read-only and 0 on serdes
		 */
		mutex_exit(bgep->genlock);
		return (ENOTSUP);
	}
	if (DEVICE_5906_SERIES_CHIPSETS(bgep) &&
	    ((pr_num == MAC_PROP_EN_1000FDX_CAP) ||
	    (pr_num == MAC_PROP_EN_1000HDX_CAP))) {
		mutex_exit(bgep->genlock);
		return (ENOTSUP);
	}

	switch (pr_num) {
	case MAC_PROP_EN_1000FDX_CAP:
		bgep->param_en_1000fdx = *(uint8_t *)pr_val;
		bgep->param_adv_1000fdx = *(uint8_t *)pr_val;
		goto reprogram;
	case MAC_PROP_EN_1000HDX_CAP:
		bgep->param_en_1000hdx = *(uint8_t *)pr_val;
		bgep->param_adv_1000hdx = *(uint8_t *)pr_val;
		goto reprogram;
	case MAC_PROP_EN_100FDX_CAP:
		bgep->param_en_100fdx = *(uint8_t *)pr_val;
		bgep->param_adv_100fdx = *(uint8_t *)pr_val;
		goto reprogram;
	case MAC_PROP_EN_100HDX_CAP:
		bgep->param_en_100hdx = *(uint8_t *)pr_val;
		bgep->param_adv_100hdx = *(uint8_t *)pr_val;
		goto reprogram;
	case MAC_PROP_EN_10FDX_CAP:
		bgep->param_en_10fdx = *(uint8_t *)pr_val;
		bgep->param_adv_10fdx = *(uint8_t *)pr_val;
		goto reprogram;
	case MAC_PROP_EN_10HDX_CAP:
		bgep->param_en_10hdx = *(uint8_t *)pr_val;
		bgep->param_adv_10hdx = *(uint8_t *)pr_val;
reprogram:
		if (err == 0 && bge_reprogram(bgep) == IOC_INVAL)
			err = EINVAL;
		break;
	case MAC_PROP_ADV_1000FDX_CAP:
	case MAC_PROP_ADV_1000HDX_CAP:
	case MAC_PROP_ADV_100FDX_CAP:
	case MAC_PROP_ADV_100HDX_CAP:
	case MAC_PROP_ADV_10FDX_CAP:
	case MAC_PROP_ADV_10HDX_CAP:
	case MAC_PROP_STATUS:
	case MAC_PROP_SPEED:
	case MAC_PROP_DUPLEX:
		err = ENOTSUP;	/* read-only prop. Can't set this */
		break;
	case MAC_PROP_AUTONEG:
		bgep->param_adv_autoneg = *(uint8_t *)pr_val;
		if (bge_reprogram(bgep) == IOC_INVAL)
			err = EINVAL;
		break;
	case MAC_PROP_MTU:
		cur_mtu = bgep->chipid.default_mtu;
		bcopy(pr_val, &new_mtu, sizeof (new_mtu));

		if (new_mtu == cur_mtu) {
			err = 0;
			break;
		}
		if (new_mtu < BGE_DEFAULT_MTU ||
		    new_mtu > BGE_MAXIMUM_MTU) {
			err = EINVAL;
			break;
		}
		if ((new_mtu > BGE_DEFAULT_MTU) &&
		    (bgep->chipid.flags & CHIP_FLAG_NO_JUMBO)) {
			err = EINVAL;
			break;
		}
		if (bgep->bge_mac_state == BGE_MAC_STARTED) {
			err = EBUSY;
			break;
		}
		bgep->chipid.default_mtu = new_mtu;
		if (bge_chip_id_init(bgep)) {
			err = EINVAL;
			break;
		}
		maxsdu = bgep->chipid.ethmax_size -
		    sizeof (struct ether_header);
		err = mac_maxsdu_update(bgep->mh, maxsdu);
		if (err == 0) {
			bgep->bge_dma_error = B_TRUE;
			bgep->manual_reset = B_TRUE;
			bge_chip_stop(bgep, B_TRUE);
			bge_wake_factotum(bgep);
			err = 0;
		}
		break;
	case MAC_PROP_FLOWCTRL:
		bcopy(pr_val, &fl, sizeof (fl));
		switch (fl) {
		default:
			err = ENOTSUP;
			break;
		case LINK_FLOWCTRL_NONE:
			bgep->param_adv_pause = 0;
			bgep->param_adv_asym_pause = 0;

			bgep->param_link_rx_pause = B_FALSE;
			bgep->param_link_tx_pause = B_FALSE;
			break;
		case LINK_FLOWCTRL_RX:
			bgep->param_adv_pause = 1;
			bgep->param_adv_asym_pause = 1;

			bgep->param_link_rx_pause = B_TRUE;
			bgep->param_link_tx_pause = B_FALSE;
			break;
		case LINK_FLOWCTRL_TX:
			bgep->param_adv_pause = 0;
			bgep->param_adv_asym_pause = 1;

			bgep->param_link_rx_pause = B_FALSE;
			bgep->param_link_tx_pause = B_TRUE;
			break;
		case LINK_FLOWCTRL_BI:
			bgep->param_adv_pause = 1;
			bgep->param_adv_asym_pause = 0;

			bgep->param_link_rx_pause = B_TRUE;
			bgep->param_link_tx_pause = B_TRUE;
			break;
		}

		if (err == 0) {
			if (bge_reprogram(bgep) == IOC_INVAL)
				err = EINVAL;
		}

		break;
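		/*
		 * Summary of the advertised pause bits set above:
		 *	NONE: pause=0 asym=0	RX: pause=1 asym=1
		 *	TX:   pause=0 asym=1	BI: pause=1 asym=0
		 */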
	case MAC_PROP_PRIVATE:
		err = bge_set_priv_prop(bgep, pr_name, pr_valsize,
		    pr_val);
		break;
	default:
		err = ENOTSUP;
		break;
	}
	mutex_exit(bgep->genlock);
	return (err);
}

/* ARGSUSED */
static int
bge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
    uint_t pr_flags, uint_t pr_valsize, void *pr_val, uint_t *perm)
{
	bge_t *bgep = barg;
	int err = 0;
	link_flowctrl_t fl;
	uint64_t speed;
	int flags = bgep->chipid.flags;
	boolean_t is_default = (pr_flags & MAC_PROP_DEFAULT);

	if (pr_valsize == 0)
		return (EINVAL);
	bzero(pr_val, pr_valsize);

	*perm = MAC_PROP_PERM_RW;

	mutex_enter(bgep->genlock);
	if ((bgep->param_loop_mode != BGE_LOOP_NONE &&
	    bge_param_locked(pr_num)) ||
	    ((bgep->chipid.flags & CHIP_FLAG_SERDES) &&
	    ((pr_num == MAC_PROP_EN_100FDX_CAP) ||
	    (pr_num == MAC_PROP_EN_100HDX_CAP) ||
	    (pr_num == MAC_PROP_EN_10FDX_CAP) ||
	    (pr_num == MAC_PROP_EN_10HDX_CAP))) ||
	    (DEVICE_5906_SERIES_CHIPSETS(bgep) &&
	    ((pr_num == MAC_PROP_EN_1000FDX_CAP) ||
	    (pr_num == MAC_PROP_EN_1000HDX_CAP))))
		*perm = MAC_PROP_PERM_READ;
	mutex_exit(bgep->genlock);

	switch (pr_num) {
	case MAC_PROP_DUPLEX:
		*perm = MAC_PROP_PERM_READ;
		if (pr_valsize < sizeof (link_duplex_t))
			return (EINVAL);
		bcopy(&bgep->param_link_duplex, pr_val,
		    sizeof (link_duplex_t));
		break;
	case MAC_PROP_SPEED:
		*perm = MAC_PROP_PERM_READ;
		if (pr_valsize < sizeof (speed))
			return (EINVAL);
		speed = bgep->param_link_speed * 1000000ull;
		bcopy(&speed, pr_val, sizeof (speed));
		break;
	case MAC_PROP_STATUS:
		*perm = MAC_PROP_PERM_READ;
		if (pr_valsize < sizeof (link_state_t))
			return (EINVAL);
		bcopy(&bgep->link_state, pr_val,
		    sizeof (link_state_t));
		break;
	case MAC_PROP_AUTONEG:
		if (is_default)
			*(uint8_t *)pr_val = 1;
		else
			*(uint8_t *)pr_val = bgep->param_adv_autoneg;
		break;
	case MAC_PROP_FLOWCTRL:
		if (pr_valsize < sizeof (fl))
			return (EINVAL);
		if (is_default) {
			fl = LINK_FLOWCTRL_BI;
			bcopy(&fl, pr_val, sizeof (fl));
			break;
		}

		if (bgep->param_link_rx_pause &&
		    !bgep->param_link_tx_pause)
			fl = LINK_FLOWCTRL_RX;

		if (!bgep->param_link_rx_pause &&
		    !bgep->param_link_tx_pause)
			fl = LINK_FLOWCTRL_NONE;

		if (!bgep->param_link_rx_pause &&
		    bgep->param_link_tx_pause)
			fl = LINK_FLOWCTRL_TX;

		if (bgep->param_link_rx_pause &&
		    bgep->param_link_tx_pause)
			fl = LINK_FLOWCTRL_BI;
		bcopy(&fl, pr_val, sizeof (fl));
		break;
	case MAC_PROP_ADV_1000FDX_CAP:
		*perm = MAC_PROP_PERM_READ;
		if (is_default) {
			if (DEVICE_5906_SERIES_CHIPSETS(bgep))
				*(uint8_t *)pr_val = 0;
			else
				*(uint8_t *)pr_val = 1;
		} else
			*(uint8_t *)pr_val = bgep->param_adv_1000fdx;
		break;
	case MAC_PROP_EN_1000FDX_CAP:
		if (is_default) {
			if (DEVICE_5906_SERIES_CHIPSETS(bgep))
				*(uint8_t *)pr_val = 0;
			else
				*(uint8_t *)pr_val = 1;
		} else
			*(uint8_t *)pr_val = bgep->param_en_1000fdx;
		break;
	case MAC_PROP_ADV_1000HDX_CAP:
		*perm = MAC_PROP_PERM_READ;
		if (is_default) {
			if (DEVICE_5906_SERIES_CHIPSETS(bgep))
				*(uint8_t *)pr_val = 0;
			else
				*(uint8_t *)pr_val = 1;
		} else
			*(uint8_t *)pr_val = bgep->param_adv_1000hdx;
		break;
	case MAC_PROP_EN_1000HDX_CAP:
		if (is_default) {
			if (DEVICE_5906_SERIES_CHIPSETS(bgep))
				*(uint8_t *)pr_val = 0;
			else
				*(uint8_t *)pr_val = 1;
		} else
			*(uint8_t *)pr_val = bgep->param_en_1000hdx;
		break;
	case MAC_PROP_ADV_100FDX_CAP:
		*perm = MAC_PROP_PERM_READ;
		if (is_default) {
			*(uint8_t *)pr_val =
			    ((flags & CHIP_FLAG_SERDES) ? 0 : 1);
		} else {
			*(uint8_t *)pr_val = bgep->param_adv_100fdx;
		}
		break;
	case MAC_PROP_EN_100FDX_CAP:
		if (is_default) {
			*(uint8_t *)pr_val =
			    ((flags & CHIP_FLAG_SERDES) ? 0 : 1);
		} else {
			*(uint8_t *)pr_val = bgep->param_en_100fdx;
		}
		break;
	case MAC_PROP_ADV_100HDX_CAP:
		*perm = MAC_PROP_PERM_READ;
		if (is_default) {
			*(uint8_t *)pr_val =
			    ((flags & CHIP_FLAG_SERDES) ? 0 : 1);
		} else {
			*(uint8_t *)pr_val = bgep->param_adv_100hdx;
		}
		break;
	case MAC_PROP_EN_100HDX_CAP:
		if (is_default) {
			*(uint8_t *)pr_val =
			    ((flags & CHIP_FLAG_SERDES) ? 0 : 1);
		} else {
			*(uint8_t *)pr_val = bgep->param_en_100hdx;
		}
		break;
	case MAC_PROP_ADV_10FDX_CAP:
		*perm = MAC_PROP_PERM_READ;
		if (is_default) {
			*(uint8_t *)pr_val =
			    ((flags & CHIP_FLAG_SERDES) ? 0 : 1);
		} else {
			*(uint8_t *)pr_val = bgep->param_adv_10fdx;
		}
		break;
	case MAC_PROP_EN_10FDX_CAP:
		if (is_default) {
			*(uint8_t *)pr_val =
			    ((flags & CHIP_FLAG_SERDES) ? 0 : 1);
		} else {
			*(uint8_t *)pr_val = bgep->param_en_10fdx;
		}
		break;
	case MAC_PROP_ADV_10HDX_CAP:
		*perm = MAC_PROP_PERM_READ;
		if (is_default) {
			*(uint8_t *)pr_val =
			    ((flags & CHIP_FLAG_SERDES) ? 0 : 1);
		} else {
			*(uint8_t *)pr_val = bgep->param_adv_10hdx;
		}
		break;
	case MAC_PROP_EN_10HDX_CAP:
		if (is_default) {
			*(uint8_t *)pr_val =
			    ((flags & CHIP_FLAG_SERDES) ? 0 : 1);
		} else {
			*(uint8_t *)pr_val = bgep->param_en_10hdx;
		}
		break;
	case MAC_PROP_ADV_100T4_CAP:
	case MAC_PROP_EN_100T4_CAP:
		*perm = MAC_PROP_PERM_READ;
		*(uint8_t *)pr_val = 0;
		break;
	case MAC_PROP_PRIVATE:
		err = bge_get_priv_prop(bgep, pr_name, pr_flags,
		    pr_valsize, pr_val);
		return (err);
	case MAC_PROP_MTU: {
		mac_propval_range_t range;

		if (!(pr_flags & MAC_PROP_POSSIBLE))
			return (ENOTSUP);
		if (pr_valsize < sizeof (mac_propval_range_t))
			return (EINVAL);
		range.mpr_count = 1;
		range.mpr_type = MAC_PROPVAL_UINT32;
		range.range_uint32[0].mpur_min =
		    range.range_uint32[0].mpur_max = BGE_DEFAULT_MTU;
		if (bge_jumbo_enable && !(flags & CHIP_FLAG_NO_JUMBO))
			range.range_uint32[0].mpur_max =
			    BGE_MAXIMUM_MTU;
		bcopy(&range, pr_val, sizeof (range));
		break;
	}
	default:
		return (ENOTSUP);
	}
	return (0);
}

/* ARGSUSED */
static int
bge_set_priv_prop(bge_t *bgep, const char *pr_name, uint_t pr_valsize,
    const void *pr_val)
{
	int err = 0;
	long result;

	if (strcmp(pr_name, "_adv_pause_cap") == 0) {
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
		if (result > 1 || result < 0) {
			err = EINVAL;
		} else {
			bgep->param_adv_pause = (uint32_t)result;
			if (bge_reprogram(bgep) == IOC_INVAL)
				err = EINVAL;
		}
		return (err);
	}
	if (strcmp(pr_name, "_adv_asym_pause_cap") == 0) {
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
		if (result > 1 || result < 0) {
			err = EINVAL;
		} else {
			bgep->param_adv_asym_pause = (uint32_t)result;
			if (bge_reprogram(bgep) == IOC_INVAL)
				err = EINVAL;
		}
		return (err);
	}
	if (strcmp(pr_name, "_drain_max") == 0) {

		/*
		 * On the Tx side, we need to update the h/w register for
		 * real packet transmission per packet. The drain_max
		 * parameter is used to reduce the register access. This
		 * parameter controls the max number of packets that we
		 * will hold before updating the bge h/w to trigger h/w
		 * transmit. The bge chipset usually has a max of 512 Tx
		 * descriptors, thus the upper bound on drain_max is 512.
		 */
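		/*
		 * Like the other private properties, this is normally
		 * tuned from userland, e.g. (assuming a standard GLDv3
		 * setup):  dladm set-linkprop -p _drain_max=64 bge0
		 */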
		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
		if (result > 512 || result < 1)
			err = EINVAL;
		else {
			bgep->param_drain_max = (uint32_t)result;
			if (bge_reprogram(bgep) == IOC_INVAL)
				err = EINVAL;
		}
		return (err);
	}
	if (strcmp(pr_name, "_msi_cnt") == 0) {

		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
		if (result > 7 || result < 0)
			err = EINVAL;
		else {
			bgep->param_msi_cnt = (uint32_t)result;
			if (bge_reprogram(bgep) == IOC_INVAL)
				err = EINVAL;
		}
		return (err);
	}
	if (strcmp(pr_name, "_rx_intr_coalesce_blank_time") == 0) {
		if (ddi_strtol(pr_val, (char **)NULL, 0, &result) != 0)
			return (EINVAL);
		if (result < 0)
			err = EINVAL;
		else {
			bgep->chipid.rx_ticks_norm = (uint32_t)result;
			bge_chip_coalesce_update(bgep);
		}
		return (err);
	}

	if (strcmp(pr_name, "_rx_intr_coalesce_pkt_cnt") == 0) {
		if (ddi_strtol(pr_val, (char **)NULL, 0, &result) != 0)
			return (EINVAL);

		if (result < 0)
			err = EINVAL;
		else {
			bgep->chipid.rx_count_norm = (uint32_t)result;
			bge_chip_coalesce_update(bgep);
		}
		return (err);
	}
	if (strcmp(pr_name, "_tx_intr_coalesce_blank_time") == 0) {
		if (ddi_strtol(pr_val, (char **)NULL, 0, &result) != 0)
			return (EINVAL);
		if (result < 0)
			err = EINVAL;
		else {
			bgep->chipid.tx_ticks_norm = (uint32_t)result;
			bge_chip_coalesce_update(bgep);
		}
		return (err);
	}

	if (strcmp(pr_name, "_tx_intr_coalesce_pkt_cnt") == 0) {
		if (ddi_strtol(pr_val, (char **)NULL, 0, &result) != 0)
			return (EINVAL);

		if (result < 0)
			err = EINVAL;
		else {
			bgep->chipid.tx_count_norm = (uint32_t)result;
			bge_chip_coalesce_update(bgep);
		}
		return (err);
	}
	return (ENOTSUP);
}

static int
bge_get_priv_prop(bge_t *bge, const char *pr_name, uint_t pr_flags,
    uint_t pr_valsize, void *pr_val)
{
	int err = ENOTSUP;
	boolean_t is_default = (pr_flags & MAC_PROP_DEFAULT);
	int value;

	if (strcmp(pr_name, "_adv_pause_cap") == 0) {
		value = (is_default? 1 : bge->param_adv_pause);
		err = 0;
		goto done;
	}
	if (strcmp(pr_name, "_adv_asym_pause_cap") == 0) {
		value = (is_default? 1 : bge->param_adv_asym_pause);
		err = 0;
		goto done;
	}
	if (strcmp(pr_name, "_drain_max") == 0) {
		value = (is_default? 64 : bge->param_drain_max);
		err = 0;
		goto done;
	}
	if (strcmp(pr_name, "_msi_cnt") == 0) {
		value = (is_default? 0 : bge->param_msi_cnt);
		err = 0;
		goto done;
	}

	if (strcmp(pr_name, "_intr_coalesce_blank_time") == 0) {
		value = (is_default? bge_rx_ticks_norm :
		    bge->chipid.rx_ticks_norm);
		err = 0;
		goto done;
	}

	if (strcmp(pr_name, "_intr_coalesce_pkt_cnt") == 0) {
		value = (is_default? bge_rx_count_norm :
		    bge->chipid.rx_count_norm);
		err = 0;
		goto done;
	}

done:
	if (err == 0) {
		(void) snprintf(pr_val, pr_valsize, "%d", value);
	}
	return (err);
}

/*
 * Compute the index of the required bit in the multicast hash map.
 * This must mirror the way the hardware actually does it!
 * See Broadcom document 570X-PG102-R page 125.
 */
static uint32_t
bge_hash_index(const uint8_t *mca)
{
	uint32_t hash;

	CRC32(hash, mca, ETHERADDRL, -1U, crc32_table);

	return (hash);
}
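
/*
 * The caller reduces the CRC to a table index and then to a word/bit
 * pair; e.g. (illustration only) an index of 75 selects bit 75 % 32,
 * i.e. mask 1 << 11, within word 75 / 32 = 2 of mcast_hash[].
 */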

/*
 *	bge_m_multicst() -- enable/disable a multicast address
 */
static int
bge_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
{
	bge_t *bgep = arg;		/* private device info	*/
	uint32_t hash;
	uint32_t index;
	uint32_t word;
	uint32_t bit;
	uint8_t *refp;

	BGE_TRACE(("bge_m_multicst($%p, %s, %s)", arg,
	    (add) ? "add" : "remove", ether_sprintf((void *)mca)));

	/*
	 * Precalculate all required masks, pointers etc ...
	 */
	hash = bge_hash_index(mca);
	index = hash % BGE_HASH_TABLE_SIZE;
	word = index/32u;
	bit = 1 << (index % 32u);
	refp = &bgep->mcast_refs[index];

	BGE_DEBUG(("bge_m_multicst: hash 0x%x index %d (%d:0x%x) = %d",
	    hash, index, word, bit, *refp));

	/*
	 * We must set the appropriate bit in the hash map (and the
	 * corresponding h/w register) when the refcount goes from 0
	 * to >0, and clear it when the last ref goes away (refcount
	 * goes from >0 back to 0). If we change the hash map, we
	 * must also update the chip's hardware map registers.
	 */
	mutex_enter(bgep->genlock);
	if (!(bgep->progress & PROGRESS_INTR)) {
		/* can happen during autorecovery */
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	if (add) {
		if ((*refp)++ == 0) {
			bgep->mcast_hash[word] |= bit;
#ifdef BGE_IPMI_ASF
			if (bge_chip_sync(bgep, B_TRUE) == DDI_FAILURE) {
#else
			if (bge_chip_sync(bgep) == DDI_FAILURE) {
#endif
				(void) bge_check_acc_handle(bgep,
				    bgep->cfg_handle);
				(void) bge_check_acc_handle(bgep,
				    bgep->io_handle);
				ddi_fm_service_impact(bgep->devinfo,
				    DDI_SERVICE_DEGRADED);
				mutex_exit(bgep->genlock);
				return (EIO);
			}
		}
	} else {
		if (--(*refp) == 0) {
			bgep->mcast_hash[word] &= ~bit;
#ifdef BGE_IPMI_ASF
			if (bge_chip_sync(bgep, B_TRUE) == DDI_FAILURE) {
#else
			if (bge_chip_sync(bgep) == DDI_FAILURE) {
#endif
				(void) bge_check_acc_handle(bgep,
				    bgep->cfg_handle);
				(void) bge_check_acc_handle(bgep,
				    bgep->io_handle);
				ddi_fm_service_impact(bgep->devinfo,
				    DDI_SERVICE_DEGRADED);
				mutex_exit(bgep->genlock);
				return (EIO);
			}
		}
	}
	BGE_DEBUG(("bge_m_multicst($%p) done", arg));
	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	mutex_exit(bgep->genlock);

	return (0);
}

/*
 *	bge_m_promisc() -- set or reset promiscuous mode on the board
 *
 *	Program the hardware to enable/disable promiscuous and/or
 *	receive-all-multicast modes.
 */
static int
bge_m_promisc(void *arg, boolean_t on)
{
	bge_t *bgep = arg;

	BGE_TRACE(("bge_m_promisc_set($%p, %d)", arg, on));

	/*
	 * Store MAC layer specified mode and pass to chip layer to update h/w
	 */
	mutex_enter(bgep->genlock);
	if (!(bgep->progress & PROGRESS_INTR)) {
		/* can happen during autorecovery */
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	bgep->promisc = on;
#ifdef BGE_IPMI_ASF
	if (bge_chip_sync(bgep, B_TRUE) == DDI_FAILURE) {
#else
	if (bge_chip_sync(bgep) == DDI_FAILURE) {
#endif
		(void) bge_check_acc_handle(bgep, bgep->cfg_handle);
		(void) bge_check_acc_handle(bgep, bgep->io_handle);
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	BGE_DEBUG(("bge_m_promisc_set($%p) done", arg));
	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	mutex_exit(bgep->genlock);
	return (0);
}

/*
 * Find the slot for the specified unicast address
 */
int
bge_unicst_find(bge_t *bgep, const uint8_t *mac_addr)
{
	int slot;

	ASSERT(mutex_owned(bgep->genlock));

	for (slot = 0; slot < bgep->unicst_addr_total; slot++) {
		if (bcmp(bgep->curr_addr[slot].addr, mac_addr, ETHERADDRL) == 0)
			return (slot);
	}

	return (-1);
}

/*
 * Programs the classifier to start steering packets matching 'mac_addr' to
 * the specified ring 'arg'.
 */
static int
bge_addmac(void *arg, const uint8_t *mac_addr)
{
	recv_ring_t *rrp = (recv_ring_t *)arg;
	bge_t *bgep = rrp->bgep;
	bge_recv_rule_t *rulep = bgep->recv_rules;
	bge_rule_info_t *rinfop = NULL;
	uint8_t ring = (uint8_t)(rrp - bgep->recv) + 1;
	int i;
	uint16_t tmp16;
	uint32_t tmp32;
	int slot;
	int err;

	mutex_enter(bgep->genlock);
	if (bgep->unicst_addr_avail == 0) {
		mutex_exit(bgep->genlock);
		return (ENOSPC);
	}

	/*
	 * First add the unicast address to an available slot.
	 */
	slot = bge_unicst_find(bgep, mac_addr);
	ASSERT(slot == -1);

	for (slot = 0; slot < bgep->unicst_addr_total; slot++) {
		if (!bgep->curr_addr[slot].set) {
			bgep->curr_addr[slot].set = B_TRUE;
			break;
		}
	}

	ASSERT(slot < bgep->unicst_addr_total);
	bgep->unicst_addr_avail--;
	mutex_exit(bgep->genlock);

	if ((err = bge_unicst_set(bgep, mac_addr, slot)) != 0)
		goto fail;

	/* A rule is already here. Deny this. */
	if (rrp->mac_addr_rule != NULL) {
		err = ether_cmp(mac_addr, rrp->mac_addr_val) ? EEXIST : EBUSY;
		goto fail;
	}

	/*
	 * Allocate a bge_rule_info_t to keep track of which rule slots
	 * are being used.
	 */
	rinfop = kmem_zalloc(sizeof (bge_rule_info_t), KM_NOSLEEP);
	if (rinfop == NULL) {
		err = ENOMEM;
		goto fail;
	}

	/*
	 * Look for the starting slot to place the rules.
	 * The two slots we reserve must be contiguous.
	 */
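	/*
	 * The first rule matches the top four bytes of the address and
	 * the second the bottom two; e.g. (illustration only) the MAC
	 * 00:11:22:33:44:55 programs mask values 0x00112233 and
	 * 0xffff4455 into slots <i> and <i+1> below.
	 */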
	for (i = 0; i + 1 < RECV_RULES_NUM_MAX; i++)
		if ((rulep[i].control & RECV_RULE_CTL_ENABLE) == 0 &&
		    (rulep[i+1].control & RECV_RULE_CTL_ENABLE) == 0)
			break;

	ASSERT(i + 1 < RECV_RULES_NUM_MAX);

	bcopy(mac_addr, &tmp32, sizeof (tmp32));
	rulep[i].mask_value = ntohl(tmp32);
	rulep[i].control = RULE_DEST_MAC_1(ring) | RECV_RULE_CTL_AND;
	bge_reg_put32(bgep, RECV_RULE_MASK_REG(i), rulep[i].mask_value);
	bge_reg_put32(bgep, RECV_RULE_CONTROL_REG(i), rulep[i].control);

	bcopy(mac_addr + 4, &tmp16, sizeof (tmp16));
	rulep[i+1].mask_value = 0xffff0000 | ntohs(tmp16);
	rulep[i+1].control = RULE_DEST_MAC_2(ring);
	bge_reg_put32(bgep, RECV_RULE_MASK_REG(i+1), rulep[i+1].mask_value);
	bge_reg_put32(bgep, RECV_RULE_CONTROL_REG(i+1), rulep[i+1].control);
	rinfop->start = i;
	rinfop->count = 2;

	rrp->mac_addr_rule = rinfop;
	bcopy(mac_addr, rrp->mac_addr_val, ETHERADDRL);

	return (0);

fail:
	/* Clear the address just set */
	(void) bge_unicst_set(bgep, zero_addr, slot);
	mutex_enter(bgep->genlock);
	bgep->curr_addr[slot].set = B_FALSE;
	bgep->unicst_addr_avail++;
	mutex_exit(bgep->genlock);

	return (err);
}

/*
 * Stop classifying packets matching the MAC address to the specified ring.
 */
static int
bge_remmac(void *arg, const uint8_t *mac_addr)
{
	recv_ring_t *rrp = (recv_ring_t *)arg;
	bge_t *bgep = rrp->bgep;
	bge_recv_rule_t *rulep = bgep->recv_rules;
	bge_rule_info_t *rinfop = rrp->mac_addr_rule;
	int start;
	int slot;
	int err;

	/*
	 * Remove the MAC address from its slot.
	 */
	mutex_enter(bgep->genlock);
	slot = bge_unicst_find(bgep, mac_addr);
	if (slot == -1) {
		mutex_exit(bgep->genlock);
		return (EINVAL);
	}

	ASSERT(bgep->curr_addr[slot].set);
	mutex_exit(bgep->genlock);

	if ((err = bge_unicst_set(bgep, zero_addr, slot)) != 0)
		return (err);

	if (rinfop == NULL || ether_cmp(mac_addr, rrp->mac_addr_val) != 0)
		return (EINVAL);

	start = rinfop->start;
	rulep[start].mask_value = 0;
	rulep[start].control = 0;
	bge_reg_put32(bgep, RECV_RULE_MASK_REG(start), rulep[start].mask_value);
	bge_reg_put32(bgep, RECV_RULE_CONTROL_REG(start), rulep[start].control);
	start++;
	rulep[start].mask_value = 0;
	rulep[start].control = 0;
	bge_reg_put32(bgep, RECV_RULE_MASK_REG(start), rulep[start].mask_value);
	bge_reg_put32(bgep, RECV_RULE_CONTROL_REG(start), rulep[start].control);

	kmem_free(rinfop, sizeof (bge_rule_info_t));
	rrp->mac_addr_rule = NULL;
	bzero(rrp->mac_addr_val, ETHERADDRL);

	mutex_enter(bgep->genlock);
	bgep->curr_addr[slot].set = B_FALSE;
	bgep->unicst_addr_avail++;
	mutex_exit(bgep->genlock);

	return (0);
}

static int
bge_flag_intr_enable(mac_intr_handle_t ih)
{
	recv_ring_t *rrp = (recv_ring_t *)ih;
	bge_t *bgep = rrp->bgep;

	mutex_enter(bgep->genlock);
	rrp->poll_flag = 0;
	mutex_exit(bgep->genlock);

	return (0);
}

static int
bge_flag_intr_disable(mac_intr_handle_t ih)
{
	recv_ring_t *rrp = (recv_ring_t *)ih;
	bge_t *bgep = rrp->bgep;

	mutex_enter(bgep->genlock);
	rrp->poll_flag = 1;
	mutex_exit(bgep->genlock);

	return (0);
}
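
/*
 * The MAC layer calls the mi_enable/mi_disable entry points above when
 * it moves a ring between interrupt-driven and polled operation; the
 * receive path is expected to check rrp->poll_flag and leave packets
 * for bge_poll_ring() while polling is in effect.
 */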

static int
bge_ring_start(mac_ring_driver_t rh, uint64_t mr_gen_num)
{
	recv_ring_t *rx_ring;

	rx_ring = (recv_ring_t *)rh;
	mutex_enter(rx_ring->rx_lock);
	rx_ring->ring_gen_num = mr_gen_num;
	mutex_exit(rx_ring->rx_lock);
	return (0);
}


/*
 * Callback function for MAC layer to register all rings
 * for given ring_group, noted by rg_index.
 */
void
bge_fill_ring(void *arg, mac_ring_type_t rtype, const int rg_index,
    const int index, mac_ring_info_t *infop, mac_ring_handle_t rh)
{
	bge_t *bgep = arg;
	mac_intr_t *mintr;

	switch (rtype) {
	case MAC_RING_TYPE_RX: {
		recv_ring_t *rx_ring;
		ASSERT(rg_index >= 0 && rg_index < MIN(bgep->chipid.rx_rings,
		    MAC_ADDRESS_REGS_MAX) && index == 0);

		rx_ring = &bgep->recv[rg_index];
		rx_ring->ring_handle = rh;

		infop->mri_driver = (mac_ring_driver_t)rx_ring;
		infop->mri_start = bge_ring_start;
		infop->mri_stop = NULL;
		infop->mri_poll = bge_poll_ring;

		mintr = &infop->mri_intr;
		mintr->mi_handle = (mac_intr_handle_t)rx_ring;
		mintr->mi_enable = bge_flag_intr_enable;
		mintr->mi_disable = bge_flag_intr_disable;

		break;
	}
	case MAC_RING_TYPE_TX:
	default:
		ASSERT(0);
		break;
	}
}

/*
 * Fill infop passed as argument
 * fill in respective ring_group info
 * Each group has a single ring in it. We keep it simple
 * and use the same internal handle for rings and groups.
 */
void
bge_fill_group(void *arg, mac_ring_type_t rtype, const int rg_index,
    mac_group_info_t *infop, mac_group_handle_t gh)
{
	bge_t *bgep = arg;

	switch (rtype) {
	case MAC_RING_TYPE_RX: {
		recv_ring_t *rx_ring;

		ASSERT(rg_index >= 0 && rg_index < MIN(bgep->chipid.rx_rings,
		    MAC_ADDRESS_REGS_MAX));
		rx_ring = &bgep->recv[rg_index];
		rx_ring->ring_group_handle = gh;

		infop->mgi_driver = (mac_group_driver_t)rx_ring;
		infop->mgi_start = NULL;
		infop->mgi_stop = NULL;
		infop->mgi_addmac = bge_addmac;
		infop->mgi_remmac = bge_remmac;
		infop->mgi_count = 1;
		break;
	}
	case MAC_RING_TYPE_TX:
	default:
		ASSERT(0);
		break;
	}
}

/*ARGSUSED*/
static boolean_t
bge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
{
	bge_t *bgep = arg;

	switch (cap) {
	case MAC_CAPAB_HCKSUM: {
		uint32_t *txflags = cap_data;

		*txflags = HCKSUM_INET_FULL_V4 | HCKSUM_IPHDRCKSUM;
		break;
	}
	case MAC_CAPAB_RINGS: {
		mac_capab_rings_t *cap_rings = cap_data;

		/* Temporarily disable multiple tx rings. */
		if (cap_rings->mr_type != MAC_RING_TYPE_RX)
			return (B_FALSE);

		cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
		cap_rings->mr_rnum = cap_rings->mr_gnum =
		    MIN(bgep->chipid.rx_rings, MAC_ADDRESS_REGS_MAX);
		cap_rings->mr_rget = bge_fill_ring;
		cap_rings->mr_gget = bge_fill_group;
		break;
	}
	default:
		return (B_FALSE);
	}
	return (B_TRUE);
}

/*
 * Loopback ioctl code
 */

static lb_property_t loopmodes[] = {
	{ normal,	"normal",	BGE_LOOP_NONE		},
	{ external,	"1000Mbps",	BGE_LOOP_EXTERNAL_1000	},
	{ external,	"100Mbps",	BGE_LOOP_EXTERNAL_100	},
	{ external,	"10Mbps",	BGE_LOOP_EXTERNAL_10	},
	{ internal,	"PHY",		BGE_LOOP_INTERNAL_PHY	},
	{ internal,	"MAC",		BGE_LOOP_INTERNAL_MAC	}
};
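
/*
 * This table is returned verbatim to callers of the LB_GET_INFO ioctl
 * below, so its total size must match what LB_GET_INFO_SIZE reports.
 */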

static enum ioc_reply
bge_set_loop_mode(bge_t *bgep, uint32_t mode)
{
	/*
	 * If the mode isn't being changed, there's nothing to do ...
	 */
	if (mode == bgep->param_loop_mode)
		return (IOC_ACK);

	/*
	 * Validate the requested mode and prepare a suitable message
	 * to explain the link down/up cycle that the change will
	 * probably induce ...
	 */
	switch (mode) {
	default:
		return (IOC_INVAL);

	case BGE_LOOP_NONE:
	case BGE_LOOP_EXTERNAL_1000:
	case BGE_LOOP_EXTERNAL_100:
	case BGE_LOOP_EXTERNAL_10:
	case BGE_LOOP_INTERNAL_PHY:
	case BGE_LOOP_INTERNAL_MAC:
		break;
	}

	/*
	 * All OK; tell the caller to reprogram
	 * the PHY and/or MAC for the new mode ...
	 */
	bgep->param_loop_mode = mode;
	return (IOC_RESTART_ACK);
}

static enum ioc_reply
bge_loop_ioctl(bge_t *bgep, queue_t *wq, mblk_t *mp, struct iocblk *iocp)
{
	lb_info_sz_t *lbsp;
	lb_property_t *lbpp;
	uint32_t *lbmp;
	int cmd;

	_NOTE(ARGUNUSED(wq))

	/*
	 * Validate format of ioctl
	 */
	if (mp->b_cont == NULL)
		return (IOC_INVAL);

	cmd = iocp->ioc_cmd;
	switch (cmd) {
	default:
		/* NOTREACHED */
		bge_error(bgep, "bge_loop_ioctl: invalid cmd 0x%x", cmd);
		return (IOC_INVAL);

	case LB_GET_INFO_SIZE:
		if (iocp->ioc_count != sizeof (lb_info_sz_t))
			return (IOC_INVAL);
		lbsp = (void *)mp->b_cont->b_rptr;
		*lbsp = sizeof (loopmodes);
		return (IOC_REPLY);

	case LB_GET_INFO:
		if (iocp->ioc_count != sizeof (loopmodes))
			return (IOC_INVAL);
		lbpp = (void *)mp->b_cont->b_rptr;
		bcopy(loopmodes, lbpp, sizeof (loopmodes));
		return (IOC_REPLY);

	case LB_GET_MODE:
		if (iocp->ioc_count != sizeof (uint32_t))
			return (IOC_INVAL);
		lbmp = (void *)mp->b_cont->b_rptr;
		*lbmp = bgep->param_loop_mode;
		return (IOC_REPLY);

	case LB_SET_MODE:
		if (iocp->ioc_count != sizeof (uint32_t))
			return (IOC_INVAL);
		lbmp = (void *)mp->b_cont->b_rptr;
		return (bge_set_loop_mode(bgep, *lbmp));
	}
}

/*
 * Specific bge IOCTLs, the gld module handles the generic ones.
 */
static void
bge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
{
	bge_t *bgep = arg;
	struct iocblk *iocp;
	enum ioc_reply status;
	boolean_t need_privilege;
	int err;
	int cmd;

	/*
	 * Validate the command before bothering with the mutex ...
	 */
	iocp = (void *)mp->b_rptr;
	iocp->ioc_error = 0;
	need_privilege = B_TRUE;
	cmd = iocp->ioc_cmd;
	switch (cmd) {
	default:
		miocnak(wq, mp, 0, EINVAL);
		return;

	case BGE_MII_READ:
	case BGE_MII_WRITE:
	case BGE_SEE_READ:
	case BGE_SEE_WRITE:
	case BGE_FLASH_READ:
	case BGE_FLASH_WRITE:
	case BGE_DIAG:
	case BGE_PEEK:
	case BGE_POKE:
	case BGE_PHY_RESET:
	case BGE_SOFT_RESET:
	case BGE_HARD_RESET:
		break;

	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
	case LB_GET_MODE:
		need_privilege = B_FALSE;
		/* FALLTHRU */
	case LB_SET_MODE:
		break;

	}

	if (need_privilege) {
		/*
		 * Check for specific net_config privilege on Solaris 10+.
		 */
		err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
		if (err != 0) {
			miocnak(wq, mp, 0, err);
			return;
		}
	}

	mutex_enter(bgep->genlock);
	if (!(bgep->progress & PROGRESS_INTR)) {
		/* can happen during autorecovery */
		mutex_exit(bgep->genlock);
		miocnak(wq, mp, 0, EIO);
		return;
	}

	switch (cmd) {
	default:
		_NOTE(NOTREACHED)
		status = IOC_INVAL;
		break;

	case BGE_MII_READ:
	case BGE_MII_WRITE:
	case BGE_SEE_READ:
	case BGE_SEE_WRITE:
	case BGE_FLASH_READ:
	case BGE_FLASH_WRITE:
	case BGE_DIAG:
	case BGE_PEEK:
	case BGE_POKE:
	case BGE_PHY_RESET:
	case BGE_SOFT_RESET:
	case BGE_HARD_RESET:
		status = bge_chip_ioctl(bgep, wq, mp, iocp);
		break;

	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
	case LB_GET_MODE:
	case LB_SET_MODE:
		status = bge_loop_ioctl(bgep, wq, mp, iocp);
		break;

	}

	/*
	 * Do we need to reprogram the PHY and/or the MAC?
	 * Do it now, while we still have the mutex.
	 *
	 * Note: update the PHY first, 'cos it controls the
	 * speed/duplex parameters that the MAC code uses.
	 */
	switch (status) {
	case IOC_RESTART_REPLY:
	case IOC_RESTART_ACK:
		if (bge_reprogram(bgep) == IOC_INVAL)
			status = IOC_INVAL;
		break;
	}

	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		status = IOC_INVAL;
	}
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		status = IOC_INVAL;
	}
	mutex_exit(bgep->genlock);

	/*
	 * Finally, decide how to reply
	 */
	switch (status) {
	default:
	case IOC_INVAL:
		/*
		 * Error, reply with a NAK and EINVAL or the specified error
		 */
		miocnak(wq, mp, 0, iocp->ioc_error == 0 ?
		    EINVAL : iocp->ioc_error);
		break;

	case IOC_DONE:
		/*
		 * OK, reply already sent
		 */
		break;

	case IOC_RESTART_ACK:
	case IOC_ACK:
		/*
		 * OK, reply with an ACK
		 */
		miocack(wq, mp, 0, 0);
		break;

	case IOC_RESTART_REPLY:
	case IOC_REPLY:
		/*
		 * OK, send prepared reply as ACK or NAK
		 */
		mp->b_datap->db_type = iocp->ioc_error == 0 ?
		    M_IOCACK : M_IOCNAK;
		qreply(wq, mp);
		break;
	}
}

/*
 * ========== Per-instance setup/teardown code ==========
 */

#undef	BGE_DBG
#define	BGE_DBG		BGE_DBG_INIT	/* debug flag for this code	*/
/*
 * Allocate an area of memory and a DMA handle for accessing it
 */
static int
bge_alloc_dma_mem(bge_t *bgep, size_t memsize, ddi_device_acc_attr_t *attr_p,
    uint_t dma_flags, dma_area_t *dma_p)
{
	caddr_t va;
	int err;

	BGE_TRACE(("bge_alloc_dma_mem($%p, %ld, $%p, 0x%x, $%p)",
	    (void *)bgep, memsize, attr_p, dma_flags, dma_p));

	/*
	 * Allocate handle
	 */
	err = ddi_dma_alloc_handle(bgep->devinfo, &dma_attr,
	    DDI_DMA_DONTWAIT, NULL, &dma_p->dma_hdl);
	if (err != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * Allocate memory
	 */
	err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize, attr_p,
	    dma_flags, DDI_DMA_DONTWAIT, NULL, &va, &dma_p->alength,
	    &dma_p->acc_hdl);
	if (err != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * Bind the two together
	 */
	dma_p->mem_va = va;
	err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL,
	    va, dma_p->alength, dma_flags, DDI_DMA_DONTWAIT, NULL,
	    &dma_p->cookie, &dma_p->ncookies);

	BGE_DEBUG(("bge_alloc_dma_mem(): bind %d bytes; err %d, %d cookies",
	    dma_p->alength, err, dma_p->ncookies));

	if (err != DDI_DMA_MAPPED || dma_p->ncookies != 1)
		return (DDI_FAILURE);

	dma_p->nslots = ~0U;
	dma_p->size = ~0U;
	dma_p->token = ~0U;
	dma_p->offset = 0;
	return (DDI_SUCCESS);
}

/*
 * Free one allocated area of DMAable memory
 */
static void
bge_free_dma_mem(dma_area_t *dma_p)
{
	if (dma_p->dma_hdl != NULL) {
		if (dma_p->ncookies) {
			(void) ddi_dma_unbind_handle(dma_p->dma_hdl);
			dma_p->ncookies = 0;
		}
		ddi_dma_free_handle(&dma_p->dma_hdl);
		dma_p->dma_hdl = NULL;
	}

	if (dma_p->acc_hdl != NULL) {
		ddi_dma_mem_free(&dma_p->acc_hdl);
		dma_p->acc_hdl = NULL;
	}
}
/*
 * Utility routine to carve a slice off a chunk of allocated memory,
 * updating the chunk descriptor accordingly. The size of the slice
 * is given by the product of the <qty> and <size> parameters.
 */
static void
bge_slice_chunk(dma_area_t *slice, dma_area_t *chunk,
    uint32_t qty, uint32_t size)
{
	static uint32_t sequence = 0xbcd5704a;
	size_t totsize;

	totsize = qty*size;
	ASSERT(totsize <= chunk->alength);

	*slice = *chunk;
	slice->nslots = qty;
	slice->size = size;
	slice->alength = totsize;
	slice->token = ++sequence;

	chunk->mem_va = (caddr_t)chunk->mem_va + totsize;
	chunk->alength -= totsize;
	chunk->offset += totsize;
	chunk->cookie.dmac_laddress += totsize;
	chunk->cookie.dmac_size -= totsize;
}
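
/*
 * For example, carving <qty> = 512 slots of <size> = 16 bytes consumes
 * 8KB: the slice records nslots = 512 and size = 16, while the chunk's
 * mem_va, offset and DMA cookie all advance by 8192 bytes.
 */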
2141 */ 2142 static void 2143 bge_init_buff_ring(bge_t *bgep, uint64_t ring) 2144 { 2145 buff_ring_t *brp; 2146 bge_status_t *bsp; 2147 sw_rbd_t *srbdp; 2148 dma_area_t pbuf; 2149 uint32_t bufsize; 2150 uint32_t nslots; 2151 uint32_t slot; 2152 uint32_t split; 2153 2154 static bge_regno_t nic_ring_addrs[BGE_BUFF_RINGS_MAX] = { 2155 NIC_MEM_SHADOW_BUFF_STD, 2156 NIC_MEM_SHADOW_BUFF_JUMBO, 2157 NIC_MEM_SHADOW_BUFF_MINI 2158 }; 2159 static bge_regno_t mailbox_regs[BGE_BUFF_RINGS_MAX] = { 2160 RECV_STD_PROD_INDEX_REG, 2161 RECV_JUMBO_PROD_INDEX_REG, 2162 RECV_MINI_PROD_INDEX_REG 2163 }; 2164 static bge_regno_t buff_cons_xref[BGE_BUFF_RINGS_MAX] = { 2165 STATUS_STD_BUFF_CONS_INDEX, 2166 STATUS_JUMBO_BUFF_CONS_INDEX, 2167 STATUS_MINI_BUFF_CONS_INDEX 2168 }; 2169 2170 BGE_TRACE(("bge_init_buff_ring($%p, %d)", 2171 (void *)bgep, ring)); 2172 2173 brp = &bgep->buff[ring]; 2174 nslots = brp->desc.nslots; 2175 ASSERT(brp->buf[0].nslots == nslots/BGE_SPLIT); 2176 bufsize = brp->buf[0].size; 2177 2178 /* 2179 * Set up the copy of the h/w RCB 2180 * 2181 * Note: unlike Send & Receive Return Rings, (where the max_len 2182 * field holds the number of slots), in a Receive Buffer Ring 2183 * this field indicates the size of each buffer in the ring. 2184 */ 2185 brp->hw_rcb.host_ring_addr = brp->desc.cookie.dmac_laddress; 2186 brp->hw_rcb.max_len = (uint16_t)bufsize; 2187 brp->hw_rcb.flags = nslots > 0 ? 0 : RCB_FLAG_RING_DISABLED; 2188 brp->hw_rcb.nic_ring_addr = nic_ring_addrs[ring]; 2189 2190 /* 2191 * Other one-off initialisation of per-ring data 2192 */ 2193 brp->bgep = bgep; 2194 bsp = DMA_VPTR(bgep->status_block); 2195 brp->cons_index_p = &bsp->buff_cons_index[buff_cons_xref[ring]]; 2196 brp->chip_mbx_reg = mailbox_regs[ring]; 2197 mutex_init(brp->rf_lock, NULL, MUTEX_DRIVER, 2198 DDI_INTR_PRI(bgep->intr_pri)); 2199 2200 /* 2201 * Allocate the array of s/w Receive Buffer Descriptors 2202 */ 2203 srbdp = kmem_zalloc(nslots*sizeof (*srbdp), KM_SLEEP); 2204 brp->sw_rbds = srbdp; 2205 2206 /* 2207 * Now initialise each array element once and for all 2208 */ 2209 for (split = 0; split < BGE_SPLIT; ++split) { 2210 pbuf = brp->buf[split]; 2211 for (slot = 0; slot < nslots/BGE_SPLIT; ++srbdp, ++slot) 2212 bge_slice_chunk(&srbdp->pbuf, &pbuf, 1, bufsize); 2213 ASSERT(pbuf.alength == 0); 2214 } 2215 } 2216 2217 /* 2218 * Clean up initialisation done above before the memory is freed 2219 */ 2220 static void 2221 bge_fini_buff_ring(bge_t *bgep, uint64_t ring) 2222 { 2223 buff_ring_t *brp; 2224 sw_rbd_t *srbdp; 2225 2226 BGE_TRACE(("bge_fini_buff_ring($%p, %d)", 2227 (void *)bgep, ring)); 2228 2229 brp = &bgep->buff[ring]; 2230 srbdp = brp->sw_rbds; 2231 kmem_free(srbdp, brp->desc.nslots*sizeof (*srbdp)); 2232 2233 mutex_destroy(brp->rf_lock); 2234 } 2235 2236 /* 2237 * Initialise the specified Receive (Return) Ring, using the 2238 * information in the <dma_area> descriptors that it contains 2239 * to set up all the other fields. This routine should be called 2240 * only once for each ring. 2241 */ 2242 static void 2243 bge_init_recv_ring(bge_t *bgep, uint64_t ring) 2244 { 2245 recv_ring_t *rrp; 2246 bge_status_t *bsp; 2247 uint32_t nslots; 2248 2249 BGE_TRACE(("bge_init_recv_ring($%p, %d)", 2250 (void *)bgep, ring)); 2251 2252 /* 2253 * The chip architecture requires that receive return rings have 2254 * 512 or 1024 or 2048 elements per ring. See 570X-PG108-R page 103. 
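 * (nslots == 0 denotes an unused ring; the ASSERT below accepts it
 * too, and such a ring is flagged RCB_FLAG_RING_DISABLED.)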
2255 */ 2256 rrp = &bgep->recv[ring]; 2257 nslots = rrp->desc.nslots; 2258 ASSERT(nslots == 0 || nslots == 512 || 2259 nslots == 1024 || nslots == 2048); 2260 2261 /* 2262 * Set up the copy of the h/w RCB 2263 */ 2264 rrp->hw_rcb.host_ring_addr = rrp->desc.cookie.dmac_laddress; 2265 rrp->hw_rcb.max_len = (uint16_t)nslots; 2266 rrp->hw_rcb.flags = nslots > 0 ? 0 : RCB_FLAG_RING_DISABLED; 2267 rrp->hw_rcb.nic_ring_addr = 0; 2268 2269 /* 2270 * Other one-off initialisation of per-ring data 2271 */ 2272 rrp->bgep = bgep; 2273 bsp = DMA_VPTR(bgep->status_block); 2274 rrp->prod_index_p = RECV_INDEX_P(bsp, ring); 2275 rrp->chip_mbx_reg = RECV_RING_CONS_INDEX_REG(ring); 2276 mutex_init(rrp->rx_lock, NULL, MUTEX_DRIVER, 2277 DDI_INTR_PRI(bgep->intr_pri)); 2278 } 2279 2280 2281 /* 2282 * Clean up initialisation done above before the memory is freed 2283 */ 2284 static void 2285 bge_fini_recv_ring(bge_t *bgep, uint64_t ring) 2286 { 2287 recv_ring_t *rrp; 2288 2289 BGE_TRACE(("bge_fini_recv_ring($%p, %d)", 2290 (void *)bgep, ring)); 2291 2292 rrp = &bgep->recv[ring]; 2293 if (rrp->rx_softint) 2294 ddi_remove_softintr(rrp->rx_softint); 2295 mutex_destroy(rrp->rx_lock); 2296 } 2297 2298 /* 2299 * Initialise the specified Send Ring, using the information in the 2300 * <dma_area> descriptors that it contains to set up all the other 2301 * fields. This routine should be called only once for each ring. 2302 */ 2303 static void 2304 bge_init_send_ring(bge_t *bgep, uint64_t ring) 2305 { 2306 send_ring_t *srp; 2307 bge_status_t *bsp; 2308 sw_sbd_t *ssbdp; 2309 dma_area_t desc; 2310 dma_area_t pbuf; 2311 uint32_t nslots; 2312 uint32_t slot; 2313 uint32_t split; 2314 sw_txbuf_t *txbuf; 2315 2316 BGE_TRACE(("bge_init_send_ring($%p, %d)", 2317 (void *)bgep, ring)); 2318 2319 /* 2320 * The chip architecture requires that host-based send rings 2321 * have 512 elements per ring. See 570X-PG102-R page 56. 2322 */ 2323 srp = &bgep->send[ring]; 2324 nslots = srp->desc.nslots; 2325 ASSERT(nslots == 0 || nslots == 512); 2326 2327 /* 2328 * Set up the copy of the h/w RCB 2329 */ 2330 srp->hw_rcb.host_ring_addr = srp->desc.cookie.dmac_laddress; 2331 srp->hw_rcb.max_len = (uint16_t)nslots; 2332 srp->hw_rcb.flags = nslots > 0 ? 
0 : RCB_FLAG_RING_DISABLED; 2333 srp->hw_rcb.nic_ring_addr = NIC_MEM_SHADOW_SEND_RING(ring, nslots); 2334 2335 /* 2336 * Other one-off initialisation of per-ring data 2337 */ 2338 srp->bgep = bgep; 2339 bsp = DMA_VPTR(bgep->status_block); 2340 srp->cons_index_p = SEND_INDEX_P(bsp, ring); 2341 srp->chip_mbx_reg = SEND_RING_HOST_INDEX_REG(ring); 2342 mutex_init(srp->tx_lock, NULL, MUTEX_DRIVER, 2343 DDI_INTR_PRI(bgep->intr_pri)); 2344 mutex_init(srp->txbuf_lock, NULL, MUTEX_DRIVER, 2345 DDI_INTR_PRI(bgep->intr_pri)); 2346 mutex_init(srp->freetxbuf_lock, NULL, MUTEX_DRIVER, 2347 DDI_INTR_PRI(bgep->intr_pri)); 2348 mutex_init(srp->tc_lock, NULL, MUTEX_DRIVER, 2349 DDI_INTR_PRI(bgep->intr_pri)); 2350 if (nslots == 0) 2351 return; 2352 2353 /* 2354 * Allocate the array of s/w Send Buffer Descriptors 2355 */ 2356 ssbdp = kmem_zalloc(nslots*sizeof (*ssbdp), KM_SLEEP); 2357 txbuf = kmem_zalloc(BGE_SEND_BUF_MAX*sizeof (*txbuf), KM_SLEEP); 2358 srp->txbuf_head = 2359 kmem_zalloc(BGE_SEND_BUF_MAX*sizeof (bge_queue_item_t), KM_SLEEP); 2360 srp->pktp = kmem_zalloc(BGE_SEND_BUF_MAX*sizeof (send_pkt_t), KM_SLEEP); 2361 srp->sw_sbds = ssbdp; 2362 srp->txbuf = txbuf; 2363 srp->tx_buffers = BGE_SEND_BUF_NUM; 2364 srp->tx_buffers_low = srp->tx_buffers / 4; 2365 if (bgep->chipid.snd_buff_size > BGE_SEND_BUFF_SIZE_DEFAULT) 2366 srp->tx_array_max = BGE_SEND_BUF_ARRAY_JUMBO; 2367 else 2368 srp->tx_array_max = BGE_SEND_BUF_ARRAY; 2369 srp->tx_array = 1; 2370 2371 /* 2372 * Chunk tx desc area 2373 */ 2374 desc = srp->desc; 2375 for (slot = 0; slot < nslots; ++ssbdp, ++slot) { 2376 bge_slice_chunk(&ssbdp->desc, &desc, 1, 2377 sizeof (bge_sbd_t)); 2378 } 2379 ASSERT(desc.alength == 0); 2380 2381 /* 2382 * Chunk tx buffer area 2383 */ 2384 for (split = 0; split < BGE_SPLIT; ++split) { 2385 pbuf = srp->buf[0][split]; 2386 for (slot = 0; slot < BGE_SEND_BUF_NUM/BGE_SPLIT; ++slot) { 2387 bge_slice_chunk(&txbuf->buf, &pbuf, 1, 2388 bgep->chipid.snd_buff_size); 2389 txbuf++; 2390 } 2391 ASSERT(pbuf.alength == 0); 2392 } 2393 } 2394 2395 /* 2396 * Clean up initialisation done above before the memory is freed 2397 */ 2398 static void 2399 bge_fini_send_ring(bge_t *bgep, uint64_t ring) 2400 { 2401 send_ring_t *srp; 2402 uint32_t array; 2403 uint32_t split; 2404 uint32_t nslots; 2405 2406 BGE_TRACE(("bge_fini_send_ring($%p, %d)", 2407 (void *)bgep, ring)); 2408 2409 srp = &bgep->send[ring]; 2410 mutex_destroy(srp->tc_lock); 2411 mutex_destroy(srp->freetxbuf_lock); 2412 mutex_destroy(srp->txbuf_lock); 2413 mutex_destroy(srp->tx_lock); 2414 nslots = srp->desc.nslots; 2415 if (nslots == 0) 2416 return; 2417 2418 for (array = 1; array < srp->tx_array; ++array) 2419 for (split = 0; split < BGE_SPLIT; ++split) 2420 bge_free_dma_mem(&srp->buf[array][split]); 2421 kmem_free(srp->sw_sbds, nslots*sizeof (*srp->sw_sbds)); 2422 kmem_free(srp->txbuf_head, BGE_SEND_BUF_MAX*sizeof (*srp->txbuf_head)); 2423 kmem_free(srp->txbuf, BGE_SEND_BUF_MAX*sizeof (*srp->txbuf)); 2424 kmem_free(srp->pktp, BGE_SEND_BUF_MAX*sizeof (*srp->pktp)); 2425 srp->sw_sbds = NULL; 2426 srp->txbuf_head = NULL; 2427 srp->txbuf = NULL; 2428 srp->pktp = NULL; 2429 } 2430 2431 /* 2432 * Initialise all transmit, receive, and buffer rings. 2433 */ 2434 void 2435 bge_init_rings(bge_t *bgep) 2436 { 2437 uint32_t ring; 2438 2439 BGE_TRACE(("bge_init_rings($%p)", (void *)bgep)); 2440 2441 /* 2442 * Perform one-off initialisation of each ring ... 
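 * (send rings first, then receive-return rings, then buffer rings;
 * bge_fini_rings() below undoes the work in the reverse order)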
2443 */ 2444 for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring) 2445 bge_init_send_ring(bgep, ring); 2446 for (ring = 0; ring < BGE_RECV_RINGS_MAX; ++ring) 2447 bge_init_recv_ring(bgep, ring); 2448 for (ring = 0; ring < BGE_BUFF_RINGS_MAX; ++ring) 2449 bge_init_buff_ring(bgep, ring); 2450 } 2451 2452 /* 2453 * Undo the work of bge_init_rings() above before the memory is freed 2454 */ 2455 void 2456 bge_fini_rings(bge_t *bgep) 2457 { 2458 uint32_t ring; 2459 2460 BGE_TRACE(("bge_fini_rings($%p)", (void *)bgep)); 2461 2462 for (ring = 0; ring < BGE_BUFF_RINGS_MAX; ++ring) 2463 bge_fini_buff_ring(bgep, ring); 2464 for (ring = 0; ring < BGE_RECV_RINGS_MAX; ++ring) 2465 bge_fini_recv_ring(bgep, ring); 2466 for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring) 2467 bge_fini_send_ring(bgep, ring); 2468 } 2469 2470 /* 2471 * Called from the bge_m_stop() to free the tx buffers which are 2472 * allocated from the tx process. 2473 */ 2474 void 2475 bge_free_txbuf_arrays(send_ring_t *srp) 2476 { 2477 uint32_t array; 2478 uint32_t split; 2479 2480 ASSERT(mutex_owned(srp->tx_lock)); 2481 2482 /* 2483 * Free the extra tx buffer DMA area 2484 */ 2485 for (array = 1; array < srp->tx_array; ++array) 2486 for (split = 0; split < BGE_SPLIT; ++split) 2487 bge_free_dma_mem(&srp->buf[array][split]); 2488 2489 /* 2490 * Restore initial tx buffer numbers 2491 */ 2492 srp->tx_array = 1; 2493 srp->tx_buffers = BGE_SEND_BUF_NUM; 2494 srp->tx_buffers_low = srp->tx_buffers / 4; 2495 srp->tx_flow = 0; 2496 bzero(srp->pktp, BGE_SEND_BUF_MAX * sizeof (*srp->pktp)); 2497 } 2498 2499 /* 2500 * Called from tx process to allocate more tx buffers 2501 */ 2502 bge_queue_item_t * 2503 bge_alloc_txbuf_array(bge_t *bgep, send_ring_t *srp) 2504 { 2505 bge_queue_t *txbuf_queue; 2506 bge_queue_item_t *txbuf_item_last; 2507 bge_queue_item_t *txbuf_item; 2508 bge_queue_item_t *txbuf_item_rtn; 2509 sw_txbuf_t *txbuf; 2510 dma_area_t area; 2511 size_t txbuffsize; 2512 uint32_t slot; 2513 uint32_t array; 2514 uint32_t split; 2515 uint32_t err; 2516 2517 ASSERT(mutex_owned(srp->tx_lock)); 2518 2519 array = srp->tx_array; 2520 if (array >= srp->tx_array_max) 2521 return (NULL); 2522 2523 /* 2524 * Allocate memory & handles for TX buffers 2525 */ 2526 txbuffsize = BGE_SEND_BUF_NUM*bgep->chipid.snd_buff_size; 2527 ASSERT((txbuffsize % BGE_SPLIT) == 0); 2528 for (split = 0; split < BGE_SPLIT; ++split) { 2529 err = bge_alloc_dma_mem(bgep, txbuffsize/BGE_SPLIT, 2530 &bge_data_accattr, DDI_DMA_WRITE | BGE_DMA_MODE, 2531 &srp->buf[array][split]); 2532 if (err != DDI_SUCCESS) { 2533 /* Free the last already allocated OK chunks */ 2534 for (slot = 0; slot <= split; ++slot) 2535 bge_free_dma_mem(&srp->buf[array][slot]); 2536 srp->tx_alloc_fail++; 2537 return (NULL); 2538 } 2539 } 2540 2541 /* 2542 * Chunk tx buffer area 2543 */ 2544 txbuf = srp->txbuf + array*BGE_SEND_BUF_NUM; 2545 for (split = 0; split < BGE_SPLIT; ++split) { 2546 area = srp->buf[array][split]; 2547 for (slot = 0; slot < BGE_SEND_BUF_NUM/BGE_SPLIT; ++slot) { 2548 bge_slice_chunk(&txbuf->buf, &area, 1, 2549 bgep->chipid.snd_buff_size); 2550 txbuf++; 2551 } 2552 } 2553 2554 /* 2555 * Add above buffers to the tx buffer pop queue 2556 */ 2557 txbuf_item = srp->txbuf_head + array*BGE_SEND_BUF_NUM; 2558 txbuf = srp->txbuf + array*BGE_SEND_BUF_NUM; 2559 txbuf_item_last = NULL; 2560 for (slot = 0; slot < BGE_SEND_BUF_NUM; ++slot) { 2561 txbuf_item->item = txbuf; 2562 txbuf_item->next = txbuf_item_last; 2563 txbuf_item_last = txbuf_item; 2564 txbuf++; 2565 txbuf_item++; 2566 } 2567 txbuf_item = 
srp->txbuf_head + array*BGE_SEND_BUF_NUM; 2568 txbuf_item_rtn = txbuf_item; 2569 txbuf_item++; 2570 txbuf_queue = srp->txbuf_pop_queue; 2571 mutex_enter(txbuf_queue->lock); 2572 txbuf_item->next = txbuf_queue->head; 2573 txbuf_queue->head = txbuf_item_last; 2574 txbuf_queue->count += BGE_SEND_BUF_NUM - 1; 2575 mutex_exit(txbuf_queue->lock); 2576 2577 srp->tx_array++; 2578 srp->tx_buffers += BGE_SEND_BUF_NUM; 2579 srp->tx_buffers_low = srp->tx_buffers / 4; 2580 2581 return (txbuf_item_rtn); 2582 } 2583 2584 /* 2585 * This function allocates all the transmit and receive buffers 2586 * and descriptors, in four chunks. 2587 */ 2588 int 2589 bge_alloc_bufs(bge_t *bgep) 2590 { 2591 dma_area_t area; 2592 size_t rxbuffsize; 2593 size_t txbuffsize; 2594 size_t rxbuffdescsize; 2595 size_t rxdescsize; 2596 size_t txdescsize; 2597 uint32_t ring; 2598 uint32_t rx_rings = bgep->chipid.rx_rings; 2599 uint32_t tx_rings = bgep->chipid.tx_rings; 2600 int split; 2601 int err; 2602 2603 BGE_TRACE(("bge_alloc_bufs($%p)", 2604 (void *)bgep)); 2605 2606 rxbuffsize = BGE_STD_SLOTS_USED*bgep->chipid.std_buf_size; 2607 rxbuffsize += bgep->chipid.jumbo_slots*bgep->chipid.recv_jumbo_size; 2608 rxbuffsize += BGE_MINI_SLOTS_USED*BGE_MINI_BUFF_SIZE; 2609 2610 txbuffsize = BGE_SEND_BUF_NUM*bgep->chipid.snd_buff_size; 2611 txbuffsize *= tx_rings; 2612 2613 rxdescsize = rx_rings*bgep->chipid.recv_slots; 2614 rxdescsize *= sizeof (bge_rbd_t); 2615 2616 rxbuffdescsize = BGE_STD_SLOTS_USED; 2617 rxbuffdescsize += bgep->chipid.jumbo_slots; 2618 rxbuffdescsize += BGE_MINI_SLOTS_USED; 2619 rxbuffdescsize *= sizeof (bge_rbd_t); 2620 2621 txdescsize = tx_rings*BGE_SEND_SLOTS_USED; 2622 txdescsize *= sizeof (bge_sbd_t); 2623 txdescsize += sizeof (bge_statistics_t); 2624 txdescsize += sizeof (bge_status_t); 2625 txdescsize += BGE_STATUS_PADDING; 2626 2627 /* 2628 * Enable PCI relaxed ordering only for RX/TX data buffers 2629 */ 2630 if (bge_relaxed_ordering) 2631 dma_attr.dma_attr_flags |= DDI_DMA_RELAXED_ORDERING; 2632 2633 /* 2634 * Allocate memory & handles for RX buffers 2635 */ 2636 ASSERT((rxbuffsize % BGE_SPLIT) == 0); 2637 for (split = 0; split < BGE_SPLIT; ++split) { 2638 err = bge_alloc_dma_mem(bgep, rxbuffsize/BGE_SPLIT, 2639 &bge_data_accattr, DDI_DMA_READ | BGE_DMA_MODE, 2640 &bgep->rx_buff[split]); 2641 if (err != DDI_SUCCESS) 2642 return (DDI_FAILURE); 2643 } 2644 2645 /* 2646 * Allocate memory & handles for TX buffers 2647 */ 2648 ASSERT((txbuffsize % BGE_SPLIT) == 0); 2649 for (split = 0; split < BGE_SPLIT; ++split) { 2650 err = bge_alloc_dma_mem(bgep, txbuffsize/BGE_SPLIT, 2651 &bge_data_accattr, DDI_DMA_WRITE | BGE_DMA_MODE, 2652 &bgep->tx_buff[split]); 2653 if (err != DDI_SUCCESS) 2654 return (DDI_FAILURE); 2655 } 2656 2657 dma_attr.dma_attr_flags &= ~DDI_DMA_RELAXED_ORDERING; 2658 2659 /* 2660 * Allocate memory & handles for receive return rings 2661 */ 2662 ASSERT((rxdescsize % rx_rings) == 0); 2663 for (split = 0; split < rx_rings; ++split) { 2664 err = bge_alloc_dma_mem(bgep, rxdescsize/rx_rings, 2665 &bge_desc_accattr, DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 2666 &bgep->rx_desc[split]); 2667 if (err != DDI_SUCCESS) 2668 return (DDI_FAILURE); 2669 } 2670 2671 /* 2672 * Allocate memory & handles for buffer (producer) descriptor rings 2673 */ 2674 err = bge_alloc_dma_mem(bgep, rxbuffdescsize, &bge_desc_accattr, 2675 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &bgep->rx_desc[split]); 2676 if (err != DDI_SUCCESS) 2677 return (DDI_FAILURE); 2678 2679 /* 2680 * Allocate memory & handles for TX descriptor rings, 2681 * status 
block, and statistics area 2682 */ 2683 err = bge_alloc_dma_mem(bgep, txdescsize, &bge_desc_accattr, 2684 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &bgep->tx_desc); 2685 if (err != DDI_SUCCESS) 2686 return (DDI_FAILURE); 2687 2688 /* 2689 * Now carve up each of the allocated areas ... 2690 */ 2691 for (split = 0; split < BGE_SPLIT; ++split) { 2692 area = bgep->rx_buff[split]; 2693 bge_slice_chunk(&bgep->buff[BGE_STD_BUFF_RING].buf[split], 2694 &area, BGE_STD_SLOTS_USED/BGE_SPLIT, 2695 bgep->chipid.std_buf_size); 2696 bge_slice_chunk(&bgep->buff[BGE_JUMBO_BUFF_RING].buf[split], 2697 &area, bgep->chipid.jumbo_slots/BGE_SPLIT, 2698 bgep->chipid.recv_jumbo_size); 2699 bge_slice_chunk(&bgep->buff[BGE_MINI_BUFF_RING].buf[split], 2700 &area, BGE_MINI_SLOTS_USED/BGE_SPLIT, 2701 BGE_MINI_BUFF_SIZE); 2702 } 2703 2704 for (split = 0; split < BGE_SPLIT; ++split) { 2705 area = bgep->tx_buff[split]; 2706 for (ring = 0; ring < tx_rings; ++ring) 2707 bge_slice_chunk(&bgep->send[ring].buf[0][split], 2708 &area, BGE_SEND_BUF_NUM/BGE_SPLIT, 2709 bgep->chipid.snd_buff_size); 2710 for (; ring < BGE_SEND_RINGS_MAX; ++ring) 2711 bge_slice_chunk(&bgep->send[ring].buf[0][split], 2712 &area, 0, bgep->chipid.snd_buff_size); 2713 } 2714 2715 for (ring = 0; ring < rx_rings; ++ring) 2716 bge_slice_chunk(&bgep->recv[ring].desc, &bgep->rx_desc[ring], 2717 bgep->chipid.recv_slots, sizeof (bge_rbd_t)); 2718 2719 area = bgep->rx_desc[rx_rings]; 2720 for (; ring < BGE_RECV_RINGS_MAX; ++ring) 2721 bge_slice_chunk(&bgep->recv[ring].desc, &area, 2722 0, sizeof (bge_rbd_t)); 2723 bge_slice_chunk(&bgep->buff[BGE_STD_BUFF_RING].desc, &area, 2724 BGE_STD_SLOTS_USED, sizeof (bge_rbd_t)); 2725 bge_slice_chunk(&bgep->buff[BGE_JUMBO_BUFF_RING].desc, &area, 2726 bgep->chipid.jumbo_slots, sizeof (bge_rbd_t)); 2727 bge_slice_chunk(&bgep->buff[BGE_MINI_BUFF_RING].desc, &area, 2728 BGE_MINI_SLOTS_USED, sizeof (bge_rbd_t)); 2729 ASSERT(area.alength == 0); 2730 2731 area = bgep->tx_desc; 2732 for (ring = 0; ring < tx_rings; ++ring) 2733 bge_slice_chunk(&bgep->send[ring].desc, &area, 2734 BGE_SEND_SLOTS_USED, sizeof (bge_sbd_t)); 2735 for (; ring < BGE_SEND_RINGS_MAX; ++ring) 2736 bge_slice_chunk(&bgep->send[ring].desc, &area, 2737 0, sizeof (bge_sbd_t)); 2738 bge_slice_chunk(&bgep->statistics, &area, 1, sizeof (bge_statistics_t)); 2739 bge_slice_chunk(&bgep->status_block, &area, 1, sizeof (bge_status_t)); 2740 ASSERT(area.alength == BGE_STATUS_PADDING); 2741 DMA_ZERO(bgep->status_block); 2742 2743 return (DDI_SUCCESS); 2744 } 2745 2746 /* 2747 * This routine frees the transmit and receive buffers and descriptors. 2748 * Make sure the chip is stopped before calling it! 
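 * (The areas are freed in the reverse of the order in which
 * bge_alloc_bufs() allocated them: the TX descriptor area first,
 * the RX buffer areas last.)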
2749 */ 2750 void 2751 bge_free_bufs(bge_t *bgep) 2752 { 2753 int split; 2754 2755 BGE_TRACE(("bge_free_bufs($%p)", 2756 (void *)bgep)); 2757 2758 bge_free_dma_mem(&bgep->tx_desc); 2759 for (split = 0; split < BGE_RECV_RINGS_SPLIT; ++split) 2760 bge_free_dma_mem(&bgep->rx_desc[split]); 2761 for (split = 0; split < BGE_SPLIT; ++split) 2762 bge_free_dma_mem(&bgep->tx_buff[split]); 2763 for (split = 0; split < BGE_SPLIT; ++split) 2764 bge_free_dma_mem(&bgep->rx_buff[split]); 2765 } 2766 2767 /* 2768 * Determine (initial) MAC address ("BIA") to use for this interface 2769 */ 2770 2771 static void 2772 bge_find_mac_address(bge_t *bgep, chip_id_t *cidp) 2773 { 2774 struct ether_addr sysaddr; 2775 char propbuf[8]; /* "true" or "false", plus NUL */ 2776 uchar_t *bytes; 2777 int *ints; 2778 uint_t nelts; 2779 int err; 2780 2781 BGE_TRACE(("bge_find_mac_address($%p)", 2782 (void *)bgep)); 2783 2784 BGE_DEBUG(("bge_find_mac_address: hw_mac_addr %012llx, => %s (%sset)", 2785 cidp->hw_mac_addr, 2786 ether_sprintf((void *)cidp->vendor_addr.addr), 2787 cidp->vendor_addr.set ? "" : "not ")); 2788 2789 /* 2790 * The "vendor's factory-set address" may already have 2791 * been extracted from the chip, but if the property 2792 * "local-mac-address" is set we use that instead. It 2793 * will normally be set by OBP, but it could also be 2794 * specified in a .conf file(!) 2795 * 2796 * There doesn't seem to be a way to define byte-array 2797 * properties in a .conf, so we check whether it looks 2798 * like an array of 6 ints instead. 2799 * 2800 * Then, we check whether it looks like an array of 6 2801 * bytes (which it should, if OBP set it). If we can't 2802 * make sense of it either way, we'll ignore it. 2803 */ 2804 err = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, bgep->devinfo, 2805 DDI_PROP_DONTPASS, localmac_propname, &ints, &nelts); 2806 if (err == DDI_PROP_SUCCESS) { 2807 if (nelts == ETHERADDRL) { 2808 while (nelts--) 2809 cidp->vendor_addr.addr[nelts] = ints[nelts]; 2810 cidp->vendor_addr.set = B_TRUE; 2811 } 2812 ddi_prop_free(ints); 2813 } 2814 2815 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, bgep->devinfo, 2816 DDI_PROP_DONTPASS, localmac_propname, &bytes, &nelts); 2817 if (err == DDI_PROP_SUCCESS) { 2818 if (nelts == ETHERADDRL) { 2819 while (nelts--) 2820 cidp->vendor_addr.addr[nelts] = bytes[nelts]; 2821 cidp->vendor_addr.set = B_TRUE; 2822 } 2823 ddi_prop_free(bytes); 2824 } 2825 2826 BGE_DEBUG(("bge_find_mac_address: +local %s (%sset)", 2827 ether_sprintf((void *)cidp->vendor_addr.addr), 2828 cidp->vendor_addr.set ? "" : "not ")); 2829 2830 /* 2831 * Look up the OBP property "local-mac-address?". Note that even 2832 * though its value is a string (which should be "true" or "false"), 2833 * it can't be decoded by ddi_prop_lookup_string(9F). So, we zero 2834 * the buffer first and then fetch the property as an untyped array; 2835 * this may or may not include a final NUL, but since there will 2836 * always be one left at the end of the buffer we can now treat it 2837 * as a string anyway. 2838 */ 2839 nelts = sizeof (propbuf); 2840 bzero(propbuf, nelts--); 2841 err = ddi_getlongprop_buf(DDI_DEV_T_ANY, bgep->devinfo, 2842 DDI_PROP_CANSLEEP, localmac_boolname, propbuf, (int *)&nelts); 2843 2844 /* 2845 * Now, if the address still isn't set from the hardware (SEEPROM) 2846 * or the OBP or .conf property, OR if the user has foolishly set 2847 * 'local-mac-address? = false', use "the system address" instead 2848 * (but only if it's non-null i.e. has been set from the IDPROM). 
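 * To summarise the precedence, lowest to highest: the chip's own
 * (SEEPROM) address, any "local-mac-address" property, the system
 * IDPROM address in the two cases just described, and finally any
 * "mac-address" property left by a netboot, handled further below.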
2849 */ 2850 if (cidp->vendor_addr.set == B_FALSE || strcmp(propbuf, "false") == 0) 2851 if (localetheraddr(NULL, &sysaddr) != 0) { 2852 ethaddr_copy(&sysaddr, cidp->vendor_addr.addr); 2853 cidp->vendor_addr.set = B_TRUE; 2854 } 2855 2856 BGE_DEBUG(("bge_find_mac_address: +system %s (%sset)", 2857 ether_sprintf((void *)cidp->vendor_addr.addr), 2858 cidp->vendor_addr.set ? "" : "not ")); 2859 2860 /* 2861 * Finally(!), if there's a valid "mac-address" property (created 2862 * if we netbooted from this interface), we must use this instead 2863 * of any of the above to ensure that the NFS/install server doesn't 2864 * get confused by the address changing as Solaris takes over! 2865 */ 2866 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, bgep->devinfo, 2867 DDI_PROP_DONTPASS, macaddr_propname, &bytes, &nelts); 2868 if (err == DDI_PROP_SUCCESS) { 2869 if (nelts == ETHERADDRL) { 2870 while (nelts--) 2871 cidp->vendor_addr.addr[nelts] = bytes[nelts]; 2872 cidp->vendor_addr.set = B_TRUE; 2873 } 2874 ddi_prop_free(bytes); 2875 } 2876 2877 BGE_DEBUG(("bge_find_mac_address: =final %s (%sset)", 2878 ether_sprintf((void *)cidp->vendor_addr.addr), 2879 cidp->vendor_addr.set ? "" : "not ")); 2880 } 2881 2882 2883 /*ARGSUSED*/ 2884 int 2885 bge_check_acc_handle(bge_t *bgep, ddi_acc_handle_t handle) 2886 { 2887 ddi_fm_error_t de; 2888 2889 ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION); 2890 ddi_fm_acc_err_clear(handle, DDI_FME_VERSION); 2891 return (de.fme_status); 2892 } 2893 2894 /*ARGSUSED*/ 2895 int 2896 bge_check_dma_handle(bge_t *bgep, ddi_dma_handle_t handle) 2897 { 2898 ddi_fm_error_t de; 2899 2900 ASSERT(bgep->progress & PROGRESS_BUFS); 2901 ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION); 2902 return (de.fme_status); 2903 } 2904 2905 /* 2906 * The IO fault service error handling callback function 2907 */ 2908 /*ARGSUSED*/ 2909 static int 2910 bge_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data) 2911 { 2912 /* 2913 * as the driver can always deal with an error in any dma or 2914 * access handle, we can just return the fme_status value. 2915 */ 2916 pci_ereport_post(dip, err, NULL); 2917 return (err->fme_status); 2918 } 2919 2920 static void 2921 bge_fm_init(bge_t *bgep) 2922 { 2923 ddi_iblock_cookie_t iblk; 2924 2925 /* Only register with IO Fault Services if we have some capability */ 2926 if (bgep->fm_capabilities) { 2927 bge_reg_accattr.devacc_attr_access = DDI_FLAGERR_ACC; 2928 bge_desc_accattr.devacc_attr_access = DDI_FLAGERR_ACC; 2929 dma_attr.dma_attr_flags = DDI_DMA_FLAGERR; 2930 2931 /* Register capabilities with IO Fault Services */ 2932 ddi_fm_init(bgep->devinfo, &bgep->fm_capabilities, &iblk); 2933 2934 /* 2935 * Initialize pci ereport capabilities if ereport capable 2936 */ 2937 if (DDI_FM_EREPORT_CAP(bgep->fm_capabilities) || 2938 DDI_FM_ERRCB_CAP(bgep->fm_capabilities)) 2939 pci_ereport_setup(bgep->devinfo); 2940 2941 /* 2942 * Register error callback if error callback capable 2943 */ 2944 if (DDI_FM_ERRCB_CAP(bgep->fm_capabilities)) 2945 ddi_fm_handler_register(bgep->devinfo, 2946 bge_fm_error_cb, (void*) bgep); 2947 } else { 2948 /* 2949 * These fields have to be cleared of FMA if there are no 2950 * FMA capabilities at runtime. 
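 * Concretely, that means the register/descriptor access attributes
 * revert to DDI_DEFAULT_ACC and dma_attr_flags to 0, as set below.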
2951 */ 2952 bge_reg_accattr.devacc_attr_access = DDI_DEFAULT_ACC; 2953 bge_desc_accattr.devacc_attr_access = DDI_DEFAULT_ACC; 2954 dma_attr.dma_attr_flags = 0; 2955 } 2956 } 2957 2958 static void 2959 bge_fm_fini(bge_t *bgep) 2960 { 2961 /* Only unregister FMA capabilities if we registered some */ 2962 if (bgep->fm_capabilities) { 2963 2964 /* 2965 * Release any resources allocated by pci_ereport_setup() 2966 */ 2967 if (DDI_FM_EREPORT_CAP(bgep->fm_capabilities) || 2968 DDI_FM_ERRCB_CAP(bgep->fm_capabilities)) 2969 pci_ereport_teardown(bgep->devinfo); 2970 2971 /* 2972 * Un-register error callback if error callback capable 2973 */ 2974 if (DDI_FM_ERRCB_CAP(bgep->fm_capabilities)) 2975 ddi_fm_handler_unregister(bgep->devinfo); 2976 2977 /* Unregister from IO Fault Services */ 2978 ddi_fm_fini(bgep->devinfo); 2979 } 2980 } 2981 2982 static void 2983 #ifdef BGE_IPMI_ASF 2984 bge_unattach(bge_t *bgep, uint_t asf_mode) 2985 #else 2986 bge_unattach(bge_t *bgep) 2987 #endif 2988 { 2989 BGE_TRACE(("bge_unattach($%p)", 2990 (void *)bgep)); 2991 2992 /* 2993 * Flag that no more activity may be initiated 2994 */ 2995 bgep->progress &= ~PROGRESS_READY; 2996 2997 /* 2998 * Quiesce the PHY and MAC (leave it reset but still powered). 2999 * Clean up and free all BGE data structures 3000 */ 3001 if (bgep->periodic_id != NULL) { 3002 ddi_periodic_delete(bgep->periodic_id); 3003 bgep->periodic_id = NULL; 3004 } 3005 if (bgep->progress & PROGRESS_KSTATS) 3006 bge_fini_kstats(bgep); 3007 if (bgep->progress & PROGRESS_PHY) 3008 bge_phys_reset(bgep); 3009 if (bgep->progress & PROGRESS_HWINT) { 3010 mutex_enter(bgep->genlock); 3011 #ifdef BGE_IPMI_ASF 3012 if (bge_chip_reset(bgep, B_FALSE, asf_mode) != DDI_SUCCESS) 3013 #else 3014 if (bge_chip_reset(bgep, B_FALSE) != DDI_SUCCESS) 3015 #endif 3016 ddi_fm_service_impact(bgep->devinfo, 3017 DDI_SERVICE_UNAFFECTED); 3018 #ifdef BGE_IPMI_ASF 3019 if (bgep->asf_enabled) { 3020 /* 3021 * This register has been overlaid. We restore its 3022 * initial value here. 
3023 */
3024 bge_nic_put32(bgep, BGE_NIC_DATA_SIG_ADDR,
3025 BGE_NIC_DATA_SIG);
3026 }
3027 #endif
3028 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK)
3029 ddi_fm_service_impact(bgep->devinfo,
3030 DDI_SERVICE_UNAFFECTED);
3031 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK)
3032 ddi_fm_service_impact(bgep->devinfo,
3033 DDI_SERVICE_UNAFFECTED);
3034 mutex_exit(bgep->genlock);
3035 }
3036 if (bgep->progress & PROGRESS_INTR) {
3037 bge_intr_disable(bgep);
3038 bge_fini_rings(bgep);
3039 }
3040 if (bgep->progress & PROGRESS_HWINT) {
3041 bge_rem_intrs(bgep);
3042 rw_destroy(bgep->errlock);
3043 mutex_destroy(bgep->softintrlock);
3044 mutex_destroy(bgep->genlock);
3045 }
3046 if (bgep->progress & PROGRESS_FACTOTUM)
3047 ddi_remove_softintr(bgep->factotum_id);
3048 if (bgep->progress & PROGRESS_RESCHED)
3049 ddi_remove_softintr(bgep->drain_id);
3050 if (bgep->progress & PROGRESS_BUFS)
3051 bge_free_bufs(bgep);
3052 if (bgep->progress & PROGRESS_REGS)
3053 ddi_regs_map_free(&bgep->io_handle);
3054 if (bgep->progress & PROGRESS_CFG)
3055 pci_config_teardown(&bgep->cfg_handle);
3056 
3057 bge_fm_fini(bgep);
3058 
3059 ddi_remove_minor_node(bgep->devinfo, NULL);
3060 kmem_free(bgep->pstats, sizeof (bge_statistics_reg_t));
3061 kmem_free(bgep, sizeof (*bgep));
3062 }
3063 
3064 static int
3065 bge_resume(dev_info_t *devinfo)
3066 {
3067 bge_t *bgep; /* Our private data */
3068 chip_id_t *cidp;
3069 chip_id_t chipid;
3070 
3071 bgep = ddi_get_driver_private(devinfo);
3072 if (bgep == NULL)
3073 return (DDI_FAILURE);
3074 
3075 /*
3076 * Refuse to resume if the data structures aren't consistent
3077 */
3078 if (bgep->devinfo != devinfo)
3079 return (DDI_FAILURE);
3080 
3081 #ifdef BGE_IPMI_ASF
3082 /*
3083 * Power management is not currently supported in BGE. If you
3084 * want to implement it, add the ASF/IPMI-related
3085 * code here.
3086 */
3087 
3088 #endif
3089 
3090 /*
3091 * Read chip ID & set up config space command register(s)
3092 * Refuse to resume if the chip has changed its identity!
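 * ("Identity" here means the vendor, device, revision, and asic_rev
 * fields compared below; a mismatch in any one of them fails the
 * resume.)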
3093 */ 3094 cidp = &bgep->chipid; 3095 mutex_enter(bgep->genlock); 3096 bge_chip_cfg_init(bgep, &chipid, B_FALSE); 3097 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) { 3098 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 3099 mutex_exit(bgep->genlock); 3100 return (DDI_FAILURE); 3101 } 3102 mutex_exit(bgep->genlock); 3103 if (chipid.vendor != cidp->vendor) 3104 return (DDI_FAILURE); 3105 if (chipid.device != cidp->device) 3106 return (DDI_FAILURE); 3107 if (chipid.revision != cidp->revision) 3108 return (DDI_FAILURE); 3109 if (chipid.asic_rev != cidp->asic_rev) 3110 return (DDI_FAILURE); 3111 3112 /* 3113 * All OK, reinitialise h/w & kick off GLD scheduling 3114 */ 3115 mutex_enter(bgep->genlock); 3116 if (bge_restart(bgep, B_TRUE) != DDI_SUCCESS) { 3117 (void) bge_check_acc_handle(bgep, bgep->cfg_handle); 3118 (void) bge_check_acc_handle(bgep, bgep->io_handle); 3119 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 3120 mutex_exit(bgep->genlock); 3121 return (DDI_FAILURE); 3122 } 3123 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) { 3124 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 3125 mutex_exit(bgep->genlock); 3126 return (DDI_FAILURE); 3127 } 3128 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) { 3129 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 3130 mutex_exit(bgep->genlock); 3131 return (DDI_FAILURE); 3132 } 3133 mutex_exit(bgep->genlock); 3134 return (DDI_SUCCESS); 3135 } 3136 3137 /* 3138 * attach(9E) -- Attach a device to the system 3139 * 3140 * Called once for each board successfully probed. 3141 */ 3142 static int 3143 bge_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd) 3144 { 3145 bge_t *bgep; /* Our private data */ 3146 mac_register_t *macp; 3147 chip_id_t *cidp; 3148 caddr_t regs; 3149 int instance; 3150 int err; 3151 int intr_types; 3152 #ifdef BGE_IPMI_ASF 3153 uint32_t mhcrValue; 3154 #ifdef __sparc 3155 uint16_t value16; 3156 #endif 3157 #ifdef BGE_NETCONSOLE 3158 int retval; 3159 #endif 3160 #endif 3161 3162 instance = ddi_get_instance(devinfo); 3163 3164 BGE_GTRACE(("bge_attach($%p, %d) instance %d", 3165 (void *)devinfo, cmd, instance)); 3166 BGE_BRKPT(NULL, "bge_attach"); 3167 3168 switch (cmd) { 3169 default: 3170 return (DDI_FAILURE); 3171 3172 case DDI_RESUME: 3173 return (bge_resume(devinfo)); 3174 3175 case DDI_ATTACH: 3176 break; 3177 } 3178 3179 bgep = kmem_zalloc(sizeof (*bgep), KM_SLEEP); 3180 bgep->pstats = kmem_zalloc(sizeof (bge_statistics_reg_t), KM_SLEEP); 3181 ddi_set_driver_private(devinfo, bgep); 3182 bgep->bge_guard = BGE_GUARD; 3183 bgep->devinfo = devinfo; 3184 bgep->param_drain_max = 64; 3185 bgep->param_msi_cnt = 0; 3186 bgep->param_loop_mode = 0; 3187 3188 /* 3189 * Initialize more fields in BGE private data 3190 */ 3191 bgep->debug = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 3192 DDI_PROP_DONTPASS, debug_propname, bge_debug); 3193 (void) snprintf(bgep->ifname, sizeof (bgep->ifname), "%s%d", 3194 BGE_DRIVER_NAME, instance); 3195 3196 /* 3197 * Initialize for fma support 3198 */ 3199 bgep->fm_capabilities = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 3200 DDI_PROP_DONTPASS, fm_cap, 3201 DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE | 3202 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE); 3203 BGE_DEBUG(("bgep->fm_capabilities = %d", bgep->fm_capabilities)); 3204 bge_fm_init(bgep); 3205 3206 /* 3207 * Look up the IOMMU's page size for DVMA mappings (must be 3208 * a power of 2) and convert to a mask. 
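 * For example, assuming dvma_pagesize() returns 0x2000 (an 8 KB
 * IOMMU page), the negation below yields the mask
 * 0xFFFFFFFFFFFFE000; two addresses then lie in the same page
 * iff (a & pagemask) == (b & pagemask).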
This can be used to 3209 * determine whether a message buffer crosses a page boundary. 3210 * Note: in 2s complement binary notation, if X is a power of 3211 * 2, then -X has the representation "11...1100...00". 3212 */ 3213 bgep->pagemask = dvma_pagesize(devinfo); 3214 ASSERT(ddi_ffs(bgep->pagemask) == ddi_fls(bgep->pagemask)); 3215 bgep->pagemask = -bgep->pagemask; 3216 3217 /* 3218 * Map config space registers 3219 * Read chip ID & set up config space command register(s) 3220 * 3221 * Note: this leaves the chip accessible by Memory Space 3222 * accesses, but with interrupts and Bus Mastering off. 3223 * This should ensure that nothing untoward will happen 3224 * if it has been left active by the (net-)bootloader. 3225 * We'll re-enable Bus Mastering once we've reset the chip, 3226 * and allow interrupts only when everything else is set up. 3227 */ 3228 err = pci_config_setup(devinfo, &bgep->cfg_handle); 3229 #ifdef BGE_IPMI_ASF 3230 #ifdef __sparc 3231 value16 = pci_config_get16(bgep->cfg_handle, PCI_CONF_COMM); 3232 value16 = value16 | (PCI_COMM_MAE | PCI_COMM_ME); 3233 pci_config_put16(bgep->cfg_handle, PCI_CONF_COMM, value16); 3234 mhcrValue = MHCR_ENABLE_INDIRECT_ACCESS | 3235 MHCR_ENABLE_TAGGED_STATUS_MODE | 3236 MHCR_MASK_INTERRUPT_MODE | 3237 MHCR_MASK_PCI_INT_OUTPUT | 3238 MHCR_CLEAR_INTERRUPT_INTA | 3239 MHCR_ENABLE_ENDIAN_WORD_SWAP | 3240 MHCR_ENABLE_ENDIAN_BYTE_SWAP; 3241 pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_MHCR, mhcrValue); 3242 bge_ind_put32(bgep, MEMORY_ARBITER_MODE_REG, 3243 bge_ind_get32(bgep, MEMORY_ARBITER_MODE_REG) | 3244 MEMORY_ARBITER_ENABLE); 3245 #else 3246 mhcrValue = pci_config_get32(bgep->cfg_handle, PCI_CONF_BGE_MHCR); 3247 #endif 3248 if (mhcrValue & MHCR_ENABLE_ENDIAN_WORD_SWAP) { 3249 bgep->asf_wordswapped = B_TRUE; 3250 } else { 3251 bgep->asf_wordswapped = B_FALSE; 3252 } 3253 bge_asf_get_config(bgep); 3254 #endif 3255 if (err != DDI_SUCCESS) { 3256 bge_problem(bgep, "pci_config_setup() failed"); 3257 goto attach_fail; 3258 } 3259 bgep->progress |= PROGRESS_CFG; 3260 cidp = &bgep->chipid; 3261 bzero(cidp, sizeof (*cidp)); 3262 bge_chip_cfg_init(bgep, cidp, B_FALSE); 3263 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) { 3264 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 3265 goto attach_fail; 3266 } 3267 3268 #ifdef BGE_IPMI_ASF 3269 if (DEVICE_5721_SERIES_CHIPSETS(bgep) || 3270 DEVICE_5714_SERIES_CHIPSETS(bgep)) { 3271 bgep->asf_newhandshake = B_TRUE; 3272 } else { 3273 bgep->asf_newhandshake = B_FALSE; 3274 } 3275 #endif 3276 3277 /* 3278 * Update those parts of the chip ID derived from volatile 3279 * registers with the values seen by OBP (in case the chip 3280 * has been reset externally and therefore lost them). 
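 * For example, a hypothetical driver.conf entry supplying an
 * integer for the property named by rxrings_propname would
 * override cidp->rx_rings in the lookups below.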
3281 */
3282 cidp->subven = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
3283 DDI_PROP_DONTPASS, subven_propname, cidp->subven);
3284 cidp->subdev = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
3285 DDI_PROP_DONTPASS, subdev_propname, cidp->subdev);
3286 cidp->clsize = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
3287 DDI_PROP_DONTPASS, clsize_propname, cidp->clsize);
3288 cidp->latency = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
3289 DDI_PROP_DONTPASS, latency_propname, cidp->latency);
3290 cidp->rx_rings = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
3291 DDI_PROP_DONTPASS, rxrings_propname, cidp->rx_rings);
3292 cidp->tx_rings = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
3293 DDI_PROP_DONTPASS, txrings_propname, cidp->tx_rings);
3294 
3295 if (bge_jumbo_enable == B_TRUE) {
3296 cidp->default_mtu = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
3297 DDI_PROP_DONTPASS, default_mtu, BGE_DEFAULT_MTU);
3298 if ((cidp->default_mtu < BGE_DEFAULT_MTU) ||
3299 (cidp->default_mtu > BGE_MAXIMUM_MTU)) {
3300 cidp->default_mtu = BGE_DEFAULT_MTU;
3301 }
3302 }
3303 /*
3304 * Map operating registers
3305 */
3306 err = ddi_regs_map_setup(devinfo, BGE_PCI_OPREGS_RNUMBER,
3307 &regs, 0, 0, &bge_reg_accattr, &bgep->io_handle);
3308 if (err != DDI_SUCCESS) {
3309 bge_problem(bgep, "ddi_regs_map_setup() failed");
3310 goto attach_fail;
3311 }
3312 bgep->io_regs = regs;
3313 bgep->progress |= PROGRESS_REGS;
3314 
3315 /*
3316 * Characterise the device, so we know its requirements.
3317 * Then allocate the appropriate TX and RX descriptors & buffers.
3318 */
3319 if (bge_chip_id_init(bgep) == EIO) {
3320 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
3321 goto attach_fail;
3322 }
3323 
3324 
3325 err = bge_alloc_bufs(bgep);
3326 if (err != DDI_SUCCESS) {
3327 bge_problem(bgep, "DMA buffer allocation failed");
3328 goto attach_fail;
3329 }
3330 bgep->progress |= PROGRESS_BUFS;
3331 
3332 /*
3333 * Add the softint handlers:
3334 *
3335 * Both of these handlers are used to avoid restrictions on the
3336 * context and/or mutexes required for some operations. In
3337 * particular, the hardware interrupt handler and its subfunctions
3338 * can detect a number of conditions that we don't want to handle
3339 * in that context or with that set of mutexes held. So, these
3340 * softints are triggered instead:
3341 *
3342 * the <resched> softint is triggered if we have previously
3343 * had to refuse to send a packet because of resource shortage
3344 * (we've run out of transmit buffers), but the send completion
3345 * interrupt handler has now detected that more buffers have
3346 * become available.
3347 *
3348 * the <factotum> is triggered if the h/w interrupt handler
3349 * sees the <link state changed> or <error> bits in the status
3350 * block. It's also triggered periodically to poll the link
3351 * state, just in case we aren't getting link status change
3352 * interrupts ...
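 * As registered just below, bge_send_drain() implements the
 * <resched> softint and bge_chip_factotum() the <factotum>, both
 * added at DDI_SOFTINT_LOW priority.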
3353 */ 3354 err = ddi_add_softintr(devinfo, DDI_SOFTINT_LOW, &bgep->drain_id, 3355 NULL, NULL, bge_send_drain, (caddr_t)bgep); 3356 if (err != DDI_SUCCESS) { 3357 bge_problem(bgep, "ddi_add_softintr() failed"); 3358 goto attach_fail; 3359 } 3360 bgep->progress |= PROGRESS_RESCHED; 3361 err = ddi_add_softintr(devinfo, DDI_SOFTINT_LOW, &bgep->factotum_id, 3362 NULL, NULL, bge_chip_factotum, (caddr_t)bgep); 3363 if (err != DDI_SUCCESS) { 3364 bge_problem(bgep, "ddi_add_softintr() failed"); 3365 goto attach_fail; 3366 } 3367 bgep->progress |= PROGRESS_FACTOTUM; 3368 3369 /* Get supported interrupt types */ 3370 if (ddi_intr_get_supported_types(devinfo, &intr_types) != DDI_SUCCESS) { 3371 bge_error(bgep, "ddi_intr_get_supported_types failed\n"); 3372 3373 goto attach_fail; 3374 } 3375 3376 BGE_DEBUG(("%s: ddi_intr_get_supported_types() returned: %x", 3377 bgep->ifname, intr_types)); 3378 3379 if ((intr_types & DDI_INTR_TYPE_MSI) && bgep->chipid.msi_enabled) { 3380 if (bge_add_intrs(bgep, DDI_INTR_TYPE_MSI) != DDI_SUCCESS) { 3381 bge_error(bgep, "MSI registration failed, " 3382 "trying FIXED interrupt type\n"); 3383 } else { 3384 BGE_DEBUG(("%s: Using MSI interrupt type", 3385 bgep->ifname)); 3386 bgep->intr_type = DDI_INTR_TYPE_MSI; 3387 bgep->progress |= PROGRESS_HWINT; 3388 } 3389 } 3390 3391 if (!(bgep->progress & PROGRESS_HWINT) && 3392 (intr_types & DDI_INTR_TYPE_FIXED)) { 3393 if (bge_add_intrs(bgep, DDI_INTR_TYPE_FIXED) != DDI_SUCCESS) { 3394 bge_error(bgep, "FIXED interrupt " 3395 "registration failed\n"); 3396 goto attach_fail; 3397 } 3398 3399 BGE_DEBUG(("%s: Using FIXED interrupt type", bgep->ifname)); 3400 3401 bgep->intr_type = DDI_INTR_TYPE_FIXED; 3402 bgep->progress |= PROGRESS_HWINT; 3403 } 3404 3405 if (!(bgep->progress & PROGRESS_HWINT)) { 3406 bge_error(bgep, "No interrupts registered\n"); 3407 goto attach_fail; 3408 } 3409 3410 /* 3411 * Note that interrupts are not enabled yet as 3412 * mutex locks are not initialized. Initialize mutex locks. 3413 */ 3414 mutex_init(bgep->genlock, NULL, MUTEX_DRIVER, 3415 DDI_INTR_PRI(bgep->intr_pri)); 3416 mutex_init(bgep->softintrlock, NULL, MUTEX_DRIVER, 3417 DDI_INTR_PRI(bgep->intr_pri)); 3418 rw_init(bgep->errlock, NULL, RW_DRIVER, 3419 DDI_INTR_PRI(bgep->intr_pri)); 3420 3421 /* 3422 * Initialize rings. 3423 */ 3424 bge_init_rings(bgep); 3425 3426 /* 3427 * Now that mutex locks are initialized, enable interrupts. 3428 */ 3429 bge_intr_enable(bgep); 3430 bgep->progress |= PROGRESS_INTR; 3431 3432 /* 3433 * Initialise link state variables 3434 * Stop, reset & reinitialise the chip. 3435 * Initialise the (internal) PHY. 3436 */ 3437 bgep->link_state = LINK_STATE_UNKNOWN; 3438 3439 mutex_enter(bgep->genlock); 3440 3441 /* 3442 * Reset chip & rings to initial state; also reset address 3443 * filtering, promiscuity, loopback mode. 
3444 */ 3445 #ifdef BGE_IPMI_ASF 3446 #ifdef BGE_NETCONSOLE 3447 if (bge_reset(bgep, ASF_MODE_INIT) != DDI_SUCCESS) { 3448 #else 3449 if (bge_reset(bgep, ASF_MODE_SHUTDOWN) != DDI_SUCCESS) { 3450 #endif 3451 #else 3452 if (bge_reset(bgep) != DDI_SUCCESS) { 3453 #endif 3454 (void) bge_check_acc_handle(bgep, bgep->cfg_handle); 3455 (void) bge_check_acc_handle(bgep, bgep->io_handle); 3456 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 3457 mutex_exit(bgep->genlock); 3458 goto attach_fail; 3459 } 3460 3461 #ifdef BGE_IPMI_ASF 3462 if (bgep->asf_enabled) { 3463 bgep->asf_status = ASF_STAT_RUN_INIT; 3464 } 3465 #endif 3466 3467 bzero(bgep->mcast_hash, sizeof (bgep->mcast_hash)); 3468 bzero(bgep->mcast_refs, sizeof (bgep->mcast_refs)); 3469 bgep->promisc = B_FALSE; 3470 bgep->param_loop_mode = BGE_LOOP_NONE; 3471 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) { 3472 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 3473 mutex_exit(bgep->genlock); 3474 goto attach_fail; 3475 } 3476 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) { 3477 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 3478 mutex_exit(bgep->genlock); 3479 goto attach_fail; 3480 } 3481 3482 mutex_exit(bgep->genlock); 3483 3484 if (bge_phys_init(bgep) == EIO) { 3485 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 3486 goto attach_fail; 3487 } 3488 bgep->progress |= PROGRESS_PHY; 3489 3490 /* 3491 * initialize NDD-tweakable parameters 3492 */ 3493 if (bge_nd_init(bgep)) { 3494 bge_problem(bgep, "bge_nd_init() failed"); 3495 goto attach_fail; 3496 } 3497 bgep->progress |= PROGRESS_NDD; 3498 3499 /* 3500 * Create & initialise named kstats 3501 */ 3502 bge_init_kstats(bgep, instance); 3503 bgep->progress |= PROGRESS_KSTATS; 3504 3505 /* 3506 * Determine whether to override the chip's own MAC address 3507 */ 3508 bge_find_mac_address(bgep, cidp); 3509 3510 bgep->unicst_addr_total = MAC_ADDRESS_REGS_MAX; 3511 bgep->unicst_addr_avail = MAC_ADDRESS_REGS_MAX; 3512 3513 if ((macp = mac_alloc(MAC_VERSION)) == NULL) 3514 goto attach_fail; 3515 macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER; 3516 macp->m_driver = bgep; 3517 macp->m_dip = devinfo; 3518 macp->m_src_addr = cidp->vendor_addr.addr; 3519 macp->m_callbacks = &bge_m_callbacks; 3520 macp->m_min_sdu = 0; 3521 macp->m_max_sdu = cidp->ethmax_size - sizeof (struct ether_header); 3522 macp->m_margin = VLAN_TAGSZ; 3523 macp->m_priv_props = bge_priv_prop; 3524 macp->m_priv_prop_count = BGE_MAX_PRIV_PROPS; 3525 macp->m_v12n = MAC_VIRT_LEVEL1; 3526 3527 /* 3528 * Finally, we're ready to register ourselves with the MAC layer 3529 * interface; if this succeeds, we're all ready to start() 3530 */ 3531 err = mac_register(macp, &bgep->mh); 3532 mac_free(macp); 3533 if (err != 0) 3534 goto attach_fail; 3535 3536 /* 3537 * Register a periodical handler. 3538 * bge_chip_cyclic() is invoked in kernel context. 
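 * (ddi_periodic_add(9F) below invokes it every BGE_CYCLIC_PERIOD
 * nanoseconds at interrupt level DDI_IPL_0.)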
3539 */
3540 bgep->periodic_id = ddi_periodic_add(bge_chip_cyclic, bgep,
3541 BGE_CYCLIC_PERIOD, DDI_IPL_0);
3542 
3543 bgep->progress |= PROGRESS_READY;
3544 ASSERT(bgep->bge_guard == BGE_GUARD);
3545 #ifdef BGE_IPMI_ASF
3546 #ifdef BGE_NETCONSOLE
3547 if (bgep->asf_enabled) {
3548 mutex_enter(bgep->genlock);
3549 retval = bge_chip_start(bgep, B_TRUE);
3550 mutex_exit(bgep->genlock);
3551 if (retval != DDI_SUCCESS)
3552 goto attach_fail;
3553 }
3554 #endif
3555 #endif
3556 
3557 ddi_report_dev(devinfo);
3558 BGE_REPORT((bgep, "bge version: %s", bge_version));
3559 
3560 return (DDI_SUCCESS);
3561 
3562 attach_fail:
3563 #ifdef BGE_IPMI_ASF
3564 bge_unattach(bgep, ASF_MODE_SHUTDOWN);
3565 #else
3566 bge_unattach(bgep);
3567 #endif
3568 return (DDI_FAILURE);
3569 }
3570 
3571 /*
3572 * bge_suspend() -- suspend transmit/receive for powerdown
3573 */
3574 static int
3575 bge_suspend(bge_t *bgep)
3576 {
3577 /*
3578 * Stop processing and idle (powerdown) the PHY ...
3579 */
3580 mutex_enter(bgep->genlock);
3581 #ifdef BGE_IPMI_ASF
3582 /*
3583 * Power management is not currently supported in BGE. If you
3584 * want to implement it, add the ASF/IPMI-related
3585 * code here.
3586 */
3587 #endif
3588 bge_stop(bgep);
3589 if (bge_phys_idle(bgep) != DDI_SUCCESS) {
3590 (void) bge_check_acc_handle(bgep, bgep->io_handle);
3591 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
3592 mutex_exit(bgep->genlock);
3593 return (DDI_FAILURE);
3594 }
3595 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
3596 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
3597 mutex_exit(bgep->genlock);
3598 return (DDI_FAILURE);
3599 }
3600 mutex_exit(bgep->genlock);
3601 
3602 return (DDI_SUCCESS);
3603 }
3604 
3605 /*
3606 * quiesce(9E) entry point.
3607 *
3608 * This function is called when the system is single-threaded at high
3609 * PIL with preemption disabled. Therefore, this function must not
3610 * block.
3611 *
3612 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
3613 * DDI_FAILURE indicates an error condition and should almost never happen.
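 * (On SPARC this driver does not support fast-reboot quiesce:
 * bge_quiesce is simply #defined to ddi_quiesce_not_supported(9F)
 * below.)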
3614 */ 3615 #ifdef __sparc 3616 #define bge_quiesce ddi_quiesce_not_supported 3617 #else 3618 static int 3619 bge_quiesce(dev_info_t *devinfo) 3620 { 3621 bge_t *bgep = ddi_get_driver_private(devinfo); 3622 3623 if (bgep == NULL) 3624 return (DDI_FAILURE); 3625 3626 if (bgep->intr_type == DDI_INTR_TYPE_FIXED) { 3627 bge_reg_set32(bgep, PCI_CONF_BGE_MHCR, 3628 MHCR_MASK_PCI_INT_OUTPUT); 3629 } else { 3630 bge_reg_clr32(bgep, MSI_MODE_REG, MSI_MSI_ENABLE); 3631 } 3632 3633 /* Stop the chip */ 3634 bge_chip_stop_nonblocking(bgep); 3635 3636 return (DDI_SUCCESS); 3637 } 3638 #endif 3639 3640 /* 3641 * detach(9E) -- Detach a device from the system 3642 */ 3643 static int 3644 bge_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd) 3645 { 3646 bge_t *bgep; 3647 #ifdef BGE_IPMI_ASF 3648 uint_t asf_mode; 3649 asf_mode = ASF_MODE_NONE; 3650 #endif 3651 3652 BGE_GTRACE(("bge_detach($%p, %d)", (void *)devinfo, cmd)); 3653 3654 bgep = ddi_get_driver_private(devinfo); 3655 3656 switch (cmd) { 3657 default: 3658 return (DDI_FAILURE); 3659 3660 case DDI_SUSPEND: 3661 return (bge_suspend(bgep)); 3662 3663 case DDI_DETACH: 3664 break; 3665 } 3666 3667 #ifdef BGE_IPMI_ASF 3668 mutex_enter(bgep->genlock); 3669 if (bgep->asf_enabled && ((bgep->asf_status == ASF_STAT_RUN) || 3670 (bgep->asf_status == ASF_STAT_RUN_INIT))) { 3671 3672 bge_asf_update_status(bgep); 3673 if (bgep->asf_status == ASF_STAT_RUN) { 3674 bge_asf_stop_timer(bgep); 3675 } 3676 bgep->asf_status = ASF_STAT_STOP; 3677 3678 bge_asf_pre_reset_operations(bgep, BGE_SHUTDOWN_RESET); 3679 3680 if (bgep->asf_pseudostop) { 3681 bge_chip_stop(bgep, B_FALSE); 3682 bgep->bge_mac_state = BGE_MAC_STOPPED; 3683 bgep->asf_pseudostop = B_FALSE; 3684 } 3685 3686 asf_mode = ASF_MODE_POST_SHUTDOWN; 3687 3688 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) 3689 ddi_fm_service_impact(bgep->devinfo, 3690 DDI_SERVICE_UNAFFECTED); 3691 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) 3692 ddi_fm_service_impact(bgep->devinfo, 3693 DDI_SERVICE_UNAFFECTED); 3694 } 3695 mutex_exit(bgep->genlock); 3696 #endif 3697 3698 /* 3699 * Unregister from the GLD subsystem. This can fail, in 3700 * particular if there are DLPI style-2 streams still open - 3701 * in which case we just return failure without shutting 3702 * down chip operations. 3703 */ 3704 if (mac_unregister(bgep->mh) != 0) 3705 return (DDI_FAILURE); 3706 3707 /* 3708 * All activity stopped, so we can clean up & exit 3709 */ 3710 #ifdef BGE_IPMI_ASF 3711 bge_unattach(bgep, asf_mode); 3712 #else 3713 bge_unattach(bgep); 3714 #endif 3715 return (DDI_SUCCESS); 3716 } 3717 3718 3719 /* 3720 * ========== Module Loading Data & Entry Points ========== 3721 */ 3722 3723 #undef BGE_DBG 3724 #define BGE_DBG BGE_DBG_INIT /* debug flag for this code */ 3725 3726 DDI_DEFINE_STREAM_OPS(bge_dev_ops, 3727 nulldev, /* identify */ 3728 nulldev, /* probe */ 3729 bge_attach, /* attach */ 3730 bge_detach, /* detach */ 3731 nodev, /* reset */ 3732 NULL, /* cb_ops */ 3733 D_MP, /* bus_ops */ 3734 NULL, /* power */ 3735 bge_quiesce /* quiesce */ 3736 ); 3737 3738 static struct modldrv bge_modldrv = { 3739 &mod_driverops, /* Type of module. 
This one is a driver */
3740 bge_ident, /* short description */
3741 &bge_dev_ops /* driver specific ops */
3742 };
3743 
3744 static struct modlinkage modlinkage = {
3745 MODREV_1, (void *)&bge_modldrv, NULL
3746 };
3747 
3748 
3749 int
3750 _info(struct modinfo *modinfop)
3751 {
3752 return (mod_info(&modlinkage, modinfop));
3753 }
3754 
3755 int
3756 _init(void)
3757 {
3758 int status;
3759 
3760 mac_init_ops(&bge_dev_ops, "bge");
3761 status = mod_install(&modlinkage);
3762 if (status == DDI_SUCCESS)
3763 mutex_init(bge_log_mutex, NULL, MUTEX_DRIVER, NULL);
3764 else
3765 mac_fini_ops(&bge_dev_ops);
3766 return (status);
3767 }
3768 
3769 int
3770 _fini(void)
3771 {
3772 int status;
3773 
3774 status = mod_remove(&modlinkage);
3775 if (status == DDI_SUCCESS) {
3776 mac_fini_ops(&bge_dev_ops);
3777 mutex_destroy(bge_log_mutex);
3778 }
3779 return (status);
3780 }
3781 
3782 
3783 /*
3784 * bge_add_intrs:
3785 *
3786 * Register FIXED or MSI interrupts.
3787 */
3788 static int
3789 bge_add_intrs(bge_t *bgep, int intr_type)
3790 {
3791 dev_info_t *dip = bgep->devinfo;
3792 int avail, actual, intr_size, count = 0;
3793 int i, flag, ret;
3794 
3795 BGE_DEBUG(("bge_add_intrs($%p, 0x%x)", (void *)bgep, intr_type));
3796 
3797 /* Get number of interrupts */
3798 ret = ddi_intr_get_nintrs(dip, intr_type, &count);
3799 if ((ret != DDI_SUCCESS) || (count == 0)) {
3800 bge_error(bgep, "ddi_intr_get_nintrs() failure, ret: %d, "
3801 "count: %d", ret, count);
3802 
3803 return (DDI_FAILURE);
3804 }
3805 
3806 /* Get number of available interrupts */
3807 ret = ddi_intr_get_navail(dip, intr_type, &avail);
3808 if ((ret != DDI_SUCCESS) || (avail == 0)) {
3809 bge_error(bgep, "ddi_intr_get_navail() failure, "
3810 "ret: %d, avail: %d\n", ret, avail);
3811 
3812 return (DDI_FAILURE);
3813 }
3814 
3815 if (avail < count) {
3816 BGE_DEBUG(("%s: nintrs() returned %d, navail returned %d",
3817 bgep->ifname, count, avail));
3818 }
3819 
3820 /*
3821 * BGE hardware generates only a single MSI even though it claims
3822 * to support multiple MSIs. So, hard-code the MSI count to 1.
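 * (Requesting exactly one MSI with DDI_INTR_ALLOC_STRICT below
 * means ddi_intr_alloc(9F) either grants that single vector or
 * fails outright.)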
3823 */ 3824 if (intr_type == DDI_INTR_TYPE_MSI) { 3825 count = 1; 3826 flag = DDI_INTR_ALLOC_STRICT; 3827 } else { 3828 flag = DDI_INTR_ALLOC_NORMAL; 3829 } 3830 3831 /* Allocate an array of interrupt handles */ 3832 intr_size = count * sizeof (ddi_intr_handle_t); 3833 bgep->htable = kmem_alloc(intr_size, KM_SLEEP); 3834 3835 /* Call ddi_intr_alloc() */ 3836 ret = ddi_intr_alloc(dip, bgep->htable, intr_type, 0, 3837 count, &actual, flag); 3838 3839 if ((ret != DDI_SUCCESS) || (actual == 0)) { 3840 bge_error(bgep, "ddi_intr_alloc() failed %d\n", ret); 3841 3842 kmem_free(bgep->htable, intr_size); 3843 return (DDI_FAILURE); 3844 } 3845 3846 if (actual < count) { 3847 BGE_DEBUG(("%s: Requested: %d, Received: %d", 3848 bgep->ifname, count, actual)); 3849 } 3850 3851 bgep->intr_cnt = actual; 3852 3853 /* 3854 * Get priority for first msi, assume remaining are all the same 3855 */ 3856 if ((ret = ddi_intr_get_pri(bgep->htable[0], &bgep->intr_pri)) != 3857 DDI_SUCCESS) { 3858 bge_error(bgep, "ddi_intr_get_pri() failed %d\n", ret); 3859 3860 /* Free already allocated intr */ 3861 for (i = 0; i < actual; i++) { 3862 (void) ddi_intr_free(bgep->htable[i]); 3863 } 3864 3865 kmem_free(bgep->htable, intr_size); 3866 return (DDI_FAILURE); 3867 } 3868 3869 /* Call ddi_intr_add_handler() */ 3870 for (i = 0; i < actual; i++) { 3871 if ((ret = ddi_intr_add_handler(bgep->htable[i], bge_intr, 3872 (caddr_t)bgep, (caddr_t)(uintptr_t)i)) != DDI_SUCCESS) { 3873 bge_error(bgep, "ddi_intr_add_handler() " 3874 "failed %d\n", ret); 3875 3876 /* Free already allocated intr */ 3877 for (i = 0; i < actual; i++) { 3878 (void) ddi_intr_free(bgep->htable[i]); 3879 } 3880 3881 kmem_free(bgep->htable, intr_size); 3882 return (DDI_FAILURE); 3883 } 3884 } 3885 3886 if ((ret = ddi_intr_get_cap(bgep->htable[0], &bgep->intr_cap)) 3887 != DDI_SUCCESS) { 3888 bge_error(bgep, "ddi_intr_get_cap() failed %d\n", ret); 3889 3890 for (i = 0; i < actual; i++) { 3891 (void) ddi_intr_remove_handler(bgep->htable[i]); 3892 (void) ddi_intr_free(bgep->htable[i]); 3893 } 3894 3895 kmem_free(bgep->htable, intr_size); 3896 return (DDI_FAILURE); 3897 } 3898 3899 return (DDI_SUCCESS); 3900 } 3901 3902 /* 3903 * bge_rem_intrs: 3904 * 3905 * Unregister FIXED or MSI interrupts 3906 */ 3907 static void 3908 bge_rem_intrs(bge_t *bgep) 3909 { 3910 int i; 3911 3912 BGE_DEBUG(("bge_rem_intrs($%p)", (void *)bgep)); 3913 3914 /* Call ddi_intr_remove_handler() */ 3915 for (i = 0; i < bgep->intr_cnt; i++) { 3916 (void) ddi_intr_remove_handler(bgep->htable[i]); 3917 (void) ddi_intr_free(bgep->htable[i]); 3918 } 3919 3920 kmem_free(bgep->htable, bgep->intr_cnt * sizeof (ddi_intr_handle_t)); 3921 } 3922 3923 3924 void 3925 bge_intr_enable(bge_t *bgep) 3926 { 3927 int i; 3928 3929 if (bgep->intr_cap & DDI_INTR_FLAG_BLOCK) { 3930 /* Call ddi_intr_block_enable() for MSI interrupts */ 3931 (void) ddi_intr_block_enable(bgep->htable, bgep->intr_cnt); 3932 } else { 3933 /* Call ddi_intr_enable for MSI or FIXED interrupts */ 3934 for (i = 0; i < bgep->intr_cnt; i++) { 3935 (void) ddi_intr_enable(bgep->htable[i]); 3936 } 3937 } 3938 } 3939 3940 3941 void 3942 bge_intr_disable(bge_t *bgep) 3943 { 3944 int i; 3945 3946 if (bgep->intr_cap & DDI_INTR_FLAG_BLOCK) { 3947 /* Call ddi_intr_block_disable() */ 3948 (void) ddi_intr_block_disable(bgep->htable, bgep->intr_cnt); 3949 } else { 3950 for (i = 0; i < bgep->intr_cnt; i++) { 3951 (void) ddi_intr_disable(bgep->htable[i]); 3952 } 3953 } 3954 } 3955 3956 int 3957 bge_reprogram(bge_t *bgep) 3958 { 3959 int status = 0; 3960 3961 
ASSERT(mutex_owned(bgep->genlock)); 3962 3963 if (bge_phys_update(bgep) != DDI_SUCCESS) { 3964 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 3965 status = IOC_INVAL; 3966 } 3967 #ifdef BGE_IPMI_ASF 3968 if (bge_chip_sync(bgep, B_TRUE) == DDI_FAILURE) { 3969 #else 3970 if (bge_chip_sync(bgep) == DDI_FAILURE) { 3971 #endif 3972 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 3973 status = IOC_INVAL; 3974 } 3975 if (bgep->intr_type == DDI_INTR_TYPE_MSI) 3976 bge_chip_msi_trig(bgep); 3977 return (status); 3978 } 3979