/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include "bge_impl.h"
#include <sys/sdt.h>
#include <sys/mac_provider.h>
#include <sys/mac.h>
#include <sys/mac_flow.h>

/*
 * This is the string displayed by modinfo, etc.
 */
static char bge_ident[] = "Broadcom Gb Ethernet";
/*
 * Make sure you keep the version ID up to date!
 */
static char bge_version[] = "Broadcom Gb Ethernet v1.10";

/*
 * Property names
 */
static char debug_propname[] = "bge-debug-flags";
static char clsize_propname[] = "cache-line-size";
static char latency_propname[] = "latency-timer";
static char localmac_boolname[] = "local-mac-address?";
static char localmac_propname[] = "local-mac-address";
static char macaddr_propname[] = "mac-address";
static char subdev_propname[] = "subsystem-id";
static char subven_propname[] = "subsystem-vendor-id";
static char rxrings_propname[] = "bge-rx-rings";
static char txrings_propname[] = "bge-tx-rings";
static char fm_cap[] = "fm-capable";
static char default_mtu[] = "default_mtu";

static int bge_add_intrs(bge_t *, int);
static void bge_rem_intrs(bge_t *);
static int bge_unicst_set(void *, const uint8_t *, int);

/*
 * Describes the chip's DMA engine
 */
static ddi_dma_attr_t dma_attr = {
	DMA_ATTR_V0,			/* dma_attr version	*/
	0x0000000000000000ull,		/* dma_attr_addr_lo	*/
	0xFFFFFFFFFFFFFFFFull,		/* dma_attr_addr_hi	*/
	0x00000000FFFFFFFFull,		/* dma_attr_count_max	*/
	0x0000000000000001ull,		/* dma_attr_align	*/
	0x00000FFF,			/* dma_attr_burstsizes	*/
	0x00000001,			/* dma_attr_minxfer	*/
	0x000000000000FFFFull,		/* dma_attr_maxxfer	*/
	0xFFFFFFFFFFFFFFFFull,		/* dma_attr_seg		*/
	1,				/* dma_attr_sgllen	*/
	0x00000001,			/* dma_attr_granular	*/
	DDI_DMA_FLAGERR			/* dma_attr_flags	*/
};

/*
 * PIO access attributes for registers
 */
static ddi_device_acc_attr_t bge_reg_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC,
	DDI_FLAGERR_ACC
};

/*
 * DMA access attributes for descriptors: NOT to be byte swapped.
 */
static ddi_device_acc_attr_t bge_desc_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC,
	DDI_FLAGERR_ACC
};
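
/*
 * Note: dma_attr_sgllen is 1, so every DMA binding must resolve to a
 * single cookie; bge_alloc_dma_mem() below relies on this when it
 * checks that ncookies == 1.
 */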

/*
 * DMA access attributes for data: NOT to be byte swapped.
 */
static ddi_device_acc_attr_t bge_data_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC
};

static int		bge_m_start(void *);
static void		bge_m_stop(void *);
static int		bge_m_promisc(void *, boolean_t);
static int		bge_m_multicst(void *, boolean_t, const uint8_t *);
static void		bge_m_ioctl(void *, queue_t *, mblk_t *);
static boolean_t	bge_m_getcapab(void *, mac_capab_t, void *);
static int		bge_unicst_set(void *, const uint8_t *, int);
static int		bge_m_setprop(void *, const char *, mac_prop_id_t,
    uint_t, const void *);
static int		bge_m_getprop(void *, const char *, mac_prop_id_t,
    uint_t, uint_t, void *, uint_t *);
static int		bge_set_priv_prop(bge_t *, const char *, uint_t,
    const void *);
static int		bge_get_priv_prop(bge_t *, const char *, uint_t,
    uint_t, void *);

#define	BGE_M_CALLBACK_FLAGS (MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP)

static mac_callbacks_t bge_m_callbacks = {
	BGE_M_CALLBACK_FLAGS,
	bge_m_stat,
	bge_m_start,
	bge_m_stop,
	bge_m_promisc,
	bge_m_multicst,
	NULL,
	bge_m_tx,
	bge_m_ioctl,
	bge_m_getcapab,
	NULL,
	NULL,
	bge_m_setprop,
	bge_m_getprop
};

mac_priv_prop_t bge_priv_prop[] = {
	{"_adv_asym_pause_cap", MAC_PROP_PERM_RW},
	{"_adv_pause_cap", MAC_PROP_PERM_RW}
};

#define	BGE_MAX_PRIV_PROPS \
	(sizeof (bge_priv_prop) / sizeof (mac_priv_prop_t))

uint8_t zero_addr[6] = {0, 0, 0, 0, 0, 0};
/*
 * ========== Transmit and receive ring reinitialisation ==========
 */

/*
 * These <reinit> routines each reset the specified ring to an initial
 * state, assuming that the corresponding <init> routine has already
 * been called exactly once.
 */

static void
bge_reinit_send_ring(send_ring_t *srp)
{
	bge_queue_t *txbuf_queue;
	bge_queue_item_t *txbuf_head;
	sw_txbuf_t *txbuf;
	sw_sbd_t *ssbdp;
	uint32_t slot;
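
	/*
	 * Note: bge_reset() acquires every send ring's tx_lock and
	 * tc_lock before the reinit routines run; the ASSERT below
	 * spot-checks only tc_lock.
	 */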
	/*
	 * Reinitialise control variables ...
	 */
	srp->tx_flow = 0;
	srp->tx_next = 0;
	srp->txfill_next = 0;
	srp->tx_free = srp->desc.nslots;
	ASSERT(mutex_owned(srp->tc_lock));
	srp->tc_next = 0;
	srp->txpkt_next = 0;
	srp->tx_block = 0;
	srp->tx_nobd = 0;
	srp->tx_nobuf = 0;

	/*
	 * Initialize the tx buffer push queue
	 */
	mutex_enter(srp->freetxbuf_lock);
	mutex_enter(srp->txbuf_lock);
	txbuf_queue = &srp->freetxbuf_queue;
	txbuf_queue->head = NULL;
	txbuf_queue->count = 0;
	txbuf_queue->lock = srp->freetxbuf_lock;
	srp->txbuf_push_queue = txbuf_queue;

	/*
	 * Initialize the tx buffer pop queue
	 */
	txbuf_queue = &srp->txbuf_queue;
	txbuf_queue->head = NULL;
	txbuf_queue->count = 0;
	txbuf_queue->lock = srp->txbuf_lock;
	srp->txbuf_pop_queue = txbuf_queue;
	txbuf_head = srp->txbuf_head;
	txbuf = srp->txbuf;
	for (slot = 0; slot < srp->tx_buffers; ++slot) {
		txbuf_head->item = txbuf;
		txbuf_head->next = txbuf_queue->head;
		txbuf_queue->head = txbuf_head;
		txbuf_queue->count++;
		txbuf++;
		txbuf_head++;
	}
	mutex_exit(srp->txbuf_lock);
	mutex_exit(srp->freetxbuf_lock);

	/*
	 * Zero and sync all the h/w Send Buffer Descriptors
	 */
	DMA_ZERO(srp->desc);
	DMA_SYNC(srp->desc, DDI_DMA_SYNC_FORDEV);
	bzero(srp->pktp, BGE_SEND_BUF_MAX * sizeof (*srp->pktp));
	ssbdp = srp->sw_sbds;
	for (slot = 0; slot < srp->desc.nslots; ++ssbdp, ++slot)
		ssbdp->pbuf = NULL;
}

static void
bge_reinit_recv_ring(recv_ring_t *rrp)
{
	/*
	 * Reinitialise control variables ...
	 */
	rrp->rx_next = 0;
}

static void
bge_reinit_buff_ring(buff_ring_t *brp, uint32_t ring)
{
	bge_rbd_t *hw_rbd_p;
	sw_rbd_t *srbdp;
	uint32_t bufsize;
	uint32_t nslots;
	uint32_t slot;

	static uint16_t ring_type_flag[BGE_BUFF_RINGS_MAX] = {
		RBD_FLAG_STD_RING,
		RBD_FLAG_JUMBO_RING,
		RBD_FLAG_MINI_RING
	};

	/*
	 * Zero, initialise and sync all the h/w Receive Buffer Descriptors
	 * Note: all the remaining fields (<type>, <flags>, <ip_cksum>,
	 * <tcp_udp_cksum>, <error_flag>, <vlan_tag>, and <reserved>)
	 * should be zeroed, and so don't need to be set up specifically
	 * once the whole area has been cleared.
	 */
	DMA_ZERO(brp->desc);

	hw_rbd_p = DMA_VPTR(brp->desc);
	nslots = brp->desc.nslots;
	ASSERT(brp->buf[0].nslots == nslots/BGE_SPLIT);
	bufsize = brp->buf[0].size;
	srbdp = brp->sw_rbds;
	for (slot = 0; slot < nslots; ++hw_rbd_p, ++srbdp, ++slot) {
		hw_rbd_p->host_buf_addr = srbdp->pbuf.cookie.dmac_laddress;
		hw_rbd_p->index = (uint16_t)slot;
		hw_rbd_p->len = (uint16_t)bufsize;
		hw_rbd_p->opaque = srbdp->pbuf.token;
		hw_rbd_p->flags |= ring_type_flag[ring];
	}

	DMA_SYNC(brp->desc, DDI_DMA_SYNC_FORDEV);

	/*
	 * Finally, reinitialise the ring control variables ...
	 */
	brp->rf_next = (nslots != 0) ? (nslots-1) : 0;
}

/*
 * Reinitialize all rings
 */
static void
bge_reinit_rings(bge_t *bgep)
{
	uint32_t ring;

	ASSERT(mutex_owned(bgep->genlock));

	/*
	 * Send Rings ...
	 */
	for (ring = 0; ring < bgep->chipid.tx_rings; ++ring)
		bge_reinit_send_ring(&bgep->send[ring]);

	/*
	 * Receive Return Rings ...
	 */
	for (ring = 0; ring < bgep->chipid.rx_rings; ++ring)
		bge_reinit_recv_ring(&bgep->recv[ring]);
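
	/*
	 * Note: only the first BGE_BUFF_RINGS_USED producer rings are
	 * reinitialised; the ring index doubles as the RBD ring-type
	 * selector (std/jumbo/mini) in bge_reinit_buff_ring().
	 */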
	/*
	 * Receive Producer Rings ...
	 */
	for (ring = 0; ring < BGE_BUFF_RINGS_USED; ++ring)
		bge_reinit_buff_ring(&bgep->buff[ring], ring);
}

/*
 * ========== Internal state management entry points ==========
 */

#undef	BGE_DBG
#define	BGE_DBG		BGE_DBG_NEMO	/* debug flag for this code	*/

/*
 * These routines provide all the functionality required by the
 * corresponding GLD entry points, but don't update the GLD state
 * so they can be called internally without disturbing our record
 * of what GLD thinks we should be doing ...
 */

/*
 *	bge_reset() -- reset h/w & rings to initial state
 */
static int
#ifdef BGE_IPMI_ASF
bge_reset(bge_t *bgep, uint_t asf_mode)
#else
bge_reset(bge_t *bgep)
#endif
{
	uint32_t ring;
	int retval;

	BGE_TRACE(("bge_reset($%p)", (void *)bgep));

	ASSERT(mutex_owned(bgep->genlock));

	/*
	 * Grab all the other mutexes in the world (this should
	 * ensure no other threads are manipulating driver state)
	 */
	for (ring = 0; ring < BGE_RECV_RINGS_MAX; ++ring)
		mutex_enter(bgep->recv[ring].rx_lock);
	for (ring = 0; ring < BGE_BUFF_RINGS_MAX; ++ring)
		mutex_enter(bgep->buff[ring].rf_lock);
	rw_enter(bgep->errlock, RW_WRITER);
	for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring)
		mutex_enter(bgep->send[ring].tx_lock);
	for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring)
		mutex_enter(bgep->send[ring].tc_lock);

#ifdef BGE_IPMI_ASF
	retval = bge_chip_reset(bgep, B_TRUE, asf_mode);
#else
	retval = bge_chip_reset(bgep, B_TRUE);
#endif
	bge_reinit_rings(bgep);

	/*
	 * Free the world ...
	 */
	for (ring = BGE_SEND_RINGS_MAX; ring-- > 0; )
		mutex_exit(bgep->send[ring].tc_lock);
	for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring)
		mutex_exit(bgep->send[ring].tx_lock);
	rw_exit(bgep->errlock);
	for (ring = BGE_BUFF_RINGS_MAX; ring-- > 0; )
		mutex_exit(bgep->buff[ring].rf_lock);
	for (ring = BGE_RECV_RINGS_MAX; ring-- > 0; )
		mutex_exit(bgep->recv[ring].rx_lock);

	BGE_DEBUG(("bge_reset($%p) done", (void *)bgep));
	return (retval);
}

/*
 *	bge_stop() -- stop processing, don't reset h/w or rings
 */
static void
bge_stop(bge_t *bgep)
{
	BGE_TRACE(("bge_stop($%p)", (void *)bgep));

	ASSERT(mutex_owned(bgep->genlock));

#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled) {
		bgep->asf_pseudostop = B_TRUE;
	} else {
#endif
		bge_chip_stop(bgep, B_FALSE);
#ifdef BGE_IPMI_ASF
	}
#endif

	BGE_DEBUG(("bge_stop($%p) done", (void *)bgep));
}

/*
 *	bge_start() -- start transmitting/receiving
 */
static int
bge_start(bge_t *bgep, boolean_t reset_phys)
{
	int retval;

	BGE_TRACE(("bge_start($%p, %d)", (void *)bgep, reset_phys));

	ASSERT(mutex_owned(bgep->genlock));

	/*
	 * Start chip processing, including enabling interrupts
	 */
	retval = bge_chip_start(bgep, reset_phys);

	BGE_DEBUG(("bge_start($%p, %d) done", (void *)bgep, reset_phys));
	return (retval);
}

/*
 * bge_restart - restart transmitting/receiving after error or suspend
 */
int
bge_restart(bge_t *bgep, boolean_t reset_phys)
{
	int retval = DDI_SUCCESS;
	ASSERT(mutex_owned(bgep->genlock));

#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled) {
		if (bge_reset(bgep, ASF_MODE_POST_INIT) != DDI_SUCCESS)
			retval = DDI_FAILURE;
	} else
		if (bge_reset(bgep, ASF_MODE_NONE) != DDI_SUCCESS)
			retval = DDI_FAILURE;
#else
	if (bge_reset(bgep) != DDI_SUCCESS)
		retval = DDI_FAILURE;
#endif
	if (bgep->bge_mac_state == BGE_MAC_STARTED) {
		if (bge_start(bgep, reset_phys) != DDI_SUCCESS)
			retval = DDI_FAILURE;
		bgep->watchdog = 0;
		ddi_trigger_softintr(bgep->drain_id);
	}

	BGE_DEBUG(("bge_restart($%p, %d) done", (void *)bgep, reset_phys));
	return (retval);
}


/*
 * ========== Nemo-required management entry points ==========
 */

#undef	BGE_DBG
#define	BGE_DBG		BGE_DBG_NEMO	/* debug flag for this code	*/

/*
 *	bge_m_stop() -- stop transmitting/receiving
 */
static void
bge_m_stop(void *arg)
{
	bge_t *bgep = arg;		/* private device info	*/
	send_ring_t *srp;
	uint32_t ring;

	BGE_TRACE(("bge_m_stop($%p)", arg));

	/*
	 * Just stop processing, then record new GLD state
	 */
	mutex_enter(bgep->genlock);
	if (!(bgep->progress & PROGRESS_INTR)) {
		/* can happen during autorecovery */
		bgep->bge_chip_state = BGE_CHIP_STOPPED;
	} else
		bge_stop(bgep);

	bgep->link_update_timer = 0;
	bgep->link_state = LINK_STATE_UNKNOWN;
	mac_link_update(bgep->mh, bgep->link_state);

	/*
	 * Free the possible tx buffers allocated in tx process.
	 */
#ifdef BGE_IPMI_ASF
	if (!bgep->asf_pseudostop)
#endif
	{
		rw_enter(bgep->errlock, RW_WRITER);
		for (ring = 0; ring < bgep->chipid.tx_rings; ++ring) {
			srp = &bgep->send[ring];
			mutex_enter(srp->tx_lock);
			if (srp->tx_array > 1)
				bge_free_txbuf_arrays(srp);
			mutex_exit(srp->tx_lock);
		}
		rw_exit(bgep->errlock);
	}
	bgep->bge_mac_state = BGE_MAC_STOPPED;
	BGE_DEBUG(("bge_m_stop($%p) done", arg));
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK)
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_UNAFFECTED);
	mutex_exit(bgep->genlock);
}

/*
 *	bge_m_start() -- start transmitting/receiving
 */
static int
bge_m_start(void *arg)
{
	bge_t *bgep = arg;		/* private device info	*/

	BGE_TRACE(("bge_m_start($%p)", arg));

	/*
	 * Start processing and record new GLD state
	 */
	mutex_enter(bgep->genlock);
	if (!(bgep->progress & PROGRESS_INTR)) {
		/* can happen during autorecovery */
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled) {
		if ((bgep->asf_status == ASF_STAT_RUN) &&
		    (bgep->asf_pseudostop)) {
			bgep->bge_mac_state = BGE_MAC_STARTED;
			mutex_exit(bgep->genlock);
			return (0);
		}
	}
	if (bge_reset(bgep, ASF_MODE_INIT) != DDI_SUCCESS) {
#else
	if (bge_reset(bgep) != DDI_SUCCESS) {
#endif
		(void) bge_check_acc_handle(bgep, bgep->cfg_handle);
		(void) bge_check_acc_handle(bgep, bgep->io_handle);
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	if (bge_start(bgep, B_TRUE) != DDI_SUCCESS) {
		(void) bge_check_acc_handle(bgep, bgep->cfg_handle);
		(void) bge_check_acc_handle(bgep, bgep->io_handle);
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	bgep->watchdog = 0;
	bgep->bge_mac_state = BGE_MAC_STARTED;
	BGE_DEBUG(("bge_m_start($%p) done", arg));
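
	/*
	 * Post-start FMA sweep: if either access handle has faulted,
	 * report the service impact and fail the start with EIO.
	 */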
	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled) {
		if (bgep->asf_status != ASF_STAT_RUN) {
			/* start ASF heart beat */
			bgep->asf_timeout_id = timeout(bge_asf_heartbeat,
			    (void *)bgep,
			    drv_usectohz(BGE_ASF_HEARTBEAT_INTERVAL));
			bgep->asf_status = ASF_STAT_RUN;
		}
	}
#endif
	mutex_exit(bgep->genlock);

	return (0);
}

/*
 *	bge_unicst_set() -- set the physical network address
 */
static int
bge_unicst_set(void *arg, const uint8_t *macaddr, int slot)
{
	bge_t *bgep = arg;		/* private device info	*/

	BGE_TRACE(("bge_m_unicst_set($%p, %s)", arg,
	    ether_sprintf((void *)macaddr)));
	/*
	 * Remember the new current address in the driver state
	 * Sync the chip's idea of the address too ...
	 */
	mutex_enter(bgep->genlock);
	if (!(bgep->progress & PROGRESS_INTR)) {
		/* can happen during autorecovery */
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	ethaddr_copy(macaddr, bgep->curr_addr[slot].addr);
#ifdef BGE_IPMI_ASF
	if (bge_chip_sync(bgep, B_FALSE) == DDI_FAILURE) {
#else
	if (bge_chip_sync(bgep) == DDI_FAILURE) {
#endif
		(void) bge_check_acc_handle(bgep, bgep->cfg_handle);
		(void) bge_check_acc_handle(bgep, bgep->io_handle);
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled) {
		/*
		 * The bge_chip_sync() call above rewrote the ethernet MAC
		 * address registers, which destroys the IPMI/ASF sideband.
		 * Here we have to reset the chip to make the IPMI/ASF
		 * sideband work again.
		 */
		if (bgep->asf_status == ASF_STAT_RUN) {
			/*
			 * We must stop the ASF heart beat before
			 * bge_chip_stop(), otherwise some computers (e.g.
			 * IBM HS20 blade servers) may crash.
			 */
			bge_asf_update_status(bgep);
			bge_asf_stop_timer(bgep);
			bgep->asf_status = ASF_STAT_STOP;

			bge_asf_pre_reset_operations(bgep, BGE_INIT_RESET);
		}
		bge_chip_stop(bgep, B_FALSE);

		if (bge_restart(bgep, B_FALSE) == DDI_FAILURE) {
			(void) bge_check_acc_handle(bgep, bgep->cfg_handle);
			(void) bge_check_acc_handle(bgep, bgep->io_handle);
			ddi_fm_service_impact(bgep->devinfo,
			    DDI_SERVICE_DEGRADED);
			mutex_exit(bgep->genlock);
			return (EIO);
		}

		/*
		 * Start our ASF heartbeat counter as soon as possible.
		 */
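		/*
		 * (The heartbeat was stopped above, before the reset;
		 * ASF firmware typically watchdogs the host when the
		 * heartbeats stop, so it is restarted promptly.)
		 */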
		if (bgep->asf_status != ASF_STAT_RUN) {
			/* start ASF heart beat */
			bgep->asf_timeout_id = timeout(bge_asf_heartbeat,
			    (void *)bgep,
			    drv_usectohz(BGE_ASF_HEARTBEAT_INTERVAL));
			bgep->asf_status = ASF_STAT_RUN;
		}
	}
#endif
	BGE_DEBUG(("bge_m_unicst_set($%p) done", arg));
	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	mutex_exit(bgep->genlock);

	return (0);
}

extern void bge_wake_factotum(bge_t *);

static boolean_t
bge_param_locked(mac_prop_id_t pr_num)
{
	/*
	 * All adv_* parameters are locked (read-only) while
	 * the device is in any sort of loopback mode ...
	 */
	switch (pr_num) {
	case MAC_PROP_ADV_1000FDX_CAP:
	case MAC_PROP_EN_1000FDX_CAP:
	case MAC_PROP_ADV_1000HDX_CAP:
	case MAC_PROP_EN_1000HDX_CAP:
	case MAC_PROP_ADV_100FDX_CAP:
	case MAC_PROP_EN_100FDX_CAP:
	case MAC_PROP_ADV_100HDX_CAP:
	case MAC_PROP_EN_100HDX_CAP:
	case MAC_PROP_ADV_10FDX_CAP:
	case MAC_PROP_EN_10FDX_CAP:
	case MAC_PROP_ADV_10HDX_CAP:
	case MAC_PROP_EN_10HDX_CAP:
	case MAC_PROP_AUTONEG:
	case MAC_PROP_FLOWCTRL:
		return (B_TRUE);
	}
	return (B_FALSE);
}
/*
 * callback functions for set/get of properties
 */
static int
bge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
    uint_t pr_valsize, const void *pr_val)
{
	bge_t *bgep = barg;
	int err = 0;
	uint32_t cur_mtu, new_mtu;
	uint_t maxsdu;
	link_flowctrl_t fl;
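
	/*
	 * All the checks and updates below are made under genlock, so
	 * a property change cannot race with chip reprogramming.
	 */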
	mutex_enter(bgep->genlock);
	if (bgep->param_loop_mode != BGE_LOOP_NONE &&
	    bge_param_locked(pr_num)) {
		/*
		 * All adv_* parameters are locked (read-only)
		 * while the device is in any sort of loopback mode.
		 */
		mutex_exit(bgep->genlock);
		return (EBUSY);
	}
	if ((bgep->chipid.flags & CHIP_FLAG_SERDES) &&
	    ((pr_num == MAC_PROP_EN_100FDX_CAP) ||
	    (pr_num == MAC_PROP_EN_100HDX_CAP) ||
	    (pr_num == MAC_PROP_EN_10FDX_CAP) ||
	    (pr_num == MAC_PROP_EN_10HDX_CAP))) {
		/*
		 * these properties are read/write on copper,
		 * read-only and 0 on serdes
		 */
		mutex_exit(bgep->genlock);
		return (ENOTSUP);
	}
	if (DEVICE_5906_SERIES_CHIPSETS(bgep) &&
	    ((pr_num == MAC_PROP_EN_1000FDX_CAP) ||
	    (pr_num == MAC_PROP_EN_1000HDX_CAP))) {
		mutex_exit(bgep->genlock);
		return (ENOTSUP);
	}

	switch (pr_num) {
	case MAC_PROP_EN_1000FDX_CAP:
		bgep->param_en_1000fdx = *(uint8_t *)pr_val;
		bgep->param_adv_1000fdx = *(uint8_t *)pr_val;
		goto reprogram;
	case MAC_PROP_EN_1000HDX_CAP:
		bgep->param_en_1000hdx = *(uint8_t *)pr_val;
		bgep->param_adv_1000hdx = *(uint8_t *)pr_val;
		goto reprogram;
	case MAC_PROP_EN_100FDX_CAP:
		bgep->param_en_100fdx = *(uint8_t *)pr_val;
		bgep->param_adv_100fdx = *(uint8_t *)pr_val;
		goto reprogram;
	case MAC_PROP_EN_100HDX_CAP:
		bgep->param_en_100hdx = *(uint8_t *)pr_val;
		bgep->param_adv_100hdx = *(uint8_t *)pr_val;
		goto reprogram;
	case MAC_PROP_EN_10FDX_CAP:
		bgep->param_en_10fdx = *(uint8_t *)pr_val;
		bgep->param_adv_10fdx = *(uint8_t *)pr_val;
		goto reprogram;
	case MAC_PROP_EN_10HDX_CAP:
		bgep->param_en_10hdx = *(uint8_t *)pr_val;
		bgep->param_adv_10hdx = *(uint8_t *)pr_val;
reprogram:
		if (err == 0 && bge_reprogram(bgep) == IOC_INVAL)
			err = EINVAL;
		break;
	case MAC_PROP_ADV_1000FDX_CAP:
	case MAC_PROP_ADV_1000HDX_CAP:
	case MAC_PROP_ADV_100FDX_CAP:
	case MAC_PROP_ADV_100HDX_CAP:
	case MAC_PROP_ADV_10FDX_CAP:
	case MAC_PROP_ADV_10HDX_CAP:
	case MAC_PROP_STATUS:
	case MAC_PROP_SPEED:
	case MAC_PROP_DUPLEX:
		err = ENOTSUP; /* read-only prop. Can't set this */
		break;
	case MAC_PROP_AUTONEG:
		bgep->param_adv_autoneg = *(uint8_t *)pr_val;
		if (bge_reprogram(bgep) == IOC_INVAL)
			err = EINVAL;
		break;
	case MAC_PROP_MTU:
		cur_mtu = bgep->chipid.default_mtu;
		bcopy(pr_val, &new_mtu, sizeof (new_mtu));

		if (new_mtu == cur_mtu) {
			err = 0;
			break;
		}
		if (new_mtu < BGE_DEFAULT_MTU ||
		    new_mtu > BGE_MAXIMUM_MTU) {
			err = EINVAL;
			break;
		}
		if ((new_mtu > BGE_DEFAULT_MTU) &&
		    (bgep->chipid.flags & CHIP_FLAG_NO_JUMBO)) {
			err = EINVAL;
			break;
		}
		if (bgep->bge_mac_state == BGE_MAC_STARTED) {
			err = EBUSY;
			break;
		}
		bgep->chipid.default_mtu = new_mtu;
		if (bge_chip_id_init(bgep)) {
			err = EINVAL;
			break;
		}
		maxsdu = bgep->chipid.ethmax_size -
		    sizeof (struct ether_header);
		err = mac_maxsdu_update(bgep->mh, maxsdu);
		if (err == 0) {
			bgep->bge_dma_error = B_TRUE;
			bgep->manual_reset = B_TRUE;
			bge_chip_stop(bgep, B_TRUE);
			bge_wake_factotum(bgep);
			err = 0;
		}
		break;
	case MAC_PROP_FLOWCTRL:
		bcopy(pr_val, &fl, sizeof (fl));
		switch (fl) {
		default:
			err = ENOTSUP;
			break;
		case LINK_FLOWCTRL_NONE:
			bgep->param_adv_pause = 0;
			bgep->param_adv_asym_pause = 0;

			bgep->param_link_rx_pause = B_FALSE;
			bgep->param_link_tx_pause = B_FALSE;
			break;
		case LINK_FLOWCTRL_RX:
			bgep->param_adv_pause = 1;
			bgep->param_adv_asym_pause = 1;

			bgep->param_link_rx_pause = B_TRUE;
			bgep->param_link_tx_pause = B_FALSE;
			break;
		case LINK_FLOWCTRL_TX:
			bgep->param_adv_pause = 0;
			bgep->param_adv_asym_pause = 1;

			bgep->param_link_rx_pause = B_FALSE;
			bgep->param_link_tx_pause = B_TRUE;
			break;
		case LINK_FLOWCTRL_BI:
			bgep->param_adv_pause = 1;
			bgep->param_adv_asym_pause = 0;

			bgep->param_link_rx_pause = B_TRUE;
			bgep->param_link_tx_pause = B_TRUE;
			break;
		}

		if (err == 0) {
			if (bge_reprogram(bgep) == IOC_INVAL)
				err = EINVAL;
		}

		break;
	case MAC_PROP_PRIVATE:
		err = bge_set_priv_prop(bgep, pr_name, pr_valsize,
		    pr_val);
		break;
	default:
		err = ENOTSUP;
		break;
	}
	mutex_exit(bgep->genlock);
	return (err);
}

/* ARGSUSED */
static int
bge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
    uint_t pr_flags, uint_t pr_valsize, void *pr_val, uint_t *perm)
{
	bge_t *bgep = barg;
	int err = 0;
	link_flowctrl_t fl;
	uint64_t speed;
	int flags = bgep->chipid.flags;
	boolean_t is_default = (pr_flags & MAC_PROP_DEFAULT);

	if (pr_valsize == 0)
		return (EINVAL);
	bzero(pr_val, pr_valsize);

	*perm = MAC_PROP_PERM_RW;

	mutex_enter(bgep->genlock);
	if ((bgep->param_loop_mode != BGE_LOOP_NONE &&
	    bge_param_locked(pr_num)) ||
	    ((bgep->chipid.flags & CHIP_FLAG_SERDES) &&
	    ((pr_num == MAC_PROP_EN_100FDX_CAP) ||
	    (pr_num == MAC_PROP_EN_100HDX_CAP) ||
	    (pr_num == MAC_PROP_EN_10FDX_CAP) ||
	    (pr_num == MAC_PROP_EN_10HDX_CAP))) ||
	    (DEVICE_5906_SERIES_CHIPSETS(bgep) &&
	    ((pr_num == MAC_PROP_EN_1000FDX_CAP) ||
	    (pr_num == MAC_PROP_EN_1000HDX_CAP))))
		*perm = MAC_PROP_PERM_READ;
	mutex_exit(bgep->genlock);

	switch (pr_num) {
	case MAC_PROP_DUPLEX:
		*perm = MAC_PROP_PERM_READ;
		if (pr_valsize < sizeof (link_duplex_t))
			return (EINVAL);
		bcopy(&bgep->param_link_duplex, pr_val,
		    sizeof (link_duplex_t));
		break;
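	/*
	 * Note: the link speed below is reported in bits per second,
	 * converted from the Mb/s value kept in param_link_speed.
	 */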
	case MAC_PROP_SPEED:
		*perm = MAC_PROP_PERM_READ;
		if (pr_valsize < sizeof (speed))
			return (EINVAL);
		speed = bgep->param_link_speed * 1000000ull;
		bcopy(&speed, pr_val, sizeof (speed));
		break;
	case MAC_PROP_STATUS:
		*perm = MAC_PROP_PERM_READ;
		if (pr_valsize < sizeof (link_state_t))
			return (EINVAL);
		bcopy(&bgep->link_state, pr_val,
		    sizeof (link_state_t));
		break;
	case MAC_PROP_AUTONEG:
		if (is_default)
			*(uint8_t *)pr_val = 1;
		else
			*(uint8_t *)pr_val = bgep->param_adv_autoneg;
		break;
	case MAC_PROP_FLOWCTRL:
		if (pr_valsize < sizeof (fl))
			return (EINVAL);
		if (is_default) {
			fl = LINK_FLOWCTRL_BI;
			bcopy(&fl, pr_val, sizeof (fl));
			break;
		}

		if (bgep->param_link_rx_pause &&
		    !bgep->param_link_tx_pause)
			fl = LINK_FLOWCTRL_RX;

		if (!bgep->param_link_rx_pause &&
		    !bgep->param_link_tx_pause)
			fl = LINK_FLOWCTRL_NONE;

		if (!bgep->param_link_rx_pause &&
		    bgep->param_link_tx_pause)
			fl = LINK_FLOWCTRL_TX;

		if (bgep->param_link_rx_pause &&
		    bgep->param_link_tx_pause)
			fl = LINK_FLOWCTRL_BI;
		bcopy(&fl, pr_val, sizeof (fl));
		break;
	case MAC_PROP_ADV_1000FDX_CAP:
		*perm = MAC_PROP_PERM_READ;
		if (is_default) {
			if (DEVICE_5906_SERIES_CHIPSETS(bgep))
				*(uint8_t *)pr_val = 0;
			else
				*(uint8_t *)pr_val = 1;
		} else
			*(uint8_t *)pr_val = bgep->param_adv_1000fdx;
		break;
	case MAC_PROP_EN_1000FDX_CAP:
		if (is_default) {
			if (DEVICE_5906_SERIES_CHIPSETS(bgep))
				*(uint8_t *)pr_val = 0;
			else
				*(uint8_t *)pr_val = 1;
		} else
			*(uint8_t *)pr_val = bgep->param_en_1000fdx;
		break;
	case MAC_PROP_ADV_1000HDX_CAP:
		*perm = MAC_PROP_PERM_READ;
		if (is_default) {
			if (DEVICE_5906_SERIES_CHIPSETS(bgep))
				*(uint8_t *)pr_val = 0;
			else
				*(uint8_t *)pr_val = 1;
		} else
			*(uint8_t *)pr_val = bgep->param_adv_1000hdx;
		break;
	case MAC_PROP_EN_1000HDX_CAP:
		if (is_default) {
			if (DEVICE_5906_SERIES_CHIPSETS(bgep))
				*(uint8_t *)pr_val = 0;
			else
				*(uint8_t *)pr_val = 1;
		} else
			*(uint8_t *)pr_val = bgep->param_en_1000hdx;
		break;
	case MAC_PROP_ADV_100FDX_CAP:
		*perm = MAC_PROP_PERM_READ;
		if (is_default) {
			*(uint8_t *)pr_val =
			    ((flags & CHIP_FLAG_SERDES) ? 0 : 1);
		} else {
			*(uint8_t *)pr_val = bgep->param_adv_100fdx;
		}
		break;
	case MAC_PROP_EN_100FDX_CAP:
		if (is_default) {
			*(uint8_t *)pr_val =
			    ((flags & CHIP_FLAG_SERDES) ? 0 : 1);
		} else {
			*(uint8_t *)pr_val = bgep->param_en_100fdx;
		}
		break;
	case MAC_PROP_ADV_100HDX_CAP:
		*perm = MAC_PROP_PERM_READ;
		if (is_default) {
			*(uint8_t *)pr_val =
			    ((flags & CHIP_FLAG_SERDES) ? 0 : 1);
		} else {
			*(uint8_t *)pr_val = bgep->param_adv_100hdx;
		}
		break;
	case MAC_PROP_EN_100HDX_CAP:
		if (is_default) {
			*(uint8_t *)pr_val =
			    ((flags & CHIP_FLAG_SERDES) ? 0 : 1);
		} else {
			*(uint8_t *)pr_val = bgep->param_en_100hdx;
		}
		break;
	case MAC_PROP_ADV_10FDX_CAP:
		*perm = MAC_PROP_PERM_READ;
		if (is_default) {
			*(uint8_t *)pr_val =
			    ((flags & CHIP_FLAG_SERDES) ? 0 : 1);
		} else {
			*(uint8_t *)pr_val = bgep->param_adv_10fdx;
		}
		break;
	case MAC_PROP_EN_10FDX_CAP:
		if (is_default) {
			*(uint8_t *)pr_val =
			    ((flags & CHIP_FLAG_SERDES) ? 0 : 1);
		} else {
			*(uint8_t *)pr_val = bgep->param_en_10fdx;
		}
		break;
	case MAC_PROP_ADV_10HDX_CAP:
		*perm = MAC_PROP_PERM_READ;
		if (is_default) {
			*(uint8_t *)pr_val =
			    ((flags & CHIP_FLAG_SERDES) ? 0 : 1);
		} else {
			*(uint8_t *)pr_val = bgep->param_adv_10hdx;
		}
		break;
	case MAC_PROP_EN_10HDX_CAP:
		if (is_default) {
			*(uint8_t *)pr_val =
			    ((flags & CHIP_FLAG_SERDES) ? 0 : 1);
		} else {
			*(uint8_t *)pr_val = bgep->param_en_10hdx;
		}
		break;
	case MAC_PROP_ADV_100T4_CAP:
	case MAC_PROP_EN_100T4_CAP:
		*perm = MAC_PROP_PERM_READ;
		*(uint8_t *)pr_val = 0;
		break;
	case MAC_PROP_PRIVATE:
		err = bge_get_priv_prop(bgep, pr_name, pr_flags,
		    pr_valsize, pr_val);
		return (err);
	case MAC_PROP_MTU: {
		mac_propval_range_t range;

		if (!(pr_flags & MAC_PROP_POSSIBLE))
			return (ENOTSUP);
		if (pr_valsize < sizeof (mac_propval_range_t))
			return (EINVAL);
		range.mpr_count = 1;
		range.mpr_type = MAC_PROPVAL_UINT32;
		range.range_uint32[0].mpur_min =
		    range.range_uint32[0].mpur_max = BGE_DEFAULT_MTU;
		if (!(flags & CHIP_FLAG_NO_JUMBO))
			range.range_uint32[0].mpur_max =
			    BGE_MAXIMUM_MTU;
		bcopy(&range, pr_val, sizeof (range));
		break;
	}
	default:
		return (ENOTSUP);
	}
	return (0);
}

/* ARGSUSED */
static int
bge_set_priv_prop(bge_t *bgep, const char *pr_name, uint_t pr_valsize,
    const void *pr_val)
{
	int err = 0;
	long result;

	if (strcmp(pr_name, "_adv_pause_cap") == 0) {
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
		if (result > 1 || result < 0) {
			err = EINVAL;
		} else {
			bgep->param_adv_pause = (uint32_t)result;
			if (bge_reprogram(bgep) == IOC_INVAL)
				err = EINVAL;
		}
		return (err);
	}
	if (strcmp(pr_name, "_adv_asym_pause_cap") == 0) {
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
		if (result > 1 || result < 0) {
			err = EINVAL;
		} else {
			bgep->param_adv_asym_pause = (uint32_t)result;
			if (bge_reprogram(bgep) == IOC_INVAL)
				err = EINVAL;
		}
		return (err);
	}
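
	/*
	 * Note: private property values arrive as strings, hence the
	 * ddi_strtol() parsing in each handler below; out-of-range
	 * values are rejected with EINVAL before any h/w update.
	 */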
	if (strcmp(pr_name, "_drain_max") == 0) {
		/*
		 * On the Tx side, the h/w register must be updated to
		 * trigger transmission of each queued packet.  The
		 * drain_max parameter is used to reduce that register
		 * traffic: it controls the maximum number of packets
		 * we will hold before updating the bge h/w to trigger
		 * the transmit.  The bge chipset usually has a maximum
		 * of 512 Tx descriptors, thus the upper bound on
		 * drain_max is 512.
		 */
		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
		if (result > 512 || result < 1)
			err = EINVAL;
		else {
			bgep->param_drain_max = (uint32_t)result;
			if (bge_reprogram(bgep) == IOC_INVAL)
				err = EINVAL;
		}
		return (err);
	}
	if (strcmp(pr_name, "_msi_cnt") == 0) {
		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
		if (result > 7 || result < 0)
			err = EINVAL;
		else {
			bgep->param_msi_cnt = (uint32_t)result;
			if (bge_reprogram(bgep) == IOC_INVAL)
				err = EINVAL;
		}
		return (err);
	}
	if (strcmp(pr_name, "_rx_intr_coalesce_blank_time") == 0) {
		if (ddi_strtol(pr_val, (char **)NULL, 0, &result) != 0)
			return (EINVAL);
		if (result < 0)
			err = EINVAL;
		else {
			bgep->chipid.rx_ticks_norm = (uint32_t)result;
			bge_chip_coalesce_update(bgep);
		}
		return (err);
	}

	if (strcmp(pr_name, "_rx_intr_coalesce_pkt_cnt") == 0) {
		if (ddi_strtol(pr_val, (char **)NULL, 0, &result) != 0)
			return (EINVAL);

		if (result < 0)
			err = EINVAL;
		else {
			bgep->chipid.rx_count_norm = (uint32_t)result;
			bge_chip_coalesce_update(bgep);
		}
		return (err);
	}
	if (strcmp(pr_name, "_tx_intr_coalesce_blank_time") == 0) {
		if (ddi_strtol(pr_val, (char **)NULL, 0, &result) != 0)
			return (EINVAL);
		if (result < 0)
			err = EINVAL;
		else {
			bgep->chipid.tx_ticks_norm = (uint32_t)result;
			bge_chip_coalesce_update(bgep);
		}
		return (err);
	}

	if (strcmp(pr_name, "_tx_intr_coalesce_pkt_cnt") == 0) {
		if (ddi_strtol(pr_val, (char **)NULL, 0, &result) != 0)
			return (EINVAL);

		if (result < 0)
			err = EINVAL;
		else {
			bgep->chipid.tx_count_norm = (uint32_t)result;
			bge_chip_coalesce_update(bgep);
		}
		return (err);
	}
	return (ENOTSUP);
}

static int
bge_get_priv_prop(bge_t *bge, const char *pr_name, uint_t pr_flags,
    uint_t pr_valsize, void *pr_val)
{
	int err = ENOTSUP;
	boolean_t is_default = (pr_flags & MAC_PROP_DEFAULT);
	int value;

	if (strcmp(pr_name, "_adv_pause_cap") == 0) {
		value = (is_default ? 1 : bge->param_adv_pause);
		err = 0;
		goto done;
	}
	if (strcmp(pr_name, "_adv_asym_pause_cap") == 0) {
		value = (is_default ? 1 : bge->param_adv_asym_pause);
		err = 0;
		goto done;
	}
	if (strcmp(pr_name, "_drain_max") == 0) {
		value = (is_default ? 64 : bge->param_drain_max);
		err = 0;
		goto done;
	}
	if (strcmp(pr_name, "_msi_cnt") == 0) {
		value = (is_default ? 0 : bge->param_msi_cnt);
		err = 0;
		goto done;
	}

	if (strcmp(pr_name, "_intr_coalesce_blank_time") == 0) {
		value = (is_default ? bge_rx_ticks_norm :
		    bge->chipid.rx_ticks_norm);
		err = 0;
		goto done;
	}

	if (strcmp(pr_name, "_intr_coalesce_pkt_cnt") == 0) {
		value = (is_default ? bge_rx_count_norm :
		    bge->chipid.rx_count_norm);
		err = 0;
		goto done;
	}
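
	/*
	 * Unrecognised names fall through to "done" with err still
	 * ENOTSUP; recognised names return their value as a decimal
	 * string, the same form bge_set_priv_prop() parses.
	 */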
done:
	if (err == 0) {
		(void) snprintf(pr_val, pr_valsize, "%d", value);
	}
	return (err);
}

/*
 * Compute the index of the required bit in the multicast hash map.
 * This must mirror the way the hardware actually does it!
 * See Broadcom document 570X-PG102-R page 125.
 */
static uint32_t
bge_hash_index(const uint8_t *mca)
{
	uint32_t hash;

	CRC32(hash, mca, ETHERADDRL, -1U, crc32_table);

	return (hash);
}

/*
 *	bge_m_multicst() -- enable/disable a multicast address
 */
static int
bge_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
{
	bge_t *bgep = arg;		/* private device info	*/
	uint32_t hash;
	uint32_t index;
	uint32_t word;
	uint32_t bit;
	uint8_t *refp;

	BGE_TRACE(("bge_m_multicst($%p, %s, %s)", arg,
	    (add) ? "add" : "remove", ether_sprintf((void *)mca)));

	/*
	 * Precalculate all required masks, pointers etc ...
	 */
	hash = bge_hash_index(mca);
	index = hash % BGE_HASH_TABLE_SIZE;
	word = index/32u;
	bit = 1 << (index % 32u);
	refp = &bgep->mcast_refs[index];

	BGE_DEBUG(("bge_m_multicst: hash 0x%x index %d (%d:0x%x) = %d",
	    hash, index, word, bit, *refp));

	/*
	 * We must set the appropriate bit in the hash map (and the
	 * corresponding h/w register) when the refcount goes from 0
	 * to >0, and clear it when the last ref goes away (refcount
	 * goes from >0 back to 0).  If we change the hash map, we
	 * must also update the chip's hardware map registers.
	 */
	mutex_enter(bgep->genlock);
	if (!(bgep->progress & PROGRESS_INTR)) {
		/* can happen during autorecovery */
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	if (add) {
		if ((*refp)++ == 0) {
			bgep->mcast_hash[word] |= bit;
#ifdef BGE_IPMI_ASF
			if (bge_chip_sync(bgep, B_TRUE) == DDI_FAILURE) {
#else
			if (bge_chip_sync(bgep) == DDI_FAILURE) {
#endif
				(void) bge_check_acc_handle(bgep,
				    bgep->cfg_handle);
				(void) bge_check_acc_handle(bgep,
				    bgep->io_handle);
				ddi_fm_service_impact(bgep->devinfo,
				    DDI_SERVICE_DEGRADED);
				mutex_exit(bgep->genlock);
				return (EIO);
			}
		}
	} else {
		if (--(*refp) == 0) {
			bgep->mcast_hash[word] &= ~bit;
#ifdef BGE_IPMI_ASF
			if (bge_chip_sync(bgep, B_TRUE) == DDI_FAILURE) {
#else
			if (bge_chip_sync(bgep) == DDI_FAILURE) {
#endif
				(void) bge_check_acc_handle(bgep,
				    bgep->cfg_handle);
				(void) bge_check_acc_handle(bgep,
				    bgep->io_handle);
				ddi_fm_service_impact(bgep->devinfo,
				    DDI_SERVICE_DEGRADED);
				mutex_exit(bgep->genlock);
				return (EIO);
			}
		}
	}
	BGE_DEBUG(("bge_m_multicst($%p) done", arg));
	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	mutex_exit(bgep->genlock);

	return (0);
}

/*
 * bge_m_promisc() -- set or reset promiscuous mode on the board
 *
 *	Program the hardware to enable/disable promiscuous and/or
 *	receive-all-multicast modes.
 */
static int
bge_m_promisc(void *arg, boolean_t on)
{
	bge_t *bgep = arg;

	BGE_TRACE(("bge_m_promisc_set($%p, %d)", arg, on));

	/*
	 * Store MAC layer specified mode and pass to chip layer to update h/w
	 */
	mutex_enter(bgep->genlock);
	if (!(bgep->progress & PROGRESS_INTR)) {
		/* can happen during autorecovery */
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	bgep->promisc = on;
#ifdef BGE_IPMI_ASF
	if (bge_chip_sync(bgep, B_TRUE) == DDI_FAILURE) {
#else
	if (bge_chip_sync(bgep) == DDI_FAILURE) {
#endif
		(void) bge_check_acc_handle(bgep, bgep->cfg_handle);
		(void) bge_check_acc_handle(bgep, bgep->io_handle);
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	BGE_DEBUG(("bge_m_promisc_set($%p) done", arg));
	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	mutex_exit(bgep->genlock);
	return (0);
}

/*
 * Find the slot for the specified unicast address
 */
int
bge_unicst_find(bge_t *bgep, const uint8_t *mac_addr)
{
	int slot;

	ASSERT(mutex_owned(bgep->genlock));

	for (slot = 0; slot < bgep->unicst_addr_total; slot++) {
		if (bcmp(bgep->curr_addr[slot].addr, mac_addr, ETHERADDRL) == 0)
			return (slot);
	}

	return (-1);
}

/*
 * Programs the classifier to start steering packets matching 'mac_addr' to the
 * specified ring 'arg'.
 */
static int
bge_addmac(void *arg, const uint8_t *mac_addr)
{
	recv_ring_t *rrp = (recv_ring_t *)arg;
	bge_t		*bgep = rrp->bgep;
	bge_recv_rule_t	*rulep = bgep->recv_rules;
	bge_rule_info_t	*rinfop = NULL;
	uint8_t		ring = (uint8_t)(rrp - bgep->recv) + 1;
	int		i;
	uint16_t	tmp16;
	uint32_t	tmp32;
	int		slot;
	int		err;

	mutex_enter(bgep->genlock);
	if (bgep->unicst_addr_avail == 0) {
		mutex_exit(bgep->genlock);
		return (ENOSPC);
	}

	/*
	 * First add the unicast address to an available slot.
	 */
	slot = bge_unicst_find(bgep, mac_addr);
	ASSERT(slot == -1);

	for (slot = 0; slot < bgep->unicst_addr_total; slot++) {
		if (!bgep->curr_addr[slot].set) {
			bgep->curr_addr[slot].set = B_TRUE;
			break;
		}
	}

	ASSERT(slot < bgep->unicst_addr_total);
	bgep->unicst_addr_avail--;
	mutex_exit(bgep->genlock);

	if ((err = bge_unicst_set(bgep, mac_addr, slot)) != 0)
		goto fail;

	/* A rule is already here. Deny this. */
	if (rrp->mac_addr_rule != NULL) {
		err = ether_cmp(mac_addr, rrp->mac_addr_val) ? EEXIST : EBUSY;
		goto fail;
	}

	/*
	 * Allocate a bge_rule_info_t to keep track of which rule slots
	 * are being used.
	 */
	rinfop = kmem_zalloc(sizeof (bge_rule_info_t), KM_NOSLEEP);
	if (rinfop == NULL) {
		err = ENOMEM;
		goto fail;
	}
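
	/*
	 * Each steering rule takes two classifier slots: the first
	 * matches the first 4 bytes of the MAC address, the second
	 * the last 2; hence the search for two contiguous free slots.
	 */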
	/*
	 * Look for the starting slot to place the rules.
	 * The two slots we reserve must be contiguous.
	 */
	for (i = 0; i + 1 < RECV_RULES_NUM_MAX; i++)
		if ((rulep[i].control & RECV_RULE_CTL_ENABLE) == 0 &&
		    (rulep[i+1].control & RECV_RULE_CTL_ENABLE) == 0)
			break;

	ASSERT(i + 1 < RECV_RULES_NUM_MAX);

	bcopy(mac_addr, &tmp32, sizeof (tmp32));
	rulep[i].mask_value = ntohl(tmp32);
	rulep[i].control = RULE_DEST_MAC_1(ring) | RECV_RULE_CTL_AND;
	bge_reg_put32(bgep, RECV_RULE_MASK_REG(i), rulep[i].mask_value);
	bge_reg_put32(bgep, RECV_RULE_CONTROL_REG(i), rulep[i].control);

	bcopy(mac_addr + 4, &tmp16, sizeof (tmp16));
	rulep[i+1].mask_value = 0xffff0000 | ntohs(tmp16);
	rulep[i+1].control = RULE_DEST_MAC_2(ring);
	bge_reg_put32(bgep, RECV_RULE_MASK_REG(i+1), rulep[i+1].mask_value);
	bge_reg_put32(bgep, RECV_RULE_CONTROL_REG(i+1), rulep[i+1].control);
	rinfop->start = i;
	rinfop->count = 2;

	rrp->mac_addr_rule = rinfop;
	bcopy(mac_addr, rrp->mac_addr_val, ETHERADDRL);

	return (0);

fail:
	/* Clear the address just set */
	(void) bge_unicst_set(bgep, zero_addr, slot);
	mutex_enter(bgep->genlock);
	bgep->curr_addr[slot].set = B_FALSE;
	bgep->unicst_addr_avail++;
	mutex_exit(bgep->genlock);

	return (err);
}

/*
 * Stop classifying packets matching the MAC address to the specified ring.
 */
static int
bge_remmac(void *arg, const uint8_t *mac_addr)
{
	recv_ring_t	*rrp = (recv_ring_t *)arg;
	bge_t		*bgep = rrp->bgep;
	bge_recv_rule_t *rulep = bgep->recv_rules;
	bge_rule_info_t *rinfop = rrp->mac_addr_rule;
	int		start;
	int		slot;
	int		err;

	/*
	 * Remove the MAC address from its slot.
	 */
	mutex_enter(bgep->genlock);
	slot = bge_unicst_find(bgep, mac_addr);
	if (slot == -1) {
		mutex_exit(bgep->genlock);
		return (EINVAL);
	}

	ASSERT(bgep->curr_addr[slot].set);
	mutex_exit(bgep->genlock);

	if ((err = bge_unicst_set(bgep, zero_addr, slot)) != 0)
		return (err);

	if (rinfop == NULL || ether_cmp(mac_addr, rrp->mac_addr_val) != 0)
		return (EINVAL);

	start = rinfop->start;
	rulep[start].mask_value = 0;
	rulep[start].control = 0;
	bge_reg_put32(bgep, RECV_RULE_MASK_REG(start), rulep[start].mask_value);
	bge_reg_put32(bgep, RECV_RULE_CONTROL_REG(start), rulep[start].control);
	start++;
	rulep[start].mask_value = 0;
	rulep[start].control = 0;
	bge_reg_put32(bgep, RECV_RULE_MASK_REG(start), rulep[start].mask_value);
	bge_reg_put32(bgep, RECV_RULE_CONTROL_REG(start), rulep[start].control);

	kmem_free(rinfop, sizeof (bge_rule_info_t));
	rrp->mac_addr_rule = NULL;
	bzero(rrp->mac_addr_val, ETHERADDRL);

	mutex_enter(bgep->genlock);
	bgep->curr_addr[slot].set = B_FALSE;
	bgep->unicst_addr_avail++;
	mutex_exit(bgep->genlock);

	return (0);
}

static int
bge_flag_intr_enable(mac_intr_handle_t ih)
{
	recv_ring_t *rrp = (recv_ring_t *)ih;
	bge_t *bgep = rrp->bgep;

	mutex_enter(bgep->genlock);
	rrp->poll_flag = 0;
	mutex_exit(bgep->genlock);

	return (0);
}

static int
bge_flag_intr_disable(mac_intr_handle_t ih)
{
	recv_ring_t *rrp = (recv_ring_t *)ih;
	bge_t *bgep = rrp->bgep;

	mutex_enter(bgep->genlock);
	rrp->poll_flag = 1;
	mutex_exit(bgep->genlock);

	return (0);
}
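
/*
 * poll_flag selects between interrupt and polling delivery for this
 * ring: the MAC layer is expected to call mi_disable before polling
 * via bge_poll_ring() and mi_enable to return to interrupt delivery.
 * (The receive path that consults the flag is not shown here.)
 */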
static int
bge_ring_start(mac_ring_driver_t rh, uint64_t mr_gen_num)
{
	recv_ring_t *rx_ring;

	rx_ring = (recv_ring_t *)rh;
	mutex_enter(rx_ring->rx_lock);
	rx_ring->ring_gen_num = mr_gen_num;
	mutex_exit(rx_ring->rx_lock);
	return (0);
}


/*
 * Callback function for the MAC layer to register all rings
 * for a given ring_group, noted by rg_index.
 */
void
bge_fill_ring(void *arg, mac_ring_type_t rtype, const int rg_index,
    const int index, mac_ring_info_t *infop, mac_ring_handle_t rh)
{
	bge_t *bgep = arg;
	mac_intr_t *mintr;

	switch (rtype) {
	case MAC_RING_TYPE_RX: {
		recv_ring_t *rx_ring;
		ASSERT(rg_index >= 0 && rg_index < MIN(bgep->chipid.rx_rings,
		    MAC_ADDRESS_REGS_MAX) && index == 0);

		rx_ring = &bgep->recv[rg_index];
		rx_ring->ring_handle = rh;

		infop->mri_driver = (mac_ring_driver_t)rx_ring;
		infop->mri_start = bge_ring_start;
		infop->mri_stop = NULL;
		infop->mri_poll = bge_poll_ring;

		mintr = &infop->mri_intr;
		mintr->mi_handle = (mac_intr_handle_t)rx_ring;
		mintr->mi_enable = bge_flag_intr_enable;
		mintr->mi_disable = bge_flag_intr_disable;

		break;
	}
	case MAC_RING_TYPE_TX:
	default:
		ASSERT(0);
		break;
	}
}

/*
 * Fill in the infop passed as argument with the respective ring group
 * info.  Each group has a single ring in it.  We keep it simple and
 * use the same internal handle for rings and groups.
 */
void
bge_fill_group(void *arg, mac_ring_type_t rtype, const int rg_index,
    mac_group_info_t *infop, mac_group_handle_t gh)
{
	bge_t *bgep = arg;

	switch (rtype) {
	case MAC_RING_TYPE_RX: {
		recv_ring_t *rx_ring;

		ASSERT(rg_index >= 0 && rg_index < MIN(bgep->chipid.rx_rings,
		    MAC_ADDRESS_REGS_MAX));
		rx_ring = &bgep->recv[rg_index];
		rx_ring->ring_group_handle = gh;

		infop->mgi_driver = (mac_group_driver_t)rx_ring;
		infop->mgi_start = NULL;
		infop->mgi_stop = NULL;
		infop->mgi_addmac = bge_addmac;
		infop->mgi_remmac = bge_remmac;
		infop->mgi_count = 1;
		break;
	}
	case MAC_RING_TYPE_TX:
	default:
		ASSERT(0);
		break;
	}
}

/*ARGSUSED*/
static boolean_t
bge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
{
	bge_t *bgep = arg;

	switch (cap) {
	case MAC_CAPAB_HCKSUM: {
		uint32_t *txflags = cap_data;

		*txflags = HCKSUM_INET_FULL_V4 | HCKSUM_IPHDRCKSUM;
		break;
	}
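	/*
	 * Each RX group contains exactly one ring, so mr_rnum equals
	 * mr_gnum below and the same recv_ring_t backs both the ring
	 * and the group handle.
	 */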
	case MAC_CAPAB_RINGS: {
		mac_capab_rings_t *cap_rings = cap_data;

		/* Temporarily disable multiple tx rings. */
		if (cap_rings->mr_type != MAC_RING_TYPE_RX)
			return (B_FALSE);

		cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
		cap_rings->mr_rnum = cap_rings->mr_gnum =
		    MIN(bgep->chipid.rx_rings, MAC_ADDRESS_REGS_MAX);
		cap_rings->mr_rget = bge_fill_ring;
		cap_rings->mr_gget = bge_fill_group;
		break;
	}
	default:
		return (B_FALSE);
	}
	return (B_TRUE);
}

/*
 * Loopback ioctl code
 */

static lb_property_t loopmodes[] = {
	{ normal,	"normal",	BGE_LOOP_NONE		},
	{ external,	"1000Mbps",	BGE_LOOP_EXTERNAL_1000	},
	{ external,	"100Mbps",	BGE_LOOP_EXTERNAL_100	},
	{ external,	"10Mbps",	BGE_LOOP_EXTERNAL_10	},
	{ internal,	"PHY",		BGE_LOOP_INTERNAL_PHY	},
	{ internal,	"MAC",		BGE_LOOP_INTERNAL_MAC	}
};

static enum ioc_reply
bge_set_loop_mode(bge_t *bgep, uint32_t mode)
{
	/*
	 * If the mode isn't being changed, there's nothing to do ...
	 */
	if (mode == bgep->param_loop_mode)
		return (IOC_ACK);

	/*
	 * Validate the requested mode and prepare a suitable message
	 * to explain the link down/up cycle that the change will
	 * probably induce ...
	 */
	switch (mode) {
	default:
		return (IOC_INVAL);

	case BGE_LOOP_NONE:
	case BGE_LOOP_EXTERNAL_1000:
	case BGE_LOOP_EXTERNAL_100:
	case BGE_LOOP_EXTERNAL_10:
	case BGE_LOOP_INTERNAL_PHY:
	case BGE_LOOP_INTERNAL_MAC:
		break;
	}

	/*
	 * All OK; tell the caller to reprogram
	 * the PHY and/or MAC for the new mode ...
	 */
	bgep->param_loop_mode = mode;
	return (IOC_RESTART_ACK);
}

static enum ioc_reply
bge_loop_ioctl(bge_t *bgep, queue_t *wq, mblk_t *mp, struct iocblk *iocp)
{
	lb_info_sz_t *lbsp;
	lb_property_t *lbpp;
	uint32_t *lbmp;
	int cmd;

	_NOTE(ARGUNUSED(wq))

	/*
	 * Validate format of ioctl
	 */
	if (mp->b_cont == NULL)
		return (IOC_INVAL);

	cmd = iocp->ioc_cmd;
	switch (cmd) {
	default:
		/* NOTREACHED */
		bge_error(bgep, "bge_loop_ioctl: invalid cmd 0x%x", cmd);
		return (IOC_INVAL);

	case LB_GET_INFO_SIZE:
		if (iocp->ioc_count != sizeof (lb_info_sz_t))
			return (IOC_INVAL);
		lbsp = (void *)mp->b_cont->b_rptr;
		*lbsp = sizeof (loopmodes);
		return (IOC_REPLY);

	case LB_GET_INFO:
		if (iocp->ioc_count != sizeof (loopmodes))
			return (IOC_INVAL);
		lbpp = (void *)mp->b_cont->b_rptr;
		bcopy(loopmodes, lbpp, sizeof (loopmodes));
		return (IOC_REPLY);

	case LB_GET_MODE:
		if (iocp->ioc_count != sizeof (uint32_t))
			return (IOC_INVAL);
		lbmp = (void *)mp->b_cont->b_rptr;
		*lbmp = bgep->param_loop_mode;
		return (IOC_REPLY);

	case LB_SET_MODE:
		if (iocp->ioc_count != sizeof (uint32_t))
			return (IOC_INVAL);
		lbmp = (void *)mp->b_cont->b_rptr;
		return (bge_set_loop_mode(bgep, *lbmp));
	}
}

/*
 * Specific bge IOCTLs, the gld module handles the generic ones.
 */
static void
bge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
{
	bge_t *bgep = arg;
	struct iocblk *iocp;
	enum ioc_reply status;
	boolean_t need_privilege;
	int err;
	int cmd;
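
	/*
	 * Note: only the LB_GET_* queries below are exempt from the
	 * privilege check; LB_SET_MODE still requires net_config
	 * privilege.
	 */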
	/*
	 * Validate the command before bothering with the mutex ...
	 */
	iocp = (void *)mp->b_rptr;
	iocp->ioc_error = 0;
	need_privilege = B_TRUE;
	cmd = iocp->ioc_cmd;
	switch (cmd) {
	default:
		miocnak(wq, mp, 0, EINVAL);
		return;

	case BGE_MII_READ:
	case BGE_MII_WRITE:
	case BGE_SEE_READ:
	case BGE_SEE_WRITE:
	case BGE_FLASH_READ:
	case BGE_FLASH_WRITE:
	case BGE_DIAG:
	case BGE_PEEK:
	case BGE_POKE:
	case BGE_PHY_RESET:
	case BGE_SOFT_RESET:
	case BGE_HARD_RESET:
		break;

	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
	case LB_GET_MODE:
		need_privilege = B_FALSE;
		/* FALLTHRU */
	case LB_SET_MODE:
		break;

	}

	if (need_privilege) {
		/*
		 * Check for specific net_config privilege on Solaris 10+.
		 */
		err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
		if (err != 0) {
			miocnak(wq, mp, 0, err);
			return;
		}
	}

	mutex_enter(bgep->genlock);
	if (!(bgep->progress & PROGRESS_INTR)) {
		/* can happen during autorecovery */
		mutex_exit(bgep->genlock);
		miocnak(wq, mp, 0, EIO);
		return;
	}

	switch (cmd) {
	default:
		_NOTE(NOTREACHED)
		status = IOC_INVAL;
		break;

	case BGE_MII_READ:
	case BGE_MII_WRITE:
	case BGE_SEE_READ:
	case BGE_SEE_WRITE:
	case BGE_FLASH_READ:
	case BGE_FLASH_WRITE:
	case BGE_DIAG:
	case BGE_PEEK:
	case BGE_POKE:
	case BGE_PHY_RESET:
	case BGE_SOFT_RESET:
	case BGE_HARD_RESET:
		status = bge_chip_ioctl(bgep, wq, mp, iocp);
		break;

	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
	case LB_GET_MODE:
	case LB_SET_MODE:
		status = bge_loop_ioctl(bgep, wq, mp, iocp);
		break;

	}

	/*
	 * Do we need to reprogram the PHY and/or the MAC?
	 * Do it now, while we still have the mutex.
	 *
	 * Note: update the PHY first, 'cos it controls the
	 * speed/duplex parameters that the MAC code uses.
	 */
	switch (status) {
	case IOC_RESTART_REPLY:
	case IOC_RESTART_ACK:
		if (bge_reprogram(bgep) == IOC_INVAL)
			status = IOC_INVAL;
		break;
	}

	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		status = IOC_INVAL;
	}
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		status = IOC_INVAL;
	}
	mutex_exit(bgep->genlock);

	/*
	 * Finally, decide how to reply
	 */
	switch (status) {
	default:
	case IOC_INVAL:
		/*
		 * Error, reply with a NAK and EINVAL or the specified error
		 */
		miocnak(wq, mp, 0, iocp->ioc_error == 0 ?
		    EINVAL : iocp->ioc_error);
		break;

	case IOC_DONE:
		/*
		 * OK, reply already sent
		 */
		break;

	case IOC_RESTART_ACK:
	case IOC_ACK:
		/*
		 * OK, reply with an ACK
		 */
		miocack(wq, mp, 0, 0);
		break;

	case IOC_RESTART_REPLY:
	case IOC_REPLY:
		/*
		 * OK, send prepared reply as ACK or NAK
		 */
		mp->b_datap->db_type = iocp->ioc_error == 0 ?
		    M_IOCACK : M_IOCNAK;
		qreply(wq, mp);
		break;
	}
}

/*
 * ========== Per-instance setup/teardown code ==========
 */

#undef	BGE_DBG
#define	BGE_DBG		BGE_DBG_INIT	/* debug flag for this code	*/
/*
 * Allocate an area of memory and a DMA handle for accessing it
 */
static int
bge_alloc_dma_mem(bge_t *bgep, size_t memsize, ddi_device_acc_attr_t *attr_p,
    uint_t dma_flags, dma_area_t *dma_p)
{
	caddr_t va;
	int err;

	BGE_TRACE(("bge_alloc_dma_mem($%p, %ld, $%p, 0x%x, $%p)",
	    (void *)bgep, memsize, attr_p, dma_flags, dma_p));

	/*
	 * Allocate handle
	 */
	err = ddi_dma_alloc_handle(bgep->devinfo, &dma_attr,
	    DDI_DMA_DONTWAIT, NULL, &dma_p->dma_hdl);
	if (err != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * Allocate memory
	 */
	err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize, attr_p,
	    dma_flags, DDI_DMA_DONTWAIT, NULL, &va, &dma_p->alength,
	    &dma_p->acc_hdl);
	if (err != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * Bind the two together
	 */
	dma_p->mem_va = va;
	err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL,
	    va, dma_p->alength, dma_flags, DDI_DMA_DONTWAIT, NULL,
	    &dma_p->cookie, &dma_p->ncookies);

	BGE_DEBUG(("bge_alloc_dma_mem(): bind %d bytes; err %d, %d cookies",
	    dma_p->alength, err, dma_p->ncookies));

	if (err != DDI_DMA_MAPPED || dma_p->ncookies != 1)
		return (DDI_FAILURE);

	dma_p->nslots = ~0U;
	dma_p->size = ~0U;
	dma_p->token = ~0U;
	dma_p->offset = 0;
	return (DDI_SUCCESS);
}

/*
 * Free one allocated area of DMAable memory
 */
static void
bge_free_dma_mem(dma_area_t *dma_p)
{
	if (dma_p->dma_hdl != NULL) {
		if (dma_p->ncookies) {
			(void) ddi_dma_unbind_handle(dma_p->dma_hdl);
			dma_p->ncookies = 0;
		}
		ddi_dma_free_handle(&dma_p->dma_hdl);
		dma_p->dma_hdl = NULL;
	}

	if (dma_p->acc_hdl != NULL) {
		ddi_dma_mem_free(&dma_p->acc_hdl);
		dma_p->acc_hdl = NULL;
	}
}
/*
 * Utility routine to carve a slice off a chunk of allocated memory,
 * updating the chunk descriptor accordingly.  The size of the slice
 * is given by the product of the <qty> and <size> parameters.
 */
static void
bge_slice_chunk(dma_area_t *slice, dma_area_t *chunk,
    uint32_t qty, uint32_t size)
{
	static uint32_t sequence = 0xbcd5704a;
	size_t totsize;

	totsize = qty*size;
	ASSERT(totsize <= chunk->alength);

	*slice = *chunk;
	slice->nslots = qty;
	slice->size = size;
	slice->alength = totsize;
	slice->token = ++sequence;

	chunk->mem_va = (caddr_t)chunk->mem_va + totsize;
	chunk->alength -= totsize;
	chunk->offset += totsize;
	chunk->cookie.dmac_laddress += totsize;
	chunk->cookie.dmac_size -= totsize;
}
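
/*
 * Note: bge_slice_chunk() never allocates or rebinds; each slice
 * inherits the chunk's DMA handle and a sub-range of its single
 * cookie, which is why bge_alloc_dma_mem() insists on ncookies == 1.
 */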
2142 */ 2143 static void 2144 bge_init_buff_ring(bge_t *bgep, uint64_t ring) 2145 { 2146 buff_ring_t *brp; 2147 bge_status_t *bsp; 2148 sw_rbd_t *srbdp; 2149 dma_area_t pbuf; 2150 uint32_t bufsize; 2151 uint32_t nslots; 2152 uint32_t slot; 2153 uint32_t split; 2154 2155 static bge_regno_t nic_ring_addrs[BGE_BUFF_RINGS_MAX] = { 2156 NIC_MEM_SHADOW_BUFF_STD, 2157 NIC_MEM_SHADOW_BUFF_JUMBO, 2158 NIC_MEM_SHADOW_BUFF_MINI 2159 }; 2160 static bge_regno_t mailbox_regs[BGE_BUFF_RINGS_MAX] = { 2161 RECV_STD_PROD_INDEX_REG, 2162 RECV_JUMBO_PROD_INDEX_REG, 2163 RECV_MINI_PROD_INDEX_REG 2164 }; 2165 static bge_regno_t buff_cons_xref[BGE_BUFF_RINGS_MAX] = { 2166 STATUS_STD_BUFF_CONS_INDEX, 2167 STATUS_JUMBO_BUFF_CONS_INDEX, 2168 STATUS_MINI_BUFF_CONS_INDEX 2169 }; 2170 2171 BGE_TRACE(("bge_init_buff_ring($%p, %d)", 2172 (void *)bgep, ring)); 2173 2174 brp = &bgep->buff[ring]; 2175 nslots = brp->desc.nslots; 2176 ASSERT(brp->buf[0].nslots == nslots/BGE_SPLIT); 2177 bufsize = brp->buf[0].size; 2178 2179 /* 2180 * Set up the copy of the h/w RCB 2181 * 2182 * Note: unlike Send & Receive Return Rings, (where the max_len 2183 * field holds the number of slots), in a Receive Buffer Ring 2184 * this field indicates the size of each buffer in the ring. 2185 */ 2186 brp->hw_rcb.host_ring_addr = brp->desc.cookie.dmac_laddress; 2187 brp->hw_rcb.max_len = (uint16_t)bufsize; 2188 brp->hw_rcb.flags = nslots > 0 ? 0 : RCB_FLAG_RING_DISABLED; 2189 brp->hw_rcb.nic_ring_addr = nic_ring_addrs[ring]; 2190 2191 /* 2192 * Other one-off initialisation of per-ring data 2193 */ 2194 brp->bgep = bgep; 2195 bsp = DMA_VPTR(bgep->status_block); 2196 brp->cons_index_p = &bsp->buff_cons_index[buff_cons_xref[ring]]; 2197 brp->chip_mbx_reg = mailbox_regs[ring]; 2198 mutex_init(brp->rf_lock, NULL, MUTEX_DRIVER, 2199 DDI_INTR_PRI(bgep->intr_pri)); 2200 2201 /* 2202 * Allocate the array of s/w Receive Buffer Descriptors 2203 */ 2204 srbdp = kmem_zalloc(nslots*sizeof (*srbdp), KM_SLEEP); 2205 brp->sw_rbds = srbdp; 2206 2207 /* 2208 * Now initialise each array element once and for all 2209 */ 2210 for (split = 0; split < BGE_SPLIT; ++split) { 2211 pbuf = brp->buf[split]; 2212 for (slot = 0; slot < nslots/BGE_SPLIT; ++srbdp, ++slot) 2213 bge_slice_chunk(&srbdp->pbuf, &pbuf, 1, bufsize); 2214 ASSERT(pbuf.alength == 0); 2215 } 2216 } 2217 2218 /* 2219 * Clean up initialisation done above before the memory is freed 2220 */ 2221 static void 2222 bge_fini_buff_ring(bge_t *bgep, uint64_t ring) 2223 { 2224 buff_ring_t *brp; 2225 sw_rbd_t *srbdp; 2226 2227 BGE_TRACE(("bge_fini_buff_ring($%p, %d)", 2228 (void *)bgep, ring)); 2229 2230 brp = &bgep->buff[ring]; 2231 srbdp = brp->sw_rbds; 2232 kmem_free(srbdp, brp->desc.nslots*sizeof (*srbdp)); 2233 2234 mutex_destroy(brp->rf_lock); 2235 } 2236 2237 /* 2238 * Initialise the specified Receive (Return) Ring, using the 2239 * information in the <dma_area> descriptors that it contains 2240 * to set up all the other fields. This routine should be called 2241 * only once for each ring. 2242 */ 2243 static void 2244 bge_init_recv_ring(bge_t *bgep, uint64_t ring) 2245 { 2246 recv_ring_t *rrp; 2247 bge_status_t *bsp; 2248 uint32_t nslots; 2249 2250 BGE_TRACE(("bge_init_recv_ring($%p, %d)", 2251 (void *)bgep, ring)); 2252 2253 /* 2254 * The chip architecture requires that receive return rings have 2255 * 512 or 1024 or 2048 elements per ring. See 570X-PG108-R page 103. 
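 * A ring with 0 slots is also tolerated here: it is simply marked
 * RCB_FLAG_RING_DISABLED in the RCB set up below.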
2256 */ 2257 rrp = &bgep->recv[ring]; 2258 nslots = rrp->desc.nslots; 2259 ASSERT(nslots == 0 || nslots == 512 || 2260 nslots == 1024 || nslots == 2048); 2261 2262 /* 2263 * Set up the copy of the h/w RCB 2264 */ 2265 rrp->hw_rcb.host_ring_addr = rrp->desc.cookie.dmac_laddress; 2266 rrp->hw_rcb.max_len = (uint16_t)nslots; 2267 rrp->hw_rcb.flags = nslots > 0 ? 0 : RCB_FLAG_RING_DISABLED; 2268 rrp->hw_rcb.nic_ring_addr = 0; 2269 2270 /* 2271 * Other one-off initialisation of per-ring data 2272 */ 2273 rrp->bgep = bgep; 2274 bsp = DMA_VPTR(bgep->status_block); 2275 rrp->prod_index_p = RECV_INDEX_P(bsp, ring); 2276 rrp->chip_mbx_reg = RECV_RING_CONS_INDEX_REG(ring); 2277 mutex_init(rrp->rx_lock, NULL, MUTEX_DRIVER, 2278 DDI_INTR_PRI(bgep->intr_pri)); 2279 } 2280 2281 2282 /* 2283 * Clean up initialisation done above before the memory is freed 2284 */ 2285 static void 2286 bge_fini_recv_ring(bge_t *bgep, uint64_t ring) 2287 { 2288 recv_ring_t *rrp; 2289 2290 BGE_TRACE(("bge_fini_recv_ring($%p, %d)", 2291 (void *)bgep, ring)); 2292 2293 rrp = &bgep->recv[ring]; 2294 if (rrp->rx_softint) 2295 ddi_remove_softintr(rrp->rx_softint); 2296 mutex_destroy(rrp->rx_lock); 2297 } 2298 2299 /* 2300 * Initialise the specified Send Ring, using the information in the 2301 * <dma_area> descriptors that it contains to set up all the other 2302 * fields. This routine should be called only once for each ring. 2303 */ 2304 static void 2305 bge_init_send_ring(bge_t *bgep, uint64_t ring) 2306 { 2307 send_ring_t *srp; 2308 bge_status_t *bsp; 2309 sw_sbd_t *ssbdp; 2310 dma_area_t desc; 2311 dma_area_t pbuf; 2312 uint32_t nslots; 2313 uint32_t slot; 2314 uint32_t split; 2315 sw_txbuf_t *txbuf; 2316 2317 BGE_TRACE(("bge_init_send_ring($%p, %d)", 2318 (void *)bgep, ring)); 2319 2320 /* 2321 * The chip architecture requires that host-based send rings 2322 * have 512 elements per ring. See 570X-PG102-R page 56. 2323 */ 2324 srp = &bgep->send[ring]; 2325 nslots = srp->desc.nslots; 2326 ASSERT(nslots == 0 || nslots == 512); 2327 2328 /* 2329 * Set up the copy of the h/w RCB 2330 */ 2331 srp->hw_rcb.host_ring_addr = srp->desc.cookie.dmac_laddress; 2332 srp->hw_rcb.max_len = (uint16_t)nslots; 2333 srp->hw_rcb.flags = nslots > 0 ? 
0 : RCB_FLAG_RING_DISABLED; 2334 srp->hw_rcb.nic_ring_addr = NIC_MEM_SHADOW_SEND_RING(ring, nslots); 2335 2336 /* 2337 * Other one-off initialisation of per-ring data 2338 */ 2339 srp->bgep = bgep; 2340 bsp = DMA_VPTR(bgep->status_block); 2341 srp->cons_index_p = SEND_INDEX_P(bsp, ring); 2342 srp->chip_mbx_reg = SEND_RING_HOST_INDEX_REG(ring); 2343 mutex_init(srp->tx_lock, NULL, MUTEX_DRIVER, 2344 DDI_INTR_PRI(bgep->intr_pri)); 2345 mutex_init(srp->txbuf_lock, NULL, MUTEX_DRIVER, 2346 DDI_INTR_PRI(bgep->intr_pri)); 2347 mutex_init(srp->freetxbuf_lock, NULL, MUTEX_DRIVER, 2348 DDI_INTR_PRI(bgep->intr_pri)); 2349 mutex_init(srp->tc_lock, NULL, MUTEX_DRIVER, 2350 DDI_INTR_PRI(bgep->intr_pri)); 2351 if (nslots == 0) 2352 return; 2353 2354 /* 2355 * Allocate the array of s/w Send Buffer Descriptors 2356 */ 2357 ssbdp = kmem_zalloc(nslots*sizeof (*ssbdp), KM_SLEEP); 2358 txbuf = kmem_zalloc(BGE_SEND_BUF_MAX*sizeof (*txbuf), KM_SLEEP); 2359 srp->txbuf_head = 2360 kmem_zalloc(BGE_SEND_BUF_MAX*sizeof (bge_queue_item_t), KM_SLEEP); 2361 srp->pktp = kmem_zalloc(BGE_SEND_BUF_MAX*sizeof (send_pkt_t), KM_SLEEP); 2362 srp->sw_sbds = ssbdp; 2363 srp->txbuf = txbuf; 2364 srp->tx_buffers = BGE_SEND_BUF_NUM; 2365 srp->tx_buffers_low = srp->tx_buffers / 4; 2366 if (bgep->chipid.snd_buff_size > BGE_SEND_BUFF_SIZE_DEFAULT) 2367 srp->tx_array_max = BGE_SEND_BUF_ARRAY_JUMBO; 2368 else 2369 srp->tx_array_max = BGE_SEND_BUF_ARRAY; 2370 srp->tx_array = 1; 2371 2372 /* 2373 * Chunk tx desc area 2374 */ 2375 desc = srp->desc; 2376 for (slot = 0; slot < nslots; ++ssbdp, ++slot) { 2377 bge_slice_chunk(&ssbdp->desc, &desc, 1, 2378 sizeof (bge_sbd_t)); 2379 } 2380 ASSERT(desc.alength == 0); 2381 2382 /* 2383 * Chunk tx buffer area 2384 */ 2385 for (split = 0; split < BGE_SPLIT; ++split) { 2386 pbuf = srp->buf[0][split]; 2387 for (slot = 0; slot < BGE_SEND_BUF_NUM/BGE_SPLIT; ++slot) { 2388 bge_slice_chunk(&txbuf->buf, &pbuf, 1, 2389 bgep->chipid.snd_buff_size); 2390 txbuf++; 2391 } 2392 ASSERT(pbuf.alength == 0); 2393 } 2394 } 2395 2396 /* 2397 * Clean up initialisation done above before the memory is freed 2398 */ 2399 static void 2400 bge_fini_send_ring(bge_t *bgep, uint64_t ring) 2401 { 2402 send_ring_t *srp; 2403 uint32_t array; 2404 uint32_t split; 2405 uint32_t nslots; 2406 2407 BGE_TRACE(("bge_fini_send_ring($%p, %d)", 2408 (void *)bgep, ring)); 2409 2410 srp = &bgep->send[ring]; 2411 mutex_destroy(srp->tc_lock); 2412 mutex_destroy(srp->freetxbuf_lock); 2413 mutex_destroy(srp->txbuf_lock); 2414 mutex_destroy(srp->tx_lock); 2415 nslots = srp->desc.nslots; 2416 if (nslots == 0) 2417 return; 2418 2419 for (array = 1; array < srp->tx_array; ++array) 2420 for (split = 0; split < BGE_SPLIT; ++split) 2421 bge_free_dma_mem(&srp->buf[array][split]); 2422 kmem_free(srp->sw_sbds, nslots*sizeof (*srp->sw_sbds)); 2423 kmem_free(srp->txbuf_head, BGE_SEND_BUF_MAX*sizeof (*srp->txbuf_head)); 2424 kmem_free(srp->txbuf, BGE_SEND_BUF_MAX*sizeof (*srp->txbuf)); 2425 kmem_free(srp->pktp, BGE_SEND_BUF_MAX*sizeof (*srp->pktp)); 2426 srp->sw_sbds = NULL; 2427 srp->txbuf_head = NULL; 2428 srp->txbuf = NULL; 2429 srp->pktp = NULL; 2430 } 2431 2432 /* 2433 * Initialise all transmit, receive, and buffer rings. 2434 */ 2435 void 2436 bge_init_rings(bge_t *bgep) 2437 { 2438 uint32_t ring; 2439 2440 BGE_TRACE(("bge_init_rings($%p)", (void *)bgep)); 2441 2442 /* 2443 * Perform one-off initialisation of each ring ... 
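 * (send rings first, then receive-return rings, then buffer rings;
 * bge_fini_rings() below undoes the work in the reverse order)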
2444 */ 2445 for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring) 2446 bge_init_send_ring(bgep, ring); 2447 for (ring = 0; ring < BGE_RECV_RINGS_MAX; ++ring) 2448 bge_init_recv_ring(bgep, ring); 2449 for (ring = 0; ring < BGE_BUFF_RINGS_MAX; ++ring) 2450 bge_init_buff_ring(bgep, ring); 2451 } 2452 2453 /* 2454 * Undo the work of bge_init_rings() above before the memory is freed 2455 */ 2456 void 2457 bge_fini_rings(bge_t *bgep) 2458 { 2459 uint32_t ring; 2460 2461 BGE_TRACE(("bge_fini_rings($%p)", (void *)bgep)); 2462 2463 for (ring = 0; ring < BGE_BUFF_RINGS_MAX; ++ring) 2464 bge_fini_buff_ring(bgep, ring); 2465 for (ring = 0; ring < BGE_RECV_RINGS_MAX; ++ring) 2466 bge_fini_recv_ring(bgep, ring); 2467 for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring) 2468 bge_fini_send_ring(bgep, ring); 2469 } 2470 2471 /* 2472 * Called from bge_m_stop() to free the tx buffers that were 2473 * allocated by the tx process. 2474 */ 2475 void 2476 bge_free_txbuf_arrays(send_ring_t *srp) 2477 { 2478 uint32_t array; 2479 uint32_t split; 2480 2481 ASSERT(mutex_owned(srp->tx_lock)); 2482 2483 /* 2484 * Free the extra tx buffer DMA area 2485 */ 2486 for (array = 1; array < srp->tx_array; ++array) 2487 for (split = 0; split < BGE_SPLIT; ++split) 2488 bge_free_dma_mem(&srp->buf[array][split]); 2489 2490 /* 2491 * Restore initial tx buffer numbers 2492 */ 2493 srp->tx_array = 1; 2494 srp->tx_buffers = BGE_SEND_BUF_NUM; 2495 srp->tx_buffers_low = srp->tx_buffers / 4; 2496 srp->tx_flow = 0; 2497 bzero(srp->pktp, BGE_SEND_BUF_MAX * sizeof (*srp->pktp)); 2498 } 2499 2500 /* 2501 * Called from tx process to allocate more tx buffers 2502 */ 2503 bge_queue_item_t * 2504 bge_alloc_txbuf_array(bge_t *bgep, send_ring_t *srp) 2505 { 2506 bge_queue_t *txbuf_queue; 2507 bge_queue_item_t *txbuf_item_last; 2508 bge_queue_item_t *txbuf_item; 2509 bge_queue_item_t *txbuf_item_rtn; 2510 sw_txbuf_t *txbuf; 2511 dma_area_t area; 2512 size_t txbuffsize; 2513 uint32_t slot; 2514 uint32_t array; 2515 uint32_t split; 2516 uint32_t err; 2517 2518 ASSERT(mutex_owned(srp->tx_lock)); 2519 2520 array = srp->tx_array; 2521 if (array >= srp->tx_array_max) 2522 return (NULL); 2523 2524 /* 2525 * Allocate memory & handles for TX buffers 2526 */ 2527 txbuffsize = BGE_SEND_BUF_NUM*bgep->chipid.snd_buff_size; 2528 ASSERT((txbuffsize % BGE_SPLIT) == 0); 2529 for (split = 0; split < BGE_SPLIT; ++split) { 2530 err = bge_alloc_dma_mem(bgep, txbuffsize/BGE_SPLIT, 2531 &bge_data_accattr, DDI_DMA_WRITE | BGE_DMA_MODE, 2532 &srp->buf[array][split]); 2533 if (err != DDI_SUCCESS) { 2534 /* Free the chunks allocated so far, including the one that just failed */ 2535 for (slot = 0; slot <= split; ++slot) 2536 bge_free_dma_mem(&srp->buf[array][slot]); 2537 srp->tx_alloc_fail++; 2538 return (NULL); 2539 } 2540 } 2541 2542 /* 2543 * Chunk tx buffer area 2544 */ 2545 txbuf = srp->txbuf + array*BGE_SEND_BUF_NUM; 2546 for (split = 0; split < BGE_SPLIT; ++split) { 2547 area = srp->buf[array][split]; 2548 for (slot = 0; slot < BGE_SEND_BUF_NUM/BGE_SPLIT; ++slot) { 2549 bge_slice_chunk(&txbuf->buf, &area, 1, 2550 bgep->chipid.snd_buff_size); 2551 txbuf++; 2552 } 2553 } 2554 2555 /* 2556 * Add above buffers to the tx buffer pop queue 2557 */ 2558 txbuf_item = srp->txbuf_head + array*BGE_SEND_BUF_NUM; 2559 txbuf = srp->txbuf + array*BGE_SEND_BUF_NUM; 2560 txbuf_item_last = NULL; 2561 for (slot = 0; slot < BGE_SEND_BUF_NUM; ++slot) { 2562 txbuf_item->item = txbuf; 2563 txbuf_item->next = txbuf_item_last; 2564 txbuf_item_last = txbuf_item; 2565 txbuf++; 2566 txbuf_item++; 2567 } 2568 txbuf_item =
srp->txbuf_head + array*BGE_SEND_BUF_NUM; 2569 txbuf_item_rtn = txbuf_item; 2570 txbuf_item++; 2571 txbuf_queue = srp->txbuf_pop_queue; 2572 mutex_enter(txbuf_queue->lock); 2573 txbuf_item->next = txbuf_queue->head; 2574 txbuf_queue->head = txbuf_item_last; 2575 txbuf_queue->count += BGE_SEND_BUF_NUM - 1; 2576 mutex_exit(txbuf_queue->lock); 2577 2578 srp->tx_array++; 2579 srp->tx_buffers += BGE_SEND_BUF_NUM; 2580 srp->tx_buffers_low = srp->tx_buffers / 4; 2581 2582 return (txbuf_item_rtn); 2583 } 2584 2585 /* 2586 * This function allocates all the transmit and receive buffers 2587 * and descriptors, in four chunks. 2588 */ 2589 int 2590 bge_alloc_bufs(bge_t *bgep) 2591 { 2592 dma_area_t area; 2593 size_t rxbuffsize; 2594 size_t txbuffsize; 2595 size_t rxbuffdescsize; 2596 size_t rxdescsize; 2597 size_t txdescsize; 2598 uint32_t ring; 2599 uint32_t rx_rings = bgep->chipid.rx_rings; 2600 uint32_t tx_rings = bgep->chipid.tx_rings; 2601 int split; 2602 int err; 2603 2604 BGE_TRACE(("bge_alloc_bufs($%p)", 2605 (void *)bgep)); 2606 2607 rxbuffsize = BGE_STD_SLOTS_USED*bgep->chipid.std_buf_size; 2608 rxbuffsize += bgep->chipid.jumbo_slots*bgep->chipid.recv_jumbo_size; 2609 rxbuffsize += BGE_MINI_SLOTS_USED*BGE_MINI_BUFF_SIZE; 2610 2611 txbuffsize = BGE_SEND_BUF_NUM*bgep->chipid.snd_buff_size; 2612 txbuffsize *= tx_rings; 2613 2614 rxdescsize = rx_rings*bgep->chipid.recv_slots; 2615 rxdescsize *= sizeof (bge_rbd_t); 2616 2617 rxbuffdescsize = BGE_STD_SLOTS_USED; 2618 rxbuffdescsize += bgep->chipid.jumbo_slots; 2619 rxbuffdescsize += BGE_MINI_SLOTS_USED; 2620 rxbuffdescsize *= sizeof (bge_rbd_t); 2621 2622 txdescsize = tx_rings*BGE_SEND_SLOTS_USED; 2623 txdescsize *= sizeof (bge_sbd_t); 2624 txdescsize += sizeof (bge_statistics_t); 2625 txdescsize += sizeof (bge_status_t); 2626 txdescsize += BGE_STATUS_PADDING; 2627 2628 /* 2629 * Enable PCI relaxed ordering only for RX/TX data buffers 2630 */ 2631 if (bge_relaxed_ordering) 2632 dma_attr.dma_attr_flags |= DDI_DMA_RELAXED_ORDERING; 2633 2634 /* 2635 * Allocate memory & handles for RX buffers 2636 */ 2637 ASSERT((rxbuffsize % BGE_SPLIT) == 0); 2638 for (split = 0; split < BGE_SPLIT; ++split) { 2639 err = bge_alloc_dma_mem(bgep, rxbuffsize/BGE_SPLIT, 2640 &bge_data_accattr, DDI_DMA_READ | BGE_DMA_MODE, 2641 &bgep->rx_buff[split]); 2642 if (err != DDI_SUCCESS) 2643 return (DDI_FAILURE); 2644 } 2645 2646 /* 2647 * Allocate memory & handles for TX buffers 2648 */ 2649 ASSERT((txbuffsize % BGE_SPLIT) == 0); 2650 for (split = 0; split < BGE_SPLIT; ++split) { 2651 err = bge_alloc_dma_mem(bgep, txbuffsize/BGE_SPLIT, 2652 &bge_data_accattr, DDI_DMA_WRITE | BGE_DMA_MODE, 2653 &bgep->tx_buff[split]); 2654 if (err != DDI_SUCCESS) 2655 return (DDI_FAILURE); 2656 } 2657 2658 dma_attr.dma_attr_flags &= ~DDI_DMA_RELAXED_ORDERING; 2659 2660 /* 2661 * Allocate memory & handles for receive return rings 2662 */ 2663 ASSERT((rxdescsize % rx_rings) == 0); 2664 for (split = 0; split < rx_rings; ++split) { 2665 err = bge_alloc_dma_mem(bgep, rxdescsize/rx_rings, 2666 &bge_desc_accattr, DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 2667 &bgep->rx_desc[split]); 2668 if (err != DDI_SUCCESS) 2669 return (DDI_FAILURE); 2670 } 2671 2672 /* 2673 * Allocate memory & handles for buffer (producer) descriptor rings 2674 */ 2675 err = bge_alloc_dma_mem(bgep, rxbuffdescsize, &bge_desc_accattr, 2676 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &bgep->rx_desc[split]); 2677 if (err != DDI_SUCCESS) 2678 return (DDI_FAILURE); 2679 2680 /* 2681 * Allocate memory & handles for TX descriptor rings, 2682 * status 
block, and statistics area 2683 */ 2684 err = bge_alloc_dma_mem(bgep, txdescsize, &bge_desc_accattr, 2685 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &bgep->tx_desc); 2686 if (err != DDI_SUCCESS) 2687 return (DDI_FAILURE); 2688 2689 /* 2690 * Now carve up each of the allocated areas ... 2691 */ 2692 for (split = 0; split < BGE_SPLIT; ++split) { 2693 area = bgep->rx_buff[split]; 2694 bge_slice_chunk(&bgep->buff[BGE_STD_BUFF_RING].buf[split], 2695 &area, BGE_STD_SLOTS_USED/BGE_SPLIT, 2696 bgep->chipid.std_buf_size); 2697 bge_slice_chunk(&bgep->buff[BGE_JUMBO_BUFF_RING].buf[split], 2698 &area, bgep->chipid.jumbo_slots/BGE_SPLIT, 2699 bgep->chipid.recv_jumbo_size); 2700 bge_slice_chunk(&bgep->buff[BGE_MINI_BUFF_RING].buf[split], 2701 &area, BGE_MINI_SLOTS_USED/BGE_SPLIT, 2702 BGE_MINI_BUFF_SIZE); 2703 } 2704 2705 for (split = 0; split < BGE_SPLIT; ++split) { 2706 area = bgep->tx_buff[split]; 2707 for (ring = 0; ring < tx_rings; ++ring) 2708 bge_slice_chunk(&bgep->send[ring].buf[0][split], 2709 &area, BGE_SEND_BUF_NUM/BGE_SPLIT, 2710 bgep->chipid.snd_buff_size); 2711 for (; ring < BGE_SEND_RINGS_MAX; ++ring) 2712 bge_slice_chunk(&bgep->send[ring].buf[0][split], 2713 &area, 0, bgep->chipid.snd_buff_size); 2714 } 2715 2716 for (ring = 0; ring < rx_rings; ++ring) 2717 bge_slice_chunk(&bgep->recv[ring].desc, &bgep->rx_desc[ring], 2718 bgep->chipid.recv_slots, sizeof (bge_rbd_t)); 2719 2720 area = bgep->rx_desc[rx_rings]; 2721 for (; ring < BGE_RECV_RINGS_MAX; ++ring) 2722 bge_slice_chunk(&bgep->recv[ring].desc, &area, 2723 0, sizeof (bge_rbd_t)); 2724 bge_slice_chunk(&bgep->buff[BGE_STD_BUFF_RING].desc, &area, 2725 BGE_STD_SLOTS_USED, sizeof (bge_rbd_t)); 2726 bge_slice_chunk(&bgep->buff[BGE_JUMBO_BUFF_RING].desc, &area, 2727 bgep->chipid.jumbo_slots, sizeof (bge_rbd_t)); 2728 bge_slice_chunk(&bgep->buff[BGE_MINI_BUFF_RING].desc, &area, 2729 BGE_MINI_SLOTS_USED, sizeof (bge_rbd_t)); 2730 ASSERT(area.alength == 0); 2731 2732 area = bgep->tx_desc; 2733 for (ring = 0; ring < tx_rings; ++ring) 2734 bge_slice_chunk(&bgep->send[ring].desc, &area, 2735 BGE_SEND_SLOTS_USED, sizeof (bge_sbd_t)); 2736 for (; ring < BGE_SEND_RINGS_MAX; ++ring) 2737 bge_slice_chunk(&bgep->send[ring].desc, &area, 2738 0, sizeof (bge_sbd_t)); 2739 bge_slice_chunk(&bgep->statistics, &area, 1, sizeof (bge_statistics_t)); 2740 bge_slice_chunk(&bgep->status_block, &area, 1, sizeof (bge_status_t)); 2741 ASSERT(area.alength == BGE_STATUS_PADDING); 2742 DMA_ZERO(bgep->status_block); 2743 2744 return (DDI_SUCCESS); 2745 } 2746 2747 /* 2748 * This routine frees the transmit and receive buffers and descriptors. 2749 * Make sure the chip is stopped before calling it! 
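 * The areas are released in the reverse of the order in which
 * bge_alloc_bufs() above allocated them.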
2750 */ 2751 void 2752 bge_free_bufs(bge_t *bgep) 2753 { 2754 int split; 2755 2756 BGE_TRACE(("bge_free_bufs($%p)", 2757 (void *)bgep)); 2758 2759 bge_free_dma_mem(&bgep->tx_desc); 2760 for (split = 0; split < BGE_RECV_RINGS_SPLIT; ++split) 2761 bge_free_dma_mem(&bgep->rx_desc[split]); 2762 for (split = 0; split < BGE_SPLIT; ++split) 2763 bge_free_dma_mem(&bgep->tx_buff[split]); 2764 for (split = 0; split < BGE_SPLIT; ++split) 2765 bge_free_dma_mem(&bgep->rx_buff[split]); 2766 } 2767 2768 /* 2769 * Determine (initial) MAC address ("BIA") to use for this interface 2770 */ 2771 2772 static void 2773 bge_find_mac_address(bge_t *bgep, chip_id_t *cidp) 2774 { 2775 struct ether_addr sysaddr; 2776 char propbuf[8]; /* "true" or "false", plus NUL */ 2777 uchar_t *bytes; 2778 int *ints; 2779 uint_t nelts; 2780 int err; 2781 2782 BGE_TRACE(("bge_find_mac_address($%p)", 2783 (void *)bgep)); 2784 2785 BGE_DEBUG(("bge_find_mac_address: hw_mac_addr %012llx, => %s (%sset)", 2786 cidp->hw_mac_addr, 2787 ether_sprintf((void *)cidp->vendor_addr.addr), 2788 cidp->vendor_addr.set ? "" : "not ")); 2789 2790 /* 2791 * The "vendor's factory-set address" may already have 2792 * been extracted from the chip, but if the property 2793 * "local-mac-address" is set we use that instead. It 2794 * will normally be set by OBP, but it could also be 2795 * specified in a .conf file(!) 2796 * 2797 * There doesn't seem to be a way to define byte-array 2798 * properties in a .conf, so we check whether it looks 2799 * like an array of 6 ints instead. 2800 * 2801 * Then, we check whether it looks like an array of 6 2802 * bytes (which it should, if OBP set it). If we can't 2803 * make sense of it either way, we'll ignore it. 2804 */ 2805 err = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, bgep->devinfo, 2806 DDI_PROP_DONTPASS, localmac_propname, &ints, &nelts); 2807 if (err == DDI_PROP_SUCCESS) { 2808 if (nelts == ETHERADDRL) { 2809 while (nelts--) 2810 cidp->vendor_addr.addr[nelts] = ints[nelts]; 2811 cidp->vendor_addr.set = B_TRUE; 2812 } 2813 ddi_prop_free(ints); 2814 } 2815 2816 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, bgep->devinfo, 2817 DDI_PROP_DONTPASS, localmac_propname, &bytes, &nelts); 2818 if (err == DDI_PROP_SUCCESS) { 2819 if (nelts == ETHERADDRL) { 2820 while (nelts--) 2821 cidp->vendor_addr.addr[nelts] = bytes[nelts]; 2822 cidp->vendor_addr.set = B_TRUE; 2823 } 2824 ddi_prop_free(bytes); 2825 } 2826 2827 BGE_DEBUG(("bge_find_mac_address: +local %s (%sset)", 2828 ether_sprintf((void *)cidp->vendor_addr.addr), 2829 cidp->vendor_addr.set ? "" : "not ")); 2830 2831 /* 2832 * Look up the OBP property "local-mac-address?". Note that even 2833 * though its value is a string (which should be "true" or "false"), 2834 * it can't be decoded by ddi_prop_lookup_string(9F). So, we zero 2835 * the buffer first and then fetch the property as an untyped array; 2836 * this may or may not include a final NUL, but since there will 2837 * always be one left at the end of the buffer we can now treat it 2838 * as a string anyway. 2839 */ 2840 nelts = sizeof (propbuf); 2841 bzero(propbuf, nelts--); 2842 err = ddi_getlongprop_buf(DDI_DEV_T_ANY, bgep->devinfo, 2843 DDI_PROP_CANSLEEP, localmac_boolname, propbuf, (int *)&nelts); 2844 2845 /* 2846 * Now, if the address still isn't set from the hardware (SEEPROM) 2847 * or the OBP or .conf property, OR if the user has foolishly set 2848 * 'local-mac-address? = false', use "the system address" instead 2849 * (but only if it's non-null i.e. has been set from the IDPROM). 
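 * That system-wide address is what the localetheraddr() call
 * below retrieves.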
2850 */ 2851 if (cidp->vendor_addr.set == B_FALSE || strcmp(propbuf, "false") == 0) 2852 if (localetheraddr(NULL, &sysaddr) != 0) { 2853 ethaddr_copy(&sysaddr, cidp->vendor_addr.addr); 2854 cidp->vendor_addr.set = B_TRUE; 2855 } 2856 2857 BGE_DEBUG(("bge_find_mac_address: +system %s (%sset)", 2858 ether_sprintf((void *)cidp->vendor_addr.addr), 2859 cidp->vendor_addr.set ? "" : "not ")); 2860 2861 /* 2862 * Finally(!), if there's a valid "mac-address" property (created 2863 * if we netbooted from this interface), we must use this instead 2864 * of any of the above to ensure that the NFS/install server doesn't 2865 * get confused by the address changing as Solaris takes over! 2866 */ 2867 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, bgep->devinfo, 2868 DDI_PROP_DONTPASS, macaddr_propname, &bytes, &nelts); 2869 if (err == DDI_PROP_SUCCESS) { 2870 if (nelts == ETHERADDRL) { 2871 while (nelts--) 2872 cidp->vendor_addr.addr[nelts] = bytes[nelts]; 2873 cidp->vendor_addr.set = B_TRUE; 2874 } 2875 ddi_prop_free(bytes); 2876 } 2877 2878 BGE_DEBUG(("bge_find_mac_address: =final %s (%sset)", 2879 ether_sprintf((void *)cidp->vendor_addr.addr), 2880 cidp->vendor_addr.set ? "" : "not ")); 2881 } 2882 2883 2884 /*ARGSUSED*/ 2885 int 2886 bge_check_acc_handle(bge_t *bgep, ddi_acc_handle_t handle) 2887 { 2888 ddi_fm_error_t de; 2889 2890 ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION); 2891 ddi_fm_acc_err_clear(handle, DDI_FME_VERSION); 2892 return (de.fme_status); 2893 } 2894 2895 /*ARGSUSED*/ 2896 int 2897 bge_check_dma_handle(bge_t *bgep, ddi_dma_handle_t handle) 2898 { 2899 ddi_fm_error_t de; 2900 2901 ASSERT(bgep->progress & PROGRESS_BUFS); 2902 ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION); 2903 return (de.fme_status); 2904 } 2905 2906 /* 2907 * The IO fault service error handling callback function 2908 */ 2909 /*ARGSUSED*/ 2910 static int 2911 bge_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data) 2912 { 2913 /* 2914 * as the driver can always deal with an error in any dma or 2915 * access handle, we can just return the fme_status value. 2916 */ 2917 pci_ereport_post(dip, err, NULL); 2918 return (err->fme_status); 2919 } 2920 2921 static void 2922 bge_fm_init(bge_t *bgep) 2923 { 2924 ddi_iblock_cookie_t iblk; 2925 2926 /* Only register with IO Fault Services if we have some capability */ 2927 if (bgep->fm_capabilities) { 2928 bge_reg_accattr.devacc_attr_access = DDI_FLAGERR_ACC; 2929 bge_desc_accattr.devacc_attr_access = DDI_FLAGERR_ACC; 2930 dma_attr.dma_attr_flags = DDI_DMA_FLAGERR; 2931 2932 /* Register capabilities with IO Fault Services */ 2933 ddi_fm_init(bgep->devinfo, &bgep->fm_capabilities, &iblk); 2934 2935 /* 2936 * Initialize pci ereport capabilities if ereport capable 2937 */ 2938 if (DDI_FM_EREPORT_CAP(bgep->fm_capabilities) || 2939 DDI_FM_ERRCB_CAP(bgep->fm_capabilities)) 2940 pci_ereport_setup(bgep->devinfo); 2941 2942 /* 2943 * Register error callback if error callback capable 2944 */ 2945 if (DDI_FM_ERRCB_CAP(bgep->fm_capabilities)) 2946 ddi_fm_handler_register(bgep->devinfo, 2947 bge_fm_error_cb, (void*) bgep); 2948 } else { 2949 /* 2950 * These fields have to be cleared of FMA if there are no 2951 * FMA capabilities at runtime. 
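 * That is, the register/descriptor access attributes and the DMA
 * attributes fall back to their defaults, with no error-reporting
 * flags set.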
2952 */ 2953 bge_reg_accattr.devacc_attr_access = DDI_DEFAULT_ACC; 2954 bge_desc_accattr.devacc_attr_access = DDI_DEFAULT_ACC; 2955 dma_attr.dma_attr_flags = 0; 2956 } 2957 } 2958 2959 static void 2960 bge_fm_fini(bge_t *bgep) 2961 { 2962 /* Only unregister FMA capabilities if we registered some */ 2963 if (bgep->fm_capabilities) { 2964 2965 /* 2966 * Release any resources allocated by pci_ereport_setup() 2967 */ 2968 if (DDI_FM_EREPORT_CAP(bgep->fm_capabilities) || 2969 DDI_FM_ERRCB_CAP(bgep->fm_capabilities)) 2970 pci_ereport_teardown(bgep->devinfo); 2971 2972 /* 2973 * Un-register error callback if error callback capable 2974 */ 2975 if (DDI_FM_ERRCB_CAP(bgep->fm_capabilities)) 2976 ddi_fm_handler_unregister(bgep->devinfo); 2977 2978 /* Unregister from IO Fault Services */ 2979 ddi_fm_fini(bgep->devinfo); 2980 } 2981 } 2982 2983 static void 2984 #ifdef BGE_IPMI_ASF 2985 bge_unattach(bge_t *bgep, uint_t asf_mode) 2986 #else 2987 bge_unattach(bge_t *bgep) 2988 #endif 2989 { 2990 BGE_TRACE(("bge_unattach($%p)", 2991 (void *)bgep)); 2992 2993 /* 2994 * Flag that no more activity may be initiated 2995 */ 2996 bgep->progress &= ~PROGRESS_READY; 2997 2998 /* 2999 * Quiesce the PHY and MAC (leave it reset but still powered). 3000 * Clean up and free all BGE data structures 3001 */ 3002 if (bgep->periodic_id != NULL) { 3003 ddi_periodic_delete(bgep->periodic_id); 3004 bgep->periodic_id = NULL; 3005 } 3006 if (bgep->progress & PROGRESS_KSTATS) 3007 bge_fini_kstats(bgep); 3008 if (bgep->progress & PROGRESS_PHY) 3009 bge_phys_reset(bgep); 3010 if (bgep->progress & PROGRESS_HWINT) { 3011 mutex_enter(bgep->genlock); 3012 #ifdef BGE_IPMI_ASF 3013 if (bge_chip_reset(bgep, B_FALSE, asf_mode) != DDI_SUCCESS) 3014 #else 3015 if (bge_chip_reset(bgep, B_FALSE) != DDI_SUCCESS) 3016 #endif 3017 ddi_fm_service_impact(bgep->devinfo, 3018 DDI_SERVICE_UNAFFECTED); 3019 #ifdef BGE_IPMI_ASF 3020 if (bgep->asf_enabled) { 3021 /* 3022 * This register has been overlaid. We restore its 3023 * initial value here. 
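 * (it is rewritten by the bge_nic_put32() call below, which stores
 * BGE_NIC_DATA_SIG back at BGE_NIC_DATA_SIG_ADDR)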
3024 */ 3025 bge_nic_put32(bgep, BGE_NIC_DATA_SIG_ADDR, 3026 BGE_NIC_DATA_SIG); 3027 } 3028 #endif 3029 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) 3030 ddi_fm_service_impact(bgep->devinfo, 3031 DDI_SERVICE_UNAFFECTED); 3032 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) 3033 ddi_fm_service_impact(bgep->devinfo, 3034 DDI_SERVICE_UNAFFECTED); 3035 mutex_exit(bgep->genlock); 3036 } 3037 if (bgep->progress & PROGRESS_INTR) { 3038 bge_intr_disable(bgep); 3039 bge_fini_rings(bgep); 3040 } 3041 if (bgep->progress & PROGRESS_HWINT) { 3042 bge_rem_intrs(bgep); 3043 rw_destroy(bgep->errlock); 3044 mutex_destroy(bgep->softintrlock); 3045 mutex_destroy(bgep->genlock); 3046 } 3047 if (bgep->progress & PROGRESS_FACTOTUM) 3048 ddi_remove_softintr(bgep->factotum_id); 3049 if (bgep->progress & PROGRESS_RESCHED) 3050 ddi_remove_softintr(bgep->drain_id); 3051 if (bgep->progress & PROGRESS_BUFS) 3052 bge_free_bufs(bgep); 3053 if (bgep->progress & PROGRESS_REGS) 3054 ddi_regs_map_free(&bgep->io_handle); 3055 if (bgep->progress & PROGRESS_CFG) 3056 pci_config_teardown(&bgep->cfg_handle); 3057 3058 bge_fm_fini(bgep); 3059 3060 ddi_remove_minor_node(bgep->devinfo, NULL); 3061 kmem_free(bgep->pstats, sizeof (bge_statistics_reg_t)); 3062 kmem_free(bgep, sizeof (*bgep)); 3063 } 3064 3065 static int 3066 bge_resume(dev_info_t *devinfo) 3067 { 3068 bge_t *bgep; /* Our private data */ 3069 chip_id_t *cidp; 3070 chip_id_t chipid; 3071 3072 bgep = ddi_get_driver_private(devinfo); 3073 if (bgep == NULL) 3074 return (DDI_FAILURE); 3075 3076 /* 3077 * Refuse to resume if the data structures aren't consistent 3078 */ 3079 if (bgep->devinfo != devinfo) 3080 return (DDI_FAILURE); 3081 3082 #ifdef BGE_IPMI_ASF 3083 /* 3084 * Power management is not currently supported in BGE. If you 3085 * want to implement it, please add the ASF/IPMI-related 3086 * code here. 3087 */ 3088 3089 #endif 3090 3091 /* 3092 * Read chip ID & set up config space command register(s) 3093 * Refuse to resume if the chip has changed its identity!
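 * (the vendor, device, revision and asic_rev fields must all match
 * the values captured at attach time)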
3094 */ 3095 cidp = &bgep->chipid; 3096 mutex_enter(bgep->genlock); 3097 bge_chip_cfg_init(bgep, &chipid, B_FALSE); 3098 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) { 3099 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 3100 mutex_exit(bgep->genlock); 3101 return (DDI_FAILURE); 3102 } 3103 mutex_exit(bgep->genlock); 3104 if (chipid.vendor != cidp->vendor) 3105 return (DDI_FAILURE); 3106 if (chipid.device != cidp->device) 3107 return (DDI_FAILURE); 3108 if (chipid.revision != cidp->revision) 3109 return (DDI_FAILURE); 3110 if (chipid.asic_rev != cidp->asic_rev) 3111 return (DDI_FAILURE); 3112 3113 /* 3114 * All OK, reinitialise h/w & kick off GLD scheduling 3115 */ 3116 mutex_enter(bgep->genlock); 3117 if (bge_restart(bgep, B_TRUE) != DDI_SUCCESS) { 3118 (void) bge_check_acc_handle(bgep, bgep->cfg_handle); 3119 (void) bge_check_acc_handle(bgep, bgep->io_handle); 3120 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 3121 mutex_exit(bgep->genlock); 3122 return (DDI_FAILURE); 3123 } 3124 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) { 3125 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 3126 mutex_exit(bgep->genlock); 3127 return (DDI_FAILURE); 3128 } 3129 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) { 3130 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 3131 mutex_exit(bgep->genlock); 3132 return (DDI_FAILURE); 3133 } 3134 mutex_exit(bgep->genlock); 3135 return (DDI_SUCCESS); 3136 } 3137 3138 /* 3139 * attach(9E) -- Attach a device to the system 3140 * 3141 * Called once for each board successfully probed. 3142 */ 3143 static int 3144 bge_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd) 3145 { 3146 bge_t *bgep; /* Our private data */ 3147 mac_register_t *macp; 3148 chip_id_t *cidp; 3149 caddr_t regs; 3150 int instance; 3151 int err; 3152 int intr_types; 3153 #ifdef BGE_IPMI_ASF 3154 uint32_t mhcrValue; 3155 #ifdef __sparc 3156 uint16_t value16; 3157 #endif 3158 #ifdef BGE_NETCONSOLE 3159 int retval; 3160 #endif 3161 #endif 3162 3163 instance = ddi_get_instance(devinfo); 3164 3165 BGE_GTRACE(("bge_attach($%p, %d) instance %d", 3166 (void *)devinfo, cmd, instance)); 3167 BGE_BRKPT(NULL, "bge_attach"); 3168 3169 switch (cmd) { 3170 default: 3171 return (DDI_FAILURE); 3172 3173 case DDI_RESUME: 3174 return (bge_resume(devinfo)); 3175 3176 case DDI_ATTACH: 3177 break; 3178 } 3179 3180 bgep = kmem_zalloc(sizeof (*bgep), KM_SLEEP); 3181 bgep->pstats = kmem_zalloc(sizeof (bge_statistics_reg_t), KM_SLEEP); 3182 ddi_set_driver_private(devinfo, bgep); 3183 bgep->bge_guard = BGE_GUARD; 3184 bgep->devinfo = devinfo; 3185 bgep->param_drain_max = 64; 3186 bgep->param_msi_cnt = 0; 3187 bgep->param_loop_mode = 0; 3188 3189 /* 3190 * Initialize more fields in BGE private data 3191 */ 3192 bgep->debug = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 3193 DDI_PROP_DONTPASS, debug_propname, bge_debug); 3194 (void) snprintf(bgep->ifname, sizeof (bgep->ifname), "%s%d", 3195 BGE_DRIVER_NAME, instance); 3196 3197 /* 3198 * Initialize for fma support 3199 */ 3200 bgep->fm_capabilities = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 3201 DDI_PROP_DONTPASS, fm_cap, 3202 DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE | 3203 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE); 3204 BGE_DEBUG(("bgep->fm_capabilities = %d", bgep->fm_capabilities)); 3205 bge_fm_init(bgep); 3206 3207 /* 3208 * Look up the IOMMU's page size for DVMA mappings (must be 3209 * a power of 2) and convert to a mask. 
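 * For example, a page size of 0x2000 produces the mask -0x2000,
 * i.e. ~0x1FFF: all-ones above thirteen low-order zeroes.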
This can be used to 3210 * determine whether a message buffer crosses a page boundary. 3211 * Note: in 2s complement binary notation, if X is a power of 3212 * 2, then -X has the representation "11...1100...00". 3213 */ 3214 bgep->pagemask = dvma_pagesize(devinfo); 3215 ASSERT(ddi_ffs(bgep->pagemask) == ddi_fls(bgep->pagemask)); 3216 bgep->pagemask = -bgep->pagemask; 3217 3218 /* 3219 * Map config space registers 3220 * Read chip ID & set up config space command register(s) 3221 * 3222 * Note: this leaves the chip accessible by Memory Space 3223 * accesses, but with interrupts and Bus Mastering off. 3224 * This should ensure that nothing untoward will happen 3225 * if it has been left active by the (net-)bootloader. 3226 * We'll re-enable Bus Mastering once we've reset the chip, 3227 * and allow interrupts only when everything else is set up. 3228 */ 3229 err = pci_config_setup(devinfo, &bgep->cfg_handle); 3230 #ifdef BGE_IPMI_ASF 3231 #ifdef __sparc 3232 value16 = pci_config_get16(bgep->cfg_handle, PCI_CONF_COMM); 3233 value16 = value16 | (PCI_COMM_MAE | PCI_COMM_ME); 3234 pci_config_put16(bgep->cfg_handle, PCI_CONF_COMM, value16); 3235 mhcrValue = MHCR_ENABLE_INDIRECT_ACCESS | 3236 MHCR_ENABLE_TAGGED_STATUS_MODE | 3237 MHCR_MASK_INTERRUPT_MODE | 3238 MHCR_MASK_PCI_INT_OUTPUT | 3239 MHCR_CLEAR_INTERRUPT_INTA | 3240 MHCR_ENABLE_ENDIAN_WORD_SWAP | 3241 MHCR_ENABLE_ENDIAN_BYTE_SWAP; 3242 pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_MHCR, mhcrValue); 3243 bge_ind_put32(bgep, MEMORY_ARBITER_MODE_REG, 3244 bge_ind_get32(bgep, MEMORY_ARBITER_MODE_REG) | 3245 MEMORY_ARBITER_ENABLE); 3246 #else 3247 mhcrValue = pci_config_get32(bgep->cfg_handle, PCI_CONF_BGE_MHCR); 3248 #endif 3249 if (mhcrValue & MHCR_ENABLE_ENDIAN_WORD_SWAP) { 3250 bgep->asf_wordswapped = B_TRUE; 3251 } else { 3252 bgep->asf_wordswapped = B_FALSE; 3253 } 3254 bge_asf_get_config(bgep); 3255 #endif 3256 if (err != DDI_SUCCESS) { 3257 bge_problem(bgep, "pci_config_setup() failed"); 3258 goto attach_fail; 3259 } 3260 bgep->progress |= PROGRESS_CFG; 3261 cidp = &bgep->chipid; 3262 bzero(cidp, sizeof (*cidp)); 3263 bge_chip_cfg_init(bgep, cidp, B_FALSE); 3264 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) { 3265 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 3266 goto attach_fail; 3267 } 3268 3269 #ifdef BGE_IPMI_ASF 3270 if (DEVICE_5721_SERIES_CHIPSETS(bgep) || 3271 DEVICE_5714_SERIES_CHIPSETS(bgep)) { 3272 bgep->asf_newhandshake = B_TRUE; 3273 } else { 3274 bgep->asf_newhandshake = B_FALSE; 3275 } 3276 #endif 3277 3278 /* 3279 * Update those parts of the chip ID derived from volatile 3280 * registers with the values seen by OBP (in case the chip 3281 * has been reset externally and therefore lost them). 
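 * Each ddi_prop_get_int() lookup below defaults to the value
 * already read from the chip, so an absent property is harmless.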
3282 */ 3283 cidp->subven = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 3284 DDI_PROP_DONTPASS, subven_propname, cidp->subven); 3285 cidp->subdev = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 3286 DDI_PROP_DONTPASS, subdev_propname, cidp->subdev); 3287 cidp->clsize = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 3288 DDI_PROP_DONTPASS, clsize_propname, cidp->clsize); 3289 cidp->latency = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 3290 DDI_PROP_DONTPASS, latency_propname, cidp->latency); 3291 cidp->rx_rings = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 3292 DDI_PROP_DONTPASS, rxrings_propname, cidp->rx_rings); 3293 cidp->tx_rings = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 3294 DDI_PROP_DONTPASS, txrings_propname, cidp->tx_rings); 3295 3296 cidp->default_mtu = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 3297 DDI_PROP_DONTPASS, default_mtu, BGE_DEFAULT_MTU); 3298 if ((cidp->default_mtu < BGE_DEFAULT_MTU) || 3299 (cidp->default_mtu > BGE_MAXIMUM_MTU)) { 3300 cidp->default_mtu = BGE_DEFAULT_MTU; 3301 } 3302 3303 /* 3304 * Map operating registers 3305 */ 3306 err = ddi_regs_map_setup(devinfo, BGE_PCI_OPREGS_RNUMBER, 3307 ®s, 0, 0, &bge_reg_accattr, &bgep->io_handle); 3308 if (err != DDI_SUCCESS) { 3309 bge_problem(bgep, "ddi_regs_map_setup() failed"); 3310 goto attach_fail; 3311 } 3312 bgep->io_regs = regs; 3313 bgep->progress |= PROGRESS_REGS; 3314 3315 /* 3316 * Characterise the device, so we know its requirements. 3317 * Then allocate the appropriate TX and RX descriptors & buffers. 3318 */ 3319 if (bge_chip_id_init(bgep) == EIO) { 3320 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 3321 goto attach_fail; 3322 } 3323 3324 err = bge_alloc_bufs(bgep); 3325 if (err != DDI_SUCCESS) { 3326 bge_problem(bgep, "DMA buffer allocation failed"); 3327 goto attach_fail; 3328 } 3329 bgep->progress |= PROGRESS_BUFS; 3330 3331 /* 3332 * Add the softint handlers: 3333 * 3334 * Both of these handlers are used to avoid restrictions on the 3335 * context and/or mutexes required for some operations. In 3336 * particular, the hardware interrupt handler and its subfunctions 3337 * can detect a number of conditions that we don't want to handle 3338 * in that context or with that set of mutexes held. So, these 3339 * softints are triggered instead: 3340 * 3341 * the <resched> softint is triggered if we have previously 3342 * had to refuse to send a packet because of resource shortage 3343 * (we've run out of transmit buffers), but the send completion 3344 * interrupt handler has now detected that more buffers have 3345 * become available. 3346 * 3347 * the <factotum> is triggered if the h/w interrupt handler 3348 * sees the <link state changed> or <error> bits in the status 3349 * block. It's also triggered periodically to poll the link 3350 * state, just in case we aren't getting link status change 3351 * interrupts ... 
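 * Both softints are registered at DDI_SOFTINT_LOW priority,
 * immediately below.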
3352 */ 3353 err = ddi_add_softintr(devinfo, DDI_SOFTINT_LOW, &bgep->drain_id, 3354 NULL, NULL, bge_send_drain, (caddr_t)bgep); 3355 if (err != DDI_SUCCESS) { 3356 bge_problem(bgep, "ddi_add_softintr() failed"); 3357 goto attach_fail; 3358 } 3359 bgep->progress |= PROGRESS_RESCHED; 3360 err = ddi_add_softintr(devinfo, DDI_SOFTINT_LOW, &bgep->factotum_id, 3361 NULL, NULL, bge_chip_factotum, (caddr_t)bgep); 3362 if (err != DDI_SUCCESS) { 3363 bge_problem(bgep, "ddi_add_softintr() failed"); 3364 goto attach_fail; 3365 } 3366 bgep->progress |= PROGRESS_FACTOTUM; 3367 3368 /* Get supported interrupt types */ 3369 if (ddi_intr_get_supported_types(devinfo, &intr_types) != DDI_SUCCESS) { 3370 bge_error(bgep, "ddi_intr_get_supported_types failed\n"); 3371 3372 goto attach_fail; 3373 } 3374 3375 BGE_DEBUG(("%s: ddi_intr_get_supported_types() returned: %x", 3376 bgep->ifname, intr_types)); 3377 3378 if ((intr_types & DDI_INTR_TYPE_MSI) && bgep->chipid.msi_enabled) { 3379 if (bge_add_intrs(bgep, DDI_INTR_TYPE_MSI) != DDI_SUCCESS) { 3380 bge_error(bgep, "MSI registration failed, " 3381 "trying FIXED interrupt type\n"); 3382 } else { 3383 BGE_DEBUG(("%s: Using MSI interrupt type", 3384 bgep->ifname)); 3385 bgep->intr_type = DDI_INTR_TYPE_MSI; 3386 bgep->progress |= PROGRESS_HWINT; 3387 } 3388 } 3389 3390 if (!(bgep->progress & PROGRESS_HWINT) && 3391 (intr_types & DDI_INTR_TYPE_FIXED)) { 3392 if (bge_add_intrs(bgep, DDI_INTR_TYPE_FIXED) != DDI_SUCCESS) { 3393 bge_error(bgep, "FIXED interrupt " 3394 "registration failed\n"); 3395 goto attach_fail; 3396 } 3397 3398 BGE_DEBUG(("%s: Using FIXED interrupt type", bgep->ifname)); 3399 3400 bgep->intr_type = DDI_INTR_TYPE_FIXED; 3401 bgep->progress |= PROGRESS_HWINT; 3402 } 3403 3404 if (!(bgep->progress & PROGRESS_HWINT)) { 3405 bge_error(bgep, "No interrupts registered\n"); 3406 goto attach_fail; 3407 } 3408 3409 /* 3410 * Note that interrupts are not enabled yet as 3411 * mutex locks are not initialized. Initialize mutex locks. 3412 */ 3413 mutex_init(bgep->genlock, NULL, MUTEX_DRIVER, 3414 DDI_INTR_PRI(bgep->intr_pri)); 3415 mutex_init(bgep->softintrlock, NULL, MUTEX_DRIVER, 3416 DDI_INTR_PRI(bgep->intr_pri)); 3417 rw_init(bgep->errlock, NULL, RW_DRIVER, 3418 DDI_INTR_PRI(bgep->intr_pri)); 3419 3420 /* 3421 * Initialize rings. 3422 */ 3423 bge_init_rings(bgep); 3424 3425 /* 3426 * Now that mutex locks are initialized, enable interrupts. 3427 */ 3428 bge_intr_enable(bgep); 3429 bgep->progress |= PROGRESS_INTR; 3430 3431 /* 3432 * Initialise link state variables 3433 * Stop, reset & reinitialise the chip. 3434 * Initialise the (internal) PHY. 3435 */ 3436 bgep->link_state = LINK_STATE_UNKNOWN; 3437 3438 mutex_enter(bgep->genlock); 3439 3440 /* 3441 * Reset chip & rings to initial state; also reset address 3442 * filtering, promiscuity, loopback mode. 
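 * (the multicast hash and reference counts, the promisc flag and the
 * loopback mode are all cleared just after the reset below)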
3443 */ 3444 #ifdef BGE_IPMI_ASF 3445 #ifdef BGE_NETCONSOLE 3446 if (bge_reset(bgep, ASF_MODE_INIT) != DDI_SUCCESS) { 3447 #else 3448 if (bge_reset(bgep, ASF_MODE_SHUTDOWN) != DDI_SUCCESS) { 3449 #endif 3450 #else 3451 if (bge_reset(bgep) != DDI_SUCCESS) { 3452 #endif 3453 (void) bge_check_acc_handle(bgep, bgep->cfg_handle); 3454 (void) bge_check_acc_handle(bgep, bgep->io_handle); 3455 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 3456 mutex_exit(bgep->genlock); 3457 goto attach_fail; 3458 } 3459 3460 #ifdef BGE_IPMI_ASF 3461 if (bgep->asf_enabled) { 3462 bgep->asf_status = ASF_STAT_RUN_INIT; 3463 } 3464 #endif 3465 3466 bzero(bgep->mcast_hash, sizeof (bgep->mcast_hash)); 3467 bzero(bgep->mcast_refs, sizeof (bgep->mcast_refs)); 3468 bgep->promisc = B_FALSE; 3469 bgep->param_loop_mode = BGE_LOOP_NONE; 3470 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) { 3471 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 3472 mutex_exit(bgep->genlock); 3473 goto attach_fail; 3474 } 3475 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) { 3476 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 3477 mutex_exit(bgep->genlock); 3478 goto attach_fail; 3479 } 3480 3481 mutex_exit(bgep->genlock); 3482 3483 if (bge_phys_init(bgep) == EIO) { 3484 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 3485 goto attach_fail; 3486 } 3487 bgep->progress |= PROGRESS_PHY; 3488 3489 /* 3490 * initialize NDD-tweakable parameters 3491 */ 3492 if (bge_nd_init(bgep)) { 3493 bge_problem(bgep, "bge_nd_init() failed"); 3494 goto attach_fail; 3495 } 3496 bgep->progress |= PROGRESS_NDD; 3497 3498 /* 3499 * Create & initialise named kstats 3500 */ 3501 bge_init_kstats(bgep, instance); 3502 bgep->progress |= PROGRESS_KSTATS; 3503 3504 /* 3505 * Determine whether to override the chip's own MAC address 3506 */ 3507 bge_find_mac_address(bgep, cidp); 3508 3509 bgep->unicst_addr_total = MAC_ADDRESS_REGS_MAX; 3510 bgep->unicst_addr_avail = MAC_ADDRESS_REGS_MAX; 3511 3512 if ((macp = mac_alloc(MAC_VERSION)) == NULL) 3513 goto attach_fail; 3514 macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER; 3515 macp->m_driver = bgep; 3516 macp->m_dip = devinfo; 3517 macp->m_src_addr = cidp->vendor_addr.addr; 3518 macp->m_callbacks = &bge_m_callbacks; 3519 macp->m_min_sdu = 0; 3520 macp->m_max_sdu = cidp->ethmax_size - sizeof (struct ether_header); 3521 macp->m_margin = VLAN_TAGSZ; 3522 macp->m_priv_props = bge_priv_prop; 3523 macp->m_priv_prop_count = BGE_MAX_PRIV_PROPS; 3524 macp->m_v12n = MAC_VIRT_LEVEL1; 3525 3526 /* 3527 * Finally, we're ready to register ourselves with the MAC layer 3528 * interface; if this succeeds, we're all ready to start() 3529 */ 3530 err = mac_register(macp, &bgep->mh); 3531 mac_free(macp); 3532 if (err != 0) 3533 goto attach_fail; 3534 3535 /* 3536 * Register a periodic handler. 3537 * bge_chip_cyclic() is invoked in kernel context.
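 * (the interval, BGE_CYCLIC_PERIOD, is passed to ddi_periodic_add(9F),
 * which expresses intervals in nanoseconds)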
3538 */ 3539 bgep->periodic_id = ddi_periodic_add(bge_chip_cyclic, bgep, 3540 BGE_CYCLIC_PERIOD, DDI_IPL_0); 3541 3542 bgep->progress |= PROGRESS_READY; 3543 ASSERT(bgep->bge_guard == BGE_GUARD); 3544 #ifdef BGE_IPMI_ASF 3545 #ifdef BGE_NETCONSOLE 3546 if (bgep->asf_enabled) { 3547 mutex_enter(bgep->genlock); 3548 retval = bge_chip_start(bgep, B_TRUE); 3549 mutex_exit(bgep->genlock); 3550 if (retval != DDI_SUCCESS) 3551 goto attach_fail; 3552 } 3553 #endif 3554 #endif 3555 3556 ddi_report_dev(devinfo); 3557 BGE_REPORT((bgep, "bge version: %s", bge_version)); 3558 3559 return (DDI_SUCCESS); 3560 3561 attach_fail: 3562 #ifdef BGE_IPMI_ASF 3563 bge_unattach(bgep, ASF_MODE_SHUTDOWN); 3564 #else 3565 bge_unattach(bgep); 3566 #endif 3567 return (DDI_FAILURE); 3568 } 3569 3570 /* 3571 * bge_suspend() -- suspend transmit/receive for powerdown 3572 */ 3573 static int 3574 bge_suspend(bge_t *bgep) 3575 { 3576 /* 3577 * Stop processing and idle (powerdown) the PHY ... 3578 */ 3579 mutex_enter(bgep->genlock); 3580 #ifdef BGE_IPMI_ASF 3581 /* 3582 * Power management is not currently supported in BGE. If you 3583 * want to implement it, please add the ASF/IPMI-related 3584 * code here. 3585 */ 3586 #endif 3587 bge_stop(bgep); 3588 if (bge_phys_idle(bgep) != DDI_SUCCESS) { 3589 (void) bge_check_acc_handle(bgep, bgep->io_handle); 3590 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 3591 mutex_exit(bgep->genlock); 3592 return (DDI_FAILURE); 3593 } 3594 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) { 3595 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 3596 mutex_exit(bgep->genlock); 3597 return (DDI_FAILURE); 3598 } 3599 mutex_exit(bgep->genlock); 3600 3601 return (DDI_SUCCESS); 3602 } 3603 3604 /* 3605 * quiesce(9E) entry point. 3606 * 3607 * This function is called when the system is single-threaded at high 3608 * PIL with preemption disabled. Therefore, this function must not 3609 * block. 3610 * 3611 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure. 3612 * DDI_FAILURE indicates an error condition and should almost never happen.
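 * On sparc this entry point is not implemented: bge_quiesce is
 * defined to ddi_quiesce_not_supported(9F) below.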
3613 */ 3614 #ifdef __sparc 3615 #define bge_quiesce ddi_quiesce_not_supported 3616 #else 3617 static int 3618 bge_quiesce(dev_info_t *devinfo) 3619 { 3620 bge_t *bgep = ddi_get_driver_private(devinfo); 3621 3622 if (bgep == NULL) 3623 return (DDI_FAILURE); 3624 3625 if (bgep->intr_type == DDI_INTR_TYPE_FIXED) { 3626 bge_reg_set32(bgep, PCI_CONF_BGE_MHCR, 3627 MHCR_MASK_PCI_INT_OUTPUT); 3628 } else { 3629 bge_reg_clr32(bgep, MSI_MODE_REG, MSI_MSI_ENABLE); 3630 } 3631 3632 /* Stop the chip */ 3633 bge_chip_stop_nonblocking(bgep); 3634 3635 return (DDI_SUCCESS); 3636 } 3637 #endif 3638 3639 /* 3640 * detach(9E) -- Detach a device from the system 3641 */ 3642 static int 3643 bge_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd) 3644 { 3645 bge_t *bgep; 3646 #ifdef BGE_IPMI_ASF 3647 uint_t asf_mode; 3648 asf_mode = ASF_MODE_NONE; 3649 #endif 3650 3651 BGE_GTRACE(("bge_detach($%p, %d)", (void *)devinfo, cmd)); 3652 3653 bgep = ddi_get_driver_private(devinfo); 3654 3655 switch (cmd) { 3656 default: 3657 return (DDI_FAILURE); 3658 3659 case DDI_SUSPEND: 3660 return (bge_suspend(bgep)); 3661 3662 case DDI_DETACH: 3663 break; 3664 } 3665 3666 #ifdef BGE_IPMI_ASF 3667 mutex_enter(bgep->genlock); 3668 if (bgep->asf_enabled && ((bgep->asf_status == ASF_STAT_RUN) || 3669 (bgep->asf_status == ASF_STAT_RUN_INIT))) { 3670 3671 bge_asf_update_status(bgep); 3672 if (bgep->asf_status == ASF_STAT_RUN) { 3673 bge_asf_stop_timer(bgep); 3674 } 3675 bgep->asf_status = ASF_STAT_STOP; 3676 3677 bge_asf_pre_reset_operations(bgep, BGE_SHUTDOWN_RESET); 3678 3679 if (bgep->asf_pseudostop) { 3680 bge_chip_stop(bgep, B_FALSE); 3681 bgep->bge_mac_state = BGE_MAC_STOPPED; 3682 bgep->asf_pseudostop = B_FALSE; 3683 } 3684 3685 asf_mode = ASF_MODE_POST_SHUTDOWN; 3686 3687 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) 3688 ddi_fm_service_impact(bgep->devinfo, 3689 DDI_SERVICE_UNAFFECTED); 3690 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) 3691 ddi_fm_service_impact(bgep->devinfo, 3692 DDI_SERVICE_UNAFFECTED); 3693 } 3694 mutex_exit(bgep->genlock); 3695 #endif 3696 3697 /* 3698 * Unregister from the GLD subsystem. This can fail, in 3699 * particular if there are DLPI style-2 streams still open - 3700 * in which case we just return failure without shutting 3701 * down chip operations. 3702 */ 3703 if (mac_unregister(bgep->mh) != 0) 3704 return (DDI_FAILURE); 3705 3706 /* 3707 * All activity stopped, so we can clean up & exit 3708 */ 3709 #ifdef BGE_IPMI_ASF 3710 bge_unattach(bgep, asf_mode); 3711 #else 3712 bge_unattach(bgep); 3713 #endif 3714 return (DDI_SUCCESS); 3715 } 3716 3717 3718 /* 3719 * ========== Module Loading Data & Entry Points ========== 3720 */ 3721 3722 #undef BGE_DBG 3723 #define BGE_DBG BGE_DBG_INIT /* debug flag for this code */ 3724 3725 DDI_DEFINE_STREAM_OPS(bge_dev_ops, 3726 nulldev, /* identify */ 3727 nulldev, /* probe */ 3728 bge_attach, /* attach */ 3729 bge_detach, /* detach */ 3730 nodev, /* reset */ 3731 NULL, /* cb_ops */ 3732 D_MP, /* bus_ops */ 3733 NULL, /* power */ 3734 bge_quiesce /* quiesce */ 3735 ); 3736 3737 static struct modldrv bge_modldrv = { 3738 &mod_driverops, /* Type of module. 
This one is a driver */ 3739 bge_ident, /* short description */ 3740 &bge_dev_ops /* driver specific ops */ 3741 }; 3742 3743 static struct modlinkage modlinkage = { 3744 MODREV_1, (void *)&bge_modldrv, NULL 3745 }; 3746 3747 3748 int 3749 _info(struct modinfo *modinfop) 3750 { 3751 return (mod_info(&modlinkage, modinfop)); 3752 } 3753 3754 int 3755 _init(void) 3756 { 3757 int status; 3758 3759 mac_init_ops(&bge_dev_ops, "bge"); 3760 status = mod_install(&modlinkage); 3761 if (status == DDI_SUCCESS) 3762 mutex_init(bge_log_mutex, NULL, MUTEX_DRIVER, NULL); 3763 else 3764 mac_fini_ops(&bge_dev_ops); 3765 return (status); 3766 } 3767 3768 int 3769 _fini(void) 3770 { 3771 int status; 3772 3773 status = mod_remove(&modlinkage); 3774 if (status == DDI_SUCCESS) { 3775 mac_fini_ops(&bge_dev_ops); 3776 mutex_destroy(bge_log_mutex); 3777 } 3778 return (status); 3779 } 3780 3781 3782 /* 3783 * bge_add_intrs: 3784 * 3785 * Register FIXED or MSI interrupts. 3786 */ 3787 static int 3788 bge_add_intrs(bge_t *bgep, int intr_type) 3789 { 3790 dev_info_t *dip = bgep->devinfo; 3791 int avail, actual, intr_size, count = 0; 3792 int i, flag, ret; 3793 3794 BGE_DEBUG(("bge_add_intrs($%p, 0x%x)", (void *)bgep, intr_type)); 3795 3796 /* Get number of interrupts */ 3797 ret = ddi_intr_get_nintrs(dip, intr_type, &count); 3798 if ((ret != DDI_SUCCESS) || (count == 0)) { 3799 bge_error(bgep, "ddi_intr_get_nintrs() failure, ret: %d, " 3800 "count: %d", ret, count); 3801 3802 return (DDI_FAILURE); 3803 } 3804 3805 /* Get number of available interrupts */ 3806 ret = ddi_intr_get_navail(dip, intr_type, &avail); 3807 if ((ret != DDI_SUCCESS) || (avail == 0)) { 3808 bge_error(bgep, "ddi_intr_get_navail() failure, " 3809 "ret: %d, avail: %d\n", ret, avail); 3810 3811 return (DDI_FAILURE); 3812 } 3813 3814 if (avail < count) { 3815 BGE_DEBUG(("%s: nintrs() returned %d, navail returned %d", 3816 bgep->ifname, count, avail)); 3817 } 3818 3819 /* 3820 * BGE hardware generates only single MSI even though it claims 3821 * to support multiple MSIs. So, hard code MSI count value to 1. 
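 * DDI_INTR_ALLOC_STRICT then makes ddi_intr_alloc() fail outright
 * rather than return fewer vectors than requested.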
3822 */ 3823 if (intr_type == DDI_INTR_TYPE_MSI) { 3824 count = 1; 3825 flag = DDI_INTR_ALLOC_STRICT; 3826 } else { 3827 flag = DDI_INTR_ALLOC_NORMAL; 3828 } 3829 3830 /* Allocate an array of interrupt handles */ 3831 intr_size = count * sizeof (ddi_intr_handle_t); 3832 bgep->htable = kmem_alloc(intr_size, KM_SLEEP); 3833 3834 /* Call ddi_intr_alloc() */ 3835 ret = ddi_intr_alloc(dip, bgep->htable, intr_type, 0, 3836 count, &actual, flag); 3837 3838 if ((ret != DDI_SUCCESS) || (actual == 0)) { 3839 bge_error(bgep, "ddi_intr_alloc() failed %d\n", ret); 3840 3841 kmem_free(bgep->htable, intr_size); 3842 return (DDI_FAILURE); 3843 } 3844 3845 if (actual < count) { 3846 BGE_DEBUG(("%s: Requested: %d, Received: %d", 3847 bgep->ifname, count, actual)); 3848 } 3849 3850 bgep->intr_cnt = actual; 3851 3852 /* 3853 * Get priority for first msi, assume remaining are all the same 3854 */ 3855 if ((ret = ddi_intr_get_pri(bgep->htable[0], &bgep->intr_pri)) != 3856 DDI_SUCCESS) { 3857 bge_error(bgep, "ddi_intr_get_pri() failed %d\n", ret); 3858 3859 /* Free already allocated intr */ 3860 for (i = 0; i < actual; i++) { 3861 (void) ddi_intr_free(bgep->htable[i]); 3862 } 3863 3864 kmem_free(bgep->htable, intr_size); 3865 return (DDI_FAILURE); 3866 } 3867 3868 /* Call ddi_intr_add_handler() */ 3869 for (i = 0; i < actual; i++) { 3870 if ((ret = ddi_intr_add_handler(bgep->htable[i], bge_intr, 3871 (caddr_t)bgep, (caddr_t)(uintptr_t)i)) != DDI_SUCCESS) { 3872 bge_error(bgep, "ddi_intr_add_handler() " 3873 "failed %d\n", ret); 3874 3875 /* Free already allocated intr */ 3876 for (i = 0; i < actual; i++) { 3877 (void) ddi_intr_free(bgep->htable[i]); 3878 } 3879 3880 kmem_free(bgep->htable, intr_size); 3881 return (DDI_FAILURE); 3882 } 3883 } 3884 3885 if ((ret = ddi_intr_get_cap(bgep->htable[0], &bgep->intr_cap)) 3886 != DDI_SUCCESS) { 3887 bge_error(bgep, "ddi_intr_get_cap() failed %d\n", ret); 3888 3889 for (i = 0; i < actual; i++) { 3890 (void) ddi_intr_remove_handler(bgep->htable[i]); 3891 (void) ddi_intr_free(bgep->htable[i]); 3892 } 3893 3894 kmem_free(bgep->htable, intr_size); 3895 return (DDI_FAILURE); 3896 } 3897 3898 return (DDI_SUCCESS); 3899 } 3900 3901 /* 3902 * bge_rem_intrs: 3903 * 3904 * Unregister FIXED or MSI interrupts 3905 */ 3906 static void 3907 bge_rem_intrs(bge_t *bgep) 3908 { 3909 int i; 3910 3911 BGE_DEBUG(("bge_rem_intrs($%p)", (void *)bgep)); 3912 3913 /* Call ddi_intr_remove_handler() */ 3914 for (i = 0; i < bgep->intr_cnt; i++) { 3915 (void) ddi_intr_remove_handler(bgep->htable[i]); 3916 (void) ddi_intr_free(bgep->htable[i]); 3917 } 3918 3919 kmem_free(bgep->htable, bgep->intr_cnt * sizeof (ddi_intr_handle_t)); 3920 } 3921 3922 3923 void 3924 bge_intr_enable(bge_t *bgep) 3925 { 3926 int i; 3927 3928 if (bgep->intr_cap & DDI_INTR_FLAG_BLOCK) { 3929 /* Call ddi_intr_block_enable() for MSI interrupts */ 3930 (void) ddi_intr_block_enable(bgep->htable, bgep->intr_cnt); 3931 } else { 3932 /* Call ddi_intr_enable for MSI or FIXED interrupts */ 3933 for (i = 0; i < bgep->intr_cnt; i++) { 3934 (void) ddi_intr_enable(bgep->htable[i]); 3935 } 3936 } 3937 } 3938 3939 3940 void 3941 bge_intr_disable(bge_t *bgep) 3942 { 3943 int i; 3944 3945 if (bgep->intr_cap & DDI_INTR_FLAG_BLOCK) { 3946 /* Call ddi_intr_block_disable() */ 3947 (void) ddi_intr_block_disable(bgep->htable, bgep->intr_cnt); 3948 } else { 3949 for (i = 0; i < bgep->intr_cnt; i++) { 3950 (void) ddi_intr_disable(bgep->htable[i]); 3951 } 3952 } 3953 } 3954 3955 int 3956 bge_reprogram(bge_t *bgep) 3957 { 3958 int status = 0; 3959 3960 
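 /* Update the PHY first: it controls the speed/duplex parameters that the MAC code then uses (see the ioctl post-processing above) */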
ASSERT(mutex_owned(bgep->genlock)); 3961 3962 if (bge_phys_update(bgep) != DDI_SUCCESS) { 3963 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 3964 status = IOC_INVAL; 3965 } 3966 #ifdef BGE_IPMI_ASF 3967 if (bge_chip_sync(bgep, B_TRUE) == DDI_FAILURE) { 3968 #else 3969 if (bge_chip_sync(bgep) == DDI_FAILURE) { 3970 #endif 3971 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 3972 status = IOC_INVAL; 3973 } 3974 if (bgep->intr_type == DDI_INTR_TYPE_MSI) 3975 bge_chip_msi_trig(bgep); 3976 return (status); 3977 } 3978