/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 */

#include "bge_impl.h"
#include <sys/sdt.h>
#include <sys/mac_provider.h>
#include <sys/mac.h>
#include <sys/mac_flow.h>

/*
 * This is the string displayed by modinfo, etc.
 */
static char bge_ident[] = "Broadcom Gb Ethernet";

/*
 * Property names
 */
static char debug_propname[] = "bge-debug-flags";
static char clsize_propname[] = "cache-line-size";
static char latency_propname[] = "latency-timer";
static char localmac_boolname[] = "local-mac-address?";
static char localmac_propname[] = "local-mac-address";
static char macaddr_propname[] = "mac-address";
static char subdev_propname[] = "subsystem-id";
static char subven_propname[] = "subsystem-vendor-id";
static char rxrings_propname[] = "bge-rx-rings";
static char txrings_propname[] = "bge-tx-rings";
static char fm_cap[] = "fm-capable";
static char default_mtu[] = "default_mtu";

static int bge_add_intrs(bge_t *, int);
static void bge_rem_intrs(bge_t *);
static int bge_unicst_set(void *, const uint8_t *, int);

/*
 * Describes the chip's DMA engine
 */
static ddi_dma_attr_t dma_attr = {
    DMA_ATTR_V0,                /* dma_attr version     */
    0x0000000000000000ull,      /* dma_attr_addr_lo     */
    0xFFFFFFFFFFFFFFFFull,      /* dma_attr_addr_hi     */
    0x00000000FFFFFFFFull,      /* dma_attr_count_max   */
    0x0000000000000001ull,      /* dma_attr_align       */
    0x00000FFF,                 /* dma_attr_burstsizes  */
    0x00000001,                 /* dma_attr_minxfer     */
    0x000000000000FFFFull,      /* dma_attr_maxxfer     */
    0xFFFFFFFFFFFFFFFFull,      /* dma_attr_seg         */
    1,                          /* dma_attr_sgllen      */
    0x00000001,                 /* dma_attr_granular    */
    DDI_DMA_FLAGERR             /* dma_attr_flags       */
};

/*
 * PIO access attributes for registers
 */
static ddi_device_acc_attr_t bge_reg_accattr = {
    DDI_DEVICE_ATTR_V1,
    DDI_NEVERSWAP_ACC,
    DDI_STRICTORDER_ACC,
    DDI_FLAGERR_ACC
};

/*
 * DMA access attributes for descriptors: NOT to be byte swapped.
 */
static ddi_device_acc_attr_t bge_desc_accattr = {
    DDI_DEVICE_ATTR_V0,
    DDI_NEVERSWAP_ACC,
    DDI_STRICTORDER_ACC
};
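/*
 * Note: only bge_reg_accattr above uses the V1 attribute version with
 * DDI_FLAGERR_ACC, so register PIO handles participate in DDI fault
 * management; the descriptor attributes above and the data attributes
 * below are plain, unprotected V0 attributes.  Errors on the protected
 * handles are what the bge_check_acc_handle() calls scattered through
 * the entry points pick up.
 */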
/*
 * DMA access attributes for data: NOT to be byte swapped.
 */
static ddi_device_acc_attr_t bge_data_accattr = {
    DDI_DEVICE_ATTR_V0,
    DDI_NEVERSWAP_ACC,
    DDI_STRICTORDER_ACC
};

static int bge_m_start(void *);
static void bge_m_stop(void *);
static int bge_m_promisc(void *, boolean_t);
static int bge_m_multicst(void *, boolean_t, const uint8_t *);
static void bge_m_ioctl(void *, queue_t *, mblk_t *);
static boolean_t bge_m_getcapab(void *, mac_capab_t, void *);
static int bge_unicst_set(void *, const uint8_t *,
    int);
static int bge_m_setprop(void *, const char *, mac_prop_id_t,
    uint_t, const void *);
static int bge_m_getprop(void *, const char *, mac_prop_id_t,
    uint_t, void *);
static void bge_m_propinfo(void *, const char *, mac_prop_id_t,
    mac_prop_info_handle_t);
static int bge_set_priv_prop(bge_t *, const char *, uint_t,
    const void *);
static int bge_get_priv_prop(bge_t *, const char *, uint_t,
    void *);
static void bge_priv_propinfo(const char *,
    mac_prop_info_handle_t);

#define BGE_M_CALLBACK_FLAGS (MC_IOCTL | MC_GETCAPAB | MC_SETPROP | \
    MC_GETPROP | MC_PROPINFO)

static mac_callbacks_t bge_m_callbacks = {
    BGE_M_CALLBACK_FLAGS,
    bge_m_stat,
    bge_m_start,
    bge_m_stop,
    bge_m_promisc,
    bge_m_multicst,
    NULL,
    bge_m_tx,
    NULL,
    bge_m_ioctl,
    bge_m_getcapab,
    NULL,
    NULL,
    bge_m_setprop,
    bge_m_getprop,
    bge_m_propinfo
};

char *bge_priv_prop[] = {
    "_adv_asym_pause_cap",
    "_adv_pause_cap",
    "_drain_max",
    "_msi_cnt",
    "_rx_intr_coalesce_blank_time",
    "_tx_intr_coalesce_blank_time",
    "_rx_intr_coalesce_pkt_cnt",
    "_tx_intr_coalesce_pkt_cnt",
    NULL
};

uint8_t zero_addr[6] = {0, 0, 0, 0, 0, 0};

/*
 * ========== Transmit and receive ring reinitialisation ==========
 */

/*
 * These <reinit> routines each reset the specified ring to an initial
 * state, assuming that the corresponding <init> routine has already
 * been called exactly once.
 */
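/*
 * A sketch of the tx buffer recycling scheme reinitialised below, as
 * reconstructed from the queue fields (the consuming/swapping logic
 * lives in the transmit path, not in this file): freed buffers are
 * pushed onto <txbuf_push_queue> (initially the free queue) and
 * allocation pops from <txbuf_pop_queue>; each queue is a simple LIFO
 * of bge_queue_item_t entries protected by its own lock.
 */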
static void
bge_reinit_send_ring(send_ring_t *srp)
{
    bge_queue_t *txbuf_queue;
    bge_queue_item_t *txbuf_head;
    sw_txbuf_t *txbuf;
    sw_sbd_t *ssbdp;
    uint32_t slot;

    /*
     * Reinitialise control variables ...
     */
    srp->tx_flow = 0;
    srp->tx_next = 0;
    srp->txfill_next = 0;
    srp->tx_free = srp->desc.nslots;
    ASSERT(mutex_owned(srp->tc_lock));
    srp->tc_next = 0;
    srp->txpkt_next = 0;
    srp->tx_block = 0;
    srp->tx_nobd = 0;
    srp->tx_nobuf = 0;

    /*
     * Initialize the tx buffer push queue
     */
    mutex_enter(srp->freetxbuf_lock);
    mutex_enter(srp->txbuf_lock);
    txbuf_queue = &srp->freetxbuf_queue;
    txbuf_queue->head = NULL;
    txbuf_queue->count = 0;
    txbuf_queue->lock = srp->freetxbuf_lock;
    srp->txbuf_push_queue = txbuf_queue;

    /*
     * Initialize the tx buffer pop queue
     */
    txbuf_queue = &srp->txbuf_queue;
    txbuf_queue->head = NULL;
    txbuf_queue->count = 0;
    txbuf_queue->lock = srp->txbuf_lock;
    srp->txbuf_pop_queue = txbuf_queue;
    txbuf_head = srp->txbuf_head;
    txbuf = srp->txbuf;
    for (slot = 0; slot < srp->tx_buffers; ++slot) {
        txbuf_head->item = txbuf;
        txbuf_head->next = txbuf_queue->head;
        txbuf_queue->head = txbuf_head;
        txbuf_queue->count++;
        txbuf++;
        txbuf_head++;
    }
    mutex_exit(srp->txbuf_lock);
    mutex_exit(srp->freetxbuf_lock);

    /*
     * Zero and sync all the h/w Send Buffer Descriptors
     */
    DMA_ZERO(srp->desc);
    DMA_SYNC(srp->desc, DDI_DMA_SYNC_FORDEV);
    bzero(srp->pktp, BGE_SEND_BUF_MAX * sizeof (*srp->pktp));
    ssbdp = srp->sw_sbds;
    for (slot = 0; slot < srp->desc.nslots; ++ssbdp, ++slot)
        ssbdp->pbuf = NULL;
}

static void
bge_reinit_recv_ring(recv_ring_t *rrp)
{
    /*
     * Reinitialise control variables ...
     */
    rrp->rx_next = 0;
}

static void
bge_reinit_buff_ring(buff_ring_t *brp, uint32_t ring)
{
    bge_rbd_t *hw_rbd_p;
    sw_rbd_t *srbdp;
    uint32_t bufsize;
    uint32_t nslots;
    uint32_t slot;

    static uint16_t ring_type_flag[BGE_BUFF_RINGS_MAX] = {
        RBD_FLAG_STD_RING,
        RBD_FLAG_JUMBO_RING,
        RBD_FLAG_MINI_RING
    };

    /*
     * Zero, initialise and sync all the h/w Receive Buffer Descriptors
     * Note: all the remaining fields (<type>, <flags>, <ip_cksum>,
     * <tcp_udp_cksum>, <error_flag>, <vlan_tag>, and <reserved>)
     * should be zeroed, and so don't need to be set up specifically
     * once the whole area has been cleared.
     */
    DMA_ZERO(brp->desc);

    hw_rbd_p = DMA_VPTR(brp->desc);
    nslots = brp->desc.nslots;
    ASSERT(brp->buf[0].nslots == nslots/BGE_SPLIT);
    bufsize = brp->buf[0].size;
    srbdp = brp->sw_rbds;
    for (slot = 0; slot < nslots; ++hw_rbd_p, ++srbdp, ++slot) {
        hw_rbd_p->host_buf_addr = srbdp->pbuf.cookie.dmac_laddress;
        hw_rbd_p->index = (uint16_t)slot;
        hw_rbd_p->len = (uint16_t)bufsize;
        hw_rbd_p->opaque = srbdp->pbuf.token;
        hw_rbd_p->flags |= ring_type_flag[ring];
    }

    DMA_SYNC(brp->desc, DDI_DMA_SYNC_FORDEV);

    /*
     * Finally, reinitialise the ring control variables ...
     */
    brp->rf_next = (nslots != 0) ? (nslots-1) : 0;
}
/*
 * Reinitialize all rings
 */
static void
bge_reinit_rings(bge_t *bgep)
{
    uint32_t ring;

    ASSERT(mutex_owned(bgep->genlock));

    /*
     * Send Rings ...
     */
    for (ring = 0; ring < bgep->chipid.tx_rings; ++ring)
        bge_reinit_send_ring(&bgep->send[ring]);

    /*
     * Receive Return Rings ...
     */
    for (ring = 0; ring < bgep->chipid.rx_rings; ++ring)
        bge_reinit_recv_ring(&bgep->recv[ring]);

    /*
     * Receive Producer Rings ...
     */
    for (ring = 0; ring < BGE_BUFF_RINGS_USED; ++ring)
        bge_reinit_buff_ring(&bgep->buff[ring], ring);
}

/*
 * ========== Internal state management entry points ==========
 */

#undef  BGE_DBG
#define BGE_DBG     BGE_DBG_NEMO    /* debug flag for this code */

/*
 * These routines provide all the functionality required by the
 * corresponding GLD entry points, but don't update the GLD state
 * so they can be called internally without disturbing our record
 * of what GLD thinks we should be doing ...
 */

/*
 * bge_reset() -- reset h/w & rings to initial state
 */
static int
#ifdef BGE_IPMI_ASF
bge_reset(bge_t *bgep, uint_t asf_mode)
#else
bge_reset(bge_t *bgep)
#endif
{
    uint32_t ring;
    int retval;

    BGE_TRACE(("bge_reset($%p)", (void *)bgep));

    ASSERT(mutex_owned(bgep->genlock));

    /*
     * Grab all the other mutexes in the world (this should
     * ensure no other threads are manipulating driver state)
     */
    for (ring = 0; ring < BGE_RECV_RINGS_MAX; ++ring)
        mutex_enter(bgep->recv[ring].rx_lock);
    for (ring = 0; ring < BGE_BUFF_RINGS_MAX; ++ring)
        mutex_enter(bgep->buff[ring].rf_lock);
    rw_enter(bgep->errlock, RW_WRITER);
    for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring)
        mutex_enter(bgep->send[ring].tx_lock);
    for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring)
        mutex_enter(bgep->send[ring].tc_lock);

#ifdef BGE_IPMI_ASF
    retval = bge_chip_reset(bgep, B_TRUE, asf_mode);
#else
    retval = bge_chip_reset(bgep, B_TRUE);
#endif
    bge_reinit_rings(bgep);

    /*
     * Free the world ...
     */
    for (ring = BGE_SEND_RINGS_MAX; ring-- > 0; )
        mutex_exit(bgep->send[ring].tc_lock);
    for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring)
        mutex_exit(bgep->send[ring].tx_lock);
    rw_exit(bgep->errlock);
    for (ring = BGE_BUFF_RINGS_MAX; ring-- > 0; )
        mutex_exit(bgep->buff[ring].rf_lock);
    for (ring = BGE_RECV_RINGS_MAX; ring-- > 0; )
        mutex_exit(bgep->recv[ring].rx_lock);

    BGE_DEBUG(("bge_reset($%p) done", (void *)bgep));
    return (retval);
}
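/*
 * Note the lock ordering bge_reset() establishes above: rx_lock(s),
 * then rf_lock(s), then errlock, then tx_lock(s), then tc_lock(s),
 * released in roughly the reverse order.  Any other code path that
 * takes more than one of these locks has to follow the same order to
 * stay deadlock-free against a concurrent reset.
 */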
/*
 * bge_stop() -- stop processing, don't reset h/w or rings
 */
static void
bge_stop(bge_t *bgep)
{
    BGE_TRACE(("bge_stop($%p)", (void *)bgep));

    ASSERT(mutex_owned(bgep->genlock));

#ifdef BGE_IPMI_ASF
    if (bgep->asf_enabled) {
        bgep->asf_pseudostop = B_TRUE;
    } else {
#endif
        bge_chip_stop(bgep, B_FALSE);
#ifdef BGE_IPMI_ASF
    }
#endif

    BGE_DEBUG(("bge_stop($%p) done", (void *)bgep));
}

/*
 * bge_start() -- start transmitting/receiving
 */
static int
bge_start(bge_t *bgep, boolean_t reset_phys)
{
    int retval;

    BGE_TRACE(("bge_start($%p, %d)", (void *)bgep, reset_phys));

    ASSERT(mutex_owned(bgep->genlock));

    /*
     * Start chip processing, including enabling interrupts
     */
    retval = bge_chip_start(bgep, reset_phys);

    BGE_DEBUG(("bge_start($%p, %d) done", (void *)bgep, reset_phys));
    return (retval);
}

/*
 * bge_restart - restart transmitting/receiving after error or suspend
 */
int
bge_restart(bge_t *bgep, boolean_t reset_phys)
{
    int retval = DDI_SUCCESS;

    ASSERT(mutex_owned(bgep->genlock));

#ifdef BGE_IPMI_ASF
    if (bgep->asf_enabled) {
        if (bge_reset(bgep, ASF_MODE_POST_INIT) != DDI_SUCCESS)
            retval = DDI_FAILURE;
    } else
        if (bge_reset(bgep, ASF_MODE_NONE) != DDI_SUCCESS)
            retval = DDI_FAILURE;
#else
    if (bge_reset(bgep) != DDI_SUCCESS)
        retval = DDI_FAILURE;
#endif
    if (bgep->bge_mac_state == BGE_MAC_STARTED) {
        if (bge_start(bgep, reset_phys) != DDI_SUCCESS)
            retval = DDI_FAILURE;
        bgep->watchdog = 0;
        ddi_trigger_softintr(bgep->drain_id);
    }

    BGE_DEBUG(("bge_restart($%p, %d) done", (void *)bgep, reset_phys));
    return (retval);
}
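/*
 * State summary: bge_m_start() below moves <bge_mac_state> to
 * BGE_MAC_STARTED and bge_m_stop() to BGE_MAC_STOPPED, while
 * bge_restart() preserves whatever state GLD last requested -- it only
 * restarts the chip if the MAC was already started.  This is what lets
 * the internal routines above run without disturbing GLD's view of the
 * device.
 */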
/*
 * ========== Nemo-required management entry points ==========
 */

#undef  BGE_DBG
#define BGE_DBG     BGE_DBG_NEMO    /* debug flag for this code */

/*
 * bge_m_stop() -- stop transmitting/receiving
 */
static void
bge_m_stop(void *arg)
{
    bge_t *bgep = arg;      /* private device info */
    send_ring_t *srp;
    uint32_t ring;

    BGE_TRACE(("bge_m_stop($%p)", arg));

    /*
     * Just stop processing, then record new GLD state
     */
    mutex_enter(bgep->genlock);
    if (!(bgep->progress & PROGRESS_INTR)) {
        /* can happen during autorecovery */
        bgep->bge_chip_state = BGE_CHIP_STOPPED;
    } else
        bge_stop(bgep);

    bgep->link_update_timer = 0;
    bgep->link_state = LINK_STATE_UNKNOWN;
    mac_link_update(bgep->mh, bgep->link_state);

    /*
     * Free the possible tx buffers allocated in tx process.
     */
#ifdef BGE_IPMI_ASF
    if (!bgep->asf_pseudostop)
#endif
    {
        rw_enter(bgep->errlock, RW_WRITER);
        for (ring = 0; ring < bgep->chipid.tx_rings; ++ring) {
            srp = &bgep->send[ring];
            mutex_enter(srp->tx_lock);
            if (srp->tx_array > 1)
                bge_free_txbuf_arrays(srp);
            mutex_exit(srp->tx_lock);
        }
        rw_exit(bgep->errlock);
    }
    bgep->bge_mac_state = BGE_MAC_STOPPED;
    BGE_DEBUG(("bge_m_stop($%p) done", arg));
    if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK)
        ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_UNAFFECTED);
    mutex_exit(bgep->genlock);
}

/*
 * bge_m_start() -- start transmitting/receiving
 */
static int
bge_m_start(void *arg)
{
    bge_t *bgep = arg;      /* private device info */

    BGE_TRACE(("bge_m_start($%p)", arg));

    /*
     * Start processing and record new GLD state
     */
    mutex_enter(bgep->genlock);
    if (!(bgep->progress & PROGRESS_INTR)) {
        /* can happen during autorecovery */
        ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
        mutex_exit(bgep->genlock);
        return (EIO);
    }
#ifdef BGE_IPMI_ASF
    if (bgep->asf_enabled) {
        if ((bgep->asf_status == ASF_STAT_RUN) &&
            (bgep->asf_pseudostop)) {
            bgep->bge_mac_state = BGE_MAC_STARTED;
            mutex_exit(bgep->genlock);
            return (0);
        }
    }
    if (bge_reset(bgep, ASF_MODE_INIT) != DDI_SUCCESS) {
#else
    if (bge_reset(bgep) != DDI_SUCCESS) {
#endif
        (void) bge_check_acc_handle(bgep, bgep->cfg_handle);
        (void) bge_check_acc_handle(bgep, bgep->io_handle);
        ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
        mutex_exit(bgep->genlock);
        return (EIO);
    }
    if (bge_start(bgep, B_TRUE) != DDI_SUCCESS) {
        (void) bge_check_acc_handle(bgep, bgep->cfg_handle);
        (void) bge_check_acc_handle(bgep, bgep->io_handle);
        ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
        mutex_exit(bgep->genlock);
        return (EIO);
    }
    bgep->watchdog = 0;
    bgep->bge_mac_state = BGE_MAC_STARTED;
    BGE_DEBUG(("bge_m_start($%p) done", arg));

    if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
        ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
        mutex_exit(bgep->genlock);
        return (EIO);
    }
    if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
        ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
        mutex_exit(bgep->genlock);
        return (EIO);
    }
#ifdef BGE_IPMI_ASF
    if (bgep->asf_enabled) {
        if (bgep->asf_status != ASF_STAT_RUN) {
            /* start ASF heart beat */
            bgep->asf_timeout_id = timeout(bge_asf_heartbeat,
                (void *)bgep,
                drv_usectohz(BGE_ASF_HEARTBEAT_INTERVAL));
            bgep->asf_status = ASF_STAT_RUN;
        }
    }
#endif
    mutex_exit(bgep->genlock);

    return (0);
}
/*
 * bge_unicst_set() -- set the physical network address
 */
static int
bge_unicst_set(void *arg, const uint8_t *macaddr, int slot)
{
    bge_t *bgep = arg;      /* private device info */

    BGE_TRACE(("bge_m_unicst_set($%p, %s)", arg,
        ether_sprintf((void *)macaddr)));

    /*
     * Remember the new current address in the driver state
     * Sync the chip's idea of the address too ...
     */
    mutex_enter(bgep->genlock);
    if (!(bgep->progress & PROGRESS_INTR)) {
        /* can happen during autorecovery */
        ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
        mutex_exit(bgep->genlock);
        return (EIO);
    }
    ethaddr_copy(macaddr, bgep->curr_addr[slot].addr);
#ifdef BGE_IPMI_ASF
    if (bge_chip_sync(bgep, B_FALSE) == DDI_FAILURE) {
#else
    if (bge_chip_sync(bgep) == DDI_FAILURE) {
#endif
        (void) bge_check_acc_handle(bgep, bgep->cfg_handle);
        (void) bge_check_acc_handle(bgep, bgep->io_handle);
        ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
        mutex_exit(bgep->genlock);
        return (EIO);
    }
#ifdef BGE_IPMI_ASF
    if (bgep->asf_enabled) {
        /*
         * The above bge_chip_sync() function wrote the ethernet MAC
         * addresses registers which destroyed the IPMI/ASF sideband.
         * Here, we have to reset chip to make IPMI/ASF sideband work.
         */
        if (bgep->asf_status == ASF_STAT_RUN) {
            /*
             * We must stop ASF heart beat before bge_chip_stop(),
             * otherwise some computers (ex. IBM HS20 blade server)
             * may crash.
             */
            bge_asf_update_status(bgep);
            bge_asf_stop_timer(bgep);
            bgep->asf_status = ASF_STAT_STOP;

            bge_asf_pre_reset_operations(bgep, BGE_INIT_RESET);
        }
        bge_chip_stop(bgep, B_FALSE);

        if (bge_restart(bgep, B_FALSE) == DDI_FAILURE) {
            (void) bge_check_acc_handle(bgep, bgep->cfg_handle);
            (void) bge_check_acc_handle(bgep, bgep->io_handle);
            ddi_fm_service_impact(bgep->devinfo,
                DDI_SERVICE_DEGRADED);
            mutex_exit(bgep->genlock);
            return (EIO);
        }

        /*
         * Start our ASF heartbeat counter as soon as possible.
         */
        if (bgep->asf_status != ASF_STAT_RUN) {
            /* start ASF heart beat */
            bgep->asf_timeout_id = timeout(bge_asf_heartbeat,
                (void *)bgep,
                drv_usectohz(BGE_ASF_HEARTBEAT_INTERVAL));
            bgep->asf_status = ASF_STAT_RUN;
        }
    }
#endif
    BGE_DEBUG(("bge_m_unicst_set($%p) done", arg));
    if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
        ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
        mutex_exit(bgep->genlock);
        return (EIO);
    }
    if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
        ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
        mutex_exit(bgep->genlock);
        return (EIO);
    }
    mutex_exit(bgep->genlock);

    return (0);
}

extern void bge_wake_factotum(bge_t *);

static boolean_t
bge_param_locked(mac_prop_id_t pr_num)
{
    /*
     * All adv_* parameters are locked (read-only) while
     * the device is in any sort of loopback mode ...
     */
    switch (pr_num) {
    case MAC_PROP_ADV_1000FDX_CAP:
    case MAC_PROP_EN_1000FDX_CAP:
    case MAC_PROP_ADV_1000HDX_CAP:
    case MAC_PROP_EN_1000HDX_CAP:
    case MAC_PROP_ADV_100FDX_CAP:
    case MAC_PROP_EN_100FDX_CAP:
    case MAC_PROP_ADV_100HDX_CAP:
    case MAC_PROP_EN_100HDX_CAP:
    case MAC_PROP_ADV_10FDX_CAP:
    case MAC_PROP_EN_10FDX_CAP:
    case MAC_PROP_ADV_10HDX_CAP:
    case MAC_PROP_EN_10HDX_CAP:
    case MAC_PROP_AUTONEG:
    case MAC_PROP_FLOWCTRL:
        return (B_TRUE);
    }
    return (B_FALSE);
}
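/*
 * For illustration only (assuming instance bge0 and standard
 * dladm(1M) administration of MAC properties): the handlers below are
 * what service commands such as
 *
 *	dladm set-linkprop -p adv_autoneg_cap=1 bge0
 *	dladm set-linkprop -p _drain_max=64 bge0
 *	dladm show-linkprop -p flowctrl bge0
 *
 * Private properties (leading underscore) arrive as strings, hence
 * the ddi_strtol() parsing in bge_set_priv_prop().
 */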
/*
 * callback functions for set/get of properties
 */
static int
bge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
    uint_t pr_valsize, const void *pr_val)
{
    bge_t *bgep = barg;
    int err = 0;
    uint32_t cur_mtu, new_mtu;
    link_flowctrl_t fl;

    mutex_enter(bgep->genlock);
    if (bgep->param_loop_mode != BGE_LOOP_NONE &&
        bge_param_locked(pr_num)) {
        /*
         * All adv_* parameters are locked (read-only)
         * while the device is in any sort of loopback mode.
         */
        mutex_exit(bgep->genlock);
        return (EBUSY);
    }
    if ((bgep->chipid.flags & CHIP_FLAG_SERDES) &&
        ((pr_num == MAC_PROP_EN_100FDX_CAP) ||
        (pr_num == MAC_PROP_EN_100HDX_CAP) ||
        (pr_num == MAC_PROP_EN_10FDX_CAP) ||
        (pr_num == MAC_PROP_EN_10HDX_CAP))) {
        /*
         * these properties are read/write on copper,
         * read-only and 0 on serdes
         */
        mutex_exit(bgep->genlock);
        return (ENOTSUP);
    }
    if (DEVICE_5906_SERIES_CHIPSETS(bgep) &&
        ((pr_num == MAC_PROP_EN_1000FDX_CAP) ||
        (pr_num == MAC_PROP_EN_1000HDX_CAP))) {
        mutex_exit(bgep->genlock);
        return (ENOTSUP);
    }

    switch (pr_num) {
    case MAC_PROP_EN_1000FDX_CAP:
        bgep->param_en_1000fdx = *(uint8_t *)pr_val;
        bgep->param_adv_1000fdx = *(uint8_t *)pr_val;
        goto reprogram;
    case MAC_PROP_EN_1000HDX_CAP:
        bgep->param_en_1000hdx = *(uint8_t *)pr_val;
        bgep->param_adv_1000hdx = *(uint8_t *)pr_val;
        goto reprogram;
    case MAC_PROP_EN_100FDX_CAP:
        bgep->param_en_100fdx = *(uint8_t *)pr_val;
        bgep->param_adv_100fdx = *(uint8_t *)pr_val;
        goto reprogram;
    case MAC_PROP_EN_100HDX_CAP:
        bgep->param_en_100hdx = *(uint8_t *)pr_val;
        bgep->param_adv_100hdx = *(uint8_t *)pr_val;
        goto reprogram;
    case MAC_PROP_EN_10FDX_CAP:
        bgep->param_en_10fdx = *(uint8_t *)pr_val;
        bgep->param_adv_10fdx = *(uint8_t *)pr_val;
        goto reprogram;
    case MAC_PROP_EN_10HDX_CAP:
        bgep->param_en_10hdx = *(uint8_t *)pr_val;
        bgep->param_adv_10hdx = *(uint8_t *)pr_val;
reprogram:
        if (err == 0 && bge_reprogram(bgep) == IOC_INVAL)
            err = EINVAL;
        break;
    case MAC_PROP_ADV_1000FDX_CAP:
    case MAC_PROP_ADV_1000HDX_CAP:
    case MAC_PROP_ADV_100FDX_CAP:
    case MAC_PROP_ADV_100HDX_CAP:
    case MAC_PROP_ADV_10FDX_CAP:
    case MAC_PROP_ADV_10HDX_CAP:
    case MAC_PROP_STATUS:
    case MAC_PROP_SPEED:
    case MAC_PROP_DUPLEX:
        err = ENOTSUP;  /* read-only prop. Can't set this */
        break;
    case MAC_PROP_AUTONEG:
        bgep->param_adv_autoneg = *(uint8_t *)pr_val;
        if (bge_reprogram(bgep) == IOC_INVAL)
            err = EINVAL;
        break;
    case MAC_PROP_MTU:
        cur_mtu = bgep->chipid.default_mtu;
        bcopy(pr_val, &new_mtu, sizeof (new_mtu));

        if (new_mtu == cur_mtu) {
            err = 0;
            break;
        }
        if (new_mtu < BGE_DEFAULT_MTU ||
            new_mtu > BGE_MAXIMUM_MTU) {
            err = EINVAL;
            break;
        }
        if ((new_mtu > BGE_DEFAULT_MTU) &&
            (bgep->chipid.flags & CHIP_FLAG_NO_JUMBO)) {
            err = EINVAL;
            break;
        }
        if (bgep->bge_mac_state == BGE_MAC_STARTED) {
            err = EBUSY;
            break;
        }
        bgep->chipid.default_mtu = new_mtu;
        if (bge_chip_id_init(bgep)) {
            err = EINVAL;
            break;
        }
        bgep->bge_dma_error = B_TRUE;
        bgep->manual_reset = B_TRUE;
        bge_chip_stop(bgep, B_TRUE);
        bge_wake_factotum(bgep);
        err = 0;
        break;
    case MAC_PROP_FLOWCTRL:
        bcopy(pr_val, &fl, sizeof (fl));
        switch (fl) {
        default:
            err = ENOTSUP;
            break;
        case LINK_FLOWCTRL_NONE:
            bgep->param_adv_pause = 0;
            bgep->param_adv_asym_pause = 0;

            bgep->param_link_rx_pause = B_FALSE;
            bgep->param_link_tx_pause = B_FALSE;
            break;
        case LINK_FLOWCTRL_RX:
            bgep->param_adv_pause = 1;
            bgep->param_adv_asym_pause = 1;

            bgep->param_link_rx_pause = B_TRUE;
            bgep->param_link_tx_pause = B_FALSE;
            break;
        case LINK_FLOWCTRL_TX:
            bgep->param_adv_pause = 0;
            bgep->param_adv_asym_pause = 1;

            bgep->param_link_rx_pause = B_FALSE;
            bgep->param_link_tx_pause = B_TRUE;
            break;
        case LINK_FLOWCTRL_BI:
            bgep->param_adv_pause = 1;
            bgep->param_adv_asym_pause = 0;

            bgep->param_link_rx_pause = B_TRUE;
            bgep->param_link_tx_pause = B_TRUE;
            break;
        }

        if (err == 0) {
            if (bge_reprogram(bgep) == IOC_INVAL)
                err = EINVAL;
        }

        break;
    case MAC_PROP_PRIVATE:
        err = bge_set_priv_prop(bgep, pr_name, pr_valsize,
            pr_val);
        break;
    default:
        err = ENOTSUP;
        break;
    }
    mutex_exit(bgep->genlock);
    return (err);
}
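/*
 * Worked example of the MTU path above (hypothetical values): with the
 * link down, "dladm set-linkprop -p mtu=9000 bge0" arrives as
 * MAC_PROP_MTU, passes the BGE_DEFAULT_MTU..BGE_MAXIMUM_MTU range and
 * CHIP_FLAG_NO_JUMBO checks, updates chipid.default_mtu, and schedules
 * a reset via the factotum; the same request on a started link fails
 * with EBUSY.
 */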
/* ARGSUSED */
static int
bge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
    uint_t pr_valsize, void *pr_val)
{
    bge_t *bgep = barg;
    int err = 0;

    switch (pr_num) {
    case MAC_PROP_DUPLEX:
        ASSERT(pr_valsize >= sizeof (link_duplex_t));
        bcopy(&bgep->param_link_duplex, pr_val,
            sizeof (link_duplex_t));
        break;
    case MAC_PROP_SPEED: {
        uint64_t speed = bgep->param_link_speed * 1000000ull;

        ASSERT(pr_valsize >= sizeof (speed));
        bcopy(&speed, pr_val, sizeof (speed));
        break;
    }
    case MAC_PROP_STATUS:
        ASSERT(pr_valsize >= sizeof (link_state_t));
        bcopy(&bgep->link_state, pr_val,
            sizeof (link_state_t));
        break;
    case MAC_PROP_AUTONEG:
        *(uint8_t *)pr_val = bgep->param_adv_autoneg;
        break;
    case MAC_PROP_FLOWCTRL: {
        link_flowctrl_t fl;

        ASSERT(pr_valsize >= sizeof (fl));

        if (bgep->param_link_rx_pause &&
            !bgep->param_link_tx_pause)
            fl = LINK_FLOWCTRL_RX;

        if (!bgep->param_link_rx_pause &&
            !bgep->param_link_tx_pause)
            fl = LINK_FLOWCTRL_NONE;

        if (!bgep->param_link_rx_pause &&
            bgep->param_link_tx_pause)
            fl = LINK_FLOWCTRL_TX;

        if (bgep->param_link_rx_pause &&
            bgep->param_link_tx_pause)
            fl = LINK_FLOWCTRL_BI;
        bcopy(&fl, pr_val, sizeof (fl));
        break;
    }
    case MAC_PROP_ADV_1000FDX_CAP:
        *(uint8_t *)pr_val = bgep->param_adv_1000fdx;
        break;
    case MAC_PROP_EN_1000FDX_CAP:
        *(uint8_t *)pr_val = bgep->param_en_1000fdx;
        break;
    case MAC_PROP_ADV_1000HDX_CAP:
        *(uint8_t *)pr_val = bgep->param_adv_1000hdx;
        break;
    case MAC_PROP_EN_1000HDX_CAP:
        *(uint8_t *)pr_val = bgep->param_en_1000hdx;
        break;
    case MAC_PROP_ADV_100FDX_CAP:
        *(uint8_t *)pr_val = bgep->param_adv_100fdx;
        break;
    case MAC_PROP_EN_100FDX_CAP:
        *(uint8_t *)pr_val = bgep->param_en_100fdx;
        break;
    case MAC_PROP_ADV_100HDX_CAP:
        *(uint8_t *)pr_val = bgep->param_adv_100hdx;
        break;
    case MAC_PROP_EN_100HDX_CAP:
        *(uint8_t *)pr_val = bgep->param_en_100hdx;
        break;
    case MAC_PROP_ADV_10FDX_CAP:
        *(uint8_t *)pr_val = bgep->param_adv_10fdx;
        break;
    case MAC_PROP_EN_10FDX_CAP:
        *(uint8_t *)pr_val = bgep->param_en_10fdx;
        break;
    case MAC_PROP_ADV_10HDX_CAP:
        *(uint8_t *)pr_val = bgep->param_adv_10hdx;
        break;
    case MAC_PROP_EN_10HDX_CAP:
        *(uint8_t *)pr_val = bgep->param_en_10hdx;
        break;
    case MAC_PROP_ADV_100T4_CAP:
    case MAC_PROP_EN_100T4_CAP:
        *(uint8_t *)pr_val = 0;
        break;
    case MAC_PROP_PRIVATE:
        err = bge_get_priv_prop(bgep, pr_name,
            pr_valsize, pr_val);
        return (err);
    default:
        return (ENOTSUP);
    }
    return (0);
}

static void
bge_m_propinfo(void *barg, const char *pr_name, mac_prop_id_t pr_num,
    mac_prop_info_handle_t prh)
{
    bge_t *bgep = barg;
    int flags = bgep->chipid.flags;

    /*
     * By default permissions are read/write unless specified
     * otherwise by the driver.
     */

    switch (pr_num) {
    case MAC_PROP_DUPLEX:
    case MAC_PROP_SPEED:
    case MAC_PROP_STATUS:
    case MAC_PROP_ADV_1000FDX_CAP:
    case MAC_PROP_ADV_1000HDX_CAP:
    case MAC_PROP_ADV_100FDX_CAP:
    case MAC_PROP_ADV_100HDX_CAP:
    case MAC_PROP_ADV_10FDX_CAP:
    case MAC_PROP_ADV_10HDX_CAP:
    case MAC_PROP_ADV_100T4_CAP:
    case MAC_PROP_EN_100T4_CAP:
        mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
        break;

    case MAC_PROP_EN_1000FDX_CAP:
    case MAC_PROP_EN_1000HDX_CAP:
        if (DEVICE_5906_SERIES_CHIPSETS(bgep))
            mac_prop_info_set_default_uint8(prh, 0);
        else
            mac_prop_info_set_default_uint8(prh, 1);
        break;

    case MAC_PROP_EN_100FDX_CAP:
    case MAC_PROP_EN_100HDX_CAP:
    case MAC_PROP_EN_10FDX_CAP:
    case MAC_PROP_EN_10HDX_CAP:
        mac_prop_info_set_default_uint8(prh,
            (flags & CHIP_FLAG_SERDES) ? 0 : 1);
        break;

    case MAC_PROP_AUTONEG:
        mac_prop_info_set_default_uint8(prh, 1);
        break;

    case MAC_PROP_FLOWCTRL:
        mac_prop_info_set_default_link_flowctrl(prh,
            LINK_FLOWCTRL_BI);
        break;

    case MAC_PROP_MTU:
        mac_prop_info_set_range_uint32(prh, BGE_DEFAULT_MTU,
            (flags & CHIP_FLAG_NO_JUMBO) ?
            BGE_DEFAULT_MTU : BGE_MAXIMUM_MTU);
        break;

    case MAC_PROP_PRIVATE:
        bge_priv_propinfo(pr_name, prh);
        break;
    }

    mutex_enter(bgep->genlock);
    if ((bgep->param_loop_mode != BGE_LOOP_NONE &&
        bge_param_locked(pr_num)) ||
        ((bgep->chipid.flags & CHIP_FLAG_SERDES) &&
        ((pr_num == MAC_PROP_EN_100FDX_CAP) ||
        (pr_num == MAC_PROP_EN_100HDX_CAP) ||
        (pr_num == MAC_PROP_EN_10FDX_CAP) ||
        (pr_num == MAC_PROP_EN_10HDX_CAP))) ||
        (DEVICE_5906_SERIES_CHIPSETS(bgep) &&
        ((pr_num == MAC_PROP_EN_1000FDX_CAP) ||
        (pr_num == MAC_PROP_EN_1000HDX_CAP))))
        mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
    mutex_exit(bgep->genlock);
}

/* ARGSUSED */
static int
bge_set_priv_prop(bge_t *bgep, const char *pr_name, uint_t pr_valsize,
    const void *pr_val)
{
    int err = 0;
    long result;

    if (strcmp(pr_name, "_adv_pause_cap") == 0) {
        (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
        if (result > 1 || result < 0) {
            err = EINVAL;
        } else {
            bgep->param_adv_pause = (uint32_t)result;
            if (bge_reprogram(bgep) == IOC_INVAL)
                err = EINVAL;
        }
        return (err);
    }
    if (strcmp(pr_name, "_adv_asym_pause_cap") == 0) {
        (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
        if (result > 1 || result < 0) {
            err = EINVAL;
        } else {
            bgep->param_adv_asym_pause = (uint32_t)result;
            if (bge_reprogram(bgep) == IOC_INVAL)
                err = EINVAL;
        }
        return (err);
    }
    if (strcmp(pr_name, "_drain_max") == 0) {

        /*
         * on the Tx side, we need to update the h/w register for
         * real packet transmission per packet. The drain_max parameter
         * is used to reduce the register access. This parameter
         * controls the max number of packets that we will hold before
         * updating the bge h/w to trigger h/w transmit. The bge
         * chipset usually has a max of 512 Tx descriptors, thus
         * the upper bound on drain_max is 512.
         */
        if (pr_val == NULL) {
            err = EINVAL;
            return (err);
        }
        (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
        if (result > 512 || result < 1)
            err = EINVAL;
        else {
            bgep->param_drain_max = (uint32_t)result;
            if (bge_reprogram(bgep) == IOC_INVAL)
                err = EINVAL;
        }
        return (err);
    }
    if (strcmp(pr_name, "_msi_cnt") == 0) {

        if (pr_val == NULL) {
            err = EINVAL;
            return (err);
        }
        (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
        if (result > 7 || result < 0)
            err = EINVAL;
        else {
            bgep->param_msi_cnt = (uint32_t)result;
            if (bge_reprogram(bgep) == IOC_INVAL)
                err = EINVAL;
        }
        return (err);
    }
    if (strcmp(pr_name, "_rx_intr_coalesce_blank_time") == 0) {
        if (ddi_strtol(pr_val, (char **)NULL, 0, &result) != 0)
            return (EINVAL);
        if (result < 0)
            err = EINVAL;
        else {
            bgep->chipid.rx_ticks_norm = (uint32_t)result;
            bge_chip_coalesce_update(bgep);
        }
        return (err);
    }

    if (strcmp(pr_name, "_rx_intr_coalesce_pkt_cnt") == 0) {
        if (ddi_strtol(pr_val, (char **)NULL, 0, &result) != 0)
            return (EINVAL);

        if (result < 0)
            err = EINVAL;
        else {
            bgep->chipid.rx_count_norm = (uint32_t)result;
            bge_chip_coalesce_update(bgep);
        }
        return (err);
    }
    if (strcmp(pr_name, "_tx_intr_coalesce_blank_time") == 0) {
        if (ddi_strtol(pr_val, (char **)NULL, 0, &result) != 0)
            return (EINVAL);
        if (result < 0)
            err = EINVAL;
        else {
            bgep->chipid.tx_ticks_norm = (uint32_t)result;
            bge_chip_coalesce_update(bgep);
        }
        return (err);
    }

    if (strcmp(pr_name, "_tx_intr_coalesce_pkt_cnt") == 0) {
        if (ddi_strtol(pr_val, (char **)NULL, 0, &result) != 0)
            return (EINVAL);

        if (result < 0)
            err = EINVAL;
        else {
            bgep->chipid.tx_count_norm = (uint32_t)result;
            bge_chip_coalesce_update(bgep);
        }
        return (err);
    }
    return (ENOTSUP);
}

static int
bge_get_priv_prop(bge_t *bge, const char *pr_name, uint_t pr_valsize,
    void *pr_val)
{
    int value;

    if (strcmp(pr_name, "_adv_pause_cap") == 0)
        value = bge->param_adv_pause;
    else if (strcmp(pr_name, "_adv_asym_pause_cap") == 0)
        value = bge->param_adv_asym_pause;
    else if (strcmp(pr_name, "_drain_max") == 0)
        value = bge->param_drain_max;
    else if (strcmp(pr_name, "_msi_cnt") == 0)
        value = bge->param_msi_cnt;
    else if (strcmp(pr_name, "_rx_intr_coalesce_blank_time") == 0)
        value = bge->chipid.rx_ticks_norm;
    else if (strcmp(pr_name, "_tx_intr_coalesce_blank_time") == 0)
        value = bge->chipid.tx_ticks_norm;
    else if (strcmp(pr_name, "_rx_intr_coalesce_pkt_cnt") == 0)
        value = bge->chipid.rx_count_norm;
    else if (strcmp(pr_name, "_tx_intr_coalesce_pkt_cnt") == 0)
        value = bge->chipid.tx_count_norm;
    else
        return (ENOTSUP);

    (void) snprintf(pr_val, pr_valsize, "%d", value);
    return (0);
}
"_rx_intr_coalesce_blank_time") == 0) 1233 value = bge_rx_ticks_norm; 1234 else if (strcmp(pr_name, "_tx_intr_coalesce_blank_time") == 0) 1235 value = bge_tx_ticks_norm; 1236 else if (strcmp(pr_name, "_rx_intr_coalesce_pkt_cnt") == 0) 1237 value = bge_rx_count_norm; 1238 else if (strcmp(pr_name, "_tx_intr_coalesce_pkt_cnt") == 0) 1239 value = bge_tx_count_norm; 1240 else 1241 return; 1242 1243 (void) snprintf(valstr, sizeof (valstr), "%d", value); 1244 mac_prop_info_set_default_str(mph, valstr); 1245 } 1246 1247 /* 1248 * Compute the index of the required bit in the multicast hash map. 1249 * This must mirror the way the hardware actually does it! 1250 * See Broadcom document 570X-PG102-R page 125. 1251 */ 1252 static uint32_t 1253 bge_hash_index(const uint8_t *mca) 1254 { 1255 uint32_t hash; 1256 1257 CRC32(hash, mca, ETHERADDRL, -1U, crc32_table); 1258 1259 return (hash); 1260 } 1261 1262 /* 1263 * bge_m_multicst_add() -- enable/disable a multicast address 1264 */ 1265 static int 1266 bge_m_multicst(void *arg, boolean_t add, const uint8_t *mca) 1267 { 1268 bge_t *bgep = arg; /* private device info */ 1269 uint32_t hash; 1270 uint32_t index; 1271 uint32_t word; 1272 uint32_t bit; 1273 uint8_t *refp; 1274 1275 BGE_TRACE(("bge_m_multicst($%p, %s, %s)", arg, 1276 (add) ? "add" : "remove", ether_sprintf((void *)mca))); 1277 1278 /* 1279 * Precalculate all required masks, pointers etc ... 1280 */ 1281 hash = bge_hash_index(mca); 1282 index = hash % BGE_HASH_TABLE_SIZE; 1283 word = index/32u; 1284 bit = 1 << (index % 32u); 1285 refp = &bgep->mcast_refs[index]; 1286 1287 BGE_DEBUG(("bge_m_multicst: hash 0x%x index %d (%d:0x%x) = %d", 1288 hash, index, word, bit, *refp)); 1289 1290 /* 1291 * We must set the appropriate bit in the hash map (and the 1292 * corresponding h/w register) when the refcount goes from 0 1293 * to >0, and clear it when the last ref goes away (refcount 1294 * goes from >0 back to 0). If we change the hash map, we 1295 * must also update the chip's hardware map registers. 
/*
 * bge_m_multicst() -- enable/disable a multicast address
 */
static int
bge_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
{
    bge_t *bgep = arg;      /* private device info */
    uint32_t hash;
    uint32_t index;
    uint32_t word;
    uint32_t bit;
    uint8_t *refp;

    BGE_TRACE(("bge_m_multicst($%p, %s, %s)", arg,
        (add) ? "add" : "remove", ether_sprintf((void *)mca)));

    /*
     * Precalculate all required masks, pointers etc ...
     */
    hash = bge_hash_index(mca);
    index = hash % BGE_HASH_TABLE_SIZE;
    word = index/32u;
    bit = 1 << (index % 32u);
    refp = &bgep->mcast_refs[index];

    BGE_DEBUG(("bge_m_multicst: hash 0x%x index %d (%d:0x%x) = %d",
        hash, index, word, bit, *refp));

    /*
     * We must set the appropriate bit in the hash map (and the
     * corresponding h/w register) when the refcount goes from 0
     * to >0, and clear it when the last ref goes away (refcount
     * goes from >0 back to 0). If we change the hash map, we
     * must also update the chip's hardware map registers.
     */
    mutex_enter(bgep->genlock);
    if (!(bgep->progress & PROGRESS_INTR)) {
        /* can happen during autorecovery */
        ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
        mutex_exit(bgep->genlock);
        return (EIO);
    }
    if (add) {
        if ((*refp)++ == 0) {
            bgep->mcast_hash[word] |= bit;
#ifdef BGE_IPMI_ASF
            if (bge_chip_sync(bgep, B_TRUE) == DDI_FAILURE) {
#else
            if (bge_chip_sync(bgep) == DDI_FAILURE) {
#endif
                (void) bge_check_acc_handle(bgep,
                    bgep->cfg_handle);
                (void) bge_check_acc_handle(bgep,
                    bgep->io_handle);
                ddi_fm_service_impact(bgep->devinfo,
                    DDI_SERVICE_DEGRADED);
                mutex_exit(bgep->genlock);
                return (EIO);
            }
        }
    } else {
        if (--(*refp) == 0) {
            bgep->mcast_hash[word] &= ~bit;
#ifdef BGE_IPMI_ASF
            if (bge_chip_sync(bgep, B_TRUE) == DDI_FAILURE) {
#else
            if (bge_chip_sync(bgep) == DDI_FAILURE) {
#endif
                (void) bge_check_acc_handle(bgep,
                    bgep->cfg_handle);
                (void) bge_check_acc_handle(bgep,
                    bgep->io_handle);
                ddi_fm_service_impact(bgep->devinfo,
                    DDI_SERVICE_DEGRADED);
                mutex_exit(bgep->genlock);
                return (EIO);
            }
        }
    }
    BGE_DEBUG(("bge_m_multicst($%p) done", arg));
    if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
        ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
        mutex_exit(bgep->genlock);
        return (EIO);
    }
    if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
        ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
        mutex_exit(bgep->genlock);
        return (EIO);
    }
    mutex_exit(bgep->genlock);

    return (0);
}
/*
 * bge_m_promisc() -- set or reset promiscuous mode on the board
 *
 * Program the hardware to enable/disable promiscuous and/or
 * receive-all-multicast modes.
 */
static int
bge_m_promisc(void *arg, boolean_t on)
{
    bge_t *bgep = arg;

    BGE_TRACE(("bge_m_promisc_set($%p, %d)", arg, on));

    /*
     * Store MAC layer specified mode and pass to chip layer to update h/w
     */
    mutex_enter(bgep->genlock);
    if (!(bgep->progress & PROGRESS_INTR)) {
        /* can happen during autorecovery */
        ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
        mutex_exit(bgep->genlock);
        return (EIO);
    }
    bgep->promisc = on;
#ifdef BGE_IPMI_ASF
    if (bge_chip_sync(bgep, B_TRUE) == DDI_FAILURE) {
#else
    if (bge_chip_sync(bgep) == DDI_FAILURE) {
#endif
        (void) bge_check_acc_handle(bgep, bgep->cfg_handle);
        (void) bge_check_acc_handle(bgep, bgep->io_handle);
        ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
        mutex_exit(bgep->genlock);
        return (EIO);
    }
    BGE_DEBUG(("bge_m_promisc_set($%p) done", arg));
    if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
        ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
        mutex_exit(bgep->genlock);
        return (EIO);
    }
    if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
        ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
        mutex_exit(bgep->genlock);
        return (EIO);
    }
    mutex_exit(bgep->genlock);
    return (0);
}

/*
 * Find the slot for the specified unicast address
 */
int
bge_unicst_find(bge_t *bgep, const uint8_t *mac_addr)
{
    int slot;

    ASSERT(mutex_owned(bgep->genlock));

    for (slot = 0; slot < bgep->unicst_addr_total; slot++) {
        if (bcmp(bgep->curr_addr[slot].addr, mac_addr, ETHERADDRL) == 0)
            return (slot);
    }

    return (-1);
}
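/*
 * The classifier programming below spreads one 6-byte MAC address over
 * two contiguous receive rules: the first rule matches the high-order
 * 4 bytes (RULE_DEST_MAC_1) and carries RECV_RULE_CTL_AND, the second
 * matches the low-order 2 bytes (RULE_DEST_MAC_2), so both must hit
 * before a frame is steered to the chosen ring.
 */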
/*
 * Programs the classifier to start steering packets matching 'mac_addr' to the
 * specified ring 'arg'.
 */
static int
bge_addmac(void *arg, const uint8_t *mac_addr)
{
    recv_ring_t *rrp = (recv_ring_t *)arg;
    bge_t *bgep = rrp->bgep;
    bge_recv_rule_t *rulep = bgep->recv_rules;
    bge_rule_info_t *rinfop = NULL;
    uint8_t ring = (uint8_t)(rrp - bgep->recv) + 1;
    int i;
    uint16_t tmp16;
    uint32_t tmp32;
    int slot;
    int err;

    mutex_enter(bgep->genlock);
    if (bgep->unicst_addr_avail == 0) {
        mutex_exit(bgep->genlock);
        return (ENOSPC);
    }

    /*
     * First add the unicast address to an available slot.
     */
    slot = bge_unicst_find(bgep, mac_addr);
    ASSERT(slot == -1);

    for (slot = 0; slot < bgep->unicst_addr_total; slot++) {
        if (!bgep->curr_addr[slot].set) {
            bgep->curr_addr[slot].set = B_TRUE;
            break;
        }
    }

    ASSERT(slot < bgep->unicst_addr_total);
    bgep->unicst_addr_avail--;
    mutex_exit(bgep->genlock);

    if ((err = bge_unicst_set(bgep, mac_addr, slot)) != 0)
        goto fail;

    /* A rule is already here. Deny this. */
    if (rrp->mac_addr_rule != NULL) {
        err = ether_cmp(mac_addr, rrp->mac_addr_val) ? EEXIST : EBUSY;
        goto fail;
    }

    /*
     * Allocate a bge_rule_info_t to keep track of which rule slots
     * are being used.
     */
    rinfop = kmem_zalloc(sizeof (bge_rule_info_t), KM_NOSLEEP);
    if (rinfop == NULL) {
        err = ENOMEM;
        goto fail;
    }

    /*
     * Look for the starting slot to place the rules.
     * The two slots we reserve must be contiguous.
     */
    for (i = 0; i + 1 < RECV_RULES_NUM_MAX; i++)
        if ((rulep[i].control & RECV_RULE_CTL_ENABLE) == 0 &&
            (rulep[i+1].control & RECV_RULE_CTL_ENABLE) == 0)
            break;

    ASSERT(i + 1 < RECV_RULES_NUM_MAX);

    bcopy(mac_addr, &tmp32, sizeof (tmp32));
    rulep[i].mask_value = ntohl(tmp32);
    rulep[i].control = RULE_DEST_MAC_1(ring) | RECV_RULE_CTL_AND;
    bge_reg_put32(bgep, RECV_RULE_MASK_REG(i), rulep[i].mask_value);
    bge_reg_put32(bgep, RECV_RULE_CONTROL_REG(i), rulep[i].control);

    bcopy(mac_addr + 4, &tmp16, sizeof (tmp16));
    rulep[i+1].mask_value = 0xffff0000 | ntohs(tmp16);
    rulep[i+1].control = RULE_DEST_MAC_2(ring);
    bge_reg_put32(bgep, RECV_RULE_MASK_REG(i+1), rulep[i+1].mask_value);
    bge_reg_put32(bgep, RECV_RULE_CONTROL_REG(i+1), rulep[i+1].control);
    rinfop->start = i;
    rinfop->count = 2;

    rrp->mac_addr_rule = rinfop;
    bcopy(mac_addr, rrp->mac_addr_val, ETHERADDRL);

    return (0);

fail:
    /* Clear the address just set */
    (void) bge_unicst_set(bgep, zero_addr, slot);
    mutex_enter(bgep->genlock);
    bgep->curr_addr[slot].set = B_FALSE;
    bgep->unicst_addr_avail++;
    mutex_exit(bgep->genlock);

    return (err);
}

/*
 * Stop classifying packets matching the MAC address to the specified ring.
 */
static int
bge_remmac(void *arg, const uint8_t *mac_addr)
{
    recv_ring_t *rrp = (recv_ring_t *)arg;
    bge_t *bgep = rrp->bgep;
    bge_recv_rule_t *rulep = bgep->recv_rules;
    bge_rule_info_t *rinfop = rrp->mac_addr_rule;
    int start;
    int slot;
    int err;

    /*
     * Remove the MAC address from its slot.
     */
    mutex_enter(bgep->genlock);
    slot = bge_unicst_find(bgep, mac_addr);
    if (slot == -1) {
        mutex_exit(bgep->genlock);
        return (EINVAL);
    }

    ASSERT(bgep->curr_addr[slot].set);
    mutex_exit(bgep->genlock);

    if ((err = bge_unicst_set(bgep, zero_addr, slot)) != 0)
        return (err);

    if (rinfop == NULL || ether_cmp(mac_addr, rrp->mac_addr_val) != 0)
        return (EINVAL);

    start = rinfop->start;
    rulep[start].mask_value = 0;
    rulep[start].control = 0;
    bge_reg_put32(bgep, RECV_RULE_MASK_REG(start), rulep[start].mask_value);
    bge_reg_put32(bgep, RECV_RULE_CONTROL_REG(start), rulep[start].control);
    start++;
    rulep[start].mask_value = 0;
    rulep[start].control = 0;
    bge_reg_put32(bgep, RECV_RULE_MASK_REG(start), rulep[start].mask_value);
    bge_reg_put32(bgep, RECV_RULE_CONTROL_REG(start), rulep[start].control);

    kmem_free(rinfop, sizeof (bge_rule_info_t));
    rrp->mac_addr_rule = NULL;
    bzero(rrp->mac_addr_val, ETHERADDRL);

    mutex_enter(bgep->genlock);
    bgep->curr_addr[slot].set = B_FALSE;
    bgep->unicst_addr_avail++;
    mutex_exit(bgep->genlock);

    return (0);
}

static int
bge_flag_intr_enable(mac_intr_handle_t ih)
{
    recv_ring_t *rrp = (recv_ring_t *)ih;
    bge_t *bgep = rrp->bgep;

    mutex_enter(bgep->genlock);
    rrp->poll_flag = 0;
    mutex_exit(bgep->genlock);

    return (0);
}

static int
bge_flag_intr_disable(mac_intr_handle_t ih)
{
    recv_ring_t *rrp = (recv_ring_t *)ih;
    bge_t *bgep = rrp->bgep;

    mutex_enter(bgep->genlock);
    rrp->poll_flag = 1;
    mutex_exit(bgep->genlock);

    return (0);
}
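/*
 * The two routines above simply toggle <poll_flag>: when the MAC layer
 * switches a ring to polling it calls the mi_disable entry
 * (bge_flag_intr_disable) and then pulls packets via bge_poll_ring();
 * clearing the flag through mi_enable returns the ring to
 * interrupt-driven delivery.  Both are registered in bge_fill_ring()
 * below.
 */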
static int
bge_ring_start(mac_ring_driver_t rh, uint64_t mr_gen_num)
{
    recv_ring_t *rx_ring;

    rx_ring = (recv_ring_t *)rh;
    mutex_enter(rx_ring->rx_lock);
    rx_ring->ring_gen_num = mr_gen_num;
    mutex_exit(rx_ring->rx_lock);
    return (0);
}

/*
 * Callback function for MAC layer to register all rings
 * for given ring_group, noted by rg_index.
 */
void
bge_fill_ring(void *arg, mac_ring_type_t rtype, const int rg_index,
    const int index, mac_ring_info_t *infop, mac_ring_handle_t rh)
{
    bge_t *bgep = arg;
    mac_intr_t *mintr;

    switch (rtype) {
    case MAC_RING_TYPE_RX: {
        recv_ring_t *rx_ring;

        ASSERT(rg_index >= 0 && rg_index < MIN(bgep->chipid.rx_rings,
            MAC_ADDRESS_REGS_MAX) && index == 0);

        rx_ring = &bgep->recv[rg_index];
        rx_ring->ring_handle = rh;

        infop->mri_driver = (mac_ring_driver_t)rx_ring;
        infop->mri_start = bge_ring_start;
        infop->mri_stop = NULL;
        infop->mri_poll = bge_poll_ring;
        infop->mri_stat = bge_rx_ring_stat;

        mintr = &infop->mri_intr;
        mintr->mi_handle = (mac_intr_handle_t)rx_ring;
        mintr->mi_enable = bge_flag_intr_enable;
        mintr->mi_disable = bge_flag_intr_disable;

        break;
    }
    case MAC_RING_TYPE_TX:
    default:
        ASSERT(0);
        break;
    }
}

/*
 * Fill infop passed as argument
 * fill in respective ring_group info
 * Each group has a single ring in it. We keep it simple
 * and use the same internal handle for rings and groups.
 */
void
bge_fill_group(void *arg, mac_ring_type_t rtype, const int rg_index,
    mac_group_info_t *infop, mac_group_handle_t gh)
{
    bge_t *bgep = arg;

    switch (rtype) {
    case MAC_RING_TYPE_RX: {
        recv_ring_t *rx_ring;

        ASSERT(rg_index >= 0 && rg_index < MIN(bgep->chipid.rx_rings,
            MAC_ADDRESS_REGS_MAX));
        rx_ring = &bgep->recv[rg_index];
        rx_ring->ring_group_handle = gh;

        infop->mgi_driver = (mac_group_driver_t)rx_ring;
        infop->mgi_start = NULL;
        infop->mgi_stop = NULL;
        infop->mgi_addmac = bge_addmac;
        infop->mgi_remmac = bge_remmac;
        infop->mgi_count = 1;
        break;
    }
    case MAC_RING_TYPE_TX:
    default:
        ASSERT(0);
        break;
    }
}
/*ARGSUSED*/
static boolean_t
bge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
{
    bge_t *bgep = arg;

    switch (cap) {
    case MAC_CAPAB_HCKSUM: {
        uint32_t *txflags = cap_data;

        *txflags = HCKSUM_INET_FULL_V4 | HCKSUM_IPHDRCKSUM;
        break;
    }
    case MAC_CAPAB_RINGS: {
        mac_capab_rings_t *cap_rings = cap_data;

        /* Temporarily disable multiple tx rings. */
        if (cap_rings->mr_type != MAC_RING_TYPE_RX)
            return (B_FALSE);

        cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
        cap_rings->mr_rnum = cap_rings->mr_gnum =
            MIN(bgep->chipid.rx_rings, MAC_ADDRESS_REGS_MAX);
        cap_rings->mr_rget = bge_fill_ring;
        cap_rings->mr_gget = bge_fill_group;
        break;
    }
    default:
        return (B_FALSE);
    }
    return (B_TRUE);
}

/*
 * Loopback ioctl code
 */

static lb_property_t loopmodes[] = {
    { normal,   "normal",   BGE_LOOP_NONE },
    { external, "1000Mbps", BGE_LOOP_EXTERNAL_1000 },
    { external, "100Mbps",  BGE_LOOP_EXTERNAL_100 },
    { external, "10Mbps",   BGE_LOOP_EXTERNAL_10 },
    { internal, "PHY",      BGE_LOOP_INTERNAL_PHY },
    { internal, "MAC",      BGE_LOOP_INTERNAL_MAC }
};

static enum ioc_reply
bge_set_loop_mode(bge_t *bgep, uint32_t mode)
{
    /*
     * If the mode isn't being changed, there's nothing to do ...
     */
    if (mode == bgep->param_loop_mode)
        return (IOC_ACK);

    /*
     * Validate the requested mode and prepare a suitable message
     * to explain the link down/up cycle that the change will
     * probably induce ...
     */
    switch (mode) {
    default:
        return (IOC_INVAL);

    case BGE_LOOP_NONE:
    case BGE_LOOP_EXTERNAL_1000:
    case BGE_LOOP_EXTERNAL_100:
    case BGE_LOOP_EXTERNAL_10:
    case BGE_LOOP_INTERNAL_PHY:
    case BGE_LOOP_INTERNAL_MAC:
        break;
    }

    /*
     * All OK; tell the caller to reprogram
     * the PHY and/or MAC for the new mode ...
     */
    bgep->param_loop_mode = mode;
    return (IOC_RESTART_ACK);
}

static enum ioc_reply
bge_loop_ioctl(bge_t *bgep, queue_t *wq, mblk_t *mp, struct iocblk *iocp)
{
    lb_info_sz_t *lbsp;
    lb_property_t *lbpp;
    uint32_t *lbmp;
    int cmd;

    _NOTE(ARGUNUSED(wq))

    /*
     * Validate format of ioctl
     */
    if (mp->b_cont == NULL)
        return (IOC_INVAL);

    cmd = iocp->ioc_cmd;
    switch (cmd) {
    default:
        /* NOTREACHED */
        bge_error(bgep, "bge_loop_ioctl: invalid cmd 0x%x", cmd);
        return (IOC_INVAL);

    case LB_GET_INFO_SIZE:
        if (iocp->ioc_count != sizeof (lb_info_sz_t))
            return (IOC_INVAL);
        lbsp = (void *)mp->b_cont->b_rptr;
        *lbsp = sizeof (loopmodes);
        return (IOC_REPLY);

    case LB_GET_INFO:
        if (iocp->ioc_count != sizeof (loopmodes))
            return (IOC_INVAL);
        lbpp = (void *)mp->b_cont->b_rptr;
        bcopy(loopmodes, lbpp, sizeof (loopmodes));
        return (IOC_REPLY);

    case LB_GET_MODE:
        if (iocp->ioc_count != sizeof (uint32_t))
            return (IOC_INVAL);
        lbmp = (void *)mp->b_cont->b_rptr;
        *lbmp = bgep->param_loop_mode;
        return (IOC_REPLY);

    case LB_SET_MODE:
        if (iocp->ioc_count != sizeof (uint32_t))
            return (IOC_INVAL);
        lbmp = (void *)mp->b_cont->b_rptr;
        return (bge_set_loop_mode(bgep, *lbmp));
    }
}

/*
 * Specific bge IOCTLs, the gld module handles the generic ones.
 */
static void
bge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
{
    bge_t *bgep = arg;
    struct iocblk *iocp;
    enum ioc_reply status;
    boolean_t need_privilege;
    int err;
    int cmd;

    /*
     * Validate the command before bothering with the mutex ...
     */
    iocp = (void *)mp->b_rptr;
    iocp->ioc_error = 0;
    need_privilege = B_TRUE;
    cmd = iocp->ioc_cmd;
    switch (cmd) {
    default:
        miocnak(wq, mp, 0, EINVAL);
        return;

    case BGE_MII_READ:
    case BGE_MII_WRITE:
    case BGE_SEE_READ:
    case BGE_SEE_WRITE:
    case BGE_FLASH_READ:
    case BGE_FLASH_WRITE:
    case BGE_DIAG:
    case BGE_PEEK:
    case BGE_POKE:
    case BGE_PHY_RESET:
    case BGE_SOFT_RESET:
    case BGE_HARD_RESET:
        break;

    case LB_GET_INFO_SIZE:
    case LB_GET_INFO:
    case LB_GET_MODE:
        need_privilege = B_FALSE;
        /* FALLTHRU */
    case LB_SET_MODE:
        break;
    }

    if (need_privilege) {
        /*
         * Check for specific net_config privilege on Solaris 10+.
         */
        err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
        if (err != 0) {
            miocnak(wq, mp, 0, err);
            return;
        }
    }

    mutex_enter(bgep->genlock);
    if (!(bgep->progress & PROGRESS_INTR)) {
        /* can happen during autorecovery */
        mutex_exit(bgep->genlock);
        miocnak(wq, mp, 0, EIO);
        return;
    }

    switch (cmd) {
    default:
        _NOTE(NOTREACHED)
        status = IOC_INVAL;
        break;

    case BGE_MII_READ:
    case BGE_MII_WRITE:
    case BGE_SEE_READ:
    case BGE_SEE_WRITE:
    case BGE_FLASH_READ:
    case BGE_FLASH_WRITE:
    case BGE_DIAG:
    case BGE_PEEK:
    case BGE_POKE:
    case BGE_PHY_RESET:
    case BGE_SOFT_RESET:
    case BGE_HARD_RESET:
        status = bge_chip_ioctl(bgep, wq, mp, iocp);
        break;

    case LB_GET_INFO_SIZE:
    case LB_GET_INFO:
    case LB_GET_MODE:
    case LB_SET_MODE:
        status = bge_loop_ioctl(bgep, wq, mp, iocp);
        break;
    }

    /*
     * Do we need to reprogram the PHY and/or the MAC?
     * Do it now, while we still have the mutex.
     *
     * Note: update the PHY first, 'cos it controls the
     * speed/duplex parameters that the MAC code uses.
     */
    switch (status) {
    case IOC_RESTART_REPLY:
    case IOC_RESTART_ACK:
        if (bge_reprogram(bgep) == IOC_INVAL)
            status = IOC_INVAL;
        break;
    }

    if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
        ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
        status = IOC_INVAL;
    }
    if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
        ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
        status = IOC_INVAL;
    }
    mutex_exit(bgep->genlock);

    /*
     * Finally, decide how to reply
     */
    switch (status) {
    default:
    case IOC_INVAL:
        /*
         * Error, reply with a NAK and EINVAL or the specified error
         */
        miocnak(wq, mp, 0, iocp->ioc_error == 0 ?
            EINVAL : iocp->ioc_error);
        break;

    case IOC_DONE:
        /*
         * OK, reply already sent
         */
        break;

    case IOC_RESTART_ACK:
    case IOC_ACK:
        /*
         * OK, reply with an ACK
         */
        miocack(wq, mp, 0, 0);
        break;

    case IOC_RESTART_REPLY:
    case IOC_REPLY:
        /*
         * OK, send prepared reply as ACK or NAK
         */
        mp->b_datap->db_type = iocp->ioc_error == 0 ?
            M_IOCACK : M_IOCNAK;
        qreply(wq, mp);
        break;
    }
}
/*
 * ========== Per-instance setup/teardown code ==========
 */

#undef  BGE_DBG
#define BGE_DBG     BGE_DBG_INIT    /* debug flag for this code */

/*
 * Allocate an area of memory and a DMA handle for accessing it
 */
static int
bge_alloc_dma_mem(bge_t *bgep, size_t memsize, ddi_device_acc_attr_t *attr_p,
    uint_t dma_flags, dma_area_t *dma_p)
{
    caddr_t va;
    int err;

    BGE_TRACE(("bge_alloc_dma_mem($%p, %ld, $%p, 0x%x, $%p)",
        (void *)bgep, memsize, attr_p, dma_flags, dma_p));

    /*
     * Allocate handle
     */
    err = ddi_dma_alloc_handle(bgep->devinfo, &dma_attr,
        DDI_DMA_DONTWAIT, NULL, &dma_p->dma_hdl);
    if (err != DDI_SUCCESS)
        return (DDI_FAILURE);

    /*
     * Allocate memory
     */
    err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize, attr_p,
        dma_flags, DDI_DMA_DONTWAIT, NULL, &va, &dma_p->alength,
        &dma_p->acc_hdl);
    if (err != DDI_SUCCESS)
        return (DDI_FAILURE);

    /*
     * Bind the two together
     */
    dma_p->mem_va = va;
    err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL,
        va, dma_p->alength, dma_flags, DDI_DMA_DONTWAIT, NULL,
        &dma_p->cookie, &dma_p->ncookies);

    BGE_DEBUG(("bge_alloc_dma_mem(): bind %d bytes; err %d, %d cookies",
        dma_p->alength, err, dma_p->ncookies));

    if (err != DDI_DMA_MAPPED || dma_p->ncookies != 1)
        return (DDI_FAILURE);

    dma_p->nslots = ~0U;
    dma_p->size = ~0U;
    dma_p->token = ~0U;
    dma_p->offset = 0;
    return (DDI_SUCCESS);
}

/*
 * Free one allocated area of DMAable memory
 */
static void
bge_free_dma_mem(dma_area_t *dma_p)
{
    if (dma_p->dma_hdl != NULL) {
        if (dma_p->ncookies) {
            (void) ddi_dma_unbind_handle(dma_p->dma_hdl);
            dma_p->ncookies = 0;
        }
        ddi_dma_free_handle(&dma_p->dma_hdl);
        dma_p->dma_hdl = NULL;
    }

    if (dma_p->acc_hdl != NULL) {
        ddi_dma_mem_free(&dma_p->acc_hdl);
        dma_p->acc_hdl = NULL;
    }
}

/*
 * Utility routine to carve a slice off a chunk of allocated memory,
 * updating the chunk descriptor accordingly. The size of the slice
 * is given by the product of the <qty> and <size> parameters.
 */
static void
bge_slice_chunk(dma_area_t *slice, dma_area_t *chunk,
    uint32_t qty, uint32_t size)
{
    static uint32_t sequence = 0xbcd5704a;
    size_t totsize;

    totsize = qty*size;
    ASSERT(totsize <= chunk->alength);

    *slice = *chunk;
    slice->nslots = qty;
    slice->size = size;
    slice->alength = totsize;
    slice->token = ++sequence;

    chunk->mem_va = (caddr_t)chunk->mem_va + totsize;
    chunk->alength -= totsize;
    chunk->offset += totsize;
    chunk->cookie.dmac_laddress += totsize;
    chunk->cookie.dmac_size -= totsize;
}
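/*
 * Typical use of bge_slice_chunk() (as in bge_init_buff_ring() below):
 * carve one receive buffer at a time from a split area, e.g.
 *
 *	bge_slice_chunk(&srbdp->pbuf, &pbuf, 1, bufsize);
 *
 * which advances <pbuf> by <bufsize> bytes and stamps the slice with a
 * fresh <token>, later written into the h/w descriptor's "opaque"
 * field so returned buffers can be matched back to their slices.
 */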
2102 */ 2103 static void 2104 bge_init_buff_ring(bge_t *bgep, uint64_t ring) 2105 { 2106 buff_ring_t *brp; 2107 bge_status_t *bsp; 2108 sw_rbd_t *srbdp; 2109 dma_area_t pbuf; 2110 uint32_t bufsize; 2111 uint32_t nslots; 2112 uint32_t slot; 2113 uint32_t split; 2114 2115 static bge_regno_t nic_ring_addrs[BGE_BUFF_RINGS_MAX] = { 2116 NIC_MEM_SHADOW_BUFF_STD, 2117 NIC_MEM_SHADOW_BUFF_JUMBO, 2118 NIC_MEM_SHADOW_BUFF_MINI 2119 }; 2120 static bge_regno_t mailbox_regs[BGE_BUFF_RINGS_MAX] = { 2121 RECV_STD_PROD_INDEX_REG, 2122 RECV_JUMBO_PROD_INDEX_REG, 2123 RECV_MINI_PROD_INDEX_REG 2124 }; 2125 static bge_regno_t buff_cons_xref[BGE_BUFF_RINGS_MAX] = { 2126 STATUS_STD_BUFF_CONS_INDEX, 2127 STATUS_JUMBO_BUFF_CONS_INDEX, 2128 STATUS_MINI_BUFF_CONS_INDEX 2129 }; 2130 2131 BGE_TRACE(("bge_init_buff_ring($%p, %d)", 2132 (void *)bgep, ring)); 2133 2134 brp = &bgep->buff[ring]; 2135 nslots = brp->desc.nslots; 2136 ASSERT(brp->buf[0].nslots == nslots/BGE_SPLIT); 2137 bufsize = brp->buf[0].size; 2138 2139 /* 2140 * Set up the copy of the h/w RCB 2141 * 2142 * Note: unlike Send & Receive Return Rings, (where the max_len 2143 * field holds the number of slots), in a Receive Buffer Ring 2144 * this field indicates the size of each buffer in the ring. 2145 */ 2146 brp->hw_rcb.host_ring_addr = brp->desc.cookie.dmac_laddress; 2147 brp->hw_rcb.max_len = (uint16_t)bufsize; 2148 brp->hw_rcb.flags = nslots > 0 ? 0 : RCB_FLAG_RING_DISABLED; 2149 brp->hw_rcb.nic_ring_addr = nic_ring_addrs[ring]; 2150 2151 /* 2152 * Other one-off initialisation of per-ring data 2153 */ 2154 brp->bgep = bgep; 2155 bsp = DMA_VPTR(bgep->status_block); 2156 brp->cons_index_p = &bsp->buff_cons_index[buff_cons_xref[ring]]; 2157 brp->chip_mbx_reg = mailbox_regs[ring]; 2158 mutex_init(brp->rf_lock, NULL, MUTEX_DRIVER, 2159 DDI_INTR_PRI(bgep->intr_pri)); 2160 2161 /* 2162 * Allocate the array of s/w Receive Buffer Descriptors 2163 */ 2164 srbdp = kmem_zalloc(nslots*sizeof (*srbdp), KM_SLEEP); 2165 brp->sw_rbds = srbdp; 2166 2167 /* 2168 * Now initialise each array element once and for all 2169 */ 2170 for (split = 0; split < BGE_SPLIT; ++split) { 2171 pbuf = brp->buf[split]; 2172 for (slot = 0; slot < nslots/BGE_SPLIT; ++srbdp, ++slot) 2173 bge_slice_chunk(&srbdp->pbuf, &pbuf, 1, bufsize); 2174 ASSERT(pbuf.alength == 0); 2175 } 2176 } 2177 2178 /* 2179 * Clean up initialisation done above before the memory is freed 2180 */ 2181 static void 2182 bge_fini_buff_ring(bge_t *bgep, uint64_t ring) 2183 { 2184 buff_ring_t *brp; 2185 sw_rbd_t *srbdp; 2186 2187 BGE_TRACE(("bge_fini_buff_ring($%p, %d)", 2188 (void *)bgep, ring)); 2189 2190 brp = &bgep->buff[ring]; 2191 srbdp = brp->sw_rbds; 2192 kmem_free(srbdp, brp->desc.nslots*sizeof (*srbdp)); 2193 2194 mutex_destroy(brp->rf_lock); 2195 } 2196 2197 /* 2198 * Initialise the specified Receive (Return) Ring, using the 2199 * information in the <dma_area> descriptors that it contains 2200 * to set up all the other fields. This routine should be called 2201 * only once for each ring. 2202 */ 2203 static void 2204 bge_init_recv_ring(bge_t *bgep, uint64_t ring) 2205 { 2206 recv_ring_t *rrp; 2207 bge_status_t *bsp; 2208 uint32_t nslots; 2209 2210 BGE_TRACE(("bge_init_recv_ring($%p, %d)", 2211 (void *)bgep, ring)); 2212 2213 /* 2214 * The chip architecture requires that receive return rings have 2215 * 512 or 1024 or 2048 elements per ring. See 570X-PG108-R page 103. 
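 *
 * Equivalently (512, 1024 and 2048 being the only powers of two in
 * that range), the check below could be sketched using the ISP2()
 * macro from <sys/sysmacros.h>:
 *
 *	ASSERT(nslots == 0 || (ISP2(nslots) &&
 *	    nslots >= 512 && nslots <= 2048));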
2216 */ 2217 rrp = &bgep->recv[ring]; 2218 nslots = rrp->desc.nslots; 2219 ASSERT(nslots == 0 || nslots == 512 || 2220 nslots == 1024 || nslots == 2048); 2221 2222 /* 2223 * Set up the copy of the h/w RCB 2224 */ 2225 rrp->hw_rcb.host_ring_addr = rrp->desc.cookie.dmac_laddress; 2226 rrp->hw_rcb.max_len = (uint16_t)nslots; 2227 rrp->hw_rcb.flags = nslots > 0 ? 0 : RCB_FLAG_RING_DISABLED; 2228 rrp->hw_rcb.nic_ring_addr = 0; 2229 2230 /* 2231 * Other one-off initialisation of per-ring data 2232 */ 2233 rrp->bgep = bgep; 2234 bsp = DMA_VPTR(bgep->status_block); 2235 rrp->prod_index_p = RECV_INDEX_P(bsp, ring); 2236 rrp->chip_mbx_reg = RECV_RING_CONS_INDEX_REG(ring); 2237 mutex_init(rrp->rx_lock, NULL, MUTEX_DRIVER, 2238 DDI_INTR_PRI(bgep->intr_pri)); 2239 } 2240 2241 2242 /* 2243 * Clean up initialisation done above before the memory is freed 2244 */ 2245 static void 2246 bge_fini_recv_ring(bge_t *bgep, uint64_t ring) 2247 { 2248 recv_ring_t *rrp; 2249 2250 BGE_TRACE(("bge_fini_recv_ring($%p, %d)", 2251 (void *)bgep, ring)); 2252 2253 rrp = &bgep->recv[ring]; 2254 if (rrp->rx_softint) 2255 ddi_remove_softintr(rrp->rx_softint); 2256 mutex_destroy(rrp->rx_lock); 2257 } 2258 2259 /* 2260 * Initialise the specified Send Ring, using the information in the 2261 * <dma_area> descriptors that it contains to set up all the other 2262 * fields. This routine should be called only once for each ring. 2263 */ 2264 static void 2265 bge_init_send_ring(bge_t *bgep, uint64_t ring) 2266 { 2267 send_ring_t *srp; 2268 bge_status_t *bsp; 2269 sw_sbd_t *ssbdp; 2270 dma_area_t desc; 2271 dma_area_t pbuf; 2272 uint32_t nslots; 2273 uint32_t slot; 2274 uint32_t split; 2275 sw_txbuf_t *txbuf; 2276 2277 BGE_TRACE(("bge_init_send_ring($%p, %d)", 2278 (void *)bgep, ring)); 2279 2280 /* 2281 * The chip architecture requires that host-based send rings 2282 * have 512 elements per ring. See 570X-PG102-R page 56. 2283 */ 2284 srp = &bgep->send[ring]; 2285 nslots = srp->desc.nslots; 2286 ASSERT(nslots == 0 || nslots == 512); 2287 2288 /* 2289 * Set up the copy of the h/w RCB 2290 */ 2291 srp->hw_rcb.host_ring_addr = srp->desc.cookie.dmac_laddress; 2292 srp->hw_rcb.max_len = (uint16_t)nslots; 2293 srp->hw_rcb.flags = nslots > 0 ? 
0 : RCB_FLAG_RING_DISABLED; 2294 srp->hw_rcb.nic_ring_addr = NIC_MEM_SHADOW_SEND_RING(ring, nslots); 2295 2296 /* 2297 * Other one-off initialisation of per-ring data 2298 */ 2299 srp->bgep = bgep; 2300 bsp = DMA_VPTR(bgep->status_block); 2301 srp->cons_index_p = SEND_INDEX_P(bsp, ring); 2302 srp->chip_mbx_reg = SEND_RING_HOST_INDEX_REG(ring); 2303 mutex_init(srp->tx_lock, NULL, MUTEX_DRIVER, 2304 DDI_INTR_PRI(bgep->intr_pri)); 2305 mutex_init(srp->txbuf_lock, NULL, MUTEX_DRIVER, 2306 DDI_INTR_PRI(bgep->intr_pri)); 2307 mutex_init(srp->freetxbuf_lock, NULL, MUTEX_DRIVER, 2308 DDI_INTR_PRI(bgep->intr_pri)); 2309 mutex_init(srp->tc_lock, NULL, MUTEX_DRIVER, 2310 DDI_INTR_PRI(bgep->intr_pri)); 2311 if (nslots == 0) 2312 return; 2313 2314 /* 2315 * Allocate the array of s/w Send Buffer Descriptors 2316 */ 2317 ssbdp = kmem_zalloc(nslots*sizeof (*ssbdp), KM_SLEEP); 2318 txbuf = kmem_zalloc(BGE_SEND_BUF_MAX*sizeof (*txbuf), KM_SLEEP); 2319 srp->txbuf_head = 2320 kmem_zalloc(BGE_SEND_BUF_MAX*sizeof (bge_queue_item_t), KM_SLEEP); 2321 srp->pktp = kmem_zalloc(BGE_SEND_BUF_MAX*sizeof (send_pkt_t), KM_SLEEP); 2322 srp->sw_sbds = ssbdp; 2323 srp->txbuf = txbuf; 2324 srp->tx_buffers = BGE_SEND_BUF_NUM; 2325 srp->tx_buffers_low = srp->tx_buffers / 4; 2326 if (bgep->chipid.snd_buff_size > BGE_SEND_BUFF_SIZE_DEFAULT) 2327 srp->tx_array_max = BGE_SEND_BUF_ARRAY_JUMBO; 2328 else 2329 srp->tx_array_max = BGE_SEND_BUF_ARRAY; 2330 srp->tx_array = 1; 2331 2332 /* 2333 * Chunk tx desc area 2334 */ 2335 desc = srp->desc; 2336 for (slot = 0; slot < nslots; ++ssbdp, ++slot) { 2337 bge_slice_chunk(&ssbdp->desc, &desc, 1, 2338 sizeof (bge_sbd_t)); 2339 } 2340 ASSERT(desc.alength == 0); 2341 2342 /* 2343 * Chunk tx buffer area 2344 */ 2345 for (split = 0; split < BGE_SPLIT; ++split) { 2346 pbuf = srp->buf[0][split]; 2347 for (slot = 0; slot < BGE_SEND_BUF_NUM/BGE_SPLIT; ++slot) { 2348 bge_slice_chunk(&txbuf->buf, &pbuf, 1, 2349 bgep->chipid.snd_buff_size); 2350 txbuf++; 2351 } 2352 ASSERT(pbuf.alength == 0); 2353 } 2354 } 2355 2356 /* 2357 * Clean up initialisation done above before the memory is freed 2358 */ 2359 static void 2360 bge_fini_send_ring(bge_t *bgep, uint64_t ring) 2361 { 2362 send_ring_t *srp; 2363 uint32_t array; 2364 uint32_t split; 2365 uint32_t nslots; 2366 2367 BGE_TRACE(("bge_fini_send_ring($%p, %d)", 2368 (void *)bgep, ring)); 2369 2370 srp = &bgep->send[ring]; 2371 mutex_destroy(srp->tc_lock); 2372 mutex_destroy(srp->freetxbuf_lock); 2373 mutex_destroy(srp->txbuf_lock); 2374 mutex_destroy(srp->tx_lock); 2375 nslots = srp->desc.nslots; 2376 if (nslots == 0) 2377 return; 2378 2379 for (array = 1; array < srp->tx_array; ++array) 2380 for (split = 0; split < BGE_SPLIT; ++split) 2381 bge_free_dma_mem(&srp->buf[array][split]); 2382 kmem_free(srp->sw_sbds, nslots*sizeof (*srp->sw_sbds)); 2383 kmem_free(srp->txbuf_head, BGE_SEND_BUF_MAX*sizeof (*srp->txbuf_head)); 2384 kmem_free(srp->txbuf, BGE_SEND_BUF_MAX*sizeof (*srp->txbuf)); 2385 kmem_free(srp->pktp, BGE_SEND_BUF_MAX*sizeof (*srp->pktp)); 2386 srp->sw_sbds = NULL; 2387 srp->txbuf_head = NULL; 2388 srp->txbuf = NULL; 2389 srp->pktp = NULL; 2390 } 2391 2392 /* 2393 * Initialise all transmit, receive, and buffer rings. 2394 */ 2395 void 2396 bge_init_rings(bge_t *bgep) 2397 { 2398 uint32_t ring; 2399 2400 BGE_TRACE(("bge_init_rings($%p)", (void *)bgep)); 2401 2402 /* 2403 * Perform one-off initialisation of each ring ... 
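 *
 * (send rings first, then receive rings, then buffer rings;
 * bge_fini_rings() below tears them down in exactly the reverse
 * order)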
2404 */ 2405 for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring) 2406 bge_init_send_ring(bgep, ring); 2407 for (ring = 0; ring < BGE_RECV_RINGS_MAX; ++ring) 2408 bge_init_recv_ring(bgep, ring); 2409 for (ring = 0; ring < BGE_BUFF_RINGS_MAX; ++ring) 2410 bge_init_buff_ring(bgep, ring); 2411 } 2412 2413 /* 2414 * Undo the work of bge_init_rings() above before the memory is freed 2415 */ 2416 void 2417 bge_fini_rings(bge_t *bgep) 2418 { 2419 uint32_t ring; 2420 2421 BGE_TRACE(("bge_fini_rings($%p)", (void *)bgep)); 2422 2423 for (ring = 0; ring < BGE_BUFF_RINGS_MAX; ++ring) 2424 bge_fini_buff_ring(bgep, ring); 2425 for (ring = 0; ring < BGE_RECV_RINGS_MAX; ++ring) 2426 bge_fini_recv_ring(bgep, ring); 2427 for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring) 2428 bge_fini_send_ring(bgep, ring); 2429 } 2430 2431 /* 2432 * Called from the bge_m_stop() to free the tx buffers which are 2433 * allocated from the tx process. 2434 */ 2435 void 2436 bge_free_txbuf_arrays(send_ring_t *srp) 2437 { 2438 uint32_t array; 2439 uint32_t split; 2440 2441 ASSERT(mutex_owned(srp->tx_lock)); 2442 2443 /* 2444 * Free the extra tx buffer DMA area 2445 */ 2446 for (array = 1; array < srp->tx_array; ++array) 2447 for (split = 0; split < BGE_SPLIT; ++split) 2448 bge_free_dma_mem(&srp->buf[array][split]); 2449 2450 /* 2451 * Restore initial tx buffer numbers 2452 */ 2453 srp->tx_array = 1; 2454 srp->tx_buffers = BGE_SEND_BUF_NUM; 2455 srp->tx_buffers_low = srp->tx_buffers / 4; 2456 srp->tx_flow = 0; 2457 bzero(srp->pktp, BGE_SEND_BUF_MAX * sizeof (*srp->pktp)); 2458 } 2459 2460 /* 2461 * Called from tx process to allocate more tx buffers 2462 */ 2463 bge_queue_item_t * 2464 bge_alloc_txbuf_array(bge_t *bgep, send_ring_t *srp) 2465 { 2466 bge_queue_t *txbuf_queue; 2467 bge_queue_item_t *txbuf_item_last; 2468 bge_queue_item_t *txbuf_item; 2469 bge_queue_item_t *txbuf_item_rtn; 2470 sw_txbuf_t *txbuf; 2471 dma_area_t area; 2472 size_t txbuffsize; 2473 uint32_t slot; 2474 uint32_t array; 2475 uint32_t split; 2476 uint32_t err; 2477 2478 ASSERT(mutex_owned(srp->tx_lock)); 2479 2480 array = srp->tx_array; 2481 if (array >= srp->tx_array_max) 2482 return (NULL); 2483 2484 /* 2485 * Allocate memory & handles for TX buffers 2486 */ 2487 txbuffsize = BGE_SEND_BUF_NUM*bgep->chipid.snd_buff_size; 2488 ASSERT((txbuffsize % BGE_SPLIT) == 0); 2489 for (split = 0; split < BGE_SPLIT; ++split) { 2490 err = bge_alloc_dma_mem(bgep, txbuffsize/BGE_SPLIT, 2491 &bge_data_accattr, DDI_DMA_WRITE | BGE_DMA_MODE, 2492 &srp->buf[array][split]); 2493 if (err != DDI_SUCCESS) { 2494 /* Free the last already allocated OK chunks */ 2495 for (slot = 0; slot <= split; ++slot) 2496 bge_free_dma_mem(&srp->buf[array][slot]); 2497 srp->tx_alloc_fail++; 2498 return (NULL); 2499 } 2500 } 2501 2502 /* 2503 * Chunk tx buffer area 2504 */ 2505 txbuf = srp->txbuf + array*BGE_SEND_BUF_NUM; 2506 for (split = 0; split < BGE_SPLIT; ++split) { 2507 area = srp->buf[array][split]; 2508 for (slot = 0; slot < BGE_SEND_BUF_NUM/BGE_SPLIT; ++slot) { 2509 bge_slice_chunk(&txbuf->buf, &area, 1, 2510 bgep->chipid.snd_buff_size); 2511 txbuf++; 2512 } 2513 } 2514 2515 /* 2516 * Add above buffers to the tx buffer pop queue 2517 */ 2518 txbuf_item = srp->txbuf_head + array*BGE_SEND_BUF_NUM; 2519 txbuf = srp->txbuf + array*BGE_SEND_BUF_NUM; 2520 txbuf_item_last = NULL; 2521 for (slot = 0; slot < BGE_SEND_BUF_NUM; ++slot) { 2522 txbuf_item->item = txbuf; 2523 txbuf_item->next = txbuf_item_last; 2524 txbuf_item_last = txbuf_item; 2525 txbuf++; 2526 txbuf_item++; 2527 } 2528 txbuf_item = 
srp->txbuf_head + array*BGE_SEND_BUF_NUM; 2529 txbuf_item_rtn = txbuf_item; 2530 txbuf_item++; 2531 txbuf_queue = srp->txbuf_pop_queue; 2532 mutex_enter(txbuf_queue->lock); 2533 txbuf_item->next = txbuf_queue->head; 2534 txbuf_queue->head = txbuf_item_last; 2535 txbuf_queue->count += BGE_SEND_BUF_NUM - 1; 2536 mutex_exit(txbuf_queue->lock); 2537 2538 srp->tx_array++; 2539 srp->tx_buffers += BGE_SEND_BUF_NUM; 2540 srp->tx_buffers_low = srp->tx_buffers / 4; 2541 2542 return (txbuf_item_rtn); 2543 } 2544 2545 /* 2546 * This function allocates all the transmit and receive buffers 2547 * and descriptors, in four chunks. 2548 */ 2549 int 2550 bge_alloc_bufs(bge_t *bgep) 2551 { 2552 dma_area_t area; 2553 size_t rxbuffsize; 2554 size_t txbuffsize; 2555 size_t rxbuffdescsize; 2556 size_t rxdescsize; 2557 size_t txdescsize; 2558 uint32_t ring; 2559 uint32_t rx_rings = bgep->chipid.rx_rings; 2560 uint32_t tx_rings = bgep->chipid.tx_rings; 2561 int split; 2562 int err; 2563 2564 BGE_TRACE(("bge_alloc_bufs($%p)", 2565 (void *)bgep)); 2566 2567 rxbuffsize = BGE_STD_SLOTS_USED*bgep->chipid.std_buf_size; 2568 rxbuffsize += bgep->chipid.jumbo_slots*bgep->chipid.recv_jumbo_size; 2569 rxbuffsize += BGE_MINI_SLOTS_USED*BGE_MINI_BUFF_SIZE; 2570 2571 txbuffsize = BGE_SEND_BUF_NUM*bgep->chipid.snd_buff_size; 2572 txbuffsize *= tx_rings; 2573 2574 rxdescsize = rx_rings*bgep->chipid.recv_slots; 2575 rxdescsize *= sizeof (bge_rbd_t); 2576 2577 rxbuffdescsize = BGE_STD_SLOTS_USED; 2578 rxbuffdescsize += bgep->chipid.jumbo_slots; 2579 rxbuffdescsize += BGE_MINI_SLOTS_USED; 2580 rxbuffdescsize *= sizeof (bge_rbd_t); 2581 2582 txdescsize = tx_rings*BGE_SEND_SLOTS_USED; 2583 txdescsize *= sizeof (bge_sbd_t); 2584 txdescsize += sizeof (bge_statistics_t); 2585 txdescsize += sizeof (bge_status_t); 2586 txdescsize += BGE_STATUS_PADDING; 2587 2588 /* 2589 * Enable PCI relaxed ordering only for RX/TX data buffers 2590 */ 2591 if (bge_relaxed_ordering) 2592 dma_attr.dma_attr_flags |= DDI_DMA_RELAXED_ORDERING; 2593 2594 /* 2595 * Allocate memory & handles for RX buffers 2596 */ 2597 ASSERT((rxbuffsize % BGE_SPLIT) == 0); 2598 for (split = 0; split < BGE_SPLIT; ++split) { 2599 err = bge_alloc_dma_mem(bgep, rxbuffsize/BGE_SPLIT, 2600 &bge_data_accattr, DDI_DMA_READ | BGE_DMA_MODE, 2601 &bgep->rx_buff[split]); 2602 if (err != DDI_SUCCESS) 2603 return (DDI_FAILURE); 2604 } 2605 2606 /* 2607 * Allocate memory & handles for TX buffers 2608 */ 2609 ASSERT((txbuffsize % BGE_SPLIT) == 0); 2610 for (split = 0; split < BGE_SPLIT; ++split) { 2611 err = bge_alloc_dma_mem(bgep, txbuffsize/BGE_SPLIT, 2612 &bge_data_accattr, DDI_DMA_WRITE | BGE_DMA_MODE, 2613 &bgep->tx_buff[split]); 2614 if (err != DDI_SUCCESS) 2615 return (DDI_FAILURE); 2616 } 2617 2618 dma_attr.dma_attr_flags &= ~DDI_DMA_RELAXED_ORDERING; 2619 2620 /* 2621 * Allocate memory & handles for receive return rings 2622 */ 2623 ASSERT((rxdescsize % rx_rings) == 0); 2624 for (split = 0; split < rx_rings; ++split) { 2625 err = bge_alloc_dma_mem(bgep, rxdescsize/rx_rings, 2626 &bge_desc_accattr, DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 2627 &bgep->rx_desc[split]); 2628 if (err != DDI_SUCCESS) 2629 return (DDI_FAILURE); 2630 } 2631 2632 /* 2633 * Allocate memory & handles for buffer (producer) descriptor rings 2634 */ 2635 err = bge_alloc_dma_mem(bgep, rxbuffdescsize, &bge_desc_accattr, 2636 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &bgep->rx_desc[split]); 2637 if (err != DDI_SUCCESS) 2638 return (DDI_FAILURE); 2639 2640 /* 2641 * Allocate memory & handles for TX descriptor rings, 2642 * status 
block, and statistics area 2643 */ 2644 err = bge_alloc_dma_mem(bgep, txdescsize, &bge_desc_accattr, 2645 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &bgep->tx_desc); 2646 if (err != DDI_SUCCESS) 2647 return (DDI_FAILURE); 2648 2649 /* 2650 * Now carve up each of the allocated areas ... 2651 */ 2652 for (split = 0; split < BGE_SPLIT; ++split) { 2653 area = bgep->rx_buff[split]; 2654 bge_slice_chunk(&bgep->buff[BGE_STD_BUFF_RING].buf[split], 2655 &area, BGE_STD_SLOTS_USED/BGE_SPLIT, 2656 bgep->chipid.std_buf_size); 2657 bge_slice_chunk(&bgep->buff[BGE_JUMBO_BUFF_RING].buf[split], 2658 &area, bgep->chipid.jumbo_slots/BGE_SPLIT, 2659 bgep->chipid.recv_jumbo_size); 2660 bge_slice_chunk(&bgep->buff[BGE_MINI_BUFF_RING].buf[split], 2661 &area, BGE_MINI_SLOTS_USED/BGE_SPLIT, 2662 BGE_MINI_BUFF_SIZE); 2663 } 2664 2665 for (split = 0; split < BGE_SPLIT; ++split) { 2666 area = bgep->tx_buff[split]; 2667 for (ring = 0; ring < tx_rings; ++ring) 2668 bge_slice_chunk(&bgep->send[ring].buf[0][split], 2669 &area, BGE_SEND_BUF_NUM/BGE_SPLIT, 2670 bgep->chipid.snd_buff_size); 2671 for (; ring < BGE_SEND_RINGS_MAX; ++ring) 2672 bge_slice_chunk(&bgep->send[ring].buf[0][split], 2673 &area, 0, bgep->chipid.snd_buff_size); 2674 } 2675 2676 for (ring = 0; ring < rx_rings; ++ring) 2677 bge_slice_chunk(&bgep->recv[ring].desc, &bgep->rx_desc[ring], 2678 bgep->chipid.recv_slots, sizeof (bge_rbd_t)); 2679 2680 area = bgep->rx_desc[rx_rings]; 2681 for (; ring < BGE_RECV_RINGS_MAX; ++ring) 2682 bge_slice_chunk(&bgep->recv[ring].desc, &area, 2683 0, sizeof (bge_rbd_t)); 2684 bge_slice_chunk(&bgep->buff[BGE_STD_BUFF_RING].desc, &area, 2685 BGE_STD_SLOTS_USED, sizeof (bge_rbd_t)); 2686 bge_slice_chunk(&bgep->buff[BGE_JUMBO_BUFF_RING].desc, &area, 2687 bgep->chipid.jumbo_slots, sizeof (bge_rbd_t)); 2688 bge_slice_chunk(&bgep->buff[BGE_MINI_BUFF_RING].desc, &area, 2689 BGE_MINI_SLOTS_USED, sizeof (bge_rbd_t)); 2690 ASSERT(area.alength == 0); 2691 2692 area = bgep->tx_desc; 2693 for (ring = 0; ring < tx_rings; ++ring) 2694 bge_slice_chunk(&bgep->send[ring].desc, &area, 2695 BGE_SEND_SLOTS_USED, sizeof (bge_sbd_t)); 2696 for (; ring < BGE_SEND_RINGS_MAX; ++ring) 2697 bge_slice_chunk(&bgep->send[ring].desc, &area, 2698 0, sizeof (bge_sbd_t)); 2699 bge_slice_chunk(&bgep->statistics, &area, 1, sizeof (bge_statistics_t)); 2700 bge_slice_chunk(&bgep->status_block, &area, 1, sizeof (bge_status_t)); 2701 ASSERT(area.alength == BGE_STATUS_PADDING); 2702 DMA_ZERO(bgep->status_block); 2703 2704 return (DDI_SUCCESS); 2705 } 2706 2707 /* 2708 * This routine frees the transmit and receive buffers and descriptors. 2709 * Make sure the chip is stopped before calling it! 
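 *
 * A sketch of the expected teardown order, as used by bge_unattach()
 * below:
 *
 *	bge_intr_disable(bgep);		(stop interrupt activity)
 *	bge_fini_rings(bgep);		(undo ring initialisation)
 *	...
 *	bge_free_bufs(bgep);		(only now release the DMA areas)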
2710 */ 2711 void 2712 bge_free_bufs(bge_t *bgep) 2713 { 2714 int split; 2715 2716 BGE_TRACE(("bge_free_bufs($%p)", 2717 (void *)bgep)); 2718 2719 bge_free_dma_mem(&bgep->tx_desc); 2720 for (split = 0; split < BGE_RECV_RINGS_SPLIT; ++split) 2721 bge_free_dma_mem(&bgep->rx_desc[split]); 2722 for (split = 0; split < BGE_SPLIT; ++split) 2723 bge_free_dma_mem(&bgep->tx_buff[split]); 2724 for (split = 0; split < BGE_SPLIT; ++split) 2725 bge_free_dma_mem(&bgep->rx_buff[split]); 2726 } 2727 2728 /* 2729 * Determine (initial) MAC address ("BIA") to use for this interface 2730 */ 2731 2732 static void 2733 bge_find_mac_address(bge_t *bgep, chip_id_t *cidp) 2734 { 2735 struct ether_addr sysaddr; 2736 char propbuf[8]; /* "true" or "false", plus NUL */ 2737 uchar_t *bytes; 2738 int *ints; 2739 uint_t nelts; 2740 int err; 2741 2742 BGE_TRACE(("bge_find_mac_address($%p)", 2743 (void *)bgep)); 2744 2745 BGE_DEBUG(("bge_find_mac_address: hw_mac_addr %012llx, => %s (%sset)", 2746 cidp->hw_mac_addr, 2747 ether_sprintf((void *)cidp->vendor_addr.addr), 2748 cidp->vendor_addr.set ? "" : "not ")); 2749 2750 /* 2751 * The "vendor's factory-set address" may already have 2752 * been extracted from the chip, but if the property 2753 * "local-mac-address" is set we use that instead. It 2754 * will normally be set by OBP, but it could also be 2755 * specified in a .conf file(!) 2756 * 2757 * There doesn't seem to be a way to define byte-array 2758 * properties in a .conf, so we check whether it looks 2759 * like an array of 6 ints instead. 2760 * 2761 * Then, we check whether it looks like an array of 6 2762 * bytes (which it should, if OBP set it). If we can't 2763 * make sense of it either way, we'll ignore it. 2764 */ 2765 err = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, bgep->devinfo, 2766 DDI_PROP_DONTPASS, localmac_propname, &ints, &nelts); 2767 if (err == DDI_PROP_SUCCESS) { 2768 if (nelts == ETHERADDRL) { 2769 while (nelts--) 2770 cidp->vendor_addr.addr[nelts] = ints[nelts]; 2771 cidp->vendor_addr.set = B_TRUE; 2772 } 2773 ddi_prop_free(ints); 2774 } 2775 2776 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, bgep->devinfo, 2777 DDI_PROP_DONTPASS, localmac_propname, &bytes, &nelts); 2778 if (err == DDI_PROP_SUCCESS) { 2779 if (nelts == ETHERADDRL) { 2780 while (nelts--) 2781 cidp->vendor_addr.addr[nelts] = bytes[nelts]; 2782 cidp->vendor_addr.set = B_TRUE; 2783 } 2784 ddi_prop_free(bytes); 2785 } 2786 2787 BGE_DEBUG(("bge_find_mac_address: +local %s (%sset)", 2788 ether_sprintf((void *)cidp->vendor_addr.addr), 2789 cidp->vendor_addr.set ? "" : "not ")); 2790 2791 /* 2792 * Look up the OBP property "local-mac-address?". Note that even 2793 * though its value is a string (which should be "true" or "false"), 2794 * it can't be decoded by ddi_prop_lookup_string(9F). So, we zero 2795 * the buffer first and then fetch the property as an untyped array; 2796 * this may or may not include a final NUL, but since there will 2797 * always be one left at the end of the buffer we can now treat it 2798 * as a string anyway. 2799 */ 2800 nelts = sizeof (propbuf); 2801 bzero(propbuf, nelts--); 2802 err = ddi_getlongprop_buf(DDI_DEV_T_ANY, bgep->devinfo, 2803 DDI_PROP_CANSLEEP, localmac_boolname, propbuf, (int *)&nelts); 2804 2805 /* 2806 * Now, if the address still isn't set from the hardware (SEEPROM) 2807 * or the OBP or .conf property, OR if the user has foolishly set 2808 * 'local-mac-address? = false', use "the system address" instead 2809 * (but only if it's non-null i.e. has been set from the IDPROM). 
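 *
 * To summarise, the candidate sources are applied in this order,
 * each (if present and valid) overriding the one before:
 *
 *	1) address extracted from the chip itself (SEEPROM)
 *	2) "local-mac-address" property (as 6 ints or 6 bytes)
 *	3) the system address, if 'local-mac-address?' is "false"
 *	   or nothing above produced an address
 *	4) "mac-address" property (present after a netboot)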
2810 */ 2811 if (cidp->vendor_addr.set == B_FALSE || strcmp(propbuf, "false") == 0) 2812 if (localetheraddr(NULL, &sysaddr) != 0) { 2813 ethaddr_copy(&sysaddr, cidp->vendor_addr.addr); 2814 cidp->vendor_addr.set = B_TRUE; 2815 } 2816 2817 BGE_DEBUG(("bge_find_mac_address: +system %s (%sset)", 2818 ether_sprintf((void *)cidp->vendor_addr.addr), 2819 cidp->vendor_addr.set ? "" : "not ")); 2820 2821 /* 2822 * Finally(!), if there's a valid "mac-address" property (created 2823 * if we netbooted from this interface), we must use this instead 2824 * of any of the above to ensure that the NFS/install server doesn't 2825 * get confused by the address changing as Solaris takes over! 2826 */ 2827 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, bgep->devinfo, 2828 DDI_PROP_DONTPASS, macaddr_propname, &bytes, &nelts); 2829 if (err == DDI_PROP_SUCCESS) { 2830 if (nelts == ETHERADDRL) { 2831 while (nelts--) 2832 cidp->vendor_addr.addr[nelts] = bytes[nelts]; 2833 cidp->vendor_addr.set = B_TRUE; 2834 } 2835 ddi_prop_free(bytes); 2836 } 2837 2838 BGE_DEBUG(("bge_find_mac_address: =final %s (%sset)", 2839 ether_sprintf((void *)cidp->vendor_addr.addr), 2840 cidp->vendor_addr.set ? "" : "not ")); 2841 } 2842 2843 2844 /*ARGSUSED*/ 2845 int 2846 bge_check_acc_handle(bge_t *bgep, ddi_acc_handle_t handle) 2847 { 2848 ddi_fm_error_t de; 2849 2850 ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION); 2851 ddi_fm_acc_err_clear(handle, DDI_FME_VERSION); 2852 return (de.fme_status); 2853 } 2854 2855 /*ARGSUSED*/ 2856 int 2857 bge_check_dma_handle(bge_t *bgep, ddi_dma_handle_t handle) 2858 { 2859 ddi_fm_error_t de; 2860 2861 ASSERT(bgep->progress & PROGRESS_BUFS); 2862 ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION); 2863 return (de.fme_status); 2864 } 2865 2866 /* 2867 * The IO fault service error handling callback function 2868 */ 2869 /*ARGSUSED*/ 2870 static int 2871 bge_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data) 2872 { 2873 /* 2874 * as the driver can always deal with an error in any dma or 2875 * access handle, we can just return the fme_status value. 2876 */ 2877 pci_ereport_post(dip, err, NULL); 2878 return (err->fme_status); 2879 } 2880 2881 static void 2882 bge_fm_init(bge_t *bgep) 2883 { 2884 ddi_iblock_cookie_t iblk; 2885 2886 /* Only register with IO Fault Services if we have some capability */ 2887 if (bgep->fm_capabilities) { 2888 bge_reg_accattr.devacc_attr_access = DDI_FLAGERR_ACC; 2889 dma_attr.dma_attr_flags = DDI_DMA_FLAGERR; 2890 2891 /* Register capabilities with IO Fault Services */ 2892 ddi_fm_init(bgep->devinfo, &bgep->fm_capabilities, &iblk); 2893 2894 /* 2895 * Initialize pci ereport capabilities if ereport capable 2896 */ 2897 if (DDI_FM_EREPORT_CAP(bgep->fm_capabilities) || 2898 DDI_FM_ERRCB_CAP(bgep->fm_capabilities)) 2899 pci_ereport_setup(bgep->devinfo); 2900 2901 /* 2902 * Register error callback if error callback capable 2903 */ 2904 if (DDI_FM_ERRCB_CAP(bgep->fm_capabilities)) 2905 ddi_fm_handler_register(bgep->devinfo, 2906 bge_fm_error_cb, (void*) bgep); 2907 } else { 2908 /* 2909 * These fields have to be cleared of FMA if there are no 2910 * FMA capabilities at runtime. 
2911 */ 2912 bge_reg_accattr.devacc_attr_access = DDI_DEFAULT_ACC; 2913 dma_attr.dma_attr_flags = 0; 2914 } 2915 } 2916 2917 static void 2918 bge_fm_fini(bge_t *bgep) 2919 { 2920 /* Only unregister FMA capabilities if we registered some */ 2921 if (bgep->fm_capabilities) { 2922 2923 /* 2924 * Release any resources allocated by pci_ereport_setup() 2925 */ 2926 if (DDI_FM_EREPORT_CAP(bgep->fm_capabilities) || 2927 DDI_FM_ERRCB_CAP(bgep->fm_capabilities)) 2928 pci_ereport_teardown(bgep->devinfo); 2929 2930 /* 2931 * Un-register error callback if error callback capable 2932 */ 2933 if (DDI_FM_ERRCB_CAP(bgep->fm_capabilities)) 2934 ddi_fm_handler_unregister(bgep->devinfo); 2935 2936 /* Unregister from IO Fault Services */ 2937 ddi_fm_fini(bgep->devinfo); 2938 } 2939 } 2940 2941 static void 2942 #ifdef BGE_IPMI_ASF 2943 bge_unattach(bge_t *bgep, uint_t asf_mode) 2944 #else 2945 bge_unattach(bge_t *bgep) 2946 #endif 2947 { 2948 BGE_TRACE(("bge_unattach($%p)", 2949 (void *)bgep)); 2950 2951 /* 2952 * Flag that no more activity may be initiated 2953 */ 2954 bgep->progress &= ~PROGRESS_READY; 2955 2956 /* 2957 * Quiesce the PHY and MAC (leave it reset but still powered). 2958 * Clean up and free all BGE data structures 2959 */ 2960 if (bgep->periodic_id != NULL) { 2961 ddi_periodic_delete(bgep->periodic_id); 2962 bgep->periodic_id = NULL; 2963 } 2964 if (bgep->progress & PROGRESS_KSTATS) 2965 bge_fini_kstats(bgep); 2966 if (bgep->progress & PROGRESS_PHY) 2967 bge_phys_reset(bgep); 2968 if (bgep->progress & PROGRESS_HWINT) { 2969 mutex_enter(bgep->genlock); 2970 #ifdef BGE_IPMI_ASF 2971 if (bge_chip_reset(bgep, B_FALSE, asf_mode) != DDI_SUCCESS) 2972 #else 2973 if (bge_chip_reset(bgep, B_FALSE) != DDI_SUCCESS) 2974 #endif 2975 ddi_fm_service_impact(bgep->devinfo, 2976 DDI_SERVICE_UNAFFECTED); 2977 #ifdef BGE_IPMI_ASF 2978 if (bgep->asf_enabled) { 2979 /* 2980 * This register has been overlaid. We restore its 2981 * initial value here. 
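 *
 * (That is: while ASF is active this NIC-memory word is reused by
 * the firmware, so the bge_nic_put32() below simply writes the
 * original BGE_NIC_DATA_SIG signature value back.)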
2982 */ 2983 bge_nic_put32(bgep, BGE_NIC_DATA_SIG_ADDR, 2984 BGE_NIC_DATA_SIG); 2985 } 2986 #endif 2987 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) 2988 ddi_fm_service_impact(bgep->devinfo, 2989 DDI_SERVICE_UNAFFECTED); 2990 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) 2991 ddi_fm_service_impact(bgep->devinfo, 2992 DDI_SERVICE_UNAFFECTED); 2993 mutex_exit(bgep->genlock); 2994 } 2995 if (bgep->progress & PROGRESS_INTR) { 2996 bge_intr_disable(bgep); 2997 bge_fini_rings(bgep); 2998 } 2999 if (bgep->progress & PROGRESS_HWINT) { 3000 bge_rem_intrs(bgep); 3001 rw_destroy(bgep->errlock); 3002 mutex_destroy(bgep->softintrlock); 3003 mutex_destroy(bgep->genlock); 3004 } 3005 if (bgep->progress & PROGRESS_FACTOTUM) 3006 ddi_remove_softintr(bgep->factotum_id); 3007 if (bgep->progress & PROGRESS_RESCHED) 3008 ddi_remove_softintr(bgep->drain_id); 3009 if (bgep->progress & PROGRESS_BUFS) 3010 bge_free_bufs(bgep); 3011 if (bgep->progress & PROGRESS_REGS) 3012 ddi_regs_map_free(&bgep->io_handle); 3013 if (bgep->progress & PROGRESS_CFG) 3014 pci_config_teardown(&bgep->cfg_handle); 3015 3016 bge_fm_fini(bgep); 3017 3018 ddi_remove_minor_node(bgep->devinfo, NULL); 3019 kmem_free(bgep->pstats, sizeof (bge_statistics_reg_t)); 3020 kmem_free(bgep, sizeof (*bgep)); 3021 } 3022 3023 static int 3024 bge_resume(dev_info_t *devinfo) 3025 { 3026 bge_t *bgep; /* Our private data */ 3027 chip_id_t *cidp; 3028 chip_id_t chipid; 3029 3030 bgep = ddi_get_driver_private(devinfo); 3031 if (bgep == NULL) 3032 return (DDI_FAILURE); 3033 3034 /* 3035 * Refuse to resume if the data structures aren't consistent 3036 */ 3037 if (bgep->devinfo != devinfo) 3038 return (DDI_FAILURE); 3039 3040 #ifdef BGE_IPMI_ASF 3041 /* 3042 * Power management is not currently supported in BGE. If you 3043 * want to implement it, add the ASF/IPMI-related 3044 * code here. 3045 */ 3046 3047 #endif 3048 3049 /* 3050 * Read chip ID & set up config space command register(s) 3051 * Refuse to resume if the chip has changed its identity!
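 *
 * "Identity" here means the fields compared below; resume is
 * refused unless all of these still match the saved chip ID:
 *
 *	chipid.vendor == cidp->vendor
 *	chipid.device == cidp->device
 *	chipid.revision == cidp->revision
 *	chipid.asic_rev == cidp->asic_rev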
3052 */ 3053 cidp = &bgep->chipid; 3054 mutex_enter(bgep->genlock); 3055 bge_chip_cfg_init(bgep, &chipid, B_FALSE); 3056 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) { 3057 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 3058 mutex_exit(bgep->genlock); 3059 return (DDI_FAILURE); 3060 } 3061 mutex_exit(bgep->genlock); 3062 if (chipid.vendor != cidp->vendor) 3063 return (DDI_FAILURE); 3064 if (chipid.device != cidp->device) 3065 return (DDI_FAILURE); 3066 if (chipid.revision != cidp->revision) 3067 return (DDI_FAILURE); 3068 if (chipid.asic_rev != cidp->asic_rev) 3069 return (DDI_FAILURE); 3070 3071 /* 3072 * All OK, reinitialise h/w & kick off GLD scheduling 3073 */ 3074 mutex_enter(bgep->genlock); 3075 if (bge_restart(bgep, B_TRUE) != DDI_SUCCESS) { 3076 (void) bge_check_acc_handle(bgep, bgep->cfg_handle); 3077 (void) bge_check_acc_handle(bgep, bgep->io_handle); 3078 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 3079 mutex_exit(bgep->genlock); 3080 return (DDI_FAILURE); 3081 } 3082 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) { 3083 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 3084 mutex_exit(bgep->genlock); 3085 return (DDI_FAILURE); 3086 } 3087 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) { 3088 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 3089 mutex_exit(bgep->genlock); 3090 return (DDI_FAILURE); 3091 } 3092 mutex_exit(bgep->genlock); 3093 return (DDI_SUCCESS); 3094 } 3095 3096 /* 3097 * attach(9E) -- Attach a device to the system 3098 * 3099 * Called once for each board successfully probed. 3100 */ 3101 static int 3102 bge_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd) 3103 { 3104 bge_t *bgep; /* Our private data */ 3105 mac_register_t *macp; 3106 chip_id_t *cidp; 3107 caddr_t regs; 3108 int instance; 3109 int err; 3110 int intr_types; 3111 #ifdef BGE_IPMI_ASF 3112 uint32_t mhcrValue; 3113 #ifdef __sparc 3114 uint16_t value16; 3115 #endif 3116 #ifdef BGE_NETCONSOLE 3117 int retval; 3118 #endif 3119 #endif 3120 3121 instance = ddi_get_instance(devinfo); 3122 3123 BGE_GTRACE(("bge_attach($%p, %d) instance %d", 3124 (void *)devinfo, cmd, instance)); 3125 BGE_BRKPT(NULL, "bge_attach"); 3126 3127 switch (cmd) { 3128 default: 3129 return (DDI_FAILURE); 3130 3131 case DDI_RESUME: 3132 return (bge_resume(devinfo)); 3133 3134 case DDI_ATTACH: 3135 break; 3136 } 3137 3138 bgep = kmem_zalloc(sizeof (*bgep), KM_SLEEP); 3139 bgep->pstats = kmem_zalloc(sizeof (bge_statistics_reg_t), KM_SLEEP); 3140 ddi_set_driver_private(devinfo, bgep); 3141 bgep->bge_guard = BGE_GUARD; 3142 bgep->devinfo = devinfo; 3143 bgep->param_drain_max = 64; 3144 bgep->param_msi_cnt = 0; 3145 bgep->param_loop_mode = 0; 3146 3147 /* 3148 * Initialize more fields in BGE private data 3149 */ 3150 bgep->debug = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 3151 DDI_PROP_DONTPASS, debug_propname, bge_debug); 3152 (void) snprintf(bgep->ifname, sizeof (bgep->ifname), "%s%d", 3153 BGE_DRIVER_NAME, instance); 3154 3155 /* 3156 * Initialize for fma support 3157 */ 3158 bgep->fm_capabilities = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 3159 DDI_PROP_DONTPASS, fm_cap, 3160 DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE | 3161 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE); 3162 BGE_DEBUG(("bgep->fm_capabilities = %d", bgep->fm_capabilities)); 3163 bge_fm_init(bgep); 3164 3165 /* 3166 * Look up the IOMMU's page size for DVMA mappings (must be 3167 * a power of 2) and convert to a mask. 
This can be used to 3168 * determine whether a message buffer crosses a page boundary. 3169 * Note: in 2s complement binary notation, if X is a power of 3170 * 2, then -X has the representation "11...1100...00". 3171 */ 3172 bgep->pagemask = dvma_pagesize(devinfo); 3173 ASSERT(ddi_ffs(bgep->pagemask) == ddi_fls(bgep->pagemask)); 3174 bgep->pagemask = -bgep->pagemask; 3175 3176 /* 3177 * Map config space registers 3178 * Read chip ID & set up config space command register(s) 3179 * 3180 * Note: this leaves the chip accessible by Memory Space 3181 * accesses, but with interrupts and Bus Mastering off. 3182 * This should ensure that nothing untoward will happen 3183 * if it has been left active by the (net-)bootloader. 3184 * We'll re-enable Bus Mastering once we've reset the chip, 3185 * and allow interrupts only when everything else is set up. 3186 */ 3187 err = pci_config_setup(devinfo, &bgep->cfg_handle); 3188 #ifdef BGE_IPMI_ASF 3189 #ifdef __sparc 3190 /* 3191 * We need to determine the type of chipset for accessing some configure 3192 * registers. (This information will be used by bge_ind_put32, 3193 * bge_ind_get32 and bge_nic_read32) 3194 */ 3195 bgep->chipid.device = pci_config_get16(bgep->cfg_handle, 3196 PCI_CONF_DEVID); 3197 value16 = pci_config_get16(bgep->cfg_handle, PCI_CONF_COMM); 3198 value16 = value16 | (PCI_COMM_MAE | PCI_COMM_ME); 3199 pci_config_put16(bgep->cfg_handle, PCI_CONF_COMM, value16); 3200 mhcrValue = MHCR_ENABLE_INDIRECT_ACCESS | 3201 MHCR_ENABLE_TAGGED_STATUS_MODE | 3202 MHCR_MASK_INTERRUPT_MODE | 3203 MHCR_MASK_PCI_INT_OUTPUT | 3204 MHCR_CLEAR_INTERRUPT_INTA | 3205 MHCR_ENABLE_ENDIAN_WORD_SWAP | 3206 MHCR_ENABLE_ENDIAN_BYTE_SWAP; 3207 /* 3208 * For some chipsets (e.g., BCM5718), if MHCR_ENABLE_ENDIAN_BYTE_SWAP 3209 * has been set in PCI_CONF_COMM already, we need to write the 3210 * byte-swapped value to it. So we just write zero first for simplicity. 3211 */ 3212 if (DEVICE_5717_SERIES_CHIPSETS(bgep)) 3213 pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_MHCR, 0); 3214 pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_MHCR, mhcrValue); 3215 bge_ind_put32(bgep, MEMORY_ARBITER_MODE_REG, 3216 bge_ind_get32(bgep, MEMORY_ARBITER_MODE_REG) | 3217 MEMORY_ARBITER_ENABLE); 3218 #else 3219 mhcrValue = pci_config_get32(bgep->cfg_handle, PCI_CONF_BGE_MHCR); 3220 #endif 3221 if (mhcrValue & MHCR_ENABLE_ENDIAN_WORD_SWAP) { 3222 bgep->asf_wordswapped = B_TRUE; 3223 } else { 3224 bgep->asf_wordswapped = B_FALSE; 3225 } 3226 bge_asf_get_config(bgep); 3227 #endif 3228 if (err != DDI_SUCCESS) { 3229 bge_problem(bgep, "pci_config_setup() failed"); 3230 goto attach_fail; 3231 } 3232 bgep->progress |= PROGRESS_CFG; 3233 cidp = &bgep->chipid; 3234 bzero(cidp, sizeof (*cidp)); 3235 bge_chip_cfg_init(bgep, cidp, B_FALSE); 3236 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) { 3237 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 3238 goto attach_fail; 3239 } 3240 3241 #ifdef BGE_IPMI_ASF 3242 if (DEVICE_5721_SERIES_CHIPSETS(bgep) || 3243 DEVICE_5714_SERIES_CHIPSETS(bgep)) { 3244 bgep->asf_newhandshake = B_TRUE; 3245 } else { 3246 bgep->asf_newhandshake = B_FALSE; 3247 } 3248 #endif 3249 3250 /* 3251 * Update those parts of the chip ID derived from volatile 3252 * registers with the values seen by OBP (in case the chip 3253 * has been reset externally and therefore lost them). 
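 *
 * The values refreshed below are those named by subven_propname,
 * subdev_propname, clsize_propname, latency_propname,
 * rxrings_propname and txrings_propname, plus the default MTU
 * (which is then clamped to [BGE_DEFAULT_MTU, BGE_MAXIMUM_MTU]).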
3254 */ 3255 cidp->subven = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 3256 DDI_PROP_DONTPASS, subven_propname, cidp->subven); 3257 cidp->subdev = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 3258 DDI_PROP_DONTPASS, subdev_propname, cidp->subdev); 3259 cidp->clsize = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 3260 DDI_PROP_DONTPASS, clsize_propname, cidp->clsize); 3261 cidp->latency = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 3262 DDI_PROP_DONTPASS, latency_propname, cidp->latency); 3263 cidp->rx_rings = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 3264 DDI_PROP_DONTPASS, rxrings_propname, cidp->rx_rings); 3265 cidp->tx_rings = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 3266 DDI_PROP_DONTPASS, txrings_propname, cidp->tx_rings); 3267 3268 cidp->default_mtu = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 3269 DDI_PROP_DONTPASS, default_mtu, BGE_DEFAULT_MTU); 3270 if ((cidp->default_mtu < BGE_DEFAULT_MTU) || 3271 (cidp->default_mtu > BGE_MAXIMUM_MTU)) { 3272 cidp->default_mtu = BGE_DEFAULT_MTU; 3273 } 3274 3275 /* 3276 * Map operating registers 3277 */ 3278 err = ddi_regs_map_setup(devinfo, BGE_PCI_OPREGS_RNUMBER, 3279 &regs, 0, 0, &bge_reg_accattr, &bgep->io_handle); 3280 if (err != DDI_SUCCESS) { 3281 bge_problem(bgep, "ddi_regs_map_setup() failed"); 3282 goto attach_fail; 3283 } 3284 bgep->io_regs = regs; 3285 bgep->progress |= PROGRESS_REGS; 3286 3287 /* 3288 * Characterise the device, so we know its requirements. 3289 * Then allocate the appropriate TX and RX descriptors & buffers. 3290 */ 3291 if (bge_chip_id_init(bgep) == EIO) { 3292 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 3293 goto attach_fail; 3294 } 3295 3296 err = bge_alloc_bufs(bgep); 3297 if (err != DDI_SUCCESS) { 3298 bge_problem(bgep, "DMA buffer allocation failed"); 3299 goto attach_fail; 3300 } 3301 bgep->progress |= PROGRESS_BUFS; 3302 3303 /* 3304 * Add the softint handlers: 3305 * 3306 * Both of these handlers are used to avoid restrictions on the 3307 * context and/or mutexes required for some operations. In 3308 * particular, the hardware interrupt handler and its subfunctions 3309 * can detect a number of conditions that we don't want to handle 3310 * in that context or with that set of mutexes held. So, these 3311 * softints are triggered instead: 3312 * 3313 * the <resched> softint is triggered if we have previously 3314 * had to refuse to send a packet because of resource shortage 3315 * (we've run out of transmit buffers), but the send completion 3316 * interrupt handler has now detected that more buffers have 3317 * become available. 3318 * 3319 * the <factotum> is triggered if the h/w interrupt handler 3320 * sees the <link state changed> or <error> bits in the status 3321 * block. It's also triggered periodically to poll the link 3322 * state, just in case we aren't getting link status change 3323 * interrupts ...
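 *
 * Elsewhere in the driver each handler is fired with a trigger of
 * the form (sketch only):
 *
 *	ddi_trigger_softintr(bgep->drain_id);	  (more tx buffers free)
 *	ddi_trigger_softintr(bgep->factotum_id);  (link/error attention)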
3324 */ 3325 err = ddi_add_softintr(devinfo, DDI_SOFTINT_LOW, &bgep->drain_id, 3326 NULL, NULL, bge_send_drain, (caddr_t)bgep); 3327 if (err != DDI_SUCCESS) { 3328 bge_problem(bgep, "ddi_add_softintr() failed"); 3329 goto attach_fail; 3330 } 3331 bgep->progress |= PROGRESS_RESCHED; 3332 err = ddi_add_softintr(devinfo, DDI_SOFTINT_LOW, &bgep->factotum_id, 3333 NULL, NULL, bge_chip_factotum, (caddr_t)bgep); 3334 if (err != DDI_SUCCESS) { 3335 bge_problem(bgep, "ddi_add_softintr() failed"); 3336 goto attach_fail; 3337 } 3338 bgep->progress |= PROGRESS_FACTOTUM; 3339 3340 /* Get supported interrupt types */ 3341 if (ddi_intr_get_supported_types(devinfo, &intr_types) != DDI_SUCCESS) { 3342 bge_error(bgep, "ddi_intr_get_supported_types failed\n"); 3343 3344 goto attach_fail; 3345 } 3346 3347 BGE_DEBUG(("%s: ddi_intr_get_supported_types() returned: %x", 3348 bgep->ifname, intr_types)); 3349 3350 if ((intr_types & DDI_INTR_TYPE_MSI) && bgep->chipid.msi_enabled) { 3351 if (bge_add_intrs(bgep, DDI_INTR_TYPE_MSI) != DDI_SUCCESS) { 3352 bge_error(bgep, "MSI registration failed, " 3353 "trying FIXED interrupt type\n"); 3354 } else { 3355 BGE_DEBUG(("%s: Using MSI interrupt type", 3356 bgep->ifname)); 3357 bgep->intr_type = DDI_INTR_TYPE_MSI; 3358 bgep->progress |= PROGRESS_HWINT; 3359 } 3360 } 3361 3362 if (!(bgep->progress & PROGRESS_HWINT) && 3363 (intr_types & DDI_INTR_TYPE_FIXED)) { 3364 if (bge_add_intrs(bgep, DDI_INTR_TYPE_FIXED) != DDI_SUCCESS) { 3365 bge_error(bgep, "FIXED interrupt " 3366 "registration failed\n"); 3367 goto attach_fail; 3368 } 3369 3370 BGE_DEBUG(("%s: Using FIXED interrupt type", bgep->ifname)); 3371 3372 bgep->intr_type = DDI_INTR_TYPE_FIXED; 3373 bgep->progress |= PROGRESS_HWINT; 3374 } 3375 3376 if (!(bgep->progress & PROGRESS_HWINT)) { 3377 bge_error(bgep, "No interrupts registered\n"); 3378 goto attach_fail; 3379 } 3380 3381 /* 3382 * Note that interrupts are not enabled yet as 3383 * mutex locks are not initialized. Initialize mutex locks. 3384 */ 3385 mutex_init(bgep->genlock, NULL, MUTEX_DRIVER, 3386 DDI_INTR_PRI(bgep->intr_pri)); 3387 mutex_init(bgep->softintrlock, NULL, MUTEX_DRIVER, 3388 DDI_INTR_PRI(bgep->intr_pri)); 3389 rw_init(bgep->errlock, NULL, RW_DRIVER, 3390 DDI_INTR_PRI(bgep->intr_pri)); 3391 3392 /* 3393 * Initialize rings. 3394 */ 3395 bge_init_rings(bgep); 3396 3397 /* 3398 * Now that mutex locks are initialized, enable interrupts. 3399 */ 3400 bge_intr_enable(bgep); 3401 bgep->progress |= PROGRESS_INTR; 3402 3403 /* 3404 * Initialise link state variables 3405 * Stop, reset & reinitialise the chip. 3406 * Initialise the (internal) PHY. 3407 */ 3408 bgep->link_state = LINK_STATE_UNKNOWN; 3409 3410 mutex_enter(bgep->genlock); 3411 3412 /* 3413 * Reset chip & rings to initial state; also reset address 3414 * filtering, promiscuity, loopback mode. 
3415 */ 3416 #ifdef BGE_IPMI_ASF 3417 #ifdef BGE_NETCONSOLE 3418 if (bge_reset(bgep, ASF_MODE_INIT) != DDI_SUCCESS) { 3419 #else 3420 if (bge_reset(bgep, ASF_MODE_SHUTDOWN) != DDI_SUCCESS) { 3421 #endif 3422 #else 3423 if (bge_reset(bgep) != DDI_SUCCESS) { 3424 #endif 3425 (void) bge_check_acc_handle(bgep, bgep->cfg_handle); 3426 (void) bge_check_acc_handle(bgep, bgep->io_handle); 3427 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 3428 mutex_exit(bgep->genlock); 3429 goto attach_fail; 3430 } 3431 3432 #ifdef BGE_IPMI_ASF 3433 if (bgep->asf_enabled) { 3434 bgep->asf_status = ASF_STAT_RUN_INIT; 3435 } 3436 #endif 3437 3438 bzero(bgep->mcast_hash, sizeof (bgep->mcast_hash)); 3439 bzero(bgep->mcast_refs, sizeof (bgep->mcast_refs)); 3440 bgep->promisc = B_FALSE; 3441 bgep->param_loop_mode = BGE_LOOP_NONE; 3442 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) { 3443 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 3444 mutex_exit(bgep->genlock); 3445 goto attach_fail; 3446 } 3447 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) { 3448 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 3449 mutex_exit(bgep->genlock); 3450 goto attach_fail; 3451 } 3452 3453 mutex_exit(bgep->genlock); 3454 3455 if (bge_phys_init(bgep) == EIO) { 3456 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 3457 goto attach_fail; 3458 } 3459 bgep->progress |= PROGRESS_PHY; 3460 3461 /* 3462 * initialize NDD-tweakable parameters 3463 */ 3464 if (bge_nd_init(bgep)) { 3465 bge_problem(bgep, "bge_nd_init() failed"); 3466 goto attach_fail; 3467 } 3468 bgep->progress |= PROGRESS_NDD; 3469 3470 /* 3471 * Create & initialise named kstats 3472 */ 3473 bge_init_kstats(bgep, instance); 3474 bgep->progress |= PROGRESS_KSTATS; 3475 3476 /* 3477 * Determine whether to override the chip's own MAC address 3478 */ 3479 bge_find_mac_address(bgep, cidp); 3480 3481 bgep->unicst_addr_total = MAC_ADDRESS_REGS_MAX; 3482 bgep->unicst_addr_avail = MAC_ADDRESS_REGS_MAX; 3483 3484 if ((macp = mac_alloc(MAC_VERSION)) == NULL) 3485 goto attach_fail; 3486 macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER; 3487 macp->m_driver = bgep; 3488 macp->m_dip = devinfo; 3489 macp->m_src_addr = cidp->vendor_addr.addr; 3490 macp->m_callbacks = &bge_m_callbacks; 3491 macp->m_min_sdu = 0; 3492 macp->m_max_sdu = cidp->ethmax_size - sizeof (struct ether_header); 3493 macp->m_margin = VLAN_TAGSZ; 3494 macp->m_priv_props = bge_priv_prop; 3495 macp->m_v12n = MAC_VIRT_LEVEL1; 3496 3497 /* 3498 * Finally, we're ready to register ourselves with the MAC layer 3499 * interface; if this succeeds, we're all ready to start() 3500 */ 3501 err = mac_register(macp, &bgep->mh); 3502 mac_free(macp); 3503 if (err != 0) 3504 goto attach_fail; 3505 3506 mac_link_update(bgep->mh, LINK_STATE_UNKNOWN); 3507 3508 /* 3509 * Register a periodical handler. 3510 * bge_chip_cyclic() is invoked in kernel context. 
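 *
 * (ddi_periodic_add(9F) takes its interval in nanoseconds, so
 * bge_chip_cyclic(bgep) fires every BGE_CYCLIC_PERIOD ns at
 * DDI_IPL_0; the matching ddi_periodic_delete() is issued from
 * bge_unattach() above.)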
3511 */ 3512 bgep->periodic_id = ddi_periodic_add(bge_chip_cyclic, bgep, 3513 BGE_CYCLIC_PERIOD, DDI_IPL_0); 3514 3515 bgep->progress |= PROGRESS_READY; 3516 ASSERT(bgep->bge_guard == BGE_GUARD); 3517 #ifdef BGE_IPMI_ASF 3518 #ifdef BGE_NETCONSOLE 3519 if (bgep->asf_enabled) { 3520 mutex_enter(bgep->genlock); 3521 retval = bge_chip_start(bgep, B_TRUE); 3522 mutex_exit(bgep->genlock); 3523 if (retval != DDI_SUCCESS) 3524 goto attach_fail; 3525 } 3526 #endif 3527 #endif 3528 3529 ddi_report_dev(devinfo); 3530 3531 return (DDI_SUCCESS); 3532 3533 attach_fail: 3534 #ifdef BGE_IPMI_ASF 3535 bge_unattach(bgep, ASF_MODE_SHUTDOWN); 3536 #else 3537 bge_unattach(bgep); 3538 #endif 3539 return (DDI_FAILURE); 3540 } 3541 3542 /* 3543 * bge_suspend() -- suspend transmit/receive for powerdown 3544 */ 3545 static int 3546 bge_suspend(bge_t *bgep) 3547 { 3548 /* 3549 * Stop processing and idle (powerdown) the PHY ... 3550 */ 3551 mutex_enter(bgep->genlock); 3552 #ifdef BGE_IPMI_ASF 3553 /* 3554 * Power management is not currently supported in BGE. If you 3555 * want to implement it, add the ASF/IPMI-related 3556 * code here. 3557 */ 3558 #endif 3559 bge_stop(bgep); 3560 if (bge_phys_idle(bgep) != DDI_SUCCESS) { 3561 (void) bge_check_acc_handle(bgep, bgep->io_handle); 3562 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 3563 mutex_exit(bgep->genlock); 3564 return (DDI_FAILURE); 3565 } 3566 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) { 3567 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 3568 mutex_exit(bgep->genlock); 3569 return (DDI_FAILURE); 3570 } 3571 mutex_exit(bgep->genlock); 3572 3573 return (DDI_SUCCESS); 3574 } 3575 3576 /* 3577 * quiesce(9E) entry point. 3578 * 3579 * This function is called when the system is single-threaded at high 3580 * PIL with preemption disabled. Therefore, this function must not 3581 * block. 3582 * 3583 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure. 3584 * DDI_FAILURE indicates an error condition and should almost never happen.
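 *
 * Because it may not block, the implementation below confines
 * itself to register accesses: it masks the interrupt source
 * (INTA or MSI, according to bgep->intr_type) and then calls
 * bge_chip_stop_nonblocking() to halt the chip.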
3585 */ 3586 #ifdef __sparc 3587 #define bge_quiesce ddi_quiesce_not_supported 3588 #else 3589 static int 3590 bge_quiesce(dev_info_t *devinfo) 3591 { 3592 bge_t *bgep = ddi_get_driver_private(devinfo); 3593 3594 if (bgep == NULL) 3595 return (DDI_FAILURE); 3596 3597 if (bgep->intr_type == DDI_INTR_TYPE_FIXED) { 3598 bge_reg_set32(bgep, PCI_CONF_BGE_MHCR, 3599 MHCR_MASK_PCI_INT_OUTPUT); 3600 } else { 3601 bge_reg_clr32(bgep, MSI_MODE_REG, MSI_MSI_ENABLE); 3602 } 3603 3604 /* Stop the chip */ 3605 bge_chip_stop_nonblocking(bgep); 3606 3607 return (DDI_SUCCESS); 3608 } 3609 #endif 3610 3611 /* 3612 * detach(9E) -- Detach a device from the system 3613 */ 3614 static int 3615 bge_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd) 3616 { 3617 bge_t *bgep; 3618 #ifdef BGE_IPMI_ASF 3619 uint_t asf_mode; 3620 asf_mode = ASF_MODE_NONE; 3621 #endif 3622 3623 BGE_GTRACE(("bge_detach($%p, %d)", (void *)devinfo, cmd)); 3624 3625 bgep = ddi_get_driver_private(devinfo); 3626 3627 switch (cmd) { 3628 default: 3629 return (DDI_FAILURE); 3630 3631 case DDI_SUSPEND: 3632 return (bge_suspend(bgep)); 3633 3634 case DDI_DETACH: 3635 break; 3636 } 3637 3638 #ifdef BGE_IPMI_ASF 3639 mutex_enter(bgep->genlock); 3640 if (bgep->asf_enabled && ((bgep->asf_status == ASF_STAT_RUN) || 3641 (bgep->asf_status == ASF_STAT_RUN_INIT))) { 3642 3643 bge_asf_update_status(bgep); 3644 if (bgep->asf_status == ASF_STAT_RUN) { 3645 bge_asf_stop_timer(bgep); 3646 } 3647 bgep->asf_status = ASF_STAT_STOP; 3648 3649 bge_asf_pre_reset_operations(bgep, BGE_SHUTDOWN_RESET); 3650 3651 if (bgep->asf_pseudostop) { 3652 bge_chip_stop(bgep, B_FALSE); 3653 bgep->bge_mac_state = BGE_MAC_STOPPED; 3654 bgep->asf_pseudostop = B_FALSE; 3655 } 3656 3657 asf_mode = ASF_MODE_POST_SHUTDOWN; 3658 3659 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) 3660 ddi_fm_service_impact(bgep->devinfo, 3661 DDI_SERVICE_UNAFFECTED); 3662 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) 3663 ddi_fm_service_impact(bgep->devinfo, 3664 DDI_SERVICE_UNAFFECTED); 3665 } 3666 mutex_exit(bgep->genlock); 3667 #endif 3668 3669 /* 3670 * Unregister from the GLD subsystem. This can fail, in 3671 * particular if there are DLPI style-2 streams still open - 3672 * in which case we just return failure without shutting 3673 * down chip operations. 3674 */ 3675 if (mac_unregister(bgep->mh) != 0) 3676 return (DDI_FAILURE); 3677 3678 /* 3679 * All activity stopped, so we can clean up & exit 3680 */ 3681 #ifdef BGE_IPMI_ASF 3682 bge_unattach(bgep, asf_mode); 3683 #else 3684 bge_unattach(bgep); 3685 #endif 3686 return (DDI_SUCCESS); 3687 } 3688 3689 3690 /* 3691 * ========== Module Loading Data & Entry Points ========== 3692 */ 3693 3694 #undef BGE_DBG 3695 #define BGE_DBG BGE_DBG_INIT /* debug flag for this code */ 3696 3697 DDI_DEFINE_STREAM_OPS(bge_dev_ops, 3698 nulldev, /* identify */ 3699 nulldev, /* probe */ 3700 bge_attach, /* attach */ 3701 bge_detach, /* detach */ 3702 nodev, /* reset */ 3703 NULL, /* cb_ops */ 3704 D_MP, /* bus_ops */ 3705 NULL, /* power */ 3706 bge_quiesce /* quiesce */ 3707 ); 3708 3709 static struct modldrv bge_modldrv = { 3710 &mod_driverops, /* Type of module. 
This one is a driver */ 3711 bge_ident, /* short description */ 3712 &bge_dev_ops /* driver specific ops */ 3713 }; 3714 3715 static struct modlinkage modlinkage = { 3716 MODREV_1, (void *)&bge_modldrv, NULL 3717 }; 3718 3719 3720 int 3721 _info(struct modinfo *modinfop) 3722 { 3723 return (mod_info(&modlinkage, modinfop)); 3724 } 3725 3726 int 3727 _init(void) 3728 { 3729 int status; 3730 3731 mac_init_ops(&bge_dev_ops, "bge"); 3732 status = mod_install(&modlinkage); 3733 if (status == DDI_SUCCESS) 3734 mutex_init(bge_log_mutex, NULL, MUTEX_DRIVER, NULL); 3735 else 3736 mac_fini_ops(&bge_dev_ops); 3737 return (status); 3738 } 3739 3740 int 3741 _fini(void) 3742 { 3743 int status; 3744 3745 status = mod_remove(&modlinkage); 3746 if (status == DDI_SUCCESS) { 3747 mac_fini_ops(&bge_dev_ops); 3748 mutex_destroy(bge_log_mutex); 3749 } 3750 return (status); 3751 } 3752 3753 3754 /* 3755 * bge_add_intrs: 3756 * 3757 * Register FIXED or MSI interrupts. 3758 */ 3759 static int 3760 bge_add_intrs(bge_t *bgep, int intr_type) 3761 { 3762 dev_info_t *dip = bgep->devinfo; 3763 int avail, actual, intr_size, count = 0; 3764 int i, flag, ret; 3765 3766 BGE_DEBUG(("bge_add_intrs($%p, 0x%x)", (void *)bgep, intr_type)); 3767 3768 /* Get number of interrupts */ 3769 ret = ddi_intr_get_nintrs(dip, intr_type, &count); 3770 if ((ret != DDI_SUCCESS) || (count == 0)) { 3771 bge_error(bgep, "ddi_intr_get_nintrs() failure, ret: %d, " 3772 "count: %d", ret, count); 3773 3774 return (DDI_FAILURE); 3775 } 3776 3777 /* Get number of available interrupts */ 3778 ret = ddi_intr_get_navail(dip, intr_type, &avail); 3779 if ((ret != DDI_SUCCESS) || (avail == 0)) { 3780 bge_error(bgep, "ddi_intr_get_navail() failure, " 3781 "ret: %d, avail: %d\n", ret, avail); 3782 3783 return (DDI_FAILURE); 3784 } 3785 3786 if (avail < count) { 3787 BGE_DEBUG(("%s: nintrs() returned %d, navail returned %d", 3788 bgep->ifname, count, avail)); 3789 } 3790 3791 /* 3792 * BGE hardware generates only single MSI even though it claims 3793 * to support multiple MSIs. So, hard code MSI count value to 1. 
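 *
 * With DDI_INTR_ALLOC_STRICT, the ddi_intr_alloc() call below must
 * grant exactly the requested count (here 1) or fail outright;
 * DDI_INTR_ALLOC_NORMAL, used for FIXED interrupts, allows the
 * framework to return fewer interrupts than requested.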
3794 */ 3795 if (intr_type == DDI_INTR_TYPE_MSI) { 3796 count = 1; 3797 flag = DDI_INTR_ALLOC_STRICT; 3798 } else { 3799 flag = DDI_INTR_ALLOC_NORMAL; 3800 } 3801 3802 /* Allocate an array of interrupt handles */ 3803 intr_size = count * sizeof (ddi_intr_handle_t); 3804 bgep->htable = kmem_alloc(intr_size, KM_SLEEP); 3805 3806 /* Call ddi_intr_alloc() */ 3807 ret = ddi_intr_alloc(dip, bgep->htable, intr_type, 0, 3808 count, &actual, flag); 3809 3810 if ((ret != DDI_SUCCESS) || (actual == 0)) { 3811 bge_error(bgep, "ddi_intr_alloc() failed %d\n", ret); 3812 3813 kmem_free(bgep->htable, intr_size); 3814 return (DDI_FAILURE); 3815 } 3816 3817 if (actual < count) { 3818 BGE_DEBUG(("%s: Requested: %d, Received: %d", 3819 bgep->ifname, count, actual)); 3820 } 3821 3822 bgep->intr_cnt = actual; 3823 3824 /* 3825 * Get priority for first msi, assume remaining are all the same 3826 */ 3827 if ((ret = ddi_intr_get_pri(bgep->htable[0], &bgep->intr_pri)) != 3828 DDI_SUCCESS) { 3829 bge_error(bgep, "ddi_intr_get_pri() failed %d\n", ret); 3830 3831 /* Free already allocated intr */ 3832 for (i = 0; i < actual; i++) { 3833 (void) ddi_intr_free(bgep->htable[i]); 3834 } 3835 3836 kmem_free(bgep->htable, intr_size); 3837 return (DDI_FAILURE); 3838 } 3839 3840 /* Call ddi_intr_add_handler() */ 3841 for (i = 0; i < actual; i++) { 3842 if ((ret = ddi_intr_add_handler(bgep->htable[i], bge_intr, 3843 (caddr_t)bgep, (caddr_t)(uintptr_t)i)) != DDI_SUCCESS) { 3844 bge_error(bgep, "ddi_intr_add_handler() " 3845 "failed %d\n", ret); 3846 3847 /* Free already allocated intr */ 3848 for (i = 0; i < actual; i++) { 3849 (void) ddi_intr_free(bgep->htable[i]); 3850 } 3851 3852 kmem_free(bgep->htable, intr_size); 3853 return (DDI_FAILURE); 3854 } 3855 } 3856 3857 if ((ret = ddi_intr_get_cap(bgep->htable[0], &bgep->intr_cap)) 3858 != DDI_SUCCESS) { 3859 bge_error(bgep, "ddi_intr_get_cap() failed %d\n", ret); 3860 3861 for (i = 0; i < actual; i++) { 3862 (void) ddi_intr_remove_handler(bgep->htable[i]); 3863 (void) ddi_intr_free(bgep->htable[i]); 3864 } 3865 3866 kmem_free(bgep->htable, intr_size); 3867 return (DDI_FAILURE); 3868 } 3869 3870 return (DDI_SUCCESS); 3871 } 3872 3873 /* 3874 * bge_rem_intrs: 3875 * 3876 * Unregister FIXED or MSI interrupts 3877 */ 3878 static void 3879 bge_rem_intrs(bge_t *bgep) 3880 { 3881 int i; 3882 3883 BGE_DEBUG(("bge_rem_intrs($%p)", (void *)bgep)); 3884 3885 /* Call ddi_intr_remove_handler() */ 3886 for (i = 0; i < bgep->intr_cnt; i++) { 3887 (void) ddi_intr_remove_handler(bgep->htable[i]); 3888 (void) ddi_intr_free(bgep->htable[i]); 3889 } 3890 3891 kmem_free(bgep->htable, bgep->intr_cnt * sizeof (ddi_intr_handle_t)); 3892 } 3893 3894 3895 void 3896 bge_intr_enable(bge_t *bgep) 3897 { 3898 int i; 3899 3900 if (bgep->intr_cap & DDI_INTR_FLAG_BLOCK) { 3901 /* Call ddi_intr_block_enable() for MSI interrupts */ 3902 (void) ddi_intr_block_enable(bgep->htable, bgep->intr_cnt); 3903 } else { 3904 /* Call ddi_intr_enable for MSI or FIXED interrupts */ 3905 for (i = 0; i < bgep->intr_cnt; i++) { 3906 (void) ddi_intr_enable(bgep->htable[i]); 3907 } 3908 } 3909 } 3910 3911 3912 void 3913 bge_intr_disable(bge_t *bgep) 3914 { 3915 int i; 3916 3917 if (bgep->intr_cap & DDI_INTR_FLAG_BLOCK) { 3918 /* Call ddi_intr_block_disable() */ 3919 (void) ddi_intr_block_disable(bgep->htable, bgep->intr_cnt); 3920 } else { 3921 for (i = 0; i < bgep->intr_cnt; i++) { 3922 (void) ddi_intr_disable(bgep->htable[i]); 3923 } 3924 } 3925 } 3926 3927 int 3928 bge_reprogram(bge_t *bgep) 3929 { 3930 int status = 0; 3931 3932 
ASSERT(mutex_owned(bgep->genlock)); 3933 3934 if (bge_phys_update(bgep) != DDI_SUCCESS) { 3935 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 3936 status = IOC_INVAL; 3937 } 3938 #ifdef BGE_IPMI_ASF 3939 if (bge_chip_sync(bgep, B_TRUE) == DDI_FAILURE) { 3940 #else 3941 if (bge_chip_sync(bgep) == DDI_FAILURE) { 3942 #endif 3943 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 3944 status = IOC_INVAL; 3945 } 3946 if (bgep->intr_type == DDI_INTR_TYPE_MSI) 3947 bge_chip_msi_trig(bgep); 3948 return (status); 3949 } 3950
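/*
 * A sketch of how bge_reprogram() is used from the ioctl handler
 * above: with genlock held, an ioctl that has changed PHY/MAC
 * parameters returns IOC_RESTART_ACK or IOC_RESTART_REPLY, whereupon
 * the ioctl path calls bge_reprogram() and downgrades its reply
 * status to IOC_INVAL if the update fails.
 */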