/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include "bge_impl.h"
#include <sys/sdt.h>
#include <sys/dld.h>

/*
 * This is the string displayed by modinfo, etc.
 * Make sure you keep the version ID up to date!
 */
static char bge_ident[] = "Broadcom Gb Ethernet v0.62";

/*
 * Property names
 */
static char debug_propname[] = "bge-debug-flags";
static char clsize_propname[] = "cache-line-size";
static char latency_propname[] = "latency-timer";
static char localmac_boolname[] = "local-mac-address?";
static char localmac_propname[] = "local-mac-address";
static char macaddr_propname[] = "mac-address";
static char subdev_propname[] = "subsystem-id";
static char subven_propname[] = "subsystem-vendor-id";
static char rxrings_propname[] = "bge-rx-rings";
static char txrings_propname[] = "bge-tx-rings";
static char fm_cap[] = "fm-capable";
static char default_mtu[] = "default_mtu";

static int bge_add_intrs(bge_t *, int);
static void bge_rem_intrs(bge_t *);

/*
 * Describes the chip's DMA engine
 */
static ddi_dma_attr_t dma_attr = {
	DMA_ATTR_V0,			/* dma_attr version	*/
	0x0000000000000000ull,		/* dma_attr_addr_lo	*/
	0xFFFFFFFFFFFFFFFFull,		/* dma_attr_addr_hi	*/
	0x00000000FFFFFFFFull,		/* dma_attr_count_max	*/
	0x0000000000000001ull,		/* dma_attr_align	*/
	0x00000FFF,			/* dma_attr_burstsizes	*/
	0x00000001,			/* dma_attr_minxfer	*/
	0x000000000000FFFFull,		/* dma_attr_maxxfer	*/
	0xFFFFFFFFFFFFFFFFull,		/* dma_attr_seg		*/
	1,				/* dma_attr_sgllen	*/
	0x00000001,			/* dma_attr_granular	*/
	DDI_DMA_FLAGERR			/* dma_attr_flags	*/
};

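/*
 * Note: dma_attr_sgllen is 1, so every DMA binding made with these
 * attributes must resolve to a single physically contiguous cookie;
 * bge_alloc_dma_mem() below depends on this when it fails any bind
 * that returns ncookies != 1.
 */
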
/*
 * PIO access attributes for registers
 */
static ddi_device_acc_attr_t bge_reg_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC,
	DDI_FLAGERR_ACC
};

/*
 * DMA access attributes for descriptors: NOT to be byte swapped.
 */
static ddi_device_acc_attr_t bge_desc_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC,
	DDI_FLAGERR_ACC
};

/*
 * DMA access attributes for data: NOT to be byte swapped.
 */
static ddi_device_acc_attr_t bge_data_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC
};

/*
 * Versions of the O/S up to Solaris 8 didn't support network booting
 * from any network interface except the first (NET0).  Patching this
 * flag to a non-zero value will tell the driver to work around this
 * limitation by creating an extra (internal) pathname node.  To do
 * this, just add a line like the following to the CLIENT'S etc/system
 * file ON THE ROOT FILESYSTEM SERVER before booting the client:
 *
 *	set bge:bge_net1_boot_support = 1;
 */
static uint32_t bge_net1_boot_support = 1;

static int		bge_m_start(void *);
static void		bge_m_stop(void *);
static int		bge_m_promisc(void *, boolean_t);
static int		bge_m_multicst(void *, boolean_t, const uint8_t *);
static int		bge_m_unicst(void *, const uint8_t *);
static void		bge_m_resources(void *);
static void		bge_m_ioctl(void *, queue_t *, mblk_t *);
static boolean_t	bge_m_getcapab(void *, mac_capab_t, void *);
static int		bge_unicst_set(void *, const uint8_t *,
    mac_addr_slot_t);
static int		bge_m_unicst_add(void *, mac_multi_addr_t *);
static int		bge_m_unicst_remove(void *, mac_addr_slot_t);
static int		bge_m_unicst_modify(void *, mac_multi_addr_t *);
static int		bge_m_unicst_get(void *, mac_multi_addr_t *);
static int		bge_m_setprop(void *, const char *, mac_prop_id_t,
    uint_t, const void *);
static int		bge_m_getprop(void *, const char *, mac_prop_id_t,
    uint_t, uint_t, void *);
static int		bge_set_priv_prop(bge_t *, const char *, uint_t,
    const void *);
static int		bge_get_priv_prop(bge_t *, const char *, uint_t,
    uint_t, void *);

#define	BGE_M_CALLBACK_FLAGS\
	(MC_RESOURCES | MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP)

static mac_callbacks_t bge_m_callbacks = {
	BGE_M_CALLBACK_FLAGS,
	bge_m_stat,
	bge_m_start,
	bge_m_stop,
	bge_m_promisc,
	bge_m_multicst,
	bge_m_unicst,
	bge_m_tx,
	bge_m_resources,
	bge_m_ioctl,
	bge_m_getcapab,
	NULL,
	NULL,
	bge_m_setprop,
	bge_m_getprop
};

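/*
 * Note: the two NULL entries in bge_m_callbacks above are optional
 * entry points that this driver does not implement; no corresponding
 * bits are set in BGE_M_CALLBACK_FLAGS, so the MAC layer never calls
 * through them.
 */
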
mac_priv_prop_t bge_priv_prop[] = {
	{"_adv_asym_pause_cap", MAC_PROP_PERM_RW},
	{"_adv_pause_cap", MAC_PROP_PERM_RW}
};

#define	BGE_MAX_PRIV_PROPS \
	(sizeof (bge_priv_prop) / sizeof (mac_priv_prop_t))

/*
 * ========== Transmit and receive ring reinitialisation ==========
 */

/*
 * These <reinit> routines each reset the specified ring to an initial
 * state, assuming that the corresponding <init> routine has already
 * been called exactly once.
 */

static void
bge_reinit_send_ring(send_ring_t *srp)
{
	bge_queue_t *txbuf_queue;
	bge_queue_item_t *txbuf_head;
	sw_txbuf_t *txbuf;
	sw_sbd_t *ssbdp;
	uint32_t slot;

	/*
	 * Reinitialise control variables ...
	 */
	srp->tx_flow = 0;
	srp->tx_next = 0;
	srp->txfill_next = 0;
	srp->tx_free = srp->desc.nslots;
	ASSERT(mutex_owned(srp->tc_lock));
	srp->tc_next = 0;
	srp->txpkt_next = 0;
	srp->tx_block = 0;
	srp->tx_nobd = 0;
	srp->tx_nobuf = 0;

	/*
	 * Initialize the tx buffer push queue
	 */
	mutex_enter(srp->freetxbuf_lock);
	mutex_enter(srp->txbuf_lock);
	txbuf_queue = &srp->freetxbuf_queue;
	txbuf_queue->head = NULL;
	txbuf_queue->count = 0;
	txbuf_queue->lock = srp->freetxbuf_lock;
	srp->txbuf_push_queue = txbuf_queue;

	/*
	 * Initialize the tx buffer pop queue
	 */
	txbuf_queue = &srp->txbuf_queue;
	txbuf_queue->head = NULL;
	txbuf_queue->count = 0;
	txbuf_queue->lock = srp->txbuf_lock;
	srp->txbuf_pop_queue = txbuf_queue;
	txbuf_head = srp->txbuf_head;
	txbuf = srp->txbuf;
	for (slot = 0; slot < srp->tx_buffers; ++slot) {
		txbuf_head->item = txbuf;
		txbuf_head->next = txbuf_queue->head;
		txbuf_queue->head = txbuf_head;
		txbuf_queue->count++;
		txbuf++;
		txbuf_head++;
	}
	mutex_exit(srp->txbuf_lock);
	mutex_exit(srp->freetxbuf_lock);

	/*
	 * Zero and sync all the h/w Send Buffer Descriptors
	 */
	DMA_ZERO(srp->desc);
	DMA_SYNC(srp->desc, DDI_DMA_SYNC_FORDEV);
	bzero(srp->pktp, BGE_SEND_BUF_MAX * sizeof (*srp->pktp));
	ssbdp = srp->sw_sbds;
	for (slot = 0; slot < srp->desc.nslots; ++ssbdp, ++slot)
		ssbdp->pbuf = NULL;
}

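/*
 * Note: the loop in bge_reinit_send_ring() above threads every tx
 * buffer onto the pop queue head-first, so the push/pop queues behave
 * as simple LIFO free lists, each guarded by its own lock.
 */
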
static void
bge_reinit_recv_ring(recv_ring_t *rrp)
{
	/*
	 * Reinitialise control variables ...
	 */
	rrp->rx_next = 0;
}

static void
bge_reinit_buff_ring(buff_ring_t *brp, uint32_t ring)
{
	bge_rbd_t *hw_rbd_p;
	sw_rbd_t *srbdp;
	uint32_t bufsize;
	uint32_t nslots;
	uint32_t slot;

	static uint16_t ring_type_flag[BGE_BUFF_RINGS_MAX] = {
		RBD_FLAG_STD_RING,
		RBD_FLAG_JUMBO_RING,
		RBD_FLAG_MINI_RING
	};

	/*
	 * Zero, initialise and sync all the h/w Receive Buffer Descriptors
	 * Note: all the remaining fields (<type>, <flags>, <ip_cksum>,
	 * <tcp_udp_cksum>, <error_flag>, <vlan_tag>, and <reserved>)
	 * should be zeroed, and so don't need to be set up specifically
	 * once the whole area has been cleared.
	 */
	DMA_ZERO(brp->desc);

	hw_rbd_p = DMA_VPTR(brp->desc);
	nslots = brp->desc.nslots;
	ASSERT(brp->buf[0].nslots == nslots/BGE_SPLIT);
	bufsize = brp->buf[0].size;
	srbdp = brp->sw_rbds;
	for (slot = 0; slot < nslots; ++hw_rbd_p, ++srbdp, ++slot) {
		hw_rbd_p->host_buf_addr = srbdp->pbuf.cookie.dmac_laddress;
		hw_rbd_p->index = slot;
		hw_rbd_p->len = bufsize;
		hw_rbd_p->opaque = srbdp->pbuf.token;
		hw_rbd_p->flags |= ring_type_flag[ring];
	}

	DMA_SYNC(brp->desc, DDI_DMA_SYNC_FORDEV);

	/*
	 * Finally, reinitialise the ring control variables ...
	 */
	brp->rf_next = (nslots != 0) ? (nslots-1) : 0;
}

/*
 * Reinitialize all rings
 */
static void
bge_reinit_rings(bge_t *bgep)
{
	uint32_t ring;

	ASSERT(mutex_owned(bgep->genlock));

	/*
	 * Send Rings ...
	 */
	for (ring = 0; ring < bgep->chipid.tx_rings; ++ring)
		bge_reinit_send_ring(&bgep->send[ring]);

	/*
	 * Receive Return Rings ...
	 */
	for (ring = 0; ring < bgep->chipid.rx_rings; ++ring)
		bge_reinit_recv_ring(&bgep->recv[ring]);

	/*
	 * Receive Producer Rings ...
	 */
	for (ring = 0; ring < BGE_BUFF_RINGS_USED; ++ring)
		bge_reinit_buff_ring(&bgep->buff[ring], ring);
}

/*
 * ========== Internal state management entry points ==========
 */

#undef	BGE_DBG
#define	BGE_DBG		BGE_DBG_NEMO	/* debug flag for this code	*/

/*
 * These routines provide all the functionality required by the
 * corresponding GLD entry points, but don't update the GLD state
 * so they can be called internally without disturbing our record
 * of what GLD thinks we should be doing ...
 */

/*
 *	bge_reset() -- reset h/w & rings to initial state
 */
static int
#ifdef BGE_IPMI_ASF
bge_reset(bge_t *bgep, uint_t asf_mode)
#else
bge_reset(bge_t *bgep)
#endif
{
	uint32_t ring;
	int retval;

	BGE_TRACE(("bge_reset($%p)", (void *)bgep));

	ASSERT(mutex_owned(bgep->genlock));

	/*
	 * Grab all the other mutexes in the world (this should
	 * ensure no other threads are manipulating driver state)
	 */
	for (ring = 0; ring < BGE_RECV_RINGS_MAX; ++ring)
		mutex_enter(bgep->recv[ring].rx_lock);
	for (ring = 0; ring < BGE_BUFF_RINGS_MAX; ++ring)
		mutex_enter(bgep->buff[ring].rf_lock);
	rw_enter(bgep->errlock, RW_WRITER);
	for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring)
		mutex_enter(bgep->send[ring].tx_lock);
	for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring)
		mutex_enter(bgep->send[ring].tc_lock);

#ifdef BGE_IPMI_ASF
	retval = bge_chip_reset(bgep, B_TRUE, asf_mode);
#else
	retval = bge_chip_reset(bgep, B_TRUE);
#endif
	bge_reinit_rings(bgep);

	/*
	 * Free the world ...
	 */
	for (ring = BGE_SEND_RINGS_MAX; ring-- > 0; )
		mutex_exit(bgep->send[ring].tc_lock);
	for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring)
		mutex_exit(bgep->send[ring].tx_lock);
	rw_exit(bgep->errlock);
	for (ring = BGE_BUFF_RINGS_MAX; ring-- > 0; )
		mutex_exit(bgep->buff[ring].rf_lock);
	for (ring = BGE_RECV_RINGS_MAX; ring-- > 0; )
		mutex_exit(bgep->recv[ring].rx_lock);

	BGE_DEBUG(("bge_reset($%p) done", (void *)bgep));
	return (retval);
}

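/*
 * Note: the acquisition order in bge_reset() above (rx_lock, rf_lock,
 * errlock, tx_lock, tc_lock, all taken under genlock) establishes the
 * lock hierarchy for the driver; the release sequence unwinds it so
 * that no other thread can observe a partially reinitialised ring.
 */
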
/*
 *	bge_stop() -- stop processing, don't reset h/w or rings
 */
static void
bge_stop(bge_t *bgep)
{
	BGE_TRACE(("bge_stop($%p)", (void *)bgep));

	ASSERT(mutex_owned(bgep->genlock));

#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled) {
		bgep->asf_pseudostop = B_TRUE;
	} else {
#endif
		bge_chip_stop(bgep, B_FALSE);
#ifdef BGE_IPMI_ASF
	}
#endif

	BGE_DEBUG(("bge_stop($%p) done", (void *)bgep));
}

/*
 *	bge_start() -- start transmitting/receiving
 */
static int
bge_start(bge_t *bgep, boolean_t reset_phys)
{
	int retval;

	BGE_TRACE(("bge_start($%p, %d)", (void *)bgep, reset_phys));

	ASSERT(mutex_owned(bgep->genlock));

	/*
	 * Start chip processing, including enabling interrupts
	 */
	retval = bge_chip_start(bgep, reset_phys);

	BGE_DEBUG(("bge_start($%p, %d) done", (void *)bgep, reset_phys));
	return (retval);
}

/*
 * bge_restart - restart transmitting/receiving after error or suspend
 */
int
bge_restart(bge_t *bgep, boolean_t reset_phys)
{
	int retval = DDI_SUCCESS;
	ASSERT(mutex_owned(bgep->genlock));

#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled) {
		if (bge_reset(bgep, ASF_MODE_POST_INIT) != DDI_SUCCESS)
			retval = DDI_FAILURE;
	} else
		if (bge_reset(bgep, ASF_MODE_NONE) != DDI_SUCCESS)
			retval = DDI_FAILURE;
#else
	if (bge_reset(bgep) != DDI_SUCCESS)
		retval = DDI_FAILURE;
#endif
	if (bgep->bge_mac_state == BGE_MAC_STARTED) {
		if (bge_start(bgep, reset_phys) != DDI_SUCCESS)
			retval = DDI_FAILURE;
		bgep->watchdog = 0;
		ddi_trigger_softintr(bgep->drain_id);
	}

	BGE_DEBUG(("bge_restart($%p, %d) done", (void *)bgep, reset_phys));
	return (retval);
}

/*
 * ========== Nemo-required management entry points ==========
 */

#undef	BGE_DBG
#define	BGE_DBG		BGE_DBG_NEMO	/* debug flag for this code	*/

/*
 *	bge_m_stop() -- stop transmitting/receiving
 */
static void
bge_m_stop(void *arg)
{
	bge_t *bgep = arg;		/* private device info	*/
	send_ring_t *srp;
	uint32_t ring;

	BGE_TRACE(("bge_m_stop($%p)", arg));

	/*
	 * Just stop processing, then record new GLD state
	 */
	mutex_enter(bgep->genlock);
	if (!(bgep->progress & PROGRESS_INTR)) {
		/* can happen during autorecovery */
		mutex_exit(bgep->genlock);
		return;
	}
	bge_stop(bgep);

	bgep->link_update_timer = 0;
	bgep->link_state = LINK_STATE_UNKNOWN;
	mac_link_update(bgep->mh, bgep->link_state);

	/*
	 * Free any tx buffers allocated during transmit processing.
	 */
#ifdef BGE_IPMI_ASF
	if (!bgep->asf_pseudostop)
#endif
	{
		rw_enter(bgep->errlock, RW_WRITER);
		for (ring = 0; ring < bgep->chipid.tx_rings; ++ring) {
			srp = &bgep->send[ring];
			mutex_enter(srp->tx_lock);
			if (srp->tx_array > 1)
				bge_free_txbuf_arrays(srp);
			mutex_exit(srp->tx_lock);
		}
		rw_exit(bgep->errlock);
	}
	bgep->bge_mac_state = BGE_MAC_STOPPED;
	BGE_DEBUG(("bge_m_stop($%p) done", arg));
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK)
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_UNAFFECTED);
	mutex_exit(bgep->genlock);
}

/*
 *	bge_m_start() -- start transmitting/receiving
 */
static int
bge_m_start(void *arg)
{
	bge_t *bgep = arg;		/* private device info	*/

	BGE_TRACE(("bge_m_start($%p)", arg));

	/*
	 * Start processing and record new GLD state
	 */
	mutex_enter(bgep->genlock);
	if (!(bgep->progress & PROGRESS_INTR)) {
		/* can happen during autorecovery */
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled) {
		if ((bgep->asf_status == ASF_STAT_RUN) &&
		    (bgep->asf_pseudostop)) {
			bgep->bge_mac_state = BGE_MAC_STARTED;
			mutex_exit(bgep->genlock);
			return (0);
		}
	}
	if (bge_reset(bgep, ASF_MODE_INIT) != DDI_SUCCESS) {
#else
	if (bge_reset(bgep) != DDI_SUCCESS) {
#endif
		(void) bge_check_acc_handle(bgep, bgep->cfg_handle);
		(void) bge_check_acc_handle(bgep, bgep->io_handle);
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	if (bge_start(bgep, B_TRUE) != DDI_SUCCESS) {
		(void) bge_check_acc_handle(bgep, bgep->cfg_handle);
		(void) bge_check_acc_handle(bgep, bgep->io_handle);
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	bgep->bge_mac_state = BGE_MAC_STARTED;
	BGE_DEBUG(("bge_m_start($%p) done", arg));

	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled) {
		if (bgep->asf_status != ASF_STAT_RUN) {
			/* start ASF heart beat */
			bgep->asf_timeout_id = timeout(bge_asf_heartbeat,
			    (void *)bgep,
			    drv_usectohz(BGE_ASF_HEARTBEAT_INTERVAL));
			bgep->asf_status = ASF_STAT_RUN;
		}
	}
#endif
	mutex_exit(bgep->genlock);

	return (0);
}

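/*
 * Note: bge_m_stop() and bge_m_start() record BGE_MAC_STOPPED /
 * BGE_MAC_STARTED in bge_mac_state; bge_restart() above consults that
 * record to decide whether the chip should be restarted after a reset.
 */
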
/*
 *	bge_m_unicst() -- set the physical network address
 */
static int
bge_m_unicst(void *arg, const uint8_t *macaddr)
{
	/*
	 * Request to set address in
	 * address slot 0, i.e., default address
	 */
	return (bge_unicst_set(arg, macaddr, 0));
}

/*
 *	bge_unicst_set() -- set the physical network address
 */
static int
bge_unicst_set(void *arg, const uint8_t *macaddr, mac_addr_slot_t slot)
{
	bge_t *bgep = arg;		/* private device info	*/

	BGE_TRACE(("bge_m_unicst_set($%p, %s)", arg,
	    ether_sprintf((void *)macaddr)));
	/*
	 * Remember the new current address in the driver state
	 * Sync the chip's idea of the address too ...
	 */
	mutex_enter(bgep->genlock);
	if (!(bgep->progress & PROGRESS_INTR)) {
		/* can happen during autorecovery */
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	ethaddr_copy(macaddr, bgep->curr_addr[slot].addr);
#ifdef BGE_IPMI_ASF
	if (bge_chip_sync(bgep, B_FALSE) == DDI_FAILURE) {
#else
	if (bge_chip_sync(bgep) == DDI_FAILURE) {
#endif
		(void) bge_check_acc_handle(bgep, bgep->cfg_handle);
		(void) bge_check_acc_handle(bgep, bgep->io_handle);
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled) {
		/*
		 * The above bge_chip_sync() call wrote the Ethernet MAC
		 * address registers, which destroyed the IPMI/ASF sideband.
		 * Here, we have to reset the chip to make the IPMI/ASF
		 * sideband work again.
		 */
		if (bgep->asf_status == ASF_STAT_RUN) {
			/*
			 * We must stop the ASF heart beat before
			 * bge_chip_stop(), otherwise some computers (ex.
			 * IBM HS20 blade server) may crash.
			 */
			bge_asf_update_status(bgep);
			bge_asf_stop_timer(bgep);
			bgep->asf_status = ASF_STAT_STOP;

			bge_asf_pre_reset_operations(bgep, BGE_INIT_RESET);
		}
		bge_chip_stop(bgep, B_FALSE);

		if (bge_restart(bgep, B_FALSE) == DDI_FAILURE) {
			(void) bge_check_acc_handle(bgep, bgep->cfg_handle);
			(void) bge_check_acc_handle(bgep, bgep->io_handle);
			ddi_fm_service_impact(bgep->devinfo,
			    DDI_SERVICE_DEGRADED);
			mutex_exit(bgep->genlock);
			return (EIO);
		}

		/*
		 * Start our ASF heartbeat counter as soon as possible.
		 */
		if (bgep->asf_status != ASF_STAT_RUN) {
			/* start ASF heart beat */
			bgep->asf_timeout_id = timeout(bge_asf_heartbeat,
			    (void *)bgep,
			    drv_usectohz(BGE_ASF_HEARTBEAT_INTERVAL));
			bgep->asf_status = ASF_STAT_RUN;
		}
	}
#endif
	BGE_DEBUG(("bge_m_unicst_set($%p) done", arg));
	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	mutex_exit(bgep->genlock);

	return (0);
}

/*
 * The following four routines are used as callbacks for multiple MAC
 * address support:
 *    -  bge_m_unicst_add(void *, mac_multi_addr_t *);
 *    -  bge_m_unicst_remove(void *, mac_addr_slot_t);
 *    -  bge_m_unicst_modify(void *, mac_multi_addr_t *);
 *    -  bge_m_unicst_get(void *, mac_multi_addr_t *);
 */

/*
 * bge_m_unicst_add() - will find an unused address slot, set the
 * address value to the one specified, reserve that slot and enable
 * the NIC to start filtering on the new MAC address.
 * Returns 0 on success.
 */
static int
bge_m_unicst_add(void *arg, mac_multi_addr_t *maddr)
{
	bge_t *bgep = arg;		/* private device info	*/
	mac_addr_slot_t slot;
	int err;

	if (mac_unicst_verify(bgep->mh,
	    maddr->mma_addr, maddr->mma_addrlen) == B_FALSE)
		return (EINVAL);

	mutex_enter(bgep->genlock);
	if (bgep->unicst_addr_avail == 0) {
		/* no slots available */
		mutex_exit(bgep->genlock);
		return (ENOSPC);
	}

	/*
	 * Primary/default address is in slot 0. The next three
	 * addresses are the multiple MAC addresses. So multiple
	 * MAC address 0 is in slot 1, 1 in slot 2, and so on.
	 * So the first multiple MAC address resides in slot 1.
	 */
	for (slot = 1; slot < bgep->unicst_addr_total; slot++) {
		if (bgep->curr_addr[slot].set == B_FALSE) {
			bgep->curr_addr[slot].set = B_TRUE;
			break;
		}
	}

	ASSERT(slot < bgep->unicst_addr_total);
	bgep->unicst_addr_avail--;
	mutex_exit(bgep->genlock);
	maddr->mma_slot = slot;

	if ((err = bge_unicst_set(bgep, maddr->mma_addr, slot)) != 0) {
		mutex_enter(bgep->genlock);
		bgep->curr_addr[slot].set = B_FALSE;
		bgep->unicst_addr_avail++;
		mutex_exit(bgep->genlock);
	}
	return (err);
}

/*
 * bge_m_unicst_remove() - removes a MAC address that was added by a
 * call to bge_m_unicst_add(). The slot number that was returned in
 * add() is passed in the call to remove the address.
 * Returns 0 on success.
 */
static int
bge_m_unicst_remove(void *arg, mac_addr_slot_t slot)
{
	bge_t *bgep = arg;		/* private device info	*/

	if (slot <= 0 || slot >= bgep->unicst_addr_total)
		return (EINVAL);

	mutex_enter(bgep->genlock);
	if (bgep->curr_addr[slot].set == B_TRUE) {
		bgep->curr_addr[slot].set = B_FALSE;
		bgep->unicst_addr_avail++;
		mutex_exit(bgep->genlock);
		/*
		 * Copy the default address to the passed slot
		 */
		return (bge_unicst_set(bgep, bgep->curr_addr[0].addr, slot));
	}
	mutex_exit(bgep->genlock);
	return (EINVAL);
}

/*
 * bge_m_unicst_modify() - modifies the value of an address that
 * has been added by bge_m_unicst_add(). The new address, address
 * length and the slot number that was returned in the call to add
 * should be passed to bge_m_unicst_modify(). mma_flags should be
 * set to 0. Returns 0 on success.
 */
static int
bge_m_unicst_modify(void *arg, mac_multi_addr_t *maddr)
{
	bge_t *bgep = arg;		/* private device info	*/
	mac_addr_slot_t slot;

	if (mac_unicst_verify(bgep->mh,
	    maddr->mma_addr, maddr->mma_addrlen) == B_FALSE)
		return (EINVAL);

	slot = maddr->mma_slot;

	if (slot <= 0 || slot >= bgep->unicst_addr_total)
		return (EINVAL);

	mutex_enter(bgep->genlock);
	if (bgep->curr_addr[slot].set == B_TRUE) {
		mutex_exit(bgep->genlock);
		return (bge_unicst_set(bgep, maddr->mma_addr, slot));
	}
	mutex_exit(bgep->genlock);

	return (EINVAL);
}

/*
 * bge_m_unicst_get() - will get the MAC address and all other
 * information related to the address slot passed in mac_multi_addr_t.
 * mma_flags should be set to 0 in the call.
 * On return, mma_flags can take the following values:
 * 1) MMAC_SLOT_UNUSED
 * 2) MMAC_SLOT_USED | MMAC_VENDOR_ADDR
 * 3) MMAC_SLOT_UNUSED | MMAC_VENDOR_ADDR
 * 4) MMAC_SLOT_USED
 */
static int
bge_m_unicst_get(void *arg, mac_multi_addr_t *maddr)
{
	bge_t *bgep = arg;		/* private device info	*/
	mac_addr_slot_t slot;

	slot = maddr->mma_slot;

	if (slot <= 0 || slot >= bgep->unicst_addr_total)
		return (EINVAL);

	mutex_enter(bgep->genlock);
	if (bgep->curr_addr[slot].set == B_TRUE) {
		ethaddr_copy(bgep->curr_addr[slot].addr,
		    maddr->mma_addr);
		maddr->mma_flags = MMAC_SLOT_USED;
	} else {
		maddr->mma_flags = MMAC_SLOT_UNUSED;
	}
	mutex_exit(bgep->genlock);

	return (0);
}

extern void bge_wake_factotum(bge_t *);

static boolean_t
bge_param_locked(mac_prop_id_t pr_num)
{
	/*
	 * All adv_* parameters are locked (read-only) while
	 * the device is in any sort of loopback mode ...
	 */
	switch (pr_num) {
	case DLD_PROP_ADV_1000FDX_CAP:
	case DLD_PROP_EN_1000FDX_CAP:
	case DLD_PROP_ADV_1000HDX_CAP:
	case DLD_PROP_EN_1000HDX_CAP:
	case DLD_PROP_ADV_100FDX_CAP:
	case DLD_PROP_EN_100FDX_CAP:
	case DLD_PROP_ADV_100HDX_CAP:
	case DLD_PROP_EN_100HDX_CAP:
	case DLD_PROP_ADV_10FDX_CAP:
	case DLD_PROP_EN_10FDX_CAP:
	case DLD_PROP_ADV_10HDX_CAP:
	case DLD_PROP_EN_10HDX_CAP:
	case DLD_PROP_AUTONEG:
	case DLD_PROP_FLOWCTRL:
		return (B_TRUE);
	}
	return (B_FALSE);
}

/*
 * callback functions for set/get of properties
 */
static int
bge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
    uint_t pr_valsize, const void *pr_val)
{
	bge_t *bgep = barg;
	int err = 0;
	uint32_t cur_mtu, new_mtu;
	uint_t maxsdu;
	link_flowctrl_t fl;

	mutex_enter(bgep->genlock);
	if (bgep->param_loop_mode != BGE_LOOP_NONE &&
	    bge_param_locked(pr_num)) {
		/*
		 * All adv_* parameters are locked (read-only)
		 * while the device is in any sort of loopback mode.
		 */
		mutex_exit(bgep->genlock);
		return (EBUSY);
	}
	if ((bgep->chipid.flags & CHIP_FLAG_SERDES) &&
	    ((pr_num == DLD_PROP_EN_100FDX_CAP) ||
	    (pr_num == DLD_PROP_EN_100HDX_CAP) ||
	    (pr_num == DLD_PROP_EN_10FDX_CAP) ||
	    (pr_num == DLD_PROP_EN_10HDX_CAP))) {
		/*
		 * These properties are read/write on copper,
		 * read-only and 0 on serdes.
		 */
		mutex_exit(bgep->genlock);
		return (ENOTSUP);
	}

	switch (pr_num) {
	case DLD_PROP_EN_1000FDX_CAP:
		bgep->param_en_1000fdx = *(uint8_t *)pr_val;
		bgep->param_adv_1000fdx = *(uint8_t *)pr_val;
		goto reprogram;
	case DLD_PROP_EN_1000HDX_CAP:
		bgep->param_en_1000hdx = *(uint8_t *)pr_val;
		bgep->param_adv_1000hdx = *(uint8_t *)pr_val;
		goto reprogram;
	case DLD_PROP_EN_100FDX_CAP:
		bgep->param_en_100fdx = *(uint8_t *)pr_val;
		bgep->param_adv_100fdx = *(uint8_t *)pr_val;
		goto reprogram;
	case DLD_PROP_EN_100HDX_CAP:
		bgep->param_en_100hdx = *(uint8_t *)pr_val;
		bgep->param_adv_100hdx = *(uint8_t *)pr_val;
		goto reprogram;
	case DLD_PROP_EN_10FDX_CAP:
		bgep->param_en_10fdx = *(uint8_t *)pr_val;
		bgep->param_adv_10fdx = *(uint8_t *)pr_val;
		goto reprogram;
	case DLD_PROP_EN_10HDX_CAP:
		bgep->param_en_10hdx = *(uint8_t *)pr_val;
		bgep->param_adv_10hdx = *(uint8_t *)pr_val;
reprogram:
		if (err == 0 && bge_reprogram(bgep) == IOC_INVAL)
			err = EINVAL;
		break;
	case DLD_PROP_ADV_1000FDX_CAP:
	case DLD_PROP_ADV_1000HDX_CAP:
	case DLD_PROP_ADV_100FDX_CAP:
	case DLD_PROP_ADV_100HDX_CAP:
	case DLD_PROP_ADV_10FDX_CAP:
	case DLD_PROP_ADV_10HDX_CAP:
	case DLD_PROP_STATUS:
	case DLD_PROP_SPEED:
	case DLD_PROP_DUPLEX:
		err = ENOTSUP;	/* read-only prop. Can't set this */
		break;
	case DLD_PROP_AUTONEG:
		bgep->param_adv_autoneg = *(uint8_t *)pr_val;
		if (bge_reprogram(bgep) == IOC_INVAL)
			err = EINVAL;
		break;
	case DLD_PROP_MTU:
		cur_mtu = bgep->chipid.default_mtu;
		bcopy(pr_val, &new_mtu, sizeof (new_mtu));

		if (new_mtu == cur_mtu) {
			err = 0;
			break;
		}
		if (new_mtu < BGE_DEFAULT_MTU ||
		    new_mtu > BGE_MAXIMUM_MTU) {
			err = EINVAL;
			break;
		}
		if ((new_mtu > BGE_DEFAULT_MTU) &&
		    (bgep->chipid.flags & CHIP_FLAG_NO_JUMBO)) {
			err = EINVAL;
			break;
		}
		if (bgep->bge_mac_state == BGE_MAC_STARTED) {
			err = EBUSY;
			break;
		}
		bgep->chipid.default_mtu = new_mtu;
		if (bge_chip_id_init(bgep)) {
			err = EINVAL;
			break;
		}
		maxsdu = bgep->chipid.ethmax_size -
		    sizeof (struct ether_header);
		err = mac_maxsdu_update(bgep->mh, maxsdu);
		if (err == 0) {
			bgep->bge_dma_error = B_TRUE;
			bgep->manual_reset = B_TRUE;
			bge_chip_stop(bgep, B_TRUE);
			bge_wake_factotum(bgep);
			err = 0;
		}
		break;
	case DLD_PROP_FLOWCTRL:
		bcopy(pr_val, &fl, sizeof (fl));
		switch (fl) {
		default:
			err = ENOTSUP;
			break;
		case LINK_FLOWCTRL_NONE:
			bgep->param_adv_pause = 0;
			bgep->param_adv_asym_pause = 0;

			bgep->param_link_rx_pause = B_FALSE;
			bgep->param_link_tx_pause = B_FALSE;
			break;
		case LINK_FLOWCTRL_RX:
			if (!((bgep->param_lp_pause == 0) &&
			    (bgep->param_lp_asym_pause == 1))) {
				err = EINVAL;
				break;
			}
			bgep->param_adv_pause = 1;
			bgep->param_adv_asym_pause = 1;

			bgep->param_link_rx_pause = B_TRUE;
			bgep->param_link_tx_pause = B_FALSE;
			break;
		case LINK_FLOWCTRL_TX:
			if (!((bgep->param_lp_pause == 1) &&
			    (bgep->param_lp_asym_pause == 1))) {
				err = EINVAL;
				break;
			}
			bgep->param_adv_pause = 0;
			bgep->param_adv_asym_pause = 1;

			bgep->param_link_rx_pause = B_FALSE;
			bgep->param_link_tx_pause = B_TRUE;
			break;
		case LINK_FLOWCTRL_BI:
			if (bgep->param_lp_pause != 1) {
				err = EINVAL;
				break;
			}
			bgep->param_adv_pause = 1;

			bgep->param_link_rx_pause = B_TRUE;
			bgep->param_link_tx_pause = B_TRUE;
			break;
		}

		if (err == 0) {
			if (bge_reprogram(bgep) == IOC_INVAL)
				err = EINVAL;
		}

		break;
	case DLD_PROP_PRIVATE:
		err = bge_set_priv_prop(bgep, pr_name, pr_valsize,
		    pr_val);
		break;
	default:
		err = ENOTSUP;
		break;
	}
	mutex_exit(bgep->genlock);
	return (err);
}

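/*
 * Note: the DLD_PROP_FLOWCTRL cases above map the requested mode onto
 * the advertised PAUSE bits roughly as follows (the lp_* values are
 * the link partner's abilities, checked before a change is allowed):
 *
 *	mode	adv_pause	adv_asym_pause	rx_pause  tx_pause
 *	NONE	    0		      0		   no	     no
 *	RX	    1		      1		   yes	     no
 *	TX	    0		      1		   no	     yes
 *	BI	    1		  (unchanged)	   yes	     yes
 */
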
static int
bge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
    uint_t pr_flags, uint_t pr_valsize, void *pr_val)
{
	bge_t *bgep = barg;
	int err = 0;
	link_flowctrl_t fl;
	uint64_t speed;
	int flags = bgep->chipid.flags;
	boolean_t is_default = (pr_flags & DLD_DEFAULT);

	if (pr_valsize == 0)
		return (EINVAL);
	bzero(pr_val, pr_valsize);
	switch (pr_num) {
	case DLD_PROP_DUPLEX:
		if (pr_valsize < sizeof (link_duplex_t))
			return (EINVAL);
		bcopy(&bgep->param_link_duplex, pr_val,
		    sizeof (link_duplex_t));
		break;
	case DLD_PROP_SPEED:
		if (pr_valsize < sizeof (speed))
			return (EINVAL);
		speed = bgep->param_link_speed * 1000000ull;
		bcopy(&speed, pr_val, sizeof (speed));
		break;
	case DLD_PROP_STATUS:
		if (pr_valsize < sizeof (link_state_t))
			return (EINVAL);
		bcopy(&bgep->link_state, pr_val,
		    sizeof (link_state_t));
		break;
	case DLD_PROP_AUTONEG:
		if (is_default)
			*(uint8_t *)pr_val = 1;
		else
			*(uint8_t *)pr_val = bgep->param_adv_autoneg;
		break;
	case DLD_PROP_FLOWCTRL:
		if (pr_valsize < sizeof (fl))
			return (EINVAL);
		if (is_default) {
			fl = LINK_FLOWCTRL_BI;
			bcopy(&fl, pr_val, sizeof (fl));
			break;
		}

		if (bgep->param_link_rx_pause &&
		    !bgep->param_link_tx_pause)
			fl = LINK_FLOWCTRL_RX;

		if (!bgep->param_link_rx_pause &&
		    !bgep->param_link_tx_pause)
			fl = LINK_FLOWCTRL_NONE;

		if (!bgep->param_link_rx_pause &&
		    bgep->param_link_tx_pause)
			fl = LINK_FLOWCTRL_TX;

		if (bgep->param_link_rx_pause &&
		    bgep->param_link_tx_pause)
			fl = LINK_FLOWCTRL_BI;
		bcopy(&fl, pr_val, sizeof (fl));
		break;
	case DLD_PROP_ADV_1000FDX_CAP:
		if (is_default)
			*(uint8_t *)pr_val = 1;
		else
			*(uint8_t *)pr_val = bgep->param_adv_1000fdx;
		break;
	case DLD_PROP_EN_1000FDX_CAP:
		if (is_default)
			*(uint8_t *)pr_val = 1;
		else
			*(uint8_t *)pr_val = bgep->param_en_1000fdx;
		break;
	case DLD_PROP_ADV_1000HDX_CAP:
		if (is_default)
			*(uint8_t *)pr_val = 1;
		else
			*(uint8_t *)pr_val = bgep->param_adv_1000hdx;
		break;
	case DLD_PROP_EN_1000HDX_CAP:
		if (is_default)
			*(uint8_t *)pr_val = 1;
		else
			*(uint8_t *)pr_val = bgep->param_en_1000hdx;
		break;
	case DLD_PROP_ADV_100FDX_CAP:
		if (is_default) {
			*(uint8_t *)pr_val =
			    ((flags & CHIP_FLAG_SERDES) ? 0 : 1);
		} else {
			*(uint8_t *)pr_val = bgep->param_adv_100fdx;
		}
		break;
	case DLD_PROP_EN_100FDX_CAP:
		if (is_default) {
			*(uint8_t *)pr_val =
			    ((flags & CHIP_FLAG_SERDES) ? 0 : 1);
		} else {
			*(uint8_t *)pr_val = bgep->param_en_100fdx;
		}
		break;
	case DLD_PROP_ADV_100HDX_CAP:
		if (is_default) {
			*(uint8_t *)pr_val =
			    ((flags & CHIP_FLAG_SERDES) ? 0 : 1);
		} else {
			*(uint8_t *)pr_val = bgep->param_adv_100hdx;
		}
		break;
	case DLD_PROP_EN_100HDX_CAP:
		if (is_default) {
			*(uint8_t *)pr_val =
			    ((flags & CHIP_FLAG_SERDES) ? 0 : 1);
		} else {
			*(uint8_t *)pr_val = bgep->param_en_100hdx;
		}
		break;
	case DLD_PROP_ADV_10FDX_CAP:
		if (is_default) {
			*(uint8_t *)pr_val =
			    ((flags & CHIP_FLAG_SERDES) ? 0 : 1);
		} else {
			*(uint8_t *)pr_val = bgep->param_adv_10fdx;
		}
		break;
	case DLD_PROP_EN_10FDX_CAP:
		if (is_default) {
			*(uint8_t *)pr_val =
			    ((flags & CHIP_FLAG_SERDES) ? 0 : 1);
		} else {
			*(uint8_t *)pr_val = bgep->param_en_10fdx;
		}
		break;
	case DLD_PROP_ADV_10HDX_CAP:
		if (is_default) {
			*(uint8_t *)pr_val =
			    ((flags & CHIP_FLAG_SERDES) ? 0 : 1);
		} else {
			*(uint8_t *)pr_val = bgep->param_adv_10hdx;
		}
		break;
	case DLD_PROP_EN_10HDX_CAP:
		if (is_default) {
			*(uint8_t *)pr_val =
			    ((flags & CHIP_FLAG_SERDES) ? 0 : 1);
		} else {
			*(uint8_t *)pr_val = bgep->param_en_10hdx;
		}
		break;
	case DLD_PROP_ADV_100T4_CAP:
	case DLD_PROP_EN_100T4_CAP:
		*(uint8_t *)pr_val = 0;
		break;
	case DLD_PROP_PRIVATE:
		err = bge_get_priv_prop(bgep, pr_name, pr_flags,
		    pr_valsize, pr_val);
		return (err);
	default:
		return (ENOTSUP);
	}
	return (0);
}

/* ARGSUSED */
static int
bge_set_priv_prop(bge_t *bgep, const char *pr_name, uint_t pr_valsize,
    const void *pr_val)
{
	int err = 0;
	long result;

	if (strcmp(pr_name, "_adv_pause_cap") == 0) {
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
		if (result > 1 || result < 0) {
			err = EINVAL;
		} else {
			bgep->param_adv_pause = result;
			if (bge_reprogram(bgep) == IOC_INVAL)
				err = EINVAL;
		}
		return (err);
	}
	if (strcmp(pr_name, "_adv_asym_pause_cap") == 0) {
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
		if (result > 1 || result < 0) {
			err = EINVAL;
		} else {
			bgep->param_adv_asym_pause = result;
			if (bge_reprogram(bgep) == IOC_INVAL)
				err = EINVAL;
		}
		return (err);
	}
	if (strcmp(pr_name, "_drain_max") == 0) {

		/*
		 * On the Tx side, we normally have to update the h/w
		 * register once per packet transmitted. The drain_max
		 * parameter is used to reduce these register accesses:
		 * it controls the maximum number of packets that we will
		 * hold before updating the bge h/w to trigger the h/w
		 * transmit. The bge chipset usually has a max of 512 Tx
		 * descriptors, thus the upper bound on drain_max is 512.
		 */
		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
		if (result > 512 || result < 1)
			err = EINVAL;
		else {
			bgep->param_drain_max = (uint32_t)result;
			if (bge_reprogram(bgep) == IOC_INVAL)
				err = EINVAL;
		}
		return (err);
	}
	if (strcmp(pr_name, "_msi_cnt") == 0) {

		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
		if (result > 7 || result < 0)
			err = EINVAL;
		else {
			bgep->param_msi_cnt = (uint32_t)result;
			if (bge_reprogram(bgep) == IOC_INVAL)
				err = EINVAL;
		}
		return (err);
	}
	if (strcmp(pr_name, "_intr_coalesce_blank_time") == 0) {
		if (ddi_strtol(pr_val, (char **)NULL, 0, &result) != 0)
			return (EINVAL);

		bgep->chipid.rx_ticks_norm = result;
		return (0);
	}

	if (strcmp(pr_name, "_intr_coalesce_pkt_cnt") == 0) {
		if (ddi_strtol(pr_val, (char **)NULL, 0, &result) != 0)
			return (EINVAL);

		bgep->chipid.rx_count_norm = result;
		return (0);
	}
	return (ENOTSUP);
}

static int
bge_get_priv_prop(bge_t *bge, const char *pr_name, uint_t pr_flags,
    uint_t pr_valsize, void *pr_val)
{
	int err = ENOTSUP;
	boolean_t is_default = (pr_flags & DLD_DEFAULT);
	int value;

	if (strcmp(pr_name, "_adv_pause_cap") == 0) {
		value = (is_default? 1 : bge->param_adv_pause);
		err = 0;
		goto done;
	}
	if (strcmp(pr_name, "_adv_asym_pause_cap") == 0) {
		value = (is_default? 1 : bge->param_adv_asym_pause);
		err = 0;
		goto done;
	}
	if (strcmp(pr_name, "_drain_max") == 0) {
		value = (is_default? 64 : bge->param_drain_max);
		err = 0;
		goto done;
	}
	if (strcmp(pr_name, "_msi_cnt") == 0) {
		value = (is_default? 0 : bge->param_msi_cnt);
		err = 0;
		goto done;
	}

	if (strcmp(pr_name, "_intr_coalesce_blank_time") == 0) {
		value = (is_default? bge_rx_ticks_norm :
		    bge->chipid.rx_ticks_norm);
		err = 0;
		goto done;
	}

	if (strcmp(pr_name, "_intr_coalesce_pkt_cnt") == 0) {
		value = (is_default? bge_rx_count_norm :
		    bge->chipid.rx_count_norm);
		err = 0;
		goto done;
	}

done:
	if (err == 0) {
		(void) snprintf(pr_val, pr_valsize, "%d", value);
	}
	return (err);
}

/*
 * Compute the index of the required bit in the multicast hash map.
 * This must mirror the way the hardware actually does it!
 * See Broadcom document 570X-PG102-R page 125.
 */
static uint32_t
bge_hash_index(const uint8_t *mca)
{
	uint32_t hash;

	CRC32(hash, mca, ETHERADDRL, -1U, crc32_table);

	return (hash);
}

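/*
 * For illustration (with a hypothetical BGE_HASH_TABLE_SIZE of 128):
 * a CRC32 result of 0x000008ad would give index 0x2d (45), so
 * bge_m_multicst() below would set bit (45 % 32) = 13 in word
 * (45 / 32) = 1 of the hash map.
 */
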
/*
 *	bge_m_multicst() -- enable/disable a multicast address
 */
static int
bge_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
{
	bge_t *bgep = arg;		/* private device info	*/
	uint32_t hash;
	uint32_t index;
	uint32_t word;
	uint32_t bit;
	uint8_t *refp;

	BGE_TRACE(("bge_m_multicst($%p, %s, %s)", arg,
	    (add) ? "add" : "remove", ether_sprintf((void *)mca)));

	/*
	 * Precalculate all required masks, pointers etc ...
	 */
	hash = bge_hash_index(mca);
	index = hash % BGE_HASH_TABLE_SIZE;
	word = index/32u;
	bit = 1 << (index % 32u);
	refp = &bgep->mcast_refs[index];

	BGE_DEBUG(("bge_m_multicst: hash 0x%x index %d (%d:0x%x) = %d",
	    hash, index, word, bit, *refp));

	/*
	 * We must set the appropriate bit in the hash map (and the
	 * corresponding h/w register) when the refcount goes from 0
	 * to >0, and clear it when the last ref goes away (refcount
	 * goes from >0 back to 0).  If we change the hash map, we
	 * must also update the chip's hardware map registers.
	 */
	mutex_enter(bgep->genlock);
	if (!(bgep->progress & PROGRESS_INTR)) {
		/* can happen during autorecovery */
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	if (add) {
		if ((*refp)++ == 0) {
			bgep->mcast_hash[word] |= bit;
#ifdef BGE_IPMI_ASF
			if (bge_chip_sync(bgep, B_TRUE) == DDI_FAILURE) {
#else
			if (bge_chip_sync(bgep) == DDI_FAILURE) {
#endif
				(void) bge_check_acc_handle(bgep,
				    bgep->cfg_handle);
				(void) bge_check_acc_handle(bgep,
				    bgep->io_handle);
				ddi_fm_service_impact(bgep->devinfo,
				    DDI_SERVICE_DEGRADED);
				mutex_exit(bgep->genlock);
				return (EIO);
			}
		}
	} else {
		if (--(*refp) == 0) {
			bgep->mcast_hash[word] &= ~bit;
#ifdef BGE_IPMI_ASF
			if (bge_chip_sync(bgep, B_TRUE) == DDI_FAILURE) {
#else
			if (bge_chip_sync(bgep) == DDI_FAILURE) {
#endif
				(void) bge_check_acc_handle(bgep,
				    bgep->cfg_handle);
				(void) bge_check_acc_handle(bgep,
				    bgep->io_handle);
				ddi_fm_service_impact(bgep->devinfo,
				    DDI_SERVICE_DEGRADED);
				mutex_exit(bgep->genlock);
				return (EIO);
			}
		}
	}
	BGE_DEBUG(("bge_m_multicst($%p) done", arg));
	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	mutex_exit(bgep->genlock);

	return (0);
}

/*
 *	bge_m_promisc() -- set or reset promiscuous mode on the board
 *
 *	Program the hardware to enable/disable promiscuous and/or
 *	receive-all-multicast modes.
 */
static int
bge_m_promisc(void *arg, boolean_t on)
{
	bge_t *bgep = arg;

	BGE_TRACE(("bge_m_promisc_set($%p, %d)", arg, on));

	/*
	 * Store MAC layer specified mode and pass to chip layer to update h/w
	 */
	mutex_enter(bgep->genlock);
	if (!(bgep->progress & PROGRESS_INTR)) {
		/* can happen during autorecovery */
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	bgep->promisc = on;
#ifdef BGE_IPMI_ASF
	if (bge_chip_sync(bgep, B_TRUE) == DDI_FAILURE) {
#else
	if (bge_chip_sync(bgep) == DDI_FAILURE) {
#endif
		(void) bge_check_acc_handle(bgep, bgep->cfg_handle);
		(void) bge_check_acc_handle(bgep, bgep->io_handle);
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	BGE_DEBUG(("bge_m_promisc_set($%p) done", arg));
	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	mutex_exit(bgep->genlock);
	return (0);
}

/*ARGSUSED*/
static boolean_t
bge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
{
	bge_t *bgep = arg;

	switch (cap) {
	case MAC_CAPAB_HCKSUM: {
		uint32_t *txflags = cap_data;

		*txflags = HCKSUM_INET_FULL_V4 | HCKSUM_IPHDRCKSUM;
		break;
	}

	case MAC_CAPAB_POLL:
		/*
		 * There's nothing for us to fill in, simply returning
		 * B_TRUE stating that we support polling is sufficient.
		 */
		break;

	case MAC_CAPAB_MULTIADDRESS: {
		multiaddress_capab_t *mmacp = cap_data;

		mutex_enter(bgep->genlock);
		/*
		 * The number of MAC addresses made available by
		 * this capability is one less than the total as
		 * the primary address in slot 0 is counted in
		 * the total.
		 */
		mmacp->maddr_naddr = bgep->unicst_addr_total - 1;
		mmacp->maddr_naddrfree = bgep->unicst_addr_avail;
		/* No multiple factory addresses, set mma_flag to 0 */
		mmacp->maddr_flag = 0;
		mmacp->maddr_handle = bgep;
		mmacp->maddr_add = bge_m_unicst_add;
		mmacp->maddr_remove = bge_m_unicst_remove;
		mmacp->maddr_modify = bge_m_unicst_modify;
		mmacp->maddr_get = bge_m_unicst_get;
		mmacp->maddr_reserve = NULL;
		mutex_exit(bgep->genlock);
		break;
	}

	default:
		return (B_FALSE);
	}
	return (B_TRUE);
}

/*
 * Loopback ioctl code
 */

static lb_property_t loopmodes[] = {
	{ normal,	"normal",	BGE_LOOP_NONE		},
	{ external,	"1000Mbps",	BGE_LOOP_EXTERNAL_1000	},
	{ external,	"100Mbps",	BGE_LOOP_EXTERNAL_100	},
	{ external,	"10Mbps",	BGE_LOOP_EXTERNAL_10	},
	{ internal,	"PHY",		BGE_LOOP_INTERNAL_PHY	},
	{ internal,	"MAC",		BGE_LOOP_INTERNAL_MAC	}
};

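/*
 * Note: bge_loop_ioctl() below reports sizeof (loopmodes) via
 * LB_GET_INFO_SIZE and copies the whole table out via LB_GET_INFO,
 * so the set of loopback modes visible to userland is defined
 * entirely by this array.
 */
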
static enum ioc_reply
bge_set_loop_mode(bge_t *bgep, uint32_t mode)
{
	/*
	 * If the mode isn't being changed, there's nothing to do ...
	 */
	if (mode == bgep->param_loop_mode)
		return (IOC_ACK);

	/*
	 * Validate the requested mode and prepare a suitable message
	 * to explain the link down/up cycle that the change will
	 * probably induce ...
	 */
	switch (mode) {
	default:
		return (IOC_INVAL);

	case BGE_LOOP_NONE:
	case BGE_LOOP_EXTERNAL_1000:
	case BGE_LOOP_EXTERNAL_100:
	case BGE_LOOP_EXTERNAL_10:
	case BGE_LOOP_INTERNAL_PHY:
	case BGE_LOOP_INTERNAL_MAC:
		break;
	}

	/*
	 * All OK; tell the caller to reprogram
	 * the PHY and/or MAC for the new mode ...
	 */
	bgep->param_loop_mode = mode;
	return (IOC_RESTART_ACK);
}

static enum ioc_reply
bge_loop_ioctl(bge_t *bgep, queue_t *wq, mblk_t *mp, struct iocblk *iocp)
{
	lb_info_sz_t *lbsp;
	lb_property_t *lbpp;
	uint32_t *lbmp;
	int cmd;

	_NOTE(ARGUNUSED(wq))

	/*
	 * Validate format of ioctl
	 */
	if (mp->b_cont == NULL)
		return (IOC_INVAL);

	cmd = iocp->ioc_cmd;
	switch (cmd) {
	default:
		/* NOTREACHED */
		bge_error(bgep, "bge_loop_ioctl: invalid cmd 0x%x", cmd);
		return (IOC_INVAL);

	case LB_GET_INFO_SIZE:
		if (iocp->ioc_count != sizeof (lb_info_sz_t))
			return (IOC_INVAL);
		lbsp = (lb_info_sz_t *)mp->b_cont->b_rptr;
		*lbsp = sizeof (loopmodes);
		return (IOC_REPLY);

	case LB_GET_INFO:
		if (iocp->ioc_count != sizeof (loopmodes))
			return (IOC_INVAL);
		lbpp = (lb_property_t *)mp->b_cont->b_rptr;
		bcopy(loopmodes, lbpp, sizeof (loopmodes));
		return (IOC_REPLY);

	case LB_GET_MODE:
		if (iocp->ioc_count != sizeof (uint32_t))
			return (IOC_INVAL);
		lbmp = (uint32_t *)mp->b_cont->b_rptr;
		*lbmp = bgep->param_loop_mode;
		return (IOC_REPLY);

	case LB_SET_MODE:
		if (iocp->ioc_count != sizeof (uint32_t))
			return (IOC_INVAL);
		lbmp = (uint32_t *)mp->b_cont->b_rptr;
		return (bge_set_loop_mode(bgep, *lbmp));
	}
}

/*
 * Specific bge IOCTLs, the gld module handles the generic ones.
 */
static void
bge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
{
	bge_t *bgep = arg;
	struct iocblk *iocp;
	enum ioc_reply status;
	boolean_t need_privilege;
	int err;
	int cmd;

	/*
	 * Validate the command before bothering with the mutex ...
	 */
	iocp = (struct iocblk *)mp->b_rptr;
	iocp->ioc_error = 0;
	need_privilege = B_TRUE;
	cmd = iocp->ioc_cmd;
	switch (cmd) {
	default:
		miocnak(wq, mp, 0, EINVAL);
		return;

	case BGE_MII_READ:
	case BGE_MII_WRITE:
	case BGE_SEE_READ:
	case BGE_SEE_WRITE:
	case BGE_FLASH_READ:
	case BGE_FLASH_WRITE:
	case BGE_DIAG:
	case BGE_PEEK:
	case BGE_POKE:
	case BGE_PHY_RESET:
	case BGE_SOFT_RESET:
	case BGE_HARD_RESET:
		break;

	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
	case LB_GET_MODE:
		need_privilege = B_FALSE;
		/* FALLTHRU */
	case LB_SET_MODE:
		break;

	}

	if (need_privilege) {
		/*
		 * Check for specific net_config privilege on Solaris 10+.
		 */
		err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
		if (err != 0) {
			miocnak(wq, mp, 0, err);
			return;
		}
	}

	mutex_enter(bgep->genlock);
	if (!(bgep->progress & PROGRESS_INTR)) {
		/* can happen during autorecovery */
		mutex_exit(bgep->genlock);
		miocnak(wq, mp, 0, EIO);
		return;
	}

	switch (cmd) {
	default:
		_NOTE(NOTREACHED)
		status = IOC_INVAL;
		break;

	case BGE_MII_READ:
	case BGE_MII_WRITE:
	case BGE_SEE_READ:
	case BGE_SEE_WRITE:
	case BGE_FLASH_READ:
	case BGE_FLASH_WRITE:
	case BGE_DIAG:
	case BGE_PEEK:
	case BGE_POKE:
	case BGE_PHY_RESET:
	case BGE_SOFT_RESET:
	case BGE_HARD_RESET:
		status = bge_chip_ioctl(bgep, wq, mp, iocp);
		break;

	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
	case LB_GET_MODE:
	case LB_SET_MODE:
		status = bge_loop_ioctl(bgep, wq, mp, iocp);
		break;

	}

	/*
	 * Do we need to reprogram the PHY and/or the MAC?
	 * Do it now, while we still have the mutex.
	 *
	 * Note: update the PHY first, 'cos it controls the
	 * speed/duplex parameters that the MAC code uses.
	 */
	switch (status) {
	case IOC_RESTART_REPLY:
	case IOC_RESTART_ACK:
		if (bge_reprogram(bgep) == IOC_INVAL)
			status = IOC_INVAL;
		break;
	}

	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		status = IOC_INVAL;
	}
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		status = IOC_INVAL;
	}
	mutex_exit(bgep->genlock);

	/*
	 * Finally, decide how to reply
	 */
	switch (status) {
	default:
	case IOC_INVAL:
		/*
		 * Error, reply with a NAK and EINVAL or the specified error
		 */
		miocnak(wq, mp, 0, iocp->ioc_error == 0 ?
		    EINVAL : iocp->ioc_error);
		break;

	case IOC_DONE:
		/*
		 * OK, reply already sent
		 */
		break;

	case IOC_RESTART_ACK:
	case IOC_ACK:
		/*
		 * OK, reply with an ACK
		 */
		miocack(wq, mp, 0, 0);
		break;

	case IOC_RESTART_REPLY:
	case IOC_REPLY:
		/*
		 * OK, send prepared reply as ACK or NAK
		 */
		mp->b_datap->db_type = iocp->ioc_error == 0 ?
		    M_IOCACK : M_IOCNAK;
		qreply(wq, mp);
		break;
	}
}

static void
bge_resources_add(bge_t *bgep, time_t time, uint_t pkt_cnt)
{

	recv_ring_t *rrp;
	mac_rx_fifo_t mrf;
	int ring;

	/*
	 * Register Rx rings as resources and save mac
	 * resource id for future reference
	 */
	mrf.mrf_type = MAC_RX_FIFO;
	mrf.mrf_blank = bge_chip_blank;
	mrf.mrf_arg = (void *)bgep;
	mrf.mrf_normal_blank_time = time;
	mrf.mrf_normal_pkt_count = pkt_cnt;

	for (ring = 0; ring < bgep->chipid.rx_rings; ring++) {
		rrp = &bgep->recv[ring];
		rrp->handle = mac_resource_add(bgep->mh,
		    (mac_resource_t *)&mrf);
	}
}

static void
bge_m_resources(void *arg)
{
	bge_t *bgep = arg;

	mutex_enter(bgep->genlock);

	bge_resources_add(bgep, bgep->chipid.rx_ticks_norm,
	    bgep->chipid.rx_count_norm);
	mutex_exit(bgep->genlock);
}

/*
 * ========== Per-instance setup/teardown code ==========
 */

#undef	BGE_DBG
#define	BGE_DBG		BGE_DBG_INIT	/* debug flag for this code	*/

/*
 * Allocate an area of memory and a DMA handle for accessing it
 */
static int
bge_alloc_dma_mem(bge_t *bgep, size_t memsize, ddi_device_acc_attr_t *attr_p,
    uint_t dma_flags, dma_area_t *dma_p)
{
	caddr_t va;
	int err;

	BGE_TRACE(("bge_alloc_dma_mem($%p, %ld, $%p, 0x%x, $%p)",
	    (void *)bgep, memsize, attr_p, dma_flags, dma_p));

	/*
	 * Allocate handle
	 */
	err = ddi_dma_alloc_handle(bgep->devinfo, &dma_attr,
	    DDI_DMA_DONTWAIT, NULL, &dma_p->dma_hdl);
	if (err != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * Allocate memory
	 */
	err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize, attr_p,
	    dma_flags, DDI_DMA_DONTWAIT, NULL, &va, &dma_p->alength,
	    &dma_p->acc_hdl);
	if (err != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * Bind the two together
	 */
	dma_p->mem_va = va;
	err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL,
	    va, dma_p->alength, dma_flags, DDI_DMA_DONTWAIT, NULL,
	    &dma_p->cookie, &dma_p->ncookies);

	BGE_DEBUG(("bge_alloc_dma_mem(): bind %d bytes; err %d, %d cookies",
	    dma_p->alength, err, dma_p->ncookies));

	if (err != DDI_DMA_MAPPED || dma_p->ncookies != 1)
		return (DDI_FAILURE);

	dma_p->nslots = ~0U;
	dma_p->size = ~0U;
	dma_p->token = ~0U;
	dma_p->offset = 0;
	return (DDI_SUCCESS);
}

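/*
 * Note: the nslots/size/token fields are filled with ~0U above,
 * presumably to mark the area as one as-yet-unsliced chunk;
 * bge_slice_chunk() below overwrites them with real values as
 * slices are carved off.
 */
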
/*
 * Free one allocated area of DMAable memory
 */
static void
bge_free_dma_mem(dma_area_t *dma_p)
{
	if (dma_p->dma_hdl != NULL) {
		if (dma_p->ncookies) {
			(void) ddi_dma_unbind_handle(dma_p->dma_hdl);
			dma_p->ncookies = 0;
		}
		ddi_dma_free_handle(&dma_p->dma_hdl);
		dma_p->dma_hdl = NULL;
	}

	if (dma_p->acc_hdl != NULL) {
		ddi_dma_mem_free(&dma_p->acc_hdl);
		dma_p->acc_hdl = NULL;
	}
}

/*
 * Utility routine to carve a slice off a chunk of allocated memory,
 * updating the chunk descriptor accordingly.  The size of the slice
 * is given by the product of the <qty> and <size> parameters.
 */
static void
bge_slice_chunk(dma_area_t *slice, dma_area_t *chunk,
    uint32_t qty, uint32_t size)
{
	static uint32_t sequence = 0xbcd5704a;
	size_t totsize;

	totsize = qty*size;
	ASSERT(size >= 0);
	ASSERT(totsize <= chunk->alength);

	*slice = *chunk;
	slice->nslots = qty;
	slice->size = size;
	slice->alength = totsize;
	slice->token = ++sequence;

	chunk->mem_va = (caddr_t)chunk->mem_va + totsize;
	chunk->alength -= totsize;
	chunk->offset += totsize;
	chunk->cookie.dmac_laddress += totsize;
	chunk->cookie.dmac_size -= totsize;
}

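/*
 * For illustration (hypothetical numbers): slicing with qty = 4 and
 * size = 256 takes 1024 bytes off the front of <chunk>, leaves
 * <slice> describing exactly that region (nslots 4, size 256, a
 * fresh token), and advances the chunk's virtual address, DMA cookie
 * address and offset by 1024.  The ring-init routines below use this
 * to carve one descriptor or buffer at a time (qty = 1).
 */
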
0 : RCB_FLAG_RING_DISABLED; 2051 brp->hw_rcb.nic_ring_addr = nic_ring_addrs[ring]; 2052 2053 /* 2054 * Other one-off initialisation of per-ring data 2055 */ 2056 brp->bgep = bgep; 2057 bsp = DMA_VPTR(bgep->status_block); 2058 brp->cons_index_p = &bsp->buff_cons_index[buff_cons_xref[ring]]; 2059 brp->chip_mbx_reg = mailbox_regs[ring]; 2060 mutex_init(brp->rf_lock, NULL, MUTEX_DRIVER, 2061 DDI_INTR_PRI(bgep->intr_pri)); 2062 2063 /* 2064 * Allocate the array of s/w Receive Buffer Descriptors 2065 */ 2066 srbdp = kmem_zalloc(nslots*sizeof (*srbdp), KM_SLEEP); 2067 brp->sw_rbds = srbdp; 2068 2069 /* 2070 * Now initialise each array element once and for all 2071 */ 2072 for (split = 0; split < BGE_SPLIT; ++split) { 2073 pbuf = brp->buf[split]; 2074 for (slot = 0; slot < nslots/BGE_SPLIT; ++srbdp, ++slot) 2075 bge_slice_chunk(&srbdp->pbuf, &pbuf, 1, bufsize); 2076 ASSERT(pbuf.alength == 0); 2077 } 2078 } 2079 2080 /* 2081 * Clean up initialisation done above before the memory is freed 2082 */ 2083 static void 2084 bge_fini_buff_ring(bge_t *bgep, uint64_t ring) 2085 { 2086 buff_ring_t *brp; 2087 sw_rbd_t *srbdp; 2088 2089 BGE_TRACE(("bge_fini_buff_ring($%p, %d)", 2090 (void *)bgep, ring)); 2091 2092 brp = &bgep->buff[ring]; 2093 srbdp = brp->sw_rbds; 2094 kmem_free(srbdp, brp->desc.nslots*sizeof (*srbdp)); 2095 2096 mutex_destroy(brp->rf_lock); 2097 } 2098 2099 /* 2100 * Initialise the specified Receive (Return) Ring, using the 2101 * information in the <dma_area> descriptors that it contains 2102 * to set up all the other fields. This routine should be called 2103 * only once for each ring. 2104 */ 2105 static void 2106 bge_init_recv_ring(bge_t *bgep, uint64_t ring) 2107 { 2108 recv_ring_t *rrp; 2109 bge_status_t *bsp; 2110 uint32_t nslots; 2111 2112 BGE_TRACE(("bge_init_recv_ring($%p, %d)", 2113 (void *)bgep, ring)); 2114 2115 /* 2116 * The chip architecture requires that receive return rings have 2117 * 512 or 1024 or 2048 elements per ring. See 570X-PG108-R page 103. 2118 */ 2119 rrp = &bgep->recv[ring]; 2120 nslots = rrp->desc.nslots; 2121 ASSERT(nslots == 0 || nslots == 512 || 2122 nslots == 1024 || nslots == 2048); 2123 2124 /* 2125 * Set up the copy of the h/w RCB 2126 */ 2127 rrp->hw_rcb.host_ring_addr = rrp->desc.cookie.dmac_laddress; 2128 rrp->hw_rcb.max_len = nslots; 2129 rrp->hw_rcb.flags = nslots > 0 ? 0 : RCB_FLAG_RING_DISABLED; 2130 rrp->hw_rcb.nic_ring_addr = 0; 2131 2132 /* 2133 * Other one-off initialisation of per-ring data 2134 */ 2135 rrp->bgep = bgep; 2136 bsp = DMA_VPTR(bgep->status_block); 2137 rrp->prod_index_p = RECV_INDEX_P(bsp, ring); 2138 rrp->chip_mbx_reg = RECV_RING_CONS_INDEX_REG(ring); 2139 mutex_init(rrp->rx_lock, NULL, MUTEX_DRIVER, 2140 DDI_INTR_PRI(bgep->intr_pri)); 2141 } 2142 2143 2144 /* 2145 * Clean up initialisation done above before the memory is freed 2146 */ 2147 static void 2148 bge_fini_recv_ring(bge_t *bgep, uint64_t ring) 2149 { 2150 recv_ring_t *rrp; 2151 2152 BGE_TRACE(("bge_fini_recv_ring($%p, %d)", 2153 (void *)bgep, ring)); 2154 2155 rrp = &bgep->recv[ring]; 2156 if (rrp->rx_softint) 2157 ddi_remove_softintr(rrp->rx_softint); 2158 mutex_destroy(rrp->rx_lock); 2159 } 2160 2161 /* 2162 * Initialise the specified Send Ring, using the information in the 2163 * <dma_area> descriptors that it contains to set up all the other 2164 * fields. This routine should be called only once for each ring. 
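 * Besides copying the h/w RCB and initialising the four per-ring
 * mutexes, for a non-empty ring this also allocates the s/w
 * descriptor arrays and carves the ring's descriptor area into one
 * <bge_sbd_t>-sized slice per slot.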
2165 */ 2166 static void 2167 bge_init_send_ring(bge_t *bgep, uint64_t ring) 2168 { 2169 send_ring_t *srp; 2170 bge_status_t *bsp; 2171 sw_sbd_t *ssbdp; 2172 dma_area_t desc; 2173 dma_area_t pbuf; 2174 uint32_t nslots; 2175 uint32_t slot; 2176 uint32_t split; 2177 sw_txbuf_t *txbuf; 2178 2179 BGE_TRACE(("bge_init_send_ring($%p, %d)", 2180 (void *)bgep, ring)); 2181 2182 /* 2183 * The chip architecture requires that host-based send rings 2184 * have 512 elements per ring. See 570X-PG102-R page 56. 2185 */ 2186 srp = &bgep->send[ring]; 2187 nslots = srp->desc.nslots; 2188 ASSERT(nslots == 0 || nslots == 512); 2189 2190 /* 2191 * Set up the copy of the h/w RCB 2192 */ 2193 srp->hw_rcb.host_ring_addr = srp->desc.cookie.dmac_laddress; 2194 srp->hw_rcb.max_len = nslots; 2195 srp->hw_rcb.flags = nslots > 0 ? 0 : RCB_FLAG_RING_DISABLED; 2196 srp->hw_rcb.nic_ring_addr = NIC_MEM_SHADOW_SEND_RING(ring, nslots); 2197 2198 /* 2199 * Other one-off initialisation of per-ring data 2200 */ 2201 srp->bgep = bgep; 2202 bsp = DMA_VPTR(bgep->status_block); 2203 srp->cons_index_p = SEND_INDEX_P(bsp, ring); 2204 srp->chip_mbx_reg = SEND_RING_HOST_INDEX_REG(ring); 2205 mutex_init(srp->tx_lock, NULL, MUTEX_DRIVER, 2206 DDI_INTR_PRI(bgep->intr_pri)); 2207 mutex_init(srp->txbuf_lock, NULL, MUTEX_DRIVER, 2208 DDI_INTR_PRI(bgep->intr_pri)); 2209 mutex_init(srp->freetxbuf_lock, NULL, MUTEX_DRIVER, 2210 DDI_INTR_PRI(bgep->intr_pri)); 2211 mutex_init(srp->tc_lock, NULL, MUTEX_DRIVER, 2212 DDI_INTR_PRI(bgep->intr_pri)); 2213 if (nslots == 0) 2214 return; 2215 2216 /* 2217 * Allocate the array of s/w Send Buffer Descriptors 2218 */ 2219 ssbdp = kmem_zalloc(nslots*sizeof (*ssbdp), KM_SLEEP); 2220 txbuf = kmem_zalloc(BGE_SEND_BUF_MAX*sizeof (*txbuf), KM_SLEEP); 2221 srp->txbuf_head = 2222 kmem_zalloc(BGE_SEND_BUF_MAX*sizeof (bge_queue_item_t), KM_SLEEP); 2223 srp->pktp = kmem_zalloc(BGE_SEND_BUF_MAX*sizeof (send_pkt_t), KM_SLEEP); 2224 srp->sw_sbds = ssbdp; 2225 srp->txbuf = txbuf; 2226 srp->tx_buffers = BGE_SEND_BUF_NUM; 2227 srp->tx_buffers_low = srp->tx_buffers / 4; 2228 if (bgep->chipid.snd_buff_size > BGE_SEND_BUFF_SIZE_DEFAULT) 2229 srp->tx_array_max = BGE_SEND_BUF_ARRAY_JUMBO; 2230 else 2231 srp->tx_array_max = BGE_SEND_BUF_ARRAY; 2232 srp->tx_array = 1; 2233 2234 /* 2235 * Chunk tx desc area 2236 */ 2237 desc = srp->desc; 2238 for (slot = 0; slot < nslots; ++ssbdp, ++slot) { 2239 bge_slice_chunk(&ssbdp->desc, &desc, 1, 2240 sizeof (bge_sbd_t)); 2241 } 2242 ASSERT(desc.alength == 0); 2243 2244 /* 2245 * Chunk tx buffer area 2246 */ 2247 for (split = 0; split < BGE_SPLIT; ++split) { 2248 pbuf = srp->buf[0][split]; 2249 for (slot = 0; slot < BGE_SEND_BUF_NUM/BGE_SPLIT; ++slot) { 2250 bge_slice_chunk(&txbuf->buf, &pbuf, 1, 2251 bgep->chipid.snd_buff_size); 2252 txbuf++; 2253 } 2254 ASSERT(pbuf.alength == 0); 2255 } 2256 } 2257 2258 /* 2259 * Clean up initialisation done above before the memory is freed 2260 */ 2261 static void 2262 bge_fini_send_ring(bge_t *bgep, uint64_t ring) 2263 { 2264 send_ring_t *srp; 2265 uint32_t array; 2266 uint32_t split; 2267 uint32_t nslots; 2268 2269 BGE_TRACE(("bge_fini_send_ring($%p, %d)", 2270 (void *)bgep, ring)); 2271 2272 srp = &bgep->send[ring]; 2273 mutex_destroy(srp->tc_lock); 2274 mutex_destroy(srp->freetxbuf_lock); 2275 mutex_destroy(srp->txbuf_lock); 2276 mutex_destroy(srp->tx_lock); 2277 nslots = srp->desc.nslots; 2278 if (nslots == 0) 2279 return; 2280 2281 for (array = 1; array < srp->tx_array; ++array) 2282 for (split = 0; split < BGE_SPLIT; ++split) 2283 
			bge_free_dma_mem(&srp->buf[array][split]);
	kmem_free(srp->sw_sbds, nslots*sizeof (*srp->sw_sbds));
	kmem_free(srp->txbuf_head, BGE_SEND_BUF_MAX*sizeof (*srp->txbuf_head));
	kmem_free(srp->txbuf, BGE_SEND_BUF_MAX*sizeof (*srp->txbuf));
	kmem_free(srp->pktp, BGE_SEND_BUF_MAX*sizeof (*srp->pktp));
	srp->sw_sbds = NULL;
	srp->txbuf_head = NULL;
	srp->txbuf = NULL;
	srp->pktp = NULL;
}

/*
 * Initialise all transmit, receive, and buffer rings.
 */
void
bge_init_rings(bge_t *bgep)
{
	uint32_t ring;

	BGE_TRACE(("bge_init_rings($%p)", (void *)bgep));

	/*
	 * Perform one-off initialisation of each ring ...
	 */
	for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring)
		bge_init_send_ring(bgep, ring);
	for (ring = 0; ring < BGE_RECV_RINGS_MAX; ++ring)
		bge_init_recv_ring(bgep, ring);
	for (ring = 0; ring < BGE_BUFF_RINGS_MAX; ++ring)
		bge_init_buff_ring(bgep, ring);
}

/*
 * Undo the work of bge_init_rings() above before the memory is freed
 */
void
bge_fini_rings(bge_t *bgep)
{
	uint32_t ring;

	BGE_TRACE(("bge_fini_rings($%p)", (void *)bgep));

	for (ring = 0; ring < BGE_BUFF_RINGS_MAX; ++ring)
		bge_fini_buff_ring(bgep, ring);
	for (ring = 0; ring < BGE_RECV_RINGS_MAX; ++ring)
		bge_fini_recv_ring(bgep, ring);
	for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring)
		bge_fini_send_ring(bgep, ring);
}

/*
 * Called from bge_m_stop() to free the tx buffers that were
 * allocated by the tx process.
 */
void
bge_free_txbuf_arrays(send_ring_t *srp)
{
	uint32_t array;
	uint32_t split;

	ASSERT(mutex_owned(srp->tx_lock));

	/*
	 * Free the extra tx buffer DMA area
	 */
	for (array = 1; array < srp->tx_array; ++array)
		for (split = 0; split < BGE_SPLIT; ++split)
			bge_free_dma_mem(&srp->buf[array][split]);

	/*
	 * Restore initial tx buffer numbers
	 */
	srp->tx_array = 1;
	srp->tx_buffers = BGE_SEND_BUF_NUM;
	srp->tx_buffers_low = srp->tx_buffers / 4;
	srp->tx_flow = 0;
	bzero(srp->pktp, BGE_SEND_BUF_MAX * sizeof (*srp->pktp));
}

/*
 * Called from the tx process to allocate more tx buffers
 */
bge_queue_item_t *
bge_alloc_txbuf_array(bge_t *bgep, send_ring_t *srp)
{
	bge_queue_t *txbuf_queue;
	bge_queue_item_t *txbuf_item_last;
	bge_queue_item_t *txbuf_item;
	bge_queue_item_t *txbuf_item_rtn;
	sw_txbuf_t *txbuf;
	dma_area_t area;
	size_t txbuffsize;
	uint32_t slot;
	uint32_t array;
	uint32_t split;
	int err;

	ASSERT(mutex_owned(srp->tx_lock));

	array = srp->tx_array;
	if (array >= srp->tx_array_max)
		return (NULL);

	/*
	 * Allocate memory & handles for TX buffers
	 */
	txbuffsize = BGE_SEND_BUF_NUM*bgep->chipid.snd_buff_size;
	ASSERT((txbuffsize % BGE_SPLIT) == 0);
	for (split = 0; split < BGE_SPLIT; ++split) {
		err = bge_alloc_dma_mem(bgep, txbuffsize/BGE_SPLIT,
		    &bge_data_accattr, DDI_DMA_WRITE | BGE_DMA_MODE,
		    &srp->buf[array][split]);
		if (err != DDI_SUCCESS) {
			/*
			 * Free the chunks already allocated, including
			 * any partial state left by the failed split
			 */
			for (slot = 0; slot <= split; ++slot)
				bge_free_dma_mem(&srp->buf[array][slot]);
			srp->tx_alloc_fail++;
			return (NULL);
		}
	}

	/*
	 * Chunk tx buffer area
	 */
	txbuf = srp->txbuf +
array*BGE_SEND_BUF_NUM; 2408 for (split = 0; split < BGE_SPLIT; ++split) { 2409 area = srp->buf[array][split]; 2410 for (slot = 0; slot < BGE_SEND_BUF_NUM/BGE_SPLIT; ++slot) { 2411 bge_slice_chunk(&txbuf->buf, &area, 1, 2412 bgep->chipid.snd_buff_size); 2413 txbuf++; 2414 } 2415 } 2416 2417 /* 2418 * Add above buffers to the tx buffer pop queue 2419 */ 2420 txbuf_item = srp->txbuf_head + array*BGE_SEND_BUF_NUM; 2421 txbuf = srp->txbuf + array*BGE_SEND_BUF_NUM; 2422 txbuf_item_last = NULL; 2423 for (slot = 0; slot < BGE_SEND_BUF_NUM; ++slot) { 2424 txbuf_item->item = txbuf; 2425 txbuf_item->next = txbuf_item_last; 2426 txbuf_item_last = txbuf_item; 2427 txbuf++; 2428 txbuf_item++; 2429 } 2430 txbuf_item = srp->txbuf_head + array*BGE_SEND_BUF_NUM; 2431 txbuf_item_rtn = txbuf_item; 2432 txbuf_item++; 2433 txbuf_queue = srp->txbuf_pop_queue; 2434 mutex_enter(txbuf_queue->lock); 2435 txbuf_item->next = txbuf_queue->head; 2436 txbuf_queue->head = txbuf_item_last; 2437 txbuf_queue->count += BGE_SEND_BUF_NUM - 1; 2438 mutex_exit(txbuf_queue->lock); 2439 2440 srp->tx_array++; 2441 srp->tx_buffers += BGE_SEND_BUF_NUM; 2442 srp->tx_buffers_low = srp->tx_buffers / 4; 2443 2444 return (txbuf_item_rtn); 2445 } 2446 2447 /* 2448 * This function allocates all the transmit and receive buffers 2449 * and descriptors, in four chunks. 2450 */ 2451 int 2452 bge_alloc_bufs(bge_t *bgep) 2453 { 2454 dma_area_t area; 2455 size_t rxbuffsize; 2456 size_t txbuffsize; 2457 size_t rxbuffdescsize; 2458 size_t rxdescsize; 2459 size_t txdescsize; 2460 uint32_t ring; 2461 uint32_t rx_rings = bgep->chipid.rx_rings; 2462 uint32_t tx_rings = bgep->chipid.tx_rings; 2463 int split; 2464 int err; 2465 2466 BGE_TRACE(("bge_alloc_bufs($%p)", 2467 (void *)bgep)); 2468 2469 rxbuffsize = BGE_STD_SLOTS_USED*bgep->chipid.std_buf_size; 2470 rxbuffsize += bgep->chipid.jumbo_slots*bgep->chipid.recv_jumbo_size; 2471 rxbuffsize += BGE_MINI_SLOTS_USED*BGE_MINI_BUFF_SIZE; 2472 2473 txbuffsize = BGE_SEND_BUF_NUM*bgep->chipid.snd_buff_size; 2474 txbuffsize *= tx_rings; 2475 2476 rxdescsize = rx_rings*bgep->chipid.recv_slots; 2477 rxdescsize *= sizeof (bge_rbd_t); 2478 2479 rxbuffdescsize = BGE_STD_SLOTS_USED; 2480 rxbuffdescsize += bgep->chipid.jumbo_slots; 2481 rxbuffdescsize += BGE_MINI_SLOTS_USED; 2482 rxbuffdescsize *= sizeof (bge_rbd_t); 2483 2484 txdescsize = tx_rings*BGE_SEND_SLOTS_USED; 2485 txdescsize *= sizeof (bge_sbd_t); 2486 txdescsize += sizeof (bge_statistics_t); 2487 txdescsize += sizeof (bge_status_t); 2488 txdescsize += BGE_STATUS_PADDING; 2489 2490 /* 2491 * Enable PCI relaxed ordering only for RX/TX data buffers 2492 */ 2493 if (bge_relaxed_ordering) 2494 dma_attr.dma_attr_flags |= DDI_DMA_RELAXED_ORDERING; 2495 2496 /* 2497 * Allocate memory & handles for RX buffers 2498 */ 2499 ASSERT((rxbuffsize % BGE_SPLIT) == 0); 2500 for (split = 0; split < BGE_SPLIT; ++split) { 2501 err = bge_alloc_dma_mem(bgep, rxbuffsize/BGE_SPLIT, 2502 &bge_data_accattr, DDI_DMA_READ | BGE_DMA_MODE, 2503 &bgep->rx_buff[split]); 2504 if (err != DDI_SUCCESS) 2505 return (DDI_FAILURE); 2506 } 2507 2508 /* 2509 * Allocate memory & handles for TX buffers 2510 */ 2511 ASSERT((txbuffsize % BGE_SPLIT) == 0); 2512 for (split = 0; split < BGE_SPLIT; ++split) { 2513 err = bge_alloc_dma_mem(bgep, txbuffsize/BGE_SPLIT, 2514 &bge_data_accattr, DDI_DMA_WRITE | BGE_DMA_MODE, 2515 &bgep->tx_buff[split]); 2516 if (err != DDI_SUCCESS) 2517 return (DDI_FAILURE); 2518 } 2519 2520 dma_attr.dma_attr_flags &= ~DDI_DMA_RELAXED_ORDERING; 2521 2522 /* 2523 * Allocate memory 
& handles for receive return rings
 */
	ASSERT((rxdescsize % rx_rings) == 0);
	for (split = 0; split < rx_rings; ++split) {
		err = bge_alloc_dma_mem(bgep, rxdescsize/rx_rings,
		    &bge_desc_accattr, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
		    &bgep->rx_desc[split]);
		if (err != DDI_SUCCESS)
			return (DDI_FAILURE);
	}

	/*
	 * Allocate memory & handles for buffer (producer) descriptor rings
	 */
	err = bge_alloc_dma_mem(bgep, rxbuffdescsize, &bge_desc_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &bgep->rx_desc[split]);
	if (err != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * Allocate memory & handles for TX descriptor rings,
	 * status block, and statistics area
	 */
	err = bge_alloc_dma_mem(bgep, txdescsize, &bge_desc_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &bgep->tx_desc);
	if (err != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * Now carve up each of the allocated areas ...
	 */
	for (split = 0; split < BGE_SPLIT; ++split) {
		area = bgep->rx_buff[split];
		bge_slice_chunk(&bgep->buff[BGE_STD_BUFF_RING].buf[split],
		    &area, BGE_STD_SLOTS_USED/BGE_SPLIT,
		    bgep->chipid.std_buf_size);
		bge_slice_chunk(&bgep->buff[BGE_JUMBO_BUFF_RING].buf[split],
		    &area, bgep->chipid.jumbo_slots/BGE_SPLIT,
		    bgep->chipid.recv_jumbo_size);
		bge_slice_chunk(&bgep->buff[BGE_MINI_BUFF_RING].buf[split],
		    &area, BGE_MINI_SLOTS_USED/BGE_SPLIT,
		    BGE_MINI_BUFF_SIZE);
	}

	for (split = 0; split < BGE_SPLIT; ++split) {
		area = bgep->tx_buff[split];
		for (ring = 0; ring < tx_rings; ++ring)
			bge_slice_chunk(&bgep->send[ring].buf[0][split],
			    &area, BGE_SEND_BUF_NUM/BGE_SPLIT,
			    bgep->chipid.snd_buff_size);
		for (; ring < BGE_SEND_RINGS_MAX; ++ring)
			bge_slice_chunk(&bgep->send[ring].buf[0][split],
			    &area, 0, bgep->chipid.snd_buff_size);
	}

	for (ring = 0; ring < rx_rings; ++ring)
		bge_slice_chunk(&bgep->recv[ring].desc, &bgep->rx_desc[ring],
		    bgep->chipid.recv_slots, sizeof (bge_rbd_t));

	area = bgep->rx_desc[rx_rings];
	for (; ring < BGE_RECV_RINGS_MAX; ++ring)
		bge_slice_chunk(&bgep->recv[ring].desc, &area,
		    0, sizeof (bge_rbd_t));
	bge_slice_chunk(&bgep->buff[BGE_STD_BUFF_RING].desc, &area,
	    BGE_STD_SLOTS_USED, sizeof (bge_rbd_t));
	bge_slice_chunk(&bgep->buff[BGE_JUMBO_BUFF_RING].desc, &area,
	    bgep->chipid.jumbo_slots, sizeof (bge_rbd_t));
	bge_slice_chunk(&bgep->buff[BGE_MINI_BUFF_RING].desc, &area,
	    BGE_MINI_SLOTS_USED, sizeof (bge_rbd_t));
	ASSERT(area.alength == 0);

	area = bgep->tx_desc;
	for (ring = 0; ring < tx_rings; ++ring)
		bge_slice_chunk(&bgep->send[ring].desc, &area,
		    BGE_SEND_SLOTS_USED, sizeof (bge_sbd_t));
	for (; ring < BGE_SEND_RINGS_MAX; ++ring)
		bge_slice_chunk(&bgep->send[ring].desc, &area,
		    0, sizeof (bge_sbd_t));
	bge_slice_chunk(&bgep->statistics, &area, 1, sizeof (bge_statistics_t));
	bge_slice_chunk(&bgep->status_block, &area, 1, sizeof (bge_status_t));
	ASSERT(area.alength == BGE_STATUS_PADDING);
	DMA_ZERO(bgep->status_block);

	return (DDI_SUCCESS);
}

/*
 * This routine frees the transmit and receive buffers and descriptors.
 * Make sure the chip is stopped before calling it!
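 * Note that only the top-level DMA areas allocated by bge_alloc_bufs()
 * are freed here; the per-ring slices carved out of them by
 * bge_slice_chunk() simply become invalid and must not be used again.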
2614 */ 2615 void 2616 bge_free_bufs(bge_t *bgep) 2617 { 2618 int split; 2619 2620 BGE_TRACE(("bge_free_bufs($%p)", 2621 (void *)bgep)); 2622 2623 bge_free_dma_mem(&bgep->tx_desc); 2624 for (split = 0; split < BGE_RECV_RINGS_SPLIT; ++split) 2625 bge_free_dma_mem(&bgep->rx_desc[split]); 2626 for (split = 0; split < BGE_SPLIT; ++split) 2627 bge_free_dma_mem(&bgep->tx_buff[split]); 2628 for (split = 0; split < BGE_SPLIT; ++split) 2629 bge_free_dma_mem(&bgep->rx_buff[split]); 2630 } 2631 2632 /* 2633 * Determine (initial) MAC address ("BIA") to use for this interface 2634 */ 2635 2636 static void 2637 bge_find_mac_address(bge_t *bgep, chip_id_t *cidp) 2638 { 2639 struct ether_addr sysaddr; 2640 char propbuf[8]; /* "true" or "false", plus NUL */ 2641 uchar_t *bytes; 2642 int *ints; 2643 uint_t nelts; 2644 int err; 2645 2646 BGE_TRACE(("bge_find_mac_address($%p)", 2647 (void *)bgep)); 2648 2649 BGE_DEBUG(("bge_find_mac_address: hw_mac_addr %012llx, => %s (%sset)", 2650 cidp->hw_mac_addr, 2651 ether_sprintf((void *)cidp->vendor_addr.addr), 2652 cidp->vendor_addr.set ? "" : "not ")); 2653 2654 /* 2655 * The "vendor's factory-set address" may already have 2656 * been extracted from the chip, but if the property 2657 * "local-mac-address" is set we use that instead. It 2658 * will normally be set by OBP, but it could also be 2659 * specified in a .conf file(!) 2660 * 2661 * There doesn't seem to be a way to define byte-array 2662 * properties in a .conf, so we check whether it looks 2663 * like an array of 6 ints instead. 2664 * 2665 * Then, we check whether it looks like an array of 6 2666 * bytes (which it should, if OBP set it). If we can't 2667 * make sense of it either way, we'll ignore it. 2668 */ 2669 err = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, bgep->devinfo, 2670 DDI_PROP_DONTPASS, localmac_propname, &ints, &nelts); 2671 if (err == DDI_PROP_SUCCESS) { 2672 if (nelts == ETHERADDRL) { 2673 while (nelts--) 2674 cidp->vendor_addr.addr[nelts] = ints[nelts]; 2675 cidp->vendor_addr.set = B_TRUE; 2676 } 2677 ddi_prop_free(ints); 2678 } 2679 2680 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, bgep->devinfo, 2681 DDI_PROP_DONTPASS, localmac_propname, &bytes, &nelts); 2682 if (err == DDI_PROP_SUCCESS) { 2683 if (nelts == ETHERADDRL) { 2684 while (nelts--) 2685 cidp->vendor_addr.addr[nelts] = bytes[nelts]; 2686 cidp->vendor_addr.set = B_TRUE; 2687 } 2688 ddi_prop_free(bytes); 2689 } 2690 2691 BGE_DEBUG(("bge_find_mac_address: +local %s (%sset)", 2692 ether_sprintf((void *)cidp->vendor_addr.addr), 2693 cidp->vendor_addr.set ? "" : "not ")); 2694 2695 /* 2696 * Look up the OBP property "local-mac-address?". Note that even 2697 * though its value is a string (which should be "true" or "false"), 2698 * it can't be decoded by ddi_prop_lookup_string(9F). So, we zero 2699 * the buffer first and then fetch the property as an untyped array; 2700 * this may or may not include a final NUL, but since there will 2701 * always be one left at the end of the buffer we can now treat it 2702 * as a string anyway. 2703 */ 2704 nelts = sizeof (propbuf); 2705 bzero(propbuf, nelts--); 2706 err = ddi_getlongprop_buf(DDI_DEV_T_ANY, bgep->devinfo, 2707 DDI_PROP_CANSLEEP, localmac_boolname, propbuf, (int *)&nelts); 2708 2709 /* 2710 * Now, if the address still isn't set from the hardware (SEEPROM) 2711 * or the OBP or .conf property, OR if the user has foolishly set 2712 * 'local-mac-address? = false', use "the system address" instead 2713 * (but only if it's non-null i.e. has been set from the IDPROM). 
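	 * To summarise the precedence implemented below: a netboot-created
	 * "mac-address" property overrides everything; otherwise
	 * "local-mac-address" (from OBP or a .conf file) overrides the
	 * chip's own SEEPROM address; and the IDPROM system address is
	 * used when nothing else is set, or when the user has set
	 * 'local-mac-address? = false'.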
2714 */ 2715 if (cidp->vendor_addr.set == B_FALSE || strcmp(propbuf, "false") == 0) 2716 if (localetheraddr(NULL, &sysaddr) != 0) { 2717 ethaddr_copy(&sysaddr, cidp->vendor_addr.addr); 2718 cidp->vendor_addr.set = B_TRUE; 2719 } 2720 2721 BGE_DEBUG(("bge_find_mac_address: +system %s (%sset)", 2722 ether_sprintf((void *)cidp->vendor_addr.addr), 2723 cidp->vendor_addr.set ? "" : "not ")); 2724 2725 /* 2726 * Finally(!), if there's a valid "mac-address" property (created 2727 * if we netbooted from this interface), we must use this instead 2728 * of any of the above to ensure that the NFS/install server doesn't 2729 * get confused by the address changing as Solaris takes over! 2730 */ 2731 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, bgep->devinfo, 2732 DDI_PROP_DONTPASS, macaddr_propname, &bytes, &nelts); 2733 if (err == DDI_PROP_SUCCESS) { 2734 if (nelts == ETHERADDRL) { 2735 while (nelts--) 2736 cidp->vendor_addr.addr[nelts] = bytes[nelts]; 2737 cidp->vendor_addr.set = B_TRUE; 2738 } 2739 ddi_prop_free(bytes); 2740 } 2741 2742 BGE_DEBUG(("bge_find_mac_address: =final %s (%sset)", 2743 ether_sprintf((void *)cidp->vendor_addr.addr), 2744 cidp->vendor_addr.set ? "" : "not ")); 2745 } 2746 2747 2748 /*ARGSUSED*/ 2749 int 2750 bge_check_acc_handle(bge_t *bgep, ddi_acc_handle_t handle) 2751 { 2752 ddi_fm_error_t de; 2753 2754 ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION); 2755 ddi_fm_acc_err_clear(handle, DDI_FME_VERSION); 2756 return (de.fme_status); 2757 } 2758 2759 /*ARGSUSED*/ 2760 int 2761 bge_check_dma_handle(bge_t *bgep, ddi_dma_handle_t handle) 2762 { 2763 ddi_fm_error_t de; 2764 2765 ASSERT(bgep->progress & PROGRESS_BUFS); 2766 ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION); 2767 return (de.fme_status); 2768 } 2769 2770 /* 2771 * The IO fault service error handling callback function 2772 */ 2773 /*ARGSUSED*/ 2774 static int 2775 bge_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data) 2776 { 2777 /* 2778 * as the driver can always deal with an error in any dma or 2779 * access handle, we can just return the fme_status value. 2780 */ 2781 pci_ereport_post(dip, err, NULL); 2782 return (err->fme_status); 2783 } 2784 2785 static void 2786 bge_fm_init(bge_t *bgep) 2787 { 2788 ddi_iblock_cookie_t iblk; 2789 2790 /* Only register with IO Fault Services if we have some capability */ 2791 if (bgep->fm_capabilities) { 2792 bge_reg_accattr.devacc_attr_access = DDI_FLAGERR_ACC; 2793 bge_desc_accattr.devacc_attr_access = DDI_FLAGERR_ACC; 2794 dma_attr.dma_attr_flags = DDI_DMA_FLAGERR; 2795 2796 /* Register capabilities with IO Fault Services */ 2797 ddi_fm_init(bgep->devinfo, &bgep->fm_capabilities, &iblk); 2798 2799 /* 2800 * Initialize pci ereport capabilities if ereport capable 2801 */ 2802 if (DDI_FM_EREPORT_CAP(bgep->fm_capabilities) || 2803 DDI_FM_ERRCB_CAP(bgep->fm_capabilities)) 2804 pci_ereport_setup(bgep->devinfo); 2805 2806 /* 2807 * Register error callback if error callback capable 2808 */ 2809 if (DDI_FM_ERRCB_CAP(bgep->fm_capabilities)) 2810 ddi_fm_handler_register(bgep->devinfo, 2811 bge_fm_error_cb, (void*) bgep); 2812 } else { 2813 /* 2814 * These fields have to be cleared of FMA if there are no 2815 * FMA capabilities at runtime. 
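		 * (DDI_DEFAULT_ACC and a zero dma_attr_flags, as set just
		 * below, are the ordinary non-FMA values for these
		 * attributes.)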
2816 */ 2817 bge_reg_accattr.devacc_attr_access = DDI_DEFAULT_ACC; 2818 bge_desc_accattr.devacc_attr_access = DDI_DEFAULT_ACC; 2819 dma_attr.dma_attr_flags = 0; 2820 } 2821 } 2822 2823 static void 2824 bge_fm_fini(bge_t *bgep) 2825 { 2826 /* Only unregister FMA capabilities if we registered some */ 2827 if (bgep->fm_capabilities) { 2828 2829 /* 2830 * Release any resources allocated by pci_ereport_setup() 2831 */ 2832 if (DDI_FM_EREPORT_CAP(bgep->fm_capabilities) || 2833 DDI_FM_ERRCB_CAP(bgep->fm_capabilities)) 2834 pci_ereport_teardown(bgep->devinfo); 2835 2836 /* 2837 * Un-register error callback if error callback capable 2838 */ 2839 if (DDI_FM_ERRCB_CAP(bgep->fm_capabilities)) 2840 ddi_fm_handler_unregister(bgep->devinfo); 2841 2842 /* Unregister from IO Fault Services */ 2843 ddi_fm_fini(bgep->devinfo); 2844 } 2845 } 2846 2847 static void 2848 #ifdef BGE_IPMI_ASF 2849 bge_unattach(bge_t *bgep, uint_t asf_mode) 2850 #else 2851 bge_unattach(bge_t *bgep) 2852 #endif 2853 { 2854 BGE_TRACE(("bge_unattach($%p)", 2855 (void *)bgep)); 2856 2857 /* 2858 * Flag that no more activity may be initiated 2859 */ 2860 bgep->progress &= ~PROGRESS_READY; 2861 2862 /* 2863 * Quiesce the PHY and MAC (leave it reset but still powered). 2864 * Clean up and free all BGE data structures 2865 */ 2866 if (bgep->periodic_id != NULL) { 2867 ddi_periodic_delete(bgep->periodic_id); 2868 bgep->periodic_id = NULL; 2869 } 2870 if (bgep->progress & PROGRESS_KSTATS) 2871 bge_fini_kstats(bgep); 2872 if (bgep->progress & PROGRESS_PHY) 2873 bge_phys_reset(bgep); 2874 if (bgep->progress & PROGRESS_HWINT) { 2875 mutex_enter(bgep->genlock); 2876 #ifdef BGE_IPMI_ASF 2877 if (bge_chip_reset(bgep, B_FALSE, asf_mode) != DDI_SUCCESS) 2878 #else 2879 if (bge_chip_reset(bgep, B_FALSE) != DDI_SUCCESS) 2880 #endif 2881 ddi_fm_service_impact(bgep->devinfo, 2882 DDI_SERVICE_UNAFFECTED); 2883 #ifdef BGE_IPMI_ASF 2884 if (bgep->asf_enabled) { 2885 /* 2886 * This register has been overlaid. We restore its 2887 * initial value here. 
 */
			bge_nic_put32(bgep, BGE_NIC_DATA_SIG_ADDR,
			    BGE_NIC_DATA_SIG);
		}
#endif
		if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK)
			ddi_fm_service_impact(bgep->devinfo,
			    DDI_SERVICE_UNAFFECTED);
		if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK)
			ddi_fm_service_impact(bgep->devinfo,
			    DDI_SERVICE_UNAFFECTED);
		mutex_exit(bgep->genlock);
	}
	if (bgep->progress & PROGRESS_INTR) {
		bge_intr_disable(bgep);
		bge_fini_rings(bgep);
	}
	if (bgep->progress & PROGRESS_HWINT) {
		bge_rem_intrs(bgep);
		rw_destroy(bgep->errlock);
		mutex_destroy(bgep->softintrlock);
		mutex_destroy(bgep->genlock);
	}
	if (bgep->progress & PROGRESS_FACTOTUM)
		ddi_remove_softintr(bgep->factotum_id);
	if (bgep->progress & PROGRESS_RESCHED)
		ddi_remove_softintr(bgep->drain_id);
	if (bgep->progress & PROGRESS_BUFS)
		bge_free_bufs(bgep);
	if (bgep->progress & PROGRESS_REGS)
		ddi_regs_map_free(&bgep->io_handle);
	if (bgep->progress & PROGRESS_CFG)
		pci_config_teardown(&bgep->cfg_handle);

	bge_fm_fini(bgep);

	ddi_remove_minor_node(bgep->devinfo, NULL);
	kmem_free(bgep->pstats, sizeof (bge_statistics_reg_t));
	kmem_free(bgep, sizeof (*bgep));
}

static int
bge_resume(dev_info_t *devinfo)
{
	bge_t *bgep;		/* Our private data */
	chip_id_t *cidp;
	chip_id_t chipid;

	bgep = ddi_get_driver_private(devinfo);
	if (bgep == NULL)
		return (DDI_FAILURE);

	/*
	 * Refuse to resume if the data structures aren't consistent
	 */
	if (bgep->devinfo != devinfo)
		return (DDI_FAILURE);

#ifdef BGE_IPMI_ASF
	/*
	 * Power management is not currently supported in BGE. To
	 * implement it, add the ASF/IPMI-related code here.
	 */

#endif

	/*
	 * Read chip ID & set up config space command register(s)
	 * Refuse to resume if the chip has changed its identity!
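	 * ("Identity" here means the vendor, device, revision and asic_rev
	 * fields saved in <chipid> at attach time; all four are compared
	 * below.)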
2958 */ 2959 cidp = &bgep->chipid; 2960 mutex_enter(bgep->genlock); 2961 bge_chip_cfg_init(bgep, &chipid, B_FALSE); 2962 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) { 2963 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 2964 mutex_exit(bgep->genlock); 2965 return (DDI_FAILURE); 2966 } 2967 mutex_exit(bgep->genlock); 2968 if (chipid.vendor != cidp->vendor) 2969 return (DDI_FAILURE); 2970 if (chipid.device != cidp->device) 2971 return (DDI_FAILURE); 2972 if (chipid.revision != cidp->revision) 2973 return (DDI_FAILURE); 2974 if (chipid.asic_rev != cidp->asic_rev) 2975 return (DDI_FAILURE); 2976 2977 /* 2978 * All OK, reinitialise h/w & kick off GLD scheduling 2979 */ 2980 mutex_enter(bgep->genlock); 2981 if (bge_restart(bgep, B_TRUE) != DDI_SUCCESS) { 2982 (void) bge_check_acc_handle(bgep, bgep->cfg_handle); 2983 (void) bge_check_acc_handle(bgep, bgep->io_handle); 2984 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 2985 mutex_exit(bgep->genlock); 2986 return (DDI_FAILURE); 2987 } 2988 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) { 2989 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 2990 mutex_exit(bgep->genlock); 2991 return (DDI_FAILURE); 2992 } 2993 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) { 2994 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 2995 mutex_exit(bgep->genlock); 2996 return (DDI_FAILURE); 2997 } 2998 mutex_exit(bgep->genlock); 2999 return (DDI_SUCCESS); 3000 } 3001 3002 /* 3003 * attach(9E) -- Attach a device to the system 3004 * 3005 * Called once for each board successfully probed. 3006 */ 3007 static int 3008 bge_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd) 3009 { 3010 bge_t *bgep; /* Our private data */ 3011 mac_register_t *macp; 3012 chip_id_t *cidp; 3013 caddr_t regs; 3014 int instance; 3015 int err; 3016 int intr_types; 3017 #ifdef BGE_IPMI_ASF 3018 uint32_t mhcrValue; 3019 #ifdef __sparc 3020 uint16_t value16; 3021 #endif 3022 #ifdef BGE_NETCONSOLE 3023 int retval; 3024 #endif 3025 #endif 3026 3027 instance = ddi_get_instance(devinfo); 3028 3029 BGE_GTRACE(("bge_attach($%p, %d) instance %d", 3030 (void *)devinfo, cmd, instance)); 3031 BGE_BRKPT(NULL, "bge_attach"); 3032 3033 switch (cmd) { 3034 default: 3035 return (DDI_FAILURE); 3036 3037 case DDI_RESUME: 3038 return (bge_resume(devinfo)); 3039 3040 case DDI_ATTACH: 3041 break; 3042 } 3043 3044 bgep = kmem_zalloc(sizeof (*bgep), KM_SLEEP); 3045 bgep->pstats = kmem_zalloc(sizeof (bge_statistics_reg_t), KM_SLEEP); 3046 ddi_set_driver_private(devinfo, bgep); 3047 bgep->bge_guard = BGE_GUARD; 3048 bgep->devinfo = devinfo; 3049 bgep->param_drain_max = 64; 3050 bgep->param_msi_cnt = 0; 3051 bgep->param_loop_mode = 0; 3052 3053 /* 3054 * Initialize more fields in BGE private data 3055 */ 3056 bgep->debug = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 3057 DDI_PROP_DONTPASS, debug_propname, bge_debug); 3058 (void) snprintf(bgep->ifname, sizeof (bgep->ifname), "%s%d", 3059 BGE_DRIVER_NAME, instance); 3060 3061 /* 3062 * Initialize for fma support 3063 */ 3064 bgep->fm_capabilities = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 3065 DDI_PROP_DONTPASS, fm_cap, 3066 DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE | 3067 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE); 3068 BGE_DEBUG(("bgep->fm_capabilities = %d", bgep->fm_capabilities)); 3069 bge_fm_init(bgep); 3070 3071 /* 3072 * Look up the IOMMU's page size for DVMA mappings (must be 3073 * a power of 2) and convert to a mask. 
This can be used to 3074 * determine whether a message buffer crosses a page boundary. 3075 * Note: in 2s complement binary notation, if X is a power of 3076 * 2, then -X has the representation "11...1100...00". 3077 */ 3078 bgep->pagemask = dvma_pagesize(devinfo); 3079 ASSERT(ddi_ffs(bgep->pagemask) == ddi_fls(bgep->pagemask)); 3080 bgep->pagemask = -bgep->pagemask; 3081 3082 /* 3083 * Map config space registers 3084 * Read chip ID & set up config space command register(s) 3085 * 3086 * Note: this leaves the chip accessible by Memory Space 3087 * accesses, but with interrupts and Bus Mastering off. 3088 * This should ensure that nothing untoward will happen 3089 * if it has been left active by the (net-)bootloader. 3090 * We'll re-enable Bus Mastering once we've reset the chip, 3091 * and allow interrupts only when everything else is set up. 3092 */ 3093 err = pci_config_setup(devinfo, &bgep->cfg_handle); 3094 #ifdef BGE_IPMI_ASF 3095 #ifdef __sparc 3096 value16 = pci_config_get16(bgep->cfg_handle, PCI_CONF_COMM); 3097 value16 = value16 | (PCI_COMM_MAE | PCI_COMM_ME); 3098 pci_config_put16(bgep->cfg_handle, PCI_CONF_COMM, value16); 3099 mhcrValue = MHCR_ENABLE_INDIRECT_ACCESS | 3100 MHCR_ENABLE_TAGGED_STATUS_MODE | 3101 MHCR_MASK_INTERRUPT_MODE | 3102 MHCR_MASK_PCI_INT_OUTPUT | 3103 MHCR_CLEAR_INTERRUPT_INTA | 3104 MHCR_ENABLE_ENDIAN_WORD_SWAP | 3105 MHCR_ENABLE_ENDIAN_BYTE_SWAP; 3106 pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_MHCR, mhcrValue); 3107 bge_ind_put32(bgep, MEMORY_ARBITER_MODE_REG, 3108 bge_ind_get32(bgep, MEMORY_ARBITER_MODE_REG) | 3109 MEMORY_ARBITER_ENABLE); 3110 #else 3111 mhcrValue = pci_config_get32(bgep->cfg_handle, PCI_CONF_BGE_MHCR); 3112 #endif 3113 if (mhcrValue & MHCR_ENABLE_ENDIAN_WORD_SWAP) { 3114 bgep->asf_wordswapped = B_TRUE; 3115 } else { 3116 bgep->asf_wordswapped = B_FALSE; 3117 } 3118 bge_asf_get_config(bgep); 3119 #endif 3120 if (err != DDI_SUCCESS) { 3121 bge_problem(bgep, "pci_config_setup() failed"); 3122 goto attach_fail; 3123 } 3124 bgep->progress |= PROGRESS_CFG; 3125 cidp = &bgep->chipid; 3126 bzero(cidp, sizeof (*cidp)); 3127 bge_chip_cfg_init(bgep, cidp, B_FALSE); 3128 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) { 3129 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 3130 goto attach_fail; 3131 } 3132 3133 #ifdef BGE_IPMI_ASF 3134 if (DEVICE_5721_SERIES_CHIPSETS(bgep) || 3135 DEVICE_5714_SERIES_CHIPSETS(bgep)) { 3136 bgep->asf_newhandshake = B_TRUE; 3137 } else { 3138 bgep->asf_newhandshake = B_FALSE; 3139 } 3140 #endif 3141 3142 /* 3143 * Update those parts of the chip ID derived from volatile 3144 * registers with the values seen by OBP (in case the chip 3145 * has been reset externally and therefore lost them). 
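	 * (The subsystem ids, cache line size, latency timer, and rx/tx
	 * ring counts are all re-read from properties immediately below.)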
 */
	cidp->subven = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, subven_propname, cidp->subven);
	cidp->subdev = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, subdev_propname, cidp->subdev);
	cidp->clsize = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, clsize_propname, cidp->clsize);
	cidp->latency = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, latency_propname, cidp->latency);
	cidp->rx_rings = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, rxrings_propname, cidp->rx_rings);
	cidp->tx_rings = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, txrings_propname, cidp->tx_rings);

	if (bge_jumbo_enable == B_TRUE) {
		cidp->default_mtu = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
		    DDI_PROP_DONTPASS, default_mtu, BGE_DEFAULT_MTU);
		if ((cidp->default_mtu < BGE_DEFAULT_MTU) ||
		    (cidp->default_mtu > BGE_MAXIMUM_MTU)) {
			cidp->default_mtu = BGE_DEFAULT_MTU;
		}
	}

	/*
	 * Map operating registers
	 */
	err = ddi_regs_map_setup(devinfo, BGE_PCI_OPREGS_RNUMBER,
	    &regs, 0, 0, &bge_reg_accattr, &bgep->io_handle);
	if (err != DDI_SUCCESS) {
		bge_problem(bgep, "ddi_regs_map_setup() failed");
		goto attach_fail;
	}
	bgep->io_regs = regs;
	bgep->progress |= PROGRESS_REGS;

	/*
	 * Characterise the device, so we know its requirements.
	 * Then allocate the appropriate TX and RX descriptors & buffers.
	 */
	if (bge_chip_id_init(bgep) == EIO) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
		goto attach_fail;
	}

	err = bge_alloc_bufs(bgep);
	if (err != DDI_SUCCESS) {
		bge_problem(bgep, "DMA buffer allocation failed");
		goto attach_fail;
	}
	bgep->progress |= PROGRESS_BUFS;

	/*
	 * Add the softint handlers:
	 *
	 * Both of these handlers are used to avoid restrictions on the
	 * context and/or mutexes required for some operations. In
	 * particular, the hardware interrupt handler and its subfunctions
	 * can detect a number of conditions that we don't want to handle
	 * in that context or with that set of mutexes held. So, these
	 * softints are triggered instead:
	 *
	 * the <resched> softint is triggered if we have previously
	 * had to refuse to send a packet because of resource shortage
	 * (we've run out of transmit buffers), but the send completion
	 * interrupt handler has now detected that more buffers have
	 * become available.
	 *
	 * the <factotum> is triggered if the h/w interrupt handler
	 * sees the <link state changed> or <error> bits in the status
	 * block. It's also triggered periodically to poll the link
	 * state, just in case we aren't getting link status change
	 * interrupts ...
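	 * (The periodic poll is driven by bge_chip_cyclic(), registered
	 * via ddi_periodic_add() near the end of this routine.)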
3218 */ 3219 err = ddi_add_softintr(devinfo, DDI_SOFTINT_LOW, &bgep->drain_id, 3220 NULL, NULL, bge_send_drain, (caddr_t)bgep); 3221 if (err != DDI_SUCCESS) { 3222 bge_problem(bgep, "ddi_add_softintr() failed"); 3223 goto attach_fail; 3224 } 3225 bgep->progress |= PROGRESS_RESCHED; 3226 err = ddi_add_softintr(devinfo, DDI_SOFTINT_LOW, &bgep->factotum_id, 3227 NULL, NULL, bge_chip_factotum, (caddr_t)bgep); 3228 if (err != DDI_SUCCESS) { 3229 bge_problem(bgep, "ddi_add_softintr() failed"); 3230 goto attach_fail; 3231 } 3232 bgep->progress |= PROGRESS_FACTOTUM; 3233 3234 /* Get supported interrupt types */ 3235 if (ddi_intr_get_supported_types(devinfo, &intr_types) != DDI_SUCCESS) { 3236 bge_error(bgep, "ddi_intr_get_supported_types failed\n"); 3237 3238 goto attach_fail; 3239 } 3240 3241 BGE_DEBUG(("%s: ddi_intr_get_supported_types() returned: %x", 3242 bgep->ifname, intr_types)); 3243 3244 if ((intr_types & DDI_INTR_TYPE_MSI) && bgep->chipid.msi_enabled) { 3245 if (bge_add_intrs(bgep, DDI_INTR_TYPE_MSI) != DDI_SUCCESS) { 3246 bge_error(bgep, "MSI registration failed, " 3247 "trying FIXED interrupt type\n"); 3248 } else { 3249 BGE_DEBUG(("%s: Using MSI interrupt type", 3250 bgep->ifname)); 3251 bgep->intr_type = DDI_INTR_TYPE_MSI; 3252 bgep->progress |= PROGRESS_HWINT; 3253 } 3254 } 3255 3256 if (!(bgep->progress & PROGRESS_HWINT) && 3257 (intr_types & DDI_INTR_TYPE_FIXED)) { 3258 if (bge_add_intrs(bgep, DDI_INTR_TYPE_FIXED) != DDI_SUCCESS) { 3259 bge_error(bgep, "FIXED interrupt " 3260 "registration failed\n"); 3261 goto attach_fail; 3262 } 3263 3264 BGE_DEBUG(("%s: Using FIXED interrupt type", bgep->ifname)); 3265 3266 bgep->intr_type = DDI_INTR_TYPE_FIXED; 3267 bgep->progress |= PROGRESS_HWINT; 3268 } 3269 3270 if (!(bgep->progress & PROGRESS_HWINT)) { 3271 bge_error(bgep, "No interrupts registered\n"); 3272 goto attach_fail; 3273 } 3274 3275 /* 3276 * Note that interrupts are not enabled yet as 3277 * mutex locks are not initialized. Initialize mutex locks. 3278 */ 3279 mutex_init(bgep->genlock, NULL, MUTEX_DRIVER, 3280 DDI_INTR_PRI(bgep->intr_pri)); 3281 mutex_init(bgep->softintrlock, NULL, MUTEX_DRIVER, 3282 DDI_INTR_PRI(bgep->intr_pri)); 3283 rw_init(bgep->errlock, NULL, RW_DRIVER, 3284 DDI_INTR_PRI(bgep->intr_pri)); 3285 3286 /* 3287 * Initialize rings. 3288 */ 3289 bge_init_rings(bgep); 3290 3291 /* 3292 * Now that mutex locks are initialized, enable interrupts. 3293 */ 3294 bge_intr_enable(bgep); 3295 bgep->progress |= PROGRESS_INTR; 3296 3297 /* 3298 * Initialise link state variables 3299 * Stop, reset & reinitialise the chip. 3300 * Initialise the (internal) PHY. 3301 */ 3302 bgep->link_state = LINK_STATE_UNKNOWN; 3303 3304 mutex_enter(bgep->genlock); 3305 3306 /* 3307 * Reset chip & rings to initial state; also reset address 3308 * filtering, promiscuity, loopback mode. 
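	 * (With BGE_IPMI_ASF built in, the reset mode passed to bge_reset()
	 * below depends on whether BGE_NETCONSOLE is also defined; without
	 * ASF support, bge_reset() takes no mode argument.)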
3309 */ 3310 #ifdef BGE_IPMI_ASF 3311 #ifdef BGE_NETCONSOLE 3312 if (bge_reset(bgep, ASF_MODE_INIT) != DDI_SUCCESS) { 3313 #else 3314 if (bge_reset(bgep, ASF_MODE_SHUTDOWN) != DDI_SUCCESS) { 3315 #endif 3316 #else 3317 if (bge_reset(bgep) != DDI_SUCCESS) { 3318 #endif 3319 (void) bge_check_acc_handle(bgep, bgep->cfg_handle); 3320 (void) bge_check_acc_handle(bgep, bgep->io_handle); 3321 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 3322 mutex_exit(bgep->genlock); 3323 goto attach_fail; 3324 } 3325 3326 #ifdef BGE_IPMI_ASF 3327 if (bgep->asf_enabled) { 3328 bgep->asf_status = ASF_STAT_RUN_INIT; 3329 } 3330 #endif 3331 3332 bzero(bgep->mcast_hash, sizeof (bgep->mcast_hash)); 3333 bzero(bgep->mcast_refs, sizeof (bgep->mcast_refs)); 3334 bgep->promisc = B_FALSE; 3335 bgep->param_loop_mode = BGE_LOOP_NONE; 3336 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) { 3337 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 3338 mutex_exit(bgep->genlock); 3339 goto attach_fail; 3340 } 3341 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) { 3342 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 3343 mutex_exit(bgep->genlock); 3344 goto attach_fail; 3345 } 3346 3347 mutex_exit(bgep->genlock); 3348 3349 if (bge_phys_init(bgep) == EIO) { 3350 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 3351 goto attach_fail; 3352 } 3353 bgep->progress |= PROGRESS_PHY; 3354 3355 /* 3356 * initialize NDD-tweakable parameters 3357 */ 3358 if (bge_nd_init(bgep)) { 3359 bge_problem(bgep, "bge_nd_init() failed"); 3360 goto attach_fail; 3361 } 3362 bgep->progress |= PROGRESS_NDD; 3363 3364 /* 3365 * Create & initialise named kstats 3366 */ 3367 bge_init_kstats(bgep, instance); 3368 bgep->progress |= PROGRESS_KSTATS; 3369 3370 /* 3371 * Determine whether to override the chip's own MAC address 3372 */ 3373 bge_find_mac_address(bgep, cidp); 3374 ethaddr_copy(cidp->vendor_addr.addr, bgep->curr_addr[0].addr); 3375 bgep->curr_addr[0].set = B_TRUE; 3376 3377 bgep->unicst_addr_total = MAC_ADDRESS_REGS_MAX; 3378 /* 3379 * Address available is one less than MAX 3380 * as primary address is not advertised 3381 * as a multiple MAC address. 3382 */ 3383 bgep->unicst_addr_avail = MAC_ADDRESS_REGS_MAX - 1; 3384 3385 if ((macp = mac_alloc(MAC_VERSION)) == NULL) 3386 goto attach_fail; 3387 macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER; 3388 macp->m_driver = bgep; 3389 macp->m_dip = devinfo; 3390 macp->m_src_addr = bgep->curr_addr[0].addr; 3391 macp->m_callbacks = &bge_m_callbacks; 3392 macp->m_min_sdu = 0; 3393 macp->m_max_sdu = cidp->ethmax_size - sizeof (struct ether_header); 3394 macp->m_margin = VLAN_TAGSZ; 3395 macp->m_priv_props = bge_priv_prop; 3396 macp->m_priv_prop_count = BGE_MAX_PRIV_PROPS; 3397 3398 /* 3399 * Finally, we're ready to register ourselves with the MAC layer 3400 * interface; if this succeeds, we're all ready to start() 3401 */ 3402 err = mac_register(macp, &bgep->mh); 3403 mac_free(macp); 3404 if (err != 0) 3405 goto attach_fail; 3406 3407 /* 3408 * Register a periodical handler. 3409 * bge_chip_cyclic() is invoked in kernel context. 
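	 * (DDI_IPL_0 requests ordinary kernel context rather than interrupt
	 * context, so the handler is free to block on the driver's mutexes.)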
 */
	bgep->periodic_id = ddi_periodic_add(bge_chip_cyclic, bgep,
	    BGE_CYCLIC_PERIOD, DDI_IPL_0);

	bgep->progress |= PROGRESS_READY;
	ASSERT(bgep->bge_guard == BGE_GUARD);
#ifdef BGE_IPMI_ASF
#ifdef BGE_NETCONSOLE
	if (bgep->asf_enabled) {
		mutex_enter(bgep->genlock);
		retval = bge_chip_start(bgep, B_TRUE);
		mutex_exit(bgep->genlock);
		if (retval != DDI_SUCCESS)
			goto attach_fail;
	}
#endif
#endif
	return (DDI_SUCCESS);

attach_fail:
#ifdef BGE_IPMI_ASF
	bge_unattach(bgep, ASF_MODE_SHUTDOWN);
#else
	bge_unattach(bgep);
#endif
	return (DDI_FAILURE);
}

/*
 * bge_suspend() -- suspend transmit/receive for powerdown
 */
static int
bge_suspend(bge_t *bgep)
{
	/*
	 * Stop processing and idle (powerdown) the PHY ...
	 */
	mutex_enter(bgep->genlock);
#ifdef BGE_IPMI_ASF
	/*
	 * Power management is not currently supported in BGE. To
	 * implement it, add the ASF/IPMI-related code here.
	 */
#endif
	bge_stop(bgep);
	if (bge_phys_idle(bgep) != DDI_SUCCESS) {
		(void) bge_check_acc_handle(bgep, bgep->io_handle);
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (DDI_FAILURE);
	}
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (DDI_FAILURE);
	}
	mutex_exit(bgep->genlock);

	return (DDI_SUCCESS);
}

/*
 * detach(9E) -- Detach a device from the system
 */
static int
bge_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
{
	bge_t *bgep;
#ifdef BGE_IPMI_ASF
	uint_t asf_mode;
	asf_mode = ASF_MODE_NONE;
#endif

	BGE_GTRACE(("bge_detach($%p, %d)", (void *)devinfo, cmd));

	bgep = ddi_get_driver_private(devinfo);

	switch (cmd) {
	default:
		return (DDI_FAILURE);

	case DDI_SUSPEND:
		return (bge_suspend(bgep));

	case DDI_DETACH:
		break;
	}

#ifdef BGE_IPMI_ASF
	mutex_enter(bgep->genlock);
	if (bgep->asf_enabled && ((bgep->asf_status == ASF_STAT_RUN) ||
	    (bgep->asf_status == ASF_STAT_RUN_INIT))) {

		bge_asf_update_status(bgep);
		if (bgep->asf_status == ASF_STAT_RUN) {
			bge_asf_stop_timer(bgep);
		}
		bgep->asf_status = ASF_STAT_STOP;

		bge_asf_pre_reset_operations(bgep, BGE_SHUTDOWN_RESET);

		if (bgep->asf_pseudostop) {
			bge_chip_stop(bgep, B_FALSE);
			bgep->bge_mac_state = BGE_MAC_STOPPED;
			bgep->asf_pseudostop = B_FALSE;
		}

		asf_mode = ASF_MODE_POST_SHUTDOWN;

		if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK)
			ddi_fm_service_impact(bgep->devinfo,
			    DDI_SERVICE_UNAFFECTED);
		if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK)
			ddi_fm_service_impact(bgep->devinfo,
			    DDI_SERVICE_UNAFFECTED);
	}
	mutex_exit(bgep->genlock);
#endif

	/*
	 * Unregister from the GLD subsystem. This can fail, in
	 * particular if there are DLPI style-2 streams still open -
	 * in which case we just return failure without shutting
	 * down chip operations.
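	 * (The framework may attempt the detach again later, by which time
	 * the offending streams may have been closed.)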
 */
	if (mac_unregister(bgep->mh) != 0)
		return (DDI_FAILURE);

	/*
	 * All activity stopped, so we can clean up & exit
	 */
#ifdef BGE_IPMI_ASF
	bge_unattach(bgep, asf_mode);
#else
	bge_unattach(bgep);
#endif
	return (DDI_SUCCESS);
}


/*
 * ========== Module Loading Data & Entry Points ==========
 */

#undef	BGE_DBG
#define	BGE_DBG		BGE_DBG_INIT	/* debug flag for this code */

DDI_DEFINE_STREAM_OPS(bge_dev_ops, nulldev, nulldev, bge_attach, bge_detach,
    nodev, NULL, D_MP, NULL);

static struct modldrv bge_modldrv = {
	&mod_driverops,		/* Type of module.  This one is a driver */
	bge_ident,		/* short description */
	&bge_dev_ops		/* driver specific ops */
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *)&bge_modldrv, NULL
};


int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

int
_init(void)
{
	int status;

	mac_init_ops(&bge_dev_ops, "bge");
	status = mod_install(&modlinkage);
	if (status == DDI_SUCCESS)
		mutex_init(bge_log_mutex, NULL, MUTEX_DRIVER, NULL);
	else
		mac_fini_ops(&bge_dev_ops);
	return (status);
}

int
_fini(void)
{
	int status;

	status = mod_remove(&modlinkage);
	if (status == DDI_SUCCESS) {
		mac_fini_ops(&bge_dev_ops);
		mutex_destroy(bge_log_mutex);
	}
	return (status);
}


/*
 * bge_add_intrs:
 *
 * Register FIXED or MSI interrupts.
 */
static int
bge_add_intrs(bge_t *bgep, int intr_type)
{
	dev_info_t *dip = bgep->devinfo;
	int avail, actual, intr_size, count = 0;
	int i, flag, ret;

	BGE_DEBUG(("bge_add_intrs($%p, 0x%x)", (void *)bgep, intr_type));

	/* Get number of interrupts */
	ret = ddi_intr_get_nintrs(dip, intr_type, &count);
	if ((ret != DDI_SUCCESS) || (count == 0)) {
		bge_error(bgep, "ddi_intr_get_nintrs() failure, ret: %d, "
		    "count: %d", ret, count);

		return (DDI_FAILURE);
	}

	/* Get number of available interrupts */
	ret = ddi_intr_get_navail(dip, intr_type, &avail);
	if ((ret != DDI_SUCCESS) || (avail == 0)) {
		bge_error(bgep, "ddi_intr_get_navail() failure, "
		    "ret: %d, avail: %d\n", ret, avail);

		return (DDI_FAILURE);
	}

	if (avail < count) {
		BGE_DEBUG(("%s: nintrs() returned %d, navail returned %d",
		    bgep->ifname, count, avail));
	}

	/*
	 * BGE hardware generates only a single MSI even though it claims
	 * to support multiple MSIs, so hard-code the MSI count to 1.
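	 * Combined with DDI_INTR_ALLOC_STRICT below, the allocation either
	 * yields exactly that one vector or fails outright, in which case
	 * bge_attach() falls back to trying a FIXED interrupt.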
 */
	if (intr_type == DDI_INTR_TYPE_MSI) {
		count = 1;
		flag = DDI_INTR_ALLOC_STRICT;
	} else {
		flag = DDI_INTR_ALLOC_NORMAL;
	}

	/* Allocate an array of interrupt handles */
	intr_size = count * sizeof (ddi_intr_handle_t);
	bgep->htable = kmem_alloc(intr_size, KM_SLEEP);

	/* Call ddi_intr_alloc() */
	ret = ddi_intr_alloc(dip, bgep->htable, intr_type, 0,
	    count, &actual, flag);

	if ((ret != DDI_SUCCESS) || (actual == 0)) {
		bge_error(bgep, "ddi_intr_alloc() failed %d\n", ret);

		kmem_free(bgep->htable, intr_size);
		return (DDI_FAILURE);
	}

	if (actual < count) {
		BGE_DEBUG(("%s: Requested: %d, Received: %d",
		    bgep->ifname, count, actual));
	}

	bgep->intr_cnt = actual;

	/*
	 * Get priority for first msi, assume remaining are all the same
	 */
	if ((ret = ddi_intr_get_pri(bgep->htable[0], &bgep->intr_pri)) !=
	    DDI_SUCCESS) {
		bge_error(bgep, "ddi_intr_get_pri() failed %d\n", ret);

		/* Free already allocated intr */
		for (i = 0; i < actual; i++) {
			(void) ddi_intr_free(bgep->htable[i]);
		}

		kmem_free(bgep->htable, intr_size);
		return (DDI_FAILURE);
	}

	/* Call ddi_intr_add_handler() */
	for (i = 0; i < actual; i++) {
		if ((ret = ddi_intr_add_handler(bgep->htable[i], bge_intr,
		    (caddr_t)bgep, (caddr_t)(uintptr_t)i)) != DDI_SUCCESS) {
			bge_error(bgep, "ddi_intr_add_handler() "
			    "failed %d\n", ret);

			/*
			 * Remove any handlers already added, then free
			 * all the allocated interrupts
			 */
			while (i-- > 0)
				(void) ddi_intr_remove_handler(
				    bgep->htable[i]);
			for (i = 0; i < actual; i++) {
				(void) ddi_intr_free(bgep->htable[i]);
			}

			kmem_free(bgep->htable, intr_size);
			return (DDI_FAILURE);
		}
	}

	if ((ret = ddi_intr_get_cap(bgep->htable[0], &bgep->intr_cap))
	    != DDI_SUCCESS) {
		bge_error(bgep, "ddi_intr_get_cap() failed %d\n", ret);

		for (i = 0; i < actual; i++) {
			(void) ddi_intr_remove_handler(bgep->htable[i]);
			(void) ddi_intr_free(bgep->htable[i]);
		}

		kmem_free(bgep->htable, intr_size);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*
 * bge_rem_intrs:
 *
 * Unregister FIXED or MSI interrupts
 */
static void
bge_rem_intrs(bge_t *bgep)
{
	int i;

	BGE_DEBUG(("bge_rem_intrs($%p)", (void *)bgep));

	/* Call ddi_intr_remove_handler() */
	for (i = 0; i < bgep->intr_cnt; i++) {
		(void) ddi_intr_remove_handler(bgep->htable[i]);
		(void) ddi_intr_free(bgep->htable[i]);
	}

	kmem_free(bgep->htable, bgep->intr_cnt * sizeof (ddi_intr_handle_t));
}

void
bge_intr_enable(bge_t *bgep)
{
	int i;

	if (bgep->intr_cap & DDI_INTR_FLAG_BLOCK) {
		/* Call ddi_intr_block_enable() for MSI interrupts */
		(void) ddi_intr_block_enable(bgep->htable, bgep->intr_cnt);
	} else {
		/* Call ddi_intr_enable for MSI or FIXED interrupts */
		for (i = 0; i < bgep->intr_cnt; i++) {
			(void) ddi_intr_enable(bgep->htable[i]);
		}
	}
}

void
bge_intr_disable(bge_t *bgep)
{
	int i;

	if (bgep->intr_cap & DDI_INTR_FLAG_BLOCK) {
		/* Call ddi_intr_block_disable() */
		(void) ddi_intr_block_disable(bgep->htable, bgep->intr_cnt);
	} else {
		for (i = 0; i < bgep->intr_cnt; i++) {
			(void) ddi_intr_disable(bgep->htable[i]);
		}
	}
}

int
bge_reprogram(bge_t *bgep)
{
	int status = 0;

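	/* The caller must hold genlock across the whole reprogram sequence */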
ASSERT(mutex_owned(bgep->genlock)); 3785 3786 if (bge_phys_update(bgep) != DDI_SUCCESS) { 3787 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 3788 status = IOC_INVAL; 3789 } 3790 #ifdef BGE_IPMI_ASF 3791 if (bge_chip_sync(bgep, B_TRUE) == DDI_FAILURE) { 3792 #else 3793 if (bge_chip_sync(bgep) == DDI_FAILURE) { 3794 #endif 3795 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 3796 status = IOC_INVAL; 3797 } 3798 if (bgep->intr_type == DDI_INTR_TYPE_MSI) 3799 bge_chip_msi_trig(bgep); 3800 return (status); 3801 } 3802