/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include "bge_impl.h"
#include <sys/sdt.h>
#include <sys/mac.h>

/*
 * This is the string displayed by modinfo, etc.
 * Make sure you keep the version ID up to date!
 */
static char bge_ident[] = "Broadcom Gb Ethernet";

/*
 * Property names
 */
static char debug_propname[] = "bge-debug-flags";
static char clsize_propname[] = "cache-line-size";
static char latency_propname[] = "latency-timer";
static char localmac_boolname[] = "local-mac-address?";
static char localmac_propname[] = "local-mac-address";
static char macaddr_propname[] = "mac-address";
static char subdev_propname[] = "subsystem-id";
static char subven_propname[] = "subsystem-vendor-id";
static char rxrings_propname[] = "bge-rx-rings";
static char txrings_propname[] = "bge-tx-rings";
static char fm_cap[] = "fm-capable";
static char default_mtu[] = "default_mtu";

static int bge_add_intrs(bge_t *, int);
static void bge_rem_intrs(bge_t *);

/*
 * Describes the chip's DMA engine
 */
static ddi_dma_attr_t dma_attr = {
	DMA_ATTR_V0,			/* dma_attr version	*/
	0x0000000000000000ull,		/* dma_attr_addr_lo	*/
	0xFFFFFFFFFFFFFFFFull,		/* dma_attr_addr_hi	*/
	0x00000000FFFFFFFFull,		/* dma_attr_count_max	*/
	0x0000000000000001ull,		/* dma_attr_align	*/
	0x00000FFF,			/* dma_attr_burstsizes	*/
	0x00000001,			/* dma_attr_minxfer	*/
	0x000000000000FFFFull,		/* dma_attr_maxxfer	*/
	0xFFFFFFFFFFFFFFFFull,		/* dma_attr_seg		*/
	1,				/* dma_attr_sgllen	*/
	0x00000001,			/* dma_attr_granular	*/
	DDI_DMA_FLAGERR			/* dma_attr_flags	*/
};
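/*
 * Note: these attributes are handed to ddi_dma_alloc_handle() in
 * bge_alloc_dma_mem() below.  Since dma_attr_sgllen is 1, every
 * binding must resolve to a single DMA cookie; bge_alloc_dma_mem()
 * fails the allocation if ncookies != 1.
 */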
/*
 * PIO access attributes for registers
 */
static ddi_device_acc_attr_t bge_reg_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC,
	DDI_FLAGERR_ACC
};

/*
 * DMA access attributes for descriptors: NOT to be byte swapped.
 */
static ddi_device_acc_attr_t bge_desc_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC,
	DDI_FLAGERR_ACC
};

/*
 * DMA access attributes for data: NOT to be byte swapped.
 */
static ddi_device_acc_attr_t bge_data_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC
};

static int		bge_m_start(void *);
static void		bge_m_stop(void *);
static int		bge_m_promisc(void *, boolean_t);
static int		bge_m_multicst(void *, boolean_t, const uint8_t *);
static int		bge_m_unicst(void *, const uint8_t *);
static void		bge_m_resources(void *);
static void		bge_m_ioctl(void *, queue_t *, mblk_t *);
static boolean_t	bge_m_getcapab(void *, mac_capab_t, void *);
static int		bge_unicst_set(void *, const uint8_t *,
    mac_addr_slot_t);
static int		bge_m_unicst_add(void *, mac_multi_addr_t *);
static int		bge_m_unicst_remove(void *, mac_addr_slot_t);
static int		bge_m_unicst_modify(void *, mac_multi_addr_t *);
static int		bge_m_unicst_get(void *, mac_multi_addr_t *);
static int		bge_m_setprop(void *, const char *, mac_prop_id_t,
    uint_t, const void *);
static int		bge_m_getprop(void *, const char *, mac_prop_id_t,
    uint_t, uint_t, void *);
static int		bge_set_priv_prop(bge_t *, const char *, uint_t,
    const void *);
static int		bge_get_priv_prop(bge_t *, const char *, uint_t,
    uint_t, void *);

#define	BGE_M_CALLBACK_FLAGS\
	(MC_RESOURCES | MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP)

static mac_callbacks_t bge_m_callbacks = {
	BGE_M_CALLBACK_FLAGS,
	bge_m_stat,
	bge_m_start,
	bge_m_stop,
	bge_m_promisc,
	bge_m_multicst,
	bge_m_unicst,
	bge_m_tx,
	bge_m_resources,
	bge_m_ioctl,
	bge_m_getcapab,
	NULL,
	NULL,
	bge_m_setprop,
	bge_m_getprop
};

mac_priv_prop_t bge_priv_prop[] = {
	{"_adv_asym_pause_cap", MAC_PROP_PERM_RW},
	{"_adv_pause_cap", MAC_PROP_PERM_RW}
};

#define	BGE_MAX_PRIV_PROPS \
	(sizeof (bge_priv_prop) / sizeof (mac_priv_prop_t))
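/*
 * The callback vector above is what the GLD (MAC) layer invokes on
 * this driver.  A sketch of how it is typically wired up at attach
 * time (the real registration lives in bge_attach(), outside this
 * section; field usage here follows the usual mac_register(9F)
 * conventions):
 *
 *	mac_register_t *macp = mac_alloc(MAC_VERSION);
 *	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
 *	macp->m_driver = bgep;
 *	macp->m_dip = bgep->devinfo;
 *	macp->m_callbacks = &bge_m_callbacks;
 *	...
 *	err = mac_register(macp, &bgep->mh);
 */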
/*
 * ========== Transmit and receive ring reinitialisation ==========
 */

/*
 * These <reinit> routines each reset the specified ring to an initial
 * state, assuming that the corresponding <init> routine has already
 * been called exactly once.
 */

static void
bge_reinit_send_ring(send_ring_t *srp)
{
	bge_queue_t *txbuf_queue;
	bge_queue_item_t *txbuf_head;
	sw_txbuf_t *txbuf;
	sw_sbd_t *ssbdp;
	uint32_t slot;

	/*
	 * Reinitialise control variables ...
	 */
	srp->tx_flow = 0;
	srp->tx_next = 0;
	srp->txfill_next = 0;
	srp->tx_free = srp->desc.nslots;
	ASSERT(mutex_owned(srp->tc_lock));
	srp->tc_next = 0;
	srp->txpkt_next = 0;
	srp->tx_block = 0;
	srp->tx_nobd = 0;
	srp->tx_nobuf = 0;

	/*
	 * Initialize the tx buffer push queue
	 */
	mutex_enter(srp->freetxbuf_lock);
	mutex_enter(srp->txbuf_lock);
	txbuf_queue = &srp->freetxbuf_queue;
	txbuf_queue->head = NULL;
	txbuf_queue->count = 0;
	txbuf_queue->lock = srp->freetxbuf_lock;
	srp->txbuf_push_queue = txbuf_queue;

	/*
	 * Initialize the tx buffer pop queue
	 */
	txbuf_queue = &srp->txbuf_queue;
	txbuf_queue->head = NULL;
	txbuf_queue->count = 0;
	txbuf_queue->lock = srp->txbuf_lock;
	srp->txbuf_pop_queue = txbuf_queue;
	txbuf_head = srp->txbuf_head;
	txbuf = srp->txbuf;
	for (slot = 0; slot < srp->tx_buffers; ++slot) {
		txbuf_head->item = txbuf;
		txbuf_head->next = txbuf_queue->head;
		txbuf_queue->head = txbuf_head;
		txbuf_queue->count++;
		txbuf++;
		txbuf_head++;
	}
	mutex_exit(srp->txbuf_lock);
	mutex_exit(srp->freetxbuf_lock);

	/*
	 * Zero and sync all the h/w Send Buffer Descriptors
	 */
	DMA_ZERO(srp->desc);
	DMA_SYNC(srp->desc, DDI_DMA_SYNC_FORDEV);
	bzero(srp->pktp, BGE_SEND_BUF_MAX * sizeof (*srp->pktp));
	ssbdp = srp->sw_sbds;
	for (slot = 0; slot < srp->desc.nslots; ++ssbdp, ++slot)
		ssbdp->pbuf = NULL;
}

static void
bge_reinit_recv_ring(recv_ring_t *rrp)
{
	/*
	 * Reinitialise control variables ...
	 */
	rrp->rx_next = 0;
}

static void
bge_reinit_buff_ring(buff_ring_t *brp, uint32_t ring)
{
	bge_rbd_t *hw_rbd_p;
	sw_rbd_t *srbdp;
	uint32_t bufsize;
	uint32_t nslots;
	uint32_t slot;

	static uint16_t ring_type_flag[BGE_BUFF_RINGS_MAX] = {
		RBD_FLAG_STD_RING,
		RBD_FLAG_JUMBO_RING,
		RBD_FLAG_MINI_RING
	};

	/*
	 * Zero, initialise and sync all the h/w Receive Buffer Descriptors
	 * Note: all the remaining fields (<type>, <flags>, <ip_cksum>,
	 * <tcp_udp_cksum>, <error_flag>, <vlan_tag>, and <reserved>)
	 * should be zeroed, and so don't need to be set up specifically
	 * once the whole area has been cleared.
	 */
	DMA_ZERO(brp->desc);

	hw_rbd_p = DMA_VPTR(brp->desc);
	nslots = brp->desc.nslots;
	ASSERT(brp->buf[0].nslots == nslots/BGE_SPLIT);
	bufsize = brp->buf[0].size;
	srbdp = brp->sw_rbds;
	for (slot = 0; slot < nslots; ++hw_rbd_p, ++srbdp, ++slot) {
		hw_rbd_p->host_buf_addr = srbdp->pbuf.cookie.dmac_laddress;
		hw_rbd_p->index = (uint16_t)slot;
		hw_rbd_p->len = (uint16_t)bufsize;
		hw_rbd_p->opaque = srbdp->pbuf.token;
		hw_rbd_p->flags |= ring_type_flag[ring];
	}

	DMA_SYNC(brp->desc, DDI_DMA_SYNC_FORDEV);

	/*
	 * Finally, reinitialise the ring control variables ...
	 */
	brp->rf_next = (nslots != 0) ? (nslots-1) : 0;
}

/*
 * Reinitialize all rings
 */
static void
bge_reinit_rings(bge_t *bgep)
{
	uint32_t ring;

	ASSERT(mutex_owned(bgep->genlock));

	/*
	 * Send Rings ...
	 */
	for (ring = 0; ring < bgep->chipid.tx_rings; ++ring)
		bge_reinit_send_ring(&bgep->send[ring]);

	/*
	 * Receive Return Rings ...
	 */
	for (ring = 0; ring < bgep->chipid.rx_rings; ++ring)
		bge_reinit_recv_ring(&bgep->recv[ring]);

	/*
	 * Receive Producer Rings ...
	 */
	for (ring = 0; ring < BGE_BUFF_RINGS_USED; ++ring)
		bge_reinit_buff_ring(&bgep->buff[ring], ring);
}
/*
 * ========== Internal state management entry points ==========
 */

#undef	BGE_DBG
#define	BGE_DBG		BGE_DBG_NEMO	/* debug flag for this code	*/

/*
 * These routines provide all the functionality required by the
 * corresponding GLD entry points, but don't update the GLD state
 * so they can be called internally without disturbing our record
 * of what GLD thinks we should be doing ...
 */

/*
 *	bge_reset() -- reset h/w & rings to initial state
 */
static int
#ifdef BGE_IPMI_ASF
bge_reset(bge_t *bgep, uint_t asf_mode)
#else
bge_reset(bge_t *bgep)
#endif
{
	uint32_t ring;
	int retval;

	BGE_TRACE(("bge_reset($%p)", (void *)bgep));

	ASSERT(mutex_owned(bgep->genlock));

	/*
	 * Grab all the other mutexes in the world (this should
	 * ensure no other threads are manipulating driver state)
	 */
	for (ring = 0; ring < BGE_RECV_RINGS_MAX; ++ring)
		mutex_enter(bgep->recv[ring].rx_lock);
	for (ring = 0; ring < BGE_BUFF_RINGS_MAX; ++ring)
		mutex_enter(bgep->buff[ring].rf_lock);
	rw_enter(bgep->errlock, RW_WRITER);
	for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring)
		mutex_enter(bgep->send[ring].tx_lock);
	for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring)
		mutex_enter(bgep->send[ring].tc_lock);

#ifdef BGE_IPMI_ASF
	retval = bge_chip_reset(bgep, B_TRUE, asf_mode);
#else
	retval = bge_chip_reset(bgep, B_TRUE);
#endif
	bge_reinit_rings(bgep);

	/*
	 * Free the world ...
	 */
	for (ring = BGE_SEND_RINGS_MAX; ring-- > 0; )
		mutex_exit(bgep->send[ring].tc_lock);
	for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring)
		mutex_exit(bgep->send[ring].tx_lock);
	rw_exit(bgep->errlock);
	for (ring = BGE_BUFF_RINGS_MAX; ring-- > 0; )
		mutex_exit(bgep->buff[ring].rf_lock);
	for (ring = BGE_RECV_RINGS_MAX; ring-- > 0; )
		mutex_exit(bgep->recv[ring].rx_lock);

	BGE_DEBUG(("bge_reset($%p) done", (void *)bgep));
	return (retval);
}
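/*
 * Note the locking discipline in bge_reset() above: the lock classes
 * are always acquired in a fixed order (receive rx_lock, buffer
 * rf_lock, error rwlock, send tx_lock, send tc_lock) and released in
 * the opposite class order, so two threads contending for the chip
 * can never deadlock against each other.
 */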
/*
 *	bge_stop() -- stop processing, don't reset h/w or rings
 */
static void
bge_stop(bge_t *bgep)
{
	BGE_TRACE(("bge_stop($%p)", (void *)bgep));

	ASSERT(mutex_owned(bgep->genlock));

#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled) {
		bgep->asf_pseudostop = B_TRUE;
	} else {
#endif
		bge_chip_stop(bgep, B_FALSE);
#ifdef BGE_IPMI_ASF
	}
#endif

	BGE_DEBUG(("bge_stop($%p) done", (void *)bgep));
}

/*
 *	bge_start() -- start transmitting/receiving
 */
static int
bge_start(bge_t *bgep, boolean_t reset_phys)
{
	int retval;

	BGE_TRACE(("bge_start($%p, %d)", (void *)bgep, reset_phys));

	ASSERT(mutex_owned(bgep->genlock));

	/*
	 * Start chip processing, including enabling interrupts
	 */
	retval = bge_chip_start(bgep, reset_phys);

	BGE_DEBUG(("bge_start($%p, %d) done", (void *)bgep, reset_phys));
	return (retval);
}

/*
 * bge_restart - restart transmitting/receiving after error or suspend
 */
int
bge_restart(bge_t *bgep, boolean_t reset_phys)
{
	int retval = DDI_SUCCESS;

	ASSERT(mutex_owned(bgep->genlock));

#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled) {
		if (bge_reset(bgep, ASF_MODE_POST_INIT) != DDI_SUCCESS)
			retval = DDI_FAILURE;
	} else
		if (bge_reset(bgep, ASF_MODE_NONE) != DDI_SUCCESS)
			retval = DDI_FAILURE;
#else
	if (bge_reset(bgep) != DDI_SUCCESS)
		retval = DDI_FAILURE;
#endif
	if (bgep->bge_mac_state == BGE_MAC_STARTED) {
		if (bge_start(bgep, reset_phys) != DDI_SUCCESS)
			retval = DDI_FAILURE;
		bgep->watchdog = 0;
		ddi_trigger_softintr(bgep->drain_id);
	}

	BGE_DEBUG(("bge_restart($%p, %d) done", (void *)bgep, reset_phys));
	return (retval);
}


/*
 * ========== Nemo-required management entry points ==========
 */

#undef	BGE_DBG
#define	BGE_DBG		BGE_DBG_NEMO	/* debug flag for this code	*/

/*
 *	bge_m_stop() -- stop transmitting/receiving
 */
static void
bge_m_stop(void *arg)
{
	bge_t *bgep = arg;		/* private device info	*/
	send_ring_t *srp;
	uint32_t ring;

	BGE_TRACE(("bge_m_stop($%p)", arg));

	/*
	 * Just stop processing, then record new GLD state
	 */
	mutex_enter(bgep->genlock);
	if (!(bgep->progress & PROGRESS_INTR)) {
		/* can happen during autorecovery */
		mutex_exit(bgep->genlock);
		return;
	}
	bge_stop(bgep);

	bgep->link_update_timer = 0;
	bgep->link_state = LINK_STATE_UNKNOWN;
	mac_link_update(bgep->mh, bgep->link_state);

	/*
	 * Free the possible tx buffers allocated in tx process.
	 */
#ifdef BGE_IPMI_ASF
	if (!bgep->asf_pseudostop)
#endif
	{
		rw_enter(bgep->errlock, RW_WRITER);
		for (ring = 0; ring < bgep->chipid.tx_rings; ++ring) {
			srp = &bgep->send[ring];
			mutex_enter(srp->tx_lock);
			if (srp->tx_array > 1)
				bge_free_txbuf_arrays(srp);
			mutex_exit(srp->tx_lock);
		}
		rw_exit(bgep->errlock);
	}
	bgep->bge_mac_state = BGE_MAC_STOPPED;
	BGE_DEBUG(("bge_m_stop($%p) done", arg));
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK)
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_UNAFFECTED);
	mutex_exit(bgep->genlock);
}
/*
 *	bge_m_start() -- start transmitting/receiving
 */
static int
bge_m_start(void *arg)
{
	bge_t *bgep = arg;		/* private device info	*/

	BGE_TRACE(("bge_m_start($%p)", arg));

	/*
	 * Start processing and record new GLD state
	 */
	mutex_enter(bgep->genlock);
	if (!(bgep->progress & PROGRESS_INTR)) {
		/* can happen during autorecovery */
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled) {
		if ((bgep->asf_status == ASF_STAT_RUN) &&
		    (bgep->asf_pseudostop)) {
			bgep->bge_mac_state = BGE_MAC_STARTED;
			mutex_exit(bgep->genlock);
			return (0);
		}
	}
	if (bge_reset(bgep, ASF_MODE_INIT) != DDI_SUCCESS) {
#else
	if (bge_reset(bgep) != DDI_SUCCESS) {
#endif
		(void) bge_check_acc_handle(bgep, bgep->cfg_handle);
		(void) bge_check_acc_handle(bgep, bgep->io_handle);
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	if (bge_start(bgep, B_TRUE) != DDI_SUCCESS) {
		(void) bge_check_acc_handle(bgep, bgep->cfg_handle);
		(void) bge_check_acc_handle(bgep, bgep->io_handle);
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	bgep->bge_mac_state = BGE_MAC_STARTED;
	BGE_DEBUG(("bge_m_start($%p) done", arg));

	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled) {
		if (bgep->asf_status != ASF_STAT_RUN) {
			/* start ASF heart beat */
			bgep->asf_timeout_id = timeout(bge_asf_heartbeat,
			    (void *)bgep,
			    drv_usectohz(BGE_ASF_HEARTBEAT_INTERVAL));
			bgep->asf_status = ASF_STAT_RUN;
		}
	}
#endif
	mutex_exit(bgep->genlock);

	return (0);
}

/*
 *	bge_m_unicst() -- set the physical network address
 */
static int
bge_m_unicst(void *arg, const uint8_t *macaddr)
{
	/*
	 * Request to set address in
	 * address slot 0, i.e., default address
	 */
	return (bge_unicst_set(arg, macaddr, 0));
}

/*
 *	bge_unicst_set() -- set the physical network address
 */
static int
bge_unicst_set(void *arg, const uint8_t *macaddr, mac_addr_slot_t slot)
{
	bge_t *bgep = arg;		/* private device info	*/

	BGE_TRACE(("bge_m_unicst_set($%p, %s)", arg,
	    ether_sprintf((void *)macaddr)));
	/*
	 * Remember the new current address in the driver state
	 * Sync the chip's idea of the address too ...
	 */
	mutex_enter(bgep->genlock);
	if (!(bgep->progress & PROGRESS_INTR)) {
		/* can happen during autorecovery */
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	ethaddr_copy(macaddr, bgep->curr_addr[slot].addr);
#ifdef BGE_IPMI_ASF
	if (bge_chip_sync(bgep, B_FALSE) == DDI_FAILURE) {
#else
	if (bge_chip_sync(bgep) == DDI_FAILURE) {
#endif
		(void) bge_check_acc_handle(bgep, bgep->cfg_handle);
		(void) bge_check_acc_handle(bgep, bgep->io_handle);
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled) {
		/*
		 * The above bge_chip_sync() function wrote the ethernet MAC
		 * addresses registers which destroyed the IPMI/ASF sideband.
		 * Here, we have to reset chip to make IPMI/ASF sideband work.
		 */
		if (bgep->asf_status == ASF_STAT_RUN) {
			/*
			 * We must stop ASF heart beat before bge_chip_stop(),
			 * otherwise some computers (ex. IBM HS20 blade
			 * server) may crash.
			 */
			bge_asf_update_status(bgep);
			bge_asf_stop_timer(bgep);
			bgep->asf_status = ASF_STAT_STOP;

			bge_asf_pre_reset_operations(bgep, BGE_INIT_RESET);
		}
		bge_chip_stop(bgep, B_FALSE);

		if (bge_restart(bgep, B_FALSE) == DDI_FAILURE) {
			(void) bge_check_acc_handle(bgep, bgep->cfg_handle);
			(void) bge_check_acc_handle(bgep, bgep->io_handle);
			ddi_fm_service_impact(bgep->devinfo,
			    DDI_SERVICE_DEGRADED);
			mutex_exit(bgep->genlock);
			return (EIO);
		}

		/*
		 * Start our ASF heartbeat counter as soon as possible.
		 */
		if (bgep->asf_status != ASF_STAT_RUN) {
			/* start ASF heart beat */
			bgep->asf_timeout_id = timeout(bge_asf_heartbeat,
			    (void *)bgep,
			    drv_usectohz(BGE_ASF_HEARTBEAT_INTERVAL));
			bgep->asf_status = ASF_STAT_RUN;
		}
	}
#endif
	BGE_DEBUG(("bge_m_unicst_set($%p) done", arg));
	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	mutex_exit(bgep->genlock);

	return (0);
}
/*
 * The following four routines are used as callbacks for multiple MAC
 * address support:
 *    -  bge_m_unicst_add(void *, mac_multi_addr_t *);
 *    -  bge_m_unicst_remove(void *, mac_addr_slot_t);
 *    -  bge_m_unicst_modify(void *, mac_multi_addr_t *);
 *    -  bge_m_unicst_get(void *, mac_multi_addr_t *);
 */

/*
 * bge_m_unicst_add() - will find an unused address slot, set the
 * address value to the one specified, reserve that slot and enable
 * the NIC to start filtering on the new MAC address.
 * Returns 0 on success.
 */
static int
bge_m_unicst_add(void *arg, mac_multi_addr_t *maddr)
{
	bge_t *bgep = arg;		/* private device info	*/
	mac_addr_slot_t slot;
	int err;

	if (mac_unicst_verify(bgep->mh,
	    maddr->mma_addr, maddr->mma_addrlen) == B_FALSE)
		return (EINVAL);

	mutex_enter(bgep->genlock);
	if (bgep->unicst_addr_avail == 0) {
		/* no slots available */
		mutex_exit(bgep->genlock);
		return (ENOSPC);
	}

	/*
	 * Primary/default address is in slot 0. The next three
	 * addresses are the multiple MAC addresses. So multiple
	 * MAC address 0 is in slot 1, 1 in slot 2, and so on.
	 * So the first multiple MAC address resides in slot 1.
	 */
	for (slot = 1; slot < bgep->unicst_addr_total; slot++) {
		if (bgep->curr_addr[slot].set == B_FALSE) {
			bgep->curr_addr[slot].set = B_TRUE;
			break;
		}
	}

	ASSERT(slot < bgep->unicst_addr_total);
	bgep->unicst_addr_avail--;
	mutex_exit(bgep->genlock);
	maddr->mma_slot = slot;

	if ((err = bge_unicst_set(bgep, maddr->mma_addr, slot)) != 0) {
		mutex_enter(bgep->genlock);
		bgep->curr_addr[slot].set = B_FALSE;
		bgep->unicst_addr_avail++;
		mutex_exit(bgep->genlock);
	}
	return (err);
}

/*
 * bge_m_unicst_remove() - removes a MAC address that was added by a
 * call to bge_m_unicst_add(). The slot number that was returned in
 * add() is passed in the call to remove the address.
 * Returns 0 on success.
 */
static int
bge_m_unicst_remove(void *arg, mac_addr_slot_t slot)
{
	bge_t *bgep = arg;		/* private device info	*/

	if (slot <= 0 || slot >= bgep->unicst_addr_total)
		return (EINVAL);

	mutex_enter(bgep->genlock);
	if (bgep->curr_addr[slot].set == B_TRUE) {
		bgep->curr_addr[slot].set = B_FALSE;
		bgep->unicst_addr_avail++;
		mutex_exit(bgep->genlock);
		/*
		 * Copy the default address to the passed slot
		 */
		return (bge_unicst_set(bgep, bgep->curr_addr[0].addr, slot));
	}
	mutex_exit(bgep->genlock);
	return (EINVAL);
}

/*
 * bge_m_unicst_modify() - modifies the value of an address that
 * has been added by bge_m_unicst_add(). The new address, address
 * length and the slot number that was returned in the call to add
 * should be passed to bge_m_unicst_modify(). mma_flags should be
 * set to 0. Returns 0 on success.
 */
static int
bge_m_unicst_modify(void *arg, mac_multi_addr_t *maddr)
{
	bge_t *bgep = arg;		/* private device info	*/
	mac_addr_slot_t slot;

	if (mac_unicst_verify(bgep->mh,
	    maddr->mma_addr, maddr->mma_addrlen) == B_FALSE)
		return (EINVAL);

	slot = maddr->mma_slot;

	if (slot <= 0 || slot >= bgep->unicst_addr_total)
		return (EINVAL);

	mutex_enter(bgep->genlock);
	if (bgep->curr_addr[slot].set == B_TRUE) {
		mutex_exit(bgep->genlock);
		return (bge_unicst_set(bgep, maddr->mma_addr, slot));
	}
	mutex_exit(bgep->genlock);

	return (EINVAL);
}
/*
 * bge_m_unicst_get() - will get the MAC address and all other
 * information related to the address slot passed in mac_multi_addr_t.
 * mma_flags should be set to 0 in the call.
 * On return, mma_flags can take the following values:
 * 1) MMAC_SLOT_UNUSED
 * 2) MMAC_SLOT_USED | MMAC_VENDOR_ADDR
 * 3) MMAC_SLOT_UNUSED | MMAC_VENDOR_ADDR
 * 4) MMAC_SLOT_USED
 */
static int
bge_m_unicst_get(void *arg, mac_multi_addr_t *maddr)
{
	bge_t *bgep = arg;		/* private device info	*/
	mac_addr_slot_t slot;

	slot = maddr->mma_slot;

	if (slot <= 0 || slot >= bgep->unicst_addr_total)
		return (EINVAL);

	mutex_enter(bgep->genlock);
	if (bgep->curr_addr[slot].set == B_TRUE) {
		ethaddr_copy(bgep->curr_addr[slot].addr,
		    maddr->mma_addr);
		maddr->mma_flags = MMAC_SLOT_USED;
	} else {
		maddr->mma_flags = MMAC_SLOT_UNUSED;
	}
	mutex_exit(bgep->genlock);

	return (0);
}
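/*
 * Taken together, the four callbacks above implement the MAC layer's
 * multiple-unicast-address protocol.  A typical exchange, as seen
 * from this driver (the slot number is illustrative only):
 *
 *	bge_m_unicst_add(bgep, &maddr);     maddr.mma_slot = 1 on return
 *	bge_m_unicst_modify(bgep, &maddr);  new address, same slot 1
 *	bge_m_unicst_get(bgep, &maddr);     mma_flags = MMAC_SLOT_USED
 *	bge_m_unicst_remove(bgep, 1);       slot 1 reverts to the
 *					    default (slot 0) address
 */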
extern void bge_wake_factotum(bge_t *);

static boolean_t
bge_param_locked(mac_prop_id_t pr_num)
{
	/*
	 * All adv_* parameters are locked (read-only) while
	 * the device is in any sort of loopback mode ...
	 */
	switch (pr_num) {
	case MAC_PROP_ADV_1000FDX_CAP:
	case MAC_PROP_EN_1000FDX_CAP:
	case MAC_PROP_ADV_1000HDX_CAP:
	case MAC_PROP_EN_1000HDX_CAP:
	case MAC_PROP_ADV_100FDX_CAP:
	case MAC_PROP_EN_100FDX_CAP:
	case MAC_PROP_ADV_100HDX_CAP:
	case MAC_PROP_EN_100HDX_CAP:
	case MAC_PROP_ADV_10FDX_CAP:
	case MAC_PROP_EN_10FDX_CAP:
	case MAC_PROP_ADV_10HDX_CAP:
	case MAC_PROP_EN_10HDX_CAP:
	case MAC_PROP_AUTONEG:
	case MAC_PROP_FLOWCTRL:
		return (B_TRUE);
	}
	return (B_FALSE);
}

/*
 * callback functions for set/get of properties
 */
static int
bge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
    uint_t pr_valsize, const void *pr_val)
{
	bge_t *bgep = barg;
	int err = 0;
	uint32_t cur_mtu, new_mtu;
	uint_t maxsdu;
	link_flowctrl_t fl;

	mutex_enter(bgep->genlock);
	if (bgep->param_loop_mode != BGE_LOOP_NONE &&
	    bge_param_locked(pr_num)) {
		/*
		 * All adv_* parameters are locked (read-only)
		 * while the device is in any sort of loopback mode.
		 */
		mutex_exit(bgep->genlock);
		return (EBUSY);
	}
	if ((bgep->chipid.flags & CHIP_FLAG_SERDES) &&
	    ((pr_num == MAC_PROP_EN_100FDX_CAP) ||
	    (pr_num == MAC_PROP_EN_100HDX_CAP) ||
	    (pr_num == MAC_PROP_EN_10FDX_CAP) ||
	    (pr_num == MAC_PROP_EN_10HDX_CAP))) {
		/*
		 * these properties are read/write on copper,
		 * read-only and 0 on serdes
		 */
		mutex_exit(bgep->genlock);
		return (ENOTSUP);
	}

	switch (pr_num) {
	case MAC_PROP_EN_1000FDX_CAP:
		bgep->param_en_1000fdx = *(uint8_t *)pr_val;
		bgep->param_adv_1000fdx = *(uint8_t *)pr_val;
		goto reprogram;
	case MAC_PROP_EN_1000HDX_CAP:
		bgep->param_en_1000hdx = *(uint8_t *)pr_val;
		bgep->param_adv_1000hdx = *(uint8_t *)pr_val;
		goto reprogram;
	case MAC_PROP_EN_100FDX_CAP:
		bgep->param_en_100fdx = *(uint8_t *)pr_val;
		bgep->param_adv_100fdx = *(uint8_t *)pr_val;
		goto reprogram;
	case MAC_PROP_EN_100HDX_CAP:
		bgep->param_en_100hdx = *(uint8_t *)pr_val;
		bgep->param_adv_100hdx = *(uint8_t *)pr_val;
		goto reprogram;
	case MAC_PROP_EN_10FDX_CAP:
		bgep->param_en_10fdx = *(uint8_t *)pr_val;
		bgep->param_adv_10fdx = *(uint8_t *)pr_val;
		goto reprogram;
	case MAC_PROP_EN_10HDX_CAP:
		bgep->param_en_10hdx = *(uint8_t *)pr_val;
		bgep->param_adv_10hdx = *(uint8_t *)pr_val;
reprogram:
		if (err == 0 && bge_reprogram(bgep) == IOC_INVAL)
			err = EINVAL;
		break;
	case MAC_PROP_ADV_1000FDX_CAP:
	case MAC_PROP_ADV_1000HDX_CAP:
	case MAC_PROP_ADV_100FDX_CAP:
	case MAC_PROP_ADV_100HDX_CAP:
	case MAC_PROP_ADV_10FDX_CAP:
	case MAC_PROP_ADV_10HDX_CAP:
	case MAC_PROP_STATUS:
	case MAC_PROP_SPEED:
	case MAC_PROP_DUPLEX:
		err = ENOTSUP;	/* read-only prop. Can't set this */
		break;
	case MAC_PROP_AUTONEG:
		bgep->param_adv_autoneg = *(uint8_t *)pr_val;
		if (bge_reprogram(bgep) == IOC_INVAL)
			err = EINVAL;
		break;
	case MAC_PROP_MTU:
		cur_mtu = bgep->chipid.default_mtu;
		bcopy(pr_val, &new_mtu, sizeof (new_mtu));

		if (new_mtu == cur_mtu) {
			err = 0;
			break;
		}
		if (new_mtu < BGE_DEFAULT_MTU ||
		    new_mtu > BGE_MAXIMUM_MTU) {
			err = EINVAL;
			break;
		}
		if ((new_mtu > BGE_DEFAULT_MTU) &&
		    (bgep->chipid.flags & CHIP_FLAG_NO_JUMBO)) {
			err = EINVAL;
			break;
		}
		if (bgep->bge_mac_state == BGE_MAC_STARTED) {
			err = EBUSY;
			break;
		}
		bgep->chipid.default_mtu = new_mtu;
		if (bge_chip_id_init(bgep)) {
			err = EINVAL;
			break;
		}
		maxsdu = bgep->chipid.ethmax_size -
		    sizeof (struct ether_header);
		err = mac_maxsdu_update(bgep->mh, maxsdu);
		if (err == 0) {
			bgep->bge_dma_error = B_TRUE;
			bgep->manual_reset = B_TRUE;
			bge_chip_stop(bgep, B_TRUE);
			bge_wake_factotum(bgep);
			err = 0;
		}
		break;
	case MAC_PROP_FLOWCTRL:
		bcopy(pr_val, &fl, sizeof (fl));
		switch (fl) {
		default:
			err = ENOTSUP;
			break;
		case LINK_FLOWCTRL_NONE:
			bgep->param_adv_pause = 0;
			bgep->param_adv_asym_pause = 0;

			bgep->param_link_rx_pause = B_FALSE;
			bgep->param_link_tx_pause = B_FALSE;
			break;
		case LINK_FLOWCTRL_RX:
			if (!((bgep->param_lp_pause == 0) &&
			    (bgep->param_lp_asym_pause == 1))) {
				err = EINVAL;
				break;
			}
			bgep->param_adv_pause = 1;
			bgep->param_adv_asym_pause = 1;

			bgep->param_link_rx_pause = B_TRUE;
			bgep->param_link_tx_pause = B_FALSE;
			break;
		case LINK_FLOWCTRL_TX:
			if (!((bgep->param_lp_pause == 1) &&
			    (bgep->param_lp_asym_pause == 1))) {
				err = EINVAL;
				break;
			}
			bgep->param_adv_pause = 0;
			bgep->param_adv_asym_pause = 1;

			bgep->param_link_rx_pause = B_FALSE;
			bgep->param_link_tx_pause = B_TRUE;
			break;
		case LINK_FLOWCTRL_BI:
			if (bgep->param_lp_pause != 1) {
				err = EINVAL;
				break;
			}
			bgep->param_adv_pause = 1;

			bgep->param_link_rx_pause = B_TRUE;
			bgep->param_link_tx_pause = B_TRUE;
			break;
		}

		if (err == 0) {
			if (bge_reprogram(bgep) == IOC_INVAL)
				err = EINVAL;
		}

		break;
	case MAC_PROP_PRIVATE:
		err = bge_set_priv_prop(bgep, pr_name, pr_valsize,
		    pr_val);
		break;
	default:
		err = ENOTSUP;
		break;
	}
	mutex_exit(bgep->genlock);
	return (err);
}
static int
bge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
    uint_t pr_flags, uint_t pr_valsize, void *pr_val)
{
	bge_t *bgep = barg;
	int err = 0;
	link_flowctrl_t fl;
	uint64_t speed;
	int flags = bgep->chipid.flags;
	boolean_t is_default = (pr_flags & MAC_PROP_DEFAULT);

	if (pr_valsize == 0)
		return (EINVAL);
	bzero(pr_val, pr_valsize);
	switch (pr_num) {
	case MAC_PROP_DUPLEX:
		if (pr_valsize < sizeof (link_duplex_t))
			return (EINVAL);
		bcopy(&bgep->param_link_duplex, pr_val,
		    sizeof (link_duplex_t));
		break;
	case MAC_PROP_SPEED:
		if (pr_valsize < sizeof (speed))
			return (EINVAL);
		speed = bgep->param_link_speed * 1000000ull;
		bcopy(&speed, pr_val, sizeof (speed));
		break;
	case MAC_PROP_STATUS:
		if (pr_valsize < sizeof (link_state_t))
			return (EINVAL);
		bcopy(&bgep->link_state, pr_val,
		    sizeof (link_state_t));
		break;
	case MAC_PROP_AUTONEG:
		if (is_default)
			*(uint8_t *)pr_val = 1;
		else
			*(uint8_t *)pr_val = bgep->param_adv_autoneg;
		break;
	case MAC_PROP_FLOWCTRL:
		if (pr_valsize < sizeof (fl))
			return (EINVAL);
		if (is_default) {
			fl = LINK_FLOWCTRL_BI;
			bcopy(&fl, pr_val, sizeof (fl));
			break;
		}

		if (bgep->param_link_rx_pause &&
		    !bgep->param_link_tx_pause)
			fl = LINK_FLOWCTRL_RX;

		if (!bgep->param_link_rx_pause &&
		    !bgep->param_link_tx_pause)
			fl = LINK_FLOWCTRL_NONE;

		if (!bgep->param_link_rx_pause &&
		    bgep->param_link_tx_pause)
			fl = LINK_FLOWCTRL_TX;

		if (bgep->param_link_rx_pause &&
		    bgep->param_link_tx_pause)
			fl = LINK_FLOWCTRL_BI;
		bcopy(&fl, pr_val, sizeof (fl));
		break;
	case MAC_PROP_ADV_1000FDX_CAP:
		if (is_default)
			*(uint8_t *)pr_val = 1;
		else
			*(uint8_t *)pr_val = bgep->param_adv_1000fdx;
		break;
	case MAC_PROP_EN_1000FDX_CAP:
		if (is_default)
			*(uint8_t *)pr_val = 1;
		else
			*(uint8_t *)pr_val = bgep->param_en_1000fdx;
		break;
	case MAC_PROP_ADV_1000HDX_CAP:
		if (is_default)
			*(uint8_t *)pr_val = 1;
		else
			*(uint8_t *)pr_val = bgep->param_adv_1000hdx;
		break;
	case MAC_PROP_EN_1000HDX_CAP:
		if (is_default)
			*(uint8_t *)pr_val = 1;
		else
			*(uint8_t *)pr_val = bgep->param_en_1000hdx;
		break;
	case MAC_PROP_ADV_100FDX_CAP:
		if (is_default) {
			*(uint8_t *)pr_val =
			    ((flags & CHIP_FLAG_SERDES) ? 0 : 1);
		} else {
			*(uint8_t *)pr_val = bgep->param_adv_100fdx;
		}
		break;
	case MAC_PROP_EN_100FDX_CAP:
		if (is_default) {
			*(uint8_t *)pr_val =
			    ((flags & CHIP_FLAG_SERDES) ? 0 : 1);
		} else {
			*(uint8_t *)pr_val = bgep->param_en_100fdx;
		}
		break;
	case MAC_PROP_ADV_100HDX_CAP:
		if (is_default) {
			*(uint8_t *)pr_val =
			    ((flags & CHIP_FLAG_SERDES) ? 0 : 1);
		} else {
			*(uint8_t *)pr_val = bgep->param_adv_100hdx;
		}
		break;
	case MAC_PROP_EN_100HDX_CAP:
		if (is_default) {
			*(uint8_t *)pr_val =
			    ((flags & CHIP_FLAG_SERDES) ? 0 : 1);
		} else {
			*(uint8_t *)pr_val = bgep->param_en_100hdx;
		}
		break;
	case MAC_PROP_ADV_10FDX_CAP:
		if (is_default) {
			*(uint8_t *)pr_val =
			    ((flags & CHIP_FLAG_SERDES) ? 0 : 1);
		} else {
			*(uint8_t *)pr_val = bgep->param_adv_10fdx;
		}
		break;
	case MAC_PROP_EN_10FDX_CAP:
		if (is_default) {
			*(uint8_t *)pr_val =
			    ((flags & CHIP_FLAG_SERDES) ? 0 : 1);
		} else {
			*(uint8_t *)pr_val = bgep->param_en_10fdx;
		}
		break;
	case MAC_PROP_ADV_10HDX_CAP:
		if (is_default) {
			*(uint8_t *)pr_val =
			    ((flags & CHIP_FLAG_SERDES) ? 0 : 1);
		} else {
			*(uint8_t *)pr_val = bgep->param_adv_10hdx;
		}
		break;
	case MAC_PROP_EN_10HDX_CAP:
		if (is_default) {
			*(uint8_t *)pr_val =
			    ((flags & CHIP_FLAG_SERDES) ? 0 : 1);
		} else {
			*(uint8_t *)pr_val = bgep->param_en_10hdx;
		}
		break;
	case MAC_PROP_ADV_100T4_CAP:
	case MAC_PROP_EN_100T4_CAP:
		*(uint8_t *)pr_val = 0;
		break;
	case MAC_PROP_PRIVATE:
		err = bge_get_priv_prop(bgep, pr_name, pr_flags,
		    pr_valsize, pr_val);
		return (err);
	default:
		return (ENOTSUP);
	}
	return (0);
}
/* ARGSUSED */
static int
bge_set_priv_prop(bge_t *bgep, const char *pr_name, uint_t pr_valsize,
    const void *pr_val)
{
	int err = 0;
	long result;

	if (strcmp(pr_name, "_adv_pause_cap") == 0) {
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
		if (result > 1 || result < 0) {
			err = EINVAL;
		} else {
			bgep->param_adv_pause = (uint32_t)result;
			if (bge_reprogram(bgep) == IOC_INVAL)
				err = EINVAL;
		}
		return (err);
	}
	if (strcmp(pr_name, "_adv_asym_pause_cap") == 0) {
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
		if (result > 1 || result < 0) {
			err = EINVAL;
		} else {
			bgep->param_adv_asym_pause = (uint32_t)result;
			if (bge_reprogram(bgep) == IOC_INVAL)
				err = EINVAL;
		}
		return (err);
	}
	if (strcmp(pr_name, "_drain_max") == 0) {

		/*
		 * on the Tx side, we need to update the h/w register for
		 * real packet transmission per packet. The drain_max
		 * parameter is used to reduce the register access. This
		 * parameter controls the max number of packets that we
		 * will hold before updating the bge h/w to trigger h/w
		 * transmit. The bge chipset usually has a max of 512 Tx
		 * descriptors, thus the upper bound on drain_max is 512.
		 */
		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
		if (result > 512 || result < 1)
			err = EINVAL;
		else {
			bgep->param_drain_max = (uint32_t)result;
			if (bge_reprogram(bgep) == IOC_INVAL)
				err = EINVAL;
		}
		return (err);
	}
	if (strcmp(pr_name, "_msi_cnt") == 0) {

		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
		if (result > 7 || result < 0)
			err = EINVAL;
		else {
			bgep->param_msi_cnt = (uint32_t)result;
			if (bge_reprogram(bgep) == IOC_INVAL)
				err = EINVAL;
		}
		return (err);
	}
	if (strcmp(pr_name, "_intr_coalesce_blank_time") == 0) {
		if (ddi_strtol(pr_val, (char **)NULL, 0, &result) != 0)
			return (EINVAL);

		bgep->chipid.rx_ticks_norm = (uint32_t)result;
		return (0);
	}

	if (strcmp(pr_name, "_intr_coalesce_pkt_cnt") == 0) {
		if (ddi_strtol(pr_val, (char **)NULL, 0, &result) != 0)
			return (EINVAL);

		bgep->chipid.rx_count_norm = (uint32_t)result;
		return (0);
	}
	return (ENOTSUP);
}
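/*
 * Private properties are reached through the MAC_PROP_PRIVATE case in
 * bge_m_setprop()/bge_m_getprop() above; the string value arrives in
 * pr_val and is parsed with ddi_strtol().  From userland they are
 * normally driven with dladm(1M); for example (assuming the link is
 * named bge0):
 *
 *	# dladm set-linkprop -p _drain_max=64 bge0
 *	# dladm show-linkprop -p _intr_coalesce_blank_time bge0
 */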
static int
bge_get_priv_prop(bge_t *bge, const char *pr_name, uint_t pr_flags,
    uint_t pr_valsize, void *pr_val)
{
	int err = ENOTSUP;
	boolean_t is_default = (pr_flags & MAC_PROP_DEFAULT);
	int value;

	if (strcmp(pr_name, "_adv_pause_cap") == 0) {
		value = (is_default ? 1 : bge->param_adv_pause);
		err = 0;
		goto done;
	}
	if (strcmp(pr_name, "_adv_asym_pause_cap") == 0) {
		value = (is_default ? 1 : bge->param_adv_asym_pause);
		err = 0;
		goto done;
	}
	if (strcmp(pr_name, "_drain_max") == 0) {
		value = (is_default ? 64 : bge->param_drain_max);
		err = 0;
		goto done;
	}
	if (strcmp(pr_name, "_msi_cnt") == 0) {
		value = (is_default ? 0 : bge->param_msi_cnt);
		err = 0;
		goto done;
	}

	if (strcmp(pr_name, "_intr_coalesce_blank_time") == 0) {
		value = (is_default ? bge_rx_ticks_norm :
		    bge->chipid.rx_ticks_norm);
		err = 0;
		goto done;
	}

	if (strcmp(pr_name, "_intr_coalesce_pkt_cnt") == 0) {
		value = (is_default ? bge_rx_count_norm :
		    bge->chipid.rx_count_norm);
		err = 0;
		goto done;
	}

done:
	if (err == 0) {
		(void) snprintf(pr_val, pr_valsize, "%d", value);
	}
	return (err);
}

/*
 * Compute the index of the required bit in the multicast hash map.
 * This must mirror the way the hardware actually does it!
 * See Broadcom document 570X-PG102-R page 125.
 */
static uint32_t
bge_hash_index(const uint8_t *mca)
{
	uint32_t hash;

	CRC32(hash, mca, ETHERADDRL, -1U, crc32_table);

	return (hash);
}
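/*
 * The caller (bge_m_multicst() below) folds this CRC down to a bit
 * position within the BGE_HASH_TABLE_SIZE-bit hash map.  As a worked
 * example (the index value is illustrative only): if
 * hash % BGE_HASH_TABLE_SIZE came to 77, then word = 77/32 = 2 and
 * bit = 1 << (77%32) = 1 << 13, i.e. bit 13 of mcast_hash[2].
 */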
/*
 *	bge_m_multicst() -- enable/disable a multicast address
 */
static int
bge_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
{
	bge_t *bgep = arg;		/* private device info	*/
	uint32_t hash;
	uint32_t index;
	uint32_t word;
	uint32_t bit;
	uint8_t *refp;

	BGE_TRACE(("bge_m_multicst($%p, %s, %s)", arg,
	    (add) ? "add" : "remove", ether_sprintf((void *)mca)));

	/*
	 * Precalculate all required masks, pointers etc ...
	 */
	hash = bge_hash_index(mca);
	index = hash % BGE_HASH_TABLE_SIZE;
	word = index/32u;
	bit = 1 << (index % 32u);
	refp = &bgep->mcast_refs[index];

	BGE_DEBUG(("bge_m_multicst: hash 0x%x index %d (%d:0x%x) = %d",
	    hash, index, word, bit, *refp));

	/*
	 * We must set the appropriate bit in the hash map (and the
	 * corresponding h/w register) when the refcount goes from 0
	 * to >0, and clear it when the last ref goes away (refcount
	 * goes from >0 back to 0).  If we change the hash map, we
	 * must also update the chip's hardware map registers.
	 */
	mutex_enter(bgep->genlock);
	if (!(bgep->progress & PROGRESS_INTR)) {
		/* can happen during autorecovery */
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	if (add) {
		if ((*refp)++ == 0) {
			bgep->mcast_hash[word] |= bit;
#ifdef BGE_IPMI_ASF
			if (bge_chip_sync(bgep, B_TRUE) == DDI_FAILURE) {
#else
			if (bge_chip_sync(bgep) == DDI_FAILURE) {
#endif
				(void) bge_check_acc_handle(bgep,
				    bgep->cfg_handle);
				(void) bge_check_acc_handle(bgep,
				    bgep->io_handle);
				ddi_fm_service_impact(bgep->devinfo,
				    DDI_SERVICE_DEGRADED);
				mutex_exit(bgep->genlock);
				return (EIO);
			}
		}
	} else {
		if (--(*refp) == 0) {
			bgep->mcast_hash[word] &= ~bit;
#ifdef BGE_IPMI_ASF
			if (bge_chip_sync(bgep, B_TRUE) == DDI_FAILURE) {
#else
			if (bge_chip_sync(bgep) == DDI_FAILURE) {
#endif
				(void) bge_check_acc_handle(bgep,
				    bgep->cfg_handle);
				(void) bge_check_acc_handle(bgep,
				    bgep->io_handle);
				ddi_fm_service_impact(bgep->devinfo,
				    DDI_SERVICE_DEGRADED);
				mutex_exit(bgep->genlock);
				return (EIO);
			}
		}
	}
	BGE_DEBUG(("bge_m_multicst($%p) done", arg));
	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	mutex_exit(bgep->genlock);

	return (0);
}
/*
 * bge_m_promisc() -- set or reset promiscuous mode on the board
 *
 *	Program the hardware to enable/disable promiscuous and/or
 *	receive-all-multicast modes.
 */
static int
bge_m_promisc(void *arg, boolean_t on)
{
	bge_t *bgep = arg;

	BGE_TRACE(("bge_m_promisc_set($%p, %d)", arg, on));

	/*
	 * Store MAC layer specified mode and pass to chip layer to update h/w
	 */
	mutex_enter(bgep->genlock);
	if (!(bgep->progress & PROGRESS_INTR)) {
		/* can happen during autorecovery */
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	bgep->promisc = on;
#ifdef BGE_IPMI_ASF
	if (bge_chip_sync(bgep, B_TRUE) == DDI_FAILURE) {
#else
	if (bge_chip_sync(bgep) == DDI_FAILURE) {
#endif
		(void) bge_check_acc_handle(bgep, bgep->cfg_handle);
		(void) bge_check_acc_handle(bgep, bgep->io_handle);
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	BGE_DEBUG(("bge_m_promisc_set($%p) done", arg));
	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	mutex_exit(bgep->genlock);
	return (0);
}

/*ARGSUSED*/
static boolean_t
bge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
{
	bge_t *bgep = arg;

	switch (cap) {
	case MAC_CAPAB_HCKSUM: {
		uint32_t *txflags = cap_data;

		*txflags = HCKSUM_INET_FULL_V4 | HCKSUM_IPHDRCKSUM;
		break;
	}

	case MAC_CAPAB_POLL:
		/*
		 * There's nothing for us to fill in, simply returning
		 * B_TRUE stating that we support polling is sufficient.
		 */
		break;

	case MAC_CAPAB_MULTIADDRESS: {
		multiaddress_capab_t *mmacp = cap_data;

		mutex_enter(bgep->genlock);
		/*
		 * The number of MAC addresses made available by
		 * this capability is one less than the total as
		 * the primary address in slot 0 is counted in
		 * the total.
		 */
		mmacp->maddr_naddr = bgep->unicst_addr_total - 1;
		mmacp->maddr_naddrfree = bgep->unicst_addr_avail;
		/* No multiple factory addresses, set mma_flag to 0 */
		mmacp->maddr_flag = 0;
		mmacp->maddr_handle = bgep;
		mmacp->maddr_add = bge_m_unicst_add;
		mmacp->maddr_remove = bge_m_unicst_remove;
		mmacp->maddr_modify = bge_m_unicst_modify;
		mmacp->maddr_get = bge_m_unicst_get;
		mmacp->maddr_reserve = NULL;
		mutex_exit(bgep->genlock);
		break;
	}

	default:
		return (B_FALSE);
	}
	return (B_TRUE);
}

/*
 * Loopback ioctl code
 */

static lb_property_t loopmodes[] = {
	{ normal,	"normal",	BGE_LOOP_NONE		},
	{ external,	"1000Mbps",	BGE_LOOP_EXTERNAL_1000	},
	{ external,	"100Mbps",	BGE_LOOP_EXTERNAL_100	},
	{ external,	"10Mbps",	BGE_LOOP_EXTERNAL_10	},
	{ internal,	"PHY",		BGE_LOOP_INTERNAL_PHY	},
	{ internal,	"MAC",		BGE_LOOP_INTERNAL_MAC	}
};

static enum ioc_reply
bge_set_loop_mode(bge_t *bgep, uint32_t mode)
{
	/*
	 * If the mode isn't being changed, there's nothing to do ...
	 */
	if (mode == bgep->param_loop_mode)
		return (IOC_ACK);

	/*
	 * Validate the requested mode and prepare a suitable message
	 * to explain the link down/up cycle that the change will
	 * probably induce ...
	 */
	switch (mode) {
	default:
		return (IOC_INVAL);

	case BGE_LOOP_NONE:
	case BGE_LOOP_EXTERNAL_1000:
	case BGE_LOOP_EXTERNAL_100:
	case BGE_LOOP_EXTERNAL_10:
	case BGE_LOOP_INTERNAL_PHY:
	case BGE_LOOP_INTERNAL_MAC:
		break;
	}

	/*
	 * All OK; tell the caller to reprogram
	 * the PHY and/or MAC for the new mode ...
	 */
	bgep->param_loop_mode = mode;
	return (IOC_RESTART_ACK);
}
static enum ioc_reply
bge_loop_ioctl(bge_t *bgep, queue_t *wq, mblk_t *mp, struct iocblk *iocp)
{
	lb_info_sz_t *lbsp;
	lb_property_t *lbpp;
	uint32_t *lbmp;
	int cmd;

	_NOTE(ARGUNUSED(wq))

	/*
	 * Validate format of ioctl
	 */
	if (mp->b_cont == NULL)
		return (IOC_INVAL);

	cmd = iocp->ioc_cmd;
	switch (cmd) {
	default:
		/* NOTREACHED */
		bge_error(bgep, "bge_loop_ioctl: invalid cmd 0x%x", cmd);
		return (IOC_INVAL);

	case LB_GET_INFO_SIZE:
		if (iocp->ioc_count != sizeof (lb_info_sz_t))
			return (IOC_INVAL);
		lbsp = (void *)mp->b_cont->b_rptr;
		*lbsp = sizeof (loopmodes);
		return (IOC_REPLY);

	case LB_GET_INFO:
		if (iocp->ioc_count != sizeof (loopmodes))
			return (IOC_INVAL);
		lbpp = (void *)mp->b_cont->b_rptr;
		bcopy(loopmodes, lbpp, sizeof (loopmodes));
		return (IOC_REPLY);

	case LB_GET_MODE:
		if (iocp->ioc_count != sizeof (uint32_t))
			return (IOC_INVAL);
		lbmp = (void *)mp->b_cont->b_rptr;
		*lbmp = bgep->param_loop_mode;
		return (IOC_REPLY);

	case LB_SET_MODE:
		if (iocp->ioc_count != sizeof (uint32_t))
			return (IOC_INVAL);
		lbmp = (void *)mp->b_cont->b_rptr;
		return (bge_set_loop_mode(bgep, *lbmp));
	}
}
/*
 * Specific bge IOCTLs, the gld module handles the generic ones.
 */
static void
bge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
{
	bge_t *bgep = arg;
	struct iocblk *iocp;
	enum ioc_reply status;
	boolean_t need_privilege;
	int err;
	int cmd;

	/*
	 * Validate the command before bothering with the mutex ...
	 */
	iocp = (void *)mp->b_rptr;
	iocp->ioc_error = 0;
	need_privilege = B_TRUE;
	cmd = iocp->ioc_cmd;
	switch (cmd) {
	default:
		miocnak(wq, mp, 0, EINVAL);
		return;

	case BGE_MII_READ:
	case BGE_MII_WRITE:
	case BGE_SEE_READ:
	case BGE_SEE_WRITE:
	case BGE_FLASH_READ:
	case BGE_FLASH_WRITE:
	case BGE_DIAG:
	case BGE_PEEK:
	case BGE_POKE:
	case BGE_PHY_RESET:
	case BGE_SOFT_RESET:
	case BGE_HARD_RESET:
		break;

	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
	case LB_GET_MODE:
		need_privilege = B_FALSE;
		/* FALLTHRU */
	case LB_SET_MODE:
		break;
	}

	if (need_privilege) {
		/*
		 * Check for specific net_config privilege on Solaris 10+.
		 */
		err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
		if (err != 0) {
			miocnak(wq, mp, 0, err);
			return;
		}
	}

	mutex_enter(bgep->genlock);
	if (!(bgep->progress & PROGRESS_INTR)) {
		/* can happen during autorecovery */
		mutex_exit(bgep->genlock);
		miocnak(wq, mp, 0, EIO);
		return;
	}

	switch (cmd) {
	default:
		_NOTE(NOTREACHED)
		status = IOC_INVAL;
		break;

	case BGE_MII_READ:
	case BGE_MII_WRITE:
	case BGE_SEE_READ:
	case BGE_SEE_WRITE:
	case BGE_FLASH_READ:
	case BGE_FLASH_WRITE:
	case BGE_DIAG:
	case BGE_PEEK:
	case BGE_POKE:
	case BGE_PHY_RESET:
	case BGE_SOFT_RESET:
	case BGE_HARD_RESET:
		status = bge_chip_ioctl(bgep, wq, mp, iocp);
		break;

	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
	case LB_GET_MODE:
	case LB_SET_MODE:
		status = bge_loop_ioctl(bgep, wq, mp, iocp);
		break;
	}

	/*
	 * Do we need to reprogram the PHY and/or the MAC?
	 * Do it now, while we still have the mutex.
	 *
	 * Note: update the PHY first, 'cos it controls the
	 * speed/duplex parameters that the MAC code uses.
	 */
	switch (status) {
	case IOC_RESTART_REPLY:
	case IOC_RESTART_ACK:
		if (bge_reprogram(bgep) == IOC_INVAL)
			status = IOC_INVAL;
		break;
	}

	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		status = IOC_INVAL;
	}
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		status = IOC_INVAL;
	}
	mutex_exit(bgep->genlock);

	/*
	 * Finally, decide how to reply
	 */
	switch (status) {
	default:
	case IOC_INVAL:
		/*
		 * Error, reply with a NAK and EINVAL or the specified error
		 */
		miocnak(wq, mp, 0, iocp->ioc_error == 0 ?
		    EINVAL : iocp->ioc_error);
		break;

	case IOC_DONE:
		/*
		 * OK, reply already sent
		 */
		break;

	case IOC_RESTART_ACK:
	case IOC_ACK:
		/*
		 * OK, reply with an ACK
		 */
		miocack(wq, mp, 0, 0);
		break;

	case IOC_RESTART_REPLY:
	case IOC_REPLY:
		/*
		 * OK, send prepared reply as ACK or NAK
		 */
		mp->b_datap->db_type = iocp->ioc_error == 0 ?
		    M_IOCACK : M_IOCNAK;
		qreply(wq, mp);
		break;
	}
}

static void
bge_resources_add(bge_t *bgep, time_t time, uint_t pkt_cnt)
{
	recv_ring_t *rrp;
	mac_rx_fifo_t mrf;
	int ring;

	/*
	 * Register Rx rings as resources and save mac
	 * resource id for future reference
	 */
	mrf.mrf_type = MAC_RX_FIFO;
	mrf.mrf_blank = bge_chip_blank;
	mrf.mrf_arg = (void *)bgep;
	mrf.mrf_normal_blank_time = time;
	mrf.mrf_normal_pkt_count = pkt_cnt;

	for (ring = 0; ring < bgep->chipid.rx_rings; ring++) {
		rrp = &bgep->recv[ring];
		rrp->handle = mac_resource_add(bgep->mh,
		    (mac_resource_t *)&mrf);
	}
}

static void
bge_m_resources(void *arg)
{
	bge_t *bgep = arg;

	mutex_enter(bgep->genlock);

	bge_resources_add(bgep, bgep->chipid.rx_ticks_norm,
	    bgep->chipid.rx_count_norm);
	mutex_exit(bgep->genlock);
}
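/*
 * The blanking time and packet count handed to the MAC layer above
 * are the same rx_ticks_norm/rx_count_norm values that can be tuned
 * through the _intr_coalesce_blank_time and _intr_coalesce_pkt_cnt
 * private properties in bge_set_priv_prop().
 */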
/*
 * ========== Per-instance setup/teardown code ==========
 */

#undef	BGE_DBG
#define	BGE_DBG		BGE_DBG_INIT	/* debug flag for this code	*/

/*
 * Allocate an area of memory and a DMA handle for accessing it
 */
static int
bge_alloc_dma_mem(bge_t *bgep, size_t memsize, ddi_device_acc_attr_t *attr_p,
    uint_t dma_flags, dma_area_t *dma_p)
{
	caddr_t va;
	int err;

	BGE_TRACE(("bge_alloc_dma_mem($%p, %ld, $%p, 0x%x, $%p)",
	    (void *)bgep, memsize, attr_p, dma_flags, dma_p));

	/*
	 * Allocate handle
	 */
	err = ddi_dma_alloc_handle(bgep->devinfo, &dma_attr,
	    DDI_DMA_DONTWAIT, NULL, &dma_p->dma_hdl);
	if (err != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * Allocate memory
	 */
	err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize, attr_p,
	    dma_flags, DDI_DMA_DONTWAIT, NULL, &va, &dma_p->alength,
	    &dma_p->acc_hdl);
	if (err != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * Bind the two together
	 */
	dma_p->mem_va = va;
	err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL,
	    va, dma_p->alength, dma_flags, DDI_DMA_DONTWAIT, NULL,
	    &dma_p->cookie, &dma_p->ncookies);

	BGE_DEBUG(("bge_alloc_dma_mem(): bind %d bytes; err %d, %d cookies",
	    dma_p->alength, err, dma_p->ncookies));

	if (err != DDI_DMA_MAPPED || dma_p->ncookies != 1)
		return (DDI_FAILURE);

	dma_p->nslots = ~0U;
	dma_p->size = ~0U;
	dma_p->token = ~0U;
	dma_p->offset = 0;
	return (DDI_SUCCESS);
}
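/*
 * bge_alloc_dma_mem() above follows the canonical three-step DDI DMA
 * allocation sequence: ddi_dma_alloc_handle(), then
 * ddi_dma_mem_alloc(), then ddi_dma_addr_bind_handle().
 * bge_free_dma_mem() below unwinds the same steps in reverse (unbind,
 * free memory, free handle); because the handles already obtained
 * remain recorded in dma_p, it copes with whichever of the steps
 * actually completed before a failure.
 */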
/*
 * Free one allocated area of DMAable memory
 */
static void
bge_free_dma_mem(dma_area_t *dma_p)
{
	if (dma_p->dma_hdl != NULL) {
		if (dma_p->ncookies) {
			(void) ddi_dma_unbind_handle(dma_p->dma_hdl);
			dma_p->ncookies = 0;
		}
		ddi_dma_free_handle(&dma_p->dma_hdl);
		dma_p->dma_hdl = NULL;
	}

	if (dma_p->acc_hdl != NULL) {
		ddi_dma_mem_free(&dma_p->acc_hdl);
		dma_p->acc_hdl = NULL;
	}
}

/*
 * Utility routine to carve a slice off a chunk of allocated memory,
 * updating the chunk descriptor accordingly.  The size of the slice
 * is given by the product of the <qty> and <size> parameters.
 */
static void
bge_slice_chunk(dma_area_t *slice, dma_area_t *chunk,
    uint32_t qty, uint32_t size)
{
	static uint32_t sequence = 0xbcd5704a;
	size_t totsize;

	totsize = qty*size;
	ASSERT(totsize <= chunk->alength);

	*slice = *chunk;
	slice->nslots = qty;
	slice->size = size;
	slice->alength = totsize;
	slice->token = ++sequence;

	chunk->mem_va = (caddr_t)chunk->mem_va + totsize;
	chunk->alength -= totsize;
	chunk->offset += totsize;
	chunk->cookie.dmac_laddress += totsize;
	chunk->cookie.dmac_size -= totsize;
}
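/*
 * For example, bge_init_send_ring() below carves its descriptor area
 * into <nslots> slices of sizeof (bge_sbd_t) bytes each by calling
 * bge_slice_chunk() once per slot; after the last call the chunk's
 * remaining <alength> must be exactly zero, which the callers assert.
 */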
/*
 * Initialise the specified Receive Producer (Buffer) Ring, using
 * the information in the <dma_area> descriptors that it contains
 * to set up all the other fields.  This routine should be called
 * only once for each ring.
 */
static void
bge_init_buff_ring(bge_t *bgep, uint64_t ring)
{
	buff_ring_t *brp;
	bge_status_t *bsp;
	sw_rbd_t *srbdp;
	dma_area_t pbuf;
	uint32_t bufsize;
	uint32_t nslots;
	uint32_t slot;
	uint32_t split;

	static bge_regno_t nic_ring_addrs[BGE_BUFF_RINGS_MAX] = {
		NIC_MEM_SHADOW_BUFF_STD,
		NIC_MEM_SHADOW_BUFF_JUMBO,
		NIC_MEM_SHADOW_BUFF_MINI
	};
	static bge_regno_t mailbox_regs[BGE_BUFF_RINGS_MAX] = {
		RECV_STD_PROD_INDEX_REG,
		RECV_JUMBO_PROD_INDEX_REG,
		RECV_MINI_PROD_INDEX_REG
	};
	static bge_regno_t buff_cons_xref[BGE_BUFF_RINGS_MAX] = {
		STATUS_STD_BUFF_CONS_INDEX,
		STATUS_JUMBO_BUFF_CONS_INDEX,
		STATUS_MINI_BUFF_CONS_INDEX
	};

	BGE_TRACE(("bge_init_buff_ring($%p, %d)",
	    (void *)bgep, ring));

	brp = &bgep->buff[ring];
	nslots = brp->desc.nslots;
	ASSERT(brp->buf[0].nslots == nslots/BGE_SPLIT);
	bufsize = brp->buf[0].size;

	/*
	 * Set up the copy of the h/w RCB
	 *
	 * Note: unlike Send & Receive Return Rings, (where the max_len
	 * field holds the number of slots), in a Receive Buffer Ring
	 * this field indicates the size of each buffer in the ring.
	 */
	brp->hw_rcb.host_ring_addr = brp->desc.cookie.dmac_laddress;
	brp->hw_rcb.max_len = (uint16_t)bufsize;
	brp->hw_rcb.flags = nslots > 0 ? 0 : RCB_FLAG_RING_DISABLED;
	brp->hw_rcb.nic_ring_addr = nic_ring_addrs[ring];

	/*
	 * Other one-off initialisation of per-ring data
	 */
	brp->bgep = bgep;
	bsp = DMA_VPTR(bgep->status_block);
	brp->cons_index_p = &bsp->buff_cons_index[buff_cons_xref[ring]];
	brp->chip_mbx_reg = mailbox_regs[ring];
	mutex_init(brp->rf_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(bgep->intr_pri));

	/*
	 * Allocate the array of s/w Receive Buffer Descriptors
	 */
	srbdp = kmem_zalloc(nslots*sizeof (*srbdp), KM_SLEEP);
	brp->sw_rbds = srbdp;

	/*
	 * Now initialise each array element once and for all
	 */
	for (split = 0; split < BGE_SPLIT; ++split) {
		pbuf = brp->buf[split];
		for (slot = 0; slot < nslots/BGE_SPLIT; ++srbdp, ++slot)
			bge_slice_chunk(&srbdp->pbuf, &pbuf, 1, bufsize);
		ASSERT(pbuf.alength == 0);
	}
}

/*
 * Clean up initialisation done above before the memory is freed
 */
static void
bge_fini_buff_ring(bge_t *bgep, uint64_t ring)
{
	buff_ring_t *brp;
	sw_rbd_t *srbdp;

	BGE_TRACE(("bge_fini_buff_ring($%p, %d)",
	    (void *)bgep, ring));

	brp = &bgep->buff[ring];
	srbdp = brp->sw_rbds;
	kmem_free(srbdp, brp->desc.nslots*sizeof (*srbdp));

	mutex_destroy(brp->rf_lock);
}

/*
 * Initialise the specified Receive (Return) Ring, using the
 * information in the <dma_area> descriptors that it contains
 * to set up all the other fields.  This routine should be called
 * only once for each ring.
 */
static void
bge_init_recv_ring(bge_t *bgep, uint64_t ring)
{
	recv_ring_t *rrp;
	bge_status_t *bsp;
	uint32_t nslots;

	BGE_TRACE(("bge_init_recv_ring($%p, %d)",
	    (void *)bgep, ring));

	/*
	 * The chip architecture requires that receive return rings have
	 * 512 or 1024 or 2048 elements per ring.  See 570X-PG108-R page 103.
	 */
	rrp = &bgep->recv[ring];
	nslots = rrp->desc.nslots;
	ASSERT(nslots == 0 || nslots == 512 ||
	    nslots == 1024 || nslots == 2048);

	/*
	 * Set up the copy of the h/w RCB
	 */
	rrp->hw_rcb.host_ring_addr = rrp->desc.cookie.dmac_laddress;
	rrp->hw_rcb.max_len = (uint16_t)nslots;
	rrp->hw_rcb.flags = nslots > 0 ? 0 : RCB_FLAG_RING_DISABLED;
	rrp->hw_rcb.nic_ring_addr = 0;

	/*
	 * Other one-off initialisation of per-ring data
	 */
	rrp->bgep = bgep;
	bsp = DMA_VPTR(bgep->status_block);
	rrp->prod_index_p = RECV_INDEX_P(bsp, ring);
	rrp->chip_mbx_reg = RECV_RING_CONS_INDEX_REG(ring);
	mutex_init(rrp->rx_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(bgep->intr_pri));
}


/*
 * Clean up initialisation done above before the memory is freed
 */
static void
bge_fini_recv_ring(bge_t *bgep, uint64_t ring)
{
	recv_ring_t *rrp;

	BGE_TRACE(("bge_fini_recv_ring($%p, %d)",
	    (void *)bgep, ring));

	rrp = &bgep->recv[ring];
	if (rrp->rx_softint)
		ddi_remove_softintr(rrp->rx_softint);
	mutex_destroy(rrp->rx_lock);
}

/*
 * Initialise the specified Send Ring, using the information in the
 * <dma_area> descriptors that it contains to set up all the other
 * fields. This routine should be called only once for each ring.
 */
static void
bge_init_send_ring(bge_t *bgep, uint64_t ring)
{
    send_ring_t *srp;
    bge_status_t *bsp;
    sw_sbd_t *ssbdp;
    dma_area_t desc;
    dma_area_t pbuf;
    uint32_t nslots;
    uint32_t slot;
    uint32_t split;
    sw_txbuf_t *txbuf;

    BGE_TRACE(("bge_init_send_ring($%p, %d)",
        (void *)bgep, ring));

    /*
     * The chip architecture requires that host-based send rings
     * have 512 elements per ring. See 570X-PG102-R page 56.
     */
    srp = &bgep->send[ring];
    nslots = srp->desc.nslots;
    ASSERT(nslots == 0 || nslots == 512);

    /*
     * Set up the copy of the h/w RCB
     */
    srp->hw_rcb.host_ring_addr = srp->desc.cookie.dmac_laddress;
    srp->hw_rcb.max_len = (uint16_t)nslots;
    srp->hw_rcb.flags = nslots > 0 ? 0 : RCB_FLAG_RING_DISABLED;
    srp->hw_rcb.nic_ring_addr = NIC_MEM_SHADOW_SEND_RING(ring, nslots);

    /*
     * Other one-off initialisation of per-ring data
     */
    srp->bgep = bgep;
    bsp = DMA_VPTR(bgep->status_block);
    srp->cons_index_p = SEND_INDEX_P(bsp, ring);
    srp->chip_mbx_reg = SEND_RING_HOST_INDEX_REG(ring);
    mutex_init(srp->tx_lock, NULL, MUTEX_DRIVER,
        DDI_INTR_PRI(bgep->intr_pri));
    mutex_init(srp->txbuf_lock, NULL, MUTEX_DRIVER,
        DDI_INTR_PRI(bgep->intr_pri));
    mutex_init(srp->freetxbuf_lock, NULL, MUTEX_DRIVER,
        DDI_INTR_PRI(bgep->intr_pri));
    mutex_init(srp->tc_lock, NULL, MUTEX_DRIVER,
        DDI_INTR_PRI(bgep->intr_pri));
    if (nslots == 0)
        return;

    /*
     * Allocate the array of s/w Send Buffer Descriptors
     */
    ssbdp = kmem_zalloc(nslots*sizeof (*ssbdp), KM_SLEEP);
    txbuf = kmem_zalloc(BGE_SEND_BUF_MAX*sizeof (*txbuf), KM_SLEEP);
    srp->txbuf_head =
        kmem_zalloc(BGE_SEND_BUF_MAX*sizeof (bge_queue_item_t), KM_SLEEP);
    srp->pktp = kmem_zalloc(BGE_SEND_BUF_MAX*sizeof (send_pkt_t), KM_SLEEP);
    srp->sw_sbds = ssbdp;
    srp->txbuf = txbuf;
    srp->tx_buffers = BGE_SEND_BUF_NUM;
    srp->tx_buffers_low = srp->tx_buffers / 4;
    if (bgep->chipid.snd_buff_size > BGE_SEND_BUFF_SIZE_DEFAULT)
        srp->tx_array_max = BGE_SEND_BUF_ARRAY_JUMBO;
    else
        srp->tx_array_max = BGE_SEND_BUF_ARRAY;
    srp->tx_array = 1;

    /*
     * Chunk tx desc area
     */
    desc = srp->desc;
    for (slot = 0; slot < nslots; ++ssbdp, ++slot) {
        bge_slice_chunk(&ssbdp->desc, &desc, 1,
            sizeof (bge_sbd_t));
    }
    ASSERT(desc.alength == 0);

    /*
     * Chunk tx buffer area
     */
    for (split = 0; split < BGE_SPLIT; ++split) {
        pbuf = srp->buf[0][split];
        for (slot = 0; slot < BGE_SEND_BUF_NUM/BGE_SPLIT; ++slot) {
            bge_slice_chunk(&txbuf->buf, &pbuf, 1,
                bgep->chipid.snd_buff_size);
            txbuf++;
        }
        ASSERT(pbuf.alength == 0);
    }
}
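
/*
 * Worked example of the descriptor carving above, assuming the usual
 * 512-slot host send ring and a 16-byte bge_sbd_t (an assumption for
 * illustration; the real sizes come from bge_impl.h): the loop peels
 * 512 one-descriptor slices off <desc>, consuming 512 * 16 = 8192
 * bytes, and the trailing ASSERT(desc.alength == 0) confirms that
 * the area was sized exactly.
 */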

/*
 * Clean up initialisation done above before the memory is freed
 */
static void
bge_fini_send_ring(bge_t *bgep, uint64_t ring)
{
    send_ring_t *srp;
    uint32_t array;
    uint32_t split;
    uint32_t nslots;

    BGE_TRACE(("bge_fini_send_ring($%p, %d)",
        (void *)bgep, ring));

    srp = &bgep->send[ring];
    mutex_destroy(srp->tc_lock);
    mutex_destroy(srp->freetxbuf_lock);
    mutex_destroy(srp->txbuf_lock);
    mutex_destroy(srp->tx_lock);
    nslots = srp->desc.nslots;
    if (nslots == 0)
        return;

    for (array = 1; array < srp->tx_array; ++array)
        for (split = 0; split < BGE_SPLIT; ++split)
            bge_free_dma_mem(&srp->buf[array][split]);
    kmem_free(srp->sw_sbds, nslots*sizeof (*srp->sw_sbds));
    kmem_free(srp->txbuf_head, BGE_SEND_BUF_MAX*sizeof (*srp->txbuf_head));
    kmem_free(srp->txbuf, BGE_SEND_BUF_MAX*sizeof (*srp->txbuf));
    kmem_free(srp->pktp, BGE_SEND_BUF_MAX*sizeof (*srp->pktp));
    srp->sw_sbds = NULL;
    srp->txbuf_head = NULL;
    srp->txbuf = NULL;
    srp->pktp = NULL;
}

/*
 * Initialise all transmit, receive, and buffer rings.
 */
void
bge_init_rings(bge_t *bgep)
{
    uint32_t ring;

    BGE_TRACE(("bge_init_rings($%p)", (void *)bgep));

    /*
     * Perform one-off initialisation of each ring ...
     */
    for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring)
        bge_init_send_ring(bgep, ring);
    for (ring = 0; ring < BGE_RECV_RINGS_MAX; ++ring)
        bge_init_recv_ring(bgep, ring);
    for (ring = 0; ring < BGE_BUFF_RINGS_MAX; ++ring)
        bge_init_buff_ring(bgep, ring);
}

/*
 * Undo the work of bge_init_rings() above before the memory is freed
 */
void
bge_fini_rings(bge_t *bgep)
{
    uint32_t ring;

    BGE_TRACE(("bge_fini_rings($%p)", (void *)bgep));

    for (ring = 0; ring < BGE_BUFF_RINGS_MAX; ++ring)
        bge_fini_buff_ring(bgep, ring);
    for (ring = 0; ring < BGE_RECV_RINGS_MAX; ++ring)
        bge_fini_recv_ring(bgep, ring);
    for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring)
        bge_fini_send_ring(bgep, ring);
}

/*
 * Called from bge_m_stop() to free the tx buffers that were
 * allocated by the tx process.
 */
void
bge_free_txbuf_arrays(send_ring_t *srp)
{
    uint32_t array;
    uint32_t split;

    ASSERT(mutex_owned(srp->tx_lock));

    /*
     * Free the extra tx buffer DMA area
     */
    for (array = 1; array < srp->tx_array; ++array)
        for (split = 0; split < BGE_SPLIT; ++split)
            bge_free_dma_mem(&srp->buf[array][split]);

    /*
     * Restore initial tx buffer numbers
     */
    srp->tx_array = 1;
    srp->tx_buffers = BGE_SEND_BUF_NUM;
    srp->tx_buffers_low = srp->tx_buffers / 4;
    srp->tx_flow = 0;
    bzero(srp->pktp, BGE_SEND_BUF_MAX * sizeof (*srp->pktp));
}

/*
 * Called from the tx process to allocate more tx buffers
 */
bge_queue_item_t *
bge_alloc_txbuf_array(bge_t *bgep, send_ring_t *srp)
{
    bge_queue_t *txbuf_queue;
    bge_queue_item_t *txbuf_item_last;
    bge_queue_item_t *txbuf_item;
    bge_queue_item_t *txbuf_item_rtn;
    sw_txbuf_t *txbuf;
    dma_area_t area;
    size_t txbuffsize;
    uint32_t slot;
    uint32_t array;
    uint32_t split;
    uint32_t err;

    ASSERT(mutex_owned(srp->tx_lock));

    array = srp->tx_array;
    if (array >= srp->tx_array_max)
        return (NULL);

    /*
     * Allocate memory & handles for TX buffers
     */
    txbuffsize = BGE_SEND_BUF_NUM*bgep->chipid.snd_buff_size;
    ASSERT((txbuffsize % BGE_SPLIT) == 0);
    for (split = 0; split < BGE_SPLIT; ++split) {
        err = bge_alloc_dma_mem(bgep, txbuffsize/BGE_SPLIT,
            &bge_data_accattr, DDI_DMA_WRITE | BGE_DMA_MODE,
            &srp->buf[array][split]);
        if (err != DDI_SUCCESS) {
            /* Free the chunks already allocated OK */
            for (slot = 0; slot <= split; ++slot)
                bge_free_dma_mem(&srp->buf[array][slot]);
            srp->tx_alloc_fail++;
            return (NULL);
        }
    }

    /*
     * Chunk tx buffer area
     */
    txbuf = srp->txbuf + array*BGE_SEND_BUF_NUM;
    for (split = 0; split < BGE_SPLIT; ++split) {
        area = srp->buf[array][split];
        for (slot = 0; slot < BGE_SEND_BUF_NUM/BGE_SPLIT; ++slot) {
            bge_slice_chunk(&txbuf->buf, &area, 1,
                bgep->chipid.snd_buff_size);
            txbuf++;
        }
    }

    /*
     * Add the buffers above to the tx buffer pop queue
     */
    txbuf_item = srp->txbuf_head + array*BGE_SEND_BUF_NUM;
    txbuf = srp->txbuf + array*BGE_SEND_BUF_NUM;
    txbuf_item_last = NULL;
    for (slot = 0; slot < BGE_SEND_BUF_NUM; ++slot) {
        txbuf_item->item = txbuf;
        txbuf_item->next = txbuf_item_last;
        txbuf_item_last = txbuf_item;
        txbuf++;
        txbuf_item++;
    }
    txbuf_item = srp->txbuf_head + array*BGE_SEND_BUF_NUM;
    txbuf_item_rtn = txbuf_item;
    txbuf_item++;
    txbuf_queue = srp->txbuf_pop_queue;
    mutex_enter(txbuf_queue->lock);
    txbuf_item->next = txbuf_queue->head;
    txbuf_queue->head = txbuf_item_last;
    txbuf_queue->count += BGE_SEND_BUF_NUM - 1;
    mutex_exit(txbuf_queue->lock);

    srp->tx_array++;
    srp->tx_buffers += BGE_SEND_BUF_NUM;
    srp->tx_buffers_low = srp->tx_buffers / 4;

    return (txbuf_item_rtn);
}
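
#if 0	/* Illustrative sketch only; not compiled into the driver. */
/*
 * The queue publication above links the new items into a reversed
 * chain and then prepends the whole chain onto the pop queue in one
 * short critical section. A minimal standalone version of that
 * pattern (the helper and its names are hypothetical):
 */
static void
bge_queue_prepend_chain(bge_queue_t *qp, bge_queue_item_t *chain_head,
    bge_queue_item_t *chain_tail, uint32_t nitems)
{
    mutex_enter(qp->lock);
    chain_tail->next = qp->head;	/* splice old list after chain */
    qp->head = chain_head;		/* chain becomes the new head */
    qp->count += nitems;
    mutex_exit(qp->lock);
}
#endif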

/*
 * This function allocates all the transmit and receive buffers
 * and descriptors, in four chunks.
 */
int
bge_alloc_bufs(bge_t *bgep)
{
    dma_area_t area;
    size_t rxbuffsize;
    size_t txbuffsize;
    size_t rxbuffdescsize;
    size_t rxdescsize;
    size_t txdescsize;
    uint32_t ring;
    uint32_t rx_rings = bgep->chipid.rx_rings;
    uint32_t tx_rings = bgep->chipid.tx_rings;
    int split;
    int err;

    BGE_TRACE(("bge_alloc_bufs($%p)",
        (void *)bgep));

    rxbuffsize = BGE_STD_SLOTS_USED*bgep->chipid.std_buf_size;
    rxbuffsize += bgep->chipid.jumbo_slots*bgep->chipid.recv_jumbo_size;
    rxbuffsize += BGE_MINI_SLOTS_USED*BGE_MINI_BUFF_SIZE;

    txbuffsize = BGE_SEND_BUF_NUM*bgep->chipid.snd_buff_size;
    txbuffsize *= tx_rings;

    rxdescsize = rx_rings*bgep->chipid.recv_slots;
    rxdescsize *= sizeof (bge_rbd_t);

    rxbuffdescsize = BGE_STD_SLOTS_USED;
    rxbuffdescsize += bgep->chipid.jumbo_slots;
    rxbuffdescsize += BGE_MINI_SLOTS_USED;
    rxbuffdescsize *= sizeof (bge_rbd_t);

    txdescsize = tx_rings*BGE_SEND_SLOTS_USED;
    txdescsize *= sizeof (bge_sbd_t);
    txdescsize += sizeof (bge_statistics_t);
    txdescsize += sizeof (bge_status_t);
    txdescsize += BGE_STATUS_PADDING;

    /*
     * Enable PCI relaxed ordering only for RX/TX data buffers
     */
    if (bge_relaxed_ordering)
        dma_attr.dma_attr_flags |= DDI_DMA_RELAXED_ORDERING;

    /*
     * Allocate memory & handles for RX buffers
     */
    ASSERT((rxbuffsize % BGE_SPLIT) == 0);
    for (split = 0; split < BGE_SPLIT; ++split) {
        err = bge_alloc_dma_mem(bgep, rxbuffsize/BGE_SPLIT,
            &bge_data_accattr, DDI_DMA_READ | BGE_DMA_MODE,
            &bgep->rx_buff[split]);
        if (err != DDI_SUCCESS)
            return (DDI_FAILURE);
    }

    /*
     * Allocate memory & handles for TX buffers
     */
    ASSERT((txbuffsize % BGE_SPLIT) == 0);
    for (split = 0; split < BGE_SPLIT; ++split) {
        err = bge_alloc_dma_mem(bgep, txbuffsize/BGE_SPLIT,
            &bge_data_accattr, DDI_DMA_WRITE | BGE_DMA_MODE,
            &bgep->tx_buff[split]);
        if (err != DDI_SUCCESS)
            return (DDI_FAILURE);
    }

    dma_attr.dma_attr_flags &= ~DDI_DMA_RELAXED_ORDERING;

    /*
     * Allocate memory & handles for receive return rings
     */
    ASSERT((rxdescsize % rx_rings) == 0);
    for (split = 0; split < rx_rings; ++split) {
        err = bge_alloc_dma_mem(bgep, rxdescsize/rx_rings,
            &bge_desc_accattr, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
            &bgep->rx_desc[split]);
        if (err != DDI_SUCCESS)
            return (DDI_FAILURE);
    }

    /*
     * Allocate memory & handles for buffer (producer) descriptor rings
     */
    err = bge_alloc_dma_mem(bgep, rxbuffdescsize, &bge_desc_accattr,
        DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &bgep->rx_desc[split]);
    if (err != DDI_SUCCESS)
        return (DDI_FAILURE);

    /*
     * Allocate memory & handles for TX descriptor rings,
     * status block, and statistics area
     */
    err = bge_alloc_dma_mem(bgep, txdescsize, &bge_desc_accattr,
        DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &bgep->tx_desc);
    if (err != DDI_SUCCESS)
        return (DDI_FAILURE);

    /*
     * Now carve up each of the allocated areas ...
     */
    for (split = 0; split < BGE_SPLIT; ++split) {
        area = bgep->rx_buff[split];
        bge_slice_chunk(&bgep->buff[BGE_STD_BUFF_RING].buf[split],
            &area, BGE_STD_SLOTS_USED/BGE_SPLIT,
            bgep->chipid.std_buf_size);
        bge_slice_chunk(&bgep->buff[BGE_JUMBO_BUFF_RING].buf[split],
            &area, bgep->chipid.jumbo_slots/BGE_SPLIT,
            bgep->chipid.recv_jumbo_size);
        bge_slice_chunk(&bgep->buff[BGE_MINI_BUFF_RING].buf[split],
            &area, BGE_MINI_SLOTS_USED/BGE_SPLIT,
            BGE_MINI_BUFF_SIZE);
    }

    for (split = 0; split < BGE_SPLIT; ++split) {
        area = bgep->tx_buff[split];
        for (ring = 0; ring < tx_rings; ++ring)
            bge_slice_chunk(&bgep->send[ring].buf[0][split],
                &area, BGE_SEND_BUF_NUM/BGE_SPLIT,
                bgep->chipid.snd_buff_size);
        for (; ring < BGE_SEND_RINGS_MAX; ++ring)
            bge_slice_chunk(&bgep->send[ring].buf[0][split],
                &area, 0, bgep->chipid.snd_buff_size);
    }

    for (ring = 0; ring < rx_rings; ++ring)
        bge_slice_chunk(&bgep->recv[ring].desc, &bgep->rx_desc[ring],
            bgep->chipid.recv_slots, sizeof (bge_rbd_t));

    area = bgep->rx_desc[rx_rings];
    for (; ring < BGE_RECV_RINGS_MAX; ++ring)
        bge_slice_chunk(&bgep->recv[ring].desc, &area,
            0, sizeof (bge_rbd_t));
    bge_slice_chunk(&bgep->buff[BGE_STD_BUFF_RING].desc, &area,
        BGE_STD_SLOTS_USED, sizeof (bge_rbd_t));
    bge_slice_chunk(&bgep->buff[BGE_JUMBO_BUFF_RING].desc, &area,
        bgep->chipid.jumbo_slots, sizeof (bge_rbd_t));
    bge_slice_chunk(&bgep->buff[BGE_MINI_BUFF_RING].desc, &area,
        BGE_MINI_SLOTS_USED, sizeof (bge_rbd_t));
    ASSERT(area.alength == 0);

    area = bgep->tx_desc;
    for (ring = 0; ring < tx_rings; ++ring)
        bge_slice_chunk(&bgep->send[ring].desc, &area,
            BGE_SEND_SLOTS_USED, sizeof (bge_sbd_t));
    for (; ring < BGE_SEND_RINGS_MAX; ++ring)
        bge_slice_chunk(&bgep->send[ring].desc, &area,
            0, sizeof (bge_sbd_t));
    bge_slice_chunk(&bgep->statistics, &area, 1, sizeof (bge_statistics_t));
    bge_slice_chunk(&bgep->status_block, &area, 1, sizeof (bge_status_t));
    ASSERT(area.alength == BGE_STATUS_PADDING);
    DMA_ZERO(bgep->status_block);

    return (DDI_SUCCESS);
}
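
/*
 * Sizing sketch for the allocations above (the numbers are
 * illustrative assumptions; the real values come from bge_impl.h
 * and the chipid). With std_buf_size = 1536, BGE_STD_SLOTS_USED =
 * 512 and no jumbo or mini slots:
 *
 *	rxbuffsize = 512 * 1536 = 786432 bytes
 *
 * split BGE_SPLIT ways across separate DMA areas. The descriptor
 * areas are sized the same way, e.g. one receive return ring of
 * recv_slots slots needs recv_slots * sizeof (bge_rbd_t) bytes.
 */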

/*
 * This routine frees the transmit and receive buffers and descriptors.
 * Make sure the chip is stopped before calling it!
 */
void
bge_free_bufs(bge_t *bgep)
{
    int split;

    BGE_TRACE(("bge_free_bufs($%p)",
        (void *)bgep));

    bge_free_dma_mem(&bgep->tx_desc);
    for (split = 0; split < BGE_RECV_RINGS_SPLIT; ++split)
        bge_free_dma_mem(&bgep->rx_desc[split]);
    for (split = 0; split < BGE_SPLIT; ++split)
        bge_free_dma_mem(&bgep->tx_buff[split]);
    for (split = 0; split < BGE_SPLIT; ++split)
        bge_free_dma_mem(&bgep->rx_buff[split]);
}

/*
 * Determine the (initial) MAC address ("BIA") to use for this interface
 */
static void
bge_find_mac_address(bge_t *bgep, chip_id_t *cidp)
{
    struct ether_addr sysaddr;
    char propbuf[8];		/* "true" or "false", plus NUL	*/
    uchar_t *bytes;
    int *ints;
    uint_t nelts;
    int err;

    BGE_TRACE(("bge_find_mac_address($%p)",
        (void *)bgep));

    BGE_DEBUG(("bge_find_mac_address: hw_mac_addr %012llx, => %s (%sset)",
        cidp->hw_mac_addr,
        ether_sprintf((void *)cidp->vendor_addr.addr),
        cidp->vendor_addr.set ? "" : "not "));

    /*
     * The "vendor's factory-set address" may already have
     * been extracted from the chip, but if the property
     * "local-mac-address" is set we use that instead. It
     * will normally be set by OBP, but it could also be
     * specified in a .conf file(!)
     *
     * There doesn't seem to be a way to define byte-array
     * properties in a .conf, so we check whether it looks
     * like an array of 6 ints instead.
     *
     * Then, we check whether it looks like an array of 6
     * bytes (which it should, if OBP set it). If we can't
     * make sense of it either way, we'll ignore it.
     */
    err = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, bgep->devinfo,
        DDI_PROP_DONTPASS, localmac_propname, &ints, &nelts);
    if (err == DDI_PROP_SUCCESS) {
        if (nelts == ETHERADDRL) {
            while (nelts--)
                cidp->vendor_addr.addr[nelts] = ints[nelts];
            cidp->vendor_addr.set = B_TRUE;
        }
        ddi_prop_free(ints);
    }

    err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, bgep->devinfo,
        DDI_PROP_DONTPASS, localmac_propname, &bytes, &nelts);
    if (err == DDI_PROP_SUCCESS) {
        if (nelts == ETHERADDRL) {
            while (nelts--)
                cidp->vendor_addr.addr[nelts] = bytes[nelts];
            cidp->vendor_addr.set = B_TRUE;
        }
        ddi_prop_free(bytes);
    }

    BGE_DEBUG(("bge_find_mac_address: +local %s (%sset)",
        ether_sprintf((void *)cidp->vendor_addr.addr),
        cidp->vendor_addr.set ? "" : "not "));

    /*
     * Look up the OBP property "local-mac-address?". Note that even
     * though its value is a string (which should be "true" or "false"),
     * it can't be decoded by ddi_prop_lookup_string(9F). So, we zero
     * the buffer first and then fetch the property as an untyped array;
     * this may or may not include a final NUL, but since there will
     * always be one left at the end of the buffer we can now treat it
     * as a string anyway.
     */
    nelts = sizeof (propbuf);
    bzero(propbuf, nelts--);
    err = ddi_getlongprop_buf(DDI_DEV_T_ANY, bgep->devinfo,
        DDI_PROP_CANSLEEP, localmac_boolname, propbuf, (int *)&nelts);

    /*
     * Now, if the address still isn't set from the hardware (SEEPROM)
     * or the OBP or .conf property, OR if the user has foolishly set
     * 'local-mac-address? = false', use "the system address" instead
     * (but only if it's non-null, i.e. has been set from the IDPROM).
     */
    if (cidp->vendor_addr.set == B_FALSE || strcmp(propbuf, "false") == 0)
        if (localetheraddr(NULL, &sysaddr) != 0) {
            ethaddr_copy(&sysaddr, cidp->vendor_addr.addr);
            cidp->vendor_addr.set = B_TRUE;
        }

    BGE_DEBUG(("bge_find_mac_address: +system %s (%sset)",
        ether_sprintf((void *)cidp->vendor_addr.addr),
        cidp->vendor_addr.set ? "" : "not "));

    /*
     * Finally(!), if there's a valid "mac-address" property (created
     * if we netbooted from this interface), we must use this instead
     * of any of the above to ensure that the NFS/install server doesn't
     * get confused by the address changing as Solaris takes over!
     */
    err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, bgep->devinfo,
        DDI_PROP_DONTPASS, macaddr_propname, &bytes, &nelts);
    if (err == DDI_PROP_SUCCESS) {
        if (nelts == ETHERADDRL) {
            while (nelts--)
                cidp->vendor_addr.addr[nelts] = bytes[nelts];
            cidp->vendor_addr.set = B_TRUE;
        }
        ddi_prop_free(bytes);
    }

    BGE_DEBUG(("bge_find_mac_address: =final %s (%sset)",
        ether_sprintf((void *)cidp->vendor_addr.addr),
        cidp->vendor_addr.set ? "" : "not "));
}

/*ARGSUSED*/
int
bge_check_acc_handle(bge_t *bgep, ddi_acc_handle_t handle)
{
    ddi_fm_error_t de;

    ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION);
    ddi_fm_acc_err_clear(handle, DDI_FME_VERSION);
    return (de.fme_status);
}

/*ARGSUSED*/
int
bge_check_dma_handle(bge_t *bgep, ddi_dma_handle_t handle)
{
    ddi_fm_error_t de;

    ASSERT(bgep->progress & PROGRESS_BUFS);
    ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION);
    return (de.fme_status);
}

/*
 * The IO fault service error handling callback function
 */
/*ARGSUSED*/
static int
bge_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
{
    /*
     * As the driver can always deal with an error in any dma or
     * access handle, we can just return the fme_status value.
     */
    pci_ereport_post(dip, err, NULL);
    return (err->fme_status);
}
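
#if 0	/* Illustrative sketch only; not compiled into the driver. */
/*
 * Typical use of the handle checks above, following the pattern
 * used throughout this file: after a burst of register or config
 * space accesses, validate the handle and report the impact.
 */
    if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK)
        ddi_fm_service_impact(bgep->devinfo,
            DDI_SERVICE_UNAFFECTED);
#endif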

static void
bge_fm_init(bge_t *bgep)
{
    ddi_iblock_cookie_t iblk;

    /* Only register with IO Fault Services if we have some capability */
    if (bgep->fm_capabilities) {
        bge_reg_accattr.devacc_attr_access = DDI_FLAGERR_ACC;
        bge_desc_accattr.devacc_attr_access = DDI_FLAGERR_ACC;
        dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;

        /* Register capabilities with IO Fault Services */
        ddi_fm_init(bgep->devinfo, &bgep->fm_capabilities, &iblk);

        /*
         * Initialize pci ereport capabilities if ereport capable
         */
        if (DDI_FM_EREPORT_CAP(bgep->fm_capabilities) ||
            DDI_FM_ERRCB_CAP(bgep->fm_capabilities))
            pci_ereport_setup(bgep->devinfo);

        /*
         * Register error callback if error callback capable
         */
        if (DDI_FM_ERRCB_CAP(bgep->fm_capabilities))
            ddi_fm_handler_register(bgep->devinfo,
                bge_fm_error_cb, (void *)bgep);
    } else {
        /*
         * These fields have to be reset to their non-FMA defaults
         * if there are no FMA capabilities at runtime.
         */
        bge_reg_accattr.devacc_attr_access = DDI_DEFAULT_ACC;
        bge_desc_accattr.devacc_attr_access = DDI_DEFAULT_ACC;
        dma_attr.dma_attr_flags = 0;
    }
}

static void
bge_fm_fini(bge_t *bgep)
{
    /* Only unregister FMA capabilities if we registered some */
    if (bgep->fm_capabilities) {

        /*
         * Release any resources allocated by pci_ereport_setup()
         */
        if (DDI_FM_EREPORT_CAP(bgep->fm_capabilities) ||
            DDI_FM_ERRCB_CAP(bgep->fm_capabilities))
            pci_ereport_teardown(bgep->devinfo);

        /*
         * Un-register error callback if error callback capable
         */
        if (DDI_FM_ERRCB_CAP(bgep->fm_capabilities))
            ddi_fm_handler_unregister(bgep->devinfo);

        /* Unregister from IO Fault Services */
        ddi_fm_fini(bgep->devinfo);
    }
}

static void
#ifdef BGE_IPMI_ASF
bge_unattach(bge_t *bgep, uint_t asf_mode)
#else
bge_unattach(bge_t *bgep)
#endif
{
    BGE_TRACE(("bge_unattach($%p)",
        (void *)bgep));

    /*
     * Flag that no more activity may be initiated
     */
    bgep->progress &= ~PROGRESS_READY;

    /*
     * Quiesce the PHY and MAC (leave it reset but still powered).
     * Clean up and free all BGE data structures
     */
    if (bgep->periodic_id != NULL) {
        ddi_periodic_delete(bgep->periodic_id);
        bgep->periodic_id = NULL;
    }
    if (bgep->progress & PROGRESS_KSTATS)
        bge_fini_kstats(bgep);
    if (bgep->progress & PROGRESS_PHY)
        bge_phys_reset(bgep);
    if (bgep->progress & PROGRESS_HWINT) {
        mutex_enter(bgep->genlock);
#ifdef BGE_IPMI_ASF
        if (bge_chip_reset(bgep, B_FALSE, asf_mode) != DDI_SUCCESS)
#else
        if (bge_chip_reset(bgep, B_FALSE) != DDI_SUCCESS)
#endif
            ddi_fm_service_impact(bgep->devinfo,
                DDI_SERVICE_UNAFFECTED);
#ifdef BGE_IPMI_ASF
        if (bgep->asf_enabled) {
            /*
             * This register has been overlaid. We restore its
             * initial value here.
             */
            bge_nic_put32(bgep, BGE_NIC_DATA_SIG_ADDR,
                BGE_NIC_DATA_SIG);
        }
#endif
        if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK)
            ddi_fm_service_impact(bgep->devinfo,
                DDI_SERVICE_UNAFFECTED);
        if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK)
            ddi_fm_service_impact(bgep->devinfo,
                DDI_SERVICE_UNAFFECTED);
        mutex_exit(bgep->genlock);
    }
    if (bgep->progress & PROGRESS_INTR) {
        bge_intr_disable(bgep);
        bge_fini_rings(bgep);
    }
    if (bgep->progress & PROGRESS_HWINT) {
        bge_rem_intrs(bgep);
        rw_destroy(bgep->errlock);
        mutex_destroy(bgep->softintrlock);
        mutex_destroy(bgep->genlock);
    }
    if (bgep->progress & PROGRESS_FACTOTUM)
        ddi_remove_softintr(bgep->factotum_id);
    if (bgep->progress & PROGRESS_RESCHED)
        ddi_remove_softintr(bgep->drain_id);
    if (bgep->progress & PROGRESS_BUFS)
        bge_free_bufs(bgep);
    if (bgep->progress & PROGRESS_REGS)
        ddi_regs_map_free(&bgep->io_handle);
    if (bgep->progress & PROGRESS_CFG)
        pci_config_teardown(&bgep->cfg_handle);

    bge_fm_fini(bgep);

    ddi_remove_minor_node(bgep->devinfo, NULL);
    kmem_free(bgep->pstats, sizeof (bge_statistics_reg_t));
    kmem_free(bgep, sizeof (*bgep));
}
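
/*
 * Note on the teardown order above: each cleanup step is keyed off a
 * PROGRESS_* bit that bge_attach() sets as the corresponding resource
 * is acquired, e.g.
 *
 *	bgep->progress |= PROGRESS_REGS;	in bge_attach()
 *	if (bgep->progress & PROGRESS_REGS)	in bge_unattach()
 *		ddi_regs_map_free(&bgep->io_handle);
 *
 * so bge_unattach() is safe to call from the attach failure path as
 * well as from detach: steps whose bit was never set are skipped.
 */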

static int
bge_resume(dev_info_t *devinfo)
{
    bge_t *bgep;			/* Our private data	*/
    chip_id_t *cidp;
    chip_id_t chipid;

    bgep = ddi_get_driver_private(devinfo);
    if (bgep == NULL)
        return (DDI_FAILURE);

    /*
     * Refuse to resume if the data structures aren't consistent
     */
    if (bgep->devinfo != devinfo)
        return (DDI_FAILURE);

#ifdef BGE_IPMI_ASF
    /*
     * Power management is not currently supported by this driver.
     * If you want to implement it, add the ASF/IPMI-related code here.
     */
#endif

    /*
     * Read chip ID & set up config space command register(s)
     * Refuse to resume if the chip has changed its identity!
     */
    cidp = &bgep->chipid;
    mutex_enter(bgep->genlock);
    bge_chip_cfg_init(bgep, &chipid, B_FALSE);
    if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
        ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
        mutex_exit(bgep->genlock);
        return (DDI_FAILURE);
    }
    mutex_exit(bgep->genlock);
    if (chipid.vendor != cidp->vendor)
        return (DDI_FAILURE);
    if (chipid.device != cidp->device)
        return (DDI_FAILURE);
    if (chipid.revision != cidp->revision)
        return (DDI_FAILURE);
    if (chipid.asic_rev != cidp->asic_rev)
        return (DDI_FAILURE);

    /*
     * All OK, reinitialise h/w & kick off GLD scheduling
     */
    mutex_enter(bgep->genlock);
    if (bge_restart(bgep, B_TRUE) != DDI_SUCCESS) {
        (void) bge_check_acc_handle(bgep, bgep->cfg_handle);
        (void) bge_check_acc_handle(bgep, bgep->io_handle);
        ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
        mutex_exit(bgep->genlock);
        return (DDI_FAILURE);
    }
    if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
        ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
        mutex_exit(bgep->genlock);
        return (DDI_FAILURE);
    }
    if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
        ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
        mutex_exit(bgep->genlock);
        return (DDI_FAILURE);
    }
    mutex_exit(bgep->genlock);
    return (DDI_SUCCESS);
}

/*
 * attach(9E) -- Attach a device to the system
 *
 * Called once for each board successfully probed.
 */
static int
bge_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
{
    bge_t *bgep;			/* Our private data	*/
    mac_register_t *macp;
    chip_id_t *cidp;
    caddr_t regs;
    int instance;
    int err;
    int intr_types;
#ifdef BGE_IPMI_ASF
    uint32_t mhcrValue;
#ifdef __sparc
    uint16_t value16;
#endif
#ifdef BGE_NETCONSOLE
    int retval;
#endif
#endif

    instance = ddi_get_instance(devinfo);

    BGE_GTRACE(("bge_attach($%p, %d) instance %d",
        (void *)devinfo, cmd, instance));
    BGE_BRKPT(NULL, "bge_attach");

    switch (cmd) {
    default:
        return (DDI_FAILURE);

    case DDI_RESUME:
        return (bge_resume(devinfo));

    case DDI_ATTACH:
        break;
    }

    bgep = kmem_zalloc(sizeof (*bgep), KM_SLEEP);
    bgep->pstats = kmem_zalloc(sizeof (bge_statistics_reg_t), KM_SLEEP);
    ddi_set_driver_private(devinfo, bgep);
    bgep->bge_guard = BGE_GUARD;
    bgep->devinfo = devinfo;
    bgep->param_drain_max = 64;
    bgep->param_msi_cnt = 0;
    bgep->param_loop_mode = 0;

    /*
     * Initialize more fields in BGE private data
     */
    bgep->debug = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
        DDI_PROP_DONTPASS, debug_propname, bge_debug);
    (void) snprintf(bgep->ifname, sizeof (bgep->ifname), "%s%d",
        BGE_DRIVER_NAME, instance);

    /*
     * Initialize for fma support
     */
    bgep->fm_capabilities = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
        DDI_PROP_DONTPASS, fm_cap,
        DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
        DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
    BGE_DEBUG(("bgep->fm_capabilities = %d", bgep->fm_capabilities));
    bge_fm_init(bgep);

    /*
     * Look up the IOMMU's page size for DVMA mappings (must be
     * a power of 2) and convert to a mask. This can be used to
     * determine whether a message buffer crosses a page boundary.
     * Note: in 2s complement binary notation, if X is a power of
     * 2, then -X has the representation "11...1100...00".
     */
    bgep->pagemask = dvma_pagesize(devinfo);
    ASSERT(ddi_ffs(bgep->pagemask) == ddi_fls(bgep->pagemask));
    bgep->pagemask = -bgep->pagemask;
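
    /*
     * Worked example of the mask computation above, assuming an 8 KB
     * IOMMU page (0x2000; the real value comes from dvma_pagesize()):
     *
     *	pagemask = 0x2000;	exactly one bit set => power of 2,
     *				so ddi_ffs() == ddi_fls()
     *	pagemask = -pagemask;	== 0xFFFFE000 in a 32-bit view
     *
     * With that mask, (addr & ~pagemask) is the offset within the
     * page, and two addresses lie on the same page iff
     * ((a ^ b) & pagemask) is zero.
     */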

    /*
     * Map config space registers
     * Read chip ID & set up config space command register(s)
     *
     * Note: this leaves the chip accessible by Memory Space
     * accesses, but with interrupts and Bus Mastering off.
     * This should ensure that nothing untoward will happen
     * if it has been left active by the (net-)bootloader.
     * We'll re-enable Bus Mastering once we've reset the chip,
     * and allow interrupts only when everything else is set up.
     */
    err = pci_config_setup(devinfo, &bgep->cfg_handle);
#ifdef BGE_IPMI_ASF
#ifdef __sparc
    value16 = pci_config_get16(bgep->cfg_handle, PCI_CONF_COMM);
    value16 = value16 | (PCI_COMM_MAE | PCI_COMM_ME);
    pci_config_put16(bgep->cfg_handle, PCI_CONF_COMM, value16);
    mhcrValue = MHCR_ENABLE_INDIRECT_ACCESS |
        MHCR_ENABLE_TAGGED_STATUS_MODE |
        MHCR_MASK_INTERRUPT_MODE |
        MHCR_MASK_PCI_INT_OUTPUT |
        MHCR_CLEAR_INTERRUPT_INTA |
        MHCR_ENABLE_ENDIAN_WORD_SWAP |
        MHCR_ENABLE_ENDIAN_BYTE_SWAP;
    pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_MHCR, mhcrValue);
    bge_ind_put32(bgep, MEMORY_ARBITER_MODE_REG,
        bge_ind_get32(bgep, MEMORY_ARBITER_MODE_REG) |
        MEMORY_ARBITER_ENABLE);
#else
    mhcrValue = pci_config_get32(bgep->cfg_handle, PCI_CONF_BGE_MHCR);
#endif
    if (mhcrValue & MHCR_ENABLE_ENDIAN_WORD_SWAP) {
        bgep->asf_wordswapped = B_TRUE;
    } else {
        bgep->asf_wordswapped = B_FALSE;
    }
    bge_asf_get_config(bgep);
#endif
    if (err != DDI_SUCCESS) {
        bge_problem(bgep, "pci_config_setup() failed");
        goto attach_fail;
    }
    bgep->progress |= PROGRESS_CFG;
    cidp = &bgep->chipid;
    bzero(cidp, sizeof (*cidp));
    bge_chip_cfg_init(bgep, cidp, B_FALSE);
    if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
        ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
        goto attach_fail;
    }

#ifdef BGE_IPMI_ASF
    if (DEVICE_5721_SERIES_CHIPSETS(bgep) ||
        DEVICE_5714_SERIES_CHIPSETS(bgep)) {
        bgep->asf_newhandshake = B_TRUE;
    } else {
        bgep->asf_newhandshake = B_FALSE;
    }
#endif

    /*
     * Update those parts of the chip ID derived from volatile
     * registers with the values seen by OBP (in case the chip
     * has been reset externally and therefore lost them).
     */
    cidp->subven = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
        DDI_PROP_DONTPASS, subven_propname, cidp->subven);
    cidp->subdev = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
        DDI_PROP_DONTPASS, subdev_propname, cidp->subdev);
    cidp->clsize = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
        DDI_PROP_DONTPASS, clsize_propname, cidp->clsize);
    cidp->latency = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
        DDI_PROP_DONTPASS, latency_propname, cidp->latency);
    cidp->rx_rings = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
        DDI_PROP_DONTPASS, rxrings_propname, cidp->rx_rings);
    cidp->tx_rings = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
        DDI_PROP_DONTPASS, txrings_propname, cidp->tx_rings);

    if (bge_jumbo_enable == B_TRUE) {
        cidp->default_mtu = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
            DDI_PROP_DONTPASS, default_mtu, BGE_DEFAULT_MTU);
        if ((cidp->default_mtu < BGE_DEFAULT_MTU) ||
            (cidp->default_mtu > BGE_MAXIMUM_MTU)) {
            cidp->default_mtu = BGE_DEFAULT_MTU;
        }
    }

    /*
     * Map operating registers
     */
    err = ddi_regs_map_setup(devinfo, BGE_PCI_OPREGS_RNUMBER,
        &regs, 0, 0, &bge_reg_accattr, &bgep->io_handle);
    if (err != DDI_SUCCESS) {
        bge_problem(bgep, "ddi_regs_map_setup() failed");
        goto attach_fail;
    }
    bgep->io_regs = regs;
    bgep->progress |= PROGRESS_REGS;

    /*
     * Characterise the device, so we know its requirements.
     * Then allocate the appropriate TX and RX descriptors & buffers.
     */
    if (bge_chip_id_init(bgep) == EIO) {
        ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
        goto attach_fail;
    }

    err = bge_alloc_bufs(bgep);
    if (err != DDI_SUCCESS) {
        bge_problem(bgep, "DMA buffer allocation failed");
        goto attach_fail;
    }
    bgep->progress |= PROGRESS_BUFS;

    /*
     * Add the softint handlers:
     *
     * Both of these handlers are used to avoid restrictions on the
     * context and/or mutexes required for some operations. In
     * particular, the hardware interrupt handler and its subfunctions
     * can detect a number of conditions that we don't want to handle
     * in that context or with that set of mutexes held. So, these
     * softints are triggered instead:
     *
     * the <resched> softint is triggered if we have previously
     * had to refuse to send a packet because of resource shortage
     * (we've run out of transmit buffers), but the send completion
     * interrupt handler has now detected that more buffers have
     * become available.
     *
     * the <factotum> is triggered if the h/w interrupt handler
     * sees the <link state changed> or <error> bits in the status
     * block. It's also triggered periodically to poll the link
     * state, just in case we aren't getting link status change
     * interrupts ...
     */
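
#if 0	/* Illustrative sketch only; not compiled into the driver. */
/*
 * How the softints registered below are expected to be fired (the
 * call site here is hypothetical; the real triggers live in the
 * interrupt-handling code): the hardware interrupt handler defers
 * the work by triggering the soft interrupt, whose handler then
 * runs later at soft-interrupt priority.
 */
    if (link_state_changed)			/* assumed condition */
        ddi_trigger_softintr(bgep->factotum_id);
#endif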
    err = ddi_add_softintr(devinfo, DDI_SOFTINT_LOW, &bgep->drain_id,
        NULL, NULL, bge_send_drain, (caddr_t)bgep);
    if (err != DDI_SUCCESS) {
        bge_problem(bgep, "ddi_add_softintr() failed");
        goto attach_fail;
    }
    bgep->progress |= PROGRESS_RESCHED;
    err = ddi_add_softintr(devinfo, DDI_SOFTINT_LOW, &bgep->factotum_id,
        NULL, NULL, bge_chip_factotum, (caddr_t)bgep);
    if (err != DDI_SUCCESS) {
        bge_problem(bgep, "ddi_add_softintr() failed");
        goto attach_fail;
    }
    bgep->progress |= PROGRESS_FACTOTUM;

    /* Get supported interrupt types */
    if (ddi_intr_get_supported_types(devinfo, &intr_types) != DDI_SUCCESS) {
        bge_error(bgep, "ddi_intr_get_supported_types failed\n");
        goto attach_fail;
    }

    BGE_DEBUG(("%s: ddi_intr_get_supported_types() returned: %x",
        bgep->ifname, intr_types));

    if ((intr_types & DDI_INTR_TYPE_MSI) && bgep->chipid.msi_enabled) {
        if (bge_add_intrs(bgep, DDI_INTR_TYPE_MSI) != DDI_SUCCESS) {
            bge_error(bgep, "MSI registration failed, "
                "trying FIXED interrupt type\n");
        } else {
            BGE_DEBUG(("%s: Using MSI interrupt type",
                bgep->ifname));
            bgep->intr_type = DDI_INTR_TYPE_MSI;
            bgep->progress |= PROGRESS_HWINT;
        }
    }

    if (!(bgep->progress & PROGRESS_HWINT) &&
        (intr_types & DDI_INTR_TYPE_FIXED)) {
        if (bge_add_intrs(bgep, DDI_INTR_TYPE_FIXED) != DDI_SUCCESS) {
            bge_error(bgep, "FIXED interrupt "
                "registration failed\n");
            goto attach_fail;
        }

        BGE_DEBUG(("%s: Using FIXED interrupt type", bgep->ifname));

        bgep->intr_type = DDI_INTR_TYPE_FIXED;
        bgep->progress |= PROGRESS_HWINT;
    }

    if (!(bgep->progress & PROGRESS_HWINT)) {
        bge_error(bgep, "No interrupts registered\n");
        goto attach_fail;
    }

    /*
     * Note that interrupts are not enabled yet, as the mutex locks
     * are not initialized. Initialize the mutex locks now.
     */
    mutex_init(bgep->genlock, NULL, MUTEX_DRIVER,
        DDI_INTR_PRI(bgep->intr_pri));
    mutex_init(bgep->softintrlock, NULL, MUTEX_DRIVER,
        DDI_INTR_PRI(bgep->intr_pri));
    rw_init(bgep->errlock, NULL, RW_DRIVER,
        DDI_INTR_PRI(bgep->intr_pri));

    /*
     * Initialize rings.
     */
    bge_init_rings(bgep);

    /*
     * Now that mutex locks are initialized, enable interrupts.
     */
    bge_intr_enable(bgep);
    bgep->progress |= PROGRESS_INTR;

    /*
     * Initialise link state variables
     * Stop, reset & reinitialise the chip.
     * Initialise the (internal) PHY.
     */
    bgep->link_state = LINK_STATE_UNKNOWN;

    mutex_enter(bgep->genlock);

    /*
     * Reset chip & rings to initial state; also reset address
     * filtering, promiscuity, loopback mode.
     */
#ifdef BGE_IPMI_ASF
#ifdef BGE_NETCONSOLE
    if (bge_reset(bgep, ASF_MODE_INIT) != DDI_SUCCESS) {
#else
    if (bge_reset(bgep, ASF_MODE_SHUTDOWN) != DDI_SUCCESS) {
#endif
#else
    if (bge_reset(bgep) != DDI_SUCCESS) {
#endif
        (void) bge_check_acc_handle(bgep, bgep->cfg_handle);
        (void) bge_check_acc_handle(bgep, bgep->io_handle);
        ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
        mutex_exit(bgep->genlock);
        goto attach_fail;
    }

#ifdef BGE_IPMI_ASF
    if (bgep->asf_enabled) {
        bgep->asf_status = ASF_STAT_RUN_INIT;
    }
#endif

    bzero(bgep->mcast_hash, sizeof (bgep->mcast_hash));
    bzero(bgep->mcast_refs, sizeof (bgep->mcast_refs));
    bgep->promisc = B_FALSE;
    bgep->param_loop_mode = BGE_LOOP_NONE;
    if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
        ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
        mutex_exit(bgep->genlock);
        goto attach_fail;
    }
    if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
        ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
        mutex_exit(bgep->genlock);
        goto attach_fail;
    }

    mutex_exit(bgep->genlock);

    if (bge_phys_init(bgep) == EIO) {
        ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
        goto attach_fail;
    }
    bgep->progress |= PROGRESS_PHY;

    /*
     * Initialize NDD-tweakable parameters
     */
    if (bge_nd_init(bgep)) {
        bge_problem(bgep, "bge_nd_init() failed");
        goto attach_fail;
    }
    bgep->progress |= PROGRESS_NDD;

    /*
     * Create & initialise named kstats
     */
    bge_init_kstats(bgep, instance);
    bgep->progress |= PROGRESS_KSTATS;

    /*
     * Determine whether to override the chip's own MAC address
     */
    bge_find_mac_address(bgep, cidp);
    ethaddr_copy(cidp->vendor_addr.addr, bgep->curr_addr[0].addr);
    bgep->curr_addr[0].set = B_TRUE;

    bgep->unicst_addr_total = MAC_ADDRESS_REGS_MAX;
    /*
     * One fewer address is available than the maximum, as the
     * primary address is not advertised as a multiple MAC address.
     */
    bgep->unicst_addr_avail = MAC_ADDRESS_REGS_MAX - 1;

    if ((macp = mac_alloc(MAC_VERSION)) == NULL)
        goto attach_fail;
    macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
    macp->m_driver = bgep;
    macp->m_dip = devinfo;
    macp->m_src_addr = bgep->curr_addr[0].addr;
    macp->m_callbacks = &bge_m_callbacks;
    macp->m_min_sdu = 0;
    macp->m_max_sdu = cidp->ethmax_size - sizeof (struct ether_header);
    macp->m_margin = VLAN_TAGSZ;
    macp->m_priv_props = bge_priv_prop;
    macp->m_priv_prop_count = BGE_MAX_PRIV_PROPS;

    /*
     * Finally, we're ready to register ourselves with the MAC layer
     * interface; if this succeeds, we're all ready to start()
     */
    err = mac_register(macp, &bgep->mh);
    mac_free(macp);
    if (err != 0)
        goto attach_fail;

    /*
     * Register a periodic handler.
     * bge_chip_cyclic() is invoked in kernel context.
     */
    bgep->periodic_id = ddi_periodic_add(bge_chip_cyclic, bgep,
        BGE_CYCLIC_PERIOD, DDI_IPL_0);

    bgep->progress |= PROGRESS_READY;
    ASSERT(bgep->bge_guard == BGE_GUARD);
#ifdef BGE_IPMI_ASF
#ifdef BGE_NETCONSOLE
    if (bgep->asf_enabled) {
        mutex_enter(bgep->genlock);
        retval = bge_chip_start(bgep, B_TRUE);
        mutex_exit(bgep->genlock);
        if (retval != DDI_SUCCESS)
            goto attach_fail;
    }
#endif
#endif

    ddi_report_dev(devinfo);
    return (DDI_SUCCESS);

attach_fail:
#ifdef BGE_IPMI_ASF
    bge_unattach(bgep, ASF_MODE_SHUTDOWN);
#else
    bge_unattach(bgep);
#endif
    return (DDI_FAILURE);
}

/*
 * bge_suspend() -- suspend transmit/receive for powerdown
 */
static int
bge_suspend(bge_t *bgep)
{
    /*
     * Stop processing and idle (powerdown) the PHY ...
     */
    mutex_enter(bgep->genlock);
#ifdef BGE_IPMI_ASF
    /*
     * Power management is not currently supported by this driver.
     * If you want to implement it, add the ASF/IPMI-related code here.
     */
#endif
    bge_stop(bgep);
    if (bge_phys_idle(bgep) != DDI_SUCCESS) {
        (void) bge_check_acc_handle(bgep, bgep->io_handle);
        ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
        mutex_exit(bgep->genlock);
        return (DDI_FAILURE);
    }
    if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
        ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
        mutex_exit(bgep->genlock);
        return (DDI_FAILURE);
    }
    mutex_exit(bgep->genlock);

    return (DDI_SUCCESS);
}

/*
 * quiesce(9E) entry point.
 *
 * This function is called when the system is single-threaded at high
 * PIL with preemption disabled. Therefore, this function must not block.
 *
 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
 * DDI_FAILURE indicates an error condition and should almost never happen.
 */
#ifdef __sparc
#define	bge_quiesce	ddi_quiesce_not_supported
#else
static int
bge_quiesce(dev_info_t *devinfo)
{
    bge_t *bgep = ddi_get_driver_private(devinfo);

    if (bgep == NULL)
        return (DDI_FAILURE);

    if (bgep->intr_type == DDI_INTR_TYPE_FIXED) {
        bge_reg_set32(bgep, PCI_CONF_BGE_MHCR,
            MHCR_MASK_PCI_INT_OUTPUT);
    } else {
        bge_reg_clr32(bgep, MSI_MODE_REG, MSI_MSI_ENABLE);
    }

    /* Stop the chip */
    bge_chip_stop_nonblocking(bgep);

    return (DDI_SUCCESS);
}
#endif

/*
 * detach(9E) -- Detach a device from the system
 */
static int
bge_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
{
    bge_t *bgep;
#ifdef BGE_IPMI_ASF
    uint_t asf_mode;
    asf_mode = ASF_MODE_NONE;
#endif

    BGE_GTRACE(("bge_detach($%p, %d)", (void *)devinfo, cmd));

    bgep = ddi_get_driver_private(devinfo);

    switch (cmd) {
    default:
        return (DDI_FAILURE);

    case DDI_SUSPEND:
        return (bge_suspend(bgep));

    case DDI_DETACH:
        break;
    }

#ifdef BGE_IPMI_ASF
    mutex_enter(bgep->genlock);
    if (bgep->asf_enabled && ((bgep->asf_status == ASF_STAT_RUN) ||
        (bgep->asf_status == ASF_STAT_RUN_INIT))) {

        bge_asf_update_status(bgep);
        if (bgep->asf_status == ASF_STAT_RUN) {
            bge_asf_stop_timer(bgep);
        }
        bgep->asf_status = ASF_STAT_STOP;

        bge_asf_pre_reset_operations(bgep, BGE_SHUTDOWN_RESET);

        if (bgep->asf_pseudostop) {
            bge_chip_stop(bgep, B_FALSE);
            bgep->bge_mac_state = BGE_MAC_STOPPED;
            bgep->asf_pseudostop = B_FALSE;
        }

        asf_mode = ASF_MODE_POST_SHUTDOWN;

        if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK)
            ddi_fm_service_impact(bgep->devinfo,
                DDI_SERVICE_UNAFFECTED);
        if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK)
            ddi_fm_service_impact(bgep->devinfo,
                DDI_SERVICE_UNAFFECTED);
    }
    mutex_exit(bgep->genlock);
#endif

    /*
     * Unregister from the GLD subsystem. This can fail, in
     * particular if there are DLPI style-2 streams still open -
     * in which case we just return failure without shutting
     * down chip operations.
     */
    if (mac_unregister(bgep->mh) != 0)
        return (DDI_FAILURE);

    /*
     * All activity stopped, so we can clean up & exit
     */
#ifdef BGE_IPMI_ASF
    bge_unattach(bgep, asf_mode);
#else
    bge_unattach(bgep);
#endif
    return (DDI_SUCCESS);
}

/*
 * ========== Module Loading Data & Entry Points ==========
 */

#undef	BGE_DBG
#define	BGE_DBG		BGE_DBG_INIT	/* debug flag for this code	*/

DDI_DEFINE_STREAM_OPS(bge_dev_ops,
    nulldev,	/* identify	*/
    nulldev,	/* probe	*/
    bge_attach,	/* attach	*/
    bge_detach,	/* detach	*/
    nodev,	/* reset	*/
    NULL,	/* cb_ops	*/
    D_MP,	/* bus_ops	*/
    NULL,	/* power	*/
    bge_quiesce	/* quiesce	*/
    );

static struct modldrv bge_modldrv = {
    &mod_driverops,	/* Type of module. This one is a driver */
    bge_ident,		/* short description */
    &bge_dev_ops	/* driver specific ops */
};

static struct modlinkage modlinkage = {
    MODREV_1, (void *)&bge_modldrv, NULL
};

int
_info(struct modinfo *modinfop)
{
    return (mod_info(&modlinkage, modinfop));
}

int
_init(void)
{
    int status;

    mac_init_ops(&bge_dev_ops, "bge");
    status = mod_install(&modlinkage);
    if (status == DDI_SUCCESS)
        mutex_init(bge_log_mutex, NULL, MUTEX_DRIVER, NULL);
    else
        mac_fini_ops(&bge_dev_ops);
    return (status);
}

int
_fini(void)
{
    int status;

    status = mod_remove(&modlinkage);
    if (status == DDI_SUCCESS) {
        mac_fini_ops(&bge_dev_ops);
        mutex_destroy(bge_log_mutex);
    }
    return (status);
}

/*
 * bge_add_intrs:
 *
 * Register FIXED or MSI interrupts.
 */
static int
bge_add_intrs(bge_t *bgep, int intr_type)
{
    dev_info_t *dip = bgep->devinfo;
    int avail, actual, intr_size, count = 0;
    int i, flag, ret;

    BGE_DEBUG(("bge_add_intrs($%p, 0x%x)", (void *)bgep, intr_type));

    /* Get the number of interrupts */
    ret = ddi_intr_get_nintrs(dip, intr_type, &count);
    if ((ret != DDI_SUCCESS) || (count == 0)) {
        bge_error(bgep, "ddi_intr_get_nintrs() failure, ret: %d, "
            "count: %d", ret, count);
        return (DDI_FAILURE);
    }

    /* Get the number of available interrupts */
    ret = ddi_intr_get_navail(dip, intr_type, &avail);
    if ((ret != DDI_SUCCESS) || (avail == 0)) {
        bge_error(bgep, "ddi_intr_get_navail() failure, "
            "ret: %d, avail: %d\n", ret, avail);
        return (DDI_FAILURE);
    }

    if (avail < count) {
        BGE_DEBUG(("%s: nintrs() returned %d, navail returned %d",
            bgep->ifname, count, avail));
    }

    /*
     * BGE hardware generates only a single MSI even though it claims
     * to support multiple MSIs, so hard-code the MSI count to 1.
     */
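
    /*
     * Note on the allocation flags chosen below: with
     * DDI_INTR_ALLOC_STRICT, ddi_intr_alloc(9F) either grants exactly
     * the requested number of vectors or fails, which is what the
     * single-MSI policy above requires; DDI_INTR_ALLOC_NORMAL may
     * succeed with fewer vectors than requested, which is acceptable
     * for FIXED interrupts.
     */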
    if (intr_type == DDI_INTR_TYPE_MSI) {
        count = 1;
        flag = DDI_INTR_ALLOC_STRICT;
    } else {
        flag = DDI_INTR_ALLOC_NORMAL;
    }

    /* Allocate an array of interrupt handles */
    intr_size = count * sizeof (ddi_intr_handle_t);
    bgep->htable = kmem_alloc(intr_size, KM_SLEEP);

    /* Call ddi_intr_alloc() */
    ret = ddi_intr_alloc(dip, bgep->htable, intr_type, 0,
        count, &actual, flag);

    if ((ret != DDI_SUCCESS) || (actual == 0)) {
        bge_error(bgep, "ddi_intr_alloc() failed %d\n", ret);
        kmem_free(bgep->htable, intr_size);
        return (DDI_FAILURE);
    }

    if (actual < count) {
        BGE_DEBUG(("%s: Requested: %d, Received: %d",
            bgep->ifname, count, actual));
    }

    bgep->intr_cnt = actual;

    /*
     * Get the priority for the first MSI; assume the rest are all
     * the same.
     */
    if ((ret = ddi_intr_get_pri(bgep->htable[0], &bgep->intr_pri)) !=
        DDI_SUCCESS) {
        bge_error(bgep, "ddi_intr_get_pri() failed %d\n", ret);

        /* Free the already-allocated interrupts */
        for (i = 0; i < actual; i++) {
            (void) ddi_intr_free(bgep->htable[i]);
        }

        kmem_free(bgep->htable, intr_size);
        return (DDI_FAILURE);
    }

    /* Call ddi_intr_add_handler() */
    for (i = 0; i < actual; i++) {
        if ((ret = ddi_intr_add_handler(bgep->htable[i], bge_intr,
            (caddr_t)bgep, (caddr_t)(uintptr_t)i)) != DDI_SUCCESS) {
            bge_error(bgep, "ddi_intr_add_handler() "
                "failed %d\n", ret);

            /* Free the already-allocated interrupts */
            for (i = 0; i < actual; i++) {
                (void) ddi_intr_free(bgep->htable[i]);
            }

            kmem_free(bgep->htable, intr_size);
            return (DDI_FAILURE);
        }
    }

    if ((ret = ddi_intr_get_cap(bgep->htable[0], &bgep->intr_cap))
        != DDI_SUCCESS) {
        bge_error(bgep, "ddi_intr_get_cap() failed %d\n", ret);

        for (i = 0; i < actual; i++) {
            (void) ddi_intr_remove_handler(bgep->htable[i]);
            (void) ddi_intr_free(bgep->htable[i]);
        }

        kmem_free(bgep->htable, intr_size);
        return (DDI_FAILURE);
    }

    return (DDI_SUCCESS);
}

/*
 * bge_rem_intrs:
 *
 * Unregister FIXED or MSI interrupts
 */
static void
bge_rem_intrs(bge_t *bgep)
{
    int i;

    BGE_DEBUG(("bge_rem_intrs($%p)", (void *)bgep));

    /* Call ddi_intr_remove_handler() */
    for (i = 0; i < bgep->intr_cnt; i++) {
        (void) ddi_intr_remove_handler(bgep->htable[i]);
        (void) ddi_intr_free(bgep->htable[i]);
    }

    kmem_free(bgep->htable, bgep->intr_cnt * sizeof (ddi_intr_handle_t));
}

void
bge_intr_enable(bge_t *bgep)
{
    int i;

    if (bgep->intr_cap & DDI_INTR_FLAG_BLOCK) {
        /* Call ddi_intr_block_enable() for MSI interrupts */
        (void) ddi_intr_block_enable(bgep->htable, bgep->intr_cnt);
    } else {
        /* Call ddi_intr_enable() for MSI or FIXED interrupts */
        for (i = 0; i < bgep->intr_cnt; i++) {
            (void) ddi_intr_enable(bgep->htable[i]);
        }
    }
}

void
bge_intr_disable(bge_t *bgep)
{
    int i;

    if (bgep->intr_cap & DDI_INTR_FLAG_BLOCK) {
        /* Call ddi_intr_block_disable() */
        (void) ddi_intr_block_disable(bgep->htable, bgep->intr_cnt);
    } else {
        for (i = 0; i < bgep->intr_cnt; i++) {
            (void) ddi_intr_disable(bgep->htable[i]);
        }
    }
}

int
bge_reprogram(bge_t *bgep)
{
    int status = 0;

    ASSERT(mutex_owned(bgep->genlock));

    if (bge_phys_update(bgep) != DDI_SUCCESS) {
        ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
        status = IOC_INVAL;
    }
#ifdef BGE_IPMI_ASF
    if (bge_chip_sync(bgep, B_TRUE) == DDI_FAILURE) {
#else
    if (bge_chip_sync(bgep) == DDI_FAILURE) {
#endif
        ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
        status = IOC_INVAL;
    }
    if (bgep->intr_type == DDI_INTR_TYPE_MSI)
        bge_chip_msi_trig(bgep);
    return (status);
}