/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include "bge_impl.h"
#include <sys/sdt.h>
#include <sys/mac.h>

/*
 * This is the string displayed by modinfo, etc.
 * Make sure you keep the version ID up to date!
 */
static char bge_ident[] = "Broadcom Gb Ethernet v0.65";

/*
 * Property names
 */
static char debug_propname[] = "bge-debug-flags";
static char clsize_propname[] = "cache-line-size";
static char latency_propname[] = "latency-timer";
static char localmac_boolname[] = "local-mac-address?";
static char localmac_propname[] = "local-mac-address";
static char macaddr_propname[] = "mac-address";
static char subdev_propname[] = "subsystem-id";
static char subven_propname[] = "subsystem-vendor-id";
static char rxrings_propname[] = "bge-rx-rings";
static char txrings_propname[] = "bge-tx-rings";
static char fm_cap[] = "fm-capable";
static char default_mtu[] = "default_mtu";

static int bge_add_intrs(bge_t *, int);
static void bge_rem_intrs(bge_t *);

/*
 * Describes the chip's DMA engine
 */
static ddi_dma_attr_t dma_attr = {
	DMA_ATTR_V0,			/* dma_attr version	*/
	0x0000000000000000ull,		/* dma_attr_addr_lo	*/
	0xFFFFFFFFFFFFFFFFull,		/* dma_attr_addr_hi	*/
	0x00000000FFFFFFFFull,		/* dma_attr_count_max	*/
	0x0000000000000001ull,		/* dma_attr_align	*/
	0x00000FFF,			/* dma_attr_burstsizes	*/
	0x00000001,			/* dma_attr_minxfer	*/
	0x000000000000FFFFull,		/* dma_attr_maxxfer	*/
	0xFFFFFFFFFFFFFFFFull,		/* dma_attr_seg		*/
	1,				/* dma_attr_sgllen	*/
	0x00000001,			/* dma_attr_granular	*/
	DDI_DMA_FLAGERR			/* dma_attr_flags	*/
};

/*
 * PIO access attributes for registers
 */
static ddi_device_acc_attr_t bge_reg_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC,
	DDI_FLAGERR_ACC
};

/*
 * DMA access attributes for descriptors: NOT to be byte swapped.
 */
static ddi_device_acc_attr_t bge_desc_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC,
	DDI_FLAGERR_ACC
};
/*
 * DMA access attributes for data: NOT to be byte swapped.
 */
static ddi_device_acc_attr_t bge_data_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC
};

static int		bge_m_start(void *);
static void		bge_m_stop(void *);
static int		bge_m_promisc(void *, boolean_t);
static int		bge_m_multicst(void *, boolean_t, const uint8_t *);
static int		bge_m_unicst(void *, const uint8_t *);
static void		bge_m_resources(void *);
static void		bge_m_ioctl(void *, queue_t *, mblk_t *);
static boolean_t	bge_m_getcapab(void *, mac_capab_t, void *);
static int		bge_unicst_set(void *, const uint8_t *,
    mac_addr_slot_t);
static int		bge_m_unicst_add(void *, mac_multi_addr_t *);
static int		bge_m_unicst_remove(void *, mac_addr_slot_t);
static int		bge_m_unicst_modify(void *, mac_multi_addr_t *);
static int		bge_m_unicst_get(void *, mac_multi_addr_t *);
static int		bge_m_setprop(void *, const char *, mac_prop_id_t,
    uint_t, const void *);
static int		bge_m_getprop(void *, const char *, mac_prop_id_t,
    uint_t, uint_t, void *);
static int		bge_set_priv_prop(bge_t *, const char *, uint_t,
    const void *);
static int		bge_get_priv_prop(bge_t *, const char *, uint_t,
    uint_t, void *);

#define	BGE_M_CALLBACK_FLAGS\
	(MC_RESOURCES | MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP)

static mac_callbacks_t bge_m_callbacks = {
	BGE_M_CALLBACK_FLAGS,
	bge_m_stat,
	bge_m_start,
	bge_m_stop,
	bge_m_promisc,
	bge_m_multicst,
	bge_m_unicst,
	bge_m_tx,
	bge_m_resources,
	bge_m_ioctl,
	bge_m_getcapab,
	NULL,
	NULL,
	bge_m_setprop,
	bge_m_getprop
};

mac_priv_prop_t bge_priv_prop[] = {
	{"_adv_asym_pause_cap", MAC_PROP_PERM_RW},
	{"_adv_pause_cap", MAC_PROP_PERM_RW}
};

#define	BGE_MAX_PRIV_PROPS \
	(sizeof (bge_priv_prop) / sizeof (mac_priv_prop_t))
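/*
 * Note: these two driver-private properties are matched by name in
 * bge_set_priv_prop()/bge_get_priv_prop() below, where they map onto the
 * param_adv_pause and param_adv_asym_pause fields of the per-instance state.
 */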
/*
 * ========== Transmit and receive ring reinitialisation ==========
 */

/*
 * These <reinit> routines each reset the specified ring to an initial
 * state, assuming that the corresponding <init> routine has already
 * been called exactly once.
 */

static void
bge_reinit_send_ring(send_ring_t *srp)
{
	bge_queue_t *txbuf_queue;
	bge_queue_item_t *txbuf_head;
	sw_txbuf_t *txbuf;
	sw_sbd_t *ssbdp;
	uint32_t slot;

	/*
	 * Reinitialise control variables ...
	 */
	srp->tx_flow = 0;
	srp->tx_next = 0;
	srp->txfill_next = 0;
	srp->tx_free = srp->desc.nslots;
	ASSERT(mutex_owned(srp->tc_lock));
	srp->tc_next = 0;
	srp->txpkt_next = 0;
	srp->tx_block = 0;
	srp->tx_nobd = 0;
	srp->tx_nobuf = 0;

	/*
	 * Initialize the tx buffer push queue
	 */
	mutex_enter(srp->freetxbuf_lock);
	mutex_enter(srp->txbuf_lock);
	txbuf_queue = &srp->freetxbuf_queue;
	txbuf_queue->head = NULL;
	txbuf_queue->count = 0;
	txbuf_queue->lock = srp->freetxbuf_lock;
	srp->txbuf_push_queue = txbuf_queue;

	/*
	 * Initialize the tx buffer pop queue
	 */
	txbuf_queue = &srp->txbuf_queue;
	txbuf_queue->head = NULL;
	txbuf_queue->count = 0;
	txbuf_queue->lock = srp->txbuf_lock;
	srp->txbuf_pop_queue = txbuf_queue;
	txbuf_head = srp->txbuf_head;
	txbuf = srp->txbuf;
	for (slot = 0; slot < srp->tx_buffers; ++slot) {
		txbuf_head->item = txbuf;
		txbuf_head->next = txbuf_queue->head;
		txbuf_queue->head = txbuf_head;
		txbuf_queue->count++;
		txbuf++;
		txbuf_head++;
	}
	mutex_exit(srp->txbuf_lock);
	mutex_exit(srp->freetxbuf_lock);

	/*
	 * Zero and sync all the h/w Send Buffer Descriptors
	 */
	DMA_ZERO(srp->desc);
	DMA_SYNC(srp->desc, DDI_DMA_SYNC_FORDEV);
	bzero(srp->pktp, BGE_SEND_BUF_MAX * sizeof (*srp->pktp));
	ssbdp = srp->sw_sbds;
	for (slot = 0; slot < srp->desc.nslots; ++ssbdp, ++slot)
		ssbdp->pbuf = NULL;
}
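/*
 * Note: the loop above threads every tx buffer onto the pop queue as a
 * simple LIFO free list; the transmit path then moves items between the
 * push and pop queues as buffers are claimed and recycled.
 */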
static void
bge_reinit_recv_ring(recv_ring_t *rrp)
{
	/*
	 * Reinitialise control variables ...
	 */
	rrp->rx_next = 0;
}

static void
bge_reinit_buff_ring(buff_ring_t *brp, uint32_t ring)
{
	bge_rbd_t *hw_rbd_p;
	sw_rbd_t *srbdp;
	uint32_t bufsize;
	uint32_t nslots;
	uint32_t slot;

	static uint16_t ring_type_flag[BGE_BUFF_RINGS_MAX] = {
		RBD_FLAG_STD_RING,
		RBD_FLAG_JUMBO_RING,
		RBD_FLAG_MINI_RING
	};

	/*
	 * Zero, initialise and sync all the h/w Receive Buffer Descriptors
	 * Note: all the remaining fields (<type>, <flags>, <ip_cksum>,
	 * <tcp_udp_cksum>, <error_flag>, <vlan_tag>, and <reserved>)
	 * should be zeroed, and so don't need to be set up specifically
	 * once the whole area has been cleared.
	 */
	DMA_ZERO(brp->desc);

	hw_rbd_p = DMA_VPTR(brp->desc);
	nslots = brp->desc.nslots;
	ASSERT(brp->buf[0].nslots == nslots/BGE_SPLIT);
	bufsize = brp->buf[0].size;
	srbdp = brp->sw_rbds;
	for (slot = 0; slot < nslots; ++hw_rbd_p, ++srbdp, ++slot) {
		hw_rbd_p->host_buf_addr = srbdp->pbuf.cookie.dmac_laddress;
		hw_rbd_p->index = (uint16_t)slot;
		hw_rbd_p->len = (uint16_t)bufsize;
		hw_rbd_p->opaque = srbdp->pbuf.token;
		hw_rbd_p->flags |= ring_type_flag[ring];
	}

	DMA_SYNC(brp->desc, DDI_DMA_SYNC_FORDEV);

	/*
	 * Finally, reinitialise the ring control variables ...
	 */
	brp->rf_next = (nslots != 0) ? (nslots-1) : 0;
}

/*
 * Reinitialize all rings
 */
static void
bge_reinit_rings(bge_t *bgep)
{
	uint32_t ring;

	ASSERT(mutex_owned(bgep->genlock));

	/*
	 * Send Rings ...
	 */
	for (ring = 0; ring < bgep->chipid.tx_rings; ++ring)
		bge_reinit_send_ring(&bgep->send[ring]);

	/*
	 * Receive Return Rings ...
	 */
	for (ring = 0; ring < bgep->chipid.rx_rings; ++ring)
		bge_reinit_recv_ring(&bgep->recv[ring]);

	/*
	 * Receive Producer Rings ...
	 */
	for (ring = 0; ring < BGE_BUFF_RINGS_USED; ++ring)
		bge_reinit_buff_ring(&bgep->buff[ring], ring);
}

/*
 * ========== Internal state management entry points ==========
 */

#undef	BGE_DBG
#define	BGE_DBG		BGE_DBG_NEMO	/* debug flag for this code	*/

/*
 * These routines provide all the functionality required by the
 * corresponding GLD entry points, but don't update the GLD state
 * so they can be called internally without disturbing our record
 * of what GLD thinks we should be doing ...
 */

/*
 *	bge_reset() -- reset h/w & rings to initial state
 */
static int
#ifdef BGE_IPMI_ASF
bge_reset(bge_t *bgep, uint_t asf_mode)
#else
bge_reset(bge_t *bgep)
#endif
{
	uint32_t ring;
	int retval;

	BGE_TRACE(("bge_reset($%p)", (void *)bgep));

	ASSERT(mutex_owned(bgep->genlock));

	/*
	 * Grab all the other mutexes in the world (this should
	 * ensure no other threads are manipulating driver state)
	 */
	for (ring = 0; ring < BGE_RECV_RINGS_MAX; ++ring)
		mutex_enter(bgep->recv[ring].rx_lock);
	for (ring = 0; ring < BGE_BUFF_RINGS_MAX; ++ring)
		mutex_enter(bgep->buff[ring].rf_lock);
	rw_enter(bgep->errlock, RW_WRITER);
	for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring)
		mutex_enter(bgep->send[ring].tx_lock);
	for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring)
		mutex_enter(bgep->send[ring].tc_lock);

#ifdef BGE_IPMI_ASF
	retval = bge_chip_reset(bgep, B_TRUE, asf_mode);
#else
	retval = bge_chip_reset(bgep, B_TRUE);
#endif
	bge_reinit_rings(bgep);

	/*
	 * Free the world ...
	 */
	for (ring = BGE_SEND_RINGS_MAX; ring-- > 0; )
		mutex_exit(bgep->send[ring].tc_lock);
	for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring)
		mutex_exit(bgep->send[ring].tx_lock);
	rw_exit(bgep->errlock);
	for (ring = BGE_BUFF_RINGS_MAX; ring-- > 0; )
		mutex_exit(bgep->buff[ring].rf_lock);
	for (ring = BGE_RECV_RINGS_MAX; ring-- > 0; )
		mutex_exit(bgep->recv[ring].rx_lock);

	BGE_DEBUG(("bge_reset($%p) done", (void *)bgep));
	return (retval);
}
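/*
 * Note the lock hierarchy used above: genlock (held on entry), then the
 * rx_lock(s), rf_lock(s), errlock, tx_lock(s) and finally tc_lock(s),
 * released in roughly the reverse order. Any thread taking more than one
 * of these locks must acquire them in the same order to avoid deadlock.
 */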
/*
 *	bge_stop() -- stop processing, don't reset h/w or rings
 */
static void
bge_stop(bge_t *bgep)
{
	BGE_TRACE(("bge_stop($%p)", (void *)bgep));

	ASSERT(mutex_owned(bgep->genlock));

#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled) {
		bgep->asf_pseudostop = B_TRUE;
	} else {
#endif
		bge_chip_stop(bgep, B_FALSE);
#ifdef BGE_IPMI_ASF
	}
#endif

	BGE_DEBUG(("bge_stop($%p) done", (void *)bgep));
}

/*
 *	bge_start() -- start transmitting/receiving
 */
static int
bge_start(bge_t *bgep, boolean_t reset_phys)
{
	int retval;

	BGE_TRACE(("bge_start($%p, %d)", (void *)bgep, reset_phys));

	ASSERT(mutex_owned(bgep->genlock));

	/*
	 * Start chip processing, including enabling interrupts
	 */
	retval = bge_chip_start(bgep, reset_phys);

	BGE_DEBUG(("bge_start($%p, %d) done", (void *)bgep, reset_phys));
	return (retval);
}

/*
 * bge_restart - restart transmitting/receiving after error or suspend
 */
int
bge_restart(bge_t *bgep, boolean_t reset_phys)
{
	int retval = DDI_SUCCESS;
	ASSERT(mutex_owned(bgep->genlock));

#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled) {
		if (bge_reset(bgep, ASF_MODE_POST_INIT) != DDI_SUCCESS)
			retval = DDI_FAILURE;
	} else
		if (bge_reset(bgep, ASF_MODE_NONE) != DDI_SUCCESS)
			retval = DDI_FAILURE;
#else
	if (bge_reset(bgep) != DDI_SUCCESS)
		retval = DDI_FAILURE;
#endif
	if (bgep->bge_mac_state == BGE_MAC_STARTED) {
		if (bge_start(bgep, reset_phys) != DDI_SUCCESS)
			retval = DDI_FAILURE;
		bgep->watchdog = 0;
		ddi_trigger_softintr(bgep->drain_id);
	}

	BGE_DEBUG(("bge_restart($%p, %d) done", (void *)bgep, reset_phys));
	return (retval);
}


/*
 * ========== Nemo-required management entry points ==========
 */

#undef	BGE_DBG
#define	BGE_DBG		BGE_DBG_NEMO	/* debug flag for this code	*/

/*
 *	bge_m_stop() -- stop transmitting/receiving
 */
static void
bge_m_stop(void *arg)
{
	bge_t *bgep = arg;		/* private device info	*/
	send_ring_t *srp;
	uint32_t ring;

	BGE_TRACE(("bge_m_stop($%p)", arg));

	/*
	 * Just stop processing, then record new GLD state
	 */
	mutex_enter(bgep->genlock);
	if (!(bgep->progress & PROGRESS_INTR)) {
		/* can happen during autorecovery */
		mutex_exit(bgep->genlock);
		return;
	}
	bge_stop(bgep);

	bgep->link_update_timer = 0;
	bgep->link_state = LINK_STATE_UNKNOWN;
	mac_link_update(bgep->mh, bgep->link_state);

	/*
	 * Free the possible tx buffers allocated in tx process.
	 */
#ifdef BGE_IPMI_ASF
	if (!bgep->asf_pseudostop)
#endif
	{
		rw_enter(bgep->errlock, RW_WRITER);
		for (ring = 0; ring < bgep->chipid.tx_rings; ++ring) {
			srp = &bgep->send[ring];
			mutex_enter(srp->tx_lock);
			if (srp->tx_array > 1)
				bge_free_txbuf_arrays(srp);
			mutex_exit(srp->tx_lock);
		}
		rw_exit(bgep->errlock);
	}
	bgep->bge_mac_state = BGE_MAC_STOPPED;
	BGE_DEBUG(("bge_m_stop($%p) done", arg));
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK)
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_UNAFFECTED);
	mutex_exit(bgep->genlock);
}
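/*
 * Note: when IPMI/ASF is enabled, bge_stop() only pseudo-stops the chip
 * (asf_pseudostop) so that the ASF firmware keeps running over its
 * sideband interface; in that case the extra tx buffer arrays are left in
 * place as well, since the still-running chip may yet reference them.
 */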
/*
 *	bge_m_start() -- start transmitting/receiving
 */
static int
bge_m_start(void *arg)
{
	bge_t *bgep = arg;		/* private device info	*/

	BGE_TRACE(("bge_m_start($%p)", arg));

	/*
	 * Start processing and record new GLD state
	 */
	mutex_enter(bgep->genlock);
	if (!(bgep->progress & PROGRESS_INTR)) {
		/* can happen during autorecovery */
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled) {
		if ((bgep->asf_status == ASF_STAT_RUN) &&
		    (bgep->asf_pseudostop)) {
			bgep->bge_mac_state = BGE_MAC_STARTED;
			mutex_exit(bgep->genlock);
			return (0);
		}
	}
	if (bge_reset(bgep, ASF_MODE_INIT) != DDI_SUCCESS) {
#else
	if (bge_reset(bgep) != DDI_SUCCESS) {
#endif
		(void) bge_check_acc_handle(bgep, bgep->cfg_handle);
		(void) bge_check_acc_handle(bgep, bgep->io_handle);
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	if (bge_start(bgep, B_TRUE) != DDI_SUCCESS) {
		(void) bge_check_acc_handle(bgep, bgep->cfg_handle);
		(void) bge_check_acc_handle(bgep, bgep->io_handle);
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	bgep->bge_mac_state = BGE_MAC_STARTED;
	BGE_DEBUG(("bge_m_start($%p) done", arg));

	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled) {
		if (bgep->asf_status != ASF_STAT_RUN) {
			/* start ASF heart beat */
			bgep->asf_timeout_id = timeout(bge_asf_heartbeat,
			    (void *)bgep,
			    drv_usectohz(BGE_ASF_HEARTBEAT_INTERVAL));
			bgep->asf_status = ASF_STAT_RUN;
		}
	}
#endif
	mutex_exit(bgep->genlock);

	return (0);
}
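/*
 * Note: the bge_check_acc_handle() calls above implement the FMA access
 * check protocol: if any config-space or register access faulted during
 * startup, the service state is marked degraded and the start fails with
 * EIO rather than leaving a half-initialised device running.
 */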
/*
 *	bge_m_unicst() -- set the physical network address
 */
static int
bge_m_unicst(void *arg, const uint8_t *macaddr)
{
	/*
	 * Request to set address in
	 * address slot 0, i.e., default address
	 */
	return (bge_unicst_set(arg, macaddr, 0));
}

/*
 *	bge_unicst_set() -- set the physical network address
 */
static int
bge_unicst_set(void *arg, const uint8_t *macaddr, mac_addr_slot_t slot)
{
	bge_t *bgep = arg;		/* private device info	*/

	BGE_TRACE(("bge_m_unicst_set($%p, %s)", arg,
	    ether_sprintf((void *)macaddr)));
	/*
	 * Remember the new current address in the driver state
	 * Sync the chip's idea of the address too ...
	 */
	mutex_enter(bgep->genlock);
	if (!(bgep->progress & PROGRESS_INTR)) {
		/* can happen during autorecovery */
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	ethaddr_copy(macaddr, bgep->curr_addr[slot].addr);
#ifdef BGE_IPMI_ASF
	if (bge_chip_sync(bgep, B_FALSE) == DDI_FAILURE) {
#else
	if (bge_chip_sync(bgep) == DDI_FAILURE) {
#endif
		(void) bge_check_acc_handle(bgep, bgep->cfg_handle);
		(void) bge_check_acc_handle(bgep, bgep->io_handle);
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled) {
		/*
		 * The above bge_chip_sync() function wrote the ethernet MAC
		 * addresses registers which destroyed the IPMI/ASF sideband.
		 * Here, we have to reset chip to make IPMI/ASF sideband work.
		 */
		if (bgep->asf_status == ASF_STAT_RUN) {
			/*
			 * We must stop ASF heart beat before bge_chip_stop(),
			 * otherwise some computers (ex. IBM HS20 blade
			 * server) may crash.
			 */
			bge_asf_update_status(bgep);
			bge_asf_stop_timer(bgep);
			bgep->asf_status = ASF_STAT_STOP;

			bge_asf_pre_reset_operations(bgep, BGE_INIT_RESET);
		}
		bge_chip_stop(bgep, B_FALSE);

		if (bge_restart(bgep, B_FALSE) == DDI_FAILURE) {
			(void) bge_check_acc_handle(bgep, bgep->cfg_handle);
			(void) bge_check_acc_handle(bgep, bgep->io_handle);
			ddi_fm_service_impact(bgep->devinfo,
			    DDI_SERVICE_DEGRADED);
			mutex_exit(bgep->genlock);
			return (EIO);
		}

		/*
		 * Start our ASF heartbeat counter as soon as possible.
		 */
		if (bgep->asf_status != ASF_STAT_RUN) {
			/* start ASF heart beat */
			bgep->asf_timeout_id = timeout(bge_asf_heartbeat,
			    (void *)bgep,
			    drv_usectohz(BGE_ASF_HEARTBEAT_INTERVAL));
			bgep->asf_status = ASF_STAT_RUN;
		}
	}
#endif
	BGE_DEBUG(("bge_m_unicst_set($%p) done", arg));
	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	mutex_exit(bgep->genlock);

	return (0);
}

/*
 * The following four routines are used as callbacks for multiple MAC
 * address support:
 *    -  bge_m_unicst_add(void *, mac_multi_addr_t *);
 *    -  bge_m_unicst_remove(void *, mac_addr_slot_t);
 *    -  bge_m_unicst_modify(void *, mac_multi_addr_t *);
 *    -  bge_m_unicst_get(void *, mac_multi_addr_t *);
 */

/*
 * bge_m_unicst_add() - will find an unused address slot, set the
 * address value to the one specified, reserve that slot and enable
 * the NIC to start filtering on the new MAC address.
 * Returns 0 on success.
 */
static int
bge_m_unicst_add(void *arg, mac_multi_addr_t *maddr)
{
	bge_t *bgep = arg;		/* private device info	*/
	mac_addr_slot_t slot;
	int err;

	if (mac_unicst_verify(bgep->mh,
	    maddr->mma_addr, maddr->mma_addrlen) == B_FALSE)
		return (EINVAL);

	mutex_enter(bgep->genlock);
	if (bgep->unicst_addr_avail == 0) {
		/* no slots available */
		mutex_exit(bgep->genlock);
		return (ENOSPC);
	}

	/*
	 * Primary/default address is in slot 0. The next three
	 * addresses are the multiple MAC addresses. So multiple
	 * MAC address 0 is in slot 1, 1 in slot 2, and so on.
	 * So the first multiple MAC address resides in slot 1.
	 */
	for (slot = 1; slot < bgep->unicst_addr_total; slot++) {
		if (bgep->curr_addr[slot].set == B_FALSE) {
			bgep->curr_addr[slot].set = B_TRUE;
			break;
		}
	}

	ASSERT(slot < bgep->unicst_addr_total);
	bgep->unicst_addr_avail--;
	mutex_exit(bgep->genlock);
	maddr->mma_slot = slot;

	if ((err = bge_unicst_set(bgep, maddr->mma_addr, slot)) != 0) {
		mutex_enter(bgep->genlock);
		bgep->curr_addr[slot].set = B_FALSE;
		bgep->unicst_addr_avail++;
		mutex_exit(bgep->genlock);
	}
	return (err);
}

/*
 * bge_m_unicst_remove() - removes a MAC address that was added by a
 * call to bge_m_unicst_add(). The slot number that was returned in
 * add() is passed in the call to remove the address.
 * Returns 0 on success.
 */
static int
bge_m_unicst_remove(void *arg, mac_addr_slot_t slot)
{
	bge_t *bgep = arg;		/* private device info	*/

	if (slot <= 0 || slot >= bgep->unicst_addr_total)
		return (EINVAL);

	mutex_enter(bgep->genlock);
	if (bgep->curr_addr[slot].set == B_TRUE) {
		bgep->curr_addr[slot].set = B_FALSE;
		bgep->unicst_addr_avail++;
		mutex_exit(bgep->genlock);
		/*
		 * Copy the default address to the passed slot
		 */
		return (bge_unicst_set(bgep, bgep->curr_addr[0].addr, slot));
	}
	mutex_exit(bgep->genlock);
	return (EINVAL);
}
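/*
 * Usage sketch: the MAC layer calls bge_m_unicst_add() to claim a spare
 * slot and program a new address, and bge_m_unicst_remove() to release
 * it; note that remove() reprograms the slot with the primary address
 * from slot 0, so the hardware filter no longer matches the removed
 * address.
 */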
/*
 * bge_m_unicst_modify() - modifies the value of an address that
 * has been added by bge_m_unicst_add(). The new address, address
 * length and the slot number that was returned in the call to add
 * should be passed to bge_m_unicst_modify(). mma_flags should be
 * set to 0. Returns 0 on success.
 */
static int
bge_m_unicst_modify(void *arg, mac_multi_addr_t *maddr)
{
	bge_t *bgep = arg;		/* private device info	*/
	mac_addr_slot_t slot;

	if (mac_unicst_verify(bgep->mh,
	    maddr->mma_addr, maddr->mma_addrlen) == B_FALSE)
		return (EINVAL);

	slot = maddr->mma_slot;

	if (slot <= 0 || slot >= bgep->unicst_addr_total)
		return (EINVAL);

	mutex_enter(bgep->genlock);
	if (bgep->curr_addr[slot].set == B_TRUE) {
		mutex_exit(bgep->genlock);
		return (bge_unicst_set(bgep, maddr->mma_addr, slot));
	}
	mutex_exit(bgep->genlock);

	return (EINVAL);
}

/*
 * bge_m_unicst_get() - will get the MAC address and all other
 * information related to the address slot passed in mac_multi_addr_t.
 * mma_flags should be set to 0 in the call.
 * On return, mma_flags can take the following values:
 * 1) MMAC_SLOT_UNUSED
 * 2) MMAC_SLOT_USED | MMAC_VENDOR_ADDR
 * 3) MMAC_SLOT_UNUSED | MMAC_VENDOR_ADDR
 * 4) MMAC_SLOT_USED
 */
static int
bge_m_unicst_get(void *arg, mac_multi_addr_t *maddr)
{
	bge_t *bgep = arg;		/* private device info	*/
	mac_addr_slot_t slot;

	slot = maddr->mma_slot;

	if (slot <= 0 || slot >= bgep->unicst_addr_total)
		return (EINVAL);

	mutex_enter(bgep->genlock);
	if (bgep->curr_addr[slot].set == B_TRUE) {
		ethaddr_copy(bgep->curr_addr[slot].addr,
		    maddr->mma_addr);
		maddr->mma_flags = MMAC_SLOT_USED;
	} else {
		maddr->mma_flags = MMAC_SLOT_UNUSED;
	}
	mutex_exit(bgep->genlock);

	return (0);
}

extern void bge_wake_factotum(bge_t *);

static boolean_t
bge_param_locked(mac_prop_id_t pr_num)
{
	/*
	 * All adv_* parameters are locked (read-only) while
	 * the device is in any sort of loopback mode ...
	 */
	switch (pr_num) {
	case MAC_PROP_ADV_1000FDX_CAP:
	case MAC_PROP_EN_1000FDX_CAP:
	case MAC_PROP_ADV_1000HDX_CAP:
	case MAC_PROP_EN_1000HDX_CAP:
	case MAC_PROP_ADV_100FDX_CAP:
	case MAC_PROP_EN_100FDX_CAP:
	case MAC_PROP_ADV_100HDX_CAP:
	case MAC_PROP_EN_100HDX_CAP:
	case MAC_PROP_ADV_10FDX_CAP:
	case MAC_PROP_EN_10FDX_CAP:
	case MAC_PROP_ADV_10HDX_CAP:
	case MAC_PROP_EN_10HDX_CAP:
	case MAC_PROP_AUTONEG:
	case MAC_PROP_FLOWCTRL:
		return (B_TRUE);
	}
	return (B_FALSE);
}
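/*
 * bge_param_locked() is consulted by bge_m_setprop() below: while the
 * device is in any loopback mode, attempts to change the adv_ and en_
 * capability properties, autonegotiation or flow control are rejected
 * with EBUSY.
 */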
/*
 * callback functions for set/get of properties
 */
static int
bge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
    uint_t pr_valsize, const void *pr_val)
{
	bge_t *bgep = barg;
	int err = 0;
	uint32_t cur_mtu, new_mtu;
	uint_t maxsdu;
	link_flowctrl_t fl;

	mutex_enter(bgep->genlock);
	if (bgep->param_loop_mode != BGE_LOOP_NONE &&
	    bge_param_locked(pr_num)) {
		/*
		 * All adv_* parameters are locked (read-only)
		 * while the device is in any sort of loopback mode.
		 */
		mutex_exit(bgep->genlock);
		return (EBUSY);
	}
	if ((bgep->chipid.flags & CHIP_FLAG_SERDES) &&
	    ((pr_num == MAC_PROP_EN_100FDX_CAP) ||
	    (pr_num == MAC_PROP_EN_100HDX_CAP) ||
	    (pr_num == MAC_PROP_EN_10FDX_CAP) ||
	    (pr_num == MAC_PROP_EN_10HDX_CAP))) {
		/*
		 * these properties are read/write on copper,
		 * read-only and 0 on serdes
		 */
		mutex_exit(bgep->genlock);
		return (ENOTSUP);
	}

	switch (pr_num) {
	case MAC_PROP_EN_1000FDX_CAP:
		bgep->param_en_1000fdx = *(uint8_t *)pr_val;
		bgep->param_adv_1000fdx = *(uint8_t *)pr_val;
		goto reprogram;
	case MAC_PROP_EN_1000HDX_CAP:
		bgep->param_en_1000hdx = *(uint8_t *)pr_val;
		bgep->param_adv_1000hdx = *(uint8_t *)pr_val;
		goto reprogram;
	case MAC_PROP_EN_100FDX_CAP:
		bgep->param_en_100fdx = *(uint8_t *)pr_val;
		bgep->param_adv_100fdx = *(uint8_t *)pr_val;
		goto reprogram;
	case MAC_PROP_EN_100HDX_CAP:
		bgep->param_en_100hdx = *(uint8_t *)pr_val;
		bgep->param_adv_100hdx = *(uint8_t *)pr_val;
		goto reprogram;
	case MAC_PROP_EN_10FDX_CAP:
		bgep->param_en_10fdx = *(uint8_t *)pr_val;
		bgep->param_adv_10fdx = *(uint8_t *)pr_val;
		goto reprogram;
	case MAC_PROP_EN_10HDX_CAP:
		bgep->param_en_10hdx = *(uint8_t *)pr_val;
		bgep->param_adv_10hdx = *(uint8_t *)pr_val;
reprogram:
		if (err == 0 && bge_reprogram(bgep) == IOC_INVAL)
			err = EINVAL;
		break;
	case MAC_PROP_ADV_1000FDX_CAP:
	case MAC_PROP_ADV_1000HDX_CAP:
	case MAC_PROP_ADV_100FDX_CAP:
	case MAC_PROP_ADV_100HDX_CAP:
	case MAC_PROP_ADV_10FDX_CAP:
	case MAC_PROP_ADV_10HDX_CAP:
	case MAC_PROP_STATUS:
	case MAC_PROP_SPEED:
	case MAC_PROP_DUPLEX:
		err = ENOTSUP; /* read-only prop. Can't set this */
		break;
	case MAC_PROP_AUTONEG:
		bgep->param_adv_autoneg = *(uint8_t *)pr_val;
		if (bge_reprogram(bgep) == IOC_INVAL)
			err = EINVAL;
		break;
	case MAC_PROP_MTU:
		cur_mtu = bgep->chipid.default_mtu;
		bcopy(pr_val, &new_mtu, sizeof (new_mtu));

		if (new_mtu == cur_mtu) {
			err = 0;
			break;
		}
		if (new_mtu < BGE_DEFAULT_MTU ||
		    new_mtu > BGE_MAXIMUM_MTU) {
			err = EINVAL;
			break;
		}
		if ((new_mtu > BGE_DEFAULT_MTU) &&
		    (bgep->chipid.flags & CHIP_FLAG_NO_JUMBO)) {
			err = EINVAL;
			break;
		}
		if (bgep->bge_mac_state == BGE_MAC_STARTED) {
			err = EBUSY;
			break;
		}
		bgep->chipid.default_mtu = new_mtu;
		if (bge_chip_id_init(bgep)) {
			err = EINVAL;
			break;
		}
		maxsdu = bgep->chipid.ethmax_size -
		    sizeof (struct ether_header);
		err = mac_maxsdu_update(bgep->mh, maxsdu);
		if (err == 0) {
			bgep->bge_dma_error = B_TRUE;
			bgep->manual_reset = B_TRUE;
			bge_chip_stop(bgep, B_TRUE);
			bge_wake_factotum(bgep);
			err = 0;
		}
		break;
	case MAC_PROP_FLOWCTRL:
		bcopy(pr_val, &fl, sizeof (fl));
		switch (fl) {
		default:
			err = ENOTSUP;
			break;
		case LINK_FLOWCTRL_NONE:
			bgep->param_adv_pause = 0;
			bgep->param_adv_asym_pause = 0;

			bgep->param_link_rx_pause = B_FALSE;
			bgep->param_link_tx_pause = B_FALSE;
			break;
		case LINK_FLOWCTRL_RX:
			if (!((bgep->param_lp_pause == 0) &&
			    (bgep->param_lp_asym_pause == 1))) {
				err = EINVAL;
				break;
			}
			bgep->param_adv_pause = 1;
			bgep->param_adv_asym_pause = 1;

			bgep->param_link_rx_pause = B_TRUE;
			bgep->param_link_tx_pause = B_FALSE;
			break;
		case LINK_FLOWCTRL_TX:
			if (!((bgep->param_lp_pause == 1) &&
			    (bgep->param_lp_asym_pause == 1))) {
				err = EINVAL;
				break;
			}
			bgep->param_adv_pause = 0;
			bgep->param_adv_asym_pause = 1;

			bgep->param_link_rx_pause = B_FALSE;
			bgep->param_link_tx_pause = B_TRUE;
			break;
		case LINK_FLOWCTRL_BI:
			if (bgep->param_lp_pause != 1) {
				err = EINVAL;
				break;
			}
			bgep->param_adv_pause = 1;

			bgep->param_link_rx_pause = B_TRUE;
			bgep->param_link_tx_pause = B_TRUE;
			break;
		}

		if (err == 0) {
			if (bge_reprogram(bgep) == IOC_INVAL)
				err = EINVAL;
		}

		break;
	case MAC_PROP_PRIVATE:
		err = bge_set_priv_prop(bgep, pr_name, pr_valsize,
		    pr_val);
		break;
	default:
		err = ENOTSUP;
		break;
	}
	mutex_exit(bgep->genlock);
	return (err);
}

static int
bge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
    uint_t pr_flags, uint_t pr_valsize, void *pr_val)
{
	bge_t *bgep = barg;
	int err = 0;
	link_flowctrl_t fl;
	uint64_t speed;
	int flags = bgep->chipid.flags;
	boolean_t is_default = (pr_flags & MAC_PROP_DEFAULT);

	if (pr_valsize == 0)
		return (EINVAL);
	bzero(pr_val, pr_valsize);
	switch (pr_num) {
	case MAC_PROP_DUPLEX:
		if (pr_valsize < sizeof (link_duplex_t))
			return (EINVAL);
		bcopy(&bgep->param_link_duplex, pr_val,
		    sizeof (link_duplex_t));
		break;
	case MAC_PROP_SPEED:
		if (pr_valsize < sizeof (speed))
			return (EINVAL);
		speed = bgep->param_link_speed * 1000000ull;
		bcopy(&speed, pr_val, sizeof (speed));
		break;
	case MAC_PROP_STATUS:
		if (pr_valsize < sizeof (link_state_t))
			return (EINVAL);
		bcopy(&bgep->link_state, pr_val,
		    sizeof (link_state_t));
		break;
	case MAC_PROP_AUTONEG:
		if (is_default)
			*(uint8_t *)pr_val = 1;
		else
			*(uint8_t *)pr_val = bgep->param_adv_autoneg;
		break;
	case MAC_PROP_FLOWCTRL:
		if (pr_valsize < sizeof (fl))
			return (EINVAL);
		if (is_default) {
			fl = LINK_FLOWCTRL_BI;
			bcopy(&fl, pr_val, sizeof (fl));
			break;
		}

		if (bgep->param_link_rx_pause &&
		    !bgep->param_link_tx_pause)
			fl = LINK_FLOWCTRL_RX;

		if (!bgep->param_link_rx_pause &&
		    !bgep->param_link_tx_pause)
			fl = LINK_FLOWCTRL_NONE;

		if (!bgep->param_link_rx_pause &&
		    bgep->param_link_tx_pause)
			fl = LINK_FLOWCTRL_TX;

		if (bgep->param_link_rx_pause &&
		    bgep->param_link_tx_pause)
			fl = LINK_FLOWCTRL_BI;
		bcopy(&fl, pr_val, sizeof (fl));
		break;
	case MAC_PROP_ADV_1000FDX_CAP:
		if (is_default)
			*(uint8_t *)pr_val = 1;
		else
			*(uint8_t *)pr_val = bgep->param_adv_1000fdx;
		break;
	case MAC_PROP_EN_1000FDX_CAP:
		if (is_default)
			*(uint8_t *)pr_val = 1;
		else
			*(uint8_t *)pr_val = bgep->param_en_1000fdx;
		break;
	case MAC_PROP_ADV_1000HDX_CAP:
		if (is_default)
			*(uint8_t *)pr_val = 1;
		else
			*(uint8_t *)pr_val = bgep->param_adv_1000hdx;
		break;
	case MAC_PROP_EN_1000HDX_CAP:
		if (is_default)
			*(uint8_t *)pr_val = 1;
		else
			*(uint8_t *)pr_val = bgep->param_en_1000hdx;
		break;
	case MAC_PROP_ADV_100FDX_CAP:
		if (is_default) {
			*(uint8_t *)pr_val =
			    ((flags & CHIP_FLAG_SERDES) ? 0 : 1);
		} else {
			*(uint8_t *)pr_val = bgep->param_adv_100fdx;
		}
		break;
	case MAC_PROP_EN_100FDX_CAP:
		if (is_default) {
			*(uint8_t *)pr_val =
			    ((flags & CHIP_FLAG_SERDES) ? 0 : 1);
		} else {
			*(uint8_t *)pr_val = bgep->param_en_100fdx;
		}
		break;
	case MAC_PROP_ADV_100HDX_CAP:
		if (is_default) {
			*(uint8_t *)pr_val =
			    ((flags & CHIP_FLAG_SERDES) ? 0 : 1);
		} else {
			*(uint8_t *)pr_val = bgep->param_adv_100hdx;
		}
		break;
	case MAC_PROP_EN_100HDX_CAP:
		if (is_default) {
			*(uint8_t *)pr_val =
			    ((flags & CHIP_FLAG_SERDES) ? 0 : 1);
		} else {
			*(uint8_t *)pr_val = bgep->param_en_100hdx;
		}
		break;
	case MAC_PROP_ADV_10FDX_CAP:
		if (is_default) {
			*(uint8_t *)pr_val =
			    ((flags & CHIP_FLAG_SERDES) ? 0 : 1);
		} else {
			*(uint8_t *)pr_val = bgep->param_adv_10fdx;
		}
		break;
	case MAC_PROP_EN_10FDX_CAP:
		if (is_default) {
			*(uint8_t *)pr_val =
			    ((flags & CHIP_FLAG_SERDES) ? 0 : 1);
		} else {
			*(uint8_t *)pr_val = bgep->param_en_10fdx;
		}
		break;
	case MAC_PROP_ADV_10HDX_CAP:
		if (is_default) {
			*(uint8_t *)pr_val =
			    ((flags & CHIP_FLAG_SERDES) ? 0 : 1);
		} else {
			*(uint8_t *)pr_val = bgep->param_adv_10hdx;
		}
		break;
	case MAC_PROP_EN_10HDX_CAP:
		if (is_default) {
			*(uint8_t *)pr_val =
			    ((flags & CHIP_FLAG_SERDES) ? 0 : 1);
		} else {
			*(uint8_t *)pr_val = bgep->param_en_10hdx;
		}
		break;
	case MAC_PROP_ADV_100T4_CAP:
	case MAC_PROP_EN_100T4_CAP:
		*(uint8_t *)pr_val = 0;
		break;
	case MAC_PROP_PRIVATE:
		err = bge_get_priv_prop(bgep, pr_name, pr_flags,
		    pr_valsize, pr_val);
		return (err);
	default:
		return (ENOTSUP);
	}
	return (0);
}
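/*
 * Driver-private properties (all named with a leading '_') are passed
 * down from the MAC layer as strings; e.g. a hypothetical invocation
 * such as "dladm set-linkprop -p _drain_max=64 bge0" arrives below with
 * pr_name = "_drain_max" and pr_val pointing at the text "64", hence
 * the ddi_strtol() parsing in bge_set_priv_prop().
 */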
/* ARGSUSED */
static int
bge_set_priv_prop(bge_t *bgep, const char *pr_name, uint_t pr_valsize,
    const void *pr_val)
{
	int err = 0;
	long result;

	if (strcmp(pr_name, "_adv_pause_cap") == 0) {
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
		if (result > 1 || result < 0) {
			err = EINVAL;
		} else {
			bgep->param_adv_pause = (uint32_t)result;
			if (bge_reprogram(bgep) == IOC_INVAL)
				err = EINVAL;
		}
		return (err);
	}
	if (strcmp(pr_name, "_adv_asym_pause_cap") == 0) {
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
		if (result > 1 || result < 0) {
			err = EINVAL;
		} else {
			bgep->param_adv_asym_pause = (uint32_t)result;
			if (bge_reprogram(bgep) == IOC_INVAL)
				err = EINVAL;
		}
		return (err);
	}
	if (strcmp(pr_name, "_drain_max") == 0) {

		/*
		 * on the Tx side, we need to update the h/w register for
		 * real packet transmission per packet. The drain_max parameter
		 * is used to reduce the register access. This parameter
		 * controls the max number of packets that we will hold before
		 * updating the bge h/w to trigger h/w transmit. The bge
		 * chipset usually has a max of 512 Tx descriptors, thus
		 * the upper bound on drain_max is 512.
		 */
		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
		if (result > 512 || result < 1)
			err = EINVAL;
		else {
			bgep->param_drain_max = (uint32_t)result;
			if (bge_reprogram(bgep) == IOC_INVAL)
				err = EINVAL;
		}
		return (err);
	}
	if (strcmp(pr_name, "_msi_cnt") == 0) {

		if (pr_val == NULL) {
			err = EINVAL;
			return (err);
		}
		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
		if (result > 7 || result < 0)
			err = EINVAL;
		else {
			bgep->param_msi_cnt = (uint32_t)result;
			if (bge_reprogram(bgep) == IOC_INVAL)
				err = EINVAL;
		}
		return (err);
	}
	if (strcmp(pr_name, "_intr_coalesce_blank_time") == 0) {
		if (ddi_strtol(pr_val, (char **)NULL, 0, &result) != 0)
			return (EINVAL);

		bgep->chipid.rx_ticks_norm = (uint32_t)result;
		return (0);
	}

	if (strcmp(pr_name, "_intr_coalesce_pkt_cnt") == 0) {
		if (ddi_strtol(pr_val, (char **)NULL, 0, &result) != 0)
			return (EINVAL);

		bgep->chipid.rx_count_norm = (uint32_t)result;
		return (0);
	}
	return (ENOTSUP);
}

static int
bge_get_priv_prop(bge_t *bge, const char *pr_name, uint_t pr_flags,
    uint_t pr_valsize, void *pr_val)
{
	int err = ENOTSUP;
	boolean_t is_default = (pr_flags & MAC_PROP_DEFAULT);
	int value;

	if (strcmp(pr_name, "_adv_pause_cap") == 0) {
		value = (is_default ? 1 : bge->param_adv_pause);
		err = 0;
		goto done;
	}
	if (strcmp(pr_name, "_adv_asym_pause_cap") == 0) {
		value = (is_default ? 1 : bge->param_adv_asym_pause);
		err = 0;
		goto done;
	}
	if (strcmp(pr_name, "_drain_max") == 0) {
		value = (is_default ? 64 : bge->param_drain_max);
		err = 0;
		goto done;
	}
	if (strcmp(pr_name, "_msi_cnt") == 0) {
		value = (is_default ? 0 : bge->param_msi_cnt);
		err = 0;
		goto done;
	}

	if (strcmp(pr_name, "_intr_coalesce_blank_time") == 0) {
		value = (is_default ? bge_rx_ticks_norm :
		    bge->chipid.rx_ticks_norm);
		err = 0;
		goto done;
	}

	if (strcmp(pr_name, "_intr_coalesce_pkt_cnt") == 0) {
		value = (is_default ? bge_rx_count_norm :
		    bge->chipid.rx_count_norm);
		err = 0;
		goto done;
	}

done:
	if (err == 0) {
		(void) snprintf(pr_val, pr_valsize, "%d", value);
	}
	return (err);
}

/*
 * Compute the index of the required bit in the multicast hash map.
 * This must mirror the way the hardware actually does it!
 * See Broadcom document 570X-PG102-R page 125.
 */
static uint32_t
bge_hash_index(const uint8_t *mca)
{
	uint32_t hash;

	CRC32(hash, mca, ETHERADDRL, -1U, crc32_table);

	return (hash);
}
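/*
 * Worked example (arbitrary illustrative CRC value): if the CRC32 of a
 * multicast address came out as 0x9f0214d3, and assuming
 * BGE_HASH_TABLE_SIZE is 128 (four 32-bit words), then
 * index = 0x9f0214d3 % 128 = 83, which maps to bit (83 % 32) = 19 of
 * word (83 / 32) = 2 of mcast_hash[] -- see the index/word/bit
 * computation in bge_m_multicst() below.
 */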
/*
 *	bge_m_multicst() -- enable/disable a multicast address
 */
static int
bge_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
{
	bge_t *bgep = arg;		/* private device info	*/
	uint32_t hash;
	uint32_t index;
	uint32_t word;
	uint32_t bit;
	uint8_t *refp;

	BGE_TRACE(("bge_m_multicst($%p, %s, %s)", arg,
	    (add) ? "add" : "remove", ether_sprintf((void *)mca)));

	/*
	 * Precalculate all required masks, pointers etc ...
	 */
	hash = bge_hash_index(mca);
	index = hash % BGE_HASH_TABLE_SIZE;
	word = index/32u;
	bit = 1 << (index % 32u);
	refp = &bgep->mcast_refs[index];

	BGE_DEBUG(("bge_m_multicst: hash 0x%x index %d (%d:0x%x) = %d",
	    hash, index, word, bit, *refp));

	/*
	 * We must set the appropriate bit in the hash map (and the
	 * corresponding h/w register) when the refcount goes from 0
	 * to >0, and clear it when the last ref goes away (refcount
	 * goes from >0 back to 0).  If we change the hash map, we
	 * must also update the chip's hardware map registers.
	 */
	mutex_enter(bgep->genlock);
	if (!(bgep->progress & PROGRESS_INTR)) {
		/* can happen during autorecovery */
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	if (add) {
		if ((*refp)++ == 0) {
			bgep->mcast_hash[word] |= bit;
#ifdef BGE_IPMI_ASF
			if (bge_chip_sync(bgep, B_TRUE) == DDI_FAILURE) {
#else
			if (bge_chip_sync(bgep) == DDI_FAILURE) {
#endif
				(void) bge_check_acc_handle(bgep,
				    bgep->cfg_handle);
				(void) bge_check_acc_handle(bgep,
				    bgep->io_handle);
				ddi_fm_service_impact(bgep->devinfo,
				    DDI_SERVICE_DEGRADED);
				mutex_exit(bgep->genlock);
				return (EIO);
			}
		}
	} else {
		if (--(*refp) == 0) {
			bgep->mcast_hash[word] &= ~bit;
#ifdef BGE_IPMI_ASF
			if (bge_chip_sync(bgep, B_TRUE) == DDI_FAILURE) {
#else
			if (bge_chip_sync(bgep) == DDI_FAILURE) {
#endif
				(void) bge_check_acc_handle(bgep,
				    bgep->cfg_handle);
				(void) bge_check_acc_handle(bgep,
				    bgep->io_handle);
				ddi_fm_service_impact(bgep->devinfo,
				    DDI_SERVICE_DEGRADED);
				mutex_exit(bgep->genlock);
				return (EIO);
			}
		}
	}
	BGE_DEBUG(("bge_m_multicst($%p) done", arg));
	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	mutex_exit(bgep->genlock);

	return (0);
}
/*
 * bge_m_promisc() -- set or reset promiscuous mode on the board
 *
 *	Program the hardware to enable/disable promiscuous and/or
 *	receive-all-multicast modes.
 */
static int
bge_m_promisc(void *arg, boolean_t on)
{
	bge_t *bgep = arg;

	BGE_TRACE(("bge_m_promisc_set($%p, %d)", arg, on));

	/*
	 * Store MAC layer specified mode and pass to chip layer to update h/w
	 */
	mutex_enter(bgep->genlock);
	if (!(bgep->progress & PROGRESS_INTR)) {
		/* can happen during autorecovery */
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	bgep->promisc = on;
#ifdef BGE_IPMI_ASF
	if (bge_chip_sync(bgep, B_TRUE) == DDI_FAILURE) {
#else
	if (bge_chip_sync(bgep) == DDI_FAILURE) {
#endif
		(void) bge_check_acc_handle(bgep, bgep->cfg_handle);
		(void) bge_check_acc_handle(bgep, bgep->io_handle);
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	BGE_DEBUG(("bge_m_promisc_set($%p) done", arg));
	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	mutex_exit(bgep->genlock);
	return (0);
}

/*ARGSUSED*/
static boolean_t
bge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
{
	bge_t *bgep = arg;

	switch (cap) {
	case MAC_CAPAB_HCKSUM: {
		uint32_t *txflags = cap_data;

		*txflags = HCKSUM_INET_FULL_V4 | HCKSUM_IPHDRCKSUM;
		break;
	}

	case MAC_CAPAB_POLL:
		/*
		 * There's nothing for us to fill in, simply returning
		 * B_TRUE stating that we support polling is sufficient.
		 */
		break;

	case MAC_CAPAB_MULTIADDRESS: {
		multiaddress_capab_t *mmacp = cap_data;

		mutex_enter(bgep->genlock);
		/*
		 * The number of MAC addresses made available by
		 * this capability is one less than the total as
		 * the primary address in slot 0 is counted in
		 * the total.
		 */
		mmacp->maddr_naddr = bgep->unicst_addr_total - 1;
		mmacp->maddr_naddrfree = bgep->unicst_addr_avail;
		/* No multiple factory addresses, set mma_flag to 0 */
		mmacp->maddr_flag = 0;
		mmacp->maddr_handle = bgep;
		mmacp->maddr_add = bge_m_unicst_add;
		mmacp->maddr_remove = bge_m_unicst_remove;
		mmacp->maddr_modify = bge_m_unicst_modify;
		mmacp->maddr_get = bge_m_unicst_get;
		mmacp->maddr_reserve = NULL;
		mutex_exit(bgep->genlock);
		break;
	}

	default:
		return (B_FALSE);
	}
	return (B_TRUE);
}

/*
 * Loopback ioctl code
 */

static lb_property_t loopmodes[] = {
	{ normal,	"normal",	BGE_LOOP_NONE		},
	{ external,	"1000Mbps",	BGE_LOOP_EXTERNAL_1000	},
	{ external,	"100Mbps",	BGE_LOOP_EXTERNAL_100	},
	{ external,	"10Mbps",	BGE_LOOP_EXTERNAL_10	},
	{ internal,	"PHY",		BGE_LOOP_INTERNAL_PHY	},
	{ internal,	"MAC",		BGE_LOOP_INTERNAL_MAC	}
};
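/*
 * This table is handed back verbatim by the LB_GET_INFO ioctl below, so
 * its size must match what LB_GET_INFO_SIZE reports; the lb_property_t
 * type and the normal/external/internal values come from the generic
 * loopback ioctl interface (<sys/netlb.h>).
 */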
static enum ioc_reply
bge_set_loop_mode(bge_t *bgep, uint32_t mode)
{
	/*
	 * If the mode isn't being changed, there's nothing to do ...
	 */
	if (mode == bgep->param_loop_mode)
		return (IOC_ACK);

	/*
	 * Validate the requested mode and prepare a suitable message
	 * to explain the link down/up cycle that the change will
	 * probably induce ...
	 */
	switch (mode) {
	default:
		return (IOC_INVAL);

	case BGE_LOOP_NONE:
	case BGE_LOOP_EXTERNAL_1000:
	case BGE_LOOP_EXTERNAL_100:
	case BGE_LOOP_EXTERNAL_10:
	case BGE_LOOP_INTERNAL_PHY:
	case BGE_LOOP_INTERNAL_MAC:
		break;
	}

	/*
	 * All OK; tell the caller to reprogram
	 * the PHY and/or MAC for the new mode ...
	 */
	bgep->param_loop_mode = mode;
	return (IOC_RESTART_ACK);
}

static enum ioc_reply
bge_loop_ioctl(bge_t *bgep, queue_t *wq, mblk_t *mp, struct iocblk *iocp)
{
	lb_info_sz_t *lbsp;
	lb_property_t *lbpp;
	uint32_t *lbmp;
	int cmd;

	_NOTE(ARGUNUSED(wq))

	/*
	 * Validate format of ioctl
	 */
	if (mp->b_cont == NULL)
		return (IOC_INVAL);

	cmd = iocp->ioc_cmd;
	switch (cmd) {
	default:
		/* NOTREACHED */
		bge_error(bgep, "bge_loop_ioctl: invalid cmd 0x%x", cmd);
		return (IOC_INVAL);

	case LB_GET_INFO_SIZE:
		if (iocp->ioc_count != sizeof (lb_info_sz_t))
			return (IOC_INVAL);
		lbsp = (void *)mp->b_cont->b_rptr;
		*lbsp = sizeof (loopmodes);
		return (IOC_REPLY);

	case LB_GET_INFO:
		if (iocp->ioc_count != sizeof (loopmodes))
			return (IOC_INVAL);
		lbpp = (void *)mp->b_cont->b_rptr;
		bcopy(loopmodes, lbpp, sizeof (loopmodes));
		return (IOC_REPLY);

	case LB_GET_MODE:
		if (iocp->ioc_count != sizeof (uint32_t))
			return (IOC_INVAL);
		lbmp = (void *)mp->b_cont->b_rptr;
		*lbmp = bgep->param_loop_mode;
		return (IOC_REPLY);

	case LB_SET_MODE:
		if (iocp->ioc_count != sizeof (uint32_t))
			return (IOC_INVAL);
		lbmp = (void *)mp->b_cont->b_rptr;
		return (bge_set_loop_mode(bgep, *lbmp));
	}
}
/*
 * Specific bge IOCTLs, the gld module handles the generic ones.
 */
static void
bge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
{
	bge_t *bgep = arg;
	struct iocblk *iocp;
	enum ioc_reply status;
	boolean_t need_privilege;
	int err;
	int cmd;

	/*
	 * Validate the command before bothering with the mutex ...
	 */
	iocp = (void *)mp->b_rptr;
	iocp->ioc_error = 0;
	need_privilege = B_TRUE;
	cmd = iocp->ioc_cmd;
	switch (cmd) {
	default:
		miocnak(wq, mp, 0, EINVAL);
		return;

	case BGE_MII_READ:
	case BGE_MII_WRITE:
	case BGE_SEE_READ:
	case BGE_SEE_WRITE:
	case BGE_FLASH_READ:
	case BGE_FLASH_WRITE:
	case BGE_DIAG:
	case BGE_PEEK:
	case BGE_POKE:
	case BGE_PHY_RESET:
	case BGE_SOFT_RESET:
	case BGE_HARD_RESET:
		break;

	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
	case LB_GET_MODE:
		need_privilege = B_FALSE;
		/* FALLTHRU */
	case LB_SET_MODE:
		break;

	}

	if (need_privilege) {
		/*
		 * Check for specific net_config privilege on Solaris 10+.
		 */
		err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
		if (err != 0) {
			miocnak(wq, mp, 0, err);
			return;
		}
	}

	mutex_enter(bgep->genlock);
	if (!(bgep->progress & PROGRESS_INTR)) {
		/* can happen during autorecovery */
		mutex_exit(bgep->genlock);
		miocnak(wq, mp, 0, EIO);
		return;
	}

	switch (cmd) {
	default:
		_NOTE(NOTREACHED)
		status = IOC_INVAL;
		break;

	case BGE_MII_READ:
	case BGE_MII_WRITE:
	case BGE_SEE_READ:
	case BGE_SEE_WRITE:
	case BGE_FLASH_READ:
	case BGE_FLASH_WRITE:
	case BGE_DIAG:
	case BGE_PEEK:
	case BGE_POKE:
	case BGE_PHY_RESET:
	case BGE_SOFT_RESET:
	case BGE_HARD_RESET:
		status = bge_chip_ioctl(bgep, wq, mp, iocp);
		break;

	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
	case LB_GET_MODE:
	case LB_SET_MODE:
		status = bge_loop_ioctl(bgep, wq, mp, iocp);
		break;

	}

	/*
	 * Do we need to reprogram the PHY and/or the MAC?
	 * Do it now, while we still have the mutex.
	 *
	 * Note: update the PHY first, 'cos it controls the
	 * speed/duplex parameters that the MAC code uses.
	 */
	switch (status) {
	case IOC_RESTART_REPLY:
	case IOC_RESTART_ACK:
		if (bge_reprogram(bgep) == IOC_INVAL)
			status = IOC_INVAL;
		break;
	}

	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		status = IOC_INVAL;
	}
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		status = IOC_INVAL;
	}
	mutex_exit(bgep->genlock);

	/*
	 * Finally, decide how to reply
	 */
	switch (status) {
	default:
	case IOC_INVAL:
		/*
		 * Error, reply with a NAK and EINVAL or the specified error
		 */
		miocnak(wq, mp, 0, iocp->ioc_error == 0 ?
		    EINVAL : iocp->ioc_error);
		break;

	case IOC_DONE:
		/*
		 * OK, reply already sent
		 */
		break;

	case IOC_RESTART_ACK:
	case IOC_ACK:
		/*
		 * OK, reply with an ACK
		 */
		miocack(wq, mp, 0, 0);
		break;

	case IOC_RESTART_REPLY:
	case IOC_REPLY:
		/*
		 * OK, send prepared reply as ACK or NAK
		 */
		mp->b_datap->db_type = iocp->ioc_error == 0 ?
		    M_IOCACK : M_IOCNAK;
		qreply(wq, mp);
		break;
	}
}

static void
bge_resources_add(bge_t *bgep, time_t time, uint_t pkt_cnt)
{

	recv_ring_t *rrp;
	mac_rx_fifo_t mrf;
	int ring;

	/*
	 * Register Rx rings as resources and save mac
	 * resource id for future reference
	 */
	mrf.mrf_type = MAC_RX_FIFO;
	mrf.mrf_blank = bge_chip_blank;
	mrf.mrf_arg = (void *)bgep;
	mrf.mrf_normal_blank_time = time;
	mrf.mrf_normal_pkt_count = pkt_cnt;

	for (ring = 0; ring < bgep->chipid.rx_rings; ring++) {
		rrp = &bgep->recv[ring];
		rrp->handle = mac_resource_add(bgep->mh,
		    (mac_resource_t *)&mrf);
	}
}

static void
bge_m_resources(void *arg)
{
	bge_t *bgep = arg;

	mutex_enter(bgep->genlock);

	bge_resources_add(bgep, bgep->chipid.rx_ticks_norm,
	    bgep->chipid.rx_count_norm);
	mutex_exit(bgep->genlock);
}

/*
 * ========== Per-instance setup/teardown code ==========
 */

#undef	BGE_DBG
#define	BGE_DBG		BGE_DBG_INIT	/* debug flag for this code	*/
/*
 * Allocate an area of memory and a DMA handle for accessing it
 */
static int
bge_alloc_dma_mem(bge_t *bgep, size_t memsize, ddi_device_acc_attr_t *attr_p,
    uint_t dma_flags, dma_area_t *dma_p)
{
	caddr_t va;
	int err;

	BGE_TRACE(("bge_alloc_dma_mem($%p, %ld, $%p, 0x%x, $%p)",
	    (void *)bgep, memsize, attr_p, dma_flags, dma_p));

	/*
	 * Allocate handle
	 */
	err = ddi_dma_alloc_handle(bgep->devinfo, &dma_attr,
	    DDI_DMA_DONTWAIT, NULL, &dma_p->dma_hdl);
	if (err != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * Allocate memory
	 */
	err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize, attr_p,
	    dma_flags, DDI_DMA_DONTWAIT, NULL, &va, &dma_p->alength,
	    &dma_p->acc_hdl);
	if (err != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * Bind the two together
	 */
	dma_p->mem_va = va;
	err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL,
	    va, dma_p->alength, dma_flags, DDI_DMA_DONTWAIT, NULL,
	    &dma_p->cookie, &dma_p->ncookies);

	BGE_DEBUG(("bge_alloc_dma_mem(): bind %d bytes; err %d, %d cookies",
	    dma_p->alength, err, dma_p->ncookies));

	if (err != DDI_DMA_MAPPED || dma_p->ncookies != 1)
		return (DDI_FAILURE);

	dma_p->nslots = ~0U;
	dma_p->size = ~0U;
	dma_p->token = ~0U;
	dma_p->offset = 0;
	return (DDI_SUCCESS);
}

/*
 * Free one allocated area of DMAable memory
 */
static void
bge_free_dma_mem(dma_area_t *dma_p)
{
	if (dma_p->dma_hdl != NULL) {
		if (dma_p->ncookies) {
			(void) ddi_dma_unbind_handle(dma_p->dma_hdl);
			dma_p->ncookies = 0;
		}
		ddi_dma_free_handle(&dma_p->dma_hdl);
		dma_p->dma_hdl = NULL;
	}

	if (dma_p->acc_hdl != NULL) {
		ddi_dma_mem_free(&dma_p->acc_hdl);
		dma_p->acc_hdl = NULL;
	}
}
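/*
 * Note: bge_alloc_dma_mem() requires the bind to produce exactly one
 * cookie (dma_attr_sgllen is 1 above), so every dma_area_t describes one
 * physically contiguous region; the ~0U values left in nslots/size/token
 * are sentinels that bge_slice_chunk() below overwrites for each slice.
 */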
/*
 * Utility routine to carve a slice off a chunk of allocated memory,
 * updating the chunk descriptor accordingly.  The size of the slice
 * is given by the product of the <qty> and <size> parameters.
 */
static void
bge_slice_chunk(dma_area_t *slice, dma_area_t *chunk,
    uint32_t qty, uint32_t size)
{
	static uint32_t sequence = 0xbcd5704a;
	size_t totsize;

	totsize = qty*size;
	ASSERT(totsize <= chunk->alength);

	*slice = *chunk;
	slice->nslots = qty;
	slice->size = size;
	slice->alength = totsize;
	slice->token = ++sequence;

	chunk->mem_va = (caddr_t)chunk->mem_va + totsize;
	chunk->alength -= totsize;
	chunk->offset += totsize;
	chunk->cookie.dmac_laddress += totsize;
	chunk->cookie.dmac_size -= totsize;
}

/*
 * Initialise the specified Receive Producer (Buffer) Ring, using
 * the information in the <dma_area> descriptors that it contains
 * to set up all the other fields. This routine should be called
 * only once for each ring.
 */
static void
bge_init_buff_ring(bge_t *bgep, uint64_t ring)
{
	buff_ring_t *brp;
	bge_status_t *bsp;
	sw_rbd_t *srbdp;
	dma_area_t pbuf;
	uint32_t bufsize;
	uint32_t nslots;
	uint32_t slot;
	uint32_t split;

	static bge_regno_t nic_ring_addrs[BGE_BUFF_RINGS_MAX] = {
		NIC_MEM_SHADOW_BUFF_STD,
		NIC_MEM_SHADOW_BUFF_JUMBO,
		NIC_MEM_SHADOW_BUFF_MINI
	};
	static bge_regno_t mailbox_regs[BGE_BUFF_RINGS_MAX] = {
		RECV_STD_PROD_INDEX_REG,
		RECV_JUMBO_PROD_INDEX_REG,
		RECV_MINI_PROD_INDEX_REG
	};
	static bge_regno_t buff_cons_xref[BGE_BUFF_RINGS_MAX] = {
		STATUS_STD_BUFF_CONS_INDEX,
		STATUS_JUMBO_BUFF_CONS_INDEX,
		STATUS_MINI_BUFF_CONS_INDEX
	};

	BGE_TRACE(("bge_init_buff_ring($%p, %d)",
	    (void *)bgep, ring));

	brp = &bgep->buff[ring];
	nslots = brp->desc.nslots;
	ASSERT(brp->buf[0].nslots == nslots/BGE_SPLIT);
	bufsize = brp->buf[0].size;

	/*
	 * Set up the copy of the h/w RCB
	 *
	 * Note: unlike Send & Receive Return Rings, (where the max_len
	 * field holds the number of slots), in a Receive Buffer Ring
	 * this field indicates the size of each buffer in the ring.
	 */
	brp->hw_rcb.host_ring_addr = brp->desc.cookie.dmac_laddress;
	brp->hw_rcb.max_len = (uint16_t)bufsize;
	brp->hw_rcb.flags = nslots > 0 ? 0 : RCB_FLAG_RING_DISABLED;
	brp->hw_rcb.nic_ring_addr = nic_ring_addrs[ring];

	/*
	 * Other one-off initialisation of per-ring data
	 */
	brp->bgep = bgep;
	bsp = DMA_VPTR(bgep->status_block);
	brp->cons_index_p = &bsp->buff_cons_index[buff_cons_xref[ring]];
	brp->chip_mbx_reg = mailbox_regs[ring];
	mutex_init(brp->rf_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(bgep->intr_pri));

	/*
	 * Allocate the array of s/w Receive Buffer Descriptors
	 */
	srbdp = kmem_zalloc(nslots*sizeof (*srbdp), KM_SLEEP);
	brp->sw_rbds = srbdp;

	/*
	 * Now initialise each array element once and for all
	 */
	for (split = 0; split < BGE_SPLIT; ++split) {
		pbuf = brp->buf[split];
		for (slot = 0; slot < nslots/BGE_SPLIT; ++srbdp, ++slot)
			bge_slice_chunk(&srbdp->pbuf, &pbuf, 1, bufsize);
		ASSERT(pbuf.alength == 0);
	}
}

/*
 * Clean up initialisation done above before the memory is freed
 */
static void
bge_fini_buff_ring(bge_t *bgep, uint64_t ring)
{
	buff_ring_t *brp;
	sw_rbd_t *srbdp;

	BGE_TRACE(("bge_fini_buff_ring($%p, %d)",
	    (void *)bgep, ring));

	brp = &bgep->buff[ring];
	srbdp = brp->sw_rbds;
	kmem_free(srbdp, brp->desc.nslots*sizeof (*srbdp));

	mutex_destroy(brp->rf_lock);
}
/*
 * Initialise the specified Receive (Return) Ring, using the
 * information in the <dma_area> descriptors that it contains
 * to set up all the other fields. This routine should be called
 * only once for each ring.
 */
static void
bge_init_recv_ring(bge_t *bgep, uint64_t ring)
{
	recv_ring_t *rrp;
	bge_status_t *bsp;
	uint32_t nslots;

	BGE_TRACE(("bge_init_recv_ring($%p, %d)",
	    (void *)bgep, ring));

	/*
	 * The chip architecture requires that receive return rings have
	 * 512 or 1024 or 2048 elements per ring.  See 570X-PG108-R page 103.
	 */
	rrp = &bgep->recv[ring];
	nslots = rrp->desc.nslots;
	ASSERT(nslots == 0 || nslots == 512 ||
	    nslots == 1024 || nslots == 2048);

	/*
	 * Set up the copy of the h/w RCB
	 */
	rrp->hw_rcb.host_ring_addr = rrp->desc.cookie.dmac_laddress;
	rrp->hw_rcb.max_len = (uint16_t)nslots;
	rrp->hw_rcb.flags = nslots > 0 ? 0 : RCB_FLAG_RING_DISABLED;
	rrp->hw_rcb.nic_ring_addr = 0;

	/*
	 * Other one-off initialisation of per-ring data
	 */
	rrp->bgep = bgep;
	bsp = DMA_VPTR(bgep->status_block);
	rrp->prod_index_p = RECV_INDEX_P(bsp, ring);
	rrp->chip_mbx_reg = RECV_RING_CONS_INDEX_REG(ring);
	mutex_init(rrp->rx_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(bgep->intr_pri));
}


/*
 * Clean up initialisation done above before the memory is freed
 */
static void
bge_fini_recv_ring(bge_t *bgep, uint64_t ring)
{
	recv_ring_t *rrp;

	BGE_TRACE(("bge_fini_recv_ring($%p, %d)",
	    (void *)bgep, ring));

	rrp = &bgep->recv[ring];
	if (rrp->rx_softint)
		ddi_remove_softintr(rrp->rx_softint);
	mutex_destroy(rrp->rx_lock);
}
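/*
 * Note: receive (return) rings own no buffers of their own -- the chip
 * fills them with descriptors of packets landed in the producer-ring
 * buffers above -- so only the RCB copy, the status-block pointers and
 * the rx mutex need setting up here.
 */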
 */
static void
bge_init_send_ring(bge_t *bgep, uint64_t ring)
{
	send_ring_t *srp;
	bge_status_t *bsp;
	sw_sbd_t *ssbdp;
	dma_area_t desc;
	dma_area_t pbuf;
	uint32_t nslots;
	uint32_t slot;
	uint32_t split;
	sw_txbuf_t *txbuf;

	BGE_TRACE(("bge_init_send_ring($%p, %d)",
	    (void *)bgep, ring));

	/*
	 * The chip architecture requires that host-based send rings
	 * have 512 elements per ring. See 570X-PG102-R page 56.
	 */
	srp = &bgep->send[ring];
	nslots = srp->desc.nslots;
	ASSERT(nslots == 0 || nslots == 512);

	/*
	 * Set up the copy of the h/w RCB
	 */
	srp->hw_rcb.host_ring_addr = srp->desc.cookie.dmac_laddress;
	srp->hw_rcb.max_len = (uint16_t)nslots;
	srp->hw_rcb.flags = nslots > 0 ? 0 : RCB_FLAG_RING_DISABLED;
	srp->hw_rcb.nic_ring_addr = NIC_MEM_SHADOW_SEND_RING(ring, nslots);

	/*
	 * Other one-off initialisation of per-ring data
	 */
	srp->bgep = bgep;
	bsp = DMA_VPTR(bgep->status_block);
	srp->cons_index_p = SEND_INDEX_P(bsp, ring);
	srp->chip_mbx_reg = SEND_RING_HOST_INDEX_REG(ring);
	mutex_init(srp->tx_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(bgep->intr_pri));
	mutex_init(srp->txbuf_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(bgep->intr_pri));
	mutex_init(srp->freetxbuf_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(bgep->intr_pri));
	mutex_init(srp->tc_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(bgep->intr_pri));
	if (nslots == 0)
		return;

	/*
	 * Allocate the array of s/w Send Buffer Descriptors
	 */
	ssbdp = kmem_zalloc(nslots*sizeof (*ssbdp), KM_SLEEP);
	txbuf = kmem_zalloc(BGE_SEND_BUF_MAX*sizeof (*txbuf), KM_SLEEP);
	srp->txbuf_head =
	    kmem_zalloc(BGE_SEND_BUF_MAX*sizeof (bge_queue_item_t), KM_SLEEP);
	srp->pktp = kmem_zalloc(BGE_SEND_BUF_MAX*sizeof (send_pkt_t), KM_SLEEP);
	srp->sw_sbds = ssbdp;
	srp->txbuf = txbuf;
	srp->tx_buffers = BGE_SEND_BUF_NUM;
	srp->tx_buffers_low = srp->tx_buffers / 4;
	if (bgep->chipid.snd_buff_size > BGE_SEND_BUFF_SIZE_DEFAULT)
		srp->tx_array_max = BGE_SEND_BUF_ARRAY_JUMBO;
	else
		srp->tx_array_max = BGE_SEND_BUF_ARRAY;
	srp->tx_array = 1;

	/*
	 * Chunk tx desc area
	 */
	desc = srp->desc;
	for (slot = 0; slot < nslots; ++ssbdp, ++slot) {
		bge_slice_chunk(&ssbdp->desc, &desc, 1,
		    sizeof (bge_sbd_t));
	}
	ASSERT(desc.alength == 0);

	/*
	 * Chunk tx buffer area
	 */
	for (split = 0; split < BGE_SPLIT; ++split) {
		pbuf = srp->buf[0][split];
		for (slot = 0; slot < BGE_SEND_BUF_NUM/BGE_SPLIT; ++slot) {
			bge_slice_chunk(&txbuf->buf, &pbuf, 1,
			    bgep->chipid.snd_buff_size);
			txbuf++;
		}
		ASSERT(pbuf.alength == 0);
	}
}

/*
 * Clean up initialisation done above before the memory is freed
 */
static void
bge_fini_send_ring(bge_t *bgep, uint64_t ring)
{
	send_ring_t *srp;
	uint32_t array;
	uint32_t split;
	uint32_t nslots;

	BGE_TRACE(("bge_fini_send_ring($%p, %d)",
	    (void *)bgep, ring));

	srp = &bgep->send[ring];
	mutex_destroy(srp->tc_lock);
	mutex_destroy(srp->freetxbuf_lock);
	mutex_destroy(srp->txbuf_lock);
	mutex_destroy(srp->tx_lock);
	nslots = srp->desc.nslots;
	if (nslots == 0)
		return;

	for (array = 1; array < srp->tx_array; ++array)
		for (split = 0; split < BGE_SPLIT; ++split)
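			/*
			 * Only arrays 1 and above are freed here: they
			 * were allocated by bge_alloc_txbuf_array(),
			 * whereas the base array 0 is carved out of the
			 * shared tx buffer area set up in bge_alloc_bufs()
			 * and is freed along with it by bge_free_bufs().
			 */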
			bge_free_dma_mem(&srp->buf[array][split]);
	kmem_free(srp->sw_sbds, nslots*sizeof (*srp->sw_sbds));
	kmem_free(srp->txbuf_head, BGE_SEND_BUF_MAX*sizeof (*srp->txbuf_head));
	kmem_free(srp->txbuf, BGE_SEND_BUF_MAX*sizeof (*srp->txbuf));
	kmem_free(srp->pktp, BGE_SEND_BUF_MAX*sizeof (*srp->pktp));
	srp->sw_sbds = NULL;
	srp->txbuf_head = NULL;
	srp->txbuf = NULL;
	srp->pktp = NULL;
}

/*
 * Initialise all transmit, receive, and buffer rings.
 */
void
bge_init_rings(bge_t *bgep)
{
	uint32_t ring;

	BGE_TRACE(("bge_init_rings($%p)", (void *)bgep));

	/*
	 * Perform one-off initialisation of each ring ...
	 */
	for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring)
		bge_init_send_ring(bgep, ring);
	for (ring = 0; ring < BGE_RECV_RINGS_MAX; ++ring)
		bge_init_recv_ring(bgep, ring);
	for (ring = 0; ring < BGE_BUFF_RINGS_MAX; ++ring)
		bge_init_buff_ring(bgep, ring);
}

/*
 * Undo the work of bge_init_rings() above before the memory is freed
 */
void
bge_fini_rings(bge_t *bgep)
{
	uint32_t ring;

	BGE_TRACE(("bge_fini_rings($%p)", (void *)bgep));

	for (ring = 0; ring < BGE_BUFF_RINGS_MAX; ++ring)
		bge_fini_buff_ring(bgep, ring);
	for (ring = 0; ring < BGE_RECV_RINGS_MAX; ++ring)
		bge_fini_recv_ring(bgep, ring);
	for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring)
		bge_fini_send_ring(bgep, ring);
}

/*
 * Called from bge_m_stop() to free the tx buffers that were
 * allocated by the tx process.
 */
void
bge_free_txbuf_arrays(send_ring_t *srp)
{
	uint32_t array;
	uint32_t split;

	ASSERT(mutex_owned(srp->tx_lock));

	/*
	 * Free the extra tx buffer DMA area
	 */
	for (array = 1; array < srp->tx_array; ++array)
		for (split = 0; split < BGE_SPLIT; ++split)
			bge_free_dma_mem(&srp->buf[array][split]);

	/*
	 * Restore initial tx buffer numbers
	 */
	srp->tx_array = 1;
	srp->tx_buffers = BGE_SEND_BUF_NUM;
	srp->tx_buffers_low = srp->tx_buffers / 4;
	srp->tx_flow = 0;
	bzero(srp->pktp, BGE_SEND_BUF_MAX * sizeof (*srp->pktp));
}

/*
 * Called from the tx process to allocate more tx buffers
 */
bge_queue_item_t *
bge_alloc_txbuf_array(bge_t *bgep, send_ring_t *srp)
{
	bge_queue_t *txbuf_queue;
	bge_queue_item_t *txbuf_item_last;
	bge_queue_item_t *txbuf_item;
	bge_queue_item_t *txbuf_item_rtn;
	sw_txbuf_t *txbuf;
	dma_area_t area;
	size_t txbuffsize;
	uint32_t slot;
	uint32_t array;
	uint32_t split;
	uint32_t err;

	ASSERT(mutex_owned(srp->tx_lock));

	array = srp->tx_array;
	if (array >= srp->tx_array_max)
		return (NULL);

	/*
	 * Allocate memory & handles for TX buffers
	 */
	txbuffsize = BGE_SEND_BUF_NUM*bgep->chipid.snd_buff_size;
	ASSERT((txbuffsize % BGE_SPLIT) == 0);
	for (split = 0; split < BGE_SPLIT; ++split) {
		err = bge_alloc_dma_mem(bgep, txbuffsize/BGE_SPLIT,
		    &bge_data_accattr, DDI_DMA_WRITE | BGE_DMA_MODE,
		    &srp->buf[array][split]);
		if (err != DDI_SUCCESS) {
			/* Back out the chunks allocated so far */
			for (slot = 0; slot <= split; ++slot)
				bge_free_dma_mem(&srp->buf[array][slot]);
			srp->tx_alloc_fail++;
			return (NULL);
		}
	}

	/*
	 * Chunk tx buffer area
	 */
	txbuf = srp->txbuf + array*BGE_SEND_BUF_NUM;
	for (split = 0; split < BGE_SPLIT; ++split) {
		area = srp->buf[array][split];
		for (slot = 0; slot < BGE_SEND_BUF_NUM/BGE_SPLIT; ++slot) {
			bge_slice_chunk(&txbuf->buf, &area, 1,
			    bgep->chipid.snd_buff_size);
			txbuf++;
		}
	}

	/*
	 * Add the above buffers to the tx buffer pop queue
	 */
	txbuf_item = srp->txbuf_head + array*BGE_SEND_BUF_NUM;
	txbuf = srp->txbuf + array*BGE_SEND_BUF_NUM;
	txbuf_item_last = NULL;
	for (slot = 0; slot < BGE_SEND_BUF_NUM; ++slot) {
		txbuf_item->item = txbuf;
		txbuf_item->next = txbuf_item_last;
		txbuf_item_last = txbuf_item;
		txbuf++;
		txbuf_item++;
	}
	txbuf_item = srp->txbuf_head + array*BGE_SEND_BUF_NUM;
	txbuf_item_rtn = txbuf_item;
	txbuf_item++;
	txbuf_queue = srp->txbuf_pop_queue;
	mutex_enter(txbuf_queue->lock);
	txbuf_item->next = txbuf_queue->head;
	txbuf_queue->head = txbuf_item_last;
	txbuf_queue->count += BGE_SEND_BUF_NUM - 1;
	mutex_exit(txbuf_queue->lock);

	srp->tx_array++;
	srp->tx_buffers += BGE_SEND_BUF_NUM;
	srp->tx_buffers_low = srp->tx_buffers / 4;

	return (txbuf_item_rtn);
}

/*
 * This function allocates all the transmit and receive buffers
 * and descriptors, in four chunks.
 */
int
bge_alloc_bufs(bge_t *bgep)
{
	dma_area_t area;
	size_t rxbuffsize;
	size_t txbuffsize;
	size_t rxbuffdescsize;
	size_t rxdescsize;
	size_t txdescsize;
	uint32_t ring;
	uint32_t rx_rings = bgep->chipid.rx_rings;
	uint32_t tx_rings = bgep->chipid.tx_rings;
	int split;
	int err;

	BGE_TRACE(("bge_alloc_bufs($%p)",
	    (void *)bgep));

	rxbuffsize = BGE_STD_SLOTS_USED*bgep->chipid.std_buf_size;
	rxbuffsize += bgep->chipid.jumbo_slots*bgep->chipid.recv_jumbo_size;
	rxbuffsize += BGE_MINI_SLOTS_USED*BGE_MINI_BUFF_SIZE;

	txbuffsize = BGE_SEND_BUF_NUM*bgep->chipid.snd_buff_size;
	txbuffsize *= tx_rings;

	rxdescsize = rx_rings*bgep->chipid.recv_slots;
	rxdescsize *= sizeof (bge_rbd_t);

	rxbuffdescsize = BGE_STD_SLOTS_USED;
	rxbuffdescsize += bgep->chipid.jumbo_slots;
	rxbuffdescsize += BGE_MINI_SLOTS_USED;
	rxbuffdescsize *= sizeof (bge_rbd_t);

	txdescsize = tx_rings*BGE_SEND_SLOTS_USED;
	txdescsize *= sizeof (bge_sbd_t);
	txdescsize += sizeof (bge_statistics_t);
	txdescsize += sizeof (bge_status_t);
	txdescsize += BGE_STATUS_PADDING;

	/*
	 * Enable PCI relaxed ordering only for RX/TX data buffers
	 */
	if (bge_relaxed_ordering)
		dma_attr.dma_attr_flags |= DDI_DMA_RELAXED_ORDERING;

	/*
	 * Allocate memory & handles for RX buffers
	 */
	ASSERT((rxbuffsize % BGE_SPLIT) == 0);
	for (split = 0; split < BGE_SPLIT; ++split) {
		err = bge_alloc_dma_mem(bgep, rxbuffsize/BGE_SPLIT,
		    &bge_data_accattr, DDI_DMA_READ | BGE_DMA_MODE,
		    &bgep->rx_buff[split]);
		if (err != DDI_SUCCESS)
			return (DDI_FAILURE);
	}

	/*
	 * Allocate memory & handles for TX buffers
	 */
	ASSERT((txbuffsize % BGE_SPLIT) == 0);
	for (split = 0; split < BGE_SPLIT; ++split) {
		err = bge_alloc_dma_mem(bgep, txbuffsize/BGE_SPLIT,
		    &bge_data_accattr, DDI_DMA_WRITE | BGE_DMA_MODE,
		    &bgep->tx_buff[split]);
		if (err != DDI_SUCCESS)
			return (DDI_FAILURE);
	}

	dma_attr.dma_attr_flags &= ~DDI_DMA_RELAXED_ORDERING;

	/*
	 * Allocate memory & handles for receive return rings
	 */
	ASSERT((rxdescsize % rx_rings) == 0);
	for (split = 0; split < rx_rings; ++split) {
		err = bge_alloc_dma_mem(bgep, rxdescsize/rx_rings,
		    &bge_desc_accattr, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
		    &bgep->rx_desc[split]);
		if (err != DDI_SUCCESS)
			return (DDI_FAILURE);
	}

	/*
	 * Allocate memory & handles for buffer (producer) descriptor rings
	 */
	err = bge_alloc_dma_mem(bgep, rxbuffdescsize, &bge_desc_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &bgep->rx_desc[split]);
	if (err != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * Allocate memory & handles for TX descriptor rings,
	 * status block, and statistics area
	 */
	err = bge_alloc_dma_mem(bgep, txdescsize, &bge_desc_accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &bgep->tx_desc);
	if (err != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * Now carve up each of the allocated areas ...
	 */
	for (split = 0; split < BGE_SPLIT; ++split) {
		area = bgep->rx_buff[split];
		bge_slice_chunk(&bgep->buff[BGE_STD_BUFF_RING].buf[split],
		    &area, BGE_STD_SLOTS_USED/BGE_SPLIT,
		    bgep->chipid.std_buf_size);
		bge_slice_chunk(&bgep->buff[BGE_JUMBO_BUFF_RING].buf[split],
		    &area, bgep->chipid.jumbo_slots/BGE_SPLIT,
		    bgep->chipid.recv_jumbo_size);
		bge_slice_chunk(&bgep->buff[BGE_MINI_BUFF_RING].buf[split],
		    &area, BGE_MINI_SLOTS_USED/BGE_SPLIT,
		    BGE_MINI_BUFF_SIZE);
	}

	for (split = 0; split < BGE_SPLIT; ++split) {
		area = bgep->tx_buff[split];
		for (ring = 0; ring < tx_rings; ++ring)
			bge_slice_chunk(&bgep->send[ring].buf[0][split],
			    &area, BGE_SEND_BUF_NUM/BGE_SPLIT,
			    bgep->chipid.snd_buff_size);
		for (; ring < BGE_SEND_RINGS_MAX; ++ring)
			bge_slice_chunk(&bgep->send[ring].buf[0][split],
			    &area, 0, bgep->chipid.snd_buff_size);
	}

	for (ring = 0; ring < rx_rings; ++ring)
		bge_slice_chunk(&bgep->recv[ring].desc, &bgep->rx_desc[ring],
		    bgep->chipid.recv_slots, sizeof (bge_rbd_t));

	area = bgep->rx_desc[rx_rings];
	for (; ring < BGE_RECV_RINGS_MAX; ++ring)
		bge_slice_chunk(&bgep->recv[ring].desc, &area,
		    0, sizeof (bge_rbd_t));
	bge_slice_chunk(&bgep->buff[BGE_STD_BUFF_RING].desc, &area,
	    BGE_STD_SLOTS_USED, sizeof (bge_rbd_t));
	bge_slice_chunk(&bgep->buff[BGE_JUMBO_BUFF_RING].desc, &area,
	    bgep->chipid.jumbo_slots, sizeof (bge_rbd_t));
	bge_slice_chunk(&bgep->buff[BGE_MINI_BUFF_RING].desc, &area,
	    BGE_MINI_SLOTS_USED, sizeof (bge_rbd_t));
	ASSERT(area.alength == 0);

	area = bgep->tx_desc;
	for (ring = 0; ring < tx_rings; ++ring)
		bge_slice_chunk(&bgep->send[ring].desc, &area,
		    BGE_SEND_SLOTS_USED, sizeof (bge_sbd_t));
	for (; ring < BGE_SEND_RINGS_MAX; ++ring)
		bge_slice_chunk(&bgep->send[ring].desc, &area,
		    0, sizeof (bge_sbd_t));
	bge_slice_chunk(&bgep->statistics, &area, 1, sizeof (bge_statistics_t));
	bge_slice_chunk(&bgep->status_block, &area, 1, sizeof (bge_status_t));
	ASSERT(area.alength == BGE_STATUS_PADDING);
	DMA_ZERO(bgep->status_block);

	return (DDI_SUCCESS);
}

/*
 * This routine frees the transmit and receive buffers and descriptors.
 * Make sure the chip is stopped before calling it!
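 *
 * The DMA areas are released in roughly the reverse of the order in
 * which bge_alloc_bufs() above allocated them; BGE_RECV_RINGS_SPLIT
 * is assumed to cover both the per-ring receive descriptor areas and
 * the extra buffer (producer) descriptor area allocated there.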
 */
void
bge_free_bufs(bge_t *bgep)
{
	int split;

	BGE_TRACE(("bge_free_bufs($%p)",
	    (void *)bgep));

	bge_free_dma_mem(&bgep->tx_desc);
	for (split = 0; split < BGE_RECV_RINGS_SPLIT; ++split)
		bge_free_dma_mem(&bgep->rx_desc[split]);
	for (split = 0; split < BGE_SPLIT; ++split)
		bge_free_dma_mem(&bgep->tx_buff[split]);
	for (split = 0; split < BGE_SPLIT; ++split)
		bge_free_dma_mem(&bgep->rx_buff[split]);
}

/*
 * Determine (initial) MAC address ("BIA") to use for this interface
 */

static void
bge_find_mac_address(bge_t *bgep, chip_id_t *cidp)
{
	struct ether_addr sysaddr;
	char propbuf[8];	/* "true" or "false", plus NUL */
	uchar_t *bytes;
	int *ints;
	uint_t nelts;
	int err;

	BGE_TRACE(("bge_find_mac_address($%p)",
	    (void *)bgep));

	BGE_DEBUG(("bge_find_mac_address: hw_mac_addr %012llx, => %s (%sset)",
	    cidp->hw_mac_addr,
	    ether_sprintf((void *)cidp->vendor_addr.addr),
	    cidp->vendor_addr.set ? "" : "not "));

	/*
	 * The "vendor's factory-set address" may already have
	 * been extracted from the chip, but if the property
	 * "local-mac-address" is set we use that instead. It
	 * will normally be set by OBP, but it could also be
	 * specified in a .conf file(!)
	 *
	 * There doesn't seem to be a way to define byte-array
	 * properties in a .conf, so we check whether it looks
	 * like an array of 6 ints instead.
	 *
	 * Then, we check whether it looks like an array of 6
	 * bytes (which it should, if OBP set it). If we can't
	 * make sense of it either way, we'll ignore it.
	 */
	err = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, bgep->devinfo,
	    DDI_PROP_DONTPASS, localmac_propname, &ints, &nelts);
	if (err == DDI_PROP_SUCCESS) {
		if (nelts == ETHERADDRL) {
			while (nelts--)
				cidp->vendor_addr.addr[nelts] = ints[nelts];
			cidp->vendor_addr.set = B_TRUE;
		}
		ddi_prop_free(ints);
	}

	err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, bgep->devinfo,
	    DDI_PROP_DONTPASS, localmac_propname, &bytes, &nelts);
	if (err == DDI_PROP_SUCCESS) {
		if (nelts == ETHERADDRL) {
			while (nelts--)
				cidp->vendor_addr.addr[nelts] = bytes[nelts];
			cidp->vendor_addr.set = B_TRUE;
		}
		ddi_prop_free(bytes);
	}

	BGE_DEBUG(("bge_find_mac_address: +local %s (%sset)",
	    ether_sprintf((void *)cidp->vendor_addr.addr),
	    cidp->vendor_addr.set ? "" : "not "));

	/*
	 * Look up the OBP property "local-mac-address?". Note that even
	 * though its value is a string (which should be "true" or "false"),
	 * it can't be decoded by ddi_prop_lookup_string(9F). So, we zero
	 * the buffer first and then fetch the property as an untyped array;
	 * this may or may not include a final NUL, but since there will
	 * always be one left at the end of the buffer we can now treat it
	 * as a string anyway.
	 */
	nelts = sizeof (propbuf);
	bzero(propbuf, nelts--);
	err = ddi_getlongprop_buf(DDI_DEV_T_ANY, bgep->devinfo,
	    DDI_PROP_CANSLEEP, localmac_boolname, propbuf, (int *)&nelts);

	/*
	 * Now, if the address still isn't set from the hardware (SEEPROM)
	 * or the OBP or .conf property, OR if the user has foolishly set
	 * 'local-mac-address? = false', use "the system address" instead
	 * (but only if it's non-null i.e. has been set from the IDPROM).
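	 *
	 * localetheraddr() is assumed to return nonzero only when a
	 * system-wide address is actually available; that is what makes
	 * the "only if it's non-null" qualification above hold.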
	 */
	if (cidp->vendor_addr.set == B_FALSE || strcmp(propbuf, "false") == 0)
		if (localetheraddr(NULL, &sysaddr) != 0) {
			ethaddr_copy(&sysaddr, cidp->vendor_addr.addr);
			cidp->vendor_addr.set = B_TRUE;
		}

	BGE_DEBUG(("bge_find_mac_address: +system %s (%sset)",
	    ether_sprintf((void *)cidp->vendor_addr.addr),
	    cidp->vendor_addr.set ? "" : "not "));

	/*
	 * Finally(!), if there's a valid "mac-address" property (created
	 * if we netbooted from this interface), we must use this instead
	 * of any of the above to ensure that the NFS/install server doesn't
	 * get confused by the address changing as Solaris takes over!
	 */
	err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, bgep->devinfo,
	    DDI_PROP_DONTPASS, macaddr_propname, &bytes, &nelts);
	if (err == DDI_PROP_SUCCESS) {
		if (nelts == ETHERADDRL) {
			while (nelts--)
				cidp->vendor_addr.addr[nelts] = bytes[nelts];
			cidp->vendor_addr.set = B_TRUE;
		}
		ddi_prop_free(bytes);
	}

	BGE_DEBUG(("bge_find_mac_address: =final %s (%sset)",
	    ether_sprintf((void *)cidp->vendor_addr.addr),
	    cidp->vendor_addr.set ? "" : "not "));
}


/*ARGSUSED*/
int
bge_check_acc_handle(bge_t *bgep, ddi_acc_handle_t handle)
{
	ddi_fm_error_t de;

	ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION);
	ddi_fm_acc_err_clear(handle, DDI_FME_VERSION);
	return (de.fme_status);
}

/*ARGSUSED*/
int
bge_check_dma_handle(bge_t *bgep, ddi_dma_handle_t handle)
{
	ddi_fm_error_t de;

	ASSERT(bgep->progress & PROGRESS_BUFS);
	ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION);
	return (de.fme_status);
}

/*
 * The IO fault service error handling callback function
 */
/*ARGSUSED*/
static int
bge_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
{
	/*
	 * As the driver can always deal with an error in any dma or
	 * access handle, we can just return the fme_status value.
	 */
	pci_ereport_post(dip, err, NULL);
	return (err->fme_status);
}

static void
bge_fm_init(bge_t *bgep)
{
	ddi_iblock_cookie_t iblk;

	/* Only register with IO Fault Services if we have some capability */
	if (bgep->fm_capabilities) {
		bge_reg_accattr.devacc_attr_access = DDI_FLAGERR_ACC;
		bge_desc_accattr.devacc_attr_access = DDI_FLAGERR_ACC;
		dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;

		/* Register capabilities with IO Fault Services */
		ddi_fm_init(bgep->devinfo, &bgep->fm_capabilities, &iblk);

		/*
		 * Initialize pci ereport capabilities if ereport capable
		 */
		if (DDI_FM_EREPORT_CAP(bgep->fm_capabilities) ||
		    DDI_FM_ERRCB_CAP(bgep->fm_capabilities))
			pci_ereport_setup(bgep->devinfo);

		/*
		 * Register error callback if error callback capable
		 */
		if (DDI_FM_ERRCB_CAP(bgep->fm_capabilities))
			ddi_fm_handler_register(bgep->devinfo,
			    bge_fm_error_cb, (void*) bgep);
	} else {
		/*
		 * These access attributes must be reset to their non-FMA
		 * defaults if no FMA capabilities are configured at runtime.
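		 *
		 * With DDI_FLAGERR_ACC and DDI_DMA_FLAGERR set, the DDI
		 * framework records access and DMA faults for retrieval
		 * via ddi_fm_acc_err_get()/ddi_fm_dma_err_get() (see the
		 * bge_check_*_handle() routines above); with the defaults
		 * restored here, no such error bookkeeping takes place.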
		 */
		bge_reg_accattr.devacc_attr_access = DDI_DEFAULT_ACC;
		bge_desc_accattr.devacc_attr_access = DDI_DEFAULT_ACC;
		dma_attr.dma_attr_flags = 0;
	}
}

static void
bge_fm_fini(bge_t *bgep)
{
	/* Only unregister FMA capabilities if we registered some */
	if (bgep->fm_capabilities) {

		/*
		 * Release any resources allocated by pci_ereport_setup()
		 */
		if (DDI_FM_EREPORT_CAP(bgep->fm_capabilities) ||
		    DDI_FM_ERRCB_CAP(bgep->fm_capabilities))
			pci_ereport_teardown(bgep->devinfo);

		/*
		 * Un-register error callback if error callback capable
		 */
		if (DDI_FM_ERRCB_CAP(bgep->fm_capabilities))
			ddi_fm_handler_unregister(bgep->devinfo);

		/* Unregister from IO Fault Services */
		ddi_fm_fini(bgep->devinfo);
	}
}

static void
#ifdef BGE_IPMI_ASF
bge_unattach(bge_t *bgep, uint_t asf_mode)
#else
bge_unattach(bge_t *bgep)
#endif
{
	BGE_TRACE(("bge_unattach($%p)",
	    (void *)bgep));

	/*
	 * Flag that no more activity may be initiated
	 */
	bgep->progress &= ~PROGRESS_READY;

	/*
	 * Quiesce the PHY and MAC (leave it reset but still powered).
	 * Clean up and free all BGE data structures
	 */
	if (bgep->periodic_id != NULL) {
		ddi_periodic_delete(bgep->periodic_id);
		bgep->periodic_id = NULL;
	}
	if (bgep->progress & PROGRESS_KSTATS)
		bge_fini_kstats(bgep);
	if (bgep->progress & PROGRESS_PHY)
		bge_phys_reset(bgep);
	if (bgep->progress & PROGRESS_HWINT) {
		mutex_enter(bgep->genlock);
#ifdef BGE_IPMI_ASF
		if (bge_chip_reset(bgep, B_FALSE, asf_mode) != DDI_SUCCESS)
#else
		if (bge_chip_reset(bgep, B_FALSE) != DDI_SUCCESS)
#endif
			ddi_fm_service_impact(bgep->devinfo,
			    DDI_SERVICE_UNAFFECTED);
#ifdef BGE_IPMI_ASF
		if (bgep->asf_enabled) {
			/*
			 * This register has been overlaid. We restore its
			 * initial value here.
			 */
			bge_nic_put32(bgep, BGE_NIC_DATA_SIG_ADDR,
			    BGE_NIC_DATA_SIG);
		}
#endif
		if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK)
			ddi_fm_service_impact(bgep->devinfo,
			    DDI_SERVICE_UNAFFECTED);
		if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK)
			ddi_fm_service_impact(bgep->devinfo,
			    DDI_SERVICE_UNAFFECTED);
		mutex_exit(bgep->genlock);
	}
	if (bgep->progress & PROGRESS_INTR) {
		bge_intr_disable(bgep);
		bge_fini_rings(bgep);
	}
	if (bgep->progress & PROGRESS_HWINT) {
		bge_rem_intrs(bgep);
		rw_destroy(bgep->errlock);
		mutex_destroy(bgep->softintrlock);
		mutex_destroy(bgep->genlock);
	}
	if (bgep->progress & PROGRESS_FACTOTUM)
		ddi_remove_softintr(bgep->factotum_id);
	if (bgep->progress & PROGRESS_RESCHED)
		ddi_remove_softintr(bgep->drain_id);
	if (bgep->progress & PROGRESS_BUFS)
		bge_free_bufs(bgep);
	if (bgep->progress & PROGRESS_REGS)
		ddi_regs_map_free(&bgep->io_handle);
	if (bgep->progress & PROGRESS_CFG)
		pci_config_teardown(&bgep->cfg_handle);

	bge_fm_fini(bgep);

	ddi_remove_minor_node(bgep->devinfo, NULL);
	kmem_free(bgep->pstats, sizeof (bge_statistics_reg_t));
	kmem_free(bgep, sizeof (*bgep));
}

static int
bge_resume(dev_info_t *devinfo)
{
	bge_t *bgep;	/* Our private data */
	chip_id_t *cidp;
	chip_id_t chipid;

	bgep = ddi_get_driver_private(devinfo);
	if (bgep == NULL)
		return (DDI_FAILURE);

	/*
	 * Refuse to resume if the data structures aren't consistent
	 */
	if (bgep->devinfo != devinfo)
		return (DDI_FAILURE);

#ifdef BGE_IPMI_ASF
	/*
	 * Power management is not currently supported by BGE. If you
	 * want to implement it, add the ASF/IPMI-related code here.
	 */

#endif

	/*
	 * Read chip ID & set up config space command register(s)
	 * Refuse to resume if the chip has changed its identity!
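	 * (e.g. because the card was physically replaced while the
	 * system was suspended): the vendor/device/revision/asic_rev
	 * comparisons below are what enforce this.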
	 */
	cidp = &bgep->chipid;
	mutex_enter(bgep->genlock);
	bge_chip_cfg_init(bgep, &chipid, B_FALSE);
	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
		mutex_exit(bgep->genlock);
		return (DDI_FAILURE);
	}
	mutex_exit(bgep->genlock);
	if (chipid.vendor != cidp->vendor)
		return (DDI_FAILURE);
	if (chipid.device != cidp->device)
		return (DDI_FAILURE);
	if (chipid.revision != cidp->revision)
		return (DDI_FAILURE);
	if (chipid.asic_rev != cidp->asic_rev)
		return (DDI_FAILURE);

	/*
	 * All OK, reinitialise h/w & kick off GLD scheduling
	 */
	mutex_enter(bgep->genlock);
	if (bge_restart(bgep, B_TRUE) != DDI_SUCCESS) {
		(void) bge_check_acc_handle(bgep, bgep->cfg_handle);
		(void) bge_check_acc_handle(bgep, bgep->io_handle);
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
		mutex_exit(bgep->genlock);
		return (DDI_FAILURE);
	}
	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
		mutex_exit(bgep->genlock);
		return (DDI_FAILURE);
	}
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
		mutex_exit(bgep->genlock);
		return (DDI_FAILURE);
	}
	mutex_exit(bgep->genlock);
	return (DDI_SUCCESS);
}

/*
 * attach(9E) -- Attach a device to the system
 *
 * Called once for each board successfully probed.
 */
static int
bge_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
{
	bge_t *bgep;	/* Our private data */
	mac_register_t *macp;
	chip_id_t *cidp;
	caddr_t regs;
	int instance;
	int err;
	int intr_types;
#ifdef BGE_IPMI_ASF
	uint32_t mhcrValue;
#ifdef __sparc
	uint16_t value16;
#endif
#ifdef BGE_NETCONSOLE
	int retval;
#endif
#endif

	instance = ddi_get_instance(devinfo);

	BGE_GTRACE(("bge_attach($%p, %d) instance %d",
	    (void *)devinfo, cmd, instance));
	BGE_BRKPT(NULL, "bge_attach");

	switch (cmd) {
	default:
		return (DDI_FAILURE);

	case DDI_RESUME:
		return (bge_resume(devinfo));

	case DDI_ATTACH:
		break;
	}

	bgep = kmem_zalloc(sizeof (*bgep), KM_SLEEP);
	bgep->pstats = kmem_zalloc(sizeof (bge_statistics_reg_t), KM_SLEEP);
	ddi_set_driver_private(devinfo, bgep);
	bgep->bge_guard = BGE_GUARD;
	bgep->devinfo = devinfo;
	bgep->param_drain_max = 64;
	bgep->param_msi_cnt = 0;
	bgep->param_loop_mode = 0;

	/*
	 * Initialize more fields in BGE private data
	 */
	bgep->debug = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, debug_propname, bge_debug);
	(void) snprintf(bgep->ifname, sizeof (bgep->ifname), "%s%d",
	    BGE_DRIVER_NAME, instance);

	/*
	 * Initialize for fma support
	 */
	bgep->fm_capabilities = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, fm_cap,
	    DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
	    DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
	BGE_DEBUG(("bgep->fm_capabilities = %d", bgep->fm_capabilities));
	bge_fm_init(bgep);

	/*
	 * Look up the IOMMU's page size for DVMA mappings (must be
	 * a power of 2) and convert to a mask. This can be used to
	 * determine whether a message buffer crosses a page boundary.
	 * Note: in 2s complement binary notation, if X is a power of
	 * 2, then -X has the representation "11...1100...00".
	 */
	bgep->pagemask = dvma_pagesize(devinfo);
	ASSERT(ddi_ffs(bgep->pagemask) == ddi_fls(bgep->pagemask));
	bgep->pagemask = -bgep->pagemask;

	/*
	 * Map config space registers
	 * Read chip ID & set up config space command register(s)
	 *
	 * Note: this leaves the chip accessible by Memory Space
	 * accesses, but with interrupts and Bus Mastering off.
	 * This should ensure that nothing untoward will happen
	 * if it has been left active by the (net-)bootloader.
	 * We'll re-enable Bus Mastering once we've reset the chip,
	 * and allow interrupts only when everything else is set up.
	 */
	err = pci_config_setup(devinfo, &bgep->cfg_handle);
	if (err != DDI_SUCCESS) {
		bge_problem(bgep, "pci_config_setup() failed");
		goto attach_fail;
	}
#ifdef BGE_IPMI_ASF
#ifdef __sparc
	value16 = pci_config_get16(bgep->cfg_handle, PCI_CONF_COMM);
	value16 = value16 | (PCI_COMM_MAE | PCI_COMM_ME);
	pci_config_put16(bgep->cfg_handle, PCI_CONF_COMM, value16);
	mhcrValue = MHCR_ENABLE_INDIRECT_ACCESS |
	    MHCR_ENABLE_TAGGED_STATUS_MODE |
	    MHCR_MASK_INTERRUPT_MODE |
	    MHCR_MASK_PCI_INT_OUTPUT |
	    MHCR_CLEAR_INTERRUPT_INTA |
	    MHCR_ENABLE_ENDIAN_WORD_SWAP |
	    MHCR_ENABLE_ENDIAN_BYTE_SWAP;
	pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_MHCR, mhcrValue);
	bge_ind_put32(bgep, MEMORY_ARBITER_MODE_REG,
	    bge_ind_get32(bgep, MEMORY_ARBITER_MODE_REG) |
	    MEMORY_ARBITER_ENABLE);
#else
	mhcrValue = pci_config_get32(bgep->cfg_handle, PCI_CONF_BGE_MHCR);
#endif
	if (mhcrValue & MHCR_ENABLE_ENDIAN_WORD_SWAP) {
		bgep->asf_wordswapped = B_TRUE;
	} else {
		bgep->asf_wordswapped = B_FALSE;
	}
	bge_asf_get_config(bgep);
#endif
	bgep->progress |= PROGRESS_CFG;
	cidp = &bgep->chipid;
	bzero(cidp, sizeof (*cidp));
	bge_chip_cfg_init(bgep, cidp, B_FALSE);
	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
		goto attach_fail;
	}

#ifdef BGE_IPMI_ASF
	if (DEVICE_5721_SERIES_CHIPSETS(bgep) ||
	    DEVICE_5714_SERIES_CHIPSETS(bgep)) {
		bgep->asf_newhandshake = B_TRUE;
	} else {
		bgep->asf_newhandshake = B_FALSE;
	}
#endif

	/*
	 * Update those parts of the chip ID derived from volatile
	 * registers with the values seen by OBP (in case the chip
	 * has been reset externally and therefore lost them).
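	 * The subsystem IDs, cache line size, and latency timer read
	 * back below live in PCI config space, which is exactly the
	 * kind of state an external reset can clobber; the ring counts
	 * are simply driver tunables picked up at the same time.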
	 */
	cidp->subven = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, subven_propname, cidp->subven);
	cidp->subdev = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, subdev_propname, cidp->subdev);
	cidp->clsize = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, clsize_propname, cidp->clsize);
	cidp->latency = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, latency_propname, cidp->latency);
	cidp->rx_rings = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, rxrings_propname, cidp->rx_rings);
	cidp->tx_rings = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, txrings_propname, cidp->tx_rings);

	if (bge_jumbo_enable == B_TRUE) {
		cidp->default_mtu = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
		    DDI_PROP_DONTPASS, default_mtu, BGE_DEFAULT_MTU);
		if ((cidp->default_mtu < BGE_DEFAULT_MTU) ||
		    (cidp->default_mtu > BGE_MAXIMUM_MTU)) {
			cidp->default_mtu = BGE_DEFAULT_MTU;
		}
	}

	/*
	 * Map operating registers
	 */
	err = ddi_regs_map_setup(devinfo, BGE_PCI_OPREGS_RNUMBER,
	    &regs, 0, 0, &bge_reg_accattr, &bgep->io_handle);
	if (err != DDI_SUCCESS) {
		bge_problem(bgep, "ddi_regs_map_setup() failed");
		goto attach_fail;
	}
	bgep->io_regs = regs;
	bgep->progress |= PROGRESS_REGS;

	/*
	 * Characterise the device, so we know its requirements.
	 * Then allocate the appropriate TX and RX descriptors & buffers.
	 */
	if (bge_chip_id_init(bgep) == EIO) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
		goto attach_fail;
	}

	err = bge_alloc_bufs(bgep);
	if (err != DDI_SUCCESS) {
		bge_problem(bgep, "DMA buffer allocation failed");
		goto attach_fail;
	}
	bgep->progress |= PROGRESS_BUFS;

	/*
	 * Add the softint handlers:
	 *
	 * Both of these handlers are used to avoid restrictions on the
	 * context and/or mutexes required for some operations. In
	 * particular, the hardware interrupt handler and its subfunctions
	 * can detect a number of conditions that we don't want to handle
	 * in that context or with that set of mutexes held. So, these
	 * softints are triggered instead:
	 *
	 * the <resched> softint is triggered if we have previously
	 * had to refuse to send a packet because of resource shortage
	 * (we've run out of transmit buffers), but the send completion
	 * interrupt handler has now detected that more buffers have
	 * become available.
	 *
	 * the <factotum> is triggered if the h/w interrupt handler
	 * sees the <link state changed> or <error> bits in the status
	 * block. It's also triggered periodically to poll the link
	 * state, just in case we aren't getting link status change
	 * interrupts ...
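	 *
	 * Both handlers are added at DDI_SOFTINT_LOW priority, and they
	 * are added here, before the hardware interrupt is registered,
	 * so that each is safe to trigger as soon as the corresponding
	 * PROGRESS_RESCHED/PROGRESS_FACTOTUM flag is set below.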
	 */
	err = ddi_add_softintr(devinfo, DDI_SOFTINT_LOW, &bgep->drain_id,
	    NULL, NULL, bge_send_drain, (caddr_t)bgep);
	if (err != DDI_SUCCESS) {
		bge_problem(bgep, "ddi_add_softintr() failed");
		goto attach_fail;
	}
	bgep->progress |= PROGRESS_RESCHED;
	err = ddi_add_softintr(devinfo, DDI_SOFTINT_LOW, &bgep->factotum_id,
	    NULL, NULL, bge_chip_factotum, (caddr_t)bgep);
	if (err != DDI_SUCCESS) {
		bge_problem(bgep, "ddi_add_softintr() failed");
		goto attach_fail;
	}
	bgep->progress |= PROGRESS_FACTOTUM;

	/* Get supported interrupt types */
	if (ddi_intr_get_supported_types(devinfo, &intr_types) != DDI_SUCCESS) {
		bge_error(bgep, "ddi_intr_get_supported_types failed\n");

		goto attach_fail;
	}

	BGE_DEBUG(("%s: ddi_intr_get_supported_types() returned: %x",
	    bgep->ifname, intr_types));

	if ((intr_types & DDI_INTR_TYPE_MSI) && bgep->chipid.msi_enabled) {
		if (bge_add_intrs(bgep, DDI_INTR_TYPE_MSI) != DDI_SUCCESS) {
			bge_error(bgep, "MSI registration failed, "
			    "trying FIXED interrupt type\n");
		} else {
			BGE_DEBUG(("%s: Using MSI interrupt type",
			    bgep->ifname));
			bgep->intr_type = DDI_INTR_TYPE_MSI;
			bgep->progress |= PROGRESS_HWINT;
		}
	}

	if (!(bgep->progress & PROGRESS_HWINT) &&
	    (intr_types & DDI_INTR_TYPE_FIXED)) {
		if (bge_add_intrs(bgep, DDI_INTR_TYPE_FIXED) != DDI_SUCCESS) {
			bge_error(bgep, "FIXED interrupt "
			    "registration failed\n");
			goto attach_fail;
		}

		BGE_DEBUG(("%s: Using FIXED interrupt type", bgep->ifname));

		bgep->intr_type = DDI_INTR_TYPE_FIXED;
		bgep->progress |= PROGRESS_HWINT;
	}

	if (!(bgep->progress & PROGRESS_HWINT)) {
		bge_error(bgep, "No interrupts registered\n");
		goto attach_fail;
	}

	/*
	 * Note that interrupts are not enabled yet as
	 * mutex locks are not initialized. Initialize mutex locks.
	 */
	mutex_init(bgep->genlock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(bgep->intr_pri));
	mutex_init(bgep->softintrlock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(bgep->intr_pri));
	rw_init(bgep->errlock, NULL, RW_DRIVER,
	    DDI_INTR_PRI(bgep->intr_pri));

	/*
	 * Initialize rings.
	 */
	bge_init_rings(bgep);

	/*
	 * Now that mutex locks are initialized, enable interrupts.
	 */
	bge_intr_enable(bgep);
	bgep->progress |= PROGRESS_INTR;

	/*
	 * Initialise link state variables
	 * Stop, reset & reinitialise the chip.
	 * Initialise the (internal) PHY.
	 */
	bgep->link_state = LINK_STATE_UNKNOWN;

	mutex_enter(bgep->genlock);

	/*
	 * Reset chip & rings to initial state; also reset address
	 * filtering, promiscuity, loopback mode.
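	 * When ASF/IPMI support is compiled in, the reset mode also
	 * depends on whether netconsole support is configured: we use
	 * ASF_MODE_INIT with BGE_NETCONSOLE and ASF_MODE_SHUTDOWN
	 * otherwise.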
	 */
#ifdef BGE_IPMI_ASF
#ifdef BGE_NETCONSOLE
	if (bge_reset(bgep, ASF_MODE_INIT) != DDI_SUCCESS) {
#else
	if (bge_reset(bgep, ASF_MODE_SHUTDOWN) != DDI_SUCCESS) {
#endif
#else
	if (bge_reset(bgep) != DDI_SUCCESS) {
#endif
		(void) bge_check_acc_handle(bgep, bgep->cfg_handle);
		(void) bge_check_acc_handle(bgep, bgep->io_handle);
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
		mutex_exit(bgep->genlock);
		goto attach_fail;
	}

#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled) {
		bgep->asf_status = ASF_STAT_RUN_INIT;
	}
#endif

	bzero(bgep->mcast_hash, sizeof (bgep->mcast_hash));
	bzero(bgep->mcast_refs, sizeof (bgep->mcast_refs));
	bgep->promisc = B_FALSE;
	bgep->param_loop_mode = BGE_LOOP_NONE;
	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
		mutex_exit(bgep->genlock);
		goto attach_fail;
	}
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
		mutex_exit(bgep->genlock);
		goto attach_fail;
	}

	mutex_exit(bgep->genlock);

	if (bge_phys_init(bgep) == EIO) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST);
		goto attach_fail;
	}
	bgep->progress |= PROGRESS_PHY;

	/*
	 * Initialize NDD-tweakable parameters
	 */
	if (bge_nd_init(bgep)) {
		bge_problem(bgep, "bge_nd_init() failed");
		goto attach_fail;
	}
	bgep->progress |= PROGRESS_NDD;

	/*
	 * Create & initialise named kstats
	 */
	bge_init_kstats(bgep, instance);
	bgep->progress |= PROGRESS_KSTATS;

	/*
	 * Determine whether to override the chip's own MAC address
	 */
	bge_find_mac_address(bgep, cidp);
	ethaddr_copy(cidp->vendor_addr.addr, bgep->curr_addr[0].addr);
	bgep->curr_addr[0].set = B_TRUE;

	bgep->unicst_addr_total = MAC_ADDRESS_REGS_MAX;
	/*
	 * The number of available addresses is one less than the
	 * maximum, since the primary address is not advertised
	 * as one of the multiple MAC addresses.
	 */
	bgep->unicst_addr_avail = MAC_ADDRESS_REGS_MAX - 1;

	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
		goto attach_fail;
	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	macp->m_driver = bgep;
	macp->m_dip = devinfo;
	macp->m_src_addr = bgep->curr_addr[0].addr;
	macp->m_callbacks = &bge_m_callbacks;
	macp->m_min_sdu = 0;
	macp->m_max_sdu = cidp->ethmax_size - sizeof (struct ether_header);
	macp->m_margin = VLAN_TAGSZ;
	macp->m_priv_props = bge_priv_prop;
	macp->m_priv_prop_count = BGE_MAX_PRIV_PROPS;

	/*
	 * Finally, we're ready to register ourselves with the MAC layer
	 * interface; if this succeeds, we're all ready to start()
	 */
	err = mac_register(macp, &bgep->mh);
	mac_free(macp);
	if (err != 0)
		goto attach_fail;

	/*
	 * Register a periodic handler.
	 * bge_chip_cyclic() is invoked in kernel context.
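	 * It fires every BGE_CYCLIC_PERIOD; among other things, it is
	 * presumably what provides the periodic link-state poll that
	 * the softint comment earlier in this routine refers to.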
	 */
	bgep->periodic_id = ddi_periodic_add(bge_chip_cyclic, bgep,
	    BGE_CYCLIC_PERIOD, DDI_IPL_0);

	bgep->progress |= PROGRESS_READY;
	ASSERT(bgep->bge_guard == BGE_GUARD);
#ifdef BGE_IPMI_ASF
#ifdef BGE_NETCONSOLE
	if (bgep->asf_enabled) {
		mutex_enter(bgep->genlock);
		retval = bge_chip_start(bgep, B_TRUE);
		mutex_exit(bgep->genlock);
		if (retval != DDI_SUCCESS)
			goto attach_fail;
	}
#endif
#endif
	return (DDI_SUCCESS);

attach_fail:
#ifdef BGE_IPMI_ASF
	bge_unattach(bgep, ASF_MODE_SHUTDOWN);
#else
	bge_unattach(bgep);
#endif
	return (DDI_FAILURE);
}

/*
 * bge_suspend() -- suspend transmit/receive for powerdown
 */
static int
bge_suspend(bge_t *bgep)
{
	/*
	 * Stop processing and idle (powerdown) the PHY ...
	 */
	mutex_enter(bgep->genlock);
#ifdef BGE_IPMI_ASF
	/*
	 * Power management is not currently supported by BGE. If you
	 * want to implement it, add the ASF/IPMI-related code here.
	 */
#endif
	bge_stop(bgep);
	if (bge_phys_idle(bgep) != DDI_SUCCESS) {
		(void) bge_check_acc_handle(bgep, bgep->io_handle);
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (DDI_FAILURE);
	}
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (DDI_FAILURE);
	}
	mutex_exit(bgep->genlock);

	return (DDI_SUCCESS);
}

/*
 * detach(9E) -- Detach a device from the system
 */
static int
bge_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
{
	bge_t *bgep;
#ifdef BGE_IPMI_ASF
	uint_t asf_mode;
	asf_mode = ASF_MODE_NONE;
#endif

	BGE_GTRACE(("bge_detach($%p, %d)", (void *)devinfo, cmd));

	bgep = ddi_get_driver_private(devinfo);

	switch (cmd) {
	default:
		return (DDI_FAILURE);

	case DDI_SUSPEND:
		return (bge_suspend(bgep));

	case DDI_DETACH:
		break;
	}

#ifdef BGE_IPMI_ASF
	mutex_enter(bgep->genlock);
	if (bgep->asf_enabled && ((bgep->asf_status == ASF_STAT_RUN) ||
	    (bgep->asf_status == ASF_STAT_RUN_INIT))) {

		bge_asf_update_status(bgep);
		if (bgep->asf_status == ASF_STAT_RUN) {
			bge_asf_stop_timer(bgep);
		}
		bgep->asf_status = ASF_STAT_STOP;

		bge_asf_pre_reset_operations(bgep, BGE_SHUTDOWN_RESET);

		if (bgep->asf_pseudostop) {
			bge_chip_stop(bgep, B_FALSE);
			bgep->bge_mac_state = BGE_MAC_STOPPED;
			bgep->asf_pseudostop = B_FALSE;
		}

		asf_mode = ASF_MODE_POST_SHUTDOWN;

		if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK)
			ddi_fm_service_impact(bgep->devinfo,
			    DDI_SERVICE_UNAFFECTED);
		if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK)
			ddi_fm_service_impact(bgep->devinfo,
			    DDI_SERVICE_UNAFFECTED);
	}
	mutex_exit(bgep->genlock);
#endif

	/*
	 * Unregister from the GLD subsystem. This can fail, in
	 * particular if there are DLPI style-2 streams still open -
	 * in which case we just return failure without shutting
	 * down chip operations.
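	 * The framework may retry the detach later, and mac_unregister()
	 * should then succeed once the last such client has closed.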
	 */
	if (mac_unregister(bgep->mh) != 0)
		return (DDI_FAILURE);

	/*
	 * All activity stopped, so we can clean up & exit
	 */
#ifdef BGE_IPMI_ASF
	bge_unattach(bgep, asf_mode);
#else
	bge_unattach(bgep);
#endif
	return (DDI_SUCCESS);
}


/*
 * ========== Module Loading Data & Entry Points ==========
 */

#undef	BGE_DBG
#define	BGE_DBG		BGE_DBG_INIT	/* debug flag for this code	*/

DDI_DEFINE_STREAM_OPS(bge_dev_ops, nulldev, nulldev, bge_attach, bge_detach,
    nodev, NULL, D_MP, NULL);

static struct modldrv bge_modldrv = {
	&mod_driverops,		/* Type of module.  This one is a driver */
	bge_ident,		/* short description */
	&bge_dev_ops		/* driver specific ops */
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *)&bge_modldrv, NULL
};


int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

int
_init(void)
{
	int status;

	mac_init_ops(&bge_dev_ops, "bge");
	status = mod_install(&modlinkage);
	if (status == DDI_SUCCESS)
		mutex_init(bge_log_mutex, NULL, MUTEX_DRIVER, NULL);
	else
		mac_fini_ops(&bge_dev_ops);
	return (status);
}

int
_fini(void)
{
	int status;

	status = mod_remove(&modlinkage);
	if (status == DDI_SUCCESS) {
		mac_fini_ops(&bge_dev_ops);
		mutex_destroy(bge_log_mutex);
	}
	return (status);
}


/*
 * bge_add_intrs:
 *
 * Register FIXED or MSI interrupts.
 */
static int
bge_add_intrs(bge_t *bgep, int intr_type)
{
	dev_info_t *dip = bgep->devinfo;
	int avail, actual, intr_size, count = 0;
	int i, flag, ret;

	BGE_DEBUG(("bge_add_intrs($%p, 0x%x)", (void *)bgep, intr_type));

	/* Get number of interrupts */
	ret = ddi_intr_get_nintrs(dip, intr_type, &count);
	if ((ret != DDI_SUCCESS) || (count == 0)) {
		bge_error(bgep, "ddi_intr_get_nintrs() failure, ret: %d, "
		    "count: %d", ret, count);

		return (DDI_FAILURE);
	}

	/* Get number of available interrupts */
	ret = ddi_intr_get_navail(dip, intr_type, &avail);
	if ((ret != DDI_SUCCESS) || (avail == 0)) {
		bge_error(bgep, "ddi_intr_get_navail() failure, "
		    "ret: %d, avail: %d\n", ret, avail);

		return (DDI_FAILURE);
	}

	if (avail < count) {
		BGE_DEBUG(("%s: nintrs() returned %d, navail returned %d",
		    bgep->ifname, count, avail));
	}

	/*
	 * BGE hardware generates only a single MSI even though it claims
	 * to support multiple MSIs. So, hard-code the MSI count to 1.
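	 *
	 * Because the count is forced to 1, DDI_INTR_ALLOC_STRICT is
	 * used so that ddi_intr_alloc() either grants exactly that one
	 * MSI or fails outright, in which case bge_attach() falls back
	 * to the FIXED interrupt type (allocated with
	 * DDI_INTR_ALLOC_NORMAL).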
	 */
	if (intr_type == DDI_INTR_TYPE_MSI) {
		count = 1;
		flag = DDI_INTR_ALLOC_STRICT;
	} else {
		flag = DDI_INTR_ALLOC_NORMAL;
	}

	/* Allocate an array of interrupt handles */
	intr_size = count * sizeof (ddi_intr_handle_t);
	bgep->htable = kmem_alloc(intr_size, KM_SLEEP);

	/* Call ddi_intr_alloc() */
	ret = ddi_intr_alloc(dip, bgep->htable, intr_type, 0,
	    count, &actual, flag);

	if ((ret != DDI_SUCCESS) || (actual == 0)) {
		bge_error(bgep, "ddi_intr_alloc() failed %d\n", ret);

		kmem_free(bgep->htable, intr_size);
		return (DDI_FAILURE);
	}

	if (actual < count) {
		BGE_DEBUG(("%s: Requested: %d, Received: %d",
		    bgep->ifname, count, actual));
	}

	bgep->intr_cnt = actual;

	/*
	 * Get priority for first msi, assume remaining are all the same
	 */
	if ((ret = ddi_intr_get_pri(bgep->htable[0], &bgep->intr_pri)) !=
	    DDI_SUCCESS) {
		bge_error(bgep, "ddi_intr_get_pri() failed %d\n", ret);

		/* Free already allocated intr */
		for (i = 0; i < actual; i++) {
			(void) ddi_intr_free(bgep->htable[i]);
		}

		kmem_free(bgep->htable, intr_size);
		return (DDI_FAILURE);
	}

	/* Call ddi_intr_add_handler() */
	for (i = 0; i < actual; i++) {
		if ((ret = ddi_intr_add_handler(bgep->htable[i], bge_intr,
		    (caddr_t)bgep, (caddr_t)(uintptr_t)i)) != DDI_SUCCESS) {
			bge_error(bgep, "ddi_intr_add_handler() "
			    "failed %d\n", ret);

			/* Free already allocated intr */
			for (i = 0; i < actual; i++) {
				(void) ddi_intr_free(bgep->htable[i]);
			}

			kmem_free(bgep->htable, intr_size);
			return (DDI_FAILURE);
		}
	}

	if ((ret = ddi_intr_get_cap(bgep->htable[0], &bgep->intr_cap))
	    != DDI_SUCCESS) {
		bge_error(bgep, "ddi_intr_get_cap() failed %d\n", ret);

		for (i = 0; i < actual; i++) {
			(void) ddi_intr_remove_handler(bgep->htable[i]);
			(void) ddi_intr_free(bgep->htable[i]);
		}

		kmem_free(bgep->htable, intr_size);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*
 * bge_rem_intrs:
 *
 * Unregister FIXED or MSI interrupts
 */
static void
bge_rem_intrs(bge_t *bgep)
{
	int i;

	BGE_DEBUG(("bge_rem_intrs($%p)", (void *)bgep));

	/* Call ddi_intr_remove_handler() */
	for (i = 0; i < bgep->intr_cnt; i++) {
		(void) ddi_intr_remove_handler(bgep->htable[i]);
		(void) ddi_intr_free(bgep->htable[i]);
	}

	kmem_free(bgep->htable, bgep->intr_cnt * sizeof (ddi_intr_handle_t));
}


void
bge_intr_enable(bge_t *bgep)
{
	int i;

	if (bgep->intr_cap & DDI_INTR_FLAG_BLOCK) {
		/* Call ddi_intr_block_enable() for MSI interrupts */
		(void) ddi_intr_block_enable(bgep->htable, bgep->intr_cnt);
	} else {
		/* Call ddi_intr_enable for MSI or FIXED interrupts */
		for (i = 0; i < bgep->intr_cnt; i++) {
			(void) ddi_intr_enable(bgep->htable[i]);
		}
	}
}


void
bge_intr_disable(bge_t *bgep)
{
	int i;

	if (bgep->intr_cap & DDI_INTR_FLAG_BLOCK) {
		/* Call ddi_intr_block_disable() */
		(void) ddi_intr_block_disable(bgep->htable, bgep->intr_cnt);
	} else {
		for (i = 0; i < bgep->intr_cnt; i++) {
			(void) ddi_intr_disable(bgep->htable[i]);
		}
	}
}

int
bge_reprogram(bge_t *bgep)
{
	int status = 0;

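	/*
	 * The caller is expected to hold genlock across both the PHY
	 * update and the chip resync below; IOC_INVAL is returned if
	 * either step fails.
	 */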
	ASSERT(mutex_owned(bgep->genlock));

	if (bge_phys_update(bgep) != DDI_SUCCESS) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		status = IOC_INVAL;
	}
#ifdef BGE_IPMI_ASF
	if (bge_chip_sync(bgep, B_TRUE) == DDI_FAILURE) {
#else
	if (bge_chip_sync(bgep) == DDI_FAILURE) {
#endif
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		status = IOC_INVAL;
	}
	if (bgep->intr_type == DDI_INTR_TYPE_MSI)
		bge_chip_msi_trig(bgep);
	return (status);
}