1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27 #pragma ident "%Z%%M% %I% %E% SMI" 28 29 #include "bge_impl.h" 30 #include <sys/sdt.h> 31 #include <sys/dld.h> 32 33 /* 34 * This is the string displayed by modinfo, etc. 35 * Make sure you keep the version ID up to date! 36 */ 37 static char bge_ident[] = "Broadcom Gb Ethernet"; 38 39 /* 40 * Property names 41 */ 42 static char debug_propname[] = "bge-debug-flags"; 43 static char clsize_propname[] = "cache-line-size"; 44 static char latency_propname[] = "latency-timer"; 45 static char localmac_boolname[] = "local-mac-address?"; 46 static char localmac_propname[] = "local-mac-address"; 47 static char macaddr_propname[] = "mac-address"; 48 static char subdev_propname[] = "subsystem-id"; 49 static char subven_propname[] = "subsystem-vendor-id"; 50 static char rxrings_propname[] = "bge-rx-rings"; 51 static char txrings_propname[] = "bge-tx-rings"; 52 static char fm_cap[] = "fm-capable"; 53 static char default_mtu[] = "default_mtu"; 54 55 static int bge_add_intrs(bge_t *, int); 56 static void bge_rem_intrs(bge_t *); 57 58 /* 59 * Describes the chip's DMA engine 60 */ 61 static ddi_dma_attr_t dma_attr = { 62 DMA_ATTR_V0, /* dma_attr version */ 63 0x0000000000000000ull, /* dma_attr_addr_lo */ 64 0xFFFFFFFFFFFFFFFFull, /* dma_attr_addr_hi */ 65 0x00000000FFFFFFFFull, /* dma_attr_count_max */ 66 0x0000000000000001ull, /* dma_attr_align */ 67 0x00000FFF, /* dma_attr_burstsizes */ 68 0x00000001, /* dma_attr_minxfer */ 69 0x000000000000FFFFull, /* dma_attr_maxxfer */ 70 0xFFFFFFFFFFFFFFFFull, /* dma_attr_seg */ 71 1, /* dma_attr_sgllen */ 72 0x00000001, /* dma_attr_granular */ 73 DDI_DMA_FLAGERR /* dma_attr_flags */ 74 }; 75 76 /* 77 * PIO access attributes for registers 78 */ 79 static ddi_device_acc_attr_t bge_reg_accattr = { 80 DDI_DEVICE_ATTR_V0, 81 DDI_NEVERSWAP_ACC, 82 DDI_STRICTORDER_ACC, 83 DDI_FLAGERR_ACC 84 }; 85 86 /* 87 * DMA access attributes for descriptors: NOT to be byte swapped. 88 */ 89 static ddi_device_acc_attr_t bge_desc_accattr = { 90 DDI_DEVICE_ATTR_V0, 91 DDI_NEVERSWAP_ACC, 92 DDI_STRICTORDER_ACC, 93 DDI_FLAGERR_ACC 94 }; 95 96 /* 97 * DMA access attributes for data: NOT to be byte swapped. 98 */ 99 static ddi_device_acc_attr_t bge_data_accattr = { 100 DDI_DEVICE_ATTR_V0, 101 DDI_NEVERSWAP_ACC, 102 DDI_STRICTORDER_ACC 103 }; 104 105 /* 106 * Versions of the O/S up to Solaris 8 didn't support network booting 107 * from any network interface except the first (NET0). Patching this 108 * flag to a non-zero value will tell the driver to work around this 109 * limitation by creating an extra (internal) pathname node. To do 110 * this, just add a line like the following to the CLIENT'S etc/system 111 * file ON THE ROOT FILESYSTEM SERVER before booting the client: 112 * 113 * set bge:bge_net1_boot_support = 1; 114 */ 115 static uint32_t bge_net1_boot_support = 1; 116 117 static int bge_m_start(void *); 118 static void bge_m_stop(void *); 119 static int bge_m_promisc(void *, boolean_t); 120 static int bge_m_multicst(void *, boolean_t, const uint8_t *); 121 static int bge_m_unicst(void *, const uint8_t *); 122 static void bge_m_resources(void *); 123 static void bge_m_ioctl(void *, queue_t *, mblk_t *); 124 static boolean_t bge_m_getcapab(void *, mac_capab_t, void *); 125 static int bge_unicst_set(void *, const uint8_t *, 126 mac_addr_slot_t); 127 static int bge_m_unicst_add(void *, mac_multi_addr_t *); 128 static int bge_m_unicst_remove(void *, mac_addr_slot_t); 129 static int bge_m_unicst_modify(void *, mac_multi_addr_t *); 130 static int bge_m_unicst_get(void *, mac_multi_addr_t *); 131 static int bge_m_setprop(void *, const char *, mac_prop_id_t, 132 uint_t, const void *); 133 static int bge_m_getprop(void *, const char *, mac_prop_id_t, 134 uint_t, uint_t, void *); 135 static int bge_set_priv_prop(bge_t *, const char *, uint_t, 136 const void *); 137 static int bge_get_priv_prop(bge_t *, const char *, uint_t, 138 uint_t, void *); 139 140 #define BGE_M_CALLBACK_FLAGS\ 141 (MC_RESOURCES | MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP) 142 143 static mac_callbacks_t bge_m_callbacks = { 144 BGE_M_CALLBACK_FLAGS, 145 bge_m_stat, 146 bge_m_start, 147 bge_m_stop, 148 bge_m_promisc, 149 bge_m_multicst, 150 bge_m_unicst, 151 bge_m_tx, 152 bge_m_resources, 153 bge_m_ioctl, 154 bge_m_getcapab, 155 NULL, 156 NULL, 157 bge_m_setprop, 158 bge_m_getprop 159 }; 160 161 mac_priv_prop_t bge_priv_prop[] = { 162 {"_adv_asym_pause_cap", MAC_PROP_PERM_RW}, 163 {"_adv_pause_cap", MAC_PROP_PERM_RW} 164 }; 165 166 #define BGE_MAX_PRIV_PROPS \ 167 (sizeof (bge_priv_prop) / sizeof (mac_priv_prop_t)) 168 169 /* 170 * ========== Transmit and receive ring reinitialisation ========== 171 */ 172 173 /* 174 * These <reinit> routines each reset the specified ring to an initial 175 * state, assuming that the corresponding <init> routine has already 176 * been called exactly once. 177 */ 178 179 static void 180 bge_reinit_send_ring(send_ring_t *srp) 181 { 182 bge_queue_t *txbuf_queue; 183 bge_queue_item_t *txbuf_head; 184 sw_txbuf_t *txbuf; 185 sw_sbd_t *ssbdp; 186 uint32_t slot; 187 188 /* 189 * Reinitialise control variables ... 190 */ 191 srp->tx_flow = 0; 192 srp->tx_next = 0; 193 srp->txfill_next = 0; 194 srp->tx_free = srp->desc.nslots; 195 ASSERT(mutex_owned(srp->tc_lock)); 196 srp->tc_next = 0; 197 srp->txpkt_next = 0; 198 srp->tx_block = 0; 199 srp->tx_nobd = 0; 200 srp->tx_nobuf = 0; 201 202 /* 203 * Initialize the tx buffer push queue 204 */ 205 mutex_enter(srp->freetxbuf_lock); 206 mutex_enter(srp->txbuf_lock); 207 txbuf_queue = &srp->freetxbuf_queue; 208 txbuf_queue->head = NULL; 209 txbuf_queue->count = 0; 210 txbuf_queue->lock = srp->freetxbuf_lock; 211 srp->txbuf_push_queue = txbuf_queue; 212 213 /* 214 * Initialize the tx buffer pop queue 215 */ 216 txbuf_queue = &srp->txbuf_queue; 217 txbuf_queue->head = NULL; 218 txbuf_queue->count = 0; 219 txbuf_queue->lock = srp->txbuf_lock; 220 srp->txbuf_pop_queue = txbuf_queue; 221 txbuf_head = srp->txbuf_head; 222 txbuf = srp->txbuf; 223 for (slot = 0; slot < srp->tx_buffers; ++slot) { 224 txbuf_head->item = txbuf; 225 txbuf_head->next = txbuf_queue->head; 226 txbuf_queue->head = txbuf_head; 227 txbuf_queue->count++; 228 txbuf++; 229 txbuf_head++; 230 } 231 mutex_exit(srp->txbuf_lock); 232 mutex_exit(srp->freetxbuf_lock); 233 234 /* 235 * Zero and sync all the h/w Send Buffer Descriptors 236 */ 237 DMA_ZERO(srp->desc); 238 DMA_SYNC(srp->desc, DDI_DMA_SYNC_FORDEV); 239 bzero(srp->pktp, BGE_SEND_BUF_MAX * sizeof (*srp->pktp)); 240 ssbdp = srp->sw_sbds; 241 for (slot = 0; slot < srp->desc.nslots; ++ssbdp, ++slot) 242 ssbdp->pbuf = NULL; 243 } 244 245 static void 246 bge_reinit_recv_ring(recv_ring_t *rrp) 247 { 248 /* 249 * Reinitialise control variables ... 250 */ 251 rrp->rx_next = 0; 252 } 253 254 static void 255 bge_reinit_buff_ring(buff_ring_t *brp, uint32_t ring) 256 { 257 bge_rbd_t *hw_rbd_p; 258 sw_rbd_t *srbdp; 259 uint32_t bufsize; 260 uint32_t nslots; 261 uint32_t slot; 262 263 static uint16_t ring_type_flag[BGE_BUFF_RINGS_MAX] = { 264 RBD_FLAG_STD_RING, 265 RBD_FLAG_JUMBO_RING, 266 RBD_FLAG_MINI_RING 267 }; 268 269 /* 270 * Zero, initialise and sync all the h/w Receive Buffer Descriptors 271 * Note: all the remaining fields (<type>, <flags>, <ip_cksum>, 272 * <tcp_udp_cksum>, <error_flag>, <vlan_tag>, and <reserved>) 273 * should be zeroed, and so don't need to be set up specifically 274 * once the whole area has been cleared. 275 */ 276 DMA_ZERO(brp->desc); 277 278 hw_rbd_p = DMA_VPTR(brp->desc); 279 nslots = brp->desc.nslots; 280 ASSERT(brp->buf[0].nslots == nslots/BGE_SPLIT); 281 bufsize = brp->buf[0].size; 282 srbdp = brp->sw_rbds; 283 for (slot = 0; slot < nslots; ++hw_rbd_p, ++srbdp, ++slot) { 284 hw_rbd_p->host_buf_addr = srbdp->pbuf.cookie.dmac_laddress; 285 hw_rbd_p->index = slot; 286 hw_rbd_p->len = bufsize; 287 hw_rbd_p->opaque = srbdp->pbuf.token; 288 hw_rbd_p->flags |= ring_type_flag[ring]; 289 } 290 291 DMA_SYNC(brp->desc, DDI_DMA_SYNC_FORDEV); 292 293 /* 294 * Finally, reinitialise the ring control variables ... 295 */ 296 brp->rf_next = (nslots != 0) ? (nslots-1) : 0; 297 } 298 299 /* 300 * Reinitialize all rings 301 */ 302 static void 303 bge_reinit_rings(bge_t *bgep) 304 { 305 uint32_t ring; 306 307 ASSERT(mutex_owned(bgep->genlock)); 308 309 /* 310 * Send Rings ... 311 */ 312 for (ring = 0; ring < bgep->chipid.tx_rings; ++ring) 313 bge_reinit_send_ring(&bgep->send[ring]); 314 315 /* 316 * Receive Return Rings ... 317 */ 318 for (ring = 0; ring < bgep->chipid.rx_rings; ++ring) 319 bge_reinit_recv_ring(&bgep->recv[ring]); 320 321 /* 322 * Receive Producer Rings ... 323 */ 324 for (ring = 0; ring < BGE_BUFF_RINGS_USED; ++ring) 325 bge_reinit_buff_ring(&bgep->buff[ring], ring); 326 } 327 328 /* 329 * ========== Internal state management entry points ========== 330 */ 331 332 #undef BGE_DBG 333 #define BGE_DBG BGE_DBG_NEMO /* debug flag for this code */ 334 335 /* 336 * These routines provide all the functionality required by the 337 * corresponding GLD entry points, but don't update the GLD state 338 * so they can be called internally without disturbing our record 339 * of what GLD thinks we should be doing ... 340 */ 341 342 /* 343 * bge_reset() -- reset h/w & rings to initial state 344 */ 345 static int 346 #ifdef BGE_IPMI_ASF 347 bge_reset(bge_t *bgep, uint_t asf_mode) 348 #else 349 bge_reset(bge_t *bgep) 350 #endif 351 { 352 uint32_t ring; 353 int retval; 354 355 BGE_TRACE(("bge_reset($%p)", (void *)bgep)); 356 357 ASSERT(mutex_owned(bgep->genlock)); 358 359 /* 360 * Grab all the other mutexes in the world (this should 361 * ensure no other threads are manipulating driver state) 362 */ 363 for (ring = 0; ring < BGE_RECV_RINGS_MAX; ++ring) 364 mutex_enter(bgep->recv[ring].rx_lock); 365 for (ring = 0; ring < BGE_BUFF_RINGS_MAX; ++ring) 366 mutex_enter(bgep->buff[ring].rf_lock); 367 rw_enter(bgep->errlock, RW_WRITER); 368 for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring) 369 mutex_enter(bgep->send[ring].tx_lock); 370 for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring) 371 mutex_enter(bgep->send[ring].tc_lock); 372 373 #ifdef BGE_IPMI_ASF 374 retval = bge_chip_reset(bgep, B_TRUE, asf_mode); 375 #else 376 retval = bge_chip_reset(bgep, B_TRUE); 377 #endif 378 bge_reinit_rings(bgep); 379 380 /* 381 * Free the world ... 382 */ 383 for (ring = BGE_SEND_RINGS_MAX; ring-- > 0; ) 384 mutex_exit(bgep->send[ring].tc_lock); 385 for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring) 386 mutex_exit(bgep->send[ring].tx_lock); 387 rw_exit(bgep->errlock); 388 for (ring = BGE_BUFF_RINGS_MAX; ring-- > 0; ) 389 mutex_exit(bgep->buff[ring].rf_lock); 390 for (ring = BGE_RECV_RINGS_MAX; ring-- > 0; ) 391 mutex_exit(bgep->recv[ring].rx_lock); 392 393 BGE_DEBUG(("bge_reset($%p) done", (void *)bgep)); 394 return (retval); 395 } 396 397 /* 398 * bge_stop() -- stop processing, don't reset h/w or rings 399 */ 400 static void 401 bge_stop(bge_t *bgep) 402 { 403 BGE_TRACE(("bge_stop($%p)", (void *)bgep)); 404 405 ASSERT(mutex_owned(bgep->genlock)); 406 407 #ifdef BGE_IPMI_ASF 408 if (bgep->asf_enabled) { 409 bgep->asf_pseudostop = B_TRUE; 410 } else { 411 #endif 412 bge_chip_stop(bgep, B_FALSE); 413 #ifdef BGE_IPMI_ASF 414 } 415 #endif 416 417 BGE_DEBUG(("bge_stop($%p) done", (void *)bgep)); 418 } 419 420 /* 421 * bge_start() -- start transmitting/receiving 422 */ 423 static int 424 bge_start(bge_t *bgep, boolean_t reset_phys) 425 { 426 int retval; 427 428 BGE_TRACE(("bge_start($%p, %d)", (void *)bgep, reset_phys)); 429 430 ASSERT(mutex_owned(bgep->genlock)); 431 432 /* 433 * Start chip processing, including enabling interrupts 434 */ 435 retval = bge_chip_start(bgep, reset_phys); 436 437 BGE_DEBUG(("bge_start($%p, %d) done", (void *)bgep, reset_phys)); 438 return (retval); 439 } 440 441 /* 442 * bge_restart - restart transmitting/receiving after error or suspend 443 */ 444 int 445 bge_restart(bge_t *bgep, boolean_t reset_phys) 446 { 447 int retval = DDI_SUCCESS; 448 ASSERT(mutex_owned(bgep->genlock)); 449 450 #ifdef BGE_IPMI_ASF 451 if (bgep->asf_enabled) { 452 if (bge_reset(bgep, ASF_MODE_POST_INIT) != DDI_SUCCESS) 453 retval = DDI_FAILURE; 454 } else 455 if (bge_reset(bgep, ASF_MODE_NONE) != DDI_SUCCESS) 456 retval = DDI_FAILURE; 457 #else 458 if (bge_reset(bgep) != DDI_SUCCESS) 459 retval = DDI_FAILURE; 460 #endif 461 if (bgep->bge_mac_state == BGE_MAC_STARTED) { 462 if (bge_start(bgep, reset_phys) != DDI_SUCCESS) 463 retval = DDI_FAILURE; 464 bgep->watchdog = 0; 465 ddi_trigger_softintr(bgep->drain_id); 466 } 467 468 BGE_DEBUG(("bge_restart($%p, %d) done", (void *)bgep, reset_phys)); 469 return (retval); 470 } 471 472 473 /* 474 * ========== Nemo-required management entry points ========== 475 */ 476 477 #undef BGE_DBG 478 #define BGE_DBG BGE_DBG_NEMO /* debug flag for this code */ 479 480 /* 481 * bge_m_stop() -- stop transmitting/receiving 482 */ 483 static void 484 bge_m_stop(void *arg) 485 { 486 bge_t *bgep = arg; /* private device info */ 487 send_ring_t *srp; 488 uint32_t ring; 489 490 BGE_TRACE(("bge_m_stop($%p)", arg)); 491 492 /* 493 * Just stop processing, then record new GLD state 494 */ 495 mutex_enter(bgep->genlock); 496 if (!(bgep->progress & PROGRESS_INTR)) { 497 /* can happen during autorecovery */ 498 mutex_exit(bgep->genlock); 499 return; 500 } 501 bge_stop(bgep); 502 /* 503 * Free the possible tx buffers allocated in tx process. 504 */ 505 #ifdef BGE_IPMI_ASF 506 if (!bgep->asf_pseudostop) 507 #endif 508 { 509 rw_enter(bgep->errlock, RW_WRITER); 510 for (ring = 0; ring < bgep->chipid.tx_rings; ++ring) { 511 srp = &bgep->send[ring]; 512 mutex_enter(srp->tx_lock); 513 if (srp->tx_array > 1) 514 bge_free_txbuf_arrays(srp); 515 mutex_exit(srp->tx_lock); 516 } 517 rw_exit(bgep->errlock); 518 } 519 bgep->bge_mac_state = BGE_MAC_STOPPED; 520 BGE_DEBUG(("bge_m_stop($%p) done", arg)); 521 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) 522 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_UNAFFECTED); 523 mutex_exit(bgep->genlock); 524 } 525 526 /* 527 * bge_m_start() -- start transmitting/receiving 528 */ 529 static int 530 bge_m_start(void *arg) 531 { 532 bge_t *bgep = arg; /* private device info */ 533 534 BGE_TRACE(("bge_m_start($%p)", arg)); 535 536 /* 537 * Start processing and record new GLD state 538 */ 539 mutex_enter(bgep->genlock); 540 if (!(bgep->progress & PROGRESS_INTR)) { 541 /* can happen during autorecovery */ 542 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 543 mutex_exit(bgep->genlock); 544 return (EIO); 545 } 546 #ifdef BGE_IPMI_ASF 547 if (bgep->asf_enabled) { 548 if ((bgep->asf_status == ASF_STAT_RUN) && 549 (bgep->asf_pseudostop)) { 550 bgep->bge_mac_state = BGE_MAC_STARTED; 551 mutex_exit(bgep->genlock); 552 return (0); 553 } 554 } 555 if (bge_reset(bgep, ASF_MODE_INIT) != DDI_SUCCESS) { 556 #else 557 if (bge_reset(bgep) != DDI_SUCCESS) { 558 #endif 559 (void) bge_check_acc_handle(bgep, bgep->cfg_handle); 560 (void) bge_check_acc_handle(bgep, bgep->io_handle); 561 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 562 mutex_exit(bgep->genlock); 563 return (EIO); 564 } 565 if (bge_start(bgep, B_TRUE) != DDI_SUCCESS) { 566 (void) bge_check_acc_handle(bgep, bgep->cfg_handle); 567 (void) bge_check_acc_handle(bgep, bgep->io_handle); 568 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 569 mutex_exit(bgep->genlock); 570 return (EIO); 571 } 572 bgep->bge_mac_state = BGE_MAC_STARTED; 573 BGE_DEBUG(("bge_m_start($%p) done", arg)); 574 575 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) { 576 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 577 mutex_exit(bgep->genlock); 578 return (EIO); 579 } 580 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) { 581 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 582 mutex_exit(bgep->genlock); 583 return (EIO); 584 } 585 #ifdef BGE_IPMI_ASF 586 if (bgep->asf_enabled) { 587 if (bgep->asf_status != ASF_STAT_RUN) { 588 /* start ASF heart beat */ 589 bgep->asf_timeout_id = timeout(bge_asf_heartbeat, 590 (void *)bgep, 591 drv_usectohz(BGE_ASF_HEARTBEAT_INTERVAL)); 592 bgep->asf_status = ASF_STAT_RUN; 593 } 594 } 595 #endif 596 mutex_exit(bgep->genlock); 597 598 return (0); 599 } 600 601 /* 602 * bge_m_unicst() -- set the physical network address 603 */ 604 static int 605 bge_m_unicst(void *arg, const uint8_t *macaddr) 606 { 607 /* 608 * Request to set address in 609 * address slot 0, i.e., default address 610 */ 611 return (bge_unicst_set(arg, macaddr, 0)); 612 } 613 614 /* 615 * bge_unicst_set() -- set the physical network address 616 */ 617 static int 618 bge_unicst_set(void *arg, const uint8_t *macaddr, mac_addr_slot_t slot) 619 { 620 bge_t *bgep = arg; /* private device info */ 621 622 BGE_TRACE(("bge_m_unicst_set($%p, %s)", arg, 623 ether_sprintf((void *)macaddr))); 624 /* 625 * Remember the new current address in the driver state 626 * Sync the chip's idea of the address too ... 627 */ 628 mutex_enter(bgep->genlock); 629 if (!(bgep->progress & PROGRESS_INTR)) { 630 /* can happen during autorecovery */ 631 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 632 mutex_exit(bgep->genlock); 633 return (EIO); 634 } 635 ethaddr_copy(macaddr, bgep->curr_addr[slot].addr); 636 #ifdef BGE_IPMI_ASF 637 if (bge_chip_sync(bgep, B_FALSE) == DDI_FAILURE) { 638 #else 639 if (bge_chip_sync(bgep) == DDI_FAILURE) { 640 #endif 641 (void) bge_check_acc_handle(bgep, bgep->cfg_handle); 642 (void) bge_check_acc_handle(bgep, bgep->io_handle); 643 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 644 mutex_exit(bgep->genlock); 645 return (EIO); 646 } 647 #ifdef BGE_IPMI_ASF 648 if (bgep->asf_enabled) { 649 /* 650 * The above bge_chip_sync() function wrote the ethernet MAC 651 * addresses registers which destroyed the IPMI/ASF sideband. 652 * Here, we have to reset chip to make IPMI/ASF sideband work. 653 */ 654 if (bgep->asf_status == ASF_STAT_RUN) { 655 /* 656 * We must stop ASF heart beat before bge_chip_stop(), 657 * otherwise some computers (ex. IBM HS20 blade server) 658 * may crash. 659 */ 660 bge_asf_update_status(bgep); 661 bge_asf_stop_timer(bgep); 662 bgep->asf_status = ASF_STAT_STOP; 663 664 bge_asf_pre_reset_operations(bgep, BGE_INIT_RESET); 665 } 666 bge_chip_stop(bgep, B_FALSE); 667 668 if (bge_restart(bgep, B_FALSE) == DDI_FAILURE) { 669 (void) bge_check_acc_handle(bgep, bgep->cfg_handle); 670 (void) bge_check_acc_handle(bgep, bgep->io_handle); 671 ddi_fm_service_impact(bgep->devinfo, 672 DDI_SERVICE_DEGRADED); 673 mutex_exit(bgep->genlock); 674 return (EIO); 675 } 676 677 /* 678 * Start our ASF heartbeat counter as soon as possible. 679 */ 680 if (bgep->asf_status != ASF_STAT_RUN) { 681 /* start ASF heart beat */ 682 bgep->asf_timeout_id = timeout(bge_asf_heartbeat, 683 (void *)bgep, 684 drv_usectohz(BGE_ASF_HEARTBEAT_INTERVAL)); 685 bgep->asf_status = ASF_STAT_RUN; 686 } 687 } 688 #endif 689 BGE_DEBUG(("bge_m_unicst_set($%p) done", arg)); 690 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) { 691 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 692 mutex_exit(bgep->genlock); 693 return (EIO); 694 } 695 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) { 696 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 697 mutex_exit(bgep->genlock); 698 return (EIO); 699 } 700 mutex_exit(bgep->genlock); 701 702 return (0); 703 } 704 705 /* 706 * The following four routines are used as callbacks for multiple MAC 707 * address support: 708 * - bge_m_unicst_add(void *, mac_multi_addr_t *); 709 * - bge_m_unicst_remove(void *, mac_addr_slot_t); 710 * - bge_m_unicst_modify(void *, mac_multi_addr_t *); 711 * - bge_m_unicst_get(void *, mac_multi_addr_t *); 712 */ 713 714 /* 715 * bge_m_unicst_add() - will find an unused address slot, set the 716 * address value to the one specified, reserve that slot and enable 717 * the NIC to start filtering on the new MAC address. 718 * address slot. Returns 0 on success. 719 */ 720 static int 721 bge_m_unicst_add(void *arg, mac_multi_addr_t *maddr) 722 { 723 bge_t *bgep = arg; /* private device info */ 724 mac_addr_slot_t slot; 725 int err; 726 727 if (mac_unicst_verify(bgep->mh, 728 maddr->mma_addr, maddr->mma_addrlen) == B_FALSE) 729 return (EINVAL); 730 731 mutex_enter(bgep->genlock); 732 if (bgep->unicst_addr_avail == 0) { 733 /* no slots available */ 734 mutex_exit(bgep->genlock); 735 return (ENOSPC); 736 } 737 738 /* 739 * Primary/default address is in slot 0. The next three 740 * addresses are the multiple MAC addresses. So multiple 741 * MAC address 0 is in slot 1, 1 in slot 2, and so on. 742 * So the first multiple MAC address resides in slot 1. 743 */ 744 for (slot = 1; slot < bgep->unicst_addr_total; slot++) { 745 if (bgep->curr_addr[slot].set == B_FALSE) { 746 bgep->curr_addr[slot].set = B_TRUE; 747 break; 748 } 749 } 750 751 ASSERT(slot < bgep->unicst_addr_total); 752 bgep->unicst_addr_avail--; 753 mutex_exit(bgep->genlock); 754 maddr->mma_slot = slot; 755 756 if ((err = bge_unicst_set(bgep, maddr->mma_addr, slot)) != 0) { 757 mutex_enter(bgep->genlock); 758 bgep->curr_addr[slot].set = B_FALSE; 759 bgep->unicst_addr_avail++; 760 mutex_exit(bgep->genlock); 761 } 762 return (err); 763 } 764 765 /* 766 * bge_m_unicst_remove() - removes a MAC address that was added by a 767 * call to bge_m_unicst_add(). The slot number that was returned in 768 * add() is passed in the call to remove the address. 769 * Returns 0 on success. 770 */ 771 static int 772 bge_m_unicst_remove(void *arg, mac_addr_slot_t slot) 773 { 774 bge_t *bgep = arg; /* private device info */ 775 776 if (slot <= 0 || slot >= bgep->unicst_addr_total) 777 return (EINVAL); 778 779 mutex_enter(bgep->genlock); 780 if (bgep->curr_addr[slot].set == B_TRUE) { 781 bgep->curr_addr[slot].set = B_FALSE; 782 bgep->unicst_addr_avail++; 783 mutex_exit(bgep->genlock); 784 /* 785 * Copy the default address to the passed slot 786 */ 787 return (bge_unicst_set(bgep, bgep->curr_addr[0].addr, slot)); 788 } 789 mutex_exit(bgep->genlock); 790 return (EINVAL); 791 } 792 793 /* 794 * bge_m_unicst_modify() - modifies the value of an address that 795 * has been added by bge_m_unicst_add(). The new address, address 796 * length and the slot number that was returned in the call to add 797 * should be passed to bge_m_unicst_modify(). mma_flags should be 798 * set to 0. Returns 0 on success. 799 */ 800 static int 801 bge_m_unicst_modify(void *arg, mac_multi_addr_t *maddr) 802 { 803 bge_t *bgep = arg; /* private device info */ 804 mac_addr_slot_t slot; 805 806 if (mac_unicst_verify(bgep->mh, 807 maddr->mma_addr, maddr->mma_addrlen) == B_FALSE) 808 return (EINVAL); 809 810 slot = maddr->mma_slot; 811 812 if (slot <= 0 || slot >= bgep->unicst_addr_total) 813 return (EINVAL); 814 815 mutex_enter(bgep->genlock); 816 if (bgep->curr_addr[slot].set == B_TRUE) { 817 mutex_exit(bgep->genlock); 818 return (bge_unicst_set(bgep, maddr->mma_addr, slot)); 819 } 820 mutex_exit(bgep->genlock); 821 822 return (EINVAL); 823 } 824 825 /* 826 * bge_m_unicst_get() - will get the MAC address and all other 827 * information related to the address slot passed in mac_multi_addr_t. 828 * mma_flags should be set to 0 in the call. 829 * On return, mma_flags can take the following values: 830 * 1) MMAC_SLOT_UNUSED 831 * 2) MMAC_SLOT_USED | MMAC_VENDOR_ADDR 832 * 3) MMAC_SLOT_UNUSED | MMAC_VENDOR_ADDR 833 * 4) MMAC_SLOT_USED 834 */ 835 static int 836 bge_m_unicst_get(void *arg, mac_multi_addr_t *maddr) 837 { 838 bge_t *bgep = arg; /* private device info */ 839 mac_addr_slot_t slot; 840 841 slot = maddr->mma_slot; 842 843 if (slot <= 0 || slot >= bgep->unicst_addr_total) 844 return (EINVAL); 845 846 mutex_enter(bgep->genlock); 847 if (bgep->curr_addr[slot].set == B_TRUE) { 848 ethaddr_copy(bgep->curr_addr[slot].addr, 849 maddr->mma_addr); 850 maddr->mma_flags = MMAC_SLOT_USED; 851 } else { 852 maddr->mma_flags = MMAC_SLOT_UNUSED; 853 } 854 mutex_exit(bgep->genlock); 855 856 return (0); 857 } 858 859 extern void bge_wake_factotum(bge_t *); 860 861 static boolean_t 862 bge_param_locked(mac_prop_id_t pr_num) 863 { 864 /* 865 * All adv_* parameters are locked (read-only) while 866 * the device is in any sort of loopback mode ... 867 */ 868 switch (pr_num) { 869 case DLD_PROP_ADV_1000FDX_CAP: 870 case DLD_PROP_EN_1000FDX_CAP: 871 case DLD_PROP_ADV_1000HDX_CAP: 872 case DLD_PROP_EN_1000HDX_CAP: 873 case DLD_PROP_ADV_100FDX_CAP: 874 case DLD_PROP_EN_100FDX_CAP: 875 case DLD_PROP_ADV_100HDX_CAP: 876 case DLD_PROP_EN_100HDX_CAP: 877 case DLD_PROP_ADV_10FDX_CAP: 878 case DLD_PROP_EN_10FDX_CAP: 879 case DLD_PROP_ADV_10HDX_CAP: 880 case DLD_PROP_EN_10HDX_CAP: 881 case DLD_PROP_AUTONEG: 882 case DLD_PROP_FLOWCTRL: 883 return (B_TRUE); 884 } 885 return (B_FALSE); 886 } 887 /* 888 * callback functions for set/get of properties 889 */ 890 static int 891 bge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num, 892 uint_t pr_valsize, const void *pr_val) 893 { 894 bge_t *bgep = barg; 895 int err = 0; 896 uint32_t cur_mtu, new_mtu; 897 uint_t maxsdu; 898 link_flowctrl_t fl; 899 900 mutex_enter(bgep->genlock); 901 if (bgep->param_loop_mode != BGE_LOOP_NONE && 902 bge_param_locked(pr_num)) { 903 /* 904 * All adv_* parameters are locked (read-only) 905 * while the device is in any sort of loopback mode. 906 */ 907 mutex_exit(bgep->genlock); 908 return (EBUSY); 909 } 910 if ((bgep->chipid.flags & CHIP_FLAG_SERDES) && 911 ((pr_num == DLD_PROP_EN_100FDX_CAP) || 912 (pr_num == DLD_PROP_EN_100FDX_CAP) || 913 (pr_num == DLD_PROP_EN_10FDX_CAP) || 914 (pr_num == DLD_PROP_EN_10HDX_CAP))) { 915 /* 916 * these properties are read/write on copper, 917 * read-only and 0 on serdes 918 */ 919 mutex_exit(bgep->genlock); 920 return (ENOTSUP); 921 } 922 923 switch (pr_num) { 924 case DLD_PROP_EN_1000FDX_CAP: 925 bgep->param_en_1000fdx = *(uint8_t *)pr_val; 926 bgep->param_adv_1000fdx = *(uint8_t *)pr_val; 927 goto reprogram; 928 case DLD_PROP_EN_1000HDX_CAP: 929 bgep->param_en_1000hdx = *(uint8_t *)pr_val; 930 bgep->param_adv_1000hdx = *(uint8_t *)pr_val; 931 goto reprogram; 932 case DLD_PROP_EN_100FDX_CAP: 933 bgep->param_en_100fdx = *(uint8_t *)pr_val; 934 bgep->param_adv_100fdx = *(uint8_t *)pr_val; 935 goto reprogram; 936 case DLD_PROP_EN_100HDX_CAP: 937 bgep->param_en_100hdx = *(uint8_t *)pr_val; 938 bgep->param_adv_100hdx = *(uint8_t *)pr_val; 939 goto reprogram; 940 case DLD_PROP_EN_10FDX_CAP: 941 bgep->param_en_10fdx = *(uint8_t *)pr_val; 942 bgep->param_adv_10fdx = *(uint8_t *)pr_val; 943 goto reprogram; 944 case DLD_PROP_EN_10HDX_CAP: 945 bgep->param_en_10hdx = *(uint8_t *)pr_val; 946 bgep->param_adv_10hdx = *(uint8_t *)pr_val; 947 reprogram: 948 if (err == 0 && bge_reprogram(bgep) == IOC_INVAL) 949 err = EINVAL; 950 break; 951 case DLD_PROP_ADV_1000FDX_CAP: 952 case DLD_PROP_ADV_1000HDX_CAP: 953 case DLD_PROP_ADV_100FDX_CAP: 954 case DLD_PROP_ADV_100HDX_CAP: 955 case DLD_PROP_ADV_10FDX_CAP: 956 case DLD_PROP_ADV_10HDX_CAP: 957 case DLD_PROP_STATUS: 958 case DLD_PROP_SPEED: 959 case DLD_PROP_DUPLEX: 960 err = ENOTSUP; /* read-only prop. Can't set this */ 961 break; 962 case DLD_PROP_AUTONEG: 963 bgep->param_adv_autoneg = *(uint8_t *)pr_val; 964 if (bge_reprogram(bgep) == IOC_INVAL) 965 err = EINVAL; 966 break; 967 case DLD_PROP_MTU: 968 cur_mtu = bgep->chipid.default_mtu; 969 bcopy(pr_val, &new_mtu, sizeof (new_mtu)); 970 971 if (new_mtu == cur_mtu) { 972 err = 0; 973 break; 974 } 975 if (new_mtu < BGE_DEFAULT_MTU || 976 new_mtu > BGE_MAXIMUM_MTU) { 977 err = EINVAL; 978 break; 979 } 980 if ((new_mtu > BGE_DEFAULT_MTU) && 981 (bgep->chipid.flags & CHIP_FLAG_NO_JUMBO)) { 982 err = EINVAL; 983 break; 984 } 985 if (bgep->bge_mac_state == BGE_MAC_STARTED) { 986 err = EBUSY; 987 break; 988 } 989 bgep->chipid.default_mtu = new_mtu; 990 if (bge_chip_id_init(bgep)) { 991 err = EINVAL; 992 break; 993 } 994 maxsdu = bgep->chipid.ethmax_size - 995 sizeof (struct ether_header); 996 err = mac_maxsdu_update(bgep->mh, maxsdu); 997 if (err == 0) { 998 bgep->bge_dma_error = B_TRUE; 999 bgep->manual_reset = B_TRUE; 1000 bge_chip_stop(bgep, B_TRUE); 1001 bge_wake_factotum(bgep); 1002 err = 0; 1003 } 1004 break; 1005 case DLD_PROP_FLOWCTRL: 1006 bcopy(pr_val, &fl, sizeof (fl)); 1007 switch (fl) { 1008 default: 1009 err = ENOTSUP; 1010 break; 1011 case LINK_FLOWCTRL_NONE: 1012 bgep->param_adv_pause = 0; 1013 bgep->param_adv_asym_pause = 0; 1014 1015 bgep->param_link_rx_pause = B_FALSE; 1016 bgep->param_link_tx_pause = B_FALSE; 1017 break; 1018 case LINK_FLOWCTRL_RX: 1019 if (!((bgep->param_lp_pause == 0) && 1020 (bgep->param_lp_asym_pause == 1))) { 1021 err = EINVAL; 1022 break; 1023 } 1024 bgep->param_adv_pause = 1; 1025 bgep->param_adv_asym_pause = 1; 1026 1027 bgep->param_link_rx_pause = B_TRUE; 1028 bgep->param_link_tx_pause = B_FALSE; 1029 break; 1030 case LINK_FLOWCTRL_TX: 1031 if (!((bgep->param_lp_pause == 1) && 1032 (bgep->param_lp_asym_pause == 1))) { 1033 err = EINVAL; 1034 break; 1035 } 1036 bgep->param_adv_pause = 0; 1037 bgep->param_adv_asym_pause = 1; 1038 1039 bgep->param_link_rx_pause = B_FALSE; 1040 bgep->param_link_tx_pause = B_TRUE; 1041 break; 1042 case LINK_FLOWCTRL_BI: 1043 if (bgep->param_lp_pause != 1) { 1044 err = EINVAL; 1045 break; 1046 } 1047 bgep->param_adv_pause = 1; 1048 1049 bgep->param_link_rx_pause = B_TRUE; 1050 bgep->param_link_tx_pause = B_TRUE; 1051 break; 1052 } 1053 1054 if (err == 0) { 1055 if (bge_reprogram(bgep) == IOC_INVAL) 1056 err = EINVAL; 1057 } 1058 1059 break; 1060 case DLD_PROP_PRIVATE: 1061 err = bge_set_priv_prop(bgep, pr_name, pr_valsize, 1062 pr_val); 1063 break; 1064 default: 1065 err = ENOTSUP; 1066 break; 1067 } 1068 mutex_exit(bgep->genlock); 1069 return (err); 1070 } 1071 1072 static int 1073 bge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num, 1074 uint_t pr_flags, uint_t pr_valsize, void *pr_val) 1075 { 1076 bge_t *bgep = barg; 1077 int err = 0; 1078 link_flowctrl_t fl; 1079 uint64_t speed; 1080 int flags = bgep->chipid.flags; 1081 boolean_t is_default = (pr_flags & DLD_DEFAULT); 1082 1083 if (pr_valsize == 0) 1084 return (EINVAL); 1085 bzero(pr_val, pr_valsize); 1086 switch (pr_num) { 1087 case DLD_PROP_DUPLEX: 1088 if (pr_valsize < sizeof (link_duplex_t)) 1089 return (EINVAL); 1090 bcopy(&bgep->param_link_duplex, pr_val, 1091 sizeof (link_duplex_t)); 1092 break; 1093 case DLD_PROP_SPEED: 1094 if (pr_valsize < sizeof (speed)) 1095 return (EINVAL); 1096 speed = bgep->param_link_speed * 1000000ull; 1097 bcopy(&speed, pr_val, sizeof (speed)); 1098 break; 1099 case DLD_PROP_STATUS: 1100 if (pr_valsize < sizeof (link_state_t)) 1101 return (EINVAL); 1102 bcopy(&bgep->link_state, pr_val, 1103 sizeof (link_state_t)); 1104 break; 1105 case DLD_PROP_AUTONEG: 1106 if (is_default) 1107 *(uint8_t *)pr_val = 1; 1108 else 1109 *(uint8_t *)pr_val = bgep->param_adv_autoneg; 1110 break; 1111 case DLD_PROP_FLOWCTRL: 1112 if (pr_valsize < sizeof (fl)) 1113 return (EINVAL); 1114 if (is_default) { 1115 fl = LINK_FLOWCTRL_BI; 1116 bcopy(&fl, pr_val, sizeof (fl)); 1117 break; 1118 } 1119 1120 if (bgep->param_link_rx_pause && 1121 !bgep->param_link_tx_pause) 1122 fl = LINK_FLOWCTRL_RX; 1123 1124 if (!bgep->param_link_rx_pause && 1125 !bgep->param_link_tx_pause) 1126 fl = LINK_FLOWCTRL_NONE; 1127 1128 if (!bgep->param_link_rx_pause && 1129 bgep->param_link_tx_pause) 1130 fl = LINK_FLOWCTRL_TX; 1131 1132 if (bgep->param_link_rx_pause && 1133 bgep->param_link_tx_pause) 1134 fl = LINK_FLOWCTRL_BI; 1135 bcopy(&fl, pr_val, sizeof (fl)); 1136 break; 1137 case DLD_PROP_ADV_1000FDX_CAP: 1138 if (is_default) 1139 *(uint8_t *)pr_val = 1; 1140 else 1141 *(uint8_t *)pr_val = bgep->param_adv_1000fdx; 1142 break; 1143 case DLD_PROP_EN_1000FDX_CAP: 1144 if (is_default) 1145 *(uint8_t *)pr_val = 1; 1146 else 1147 *(uint8_t *)pr_val = bgep->param_en_1000fdx; 1148 break; 1149 case DLD_PROP_ADV_1000HDX_CAP: 1150 if (is_default) 1151 *(uint8_t *)pr_val = 1; 1152 else 1153 *(uint8_t *)pr_val = bgep->param_adv_1000hdx; 1154 break; 1155 case DLD_PROP_EN_1000HDX_CAP: 1156 if (is_default) 1157 *(uint8_t *)pr_val = 1; 1158 else 1159 *(uint8_t *)pr_val = bgep->param_en_1000hdx; 1160 break; 1161 case DLD_PROP_ADV_100FDX_CAP: 1162 if (is_default) { 1163 *(uint8_t *)pr_val = 1164 ((flags & CHIP_FLAG_SERDES) ? 0 : 1); 1165 } else { 1166 *(uint8_t *)pr_val = bgep->param_adv_100fdx; 1167 } 1168 break; 1169 case DLD_PROP_EN_100FDX_CAP: 1170 if (is_default) { 1171 *(uint8_t *)pr_val = 1172 ((flags & CHIP_FLAG_SERDES) ? 0 : 1); 1173 } else { 1174 *(uint8_t *)pr_val = bgep->param_en_100fdx; 1175 } 1176 break; 1177 case DLD_PROP_ADV_100HDX_CAP: 1178 if (is_default) { 1179 *(uint8_t *)pr_val = 1180 ((flags & CHIP_FLAG_SERDES) ? 0 : 1); 1181 } else { 1182 *(uint8_t *)pr_val = bgep->param_adv_100hdx; 1183 } 1184 break; 1185 case DLD_PROP_EN_100HDX_CAP: 1186 if (is_default) { 1187 *(uint8_t *)pr_val = 1188 ((flags & CHIP_FLAG_SERDES) ? 0 : 1); 1189 } else { 1190 *(uint8_t *)pr_val = bgep->param_en_100hdx; 1191 } 1192 break; 1193 case DLD_PROP_ADV_10FDX_CAP: 1194 if (is_default) { 1195 *(uint8_t *)pr_val = 1196 ((flags & CHIP_FLAG_SERDES) ? 0 : 1); 1197 } else { 1198 *(uint8_t *)pr_val = bgep->param_adv_10fdx; 1199 } 1200 break; 1201 case DLD_PROP_EN_10FDX_CAP: 1202 if (is_default) { 1203 *(uint8_t *)pr_val = 1204 ((flags & CHIP_FLAG_SERDES) ? 0 : 1); 1205 } else { 1206 *(uint8_t *)pr_val = bgep->param_en_10fdx; 1207 } 1208 break; 1209 case DLD_PROP_ADV_10HDX_CAP: 1210 if (is_default) { 1211 *(uint8_t *)pr_val = 1212 ((flags & CHIP_FLAG_SERDES) ? 0 : 1); 1213 } else { 1214 *(uint8_t *)pr_val = bgep->param_adv_10hdx; 1215 } 1216 break; 1217 case DLD_PROP_EN_10HDX_CAP: 1218 if (is_default) { 1219 *(uint8_t *)pr_val = 1220 ((flags & CHIP_FLAG_SERDES) ? 0 : 1); 1221 } else { 1222 *(uint8_t *)pr_val = bgep->param_en_10hdx; 1223 } 1224 break; 1225 case DLD_PROP_ADV_100T4_CAP: 1226 case DLD_PROP_EN_100T4_CAP: 1227 *(uint8_t *)pr_val = 0; 1228 break; 1229 case DLD_PROP_PRIVATE: 1230 err = bge_get_priv_prop(bgep, pr_name, pr_flags, 1231 pr_valsize, pr_val); 1232 return (err); 1233 default: 1234 return (ENOTSUP); 1235 } 1236 return (0); 1237 } 1238 1239 /* ARGSUSED */ 1240 static int 1241 bge_set_priv_prop(bge_t *bgep, const char *pr_name, uint_t pr_valsize, 1242 const void *pr_val) 1243 { 1244 int err = 0; 1245 long result; 1246 1247 if (strcmp(pr_name, "_adv_pause_cap") == 0) { 1248 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 1249 if (result > 1 || result < 0) { 1250 err = EINVAL; 1251 } else { 1252 bgep->param_adv_pause = result; 1253 if (bge_reprogram(bgep) == IOC_INVAL) 1254 err = EINVAL; 1255 } 1256 return (err); 1257 } 1258 if (strcmp(pr_name, "_adv_asym_pause_cap") == 0) { 1259 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 1260 if (result > 1 || result < 0) { 1261 err = EINVAL; 1262 } else { 1263 bgep->param_adv_asym_pause = result; 1264 if (bge_reprogram(bgep) == IOC_INVAL) 1265 err = EINVAL; 1266 } 1267 return (err); 1268 } 1269 if (strcmp(pr_name, "_drain_max") == 0) { 1270 1271 /* 1272 * on the Tx side, we need to update the h/w register for 1273 * real packet transmission per packet. The drain_max parameter 1274 * is used to reduce the register access. This parameter 1275 * controls the max number of packets that we will hold before 1276 * updating the bge h/w to trigger h/w transmit. The bge 1277 * chipset usually has a max of 512 Tx descriptors, thus 1278 * the upper bound on drain_max is 512. 1279 */ 1280 if (pr_val == NULL) { 1281 err = EINVAL; 1282 return (err); 1283 } 1284 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 1285 if (result > 512 || result < 1) 1286 err = EINVAL; 1287 else { 1288 bgep->param_drain_max = (uint32_t)result; 1289 if (bge_reprogram(bgep) == IOC_INVAL) 1290 err = EINVAL; 1291 } 1292 return (err); 1293 } 1294 if (strcmp(pr_name, "_msi_cnt") == 0) { 1295 1296 if (pr_val == NULL) { 1297 err = EINVAL; 1298 return (err); 1299 } 1300 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 1301 if (result > 7 || result < 0) 1302 err = EINVAL; 1303 else { 1304 bgep->param_msi_cnt = (uint32_t)result; 1305 if (bge_reprogram(bgep) == IOC_INVAL) 1306 err = EINVAL; 1307 } 1308 return (err); 1309 } 1310 if (strcmp(pr_name, "_intr_coalesce_blank_time") == 0) { 1311 if (ddi_strtol(pr_val, (char **)NULL, 0, &result) != 0) 1312 return (EINVAL); 1313 1314 bgep->chipid.rx_ticks_norm = result; 1315 return (0); 1316 } 1317 1318 if (strcmp(pr_name, "_intr_coalesce_pkt_cnt") == 0) { 1319 if (ddi_strtol(pr_val, (char **)NULL, 0, &result) != 0) 1320 return (EINVAL); 1321 1322 bgep->chipid.rx_count_norm = result; 1323 return (0); 1324 } 1325 return (ENOTSUP); 1326 } 1327 1328 static int 1329 bge_get_priv_prop(bge_t *bge, const char *pr_name, uint_t pr_flags, 1330 uint_t pr_valsize, void *pr_val) 1331 { 1332 int err = ENOTSUP; 1333 boolean_t is_default = (pr_flags & DLD_DEFAULT); 1334 int value; 1335 1336 if (strcmp(pr_name, "_adv_pause_cap") == 0) { 1337 value = (is_default? 1 : bge->param_adv_pause); 1338 err = 0; 1339 goto done; 1340 } 1341 if (strcmp(pr_name, "_adv_asym_pause_cap") == 0) { 1342 value = (is_default? 1 : bge->param_adv_asym_pause); 1343 err = 0; 1344 goto done; 1345 } 1346 if (strcmp(pr_name, "_drain_max") == 0) { 1347 value = (is_default? 64 : bge->param_drain_max); 1348 err = 0; 1349 goto done; 1350 } 1351 if (strcmp(pr_name, "_msi_cnt") == 0) { 1352 value = (is_default? 0 : bge->param_msi_cnt); 1353 err = 0; 1354 goto done; 1355 } 1356 1357 if (strcmp(pr_name, "_intr_coalesce_blank_time") == 0) { 1358 value = (is_default? bge_rx_ticks_norm : 1359 bge->chipid.rx_ticks_norm); 1360 err = 0; 1361 goto done; 1362 } 1363 1364 if (strcmp(pr_name, "_intr_coalesce_pkt_cnt") == 0) { 1365 value = (is_default? bge_rx_count_norm : 1366 bge->chipid.rx_count_norm); 1367 err = 0; 1368 goto done; 1369 } 1370 1371 done: 1372 if (err == 0) { 1373 (void) snprintf(pr_val, pr_valsize, "%d", value); 1374 } 1375 return (err); 1376 } 1377 1378 /* 1379 * Compute the index of the required bit in the multicast hash map. 1380 * This must mirror the way the hardware actually does it! 1381 * See Broadcom document 570X-PG102-R page 125. 1382 */ 1383 static uint32_t 1384 bge_hash_index(const uint8_t *mca) 1385 { 1386 uint32_t hash; 1387 1388 CRC32(hash, mca, ETHERADDRL, -1U, crc32_table); 1389 1390 return (hash); 1391 } 1392 1393 /* 1394 * bge_m_multicst_add() -- enable/disable a multicast address 1395 */ 1396 static int 1397 bge_m_multicst(void *arg, boolean_t add, const uint8_t *mca) 1398 { 1399 bge_t *bgep = arg; /* private device info */ 1400 uint32_t hash; 1401 uint32_t index; 1402 uint32_t word; 1403 uint32_t bit; 1404 uint8_t *refp; 1405 1406 BGE_TRACE(("bge_m_multicst($%p, %s, %s)", arg, 1407 (add) ? "add" : "remove", ether_sprintf((void *)mca))); 1408 1409 /* 1410 * Precalculate all required masks, pointers etc ... 1411 */ 1412 hash = bge_hash_index(mca); 1413 index = hash % BGE_HASH_TABLE_SIZE; 1414 word = index/32u; 1415 bit = 1 << (index % 32u); 1416 refp = &bgep->mcast_refs[index]; 1417 1418 BGE_DEBUG(("bge_m_multicst: hash 0x%x index %d (%d:0x%x) = %d", 1419 hash, index, word, bit, *refp)); 1420 1421 /* 1422 * We must set the appropriate bit in the hash map (and the 1423 * corresponding h/w register) when the refcount goes from 0 1424 * to >0, and clear it when the last ref goes away (refcount 1425 * goes from >0 back to 0). If we change the hash map, we 1426 * must also update the chip's hardware map registers. 1427 */ 1428 mutex_enter(bgep->genlock); 1429 if (!(bgep->progress & PROGRESS_INTR)) { 1430 /* can happen during autorecovery */ 1431 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 1432 mutex_exit(bgep->genlock); 1433 return (EIO); 1434 } 1435 if (add) { 1436 if ((*refp)++ == 0) { 1437 bgep->mcast_hash[word] |= bit; 1438 #ifdef BGE_IPMI_ASF 1439 if (bge_chip_sync(bgep, B_TRUE) == DDI_FAILURE) { 1440 #else 1441 if (bge_chip_sync(bgep) == DDI_FAILURE) { 1442 #endif 1443 (void) bge_check_acc_handle(bgep, 1444 bgep->cfg_handle); 1445 (void) bge_check_acc_handle(bgep, 1446 bgep->io_handle); 1447 ddi_fm_service_impact(bgep->devinfo, 1448 DDI_SERVICE_DEGRADED); 1449 mutex_exit(bgep->genlock); 1450 return (EIO); 1451 } 1452 } 1453 } else { 1454 if (--(*refp) == 0) { 1455 bgep->mcast_hash[word] &= ~bit; 1456 #ifdef BGE_IPMI_ASF 1457 if (bge_chip_sync(bgep, B_TRUE) == DDI_FAILURE) { 1458 #else 1459 if (bge_chip_sync(bgep) == DDI_FAILURE) { 1460 #endif 1461 (void) bge_check_acc_handle(bgep, 1462 bgep->cfg_handle); 1463 (void) bge_check_acc_handle(bgep, 1464 bgep->io_handle); 1465 ddi_fm_service_impact(bgep->devinfo, 1466 DDI_SERVICE_DEGRADED); 1467 mutex_exit(bgep->genlock); 1468 return (EIO); 1469 } 1470 } 1471 } 1472 BGE_DEBUG(("bge_m_multicst($%p) done", arg)); 1473 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) { 1474 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 1475 mutex_exit(bgep->genlock); 1476 return (EIO); 1477 } 1478 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) { 1479 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 1480 mutex_exit(bgep->genlock); 1481 return (EIO); 1482 } 1483 mutex_exit(bgep->genlock); 1484 1485 return (0); 1486 } 1487 1488 /* 1489 * bge_m_promisc() -- set or reset promiscuous mode on the board 1490 * 1491 * Program the hardware to enable/disable promiscuous and/or 1492 * receive-all-multicast modes. 1493 */ 1494 static int 1495 bge_m_promisc(void *arg, boolean_t on) 1496 { 1497 bge_t *bgep = arg; 1498 1499 BGE_TRACE(("bge_m_promisc_set($%p, %d)", arg, on)); 1500 1501 /* 1502 * Store MAC layer specified mode and pass to chip layer to update h/w 1503 */ 1504 mutex_enter(bgep->genlock); 1505 if (!(bgep->progress & PROGRESS_INTR)) { 1506 /* can happen during autorecovery */ 1507 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 1508 mutex_exit(bgep->genlock); 1509 return (EIO); 1510 } 1511 bgep->promisc = on; 1512 #ifdef BGE_IPMI_ASF 1513 if (bge_chip_sync(bgep, B_TRUE) == DDI_FAILURE) { 1514 #else 1515 if (bge_chip_sync(bgep) == DDI_FAILURE) { 1516 #endif 1517 (void) bge_check_acc_handle(bgep, bgep->cfg_handle); 1518 (void) bge_check_acc_handle(bgep, bgep->io_handle); 1519 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 1520 mutex_exit(bgep->genlock); 1521 return (EIO); 1522 } 1523 BGE_DEBUG(("bge_m_promisc_set($%p) done", arg)); 1524 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) { 1525 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 1526 mutex_exit(bgep->genlock); 1527 return (EIO); 1528 } 1529 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) { 1530 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 1531 mutex_exit(bgep->genlock); 1532 return (EIO); 1533 } 1534 mutex_exit(bgep->genlock); 1535 return (0); 1536 } 1537 1538 /*ARGSUSED*/ 1539 static boolean_t 1540 bge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data) 1541 { 1542 bge_t *bgep = arg; 1543 1544 switch (cap) { 1545 case MAC_CAPAB_HCKSUM: { 1546 uint32_t *txflags = cap_data; 1547 1548 *txflags = HCKSUM_INET_FULL_V4 | HCKSUM_IPHDRCKSUM; 1549 break; 1550 } 1551 1552 case MAC_CAPAB_POLL: 1553 /* 1554 * There's nothing for us to fill in, simply returning 1555 * B_TRUE stating that we support polling is sufficient. 1556 */ 1557 break; 1558 1559 case MAC_CAPAB_MULTIADDRESS: { 1560 multiaddress_capab_t *mmacp = cap_data; 1561 1562 mutex_enter(bgep->genlock); 1563 /* 1564 * The number of MAC addresses made available by 1565 * this capability is one less than the total as 1566 * the primary address in slot 0 is counted in 1567 * the total. 1568 */ 1569 mmacp->maddr_naddr = bgep->unicst_addr_total - 1; 1570 mmacp->maddr_naddrfree = bgep->unicst_addr_avail; 1571 /* No multiple factory addresses, set mma_flag to 0 */ 1572 mmacp->maddr_flag = 0; 1573 mmacp->maddr_handle = bgep; 1574 mmacp->maddr_add = bge_m_unicst_add; 1575 mmacp->maddr_remove = bge_m_unicst_remove; 1576 mmacp->maddr_modify = bge_m_unicst_modify; 1577 mmacp->maddr_get = bge_m_unicst_get; 1578 mmacp->maddr_reserve = NULL; 1579 mutex_exit(bgep->genlock); 1580 break; 1581 } 1582 1583 default: 1584 return (B_FALSE); 1585 } 1586 return (B_TRUE); 1587 } 1588 1589 /* 1590 * Loopback ioctl code 1591 */ 1592 1593 static lb_property_t loopmodes[] = { 1594 { normal, "normal", BGE_LOOP_NONE }, 1595 { external, "1000Mbps", BGE_LOOP_EXTERNAL_1000 }, 1596 { external, "100Mbps", BGE_LOOP_EXTERNAL_100 }, 1597 { external, "10Mbps", BGE_LOOP_EXTERNAL_10 }, 1598 { internal, "PHY", BGE_LOOP_INTERNAL_PHY }, 1599 { internal, "MAC", BGE_LOOP_INTERNAL_MAC } 1600 }; 1601 1602 static enum ioc_reply 1603 bge_set_loop_mode(bge_t *bgep, uint32_t mode) 1604 { 1605 /* 1606 * If the mode isn't being changed, there's nothing to do ... 1607 */ 1608 if (mode == bgep->param_loop_mode) 1609 return (IOC_ACK); 1610 1611 /* 1612 * Validate the requested mode and prepare a suitable message 1613 * to explain the link down/up cycle that the change will 1614 * probably induce ... 1615 */ 1616 switch (mode) { 1617 default: 1618 return (IOC_INVAL); 1619 1620 case BGE_LOOP_NONE: 1621 case BGE_LOOP_EXTERNAL_1000: 1622 case BGE_LOOP_EXTERNAL_100: 1623 case BGE_LOOP_EXTERNAL_10: 1624 case BGE_LOOP_INTERNAL_PHY: 1625 case BGE_LOOP_INTERNAL_MAC: 1626 break; 1627 } 1628 1629 /* 1630 * All OK; tell the caller to reprogram 1631 * the PHY and/or MAC for the new mode ... 1632 */ 1633 bgep->param_loop_mode = mode; 1634 return (IOC_RESTART_ACK); 1635 } 1636 1637 static enum ioc_reply 1638 bge_loop_ioctl(bge_t *bgep, queue_t *wq, mblk_t *mp, struct iocblk *iocp) 1639 { 1640 lb_info_sz_t *lbsp; 1641 lb_property_t *lbpp; 1642 uint32_t *lbmp; 1643 int cmd; 1644 1645 _NOTE(ARGUNUSED(wq)) 1646 1647 /* 1648 * Validate format of ioctl 1649 */ 1650 if (mp->b_cont == NULL) 1651 return (IOC_INVAL); 1652 1653 cmd = iocp->ioc_cmd; 1654 switch (cmd) { 1655 default: 1656 /* NOTREACHED */ 1657 bge_error(bgep, "bge_loop_ioctl: invalid cmd 0x%x", cmd); 1658 return (IOC_INVAL); 1659 1660 case LB_GET_INFO_SIZE: 1661 if (iocp->ioc_count != sizeof (lb_info_sz_t)) 1662 return (IOC_INVAL); 1663 lbsp = (lb_info_sz_t *)mp->b_cont->b_rptr; 1664 *lbsp = sizeof (loopmodes); 1665 return (IOC_REPLY); 1666 1667 case LB_GET_INFO: 1668 if (iocp->ioc_count != sizeof (loopmodes)) 1669 return (IOC_INVAL); 1670 lbpp = (lb_property_t *)mp->b_cont->b_rptr; 1671 bcopy(loopmodes, lbpp, sizeof (loopmodes)); 1672 return (IOC_REPLY); 1673 1674 case LB_GET_MODE: 1675 if (iocp->ioc_count != sizeof (uint32_t)) 1676 return (IOC_INVAL); 1677 lbmp = (uint32_t *)mp->b_cont->b_rptr; 1678 *lbmp = bgep->param_loop_mode; 1679 return (IOC_REPLY); 1680 1681 case LB_SET_MODE: 1682 if (iocp->ioc_count != sizeof (uint32_t)) 1683 return (IOC_INVAL); 1684 lbmp = (uint32_t *)mp->b_cont->b_rptr; 1685 return (bge_set_loop_mode(bgep, *lbmp)); 1686 } 1687 } 1688 1689 /* 1690 * Specific bge IOCTLs, the gld module handles the generic ones. 1691 */ 1692 static void 1693 bge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp) 1694 { 1695 bge_t *bgep = arg; 1696 struct iocblk *iocp; 1697 enum ioc_reply status; 1698 boolean_t need_privilege; 1699 int err; 1700 int cmd; 1701 1702 /* 1703 * Validate the command before bothering with the mutex ... 1704 */ 1705 iocp = (struct iocblk *)mp->b_rptr; 1706 iocp->ioc_error = 0; 1707 need_privilege = B_TRUE; 1708 cmd = iocp->ioc_cmd; 1709 switch (cmd) { 1710 default: 1711 miocnak(wq, mp, 0, EINVAL); 1712 return; 1713 1714 case BGE_MII_READ: 1715 case BGE_MII_WRITE: 1716 case BGE_SEE_READ: 1717 case BGE_SEE_WRITE: 1718 case BGE_FLASH_READ: 1719 case BGE_FLASH_WRITE: 1720 case BGE_DIAG: 1721 case BGE_PEEK: 1722 case BGE_POKE: 1723 case BGE_PHY_RESET: 1724 case BGE_SOFT_RESET: 1725 case BGE_HARD_RESET: 1726 break; 1727 1728 case LB_GET_INFO_SIZE: 1729 case LB_GET_INFO: 1730 case LB_GET_MODE: 1731 need_privilege = B_FALSE; 1732 /* FALLTHRU */ 1733 case LB_SET_MODE: 1734 break; 1735 1736 } 1737 1738 if (need_privilege) { 1739 /* 1740 * Check for specific net_config privilege on Solaris 10+. 1741 */ 1742 err = secpolicy_net_config(iocp->ioc_cr, B_FALSE); 1743 if (err != 0) { 1744 miocnak(wq, mp, 0, err); 1745 return; 1746 } 1747 } 1748 1749 mutex_enter(bgep->genlock); 1750 if (!(bgep->progress & PROGRESS_INTR)) { 1751 /* can happen during autorecovery */ 1752 mutex_exit(bgep->genlock); 1753 miocnak(wq, mp, 0, EIO); 1754 return; 1755 } 1756 1757 switch (cmd) { 1758 default: 1759 _NOTE(NOTREACHED) 1760 status = IOC_INVAL; 1761 break; 1762 1763 case BGE_MII_READ: 1764 case BGE_MII_WRITE: 1765 case BGE_SEE_READ: 1766 case BGE_SEE_WRITE: 1767 case BGE_FLASH_READ: 1768 case BGE_FLASH_WRITE: 1769 case BGE_DIAG: 1770 case BGE_PEEK: 1771 case BGE_POKE: 1772 case BGE_PHY_RESET: 1773 case BGE_SOFT_RESET: 1774 case BGE_HARD_RESET: 1775 status = bge_chip_ioctl(bgep, wq, mp, iocp); 1776 break; 1777 1778 case LB_GET_INFO_SIZE: 1779 case LB_GET_INFO: 1780 case LB_GET_MODE: 1781 case LB_SET_MODE: 1782 status = bge_loop_ioctl(bgep, wq, mp, iocp); 1783 break; 1784 1785 } 1786 1787 /* 1788 * Do we need to reprogram the PHY and/or the MAC? 1789 * Do it now, while we still have the mutex. 1790 * 1791 * Note: update the PHY first, 'cos it controls the 1792 * speed/duplex parameters that the MAC code uses. 1793 */ 1794 switch (status) { 1795 case IOC_RESTART_REPLY: 1796 case IOC_RESTART_ACK: 1797 if (bge_reprogram(bgep) == IOC_INVAL) 1798 status = IOC_INVAL; 1799 break; 1800 } 1801 1802 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) { 1803 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 1804 status = IOC_INVAL; 1805 } 1806 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) { 1807 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 1808 status = IOC_INVAL; 1809 } 1810 mutex_exit(bgep->genlock); 1811 1812 /* 1813 * Finally, decide how to reply 1814 */ 1815 switch (status) { 1816 default: 1817 case IOC_INVAL: 1818 /* 1819 * Error, reply with a NAK and EINVAL or the specified error 1820 */ 1821 miocnak(wq, mp, 0, iocp->ioc_error == 0 ? 1822 EINVAL : iocp->ioc_error); 1823 break; 1824 1825 case IOC_DONE: 1826 /* 1827 * OK, reply already sent 1828 */ 1829 break; 1830 1831 case IOC_RESTART_ACK: 1832 case IOC_ACK: 1833 /* 1834 * OK, reply with an ACK 1835 */ 1836 miocack(wq, mp, 0, 0); 1837 break; 1838 1839 case IOC_RESTART_REPLY: 1840 case IOC_REPLY: 1841 /* 1842 * OK, send prepared reply as ACK or NAK 1843 */ 1844 mp->b_datap->db_type = iocp->ioc_error == 0 ? 1845 M_IOCACK : M_IOCNAK; 1846 qreply(wq, mp); 1847 break; 1848 } 1849 } 1850 1851 static void 1852 bge_resources_add(bge_t *bgep, time_t time, uint_t pkt_cnt) 1853 { 1854 1855 recv_ring_t *rrp; 1856 mac_rx_fifo_t mrf; 1857 int ring; 1858 1859 /* 1860 * Register Rx rings as resources and save mac 1861 * resource id for future reference 1862 */ 1863 mrf.mrf_type = MAC_RX_FIFO; 1864 mrf.mrf_blank = bge_chip_blank; 1865 mrf.mrf_arg = (void *)bgep; 1866 mrf.mrf_normal_blank_time = time; 1867 mrf.mrf_normal_pkt_count = pkt_cnt; 1868 1869 for (ring = 0; ring < bgep->chipid.rx_rings; ring++) { 1870 rrp = &bgep->recv[ring]; 1871 rrp->handle = mac_resource_add(bgep->mh, 1872 (mac_resource_t *)&mrf); 1873 } 1874 } 1875 1876 static void 1877 bge_m_resources(void *arg) 1878 { 1879 bge_t *bgep = arg; 1880 1881 mutex_enter(bgep->genlock); 1882 1883 bge_resources_add(bgep, bgep->chipid.rx_ticks_norm, 1884 bgep->chipid.rx_count_norm); 1885 mutex_exit(bgep->genlock); 1886 } 1887 1888 /* 1889 * ========== Per-instance setup/teardown code ========== 1890 */ 1891 1892 #undef BGE_DBG 1893 #define BGE_DBG BGE_DBG_INIT /* debug flag for this code */ 1894 /* 1895 * Allocate an area of memory and a DMA handle for accessing it 1896 */ 1897 static int 1898 bge_alloc_dma_mem(bge_t *bgep, size_t memsize, ddi_device_acc_attr_t *attr_p, 1899 uint_t dma_flags, dma_area_t *dma_p) 1900 { 1901 caddr_t va; 1902 int err; 1903 1904 BGE_TRACE(("bge_alloc_dma_mem($%p, %ld, $%p, 0x%x, $%p)", 1905 (void *)bgep, memsize, attr_p, dma_flags, dma_p)); 1906 1907 /* 1908 * Allocate handle 1909 */ 1910 err = ddi_dma_alloc_handle(bgep->devinfo, &dma_attr, 1911 DDI_DMA_DONTWAIT, NULL, &dma_p->dma_hdl); 1912 if (err != DDI_SUCCESS) 1913 return (DDI_FAILURE); 1914 1915 /* 1916 * Allocate memory 1917 */ 1918 err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize, attr_p, 1919 dma_flags, DDI_DMA_DONTWAIT, NULL, &va, &dma_p->alength, 1920 &dma_p->acc_hdl); 1921 if (err != DDI_SUCCESS) 1922 return (DDI_FAILURE); 1923 1924 /* 1925 * Bind the two together 1926 */ 1927 dma_p->mem_va = va; 1928 err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL, 1929 va, dma_p->alength, dma_flags, DDI_DMA_DONTWAIT, NULL, 1930 &dma_p->cookie, &dma_p->ncookies); 1931 1932 BGE_DEBUG(("bge_alloc_dma_mem(): bind %d bytes; err %d, %d cookies", 1933 dma_p->alength, err, dma_p->ncookies)); 1934 1935 if (err != DDI_DMA_MAPPED || dma_p->ncookies != 1) 1936 return (DDI_FAILURE); 1937 1938 dma_p->nslots = ~0U; 1939 dma_p->size = ~0U; 1940 dma_p->token = ~0U; 1941 dma_p->offset = 0; 1942 return (DDI_SUCCESS); 1943 } 1944 1945 /* 1946 * Free one allocated area of DMAable memory 1947 */ 1948 static void 1949 bge_free_dma_mem(dma_area_t *dma_p) 1950 { 1951 if (dma_p->dma_hdl != NULL) { 1952 if (dma_p->ncookies) { 1953 (void) ddi_dma_unbind_handle(dma_p->dma_hdl); 1954 dma_p->ncookies = 0; 1955 } 1956 ddi_dma_free_handle(&dma_p->dma_hdl); 1957 dma_p->dma_hdl = NULL; 1958 } 1959 1960 if (dma_p->acc_hdl != NULL) { 1961 ddi_dma_mem_free(&dma_p->acc_hdl); 1962 dma_p->acc_hdl = NULL; 1963 } 1964 } 1965 /* 1966 * Utility routine to carve a slice off a chunk of allocated memory, 1967 * updating the chunk descriptor accordingly. The size of the slice 1968 * is given by the product of the <qty> and <size> parameters. 1969 */ 1970 static void 1971 bge_slice_chunk(dma_area_t *slice, dma_area_t *chunk, 1972 uint32_t qty, uint32_t size) 1973 { 1974 static uint32_t sequence = 0xbcd5704a; 1975 size_t totsize; 1976 1977 totsize = qty*size; 1978 ASSERT(size >= 0); 1979 ASSERT(totsize <= chunk->alength); 1980 1981 *slice = *chunk; 1982 slice->nslots = qty; 1983 slice->size = size; 1984 slice->alength = totsize; 1985 slice->token = ++sequence; 1986 1987 chunk->mem_va = (caddr_t)chunk->mem_va + totsize; 1988 chunk->alength -= totsize; 1989 chunk->offset += totsize; 1990 chunk->cookie.dmac_laddress += totsize; 1991 chunk->cookie.dmac_size -= totsize; 1992 } 1993 1994 /* 1995 * Initialise the specified Receive Producer (Buffer) Ring, using 1996 * the information in the <dma_area> descriptors that it contains 1997 * to set up all the other fields. This routine should be called 1998 * only once for each ring. 1999 */ 2000 static void 2001 bge_init_buff_ring(bge_t *bgep, uint64_t ring) 2002 { 2003 buff_ring_t *brp; 2004 bge_status_t *bsp; 2005 sw_rbd_t *srbdp; 2006 dma_area_t pbuf; 2007 uint32_t bufsize; 2008 uint32_t nslots; 2009 uint32_t slot; 2010 uint32_t split; 2011 2012 static bge_regno_t nic_ring_addrs[BGE_BUFF_RINGS_MAX] = { 2013 NIC_MEM_SHADOW_BUFF_STD, 2014 NIC_MEM_SHADOW_BUFF_JUMBO, 2015 NIC_MEM_SHADOW_BUFF_MINI 2016 }; 2017 static bge_regno_t mailbox_regs[BGE_BUFF_RINGS_MAX] = { 2018 RECV_STD_PROD_INDEX_REG, 2019 RECV_JUMBO_PROD_INDEX_REG, 2020 RECV_MINI_PROD_INDEX_REG 2021 }; 2022 static bge_regno_t buff_cons_xref[BGE_BUFF_RINGS_MAX] = { 2023 STATUS_STD_BUFF_CONS_INDEX, 2024 STATUS_JUMBO_BUFF_CONS_INDEX, 2025 STATUS_MINI_BUFF_CONS_INDEX 2026 }; 2027 2028 BGE_TRACE(("bge_init_buff_ring($%p, %d)", 2029 (void *)bgep, ring)); 2030 2031 brp = &bgep->buff[ring]; 2032 nslots = brp->desc.nslots; 2033 ASSERT(brp->buf[0].nslots == nslots/BGE_SPLIT); 2034 bufsize = brp->buf[0].size; 2035 2036 /* 2037 * Set up the copy of the h/w RCB 2038 * 2039 * Note: unlike Send & Receive Return Rings, (where the max_len 2040 * field holds the number of slots), in a Receive Buffer Ring 2041 * this field indicates the size of each buffer in the ring. 2042 */ 2043 brp->hw_rcb.host_ring_addr = brp->desc.cookie.dmac_laddress; 2044 brp->hw_rcb.max_len = bufsize; 2045 brp->hw_rcb.flags = nslots > 0 ? 0 : RCB_FLAG_RING_DISABLED; 2046 brp->hw_rcb.nic_ring_addr = nic_ring_addrs[ring]; 2047 2048 /* 2049 * Other one-off initialisation of per-ring data 2050 */ 2051 brp->bgep = bgep; 2052 bsp = DMA_VPTR(bgep->status_block); 2053 brp->cons_index_p = &bsp->buff_cons_index[buff_cons_xref[ring]]; 2054 brp->chip_mbx_reg = mailbox_regs[ring]; 2055 mutex_init(brp->rf_lock, NULL, MUTEX_DRIVER, 2056 DDI_INTR_PRI(bgep->intr_pri)); 2057 2058 /* 2059 * Allocate the array of s/w Receive Buffer Descriptors 2060 */ 2061 srbdp = kmem_zalloc(nslots*sizeof (*srbdp), KM_SLEEP); 2062 brp->sw_rbds = srbdp; 2063 2064 /* 2065 * Now initialise each array element once and for all 2066 */ 2067 for (split = 0; split < BGE_SPLIT; ++split) { 2068 pbuf = brp->buf[split]; 2069 for (slot = 0; slot < nslots/BGE_SPLIT; ++srbdp, ++slot) 2070 bge_slice_chunk(&srbdp->pbuf, &pbuf, 1, bufsize); 2071 ASSERT(pbuf.alength == 0); 2072 } 2073 } 2074 2075 /* 2076 * Clean up initialisation done above before the memory is freed 2077 */ 2078 static void 2079 bge_fini_buff_ring(bge_t *bgep, uint64_t ring) 2080 { 2081 buff_ring_t *brp; 2082 sw_rbd_t *srbdp; 2083 2084 BGE_TRACE(("bge_fini_buff_ring($%p, %d)", 2085 (void *)bgep, ring)); 2086 2087 brp = &bgep->buff[ring]; 2088 srbdp = brp->sw_rbds; 2089 kmem_free(srbdp, brp->desc.nslots*sizeof (*srbdp)); 2090 2091 mutex_destroy(brp->rf_lock); 2092 } 2093 2094 /* 2095 * Initialise the specified Receive (Return) Ring, using the 2096 * information in the <dma_area> descriptors that it contains 2097 * to set up all the other fields. This routine should be called 2098 * only once for each ring. 2099 */ 2100 static void 2101 bge_init_recv_ring(bge_t *bgep, uint64_t ring) 2102 { 2103 recv_ring_t *rrp; 2104 bge_status_t *bsp; 2105 uint32_t nslots; 2106 2107 BGE_TRACE(("bge_init_recv_ring($%p, %d)", 2108 (void *)bgep, ring)); 2109 2110 /* 2111 * The chip architecture requires that receive return rings have 2112 * 512 or 1024 or 2048 elements per ring. See 570X-PG108-R page 103. 2113 */ 2114 rrp = &bgep->recv[ring]; 2115 nslots = rrp->desc.nslots; 2116 ASSERT(nslots == 0 || nslots == 512 || 2117 nslots == 1024 || nslots == 2048); 2118 2119 /* 2120 * Set up the copy of the h/w RCB 2121 */ 2122 rrp->hw_rcb.host_ring_addr = rrp->desc.cookie.dmac_laddress; 2123 rrp->hw_rcb.max_len = nslots; 2124 rrp->hw_rcb.flags = nslots > 0 ? 0 : RCB_FLAG_RING_DISABLED; 2125 rrp->hw_rcb.nic_ring_addr = 0; 2126 2127 /* 2128 * Other one-off initialisation of per-ring data 2129 */ 2130 rrp->bgep = bgep; 2131 bsp = DMA_VPTR(bgep->status_block); 2132 rrp->prod_index_p = RECV_INDEX_P(bsp, ring); 2133 rrp->chip_mbx_reg = RECV_RING_CONS_INDEX_REG(ring); 2134 mutex_init(rrp->rx_lock, NULL, MUTEX_DRIVER, 2135 DDI_INTR_PRI(bgep->intr_pri)); 2136 } 2137 2138 2139 /* 2140 * Clean up initialisation done above before the memory is freed 2141 */ 2142 static void 2143 bge_fini_recv_ring(bge_t *bgep, uint64_t ring) 2144 { 2145 recv_ring_t *rrp; 2146 2147 BGE_TRACE(("bge_fini_recv_ring($%p, %d)", 2148 (void *)bgep, ring)); 2149 2150 rrp = &bgep->recv[ring]; 2151 if (rrp->rx_softint) 2152 ddi_remove_softintr(rrp->rx_softint); 2153 mutex_destroy(rrp->rx_lock); 2154 } 2155 2156 /* 2157 * Initialise the specified Send Ring, using the information in the 2158 * <dma_area> descriptors that it contains to set up all the other 2159 * fields. This routine should be called only once for each ring. 2160 */ 2161 static void 2162 bge_init_send_ring(bge_t *bgep, uint64_t ring) 2163 { 2164 send_ring_t *srp; 2165 bge_status_t *bsp; 2166 sw_sbd_t *ssbdp; 2167 dma_area_t desc; 2168 dma_area_t pbuf; 2169 uint32_t nslots; 2170 uint32_t slot; 2171 uint32_t split; 2172 sw_txbuf_t *txbuf; 2173 2174 BGE_TRACE(("bge_init_send_ring($%p, %d)", 2175 (void *)bgep, ring)); 2176 2177 /* 2178 * The chip architecture requires that host-based send rings 2179 * have 512 elements per ring. See 570X-PG102-R page 56. 2180 */ 2181 srp = &bgep->send[ring]; 2182 nslots = srp->desc.nslots; 2183 ASSERT(nslots == 0 || nslots == 512); 2184 2185 /* 2186 * Set up the copy of the h/w RCB 2187 */ 2188 srp->hw_rcb.host_ring_addr = srp->desc.cookie.dmac_laddress; 2189 srp->hw_rcb.max_len = nslots; 2190 srp->hw_rcb.flags = nslots > 0 ? 0 : RCB_FLAG_RING_DISABLED; 2191 srp->hw_rcb.nic_ring_addr = NIC_MEM_SHADOW_SEND_RING(ring, nslots); 2192 2193 /* 2194 * Other one-off initialisation of per-ring data 2195 */ 2196 srp->bgep = bgep; 2197 bsp = DMA_VPTR(bgep->status_block); 2198 srp->cons_index_p = SEND_INDEX_P(bsp, ring); 2199 srp->chip_mbx_reg = SEND_RING_HOST_INDEX_REG(ring); 2200 mutex_init(srp->tx_lock, NULL, MUTEX_DRIVER, 2201 DDI_INTR_PRI(bgep->intr_pri)); 2202 mutex_init(srp->txbuf_lock, NULL, MUTEX_DRIVER, 2203 DDI_INTR_PRI(bgep->intr_pri)); 2204 mutex_init(srp->freetxbuf_lock, NULL, MUTEX_DRIVER, 2205 DDI_INTR_PRI(bgep->intr_pri)); 2206 mutex_init(srp->tc_lock, NULL, MUTEX_DRIVER, 2207 DDI_INTR_PRI(bgep->intr_pri)); 2208 if (nslots == 0) 2209 return; 2210 2211 /* 2212 * Allocate the array of s/w Send Buffer Descriptors 2213 */ 2214 ssbdp = kmem_zalloc(nslots*sizeof (*ssbdp), KM_SLEEP); 2215 txbuf = kmem_zalloc(BGE_SEND_BUF_MAX*sizeof (*txbuf), KM_SLEEP); 2216 srp->txbuf_head = 2217 kmem_zalloc(BGE_SEND_BUF_MAX*sizeof (bge_queue_item_t), KM_SLEEP); 2218 srp->pktp = kmem_zalloc(BGE_SEND_BUF_MAX*sizeof (send_pkt_t), KM_SLEEP); 2219 srp->sw_sbds = ssbdp; 2220 srp->txbuf = txbuf; 2221 srp->tx_buffers = BGE_SEND_BUF_NUM; 2222 srp->tx_buffers_low = srp->tx_buffers / 4; 2223 if (bgep->chipid.snd_buff_size > BGE_SEND_BUFF_SIZE_DEFAULT) 2224 srp->tx_array_max = BGE_SEND_BUF_ARRAY_JUMBO; 2225 else 2226 srp->tx_array_max = BGE_SEND_BUF_ARRAY; 2227 srp->tx_array = 1; 2228 2229 /* 2230 * Chunk tx desc area 2231 */ 2232 desc = srp->desc; 2233 for (slot = 0; slot < nslots; ++ssbdp, ++slot) { 2234 bge_slice_chunk(&ssbdp->desc, &desc, 1, 2235 sizeof (bge_sbd_t)); 2236 } 2237 ASSERT(desc.alength == 0); 2238 2239 /* 2240 * Chunk tx buffer area 2241 */ 2242 for (split = 0; split < BGE_SPLIT; ++split) { 2243 pbuf = srp->buf[0][split]; 2244 for (slot = 0; slot < BGE_SEND_BUF_NUM/BGE_SPLIT; ++slot) { 2245 bge_slice_chunk(&txbuf->buf, &pbuf, 1, 2246 bgep->chipid.snd_buff_size); 2247 txbuf++; 2248 } 2249 ASSERT(pbuf.alength == 0); 2250 } 2251 } 2252 2253 /* 2254 * Clean up initialisation done above before the memory is freed 2255 */ 2256 static void 2257 bge_fini_send_ring(bge_t *bgep, uint64_t ring) 2258 { 2259 send_ring_t *srp; 2260 uint32_t array; 2261 uint32_t split; 2262 uint32_t nslots; 2263 2264 BGE_TRACE(("bge_fini_send_ring($%p, %d)", 2265 (void *)bgep, ring)); 2266 2267 srp = &bgep->send[ring]; 2268 mutex_destroy(srp->tc_lock); 2269 mutex_destroy(srp->freetxbuf_lock); 2270 mutex_destroy(srp->txbuf_lock); 2271 mutex_destroy(srp->tx_lock); 2272 nslots = srp->desc.nslots; 2273 if (nslots == 0) 2274 return; 2275 2276 for (array = 1; array < srp->tx_array; ++array) 2277 for (split = 0; split < BGE_SPLIT; ++split) 2278 bge_free_dma_mem(&srp->buf[array][split]); 2279 kmem_free(srp->sw_sbds, nslots*sizeof (*srp->sw_sbds)); 2280 kmem_free(srp->txbuf_head, BGE_SEND_BUF_MAX*sizeof (*srp->txbuf_head)); 2281 kmem_free(srp->txbuf, BGE_SEND_BUF_MAX*sizeof (*srp->txbuf)); 2282 kmem_free(srp->pktp, BGE_SEND_BUF_MAX*sizeof (*srp->pktp)); 2283 srp->sw_sbds = NULL; 2284 srp->txbuf_head = NULL; 2285 srp->txbuf = NULL; 2286 srp->pktp = NULL; 2287 } 2288 2289 /* 2290 * Initialise all transmit, receive, and buffer rings. 2291 */ 2292 void 2293 bge_init_rings(bge_t *bgep) 2294 { 2295 uint32_t ring; 2296 2297 BGE_TRACE(("bge_init_rings($%p)", (void *)bgep)); 2298 2299 /* 2300 * Perform one-off initialisation of each ring ... 2301 */ 2302 for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring) 2303 bge_init_send_ring(bgep, ring); 2304 for (ring = 0; ring < BGE_RECV_RINGS_MAX; ++ring) 2305 bge_init_recv_ring(bgep, ring); 2306 for (ring = 0; ring < BGE_BUFF_RINGS_MAX; ++ring) 2307 bge_init_buff_ring(bgep, ring); 2308 } 2309 2310 /* 2311 * Undo the work of bge_init_rings() above before the memory is freed 2312 */ 2313 void 2314 bge_fini_rings(bge_t *bgep) 2315 { 2316 uint32_t ring; 2317 2318 BGE_TRACE(("bge_fini_rings($%p)", (void *)bgep)); 2319 2320 for (ring = 0; ring < BGE_BUFF_RINGS_MAX; ++ring) 2321 bge_fini_buff_ring(bgep, ring); 2322 for (ring = 0; ring < BGE_RECV_RINGS_MAX; ++ring) 2323 bge_fini_recv_ring(bgep, ring); 2324 for (ring = 0; ring < BGE_SEND_RINGS_MAX; ++ring) 2325 bge_fini_send_ring(bgep, ring); 2326 } 2327 2328 /* 2329 * Called from the bge_m_stop() to free the tx buffers which are 2330 * allocated from the tx process. 2331 */ 2332 void 2333 bge_free_txbuf_arrays(send_ring_t *srp) 2334 { 2335 uint32_t array; 2336 uint32_t split; 2337 2338 ASSERT(mutex_owned(srp->tx_lock)); 2339 2340 /* 2341 * Free the extra tx buffer DMA area 2342 */ 2343 for (array = 1; array < srp->tx_array; ++array) 2344 for (split = 0; split < BGE_SPLIT; ++split) 2345 bge_free_dma_mem(&srp->buf[array][split]); 2346 2347 /* 2348 * Restore initial tx buffer numbers 2349 */ 2350 srp->tx_array = 1; 2351 srp->tx_buffers = BGE_SEND_BUF_NUM; 2352 srp->tx_buffers_low = srp->tx_buffers / 4; 2353 srp->tx_flow = 0; 2354 bzero(srp->pktp, BGE_SEND_BUF_MAX * sizeof (*srp->pktp)); 2355 } 2356 2357 /* 2358 * Called from tx process to allocate more tx buffers 2359 */ 2360 bge_queue_item_t * 2361 bge_alloc_txbuf_array(bge_t *bgep, send_ring_t *srp) 2362 { 2363 bge_queue_t *txbuf_queue; 2364 bge_queue_item_t *txbuf_item_last; 2365 bge_queue_item_t *txbuf_item; 2366 bge_queue_item_t *txbuf_item_rtn; 2367 sw_txbuf_t *txbuf; 2368 dma_area_t area; 2369 size_t txbuffsize; 2370 uint32_t slot; 2371 uint32_t array; 2372 uint32_t split; 2373 uint32_t err; 2374 2375 ASSERT(mutex_owned(srp->tx_lock)); 2376 2377 array = srp->tx_array; 2378 if (array >= srp->tx_array_max) 2379 return (NULL); 2380 2381 /* 2382 * Allocate memory & handles for TX buffers 2383 */ 2384 txbuffsize = BGE_SEND_BUF_NUM*bgep->chipid.snd_buff_size; 2385 ASSERT((txbuffsize % BGE_SPLIT) == 0); 2386 for (split = 0; split < BGE_SPLIT; ++split) { 2387 err = bge_alloc_dma_mem(bgep, txbuffsize/BGE_SPLIT, 2388 &bge_data_accattr, DDI_DMA_WRITE | BGE_DMA_MODE, 2389 &srp->buf[array][split]); 2390 if (err != DDI_SUCCESS) { 2391 /* Free the last already allocated OK chunks */ 2392 for (slot = 0; slot <= split; ++slot) 2393 bge_free_dma_mem(&srp->buf[array][slot]); 2394 srp->tx_alloc_fail++; 2395 return (NULL); 2396 } 2397 } 2398 2399 /* 2400 * Chunk tx buffer area 2401 */ 2402 txbuf = srp->txbuf + array*BGE_SEND_BUF_NUM; 2403 for (split = 0; split < BGE_SPLIT; ++split) { 2404 area = srp->buf[array][split]; 2405 for (slot = 0; slot < BGE_SEND_BUF_NUM/BGE_SPLIT; ++slot) { 2406 bge_slice_chunk(&txbuf->buf, &area, 1, 2407 bgep->chipid.snd_buff_size); 2408 txbuf++; 2409 } 2410 } 2411 2412 /* 2413 * Add above buffers to the tx buffer pop queue 2414 */ 2415 txbuf_item = srp->txbuf_head + array*BGE_SEND_BUF_NUM; 2416 txbuf = srp->txbuf + array*BGE_SEND_BUF_NUM; 2417 txbuf_item_last = NULL; 2418 for (slot = 0; slot < BGE_SEND_BUF_NUM; ++slot) { 2419 txbuf_item->item = txbuf; 2420 txbuf_item->next = txbuf_item_last; 2421 txbuf_item_last = txbuf_item; 2422 txbuf++; 2423 txbuf_item++; 2424 } 2425 txbuf_item = srp->txbuf_head + array*BGE_SEND_BUF_NUM; 2426 txbuf_item_rtn = txbuf_item; 2427 txbuf_item++; 2428 txbuf_queue = srp->txbuf_pop_queue; 2429 mutex_enter(txbuf_queue->lock); 2430 txbuf_item->next = txbuf_queue->head; 2431 txbuf_queue->head = txbuf_item_last; 2432 txbuf_queue->count += BGE_SEND_BUF_NUM - 1; 2433 mutex_exit(txbuf_queue->lock); 2434 2435 srp->tx_array++; 2436 srp->tx_buffers += BGE_SEND_BUF_NUM; 2437 srp->tx_buffers_low = srp->tx_buffers / 4; 2438 2439 return (txbuf_item_rtn); 2440 } 2441 2442 /* 2443 * This function allocates all the transmit and receive buffers 2444 * and descriptors, in four chunks. 2445 */ 2446 int 2447 bge_alloc_bufs(bge_t *bgep) 2448 { 2449 dma_area_t area; 2450 size_t rxbuffsize; 2451 size_t txbuffsize; 2452 size_t rxbuffdescsize; 2453 size_t rxdescsize; 2454 size_t txdescsize; 2455 uint32_t ring; 2456 uint32_t rx_rings = bgep->chipid.rx_rings; 2457 uint32_t tx_rings = bgep->chipid.tx_rings; 2458 int split; 2459 int err; 2460 2461 BGE_TRACE(("bge_alloc_bufs($%p)", 2462 (void *)bgep)); 2463 2464 rxbuffsize = BGE_STD_SLOTS_USED*bgep->chipid.std_buf_size; 2465 rxbuffsize += bgep->chipid.jumbo_slots*bgep->chipid.recv_jumbo_size; 2466 rxbuffsize += BGE_MINI_SLOTS_USED*BGE_MINI_BUFF_SIZE; 2467 2468 txbuffsize = BGE_SEND_BUF_NUM*bgep->chipid.snd_buff_size; 2469 txbuffsize *= tx_rings; 2470 2471 rxdescsize = rx_rings*bgep->chipid.recv_slots; 2472 rxdescsize *= sizeof (bge_rbd_t); 2473 2474 rxbuffdescsize = BGE_STD_SLOTS_USED; 2475 rxbuffdescsize += bgep->chipid.jumbo_slots; 2476 rxbuffdescsize += BGE_MINI_SLOTS_USED; 2477 rxbuffdescsize *= sizeof (bge_rbd_t); 2478 2479 txdescsize = tx_rings*BGE_SEND_SLOTS_USED; 2480 txdescsize *= sizeof (bge_sbd_t); 2481 txdescsize += sizeof (bge_statistics_t); 2482 txdescsize += sizeof (bge_status_t); 2483 txdescsize += BGE_STATUS_PADDING; 2484 2485 /* 2486 * Enable PCI relaxed ordering only for RX/TX data buffers 2487 */ 2488 if (bge_relaxed_ordering) 2489 dma_attr.dma_attr_flags |= DDI_DMA_RELAXED_ORDERING; 2490 2491 /* 2492 * Allocate memory & handles for RX buffers 2493 */ 2494 ASSERT((rxbuffsize % BGE_SPLIT) == 0); 2495 for (split = 0; split < BGE_SPLIT; ++split) { 2496 err = bge_alloc_dma_mem(bgep, rxbuffsize/BGE_SPLIT, 2497 &bge_data_accattr, DDI_DMA_READ | BGE_DMA_MODE, 2498 &bgep->rx_buff[split]); 2499 if (err != DDI_SUCCESS) 2500 return (DDI_FAILURE); 2501 } 2502 2503 /* 2504 * Allocate memory & handles for TX buffers 2505 */ 2506 ASSERT((txbuffsize % BGE_SPLIT) == 0); 2507 for (split = 0; split < BGE_SPLIT; ++split) { 2508 err = bge_alloc_dma_mem(bgep, txbuffsize/BGE_SPLIT, 2509 &bge_data_accattr, DDI_DMA_WRITE | BGE_DMA_MODE, 2510 &bgep->tx_buff[split]); 2511 if (err != DDI_SUCCESS) 2512 return (DDI_FAILURE); 2513 } 2514 2515 dma_attr.dma_attr_flags &= ~DDI_DMA_RELAXED_ORDERING; 2516 2517 /* 2518 * Allocate memory & handles for receive return rings 2519 */ 2520 ASSERT((rxdescsize % rx_rings) == 0); 2521 for (split = 0; split < rx_rings; ++split) { 2522 err = bge_alloc_dma_mem(bgep, rxdescsize/rx_rings, 2523 &bge_desc_accattr, DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 2524 &bgep->rx_desc[split]); 2525 if (err != DDI_SUCCESS) 2526 return (DDI_FAILURE); 2527 } 2528 2529 /* 2530 * Allocate memory & handles for buffer (producer) descriptor rings 2531 */ 2532 err = bge_alloc_dma_mem(bgep, rxbuffdescsize, &bge_desc_accattr, 2533 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &bgep->rx_desc[split]); 2534 if (err != DDI_SUCCESS) 2535 return (DDI_FAILURE); 2536 2537 /* 2538 * Allocate memory & handles for TX descriptor rings, 2539 * status block, and statistics area 2540 */ 2541 err = bge_alloc_dma_mem(bgep, txdescsize, &bge_desc_accattr, 2542 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &bgep->tx_desc); 2543 if (err != DDI_SUCCESS) 2544 return (DDI_FAILURE); 2545 2546 /* 2547 * Now carve up each of the allocated areas ... 2548 */ 2549 for (split = 0; split < BGE_SPLIT; ++split) { 2550 area = bgep->rx_buff[split]; 2551 bge_slice_chunk(&bgep->buff[BGE_STD_BUFF_RING].buf[split], 2552 &area, BGE_STD_SLOTS_USED/BGE_SPLIT, 2553 bgep->chipid.std_buf_size); 2554 bge_slice_chunk(&bgep->buff[BGE_JUMBO_BUFF_RING].buf[split], 2555 &area, bgep->chipid.jumbo_slots/BGE_SPLIT, 2556 bgep->chipid.recv_jumbo_size); 2557 bge_slice_chunk(&bgep->buff[BGE_MINI_BUFF_RING].buf[split], 2558 &area, BGE_MINI_SLOTS_USED/BGE_SPLIT, 2559 BGE_MINI_BUFF_SIZE); 2560 ASSERT(area.alength >= 0); 2561 } 2562 2563 for (split = 0; split < BGE_SPLIT; ++split) { 2564 area = bgep->tx_buff[split]; 2565 for (ring = 0; ring < tx_rings; ++ring) 2566 bge_slice_chunk(&bgep->send[ring].buf[0][split], 2567 &area, BGE_SEND_BUF_NUM/BGE_SPLIT, 2568 bgep->chipid.snd_buff_size); 2569 for (; ring < BGE_SEND_RINGS_MAX; ++ring) 2570 bge_slice_chunk(&bgep->send[ring].buf[0][split], 2571 &area, 0, bgep->chipid.snd_buff_size); 2572 ASSERT(area.alength >= 0); 2573 } 2574 2575 for (ring = 0; ring < rx_rings; ++ring) 2576 bge_slice_chunk(&bgep->recv[ring].desc, &bgep->rx_desc[ring], 2577 bgep->chipid.recv_slots, sizeof (bge_rbd_t)); 2578 2579 area = bgep->rx_desc[rx_rings]; 2580 for (; ring < BGE_RECV_RINGS_MAX; ++ring) 2581 bge_slice_chunk(&bgep->recv[ring].desc, &area, 2582 0, sizeof (bge_rbd_t)); 2583 bge_slice_chunk(&bgep->buff[BGE_STD_BUFF_RING].desc, &area, 2584 BGE_STD_SLOTS_USED, sizeof (bge_rbd_t)); 2585 bge_slice_chunk(&bgep->buff[BGE_JUMBO_BUFF_RING].desc, &area, 2586 bgep->chipid.jumbo_slots, sizeof (bge_rbd_t)); 2587 bge_slice_chunk(&bgep->buff[BGE_MINI_BUFF_RING].desc, &area, 2588 BGE_MINI_SLOTS_USED, sizeof (bge_rbd_t)); 2589 ASSERT(area.alength == 0); 2590 2591 area = bgep->tx_desc; 2592 for (ring = 0; ring < tx_rings; ++ring) 2593 bge_slice_chunk(&bgep->send[ring].desc, &area, 2594 BGE_SEND_SLOTS_USED, sizeof (bge_sbd_t)); 2595 for (; ring < BGE_SEND_RINGS_MAX; ++ring) 2596 bge_slice_chunk(&bgep->send[ring].desc, &area, 2597 0, sizeof (bge_sbd_t)); 2598 bge_slice_chunk(&bgep->statistics, &area, 1, sizeof (bge_statistics_t)); 2599 bge_slice_chunk(&bgep->status_block, &area, 1, sizeof (bge_status_t)); 2600 ASSERT(area.alength == BGE_STATUS_PADDING); 2601 DMA_ZERO(bgep->status_block); 2602 2603 return (DDI_SUCCESS); 2604 } 2605 2606 /* 2607 * This routine frees the transmit and receive buffers and descriptors. 2608 * Make sure the chip is stopped before calling it! 2609 */ 2610 void 2611 bge_free_bufs(bge_t *bgep) 2612 { 2613 int split; 2614 2615 BGE_TRACE(("bge_free_bufs($%p)", 2616 (void *)bgep)); 2617 2618 bge_free_dma_mem(&bgep->tx_desc); 2619 for (split = 0; split < BGE_RECV_RINGS_SPLIT; ++split) 2620 bge_free_dma_mem(&bgep->rx_desc[split]); 2621 for (split = 0; split < BGE_SPLIT; ++split) 2622 bge_free_dma_mem(&bgep->tx_buff[split]); 2623 for (split = 0; split < BGE_SPLIT; ++split) 2624 bge_free_dma_mem(&bgep->rx_buff[split]); 2625 } 2626 2627 /* 2628 * Determine (initial) MAC address ("BIA") to use for this interface 2629 */ 2630 2631 static void 2632 bge_find_mac_address(bge_t *bgep, chip_id_t *cidp) 2633 { 2634 struct ether_addr sysaddr; 2635 char propbuf[8]; /* "true" or "false", plus NUL */ 2636 uchar_t *bytes; 2637 int *ints; 2638 uint_t nelts; 2639 int err; 2640 2641 BGE_TRACE(("bge_find_mac_address($%p)", 2642 (void *)bgep)); 2643 2644 BGE_DEBUG(("bge_find_mac_address: hw_mac_addr %012llx, => %s (%sset)", 2645 cidp->hw_mac_addr, 2646 ether_sprintf((void *)cidp->vendor_addr.addr), 2647 cidp->vendor_addr.set ? "" : "not ")); 2648 2649 /* 2650 * The "vendor's factory-set address" may already have 2651 * been extracted from the chip, but if the property 2652 * "local-mac-address" is set we use that instead. It 2653 * will normally be set by OBP, but it could also be 2654 * specified in a .conf file(!) 2655 * 2656 * There doesn't seem to be a way to define byte-array 2657 * properties in a .conf, so we check whether it looks 2658 * like an array of 6 ints instead. 2659 * 2660 * Then, we check whether it looks like an array of 6 2661 * bytes (which it should, if OBP set it). If we can't 2662 * make sense of it either way, we'll ignore it. 2663 */ 2664 err = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, bgep->devinfo, 2665 DDI_PROP_DONTPASS, localmac_propname, &ints, &nelts); 2666 if (err == DDI_PROP_SUCCESS) { 2667 if (nelts == ETHERADDRL) { 2668 while (nelts--) 2669 cidp->vendor_addr.addr[nelts] = ints[nelts]; 2670 cidp->vendor_addr.set = B_TRUE; 2671 } 2672 ddi_prop_free(ints); 2673 } 2674 2675 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, bgep->devinfo, 2676 DDI_PROP_DONTPASS, localmac_propname, &bytes, &nelts); 2677 if (err == DDI_PROP_SUCCESS) { 2678 if (nelts == ETHERADDRL) { 2679 while (nelts--) 2680 cidp->vendor_addr.addr[nelts] = bytes[nelts]; 2681 cidp->vendor_addr.set = B_TRUE; 2682 } 2683 ddi_prop_free(bytes); 2684 } 2685 2686 BGE_DEBUG(("bge_find_mac_address: +local %s (%sset)", 2687 ether_sprintf((void *)cidp->vendor_addr.addr), 2688 cidp->vendor_addr.set ? "" : "not ")); 2689 2690 /* 2691 * Look up the OBP property "local-mac-address?". Note that even 2692 * though its value is a string (which should be "true" or "false"), 2693 * it can't be decoded by ddi_prop_lookup_string(9F). So, we zero 2694 * the buffer first and then fetch the property as an untyped array; 2695 * this may or may not include a final NUL, but since there will 2696 * always be one left at the end of the buffer we can now treat it 2697 * as a string anyway. 2698 */ 2699 nelts = sizeof (propbuf); 2700 bzero(propbuf, nelts--); 2701 err = ddi_getlongprop_buf(DDI_DEV_T_ANY, bgep->devinfo, 2702 DDI_PROP_CANSLEEP, localmac_boolname, propbuf, (int *)&nelts); 2703 2704 /* 2705 * Now, if the address still isn't set from the hardware (SEEPROM) 2706 * or the OBP or .conf property, OR if the user has foolishly set 2707 * 'local-mac-address? = false', use "the system address" instead 2708 * (but only if it's non-null i.e. has been set from the IDPROM). 2709 */ 2710 if (cidp->vendor_addr.set == B_FALSE || strcmp(propbuf, "false") == 0) 2711 if (localetheraddr(NULL, &sysaddr) != 0) { 2712 ethaddr_copy(&sysaddr, cidp->vendor_addr.addr); 2713 cidp->vendor_addr.set = B_TRUE; 2714 } 2715 2716 BGE_DEBUG(("bge_find_mac_address: +system %s (%sset)", 2717 ether_sprintf((void *)cidp->vendor_addr.addr), 2718 cidp->vendor_addr.set ? "" : "not ")); 2719 2720 /* 2721 * Finally(!), if there's a valid "mac-address" property (created 2722 * if we netbooted from this interface), we must use this instead 2723 * of any of the above to ensure that the NFS/install server doesn't 2724 * get confused by the address changing as Solaris takes over! 2725 */ 2726 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, bgep->devinfo, 2727 DDI_PROP_DONTPASS, macaddr_propname, &bytes, &nelts); 2728 if (err == DDI_PROP_SUCCESS) { 2729 if (nelts == ETHERADDRL) { 2730 while (nelts--) 2731 cidp->vendor_addr.addr[nelts] = bytes[nelts]; 2732 cidp->vendor_addr.set = B_TRUE; 2733 } 2734 ddi_prop_free(bytes); 2735 } 2736 2737 BGE_DEBUG(("bge_find_mac_address: =final %s (%sset)", 2738 ether_sprintf((void *)cidp->vendor_addr.addr), 2739 cidp->vendor_addr.set ? "" : "not ")); 2740 } 2741 2742 2743 /*ARGSUSED*/ 2744 int 2745 bge_check_acc_handle(bge_t *bgep, ddi_acc_handle_t handle) 2746 { 2747 ddi_fm_error_t de; 2748 2749 ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION); 2750 ddi_fm_acc_err_clear(handle, DDI_FME_VERSION); 2751 return (de.fme_status); 2752 } 2753 2754 /*ARGSUSED*/ 2755 int 2756 bge_check_dma_handle(bge_t *bgep, ddi_dma_handle_t handle) 2757 { 2758 ddi_fm_error_t de; 2759 2760 ASSERT(bgep->progress & PROGRESS_BUFS); 2761 ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION); 2762 return (de.fme_status); 2763 } 2764 2765 /* 2766 * The IO fault service error handling callback function 2767 */ 2768 /*ARGSUSED*/ 2769 static int 2770 bge_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data) 2771 { 2772 /* 2773 * as the driver can always deal with an error in any dma or 2774 * access handle, we can just return the fme_status value. 2775 */ 2776 pci_ereport_post(dip, err, NULL); 2777 return (err->fme_status); 2778 } 2779 2780 static void 2781 bge_fm_init(bge_t *bgep) 2782 { 2783 ddi_iblock_cookie_t iblk; 2784 2785 /* Only register with IO Fault Services if we have some capability */ 2786 if (bgep->fm_capabilities) { 2787 bge_reg_accattr.devacc_attr_access = DDI_FLAGERR_ACC; 2788 bge_desc_accattr.devacc_attr_access = DDI_FLAGERR_ACC; 2789 dma_attr.dma_attr_flags = DDI_DMA_FLAGERR; 2790 2791 /* Register capabilities with IO Fault Services */ 2792 ddi_fm_init(bgep->devinfo, &bgep->fm_capabilities, &iblk); 2793 2794 /* 2795 * Initialize pci ereport capabilities if ereport capable 2796 */ 2797 if (DDI_FM_EREPORT_CAP(bgep->fm_capabilities) || 2798 DDI_FM_ERRCB_CAP(bgep->fm_capabilities)) 2799 pci_ereport_setup(bgep->devinfo); 2800 2801 /* 2802 * Register error callback if error callback capable 2803 */ 2804 if (DDI_FM_ERRCB_CAP(bgep->fm_capabilities)) 2805 ddi_fm_handler_register(bgep->devinfo, 2806 bge_fm_error_cb, (void*) bgep); 2807 } else { 2808 /* 2809 * These fields have to be cleared of FMA if there are no 2810 * FMA capabilities at runtime. 2811 */ 2812 bge_reg_accattr.devacc_attr_access = DDI_DEFAULT_ACC; 2813 bge_desc_accattr.devacc_attr_access = DDI_DEFAULT_ACC; 2814 dma_attr.dma_attr_flags = 0; 2815 } 2816 } 2817 2818 static void 2819 bge_fm_fini(bge_t *bgep) 2820 { 2821 /* Only unregister FMA capabilities if we registered some */ 2822 if (bgep->fm_capabilities) { 2823 2824 /* 2825 * Release any resources allocated by pci_ereport_setup() 2826 */ 2827 if (DDI_FM_EREPORT_CAP(bgep->fm_capabilities) || 2828 DDI_FM_ERRCB_CAP(bgep->fm_capabilities)) 2829 pci_ereport_teardown(bgep->devinfo); 2830 2831 /* 2832 * Un-register error callback if error callback capable 2833 */ 2834 if (DDI_FM_ERRCB_CAP(bgep->fm_capabilities)) 2835 ddi_fm_handler_unregister(bgep->devinfo); 2836 2837 /* Unregister from IO Fault Services */ 2838 ddi_fm_fini(bgep->devinfo); 2839 } 2840 } 2841 2842 static void 2843 #ifdef BGE_IPMI_ASF 2844 bge_unattach(bge_t *bgep, uint_t asf_mode) 2845 #else 2846 bge_unattach(bge_t *bgep) 2847 #endif 2848 { 2849 BGE_TRACE(("bge_unattach($%p)", 2850 (void *)bgep)); 2851 2852 /* 2853 * Flag that no more activity may be initiated 2854 */ 2855 bgep->progress &= ~PROGRESS_READY; 2856 2857 /* 2858 * Quiesce the PHY and MAC (leave it reset but still powered). 2859 * Clean up and free all BGE data structures 2860 */ 2861 if (bgep->periodic_id != NULL) { 2862 ddi_periodic_delete(bgep->periodic_id); 2863 bgep->periodic_id = NULL; 2864 } 2865 if (bgep->progress & PROGRESS_KSTATS) 2866 bge_fini_kstats(bgep); 2867 if (bgep->progress & PROGRESS_PHY) 2868 bge_phys_reset(bgep); 2869 if (bgep->progress & PROGRESS_HWINT) { 2870 mutex_enter(bgep->genlock); 2871 #ifdef BGE_IPMI_ASF 2872 if (bge_chip_reset(bgep, B_FALSE, asf_mode) != DDI_SUCCESS) 2873 #else 2874 if (bge_chip_reset(bgep, B_FALSE) != DDI_SUCCESS) 2875 #endif 2876 ddi_fm_service_impact(bgep->devinfo, 2877 DDI_SERVICE_UNAFFECTED); 2878 #ifdef BGE_IPMI_ASF 2879 if (bgep->asf_enabled) { 2880 /* 2881 * This register has been overlaid. We restore its 2882 * initial value here. 2883 */ 2884 bge_nic_put32(bgep, BGE_NIC_DATA_SIG_ADDR, 2885 BGE_NIC_DATA_SIG); 2886 } 2887 #endif 2888 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) 2889 ddi_fm_service_impact(bgep->devinfo, 2890 DDI_SERVICE_UNAFFECTED); 2891 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) 2892 ddi_fm_service_impact(bgep->devinfo, 2893 DDI_SERVICE_UNAFFECTED); 2894 mutex_exit(bgep->genlock); 2895 } 2896 if (bgep->progress & PROGRESS_INTR) { 2897 bge_intr_disable(bgep); 2898 bge_fini_rings(bgep); 2899 } 2900 if (bgep->progress & PROGRESS_HWINT) { 2901 bge_rem_intrs(bgep); 2902 rw_destroy(bgep->errlock); 2903 mutex_destroy(bgep->softintrlock); 2904 mutex_destroy(bgep->genlock); 2905 } 2906 if (bgep->progress & PROGRESS_FACTOTUM) 2907 ddi_remove_softintr(bgep->factotum_id); 2908 if (bgep->progress & PROGRESS_RESCHED) 2909 ddi_remove_softintr(bgep->drain_id); 2910 if (bgep->progress & PROGRESS_BUFS) 2911 bge_free_bufs(bgep); 2912 if (bgep->progress & PROGRESS_REGS) 2913 ddi_regs_map_free(&bgep->io_handle); 2914 if (bgep->progress & PROGRESS_CFG) 2915 pci_config_teardown(&bgep->cfg_handle); 2916 2917 bge_fm_fini(bgep); 2918 2919 ddi_remove_minor_node(bgep->devinfo, NULL); 2920 kmem_free(bgep->pstats, sizeof (bge_statistics_reg_t)); 2921 kmem_free(bgep, sizeof (*bgep)); 2922 } 2923 2924 static int 2925 bge_resume(dev_info_t *devinfo) 2926 { 2927 bge_t *bgep; /* Our private data */ 2928 chip_id_t *cidp; 2929 chip_id_t chipid; 2930 2931 bgep = ddi_get_driver_private(devinfo); 2932 if (bgep == NULL) 2933 return (DDI_FAILURE); 2934 2935 /* 2936 * Refuse to resume if the data structures aren't consistent 2937 */ 2938 if (bgep->devinfo != devinfo) 2939 return (DDI_FAILURE); 2940 2941 #ifdef BGE_IPMI_ASF 2942 /* 2943 * Power management hasn't been supported in BGE now. If you 2944 * want to implement it, please add the ASF/IPMI related 2945 * code here. 2946 */ 2947 2948 #endif 2949 2950 /* 2951 * Read chip ID & set up config space command register(s) 2952 * Refuse to resume if the chip has changed its identity! 2953 */ 2954 cidp = &bgep->chipid; 2955 mutex_enter(bgep->genlock); 2956 bge_chip_cfg_init(bgep, &chipid, B_FALSE); 2957 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) { 2958 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 2959 mutex_exit(bgep->genlock); 2960 return (DDI_FAILURE); 2961 } 2962 mutex_exit(bgep->genlock); 2963 if (chipid.vendor != cidp->vendor) 2964 return (DDI_FAILURE); 2965 if (chipid.device != cidp->device) 2966 return (DDI_FAILURE); 2967 if (chipid.revision != cidp->revision) 2968 return (DDI_FAILURE); 2969 if (chipid.asic_rev != cidp->asic_rev) 2970 return (DDI_FAILURE); 2971 2972 /* 2973 * All OK, reinitialise h/w & kick off GLD scheduling 2974 */ 2975 mutex_enter(bgep->genlock); 2976 if (bge_restart(bgep, B_TRUE) != DDI_SUCCESS) { 2977 (void) bge_check_acc_handle(bgep, bgep->cfg_handle); 2978 (void) bge_check_acc_handle(bgep, bgep->io_handle); 2979 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 2980 mutex_exit(bgep->genlock); 2981 return (DDI_FAILURE); 2982 } 2983 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) { 2984 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 2985 mutex_exit(bgep->genlock); 2986 return (DDI_FAILURE); 2987 } 2988 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) { 2989 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 2990 mutex_exit(bgep->genlock); 2991 return (DDI_FAILURE); 2992 } 2993 mutex_exit(bgep->genlock); 2994 return (DDI_SUCCESS); 2995 } 2996 2997 /* 2998 * attach(9E) -- Attach a device to the system 2999 * 3000 * Called once for each board successfully probed. 3001 */ 3002 static int 3003 bge_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd) 3004 { 3005 bge_t *bgep; /* Our private data */ 3006 mac_register_t *macp; 3007 chip_id_t *cidp; 3008 caddr_t regs; 3009 int instance; 3010 int err; 3011 int intr_types; 3012 #ifdef BGE_IPMI_ASF 3013 uint32_t mhcrValue; 3014 #ifdef __sparc 3015 uint16_t value16; 3016 #endif 3017 #ifdef BGE_NETCONSOLE 3018 int retval; 3019 #endif 3020 #endif 3021 3022 instance = ddi_get_instance(devinfo); 3023 3024 BGE_GTRACE(("bge_attach($%p, %d) instance %d", 3025 (void *)devinfo, cmd, instance)); 3026 BGE_BRKPT(NULL, "bge_attach"); 3027 3028 switch (cmd) { 3029 default: 3030 return (DDI_FAILURE); 3031 3032 case DDI_RESUME: 3033 return (bge_resume(devinfo)); 3034 3035 case DDI_ATTACH: 3036 break; 3037 } 3038 3039 bgep = kmem_zalloc(sizeof (*bgep), KM_SLEEP); 3040 bgep->pstats = kmem_zalloc(sizeof (bge_statistics_reg_t), KM_SLEEP); 3041 ddi_set_driver_private(devinfo, bgep); 3042 bgep->bge_guard = BGE_GUARD; 3043 bgep->devinfo = devinfo; 3044 bgep->param_drain_max = 64; 3045 bgep->param_msi_cnt = 0; 3046 bgep->param_loop_mode = 0; 3047 3048 /* 3049 * Initialize more fields in BGE private data 3050 */ 3051 bgep->debug = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 3052 DDI_PROP_DONTPASS, debug_propname, bge_debug); 3053 (void) snprintf(bgep->ifname, sizeof (bgep->ifname), "%s%d", 3054 BGE_DRIVER_NAME, instance); 3055 3056 /* 3057 * Initialize for fma support 3058 */ 3059 bgep->fm_capabilities = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 3060 DDI_PROP_DONTPASS, fm_cap, 3061 DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE | 3062 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE); 3063 BGE_DEBUG(("bgep->fm_capabilities = %d", bgep->fm_capabilities)); 3064 bge_fm_init(bgep); 3065 3066 /* 3067 * Look up the IOMMU's page size for DVMA mappings (must be 3068 * a power of 2) and convert to a mask. This can be used to 3069 * determine whether a message buffer crosses a page boundary. 3070 * Note: in 2s complement binary notation, if X is a power of 3071 * 2, then -X has the representation "11...1100...00". 3072 */ 3073 bgep->pagemask = dvma_pagesize(devinfo); 3074 ASSERT(ddi_ffs(bgep->pagemask) == ddi_fls(bgep->pagemask)); 3075 bgep->pagemask = -bgep->pagemask; 3076 3077 /* 3078 * Map config space registers 3079 * Read chip ID & set up config space command register(s) 3080 * 3081 * Note: this leaves the chip accessible by Memory Space 3082 * accesses, but with interrupts and Bus Mastering off. 3083 * This should ensure that nothing untoward will happen 3084 * if it has been left active by the (net-)bootloader. 3085 * We'll re-enable Bus Mastering once we've reset the chip, 3086 * and allow interrupts only when everything else is set up. 3087 */ 3088 err = pci_config_setup(devinfo, &bgep->cfg_handle); 3089 #ifdef BGE_IPMI_ASF 3090 #ifdef __sparc 3091 value16 = pci_config_get16(bgep->cfg_handle, PCI_CONF_COMM); 3092 value16 = value16 | (PCI_COMM_MAE | PCI_COMM_ME); 3093 pci_config_put16(bgep->cfg_handle, PCI_CONF_COMM, value16); 3094 mhcrValue = MHCR_ENABLE_INDIRECT_ACCESS | 3095 MHCR_ENABLE_TAGGED_STATUS_MODE | 3096 MHCR_MASK_INTERRUPT_MODE | 3097 MHCR_MASK_PCI_INT_OUTPUT | 3098 MHCR_CLEAR_INTERRUPT_INTA | 3099 MHCR_ENABLE_ENDIAN_WORD_SWAP | 3100 MHCR_ENABLE_ENDIAN_BYTE_SWAP; 3101 pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_MHCR, mhcrValue); 3102 bge_ind_put32(bgep, MEMORY_ARBITER_MODE_REG, 3103 bge_ind_get32(bgep, MEMORY_ARBITER_MODE_REG) | 3104 MEMORY_ARBITER_ENABLE); 3105 #else 3106 mhcrValue = pci_config_get32(bgep->cfg_handle, PCI_CONF_BGE_MHCR); 3107 #endif 3108 if (mhcrValue & MHCR_ENABLE_ENDIAN_WORD_SWAP) { 3109 bgep->asf_wordswapped = B_TRUE; 3110 } else { 3111 bgep->asf_wordswapped = B_FALSE; 3112 } 3113 bge_asf_get_config(bgep); 3114 #endif 3115 if (err != DDI_SUCCESS) { 3116 bge_problem(bgep, "pci_config_setup() failed"); 3117 goto attach_fail; 3118 } 3119 bgep->progress |= PROGRESS_CFG; 3120 cidp = &bgep->chipid; 3121 bzero(cidp, sizeof (*cidp)); 3122 bge_chip_cfg_init(bgep, cidp, B_FALSE); 3123 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) { 3124 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 3125 goto attach_fail; 3126 } 3127 3128 #ifdef BGE_IPMI_ASF 3129 if (DEVICE_5721_SERIES_CHIPSETS(bgep) || 3130 DEVICE_5714_SERIES_CHIPSETS(bgep)) { 3131 bgep->asf_newhandshake = B_TRUE; 3132 } else { 3133 bgep->asf_newhandshake = B_FALSE; 3134 } 3135 #endif 3136 3137 /* 3138 * Update those parts of the chip ID derived from volatile 3139 * registers with the values seen by OBP (in case the chip 3140 * has been reset externally and therefore lost them). 3141 */ 3142 cidp->subven = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 3143 DDI_PROP_DONTPASS, subven_propname, cidp->subven); 3144 cidp->subdev = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 3145 DDI_PROP_DONTPASS, subdev_propname, cidp->subdev); 3146 cidp->clsize = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 3147 DDI_PROP_DONTPASS, clsize_propname, cidp->clsize); 3148 cidp->latency = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 3149 DDI_PROP_DONTPASS, latency_propname, cidp->latency); 3150 cidp->rx_rings = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 3151 DDI_PROP_DONTPASS, rxrings_propname, cidp->rx_rings); 3152 cidp->tx_rings = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 3153 DDI_PROP_DONTPASS, txrings_propname, cidp->tx_rings); 3154 3155 if (bge_jumbo_enable == B_TRUE) { 3156 cidp->default_mtu = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 3157 DDI_PROP_DONTPASS, default_mtu, BGE_DEFAULT_MTU); 3158 if ((cidp->default_mtu < BGE_DEFAULT_MTU)|| 3159 (cidp->default_mtu > BGE_MAXIMUM_MTU)) { 3160 cidp->default_mtu = BGE_DEFAULT_MTU; 3161 } 3162 } 3163 /* 3164 * Map operating registers 3165 */ 3166 err = ddi_regs_map_setup(devinfo, BGE_PCI_OPREGS_RNUMBER, 3167 ®s, 0, 0, &bge_reg_accattr, &bgep->io_handle); 3168 if (err != DDI_SUCCESS) { 3169 bge_problem(bgep, "ddi_regs_map_setup() failed"); 3170 goto attach_fail; 3171 } 3172 bgep->io_regs = regs; 3173 bgep->progress |= PROGRESS_REGS; 3174 3175 /* 3176 * Characterise the device, so we know its requirements. 3177 * Then allocate the appropriate TX and RX descriptors & buffers. 3178 */ 3179 if (bge_chip_id_init(bgep) == EIO) { 3180 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 3181 goto attach_fail; 3182 } 3183 3184 3185 err = bge_alloc_bufs(bgep); 3186 if (err != DDI_SUCCESS) { 3187 bge_problem(bgep, "DMA buffer allocation failed"); 3188 goto attach_fail; 3189 } 3190 bgep->progress |= PROGRESS_BUFS; 3191 3192 /* 3193 * Add the softint handlers: 3194 * 3195 * Both of these handlers are used to avoid restrictions on the 3196 * context and/or mutexes required for some operations. In 3197 * particular, the hardware interrupt handler and its subfunctions 3198 * can detect a number of conditions that we don't want to handle 3199 * in that context or with that set of mutexes held. So, these 3200 * softints are triggered instead: 3201 * 3202 * the <resched> softint is triggered if we have previously 3203 * had to refuse to send a packet because of resource shortage 3204 * (we've run out of transmit buffers), but the send completion 3205 * interrupt handler has now detected that more buffers have 3206 * become available. 3207 * 3208 * the <factotum> is triggered if the h/w interrupt handler 3209 * sees the <link state changed> or <error> bits in the status 3210 * block. It's also triggered periodically to poll the link 3211 * state, just in case we aren't getting link status change 3212 * interrupts ... 3213 */ 3214 err = ddi_add_softintr(devinfo, DDI_SOFTINT_LOW, &bgep->drain_id, 3215 NULL, NULL, bge_send_drain, (caddr_t)bgep); 3216 if (err != DDI_SUCCESS) { 3217 bge_problem(bgep, "ddi_add_softintr() failed"); 3218 goto attach_fail; 3219 } 3220 bgep->progress |= PROGRESS_RESCHED; 3221 err = ddi_add_softintr(devinfo, DDI_SOFTINT_LOW, &bgep->factotum_id, 3222 NULL, NULL, bge_chip_factotum, (caddr_t)bgep); 3223 if (err != DDI_SUCCESS) { 3224 bge_problem(bgep, "ddi_add_softintr() failed"); 3225 goto attach_fail; 3226 } 3227 bgep->progress |= PROGRESS_FACTOTUM; 3228 3229 /* Get supported interrupt types */ 3230 if (ddi_intr_get_supported_types(devinfo, &intr_types) != DDI_SUCCESS) { 3231 bge_error(bgep, "ddi_intr_get_supported_types failed\n"); 3232 3233 goto attach_fail; 3234 } 3235 3236 BGE_DEBUG(("%s: ddi_intr_get_supported_types() returned: %x", 3237 bgep->ifname, intr_types)); 3238 3239 if ((intr_types & DDI_INTR_TYPE_MSI) && bgep->chipid.msi_enabled) { 3240 if (bge_add_intrs(bgep, DDI_INTR_TYPE_MSI) != DDI_SUCCESS) { 3241 bge_error(bgep, "MSI registration failed, " 3242 "trying FIXED interrupt type\n"); 3243 } else { 3244 BGE_DEBUG(("%s: Using MSI interrupt type", 3245 bgep->ifname)); 3246 bgep->intr_type = DDI_INTR_TYPE_MSI; 3247 bgep->progress |= PROGRESS_HWINT; 3248 } 3249 } 3250 3251 if (!(bgep->progress & PROGRESS_HWINT) && 3252 (intr_types & DDI_INTR_TYPE_FIXED)) { 3253 if (bge_add_intrs(bgep, DDI_INTR_TYPE_FIXED) != DDI_SUCCESS) { 3254 bge_error(bgep, "FIXED interrupt " 3255 "registration failed\n"); 3256 goto attach_fail; 3257 } 3258 3259 BGE_DEBUG(("%s: Using FIXED interrupt type", bgep->ifname)); 3260 3261 bgep->intr_type = DDI_INTR_TYPE_FIXED; 3262 bgep->progress |= PROGRESS_HWINT; 3263 } 3264 3265 if (!(bgep->progress & PROGRESS_HWINT)) { 3266 bge_error(bgep, "No interrupts registered\n"); 3267 goto attach_fail; 3268 } 3269 3270 /* 3271 * Note that interrupts are not enabled yet as 3272 * mutex locks are not initialized. Initialize mutex locks. 3273 */ 3274 mutex_init(bgep->genlock, NULL, MUTEX_DRIVER, 3275 DDI_INTR_PRI(bgep->intr_pri)); 3276 mutex_init(bgep->softintrlock, NULL, MUTEX_DRIVER, 3277 DDI_INTR_PRI(bgep->intr_pri)); 3278 rw_init(bgep->errlock, NULL, RW_DRIVER, 3279 DDI_INTR_PRI(bgep->intr_pri)); 3280 3281 /* 3282 * Initialize rings. 3283 */ 3284 bge_init_rings(bgep); 3285 3286 /* 3287 * Now that mutex locks are initialized, enable interrupts. 3288 */ 3289 bge_intr_enable(bgep); 3290 bgep->progress |= PROGRESS_INTR; 3291 3292 /* 3293 * Initialise link state variables 3294 * Stop, reset & reinitialise the chip. 3295 * Initialise the (internal) PHY. 3296 */ 3297 bgep->link_state = LINK_STATE_UNKNOWN; 3298 3299 mutex_enter(bgep->genlock); 3300 3301 /* 3302 * Reset chip & rings to initial state; also reset address 3303 * filtering, promiscuity, loopback mode. 3304 */ 3305 #ifdef BGE_IPMI_ASF 3306 #ifdef BGE_NETCONSOLE 3307 if (bge_reset(bgep, ASF_MODE_INIT) != DDI_SUCCESS) { 3308 #else 3309 if (bge_reset(bgep, ASF_MODE_SHUTDOWN) != DDI_SUCCESS) { 3310 #endif 3311 #else 3312 if (bge_reset(bgep) != DDI_SUCCESS) { 3313 #endif 3314 (void) bge_check_acc_handle(bgep, bgep->cfg_handle); 3315 (void) bge_check_acc_handle(bgep, bgep->io_handle); 3316 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 3317 mutex_exit(bgep->genlock); 3318 goto attach_fail; 3319 } 3320 3321 #ifdef BGE_IPMI_ASF 3322 if (bgep->asf_enabled) { 3323 bgep->asf_status = ASF_STAT_RUN_INIT; 3324 } 3325 #endif 3326 3327 bzero(bgep->mcast_hash, sizeof (bgep->mcast_hash)); 3328 bzero(bgep->mcast_refs, sizeof (bgep->mcast_refs)); 3329 bgep->promisc = B_FALSE; 3330 bgep->param_loop_mode = BGE_LOOP_NONE; 3331 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) { 3332 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 3333 mutex_exit(bgep->genlock); 3334 goto attach_fail; 3335 } 3336 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) { 3337 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 3338 mutex_exit(bgep->genlock); 3339 goto attach_fail; 3340 } 3341 3342 mutex_exit(bgep->genlock); 3343 3344 if (bge_phys_init(bgep) == EIO) { 3345 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_LOST); 3346 goto attach_fail; 3347 } 3348 bgep->progress |= PROGRESS_PHY; 3349 3350 /* 3351 * initialize NDD-tweakable parameters 3352 */ 3353 if (bge_nd_init(bgep)) { 3354 bge_problem(bgep, "bge_nd_init() failed"); 3355 goto attach_fail; 3356 } 3357 bgep->progress |= PROGRESS_NDD; 3358 3359 /* 3360 * Create & initialise named kstats 3361 */ 3362 bge_init_kstats(bgep, instance); 3363 bgep->progress |= PROGRESS_KSTATS; 3364 3365 /* 3366 * Determine whether to override the chip's own MAC address 3367 */ 3368 bge_find_mac_address(bgep, cidp); 3369 ethaddr_copy(cidp->vendor_addr.addr, bgep->curr_addr[0].addr); 3370 bgep->curr_addr[0].set = B_TRUE; 3371 3372 bgep->unicst_addr_total = MAC_ADDRESS_REGS_MAX; 3373 /* 3374 * Address available is one less than MAX 3375 * as primary address is not advertised 3376 * as a multiple MAC address. 3377 */ 3378 bgep->unicst_addr_avail = MAC_ADDRESS_REGS_MAX - 1; 3379 3380 if ((macp = mac_alloc(MAC_VERSION)) == NULL) 3381 goto attach_fail; 3382 macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER; 3383 macp->m_driver = bgep; 3384 macp->m_dip = devinfo; 3385 macp->m_src_addr = bgep->curr_addr[0].addr; 3386 macp->m_callbacks = &bge_m_callbacks; 3387 macp->m_min_sdu = 0; 3388 macp->m_max_sdu = cidp->ethmax_size - sizeof (struct ether_header); 3389 macp->m_margin = VLAN_TAGSZ; 3390 macp->m_priv_props = bge_priv_prop; 3391 macp->m_priv_prop_count = BGE_MAX_PRIV_PROPS; 3392 3393 /* 3394 * Finally, we're ready to register ourselves with the MAC layer 3395 * interface; if this succeeds, we're all ready to start() 3396 */ 3397 err = mac_register(macp, &bgep->mh); 3398 mac_free(macp); 3399 if (err != 0) 3400 goto attach_fail; 3401 3402 /* 3403 * Register a periodical handler. 3404 * bge_chip_cyclic() is invoked in kernel context. 3405 */ 3406 bgep->periodic_id = ddi_periodic_add(bge_chip_cyclic, bgep, 3407 BGE_CYCLIC_PERIOD, DDI_IPL_0); 3408 3409 bgep->progress |= PROGRESS_READY; 3410 ASSERT(bgep->bge_guard == BGE_GUARD); 3411 #ifdef BGE_IPMI_ASF 3412 #ifdef BGE_NETCONSOLE 3413 if (bgep->asf_enabled) { 3414 mutex_enter(bgep->genlock); 3415 retval = bge_chip_start(bgep, B_TRUE); 3416 mutex_exit(bgep->genlock); 3417 if (retval != DDI_SUCCESS) 3418 goto attach_fail; 3419 } 3420 #endif 3421 #endif 3422 return (DDI_SUCCESS); 3423 3424 attach_fail: 3425 #ifdef BGE_IPMI_ASF 3426 bge_unattach(bgep, ASF_MODE_SHUTDOWN); 3427 #else 3428 bge_unattach(bgep); 3429 #endif 3430 return (DDI_FAILURE); 3431 } 3432 3433 /* 3434 * bge_suspend() -- suspend transmit/receive for powerdown 3435 */ 3436 static int 3437 bge_suspend(bge_t *bgep) 3438 { 3439 /* 3440 * Stop processing and idle (powerdown) the PHY ... 3441 */ 3442 mutex_enter(bgep->genlock); 3443 #ifdef BGE_IPMI_ASF 3444 /* 3445 * Power management hasn't been supported in BGE now. If you 3446 * want to implement it, please add the ASF/IPMI related 3447 * code here. 3448 */ 3449 #endif 3450 bge_stop(bgep); 3451 if (bge_phys_idle(bgep) != DDI_SUCCESS) { 3452 (void) bge_check_acc_handle(bgep, bgep->io_handle); 3453 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 3454 mutex_exit(bgep->genlock); 3455 return (DDI_FAILURE); 3456 } 3457 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) { 3458 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 3459 mutex_exit(bgep->genlock); 3460 return (DDI_FAILURE); 3461 } 3462 mutex_exit(bgep->genlock); 3463 3464 return (DDI_SUCCESS); 3465 } 3466 3467 /* 3468 * detach(9E) -- Detach a device from the system 3469 */ 3470 static int 3471 bge_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd) 3472 { 3473 bge_t *bgep; 3474 #ifdef BGE_IPMI_ASF 3475 uint_t asf_mode; 3476 asf_mode = ASF_MODE_NONE; 3477 #endif 3478 3479 BGE_GTRACE(("bge_detach($%p, %d)", (void *)devinfo, cmd)); 3480 3481 bgep = ddi_get_driver_private(devinfo); 3482 3483 switch (cmd) { 3484 default: 3485 return (DDI_FAILURE); 3486 3487 case DDI_SUSPEND: 3488 return (bge_suspend(bgep)); 3489 3490 case DDI_DETACH: 3491 break; 3492 } 3493 3494 #ifdef BGE_IPMI_ASF 3495 mutex_enter(bgep->genlock); 3496 if (bgep->asf_enabled && ((bgep->asf_status == ASF_STAT_RUN) || 3497 (bgep->asf_status == ASF_STAT_RUN_INIT))) { 3498 3499 bge_asf_update_status(bgep); 3500 if (bgep->asf_status == ASF_STAT_RUN) { 3501 bge_asf_stop_timer(bgep); 3502 } 3503 bgep->asf_status = ASF_STAT_STOP; 3504 3505 bge_asf_pre_reset_operations(bgep, BGE_SHUTDOWN_RESET); 3506 3507 if (bgep->asf_pseudostop) { 3508 bge_chip_stop(bgep, B_FALSE); 3509 bgep->bge_mac_state = BGE_MAC_STOPPED; 3510 bgep->asf_pseudostop = B_FALSE; 3511 } 3512 3513 asf_mode = ASF_MODE_POST_SHUTDOWN; 3514 3515 if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) 3516 ddi_fm_service_impact(bgep->devinfo, 3517 DDI_SERVICE_UNAFFECTED); 3518 if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) 3519 ddi_fm_service_impact(bgep->devinfo, 3520 DDI_SERVICE_UNAFFECTED); 3521 } 3522 mutex_exit(bgep->genlock); 3523 #endif 3524 3525 /* 3526 * Unregister from the GLD subsystem. This can fail, in 3527 * particular if there are DLPI style-2 streams still open - 3528 * in which case we just return failure without shutting 3529 * down chip operations. 3530 */ 3531 if (mac_unregister(bgep->mh) != 0) 3532 return (DDI_FAILURE); 3533 3534 /* 3535 * All activity stopped, so we can clean up & exit 3536 */ 3537 #ifdef BGE_IPMI_ASF 3538 bge_unattach(bgep, asf_mode); 3539 #else 3540 bge_unattach(bgep); 3541 #endif 3542 return (DDI_SUCCESS); 3543 } 3544 3545 3546 /* 3547 * ========== Module Loading Data & Entry Points ========== 3548 */ 3549 3550 #undef BGE_DBG 3551 #define BGE_DBG BGE_DBG_INIT /* debug flag for this code */ 3552 3553 DDI_DEFINE_STREAM_OPS(bge_dev_ops, nulldev, nulldev, bge_attach, bge_detach, 3554 nodev, NULL, D_MP, NULL); 3555 3556 static struct modldrv bge_modldrv = { 3557 &mod_driverops, /* Type of module. This one is a driver */ 3558 bge_ident, /* short description */ 3559 &bge_dev_ops /* driver specific ops */ 3560 }; 3561 3562 static struct modlinkage modlinkage = { 3563 MODREV_1, (void *)&bge_modldrv, NULL 3564 }; 3565 3566 3567 int 3568 _info(struct modinfo *modinfop) 3569 { 3570 return (mod_info(&modlinkage, modinfop)); 3571 } 3572 3573 int 3574 _init(void) 3575 { 3576 int status; 3577 3578 mac_init_ops(&bge_dev_ops, "bge"); 3579 status = mod_install(&modlinkage); 3580 if (status == DDI_SUCCESS) 3581 mutex_init(bge_log_mutex, NULL, MUTEX_DRIVER, NULL); 3582 else 3583 mac_fini_ops(&bge_dev_ops); 3584 return (status); 3585 } 3586 3587 int 3588 _fini(void) 3589 { 3590 int status; 3591 3592 status = mod_remove(&modlinkage); 3593 if (status == DDI_SUCCESS) { 3594 mac_fini_ops(&bge_dev_ops); 3595 mutex_destroy(bge_log_mutex); 3596 } 3597 return (status); 3598 } 3599 3600 3601 /* 3602 * bge_add_intrs: 3603 * 3604 * Register FIXED or MSI interrupts. 3605 */ 3606 static int 3607 bge_add_intrs(bge_t *bgep, int intr_type) 3608 { 3609 dev_info_t *dip = bgep->devinfo; 3610 int avail, actual, intr_size, count = 0; 3611 int i, flag, ret; 3612 3613 BGE_DEBUG(("bge_add_intrs($%p, 0x%x)", (void *)bgep, intr_type)); 3614 3615 /* Get number of interrupts */ 3616 ret = ddi_intr_get_nintrs(dip, intr_type, &count); 3617 if ((ret != DDI_SUCCESS) || (count == 0)) { 3618 bge_error(bgep, "ddi_intr_get_nintrs() failure, ret: %d, " 3619 "count: %d", ret, count); 3620 3621 return (DDI_FAILURE); 3622 } 3623 3624 /* Get number of available interrupts */ 3625 ret = ddi_intr_get_navail(dip, intr_type, &avail); 3626 if ((ret != DDI_SUCCESS) || (avail == 0)) { 3627 bge_error(bgep, "ddi_intr_get_navail() failure, " 3628 "ret: %d, avail: %d\n", ret, avail); 3629 3630 return (DDI_FAILURE); 3631 } 3632 3633 if (avail < count) { 3634 BGE_DEBUG(("%s: nintrs() returned %d, navail returned %d", 3635 bgep->ifname, count, avail)); 3636 } 3637 3638 /* 3639 * BGE hardware generates only single MSI even though it claims 3640 * to support multiple MSIs. So, hard code MSI count value to 1. 3641 */ 3642 if (intr_type == DDI_INTR_TYPE_MSI) { 3643 count = 1; 3644 flag = DDI_INTR_ALLOC_STRICT; 3645 } else { 3646 flag = DDI_INTR_ALLOC_NORMAL; 3647 } 3648 3649 /* Allocate an array of interrupt handles */ 3650 intr_size = count * sizeof (ddi_intr_handle_t); 3651 bgep->htable = kmem_alloc(intr_size, KM_SLEEP); 3652 3653 /* Call ddi_intr_alloc() */ 3654 ret = ddi_intr_alloc(dip, bgep->htable, intr_type, 0, 3655 count, &actual, flag); 3656 3657 if ((ret != DDI_SUCCESS) || (actual == 0)) { 3658 bge_error(bgep, "ddi_intr_alloc() failed %d\n", ret); 3659 3660 kmem_free(bgep->htable, intr_size); 3661 return (DDI_FAILURE); 3662 } 3663 3664 if (actual < count) { 3665 BGE_DEBUG(("%s: Requested: %d, Received: %d", 3666 bgep->ifname, count, actual)); 3667 } 3668 3669 bgep->intr_cnt = actual; 3670 3671 /* 3672 * Get priority for first msi, assume remaining are all the same 3673 */ 3674 if ((ret = ddi_intr_get_pri(bgep->htable[0], &bgep->intr_pri)) != 3675 DDI_SUCCESS) { 3676 bge_error(bgep, "ddi_intr_get_pri() failed %d\n", ret); 3677 3678 /* Free already allocated intr */ 3679 for (i = 0; i < actual; i++) { 3680 (void) ddi_intr_free(bgep->htable[i]); 3681 } 3682 3683 kmem_free(bgep->htable, intr_size); 3684 return (DDI_FAILURE); 3685 } 3686 3687 /* Call ddi_intr_add_handler() */ 3688 for (i = 0; i < actual; i++) { 3689 if ((ret = ddi_intr_add_handler(bgep->htable[i], bge_intr, 3690 (caddr_t)bgep, (caddr_t)(uintptr_t)i)) != DDI_SUCCESS) { 3691 bge_error(bgep, "ddi_intr_add_handler() " 3692 "failed %d\n", ret); 3693 3694 /* Free already allocated intr */ 3695 for (i = 0; i < actual; i++) { 3696 (void) ddi_intr_free(bgep->htable[i]); 3697 } 3698 3699 kmem_free(bgep->htable, intr_size); 3700 return (DDI_FAILURE); 3701 } 3702 } 3703 3704 if ((ret = ddi_intr_get_cap(bgep->htable[0], &bgep->intr_cap)) 3705 != DDI_SUCCESS) { 3706 bge_error(bgep, "ddi_intr_get_cap() failed %d\n", ret); 3707 3708 for (i = 0; i < actual; i++) { 3709 (void) ddi_intr_remove_handler(bgep->htable[i]); 3710 (void) ddi_intr_free(bgep->htable[i]); 3711 } 3712 3713 kmem_free(bgep->htable, intr_size); 3714 return (DDI_FAILURE); 3715 } 3716 3717 return (DDI_SUCCESS); 3718 } 3719 3720 /* 3721 * bge_rem_intrs: 3722 * 3723 * Unregister FIXED or MSI interrupts 3724 */ 3725 static void 3726 bge_rem_intrs(bge_t *bgep) 3727 { 3728 int i; 3729 3730 BGE_DEBUG(("bge_rem_intrs($%p)", (void *)bgep)); 3731 3732 /* Call ddi_intr_remove_handler() */ 3733 for (i = 0; i < bgep->intr_cnt; i++) { 3734 (void) ddi_intr_remove_handler(bgep->htable[i]); 3735 (void) ddi_intr_free(bgep->htable[i]); 3736 } 3737 3738 kmem_free(bgep->htable, bgep->intr_cnt * sizeof (ddi_intr_handle_t)); 3739 } 3740 3741 3742 void 3743 bge_intr_enable(bge_t *bgep) 3744 { 3745 int i; 3746 3747 if (bgep->intr_cap & DDI_INTR_FLAG_BLOCK) { 3748 /* Call ddi_intr_block_enable() for MSI interrupts */ 3749 (void) ddi_intr_block_enable(bgep->htable, bgep->intr_cnt); 3750 } else { 3751 /* Call ddi_intr_enable for MSI or FIXED interrupts */ 3752 for (i = 0; i < bgep->intr_cnt; i++) { 3753 (void) ddi_intr_enable(bgep->htable[i]); 3754 } 3755 } 3756 } 3757 3758 3759 void 3760 bge_intr_disable(bge_t *bgep) 3761 { 3762 int i; 3763 3764 if (bgep->intr_cap & DDI_INTR_FLAG_BLOCK) { 3765 /* Call ddi_intr_block_disable() */ 3766 (void) ddi_intr_block_disable(bgep->htable, bgep->intr_cnt); 3767 } else { 3768 for (i = 0; i < bgep->intr_cnt; i++) { 3769 (void) ddi_intr_disable(bgep->htable[i]); 3770 } 3771 } 3772 } 3773 3774 int 3775 bge_reprogram(bge_t *bgep) 3776 { 3777 int status = 0; 3778 3779 ASSERT(mutex_owned(bgep->genlock)); 3780 3781 if (bge_phys_update(bgep) != DDI_SUCCESS) { 3782 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 3783 status = IOC_INVAL; 3784 } 3785 #ifdef BGE_IPMI_ASF 3786 if (bge_chip_sync(bgep, B_TRUE) == DDI_FAILURE) { 3787 #else 3788 if (bge_chip_sync(bgep) == DDI_FAILURE) { 3789 #endif 3790 ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED); 3791 status = IOC_INVAL; 3792 } 3793 if (bgep->intr_type == DDI_INTR_TYPE_MSI) 3794 bge_chip_msi_trig(bgep); 3795 return (status); 3796 } 3797