1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 27 #include <sys/types.h> 28 #include <sys/sunddi.h> 29 #include "dmfe_impl.h" 30 31 /* 32 * This is the string displayed by modinfo, etc. 33 */ 34 static char dmfe_ident[] = "Davicom DM9102 Ethernet"; 35 36 37 /* 38 * NOTES: 39 * 40 * #defines: 41 * 42 * DMFE_PCI_RNUMBER is the register-set number to use for the operating 43 * registers. On an OBP-based machine, regset 0 refers to CONFIG space, 44 * regset 1 will be the operating registers in I/O space, and regset 2 45 * will be the operating registers in MEMORY space (preferred). If an 46 * expansion ROM is fitted, it may appear as a further register set. 47 * 48 * DMFE_SLOP defines the amount by which the chip may read beyond 49 * the end of a buffer or descriptor, apparently 6-8 dwords :( 50 * We have to make sure this doesn't cause it to access unallocated 51 * or unmapped memory. 52 * 53 * DMFE_BUF_SIZE must be at least (ETHERMAX + ETHERFCSL + DMFE_SLOP) 54 * rounded up to a multiple of 4. 
Here we choose a power of two for
 * speed & simplicity at the cost of a bit more memory.
 *
 * However, the buffer length field in the TX/RX descriptors is only
 * eleven bits, so even though we allocate DMFE_BUF_SIZE (2048) bytes
 * per buffer, we tell the chip that they're only DMFE_BUF_SIZE_1
 * (2000) bytes each.
 *
 * DMFE_DMA_MODE defines the mode (STREAMING/CONSISTENT) used for
 * the data buffers.  The descriptors are always set up in CONSISTENT
 * mode.
 *
 * DMFE_HEADROOM defines how much space we'll leave in allocated
 * mblks before the first valid data byte.  This should be chosen
 * to be 2 modulo 4, so that once the ethernet header (14 bytes)
 * has been stripped off, the packet data will be 4-byte aligned.
 * The remaining space can be used by upstream modules to prepend
 * any headers required.
 *
 * Patchable globals:
 *
 *	dmfe_bus_modes: the bus mode bits to be put into CSR0.
 *		Setting READ_MULTIPLE in this register seems to cause
 *		the chip to generate a READ LINE command with a parity
 *		error!  Don't do it!
 *
 *	dmfe_setup_desc1: the value to be put into descriptor word 1
 *		when sending a SETUP packet.
 *
 *		Setting TX_LAST_DESC in desc1 in a setup packet seems
 *		to make the chip spontaneously reset internally - it
 *		attempts to give back the setup packet descriptor by
 *		writing to PCI address 00000000 - which may or may not
 *		get a MASTER ABORT - after which most of its registers
 *		seem to have either default values or garbage!
 *
 *		TX_FIRST_DESC doesn't seem to have the same effect but
 *		it isn't needed on a setup packet so we'll leave it out
 *		too, just in case it has some other weird side-effect.
 *
 *		The default hardware packet filtering mode is now
 *		HASH_AND_PERFECT (imperfect filtering of multicast
 *		packets and perfect filtering of unicast packets).
 * If this is found not to work reliably, setting the
 * TX_FILTER_TYPE1 bit will cause a switchover to using
 * HASH_ONLY mode (imperfect filtering of *all* packets).
 * Software will then perform the additional filtering
 * as required.
 */

#define	DMFE_PCI_RNUMBER	2	/* regset 2: operating regs, MEMORY space */
#define	DMFE_SLOP		(8*sizeof (uint32_t))	/* max chip overread */
#define	DMFE_BUF_SIZE		2048	/* allocated per buffer; power of two */
#define	DMFE_BUF_SIZE_1		2000	/* size advertised to chip (11-bit field) */
#define	DMFE_DMA_MODE		DDI_DMA_STREAMING	/* data buffers only */
#define	DMFE_HEADROOM		34	/* 2 mod 4: aligns payload after 14-byte header */

/* Patchable globals described in the NOTES comment above */
static uint32_t dmfe_bus_modes = TX_POLL_INTVL | CACHE_ALIGN;
static uint32_t dmfe_setup_desc1 = TX_SETUP_PACKET | SETUPBUF_SIZE |
	TX_FILTER_TYPE0;

/*
 * Some tunable parameters ...
 *	Number of RX/TX ring entries (128/128)
 *	Minimum number of TX ring slots to keep free (1)
 *	Low-water mark at which to try to reclaim TX ring slots (1)
 *	How often to take a TX-done interrupt (twice per ring cycle)
 *	Whether to reclaim TX ring entries on a TX-done interrupt (no)
 */

#define	DMFE_TX_DESC	128	/* Should be a multiple of 4 <= 256	*/
#define	DMFE_RX_DESC	128	/* Should be a multiple of 4 <= 256	*/

static uint32_t dmfe_rx_desc = DMFE_RX_DESC;	/* patchable ring sizes */
static uint32_t dmfe_tx_desc = DMFE_TX_DESC;
static uint32_t dmfe_tx_min_free = 1;		/* hard lower limit on free slots */
static uint32_t dmfe_tx_reclaim_level = 1;	/* soft limit: try reclaim below this */
static uint32_t dmfe_tx_int_factor = (DMFE_TX_DESC / 2) - 1;
static boolean_t dmfe_reclaim_on_done = B_FALSE;

/*
 * Time-related parameters:
 *
 *	We use a cyclic to provide a periodic callback; this is then used
 *	to check for TX-stall and poll the link status register.
 *
 *	DMFE_TICK is the interval between cyclic callbacks, in microseconds.
 *
 *	TX_STALL_TIME_100 is the timeout in microseconds between passing
 *	a packet to the chip for transmission and seeing that it's gone,
 *	when running at 100Mb/s.
If we haven't reclaimed at least one 145 * descriptor in this time we assume the transmitter has stalled 146 * and reset the chip. 147 * 148 * TX_STALL_TIME_10 is the equivalent timeout when running at 10Mb/s. 149 * 150 * LINK_POLL_TIME is the interval between checks on the link state 151 * when nothing appears to have happened (this is in addition to the 152 * case where we think we've detected a link change, and serves as a 153 * backup in case the quick link check doesn't work properly). 154 * 155 * Patchable globals: 156 * 157 * dmfe_tick_us: DMFE_TICK 158 * dmfe_tx100_stall_us: TX_STALL_TIME_100 159 * dmfe_tx10_stall_us: TX_STALL_TIME_10 160 * dmfe_link_poll_us: LINK_POLL_TIME 161 * 162 * These are then used in _init() to calculate: 163 * 164 * stall_100_tix[]: number of consecutive cyclic callbacks without a 165 * reclaim before the TX process is considered stalled, 166 * when running at 100Mb/s. The elements are indexed 167 * by transmit-engine-state. 168 * stall_10_tix[]: number of consecutive cyclic callbacks without a 169 * reclaim before the TX process is considered stalled, 170 * when running at 10Mb/s. The elements are indexed 171 * by transmit-engine-state. 
 *	factotum_tix: number of consecutive cyclic callbacks before waking
 *			up the factotum even though there doesn't appear to
 *			be anything for it to do
 */

#define	DMFE_TICK		25000		/* microseconds		*/
#define	TX_STALL_TIME_100	50000		/* microseconds		*/
#define	TX_STALL_TIME_10	200000		/* microseconds		*/
#define	LINK_POLL_TIME		5000000		/* microseconds		*/

/* Patchable copies of the intervals above (see comment) */
static uint32_t dmfe_tick_us = DMFE_TICK;
static uint32_t dmfe_tx100_stall_us = TX_STALL_TIME_100;
static uint32_t dmfe_tx10_stall_us = TX_STALL_TIME_10;
static uint32_t dmfe_link_poll_us = LINK_POLL_TIME;

/*
 * Calculated from above in _init()
 */

/* Tick counts, indexed by transmit-engine-state (see comment above) */
static uint32_t stall_100_tix[TX_PROCESS_MAX_STATE+1];
static uint32_t stall_10_tix[TX_PROCESS_MAX_STATE+1];
static uint32_t factotum_tix;
static uint32_t factotum_fast_tix;
static uint32_t factotum_start_tix;

/*
 * Property names
 */
static char localmac_propname[] = "local-mac-address";
static char opmode_propname[] = "opmode-reg-value";
static char debug_propname[] = "dmfe-debug-flags";

/* MAC-layer entry points (bodies appear later in this file) */
static int		dmfe_m_start(void *);
static void		dmfe_m_stop(void *);
static int		dmfe_m_promisc(void *, boolean_t);
static int		dmfe_m_multicst(void *, boolean_t, const uint8_t *);
static int		dmfe_m_unicst(void *, const uint8_t *);
static void		dmfe_m_ioctl(void *, queue_t *, mblk_t *);
static mblk_t		*dmfe_m_tx(void *, mblk_t *);
static int		dmfe_m_stat(void *, uint_t, uint64_t *);

/*
 * Callback vector registered with the MAC framework; positional
 * initializer, so the order must match mac_callbacks_t exactly.
 */
static mac_callbacks_t dmfe_m_callbacks = {
	(MC_IOCTL),
	dmfe_m_stat,
	dmfe_m_start,
	dmfe_m_stop,
	dmfe_m_promisc,
	dmfe_m_multicst,
	dmfe_m_unicst,
	dmfe_m_tx,
	dmfe_m_ioctl,
	NULL,
};


/*
 * Describes the chip's DMA engine
 */
static ddi_dma_attr_t dma_attr = {
	DMA_ATTR_V0,		/* dma_attr version */
	0,			/* dma_attr_addr_lo */
	(uint32_t)0xFFFFFFFF,	/* dma_attr_addr_hi */
	0x0FFFFFF,		/*
dma_attr_count_max */
	0x20,			/* dma_attr_align */
	0x7F,			/* dma_attr_burstsizes */
	1,			/* dma_attr_minxfer */
	(uint32_t)0xFFFFFFFF,	/* dma_attr_maxxfer */
	(uint32_t)0xFFFFFFFF,	/* dma_attr_seg */
	1,			/* dma_attr_sgllen */
	1,			/* dma_attr_granular */
	0			/* dma_attr_flags */
};

/*
 * DMA access attributes for registers and descriptors
 */
static ddi_device_acc_attr_t dmfe_reg_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};

/*
 * DMA access attributes for data: NOT to be byte swapped.
 */
static ddi_device_acc_attr_t dmfe_data_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC
};

static uchar_t dmfe_broadcast_addr[ETHERADDRL] = {
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff
};


/*
 * ========== Lowest-level chip register & ring access routines ==========
 */

/*
 * I/O register get/put routines
 *
 * Read/write a 32-bit chip register at byte offset <offset> from the
 * mapped register base, through the DDI access handle so that any
 * required byte-swapping is applied.
 */
uint32_t
dmfe_chip_get32(dmfe_t *dmfep, off_t offset)
{
	uint32_t *addr;

	addr = (void *)(dmfep->io_reg + offset);
	return (ddi_get32(dmfep->io_handle, addr));
}

void
dmfe_chip_put32(dmfe_t *dmfep, off_t offset, uint32_t value)
{
	uint32_t *addr;

	addr = (void *)(dmfep->io_reg + offset);
	ddi_put32(dmfep->io_handle, addr, value);
}

/*
 * TX/RX ring get/put routines
 *
 * Access word <offset> of descriptor <index> in the ring held in
 * <dma_p>; DESC_SIZE is the descriptor size in 32-bit words, so the
 * pointer arithmetic below is in uint32_t units.
 */
static uint32_t
dmfe_ring_get32(dma_area_t *dma_p, uint_t index, uint_t offset)
{
	uint32_t *addr;

	addr = (void *)dma_p->mem_va;
	return (ddi_get32(dma_p->acc_hdl, addr + index*DESC_SIZE + offset));
}

static void
dmfe_ring_put32(dma_area_t *dma_p, uint_t index, uint_t offset, uint32_t value)
{
	uint32_t *addr;

	addr = (void *)dma_p->mem_va;
	ddi_put32(dma_p->acc_hdl, addr + index*DESC_SIZE + offset, value);
}

/*
 * Setup buffer get/put routines
 *
 * Access 32-bit word <index> of the (separately mapped) setup-packet
 * buffer within <dma_p>.
 */
static uint32_t
dmfe_setup_get32(dma_area_t *dma_p, uint_t index)
{
	uint32_t *addr;

	addr = (void *)dma_p->setup_va;
	return (ddi_get32(dma_p->acc_hdl, addr + index));
}

static void
dmfe_setup_put32(dma_area_t *dma_p, uint_t index, uint32_t value)
{
	uint32_t *addr;

	addr = (void *)dma_p->setup_va;
	ddi_put32(dma_p->acc_hdl, addr + index, value);
}


/*
 * ========== Low-level chip & ring buffer manipulation ==========
 */

#define	DMFE_DBG	DMFE_DBG_REGS	/* debug flag for this code	*/

/*
 * dmfe_set_opmode() -- function to set operating mode
 *
 * Writes the soft copy of the operating mode (dmfep->opmode) to the
 * chip's CSR6 and waits briefly for it to take effect.  Caller must
 * hold the oplock.
 */
static void
dmfe_set_opmode(dmfe_t *dmfep)
{
	DMFE_DEBUG(("dmfe_set_opmode: opmode 0x%x", dmfep->opmode));

	ASSERT(mutex_owned(dmfep->oplock));

	dmfe_chip_put32(dmfep, OPN_MODE_REG, dmfep->opmode);
	drv_usecwait(10);
}

/*
 * dmfe_stop_chip() -- stop all chip processing & optionally reset the h/w
 *
 * <newstate> must be CHIP_STOPPED, CHIP_ERROR or CHIP_RESET; only
 * CHIP_RESET additionally performs a software reset via CSR0.
 * Caller must hold the oplock.
 */
static void
dmfe_stop_chip(dmfe_t *dmfep, enum chip_state newstate)
{
	ASSERT(mutex_owned(dmfep->oplock));

	/*
	 * Stop the chip:
	 *	disable all interrupts
	 *	stop TX/RX processes
	 *	clear the status bits for TX/RX stopped
	 * If required, reset the chip
	 * Record the new state
	 */
	dmfe_chip_put32(dmfep, INT_MASK_REG, 0);
	dmfep->opmode &= ~(START_TRANSMIT | START_RECEIVE);
	dmfe_set_opmode(dmfep);
	dmfe_chip_put32(dmfep, STATUS_REG, TX_STOPPED_INT | RX_STOPPED_INT);

	switch (newstate) {
	default:
		ASSERT(!"can't get here");
		return;

	case CHIP_STOPPED:
	case CHIP_ERROR:
		break;

	case CHIP_RESET:
		dmfe_chip_put32(dmfep, BUS_MODE_REG, SW_RESET);
		drv_usecwait(10);
		dmfe_chip_put32(dmfep, BUS_MODE_REG, 0);
		drv_usecwait(10);
		dmfe_chip_put32(dmfep, BUS_MODE_REG, dmfe_bus_modes);
		break;
	}

	dmfep->chip_state = newstate;
}

/*
 * Initialize transmit and receive descriptor rings,
and
 * set the chip to point to the first entry in each ring
 *
 * Every RX descriptor is chained to the next, given a buffer, and
 * handed to the chip (RX_OWN); every TX descriptor is chained and
 * given a buffer but left owned by the CPU.  Caller must hold ALL
 * three locks (op/rx/tx).
 */
static void
dmfe_init_rings(dmfe_t *dmfep)
{
	dma_area_t *descp;
	uint32_t pstart;	/* DVMA address of first descriptor */
	uint32_t pnext;		/* DVMA address of following descriptor */
	uint32_t pbuff;		/* DVMA address of matching data buffer */
	uint32_t desc1;
	int i;

	/*
	 * You need all the locks in order to rewrite the descriptor rings
	 */
	ASSERT(mutex_owned(dmfep->oplock));
	ASSERT(mutex_owned(dmfep->rxlock));
	ASSERT(mutex_owned(dmfep->txlock));

	/*
	 * Program the RX ring entries
	 */
	descp = &dmfep->rx_desc;
	pstart = descp->mem_dvma;
	pnext = pstart + sizeof (struct rx_desc_type);
	pbuff = dmfep->rx_buff.mem_dvma;
	desc1 = RX_CHAINING | DMFE_BUF_SIZE_1;

	for (i = 0; i < dmfep->rx.n_desc; ++i) {
		dmfe_ring_put32(descp, i, RD_NEXT, pnext);
		dmfe_ring_put32(descp, i, BUFFER1, pbuff);
		dmfe_ring_put32(descp, i, DESC1, desc1);
		dmfe_ring_put32(descp, i, DESC0, RX_OWN);

		pnext += sizeof (struct rx_desc_type);
		pbuff += DMFE_BUF_SIZE;
	}

	/*
	 * Fix up last entry & sync: close the ring by pointing the
	 * final descriptor back at the first one.
	 */
	dmfe_ring_put32(descp, --i, RD_NEXT, pstart);
	DMA_SYNC(descp, DDI_DMA_SYNC_FORDEV);
	dmfep->rx.next_free = 0;

	/*
	 * Set the base address of the RX descriptor list in CSR3
	 */
	DMFE_DEBUG(("RX descriptor VA: $%p (DVMA $%x)",
	    descp->mem_va, descp->mem_dvma));
	dmfe_chip_put32(dmfep, RX_BASE_ADDR_REG, descp->mem_dvma);

	/*
	 * Program the TX ring entries
	 */
	descp = &dmfep->tx_desc;
	pstart = descp->mem_dvma;
	pnext = pstart + sizeof (struct tx_desc_type);
	pbuff = dmfep->tx_buff.mem_dvma;
	desc1 = TX_CHAINING;

	for (i = 0; i < dmfep->tx.n_desc; ++i) {
		dmfe_ring_put32(descp, i, TD_NEXT, pnext);
		dmfe_ring_put32(descp, i, BUFFER1, pbuff);
		dmfe_ring_put32(descp, i, DESC1, desc1);
		dmfe_ring_put32(descp, i, DESC0, 0);

		pnext += sizeof (struct tx_desc_type);
		pbuff += DMFE_BUF_SIZE;
	}

	/*
	 * Fix up last entry & sync
	 */
	dmfe_ring_put32(descp, --i, TD_NEXT, pstart);
	DMA_SYNC(descp, DDI_DMA_SYNC_FORDEV);
	dmfep->tx.n_free = dmfep->tx.n_desc;
	dmfep->tx.next_free = dmfep->tx.next_busy = 0;

	/*
	 * Set the base address of the TX descriptor list in CSR4
	 */
	DMFE_DEBUG(("TX descriptor VA: $%p (DVMA $%x)",
	    descp->mem_va, descp->mem_dvma));
	dmfe_chip_put32(dmfep, TX_BASE_ADDR_REG, descp->mem_dvma);
}

/*
 * dmfe_start_chip() -- start the chip transmitting and/or receiving
 *
 * <mode> is some combination of START_TRANSMIT/START_RECEIVE; it is
 * OR'd into the soft opmode and written to the chip.  Caller must
 * hold the oplock.
 */
static void
dmfe_start_chip(dmfe_t *dmfep, int mode)
{
	ASSERT(mutex_owned(dmfep->oplock));

	dmfep->opmode |= mode;
	dmfe_set_opmode(dmfep);

	dmfe_chip_put32(dmfep, W_J_TIMER_REG, 0);
	/*
	 * Enable VLAN length mode (allows packets to be 4 bytes longer).
	 */
	dmfe_chip_put32(dmfep, W_J_TIMER_REG, VLAN_ENABLE);

	/*
	 * Clear any pending process-stopped interrupts
	 */
	dmfe_chip_put32(dmfep, STATUS_REG, TX_STOPPED_INT | RX_STOPPED_INT);
	dmfep->chip_state = mode & START_RECEIVE ? CHIP_TX_RX :
	    mode & START_TRANSMIT ? CHIP_TX_ONLY : CHIP_STOPPED;
}

/*
 * dmfe_enable_interrupts() -- enable our favourite set of interrupts.
 *
 * Normal interrupts:
 *	We always enable:
 *		RX_PKTDONE_INT		(packet received)
 *		TX_PKTDONE_INT		(TX complete)
 *	We never enable:
 *		TX_ALLDONE_INT		(next TX buffer not ready)
 *
 * Abnormal interrupts:
 *	We always enable:
 *		RX_STOPPED_INT
 *		TX_STOPPED_INT
 *		SYSTEM_ERR_INT
 *		RX_UNAVAIL_INT
 *	We never enable:
 *		RX_EARLY_INT
 *		RX_WATCHDOG_INT
 *		TX_JABBER_INT
 *		TX_EARLY_INT
 *		TX_UNDERFLOW_INT
 *		GP_TIMER_INT		(not valid in -9 chips)
 *		LINK_STATUS_INT		(not valid in -9 chips)
 */
static void
dmfe_enable_interrupts(dmfe_t *dmfep)
{
	ASSERT(mutex_owned(dmfep->oplock));

	/*
	 * Put 'the standard set of interrupts' in the interrupt mask register
	 */
	dmfep->imask =	RX_PKTDONE_INT | TX_PKTDONE_INT |
	    RX_STOPPED_INT | TX_STOPPED_INT | RX_UNAVAIL_INT | SYSTEM_ERR_INT;

	dmfe_chip_put32(dmfep, INT_MASK_REG,
	    NORMAL_SUMMARY_INT | ABNORMAL_SUMMARY_INT | dmfep->imask);
	dmfep->chip_state = CHIP_RUNNING;

	DMFE_DEBUG(("dmfe_enable_interrupts: imask 0x%x", dmfep->imask));
}

#undef	DMFE_DBG


/*
 * ========== RX side routines ==========
 */

#define	DMFE_DBG	DMFE_DBG_RECV	/* debug flag for this code	*/

/*
 * Function to update receive statistics on various errors
 *
 * <desc0> is the status word of a returned RX descriptor.  Caller
 * must hold the rxlock.
 */
static void
dmfe_update_rx_stats(dmfe_t *dmfep, uint32_t desc0)
{
	ASSERT(mutex_owned(dmfep->rxlock));

	/*
	 * The error summary bit and the error bits that it summarises
	 * are only valid if this is the last fragment.  Therefore, a
	 * fragment only contributes to the error statistics if both
	 * the last-fragment and error summary bits are set.
	 */
	if (((RX_LAST_DESC | RX_ERR_SUMMARY) & ~desc0) == 0) {
		dmfep->rx_stats_ierrors += 1;

		/*
		 * There are some other error bits in the descriptor for
		 * which there don't seem to be appropriate MAC statistics,
		 * notably RX_COLLISION and perhaps RX_DESC_ERR.  The
		 * latter may not be possible if it is supposed to indicate
		 * that one buffer has been filled with a partial packet
		 * and the next buffer required for the rest of the packet
		 * was not available, as all our buffers are more than large
		 * enough for a whole packet without fragmenting.
		 */

		if (desc0 & RX_OVERFLOW) {
			dmfep->rx_stats_overflow += 1;

		} else if (desc0 & RX_RUNT_FRAME)
			dmfep->rx_stats_short += 1;

		if (desc0 & RX_CRC)
			dmfep->rx_stats_fcs += 1;

		if (desc0 & RX_FRAME2LONG)
			dmfep->rx_stats_toolong += 1;
	}

	/*
	 * A receive watchdog timeout is counted as a MAC-level receive
	 * error.  Strangely, it doesn't set the packet error summary bit,
	 * according to the chip data sheet :-?
	 */
	if (desc0 & RX_RCV_WD_TO)
		dmfep->rx_stats_macrcv_errors += 1;

	if (desc0 & RX_DRIBBLING)
		dmfep->rx_stats_align += 1;

	if (desc0 & RX_MII_ERR)
		dmfep->rx_stats_macrcv_errors += 1;
}

/*
 * Receive incoming packet(s) and pass them up ...
 *
 * Walks the RX ring from <rx.next_free>, copying each good whole
 * packet into a freshly-allocated mblk and linking it onto the chain
 * that is returned to the caller (NULL if nothing was received).
 * Each descriptor is handed back to the chip (RX_OWN) whether or not
 * its packet was accepted.
 */
static mblk_t *
dmfe_getp(dmfe_t *dmfep)
{
	dma_area_t *descp;
	mblk_t **tail;		/* where to link the next accepted packet */
	mblk_t *head;
	mblk_t *mp;
	char *rxb;
	uchar_t *dp;
	uint32_t desc0;
	uint32_t misses;
	int packet_length;
	int index;

	mutex_enter(dmfep->rxlock);

	/*
	 * Update the missed frame statistic from the on-chip counter.
	 */
	misses = dmfe_chip_get32(dmfep, MISSED_FRAME_REG);
	dmfep->rx_stats_norcvbuf += (misses & MISSED_FRAME_MASK);

	/*
	 * sync (all) receive descriptors before inspecting them
	 */
	descp = &dmfep->rx_desc;
	DMA_SYNC(descp, DDI_DMA_SYNC_FORKERNEL);

	/*
	 * We should own at least one RX entry, since we've had a
	 * receive interrupt, but let's not be dogmatic about it.
	 */
	index = dmfep->rx.next_free;
	desc0 = dmfe_ring_get32(descp, index, DESC0);
	if (desc0 & RX_OWN)
		DMFE_DEBUG(("dmfe_getp: no work, desc0 0x%x", desc0));

	for (head = NULL, tail = &head; (desc0 & RX_OWN) == 0; ) {
		/*
		 * Maintain statistics for every descriptor returned
		 * to us by the chip ...
		 */
		DMFE_DEBUG(("dmfe_getp: desc0 0x%x", desc0));
		dmfe_update_rx_stats(dmfep, desc0);

		/*
		 * Check that the entry has both "packet start" and
		 * "packet end" flags.  We really shouldn't get packet
		 * fragments, 'cos all the RX buffers are bigger than
		 * the largest valid packet.  So we'll just drop any
		 * fragments we find & skip on to the next entry.
		 */
		if (((RX_FIRST_DESC | RX_LAST_DESC) & ~desc0) != 0) {
			DMFE_DEBUG(("dmfe_getp: dropping fragment"));
			goto skip;
		}

		/*
		 * A whole packet in one buffer.  We have to check error
		 * status and packet length before forwarding it upstream.
		 */
		if (desc0 & RX_ERR_SUMMARY) {
			DMFE_DEBUG(("dmfe_getp: dropping errored packet"));
			goto skip;
		}

		packet_length = (desc0 >> 16) & 0x3fff;
		if (packet_length > DMFE_MAX_PKT_SIZE) {
			DMFE_DEBUG(("dmfe_getp: dropping oversize packet, "
			    "length %d", packet_length));
			goto skip;
		} else if (packet_length < ETHERMIN) {
			/*
			 * Note that VLAN packet would be even larger,
			 * but we don't worry about dropping runt VLAN
			 * frames.
			 *
			 * This check is probably redundant, as well,
			 * since the hardware should drop RUNT frames.
			 */
			DMFE_DEBUG(("dmfe_getp: dropping undersize packet, "
			    "length %d", packet_length));
			goto skip;
		}

		/*
		 * Sync the data, so we can examine it; then check that
		 * the packet is really intended for us (remember that
		 * if we're using Imperfect Filtering, then the chip will
		 * receive unicast packets sent to stations whose addresses
		 * just happen to hash to the same value as our own; we
		 * discard these here so they don't get sent upstream ...)
		 */
		(void) ddi_dma_sync(dmfep->rx_buff.dma_hdl,
		    index * DMFE_BUF_SIZE, DMFE_BUF_SIZE,
		    DDI_DMA_SYNC_FORKERNEL);
		rxb = &dmfep->rx_buff.mem_va[index*DMFE_BUF_SIZE];


		/*
		 * We do not bother to check that the packet is really for
		 * us, we let the MAC framework make that check instead.
		 * This is especially important if we ever want to support
		 * multiple MAC addresses.
		 */

		/*
		 * Packet looks good; get a buffer to copy it into.  We
		 * allow some space at the front of the allocated buffer
		 * (HEADROOM) in case any upstream modules want to prepend
		 * some sort of header.  The value has been carefully chosen
		 * so that it also has the side-effect of making the packet
		 * *contents* 4-byte aligned, as required by NCA!
		 */
		mp = allocb(DMFE_HEADROOM + packet_length, 0);
		if (mp == NULL) {
			DMFE_DEBUG(("dmfe_getp: no buffer - dropping packet"));
			dmfep->rx_stats_norcvbuf += 1;
			goto skip;
		}

		/*
		 * Account for statistics of good packets.
		 * A nonzero bcmp() against the broadcast address
		 * means "multicast but not broadcast".
		 */
		dmfep->rx_stats_ipackets += 1;
		dmfep->rx_stats_rbytes += packet_length;
		if (desc0 & RX_MULTI_FRAME) {
			if (bcmp(rxb, dmfe_broadcast_addr, ETHERADDRL)) {
				dmfep->rx_stats_multi += 1;
			} else {
				dmfep->rx_stats_bcast += 1;
			}
		}

		/*
		 * Copy the packet into the STREAMS buffer
		 */
		dp = mp->b_rptr += DMFE_HEADROOM;
		mp->b_cont = mp->b_next = NULL;

		/*
		 * Don't worry about stripping the vlan tag, the MAC
		 * layer will take care of that for us.
		 */
		bcopy(rxb, dp, packet_length);

		/*
		 * Fix up the packet length (drop the trailing FCS),
		 * and link it to the chain
		 */
		mp->b_wptr = mp->b_rptr + packet_length - ETHERFCSL;
		*tail = mp;
		tail = &mp->b_next;

	skip:
		/*
		 * Return ownership of ring entry & advance to next
		 */
		dmfe_ring_put32(descp, index, DESC0, RX_OWN);
		index = NEXT(index, dmfep->rx.n_desc);
		desc0 = dmfe_ring_get32(descp, index, DESC0);
	}

	/*
	 * Remember where to start looking next time ...
	 */
	dmfep->rx.next_free = index;

	/*
	 * sync the receive descriptors that we've given back
	 * (actually, we sync all of them for simplicity), and
	 * wake the chip in case it had suspended receive
	 */
	DMA_SYNC(descp, DDI_DMA_SYNC_FORDEV);
	dmfe_chip_put32(dmfep, RX_POLL_REG, 0);

	mutex_exit(dmfep->rxlock);
	return (head);
}

#undef	DMFE_DBG


/*
 * ========== Primary TX side routines ==========
 */

#define	DMFE_DBG	DMFE_DBG_SEND	/* debug flag for this code	*/

/*
 * TX ring management:
 *
 * There are <tx.n_desc> entries in the ring, of which those from
 * <tx.next_free> round to but not including <tx.next_busy> must
 * be owned by the CPU.  The number of such entries should equal
 * <tx.n_free>; but there may also be some more entries which the
 * chip has given back but which we haven't yet accounted for.
819 * The routine dmfe_reclaim_tx_desc() adjusts the indexes & counts 820 * as it discovers such entries. 821 * 822 * Initially, or when the ring is entirely free: 823 * C = Owned by CPU 824 * D = Owned by Davicom (DMFE) chip 825 * 826 * tx.next_free tx.n_desc = 16 827 * | 828 * v 829 * +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+ 830 * | C | C | C | C | C | C | C | C | C | C | C | C | C | C | C | C | 831 * +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+ 832 * ^ 833 * | 834 * tx.next_busy tx.n_free = 16 835 * 836 * On entry to reclaim() during normal use: 837 * 838 * tx.next_free tx.n_desc = 16 839 * | 840 * v 841 * +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+ 842 * | C | C | C | C | C | C | D | D | D | C | C | C | C | C | C | C | 843 * +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+ 844 * ^ 845 * | 846 * tx.next_busy tx.n_free = 9 847 * 848 * On exit from reclaim(): 849 * 850 * tx.next_free tx.n_desc = 16 851 * | 852 * v 853 * +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+ 854 * | C | C | C | C | C | C | D | D | D | C | C | C | C | C | C | C | 855 * +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+ 856 * ^ 857 * | 858 * tx.next_busy tx.n_free = 13 859 * 860 * The ring is considered "full" when only one entry is owned by 861 * the CPU; thus <tx.n_free> should always be >= 1. 
862 * 863 * tx.next_free tx.n_desc = 16 864 * | 865 * v 866 * +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+ 867 * | D | D | D | D | D | C | D | D | D | D | D | D | D | D | D | D | 868 * +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+ 869 * ^ 870 * | 871 * tx.next_busy tx.n_free = 1 872 */ 873 874 /* 875 * Function to update transmit statistics on various errors 876 */ 877 static void 878 dmfe_update_tx_stats(dmfe_t *dmfep, int index, uint32_t desc0, uint32_t desc1) 879 { 880 uint32_t collisions; 881 uint32_t errbits; 882 uint32_t errsum; 883 884 ASSERT(mutex_owned(dmfep->txlock)); 885 886 collisions = ((desc0 >> 3) & 0x0f); 887 errsum = desc0 & TX_ERR_SUMMARY; 888 errbits = desc0 & (TX_UNDERFLOW | TX_LATE_COLL | TX_CARRIER_LOSS | 889 TX_NO_CARRIER | TX_EXCESS_COLL | TX_JABBER_TO); 890 if ((errsum == 0) != (errbits == 0)) { 891 dmfe_log(dmfep, "dubious TX error status 0x%x", desc0); 892 desc0 |= TX_ERR_SUMMARY; 893 } 894 895 if (desc0 & TX_ERR_SUMMARY) { 896 dmfep->tx_stats_oerrors += 1; 897 898 /* 899 * If we ever see a transmit jabber timeout, we count it 900 * as a MAC-level transmit error; but we probably won't 901 * see it as it causes an Abnormal interrupt and we reset 902 * the chip in order to recover 903 */ 904 if (desc0 & TX_JABBER_TO) { 905 dmfep->tx_stats_macxmt_errors += 1; 906 dmfep->tx_stats_jabber += 1; 907 } 908 909 if (desc0 & TX_UNDERFLOW) 910 dmfep->tx_stats_underflow += 1; 911 else if (desc0 & TX_LATE_COLL) 912 dmfep->tx_stats_xmtlatecoll += 1; 913 914 if (desc0 & (TX_CARRIER_LOSS | TX_NO_CARRIER)) 915 dmfep->tx_stats_nocarrier += 1; 916 917 if (desc0 & TX_EXCESS_COLL) { 918 dmfep->tx_stats_excoll += 1; 919 collisions = 16; 920 } 921 } else { 922 int bit = index % NBBY; 923 int byt = index / NBBY; 924 925 if (dmfep->tx_mcast[byt] & bit) { 926 dmfep->tx_mcast[byt] &= ~bit; 927 dmfep->tx_stats_multi += 1; 928 929 } else if (dmfep->tx_bcast[byt] & bit) { 930 dmfep->tx_bcast[byt] &= ~bit; 931 
dmfep->tx_stats_bcast += 1; 932 } 933 934 dmfep->tx_stats_opackets += 1; 935 dmfep->tx_stats_obytes += desc1 & TX_BUFFER_SIZE1; 936 } 937 938 if (collisions == 1) 939 dmfep->tx_stats_first_coll += 1; 940 else if (collisions != 0) 941 dmfep->tx_stats_multi_coll += 1; 942 dmfep->tx_stats_collisions += collisions; 943 944 if (desc0 & TX_DEFERRED) 945 dmfep->tx_stats_defer += 1; 946 } 947 948 /* 949 * Reclaim all the ring entries that the chip has returned to us ... 950 * 951 * Returns B_FALSE if no entries could be reclaimed. Otherwise, reclaims 952 * as many as possible, restarts the TX stall timeout, and returns B_TRUE. 953 */ 954 static boolean_t 955 dmfe_reclaim_tx_desc(dmfe_t *dmfep) 956 { 957 dma_area_t *descp; 958 uint32_t desc0; 959 uint32_t desc1; 960 int i; 961 962 ASSERT(mutex_owned(dmfep->txlock)); 963 964 /* 965 * sync transmit descriptor ring before looking at it 966 */ 967 descp = &dmfep->tx_desc; 968 DMA_SYNC(descp, DDI_DMA_SYNC_FORKERNEL); 969 970 /* 971 * Early exit if there are no descriptors to reclaim, either 972 * because they're all reclaimed already, or because the next 973 * one is still owned by the chip ... 974 */ 975 i = dmfep->tx.next_busy; 976 if (i == dmfep->tx.next_free) 977 return (B_FALSE); 978 desc0 = dmfe_ring_get32(descp, i, DESC0); 979 if (desc0 & TX_OWN) 980 return (B_FALSE); 981 982 /* 983 * Reclaim as many descriptors as possible ... 
984 */ 985 for (;;) { 986 desc1 = dmfe_ring_get32(descp, i, DESC1); 987 ASSERT((desc1 & (TX_SETUP_PACKET | TX_LAST_DESC)) != 0); 988 989 if (desc1 & TX_SETUP_PACKET) { 990 /* 991 * Setup packet - restore buffer address 992 */ 993 ASSERT(dmfe_ring_get32(descp, i, BUFFER1) == 994 descp->setup_dvma); 995 dmfe_ring_put32(descp, i, BUFFER1, 996 dmfep->tx_buff.mem_dvma + i*DMFE_BUF_SIZE); 997 } else { 998 /* 999 * Regular packet - just update stats 1000 */ 1001 ASSERT(dmfe_ring_get32(descp, i, BUFFER1) == 1002 dmfep->tx_buff.mem_dvma + i*DMFE_BUF_SIZE); 1003 dmfe_update_tx_stats(dmfep, i, desc0, desc1); 1004 } 1005 1006 #if DMFEDEBUG 1007 /* 1008 * We can use one of the SPARE bits in the TX descriptor 1009 * to track when a ring buffer slot is reclaimed. Then 1010 * we can deduce the last operation on a slot from the 1011 * top half of DESC0: 1012 * 1013 * 0x8000 xxxx given to DMFE chip (TX_OWN) 1014 * 0x7fff xxxx returned but not yet reclaimed 1015 * 0x3fff xxxx reclaimed 1016 */ 1017 #define TX_PEND_RECLAIM (1UL<<30) 1018 dmfe_ring_put32(descp, i, DESC0, desc0 & ~TX_PEND_RECLAIM); 1019 #endif /* DMFEDEBUG */ 1020 1021 /* 1022 * Update count & index; we're all done if the ring is 1023 * now fully reclaimed, or the next entry if still owned 1024 * by the chip ... 1025 */ 1026 dmfep->tx.n_free += 1; 1027 i = NEXT(i, dmfep->tx.n_desc); 1028 if (i == dmfep->tx.next_free) 1029 break; 1030 desc0 = dmfe_ring_get32(descp, i, DESC0); 1031 if (desc0 & TX_OWN) 1032 break; 1033 } 1034 1035 dmfep->tx.next_busy = i; 1036 dmfep->tx_pending_tix = 0; 1037 return (B_TRUE); 1038 } 1039 1040 /* 1041 * Send the message in the message block chain <mp>. 1042 * 1043 * The message is freed if and only if its contents are successfully copied 1044 * and queued for transmission (so that the return value is B_TRUE). 1045 * If we can't queue the message, the return value is B_FALSE and 1046 * the message is *not* freed. 
 *
 * This routine handles the special case of <mp> == NULL, which indicates
 * that we want to "send" the special "setup packet" allocated during
 * startup.  We have to use some different flags in the packet descriptor
 * to say it's a setup packet (from the global <dmfe_setup_desc1>), and the
 * setup packet *isn't* freed after use.
 */
static boolean_t
dmfe_send_msg(dmfe_t *dmfep, mblk_t *mp)
{
	dma_area_t *descp;
	mblk_t *bp;
	char *txb;
	uint32_t desc1;
	uint32_t index;
	size_t totlen;
	size_t mblen;

	/*
	 * If the number of free slots is below the reclaim threshold
	 * (soft limit), we'll try to reclaim some.  If we fail, and
	 * the number of free slots is also below the minimum required
	 * (the hard limit, usually 1), then we can't send the packet.
	 */
	mutex_enter(dmfep->txlock);
	if (dmfep->tx.n_free <= dmfe_tx_reclaim_level &&
	    dmfe_reclaim_tx_desc(dmfep) == B_FALSE &&
	    dmfep->tx.n_free <= dmfe_tx_min_free) {
		/*
		 * Resource shortage - return B_FALSE so the packet
		 * will be queued for retry after the next TX-done
		 * interrupt.
		 */
		mutex_exit(dmfep->txlock);
		DMFE_DEBUG(("dmfe_send_msg: no free descriptors"));
		return (B_FALSE);
	}

	/*
	 * There's a slot available, so claim it by incrementing
	 * the next-free index and decrementing the free count.
	 * If the ring is currently empty, we also restart the
	 * stall-detect timer.  The ASSERTions check that our
	 * invariants still hold:
	 *	the next-free index must not match the next-busy index
	 *	there must still be at least one free entry
	 * After this, we now have exclusive ownership of the ring
	 * entry (and matching buffer) indicated by <index>, so we
	 * don't need to hold the TX lock any longer
	 */
	index = dmfep->tx.next_free;
	dmfep->tx.next_free = NEXT(index, dmfep->tx.n_desc);
	ASSERT(dmfep->tx.next_free != dmfep->tx.next_busy);
	/* Ring was completely free before this claim: restart stall timer */
	if (dmfep->tx.n_free-- == dmfep->tx.n_desc)
		dmfep->tx_pending_tix = 0;
	ASSERT(dmfep->tx.n_free >= 1);
	mutex_exit(dmfep->txlock);

	/*
	 * Check the ownership of the ring entry ...
	 */
	descp = &dmfep->tx_desc;
	ASSERT((dmfe_ring_get32(descp, index, DESC0) & TX_OWN) == 0);

	if (mp == NULL) {
		/*
		 * Indicates we should send a SETUP packet, which we do by
		 * temporarily switching the BUFFER1 pointer in the ring
		 * entry.  The reclaim routine will restore BUFFER1 to its
		 * usual value.
		 *
		 * Note that as the setup packet is tagged on the end of
		 * the TX ring, when we sync the descriptor we're also
		 * implicitly syncing the setup packet - hence, we don't
		 * need a separate ddi_dma_sync() call here.
		 */
		desc1 = dmfe_setup_desc1;
		dmfe_ring_put32(descp, index, BUFFER1, descp->setup_dvma);
	} else {
		/*
		 * A regular packet; we copy the data into a pre-mapped
		 * buffer, which avoids the overhead (and complication)
		 * of mapping/unmapping STREAMS buffers and keeping hold
		 * of them until the DMA has completed.
		 *
		 * Because all buffers are the same size, and larger
		 * than the longest single valid message, we don't have
		 * to bother about splitting the message across multiple
		 * buffers.
		 */
		txb = &dmfep->tx_buff.mem_va[index*DMFE_BUF_SIZE];
		totlen = 0;
		bp = mp;

		/*
		 * Copy all (remaining) mblks in the message ...
		 *
		 * NOTE(review): once <totlen> exceeds DMFE_MAX_PKT_SIZE,
		 * further fragments are silently not copied, although
		 * <totlen> keeps accumulating; on a non-DEBUG build the
		 * only guard is the mask below - confirm oversize mblk
		 * chains cannot reach this point.
		 */
		for (; bp != NULL; bp = bp->b_cont) {
			mblen = MBLKL(bp);
			if ((totlen += mblen) <= DMFE_MAX_PKT_SIZE) {
				bcopy(bp->b_rptr, txb, mblen);
				txb += mblen;
			}
		}

		/*
		 * Is this a multicast or broadcast packet?  We do
		 * this so that we can track statistics accurately
		 * when we reclaim it.  (txb[0] bit 0 is the group
		 * bit of the destination ethernet address.)
		 */
		txb = &dmfep->tx_buff.mem_va[index*DMFE_BUF_SIZE];
		if (txb[0] & 0x1) {
			if (bcmp(txb, dmfe_broadcast_addr, ETHERADDRL) == 0) {
				dmfep->tx_bcast[index / NBBY] |=
				    (1 << (index % NBBY));
			} else {
				dmfep->tx_mcast[index / NBBY] |=
				    (1 << (index % NBBY));
			}
		}

		/*
		 * We've reached the end of the chain; and we should have
		 * collected no more than DMFE_MAX_PKT_SIZE bytes into our
		 * buffer.  Note that the <size> field in the descriptor is
		 * only 11 bits, so bigger packets would be a problem!
		 */
		ASSERT(bp == NULL);
		ASSERT(totlen <= DMFE_MAX_PKT_SIZE);
		totlen &= TX_BUFFER_SIZE1;
		desc1 = TX_FIRST_DESC | TX_LAST_DESC | totlen;

		/* Flush the copied data out to the device before handoff */
		(void) ddi_dma_sync(dmfep->tx_buff.dma_hdl,
		    index * DMFE_BUF_SIZE, DMFE_BUF_SIZE, DDI_DMA_SYNC_FORDEV);
	}

	/*
	 * Update ring descriptor entries, sync them, and wake up the
	 * transmit process.  Only 1 in every <dmfe_tx_int_factor+1>
	 * descriptors requests a TX-done interrupt, to limit the
	 * interrupt rate.  DESC0 (the OWN bit) must be written last.
	 */
	if ((index & dmfe_tx_int_factor) == 0)
		desc1 |= TX_INT_ON_COMP;
	desc1 |= TX_CHAINING;
	dmfe_ring_put32(descp, index, DESC1, desc1);
	dmfe_ring_put32(descp, index, DESC0, TX_OWN);
	DMA_SYNC(descp, DDI_DMA_SYNC_FORDEV);
	dmfe_chip_put32(dmfep, TX_POLL_REG, 0);

	/*
	 * Finally, free the message & return success
	 */
	if (mp)
		freemsg(mp);
	return (B_TRUE);
}

/*
 * dmfe_m_tx() -- send a chain of packets
 *
 * Called when packet(s) are ready to be transmitted. A pointer to an
 * M_DATA message that contains the packet is passed to this routine.
 * The complete LLC header is contained in the message's first message
 * block, and the remainder of the packet is contained within
 * additional M_DATA message blocks linked to the first message block.
 *
 * Additional messages may be passed by linking with b_next.
1213 */ 1214 static mblk_t * 1215 dmfe_m_tx(void *arg, mblk_t *mp) 1216 { 1217 dmfe_t *dmfep = arg; /* private device info */ 1218 mblk_t *next; 1219 1220 ASSERT(mp != NULL); 1221 ASSERT(dmfep->mac_state == DMFE_MAC_STARTED); 1222 1223 if (dmfep->chip_state != CHIP_RUNNING) 1224 return (mp); 1225 1226 while (mp != NULL) { 1227 next = mp->b_next; 1228 mp->b_next = NULL; 1229 if (!dmfe_send_msg(dmfep, mp)) { 1230 mp->b_next = next; 1231 break; 1232 } 1233 mp = next; 1234 } 1235 1236 return (mp); 1237 } 1238 1239 #undef DMFE_DBG 1240 1241 1242 /* 1243 * ========== Address-setting routines (TX-side) ========== 1244 */ 1245 1246 #define DMFE_DBG DMFE_DBG_ADDR /* debug flag for this code */ 1247 1248 /* 1249 * Find the index of the relevant bit in the setup packet. 1250 * This must mirror the way the hardware will actually calculate it! 1251 */ 1252 static uint32_t 1253 dmfe_hash_index(const uint8_t *address) 1254 { 1255 uint32_t const POLY = HASH_POLY; 1256 uint32_t crc = HASH_CRC; 1257 uint32_t index; 1258 uint32_t msb; 1259 uchar_t currentbyte; 1260 int byteslength; 1261 int shift; 1262 int bit; 1263 1264 for (byteslength = 0; byteslength < ETHERADDRL; ++byteslength) { 1265 currentbyte = address[byteslength]; 1266 for (bit = 0; bit < 8; ++bit) { 1267 msb = crc >> 31; 1268 crc <<= 1; 1269 if (msb ^ (currentbyte & 1)) { 1270 crc ^= POLY; 1271 crc |= 0x00000001; 1272 } 1273 currentbyte >>= 1; 1274 } 1275 } 1276 1277 for (index = 0, bit = 23, shift = 8; shift >= 0; ++bit, --shift) 1278 index |= (((crc >> bit) & 1) << shift); 1279 1280 return (index); 1281 } 1282 1283 /* 1284 * Find and set/clear the relevant bit in the setup packet hash table 1285 * This must mirror the way the hardware will actually interpret it! 
 */
static void
dmfe_update_hash(dmfe_t *dmfep, uint32_t index, boolean_t val)
{
	dma_area_t *descp;
	uint32_t tmp;

	ASSERT(mutex_owned(dmfep->oplock));

	/*
	 * Each 32-bit word of the setup packet carries 16 filter bits,
	 * so <index>/16 selects the word and <index>%16 the bit within it.
	 */
	descp = &dmfep->tx_desc;
	tmp = dmfe_setup_get32(descp, index/16);
	if (val)
		tmp |= 1 << (index%16);
	else
		tmp &= ~(1 << (index%16));
	dmfe_setup_put32(descp, index/16, tmp);
}

/*
 * Update the refcount for the bit in the setup packet corresponding
 * to the specified address; if it changes between zero & nonzero,
 * also update the bitmap itself & return B_TRUE, so that the caller
 * knows to re-send the setup packet.  Otherwise (only the refcount
 * changed), return B_FALSE
 */
static boolean_t
dmfe_update_mcast(dmfe_t *dmfep, const uint8_t *mca, boolean_t val)
{
	uint32_t index;
	uint8_t *refp;
	boolean_t change;

	index = dmfe_hash_index(mca);
	refp = &dmfep->mcast_refs[index];
	/*
	 * Adding: post-increment, so <change> is true on the 0 -> 1
	 * transition.  Removing: pre-decrement, so <change> is true
	 * on the 1 -> 0 transition.
	 */
	change = (val ? (*refp)++ : --(*refp)) == 0;

	if (change)
		dmfe_update_hash(dmfep, index, val);

	return (change);
}

/*
 * "Transmit" the (possibly updated) magic setup packet
 */
static int
dmfe_send_setup(dmfe_t *dmfep)
{
	int status;

	ASSERT(mutex_owned(dmfep->oplock));

	/*
	 * If the chip isn't running, we can't really send the setup frame
	 * now but it doesn't matter, 'cos it will be sent when the transmit
	 * process is restarted (see dmfe_start()).
	 */
	if ((dmfep->opmode & START_TRANSMIT) == 0)
		return (0);

	/*
	 * "Send" the setup frame.  If it fails (e.g. no resources),
	 * set a flag; then the factotum will retry the "send".  Once
	 * it works, we can clear the flag no matter how many attempts
	 * had previously failed.  We tell the caller that it worked
	 * whether it did or not; after all, it *will* work eventually.
	 */
	status = dmfe_send_msg(dmfep, NULL);
	dmfep->need_setup = status ? B_FALSE : B_TRUE;
	return (0);
}

/*
 * dmfe_m_unicst() -- set the physical network address
 */
static int
dmfe_m_unicst(void *arg, const uint8_t *macaddr)
{
	dmfe_t *dmfep = arg;
	int status;
	int index;

	/*
	 * Update our current address and send out a new setup packet
	 *
	 * Here we accommodate the use of HASH_ONLY or HASH_AND_PERFECT
	 * filtering modes (we don't support PERFECT_ONLY or INVERSE modes).
	 *
	 * It is said that there is a bug in the 21140 where it fails to
	 * receive packets addressed to the specified perfect filter address.
	 * If the same bug is present in the DM9102A, the TX_FILTER_TYPE1
	 * bit should be set in the module variable dmfe_setup_desc1.
	 *
	 * If TX_FILTER_TYPE1 is set, we will use HASH_ONLY filtering.
	 * In this mode, *all* incoming addresses are hashed and looked
	 * up in the bitmap described by the setup packet.  Therefore,
	 * the bit representing the station address has to be added to
	 * the table before sending it out.  If the address is changed,
	 * the old entry should be removed before the new entry is made.
	 *
	 * NOTE: in this mode, unicast packets that are not intended for
	 * this station may be received; it is up to software to filter
	 * them out afterwards!
	 *
	 * If TX_FILTER_TYPE1 is *not* set, we will use HASH_AND_PERFECT
	 * filtering.  In this mode, multicast addresses are hashed and
	 * checked against the bitmap, while unicast addresses are simply
	 * matched against the one physical address specified in the setup
	 * packet.  This means that we shouldn't receive unicast packets
	 * that aren't intended for us (but software still has to filter
	 * multicast packets just the same).
	 *
	 * Whichever mode we're using, we have to enter the broadcast
	 * address into the multicast filter map too, so we do this on
	 * the first time through after attach or reset.
	 */
	mutex_enter(dmfep->oplock);

	/* In HASH_ONLY mode, swap the old station address for the new */
	if (dmfep->addr_set && dmfe_setup_desc1 & TX_FILTER_TYPE1)
		(void) dmfe_update_mcast(dmfep, dmfep->curr_addr, B_FALSE);
	if (dmfe_setup_desc1 & TX_FILTER_TYPE1)
		(void) dmfe_update_mcast(dmfep, macaddr, B_TRUE);
	if (!dmfep->addr_set)
		(void) dmfe_update_mcast(dmfep, dmfe_broadcast_addr, B_TRUE);

	/*
	 * Remember the new current address
	 */
	ethaddr_copy(macaddr, dmfep->curr_addr);
	dmfep->addr_set = B_TRUE;

	/*
	 * Install the new physical address into the proper position in
	 * the setup frame; this is only used if we select hash+perfect
	 * filtering, but we'll put it in anyway.  The ugliness here is
	 * down to the usual war of the egg :(
	 * (Each 32-bit setup word holds one 16-bit, little-endian
	 * address fragment.)
	 */
	for (index = 0; index < ETHERADDRL; index += 2)
		dmfe_setup_put32(&dmfep->tx_desc, SETUPBUF_PHYS+index/2,
		    (macaddr[index+1] << 8) | macaddr[index]);

	/*
	 * Finally, we're ready to "transmit" the setup frame
	 */
	status = dmfe_send_setup(dmfep);
	mutex_exit(dmfep->oplock);

	return (status);
}

/*
 * dmfe_m_multicst() -- enable or disable a multicast address
 *
 * Program the hardware to enable/disable the multicast address
 * in "mca" (enable if add is true, otherwise disable it.)
 * We keep a refcount for each bit in the map, so that it still
 * works out properly if multiple addresses hash to the same bit.
 * dmfe_update_mcast() tells us whether the map actually changed;
 * if so, we have to re-"transmit" the magic setup packet.
1445 */ 1446 static int 1447 dmfe_m_multicst(void *arg, boolean_t add, const uint8_t *mca) 1448 { 1449 dmfe_t *dmfep = arg; /* private device info */ 1450 int status = 0; 1451 1452 mutex_enter(dmfep->oplock); 1453 if (dmfe_update_mcast(dmfep, mca, add)) 1454 status = dmfe_send_setup(dmfep); 1455 mutex_exit(dmfep->oplock); 1456 1457 return (status); 1458 } 1459 1460 #undef DMFE_DBG 1461 1462 1463 /* 1464 * ========== Internal state management entry points ========== 1465 */ 1466 1467 #define DMFE_DBG DMFE_DBG_GLD /* debug flag for this code */ 1468 1469 /* 1470 * These routines provide all the functionality required by the 1471 * corresponding MAC layer entry points, but don't update the MAC layer state 1472 * so they can be called internally without disturbing our record 1473 * of what MAC layer thinks we should be doing ... 1474 */ 1475 1476 /* 1477 * dmfe_stop() -- stop processing, don't reset h/w or rings 1478 */ 1479 static void 1480 dmfe_stop(dmfe_t *dmfep) 1481 { 1482 ASSERT(mutex_owned(dmfep->oplock)); 1483 1484 dmfe_stop_chip(dmfep, CHIP_STOPPED); 1485 } 1486 1487 /* 1488 * dmfe_reset() -- stop processing, reset h/w & rings to initial state 1489 */ 1490 static void 1491 dmfe_reset(dmfe_t *dmfep) 1492 { 1493 ASSERT(mutex_owned(dmfep->oplock)); 1494 ASSERT(mutex_owned(dmfep->rxlock)); 1495 ASSERT(mutex_owned(dmfep->txlock)); 1496 1497 dmfe_stop_chip(dmfep, CHIP_RESET); 1498 dmfe_init_rings(dmfep); 1499 } 1500 1501 /* 1502 * dmfe_start() -- start transmitting/receiving 1503 */ 1504 static void 1505 dmfe_start(dmfe_t *dmfep) 1506 { 1507 uint32_t gpsr; 1508 1509 ASSERT(mutex_owned(dmfep->oplock)); 1510 1511 ASSERT(dmfep->chip_state == CHIP_RESET || 1512 dmfep->chip_state == CHIP_STOPPED); 1513 1514 /* 1515 * Make opmode consistent with PHY duplex setting 1516 */ 1517 gpsr = dmfe_chip_get32(dmfep, PHY_STATUS_REG); 1518 if (gpsr & GPS_FULL_DUPLEX) 1519 dmfep->opmode |= FULL_DUPLEX; 1520 else 1521 dmfep->opmode &= ~FULL_DUPLEX; 1522 1523 /* 1524 * Start transmit 
processing 1525 * Set up the address filters 1526 * Start receive processing 1527 * Enable interrupts 1528 */ 1529 dmfe_start_chip(dmfep, START_TRANSMIT); 1530 (void) dmfe_send_setup(dmfep); 1531 drv_usecwait(10); 1532 dmfe_start_chip(dmfep, START_RECEIVE); 1533 dmfe_enable_interrupts(dmfep); 1534 } 1535 1536 /* 1537 * dmfe_restart - restart transmitting/receiving after error or suspend 1538 */ 1539 static void 1540 dmfe_restart(dmfe_t *dmfep) 1541 { 1542 ASSERT(mutex_owned(dmfep->oplock)); 1543 1544 /* 1545 * You need not only <oplock>, but also <rxlock> AND <txlock> 1546 * in order to reset the rings, but then <txlock> *mustn't* 1547 * be held across the call to dmfe_start() 1548 */ 1549 mutex_enter(dmfep->rxlock); 1550 mutex_enter(dmfep->txlock); 1551 dmfe_reset(dmfep); 1552 mutex_exit(dmfep->txlock); 1553 mutex_exit(dmfep->rxlock); 1554 if (dmfep->mac_state == DMFE_MAC_STARTED) 1555 dmfe_start(dmfep); 1556 } 1557 1558 1559 /* 1560 * ========== MAC-required management entry points ========== 1561 */ 1562 1563 /* 1564 * dmfe_m_stop() -- stop transmitting/receiving 1565 */ 1566 static void 1567 dmfe_m_stop(void *arg) 1568 { 1569 dmfe_t *dmfep = arg; /* private device info */ 1570 1571 /* 1572 * Just stop processing, then record new MAC state 1573 */ 1574 mutex_enter(dmfep->oplock); 1575 dmfe_stop(dmfep); 1576 dmfep->mac_state = DMFE_MAC_STOPPED; 1577 mutex_exit(dmfep->oplock); 1578 } 1579 1580 /* 1581 * dmfe_m_start() -- start transmitting/receiving 1582 */ 1583 static int 1584 dmfe_m_start(void *arg) 1585 { 1586 dmfe_t *dmfep = arg; /* private device info */ 1587 1588 /* 1589 * Start processing and record new MAC state 1590 */ 1591 mutex_enter(dmfep->oplock); 1592 dmfe_start(dmfep); 1593 dmfep->mac_state = DMFE_MAC_STARTED; 1594 mutex_exit(dmfep->oplock); 1595 1596 return (0); 1597 } 1598 1599 /* 1600 * dmfe_m_promisc() -- set or reset promiscuous mode on the board 1601 * 1602 * Program the hardware to enable/disable promiscuous and/or 1603 * 
 * receive-all-multicast modes.  Davicom don't document this
 * clearly, but it looks like we can do this on-the-fly (i.e.
 * without stopping & restarting the TX/RX processes).
 */
static int
dmfe_m_promisc(void *arg, boolean_t on)
{
	dmfe_t *dmfep = arg;

	mutex_enter(dmfep->oplock);
	dmfep->opmode &= ~(PROMISC_MODE | PASS_MULTICAST);
	if (on)
		dmfep->opmode |= PROMISC_MODE;
	dmfe_set_opmode(dmfep);
	mutex_exit(dmfep->oplock);

	return (0);
}

#undef	DMFE_DBG


/*
 * ========== Factotum, implemented as a softint handler ==========
 */

#define	DMFE_DBG	DMFE_DBG_FACT	/* debug flag for this code	*/

/*
 * The factotum is woken up when there's something to do that we'd rather
 * not do from inside a (high-level?) hardware interrupt handler.  Its
 * two main tasks are:
 *	reset & restart the chip after an error
 *	update & restart the chip after a link status change
 */
static uint_t
dmfe_factotum(caddr_t arg)
{
	dmfe_t *dmfep;

	dmfep = (void *)arg;
	ASSERT(dmfep->dmfe_guard == DMFE_GUARD);

	mutex_enter(dmfep->oplock);

	/* Clear the wakeup flag so further wakeups retrigger the softint */
	dmfep->factotum_flag = 0;
	DRV_KS_INC(dmfep, KS_FACTOTUM_RUN);

	/*
	 * Check for chip error ...
	 */
	if (dmfep->chip_state == CHIP_ERROR) {
		/*
		 * Error recovery required: reset the chip and the rings,
		 * then, if it's supposed to be running, kick it off again.
		 */
		DRV_KS_INC(dmfep, KS_RECOVERY);
		dmfe_restart(dmfep);
	} else if (dmfep->need_setup) {
		/* Retry a setup-packet "send" that previously failed */
		(void) dmfe_send_setup(dmfep);
	}
	mutex_exit(dmfep->oplock);

	/*
	 * Then, check the link state.  We need <milock> but not <oplock>
	 * to do this, but if something's changed, we need <oplock> as well
	 * in order to stop/restart the chip!  Note: we could simply hold
	 * <oplock> right through here, but we'd rather not 'cos checking
	 * the link state involves reading over the bit-serial MII bus,
	 * which takes ~500us even when nothing's changed.  Holding <oplock>
	 * would lock out the interrupt handler for the duration, so it's
	 * better to release it first and reacquire it only if needed.
	 */
	mutex_enter(dmfep->milock);
	if (dmfe_check_link(dmfep)) {
		mutex_enter(dmfep->oplock);
		dmfe_stop(dmfep);
		DRV_KS_INC(dmfep, KS_LINK_CHECK);
		if (dmfep->update_phy) {
			/*
			 * The chip may reset itself for some unknown
			 * reason.  If this happens, the chip will use
			 * default settings (for speed, duplex, and autoneg),
			 * which possibly aren't the user's desired settings.
			 */
			dmfe_update_phy(dmfep);
			dmfep->update_phy = B_FALSE;
		}
		dmfe_recheck_link(dmfep, B_FALSE);
		if (dmfep->mac_state == DMFE_MAC_STARTED)
			dmfe_start(dmfep);
		mutex_exit(dmfep->oplock);
	}
	mutex_exit(dmfep->milock);

	/*
	 * Keep MAC up-to-date about the state of the link ...
	 */
	mac_link_update(dmfep->mh, dmfep->link_state);

	return (DDI_INTR_CLAIMED);
}

/*
 * Wake the factotum: bump the named kstat and, if it isn't already
 * pending (flag was zero), trigger the soft interrupt.
 */
static void
dmfe_wake_factotum(dmfe_t *dmfep, int ks_id, const char *why)
{
	DMFE_DEBUG(("dmfe_wake_factotum: %s [%d] flag %d",
	    why, ks_id, dmfep->factotum_flag));

	ASSERT(mutex_owned(dmfep->oplock));
	DRV_KS_INC(dmfep, ks_id);

	if (dmfep->factotum_flag++ == 0)
		ddi_trigger_softintr(dmfep->factotum_id);
}

#undef	DMFE_DBG


/*
 * ========== Periodic Tasks (Cyclic handler & friends) ==========
 */

#define	DMFE_DBG	DMFE_DBG_TICK	/* debug flag for this code	*/

/*
 * Periodic tick tasks, run from the cyclic handler
 *
 * Check the state of the link and wake the factotum if necessary
 */
static void
dmfe_tick_link_check(dmfe_t *dmfep, uint32_t gpsr, uint32_t istat)
{
	link_state_t phy_state;
	link_state_t utp_state;
	const char *why;
	int ks_id;

	_NOTE(ARGUNUSED(istat))

	ASSERT(mutex_owned(dmfep->oplock));

	/*
	 * Is it time to wake the factotum?  We do so periodically, in
	 * case the fast check below doesn't always reveal a link change
	 */
	if (dmfep->link_poll_tix-- == 0) {
		dmfep->link_poll_tix = factotum_tix;
		why = "tick (link poll)";
		ks_id = KS_TICK_LINK_POLL;
	} else {
		why = NULL;
		ks_id = KS_TICK_LINK_STATE;
	}

	/*
	 * Has the link status changed?  If so, we might want to wake
	 * the factotum to deal with it.
	 */
	phy_state = (gpsr & GPS_LINK_STATUS) ? LINK_STATE_UP : LINK_STATE_DOWN;
	utp_state = (gpsr & GPS_UTP_SIG) ? LINK_STATE_UP : LINK_STATE_DOWN;
	if (phy_state != utp_state)
		why = "tick (phy <> utp)";
	else if ((dmfep->link_state == LINK_STATE_UP) &&
	    (phy_state == LINK_STATE_DOWN))
		why = "tick (UP -> DOWN)";
	else if (phy_state != dmfep->link_state) {
		/*
		 * Other transitions aren't acted on immediately; we
		 * just shorten the poll interval so the factotum gets
		 * to see them sooner.
		 */
		if (dmfep->link_poll_tix > factotum_fast_tix)
			dmfep->link_poll_tix = factotum_fast_tix;
	}

	if (why != NULL) {
		DMFE_DEBUG(("dmfe_%s: link %d phy %d utp %d",
		    why, dmfep->link_state, phy_state, utp_state));
		dmfe_wake_factotum(dmfep, ks_id, why);
	}
}

/*
 * Periodic tick tasks, run from the cyclic handler
 *
 * Check for TX stall; flag an error and wake the factotum if so.
 */
static void
dmfe_tick_stall_check(dmfe_t *dmfep, uint32_t gpsr, uint32_t istat)
{
	boolean_t tx_stall;
	uint32_t tx_state;
	uint32_t limit;

	ASSERT(mutex_owned(dmfep->oplock));

	/*
	 * Check for transmit stall ...
	 *
	 * IF there's at least one packet in the ring, AND the timeout
	 * has elapsed, AND we can't reclaim any descriptors, THEN we've
	 * stalled; we flag a chip error to trigger a reset-and-recover
	 * cycle.
	 *
	 * Note that the timeout limit is based on the transmit engine
	 * state; we allow the transmitter longer to make progress in
	 * some states than in others, based on observations of this
	 * chip's actual behaviour in the lab.
	 *
	 * By observation, we find that on about 1 in 10000 passes through
	 * here, the TX lock is already held.  In that case, we'll skip
	 * the check on this pass rather than wait.  Most likely, the send
	 * routine was holding the lock when the interrupt happened, and
	 * we'll succeed next time through.  In the event of a real stall,
	 * the TX ring will fill up, after which the send routine won't be
	 * called any more and then we're sure to get in.
	 */
	tx_stall = B_FALSE;
	if (mutex_tryenter(dmfep->txlock)) {
		if (dmfep->tx.n_free < dmfep->tx.n_desc) {
			tx_state = TX_PROCESS_STATE(istat);
			if (gpsr & GPS_LINK_100)
				limit = stall_100_tix[tx_state];
			else
				limit = stall_10_tix[tx_state];
			if (++dmfep->tx_pending_tix >= limit &&
			    dmfe_reclaim_tx_desc(dmfep) == B_FALSE) {
				dmfe_log(dmfep, "TX stall detected "
				    "after %d ticks in state %d; "
				    "automatic recovery initiated",
				    dmfep->tx_pending_tix, tx_state);
				tx_stall = B_TRUE;
			}
		}
		mutex_exit(dmfep->txlock);
	}

	if (tx_stall) {
		dmfe_stop_chip(dmfep, CHIP_ERROR);
		dmfe_wake_factotum(dmfep, KS_TX_STALL, "tick (TX stall)");
	}
}

/*
 * Cyclic callback handler
 */
static void
dmfe_cyclic(void *arg)
{
	dmfe_t *dmfep = arg;			/* private device info */
	uint32_t istat;
	uint32_t gpsr;

	/*
	 * If the chip's not RUNNING, there's nothing to do.
	 * If we can't get the mutex straight away, we'll just
	 * skip this pass; we'll be back soon enough anyway.
	 */
	if (dmfep->chip_state != CHIP_RUNNING)
		return;
	if (mutex_tryenter(dmfep->oplock) == 0)
		return;

	/*
	 * Recheck chip state (it might have been stopped since we
	 * checked above).  If still running, call each of the *tick*
	 * tasks.  They will check for link change, TX stall, etc ...
	 */
	if (dmfep->chip_state == CHIP_RUNNING) {
		istat = dmfe_chip_get32(dmfep, STATUS_REG);
		gpsr = dmfe_chip_get32(dmfep, PHY_STATUS_REG);
		dmfe_tick_link_check(dmfep, gpsr, istat);
		dmfe_tick_stall_check(dmfep, gpsr, istat);
	}

	DRV_KS_INC(dmfep, KS_CYCLIC_RUN);
	mutex_exit(dmfep->oplock);
}

#undef	DMFE_DBG


/*
 * ========== Hardware interrupt handler ==========
 */

#define	DMFE_DBG	DMFE_DBG_INT	/* debug flag for this code	*/

/*
 * dmfe_interrupt() -- handle chip interrupts
 */
static uint_t
dmfe_interrupt(caddr_t arg)
{
	dmfe_t *dmfep;			/* private device info	*/
	uint32_t interrupts;
	uint32_t istat;
	const char *msg;
	mblk_t *mp;
	boolean_t warning_msg = B_TRUE;

	dmfep = (void *)arg;

	/*
	 * A quick check as to whether the interrupt was from this
	 * device, before we even finish setting up all our local
	 * variables.  Note that reading the interrupt status register
	 * doesn't have any unpleasant side effects such as clearing
	 * the bits read, so it's quite OK to re-read it once we have
	 * determined that we are going to service this interrupt and
	 * grabbed the mutexen.
	 */
	istat = dmfe_chip_get32(dmfep, STATUS_REG);
	if ((istat & (NORMAL_SUMMARY_INT | ABNORMAL_SUMMARY_INT)) == 0)
		return (DDI_INTR_UNCLAIMED);

	/*
	 * Unfortunately, there can be a race condition between attach()
	 * adding the interrupt handler and initialising the mutexen,
	 * and the handler itself being called because of a pending
	 * interrupt.  So, we check <imask>; if it shows that interrupts
	 * haven't yet been enabled (and therefore we shouldn't really
	 * be here at all), we will just write back the value read from
	 * the status register, thus acknowledging (and clearing) *all*
	 * pending conditions without really servicing them, and claim
	 * the interrupt.
	 */
	if (dmfep->imask == 0) {
		DMFE_DEBUG(("dmfe_interrupt: early interrupt 0x%x", istat));
		dmfe_chip_put32(dmfep, STATUS_REG, istat);
		return (DDI_INTR_CLAIMED);
	}

	/*
	 * We're committed to servicing this interrupt, but we
	 * need to get the lock before going any further ...
	 */
	mutex_enter(dmfep->oplock);
	DRV_KS_INC(dmfep, KS_INTERRUPT);

	/*
	 * Identify bits that represent enabled interrupts ...
	 */
	istat |= dmfe_chip_get32(dmfep, STATUS_REG);
	interrupts = istat & dmfep->imask;
	ASSERT(interrupts != 0);

	DMFE_DEBUG(("dmfe_interrupt: istat 0x%x -> 0x%x", istat, interrupts));

	/*
	 * Check for any interrupts other than TX/RX done.
	 * If there are any, they are considered Abnormal
	 * and will cause the chip to be reset.
	 */
	if (interrupts & ~(RX_PKTDONE_INT | TX_PKTDONE_INT)) {
		if (istat & ABNORMAL_SUMMARY_INT) {
			/*
			 * Any Abnormal interrupts will lead to us
			 * resetting the chip, so we don't bother
			 * to clear each interrupt individually.
			 *
			 * Our main task here is to identify the problem,
			 * by pointing out the most significant unexpected
			 * bit.  Additional bits may well be consequences
			 * of the first problem, so we consider the possible
			 * causes in order of severity.
			 */
			if (interrupts & SYSTEM_ERR_INT) {
				switch (istat & SYSTEM_ERR_BITS) {
				case SYSTEM_ERR_M_ABORT:
					msg = "Bus Master Abort";
					break;

				case SYSTEM_ERR_T_ABORT:
					msg = "Bus Target Abort";
					break;

				case SYSTEM_ERR_PARITY:
					msg = "Parity Error";
					break;

				default:
					msg = "Unknown System Bus Error";
					break;
				}
			} else if (interrupts & RX_STOPPED_INT) {
				msg = "RX process stopped";
			} else if (interrupts & RX_UNAVAIL_INT) {
				msg = "RX buffer unavailable";
				warning_msg = B_FALSE;
			} else if (interrupts & RX_WATCHDOG_INT) {
				msg = "RX watchdog timeout?";
			} else if (interrupts & RX_EARLY_INT) {
				msg = "RX early interrupt?";
			} else if (interrupts & TX_STOPPED_INT) {
				msg = "TX process stopped";
			} else if (interrupts & TX_JABBER_INT) {
				msg = "TX jabber timeout";
			} else if (interrupts & TX_UNDERFLOW_INT) {
				msg = "TX underflow?";
			} else if (interrupts & TX_EARLY_INT) {
				msg = "TX early interrupt?";

			} else if (interrupts & LINK_STATUS_INT) {
				msg = "Link status change?";
			} else if (interrupts & GP_TIMER_INT) {
				msg = "Timer expired?";
			}

			/*
			 * NOTE(review): if an abnormal-summary interrupt
			 * fires with none of the bits tested above set in
			 * <interrupts>, <msg> is used uninitialized here.
			 * Presumably <imask> only ever enables the bits
			 * listed - confirm, or give <msg> a default.
			 */
			if (warning_msg)
				dmfe_warning(dmfep, "abnormal interrupt, "
				    "status 0x%x: %s", istat, msg);

			/*
			 * We don't want to run the entire reinitialisation
			 * code out of this (high-level?) interrupt, so we
			 * simply STOP the chip, and wake up the factotum
			 * to reinitalise it ...
			 */
			dmfe_stop_chip(dmfep, CHIP_ERROR);
			dmfe_wake_factotum(dmfep, KS_CHIP_ERROR,
			    "interrupt (error)");
		} else {
			/*
			 * We shouldn't really get here (it would mean
			 * there were some unprocessed enabled bits but
			 * they weren't Abnormal?), but we'll check just
			 * in case ...
			 */
			DMFE_DEBUG(("unexpected interrupt bits: 0x%x", istat));
		}
	}

	/*
	 * Acknowledge all the original bits - except in the case of an
	 * error, when we leave them unacknowledged so that the recovery
	 * code can see what was going on when the problem occurred ...
	 */
	if (dmfep->chip_state != CHIP_ERROR) {
		(void) dmfe_chip_put32(dmfep, STATUS_REG, istat);
		/*
		 * Read-after-write forces completion on PCI bus.
		 *
		 */
		(void) dmfe_chip_get32(dmfep, STATUS_REG);
	}


	/*
	 * We've finished talking to the chip, so we can drop <oplock>
	 * before handling the normal interrupts, which only involve
	 * manipulation of descriptors ...
	 */
	mutex_exit(dmfep->oplock);

	if (interrupts & RX_PKTDONE_INT)
		if ((mp = dmfe_getp(dmfep)) != NULL)
			mac_rx(dmfep->mh, NULL, mp);

	if (interrupts & TX_PKTDONE_INT) {
		/*
		 * The only reason for taking this interrupt is to give
		 * MAC a chance to schedule queued packets after a
		 * ring-full condition.  To minimise the number of
		 * redundant TX-Done interrupts, we only mark two of the
		 * ring descriptors as 'interrupt-on-complete' - all the
		 * others are simply handed back without an interrupt.
		 */
		if (dmfe_reclaim_on_done && mutex_tryenter(dmfep->txlock)) {
			(void) dmfe_reclaim_tx_desc(dmfep);
			mutex_exit(dmfep->txlock);
		}
		mac_tx_update(dmfep->mh);
	}

	return (DDI_INTR_CLAIMED);
}

#undef	DMFE_DBG


/*
 * ========== Statistics update handler ==========
 */

#define	DMFE_DBG	DMFE_DBG_STATS	/* debug flag for this code	*/

/*
 * mac(9E) statistics entry point: copy the requested counter into
 * <*val>.  All four locks are taken so the counters are consistent
 * with both the RX and TX sides, and TX descriptors are reclaimed
 * first so pending TX statistics are folded in.
 */
static int
dmfe_m_stat(void *arg, uint_t stat, uint64_t *val)
{
	dmfe_t *dmfep = arg;
	int rv = 0;

	mutex_enter(dmfep->milock);
	mutex_enter(dmfep->oplock);
	mutex_enter(dmfep->rxlock);
	mutex_enter(dmfep->txlock);

	/* make sure we have all the stats collected */
	(void) dmfe_reclaim_tx_desc(dmfep);

	switch (stat) {
	case MAC_STAT_IFSPEED:
		*val = dmfep->op_stats_speed;
		break;

	case MAC_STAT_IPACKETS:
		*val = dmfep->rx_stats_ipackets;
		break;

	case MAC_STAT_MULTIRCV:
		*val = dmfep->rx_stats_multi;
		break;

	case MAC_STAT_BRDCSTRCV:
		*val = dmfep->rx_stats_bcast;
		break;

	case MAC_STAT_RBYTES:
		*val = dmfep->rx_stats_rbytes;
		break;

	case MAC_STAT_IERRORS:
		*val = dmfep->rx_stats_ierrors;
		break;

	case MAC_STAT_NORCVBUF:
		*val = dmfep->rx_stats_norcvbuf;
		break;

	case MAC_STAT_COLLISIONS:
		*val = dmfep->tx_stats_collisions;
		break;

	case MAC_STAT_OERRORS:
		*val = dmfep->tx_stats_oerrors;
		break;

	case MAC_STAT_OPACKETS:
		*val = dmfep->tx_stats_opackets;
		break;

	case MAC_STAT_MULTIXMT:
		*val = dmfep->tx_stats_multi;
		break;

	case MAC_STAT_BRDCSTXMT:
		*val = dmfep->tx_stats_bcast;
		break;

	case MAC_STAT_OBYTES:
		*val = dmfep->tx_stats_obytes;
		break;

	case MAC_STAT_OVERFLOWS:
		*val = dmfep->rx_stats_overflow;
		break;

	case MAC_STAT_UNDERFLOWS:
*val = dmfep->tx_stats_underflow; 2158 break; 2159 2160 case ETHER_STAT_ALIGN_ERRORS: 2161 *val = dmfep->rx_stats_align; 2162 break; 2163 2164 case ETHER_STAT_FCS_ERRORS: 2165 *val = dmfep->rx_stats_fcs; 2166 break; 2167 2168 case ETHER_STAT_TOOLONG_ERRORS: 2169 *val = dmfep->rx_stats_toolong; 2170 break; 2171 2172 case ETHER_STAT_TOOSHORT_ERRORS: 2173 *val = dmfep->rx_stats_short; 2174 break; 2175 2176 case ETHER_STAT_MACRCV_ERRORS: 2177 *val = dmfep->rx_stats_macrcv_errors; 2178 break; 2179 2180 case ETHER_STAT_MACXMT_ERRORS: 2181 *val = dmfep->tx_stats_macxmt_errors; 2182 break; 2183 2184 case ETHER_STAT_JABBER_ERRORS: 2185 *val = dmfep->tx_stats_jabber; 2186 break; 2187 2188 case ETHER_STAT_CARRIER_ERRORS: 2189 *val = dmfep->tx_stats_nocarrier; 2190 break; 2191 2192 case ETHER_STAT_TX_LATE_COLLISIONS: 2193 *val = dmfep->tx_stats_xmtlatecoll; 2194 break; 2195 2196 case ETHER_STAT_EX_COLLISIONS: 2197 *val = dmfep->tx_stats_excoll; 2198 break; 2199 2200 case ETHER_STAT_DEFER_XMTS: 2201 *val = dmfep->tx_stats_defer; 2202 break; 2203 2204 case ETHER_STAT_FIRST_COLLISIONS: 2205 *val = dmfep->tx_stats_first_coll; 2206 break; 2207 2208 case ETHER_STAT_MULTI_COLLISIONS: 2209 *val = dmfep->tx_stats_multi_coll; 2210 break; 2211 2212 case ETHER_STAT_XCVR_INUSE: 2213 *val = dmfep->phy_inuse; 2214 break; 2215 2216 case ETHER_STAT_XCVR_ID: 2217 *val = dmfep->phy_id; 2218 break; 2219 2220 case ETHER_STAT_XCVR_ADDR: 2221 *val = dmfep->phy_addr; 2222 break; 2223 2224 case ETHER_STAT_LINK_DUPLEX: 2225 *val = dmfep->op_stats_duplex; 2226 break; 2227 2228 case ETHER_STAT_CAP_100T4: 2229 *val = dmfep->param_bmsr_100T4; 2230 break; 2231 2232 case ETHER_STAT_CAP_100FDX: 2233 *val = dmfep->param_bmsr_100fdx; 2234 break; 2235 2236 case ETHER_STAT_CAP_100HDX: 2237 *val = dmfep->param_bmsr_100hdx; 2238 break; 2239 2240 case ETHER_STAT_CAP_10FDX: 2241 *val = dmfep->param_bmsr_10fdx; 2242 break; 2243 2244 case ETHER_STAT_CAP_10HDX: 2245 *val = dmfep->param_bmsr_10hdx; 2246 break; 2247 2248 
case ETHER_STAT_CAP_AUTONEG: 2249 *val = dmfep->param_bmsr_autoneg; 2250 break; 2251 2252 case ETHER_STAT_CAP_REMFAULT: 2253 *val = dmfep->param_bmsr_remfault; 2254 break; 2255 2256 case ETHER_STAT_ADV_CAP_AUTONEG: 2257 *val = dmfep->param_autoneg; 2258 break; 2259 2260 case ETHER_STAT_ADV_CAP_100T4: 2261 *val = dmfep->param_anar_100T4; 2262 break; 2263 2264 case ETHER_STAT_ADV_CAP_100FDX: 2265 *val = dmfep->param_anar_100fdx; 2266 break; 2267 2268 case ETHER_STAT_ADV_CAP_100HDX: 2269 *val = dmfep->param_anar_100hdx; 2270 break; 2271 2272 case ETHER_STAT_ADV_CAP_10FDX: 2273 *val = dmfep->param_anar_10fdx; 2274 break; 2275 2276 case ETHER_STAT_ADV_CAP_10HDX: 2277 *val = dmfep->param_anar_10hdx; 2278 break; 2279 2280 case ETHER_STAT_ADV_REMFAULT: 2281 *val = dmfep->param_anar_remfault; 2282 break; 2283 2284 case ETHER_STAT_LP_CAP_AUTONEG: 2285 *val = dmfep->param_lp_autoneg; 2286 break; 2287 2288 case ETHER_STAT_LP_CAP_100T4: 2289 *val = dmfep->param_lp_100T4; 2290 break; 2291 2292 case ETHER_STAT_LP_CAP_100FDX: 2293 *val = dmfep->param_lp_100fdx; 2294 break; 2295 2296 case ETHER_STAT_LP_CAP_100HDX: 2297 *val = dmfep->param_lp_100hdx; 2298 break; 2299 2300 case ETHER_STAT_LP_CAP_10FDX: 2301 *val = dmfep->param_lp_10fdx; 2302 break; 2303 2304 case ETHER_STAT_LP_CAP_10HDX: 2305 *val = dmfep->param_lp_10hdx; 2306 break; 2307 2308 case ETHER_STAT_LP_REMFAULT: 2309 *val = dmfep->param_lp_remfault; 2310 break; 2311 2312 default: 2313 rv = ENOTSUP; 2314 } 2315 2316 mutex_exit(dmfep->txlock); 2317 mutex_exit(dmfep->rxlock); 2318 mutex_exit(dmfep->oplock); 2319 mutex_exit(dmfep->milock); 2320 2321 return (rv); 2322 } 2323 2324 #undef DMFE_DBG 2325 2326 2327 /* 2328 * ========== Ioctl handler & subfunctions ========== 2329 */ 2330 2331 #define DMFE_DBG DMFE_DBG_IOCTL /* debug flag for this code */ 2332 2333 /* 2334 * Loopback operation 2335 * 2336 * Support access to the internal loopback and external loopback 2337 * functions selected via the Operation Mode Register (OPR). 
 * These will be used by netlbtest (see BugId 4370609)
 *
 * Note that changing the loopback mode causes a stop/restart cycle
 *
 * It would be nice to evolve this to support the ioctls in sys/netlb.h,
 * but then it would be even better to use Brussels to configure this.
 */

/*
 * Handle the DMFE_GET/SET_LOOP_MODE ioctls.
 *
 * GET replies immediately (miocack) with the bitmask of supported
 * modes and returns IOC_DONE.  SET validates the requested mode and,
 * if it differs from the current one, updates <opmode> and returns
 * IOC_RESTART_ACK so the caller performs the stop/restart cycle and
 * then ACKs; if unchanged, returns IOC_ACK.
 *
 * Caller (dmfe_m_ioctl) holds <milock> and <oplock>, which protect
 * the read-modify-write of dmfep->opmode here.
 */
static enum ioc_reply
dmfe_loop_ioctl(dmfe_t *dmfep, queue_t *wq, mblk_t *mp, int cmd)
{
	loopback_t *loop_req_p;
	uint32_t loopmode;

	/* The payload must be a complete loopback_t */
	if (mp->b_cont == NULL || MBLKL(mp->b_cont) < sizeof (loopback_t))
		return (IOC_INVAL);

	loop_req_p = (void *)mp->b_cont->b_rptr;

	switch (cmd) {
	default:
		/*
		 * This should never happen ...
		 */
		dmfe_error(dmfep, "dmfe_loop_ioctl: invalid cmd 0x%x", cmd);
		return (IOC_INVAL);

	case DMFE_GET_LOOP_MODE:
		/*
		 * This doesn't return the current loopback mode - it
		 * returns a bitmask :-( of all possible loopback modes
		 */
		DMFE_DEBUG(("dmfe_loop_ioctl: GET_LOOP_MODE"));
		loop_req_p->loopback = DMFE_LOOPBACK_MODES;
		miocack(wq, mp, sizeof (loopback_t), 0);
		return (IOC_DONE);

	case DMFE_SET_LOOP_MODE:
		/*
		 * Select any of the various loopback modes
		 */
		DMFE_DEBUG(("dmfe_loop_ioctl: SET_LOOP_MODE %d",
		    loop_req_p->loopback));
		switch (loop_req_p->loopback) {
		default:
			return (IOC_INVAL);

		case DMFE_LOOPBACK_OFF:
			loopmode = LOOPBACK_OFF;
			break;

		case DMFE_PHY_A_LOOPBACK_ON:
			loopmode = LOOPBACK_PHY_A;
			break;

		case DMFE_PHY_D_LOOPBACK_ON:
			loopmode = LOOPBACK_PHY_D;
			break;

		case DMFE_INT_LOOPBACK_ON:
			loopmode = LOOPBACK_INTERNAL;
			break;
		}

		/* Only restart if the mode actually changes */
		if ((dmfep->opmode & LOOPBACK_MODE_MASK) != loopmode) {
			dmfep->opmode &= ~LOOPBACK_MODE_MASK;
			dmfep->opmode |= loopmode;
			return (IOC_RESTART_ACK);
		}

		return (IOC_ACK);
	}
}

/*
 * Specific dmfe IOCTLs, the mac
module handles the generic ones.
 */

/*
 * m_ioctl(9E) callback: dispatch loopback and NDD ioctls to their
 * handlers, perform any required stop/restart cycle, then send the
 * appropriate STREAMS reply (ACK/NAK/prepared reply) based on the
 * enum ioc_reply returned by the subfunction.
 */
static void
dmfe_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
{
	dmfe_t *dmfep = arg;
	struct iocblk *iocp;
	enum ioc_reply status;
	int cmd;

	/*
	 * Validate the command before bothering with the mutexen ...
	 */
	iocp = (void *)mp->b_rptr;
	cmd = iocp->ioc_cmd;
	switch (cmd) {
	default:
		DMFE_DEBUG(("dmfe_m_ioctl: unknown cmd 0x%x", cmd));
		miocnak(wq, mp, 0, EINVAL);
		return;

	case DMFE_SET_LOOP_MODE:
	case DMFE_GET_LOOP_MODE:
	case ND_GET:
	case ND_SET:
		break;
	}

	mutex_enter(dmfep->milock);
	mutex_enter(dmfep->oplock);

	switch (cmd) {
	default:
		/* Already filtered above, so this arm is unreachable */
		_NOTE(NOTREACHED)
		status = IOC_INVAL;
		break;

	case DMFE_SET_LOOP_MODE:
	case DMFE_GET_LOOP_MODE:
		status = dmfe_loop_ioctl(dmfep, wq, mp, cmd);
		break;

	case ND_GET:
	case ND_SET:
		status = dmfe_nd_ioctl(dmfep, wq, mp, cmd);
		break;
	}

	/*
	 * Do we need to restart?
	 */
	switch (status) {
	default:
		break;

	case IOC_RESTART_ACK:
	case IOC_RESTART:
		/*
		 * PHY parameters changed; we need to stop, update the
		 * PHY layer and restart before sending the reply or ACK
		 */
		dmfe_stop(dmfep);
		dmfe_update_phy(dmfep);
		dmfep->update_phy = B_FALSE;

		/*
		 * The link will now most likely go DOWN and UP, because
		 * we've changed the loopback state or the link parameters
		 * or autonegotiation.  So we have to check that it's
		 * settled down before we restart the TX/RX processes.
		 * The ioctl code will have planted some reason strings
		 * to explain what's happening, so the link state change
		 * messages won't be printed on the console . We wake the
		 * factotum to deal with link notifications, if any ...
		 */
		if (dmfe_check_link(dmfep)) {
			dmfe_recheck_link(dmfep, B_TRUE);
			dmfe_wake_factotum(dmfep, KS_LINK_CHECK, "ioctl");
		}

		/* Only restart TX/RX if they were running beforehand */
		if (dmfep->mac_state == DMFE_MAC_STARTED)
			dmfe_start(dmfep);
		break;
	}

	/*
	 * The 'reasons-for-link-change', if any, don't apply any more
	 */
	mutex_exit(dmfep->oplock);
	mutex_exit(dmfep->milock);

	/*
	 * Finally, decide how to reply
	 */
	switch (status) {
	default:
		/*
		 * Error, reply with a NAK and EINVAL
		 */
		miocnak(wq, mp, 0, EINVAL);
		break;

	case IOC_RESTART_ACK:
	case IOC_ACK:
		/*
		 * OK, reply with an ACK
		 */
		miocack(wq, mp, 0, 0);
		break;

	case IOC_RESTART:
	case IOC_REPLY:
		/*
		 * OK, send prepared reply
		 */
		qreply(wq, mp);
		break;

	case IOC_DONE:
		/*
		 * OK, reply already sent
		 */
		break;
	}
}

#undef	DMFE_DBG


/*
 * ========== Per-instance setup/teardown code ==========
 */

#define	DMFE_DBG	DMFE_DBG_INIT	/* debug flag for this code	*/

/*
 * Determine local MAC address & broadcast address for this interface
 */
static void
dmfe_find_mac_address(dmfe_t *dmfep)
{
	uchar_t *prop;
	uint_t propsize;
	int err;

	/*
	 * We have to find the "vendor's factory-set address".  This is
	 * the value of the property "local-mac-address", as set by OBP
	 * (or a .conf file!)
	 *
	 * If the property is not there, then we try to find the factory
	 * mac address from the devices serial EEPROM.
	 */
	bzero(dmfep->curr_addr, sizeof (dmfep->curr_addr));
	err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, dmfep->devinfo,
	    DDI_PROP_DONTPASS, localmac_propname, &prop, &propsize);
	if (err == DDI_PROP_SUCCESS) {
		/* Only accept a property of exactly Ethernet-address size */
		if (propsize == ETHERADDRL)
			ethaddr_copy(prop, dmfep->curr_addr);
		ddi_prop_free(prop);
	} else {
		/* no property set... check eeprom */
		dmfe_read_eeprom(dmfep, EEPROM_EN_ADDR, dmfep->curr_addr,
		    ETHERADDRL);
	}

	DMFE_DEBUG(("dmfe_setup_mac_address: factory %s",
	    ether_sprintf((void *)dmfep->curr_addr)));
}

/*
 * Allocate one DMA area: a handle, <memsize>+<setup>+<slop> bytes of
 * memory, and a single-cookie binding, recording the results in *dma_p.
 * When <setup> is non-zero, the trailing <setup> bytes (after <memsize>)
 * are exposed separately via setup_va/setup_dvma for the SETUP packet.
 *
 * Returns DDI_SUCCESS or DDI_FAILURE.
 *
 * NOTE(review): on the failure paths any handle/memory already
 * allocated into *dma_p is NOT released here - presumably the caller's
 * error path ends up in dmfe_free_dma_mem(), which checks each field;
 * verify against dmfe_alloc_bufs()/dmfe_unattach() callers.
 */
static int
dmfe_alloc_dma_mem(dmfe_t *dmfep, size_t memsize,
	size_t setup, size_t slop, ddi_device_acc_attr_t *attr_p,
	uint_t dma_flags, dma_area_t *dma_p)
{
	ddi_dma_cookie_t dma_cookie;
	uint_t ncookies;
	int err;

	/*
	 * Allocate handle
	 */
	err = ddi_dma_alloc_handle(dmfep->devinfo, &dma_attr,
	    DDI_DMA_SLEEP, NULL, &dma_p->dma_hdl);
	if (err != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * Allocate memory
	 */
	err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize + setup + slop,
	    attr_p, dma_flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING),
	    DDI_DMA_SLEEP, NULL,
	    &dma_p->mem_va, &dma_p->alength, &dma_p->acc_hdl);
	if (err != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * Bind the two together
	 */
	err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL,
	    dma_p->mem_va, dma_p->alength, dma_flags,
	    DDI_DMA_SLEEP, NULL, &dma_cookie, &ncookies);
	if (err != DDI_DMA_MAPPED)
		return (DDI_FAILURE);
	/* The descriptor/buffer areas must be physically contiguous */
	if ((dma_p->ncookies = ncookies) != 1)
		return (DDI_FAILURE);

	dma_p->mem_dvma = dma_cookie.dmac_address;
	if (setup > 0) {
		/* Setup-packet area lives immediately after <memsize> */
		dma_p->setup_dvma = dma_p->mem_dvma + memsize;
		dma_p->setup_va = dma_p->mem_va + memsize;
	} else {
		dma_p->setup_dvma = 0;
		dma_p->setup_va = NULL;
	}

	return (DDI_SUCCESS);
}

/*
 * This function allocates the transmit and receive buffers and descriptors.
 *
 * Returns DDI_SUCCESS only when all four DMA areas and both bitmask
 * arrays have been allocated.
 *
 * NOTE(review): on a partial failure the areas already allocated are
 * not freed here; the attach path only calls dmfe_free_bufs() when
 * PROGRESS_BUFS is set (i.e. when this function succeeded), so a
 * mid-sequence failure appears to leak the earlier allocations -
 * verify against dmfe_attach()/dmfe_unattach().
 */
static int
dmfe_alloc_bufs(dmfe_t *dmfep)
{
	size_t memsize;
	int err;

	/*
	 * Allocate memory & handles for TX descriptor ring
	 * (with the SETUP-packet buffer tacked on the end)
	 */
	memsize = dmfep->tx.n_desc * sizeof (struct tx_desc_type);
	err = dmfe_alloc_dma_mem(dmfep, memsize, SETUPBUF_SIZE, DMFE_SLOP,
	    &dmfe_reg_accattr, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &dmfep->tx_desc);
	if (err != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * Allocate memory & handles for TX buffers
	 */
	memsize = dmfep->tx.n_desc * DMFE_BUF_SIZE;
	err = dmfe_alloc_dma_mem(dmfep, memsize, 0, 0,
	    &dmfe_data_accattr, DDI_DMA_WRITE | DMFE_DMA_MODE,
	    &dmfep->tx_buff);
	if (err != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * Allocate memory & handles for RX descriptor ring
	 */
	memsize = dmfep->rx.n_desc * sizeof (struct rx_desc_type);
	err = dmfe_alloc_dma_mem(dmfep, memsize, 0, DMFE_SLOP,
	    &dmfe_reg_accattr, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &dmfep->rx_desc);
	if (err != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * Allocate memory & handles for RX buffers
	 */
	memsize = dmfep->rx.n_desc * DMFE_BUF_SIZE;
	err = dmfe_alloc_dma_mem(dmfep, memsize, 0, 0,
	    &dmfe_data_accattr, DDI_DMA_READ | DMFE_DMA_MODE, &dmfep->rx_buff);
	if (err != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * Allocate bitmasks for tx packet type tracking
	 * (one bit per TX descriptor; KM_SLEEP cannot fail)
	 */
	dmfep->tx_mcast = kmem_zalloc(dmfep->tx.n_desc / NBBY, KM_SLEEP);
	dmfep->tx_bcast = kmem_zalloc(dmfep->tx.n_desc / NBBY, KM_SLEEP);

	return (DDI_SUCCESS);
}

/*
 * Release one DMA area allocated by dmfe_alloc_dma_mem(): unbind,
 * free the handle, then free the memory.  Each step is guarded so
 * partially-initialized areas are handled safely.
 */
static void
dmfe_free_dma_mem(dma_area_t *dma_p)
{
	if (dma_p->dma_hdl != NULL) {
		if (dma_p->ncookies) {
			(void) ddi_dma_unbind_handle(dma_p->dma_hdl);
			dma_p->ncookies = 0;
		}
		ddi_dma_free_handle(&dma_p->dma_hdl);
		dma_p->dma_hdl = NULL;
		dma_p->mem_dvma = 0;
		dma_p->setup_dvma = 0;
	}

	if (dma_p->acc_hdl != NULL) {
		ddi_dma_mem_free(&dma_p->acc_hdl);
		dma_p->acc_hdl = NULL;
		dma_p->mem_va = NULL;
		dma_p->setup_va = NULL;
	}
}

/*
 * This routine frees the transmit and receive buffers and descriptors.
 * Make sure the chip is stopped before calling it!
 *
 * Only called when PROGRESS_BUFS is set, i.e. dmfe_alloc_bufs()
 * completed, so tx_mcast/tx_bcast are known to be non-NULL here.
 */
static void
dmfe_free_bufs(dmfe_t *dmfep)
{
	dmfe_free_dma_mem(&dmfep->rx_buff);
	dmfe_free_dma_mem(&dmfep->rx_desc);
	dmfe_free_dma_mem(&dmfep->tx_buff);
	dmfe_free_dma_mem(&dmfep->tx_desc);
	kmem_free(dmfep->tx_mcast, dmfep->tx.n_desc / NBBY);
	kmem_free(dmfep->tx_bcast, dmfep->tx.n_desc / NBBY);
}

/*
 * Undo whatever dmfe_attach() had achieved, in reverse order, driven
 * by the <progress> flags, then free the soft state itself.  Shared
 * by the attach failure path and detach.
 */
static void
dmfe_unattach(dmfe_t *dmfep)
{
	/*
	 * Clean up and free all DMFE data structures
	 */
	if (dmfep->cycid != NULL) {
		ddi_periodic_delete(dmfep->cycid);
		dmfep->cycid = NULL;
	}

	if (dmfep->ksp_drv != NULL)
		kstat_delete(dmfep->ksp_drv);
	if (dmfep->progress & PROGRESS_HWINT) {
		/* Mutexen were initialised together with the h/w intr */
		ddi_remove_intr(dmfep->devinfo, 0, dmfep->iblk);
		mutex_destroy(dmfep->txlock);
		mutex_destroy(dmfep->rxlock);
		mutex_destroy(dmfep->oplock);
	}
	if (dmfep->progress & PROGRESS_SOFTINT)
		ddi_remove_softintr(dmfep->factotum_id);
	if (dmfep->progress & PROGRESS_BUFS)
		dmfe_free_bufs(dmfep);
	if (dmfep->progress & PROGRESS_REGS)
		ddi_regs_map_free(&dmfep->io_handle);
	if (dmfep->progress & PROGRESS_NDD)
		dmfe_nd_cleanup(dmfep);

	kmem_free(dmfep, sizeof (*dmfep));
}

/*
 * Read the chip identity from PCI config space into *idp and enable
 * bus mastering / wake the chip from sleep.  Returns DDI_SUCCESS or
 * DDI_FAILURE (config-space mapping failed).
 */
static int
dmfe_config_init(dmfe_t *dmfep, chip_id_t *idp)
{
	ddi_acc_handle_t handle;
	uint32_t regval;

	if (pci_config_setup(dmfep->devinfo, &handle) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * Get vendor/device/revision.
We expect (but don't check) that
	 * (vendorid == DAVICOM_VENDOR_ID) && (deviceid == DEVICE_ID_9102)
	 */
	idp->vendor = pci_config_get16(handle, PCI_CONF_VENID);
	idp->device = pci_config_get16(handle, PCI_CONF_DEVID);
	idp->revision = pci_config_get8(handle, PCI_CONF_REVID);

	/*
	 * Turn on Bus Master Enable bit and ensure the device is not asleep
	 */
	regval = pci_config_get32(handle, PCI_CONF_COMM);
	pci_config_put32(handle, PCI_CONF_COMM, (regval | PCI_COMM_ME));

	regval = pci_config_get32(handle, PCI_DMFE_CONF_CFDD);
	pci_config_put32(handle, PCI_DMFE_CONF_CFDD,
	    regval & ~(CFDD_SLEEP | CFDD_SNOOZE));

	pci_config_teardown(&handle);
	return (DDI_SUCCESS);
}

/*
 * Mapping from kstat index (within the dmfe_events kstat) to its
 * published name; terminated by a { -1, NULL } sentinel.
 */
struct ks_index {
	int index;
	char *name;
};

static const struct ks_index ks_drv_names[] = {
	{ KS_INTERRUPT,			"intr" },
	{ KS_CYCLIC_RUN,		"cyclic_run" },

	{ KS_TICK_LINK_STATE,		"link_state_change" },
	{ KS_TICK_LINK_POLL,		"link_state_poll" },
	{ KS_TX_STALL,			"tx_stall_detect" },
	{ KS_CHIP_ERROR,		"chip_error_interrupt" },

	{ KS_FACTOTUM_RUN,		"factotum_run" },
	{ KS_RECOVERY,			"factotum_recover" },
	{ KS_LINK_CHECK,		"factotum_link_check" },

	{ KS_LINK_UP_CNT,		"link_up_cnt" },
	{ KS_LINK_DROP_CNT,		"link_drop_cnt" },

	{ KS_MIIREG_BMSR,		"mii_status" },
	{ KS_MIIREG_ANAR,		"mii_advert_cap" },
	{ KS_MIIREG_ANLPAR,		"mii_partner_cap" },
	{ KS_MIIREG_ANER,		"mii_expansion_cap" },
	{ KS_MIIREG_DSCSR,		"mii_dscsr" },

	{ -1,				NULL }
};

/*
 * Create and install the named "dmfe_events" kstat for this instance.
 * Failure to create the kstat is logged but not fatal to attach.
 */
static void
dmfe_init_kstats(dmfe_t *dmfep, int instance)
{
	kstat_t *ksp;
	kstat_named_t *knp;
	const struct ks_index *ksip;

	/* no need to create MII stats, the mac module already does it */

	/* Create and initialise driver-defined kstats */
	ksp = kstat_create(DRIVER_NAME, instance, "dmfe_events", "net",
	    KSTAT_TYPE_NAMED,
KS_DRV_COUNT, KSTAT_FLAG_PERSISTENT);
	if (ksp != NULL) {
		/* <knp> stays at the base; entries are placed by index */
		for (knp = ksp->ks_data, ksip = ks_drv_names;
		    ksip->name != NULL; ++ksip) {
			kstat_named_init(&knp[ksip->index], ksip->name,
			    KSTAT_DATA_UINT64);
		}
		dmfep->ksp_drv = ksp;
		dmfep->knp_drv = knp;
		kstat_install(ksp);
	} else {
		dmfe_error(dmfep, "kstat_create() for dmfe_events failed");
	}
}

/*
 * Resume processing after a DDI_SUSPEND: sanity-check that our soft
 * state still matches the devinfo node and that the chip is the same
 * one we suspended, then restart the hardware and TX scheduling.
 */
static int
dmfe_resume(dev_info_t *devinfo)
{
	dmfe_t *dmfep;				/* Our private data	*/
	chip_id_t chipid;

	dmfep = ddi_get_driver_private(devinfo);
	if (dmfep == NULL)
		return (DDI_FAILURE);

	/*
	 * Refuse to resume if the data structures aren't consistent
	 */
	if (dmfep->devinfo != devinfo)
		return (DDI_FAILURE);

	/*
	 * Refuse to resume if the chip's changed its identity (*boggle*)
	 */
	if (dmfe_config_init(dmfep, &chipid) != DDI_SUCCESS)
		return (DDI_FAILURE);
	if (chipid.vendor != dmfep->chipid.vendor)
		return (DDI_FAILURE);
	if (chipid.device != dmfep->chipid.device)
		return (DDI_FAILURE);
	if (chipid.revision != dmfep->chipid.revision)
		return (DDI_FAILURE);

	/*
	 * All OK, reinitialise h/w & kick off MAC scheduling
	 */
	mutex_enter(dmfep->oplock);
	dmfe_restart(dmfep);
	mutex_exit(dmfep->oplock);
	mac_tx_update(dmfep->mh);
	return (DDI_SUCCESS);
}

/*
 * attach(9E) -- Attach a device to the system
 *
 * Called once for each board successfully probed.
 */
static int
dmfe_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
{
	mac_register_t *macp;
	dmfe_t *dmfep;				/* Our private data	*/
	uint32_t csr6;
	int instance;
	int err;

	instance = ddi_get_instance(devinfo);

	switch (cmd) {
	default:
		return (DDI_FAILURE);

	case DDI_RESUME:
		return (dmfe_resume(devinfo));

	case DDI_ATTACH:
		break;
	}

	dmfep = kmem_zalloc(sizeof (*dmfep), KM_SLEEP);
	ddi_set_driver_private(devinfo, dmfep);
	dmfep->devinfo = devinfo;
	dmfep->dmfe_guard = DMFE_GUARD;

	/*
	 * Initialize more fields in DMFE private data
	 * Determine the local MAC address
	 */
#if	DMFEDEBUG
	dmfep->debug = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 0,
	    debug_propname, dmfe_debug);
#endif	/* DMFEDEBUG */
	dmfep->cycid = NULL;
	(void) snprintf(dmfep->ifname, sizeof (dmfep->ifname), "dmfe%d",
	    instance);

	/*
	 * Check for custom "opmode-reg-value" property;
	 * if none, use the defaults below for CSR6 ...
	 */
	csr6 = TX_THRESHOLD_HI | STORE_AND_FORWARD | EXT_MII_IF | OPN_25_MB1;
	dmfep->opmode = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, opmode_propname, csr6);

	/*
	 * Read chip ID & set up config space command register(s)
	 *
	 * Each successful step below sets a PROGRESS_* flag so that
	 * dmfe_unattach() (via attach_fail) can undo exactly what
	 * was done, in reverse order.
	 */
	if (dmfe_config_init(dmfep, &dmfep->chipid) != DDI_SUCCESS) {
		dmfe_error(dmfep, "dmfe_config_init() failed");
		goto attach_fail;
	}
	dmfep->progress |= PROGRESS_CONFIG;

	/*
	 * Register NDD-tweakable parameters
	 */
	if (dmfe_nd_init(dmfep)) {
		dmfe_error(dmfep, "dmfe_nd_init() failed");
		goto attach_fail;
	}
	dmfep->progress |= PROGRESS_NDD;

	/*
	 * Map operating registers
	 */
	err = ddi_regs_map_setup(devinfo, DMFE_PCI_RNUMBER,
	    &dmfep->io_reg, 0, 0, &dmfe_reg_accattr, &dmfep->io_handle);
	if (err != DDI_SUCCESS) {
		dmfe_error(dmfep, "ddi_regs_map_setup() failed");
		goto attach_fail;
	}
	dmfep->progress |= PROGRESS_REGS;

	/*
	 * Get our MAC address.
	 */
	dmfe_find_mac_address(dmfep);

	/*
	 * Allocate the TX and RX descriptors/buffers.
	 */
	dmfep->tx.n_desc = dmfe_tx_desc;
	dmfep->rx.n_desc = dmfe_rx_desc;
	err = dmfe_alloc_bufs(dmfep);
	if (err != DDI_SUCCESS) {
		dmfe_error(dmfep, "DMA buffer allocation failed");
		goto attach_fail;
	}
	dmfep->progress |= PROGRESS_BUFS;

	/*
	 * Add the softint handler
	 */
	dmfep->link_poll_tix = factotum_start_tix;
	if (ddi_add_softintr(devinfo, DDI_SOFTINT_LOW, &dmfep->factotum_id,
	    NULL, NULL, dmfe_factotum, (caddr_t)dmfep) != DDI_SUCCESS) {
		dmfe_error(dmfep, "ddi_add_softintr() failed");
		goto attach_fail;
	}
	dmfep->progress |= PROGRESS_SOFTINT;

	/*
	 * Add the h/w interrupt handler & initialise mutexen
	 * (the locks are created with the interrupt block cookie so
	 * they can be used in interrupt context; <milock> is not)
	 */
	if (ddi_add_intr(devinfo, 0, &dmfep->iblk, NULL,
	    dmfe_interrupt, (caddr_t)dmfep) != DDI_SUCCESS) {
		dmfe_error(dmfep, "ddi_add_intr() failed");
		goto attach_fail;
	}
	mutex_init(dmfep->milock, NULL, MUTEX_DRIVER, NULL);
	mutex_init(dmfep->oplock, NULL, MUTEX_DRIVER, dmfep->iblk);
	mutex_init(dmfep->rxlock, NULL, MUTEX_DRIVER, dmfep->iblk);
	mutex_init(dmfep->txlock, NULL, MUTEX_DRIVER, dmfep->iblk);
	dmfep->progress |= PROGRESS_HWINT;

	/*
	 * Create & initialise named kstats
	 */
	dmfe_init_kstats(dmfep, instance);

	/*
	 * Reset & initialise the chip and the ring buffers
	 * Initialise the (internal) PHY
	 */
	mutex_enter(dmfep->oplock);
	mutex_enter(dmfep->rxlock);
	mutex_enter(dmfep->txlock);

	dmfe_reset(dmfep);

	/*
	 * Prepare the setup packet
	 */
	bzero(dmfep->tx_desc.setup_va, SETUPBUF_SIZE);
	bzero(dmfep->mcast_refs, MCASTBUF_SIZE);
	dmfep->addr_set = B_FALSE;
	dmfep->opmode &= ~(PROMISC_MODE | PASS_MULTICAST);
	dmfep->mac_state = DMFE_MAC_RESET;

	mutex_exit(dmfep->txlock);
	mutex_exit(dmfep->rxlock);
	mutex_exit(dmfep->oplock);

	dmfep->link_state = LINK_STATE_UNKNOWN;
	if (dmfe_init_phy(dmfep) != B_TRUE)
		goto attach_fail;
	dmfep->update_phy = B_TRUE;

	/*
	 * Send a reasonable setup frame.  This configures our starting
	 * address and the broadcast address.
	 */
	(void) dmfe_m_unicst(dmfep, dmfep->curr_addr);

	/*
	 * Initialize pointers to device specific functions which
	 * will be used by the generic layer.
	 */
	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
		goto attach_fail;
	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	macp->m_driver = dmfep;
	macp->m_dip = devinfo;
	macp->m_src_addr = dmfep->curr_addr;
	macp->m_callbacks = &dmfe_m_callbacks;
	macp->m_min_sdu = 0;
	macp->m_max_sdu = ETHERMTU;
	macp->m_margin = VLAN_TAGSZ;

	/*
	 * Finally, we're ready to register ourselves with the MAC layer
	 * interface; if this succeeds, we're all ready to start()
	 */
	err = mac_register(macp, &dmfep->mh);
	mac_free(macp);
	if (err != 0)
		goto attach_fail;
	ASSERT(dmfep->dmfe_guard == DMFE_GUARD);

	/*
	 * Install the cyclic callback that we use to check for link
	 * status, transmit stall, etc. The cyclic callback (dmfe_cyclic())
	 * is invoked in kernel context then.
	 */
	ASSERT(dmfep->cycid == NULL);
	dmfep->cycid = ddi_periodic_add(dmfe_cyclic, dmfep,
	    dmfe_tick_us * 1000, DDI_IPL_0);
	return (DDI_SUCCESS);

attach_fail:
	dmfe_unattach(dmfep);
	return (DDI_FAILURE);
}

/*
 * dmfe_suspend() -- suspend transmit/receive for powerdown
 */
static int
dmfe_suspend(dmfe_t *dmfep)
{
	/*
	 * Just stop processing ...
3092 */ 3093 mutex_enter(dmfep->oplock); 3094 dmfe_stop(dmfep); 3095 mutex_exit(dmfep->oplock); 3096 3097 return (DDI_SUCCESS); 3098 } 3099 3100 /* 3101 * detach(9E) -- Detach a device from the system 3102 */ 3103 static int 3104 dmfe_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd) 3105 { 3106 dmfe_t *dmfep; 3107 3108 dmfep = ddi_get_driver_private(devinfo); 3109 3110 switch (cmd) { 3111 default: 3112 return (DDI_FAILURE); 3113 3114 case DDI_SUSPEND: 3115 return (dmfe_suspend(dmfep)); 3116 3117 case DDI_DETACH: 3118 break; 3119 } 3120 3121 /* 3122 * Unregister from the MAC subsystem. This can fail, in 3123 * particular if there are DLPI style-2 streams still open - 3124 * in which case we just return failure without shutting 3125 * down chip operations. 3126 */ 3127 if (mac_unregister(dmfep->mh) != DDI_SUCCESS) 3128 return (DDI_FAILURE); 3129 3130 /* 3131 * All activity stopped, so we can clean up & exit 3132 */ 3133 dmfe_unattach(dmfep); 3134 return (DDI_SUCCESS); 3135 } 3136 3137 3138 /* 3139 * ========== Module Loading Data & Entry Points ========== 3140 */ 3141 3142 DDI_DEFINE_STREAM_OPS(dmfe_dev_ops, nulldev, nulldev, dmfe_attach, dmfe_detach, 3143 nodev, NULL, D_MP, NULL, ddi_quiesce_not_supported); 3144 3145 static struct modldrv dmfe_modldrv = { 3146 &mod_driverops, /* Type of module. 
This one is a driver */ 3147 dmfe_ident, /* short description */ 3148 &dmfe_dev_ops /* driver specific ops */ 3149 }; 3150 3151 static struct modlinkage modlinkage = { 3152 MODREV_1, (void *)&dmfe_modldrv, NULL 3153 }; 3154 3155 int 3156 _info(struct modinfo *modinfop) 3157 { 3158 return (mod_info(&modlinkage, modinfop)); 3159 } 3160 3161 int 3162 _init(void) 3163 { 3164 uint32_t tmp100; 3165 uint32_t tmp10; 3166 int i; 3167 int status; 3168 3169 /* Calculate global timing parameters */ 3170 tmp100 = (dmfe_tx100_stall_us+dmfe_tick_us-1)/dmfe_tick_us; 3171 tmp10 = (dmfe_tx10_stall_us+dmfe_tick_us-1)/dmfe_tick_us; 3172 3173 for (i = 0; i <= TX_PROCESS_MAX_STATE; ++i) { 3174 switch (i) { 3175 case TX_PROCESS_STATE(TX_PROCESS_FETCH_DATA): 3176 case TX_PROCESS_STATE(TX_PROCESS_WAIT_END): 3177 /* 3178 * The chip doesn't spontaneously recover from 3179 * a stall in these states, so we reset early 3180 */ 3181 stall_100_tix[i] = tmp100; 3182 stall_10_tix[i] = tmp10; 3183 break; 3184 3185 case TX_PROCESS_STATE(TX_PROCESS_SUSPEND): 3186 default: 3187 /* 3188 * The chip has been seen to spontaneously recover 3189 * after an apparent stall in the SUSPEND state, 3190 * so we'll allow it rather longer to do so. As 3191 * stalls in other states have not been observed, 3192 * we'll use long timeouts for them too ... 
3193 */ 3194 stall_100_tix[i] = tmp100 * 20; 3195 stall_10_tix[i] = tmp10 * 20; 3196 break; 3197 } 3198 } 3199 3200 factotum_tix = (dmfe_link_poll_us+dmfe_tick_us-1)/dmfe_tick_us; 3201 factotum_fast_tix = 1+(factotum_tix/5); 3202 factotum_start_tix = 1+(factotum_tix*2); 3203 3204 mac_init_ops(&dmfe_dev_ops, "dmfe"); 3205 status = mod_install(&modlinkage); 3206 if (status == DDI_SUCCESS) 3207 dmfe_log_init(); 3208 3209 return (status); 3210 } 3211 3212 int 3213 _fini(void) 3214 { 3215 int status; 3216 3217 status = mod_remove(&modlinkage); 3218 if (status == DDI_SUCCESS) { 3219 mac_fini_ops(&dmfe_dev_ops); 3220 dmfe_log_fini(); 3221 } 3222 3223 return (status); 3224 } 3225 3226 #undef DMFE_DBG 3227