/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */


#include <sys/types.h>
#include <sys/sunddi.h>
#include <sys/policy.h>
#include <sys/sdt.h>
#include "dmfe_impl.h"

/*
 * This is the string displayed by modinfo, etc.
 */
static char dmfe_ident[] = "Davicom DM9102 Ethernet";


/*
 * NOTES:
 *
 * #defines:
 *
 *	DMFE_PCI_RNUMBER is the register-set number to use for the operating
 *	registers.  On an OBP-based machine, regset 0 refers to CONFIG space,
 *	regset 1 will be the operating registers in I/O space, and regset 2
 *	will be the operating registers in MEMORY space (preferred).  If an
 *	expansion ROM is fitted, it may appear as a further register set.
 *
 *	DMFE_SLOP defines the amount by which the chip may read beyond
 *	the end of a buffer or descriptor, apparently 6-8 dwords :(
 *	We have to make sure this doesn't cause it to access unallocated
 *	or unmapped memory.
 *
 *	DMFE_BUF_SIZE must be at least (ETHERMAX + ETHERFCSL + DMFE_SLOP)
 *	rounded up to a multiple of 4.  Here we choose a power of two for
 *	speed & simplicity at the cost of a bit more memory.
 *
 *	However, the buffer length field in the TX/RX descriptors is only
 *	eleven bits, so even though we allocate DMFE_BUF_SIZE (2048) bytes
 *	per buffer, we tell the chip that they're only DMFE_BUF_SIZE_1
 *	(2000) bytes each.
 *
 *	DMFE_DMA_MODE defines the mode (STREAMING/CONSISTENT) used for
 *	the data buffers.  The descriptors are always set up in CONSISTENT
 *	mode.
 *
 *	DMFE_HEADROOM defines how much space we'll leave in allocated
 *	mblks before the first valid data byte.  This should be chosen
 *	to be 2 modulo 4, so that once the ethernet header (14 bytes)
 *	has been stripped off, the packet data will be 4-byte aligned.
 *	The remaining space can be used by upstream modules to prepend
 *	any headers required.
 *
 * Patchable globals:
 *
 *	dmfe_bus_modes: the bus mode bits to be put into CSR0.
 *		Setting READ_MULTIPLE in this register seems to cause
 *		the chip to generate a READ LINE command with a parity
 *		error!  Don't do it!
 *
 *	dmfe_setup_desc1: the value to be put into descriptor word 1
 *		when sending a SETUP packet.
 *
 *		Setting TX_LAST_DESC in desc1 in a setup packet seems
 *		to make the chip spontaneously reset internally - it
 *		attempts to give back the setup packet descriptor by
 *		writing to PCI address 00000000 - which may or may not
 *		get a MASTER ABORT - after which most of its registers
 *		seem to have either default values or garbage!
 *
 *		TX_FIRST_DESC doesn't seem to have the same effect but
 *		it isn't needed on a setup packet so we'll leave it out
 *		too, just in case it has some other weird side-effect.
 *
 *		The default hardware packet filtering mode is now
 *		HASH_AND_PERFECT (imperfect filtering of multicast
 *		packets and perfect filtering of unicast packets).
 *		If this is found not to work reliably, setting the
 *		TX_FILTER_TYPE1 bit will cause a switchover to using
 *		HASH_ONLY mode (imperfect filtering of *all* packets).
 *		Software will then perform the additional filtering
 *		as required.
 */
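
/*
 * Worked example (added commentary, not original driver code): with
 * DMFE_HEADROOM == 34, which is 2 modulo 4, the RX path below does
 *
 *	mp = allocb(DMFE_HEADROOM + packet_length, 0);
 *	dp = mp->b_rptr += DMFE_HEADROOM;
 *
 * so, counting from the start of the allocated buffer (assuming
 * allocb() returns at least 4-byte-aligned data), the 14-byte
 * ethernet header occupies offsets 34..47 and the payload begins at
 * offset 48 - a multiple of 4, as required by NCA.
 */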

#define	DMFE_PCI_RNUMBER	2
#define	DMFE_SLOP		(8*sizeof (uint32_t))
#define	DMFE_BUF_SIZE		2048
#define	DMFE_BUF_SIZE_1		2000
#define	DMFE_DMA_MODE		DDI_DMA_STREAMING
#define	DMFE_HEADROOM		34

static uint32_t dmfe_bus_modes = TX_POLL_INTVL | CACHE_ALIGN;
static uint32_t dmfe_setup_desc1 = TX_SETUP_PACKET | SETUPBUF_SIZE |
	TX_FILTER_TYPE0;

/*
 * Some tunable parameters ...
 *	Number of RX/TX ring entries (128/128)
 *	Minimum number of TX ring slots to keep free (1)
 *	Low-water mark at which to try to reclaim TX ring slots (1)
 *	How often to take a TX-done interrupt (twice per ring cycle)
 *	Whether to reclaim TX ring entries on a TX-done interrupt (no)
 */

#define	DMFE_TX_DESC	128	/* Should be a multiple of 4 <= 256	*/
#define	DMFE_RX_DESC	128	/* Should be a multiple of 4 <= 256	*/

static uint32_t dmfe_rx_desc = DMFE_RX_DESC;
static uint32_t dmfe_tx_desc = DMFE_TX_DESC;
static uint32_t dmfe_tx_min_free = 1;
static uint32_t dmfe_tx_reclaim_level = 1;
static uint32_t dmfe_tx_int_factor = (DMFE_TX_DESC / 2) - 1;
static boolean_t dmfe_reclaim_on_done = B_FALSE;

/*
 * Time-related parameters:
 *
 *	We use a cyclic to provide a periodic callback; this is then used
 *	to check for TX-stall and poll the link status register.
 *
 *	DMFE_TICK is the interval between cyclic callbacks, in microseconds.
 *
 *	TX_STALL_TIME_100 is the timeout in microseconds between passing
 *	a packet to the chip for transmission and seeing that it's gone,
 *	when running at 100Mb/s.  If we haven't reclaimed at least one
 *	descriptor in this time we assume the transmitter has stalled
 *	and reset the chip.
 *
 *	TX_STALL_TIME_10 is the equivalent timeout when running at 10Mb/s.
 *
 * Patchable globals:
 *
 *	dmfe_tick_us:		DMFE_TICK
 *	dmfe_tx100_stall_us:	TX_STALL_TIME_100
 *	dmfe_tx10_stall_us:	TX_STALL_TIME_10
 *
 * These are then used in _init() to calculate:
 *
 *	stall_100_tix[]: number of consecutive cyclic callbacks without a
 *			reclaim before the TX process is considered stalled,
 *			when running at 100Mb/s.  The elements are indexed
 *			by transmit-engine-state.
 *	stall_10_tix[]: number of consecutive cyclic callbacks without a
 *			reclaim before the TX process is considered stalled,
 *			when running at 10Mb/s.  The elements are indexed
 *			by transmit-engine-state.
 */
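
/*
 * Illustrative sketch (an assumption - _init() itself is not part of
 * this excerpt): converting the patchable microsecond timeouts into
 * cyclic-tick counts, rounding up so a timeout is never shorter than
 * requested:
 *
 *	ticks = (dmfe_tx100_stall_us + dmfe_tick_us - 1) / dmfe_tick_us;
 *
 * with the result then scaled per transmit-engine-state to fill in
 * stall_100_tix[] (and likewise stall_10_tix[] from
 * dmfe_tx10_stall_us).
 */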

#define	DMFE_TICK		25000		/* microseconds		*/
#define	TX_STALL_TIME_100	50000		/* microseconds		*/
#define	TX_STALL_TIME_10	200000		/* microseconds		*/

static uint32_t dmfe_tick_us = DMFE_TICK;
static uint32_t dmfe_tx100_stall_us = TX_STALL_TIME_100;
static uint32_t dmfe_tx10_stall_us = TX_STALL_TIME_10;

/*
 * Calculated from above in _init()
 */

static uint32_t stall_100_tix[TX_PROCESS_MAX_STATE+1];
static uint32_t stall_10_tix[TX_PROCESS_MAX_STATE+1];

/*
 * Property names
 */
static char localmac_propname[] = "local-mac-address";
static char opmode_propname[] = "opmode-reg-value";

static int	dmfe_m_start(void *);
static void	dmfe_m_stop(void *);
static int	dmfe_m_promisc(void *, boolean_t);
static int	dmfe_m_multicst(void *, boolean_t, const uint8_t *);
static int	dmfe_m_unicst(void *, const uint8_t *);
static void	dmfe_m_ioctl(void *, queue_t *, mblk_t *);
static mblk_t	*dmfe_m_tx(void *, mblk_t *);
static int	dmfe_m_stat(void *, uint_t, uint64_t *);
static int	dmfe_m_getprop(void *, const char *, mac_prop_id_t,
		    uint_t, uint_t, void *, uint_t *);
static int	dmfe_m_setprop(void *, const char *, mac_prop_id_t,
		    uint_t, const void *);

static mac_callbacks_t dmfe_m_callbacks = {
	(MC_IOCTL | MC_SETPROP | MC_GETPROP),
	dmfe_m_stat,
	dmfe_m_start,
	dmfe_m_stop,
	dmfe_m_promisc,
	dmfe_m_multicst,
	dmfe_m_unicst,
	dmfe_m_tx,
	dmfe_m_ioctl,
	NULL,		/* getcapab */
	NULL,		/* open */
	NULL,		/* close */
	dmfe_m_setprop,
	dmfe_m_getprop
};


/*
 * Describes the chip's DMA engine
 */
static ddi_dma_attr_t dma_attr = {
	DMA_ATTR_V0,		/* dma_attr version	*/
	0,			/* dma_attr_addr_lo	*/
	(uint32_t)0xFFFFFFFF,	/* dma_attr_addr_hi	*/
	0x0FFFFFF,		/* dma_attr_count_max	*/
	0x20,			/* dma_attr_align	*/
	0x7F,			/* dma_attr_burstsizes	*/
	1,			/* dma_attr_minxfer	*/
	(uint32_t)0xFFFFFFFF,	/* dma_attr_maxxfer	*/
	(uint32_t)0xFFFFFFFF,	/* dma_attr_seg		*/
	1,			/* dma_attr_sgllen	*/
	1,			/* dma_attr_granular	*/
	0			/* dma_attr_flags	*/
};

/*
 * DMA access attributes for registers and descriptors
 */
static ddi_device_acc_attr_t dmfe_reg_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};

/*
 * DMA access attributes for data: NOT to be byte swapped.
 */
static ddi_device_acc_attr_t dmfe_data_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC
};

static uchar_t dmfe_broadcast_addr[ETHERADDRL] = {
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff
};


/*
 * ========== Lowest-level chip register & ring access routines ==========
 */

/*
 * I/O register get/put routines
 */
uint32_t
dmfe_chip_get32(dmfe_t *dmfep, off_t offset)
{
	uint32_t *addr;

	addr = (void *)(dmfep->io_reg + offset);
	return (ddi_get32(dmfep->io_handle, addr));
}

void
dmfe_chip_put32(dmfe_t *dmfep, off_t offset, uint32_t value)
{
	uint32_t *addr;

	addr = (void *)(dmfep->io_reg + offset);
	ddi_put32(dmfep->io_handle, addr, value);
}

/*
 * TX/RX ring get/put routines
 */
static uint32_t
dmfe_ring_get32(dma_area_t *dma_p, uint_t index, uint_t offset)
{
	uint32_t *addr;

	addr = (void *)dma_p->mem_va;
	return (ddi_get32(dma_p->acc_hdl, addr + index*DESC_SIZE + offset));
}

static void
dmfe_ring_put32(dma_area_t *dma_p, uint_t index, uint_t offset, uint32_t value)
{
	uint32_t *addr;

	addr = (void *)dma_p->mem_va;
	ddi_put32(dma_p->acc_hdl, addr + index*DESC_SIZE + offset, value);
}

/*
 * Setup buffer get/put routines
 */
static uint32_t
dmfe_setup_get32(dma_area_t *dma_p, uint_t index)
{
	uint32_t *addr;

	addr = (void *)dma_p->setup_va;
	return (ddi_get32(dma_p->acc_hdl, addr + index));
}

static void
dmfe_setup_put32(dma_area_t *dma_p, uint_t index, uint32_t value)
{
	uint32_t *addr;

	addr = (void *)dma_p->setup_va;
	ddi_put32(dma_p->acc_hdl, addr + index, value);
}
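
/*
 * Usage sketch (added commentary, not original code): the RX path
 * below combines these helpers to test descriptor ownership, e.g.
 *
 *	desc0 = dmfe_ring_get32(&dmfep->rx_desc, index, DESC0);
 *	if ((desc0 & RX_OWN) == 0) {
 *		... the chip has returned this descriptor to us ...
 *	}
 *
 * Note that <addr> above is a uint32_t pointer, so DESC_SIZE and the
 * DESC0/DESC1/BUFFER1/RD_NEXT offsets (defined elsewhere, not shown
 * in this file) are counted in 32-bit words, not bytes.
 */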


/*
 * ========== Low-level chip & ring buffer manipulation ==========
 */

/*
 * dmfe_set_opmode() -- function to set operating mode
 */
static void
dmfe_set_opmode(dmfe_t *dmfep)
{
	ASSERT(mutex_owned(dmfep->oplock));

	dmfe_chip_put32(dmfep, OPN_MODE_REG, dmfep->opmode);
	drv_usecwait(10);
}

/*
 * dmfe_stop_chip() -- stop all chip processing & optionally reset the h/w
 */
static void
dmfe_stop_chip(dmfe_t *dmfep, enum chip_state newstate)
{
	ASSERT(mutex_owned(dmfep->oplock));

	/*
	 * Stop the chip:
	 *	disable all interrupts
	 *	stop TX/RX processes
	 *	clear the status bits for TX/RX stopped
	 *	If required, reset the chip
	 *	Record the new state
	 */
	dmfe_chip_put32(dmfep, INT_MASK_REG, 0);
	dmfep->opmode &= ~(START_TRANSMIT | START_RECEIVE);
	dmfe_set_opmode(dmfep);
	dmfe_chip_put32(dmfep, STATUS_REG, TX_STOPPED_INT | RX_STOPPED_INT);

	switch (newstate) {
	default:
		ASSERT(!"can't get here");
		return;

	case CHIP_STOPPED:
	case CHIP_ERROR:
		break;

	case CHIP_RESET:
		dmfe_chip_put32(dmfep, BUS_MODE_REG, SW_RESET);
		drv_usecwait(10);
		dmfe_chip_put32(dmfep, BUS_MODE_REG, 0);
		drv_usecwait(10);
		dmfe_chip_put32(dmfep, BUS_MODE_REG, dmfe_bus_modes);
		break;
	}

	dmfep->chip_state = newstate;
}

/*
 * Initialize transmit and receive descriptor rings, and
 * set the chip to point to the first entry in each ring
 */
static void
dmfe_init_rings(dmfe_t *dmfep)
{
	dma_area_t *descp;
	uint32_t pstart;
	uint32_t pnext;
	uint32_t pbuff;
	uint32_t desc1;
	int i;

	/*
	 * You need all the locks in order to rewrite the descriptor rings
	 */
	ASSERT(mutex_owned(dmfep->oplock));
	ASSERT(mutex_owned(dmfep->rxlock));
	ASSERT(mutex_owned(dmfep->txlock));

	/*
	 * Program the RX ring entries
	 */
	descp = &dmfep->rx_desc;
	pstart = descp->mem_dvma;
	pnext = pstart + sizeof (struct rx_desc_type);
	pbuff = dmfep->rx_buff.mem_dvma;
	desc1 = RX_CHAINING | DMFE_BUF_SIZE_1;

	for (i = 0; i < dmfep->rx.n_desc; ++i) {
		dmfe_ring_put32(descp, i, RD_NEXT, pnext);
		dmfe_ring_put32(descp, i, BUFFER1, pbuff);
		dmfe_ring_put32(descp, i, DESC1, desc1);
		dmfe_ring_put32(descp, i, DESC0, RX_OWN);

		pnext += sizeof (struct rx_desc_type);
		pbuff += DMFE_BUF_SIZE;
	}

	/*
	 * Fix up last entry & sync
	 */
	dmfe_ring_put32(descp, --i, RD_NEXT, pstart);
	DMA_SYNC(descp, DDI_DMA_SYNC_FORDEV);
	dmfep->rx.next_free = 0;

	/*
	 * Set the base address of the RX descriptor list in CSR3
	 */
	dmfe_chip_put32(dmfep, RX_BASE_ADDR_REG, descp->mem_dvma);

	/*
	 * Program the TX ring entries
	 */
	descp = &dmfep->tx_desc;
	pstart = descp->mem_dvma;
	pnext = pstart + sizeof (struct tx_desc_type);
	pbuff = dmfep->tx_buff.mem_dvma;
	desc1 = TX_CHAINING;

	for (i = 0; i < dmfep->tx.n_desc; ++i) {
		dmfe_ring_put32(descp, i, TD_NEXT, pnext);
		dmfe_ring_put32(descp, i, BUFFER1, pbuff);
		dmfe_ring_put32(descp, i, DESC1, desc1);
		dmfe_ring_put32(descp, i, DESC0, 0);

		pnext += sizeof (struct tx_desc_type);
		pbuff += DMFE_BUF_SIZE;
	}

	/*
	 * Fix up last entry & sync
	 */
	dmfe_ring_put32(descp, --i, TD_NEXT, pstart);
	DMA_SYNC(descp, DDI_DMA_SYNC_FORDEV);
	dmfep->tx.n_free = dmfep->tx.n_desc;
	dmfep->tx.next_free = dmfep->tx.next_busy = 0;

	/*
	 * Set the base address of the TX descriptor list in CSR4
	 */
	dmfe_chip_put32(dmfep, TX_BASE_ADDR_REG, descp->mem_dvma);
}

/*
 * dmfe_start_chip() -- start the chip transmitting and/or receiving
 */
static void
dmfe_start_chip(dmfe_t *dmfep, int mode)
{
	ASSERT(mutex_owned(dmfep->oplock));

	dmfep->opmode |= mode;
	dmfe_set_opmode(dmfep);

	dmfe_chip_put32(dmfep, W_J_TIMER_REG, 0);
	/*
	 * Enable VLAN length mode (allows packets to be 4 bytes longer).
	 */
	dmfe_chip_put32(dmfep, W_J_TIMER_REG, VLAN_ENABLE);

	/*
	 * Clear any pending process-stopped interrupts
	 */
	dmfe_chip_put32(dmfep, STATUS_REG, TX_STOPPED_INT | RX_STOPPED_INT);
	dmfep->chip_state = mode & START_RECEIVE ? CHIP_TX_RX :
	    mode & START_TRANSMIT ? CHIP_TX_ONLY : CHIP_STOPPED;
}

/*
 * dmfe_enable_interrupts() -- enable our favourite set of interrupts.
 *
 * Normal interrupts:
 *	We always enable:
 *		RX_PKTDONE_INT		(packet received)
 *		TX_PKTDONE_INT		(TX complete)
 *	We never enable:
 *		TX_ALLDONE_INT		(next TX buffer not ready)
 *
 * Abnormal interrupts:
 *	We always enable:
 *		RX_STOPPED_INT
 *		TX_STOPPED_INT
 *		SYSTEM_ERR_INT
 *		RX_UNAVAIL_INT
 *	We never enable:
 *		RX_EARLY_INT
 *		RX_WATCHDOG_INT
 *		TX_JABBER_INT
 *		TX_EARLY_INT
 *		TX_UNDERFLOW_INT
 *		GP_TIMER_INT		(not valid in -9 chips)
 *		LINK_STATUS_INT		(not valid in -9 chips)
 */
static void
dmfe_enable_interrupts(dmfe_t *dmfep)
{
	ASSERT(mutex_owned(dmfep->oplock));

	/*
	 * Put 'the standard set of interrupts' in the interrupt mask register
	 */
	dmfep->imask = RX_PKTDONE_INT | TX_PKTDONE_INT |
	    RX_STOPPED_INT | TX_STOPPED_INT | RX_UNAVAIL_INT | SYSTEM_ERR_INT;

	dmfe_chip_put32(dmfep, INT_MASK_REG,
	    NORMAL_SUMMARY_INT | ABNORMAL_SUMMARY_INT | dmfep->imask);
	dmfep->chip_state = CHIP_RUNNING;
}

/*
 * ========== RX side routines ==========
 */

/*
 * Function to update receive statistics on various errors
 */
static void
dmfe_update_rx_stats(dmfe_t *dmfep, uint32_t desc0)
{
	ASSERT(mutex_owned(dmfep->rxlock));

	/*
	 * The error summary bit and the error bits that it summarises
	 * are only valid if this is the last fragment.  Therefore, a
	 * fragment only contributes to the error statistics if both
	 * the last-fragment and error summary bits are set.
	 */
	if (((RX_LAST_DESC | RX_ERR_SUMMARY) & ~desc0) == 0) {
		dmfep->rx_stats_ierrors += 1;

		/*
		 * There are some other error bits in the descriptor for
		 * which there don't seem to be appropriate MAC statistics,
		 * notably RX_COLLISION and perhaps RX_DESC_ERR.  The
		 * latter may not be possible if it is supposed to indicate
		 * that one buffer has been filled with a partial packet
		 * and the next buffer required for the rest of the packet
		 * was not available, as all our buffers are more than large
		 * enough for a whole packet without fragmenting.
		 */

		if (desc0 & RX_OVERFLOW) {
			dmfep->rx_stats_overflow += 1;

		} else if (desc0 & RX_RUNT_FRAME)
			dmfep->rx_stats_short += 1;

		if (desc0 & RX_CRC)
			dmfep->rx_stats_fcs += 1;

		if (desc0 & RX_FRAME2LONG)
			dmfep->rx_stats_toolong += 1;
	}

	/*
	 * A receive watchdog timeout is counted as a MAC-level receive
	 * error.  Strangely, it doesn't set the packet error summary bit,
	 * according to the chip data sheet :-?
	 */
	if (desc0 & RX_RCV_WD_TO)
		dmfep->rx_stats_macrcv_errors += 1;

	if (desc0 & RX_DRIBBLING)
		dmfep->rx_stats_align += 1;

	if (desc0 & RX_MII_ERR)
		dmfep->rx_stats_macrcv_errors += 1;
}
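
/*
 * Note on the bit test above (added commentary, not original code):
 * an expression of the form
 *
 *	((BIT_A | BIT_B) & ~word) == 0
 *
 * is true exactly when *both* BIT_A and BIT_B are set in <word>.
 * The same idiom is used in dmfe_getp() below to require both the
 * first-descriptor and last-descriptor flags.
 */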

/*
 * Receive incoming packet(s) and pass them up ...
 */
static mblk_t *
dmfe_getp(dmfe_t *dmfep)
{
	dma_area_t *descp;
	mblk_t **tail;
	mblk_t *head;
	mblk_t *mp;
	char *rxb;
	uchar_t *dp;
	uint32_t desc0;
	uint32_t misses;
	int packet_length;
	int index;

	mutex_enter(dmfep->rxlock);

	/*
	 * Update the missed frame statistic from the on-chip counter.
	 */
	misses = dmfe_chip_get32(dmfep, MISSED_FRAME_REG);
	dmfep->rx_stats_norcvbuf += (misses & MISSED_FRAME_MASK);

	/*
	 * sync (all) receive descriptors before inspecting them
	 */
	descp = &dmfep->rx_desc;
	DMA_SYNC(descp, DDI_DMA_SYNC_FORKERNEL);

	/*
	 * We should own at least one RX entry, since we've had a
	 * receive interrupt, but let's not be dogmatic about it.
	 */
	index = dmfep->rx.next_free;
	desc0 = dmfe_ring_get32(descp, index, DESC0);

	DTRACE_PROBE1(rx__start, uint32_t, desc0);
	for (head = NULL, tail = &head; (desc0 & RX_OWN) == 0; ) {
		/*
		 * Maintain statistics for every descriptor returned
		 * to us by the chip ...
		 */
		dmfe_update_rx_stats(dmfep, desc0);

		/*
		 * Check that the entry has both "packet start" and
		 * "packet end" flags.  We really shouldn't get packet
		 * fragments, 'cos all the RX buffers are bigger than
		 * the largest valid packet.  So we'll just drop any
		 * fragments we find & skip on to the next entry.
		 */
		if (((RX_FIRST_DESC | RX_LAST_DESC) & ~desc0) != 0) {
			DTRACE_PROBE1(rx__frag, uint32_t, desc0);
			goto skip;
		}

		/*
		 * A whole packet in one buffer.  We have to check error
		 * status and packet length before forwarding it upstream.
		 */
		if (desc0 & RX_ERR_SUMMARY) {
			DTRACE_PROBE1(rx__err, uint32_t, desc0);
			goto skip;
		}

		packet_length = (desc0 >> 16) & 0x3fff;
		if (packet_length > DMFE_MAX_PKT_SIZE) {
			DTRACE_PROBE1(rx__toobig, int, packet_length);
			goto skip;
		} else if (packet_length < ETHERMIN) {
			/*
			 * Note that a VLAN packet would be even larger,
			 * but we don't worry about dropping runt VLAN
			 * frames.
			 *
			 * This check is probably redundant, as well,
			 * since the hardware should drop RUNT frames.
			 */
			DTRACE_PROBE1(rx__runt, int, packet_length);
			goto skip;
		}

		/*
		 * Sync the data, so we can examine it; then check that
		 * the packet is really intended for us (remember that
		 * if we're using Imperfect Filtering, then the chip will
		 * receive unicast packets sent to stations whose addresses
		 * just happen to hash to the same value as our own; we
		 * discard these here so they don't get sent upstream ...)
		 */
		(void) ddi_dma_sync(dmfep->rx_buff.dma_hdl,
		    index * DMFE_BUF_SIZE, DMFE_BUF_SIZE,
		    DDI_DMA_SYNC_FORKERNEL);
		rxb = &dmfep->rx_buff.mem_va[index*DMFE_BUF_SIZE];


		/*
		 * We do not bother to check that the packet is really for
		 * us; we let the MAC framework make that check instead.
		 * This is especially important if we ever want to support
		 * multiple MAC addresses.
		 */

		/*
		 * Packet looks good; get a buffer to copy it into.  We
		 * allow some space at the front of the allocated buffer
		 * (HEADROOM) in case any upstream modules want to prepend
		 * some sort of header.  The value has been carefully chosen
		 * so that it also has the side-effect of making the packet
		 * *contents* 4-byte aligned, as required by NCA!
		 */
		mp = allocb(DMFE_HEADROOM + packet_length, 0);
		if (mp == NULL) {
			DTRACE_PROBE(rx__no__buf);
			dmfep->rx_stats_norcvbuf += 1;
			goto skip;
		}

		/*
		 * Account for statistics of good packets.
		 */
		dmfep->rx_stats_ipackets += 1;
		dmfep->rx_stats_rbytes += packet_length;
		if (desc0 & RX_MULTI_FRAME) {
			if (bcmp(rxb, dmfe_broadcast_addr, ETHERADDRL)) {
				dmfep->rx_stats_multi += 1;
			} else {
				dmfep->rx_stats_bcast += 1;
			}
		}

		/*
		 * Copy the packet into the STREAMS buffer
		 */
		dp = mp->b_rptr += DMFE_HEADROOM;
		mp->b_cont = mp->b_next = NULL;

		/*
		 * Don't worry about stripping the vlan tag, the MAC
		 * layer will take care of that for us.
		 */
		bcopy(rxb, dp, packet_length);

		/*
		 * Fix up the packet length, and link it to the chain
		 */
		mp->b_wptr = mp->b_rptr + packet_length - ETHERFCSL;
		*tail = mp;
		tail = &mp->b_next;

	skip:
		/*
		 * Return ownership of ring entry & advance to next
		 */
		dmfe_ring_put32(descp, index, DESC0, RX_OWN);
		index = NEXT(index, dmfep->rx.n_desc);
		desc0 = dmfe_ring_get32(descp, index, DESC0);
	}

	/*
	 * Remember where to start looking next time ...
	 */
	dmfep->rx.next_free = index;

	/*
	 * sync the receive descriptors that we've given back
	 * (actually, we sync all of them for simplicity), and
	 * wake the chip in case it had suspended receive
	 */
	DMA_SYNC(descp, DDI_DMA_SYNC_FORDEV);
	dmfe_chip_put32(dmfep, RX_POLL_REG, 0);

	mutex_exit(dmfep->rxlock);
	return (head);
}

/*
 * ========== Primary TX side routines ==========
 */

/*
 * TX ring management:
 *
 * There are <tx.n_desc> entries in the ring, of which those from
 * <tx.next_free> round to but not including <tx.next_busy> must
 * be owned by the CPU.  The number of such entries should equal
 * <tx.n_free>; but there may also be some more entries which the
 * chip has given back but which we haven't yet accounted for.
 * The routine dmfe_reclaim_tx_desc() adjusts the indexes & counts
 * as it discovers such entries.
 *
 * Initially, or when the ring is entirely free:
 *	C = Owned by CPU
 *	D = Owned by Davicom (DMFE) chip
 *
 *	  tx.next_free					tx.n_desc = 16
 *	  |
 *	  v
 *	+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
 *	| C | C | C | C | C | C | C | C | C | C | C | C | C | C | C | C |
 *	+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
 *	  ^
 *	  |
 *	  tx.next_busy					tx.n_free = 16
 *
 * On entry to reclaim() during normal use:
 *
 *	                                      tx.next_free	tx.n_desc = 16
 *	                                      |
 *	                                      v
 *	+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
 *	| C | C | C | C | C | C | D | D | D | C | C | C | C | C | C | C |
 *	+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
 *	          ^
 *	          |
 *	          tx.next_busy				tx.n_free = 9
 *
 * On exit from reclaim():
 *
 *	                                      tx.next_free	tx.n_desc = 16
 *	                                      |
 *	                                      v
 *	+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
 *	| C | C | C | C | C | C | D | D | D | C | C | C | C | C | C | C |
 *	+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
 *	                          ^
 *	                          |
 *	                          tx.next_busy		tx.n_free = 13
 *
 * The ring is considered "full" when only one entry is owned by
 * the CPU; thus <tx.n_free> should always be >= 1.
 *
 *	                      tx.next_free		tx.n_desc = 16
 *	                      |
 *	                      v
 *	+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
 *	| D | D | D | D | D | C | D | D | D | D | D | D | D | D | D | D |
 *	+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
 *	                          ^
 *	                          |
 *	                          tx.next_busy		tx.n_free = 1
 */
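
/*
 * Invariant sketch (added commentary, not original code): once
 * dmfe_reclaim_tx_desc() has caught up, the count and the indexes
 * agree, as in the diagrams above:
 *
 *	if (dmfep->tx.n_free < dmfep->tx.n_desc)
 *		ASSERT(dmfep->tx.n_free ==
 *		    (dmfep->tx.next_busy - dmfep->tx.next_free +
 *		    dmfep->tx.n_desc) % dmfep->tx.n_desc);
 *
 * The fully-free ring is the special case: both indexes are equal,
 * and <tx.n_free> == <tx.n_desc> distinguishes "empty" from "full".
 */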

/*
 * Function to update transmit statistics on various errors
 */
static void
dmfe_update_tx_stats(dmfe_t *dmfep, int index, uint32_t desc0, uint32_t desc1)
{
	uint32_t collisions;
	uint32_t errbits;
	uint32_t errsum;

	ASSERT(mutex_owned(dmfep->txlock));

	collisions = ((desc0 >> 3) & 0x0f);
	errsum = desc0 & TX_ERR_SUMMARY;
	errbits = desc0 & (TX_UNDERFLOW | TX_LATE_COLL | TX_CARRIER_LOSS |
	    TX_NO_CARRIER | TX_EXCESS_COLL | TX_JABBER_TO);
	if ((errsum == 0) != (errbits == 0)) {
		dmfe_log(dmfep, "dubious TX error status 0x%x", desc0);
		desc0 |= TX_ERR_SUMMARY;
	}

	if (desc0 & TX_ERR_SUMMARY) {
		dmfep->tx_stats_oerrors += 1;

		/*
		 * If we ever see a transmit jabber timeout, we count it
		 * as a MAC-level transmit error; but we probably won't
		 * see it as it causes an Abnormal interrupt and we reset
		 * the chip in order to recover
		 */
		if (desc0 & TX_JABBER_TO) {
			dmfep->tx_stats_macxmt_errors += 1;
			dmfep->tx_stats_jabber += 1;
		}

		if (desc0 & TX_UNDERFLOW)
			dmfep->tx_stats_underflow += 1;
		else if (desc0 & TX_LATE_COLL)
			dmfep->tx_stats_xmtlatecoll += 1;

		if (desc0 & (TX_CARRIER_LOSS | TX_NO_CARRIER))
			dmfep->tx_stats_nocarrier += 1;

		if (desc0 & TX_EXCESS_COLL) {
			dmfep->tx_stats_excoll += 1;
			collisions = 16;
		}
	} else {
		int bit = index % NBBY;
		int byt = index / NBBY;

		if (dmfep->tx_mcast[byt] & (1 << bit)) {
			dmfep->tx_mcast[byt] &= ~(1 << bit);
			dmfep->tx_stats_multi += 1;

		} else if (dmfep->tx_bcast[byt] & (1 << bit)) {
			dmfep->tx_bcast[byt] &= ~(1 << bit);
			dmfep->tx_stats_bcast += 1;
		}

		dmfep->tx_stats_opackets += 1;
		dmfep->tx_stats_obytes += desc1 & TX_BUFFER_SIZE1;
	}

	if (collisions == 1)
		dmfep->tx_stats_first_coll += 1;
	else if (collisions != 0)
		dmfep->tx_stats_multi_coll += 1;
	dmfep->tx_stats_collisions += collisions;

	if (desc0 & TX_DEFERRED)
		dmfep->tx_stats_defer += 1;
}

/*
 * Reclaim all the ring entries that the chip has returned to us ...
 *
 * Returns B_FALSE if no entries could be reclaimed.  Otherwise, reclaims
 * as many as possible, restarts the TX stall timeout, and returns B_TRUE.
 */
static boolean_t
dmfe_reclaim_tx_desc(dmfe_t *dmfep)
{
	dma_area_t *descp;
	uint32_t desc0;
	uint32_t desc1;
	int i;

	ASSERT(mutex_owned(dmfep->txlock));

	/*
	 * sync transmit descriptor ring before looking at it
	 */
	descp = &dmfep->tx_desc;
	DMA_SYNC(descp, DDI_DMA_SYNC_FORKERNEL);

	/*
	 * Early exit if there are no descriptors to reclaim, either
	 * because they're all reclaimed already, or because the next
	 * one is still owned by the chip ...
	 */
	i = dmfep->tx.next_busy;
	if (i == dmfep->tx.next_free)
		return (B_FALSE);
	desc0 = dmfe_ring_get32(descp, i, DESC0);
	if (desc0 & TX_OWN)
		return (B_FALSE);

	/*
	 * Reclaim as many descriptors as possible ...
	 */
	for (;;) {
		desc1 = dmfe_ring_get32(descp, i, DESC1);
		ASSERT((desc1 & (TX_SETUP_PACKET | TX_LAST_DESC)) != 0);

		if ((desc1 & TX_SETUP_PACKET) == 0) {
			/*
			 * Regular packet - just update stats
			 */
			dmfe_update_tx_stats(dmfep, i, desc0, desc1);
		}

		/*
		 * Update count & index; we're all done if the ring is
		 * now fully reclaimed, or the next entry is still owned
		 * by the chip ...
		 */
		dmfep->tx.n_free += 1;
		i = NEXT(i, dmfep->tx.n_desc);
		if (i == dmfep->tx.next_free)
			break;
		desc0 = dmfe_ring_get32(descp, i, DESC0);
		if (desc0 & TX_OWN)
			break;
	}

	dmfep->tx.next_busy = i;
	dmfep->tx_pending_tix = 0;
	return (B_TRUE);
}

/*
 * Send the message in the message block chain <mp>.
 *
 * The message is freed if and only if its contents are successfully copied
 * and queued for transmission (so that the return value is B_TRUE).
 * If we can't queue the message, the return value is B_FALSE and
 * the message is *not* freed.
 *
 * This routine handles the special case of <mp> == NULL, which indicates
 * that we want to "send" the special "setup packet" allocated during
 * startup.  We have to use some different flags in the packet descriptor
 * to say it's a setup packet (from the global <dmfe_setup_desc1>), and the
 * setup packet *isn't* freed after use.
 */
static boolean_t
dmfe_send_msg(dmfe_t *dmfep, mblk_t *mp)
{
	dma_area_t *descp;
	mblk_t *bp;
	char *txb;
	uint32_t desc1;
	uint32_t index;
	size_t totlen;
	size_t mblen;
	uint32_t paddr;

	/*
	 * If the number of free slots is below the reclaim threshold
	 * (soft limit), we'll try to reclaim some.  If we fail, and
	 * the number of free slots is also below the minimum required
	 * (the hard limit, usually 1), then we can't send the packet.
	 */
	mutex_enter(dmfep->txlock);
	if (dmfep->suspended) {
		mutex_exit(dmfep->txlock);
		return (B_FALSE);
	}

	if (dmfep->tx.n_free <= dmfe_tx_reclaim_level &&
	    dmfe_reclaim_tx_desc(dmfep) == B_FALSE &&
	    dmfep->tx.n_free <= dmfe_tx_min_free) {
		/*
		 * Resource shortage - return B_FALSE so the packet
		 * will be queued for retry after the next TX-done
		 * interrupt.
		 */
		mutex_exit(dmfep->txlock);
		DTRACE_PROBE(tx__no__desc);
		return (B_FALSE);
	}

	/*
	 * There's a slot available, so claim it by incrementing
	 * the next-free index and decrementing the free count.
	 * If the ring is currently empty, we also restart the
	 * stall-detect timer.  The ASSERTions check that our
	 * invariants still hold:
	 *	the next-free index must not match the next-busy index
	 *	there must still be at least one free entry
	 * After this, we now have exclusive ownership of the ring
	 * entry (and matching buffer) indicated by <index>, so we
	 * don't need to hold the TX lock any longer
	 */
	index = dmfep->tx.next_free;
	dmfep->tx.next_free = NEXT(index, dmfep->tx.n_desc);
	ASSERT(dmfep->tx.next_free != dmfep->tx.next_busy);
	if (dmfep->tx.n_free-- == dmfep->tx.n_desc)
		dmfep->tx_pending_tix = 0;
	ASSERT(dmfep->tx.n_free >= 1);
	mutex_exit(dmfep->txlock);

	/*
	 * Check the ownership of the ring entry ...
	 */
	descp = &dmfep->tx_desc;
	ASSERT((dmfe_ring_get32(descp, index, DESC0) & TX_OWN) == 0);

	if (mp == NULL) {
		/*
		 * Indicates we should send a SETUP packet, which we do by
		 * temporarily switching the BUFFER1 pointer in the ring
		 * entry.  The reclaim routine will restore BUFFER1 to its
		 * usual value.
		 *
		 * Note that as the setup packet is tagged on the end of
		 * the TX ring, when we sync the descriptor we're also
		 * implicitly syncing the setup packet - hence, we don't
		 * need a separate ddi_dma_sync() call here.
		 */
		desc1 = dmfe_setup_desc1;
		paddr = descp->setup_dvma;
	} else {
		/*
		 * A regular packet; we copy the data into a pre-mapped
		 * buffer, which avoids the overhead (and complication)
		 * of mapping/unmapping STREAMS buffers and keeping hold
		 * of them until the DMA has completed.
		 *
		 * Because all buffers are the same size, and larger
		 * than the longest single valid message, we don't have
		 * to bother about splitting the message across multiple
		 * buffers.
		 */
		txb = &dmfep->tx_buff.mem_va[index*DMFE_BUF_SIZE];
		totlen = 0;
		bp = mp;

		/*
		 * Copy all (remaining) mblks in the message ...
		 */
		for (; bp != NULL; bp = bp->b_cont) {
			mblen = MBLKL(bp);
			if ((totlen += mblen) <= DMFE_MAX_PKT_SIZE) {
				bcopy(bp->b_rptr, txb, mblen);
				txb += mblen;
			}
		}

		/*
		 * Is this a multicast or broadcast packet?  We do
		 * this so that we can track statistics accurately
		 * when we reclaim it.
		 */
		txb = &dmfep->tx_buff.mem_va[index*DMFE_BUF_SIZE];
		if (txb[0] & 0x1) {
			if (bcmp(txb, dmfe_broadcast_addr, ETHERADDRL) == 0) {
				dmfep->tx_bcast[index / NBBY] |=
				    (1 << (index % NBBY));
			} else {
				dmfep->tx_mcast[index / NBBY] |=
				    (1 << (index % NBBY));
			}
		}

		/*
		 * We've reached the end of the chain; and we should have
		 * collected no more than DMFE_MAX_PKT_SIZE bytes into our
		 * buffer.  Note that the <size> field in the descriptor is
		 * only 11 bits, so bigger packets would be a problem!
		 */
		ASSERT(bp == NULL);
		ASSERT(totlen <= DMFE_MAX_PKT_SIZE);
		totlen &= TX_BUFFER_SIZE1;
		desc1 = TX_FIRST_DESC | TX_LAST_DESC | totlen;
		paddr = dmfep->tx_buff.mem_dvma + index*DMFE_BUF_SIZE;

		(void) ddi_dma_sync(dmfep->tx_buff.dma_hdl,
		    index * DMFE_BUF_SIZE, DMFE_BUF_SIZE, DDI_DMA_SYNC_FORDEV);
	}

	/*
	 * Update ring descriptor entries, sync them, and wake up the
	 * transmit process
	 */
	if ((index & dmfe_tx_int_factor) == 0)
		desc1 |= TX_INT_ON_COMP;
	desc1 |= TX_CHAINING;
	dmfe_ring_put32(descp, index, BUFFER1, paddr);
	dmfe_ring_put32(descp, index, DESC1, desc1);
	dmfe_ring_put32(descp, index, DESC0, TX_OWN);
	DMA_SYNC(descp, DDI_DMA_SYNC_FORDEV);
	dmfe_chip_put32(dmfep, TX_POLL_REG, 0);

	/*
	 * Finally, free the message & return success
	 */
	if (mp)
		freemsg(mp);
	return (B_TRUE);
}

/*
 * dmfe_m_tx() -- send a chain of packets
 *
 * Called when packet(s) are ready to be transmitted.  A pointer to an
 * M_DATA message that contains the packet is passed to this routine.
 * The complete LLC header is contained in the message's first message
 * block, and the remainder of the packet is contained within
 * additional M_DATA message blocks linked to the first message block.
 *
 * Additional messages may be passed by linking with b_next.
 */
static mblk_t *
dmfe_m_tx(void *arg, mblk_t *mp)
{
	dmfe_t *dmfep = arg;			/* private device info	*/
	mblk_t *next;

	ASSERT(mp != NULL);
	ASSERT(dmfep->mac_state == DMFE_MAC_STARTED);

	if (dmfep->chip_state != CHIP_RUNNING)
		return (mp);

	while (mp != NULL) {
		next = mp->b_next;
		mp->b_next = NULL;
		if (!dmfe_send_msg(dmfep, mp)) {
			mp->b_next = next;
			break;
		}
		mp = next;
	}

	return (mp);
}

/*
 * ========== Address-setting routines (TX-side) ==========
 */

/*
 * Find the index of the relevant bit in the setup packet.
 * This must mirror the way the hardware will actually calculate it!
 */
static uint32_t
dmfe_hash_index(const uint8_t *address)
{
	uint32_t const POLY = HASH_POLY;
	uint32_t crc = HASH_CRC;
	uint32_t index;
	uint32_t msb;
	uchar_t currentbyte;
	int byteslength;
	int shift;
	int bit;

	for (byteslength = 0; byteslength < ETHERADDRL; ++byteslength) {
		currentbyte = address[byteslength];
		for (bit = 0; bit < 8; ++bit) {
			msb = crc >> 31;
			crc <<= 1;
			if (msb ^ (currentbyte & 1)) {
				crc ^= POLY;
				crc |= 0x00000001;
			}
			currentbyte >>= 1;
		}
	}

	for (index = 0, bit = 23, shift = 8; shift >= 0; ++bit, --shift)
		index |= (((crc >> bit) & 1) << shift);

	return (index);
}

/*
 * Find and set/clear the relevant bit in the setup packet hash table
 * This must mirror the way the hardware will actually interpret it!
 */
static void
dmfe_update_hash(dmfe_t *dmfep, uint32_t index, boolean_t val)
{
	dma_area_t *descp;
	uint32_t tmp;

	ASSERT(mutex_owned(dmfep->oplock));

	descp = &dmfep->tx_desc;
	tmp = dmfe_setup_get32(descp, index/16);
	if (val)
		tmp |= 1 << (index%16);
	else
		tmp &= ~(1 << (index%16));
	dmfe_setup_put32(descp, index/16, tmp);
}

/*
 * Update the refcount for the bit in the setup packet corresponding
 * to the specified address; if it changes between zero & nonzero,
 * also update the bitmap itself & return B_TRUE, so that the caller
 * knows to re-send the setup packet.  Otherwise (only the refcount
 * changed), return B_FALSE
 */
static boolean_t
dmfe_update_mcast(dmfe_t *dmfep, const uint8_t *mca, boolean_t val)
{
	uint32_t index;
	uint8_t *refp;
	boolean_t change;

	index = dmfe_hash_index(mca);
	refp = &dmfep->mcast_refs[index];
	change = (val ? (*refp)++ : --(*refp)) == 0;

	if (change)
		dmfe_update_hash(dmfep, index, val);

	return (change);
}
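
/*
 * Worked example (added commentary, not original code): the 9-bit
 * index returned by dmfe_hash_index() selects one of 512 bits in the
 * setup packet's hash table, held 16 significant bits per 32-bit
 * setup word.  For a hypothetical index of 0x123 (291),
 * dmfe_update_hash() therefore operates on
 *
 *	bit 291 % 16 == 3 of setup word 291 / 16 == 18.
 *
 * The refcount kept by dmfe_update_mcast() means that if two
 * multicast addresses happen to hash to the same bit, adding both
 * sets the bit just once, and it is only cleared again (and the setup
 * packet re-sent) when *both* have been removed.
 */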

/*
 * "Transmit" the (possibly updated) magic setup packet
 */
static int
dmfe_send_setup(dmfe_t *dmfep)
{
	int status;

	ASSERT(mutex_owned(dmfep->oplock));

	if (dmfep->suspended)
		return (0);

	/*
	 * If the chip isn't running, we can't really send the setup frame
	 * now but it doesn't matter, 'cos it will be sent when the transmit
	 * process is restarted (see dmfe_start()).
	 */
	if ((dmfep->opmode & START_TRANSMIT) == 0)
		return (0);

	/*
	 * "Send" the setup frame.  If it fails (e.g. no resources),
	 * set a flag; then the factotum will retry the "send".  Once
	 * it works, we can clear the flag no matter how many attempts
	 * had previously failed.  We tell the caller that it worked
	 * whether it did or not; after all, it *will* work eventually.
	 */
	status = dmfe_send_msg(dmfep, NULL);
	dmfep->need_setup = status ? B_FALSE : B_TRUE;
	return (0);
}

/*
 * dmfe_m_unicst() -- set the physical network address
 */
static int
dmfe_m_unicst(void *arg, const uint8_t *macaddr)
{
	dmfe_t *dmfep = arg;
	int status;
	int index;

	/*
	 * Update our current address and send out a new setup packet
	 *
	 * Here we accommodate the use of HASH_ONLY or HASH_AND_PERFECT
	 * filtering modes (we don't support PERFECT_ONLY or INVERSE modes).
	 *
	 * It is said that there is a bug in the 21140 where it fails to
	 * receive packets addressed to the specified perfect filter
	 * address.  If the same bug is present in the DM9102A, the
	 * TX_FILTER_TYPE1 bit should be set in the module variable
	 * dmfe_setup_desc1.
	 *
	 * If TX_FILTER_TYPE1 is set, we will use HASH_ONLY filtering.
	 * In this mode, *all* incoming addresses are hashed and looked
	 * up in the bitmap described by the setup packet.  Therefore,
	 * the bit representing the station address has to be added to
	 * the table before sending it out.  If the address is changed,
	 * the old entry should be removed before the new entry is made.
	 *
	 * NOTE: in this mode, unicast packets that are not intended for
	 * this station may be received; it is up to software to filter
	 * them out afterwards!
	 *
	 * If TX_FILTER_TYPE1 is *not* set, we will use HASH_AND_PERFECT
	 * filtering.  In this mode, multicast addresses are hashed and
	 * checked against the bitmap, while unicast addresses are simply
	 * matched against the one physical address specified in the setup
	 * packet.  This means that we shouldn't receive unicast packets
	 * that aren't intended for us (but software still has to filter
	 * multicast packets just the same).
	 *
	 * Whichever mode we're using, we have to enter the broadcast
	 * address into the multicast filter map too, so we do this on
	 * the first time through after attach or reset.
	 */
	mutex_enter(dmfep->oplock);

	if (dmfep->addr_set && dmfe_setup_desc1 & TX_FILTER_TYPE1)
		(void) dmfe_update_mcast(dmfep, dmfep->curr_addr, B_FALSE);
	if (dmfe_setup_desc1 & TX_FILTER_TYPE1)
		(void) dmfe_update_mcast(dmfep, macaddr, B_TRUE);
	if (!dmfep->addr_set)
		(void) dmfe_update_mcast(dmfep, dmfe_broadcast_addr, B_TRUE);

	/*
	 * Remember the new current address
	 */
	ethaddr_copy(macaddr, dmfep->curr_addr);
	dmfep->addr_set = B_TRUE;

	/*
	 * Install the new physical address into the proper position in
	 * the setup frame; this is only used if we select hash+perfect
	 * filtering, but we'll put it in anyway.  The ugliness here is
	 * down to the usual war of the egg :(
	 */
	for (index = 0; index < ETHERADDRL; index += 2)
		dmfe_setup_put32(&dmfep->tx_desc, SETUPBUF_PHYS+index/2,
		    (macaddr[index+1] << 8) | macaddr[index]);

	/*
	 * Finally, we're ready to "transmit" the setup frame
	 */
	status = dmfe_send_setup(dmfep);
	mutex_exit(dmfep->oplock);

	return (status);
}
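
/*
 * Worked example (added commentary, not original code): the loop at
 * the end of dmfe_m_unicst() above packs the station address two
 * bytes at a time, low byte first, into the low 16 bits of
 * consecutive setup words.  For a hypothetical address
 * 00:03:ba:12:34:56 it stores
 *
 *	SETUPBUF_PHYS + 0:	0x0300
 *	SETUPBUF_PHYS + 1:	0x12ba
 *	SETUPBUF_PHYS + 2:	0x5634
 *
 * which is the byte order the chip reads back from the setup buffer
 * for its perfect filter entry.
 */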

/*
 * dmfe_m_multicst() -- enable or disable a multicast address
 *
 * Program the hardware to enable/disable the multicast address
 * in "mca" (enable if add is true, otherwise disable it.)
 * We keep a refcount for each bit in the map, so that it still
 * works out properly if multiple addresses hash to the same bit.
 * dmfe_update_mcast() tells us whether the map actually changed;
 * if so, we have to re-"transmit" the magic setup packet.
 */
static int
dmfe_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
{
	dmfe_t *dmfep = arg;			/* private device info	*/
	int status = 0;

	mutex_enter(dmfep->oplock);
	if (dmfe_update_mcast(dmfep, mca, add))
		status = dmfe_send_setup(dmfep);
	mutex_exit(dmfep->oplock);

	return (status);
}


/*
 * ========== Internal state management entry points ==========
 */

/*
 * These routines provide all the functionality required by the
 * corresponding MAC layer entry points, but don't update the MAC layer
 * state, so they can be called internally without disturbing our record
 * of what the MAC layer thinks we should be doing ...
 */

/*
 * dmfe_stop() -- stop processing, don't reset h/w or rings
 */
static void
dmfe_stop(dmfe_t *dmfep)
{
	ASSERT(mutex_owned(dmfep->oplock));

	dmfe_stop_chip(dmfep, CHIP_STOPPED);
}

/*
 * dmfe_reset() -- stop processing, reset h/w & rings to initial state
 */
static void
dmfe_reset(dmfe_t *dmfep)
{
	ASSERT(mutex_owned(dmfep->oplock));
	ASSERT(mutex_owned(dmfep->rxlock));
	ASSERT(mutex_owned(dmfep->txlock));

	dmfe_stop_chip(dmfep, CHIP_RESET);
	dmfe_init_rings(dmfep);
}

/*
 * dmfe_start() -- start transmitting/receiving
 */
static void
dmfe_start(dmfe_t *dmfep)
{
	uint32_t gpsr;

	ASSERT(mutex_owned(dmfep->oplock));

	ASSERT(dmfep->chip_state == CHIP_RESET ||
	    dmfep->chip_state == CHIP_STOPPED);

	/*
	 * Make opmode consistent with PHY duplex setting
	 */
	gpsr = dmfe_chip_get32(dmfep, PHY_STATUS_REG);
	if (gpsr & GPS_FULL_DUPLEX)
		dmfep->opmode |= FULL_DUPLEX;
	else
		dmfep->opmode &= ~FULL_DUPLEX;

	/*
	 * Start transmit processing
	 * Set up the address filters
	 * Start receive processing
	 * Enable interrupts
	 */
	dmfe_start_chip(dmfep, START_TRANSMIT);
	(void) dmfe_send_setup(dmfep);
	drv_usecwait(10);
	dmfe_start_chip(dmfep, START_RECEIVE);
	dmfe_enable_interrupts(dmfep);
}

/*
 * dmfe_restart - restart transmitting/receiving after error or suspend
 */
static void
dmfe_restart(dmfe_t *dmfep)
{
	ASSERT(mutex_owned(dmfep->oplock));

	/*
	 * You need not only <oplock>, but also <rxlock> AND <txlock>
	 * in order to reset the rings, but then <txlock> *mustn't*
	 * be held across the call to dmfe_start()
	 */
	mutex_enter(dmfep->rxlock);
	mutex_enter(dmfep->txlock);
	dmfe_reset(dmfep);
	mutex_exit(dmfep->txlock);
	mutex_exit(dmfep->rxlock);
	if (dmfep->mac_state == DMFE_MAC_STARTED) {
		dmfe_start(dmfep);
	}
}


/*
 * ========== MAC-required management entry points ==========
 */

/*
 * dmfe_m_stop() -- stop transmitting/receiving
 */
static void
dmfe_m_stop(void *arg)
{
	dmfe_t *dmfep = arg;			/* private device info	*/

	/*
	 * Just stop processing, then record new MAC state
	 */
	mii_stop(dmfep->mii);

	mutex_enter(dmfep->oplock);
	if (!dmfep->suspended)
		dmfe_stop(dmfep);
	dmfep->mac_state = DMFE_MAC_STOPPED;
	mutex_exit(dmfep->oplock);
}

/*
 * dmfe_m_start() -- start transmitting/receiving
 */
static int
dmfe_m_start(void *arg)
{
	dmfe_t *dmfep = arg;			/* private device info	*/

	/*
	 * Start processing and record new MAC state
	 */
	mutex_enter(dmfep->oplock);
	if (!dmfep->suspended)
		dmfe_start(dmfep);
	dmfep->mac_state = DMFE_MAC_STARTED;
	mutex_exit(dmfep->oplock);

	mii_start(dmfep->mii);

	return (0);
}

/*
 * dmfe_m_promisc() -- set or reset promiscuous mode on the board
 *
 * Program the hardware to enable/disable promiscuous and/or
 * receive-all-multicast modes.  Davicom don't document this
 * clearly, but it looks like we can do this on-the-fly (i.e.
 * without stopping & restarting the TX/RX processes).
 */
static int
dmfe_m_promisc(void *arg, boolean_t on)
{
	dmfe_t *dmfep = arg;

	mutex_enter(dmfep->oplock);
	dmfep->opmode &= ~(PROMISC_MODE | PASS_MULTICAST);
	if (on)
		dmfep->opmode |= PROMISC_MODE;
	if (!dmfep->suspended)
		dmfe_set_opmode(dmfep);
	mutex_exit(dmfep->oplock);

	return (0);
}

/*
 * ========== Factotum, implemented as a softint handler ==========
 */

/*
 * The factotum is woken up when there's something to do that we'd rather
 * not do from inside a (high-level?) hardware interrupt handler.  Its
 * two main tasks are:
 *	reset & restart the chip after an error
 *	update & restart the chip after a link status change
 */
static uint_t
dmfe_factotum(caddr_t arg)
{
	dmfe_t *dmfep;

	dmfep = (void *)arg;
	ASSERT(dmfep->dmfe_guard == DMFE_GUARD);

	mutex_enter(dmfep->oplock);
	if (dmfep->suspended) {
		mutex_exit(dmfep->oplock);
		return (DDI_INTR_CLAIMED);
	}

	dmfep->factotum_flag = 0;
	DRV_KS_INC(dmfep, KS_FACTOTUM_RUN);

	/*
	 * Check for chip error ...
	 */
	if (dmfep->chip_state == CHIP_ERROR) {
		/*
		 * Error recovery required: reset the chip and the rings,
		 * then, if it's supposed to be running, kick it off again.
		 */
		DRV_KS_INC(dmfep, KS_RECOVERY);
		dmfe_restart(dmfep);
		mutex_exit(dmfep->oplock);

		mii_reset(dmfep->mii);

	} else if (dmfep->need_setup) {
		(void) dmfe_send_setup(dmfep);
		mutex_exit(dmfep->oplock);
	} else {
		mutex_exit(dmfep->oplock);
	}

	return (DDI_INTR_CLAIMED);
}

static void
dmfe_wake_factotum(dmfe_t *dmfep, int ks_id, const char *why)
{
	_NOTE(ARGUNUSED(why));
	ASSERT(mutex_owned(dmfep->oplock));
	DRV_KS_INC(dmfep, ks_id);

	if (dmfep->factotum_flag++ == 0)
		ddi_trigger_softintr(dmfep->factotum_id);
}
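
/*
 * Registration sketch (illustrative assumption - the attach code is
 * not part of this excerpt): the factotum above would normally be
 * hooked up as a low-priority soft interrupt, along the lines of
 *
 *	ddi_add_softintr(devinfo, DDI_SOFTINT_LOW, &dmfep->factotum_id,
 *	    NULL, NULL, dmfe_factotum, (caddr_t)dmfep);
 *
 * The <factotum_flag> counter in dmfe_wake_factotum() ensures that a
 * burst of wakeups while the softint is already pending triggers it
 * only once; dmfe_factotum() zeroes the flag again under <oplock>.
 */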


/*
 * ========== Periodic Tasks (Cyclic handler & friends) ==========
 */

/*
 * Periodic tick tasks, run from the cyclic handler
 *
 * Check for TX stall; flag an error and wake the factotum if so.
 */
static void
dmfe_tick_stall_check(dmfe_t *dmfep, uint32_t gpsr, uint32_t istat)
{
	boolean_t tx_stall;
	uint32_t tx_state;
	uint32_t limit;

	ASSERT(mutex_owned(dmfep->oplock));

	/*
	 * Check for transmit stall ...
	 *
	 * IF there's at least one packet in the ring, AND the timeout
	 * has elapsed, AND we can't reclaim any descriptors, THEN we've
	 * stalled, and we initiate a reset-and-recover cycle.
	 *
	 * Note that the timeout limit is based on the transmit engine
	 * state; we allow the transmitter longer to make progress in
	 * some states than in others, based on observations of this
	 * chip's actual behaviour in the lab.
	 *
	 * By observation, we find that on about 1 in 10000 passes through
	 * here, the TX lock is already held.  In that case, we'll skip
	 * the check on this pass rather than wait.  Most likely, the send
	 * routine was holding the lock when the interrupt happened, and
	 * we'll succeed next time through.  In the event of a real stall,
	 * the TX ring will fill up, after which the send routine won't be
	 * called any more and then we're sure to get in.
	 */
	tx_stall = B_FALSE;
	if (mutex_tryenter(dmfep->txlock)) {
		if (dmfep->tx.n_free < dmfep->tx.n_desc) {
			tx_state = TX_PROCESS_STATE(istat);
			if (gpsr & GPS_LINK_100)
				limit = stall_100_tix[tx_state];
			else
				limit = stall_10_tix[tx_state];
			if (++dmfep->tx_pending_tix >= limit &&
			    dmfe_reclaim_tx_desc(dmfep) == B_FALSE) {
				dmfe_log(dmfep, "TX stall detected "
				    "after %d ticks in state %d; "
				    "automatic recovery initiated",
				    dmfep->tx_pending_tix, tx_state);
				tx_stall = B_TRUE;
			}
		}
		mutex_exit(dmfep->txlock);
	}

	if (tx_stall) {
		dmfe_stop_chip(dmfep, CHIP_ERROR);
		dmfe_wake_factotum(dmfep, KS_TX_STALL, "tick (TX stall)");
	}
}

/*
 * Cyclic callback handler
 */
static void
dmfe_cyclic(void *arg)
{
	dmfe_t *dmfep = arg;			/* private device info	*/
	uint32_t istat;
	uint32_t gpsr;

	/*
	 * If the chip's not RUNNING, there's nothing to do.
	 * If we can't get the mutex straight away, we'll just
	 * skip this pass; we'll be back soon enough anyway.
	 */
	if (mutex_tryenter(dmfep->oplock) == 0)
		return;
	if ((dmfep->suspended) || (dmfep->chip_state != CHIP_RUNNING)) {
		mutex_exit(dmfep->oplock);
		return;
	}

	/*
	 * Recheck chip state (it might have been stopped since we
	 * checked above).  If still running, call each of the *tick*
	 * tasks.  They will check for link change, TX stall, etc ...
	 */
	if (dmfep->chip_state == CHIP_RUNNING) {
		istat = dmfe_chip_get32(dmfep, STATUS_REG);
		gpsr = dmfe_chip_get32(dmfep, PHY_STATUS_REG);
		dmfe_tick_stall_check(dmfep, gpsr, istat);
	}

	DRV_KS_INC(dmfep, KS_CYCLIC_RUN);
	mutex_exit(dmfep->oplock);
}
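
/*
 * Registration sketch (illustrative assumption - the attach code is
 * not part of this excerpt): a handler like dmfe_cyclic() is
 * typically installed via the DDI periodic interface, converting the
 * patchable tick interval from microseconds to nanoseconds:
 *
 *	dmfep->cycid = ddi_periodic_add(dmfe_cyclic, dmfep,
 *	    dmfe_tick_us * 1000, DDI_IPL_0);
 *
 * and removed again at detach time with ddi_periodic_delete().
 */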

/*
 * ========== Hardware interrupt handler ==========
 */

/*
 * dmfe_interrupt() -- handle chip interrupts
 */
static uint_t
dmfe_interrupt(caddr_t arg)
{
	dmfe_t *dmfep;				/* private device info	*/
	uint32_t interrupts;
	uint32_t istat;
	const char *msg;
	mblk_t *mp;
	boolean_t warning_msg = B_TRUE;

	dmfep = (void *)arg;

	mutex_enter(dmfep->oplock);
	if (dmfep->suspended) {
		mutex_exit(dmfep->oplock);
		return (DDI_INTR_UNCLAIMED);
	}

	/*
	 * A quick check as to whether the interrupt was from this
	 * device, before we even finish setting up all our local
	 * variables.  Note that reading the interrupt status register
	 * doesn't have any unpleasant side effects such as clearing
	 * the bits read, so it's quite OK to re-read it once we have
	 * determined that we are going to service this interrupt and
	 * grabbed the mutexen.
	 */
	istat = dmfe_chip_get32(dmfep, STATUS_REG);
	if ((istat & (NORMAL_SUMMARY_INT | ABNORMAL_SUMMARY_INT)) == 0) {
		mutex_exit(dmfep->oplock);
		return (DDI_INTR_UNCLAIMED);
	}

	DRV_KS_INC(dmfep, KS_INTERRUPT);

	/*
	 * Identify bits that represent enabled interrupts ...
	 */
	istat |= dmfe_chip_get32(dmfep, STATUS_REG);
	interrupts = istat & dmfep->imask;
	ASSERT(interrupts != 0);

	DTRACE_PROBE1(intr, uint32_t, istat);

	/*
	 * Check for any interrupts other than TX/RX done.
	 * If there are any, they are considered Abnormal
	 * and will cause the chip to be reset.
	 */
	if (interrupts & ~(RX_PKTDONE_INT | TX_PKTDONE_INT)) {
		if (istat & ABNORMAL_SUMMARY_INT) {
			/*
			 * Any Abnormal interrupts will lead to us
			 * resetting the chip, so we don't bother
			 * to clear each interrupt individually.
			 *
			 * Our main task here is to identify the problem,
			 * by pointing out the most significant unexpected
			 * bit.  Additional bits may well be consequences
			 * of the first problem, so we consider the possible
			 * causes in order of severity.
			 */
			if (interrupts & SYSTEM_ERR_INT) {
				switch (istat & SYSTEM_ERR_BITS) {
				case SYSTEM_ERR_M_ABORT:
					msg = "Bus Master Abort";
					break;

				case SYSTEM_ERR_T_ABORT:
					msg = "Bus Target Abort";
					break;

				case SYSTEM_ERR_PARITY:
					msg = "Parity Error";
					break;

				default:
					msg = "Unknown System Bus Error";
					break;
				}
			} else if (interrupts & RX_STOPPED_INT) {
				msg = "RX process stopped";
			} else if (interrupts & RX_UNAVAIL_INT) {
				msg = "RX buffer unavailable";
				warning_msg = B_FALSE;
			} else if (interrupts & RX_WATCHDOG_INT) {
				msg = "RX watchdog timeout?";
			} else if (interrupts & RX_EARLY_INT) {
				msg = "RX early interrupt?";
			} else if (interrupts & TX_STOPPED_INT) {
				msg = "TX process stopped";
			} else if (interrupts & TX_JABBER_INT) {
				msg = "TX jabber timeout";
			} else if (interrupts & TX_UNDERFLOW_INT) {
				msg = "TX underflow?";
			} else if (interrupts & TX_EARLY_INT) {
				msg = "TX early interrupt?";

			} else if (interrupts & LINK_STATUS_INT) {
				msg = "Link status change?";
			} else if (interrupts & GP_TIMER_INT) {
				msg = "Timer expired?";
			}

			if (warning_msg)
				dmfe_warning(dmfep, "abnormal interrupt, "
				    "status 0x%x: %s", istat, msg);

			/*
			 * We don't want to run the entire reinitialisation
			 * code out of this (high-level?) interrupt, so we
			 * simply STOP the chip, and wake up the factotum
			 * to reinitialise it ...
			 */
			dmfe_stop_chip(dmfep, CHIP_ERROR);
			dmfe_wake_factotum(dmfep, KS_CHIP_ERROR,
			    "interrupt (error)");
		} else {
			/*
			 * We shouldn't really get here (it would mean
			 * there were some unprocessed enabled bits but
			 * they weren't Abnormal?), but we'll check just
			 * in case ...
			 */
			DTRACE_PROBE1(intr__unexpected, uint32_t, istat);
		}
	}

	/*
	 * Acknowledge all the original bits - except in the case of an
	 * error, when we leave them unacknowledged so that the recovery
	 * code can see what was going on when the problem occurred ...
	 */
	if (dmfep->chip_state != CHIP_ERROR) {
		dmfe_chip_put32(dmfep, STATUS_REG, istat);
		/*
		 * Read-after-write forces completion on PCI bus.
		 */
		(void) dmfe_chip_get32(dmfep, STATUS_REG);
	}


	/*
	 * We've finished talking to the chip, so we can drop <oplock>
	 * before handling the normal interrupts, which only involve
	 * manipulation of descriptors ...
	 */
	mutex_exit(dmfep->oplock);

	if (interrupts & RX_PKTDONE_INT)
		if ((mp = dmfe_getp(dmfep)) != NULL)
			mac_rx(dmfep->mh, NULL, mp);

	if (interrupts & TX_PKTDONE_INT) {
		/*
		 * The only reason for taking this interrupt is to give
		 * MAC a chance to schedule queued packets after a
		 * ring-full condition.  To minimise the number of
		 * redundant TX-Done interrupts, we only mark two of the
		 * ring descriptors as 'interrupt-on-complete' - all the
		 * others are simply handed back without an interrupt.
1909 */ 1910 if (dmfe_reclaim_on_done && mutex_tryenter(dmfep->txlock)) { 1911 (void) dmfe_reclaim_tx_desc(dmfep); 1912 mutex_exit(dmfep->txlock); 1913 } 1914 mac_tx_update(dmfep->mh); 1915 } 1916 1917 return (DDI_INTR_CLAIMED); 1918 } 1919 1920 /* 1921 * ========== Statistics update handler ========== 1922 */ 1923 1924 static int 1925 dmfe_m_stat(void *arg, uint_t stat, uint64_t *val) 1926 { 1927 dmfe_t *dmfep = arg; 1928 int rv = 0; 1929 1930 /* Let MII handle its own stats. */ 1931 if (mii_m_getstat(dmfep->mii, stat, val) == 0) { 1932 return (0); 1933 } 1934 1935 mutex_enter(dmfep->oplock); 1936 mutex_enter(dmfep->rxlock); 1937 mutex_enter(dmfep->txlock); 1938 1939 /* make sure we have all the stats collected */ 1940 (void) dmfe_reclaim_tx_desc(dmfep); 1941 1942 switch (stat) { 1943 1944 case MAC_STAT_IPACKETS: 1945 *val = dmfep->rx_stats_ipackets; 1946 break; 1947 1948 case MAC_STAT_MULTIRCV: 1949 *val = dmfep->rx_stats_multi; 1950 break; 1951 1952 case MAC_STAT_BRDCSTRCV: 1953 *val = dmfep->rx_stats_bcast; 1954 break; 1955 1956 case MAC_STAT_RBYTES: 1957 *val = dmfep->rx_stats_rbytes; 1958 break; 1959 1960 case MAC_STAT_IERRORS: 1961 *val = dmfep->rx_stats_ierrors; 1962 break; 1963 1964 case MAC_STAT_NORCVBUF: 1965 *val = dmfep->rx_stats_norcvbuf; 1966 break; 1967 1968 case MAC_STAT_COLLISIONS: 1969 *val = dmfep->tx_stats_collisions; 1970 break; 1971 1972 case MAC_STAT_OERRORS: 1973 *val = dmfep->tx_stats_oerrors; 1974 break; 1975 1976 case MAC_STAT_OPACKETS: 1977 *val = dmfep->tx_stats_opackets; 1978 break; 1979 1980 case MAC_STAT_MULTIXMT: 1981 *val = dmfep->tx_stats_multi; 1982 break; 1983 1984 case MAC_STAT_BRDCSTXMT: 1985 *val = dmfep->tx_stats_bcast; 1986 break; 1987 1988 case MAC_STAT_OBYTES: 1989 *val = dmfep->tx_stats_obytes; 1990 break; 1991 1992 case MAC_STAT_OVERFLOWS: 1993 *val = dmfep->rx_stats_overflow; 1994 break; 1995 1996 case MAC_STAT_UNDERFLOWS: 1997 *val = dmfep->tx_stats_underflow; 1998 break; 1999 2000 case ETHER_STAT_ALIGN_ERRORS: 2001 *val = dmfep->rx_stats_align; 2002 break; 2003 2004 case ETHER_STAT_FCS_ERRORS: 2005 *val = dmfep->rx_stats_fcs; 2006 break; 2007 2008 case ETHER_STAT_TOOLONG_ERRORS: 2009 *val = dmfep->rx_stats_toolong; 2010 break; 2011 2012 case ETHER_STAT_TOOSHORT_ERRORS: 2013 *val = dmfep->rx_stats_short; 2014 break; 2015 2016 case ETHER_STAT_MACRCV_ERRORS: 2017 *val = dmfep->rx_stats_macrcv_errors; 2018 break; 2019 2020 case ETHER_STAT_MACXMT_ERRORS: 2021 *val = dmfep->tx_stats_macxmt_errors; 2022 break; 2023 2024 case ETHER_STAT_JABBER_ERRORS: 2025 *val = dmfep->tx_stats_jabber; 2026 break; 2027 2028 case ETHER_STAT_CARRIER_ERRORS: 2029 *val = dmfep->tx_stats_nocarrier; 2030 break; 2031 2032 case ETHER_STAT_TX_LATE_COLLISIONS: 2033 *val = dmfep->tx_stats_xmtlatecoll; 2034 break; 2035 2036 case ETHER_STAT_EX_COLLISIONS: 2037 *val = dmfep->tx_stats_excoll; 2038 break; 2039 2040 case ETHER_STAT_DEFER_XMTS: 2041 *val = dmfep->tx_stats_defer; 2042 break; 2043 2044 case ETHER_STAT_FIRST_COLLISIONS: 2045 *val = dmfep->tx_stats_first_coll; 2046 break; 2047 2048 case ETHER_STAT_MULTI_COLLISIONS: 2049 *val = dmfep->tx_stats_multi_coll; 2050 break; 2051 2052 default: 2053 rv = ENOTSUP; 2054 } 2055 2056 mutex_exit(dmfep->txlock); 2057 mutex_exit(dmfep->rxlock); 2058 mutex_exit(dmfep->oplock); 2059 2060 return (rv); 2061 } 2062 2063 /* 2064 * ========== Ioctl handler & subfunctions ========== 2065 */ 2066 2067 static lb_property_t dmfe_loopmodes[] = { 2068 { normal, "normal", 0 }, 2069 { internal, "Internal", 1 }, 2070 { external, "External", 2 }, 2071 
2072 
2073 /*
2074  * Specific dmfe IOCTLs; the mac module handles the generic ones.
2075  * Unfortunately, the DM9102 doesn't seem to work well with MII-based
2076  * loopback, so we have to do something special for it.
2077  */
2078 
2079 static void
2080 dmfe_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
2081 {
2082 	dmfe_t *dmfep = arg;
2083 	struct iocblk *iocp;
2084 	int rv = 0;
2085 	lb_info_sz_t sz;
2086 	int cmd;
2087 	uint32_t mode;
2088 
2089 	iocp = (void *)mp->b_rptr;
2090 	cmd = iocp->ioc_cmd;
2091 
2092 	if (mp->b_cont == NULL) {
2093 		/*
2094 		 * All of these ioctls need data!
2095 		 */
2096 		miocnak(wq, mp, 0, EINVAL);
2097 		return;
2098 	}
2099 
2100 	switch (cmd) {
2101 	case LB_GET_INFO_SIZE:
2102 		if (iocp->ioc_count != sizeof (sz)) {
2103 			rv = EINVAL;
2104 		} else {
2105 			sz = sizeof (dmfe_loopmodes);
2106 			bcopy(&sz, mp->b_cont->b_rptr, sizeof (sz));
2107 		}
2108 		break;
2109 
2110 	case LB_GET_INFO:
2111 		if (iocp->ioc_count != sizeof (dmfe_loopmodes)) {
2112 			rv = EINVAL;
2113 		} else {
2114 			bcopy(dmfe_loopmodes, mp->b_cont->b_rptr,
2115 			    iocp->ioc_count);
2116 		}
2117 		break;
2118 
2119 	case LB_GET_MODE:
2120 		if (iocp->ioc_count != sizeof (mode)) {
2121 			rv = EINVAL;
2122 		} else {
2123 			mutex_enter(dmfep->oplock);
2124 			switch (dmfep->opmode & LOOPBACK_MODE_MASK) {
2125 			case LOOPBACK_OFF:
2126 				mode = 0;
2127 				break;
2128 			case LOOPBACK_INTERNAL:
2129 				mode = 1;
2130 				break;
2131 			default:
2132 				mode = 2;
2133 				break;
2134 			}
2135 			mutex_exit(dmfep->oplock);
2136 			bcopy(&mode, mp->b_cont->b_rptr, sizeof (mode));
2137 		}
2138 		break;
2139 
2140 	case LB_SET_MODE:
2141 		rv = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
2142 		if (rv != 0)
2143 			break;
2144 		if (iocp->ioc_count != sizeof (mode)) {
2145 			rv = EINVAL;
2146 			break;
2147 		}
2148 		bcopy(mp->b_cont->b_rptr, &mode, sizeof (mode));
2149 
2150 		mutex_enter(dmfep->oplock);
2151 		dmfep->opmode &= ~LOOPBACK_MODE_MASK;
2152 		switch (mode) {
2153 		case 2:
2154 			dmfep->opmode |= LOOPBACK_PHY_D;
2155 			break;
2156 		case 1:
2157 			dmfep->opmode |= LOOPBACK_INTERNAL;
2158 			break;
2159 		default:
2160 			break;
2161 		}
2162 		if (!dmfep->suspended) {
2163 			dmfe_restart(dmfep);
2164 		}
2165 		mutex_exit(dmfep->oplock);
2166 		break;
2167 
2168 	default:
2169 		rv = EINVAL;
2170 		break;
2171 	}
2172 
2173 	if (rv == 0) {
2174 		miocack(wq, mp, iocp->ioc_count, 0);
2175 	} else {
2176 		miocnak(wq, mp, 0, rv);
2177 	}
2178 }
2179 
2180 int
2181 dmfe_m_getprop(void *arg, const char *name, mac_prop_id_t num, uint_t flags,
2182     uint_t sz, void *val, uint_t *perm)
2183 {
2184 	dmfe_t *dmfep = arg;
2185 
2186 	return (mii_m_getprop(dmfep->mii, name, num, flags, sz, val, perm));
2187 }
2188 
2189 int
2190 dmfe_m_setprop(void *arg, const char *name, mac_prop_id_t num, uint_t sz,
2191     const void *val)
2192 {
2193 	dmfe_t *dmfep = arg;
2194 
2195 	return (mii_m_setprop(dmfep->mii, name, num, sz, val));
2196 }
2197 
2198 
2199 /*
2200  * ========== Per-instance setup/teardown code ==========
2201  */
2202 
2203 /*
2204  * Determine local MAC address & broadcast address for this interface
2205  */
2206 static void
2207 dmfe_find_mac_address(dmfe_t *dmfep)
2208 {
2209 	uchar_t *prop;
2210 	uint_t propsize;
2211 	int err;
2212 
2213 	/*
2214 	 * We have to find the "vendor's factory-set address".  This is
2215 	 * the value of the property "local-mac-address", as set by OBP
2216 	 * (or a .conf file!)
2217 	 *
2218 	 * If the property is not there, then we try to find the factory
2219 	 * MAC address from the device's serial EEPROM.
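	 *
	 * For illustration only (both the address and the syntax below
	 * are sketched, not guaranteed): a dmfe.conf override might read
	 *
	 *	local-mac-address=00,03,ba,12,34,56;
	 *
	 * What matters to the lookup below is only that the property is
	 * a byte array exactly ETHERADDRL (6) bytes long.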
2220 	 */
2221 	bzero(dmfep->curr_addr, sizeof (dmfep->curr_addr));
2222 	err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, dmfep->devinfo,
2223 	    DDI_PROP_DONTPASS, localmac_propname, &prop, &propsize);
2224 	if (err == DDI_PROP_SUCCESS) {
2225 		if (propsize == ETHERADDRL)
2226 			ethaddr_copy(prop, dmfep->curr_addr);
2227 		ddi_prop_free(prop);
2228 	} else {
2229 		/* no property set... check eeprom */
2230 		dmfe_read_eeprom(dmfep, EEPROM_EN_ADDR, dmfep->curr_addr,
2231 		    ETHERADDRL);
2232 	}
2233 }
2234 
2235 static int
2236 dmfe_alloc_dma_mem(dmfe_t *dmfep, size_t memsize,
2237     size_t setup, size_t slop, ddi_device_acc_attr_t *attr_p,
2238     uint_t dma_flags, dma_area_t *dma_p)
2239 {
2240 	ddi_dma_cookie_t dma_cookie;
2241 	uint_t ncookies;
2242 	int err;
2243 
2244 	/*
2245 	 * Allocate handle
2246 	 */
2247 	err = ddi_dma_alloc_handle(dmfep->devinfo, &dma_attr,
2248 	    DDI_DMA_SLEEP, NULL, &dma_p->dma_hdl);
2249 	if (err != DDI_SUCCESS) {
2250 		dmfe_error(dmfep, "DMA handle allocation failed");
2251 		return (DDI_FAILURE);
2252 	}
2253 
2254 	/*
2255 	 * Allocate memory
2256 	 */
2257 	err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize + setup + slop,
2258 	    attr_p, dma_flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING),
2259 	    DDI_DMA_SLEEP, NULL,
2260 	    &dma_p->mem_va, &dma_p->alength, &dma_p->acc_hdl);
2261 	if (err != DDI_SUCCESS) {
2262 		dmfe_error(dmfep, "DMA memory allocation failed: %d", err);
2263 		return (DDI_FAILURE);
2264 	}
2265 
2266 	/*
2267 	 * Bind the two together
2268 	 */
2269 	err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL,
2270 	    dma_p->mem_va, dma_p->alength, dma_flags,
2271 	    DDI_DMA_SLEEP, NULL, &dma_cookie, &ncookies);
2272 	if (err != DDI_DMA_MAPPED) {
2273 		dmfe_error(dmfep, "DMA mapping failed: %d", err);
2274 		return (DDI_FAILURE);
2275 	}
2276 	if ((dma_p->ncookies = ncookies) != 1) {
2277 		dmfe_error(dmfep, "Too many DMA cookies: %d", ncookies);
2278 		return (DDI_FAILURE);
2279 	}
2280 
2281 	dma_p->mem_dvma = dma_cookie.dmac_address;
2282 	if (setup > 0) {
2283 		dma_p->setup_dvma = dma_p->mem_dvma + memsize;
2284 		dma_p->setup_va = dma_p->mem_va + memsize;
2285 	} else {
2286 		dma_p->setup_dvma = 0;
2287 		dma_p->setup_va = NULL;
2288 	}
2289 
2290 	return (DDI_SUCCESS);
2291 }
2292 
2293 /*
2294  * This function allocates the transmit and receive buffers and descriptors.
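 *
 * As an orientation sketch (editorial, not authoritative): each dma_area_t
 * set up by dmfe_alloc_dma_mem() above is laid out as
 *
 *	+---------------------+-------------------+------------------+
 *	| memsize             | setup             | slop             |
 *	| (ring or buffers)   | (SETUPBUF_SIZE)   | (DMFE_SLOP)      |
 *	+---------------------+-------------------+------------------+
 *
 * Only the TX descriptor ring passes a non-zero <setup>; the <slop> area
 * just guarantees that the chip's read-ahead of up to DMFE_SLOP bytes
 * beyond a descriptor or buffer never touches unallocated or unmapped
 * memory.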
2295  */
2296 static int
2297 dmfe_alloc_bufs(dmfe_t *dmfep)
2298 {
2299 	size_t memsize;
2300 	int err;
2301 
2302 	/*
2303 	 * Allocate memory & handles for TX descriptor ring
2304 	 */
2305 	memsize = dmfep->tx.n_desc * sizeof (struct tx_desc_type);
2306 	err = dmfe_alloc_dma_mem(dmfep, memsize, SETUPBUF_SIZE, DMFE_SLOP,
2307 	    &dmfe_reg_accattr, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2308 	    &dmfep->tx_desc);
2309 	if (err != DDI_SUCCESS) {
2310 		dmfe_error(dmfep, "TX descriptor allocation failed");
2311 		return (DDI_FAILURE);
2312 	}
2313 
2314 	/*
2315 	 * Allocate memory & handles for TX buffers
2316 	 */
2317 	memsize = dmfep->tx.n_desc * DMFE_BUF_SIZE;
2318 	err = dmfe_alloc_dma_mem(dmfep, memsize, 0, 0,
2319 	    &dmfe_data_accattr, DDI_DMA_WRITE | DMFE_DMA_MODE,
2320 	    &dmfep->tx_buff);
2321 	if (err != DDI_SUCCESS) {
2322 		dmfe_error(dmfep, "TX buffer allocation failed");
2323 		return (DDI_FAILURE);
2324 	}
2325 
2326 	/*
2327 	 * Allocate memory & handles for RX descriptor ring
2328 	 */
2329 	memsize = dmfep->rx.n_desc * sizeof (struct rx_desc_type);
2330 	err = dmfe_alloc_dma_mem(dmfep, memsize, 0, DMFE_SLOP,
2331 	    &dmfe_reg_accattr, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2332 	    &dmfep->rx_desc);
2333 	if (err != DDI_SUCCESS) {
2334 		dmfe_error(dmfep, "RX descriptor allocation failed");
2335 		return (DDI_FAILURE);
2336 	}
2337 
2338 	/*
2339 	 * Allocate memory & handles for RX buffers
2340 	 */
2341 	memsize = dmfep->rx.n_desc * DMFE_BUF_SIZE;
2342 	err = dmfe_alloc_dma_mem(dmfep, memsize, 0, 0,
2343 	    &dmfe_data_accattr, DDI_DMA_READ | DMFE_DMA_MODE, &dmfep->rx_buff);
2344 	if (err != DDI_SUCCESS) {
2345 		dmfe_error(dmfep, "RX buffer allocation failed");
2346 		return (DDI_FAILURE);
2347 	}
2348 
2349 	/*
2350 	 * Allocate bitmasks for tx packet type tracking
2351 	 */
2352 	dmfep->tx_mcast = kmem_zalloc(dmfep->tx.n_desc / NBBY, KM_SLEEP);
2353 	dmfep->tx_bcast = kmem_zalloc(dmfep->tx.n_desc / NBBY, KM_SLEEP);
2354 
2355 	return (DDI_SUCCESS);
2356 }
2357 
2358 static void
2359 dmfe_free_dma_mem(dma_area_t *dma_p)
2360 {
2361 	if (dma_p->dma_hdl != NULL) {
2362 		if (dma_p->ncookies) {
2363 			(void) ddi_dma_unbind_handle(dma_p->dma_hdl);
2364 			dma_p->ncookies = 0;
2365 		}
2366 		ddi_dma_free_handle(&dma_p->dma_hdl);
2367 		dma_p->dma_hdl = NULL;
2368 		dma_p->mem_dvma = 0;
2369 		dma_p->setup_dvma = 0;
2370 	}
2371 
2372 	if (dma_p->acc_hdl != NULL) {
2373 		ddi_dma_mem_free(&dma_p->acc_hdl);
2374 		dma_p->acc_hdl = NULL;
2375 		dma_p->mem_va = NULL;
2376 		dma_p->setup_va = NULL;
2377 	}
2378 }
2379 
2380 /*
2381  * This routine frees the transmit and receive buffers and descriptors.
2382  * Make sure the chip is stopped before calling it!
2383  */
2384 static void
2385 dmfe_free_bufs(dmfe_t *dmfep)
2386 {
2387 	dmfe_free_dma_mem(&dmfep->rx_buff);
2388 	dmfe_free_dma_mem(&dmfep->rx_desc);
2389 	dmfe_free_dma_mem(&dmfep->tx_buff);
2390 	dmfe_free_dma_mem(&dmfep->tx_desc);
2391 	if (dmfep->tx_mcast)
2392 		kmem_free(dmfep->tx_mcast, dmfep->tx.n_desc / NBBY);
2393 	if (dmfep->tx_bcast)
2394 		kmem_free(dmfep->tx_bcast, dmfep->tx.n_desc / NBBY);
2395 }
2396 
2397 static void
2398 dmfe_unattach(dmfe_t *dmfep)
2399 {
2400 	/*
2401 	 * Clean up and free all DMFE data structures
2402 	 */
2403 	if (dmfep->cycid != NULL) {
2404 		ddi_periodic_delete(dmfep->cycid);
2405 		dmfep->cycid = NULL;
2406 	}
2407 
2408 	if (dmfep->ksp_drv != NULL)
2409 		kstat_delete(dmfep->ksp_drv);
2410 	if (dmfep->progress & PROGRESS_HWINT) {
2411 		ddi_remove_intr(dmfep->devinfo, 0, dmfep->iblk);
2412 	}
2413 	if (dmfep->progress & PROGRESS_SOFTINT)
2414 		ddi_remove_softintr(dmfep->factotum_id);
2415 	if (dmfep->mii != NULL)
2416 		mii_free(dmfep->mii);
2417 	if (dmfep->progress & PROGRESS_MUTEX) {
2418 		mutex_destroy(dmfep->txlock);
2419 		mutex_destroy(dmfep->rxlock);
2420 		mutex_destroy(dmfep->oplock);
2421 	}
2422 	dmfe_free_bufs(dmfep);
2423 	if (dmfep->io_handle != NULL)
2424 		ddi_regs_map_free(&dmfep->io_handle);
2425 
2426 	kmem_free(dmfep, sizeof (*dmfep));
2427 }
2428 
2429 static int
2430 dmfe_config_init(dmfe_t *dmfep, chip_id_t *idp)
2431 {
2432 	ddi_acc_handle_t handle;
2433 	uint32_t regval;
2434 
2435 	if (pci_config_setup(dmfep->devinfo, &handle) != DDI_SUCCESS)
2436 		return (DDI_FAILURE);
2437 
2438 	/*
2439 	 * Get vendor/device/revision.  We expect (but don't check) that
2440 	 * (vendorid == DAVICOM_VENDOR_ID) && (deviceid == DEVICE_ID_9102)
2441 	 */
2442 	idp->vendor = pci_config_get16(handle, PCI_CONF_VENID);
2443 	idp->device = pci_config_get16(handle, PCI_CONF_DEVID);
2444 	idp->revision = pci_config_get8(handle, PCI_CONF_REVID);
2445 
2446 	/*
2447 	 * Turn on Bus Master Enable bit and ensure the device is not asleep
2448 	 */
2449 	regval = pci_config_get32(handle, PCI_CONF_COMM);
2450 	pci_config_put32(handle, PCI_CONF_COMM, (regval | PCI_COMM_ME));
2451 
2452 	regval = pci_config_get32(handle, PCI_DMFE_CONF_CFDD);
2453 	pci_config_put32(handle, PCI_DMFE_CONF_CFDD,
2454 	    regval & ~(CFDD_SLEEP | CFDD_SNOOZE));
2455 
2456 	pci_config_teardown(&handle);
2457 	return (DDI_SUCCESS);
2458 }
2459 
2460 struct ks_index {
2461 	int index;
2462 	char *name;
2463 };
2464 
2465 static const struct ks_index ks_drv_names[] = {
2466 	{ KS_INTERRUPT,		"intr" },
2467 	{ KS_CYCLIC_RUN,	"cyclic_run" },
2468 
2469 	{ KS_TX_STALL,		"tx_stall_detect" },
2470 	{ KS_CHIP_ERROR,	"chip_error_interrupt" },
2471 
2472 	{ KS_FACTOTUM_RUN,	"factotum_run" },
2473 	{ KS_RECOVERY,		"factotum_recover" },
2474 
2475 	{ -1, NULL }
2476 };
2477 
2478 static void
2479 dmfe_init_kstats(dmfe_t *dmfep, int instance)
2480 {
2481 	kstat_t *ksp;
2482 	kstat_named_t *knp;
2483 	const struct ks_index *ksip;
2484 
2485 	/* no need to create MII stats, the mac module already does it */
2486 
2487 	/* Create and initialise driver-defined kstats */
2488 	ksp = kstat_create(DRIVER_NAME, instance, "dmfe_events", "net",
2489 	    KSTAT_TYPE_NAMED, KS_DRV_COUNT, KSTAT_FLAG_PERSISTENT);
2490 	if (ksp != NULL) {
2491 		for (knp = ksp->ks_data, ksip = ks_drv_names;
2492 		    ksip->name != NULL; ++ksip) {
2493 			kstat_named_init(&knp[ksip->index], ksip->name,
2494 			    KSTAT_DATA_UINT64);
2495 		}
2496 		dmfep->ksp_drv = ksp;
2497 		dmfep->knp_drv = knp;
2498 		kstat_install(ksp);
2499 	} else {
2500 		dmfe_error(dmfep, "kstat_create() for dmfe_events failed");
2501 	}
2502 }
2503 
2504 static int
2505 dmfe_resume(dev_info_t *devinfo)
2506 {
2507 	dmfe_t *dmfep;				/* Our private data	*/
2508 	chip_id_t chipid;
2509 	boolean_t restart = B_FALSE;
2510 
2511 	dmfep = ddi_get_driver_private(devinfo);
2512 	if (dmfep == NULL)
2513 		return (DDI_FAILURE);
2514 
2515 	/*
2516 	 * Refuse to resume if the data structures aren't consistent
2517 	 */
2518 	if (dmfep->devinfo != devinfo)
2519 		return (DDI_FAILURE);
2520 
2521 	/*
2522 	 * Refuse to resume if the chip's changed its identity (*boggle*)
2523 	 */
2524 	if (dmfe_config_init(dmfep, &chipid) != DDI_SUCCESS)
2525 		return (DDI_FAILURE);
2526 	if (chipid.vendor != dmfep->chipid.vendor)
2527 		return (DDI_FAILURE);
2528 	if (chipid.device != dmfep->chipid.device)
2529 		return (DDI_FAILURE);
2530 	if (chipid.revision != dmfep->chipid.revision)
2531 		return (DDI_FAILURE);
2532 
2533 	mutex_enter(dmfep->oplock);
2534 	mutex_enter(dmfep->txlock);
2535 	dmfep->suspended = B_FALSE;
2536 	mutex_exit(dmfep->txlock);
2537 
2538 	/*
2539 	 * All OK, reinitialise h/w & kick off MAC scheduling
2540 	 */
2541 	if (dmfep->mac_state == DMFE_MAC_STARTED) {
2542 		dmfe_restart(dmfep);
2543 		restart = B_TRUE;
2544 	}
2545 	mutex_exit(dmfep->oplock);
2546 
2547 	if (restart) {
2548 		mii_resume(dmfep->mii);
2549 		mac_tx_update(dmfep->mh);
2550 	}
2551 	return (DDI_SUCCESS);
2552 }
2553 
2554 /*
2555  * attach(9E) -- Attach a device to the system
2556  *
2557  * Called once for each board successfully probed.
2558  */
2559 static int
2560 dmfe_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
2561 {
2562 	mac_register_t *macp;
2563 	dmfe_t *dmfep;				/* Our private data	*/
2564 	uint32_t csr6;
2565 	int instance;
2566 	int err;
2567 
2568 	instance = ddi_get_instance(devinfo);
2569 
2570 	switch (cmd) {
2571 	default:
2572 		return (DDI_FAILURE);
2573 
2574 	case DDI_RESUME:
2575 		return (dmfe_resume(devinfo));
2576 
2577 	case DDI_ATTACH:
2578 		break;
2579 	}
2580 
2581 	dmfep = kmem_zalloc(sizeof (*dmfep), KM_SLEEP);
2582 	ddi_set_driver_private(devinfo, dmfep);
2583 	dmfep->devinfo = devinfo;
2584 	dmfep->dmfe_guard = DMFE_GUARD;
2585 
2586 	/*
2587 	 * Initialize more fields in DMFE private data
2588 	 * Determine the local MAC address
2589 	 */
2590 #if DMFEDEBUG
2591 	dmfep->debug = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 0,
2592 	    debug_propname, dmfe_debug);
2593 #endif /* DMFEDEBUG */
2594 	dmfep->cycid = NULL;
2595 	(void) snprintf(dmfep->ifname, sizeof (dmfep->ifname), "dmfe%d",
2596 	    instance);
2597 
2598 	/*
2599 	 * Check for custom "opmode-reg-value" property;
2600 	 * if none, use the defaults below for CSR6 ...
2601 	 */
2602 	csr6 = TX_THRESHOLD_HI | STORE_AND_FORWARD | EXT_MII_IF | OPN_25_MB1;
2603 	dmfep->opmode = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
2604 	    DDI_PROP_DONTPASS, opmode_propname, csr6);
2605 
2606 	/*
2607 	 * Read chip ID & set up config space command register(s)
2608 	 */
2609 	if (dmfe_config_init(dmfep, &dmfep->chipid) != DDI_SUCCESS) {
2610 		dmfe_error(dmfep, "dmfe_config_init() failed");
2611 		goto attach_fail;
2612 	}
2613 
2614 	/*
2615 	 * Map operating registers
2616 	 */
2617 	err = ddi_regs_map_setup(devinfo, DMFE_PCI_RNUMBER,
2618 	    &dmfep->io_reg, 0, 0, &dmfe_reg_accattr, &dmfep->io_handle);
2619 	if (err != DDI_SUCCESS) {
2620 		dmfe_error(dmfep, "ddi_regs_map_setup() failed");
2621 		goto attach_fail;
2622 	}
2623 
2624 	/*
2625 	 * Get our MAC address.
2626 	 */
2627 	dmfe_find_mac_address(dmfep);
2628 
2629 	/*
2630 	 * Allocate the TX and RX descriptors/buffers.
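	 *
	 * Rough arithmetic for the defaults (illustrative only): with
	 * dmfe_tx_desc == dmfe_rx_desc == 128 and DMFE_BUF_SIZE == 2048,
	 * each buffer area below is 128 * 2048 bytes (256KiB), and each
	 * descriptor ring is 128 * sizeof (struct tx/rx_desc_type) plus
	 * the setup and slop areas passed to dmfe_alloc_dma_mem().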
2631 */ 2632 dmfep->tx.n_desc = dmfe_tx_desc; 2633 dmfep->rx.n_desc = dmfe_rx_desc; 2634 err = dmfe_alloc_bufs(dmfep); 2635 if (err != DDI_SUCCESS) { 2636 goto attach_fail; 2637 } 2638 2639 /* 2640 * Add the softint handler 2641 */ 2642 if (ddi_add_softintr(devinfo, DDI_SOFTINT_LOW, &dmfep->factotum_id, 2643 NULL, NULL, dmfe_factotum, (caddr_t)dmfep) != DDI_SUCCESS) { 2644 dmfe_error(dmfep, "ddi_add_softintr() failed"); 2645 goto attach_fail; 2646 } 2647 dmfep->progress |= PROGRESS_SOFTINT; 2648 2649 /* 2650 * Add the h/w interrupt handler & initialise mutexen 2651 */ 2652 if (ddi_get_iblock_cookie(devinfo, 0, &dmfep->iblk) != DDI_SUCCESS) { 2653 dmfe_error(dmfep, "ddi_get_iblock_cookie() failed"); 2654 goto attach_fail; 2655 } 2656 2657 mutex_init(dmfep->milock, NULL, MUTEX_DRIVER, NULL); 2658 mutex_init(dmfep->oplock, NULL, MUTEX_DRIVER, dmfep->iblk); 2659 mutex_init(dmfep->rxlock, NULL, MUTEX_DRIVER, dmfep->iblk); 2660 mutex_init(dmfep->txlock, NULL, MUTEX_DRIVER, dmfep->iblk); 2661 dmfep->progress |= PROGRESS_MUTEX; 2662 2663 if (ddi_add_intr(devinfo, 0, NULL, NULL, 2664 dmfe_interrupt, (caddr_t)dmfep) != DDI_SUCCESS) { 2665 dmfe_error(dmfep, "ddi_add_intr() failed"); 2666 goto attach_fail; 2667 } 2668 dmfep->progress |= PROGRESS_HWINT; 2669 2670 /* 2671 * Create & initialise named kstats 2672 */ 2673 dmfe_init_kstats(dmfep, instance); 2674 2675 /* 2676 * Reset & initialise the chip and the ring buffers 2677 * Initialise the (internal) PHY 2678 */ 2679 mutex_enter(dmfep->oplock); 2680 mutex_enter(dmfep->rxlock); 2681 mutex_enter(dmfep->txlock); 2682 2683 dmfe_reset(dmfep); 2684 2685 /* 2686 * Prepare the setup packet 2687 */ 2688 bzero(dmfep->tx_desc.setup_va, SETUPBUF_SIZE); 2689 bzero(dmfep->mcast_refs, MCASTBUF_SIZE); 2690 dmfep->addr_set = B_FALSE; 2691 dmfep->opmode &= ~(PROMISC_MODE | PASS_MULTICAST); 2692 dmfep->mac_state = DMFE_MAC_RESET; 2693 2694 mutex_exit(dmfep->txlock); 2695 mutex_exit(dmfep->rxlock); 2696 mutex_exit(dmfep->oplock); 2697 2698 if (dmfe_init_phy(dmfep) != B_TRUE) 2699 goto attach_fail; 2700 2701 /* 2702 * Send a reasonable setup frame. This configures our starting 2703 * address and the broadcast address. 2704 */ 2705 (void) dmfe_m_unicst(dmfep, dmfep->curr_addr); 2706 2707 /* 2708 * Initialize pointers to device specific functions which 2709 * will be used by the generic layer. 2710 */ 2711 if ((macp = mac_alloc(MAC_VERSION)) == NULL) 2712 goto attach_fail; 2713 macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER; 2714 macp->m_driver = dmfep; 2715 macp->m_dip = devinfo; 2716 macp->m_src_addr = dmfep->curr_addr; 2717 macp->m_callbacks = &dmfe_m_callbacks; 2718 macp->m_min_sdu = 0; 2719 macp->m_max_sdu = ETHERMTU; 2720 macp->m_margin = VLAN_TAGSZ; 2721 2722 /* 2723 * Finally, we're ready to register ourselves with the MAC layer 2724 * interface; if this succeeds, we're all ready to start() 2725 */ 2726 err = mac_register(macp, &dmfep->mh); 2727 mac_free(macp); 2728 if (err != 0) 2729 goto attach_fail; 2730 ASSERT(dmfep->dmfe_guard == DMFE_GUARD); 2731 2732 /* 2733 * Install the cyclic callback that we use to check for link 2734 * status, transmit stall, etc. The cyclic callback (dmfe_cyclic()) 2735 * is invoked in kernel context then. 
2736 */ 2737 ASSERT(dmfep->cycid == NULL); 2738 dmfep->cycid = ddi_periodic_add(dmfe_cyclic, dmfep, 2739 dmfe_tick_us * 1000, DDI_IPL_0); 2740 return (DDI_SUCCESS); 2741 2742 attach_fail: 2743 dmfe_unattach(dmfep); 2744 return (DDI_FAILURE); 2745 } 2746 2747 /* 2748 * dmfe_suspend() -- suspend transmit/receive for powerdown 2749 */ 2750 static int 2751 dmfe_suspend(dmfe_t *dmfep) 2752 { 2753 /* 2754 * Just stop processing ... 2755 */ 2756 mii_suspend(dmfep->mii); 2757 mutex_enter(dmfep->oplock); 2758 dmfe_stop(dmfep); 2759 2760 mutex_enter(dmfep->txlock); 2761 dmfep->suspended = B_TRUE; 2762 mutex_exit(dmfep->txlock); 2763 mutex_exit(dmfep->oplock); 2764 2765 return (DDI_SUCCESS); 2766 } 2767 2768 /* 2769 * detach(9E) -- Detach a device from the system 2770 */ 2771 static int 2772 dmfe_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd) 2773 { 2774 dmfe_t *dmfep; 2775 2776 dmfep = ddi_get_driver_private(devinfo); 2777 2778 switch (cmd) { 2779 default: 2780 return (DDI_FAILURE); 2781 2782 case DDI_SUSPEND: 2783 return (dmfe_suspend(dmfep)); 2784 2785 case DDI_DETACH: 2786 break; 2787 } 2788 2789 /* 2790 * Unregister from the MAC subsystem. This can fail, in 2791 * particular if there are DLPI style-2 streams still open - 2792 * in which case we just return failure without shutting 2793 * down chip operations. 2794 */ 2795 if (mac_unregister(dmfep->mh) != DDI_SUCCESS) 2796 return (DDI_FAILURE); 2797 2798 /* 2799 * All activity stopped, so we can clean up & exit 2800 */ 2801 dmfe_unattach(dmfep); 2802 return (DDI_SUCCESS); 2803 } 2804 2805 2806 /* 2807 * ========== Module Loading Data & Entry Points ========== 2808 */ 2809 2810 DDI_DEFINE_STREAM_OPS(dmfe_dev_ops, nulldev, nulldev, dmfe_attach, dmfe_detach, 2811 nodev, NULL, D_MP, NULL, ddi_quiesce_not_supported); 2812 2813 static struct modldrv dmfe_modldrv = { 2814 &mod_driverops, /* Type of module. This one is a driver */ 2815 dmfe_ident, /* short description */ 2816 &dmfe_dev_ops /* driver specific ops */ 2817 }; 2818 2819 static struct modlinkage modlinkage = { 2820 MODREV_1, (void *)&dmfe_modldrv, NULL 2821 }; 2822 2823 int 2824 _info(struct modinfo *modinfop) 2825 { 2826 return (mod_info(&modlinkage, modinfop)); 2827 } 2828 2829 int 2830 _init(void) 2831 { 2832 uint32_t tmp100; 2833 uint32_t tmp10; 2834 int i; 2835 int status; 2836 2837 /* Calculate global timing parameters */ 2838 tmp100 = (dmfe_tx100_stall_us+dmfe_tick_us-1)/dmfe_tick_us; 2839 tmp10 = (dmfe_tx10_stall_us+dmfe_tick_us-1)/dmfe_tick_us; 2840 2841 for (i = 0; i <= TX_PROCESS_MAX_STATE; ++i) { 2842 switch (i) { 2843 case TX_PROCESS_STATE(TX_PROCESS_FETCH_DATA): 2844 case TX_PROCESS_STATE(TX_PROCESS_WAIT_END): 2845 /* 2846 * The chip doesn't spontaneously recover from 2847 * a stall in these states, so we reset early 2848 */ 2849 stall_100_tix[i] = tmp100; 2850 stall_10_tix[i] = tmp10; 2851 break; 2852 2853 case TX_PROCESS_STATE(TX_PROCESS_SUSPEND): 2854 default: 2855 /* 2856 * The chip has been seen to spontaneously recover 2857 * after an apparent stall in the SUSPEND state, 2858 * so we'll allow it rather longer to do so. As 2859 * stalls in other states have not been observed, 2860 * we'll use long timeouts for them too ... 
2861 */ 2862 stall_100_tix[i] = tmp100 * 20; 2863 stall_10_tix[i] = tmp10 * 20; 2864 break; 2865 } 2866 } 2867 2868 mac_init_ops(&dmfe_dev_ops, "dmfe"); 2869 status = mod_install(&modlinkage); 2870 if (status == DDI_SUCCESS) 2871 dmfe_log_init(); 2872 2873 return (status); 2874 } 2875 2876 int 2877 _fini(void) 2878 { 2879 int status; 2880 2881 status = mod_remove(&modlinkage); 2882 if (status == DDI_SUCCESS) { 2883 mac_fini_ops(&dmfe_dev_ops); 2884 dmfe_log_fini(); 2885 } 2886 2887 return (status); 2888 } 2889