/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident   "%Z%%M% %I%     %E% SMI"

#include <sys/types.h>
#include <sys/sunddi.h>
#include "dmfe_impl.h"

/*
 * This is the string displayed by modinfo, etc.
 */
static char dmfe_ident[] = "Davicom DM9102 Ethernet";


/*
 * NOTES:
 *
 * #defines:
 *
 *      DMFE_PCI_RNUMBER is the register-set number to use for the operating
 *      registers.  On an OBP-based machine, regset 0 refers to CONFIG space,
 *      regset 1 will be the operating registers in I/O space, and regset 2
 *      will be the operating registers in MEMORY space (preferred).  If an
 *      expansion ROM is fitted, it may appear as a further register set.
 *
 *      DMFE_SLOP defines the amount by which the chip may read beyond
 *      the end of a buffer or descriptor, apparently 6-8 dwords :(
 *      We have to make sure this doesn't cause it to access unallocated
 *      or unmapped memory.
 *
 *      DMFE_BUF_SIZE must be at least (ETHERMAX + ETHERFCSL + DMFE_SLOP)
 *      rounded up to a multiple of 4.  Here we choose a power of two for
 *      speed & simplicity at the cost of a bit more memory.
 *
 *      However, the buffer length field in the TX/RX descriptors is only
 *      eleven bits, so even though we allocate DMFE_BUF_SIZE (2048) bytes
 *      per buffer, we tell the chip that they're only DMFE_BUF_SIZE_1
 *      (2000) bytes each.
 *
 *      DMFE_DMA_MODE defines the mode (STREAMING/CONSISTENT) used for
 *      the data buffers.  The descriptors are always set up in CONSISTENT
 *      mode.
 *
 *      DMFE_HEADROOM defines how much space we'll leave in allocated
 *      mblks before the first valid data byte.  This should be chosen
 *      to be 2 modulo 4, so that once the ethernet header (14 bytes)
 *      has been stripped off, the packet data will be 4-byte aligned.
 *      The remaining space can be used by upstream modules to prepend
 *      any headers required.
 *
 * Patchable globals:
 *
 *      dmfe_bus_modes: the bus mode bits to be put into CSR0.
 *              Setting READ_MULTIPLE in this register seems to cause
 *              the chip to generate a READ LINE command with a parity
 *              error!  Don't do it!
 *
 *      dmfe_setup_desc1: the value to be put into descriptor word 1
 *              when sending a SETUP packet.
 *
 *              Setting TX_LAST_DESC in desc1 in a setup packet seems
 *              to make the chip spontaneously reset internally - it
 *              attempts to give back the setup packet descriptor by
 *              writing to PCI address 00000000 - which may or may not
 *              get a MASTER ABORT - after which most of its registers
 *              seem to have either default values or garbage!
 *
 *              TX_FIRST_DESC doesn't seem to have the same effect but
 *              it isn't needed on a setup packet so we'll leave it out
 *              too, just in case it has some other weird side-effect.
 *
 *              The default hardware packet filtering mode is now
 *              HASH_AND_PERFECT (imperfect filtering of multicast
 *              packets and perfect filtering of unicast packets).
 *              If this is found not to work reliably, setting the
 *              TX_FILTER_TYPE1 bit will cause a switchover to using
 *              HASH_ONLY mode (imperfect filtering of *all* packets).
 *              Software will then perform the additional filtering
 *              as required.
 */

#define DMFE_PCI_RNUMBER        2
#define DMFE_SLOP               (8*sizeof (uint32_t))
#define DMFE_BUF_SIZE           2048
#define DMFE_BUF_SIZE_1         2000
#define DMFE_DMA_MODE           DDI_DMA_STREAMING
#define DMFE_HEADROOM           34

static uint32_t dmfe_bus_modes = TX_POLL_INTVL | CACHE_ALIGN;
static uint32_t dmfe_setup_desc1 = TX_SETUP_PACKET | SETUPBUF_SIZE |
        TX_FILTER_TYPE0;
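
/*
 * Illustration only (a sketch, not part of the driver; the guard
 * macro below is hypothetical): this simply restates the DMFE_HEADROOM
 * note above as a compile-time check.  With DMFE_HEADROOM == 34
 * (2 modulo 4), stripping the 14-byte ethernet header leaves the
 * payload at offset 48, which is 4-byte aligned.
 */
#ifdef DMFE_EXAMPLE_SKETCH
/* array size goes negative (and fails to compile) if misaligned */
typedef char dmfe_headroom_check[
        ((DMFE_HEADROOM + 14) % 4 == 0) ? 1 : -1];
#endif  /* DMFE_EXAMPLE_SKETCH */
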
/*
 * Some tunable parameters ...
 *      Number of RX/TX ring entries (128/128)
 *      Minimum number of TX ring slots to keep free (1)
 *      Low-water mark at which to try to reclaim TX ring slots (1)
 *      How often to take a TX-done interrupt (twice per ring cycle)
 *      Whether to reclaim TX ring entries on a TX-done interrupt (no)
 */

#define DMFE_TX_DESC    128     /* Should be a multiple of 4 <= 256     */
#define DMFE_RX_DESC    128     /* Should be a multiple of 4 <= 256     */

static uint32_t dmfe_rx_desc = DMFE_RX_DESC;
static uint32_t dmfe_tx_desc = DMFE_TX_DESC;
static uint32_t dmfe_tx_min_free = 1;
static uint32_t dmfe_tx_reclaim_level = 1;
static uint32_t dmfe_tx_int_factor = (DMFE_TX_DESC / 2) - 1;
static boolean_t dmfe_reclaim_on_done = B_FALSE;

/*
 * Time-related parameters:
 *
 *      We use a cyclic to provide a periodic callback; this is then used
 *      to check for TX-stall and poll the link status register.
 *
 *      DMFE_TICK is the interval between cyclic callbacks, in microseconds.
 *
 *      TX_STALL_TIME_100 is the timeout in microseconds between passing
 *      a packet to the chip for transmission and seeing that it's gone,
 *      when running at 100Mb/s.  If we haven't reclaimed at least one
 *      descriptor in this time we assume the transmitter has stalled
 *      and reset the chip.
 *
 *      TX_STALL_TIME_10 is the equivalent timeout when running at 10Mb/s.
 *
 *      LINK_POLL_TIME is the interval between checks on the link state
 *      when nothing appears to have happened (this is in addition to the
 *      case where we think we've detected a link change, and serves as a
 *      backup in case the quick link check doesn't work properly).
 *
 * Patchable globals:
 *
 *      dmfe_tick_us:           DMFE_TICK
 *      dmfe_tx100_stall_us:    TX_STALL_TIME_100
 *      dmfe_tx10_stall_us:     TX_STALL_TIME_10
 *      dmfe_link_poll_us:      LINK_POLL_TIME
 *
 * These are then used in _init() to calculate:
 *
 *      stall_100_tix[]: number of consecutive cyclic callbacks without a
 *                      reclaim before the TX process is considered stalled,
 *                      when running at 100Mb/s.  The elements are indexed
 *                      by transmit-engine-state.
 *      stall_10_tix[]: number of consecutive cyclic callbacks without a
 *                      reclaim before the TX process is considered stalled,
 *                      when running at 10Mb/s.  The elements are indexed
 *                      by transmit-engine-state.
 *      factotum_tix: number of consecutive cyclic callbacks before waking
 *                      up the factotum even though there doesn't appear to
 *                      be anything for it to do
 */

#define DMFE_TICK               25000           /* microseconds */
#define TX_STALL_TIME_100       50000           /* microseconds */
#define TX_STALL_TIME_10        200000          /* microseconds */
#define LINK_POLL_TIME          5000000         /* microseconds */

static uint32_t dmfe_tick_us = DMFE_TICK;
static uint32_t dmfe_tx100_stall_us = TX_STALL_TIME_100;
static uint32_t dmfe_tx10_stall_us = TX_STALL_TIME_10;
static uint32_t dmfe_link_poll_us = LINK_POLL_TIME;

/*
 * Calculated from above in _init()
 */

static uint32_t stall_100_tix[TX_PROCESS_MAX_STATE+1];
static uint32_t stall_10_tix[TX_PROCESS_MAX_STATE+1];
static uint32_t factotum_tix;
static uint32_t factotum_fast_tix;
static uint32_t factotum_start_tix;
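
/*
 * A minimal sketch (not part of the driver) of how _init() might
 * derive the tick counts above from the microsecond tunables.
 * _init() itself is outside this section; the helper name and the
 * round-up behaviour are assumptions for illustration only.
 */
#ifdef DMFE_EXAMPLE_SKETCH
static uint32_t
dmfe_us_to_tix(uint32_t us)
{
        /* round up so a nonzero timeout never maps to zero ticks */
        return ((us + dmfe_tick_us - 1) / dmfe_tick_us);
}
#endif  /* DMFE_EXAMPLE_SKETCH */
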
/*
 * Property names
 */
static char localmac_propname[] = "local-mac-address";
static char opmode_propname[] = "opmode-reg-value";
static char debug_propname[] = "dmfe-debug-flags";

static int      dmfe_m_start(void *);
static void     dmfe_m_stop(void *);
static int      dmfe_m_promisc(void *, boolean_t);
static int      dmfe_m_multicst(void *, boolean_t, const uint8_t *);
static int      dmfe_m_unicst(void *, const uint8_t *);
static void     dmfe_m_ioctl(void *, queue_t *, mblk_t *);
static boolean_t dmfe_m_getcapab(void *, mac_capab_t, void *);
static mblk_t   *dmfe_m_tx(void *, mblk_t *);
static int      dmfe_m_stat(void *, uint_t, uint64_t *);

static mac_callbacks_t dmfe_m_callbacks = {
        (MC_IOCTL | MC_GETCAPAB),
        dmfe_m_stat,
        dmfe_m_start,
        dmfe_m_stop,
        dmfe_m_promisc,
        dmfe_m_multicst,
        dmfe_m_unicst,
        dmfe_m_tx,
        NULL,
        dmfe_m_ioctl,
        dmfe_m_getcapab,
};


/*
 * Describes the chip's DMA engine
 */
static ddi_dma_attr_t dma_attr = {
        DMA_ATTR_V0,            /* dma_attr version     */
        0,                      /* dma_attr_addr_lo     */
        (uint32_t)0xFFFFFFFF,   /* dma_attr_addr_hi     */
        0x0FFFFFF,              /* dma_attr_count_max   */
        0x20,                   /* dma_attr_align       */
        0x7F,                   /* dma_attr_burstsizes  */
        1,                      /* dma_attr_minxfer     */
        (uint32_t)0xFFFFFFFF,   /* dma_attr_maxxfer     */
        (uint32_t)0xFFFFFFFF,   /* dma_attr_seg         */
        1,                      /* dma_attr_sgllen      */
        1,                      /* dma_attr_granular    */
        0                       /* dma_attr_flags       */
};

/*
 * DMA access attributes for registers and descriptors
 */
static ddi_device_acc_attr_t dmfe_reg_accattr = {
        DDI_DEVICE_ATTR_V0,
        DDI_STRUCTURE_LE_ACC,
        DDI_STRICTORDER_ACC
};

/*
 * DMA access attributes for data: NOT to be byte swapped.
 */
static ddi_device_acc_attr_t dmfe_data_accattr = {
        DDI_DEVICE_ATTR_V0,
        DDI_NEVERSWAP_ACC,
        DDI_STRICTORDER_ACC
};

static uchar_t dmfe_broadcast_addr[ETHERADDRL] = {
        0xff, 0xff, 0xff, 0xff, 0xff, 0xff
};


/*
 * ========== Lowest-level chip register & ring access routines ==========
 */

/*
 * I/O register get/put routines
 */
uint32_t
dmfe_chip_get32(dmfe_t *dmfep, off_t offset)
{
        uint32_t *addr;

        addr = (void *)(dmfep->io_reg + offset);
        return (ddi_get32(dmfep->io_handle, addr));
}

void
dmfe_chip_put32(dmfe_t *dmfep, off_t offset, uint32_t value)
{
        uint32_t *addr;

        addr = (void *)(dmfep->io_reg + offset);
        ddi_put32(dmfep->io_handle, addr, value);
}

/*
 * TX/RX ring get/put routines
 */
static uint32_t
dmfe_ring_get32(dma_area_t *dma_p, uint_t index, uint_t offset)
{
        uint32_t *addr;

        addr = (void *)dma_p->mem_va;
        return (ddi_get32(dma_p->acc_hdl, addr + index*DESC_SIZE + offset));
}

static void
dmfe_ring_put32(dma_area_t *dma_p, uint_t index, uint_t offset, uint32_t value)
{
        uint32_t *addr;

        addr = (void *)dma_p->mem_va;
        ddi_put32(dma_p->acc_hdl, addr + index*DESC_SIZE + offset, value);
}

/*
 * Setup buffer get/put routines
 */
static uint32_t
dmfe_setup_get32(dma_area_t *dma_p, uint_t index)
{
        uint32_t *addr;

        addr = (void *)dma_p->setup_va;
        return (ddi_get32(dma_p->acc_hdl, addr + index));
}

static void
dmfe_setup_put32(dma_area_t *dma_p, uint_t index, uint32_t value)
{
        uint32_t *addr;

        addr = (void *)dma_p->setup_va;
        ddi_put32(dma_p->acc_hdl, addr + index, value);
}
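
/*
 * Usage sketch (hypothetical helper, for illustration only): this is
 * how the accessors above are used elsewhere in this file to test
 * descriptor ownership; DESC0 and RX_OWN come from dmfe_impl.h.
 */
#ifdef DMFE_EXAMPLE_SKETCH
static boolean_t
dmfe_rx_owned_by_chip(dmfe_t *dmfep, uint_t index)
{
        uint32_t desc0 = dmfe_ring_get32(&dmfep->rx_desc, index, DESC0);

        return ((desc0 & RX_OWN) ? B_TRUE : B_FALSE);
}
#endif  /* DMFE_EXAMPLE_SKETCH */
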
/*
 * ========== Low-level chip & ring buffer manipulation ==========
 */

#define DMFE_DBG        DMFE_DBG_REGS   /* debug flag for this code */

/*
 * dmfe_set_opmode() -- function to set operating mode
 */
static void
dmfe_set_opmode(dmfe_t *dmfep)
{
        DMFE_DEBUG(("dmfe_set_opmode: opmode 0x%x", dmfep->opmode));

        ASSERT(mutex_owned(dmfep->oplock));

        dmfe_chip_put32(dmfep, OPN_MODE_REG, dmfep->opmode);
        drv_usecwait(10);
}

/*
 * dmfe_stop_chip() -- stop all chip processing & optionally reset the h/w
 */
static void
dmfe_stop_chip(dmfe_t *dmfep, enum chip_state newstate)
{
        ASSERT(mutex_owned(dmfep->oplock));

        /*
         * Stop the chip:
         *      disable all interrupts
         *      stop TX/RX processes
         *      clear the status bits for TX/RX stopped
         * If required, reset the chip
         * Record the new state
         */
        dmfe_chip_put32(dmfep, INT_MASK_REG, 0);
        dmfep->opmode &= ~(START_TRANSMIT | START_RECEIVE);
        dmfe_set_opmode(dmfep);
        dmfe_chip_put32(dmfep, STATUS_REG, TX_STOPPED_INT | RX_STOPPED_INT);

        switch (newstate) {
        default:
                ASSERT(!"can't get here");
                return;

        case CHIP_STOPPED:
        case CHIP_ERROR:
                break;

        case CHIP_RESET:
                dmfe_chip_put32(dmfep, BUS_MODE_REG, SW_RESET);
                drv_usecwait(10);
                dmfe_chip_put32(dmfep, BUS_MODE_REG, 0);
                drv_usecwait(10);
                dmfe_chip_put32(dmfep, BUS_MODE_REG, dmfe_bus_modes);
                break;
        }

        dmfep->chip_state = newstate;
}

/*
 * Initialize transmit and receive descriptor rings, and
 * set the chip to point to the first entry in each ring
 */
static void
dmfe_init_rings(dmfe_t *dmfep)
{
        dma_area_t *descp;
        uint32_t pstart;
        uint32_t pnext;
        uint32_t pbuff;
        uint32_t desc1;
        int i;

        /*
         * You need all the locks in order to rewrite the descriptor rings
         */
        ASSERT(mutex_owned(dmfep->oplock));
        ASSERT(mutex_owned(dmfep->rxlock));
        ASSERT(mutex_owned(dmfep->txlock));

        /*
         * Program the RX ring entries
         */
        descp = &dmfep->rx_desc;
        pstart = descp->mem_dvma;
        pnext = pstart + sizeof (struct rx_desc_type);
        pbuff = dmfep->rx_buff.mem_dvma;
        desc1 = RX_CHAINING | DMFE_BUF_SIZE_1;

        for (i = 0; i < dmfep->rx.n_desc; ++i) {
                dmfe_ring_put32(descp, i, RD_NEXT, pnext);
                dmfe_ring_put32(descp, i, BUFFER1, pbuff);
                dmfe_ring_put32(descp, i, DESC1, desc1);
                dmfe_ring_put32(descp, i, DESC0, RX_OWN);

                pnext += sizeof (struct rx_desc_type);
                pbuff += DMFE_BUF_SIZE;
        }

        /*
         * Fix up last entry & sync
         */
        dmfe_ring_put32(descp, --i, RD_NEXT, pstart);
        DMA_SYNC(descp, DDI_DMA_SYNC_FORDEV);
        dmfep->rx.next_free = 0;

        /*
         * Set the base address of the RX descriptor list in CSR3
         */
        DMFE_DEBUG(("RX descriptor VA: $%p (DVMA $%x)",
            descp->mem_va, descp->mem_dvma));
        dmfe_chip_put32(dmfep, RX_BASE_ADDR_REG, descp->mem_dvma);

        /*
         * Program the TX ring entries
         */
        descp = &dmfep->tx_desc;
        pstart = descp->mem_dvma;
        pnext = pstart + sizeof (struct tx_desc_type);
        pbuff = dmfep->tx_buff.mem_dvma;
        desc1 = TX_CHAINING;

        for (i = 0; i < dmfep->tx.n_desc; ++i) {
                dmfe_ring_put32(descp, i, TD_NEXT, pnext);
                dmfe_ring_put32(descp, i, BUFFER1, pbuff);
                dmfe_ring_put32(descp, i, DESC1, desc1);
                dmfe_ring_put32(descp, i, DESC0, 0);

                pnext += sizeof (struct tx_desc_type);
                pbuff += DMFE_BUF_SIZE;
        }

        /*
         * Fix up last entry & sync
         */
        dmfe_ring_put32(descp, --i, TD_NEXT, pstart);
        DMA_SYNC(descp, DDI_DMA_SYNC_FORDEV);
        dmfep->tx.n_free = dmfep->tx.n_desc;
        dmfep->tx.next_free = dmfep->tx.next_busy = 0;

        /*
         * Set the base address of the TX descriptor list in CSR4
         */
        DMFE_DEBUG(("TX descriptor VA: $%p (DVMA $%x)",
            descp->mem_va, descp->mem_dvma));
        dmfe_chip_put32(dmfep, TX_BASE_ADDR_REG, descp->mem_dvma);
}

/*
 * dmfe_start_chip() -- start the chip transmitting and/or receiving
 */
static void
dmfe_start_chip(dmfe_t *dmfep, int mode)
{
        ASSERT(mutex_owned(dmfep->oplock));

        dmfep->opmode |= mode;
        dmfe_set_opmode(dmfep);

        dmfe_chip_put32(dmfep, W_J_TIMER_REG, 0);
        /*
         * Enable VLAN length mode (allows packets to be 4 bytes longer).
         */
        dmfe_chip_put32(dmfep, W_J_TIMER_REG, VLAN_ENABLE);

        /*
         * Clear any pending process-stopped interrupts
         */
        dmfe_chip_put32(dmfep, STATUS_REG, TX_STOPPED_INT | RX_STOPPED_INT);
        dmfep->chip_state = mode & START_RECEIVE ? CHIP_TX_RX :
            mode & START_TRANSMIT ? CHIP_TX_ONLY : CHIP_STOPPED;
}

/*
 * dmfe_enable_interrupts() -- enable our favourite set of interrupts.
 *
 * Normal interrupts:
 *      We always enable:
 *              RX_PKTDONE_INT          (packet received)
 *              TX_PKTDONE_INT          (TX complete)
 *      We never enable:
 *              TX_ALLDONE_INT          (next TX buffer not ready)
 *
 * Abnormal interrupts:
 *      We always enable:
 *              RX_STOPPED_INT
 *              TX_STOPPED_INT
 *              SYSTEM_ERR_INT
 *              RX_UNAVAIL_INT
 *      We never enable:
 *              RX_EARLY_INT
 *              RX_WATCHDOG_INT
 *              TX_JABBER_INT
 *              TX_EARLY_INT
 *              TX_UNDERFLOW_INT
 *              GP_TIMER_INT            (not valid in -9 chips)
 *              LINK_STATUS_INT         (not valid in -9 chips)
 */
static void
dmfe_enable_interrupts(dmfe_t *dmfep)
{
        ASSERT(mutex_owned(dmfep->oplock));

        /*
         * Put 'the standard set of interrupts' in the interrupt mask register
         */
        dmfep->imask = RX_PKTDONE_INT | TX_PKTDONE_INT |
            RX_STOPPED_INT | TX_STOPPED_INT | RX_UNAVAIL_INT | SYSTEM_ERR_INT;

        dmfe_chip_put32(dmfep, INT_MASK_REG,
            NORMAL_SUMMARY_INT | ABNORMAL_SUMMARY_INT | dmfep->imask);
        dmfep->chip_state = CHIP_RUNNING;

        DMFE_DEBUG(("dmfe_enable_interrupts: imask 0x%x", dmfep->imask));
}

#undef  DMFE_DBG


/*
 * ========== RX side routines ==========
 */

#define DMFE_DBG        DMFE_DBG_RECV   /* debug flag for this code */

/*
 * Function to update receive statistics on various errors
 */
static void
dmfe_update_rx_stats(dmfe_t *dmfep, uint32_t desc0)
{
        ASSERT(mutex_owned(dmfep->rxlock));

        /*
         * The error summary bit and the error bits that it summarises
         * are only valid if this is the last fragment.  Therefore, a
         * fragment only contributes to the error statistics if both
         * the last-fragment and error summary bits are set.
         */
        if (((RX_LAST_DESC | RX_ERR_SUMMARY) & ~desc0) == 0) {
                dmfep->rx_stats_ierrors += 1;

                /*
                 * There are some other error bits in the descriptor for
                 * which there don't seem to be appropriate MAC statistics,
                 * notably RX_COLLISION and perhaps RX_DESC_ERR.  The
                 * latter may not be possible if it is supposed to indicate
                 * that one buffer has been filled with a partial packet
                 * and the next buffer required for the rest of the packet
                 * was not available, as all our buffers are more than large
                 * enough for a whole packet without fragmenting.
                 */

                if (desc0 & RX_OVERFLOW) {
                        dmfep->rx_stats_overflow += 1;

                } else if (desc0 & RX_RUNT_FRAME)
                        dmfep->rx_stats_short += 1;

                if (desc0 & RX_CRC)
                        dmfep->rx_stats_fcs += 1;

                if (desc0 & RX_FRAME2LONG)
                        dmfep->rx_stats_toolong += 1;
        }

        /*
         * A receive watchdog timeout is counted as a MAC-level receive
         * error.  Strangely, it doesn't set the packet error summary bit,
         * according to the chip data sheet :-?
         */
        if (desc0 & RX_RCV_WD_TO)
                dmfep->rx_stats_macrcv_errors += 1;

        if (desc0 & RX_DRIBBLING)
                dmfep->rx_stats_align += 1;

        if (desc0 & RX_MII_ERR)
                dmfep->rx_stats_macrcv_errors += 1;
}

/*
 * Receive incoming packet(s) and pass them up ...
 */
static mblk_t *
dmfe_getp(dmfe_t *dmfep)
{
        dma_area_t *descp;
        mblk_t **tail;
        mblk_t *head;
        mblk_t *mp;
        char *rxb;
        uchar_t *dp;
        uint32_t desc0;
        uint32_t misses;
        int packet_length;
        int index;

        mutex_enter(dmfep->rxlock);

        /*
         * Update the missed frame statistic from the on-chip counter.
         */
        misses = dmfe_chip_get32(dmfep, MISSED_FRAME_REG);
        dmfep->rx_stats_norcvbuf += (misses & MISSED_FRAME_MASK);

        /*
         * sync (all) receive descriptors before inspecting them
         */
        descp = &dmfep->rx_desc;
        DMA_SYNC(descp, DDI_DMA_SYNC_FORKERNEL);

        /*
         * We should own at least one RX entry, since we've had a
         * receive interrupt, but let's not be dogmatic about it.
         */
        index = dmfep->rx.next_free;
        desc0 = dmfe_ring_get32(descp, index, DESC0);
        if (desc0 & RX_OWN)
                DMFE_DEBUG(("dmfe_getp: no work, desc0 0x%x", desc0));

        for (head = NULL, tail = &head; (desc0 & RX_OWN) == 0; ) {
                /*
                 * Maintain statistics for every descriptor returned
                 * to us by the chip ...
                 */
                DMFE_DEBUG(("dmfe_getp: desc0 0x%x", desc0));
                dmfe_update_rx_stats(dmfep, desc0);

                /*
                 * Check that the entry has both "packet start" and
                 * "packet end" flags.  We really shouldn't get packet
                 * fragments, 'cos all the RX buffers are bigger than
                 * the largest valid packet.  So we'll just drop any
                 * fragments we find & skip on to the next entry.
                 */
                if (((RX_FIRST_DESC | RX_LAST_DESC) & ~desc0) != 0) {
                        DMFE_DEBUG(("dmfe_getp: dropping fragment"));
                        goto skip;
                }

                /*
                 * A whole packet in one buffer.  We have to check error
                 * status and packet length before forwarding it upstream.
                 */
                if (desc0 & RX_ERR_SUMMARY) {
                        DMFE_DEBUG(("dmfe_getp: dropping errored packet"));
                        goto skip;
                }

                packet_length = (desc0 >> 16) & 0x3fff;
                if (packet_length > DMFE_MAX_PKT_SIZE) {
                        DMFE_DEBUG(("dmfe_getp: dropping oversize packet, "
                            "length %d", packet_length));
                        goto skip;
                } else if (packet_length < ETHERMIN) {
                        /*
                         * Note that a VLAN packet would be even larger,
                         * but we don't worry about dropping runt VLAN
                         * frames.
                         *
                         * This check is probably redundant, as well,
                         * since the hardware should drop RUNT frames.
                         */
                        DMFE_DEBUG(("dmfe_getp: dropping undersize packet, "
                            "length %d", packet_length));
                        goto skip;
                }

                /*
                 * Sync the data, so we can examine it.  Remember that
                 * if we're using Imperfect Filtering, the chip can
                 * receive unicast packets sent to stations whose
                 * addresses just happen to hash to the same value as
                 * our own.
                 */
                (void) ddi_dma_sync(dmfep->rx_buff.dma_hdl,
                    index * DMFE_BUF_SIZE, DMFE_BUF_SIZE,
                    DDI_DMA_SYNC_FORKERNEL);
                rxb = &dmfep->rx_buff.mem_va[index*DMFE_BUF_SIZE];

                /*
                 * We do not bother to check that the packet is really for
                 * us; we let the MAC framework make that check instead.
                 * This is especially important if we ever want to support
                 * multiple MAC addresses.
                 */

                /*
                 * Packet looks good; get a buffer to copy it into.  We
                 * allow some space at the front of the allocated buffer
                 * (HEADROOM) in case any upstream modules want to prepend
                 * some sort of header.  The value has been carefully chosen
                 * so that it also has the side-effect of making the packet
                 * *contents* 4-byte aligned, as required by NCA!
                 */
                mp = allocb(DMFE_HEADROOM + packet_length, 0);
                if (mp == NULL) {
                        DMFE_DEBUG(("dmfe_getp: no buffer - dropping packet"));
                        dmfep->rx_stats_norcvbuf += 1;
                        goto skip;
                }

                /*
                 * Account for statistics of good packets.
                 */
                dmfep->rx_stats_ipackets += 1;
                dmfep->rx_stats_rbytes += packet_length;
                if (desc0 & RX_MULTI_FRAME) {
                        if (bcmp(rxb, dmfe_broadcast_addr, ETHERADDRL)) {
                                dmfep->rx_stats_multi += 1;
                        } else {
                                dmfep->rx_stats_bcast += 1;
                        }
                }

                /*
                 * Copy the packet into the STREAMS buffer
                 */
                dp = mp->b_rptr += DMFE_HEADROOM;
                mp->b_cont = mp->b_next = NULL;

                /*
                 * Don't worry about stripping the vlan tag, the MAC
                 * layer will take care of that for us.
                 */
                bcopy(rxb, dp, packet_length);

                /*
                 * Fix up the packet length, and link it to the chain
                 */
                mp->b_wptr = mp->b_rptr + packet_length - ETHERFCSL;
                *tail = mp;
                tail = &mp->b_next;

        skip:
                /*
                 * Return ownership of ring entry & advance to next
                 */
                dmfe_ring_put32(descp, index, DESC0, RX_OWN);
                index = NEXT(index, dmfep->rx.n_desc);
                desc0 = dmfe_ring_get32(descp, index, DESC0);
        }

        /*
         * Remember where to start looking next time ...
         */
        dmfep->rx.next_free = index;

        /*
         * sync the receive descriptors that we've given back
         * (actually, we sync all of them for simplicity), and
         * wake the chip in case it had suspended receive
         */
        DMA_SYNC(descp, DDI_DMA_SYNC_FORDEV);
        dmfe_chip_put32(dmfep, RX_POLL_REG, 0);

        mutex_exit(dmfep->rxlock);
        return (head);
}

#undef  DMFE_DBG


/*
 * ========== Primary TX side routines ==========
 */

#define DMFE_DBG        DMFE_DBG_SEND   /* debug flag for this code */

/*
 * TX ring management:
 *
 * There are <tx.n_desc> entries in the ring, of which those from
 * <tx.next_free> round to but not including <tx.next_busy> must
 * be owned by the CPU.  The number of such entries should equal
 * <tx.n_free>; but there may also be some more entries which the
 * chip has given back but which we haven't yet accounted for.
 * The routine dmfe_reclaim_tx_desc() adjusts the indexes & counts
 * as it discovers such entries.
 *
 * Initially, or when the ring is entirely free:
 *      C = Owned by CPU
 *      D = Owned by Davicom (DMFE) chip
 *
 *      tx.next_free                                    tx.n_desc = 16
 *        |
 *        v
 *      +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
 *      | C | C | C | C | C | C | C | C | C | C | C | C | C | C | C | C |
 *      +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
 *        ^
 *        |
 *      tx.next_busy                                    tx.n_free = 16
 *
 * On entry to reclaim() during normal use:
 *
 *                                      tx.next_free    tx.n_desc = 16
 *                                            |
 *                                            v
 *      +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
 *      | C | C | C | C | C | C | D | D | D | C | C | C | C | C | C | C |
 *      +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
 *                ^
 *                |
 *              tx.next_busy                            tx.n_free = 9
 *
 * On exit from reclaim():
 *
 *                                      tx.next_free    tx.n_desc = 16
 *                                            |
 *                                            v
 *      +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
 *      | C | C | C | C | C | C | D | D | D | C | C | C | C | C | C | C |
 *      +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
 *                                ^
 *                                |
 *                              tx.next_busy            tx.n_free = 13
 *
 * The ring is considered "full" when only one entry is owned by
 * the CPU; thus <tx.n_free> should always be >= 1.
 *
 *                          tx.next_free                tx.n_desc = 16
 *                            |
 *                            v
 *      +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
 *      | D | D | D | D | D | C | D | D | D | D | D | D | D | D | D | D |
 *      +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
 *                                ^
 *                                |
 *                              tx.next_busy            tx.n_free = 1
 */
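
/*
 * Sketch (illustrative only; not part of the driver) of the
 * invariants the diagrams above describe.  A full check would also
 * have to allow for entries the chip has returned but which haven't
 * been reclaimed yet, so only the weaker assertions are shown.
 */
#ifdef DMFE_EXAMPLE_SKETCH
static void
dmfe_tx_ring_invariants(dmfe_t *dmfep)
{
        ASSERT(mutex_owned(dmfep->txlock));

        /* a "full" ring still keeps one CPU-owned entry */
        ASSERT(dmfep->tx.n_free >= 1);
        ASSERT(dmfep->tx.n_free <= dmfep->tx.n_desc);
}
#endif  /* DMFE_EXAMPLE_SKETCH */
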
/*
 * Function to update transmit statistics on various errors
 */
static void
dmfe_update_tx_stats(dmfe_t *dmfep, int index, uint32_t desc0, uint32_t desc1)
{
        uint32_t collisions;
        uint32_t errbits;
        uint32_t errsum;

        ASSERT(mutex_owned(dmfep->txlock));

        collisions = ((desc0 >> 3) & 0x0f);
        errsum = desc0 & TX_ERR_SUMMARY;
        errbits = desc0 & (TX_UNDERFLOW | TX_LATE_COLL | TX_CARRIER_LOSS |
            TX_NO_CARRIER | TX_EXCESS_COLL | TX_JABBER_TO);
        if ((errsum == 0) != (errbits == 0)) {
                dmfe_log(dmfep, "dubious TX error status 0x%x", desc0);
                desc0 |= TX_ERR_SUMMARY;
        }

        if (desc0 & TX_ERR_SUMMARY) {
                dmfep->tx_stats_oerrors += 1;

                /*
                 * If we ever see a transmit jabber timeout, we count it
                 * as a MAC-level transmit error; but we probably won't
                 * see it as it causes an Abnormal interrupt and we reset
                 * the chip in order to recover
                 */
                if (desc0 & TX_JABBER_TO) {
                        dmfep->tx_stats_macxmt_errors += 1;
                        dmfep->tx_stats_jabber += 1;
                }

                if (desc0 & TX_UNDERFLOW)
                        dmfep->tx_stats_underflow += 1;
                else if (desc0 & TX_LATE_COLL)
                        dmfep->tx_stats_xmtlatecoll += 1;

                if (desc0 & (TX_CARRIER_LOSS | TX_NO_CARRIER))
                        dmfep->tx_stats_nocarrier += 1;

                if (desc0 & TX_EXCESS_COLL) {
                        dmfep->tx_stats_excoll += 1;
                        collisions = 16;
                }
        } else {
                int bit = index % NBBY;
                int byt = index / NBBY;

                /* the bits are set as (1 << bit) in dmfe_send_msg() */
                if (dmfep->tx_mcast[byt] & (1 << bit)) {
                        dmfep->tx_mcast[byt] &= ~(1 << bit);
                        dmfep->tx_stats_multi += 1;

                } else if (dmfep->tx_bcast[byt] & (1 << bit)) {
                        dmfep->tx_bcast[byt] &= ~(1 << bit);
                        dmfep->tx_stats_bcast += 1;
                }

                dmfep->tx_stats_opackets += 1;
                dmfep->tx_stats_obytes += desc1 & TX_BUFFER_SIZE1;
        }

        if (collisions == 1)
                dmfep->tx_stats_first_coll += 1;
        else if (collisions != 0)
                dmfep->tx_stats_multi_coll += 1;
        dmfep->tx_stats_collisions += collisions;

        if (desc0 & TX_DEFERRED)
                dmfep->tx_stats_defer += 1;
}
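
/*
 * Worked example (bit positions taken from the code above, not quoted
 * from the datasheet): desc0 bits 6:3 carry the collision count, so
 * desc0 == 0x0008 reports exactly one collision and increments
 * tx_stats_first_coll; when TX_EXCESS_COLL is set, the count is
 * forced to 16, the maximum number of transmit attempts.
 */
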
/*
 * Reclaim all the ring entries that the chip has returned to us ...
 *
 * Returns B_FALSE if no entries could be reclaimed.  Otherwise, reclaims
 * as many as possible, restarts the TX stall timeout, and returns B_TRUE.
 */
static boolean_t
dmfe_reclaim_tx_desc(dmfe_t *dmfep)
{
        dma_area_t *descp;
        uint32_t desc0;
        uint32_t desc1;
        int i;

        ASSERT(mutex_owned(dmfep->txlock));

        /*
         * sync transmit descriptor ring before looking at it
         */
        descp = &dmfep->tx_desc;
        DMA_SYNC(descp, DDI_DMA_SYNC_FORKERNEL);

        /*
         * Early exit if there are no descriptors to reclaim, either
         * because they're all reclaimed already, or because the next
         * one is still owned by the chip ...
         */
        i = dmfep->tx.next_busy;
        if (i == dmfep->tx.next_free)
                return (B_FALSE);
        desc0 = dmfe_ring_get32(descp, i, DESC0);
        if (desc0 & TX_OWN)
                return (B_FALSE);

        /*
         * Reclaim as many descriptors as possible ...
         */
        for (;;) {
                desc1 = dmfe_ring_get32(descp, i, DESC1);
                ASSERT((desc1 & (TX_SETUP_PACKET | TX_LAST_DESC)) != 0);

                if (desc1 & TX_SETUP_PACKET) {
                        /*
                         * Setup packet - restore buffer address
                         */
                        ASSERT(dmfe_ring_get32(descp, i, BUFFER1) ==
                            descp->setup_dvma);
                        dmfe_ring_put32(descp, i, BUFFER1,
                            dmfep->tx_buff.mem_dvma + i*DMFE_BUF_SIZE);
                } else {
                        /*
                         * Regular packet - just update stats
                         */
                        ASSERT(dmfe_ring_get32(descp, i, BUFFER1) ==
                            dmfep->tx_buff.mem_dvma + i*DMFE_BUF_SIZE);
                        dmfe_update_tx_stats(dmfep, i, desc0, desc1);
                }

#if     DMFEDEBUG
                /*
                 * We can use one of the SPARE bits in the TX descriptor
                 * to track when a ring buffer slot is reclaimed.  Then
                 * we can deduce the last operation on a slot from the
                 * top half of DESC0:
                 *
                 *      0x8000 xxxx     given to DMFE chip (TX_OWN)
                 *      0x7fff xxxx     returned but not yet reclaimed
                 *      0x3fff xxxx     reclaimed
                 */
#define TX_PEND_RECLAIM         (1UL<<30)
                dmfe_ring_put32(descp, i, DESC0, desc0 & ~TX_PEND_RECLAIM);
#endif  /* DMFEDEBUG */

                /*
                 * Update count & index; we're all done if the ring is
                 * now fully reclaimed, or the next entry is still owned
                 * by the chip ...
                 */
                dmfep->tx.n_free += 1;
                i = NEXT(i, dmfep->tx.n_desc);
                if (i == dmfep->tx.next_free)
                        break;
                desc0 = dmfe_ring_get32(descp, i, DESC0);
                if (desc0 & TX_OWN)
                        break;
        }

        dmfep->tx.next_busy = i;
        dmfep->tx_pending_tix = 0;
        return (B_TRUE);
}

/*
 * Send the message in the message block chain <mp>.
 *
 * The message is freed if and only if its contents are successfully copied
 * and queued for transmission (so that the return value is B_TRUE).
 * If we can't queue the message, the return value is B_FALSE and
 * the message is *not* freed.
 *
 * This routine handles the special case of <mp> == NULL, which indicates
 * that we want to "send" the special "setup packet" allocated during
 * startup.  We have to use some different flags in the packet descriptor
 * to say it's a setup packet (from the global <dmfe_setup_desc1>), and the
 * setup packet *isn't* freed after use.
 */
static boolean_t
dmfe_send_msg(dmfe_t *dmfep, mblk_t *mp)
{
        dma_area_t *descp;
        mblk_t *bp;
        char *txb;
        uint32_t desc1;
        uint32_t index;
        size_t totlen;
        size_t mblen;

        /*
         * If the number of free slots is below the reclaim threshold
         * (soft limit), we'll try to reclaim some.  If we fail, and
         * the number of free slots is also below the minimum required
         * (the hard limit, usually 1), then we can't send the packet.
         */
        mutex_enter(dmfep->txlock);
        if (dmfep->tx.n_free <= dmfe_tx_reclaim_level &&
            dmfe_reclaim_tx_desc(dmfep) == B_FALSE &&
            dmfep->tx.n_free <= dmfe_tx_min_free) {
                /*
                 * Resource shortage - return B_FALSE so the packet
                 * will be queued for retry after the next TX-done
                 * interrupt.
                 */
                mutex_exit(dmfep->txlock);
                DMFE_DEBUG(("dmfe_send_msg: no free descriptors"));
                return (B_FALSE);
        }

        /*
         * There's a slot available, so claim it by incrementing
         * the next-free index and decrementing the free count.
         * If the ring is currently empty, we also restart the
         * stall-detect timer.  The ASSERTions check that our
         * invariants still hold:
         *      the next-free index must not match the next-busy index
         *      there must still be at least one free entry
         * After this, we now have exclusive ownership of the ring
         * entry (and matching buffer) indicated by <index>, so we
         * don't need to hold the TX lock any longer
         */
        index = dmfep->tx.next_free;
        dmfep->tx.next_free = NEXT(index, dmfep->tx.n_desc);
        ASSERT(dmfep->tx.next_free != dmfep->tx.next_busy);
        if (dmfep->tx.n_free-- == dmfep->tx.n_desc)
                dmfep->tx_pending_tix = 0;
        ASSERT(dmfep->tx.n_free >= 1);
        mutex_exit(dmfep->txlock);

        /*
         * Check the ownership of the ring entry ...
         */
        descp = &dmfep->tx_desc;
        ASSERT((dmfe_ring_get32(descp, index, DESC0) & TX_OWN) == 0);

        if (mp == NULL) {
                /*
                 * Indicates we should send a SETUP packet, which we do by
                 * temporarily switching the BUFFER1 pointer in the ring
                 * entry.  The reclaim routine will restore BUFFER1 to its
                 * usual value.
                 *
                 * Note that as the setup packet is tagged on the end of
                 * the TX ring, when we sync the descriptor we're also
                 * implicitly syncing the setup packet - hence, we don't
                 * need a separate ddi_dma_sync() call here.
                 */
                desc1 = dmfe_setup_desc1;
                dmfe_ring_put32(descp, index, BUFFER1, descp->setup_dvma);
        } else {
                /*
                 * A regular packet; we copy the data into a pre-mapped
                 * buffer, which avoids the overhead (and complication)
                 * of mapping/unmapping STREAMS buffers and keeping hold
                 * of them until the DMA has completed.
                 *
                 * Because all buffers are the same size, and larger
                 * than the longest single valid message, we don't have
                 * to bother about splitting the message across multiple
                 * buffers.
                 */
                txb = &dmfep->tx_buff.mem_va[index*DMFE_BUF_SIZE];
                totlen = 0;
                bp = mp;

                /*
                 * Copy all (remaining) mblks in the message ...
                 */
                for (; bp != NULL; bp = bp->b_cont) {
                        mblen = MBLKL(bp);
                        if ((totlen += mblen) <= DMFE_MAX_PKT_SIZE) {
                                bcopy(bp->b_rptr, txb, mblen);
                                txb += mblen;
                        }
                }

                /*
                 * Is this a multicast or broadcast packet?  We do
                 * this so that we can track statistics accurately
                 * when we reclaim it.
                 */
                txb = &dmfep->tx_buff.mem_va[index*DMFE_BUF_SIZE];
                if (txb[0] & 0x1) {
                        if (bcmp(txb, dmfe_broadcast_addr, ETHERADDRL) == 0) {
                                dmfep->tx_bcast[index / NBBY] |=
                                    (1 << (index % NBBY));
                        } else {
                                dmfep->tx_mcast[index / NBBY] |=
                                    (1 << (index % NBBY));
                        }
                }

                /*
                 * We've reached the end of the chain; and we should have
                 * collected no more than DMFE_MAX_PKT_SIZE bytes into our
                 * buffer.  Note that the <size> field in the descriptor is
                 * only 11 bits, so bigger packets would be a problem!
                 */
                ASSERT(bp == NULL);
                ASSERT(totlen <= DMFE_MAX_PKT_SIZE);
                totlen &= TX_BUFFER_SIZE1;
                desc1 = TX_FIRST_DESC | TX_LAST_DESC | totlen;

                (void) ddi_dma_sync(dmfep->tx_buff.dma_hdl,
                    index * DMFE_BUF_SIZE, DMFE_BUF_SIZE, DDI_DMA_SYNC_FORDEV);
        }

        /*
         * Update ring descriptor entries, sync them, and wake up the
         * transmit process
         */
        if ((index & dmfe_tx_int_factor) == 0)
                desc1 |= TX_INT_ON_COMP;
        desc1 |= TX_CHAINING;
        dmfe_ring_put32(descp, index, DESC1, desc1);
        dmfe_ring_put32(descp, index, DESC0, TX_OWN);
        DMA_SYNC(descp, DDI_DMA_SYNC_FORDEV);
        dmfe_chip_put32(dmfep, TX_POLL_REG, 0);

        /*
         * Finally, free the message & return success
         */
        if (mp)
                freemsg(mp);
        return (B_TRUE);
}

/*
 * dmfe_m_tx() -- send a chain of packets
 *
 * Called when packet(s) are ready to be transmitted.  A pointer to an
 * M_DATA message that contains the packet is passed to this routine.
 * The complete LLC header is contained in the message's first message
 * block, and the remainder of the packet is contained within
 * additional M_DATA message blocks linked to the first message block.
 *
 * Additional messages may be passed by linking with b_next.
 */
static mblk_t *
dmfe_m_tx(void *arg, mblk_t *mp)
{
        dmfe_t *dmfep = arg;                    /* private device info  */
        mblk_t *next;

        ASSERT(mp != NULL);
        ASSERT(dmfep->mac_state == DMFE_MAC_STARTED);

        if (dmfep->chip_state != CHIP_RUNNING)
                return (mp);

        while (mp != NULL) {
                next = mp->b_next;
                mp->b_next = NULL;
                if (!dmfe_send_msg(dmfep, mp)) {
                        mp->b_next = next;
                        break;
                }
                mp = next;
        }

        return (mp);
}

#undef  DMFE_DBG


/*
 * ========== Address-setting routines (TX-side) ==========
 */

#define DMFE_DBG        DMFE_DBG_ADDR   /* debug flag for this code */

/*
 * Find the index of the relevant bit in the setup packet.
 * This must mirror the way the hardware will actually calculate it!
 */
static uint32_t
dmfe_hash_index(const uint8_t *address)
{
        uint32_t const POLY = HASH_POLY;
        uint32_t crc = HASH_CRC;
        uint32_t index;
        uint32_t msb;
        uchar_t currentbyte;
        int byteslength;
        int shift;
        int bit;

        for (byteslength = 0; byteslength < ETHERADDRL; ++byteslength) {
                currentbyte = address[byteslength];
                for (bit = 0; bit < 8; ++bit) {
                        msb = crc >> 31;
                        crc <<= 1;
                        if (msb ^ (currentbyte & 1)) {
                                crc ^= POLY;
                                crc |= 0x00000001;
                        }
                        currentbyte >>= 1;
                }
        }

        for (index = 0, bit = 23, shift = 8; shift >= 0; ++bit, --shift)
                index |= (((crc >> bit) & 1) << shift);

        return (index);
}
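
/*
 * Usage sketch (hypothetical helper, for illustration only): the
 * 9-bit index returned above selects one bit in the 512-bit hash
 * table, splitting into a (16-bit word, bit) pair exactly as in
 * dmfe_update_hash() below.
 */
#ifdef DMFE_EXAMPLE_SKETCH
static boolean_t
dmfe_hash_bit_is_set(dmfe_t *dmfep, const uint8_t *mca)
{
        uint32_t index = dmfe_hash_index(mca);

        return ((dmfe_setup_get32(&dmfep->tx_desc, index/16) &
            (1 << (index%16))) ? B_TRUE : B_FALSE);
}
#endif  /* DMFE_EXAMPLE_SKETCH */
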
/*
 * Find and set/clear the relevant bit in the setup packet hash table
 * This must mirror the way the hardware will actually interpret it!
 */
static void
dmfe_update_hash(dmfe_t *dmfep, uint32_t index, boolean_t val)
{
        dma_area_t *descp;
        uint32_t tmp;

        ASSERT(mutex_owned(dmfep->oplock));

        descp = &dmfep->tx_desc;
        tmp = dmfe_setup_get32(descp, index/16);
        if (val)
                tmp |= 1 << (index%16);
        else
                tmp &= ~(1 << (index%16));
        dmfe_setup_put32(descp, index/16, tmp);
}

/*
 * Update the refcount for the bit in the setup packet corresponding
 * to the specified address; if it changes between zero & nonzero,
 * also update the bitmap itself & return B_TRUE, so that the caller
 * knows to re-send the setup packet.  Otherwise (only the refcount
 * changed), return B_FALSE
 */
static boolean_t
dmfe_update_mcast(dmfe_t *dmfep, const uint8_t *mca, boolean_t val)
{
        uint32_t index;
        uint8_t *refp;
        boolean_t change;

        index = dmfe_hash_index(mca);
        refp = &dmfep->mcast_refs[index];
        change = (val ? (*refp)++ : --(*refp)) == 0;

        if (change)
                dmfe_update_hash(dmfep, index, val);

        return (change);
}

/*
 * "Transmit" the (possibly updated) magic setup packet
 */
static int
dmfe_send_setup(dmfe_t *dmfep)
{
        int status;

        ASSERT(mutex_owned(dmfep->oplock));

        /*
         * If the chip isn't running, we can't really send the setup frame
         * now but it doesn't matter, 'cos it will be sent when the transmit
         * process is restarted (see dmfe_start()).
         */
        if ((dmfep->opmode & START_TRANSMIT) == 0)
                return (0);

        /*
         * "Send" the setup frame.  If it fails (e.g. no resources),
         * set a flag; then the factotum will retry the "send".  Once
         * it works, we can clear the flag no matter how many attempts
         * had previously failed.  We tell the caller that it worked
         * whether it did or not; after all, it *will* work eventually.
         */
        status = dmfe_send_msg(dmfep, NULL);
        dmfep->need_setup = status ? B_FALSE : B_TRUE;
        return (0);
}
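
/*
 * Walk-through (hypothetical addresses; <oplock> must be held, as
 * dmfe_update_hash() requires): if two multicast addresses hash to
 * the same bit, the refcounts above keep that bit set until both
 * have been removed.  Only the transitions that actually change the
 * bitmap return B_TRUE, and only those require a new setup packet.
 */
#ifdef DMFE_EXAMPLE_SKETCH
static void
dmfe_mcast_refcount_demo(dmfe_t *dmfep, const uint8_t *a, const uint8_t *b)
{
        /* assume <a> and <b> hash to the same index */
        (void) dmfe_update_mcast(dmfep, a, B_TRUE);     /* 0->1: B_TRUE  */
        (void) dmfe_update_mcast(dmfep, b, B_TRUE);     /* 1->2: B_FALSE */
        (void) dmfe_update_mcast(dmfep, a, B_FALSE);    /* 2->1: B_FALSE */
        (void) dmfe_update_mcast(dmfep, b, B_FALSE);    /* 1->0: B_TRUE  */
}
#endif  /* DMFE_EXAMPLE_SKETCH */
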
/*
 * dmfe_m_unicst() -- set the physical network address
 */
static int
dmfe_m_unicst(void *arg, const uint8_t *macaddr)
{
        dmfe_t *dmfep = arg;
        int status;
        int index;

        /*
         * Update our current address and send out a new setup packet
         *
         * Here we accommodate the use of HASH_ONLY or HASH_AND_PERFECT
         * filtering modes (we don't support PERFECT_ONLY or INVERSE modes).
         *
         * It is said that there is a bug in the 21140 where it fails to
         * receive packets addressed to the specified perfect filter
         * address.  If the same bug is present in the DM9102A, the
         * TX_FILTER_TYPE1 bit should be set in the module variable
         * dmfe_setup_desc1.
         *
         * If TX_FILTER_TYPE1 is set, we will use HASH_ONLY filtering.
         * In this mode, *all* incoming addresses are hashed and looked
         * up in the bitmap described by the setup packet.  Therefore,
         * the bit representing the station address has to be added to
         * the table before sending it out.  If the address is changed,
         * the old entry should be removed before the new entry is made.
         *
         * NOTE: in this mode, unicast packets that are not intended for
         * this station may be received; it is up to software to filter
         * them out afterwards!
         *
         * If TX_FILTER_TYPE1 is *not* set, we will use HASH_AND_PERFECT
         * filtering.  In this mode, multicast addresses are hashed and
         * checked against the bitmap, while unicast addresses are simply
         * matched against the one physical address specified in the setup
         * packet.  This means that we shouldn't receive unicast packets
         * that aren't intended for us (but software still has to filter
         * multicast packets just the same).
         *
         * Whichever mode we're using, we have to enter the broadcast
         * address into the multicast filter map too, so we do this on
         * the first time through after attach or reset.
         */
        mutex_enter(dmfep->oplock);

        if (dmfep->addr_set && dmfe_setup_desc1 & TX_FILTER_TYPE1)
                (void) dmfe_update_mcast(dmfep, dmfep->curr_addr, B_FALSE);
        if (dmfe_setup_desc1 & TX_FILTER_TYPE1)
                (void) dmfe_update_mcast(dmfep, macaddr, B_TRUE);
        if (!dmfep->addr_set)
                (void) dmfe_update_mcast(dmfep, dmfe_broadcast_addr, B_TRUE);

        /*
         * Remember the new current address
         */
        ethaddr_copy(macaddr, dmfep->curr_addr);
        dmfep->addr_set = B_TRUE;

        /*
         * Install the new physical address into the proper position in
         * the setup frame; this is only used if we select hash+perfect
         * filtering, but we'll put it in anyway.  The ugliness here is
         * down to the usual war of the egg :(
         */
        for (index = 0; index < ETHERADDRL; index += 2)
                dmfe_setup_put32(&dmfep->tx_desc, SETUPBUF_PHYS+index/2,
                    (macaddr[index+1] << 8) | macaddr[index]);

        /*
         * Finally, we're ready to "transmit" the setup frame
         */
        status = dmfe_send_setup(dmfep);
        mutex_exit(dmfep->oplock);

        return (status);
}

/*
 * dmfe_m_multicst() -- enable or disable a multicast address
 *
 * Program the hardware to enable/disable the multicast address
 * in "mca" (enable if add is true, otherwise disable it.)
 * We keep a refcount for each bit in the map, so that it still
 * works out properly if multiple addresses hash to the same bit.
 * dmfe_update_mcast() tells us whether the map actually changed;
 * if so, we have to re-"transmit" the magic setup packet.
 */
static int
dmfe_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
{
        dmfe_t *dmfep = arg;                    /* private device info  */
        int status = 0;

        mutex_enter(dmfep->oplock);
        if (dmfe_update_mcast(dmfep, mca, add))
                status = dmfe_send_setup(dmfep);
        mutex_exit(dmfep->oplock);

        return (status);
}

#undef  DMFE_DBG


/*
 * ========== Internal state management entry points ==========
 */

#define DMFE_DBG        DMFE_DBG_GLD    /* debug flag for this code */

/*
 * These routines provide all the functionality required by the
 * corresponding MAC layer entry points, but don't update the MAC layer state
 * so they can be called internally without disturbing our record
 * of what the MAC layer thinks we should be doing ...
 */

/*
 * dmfe_stop() -- stop processing, don't reset h/w or rings
 */
static void
dmfe_stop(dmfe_t *dmfep)
{
        ASSERT(mutex_owned(dmfep->oplock));

        dmfe_stop_chip(dmfep, CHIP_STOPPED);
}

/*
 * dmfe_reset() -- stop processing, reset h/w & rings to initial state
 */
static void
dmfe_reset(dmfe_t *dmfep)
{
        ASSERT(mutex_owned(dmfep->oplock));
        ASSERT(mutex_owned(dmfep->rxlock));
        ASSERT(mutex_owned(dmfep->txlock));

        dmfe_stop_chip(dmfep, CHIP_RESET);
        dmfe_init_rings(dmfep);
}

/*
 * dmfe_start() -- start transmitting/receiving
 */
static void
dmfe_start(dmfe_t *dmfep)
{
        uint32_t gpsr;

        ASSERT(mutex_owned(dmfep->oplock));

        ASSERT(dmfep->chip_state == CHIP_RESET ||
            dmfep->chip_state == CHIP_STOPPED);

        /*
         * Make opmode consistent with PHY duplex setting
         */
        gpsr = dmfe_chip_get32(dmfep, PHY_STATUS_REG);
        if (gpsr & GPS_FULL_DUPLEX)
                dmfep->opmode |= FULL_DUPLEX;
        else
                dmfep->opmode &= ~FULL_DUPLEX;

        /*
         * Start transmit processing
         * Set up the address filters
         * Start receive processing
         * Enable interrupts
         */
        dmfe_start_chip(dmfep, START_TRANSMIT);
        (void) dmfe_send_setup(dmfep);
        drv_usecwait(10);
        dmfe_start_chip(dmfep, START_RECEIVE);
        dmfe_enable_interrupts(dmfep);
}

/*
 * dmfe_restart - restart transmitting/receiving after error or suspend
 */
static void
dmfe_restart(dmfe_t *dmfep)
{
        ASSERT(mutex_owned(dmfep->oplock));

        /*
         * You need not only <oplock>, but also <rxlock> AND <txlock>
         * in order to reset the rings, but then <txlock> *mustn't*
         * be held across the call to dmfe_start()
         */
        mutex_enter(dmfep->rxlock);
        mutex_enter(dmfep->txlock);
        dmfe_reset(dmfep);
        mutex_exit(dmfep->txlock);
        mutex_exit(dmfep->rxlock);
        if (dmfep->mac_state == DMFE_MAC_STARTED)
                dmfe_start(dmfep);
}


/*
 * ========== MAC-required management entry points ==========
 */

/*
 * dmfe_m_stop() -- stop transmitting/receiving
 */
static void
dmfe_m_stop(void *arg)
{
        dmfe_t *dmfep = arg;                    /* private device info  */

        /*
         * Just stop processing, then record new MAC state
         */
        mutex_enter(dmfep->oplock);
        dmfe_stop(dmfep);
        dmfep->mac_state = DMFE_MAC_STOPPED;
        mutex_exit(dmfep->oplock);
}

/*
 * dmfe_m_start() -- start transmitting/receiving
 */
static int
dmfe_m_start(void *arg)
{
        dmfe_t *dmfep = arg;                    /* private device info  */

        /*
         * Start processing and record new MAC state
         */
        mutex_enter(dmfep->oplock);
        dmfe_start(dmfep);
        dmfep->mac_state = DMFE_MAC_STARTED;
        mutex_exit(dmfep->oplock);

        return (0);
}

/*
 * dmfe_m_promisc() -- set or reset promiscuous mode on the board
 *
 *      Program the hardware to enable/disable promiscuous and/or
 *      receive-all-multicast modes.  Davicom don't document this
 *      clearly, but it looks like we can do this on-the-fly (i.e.
 *      without stopping & restarting the TX/RX processes).
 */
static int
dmfe_m_promisc(void *arg, boolean_t on)
{
        dmfe_t *dmfep = arg;

        mutex_enter(dmfep->oplock);
        dmfep->opmode &= ~(PROMISC_MODE | PASS_MULTICAST);
        if (on)
                dmfep->opmode |= PROMISC_MODE;
        dmfe_set_opmode(dmfep);
        mutex_exit(dmfep->oplock);

        return (0);
}

/*ARGSUSED*/
static boolean_t
dmfe_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
{
        /*
         * Note that the chip could support some form of polling and
         * multiaddress support.  We should look into adding polling
         * support later, once Solaris is better positioned to take
         * advantage of it, although it may be of little use since
         * even a lowly 500MHz US-IIe should be able to keep up with
         * 100Mbps.  (Esp. if the packets are not unreasonably sized.)
         *
         * Multiaddress support, however, is likely to be of more
         * utility with crossbow and virtualized NICs.  Although the
         * fact that dmfe is only supported on low-end US-IIe hardware
         * makes one wonder whether VNICs are likely to be used on
         * such platforms.  The chip certainly supports the notion,
         * since it can be run in HASH-ONLY mode.  (Though this would
         * require software to drop unicast packets that are
         * incorrectly received due to hash collision of the
         * destination mac address.)
         *
         * Interestingly enough, modern Davicom chips (the 9102D)
         * support full IP checksum offload, though it's unclear
         * whether any of these chips are used on any systems that can
         * run Solaris.
         *
         * If this driver is ever supported on x86 hardware, then
         * these assumptions should be revisited.
         */
        switch (cap) {
        case MAC_CAPAB_POLL:
        case MAC_CAPAB_MULTIADDRESS:
        case MAC_CAPAB_HCKSUM:
        default:
                return (B_FALSE);
        }
}


#undef  DMFE_DBG


/*
 * ========== Factotum, implemented as a softint handler ==========
 */

#define DMFE_DBG        DMFE_DBG_FACT   /* debug flag for this code */

/*
 * The factotum is woken up when there's something to do that we'd rather
 * not do from inside a (high-level?) hardware interrupt handler.  Its
 * two main tasks are:
 *      reset & restart the chip after an error
 *      update & restart the chip after a link status change
 */
static uint_t
dmfe_factotum(caddr_t arg)
{
        dmfe_t *dmfep;

        dmfep = (void *)arg;
        ASSERT(dmfep->dmfe_guard == DMFE_GUARD);

        mutex_enter(dmfep->oplock);

        dmfep->factotum_flag = 0;
        DRV_KS_INC(dmfep, KS_FACTOTUM_RUN);

        /*
         * Check for chip error ...
         */
        if (dmfep->chip_state == CHIP_ERROR) {
                /*
                 * Error recovery required: reset the chip and the rings,
                 * then, if it's supposed to be running, kick it off again.
                 */
                DRV_KS_INC(dmfep, KS_RECOVERY);
                dmfe_restart(dmfep);
        } else if (dmfep->need_setup) {
                (void) dmfe_send_setup(dmfep);
        }
        mutex_exit(dmfep->oplock);

        /*
         * Then, check the link state.  We need <milock> but not <oplock>
         * to do this, but if something's changed, we need <oplock> as well
         * in order to stop/restart the chip!  Note: we could simply hold
         * <oplock> right through here, but we'd rather not 'cos checking
         * the link state involves reading over the bit-serial MII bus,
         * which takes ~500us even when nothing's changed.  Holding <oplock>
         * would lock out the interrupt handler for the duration, so it's
         * better to release it first and reacquire it only if needed.
         */
        mutex_enter(dmfep->milock);
        if (dmfe_check_link(dmfep)) {
                mutex_enter(dmfep->oplock);
                dmfe_stop(dmfep);
                DRV_KS_INC(dmfep, KS_LINK_CHECK);
                if (dmfep->update_phy) {
                        /*
                         * The chip may reset itself for some unknown
                         * reason.  If this happens, the chip will use
                         * default settings (for speed, duplex, and autoneg),
                         * which possibly aren't the user's desired settings.
                         */
                        dmfe_update_phy(dmfep);
                        dmfep->update_phy = B_FALSE;
                }
                dmfe_recheck_link(dmfep, B_FALSE);
                if (dmfep->mac_state == DMFE_MAC_STARTED)
                        dmfe_start(dmfep);
                mutex_exit(dmfep->oplock);
        }
        mutex_exit(dmfep->milock);

        /*
         * Keep MAC up-to-date about the state of the link ...
         */
        mac_link_update(dmfep->mh, dmfep->link_state);

        return (DDI_INTR_CLAIMED);
}

static void
dmfe_wake_factotum(dmfe_t *dmfep, int ks_id, const char *why)
{
        DMFE_DEBUG(("dmfe_wake_factotum: %s [%d] flag %d",
            why, ks_id, dmfep->factotum_flag));

        ASSERT(mutex_owned(dmfep->oplock));
        DRV_KS_INC(dmfep, ks_id);

        if (dmfep->factotum_flag++ == 0)
                ddi_trigger_softintr(dmfep->factotum_id);
}

#undef  DMFE_DBG


/*
 * ========== Periodic Tasks (Cyclic handler & friends) ==========
 */

#define DMFE_DBG        DMFE_DBG_TICK   /* debug flag for this code */

/*
 * Periodic tick tasks, run from the cyclic handler
 *
 * Check the state of the link and wake the factotum if necessary
 */
static void
dmfe_tick_link_check(dmfe_t *dmfep, uint32_t gpsr, uint32_t istat)
{
        link_state_t phy_state;
        link_state_t utp_state;
        const char *why;
        int ks_id;

        _NOTE(ARGUNUSED(istat))

        ASSERT(mutex_owned(dmfep->oplock));

        /*
         * Is it time to wake the factotum?  We do so periodically, in
         * case the fast check below doesn't always reveal a link change
         */
        if (dmfep->link_poll_tix-- == 0) {
                dmfep->link_poll_tix = factotum_tix;
                why = "tick (link poll)";
                ks_id = KS_TICK_LINK_POLL;
        } else {
                why = NULL;
                ks_id = KS_TICK_LINK_STATE;
        }

        /*
         * Has the link status changed?  If so, we might want to wake
         * the factotum to deal with it.
         */
        phy_state = (gpsr & GPS_LINK_STATUS) ? LINK_STATE_UP : LINK_STATE_DOWN;
        utp_state = (gpsr & GPS_UTP_SIG) ? LINK_STATE_UP : LINK_STATE_DOWN;
        if (phy_state != utp_state)
                why = "tick (phy <> utp)";
        else if ((dmfep->link_state == LINK_STATE_UP) &&
            (phy_state == LINK_STATE_DOWN))
                why = "tick (UP -> DOWN)";
        else if (phy_state != dmfep->link_state) {
                if (dmfep->link_poll_tix > factotum_fast_tix)
                        dmfep->link_poll_tix = factotum_fast_tix;
        }

        if (why != NULL) {
                DMFE_DEBUG(("dmfe_%s: link %d phy %d utp %d",
                    why, dmfep->link_state, phy_state, utp_state));
                dmfe_wake_factotum(dmfep, ks_id, why);
        }
}
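
/*
 * Worked example (assuming _init() derives factotum_tix as
 * dmfe_link_poll_us / dmfe_tick_us; _init() is outside this section):
 * 5000000 / 25000 == 200, so with nothing else going on the factotum
 * still gets a link-poll wakeup about once every 200 cyclic
 * callbacks, i.e. every 5 seconds.
 */
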
/*
 * Periodic tick tasks, run from the cyclic handler
 *
 * Check for TX stall; flag an error and wake the factotum if so.
 */
static void
dmfe_tick_stall_check(dmfe_t *dmfep, uint32_t gpsr, uint32_t istat)
{
        boolean_t tx_stall;
        uint32_t tx_state;
        uint32_t limit;

        ASSERT(mutex_owned(dmfep->oplock));

        /*
         * Check for transmit stall ...
         *
         * IF there's at least one packet in the ring, AND the timeout
         * has elapsed, AND we can't reclaim any descriptors, THEN we've
         * stalled; we return B_TRUE to trigger a reset-and-recover cycle.
         *
         * Note that the timeout limit is based on the transmit engine
         * state; we allow the transmitter longer to make progress in
         * some states than in others, based on observations of this
         * chip's actual behaviour in the lab.
         *
         * By observation, we find that on about 1 in 10000 passes through
         * here, the TX lock is already held.  In that case, we'll skip
         * the check on this pass rather than wait.  Most likely, the send
         * routine was holding the lock when the interrupt happened, and
         * we'll succeed next time through.  In the event of a real stall,
         * the TX ring will fill up, after which the send routine won't be
         * called any more and then we're sure to get in.
         */
        tx_stall = B_FALSE;
        if (mutex_tryenter(dmfep->txlock)) {
                if (dmfep->tx.n_free < dmfep->tx.n_desc) {
                        tx_state = TX_PROCESS_STATE(istat);
                        if (gpsr & GPS_LINK_100)
                                limit = stall_100_tix[tx_state];
                        else
                                limit = stall_10_tix[tx_state];
                        if (++dmfep->tx_pending_tix >= limit &&
                            dmfe_reclaim_tx_desc(dmfep) == B_FALSE) {
                                dmfe_log(dmfep, "TX stall detected "
                                    "after %d ticks in state %d; "
                                    "automatic recovery initiated",
                                    dmfep->tx_pending_tix, tx_state);
                                tx_stall = B_TRUE;
                        }
                }
                mutex_exit(dmfep->txlock);
        }

        if (tx_stall) {
                dmfe_stop_chip(dmfep, CHIP_ERROR);
                dmfe_wake_factotum(dmfep, KS_TX_STALL, "tick (TX stall)");
        }
}
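
/*
 * Worked example (assuming the stall limits are simply the timeouts
 * divided by the tick interval; the per-state adjustment happens in
 * _init(), outside this section): at 100Mb/s, 50000 / 25000 == 2, so
 * roughly two consecutive cyclic callbacks without a reclaim mark the
 * transmitter as stalled, versus 200000 / 25000 == 8 at 10Mb/s.
 */
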

/*
 * Cyclic callback handler
 */
static void
dmfe_cyclic(void *arg)
{
	dmfe_t *dmfep = arg;			/* private device info	*/
	uint32_t istat;
	uint32_t gpsr;

	/*
	 * If the chip's not RUNNING, there's nothing to do.
	 * If we can't get the mutex straight away, we'll just
	 * skip this pass; we'll be back soon enough anyway.
	 */
	if (dmfep->chip_state != CHIP_RUNNING)
		return;
	if (mutex_tryenter(dmfep->oplock) == 0)
		return;

	/*
	 * Recheck chip state (it might have been stopped since we
	 * checked above).  If still running, call each of the *tick*
	 * tasks.  They will check for link change, TX stall, etc ...
	 */
	if (dmfep->chip_state == CHIP_RUNNING) {
		istat = dmfe_chip_get32(dmfep, STATUS_REG);
		gpsr = dmfe_chip_get32(dmfep, PHY_STATUS_REG);
		dmfe_tick_link_check(dmfep, gpsr, istat);
		dmfe_tick_stall_check(dmfep, gpsr, istat);
	}

	DRV_KS_INC(dmfep, KS_CYCLIC_RUN);
	mutex_exit(dmfep->oplock);
}

#undef DMFE_DBG


/*
 * ========== Hardware interrupt handler ==========
 */

#define	DMFE_DBG	DMFE_DBG_INT	/* debug flag for this code	*/

/*
 * dmfe_interrupt() -- handle chip interrupts
 */
static uint_t
dmfe_interrupt(caddr_t arg)
{
	dmfe_t *dmfep;			/* private device info	*/
	uint32_t interrupts;
	uint32_t istat;
	const char *msg;
	mblk_t *mp;
	boolean_t warning_msg = B_TRUE;

	dmfep = (void *)arg;

	/*
	 * A quick check as to whether the interrupt was from this
	 * device, before we even finish setting up all our local
	 * variables.  Note that reading the interrupt status register
	 * doesn't have any unpleasant side effects such as clearing
	 * the bits read, so it's quite OK to re-read it once we have
	 * determined that we are going to service this interrupt and
	 * grabbed the mutexen.
	 */
	istat = dmfe_chip_get32(dmfep, STATUS_REG);
	if ((istat & (NORMAL_SUMMARY_INT | ABNORMAL_SUMMARY_INT)) == 0)
		return (DDI_INTR_UNCLAIMED);

	/*
	 * Unfortunately, there can be a race condition between attach()
	 * adding the interrupt handler and initialising the mutexen,
	 * and the handler itself being called because of a pending
	 * interrupt.  So, we check <imask>; if it shows that interrupts
	 * haven't yet been enabled (and therefore we shouldn't really
	 * be here at all), we will just write back the value read from
	 * the status register, thus acknowledging (and clearing) *all*
	 * pending conditions without really servicing them, and claim
	 * the interrupt.
	 */
	if (dmfep->imask == 0) {
		DMFE_DEBUG(("dmfe_interrupt: early interrupt 0x%x", istat));
		dmfe_chip_put32(dmfep, STATUS_REG, istat);
		return (DDI_INTR_CLAIMED);
	}
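
	/*
	 * A sketch of the window this guards against (cf. dmfe_attach()):
	 *
	 *	ddi_add_intr(...);		<- handler callable from here
	 *	mutex_init(oplock, ...);	<- but locks aren't ready yet
	 *	...
	 *	dmfep->imask = ...;		<- interrupts really enabled
	 *
	 * Until <imask> becomes non-zero, we simply ack-and-discard
	 * whatever the status register happens to show.
	 */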

	/*
	 * We're committed to servicing this interrupt, but we
	 * need to get the lock before going any further ...
	 */
	mutex_enter(dmfep->oplock);
	DRV_KS_INC(dmfep, KS_INTERRUPT);

	/*
	 * Identify bits that represent enabled interrupts ...
	 */
	istat |= dmfe_chip_get32(dmfep, STATUS_REG);
	interrupts = istat & dmfep->imask;
	ASSERT(interrupts != 0);

	DMFE_DEBUG(("dmfe_interrupt: istat 0x%x -> 0x%x", istat, interrupts));

	/*
	 * Check for any interrupts other than TX/RX done.
	 * If there are any, they are considered Abnormal
	 * and will cause the chip to be reset.
	 */
	if (interrupts & ~(RX_PKTDONE_INT | TX_PKTDONE_INT)) {
		if (istat & ABNORMAL_SUMMARY_INT) {
			/*
			 * Any Abnormal interrupts will lead to us
			 * resetting the chip, so we don't bother
			 * to clear each interrupt individually.
			 *
			 * Our main task here is to identify the problem,
			 * by pointing out the most significant unexpected
			 * bit.  Additional bits may well be consequences
			 * of the first problem, so we consider the possible
			 * causes in order of severity.
			 */
			if (interrupts & SYSTEM_ERR_INT) {
				switch (istat & SYSTEM_ERR_BITS) {
				case SYSTEM_ERR_M_ABORT:
					msg = "Bus Master Abort";
					break;

				case SYSTEM_ERR_T_ABORT:
					msg = "Bus Target Abort";
					break;

				case SYSTEM_ERR_PARITY:
					msg = "Parity Error";
					break;

				default:
					msg = "Unknown System Bus Error";
					break;
				}
			} else if (interrupts & RX_STOPPED_INT) {
				msg = "RX process stopped";
			} else if (interrupts & RX_UNAVAIL_INT) {
				msg = "RX buffer unavailable";
				warning_msg = B_FALSE;
			} else if (interrupts & RX_WATCHDOG_INT) {
				msg = "RX watchdog timeout?";
			} else if (interrupts & RX_EARLY_INT) {
				msg = "RX early interrupt?";
			} else if (interrupts & TX_STOPPED_INT) {
				msg = "TX process stopped";
			} else if (interrupts & TX_JABBER_INT) {
				msg = "TX jabber timeout";
			} else if (interrupts & TX_UNDERFLOW_INT) {
				msg = "TX underflow?";
			} else if (interrupts & TX_EARLY_INT) {
				msg = "TX early interrupt?";
			} else if (interrupts & LINK_STATUS_INT) {
				msg = "Link status change?";
			} else if (interrupts & GP_TIMER_INT) {
				msg = "Timer expired?";
			} else {
				/*
				 * Defensive default, so we never pass
				 * an uninitialised <msg> to
				 * dmfe_warning() if an unlisted
				 * Abnormal bit shows up.
				 */
				msg = "unexpected interrupt status";
			}

			if (warning_msg)
				dmfe_warning(dmfep, "abnormal interrupt, "
				    "status 0x%x: %s", istat, msg);

			/*
			 * We don't want to run the entire reinitialisation
			 * code out of this (high-level?) interrupt, so we
			 * simply STOP the chip, and wake up the factotum
			 * to reinitialise it ...
			 */
			dmfe_stop_chip(dmfep, CHIP_ERROR);
			dmfe_wake_factotum(dmfep, KS_CHIP_ERROR,
			    "interrupt (error)");
		} else {
			/*
			 * We shouldn't really get here (it would mean
			 * there were some unprocessed enabled bits but
			 * they weren't Abnormal?), but we'll check just
			 * in case ...
			 */
			DMFE_DEBUG(("unexpected interrupt bits: 0x%x", istat));
		}
	}

	/*
	 * Acknowledge all the original bits - except in the case of an
	 * error, when we leave them unacknowledged so that the recovery
	 * code can see what was going on when the problem occurred ...
	 */
	if (dmfep->chip_state != CHIP_ERROR) {
		(void) dmfe_chip_put32(dmfep, STATUS_REG, istat);
		/*
		 * Read-after-write forces completion on the PCI bus.
		 */
		(void) dmfe_chip_get32(dmfep, STATUS_REG);
	}

	/*
	 * We've finished talking to the chip, so we can drop <oplock>
	 * before handling the normal interrupts, which only involve
	 * manipulation of descriptors ...
	 */
	mutex_exit(dmfep->oplock);

	if (interrupts & RX_PKTDONE_INT)
		if ((mp = dmfe_getp(dmfep)) != NULL)
			mac_rx(dmfep->mh, NULL, mp);

	if (interrupts & TX_PKTDONE_INT) {
		/*
		 * The only reason for taking this interrupt is to give
		 * MAC a chance to schedule queued packets after a
		 * ring-full condition.  To minimise the number of
		 * redundant TX-Done interrupts, we only mark two of the
		 * ring descriptors as 'interrupt-on-complete' - all the
		 * others are simply handed back without an interrupt.
		 */
		if (dmfe_reclaim_on_done && mutex_tryenter(dmfep->txlock)) {
			(void) dmfe_reclaim_tx_desc(dmfep);
			mutex_exit(dmfep->txlock);
		}
		mac_tx_update(dmfep->mh);
	}

	return (DDI_INTR_CLAIMED);
}
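
/*
 * A worked example of the TX-done policy above, assuming the default
 * 128-slot TX ring: with only two descriptors per ring cycle marked
 * 'interrupt-on-complete', a busy transmitter takes roughly one
 * TX-Done interrupt per 64 packets instead of one per packet.
 */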

#undef DMFE_DBG


/*
 * ========== Statistics update handler ==========
 */

#define	DMFE_DBG	DMFE_DBG_STATS	/* debug flag for this code	*/

static int
dmfe_m_stat(void *arg, uint_t stat, uint64_t *val)
{
	dmfe_t *dmfep = arg;
	int rv = 0;

	mutex_enter(dmfep->milock);
	mutex_enter(dmfep->oplock);
	mutex_enter(dmfep->rxlock);
	mutex_enter(dmfep->txlock);

	/* make sure we have all the stats collected */
	(void) dmfe_reclaim_tx_desc(dmfep);

	switch (stat) {
	case MAC_STAT_IFSPEED:
		*val = dmfep->op_stats_speed;
		break;

	case MAC_STAT_IPACKETS:
		*val = dmfep->rx_stats_ipackets;
		break;

	case MAC_STAT_MULTIRCV:
		*val = dmfep->rx_stats_multi;
		break;

	case MAC_STAT_BRDCSTRCV:
		*val = dmfep->rx_stats_bcast;
		break;

	case MAC_STAT_RBYTES:
		*val = dmfep->rx_stats_rbytes;
		break;

	case MAC_STAT_IERRORS:
		*val = dmfep->rx_stats_ierrors;
		break;

	case MAC_STAT_NORCVBUF:
		*val = dmfep->rx_stats_norcvbuf;
		break;

	case MAC_STAT_COLLISIONS:
		*val = dmfep->tx_stats_collisions;
		break;

	case MAC_STAT_OERRORS:
		*val = dmfep->tx_stats_oerrors;
		break;

	case MAC_STAT_OPACKETS:
		*val = dmfep->tx_stats_opackets;
		break;

	case MAC_STAT_MULTIXMT:
		*val = dmfep->tx_stats_multi;
		break;

	case MAC_STAT_BRDCSTXMT:
		*val = dmfep->tx_stats_bcast;
		break;

	case MAC_STAT_OBYTES:
		*val = dmfep->tx_stats_obytes;
		break;

	case MAC_STAT_OVERFLOWS:
		*val = dmfep->rx_stats_overflow;
		break;

	case MAC_STAT_UNDERFLOWS:
		*val = dmfep->tx_stats_underflow;
		break;

	case ETHER_STAT_ALIGN_ERRORS:
		*val = dmfep->rx_stats_align;
		break;

	case ETHER_STAT_FCS_ERRORS:
		*val = dmfep->rx_stats_fcs;
		break;

	case ETHER_STAT_TOOLONG_ERRORS:
		*val = dmfep->rx_stats_toolong;
		break;

	case ETHER_STAT_TOOSHORT_ERRORS:
		*val = dmfep->rx_stats_short;
		break;

	case ETHER_STAT_MACRCV_ERRORS:
		*val = dmfep->rx_stats_macrcv_errors;
		break;

	case ETHER_STAT_MACXMT_ERRORS:
		*val = dmfep->tx_stats_macxmt_errors;
		break;

	case ETHER_STAT_JABBER_ERRORS:
		*val = dmfep->tx_stats_jabber;
		break;

	case ETHER_STAT_CARRIER_ERRORS:
		*val = dmfep->tx_stats_nocarrier;
		break;

	case ETHER_STAT_TX_LATE_COLLISIONS:
		*val = dmfep->tx_stats_xmtlatecoll;
		break;

	case ETHER_STAT_EX_COLLISIONS:
		*val = dmfep->tx_stats_excoll;
		break;

	case ETHER_STAT_DEFER_XMTS:
		*val = dmfep->tx_stats_defer;
		break;

	case ETHER_STAT_FIRST_COLLISIONS:
		*val = dmfep->tx_stats_first_coll;
		break;

	case ETHER_STAT_MULTI_COLLISIONS:
		*val = dmfep->tx_stats_multi_coll;
		break;

	case ETHER_STAT_XCVR_INUSE:
		*val = dmfep->phy_inuse;
		break;

	case ETHER_STAT_XCVR_ID:
		*val = dmfep->phy_id;
		break;

	case ETHER_STAT_XCVR_ADDR:
		*val = dmfep->phy_addr;
		break;

	case ETHER_STAT_LINK_DUPLEX:
		*val = dmfep->op_stats_duplex;
		break;

	case ETHER_STAT_CAP_100T4:
		*val = dmfep->param_bmsr_100T4;
		break;

	case ETHER_STAT_CAP_100FDX:
		*val = dmfep->param_bmsr_100fdx;
		break;

	case ETHER_STAT_CAP_100HDX:
		*val = dmfep->param_bmsr_100hdx;
		break;

	case ETHER_STAT_CAP_10FDX:
		*val = dmfep->param_bmsr_10fdx;
		break;

	case ETHER_STAT_CAP_10HDX:
		*val = dmfep->param_bmsr_10hdx;
		break;

	case ETHER_STAT_CAP_AUTONEG:
		*val = dmfep->param_bmsr_autoneg;
		break;

	case ETHER_STAT_CAP_REMFAULT:
		*val = dmfep->param_bmsr_remfault;
		break;

	case ETHER_STAT_ADV_CAP_AUTONEG:
		*val = dmfep->param_autoneg;
		break;

	case ETHER_STAT_ADV_CAP_100T4:
		*val = dmfep->param_anar_100T4;
		break;

	case ETHER_STAT_ADV_CAP_100FDX:
		*val = dmfep->param_anar_100fdx;
		break;

	case ETHER_STAT_ADV_CAP_100HDX:
		*val = dmfep->param_anar_100hdx;
		break;

	case ETHER_STAT_ADV_CAP_10FDX:
		*val = dmfep->param_anar_10fdx;
		break;

	case ETHER_STAT_ADV_CAP_10HDX:
		*val = dmfep->param_anar_10hdx;
		break;

	case ETHER_STAT_ADV_REMFAULT:
		*val = dmfep->param_anar_remfault;
		break;

	case ETHER_STAT_LP_CAP_AUTONEG:
		*val = dmfep->param_lp_autoneg;
		break;

	case ETHER_STAT_LP_CAP_100T4:
		*val = dmfep->param_lp_100T4;
		break;

	case ETHER_STAT_LP_CAP_100FDX:
		*val = dmfep->param_lp_100fdx;
		break;

	case ETHER_STAT_LP_CAP_100HDX:
		*val = dmfep->param_lp_100hdx;
		break;

	case ETHER_STAT_LP_CAP_10FDX:
		*val = dmfep->param_lp_10fdx;
		break;

	case ETHER_STAT_LP_CAP_10HDX:
		*val = dmfep->param_lp_10hdx;
		break;

	case ETHER_STAT_LP_REMFAULT:
		*val = dmfep->param_lp_remfault;
		break;

	default:
		rv = ENOTSUP;
	}

	mutex_exit(dmfep->txlock);
	mutex_exit(dmfep->rxlock);
	mutex_exit(dmfep->oplock);
	mutex_exit(dmfep->milock);

	return (rv);
}
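
/*
 * Note the acquisition order used above: <milock>, then <oplock>,
 * then <rxlock>, then <txlock>, released in reverse.  This is the
 * driver's lock hierarchy; taking the locks in one fixed order
 * everywhere is what keeps the multi-lock paths deadlock-free (a
 * hypothetical path taking <txlock> before <oplock> could deadlock
 * against this one).
 */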

#undef DMFE_DBG


/*
 * ========== Ioctl handler & subfunctions ==========
 */

#define	DMFE_DBG	DMFE_DBG_IOCTL	/* debug flag for this code	*/

/*
 * Loopback operation
 *
 * Support access to the internal loopback and external loopback
 * functions selected via the Operation Mode Register (OPR).
 * These will be used by netlbtest (see BugId 4370609)
 *
 * Note that changing the loopback mode causes a stop/restart cycle.
 *
 * It would be nice to evolve this to support the ioctls in sys/netlb.h,
 * but then it would be even better to use Brussels to configure this.
 */
static enum ioc_reply
dmfe_loop_ioctl(dmfe_t *dmfep, queue_t *wq, mblk_t *mp, int cmd)
{
	loopback_t *loop_req_p;
	uint32_t loopmode;

	if (mp->b_cont == NULL || MBLKL(mp->b_cont) < sizeof (loopback_t))
		return (IOC_INVAL);

	loop_req_p = (void *)mp->b_cont->b_rptr;

	switch (cmd) {
	default:
		/*
		 * This should never happen ...
		 */
		dmfe_error(dmfep, "dmfe_loop_ioctl: invalid cmd 0x%x", cmd);
		return (IOC_INVAL);

	case DMFE_GET_LOOP_MODE:
		/*
		 * This doesn't return the current loopback mode - it
		 * returns a bitmask :-( of all possible loopback modes
		 */
		DMFE_DEBUG(("dmfe_loop_ioctl: GET_LOOP_MODE"));
		loop_req_p->loopback = DMFE_LOOPBACK_MODES;
		miocack(wq, mp, sizeof (loopback_t), 0);
		return (IOC_DONE);

	case DMFE_SET_LOOP_MODE:
		/*
		 * Select any of the various loopback modes
		 */
		DMFE_DEBUG(("dmfe_loop_ioctl: SET_LOOP_MODE %d",
		    loop_req_p->loopback));
		switch (loop_req_p->loopback) {
		default:
			return (IOC_INVAL);

		case DMFE_LOOPBACK_OFF:
			loopmode = LOOPBACK_OFF;
			break;

		case DMFE_PHY_A_LOOPBACK_ON:
			loopmode = LOOPBACK_PHY_A;
			break;

		case DMFE_PHY_D_LOOPBACK_ON:
			loopmode = LOOPBACK_PHY_D;
			break;

		case DMFE_INT_LOOPBACK_ON:
			loopmode = LOOPBACK_INTERNAL;
			break;
		}

		if ((dmfep->opmode & LOOPBACK_MODE_MASK) != loopmode) {
			dmfep->opmode &= ~LOOPBACK_MODE_MASK;
			dmfep->opmode |= loopmode;
			return (IOC_RESTART_ACK);
		}

		return (IOC_ACK);
	}
}
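
/*
 * A hypothetical userland sketch of exercising this path with the
 * STREAMS I_STR ioctl (structure layout assumed from the driver's
 * private header):
 *
 *	loopback_t lb;
 *	struct strioctl sioc;
 *
 *	lb.loopback = DMFE_INT_LOOPBACK_ON;
 *	sioc.ic_cmd = DMFE_SET_LOOP_MODE;
 *	sioc.ic_timout = -1;
 *	sioc.ic_len = sizeof (lb);
 *	sioc.ic_dp = (char *)&lb;
 *	(void) ioctl(fd, I_STR, &sioc);
 *
 * A mode change is ACKed only after the stop/restart cycle noted
 * above.
 */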

/*
 * Specific dmfe IOCTLs, the mac module handles the generic ones.
 */
static void
dmfe_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
{
	dmfe_t *dmfep = arg;
	struct iocblk *iocp;
	enum ioc_reply status;
	int cmd;

	/*
	 * Validate the command before bothering with the mutexen ...
	 */
	iocp = (void *)mp->b_rptr;
	cmd = iocp->ioc_cmd;
	switch (cmd) {
	default:
		DMFE_DEBUG(("dmfe_m_ioctl: unknown cmd 0x%x", cmd));
		miocnak(wq, mp, 0, EINVAL);
		return;

	case DMFE_SET_LOOP_MODE:
	case DMFE_GET_LOOP_MODE:
	case ND_GET:
	case ND_SET:
		break;
	}

	mutex_enter(dmfep->milock);
	mutex_enter(dmfep->oplock);

	switch (cmd) {
	default:
		_NOTE(NOTREACHED)
		status = IOC_INVAL;
		break;

	case DMFE_SET_LOOP_MODE:
	case DMFE_GET_LOOP_MODE:
		status = dmfe_loop_ioctl(dmfep, wq, mp, cmd);
		break;

	case ND_GET:
	case ND_SET:
		status = dmfe_nd_ioctl(dmfep, wq, mp, cmd);
		break;
	}

	/*
	 * Do we need to restart?
	 */
	switch (status) {
	default:
		break;

	case IOC_RESTART_ACK:
	case IOC_RESTART:
		/*
		 * PHY parameters changed; we need to stop, update the
		 * PHY layer and restart before sending the reply or ACK
		 */
		dmfe_stop(dmfep);
		dmfe_update_phy(dmfep);
		dmfep->update_phy = B_FALSE;

		/*
		 * The link will now most likely go DOWN and UP, because
		 * we've changed the loopback state or the link parameters
		 * or autonegotiation.  So we have to check that it's
		 * settled down before we restart the TX/RX processes.
		 * The ioctl code will have planted some reason strings
		 * to explain what's happening, so the link state change
		 * messages won't be printed on the console.  We wake the
		 * factotum to deal with link notifications, if any ...
		 */
		if (dmfe_check_link(dmfep)) {
			dmfe_recheck_link(dmfep, B_TRUE);
			dmfe_wake_factotum(dmfep, KS_LINK_CHECK, "ioctl");
		}

		if (dmfep->mac_state == DMFE_MAC_STARTED)
			dmfe_start(dmfep);
		break;
	}

	/*
	 * The 'reasons-for-link-change', if any, don't apply any more
	 */
	mutex_exit(dmfep->oplock);
	mutex_exit(dmfep->milock);

	/*
	 * Finally, decide how to reply
	 */
	switch (status) {
	default:
		/*
		 * Error, reply with a NAK and EINVAL
		 */
		miocnak(wq, mp, 0, EINVAL);
		break;

	case IOC_RESTART_ACK:
	case IOC_ACK:
		/*
		 * OK, reply with an ACK
		 */
		miocack(wq, mp, 0, 0);
		break;

	case IOC_RESTART:
	case IOC_REPLY:
		/*
		 * OK, send prepared reply
		 */
		qreply(wq, mp);
		break;

	case IOC_DONE:
		/*
		 * OK, reply already sent
		 */
		break;
	}
}
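
/*
 * Summary of the <status> -> reply mapping implemented above (any
 * restart work happens first, under the locks):
 *
 *	IOC_ACK, IOC_RESTART_ACK	-> miocack()
 *	IOC_REPLY, IOC_RESTART		-> qreply() (prepared reply)
 *	IOC_DONE			-> nothing; already replied
 *	anything else			-> miocnak(EINVAL)
 */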

#undef DMFE_DBG


/*
 * ========== Per-instance setup/teardown code ==========
 */

#define	DMFE_DBG	DMFE_DBG_INIT	/* debug flag for this code	*/

/*
 * Determine local MAC address & broadcast address for this interface
 */
static void
dmfe_find_mac_address(dmfe_t *dmfep)
{
	uchar_t *prop;
	uint_t propsize;
	int err;

	/*
	 * We have to find the "vendor's factory-set address".  This is
	 * the value of the property "local-mac-address", as set by OBP
	 * (or a .conf file!)
	 *
	 * If the property is not there, then we try to find the factory
	 * mac address from the device's serial EEPROM.
	 */
	bzero(dmfep->curr_addr, sizeof (dmfep->curr_addr));
	err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, dmfep->devinfo,
	    DDI_PROP_DONTPASS, localmac_propname, &prop, &propsize);
	if (err == DDI_PROP_SUCCESS) {
		if (propsize == ETHERADDRL)
			ethaddr_copy(prop, dmfep->curr_addr);
		ddi_prop_free(prop);
	} else {
		/* no property set... check eeprom */
		dmfe_read_eeprom(dmfep, EEPROM_EN_ADDR, dmfep->curr_addr,
		    ETHERADDRL);
	}

	DMFE_DEBUG(("dmfe_find_mac_address: factory %s",
	    ether_sprintf((void *)dmfep->curr_addr)));
}

static int
dmfe_alloc_dma_mem(dmfe_t *dmfep, size_t memsize,
	size_t setup, size_t slop, ddi_device_acc_attr_t *attr_p,
	uint_t dma_flags, dma_area_t *dma_p)
{
	ddi_dma_cookie_t dma_cookie;
	uint_t ncookies;
	int err;

	/*
	 * Allocate handle
	 */
	err = ddi_dma_alloc_handle(dmfep->devinfo, &dma_attr,
	    DDI_DMA_SLEEP, NULL, &dma_p->dma_hdl);
	if (err != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * Allocate memory
	 */
	err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize + setup + slop,
	    attr_p, dma_flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING),
	    DDI_DMA_SLEEP, NULL,
	    &dma_p->mem_va, &dma_p->alength, &dma_p->acc_hdl);
	if (err != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * Bind the two together
	 */
	err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL,
	    dma_p->mem_va, dma_p->alength, dma_flags,
	    DDI_DMA_SLEEP, NULL, &dma_cookie, &ncookies);
	if (err != DDI_DMA_MAPPED)
		return (DDI_FAILURE);
	if ((dma_p->ncookies = ncookies) != 1)
		return (DDI_FAILURE);

	dma_p->mem_dvma = dma_cookie.dmac_address;
	if (setup > 0) {
		dma_p->setup_dvma = dma_p->mem_dvma + memsize;
		dma_p->setup_va = dma_p->mem_va + memsize;
	} else {
		dma_p->setup_dvma = 0;
		dma_p->setup_va = NULL;
	}

	return (DDI_SUCCESS);
}
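
/*
 * The sequence above is the standard three-step DDI DMA setup
 * (allocate handle, allocate memory, bind).  A caller sketch, for a
 * hypothetical 2048-byte consistent area with no setup/slop space:
 *
 *	dma_area_t area;
 *
 *	if (dmfe_alloc_dma_mem(dmfep, 2048, 0, 0, &dmfe_reg_accattr,
 *	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, &area) != DDI_SUCCESS)
 *		return (DDI_FAILURE);
 *
 * Note that a bind yielding more than one cookie is treated as
 * failure: each ring/buffer area must look physically contiguous to
 * the device.
 */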

/*
 * This function allocates the transmit and receive buffers and descriptors.
 */
static int
dmfe_alloc_bufs(dmfe_t *dmfep)
{
	size_t memsize;
	int err;

	/*
	 * Allocate memory & handles for TX descriptor ring
	 */
	memsize = dmfep->tx.n_desc * sizeof (struct tx_desc_type);
	err = dmfe_alloc_dma_mem(dmfep, memsize, SETUPBUF_SIZE, DMFE_SLOP,
	    &dmfe_reg_accattr, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &dmfep->tx_desc);
	if (err != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * Allocate memory & handles for TX buffers
	 */
	memsize = dmfep->tx.n_desc * DMFE_BUF_SIZE;
	err = dmfe_alloc_dma_mem(dmfep, memsize, 0, 0,
	    &dmfe_data_accattr, DDI_DMA_WRITE | DMFE_DMA_MODE,
	    &dmfep->tx_buff);
	if (err != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * Allocate memory & handles for RX descriptor ring
	 */
	memsize = dmfep->rx.n_desc * sizeof (struct rx_desc_type);
	err = dmfe_alloc_dma_mem(dmfep, memsize, 0, DMFE_SLOP,
	    &dmfe_reg_accattr, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &dmfep->rx_desc);
	if (err != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * Allocate memory & handles for RX buffers
	 */
	memsize = dmfep->rx.n_desc * DMFE_BUF_SIZE;
	err = dmfe_alloc_dma_mem(dmfep, memsize, 0, 0,
	    &dmfe_data_accattr, DDI_DMA_READ | DMFE_DMA_MODE, &dmfep->rx_buff);
	if (err != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * Allocate bitmasks for tx packet type tracking
	 */
	dmfep->tx_mcast = kmem_zalloc(dmfep->tx.n_desc / NBBY, KM_SLEEP);
	dmfep->tx_bcast = kmem_zalloc(dmfep->tx.n_desc / NBBY, KM_SLEEP);

	return (DDI_SUCCESS);
}

static void
dmfe_free_dma_mem(dma_area_t *dma_p)
{
	if (dma_p->dma_hdl != NULL) {
		if (dma_p->ncookies) {
			(void) ddi_dma_unbind_handle(dma_p->dma_hdl);
			dma_p->ncookies = 0;
		}
		ddi_dma_free_handle(&dma_p->dma_hdl);
		dma_p->dma_hdl = NULL;
		dma_p->mem_dvma = 0;
		dma_p->setup_dvma = 0;
	}

	if (dma_p->acc_hdl != NULL) {
		ddi_dma_mem_free(&dma_p->acc_hdl);
		dma_p->acc_hdl = NULL;
		dma_p->mem_va = NULL;
		dma_p->setup_va = NULL;
	}
}

/*
 * This routine frees the transmit and receive buffers and descriptors.
 * Make sure the chip is stopped before calling it!
 */
static void
dmfe_free_bufs(dmfe_t *dmfep)
{
	dmfe_free_dma_mem(&dmfep->rx_buff);
	dmfe_free_dma_mem(&dmfep->rx_desc);
	dmfe_free_dma_mem(&dmfep->tx_buff);
	dmfe_free_dma_mem(&dmfep->tx_desc);
	kmem_free(dmfep->tx_mcast, dmfep->tx.n_desc / NBBY);
	kmem_free(dmfep->tx_bcast, dmfep->tx.n_desc / NBBY);
}

static void
dmfe_unattach(dmfe_t *dmfep)
{
	/*
	 * Clean up and free all DMFE data structures
	 */
	if (dmfep->cycid != NULL) {
		ddi_periodic_delete(dmfep->cycid);
		dmfep->cycid = NULL;
	}

	if (dmfep->ksp_drv != NULL)
		kstat_delete(dmfep->ksp_drv);
	if (dmfep->progress & PROGRESS_HWINT) {
		ddi_remove_intr(dmfep->devinfo, 0, dmfep->iblk);
		mutex_destroy(dmfep->txlock);
		mutex_destroy(dmfep->rxlock);
		mutex_destroy(dmfep->oplock);
	}
	if (dmfep->progress & PROGRESS_SOFTINT)
		ddi_remove_softintr(dmfep->factotum_id);
	if (dmfep->progress & PROGRESS_BUFS)
		dmfe_free_bufs(dmfep);
	if (dmfep->progress & PROGRESS_REGS)
		ddi_regs_map_free(&dmfep->io_handle);
	if (dmfep->progress & PROGRESS_NDD)
		dmfe_nd_cleanup(dmfep);

	kmem_free(dmfep, sizeof (*dmfep));
}
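
/*
 * Teardown above is driven by the <progress> bitmask: dmfe_attach()
 * sets one PROGRESS_* bit as each resource is acquired, and
 * dmfe_unattach() releases only what those bits say was acquired.
 * That lets every failure path in attach() use a single exit point,
 * no matter how far initialisation got before failing.
 */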

static int
dmfe_config_init(dmfe_t *dmfep, chip_id_t *idp)
{
	ddi_acc_handle_t handle;
	uint32_t regval;

	if (pci_config_setup(dmfep->devinfo, &handle) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * Get vendor/device/revision.  We expect (but don't check) that
	 * (vendorid == DAVICOM_VENDOR_ID) && (deviceid == DEVICE_ID_9102)
	 */
	idp->vendor = pci_config_get16(handle, PCI_CONF_VENID);
	idp->device = pci_config_get16(handle, PCI_CONF_DEVID);
	idp->revision = pci_config_get8(handle, PCI_CONF_REVID);

	/*
	 * Turn on Bus Master Enable bit and ensure the device is not asleep
	 */
	regval = pci_config_get32(handle, PCI_CONF_COMM);
	pci_config_put32(handle, PCI_CONF_COMM, (regval | PCI_COMM_ME));

	regval = pci_config_get32(handle, PCI_DMFE_CONF_CFDD);
	pci_config_put32(handle, PCI_DMFE_CONF_CFDD,
	    regval & ~(CFDD_SLEEP | CFDD_SNOOZE));

	pci_config_teardown(&handle);
	return (DDI_SUCCESS);
}

struct ks_index {
	int index;
	char *name;
};

static const struct ks_index ks_drv_names[] = {
	{ KS_INTERRUPT,			"intr" },
	{ KS_CYCLIC_RUN,		"cyclic_run" },

	{ KS_TICK_LINK_STATE,		"link_state_change" },
	{ KS_TICK_LINK_POLL,		"link_state_poll" },
	{ KS_TX_STALL,			"tx_stall_detect" },
	{ KS_CHIP_ERROR,		"chip_error_interrupt" },

	{ KS_FACTOTUM_RUN,		"factotum_run" },
	{ KS_RECOVERY,			"factotum_recover" },
	{ KS_LINK_CHECK,		"factotum_link_check" },

	{ KS_LINK_UP_CNT,		"link_up_cnt" },
	{ KS_LINK_DROP_CNT,		"link_drop_cnt" },

	{ KS_MIIREG_BMSR,		"mii_status" },
	{ KS_MIIREG_ANAR,		"mii_advert_cap" },
	{ KS_MIIREG_ANLPAR,		"mii_partner_cap" },
	{ KS_MIIREG_ANER,		"mii_expansion_cap" },
	{ KS_MIIREG_DSCSR,		"mii_dscsr" },

	{ -1,				NULL }
};

static void
dmfe_init_kstats(dmfe_t *dmfep, int instance)
{
	kstat_t *ksp;
	kstat_named_t *knp;
	const struct ks_index *ksip;

	/* no need to create MII stats, the mac module already does it */

	/* Create and initialise driver-defined kstats */
	ksp = kstat_create(DRIVER_NAME, instance, "dmfe_events", "net",
	    KSTAT_TYPE_NAMED, KS_DRV_COUNT, KSTAT_FLAG_PERSISTENT);
	if (ksp != NULL) {
		for (knp = ksp->ks_data, ksip = ks_drv_names;
		    ksip->name != NULL; ++ksip) {
			kstat_named_init(&knp[ksip->index], ksip->name,
			    KSTAT_DATA_UINT64);
		}
		dmfep->ksp_drv = ksp;
		dmfep->knp_drv = knp;
		kstat_install(ksp);
	} else {
		dmfe_error(dmfep, "kstat_create() for dmfe_events failed");
	}
}
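
/*
 * The counters above are visible from userland via kstat(1M); an
 * illustrative invocation, assuming DRIVER_NAME is "dmfe" and
 * instance 0:
 *
 *	$ kstat -m dmfe -i 0 -n dmfe_events
 */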

static int
dmfe_resume(dev_info_t *devinfo)
{
	dmfe_t *dmfep;				/* Our private data	*/
	chip_id_t chipid;

	dmfep = ddi_get_driver_private(devinfo);
	if (dmfep == NULL)
		return (DDI_FAILURE);

	/*
	 * Refuse to resume if the data structures aren't consistent
	 */
	if (dmfep->devinfo != devinfo)
		return (DDI_FAILURE);

	/*
	 * Refuse to resume if the chip's changed its identity (*boggle*)
	 */
	if (dmfe_config_init(dmfep, &chipid) != DDI_SUCCESS)
		return (DDI_FAILURE);
	if (chipid.vendor != dmfep->chipid.vendor)
		return (DDI_FAILURE);
	if (chipid.device != dmfep->chipid.device)
		return (DDI_FAILURE);
	if (chipid.revision != dmfep->chipid.revision)
		return (DDI_FAILURE);

	/*
	 * All OK, reinitialise h/w & kick off MAC scheduling
	 */
	mutex_enter(dmfep->oplock);
	dmfe_restart(dmfep);
	mutex_exit(dmfep->oplock);
	mac_tx_update(dmfep->mh);
	return (DDI_SUCCESS);
}

/*
 * attach(9E) -- Attach a device to the system
 *
 * Called once for each board successfully probed.
 */
static int
dmfe_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
{
	mac_register_t *macp;
	dmfe_t *dmfep;				/* Our private data	*/
	uint32_t csr6;
	int instance;
	int err;

	instance = ddi_get_instance(devinfo);

	switch (cmd) {
	default:
		return (DDI_FAILURE);

	case DDI_RESUME:
		return (dmfe_resume(devinfo));

	case DDI_ATTACH:
		break;
	}

	dmfep = kmem_zalloc(sizeof (*dmfep), KM_SLEEP);
	ddi_set_driver_private(devinfo, dmfep);
	dmfep->devinfo = devinfo;
	dmfep->dmfe_guard = DMFE_GUARD;

	/*
	 * Initialize more fields in DMFE private data
	 * Determine the local MAC address
	 */
#if DMFEDEBUG
	dmfep->debug = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 0,
	    debug_propname, dmfe_debug);
#endif	/* DMFEDEBUG */
	dmfep->cycid = NULL;
	(void) snprintf(dmfep->ifname, sizeof (dmfep->ifname), "dmfe%d",
	    instance);

	/*
	 * Check for custom "opmode-reg-value" property;
	 * if none, use the defaults below for CSR6 ...
	 */
	csr6 = TX_THRESHOLD_HI | STORE_AND_FORWARD | EXT_MII_IF | OPN_25_MB1;
	dmfep->opmode = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, opmode_propname, csr6);

	/*
	 * Read chip ID & set up config space command register(s)
	 */
	if (dmfe_config_init(dmfep, &dmfep->chipid) != DDI_SUCCESS) {
		dmfe_error(dmfep, "dmfe_config_init() failed");
		goto attach_fail;
	}
	dmfep->progress |= PROGRESS_CONFIG;

	/*
	 * Register NDD-tweakable parameters
	 */
	if (dmfe_nd_init(dmfep)) {
		dmfe_error(dmfep, "dmfe_nd_init() failed");
		goto attach_fail;
	}
	dmfep->progress |= PROGRESS_NDD;

	/*
	 * Map operating registers
	 */
	err = ddi_regs_map_setup(devinfo, DMFE_PCI_RNUMBER,
	    &dmfep->io_reg, 0, 0, &dmfe_reg_accattr, &dmfep->io_handle);
	if (err != DDI_SUCCESS) {
		dmfe_error(dmfep, "ddi_regs_map_setup() failed");
		goto attach_fail;
	}
	dmfep->progress |= PROGRESS_REGS;

	/*
	 * Get our MAC address.
	 */
	dmfe_find_mac_address(dmfep);

	/*
	 * Allocate the TX and RX descriptors/buffers.
	 */
	dmfep->tx.n_desc = dmfe_tx_desc;
	dmfep->rx.n_desc = dmfe_rx_desc;
	err = dmfe_alloc_bufs(dmfep);
	if (err != DDI_SUCCESS) {
		dmfe_error(dmfep, "DMA buffer allocation failed");
		goto attach_fail;
	}
	dmfep->progress |= PROGRESS_BUFS;

	/*
	 * Add the softint handler
	 */
	dmfep->link_poll_tix = factotum_start_tix;
	if (ddi_add_softintr(devinfo, DDI_SOFTINT_LOW, &dmfep->factotum_id,
	    NULL, NULL, dmfe_factotum, (caddr_t)dmfep) != DDI_SUCCESS) {
		dmfe_error(dmfep, "ddi_add_softintr() failed");
		goto attach_fail;
	}
	dmfep->progress |= PROGRESS_SOFTINT;

	/*
	 * Add the h/w interrupt handler & initialise mutexen
	 */
	if (ddi_add_intr(devinfo, 0, &dmfep->iblk, NULL,
	    dmfe_interrupt, (caddr_t)dmfep) != DDI_SUCCESS) {
		dmfe_error(dmfep, "ddi_add_intr() failed");
		goto attach_fail;
	}
	mutex_init(dmfep->milock, NULL, MUTEX_DRIVER, NULL);
	mutex_init(dmfep->oplock, NULL, MUTEX_DRIVER, dmfep->iblk);
	mutex_init(dmfep->rxlock, NULL, MUTEX_DRIVER, dmfep->iblk);
	mutex_init(dmfep->txlock, NULL, MUTEX_DRIVER, dmfep->iblk);
	dmfep->progress |= PROGRESS_HWINT;
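
	/*
	 * A note on the ordering above: the handler is added first so
	 * that <iblk> is valid when the three locks the handler can
	 * contend for (oplock, rxlock, txlock) are initialised with
	 * that cookie; <milock> is never taken at interrupt level, so
	 * it is created without one.  The early-interrupt guard in
	 * dmfe_interrupt() covers the window in between.
	 */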

	/*
	 * Create & initialise named kstats
	 */
	dmfe_init_kstats(dmfep, instance);

	/*
	 * Reset & initialise the chip and the ring buffers
	 * Initialise the (internal) PHY
	 */
	mutex_enter(dmfep->oplock);
	mutex_enter(dmfep->rxlock);
	mutex_enter(dmfep->txlock);

	dmfe_reset(dmfep);

	/*
	 * Prepare the setup packet
	 */
	bzero(dmfep->tx_desc.setup_va, SETUPBUF_SIZE);
	bzero(dmfep->mcast_refs, MCASTBUF_SIZE);
	dmfep->addr_set = B_FALSE;
	dmfep->opmode &= ~(PROMISC_MODE | PASS_MULTICAST);
	dmfep->mac_state = DMFE_MAC_RESET;

	mutex_exit(dmfep->txlock);
	mutex_exit(dmfep->rxlock);
	mutex_exit(dmfep->oplock);

	dmfep->link_state = LINK_STATE_UNKNOWN;
	if (dmfe_init_phy(dmfep) != B_TRUE)
		goto attach_fail;
	dmfep->update_phy = B_TRUE;

	/*
	 * Send a reasonable setup frame.  This configures our starting
	 * address and the broadcast address.
	 */
	(void) dmfe_m_unicst(dmfep, dmfep->curr_addr);

	/*
	 * Initialize pointers to device specific functions which
	 * will be used by the generic layer.
	 */
	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
		goto attach_fail;
	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	macp->m_driver = dmfep;
	macp->m_dip = devinfo;
	macp->m_src_addr = dmfep->curr_addr;
	macp->m_callbacks = &dmfe_m_callbacks;
	macp->m_min_sdu = 0;
	macp->m_max_sdu = ETHERMTU;
	macp->m_margin = VLAN_TAGSZ;

	/*
	 * Finally, we're ready to register ourselves with the MAC layer
	 * interface; if this succeeds, we're all ready to start()
	 */
	err = mac_register(macp, &dmfep->mh);
	mac_free(macp);
	if (err != 0)
		goto attach_fail;
	ASSERT(dmfep->dmfe_guard == DMFE_GUARD);

	/*
	 * Install the cyclic callback that we use to check for link
	 * status, transmit stall, etc.  The cyclic callback
	 * (dmfe_cyclic()) is then invoked periodically in kernel
	 * context.
	 */
	ASSERT(dmfep->cycid == NULL);
	dmfep->cycid = ddi_periodic_add(dmfe_cyclic, dmfep,
	    dmfe_tick_us * 1000, DDI_IPL_0);
	return (DDI_SUCCESS);

attach_fail:
	dmfe_unattach(dmfep);
	return (DDI_FAILURE);
}

/*
 * dmfe_suspend() -- suspend transmit/receive for powerdown
 */
static int
dmfe_suspend(dmfe_t *dmfep)
{
	/*
	 * Just stop processing ...
	 */
	mutex_enter(dmfep->oplock);
	dmfe_stop(dmfep);
	mutex_exit(dmfep->oplock);

	return (DDI_SUCCESS);
}

/*
 * detach(9E) -- Detach a device from the system
 */
static int
dmfe_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
{
	dmfe_t *dmfep;

	dmfep = ddi_get_driver_private(devinfo);

	switch (cmd) {
	default:
		return (DDI_FAILURE);

	case DDI_SUSPEND:
		return (dmfe_suspend(dmfep));

	case DDI_DETACH:
		break;
	}

	/*
	 * Unregister from the MAC subsystem.  This can fail, in
	 * particular if there are DLPI style-2 streams still open -
	 * in which case we just return failure without shutting
	 * down chip operations.
	 */
	if (mac_unregister(dmfep->mh) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * All activity stopped, so we can clean up & exit
	 */
	dmfe_unattach(dmfep);
	return (DDI_SUCCESS);
}


/*
 * ========== Module Loading Data & Entry Points ==========
 */

DDI_DEFINE_STREAM_OPS(dmfe_dev_ops, nulldev, nulldev, dmfe_attach, dmfe_detach,
    nodev, NULL, D_MP, NULL);

static struct modldrv dmfe_modldrv = {
	&mod_driverops,		/* Type of module.  This one is a driver */
	dmfe_ident,		/* short description */
	&dmfe_dev_ops		/* driver specific ops */
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *)&dmfe_modldrv, NULL
};

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

int
_init(void)
{
	uint32_t tmp100;
	uint32_t tmp10;
	int i;
	int status;

	/* Calculate global timing parameters */
	tmp100 = (dmfe_tx100_stall_us+dmfe_tick_us-1)/dmfe_tick_us;
	tmp10 = (dmfe_tx10_stall_us+dmfe_tick_us-1)/dmfe_tick_us;

	for (i = 0; i <= TX_PROCESS_MAX_STATE; ++i) {
		switch (i) {
		case TX_PROCESS_STATE(TX_PROCESS_FETCH_DATA):
		case TX_PROCESS_STATE(TX_PROCESS_WAIT_END):
			/*
			 * The chip doesn't spontaneously recover from
			 * a stall in these states, so we reset early
			 */
			stall_100_tix[i] = tmp100;
			stall_10_tix[i] = tmp10;
			break;

		case TX_PROCESS_STATE(TX_PROCESS_SUSPEND):
		default:
			/*
			 * The chip has been seen to spontaneously recover
			 * after an apparent stall in the SUSPEND state,
			 * so we'll allow it rather longer to do so.  As
			 * stalls in other states have not been observed,
			 * we'll use long timeouts for them too ...
			 */
			stall_100_tix[i] = tmp100 * 20;
			stall_10_tix[i] = tmp10 * 20;
			break;
		}
	}

	factotum_tix = (dmfe_link_poll_us+dmfe_tick_us-1)/dmfe_tick_us;
	factotum_fast_tix = 1+(factotum_tix/5);
	factotum_start_tix = 1+(factotum_tix*2);
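
	/*
	 * Worked example, with purely hypothetical tunable settings:
	 * if dmfe_tick_us were 25000 (25ms) and dmfe_tx100_stall_us
	 * were 1000000 (1s), then tmp100 = (1000000+25000-1)/25000
	 * = 40 ticks (a round-up integer division), so FETCH_DATA and
	 * WAIT_END would stall out after 40 ticks and the other states
	 * after 40*20 = 800.  Likewise dmfe_link_poll_us = 5000000
	 * (5s) would give factotum_tix = 200, factotum_fast_tix =
	 * 1+(200/5) = 41, and factotum_start_tix = 1+(200*2) = 401.
	 */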

	mac_init_ops(&dmfe_dev_ops, "dmfe");
	status = mod_install(&modlinkage);
	if (status == DDI_SUCCESS)
		dmfe_log_init();

	return (status);
}

int
_fini(void)
{
	int status;

	status = mod_remove(&modlinkage);
	if (status == DDI_SUCCESS) {
		mac_fini_ops(&dmfe_dev_ops);
		dmfe_log_fini();
	}

	return (status);
}

#undef DMFE_DBG