/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */


#include <sys/types.h>
#include <sys/sunddi.h>
#include <sys/policy.h>
#include <sys/sdt.h>
#include "dmfe_impl.h"

/*
 * This is the string displayed by modinfo, etc.
 */
static char dmfe_ident[] = "Davicom DM9102 Ethernet";


/*
 * NOTES:
 *
 * #defines:
 *
 *	DMFE_PCI_RNUMBER is the register-set number to use for the operating
 *	registers.  On an OBP-based machine, regset 0 refers to CONFIG space,
 *	regset 1 will be the operating registers in I/O space, and regset 2
 *	will be the operating registers in MEMORY space (preferred).  If an
 *	expansion ROM is fitted, it may appear as a further register set.
 *
 *	DMFE_SLOP defines the amount by which the chip may read beyond
 *	the end of a buffer or descriptor, apparently 6-8 dwords :(
 *	We have to make sure this doesn't cause it to access unallocated
 *	or unmapped memory.
 *
 *	DMFE_BUF_SIZE must be at least (ETHERMAX + ETHERFCSL + DMFE_SLOP)
 *	rounded up to a multiple of 4.  Here we choose a power of two for
 *	speed & simplicity at the cost of a bit more memory.
 *
 *	However, the buffer length field in the TX/RX descriptors is only
 *	eleven bits, so even though we allocate DMFE_BUF_SIZE (2048) bytes
 *	per buffer, we tell the chip that they're only DMFE_BUF_SIZE_1
 *	(2000) bytes each.
 *
 *	DMFE_DMA_MODE defines the mode (STREAMING/CONSISTENT) used for
 *	the data buffers.  The descriptors are always set up in CONSISTENT
 *	mode.
 *
 *	DMFE_HEADROOM defines how much space we'll leave in allocated
 *	mblks before the first valid data byte.  This should be chosen
 *	to be 2 modulo 4, so that once the ethernet header (14 bytes)
 *	has been stripped off, the packet data will be 4-byte aligned.
 *	The remaining space can be used by upstream modules to prepend
 *	any headers required.
 *
 * Patchable globals:
 *
 *	dmfe_bus_modes: the bus mode bits to be put into CSR0.
 *		Setting READ_MULTIPLE in this register seems to cause
 *		the chip to generate a READ LINE command with a parity
 *		error!  Don't do it!
 *
 *	dmfe_setup_desc1: the value to be put into descriptor word 1
 *		when sending a SETUP packet.
 *
 *		Setting TX_LAST_DESC in desc1 in a setup packet seems
 *		to make the chip spontaneously reset internally - it
 *		attempts to give back the setup packet descriptor by
 *		writing to PCI address 00000000 - which may or may not
 *		get a MASTER ABORT - after which most of its registers
 *		seem to have either default values or garbage!
 *
 *		TX_FIRST_DESC doesn't seem to have the same effect but
 *		it isn't needed on a setup packet so we'll leave it out
 *		too, just in case it has some other weird side-effect.
 *
 *		The default hardware packet filtering mode is now
 *		HASH_AND_PERFECT (imperfect filtering of multicast
 *		packets and perfect filtering of unicast packets).
 *		If this is found not to work reliably, setting the
 *		TX_FILTER_TYPE1 bit will cause a switchover to using
 *		HASH_ONLY mode (imperfect filtering of *all* packets).
 *		Software will then perform the additional filtering
 *		as required.
 */

#define	DMFE_PCI_RNUMBER	2
#define	DMFE_SLOP		(8*sizeof (uint32_t))
#define	DMFE_BUF_SIZE		2048
#define	DMFE_BUF_SIZE_1		2000
#define	DMFE_DMA_MODE		DDI_DMA_STREAMING
#define	DMFE_HEADROOM		34
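
/*
 * A worked example of the sizing arithmetic described above:
 * ETHERMAX (1514) + ETHERFCSL (4) + DMFE_SLOP (32) = 1550, which
 * rounded up to a multiple of 4 is 1552; we round further up to the
 * power of two 2048.  The 11-bit buffer length field tops out at
 * 2047, so the chip is told DMFE_BUF_SIZE_1 (2000) instead.  And
 * DMFE_HEADROOM (34) is 2 modulo 4, so after the 14-byte ethernet
 * header is stripped the payload starts at offset 34 + 14 = 48,
 * which is 4-byte aligned.
 */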

static uint32_t dmfe_bus_modes = TX_POLL_INTVL | CACHE_ALIGN;
static uint32_t dmfe_setup_desc1 = TX_SETUP_PACKET | SETUPBUF_SIZE |
    TX_FILTER_TYPE0;

/*
 * Some tunable parameters ...
 *	Number of RX/TX ring entries (128/128)
 *	Minimum number of TX ring slots to keep free (1)
 *	Low-water mark at which to try to reclaim TX ring slots (1)
 *	How often to take a TX-done interrupt (twice per ring cycle)
 *	Whether to reclaim TX ring entries on a TX-done interrupt (no)
 */

#define	DMFE_TX_DESC	128	/* Should be a multiple of 4 <= 256 */
#define	DMFE_RX_DESC	128	/* Should be a multiple of 4 <= 256 */

static uint32_t dmfe_rx_desc = DMFE_RX_DESC;
static uint32_t dmfe_tx_desc = DMFE_TX_DESC;
static uint32_t dmfe_tx_min_free = 1;
static uint32_t dmfe_tx_reclaim_level = 1;
static uint32_t dmfe_tx_int_factor = (DMFE_TX_DESC / 2) - 1;
static boolean_t dmfe_reclaim_on_done = B_FALSE;

/*
 * Time-related parameters:
 *
 * We use a cyclic to provide a periodic callback; this is then used
 * to check for TX-stall and poll the link status register.
 *
 * DMFE_TICK is the interval between cyclic callbacks, in microseconds.
 *
 * TX_STALL_TIME_100 is the timeout in microseconds between passing
 * a packet to the chip for transmission and seeing that it's gone,
 * when running at 100Mb/s.  If we haven't reclaimed at least one
 * descriptor in this time we assume the transmitter has stalled
 * and reset the chip.
 *
 * TX_STALL_TIME_10 is the equivalent timeout when running at 10Mb/s.
 *
 * Patchable globals:
 *
 *	dmfe_tick_us:		DMFE_TICK
 *	dmfe_tx100_stall_us:	TX_STALL_TIME_100
 *	dmfe_tx10_stall_us:	TX_STALL_TIME_10
 *
 * These are then used in _init() to calculate:
 *
 *	stall_100_tix[]: number of consecutive cyclic callbacks without a
 *			reclaim before the TX process is considered stalled,
 *			when running at 100Mb/s.  The elements are indexed
 *			by transmit-engine-state.
 *	stall_10_tix[]: number of consecutive cyclic callbacks without a
 *			reclaim before the TX process is considered stalled,
 *			when running at 10Mb/s.  The elements are indexed
 *			by transmit-engine-state.
 */

#define	DMFE_TICK		25000		/* microseconds */
#define	TX_STALL_TIME_100	50000		/* microseconds */
#define	TX_STALL_TIME_10	200000		/* microseconds */

static uint32_t dmfe_tick_us = DMFE_TICK;
static uint32_t dmfe_tx100_stall_us = TX_STALL_TIME_100;
static uint32_t dmfe_tx10_stall_us = TX_STALL_TIME_10;

/*
 * Calculated from above in _init()
 */

static uint32_t stall_100_tix[TX_PROCESS_MAX_STATE+1];
static uint32_t stall_10_tix[TX_PROCESS_MAX_STATE+1];
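
/*
 * For illustration, with the default values above the base stall
 * timeout at 100Mb/s is dmfe_tx100_stall_us / dmfe_tick_us =
 * 50000 / 25000 = 2 cyclic ticks (and 200000 / 25000 = 8 ticks at
 * 10Mb/s); _init() (not shown in this section) is assumed to derive
 * the per-state entries in these tables from those ratios.
 */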

/*
 * Property names
 */
static char localmac_propname[] = "local-mac-address";
static char opmode_propname[] = "opmode-reg-value";

static int	dmfe_m_start(void *);
static void	dmfe_m_stop(void *);
static int	dmfe_m_promisc(void *, boolean_t);
static int	dmfe_m_multicst(void *, boolean_t, const uint8_t *);
static int	dmfe_m_unicst(void *, const uint8_t *);
static void	dmfe_m_ioctl(void *, queue_t *, mblk_t *);
static mblk_t	*dmfe_m_tx(void *, mblk_t *);
static int	dmfe_m_stat(void *, uint_t, uint64_t *);
static int	dmfe_m_getprop(void *, const char *, mac_prop_id_t,
    uint_t, void *);
static int	dmfe_m_setprop(void *, const char *, mac_prop_id_t,
    uint_t, const void *);
static void	dmfe_m_propinfo(void *, const char *, mac_prop_id_t,
    mac_prop_info_handle_t);

static mac_callbacks_t dmfe_m_callbacks = {
	MC_IOCTL | MC_SETPROP | MC_GETPROP | MC_PROPINFO,
	dmfe_m_stat,
	dmfe_m_start,
	dmfe_m_stop,
	dmfe_m_promisc,
	dmfe_m_multicst,
	dmfe_m_unicst,
	dmfe_m_tx,
	NULL,
	dmfe_m_ioctl,
	NULL,		/* getcapab */
	NULL,		/* open */
	NULL,		/* close */
	dmfe_m_setprop,
	dmfe_m_getprop,
	dmfe_m_propinfo
};


/*
 * Describes the chip's DMA engine
 */
static ddi_dma_attr_t dma_attr = {
	DMA_ATTR_V0,		/* dma_attr version */
	0,			/* dma_attr_addr_lo */
	(uint32_t)0xFFFFFFFF,	/* dma_attr_addr_hi */
	0x0FFFFFF,		/* dma_attr_count_max */
	0x20,			/* dma_attr_align */
	0x7F,			/* dma_attr_burstsizes */
	1,			/* dma_attr_minxfer */
	(uint32_t)0xFFFFFFFF,	/* dma_attr_maxxfer */
	(uint32_t)0xFFFFFFFF,	/* dma_attr_seg */
	1,			/* dma_attr_sgllen */
	1,			/* dma_attr_granular */
	0			/* dma_attr_flags */
};

/*
 * DMA access attributes for registers and descriptors
 */
static ddi_device_acc_attr_t dmfe_reg_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};

/*
 * DMA access attributes for data: NOT to be byte swapped.
 */
static ddi_device_acc_attr_t dmfe_data_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC
};

static uchar_t dmfe_broadcast_addr[ETHERADDRL] = {
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff
};


/*
 * ========== Lowest-level chip register & ring access routines ==========
 */

/*
 * I/O register get/put routines
 */
uint32_t
dmfe_chip_get32(dmfe_t *dmfep, off_t offset)
{
	uint32_t *addr;

	addr = (void *)(dmfep->io_reg + offset);
	return (ddi_get32(dmfep->io_handle, addr));
}

void
dmfe_chip_put32(dmfe_t *dmfep, off_t offset, uint32_t value)
{
	uint32_t *addr;

	addr = (void *)(dmfep->io_reg + offset);
	ddi_put32(dmfep->io_handle, addr, value);
}

/*
 * TX/RX ring get/put routines
 */
static uint32_t
dmfe_ring_get32(dma_area_t *dma_p, uint_t index, uint_t offset)
{
	uint32_t *addr;

	addr = (void *)dma_p->mem_va;
	return (ddi_get32(dma_p->acc_hdl, addr + index*DESC_SIZE + offset));
}

static void
dmfe_ring_put32(dma_area_t *dma_p, uint_t index, uint_t offset, uint32_t value)
{
	uint32_t *addr;

	addr = (void *)dma_p->mem_va;
	ddi_put32(dma_p->acc_hdl, addr + index*DESC_SIZE + offset, value);
}

/*
 * Setup buffer get/put routines
 */
static uint32_t
dmfe_setup_get32(dma_area_t *dma_p, uint_t index)
{
	uint32_t *addr;

	addr = (void *)dma_p->setup_va;
	return (ddi_get32(dma_p->acc_hdl, addr + index));
}

static void
dmfe_setup_put32(dma_area_t *dma_p, uint_t index, uint32_t value)
{
	uint32_t *addr;

	addr = (void *)dma_p->setup_va;
	ddi_put32(dma_p->acc_hdl, addr + index, value);
}


/*
 * ========== Low-level chip & ring buffer manipulation ==========
 */

/*
 * dmfe_set_opmode() -- function to set operating mode
 */
static void
dmfe_set_opmode(dmfe_t *dmfep)
{
	ASSERT(mutex_owned(dmfep->oplock));

	dmfe_chip_put32(dmfep, OPN_MODE_REG, dmfep->opmode);
	drv_usecwait(10);
}

/*
 * dmfe_stop_chip() -- stop all chip processing & optionally reset the h/w
 */
static void
dmfe_stop_chip(dmfe_t *dmfep, enum chip_state newstate)
{
	ASSERT(mutex_owned(dmfep->oplock));

	/*
	 * Stop the chip:
	 *	disable all interrupts
	 *	stop TX/RX processes
	 *	clear the status bits for TX/RX stopped
	 * If required, reset the chip
	 * Record the new state
	 */
	dmfe_chip_put32(dmfep, INT_MASK_REG, 0);
	dmfep->opmode &= ~(START_TRANSMIT | START_RECEIVE);
	dmfe_set_opmode(dmfep);
	dmfe_chip_put32(dmfep, STATUS_REG, TX_STOPPED_INT | RX_STOPPED_INT);

	switch (newstate) {
	default:
		ASSERT(!"can't get here");
		return;

	case CHIP_STOPPED:
	case CHIP_ERROR:
		break;

	case CHIP_RESET:
		dmfe_chip_put32(dmfep, BUS_MODE_REG, SW_RESET);
		drv_usecwait(10);
		dmfe_chip_put32(dmfep, BUS_MODE_REG, 0);
		drv_usecwait(10);
		dmfe_chip_put32(dmfep, BUS_MODE_REG, dmfe_bus_modes);
		break;
	}

	dmfep->chip_state = newstate;
}

/*
 * Initialize transmit and receive descriptor rings, and
 * set the chip to point to the first entry in each ring
 */
static void
dmfe_init_rings(dmfe_t *dmfep)
{
	dma_area_t *descp;
	uint32_t pstart;
	uint32_t pnext;
	uint32_t pbuff;
	uint32_t desc1;
	int i;

	/*
	 * You need all the locks in order to rewrite the descriptor rings
	 */
	ASSERT(mutex_owned(dmfep->oplock));
	ASSERT(mutex_owned(dmfep->rxlock));
	ASSERT(mutex_owned(dmfep->txlock));

	/*
	 * Program the RX ring entries
	 */
	descp = &dmfep->rx_desc;
	pstart = descp->mem_dvma;
	pnext = pstart + sizeof (struct rx_desc_type);
	pbuff = dmfep->rx_buff.mem_dvma;
	desc1 = RX_CHAINING | DMFE_BUF_SIZE_1;

	for (i = 0; i < dmfep->rx.n_desc; ++i) {
		dmfe_ring_put32(descp, i, RD_NEXT, pnext);
		dmfe_ring_put32(descp, i, BUFFER1, pbuff);
		dmfe_ring_put32(descp, i, DESC1, desc1);
		dmfe_ring_put32(descp, i, DESC0, RX_OWN);

		pnext += sizeof (struct rx_desc_type);
		pbuff += DMFE_BUF_SIZE;
	}

	/*
	 * Fix up last entry & sync
	 */
	dmfe_ring_put32(descp, --i, RD_NEXT, pstart);
	DMA_SYNC(descp, DDI_DMA_SYNC_FORDEV);
	dmfep->rx.next_free = 0;

	/*
	 * Set the base address of the RX descriptor list in CSR3
	 */
	dmfe_chip_put32(dmfep, RX_BASE_ADDR_REG, descp->mem_dvma);

	/*
	 * Program the TX ring entries
	 */
	descp = &dmfep->tx_desc;
	pstart = descp->mem_dvma;
	pnext = pstart + sizeof (struct tx_desc_type);
	pbuff = dmfep->tx_buff.mem_dvma;
	desc1 = TX_CHAINING;

	for (i = 0; i < dmfep->tx.n_desc; ++i) {
		dmfe_ring_put32(descp, i, TD_NEXT, pnext);
		dmfe_ring_put32(descp, i, BUFFER1, pbuff);
		dmfe_ring_put32(descp, i, DESC1, desc1);
		dmfe_ring_put32(descp, i, DESC0, 0);

		pnext += sizeof (struct tx_desc_type);
		pbuff += DMFE_BUF_SIZE;
	}

	/*
	 * Fix up last entry & sync
	 */
	dmfe_ring_put32(descp, --i, TD_NEXT, pstart);
	DMA_SYNC(descp, DDI_DMA_SYNC_FORDEV);
	dmfep->tx.n_free = dmfep->tx.n_desc;
	dmfep->tx.next_free = dmfep->tx.next_busy = 0;

	/*
	 * Set the base address of the TX descriptor list in CSR4
	 */
	dmfe_chip_put32(dmfep, TX_BASE_ADDR_REG, descp->mem_dvma);
}

/*
 * dmfe_start_chip() -- start the chip transmitting and/or receiving
 */
static void
dmfe_start_chip(dmfe_t *dmfep, int mode)
{
	ASSERT(mutex_owned(dmfep->oplock));

	dmfep->opmode |= mode;
	dmfe_set_opmode(dmfep);

	dmfe_chip_put32(dmfep, W_J_TIMER_REG, 0);
	/*
	 * Enable VLAN length mode (allows packets to be 4 bytes longer).
	 */
	dmfe_chip_put32(dmfep, W_J_TIMER_REG, VLAN_ENABLE);

	/*
	 * Clear any pending process-stopped interrupts
	 */
	dmfe_chip_put32(dmfep, STATUS_REG, TX_STOPPED_INT | RX_STOPPED_INT);
	dmfep->chip_state = mode & START_RECEIVE ? CHIP_TX_RX :
	    mode & START_TRANSMIT ? CHIP_TX_ONLY : CHIP_STOPPED;
}

/*
 * dmfe_enable_interrupts() -- enable our favourite set of interrupts.
 *
 * Normal interrupts:
 *	We always enable:
 *		RX_PKTDONE_INT		(packet received)
 *		TX_PKTDONE_INT		(TX complete)
 *	We never enable:
 *		TX_ALLDONE_INT		(next TX buffer not ready)
 *
 * Abnormal interrupts:
 *	We always enable:
 *		RX_STOPPED_INT
 *		TX_STOPPED_INT
 *		SYSTEM_ERR_INT
 *		RX_UNAVAIL_INT
 *	We never enable:
 *		RX_EARLY_INT
 *		RX_WATCHDOG_INT
 *		TX_JABBER_INT
 *		TX_EARLY_INT
 *		TX_UNDERFLOW_INT
 *		GP_TIMER_INT		(not valid in -9 chips)
 *		LINK_STATUS_INT		(not valid in -9 chips)
 */
static void
dmfe_enable_interrupts(dmfe_t *dmfep)
{
	ASSERT(mutex_owned(dmfep->oplock));

	/*
	 * Put 'the standard set of interrupts' in the interrupt mask register
	 */
	dmfep->imask = RX_PKTDONE_INT | TX_PKTDONE_INT |
	    RX_STOPPED_INT | TX_STOPPED_INT | RX_UNAVAIL_INT | SYSTEM_ERR_INT;

	dmfe_chip_put32(dmfep, INT_MASK_REG,
	    NORMAL_SUMMARY_INT | ABNORMAL_SUMMARY_INT | dmfep->imask);
	dmfep->chip_state = CHIP_RUNNING;
}

/*
 * ========== RX side routines ==========
 */

/*
 * Function to update receive statistics on various errors
 */
static void
dmfe_update_rx_stats(dmfe_t *dmfep, uint32_t desc0)
{
	ASSERT(mutex_owned(dmfep->rxlock));

	/*
	 * The error summary bit and the error bits that it summarises
	 * are only valid if this is the last fragment.  Therefore, a
	 * fragment only contributes to the error statistics if both
	 * the last-fragment and error summary bits are set.
	 */
	if (((RX_LAST_DESC | RX_ERR_SUMMARY) & ~desc0) == 0) {
		dmfep->rx_stats_ierrors += 1;

		/*
		 * There are some other error bits in the descriptor for
		 * which there don't seem to be appropriate MAC statistics,
		 * notably RX_COLLISION and perhaps RX_DESC_ERR.  The
		 * latter may not be possible if it is supposed to indicate
		 * that one buffer has been filled with a partial packet
		 * and the next buffer required for the rest of the packet
		 * was not available, as all our buffers are more than large
		 * enough for a whole packet without fragmenting.
		 */

		if (desc0 & RX_OVERFLOW) {
			dmfep->rx_stats_overflow += 1;

		} else if (desc0 & RX_RUNT_FRAME)
			dmfep->rx_stats_short += 1;

		if (desc0 & RX_CRC)
			dmfep->rx_stats_fcs += 1;

		if (desc0 & RX_FRAME2LONG)
			dmfep->rx_stats_toolong += 1;
	}

	/*
	 * A receive watchdog timeout is counted as a MAC-level receive
	 * error.  Strangely, it doesn't set the packet error summary bit,
	 * according to the chip data sheet :-?
	 */
	if (desc0 & RX_RCV_WD_TO)
		dmfep->rx_stats_macrcv_errors += 1;

	if (desc0 & RX_DRIBBLING)
		dmfep->rx_stats_align += 1;

	if (desc0 & RX_MII_ERR)
		dmfep->rx_stats_macrcv_errors += 1;
}

/*
 * Receive incoming packet(s) and pass them up ...
 */
static mblk_t *
dmfe_getp(dmfe_t *dmfep)
{
	dma_area_t *descp;
	mblk_t **tail;
	mblk_t *head;
	mblk_t *mp;
	char *rxb;
	uchar_t *dp;
	uint32_t desc0;
	uint32_t misses;
	int packet_length;
	int index;

	mutex_enter(dmfep->rxlock);

	/*
	 * Update the missed frame statistic from the on-chip counter.
	 */
	misses = dmfe_chip_get32(dmfep, MISSED_FRAME_REG);
	dmfep->rx_stats_norcvbuf += (misses & MISSED_FRAME_MASK);

	/*
	 * sync (all) receive descriptors before inspecting them
	 */
	descp = &dmfep->rx_desc;
	DMA_SYNC(descp, DDI_DMA_SYNC_FORKERNEL);

	/*
	 * We should own at least one RX entry, since we've had a
	 * receive interrupt, but let's not be dogmatic about it.
	 */
	index = dmfep->rx.next_free;
	desc0 = dmfe_ring_get32(descp, index, DESC0);

	DTRACE_PROBE1(rx__start, uint32_t, desc0);
	for (head = NULL, tail = &head; (desc0 & RX_OWN) == 0; ) {
		/*
		 * Maintain statistics for every descriptor returned
		 * to us by the chip ...
		 */
		dmfe_update_rx_stats(dmfep, desc0);

		/*
		 * Check that the entry has both "packet start" and
		 * "packet end" flags.  We really shouldn't get packet
		 * fragments, 'cos all the RX buffers are bigger than
		 * the largest valid packet.  So we'll just drop any
		 * fragments we find & skip on to the next entry.
		 */
		if (((RX_FIRST_DESC | RX_LAST_DESC) & ~desc0) != 0) {
			DTRACE_PROBE1(rx__frag, uint32_t, desc0);
			goto skip;
		}

		/*
		 * A whole packet in one buffer.  We have to check error
		 * status and packet length before forwarding it upstream.
		 */
		if (desc0 & RX_ERR_SUMMARY) {
			DTRACE_PROBE1(rx__err, uint32_t, desc0);
			goto skip;
		}

		packet_length = (desc0 >> 16) & 0x3fff;
		if (packet_length > DMFE_MAX_PKT_SIZE) {
			DTRACE_PROBE1(rx__toobig, int, packet_length);
			goto skip;
		} else if (packet_length < ETHERMIN) {
			/*
			 * Note that a VLAN packet would be even larger,
			 * but we don't worry about dropping runt VLAN
			 * frames.
			 *
			 * This check is probably redundant, as well,
			 * since the hardware should drop RUNT frames.
			 */
			DTRACE_PROBE1(rx__runt, int, packet_length);
			goto skip;
		}

		/*
		 * Sync the data, so we can examine it; then check that
		 * the packet is really intended for us (remember that
		 * if we're using Imperfect Filtering, then the chip will
		 * receive unicast packets sent to stations whose addresses
		 * just happen to hash to the same value as our own; we
		 * discard these here so they don't get sent upstream ...)
		 */
		(void) ddi_dma_sync(dmfep->rx_buff.dma_hdl,
		    index * DMFE_BUF_SIZE, DMFE_BUF_SIZE,
		    DDI_DMA_SYNC_FORKERNEL);
		rxb = &dmfep->rx_buff.mem_va[index*DMFE_BUF_SIZE];

		/*
		 * We do not bother to check that the packet is really for
		 * us, we let the MAC framework make that check instead.
		 * This is especially important if we ever want to support
		 * multiple MAC addresses.
		 */

		/*
		 * Packet looks good; get a buffer to copy it into.  We
		 * allow some space at the front of the allocated buffer
		 * (HEADROOM) in case any upstream modules want to prepend
		 * some sort of header.  The value has been carefully chosen
		 * so that it also has the side-effect of making the packet
		 * *contents* 4-byte aligned, as required by NCA!
		 */
		mp = allocb(DMFE_HEADROOM + packet_length, 0);
		if (mp == NULL) {
			DTRACE_PROBE(rx__no__buf);
			dmfep->rx_stats_norcvbuf += 1;
			goto skip;
		}

		/*
		 * Account for statistics of good packets.
		 */
		dmfep->rx_stats_ipackets += 1;
		dmfep->rx_stats_rbytes += packet_length;
		if (desc0 & RX_MULTI_FRAME) {
			if (bcmp(rxb, dmfe_broadcast_addr, ETHERADDRL)) {
				dmfep->rx_stats_multi += 1;
			} else {
				dmfep->rx_stats_bcast += 1;
			}
		}

		/*
		 * Copy the packet into the STREAMS buffer
		 */
		dp = mp->b_rptr += DMFE_HEADROOM;
		mp->b_cont = mp->b_next = NULL;

		/*
		 * Don't worry about stripping the vlan tag, the MAC
		 * layer will take care of that for us.
		 */
		bcopy(rxb, dp, packet_length);

		/*
		 * Fix up the packet length, and link it to the chain
		 */
		mp->b_wptr = mp->b_rptr + packet_length - ETHERFCSL;
		*tail = mp;
		tail = &mp->b_next;

	skip:
		/*
		 * Return ownership of ring entry & advance to next
		 */
		dmfe_ring_put32(descp, index, DESC0, RX_OWN);
		index = NEXT(index, dmfep->rx.n_desc);
		desc0 = dmfe_ring_get32(descp, index, DESC0);
	}

	/*
	 * Remember where to start looking next time ...
	 */
	dmfep->rx.next_free = index;

	/*
	 * sync the receive descriptors that we've given back
	 * (actually, we sync all of them for simplicity), and
	 * wake the chip in case it had suspended receive
	 */
	DMA_SYNC(descp, DDI_DMA_SYNC_FORDEV);
	dmfe_chip_put32(dmfep, RX_POLL_REG, 0);

	mutex_exit(dmfep->rxlock);
	return (head);
}

/*
 * ========== Primary TX side routines ==========
 */

/*
 * TX ring management:
 *
 * There are <tx.n_desc> entries in the ring, of which those from
 * <tx.next_free> round to but not including <tx.next_busy> must
 * be owned by the CPU.  The number of such entries should equal
 * <tx.n_free>; but there may also be some more entries which the
 * chip has given back but which we haven't yet accounted for.
 * The routine dmfe_reclaim_tx_desc() adjusts the indexes & counts
 * as it discovers such entries.
 *
 * Initially, or when the ring is entirely free:
 *	C = Owned by CPU
 *	D = Owned by Davicom (DMFE) chip
 *
 *	tx.next_free					tx.n_desc = 16
 *	  |
 *	  v
 *	+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
 *	| C | C | C | C | C | C | C | C | C | C | C | C | C | C | C | C |
 *	+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
 *	  ^
 *	  |
 *	tx.next_busy					tx.n_free = 16
 *
 * On entry to reclaim() during normal use:
 *
 *	tx.next_free					tx.n_desc = 16
 *	                                      |
 *	                                      v
 *	+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
 *	| C | C | C | C | C | C | D | D | D | C | C | C | C | C | C | C |
 *	+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
 *	          ^
 *	          |
 *	tx.next_busy					tx.n_free = 9
 *
 * On exit from reclaim():
 *
 *	tx.next_free					tx.n_desc = 16
 *	                                      |
 *	                                      v
 *	+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
 *	| C | C | C | C | C | C | D | D | D | C | C | C | C | C | C | C |
 *	+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
 *	                          ^
 *	                          |
 *	tx.next_busy					tx.n_free = 13
 *
 * The ring is considered "full" when only one entry is owned by
 * the CPU; thus <tx.n_free> should always be >= 1.
 *
 *	tx.next_free					tx.n_desc = 16
 *	                      |
 *	                      v
 *	+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
 *	| D | D | D | D | D | C | D | D | D | D | D | D | D | D | D | D |
 *	+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
 *	                          ^
 *	                          |
 *	tx.next_busy					tx.n_free = 1
 */

/*
 * Function to update transmit statistics on various errors
 */
static void
dmfe_update_tx_stats(dmfe_t *dmfep, int index, uint32_t desc0, uint32_t desc1)
{
	uint32_t collisions;
	uint32_t errbits;
	uint32_t errsum;

	ASSERT(mutex_owned(dmfep->txlock));

	collisions = ((desc0 >> 3) & 0x0f);
	errsum = desc0 & TX_ERR_SUMMARY;
	errbits = desc0 & (TX_UNDERFLOW | TX_LATE_COLL | TX_CARRIER_LOSS |
	    TX_NO_CARRIER | TX_EXCESS_COLL | TX_JABBER_TO);
	if ((errsum == 0) != (errbits == 0)) {
		dmfe_log(dmfep, "dubious TX error status 0x%x", desc0);
		desc0 |= TX_ERR_SUMMARY;
	}

	if (desc0 & TX_ERR_SUMMARY) {
		dmfep->tx_stats_oerrors += 1;

		/*
		 * If we ever see a transmit jabber timeout, we count it
		 * as a MAC-level transmit error; but we probably won't
		 * see it as it causes an Abnormal interrupt and we reset
		 * the chip in order to recover
		 */
		if (desc0 & TX_JABBER_TO) {
			dmfep->tx_stats_macxmt_errors += 1;
			dmfep->tx_stats_jabber += 1;
		}

		if (desc0 & TX_UNDERFLOW)
			dmfep->tx_stats_underflow += 1;
		else if (desc0 & TX_LATE_COLL)
			dmfep->tx_stats_xmtlatecoll += 1;

		if (desc0 & (TX_CARRIER_LOSS | TX_NO_CARRIER))
			dmfep->tx_stats_nocarrier += 1;

		if (desc0 & TX_EXCESS_COLL) {
			dmfep->tx_stats_excoll += 1;
			collisions = 16;
		}
	} else {
		int bit = index % NBBY;
		int byt = index / NBBY;

		if (dmfep->tx_mcast[byt] & (1 << bit)) {
			dmfep->tx_mcast[byt] &= ~(1 << bit);
			dmfep->tx_stats_multi += 1;

		} else if (dmfep->tx_bcast[byt] & (1 << bit)) {
			dmfep->tx_bcast[byt] &= ~(1 << bit);
			dmfep->tx_stats_bcast += 1;
		}

		dmfep->tx_stats_opackets += 1;
		dmfep->tx_stats_obytes += desc1 & TX_BUFFER_SIZE1;
	}

	if (collisions == 1)
		dmfep->tx_stats_first_coll += 1;
	else if (collisions != 0)
		dmfep->tx_stats_multi_coll += 1;
	dmfep->tx_stats_collisions += collisions;

	if (desc0 & TX_DEFERRED)
		dmfep->tx_stats_defer += 1;
}

/*
 * Reclaim all the ring entries that the chip has returned to us ...
 *
 * Returns B_FALSE if no entries could be reclaimed.  Otherwise, reclaims
 * as many as possible, restarts the TX stall timeout, and returns B_TRUE.
 */
static boolean_t
dmfe_reclaim_tx_desc(dmfe_t *dmfep)
{
	dma_area_t *descp;
	uint32_t desc0;
	uint32_t desc1;
	int i;

	ASSERT(mutex_owned(dmfep->txlock));

	/*
	 * sync transmit descriptor ring before looking at it
	 */
	descp = &dmfep->tx_desc;
	DMA_SYNC(descp, DDI_DMA_SYNC_FORKERNEL);

	/*
	 * Early exit if there are no descriptors to reclaim, either
	 * because they're all reclaimed already, or because the next
	 * one is still owned by the chip ...
	 */
	i = dmfep->tx.next_busy;
	if (i == dmfep->tx.next_free)
		return (B_FALSE);
	desc0 = dmfe_ring_get32(descp, i, DESC0);
	if (desc0 & TX_OWN)
		return (B_FALSE);

	/*
	 * Reclaim as many descriptors as possible ...
	 */
	for (;;) {
		desc1 = dmfe_ring_get32(descp, i, DESC1);
		ASSERT((desc1 & (TX_SETUP_PACKET | TX_LAST_DESC)) != 0);

		if ((desc1 & TX_SETUP_PACKET) == 0) {
			/*
			 * Regular packet - just update stats
			 */
			dmfe_update_tx_stats(dmfep, i, desc0, desc1);
		}

		/*
		 * Update count & index; we're all done if the ring is
		 * now fully reclaimed, or the next entry if still owned
		 * by the chip ...
		 */
		dmfep->tx.n_free += 1;
		i = NEXT(i, dmfep->tx.n_desc);
		if (i == dmfep->tx.next_free)
			break;
		desc0 = dmfe_ring_get32(descp, i, DESC0);
		if (desc0 & TX_OWN)
			break;
	}

	dmfep->tx.next_busy = i;
	dmfep->tx_pending_tix = 0;
	return (B_TRUE);
}

/*
 * Send the message in the message block chain <mp>.
 *
 * The message is freed if and only if its contents are successfully copied
 * and queued for transmission (so that the return value is B_TRUE).
 * If we can't queue the message, the return value is B_FALSE and
 * the message is *not* freed.
 *
 * This routine handles the special case of <mp> == NULL, which indicates
 * that we want to "send" the special "setup packet" allocated during
 * startup.  We have to use some different flags in the packet descriptor
 * to say it's a setup packet (from the global <dmfe_setup_desc1>), and the
 * setup packet *isn't* freed after use.
 */
static boolean_t
dmfe_send_msg(dmfe_t *dmfep, mblk_t *mp)
{
	dma_area_t *descp;
	mblk_t *bp;
	char *txb;
	uint32_t desc1;
	uint32_t index;
	size_t totlen;
	size_t mblen;
	uint32_t paddr;

	/*
	 * If the number of free slots is below the reclaim threshold
	 * (soft limit), we'll try to reclaim some.  If we fail, and
	 * the number of free slots is also below the minimum required
	 * (the hard limit, usually 1), then we can't send the packet.
	 */
	mutex_enter(dmfep->txlock);
	if (dmfep->suspended) {
		mutex_exit(dmfep->txlock);
		return (B_FALSE);
	}

	if (dmfep->tx.n_free <= dmfe_tx_reclaim_level &&
	    dmfe_reclaim_tx_desc(dmfep) == B_FALSE &&
	    dmfep->tx.n_free <= dmfe_tx_min_free) {
		/*
		 * Resource shortage - return B_FALSE so the packet
		 * will be queued for retry after the next TX-done
		 * interrupt.
		 */
		mutex_exit(dmfep->txlock);
		DTRACE_PROBE(tx__no__desc);
		return (B_FALSE);
	}

	/*
	 * There's a slot available, so claim it by incrementing
	 * the next-free index and decrementing the free count.
	 * If the ring is currently empty, we also restart the
	 * stall-detect timer.  The ASSERTions check that our
	 * invariants still hold:
	 *	the next-free index must not match the next-busy index
	 *	there must still be at least one free entry
	 * After this, we now have exclusive ownership of the ring
	 * entry (and matching buffer) indicated by <index>, so we
	 * don't need to hold the TX lock any longer
	 */
	index = dmfep->tx.next_free;
	dmfep->tx.next_free = NEXT(index, dmfep->tx.n_desc);
	ASSERT(dmfep->tx.next_free != dmfep->tx.next_busy);
	if (dmfep->tx.n_free-- == dmfep->tx.n_desc)
		dmfep->tx_pending_tix = 0;
	ASSERT(dmfep->tx.n_free >= 1);
	mutex_exit(dmfep->txlock);

	/*
	 * Check the ownership of the ring entry ...
	 */
	descp = &dmfep->tx_desc;
	ASSERT((dmfe_ring_get32(descp, index, DESC0) & TX_OWN) == 0);

	if (mp == NULL) {
		/*
		 * Indicates we should send a SETUP packet, which we do by
		 * temporarily switching the BUFFER1 pointer in the ring
		 * entry.  The reclaim routine will restore BUFFER1 to its
		 * usual value.
		 *
		 * Note that as the setup packet is tagged on the end of
		 * the TX ring, when we sync the descriptor we're also
		 * implicitly syncing the setup packet - hence, we don't
		 * need a separate ddi_dma_sync() call here.
		 */
		desc1 = dmfe_setup_desc1;
		paddr = descp->setup_dvma;
	} else {
		/*
		 * A regular packet; we copy the data into a pre-mapped
		 * buffer, which avoids the overhead (and complication)
		 * of mapping/unmapping STREAMS buffers and keeping hold
		 * of them until the DMA has completed.
		 *
		 * Because all buffers are the same size, and larger
		 * than the longest single valid message, we don't have
		 * to bother about splitting the message across multiple
		 * buffers.
		 */
		txb = &dmfep->tx_buff.mem_va[index*DMFE_BUF_SIZE];
		totlen = 0;
		bp = mp;

		/*
		 * Copy all (remaining) mblks in the message ...
		 */
		for (; bp != NULL; bp = bp->b_cont) {
			mblen = MBLKL(bp);
			if ((totlen += mblen) <= DMFE_MAX_PKT_SIZE) {
				bcopy(bp->b_rptr, txb, mblen);
				txb += mblen;
			}
		}

		/*
		 * Is this a multicast or broadcast packet?  We do
		 * this so that we can track statistics accurately
		 * when we reclaim it.
		 */
		txb = &dmfep->tx_buff.mem_va[index*DMFE_BUF_SIZE];
		if (txb[0] & 0x1) {
			if (bcmp(txb, dmfe_broadcast_addr, ETHERADDRL) == 0) {
				dmfep->tx_bcast[index / NBBY] |=
				    (1 << (index % NBBY));
			} else {
				dmfep->tx_mcast[index / NBBY] |=
				    (1 << (index % NBBY));
			}
		}

		/*
		 * We've reached the end of the chain; and we should have
		 * collected no more than DMFE_MAX_PKT_SIZE bytes into our
		 * buffer.  Note that the <size> field in the descriptor is
		 * only 11 bits, so bigger packets would be a problem!
		 */
		ASSERT(bp == NULL);
		ASSERT(totlen <= DMFE_MAX_PKT_SIZE);
		totlen &= TX_BUFFER_SIZE1;
		desc1 = TX_FIRST_DESC | TX_LAST_DESC | totlen;
		paddr = dmfep->tx_buff.mem_dvma + index*DMFE_BUF_SIZE;

		(void) ddi_dma_sync(dmfep->tx_buff.dma_hdl,
		    index * DMFE_BUF_SIZE, DMFE_BUF_SIZE, DDI_DMA_SYNC_FORDEV);
	}

	/*
	 * Update ring descriptor entries, sync them, and wake up the
	 * transmit process
	 */
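	/*
	 * With the default dmfe_tx_int_factor of 63, the mask test
	 * below is true for indices 0 and 64 of the default 128-entry
	 * ring, i.e. we request a completion interrupt twice per ring
	 * cycle (note that the factor must be one less than a power
	 * of two for this test to behave as intended).
	 */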
	if ((index & dmfe_tx_int_factor) == 0)
		desc1 |= TX_INT_ON_COMP;
	desc1 |= TX_CHAINING;
	dmfe_ring_put32(descp, index, BUFFER1, paddr);
	dmfe_ring_put32(descp, index, DESC1, desc1);
	dmfe_ring_put32(descp, index, DESC0, TX_OWN);
	DMA_SYNC(descp, DDI_DMA_SYNC_FORDEV);
	dmfe_chip_put32(dmfep, TX_POLL_REG, 0);

	/*
	 * Finally, free the message & return success
	 */
	if (mp)
		freemsg(mp);
	return (B_TRUE);
}

/*
 * dmfe_m_tx() -- send a chain of packets
 *
 * Called when packet(s) are ready to be transmitted.  A pointer to an
 * M_DATA message that contains the packet is passed to this routine.
 * The complete LLC header is contained in the message's first message
 * block, and the remainder of the packet is contained within
 * additional M_DATA message blocks linked to the first message block.
 *
 * Additional messages may be passed by linking with b_next.
 */
static mblk_t *
dmfe_m_tx(void *arg, mblk_t *mp)
{
	dmfe_t *dmfep = arg;		/* private device info */
	mblk_t *next;

	ASSERT(mp != NULL);
	ASSERT(dmfep->mac_state == DMFE_MAC_STARTED);

	if (dmfep->chip_state != CHIP_RUNNING)
		return (mp);

	while (mp != NULL) {
		next = mp->b_next;
		mp->b_next = NULL;
		if (!dmfe_send_msg(dmfep, mp)) {
			mp->b_next = next;
			break;
		}
		mp = next;
	}

	return (mp);
}

/*
 * ========== Address-setting routines (TX-side) ==========
 */

/*
 * Find the index of the relevant bit in the setup packet.
 * This must mirror the way the hardware will actually calculate it!
 */
static uint32_t
dmfe_hash_index(const uint8_t *address)
{
	uint32_t const POLY = HASH_POLY;
	uint32_t crc = HASH_CRC;
	uint32_t index;
	uint32_t msb;
	uchar_t currentbyte;
	int byteslength;
	int shift;
	int bit;

	for (byteslength = 0; byteslength < ETHERADDRL; ++byteslength) {
		currentbyte = address[byteslength];
		for (bit = 0; bit < 8; ++bit) {
			msb = crc >> 31;
			crc <<= 1;
			if (msb ^ (currentbyte & 1)) {
				crc ^= POLY;
				crc |= 0x00000001;
			}
			currentbyte >>= 1;
		}
	}

	for (index = 0, bit = 23, shift = 8; shift >= 0; ++bit, --shift)
		index |= (((crc >> bit) & 1) << shift);

	return (index);
}
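
/*
 * Note on the extraction loop above: bits 23..31 of the CRC are
 * copied out MSB-first (CRC bit 23 lands in bit 8 of the index,
 * CRC bit 31 in bit 0), yielding a 9-bit index in the range 0..511
 * - one bit position within the 512-bit hash table carried in the
 * setup packet.
 */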

/*
 * Find and set/clear the relevant bit in the setup packet hash table
 * This must mirror the way the hardware will actually interpret it!
 */
static void
dmfe_update_hash(dmfe_t *dmfep, uint32_t index, boolean_t val)
{
	dma_area_t *descp;
	uint32_t tmp;

	ASSERT(mutex_owned(dmfep->oplock));

	descp = &dmfep->tx_desc;
	tmp = dmfe_setup_get32(descp, index/16);
	if (val)
		tmp |= 1 << (index%16);
	else
		tmp &= ~(1 << (index%16));
	dmfe_setup_put32(descp, index/16, tmp);
}
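
/*
 * Layout note: only the low 16 bits of each 32-bit setup-buffer
 * word carry data (hence the index/16 and index%16 arithmetic
 * above), so the 512-bit hash table occupies the low halves of 32
 * consecutive words.
 */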

/*
 * Update the refcount for the bit in the setup packet corresponding
 * to the specified address; if it changes between zero & nonzero,
 * also update the bitmap itself & return B_TRUE, so that the caller
 * knows to re-send the setup packet.  Otherwise (only the refcount
 * changed), return B_FALSE
 */
static boolean_t
dmfe_update_mcast(dmfe_t *dmfep, const uint8_t *mca, boolean_t val)
{
	uint32_t index;
	uint8_t *refp;
	boolean_t change;

	index = dmfe_hash_index(mca);
	refp = &dmfep->mcast_refs[index];
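	/*
	 * Note: when adding (val == B_TRUE) the post-increment below
	 * tests the old count, so <change> is set on the first
	 * reference; when removing, the pre-decrement tests the new
	 * count, so <change> is set when the last reference goes away.
	 */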
	change = (val ? (*refp)++ : --(*refp)) == 0;

	if (change)
		dmfe_update_hash(dmfep, index, val);

	return (change);
}

/*
 * "Transmit" the (possibly updated) magic setup packet
 */
static int
dmfe_send_setup(dmfe_t *dmfep)
{
	int status;

	ASSERT(mutex_owned(dmfep->oplock));

	if (dmfep->suspended)
		return (0);

	/*
	 * If the chip isn't running, we can't really send the setup frame
	 * now but it doesn't matter, 'cos it will be sent when the transmit
	 * process is restarted (see dmfe_start()).
	 */
	if ((dmfep->opmode & START_TRANSMIT) == 0)
		return (0);

	/*
	 * "Send" the setup frame.  If it fails (e.g. no resources),
	 * set a flag; then the factotum will retry the "send".  Once
	 * it works, we can clear the flag no matter how many attempts
	 * had previously failed.  We tell the caller that it worked
	 * whether it did or not; after all, it *will* work eventually.
	 */
	status = dmfe_send_msg(dmfep, NULL);
	dmfep->need_setup = status ? B_FALSE : B_TRUE;
	return (0);
}

/*
 * dmfe_m_unicst() -- set the physical network address
 */
static int
dmfe_m_unicst(void *arg, const uint8_t *macaddr)
{
	dmfe_t *dmfep = arg;
	int status;
	int index;

	/*
	 * Update our current address and send out a new setup packet
	 *
	 * Here we accommodate the use of HASH_ONLY or HASH_AND_PERFECT
	 * filtering modes (we don't support PERFECT_ONLY or INVERSE modes).
	 *
	 * It is said that there is a bug in the 21140 where it fails to
	 * receive packets addressed to the specified perfect filter address.
	 * If the same bug is present in the DM9102A, the TX_FILTER_TYPE1
	 * bit should be set in the module variable dmfe_setup_desc1.
	 *
	 * If TX_FILTER_TYPE1 is set, we will use HASH_ONLY filtering.
	 * In this mode, *all* incoming addresses are hashed and looked
	 * up in the bitmap described by the setup packet.  Therefore,
	 * the bit representing the station address has to be added to
	 * the table before sending it out.  If the address is changed,
	 * the old entry should be removed before the new entry is made.
	 *
	 * NOTE: in this mode, unicast packets that are not intended for
	 * this station may be received; it is up to software to filter
	 * them out afterwards!
	 *
	 * If TX_FILTER_TYPE1 is *not* set, we will use HASH_AND_PERFECT
	 * filtering.  In this mode, multicast addresses are hashed and
	 * checked against the bitmap, while unicast addresses are simply
	 * matched against the one physical address specified in the setup
	 * packet.  This means that we shouldn't receive unicast packets
	 * that aren't intended for us (but software still has to filter
	 * multicast packets just the same).
	 *
	 * Whichever mode we're using, we have to enter the broadcast
	 * address into the multicast filter map too, so we do this on
	 * the first time through after attach or reset.
	 */
	mutex_enter(dmfep->oplock);

	if (dmfep->addr_set && dmfe_setup_desc1 & TX_FILTER_TYPE1)
		(void) dmfe_update_mcast(dmfep, dmfep->curr_addr, B_FALSE);
	if (dmfe_setup_desc1 & TX_FILTER_TYPE1)
		(void) dmfe_update_mcast(dmfep, macaddr, B_TRUE);
	if (!dmfep->addr_set)
		(void) dmfe_update_mcast(dmfep, dmfe_broadcast_addr, B_TRUE);

	/*
	 * Remember the new current address
	 */
	ethaddr_copy(macaddr, dmfep->curr_addr);
	dmfep->addr_set = B_TRUE;

	/*
	 * Install the new physical address into the proper position in
	 * the setup frame; this is only used if we select hash+perfect
	 * filtering, but we'll put it in anyway.  The ugliness here is
	 * down to the usual war of the egg :(
	 */
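	/*
	 * Each 32-bit word of the setup frame carries just two bytes
	 * of the address in its low 16 bits, low byte first - the
	 * same 16-valid-bits-per-word layout as the hash table.
	 */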
	for (index = 0; index < ETHERADDRL; index += 2)
		dmfe_setup_put32(&dmfep->tx_desc, SETUPBUF_PHYS+index/2,
		    (macaddr[index+1] << 8) | macaddr[index]);

	/*
	 * Finally, we're ready to "transmit" the setup frame
	 */
	status = dmfe_send_setup(dmfep);
	mutex_exit(dmfep->oplock);

	return (status);
}

/*
 * dmfe_m_multicst() -- enable or disable a multicast address
 *
 * Program the hardware to enable/disable the multicast address
 * in "mca" (enable if add is true, otherwise disable it.)
 * We keep a refcount for each bit in the map, so that it still
 * works out properly if multiple addresses hash to the same bit.
 * dmfe_update_mcast() tells us whether the map actually changed;
 * if so, we have to re-"transmit" the magic setup packet.
 */
static int
dmfe_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
{
	dmfe_t *dmfep = arg;		/* private device info */
	int status = 0;

	mutex_enter(dmfep->oplock);
	if (dmfe_update_mcast(dmfep, mca, add))
		status = dmfe_send_setup(dmfep);
	mutex_exit(dmfep->oplock);

	return (status);
}


/*
 * ========== Internal state management entry points ==========
 */

/*
 * These routines provide all the functionality required by the
 * corresponding MAC layer entry points, but don't update the MAC layer state
 * so they can be called internally without disturbing our record
 * of what MAC layer thinks we should be doing ...
 */

/*
 * dmfe_stop() -- stop processing, don't reset h/w or rings
 */
static void
dmfe_stop(dmfe_t *dmfep)
{
	ASSERT(mutex_owned(dmfep->oplock));

	dmfe_stop_chip(dmfep, CHIP_STOPPED);
}

/*
 * dmfe_reset() -- stop processing, reset h/w & rings to initial state
 */
static void
dmfe_reset(dmfe_t *dmfep)
{
	ASSERT(mutex_owned(dmfep->oplock));
	ASSERT(mutex_owned(dmfep->rxlock));
	ASSERT(mutex_owned(dmfep->txlock));

	dmfe_stop_chip(dmfep, CHIP_RESET);
	dmfe_init_rings(dmfep);
}

/*
 * dmfe_start() -- start transmitting/receiving
 */
static void
dmfe_start(dmfe_t *dmfep)
{
	uint32_t gpsr;

	ASSERT(mutex_owned(dmfep->oplock));

	ASSERT(dmfep->chip_state == CHIP_RESET ||
	    dmfep->chip_state == CHIP_STOPPED);

	/*
	 * Make opmode consistent with PHY duplex setting
	 */
	gpsr = dmfe_chip_get32(dmfep, PHY_STATUS_REG);
	if (gpsr & GPS_FULL_DUPLEX)
		dmfep->opmode |= FULL_DUPLEX;
	else
		dmfep->opmode &= ~FULL_DUPLEX;

	/*
	 * Start transmit processing
	 * Set up the address filters
	 * Start receive processing
	 * Enable interrupts
	 */
	dmfe_start_chip(dmfep, START_TRANSMIT);
	(void) dmfe_send_setup(dmfep);
	drv_usecwait(10);
	dmfe_start_chip(dmfep, START_RECEIVE);
	dmfe_enable_interrupts(dmfep);
}

/*
 * dmfe_restart - restart transmitting/receiving after error or suspend
 */
static void
dmfe_restart(dmfe_t *dmfep)
{
	ASSERT(mutex_owned(dmfep->oplock));

	/*
	 * You need not only <oplock>, but also <rxlock> AND <txlock>
	 * in order to reset the rings, but then <txlock> *mustn't*
	 * be held across the call to dmfe_start()
	 */
	mutex_enter(dmfep->rxlock);
	mutex_enter(dmfep->txlock);
	dmfe_reset(dmfep);
	mutex_exit(dmfep->txlock);
	mutex_exit(dmfep->rxlock);
	if (dmfep->mac_state == DMFE_MAC_STARTED) {
		dmfe_start(dmfep);
	}
}


/*
 * ========== MAC-required management entry points ==========
 */

/*
 * dmfe_m_stop() -- stop transmitting/receiving
 */
static void
dmfe_m_stop(void *arg)
{
	dmfe_t *dmfep = arg;		/* private device info */

	/*
	 * Just stop processing, then record new MAC state
	 */
	mii_stop(dmfep->mii);

	mutex_enter(dmfep->oplock);
	if (!dmfep->suspended)
		dmfe_stop(dmfep);
	dmfep->mac_state = DMFE_MAC_STOPPED;
	mutex_exit(dmfep->oplock);
}

/*
 * dmfe_m_start() -- start transmitting/receiving
 */
static int
dmfe_m_start(void *arg)
{
	dmfe_t *dmfep = arg;		/* private device info */

	/*
	 * Start processing and record new MAC state
	 */
	mutex_enter(dmfep->oplock);
	if (!dmfep->suspended)
		dmfe_start(dmfep);
	dmfep->mac_state = DMFE_MAC_STARTED;
	mutex_exit(dmfep->oplock);

	mii_start(dmfep->mii);

	return (0);
}

/*
 * dmfe_m_promisc() -- set or reset promiscuous mode on the board
 *
 * Program the hardware to enable/disable promiscuous and/or
 * receive-all-multicast modes.  Davicom don't document this
 * clearly, but it looks like we can do this on-the-fly (i.e.
 * without stopping & restarting the TX/RX processes).
 */
static int
dmfe_m_promisc(void *arg, boolean_t on)
{
	dmfe_t *dmfep = arg;

	mutex_enter(dmfep->oplock);
	dmfep->opmode &= ~(PROMISC_MODE | PASS_MULTICAST);
	if (on)
		dmfep->opmode |= PROMISC_MODE;
	if (!dmfep->suspended)
		dmfe_set_opmode(dmfep);
	mutex_exit(dmfep->oplock);

	return (0);
}

/*
 * ========== Factotum, implemented as a softint handler ==========
 */

/*
 * The factotum is woken up when there's something to do that we'd rather
 * not do from inside a (high-level?) hardware interrupt handler.  Its
 * two main tasks are:
 *	reset & restart the chip after an error
 *	update & restart the chip after a link status change
 */
static uint_t
dmfe_factotum(caddr_t arg)
{
	dmfe_t *dmfep;

	dmfep = (void *)arg;
	ASSERT(dmfep->dmfe_guard == DMFE_GUARD);

	mutex_enter(dmfep->oplock);
	if (dmfep->suspended) {
		mutex_exit(dmfep->oplock);
		return (DDI_INTR_CLAIMED);
	}

	dmfep->factotum_flag = 0;
	DRV_KS_INC(dmfep, KS_FACTOTUM_RUN);

	/*
	 * Check for chip error ...
	 */
	if (dmfep->chip_state == CHIP_ERROR) {
		/*
		 * Error recovery required: reset the chip and the rings,
		 * then, if it's supposed to be running, kick it off again.
		 */
		DRV_KS_INC(dmfep, KS_RECOVERY);
		dmfe_restart(dmfep);
		mutex_exit(dmfep->oplock);

		mii_reset(dmfep->mii);

	} else if (dmfep->need_setup) {
		(void) dmfe_send_setup(dmfep);
		mutex_exit(dmfep->oplock);
	} else {
		mutex_exit(dmfep->oplock);
	}

	return (DDI_INTR_CLAIMED);
}

static void
dmfe_wake_factotum(dmfe_t *dmfep, int ks_id, const char *why)
{
	_NOTE(ARGUNUSED(why));
	ASSERT(mutex_owned(dmfep->oplock));
	DRV_KS_INC(dmfep, ks_id);

	if (dmfep->factotum_flag++ == 0)
		ddi_trigger_softintr(dmfep->factotum_id);
}


/*
 * ========== Periodic Tasks (Cyclic handler & friends) ==========
 */

/*
 * Periodic tick tasks, run from the cyclic handler
 *
 * Check for TX stall; flag an error and wake the factotum if so.
 */
static void
dmfe_tick_stall_check(dmfe_t *dmfep, uint32_t gpsr, uint32_t istat)
{
	boolean_t tx_stall;
	uint32_t tx_state;
	uint32_t limit;

	ASSERT(mutex_owned(dmfep->oplock));

	/*
	 * Check for transmit stall ...
	 *
	 * IF there's at least one packet in the ring, AND the timeout
	 * has elapsed, AND we can't reclaim any descriptors, THEN we've
	 * stalled; we set <tx_stall> to trigger a reset-and-recover cycle.
	 *
	 * Note that the timeout limit is based on the transmit engine
	 * state; we allow the transmitter longer to make progress in
	 * some states than in others, based on observations of this
	 * chip's actual behaviour in the lab.
	 *
	 * By observation, we find that on about 1 in 10000 passes through
	 * here, the TX lock is already held.  In that case, we'll skip
	 * the check on this pass rather than wait.  Most likely, the send
	 * routine was holding the lock when the interrupt happened, and
	 * we'll succeed next time through.  In the event of a real stall,
	 * the TX ring will fill up, after which the send routine won't be
	 * called any more and then we're sure to get in.
	 */
	tx_stall = B_FALSE;
	if (mutex_tryenter(dmfep->txlock)) {
		if (dmfep->tx.n_free < dmfep->tx.n_desc) {
			tx_state = TX_PROCESS_STATE(istat);
			if (gpsr & GPS_LINK_100)
				limit = stall_100_tix[tx_state];
			else
				limit = stall_10_tix[tx_state];
			if (++dmfep->tx_pending_tix >= limit &&
			    dmfe_reclaim_tx_desc(dmfep) == B_FALSE) {
				dmfe_log(dmfep, "TX stall detected "
				    "after %d ticks in state %d; "
				    "automatic recovery initiated",
				    dmfep->tx_pending_tix, tx_state);
				tx_stall = B_TRUE;
			}
		}
		mutex_exit(dmfep->txlock);
	}

	if (tx_stall) {
		dmfe_stop_chip(dmfep, CHIP_ERROR);
		dmfe_wake_factotum(dmfep, KS_TX_STALL, "tick (TX stall)");
	}
}

/*
 * Cyclic callback handler
 */
static void
dmfe_cyclic(void *arg)
{
	dmfe_t *dmfep = arg;		/* private device info */
	uint32_t istat;
	uint32_t gpsr;

	/*
	 * If the chip's not RUNNING, there's nothing to do.
	 * If we can't get the mutex straight away, we'll just
	 * skip this pass; we'll be back soon enough anyway.
1720 */
1721 if (mutex_tryenter(dmfep->oplock) == 0)
1722 return;
1723 if ((dmfep->suspended) || (dmfep->chip_state != CHIP_RUNNING)) {
1724 mutex_exit(dmfep->oplock);
1725 return;
1726 }
1727
1728 /*
1729 * Recheck chip state (it might have been stopped since we
1730 * checked above). If still running, call each of the *tick*
1731 * tasks. They will check for link change, TX stall, etc ...
1732 */
1733 if (dmfep->chip_state == CHIP_RUNNING) {
1734 istat = dmfe_chip_get32(dmfep, STATUS_REG);
1735 gpsr = dmfe_chip_get32(dmfep, PHY_STATUS_REG);
1736 dmfe_tick_stall_check(dmfep, gpsr, istat);
1737 }
1738
1739 DRV_KS_INC(dmfep, KS_CYCLIC_RUN);
1740 mutex_exit(dmfep->oplock);
1741 }
1742
1743 /*
1744 * ========== Hardware interrupt handler ==========
1745 */
1746
1747 /*
1748 * dmfe_interrupt() -- handle chip interrupts
1749 */
1750 static uint_t
dmfe_interrupt(caddr_t arg)1751 dmfe_interrupt(caddr_t arg)
1752 {
1753 dmfe_t *dmfep; /* private device info */
1754 uint32_t interrupts;
1755 uint32_t istat;
1756 const char *msg;
1757 mblk_t *mp;
1758 boolean_t warning_msg = B_TRUE;
1759
1760 dmfep = (void *)arg;
1761
1762 mutex_enter(dmfep->oplock);
1763 if (dmfep->suspended) {
1764 mutex_exit(dmfep->oplock);
1765 return (DDI_INTR_UNCLAIMED);
1766 }
1767
1768 /*
1769 * A quick check as to whether the interrupt was from this
1770 * device, before we even finish setting up all our local
1771 * variables. Note that reading the interrupt status register
1772 * doesn't have any unpleasant side effects such as clearing
1773 * the bits read, so it's quite OK to re-read it once we have
1774 * determined that we are going to service this interrupt and
1775 * grabbed the mutexen.
1776 */
1777 istat = dmfe_chip_get32(dmfep, STATUS_REG);
1778 if ((istat & (NORMAL_SUMMARY_INT | ABNORMAL_SUMMARY_INT)) == 0) {
1779
1780 mutex_exit(dmfep->oplock);
1781 return (DDI_INTR_UNCLAIMED);
1782 }
1783
1784 DRV_KS_INC(dmfep, KS_INTERRUPT);
1785
1786 /*
1787 * Identify bits that represent enabled interrupts ...
1788 */
1789 istat |= dmfe_chip_get32(dmfep, STATUS_REG);
1790 interrupts = istat & dmfep->imask;
1791 ASSERT(interrupts != 0);

	DTRACE_PROBE1(intr, uint32_t, istat);

	/*
	 * Check for any interrupts other than TX/RX done.
	 * If there are any, they are considered Abnormal
	 * and will cause the chip to be reset.
	 */
	if (interrupts & ~(RX_PKTDONE_INT | TX_PKTDONE_INT)) {
		if (istat & ABNORMAL_SUMMARY_INT) {
			/*
			 * Any Abnormal interrupts will lead to us
			 * resetting the chip, so we don't bother
			 * to clear each interrupt individually.
			 *
			 * Our main task here is to identify the problem,
			 * by pointing out the most significant unexpected
			 * bit. Additional bits may well be consequences
			 * of the first problem, so we consider the possible
			 * causes in order of severity.
			 */
			if (interrupts & SYSTEM_ERR_INT) {
				switch (istat & SYSTEM_ERR_BITS) {
				case SYSTEM_ERR_M_ABORT:
					msg = "Bus Master Abort";
					break;

				case SYSTEM_ERR_T_ABORT:
					msg = "Bus Target Abort";
					break;

				case SYSTEM_ERR_PARITY:
					msg = "Parity Error";
					break;

				default:
					msg = "Unknown System Bus Error";
					break;
				}
			} else if (interrupts & RX_STOPPED_INT) {
				msg = "RX process stopped";
			} else if (interrupts & RX_UNAVAIL_INT) {
				msg = "RX buffer unavailable";
				warning_msg = B_FALSE;
			} else if (interrupts & RX_WATCHDOG_INT) {
				msg = "RX watchdog timeout?";
			} else if (interrupts & RX_EARLY_INT) {
				msg = "RX early interrupt?";
			} else if (interrupts & TX_STOPPED_INT) {
				msg = "TX process stopped";
			} else if (interrupts & TX_JABBER_INT) {
				msg = "TX jabber timeout";
			} else if (interrupts & TX_UNDERFLOW_INT) {
				msg = "TX underflow?";
			} else if (interrupts & TX_EARLY_INT) {
				msg = "TX early interrupt?";
			} else if (interrupts & LINK_STATUS_INT) {
				msg = "Link status change?";
			} else if (interrupts & GP_TIMER_INT) {
				msg = "Timer expired?";
			}

			if (warning_msg && msg != NULL)
				dmfe_warning(dmfep, "abnormal interrupt, "
				    "status 0x%x: %s", istat, msg);

			/*
			 * We don't want to run the entire reinitialisation
			 * code out of this (high-level?) interrupt, so we
			 * simply STOP the chip, and wake up the factotum
			 * to reinitialise it ...
			 */
			dmfe_stop_chip(dmfep, CHIP_ERROR);
			dmfe_wake_factotum(dmfep, KS_CHIP_ERROR,
			    "interrupt (error)");
		} else {
			/*
			 * We shouldn't really get here (it would mean
			 * there were some unprocessed enabled bits but
			 * they weren't Abnormal?), but we'll check just
			 * in case ...
			 */
			DTRACE_PROBE1(intr__unexpected, uint32_t, istat);
		}
	}

	/*
	 * Acknowledge all the original bits - except in the case of an
	 * error, when we leave them unacknowledged so that the recovery
	 * code can see what was going on when the problem occurred ...
	 */
	if (dmfep->chip_state != CHIP_ERROR) {
		(void) dmfe_chip_put32(dmfep, STATUS_REG, istat);
		/*
		 * Read-after-write forces completion on the PCI bus.
		 */
		(void) dmfe_chip_get32(dmfep, STATUS_REG);
	}

	/*
	 * We've finished talking to the chip, so we can drop <oplock>
	 * before handling the normal interrupts, which only involve
	 * manipulation of descriptors ...
	 */
	mutex_exit(dmfep->oplock);

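	/*
	 * RX-done: dmfe_getp() collects any newly-received packets
	 * from the RX ring as an mblk chain, which we hand up to MAC.
	 */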
	if (interrupts & RX_PKTDONE_INT)
		if ((mp = dmfe_getp(dmfep)) != NULL)
			mac_rx(dmfep->mh, NULL, mp);

	if (interrupts & TX_PKTDONE_INT) {
		/*
		 * The only reason for taking this interrupt is to give
		 * MAC a chance to schedule queued packets after a
		 * ring-full condition. To minimise the number of
		 * redundant TX-Done interrupts, we only mark two of the
		 * ring descriptors as 'interrupt-on-complete' - all the
		 * others are simply handed back without an interrupt.
		 */
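		/*
		 * mutex_tryenter() is used so that we never block at
		 * interrupt level; if <txlock> is busy, whoever holds
		 * it will do the descriptor reclaim instead.
		 */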
		if (dmfe_reclaim_on_done && mutex_tryenter(dmfep->txlock)) {
			(void) dmfe_reclaim_tx_desc(dmfep);
			mutex_exit(dmfep->txlock);
		}
		mac_tx_update(dmfep->mh);
	}

	return (DDI_INTR_CLAIMED);
}

/*
 * ========== Statistics update handler ==========
 */

static int
dmfe_m_stat(void *arg, uint_t stat, uint64_t *val)
{
	dmfe_t *dmfep = arg;
	int rv = 0;

	/* Let MII handle its own stats. */
	if (mii_m_getstat(dmfep->mii, stat, val) == 0) {
		return (0);
	}

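	/*
	 * Take all three locks, in the driver's usual order (oplock,
	 * then rxlock, then txlock), so that we read a consistent
	 * snapshot of the counters.
	 */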
	mutex_enter(dmfep->oplock);
	mutex_enter(dmfep->rxlock);
	mutex_enter(dmfep->txlock);

	/* make sure we have all the stats collected */
	(void) dmfe_reclaim_tx_desc(dmfep);

	switch (stat) {

	case MAC_STAT_IPACKETS:
		*val = dmfep->rx_stats_ipackets;
		break;

	case MAC_STAT_MULTIRCV:
		*val = dmfep->rx_stats_multi;
		break;

	case MAC_STAT_BRDCSTRCV:
		*val = dmfep->rx_stats_bcast;
		break;

	case MAC_STAT_RBYTES:
		*val = dmfep->rx_stats_rbytes;
		break;

	case MAC_STAT_IERRORS:
		*val = dmfep->rx_stats_ierrors;
		break;

	case MAC_STAT_NORCVBUF:
		*val = dmfep->rx_stats_norcvbuf;
		break;

	case MAC_STAT_COLLISIONS:
		*val = dmfep->tx_stats_collisions;
		break;

	case MAC_STAT_OERRORS:
		*val = dmfep->tx_stats_oerrors;
		break;

	case MAC_STAT_OPACKETS:
		*val = dmfep->tx_stats_opackets;
		break;

	case MAC_STAT_MULTIXMT:
		*val = dmfep->tx_stats_multi;
		break;

	case MAC_STAT_BRDCSTXMT:
		*val = dmfep->tx_stats_bcast;
		break;

	case MAC_STAT_OBYTES:
		*val = dmfep->tx_stats_obytes;
		break;

	case MAC_STAT_OVERFLOWS:
		*val = dmfep->rx_stats_overflow;
		break;

	case MAC_STAT_UNDERFLOWS:
		*val = dmfep->tx_stats_underflow;
		break;

	case ETHER_STAT_ALIGN_ERRORS:
		*val = dmfep->rx_stats_align;
		break;

	case ETHER_STAT_FCS_ERRORS:
		*val = dmfep->rx_stats_fcs;
		break;

	case ETHER_STAT_TOOLONG_ERRORS:
		*val = dmfep->rx_stats_toolong;
		break;

	case ETHER_STAT_TOOSHORT_ERRORS:
		*val = dmfep->rx_stats_short;
		break;

	case ETHER_STAT_MACRCV_ERRORS:
		*val = dmfep->rx_stats_macrcv_errors;
		break;

	case ETHER_STAT_MACXMT_ERRORS:
		*val = dmfep->tx_stats_macxmt_errors;
		break;

	case ETHER_STAT_JABBER_ERRORS:
		*val = dmfep->tx_stats_jabber;
		break;

	case ETHER_STAT_CARRIER_ERRORS:
		*val = dmfep->tx_stats_nocarrier;
		break;

	case ETHER_STAT_TX_LATE_COLLISIONS:
		*val = dmfep->tx_stats_xmtlatecoll;
		break;

	case ETHER_STAT_EX_COLLISIONS:
		*val = dmfep->tx_stats_excoll;
		break;

	case ETHER_STAT_DEFER_XMTS:
		*val = dmfep->tx_stats_defer;
		break;

	case ETHER_STAT_FIRST_COLLISIONS:
		*val = dmfep->tx_stats_first_coll;
		break;

	case ETHER_STAT_MULTI_COLLISIONS:
		*val = dmfep->tx_stats_multi_coll;
		break;

	default:
		rv = ENOTSUP;
	}

	mutex_exit(dmfep->txlock);
	mutex_exit(dmfep->rxlock);
	mutex_exit(dmfep->oplock);

	return (rv);
}

/*
 * ========== Ioctl handler & subfunctions ==========
 */

static lb_property_t dmfe_loopmodes[] = {
	{ normal,	"normal",	0 },
	{ internal,	"Internal",	1 },
	{ external,	"External",	2 },
};
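
/*
 * The third field of each entry above is the mode value that
 * LB_GET_MODE reports and LB_SET_MODE accepts for that mode; the
 * switch statements in dmfe_m_ioctl() must stay in step with it.
 */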

/*
 * Specific dmfe IOCTLs, the mac module handles the generic ones.
 * Unfortunately, the DM9102 doesn't seem to work well with MII-based
 * loopback, so we have to do something special for it.
 */

static void
dmfe_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
{
	dmfe_t *dmfep = arg;
	struct iocblk *iocp;
	int rv = 0;
	lb_info_sz_t sz;
	int cmd;
	uint32_t mode;

	iocp = (void *)mp->b_rptr;
	cmd = iocp->ioc_cmd;

	if (mp->b_cont == NULL) {
		/*
		 * All of these ioctls need data!
		 */
		miocnak(wq, mp, 0, EINVAL);
		return;
	}

	switch (cmd) {
	case LB_GET_INFO_SIZE:
		if (iocp->ioc_count != sizeof (sz)) {
			rv = EINVAL;
		} else {
			sz = sizeof (dmfe_loopmodes);
			bcopy(&sz, mp->b_cont->b_rptr, sizeof (sz));
		}
		break;

	case LB_GET_INFO:
		if (iocp->ioc_count != sizeof (dmfe_loopmodes)) {
			rv = EINVAL;
		} else {
			bcopy(dmfe_loopmodes, mp->b_cont->b_rptr,
			    iocp->ioc_count);
		}
		break;

	case LB_GET_MODE:
		if (iocp->ioc_count != sizeof (mode)) {
			rv = EINVAL;
		} else {
			mutex_enter(dmfep->oplock);
			switch (dmfep->opmode & LOOPBACK_MODE_MASK) {
			case LOOPBACK_OFF:
				mode = 0;
				break;
			case LOOPBACK_INTERNAL:
				mode = 1;
				break;
			default:
				mode = 2;
				break;
			}
			mutex_exit(dmfep->oplock);
			bcopy(&mode, mp->b_cont->b_rptr, sizeof (mode));
		}
		break;

	case LB_SET_MODE:
		rv = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
		if (rv != 0)
			break;
		if (iocp->ioc_count != sizeof (mode)) {
			rv = EINVAL;
			break;
		}
		bcopy(mp->b_cont->b_rptr, &mode, sizeof (mode));

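		/*
		 * Map the requested index (see dmfe_loopmodes above)
		 * onto the chip's loopback opmode bits, then restart
		 * with the new setting.
		 */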
		mutex_enter(dmfep->oplock);
		dmfep->opmode &= ~LOOPBACK_MODE_MASK;
		switch (mode) {
		case 2:
			dmfep->opmode |= LOOPBACK_PHY_D;
			break;
		case 1:
			dmfep->opmode |= LOOPBACK_INTERNAL;
			break;
		default:
			break;
		}
		if (!dmfep->suspended) {
			dmfe_restart(dmfep);
		}
		mutex_exit(dmfep->oplock);
		break;

	default:
		rv = EINVAL;
		break;
	}

	if (rv == 0) {
		miocack(wq, mp, iocp->ioc_count, 0);
	} else {
		miocnak(wq, mp, 0, rv);
	}
}

int
dmfe_m_getprop(void *arg, const char *name, mac_prop_id_t num, uint_t sz,
    void *val)
{
	dmfe_t *dmfep = arg;

	return (mii_m_getprop(dmfep->mii, name, num, sz, val));
}

int
dmfe_m_setprop(void *arg, const char *name, mac_prop_id_t num, uint_t sz,
    const void *val)
{
	dmfe_t *dmfep = arg;

	return (mii_m_setprop(dmfep->mii, name, num, sz, val));
}

static void
dmfe_m_propinfo(void *arg, const char *name, mac_prop_id_t num,
    mac_prop_info_handle_t mph)
{
	dmfe_t *dmfep = arg;

	mii_m_propinfo(dmfep->mii, name, num, mph);
}

/*
 * ========== Per-instance setup/teardown code ==========
 */

/*
 * Determine local MAC address & broadcast address for this interface
 */
static void
dmfe_find_mac_address(dmfe_t *dmfep)
{
	uchar_t *prop;
	uint_t propsize;
	int err;

	/*
	 * We have to find the "vendor's factory-set address". This is
	 * the value of the property "local-mac-address", as set by OBP
	 * (or a .conf file!)
	 *
	 * If the property is not there, then we try to find the factory
	 * MAC address in the device's serial EEPROM.
	 */
	bzero(dmfep->curr_addr, sizeof (dmfep->curr_addr));
	err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, dmfep->devinfo,
	    DDI_PROP_DONTPASS, localmac_propname, &prop, &propsize);
	if (err == DDI_PROP_SUCCESS) {
		if (propsize == ETHERADDRL)
			ethaddr_copy(prop, dmfep->curr_addr);
		ddi_prop_free(prop);
	} else {
		/* no property set... check eeprom */
		dmfe_read_eeprom(dmfep, EEPROM_EN_ADDR, dmfep->curr_addr,
		    ETHERADDRL);
	}
}

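/*
 * Allocate a DMA area using the usual three-step DDI DMA pattern:
 * allocate a handle, allocate suitably-attributed memory, then bind
 * the two together, insisting on a single cookie. The area is laid
 * out as <memsize> bytes of payload, followed by <setup> bytes for
 * a setup frame (if any), followed by <slop> bytes of guard space.
 */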
static int
dmfe_alloc_dma_mem(dmfe_t *dmfep, size_t memsize,
    size_t setup, size_t slop, ddi_device_acc_attr_t *attr_p,
    uint_t dma_flags, dma_area_t *dma_p)
{
	ddi_dma_cookie_t dma_cookie;
	uint_t ncookies;
	int err;

	/*
	 * Allocate handle
	 */
	err = ddi_dma_alloc_handle(dmfep->devinfo, &dma_attr,
	    DDI_DMA_SLEEP, NULL, &dma_p->dma_hdl);
	if (err != DDI_SUCCESS) {
		dmfe_error(dmfep, "DMA handle allocation failed");
		return (DDI_FAILURE);
	}

	/*
	 * Allocate memory
	 */
	err = ddi_dma_mem_alloc(dma_p->dma_hdl, memsize + setup + slop,
	    attr_p, dma_flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING),
	    DDI_DMA_SLEEP, NULL,
	    &dma_p->mem_va, &dma_p->alength, &dma_p->acc_hdl);
	if (err != DDI_SUCCESS) {
		dmfe_error(dmfep, "DMA memory allocation failed: %d", err);
		return (DDI_FAILURE);
	}

	/*
	 * Bind the two together
	 */
	err = ddi_dma_addr_bind_handle(dma_p->dma_hdl, NULL,
	    dma_p->mem_va, dma_p->alength, dma_flags,
	    DDI_DMA_SLEEP, NULL, &dma_cookie, &ncookies);
	if (err != DDI_DMA_MAPPED) {
		dmfe_error(dmfep, "DMA mapping failed: %d", err);
		return (DDI_FAILURE);
	}
	if ((dma_p->ncookies = ncookies) != 1) {
		dmfe_error(dmfep, "Too many DMA cookies: %d", ncookies);
		return (DDI_FAILURE);
	}

	dma_p->mem_dvma = dma_cookie.dmac_address;
	if (setup > 0) {
		dma_p->setup_dvma = dma_p->mem_dvma + memsize;
		dma_p->setup_va = dma_p->mem_va + memsize;
	} else {
		dma_p->setup_dvma = 0;
		dma_p->setup_va = NULL;
	}

	return (DDI_SUCCESS);
}

/*
 * This function allocates the transmit and receive buffers and descriptors.
 */
static int
dmfe_alloc_bufs(dmfe_t *dmfep)
{
	size_t memsize;
	int err;

	/*
	 * Allocate memory & handles for TX descriptor ring
	 */
	memsize = dmfep->tx.n_desc * sizeof (struct tx_desc_type);
	err = dmfe_alloc_dma_mem(dmfep, memsize, SETUPBUF_SIZE, DMFE_SLOP,
	    &dmfe_reg_accattr, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &dmfep->tx_desc);
	if (err != DDI_SUCCESS) {
		dmfe_error(dmfep, "TX descriptor allocation failed");
		return (DDI_FAILURE);
	}

	/*
	 * Allocate memory & handles for TX buffers
	 */
	memsize = dmfep->tx.n_desc * DMFE_BUF_SIZE;
	err = dmfe_alloc_dma_mem(dmfep, memsize, 0, 0,
	    &dmfe_data_accattr, DDI_DMA_WRITE | DMFE_DMA_MODE,
	    &dmfep->tx_buff);
	if (err != DDI_SUCCESS) {
		dmfe_error(dmfep, "TX buffer allocation failed");
		return (DDI_FAILURE);
	}

	/*
	 * Allocate memory & handles for RX descriptor ring
	 */
	memsize = dmfep->rx.n_desc * sizeof (struct rx_desc_type);
	err = dmfe_alloc_dma_mem(dmfep, memsize, 0, DMFE_SLOP,
	    &dmfe_reg_accattr, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &dmfep->rx_desc);
	if (err != DDI_SUCCESS) {
		dmfe_error(dmfep, "RX descriptor allocation failed");
		return (DDI_FAILURE);
	}

	/*
	 * Allocate memory & handles for RX buffers
	 */
	memsize = dmfep->rx.n_desc * DMFE_BUF_SIZE;
	err = dmfe_alloc_dma_mem(dmfep, memsize, 0, 0,
	    &dmfe_data_accattr, DDI_DMA_READ | DMFE_DMA_MODE, &dmfep->rx_buff);
	if (err != DDI_SUCCESS) {
		dmfe_error(dmfep, "RX buffer allocation failed");
		return (DDI_FAILURE);
	}

	/*
	 * Allocate bitmasks for tx packet type tracking
	 */
	dmfep->tx_mcast = kmem_zalloc(dmfep->tx.n_desc / NBBY, KM_SLEEP);
	dmfep->tx_bcast = kmem_zalloc(dmfep->tx.n_desc / NBBY, KM_SLEEP);
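	/*
	 * Note: sizing these bitmasks as (n_desc / NBBY) bytes assumes
	 * that the number of TX descriptors is a multiple of NBBY (8).
	 */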

	return (DDI_SUCCESS);
}

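/*
 * Release a DMA area, undoing the steps of dmfe_alloc_dma_mem() in
 * reverse: unbind the handle, free the handle, then free the memory.
 * The NULL checks make this safe to call on an area that was never
 * fully set up.
 */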
static void
dmfe_free_dma_mem(dma_area_t *dma_p)
{
	if (dma_p->dma_hdl != NULL) {
		if (dma_p->ncookies) {
			(void) ddi_dma_unbind_handle(dma_p->dma_hdl);
			dma_p->ncookies = 0;
		}
		ddi_dma_free_handle(&dma_p->dma_hdl);
		dma_p->dma_hdl = NULL;
		dma_p->mem_dvma = 0;
		dma_p->setup_dvma = 0;
	}

	if (dma_p->acc_hdl != NULL) {
		ddi_dma_mem_free(&dma_p->acc_hdl);
		dma_p->acc_hdl = NULL;
		dma_p->mem_va = NULL;
		dma_p->setup_va = NULL;
	}
}

/*
 * This routine frees the transmit and receive buffers and descriptors.
 * Make sure the chip is stopped before calling it!
 */
static void
dmfe_free_bufs(dmfe_t *dmfep)
{
	dmfe_free_dma_mem(&dmfep->rx_buff);
	dmfe_free_dma_mem(&dmfep->rx_desc);
	dmfe_free_dma_mem(&dmfep->tx_buff);
	dmfe_free_dma_mem(&dmfep->tx_desc);
	if (dmfep->tx_mcast)
		kmem_free(dmfep->tx_mcast, dmfep->tx.n_desc / NBBY);
	if (dmfep->tx_bcast)
		kmem_free(dmfep->tx_bcast, dmfep->tx.n_desc / NBBY);
}

static void
dmfe_unattach(dmfe_t *dmfep)
{
	/*
	 * Clean up and free all DMFE data structures
	 */
	if (dmfep->cycid != NULL) {
		ddi_periodic_delete(dmfep->cycid);
		dmfep->cycid = NULL;
	}

	if (dmfep->ksp_drv != NULL)
		kstat_delete(dmfep->ksp_drv);
	if (dmfep->progress & PROGRESS_HWINT) {
		ddi_remove_intr(dmfep->devinfo, 0, dmfep->iblk);
	}
	if (dmfep->progress & PROGRESS_SOFTINT)
		ddi_remove_softintr(dmfep->factotum_id);
	if (dmfep->mii != NULL)
		mii_free(dmfep->mii);
	if (dmfep->progress & PROGRESS_MUTEX) {
		mutex_destroy(dmfep->txlock);
		mutex_destroy(dmfep->rxlock);
		mutex_destroy(dmfep->oplock);
	}
	dmfe_free_bufs(dmfep);
	if (dmfep->io_handle != NULL)
		ddi_regs_map_free(&dmfep->io_handle);

	kmem_free(dmfep, sizeof (*dmfep));
}

static int
dmfe_config_init(dmfe_t *dmfep, chip_id_t *idp)
{
	ddi_acc_handle_t handle;
	uint32_t regval;

	if (pci_config_setup(dmfep->devinfo, &handle) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * Get vendor/device/revision. We expect (but don't check) that
	 * (vendorid == DAVICOM_VENDOR_ID) && (deviceid == DEVICE_ID_9102)
	 */
	idp->vendor = pci_config_get16(handle, PCI_CONF_VENID);
	idp->device = pci_config_get16(handle, PCI_CONF_DEVID);
	idp->revision = pci_config_get8(handle, PCI_CONF_REVID);

	/*
	 * Turn on Bus Master Enable bit and ensure the device is not asleep
	 */
	regval = pci_config_get32(handle, PCI_CONF_COMM);
	pci_config_put32(handle, PCI_CONF_COMM, (regval | PCI_COMM_ME));

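	/*
	 * Clear the SLEEP and SNOOZE bits in the Davicom-specific
	 * CFDD configuration register, so the chip is fully awake
	 * before we try to use it.
	 */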
	regval = pci_config_get32(handle, PCI_DMFE_CONF_CFDD);
	pci_config_put32(handle, PCI_DMFE_CONF_CFDD,
	    regval & ~(CFDD_SLEEP | CFDD_SNOOZE));

	pci_config_teardown(&handle);
	return (DDI_SUCCESS);
}

struct ks_index {
	int index;
	char *name;
};

static const struct ks_index ks_drv_names[] = {
	{ KS_INTERRUPT, "intr" },
	{ KS_CYCLIC_RUN, "cyclic_run" },

	{ KS_TX_STALL, "tx_stall_detect" },
	{ KS_CHIP_ERROR, "chip_error_interrupt" },

	{ KS_FACTOTUM_RUN, "factotum_run" },
	{ KS_RECOVERY, "factotum_recover" },

	{ -1, NULL }
};

static void
dmfe_init_kstats(dmfe_t *dmfep, int instance)
{
	kstat_t *ksp;
	kstat_named_t *knp;
	const struct ks_index *ksip;

	/* no need to create MII stats, the mac module already does it */

	/* Create and initialise driver-defined kstats */
	ksp = kstat_create(DRIVER_NAME, instance, "dmfe_events", "net",
	    KSTAT_TYPE_NAMED, KS_DRV_COUNT, KSTAT_FLAG_PERSISTENT);
	if (ksp != NULL) {
		for (knp = ksp->ks_data, ksip = ks_drv_names;
		    ksip->name != NULL; ++ksip) {
			kstat_named_init(&knp[ksip->index], ksip->name,
			    KSTAT_DATA_UINT64);
		}
		dmfep->ksp_drv = ksp;
		dmfep->knp_drv = knp;
		kstat_install(ksp);
	} else {
		dmfe_error(dmfep, "kstat_create() for dmfe_events failed");
	}
}

static int
dmfe_resume(dev_info_t *devinfo)
{
	dmfe_t *dmfep;			/* Our private data */
	chip_id_t chipid;
	boolean_t restart = B_FALSE;

	dmfep = ddi_get_driver_private(devinfo);
	if (dmfep == NULL)
		return (DDI_FAILURE);

	/*
	 * Refuse to resume if the data structures aren't consistent
	 */
	if (dmfep->devinfo != devinfo)
		return (DDI_FAILURE);

	/*
	 * Refuse to resume if the chip's changed its identity (*boggle*)
	 */
	if (dmfe_config_init(dmfep, &chipid) != DDI_SUCCESS)
		return (DDI_FAILURE);
	if (chipid.vendor != dmfep->chipid.vendor)
		return (DDI_FAILURE);
	if (chipid.device != dmfep->chipid.device)
		return (DDI_FAILURE);
	if (chipid.revision != dmfep->chipid.revision)
		return (DDI_FAILURE);

	mutex_enter(dmfep->oplock);
	mutex_enter(dmfep->txlock);
	dmfep->suspended = B_FALSE;
	mutex_exit(dmfep->txlock);

	/*
	 * All OK, reinitialise h/w & kick off MAC scheduling
	 */
	if (dmfep->mac_state == DMFE_MAC_STARTED) {
		dmfe_restart(dmfep);
		restart = B_TRUE;
	}
	mutex_exit(dmfep->oplock);

	if (restart) {
		mii_resume(dmfep->mii);
		mac_tx_update(dmfep->mh);
	}
	return (DDI_SUCCESS);
}

/*
 * attach(9E) -- Attach a device to the system
 *
 * Called once for each board successfully probed.
 */
static int
dmfe_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
{
	mac_register_t *macp;
	dmfe_t *dmfep;			/* Our private data */
	uint32_t csr6;
	int instance;
	int err;

	instance = ddi_get_instance(devinfo);

	switch (cmd) {
	default:
		return (DDI_FAILURE);

	case DDI_RESUME:
		return (dmfe_resume(devinfo));

	case DDI_ATTACH:
		break;
	}

	dmfep = kmem_zalloc(sizeof (*dmfep), KM_SLEEP);
	ddi_set_driver_private(devinfo, dmfep);
	dmfep->devinfo = devinfo;
	dmfep->dmfe_guard = DMFE_GUARD;

	/*
	 * Initialize more fields in DMFE private data
	 * Determine the local MAC address
	 */
#if DMFEDEBUG
	dmfep->debug = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo, 0,
	    debug_propname, dmfe_debug);
#endif	/* DMFEDEBUG */
	dmfep->cycid = NULL;
	(void) snprintf(dmfep->ifname, sizeof (dmfep->ifname), "dmfe%d",
	    instance);

	/*
	 * Check for custom "opmode-reg-value" property;
	 * if none, use the defaults below for CSR6 ...
	 */
	csr6 = TX_THRESHOLD_HI | STORE_AND_FORWARD | EXT_MII_IF | OPN_25_MB1;
	dmfep->opmode = ddi_prop_get_int(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, opmode_propname, csr6);

	/*
	 * Read chip ID & set up config space command register(s)
	 */
	if (dmfe_config_init(dmfep, &dmfep->chipid) != DDI_SUCCESS) {
		dmfe_error(dmfep, "dmfe_config_init() failed");
		goto attach_fail;
	}

	/*
	 * Map operating registers
	 */
	err = ddi_regs_map_setup(devinfo, DMFE_PCI_RNUMBER,
	    &dmfep->io_reg, 0, 0, &dmfe_reg_accattr, &dmfep->io_handle);
	if (err != DDI_SUCCESS) {
		dmfe_error(dmfep, "ddi_regs_map_setup() failed");
		goto attach_fail;
	}

	/*
	 * Get our MAC address.
	 */
	dmfe_find_mac_address(dmfep);

	/*
	 * Allocate the TX and RX descriptors/buffers.
	 */
	dmfep->tx.n_desc = dmfe_tx_desc;
	dmfep->rx.n_desc = dmfe_rx_desc;
	err = dmfe_alloc_bufs(dmfep);
	if (err != DDI_SUCCESS) {
		goto attach_fail;
	}

	/*
	 * Add the softint handler
	 */
	if (ddi_add_softintr(devinfo, DDI_SOFTINT_LOW, &dmfep->factotum_id,
	    NULL, NULL, dmfe_factotum, (caddr_t)dmfep) != DDI_SUCCESS) {
		dmfe_error(dmfep, "ddi_add_softintr() failed");
		goto attach_fail;
	}
	dmfep->progress |= PROGRESS_SOFTINT;

	/*
	 * Add the h/w interrupt handler & initialise mutexen
	 */
	if (ddi_get_iblock_cookie(devinfo, 0, &dmfep->iblk) != DDI_SUCCESS) {
		dmfe_error(dmfep, "ddi_get_iblock_cookie() failed");
		goto attach_fail;
	}

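	/*
	 * The locks shared with the interrupt handler are initialised
	 * with the interrupt block cookie obtained above; <milock> is
	 * not taken at interrupt level, so it needs no cookie.
	 */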
	mutex_init(dmfep->milock, NULL, MUTEX_DRIVER, NULL);
	mutex_init(dmfep->oplock, NULL, MUTEX_DRIVER, dmfep->iblk);
	mutex_init(dmfep->rxlock, NULL, MUTEX_DRIVER, dmfep->iblk);
	mutex_init(dmfep->txlock, NULL, MUTEX_DRIVER, dmfep->iblk);
	dmfep->progress |= PROGRESS_MUTEX;

	if (ddi_add_intr(devinfo, 0, NULL, NULL,
	    dmfe_interrupt, (caddr_t)dmfep) != DDI_SUCCESS) {
		dmfe_error(dmfep, "ddi_add_intr() failed");
		goto attach_fail;
	}
	dmfep->progress |= PROGRESS_HWINT;

	/*
	 * Create & initialise named kstats
	 */
	dmfe_init_kstats(dmfep, instance);

	/*
	 * Reset & initialise the chip and the ring buffers
	 * Initialise the (internal) PHY
	 */
	mutex_enter(dmfep->oplock);
	mutex_enter(dmfep->rxlock);
	mutex_enter(dmfep->txlock);

	dmfe_reset(dmfep);

	/*
	 * Prepare the setup packet
	 */
	bzero(dmfep->tx_desc.setup_va, SETUPBUF_SIZE);
	bzero(dmfep->mcast_refs, MCASTBUF_SIZE);
	dmfep->addr_set = B_FALSE;
	dmfep->opmode &= ~(PROMISC_MODE | PASS_MULTICAST);
	dmfep->mac_state = DMFE_MAC_RESET;

	mutex_exit(dmfep->txlock);
	mutex_exit(dmfep->rxlock);
	mutex_exit(dmfep->oplock);

	if (dmfe_init_phy(dmfep) != B_TRUE)
		goto attach_fail;

	/*
	 * Send a reasonable setup frame. This configures our starting
	 * address and the broadcast address.
	 */
	(void) dmfe_m_unicst(dmfep, dmfep->curr_addr);

	/*
	 * Initialize pointers to device specific functions which
	 * will be used by the generic layer.
	 */
	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
		goto attach_fail;
	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	macp->m_driver = dmfep;
	macp->m_dip = devinfo;
	macp->m_src_addr = dmfep->curr_addr;
	macp->m_callbacks = &dmfe_m_callbacks;
	macp->m_min_sdu = 0;
	macp->m_max_sdu = ETHERMTU;
	macp->m_margin = VLAN_TAGSZ;

	/*
	 * Finally, we're ready to register ourselves with the MAC layer
	 * interface; if this succeeds, we're all ready to start()
	 */
	err = mac_register(macp, &dmfep->mh);
	mac_free(macp);
	if (err != 0)
		goto attach_fail;
	ASSERT(dmfep->dmfe_guard == DMFE_GUARD);

	/*
	 * Install the cyclic callback that we use to check for link
	 * status, transmit stall, etc. The cyclic callback
	 * (dmfe_cyclic()) will then be invoked periodically in kernel
	 * context.
	 */
	ASSERT(dmfep->cycid == NULL);
	dmfep->cycid = ddi_periodic_add(dmfe_cyclic, dmfep,
	    dmfe_tick_us * 1000, DDI_IPL_0);
	return (DDI_SUCCESS);

attach_fail:
	dmfe_unattach(dmfep);
	return (DDI_FAILURE);
}

/*
 * dmfe_suspend() -- suspend transmit/receive for powerdown
 */
static int
dmfe_suspend(dmfe_t *dmfep)
{
	/*
	 * Just stop processing ...
	 */
	mii_suspend(dmfep->mii);
	mutex_enter(dmfep->oplock);
	dmfe_stop(dmfep);

	mutex_enter(dmfep->txlock);
	dmfep->suspended = B_TRUE;
	mutex_exit(dmfep->txlock);
	mutex_exit(dmfep->oplock);

	return (DDI_SUCCESS);
}

/*
 * detach(9E) -- Detach a device from the system
 */
static int
dmfe_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
{
	dmfe_t *dmfep;

	dmfep = ddi_get_driver_private(devinfo);

	switch (cmd) {
	default:
		return (DDI_FAILURE);

	case DDI_SUSPEND:
		return (dmfe_suspend(dmfep));

	case DDI_DETACH:
		break;
	}

	/*
	 * Unregister from the MAC subsystem. This can fail, in
	 * particular if there are DLPI style-2 streams still open -
	 * in which case we just return failure without shutting
	 * down chip operations.
	 */
	if (mac_unregister(dmfep->mh) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * All activity stopped, so we can clean up & exit
	 */
	dmfe_unattach(dmfep);
	return (DDI_SUCCESS);
}

/*
 * ========== Module Loading Data & Entry Points ==========
 */

DDI_DEFINE_STREAM_OPS(dmfe_dev_ops, nulldev, nulldev, dmfe_attach, dmfe_detach,
    nodev, NULL, D_MP, NULL, ddi_quiesce_not_supported);

static struct modldrv dmfe_modldrv = {
	&mod_driverops,		/* Type of module. This one is a driver */
	dmfe_ident,		/* short description */
	&dmfe_dev_ops		/* driver specific ops */
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *)&dmfe_modldrv, NULL
};

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

int
_init(void)
{
	uint32_t tmp100;
	uint32_t tmp10;
	int i;
	int status;

	/* Calculate global timing parameters */
	tmp100 = (dmfe_tx100_stall_us + dmfe_tick_us - 1) / dmfe_tick_us;
	tmp10 = (dmfe_tx10_stall_us + dmfe_tick_us - 1) / dmfe_tick_us;
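	/*
	 * Note that the divisions above are ceilings: they convert the
	 * stall timeouts (given in microseconds) into a whole number
	 * of cyclic ticks, rounded up rather than down so that we
	 * never time out early.
	 */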

	for (i = 0; i <= TX_PROCESS_MAX_STATE; ++i) {
		switch (i) {
		case TX_PROCESS_STATE(TX_PROCESS_FETCH_DATA):
		case TX_PROCESS_STATE(TX_PROCESS_WAIT_END):
			/*
			 * The chip doesn't spontaneously recover from
			 * a stall in these states, so we reset early
			 */
			stall_100_tix[i] = tmp100;
			stall_10_tix[i] = tmp10;
			break;

		case TX_PROCESS_STATE(TX_PROCESS_SUSPEND):
		default:
			/*
			 * The chip has been seen to spontaneously recover
			 * after an apparent stall in the SUSPEND state,
			 * so we'll allow it rather longer to do so. As
			 * stalls in other states have not been observed,
			 * we'll use long timeouts for them too ...
			 */
			stall_100_tix[i] = tmp100 * 20;
			stall_10_tix[i] = tmp10 * 20;
			break;
		}
	}

	mac_init_ops(&dmfe_dev_ops, "dmfe");
	status = mod_install(&modlinkage);
	if (status == DDI_SUCCESS)
		dmfe_log_init();

	return (status);
}

int
_fini(void)
{
	int status;

	status = mod_remove(&modlinkage);
	if (status == DDI_SUCCESS) {
		mac_fini_ops(&dmfe_dev_ops);
		dmfe_log_fini();
	}

	return (status);
}