1 /*
2 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
3 * Use is subject to license terms.
4 */
5
6 /*
7 * Copyright (c) 1998 The NetBSD Foundation, Inc.
8 * All rights reserved.
9 *
10 * This code is derived from software contributed to The NetBSD Foundation
11 * by Frank van der Linden.
12 *
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
15 * are met:
16 * 1. Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
23 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
24 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
25 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 * POSSIBILITY OF SUCH DAMAGE.
33 */
34
35 #include <sys/varargs.h>
36 #include <sys/types.h>
37 #include <sys/modctl.h>
38 #include <sys/conf.h>
39 #include <sys/devops.h>
40 #include <sys/stream.h>
41 #include <sys/strsun.h>
42 #include <sys/cmn_err.h>
43 #include <sys/ethernet.h>
44 #include <sys/pci.h>
45 #include <sys/kmem.h>
46 #include <sys/time.h>
47 #include <sys/mii.h>
48 #include <sys/miiregs.h>
49 #include <sys/mac_ether.h>
50 #include <sys/mac_provider.h>
51 #include <sys/strsubr.h>
52 #include <sys/pattr.h>
53 #include <sys/dlpi.h>
54 #include <sys/ddi.h>
55 #include <sys/sunddi.h>
56
57 #include <sys/vlan.h>
58
59 #include "elxl.h"
60
61 static boolean_t elxl_add_intr(elxl_t *);
62 static void elxl_probe_media(elxl_t *);
63 static void elxl_set_rxfilter(elxl_t *);
64 static void elxl_set_media(elxl_t *);
65 static uint16_t elxl_read_eeprom(elxl_t *, int);
66 static void elxl_init(elxl_t *);
67 static void elxl_stop(elxl_t *);
68 static void elxl_reset(elxl_t *);
69 static void elxl_getstats(elxl_t *);
70
71 static int elxl_eeprom_busy(elxl_t *);
72
73 static void elxl_setup_tx(elxl_t *);
74
75 static uint16_t elxl_mii_read(void *, uint8_t, uint8_t);
76 static void elxl_mii_write(void *, uint8_t, uint8_t, uint16_t);
77 static void elxl_mii_notify(void *, link_state_t);
78
79 static int elxl_m_stat(void *, uint_t, uint64_t *);
80 static int elxl_m_start(void *);
81 static void elxl_m_stop(void *);
82 static mblk_t *elxl_m_tx(void *, mblk_t *);
83 static int elxl_m_promisc(void *, boolean_t);
84 static int elxl_m_multicst(void *, boolean_t, const uint8_t *);
85 static int elxl_m_unicst(void *, const uint8_t *);
86 static int elxl_m_getprop(void *, const char *, mac_prop_id_t, uint_t,
87 void *);
88 static int elxl_m_setprop(void *, const char *, mac_prop_id_t, uint_t,
89 const void *);
90 static void elxl_m_propinfo(void *, const char *, mac_prop_id_t,
91 mac_prop_info_handle_t);
92 static boolean_t elxl_m_getcapab(void *, mac_capab_t cap, void *);
93 static uint_t elxl_intr(caddr_t, caddr_t);
94 static void elxl_error(elxl_t *, char *, ...);
95 static void elxl_linkcheck(void *);
96 static int elxl_attach(dev_info_t *);
97 static void elxl_detach(elxl_t *);
98 static void elxl_suspend(elxl_t *);
99 static void elxl_resume(dev_info_t *);
100 static int elxl_ddi_attach(dev_info_t *, ddi_attach_cmd_t);
101 static int elxl_ddi_detach(dev_info_t *, ddi_detach_cmd_t);
102 static int elxl_ddi_quiesce(dev_info_t *);
103
104 static ddi_device_acc_attr_t ex_dev_acc_attr = {
105 DDI_DEVICE_ATTR_V0,
106 DDI_STRUCTURE_LE_ACC,
107 DDI_STRICTORDER_ACC
108 };
109
110 static ddi_device_acc_attr_t ex_buf_acc_attr = {
111 DDI_DEVICE_ATTR_V0,
112 DDI_NEVERSWAP_ACC,
113 DDI_STORECACHING_OK_ACC
114 };
115
116 /*
117 * In theory buffers can have more flexible DMA attributes, but since
118 * we're just using a preallocated region with bcopy, there is little
119 * reason to allow for rougher alignment. (Further, the 8-byte
120 * alignment can allow for more efficient bcopy and similar operations
121 * from the buffer.)
122 */
123 static ddi_dma_attr_t ex_dma_attr = {
124 DMA_ATTR_V0, /* dma_attr_version */
125 0, /* dma_attr_addr_lo */
126 0xFFFFFFFFU, /* dma_attr_addr_hi */
127 0x00FFFFFFU, /* dma_attr_count_max */
128 8, /* dma_attr_align */
129 0x7F, /* dma_attr_burstsizes */
130 1, /* dma_attr_minxfer */
131 0xFFFFFFFFU, /* dma_attr_maxxfer */
132 0xFFFFFFFFU, /* dma_attr_seg */
133 1, /* dma_attr_sgllen */
134 1, /* dma_attr_granular */
135 0 /* dma_attr_flags */
136 };
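/*
 * A note on the attributes above: dma_attr_sgllen is 1, so every
 * binding made with these attributes must fit in a single DMA
 * cookie.  Both the descriptor rings and the copy buffers below rely
 * on that, recording exactly one dmac_address per binding.
 */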
137
138 static uint8_t ex_broadcast[6] = {
139 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
140 };
141
142 /*
143 * Structure to map media-present bits in boards to ifmedia codes and
144 * printable media names. Used for table-driven ifmedia initialization.
145 */
146 typedef struct ex_media {
147 int exm_mpbit; /* media present bit */
148 int exm_xcvr; /* XCVR_SEL_* constant */
149 } ex_media_t;
150
151 /*
152 * Media table for 3c90x chips. Note that chips with MII have no
153 * `native' media. This is sorted in "reverse preference".
154 */
155 static ex_media_t ex_native_media[] = {
156 { MEDIAOPT_AUI, XCVR_SEL_AUI },
157 { MEDIAOPT_BNC, XCVR_SEL_BNC },
158 { MEDIAOPT_10T, XCVR_SEL_10T },
159 { MEDIAOPT_100TX, XCVR_SEL_AUTO }, /* only 90XB */
160 { MEDIAOPT_100FX, XCVR_SEL_100FX },
161 { MEDIAOPT_MII, XCVR_SEL_MII },
162 { MEDIAOPT_100T4, XCVR_SEL_MII },
163 { 0, 0 },
164 };
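/*
 * "Reverse preference" works like this: elxl_probe_media() walks the
 * table in order and overwrites its transceiver choice for every
 * media type found present, so the last matching (most preferred)
 * entry wins; the exception is the card's configured default, which
 * ends the search immediately when encountered.
 */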
165
166
167 /*
168 * NB: There are lots of other models that *could* be supported.
169 * Specifically there are cardbus and miniPCI variants that could be
170 * easily added here, but they require special hacks and I have no
171 * access to the hardware required to verify them. Especially they
172 * access to the hardware required to verify them. In particular, they
173 * have no supporting documentation.
174 */
175 static const struct ex_product {
176 uint16_t epp_prodid; /* PCI product ID */
177 const char *epp_name; /* device name */
178 unsigned epp_flags; /* initial softc flags */
179 } ex_products[] = {
180 { 0x4500, "3c450-TX", 0 },
181 { 0x7646, "3cSOHO100-TX", 0 },
182 { 0x9000, "3c900-TPO", 0 },
183 { 0x9001, "3c900-COMBO", 0 },
184 { 0x9004, "3c900B-TPO", 0 },
185 { 0x9005, "3c900B-COMBO", 0 },
186 { 0x9006, "3c900B-TPC", 0 },
187 { 0x900a, "3c900B-FL", 0 },
188 { 0x9050, "3c905-TX", 0 },
189 { 0x9051, "3c905-T4", 0 },
190 { 0x9055, "3c905B-TX", 0 },
191 { 0x9056, "3c905B-T4", 0 },
192 { 0x9058, "3c905B-COMBO", 0 },
193 { 0x905a, "3c905B-FX", 0 },
194 { 0x9200, "3c905C-TX", 0 },
195 { 0x9201, "3c920B-EMB", 0 },
196 { 0x9202, "3c920B-EMB-WNM", 0 },
197 { 0x9800, "3c980", 0 },
198 { 0x9805, "3c980C-TXM", 0 },
199
200 { 0, NULL, 0 },
201 };
202
203 static char *ex_priv_prop[] = {
204 "_media",
205 "_available_media",
206 NULL
207 };
208
209 static mii_ops_t ex_mii_ops = {
210 MII_OPS_VERSION,
211 elxl_mii_read,
212 elxl_mii_write,
213 elxl_mii_notify,
214 };
215
216 static mac_callbacks_t elxl_m_callbacks = {
217 MC_GETCAPAB | MC_PROPERTIES,
218 elxl_m_stat,
219 elxl_m_start,
220 elxl_m_stop,
221 elxl_m_promisc,
222 elxl_m_multicst,
223 elxl_m_unicst,
224 elxl_m_tx,
225 NULL,
226 NULL,
227 elxl_m_getcapab,
228 NULL,
229 NULL,
230 elxl_m_setprop,
231 elxl_m_getprop,
232 elxl_m_propinfo
233 };
234
235 /*
236 * Stream information
237 */
238 DDI_DEFINE_STREAM_OPS(ex_devops, nulldev, nulldev,
239 elxl_ddi_attach, elxl_ddi_detach,
240 nodev, NULL, D_MP, NULL, elxl_ddi_quiesce);
241
242 /*
243 * Module linkage information.
244 */
245
246 static struct modldrv ex_modldrv = {
247 &mod_driverops, /* drv_modops */
248 "3Com EtherLink XL", /* drv_linkinfo */
249 &ex_devops /* drv_dev_ops */
250 };
251
252 static struct modlinkage ex_modlinkage = {
253 MODREV_1, /* ml_rev */
254 { &ex_modldrv, NULL } /* ml_linkage */
255 };
256
257 int
258 _init(void)
259 {
260 int rv;
261 mac_init_ops(&ex_devops, "elxl");
262 if ((rv = mod_install(&ex_modlinkage)) != DDI_SUCCESS) {
263 mac_fini_ops(&ex_devops);
264 }
265 return (rv);
266 }
267
268 int
269 _fini(void)
270 {
271 int rv;
272 if ((rv = mod_remove(&ex_modlinkage)) == DDI_SUCCESS) {
273 mac_fini_ops(&ex_devops);
274 }
275 return (rv);
276 }
277
278 int
279 _info(struct modinfo *modinfop)
280 {
281 return (mod_info(&ex_modlinkage, modinfop));
282 }
283
284 static void
285 ex_free_ring(ex_ring_t *r)
286 {
287 for (int i = 0; i < r->r_count; i++) {
288 ex_desc_t *ed = &r->r_desc[i];
289 if (ed->ed_bufaddr)
290 (void) ddi_dma_unbind_handle(ed->ed_dmah);
291 if (ed->ed_acch)
292 ddi_dma_mem_free(&ed->ed_acch);
293 if (ed->ed_dmah)
294 ddi_dma_free_handle(&ed->ed_dmah);
295 }
296
297 if (r->r_paddr)
298 (void) ddi_dma_unbind_handle(r->r_dmah);
299 if (r->r_acch)
300 ddi_dma_mem_free(&r->r_acch);
301 if (r->r_dmah)
302 ddi_dma_free_handle(&r->r_dmah);
303
304 kmem_free(r->r_desc, sizeof (ex_desc_t) * r->r_count);
305 r->r_desc = NULL;
306 }
307
308 static void
309 elxl_reset_ring(ex_ring_t *r, uint_t dir)
310 {
311 ex_desc_t *ed;
312 ex_pd_t *pd;
313
314 if (dir == DDI_DMA_WRITE) {
315 /* transmit ring, not linked yet */
316 for (int i = 0; i < r->r_count; i++) {
317 ed = &r->r_desc[i];
318 pd = ed->ed_pd;
319 PUT_PD(r, pd->pd_link, 0);
320 PUT_PD(r, pd->pd_fsh, 0);
321 PUT_PD(r, pd->pd_len, EX_FR_LAST);
322 PUT_PD(r, pd->pd_addr, ed->ed_bufaddr);
323 }
324 r->r_head = NULL;
325 r->r_tail = NULL;
326 r->r_avail = r->r_count;
327 } else {
328 /* receive is linked into a list */
329 for (int i = 0; i < r->r_count; i++) {
330 ed = &r->r_desc[i];
331 pd = ed->ed_pd;
332 PUT_PD(r, pd->pd_link, ed->ed_next->ed_descaddr);
333 PUT_PD(r, pd->pd_status, 0);
334 PUT_PD(r, pd->pd_len, EX_BUFSZ | EX_FR_LAST);
335 PUT_PD(r, pd->pd_addr, ed->ed_bufaddr);
336 }
337 r->r_head = &r->r_desc[0];
338 r->r_tail = NULL;
339 r->r_avail = 0;
340 }
341 (void) ddi_dma_sync(r->r_dmah, 0, 0, DDI_DMA_SYNC_FORDEV);
342 }
343
344 static boolean_t
345 ex_alloc_ring(elxl_t *sc, int count, ex_ring_t *r, uint_t dir)
346 {
347 dev_info_t *dip = sc->ex_dip;
348 int i;
349 int rv;
350 size_t len;
351 ddi_dma_cookie_t dmac;
352 unsigned ndmac;
353
354 r->r_count = count;
355 r->r_desc = kmem_zalloc(sizeof (ex_desc_t) * count, KM_SLEEP);
356
357 rv = ddi_dma_alloc_handle(dip, &ex_dma_attr, DDI_DMA_DONTWAIT,
358 NULL, &r->r_dmah);
359 if (rv != DDI_SUCCESS) {
360 elxl_error(sc, "unable to allocate descriptor dma handle");
361 return (B_FALSE);
362 }
363
364 rv = ddi_dma_mem_alloc(r->r_dmah, count * sizeof (struct ex_pd),
365 &ex_dev_acc_attr, DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
366 (caddr_t *)&r->r_pd, &len, &r->r_acch);
367 if (rv != DDI_SUCCESS) {
368 elxl_error(sc, "unable to allocate descriptor memory");
369 return (B_FALSE);
370 }
371 bzero(r->r_pd, len);
372
373 rv = ddi_dma_addr_bind_handle(r->r_dmah, NULL,
374 (caddr_t)r->r_pd, len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
375 DDI_DMA_DONTWAIT, NULL, &dmac, &ndmac);
376 if (rv != DDI_DMA_MAPPED) {
377 elxl_error(sc, "unable to map descriptor memory");
378 return (B_FALSE);
379 }
380 r->r_paddr = dmac.dmac_address;
381
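/*
 * The single-cookie binding above means the descriptor array is
 * physically contiguous, so descriptor i lives at
 * r_paddr + i * sizeof (ex_pd_t).  That address is recorded in
 * ed_descaddr below and is what gets written into pd_link and the
 * UPLISTPTR/DNLISTPTR registers.
 */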
382 for (i = 0; i < count; i++) {
383 ex_desc_t *ed = &r->r_desc[i];
384 ex_pd_t *pd = &r->r_pd[i];
385
386 ed->ed_pd = pd;
387 ed->ed_off = (i * sizeof (ex_pd_t));
388 ed->ed_descaddr = r->r_paddr + (i * sizeof (ex_pd_t));
389
390 /* Link the high level descriptors into a ring. */
391 ed->ed_next = &r->r_desc[(i + 1) % count];
392 ed->ed_next->ed_prev = ed;
393
394 rv = ddi_dma_alloc_handle(dip, &ex_dma_attr,
395 DDI_DMA_DONTWAIT, NULL, &ed->ed_dmah);
396 if (rv != 0) {
397 elxl_error(sc, "can't allocate buf dma handle");
398 return (B_FALSE);
399 }
400 rv = ddi_dma_mem_alloc(ed->ed_dmah, EX_BUFSZ, &ex_buf_acc_attr,
401 DDI_DMA_STREAMING, DDI_DMA_DONTWAIT, NULL, &ed->ed_buf,
402 &len, &ed->ed_acch);
403 if (rv != DDI_SUCCESS) {
404 elxl_error(sc, "unable to allocate buf memory");
405 return (B_FALSE);
406 }
407 bzero(ed->ed_buf, len);
408
409 rv = ddi_dma_addr_bind_handle(ed->ed_dmah, NULL,
410 ed->ed_buf, len, dir | DDI_DMA_STREAMING,
411 DDI_DMA_DONTWAIT, NULL, &dmac, &ndmac);
412 if (rv != DDI_DMA_MAPPED) {
413 elxl_error(sc, "unable to map buf memory");
414 return (B_FALSE);
415 }
416 ed->ed_bufaddr = dmac.dmac_address;
417 }
418
419 elxl_reset_ring(r, dir);
420
421 return (B_TRUE);
422 }
423
424 static boolean_t
425 elxl_add_intr(elxl_t *sc)
426 {
427 dev_info_t *dip;
428 int actual;
429 uint_t ipri;
430
431 int rv;
432
433 dip = sc->ex_dip;
434
435 rv = ddi_intr_alloc(dip, &sc->ex_intrh, DDI_INTR_TYPE_FIXED,
436 0, 1, &actual, DDI_INTR_ALLOC_STRICT);
437 if ((rv != DDI_SUCCESS) || (actual != 1)) {
438 elxl_error(sc, "Unable to allocate interrupt, %d, count %d",
439 rv, actual);
440 return (B_FALSE);
441 }
442
443 if (ddi_intr_get_pri(sc->ex_intrh, &ipri) != DDI_SUCCESS) {
444 elxl_error(sc, "Unable to get interrupt priority");
445 return (B_FALSE);
446 }
447
448 if (ddi_intr_add_handler(sc->ex_intrh, elxl_intr, sc, NULL) !=
449 DDI_SUCCESS) {
450 elxl_error(sc, "Can't add interrupt handler");
451 (void) ddi_intr_free(sc->ex_intrh);
452 sc->ex_intrh = NULL;
453 return (B_FALSE);
454 }
455 mutex_init(&sc->ex_intrlock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(ipri));
456 mutex_init(&sc->ex_txlock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(ipri));
457
458 return (B_TRUE);
459 }
460
461 static int
462 elxl_attach(dev_info_t *dip)
463 {
464 elxl_t *sc;
465 mac_register_t *macp;
466 uint16_t val;
467 uint16_t venid;
468 uint16_t devid;
469 int i;
470
471 sc = kmem_zalloc(sizeof (*sc), KM_SLEEP);
472 ddi_set_driver_private(dip, sc);
473 sc->ex_dip = dip;
474
475 if (pci_config_setup(dip, &sc->ex_pcih) != DDI_SUCCESS) {
476 elxl_error(sc, "unable to setup PCI config handle");
477 goto fail;
478 }
479 venid = pci_config_get16(sc->ex_pcih, PCI_CONF_VENID);
480 devid = pci_config_get16(sc->ex_pcih, PCI_CONF_DEVID);
481
482 if (venid != 0x10b7) {
483 /* Not a 3Com part! */
484 elxl_error(sc, "Unsupported vendor id (0x%x)", venid);
485 goto fail;
486 }
487 for (i = 0; ex_products[i].epp_name; i++) {
488 if (devid == ex_products[i].epp_prodid) {
489 cmn_err(CE_CONT, "?%s%d: 3Com %s",
490 ddi_driver_name(dip),
491 ddi_get_instance(dip),
492 ex_products[i].epp_name);
493 sc->ex_conf = ex_products[i].epp_flags;
494 break;
495 }
496 }
497 if (ex_products[i].epp_name == NULL) {
498 /* Not a product we know how to support */
499 elxl_error(sc, "Unsupported device id (0x%x)", devid);
500 elxl_error(sc, "Driver may or may not function.");
501 }
502
503 pci_config_put16(sc->ex_pcih, PCI_CONF_COMM,
504 pci_config_get16(sc->ex_pcih, PCI_CONF_COMM) |
505 PCI_COMM_IO | PCI_COMM_MAE | PCI_COMM_ME);
506
507 if (ddi_regs_map_setup(dip, 1, &sc->ex_regsva, 0, 0, &ex_dev_acc_attr,
508 &sc->ex_regsh) != DDI_SUCCESS) {
509 elxl_error(sc, "Unable to map device registers");
510 goto fail;
511 }
512
513 if (!elxl_add_intr(sc)) {
514 goto fail;
515 }
516
517 elxl_reset(sc);
518
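/*
 * The factory station address is kept in the EEPROM as three 16-bit
 * words (EE_OEM_ADDR_0..2), each holding two bytes of the address
 * with the earlier byte in the high half of the word.
 */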
519 val = elxl_read_eeprom(sc, EE_OEM_ADDR_0);
520 sc->ex_factaddr[0] = val >> 8;
521 sc->ex_factaddr[1] = val & 0xff;
522 val = elxl_read_eeprom(sc, EE_OEM_ADDR_1);
523 sc->ex_factaddr[2] = val >> 8;
524 sc->ex_factaddr[3] = val & 0xff;
525 val = elxl_read_eeprom(sc, EE_OEM_ADDR_2);
526 sc->ex_factaddr[4] = val >> 8;
527 sc->ex_factaddr[5] = val & 0xff;
528 bcopy(sc->ex_factaddr, sc->ex_curraddr, 6);
529
530 sc->ex_capab = elxl_read_eeprom(sc, EE_CAPABILITIES);
531
532 /*
533 * Is this a 90XB? If bit 2 (supportsLargePackets) is set, or
534 * bit 9 (supportsNoTxLength) is clear, then it's a 90X.
535 * Otherwise it's a 90XB.
536 */
537 if ((sc->ex_capab & (1 << 2)) || !(sc->ex_capab & (1 << 9))) {
538 sc->ex_conf &= ~CONF_90XB;
539 } else {
540 sc->ex_conf |= CONF_90XB;
541 }
542
543 if (!ex_alloc_ring(sc, EX_NRX, &sc->ex_rxring, DDI_DMA_READ)) {
544 goto fail;
545 }
546
547 if (!ex_alloc_ring(sc, EX_NTX, &sc->ex_txring, DDI_DMA_WRITE)) {
548 goto fail;
549 }
550
551 elxl_probe_media(sc);
552
553 /*
554 * The probe may have indicated MII!
555 */
556 if (sc->ex_mediaopt & (MEDIAOPT_MII | MEDIAOPT_100TX)) {
557 sc->ex_miih = mii_alloc(sc, sc->ex_dip, &ex_mii_ops);
558 if (sc->ex_miih == NULL) {
559 goto fail;
560 }
561 /*
562 * Note: The 90XB models can in theory support pause,
563 * but we're not enabling now due to lack of units for
564 * testing with. If this is changed, make sure to
565 * update the code in elxl_mii_notify to set the flow
566 * control field in the W3_MAC_CONTROL register.
567 */
568 mii_set_pauseable(sc->ex_miih, B_FALSE, B_FALSE);
569 }
570 if ((macp = mac_alloc(MAC_VERSION)) == NULL) {
571 elxl_error(sc, "MAC register allocation failed");
572 goto fail;
573 }
574 macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
575 macp->m_driver = sc;
576 macp->m_dip = dip;
577 macp->m_src_addr = sc->ex_curraddr;
578 macp->m_callbacks = &elxl_m_callbacks;
579 macp->m_min_sdu = 0;
580 macp->m_max_sdu = ETHERMTU;
581 macp->m_margin = VLAN_TAGSZ;
582 macp->m_priv_props = ex_priv_prop;
583
584 (void) ddi_intr_enable(sc->ex_intrh);
585
586 if (mac_register(macp, &sc->ex_mach) == DDI_SUCCESS) {
587
588 /*
589 * Note: we don't want to start link checking
590 * until *after* we have added the MAC handle.
591 */
592 if (sc->ex_mediaopt &
593 (MEDIAOPT_MASK & ~(MEDIAOPT_MII | MEDIAOPT_100TX))) {
594
595 /* Check non-MII link state once per second. */
596 sc->ex_linkcheck =
597 ddi_periodic_add(elxl_linkcheck, sc, 10000000, 0);
598 }
599
600 mac_free(macp);
601 return (DDI_SUCCESS);
602 }
603
604 mac_free(macp);
605
606 fail:
607 elxl_detach(sc);
608 return (DDI_FAILURE);
609 }
610
611 /*
612 * Find the media present on non-MII chips, and select the one to use.
613 */
614 static void
615 elxl_probe_media(elxl_t *sc)
616 {
617 ex_media_t *exm;
618 uint32_t config;
619 uint32_t default_media;
620 uint16_t media_options;
621
622 SET_WIN(3);
623 config = GET32(W3_INTERNAL_CONFIG);
624 media_options = GET16(W3_MEDIAOPT);
625
626 /*
627 * We modify the media_options field so that we have a
628 * consistent view of the media available, without worrying
629 * about the version of ASIC, etc.
630 */
631
632 /*
633 * 100BASE-TX is handled differently on 90XB from 90X. Older
634 * parts use the external MII to provide this support.
635 */
636 if (sc->ex_conf & CONF_90XB) {
637 if (media_options & MEDIAOPT_100TX) {
638 /*
639 * 3Com advises that we should only ever use the
640 * auto mode. Notably, it seems that there should
641 * never be a 90XB board with the MEDIAOPT_10T bit set
642 * without this bit. If it happens, the driver will
643 * run in 10BASE-T-only compatibility mode.
644 */
645 media_options &= ~MEDIAOPT_10T;
646 }
647 } else {
648 if (media_options & MEDIAOPT_100TX) {
649 /*
650 * If this occurs, we really want to use it like
651 * an MII device. Generally in this situation we
652 * want to use the MII exclusively, and there ought
653 * not be a 10bT transceiver.
654 */
655 media_options |= MEDIAOPT_MII;
656 media_options &= ~MEDIAOPT_100TX;
657 media_options &= ~MEDIAOPT_10T;
658
659 /*
660 * Additionally, some of these devices map all
661 * internal PHY register at *every* address, not
662 * just the "allowed" address 24.
663 */
664 sc->ex_conf |= CONF_INTPHY;
665 }
666 /*
667 * Early versions didn't have 10FL models, and used this
668 * bit for something else (VCO).
669 */
670 media_options &= ~MEDIAOPT_10FL;
671 }
672 if (media_options & MEDIAOPT_100T4) {
673 /* 100BASE-T4 units all use the MII bus. */
674 media_options |= MEDIAOPT_MII;
675 media_options &= ~MEDIAOPT_100T4;
676 }
677
678 /* Save our media options. */
679 sc->ex_mediaopt = media_options;
680
681 #define APPEND_MEDIA(str, bit, name) \
682 if (media_options & (bit)) { \
683 (void) strlcat(str, *str ? "," : "", sizeof (str)); \
684 (void) strlcat(str, name, sizeof (str)); \
685 }
686
687 APPEND_MEDIA(sc->ex_medias, (MEDIAOPT_MII|MEDIAOPT_100TX), "mii");
688 APPEND_MEDIA(sc->ex_medias, MEDIAOPT_10T, "tp-hdx,tp-fdx");
689 APPEND_MEDIA(sc->ex_medias, MEDIAOPT_100FX, "fx-hdx,fx-fdx");
690 APPEND_MEDIA(sc->ex_medias, MEDIAOPT_BNC, "bnc");
691 APPEND_MEDIA(sc->ex_medias, MEDIAOPT_AUI, "aui");
692 APPEND_MEDIA(sc->ex_medias, MEDIAOPT_10FL, "fl-hdx,fl-fdx");
693
694 if (config & XCVR_SEL_100TX) {
695 /* Only found on 90XB. Don't use this, use AUTO instead! */
696 config |= XCVR_SEL_AUTO;
697 config &= ~XCVR_SEL_100TX;
698 }
699
700 default_media = (config & XCVR_SEL_MASK);
701
702 /* Sanity check that there are any media! */
703 if ((media_options & MEDIAOPT_MASK) == 0) {
704 elxl_error(sc,
705 "No media present? Attempting to use default.");
706 /*
707 * This "default" may be nonsensical. At worst it should
708 * cause a busted link.
709 */
710 sc->ex_xcvr = default_media;
711 }
712
713 for (exm = ex_native_media; exm->exm_mpbit != 0; exm++) {
714 if (media_options & exm->exm_mpbit) {
715 if (exm->exm_xcvr == default_media) {
716 /* preferred default is present, just use it */
717 sc->ex_xcvr = default_media;
718 return;
719 }
720
721 sc->ex_xcvr = exm->exm_xcvr;
722 /* but keep trying for other more preferred options */
723 }
724 }
725 }
726
727 /*
728 * Setup transmitter parameters.
729 */
730 static void
731 elxl_setup_tx(elxl_t *sc)
732 {
733 /*
734 * Disable reclaim threshold for 90xB, set free threshold to
735 * 6 * 256 = 1536 for 90x.
736 */
737 if (sc->ex_conf & CONF_90XB)
738 PUT_CMD(CMD_SET_TXRECLAIM | 255);
739 else
740 PUT8(REG_TXFREETHRESH, 6);
741
742 /*
743 * We've seen underflows at the root cause of NIC hangs on
744 * older cards. Use a store-and-forward model to prevent that.
745 */
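/*
 * (The start threshold appears to be in units of 4 bytes, hence the
 * EX_BUFSZ >> 2; a threshold of a full buffer means the NIC starts
 * transmitting only after the entire frame has been downloaded.)
 */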
746 PUT_CMD(CMD_SET_TXSTART | EX_BUFSZ >> 2);
747 }
748
749 /*
750 * Bring device up.
751 */
752 static void
753 elxl_init(elxl_t *sc)
754 {
755 if (sc->ex_suspended)
756 return;
757
758 WAIT_CMD(sc);
759 elxl_stop(sc);
760
761 PUT_CMD(CMD_RX_RESET);
762 WAIT_CMD(sc);
763 PUT_CMD(CMD_TX_RESET);
764 WAIT_CMD(sc);
765
766 /* Load Tx parameters. */
767 elxl_setup_tx(sc);
768
769 PUT32(REG_DMACTRL, GET32(REG_DMACTRL) | DMACTRL_UPRXEAREN);
770
771 PUT_CMD(CMD_IND_ENABLE | INT_WATCHED);
772 PUT_CMD(CMD_INT_ENABLE | INT_WATCHED);
773
774 PUT_CMD(CMD_INT_ACK | 0xff);
775
776 elxl_set_media(sc);
777 elxl_set_rxfilter(sc);
778
779 /* Configure for VLAN tag sizing. */
780 SET_WIN(3);
781 if (sc->ex_conf & CONF_90XB) {
782 PUT16(W3_MAX_PKT_SIZE, EX_BUFSZ);
783 } else {
784 PUT16(W3_MAC_CONTROL, GET16(W3_MAC_CONTROL) |
785 MAC_CONTROL_ALLOW_LARGE);
786 }
787
788 PUT_CMD(CMD_SET_RXEARLY | (EX_BUFSZ >> 2));
789
790 PUT_CMD(CMD_STATS_ENABLE);
791 PUT_CMD(CMD_TX_ENABLE);
792 PUT32(REG_UPLISTPTR, sc->ex_rxring.r_paddr);
793 PUT_CMD(CMD_RX_ENABLE);
794 PUT_CMD(CMD_UP_UNSTALL);
795 }
796
797 /*
798 * Set multicast receive filter. Also take care of promiscuous mode.
799 * Note that *some* of this hardware is fully capable of either a 256
800 * or 64 bit multicast hash. However, we can't easily determine the
801 * size of the hash table, and so we are expected to be able
802 * to resubmit the entire list of addresses each time. This puts an
803 * onerous burden on the driver to maintain its list of multicast
804 * addresses. Since multicast stuff is usually not that performance
805 * sensitive, and since we don't usually have much of it, we are just
806 * going to skip it. We allow the upper layers to filter it, as
807 * needed, by setting the all-multicast bit if the hardware can do it.
808 * This also reduces our test burden.
809 */
810 static void
811 elxl_set_rxfilter(elxl_t *sc)
812 {
813 uint16_t mask = FILTER_UNICAST | FILTER_ALLBCAST;
814
815 if (sc->ex_suspended)
816 return;
817
818 /*
819 * Set the station address and clear the station mask. The latter
820 * is needed for 90x cards, 0 is the default for 90xB cards.
821 */
822 SET_WIN(2);
823 for (int i = 0; i < ETHERADDRL; i++) {
824 PUT8(W2_STATION_ADDRESS + i, sc->ex_curraddr[i]);
825 PUT8(W2_STATION_MASK + i, 0);
826 }
827
828 if (sc->ex_mccount) {
829 mask |= FILTER_ALLMULTI;
830 }
831 if (sc->ex_promisc) {
832 mask |= FILTER_PROMISC;
833 }
834 PUT_CMD(CMD_SET_FILTER | mask);
835 }
836
837 static void
838 elxl_set_media(elxl_t *sc)
839 {
840 uint32_t configreg;
841
842 SET_WIN(4);
843 PUT16(W4_MEDIASTAT, 0);
844 PUT_CMD(CMD_BNC_DISABLE);
845 drv_usecwait(800);
846
847 /*
848 * Now turn on the selected media/transceiver.
849 */
850 switch (sc->ex_xcvr) {
851 case XCVR_SEL_10T:
852 sc->ex_mii_active = B_FALSE;
853 PUT16(W4_MEDIASTAT,
854 MEDIASTAT_JABGUARD_EN | MEDIASTAT_LINKBEAT_EN);
855 drv_usecwait(800);
856 break;
857
858 case XCVR_SEL_BNC:
859 sc->ex_mii_active = B_FALSE;
860 PUT_CMD(CMD_BNC_ENABLE);
861 drv_usecwait(800);
862 break;
863
864 case XCVR_SEL_100FX:
865 sc->ex_mii_active = B_FALSE; /* Is this really true? */
866 PUT16(W4_MEDIASTAT, MEDIASTAT_LINKBEAT_EN);
867 drv_usecwait(800);
868 break;
869
870 case XCVR_SEL_AUI:
871 sc->ex_mii_active = B_FALSE;
872 PUT16(W4_MEDIASTAT, MEDIASTAT_SQE_EN);
873 drv_usecwait(800);
874 break;
875
876 case XCVR_SEL_AUTO:
877 case XCVR_SEL_MII:
878 /*
879 * This is due to paranoia. If a card claims
880 * to default to MII, but doesn't have it set in
881 * media options, then we don't want to leave
882 * the MII active or we'll have problems dereferencing
883 * the "mii handle".
884 */
885 if (sc->ex_miih) {
886 sc->ex_mii_active = B_TRUE;
887 } else {
888 sc->ex_mii_active = B_FALSE;
889 }
890 break;
891
892 default:
893 sc->ex_mii_active = B_FALSE;
894 elxl_error(sc, "Impossible media setting!");
895 break;
896 }
897
898 SET_WIN(3);
899 configreg = GET32(W3_INTERNAL_CONFIG);
900
901 configreg &= ~(XCVR_SEL_MASK);
902 configreg |= (sc->ex_xcvr);
903
904 PUT32(W3_INTERNAL_CONFIG, configreg);
905
906 /*
907 * If we're not using MII, force the full-duplex setting. MII
908 * based modes handle the full-duplex setting via the MII
909 * notify callback.
910 */
911 if (!sc->ex_mii_active) {
912 uint16_t mctl;
913 mctl = GET16(W3_MAC_CONTROL);
914 if (sc->ex_fdx) {
915 mctl |= MAC_CONTROL_FDX;
916 } else {
917 mctl &= ~MAC_CONTROL_FDX;
918 }
919 PUT16(W3_MAC_CONTROL, mctl);
920 }
921 }
922
923 /*
924 * Periodic link-check callback for non-MII media: read the link
925 * state from the card and report it to the MAC layer.
926 */
927 static void
928 elxl_linkcheck(void *arg)
929 {
930 elxl_t *sc = arg;
931 uint16_t stat;
932 link_state_t link;
933
934 mutex_enter(&sc->ex_txlock);
935 if (sc->ex_mii_active) {
936 mutex_exit(&sc->ex_txlock);
937 return;
938 }
939 if (sc->ex_running && !sc->ex_suspended) {
940 switch (sc->ex_xcvr) {
941 case XCVR_SEL_100FX:
942 /* these media we can detect link on */
943 SET_WIN(4);
944 stat = GET16(W4_MEDIASTAT);
945 if (stat & MEDIASTAT_LINKDETECT) {
946 sc->ex_link = LINK_STATE_UP;
947 sc->ex_speed = 100000000;
948 } else {
949 sc->ex_link = LINK_STATE_DOWN;
950 sc->ex_speed = 0;
951 }
952 break;
953
954 case XCVR_SEL_10T:
955 /* these media we can detect link on */
956 SET_WIN(4);
957 stat = GET16(W4_MEDIASTAT);
958 if (stat & MEDIASTAT_LINKDETECT) {
959 sc->ex_link = LINK_STATE_UP;
960 sc->ex_speed = 10000000;
961 } else {
962 sc->ex_link = LINK_STATE_DOWN;
963 sc->ex_speed = 0;
964 }
965 break;
966
967 case XCVR_SEL_BNC:
968 case XCVR_SEL_AUI:
969 default:
970 /*
971 * For these we don't really know the answer,
972 * but if we lie then at least it won't cause
973 * ifconfig to turn off the RUNNING flag.
974 * This is necessary because we might
975 * transition from LINK_STATE_DOWN when
976 * switching media.
977 */
978 sc->ex_speed = 10000000;
979 sc->ex_link = LINK_STATE_UP;
980 break;
981 }
982 SET_WIN(3);
983 sc->ex_duplex = GET16(W3_MAC_CONTROL) & MAC_CONTROL_FDX ?
984 LINK_DUPLEX_FULL : LINK_DUPLEX_HALF;
985 } else {
986 sc->ex_speed = 0;
987 sc->ex_duplex = LINK_DUPLEX_UNKNOWN;
988 sc->ex_link = LINK_STATE_UNKNOWN;
989 }
990 link = sc->ex_link;
991 mutex_exit(&sc->ex_txlock);
992
993 mac_link_update(sc->ex_mach, link);
994 }
995
996 static int
997 elxl_m_promisc(void *arg, boolean_t on)
998 {
999 elxl_t *sc = arg;
1000
1001 mutex_enter(&sc->ex_intrlock);
1002 mutex_enter(&sc->ex_txlock);
1003 sc->ex_promisc = on;
1004 elxl_set_rxfilter(sc);
1005 mutex_exit(&sc->ex_txlock);
1006 mutex_exit(&sc->ex_intrlock);
1007 return (0);
1008 }
1009
1010 static int
1011 elxl_m_multicst(void *arg, boolean_t add, const uint8_t *addr)
1012 {
1013 elxl_t *sc = arg;
1014
1015 _NOTE(ARGUNUSED(addr));
1016
1017 mutex_enter(&sc->ex_intrlock);
1018 mutex_enter(&sc->ex_txlock);
1019 if (add) {
1020 sc->ex_mccount++;
1021 if (sc->ex_mccount == 1) {
1022 elxl_set_rxfilter(sc);
1023 }
1024 } else {
1025 sc->ex_mccount--;
1026 if (sc->ex_mccount == 0) {
1027 elxl_set_rxfilter(sc);
1028 }
1029 }
1030 mutex_exit(&sc->ex_txlock);
1031 mutex_exit(&sc->ex_intrlock);
1032 return (0);
1033 }
1034
1035 static int
1036 elxl_m_unicst(void *arg, const uint8_t *addr)
1037 {
1038 elxl_t *sc = arg;
1039
1040 mutex_enter(&sc->ex_intrlock);
1041 mutex_enter(&sc->ex_txlock);
1042 bcopy(addr, sc->ex_curraddr, ETHERADDRL);
1043 elxl_set_rxfilter(sc);
1044 mutex_exit(&sc->ex_txlock);
1045 mutex_exit(&sc->ex_intrlock);
1046
1047 return (0);
1048 }
1049
1050 static mblk_t *
1051 elxl_m_tx(void *arg, mblk_t *mp)
1052 {
1053 elxl_t *sc = arg;
1054 ex_desc_t *txd;
1055 ex_desc_t *first;
1056 ex_desc_t *tail;
1057 size_t len;
1058 ex_ring_t *r;
1059 ex_pd_t *pd;
1060 uint32_t cflags;
1061 mblk_t *nmp;
1062 boolean_t reenable = B_FALSE;
1063 boolean_t reset = B_FALSE;
1064 uint32_t paddr;
1065
1066 r = &sc->ex_txring;
1067 mutex_enter(&sc->ex_txlock);
1068 if (sc->ex_suspended) {
1069 while (mp != NULL) {
1070 sc->ex_nocarrier++;
1071 nmp = mp->b_next;
1072 freemsg(mp);
1073 mp = nmp;
1074 }
1075 mutex_exit(&sc->ex_txlock);
1076 return (NULL);
1077 }
1078
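/*
 * Drain the transmit status stack first.  Each completed (or failed)
 * transmission appears to push a byte of status; writing REG_TXSTATUS
 * pops the current entry.  Errors noted here are recovered from
 * below, by re-enabling or resetting the transmitter.
 */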
1079 for (int limit = (EX_NTX * 2); limit; limit--) {
1080 uint8_t stat = GET8(REG_TXSTATUS);
1081 if ((stat & TXSTATUS_COMPLETE) == 0) {
1082 break;
1083 }
1084 if (stat & TXSTATUS_MAXCOLLISIONS) {
1085 reenable = B_TRUE;
1086 sc->ex_excoll++;
1087 }
1088 if ((stat & TXSTATUS_ERRS) != 0) {
1089 reset = B_TRUE;
1090 if (stat & TXSTATUS_JABBER) {
1091 sc->ex_jabber++;
1092 }
1093 if (stat & TXSTATUS_RECLAIM_ERR) {
1094 sc->ex_txerr++;
1095 }
1096 if (stat & TXSTATUS_UNDERRUN) {
1097 sc->ex_uflo++;
1098 }
1099 }
1100 PUT8(REG_TXSTATUS, 0);
1101 }
1102
1103 if (reset || reenable) {
1104 paddr = GET32(REG_DNLISTPTR);
1105 if (reset) {
1106 WAIT_CMD(sc);
1107 PUT_CMD(CMD_TX_RESET);
1108 WAIT_CMD(sc);
1109 elxl_setup_tx(sc);
1110 }
1111 PUT_CMD(CMD_TX_ENABLE);
1112 if (paddr) {
1113 PUT32(REG_DNLISTPTR, paddr);
1114 }
1115 }
1116
1117 /* first reclaim any free descriptors */
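/*
 * REG_DNLISTPTR holds the address of the descriptor the download
 * (transmit) engine is currently working on; everything from r_head
 * up to, but not including, that descriptor has been completed and
 * can be reused.  A value of zero means the engine ran off the end
 * of the list, so the entire ring is free.
 */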
1118 while (r->r_avail < r->r_count) {
1119
1120 paddr = GET32(REG_DNLISTPTR);
1121 txd = r->r_head;
1122 if (paddr == txd->ed_descaddr) {
1123 /* still processing this one, we're done */
1124 break;
1125 }
1126 if (paddr == 0) {
1127 /* done processing the entire list! */
1128 r->r_head = NULL;
1129 r->r_tail = NULL;
1130 r->r_avail = r->r_count;
1131 break;
1132 }
1133 r->r_avail++;
1134 r->r_head = txd->ed_next;
1135 }
1136
1137 if ((r->r_avail < r->r_count) && (GET32(REG_DNLISTPTR) != 0)) {
1138 PUT_CMD(CMD_DN_STALL);
1139 WAIT_CMD(sc);
1140 }
1141
1142 first = NULL;
1143 tail = r->r_tail;
1144
1145 /*
1146 * If there is already a tx list, select the next desc on the list.
1147 * Otherwise, just pick the first descriptor.
1148 */
1149 txd = tail ? tail->ed_next : &r->r_desc[0];
1150
1151 while ((mp != NULL) && (r->r_avail)) {
1152
1153 nmp = mp->b_next;
1154
1155 len = msgsize(mp);
1156 if (len > (ETHERMAX + VLAN_TAGSZ)) {
1157 sc->ex_txerr++;
1158 freemsg(mp);
1159 mp = nmp;
1160 continue;
1161 }
1162
1163 cflags = 0;
1164 if ((sc->ex_conf & CONF_90XB) != 0) {
1165 uint32_t pflags;
1166 hcksum_retrieve(mp, NULL, NULL, NULL, NULL, NULL, NULL,
1167 &pflags);
1168 if (pflags & HCK_IPV4_HDRCKSUM) {
1169 cflags |= EX_DPD_IPCKSUM;
1170 }
1171 if (pflags & HCK_FULLCKSUM) {
1172 cflags |= (EX_DPD_TCPCKSUM | EX_DPD_UDPCKSUM);
1173 }
1174 }
1175
1176 /* Mark this descriptor is in use. We're committed now. */
1177 mcopymsg(mp, txd->ed_buf); /* frees the mblk! */
1178 r->r_avail--;
1179 mp = nmp;
1180
1181 /* Accounting stuff. */
1182 sc->ex_opackets++;
1183 sc->ex_obytes += len;
1184 if (txd->ed_buf[0] & 0x1) {
1185 if (bcmp(txd->ed_buf, ex_broadcast, ETHERADDRL) != 0) {
1186 sc->ex_multixmt++;
1187 } else {
1188 sc->ex_brdcstxmt++;
1189 }
1190 }
1191
1192 pd = txd->ed_pd;
1193
1194
1195 /*
1196 * Zero pad the frame if it's too short. This
1197 * also avoids a checksum offload bug.
1198 */
1199 if (len < 30) {
1200 bzero(txd->ed_buf + len, ETHERMIN - len);
1201 len = ETHERMIN;
1202 }
1203
1204 /*
1205 * If this our first packet so far, record the head
1206 * of the list.
1207 */
1208 if (first == NULL) {
1209 first = txd;
1210 }
1211
1212 (void) ddi_dma_sync(txd->ed_dmah, 0, 0, DDI_DMA_SYNC_FORDEV);
1213
1214 PUT_PD(r, pd->pd_link, 0);
1215 PUT_PD(r, pd->pd_fsh, len | cflags);
1216 PUT_PD(r, pd->pd_addr, txd->ed_bufaddr);
1217 PUT_PD(r, pd->pd_len, len | EX_FR_LAST);
1218
1219 /*
1220 * Write the link into the previous descriptor. Note that
1221 * if this is the first packet (so no previous queued), this
1222 * will be benign because the previous descriptor won't be
1223 * on any tx list. (Furthermore, we'll clear its link field
1224 * when we later use it.)
1225 */
1226 PUT_PD(r, txd->ed_prev->ed_pd->pd_link, txd->ed_descaddr);
1227 }
1228
1229 /*
1230 * Are we submitting any packets?
1231 */
1232 if (first != NULL) {
1233 /* Interrupt on the last packet. */
1234 PUT_PD(r, pd->pd_fsh, len | cflags | EX_DPD_DNIND);
1235
1236 if (tail == NULL) {
1237 /* No packets pending, so it's a new list head! */
1238 r->r_head = first;
1239 } else {
1240 pd = tail->ed_pd;
1241 /* We've added frames, so don't interrupt mid-list. */
1242 PUT_PD(r, pd->pd_fsh,
1243 GET_PD(r, pd->pd_fsh) & ~(EX_DPD_DNIND));
1244 }
1245 /* Record the last descriptor. */
1246 r->r_tail = txd;
1247
1248 /* flush the entire ring - we're stopped so it's safe */
1249 (void) ddi_dma_sync(r->r_dmah, 0, 0, DDI_DMA_SYNC_FORDEV);
1250 }
1251
1252 /* Restart transmitter. */
1253 if (sc->ex_txring.r_head) {
1254 PUT32(REG_DNLISTPTR, sc->ex_txring.r_head->ed_descaddr);
1255 }
1256 PUT_CMD(CMD_DN_UNSTALL);
1257
1258 mutex_exit(&sc->ex_txlock);
1259
1260 return (mp);
1261 }
1262
1263 static mblk_t *
1264 elxl_recv(elxl_t *sc, ex_desc_t *rxd, uint32_t stat)
1265 {
1266 mblk_t *mp = NULL;
1267 uint32_t len;
1268
1269 len = stat & EX_UPD_PKTLENMASK;
1270 if (stat & (EX_UPD_ERR_VLAN | EX_UPD_OVERFLOW)) {
1271 if (stat & EX_UPD_RUNT) {
1272 sc->ex_runt++;
1273 }
1274 if (stat & EX_UPD_OVERRUN) {
1275 sc->ex_oflo++;
1276 }
1277 if (stat & EX_UPD_CRCERR) {
1278 sc->ex_fcs++;
1279 }
1280 if (stat & EX_UPD_ALIGNERR) {
1281 sc->ex_align++;
1282 }
1283 if (stat & EX_UPD_OVERFLOW) {
1284 sc->ex_toolong++;
1285 }
1286 return (NULL);
1287 }
1288 if (len < sizeof (struct ether_header)) {
1289 sc->ex_runt++;
1290 return (NULL);
1291 }
1292 if (len > (ETHERMAX + VLAN_TAGSZ)) {
1293 /* Allow four bytes for the VLAN header */
1294 sc->ex_toolong++;
1295 return (NULL);
1296 }
1297 if ((mp = allocb(len + 14, BPRI_HI)) == NULL) {
1298 sc->ex_allocbfail++;
1299 return (NULL);
1300 }
1301
1302 (void) ddi_dma_sync(rxd->ed_dmah, 0, 0, DDI_DMA_SYNC_FORKERNEL);
1303 mp->b_rptr += 14;
1304 mp->b_wptr = mp->b_rptr + len;
1305 bcopy(rxd->ed_buf, mp->b_rptr, len);
1306
1307 sc->ex_ipackets++;
1308 sc->ex_ibytes += len;
1309 if (rxd->ed_buf[0] & 0x1) {
1310 if (bcmp(rxd->ed_buf, ex_broadcast, ETHERADDRL) != 0) {
1311 sc->ex_multircv++;
1312 } else {
1313 sc->ex_brdcstrcv++;
1314 }
1315 }
1316
1317 /*
1318 * Set the incoming checksum information for the packet.
1319 */
1320 if (((sc->ex_conf & CONF_90XB) != 0) &&
1321 ((stat & EX_UPD_IPCHECKED) != 0) &&
1322 ((stat & (EX_UPD_CKSUMERR)) == 0)) {
1323 uint32_t pflags = 0;
1324 if (stat & EX_UPD_IPCHECKED) {
1325 pflags |= HCK_IPV4_HDRCKSUM;
1326 }
1327 if (stat & (EX_UPD_TCPCHECKED | EX_UPD_UDPCHECKED)) {
1328 pflags |= (HCK_FULLCKSUM | HCK_FULLCKSUM_OK);
1329 }
1330 (void) hcksum_assoc(mp, NULL, NULL, 0, 0, 0, 0, pflags, 0);
1331 }
1332
1333 return (mp);
1334 }
1335
1336 static int
1337 elxl_m_start(void *arg)
1338 {
1339 elxl_t *sc = arg;
1340
1341 mutex_enter(&sc->ex_intrlock);
1342 mutex_enter(&sc->ex_txlock);
1343
1344 elxl_init(sc);
1345 sc->ex_running = B_TRUE;
1346
1347 mutex_exit(&sc->ex_txlock);
1348 mutex_exit(&sc->ex_intrlock);
1349
1350 if (sc->ex_miih) {
1351 mii_start(sc->ex_miih);
1352 }
1353 return (0);
1354 }
1355
1356 static void
1357 elxl_m_stop(void *arg)
1358 {
1359 elxl_t *sc = arg;
1360
1361 if (sc->ex_miih) {
1362 mii_stop(sc->ex_miih);
1363 }
1364
1365 mutex_enter(&sc->ex_intrlock);
1366 mutex_enter(&sc->ex_txlock);
1367
1368 elxl_stop(sc);
1369 sc->ex_running = B_FALSE;
1370
1371 mutex_exit(&sc->ex_txlock);
1372 mutex_exit(&sc->ex_intrlock);
1373 }
1374
1375 static boolean_t
1376 elxl_m_getcapab(void *arg, mac_capab_t cap, void *data)
1377 {
1378 elxl_t *sc = arg;
1379 switch (cap) {
1380 case MAC_CAPAB_HCKSUM: {
1381 uint32_t *flags = data;
1382 if (sc->ex_conf & CONF_90XB) {
1383 *flags = HCKSUM_IPHDRCKSUM | HCKSUM_INET_FULL_V4;
1384 return (B_TRUE);
1385 }
1386 return (B_FALSE);
1387 }
1388 default:
1389 return (B_FALSE);
1390 }
1391 }
1392
1393 static int
1394 elxl_m_getprop(void *arg, const char *name, mac_prop_id_t num, uint_t sz,
1395 void *val)
1396 {
1397 elxl_t *sc = arg;
1398 int rv;
1399
1400 if (sc->ex_mii_active) {
1401 rv = mii_m_getprop(sc->ex_miih, name, num, sz, val);
1402 if (rv != ENOTSUP)
1403 return (rv);
1404 }
1405
1406 switch (num) {
1407 case MAC_PROP_DUPLEX:
1408 *(link_duplex_t *)val = sc->ex_duplex;
1409 break;
1410 case MAC_PROP_SPEED:
1411 *(uint64_t *)val = sc->ex_speed;
1412 break;
1413 case MAC_PROP_STATUS:
1414 bcopy(&sc->ex_link, val, sizeof (link_state_t));
1415 break;
1416
1417 case MAC_PROP_PRIVATE:
1418 if (strcmp(name, "_media") == 0) {
1419 char *str;
1420
1421 switch (sc->ex_xcvr) {
1422 case XCVR_SEL_AUTO:
1423 case XCVR_SEL_MII:
1424 str = "mii";
1425 break;
1426 case XCVR_SEL_10T:
1427 str = sc->ex_fdx ? "tp-fdx" : "tp-hdx";
1428 break;
1429 case XCVR_SEL_BNC:
1430 str = "bnc";
1431 break;
1432 case XCVR_SEL_AUI:
1433 if (sc->ex_mediaopt & MEDIAOPT_10FL) {
1434 str = sc->ex_fdx ? "fl-fdx" : "fl-hdx";
1435 } else {
1436 str = "aui";
1437 }
1438 break;
1439 case XCVR_SEL_100FX:
1440 str = sc->ex_fdx ? "fx-fdx" : "fx-hdx";
1441 break;
1442 default:
1443 str = "unknown";
1444 break;
1445 }
1446 (void) snprintf(val, sz, "%s", str);
1447 return (0);
1448 }
1449 /*
1450 * This available media property is a hack, and should
1451 * be removed when we can provide proper support for
1452 * querying it as proposed in PSARC 2009/235. (At the
1453 * moment the implementation lacks support for using
1454 * MAC_PROP_POSSIBLE with private properties.)
1455 */
1456 if (strcmp(name, "_available_media") == 0) {
1457 (void) snprintf(val, sz, "%s", sc->ex_medias);
1458 return (0);
1459 }
1460 break;
1461 }
1462 return (ENOTSUP);
1463 }
1464
1465 static int
1466 elxl_m_setprop(void *arg, const char *name, mac_prop_id_t num, uint_t sz,
1467 const void *val)
1468 {
1469 elxl_t *sc = arg;
1470 int rv;
1471
1472 if (sc->ex_mii_active) {
1473 rv = mii_m_setprop(sc->ex_miih, name, num, sz, val);
1474 if (rv != ENOTSUP) {
1475 return (rv);
1476 }
1477 }
1478 switch (num) {
1479
1480 case MAC_PROP_PRIVATE:
1481 if (strcmp(name, "_media") == 0) {
1482 uint32_t mopt = sc->ex_mediaopt;
1483
1484 if (strcmp(val, "mii") == 0) {
1485 if (mopt & MEDIAOPT_100TX) {
1486 sc->ex_xcvr = XCVR_SEL_AUTO;
1487 } else if (mopt & MEDIAOPT_MII) {
1488 sc->ex_xcvr = XCVR_SEL_MII;
1489 } else {
1490 return (EINVAL);
1491 }
1492 } else if (strcmp(val, "tp-fdx") == 0) {
1493 /* select media option */
1494 if (mopt & MEDIAOPT_10T) {
1495 sc->ex_xcvr = XCVR_SEL_10T;
1496 sc->ex_fdx = B_TRUE;
1497 } else {
1498 return (EINVAL);
1499 }
1500 } else if (strcmp(val, "tp-hdx") == 0) {
1501 /* select media option */
1502 if (mopt & MEDIAOPT_10T) {
1503 sc->ex_xcvr = XCVR_SEL_10T;
1504 sc->ex_fdx = B_FALSE;
1505 } else {
1506 return (EINVAL);
1507 }
1508 } else if (strcmp(val, "fx-fdx") == 0) {
1509 if (mopt & MEDIAOPT_100FX) {
1510 sc->ex_xcvr = XCVR_SEL_100FX;
1511 sc->ex_fdx = B_TRUE;
1512 } else {
1513 return (EINVAL);
1514 }
1515 } else if (strcmp(val, "fx-hdx") == 0) {
1516 if (mopt & MEDIAOPT_100FX) {
1517 sc->ex_xcvr = XCVR_SEL_100FX;
1518 sc->ex_fdx = B_FALSE;
1519 } else {
1520 return (EINVAL);
1521 }
1522 } else if (strcmp(val, "bnc") == 0) {
1523 if (mopt & MEDIAOPT_BNC) {
1524 sc->ex_xcvr = XCVR_SEL_BNC;
1525 sc->ex_fdx = B_FALSE;
1526 } else {
1527 return (EINVAL);
1528 }
1529 } else if (strcmp(val, "aui") == 0) {
1530 if (mopt & MEDIAOPT_AUI) {
1531 sc->ex_xcvr = XCVR_SEL_AUI;
1532 sc->ex_fdx = B_FALSE;
1533 } else {
1534 return (EINVAL);
1535 }
1536 } else if (strcmp(val, "fl-fdx") == 0) {
1537 if (mopt & MEDIAOPT_10FL) {
1538 sc->ex_xcvr = XCVR_SEL_AUI;
1539 sc->ex_fdx = B_TRUE;
1540 } else {
1541 return (EINVAL);
1542 }
1543 } else if (strcmp(val, "fl-hdx") == 0) {
1544 if (mopt & MEDIAOPT_10FL) {
1545 sc->ex_xcvr = XCVR_SEL_AUI;
1546 sc->ex_fdx = B_FALSE;
1547 } else {
1548 return (EINVAL);
1549 }
1550
1551 } else {
1552 return (EINVAL);
1553 }
1554 goto reset;
1555 }
1556 break;
1557 default:
1558 break;
1559 }
1560
1561 return (ENOTSUP);
1562
1563 reset:
1564 mutex_enter(&sc->ex_intrlock);
1565 mutex_enter(&sc->ex_txlock);
1566 if (!sc->ex_suspended) {
1567 elxl_reset(sc);
1568 if (sc->ex_running) {
1569 elxl_init(sc);
1570 }
1571 }
1572 mutex_exit(&sc->ex_txlock);
1573 mutex_exit(&sc->ex_intrlock);
1574 return (0);
1575 }
1576
1577 static void
1578 elxl_m_propinfo(void *arg, const char *name, mac_prop_id_t num,
1579 mac_prop_info_handle_t prh)
1580 {
1581 elxl_t *sc = arg;
1582
1583 if (sc->ex_mii_active)
1584 mii_m_propinfo(sc->ex_miih, name, num, prh);
1585
1586 switch (num) {
1587 case MAC_PROP_DUPLEX:
1588 case MAC_PROP_SPEED:
1589 case MAC_PROP_STATUS:
1590 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
1591 break;
1592
1593 case MAC_PROP_PRIVATE:
1594 if (strcmp(name, "_available_media") == 0)
1595 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
1596 break;
1597 }
1598 }
1599
1600 static int
1601 elxl_m_stat(void *arg, uint_t stat, uint64_t *val)
1602 {
1603 elxl_t *sc = arg;
1604
1605 if (stat == MAC_STAT_IFSPEED) {
1606 elxl_getstats(sc);
1607 }
1608
1609 if ((sc->ex_mii_active) &&
1610 (mii_m_getstat(sc->ex_miih, stat, val) == 0)) {
1611 return (0);
1612 }
1613
1614 switch (stat) {
1615 case MAC_STAT_IFSPEED:
1616 *val = sc->ex_speed;
1617 break;
1618
1619 case ETHER_STAT_LINK_DUPLEX:
1620 *val = sc->ex_duplex;
1621 break;
1622
1623 case MAC_STAT_MULTIRCV:
1624 *val = sc->ex_multircv;
1625 break;
1626
1627 case MAC_STAT_BRDCSTRCV:
1628 *val = sc->ex_brdcstrcv;
1629 break;
1630
1631 case MAC_STAT_MULTIXMT:
1632 *val = sc->ex_multixmt;
1633 break;
1634
1635 case MAC_STAT_BRDCSTXMT:
1636 *val = sc->ex_brdcstxmt;
1637 break;
1638
1639 case MAC_STAT_IPACKETS:
1640 *val = sc->ex_ipackets;
1641 break;
1642
1643 case MAC_STAT_OPACKETS:
1644 *val = sc->ex_opackets;
1645 break;
1646
1647 case MAC_STAT_RBYTES:
1648 *val = sc->ex_ibytes;
1649 break;
1650 case MAC_STAT_OBYTES:
1651 *val = sc->ex_obytes;
1652 break;
1653
1654 case MAC_STAT_COLLISIONS:
1655 case ETHER_STAT_FIRST_COLLISIONS:
1656 *val = sc->ex_singlecol + sc->ex_multcol;
1657 break;
1658
1659 case ETHER_STAT_MULTI_COLLISIONS:
1660 *val = sc->ex_multcol;
1661 break;
1662
1663 case ETHER_STAT_TX_LATE_COLLISIONS:
1664 *val = sc->ex_latecol;
1665 break;
1666
1667 case ETHER_STAT_ALIGN_ERRORS:
1668 *val = sc->ex_align;
1669 break;
1670
1671 case ETHER_STAT_FCS_ERRORS:
1672 *val = sc->ex_fcs;
1673 break;
1674
1675 case ETHER_STAT_SQE_ERRORS:
1676 *val = sc->ex_sqe;
1677 break;
1678
1679 case ETHER_STAT_DEFER_XMTS:
1680 *val = sc->ex_defer;
1681 break;
1682
1683 case ETHER_STAT_CARRIER_ERRORS:
1684 *val = sc->ex_nocarrier;
1685 break;
1686
1687 case ETHER_STAT_TOOLONG_ERRORS:
1688 *val = sc->ex_toolong;
1689 break;
1690
1691 case ETHER_STAT_EX_COLLISIONS:
1692 *val = sc->ex_excoll;
1693 break;
1694
1695 case MAC_STAT_OVERFLOWS:
1696 *val = sc->ex_oflo;
1697 break;
1698
1699 case MAC_STAT_UNDERFLOWS:
1700 *val = sc->ex_uflo;
1701 break;
1702
1703 case ETHER_STAT_TOOSHORT_ERRORS:
1704 *val = sc->ex_runt;
1705 break;
1706
1707 case ETHER_STAT_JABBER_ERRORS:
1708 *val = sc->ex_jabber;
1709 break;
1710
1711 case MAC_STAT_NORCVBUF:
1712 *val = sc->ex_allocbfail;
1713 break;
1714
1715 case MAC_STAT_OERRORS:
1716 *val = sc->ex_jabber + sc->ex_latecol + sc->ex_uflo;
1717 break;
1718
1719 case MAC_STAT_IERRORS:
1720 *val = sc->ex_align + sc->ex_fcs + sc->ex_runt +
1721 sc->ex_toolong + sc->ex_oflo + sc->ex_allocbfail;
1722 break;
1723
1724 default:
1725 return (ENOTSUP);
1726 }
1727 return (0);
1728 }
1729
1730 static uint_t
1731 elxl_intr(caddr_t arg, caddr_t dontcare)
1732 {
1733 elxl_t *sc = (void *)arg;
1734 uint16_t stat;
1735 mblk_t *mphead = NULL;
1736 mblk_t **mpp = &mphead;
1737
1738 _NOTE(ARGUNUSED(dontcare));
1739
1740 mutex_enter(&sc->ex_intrlock);
1741 if (sc->ex_suspended) {
1742 mutex_exit(&sc->ex_intrlock);
1743 return (DDI_INTR_UNCLAIMED);
1744 }
1745
1746 stat = GET16(REG_CMD_STAT);
1747
1748 if ((stat & INT_LATCH) == 0) {
1749 mutex_exit(&sc->ex_intrlock);
1750 return (DDI_INTR_UNCLAIMED);
1751 }
1752
1753 /*
1754 * Acknowledge interrupts.
1755 */
1756 PUT_CMD(CMD_INT_ACK | (stat & INT_WATCHED) | INT_LATCH);
1757
1758 if (stat & INT_HOST_ERROR) {
1759 /* XXX: Potentially a good spot for FMA */
1760 elxl_error(sc, "Adapter failure (%x)", stat);
1761 mutex_enter(&sc->ex_txlock);
1762 elxl_reset(sc);
1763 if (sc->ex_running)
1764 elxl_init(sc);
1765 mutex_exit(&sc->ex_txlock);
1766 mutex_exit(&sc->ex_intrlock);
1767 return (DDI_INTR_CLAIMED);
1768 }
1769 if (stat & INT_UP_COMPLETE) {
1770 ex_ring_t *r;
1771 ex_desc_t *rxd;
1772 ex_pd_t *pd;
1773 mblk_t *mp;
1774 uint32_t pktstat;
1775
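/*
 * Upload (receive) completions: walk the ring from r_head, copy each
 * completed packet out via elxl_recv(), then recycle the descriptor
 * by clearing its status and handing the buffer back to the NIC.
 * Received packets are chained onto mphead and passed to the MAC
 * layer after the interrupt lock is dropped.
 */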
1776 r = &sc->ex_rxring;
1777
1778 for (;;) {
1779 rxd = r->r_head;
1780 pd = rxd->ed_pd;
1781
1782 (void) ddi_dma_sync(r->r_dmah, rxd->ed_off,
1783 sizeof (ex_pd_t), DDI_DMA_SYNC_FORKERNEL);
1784
1785 pktstat = GET_PD(r, pd->pd_status);
1786
1787 if ((pktstat & EX_UPD_COMPLETE) == 0) {
1788 break;
1789 }
1790
1791 /* Advance head to next packet. */
1792 r->r_head = r->r_head->ed_next;
1793
1794 if ((mp = elxl_recv(sc, rxd, pktstat)) != NULL) {
1795 *mpp = mp;
1796 mpp = &mp->b_next;
1797 }
1798
1799 /* clear the upComplete status, reset other fields */
1800 PUT_PD(r, pd->pd_status, 0);
1801 PUT_PD(r, pd->pd_len, EX_BUFSZ | EX_FR_LAST);
1802 PUT_PD(r, pd->pd_addr, rxd->ed_bufaddr);
1803 (void) ddi_dma_sync(r->r_dmah, rxd->ed_off,
1804 sizeof (ex_pd_t), DDI_DMA_SYNC_FORDEV);
1805 }
1806
1807 /*
1808 * If the engine stalled processing (due to
1809 * insufficient UPDs usually), restart it.
1810 */
1811 if (GET32(REG_UPLISTPTR) == 0) {
1812 /*
1813 * It seems that this can happen in an RX overrun
1814 * situation.
1815 */
1816 mutex_enter(&sc->ex_txlock);
1817 if (sc->ex_running)
1818 elxl_init(sc);
1819 mutex_exit(&sc->ex_txlock);
1820 }
1821 PUT_CMD(CMD_UP_UNSTALL);
1822 }
1823
1824 mutex_exit(&sc->ex_intrlock);
1825
1826 if (mphead) {
1827 mac_rx(sc->ex_mach, NULL, mphead);
1828 }
1829 if (stat & INT_STATS) {
1830 elxl_getstats(sc);
1831 }
1832 if (stat & INT_DN_COMPLETE) {
1833 mac_tx_update(sc->ex_mach);
1834 }
1835
1836 return (DDI_INTR_CLAIMED);
1837 }
1838
1839 static void
1840 elxl_getstats(elxl_t *sc)
1841 {
1842 mutex_enter(&sc->ex_txlock);
1843 if (sc->ex_suspended) {
1844 mutex_exit(&sc->ex_txlock);
1845 return;
1846 }
1847
1848 SET_WIN(6);
1849 /*
1850 * We count the packets and bytes elsewhere, but we need to
1851 * read the registers to clear them.
1852 */
1853 (void) GET8(W6_RX_FRAMES);
1854 (void) GET8(W6_TX_FRAMES);
1855 (void) GET8(W6_UPPER_FRAMES);
1856 (void) GET8(W6_RX_OVERRUNS); /* counted by elxl_recv */
1857 (void) GET16(W6_RX_BYTES);
1858 (void) GET16(W6_TX_BYTES);
1859
1860 sc->ex_defer += GET8(W6_DEFER);
1861 sc->ex_latecol += GET8(W6_TX_LATE_COL);
1862 sc->ex_singlecol += GET8(W6_SINGLE_COL);
1863 sc->ex_multcol += GET8(W6_MULT_COL);
1864 sc->ex_sqe += GET8(W6_SQE_ERRORS);
1865 sc->ex_nocarrier += GET8(W6_NO_CARRIER);
1866
1867 SET_WIN(4);
1868 /* Note: we ought to report this somewhere... */
1869 (void) GET8(W4_BADSSD);
1870
1871 mutex_exit(&sc->ex_txlock);
1872 }
1873
1874 static void
1875 elxl_reset(elxl_t *sc)
1876 {
1877 PUT_CMD(CMD_GLOBAL_RESET);
1878 /*
1879 * Some ASICs need a longer time (20 ms) to come properly out
1880 * of reset. Do not reduce this value.
1881 *
1882 * Note that this occurs only during attach and failure recovery,
1883 * so it should be mostly harmless.
1884 */
1885 drv_usecwait(20000);
1886 WAIT_CMD(sc);
1887 }
1888
1889 static void
1890 elxl_stop(elxl_t *sc)
1891 {
1892 ASSERT(mutex_owned(&sc->ex_intrlock));
1893 ASSERT(mutex_owned(&sc->ex_txlock));
1894
1895 if (sc->ex_suspended)
1896 return;
1897
1898 PUT_CMD(CMD_RX_DISABLE);
1899 PUT_CMD(CMD_TX_DISABLE);
1900 PUT_CMD(CMD_BNC_DISABLE);
1901
1902 elxl_reset_ring(&sc->ex_rxring, DDI_DMA_READ);
1903 elxl_reset_ring(&sc->ex_txring, DDI_DMA_WRITE);
1904
1905 PUT_CMD(CMD_INT_ACK | INT_LATCH);
1906 /* Disable all interrupts. (0 means "none".) */
1907 PUT_CMD(CMD_INT_ENABLE | 0);
1908 }
1909
1910 static void
1911 elxl_suspend(elxl_t *sc)
1912 {
1913 if (sc->ex_miih) {
1914 mii_suspend(sc->ex_miih);
1915 }
1916
1917 mutex_enter(&sc->ex_intrlock);
1918 mutex_enter(&sc->ex_txlock);
1919 elxl_stop(sc);
1920 sc->ex_suspended = B_TRUE;
1921 mutex_exit(&sc->ex_txlock);
1922 mutex_exit(&sc->ex_intrlock);
1923 }
1924
1925 static void
1926 elxl_resume(dev_info_t *dip)
1927 {
1928 elxl_t *sc;
1929
1930 /* This should always succeed. */
1931 sc = ddi_get_driver_private(dip);
1932 ASSERT(sc);
1933
1934 mutex_enter(&sc->ex_intrlock);
1935 mutex_enter(&sc->ex_txlock);
1936 sc->ex_suspended = B_FALSE;
1937 elxl_reset(sc);
1938 if (sc->ex_running)
1939 elxl_init(sc);
1940 mutex_exit(&sc->ex_txlock);
1941 mutex_exit(&sc->ex_intrlock);
1942
1943 if (sc->ex_miih) {
1944 mii_resume(sc->ex_miih);
1945 }
1946 }
1947
1948 static void
1949 elxl_detach(elxl_t *sc)
1950 {
1951 if (sc->ex_miih) {
1952 /* Detach all PHYs */
1953 mii_free(sc->ex_miih);
1954 }
1955 if (sc->ex_linkcheck) {
1956 ddi_periodic_delete(sc->ex_linkcheck);
1957 }
1958
1959 if (sc->ex_intrh != NULL) {
1960 (void) ddi_intr_disable(sc->ex_intrh);
1961 (void) ddi_intr_remove_handler(sc->ex_intrh);
1962 (void) ddi_intr_free(sc->ex_intrh);
1963 mutex_destroy(&sc->ex_intrlock);
1964 mutex_destroy(&sc->ex_txlock);
1965 }
1966
1967 if (sc->ex_pcih) {
1968 pci_config_teardown(&sc->ex_pcih);
1969 }
1970 if (sc->ex_regsh) {
1971 ddi_regs_map_free(&sc->ex_regsh);
1972 }
1973 ex_free_ring(&sc->ex_txring);
1974 ex_free_ring(&sc->ex_rxring);
1975
1976 kmem_free(sc, sizeof (*sc));
1977 }
1978
1979 /*
1980 * Read EEPROM data. If we can't unbusy the EEPROM, then zero will be
1981 * returned. This will probably result in a bogus node address.
1982 */
1983 static uint16_t
1984 elxl_read_eeprom(elxl_t *sc, int offset)
1985 {
1986 uint16_t data = 0;
1987
1988 SET_WIN(0);
1989 if (elxl_eeprom_busy(sc))
1990 goto out;
1991
1992 PUT16(W0_EE_CMD, EE_CMD_READ | (offset & 0x3f));
1993 if (elxl_eeprom_busy(sc))
1994 goto out;
1995 data = GET16(W0_EE_DATA);
1996 out:
1997 return (data);
1998 }
1999
2000 static int
2001 elxl_eeprom_busy(elxl_t *sc)
2002 {
2003 int i = 2000;
2004
2005 while (i--) {
2006 if (!(GET16(W0_EE_CMD) & EE_CMD_BUSY))
2007 return (0);
2008 drv_usecwait(100);
2009 }
2010 elxl_error(sc, "Eeprom stays busy.");
2011 return (1);
2012 }
2013
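/*
 * Clock 'cnt' bits of 'bits' out over the MII management interface,
 * most significant bit first, raising and lowering the management
 * clock around each data bit.
 */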
2014 static void
2015 ex_mii_send_bits(struct ex_softc *sc, uint16_t bits, int cnt)
2016 {
2017 uint16_t val;
2018 ASSERT(cnt > 0);
2019
2020 PUT16(W4_PHYSMGMT, PHYSMGMT_DIR);
2021 drv_usecwait(1);
2022
2023 for (int i = (1 << (cnt - 1)); i; i >>= 1) {
2024 if (bits & i) {
2025 val = PHYSMGMT_DIR | PHYSMGMT_DATA;
2026 } else {
2027 val = PHYSMGMT_DIR;
2028 }
2029 PUT16(W4_PHYSMGMT, val);
2030 drv_usecwait(1);
2031 PUT16(W4_PHYSMGMT, val | PHYSMGMT_CLK);
2032 drv_usecwait(1);
2033 PUT16(W4_PHYSMGMT, val);
2034 drv_usecwait(1);
2035 }
2036 }
2037
2038 static void
2039 ex_mii_sync(struct ex_softc *sc)
2040 {
2041 /*
2042 * We set the data bit output, and strobe the clock 32 times.
2043 */
2044 PUT16(W4_PHYSMGMT, PHYSMGMT_DATA | PHYSMGMT_DIR);
2045 drv_usecwait(1);
2046
2047 for (int i = 0; i < 32; i++) {
2048 PUT16(W4_PHYSMGMT, PHYSMGMT_DATA | PHYSMGMT_DIR | PHYSMGMT_CLK);
2049 drv_usecwait(1);
2050 PUT16(W4_PHYSMGMT, PHYSMGMT_DATA | PHYSMGMT_DIR);
2051 drv_usecwait(1);
2052 }
2053 }
2054
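/*
 * Read a PHY register by bit-banging the standard MII management
 * frame through the window 4 physical management register: start
 * (01), read opcode (10), 5-bit PHY address, 5-bit register address,
 * a turnaround, then 16 data bits clocked in from the PHY.
 */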
2055 static uint16_t
2056 elxl_mii_read(void *arg, uint8_t phy, uint8_t reg)
2057 {
2058 elxl_t *sc = arg;
2059 uint16_t data;
2060 int val;
2061
2062 if ((sc->ex_conf & CONF_INTPHY) && phy != INTPHY_ID)
2063 return (0xffff);
2064
2065 mutex_enter(&sc->ex_txlock);
2066 SET_WIN(4);
2067
2068 ex_mii_sync(sc);
2069
2070 ex_mii_send_bits(sc, 1, 2); /* start */
2071 ex_mii_send_bits(sc, 2, 2); /* read command */
2072 ex_mii_send_bits(sc, phy, 5);
2073 ex_mii_send_bits(sc, reg, 5);
2074
2075 PUT16(W4_PHYSMGMT, 0); /* switch to input */
2076 drv_usecwait(1);
2077 PUT16(W4_PHYSMGMT, PHYSMGMT_CLK); /* turnaround time */
2078 drv_usecwait(1);
2079 PUT16(W4_PHYSMGMT, 0);
2080 drv_usecwait(1);
2081
2082 PUT16(W4_PHYSMGMT, PHYSMGMT_CLK); /* idle time */
2083 drv_usecwait(1);
2084 PUT16(W4_PHYSMGMT, 0);
2085 drv_usecwait(1);
2086
2087 for (data = 0, val = 0x8000; val; val >>= 1) {
2088 if (GET16(W4_PHYSMGMT) & PHYSMGMT_DATA) {
2089 data |= val;
2090 }
2091 /* strobe the clock */
2092 PUT16(W4_PHYSMGMT, PHYSMGMT_CLK);
2093 drv_usecwait(1);
2094 PUT16(W4_PHYSMGMT, 0);
2095 drv_usecwait(1);
2096 }
2097
2098 /* return to output mode */
2099 PUT16(W4_PHYSMGMT, PHYSMGMT_DIR);
2100 drv_usecwait(1);
2101
2102 mutex_exit(&sc->ex_txlock);
2103
2104 return (data);
2105 }
2106
2107 static void
2108 elxl_mii_write(void *arg, uint8_t phy, uint8_t reg, uint16_t data)
2109 {
2110 elxl_t *sc = arg;
2111
2112 if ((sc->ex_conf & CONF_INTPHY) && phy != INTPHY_ID)
2113 return;
2114
2115 mutex_enter(&sc->ex_txlock);
2116 SET_WIN(4);
2117
2118 ex_mii_sync(sc);
2119 ex_mii_send_bits(sc, 1, 2); /* start */
2120 ex_mii_send_bits(sc, 1, 2); /* write */
2121 ex_mii_send_bits(sc, phy, 5);
2122 ex_mii_send_bits(sc, reg, 5);
2123 ex_mii_send_bits(sc, 2, 2); /* ack/turnaround */
2124 ex_mii_send_bits(sc, data, 16);
2125
2126 /* return to output mode */
2127 PUT16(W4_PHYSMGMT, PHYSMGMT_DIR);
2128 drv_usecwait(1);
2129
2130 mutex_exit(&sc->ex_txlock);
2131 }
2132
2133 static void
2134 elxl_mii_notify(void *arg, link_state_t link)
2135 {
2136 elxl_t *sc = arg;
2137 int mctl;
2138 link_duplex_t duplex;
2139
2140 duplex = mii_get_duplex(sc->ex_miih);
2141
2142 mutex_enter(&sc->ex_txlock);
2143 if (!sc->ex_mii_active) {
2144 /* If we're using some other legacy media, bail out now */
2145 mutex_exit(&sc->ex_txlock);
2146 return;
2147 }
2148 if (!sc->ex_suspended) {
2149 SET_WIN(3);
2150 mctl = GET16(W3_MAC_CONTROL);
2151 if (duplex == LINK_DUPLEX_FULL)
2152 mctl |= MAC_CONTROL_FDX;
2153 else
2154 mctl &= ~MAC_CONTROL_FDX;
2155 PUT16(W3_MAC_CONTROL, mctl);
2156 }
2157 mutex_exit(&sc->ex_txlock);
2158
2159 mac_link_update(sc->ex_mach, link);
2160 }
2161
2162 static int
2163 elxl_ddi_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
2164 {
2165 switch (cmd) {
2166 case DDI_ATTACH:
2167 return (elxl_attach(dip));
2168
2169 case DDI_RESUME:
2170 elxl_resume(dip);
2171 return (DDI_SUCCESS);
2172
2173 default:
2174 return (DDI_FAILURE);
2175 }
2176 }
2177
2178 static int
2179 elxl_ddi_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
2180 {
2181 elxl_t *sc;
2182
2183 sc = ddi_get_driver_private(dip);
2184 ASSERT(sc);
2185
2186 switch (cmd) {
2187 case DDI_DETACH:
2188 if (mac_disable(sc->ex_mach) != 0) {
2189 return (DDI_FAILURE);
2190 }
2191 (void) mac_unregister(sc->ex_mach);
2192 elxl_detach(sc);
2193 return (DDI_SUCCESS);
2194
2195 case DDI_SUSPEND:
2196 elxl_suspend(sc);
2197 return (DDI_SUCCESS);
2198
2199 default:
2200 return (DDI_FAILURE);
2201 }
2202 }
2203
2204 static int
2205 elxl_ddi_quiesce(dev_info_t *dip)
2206 {
2207 elxl_t *sc;
2208
2209 sc = ddi_get_driver_private(dip);
2210 ASSERT(sc);
2211
2212 if (!sc->ex_suspended)
2213 elxl_reset(sc);
2214 return (DDI_SUCCESS);
2215 }
2216
2217 static void
2218 elxl_error(elxl_t *sc, char *fmt, ...)
2219 {
2220 va_list ap;
2221 char buf[256];
2222
2223 va_start(ap, fmt);
2224 (void) vsnprintf(buf, sizeof (buf), fmt, ap);
2225 va_end(ap);
2226
2227 cmn_err(CE_WARN, "%s%d: %s",
2228 ddi_driver_name(sc->ex_dip), ddi_get_instance(sc->ex_dip), buf);
2229 }
2230