1 /*
2 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
3 * Use is subject to license terms.
4 * Copyright 2018 Joyent, Inc.
5 */
6
7 /*
8 * Copyright (c) 1998 The NetBSD Foundation, Inc.
9 * All rights reserved.
10 *
11 * This code is derived from software contributed to The NetBSD Foundation
12 * by Frank van der Linden.
13 *
14 * Redistribution and use in source and binary forms, with or without
15 * modification, are permitted provided that the following conditions
16 * are met:
17 * 1. Redistributions of source code must retain the above copyright
18 * notice, this list of conditions and the following disclaimer.
19 * 2. Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in the
21 * documentation and/or other materials provided with the distribution.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
24 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
25 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
26 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
27 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
30 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
31 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
32 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
33 * POSSIBILITY OF SUCH DAMAGE.
34 */
35
36 #include <sys/varargs.h>
37 #include <sys/types.h>
38 #include <sys/modctl.h>
39 #include <sys/conf.h>
40 #include <sys/devops.h>
41 #include <sys/stream.h>
42 #include <sys/strsun.h>
43 #include <sys/cmn_err.h>
44 #include <sys/ethernet.h>
45 #include <sys/pci.h>
46 #include <sys/kmem.h>
47 #include <sys/time.h>
48 #include <sys/mii.h>
49 #include <sys/miiregs.h>
50 #include <sys/mac_ether.h>
51 #include <sys/mac_provider.h>
52 #include <sys/strsubr.h>
53 #include <sys/pattr.h>
54 #include <sys/dlpi.h>
55 #include <sys/ddi.h>
56 #include <sys/sunddi.h>
57
58 #include <sys/vlan.h>
59
60 #include "elxl.h"
61
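/*
 * Note on register access: most device registers are reached through
 * overlapping register windows selected with SET_WIN() (see elxl.h).  The
 * W<n>_* offsets used throughout this file are only meaningful while the
 * corresponding window <n> is selected.
 */
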
62 static boolean_t elxl_add_intr(elxl_t *);
63 static void elxl_probe_media(elxl_t *);
64 static void elxl_set_rxfilter(elxl_t *);
65 static void elxl_set_media(elxl_t *);
66 static uint16_t elxl_read_eeprom(elxl_t *, int);
67 static void elxl_init(elxl_t *);
68 static void elxl_stop(elxl_t *);
69 static void elxl_reset(elxl_t *);
70 static void elxl_getstats(elxl_t *);
71
72 static int elxl_eeprom_busy(elxl_t *);
73
74 static void elxl_setup_tx(elxl_t *);
75
76 static uint16_t elxl_mii_read(void *, uint8_t, uint8_t);
77 static void elxl_mii_write(void *, uint8_t, uint8_t, uint16_t);
78 static void elxl_mii_notify(void *, link_state_t);
79
80 static int elxl_m_stat(void *, uint_t, uint64_t *);
81 static int elxl_m_start(void *);
82 static void elxl_m_stop(void *);
83 static mblk_t *elxl_m_tx(void *, mblk_t *);
84 static int elxl_m_promisc(void *, boolean_t);
85 static int elxl_m_multicst(void *, boolean_t, const uint8_t *);
86 static int elxl_m_unicst(void *, const uint8_t *);
87 static int elxl_m_getprop(void *, const char *, mac_prop_id_t, uint_t,
88 void *);
89 static int elxl_m_setprop(void *, const char *, mac_prop_id_t, uint_t,
90 const void *);
91 static void elxl_m_propinfo(void *, const char *, mac_prop_id_t,
92 mac_prop_info_handle_t);
93 static boolean_t elxl_m_getcapab(void *, mac_capab_t cap, void *);
94 static uint_t elxl_intr(caddr_t, caddr_t);
95 static void elxl_error(elxl_t *, char *, ...);
96 static void elxl_linkcheck(void *);
97 static int elxl_attach(dev_info_t *);
98 static void elxl_detach(elxl_t *);
99 static void elxl_suspend(elxl_t *);
100 static void elxl_resume(dev_info_t *);
101 static int elxl_ddi_attach(dev_info_t *, ddi_attach_cmd_t);
102 static int elxl_ddi_detach(dev_info_t *, ddi_detach_cmd_t);
103 static int elxl_ddi_quiesce(dev_info_t *);
104
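/*
 * Register mappings are little-endian and strictly ordered; packet buffers
 * are ordinary memory, so they are never byte-swapped and store caching is
 * acceptable.
 */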
105 static ddi_device_acc_attr_t ex_dev_acc_attr = {
106 DDI_DEVICE_ATTR_V0,
107 DDI_STRUCTURE_LE_ACC,
108 DDI_STRICTORDER_ACC
109 };
110
111 static ddi_device_acc_attr_t ex_buf_acc_attr = {
112 DDI_DEVICE_ATTR_V0,
113 DDI_NEVERSWAP_ACC,
114 DDI_STORECACHING_OK_ACC
115 };
116
117 /*
118 * In theory buffers can have more flexible DMA attributes, but since
119 * we're just using a preallocated region with bcopy, there is little
120 * reason to allow for rougher alignment. (Further, the 8-byte
121 * alignment can allow for more efficient bcopy and similar operations
122 * from the buffer.)
123 */
124 static ddi_dma_attr_t ex_dma_attr = {
125 DMA_ATTR_V0, /* dma_attr_version */
126 0, /* dma_attr_addr_lo */
127 0xFFFFFFFFU, /* dma_attr_addr_hi */
128 0x00FFFFFFU, /* dma_attr_count_max */
129 8, /* dma_attr_align */
130 0x7F, /* dma_attr_burstsizes */
131 1, /* dma_attr_minxfer */
132 0xFFFFFFFFU, /* dma_attr_maxxfer */
133 0xFFFFFFFFU, /* dma_attr_seg */
134 1, /* dma_attr_sgllen */
135 1, /* dma_attr_granular */
136 0 /* dma_attr_flags */
137 };
138
139 static uint8_t ex_broadcast[6] = {
140 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
141 };
142
143 /*
144 * Structure to map media-present bits in boards to ifmedia codes and
145 * printable media names. Used for table-driven ifmedia initialization.
146 */
147 typedef struct ex_media {
148 int exm_mpbit; /* media present bit */
149 int exm_xcvr; /* XCVR_SEL_* constant */
150 } ex_media_t;
151
152 /*
153 * Media table for 3c90x chips. Note that chips with MII have no
154 * `native' media. This is sorted in "reverse preference".
155 */
156 static ex_media_t ex_native_media[] = {
157 { MEDIAOPT_AUI, XCVR_SEL_AUI },
158 { MEDIAOPT_BNC, XCVR_SEL_BNC },
159 { MEDIAOPT_10T, XCVR_SEL_10T },
160 { MEDIAOPT_100TX, XCVR_SEL_AUTO }, /* only 90XB */
161 { MEDIAOPT_100FX, XCVR_SEL_100FX },
162 { MEDIAOPT_MII, XCVR_SEL_MII },
163 { MEDIAOPT_100T4, XCVR_SEL_MII },
164 { 0, 0 },
165 };
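
/*
 * elxl_probe_media() walks this table in order, overwriting its choice with
 * each medium found to be present, so the last (most preferred) match wins
 * unless the adapter's default transceiver (from W3_INTERNAL_CONFIG) is
 * encountered first.
 */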
166
167
168 /*
169 * NB: There are lots of other models that *could* be supported.
170 * Specifically there are cardbus and miniPCI variants that could be
171 * easily added here, but they require special hacks and I have no
172  * access to the hardware required to verify them.  In particular, they
173 * seem to require some extra work in another register window, and I
174 * have no supporting documentation.
175 */
176 static const struct ex_product {
177 uint16_t epp_prodid; /* PCI product ID */
178 const char *epp_name; /* device name */
179 unsigned epp_flags; /* initial softc flags */
180 } ex_products[] = {
181 { 0x4500, "3c450-TX", 0 },
182 { 0x7646, "3cSOHO100-TX", 0 },
183 { 0x9000, "3c900-TPO", 0 },
184 { 0x9001, "3c900-COMBO", 0 },
185 { 0x9004, "3c900B-TPO", 0 },
186 { 0x9005, "3c900B-COMBO", 0 },
187 { 0x9006, "3c900B-TPC", 0 },
188 { 0x900a, "3c900B-FL", 0 },
189 { 0x9050, "3c905-TX", 0 },
190 { 0x9051, "3c905-T4", 0 },
191 { 0x9055, "3c905B-TX", 0 },
192 { 0x9056, "3c905B-T4", 0 },
193 { 0x9058, "3c905B-COMBO", 0 },
194 { 0x905a, "3c905B-FX", 0 },
195 { 0x9200, "3c905C-TX", 0 },
196 { 0x9201, "3c920B-EMB", 0 },
197 { 0x9202, "3c920B-EMB-WNM", 0 },
198 { 0x9800, "3c980", 0 },
199 { 0x9805, "3c980C-TXM", 0 },
200
201 { 0, NULL, 0 },
202 };
203
204 static char *ex_priv_prop[] = {
205 "_media",
206 "_available_media",
207 NULL
208 };
209
210 static mii_ops_t ex_mii_ops = {
211 MII_OPS_VERSION,
212 elxl_mii_read,
213 elxl_mii_write,
214 elxl_mii_notify,
215 };
216
217 static mac_callbacks_t elxl_m_callbacks = {
218 MC_GETCAPAB | MC_PROPERTIES,
219 elxl_m_stat,
220 elxl_m_start,
221 elxl_m_stop,
222 elxl_m_promisc,
223 elxl_m_multicst,
224 elxl_m_unicst,
225 elxl_m_tx,
226 NULL,
227 NULL,
228 elxl_m_getcapab,
229 NULL,
230 NULL,
231 elxl_m_setprop,
232 elxl_m_getprop,
233 elxl_m_propinfo
234 };
235
236 /*
237 * Stream information
238 */
239 DDI_DEFINE_STREAM_OPS(ex_devops, nulldev, nulldev,
240 elxl_ddi_attach, elxl_ddi_detach,
241 nodev, NULL, D_MP, NULL, elxl_ddi_quiesce);
242
243 /*
244 * Module linkage information.
245 */
246
247 static struct modldrv ex_modldrv = {
248 &mod_driverops, /* drv_modops */
249 "3Com EtherLink XL", /* drv_linkinfo */
250 &ex_devops /* drv_dev_ops */
251 };
252
253 static struct modlinkage ex_modlinkage = {
254 MODREV_1, /* ml_rev */
255 { &ex_modldrv, NULL } /* ml_linkage */
256 };
257
258 int
259 _init(void)
260 {
261 int rv;
262 mac_init_ops(&ex_devops, "elxl");
263 if ((rv = mod_install(&ex_modlinkage)) != DDI_SUCCESS) {
264 mac_fini_ops(&ex_devops);
265 }
266 return (rv);
267 }
268
269 int
270 _fini(void)
271 {
272 int rv;
273 if ((rv = mod_remove(&ex_modlinkage)) == DDI_SUCCESS) {
274 mac_fini_ops(&ex_devops);
275 }
276 return (rv);
277 }
278
279 int
280 _info(struct modinfo *modinfop)
281 {
282 return (mod_info(&ex_modlinkage, modinfop));
283 }
284
285 static void
286 ex_free_ring(ex_ring_t *r)
287 {
288 for (int i = 0; i < r->r_count; i++) {
289 ex_desc_t *ed = &r->r_desc[i];
290 if (ed->ed_bufaddr)
291 (void) ddi_dma_unbind_handle(ed->ed_dmah);
292 if (ed->ed_acch)
293 ddi_dma_mem_free(&ed->ed_acch);
294 if (ed->ed_dmah)
295 ddi_dma_free_handle(&ed->ed_dmah);
296 }
297
298 if (r->r_paddr)
299 (void) ddi_dma_unbind_handle(r->r_dmah);
300 if (r->r_acch)
301 ddi_dma_mem_free(&r->r_acch);
302 if (r->r_dmah)
303 ddi_dma_free_handle(&r->r_dmah);
304
305 kmem_free(r->r_desc, sizeof (ex_desc_t) * r->r_count);
306 r->r_desc = NULL;
307 }
308
309 static void
310 elxl_reset_ring(ex_ring_t *r, uint_t dir)
311 {
312 ex_desc_t *ed;
313 ex_pd_t *pd;
314
315 if (dir == DDI_DMA_WRITE) {
316 /* transmit ring, not linked yet */
317 for (int i = 0; i < r->r_count; i++) {
318 ed = &r->r_desc[i];
319 pd = ed->ed_pd;
320 PUT_PD(r, pd->pd_link, 0);
321 PUT_PD(r, pd->pd_fsh, 0);
322 PUT_PD(r, pd->pd_len, EX_FR_LAST);
323 PUT_PD(r, pd->pd_addr, ed->ed_bufaddr);
324 }
325 r->r_head = NULL;
326 r->r_tail = NULL;
327 r->r_avail = r->r_count;
328 } else {
329 /* receive is linked into a list */
330 for (int i = 0; i < r->r_count; i++) {
331 ed = &r->r_desc[i];
332 pd = ed->ed_pd;
333 PUT_PD(r, pd->pd_link, ed->ed_next->ed_descaddr);
334 PUT_PD(r, pd->pd_status, 0);
335 PUT_PD(r, pd->pd_len, EX_BUFSZ | EX_FR_LAST);
336 PUT_PD(r, pd->pd_addr, ed->ed_bufaddr);
337 }
338 r->r_head = &r->r_desc[0];
339 r->r_tail = NULL;
340 r->r_avail = 0;
341 }
342 (void) ddi_dma_sync(r->r_dmah, 0, 0, DDI_DMA_SYNC_FORDEV);
343 }
344
345 static boolean_t
346 ex_alloc_ring(elxl_t *sc, int count, ex_ring_t *r, uint_t dir)
347 {
348 dev_info_t *dip = sc->ex_dip;
349 int i;
350 int rv;
351 size_t len;
352 ddi_dma_cookie_t dmac;
353 unsigned ndmac;
354
355 r->r_count = count;
356 r->r_desc = kmem_zalloc(sizeof (ex_desc_t) * count, KM_SLEEP);
357
358 rv = ddi_dma_alloc_handle(dip, &ex_dma_attr, DDI_DMA_DONTWAIT,
359 NULL, &r->r_dmah);
360 if (rv != DDI_SUCCESS) {
361 elxl_error(sc, "unable to allocate descriptor dma handle");
362 return (B_FALSE);
363 }
364
365 rv = ddi_dma_mem_alloc(r->r_dmah, count * sizeof (struct ex_pd),
366 &ex_dev_acc_attr, DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
367 (caddr_t *)&r->r_pd, &len, &r->r_acch);
368 if (rv != DDI_SUCCESS) {
369 elxl_error(sc, "unable to allocate descriptor memory");
370 return (B_FALSE);
371 }
372 bzero(r->r_pd, len);
373
374 rv = ddi_dma_addr_bind_handle(r->r_dmah, NULL,
375 (caddr_t)r->r_pd, len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
376 DDI_DMA_DONTWAIT, NULL, &dmac, &ndmac);
377 if (rv != DDI_DMA_MAPPED) {
378 elxl_error(sc, "unable to map descriptor memory");
379 return (B_FALSE);
380 }
381 r->r_paddr = dmac.dmac_address;
382
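	/*
	 * The descriptor array is a single physically contiguous region
	 * (dma_attr_sgllen is 1), so each descriptor's device-visible
	 * address is simply the base address plus its byte offset.
	 */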
383 for (i = 0; i < count; i++) {
384 ex_desc_t *ed = &r->r_desc[i];
385 ex_pd_t *pd = &r->r_pd[i];
386
387 ed->ed_pd = pd;
388 ed->ed_off = (i * sizeof (ex_pd_t));
389 ed->ed_descaddr = r->r_paddr + (i * sizeof (ex_pd_t));
390
391 /* Link the high level descriptors into a ring. */
392 ed->ed_next = &r->r_desc[(i + 1) % count];
393 ed->ed_next->ed_prev = ed;
394
395 rv = ddi_dma_alloc_handle(dip, &ex_dma_attr,
396 DDI_DMA_DONTWAIT, NULL, &ed->ed_dmah);
397 if (rv != 0) {
398 elxl_error(sc, "can't allocate buf dma handle");
399 return (B_FALSE);
400 }
401 rv = ddi_dma_mem_alloc(ed->ed_dmah, EX_BUFSZ, &ex_buf_acc_attr,
402 DDI_DMA_STREAMING, DDI_DMA_DONTWAIT, NULL, &ed->ed_buf,
403 &len, &ed->ed_acch);
404 if (rv != DDI_SUCCESS) {
405 elxl_error(sc, "unable to allocate buf memory");
406 return (B_FALSE);
407 }
408 bzero(ed->ed_buf, len);
409
410 rv = ddi_dma_addr_bind_handle(ed->ed_dmah, NULL,
411 ed->ed_buf, len, dir | DDI_DMA_STREAMING,
412 DDI_DMA_DONTWAIT, NULL, &dmac, &ndmac);
413 if (rv != DDI_DMA_MAPPED) {
414 elxl_error(sc, "unable to map buf memory");
415 return (B_FALSE);
416 }
417 ed->ed_bufaddr = dmac.dmac_address;
418 }
419
420 elxl_reset_ring(r, dir);
421
422 return (B_TRUE);
423 }
424
425 static boolean_t
426 elxl_add_intr(elxl_t *sc)
427 {
428 dev_info_t *dip;
429 int actual;
430 uint_t ipri;
431
432 int rv;
433
434 dip = sc->ex_dip;
435
436 rv = ddi_intr_alloc(dip, &sc->ex_intrh, DDI_INTR_TYPE_FIXED,
437 0, 1, &actual, DDI_INTR_ALLOC_STRICT);
438 if ((rv != DDI_SUCCESS) || (actual != 1)) {
439 elxl_error(sc, "Unable to allocate interrupt, %d, count %d",
440 rv, actual);
441 return (B_FALSE);
442 }
443
444 if (ddi_intr_get_pri(sc->ex_intrh, &ipri) != DDI_SUCCESS) {
445 elxl_error(sc, "Unable to get interrupt priority");
446 return (B_FALSE);
447 }
448
449 if (ddi_intr_add_handler(sc->ex_intrh, elxl_intr, sc, NULL) !=
450 DDI_SUCCESS) {
451 elxl_error(sc, "Can't add interrupt handler");
452 (void) ddi_intr_free(sc->ex_intrh);
453 sc->ex_intrh = NULL;
454 return (B_FALSE);
455 }
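	/*
	 * Initialize the locks at the interrupt priority so that they can
	 * be taken safely from the interrupt handler.
	 */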
456 mutex_init(&sc->ex_intrlock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(ipri));
457 mutex_init(&sc->ex_txlock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(ipri));
458
459 return (B_TRUE);
460 }
461
462 static int
463 elxl_attach(dev_info_t *dip)
464 {
465 elxl_t *sc;
466 mac_register_t *macp;
467 uint16_t val;
468 uint16_t venid;
469 uint16_t devid;
470 int i;
471
472 sc = kmem_zalloc(sizeof (*sc), KM_SLEEP);
473 ddi_set_driver_private(dip, sc);
474 sc->ex_dip = dip;
475
476 if (pci_config_setup(dip, &sc->ex_pcih) != DDI_SUCCESS) {
477 elxl_error(sc, "unable to setup PCI config handle");
478 goto fail;
479 }
480 venid = pci_config_get16(sc->ex_pcih, PCI_CONF_VENID);
481 devid = pci_config_get16(sc->ex_pcih, PCI_CONF_DEVID);
482
483 if (venid != 0x10b7) {
484 /* Not a 3Com part! */
485 elxl_error(sc, "Unsupported vendor id (0x%x)", venid);
486 goto fail;
487 }
488 for (i = 0; ex_products[i].epp_name; i++) {
489 if (devid == ex_products[i].epp_prodid) {
490 cmn_err(CE_CONT, "?%s%d: 3Com %s",
491 ddi_driver_name(dip),
492 ddi_get_instance(dip),
493 ex_products[i].epp_name);
494 sc->ex_conf = ex_products[i].epp_flags;
495 break;
496 }
497 }
498 if (ex_products[i].epp_name == NULL) {
499 		/* Not a product we know how to support */
500 elxl_error(sc, "Unsupported device id (0x%x)", devid);
501 elxl_error(sc, "Driver may or may not function.");
502 }
503
504 pci_config_put16(sc->ex_pcih, PCI_CONF_COMM,
505 pci_config_get16(sc->ex_pcih, PCI_CONF_COMM) |
506 PCI_COMM_IO | PCI_COMM_MAE | PCI_COMM_ME);
507
508 if (ddi_regs_map_setup(dip, 1, &sc->ex_regsva, 0, 0, &ex_dev_acc_attr,
509 &sc->ex_regsh) != DDI_SUCCESS) {
510 elxl_error(sc, "Unable to map device registers");
511 goto fail;
512 }
513
514 if (!elxl_add_intr(sc)) {
515 goto fail;
516 }
517
518 elxl_reset(sc);
519
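	/*
	 * The factory MAC address lives in three EEPROM words
	 * (EE_OEM_ADDR_0..2), high byte first; unpack it a word at a time.
	 */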
520 val = elxl_read_eeprom(sc, EE_OEM_ADDR_0);
521 sc->ex_factaddr[0] = val >> 8;
522 sc->ex_factaddr[1] = val & 0xff;
523 val = elxl_read_eeprom(sc, EE_OEM_ADDR_1);
524 sc->ex_factaddr[2] = val >> 8;
525 sc->ex_factaddr[3] = val & 0xff;
526 val = elxl_read_eeprom(sc, EE_OEM_ADDR_2);
527 sc->ex_factaddr[4] = val >> 8;
528 sc->ex_factaddr[5] = val & 0xff;
529 bcopy(sc->ex_factaddr, sc->ex_curraddr, 6);
530
531 sc->ex_capab = elxl_read_eeprom(sc, EE_CAPABILITIES);
532
533 /*
534 	 * Is this a 90XB?  If bit 2 (supportsLargePackets) is set, or
535 	 * bit 9 (supportsNoTxLength) is clear, then it's a 90X.
536 	 * Otherwise it's a 90XB.
537 */
538 if ((sc->ex_capab & (1 << 2)) || !(sc->ex_capab & (1 << 9))) {
539 sc->ex_conf &= ~CONF_90XB;
540 } else {
541 sc->ex_conf |= CONF_90XB;
542 }
543
544 if (!ex_alloc_ring(sc, EX_NRX, &sc->ex_rxring, DDI_DMA_READ)) {
545 goto fail;
546 }
547
548 if (!ex_alloc_ring(sc, EX_NTX, &sc->ex_txring, DDI_DMA_WRITE)) {
549 goto fail;
550 }
551
552 elxl_probe_media(sc);
553
554 /*
555 * The probe may have indicated MII!
556 */
557 if (sc->ex_mediaopt & (MEDIAOPT_MII | MEDIAOPT_100TX)) {
558 sc->ex_miih = mii_alloc(sc, sc->ex_dip, &ex_mii_ops);
559 if (sc->ex_miih == NULL) {
560 goto fail;
561 }
562 /*
563 * Note: The 90XB models can in theory support pause,
564 		 * but we're not enabling it now for lack of units to
565 		 * test with.  If this is changed, make sure to
566 * update the code in elxl_mii_notify to set the flow
567 * control field in the W3_MAC_CONTROL register.
568 */
569 mii_set_pauseable(sc->ex_miih, B_FALSE, B_FALSE);
570 }
571 if ((macp = mac_alloc(MAC_VERSION)) == NULL) {
572 elxl_error(sc, "MAC register allocation failed");
573 goto fail;
574 }
575 macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
576 macp->m_driver = sc;
577 macp->m_dip = dip;
578 macp->m_src_addr = sc->ex_curraddr;
579 macp->m_callbacks = &elxl_m_callbacks;
580 macp->m_min_sdu = 0;
581 macp->m_max_sdu = ETHERMTU;
582 macp->m_margin = VLAN_TAGSZ;
583 macp->m_priv_props = ex_priv_prop;
584
585 (void) ddi_intr_enable(sc->ex_intrh);
586
587 if (mac_register(macp, &sc->ex_mach) == DDI_SUCCESS) {
588
589 /*
590 * Note: we don't want to start link checking
591 * until *after* we have added the MAC handle.
592 */
593 if (sc->ex_mediaopt &
594 (MEDIAOPT_MASK & ~(MEDIAOPT_MII | MEDIAOPT_100TX))) {
595
596 /* Check non-MII link state once per second. */
597 sc->ex_linkcheck =
598 ddi_periodic_add(elxl_linkcheck, sc, 10000000, 0);
599 }
600
601 mac_free(macp);
602 return (DDI_SUCCESS);
603 }
604
605 mac_free(macp);
606
607 fail:
608 elxl_detach(sc);
609 return (DDI_FAILURE);
610 }
611
612 /*
613 * Find the media present on non-MII chips, and select the one to use.
614 */
615 static void
616 elxl_probe_media(elxl_t *sc)
617 {
618 ex_media_t *exm;
619 uint32_t config;
620 uint32_t default_media;
621 uint16_t media_options;
622
623 SET_WIN(3);
624 config = GET32(W3_INTERNAL_CONFIG);
625 media_options = GET16(W3_MEDIAOPT);
626
627 /*
628 * We modify the media_options field so that we have a
629 * consistent view of the media available, without worrying
630 * about the version of ASIC, etc.
631 */
632
633 /*
634 * 100BASE-TX is handled differently on 90XB from 90X. Older
635 * parts use the external MII to provide this support.
636 */
637 if (sc->ex_conf & CONF_90XB) {
638 if (media_options & MEDIAOPT_100TX) {
639 /*
640 * 3Com advises that we should only ever use the
641 * auto mode. Notably, it seems that there should
642 * never be a 90XB board with the MEDIAOPT_10T bit set
643 * without this bit. If it happens, the driver will
644 			 * run in 10BASE-T-only compatibility mode.
645 */
646 media_options &= ~MEDIAOPT_10T;
647 }
648 } else {
649 if (media_options & MEDIAOPT_100TX) {
650 /*
651 * If this occurs, we really want to use it like
652 * an MII device. Generally in this situation we
653 * want to use the MII exclusively, and there ought
654 			 * not to be a 10BASE-T transceiver.
655 */
656 media_options |= MEDIAOPT_MII;
657 media_options &= ~MEDIAOPT_100TX;
658 media_options &= ~MEDIAOPT_10T;
659
660 /*
661 * Additionally, some of these devices map all
662 			 * internal PHY registers at *every* address, not
663 * just the "allowed" address 24.
664 */
665 sc->ex_conf |= CONF_INTPHY;
666 }
667 /*
668 * Early versions didn't have 10FL models, and used this
669 * bit for something else (VCO).
670 */
671 media_options &= ~MEDIAOPT_10FL;
672 }
673 if (media_options & MEDIAOPT_100T4) {
674 /* 100BASE-T4 units all use the MII bus. */
675 media_options |= MEDIAOPT_MII;
676 media_options &= ~MEDIAOPT_100T4;
677 }
678
679 /* Save our media options. */
680 sc->ex_mediaopt = media_options;
681
682 #define APPEND_MEDIA(str, bit, name) \
683 if (media_options & (bit)) { \
684 (void) strlcat(str, *str ? "," : "", sizeof (str)); \
685 (void) strlcat(str, name, sizeof (str)); \
686 }
687
688 APPEND_MEDIA(sc->ex_medias, (MEDIAOPT_MII|MEDIAOPT_100TX), "mii");
689 APPEND_MEDIA(sc->ex_medias, MEDIAOPT_10T, "tp-hdx,tp-fdx");
690 APPEND_MEDIA(sc->ex_medias, MEDIAOPT_100FX, "fx-hdx,fx-fdx");
691 APPEND_MEDIA(sc->ex_medias, MEDIAOPT_BNC, "bnc");
692 APPEND_MEDIA(sc->ex_medias, MEDIAOPT_AUI, "aui");
693 APPEND_MEDIA(sc->ex_medias, MEDIAOPT_10FL, "fl-hdx,fl-fdx");
694
695 if (config & XCVR_SEL_100TX) {
696 /* Only found on 90XB. Don't use this, use AUTO instead! */
697 config |= XCVR_SEL_AUTO;
698 config &= ~XCVR_SEL_100TX;
699 }
700
701 default_media = (config & XCVR_SEL_MASK);
702
703 /* Sanity check that there are any media! */
704 if ((media_options & MEDIAOPT_MASK) == 0) {
705 elxl_error(sc,
706 "No media present? Attempting to use default.");
707 /*
708 		 * This "default" may be nonsensical.  At worst it should
709 * cause a busted link.
710 */
711 sc->ex_xcvr = default_media;
712 }
713
714 for (exm = ex_native_media; exm->exm_mpbit != 0; exm++) {
715 if (media_options & exm->exm_mpbit) {
716 if (exm->exm_xcvr == default_media) {
717 /* preferred default is present, just use it */
718 sc->ex_xcvr = default_media;
719 return;
720 }
721
722 sc->ex_xcvr = exm->exm_xcvr;
723 /* but keep trying for other more preferred options */
724 }
725 }
726 }
727
728 /*
729 * Setup transmitter parameters.
730 */
731 static void
732 elxl_setup_tx(elxl_t *sc)
733 {
734 /*
735 * Disable reclaim threshold for 90xB, set free threshold to
736 * 6 * 256 = 1536 for 90x.
737 */
738 if (sc->ex_conf & CONF_90XB)
739 PUT_CMD(CMD_SET_TXRECLAIM | 255);
740 else
741 PUT8(REG_TXFREETHRESH, 6);
742
743 /*
744 	 * We've seen underflows as the root cause of NIC hangs on
745 * older cards. Use a store-and-forward model to prevent that.
746 */
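	/*
	 * The start threshold is expressed in units of four bytes (hence
	 * the shift), so EX_BUFSZ >> 2 asks the NIC to defer transmission
	 * until a full buffer's worth of data has been downloaded.
	 */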
747 PUT_CMD(CMD_SET_TXSTART | EX_BUFSZ >> 2);
748 }
749
750 /*
751 * Bring device up.
752 */
753 static void
754 elxl_init(elxl_t *sc)
755 {
756 if (sc->ex_suspended)
757 return;
758
759 WAIT_CMD(sc);
760 elxl_stop(sc);
761
762 PUT_CMD(CMD_RX_RESET);
763 WAIT_CMD(sc);
764 PUT_CMD(CMD_TX_RESET);
765 WAIT_CMD(sc);
766
767 /* Load Tx parameters. */
768 elxl_setup_tx(sc);
769
770 PUT32(REG_DMACTRL, GET32(REG_DMACTRL) | DMACTRL_UPRXEAREN);
771
772 PUT_CMD(CMD_IND_ENABLE | INT_WATCHED);
773 PUT_CMD(CMD_INT_ENABLE | INT_WATCHED);
774
775 PUT_CMD(CMD_INT_ACK | 0xff);
776
777 elxl_set_media(sc);
778 elxl_set_rxfilter(sc);
779
780 /* Configure for VLAN tag sizing. */
781 SET_WIN(3);
782 if (sc->ex_conf & CONF_90XB) {
783 PUT16(W3_MAX_PKT_SIZE, EX_BUFSZ);
784 } else {
785 PUT16(W3_MAC_CONTROL, GET16(W3_MAC_CONTROL) |
786 MAC_CONTROL_ALLOW_LARGE);
787 }
788
789 PUT_CMD(CMD_SET_RXEARLY | (EX_BUFSZ >> 2));
790
791 PUT_CMD(CMD_STATS_ENABLE);
792 PUT_CMD(CMD_TX_ENABLE);
793 PUT32(REG_UPLISTPTR, sc->ex_rxring.r_paddr);
794 PUT_CMD(CMD_RX_ENABLE);
795 PUT_CMD(CMD_UP_UNSTALL);
796 }
797
798 /*
799 * Set multicast receive filter. Also take care of promiscuous mode.
800 * Note that *some* of this hardware is fully capable of either a 256
801 * or 64 bit multicast hash. However, we can't determine what the
802 * size of the hash table is easily, and so we are expected to be able
803 * to resubmit the entire list of addresses each time. This puts an
804 * onerous burden on the driver to maintain its list of multicast
805 * addresses. Since multicast stuff is usually not that performance
806 * sensitive, and since we don't usually have much of it, we are just
807 * going to skip it. We allow the upper layers to filter it, as
808 * needed, by setting the all-multicast bit if the hardware can do it.
809 * This also reduces our test burden.
810 */
811 static void
812 elxl_set_rxfilter(elxl_t *sc)
813 {
814 uint16_t mask = FILTER_UNICAST | FILTER_ALLBCAST;
815
816 if (sc->ex_suspended)
817 return;
818
819 /*
820 * Set the station address and clear the station mask. The latter
821 	 * is needed for 90x cards; 0 is already the default on 90xB cards.
822 */
823 SET_WIN(2);
824 for (int i = 0; i < ETHERADDRL; i++) {
825 PUT8(W2_STATION_ADDRESS + i, sc->ex_curraddr[i]);
826 PUT8(W2_STATION_MASK + i, 0);
827 }
828
829 if (sc->ex_mccount) {
830 mask |= FILTER_ALLMULTI;
831 }
832 if (sc->ex_promisc) {
833 mask |= FILTER_PROMISC;
834 }
835 PUT_CMD(CMD_SET_FILTER | mask);
836 }
837
838 static void
839 elxl_set_media(elxl_t *sc)
840 {
841 uint32_t configreg;
842
843 SET_WIN(4);
844 PUT16(W4_MEDIASTAT, 0);
845 PUT_CMD(CMD_BNC_DISABLE);
846 drv_usecwait(800);
847
848 /*
849 * Now turn on the selected media/transceiver.
850 */
851 switch (sc->ex_xcvr) {
852 case XCVR_SEL_10T:
853 sc->ex_mii_active = B_FALSE;
854 PUT16(W4_MEDIASTAT,
855 MEDIASTAT_JABGUARD_EN | MEDIASTAT_LINKBEAT_EN);
856 drv_usecwait(800);
857 break;
858
859 case XCVR_SEL_BNC:
860 sc->ex_mii_active = B_FALSE;
861 PUT_CMD(CMD_BNC_ENABLE);
862 drv_usecwait(800);
863 break;
864
865 case XCVR_SEL_100FX:
866 sc->ex_mii_active = B_FALSE; /* Is this really true? */
867 PUT16(W4_MEDIASTAT, MEDIASTAT_LINKBEAT_EN);
868 drv_usecwait(800);
869 break;
870
871 case XCVR_SEL_AUI:
872 sc->ex_mii_active = B_FALSE;
873 PUT16(W4_MEDIASTAT, MEDIASTAT_SQE_EN);
874 drv_usecwait(800);
875 break;
876
877 case XCVR_SEL_AUTO:
878 case XCVR_SEL_MII:
879 /*
880 * This is due to paranoia. If a card claims
881 * to default to MII, but doesn't have it set in
882 * media options, then we don't want to leave
883 		 * the MII active or we'll have problems dereferencing
884 * the "mii handle".
885 */
886 if (sc->ex_miih) {
887 sc->ex_mii_active = B_TRUE;
888 } else {
889 sc->ex_mii_active = B_FALSE;
890 }
891 break;
892
893 default:
894 sc->ex_mii_active = B_FALSE;
895 elxl_error(sc, "Impossible media setting!");
896 break;
897 }
898
899 SET_WIN(3);
900 configreg = GET32(W3_INTERNAL_CONFIG);
901
902 configreg &= ~(XCVR_SEL_MASK);
903 configreg |= (sc->ex_xcvr);
904
905 PUT32(W3_INTERNAL_CONFIG, configreg);
906
907 /*
908 * If we're not using MII, force the full-duplex setting. MII
909 * based modes handle the full-duplex setting via the MII
910 * notify callback.
911 */
912 if (!sc->ex_mii_active) {
913 uint16_t mctl;
914 mctl = GET16(W3_MAC_CONTROL);
915 if (sc->ex_fdx) {
916 mctl |= MAC_CONTROL_FDX;
917 } else {
918 mctl &= ~MAC_CONTROL_FDX;
919 }
920 PUT16(W3_MAC_CONTROL, mctl);
921 }
922 }
923
924 /*
925  * Periodic callback used to track link state on the legacy (non-MII)
926  * media; it simply returns when an MII transceiver is in use.
927 */
928 static void
929 elxl_linkcheck(void *arg)
930 {
931 elxl_t *sc = arg;
932 uint16_t stat;
933 link_state_t link;
934
935 mutex_enter(&sc->ex_txlock);
936 if (sc->ex_mii_active) {
937 mutex_exit(&sc->ex_txlock);
938 return;
939 }
940 if (sc->ex_running && !sc->ex_suspended) {
941 switch (sc->ex_xcvr) {
942 case XCVR_SEL_100FX:
943 /* these media we can detect link on */
944 SET_WIN(4);
945 stat = GET16(W4_MEDIASTAT);
946 if (stat & MEDIASTAT_LINKDETECT) {
947 sc->ex_link = LINK_STATE_UP;
948 sc->ex_speed = 100000000;
949 } else {
950 sc->ex_link = LINK_STATE_DOWN;
951 sc->ex_speed = 0;
952 }
953 break;
954
955 case XCVR_SEL_10T:
956 /* these media we can detect link on */
957 SET_WIN(4);
958 stat = GET16(W4_MEDIASTAT);
959 if (stat & MEDIASTAT_LINKDETECT) {
960 sc->ex_link = LINK_STATE_UP;
961 sc->ex_speed = 10000000;
962 } else {
963 sc->ex_link = LINK_STATE_DOWN;
964 sc->ex_speed = 0;
965 }
966 break;
967
968 case XCVR_SEL_BNC:
969 case XCVR_SEL_AUI:
970 default:
971 /*
972 * For these we don't really know the answer,
973 * but if we lie then at least it won't cause
974 * ifconfig to turn off the RUNNING flag.
975 * This is necessary because we might
976 * transition from LINK_STATE_DOWN when
977 * switching media.
978 */
979 sc->ex_speed = 10000000;
980 sc->ex_link = LINK_STATE_UP;
981 break;
982 }
983 SET_WIN(3);
984 sc->ex_duplex = GET16(W3_MAC_CONTROL) & MAC_CONTROL_FDX ?
985 LINK_DUPLEX_FULL : LINK_DUPLEX_HALF;
986 } else {
987 sc->ex_speed = 0;
988 sc->ex_duplex = LINK_DUPLEX_UNKNOWN;
989 sc->ex_link = LINK_STATE_UNKNOWN;
990 }
991 link = sc->ex_link;
992 mutex_exit(&sc->ex_txlock);
993
994 mac_link_update(sc->ex_mach, link);
995 }
996
997 static int
998 elxl_m_promisc(void *arg, boolean_t on)
999 {
1000 elxl_t *sc = arg;
1001
1002 mutex_enter(&sc->ex_intrlock);
1003 mutex_enter(&sc->ex_txlock);
1004 sc->ex_promisc = on;
1005 elxl_set_rxfilter(sc);
1006 mutex_exit(&sc->ex_txlock);
1007 mutex_exit(&sc->ex_intrlock);
1008 return (0);
1009 }
1010
1011 static int
1012 elxl_m_multicst(void *arg, boolean_t add, const uint8_t *addr)
1013 {
1014 elxl_t *sc = arg;
1015
1016 _NOTE(ARGUNUSED(addr));
1017
1018 mutex_enter(&sc->ex_intrlock);
1019 mutex_enter(&sc->ex_txlock);
1020 if (add) {
1021 sc->ex_mccount++;
1022 if (sc->ex_mccount == 1) {
1023 elxl_set_rxfilter(sc);
1024 }
1025 } else {
1026 sc->ex_mccount--;
1027 if (sc->ex_mccount == 0) {
1028 elxl_set_rxfilter(sc);
1029 }
1030 }
1031 mutex_exit(&sc->ex_txlock);
1032 mutex_exit(&sc->ex_intrlock);
1033 return (0);
1034 }
1035
1036 static int
1037 elxl_m_unicst(void *arg, const uint8_t *addr)
1038 {
1039 elxl_t *sc = arg;
1040
1041 mutex_enter(&sc->ex_intrlock);
1042 mutex_enter(&sc->ex_txlock);
1043 bcopy(addr, sc->ex_curraddr, ETHERADDRL);
1044 elxl_set_rxfilter(sc);
1045 mutex_exit(&sc->ex_txlock);
1046 mutex_exit(&sc->ex_intrlock);
1047
1048 return (0);
1049 }
1050
1051 static mblk_t *
1052 elxl_m_tx(void *arg, mblk_t *mp)
1053 {
1054 elxl_t *sc = arg;
1055 ex_desc_t *txd;
1056 ex_desc_t *first;
1057 ex_desc_t *tail;
1058 size_t len;
1059 ex_ring_t *r;
1060 ex_pd_t *pd;
1061 uint32_t cflags;
1062 mblk_t *nmp;
1063 boolean_t reenable = B_FALSE;
1064 boolean_t reset = B_FALSE;
1065 uint32_t paddr;
1066
1067 r = &sc->ex_txring;
1068 mutex_enter(&sc->ex_txlock);
1069 if (sc->ex_suspended) {
1070 while (mp != NULL) {
1071 sc->ex_nocarrier++;
1072 nmp = mp->b_next;
1073 freemsg(mp);
1074 mp = nmp;
1075 }
1076 mutex_exit(&sc->ex_txlock);
1077 return (NULL);
1078 }
1079
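	/*
	 * Drain the transmit status stack: each completed (or failed)
	 * frame leaves a status byte, and writing the register back pops
	 * it.  Gather error statistics and note whether the transmitter
	 * needs to be re-enabled or reset below.
	 */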
1080 for (int limit = (EX_NTX * 2); limit; limit--) {
1081 uint8_t stat = GET8(REG_TXSTATUS);
1082 if ((stat & TXSTATUS_COMPLETE) == 0) {
1083 break;
1084 }
1085 if (stat & TXSTATUS_MAXCOLLISIONS) {
1086 reenable = B_TRUE;
1087 sc->ex_excoll++;
1088 }
1089 if ((stat & TXSTATUS_ERRS) != 0) {
1090 reset = B_TRUE;
1091 if (stat & TXSTATUS_JABBER) {
1092 sc->ex_jabber++;
1093 }
1094 if (stat & TXSTATUS_RECLAIM_ERR) {
1095 sc->ex_txerr++;
1096 }
1097 if (stat & TXSTATUS_UNDERRUN) {
1098 sc->ex_uflo++;
1099 }
1100 }
1101 PUT8(REG_TXSTATUS, 0);
1102 }
1103
1104 if (reset || reenable) {
1105 paddr = GET32(REG_DNLISTPTR);
1106 if (reset) {
1107 WAIT_CMD(sc);
1108 PUT_CMD(CMD_TX_RESET);
1109 WAIT_CMD(sc);
1110 elxl_setup_tx(sc);
1111 }
1112 PUT_CMD(CMD_TX_ENABLE);
1113 if (paddr) {
1114 PUT32(REG_DNLISTPTR, paddr);
1115 }
1116 }
1117
1118 /* first reclaim any free descriptors */
1119 while (r->r_avail < r->r_count) {
1120
1121 paddr = GET32(REG_DNLISTPTR);
1122 txd = r->r_head;
1123 if (paddr == txd->ed_descaddr) {
1124 /* still processing this one, we're done */
1125 break;
1126 }
1127 if (paddr == 0) {
1128 /* done processing the entire list! */
1129 r->r_head = NULL;
1130 r->r_tail = NULL;
1131 r->r_avail = r->r_count;
1132 break;
1133 }
1134 r->r_avail++;
1135 r->r_head = txd->ed_next;
1136 }
1137
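	/*
	 * If descriptors are still outstanding, stall the download engine
	 * so that it cannot chase a link pointer while we rewrite it when
	 * appending new frames to the list.
	 */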
1138 if ((r->r_avail < r->r_count) && (GET32(REG_DNLISTPTR) != 0)) {
1139 PUT_CMD(CMD_DN_STALL);
1140 WAIT_CMD(sc);
1141 }
1142
1143 first = NULL;
1144 tail = r->r_tail;
1145
1146 /*
1147 * If there is already a tx list, select the next desc on the list.
1148 * Otherwise, just pick the first descriptor.
1149 */
1150 txd = tail ? tail->ed_next : &r->r_desc[0];
1151
1152 while ((mp != NULL) && (r->r_avail)) {
1153
1154 nmp = mp->b_next;
1155
1156 len = msgsize(mp);
1157 if (len > (ETHERMAX + VLAN_TAGSZ)) {
1158 sc->ex_txerr++;
1159 freemsg(mp);
1160 mp = nmp;
1161 continue;
1162 }
1163
1164 cflags = 0;
1165 if ((sc->ex_conf & CONF_90XB) != 0) {
1166 uint32_t pflags;
1167 mac_hcksum_get(mp, NULL, NULL, NULL, NULL, &pflags);
1168 if (pflags & HCK_IPV4_HDRCKSUM) {
1169 cflags |= EX_DPD_IPCKSUM;
1170 }
1171 if (pflags & HCK_FULLCKSUM) {
1172 cflags |= (EX_DPD_TCPCKSUM | EX_DPD_UDPCKSUM);
1173 }
1174 }
1175
1176 		/* Mark this descriptor as in use.  We're committed now. */
1177 mcopymsg(mp, txd->ed_buf); /* frees the mblk! */
1178 r->r_avail--;
1179 mp = nmp;
1180
1181 /* Accounting stuff. */
1182 sc->ex_opackets++;
1183 sc->ex_obytes += len;
1184 if (txd->ed_buf[0] & 0x1) {
1185 if (bcmp(txd->ed_buf, ex_broadcast, ETHERADDRL) != 0) {
1186 sc->ex_multixmt++;
1187 } else {
1188 sc->ex_brdcstxmt++;
1189 }
1190 }
1191
1192 pd = txd->ed_pd;
1193
1194
1195 /*
1196 		 * Zero pad the frame if it's too short.  This
1197 * also avoids a checksum offload bug.
1198 */
1199 if (len < 30) {
1200 bzero(txd->ed_buf + len, ETHERMIN - len);
1201 len = ETHERMIN;
1202 }
1203
1204 /*
1205 		 * If this is our first packet so far, record the head
1206 * of the list.
1207 */
1208 if (first == NULL) {
1209 first = txd;
1210 }
1211
1212 (void) ddi_dma_sync(txd->ed_dmah, 0, 0, DDI_DMA_SYNC_FORDEV);
1213
1214 PUT_PD(r, pd->pd_link, 0);
1215 PUT_PD(r, pd->pd_fsh, len | cflags);
1216 PUT_PD(r, pd->pd_addr, txd->ed_bufaddr);
1217 PUT_PD(r, pd->pd_len, len | EX_FR_LAST);
1218
1219 /*
1220 * Write the link into the previous descriptor. Note that
1221 * if this is the first packet (so no previous queued), this
1222 * will be benign because the previous descriptor won't be
1223 * on any tx list. (Furthermore, we'll clear its link field
1224 * when we do later use it.)
1225 */
1226 PUT_PD(r, txd->ed_prev->ed_pd->pd_link, txd->ed_descaddr);
1227 }
1228
1229 /*
1230 * Are we submitting any packets?
1231 */
1232 if (first != NULL) {
1233 /* Interrupt on the last packet. */
1234 PUT_PD(r, pd->pd_fsh, len | cflags | EX_DPD_DNIND);
1235
1236 if (tail == NULL) {
1237 			/* No packets pending, so it's a new list head! */
1238 r->r_head = first;
1239 } else {
1240 pd = tail->ed_pd;
1241 /* We've added frames, so don't interrupt mid-list. */
1242 PUT_PD(r, pd->pd_fsh,
1243 GET_PD(r, pd->pd_fsh) & ~(EX_DPD_DNIND));
1244 }
1245 /* Record the last descriptor. */
1246 r->r_tail = txd;
1247
1248 		/* flush the entire ring - we're stopped so it's safe */
1249 (void) ddi_dma_sync(r->r_dmah, 0, 0, DDI_DMA_SYNC_FORDEV);
1250 }
1251
1252 /* Restart transmitter. */
1253 if (sc->ex_txring.r_head) {
1254 PUT32(REG_DNLISTPTR, sc->ex_txring.r_head->ed_descaddr);
1255 }
1256 PUT_CMD(CMD_DN_UNSTALL);
1257
1258 mutex_exit(&sc->ex_txlock);
1259
1260 return (mp);
1261 }
1262
1263 static mblk_t *
1264 elxl_recv(elxl_t *sc, ex_desc_t *rxd, uint32_t stat)
1265 {
1266 mblk_t *mp = NULL;
1267 uint32_t len;
1268
1269 len = stat & EX_UPD_PKTLENMASK;
1270 if (stat & (EX_UPD_ERR_VLAN | EX_UPD_OVERFLOW)) {
1271 if (stat & EX_UPD_RUNT) {
1272 sc->ex_runt++;
1273 }
1274 if (stat & EX_UPD_OVERRUN) {
1275 sc->ex_oflo++;
1276 }
1277 if (stat & EX_UPD_CRCERR) {
1278 sc->ex_fcs++;
1279 }
1280 if (stat & EX_UPD_ALIGNERR) {
1281 sc->ex_align++;
1282 }
1283 if (stat & EX_UPD_OVERFLOW) {
1284 sc->ex_toolong++;
1285 }
1286 return (NULL);
1287 }
1288 if (len < sizeof (struct ether_header)) {
1289 sc->ex_runt++;
1290 return (NULL);
1291 }
1292 if (len > (ETHERMAX + VLAN_TAGSZ)) {
1293 /* Allow four bytes for the VLAN header */
1294 sc->ex_toolong++;
1295 return (NULL);
1296 }
1297 if ((mp = allocb(len + 14, BPRI_HI)) == NULL) {
1298 sc->ex_allocbfail++;
1299 return (NULL);
1300 }
1301
1302 (void) ddi_dma_sync(rxd->ed_dmah, 0, 0, DDI_DMA_SYNC_FORKERNEL);
1303 mp->b_rptr += 14;
1304 mp->b_wptr = mp->b_rptr + len;
1305 bcopy(rxd->ed_buf, mp->b_rptr, len);
1306
1307 sc->ex_ipackets++;
1308 sc->ex_ibytes += len;
1309 if (rxd->ed_buf[0] & 0x1) {
1310 if (bcmp(rxd->ed_buf, ex_broadcast, ETHERADDRL) != 0) {
1311 sc->ex_multircv++;
1312 } else {
1313 sc->ex_brdcstrcv++;
1314 }
1315 }
1316
1317 /*
1318 * Set the incoming checksum information for the packet.
1319 */
1320 if (((sc->ex_conf & CONF_90XB) != 0) &&
1321 ((stat & EX_UPD_IPCHECKED) != 0) &&
1322 ((stat & (EX_UPD_CKSUMERR)) == 0)) {
1323 uint32_t pflags = 0;
1324 if (stat & EX_UPD_IPCHECKED) {
1325 pflags |= HCK_IPV4_HDRCKSUM;
1326 }
1327 if (stat & (EX_UPD_TCPCHECKED | EX_UPD_UDPCHECKED)) {
1328 pflags |= (HCK_FULLCKSUM | HCK_FULLCKSUM_OK);
1329 }
1330 mac_hcksum_set(mp, 0, 0, 0, 0, pflags);
1331 }
1332
1333 return (mp);
1334 }
1335
1336 static int
1337 elxl_m_start(void *arg)
1338 {
1339 elxl_t *sc = arg;
1340
1341 mutex_enter(&sc->ex_intrlock);
1342 mutex_enter(&sc->ex_txlock);
1343
1344 elxl_init(sc);
1345 sc->ex_running = B_TRUE;
1346
1347 mutex_exit(&sc->ex_txlock);
1348 mutex_exit(&sc->ex_intrlock);
1349
1350 if (sc->ex_miih) {
1351 mii_start(sc->ex_miih);
1352 }
1353 return (0);
1354 }
1355
1356 static void
1357 elxl_m_stop(void *arg)
1358 {
1359 elxl_t *sc = arg;
1360
1361 if (sc->ex_miih) {
1362 mii_stop(sc->ex_miih);
1363 }
1364
1365 mutex_enter(&sc->ex_intrlock);
1366 mutex_enter(&sc->ex_txlock);
1367
1368 elxl_stop(sc);
1369 sc->ex_running = B_FALSE;
1370
1371 mutex_exit(&sc->ex_txlock);
1372 mutex_exit(&sc->ex_intrlock);
1373 }
1374
1375 static boolean_t
1376 elxl_m_getcapab(void *arg, mac_capab_t cap, void *data)
1377 {
1378 elxl_t *sc = arg;
1379 switch (cap) {
1380 case MAC_CAPAB_HCKSUM: {
1381 uint32_t *flags = data;
1382 if (sc->ex_conf & CONF_90XB) {
1383 *flags = HCKSUM_IPHDRCKSUM | HCKSUM_INET_FULL_V4;
1384 return (B_TRUE);
1385 }
1386 return (B_FALSE);
1387 }
1388 default:
1389 return (B_FALSE);
1390 }
1391 }
1392
1393 static int
1394 elxl_m_getprop(void *arg, const char *name, mac_prop_id_t num, uint_t sz,
1395 void *val)
1396 {
1397 elxl_t *sc = arg;
1398 int rv;
1399
1400 if (sc->ex_mii_active) {
1401 rv = mii_m_getprop(sc->ex_miih, name, num, sz, val);
1402 if (rv != ENOTSUP)
1403 return (rv);
1404 }
1405
1406 switch (num) {
1407 case MAC_PROP_DUPLEX:
1408 *(uint8_t *)val = sc->ex_duplex;
1409 break;
1410 case MAC_PROP_SPEED:
1411 *(uint8_t *)val = sc->ex_speed;
1412 break;
1413 case MAC_PROP_STATUS:
1414 bcopy(&sc->ex_link, val, sizeof (link_state_t));
1415 break;
1416
1417 case MAC_PROP_PRIVATE:
1418 if (strcmp(name, "_media") == 0) {
1419 char *str;
1420
1421 switch (sc->ex_xcvr) {
1422 case XCVR_SEL_AUTO:
1423 case XCVR_SEL_MII:
1424 str = "mii";
1425 break;
1426 case XCVR_SEL_10T:
1427 str = sc->ex_fdx ? "tp-fdx" : "tp-hdx";
1428 break;
1429 case XCVR_SEL_BNC:
1430 str = "bnc";
1431 break;
1432 case XCVR_SEL_AUI:
1433 if (sc->ex_mediaopt & MEDIAOPT_10FL) {
1434 str = sc->ex_fdx ? "fl-fdx" : "fl-hdx";
1435 } else {
1436 str = "aui";
1437 }
1438 break;
1439 case XCVR_SEL_100FX:
1440 str = sc->ex_fdx ? "fx-fdx" : "fx-hdx";
1441 break;
1442 default:
1443 str = "unknown";
1444 break;
1445 }
1446 (void) snprintf(val, sz, "%s", str);
1447 return (0);
1448 }
1449 /*
1450 * This available media property is a hack, and should
1451 * be removed when we can provide proper support for
1452 * querying it as proposed in PSARC 2009/235. (At the
1453 * moment the implementation lacks support for using
1454 * MAC_PROP_POSSIBLE with private properties.)
1455 */
1456 if (strcmp(name, "_available_media") == 0) {
1457 (void) snprintf(val, sz, "%s", sc->ex_medias);
1458 return (0);
1459 }
1460 break;
1461 }
1462 return (ENOTSUP);
1463 }
1464
1465 static int
1466 elxl_m_setprop(void *arg, const char *name, mac_prop_id_t num, uint_t sz,
1467 const void *val)
1468 {
1469 elxl_t *sc = arg;
1470 int rv;
1471
1472 if (sc->ex_mii_active) {
1473 rv = mii_m_setprop(sc->ex_miih, name, num, sz, val);
1474 if (rv != ENOTSUP) {
1475 return (rv);
1476 }
1477 }
1478 switch (num) {
1479
1480 case MAC_PROP_PRIVATE:
1481 if (strcmp(name, "_media") == 0) {
1482 uint32_t mopt = sc->ex_mediaopt;
1483
1484 if (strcmp(val, "mii") == 0) {
1485 if (mopt & MEDIAOPT_100TX) {
1486 sc->ex_xcvr = XCVR_SEL_AUTO;
1487 } else if (mopt & MEDIAOPT_MII) {
1488 sc->ex_xcvr = XCVR_SEL_MII;
1489 } else {
1490 return (EINVAL);
1491 }
1492 } else if (strcmp(val, "tp-fdx") == 0) {
1493 /* select media option */
1494 if (mopt & MEDIAOPT_10T) {
1495 sc->ex_xcvr = XCVR_SEL_10T;
1496 sc->ex_fdx = B_TRUE;
1497 } else {
1498 return (EINVAL);
1499 }
1500 } else if (strcmp(val, "tp-hdx") == 0) {
1501 /* select media option */
1502 if (mopt & MEDIAOPT_10T) {
1503 sc->ex_xcvr = XCVR_SEL_10T;
1504 sc->ex_fdx = B_FALSE;
1505 } else {
1506 return (EINVAL);
1507 }
1508 } else if (strcmp(val, "fx-fdx") == 0) {
1509 if (mopt & MEDIAOPT_100FX) {
1510 sc->ex_xcvr = XCVR_SEL_100FX;
1511 sc->ex_fdx = B_TRUE;
1512 } else {
1513 return (EINVAL);
1514 }
1515 } else if (strcmp(val, "fx-hdx") == 0) {
1516 if (mopt & MEDIAOPT_100FX) {
1517 sc->ex_xcvr = XCVR_SEL_100FX;
1518 sc->ex_fdx = B_FALSE;
1519 } else {
1520 return (EINVAL);
1521 }
1522 } else if (strcmp(val, "bnc") == 0) {
1523 if (mopt & MEDIAOPT_BNC) {
1524 sc->ex_xcvr = XCVR_SEL_BNC;
1525 sc->ex_fdx = B_FALSE;
1526 } else {
1527 return (EINVAL);
1528 }
1529 } else if (strcmp(val, "aui") == 0) {
1530 if (mopt & MEDIAOPT_AUI) {
1531 sc->ex_xcvr = XCVR_SEL_AUI;
1532 sc->ex_fdx = B_FALSE;
1533 } else {
1534 return (EINVAL);
1535 }
1536 } else if (strcmp(val, "fl-fdx") == 0) {
1537 if (mopt & MEDIAOPT_10FL) {
1538 sc->ex_xcvr = XCVR_SEL_AUI;
1539 sc->ex_fdx = B_TRUE;
1540 } else {
1541 return (EINVAL);
1542 }
1543 } else if (strcmp(val, "fl-hdx") == 0) {
1544 if (mopt & MEDIAOPT_10FL) {
1545 sc->ex_xcvr = XCVR_SEL_AUI;
1546 sc->ex_fdx = B_FALSE;
1547 } else {
1548 return (EINVAL);
1549 }
1550
1551 } else {
1552 return (EINVAL);
1553 }
1554 goto reset;
1555 }
1556 break;
1557 default:
1558 break;
1559 }
1560
1561 return (ENOTSUP);
1562
1563 reset:
1564 mutex_enter(&sc->ex_intrlock);
1565 mutex_enter(&sc->ex_txlock);
1566 if (!sc->ex_suspended) {
1567 elxl_reset(sc);
1568 if (sc->ex_running) {
1569 elxl_init(sc);
1570 }
1571 }
1572 mutex_exit(&sc->ex_txlock);
1573 mutex_exit(&sc->ex_intrlock);
1574 return (0);
1575 }
1576
1577 static void
1578 elxl_m_propinfo(void *arg, const char *name, mac_prop_id_t num,
1579 mac_prop_info_handle_t prh)
1580 {
1581 elxl_t *sc = arg;
1582
1583 if (sc->ex_mii_active)
1584 mii_m_propinfo(sc->ex_miih, name, num, prh);
1585
1586 switch (num) {
1587 case MAC_PROP_DUPLEX:
1588 case MAC_PROP_SPEED:
1589 case MAC_PROP_STATUS:
1590 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
1591 break;
1592
1593 case MAC_PROP_PRIVATE:
1594 if (strcmp(name, "_available_media") == 0)
1595 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
1596 break;
1597 }
1598 }
1599
1600 static int
1601 elxl_m_stat(void *arg, uint_t stat, uint64_t *val)
1602 {
1603 elxl_t *sc = arg;
1604
1605 if (stat == MAC_STAT_IFSPEED) {
1606 elxl_getstats(sc);
1607 }
1608
1609 if ((sc->ex_mii_active) &&
1610 (mii_m_getstat(sc->ex_miih, stat, val) == 0)) {
1611 return (0);
1612 }
1613
1614 switch (stat) {
1615 case MAC_STAT_IFSPEED:
1616 *val = sc->ex_speed;
1617 break;
1618
1619 case ETHER_STAT_LINK_DUPLEX:
1620 *val = sc->ex_duplex;
1621 break;
1622
1623 case MAC_STAT_MULTIRCV:
1624 *val = sc->ex_multircv;
1625 break;
1626
1627 case MAC_STAT_BRDCSTRCV:
1628 *val = sc->ex_brdcstrcv;
1629 break;
1630
1631 case MAC_STAT_MULTIXMT:
1632 *val = sc->ex_multixmt;
1633 break;
1634
1635 case MAC_STAT_BRDCSTXMT:
1636 *val = sc->ex_brdcstxmt;
1637 break;
1638
1639 case MAC_STAT_IPACKETS:
1640 *val = sc->ex_ipackets;
1641 break;
1642
1643 case MAC_STAT_OPACKETS:
1644 *val = sc->ex_opackets;
1645 break;
1646
1647 case MAC_STAT_RBYTES:
1648 *val = sc->ex_ibytes;
1649 break;
1650 case MAC_STAT_OBYTES:
1651 *val = sc->ex_obytes;
1652 break;
1653
1654 case MAC_STAT_COLLISIONS:
1655 case ETHER_STAT_FIRST_COLLISIONS:
1656 *val = sc->ex_singlecol + sc->ex_multcol;
1657 break;
1658
1659 case ETHER_STAT_MULTI_COLLISIONS:
1660 *val = sc->ex_multcol;
1661 break;
1662
1663 case ETHER_STAT_TX_LATE_COLLISIONS:
1664 *val = sc->ex_latecol;
1665 break;
1666
1667 case ETHER_STAT_ALIGN_ERRORS:
1668 *val = sc->ex_align;
1669 break;
1670
1671 case ETHER_STAT_FCS_ERRORS:
1672 *val = sc->ex_fcs;
1673 break;
1674
1675 case ETHER_STAT_SQE_ERRORS:
1676 *val = sc->ex_sqe;
1677 break;
1678
1679 case ETHER_STAT_DEFER_XMTS:
1680 *val = sc->ex_defer;
1681 break;
1682
1683 case ETHER_STAT_CARRIER_ERRORS:
1684 *val = sc->ex_nocarrier;
1685 break;
1686
1687 case ETHER_STAT_TOOLONG_ERRORS:
1688 *val = sc->ex_toolong;
1689 break;
1690
1691 case ETHER_STAT_EX_COLLISIONS:
1692 *val = sc->ex_excoll;
1693 break;
1694
1695 case MAC_STAT_OVERFLOWS:
1696 *val = sc->ex_oflo;
1697 break;
1698
1699 case MAC_STAT_UNDERFLOWS:
1700 *val = sc->ex_uflo;
1701 break;
1702
1703 case ETHER_STAT_TOOSHORT_ERRORS:
1704 *val = sc->ex_runt;
1705 break;
1706
1707 case ETHER_STAT_JABBER_ERRORS:
1708 *val = sc->ex_jabber;
1709 break;
1710
1711 case MAC_STAT_NORCVBUF:
1712 *val = sc->ex_allocbfail;
1713 break;
1714
1715 case MAC_STAT_OERRORS:
1716 *val = sc->ex_jabber + sc->ex_latecol + sc->ex_uflo;
1717 break;
1718
1719 case MAC_STAT_IERRORS:
1720 *val = sc->ex_align + sc->ex_fcs + sc->ex_runt +
1721 sc->ex_toolong + sc->ex_oflo + sc->ex_allocbfail;
1722 break;
1723
1724 default:
1725 return (ENOTSUP);
1726 }
1727 return (0);
1728 }
1729
1730 static uint_t
1731 elxl_intr(caddr_t arg, caddr_t dontcare)
1732 {
1733 elxl_t *sc = (void *)arg;
1734 uint16_t stat;
1735 mblk_t *mphead = NULL;
1736 mblk_t **mpp = &mphead;
1737
1738 _NOTE(ARGUNUSED(dontcare));
1739
1740 mutex_enter(&sc->ex_intrlock);
1741 if (sc->ex_suspended) {
1742 mutex_exit(&sc->ex_intrlock);
1743 return (DDI_INTR_UNCLAIMED);
1744 }
1745
1746 stat = GET16(REG_CMD_STAT);
1747
1748 if ((stat & INT_LATCH) == 0) {
1749 mutex_exit(&sc->ex_intrlock);
1750 return (DDI_INTR_UNCLAIMED);
1751 }
1752
1753 /*
1754 * Acknowledge interrupts.
1755 */
1756 PUT_CMD(CMD_INT_ACK | (stat & INT_WATCHED) | INT_LATCH);
1757
1758 if (stat & INT_HOST_ERROR) {
1759 /* XXX: Potentially a good spot for FMA */
1760 elxl_error(sc, "Adapter failure (%x)", stat);
1761 mutex_enter(&sc->ex_txlock);
1762 elxl_reset(sc);
1763 if (sc->ex_running)
1764 elxl_init(sc);
1765 mutex_exit(&sc->ex_txlock);
1766 mutex_exit(&sc->ex_intrlock);
1767 return (DDI_INTR_CLAIMED);
1768 }
1769 if (stat & INT_UP_COMPLETE) {
1770 ex_ring_t *r;
1771 ex_desc_t *rxd;
1772 ex_pd_t *pd;
1773 mblk_t *mp;
1774 uint32_t pktstat;
1775
1776 r = &sc->ex_rxring;
1777
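		/*
		 * Walk the upload (receive) ring from the head, handing
		 * completed packets to elxl_recv() and re-arming each
		 * descriptor for reuse before moving on.
		 */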
1778 for (;;) {
1779 rxd = r->r_head;
1780 pd = rxd->ed_pd;
1781
1782 (void) ddi_dma_sync(r->r_dmah, rxd->ed_off,
1783 sizeof (ex_pd_t), DDI_DMA_SYNC_FORKERNEL);
1784
1785 pktstat = GET_PD(r, pd->pd_status);
1786
1787 if ((pktstat & EX_UPD_COMPLETE) == 0) {
1788 break;
1789 }
1790
1791 /* Advance head to next packet. */
1792 r->r_head = r->r_head->ed_next;
1793
1794 if ((mp = elxl_recv(sc, rxd, pktstat)) != NULL) {
1795 *mpp = mp;
1796 mpp = &mp->b_next;
1797 }
1798
1799 /* clear the upComplete status, reset other fields */
1800 PUT_PD(r, pd->pd_status, 0);
1801 PUT_PD(r, pd->pd_len, EX_BUFSZ | EX_FR_LAST);
1802 PUT_PD(r, pd->pd_addr, rxd->ed_bufaddr);
1803 (void) ddi_dma_sync(r->r_dmah, rxd->ed_off,
1804 sizeof (ex_pd_t), DDI_DMA_SYNC_FORDEV);
1805 }
1806
1807 /*
1808 * If the engine stalled processing (due to
1809 * insufficient UPDs usually), restart it.
1810 */
1811 if (GET32(REG_UPLISTPTR) == 0) {
1812 /*
1813 			 * This seems to happen in an RX overrun
1814 * situation.
1815 */
1816 mutex_enter(&sc->ex_txlock);
1817 if (sc->ex_running)
1818 elxl_init(sc);
1819 mutex_exit(&sc->ex_txlock);
1820 }
1821 PUT_CMD(CMD_UP_UNSTALL);
1822 }
1823
1824 mutex_exit(&sc->ex_intrlock);
1825
1826 if (mphead) {
1827 mac_rx(sc->ex_mach, NULL, mphead);
1828 }
1829 if (stat & INT_STATS) {
1830 elxl_getstats(sc);
1831 }
1832 if (stat & INT_DN_COMPLETE) {
1833 mac_tx_update(sc->ex_mach);
1834 }
1835
1836 return (DDI_INTR_CLAIMED);
1837 }
1838
1839 static void
1840 elxl_getstats(elxl_t *sc)
1841 {
1842 mutex_enter(&sc->ex_txlock);
1843 if (sc->ex_suspended) {
1844 mutex_exit(&sc->ex_txlock);
1845 return;
1846 }
1847
1848 SET_WIN(6);
1849 /*
1850 * We count the packets and bytes elsewhere, but we need to
1851 * read the registers to clear them.
1852 */
1853 (void) GET8(W6_RX_FRAMES);
1854 (void) GET8(W6_TX_FRAMES);
1855 (void) GET8(W6_UPPER_FRAMES);
1856 (void) GET8(W6_RX_OVERRUNS); /* counted by elxl_recv */
1857 (void) GET16(W6_RX_BYTES);
1858 (void) GET16(W6_TX_BYTES);
1859
1860 sc->ex_defer += GET8(W6_DEFER);
1861 sc->ex_latecol += GET8(W6_TX_LATE_COL);
1862 sc->ex_singlecol += GET8(W6_SINGLE_COL);
1863 sc->ex_multcol += GET8(W6_MULT_COL);
1864 sc->ex_sqe += GET8(W6_SQE_ERRORS);
1865 sc->ex_nocarrier += GET8(W6_NO_CARRIER);
1866
1867 SET_WIN(4);
1868 /* Note: we ought to report this somewhere... */
1869 (void) GET8(W4_BADSSD);
1870
1871 mutex_exit(&sc->ex_txlock);
1872 }
1873
1874 static void
1875 elxl_reset(elxl_t *sc)
1876 {
1877 PUT_CMD(CMD_GLOBAL_RESET);
1878 /*
1879 * Some ASICs need a longer time (20 ms) to come properly out
1880 * of reset. Do not reduce this value.
1881 *
1882 * Note that this occurs only during attach and failure recovery,
1883 * so it should be mostly harmless.
1884 */
1885 drv_usecwait(20000);
1886 WAIT_CMD(sc);
1887 }
1888
1889 static void
1890 elxl_stop(elxl_t *sc)
1891 {
1892 ASSERT(mutex_owned(&sc->ex_intrlock));
1893 ASSERT(mutex_owned(&sc->ex_txlock));
1894
1895 if (sc->ex_suspended)
1896 return;
1897
1898 PUT_CMD(CMD_RX_DISABLE);
1899 PUT_CMD(CMD_TX_DISABLE);
1900 PUT_CMD(CMD_BNC_DISABLE);
1901
1902 elxl_reset_ring(&sc->ex_rxring, DDI_DMA_READ);
1903 elxl_reset_ring(&sc->ex_txring, DDI_DMA_WRITE);
1904
1905 PUT_CMD(CMD_INT_ACK | INT_LATCH);
1906 /* Disable all interrupts. (0 means "none".) */
1907 PUT_CMD(CMD_INT_ENABLE | 0);
1908 }
1909
1910 static void
1911 elxl_suspend(elxl_t *sc)
1912 {
1913 if (sc->ex_miih) {
1914 mii_suspend(sc->ex_miih);
1915 }
1916
1917 mutex_enter(&sc->ex_intrlock);
1918 mutex_enter(&sc->ex_txlock);
1919 elxl_stop(sc);
1920 sc->ex_suspended = B_TRUE;
1921 mutex_exit(&sc->ex_txlock);
1922 mutex_exit(&sc->ex_intrlock);
1923 }
1924
1925 static void
1926 elxl_resume(dev_info_t *dip)
1927 {
1928 elxl_t *sc;
1929
1930 /* This should always succeed. */
1931 sc = ddi_get_driver_private(dip);
1932 ASSERT(sc);
1933
1934 mutex_enter(&sc->ex_intrlock);
1935 mutex_enter(&sc->ex_txlock);
1936 sc->ex_suspended = B_FALSE;
1937 elxl_reset(sc);
1938 if (sc->ex_running)
1939 elxl_init(sc);
1940 mutex_exit(&sc->ex_txlock);
1941 mutex_exit(&sc->ex_intrlock);
1942
1943 if (sc->ex_miih) {
1944 mii_resume(sc->ex_miih);
1945 }
1946 }
1947
1948 static void
1949 elxl_detach(elxl_t *sc)
1950 {
1951 if (sc->ex_miih) {
1952 /* Detach all PHYs */
1953 mii_free(sc->ex_miih);
1954 }
1955 if (sc->ex_linkcheck) {
1956 ddi_periodic_delete(sc->ex_linkcheck);
1957 }
1958
1959 if (sc->ex_intrh != NULL) {
1960 (void) ddi_intr_disable(sc->ex_intrh);
1961 (void) ddi_intr_remove_handler(sc->ex_intrh);
1962 (void) ddi_intr_free(sc->ex_intrh);
1963 mutex_destroy(&sc->ex_intrlock);
1964 mutex_destroy(&sc->ex_txlock);
1965 }
1966
1967 if (sc->ex_pcih) {
1968 pci_config_teardown(&sc->ex_pcih);
1969 }
1970 if (sc->ex_regsh) {
1971 ddi_regs_map_free(&sc->ex_regsh);
1972 }
1973 ex_free_ring(&sc->ex_txring);
1974 ex_free_ring(&sc->ex_rxring);
1975
1976 kmem_free(sc, sizeof (*sc));
1977 }
1978
1979 /*
1980 * Read EEPROM data. If we can't unbusy the EEPROM, then zero will be
1981 * returned. This will probably result in a bogus node address.
1982 */
1983 static uint16_t
1984 elxl_read_eeprom(elxl_t *sc, int offset)
1985 {
1986 uint16_t data = 0;
1987
1988 SET_WIN(0);
1989 if (elxl_eeprom_busy(sc))
1990 goto out;
1991
1992 PUT16(W0_EE_CMD, EE_CMD_READ | (offset & 0x3f));
1993 if (elxl_eeprom_busy(sc))
1994 goto out;
1995 data = GET16(W0_EE_DATA);
1996 out:
1997 return (data);
1998 }
1999
2000 static int
2001 elxl_eeprom_busy(elxl_t *sc)
2002 {
2003 int i = 2000;
2004
2005 while (i--) {
2006 if (!(GET16(W0_EE_CMD) & EE_CMD_BUSY))
2007 return (0);
2008 drv_usecwait(100);
2009 }
2010 	elxl_error(sc, "EEPROM stays busy.");
2011 return (1);
2012 }
2013
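/*
 * MII access is bit-banged through the W4_PHYSMGMT register using the
 * standard IEEE 802.3 clause 22 management frame: a 32-bit preamble
 * (ex_mii_sync), a 2-bit start pattern, a 2-bit opcode (read or write),
 * 5 bits of PHY address, 5 bits of register address, a turnaround, and
 * 16 data bits, most significant bit first.
 */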
2014 static void
2015 ex_mii_send_bits(struct ex_softc *sc, uint16_t bits, int cnt)
2016 {
2017 uint16_t val;
2018 ASSERT(cnt > 0);
2019
2020 PUT16(W4_PHYSMGMT, PHYSMGMT_DIR);
2021 drv_usecwait(1);
2022
2023 for (int i = (1 << (cnt - 1)); i; i >>= 1) {
2024 if (bits & i) {
2025 val = PHYSMGMT_DIR | PHYSMGMT_DATA;
2026 } else {
2027 val = PHYSMGMT_DIR;
2028 }
2029 PUT16(W4_PHYSMGMT, val);
2030 drv_usecwait(1);
2031 PUT16(W4_PHYSMGMT, val | PHYSMGMT_CLK);
2032 drv_usecwait(1);
2033 PUT16(W4_PHYSMGMT, val);
2034 drv_usecwait(1);
2035 }
2036 }
2037
2038 static void
2039 ex_mii_sync(struct ex_softc *sc)
2040 {
2041 /*
2042 * We set the data bit output, and strobe the clock 32 times.
2043 */
2044 PUT16(W4_PHYSMGMT, PHYSMGMT_DATA | PHYSMGMT_DIR);
2045 drv_usecwait(1);
2046
2047 for (int i = 0; i < 32; i++) {
2048 PUT16(W4_PHYSMGMT, PHYSMGMT_DATA | PHYSMGMT_DIR | PHYSMGMT_CLK);
2049 drv_usecwait(1);
2050 PUT16(W4_PHYSMGMT, PHYSMGMT_DATA | PHYSMGMT_DIR);
2051 drv_usecwait(1);
2052 }
2053 }
2054
2055 static uint16_t
2056 elxl_mii_read(void *arg, uint8_t phy, uint8_t reg)
2057 {
2058 elxl_t *sc = arg;
2059 uint16_t data;
2060 int val;
2061
2062 if ((sc->ex_conf & CONF_INTPHY) && phy != INTPHY_ID)
2063 return (0xffff);
2064
2065 mutex_enter(&sc->ex_txlock);
2066 SET_WIN(4);
2067
2068 ex_mii_sync(sc);
2069
2070 ex_mii_send_bits(sc, 1, 2); /* start */
2071 ex_mii_send_bits(sc, 2, 2); /* read command */
2072 ex_mii_send_bits(sc, phy, 5);
2073 ex_mii_send_bits(sc, reg, 5);
2074
2075 PUT16(W4_PHYSMGMT, 0); /* switch to input */
2076 drv_usecwait(1);
2077 PUT16(W4_PHYSMGMT, PHYSMGMT_CLK); /* turnaround time */
2078 drv_usecwait(1);
2079 PUT16(W4_PHYSMGMT, 0);
2080 drv_usecwait(1);
2081
2082 PUT16(W4_PHYSMGMT, PHYSMGMT_CLK); /* idle time */
2083 drv_usecwait(1);
2084 PUT16(W4_PHYSMGMT, 0);
2085 drv_usecwait(1);
2086
2087 for (data = 0, val = 0x8000; val; val >>= 1) {
2088 if (GET16(W4_PHYSMGMT) & PHYSMGMT_DATA) {
2089 data |= val;
2090 }
2091 /* strobe the clock */
2092 PUT16(W4_PHYSMGMT, PHYSMGMT_CLK);
2093 drv_usecwait(1);
2094 PUT16(W4_PHYSMGMT, 0);
2095 drv_usecwait(1);
2096 }
2097
2098 /* return to output mode */
2099 PUT16(W4_PHYSMGMT, PHYSMGMT_DIR);
2100 drv_usecwait(1);
2101
2102 mutex_exit(&sc->ex_txlock);
2103
2104 return (data);
2105 }
2106
2107 static void
2108 elxl_mii_write(void *arg, uint8_t phy, uint8_t reg, uint16_t data)
2109 {
2110 elxl_t *sc = arg;
2111
2112 if ((sc->ex_conf & CONF_INTPHY) && phy != INTPHY_ID)
2113 return;
2114
2115 mutex_enter(&sc->ex_txlock);
2116 SET_WIN(4);
2117
2118 ex_mii_sync(sc);
2119 ex_mii_send_bits(sc, 1, 2); /* start */
2120 ex_mii_send_bits(sc, 1, 2); /* write */
2121 ex_mii_send_bits(sc, phy, 5);
2122 ex_mii_send_bits(sc, reg, 5);
2123 ex_mii_send_bits(sc, 2, 2); /* ack/turnaround */
2124 ex_mii_send_bits(sc, data, 16);
2125
2126 /* return to output mode */
2127 PUT16(W4_PHYSMGMT, PHYSMGMT_DIR);
2128 drv_usecwait(1);
2129
2130 mutex_exit(&sc->ex_txlock);
2131 }
2132
2133 static void
2134 elxl_mii_notify(void *arg, link_state_t link)
2135 {
2136 elxl_t *sc = arg;
2137 int mctl;
2138 link_duplex_t duplex;
2139
2140 duplex = mii_get_duplex(sc->ex_miih);
2141
2142 mutex_enter(&sc->ex_txlock);
2143 if (!sc->ex_mii_active) {
2144 /* If we're using some other legacy media, bail out now */
2145 mutex_exit(&sc->ex_txlock);
2146 return;
2147 }
2148 if (!sc->ex_suspended) {
2149 SET_WIN(3);
2150 mctl = GET16(W3_MAC_CONTROL);
2151 if (duplex == LINK_DUPLEX_FULL)
2152 mctl |= MAC_CONTROL_FDX;
2153 else
2154 mctl &= ~MAC_CONTROL_FDX;
2155 PUT16(W3_MAC_CONTROL, mctl);
2156 }
2157 mutex_exit(&sc->ex_txlock);
2158
2159 mac_link_update(sc->ex_mach, link);
2160 }
2161
2162 static int
2163 elxl_ddi_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
2164 {
2165 switch (cmd) {
2166 case DDI_ATTACH:
2167 return (elxl_attach(dip));
2168
2169 case DDI_RESUME:
2170 elxl_resume(dip);
2171 return (DDI_SUCCESS);
2172
2173 default:
2174 return (DDI_FAILURE);
2175 }
2176 }
2177
2178 static int
2179 elxl_ddi_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
2180 {
2181 elxl_t *sc;
2182
2183 sc = ddi_get_driver_private(dip);
2184 ASSERT(sc);
2185
2186 switch (cmd) {
2187 case DDI_DETACH:
2188 if (mac_disable(sc->ex_mach) != 0) {
2189 return (DDI_FAILURE);
2190 }
2191 (void) mac_unregister(sc->ex_mach);
2192 elxl_detach(sc);
2193 return (DDI_SUCCESS);
2194
2195 case DDI_SUSPEND:
2196 elxl_suspend(sc);
2197 return (DDI_SUCCESS);
2198
2199 default:
2200 return (DDI_FAILURE);
2201 }
2202 }
2203
2204 static int
2205 elxl_ddi_quiesce(dev_info_t *dip)
2206 {
2207 elxl_t *sc;
2208
2209 sc = ddi_get_driver_private(dip);
2210 ASSERT(sc);
2211
2212 if (!sc->ex_suspended)
2213 elxl_reset(sc);
2214 return (DDI_SUCCESS);
2215 }
2216
2217 static void
2218 elxl_error(elxl_t *sc, char *fmt, ...)
2219 {
2220 va_list ap;
2221 char buf[256];
2222
2223 va_start(ap, fmt);
2224 (void) vsnprintf(buf, sizeof (buf), fmt, ap);
2225 va_end(ap);
2226
2227 cmn_err(CE_WARN, "%s%d: %s",
2228 ddi_driver_name(sc->ex_dip), ddi_get_instance(sc->ex_dip), buf);
2229 }
2230