1 /*
2 * Solaris driver for ethernet cards based on the ADMtek Centaur
3 *
4 * Copyright (c) 2007 by Garrett D'Amore <garrett@damore.org>.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of the author nor the names of any co-contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS ``AS IS''
20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31 /*
32 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
33 * Use is subject to license terms.
34 */
35
36
37 #include <sys/varargs.h>
38 #include <sys/types.h>
39 #include <sys/modctl.h>
40 #include <sys/conf.h>
41 #include <sys/devops.h>
42 #include <sys/stream.h>
43 #include <sys/strsun.h>
44 #include <sys/cmn_err.h>
45 #include <sys/ethernet.h>
46 #include <sys/kmem.h>
47 #include <sys/time.h>
48 #include <sys/crc32.h>
49 #include <sys/mii.h>
50 #include <sys/miiregs.h>
51 #include <sys/mac.h>
52 #include <sys/mac_ether.h>
53 #include <sys/ddi.h>
54 #include <sys/sunddi.h>
55 #include <sys/vlan.h>
56
57 #include "afe.h"
58 #include "afeimpl.h"
59
60 /*
61 * Driver globals.
62 */
63
64 /* table of supported devices */
65 static afe_card_t afe_cards[] = {
66
67 /*
68 * ADMtek Centaur and Comet
69 */
70 { 0x1317, 0x0981, "ADMtek AL981", MODEL_COMET },
71 { 0x1317, 0x0985, "ADMtek AN983", MODEL_CENTAUR },
72 { 0x1317, 0x1985, "ADMtek AN985", MODEL_CENTAUR },
73 { 0x1317, 0x9511, "ADMtek ADM9511", MODEL_CENTAUR },
74 { 0x1317, 0x9513, "ADMtek ADM9513", MODEL_CENTAUR },
75 /*
76 * Accton just relabels other companies' controllers
77 */
78 { 0x1113, 0x1216, "Accton EN5251", MODEL_CENTAUR },
79 /*
80 * Other vendors' boards built around the ADMtek Centaur.
81 */
82 { 0x10b7, 0x9300, "3Com 3CSOHO100B-TX", MODEL_CENTAUR },
83 { 0x1113, 0xec02, "SMC SMC1244TX", MODEL_CENTAUR },
84 { 0x10b8, 0x1255, "SMC SMC1255TX", MODEL_CENTAUR },
85 { 0x111a, 0x1020, "Siemens SpeedStream PCI 10/100", MODEL_CENTAUR },
86 { 0x1113, 0x1207, "Accton EN1207F", MODEL_CENTAUR },
87 { 0x1113, 0x2242, "Accton EN2242", MODEL_CENTAUR },
88 { 0x1113, 0x2220, "Accton EN2220", MODEL_CENTAUR },
89 { 0x1113, 0x9216, "3M VOL-N100VF+TX", MODEL_CENTAUR },
90 { 0x1317, 0x0574, "Linksys LNE100TX", MODEL_CENTAUR },
91 { 0x1317, 0x0570, "Linksys NC100", MODEL_CENTAUR },
92 { 0x1385, 0x511a, "Netgear FA511", MODEL_CENTAUR },
93 { 0x13d1, 0xab02, "AboCom FE2500", MODEL_CENTAUR },
94 { 0x13d1, 0xab03, "AboCom PCM200", MODEL_CENTAUR },
95 { 0x13d1, 0xab08, "AboCom FE2500MX", MODEL_CENTAUR },
96 { 0x1414, 0x0001, "Microsoft MN-120", MODEL_CENTAUR },
97 { 0x16ec, 0x00ed, "U.S. Robotics USR997900", MODEL_CENTAUR },
98 { 0x1734, 0x100c, "Fujitsu-Siemens D1961", MODEL_CENTAUR },
99 { 0x1737, 0xab08, "Linksys PCMPC200", MODEL_CENTAUR },
100 { 0x1737, 0xab09, "Linksys PCM200", MODEL_CENTAUR },
101 { 0x17b3, 0xab08, "Hawking PN672TX", MODEL_CENTAUR },
102 };
103
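/* maximum frame length, leaving room for a 4-byte 802.1Q VLAN tag */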
104 #define ETHERVLANMTU (ETHERMAX + 4)
105
106 /*
107 * Function prototypes
108 */
109 static int afe_attach(dev_info_t *, ddi_attach_cmd_t);
110 static int afe_detach(dev_info_t *, ddi_detach_cmd_t);
111 static int afe_resume(dev_info_t *);
112 static int afe_quiesce(dev_info_t *);
113 static int afe_m_unicst(void *, const uint8_t *);
114 static int afe_m_multicst(void *, boolean_t, const uint8_t *);
115 static int afe_m_promisc(void *, boolean_t);
116 static mblk_t *afe_m_tx(void *, mblk_t *);
117 static void afe_m_ioctl(void *, queue_t *, mblk_t *);
118 static int afe_m_stat(void *, uint_t, uint64_t *);
119 static int afe_m_start(void *);
120 static void afe_m_stop(void *);
121 static int afe_m_getprop(void *, const char *, mac_prop_id_t, uint_t,
122 void *);
123 static int afe_m_setprop(void *, const char *, mac_prop_id_t, uint_t,
124 const void *);
125 static void afe_m_propinfo(void *, const char *, mac_prop_id_t,
126 mac_prop_info_handle_t);
127 static unsigned afe_intr(caddr_t);
128 static void afe_startmac(afe_t *);
129 static void afe_stopmac(afe_t *);
130 static void afe_resetrings(afe_t *);
131 static boolean_t afe_initialize(afe_t *);
132 static void afe_startall(afe_t *);
133 static void afe_stopall(afe_t *);
134 static void afe_resetall(afe_t *);
135 static afe_txbuf_t *afe_alloctxbuf(afe_t *);
136 static void afe_destroytxbuf(afe_txbuf_t *);
137 static afe_rxbuf_t *afe_allocrxbuf(afe_t *);
138 static void afe_destroyrxbuf(afe_rxbuf_t *);
139 static boolean_t afe_send(afe_t *, mblk_t *);
140 static int afe_allocrxring(afe_t *);
141 static void afe_freerxring(afe_t *);
142 static int afe_alloctxring(afe_t *);
143 static void afe_freetxring(afe_t *);
144 static void afe_error(dev_info_t *, char *, ...);
145 static void afe_setrxfilt(afe_t *);
146 static int afe_watchdog(afe_t *);
147 static uint8_t afe_sromwidth(afe_t *);
148 static uint16_t afe_readsromword(afe_t *, unsigned);
149 static void afe_readsrom(afe_t *, unsigned, unsigned, char *);
150 static void afe_getfactaddr(afe_t *, uchar_t *);
151 static uint8_t afe_miireadbit(afe_t *);
152 static void afe_miiwritebit(afe_t *, uint8_t);
153 static void afe_miitristate(afe_t *);
154 static uint16_t afe_miireadgeneral(afe_t *, uint8_t, uint8_t);
155 static void afe_miiwritegeneral(afe_t *, uint8_t, uint8_t, uint16_t);
156 static uint16_t afe_miireadcomet(afe_t *, uint8_t, uint8_t);
157 static void afe_miiwritecomet(afe_t *, uint8_t, uint8_t, uint16_t);
158 static uint16_t afe_mii_read(void *, uint8_t, uint8_t);
159 static void afe_mii_write(void *, uint8_t, uint8_t, uint16_t);
160 static void afe_mii_notify(void *, link_state_t);
161 static void afe_mii_reset(void *);
162 static void afe_disableinterrupts(afe_t *);
163 static void afe_enableinterrupts(afe_t *);
164 static void afe_reclaim(afe_t *);
165 static mblk_t *afe_receive(afe_t *);
166
167 #define KIOIP KSTAT_INTR_PTR(afep->afe_intrstat)
168
169 static mii_ops_t afe_mii_ops = {
170 MII_OPS_VERSION,
171 afe_mii_read,
172 afe_mii_write,
173 afe_mii_notify,
174 afe_mii_reset
175 };
176
177 static mac_callbacks_t afe_m_callbacks = {
178 MC_IOCTL | MC_SETPROP | MC_GETPROP | MC_PROPINFO,
179 afe_m_stat,
180 afe_m_start,
181 afe_m_stop,
182 afe_m_promisc,
183 afe_m_multicst,
184 afe_m_unicst,
185 afe_m_tx,
186 NULL,
187 afe_m_ioctl, /* mc_ioctl */
188 NULL, /* mc_getcapab */
189 NULL, /* mc_open */
190 NULL, /* mc_close */
191 afe_m_setprop,
192 afe_m_getprop,
193 afe_m_propinfo
194 };
195
196
197 /*
198 * Stream information
199 */
200 DDI_DEFINE_STREAM_OPS(afe_devops, nulldev, nulldev, afe_attach, afe_detach,
201 nodev, NULL, D_MP, NULL, afe_quiesce);
202
203 /*
204 * Module linkage information.
205 */
206
207 static struct modldrv afe_modldrv = {
208 &mod_driverops, /* drv_modops */
209 "ADMtek Fast Ethernet", /* drv_linkinfo */
210 &afe_devops /* drv_dev_ops */
211 };
212
213 static struct modlinkage afe_modlinkage = {
214 MODREV_1, /* ml_rev */
215 { &afe_modldrv, NULL } /* ml_linkage */
216 };
217
218 /*
219 * Device attributes.
220 */
221 static ddi_device_acc_attr_t afe_devattr = {
222 DDI_DEVICE_ATTR_V0,
223 DDI_STRUCTURE_LE_ACC,
224 DDI_STRICTORDER_ACC
225 };
226
227 static ddi_device_acc_attr_t afe_bufattr = {
228 DDI_DEVICE_ATTR_V0,
229 DDI_NEVERSWAP_ACC,
230 DDI_STRICTORDER_ACC
231 };
232
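/*
 * DMA attributes for descriptor rings and receive buffers: 32-bit
 * addresses, 4-byte alignment, and a single cookie per binding.
 */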
233 static ddi_dma_attr_t afe_dma_attr = {
234 DMA_ATTR_V0, /* dma_attr_version */
235 0, /* dma_attr_addr_lo */
236 0xFFFFFFFFU, /* dma_attr_addr_hi */
237 0x7FFFFFFFU, /* dma_attr_count_max */
238 4, /* dma_attr_align */
239 0x3F, /* dma_attr_burstsizes */
240 1, /* dma_attr_minxfer */
241 0xFFFFFFFFU, /* dma_attr_maxxfer */
242 0xFFFFFFFFU, /* dma_attr_seg */
243 1, /* dma_attr_sgllen */
244 1, /* dma_attr_granular */
245 0 /* dma_attr_flags */
246 };
247
248 /*
249 * Tx buffers can be arbitrarily aligned. Additionally, they can
250 * cross a page boundary, so we use the two buffer addresses of the
251 * chip to provide a two-entry scatter-gather list.
252 */
253 static ddi_dma_attr_t afe_dma_txattr = {
254 DMA_ATTR_V0, /* dma_attr_version */
255 0, /* dma_attr_addr_lo */
256 0xFFFFFFFFU, /* dma_attr_addr_hi */
257 0x7FFFFFFFU, /* dma_attr_count_max */
258 1, /* dma_attr_align */
259 0x3F, /* dma_attr_burstsizes */
260 1, /* dma_attr_minxfer */
261 0xFFFFFFFFU, /* dma_attr_maxxfer */
262 0xFFFFFFFFU, /* dma_attr_seg */
263 2, /* dma_attr_sgllen */
264 1, /* dma_attr_granular */
265 0 /* dma_attr_flags */
266 };
267
268 /*
269 * Ethernet addresses.
270 */
271 static uchar_t afe_broadcast[ETHERADDRL] = {
272 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
273 };
274
275 /*
276 * DDI entry points.
277 */
278 int
279 _init(void)
280 {
281 int rv;
282 mac_init_ops(&afe_devops, "afe");
283 if ((rv = mod_install(&afe_modlinkage)) != DDI_SUCCESS) {
284 mac_fini_ops(&afe_devops);
285 }
286 return (rv);
287 }
288
289 int
290 _fini(void)
291 {
292 int rv;
293 if ((rv = mod_remove(&afe_modlinkage)) == DDI_SUCCESS) {
294 mac_fini_ops(&afe_devops);
295 }
296 return (rv);
297 }
298
299 int
300 _info(struct modinfo *modinfop)
301 {
302 return (mod_info(&afe_modlinkage, modinfop));
303 }
304
305 int
306 afe_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
307 {
308 afe_t *afep;
309 mac_register_t *macp;
310 int inst = ddi_get_instance(dip);
311 ddi_acc_handle_t pci;
312 uint16_t venid;
313 uint16_t devid;
314 uint16_t svid;
315 uint16_t ssid;
316 uint16_t cachesize;
317 afe_card_t *cardp;
318 int i;
319
320 switch (cmd) {
321 case DDI_RESUME:
322 return (afe_resume(dip));
323
324 case DDI_ATTACH:
325 break;
326
327 default:
328 return (DDI_FAILURE);
329 }
330
331 /* this card is a bus master, reject any slave-only slot */
332 if (ddi_slaveonly(dip) == DDI_SUCCESS) {
333 afe_error(dip, "slot does not support PCI bus-master");
334 return (DDI_FAILURE);
335 }
336 /* PCI devices shouldn't generate hilevel interrupts */
337 if (ddi_intr_hilevel(dip, 0) != 0) {
338 afe_error(dip, "hilevel interrupts not supported");
339 return (DDI_FAILURE);
340 }
341 if (pci_config_setup(dip, &pci) != DDI_SUCCESS) {
342 afe_error(dip, "unable to setup PCI config handle");
343 return (DDI_FAILURE);
344 }
345
346 venid = pci_config_get16(pci, PCI_VID);
347 devid = pci_config_get16(pci, PCI_DID);
348 svid = pci_config_get16(pci, PCI_SVID);
349 ssid = pci_config_get16(pci, PCI_SSID);
350
351 /*
352 * Note: ADMtek boards seem to misprogram themselves with bogus
353 * timings, which do not seem to work properly on SPARC. We
354 * reprogram them to zero (but only if they appear to be broken),
355 * which at least seems to work. It is unclear to me whether this
356 * is a legal or wise practice, but it certainly works better
357 * than the original values. (I would love to hear
358 * suggestions for better values, or a better strategy.)
359 */
360 if ((pci_config_get8(pci, PCI_MINGNT) == 0xff) &&
361 (pci_config_get8(pci, PCI_MAXLAT) == 0xff)) {
362 pci_config_put8(pci, PCI_MINGNT, 0);
363 pci_config_put8(pci, PCI_MAXLAT, 0);
364 }
365
366 /*
367 * Scan the card table; a subsystem ID match takes precedence and
368 * ends the search, otherwise the last vendor/device ID match is used.
369 */
370 cardp = NULL;
371 for (i = 0; i < (sizeof (afe_cards) / sizeof (afe_card_t)); i++) {
372 if ((venid == afe_cards[i].card_venid) &&
373 (devid == afe_cards[i].card_devid)) {
374 cardp = &afe_cards[i];
375 }
376 if ((svid == afe_cards[i].card_venid) &&
377 (ssid == afe_cards[i].card_devid)) {
378 cardp = &afe_cards[i];
379 break;
380 }
381 }
382
383 if (cardp == NULL) {
384 pci_config_teardown(&pci);
385 afe_error(dip, "Unable to identify PCI card");
386 return (DDI_FAILURE);
387 }
388
389 if (ddi_prop_update_string(DDI_DEV_T_NONE, dip, "model",
390 cardp->card_cardname) != DDI_PROP_SUCCESS) {
391 pci_config_teardown(&pci);
392 afe_error(dip, "Unable to create model property");
393 return (DDI_FAILURE);
394 }
395
396 /*
397 * Grab the PCI cachesize -- we use this to program the
398 * cache-optimization bus access bits.
399 */
400 cachesize = pci_config_get8(pci, PCI_CLS);
401
402 /* this cannot fail */
403 afep = kmem_zalloc(sizeof (afe_t), KM_SLEEP);
404 ddi_set_driver_private(dip, afep);
405
406 /* get the interrupt block cookie */
407 if (ddi_get_iblock_cookie(dip, 0, &afep->afe_icookie) != DDI_SUCCESS) {
408 afe_error(dip, "ddi_get_iblock_cookie failed");
409 pci_config_teardown(&pci);
410 kmem_free(afep, sizeof (afe_t));
411 return (DDI_FAILURE);
412 }
413
414 afep->afe_dip = dip;
415 afep->afe_cardp = cardp;
416 afep->afe_phyaddr = -1;
417 afep->afe_cachesize = cachesize;
418
419 afep->afe_forcefiber = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
420 "fiber", 0);
421
422 mutex_init(&afep->afe_xmtlock, NULL, MUTEX_DRIVER, afep->afe_icookie);
423 mutex_init(&afep->afe_intrlock, NULL, MUTEX_DRIVER, afep->afe_icookie);
424
425 /*
426 * Enable bus master and memory space accesses.
427 */
428 pci_config_put16(pci, PCI_CMD,
429 pci_config_get16(pci, PCI_CMD) | PCI_CMD_BME | PCI_CMD_MAE);
430
431 /* we're done with this now, drop it */
432 pci_config_teardown(&pci);
433
434 /*
435 * Initialize interrupt kstat. This should not normally fail, since
436 * we don't use a persistent stat. We do it this way to avoid having
437 * to test for it at run time on the hot path.
438 */
439 afep->afe_intrstat = kstat_create("afe", inst, "intr", "controller",
440 KSTAT_TYPE_INTR, 1, 0);
441 if (afep->afe_intrstat == NULL) {
442 afe_error(dip, "kstat_create failed");
443 goto failed;
444 }
445 kstat_install(afep->afe_intrstat);
446
447 /*
448 * Set up the MII.
449 */
450 if ((afep->afe_mii = mii_alloc(afep, dip, &afe_mii_ops)) == NULL) {
451 goto failed;
452 }
453
454 /*
455 * Centaur can support PAUSE, but Comet can't.
456 */
457 if (AFE_MODEL(afep) == MODEL_CENTAUR) {
458 mii_set_pauseable(afep->afe_mii, B_TRUE, B_FALSE);
459 } else {
460 mii_set_pauseable(afep->afe_mii, B_FALSE, B_FALSE);
461 }
462
463 /*
464 * Map in the device registers.
465 */
466 if (ddi_regs_map_setup(dip, 1, (caddr_t *)&afep->afe_regs,
467 0, 0, &afe_devattr, &afep->afe_regshandle)) {
468 afe_error(dip, "ddi_regs_map_setup failed");
469 goto failed;
470 }
471
472 /*
473 * Allocate DMA resources (descriptor rings and buffers).
474 */
475 if ((afe_allocrxring(afep) != DDI_SUCCESS) ||
476 (afe_alloctxring(afep) != DDI_SUCCESS)) {
477 afe_error(dip, "unable to allocate DMA resources");
478 goto failed;
479 }
480
481 /* Initialize the chip. */
482 mutex_enter(&afep->afe_intrlock);
483 mutex_enter(&afep->afe_xmtlock);
484 if (!afe_initialize(afep)) {
485 mutex_exit(&afep->afe_xmtlock);
486 mutex_exit(&afep->afe_intrlock);
487 goto failed;
488 }
489 mutex_exit(&afep->afe_xmtlock);
490 mutex_exit(&afep->afe_intrlock);
491
492 /* Determine the number of address bits to our EEPROM. */
493 afep->afe_sromwidth = afe_sromwidth(afep);
494
495 /*
496 * Get the factory ethernet address. This becomes the current
497 * ethernet address (it can be overridden later via ifconfig).
498 */
499 afe_getfactaddr(afep, afep->afe_curraddr);
500 afep->afe_promisc = B_FALSE;
501
502 /* make sure we program the initial unicast address and multicast filter */
503 (void) afe_m_unicst(afep, afep->afe_curraddr);
504 (void) afe_m_multicst(afep, B_TRUE, afe_broadcast);
505
506 /*
507 * Establish interrupt handler.
508 */
509 if (ddi_add_intr(dip, 0, NULL, NULL, afe_intr, (caddr_t)afep) !=
510 DDI_SUCCESS) {
511 afe_error(dip, "unable to add interrupt");
512 goto failed;
513 }
514
515 /* TODO: do the power management stuff */
516
517 if ((macp = mac_alloc(MAC_VERSION)) == NULL) {
518 afe_error(dip, "mac_alloc failed");
519 goto failed;
520 }
521
522 macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
523 macp->m_driver = afep;
524 macp->m_dip = dip;
525 macp->m_src_addr = afep->afe_curraddr;
526 macp->m_callbacks = &afe_m_callbacks;
527 macp->m_min_sdu = 0;
528 macp->m_max_sdu = ETHERMTU;
529 macp->m_margin = VLAN_TAGSZ;
530
531 if (mac_register(macp, &afep->afe_mh) == DDI_SUCCESS) {
532 mac_free(macp);
533 return (DDI_SUCCESS);
534 }
535
536 /* failed to register with MAC */
537 mac_free(macp);
538 failed:
539 if (afep->afe_icookie != NULL) {
540 ddi_remove_intr(dip, 0, afep->afe_icookie);
541 }
542 if (afep->afe_intrstat) {
543 kstat_delete(afep->afe_intrstat);
544 }
545 mutex_destroy(&afep->afe_intrlock);
546 mutex_destroy(&afep->afe_xmtlock);
547
548 afe_freerxring(afep);
549 afe_freetxring(afep);
550
551 if (afep->afe_regshandle != NULL) {
552 ddi_regs_map_free(&afep->afe_regshandle);
553 }
554 kmem_free(afep, sizeof (afe_t));
555 return (DDI_FAILURE);
556 }
557
558 int
559 afe_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
560 {
561 afe_t *afep;
562
563 afep = ddi_get_driver_private(dip);
564 if (afep == NULL) {
565 afe_error(dip, "no soft state in detach!");
566 return (DDI_FAILURE);
567 }
568
569 switch (cmd) {
570 case DDI_DETACH:
571
572 if (mac_unregister(afep->afe_mh) != 0) {
573 return (DDI_FAILURE);
574 }
575
576 /* make sure hardware is quiesced */
577 mutex_enter(&afep->afe_intrlock);
578 mutex_enter(&afep->afe_xmtlock);
579 afep->afe_flags &= ~AFE_RUNNING;
580 afe_stopall(afep);
581 mutex_exit(&afep->afe_xmtlock);
582 mutex_exit(&afep->afe_intrlock);
583
584 /* clean up and shut down device */
585 ddi_remove_intr(dip, 0, afep->afe_icookie);
586
587 /* clean up MII layer */
588 mii_free(afep->afe_mii);
589
590 /* clean up kstats */
591 kstat_delete(afep->afe_intrstat);
592
593 ddi_prop_remove_all(dip);
594
595 /* free up any left over buffers or DMA resources */
596 afe_freerxring(afep);
597 afe_freetxring(afep);
598
599 ddi_regs_map_free(&afep->afe_regshandle);
600 mutex_destroy(&afep->afe_intrlock);
601 mutex_destroy(&afep->afe_xmtlock);
602
603 kmem_free(afep, sizeof (afe_t));
604 return (DDI_SUCCESS);
605
606 case DDI_SUSPEND:
607 /* stop MII monitoring */
608 mii_suspend(afep->afe_mii);
609
610 /* quiesce the hardware */
611 mutex_enter(&afep->afe_intrlock);
612 mutex_enter(&afep->afe_xmtlock);
613 afep->afe_flags |= AFE_SUSPENDED;
614 afe_stopall(afep);
615 mutex_exit(&afep->afe_xmtlock);
616 mutex_exit(&afep->afe_intrlock);
617 return (DDI_SUCCESS);
618 default:
619 return (DDI_FAILURE);
620 }
621 }
622
623 int
624 afe_resume(dev_info_t *dip)
625 {
626 afe_t *afep;
627
628 if ((afep = ddi_get_driver_private(dip)) == NULL) {
629 return (DDI_FAILURE);
630 }
631
632 mutex_enter(&afep->afe_intrlock);
633 mutex_enter(&afep->afe_xmtlock);
634
635 afep->afe_flags &= ~AFE_SUSPENDED;
636
637 /* re-initialize chip */
638 if (!afe_initialize(afep)) {
639 afe_error(afep->afe_dip, "unable to resume chip!");
640 afep->afe_flags |= AFE_SUSPENDED;
641 mutex_exit(&afep->afe_intrlock);
642 mutex_exit(&afep->afe_xmtlock);
643 return (DDI_SUCCESS);
644 }
645
646 /* start the chip */
647 if (afep->afe_flags & AFE_RUNNING) {
648 afe_startall(afep);
649 }
650
651 /* drop locks */
652 mutex_exit(&afep->afe_xmtlock);
653 mutex_exit(&afep->afe_intrlock);
654
655 mii_resume(afep->afe_mii);
656
657 return (DDI_SUCCESS);
658 }
659
660 int
661 afe_quiesce(dev_info_t *dip)
662 {
663 afe_t *afep;
664
665 if ((afep = ddi_get_driver_private(dip)) == NULL) {
666 return (DDI_FAILURE);
667 }
668
669 SETBIT(afep, CSR_PAR, PAR_RESET);
670 /*
671 * At 66 MHz each register access takes at least 16 nsec, so about
672 * 3,333 accesses cover 50 usec. We just round up to 5000 tries.
673 * Unless the hardware is horked, the reset will always complete
674 * *well* before that anyway.
675 */
676 for (int i = 0; i < 5000; i++) {
677 if ((GETCSR(afep, CSR_PAR) & PAR_RESET) == 0) {
678 return (DDI_SUCCESS);
679 }
680 }
681
682 /* hardware didn't quiesce - force a full reboot (PCI reset) */
683 return (DDI_FAILURE);
684 }
685
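/*
 * Reprogram the receive filter: station address, promiscuous mode, and
 * the multicast hash table.  The receiver is stopped around the update
 * if it was running.
 */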
686 void
687 afe_setrxfilt(afe_t *afep)
688 {
689 unsigned rxen, pa0, pa1;
690
691 if (afep->afe_flags & AFE_SUSPENDED) {
692 /* don't touch a suspended interface */
693 return;
694 }
695
696 rxen = GETCSR(afep, CSR_NAR) & NAR_RX_ENABLE;
697
698 /* stop receiver */
699 if (rxen) {
700 afe_stopmac(afep);
701 }
702
703 /* program promiscuous mode */
704 if (afep->afe_promisc)
705 SETBIT(afep, CSR_NAR, NAR_RX_PROMISC);
706 else
707 CLRBIT(afep, CSR_NAR, NAR_RX_PROMISC);
708
709 /* program mac address */
710 pa0 = (afep->afe_curraddr[3] << 24) | (afep->afe_curraddr[2] << 16) |
711 (afep->afe_curraddr[1] << 8) | afep->afe_curraddr[0];
712 pa1 = (afep->afe_curraddr[5] << 8) | afep->afe_curraddr[4];
713
714 PUTCSR(afep, CSR_PAR0, pa0);
715 PUTCSR(afep, CSR_PAR1, pa1);
716 if (rxen) {
717 SETBIT(afep, CSR_NAR, rxen);
718 }
719
720 /* program multicast filter */
721 if (AFE_MODEL(afep) == MODEL_COMET) {
722 if (afep->afe_mctab[0] || afep->afe_mctab[1]) {
723 SETBIT(afep, CSR_NAR, NAR_RX_MULTI);
724 } else {
725 CLRBIT(afep, CSR_NAR, NAR_RX_MULTI);
726 }
727 } else {
728 CLRBIT(afep, CSR_NAR, NAR_RX_MULTI);
729 PUTCSR(afep, CSR_MAR0, afep->afe_mctab[0]);
730 PUTCSR(afep, CSR_MAR1, afep->afe_mctab[1]);
731 }
732
733 /* restart receiver */
734 if (rxen) {
735 afe_startmac(afep);
736 }
737 }
738
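/*
 * Check for a transmit stall: a send was scheduled, its deadline has
 * passed, and descriptors are still outstanding.
 */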
739 int
740 afe_watchdog(afe_t *afep)
741 {
742 if ((afep->afe_txstall_time != 0) &&
743 (gethrtime() > afep->afe_txstall_time) &&
744 (afep->afe_txavail != AFE_TXRING)) {
745 afep->afe_txstall_time = 0;
746 afe_error(afep->afe_dip, "TX stall detected!");
747 return (DDI_FAILURE);
748 } else {
749 return (DDI_SUCCESS);
750 }
751 }
752
753 int
754 afe_m_multicst(void *arg, boolean_t add, const uint8_t *macaddr)
755 {
756 afe_t *afep = arg;
757 int index;
758 uint32_t crc;
759 uint32_t bit;
760 uint32_t newval, oldval;
761
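/* hash the address with CRC-32 to pick a bit in the multicast table */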
762 CRC32(crc, macaddr, ETHERADDRL, -1U, crc32_table);
763 crc %= AFE_MCHASH;
764
765 /* bit within a 32-bit word */
766 index = crc / 32;
767 bit = (1 << (crc % 32));
768
769 mutex_enter(&afep->afe_intrlock);
770 mutex_enter(&afep->afe_xmtlock);
771 newval = oldval = afep->afe_mctab[index];
772
773 if (add) {
774 afep->afe_mccount[crc]++;
775 if (afep->afe_mccount[crc] == 1)
776 newval |= bit;
777 } else {
778 afep->afe_mccount[crc]--;
779 if (afep->afe_mccount[crc] == 0)
780 newval &= ~bit;
781 }
782 if (newval != oldval) {
783 afep->afe_mctab[index] = newval;
784 afe_setrxfilt(afep);
785 }
786
787 mutex_exit(&afep->afe_xmtlock);
788 mutex_exit(&afep->afe_intrlock);
789
790 return (0);
791 }
792
793 int
794 afe_m_promisc(void *arg, boolean_t on)
795 {
796 afe_t *afep = arg;
797
798 /* exclusive access to the card while we reprogram it */
799 mutex_enter(&afep->afe_intrlock);
800 mutex_enter(&afep->afe_xmtlock);
801 /* save current promiscuous mode state for replay in resume */
802 afep->afe_promisc = on;
803
804 afe_setrxfilt(afep);
805 mutex_exit(&afep->afe_xmtlock);
806 mutex_exit(&afep->afe_intrlock);
807
808 return (0);
809 }
810
811 int
812 afe_m_unicst(void *arg, const uint8_t *macaddr)
813 {
814 afe_t *afep = arg;
815
816 /* exclusive access to the card while we reprogram it */
817 mutex_enter(&afep->afe_intrlock);
818 mutex_enter(&afep->afe_xmtlock);
819
820 bcopy(macaddr, afep->afe_curraddr, ETHERADDRL);
821 afe_setrxfilt(afep);
822
823 mutex_exit(&afep->afe_xmtlock);
824 mutex_exit(&afep->afe_intrlock);
825
826 return (0);
827 }
828
829 mblk_t *
830 afe_m_tx(void *arg, mblk_t *mp)
831 {
832 afe_t *afep = arg;
833 mblk_t *nmp;
834
835 mutex_enter(&afep->afe_xmtlock);
836
837 if (afep->afe_flags & AFE_SUSPENDED) {
838 while ((nmp = mp) != NULL) {
839 afep->afe_carrier_errors++;
840 mp = mp->b_next;
841 freemsg(nmp);
842 }
843 mutex_exit(&afep->afe_xmtlock);
844 return (NULL);
845 }
846
847 while (mp != NULL) {
848 nmp = mp->b_next;
849 mp->b_next = NULL;
850
851 if (!afe_send(afep, mp)) {
852 mp->b_next = nmp;
853 break;
854 }
855 mp = nmp;
856 }
857 mutex_exit(&afep->afe_xmtlock);
858
859 return (mp);
860 }
861
862 void
863 afe_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
864 {
865 afe_t *afep = arg;
866
867 if (mii_m_loop_ioctl(afep->afe_mii, wq, mp))
868 return;
869
870 miocnak(wq, mp, 0, EINVAL);
871 }
872
873 /*
874 * Hardware management.
875 */
876 static boolean_t
877 afe_initialize(afe_t *afep)
878 {
879 int i;
880 unsigned val;
881 uint32_t par, nar;
882
883 ASSERT(mutex_owned(&afep->afe_intrlock));
884 ASSERT(mutex_owned(&afep->afe_xmtlock));
885
886 SETBIT(afep, CSR_PAR, PAR_RESET);
887 for (i = 1; i < 10; i++) {
888 drv_usecwait(5);
889 val = GETCSR(afep, CSR_PAR);
890 if (!(val & PAR_RESET)) {
891 break;
892 }
893 }
894 if (i == 10) {
895 afe_error(afep->afe_dip, "timed out waiting for reset!");
896 return (B_FALSE);
897 }
898
899 /*
900 * Updated Centaur data sheets show that the Comet and Centaur are
901 * alike here (contrary to earlier versions of the data sheet).
902 */
903 /* XXX:? chip problems */
904 /* par = PAR_MRLE | PAR_MRME | PAR_MWIE; */
905 par = 0;
906 switch (afep->afe_cachesize) {
907 case 8:
908 par |= PAR_CALIGN_8 | PAR_BURST_8;
909 break;
910 case 16:
911 par |= PAR_CALIGN_16 | PAR_BURST_16;
912 break;
913 case 32:
914 par |= PAR_CALIGN_32 | PAR_BURST_32;
915 break;
916 default:
917 par |= PAR_BURST_32;
918 par &= ~(PAR_MWIE | PAR_MRLE | PAR_MRME);
919 break;
920
921 }
922
923 PUTCSR(afep, CSR_PAR, par);
924
925 /* enable transmit underrun auto-recovery */
926 SETBIT(afep, CSR_CR, CR_TXURAUTOR);
927
928 afe_resetrings(afep);
929
930 /* clear the lost packet counter (cleared on read) */
931 (void) GETCSR(afep, CSR_LPC);
932
933 nar = GETCSR(afep, CSR_NAR);
934 nar &= ~NAR_TR; /* clear tx threshold */
935 nar |= NAR_SF; /* store-and-forward */
936 nar |= NAR_HBD; /* disable SQE test */
937 PUTCSR(afep, CSR_NAR, nar);
938
939 afe_setrxfilt(afep);
940
941 return (B_TRUE);
942 }
943
944 /*
945 * Serial EEPROM access - inspired by the FreeBSD implementation.
946 */
947
948 uint8_t
949 afe_sromwidth(afe_t *afep)
950 {
951 int i;
952 uint32_t eeread;
953 uint8_t addrlen = 8;
954
955 eeread = SPR_SROM_READ | SPR_SROM_SEL | SPR_SROM_CHIP;
956
957 PUTCSR(afep, CSR_SPR, eeread & ~SPR_SROM_CHIP);
958 drv_usecwait(1);
959 PUTCSR(afep, CSR_SPR, eeread);
960
961 /* command bits first */
962 for (i = 4; i != 0; i >>= 1) {
963 unsigned val = (SROM_READCMD & i) ? SPR_SROM_DIN : 0;
964
965 PUTCSR(afep, CSR_SPR, eeread | val);
966 drv_usecwait(1);
967 PUTCSR(afep, CSR_SPR, eeread | val | SPR_SROM_CLOCK);
968 drv_usecwait(1);
969 }
970
971 PUTCSR(afep, CSR_SPR, eeread);
972
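/*
 * Clock out zero address bits; the EEPROM drives DOUT low once it has
 * received a complete address, which tells us how many address bits it
 * expects.
 */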
973 for (addrlen = 1; addrlen <= 12; addrlen++) {
974 PUTCSR(afep, CSR_SPR, eeread | SPR_SROM_CLOCK);
975 drv_usecwait(1);
976 if (!(GETCSR(afep, CSR_SPR) & SPR_SROM_DOUT)) {
977 PUTCSR(afep, CSR_SPR, eeread);
978 drv_usecwait(1);
979 break;
980 }
981 PUTCSR(afep, CSR_SPR, eeread);
982 drv_usecwait(1);
983 }
984
985 /* turn off accesses to the EEPROM */
986 PUTCSR(afep, CSR_SPR, eeread & ~SPR_SROM_CHIP);
987
988 return ((addrlen < 4 || addrlen > 12) ? 6 : addrlen);
989 }
990
991 /*
992 * The words in EEPROM are stored in little endian order. We
993 * shift bits out in big endian order, though. This requires
994 * a byte swap on some platforms.
995 */
996 uint16_t
997 afe_readsromword(afe_t *afep, unsigned romaddr)
998 {
999 int i;
1000 uint16_t word = 0;
1001 uint16_t retval;
1002 int eeread;
1003 uint8_t addrlen;
1004 int readcmd;
1005 uchar_t *ptr;
1006
1007 eeread = SPR_SROM_READ | SPR_SROM_SEL | SPR_SROM_CHIP;
1008 addrlen = afep->afe_sromwidth;
1009 readcmd = (SROM_READCMD << addrlen) | romaddr;
1010
1011 if (romaddr >= (1 << addrlen)) {
1012 /* too big to fit! */
1013 return (0);
1014 }
1015
1016 PUTCSR(afep, CSR_SPR, eeread & ~SPR_SROM_CHIP);
1017 PUTCSR(afep, CSR_SPR, eeread);
1018
1019 /* command and address bits */
1020 for (i = 4 + addrlen; i >= 0; i--) {
1021 short val = (readcmd & (1 << i)) ? SPR_SROM_DIN : 0;
1022
1023 PUTCSR(afep, CSR_SPR, eeread | val);
1024 drv_usecwait(1);
1025 PUTCSR(afep, CSR_SPR, eeread | val | SPR_SROM_CLOCK);
1026 drv_usecwait(1);
1027 }
1028
1029 PUTCSR(afep, CSR_SPR, eeread);
1030
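/* clock in the 16 data bits, most significant bit first */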
1031 for (i = 0; i < 16; i++) {
1032 PUTCSR(afep, CSR_SPR, eeread | SPR_SROM_CLOCK);
1033 drv_usecwait(1);
1034 word <<= 1;
1035 if (GETCSR(afep, CSR_SPR) & SPR_SROM_DOUT) {
1036 word |= 1;
1037 }
1038 PUTCSR(afep, CSR_SPR, eeread);
1039 drv_usecwait(1);
1040 }
1041
1042 /* turn off accesses to the EEPROM */
1043 PUTCSR(afep, CSR_SPR, eeread & ~SPR_SROM_CHIP);
1044
1045 /*
1046 * Fix up the endianness thing. Note that the values
1047 * are stored in little endian format on the SROM.
1048 */
1049 ptr = (uchar_t *)&word;
1050 retval = (ptr[1] << 8) | ptr[0];
1051 return (retval);
1052 }
1053
1054 void
1055 afe_readsrom(afe_t *afep, unsigned romaddr, unsigned len, char *dest)
1056 {
1057 int i;
1058 uint16_t word;
1059 uint16_t *ptr = (uint16_t *)((void *)dest);
1060 for (i = 0; i < len; i++) {
1061 word = afe_readsromword(afep, romaddr + i);
1062 *ptr = word;
1063 ptr++;
1064 }
1065 }
1066
1067 void
1068 afe_getfactaddr(afe_t *afep, uchar_t *eaddr)
1069 {
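/* the station address occupies three consecutive 16-bit words in the SROM */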
1070 afe_readsrom(afep, SROM_ENADDR, ETHERADDRL / 2, (char *)eaddr);
1071 }
1072
1073
1074
1075 /*
1076 * MII management.
1077 */
1078 void
1079 afe_mii_reset(void *arg)
1080 {
1081 afe_t *afep = arg;
1082 int fiber;
1083 uint16_t mcr;
1084 uint16_t pilr;
1085 uint8_t phyaddr;
1086
1087 /*
1088 * It's entirely possible that this belongs as a PHY-specific
1089 * override.
1090 */
1091 if ((mii_get_id(afep->afe_mii) & 0xfffffff0) != 0x225410) {
1092 /* if it's not an AN983B, we don't care */
1093 return;
1094 }
1095
1096 phyaddr = mii_get_addr(afep->afe_mii);
1097
1098 fiber = 0;
1099
1100 switch (afep->afe_forcefiber) {
1101 case 0:
1102 /* UTP Port */
1103 fiber = 0;
1104 break;
1105 case 1:
1106 /* Fiber Port */
1107 fiber = 1;
1108 break;
1109 }
1110
1111 mcr = afe_mii_read(afep, phyaddr, PHY_MCR);
1112 switch (fiber) {
1113 case 0:
1114 mcr &= ~MCR_FIBER;
1115 break;
1116
1117 case 1:
1118 mcr |= MCR_FIBER;
1119 break;
1120 }
1121 afe_mii_write(afep, phyaddr, PHY_MCR, mcr);
1122 drv_usecwait(500);
1123
1124 /*
1125 * Work around errata 983B_0416 -- the duplex LED flashes in 10 Mbps
1126 * half-duplex mode. We just disable SQE testing on the device.
1127 */
1128 pilr = afe_mii_read(afep, phyaddr, PHY_PILR);
1129 pilr |= PILR_NOSQE;
1130 afe_mii_write(afep, phyaddr, PHY_PILR, pilr);
1131 }
1132
1133 void
1134 afe_mii_notify(void *arg, link_state_t link)
1135 {
1136 afe_t *afep = arg;
1137
1138 if (AFE_MODEL(afep) == MODEL_CENTAUR) {
1139 if (mii_get_flowctrl(afep->afe_mii) == LINK_FLOWCTRL_BI) {
1140 SETBIT(afep, CSR_CR, CR_PAUSE);
1141 } else {
1142 CLRBIT(afep, CSR_CR, CR_PAUSE);
1143 }
1144 }
1145 mac_link_update(afep->afe_mh, link);
1146 }
1147
1148 void
1149 afe_miitristate(afe_t *afep)
1150 {
1151 uint32_t val = SPR_SROM_WRITE | SPR_MII_CTRL;
1152
1153 PUTCSR(afep, CSR_SPR, val);
1154 drv_usecwait(1);
1155 PUTCSR(afep, CSR_SPR, val | SPR_MII_CLOCK);
1156 drv_usecwait(1);
1157 }
1158
1159 void
1160 afe_miiwritebit(afe_t *afep, uint8_t bit)
1161 {
1162 uint32_t val = bit ? SPR_MII_DOUT : 0;
1163
1164 PUTCSR(afep, CSR_SPR, val);
1165 drv_usecwait(1);
1166 PUTCSR(afep, CSR_SPR, val | SPR_MII_CLOCK);
1167 drv_usecwait(1);
1168 }
1169
1170 uint8_t
1171 afe_miireadbit(afe_t *afep)
1172 {
1173 uint32_t val = SPR_MII_CTRL | SPR_SROM_READ;
1174 uint8_t bit;
1175
1176 PUTCSR(afep, CSR_SPR, val);
1177 drv_usecwait(1);
1178 bit = (GETCSR(afep, CSR_SPR) & SPR_MII_DIN) ? 1 : 0;
1179 PUTCSR(afep, CSR_SPR, val | SPR_MII_CLOCK);
1180 drv_usecwait(1);
1181 return (bit);
1182 }
1183
1184 uint16_t
1185 afe_mii_read(void *arg, uint8_t phy, uint8_t reg)
1186 {
1187 afe_t *afep = arg;
1188 /*
1189 * Buggy ADMtek parts ignore the PHY address decode bits -- they
1190 * only support a PHY at address 1.
1191 */
1192 if (phy != 1) {
1193 return (0xffff);
1194 }
1195 switch (AFE_MODEL(afep)) {
1196 case MODEL_COMET:
1197 return (afe_miireadcomet(afep, phy, reg));
1198 case MODEL_CENTAUR:
1199 return (afe_miireadgeneral(afep, phy, reg));
1200 }
1201 return (0xffff);
1202 }
1203
1204 uint16_t
1205 afe_miireadgeneral(afe_t *afep, uint8_t phy, uint8_t reg)
1206 {
1207 uint16_t value = 0;
1208 int i;
1209
1210 /* send the 32 bit preamble */
1211 for (i = 0; i < 32; i++) {
1212 afe_miiwritebit(afep, 1);
1213 }
1214
1215 /* send the start code - 01b */
1216 afe_miiwritebit(afep, 0);
1217 afe_miiwritebit(afep, 1);
1218
1219 /* send the opcode for read, - 10b */
1220 afe_miiwritebit(afep, 1);
1221 afe_miiwritebit(afep, 0);
1222
1223 /* next we send the 5 bit phy address */
1224 for (i = 0x10; i > 0; i >>= 1) {
1225 afe_miiwritebit(afep, (phy & i) ? 1 : 0);
1226 }
1227
1228 /* the 5 bit register address goes next */
1229 for (i = 0x10; i > 0; i >>= 1) {
1230 afe_miiwritebit(afep, (reg & i) ? 1 : 0);
1231 }
1232
1233 /* turnaround - tristate followed by logic 0 */
1234 afe_miitristate(afep);
1235 afe_miiwritebit(afep, 0);
1236
1237 /* read the 16 bit register value */
1238 for (i = 0x8000; i > 0; i >>= 1) {
1239 value <<= 1;
1240 value |= afe_miireadbit(afep);
1241 }
1242 afe_miitristate(afep);
1243 return (value);
1244 }
1245
1246 uint16_t
1247 afe_miireadcomet(afe_t *afep, uint8_t phy, uint8_t reg)
1248 {
1249 if (phy != 1) {
1250 return (0xffff);
1251 }
1252 switch (reg) {
1253 case MII_CONTROL:
1254 reg = CSR_BMCR;
1255 break;
1256 case MII_STATUS:
1257 reg = CSR_BMSR;
1258 break;
1259 case MII_PHYIDH:
1260 reg = CSR_PHYIDR1;
1261 break;
1262 case MII_PHYIDL:
1263 reg = CSR_PHYIDR2;
1264 break;
1265 case MII_AN_ADVERT:
1266 reg = CSR_ANAR;
1267 break;
1268 case MII_AN_LPABLE:
1269 reg = CSR_ANLPAR;
1270 break;
1271 case MII_AN_EXPANSION:
1272 reg = CSR_ANER;
1273 break;
1274 default:
1275 return (0);
1276 }
1277 return (GETCSR16(afep, reg) & 0xFFFF);
1278 }
1279
1280 void
1281 afe_mii_write(void *arg, uint8_t phy, uint8_t reg, uint16_t val)
1282 {
1283 afe_t *afep = arg;
1284
1285 /*
1286 * Buggy ADMtek parts ignore the PHY address decode bits -- they
1287 * only support a PHY at address 1.
1288 */
1289 if (phy != 1) {
1290 return;
1291 }
1292 switch (AFE_MODEL(afep)) {
1293 case MODEL_COMET:
1294 afe_miiwritecomet(afep, phy, reg, val);
1295 break;
1296 case MODEL_CENTAUR:
1297 afe_miiwritegeneral(afep, phy, reg, val);
1298 break;
1299 }
1300 }
1301
1302 void
1303 afe_miiwritegeneral(afe_t *afep, uint8_t phy, uint8_t reg, uint16_t val)
1304 {
1305 int i;
1306
1307 /* send the 32 bit preamble */
1308 for (i = 0; i < 32; i++) {
1309 afe_miiwritebit(afep, 1);
1310 }
1311
1312 /* send the start code - 01b */
1313 afe_miiwritebit(afep, 0);
1314 afe_miiwritebit(afep, 1);
1315
1316 /* send the opcode for write, - 01b */
1317 afe_miiwritebit(afep, 0);
1318 afe_miiwritebit(afep, 1);
1319
1320 /* next we send the 5 bit phy address */
1321 for (i = 0x10; i > 0; i >>= 1) {
1322 afe_miiwritebit(afep, (phy & i) ? 1 : 0);
1323 }
1324
1325 /* the 5 bit register address goes next */
1326 for (i = 0x10; i > 0; i >>= 1) {
1327 afe_miiwritebit(afep, (reg & i) ? 1 : 0);
1328 }
1329
1330 /* turnaround - 1 bit followed by logic 0 */
1331 afe_miiwritebit(afep, 1);
1332 afe_miiwritebit(afep, 0);
1333
1334 /* now write out our data (16 bits) */
1335 for (i = 0x8000; i > 0; i >>= 1) {
1336 afe_miiwritebit(afep, (val & i) ? 1 : 0);
1337 }
1338
1339 /* idle mode */
1340 afe_miitristate(afep);
1341 }
1342
1343 void
1344 afe_miiwritecomet(afe_t *afep, uint8_t phy, uint8_t reg, uint16_t val)
1345 {
1346 if (phy != 1) {
1347 return;
1348 }
1349 switch (reg) {
1350 case MII_CONTROL:
1351 reg = CSR_BMCR;
1352 break;
1353 case MII_STATUS:
1354 reg = CSR_BMSR;
1355 break;
1356 case MII_PHYIDH:
1357 reg = CSR_PHYIDR1;
1358 break;
1359 case MII_PHYIDL:
1360 reg = CSR_PHYIDR2;
1361 break;
1362 case MII_AN_ADVERT:
1363 reg = CSR_ANAR;
1364 break;
1365 case MII_AN_LPABLE:
1366 reg = CSR_ANLPAR;
1367 break;
1368 case MII_AN_EXPANSION:
1369 reg = CSR_ANER;
1370 break;
1371 default:
1372 return;
1373 }
1374 PUTCSR16(afep, reg, val);
1375 }
1376
1377 int
1378 afe_m_start(void *arg)
1379 {
1380 afe_t *afep = arg;
1381
1382 /* grab exclusive access to the card */
1383 mutex_enter(&afep->afe_intrlock);
1384 mutex_enter(&afep->afe_xmtlock);
1385
1386 afe_startall(afep);
1387 afep->afe_flags |= AFE_RUNNING;
1388
1389 mutex_exit(&afep->afe_xmtlock);
1390 mutex_exit(&afep->afe_intrlock);
1391
1392 mii_start(afep->afe_mii);
1393
1394 return (0);
1395 }
1396
1397 void
1398 afe_m_stop(void *arg)
1399 {
1400 afe_t *afep = arg;
1401
1402 mii_stop(afep->afe_mii);
1403
1404 /* exclusive access to the hardware! */
1405 mutex_enter(&afep->afe_intrlock);
1406 mutex_enter(&afep->afe_xmtlock);
1407
1408 afe_stopall(afep);
1409 afep->afe_flags &= ~AFE_RUNNING;
1410
1411 mutex_exit(&afep->afe_xmtlock);
1412 mutex_exit(&afep->afe_intrlock);
1413 }
1414
1415 void
1416 afe_startmac(afe_t *afep)
1417 {
1418 /* verify exclusive access to the card */
1419 ASSERT(mutex_owned(&afep->afe_intrlock));
1420 ASSERT(mutex_owned(&afep->afe_xmtlock));
1421
1422 /* start the card */
1423 SETBIT(afep, CSR_NAR, NAR_TX_ENABLE | NAR_RX_ENABLE);
1424
1425 if (afep->afe_txavail != AFE_TXRING)
1426 PUTCSR(afep, CSR_TDR, 0);
1427
1428 /* tell the mac that we are ready to go! */
1429 if (afep->afe_flags & AFE_RUNNING)
1430 mac_tx_update(afep->afe_mh);
1431
1432 /* start watchdog timer */
1433 PUTCSR(afep, CSR_TIMER, TIMER_LOOP |
1434 (AFE_WDOGTIMER * 1000 / TIMER_USEC));
1435 }
1436
1437 void
1438 afe_stopmac(afe_t *afep)
1439 {
1440 int i;
1441
1442 /* exclusive access to the hardware! */
1443 ASSERT(mutex_owned(&afep->afe_intrlock));
1444 ASSERT(mutex_owned(&afep->afe_xmtlock));
1445
1446 CLRBIT(afep, CSR_NAR, NAR_TX_ENABLE | NAR_RX_ENABLE);
1447
1448 /*
1449 * A 1518 byte frame at 10Mbps takes about 1.2 msec to drain.
1450 * We just round up to the nearest msec (2), which should be
1451 * plenty of time for it to complete.
1452 *
1453 * Note that some chips never seem to indicate the transition to
1454 * the stopped state properly. Experience shows that we can safely
1455 * proceed anyway, after waiting the requisite timeout.
1456 */
1457 for (i = 2000; i != 0; i -= 10) {
1458 if ((GETCSR(afep, CSR_SR) & (SR_TX_STATE | SR_RX_STATE)) == 0)
1459 break;
1460 drv_usecwait(10);
1461 }
1462
1463 /* prevent an interrupt */
1464 PUTCSR(afep, CSR_SR2, INT_RXSTOPPED | INT_TXSTOPPED);
1465
1466 /* stop the watchdog timer */
1467 PUTCSR(afep, CSR_TIMER, 0);
1468 }
1469
1470 void
1471 afe_resetrings(afe_t *afep)
1472 {
1473 int i;
1474
1475 /* now we need to reset the pointers... */
1476 PUTCSR(afep, CSR_RDB, 0);
1477 PUTCSR(afep, CSR_TDB, 0);
1478
1479 /* reset the descriptor ring pointers */
1480 afep->afe_rxhead = 0;
1481 afep->afe_txreclaim = 0;
1482 afep->afe_txsend = 0;
1483 afep->afe_txavail = AFE_TXRING;
1484
1485 /* set up transmit descriptor ring */
1486 for (i = 0; i < AFE_TXRING; i++) {
1487 afe_desc_t *tmdp = &afep->afe_txdescp[i];
1488 unsigned control = 0;
1489 if (i == (AFE_TXRING - 1)) {
1490 control |= TXCTL_ENDRING;
1491 }
1492 PUTTXDESC(afep, tmdp->desc_status, 0);
1493 PUTTXDESC(afep, tmdp->desc_control, control);
1494 PUTTXDESC(afep, tmdp->desc_buffer1, 0);
1495 PUTTXDESC(afep, tmdp->desc_buffer2, 0);
1496 SYNCTXDESC(afep, i, DDI_DMA_SYNC_FORDEV);
1497 }
1498 PUTCSR(afep, CSR_TDB, afep->afe_txdesc_paddr);
1499
1500 /* make the receive buffers available */
1501 for (i = 0; i < AFE_RXRING; i++) {
1502 afe_rxbuf_t *rxb = afep->afe_rxbufs[i];
1503 afe_desc_t *rmdp = &afep->afe_rxdescp[i];
1504 unsigned control;
1505
1506 control = AFE_BUFSZ & RXCTL_BUFLEN1;
1507 if (i == (AFE_RXRING - 1)) {
1508 control |= RXCTL_ENDRING;
1509 }
1510 PUTRXDESC(afep, rmdp->desc_buffer1, rxb->rxb_paddr);
1511 PUTRXDESC(afep, rmdp->desc_buffer2, 0);
1512 PUTRXDESC(afep, rmdp->desc_control, control);
1513 PUTRXDESC(afep, rmdp->desc_status, RXSTAT_OWN);
1514 SYNCRXDESC(afep, i, DDI_DMA_SYNC_FORDEV);
1515 }
1516 PUTCSR(afep, CSR_RDB, afep->afe_rxdesc_paddr);
1517 }
1518
1519 void
1520 afe_stopall(afe_t *afep)
1521 {
1522 afe_disableinterrupts(afep);
1523 afe_stopmac(afep);
1524 }
1525
1526 void
1527 afe_startall(afe_t *afep)
1528 {
1529 ASSERT(mutex_owned(&afep->afe_intrlock));
1530 ASSERT(mutex_owned(&afep->afe_xmtlock));
1531
1532 /* make sure interrupts are disabled to begin */
1533 afe_disableinterrupts(afep);
1534
1535 /* initialize the chip */
1536 (void) afe_initialize(afep);
1537
1538 /* now we can enable interrupts */
1539 afe_enableinterrupts(afep);
1540
1541 /* start up the mac */
1542 afe_startmac(afep);
1543 }
1544
1545 void
1546 afe_resetall(afe_t *afep)
1547 {
1548 afe_stopall(afep);
1549 afe_startall(afep);
1550 }
1551
1552 afe_txbuf_t *
1553 afe_alloctxbuf(afe_t *afep)
1554 {
1555 ddi_dma_cookie_t dmac;
1556 unsigned ncookies;
1557 afe_txbuf_t *txb;
1558 size_t len;
1559
1560 txb = kmem_zalloc(sizeof (*txb), KM_SLEEP);
1561
1562 if (ddi_dma_alloc_handle(afep->afe_dip, &afe_dma_txattr,
1563 DDI_DMA_SLEEP, NULL, &txb->txb_dmah) != DDI_SUCCESS) {
1564 return (NULL);
1565 }
1566
1567 if (ddi_dma_mem_alloc(txb->txb_dmah, AFE_BUFSZ, &afe_bufattr,
1568 DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL, &txb->txb_buf, &len,
1569 &txb->txb_acch) != DDI_SUCCESS) {
1570 return (NULL);
1571 }
1572 if (ddi_dma_addr_bind_handle(txb->txb_dmah, NULL, txb->txb_buf,
1573 len, DDI_DMA_WRITE | DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL,
1574 &dmac, &ncookies) != DDI_DMA_MAPPED) {
1575 return (NULL);
1576 }
1577 txb->txb_paddr = dmac.dmac_address;
1578
1579 return (txb);
1580 }
1581
1582 void
1583 afe_destroytxbuf(afe_txbuf_t *txb)
1584 {
1585 if (txb != NULL) {
1586 if (txb->txb_paddr)
1587 (void) ddi_dma_unbind_handle(txb->txb_dmah);
1588 if (txb->txb_acch)
1589 ddi_dma_mem_free(&txb->txb_acch);
1590 if (txb->txb_dmah)
1591 ddi_dma_free_handle(&txb->txb_dmah);
1592 kmem_free(txb, sizeof (*txb));
1593 }
1594 }
1595
1596 afe_rxbuf_t *
1597 afe_allocrxbuf(afe_t *afep)
1598 {
1599 afe_rxbuf_t *rxb;
1600 size_t len;
1601 unsigned ccnt;
1602 ddi_dma_cookie_t dmac;
1603
1604 rxb = kmem_zalloc(sizeof (*rxb), KM_SLEEP);
1605
1606 if (ddi_dma_alloc_handle(afep->afe_dip, &afe_dma_attr,
1607 DDI_DMA_SLEEP, NULL, &rxb->rxb_dmah) != DDI_SUCCESS) {
1608 kmem_free(rxb, sizeof (*rxb));
1609 return (NULL);
1610 }
1611 if (ddi_dma_mem_alloc(rxb->rxb_dmah, AFE_BUFSZ, &afe_bufattr,
1612 DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL, &rxb->rxb_buf, &len,
1613 &rxb->rxb_acch) != DDI_SUCCESS) {
1614 ddi_dma_free_handle(&rxb->rxb_dmah);
1615 kmem_free(rxb, sizeof (*rxb));
1616 return (NULL);
1617 }
1618 if (ddi_dma_addr_bind_handle(rxb->rxb_dmah, NULL, rxb->rxb_buf, len,
1619 DDI_DMA_READ | DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL, &dmac,
1620 &ccnt) != DDI_DMA_MAPPED) {
1621 ddi_dma_mem_free(&rxb->rxb_acch);
1622 ddi_dma_free_handle(&rxb->rxb_dmah);
1623 kmem_free(rxb, sizeof (*rxb));
1624 return (NULL);
1625 }
1626 rxb->rxb_paddr = dmac.dmac_address;
1627
1628 return (rxb);
1629 }
1630
1631 void
1632 afe_destroyrxbuf(afe_rxbuf_t *rxb)
1633 {
1634 if (rxb) {
1635 (void) ddi_dma_unbind_handle(rxb->rxb_dmah);
1636 ddi_dma_mem_free(&rxb->rxb_acch);
1637 ddi_dma_free_handle(&rxb->rxb_dmah);
1638 kmem_free(rxb, sizeof (*rxb));
1639 }
1640 }
1641
1642 /*
1643 * Allocate receive resources.
1644 */
1645 int
1646 afe_allocrxring(afe_t *afep)
1647 {
1648 int rval;
1649 int i;
1650 size_t size;
1651 size_t len;
1652 ddi_dma_cookie_t dmac;
1653 unsigned ncookies;
1654 caddr_t kaddr;
1655
1656 size = AFE_RXRING * sizeof (afe_desc_t);
1657
1658 rval = ddi_dma_alloc_handle(afep->afe_dip, &afe_dma_attr,
1659 DDI_DMA_SLEEP, NULL, &afep->afe_rxdesc_dmah);
1660 if (rval != DDI_SUCCESS) {
1661 afe_error(afep->afe_dip,
1662 "unable to allocate DMA handle for rx descriptors");
1663 return (DDI_FAILURE);
1664 }
1665
1666 rval = ddi_dma_mem_alloc(afep->afe_rxdesc_dmah, size, &afe_devattr,
1667 DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, &kaddr, &len,
1668 &afep->afe_rxdesc_acch);
1669 if (rval != DDI_SUCCESS) {
1670 afe_error(afep->afe_dip,
1671 "unable to allocate DMA memory for rx descriptors");
1672 return (DDI_FAILURE);
1673 }
1674
1675 rval = ddi_dma_addr_bind_handle(afep->afe_rxdesc_dmah, NULL, kaddr,
1676 size, DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
1677 &dmac, &ncookies);
1678 if (rval != DDI_DMA_MAPPED) {
1679 afe_error(afep->afe_dip,
1680 "unable to bind DMA for rx descriptors");
1681 return (DDI_FAILURE);
1682 }
1683
1684 /* because of afe_dma_attr */
1685 ASSERT(ncookies == 1);
1686
1687 /* we take the 32-bit physical address out of the cookie */
1688 afep->afe_rxdesc_paddr = dmac.dmac_address;
1689 afep->afe_rxdescp = (void *)kaddr;
1690
1691 /* allocate buffer pointers (not the buffers themselves, yet) */
1692 afep->afe_rxbufs = kmem_zalloc(AFE_RXRING * sizeof (afe_rxbuf_t *),
1693 KM_SLEEP);
1694
1695 /* now allocate rx buffers */
1696 for (i = 0; i < AFE_RXRING; i++) {
1697 afe_rxbuf_t *rxb = afe_allocrxbuf(afep);
1698 if (rxb == NULL)
1699 return (DDI_FAILURE);
1700 afep->afe_rxbufs[i] = rxb;
1701 }
1702
1703 return (DDI_SUCCESS);
1704 }
1705
1706 /*
1707 * Allocate transmit resources.
1708 */
1709 int
1710 afe_alloctxring(afe_t *afep)
1711 {
1712 int rval;
1713 int i;
1714 size_t size;
1715 size_t len;
1716 ddi_dma_cookie_t dmac;
1717 unsigned ncookies;
1718 caddr_t kaddr;
1719
1720 size = AFE_TXRING * sizeof (afe_desc_t);
1721
1722 rval = ddi_dma_alloc_handle(afep->afe_dip, &afe_dma_attr,
1723 DDI_DMA_SLEEP, NULL, &afep->afe_txdesc_dmah);
1724 if (rval != DDI_SUCCESS) {
1725 afe_error(afep->afe_dip,
1726 "unable to allocate DMA handle for tx descriptors");
1727 return (DDI_FAILURE);
1728 }
1729
1730 rval = ddi_dma_mem_alloc(afep->afe_txdesc_dmah, size, &afe_devattr,
1731 DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, &kaddr, &len,
1732 &afep->afe_txdesc_acch);
1733 if (rval != DDI_SUCCESS) {
1734 afe_error(afep->afe_dip,
1735 "unable to allocate DMA memory for tx descriptors");
1736 return (DDI_FAILURE);
1737 }
1738
1739 rval = ddi_dma_addr_bind_handle(afep->afe_txdesc_dmah, NULL, kaddr,
1740 size, DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
1741 &dmac, &ncookies);
1742 if (rval != DDI_DMA_MAPPED) {
1743 afe_error(afep->afe_dip,
1744 "unable to bind DMA for tx descriptors");
1745 return (DDI_FAILURE);
1746 }
1747
1748 /* because of afe_dma_attr */
1749 ASSERT(ncookies == 1);
1750
1751 /* we take the 32-bit physical address out of the cookie */
1752 afep->afe_txdesc_paddr = dmac.dmac_address;
1753 afep->afe_txdescp = (void *)kaddr;
1754
1755 /* allocate buffer pointers (not the buffers themselves, yet) */
1756 afep->afe_txbufs = kmem_zalloc(AFE_TXRING * sizeof (afe_txbuf_t *),
1757 KM_SLEEP);
1758
1759 /* now allocate tx buffers */
1760 for (i = 0; i < AFE_TXRING; i++) {
1761 afe_txbuf_t *txb = afe_alloctxbuf(afep);
1762 if (txb == NULL)
1763 return (DDI_FAILURE);
1764 afep->afe_txbufs[i] = txb;
1765 }
1766
1767 return (DDI_SUCCESS);
1768 }
1769
1770 void
1771 afe_freerxring(afe_t *afep)
1772 {
1773 int i;
1774
1775 for (i = 0; i < AFE_RXRING; i++) {
1776 afe_destroyrxbuf(afep->afe_rxbufs[i]);
1777 }
1778
1779 if (afep->afe_rxbufs) {
1780 kmem_free(afep->afe_rxbufs,
1781 AFE_RXRING * sizeof (afe_rxbuf_t *));
1782 }
1783
1784 if (afep->afe_rxdesc_paddr)
1785 (void) ddi_dma_unbind_handle(afep->afe_rxdesc_dmah);
1786 if (afep->afe_rxdesc_acch)
1787 ddi_dma_mem_free(&afep->afe_rxdesc_acch);
1788 if (afep->afe_rxdesc_dmah)
1789 ddi_dma_free_handle(&afep->afe_rxdesc_dmah);
1790 }
1791
1792 void
1793 afe_freetxring(afe_t *afep)
1794 {
1795 int i;
1796
1797 for (i = 0; i < AFE_TXRING; i++) {
1798 afe_destroytxbuf(afep->afe_txbufs[i]);
1799 }
1800
1801 if (afep->afe_txbufs) {
1802 kmem_free(afep->afe_txbufs,
1803 AFE_TXRING * sizeof (afe_txbuf_t *));
1804 }
1805 if (afep->afe_txdesc_paddr)
1806 (void) ddi_dma_unbind_handle(afep->afe_txdesc_dmah);
1807 if (afep->afe_txdesc_acch)
1808 ddi_dma_mem_free(&afep->afe_txdesc_acch);
1809 if (afep->afe_txdesc_dmah)
1810 ddi_dma_free_handle(&afep->afe_txdesc_dmah);
1811 }
1812
1813 /*
1814 * Interrupt service routine.
1815 */
1816 unsigned
1817 afe_intr(caddr_t arg)
1818 {
1819 afe_t *afep = (void *)arg;
1820 uint32_t status;
1821 mblk_t *mp = NULL;
1822 boolean_t doreset = B_FALSE;
1823
1824 mutex_enter(&afep->afe_intrlock);
1825
1826 if (afep->afe_flags & AFE_SUSPENDED) {
1827 /* we cannot receive interrupts! */
1828 mutex_exit(&afep->afe_intrlock);
1829 return (DDI_INTR_UNCLAIMED);
1830 }
1831
1832 /* check interrupt status bits, did we interrupt? */
1833 status = GETCSR(afep, CSR_SR2) & INT_ALL;
1834
1835 if (status == 0) {
1836 KIOIP->intrs[KSTAT_INTR_SPURIOUS]++;
1837 mutex_exit(&afep->afe_intrlock);
1838 return (DDI_INTR_UNCLAIMED);
1839 }
1840 /* ack the interrupt */
1841 PUTCSR(afep, CSR_SR2, status);
1842 KIOIP->intrs[KSTAT_INTR_HARD]++;
1843
1844 if (!(afep->afe_flags & AFE_RUNNING)) {
1845 /* not running, don't touch anything */
1846 mutex_exit(&afep->afe_intrlock);
1847 return (DDI_INTR_CLAIMED);
1848 }
1849
1850 if (status & (INT_RXOK|INT_RXNOBUF)) {
1851 /* receive packets */
1852 mp = afe_receive(afep);
1853 if (status & INT_RXNOBUF)
1854 PUTCSR(afep, CSR_RDR, 0); /* wake up chip */
1855 }
1856
1857 if (status & INT_TXOK) {
1858 /* transmit completed */
1859 mutex_enter(&afep->afe_xmtlock);
1860 afe_reclaim(afep);
1861 mutex_exit(&afep->afe_xmtlock);
1862 }
1863
1864 if ((status & INT_TIMER) && (afe_watchdog(afep) != DDI_SUCCESS)) {
1865 doreset = B_TRUE;
1866 }
1867
1868 if (status & (INT_RXSTOPPED|INT_TXSTOPPED|
1869 INT_RXJABBER|INT_TXJABBER|INT_TXUNDERFLOW)) {
1870
1871 if (status & (INT_RXJABBER | INT_TXJABBER)) {
1872 afep->afe_jabber++;
1873 }
1874 doreset = B_TRUE;
1875 }
1876
1877 if (status & INT_BUSERR) {
1878 switch (GETCSR(afep, CSR_SR) & SR_BERR_TYPE) {
1879 case SR_BERR_PARITY:
1880 afe_error(afep->afe_dip, "PCI parity error");
1881 break;
1882 case SR_BERR_TARGET_ABORT:
1883 afe_error(afep->afe_dip, "PCI target abort");
1884 break;
1885 case SR_BERR_MASTER_ABORT:
1886 afe_error(afep->afe_dip, "PCI master abort");
1887 break;
1888 default:
1889 afe_error(afep->afe_dip, "Unknown PCI error");
1890 break;
1891 }
1892
1893 /* reset the chip in an attempt to fix things */
1894 doreset = B_TRUE;
1895 }
1896
1897
1898 if (doreset) {
1899 mutex_enter(&afep->afe_xmtlock);
1900 afe_resetall(afep);
1901 mutex_exit(&afep->afe_xmtlock);
1902 mutex_exit(&afep->afe_intrlock);
1903
1904 mii_reset(afep->afe_mii);
1905 } else {
1906 mutex_exit(&afep->afe_intrlock);
1907 }
1908
1909 if (status & INT_LINKCHG) {
1910 mii_check(afep->afe_mii);
1911 }
1912
1913 /*
1914 * Send up packets. We do this outside of the intrlock.
1915 */
1916 if (mp) {
1917 mac_rx(afep->afe_mh, NULL, mp);
1918 }
1919
1920 return (DDI_INTR_CLAIMED);
1921 }
1922
1923 void
1924 afe_enableinterrupts(afe_t *afep)
1925 {
1926 unsigned mask = INT_WANTED;
1927
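/* only ask for tx-complete interrupts while a blocked sender is waiting */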
1928 if (afep->afe_wantw)
1929 mask |= INT_TXOK;
1930
1931 PUTCSR(afep, CSR_IER2, mask);
1932
1933 if (AFE_MODEL(afep) == MODEL_COMET) {
1934 /*
1935 * On the Comet, this is the internal transceiver
1936 * interrupt. We program the Comet's built-in PHY to
1937 * enable certain interrupts.
1938 */
1939 PUTCSR16(afep, CSR_XIE, XIE_LDE | XIE_ANCE);
1940 }
1941 }
1942
1943 void
1944 afe_disableinterrupts(afe_t *afep)
1945 {
1946 /* disable further interrupts */
1947 PUTCSR(afep, CSR_IER2, INT_NONE);
1948
1949 /* clear any pending interrupts */
1950 PUTCSR(afep, CSR_SR2, INT_ALL);
1951 }
1952
1953 boolean_t
1954 afe_send(afe_t *afep, mblk_t *mp)
1955 {
1956 size_t len;
1957 afe_txbuf_t *txb;
1958 afe_desc_t *tmd;
1959 uint32_t control;
1960 int txsend;
1961
1962 ASSERT(mutex_owned(&afep->afe_xmtlock));
1963 ASSERT(mp != NULL);
1964
1965 len = msgsize(mp);
1966 if (len > ETHERVLANMTU) {
1967 afep->afe_macxmt_errors++;
1968 freemsg(mp);
1969 return (B_TRUE);
1970 }
1971
1972 if (afep->afe_txavail < AFE_TXRECLAIM)
1973 afe_reclaim(afep);
1974
1975 if (afep->afe_txavail == 0) {
1976 /* no more tmds */
1977 afep->afe_wantw = B_TRUE;
1978 /* enable TX interrupt */
1979 afe_enableinterrupts(afep);
1980 return (B_FALSE);
1981 }
1982
1983 txsend = afep->afe_txsend;
1984
1985 /*
1986 * For simplicity, we just do a copy into a preallocated
1987 * DMA buffer.
1988 */
1989
1990 txb = afep->afe_txbufs[txsend];
1991 mcopymsg(mp, txb->txb_buf); /* frees mp! */
1992
1993 /*
1994 * Statistics.
1995 */
1996 afep->afe_opackets++;
1997 afep->afe_obytes += len;
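/* a set group bit means the frame is multicast or broadcast */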
1998 if (txb->txb_buf[0] & 0x1) {
1999 if (bcmp(txb->txb_buf, afe_broadcast, ETHERADDRL) != 0)
2000 afep->afe_multixmt++;
2001 else
2002 afep->afe_brdcstxmt++;
2003 }
2004
2005 /* note len is already known to be a small unsigned */
2006 control = len | TXCTL_FIRST | TXCTL_LAST | TXCTL_INTCMPLTE;
2007
2008 if (txsend == (AFE_TXRING - 1))
2009 control |= TXCTL_ENDRING;
2010
2011 tmd = &afep->afe_txdescp[txsend];
2012
2013 SYNCTXBUF(txb, len, DDI_DMA_SYNC_FORDEV);
2014 PUTTXDESC(afep, tmd->desc_control, control);
2015 PUTTXDESC(afep, tmd->desc_buffer1, txb->txb_paddr);
2016 PUTTXDESC(afep, tmd->desc_buffer2, 0);
2017 PUTTXDESC(afep, tmd->desc_status, TXSTAT_OWN);
2018 /* sync the descriptor out to the device */
2019 SYNCTXDESC(afep, txsend, DDI_DMA_SYNC_FORDEV);
2020
2021 /*
2022 * Note the new values of txavail and txsend.
2023 */
2024 afep->afe_txavail--;
2025 afep->afe_txsend = (txsend + 1) % AFE_TXRING;
2026
2027 /*
2028 * It should never, ever take more than 5 seconds to drain
2029 * the ring. If it happens, then we are stuck!
2030 */
2031 afep->afe_txstall_time = gethrtime() + (5 * 1000000000ULL);
2032
2033 /*
2034 * wake up the chip ... inside the lock to protect against DR suspend,
2035 * etc.
2036 */
2037 PUTCSR(afep, CSR_TDR, 0);
2038
2039 return (B_TRUE);
2040 }
2041
2042 /*
2043 * Reclaim buffers that have completed transmission.
2044 */
2045 void
2046 afe_reclaim(afe_t *afep)
2047 {
2048 afe_desc_t *tmdp;
2049
2050 while (afep->afe_txavail != AFE_TXRING) {
2051 uint32_t status;
2052 uint32_t control;
2053 int index = afep->afe_txreclaim;
2054
2055 tmdp = &afep->afe_txdescp[index];
2056
2057 /* sync it before we read it */
2058 SYNCTXDESC(afep, index, DDI_DMA_SYNC_FORKERNEL);
2059
2060 control = GETTXDESC(afep, tmdp->desc_control);
2061 status = GETTXDESC(afep, tmdp->desc_status);
2062
2063 if (status & TXSTAT_OWN) {
2064 /* chip is still working on it, we're done */
2065 break;
2066 }
2067
2068 afep->afe_txavail++;
2069 afep->afe_txreclaim = (index + 1) % AFE_TXRING;
2070
2071 /* in the most common successful case, all bits are clear */
2072 if (status == 0)
2073 continue;
2074
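/* error and collision status is only reported on a frame's last descriptor */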
2075 if ((control & TXCTL_LAST) == 0)
2076 continue;
2077
2078 if (status & TXSTAT_TXERR) {
2079 afep->afe_errxmt++;
2080
2081 if (status & TXSTAT_JABBER) {
2082 /* transmit jabber timeout */
2083 afep->afe_macxmt_errors++;
2084 }
2085 if (status &
2086 (TXSTAT_CARRLOST | TXSTAT_NOCARR)) {
2087 afep->afe_carrier_errors++;
2088 }
2089 if (status & TXSTAT_UFLOW) {
2090 afep->afe_underflow++;
2091 }
2092 if (status & TXSTAT_LATECOL) {
2093 afep->afe_tx_late_collisions++;
2094 }
2095 if (status & TXSTAT_EXCOLL) {
2096 afep->afe_ex_collisions++;
2097 afep->afe_collisions += 16;
2098 }
2099 }
2100
2101 if (status & TXSTAT_DEFER) {
2102 afep->afe_defer_xmts++;
2103 }
2104
2105 /* collision counting */
2106 if (TXCOLLCNT(status) == 1) {
2107 afep->afe_collisions++;
2108 afep->afe_first_collisions++;
2109 } else if (TXCOLLCNT(status)) {
2110 afep->afe_collisions += TXCOLLCNT(status);
2111 afep->afe_multi_collisions += TXCOLLCNT(status);
2112 }
2113 }
2114
	if (afep->afe_txavail >= AFE_TXRESCHED) {
		if (afep->afe_wantw) {
			/*
			 * We reclaimed enough descriptors to resume
			 * transmission; clear the want-writes flag,
			 * reprogram the interrupt mask accordingly, and
			 * tell MAC it may start sending again.
			 */
			afep->afe_wantw = B_FALSE;
			afe_enableinterrupts(afep);
			mac_tx_update(afep->afe_mh);
		}
	}
}

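/*
 * Harvest received frames from the ring.  Each good frame is copied out
 * of its DMA buffer into a freshly allocated mblk, so the buffer can be
 * handed straight back to the chip, and the mblks are chained together
 * and returned to the caller.  At most half a ring's worth of frames is
 * processed per call.
 */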
mblk_t *
afe_receive(afe_t *afep)
{
	unsigned len;
	afe_rxbuf_t *rxb;
	afe_desc_t *rmd;
	uint32_t status;
	mblk_t *mpchain, **mpp, *mp;
	int head, cnt;

	mpchain = NULL;
	mpp = &mpchain;
	head = afep->afe_rxhead;

	/* limit the number of packets we process to a half ring size */
	for (cnt = 0; cnt < AFE_RXRING / 2; cnt++) {

		rmd = &afep->afe_rxdescp[head];
		rxb = afep->afe_rxbufs[head];

		SYNCRXDESC(afep, head, DDI_DMA_SYNC_FORKERNEL);
		status = GETRXDESC(afep, rmd->desc_status);
		if (status & RXSTAT_OWN) {
			/* chip is still chewing on it */
			break;
		}

		/* discard the ethernet frame checksum */
		len = RXLENGTH(status) - ETHERFCSL;

		if ((status & (RXSTAT_ERRS | RXSTAT_FIRST | RXSTAT_LAST)) !=
		    (RXSTAT_FIRST | RXSTAT_LAST)) {

			afep->afe_errrcv++;

			/*
			 * Abnormal status bits detected, analyze further.
			 */
			if ((status & (RXSTAT_LAST|RXSTAT_FIRST)) !=
			    (RXSTAT_LAST|RXSTAT_FIRST)) {

				if (status & RXSTAT_FIRST) {
					afep->afe_toolong_errors++;
				}
			} else if (status & RXSTAT_DESCERR) {
				afep->afe_macrcv_errors++;

			} else if (status & RXSTAT_RUNT) {
				afep->afe_runt++;

			} else if (status & RXSTAT_COLLSEEN) {
				/* this should really be rx_late_collisions */
				afep->afe_macrcv_errors++;

			} else if (status & RXSTAT_DRIBBLE) {
				afep->afe_align_errors++;

			} else if (status & RXSTAT_CRCERR) {
				afep->afe_fcs_errors++;

			} else if (status & RXSTAT_OFLOW) {
				afep->afe_overflow++;
			}
		} else if (len > ETHERVLANMTU) {
			afep->afe_errrcv++;
			afep->afe_toolong_errors++;

		} else {
			/*
			 * At this point, the chip thinks the packet is OK.
			 */
			mp = allocb(len + AFE_HEADROOM, 0);
			if (mp == NULL) {
				afep->afe_errrcv++;
				afep->afe_norcvbuf++;
				goto skip;
			}

			/* sync the buffer before we look at it */
			SYNCRXBUF(rxb, len, DDI_DMA_SYNC_FORKERNEL);
			mp->b_rptr += AFE_HEADROOM;
			mp->b_wptr = mp->b_rptr + len;
			bcopy((char *)rxb->rxb_buf, mp->b_rptr, len);

			afep->afe_ipackets++;
			afep->afe_rbytes += len;
			if (status & RXSTAT_GROUP) {
				if (bcmp(mp->b_rptr, afe_broadcast,
				    ETHERADDRL) == 0)
					afep->afe_brdcstrcv++;
				else
					afep->afe_multircv++;
			}
			*mpp = mp;
			mpp = &mp->b_next;
		}

	skip:
		/* return ring entry to the hardware */
		PUTRXDESC(afep, rmd->desc_status, RXSTAT_OWN);
		SYNCRXDESC(afep, head, DDI_DMA_SYNC_FORDEV);

		/* advance to next RMD */
		head = (head + 1) % AFE_RXRING;
	}

	afep->afe_rxhead = head;

	return (mpchain);
}

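/*
 * Statistics entry point.  Link-related statistics are serviced by the
 * common MII layer; everything else comes from counters the driver
 * maintains.  A TX reclaim is done first, when running and not
 * suspended, so that transmit counters are reasonably current.
 */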
int
afe_m_stat(void *arg, uint_t stat, uint64_t *val)
{
	afe_t *afep = arg;

	mutex_enter(&afep->afe_xmtlock);
	if ((afep->afe_flags & (AFE_RUNNING|AFE_SUSPENDED)) == AFE_RUNNING)
		afe_reclaim(afep);
	mutex_exit(&afep->afe_xmtlock);

	if (mii_m_getstat(afep->afe_mii, stat, val) == 0) {
		return (0);
	}
	switch (stat) {
	case MAC_STAT_MULTIRCV:
		*val = afep->afe_multircv;
		break;

	case MAC_STAT_BRDCSTRCV:
		*val = afep->afe_brdcstrcv;
		break;

	case MAC_STAT_MULTIXMT:
		*val = afep->afe_multixmt;
		break;

	case MAC_STAT_BRDCSTXMT:
		*val = afep->afe_brdcstxmt;
		break;

	case MAC_STAT_IPACKETS:
		*val = afep->afe_ipackets;
		break;

	case MAC_STAT_RBYTES:
		*val = afep->afe_rbytes;
		break;

	case MAC_STAT_OPACKETS:
		*val = afep->afe_opackets;
		break;

	case MAC_STAT_OBYTES:
		*val = afep->afe_obytes;
		break;

	case MAC_STAT_NORCVBUF:
		*val = afep->afe_norcvbuf;
		break;

	case MAC_STAT_NOXMTBUF:
		*val = 0;
		break;

	case MAC_STAT_COLLISIONS:
		*val = afep->afe_collisions;
		break;

	case MAC_STAT_IERRORS:
		*val = afep->afe_errrcv;
		break;

	case MAC_STAT_OERRORS:
		*val = afep->afe_errxmt;
		break;

	case ETHER_STAT_ALIGN_ERRORS:
		*val = afep->afe_align_errors;
		break;

	case ETHER_STAT_FCS_ERRORS:
		*val = afep->afe_fcs_errors;
		break;

	case ETHER_STAT_SQE_ERRORS:
		*val = afep->afe_sqe_errors;
		break;

	case ETHER_STAT_DEFER_XMTS:
		*val = afep->afe_defer_xmts;
		break;

	case ETHER_STAT_FIRST_COLLISIONS:
		*val = afep->afe_first_collisions;
		break;

	case ETHER_STAT_MULTI_COLLISIONS:
		*val = afep->afe_multi_collisions;
		break;

	case ETHER_STAT_TX_LATE_COLLISIONS:
		*val = afep->afe_tx_late_collisions;
		break;

	case ETHER_STAT_EX_COLLISIONS:
		*val = afep->afe_ex_collisions;
		break;

	case ETHER_STAT_MACXMT_ERRORS:
		*val = afep->afe_macxmt_errors;
		break;

	case ETHER_STAT_CARRIER_ERRORS:
		*val = afep->afe_carrier_errors;
		break;

	case ETHER_STAT_TOOLONG_ERRORS:
		*val = afep->afe_toolong_errors;
		break;

	case ETHER_STAT_MACRCV_ERRORS:
		*val = afep->afe_macrcv_errors;
		break;

	case MAC_STAT_OVERFLOWS:
		*val = afep->afe_overflow;
		break;

	case MAC_STAT_UNDERFLOWS:
		*val = afep->afe_underflow;
		break;

	case ETHER_STAT_TOOSHORT_ERRORS:
		*val = afep->afe_runt;
		break;

	case ETHER_STAT_JABBER_ERRORS:
		*val = afep->afe_jabber;
		break;

	default:
		return (ENOTSUP);
	}
	return (0);
}

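/*
 * MAC property callbacks; these simply pass through to the common MII
 * layer, which owns the link-related properties for this driver.
 */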
int
afe_m_getprop(void *arg, const char *name, mac_prop_id_t num, uint_t sz,
    void *val)
{
	afe_t *afep = arg;

	return (mii_m_getprop(afep->afe_mii, name, num, sz, val));
}

int
afe_m_setprop(void *arg, const char *name, mac_prop_id_t num, uint_t sz,
    const void *val)
{
	afe_t *afep = arg;

	return (mii_m_setprop(afep->afe_mii, name, num, sz, val));
}

static void
afe_m_propinfo(void *arg, const char *name, mac_prop_id_t num,
    mac_prop_info_handle_t prh)
{
	afe_t *afep = arg;

	mii_m_propinfo(afep->afe_mii, name, num, prh);
}

/*
 * Debugging and error reporting.
 */
void
afe_error(dev_info_t *dip, char *fmt, ...)
{
	va_list ap;
	char buf[256];

	va_start(ap, fmt);
	(void) vsnprintf(buf, sizeof (buf), fmt, ap);
	va_end(ap);

	if (dip) {
		cmn_err(CE_WARN, "%s%d: %s",
		    ddi_driver_name(dip), ddi_get_instance(dip), buf);
	} else {
		cmn_err(CE_WARN, "afe: %s", buf);
	}
}
