1 /*
2 * This file and its contents are supplied under the terms of the
3 * Common Development and Distribution License ("CDDL"), version 1.0.
4 * You may only use this file in accordance with the terms of version
5 * 1.0 of the CDDL.
6 *
7 * A full copy of the text of the CDDL should have accompanied this
8 * source. A copy of the CDDL is also available via the Internet at
9 * http://www.illumos.org/license/CDDL.
10 */
11
12 /*
13 * Copyright 2014 Nexenta Systems, Inc. All rights reserved.
14 */
15
16 /*
17 * Intel Pro/100B Ethernet Driver
18 */
19
20 #include <sys/types.h>
21 #include <sys/modctl.h>
22 #include <sys/conf.h>
23 #include <sys/kmem.h>
24 #include <sys/ksynch.h>
25 #include <sys/cmn_err.h>
26 #include <sys/note.h>
27 #include <sys/pci.h>
28 #include <sys/pci_cap.h>
29 #include <sys/ethernet.h>
30 #include <sys/mii.h>
31 #include <sys/miiregs.h>
32 #include <sys/mac.h>
33 #include <sys/mac_ether.h>
34 #include <sys/ethernet.h>
35 #include <sys/vlan.h>
36 #include <sys/list.h>
37 #include <sys/sysmacros.h>
38 #include <sys/varargs.h>
39 #include <sys/stream.h>
40 #include <sys/strsun.h>
41 #include <sys/ddi.h>
42 #include <sys/sunddi.h>
43
44 #include "iprb.h"
45 #include "rcvbundl.h"
46
47 /*
48 * Intel has openly documented the programming interface for these
49 * parts in the "Intel 8255x 10/100 Mbps Ethernet Controller Family
50 * Open Source Software Developer Manual".
51 *
52 * While some open source systems have utilized many of the features
53 * of some models in this family (especially scatter gather and IP
54 * checksum support), we have elected to offer only the basic
55 * functionality. These are only 10/100 parts, and the additional
56 * complexity is not justified by the minimal performance benefit.
57 * KISS. So, we are only supporting the simple 82557 features.
58 */
59
60 static uint16_t iprb_mii_read(void *, uint8_t, uint8_t);
61 static void iprb_mii_write(void *, uint8_t, uint8_t, uint16_t);
62 static void iprb_mii_notify(void *, link_state_t);
63 static int iprb_attach(dev_info_t *);
64 static int iprb_detach(dev_info_t *);
65 static int iprb_quiesce(dev_info_t *);
66 static int iprb_suspend(dev_info_t *);
67 static int iprb_resume(dev_info_t *);
68 static int iprb_m_stat(void *, uint_t, uint64_t *);
69 static int iprb_m_start(void *);
70 static void iprb_m_stop(void *);
71 static int iprb_m_promisc(void *, boolean_t);
72 static int iprb_m_multicst(void *, boolean_t, const uint8_t *);
73 static int iprb_m_unicst(void *, const uint8_t *);
74 static mblk_t *iprb_m_tx(void *, mblk_t *);
75 static void iprb_m_ioctl(void *, queue_t *, mblk_t *);
76 static int iprb_m_setprop(void *, const char *, mac_prop_id_t, uint_t,
77 const void *);
78 static int iprb_m_getprop(void *, const char *, mac_prop_id_t, uint_t,
79 void *);
80 static void iprb_m_propinfo(void *, const char *, mac_prop_id_t,
81 mac_prop_info_handle_t);
82 static void iprb_destroy(iprb_t *);
83 static int iprb_configure(iprb_t *);
84 static void iprb_eeprom_sendbits(iprb_t *, uint32_t, uint8_t);
85 static uint16_t iprb_eeprom_read(iprb_t *, uint16_t);
86 static void iprb_identify(iprb_t *);
87 static int iprb_cmd_submit(iprb_t *, uint16_t);
88 static void iprb_cmd_reclaim(iprb_t *);
89 static int iprb_cmd_ready(iprb_t *);
90 static int iprb_cmd_drain(iprb_t *);
91 static void iprb_rx_add(iprb_t *);
92 static void iprb_rx_init(iprb_t *);
93 static mblk_t *iprb_rx(iprb_t *);
94 static mblk_t *iprb_send(iprb_t *, mblk_t *);
95 static uint_t iprb_intr(caddr_t, caddr_t);
96 static void iprb_periodic(void *);
97 static int iprb_add_intr(iprb_t *);
98 static int iprb_dma_alloc(iprb_t *, iprb_dma_t *, size_t);
99 static void iprb_dma_free(iprb_dma_t *);
100 static iprb_dma_t *iprb_cmd_next(iprb_t *);
101 static int iprb_set_config(iprb_t *);
102 static int iprb_set_unicast(iprb_t *);
103 static int iprb_set_multicast(iprb_t *);
104 static int iprb_set_ucode(iprb_t *);
105 static void iprb_update_stats(iprb_t *);
106 static int iprb_start(iprb_t *);
107 static void iprb_stop(iprb_t *);
108 static int iprb_ddi_attach(dev_info_t *, ddi_attach_cmd_t);
109 static int iprb_ddi_detach(dev_info_t *, ddi_detach_cmd_t);
110 static void iprb_error(iprb_t *, const char *, ...);
111
/* MII (PHY) access vector handed to the common mii module. */
static mii_ops_t iprb_mii_ops = {
	MII_OPS_VERSION,
	iprb_mii_read,
	iprb_mii_write,
	iprb_mii_notify,
	NULL,		/* reset */
};

/* GLDv3 entry points; capab/open/close are unused by this driver. */
static mac_callbacks_t iprb_m_callbacks = {
	MC_IOCTL | MC_SETPROP | MC_GETPROP | MC_PROPINFO,
	iprb_m_stat,
	iprb_m_start,
	iprb_m_stop,
	iprb_m_promisc,
	iprb_m_multicst,
	iprb_m_unicst,
	iprb_m_tx,
	NULL,
	iprb_m_ioctl,		/* mc_ioctl */
	NULL,			/* mc_getcapab */
	NULL,			/* mc_open */
	NULL,			/* mc_close */
	iprb_m_setprop,
	iprb_m_getprop,
	iprb_m_propinfo
};


/*
 * Stream information
 */
DDI_DEFINE_STREAM_OPS(iprb_devops, nulldev, nulldev,
    iprb_ddi_attach, iprb_ddi_detach, nodev, NULL, D_MP, NULL, iprb_quiesce);

static struct modldrv iprb_modldrv = {
	&mod_driverops,			/* drv_modops */
	"Intel 8255x Ethernet",		/* drv_linkinfo */
	&iprb_devops			/* drv_dev_ops */
};

static struct modlinkage iprb_modlinkage = {
	MODREV_1,		/* ml_rev */
	{ &iprb_modldrv, NULL }	/* ml_linkage */
};


/* Access attributes for device registers (little-endian, strict order). */
static ddi_device_acc_attr_t acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};

/* Access attributes for DMA data buffers (byte streams; caching OK). */
static ddi_device_acc_attr_t buf_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STORECACHING_OK_ACC
};

/*
 * The 8225x is a 32-bit addressing engine, but it can only address up
 * to 31 bits on a single transaction.  (Far less in reality it turns
 * out.)  Statistics buffers have to be 16-byte aligned, and as we
 * allocate individual data pieces for other things, there is no
 * compelling reason to use another attribute with support for less
 * strict alignment.
 */
static ddi_dma_attr_t dma_attr = {
	DMA_ATTR_V0,		/* dma_attr_version */
	0,			/* dma_attr_addr_lo */
	0xFFFFFFFFU,		/* dma_attr_addr_hi */
	0x7FFFFFFFU,		/* dma_attr_count_max */
	16,			/* dma_attr_align */
	0x100,			/* dma_attr_burstsizes */
	1,			/* dma_attr_minxfer */
	0xFFFFFFFFU,		/* dma_attr_maxxfer */
	0xFFFFFFFFU,		/* dma_attr_seg */
	1,			/* dma_attr_sgllen */
	1,			/* dma_attr_granular */
	0			/* dma_attr_flags */
};

/* Instantiate the per-variant receive-bundling microcode word arrays. */
#define	DECL_UCODE(x)						\
	static const uint32_t x ## _WORDS[] = x ## _RCVBUNDLE_UCODE
DECL_UCODE(D101_A);
DECL_UCODE(D101_B0);
DECL_UCODE(D101M_B);
DECL_UCODE(D101S);
DECL_UCODE(D102_B);
DECL_UCODE(D102_C);
DECL_UCODE(D102_E);

static uint8_t iprb_bcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

/*
 * We don't bother allowing for tuning of the CPU saver algorithm.
 * The ucode has reasonable defaults built-in.  However, some variants
 * apparently have bug fixes delivered via this ucode, so we still
 * need to support the ucode upload.
 */
typedef struct {
	uint8_t		rev;	/* PCI revision ID the ucode applies to */
	uint8_t		length;	/* number of 32-bit words in the ucode */
	const uint32_t	*ucode;
} iprb_ucode_t;

#define	UCODE(x)						\
	sizeof (x ## _WORDS) / sizeof (uint32_t), x ## _WORDS

/* Revision -> microcode table; terminated by a zero-length entry. */
static const iprb_ucode_t iprb_ucode[] = {
	{ REV_82558_A4, UCODE(D101_A) },
	{ REV_82558_B0, UCODE(D101_B0) },
	{ REV_82559_A0, UCODE(D101M_B) },
	{ REV_82559S_A, UCODE(D101S) },
	{ REV_82550, UCODE(D102_B) },
	{ REV_82550_C, UCODE(D102_C) },
	{ REV_82551_F, UCODE(D102_E) },
	{ 0 },
};
230
231 int
_init(void)232 _init(void)
233 {
234 int rv;
235 mac_init_ops(&iprb_devops, "iprb");
236 if ((rv = mod_install(&iprb_modlinkage)) != DDI_SUCCESS) {
237 mac_fini_ops(&iprb_devops);
238 }
239 return (rv);
240 }
241
242 int
_fini(void)243 _fini(void)
244 {
245 int rv;
246 if ((rv = mod_remove(&iprb_modlinkage)) == DDI_SUCCESS) {
247 mac_fini_ops(&iprb_devops);
248 }
249 return (rv);
250 }
251
int
_info(struct modinfo *modinfop)
{
	/* Report module information through the standard modlinkage. */
	return (mod_info(&iprb_modlinkage, modinfop));
}
257
/*
 * Core attach logic: map the device, reset it, read the factory MAC
 * address and feature bits from the EEPROM, allocate DMA rings and the
 * statistics buffer, hook up the interrupt, and register with the MAC
 * framework.  On any failure everything acquired so far is released
 * via iprb_destroy() and DDI_FAILURE is returned.
 */
int
iprb_attach(dev_info_t *dip)
{
	iprb_t *ip;
	uint16_t w;
	int i;
	mac_register_t *macp;

	ip = kmem_zalloc(sizeof (*ip), KM_SLEEP);
	ddi_set_driver_private(dip, ip);
	ip->dip = dip;

	list_create(&ip->mcast, sizeof (struct iprb_mcast),
	    offsetof(struct iprb_mcast, node));

	/* we don't support high level interrupts, so we don't need cookies */
	mutex_init(&ip->culock, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&ip->rulock, NULL, MUTEX_DRIVER, NULL);

	if (pci_config_setup(dip, &ip->pcih) != DDI_SUCCESS) {
		iprb_error(ip, "unable to map configuration space");
		iprb_destroy(ip);
		return (DDI_FAILURE);
	}

	/* Register set 1 is the device's CSR space. */
	if (ddi_regs_map_setup(dip, 1, &ip->regs, 0, 0, &acc_attr,
	    &ip->regsh) != DDI_SUCCESS) {
		iprb_error(ip, "unable to map device registers");
		iprb_destroy(ip);
		return (DDI_FAILURE);
	}

	/* Reset, but first go into idle state */
	PUT32(ip, CSR_PORT, PORT_SEL_RESET);
	drv_usecwait(10);
	PUT32(ip, CSR_PORT, PORT_SW_RESET);
	drv_usecwait(10);
	/* Mask all interrupts until we are ready; read flushes the write. */
	PUT8(ip, CSR_INTCTL, INTCTL_MASK);
	(void) GET8(ip, CSR_INTCTL);

	/*
	 * Precalculate watchdog times.
	 */
	ip->tx_timeout = TX_WATCHDOG;
	ip->rx_timeout = RX_WATCHDOG;

	iprb_identify(ip);

	/* Obtain our factory MAC address (EEPROM words 0-2, little-endian) */
	w = iprb_eeprom_read(ip, 0);
	ip->factaddr[0] = w & 0xff;
	ip->factaddr[1] = w >> 8;
	w = iprb_eeprom_read(ip, 1);
	ip->factaddr[2] = w & 0xff;
	ip->factaddr[3] = w >> 8;
	w = iprb_eeprom_read(ip, 2);
	ip->factaddr[4] = w & 0xff;
	ip->factaddr[5] = w >> 8;
	bcopy(ip->factaddr, ip->curraddr, 6);

	if (ip->resumebug) {
		/*
		 * Generally, most devices we will ever see will
		 * already have fixed firmware.  Since I can't verify
		 * the validity of the fix (no suitably downrev
		 * hardware), we'll just do our best to avoid it for
		 * devices that exhibit this behavior.
		 */
		if ((iprb_eeprom_read(ip, 10) & 0x02) == 0) {
			/* EEPROM fix was already applied, assume safe. */
			ip->resumebug = B_FALSE;
		}
	}

	/* EEPROM word 3 bits 0-1 clear indicates the RX hang erratum. */
	if ((iprb_eeprom_read(ip, 3) & 0x3) != 0x3) {
		cmn_err(CE_CONT, "?Enabling RX errata workaround.\n");
		ip->rxhangbug = B_TRUE;
	}

	/* Determine whether we have an MII or a legacy 80c24 */
	w = iprb_eeprom_read(ip, 6);
	if ((w & 0x3f00) != 0x0600) {
		if ((ip->miih = mii_alloc(ip, dip, &iprb_mii_ops)) == NULL) {
			iprb_error(ip, "unable to allocate MII ops vector");
			iprb_destroy(ip);
			return (DDI_FAILURE);
		}
		if (ip->canpause) {
			mii_set_pauseable(ip->miih, B_TRUE, B_FALSE);
		}
	}

	/* Allocate cmds and tx region */
	for (i = 0; i < NUM_TX; i++) {
		/* Command blocks */
		if (iprb_dma_alloc(ip, &ip->cmds[i], CB_SIZE) != DDI_SUCCESS) {
			iprb_destroy(ip);
			return (DDI_FAILURE);
		}
	}

	for (i = 0; i < NUM_TX; i++) {
		iprb_dma_t *cb = &ip->cmds[i];
		/* Link the command blocks into a ring */
		PUTCB32(cb, CB_LNK_OFFSET, (ip->cmds[(i + 1) % NUM_TX].paddr));
	}

	for (i = 0; i < NUM_RX; i++) {
		/* Rx packet buffers */
		if (iprb_dma_alloc(ip, &ip->rxb[i], RFD_SIZE) != DDI_SUCCESS) {
			iprb_destroy(ip);
			return (DDI_FAILURE);
		}
	}
	/* Hardware statistics dump area. */
	if (iprb_dma_alloc(ip, &ip->stats, STATS_SIZE) != DDI_SUCCESS) {
		iprb_destroy(ip);
		return (DDI_FAILURE);
	}

	if (iprb_add_intr(ip) != DDI_SUCCESS) {
		iprb_destroy(ip);
		return (DDI_FAILURE);
	}

	if ((macp = mac_alloc(MAC_VERSION)) == NULL) {
		iprb_error(ip, "unable to allocate mac structure");
		iprb_destroy(ip);
		return (DDI_FAILURE);
	}

	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	macp->m_driver = ip;
	macp->m_dip = dip;
	macp->m_src_addr = ip->curraddr;
	macp->m_callbacks = &iprb_m_callbacks;
	macp->m_min_sdu = 0;
	macp->m_max_sdu = ETHERMTU;
	macp->m_margin = VLAN_TAGSZ;
	if (mac_register(macp, &ip->mach) != 0) {
		iprb_error(ip, "unable to register mac with framework");
		mac_free(macp);
		iprb_destroy(ip);
		return (DDI_FAILURE);
	}

	/* The framework keeps its own copy; the register struct can go. */
	mac_free(macp);
	return (DDI_SUCCESS);
}
406
407 int
iprb_detach(dev_info_t * dip)408 iprb_detach(dev_info_t *dip)
409 {
410 iprb_t *ip;
411
412 ip = ddi_get_driver_private(dip);
413 ASSERT(ip != NULL);
414
415 if (mac_disable(ip->mach) != 0)
416 return (DDI_FAILURE);
417
418 (void) mac_unregister(ip->mach);
419 iprb_destroy(ip);
420 return (DDI_SUCCESS);
421 }
422
423 int
iprb_add_intr(iprb_t * ip)424 iprb_add_intr(iprb_t *ip)
425 {
426 int actual;
427
428 if (ddi_intr_alloc(ip->dip, &ip->intrh, DDI_INTR_TYPE_FIXED, 0, 1,
429 &actual, DDI_INTR_ALLOC_STRICT) != DDI_SUCCESS) {
430 iprb_error(ip, "failed allocating interrupt handle");
431 return (DDI_FAILURE);
432 }
433
434 if (ddi_intr_add_handler(ip->intrh, iprb_intr, ip, NULL) !=
435 DDI_SUCCESS) {
436 (void) ddi_intr_free(ip->intrh);
437 ip->intrh = NULL;
438 iprb_error(ip, "failed adding interrupt handler");
439 return (DDI_FAILURE);
440 }
441 if (ddi_intr_enable(ip->intrh) != DDI_SUCCESS) {
442 (void) ddi_intr_remove_handler(ip->intrh);
443 (void) ddi_intr_free(ip->intrh);
444 ip->intrh = NULL;
445 iprb_error(ip, "failed enabling interrupt");
446 return (DDI_FAILURE);
447 }
448 return (DDI_SUCCESS);
449 }
450
/*
 * Allocate, zero, and bind a single-cookie DMA buffer of the given
 * size, recording the handles plus kernel virtual and device physical
 * addresses in *h.  On failure, resources acquired so far are left in
 * *h; the caller is expected to release them with iprb_dma_free()
 * (iprb_destroy() does this on the attach failure path).
 */
int
iprb_dma_alloc(iprb_t *ip, iprb_dma_t *h, size_t size)
{
	size_t rlen;
	ddi_dma_cookie_t dmac;
	uint_t ndmac;

	if (ddi_dma_alloc_handle(ip->dip, &dma_attr, DDI_DMA_SLEEP, NULL,
	    &h->dmah) != DDI_SUCCESS) {
		iprb_error(ip, "unable to allocate dma handle");
		return (DDI_FAILURE);
	}
	if (ddi_dma_mem_alloc(h->dmah, size, &buf_attr, DDI_DMA_CONSISTENT,
	    DDI_DMA_SLEEP, NULL, &h->vaddr, &rlen, &h->acch) != DDI_SUCCESS) {
		iprb_error(ip, "unable to allocate dma memory");
		return (DDI_FAILURE);
	}
	bzero(h->vaddr, size);
	/* dma_attr_sgllen == 1 guarantees a single cookie on success */
	if (ddi_dma_addr_bind_handle(h->dmah, NULL, h->vaddr, size,
	    DDI_DMA_CONSISTENT | DDI_DMA_RDWR, DDI_DMA_SLEEP, NULL,
	    &dmac, &ndmac) != DDI_DMA_MAPPED) {
		iprb_error(ip, "unable to map command memory");
		return (DDI_FAILURE);
	}
	h->paddr = dmac.dmac_address;
	return (DDI_SUCCESS);
}
478
479 void
iprb_dma_free(iprb_dma_t * h)480 iprb_dma_free(iprb_dma_t *h)
481 {
482 if (h->paddr != 0)
483 (void) ddi_dma_unbind_handle(h->dmah);
484 h->paddr = 0;
485 if (h->acch != NULL)
486 ddi_dma_mem_free(&h->acch);
487 h->acch = NULL;
488 if (h->dmah != NULL)
489 ddi_dma_free_handle(&h->dmah);
490 h->dmah = NULL;
491 }
492
/*
 * Release every resource iprb_attach() may have acquired.  Safe to
 * call with a partially-initialized softstate (each teardown step is
 * guarded), which is how the attach failure paths use it.
 */
void
iprb_destroy(iprb_t *ip)
{
	int i;
	iprb_mcast_t *mc;

	/* shut down interrupts */
	if (ip->intrh != NULL) {
		(void) ddi_intr_disable(ip->intrh);
		(void) ddi_intr_remove_handler(ip->intrh);
		(void) ddi_intr_free(ip->intrh);
	}
	/* release DMA resources */
	for (i = 0; i < NUM_TX; i++) {
		iprb_dma_free(&ip->cmds[i]);
	}
	for (i = 0; i < NUM_RX; i++) {
		iprb_dma_free(&ip->rxb[i]);
	}
	iprb_dma_free(&ip->stats);

	if (ip->miih)
		mii_free(ip->miih);

	/* clean up the multicast list */
	while ((mc = list_head(&ip->mcast)) != NULL) {
		list_remove(&ip->mcast, mc);
		kmem_free(mc, sizeof (*mc));
	}

	/* tear down register mappings */
	if (ip->pcih)
		pci_config_teardown(&ip->pcih);
	if (ip->regsh)
		ddi_regs_map_free(&ip->regsh);

	/* clean the dip */
	ddi_set_driver_private(ip->dip, NULL);

	/* list must be emptied (above) before it can be destroyed */
	list_destroy(&ip->mcast);
	mutex_destroy(&ip->culock);
	mutex_destroy(&ip->rulock);

	/* and finally toss the structure itself */
	kmem_free(ip, sizeof (*ip));
}
539
/*
 * Read the PCI device and revision IDs and derive per-variant feature
 * flags: flow control (canpause), memory-write-invalidate (canmwi),
 * original 82557 behavior (is557), and whether the part needs the CU
 * resume workaround (resumebug).
 */
void
iprb_identify(iprb_t *ip)
{
	ip->devid = pci_config_get16(ip->pcih, PCI_CONF_DEVID);
	ip->revid = pci_config_get8(ip->pcih, PCI_CONF_REVID);

	switch (ip->devid) {
	case 0x1229:	/* 8255x family */
	case 0x1030:	/* Intel InBusiness */

		if (ip->revid >= REV_82558_A4) {
			ip->canpause = B_TRUE;
			ip->canmwi = B_TRUE;
		} else {
			/* pre-82558 silicon: original 82557 feature set */
			ip->is557 = B_TRUE;
		}
		if (ip->revid >= REV_82559_A0)
			ip->resumebug = B_TRUE;
		break;

	case 0x1209:	/* Embedded 82559ER */
		ip->canpause = B_TRUE;
		ip->resumebug = B_TRUE;
		ip->canmwi = B_TRUE;
		break;

	case 0x2449:	/* ICH2 */
	case 0x1031:	/* Pro/100 VE (ICH3) */
	case 0x1032:	/* Pro/100 VE (ICH3) */
	case 0x1033:	/* Pro/100 VM (ICH3) */
	case 0x1034:	/* Pro/100 VM (ICH3) */
	case 0x1038:	/* Pro/100 VM (ICH3) */
		ip->resumebug = B_TRUE;
		if (ip->revid >= REV_82558_A4)
			ip->canpause = B_TRUE;
		break;

	default:
		if (ip->revid >= REV_82558_A4)
			ip->canpause = B_TRUE;
		break;
	}

	/* Allow property override MWI support - not normally needed. */
	if (ddi_prop_get_int(DDI_DEV_T_ANY, ip->dip, 0, "MWIEnable", 1) == 0) {
		ip->canmwi = B_FALSE;
	}
}
588
589 void
iprb_eeprom_sendbits(iprb_t * ip,uint32_t val,uint8_t nbits)590 iprb_eeprom_sendbits(iprb_t *ip, uint32_t val, uint8_t nbits)
591 {
592 uint32_t mask;
593 uint16_t x;
594
595 mask = 1U << (nbits - 1);
596 while (mask) {
597 x = (mask & val) ? EEPROM_EEDI : 0;
598 PUT16(ip, CSR_EECTL, x | EEPROM_EECS);
599 drv_usecwait(100);
600 PUT16(ip, CSR_EECTL, x | EEPROM_EESK | EEPROM_EECS);
601 drv_usecwait(100);
602 PUT16(ip, CSR_EECTL, x | EEPROM_EECS);
603 drv_usecwait(100);
604 mask >>= 1;
605 }
606 }
607
/*
 * Read one 16-bit word from the serial EEPROM.  The EEPROM's address
 * width is discovered on the first call (address 0): address bits are
 * clocked out until the device answers with the dummy 0 on EEDO, and
 * the count is cached in ip->eeprom_bits for subsequent reads.  Reads
 * of a nonzero address before the width is known recurse once on
 * address 0 to establish it.
 */
uint16_t
iprb_eeprom_read(iprb_t *ip, uint16_t address)
{
	uint16_t val;
	int mask;
	uint16_t n;
	uint16_t bits;

	/* if we don't know the address size yet call again to determine it */
	if ((address != 0) && (ip->eeprom_bits == 0))
		(void) iprb_eeprom_read(ip, 0);

	if ((bits = ip->eeprom_bits) == 0) {
		/* probe with the widest supported address (8 bits) */
		bits = 8;
		ASSERT(address == 0);
	}
	/* enable the EEPROM chip select */
	PUT16(ip, CSR_EECTL, EEPROM_EECS);
	drv_usecwait(100);

	/* send a read command (opcode 110b) */
	iprb_eeprom_sendbits(ip, 6, 3);
	n = 0;
	for (mask = (1U << (bits - 1)); mask != 0; mask >>= 1) {
		uint16_t x = (mask & address) ? EEPROM_EEDI : 0;
		PUT16(ip, CSR_EECTL, x | EEPROM_EECS);
		drv_usecwait(100);
		PUT16(ip, CSR_EECTL, x | EEPROM_EESK | EEPROM_EECS);
		drv_usecwait(100);
		PUT16(ip, CSR_EECTL, x | EEPROM_EECS);
		drv_usecwait(100);

		n++;
		/* check the dummy 0 bit */
		if ((GET16(ip, CSR_EECTL) & EEPROM_EEDO) == 0) {
			if (ip->eeprom_bits == 0) {
				/* first probe: record discovered width */
				ip->eeprom_bits = n;
				cmn_err(CE_CONT, "?EEPROM size %d words.\n",
				    1U << ip->eeprom_bits);
			}
			break;
		}
	}
	if (n != ip->eeprom_bits) {
		iprb_error(ip, "cannot determine EEPROM size (%d, %d)",
		    ip->eeprom_bits, n);
	}

	/* shift out a 16-bit word, MSB first */
	val = 0;
	for (mask = 0x8000; mask; mask >>= 1) {
		PUT16(ip, CSR_EECTL, EEPROM_EECS | EEPROM_EESK);
		drv_usecwait(100);
		if (GET16(ip, CSR_EECTL) & EEPROM_EEDO)
			val |= mask;
		drv_usecwait(100);
		PUT16(ip, CSR_EECTL, EEPROM_EECS);
		drv_usecwait(100);
	}

	/* and disable the eeprom */
	PUT16(ip, CSR_EECTL, 0);
	drv_usecwait(100);

	return (val);
}
674
675 int
iprb_cmd_ready(iprb_t * ip)676 iprb_cmd_ready(iprb_t *ip)
677 {
678 /* wait for pending SCB commands to be accepted */
679 for (int cnt = 1000000; cnt != 0; cnt -= 10) {
680 if (GET8(ip, CSR_CMD) == 0) {
681 return (DDI_SUCCESS);
682 }
683 drv_usecwait(10);
684 }
685 iprb_error(ip, "timeout waiting for chip to become ready");
686 return (DDI_FAILURE);
687 }
688
689 void
iprb_cmd_reclaim(iprb_t * ip)690 iprb_cmd_reclaim(iprb_t *ip)
691 {
692 while (ip->cmd_count) {
693 iprb_dma_t *cb = &ip->cmds[ip->cmd_tail];
694
695 SYNCCB(cb, CB_STS_OFFSET, 2, DDI_DMA_SYNC_FORKERNEL);
696 if ((GETCB16(cb, CB_STS_OFFSET) & CB_STS_C) == 0) {
697 break;
698 }
699
700 ip->cmd_tail++;
701 ip->cmd_tail %= NUM_TX;
702 ip->cmd_count--;
703 if (ip->cmd_count == 0) {
704 ip->tx_wdog = 0;
705 } else {
706 ip->tx_wdog = gethrtime();
707 }
708 }
709 }
710
711 int
iprb_cmd_drain(iprb_t * ip)712 iprb_cmd_drain(iprb_t *ip)
713 {
714 for (int i = 1000000; i; i -= 10) {
715 iprb_cmd_reclaim(ip);
716 if (ip->cmd_count == 0)
717 return (DDI_SUCCESS);
718 drv_usecwait(10);
719 }
720 iprb_error(ip, "time out waiting for commands to drain");
721 return (DDI_FAILURE);
722 }
723
/*
 * Submit the next command block at cmd_head to the device.  The new
 * CB is marked with the suspend bit, the suspend bit is cleared from
 * the previously submitted CB, and the CU is resumed.  Ring indices
 * and the TX watchdog are updated on success.  Caller holds culock
 * and has filled in the command-specific fields of the CB.
 */
int
iprb_cmd_submit(iprb_t *ip, uint16_t cmd)
{
	iprb_dma_t *ncb = &ip->cmds[ip->cmd_head];
	iprb_dma_t *lcb = &ip->cmds[ip->cmd_last];

	/* If this command will consume the last CB, interrupt when done */
	ASSERT((ip->cmd_count) < NUM_TX);
	if (ip->cmd_count == (NUM_TX - 1)) {
		cmd |= CB_CMD_I;
	}

	/* clear the status entry */
	PUTCB16(ncb, CB_STS_OFFSET, 0);

	/* suspend upon completion of this new command */
	cmd |= CB_CMD_S;
	PUTCB16(ncb, CB_CMD_OFFSET, cmd);
	SYNCCB(ncb, 0, 0, DDI_DMA_SYNC_FORDEV);

	/* clear the suspend flag from the last submitted command */
	SYNCCB(lcb, CB_CMD_OFFSET, 2, DDI_DMA_SYNC_FORKERNEL);
	PUTCB16(lcb, CB_CMD_OFFSET, GETCB16(lcb, CB_CMD_OFFSET) & ~CB_CMD_S);
	SYNCCB(lcb, CB_CMD_OFFSET, 2, DDI_DMA_SYNC_FORDEV);


	/*
	 * If the chip has a resume bug, then we need to try this as a work
	 * around.  Some anecdotal evidence is that this will help solve
	 * the resume bug.  Its a performance hit, but only if the EEPROM
	 * is not updated.  (In theory we could do this only for 10Mbps HDX,
	 * but since it should just about never get used, we keep it simple.)
	 */
	if (ip->resumebug) {
		if (iprb_cmd_ready(ip) != DDI_SUCCESS)
			return (DDI_FAILURE);
		/* NOP the CU before the resume; CSR read flushes the write */
		PUT8(ip, CSR_CMD, CUC_NOP);
		(void) GET8(ip, CSR_CMD);
		drv_usecwait(1);
	}

	/* wait for the SCB to be ready to accept a new command */
	if (iprb_cmd_ready(ip) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * Finally we can resume the CU.  Note that if this the first
	 * command in the sequence (i.e. if the CU is IDLE), or if the
	 * CU is already busy working, then this CU resume command
	 * will not have any effect.
	 */
	PUT8(ip, CSR_CMD, CUC_RESUME);
	(void) GET8(ip, CSR_CMD);	/* flush CSR */

	/* account for the newly outstanding command */
	ip->tx_wdog = gethrtime();
	ip->cmd_last = ip->cmd_head;
	ip->cmd_head++;
	ip->cmd_head %= NUM_TX;
	ip->cmd_count++;

	return (DDI_SUCCESS);
}
786
787 iprb_dma_t *
iprb_cmd_next(iprb_t * ip)788 iprb_cmd_next(iprb_t *ip)
789 {
790 if (ip->cmd_count == NUM_TX) {
791 return (NULL);
792 }
793 ASSERT(ip->cmd_count < NUM_TX);
794 return (&ip->cmds[ip->cmd_head]);
795 }
796
797 int
iprb_set_unicast(iprb_t * ip)798 iprb_set_unicast(iprb_t *ip)
799 {
800 iprb_dma_t *cb;
801
802 ASSERT(mutex_owned(&ip->culock));
803
804 if ((cb = iprb_cmd_next(ip)) == NULL)
805 return (DDI_FAILURE);
806
807 PUTCBEA(cb, CB_IAS_ADR_OFFSET, ip->curraddr);
808 return (iprb_cmd_submit(ip, CB_CMD_IAS));
809 }
810
811 int
iprb_set_multicast(iprb_t * ip)812 iprb_set_multicast(iprb_t *ip)
813 {
814 iprb_dma_t *cb;
815 iprb_mcast_t *mc;
816 int i;
817 list_t *l;
818
819 ASSERT(mutex_owned(&ip->culock));
820
821 if ((ip->nmcast <= 0) || (ip->nmcast > CB_MCS_CNT_MAX)) {
822 /*
823 * Only send the list if the total number of multicast
824 * address is nonzero and small enough to fit. We
825 * don't error out if it is too big, because in that
826 * case we will use the "allmulticast" support
827 * via iprb_set_config instead.
828 */
829 return (DDI_SUCCESS);
830 }
831
832 if ((cb = iprb_cmd_next(ip)) == NULL) {
833 return (DDI_FAILURE);
834 }
835
836 l = &ip->mcast;
837 for (mc = list_head(l), i = 0; mc; mc = list_next(l, mc), i++) {
838 PUTCBEA(cb, CB_MCS_ADR_OFFSET + (i * 6), mc->addr);
839 }
840 ASSERT(i == ip->nmcast);
841 PUTCB16(cb, CB_MCS_CNT_OFFSET, i);
842 return (iprb_cmd_submit(ip, CB_CMD_MCS));
843 }
844
/*
 * Queue a CONFIGURE command carrying the 22-byte configuration block.
 * The byte values follow the 8255x "Open Source Software Developer
 * Manual" configure-block layout; comments below note the bits this
 * driver varies, other constants are the manual's recommended
 * defaults (NOTE(review): byte meanings per OSDM — confirm against
 * the manual when changing any of them).
 */
int
iprb_set_config(iprb_t *ip)
{
	iprb_dma_t *cb;

	ASSERT(mutex_owned(&ip->culock));
	if ((cb = iprb_cmd_next(ip)) == NULL) {
		return (DDI_FAILURE);
	}
	PUTCB8(cb, CB_CONFIG_OFFSET + 0, 0x16);	/* 22 config bytes follow */
	PUTCB8(cb, CB_CONFIG_OFFSET + 1, 0x8);
	PUTCB8(cb, CB_CONFIG_OFFSET + 2, 0);
	/* bit 0: enable PCI memory-write-invalidate when supported */
	PUTCB8(cb, CB_CONFIG_OFFSET + 3, (ip->canmwi ? 1 : 0));
	PUTCB8(cb, CB_CONFIG_OFFSET + 4, 0);
	PUTCB8(cb, CB_CONFIG_OFFSET + 5, 0);
	/* bit 7: save bad frames when promiscuous */
	PUTCB8(cb, CB_CONFIG_OFFSET + 6, (ip->promisc ? 0x80 : 0) | 0x3a);
	/* bit 0: discard short receives, except when promiscuous */
	PUTCB8(cb, CB_CONFIG_OFFSET + 7, (ip->promisc ? 0 : 0x1) | 2);
	/* bit 0: MII mode (vs. legacy 503 interface) */
	PUTCB8(cb, CB_CONFIG_OFFSET + 8, (ip->miih ? 0x1 : 0));
	PUTCB8(cb, CB_CONFIG_OFFSET + 9, 0);
	PUTCB8(cb, CB_CONFIG_OFFSET + 10, 0x2e);
	PUTCB8(cb, CB_CONFIG_OFFSET + 11, 0);
	/* 82557 differs from later parts in this byte's low bit */
	PUTCB8(cb, CB_CONFIG_OFFSET + 12, (ip->is557 ? 0 : 1) | 0x60);
	PUTCB8(cb, CB_CONFIG_OFFSET + 13, 0);
	PUTCB8(cb, CB_CONFIG_OFFSET + 14, 0xf2);
	/* bit 7: CRS/CDT for MII; bit 0: promiscuous mode */
	PUTCB8(cb, CB_CONFIG_OFFSET + 15,
	    (ip->miih ? 0x80 : 0) | (ip->promisc ? 0x1 : 0) | 0x48);
	PUTCB8(cb, CB_CONFIG_OFFSET + 16, 0);
	/* bit 6: flow control, only on parts that support pause */
	PUTCB8(cb, CB_CONFIG_OFFSET + 17, (ip->canpause ? 0x40 : 0));
	PUTCB8(cb, CB_CONFIG_OFFSET + 18, (ip->is557 ? 0 : 0x8) | 0xf2);
	PUTCB8(cb, CB_CONFIG_OFFSET + 19,
	    ((ip->revid < REV_82558_B0) ? 0 : 0x80) |
	    (ip->canpause ? 0x18 : 0));
	PUTCB8(cb, CB_CONFIG_OFFSET + 20, 0x3f);
	/* bit 3: receive all multicast when the MCS list won't fit */
	PUTCB8(cb, CB_CONFIG_OFFSET + 21,
	    ((ip->nmcast >= CB_MCS_CNT_MAX) ? 0x8 : 0) | 0x5);

	return (iprb_cmd_submit(ip, CB_CMD_CONFIG));
}
883
884 int
iprb_set_ucode(iprb_t * ip)885 iprb_set_ucode(iprb_t *ip)
886 {
887 iprb_dma_t *cb;
888 const iprb_ucode_t *uc = NULL;
889 int i;
890
891 for (i = 0; iprb_ucode[i].length; i++) {
892 if (iprb_ucode[i].rev == ip->revid) {
893 uc = &iprb_ucode[i];
894 break;
895 }
896 }
897 if (uc == NULL) {
898 /* no matching firmware found, assume success */
899 return (DDI_SUCCESS);
900 }
901
902 ASSERT(mutex_owned(&ip->culock));
903 if ((cb = iprb_cmd_next(ip)) == NULL) {
904 return (DDI_FAILURE);
905 }
906 for (i = 0; i < uc->length; i++) {
907 PUTCB32(cb, (CB_UCODE_OFFSET + i * 4), uc->ucode[i]);
908 }
909 return (iprb_cmd_submit(ip, CB_CMD_UCODE));
910 }
911
912 int
iprb_configure(iprb_t * ip)913 iprb_configure(iprb_t *ip)
914 {
915 ASSERT(mutex_owned(&ip->culock));
916
917 if (iprb_cmd_drain(ip) != DDI_SUCCESS)
918 return (DDI_FAILURE);
919
920 if (iprb_set_config(ip) != DDI_SUCCESS)
921 return (DDI_FAILURE);
922 if (iprb_set_unicast(ip) != DDI_SUCCESS)
923 return (DDI_FAILURE);
924 if (iprb_set_multicast(ip) != DDI_SUCCESS)
925 return (DDI_FAILURE);
926
927 return (DDI_SUCCESS);
928 }
929
/*
 * Quiesce the device: a selective reset takes it idle, then all
 * device interrupt sources are masked.  The CSR read after the reset
 * flushes the posted write before the settle delay.
 */
void
iprb_stop(iprb_t *ip)
{
	/* go idle */
	PUT32(ip, CSR_PORT, PORT_SEL_RESET);
	(void) GET32(ip, CSR_PORT);
	drv_usecwait(50);

	/* shut off device interrupts */
	PUT8(ip, CSR_INTCTL, INTCTL_MASK);
}
941
/*
 * Bring the device from reset to an operating state: full reset, ring
 * pointer reset, CU/RU base programming, a NOP to prime the command
 * ring, microcode upload, RFA setup, RU start, and finally interrupt
 * enable.  Caller holds both culock and rulock; the ordering of the
 * register writes below is significant.
 */
int
iprb_start(iprb_t *ip)
{
	iprb_dma_t *cb;

	ASSERT(mutex_owned(&ip->rulock));
	ASSERT(mutex_owned(&ip->culock));

	/* Reset, but first go into idle state */
	PUT32(ip, CSR_PORT, PORT_SEL_RESET);
	(void) GET32(ip, CSR_PORT);
	drv_usecwait(50);

	PUT32(ip, CSR_PORT, PORT_SW_RESET);
	(void) GET32(ip, CSR_PORT);
	drv_usecwait(10);
	/* keep interrupts masked until everything below is in place */
	PUT8(ip, CSR_INTCTL, INTCTL_MASK);

	/* Reset pointers */
	ip->cmd_head = ip->cmd_tail = 0;
	ip->cmd_last = NUM_TX - 1;

	/* program the CU base address (0: CBs use absolute addresses) */
	if (iprb_cmd_ready(ip) != DDI_SUCCESS)
		return (DDI_FAILURE);
	PUT32(ip, CSR_GEN_PTR, 0);
	PUT8(ip, CSR_CMD, CUC_CUBASE);
	(void) GET8(ip, CSR_CMD);

	/* likewise for the RU base address */
	if (iprb_cmd_ready(ip) != DDI_SUCCESS)
		return (DDI_FAILURE);
	PUT32(ip, CSR_GEN_PTR, 0);
	PUT8(ip, CSR_CMD, RUC_RUBASE);
	(void) GET8(ip, CSR_CMD);

	/* Send a NOP.  This will be the first command seen by the device. */
	cb = iprb_cmd_next(ip);
	ASSERT(cb);
	if (iprb_cmd_submit(ip, CB_CMD_NOP) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/* as that was the first command, go ahead and submit a CU start */
	if (iprb_cmd_ready(ip) != DDI_SUCCESS)
		return (DDI_FAILURE);
	PUT32(ip, CSR_GEN_PTR, cb->paddr);
	PUT8(ip, CSR_CMD, CUC_START);
	(void) GET8(ip, CSR_CMD);

	/* Upload firmware. */
	if (iprb_set_ucode(ip) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/* Set up RFDs */
	iprb_rx_init(ip);

	PUT32(ip, CSR_GEN_PTR, ip->rxb[0].paddr);
	/* wait for the SCB */
	(void) iprb_cmd_ready(ip);
	PUT8(ip, CSR_CMD, RUC_START);
	(void) GET8(ip, CSR_CMD);	/* flush CSR */

	/* Enable device interrupts */
	PUT8(ip, CSR_INTCTL, 0);
	(void) GET8(ip, CSR_INTCTL);

	return (DDI_SUCCESS);
}
1008
/*
 * Dump and reset the hardware statistics counters into the stats DMA
 * buffer and fold them into the driver's running counters.  Rate
 * limited to at most once per second (by comparing whole seconds of
 * the hrtime stamps).  Caller holds culock.
 */
void
iprb_update_stats(iprb_t *ip)
{
	iprb_dma_t *sp = &ip->stats;
	hrtime_t tstamp;
	int i;

	ASSERT(mutex_owned(&ip->culock));

	/* Collect the hardware stats, but don't keep redoing it */
	tstamp = gethrtime();
	if (tstamp / NANOSEC == ip->stats_time / NANOSEC)
		return;

	/* clear the completion word so we can poll for it below */
	PUTSTAT(sp, STATS_DONE_OFFSET, 0);
	SYNCSTATS(sp, 0, 0, DDI_DMA_SYNC_FORDEV);

	/* point the device at the stats buffer */
	if (iprb_cmd_ready(ip) != DDI_SUCCESS)
		return;
	PUT32(ip, CSR_GEN_PTR, sp->paddr);
	PUT8(ip, CSR_CMD, CUC_STATSBASE);
	(void) GET8(ip, CSR_CMD);

	/* dump counters and reset them in one operation */
	if (iprb_cmd_ready(ip) != DDI_SUCCESS)
		return;
	PUT8(ip, CSR_CMD, CUC_STATS_RST);
	(void) GET8(ip, CSR_CMD);	/* flush wb */

	/* wait up to ~10ms for the device to finish the dump */
	for (i = 10000; i; i -= 10) {
		SYNCSTATS(sp, 0, 0, DDI_DMA_SYNC_FORKERNEL);
		if (GETSTAT(sp, STATS_DONE_OFFSET) == STATS_RST_DONE) {
			/* yay stats are updated */
			break;
		}
		drv_usecwait(10);
	}
	if (i == 0) {
		iprb_error(ip, "time out acquiring hardware statistics");
		return;
	}

	/* counters were reset by the dump, so accumulate deltas */
	ip->ex_coll += GETSTAT(sp, STATS_TX_MAXCOL_OFFSET);
	ip->late_coll += GETSTAT(sp, STATS_TX_LATECOL_OFFSET);
	ip->uflo += GETSTAT(sp, STATS_TX_UFLO_OFFSET);
	ip->defer_xmt += GETSTAT(sp, STATS_TX_DEFER_OFFSET);
	ip->one_coll += GETSTAT(sp, STATS_TX_ONECOL_OFFSET);
	ip->multi_coll += GETSTAT(sp, STATS_TX_MULTCOL_OFFSET);
	ip->collisions += GETSTAT(sp, STATS_TX_TOTCOL_OFFSET);
	ip->fcs_errs += GETSTAT(sp, STATS_RX_FCS_OFFSET);
	ip->align_errs += GETSTAT(sp, STATS_RX_ALIGN_OFFSET);
	ip->norcvbuf += GETSTAT(sp, STATS_RX_NOBUF_OFFSET);
	ip->oflo += GETSTAT(sp, STATS_RX_OFLO_OFFSET);
	ip->runt += GETSTAT(sp, STATS_RX_SHORT_OFFSET);

	ip->stats_time = tstamp;
}
1065
/*
 * Transmit a single message.  The packet is copied into the next free
 * command block's inline data area (simplified mode: no scatter-
 * gather) and submitted as a TX command.  Returns mp (unconsumed) if
 * the ring is full, so the framework will retry later; returns NULL
 * once the message has been consumed (sent or dropped).  Caller holds
 * culock.
 */
mblk_t *
iprb_send(iprb_t *ip, mblk_t *mp)
{
	iprb_dma_t *cb;
	size_t sz;

	ASSERT(mutex_owned(&ip->culock));

	/* possibly reclaim some CBs */
	iprb_cmd_reclaim(ip);

	cb = iprb_cmd_next(ip);

	if (cb == NULL) {
		/* flow control */
		ip->wantw = B_TRUE;
		return (mp);
	}

	if ((sz = msgsize(mp)) > (ETHERMAX + VLAN_TAGSZ)) {
		/* Generally this should never occur */
		ip->macxmt_errs++;
		freemsg(mp);
		return (NULL);
	}

	ip->opackets++;
	ip->obytes += sz;

	/* simplified mode: no TBD array, data follows the CB header */
	PUTCB32(cb, CB_TX_TBD_OFFSET, 0xffffffffU);
	PUTCB16(cb, CB_TX_COUNT_OFFSET, (sz & 0x3fff) | CB_TX_EOF);
	PUTCB8(cb, CB_TX_THRESH_OFFSET, (sz / 8) & 0xff);
	PUTCB8(cb, CB_TX_NUMBER_OFFSET, 0);
	/* mcopymsg copies and frees mp — mp must not be touched after this */
	mcopymsg(mp, cb->vaddr + CB_TX_DATA_OFFSET);
	/* group bit set: classify as broadcast or other multicast */
	if (cb->vaddr[CB_TX_DATA_OFFSET] & 0x1) {
		if (bcmp(cb->vaddr + CB_TX_DATA_OFFSET, &iprb_bcast, 6) != 0) {
			ip->multixmt++;
		} else {
			ip->brdcstxmt++;
		}
	}
	SYNCCB(cb, 0, CB_TX_DATA_OFFSET + sz, DDI_DMA_SYNC_FORDEV);

	if (iprb_cmd_submit(ip, CB_CMD_TX) != DDI_SUCCESS) {
		ip->macxmt_errs++;
	}

	return (NULL);
}
1115
1116 void
iprb_rx_add(iprb_t * ip)1117 iprb_rx_add(iprb_t *ip)
1118 {
1119 uint16_t last, curr, next;
1120 iprb_dma_t *rfd, *nfd, *lfd;
1121
1122 ASSERT(mutex_owned(&ip->rulock));
1123
1124 curr = ip->rx_index;
1125 last = ip->rx_last;
1126 next = (curr + 1) % NUM_RX;
1127
1128 ip->rx_last = curr;
1129 ip->rx_index = next;
1130
1131 lfd = &ip->rxb[last];
1132 rfd = &ip->rxb[curr];
1133 nfd = &ip->rxb[next];
1134
1135 PUTRFD32(rfd, RFD_LNK_OFFSET, nfd->paddr);
1136 PUTRFD16(rfd, RFD_CTL_OFFSET, RFD_CTL_EL);
1137 PUTRFD16(rfd, RFD_SIZ_OFFSET, RFD_SIZE - RFD_PKT_OFFSET);
1138 PUTRFD16(rfd, RFD_CNT_OFFSET, 0);
1139 SYNCRFD(rfd, 0, RFD_PKT_OFFSET, DDI_DMA_SYNC_FORDEV);
1140 /* clear the suspend & EL bits from the previous RFD */
1141 PUTRFD16(lfd, RFD_CTL_OFFSET, 0);
1142 SYNCRFD(rfd, RFD_CTL_OFFSET, 2, DDI_DMA_SYNC_FORDEV);
1143 }
1144
1145 void
iprb_rx_init(iprb_t * ip)1146 iprb_rx_init(iprb_t *ip)
1147 {
1148 ip->rx_index = 0;
1149 ip->rx_last = NUM_RX - 1;
1150 for (int i = 0; i < NUM_RX; i++)
1151 iprb_rx_add(ip);
1152 ip->rx_index = 0;
1153 ip->rx_last = NUM_RX - 1;
1154 }
1155
/*
 * Harvest completed receive frames from the RFD ring.
 *
 * Walks at most NUM_RX descriptors, copying each completed frame into
 * a freshly allocated mblk and immediately recycling the RFD.  Returns
 * a b_next-linked chain of mblks (possibly NULL) for mac_rx().
 *
 * Caller must hold rulock.
 */
mblk_t *
iprb_rx(iprb_t *ip)
{
	iprb_dma_t *rfd;
	uint16_t cnt;
	uint16_t sts;
	int i;
	mblk_t *mplist;
	mblk_t **mpp;
	mblk_t *mp;

	mplist = NULL;
	mpp = &mplist;

	for (i = 0; i < NUM_RX; i++) {
		rfd = &ip->rxb[ip->rx_index];
		/* peek at just the status word first */
		SYNCRFD(rfd, RFD_STS_OFFSET, 2, DDI_DMA_SYNC_FORKERNEL);
		if ((GETRFD16(rfd, RFD_STS_OFFSET) & RFD_STS_C) == 0) {
			/* not complete yet -- stop harvesting */
			break;
		}

		/* feed the receive watchdog */
		ip->rx_wdog = gethrtime();

		/* sync the entire RFD (length 0 == whole handle) */
		SYNCRFD(rfd, 0, 0, DDI_DMA_SYNC_FORKERNEL);
		cnt = GETRFD16(rfd, RFD_CNT_OFFSET);
		cnt &= ~(RFD_CNT_EOF | RFD_CNT_F);
		sts = GETRFD16(rfd, RFD_STS_OFFSET);

		if (cnt > (ETHERMAX + VLAN_TAGSZ)) {
			ip->toolong++;
			iprb_rx_add(ip);
			continue;
		}
		if (((sts & RFD_STS_OK) == 0) && (sts & RFD_STS_ERRS)) {
			iprb_rx_add(ip);
			continue;
		}
		if ((mp = allocb(cnt, BPRI_MED)) == NULL) {
			ip->norcvbuf++;
			iprb_rx_add(ip);
			continue;
		}
		/* copy out before recycling the descriptor */
		bcopy(rfd->vaddr + RFD_PKT_OFFSET, mp->b_wptr, cnt);

		/* return it to the RFD list */
		iprb_rx_add(ip);

		mp->b_wptr += cnt;
		ip->ipackets++;
		ip->rbytes += cnt;
		/* group bit set: count as either broadcast or multicast */
		if (mp->b_rptr[0] & 0x1) {
			if (bcmp(mp->b_rptr, &iprb_bcast, 6) != 0) {
				ip->multircv++;
			} else {
				ip->brdcstrcv++;
			}
		}
		*mpp = mp;
		mpp = &mp->b_next;
	}
	return (mplist);
}
1218
1219 int
iprb_m_promisc(void * arg,boolean_t on)1220 iprb_m_promisc(void *arg, boolean_t on)
1221 {
1222 iprb_t *ip = arg;
1223
1224 mutex_enter(&ip->culock);
1225 ip->promisc = on;
1226 if (ip->running && !ip->suspended)
1227 (void) iprb_configure(ip);
1228 mutex_exit(&ip->culock);
1229 return (0);
1230 }
1231
1232 int
iprb_m_unicst(void * arg,const uint8_t * macaddr)1233 iprb_m_unicst(void *arg, const uint8_t *macaddr)
1234 {
1235 iprb_t *ip = arg;
1236
1237 mutex_enter(&ip->culock);
1238 bcopy(macaddr, ip->curraddr, 6);
1239 if (ip->running && !ip->suspended)
1240 (void) iprb_configure(ip);
1241 mutex_exit(&ip->culock);
1242 return (0);
1243 }
1244
1245 int
iprb_m_multicst(void * arg,boolean_t add,const uint8_t * macaddr)1246 iprb_m_multicst(void *arg, boolean_t add, const uint8_t *macaddr)
1247 {
1248 iprb_t *ip = arg;
1249 list_t *l = &ip->mcast;
1250 iprb_mcast_t *mc;
1251
1252 if (add) {
1253 mc = kmem_alloc(sizeof (*mc), KM_NOSLEEP);
1254 if (mc == NULL) {
1255 return (ENOMEM);
1256 }
1257 bcopy(macaddr, mc->addr, 6);
1258 mutex_enter(&ip->culock);
1259 list_insert_head(l, mc);
1260 ip->nmcast++;
1261 if (ip->running && !ip->suspended)
1262 (void) iprb_configure(ip);
1263 mutex_exit(&ip->culock);
1264 } else {
1265 mutex_enter(&ip->culock);
1266 for (mc = list_head(l); mc != NULL; mc = list_next(l, mc)) {
1267 if (bcmp(macaddr, mc->addr, 6) == 0) {
1268 list_remove(&ip->mcast, mc);
1269 ip->nmcast--;
1270 if (ip->running && !ip->suspended)
1271 (void) iprb_configure(ip);
1272 break;
1273 }
1274 }
1275 mutex_exit(&ip->culock);
1276 if (mc)
1277 kmem_free(mc, sizeof (*mc));
1278 }
1279 return (0);
1280 }
1281
1282 int
iprb_m_start(void * arg)1283 iprb_m_start(void *arg)
1284 {
1285 int rv;
1286 iprb_t *ip = arg;
1287
1288 mutex_enter(&ip->rulock);
1289 mutex_enter(&ip->culock);
1290 rv = ip->suspended ? 0 : iprb_start(ip);
1291 if (rv == 0)
1292 ip->running = B_TRUE;
1293 ip->perh = ddi_periodic_add(iprb_periodic, ip, 5000000000, 0);
1294 mutex_exit(&ip->culock);
1295 mutex_exit(&ip->rulock);
1296 if (rv == 0) {
1297 if (ip->miih)
1298 mii_start(ip->miih);
1299 else
1300 /* might be a lie. */
1301 mac_link_update(ip->mach, LINK_STATE_UP);
1302 }
1303 return (rv ? EIO : 0);
1304 }
1305
1306 void
iprb_m_stop(void * arg)1307 iprb_m_stop(void *arg)
1308 {
1309 iprb_t *ip = arg;
1310
1311 if (ip->miih) {
1312 mii_stop(ip->miih);
1313 } else {
1314 mac_link_update(ip->mach, LINK_STATE_DOWN);
1315 }
1316
1317 ddi_periodic_delete(ip->perh);
1318 ip->perh = 0;
1319
1320 mutex_enter(&ip->rulock);
1321 mutex_enter(&ip->culock);
1322
1323 if (!ip->suspended) {
1324 iprb_update_stats(ip);
1325 iprb_stop(ip);
1326 }
1327 ip->running = B_FALSE;
1328 mutex_exit(&ip->culock);
1329 mutex_exit(&ip->rulock);
1330 }
1331
/*
 * GLDv3 m_stat(9E) entry point: report one statistic.
 *
 * MII-maintained statistics are answered by the MII layer first; the
 * remainder come from the soft counters, which are refreshed from the
 * chip's dump area when the device is running.  Returns ENOTSUP for
 * statistics we do not keep.
 */
int
iprb_m_stat(void *arg, uint_t stat, uint64_t *val)
{
	iprb_t *ip = arg;

	/* let the MII layer answer PHY/link statistics if it can */
	if (ip->miih && (mii_m_getstat(ip->miih, stat, val) == 0)) {
		return (0);
	}

	mutex_enter(&ip->culock);
	if ((!ip->suspended) && (ip->running)) {
		iprb_update_stats(ip);
	}
	mutex_exit(&ip->culock);

	switch (stat) {
	case MAC_STAT_IFSPEED:
		/* without a PHY we can only claim the base rate */
		if (ip->miih == NULL) {
			*val = 10000000;	/* 10 Mbps */
		}
		break;
	case ETHER_STAT_LINK_DUPLEX:
		if (ip->miih == NULL) {
			*val = LINK_DUPLEX_UNKNOWN;
		}
		break;
	case MAC_STAT_MULTIRCV:
		*val = ip->multircv;
		break;
	case MAC_STAT_BRDCSTRCV:
		*val = ip->brdcstrcv;
		break;
	case MAC_STAT_MULTIXMT:
		*val = ip->multixmt;
		break;
	case MAC_STAT_BRDCSTXMT:
		*val = ip->brdcstxmt;
		break;
	case MAC_STAT_IPACKETS:
		* val = ip->ipackets;
		break;
	case MAC_STAT_RBYTES:
		*val = ip->rbytes;
		break;
	case MAC_STAT_OPACKETS:
		*val = ip->opackets;
		break;
	case MAC_STAT_OBYTES:
		*val = ip->obytes;
		break;
	case MAC_STAT_NORCVBUF:
		*val = ip->norcvbuf;
		break;
	case MAC_STAT_COLLISIONS:
		*val = ip->collisions;
		break;
	case MAC_STAT_IERRORS:
		/* aggregate of all receive-side error counters */
		*val = ip->align_errs +
		    ip->fcs_errs +
		    ip->norcvbuf +
		    ip->runt +
		    ip->toolong +
		    ip->macrcv_errs;
		break;
	case MAC_STAT_OERRORS:
		/* aggregate of all transmit-side error counters */
		*val = ip->ex_coll +
		    ip->late_coll +
		    ip->uflo +
		    ip->macxmt_errs +
		    ip->nocarrier;
		break;
	case ETHER_STAT_ALIGN_ERRORS:
		*val = ip->align_errs;
		break;
	case ETHER_STAT_FCS_ERRORS:
		*val = ip->fcs_errs;
		break;
	case ETHER_STAT_DEFER_XMTS:
		*val = ip->defer_xmt;
		break;
	case ETHER_STAT_FIRST_COLLISIONS:
		*val = ip->one_coll + ip->multi_coll + ip->ex_coll;
		break;
	case ETHER_STAT_MULTI_COLLISIONS:
		*val = ip->multi_coll;
		break;
	case ETHER_STAT_TX_LATE_COLLISIONS:
		*val = ip->late_coll;
		break;
	case ETHER_STAT_EX_COLLISIONS:
		*val = ip->ex_coll;
		break;
	case MAC_STAT_OVERFLOWS:
		*val = ip->oflo;
		break;
	case MAC_STAT_UNDERFLOWS:
		*val = ip->uflo;
		break;
	case ETHER_STAT_TOOSHORT_ERRORS:
		*val = ip->runt;
		break;
	case ETHER_STAT_TOOLONG_ERRORS:
		*val = ip->toolong;
		break;
	case ETHER_STAT_CARRIER_ERRORS:
		*val = ip->nocarrier;	/* reported only for "suspend" */
		break;
	case ETHER_STAT_MACXMT_ERRORS:
		*val = ip->macxmt_errs;
		break;
	case ETHER_STAT_MACRCV_ERRORS:
		*val = ip->macrcv_errs;
		break;
	default:
		return (ENOTSUP);
	}
	return (0);
}
1450
1451 void
iprb_m_propinfo(void * arg,const char * name,mac_prop_id_t id,mac_prop_info_handle_t pih)1452 iprb_m_propinfo(void *arg, const char *name, mac_prop_id_t id,
1453 mac_prop_info_handle_t pih)
1454 {
1455 iprb_t *ip = arg;
1456
1457 if (ip->miih != NULL) {
1458 mii_m_propinfo(ip->miih, name, id, pih);
1459 return;
1460 }
1461 switch (id) {
1462 case MAC_PROP_DUPLEX:
1463 case MAC_PROP_SPEED:
1464 mac_prop_info_set_perm(pih, MAC_PROP_PERM_READ);
1465 break;
1466 }
1467 }
1468
1469 int
iprb_m_getprop(void * arg,const char * name,mac_prop_id_t id,uint_t sz,void * val)1470 iprb_m_getprop(void *arg, const char *name, mac_prop_id_t id, uint_t sz,
1471 void *val)
1472 {
1473 iprb_t *ip = arg;
1474 uint64_t x;
1475
1476 if (ip->miih != NULL) {
1477 return (mii_m_getprop(ip->miih, name, id, sz, val));
1478 }
1479 switch (id) {
1480 case MAC_PROP_SPEED:
1481 x = 10000000;
1482 bcopy(&x, val, sizeof (x));
1483 return (0);
1484
1485 case MAC_PROP_DUPLEX:
1486 x = LINK_DUPLEX_UNKNOWN;
1487 bcopy(&x, val, sizeof (x));
1488 return (0);
1489 }
1490
1491 return (ENOTSUP);
1492 }
1493
1494 int
iprb_m_setprop(void * arg,const char * name,mac_prop_id_t id,uint_t sz,const void * val)1495 iprb_m_setprop(void *arg, const char *name, mac_prop_id_t id, uint_t sz,
1496 const void *val)
1497 {
1498 iprb_t *ip = arg;
1499
1500 if (ip->miih != NULL) {
1501 return (mii_m_setprop(ip->miih, name, id, sz, val));
1502 }
1503 return (ENOTSUP);
1504 }
1505
1506 mblk_t *
iprb_m_tx(void * arg,mblk_t * mp)1507 iprb_m_tx(void *arg, mblk_t *mp)
1508 {
1509 iprb_t *ip = arg;
1510 mblk_t *nmp;
1511
1512 mutex_enter(&ip->culock);
1513
1514 while (mp != NULL) {
1515 nmp = mp->b_next;
1516 mp->b_next = NULL;
1517 if (ip->suspended) {
1518 freemsg(mp);
1519 ip->nocarrier++;
1520 mp = nmp;
1521 continue;
1522 }
1523 if ((mp = iprb_send(ip, mp)) != NULL) {
1524 mp->b_next = nmp;
1525 break;
1526 }
1527 mp = nmp;
1528 }
1529 mutex_exit(&ip->culock);
1530 return (mp);
1531 }
1532
1533 void
iprb_m_ioctl(void * arg,queue_t * wq,mblk_t * mp)1534 iprb_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
1535 {
1536 iprb_t *ip = arg;
1537
1538 if ((ip->miih != NULL) && (mii_m_loop_ioctl(ip->miih, wq, mp)))
1539 return;
1540
1541 miocnak(wq, mp, 0, EINVAL);
1542 }
1543
1544 uint16_t
iprb_mii_read(void * arg,uint8_t phy,uint8_t reg)1545 iprb_mii_read(void *arg, uint8_t phy, uint8_t reg)
1546 {
1547 iprb_t *ip = arg;
1548 uint32_t mdi;
1549
1550 /*
1551 * NB: we are guaranteed by the MII layer not to be suspended.
1552 * Furthermore, we have an independent MII register.
1553 */
1554
1555 mdi = MDI_OP_RD |
1556 ((uint32_t)phy << MDI_PHYAD_SHIFT) |
1557 ((uint32_t)reg << MDI_REGAD_SHIFT);
1558
1559 PUT32(ip, CSR_MDICTL, mdi);
1560 for (int i = 0; i < 100; i++) {
1561 mdi = GET32(ip, CSR_MDICTL);
1562 if (mdi & MDI_R) {
1563 return (mdi & 0xffff);
1564 }
1565 drv_usecwait(1);
1566 }
1567 return (0xffff);
1568 }
1569
1570 void
iprb_mii_write(void * arg,uint8_t phy,uint8_t reg,uint16_t data)1571 iprb_mii_write(void *arg, uint8_t phy, uint8_t reg, uint16_t data)
1572 {
1573 iprb_t *ip = arg;
1574 uint32_t mdi;
1575
1576 mdi = MDI_OP_WR |
1577 ((uint32_t)phy << MDI_PHYAD_SHIFT) |
1578 ((uint32_t)reg << MDI_REGAD_SHIFT) |
1579 (data);
1580
1581 PUT32(ip, CSR_MDICTL, mdi);
1582 for (int i = 0; i < 100; i++) {
1583 if (GET32(ip, CSR_MDICTL) & MDI_R)
1584 break;
1585 }
1586 }
1587
1588 void
iprb_mii_notify(void * arg,link_state_t link)1589 iprb_mii_notify(void *arg, link_state_t link)
1590 {
1591 iprb_t *ip = arg;
1592
1593 mac_link_update(ip->mach, link);
1594 }
1595
/*
 * Interrupt handler.  Claims the interrupt only when the chip reports
 * a nonzero status; acknowledges all pending causes, harvests received
 * frames, restarts the receive unit if it ran out of resources, and
 * wakes transmit flow control.  Upcalls to the MAC layer are made only
 * after all locks are dropped.
 */
uint_t
iprb_intr(caddr_t arg1, caddr_t arg2)
{
	iprb_t *ip = (void *)arg1;
	uint8_t sts;
	mblk_t *mp = NULL;

	_NOTE(ARGUNUSED(arg2));

	mutex_enter(&ip->rulock);
	if (ip->suspended) {
		mutex_exit(&ip->rulock);
		return (DDI_INTR_UNCLAIMED);
	}
	sts = GET8(ip, CSR_STS);
	if (sts == 0) {
		/* No interrupt status! */
		mutex_exit(&ip->rulock);
		return (DDI_INTR_UNCLAIMED);
	}
	/* acknowledge the interrupts */
	PUT8(ip, CSR_STS, sts);

	if (sts & (STS_RNR | STS_FR)) {
		/* frame received and/or receiver went not-ready */
		mp = iprb_rx(ip);

		if ((sts & STS_RNR) &&
		    ((GET8(ip, CSR_STATE) & STATE_RUS) == STATE_RUS_NORES)) {
			/*
			 * RU ran out of resources: rebuild the RFD ring
			 * and restart the receive unit at its head.
			 * culock is needed to use the SCB command gate.
			 */
			iprb_rx_init(ip);

			mutex_enter(&ip->culock);
			PUT32(ip, CSR_GEN_PTR, ip->rxb[0].paddr);
			/* wait for the SCB */
			(void) iprb_cmd_ready(ip);
			PUT8(ip, CSR_CMD, RUC_START);
			(void) GET8(ip, CSR_CMD);	/* flush CSR */
			mutex_exit(&ip->culock);
		}
	}
	mutex_exit(&ip->rulock);

	/* deliver frames and transmit wakeups without any locks held */
	if (mp) {
		mac_rx(ip->mach, NULL, mp);
	}
	if ((sts & (STS_CNA | STS_CX)) && ip->wantw) {
		ip->wantw = B_FALSE;
		mac_tx_update(ip->mach);
	}
	return (DDI_INTR_CLAIMED);
}
1646
/*
 * Periodic (5 second) watchdog.  Detects a hung receive unit (a chip
 * erratum reportedly seen only at 10 Mbps) and a stalled command unit,
 * resetting and restarting the chip when either trips; also refreshes
 * the statistics counters.
 */
void
iprb_periodic(void *arg)
{
	iprb_t *ip = arg;
	boolean_t reset = B_FALSE;

	mutex_enter(&ip->rulock);
	if (ip->suspended || !ip->running) {
		mutex_exit(&ip->rulock);
		return;
	}

	/*
	 * If we haven't received a packet in a while, and if the link
	 * is up, then it might be a hung chip.  This problem
	 * reportedly only occurs at 10 Mbps.
	 */
	if (ip->rxhangbug &&
	    ((ip->miih == NULL) || (mii_get_speed(ip->miih) == 10000000)) &&
	    ((gethrtime() - ip->rx_wdog) > ip->rx_timeout)) {
		cmn_err(CE_CONT, "?Possible RU hang, resetting.\n");
		reset = B_TRUE;
	}

	/* update the statistics */
	mutex_enter(&ip->culock);

	if (ip->tx_wdog && ((gethrtime() - ip->tx_wdog) > ip->tx_timeout)) {
		/* transmit/CU hang? */
		cmn_err(CE_CONT, "?CU stalled, resetting.\n");
		reset = B_TRUE;
	}

	if (reset) {
		/* We want to reconfigure */
		iprb_stop(ip);
		if (iprb_start(ip) != DDI_SUCCESS) {
			iprb_error(ip, "unable to restart chip");
		}
	}

	iprb_update_stats(ip);

	mutex_exit(&ip->culock);
	mutex_exit(&ip->rulock);
}
1693
/*
 * quiesce(9E) entry point: silence the device for fast reboot without
 * using any locks or DDI services that may block.  The selective reset
 * idles the unit before the full software reset, and interrupts are
 * masked afterward.
 */
int
iprb_quiesce(dev_info_t *dip)
{
	iprb_t *ip = ddi_get_driver_private(dip);

	/* Reset, but first go into idle state */
	PUT32(ip, CSR_PORT, PORT_SEL_RESET);
	drv_usecwait(50);
	PUT32(ip, CSR_PORT, PORT_SW_RESET);
	drv_usecwait(10);
	PUT8(ip, CSR_INTCTL, INTCTL_MASK);

	return (DDI_SUCCESS);
}
1708
1709 int
iprb_suspend(dev_info_t * dip)1710 iprb_suspend(dev_info_t *dip)
1711 {
1712 iprb_t *ip = ddi_get_driver_private(dip);
1713
1714 if (ip->miih)
1715 mii_suspend(ip->miih);
1716
1717 mutex_enter(&ip->rulock);
1718 mutex_enter(&ip->culock);
1719 if (!ip->suspended) {
1720 ip->suspended = B_TRUE;
1721 if (ip->running) {
1722 iprb_update_stats(ip);
1723 iprb_stop(ip);
1724 }
1725 }
1726 mutex_exit(&ip->culock);
1727 mutex_exit(&ip->rulock);
1728 return (DDI_SUCCESS);
1729 }
1730
1731 int
iprb_resume(dev_info_t * dip)1732 iprb_resume(dev_info_t *dip)
1733 {
1734 iprb_t *ip = ddi_get_driver_private(dip);
1735
1736 mutex_enter(&ip->rulock);
1737 mutex_enter(&ip->culock);
1738
1739 ip->suspended = B_FALSE;
1740 if (ip->running) {
1741 if (iprb_start(ip) != DDI_SUCCESS) {
1742 iprb_error(ip, "unable to restart chip!");
1743 ip->suspended = B_TRUE;
1744 mutex_exit(&ip->culock);
1745 mutex_exit(&ip->rulock);
1746 return (DDI_FAILURE);
1747 }
1748 }
1749
1750 mutex_exit(&ip->culock);
1751 mutex_exit(&ip->rulock);
1752 if (ip->miih)
1753 mii_resume(ip->miih);
1754 return (DDI_SUCCESS);
1755 }
1756
1757 int
iprb_ddi_attach(dev_info_t * dip,ddi_attach_cmd_t cmd)1758 iprb_ddi_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
1759 {
1760 switch (cmd) {
1761 case DDI_ATTACH:
1762 return (iprb_attach(dip));
1763
1764 case DDI_RESUME:
1765 return (iprb_resume(dip));
1766
1767 default:
1768 return (DDI_FAILURE);
1769 }
1770 }
1771
1772 int
iprb_ddi_detach(dev_info_t * dip,ddi_detach_cmd_t cmd)1773 iprb_ddi_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
1774 {
1775 switch (cmd) {
1776 case DDI_DETACH:
1777 return (iprb_detach(dip));
1778
1779 case DDI_SUSPEND:
1780 return (iprb_suspend(dip));
1781
1782 default:
1783 return (DDI_FAILURE);
1784 }
1785 }
1786
1787 void
iprb_error(iprb_t * ip,const char * fmt,...)1788 iprb_error(iprb_t *ip, const char *fmt, ...)
1789 {
1790 va_list ap;
1791 char buf[256];
1792
1793 va_start(ap, fmt);
1794 (void) vsnprintf(buf, sizeof (buf), fmt, ap);
1795 va_end(ap);
1796
1797 cmn_err(CE_WARN, "%s%d: %s",
1798 ddi_driver_name(ip->dip), ddi_get_instance(ip->dip), buf);
1799 }
1800