1 /*
2 * This file and its contents are supplied under the terms of the
3 * Common Development and Distribution License ("CDDL"), version 1.0.
4 * You may only use this file in accordance with the terms of version
5 * 1.0 of the CDDL.
6 *
7 * A full copy of the text of the CDDL should have accompanied this
8 * source. A copy of the CDDL is also available via the Internet at
9 * http://www.illumos.org/license/CDDL.
10 */
11
12 /*
13 * Copyright 2014 Nexenta Systems, Inc. All rights reserved.
14 * Copyright 2021 OmniOS Community Edition (OmniOSce) Association.
15 * Copyright 2022 Oxide Computer Company
16 */
17
18 /*
19 * Intel Pro/100B Ethernet Driver
20 */
21
22 #include <sys/types.h>
23 #include <sys/modctl.h>
24 #include <sys/conf.h>
25 #include <sys/kmem.h>
26 #include <sys/ksynch.h>
27 #include <sys/cmn_err.h>
28 #include <sys/note.h>
29 #include <sys/pci.h>
30 #include <sys/pci_cap.h>
31 #include <sys/ethernet.h>
32 #include <sys/mii.h>
33 #include <sys/miiregs.h>
34 #include <sys/mac.h>
35 #include <sys/mac_ether.h>
36 #include <sys/ethernet.h>
37 #include <sys/vlan.h>
38 #include <sys/list.h>
39 #include <sys/sysmacros.h>
40 #include <sys/varargs.h>
41 #include <sys/stream.h>
42 #include <sys/strsun.h>
43 #include <sys/ddi.h>
44 #include <sys/sunddi.h>
45
46 #include "iprb.h"
47 #include "rcvbundl.h"
48
49 /*
50 * Intel has openly documented the programming interface for these
51 * parts in the "Intel 8255x 10/100 Mbps Ethernet Controller Family
52 * Open Source Software Developer Manual".
53 *
54 * While some open source systems have utilized many of the features
55 * of some models in this family (especially scatter gather and IP
56 * checksum support), we have elected to offer only the basic
57 * functionality. These are only 10/100 parts, and the additional
58 * complexity is not justified by the minimal performance benefit.
59 * KISS. So, we are only supporting the simple 82557 features.
60 */
61
62 static uint16_t iprb_mii_read(void *, uint8_t, uint8_t);
63 static void iprb_mii_write(void *, uint8_t, uint8_t, uint16_t);
64 static void iprb_mii_notify(void *, link_state_t);
65 static int iprb_attach(dev_info_t *);
66 static int iprb_detach(dev_info_t *);
67 static int iprb_quiesce(dev_info_t *);
68 static int iprb_suspend(dev_info_t *);
69 static int iprb_resume(dev_info_t *);
70 static int iprb_m_stat(void *, uint_t, uint64_t *);
71 static int iprb_m_start(void *);
72 static void iprb_m_stop(void *);
73 static int iprb_m_promisc(void *, boolean_t);
74 static int iprb_m_multicst(void *, boolean_t, const uint8_t *);
75 static int iprb_m_unicst(void *, const uint8_t *);
76 static mblk_t *iprb_m_tx(void *, mblk_t *);
77 static void iprb_m_ioctl(void *, queue_t *, mblk_t *);
78 static int iprb_m_setprop(void *, const char *, mac_prop_id_t, uint_t,
79 const void *);
80 static int iprb_m_getprop(void *, const char *, mac_prop_id_t, uint_t,
81 void *);
82 static void iprb_m_propinfo(void *, const char *, mac_prop_id_t,
83 mac_prop_info_handle_t);
84 static void iprb_destroy(iprb_t *);
85 static int iprb_configure(iprb_t *);
86 static void iprb_eeprom_sendbits(iprb_t *, uint32_t, uint8_t);
87 static uint16_t iprb_eeprom_read(iprb_t *, uint16_t);
88 static void iprb_identify(iprb_t *);
89 static int iprb_cmd_submit(iprb_t *, uint16_t);
90 static void iprb_cmd_reclaim(iprb_t *);
91 static int iprb_cmd_ready(iprb_t *);
92 static int iprb_cmd_drain(iprb_t *);
93 static void iprb_rx_add(iprb_t *);
94 static void iprb_rx_init(iprb_t *);
95 static mblk_t *iprb_rx(iprb_t *);
96 static mblk_t *iprb_send(iprb_t *, mblk_t *);
97 static uint_t iprb_intr(caddr_t, caddr_t);
98 static void iprb_periodic(void *);
99 static int iprb_add_intr(iprb_t *);
100 static int iprb_dma_alloc(iprb_t *, iprb_dma_t *, size_t);
101 static void iprb_dma_free(iprb_dma_t *);
102 static iprb_dma_t *iprb_cmd_next(iprb_t *);
103 static int iprb_set_config(iprb_t *);
104 static int iprb_set_unicast(iprb_t *);
105 static int iprb_set_multicast(iprb_t *);
106 static int iprb_set_ucode(iprb_t *);
107 static void iprb_update_stats(iprb_t *);
108 static int iprb_start(iprb_t *);
109 static void iprb_stop(iprb_t *);
110 static int iprb_ddi_attach(dev_info_t *, ddi_attach_cmd_t);
111 static int iprb_ddi_detach(dev_info_t *, ddi_detach_cmd_t);
112 static void iprb_error(iprb_t *, const char *, ...);
113
/*
 * MII ops vector handed to the common MII framework: register
 * accessors plus a link-state notification hook.  No PHY reset
 * routine is supplied.
 */
static mii_ops_t iprb_mii_ops = {
	MII_OPS_VERSION,
	iprb_mii_read,
	iprb_mii_write,
	iprb_mii_notify,
	NULL,		/* reset */
};
121
/*
 * GLDv3 MAC callback vector.  The first word advertises which of the
 * optional entry points (ioctl and the property handlers) are present.
 */
static mac_callbacks_t iprb_m_callbacks = {
	MC_IOCTL | MC_SETPROP | MC_GETPROP | MC_PROPINFO,
	iprb_m_stat,
	iprb_m_start,
	iprb_m_stop,
	iprb_m_promisc,
	iprb_m_multicst,
	iprb_m_unicst,
	iprb_m_tx,
	NULL,
	iprb_m_ioctl,	/* mc_ioctl */
	NULL,		/* mc_getcapab */
	NULL,		/* mc_open */
	NULL,		/* mc_close */
	iprb_m_setprop,
	iprb_m_getprop,
	iprb_m_propinfo
};
140
141
142 /*
143 * Stream information
144 */
145 DDI_DEFINE_STREAM_OPS(iprb_devops, nulldev, nulldev,
146 iprb_ddi_attach, iprb_ddi_detach, nodev, NULL, D_MP, NULL, iprb_quiesce);
147
148 static struct modldrv iprb_modldrv = {
149 &mod_driverops, /* drv_modops */
150 "Intel 8255x Ethernet", /* drv_linkinfo */
151 &iprb_devops /* drv_dev_ops */
152 };
153
154 static struct modlinkage iprb_modlinkage = {
155 MODREV_1, /* ml_rev */
156 { &iprb_modldrv, NULL } /* ml_linkage */
157 };
158
159
/*
 * Access attributes for the memory-mapped device registers:
 * little-endian with strict ordering.
 */
static ddi_device_acc_attr_t acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};

/*
 * Access attributes for DMA data buffers (command blocks, RFDs,
 * statistics): no byte swapping, store caching permitted.
 */
static ddi_device_acc_attr_t buf_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STORECACHING_OK_ACC
};
171
172 /*
173 * The 8225x is a 32-bit addressing engine, but it can only address up
174 * to 31 bits on a single transaction. (Far less in reality it turns
175 * out.) Statistics buffers have to be 16-byte aligned, and as we
176 * allocate individual data pieces for other things, there is no
177 * compelling reason to use another attribute with support for less
178 * strict alignment.
179 */
/* Single-cookie, 16-byte-aligned DMA used for every structure we allocate. */
static ddi_dma_attr_t dma_attr = {
	DMA_ATTR_V0,		/* dma_attr_version */
	0,			/* dma_attr_addr_lo */
	0xFFFFFFFFU,		/* dma_attr_addr_hi */
	0x7FFFFFFFU,		/* dma_attr_count_max */
	16,			/* dma_attr_align */
	0x100,			/* dma_attr_burstsizes */
	1,			/* dma_attr_minxfer */
	0xFFFFFFFFU,		/* dma_attr_maxxfer */
	0xFFFFFFFFU,		/* dma_attr_seg */
	1,			/* dma_attr_sgllen */
	1,			/* dma_attr_granular */
	0			/* dma_attr_flags */
};
194
/*
 * Expand the per-device receive bundling microcode images (defined in
 * rcvbundl.h) into static arrays named <device>_WORDS.
 */
#define	DECL_UCODE(x)						\
	static const uint32_t x ## _WORDS[] = x ## _RCVBUNDLE_UCODE
DECL_UCODE(D101_A);
DECL_UCODE(D101_B0);
DECL_UCODE(D101M_B);
DECL_UCODE(D101S);
DECL_UCODE(D102_B);
DECL_UCODE(D102_C);
DECL_UCODE(D102_E);

/* Ethernet broadcast address; used to classify outbound frames in iprb_send */
static uint8_t iprb_bcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
206
207 /*
208 * We don't bother allowing for tuning of the CPU saver algorithm.
209 * The ucode has reasonable defaults built-in. However, some variants
210 * apparently have bug fixes delivered via this ucode, so we still
211 * need to support the ucode upload.
212 */
213 typedef struct {
214 uint8_t rev;
215 uint8_t length;
216 const uint32_t *ucode;
217 } iprb_ucode_t;
218
219 #define UCODE(x) \
220 sizeof (x ## _WORDS) / sizeof (uint32_t), x ## _WORDS
221
222 static const iprb_ucode_t iprb_ucode[] = {
223 { REV_82558_A4, UCODE(D101_A) },
224 { REV_82558_B0, UCODE(D101_B0) },
225 { REV_82559_A0, UCODE(D101M_B) },
226 { REV_82559S_A, UCODE(D101S) },
227 { REV_82550, UCODE(D102_B) },
228 { REV_82550_C, UCODE(D102_C) },
229 { REV_82551_F, UCODE(D102_E) },
230 { 0 },
231 };
232
233 int
_init(void)234 _init(void)
235 {
236 int rv;
237 mac_init_ops(&iprb_devops, "iprb");
238 if ((rv = mod_install(&iprb_modlinkage)) != DDI_SUCCESS) {
239 mac_fini_ops(&iprb_devops);
240 }
241 return (rv);
242 }
243
244 int
_fini(void)245 _fini(void)
246 {
247 int rv;
248 if ((rv = mod_remove(&iprb_modlinkage)) == DDI_SUCCESS) {
249 mac_fini_ops(&iprb_devops);
250 }
251 return (rv);
252 }
253
/*
 * Report module information to the modinfo(8) framework.
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&iprb_modlinkage, modinfop));
}
259
/*
 * Attach the device: allocate soft state, map PCI config and device
 * registers, reset the chip, read the factory MAC address and errata
 * bits from the EEPROM, allocate the DMA rings, hook up the interrupt,
 * and finally register with the MAC framework.  On any failure the
 * partially-constructed state is torn down via iprb_destroy().
 */
int
iprb_attach(dev_info_t *dip)
{
	iprb_t *ip;
	uint16_t w;
	int i;
	mac_register_t *macp;

	ip = kmem_zalloc(sizeof (*ip), KM_SLEEP);
	ddi_set_driver_private(dip, ip);
	ip->dip = dip;

	list_create(&ip->mcast, sizeof (struct iprb_mcast),
	    offsetof(struct iprb_mcast, node));

	/* we don't support high level interrupts, so we don't need cookies */
	mutex_init(&ip->culock, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&ip->rulock, NULL, MUTEX_DRIVER, NULL);

	if (pci_config_setup(dip, &ip->pcih) != DDI_SUCCESS) {
		iprb_error(ip, "unable to map configuration space");
		iprb_destroy(ip);
		return (DDI_FAILURE);
	}

	if (ddi_regs_map_setup(dip, 1, &ip->regs, 0, 0, &acc_attr,
	    &ip->regsh) != DDI_SUCCESS) {
		iprb_error(ip, "unable to map device registers");
		iprb_destroy(ip);
		return (DDI_FAILURE);
	}

	/* Reset, but first go into idle state */
	PUT32(ip, CSR_PORT, PORT_SEL_RESET);
	drv_usecwait(10);
	PUT32(ip, CSR_PORT, PORT_SW_RESET);
	drv_usecwait(10);
	/* mask device interrupts until we are ready for them */
	PUT8(ip, CSR_INTCTL, INTCTL_MASK);
	(void) GET8(ip, CSR_INTCTL);	/* flush CSR write */

	/*
	 * Precalculate watchdog times.
	 */
	ip->tx_timeout = TX_WATCHDOG;
	ip->rx_timeout = RX_WATCHDOG;

	iprb_identify(ip);

	/* Obtain our factory MAC address (EEPROM words 0-2, little-endian) */
	w = iprb_eeprom_read(ip, 0);
	ip->factaddr[0] = w & 0xff;
	ip->factaddr[1] = w >> 8;
	w = iprb_eeprom_read(ip, 1);
	ip->factaddr[2] = w & 0xff;
	ip->factaddr[3] = w >> 8;
	w = iprb_eeprom_read(ip, 2);
	ip->factaddr[4] = w & 0xff;
	ip->factaddr[5] = w >> 8;
	bcopy(ip->factaddr, ip->curraddr, 6);

	if (ip->resumebug) {
		/*
		 * Generally, most devices we will ever see will
		 * already have fixed firmware.  Since I can't verify
		 * the validity of the fix (no suitably downrev
		 * hardware), we'll just do our best to avoid it for
		 * devices that exhibit this behavior.
		 */
		if ((iprb_eeprom_read(ip, 10) & 0x02) == 0) {
			/* EEPROM fix was already applied, assume safe. */
			ip->resumebug = B_FALSE;
		}
	}

	/* EEPROM word 3 low bits signal the receiver-hang errata workaround */
	if ((iprb_eeprom_read(ip, 3) & 0x3) != 0x3) {
		cmn_err(CE_CONT, "?Enabling RX errata workaround.\n");
		ip->rxhangbug = B_TRUE;
	}

	/* Determine whether we have an MII or a legacy 80c24 */
	w = iprb_eeprom_read(ip, 6);
	if ((w & 0x3f00) != 0x0600) {
		if ((ip->miih = mii_alloc(ip, dip, &iprb_mii_ops)) == NULL) {
			iprb_error(ip, "unable to allocate MII ops vector");
			iprb_destroy(ip);
			return (DDI_FAILURE);
		}
		if (ip->canpause) {
			mii_set_pauseable(ip->miih, B_TRUE, B_FALSE);
		}
	}

	/* Allocate cmds and tx region */
	for (i = 0; i < NUM_TX; i++) {
		/* Command blocks */
		if (iprb_dma_alloc(ip, &ip->cmds[i], CB_SIZE) != DDI_SUCCESS) {
			iprb_destroy(ip);
			return (DDI_FAILURE);
		}
	}

	for (i = 0; i < NUM_TX; i++) {
		iprb_dma_t *cb = &ip->cmds[i];
		/* Link the command blocks into a ring */
		PUTCB32(cb, CB_LNK_OFFSET, (ip->cmds[(i + 1) % NUM_TX].paddr));
	}

	for (i = 0; i < NUM_RX; i++) {
		/* Rx packet buffers */
		if (iprb_dma_alloc(ip, &ip->rxb[i], RFD_SIZE) != DDI_SUCCESS) {
			iprb_destroy(ip);
			return (DDI_FAILURE);
		}
	}
	/* Hardware statistics dump area */
	if (iprb_dma_alloc(ip, &ip->stats, STATS_SIZE) != DDI_SUCCESS) {
		iprb_destroy(ip);
		return (DDI_FAILURE);
	}

	if (iprb_add_intr(ip) != DDI_SUCCESS) {
		iprb_destroy(ip);
		return (DDI_FAILURE);
	}

	if ((macp = mac_alloc(MAC_VERSION)) == NULL) {
		iprb_error(ip, "unable to allocate mac structure");
		iprb_destroy(ip);
		return (DDI_FAILURE);
	}

	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	macp->m_driver = ip;
	macp->m_dip = dip;
	macp->m_src_addr = ip->curraddr;
	macp->m_callbacks = &iprb_m_callbacks;
	macp->m_min_sdu = 0;
	macp->m_max_sdu = ETHERMTU;
	macp->m_margin = VLAN_TAGSZ;
	if (mac_register(macp, &ip->mach) != 0) {
		iprb_error(ip, "unable to register mac with framework");
		mac_free(macp);
		iprb_destroy(ip);
		return (DDI_FAILURE);
	}

	/* the framework has its own copy now; free the registration handle */
	mac_free(macp);
	return (DDI_SUCCESS);
}
408
409 int
iprb_detach(dev_info_t * dip)410 iprb_detach(dev_info_t *dip)
411 {
412 iprb_t *ip;
413
414 ip = ddi_get_driver_private(dip);
415 ASSERT(ip != NULL);
416
417 if (mac_disable(ip->mach) != 0)
418 return (DDI_FAILURE);
419
420 (void) mac_unregister(ip->mach);
421 iprb_destroy(ip);
422 return (DDI_SUCCESS);
423 }
424
425 int
iprb_add_intr(iprb_t * ip)426 iprb_add_intr(iprb_t *ip)
427 {
428 int actual;
429
430 if (ddi_intr_alloc(ip->dip, &ip->intrh, DDI_INTR_TYPE_FIXED, 0, 1,
431 &actual, DDI_INTR_ALLOC_STRICT) != DDI_SUCCESS) {
432 iprb_error(ip, "failed allocating interrupt handle");
433 return (DDI_FAILURE);
434 }
435
436 if (ddi_intr_add_handler(ip->intrh, iprb_intr, ip, NULL) !=
437 DDI_SUCCESS) {
438 (void) ddi_intr_free(ip->intrh);
439 ip->intrh = NULL;
440 iprb_error(ip, "failed adding interrupt handler");
441 return (DDI_FAILURE);
442 }
443 if (ddi_intr_enable(ip->intrh) != DDI_SUCCESS) {
444 (void) ddi_intr_remove_handler(ip->intrh);
445 (void) ddi_intr_free(ip->intrh);
446 ip->intrh = NULL;
447 iprb_error(ip, "failed enabling interrupt");
448 return (DDI_FAILURE);
449 }
450 return (DDI_SUCCESS);
451 }
452
/*
 * Allocate a single contiguous, zeroed DMA region of the given size
 * and record its handles and device-visible address in *h.
 *
 * NOTE(review): on a mid-sequence failure the earlier handle/memory
 * is left in *h; callers recover by calling iprb_destroy(), whose
 * iprb_dma_free() copes with partially-initialized state.
 */
int
iprb_dma_alloc(iprb_t *ip, iprb_dma_t *h, size_t size)
{
	size_t rlen;
	ddi_dma_cookie_t dmac;
	uint_t ndmac;

	if (ddi_dma_alloc_handle(ip->dip, &dma_attr, DDI_DMA_SLEEP, NULL,
	    &h->dmah) != DDI_SUCCESS) {
		iprb_error(ip, "unable to allocate dma handle");
		return (DDI_FAILURE);
	}
	if (ddi_dma_mem_alloc(h->dmah, size, &buf_attr, DDI_DMA_CONSISTENT,
	    DDI_DMA_SLEEP, NULL, &h->vaddr, &rlen, &h->acch) != DDI_SUCCESS) {
		iprb_error(ip, "unable to allocate dma memory");
		return (DDI_FAILURE);
	}
	bzero(h->vaddr, size);
	/* dma_attr_sgllen == 1, so exactly one cookie describes the region */
	if (ddi_dma_addr_bind_handle(h->dmah, NULL, h->vaddr, size,
	    DDI_DMA_CONSISTENT | DDI_DMA_RDWR, DDI_DMA_SLEEP, NULL,
	    &dmac, &ndmac) != DDI_DMA_MAPPED) {
		iprb_error(ip, "unable to map command memory");
		return (DDI_FAILURE);
	}
	h->paddr = dmac.dmac_address;
	return (DDI_SUCCESS);
}
480
481 void
iprb_dma_free(iprb_dma_t * h)482 iprb_dma_free(iprb_dma_t *h)
483 {
484 if (h->paddr != 0)
485 (void) ddi_dma_unbind_handle(h->dmah);
486 h->paddr = 0;
487 if (h->acch != NULL)
488 ddi_dma_mem_free(&h->acch);
489 h->acch = NULL;
490 if (h->dmah != NULL)
491 ddi_dma_free_handle(&h->dmah);
492 h->dmah = NULL;
493 }
494
/*
 * Tear down all soft state created by iprb_attach().  Must cope with
 * partially-constructed state, since the attach error paths call it
 * at every stage of bring-up.
 */
void
iprb_destroy(iprb_t *ip)
{
	int i;
	iprb_mcast_t *mc;

	/* shut down interrupts */
	if (ip->intrh != NULL) {
		(void) ddi_intr_disable(ip->intrh);
		(void) ddi_intr_remove_handler(ip->intrh);
		(void) ddi_intr_free(ip->intrh);
	}
	/* release DMA resources */
	for (i = 0; i < NUM_TX; i++) {
		iprb_dma_free(&ip->cmds[i]);
	}
	for (i = 0; i < NUM_RX; i++) {
		iprb_dma_free(&ip->rxb[i]);
	}
	iprb_dma_free(&ip->stats);

	if (ip->miih)
		mii_free(ip->miih);

	/* clean up the multicast list */
	while ((mc = list_head(&ip->mcast)) != NULL) {
		list_remove(&ip->mcast, mc);
		kmem_free(mc, sizeof (*mc));
	}

	/* tear down register mappings */
	if (ip->pcih)
		pci_config_teardown(&ip->pcih);
	if (ip->regsh)
		ddi_regs_map_free(&ip->regsh);

	/* clean the dip */
	ddi_set_driver_private(ip->dip, NULL);

	/* list must be emptied (above) before it can be destroyed */
	list_destroy(&ip->mcast);
	mutex_destroy(&ip->culock);
	mutex_destroy(&ip->rulock);

	/* and finally toss the structure itself */
	kmem_free(ip, sizeof (*ip));
}
541
/*
 * Read the PCI device/revision IDs and derive per-chip capability and
 * errata flags:
 *   canpause  - flow control configurable (used in iprb_set_config)
 *   canmwi    - memory-write-invalidate may be enabled (config byte 3)
 *   is557     - original 82557, needing different config bytes 12/18
 *   resumebug - requires the CU-resume NOP workaround (iprb_cmd_submit)
 */
void
iprb_identify(iprb_t *ip)
{
	ip->devid = pci_config_get16(ip->pcih, PCI_CONF_DEVID);
	ip->revid = pci_config_get8(ip->pcih, PCI_CONF_REVID);

	switch (ip->devid) {
	case 0x1229:	/* 8255x family */
	case 0x1030:	/* Intel InBusiness */

		if (ip->revid >= REV_82558_A4) {
			ip->canpause = B_TRUE;
			ip->canmwi = B_TRUE;
		} else {
			ip->is557 = B_TRUE;
		}
		if (ip->revid >= REV_82559_A0)
			ip->resumebug = B_TRUE;
		break;

	case 0x1209:	/* Embedded 82559ER */
		ip->canpause = B_TRUE;
		ip->resumebug = B_TRUE;
		ip->canmwi = B_TRUE;
		break;

	case 0x2449:	/* ICH2 */
	case 0x1031:	/* Pro/100 VE (ICH3) */
	case 0x1032:	/* Pro/100 VE (ICH3) */
	case 0x1033:	/* Pro/100 VM (ICH3) */
	case 0x1034:	/* Pro/100 VM (ICH3) */
	case 0x1038:	/* Pro/100 VM (ICH3) */
		ip->resumebug = B_TRUE;
		if (ip->revid >= REV_82558_A4)
			ip->canpause = B_TRUE;
		break;

	default:
		if (ip->revid >= REV_82558_A4)
			ip->canpause = B_TRUE;
		break;
	}

	/* Allow property override MWI support - not normally needed. */
	if (ddi_prop_get_int(DDI_DEV_T_ANY, ip->dip, 0, "MWIEnable", 1) == 0) {
		ip->canmwi = B_FALSE;
	}
}
590
/*
 * Bit-bang the top `nbits` bits of `val` (MSB first) out to the
 * serial EEPROM.  Each bit is presented on EEDI and clocked with a
 * raise/lower of EESK while chip select (EECS) is held.
 */
void
iprb_eeprom_sendbits(iprb_t *ip, uint32_t val, uint8_t nbits)
{
	uint32_t mask;
	uint16_t x;

	mask = 1U << (nbits - 1);
	while (mask) {
		x = (mask & val) ? EEPROM_EEDI : 0;
		/* present the data bit, then pulse the clock */
		PUT16(ip, CSR_EECTL, x | EEPROM_EECS);
		drv_usecwait(100);
		PUT16(ip, CSR_EECTL, x | EEPROM_EESK | EEPROM_EECS);
		drv_usecwait(100);
		PUT16(ip, CSR_EECTL, x | EEPROM_EECS);
		drv_usecwait(100);
		mask >>= 1;
	}
}
609
/*
 * Read one 16-bit word from the serial EEPROM at `address`.
 *
 * The EEPROM's address width is discovered lazily: while clocking out
 * the address bits the device drives EEDO low as a "dummy zero" once
 * it has received a full address, which tells us how many address
 * bits this part wants.  The first read (of word 0) learns and caches
 * that width in ip->eeprom_bits.
 */
uint16_t
iprb_eeprom_read(iprb_t *ip, uint16_t address)
{
	uint16_t val;
	int mask;
	uint16_t n;
	uint16_t bits;

	/* if we don't know the address size yet call again to determine it */
	if ((address != 0) && (ip->eeprom_bits == 0))
		(void) iprb_eeprom_read(ip, 0);

	if ((bits = ip->eeprom_bits) == 0) {
		/* sizing pass: clock out up to 8 address bits of zero */
		bits = 8;
		ASSERT(address == 0);
	}
	/* enable the EEPROM chip select */
	PUT16(ip, CSR_EECTL, EEPROM_EECS);
	drv_usecwait(100);

	/* send a read command (opcode 110b) */
	iprb_eeprom_sendbits(ip, 6, 3);
	n = 0;
	for (mask = (1U << (bits - 1)); mask != 0; mask >>= 1) {
		uint16_t x = (mask & address) ? EEPROM_EEDI : 0;
		PUT16(ip, CSR_EECTL, x | EEPROM_EECS);
		drv_usecwait(100);
		PUT16(ip, CSR_EECTL, x | EEPROM_EESK | EEPROM_EECS);
		drv_usecwait(100);
		PUT16(ip, CSR_EECTL, x | EEPROM_EECS);
		drv_usecwait(100);

		n++;
		/* check the dummy 0 bit */
		if ((GET16(ip, CSR_EECTL) & EEPROM_EEDO) == 0) {
			if (ip->eeprom_bits == 0) {
				ip->eeprom_bits = n;
				cmn_err(CE_CONT, "?EEPROM size %d words.\n",
				    1U << ip->eeprom_bits);
			}
			break;
		}
	}
	if (n != ip->eeprom_bits) {
		iprb_error(ip, "cannot determine EEPROM size (%d, %d)",
		    ip->eeprom_bits, n);
	}

	/* shift out a 16-bit word, MSB first */
	val = 0;
	for (mask = 0x8000; mask; mask >>= 1) {
		PUT16(ip, CSR_EECTL, EEPROM_EECS | EEPROM_EESK);
		drv_usecwait(100);
		if (GET16(ip, CSR_EECTL) & EEPROM_EEDO)
			val |= mask;
		drv_usecwait(100);
		PUT16(ip, CSR_EECTL, EEPROM_EECS);
		drv_usecwait(100);
	}

	/* and disable the eeprom */
	PUT16(ip, CSR_EECTL, 0);
	drv_usecwait(100);

	return (val);
}
676
677 int
iprb_cmd_ready(iprb_t * ip)678 iprb_cmd_ready(iprb_t *ip)
679 {
680 /* wait for pending SCB commands to be accepted */
681 for (int cnt = 1000000; cnt != 0; cnt -= 10) {
682 if (GET8(ip, CSR_CMD) == 0) {
683 return (DDI_SUCCESS);
684 }
685 drv_usecwait(10);
686 }
687 iprb_error(ip, "timeout waiting for chip to become ready");
688 return (DDI_FAILURE);
689 }
690
/*
 * Reclaim completed command blocks from the tail of the CB ring.
 * Stops at the first CB the device has not yet marked complete.
 * The transmit watchdog timestamp is refreshed while commands remain
 * outstanding and cleared once the ring is empty.
 */
void
iprb_cmd_reclaim(iprb_t *ip)
{
	while (ip->cmd_count) {
		iprb_dma_t *cb = &ip->cmds[ip->cmd_tail];

		/* pick up the device's completion status for this CB */
		SYNCCB(cb, CB_STS_OFFSET, 2, DDI_DMA_SYNC_FORKERNEL);
		if ((GETCB16(cb, CB_STS_OFFSET) & CB_STS_C) == 0) {
			break;
		}

		ip->cmd_tail++;
		ip->cmd_tail %= NUM_TX;
		ip->cmd_count--;
		if (ip->cmd_count == 0) {
			ip->tx_wdog = 0;
		} else {
			ip->tx_wdog = gethrtime();
		}
	}
}
712
713 int
iprb_cmd_drain(iprb_t * ip)714 iprb_cmd_drain(iprb_t *ip)
715 {
716 for (int i = 1000000; i; i -= 10) {
717 iprb_cmd_reclaim(ip);
718 if (ip->cmd_count == 0)
719 return (DDI_SUCCESS);
720 drv_usecwait(10);
721 }
722 iprb_error(ip, "time out waiting for commands to drain");
723 return (DDI_FAILURE);
724 }
725
/*
 * Submit a command in the next free command block and kick the CU.
 *
 * Each new CB is written with the suspend (S) bit set, and the S bit
 * is then cleared from the previously submitted CB, so the CU chases
 * a moving "suspend here" marker along the ring.  A CU RESUME then
 * restarts it if it had suspended.  The statement ordering and DMA
 * syncs here are load-bearing; do not reorder casually.
 */
int
iprb_cmd_submit(iprb_t *ip, uint16_t cmd)
{
	iprb_dma_t *ncb = &ip->cmds[ip->cmd_head];
	iprb_dma_t *lcb = &ip->cmds[ip->cmd_last];

	/* If this command will consume the last CB, interrupt when done */
	ASSERT((ip->cmd_count) < NUM_TX);
	if (ip->cmd_count == (NUM_TX - 1)) {
		cmd |= CB_CMD_I;
	}

	/* clear the status entry */
	PUTCB16(ncb, CB_STS_OFFSET, 0);

	/* suspend upon completion of this new command */
	cmd |= CB_CMD_S;
	PUTCB16(ncb, CB_CMD_OFFSET, cmd);
	SYNCCB(ncb, 0, 0, DDI_DMA_SYNC_FORDEV);

	/* clear the suspend flag from the last submitted command */
	SYNCCB(lcb, CB_CMD_OFFSET, 2, DDI_DMA_SYNC_FORKERNEL);
	PUTCB16(lcb, CB_CMD_OFFSET, GETCB16(lcb, CB_CMD_OFFSET) & ~CB_CMD_S);
	SYNCCB(lcb, CB_CMD_OFFSET, 2, DDI_DMA_SYNC_FORDEV);

	/*
	 * If the chip has a resume bug, then we need to try this as a work
	 * around.  Some anecdotal evidence is that this will help solve
	 * the resume bug.  Its a performance hit, but only if the EEPROM
	 * is not updated.  (In theory we could do this only for 10Mbps HDX,
	 * but since it should just about never get used, we keep it simple.)
	 */
	if (ip->resumebug) {
		if (iprb_cmd_ready(ip) != DDI_SUCCESS)
			return (DDI_FAILURE);
		PUT8(ip, CSR_CMD, CUC_NOP);
		(void) GET8(ip, CSR_CMD);	/* flush CSR */
		drv_usecwait(1);
	}

	/* wait for the SCB to be ready to accept a new command */
	if (iprb_cmd_ready(ip) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * Finally we can resume the CU.  Note that if this the first
	 * command in the sequence (i.e. if the CU is IDLE), or if the
	 * CU is already busy working, then this CU resume command
	 * will not have any effect.
	 */
	PUT8(ip, CSR_CMD, CUC_RESUME);
	(void) GET8(ip, CSR_CMD);	/* flush CSR */

	/* advance the ring bookkeeping and arm the transmit watchdog */
	ip->tx_wdog = gethrtime();
	ip->cmd_last = ip->cmd_head;
	ip->cmd_head++;
	ip->cmd_head %= NUM_TX;
	ip->cmd_count++;

	return (DDI_SUCCESS);
}
788
789 iprb_dma_t *
iprb_cmd_next(iprb_t * ip)790 iprb_cmd_next(iprb_t *ip)
791 {
792 if (ip->cmd_count >= NUM_TX) {
793 return (NULL);
794 }
795 return (&ip->cmds[ip->cmd_head]);
796 }
797
798 int
iprb_set_unicast(iprb_t * ip)799 iprb_set_unicast(iprb_t *ip)
800 {
801 iprb_dma_t *cb;
802
803 ASSERT(mutex_owned(&ip->culock));
804
805 if ((cb = iprb_cmd_next(ip)) == NULL)
806 return (DDI_FAILURE);
807
808 PUTCBEA(cb, CB_IAS_ADR_OFFSET, ip->curraddr);
809 return (iprb_cmd_submit(ip, CB_CMD_IAS));
810 }
811
/*
 * Program the hardware multicast filter with the current list of
 * subscribed addresses via an MCS command block.
 */
int
iprb_set_multicast(iprb_t *ip)
{
	iprb_dma_t *cb;
	iprb_mcast_t *mc;
	int i;
	list_t *l;

	ASSERT(mutex_owned(&ip->culock));

	if ((ip->nmcast <= 0) || (ip->nmcast > CB_MCS_CNT_MAX)) {
		/*
		 * Only send the list if the total number of multicast
		 * address is nonzero and small enough to fit.  We
		 * don't error out if it is too big, because in that
		 * case we will use the "allmulticast" support
		 * via iprb_set_config instead.
		 */
		return (DDI_SUCCESS);
	}

	if ((cb = iprb_cmd_next(ip)) == NULL) {
		return (DDI_FAILURE);
	}

	/* copy each 6-byte address into the MCS block back to back */
	l = &ip->mcast;
	for (mc = list_head(l), i = 0; mc; mc = list_next(l, mc), i++) {
		PUTCBEA(cb, CB_MCS_ADR_OFFSET + (i * 6), mc->addr);
	}
	ASSERT(i == ip->nmcast);
	PUTCB16(cb, CB_MCS_CNT_OFFSET, i);
	return (iprb_cmd_submit(ip, CB_CMD_MCS));
}
845
/*
 * Submit a CONFIGURE command block.  The 22 configuration bytes follow
 * the layout in the Intel 8255x developer manual; the driver state
 * that each byte depends on is noted below.  Magic values not derived
 * from state are the manual's recommended defaults (TODO confirm each
 * against the manual before changing).
 */
int
iprb_set_config(iprb_t *ip)
{
	iprb_dma_t *cb;

	ASSERT(mutex_owned(&ip->culock));
	if ((cb = iprb_cmd_next(ip)) == NULL) {
		return (DDI_FAILURE);
	}
	PUTCB8(cb, CB_CONFIG_OFFSET + 0, 0x16);	/* 0x16 = 22 config bytes */
	PUTCB8(cb, CB_CONFIG_OFFSET + 1, 0x8);
	PUTCB8(cb, CB_CONFIG_OFFSET + 2, 0);
	/* byte 3: MWI enable when the chip supports it */
	PUTCB8(cb, CB_CONFIG_OFFSET + 3, (ip->canmwi ? 1 : 0));
	PUTCB8(cb, CB_CONFIG_OFFSET + 4, 0);
	PUTCB8(cb, CB_CONFIG_OFFSET + 5, 0);
	/* bytes 6, 7 and 15: promiscuous-mode dependent bits */
	PUTCB8(cb, CB_CONFIG_OFFSET + 6, (ip->promisc ? 0x80 : 0) | 0x3a);
	PUTCB8(cb, CB_CONFIG_OFFSET + 7, (ip->promisc ? 0 : 0x1) | 2);
	/* byte 8: MII (vs 503) mode, per PHY detection in attach */
	PUTCB8(cb, CB_CONFIG_OFFSET + 8, (ip->miih ? 0x1 : 0));
	PUTCB8(cb, CB_CONFIG_OFFSET + 9, 0);
	PUTCB8(cb, CB_CONFIG_OFFSET + 10, 0x2e);
	PUTCB8(cb, CB_CONFIG_OFFSET + 11, 0);
	/* bytes 12 and 18: 82557 wants different values than later parts */
	PUTCB8(cb, CB_CONFIG_OFFSET + 12, (ip->is557 ? 0 : 1) | 0x60);
	PUTCB8(cb, CB_CONFIG_OFFSET + 13, 0);
	PUTCB8(cb, CB_CONFIG_OFFSET + 14, 0xf2);
	PUTCB8(cb, CB_CONFIG_OFFSET + 15,
	    (ip->miih ? 0x80 : 0) | (ip->promisc ? 0x1 : 0) | 0x48);
	PUTCB8(cb, CB_CONFIG_OFFSET + 16, 0);
	/* bytes 17 and 19: flow-control bits for pause-capable parts */
	PUTCB8(cb, CB_CONFIG_OFFSET + 17, (ip->canpause ? 0x40 : 0));
	PUTCB8(cb, CB_CONFIG_OFFSET + 18, (ip->is557 ? 0 : 0x8) | 0xf2);
	PUTCB8(cb, CB_CONFIG_OFFSET + 19,
	    ((ip->revid < REV_82558_B0) ? 0 : 0x80) |
	    (ip->canpause ? 0x18 : 0));
	PUTCB8(cb, CB_CONFIG_OFFSET + 20, 0x3f);
	/* byte 21: all-multicast when the list overflowed the MCS block */
	PUTCB8(cb, CB_CONFIG_OFFSET + 21,
	    ((ip->nmcast >= CB_MCS_CNT_MAX) ? 0x8 : 0) | 0x5);

	return (iprb_cmd_submit(ip, CB_CMD_CONFIG));
}
884
885 int
iprb_set_ucode(iprb_t * ip)886 iprb_set_ucode(iprb_t *ip)
887 {
888 iprb_dma_t *cb;
889 const iprb_ucode_t *uc = NULL;
890 int i;
891
892 for (i = 0; iprb_ucode[i].length; i++) {
893 if (iprb_ucode[i].rev == ip->revid) {
894 uc = &iprb_ucode[i];
895 break;
896 }
897 }
898 if (uc == NULL) {
899 /* no matching firmware found, assume success */
900 return (DDI_SUCCESS);
901 }
902
903 ASSERT(mutex_owned(&ip->culock));
904 if ((cb = iprb_cmd_next(ip)) == NULL) {
905 return (DDI_FAILURE);
906 }
907 for (i = 0; i < uc->length; i++) {
908 PUTCB32(cb, (CB_UCODE_OFFSET + i * 4), uc->ucode[i]);
909 }
910 return (iprb_cmd_submit(ip, CB_CMD_UCODE));
911 }
912
913 int
iprb_configure(iprb_t * ip)914 iprb_configure(iprb_t *ip)
915 {
916 ASSERT(mutex_owned(&ip->culock));
917
918 if (iprb_cmd_drain(ip) != DDI_SUCCESS)
919 return (DDI_FAILURE);
920
921 if (iprb_set_config(ip) != DDI_SUCCESS)
922 return (DDI_FAILURE);
923 if (iprb_set_unicast(ip) != DDI_SUCCESS)
924 return (DDI_FAILURE);
925 if (iprb_set_multicast(ip) != DDI_SUCCESS)
926 return (DDI_FAILURE);
927
928 return (DDI_SUCCESS);
929 }
930
/*
 * Stop the device: drop it into the idle (selective reset) state and
 * mask all device interrupts.
 */
void
iprb_stop(iprb_t *ip)
{
	/* go idle */
	PUT32(ip, CSR_PORT, PORT_SEL_RESET);
	(void) GET32(ip, CSR_PORT);	/* flush CSR write */
	drv_usecwait(50);

	/* shut off device interrupts */
	PUT8(ip, CSR_INTCTL, INTCTL_MASK);
}
942
/*
 * Start the device: full reset, reset ring state, program the CU and
 * RU base addresses, prime the command unit with a NOP, upload
 * microcode, initialize and start the receive ring, and finally
 * enable interrupts.  Called with both the CU and RU locks held.
 */
int
iprb_start(iprb_t *ip)
{
	iprb_dma_t *cb;

	ASSERT(mutex_owned(&ip->rulock));
	ASSERT(mutex_owned(&ip->culock));

	/* Reset, but first go into idle state */
	PUT32(ip, CSR_PORT, PORT_SEL_RESET);
	(void) GET32(ip, CSR_PORT);
	drv_usecwait(50);

	PUT32(ip, CSR_PORT, PORT_SW_RESET);
	(void) GET32(ip, CSR_PORT);
	drv_usecwait(10);
	/* keep interrupts masked until everything below is set up */
	PUT8(ip, CSR_INTCTL, INTCTL_MASK);

	/* Reset pointers */
	ip->cmd_head = ip->cmd_tail = 0;
	ip->cmd_last = NUM_TX - 1;
	ip->cmd_count = 0;

	/* program a zero CU base so CB addresses are absolute */
	if (iprb_cmd_ready(ip) != DDI_SUCCESS)
		return (DDI_FAILURE);
	PUT32(ip, CSR_GEN_PTR, 0);
	PUT8(ip, CSR_CMD, CUC_CUBASE);
	(void) GET8(ip, CSR_CMD);

	/* likewise a zero RU base */
	if (iprb_cmd_ready(ip) != DDI_SUCCESS)
		return (DDI_FAILURE);
	PUT32(ip, CSR_GEN_PTR, 0);
	PUT8(ip, CSR_CMD, RUC_RUBASE);
	(void) GET8(ip, CSR_CMD);

	/* Send a NOP.  This will be the first command seen by the device. */
	cb = iprb_cmd_next(ip);
	VERIFY3P(cb, !=, NULL);
	if (iprb_cmd_submit(ip, CB_CMD_NOP) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/* as that was the first command, go ahead and submit a CU start */
	if (iprb_cmd_ready(ip) != DDI_SUCCESS)
		return (DDI_FAILURE);
	PUT32(ip, CSR_GEN_PTR, cb->paddr);
	PUT8(ip, CSR_CMD, CUC_START);
	(void) GET8(ip, CSR_CMD);

	/* Upload firmware. */
	if (iprb_set_ucode(ip) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/* Set up RFDs */
	iprb_rx_init(ip);

	PUT32(ip, CSR_GEN_PTR, ip->rxb[0].paddr);
	/* wait for the SCB */
	(void) iprb_cmd_ready(ip);
	PUT8(ip, CSR_CMD, RUC_START);
	(void) GET8(ip, CSR_CMD);	/* flush CSR */

	/* Enable device interrupts */
	PUT8(ip, CSR_INTCTL, 0);
	(void) GET8(ip, CSR_INTCTL);

	return (DDI_SUCCESS);
}
1010
/*
 * Dump and accumulate the hardware statistics counters into the soft
 * state.  The device writes the counters into the stats DMA buffer
 * and flags completion via the STATS_DONE word.  Rate-limited to at
 * most once per second.
 */
void
iprb_update_stats(iprb_t *ip)
{
	iprb_dma_t *sp = &ip->stats;
	hrtime_t tstamp;
	int i;

	ASSERT(mutex_owned(&ip->culock));

	/* Collect the hardware stats, but don't keep redoing it */
	tstamp = gethrtime();
	if (tstamp / NANOSEC == ip->stats_time / NANOSEC)
		return;

	/* clear the done marker so we can detect the new dump */
	PUTSTAT(sp, STATS_DONE_OFFSET, 0);
	SYNCSTATS(sp, 0, 0, DDI_DMA_SYNC_FORDEV);

	if (iprb_cmd_ready(ip) != DDI_SUCCESS)
		return;
	PUT32(ip, CSR_GEN_PTR, sp->paddr);
	PUT8(ip, CSR_CMD, CUC_STATSBASE);
	(void) GET8(ip, CSR_CMD);

	/* dump-and-reset so the counters don't double count next time */
	if (iprb_cmd_ready(ip) != DDI_SUCCESS)
		return;
	PUT8(ip, CSR_CMD, CUC_STATS_RST);
	(void) GET8(ip, CSR_CMD);	/* flush wb */

	/* poll (up to 10ms) for the device to finish the dump */
	for (i = 10000; i; i -= 10) {
		SYNCSTATS(sp, 0, 0, DDI_DMA_SYNC_FORKERNEL);
		if (GETSTAT(sp, STATS_DONE_OFFSET) == STATS_RST_DONE) {
			/* yay stats are updated */
			break;
		}
		drv_usecwait(10);
	}
	if (i == 0) {
		iprb_error(ip, "time out acquiring hardware statistics");
		return;
	}

	/* fold the fresh counters into the running soft totals */
	ip->ex_coll += GETSTAT(sp, STATS_TX_MAXCOL_OFFSET);
	ip->late_coll += GETSTAT(sp, STATS_TX_LATECOL_OFFSET);
	ip->uflo += GETSTAT(sp, STATS_TX_UFLO_OFFSET);
	ip->defer_xmt += GETSTAT(sp, STATS_TX_DEFER_OFFSET);
	ip->one_coll += GETSTAT(sp, STATS_TX_ONECOL_OFFSET);
	ip->multi_coll += GETSTAT(sp, STATS_TX_MULTCOL_OFFSET);
	ip->collisions += GETSTAT(sp, STATS_TX_TOTCOL_OFFSET);
	ip->fcs_errs += GETSTAT(sp, STATS_RX_FCS_OFFSET);
	ip->align_errs += GETSTAT(sp, STATS_RX_ALIGN_OFFSET);
	ip->norcvbuf += GETSTAT(sp, STATS_RX_NOBUF_OFFSET);
	ip->oflo += GETSTAT(sp, STATS_RX_OFLO_OFFSET);
	ip->runt += GETSTAT(sp, STATS_RX_SHORT_OFFSET);

	ip->stats_time = tstamp;
}
1067
/*
 * Transmit one message.  Returns NULL when the message has been
 * consumed (sent or dropped), or the original mblk to signal flow
 * control when no command block is available.  Called with the CU
 * lock held.
 */
mblk_t *
iprb_send(iprb_t *ip, mblk_t *mp)
{
	iprb_dma_t *cb;
	size_t sz;

	ASSERT(mutex_owned(&ip->culock));

	/* possibly reclaim some CBs */
	iprb_cmd_reclaim(ip);

	cb = iprb_cmd_next(ip);

	if (cb == NULL) {
		/* flow control */
		ip->wantw = B_TRUE;
		return (mp);
	}

	if ((sz = msgsize(mp)) > (ETHERMAX + VLAN_TAGSZ)) {
		/* Generally this should never occur */
		ip->macxmt_errs++;
		freemsg(mp);
		return (NULL);
	}

	ip->opackets++;
	ip->obytes += sz;

	/* simplified mode: data follows the CB, no separate TBD array */
	PUTCB32(cb, CB_TX_TBD_OFFSET, 0xffffffffU);
	PUTCB16(cb, CB_TX_COUNT_OFFSET, (sz & 0x3fff) | CB_TX_EOF);
	PUTCB8(cb, CB_TX_THRESH_OFFSET, (sz / 8) & 0xff);
	PUTCB8(cb, CB_TX_NUMBER_OFFSET, 0);
	/* mcopymsg() copies the payload and frees mp for us */
	mcopymsg(mp, cb->vaddr + CB_TX_DATA_OFFSET);
	/* group bit set: classify as broadcast or other multicast */
	if (cb->vaddr[CB_TX_DATA_OFFSET] & 0x1) {
		if (bcmp(cb->vaddr + CB_TX_DATA_OFFSET, &iprb_bcast, 6) != 0) {
			ip->multixmt++;
		} else {
			ip->brdcstxmt++;
		}
	}
	SYNCCB(cb, 0, CB_TX_DATA_OFFSET + sz, DDI_DMA_SYNC_FORDEV);

	if (iprb_cmd_submit(ip, CB_CMD_TX) != DDI_SUCCESS) {
		ip->macxmt_errs++;
	}

	return (NULL);
}
1117
1118 void
iprb_rx_add(iprb_t * ip)1119 iprb_rx_add(iprb_t *ip)
1120 {
1121 uint16_t last, curr, next;
1122 iprb_dma_t *rfd, *nfd, *lfd;
1123
1124 ASSERT(mutex_owned(&ip->rulock));
1125
1126 curr = ip->rx_index;
1127 last = ip->rx_last;
1128 next = (curr + 1) % NUM_RX;
1129
1130 ip->rx_last = curr;
1131 ip->rx_index = next;
1132
1133 lfd = &ip->rxb[last];
1134 rfd = &ip->rxb[curr];
1135 nfd = &ip->rxb[next];
1136
1137 PUTRFD32(rfd, RFD_LNK_OFFSET, nfd->paddr);
1138 PUTRFD16(rfd, RFD_CTL_OFFSET, RFD_CTL_EL);
1139 PUTRFD16(rfd, RFD_SIZ_OFFSET, RFD_SIZE - RFD_PKT_OFFSET);
1140 PUTRFD16(rfd, RFD_CNT_OFFSET, 0);
1141 SYNCRFD(rfd, 0, RFD_PKT_OFFSET, DDI_DMA_SYNC_FORDEV);
1142 /* clear the suspend & EL bits from the previous RFD */
1143 PUTRFD16(lfd, RFD_CTL_OFFSET, 0);
1144 SYNCRFD(rfd, RFD_CTL_OFFSET, 2, DDI_DMA_SYNC_FORDEV);
1145 }
1146
1147 void
iprb_rx_init(iprb_t * ip)1148 iprb_rx_init(iprb_t *ip)
1149 {
1150 ip->rx_index = 0;
1151 ip->rx_last = NUM_RX - 1;
1152 for (int i = 0; i < NUM_RX; i++)
1153 iprb_rx_add(ip);
1154 ip->rx_index = 0;
1155 ip->rx_last = NUM_RX - 1;
1156 }
1157
/*
 * Drain completed frames from the receive (RFD) ring.
 *
 * Walks at most NUM_RX descriptors starting at rx_index, copying each
 * completed frame into a freshly allocated mblk.  Frames are returned
 * as a b_next-linked chain (NULL if nothing completed).  Each consumed
 * or dropped RFD is immediately recycled via iprb_rx_add().  Caller
 * must hold rulock.
 */
mblk_t *
iprb_rx(iprb_t *ip)
{
	iprb_dma_t *rfd;
	uint16_t cnt;
	uint16_t sts;
	int i;
	mblk_t *mplist;
	mblk_t **mpp;
	mblk_t *mp;

	mplist = NULL;
	mpp = &mplist;

	for (i = 0; i < NUM_RX; i++) {
		rfd = &ip->rxb[ip->rx_index];
		/* Peek at just the status word to see if the RFD is done. */
		SYNCRFD(rfd, RFD_STS_OFFSET, 2, DDI_DMA_SYNC_FORKERNEL);
		if ((GETRFD16(rfd, RFD_STS_OFFSET) & RFD_STS_C) == 0) {
			break;
		}

		/* Record receive activity for the RU-hang watchdog. */
		ip->rx_wdog = gethrtime();

		/* Length 0 syncs the whole RFD, including packet data. */
		SYNCRFD(rfd, 0, 0, DDI_DMA_SYNC_FORKERNEL);
		cnt = GETRFD16(rfd, RFD_CNT_OFFSET);
		cnt &= ~(RFD_CNT_EOF | RFD_CNT_F);
		sts = GETRFD16(rfd, RFD_STS_OFFSET);

		if (cnt > (ETHERMAX + VLAN_TAGSZ)) {
			/* Frame too long: count, recycle the RFD, drop. */
			ip->toolong++;
			iprb_rx_add(ip);
			continue;
		}
		if (((sts & RFD_STS_OK) == 0) && (sts & RFD_STS_ERRS)) {
			/* Hardware flagged a receive error: drop silently. */
			iprb_rx_add(ip);
			continue;
		}
		if ((mp = allocb(cnt, BPRI_MED)) == NULL) {
			/* No kernel buffer available: count and drop. */
			ip->norcvbuf++;
			iprb_rx_add(ip);
			continue;
		}
		bcopy(rfd->vaddr + RFD_PKT_OFFSET, mp->b_wptr, cnt);

		/* return it to the RFD list */
		iprb_rx_add(ip);

		mp->b_wptr += cnt;
		ip->ipackets++;
		ip->rbytes += cnt;
		/* Group bit set: classify broadcast vs. multicast. */
		if (mp->b_rptr[0] & 0x1) {
			if (bcmp(mp->b_rptr, &iprb_bcast, 6) != 0) {
				ip->multircv++;
			} else {
				ip->brdcstrcv++;
			}
		}
		/* Append to the chain we will hand up to MAC. */
		*mpp = mp;
		mpp = &mp->b_next;
	}
	return (mplist);
}
1220
1221 int
iprb_m_promisc(void * arg,boolean_t on)1222 iprb_m_promisc(void *arg, boolean_t on)
1223 {
1224 iprb_t *ip = arg;
1225
1226 mutex_enter(&ip->culock);
1227 ip->promisc = on;
1228 if (ip->running && !ip->suspended)
1229 (void) iprb_configure(ip);
1230 mutex_exit(&ip->culock);
1231 return (0);
1232 }
1233
1234 int
iprb_m_unicst(void * arg,const uint8_t * macaddr)1235 iprb_m_unicst(void *arg, const uint8_t *macaddr)
1236 {
1237 iprb_t *ip = arg;
1238
1239 mutex_enter(&ip->culock);
1240 bcopy(macaddr, ip->curraddr, 6);
1241 if (ip->running && !ip->suspended)
1242 (void) iprb_configure(ip);
1243 mutex_exit(&ip->culock);
1244 return (0);
1245 }
1246
1247 int
iprb_m_multicst(void * arg,boolean_t add,const uint8_t * macaddr)1248 iprb_m_multicst(void *arg, boolean_t add, const uint8_t *macaddr)
1249 {
1250 iprb_t *ip = arg;
1251 list_t *l = &ip->mcast;
1252 iprb_mcast_t *mc;
1253
1254 if (add) {
1255 mc = kmem_alloc(sizeof (*mc), KM_NOSLEEP);
1256 if (mc == NULL) {
1257 return (ENOMEM);
1258 }
1259 bcopy(macaddr, mc->addr, 6);
1260 mutex_enter(&ip->culock);
1261 list_insert_head(l, mc);
1262 ip->nmcast++;
1263 if (ip->running && !ip->suspended)
1264 (void) iprb_configure(ip);
1265 mutex_exit(&ip->culock);
1266 } else {
1267 mutex_enter(&ip->culock);
1268 for (mc = list_head(l); mc != NULL; mc = list_next(l, mc)) {
1269 if (bcmp(macaddr, mc->addr, 6) == 0) {
1270 list_remove(&ip->mcast, mc);
1271 ip->nmcast--;
1272 if (ip->running && !ip->suspended)
1273 (void) iprb_configure(ip);
1274 break;
1275 }
1276 }
1277 mutex_exit(&ip->culock);
1278 if (mc)
1279 kmem_free(mc, sizeof (*mc));
1280 }
1281 return (0);
1282 }
1283
1284 int
iprb_m_start(void * arg)1285 iprb_m_start(void *arg)
1286 {
1287 int rv;
1288 iprb_t *ip = arg;
1289
1290 mutex_enter(&ip->rulock);
1291 mutex_enter(&ip->culock);
1292 rv = ip->suspended ? 0 : iprb_start(ip);
1293 if (rv == 0)
1294 ip->running = B_TRUE;
1295 ip->perh = ddi_periodic_add(iprb_periodic, ip, 5000000000, 0);
1296 mutex_exit(&ip->culock);
1297 mutex_exit(&ip->rulock);
1298 if (rv == 0) {
1299 if (ip->miih)
1300 mii_start(ip->miih);
1301 else
1302 /* might be a lie. */
1303 mac_link_update(ip->mach, LINK_STATE_UP);
1304 }
1305 return (rv ? EIO : 0);
1306 }
1307
1308 void
iprb_m_stop(void * arg)1309 iprb_m_stop(void *arg)
1310 {
1311 iprb_t *ip = arg;
1312
1313 if (ip->miih) {
1314 mii_stop(ip->miih);
1315 } else {
1316 mac_link_update(ip->mach, LINK_STATE_DOWN);
1317 }
1318
1319 ddi_periodic_delete(ip->perh);
1320 ip->perh = 0;
1321
1322 mutex_enter(&ip->rulock);
1323 mutex_enter(&ip->culock);
1324
1325 if (!ip->suspended) {
1326 iprb_update_stats(ip);
1327 iprb_stop(ip);
1328 }
1329 ip->running = B_FALSE;
1330 mutex_exit(&ip->culock);
1331 mutex_exit(&ip->rulock);
1332 }
1333
1334 int
iprb_m_stat(void * arg,uint_t stat,uint64_t * val)1335 iprb_m_stat(void *arg, uint_t stat, uint64_t *val)
1336 {
1337 iprb_t *ip = arg;
1338
1339 if (ip->miih && (mii_m_getstat(ip->miih, stat, val) == 0)) {
1340 return (0);
1341 }
1342
1343 mutex_enter(&ip->culock);
1344 if ((!ip->suspended) && (ip->running)) {
1345 iprb_update_stats(ip);
1346 }
1347 mutex_exit(&ip->culock);
1348
1349 switch (stat) {
1350 case MAC_STAT_IFSPEED:
1351 if (ip->miih == NULL) {
1352 *val = 10000000; /* 10 Mbps */
1353 }
1354 break;
1355 case ETHER_STAT_LINK_DUPLEX:
1356 if (ip->miih == NULL) {
1357 *val = LINK_DUPLEX_UNKNOWN;
1358 }
1359 break;
1360 case MAC_STAT_MULTIRCV:
1361 *val = ip->multircv;
1362 break;
1363 case MAC_STAT_BRDCSTRCV:
1364 *val = ip->brdcstrcv;
1365 break;
1366 case MAC_STAT_MULTIXMT:
1367 *val = ip->multixmt;
1368 break;
1369 case MAC_STAT_BRDCSTXMT:
1370 *val = ip->brdcstxmt;
1371 break;
1372 case MAC_STAT_IPACKETS:
1373 * val = ip->ipackets;
1374 break;
1375 case MAC_STAT_RBYTES:
1376 *val = ip->rbytes;
1377 break;
1378 case MAC_STAT_OPACKETS:
1379 *val = ip->opackets;
1380 break;
1381 case MAC_STAT_OBYTES:
1382 *val = ip->obytes;
1383 break;
1384 case MAC_STAT_NORCVBUF:
1385 *val = ip->norcvbuf;
1386 break;
1387 case MAC_STAT_COLLISIONS:
1388 *val = ip->collisions;
1389 break;
1390 case MAC_STAT_IERRORS:
1391 *val = ip->align_errs +
1392 ip->fcs_errs +
1393 ip->norcvbuf +
1394 ip->runt +
1395 ip->toolong +
1396 ip->macrcv_errs;
1397 break;
1398 case MAC_STAT_OERRORS:
1399 *val = ip->ex_coll +
1400 ip->late_coll +
1401 ip->uflo +
1402 ip->macxmt_errs +
1403 ip->nocarrier;
1404 break;
1405 case ETHER_STAT_ALIGN_ERRORS:
1406 *val = ip->align_errs;
1407 break;
1408 case ETHER_STAT_FCS_ERRORS:
1409 *val = ip->fcs_errs;
1410 break;
1411 case ETHER_STAT_DEFER_XMTS:
1412 *val = ip->defer_xmt;
1413 break;
1414 case ETHER_STAT_FIRST_COLLISIONS:
1415 *val = ip->one_coll + ip->multi_coll + ip->ex_coll;
1416 break;
1417 case ETHER_STAT_MULTI_COLLISIONS:
1418 *val = ip->multi_coll;
1419 break;
1420 case ETHER_STAT_TX_LATE_COLLISIONS:
1421 *val = ip->late_coll;
1422 break;
1423 case ETHER_STAT_EX_COLLISIONS:
1424 *val = ip->ex_coll;
1425 break;
1426 case MAC_STAT_OVERFLOWS:
1427 *val = ip->oflo;
1428 break;
1429 case MAC_STAT_UNDERFLOWS:
1430 *val = ip->uflo;
1431 break;
1432 case ETHER_STAT_TOOSHORT_ERRORS:
1433 *val = ip->runt;
1434 break;
1435 case ETHER_STAT_TOOLONG_ERRORS:
1436 *val = ip->toolong;
1437 break;
1438 case ETHER_STAT_CARRIER_ERRORS:
1439 *val = ip->nocarrier; /* reported only for "suspend" */
1440 break;
1441 case ETHER_STAT_MACXMT_ERRORS:
1442 *val = ip->macxmt_errs;
1443 break;
1444 case ETHER_STAT_MACRCV_ERRORS:
1445 *val = ip->macrcv_errs;
1446 break;
1447 default:
1448 return (ENOTSUP);
1449 }
1450 return (0);
1451 }
1452
1453 void
iprb_m_propinfo(void * arg,const char * name,mac_prop_id_t id,mac_prop_info_handle_t pih)1454 iprb_m_propinfo(void *arg, const char *name, mac_prop_id_t id,
1455 mac_prop_info_handle_t pih)
1456 {
1457 iprb_t *ip = arg;
1458
1459 if (ip->miih != NULL) {
1460 mii_m_propinfo(ip->miih, name, id, pih);
1461 return;
1462 }
1463 switch (id) {
1464 case MAC_PROP_DUPLEX:
1465 case MAC_PROP_SPEED:
1466 mac_prop_info_set_perm(pih, MAC_PROP_PERM_READ);
1467 break;
1468 }
1469 }
1470
1471 int
iprb_m_getprop(void * arg,const char * name,mac_prop_id_t id,uint_t sz,void * val)1472 iprb_m_getprop(void *arg, const char *name, mac_prop_id_t id, uint_t sz,
1473 void *val)
1474 {
1475 iprb_t *ip = arg;
1476 uint64_t x;
1477
1478 if (ip->miih != NULL) {
1479 return (mii_m_getprop(ip->miih, name, id, sz, val));
1480 }
1481 switch (id) {
1482 case MAC_PROP_SPEED:
1483 x = 10000000;
1484 bcopy(&x, val, sizeof (x));
1485 return (0);
1486
1487 case MAC_PROP_DUPLEX:
1488 x = LINK_DUPLEX_UNKNOWN;
1489 bcopy(&x, val, sizeof (x));
1490 return (0);
1491 }
1492
1493 return (ENOTSUP);
1494 }
1495
1496 int
iprb_m_setprop(void * arg,const char * name,mac_prop_id_t id,uint_t sz,const void * val)1497 iprb_m_setprop(void *arg, const char *name, mac_prop_id_t id, uint_t sz,
1498 const void *val)
1499 {
1500 iprb_t *ip = arg;
1501
1502 if (ip->miih != NULL) {
1503 return (mii_m_setprop(ip->miih, name, id, sz, val));
1504 }
1505 return (ENOTSUP);
1506 }
1507
1508 mblk_t *
iprb_m_tx(void * arg,mblk_t * mp)1509 iprb_m_tx(void *arg, mblk_t *mp)
1510 {
1511 iprb_t *ip = arg;
1512 mblk_t *nmp;
1513
1514 mutex_enter(&ip->culock);
1515
1516 while (mp != NULL) {
1517 nmp = mp->b_next;
1518 mp->b_next = NULL;
1519 if (ip->suspended) {
1520 freemsg(mp);
1521 ip->nocarrier++;
1522 mp = nmp;
1523 continue;
1524 }
1525 if ((mp = iprb_send(ip, mp)) != NULL) {
1526 mp->b_next = nmp;
1527 break;
1528 }
1529 mp = nmp;
1530 }
1531 mutex_exit(&ip->culock);
1532 return (mp);
1533 }
1534
1535 void
iprb_m_ioctl(void * arg,queue_t * wq,mblk_t * mp)1536 iprb_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
1537 {
1538 iprb_t *ip = arg;
1539
1540 if ((ip->miih != NULL) && (mii_m_loop_ioctl(ip->miih, wq, mp)))
1541 return;
1542
1543 miocnak(wq, mp, 0, EINVAL);
1544 }
1545
1546 uint16_t
iprb_mii_read(void * arg,uint8_t phy,uint8_t reg)1547 iprb_mii_read(void *arg, uint8_t phy, uint8_t reg)
1548 {
1549 iprb_t *ip = arg;
1550 uint32_t mdi;
1551
1552 /*
1553 * NB: we are guaranteed by the MII layer not to be suspended.
1554 * Furthermore, we have an independent MII register.
1555 */
1556
1557 mdi = MDI_OP_RD |
1558 ((uint32_t)phy << MDI_PHYAD_SHIFT) |
1559 ((uint32_t)reg << MDI_REGAD_SHIFT);
1560
1561 PUT32(ip, CSR_MDICTL, mdi);
1562 for (int i = 0; i < 100; i++) {
1563 mdi = GET32(ip, CSR_MDICTL);
1564 if (mdi & MDI_R) {
1565 return (mdi & 0xffff);
1566 }
1567 drv_usecwait(1);
1568 }
1569 return (0xffff);
1570 }
1571
1572 void
iprb_mii_write(void * arg,uint8_t phy,uint8_t reg,uint16_t data)1573 iprb_mii_write(void *arg, uint8_t phy, uint8_t reg, uint16_t data)
1574 {
1575 iprb_t *ip = arg;
1576 uint32_t mdi;
1577
1578 mdi = MDI_OP_WR |
1579 ((uint32_t)phy << MDI_PHYAD_SHIFT) |
1580 ((uint32_t)reg << MDI_REGAD_SHIFT) |
1581 (data);
1582
1583 PUT32(ip, CSR_MDICTL, mdi);
1584 for (int i = 0; i < 100; i++) {
1585 if (GET32(ip, CSR_MDICTL) & MDI_R)
1586 break;
1587 }
1588 }
1589
1590 void
iprb_mii_notify(void * arg,link_state_t link)1591 iprb_mii_notify(void *arg, link_state_t link)
1592 {
1593 iprb_t *ip = arg;
1594
1595 mac_link_update(ip->mach, link);
1596 }
1597
/*
 * Interrupt service routine.  arg1 is the iprb_t; arg2 is unused.
 *
 * Acknowledges all pending status bits, harvests completed receive
 * frames, and restarts the receive unit if it stopped for lack of
 * resources.  Received frames are delivered to MAC -- and transmit
 * flow control is lifted -- only after all locks are dropped.
 */
uint_t
iprb_intr(caddr_t arg1, caddr_t arg2)
{
	iprb_t *ip = (void *)arg1;
	uint8_t sts;
	mblk_t *mp = NULL;

	_NOTE(ARGUNUSED(arg2));

	mutex_enter(&ip->rulock);
	if (ip->suspended) {
		/* Device is down; this cannot be our interrupt. */
		mutex_exit(&ip->rulock);
		return (DDI_INTR_UNCLAIMED);
	}
	sts = GET8(ip, CSR_STS);
	if (sts == 0) {
		/* No interrupt status! */
		mutex_exit(&ip->rulock);
		return (DDI_INTR_UNCLAIMED);
	}
	/* acknowledge the interrupts */
	PUT8(ip, CSR_STS, sts);

	if (sts & (STS_RNR | STS_FR)) {
		mp = iprb_rx(ip);

		/*
		 * If the RU stopped with "no resources", rebuild the ring
		 * and restart the receive unit from its head.
		 */
		if ((sts & STS_RNR) &&
		    ((GET8(ip, CSR_STATE) & STATE_RUS) == STATE_RUS_NORES)) {
			iprb_rx_init(ip);

			mutex_enter(&ip->culock);
			PUT32(ip, CSR_GEN_PTR, ip->rxb[0].paddr);
			/* wait for the SCB */
			(void) iprb_cmd_ready(ip);
			PUT8(ip, CSR_CMD, RUC_START);
			(void) GET8(ip, CSR_CMD); /* flush CSR */
			mutex_exit(&ip->culock);
		}
	}
	mutex_exit(&ip->rulock);

	/* Deliver and notify outside the locks. */
	if (mp) {
		mac_rx(ip->mach, NULL, mp);
	}
	if ((sts & (STS_CNA | STS_CX)) && ip->wantw) {
		ip->wantw = B_FALSE;
		mac_tx_update(ip->mach);
	}
	return (DDI_INTR_CLAIMED);
}
1648
/*
 * Periodic (5 second) watchdog callback.
 *
 * Detects two hardware hang conditions -- a stalled receive unit (the
 * RU-hang erratum, reportedly seen only at 10 Mbps) and a stalled
 * command/transmit unit -- and recovers by stopping and restarting the
 * chip.  Also refreshes the hardware statistics.  Lock order is
 * rulock then culock, matching the rest of the driver.
 */
void
iprb_periodic(void *arg)
{
	iprb_t *ip = arg;
	boolean_t reset = B_FALSE;

	mutex_enter(&ip->rulock);
	if (ip->suspended || !ip->running) {
		/* Nothing to watch while the chip is down. */
		mutex_exit(&ip->rulock);
		return;
	}

	/*
	 * If we haven't received a packet in a while, and if the link
	 * is up, then it might be a hung chip.  This problem
	 * reportedly only occurs at 10 Mbps.
	 */
	if (ip->rxhangbug &&
	    ((ip->miih == NULL) || (mii_get_speed(ip->miih) == 10000000)) &&
	    ((gethrtime() - ip->rx_wdog) > ip->rx_timeout)) {
		cmn_err(CE_CONT, "?Possible RU hang, resetting.\n");
		reset = B_TRUE;
	}

	/* update the statistics */
	mutex_enter(&ip->culock);

	/*
	 * The watchdog timer is updated when we send frames or when we reclaim
	 * completed commands.  When the link is idle for long periods it is
	 * possible we will have done neither of these things, so reclaim
	 * explicitly before checking for a transmit stall:
	 */
	iprb_cmd_reclaim(ip);
	if (ip->tx_wdog && ((gethrtime() - ip->tx_wdog) > ip->tx_timeout)) {
		/* transmit/CU hang? */
		cmn_err(CE_CONT, "?CU stalled, resetting.\n");
		reset = B_TRUE;
	}

	if (reset) {
		/* We want to reconfigure */
		iprb_stop(ip);
		if (iprb_start(ip) != DDI_SUCCESS) {
			iprb_error(ip, "unable to restart chip");
		}
	}

	iprb_update_stats(ip);

	mutex_exit(&ip->culock);
	mutex_exit(&ip->rulock);
}
1702
/*
 * Fast-reboot quiesce entry point: silence the device.  This runs in
 * a constrained context, so it uses only register pokes and busy
 * waits -- no locks, no DDI services.
 */
int
iprb_quiesce(dev_info_t *dip)
{
	iprb_t *ip = ddi_get_driver_private(dip);

	/* Reset, but first go into idle state */
	PUT32(ip, CSR_PORT, PORT_SEL_RESET);
	drv_usecwait(50);
	PUT32(ip, CSR_PORT, PORT_SW_RESET);
	drv_usecwait(10);
	/* Mask all interrupts so the chip stays quiet. */
	PUT8(ip, CSR_INTCTL, INTCTL_MASK);

	return (DDI_SUCCESS);
}
1717
1718 int
iprb_suspend(dev_info_t * dip)1719 iprb_suspend(dev_info_t *dip)
1720 {
1721 iprb_t *ip = ddi_get_driver_private(dip);
1722
1723 if (ip->miih)
1724 mii_suspend(ip->miih);
1725
1726 mutex_enter(&ip->rulock);
1727 mutex_enter(&ip->culock);
1728 if (!ip->suspended) {
1729 ip->suspended = B_TRUE;
1730 if (ip->running) {
1731 iprb_update_stats(ip);
1732 iprb_stop(ip);
1733 }
1734 }
1735 mutex_exit(&ip->culock);
1736 mutex_exit(&ip->rulock);
1737 return (DDI_SUCCESS);
1738 }
1739
1740 int
iprb_resume(dev_info_t * dip)1741 iprb_resume(dev_info_t *dip)
1742 {
1743 iprb_t *ip = ddi_get_driver_private(dip);
1744
1745 mutex_enter(&ip->rulock);
1746 mutex_enter(&ip->culock);
1747
1748 ip->suspended = B_FALSE;
1749 if (ip->running) {
1750 if (iprb_start(ip) != DDI_SUCCESS) {
1751 iprb_error(ip, "unable to restart chip!");
1752 ip->suspended = B_TRUE;
1753 mutex_exit(&ip->culock);
1754 mutex_exit(&ip->rulock);
1755 return (DDI_FAILURE);
1756 }
1757 }
1758
1759 mutex_exit(&ip->culock);
1760 mutex_exit(&ip->rulock);
1761 if (ip->miih)
1762 mii_resume(ip->miih);
1763 return (DDI_SUCCESS);
1764 }
1765
1766 int
iprb_ddi_attach(dev_info_t * dip,ddi_attach_cmd_t cmd)1767 iprb_ddi_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
1768 {
1769 switch (cmd) {
1770 case DDI_ATTACH:
1771 return (iprb_attach(dip));
1772
1773 case DDI_RESUME:
1774 return (iprb_resume(dip));
1775
1776 default:
1777 return (DDI_FAILURE);
1778 }
1779 }
1780
1781 int
iprb_ddi_detach(dev_info_t * dip,ddi_detach_cmd_t cmd)1782 iprb_ddi_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
1783 {
1784 switch (cmd) {
1785 case DDI_DETACH:
1786 return (iprb_detach(dip));
1787
1788 case DDI_SUSPEND:
1789 return (iprb_suspend(dip));
1790
1791 default:
1792 return (DDI_FAILURE);
1793 }
1794 }
1795
1796 void
iprb_error(iprb_t * ip,const char * fmt,...)1797 iprb_error(iprb_t *ip, const char *fmt, ...)
1798 {
1799 va_list ap;
1800 char buf[256];
1801
1802 va_start(ap, fmt);
1803 (void) vsnprintf(buf, sizeof (buf), fmt, ap);
1804 va_end(ap);
1805
1806 cmn_err(CE_WARN, "%s%d: %s",
1807 ddi_driver_name(ip->dip), ddi_get_instance(ip->dip), buf);
1808 }
1809