1 /*
2 * sfe_util.c: general ethernet mac driver framework version 2.6
3 *
4 * Copyright (c) 2002-2008 Masayuki Murayama. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 *
9 * 1. Redistributions of source code must retain the above copyright notice,
10 * this list of conditions and the following disclaimer.
11 *
12 * 2. Redistributions in binary form must reproduce the above copyright notice,
13 * this list of conditions and the following disclaimer in the documentation
14 * and/or other materials provided with the distribution.
15 *
16 * 3. Neither the name of the author nor the names of its contributors may be
17 * used to endorse or promote products derived from this software without
18 * specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
26 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
27 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
28 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
29 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
30 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
31 * DAMAGE.
32 */
33
34 /*
35 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
36 * Use is subject to license terms.
37 */
38
39 /*
40 * System Header files.
41 */
42 #include <sys/types.h>
43 #include <sys/conf.h>
44 #include <sys/debug.h>
45 #include <sys/kmem.h>
46 #include <sys/vtrace.h>
47 #include <sys/ethernet.h>
48 #include <sys/modctl.h>
49 #include <sys/errno.h>
50 #include <sys/ddi.h>
51 #include <sys/sunddi.h>
52 #include <sys/stream.h> /* required for MBLK* */
53 #include <sys/strsun.h> /* required for mionack() */
54 #include <sys/byteorder.h>
55 #include <sys/pci.h>
56 #include <inet/common.h>
57 #include <inet/led.h>
58 #include <inet/mi.h>
59 #include <inet/nd.h>
60 #include <sys/crc32.h>
61
62 #include <sys/note.h>
63
64 #include "sfe_mii.h"
65 #include "sfe_util.h"
66
67
68
69 extern char ident[];
70
71 /* Debugging support */
72 #ifdef GEM_DEBUG_LEVEL
73 static int gem_debug = GEM_DEBUG_LEVEL;
74 #define DPRINTF(n, args) if (gem_debug > (n)) cmn_err args
75 #else
76 #define DPRINTF(n, args)
77 #undef ASSERT
78 #define ASSERT(x)
79 #endif
80
81 #define IOC_LINESIZE 0x40 /* Is it right for amd64? */
82
83 /*
84 * Useful macros and typedefs
85 */
86 #define ROUNDUP(x, a) (((x) + (a) - 1) & ~((a) - 1))
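/*
 * Illustrative example: with IOC_LINESIZE == 0x40, ROUNDUP(1518, IOC_LINESIZE)
 * yields 1536, the next 64-byte boundary. The alignment 'a' must be a
 * power of two.
 */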
87
88 #define GET_NET16(p) ((((uint8_t *)(p))[0] << 8)| ((uint8_t *)(p))[1])
89 #define GET_ETHERTYPE(p) GET_NET16(((uint8_t *)(p)) + ETHERADDRL*2)
90
91 #define GET_IPTYPEv4(p) (((uint8_t *)(p))[sizeof (struct ether_header) + 9])
92 #define GET_IPTYPEv6(p) (((uint8_t *)(p))[sizeof (struct ether_header) + 6])
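/*
 * GET_NET16() reads a 16-bit value in network (big-endian) byte order.
 * GET_IPTYPEv4() picks up the protocol field (offset 9) of an IPv4 header
 * and GET_IPTYPEv6() the next-header field (offset 6) of an IPv6 header,
 * both assuming an untagged ethernet frame.
 */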
93
94
95 #ifndef INT32_MAX
96 #define INT32_MAX 0x7fffffff
97 #endif
98
99 #define VTAG_OFF (ETHERADDRL*2)
100 #ifndef VTAG_SIZE
101 #define VTAG_SIZE 4
102 #endif
103 #ifndef VTAG_TPID
104 #define VTAG_TPID 0x8100U
105 #endif
106
107 #define GET_TXBUF(dp, sn) \
108 &(dp)->tx_buf[SLOT((dp)->tx_slots_base + (sn), (dp)->gc.gc_tx_buf_size)]
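/*
 * GET_TXBUF() maps a monotonically increasing sequence number to the
 * corresponding tx buffer slot; tx_slots_base is the rebasing offset
 * maintained by gem_init_tx_ring() so that sequence numbers stay valid
 * across a ring re-initialization.
 */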
109
110 #ifndef offsetof
111 #define offsetof(t, m) ((long)&(((t *) 0)->m))
112 #endif
113 #define TXFLAG_VTAG(flag) \
114 (((flag) & GEM_TXFLAG_VTAG) >> GEM_TXFLAG_VTAG_SHIFT)
115
116 #define MAXPKTBUF(dp) \
117 ((dp)->mtu + sizeof (struct ether_header) + VTAG_SIZE + ETHERFCSL)
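/*
 * For example, with the default mtu of 1500, MAXPKTBUF() is
 * 1500 + 14 + 4 + 4 = 1522 octets (payload + ether header + vlan tag + FCS).
 */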
118
119 #define WATCH_INTERVAL_FAST drv_usectohz(100*1000) /* 100mS */
120 #define BOOLEAN(x) ((x) != 0)
121
122 /*
123  * Macros to distinguish chip generations.
124 */
125
126 /*
127 * Private functions
128 */
129 static void gem_mii_start(struct gem_dev *);
130 static void gem_mii_stop(struct gem_dev *);
131
132 /* local buffer management */
133 static void gem_nd_setup(struct gem_dev *dp);
134 static void gem_nd_cleanup(struct gem_dev *dp);
135 static int gem_alloc_memory(struct gem_dev *);
136 static void gem_free_memory(struct gem_dev *);
137 static void gem_init_rx_ring(struct gem_dev *);
138 static void gem_init_tx_ring(struct gem_dev *);
139 __INLINE__ static void gem_append_rxbuf(struct gem_dev *, struct rxbuf *);
140
141 static void gem_tx_timeout(struct gem_dev *);
142 static void gem_mii_link_watcher(struct gem_dev *dp);
143 static int gem_mac_init(struct gem_dev *dp);
144 static int gem_mac_start(struct gem_dev *dp);
145 static int gem_mac_stop(struct gem_dev *dp, uint_t flags);
146 static void gem_mac_ioctl(struct gem_dev *dp, queue_t *wq, mblk_t *mp);
147
148 static struct ether_addr gem_etherbroadcastaddr = {
149 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
150 };
151
152 int gem_speed_value[] = {10, 100, 1000};
153
154 /* ============================================================== */
155 /*
156 * Misc runtime routines
157 */
158 /* ============================================================== */
159 /*
160 * Ether CRC calculation according to 21143 data sheet
161 */
162 uint32_t
163 gem_ether_crc_le(const uint8_t *addr, int len)
164 {
165 uint32_t crc;
166
167 CRC32(crc, addr, ETHERADDRL, 0xffffffffU, crc32_table);
168 return (crc);
169 }
170
171 uint32_t
172 gem_ether_crc_be(const uint8_t *addr, int len)
173 {
174 int idx;
175 int bit;
176 uint_t data;
177 uint32_t crc;
178 #define CRC32_POLY_BE 0x04c11db7
179
180 crc = 0xffffffff;
181 for (idx = 0; idx < len; idx++) {
182 for (data = *addr++, bit = 0; bit < 8; bit++, data >>= 1) {
183 crc = (crc << 1)
184 ^ ((((crc >> 31) ^ data) & 1) ? CRC32_POLY_BE : 0);
185 }
186 }
187 return (crc);
188 #undef CRC32_POLY_BE
189 }
190
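/*
 * gem_prop_get_int() expands prop_template with the per-instance device name,
 * e.g. a (hypothetical) template of "%s-txthr" becomes "sfe0-txthr", and
 * looks the resulting property up in the driver configuration.
 */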
191 int
192 gem_prop_get_int(struct gem_dev *dp, char *prop_template, int def_val)
193 {
194 char propname[32];
195
196 (void) sprintf(propname, prop_template, dp->name);
197
198 return (ddi_prop_get_int(DDI_DEV_T_ANY, dp->dip,
199 DDI_PROP_DONTPASS, propname, def_val));
200 }
201
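/*
 * gem_population() returns the number of set bits (population count) in x,
 * e.g. gem_population(0xf0) == 4.
 */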
202 static int
203 gem_population(uint32_t x)
204 {
205 int i;
206 int cnt;
207
208 cnt = 0;
209 for (i = 0; i < 32; i++) {
210 if (x & (1 << i)) {
211 cnt++;
212 }
213 }
214 return (cnt);
215 }
216
217 #ifdef GEM_DEBUG_LEVEL
218 #ifdef GEM_DEBUG_VLAN
219 static void
220 gem_dump_packet(struct gem_dev *dp, char *title, mblk_t *mp,
221 boolean_t check_cksum)
222 {
223 char msg[180];
224 uint8_t buf[18+20+20];
225 uint8_t *p;
226 size_t offset;
227 uint_t ethertype;
228 uint_t proto;
229 uint_t ipproto = 0;
230 uint_t iplen;
231 uint_t iphlen;
232 uint_t tcplen;
233 uint_t udplen;
234 uint_t cksum;
235 int rest;
236 int len;
237 char *bp;
238 mblk_t *tp;
239 extern uint_t ip_cksum(mblk_t *, int, uint32_t);
240
241 msg[0] = 0;
242 bp = msg;
243
244 rest = sizeof (buf);
245 offset = 0;
246 for (tp = mp; tp; tp = tp->b_cont) {
247 len = tp->b_wptr - tp->b_rptr;
248 len = min(rest, len);
249 bcopy(tp->b_rptr, &buf[offset], len);
250 rest -= len;
251 offset += len;
252 if (rest == 0) {
253 break;
254 }
255 }
256
257 offset = 0;
258 p = &buf[offset];
259
260 /* ethernet address */
261 sprintf(bp,
262 "ether: %02x:%02x:%02x:%02x:%02x:%02x"
263 " -> %02x:%02x:%02x:%02x:%02x:%02x",
264 p[6], p[7], p[8], p[9], p[10], p[11],
265 p[0], p[1], p[2], p[3], p[4], p[5]);
266 bp = &msg[strlen(msg)];
267
268 /* vlan tag and ethertype */
269 ethertype = GET_ETHERTYPE(p);
270 if (ethertype == VTAG_TPID) {
271 sprintf(bp, " vtag:0x%04x", GET_NET16(&p[14]));
272 bp = &msg[strlen(msg)];
273
274 offset += VTAG_SIZE;
275 p = &buf[offset];
276 ethertype = GET_ETHERTYPE(p);
277 }
278 sprintf(bp, " type:%04x", ethertype);
279 bp = &msg[strlen(msg)];
280
281 /* ethernet packet length */
282 sprintf(bp, " mblklen:%d", msgdsize(mp));
283 bp = &msg[strlen(msg)];
284 if (mp->b_cont) {
285 sprintf(bp, "(");
286 bp = &msg[strlen(msg)];
287 for (tp = mp; tp; tp = tp->b_cont) {
288 if (tp == mp) {
289 sprintf(bp, "%d", tp->b_wptr - tp->b_rptr);
290 } else {
291 sprintf(bp, "+%d", tp->b_wptr - tp->b_rptr);
292 }
293 bp = &msg[strlen(msg)];
294 }
295 sprintf(bp, ")");
296 bp = &msg[strlen(msg)];
297 }
298
299 if (ethertype != ETHERTYPE_IP) {
300 goto x;
301 }
302
303 /* ip address */
304 offset += sizeof (struct ether_header);
305 p = &buf[offset];
306 ipproto = p[9];
307 iplen = GET_NET16(&p[2]);
308 sprintf(bp, ", ip: %d.%d.%d.%d -> %d.%d.%d.%d proto:%d iplen:%d",
309 p[12], p[13], p[14], p[15],
310 p[16], p[17], p[18], p[19],
311 ipproto, iplen);
312 bp = (void *)&msg[strlen(msg)];
313
314 iphlen = (p[0] & 0xf) * 4;
315
316 /* cksum for pseudo header */
317 cksum = *(uint16_t *)&p[12];
318 cksum += *(uint16_t *)&p[14];
319 cksum += *(uint16_t *)&p[16];
320 cksum += *(uint16_t *)&p[18];
321 cksum += BE_16(ipproto);
322
323 /* tcp or udp protocol header */
324 offset += iphlen;
325 p = &buf[offset];
326 if (ipproto == IPPROTO_TCP) {
327 tcplen = iplen - iphlen;
328 sprintf(bp, ", tcp: len:%d cksum:%x",
329 tcplen, GET_NET16(&p[16]));
330 bp = (void *)&msg[strlen(msg)];
331
332 if (check_cksum) {
333 cksum += BE_16(tcplen);
334 cksum = (uint16_t)ip_cksum(mp, offset, cksum);
335 sprintf(bp, " (%s)",
336 (cksum == 0 || cksum == 0xffff) ? "ok" : "ng");
337 bp = (void *)&msg[strlen(msg)];
338 }
339 } else if (ipproto == IPPROTO_UDP) {
340 udplen = GET_NET16(&p[4]);
341 sprintf(bp, ", udp: len:%d cksum:%x",
342 udplen, GET_NET16(&p[6]));
343 bp = (void *)&msg[strlen(msg)];
344
345 if (GET_NET16(&p[6]) && check_cksum) {
346 cksum += *(uint16_t *)&p[4];
347 cksum = (uint16_t)ip_cksum(mp, offset, cksum);
348 sprintf(bp, " (%s)",
349 (cksum == 0 || cksum == 0xffff) ? "ok" : "ng");
350 bp = (void *)&msg[strlen(msg)];
351 }
352 }
353 x:
354 cmn_err(CE_CONT, "!%s: %s: %s", dp->name, title, msg);
355 }
356 #endif /* GEM_DEBUG_VLAN */
357 #endif /* GEM_DEBUG_LEVEL */
358
359 /* ============================================================== */
360 /*
361 * IO cache flush
362 */
363 /* ============================================================== */
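/*
 * The two dma-sync helpers below handle ring wrap-around: when the range
 * head .. head+nslot crosses the end of the descriptor ring, the sync is
 * split into two ddi_dma_sync() calls. For example, with a 64-entry ring,
 * head == 60 and nslot == 8 sync slots 0..3 and then 60..63.
 */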
364 __INLINE__ void
365 gem_rx_desc_dma_sync(struct gem_dev *dp, int head, int nslot, int how)
366 {
367 int n;
368 int m;
369 int rx_desc_unit_shift = dp->gc.gc_rx_desc_unit_shift;
370
371 /* sync active descriptors */
372 if (rx_desc_unit_shift < 0 || nslot == 0) {
373 /* no rx descriptor ring */
374 return;
375 }
376
377 n = dp->gc.gc_rx_ring_size - head;
378 if ((m = nslot - n) > 0) {
379 (void) ddi_dma_sync(dp->desc_dma_handle,
380 (off_t)0,
381 (size_t)(m << rx_desc_unit_shift),
382 how);
383 nslot = n;
384 }
385
386 (void) ddi_dma_sync(dp->desc_dma_handle,
387 (off_t)(head << rx_desc_unit_shift),
388 (size_t)(nslot << rx_desc_unit_shift),
389 how);
390 }
391
392 __INLINE__ void
393 gem_tx_desc_dma_sync(struct gem_dev *dp, int head, int nslot, int how)
394 {
395 int n;
396 int m;
397 int tx_desc_unit_shift = dp->gc.gc_tx_desc_unit_shift;
398
399 /* sync active descriptors */
400 if (tx_desc_unit_shift < 0 || nslot == 0) {
401 /* no tx descriptor ring */
402 return;
403 }
404
405 n = dp->gc.gc_tx_ring_size - head;
406 if ((m = nslot - n) > 0) {
407 (void) ddi_dma_sync(dp->desc_dma_handle,
408 (off_t)(dp->tx_ring_dma - dp->rx_ring_dma),
409 (size_t)(m << tx_desc_unit_shift),
410 how);
411 nslot = n;
412 }
413
414 (void) ddi_dma_sync(dp->desc_dma_handle,
415 (off_t)((head << tx_desc_unit_shift)
416 + (dp->tx_ring_dma - dp->rx_ring_dma)),
417 (size_t)(nslot << tx_desc_unit_shift),
418 how);
419 }
420
421 static void
422 gem_rx_start_default(struct gem_dev *dp, int head, int nslot)
423 {
424 gem_rx_desc_dma_sync(dp,
425 SLOT(head, dp->gc.gc_rx_ring_size), nslot,
426 DDI_DMA_SYNC_FORDEV);
427 }
428
429 /* ============================================================== */
430 /*
431 * Buffer management
432 */
433 /* ============================================================== */
434 static void
435 gem_dump_txbuf(struct gem_dev *dp, int level, const char *title)
436 {
437 cmn_err(level,
438 "!%s: %s: tx_active: %d[%d] %d[%d] (+%d), "
439 "tx_softq: %d[%d] %d[%d] (+%d), "
440 "tx_free: %d[%d] %d[%d] (+%d), "
441 "tx_desc: %d[%d] %d[%d] (+%d), "
442 "intr: %d[%d] (+%d), ",
443 dp->name, title,
444 dp->tx_active_head,
445 SLOT(dp->tx_active_head, dp->gc.gc_tx_buf_size),
446 dp->tx_active_tail,
447 SLOT(dp->tx_active_tail, dp->gc.gc_tx_buf_size),
448 dp->tx_active_tail - dp->tx_active_head,
449 dp->tx_softq_head,
450 SLOT(dp->tx_softq_head, dp->gc.gc_tx_buf_size),
451 dp->tx_softq_tail,
452 SLOT(dp->tx_softq_tail, dp->gc.gc_tx_buf_size),
453 dp->tx_softq_tail - dp->tx_softq_head,
454 dp->tx_free_head,
455 SLOT(dp->tx_free_head, dp->gc.gc_tx_buf_size),
456 dp->tx_free_tail,
457 SLOT(dp->tx_free_tail, dp->gc.gc_tx_buf_size),
458 dp->tx_free_tail - dp->tx_free_head,
459 dp->tx_desc_head,
460 SLOT(dp->tx_desc_head, dp->gc.gc_tx_ring_size),
461 dp->tx_desc_tail,
462 SLOT(dp->tx_desc_tail, dp->gc.gc_tx_ring_size),
463 dp->tx_desc_tail - dp->tx_desc_head,
464 dp->tx_desc_intr,
465 SLOT(dp->tx_desc_intr, dp->gc.gc_tx_ring_size),
466 dp->tx_desc_intr - dp->tx_desc_head);
467 }
468
469 static void
470 gem_free_rxbuf(struct rxbuf *rbp)
471 {
472 struct gem_dev *dp;
473
474 dp = rbp->rxb_devp;
475 ASSERT(mutex_owned(&dp->intrlock));
476 rbp->rxb_next = dp->rx_buf_freelist;
477 dp->rx_buf_freelist = rbp;
478 dp->rx_buf_freecnt++;
479 }
480
481 /*
482  * gem_get_rxbuf: supply a receive buffer which has been mapped into
483 * DMA space.
484 */
485 struct rxbuf *
486 gem_get_rxbuf(struct gem_dev *dp, int cansleep)
487 {
488 struct rxbuf *rbp;
489 uint_t count = 0;
490 int i;
491 int err;
492
493 ASSERT(mutex_owned(&dp->intrlock));
494
495 DPRINTF(3, (CE_CONT, "!gem_get_rxbuf: called freecnt:%d",
496 dp->rx_buf_freecnt));
497 /*
498 * Get rx buffer management structure
499 */
500 rbp = dp->rx_buf_freelist;
501 if (rbp) {
502 /* get one from the recycle list */
503 ASSERT(dp->rx_buf_freecnt > 0);
504
505 dp->rx_buf_freelist = rbp->rxb_next;
506 dp->rx_buf_freecnt--;
507 rbp->rxb_next = NULL;
508 return (rbp);
509 }
510
511 /*
512 * Allocate a rx buffer management structure
513 */
514 rbp = kmem_zalloc(sizeof (*rbp), cansleep ? KM_SLEEP : KM_NOSLEEP);
515 if (rbp == NULL) {
516 /* no memory */
517 return (NULL);
518 }
519
520 /*
521 * Prepare a back pointer to the device structure which will be
522  * referred to on freeing the buffer later.
523 */
524 rbp->rxb_devp = dp;
525
526 /* allocate a dma handle for rx data buffer */
527 if ((err = ddi_dma_alloc_handle(dp->dip,
528 &dp->gc.gc_dma_attr_rxbuf,
529 (cansleep ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT),
530 NULL, &rbp->rxb_dh)) != DDI_SUCCESS) {
531
532 cmn_err(CE_WARN,
533 "!%s: %s: ddi_dma_alloc_handle:1 failed, err=%d",
534 dp->name, __func__, err);
535
536 kmem_free(rbp, sizeof (struct rxbuf));
537 return (NULL);
538 }
539
540 /* allocate a bounce buffer for rx */
541 if ((err = ddi_dma_mem_alloc(rbp->rxb_dh,
542 ROUNDUP(dp->rx_buf_len, IOC_LINESIZE),
543 &dp->gc.gc_buf_attr,
544 /*
545 * if the nic requires a header at the top of receive buffers,
546 * it may access the rx buffer randomly.
547 */
548 (dp->gc.gc_rx_header_len > 0)
549 ? DDI_DMA_CONSISTENT : DDI_DMA_STREAMING,
550 cansleep ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT,
551 NULL,
552 &rbp->rxb_buf, &rbp->rxb_buf_len,
553 &rbp->rxb_bah)) != DDI_SUCCESS) {
554
555 cmn_err(CE_WARN,
556 "!%s: %s: ddi_dma_mem_alloc: failed, err=%d",
557 dp->name, __func__, err);
558
559 ddi_dma_free_handle(&rbp->rxb_dh);
560 kmem_free(rbp, sizeof (struct rxbuf));
561 return (NULL);
562 }
563
564 /* Mapin the bounce buffer into the DMA space */
565 if ((err = ddi_dma_addr_bind_handle(rbp->rxb_dh,
566 NULL, rbp->rxb_buf, dp->rx_buf_len,
567 ((dp->gc.gc_rx_header_len > 0)
568 ?(DDI_DMA_RDWR | DDI_DMA_CONSISTENT)
569 :(DDI_DMA_READ | DDI_DMA_STREAMING)),
570 cansleep ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT,
571 NULL,
572 rbp->rxb_dmacookie,
573 &count)) != DDI_DMA_MAPPED) {
574
575 ASSERT(err != DDI_DMA_INUSE);
576 DPRINTF(0, (CE_WARN,
577 "!%s: %s: ddi_dma_addr_bind_handle: failed, err=%d",
578 dp->name, __func__, err));
579
580 /*
581 * we failed to allocate a dma resource
582 * for the rx bounce buffer.
583 */
584 ddi_dma_mem_free(&rbp->rxb_bah);
585 ddi_dma_free_handle(&rbp->rxb_dh);
586 kmem_free(rbp, sizeof (struct rxbuf));
587 return (NULL);
588 }
589
590 /* collect the remaining DMA cookies of the mapping */
591 for (i = 1; i < count; i++) {
592 ddi_dma_nextcookie(rbp->rxb_dh, &rbp->rxb_dmacookie[i]);
593 }
594 rbp->rxb_nfrags = count;
595
596 /* Now we successfully prepared an rx buffer */
597 dp->rx_buf_allocated++;
598
599 return (rbp);
600 }
601
602 /* ============================================================== */
603 /*
604 * memory resource management
605 */
606 /* ============================================================== */
607 static int
608 gem_alloc_memory(struct gem_dev *dp)
609 {
610 caddr_t ring;
611 caddr_t buf;
612 size_t req_size;
613 size_t ring_len;
614 size_t buf_len;
615 ddi_dma_cookie_t ring_cookie;
616 ddi_dma_cookie_t buf_cookie;
617 uint_t count;
618 int i;
619 int err;
620 struct txbuf *tbp;
621 int tx_buf_len;
622 ddi_dma_attr_t dma_attr_txbounce;
623
624 DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
625
626 dp->desc_dma_handle = NULL;
627 req_size = dp->rx_desc_size + dp->tx_desc_size + dp->gc.gc_io_area_size;
628
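/*
 * The rx descriptor ring, the tx descriptor ring and the chip-specific io
 * area are carved out of a single DMA allocation, laid out in that order;
 * the base addresses computed below rely on this layout.
 */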
629 if (req_size > 0) {
630 /*
631 * Alloc RX/TX descriptors and a io area.
632 */
633 if ((err = ddi_dma_alloc_handle(dp->dip,
634 &dp->gc.gc_dma_attr_desc,
635 DDI_DMA_SLEEP, NULL,
636 &dp->desc_dma_handle)) != DDI_SUCCESS) {
637 cmn_err(CE_WARN,
638 "!%s: %s: ddi_dma_alloc_handle failed: %d",
639 dp->name, __func__, err);
640 return (ENOMEM);
641 }
642
643 if ((err = ddi_dma_mem_alloc(dp->desc_dma_handle,
644 req_size, &dp->gc.gc_desc_attr,
645 DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
646 &ring, &ring_len,
647 &dp->desc_acc_handle)) != DDI_SUCCESS) {
648 cmn_err(CE_WARN,
649 "!%s: %s: ddi_dma_mem_alloc failed: "
650 "ret %d, request size: %d",
651 dp->name, __func__, err, (int)req_size);
652 ddi_dma_free_handle(&dp->desc_dma_handle);
653 return (ENOMEM);
654 }
655
656 if ((err = ddi_dma_addr_bind_handle(dp->desc_dma_handle,
657 NULL, ring, ring_len,
658 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
659 DDI_DMA_SLEEP, NULL,
660 &ring_cookie, &count)) != DDI_SUCCESS) {
661 ASSERT(err != DDI_DMA_INUSE);
662 cmn_err(CE_WARN,
663 "!%s: %s: ddi_dma_addr_bind_handle failed: %d",
664 dp->name, __func__, err);
665 ddi_dma_mem_free(&dp->desc_acc_handle);
666 ddi_dma_free_handle(&dp->desc_dma_handle);
667 return (ENOMEM);
668 }
669 ASSERT(count == 1);
670
671 /* set base of rx descriptor ring */
672 dp->rx_ring = ring;
673 dp->rx_ring_dma = ring_cookie.dmac_laddress;
674
675 /* set base of tx descriptor ring */
676 dp->tx_ring = dp->rx_ring + dp->rx_desc_size;
677 dp->tx_ring_dma = dp->rx_ring_dma + dp->rx_desc_size;
678
679 /* set base of io area */
680 dp->io_area = dp->tx_ring + dp->tx_desc_size;
681 dp->io_area_dma = dp->tx_ring_dma + dp->tx_desc_size;
682 }
683
684 /*
685 * Prepare DMA resources for tx packets
686 */
687 ASSERT(dp->gc.gc_tx_buf_size > 0);
688
689 /* Special dma attribute for tx bounce buffers */
690 dma_attr_txbounce = dp->gc.gc_dma_attr_txbuf;
691 dma_attr_txbounce.dma_attr_sgllen = 1;
692 dma_attr_txbounce.dma_attr_align =
693 max(dma_attr_txbounce.dma_attr_align, IOC_LINESIZE);
694
695 /* The size of tx bounce buffers must be the max tx packet size. */
696 tx_buf_len = MAXPKTBUF(dp);
697 tx_buf_len = ROUNDUP(tx_buf_len, IOC_LINESIZE);
698
699 ASSERT(tx_buf_len >= ETHERMAX+ETHERFCSL);
700
701 for (i = 0, tbp = dp->tx_buf;
702 i < dp->gc.gc_tx_buf_size; i++, tbp++) {
703
704 /* setup bounce buffers for tx packets */
705 if ((err = ddi_dma_alloc_handle(dp->dip,
706 &dma_attr_txbounce,
707 DDI_DMA_SLEEP, NULL,
708 &tbp->txb_bdh)) != DDI_SUCCESS) {
709
710 cmn_err(CE_WARN,
711 "!%s: %s ddi_dma_alloc_handle for bounce buffer failed:"
712 " err=%d, i=%d",
713 dp->name, __func__, err, i);
714 goto err_alloc_dh;
715 }
716
717 if ((err = ddi_dma_mem_alloc(tbp->txb_bdh,
718 tx_buf_len,
719 &dp->gc.gc_buf_attr,
720 DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL,
721 &buf, &buf_len,
722 &tbp->txb_bah)) != DDI_SUCCESS) {
723 cmn_err(CE_WARN,
724 "!%s: %s: ddi_dma_mem_alloc for bounce buffer failed: "
725 "ret %d, request size %d",
726 dp->name, __func__, err, tx_buf_len);
727 ddi_dma_free_handle(&tbp->txb_bdh);
728 goto err_alloc_dh;
729 }
730
731 if ((err = ddi_dma_addr_bind_handle(tbp->txb_bdh,
732 NULL, buf, buf_len,
733 DDI_DMA_WRITE | DDI_DMA_STREAMING,
734 DDI_DMA_SLEEP, NULL,
735 &buf_cookie, &count)) != DDI_SUCCESS) {
736 ASSERT(err != DDI_DMA_INUSE);
737 cmn_err(CE_WARN,
738 "!%s: %s: ddi_dma_addr_bind_handle for bounce buffer failed: %d",
739 dp->name, __func__, err);
740 ddi_dma_mem_free(&tbp->txb_bah);
741 ddi_dma_free_handle(&tbp->txb_bdh);
742 goto err_alloc_dh;
743 }
744 ASSERT(count == 1);
745 tbp->txb_buf = buf;
746 tbp->txb_buf_dma = buf_cookie.dmac_laddress;
747 }
748
749 return (0);
750
751 err_alloc_dh:
752 if (dp->gc.gc_tx_buf_size > 0) {
753 while (i-- > 0) {
754 (void) ddi_dma_unbind_handle(dp->tx_buf[i].txb_bdh);
755 ddi_dma_mem_free(&dp->tx_buf[i].txb_bah);
756 ddi_dma_free_handle(&dp->tx_buf[i].txb_bdh);
757 }
758 }
759
760 if (dp->desc_dma_handle) {
761 (void) ddi_dma_unbind_handle(dp->desc_dma_handle);
762 ddi_dma_mem_free(&dp->desc_acc_handle);
763 ddi_dma_free_handle(&dp->desc_dma_handle);
764 dp->desc_dma_handle = NULL;
765 }
766
767 return (ENOMEM);
768 }
769
770 static void
771 gem_free_memory(struct gem_dev *dp)
772 {
773 int i;
774 struct rxbuf *rbp;
775 struct txbuf *tbp;
776
777 DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
778
779 /* Free TX/RX descriptors and tx padding buffer */
780 if (dp->desc_dma_handle) {
781 (void) ddi_dma_unbind_handle(dp->desc_dma_handle);
782 ddi_dma_mem_free(&dp->desc_acc_handle);
783 ddi_dma_free_handle(&dp->desc_dma_handle);
784 dp->desc_dma_handle = NULL;
785 }
786
787 /* Free dma handles for Tx */
788 for (i = dp->gc.gc_tx_buf_size, tbp = dp->tx_buf; i--; tbp++) {
789 /* Free bounce buffer associated to each txbuf */
790 (void) ddi_dma_unbind_handle(tbp->txb_bdh);
791 ddi_dma_mem_free(&tbp->txb_bah);
792 ddi_dma_free_handle(&tbp->txb_bdh);
793 }
794
795 /* Free rx buffer */
796 while ((rbp = dp->rx_buf_freelist) != NULL) {
797
798 ASSERT(dp->rx_buf_freecnt > 0);
799
800 dp->rx_buf_freelist = rbp->rxb_next;
801 dp->rx_buf_freecnt--;
802
803 /* release DMA mapping */
804 ASSERT(rbp->rxb_dh != NULL);
805
806 /* free dma handles for rx buf */
807 /* it always has a dma mapping */
808 ASSERT(rbp->rxb_nfrags > 0);
809 (void) ddi_dma_unbind_handle(rbp->rxb_dh);
810
811 /* free the associated bounce buffer and dma handle */
812 ASSERT(rbp->rxb_bah != NULL);
813 ddi_dma_mem_free(&rbp->rxb_bah);
814 /* free the associated dma handle */
815 ddi_dma_free_handle(&rbp->rxb_dh);
816
817 /* free the base memory of rx buffer management */
818 kmem_free(rbp, sizeof (struct rxbuf));
819 }
820 }
821
822 /* ============================================================== */
823 /*
824 * Rx/Tx descriptor slot management
825 */
826 /* ============================================================== */
827 /*
828 * Initialize an empty rx ring.
829 */
830 static void
831 gem_init_rx_ring(struct gem_dev *dp)
832 {
833 int i;
834 int rx_ring_size = dp->gc.gc_rx_ring_size;
835
836 DPRINTF(1, (CE_CONT, "!%s: %s ring_size:%d, buf_max:%d",
837 dp->name, __func__,
838 rx_ring_size, dp->gc.gc_rx_buf_max));
839
840 /* make a physical chain of rx descriptors */
841 for (i = 0; i < rx_ring_size; i++) {
842 (*dp->gc.gc_rx_desc_init)(dp, i);
843 }
844 gem_rx_desc_dma_sync(dp, 0, rx_ring_size, DDI_DMA_SYNC_FORDEV);
845
846 dp->rx_active_head = (seqnum_t)0;
847 dp->rx_active_tail = (seqnum_t)0;
848
849 ASSERT(dp->rx_buf_head == (struct rxbuf *)NULL);
850 ASSERT(dp->rx_buf_tail == (struct rxbuf *)NULL);
851 }
852
853 /*
854 * Prepare rx buffers and put them into the rx buffer/descriptor ring.
855 */
856 static void
857 gem_prepare_rx_buf(struct gem_dev *dp)
858 {
859 int i;
860 int nrbuf;
861 struct rxbuf *rbp;
862
863 ASSERT(mutex_owned(&dp->intrlock));
864
865 /* Now we have no active buffers in rx ring */
866
867 nrbuf = min(dp->gc.gc_rx_ring_size, dp->gc.gc_rx_buf_max);
868 for (i = 0; i < nrbuf; i++) {
869 if ((rbp = gem_get_rxbuf(dp, B_TRUE)) == NULL) {
870 break;
871 }
872 gem_append_rxbuf(dp, rbp);
873 }
874
875 gem_rx_desc_dma_sync(dp,
876 0, dp->gc.gc_rx_ring_size, DDI_DMA_SYNC_FORDEV);
877 }
878
879 /*
880 * Reclaim active rx buffers in rx buffer ring.
881 */
882 static void
883 gem_clean_rx_buf(struct gem_dev *dp)
884 {
885 int i;
886 struct rxbuf *rbp;
887 int rx_ring_size = dp->gc.gc_rx_ring_size;
888 #ifdef GEM_DEBUG_LEVEL
889 int total;
890 #endif
891 ASSERT(mutex_owned(&dp->intrlock));
892
893 DPRINTF(2, (CE_CONT, "!%s: %s: %d buffers are free",
894 dp->name, __func__, dp->rx_buf_freecnt));
895 /*
896 * clean up HW descriptors
897 */
898 for (i = 0; i < rx_ring_size; i++) {
899 (*dp->gc.gc_rx_desc_clean)(dp, i);
900 }
901 gem_rx_desc_dma_sync(dp, 0, rx_ring_size, DDI_DMA_SYNC_FORDEV);
902
903 #ifdef GEM_DEBUG_LEVEL
904 total = 0;
905 #endif
906 /*
907 * Reclaim allocated rx buffers
908 */
909 while ((rbp = dp->rx_buf_head) != NULL) {
910 #ifdef GEM_DEBUG_LEVEL
911 total++;
912 #endif
913 /* remove the first one from rx buffer list */
914 dp->rx_buf_head = rbp->rxb_next;
915
916 /* recycle the rxbuf */
917 gem_free_rxbuf(rbp);
918 }
919 dp->rx_buf_tail = (struct rxbuf *)NULL;
920
921 DPRINTF(2, (CE_CONT,
922 "!%s: %s: %d buffers freed, total: %d free",
923 dp->name, __func__, total, dp->rx_buf_freecnt));
924 }
925
926 /*
927 * Initialize an empty transmit buffer/descriptor ring
928 */
929 static void
930 gem_init_tx_ring(struct gem_dev *dp)
931 {
932 int i;
933 int tx_buf_size = dp->gc.gc_tx_buf_size;
934 int tx_ring_size = dp->gc.gc_tx_ring_size;
935
936 DPRINTF(2, (CE_CONT, "!%s: %s: ring_size:%d, buf_size:%d",
937 dp->name, __func__,
938 dp->gc.gc_tx_ring_size, dp->gc.gc_tx_buf_size));
939
940 ASSERT(!dp->mac_active);
941
942 /* initialize active list and free list */
943 dp->tx_slots_base =
944 SLOT(dp->tx_slots_base + dp->tx_softq_head, tx_buf_size);
945 dp->tx_softq_tail -= dp->tx_softq_head;
946 dp->tx_softq_head = (seqnum_t)0;
947
948 dp->tx_active_head = dp->tx_softq_head;
949 dp->tx_active_tail = dp->tx_softq_head;
950
951 dp->tx_free_head = dp->tx_softq_tail;
952 dp->tx_free_tail = dp->gc.gc_tx_buf_limit;
953
954 dp->tx_desc_head = (seqnum_t)0;
955 dp->tx_desc_tail = (seqnum_t)0;
956 dp->tx_desc_intr = (seqnum_t)0;
957
958 for (i = 0; i < tx_ring_size; i++) {
959 (*dp->gc.gc_tx_desc_init)(dp, i);
960 }
961 gem_tx_desc_dma_sync(dp, 0, tx_ring_size, DDI_DMA_SYNC_FORDEV);
962 }
963
964 __INLINE__
965 static void
966 gem_txbuf_free_dma_resources(struct txbuf *tbp)
967 {
968 if (tbp->txb_mp) {
969 freemsg(tbp->txb_mp);
970 tbp->txb_mp = NULL;
971 }
972 tbp->txb_nfrags = 0;
973 tbp->txb_flag = 0;
974 }
975 #pragma inline(gem_txbuf_free_dma_resources)
976
977 /*
978 * reclaim active tx buffers and reset positions in tx rings.
979 */
980 static void
981 gem_clean_tx_buf(struct gem_dev *dp)
982 {
983 int i;
984 seqnum_t head;
985 seqnum_t tail;
986 seqnum_t sn;
987 struct txbuf *tbp;
988 int tx_ring_size = dp->gc.gc_tx_ring_size;
989 #ifdef GEM_DEBUG_LEVEL
990 int err;
991 #endif
992
993 ASSERT(!dp->mac_active);
994 ASSERT(dp->tx_busy == 0);
995 ASSERT(dp->tx_softq_tail == dp->tx_free_head);
996
997 /*
998 * clean up all HW descriptors
999 */
1000 for (i = 0; i < tx_ring_size; i++) {
1001 (*dp->gc.gc_tx_desc_clean)(dp, i);
1002 }
1003 gem_tx_desc_dma_sync(dp, 0, tx_ring_size, DDI_DMA_SYNC_FORDEV);
1004
1005 /* dequeue all active and loaded buffers */
1006 head = dp->tx_active_head;
1007 tail = dp->tx_softq_tail;
1008
1009 ASSERT(dp->tx_free_head - head >= 0);
1010 tbp = GET_TXBUF(dp, head);
1011 for (sn = head; sn != tail; sn++) {
1012 gem_txbuf_free_dma_resources(tbp);
1013 ASSERT(tbp->txb_mp == NULL);
1014 dp->stats.errxmt++;
1015 tbp = tbp->txb_next;
1016 }
1017
1018 #ifdef GEM_DEBUG_LEVEL
1019 /* ensure that no dma resources for tx are in use now */
1020 err = 0;
1021 while (sn != head + dp->gc.gc_tx_buf_size) {
1022 if (tbp->txb_mp || tbp->txb_nfrags) {
1023 DPRINTF(0, (CE_CONT,
1024 "%s: %s: sn:%d[%d] mp:%p nfrags:%d",
1025 dp->name, __func__,
1026 sn, SLOT(sn, dp->gc.gc_tx_buf_size),
1027 tbp->txb_mp, tbp->txb_nfrags));
1028 err = 1;
1029 }
1030 sn++;
1031 tbp = tbp->txb_next;
1032 }
1033
1034 if (err) {
1035 gem_dump_txbuf(dp, CE_WARN,
1036 "gem_clean_tx_buf: tbp->txb_mp != NULL");
1037 }
1038 #endif
1039 /* recycle buffers, now no active tx buffers in the ring */
1040 dp->tx_free_tail += tail - head;
1041 ASSERT(dp->tx_free_tail == dp->tx_free_head + dp->gc.gc_tx_buf_limit);
1042
1043 /* fix positions in tx buffer rings */
1044 dp->tx_active_head = dp->tx_free_head;
1045 dp->tx_active_tail = dp->tx_free_head;
1046 dp->tx_softq_head = dp->tx_free_head;
1047 dp->tx_softq_tail = dp->tx_free_head;
1048 }
1049
1050 /*
1051 * Reclaim transmitted buffers from tx buffer/descriptor ring.
1052 */
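/*
 * Roughly: scan the active list under xmitlock and collect tx buffers whose
 * descriptors the hardware reports as done, release their dma resources
 * after dropping the lock, and finally advance tx_free_tail so that the
 * buffers can be reused.
 */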
1053 __INLINE__ int
1054 gem_reclaim_txbuf(struct gem_dev *dp)
1055 {
1056 struct txbuf *tbp;
1057 uint_t txstat;
1058 int err = GEM_SUCCESS;
1059 seqnum_t head;
1060 seqnum_t tail;
1061 seqnum_t sn;
1062 seqnum_t desc_head;
1063 int tx_ring_size = dp->gc.gc_tx_ring_size;
1064 uint_t (*tx_desc_stat)(struct gem_dev *dp,
1065 int slot, int ndesc) = dp->gc.gc_tx_desc_stat;
1066 clock_t now;
1067
1068 now = ddi_get_lbolt();
1069 if (now == (clock_t)0) {
1070 /* make non-zero timestamp */
1071 now--;
1072 }
1073
1074 mutex_enter(&dp->xmitlock);
1075
1076 head = dp->tx_active_head;
1077 tail = dp->tx_active_tail;
1078
1079 #if GEM_DEBUG_LEVEL > 2
1080 if (head != tail) {
1081 cmn_err(CE_CONT, "!%s: %s: "
1082 "testing active_head:%d[%d], active_tail:%d[%d]",
1083 dp->name, __func__,
1084 head, SLOT(head, dp->gc.gc_tx_buf_size),
1085 tail, SLOT(tail, dp->gc.gc_tx_buf_size));
1086 }
1087 #endif
1088 #ifdef DEBUG
1089 if (dp->tx_reclaim_busy == 0) {
1090 /* check tx buffer management consistency */
1091 ASSERT(dp->tx_free_tail - dp->tx_active_head
1092 == dp->gc.gc_tx_buf_limit);
1093 /* EMPTY */
1094 }
1095 #endif
1096 dp->tx_reclaim_busy++;
1097
1098 /* sync all active HW descriptors */
1099 gem_tx_desc_dma_sync(dp,
1100 SLOT(dp->tx_desc_head, tx_ring_size),
1101 dp->tx_desc_tail - dp->tx_desc_head,
1102 DDI_DMA_SYNC_FORKERNEL);
1103
1104 tbp = GET_TXBUF(dp, head);
1105 desc_head = dp->tx_desc_head;
1106 for (sn = head; sn != tail;
1107 dp->tx_active_head = (++sn), tbp = tbp->txb_next) {
1108 int ndescs;
1109
1110 ASSERT(tbp->txb_desc == desc_head);
1111
1112 ndescs = tbp->txb_ndescs;
1113 if (ndescs == 0) {
1114 /* skip errored descriptors */
1115 continue;
1116 }
1117 txstat = (*tx_desc_stat)(dp,
1118 SLOT(tbp->txb_desc, tx_ring_size), ndescs);
1119
1120 if (txstat == 0) {
1121 /* not transmitted yet */
1122 break;
1123 }
1124
1125 if (!dp->tx_blocked && (tbp->txb_flag & GEM_TXFLAG_INTR)) {
1126 dp->tx_blocked = now;
1127 }
1128
1129 ASSERT(txstat & (GEM_TX_DONE | GEM_TX_ERR));
1130
1131 if (txstat & GEM_TX_ERR) {
1132 err = GEM_FAILURE;
1133 cmn_err(CE_WARN, "!%s: tx error at desc %d[%d]",
1134 dp->name, sn, SLOT(sn, tx_ring_size));
1135 }
1136 #if GEM_DEBUG_LEVEL > 4
1137 if (now - tbp->txb_stime >= 50) {
1138 cmn_err(CE_WARN, "!%s: tx delay while %d mS",
1139 dp->name, (now - tbp->txb_stime)*10);
1140 }
1141 #endif
1142 /* free transmitted descriptors */
1143 desc_head += ndescs;
1144 }
1145
1146 if (dp->tx_desc_head != desc_head) {
1147 /* we have reclaimed one or more tx buffers */
1148 dp->tx_desc_head = desc_head;
1149
1150 /* If we passed the next interrupt position, update it */
1151 if (desc_head - dp->tx_desc_intr > 0) {
1152 dp->tx_desc_intr = desc_head;
1153 }
1154 }
1155 mutex_exit(&dp->xmitlock);
1156
1157 /* free dma mapping resources associated with transmitted tx buffers */
1158 tbp = GET_TXBUF(dp, head);
1159 tail = sn;
1160 #if GEM_DEBUG_LEVEL > 2
1161 if (head != tail) {
1162 cmn_err(CE_CONT, "%s: freeing head:%d[%d], tail:%d[%d]",
1163 __func__,
1164 head, SLOT(head, dp->gc.gc_tx_buf_size),
1165 tail, SLOT(tail, dp->gc.gc_tx_buf_size));
1166 }
1167 #endif
1168 for (sn = head; sn != tail; sn++, tbp = tbp->txb_next) {
1169 gem_txbuf_free_dma_resources(tbp);
1170 }
1171
1172 /* recycle the tx buffers */
1173 mutex_enter(&dp->xmitlock);
1174 if (--dp->tx_reclaim_busy == 0) {
1175 /* we are the last thread who can update free tail */
1176 #if GEM_DEBUG_LEVEL > 4
1177 /* check that all resources have been deallocated */
1178 sn = dp->tx_free_tail;
1179 tbp = GET_TXBUF(dp, sn);
1180 while (sn != dp->tx_active_head + dp->gc.gc_tx_buf_limit) {
1181 if (tbp->txb_nfrags) {
1182 /* in use */
1183 break;
1184 }
1185 ASSERT(tbp->txb_mp == NULL);
1186 tbp = tbp->txb_next;
1187 sn++;
1188 }
1189 ASSERT(dp->tx_active_head + dp->gc.gc_tx_buf_limit == sn);
1190 #endif
1191 dp->tx_free_tail =
1192 dp->tx_active_head + dp->gc.gc_tx_buf_limit;
1193 }
1194 if (!dp->mac_active) {
1195 /* someone may be waiting for me. */
1196 cv_broadcast(&dp->tx_drain_cv);
1197 }
1198 #if GEM_DEBUG_LEVEL > 2
1199 cmn_err(CE_CONT, "!%s: %s: called, "
1200 "free_head:%d free_tail:%d(+%d) added:%d",
1201 dp->name, __func__,
1202 dp->tx_free_head, dp->tx_free_tail,
1203 dp->tx_free_tail - dp->tx_free_head, tail - head);
1204 #endif
1205 mutex_exit(&dp->xmitlock);
1206
1207 return (err);
1208 }
1209 #pragma inline(gem_reclaim_txbuf)
1210
1211
1212 /*
1213  * Make tx descriptors in an out-of-order manner
1214 */
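/*
 * Note that the 'flags' argument (e.g. GEM_TXFLAG_HEAD) is applied only to
 * the first buffer in the range; the remaining buffers use their own
 * txb_flag values.
 */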
1215 static void
1216 gem_tx_load_descs_oo(struct gem_dev *dp,
1217 seqnum_t start_slot, seqnum_t end_slot, uint64_t flags)
1218 {
1219 seqnum_t sn;
1220 struct txbuf *tbp;
1221 int tx_ring_size = dp->gc.gc_tx_ring_size;
1222 int (*tx_desc_write)
1223 (struct gem_dev *dp, int slot,
1224 ddi_dma_cookie_t *dmacookie,
1225 int frags, uint64_t flag) = dp->gc.gc_tx_desc_write;
1226 clock_t now = ddi_get_lbolt();
1227
1228 sn = start_slot;
1229 tbp = GET_TXBUF(dp, sn);
1230 do {
1231 #if GEM_DEBUG_LEVEL > 1
1232 if (dp->tx_cnt < 100) {
1233 dp->tx_cnt++;
1234 flags |= GEM_TXFLAG_INTR;
1235 }
1236 #endif
1237 /* write a tx descriptor */
1238 tbp->txb_desc = sn;
1239 tbp->txb_ndescs = (*tx_desc_write)(dp,
1240 SLOT(sn, tx_ring_size),
1241 tbp->txb_dmacookie,
1242 tbp->txb_nfrags, flags | tbp->txb_flag);
1243 tbp->txb_stime = now;
1244 ASSERT(tbp->txb_ndescs == 1);
1245
1246 flags = 0;
1247 sn++;
1248 tbp = tbp->txb_next;
1249 } while (sn != end_slot);
1250 }
1251
1252 __INLINE__
1253 static size_t
1254 gem_setup_txbuf_copy(struct gem_dev *dp, mblk_t *mp, struct txbuf *tbp)
1255 {
1256 size_t min_pkt;
1257 caddr_t bp;
1258 size_t off;
1259 mblk_t *tp;
1260 size_t len;
1261 uint64_t flag;
1262
1263 ASSERT(tbp->txb_mp == NULL);
1264
1265 /* we use bounce buffer for the packet */
1266 min_pkt = ETHERMIN;
1267 bp = tbp->txb_buf;
1268 off = 0;
1269 tp = mp;
1270
1271 flag = tbp->txb_flag;
1272 if (flag & GEM_TXFLAG_SWVTAG) {
1273 /* need to increase min packet size */
1274 min_pkt += VTAG_SIZE;
1275 ASSERT((flag & GEM_TXFLAG_VTAG) == 0);
1276 }
1277
1278 /* copy the rest */
1279 for (; tp; tp = tp->b_cont) {
1280 if ((len = (long)tp->b_wptr - (long)tp->b_rptr) > 0) {
1281 bcopy(tp->b_rptr, &bp[off], len);
1282 off += len;
1283 }
1284 }
1285
1286 if (off < min_pkt &&
1287 (min_pkt > ETHERMIN || !dp->gc.gc_tx_auto_pad)) {
1288 /*
1289  * Extend the packet to the minimum packet size explicitly.
1290  * For software vlan packets, we shouldn't use the tx autopad
1291  * function because nics may not be aware of vlan;
1292  * we must keep 46 octets of payload even if we use vlan.
1293 */
1294 bzero(&bp[off], min_pkt - off);
1295 off = min_pkt;
1296 }
1297
1298 (void) ddi_dma_sync(tbp->txb_bdh, (off_t)0, off, DDI_DMA_SYNC_FORDEV);
1299
1300 tbp->txb_dmacookie[0].dmac_laddress = tbp->txb_buf_dma;
1301 tbp->txb_dmacookie[0].dmac_size = off;
1302
1303 DPRINTF(2, (CE_CONT,
1304 "!%s: %s: copy: addr:0x%llx len:0x%x, vtag:0x%04x, min_pkt:%d",
1305 dp->name, __func__,
1306 tbp->txb_dmacookie[0].dmac_laddress,
1307 tbp->txb_dmacookie[0].dmac_size,
1308 (flag & GEM_TXFLAG_VTAG) >> GEM_TXFLAG_VTAG_SHIFT,
1309 min_pkt));
1310
1311 /* save misc info */
1312 tbp->txb_mp = mp;
1313 tbp->txb_nfrags = 1;
1314 #ifdef DEBUG_MULTIFRAGS
1315 if (dp->gc.gc_tx_max_frags >= 3 &&
1316 tbp->txb_dmacookie[0].dmac_size > 16*3) {
1317 tbp->txb_dmacookie[1].dmac_laddress =
1318 tbp->txb_dmacookie[0].dmac_laddress + 16;
1319 tbp->txb_dmacookie[2].dmac_laddress =
1320 tbp->txb_dmacookie[1].dmac_laddress + 16;
1321
1322 tbp->txb_dmacookie[2].dmac_size =
1323 tbp->txb_dmacookie[0].dmac_size - 16*2;
1324 tbp->txb_dmacookie[1].dmac_size = 16;
1325 tbp->txb_dmacookie[0].dmac_size = 16;
1326 tbp->txb_nfrags = 3;
1327 }
1328 #endif
1329 return (off);
1330 }
1331 #pragma inline(gem_setup_txbuf_copy)
1332
1333 __INLINE__
1334 static void
1335 gem_tx_start_unit(struct gem_dev *dp)
1336 {
1337 seqnum_t head;
1338 seqnum_t tail;
1339 struct txbuf *tbp_head;
1340 struct txbuf *tbp_tail;
1341
1342 /* update HW descriptors from soft queue */
1343 ASSERT(mutex_owned(&dp->xmitlock));
1344 ASSERT(dp->tx_softq_head == dp->tx_active_tail);
1345
1346 head = dp->tx_softq_head;
1347 tail = dp->tx_softq_tail;
1348
1349 DPRINTF(1, (CE_CONT,
1350 "%s: %s: called, softq %d %d[+%d], desc %d %d[+%d]",
1351 dp->name, __func__, head, tail, tail - head,
1352 dp->tx_desc_head, dp->tx_desc_tail,
1353 dp->tx_desc_tail - dp->tx_desc_head));
1354
1355 ASSERT(tail - head > 0);
1356
1357 dp->tx_desc_tail = tail;
1358
1359 tbp_head = GET_TXBUF(dp, head);
1360 tbp_tail = GET_TXBUF(dp, tail - 1);
1361
1362 ASSERT(tbp_tail->txb_desc + tbp_tail->txb_ndescs == dp->tx_desc_tail);
1363
1364 dp->gc.gc_tx_start(dp,
1365 SLOT(tbp_head->txb_desc, dp->gc.gc_tx_ring_size),
1366 tbp_tail->txb_desc + tbp_tail->txb_ndescs - tbp_head->txb_desc);
1367
1368 /* advance softq head and active tail */
1369 dp->tx_softq_head = dp->tx_active_tail = tail;
1370 }
1371 #pragma inline(gem_tx_start_unit)
1372
1373 #ifdef GEM_DEBUG_LEVEL
1374 static int gem_send_cnt[10];
1375 #endif
1376 #define PKT_MIN_SIZE (sizeof (struct ether_header) + 10 + VTAG_SIZE)
1377 #define EHLEN (sizeof (struct ether_header))
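/*
 * PKT_MIN_SIZE is the amount of header gem_txbuf_options() may need to
 * inspect: the ethernet header, an optional vlan tag and the first 10 octets
 * of an IP header (up to and including the protocol field).
 */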
1378 /*
1379 * check ether packet type and ip protocol
1380 */
1381 static uint64_t
1382 gem_txbuf_options(struct gem_dev *dp, mblk_t *mp, uint8_t *bp)
1383 {
1384 mblk_t *tp;
1385 ssize_t len;
1386 uint_t vtag;
1387 int off;
1388 uint64_t flag;
1389
1390 flag = 0ULL;
1391
1392 /*
1393  * prepare a contiguous header of the packet for protocol analysis
1394 */
1395 if ((long)mp->b_wptr - (long)mp->b_rptr < PKT_MIN_SIZE) {
1396 /* we use work buffer to copy mblk */
1397 for (tp = mp, off = 0;
1398 tp && (off < PKT_MIN_SIZE);
1399 tp = tp->b_cont, off += len) {
1400 len = (long)tp->b_wptr - (long)tp->b_rptr;
1401 len = min(len, PKT_MIN_SIZE - off);
1402 bcopy(tp->b_rptr, &bp[off], len);
1403 }
1404 } else {
1405 /* we can use mblk without copy */
1406 bp = mp->b_rptr;
1407 }
1408
1409 /* process vlan tag for GLD v3 */
1410 if (GET_NET16(&bp[VTAG_OFF]) == VTAG_TPID) {
1411 if (dp->misc_flag & GEM_VLAN_HARD) {
1412 vtag = GET_NET16(&bp[VTAG_OFF + 2]);
1413 ASSERT(vtag);
1414 flag |= vtag << GEM_TXFLAG_VTAG_SHIFT;
1415 } else {
1416 flag |= GEM_TXFLAG_SWVTAG;
1417 }
1418 }
1419 return (flag);
1420 }
1421 #undef EHLEN
1422 #undef PKT_MIN_SIZE
1423 /*
1424 * gem_send_common is an exported function because hw depend routines may
1425 * use it for sending control frames like setup frames for 2114x chipset.
1426 */
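/*
 * gem_send_common() returns NULL when every packet in mp_head has been queued
 * (or discarded while the mac is suspended); otherwise it returns the list of
 * mblks that could not be accepted yet, so the caller may retry them later.
 */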
1427 mblk_t *
1428 gem_send_common(struct gem_dev *dp, mblk_t *mp_head, uint32_t flags)
1429 {
1430 int nmblk;
1431 int avail;
1432 mblk_t *tp;
1433 mblk_t *mp;
1434 int i;
1435 struct txbuf *tbp;
1436 seqnum_t head;
1437 uint64_t load_flags;
1438 uint64_t len_total = 0;
1439 uint32_t bcast = 0;
1440 uint32_t mcast = 0;
1441
1442 ASSERT(mp_head != NULL);
1443
1444 mp = mp_head;
1445 nmblk = 1;
1446 while ((mp = mp->b_next) != NULL) {
1447 nmblk++;
1448 }
1449 #ifdef GEM_DEBUG_LEVEL
1450 gem_send_cnt[0]++;
1451 gem_send_cnt[min(nmblk, 9)]++;
1452 #endif
1453 /*
1454  * Acquire resources
1455 */
1456 mutex_enter(&dp->xmitlock);
1457 if (dp->mac_suspended) {
1458 mutex_exit(&dp->xmitlock);
1459 mp = mp_head;
1460 while (mp) {
1461 tp = mp->b_next;
1462 freemsg(mp);
1463 mp = tp;
1464 }
1465 return (NULL);
1466 }
1467
1468 if (!dp->mac_active && (flags & GEM_SEND_CTRL) == 0) {
1469 /* don't send data packets while mac isn't active */
1470 /* XXX - should we discard packets? */
1471 mutex_exit(&dp->xmitlock);
1472 return (mp_head);
1473 }
1474
1475 /* allocate free slots */
1476 head = dp->tx_free_head;
1477 avail = dp->tx_free_tail - head;
1478
1479 DPRINTF(2, (CE_CONT,
1480 "!%s: %s: called, free_head:%d free_tail:%d(+%d) req:%d",
1481 dp->name, __func__,
1482 dp->tx_free_head, dp->tx_free_tail, avail, nmblk));
1483
1484 avail = min(avail, dp->tx_max_packets);
1485
1486 if (nmblk > avail) {
1487 if (avail == 0) {
1488 /* no resources; short cut */
1489 DPRINTF(2, (CE_CONT, "!%s: no resources", __func__));
1490 dp->tx_max_packets = max(dp->tx_max_packets - 1, 1);
1491 goto done;
1492 }
1493 nmblk = avail;
1494 }
1495
1496 dp->tx_free_head = head + nmblk;
1497 load_flags = ((dp->tx_busy++) == 0) ? GEM_TXFLAG_HEAD : 0;
1498
1499 /* update last interrupt position if tx buffers are exhausted. */
1500 if (nmblk == avail) {
1501 tbp = GET_TXBUF(dp, head + avail - 1);
1502 tbp->txb_flag = GEM_TXFLAG_INTR;
1503 dp->tx_desc_intr = head + avail;
1504 }
1505 mutex_exit(&dp->xmitlock);
1506
1507 tbp = GET_TXBUF(dp, head);
1508
1509 for (i = nmblk; i > 0; i--, tbp = tbp->txb_next) {
1510 uint8_t *bp;
1511 uint64_t txflag;
1512
1513 /* remove one from the mblk list */
1514 ASSERT(mp_head != NULL);
1515 mp = mp_head;
1516 mp_head = mp_head->b_next;
1517 mp->b_next = NULL;
1518
1519 /* statistics for non-unicast packets */
1520 bp = mp->b_rptr;
1521 if ((bp[0] & 1) && (flags & GEM_SEND_CTRL) == 0) {
1522 if (bcmp(bp, gem_etherbroadcastaddr.ether_addr_octet,
1523 ETHERADDRL) == 0) {
1524 bcast++;
1525 } else {
1526 mcast++;
1527 }
1528 }
1529
1530 /* save misc info */
1531 txflag = tbp->txb_flag;
1532 txflag |= (flags & GEM_SEND_CTRL) << GEM_TXFLAG_PRIVATE_SHIFT;
1533 txflag |= gem_txbuf_options(dp, mp, (uint8_t *)tbp->txb_buf);
1534 tbp->txb_flag = txflag;
1535
1536 len_total += gem_setup_txbuf_copy(dp, mp, tbp);
1537 }
1538
1539 (void) gem_tx_load_descs_oo(dp, head, head + nmblk, load_flags);
1540
1541 /* Append the tbp at the tail of the active tx buffer list */
1542 mutex_enter(&dp->xmitlock);
1543
1544 if ((--dp->tx_busy) == 0) {
1545 /* extend the tail of softq, as new packets are ready. */
1546 dp->tx_softq_tail = dp->tx_free_head;
1547
1548 if (!dp->mac_active && (flags & GEM_SEND_CTRL) == 0) {
1549 /*
1550  * The device status has changed while we were
1551  * preparing tx buffers.
1552  * As we are the last one to make tx non-busy,
1553  * wake up anyone who may be waiting for us.
1554 */
1555 cv_broadcast(&dp->tx_drain_cv);
1556 } else {
1557 ASSERT(dp->tx_softq_tail - dp->tx_softq_head > 0);
1558 gem_tx_start_unit(dp);
1559 }
1560 }
1561 dp->stats.obytes += len_total;
1562 dp->stats.opackets += nmblk;
1563 dp->stats.obcast += bcast;
1564 dp->stats.omcast += mcast;
1565 done:
1566 mutex_exit(&dp->xmitlock);
1567
1568 return (mp_head);
1569 }
1570
1571 /* ========================================================== */
1572 /*
1573 * error detection and restart routines
1574 */
1575 /* ========================================================== */
1576 int
1577 gem_restart_nic(struct gem_dev *dp, uint_t flags)
1578 {
1579 ASSERT(mutex_owned(&dp->intrlock));
1580
1581 DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
1582 #ifdef GEM_DEBUG_LEVEL
1583 #if GEM_DEBUG_LEVEL > 1
1584 gem_dump_txbuf(dp, CE_CONT, "gem_restart_nic");
1585 #endif
1586 #endif
1587
1588 if (dp->mac_suspended) {
1589 /* should we return GEM_FAILURE ? */
1590 return (GEM_FAILURE);
1591 }
1592
1593 /*
1594 * We should avoid calling any routines except xxx_chip_reset
1595 * when we are resuming the system.
1596 */
1597 if (dp->mac_active) {
1598 if (flags & GEM_RESTART_KEEP_BUF) {
1599 /* stop rx gracefully */
1600 dp->rxmode &= ~RXMODE_ENABLE;
1601 (void) (*dp->gc.gc_set_rx_filter)(dp);
1602 }
1603 (void) gem_mac_stop(dp, flags);
1604 }
1605
1606 /* reset the chip. */
1607 if ((*dp->gc.gc_reset_chip)(dp) != GEM_SUCCESS) {
1608 cmn_err(CE_WARN, "%s: %s: failed to reset chip",
1609 dp->name, __func__);
1610 goto err;
1611 }
1612
1613 if (gem_mac_init(dp) != GEM_SUCCESS) {
1614 goto err;
1615 }
1616
1617 /* set up media mode if the link has been up */
1618 if (dp->mii_state == MII_STATE_LINKUP) {
1619 if ((dp->gc.gc_set_media)(dp) != GEM_SUCCESS) {
1620 goto err;
1621 }
1622 }
1623
1624 /* setup mac address and enable rx filter */
1625 dp->rxmode |= RXMODE_ENABLE;
1626 if ((*dp->gc.gc_set_rx_filter)(dp) != GEM_SUCCESS) {
1627 goto err;
1628 }
1629
1630 /*
1631 * XXX - a panic happened because of linkdown.
1632  * We must check mii_state here, because the link can go down just
1633  * before the restart event happens. If the link is down now,
1634  * gem_mac_start() will be called from gem_mii_link_check() when
1635  * the link comes up later.
1636 */
1637 if (dp->mii_state == MII_STATE_LINKUP) {
1638 /* restart the nic */
1639 ASSERT(!dp->mac_active);
1640 (void) gem_mac_start(dp);
1641 }
1642 return (GEM_SUCCESS);
1643 err:
1644 return (GEM_FAILURE);
1645 }
1646
1647
1648 static void
1649 gem_tx_timeout(struct gem_dev *dp)
1650 {
1651 clock_t now;
1652 boolean_t tx_sched;
1653 struct txbuf *tbp;
1654
1655 mutex_enter(&dp->intrlock);
1656
1657 tx_sched = B_FALSE;
1658 now = ddi_get_lbolt();
1659
1660 mutex_enter(&dp->xmitlock);
1661 if (!dp->mac_active || dp->mii_state != MII_STATE_LINKUP) {
1662 mutex_exit(&dp->xmitlock);
1663 goto schedule_next;
1664 }
1665 mutex_exit(&dp->xmitlock);
1666
1667 /* reclaim transmitted buffers to check whether the transmitter hangs or not. */
1668 if (gem_reclaim_txbuf(dp) != GEM_SUCCESS) {
1669 /* tx error happened, reset transmitter in the chip */
1670 (void) gem_restart_nic(dp, 0);
1671 tx_sched = B_TRUE;
1672 dp->tx_blocked = (clock_t)0;
1673
1674 goto schedule_next;
1675 }
1676
1677 mutex_enter(&dp->xmitlock);
1678 /* check if the transmitter thread is stuck */
1679 if (dp->tx_active_head == dp->tx_active_tail) {
1680 /* no tx buffer is loaded to the nic */
1681 if (dp->tx_blocked &&
1682 now - dp->tx_blocked > dp->gc.gc_tx_timeout_interval) {
1683 gem_dump_txbuf(dp, CE_WARN,
1684 "gem_tx_timeout: tx blocked");
1685 tx_sched = B_TRUE;
1686 dp->tx_blocked = (clock_t)0;
1687 }
1688 mutex_exit(&dp->xmitlock);
1689 goto schedule_next;
1690 }
1691
1692 tbp = GET_TXBUF(dp, dp->tx_active_head);
1693 if (now - tbp->txb_stime < dp->gc.gc_tx_timeout) {
1694 mutex_exit(&dp->xmitlock);
1695 goto schedule_next;
1696 }
1697 mutex_exit(&dp->xmitlock);
1698
1699 gem_dump_txbuf(dp, CE_WARN, "gem_tx_timeout: tx timeout");
1700
1701 /* discard untransmitted packet and restart tx. */
1702 (void) gem_restart_nic(dp, GEM_RESTART_NOWAIT);
1703 tx_sched = B_TRUE;
1704 dp->tx_blocked = (clock_t)0;
1705
1706 schedule_next:
1707 mutex_exit(&dp->intrlock);
1708
1709 /* restart the downstream if needed */
1710 if (tx_sched) {
1711 mac_tx_update(dp->mh);
1712 }
1713
1714 DPRINTF(4, (CE_CONT,
1715 "!%s: blocked:%d active_head:%d active_tail:%d desc_intr:%d",
1716 dp->name, BOOLEAN(dp->tx_blocked),
1717 dp->tx_active_head, dp->tx_active_tail, dp->tx_desc_intr));
1718 dp->timeout_id =
1719 timeout((void (*)(void *))gem_tx_timeout,
1720 (void *)dp, dp->gc.gc_tx_timeout_interval);
1721 }
1722
1723 /* ================================================================== */
1724 /*
1725 * Interrupt handler
1726 */
1727 /* ================================================================== */
1728 __INLINE__
1729 static void
1730 gem_append_rxbuf(struct gem_dev *dp, struct rxbuf *rbp_head)
1731 {
1732 struct rxbuf *rbp;
1733 seqnum_t tail;
1734 int rx_ring_size = dp->gc.gc_rx_ring_size;
1735
1736 ASSERT(rbp_head != NULL);
1737 ASSERT(mutex_owned(&dp->intrlock));
1738
1739 DPRINTF(3, (CE_CONT, "!%s: %s: slot_head:%d, slot_tail:%d",
1740 dp->name, __func__, dp->rx_active_head, dp->rx_active_tail));
1741
1742 /*
1743 * Add new buffers into active rx buffer list
1744 */
1745 if (dp->rx_buf_head == NULL) {
1746 dp->rx_buf_head = rbp_head;
1747 ASSERT(dp->rx_buf_tail == NULL);
1748 } else {
1749 dp->rx_buf_tail->rxb_next = rbp_head;
1750 }
1751
1752 tail = dp->rx_active_tail;
1753 for (rbp = rbp_head; rbp; rbp = rbp->rxb_next) {
1754 /* keep track of the tail to notify the lower layer */
1755 dp->rx_buf_tail = rbp;
1756
1757 dp->gc.gc_rx_desc_write(dp,
1758 SLOT(tail, rx_ring_size),
1759 rbp->rxb_dmacookie,
1760 rbp->rxb_nfrags);
1761
1762 dp->rx_active_tail = tail = tail + 1;
1763 }
1764 }
1765 #pragma inline(gem_append_rxbuf)
1766
1767 mblk_t *
1768 gem_get_packet_default(struct gem_dev *dp, struct rxbuf *rbp, size_t len)
1769 {
1770 int rx_header_len = dp->gc.gc_rx_header_len;
1771 uint8_t *bp;
1772 mblk_t *mp;
1773
1774 /* allocate a new mblk */
1775 if (mp = allocb(len + VTAG_SIZE, BPRI_MED)) {
1776 ASSERT(mp->b_next == NULL);
1777 ASSERT(mp->b_cont == NULL);
1778
1779 mp->b_rptr += VTAG_SIZE;
1780 bp = mp->b_rptr;
1781 mp->b_wptr = bp + len;
1782
1783 /*
1784 * flush the range of the entire buffer to invalidate
1785  * all of the corresponding dirty entries in the iocache.
1786 */
1787 (void) ddi_dma_sync(rbp->rxb_dh, rx_header_len,
1788 0, DDI_DMA_SYNC_FORKERNEL);
1789
1790 bcopy(rbp->rxb_buf + rx_header_len, bp, len);
1791 }
1792 return (mp);
1793 }
1794
1795 #ifdef GEM_DEBUG_LEVEL
1796 uint_t gem_rx_pkts[17];
1797 #endif
1798
1799
1800 int
1801 gem_receive(struct gem_dev *dp)
1802 {
1803 uint64_t len_total = 0;
1804 struct rxbuf *rbp;
1805 mblk_t *mp;
1806 int cnt = 0;
1807 uint64_t rxstat;
1808 struct rxbuf *newbufs;
1809 struct rxbuf **newbufs_tailp;
1810 mblk_t *rx_head;
1811 mblk_t **rx_tailp;
1812 int rx_ring_size = dp->gc.gc_rx_ring_size;
1813 seqnum_t active_head;
1814 uint64_t (*rx_desc_stat)(struct gem_dev *dp,
1815 int slot, int ndesc);
1816 int ethermin = ETHERMIN;
1817 int ethermax = dp->mtu + sizeof (struct ether_header);
1818 int rx_header_len = dp->gc.gc_rx_header_len;
1819
1820 ASSERT(mutex_owned(&dp->intrlock));
1821
1822 DPRINTF(3, (CE_CONT, "!%s: gem_receive: rx_buf_head:%p",
1823 dp->name, dp->rx_buf_head));
1824
1825 rx_desc_stat = dp->gc.gc_rx_desc_stat;
1826 newbufs_tailp = &newbufs;
1827 rx_tailp = &rx_head;
1828 for (active_head = dp->rx_active_head;
1829 (rbp = dp->rx_buf_head) != NULL; active_head++) {
1830 int len;
1831 if (cnt == 0) {
1832 cnt = max(dp->poll_pkt_delay*2, 10);
1833 cnt = min(cnt,
1834 dp->rx_active_tail - active_head);
1835 gem_rx_desc_dma_sync(dp,
1836 SLOT(active_head, rx_ring_size),
1837 cnt,
1838 DDI_DMA_SYNC_FORKERNEL);
1839 }
1840
1841 if (rx_header_len > 0) {
1842 (void) ddi_dma_sync(rbp->rxb_dh, 0,
1843 rx_header_len, DDI_DMA_SYNC_FORKERNEL);
1844 }
1845
1846 if (((rxstat = (*rx_desc_stat)(dp,
1847 SLOT(active_head, rx_ring_size),
1848 rbp->rxb_nfrags))
1849 & (GEM_RX_DONE | GEM_RX_ERR)) == 0) {
1850 /* not received yet */
1851 break;
1852 }
1853
1854 /* Remove the head of the rx buffer list */
1855 dp->rx_buf_head = rbp->rxb_next;
1856 cnt--;
1857
1858
1859 if (rxstat & GEM_RX_ERR) {
1860 goto next;
1861 }
1862
1863 len = rxstat & GEM_RX_LEN;
1864 DPRINTF(3, (CE_CONT, "!%s: %s: rxstat:0x%llx, len:0x%x",
1865 dp->name, __func__, rxstat, len));
1866
1867 /*
1868 * Copy the packet
1869 */
1870 if ((mp = dp->gc.gc_get_packet(dp, rbp, len)) == NULL) {
1871 /* no memory, discard the packet */
1872 dp->stats.norcvbuf++;
1873 goto next;
1874 }
1875
1876 /*
1877 * Process VLAN tag
1878 */
1879 ethermin = ETHERMIN;
1880 ethermax = dp->mtu + sizeof (struct ether_header);
1881 if (GET_NET16(mp->b_rptr + VTAG_OFF) == VTAG_TPID) {
1882 ethermax += VTAG_SIZE;
1883 }
1884
1885 /* check packet size */
1886 if (len < ethermin) {
1887 dp->stats.errrcv++;
1888 dp->stats.runt++;
1889 freemsg(mp);
1890 goto next;
1891 }
1892
1893 if (len > ethermax) {
1894 dp->stats.errrcv++;
1895 dp->stats.frame_too_long++;
1896 freemsg(mp);
1897 goto next;
1898 }
1899
1900 len_total += len;
1901
1902 #ifdef GEM_DEBUG_VLAN
1903 if (GET_ETHERTYPE(mp->b_rptr) == VTAG_TPID) {
1904 gem_dump_packet(dp, (char *)__func__, mp, B_TRUE);
1905 }
1906 #endif
1907 /* append the received packet to the temporary rx packet list */
1908 *rx_tailp = mp;
1909 rx_tailp = &mp->b_next;
1910
1911 if (mp->b_rptr[0] & 1) {
1912 if (bcmp(mp->b_rptr,
1913 gem_etherbroadcastaddr.ether_addr_octet,
1914 ETHERADDRL) == 0) {
1915 dp->stats.rbcast++;
1916 } else {
1917 dp->stats.rmcast++;
1918 }
1919 }
1920 next:
1921 ASSERT(rbp != NULL);
1922
1923 /* append the new one to the temporary new buffer list */
1924 *newbufs_tailp = rbp;
1925 newbufs_tailp = &rbp->rxb_next;
1926 }
1927
1928 /* advance rx_active_head */
1929 if ((cnt = active_head - dp->rx_active_head) > 0) {
1930 dp->stats.rbytes += len_total;
1931 dp->stats.rpackets += cnt;
1932 }
1933 dp->rx_active_head = active_head;
1934
1935 /* terminate the working list */
1936 *newbufs_tailp = NULL;
1937 *rx_tailp = NULL;
1938
1939 if (dp->rx_buf_head == NULL) {
1940 dp->rx_buf_tail = NULL;
1941 }
1942
1943 DPRINTF(4, (CE_CONT, "%s: %s: cnt:%d, rx_head:%p",
1944 dp->name, __func__, cnt, rx_head));
1945
1946 if (newbufs) {
1947 /*
1948  * refill the rx list with new buffers
1949 */
1950 seqnum_t head;
1951
1952 /* save current tail */
1953 head = dp->rx_active_tail;
1954 gem_append_rxbuf(dp, newbufs);
1955
1956 /* call the hw-dependent start routine if we have one. */
1957 dp->gc.gc_rx_start(dp,
1958 SLOT(head, rx_ring_size), dp->rx_active_tail - head);
1959 }
1960
1961 if (rx_head) {
1962 /*
1963 * send up received packets
1964 */
1965 mutex_exit(&dp->intrlock);
1966 mac_rx(dp->mh, NULL, rx_head);
1967 mutex_enter(&dp->intrlock);
1968 }
1969
1970 #ifdef GEM_DEBUG_LEVEL
1971 gem_rx_pkts[min(cnt, sizeof (gem_rx_pkts)/sizeof (uint_t)-1)]++;
1972 #endif
1973 return (cnt);
1974 }
1975
1976 boolean_t
1977 gem_tx_done(struct gem_dev *dp)
1978 {
1979 boolean_t tx_sched = B_FALSE;
1980
1981 if (gem_reclaim_txbuf(dp) != GEM_SUCCESS) {
1982 (void) gem_restart_nic(dp, GEM_RESTART_KEEP_BUF);
1983 DPRINTF(2, (CE_CONT, "!%s: gem_tx_done: tx_desc: %d %d",
1984 dp->name, dp->tx_active_head, dp->tx_active_tail));
1985 tx_sched = B_TRUE;
1986 goto x;
1987 }
1988
1989 mutex_enter(&dp->xmitlock);
1990
1991 /* XXX - we must not have any packets in soft queue */
1992 ASSERT(dp->tx_softq_head == dp->tx_softq_tail);
1993 /*
1994  * If we won't have a chance to get more free tx buffers, and are blocked,
1995  * it is worth rescheduling the downstream, i.e. the tx side.
1996 */
1997 ASSERT(dp->tx_desc_intr - dp->tx_desc_head >= 0);
1998 if (dp->tx_blocked && dp->tx_desc_intr == dp->tx_desc_head) {
1999 /*
2000 * As no further tx-done interrupts are scheduled, this
2001 * is the last chance to kick tx side, which may be
2002 * blocked now, otherwise the tx side never works again.
2003 */
2004 tx_sched = B_TRUE;
2005 dp->tx_blocked = (clock_t)0;
2006 dp->tx_max_packets =
2007 min(dp->tx_max_packets + 2, dp->gc.gc_tx_buf_limit);
2008 }
2009
2010 mutex_exit(&dp->xmitlock);
2011
2012 DPRINTF(3, (CE_CONT, "!%s: %s: ret: blocked:%d",
2013 dp->name, __func__, BOOLEAN(dp->tx_blocked)));
2014 x:
2015 return (tx_sched);
2016 }
2017
2018 static uint_t
2019 gem_intr(struct gem_dev *dp)
2020 {
2021 uint_t ret;
2022
2023 mutex_enter(&dp->intrlock);
2024 if (dp->mac_suspended) {
2025 mutex_exit(&dp->intrlock);
2026 return (DDI_INTR_UNCLAIMED);
2027 }
2028 dp->intr_busy = B_TRUE;
2029
2030 ret = (*dp->gc.gc_interrupt)(dp);
2031
2032 if (ret == DDI_INTR_UNCLAIMED) {
2033 dp->intr_busy = B_FALSE;
2034 mutex_exit(&dp->intrlock);
2035 return (ret);
2036 }
2037
2038 if (!dp->mac_active) {
2039 cv_broadcast(&dp->tx_drain_cv);
2040 }
2041
2042
2043 dp->stats.intr++;
2044 dp->intr_busy = B_FALSE;
2045
2046 mutex_exit(&dp->intrlock);
2047
2048 if (ret & INTR_RESTART_TX) {
2049 DPRINTF(4, (CE_CONT, "!%s: calling mac_tx_update", dp->name));
2050 mac_tx_update(dp->mh);
2051 ret &= ~INTR_RESTART_TX;
2052 }
2053 return (ret);
2054 }
2055
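/*
 * Note: gem_intr_watcher() below acts as a polling substitute for a
 * hardware interrupt: it invokes gem_intr() once and then re-arms itself
 * with a one-tick timeout. It is presumably armed only when the real
 * interrupt line cannot be used; the code that enables it is elsewhere
 * in this framework.
 */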
2056 static void
2057 gem_intr_watcher(struct gem_dev *dp)
2058 {
2059 (void) gem_intr(dp);
2060
2061 /* schedule the next call of gem_intr_watcher */
2062 dp->intr_watcher_id =
2063 timeout((void (*)(void *))gem_intr_watcher, (void *)dp, 1);
2064 }
2065
2066 /* ======================================================================== */
2067 /*
2068 * MII support routines
2069 */
2070 /* ======================================================================== */
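/*
 * gem_choose_forcedmode() below selects the forced speed/duplex setting
 * from the advertised ability flags when auto-negotiation is not used:
 * the fastest enabled speed wins (1000 > 100 > 10), and full duplex is
 * chosen when the corresponding *fdx flag is set.
 */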
2071 static void
2072 gem_choose_forcedmode(struct gem_dev *dp)
2073 {
2074 /* choose media mode */
2075 if (dp->anadv_1000fdx || dp->anadv_1000hdx) {
2076 dp->speed = GEM_SPD_1000;
2077 dp->full_duplex = dp->anadv_1000fdx;
2078 } else if (dp->anadv_100fdx || dp->anadv_100t4) {
2079 dp->speed = GEM_SPD_100;
2080 dp->full_duplex = B_TRUE;
2081 } else if (dp->anadv_100hdx) {
2082 dp->speed = GEM_SPD_100;
2083 dp->full_duplex = B_FALSE;
2084 } else {
2085 dp->speed = GEM_SPD_10;
2086 dp->full_duplex = dp->anadv_10fdx;
2087 }
2088 }
2089
2090 uint16_t
2091 gem_mii_read(struct gem_dev *dp, uint_t reg)
2092 {
2093 if ((dp->mii_status & MII_STATUS_MFPRMBLSUPR) == 0) {
2094 (*dp->gc.gc_mii_sync)(dp);
2095 }
2096 return ((*dp->gc.gc_mii_read)(dp, reg));
2097 }
2098
2099 void
2100 gem_mii_write(struct gem_dev *dp, uint_t reg, uint16_t val)
2101 {
2102 if ((dp->mii_status & MII_STATUS_MFPRMBLSUPR) == 0) {
2103 (*dp->gc.gc_mii_sync)(dp);
2104 }
2105 (*dp->gc.gc_mii_write)(dp, reg, val);
2106 }
2107
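/*
 * fc_cap_decode() maps the PAUSE/ASMPAUSE bits of an MII ability register
 * value into a 2-bit index (bit0: symmetric pause, bit1: asymmetric
 * pause). The same 0..3 encoding is used by the fc_cap_encode[] table in
 * gem_mii_config_default() and by the gem_fc_result[][] table further
 * below to resolve the negotiated flow control mode.
 */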
2108 #define fc_cap_decode(x) \
2109 ((((x) & MII_ABILITY_PAUSE) ? 1 : 0) | \
2110 (((x) & MII_ABILITY_ASMPAUSE) ? 2 : 0))
2111
2112 int
2113 gem_mii_config_default(struct gem_dev *dp)
2114 {
2115 uint16_t mii_stat;
2116 uint16_t val;
2117 static uint16_t fc_cap_encode[4] = {
2118 0, /* none */
2119 MII_ABILITY_PAUSE, /* symmetric */
2120 MII_ABILITY_ASMPAUSE, /* tx */
2121 MII_ABILITY_PAUSE | MII_ABILITY_ASMPAUSE, /* rx-symmetric */
2122 };
2123
2124 DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
2125
2126 /*
2127 * Configure bits in advertisement register
2128 */
2129 mii_stat = dp->mii_status;
2130
2131 DPRINTF(1, (CE_CONT, "!%s: %s: MII_STATUS reg:%b",
2132 dp->name, __func__, mii_stat, MII_STATUS_BITS));
2133
2134 if ((mii_stat & MII_STATUS_ABILITY_TECH) == 0) {
2135 /* strange: no technology ability bits are set */
2136 cmn_err(CE_WARN, "!%s: wrong ability bits: mii_status:%b",
2137 dp->name, mii_stat, MII_STATUS_BITS);
2138 return (GEM_FAILURE);
2139 }
2140
2141 /* Do not change the rest of the ability bits in the advert reg */
2142 val = gem_mii_read(dp, MII_AN_ADVERT) & ~MII_ABILITY_ALL;
2143
2144 DPRINTF(0, (CE_CONT,
2145 "!%s: %s: 100T4:%d 100F:%d 100H:%d 10F:%d 10H:%d",
2146 dp->name, __func__,
2147 dp->anadv_100t4, dp->anadv_100fdx, dp->anadv_100hdx,
2148 dp->anadv_10fdx, dp->anadv_10hdx));
2149
2150 if (dp->anadv_100t4) {
2151 val |= MII_ABILITY_100BASE_T4;
2152 }
2153 if (dp->anadv_100fdx) {
2154 val |= MII_ABILITY_100BASE_TX_FD;
2155 }
2156 if (dp->anadv_100hdx) {
2157 val |= MII_ABILITY_100BASE_TX;
2158 }
2159 if (dp->anadv_10fdx) {
2160 val |= MII_ABILITY_10BASE_T_FD;
2161 }
2162 if (dp->anadv_10hdx) {
2163 val |= MII_ABILITY_10BASE_T;
2164 }
2165
2166 /* set flow control capability */
2167 val |= fc_cap_encode[dp->anadv_flow_control];
2168
2169 DPRINTF(0, (CE_CONT,
2170 "!%s: %s: setting MII_AN_ADVERT reg:%b, mii_mode:%d, fc:%d",
2171 dp->name, __func__, val, MII_ABILITY_BITS, dp->gc.gc_mii_mode,
2172 dp->anadv_flow_control));
2173
2174 gem_mii_write(dp, MII_AN_ADVERT, val);
2175
2176 if (mii_stat & MII_STATUS_XSTATUS) {
2177 /*
2178 * 1000Base-T GMII support
2179 */
2180 if (!dp->anadv_autoneg) {
2181 /* enable manual configuration */
2182 val = MII_1000TC_CFG_EN;
2183 } else {
2184 val = 0;
2185 if (dp->anadv_1000fdx) {
2186 val |= MII_1000TC_ADV_FULL;
2187 }
2188 if (dp->anadv_1000hdx) {
2189 val |= MII_1000TC_ADV_HALF;
2190 }
2191 }
2192 DPRINTF(0, (CE_CONT,
2193 "!%s: %s: setting MII_1000TC reg:%b",
2194 dp->name, __func__, val, MII_1000TC_BITS));
2195
2196 gem_mii_write(dp, MII_1000TC, val);
2197 }
2198
2199 return (GEM_SUCCESS);
2200 }
2201
2202 #define GEM_LINKUP(dp) mac_link_update((dp)->mh, LINK_STATE_UP)
2203 #define GEM_LINKDOWN(dp) mac_link_update((dp)->mh, LINK_STATE_DOWN)
2204
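/*
 * gem_fc_result[my cap][lp cap] resolves the flow control mode to use
 * from the local and link-partner pause capabilities, each encoded by
 * fc_cap_decode() (0: none, 1: symmetric, 2: tx/asymmetric,
 * 3: rx/symmetric). The entries follow the usual IEEE 802.3 pause
 * resolution rules.
 */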
2205 static uint8_t gem_fc_result[4 /* my cap */ ][4 /* lp cap */] = {
2206 /* none symm tx rx/symm */
2207 /* none */
2208 {FLOW_CONTROL_NONE,
2209 FLOW_CONTROL_NONE,
2210 FLOW_CONTROL_NONE,
2211 FLOW_CONTROL_NONE},
2212 /* sym */
2213 {FLOW_CONTROL_NONE,
2214 FLOW_CONTROL_SYMMETRIC,
2215 FLOW_CONTROL_NONE,
2216 FLOW_CONTROL_SYMMETRIC},
2217 /* tx */
2218 {FLOW_CONTROL_NONE,
2219 FLOW_CONTROL_NONE,
2220 FLOW_CONTROL_NONE,
2221 FLOW_CONTROL_TX_PAUSE},
2222 /* rx/symm */
2223 {FLOW_CONTROL_NONE,
2224 FLOW_CONTROL_SYMMETRIC,
2225 FLOW_CONTROL_RX_PAUSE,
2226 FLOW_CONTROL_SYMMETRIC},
2227 };
2228
2229 static char *gem_fc_type[] = {
2230 "without",
2231 "with symmetric",
2232 "with tx",
2233 "with rx",
2234 };
2235
2236 boolean_t
2237 gem_mii_link_check(struct gem_dev *dp)
2238 {
2239 uint16_t old_mii_state;
2240 boolean_t tx_sched = B_FALSE;
2241 uint16_t status;
2242 uint16_t advert;
2243 uint16_t lpable;
2244 uint16_t exp;
2245 uint16_t ctl1000;
2246 uint16_t stat1000;
2247 uint16_t val;
2248 clock_t now;
2249 clock_t diff;
2250 int linkdown_action;
2251 boolean_t fix_phy = B_FALSE;
2252
2253 now = ddi_get_lbolt();
2254 old_mii_state = dp->mii_state;
2255
2256 DPRINTF(3, (CE_CONT, "!%s: %s: time:%d state:%d",
2257 dp->name, __func__, now, dp->mii_state));
2258
2259 diff = now - dp->mii_last_check;
2260 dp->mii_last_check = now;
2261
2262 /*
2263 * For NWAM, don't show linkdown state right
2264 * after the system boots
2265 */
2266 if (dp->linkup_delay > 0) {
2267 if (dp->linkup_delay > diff) {
2268 dp->linkup_delay -= diff;
2269 } else {
2270 /* link up timeout */
2271 dp->linkup_delay = -1;
2272 }
2273 }
2274
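/*
 * The switch below implements the MII link state machine. The normal
 * flow is UNKNOWN -> RESETTING -> AUTONEGOTIATING -> AN_DONE ->
 * MEDIA_SETUP -> LINKDOWN <-> LINKUP; mii_timer counts down the time
 * spent in the current state and mii_interval selects when the watcher
 * runs next. When auto-negotiation is disabled, the negotiation states
 * are skipped and the forced mode is applied in MEDIA_SETUP.
 */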
2275 next_nowait:
2276 switch (dp->mii_state) {
2277 case MII_STATE_UNKNOWN:
2278 /* power-up, DP83840 requires 32 sync bits */
2279 (*dp->gc.gc_mii_sync)(dp);
2280 goto reset_phy;
2281
2282 case MII_STATE_RESETTING:
2283 dp->mii_timer -= diff;
2284 if (dp->mii_timer > 0) {
2285 /* don't read phy registers while resetting */
2286 dp->mii_interval = WATCH_INTERVAL_FAST;
2287 goto next;
2288 }
2289
2290 /* Timer expired, ensure reset bit is not set */
2291
2292 if (dp->mii_status & MII_STATUS_MFPRMBLSUPR) {
2293 /* some phys need sync bits after reset */
2294 (*dp->gc.gc_mii_sync)(dp);
2295 }
2296 val = gem_mii_read(dp, MII_CONTROL);
2297 if (val & MII_CONTROL_RESET) {
2298 cmn_err(CE_NOTE,
2299 "!%s: time:%ld resetting phy not complete."
2300 " mii_control:0x%b",
2301 dp->name, ddi_get_lbolt(),
2302 val, MII_CONTROL_BITS);
2303 }
2304
2305 /* ensure neither isolated nor pwrdown nor auto-nego mode */
2306 /* XXX -- this operation is required for NS DP83840A. */
2307 gem_mii_write(dp, MII_CONTROL, 0);
2308
2309 /* As resetting PHY has completed, configure PHY registers */
2310 if ((*dp->gc.gc_mii_config)(dp) != GEM_SUCCESS) {
2311 /* we failed to configure PHY. */
2312 goto reset_phy;
2313 }
2314
2315 /* mii_config may disable autonegotiation */
2316 gem_choose_forcedmode(dp);
2317
2318 dp->mii_lpable = 0;
2319 dp->mii_advert = 0;
2320 dp->mii_exp = 0;
2321 dp->mii_ctl1000 = 0;
2322 dp->mii_stat1000 = 0;
2323 dp->flow_control = FLOW_CONTROL_NONE;
2324
2325 if (!dp->anadv_autoneg) {
2326 /* skip auto-negotiation phase */
2327 dp->mii_state = MII_STATE_MEDIA_SETUP;
2328 dp->mii_timer = 0;
2329 dp->mii_interval = 0;
2330 goto next_nowait;
2331 }
2332
2333 /* Issue auto-negotiation command */
2334 goto autonego;
2335
2336 case MII_STATE_AUTONEGOTIATING:
2337 /*
2338 * Autonegotiation is in progress
2339 */
2340 dp->mii_timer -= diff;
2341 if (dp->mii_timer -
2342 (dp->gc.gc_mii_an_timeout
2343 - dp->gc.gc_mii_an_wait) > 0) {
2344 /*
2345 * wait for a while, typically autonegotiation
2346 * completes in 2.3 - 2.5 sec.
2347 */
2348 dp->mii_interval = WATCH_INTERVAL_FAST;
2349 goto next;
2350 }
2351
2352 /* read PHY status */
2353 status = gem_mii_read(dp, MII_STATUS);
2354 DPRINTF(4, (CE_CONT,
2355 "!%s: %s: called: mii_state:%d MII_STATUS reg:%b",
2356 dp->name, __func__, dp->mii_state,
2357 status, MII_STATUS_BITS));
2358
2359 if (status & MII_STATUS_REMFAULT) {
2360 /*
2361 * The link partner reported that something went wrong.
2362 * What should we do?
2363 */
2364 cmn_err(CE_CONT,
2365 "!%s: auto-negotiation failed: remote fault",
2366 dp->name);
2367 goto autonego;
2368 }
2369
2370 if ((status & MII_STATUS_ANDONE) == 0) {
2371 if (dp->mii_timer <= 0) {
2372 /*
2373 * Auto-negotiation timed out;
2374 * try again w/o resetting phy.
2375 */
2376 if (!dp->mii_supress_msg) {
2377 cmn_err(CE_WARN,
2378 "!%s: auto-negotiation failed: timeout",
2379 dp->name);
2380 dp->mii_supress_msg = B_TRUE;
2381 }
2382 goto autonego;
2383 }
2384 /*
2385 * Auto-negotiation is in progress. Wait.
2386 */
2387 dp->mii_interval = dp->gc.gc_mii_an_watch_interval;
2388 goto next;
2389 }
2390
2391 /*
2392 * Auto-negotiation has completed.
2393 * Assume linkdown and fall through.
2394 */
2395 dp->mii_supress_msg = B_FALSE;
2396 dp->mii_state = MII_STATE_AN_DONE;
2397 DPRINTF(0, (CE_CONT,
2398 "!%s: auto-negotiation completed, MII_STATUS:%b",
2399 dp->name, status, MII_STATUS_BITS));
2400
2401 if (dp->gc.gc_mii_an_delay > 0) {
2402 dp->mii_timer = dp->gc.gc_mii_an_delay;
2403 dp->mii_interval = drv_usectohz(20*1000);
2404 goto next;
2405 }
2406
2407 dp->mii_timer = 0;
2408 diff = 0;
2409 goto next_nowait;
2410
2411 case MII_STATE_AN_DONE:
2412 /*
2413 * Auto-negotiation has completed. Now we can set up the media.
2414 */
2415 dp->mii_timer -= diff;
2416 if (dp->mii_timer > 0) {
2417 /* wait for a while */
2418 dp->mii_interval = WATCH_INTERVAL_FAST;
2419 goto next;
2420 }
2421
2422 /*
2423 * set up the result of auto negotiation
2424 */
2425
2426 /*
2427 * Read the registers required to determine the current
2428 * duplex mode and media speed.
2429 */
2430 if (dp->gc.gc_mii_an_delay > 0) {
2431 /*
2432 * As the link watcher context has been suspended,
2433 * 'status' is stale. We must re-read the status register here.
2434 */
2435 status = gem_mii_read(dp, MII_STATUS);
2436 }
2437 advert = gem_mii_read(dp, MII_AN_ADVERT);
2438 lpable = gem_mii_read(dp, MII_AN_LPABLE);
2439 exp = gem_mii_read(dp, MII_AN_EXPANSION);
2440 if (exp == 0xffff) {
2441 /* some phys don't have exp register */
2442 exp = 0;
2443 }
2444 ctl1000 = 0;
2445 stat1000 = 0;
2446 if (dp->mii_status & MII_STATUS_XSTATUS) {
2447 ctl1000 = gem_mii_read(dp, MII_1000TC);
2448 stat1000 = gem_mii_read(dp, MII_1000TS);
2449 }
2450 dp->mii_lpable = lpable;
2451 dp->mii_advert = advert;
2452 dp->mii_exp = exp;
2453 dp->mii_ctl1000 = ctl1000;
2454 dp->mii_stat1000 = stat1000;
2455
2456 cmn_err(CE_CONT,
2457 "!%s: auto-negotiation done, advert:%b, lpable:%b, exp:%b",
2458 dp->name,
2459 advert, MII_ABILITY_BITS,
2460 lpable, MII_ABILITY_BITS,
2461 exp, MII_AN_EXP_BITS);
2462
2463 if (dp->mii_status & MII_STATUS_XSTATUS) {
2464 cmn_err(CE_CONT,
2465 "! MII_1000TC:%b, MII_1000TS:%b",
2466 ctl1000, MII_1000TC_BITS,
2467 stat1000, MII_1000TS_BITS);
2468 }
2469
2470 if (gem_population(lpable) <= 1 &&
2471 (exp & MII_AN_EXP_LPCANAN) == 0) {
2472 if ((advert & MII_ABILITY_TECH) != lpable) {
2473 cmn_err(CE_WARN,
2474 "!%s: but the link partnar doesn't seem"
2475 " to have auto-negotiation capability."
2476 " please check the link configuration.",
2477 dp->name);
2478 }
2479 /*
2480 * it should be the result of parallel detection, which
2481 * cannot detect duplex mode.
2482 */
2483 if (lpable & MII_ABILITY_100BASE_TX) {
2484 /*
2485 * we prefer full duplex mode for 100Mbps
2486 * connection, if we can.
2487 */
2488 lpable |= advert & MII_ABILITY_100BASE_TX_FD;
2489 }
2490
2491 if ((advert & lpable) == 0 &&
2492 lpable & MII_ABILITY_10BASE_T) {
2493 lpable |= advert & MII_ABILITY_10BASE_T_FD;
2494 }
2495 /*
2496 * as the link partner isn't capable of auto-negotiation, use
2497 * a fixed mode temporarily.
2498 */
2499 fix_phy = B_TRUE;
2500 } else if (lpable == 0) {
2501 cmn_err(CE_WARN, "!%s: wrong lpable.", dp->name);
2502 goto reset_phy;
2503 }
2504 /*
2505 * configure current link mode according to AN priority.
2506 */
2507 val = advert & lpable;
2508 if ((ctl1000 & MII_1000TC_ADV_FULL) &&
2509 (stat1000 & MII_1000TS_LP_FULL)) {
2510 /* 1000BaseT & full duplex */
2511 dp->speed = GEM_SPD_1000;
2512 dp->full_duplex = B_TRUE;
2513 } else if ((ctl1000 & MII_1000TC_ADV_HALF) &&
2514 (stat1000 & MII_1000TS_LP_HALF)) {
2515 /* 1000BaseT & half duplex */
2516 dp->speed = GEM_SPD_1000;
2517 dp->full_duplex = B_FALSE;
2518 } else if (val & MII_ABILITY_100BASE_TX_FD) {
2519 /* 100BaseTx & full duplex */
2520 dp->speed = GEM_SPD_100;
2521 dp->full_duplex = B_TRUE;
2522 } else if (val & MII_ABILITY_100BASE_T4) {
2523 /* 100BaseT4 & full duplex */
2524 dp->speed = GEM_SPD_100;
2525 dp->full_duplex = B_TRUE;
2526 } else if (val & MII_ABILITY_100BASE_TX) {
2527 /* 100BaseTx & half duplex */
2528 dp->speed = GEM_SPD_100;
2529 dp->full_duplex = B_FALSE;
2530 } else if (val & MII_ABILITY_10BASE_T_FD) {
2531 /* 10BaseT & full duplex */
2532 dp->speed = GEM_SPD_10;
2533 dp->full_duplex = B_TRUE;
2534 } else if (val & MII_ABILITY_10BASE_T) {
2535 /* 10BaseT & half duplex */
2536 dp->speed = GEM_SPD_10;
2537 dp->full_duplex = B_FALSE;
2538 } else {
2539 /*
2540 * It seems that the link partner doesn't have
2541 * auto-negotiation capability and our PHY
2542 * could not report the correct current mode.
2543 * We guess the current mode from the mii_control register.
2544 */
2545 val = gem_mii_read(dp, MII_CONTROL);
2546
2547 /* select 100m full or 10m half */
2548 dp->speed = (val & MII_CONTROL_100MB) ?
2549 GEM_SPD_100 : GEM_SPD_10;
2550 dp->full_duplex = dp->speed != GEM_SPD_10;
2551 fix_phy = B_TRUE;
2552
2553 cmn_err(CE_NOTE,
2554 "!%s: auto-negotiation done but "
2555 "common ability not found.\n"
2556 "PHY state: control:%b advert:%b lpable:%b\n"
2557 "guessing %d Mbps %s duplex mode",
2558 dp->name,
2559 val, MII_CONTROL_BITS,
2560 advert, MII_ABILITY_BITS,
2561 lpable, MII_ABILITY_BITS,
2562 gem_speed_value[dp->speed],
2563 dp->full_duplex ? "full" : "half");
2564 }
2565
2566 if (dp->full_duplex) {
2567 dp->flow_control =
2568 gem_fc_result[fc_cap_decode(advert)]
2569 [fc_cap_decode(lpable)];
2570 } else {
2571 dp->flow_control = FLOW_CONTROL_NONE;
2572 }
2573 dp->mii_state = MII_STATE_MEDIA_SETUP;
2574 /* FALLTHROUGH */
2575
2576 case MII_STATE_MEDIA_SETUP:
2577 dp->mii_state = MII_STATE_LINKDOWN;
2578 dp->mii_timer = dp->gc.gc_mii_linkdown_timeout;
2579 DPRINTF(2, (CE_CONT, "!%s: setup media mode done", dp->name));
2580 dp->mii_supress_msg = B_FALSE;
2581
2582 /* use short interval */
2583 dp->mii_interval = WATCH_INTERVAL_FAST;
2584
2585 if ((!dp->anadv_autoneg) ||
2586 dp->gc.gc_mii_an_oneshot || fix_phy) {
2587
2588 /*
2589 * write specified mode to phy.
2590 */
2591 val = gem_mii_read(dp, MII_CONTROL);
2592 val &= ~(MII_CONTROL_SPEED | MII_CONTROL_FDUPLEX |
2593 MII_CONTROL_ANE | MII_CONTROL_RSAN);
2594
2595 if (dp->full_duplex) {
2596 val |= MII_CONTROL_FDUPLEX;
2597 }
2598
2599 switch (dp->speed) {
2600 case GEM_SPD_1000:
2601 val |= MII_CONTROL_1000MB;
2602 break;
2603
2604 case GEM_SPD_100:
2605 val |= MII_CONTROL_100MB;
2606 break;
2607
2608 default:
2609 cmn_err(CE_WARN, "%s: unknown speed:%d",
2610 dp->name, dp->speed);
2611 /* FALLTHROUGH */
2612 case GEM_SPD_10:
2613 /* for GEM_SPD_10, do nothing */
2614 break;
2615 }
2616
2617 if (dp->mii_status & MII_STATUS_XSTATUS) {
2618 gem_mii_write(dp,
2619 MII_1000TC, MII_1000TC_CFG_EN);
2620 }
2621 gem_mii_write(dp, MII_CONTROL, val);
2622 }
2623
2624 if (dp->nic_state >= NIC_STATE_INITIALIZED) {
2625 /* notify the result of auto-negotiation to mac */
2626 (*dp->gc.gc_set_media)(dp);
2627 }
2628
2629 if ((void *)dp->gc.gc_mii_tune_phy) {
2630 /* for built-in sis900 */
2631 /* XXX - this code should be removed. */
2632 (*dp->gc.gc_mii_tune_phy)(dp);
2633 }
2634
2635 goto next_nowait;
2636
2637 case MII_STATE_LINKDOWN:
2638 status = gem_mii_read(dp, MII_STATUS);
2639 if (status & MII_STATUS_LINKUP) {
2640 /*
2641 * Link going up
2642 */
2643 dp->mii_state = MII_STATE_LINKUP;
2644 dp->mii_supress_msg = B_FALSE;
2645
2646 DPRINTF(0, (CE_CONT,
2647 "!%s: link up detected: mii_stat:%b",
2648 dp->name, status, MII_STATUS_BITS));
2649
2650 /*
2651 * MII_CONTROL_100MB and MII_CONTROL_FDUPLEX are
2652 * ignored when MII_CONTROL_ANE is set.
2653 */
2654 cmn_err(CE_CONT,
2655 "!%s: Link up: %d Mbps %s duplex %s flow control",
2656 dp->name,
2657 gem_speed_value[dp->speed],
2658 dp->full_duplex ? "full" : "half",
2659 gem_fc_type[dp->flow_control]);
2660
2661 dp->mii_interval = dp->gc.gc_mii_link_watch_interval;
2662
2663 /* XXX - we need another timer to watch statistics */
2664 if (dp->gc.gc_mii_hw_link_detection &&
2665 dp->nic_state == NIC_STATE_ONLINE) {
2666 dp->mii_interval = 0;
2667 }
2668
2669 if (dp->nic_state == NIC_STATE_ONLINE) {
2670 if (!dp->mac_active) {
2671 (void) gem_mac_start(dp);
2672 }
2673 tx_sched = B_TRUE;
2674 }
2675 goto next;
2676 }
2677
2678 dp->mii_supress_msg = B_TRUE;
2679 if (dp->anadv_autoneg) {
2680 dp->mii_timer -= diff;
2681 if (dp->mii_timer <= 0) {
2682 /*
2683 * link down timer expired.
2684 * need to restart auto-negotiation.
2685 */
2686 linkdown_action =
2687 dp->gc.gc_mii_linkdown_timeout_action;
2688 goto restart_autonego;
2689 }
2690 }
2691 /* don't change mii_state */
2692 break;
2693
2694 case MII_STATE_LINKUP:
2695 status = gem_mii_read(dp, MII_STATUS);
2696 if ((status & MII_STATUS_LINKUP) == 0) {
2697 /*
2698 * Link going down
2699 */
2700 cmn_err(CE_NOTE,
2701 "!%s: link down detected: mii_stat:%b",
2702 dp->name, status, MII_STATUS_BITS);
2703
2704 if (dp->nic_state == NIC_STATE_ONLINE &&
2705 dp->mac_active &&
2706 dp->gc.gc_mii_stop_mac_on_linkdown) {
2707 (void) gem_mac_stop(dp, 0);
2708
2709 if (dp->tx_blocked) {
2710 /* drain tx */
2711 tx_sched = B_TRUE;
2712 }
2713 }
2714
2715 if (dp->anadv_autoneg) {
2716 /* need to restart auto-negotiation */
2717 linkdown_action = dp->gc.gc_mii_linkdown_action;
2718 goto restart_autonego;
2719 }
2720
2721 dp->mii_state = MII_STATE_LINKDOWN;
2722 dp->mii_timer = dp->gc.gc_mii_linkdown_timeout;
2723
2724 if ((void *)dp->gc.gc_mii_tune_phy) {
2725 /* for built-in sis900 */
2726 (*dp->gc.gc_mii_tune_phy)(dp);
2727 }
2728 dp->mii_interval = dp->gc.gc_mii_link_watch_interval;
2729 goto next;
2730 }
2731
2732 /* don't change mii_state */
2733 if (dp->gc.gc_mii_hw_link_detection &&
2734 dp->nic_state == NIC_STATE_ONLINE) {
2735 dp->mii_interval = 0;
2736 goto next;
2737 }
2738 break;
2739 }
2740 dp->mii_interval = dp->gc.gc_mii_link_watch_interval;
2741 goto next;
2742
2743 /* Actions on the end of state routine */
2744
2745 restart_autonego:
2746 switch (linkdown_action) {
2747 case MII_ACTION_RESET:
2748 if (!dp->mii_supress_msg) {
2749 cmn_err(CE_CONT, "!%s: resetting PHY", dp->name);
2750 }
2751 dp->mii_supress_msg = B_TRUE;
2752 goto reset_phy;
2753
2754 case MII_ACTION_NONE:
2755 dp->mii_supress_msg = B_TRUE;
2756 if (dp->gc.gc_mii_an_oneshot) {
2757 goto autonego;
2758 }
2759 /* PHY will restart autonego automatically */
2760 dp->mii_state = MII_STATE_AUTONEGOTIATING;
2761 dp->mii_timer = dp->gc.gc_mii_an_timeout;
2762 dp->mii_interval = dp->gc.gc_mii_an_watch_interval;
2763 goto next;
2764
2765 case MII_ACTION_RSA:
2766 if (!dp->mii_supress_msg) {
2767 cmn_err(CE_CONT, "!%s: restarting auto-negotiation",
2768 dp->name);
2769 }
2770 dp->mii_supress_msg = B_TRUE;
2771 goto autonego;
2772
2773 default:
2774 cmn_err(CE_WARN, "!%s: unknowm linkdown action: %d",
2775 dp->name, dp->gc.gc_mii_linkdown_action);
2776 dp->mii_supress_msg = B_TRUE;
2777 }
2778 /* NOTREACHED */
2779
2780 reset_phy:
2781 if (!dp->mii_supress_msg) {
2782 cmn_err(CE_CONT, "!%s: resetting PHY", dp->name);
2783 }
2784 dp->mii_state = MII_STATE_RESETTING;
2785 dp->mii_timer = dp->gc.gc_mii_reset_timeout;
2786 if (!dp->gc.gc_mii_dont_reset) {
2787 gem_mii_write(dp, MII_CONTROL, MII_CONTROL_RESET);
2788 }
2789 dp->mii_interval = WATCH_INTERVAL_FAST;
2790 goto next;
2791
2792 autonego:
2793 if (!dp->mii_supress_msg) {
2794 cmn_err(CE_CONT, "!%s: auto-negotiation started", dp->name);
2795 }
2796 dp->mii_state = MII_STATE_AUTONEGOTIATING;
2797 dp->mii_timer = dp->gc.gc_mii_an_timeout;
2798
2799 /* start/restart auto nego */
2800 val = gem_mii_read(dp, MII_CONTROL) &
2801 ~(MII_CONTROL_ISOLATE | MII_CONTROL_PWRDN | MII_CONTROL_RESET);
2802
2803 gem_mii_write(dp, MII_CONTROL,
2804 val | MII_CONTROL_RSAN | MII_CONTROL_ANE);
2805
2806 dp->mii_interval = dp->gc.gc_mii_an_watch_interval;
2807
2808 next:
2809 if (dp->link_watcher_id == 0 && dp->mii_interval) {
2810 /* we must schedule next mii_watcher */
2811 dp->link_watcher_id =
2812 timeout((void (*)(void *))&gem_mii_link_watcher,
2813 (void *)dp, dp->mii_interval);
2814 }
2815
2816 if (old_mii_state != dp->mii_state) {
2817 /* notify new mii link state */
2818 if (dp->mii_state == MII_STATE_LINKUP) {
2819 dp->linkup_delay = 0;
2820 GEM_LINKUP(dp);
2821 } else if (dp->linkup_delay <= 0) {
2822 GEM_LINKDOWN(dp);
2823 }
2824 } else if (dp->linkup_delay < 0) {
2825 /* first linkup timeout */
2826 dp->linkup_delay = 0;
2827 GEM_LINKDOWN(dp);
2828 }
2829
2830 return (tx_sched);
2831 }
2832
2833 static void
2834 gem_mii_link_watcher(struct gem_dev *dp)
2835 {
2836 boolean_t tx_sched;
2837
2838 mutex_enter(&dp->intrlock);
2839
2840 dp->link_watcher_id = 0;
2841 tx_sched = gem_mii_link_check(dp);
2842 #if GEM_DEBUG_LEVEL > 2
2843 if (dp->link_watcher_id == 0) {
2844 cmn_err(CE_CONT, "%s: link watcher stopped", dp->name);
2845 }
2846 #endif
2847 mutex_exit(&dp->intrlock);
2848
2849 if (tx_sched) {
2850 /* kick potentially stopped downstream */
2851 mac_tx_update(dp->mh);
2852 }
2853 }
2854
2855 int
2856 gem_mii_probe_default(struct gem_dev *dp)
2857 {
2858 int8_t phy;
2859 uint16_t status;
2860 uint16_t adv;
2861 uint16_t adv_org;
2862
2863 DPRINTF(3, (CE_CONT, "!%s: %s: called", dp->name, __func__));
2864
2865 /*
2866 * Scan PHY
2867 */
2868 /* ensure sync bits are sent */
2869 dp->mii_status = 0;
2870
2871 /* Try default phy first */
2872 if (dp->mii_phy_addr) {
2873 status = gem_mii_read(dp, MII_STATUS);
2874 if (status != 0xffff && status != 0) {
2875 gem_mii_write(dp, MII_CONTROL, 0);
2876 goto PHY_found;
2877 }
2878
2879 if (dp->mii_phy_addr < 0) {
2880 cmn_err(CE_NOTE,
2881 "!%s: failed to probe default internal and/or non-MII PHY",
2882 dp->name);
2883 return (GEM_FAILURE);
2884 }
2885
2886 cmn_err(CE_NOTE,
2887 "!%s: failed to probe default MII PHY at %d",
2888 dp->name, dp->mii_phy_addr);
2889 }
2890
2891 /* Try all possible addresses */
2892 for (phy = dp->gc.gc_mii_addr_min; phy < 32; phy++) {
2893 dp->mii_phy_addr = phy;
2894 status = gem_mii_read(dp, MII_STATUS);
2895
2896 if (status != 0xffff && status != 0) {
2897 gem_mii_write(dp, MII_CONTROL, 0);
2898 goto PHY_found;
2899 }
2900 }
2901
2902 for (phy = dp->gc.gc_mii_addr_min; phy < 32; phy++) {
2903 dp->mii_phy_addr = phy;
2904 gem_mii_write(dp, MII_CONTROL, 0);
2905 status = gem_mii_read(dp, MII_STATUS);
2906
2907 if (status != 0xffff && status != 0) {
2908 goto PHY_found;
2909 }
2910 }
2911
2912 cmn_err(CE_NOTE, "!%s: no MII PHY found", dp->name);
2913 dp->mii_phy_addr = -1;
2914
2915 return (GEM_FAILURE);
2916
2917 PHY_found:
2918 dp->mii_status = status;
2919 dp->mii_phy_id = (gem_mii_read(dp, MII_PHYIDH) << 16) |
2920 gem_mii_read(dp, MII_PHYIDL);
2921
2922 if (dp->mii_phy_addr < 0) {
2923 cmn_err(CE_CONT, "!%s: using internal/non-MII PHY(0x%08x)",
2924 dp->name, dp->mii_phy_id);
2925 } else {
2926 cmn_err(CE_CONT, "!%s: MII PHY (0x%08x) found at %d",
2927 dp->name, dp->mii_phy_id, dp->mii_phy_addr);
2928 }
2929
2930 cmn_err(CE_CONT, "!%s: PHY control:%b, status:%b, advert:%b, lpar:%b",
2931 dp->name,
2932 gem_mii_read(dp, MII_CONTROL), MII_CONTROL_BITS,
2933 status, MII_STATUS_BITS,
2934 gem_mii_read(dp, MII_AN_ADVERT), MII_ABILITY_BITS,
2935 gem_mii_read(dp, MII_AN_LPABLE), MII_ABILITY_BITS);
2936
2937 dp->mii_xstatus = 0;
2938 if (status & MII_STATUS_XSTATUS) {
2939 dp->mii_xstatus = gem_mii_read(dp, MII_XSTATUS);
2940
2941 cmn_err(CE_CONT, "!%s: xstatus:%b",
2942 dp->name, dp->mii_xstatus, MII_XSTATUS_BITS);
2943 }
2944
2945 /* check if the phy can advertise pause abilities */
2946 adv_org = gem_mii_read(dp, MII_AN_ADVERT);
2947
2948 gem_mii_write(dp, MII_AN_ADVERT,
2949 MII_ABILITY_PAUSE | MII_ABILITY_ASMPAUSE);
2950
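/*
 * Read back the advertisement register: pause bits that did not latch
 * are not implemented by this PHY, so drop the corresponding capability
 * bits (bit0: pause, bit1: asym pause) from gc_flow_control.
 */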
2951 adv = gem_mii_read(dp, MII_AN_ADVERT);
2952
2953 if ((adv & MII_ABILITY_PAUSE) == 0) {
2954 dp->gc.gc_flow_control &= ~1;
2955 }
2956
2957 if ((adv & MII_ABILITY_ASMPAUSE) == 0) {
2958 dp->gc.gc_flow_control &= ~2;
2959 }
2960
2961 gem_mii_write(dp, MII_AN_ADVERT, adv_org);
2962
2963 return (GEM_SUCCESS);
2964 }
2965
2966 static void
2967 gem_mii_start(struct gem_dev *dp)
2968 {
2969 DPRINTF(3, (CE_CONT, "!%s: %s: called", dp->name, __func__));
2970
2971 /* make the first call of the link check */
2972 dp->mii_state = MII_STATE_UNKNOWN;
2973 dp->mii_last_check = ddi_get_lbolt();
2974 dp->linkup_delay = dp->gc.gc_mii_linkdown_timeout;
2975 (void) gem_mii_link_watcher(dp);
2976 }
2977
2978 static void
2979 gem_mii_stop(struct gem_dev *dp)
2980 {
2981 DPRINTF(3, (CE_CONT, "!%s: %s: called", dp->name, __func__));
2982
2983 /* Ensure timer routine stopped */
2984 mutex_enter(&dp->intrlock);
2985 if (dp->link_watcher_id) {
2986 while (untimeout(dp->link_watcher_id) == -1)
2987 ;
2988 dp->link_watcher_id = 0;
2989 }
2990 mutex_exit(&dp->intrlock);
2991 }
2992
2993 boolean_t
2994 gem_get_mac_addr_conf(struct gem_dev *dp)
2995 {
2996 char propname[32];
2997 char *valstr;
2998 uint8_t mac[ETHERADDRL];
2999 char *cp;
3000 int c;
3001 int i;
3002 int j;
3003 uint8_t v;
3004 uint8_t d;
3005 uint8_t ored;
3006
3007 DPRINTF(3, (CE_CONT, "!%s: %s: called", dp->name, __func__));
3008 /*
3009 * Get ethernet address from .conf file
3010 */
3011 (void) sprintf(propname, "mac-addr");
3012 if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, dp->dip,
3013 DDI_PROP_DONTPASS, propname, &valstr)) !=
3014 DDI_PROP_SUCCESS) {
3015 return (B_FALSE);
3016 }
3017
3018 if (strlen(valstr) != ETHERADDRL*3-1) {
3019 goto syntax_err;
3020 }
3021
3022 cp = valstr;
3023 j = 0;
3024 ored = 0;
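/*
 * Parse the "xx:xx:xx:xx:xx:xx" string two hex digits at a time.
 * 'ored' accumulates the OR of all bytes so that an all-zero address
 * can be rejected below.
 */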
3025 for (;;) {
3026 v = 0;
3027 for (i = 0; i < 2; i++) {
3028 c = *cp++;
3029
3030 if (c >= 'a' && c <= 'f') {
3031 d = c - 'a' + 10;
3032 } else if (c >= 'A' && c <= 'F') {
3033 d = c - 'A' + 10;
3034 } else if (c >= '0' && c <= '9') {
3035 d = c - '0';
3036 } else {
3037 goto syntax_err;
3038 }
3039 v = (v << 4) | d;
3040 }
3041
3042 mac[j++] = v;
3043 ored |= v;
3044 if (j == ETHERADDRL) {
3045 /* done */
3046 break;
3047 }
3048
3049 c = *cp++;
3050 if (c != ':') {
3051 goto syntax_err;
3052 }
3053 }
3054
3055 if (ored == 0) {
3056 goto err;
3057 }
3058 for (i = 0; i < ETHERADDRL; i++) {
3059 dp->dev_addr.ether_addr_octet[i] = mac[i];
3060 }
3061 ddi_prop_free(valstr);
3062 return (B_TRUE);
3063
3064 syntax_err:
3065 cmn_err(CE_CONT,
3066 "!%s: read mac addr: trying .conf: syntax err %s",
3067 dp->name, valstr);
3068 err:
3069 ddi_prop_free(valstr);
3070
3071 return (B_FALSE);
3072 }
3073
3074
3075 /* ============================================================== */
3076 /*
3077 * internal start/stop interface
3078 */
3079 /* ============================================================== */
3080 static int
3081 gem_mac_set_rx_filter(struct gem_dev *dp)
3082 {
3083 return ((*dp->gc.gc_set_rx_filter)(dp));
3084 }
3085
3086 /*
3087 * gem_mac_init: cold start
3088 */
3089 static int
3090 gem_mac_init(struct gem_dev *dp)
3091 {
3092 DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
3093
3094 if (dp->mac_suspended) {
3095 return (GEM_FAILURE);
3096 }
3097
3098 dp->mac_active = B_FALSE;
3099
3100 gem_init_rx_ring(dp);
3101 gem_init_tx_ring(dp);
3102
3103 /* reset transmitter state */
3104 dp->tx_blocked = (clock_t)0;
3105 dp->tx_busy = 0;
3106 dp->tx_reclaim_busy = 0;
3107 dp->tx_max_packets = dp->gc.gc_tx_buf_limit;
3108
3109 if ((*dp->gc.gc_init_chip)(dp) != GEM_SUCCESS) {
3110 return (GEM_FAILURE);
3111 }
3112
3113 gem_prepare_rx_buf(dp);
3114
3115 return (GEM_SUCCESS);
3116 }
3117 /*
3118 * gem_mac_start: warm start
3119 */
3120 static int
3121 gem_mac_start(struct gem_dev *dp)
3122 {
3123 DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
3124
3125 ASSERT(mutex_owned(&dp->intrlock));
3126 ASSERT(dp->nic_state == NIC_STATE_ONLINE);
3127 ASSERT(dp->mii_state == MII_STATE_LINKUP);
3128
3129 /* enable tx and rx */
3130 mutex_enter(&dp->xmitlock);
3131 if (dp->mac_suspended) {
3132 mutex_exit(&dp->xmitlock);
3133 return (GEM_FAILURE);
3134 }
3135 dp->mac_active = B_TRUE;
3136 mutex_exit(&dp->xmitlock);
3137
3138 /* setup rx buffers */
3139 (*dp->gc.gc_rx_start)(dp,
3140 SLOT(dp->rx_active_head, dp->gc.gc_rx_ring_size),
3141 dp->rx_active_tail - dp->rx_active_head);
3142
3143 if ((*dp->gc.gc_start_chip)(dp) != GEM_SUCCESS) {
3144 cmn_err(CE_WARN, "%s: %s: start_chip: failed",
3145 dp->name, __func__);
3146 return (GEM_FAILURE);
3147 }
3148
3149 mutex_enter(&dp->xmitlock);
3150
3151 /* load untransmitted packets into the nic */
3152 ASSERT(dp->tx_softq_tail - dp->tx_softq_head >= 0);
3153 if (dp->tx_softq_tail - dp->tx_softq_head > 0) {
3154 gem_tx_load_descs_oo(dp,
3155 dp->tx_softq_head, dp->tx_softq_tail,
3156 GEM_TXFLAG_HEAD);
3157 /* issue preloaded tx buffers */
3158 gem_tx_start_unit(dp);
3159 }
3160
3161 mutex_exit(&dp->xmitlock);
3162
3163 return (GEM_SUCCESS);
3164 }
3165
3166 static int
3167 gem_mac_stop(struct gem_dev *dp, uint_t flags)
3168 {
3169 int i;
3170 int wait_time; /* in uS */
3171 #ifdef GEM_DEBUG_LEVEL
3172 clock_t now;
3173 #endif
3174 int ret = GEM_SUCCESS;
3175
3176 DPRINTF(1, (CE_CONT, "!%s: %s: called, rx_buf_free:%d",
3177 dp->name, __func__, dp->rx_buf_freecnt));
3178
3179 ASSERT(mutex_owned(&dp->intrlock));
3180 ASSERT(!mutex_owned(&dp->xmitlock));
3181
3182 /*
3183 * Block transmits
3184 */
3185 mutex_enter(&dp->xmitlock);
3186 if (dp->mac_suspended) {
3187 mutex_exit(&dp->xmitlock);
3188 return (GEM_SUCCESS);
3189 }
3190 dp->mac_active = B_FALSE;
3191
3192 while (dp->tx_busy > 0) {
3193 cv_wait(&dp->tx_drain_cv, &dp->xmitlock);
3194 }
3195 mutex_exit(&dp->xmitlock);
3196
3197 if ((flags & GEM_RESTART_NOWAIT) == 0) {
3198 /*
3199 * Wait for all tx buffers to be sent.
3200 */
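/*
 * Rough worst-case drain time in microseconds:
 * (8 * MAXPKTBUF / speed-in-Mbps) is the time to transmit one
 * maximum-sized packet; multiply by the number of outstanding tx
 * buffers and double it as a safety margin.
 */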
3201 wait_time =
3202 2 * (8 * MAXPKTBUF(dp) / gem_speed_value[dp->speed]) *
3203 (dp->tx_active_tail - dp->tx_active_head);
3204
3205 DPRINTF(0, (CE_CONT, "%s: %s: max drain time: %d uS",
3206 dp->name, __func__, wait_time));
3207 i = 0;
3208 #ifdef GEM_DEBUG_LEVEL
3209 now = ddi_get_lbolt();
3210 #endif
3211 while (dp->tx_active_tail != dp->tx_active_head) {
3212 if (i > wait_time) {
3213 /* timeout */
3214 cmn_err(CE_NOTE, "%s: %s timeout: tx drain",
3215 dp->name, __func__);
3216 break;
3217 }
3218 (void) gem_reclaim_txbuf(dp);
3219 drv_usecwait(100);
3220 i += 100;
3221 }
3222 DPRINTF(0, (CE_NOTE,
3223 "!%s: %s: the nic have drained in %d uS, real %d mS",
3224 dp->name, __func__, i,
3225 10*((int)(ddi_get_lbolt() - now))));
3226 }
3227
3228 /*
3229 * Now we can stop the nic safely.
3230 */
3231 if ((*dp->gc.gc_stop_chip)(dp) != GEM_SUCCESS) {
3232 cmn_err(CE_NOTE, "%s: %s: resetting the chip to stop it",
3233 dp->name, __func__);
3234 if ((*dp->gc.gc_reset_chip)(dp) != GEM_SUCCESS) {
3235 cmn_err(CE_WARN, "%s: %s: failed to reset chip",
3236 dp->name, __func__);
3237 }
3238 }
3239
3240 /*
3241 * Clear all rx buffers
3242 */
3243 if (flags & GEM_RESTART_KEEP_BUF) {
3244 (void) gem_receive(dp);
3245 }
3246 gem_clean_rx_buf(dp);
3247
3248 /*
3249 * Update final statistics
3250 */
3251 (*dp->gc.gc_get_stats)(dp);
3252
3253 /*
3254 * Clear all pending tx packets
3255 */
3256 ASSERT(dp->tx_active_tail == dp->tx_softq_head);
3257 ASSERT(dp->tx_softq_tail == dp->tx_free_head);
3258 if (flags & GEM_RESTART_KEEP_BUF) {
3259 /* restore active tx buffers */
3260 dp->tx_active_tail = dp->tx_active_head;
3261 dp->tx_softq_head = dp->tx_active_head;
3262 } else {
3263 gem_clean_tx_buf(dp);
3264 }
3265
3266 return (ret);
3267 }
3268
3269 static int
3270 gem_add_multicast(struct gem_dev *dp, const uint8_t *ep)
3271 {
3272 int cnt;
3273 int err;
3274
3275 DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
3276
3277 mutex_enter(&dp->intrlock);
3278 if (dp->mac_suspended) {
3279 mutex_exit(&dp->intrlock);
3280 return (GEM_FAILURE);
3281 }
3282
3283 if (dp->mc_count_req++ < GEM_MAXMC) {
3284 /* append the new address at the end of the mclist */
3285 cnt = dp->mc_count;
3286 bcopy(ep, dp->mc_list[cnt].addr.ether_addr_octet,
3287 ETHERADDRL);
3288 if (dp->gc.gc_multicast_hash) {
3289 dp->mc_list[cnt].hash =
3290 (*dp->gc.gc_multicast_hash)(dp, (uint8_t *)ep);
3291 }
3292 dp->mc_count = cnt + 1;
3293 }
3294
3295 if (dp->mc_count_req != dp->mc_count) {
3296 /* multicast address list overflow */
3297 dp->rxmode |= RXMODE_MULTI_OVF;
3298 } else {
3299 dp->rxmode &= ~RXMODE_MULTI_OVF;
3300 }
3301
3302 /* tell the hardware about the new multicast list */
3303 err = gem_mac_set_rx_filter(dp);
3304
3305 mutex_exit(&dp->intrlock);
3306
3307 return (err);
3308 }
3309
3310 static int
3311 gem_remove_multicast(struct gem_dev *dp, const uint8_t *ep)
3312 {
3313 size_t len;
3314 int i;
3315 int cnt;
3316 int err;
3317
3318 DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
3319
3320 mutex_enter(&dp->intrlock);
3321 if (dp->mac_suspended) {
3322 mutex_exit(&dp->intrlock);
3323 return (GEM_FAILURE);
3324 }
3325
3326 dp->mc_count_req--;
3327 cnt = dp->mc_count;
3328 for (i = 0; i < cnt; i++) {
3329 if (bcmp(ep, &dp->mc_list[i].addr, ETHERADDRL)) {
3330 continue;
3331 }
3332 /* shrink the mclist by copying forward */
3333 len = (cnt - (i + 1)) * sizeof (*dp->mc_list);
3334 if (len > 0) {
3335 bcopy(&dp->mc_list[i+1], &dp->mc_list[i], len);
3336 }
3337 dp->mc_count--;
3338 break;
3339 }
3340
3341 if (dp->mc_count_req != dp->mc_count) {
3342 /* multicast address list overflow */
3343 dp->rxmode |= RXMODE_MULTI_OVF;
3344 } else {
3345 dp->rxmode &= ~RXMODE_MULTI_OVF;
3346 }
3347 /* In gem v2, don't hold xmitlock when calling set_rx_filter */
3348 err = gem_mac_set_rx_filter(dp);
3349
3350 mutex_exit(&dp->intrlock);
3351
3352 return (err);
3353 }
3354
3355 /* ============================================================== */
3356 /*
3357 * ND interface
3358 */
3359 /* ============================================================== */
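/*
 * The PARAM_* indices below identify the ndd(1M) parameters exported by
 * gem_nd_setup(); gem_param_get()/gem_param_set() use the index stored
 * in the per-parameter gem_nd_arg to select the field to report or
 * update.
 */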
3360 enum {
3361 PARAM_AUTONEG_CAP,
3362 PARAM_PAUSE_CAP,
3363 PARAM_ASYM_PAUSE_CAP,
3364 PARAM_1000FDX_CAP,
3365 PARAM_1000HDX_CAP,
3366 PARAM_100T4_CAP,
3367 PARAM_100FDX_CAP,
3368 PARAM_100HDX_CAP,
3369 PARAM_10FDX_CAP,
3370 PARAM_10HDX_CAP,
3371
3372 PARAM_ADV_AUTONEG_CAP,
3373 PARAM_ADV_PAUSE_CAP,
3374 PARAM_ADV_ASYM_PAUSE_CAP,
3375 PARAM_ADV_1000FDX_CAP,
3376 PARAM_ADV_1000HDX_CAP,
3377 PARAM_ADV_100T4_CAP,
3378 PARAM_ADV_100FDX_CAP,
3379 PARAM_ADV_100HDX_CAP,
3380 PARAM_ADV_10FDX_CAP,
3381 PARAM_ADV_10HDX_CAP,
3382
3383 PARAM_LP_AUTONEG_CAP,
3384 PARAM_LP_PAUSE_CAP,
3385 PARAM_LP_ASYM_PAUSE_CAP,
3386 PARAM_LP_1000FDX_CAP,
3387 PARAM_LP_1000HDX_CAP,
3388 PARAM_LP_100T4_CAP,
3389 PARAM_LP_100FDX_CAP,
3390 PARAM_LP_100HDX_CAP,
3391 PARAM_LP_10FDX_CAP,
3392 PARAM_LP_10HDX_CAP,
3393
3394 PARAM_LINK_STATUS,
3395 PARAM_LINK_SPEED,
3396 PARAM_LINK_DUPLEX,
3397
3398 PARAM_LINK_AUTONEG,
3399 PARAM_LINK_RX_PAUSE,
3400 PARAM_LINK_TX_PAUSE,
3401
3402 PARAM_LOOP_MODE,
3403 PARAM_MSI_CNT,
3404
3405 #ifdef DEBUG_RESUME
3406 PARAM_RESUME_TEST,
3407 #endif
3408 PARAM_COUNT
3409 };
3410
3411 enum ioc_reply {
3412 IOC_INVAL = -1, /* bad, NAK with EINVAL */
3413 IOC_DONE, /* OK, reply sent */
3414 IOC_ACK, /* OK, just send ACK */
3415 IOC_REPLY, /* OK, just send reply */
3416 IOC_RESTART_ACK, /* OK, restart & ACK */
3417 IOC_RESTART_REPLY /* OK, restart & reply */
3418 };
3419
3420 struct gem_nd_arg {
3421 struct gem_dev *dp;
3422 int item;
3423 };
3424
3425 static int
3426 gem_param_get(queue_t *q, mblk_t *mp, caddr_t arg, cred_t *credp)
3427 {
3428 struct gem_dev *dp = ((struct gem_nd_arg *)(void *)arg)->dp;
3429 int item = ((struct gem_nd_arg *)(void *)arg)->item;
3430 long val;
3431
3432 DPRINTF(0, (CE_CONT, "!%s: %s: called, item:%d",
3433 dp->name, __func__, item));
3434
3435 switch (item) {
3436 case PARAM_AUTONEG_CAP:
3437 val = BOOLEAN(dp->mii_status & MII_STATUS_CANAUTONEG);
3438 DPRINTF(0, (CE_CONT, "autoneg_cap:%d", val));
3439 break;
3440
3441 case PARAM_PAUSE_CAP:
3442 val = BOOLEAN(dp->gc.gc_flow_control & 1);
3443 break;
3444
3445 case PARAM_ASYM_PAUSE_CAP:
3446 val = BOOLEAN(dp->gc.gc_flow_control & 2);
3447 break;
3448
3449 case PARAM_1000FDX_CAP:
3450 val = (dp->mii_xstatus & MII_XSTATUS_1000BASET_FD) ||
3451 (dp->mii_xstatus & MII_XSTATUS_1000BASEX_FD);
3452 break;
3453
3454 case PARAM_1000HDX_CAP:
3455 val = (dp->mii_xstatus & MII_XSTATUS_1000BASET) ||
3456 (dp->mii_xstatus & MII_XSTATUS_1000BASEX);
3457 break;
3458
3459 case PARAM_100T4_CAP:
3460 val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASE_T4);
3461 break;
3462
3463 case PARAM_100FDX_CAP:
3464 val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX_FD);
3465 break;
3466
3467 case PARAM_100HDX_CAP:
3468 val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX);
3469 break;
3470
3471 case PARAM_10FDX_CAP:
3472 val = BOOLEAN(dp->mii_status & MII_STATUS_10_FD);
3473 break;
3474
3475 case PARAM_10HDX_CAP:
3476 val = BOOLEAN(dp->mii_status & MII_STATUS_10);
3477 break;
3478
3479 case PARAM_ADV_AUTONEG_CAP:
3480 val = dp->anadv_autoneg;
3481 break;
3482
3483 case PARAM_ADV_PAUSE_CAP:
3484 val = BOOLEAN(dp->anadv_flow_control & 1);
3485 break;
3486
3487 case PARAM_ADV_ASYM_PAUSE_CAP:
3488 val = BOOLEAN(dp->anadv_flow_control & 2);
3489 break;
3490
3491 case PARAM_ADV_1000FDX_CAP:
3492 val = dp->anadv_1000fdx;
3493 break;
3494
3495 case PARAM_ADV_1000HDX_CAP:
3496 val = dp->anadv_1000hdx;
3497 break;
3498
3499 case PARAM_ADV_100T4_CAP:
3500 val = dp->anadv_100t4;
3501 break;
3502
3503 case PARAM_ADV_100FDX_CAP:
3504 val = dp->anadv_100fdx;
3505 break;
3506
3507 case PARAM_ADV_100HDX_CAP:
3508 val = dp->anadv_100hdx;
3509 break;
3510
3511 case PARAM_ADV_10FDX_CAP:
3512 val = dp->anadv_10fdx;
3513 break;
3514
3515 case PARAM_ADV_10HDX_CAP:
3516 val = dp->anadv_10hdx;
3517 break;
3518
3519 case PARAM_LP_AUTONEG_CAP:
3520 val = BOOLEAN(dp->mii_exp & MII_AN_EXP_LPCANAN);
3521 break;
3522
3523 case PARAM_LP_PAUSE_CAP:
3524 val = BOOLEAN(dp->mii_lpable & MII_ABILITY_PAUSE);
3525 break;
3526
3527 case PARAM_LP_ASYM_PAUSE_CAP:
3528 val = BOOLEAN(dp->mii_lpable & MII_ABILITY_ASMPAUSE);
3529 break;
3530
3531 case PARAM_LP_1000FDX_CAP:
3532 val = BOOLEAN(dp->mii_stat1000 & MII_1000TS_LP_FULL);
3533 break;
3534
3535 case PARAM_LP_1000HDX_CAP:
3536 val = BOOLEAN(dp->mii_stat1000 & MII_1000TS_LP_HALF);
3537 break;
3538
3539 case PARAM_LP_100T4_CAP:
3540 val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_T4);
3541 break;
3542
3543 case PARAM_LP_100FDX_CAP:
3544 val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_TX_FD);
3545 break;
3546
3547 case PARAM_LP_100HDX_CAP:
3548 val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_TX);
3549 break;
3550
3551 case PARAM_LP_10FDX_CAP:
3552 val = BOOLEAN(dp->mii_lpable & MII_ABILITY_10BASE_T_FD);
3553 break;
3554
3555 case PARAM_LP_10HDX_CAP:
3556 val = BOOLEAN(dp->mii_lpable & MII_ABILITY_10BASE_T);
3557 break;
3558
3559 case PARAM_LINK_STATUS:
3560 val = (dp->mii_state == MII_STATE_LINKUP);
3561 break;
3562
3563 case PARAM_LINK_SPEED:
3564 val = gem_speed_value[dp->speed];
3565 break;
3566
3567 case PARAM_LINK_DUPLEX:
3568 val = 0;
3569 if (dp->mii_state == MII_STATE_LINKUP) {
3570 val = dp->full_duplex ? 2 : 1;
3571 }
3572 break;
3573
3574 case PARAM_LINK_AUTONEG:
3575 val = BOOLEAN(dp->mii_exp & MII_AN_EXP_LPCANAN);
3576 break;
3577
3578 case PARAM_LINK_RX_PAUSE:
3579 val = (dp->flow_control == FLOW_CONTROL_SYMMETRIC) ||
3580 (dp->flow_control == FLOW_CONTROL_RX_PAUSE);
3581 break;
3582
3583 case PARAM_LINK_TX_PAUSE:
3584 val = (dp->flow_control == FLOW_CONTROL_SYMMETRIC) ||
3585 (dp->flow_control == FLOW_CONTROL_TX_PAUSE);
3586 break;
3587
3588 #ifdef DEBUG_RESUME
3589 case PARAM_RESUME_TEST:
3590 val = 0;
3591 break;
3592 #endif
3593 default:
3594 cmn_err(CE_WARN, "%s: unimplemented ndd control (%d)",
3595 dp->name, item);
3596 break;
3597 }
3598
3599 (void) mi_mpprintf(mp, "%ld", val);
3600
3601 return (0);
3602 }
3603
3604 static int
3605 gem_param_set(queue_t *q, mblk_t *mp, char *value, caddr_t arg, cred_t *credp)
3606 {
3607 struct gem_dev *dp = ((struct gem_nd_arg *)(void *)arg)->dp;
3608 int item = ((struct gem_nd_arg *)(void *)arg)->item;
3609 long val;
3610 char *end;
3611
3612 DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
3613 if (ddi_strtol(value, &end, 10, &val)) {
3614 return (EINVAL);
3615 }
3616 if (end == value) {
3617 return (EINVAL);
3618 }
3619
3620 switch (item) {
3621 case PARAM_ADV_AUTONEG_CAP:
3622 if (val != 0 && val != 1) {
3623 goto err;
3624 }
3625 if (val && (dp->mii_status & MII_STATUS_CANAUTONEG) == 0) {
3626 goto err;
3627 }
3628 dp->anadv_autoneg = (int)val;
3629 break;
3630
3631 case PARAM_ADV_PAUSE_CAP:
3632 if (val != 0 && val != 1) {
3633 goto err;
3634 }
3635 if (val) {
3636 dp->anadv_flow_control |= 1;
3637 } else {
3638 dp->anadv_flow_control &= ~1;
3639 }
3640 break;
3641
3642 case PARAM_ADV_ASYM_PAUSE_CAP:
3643 if (val != 0 && val != 1) {
3644 goto err;
3645 }
3646 if (val) {
3647 dp->anadv_flow_control |= 2;
3648 } else {
3649 dp->anadv_flow_control &= ~2;
3650 }
3651 break;
3652
3653 case PARAM_ADV_1000FDX_CAP:
3654 if (val != 0 && val != 1) {
3655 goto err;
3656 }
3657 if (val && (dp->mii_xstatus &
3658 (MII_XSTATUS_1000BASET_FD |
3659 MII_XSTATUS_1000BASEX_FD)) == 0) {
3660 goto err;
3661 }
3662 dp->anadv_1000fdx = (int)val;
3663 break;
3664
3665 case PARAM_ADV_1000HDX_CAP:
3666 if (val != 0 && val != 1) {
3667 goto err;
3668 }
3669 if (val && (dp->mii_xstatus &
3670 (MII_XSTATUS_1000BASET | MII_XSTATUS_1000BASEX)) == 0) {
3671 goto err;
3672 }
3673 dp->anadv_1000hdx = (int)val;
3674 break;
3675
3676 case PARAM_ADV_100T4_CAP:
3677 if (val != 0 && val != 1) {
3678 goto err;
3679 }
3680 if (val && (dp->mii_status & MII_STATUS_100_BASE_T4) == 0) {
3681 goto err;
3682 }
3683 dp->anadv_100t4 = (int)val;
3684 break;
3685
3686 case PARAM_ADV_100FDX_CAP:
3687 if (val != 0 && val != 1) {
3688 goto err;
3689 }
3690 if (val && (dp->mii_status & MII_STATUS_100_BASEX_FD) == 0) {
3691 goto err;
3692 }
3693 dp->anadv_100fdx = (int)val;
3694 break;
3695
3696 case PARAM_ADV_100HDX_CAP:
3697 if (val != 0 && val != 1) {
3698 goto err;
3699 }
3700 if (val && (dp->mii_status & MII_STATUS_100_BASEX) == 0) {
3701 goto err;
3702 }
3703 dp->anadv_100hdx = (int)val;
3704 break;
3705
3706 case PARAM_ADV_10FDX_CAP:
3707 if (val != 0 && val != 1) {
3708 goto err;
3709 }
3710 if (val && (dp->mii_status & MII_STATUS_10_FD) == 0) {
3711 goto err;
3712 }
3713 dp->anadv_10fdx = (int)val;
3714 break;
3715
3716 case PARAM_ADV_10HDX_CAP:
3717 if (val != 0 && val != 1) {
3718 goto err;
3719 }
3720 if (val && (dp->mii_status & MII_STATUS_10) == 0) {
3721 goto err;
3722 }
3723 dp->anadv_10hdx = (int)val;
3724 break;
3725 }
3726
3727 /* sync with PHY */
3728 gem_choose_forcedmode(dp);
3729
3730 dp->mii_state = MII_STATE_UNKNOWN;
3731 if (dp->gc.gc_mii_hw_link_detection && dp->link_watcher_id == 0) {
3732 /* XXX - Can we ignore the return code ? */
3733 (void) gem_mii_link_check(dp);
3734 }
3735
3736 return (0);
3737 err:
3738 return (EINVAL);
3739 }
3740
3741 static void
3742 gem_nd_load(struct gem_dev *dp, char *name, ndgetf_t gf, ndsetf_t sf, int item)
3743 {
3744 struct gem_nd_arg *arg;
3745
3746 ASSERT(item >= 0);
3747 ASSERT(item < PARAM_COUNT);
3748
3749 arg = &((struct gem_nd_arg *)(void *)dp->nd_arg_p)[item];
3750 arg->dp = dp;
3751 arg->item = item;
3752
3753 DPRINTF(2, (CE_CONT, "!%s: %s: name:%s, item:%d",
3754 dp->name, __func__, name, item));
3755 (void) nd_load(&dp->nd_data_p, name, gf, sf, (caddr_t)arg);
3756 }
3757
3758 static void
3759 gem_nd_setup(struct gem_dev *dp)
3760 {
3761 DPRINTF(0, (CE_CONT, "!%s: %s: called, mii_status:0x%b",
3762 dp->name, __func__, dp->mii_status, MII_STATUS_BITS));
3763
3764 ASSERT(dp->nd_arg_p == NULL);
3765
3766 dp->nd_arg_p =
3767 kmem_zalloc(sizeof (struct gem_nd_arg) * PARAM_COUNT, KM_SLEEP);
3768
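/*
 * SETFUNC() registers gem_param_set as the ndd set routine only when the
 * corresponding capability is actually present (and, for the adv_*
 * parameters, when the advertisement register is writable); otherwise
 * the parameter is exported read-only.
 */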
3769 #define SETFUNC(x) ((x) ? gem_param_set : NULL)
3770
3771 gem_nd_load(dp, "autoneg_cap",
3772 gem_param_get, NULL, PARAM_AUTONEG_CAP);
3773 gem_nd_load(dp, "pause_cap",
3774 gem_param_get, NULL, PARAM_PAUSE_CAP);
3775 gem_nd_load(dp, "asym_pause_cap",
3776 gem_param_get, NULL, PARAM_ASYM_PAUSE_CAP);
3777 gem_nd_load(dp, "1000fdx_cap",
3778 gem_param_get, NULL, PARAM_1000FDX_CAP);
3779 gem_nd_load(dp, "1000hdx_cap",
3780 gem_param_get, NULL, PARAM_1000HDX_CAP);
3781 gem_nd_load(dp, "100T4_cap",
3782 gem_param_get, NULL, PARAM_100T4_CAP);
3783 gem_nd_load(dp, "100fdx_cap",
3784 gem_param_get, NULL, PARAM_100FDX_CAP);
3785 gem_nd_load(dp, "100hdx_cap",
3786 gem_param_get, NULL, PARAM_100HDX_CAP);
3787 gem_nd_load(dp, "10fdx_cap",
3788 gem_param_get, NULL, PARAM_10FDX_CAP);
3789 gem_nd_load(dp, "10hdx_cap",
3790 gem_param_get, NULL, PARAM_10HDX_CAP);
3791
3792 /* Our advertised capabilities */
3793 gem_nd_load(dp, "adv_autoneg_cap", gem_param_get,
3794 SETFUNC(dp->mii_status & MII_STATUS_CANAUTONEG),
3795 PARAM_ADV_AUTONEG_CAP);
3796 gem_nd_load(dp, "adv_pause_cap", gem_param_get,
3797 SETFUNC(dp->gc.gc_flow_control & 1),
3798 PARAM_ADV_PAUSE_CAP);
3799 gem_nd_load(dp, "adv_asym_pause_cap", gem_param_get,
3800 SETFUNC(dp->gc.gc_flow_control & 2),
3801 PARAM_ADV_ASYM_PAUSE_CAP);
3802 gem_nd_load(dp, "adv_1000fdx_cap", gem_param_get,
3803 SETFUNC(dp->mii_xstatus &
3804 (MII_XSTATUS_1000BASEX_FD | MII_XSTATUS_1000BASET_FD)),
3805 PARAM_ADV_1000FDX_CAP);
3806 gem_nd_load(dp, "adv_1000hdx_cap", gem_param_get,
3807 SETFUNC(dp->mii_xstatus &
3808 (MII_XSTATUS_1000BASEX | MII_XSTATUS_1000BASET)),
3809 PARAM_ADV_1000HDX_CAP);
3810 gem_nd_load(dp, "adv_100T4_cap", gem_param_get,
3811 SETFUNC((dp->mii_status & MII_STATUS_100_BASE_T4) &&
3812 !dp->mii_advert_ro),
3813 PARAM_ADV_100T4_CAP);
3814 gem_nd_load(dp, "adv_100fdx_cap", gem_param_get,
3815 SETFUNC((dp->mii_status & MII_STATUS_100_BASEX_FD) &&
3816 !dp->mii_advert_ro),
3817 PARAM_ADV_100FDX_CAP);
3818 gem_nd_load(dp, "adv_100hdx_cap", gem_param_get,
3819 SETFUNC((dp->mii_status & MII_STATUS_100_BASEX) &&
3820 !dp->mii_advert_ro),
3821 PARAM_ADV_100HDX_CAP);
3822 gem_nd_load(dp, "adv_10fdx_cap", gem_param_get,
3823 SETFUNC((dp->mii_status & MII_STATUS_10_FD) &&
3824 !dp->mii_advert_ro),
3825 PARAM_ADV_10FDX_CAP);
3826 gem_nd_load(dp, "adv_10hdx_cap", gem_param_get,
3827 SETFUNC((dp->mii_status & MII_STATUS_10) &&
3828 !dp->mii_advert_ro),
3829 PARAM_ADV_10HDX_CAP);
3830
3831 /* Partner's advertised capabilities */
3832 gem_nd_load(dp, "lp_autoneg_cap",
3833 gem_param_get, NULL, PARAM_LP_AUTONEG_CAP);
3834 gem_nd_load(dp, "lp_pause_cap",
3835 gem_param_get, NULL, PARAM_LP_PAUSE_CAP);
3836 gem_nd_load(dp, "lp_asym_pause_cap",
3837 gem_param_get, NULL, PARAM_LP_ASYM_PAUSE_CAP);
3838 gem_nd_load(dp, "lp_1000fdx_cap",
3839 gem_param_get, NULL, PARAM_LP_1000FDX_CAP);
3840 gem_nd_load(dp, "lp_1000hdx_cap",
3841 gem_param_get, NULL, PARAM_LP_1000HDX_CAP);
3842 gem_nd_load(dp, "lp_100T4_cap",
3843 gem_param_get, NULL, PARAM_LP_100T4_CAP);
3844 gem_nd_load(dp, "lp_100fdx_cap",
3845 gem_param_get, NULL, PARAM_LP_100FDX_CAP);
3846 gem_nd_load(dp, "lp_100hdx_cap",
3847 gem_param_get, NULL, PARAM_LP_100HDX_CAP);
3848 gem_nd_load(dp, "lp_10fdx_cap",
3849 gem_param_get, NULL, PARAM_LP_10FDX_CAP);
3850 gem_nd_load(dp, "lp_10hdx_cap",
3851 gem_param_get, NULL, PARAM_LP_10HDX_CAP);
3852
3853 /* Current operating modes */
3854 gem_nd_load(dp, "link_status",
3855 gem_param_get, NULL, PARAM_LINK_STATUS);
3856 gem_nd_load(dp, "link_speed",
3857 gem_param_get, NULL, PARAM_LINK_SPEED);
3858 gem_nd_load(dp, "link_duplex",
3859 gem_param_get, NULL, PARAM_LINK_DUPLEX);
3860 gem_nd_load(dp, "link_autoneg",
3861 gem_param_get, NULL, PARAM_LINK_AUTONEG);
3862 gem_nd_load(dp, "link_rx_pause",
3863 gem_param_get, NULL, PARAM_LINK_RX_PAUSE);
3864 gem_nd_load(dp, "link_tx_pause",
3865 gem_param_get, NULL, PARAM_LINK_TX_PAUSE);
3866 #ifdef DEBUG_RESUME
3867 gem_nd_load(dp, "resume_test",
3868 gem_param_get, NULL, PARAM_RESUME_TEST);
3869 #endif
3870 #undef SETFUNC
3871 }
3872
3873 static
3874 enum ioc_reply
3875 gem_nd_ioctl(struct gem_dev *dp, queue_t *wq, mblk_t *mp, struct iocblk *iocp)
3876 {
3877 boolean_t ok;
3878
3879 ASSERT(mutex_owned(&dp->intrlock));
3880
3881 DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
3882
3883 switch (iocp->ioc_cmd) {
3884 case ND_GET:
3885 ok = nd_getset(wq, dp->nd_data_p, mp);
3886 DPRINTF(0, (CE_CONT,
3887 "%s: get %s", dp->name, ok ? "OK" : "FAIL"));
3888 return (ok ? IOC_REPLY : IOC_INVAL);
3889
3890 case ND_SET:
3891 ok = nd_getset(wq, dp->nd_data_p, mp);
3892
3893 DPRINTF(0, (CE_CONT, "%s: set %s err %d",
3894 dp->name, ok ? "OK" : "FAIL", iocp->ioc_error));
3895
3896 if (!ok) {
3897 return (IOC_INVAL);
3898 }
3899
3900 if (iocp->ioc_error) {
3901 return (IOC_REPLY);
3902 }
3903
3904 return (IOC_RESTART_REPLY);
3905 }
3906
3907 cmn_err(CE_WARN, "%s: invalid cmd 0x%x", dp->name, iocp->ioc_cmd);
3908
3909 return (IOC_INVAL);
3910 }
3911
3912 static void
3913 gem_nd_cleanup(struct gem_dev *dp)
3914 {
3915 ASSERT(dp->nd_data_p != NULL);
3916 ASSERT(dp->nd_arg_p != NULL);
3917
3918 nd_free(&dp->nd_data_p);
3919
3920 kmem_free(dp->nd_arg_p, sizeof (struct gem_nd_arg) * PARAM_COUNT);
3921 dp->nd_arg_p = NULL;
3922 }
3923
3924 static void
3925 gem_mac_ioctl(struct gem_dev *dp, queue_t *wq, mblk_t *mp)
3926 {
3927 struct iocblk *iocp;
3928 enum ioc_reply status;
3929 int cmd;
3930
3931 DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
3932
3933 /*
3934 * Validate the command before bothering with the mutex ...
3935 */
3936 iocp = (void *)mp->b_rptr;
3937 iocp->ioc_error = 0;
3938 cmd = iocp->ioc_cmd;
3939
3940 DPRINTF(0, (CE_CONT, "%s: %s cmd:0x%x", dp->name, __func__, cmd));
3941
3942 mutex_enter(&dp->intrlock);
3943 mutex_enter(&dp->xmitlock);
3944
3945 switch (cmd) {
3946 default:
3947 _NOTE(NOTREACHED)
3948 status = IOC_INVAL;
3949 break;
3950
3951 case ND_GET:
3952 case ND_SET:
3953 status = gem_nd_ioctl(dp, wq, mp, iocp);
3954 break;
3955 }
3956
3957 mutex_exit(&dp->xmitlock);
3958 mutex_exit(&dp->intrlock);
3959
3960 #ifdef DEBUG_RESUME
3961 if (cmd == ND_GET) {
3962 gem_suspend(dp->dip);
3963 gem_resume(dp->dip);
3964 }
3965 #endif
3966 /*
3967 * Finally, decide how to reply
3968 */
3969 switch (status) {
3970 default:
3971 case IOC_INVAL:
3972 /*
3973 * Error, reply with a NAK and EINVAL or the specified error
3974 */
3975 miocnak(wq, mp, 0, iocp->ioc_error == 0 ?
3976 EINVAL : iocp->ioc_error);
3977 break;
3978
3979 case IOC_DONE:
3980 /*
3981 * OK, reply already sent
3982 */
3983 break;
3984
3985 case IOC_RESTART_ACK:
3986 case IOC_ACK:
3987 /*
3988 * OK, reply with an ACK
3989 */
3990 miocack(wq, mp, 0, 0);
3991 break;
3992
3993 case IOC_RESTART_REPLY:
3994 case IOC_REPLY:
3995 /*
3996 * OK, send prepared reply as ACK or NAK
3997 */
3998 mp->b_datap->db_type =
3999 iocp->ioc_error == 0 ? M_IOCACK : M_IOCNAK;
4000 qreply(wq, mp);
4001 break;
4002 }
4003 }
4004
4005 #ifndef SYS_MAC_H
4006 #define XCVR_UNDEFINED 0
4007 #define XCVR_NONE 1
4008 #define XCVR_10 2
4009 #define XCVR_100T4 3
4010 #define XCVR_100X 4
4011 #define XCVR_100T2 5
4012 #define XCVR_1000X 6
4013 #define XCVR_1000T 7
4014 #endif
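/*
 * gem_mac_xcvr_inuse() maps the PHY status/extended-status capability
 * bits onto the MII XCVR_* codes, reporting the fastest medium the
 * transceiver supports rather than the currently negotiated speed.
 */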
4015 static int
4016 gem_mac_xcvr_inuse(struct gem_dev *dp)
4017 {
4018 int val = XCVR_UNDEFINED;
4019
4020 if ((dp->mii_status & MII_STATUS_XSTATUS) == 0) {
4021 if (dp->mii_status & MII_STATUS_100_BASE_T4) {
4022 val = XCVR_100T4;
4023 } else if (dp->mii_status &
4024 (MII_STATUS_100_BASEX_FD |
4025 MII_STATUS_100_BASEX)) {
4026 val = XCVR_100X;
4027 } else if (dp->mii_status &
4028 (MII_STATUS_100_BASE_T2_FD |
4029 MII_STATUS_100_BASE_T2)) {
4030 val = XCVR_100T2;
4031 } else if (dp->mii_status &
4032 (MII_STATUS_10_FD | MII_STATUS_10)) {
4033 val = XCVR_10;
4034 }
4035 } else if (dp->mii_xstatus &
4036 (MII_XSTATUS_1000BASET_FD | MII_XSTATUS_1000BASET)) {
4037 val = XCVR_1000T;
4038 } else if (dp->mii_xstatus &
4039 (MII_XSTATUS_1000BASEX_FD | MII_XSTATUS_1000BASEX)) {
4040 val = XCVR_1000X;
4041 }
4042
4043 return (val);
4044 }
4045
4046 /* ============================================================== */
4047 /*
4048 * GLDv3 interface
4049 */
4050 /* ============================================================== */
4051 static int gem_m_getstat(void *, uint_t, uint64_t *);
4052 static int gem_m_start(void *);
4053 static void gem_m_stop(void *);
4054 static int gem_m_setpromisc(void *, boolean_t);
4055 static int gem_m_multicst(void *, boolean_t, const uint8_t *);
4056 static int gem_m_unicst(void *, const uint8_t *);
4057 static mblk_t *gem_m_tx(void *, mblk_t *);
4058 static void gem_m_ioctl(void *, queue_t *, mblk_t *);
4059 static boolean_t gem_m_getcapab(void *, mac_capab_t, void *);
4060
4061 #define GEM_M_CALLBACK_FLAGS (MC_IOCTL | MC_GETCAPAB)
4062
4063 static mac_callbacks_t gem_m_callbacks = {
4064 GEM_M_CALLBACK_FLAGS,
4065 gem_m_getstat,
4066 gem_m_start,
4067 gem_m_stop,
4068 gem_m_setpromisc,
4069 gem_m_multicst,
4070 gem_m_unicst,
4071 gem_m_tx,
4072 NULL,
4073 gem_m_ioctl,
4074 gem_m_getcapab,
4075 };
4076
4077 static int
4078 gem_m_start(void *arg)
4079 {
4080 int err = 0;
4081 struct gem_dev *dp = arg;
4082
4083 DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
4084
4085 mutex_enter(&dp->intrlock);
4086 if (dp->mac_suspended) {
4087 err = EIO;
4088 goto x;
4089 }
4090 if (gem_mac_init(dp) != GEM_SUCCESS) {
4091 err = EIO;
4092 goto x;
4093 }
4094 dp->nic_state = NIC_STATE_INITIALIZED;
4095
4096 /* reset rx filter state */
4097 dp->mc_count = 0;
4098 dp->mc_count_req = 0;
4099
4100 /* setup media mode if the link has already come up */
4101 if (dp->mii_state == MII_STATE_LINKUP) {
4102 (dp->gc.gc_set_media)(dp);
4103 }
4104
4105 /* setup initial rx filter */
4106 bcopy(dp->dev_addr.ether_addr_octet,
4107 dp->cur_addr.ether_addr_octet, ETHERADDRL);
4108 dp->rxmode |= RXMODE_ENABLE;
4109
4110 if (gem_mac_set_rx_filter(dp) != GEM_SUCCESS) {
4111 err = EIO;
4112 goto x;
4113 }
4114
4115 dp->nic_state = NIC_STATE_ONLINE;
4116 if (dp->mii_state == MII_STATE_LINKUP) {
4117 if (gem_mac_start(dp) != GEM_SUCCESS) {
4118 err = EIO;
4119 goto x;
4120 }
4121 }
4122
4123 dp->timeout_id = timeout((void (*)(void *))gem_tx_timeout,
4124 (void *)dp, dp->gc.gc_tx_timeout_interval);
4125 mutex_exit(&dp->intrlock);
4126
4127 return (0);
4128 x:
4129 dp->nic_state = NIC_STATE_STOPPED;
4130 mutex_exit(&dp->intrlock);
4131 return (err);
4132 }
4133
4134 static void
4135 gem_m_stop(void *arg)
4136 {
4137 struct gem_dev *dp = arg;
4138
4139 DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
4140
4141 /* stop rx */
4142 mutex_enter(&dp->intrlock);
4143 if (dp->mac_suspended) {
4144 mutex_exit(&dp->intrlock);
4145 return;
4146 }
4147 dp->rxmode &= ~RXMODE_ENABLE;
4148 (void) gem_mac_set_rx_filter(dp);
4149 mutex_exit(&dp->intrlock);
4150
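	/*
	 * Note: untimeout() is called below without holding intrlock,
	 * presumably because the tx timeout handler itself acquires
	 * intrlock; holding the lock across untimeout() could deadlock
	 * while waiting for an in-flight callout to finish.
	 */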
4151 /* stop tx timeout watcher */
4152 if (dp->timeout_id) {
4153 while (untimeout(dp->timeout_id) == -1)
4154 ;
4155 dp->timeout_id = 0;
4156 }
4157
4158 /* make the nic state inactive */
4159 mutex_enter(&dp->intrlock);
4160 if (dp->mac_suspended) {
4161 mutex_exit(&dp->intrlock);
4162 return;
4163 }
4164 dp->nic_state = NIC_STATE_STOPPED;
4165
4166 	/* we need to deassert mac_active to block the interrupt handler */
4167 mutex_enter(&dp->xmitlock);
4168 dp->mac_active = B_FALSE;
4169 mutex_exit(&dp->xmitlock);
4170
4171 /* block interrupts */
4172 while (dp->intr_busy) {
4173 cv_wait(&dp->tx_drain_cv, &dp->intrlock);
4174 }
4175 (void) gem_mac_stop(dp, 0);
4176 mutex_exit(&dp->intrlock);
4177 }
4178
4179 static int
4180 gem_m_multicst(void *arg, boolean_t add, const uint8_t *ep)
4181 {
4182 int err;
4183 int ret;
4184 struct gem_dev *dp = arg;
4185
4186 DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
4187
4188 if (add) {
4189 ret = gem_add_multicast(dp, ep);
4190 } else {
4191 ret = gem_remove_multicast(dp, ep);
4192 }
4193
4194 err = 0;
4195 if (ret != GEM_SUCCESS) {
4196 err = EIO;
4197 }
4198
4199 return (err);
4200 }
4201
4202 static int
4203 gem_m_setpromisc(void *arg, boolean_t on)
4204 {
4205 int err = 0; /* no error */
4206 struct gem_dev *dp = arg;
4207
4208 DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
4209
4210 mutex_enter(&dp->intrlock);
4211 if (dp->mac_suspended) {
4212 mutex_exit(&dp->intrlock);
4213 return (EIO);
4214 }
4215 if (on) {
4216 dp->rxmode |= RXMODE_PROMISC;
4217 } else {
4218 dp->rxmode &= ~RXMODE_PROMISC;
4219 }
4220
4221 if (gem_mac_set_rx_filter(dp) != GEM_SUCCESS) {
4222 err = EIO;
4223 }
4224 mutex_exit(&dp->intrlock);
4225
4226 return (err);
4227 }
4228
4229 int
4230 gem_m_getstat(void *arg, uint_t stat, uint64_t *valp)
4231 {
4232 struct gem_dev *dp = arg;
4233 struct gem_stats *gstp = &dp->stats;
4234 uint64_t val = 0;
4235
4236 DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
4237
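	/*
	 * The mutex_owned() check below apparently allows this entry point
	 * to be reached from contexts that already hold intrlock; in that
	 * case only the suspend state is tested and the lock is left alone.
	 */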
4238 if (mutex_owned(&dp->intrlock)) {
4239 if (dp->mac_suspended) {
4240 return (EIO);
4241 }
4242 } else {
4243 mutex_enter(&dp->intrlock);
4244 if (dp->mac_suspended) {
4245 mutex_exit(&dp->intrlock);
4246 return (EIO);
4247 }
4248 mutex_exit(&dp->intrlock);
4249 }
4250
4251 if ((*dp->gc.gc_get_stats)(dp) != GEM_SUCCESS) {
4252 return (EIO);
4253 }
4254
4255 switch (stat) {
4256 case MAC_STAT_IFSPEED:
4257 val = gem_speed_value[dp->speed] *1000000ull;
4258 break;
4259
4260 case MAC_STAT_MULTIRCV:
4261 val = gstp->rmcast;
4262 break;
4263
4264 case MAC_STAT_BRDCSTRCV:
4265 val = gstp->rbcast;
4266 break;
4267
4268 case MAC_STAT_MULTIXMT:
4269 val = gstp->omcast;
4270 break;
4271
4272 case MAC_STAT_BRDCSTXMT:
4273 val = gstp->obcast;
4274 break;
4275
4276 case MAC_STAT_NORCVBUF:
4277 val = gstp->norcvbuf + gstp->missed;
4278 break;
4279
4280 case MAC_STAT_IERRORS:
4281 val = gstp->errrcv;
4282 break;
4283
4284 case MAC_STAT_NOXMTBUF:
4285 val = gstp->noxmtbuf;
4286 break;
4287
4288 case MAC_STAT_OERRORS:
4289 val = gstp->errxmt;
4290 break;
4291
4292 case MAC_STAT_COLLISIONS:
4293 val = gstp->collisions;
4294 break;
4295
4296 case MAC_STAT_RBYTES:
4297 val = gstp->rbytes;
4298 break;
4299
4300 case MAC_STAT_IPACKETS:
4301 val = gstp->rpackets;
4302 break;
4303
4304 case MAC_STAT_OBYTES:
4305 val = gstp->obytes;
4306 break;
4307
4308 case MAC_STAT_OPACKETS:
4309 val = gstp->opackets;
4310 break;
4311
4312 case MAC_STAT_UNDERFLOWS:
4313 val = gstp->underflow;
4314 break;
4315
4316 case MAC_STAT_OVERFLOWS:
4317 val = gstp->overflow;
4318 break;
4319
4320 case ETHER_STAT_ALIGN_ERRORS:
4321 val = gstp->frame;
4322 break;
4323
4324 case ETHER_STAT_FCS_ERRORS:
4325 val = gstp->crc;
4326 break;
4327
4328 case ETHER_STAT_FIRST_COLLISIONS:
4329 val = gstp->first_coll;
4330 break;
4331
4332 case ETHER_STAT_MULTI_COLLISIONS:
4333 val = gstp->multi_coll;
4334 break;
4335
4336 case ETHER_STAT_SQE_ERRORS:
4337 val = gstp->sqe;
4338 break;
4339
4340 case ETHER_STAT_DEFER_XMTS:
4341 val = gstp->defer;
4342 break;
4343
4344 case ETHER_STAT_TX_LATE_COLLISIONS:
4345 val = gstp->xmtlatecoll;
4346 break;
4347
4348 case ETHER_STAT_EX_COLLISIONS:
4349 val = gstp->excoll;
4350 break;
4351
4352 case ETHER_STAT_MACXMT_ERRORS:
4353 val = gstp->xmit_internal_err;
4354 break;
4355
4356 case ETHER_STAT_CARRIER_ERRORS:
4357 val = gstp->nocarrier;
4358 break;
4359
4360 case ETHER_STAT_TOOLONG_ERRORS:
4361 val = gstp->frame_too_long;
4362 break;
4363
4364 case ETHER_STAT_MACRCV_ERRORS:
4365 val = gstp->rcv_internal_err;
4366 break;
4367
4368 case ETHER_STAT_XCVR_ADDR:
4369 val = dp->mii_phy_addr;
4370 break;
4371
4372 case ETHER_STAT_XCVR_ID:
4373 val = dp->mii_phy_id;
4374 break;
4375
4376 case ETHER_STAT_XCVR_INUSE:
4377 val = gem_mac_xcvr_inuse(dp);
4378 break;
4379
4380 case ETHER_STAT_CAP_1000FDX:
4381 val = (dp->mii_xstatus & MII_XSTATUS_1000BASET_FD) ||
4382 (dp->mii_xstatus & MII_XSTATUS_1000BASEX_FD);
4383 break;
4384
4385 case ETHER_STAT_CAP_1000HDX:
4386 val = (dp->mii_xstatus & MII_XSTATUS_1000BASET) ||
4387 (dp->mii_xstatus & MII_XSTATUS_1000BASEX);
4388 break;
4389
4390 case ETHER_STAT_CAP_100FDX:
4391 val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX_FD);
4392 break;
4393
4394 case ETHER_STAT_CAP_100HDX:
4395 val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX);
4396 break;
4397
4398 case ETHER_STAT_CAP_10FDX:
4399 val = BOOLEAN(dp->mii_status & MII_STATUS_10_FD);
4400 break;
4401
4402 case ETHER_STAT_CAP_10HDX:
4403 val = BOOLEAN(dp->mii_status & MII_STATUS_10);
4404 break;
4405
4406 case ETHER_STAT_CAP_ASMPAUSE:
4407 val = BOOLEAN(dp->gc.gc_flow_control & 2);
4408 break;
4409
4410 case ETHER_STAT_CAP_PAUSE:
4411 val = BOOLEAN(dp->gc.gc_flow_control & 1);
4412 break;
4413
4414 case ETHER_STAT_CAP_AUTONEG:
4415 val = BOOLEAN(dp->mii_status & MII_STATUS_CANAUTONEG);
4416 break;
4417
4418 case ETHER_STAT_ADV_CAP_1000FDX:
4419 val = dp->anadv_1000fdx;
4420 break;
4421
4422 case ETHER_STAT_ADV_CAP_1000HDX:
4423 val = dp->anadv_1000hdx;
4424 break;
4425
4426 case ETHER_STAT_ADV_CAP_100FDX:
4427 val = dp->anadv_100fdx;
4428 break;
4429
4430 case ETHER_STAT_ADV_CAP_100HDX:
4431 val = dp->anadv_100hdx;
4432 break;
4433
4434 case ETHER_STAT_ADV_CAP_10FDX:
4435 val = dp->anadv_10fdx;
4436 break;
4437
4438 case ETHER_STAT_ADV_CAP_10HDX:
4439 val = dp->anadv_10hdx;
4440 break;
4441
4442 case ETHER_STAT_ADV_CAP_ASMPAUSE:
4443 val = BOOLEAN(dp->anadv_flow_control & 2);
4444 break;
4445
4446 case ETHER_STAT_ADV_CAP_PAUSE:
4447 val = BOOLEAN(dp->anadv_flow_control & 1);
4448 break;
4449
4450 case ETHER_STAT_ADV_CAP_AUTONEG:
4451 val = dp->anadv_autoneg;
4452 break;
4453
4454 case ETHER_STAT_LP_CAP_1000FDX:
4455 val = BOOLEAN(dp->mii_stat1000 & MII_1000TS_LP_FULL);
4456 break;
4457
4458 case ETHER_STAT_LP_CAP_1000HDX:
4459 val = BOOLEAN(dp->mii_stat1000 & MII_1000TS_LP_HALF);
4460 break;
4461
4462 case ETHER_STAT_LP_CAP_100FDX:
4463 val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_TX_FD);
4464 break;
4465
4466 case ETHER_STAT_LP_CAP_100HDX:
4467 val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_TX);
4468 break;
4469
4470 case ETHER_STAT_LP_CAP_10FDX:
4471 val = BOOLEAN(dp->mii_lpable & MII_ABILITY_10BASE_T_FD);
4472 break;
4473
4474 case ETHER_STAT_LP_CAP_10HDX:
4475 val = BOOLEAN(dp->mii_lpable & MII_ABILITY_10BASE_T);
4476 break;
4477
4478 case ETHER_STAT_LP_CAP_ASMPAUSE:
4479 val = BOOLEAN(dp->mii_lpable & MII_ABILITY_ASMPAUSE);
4480 break;
4481
4482 case ETHER_STAT_LP_CAP_PAUSE:
4483 val = BOOLEAN(dp->mii_lpable & MII_ABILITY_PAUSE);
4484 break;
4485
4486 case ETHER_STAT_LP_CAP_AUTONEG:
4487 val = BOOLEAN(dp->mii_exp & MII_AN_EXP_LPCANAN);
4488 break;
4489
4490 case ETHER_STAT_LINK_ASMPAUSE:
4491 val = BOOLEAN(dp->flow_control & 2);
4492 break;
4493
4494 case ETHER_STAT_LINK_PAUSE:
4495 val = BOOLEAN(dp->flow_control & 1);
4496 break;
4497
4498 case ETHER_STAT_LINK_AUTONEG:
4499 val = dp->anadv_autoneg &&
4500 BOOLEAN(dp->mii_exp & MII_AN_EXP_LPCANAN);
4501 break;
4502
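	/* link_duplex_t encoding: 0 = unknown, 1 = half duplex, 2 = full duplex */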
4503 case ETHER_STAT_LINK_DUPLEX:
4504 val = (dp->mii_state == MII_STATE_LINKUP) ?
4505 (dp->full_duplex ? 2 : 1) : 0;
4506 break;
4507
4508 case ETHER_STAT_TOOSHORT_ERRORS:
4509 val = gstp->runt;
4510 break;
4511 case ETHER_STAT_LP_REMFAULT:
4512 val = BOOLEAN(dp->mii_lpable & MII_AN_ADVERT_REMFAULT);
4513 break;
4514
4515 case ETHER_STAT_JABBER_ERRORS:
4516 val = gstp->jabber;
4517 break;
4518
4519 case ETHER_STAT_CAP_100T4:
4520 val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASE_T4);
4521 break;
4522
4523 case ETHER_STAT_ADV_CAP_100T4:
4524 val = dp->anadv_100t4;
4525 break;
4526
4527 case ETHER_STAT_LP_CAP_100T4:
4528 val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_T4);
4529 break;
4530
4531 default:
4532 #if GEM_DEBUG_LEVEL > 2
4533 cmn_err(CE_WARN,
4534 "%s: unrecognized parameter value = %d",
4535 __func__, stat);
4536 #endif
4537 return (ENOTSUP);
4538 }
4539
4540 *valp = val;
4541
4542 return (0);
4543 }
4544
4545 static int
4546 gem_m_unicst(void *arg, const uint8_t *mac)
4547 {
4548 int err = 0;
4549 struct gem_dev *dp = arg;
4550
4551 DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
4552
4553 mutex_enter(&dp->intrlock);
4554 if (dp->mac_suspended) {
4555 mutex_exit(&dp->intrlock);
4556 return (EIO);
4557 }
4558 bcopy(mac, dp->cur_addr.ether_addr_octet, ETHERADDRL);
4559 dp->rxmode |= RXMODE_ENABLE;
4560
4561 if (gem_mac_set_rx_filter(dp) != GEM_SUCCESS) {
4562 err = EIO;
4563 }
4564 mutex_exit(&dp->intrlock);
4565
4566 return (err);
4567 }
4568
4569 /*
4570  * gem_m_tx is used only for sending data packets onto the ethernet wire.
4571 */
4572 static mblk_t *
4573 gem_m_tx(void *arg, mblk_t *mp)
4574 {
4575 uint32_t flags = 0;
4576 struct gem_dev *dp = arg;
4577 mblk_t *tp;
4578
4579 DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
4580
4581 ASSERT(dp->nic_state == NIC_STATE_ONLINE);
4582 if (dp->mii_state != MII_STATE_LINKUP) {
4583 		/* Some NICs refuse to send packets while the link is down. */
4584 while (mp) {
4585 tp = mp->b_next;
4586 mp->b_next = NULL;
4587 freemsg(mp);
4588 mp = tp;
4589 }
4590 return (NULL);
4591 }
4592
4593 return (gem_send_common(dp, mp, flags));
4594 }
4595
4596 static void
4597 gem_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
4598 {
4599 DPRINTF(0, (CE_CONT, "!%s: %s: called",
4600 ((struct gem_dev *)arg)->name, __func__));
4601
4602 gem_mac_ioctl((struct gem_dev *)arg, wq, mp);
4603 }
4604
4605 /* ARGSUSED */
4606 static boolean_t
4607 gem_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
4608 {
4609 return (B_FALSE);
4610 }
4611
4612 static void
4613 gem_gld3_init(struct gem_dev *dp, mac_register_t *macp)
4614 {
4615 macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
4616 macp->m_driver = dp;
4617 macp->m_dip = dp->dip;
4618 macp->m_src_addr = dp->dev_addr.ether_addr_octet;
4619 macp->m_callbacks = &gem_m_callbacks;
4620 macp->m_min_sdu = 0;
4621 macp->m_max_sdu = dp->mtu;
4622
4623 if (dp->misc_flag & GEM_VLAN) {
4624 macp->m_margin = VTAG_SIZE;
4625 }
4626 }
4627
4628 /* ======================================================================== */
4629 /*
4630  * attach/detach support
4631 */
4632 /* ======================================================================== */
4633 static void
4634 gem_read_conf(struct gem_dev *dp)
4635 {
4636 int val;
4637
4638 DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
4639
4640 /*
4641 	 * Get media mode information from the .conf file
4642 */
4643 dp->anadv_autoneg = gem_prop_get_int(dp, "adv_autoneg_cap", 1) != 0;
4644 dp->anadv_1000fdx = gem_prop_get_int(dp, "adv_1000fdx_cap", 1) != 0;
4645 dp->anadv_1000hdx = gem_prop_get_int(dp, "adv_1000hdx_cap", 1) != 0;
4646 dp->anadv_100t4 = gem_prop_get_int(dp, "adv_100T4_cap", 1) != 0;
4647 dp->anadv_100fdx = gem_prop_get_int(dp, "adv_100fdx_cap", 1) != 0;
4648 dp->anadv_100hdx = gem_prop_get_int(dp, "adv_100hdx_cap", 1) != 0;
4649 dp->anadv_10fdx = gem_prop_get_int(dp, "adv_10fdx_cap", 1) != 0;
4650 dp->anadv_10hdx = gem_prop_get_int(dp, "adv_10hdx_cap", 1) != 0;
4651
4652 if ((ddi_prop_exists(DDI_DEV_T_ANY, dp->dip,
4653 DDI_PROP_DONTPASS, "full-duplex"))) {
4654 dp->full_duplex = gem_prop_get_int(dp, "full-duplex", 1) != 0;
4655 dp->anadv_autoneg = B_FALSE;
4656 if (dp->full_duplex) {
4657 dp->anadv_1000hdx = B_FALSE;
4658 dp->anadv_100hdx = B_FALSE;
4659 dp->anadv_10hdx = B_FALSE;
4660 } else {
4661 dp->anadv_1000fdx = B_FALSE;
4662 dp->anadv_100fdx = B_FALSE;
4663 dp->anadv_10fdx = B_FALSE;
4664 }
4665 }
4666
4667 if ((val = gem_prop_get_int(dp, "speed", 0)) > 0) {
4668 dp->anadv_autoneg = B_FALSE;
4669 switch (val) {
4670 case 1000:
4671 dp->speed = GEM_SPD_1000;
4672 dp->anadv_100t4 = B_FALSE;
4673 dp->anadv_100fdx = B_FALSE;
4674 dp->anadv_100hdx = B_FALSE;
4675 dp->anadv_10fdx = B_FALSE;
4676 dp->anadv_10hdx = B_FALSE;
4677 break;
4678 case 100:
4679 dp->speed = GEM_SPD_100;
4680 dp->anadv_1000fdx = B_FALSE;
4681 dp->anadv_1000hdx = B_FALSE;
4682 dp->anadv_10fdx = B_FALSE;
4683 dp->anadv_10hdx = B_FALSE;
4684 break;
4685 case 10:
4686 dp->speed = GEM_SPD_10;
4687 dp->anadv_1000fdx = B_FALSE;
4688 dp->anadv_1000hdx = B_FALSE;
4689 dp->anadv_100t4 = B_FALSE;
4690 dp->anadv_100fdx = B_FALSE;
4691 dp->anadv_100hdx = B_FALSE;
4692 break;
4693 default:
4694 cmn_err(CE_WARN,
4695 "!%s: property %s: illegal value:%d",
4696 dp->name, "speed", val);
4697 dp->anadv_autoneg = B_TRUE;
4698 break;
4699 }
4700 }
4701
4702 val = gem_prop_get_int(dp, "flow-control", dp->gc.gc_flow_control);
4703 if (val > FLOW_CONTROL_RX_PAUSE || val < FLOW_CONTROL_NONE) {
4704 cmn_err(CE_WARN,
4705 "!%s: property %s: illegal value:%d",
4706 dp->name, "flow-control", val);
4707 } else {
4708 val = min(val, dp->gc.gc_flow_control);
4709 }
4710 dp->anadv_flow_control = val;
4711
4712 if (gem_prop_get_int(dp, "nointr", 0)) {
4713 dp->misc_flag |= GEM_NOINTR;
4714 cmn_err(CE_NOTE, "!%s: polling mode enabled", dp->name);
4715 }
4716
4717 dp->mtu = gem_prop_get_int(dp, "mtu", dp->mtu);
4718 dp->txthr = gem_prop_get_int(dp, "txthr", dp->txthr);
4719 dp->rxthr = gem_prop_get_int(dp, "rxthr", dp->rxthr);
4720 dp->txmaxdma = gem_prop_get_int(dp, "txmaxdma", dp->txmaxdma);
4721 dp->rxmaxdma = gem_prop_get_int(dp, "rxmaxdma", dp->rxmaxdma);
4722 }
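/*
 * Illustrative example (not part of the driver): with the properties read
 * above, a driver.conf fragment that forces 100Mbps full duplex and
 * disables autonegotiation could look like:
 *
 *	full-duplex = 1;
 *	speed = 100;
 *
 * Setting either property also clears anadv_autoneg, as coded above.
 */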
4723
4724
4725 /*
4726 * Gem kstat support
4727 */
4728
4729 #define GEM_LOCAL_DATA_SIZE(gc) \
4730 (sizeof (struct gem_dev) + \
4731 sizeof (struct mcast_addr) * GEM_MAXMC + \
4732 sizeof (struct txbuf) * ((gc)->gc_tx_buf_size) + \
4733 sizeof (void *) * ((gc)->gc_tx_buf_size))
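/*
 * The single kmem_zalloc() area sized by GEM_LOCAL_DATA_SIZE() is carved up
 * in gem_do_attach(): the struct gem_dev itself, followed by GEM_MAXMC
 * mcast_addr entries (dp->mc_list), gc_tx_buf_size txbuf entries
 * (dp->tx_buf), and finally gc_tx_buf_size pointer-sized slots.
 */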
4734
4735 struct gem_dev *
4736 gem_do_attach(dev_info_t *dip, int port,
4737 struct gem_conf *gc, void *base, ddi_acc_handle_t *regs_handlep,
4738 void *lp, int lmsize)
4739 {
4740 struct gem_dev *dp;
4741 int i;
4742 ddi_iblock_cookie_t c;
4743 mac_register_t *macp = NULL;
4744 int ret;
4745 int unit;
4746 int nports;
4747
4748 unit = ddi_get_instance(dip);
4749 if ((nports = gc->gc_nports) == 0) {
4750 nports = 1;
4751 }
4752 if (nports == 1) {
4753 ddi_set_driver_private(dip, NULL);
4754 }
4755
4756 DPRINTF(2, (CE_CONT, "!gem%d: gem_do_attach: called cmd:ATTACH",
4757 unit));
4758
4759 /*
4760 * Allocate soft data structure
4761 */
4762 dp = kmem_zalloc(GEM_LOCAL_DATA_SIZE(gc), KM_SLEEP);
4763
4764 if ((macp = mac_alloc(MAC_VERSION)) == NULL) {
4765 cmn_err(CE_WARN, "!gem%d: %s: mac_alloc failed",
4766 unit, __func__);
4767 		goto err_free_private;	/* free dp allocated above */
4768 }
4769 /* ddi_set_driver_private(dip, dp); */
4770
4771 /* link to private area */
4772 dp->private = lp;
4773 dp->priv_size = lmsize;
4774 dp->mc_list = (struct mcast_addr *)&dp[1];
4775
4776 dp->dip = dip;
4777 (void) sprintf(dp->name, gc->gc_name, nports * unit + port);
4778
4779 /*
4780 * Get iblock cookie
4781 */
4782 if (ddi_get_iblock_cookie(dip, 0, &c) != DDI_SUCCESS) {
4783 cmn_err(CE_CONT,
4784 "!%s: gem_do_attach: ddi_get_iblock_cookie: failed",
4785 dp->name);
4786 goto err_free_private;
4787 }
4788 dp->iblock_cookie = c;
4789
4790 /*
4791 	 * Initialize mutexes for this device.
4792 */
4793 mutex_init(&dp->intrlock, NULL, MUTEX_DRIVER, (void *)c);
4794 mutex_init(&dp->xmitlock, NULL, MUTEX_DRIVER, (void *)c);
4795 cv_init(&dp->tx_drain_cv, NULL, CV_DRIVER, NULL);
4796
4797 /*
4798 	 * configure gem parameters
4799 */
4800 dp->base_addr = base;
4801 dp->regs_handle = *regs_handlep;
4802 dp->gc = *gc;
4803 gc = &dp->gc;
4804 	/* patch to simplify dma resource management */
4805 gc->gc_tx_max_frags = 1;
4806 gc->gc_tx_max_descs_per_pkt = 1;
4807 gc->gc_tx_ring_size = gc->gc_tx_buf_size;
4808 gc->gc_tx_ring_limit = gc->gc_tx_buf_limit;
4809 gc->gc_tx_desc_write_oo = B_TRUE;
4810
4811 gc->gc_nports = nports; /* fix nports */
4812
4813 	/* fix copy thresholds */
4814 gc->gc_tx_copy_thresh = max(ETHERMIN, gc->gc_tx_copy_thresh);
4815 gc->gc_rx_copy_thresh = max(ETHERMIN, gc->gc_rx_copy_thresh);
4816
4817 /* fix rx buffer boundary for iocache line size */
4818 ASSERT(gc->gc_dma_attr_txbuf.dma_attr_align-1 == gc->gc_tx_buf_align);
4819 ASSERT(gc->gc_dma_attr_rxbuf.dma_attr_align-1 == gc->gc_rx_buf_align);
4820 gc->gc_rx_buf_align = max(gc->gc_rx_buf_align, IOC_LINESIZE - 1);
4821 gc->gc_dma_attr_rxbuf.dma_attr_align = gc->gc_rx_buf_align + 1;
4822
4823 /* fix descriptor boundary for cache line size */
4824 gc->gc_dma_attr_desc.dma_attr_align =
4825 max(gc->gc_dma_attr_desc.dma_attr_align, IOC_LINESIZE);
4826
4827 /* patch get_packet method */
4828 if (gc->gc_get_packet == NULL) {
4829 gc->gc_get_packet = &gem_get_packet_default;
4830 }
4831
4832 /* patch get_rx_start method */
4833 if (gc->gc_rx_start == NULL) {
4834 gc->gc_rx_start = &gem_rx_start_default;
4835 }
4836
4837 /* calculate descriptor area */
4838 if (gc->gc_rx_desc_unit_shift >= 0) {
4839 dp->rx_desc_size =
4840 ROUNDUP(gc->gc_rx_ring_size << gc->gc_rx_desc_unit_shift,
4841 gc->gc_dma_attr_desc.dma_attr_align);
4842 }
4843 if (gc->gc_tx_desc_unit_shift >= 0) {
4844 dp->tx_desc_size =
4845 ROUNDUP(gc->gc_tx_ring_size << gc->gc_tx_desc_unit_shift,
4846 gc->gc_dma_attr_desc.dma_attr_align);
4847 }
4848
4849 dp->mtu = ETHERMTU;
4850 dp->tx_buf = (void *)&dp->mc_list[GEM_MAXMC];
4851 /* link tx buffers */
4852 for (i = 0; i < dp->gc.gc_tx_buf_size; i++) {
4853 dp->tx_buf[i].txb_next =
4854 &dp->tx_buf[SLOT(i + 1, dp->gc.gc_tx_buf_size)];
4855 }
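	/*
	 * Note: the SLOT() wrap above makes the last txbuf point back to
	 * the first, so the tx buffers form a circular list.
	 */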
4856
4857 dp->rxmode = 0;
4858 dp->speed = GEM_SPD_10; /* default is 10Mbps */
4859 dp->full_duplex = B_FALSE; /* default is half */
4860 dp->flow_control = FLOW_CONTROL_NONE;
4861 	dp->poll_pkt_delay = 8;		/* typical rx packet coalescing delay */
4862
4863 /* performance tuning parameters */
4864 dp->txthr = ETHERMAX; /* tx fifo threshold */
4865 dp->txmaxdma = 16*4; /* tx max dma burst size */
4866 dp->rxthr = 128; /* rx fifo threshold */
4867 dp->rxmaxdma = 16*4; /* rx max dma burst size */
4868
4869 /*
4870 * Get media mode information from .conf file
4871 */
4872 gem_read_conf(dp);
4873
4874 	/* rx_buf_len is the required buffer length, excluding alignment padding */
4875 dp->rx_buf_len = MAXPKTBUF(dp) + dp->gc.gc_rx_header_len;
4876
4877 /*
4878 * Reset the chip
4879 */
4880 mutex_enter(&dp->intrlock);
4881 dp->nic_state = NIC_STATE_STOPPED;
4882 ret = (*dp->gc.gc_reset_chip)(dp);
4883 mutex_exit(&dp->intrlock);
4884 if (ret != GEM_SUCCESS) {
4885 goto err_free_regs;
4886 }
4887
4888 /*
4889 	 * HW dependent parameter initialization
4890 */
4891 mutex_enter(&dp->intrlock);
4892 ret = (*dp->gc.gc_attach_chip)(dp);
4893 mutex_exit(&dp->intrlock);
4894 if (ret != GEM_SUCCESS) {
4895 goto err_free_regs;
4896 }
4897
4898 #ifdef DEBUG_MULTIFRAGS
4899 dp->gc.gc_tx_copy_thresh = dp->mtu;
4900 #endif
4901 /* allocate tx and rx resources */
4902 if (gem_alloc_memory(dp)) {
4903 goto err_free_regs;
4904 }
4905
4906 DPRINTF(0, (CE_CONT,
4907 "!%s: at 0x%x, %02x:%02x:%02x:%02x:%02x:%02x",
4908 dp->name, (long)dp->base_addr,
4909 dp->dev_addr.ether_addr_octet[0],
4910 dp->dev_addr.ether_addr_octet[1],
4911 dp->dev_addr.ether_addr_octet[2],
4912 dp->dev_addr.ether_addr_octet[3],
4913 dp->dev_addr.ether_addr_octet[4],
4914 dp->dev_addr.ether_addr_octet[5]));
4915
4916 /* copy mac address */
4917 dp->cur_addr = dp->dev_addr;
4918
4919 gem_gld3_init(dp, macp);
4920
4921 /* Probe MII phy (scan phy) */
4922 dp->mii_lpable = 0;
4923 dp->mii_advert = 0;
4924 dp->mii_exp = 0;
4925 dp->mii_ctl1000 = 0;
4926 dp->mii_stat1000 = 0;
4927 if ((*dp->gc.gc_mii_probe)(dp) != GEM_SUCCESS) {
4928 goto err_free_ring;
4929 }
4930
4931 /* mask unsupported abilities */
4932 dp->anadv_autoneg &= BOOLEAN(dp->mii_status & MII_STATUS_CANAUTONEG);
4933 dp->anadv_1000fdx &=
4934 BOOLEAN(dp->mii_xstatus &
4935 (MII_XSTATUS_1000BASEX_FD | MII_XSTATUS_1000BASET_FD));
4936 dp->anadv_1000hdx &=
4937 BOOLEAN(dp->mii_xstatus &
4938 (MII_XSTATUS_1000BASEX | MII_XSTATUS_1000BASET));
4939 dp->anadv_100t4 &= BOOLEAN(dp->mii_status & MII_STATUS_100_BASE_T4);
4940 dp->anadv_100fdx &= BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX_FD);
4941 dp->anadv_100hdx &= BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX);
4942 dp->anadv_10fdx &= BOOLEAN(dp->mii_status & MII_STATUS_10_FD);
4943 dp->anadv_10hdx &= BOOLEAN(dp->mii_status & MII_STATUS_10);
4944
4945 gem_choose_forcedmode(dp);
4946
4947 /* initialize MII phy if required */
4948 if (dp->gc.gc_mii_init) {
4949 if ((*dp->gc.gc_mii_init)(dp) != GEM_SUCCESS) {
4950 goto err_free_ring;
4951 }
4952 }
4953
4954 /*
4955 	 * setup NDD parameters, including mii statistics
4956 */
4957 gem_nd_setup(dp);
4958
4959 /*
4960 	 * Register with the MAC layer, then add the interrupt handler.
4961 */
4962 if (ret = mac_register(macp, &dp->mh)) {
4963 cmn_err(CE_WARN, "!%s: mac_register failed, error:%d",
4964 dp->name, ret);
4965 goto err_release_stats;
4966 }
4967 mac_free(macp);
4968 macp = NULL;
4969
4970 if (dp->misc_flag & GEM_SOFTINTR) {
4971 if (ddi_add_softintr(dip,
4972 DDI_SOFTINT_LOW, &dp->soft_id,
4973 NULL, NULL,
4974 (uint_t (*)(caddr_t))gem_intr,
4975 (caddr_t)dp) != DDI_SUCCESS) {
4976 cmn_err(CE_WARN, "!%s: ddi_add_softintr failed",
4977 dp->name);
4978 goto err_unregister;
4979 }
4980 } else if ((dp->misc_flag & GEM_NOINTR) == 0) {
4981 if (ddi_add_intr(dip, 0, NULL, NULL,
4982 (uint_t (*)(caddr_t))gem_intr,
4983 (caddr_t)dp) != DDI_SUCCESS) {
4984 cmn_err(CE_WARN, "!%s: ddi_add_intr failed", dp->name);
4985 goto err_unregister;
4986 }
4987 } else {
4988 /*
4989 		 * Don't use interrupts;
4990 		 * schedule the first call of gem_intr_watcher instead.
4991 */
4992 dp->intr_watcher_id =
4993 timeout((void (*)(void *))gem_intr_watcher,
4994 (void *)dp, drv_usectohz(3*1000000));
4995 }
4996
4997 /* link this device to dev_info */
4998 dp->next = (struct gem_dev *)ddi_get_driver_private(dip);
4999 dp->port = port;
5000 ddi_set_driver_private(dip, (caddr_t)dp);
5001
5002 /* reset mii phy and start mii link watcher */
5003 gem_mii_start(dp);
5004
5005 DPRINTF(2, (CE_CONT, "!gem_do_attach: return: success"));
5006 return (dp);
5007
5008 err_unregister:
5009 (void) mac_unregister(dp->mh);
5010 err_release_stats:
5011 /* release NDD resources */
5012 gem_nd_cleanup(dp);
5013
5014 err_free_ring:
5015 gem_free_memory(dp);
5016 err_free_regs:
5017 ddi_regs_map_free(&dp->regs_handle);
5018 err_free_locks:
5019 mutex_destroy(&dp->xmitlock);
5020 mutex_destroy(&dp->intrlock);
5021 cv_destroy(&dp->tx_drain_cv);
5022 err_free_private:
5023 if (macp) {
5024 mac_free(macp);
5025 }
5026 kmem_free((caddr_t)dp, GEM_LOCAL_DATA_SIZE(gc));
5027
5028 return (NULL);
5029 }
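/*
 * Usage sketch (illustrative only; my_conf, base, regs_handle, lp and
 * lmsize are hypothetical locals of a chip-specific driver): a chip driver
 * typically maps its registers, fills a struct gem_conf with ring sizes,
 * DMA attributes and chip callbacks (gc_reset_chip, gc_attach_chip,
 * gc_set_rx_filter, ...), and then calls
 *
 *	dp = gem_do_attach(dip, 0, &my_conf, base, &regs_handle, lp, lmsize);
 *
 * from its attach(9E) entry point, treating a NULL return as failure.
 */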
5030
5031 int
5032 gem_do_detach(dev_info_t *dip)
5033 {
5034 struct gem_dev *dp;
5035 struct gem_dev *tmp;
5036 caddr_t private;
5037 int priv_size;
5038 ddi_acc_handle_t rh;
5039
5040 dp = GEM_GET_DEV(dip);
5041 if (dp == NULL) {
5042 return (DDI_SUCCESS);
5043 }
5044
5045 rh = dp->regs_handle;
5046 private = dp->private;
5047 priv_size = dp->priv_size;
5048
5049 while (dp) {
5050 /* unregister with gld v3 */
5051 if (mac_unregister(dp->mh) != 0) {
5052 return (DDI_FAILURE);
5053 }
5054
5055 		/* ensure no rx buffers are still in use */
5056 if (dp->rx_buf_allocated != dp->rx_buf_freecnt) {
5057 /* resource is busy */
5058 cmn_err(CE_PANIC,
5059 "!%s: %s: rxbuf is busy: allocated:%d, freecnt:%d",
5060 dp->name, __func__,
5061 dp->rx_buf_allocated, dp->rx_buf_freecnt);
5062 /* NOT REACHED */
5063 }
5064
5065 /* stop mii link watcher */
5066 gem_mii_stop(dp);
5067
5068 /* unregister interrupt handler */
5069 if (dp->misc_flag & GEM_SOFTINTR) {
5070 ddi_remove_softintr(dp->soft_id);
5071 } else if ((dp->misc_flag & GEM_NOINTR) == 0) {
5072 ddi_remove_intr(dip, 0, dp->iblock_cookie);
5073 } else {
5074 /* stop interrupt watcher */
5075 if (dp->intr_watcher_id) {
5076 while (untimeout(dp->intr_watcher_id) == -1)
5077 ;
5078 dp->intr_watcher_id = 0;
5079 }
5080 }
5081
5082 /* release NDD resources */
5083 gem_nd_cleanup(dp);
5084 /* release buffers, descriptors and dma resources */
5085 gem_free_memory(dp);
5086
5087 /* release locks and condition variables */
5088 mutex_destroy(&dp->xmitlock);
5089 mutex_destroy(&dp->intrlock);
5090 cv_destroy(&dp->tx_drain_cv);
5091
5092 /* release basic memory resources */
5093 tmp = dp->next;
5094 kmem_free((caddr_t)dp, GEM_LOCAL_DATA_SIZE(&dp->gc));
5095 dp = tmp;
5096 }
5097
5098 /* release common private memory for the nic */
5099 kmem_free(private, priv_size);
5100
5101 /* release register mapping resources */
5102 ddi_regs_map_free(&rh);
5103
5104 DPRINTF(2, (CE_CONT, "!%s%d: gem_do_detach: return: success",
5105 ddi_driver_name(dip), ddi_get_instance(dip)));
5106
5107 return (DDI_SUCCESS);
5108 }
5109
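/*
 * gem_suspend/gem_resume are expected to be called by the chip-specific
 * driver from its detach(9E)/attach(9E) entry points when handling
 * DDI_SUSPEND/DDI_RESUME.
 */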
5110 int
5111 gem_suspend(dev_info_t *dip)
5112 {
5113 struct gem_dev *dp;
5114
5115 /*
5116 * stop the device
5117 */
5118 dp = GEM_GET_DEV(dip);
5119 ASSERT(dp);
5120
5121 DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
5122
5123 for (; dp; dp = dp->next) {
5124
5125 /* stop mii link watcher */
5126 gem_mii_stop(dp);
5127
5128 /* stop interrupt watcher for no-intr mode */
5129 if (dp->misc_flag & GEM_NOINTR) {
5130 if (dp->intr_watcher_id) {
5131 while (untimeout(dp->intr_watcher_id) == -1)
5132 ;
5133 }
5134 dp->intr_watcher_id = 0;
5135 }
5136
5137 /* stop tx timeout watcher */
5138 if (dp->timeout_id) {
5139 while (untimeout(dp->timeout_id) == -1)
5140 ;
5141 dp->timeout_id = 0;
5142 }
5143
5144 /* make the nic state inactive */
5145 mutex_enter(&dp->intrlock);
5146 (void) gem_mac_stop(dp, 0);
5147 ASSERT(!dp->mac_active);
5148
5149 /* no further register access */
5150 dp->mac_suspended = B_TRUE;
5151 mutex_exit(&dp->intrlock);
5152 }
5153
5154 /* XXX - power down the nic */
5155
5156 return (DDI_SUCCESS);
5157 }
5158
5159 int
5160 gem_resume(dev_info_t *dip)
5161 {
5162 struct gem_dev *dp;
5163
5164 /*
5165 * restart the device
5166 */
5167 dp = GEM_GET_DEV(dip);
5168 ASSERT(dp);
5169
5170 DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
5171
5172 for (; dp; dp = dp->next) {
5173
5174 /*
5175 * Bring up the nic after power up
5176 */
5177
5178 		/* the chip-specific (gem_xxx.c) layer sets up the power management state. */
5179 ASSERT(!dp->mac_active);
5180
5181 /* reset the chip, because we are just after power up. */
5182 mutex_enter(&dp->intrlock);
5183
5184 dp->mac_suspended = B_FALSE;
5185 dp->nic_state = NIC_STATE_STOPPED;
5186
5187 if ((*dp->gc.gc_reset_chip)(dp) != GEM_SUCCESS) {
5188 cmn_err(CE_WARN, "%s: %s: failed to reset chip",
5189 dp->name, __func__);
5190 mutex_exit(&dp->intrlock);
5191 goto err;
5192 }
5193 mutex_exit(&dp->intrlock);
5194
5195 /* initialize mii phy because we are just after power up */
5196 if (dp->gc.gc_mii_init) {
5197 (void) (*dp->gc.gc_mii_init)(dp);
5198 }
5199
5200 if (dp->misc_flag & GEM_NOINTR) {
5201 /*
5202 * schedule first call of gem_intr_watcher
5203 * instead of interrupts.
5204 */
5205 dp->intr_watcher_id =
5206 timeout((void (*)(void *))gem_intr_watcher,
5207 (void *)dp, drv_usectohz(3*1000000));
5208 }
5209
5210 /* restart mii link watcher */
5211 gem_mii_start(dp);
5212
5213 /* restart mac */
5214 mutex_enter(&dp->intrlock);
5215
5216 if (gem_mac_init(dp) != GEM_SUCCESS) {
5217 mutex_exit(&dp->intrlock);
5218 goto err_reset;
5219 }
5220 dp->nic_state = NIC_STATE_INITIALIZED;
5221
5222 		/* setup media mode if the link has already come up */
5223 if (dp->mii_state == MII_STATE_LINKUP) {
5224 if ((dp->gc.gc_set_media)(dp) != GEM_SUCCESS) {
5225 mutex_exit(&dp->intrlock);
5226 goto err_reset;
5227 }
5228 }
5229
5230 /* enable mac address and rx filter */
5231 dp->rxmode |= RXMODE_ENABLE;
5232 if ((*dp->gc.gc_set_rx_filter)(dp) != GEM_SUCCESS) {
5233 mutex_exit(&dp->intrlock);
5234 goto err_reset;
5235 }
5236 dp->nic_state = NIC_STATE_ONLINE;
5237
5238 /* restart tx timeout watcher */
5239 dp->timeout_id = timeout((void (*)(void *))gem_tx_timeout,
5240 (void *)dp,
5241 dp->gc.gc_tx_timeout_interval);
5242
5243 /* now the nic is fully functional */
5244 if (dp->mii_state == MII_STATE_LINKUP) {
5245 if (gem_mac_start(dp) != GEM_SUCCESS) {
5246 mutex_exit(&dp->intrlock);
5247 goto err_reset;
5248 }
5249 }
5250 mutex_exit(&dp->intrlock);
5251 }
5252
5253 return (DDI_SUCCESS);
5254
5255 err_reset:
5256 if (dp->intr_watcher_id) {
5257 while (untimeout(dp->intr_watcher_id) == -1)
5258 ;
5259 dp->intr_watcher_id = 0;
5260 }
5261 mutex_enter(&dp->intrlock);
5262 (*dp->gc.gc_reset_chip)(dp);
5263 dp->nic_state = NIC_STATE_STOPPED;
5264 mutex_exit(&dp->intrlock);
5265
5266 err:
5267 return (DDI_FAILURE);
5268 }
5269
5270 /*
5271 * misc routines for PCI
5272 */
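/*
 * gem_search_pci_cap: walk the PCI capability list and return the config
 * space offset of the capability whose ID matches 'target', or 0 if the
 * capability is not present.
 */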
5273 uint8_t
5274 gem_search_pci_cap(dev_info_t *dip,
5275 ddi_acc_handle_t conf_handle, uint8_t target)
5276 {
5277 uint8_t pci_cap_ptr;
5278 uint32_t pci_cap;
5279
5280 	/* walk the pci capability list looking for the target capability */
5281 pci_cap_ptr = pci_config_get8(conf_handle, PCI_CONF_CAP_PTR);
5282 while (pci_cap_ptr) {
5283 /* read pci capability header */
5284 pci_cap = pci_config_get32(conf_handle, pci_cap_ptr);
5285 if ((pci_cap & 0xff) == target) {
5286 /* found */
5287 break;
5288 }
5289 /* get next_ptr */
5290 pci_cap_ptr = (pci_cap >> 8) & 0xff;
5291 }
5292 return (pci_cap_ptr);
5293 }
5294
5295 int
5296 gem_pci_set_power_state(dev_info_t *dip,
5297 ddi_acc_handle_t conf_handle, uint_t new_mode)
5298 {
5299 uint8_t pci_cap_ptr;
5300 uint32_t pmcsr;
5301 uint_t unit;
5302 const char *drv_name;
5303
5304 ASSERT(new_mode < 4);
5305
5306 unit = ddi_get_instance(dip);
5307 drv_name = ddi_driver_name(dip);
5308
5309 	/* search for the power management capability */
5310 pci_cap_ptr = gem_search_pci_cap(dip, conf_handle, PCI_CAP_ID_PM);
5311
5312 if (pci_cap_ptr == 0) {
5313 cmn_err(CE_CONT,
5314 "!%s%d: doesn't have pci power management capability",
5315 drv_name, unit);
5316 return (DDI_FAILURE);
5317 }
5318
5319 	/* read the power management control/status register */
5320 pmcsr = pci_config_get32(conf_handle, pci_cap_ptr + PCI_PMCSR);
5321
5322 DPRINTF(0, (CE_CONT,
5323 "!%s%d: pmc found at 0x%x: pmcsr: 0x%08x",
5324 drv_name, unit, pci_cap_ptr, pmcsr));
5325
5326 /*
5327 	 * Is the requested power mode supported?
5328 */
5329 /* not yet */
5330
5331 /*
5332 * move to new mode
5333 */
5334 pmcsr = (pmcsr & ~PCI_PMCSR_STATE_MASK) | new_mode;
5335 pci_config_put32(conf_handle, pci_cap_ptr + PCI_PMCSR, pmcsr);
5336
5337 return (DDI_SUCCESS);
5338 }
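/*
 * Typical usage (illustrative): a chip driver would bring the device to
 * full power before touching its registers, e.g.
 *
 *	(void) gem_pci_set_power_state(dip, conf_handle, PCI_PMCSR_D0);
 *
 * where new_mode 0..3 corresponds to the PCI power states D0..D3hot.
 */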
5339
5340 /*
5341  * select a suitable register set by the specified address-space type or
5342  * register offset in PCI config space
5343 */
5344 int
5345 gem_pci_regs_map_setup(dev_info_t *dip, uint32_t which, uint32_t mask,
5346 struct ddi_device_acc_attr *attrp,
5347 caddr_t *basep, ddi_acc_handle_t *hp)
5348 {
5349 struct pci_phys_spec *regs;
5350 uint_t len;
5351 uint_t unit;
5352 uint_t n;
5353 uint_t i;
5354 int ret;
5355 const char *drv_name;
5356
5357 unit = ddi_get_instance(dip);
5358 drv_name = ddi_driver_name(dip);
5359
5360 /* Search IO-range or memory-range to be mapped */
5361 regs = NULL;
5362 len = 0;
5363
5364 if ((ret = ddi_prop_lookup_int_array(
5365 DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
5366 "reg", (void *)®s, &len)) != DDI_PROP_SUCCESS) {
5367 cmn_err(CE_WARN,
5368 "!%s%d: failed to get reg property (ret:%d)",
5369 drv_name, unit, ret);
5370 return (DDI_FAILURE);
5371 }
5372 n = len / (sizeof (struct pci_phys_spec) / sizeof (int));
5373
5374 ASSERT(regs != NULL && len > 0);
5375
5376 #if GEM_DEBUG_LEVEL > 0
5377 for (i = 0; i < n; i++) {
5378 cmn_err(CE_CONT,
5379 "!%s%d: regs[%d]: %08x.%08x.%08x.%08x.%08x",
5380 drv_name, unit, i,
5381 regs[i].pci_phys_hi,
5382 regs[i].pci_phys_mid,
5383 regs[i].pci_phys_low,
5384 regs[i].pci_size_hi,
5385 regs[i].pci_size_low);
5386 }
5387 #endif
5388 for (i = 0; i < n; i++) {
5389 if ((regs[i].pci_phys_hi & mask) == which) {
5390 /* it's the requested space */
5391 ddi_prop_free(regs);
5392 goto address_range_found;
5393 }
5394 }
5395 ddi_prop_free(regs);
5396 return (DDI_FAILURE);
5397
5398 address_range_found:
5399 if ((ret = ddi_regs_map_setup(dip, i, basep, 0, 0, attrp, hp))
5400 != DDI_SUCCESS) {
5401 cmn_err(CE_CONT,
5402 "!%s%d: ddi_regs_map_setup failed (ret:%d)",
5403 drv_name, unit, ret);
5404 }
5405
5406 return (ret);
5407 }
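/*
 * Typical usage (illustrative): to map the first 32bit memory range of the
 * device, a chip driver might call
 *
 *	ret = gem_pci_regs_map_setup(dip, PCI_ADDR_MEM32, PCI_ADDR_MASK,
 *	    &dev_attr, &base, &regs_handle);
 *
 * where PCI_ADDR_MEM32 and PCI_ADDR_MASK come from <sys/pci.h> and select
 * the address-space type encoded in the pci_phys_hi word of "reg".
 */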
5408
5409 void
5410 gem_mod_init(struct dev_ops *dop, char *name)
5411 {
5412 mac_init_ops(dop, name);
5413 }
5414
5415 void
5416 gem_mod_fini(struct dev_ops *dop)
5417 {
5418 mac_fini_ops(dop);
5419 }
5420