xref: /titanic_51/usr/src/uts/common/io/yge/yge.c (revision bdf0047c9427cca40961a023475891c898579c37)
1 /*
2  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
3  * Use is subject to license terms.
4  */
5 
6 /*
7  * This driver was derived from the FreeBSD if_msk.c driver, which
8  * bears the following copyright attributions and licenses.
9  */
10 
11 /*
12  *
13  *	LICENSE:
14  *	Copyright (C) Marvell International Ltd. and/or its affiliates
15  *
16  *	The computer program files contained in this folder ("Files")
17  *	are provided to you under the BSD-type license terms provided
18  *	below, and any use of such Files and any derivative works
19  *	thereof created by you shall be governed by the following terms
20  *	and conditions:
21  *
22  *	- Redistributions of source code must retain the above copyright
23  *	  notice, this list of conditions and the following disclaimer.
24  *	- Redistributions in binary form must reproduce the above
25  *	  copyright notice, this list of conditions and the following
26  *	  disclaimer in the documentation and/or other materials provided
27  *	  with the distribution.
28  *	- Neither the name of Marvell nor the names of its contributors
29  *	  may be used to endorse or promote products derived from this
30  *	  software without specific prior written permission.
31  *
32  *	THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
33  *	"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
34  *	LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
35  *	FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
36  *	COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
37  *	INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
38  *	BUT NOT LIMITED TO, PROCUREMENT OF  SUBSTITUTE GOODS OR SERVICES;
39  *	LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
40  *	HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
41  *	STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
42  *	ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
43  *	OF THE POSSIBILITY OF SUCH DAMAGE.
44  *	/LICENSE
45  *
46  */
47 /*
48  * Copyright (c) 1997, 1998, 1999, 2000
49  *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
50  *
51  * Redistribution and use in source and binary forms, with or without
52  * modification, are permitted provided that the following conditions
53  * are met:
54  * 1. Redistributions of source code must retain the above copyright
55  *    notice, this list of conditions and the following disclaimer.
56  * 2. Redistributions in binary form must reproduce the above copyright
57  *    notice, this list of conditions and the following disclaimer in the
58  *    documentation and/or other materials provided with the distribution.
59  * 3. All advertising materials mentioning features or use of this software
60  *    must display the following acknowledgement:
61  *	This product includes software developed by Bill Paul.
62  * 4. Neither the name of the author nor the names of any co-contributors
63  *    may be used to endorse or promote products derived from this software
64  *    without specific prior written permission.
65  *
66  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
67  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
68  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
69  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
70  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
71  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
72  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
73  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
74  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
75  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
76  * THE POSSIBILITY OF SUCH DAMAGE.
77  */
78 /*
79  * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu>
80  *
81  * Permission to use, copy, modify, and distribute this software for any
82  * purpose with or without fee is hereby granted, provided that the above
83  * copyright notice and this permission notice appear in all copies.
84  *
85  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
86  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
87  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
88  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
89  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
90  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
91  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
92  */
93 
94 #include <sys/varargs.h>
95 #include <sys/types.h>
96 #include <sys/modctl.h>
97 #include <sys/conf.h>
98 #include <sys/devops.h>
99 #include <sys/stream.h>
100 #include <sys/strsun.h>
101 #include <sys/cmn_err.h>
102 #include <sys/ethernet.h>
103 #include <sys/kmem.h>
104 #include <sys/time.h>
105 #include <sys/pci.h>
106 #include <sys/mii.h>
107 #include <sys/miiregs.h>
108 #include <sys/mac.h>
109 #include <sys/mac_ether.h>
110 #include <sys/mac_provider.h>
111 #include <sys/debug.h>
112 #include <sys/note.h>
113 #include <sys/ddi.h>
114 #include <sys/sunddi.h>
115 #include <sys/vlan.h>
116 
117 #include "yge.h"
118 
/* Access attributes for the memory-mapped device registers (little-endian). */
static struct ddi_device_acc_attr yge_regs_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};

/* Access attributes for descriptor rings; entries are little-endian. */
static struct ddi_device_acc_attr yge_ring_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};

/* Access attributes for packet buffers; raw bytes, never byte-swapped. */
static struct ddi_device_acc_attr yge_buf_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC
};
136 
/* Descriptor rings must be aligned on a 4 KB boundary. */
#define	DESC_ALIGN	0x1000

/*
 * DMA attributes for descriptor rings: a single contiguous cookie
 * (sgllen 1) below the 4 GB line, aligned per DESC_ALIGN.
 */
static ddi_dma_attr_t yge_ring_dma_attr = {
	DMA_ATTR_V0,		/* dma_attr_version */
	0,			/* dma_attr_addr_lo */
	0x00000000ffffffffull,	/* dma_attr_addr_hi */
	0x00000000ffffffffull,	/* dma_attr_count_max */
	DESC_ALIGN,		/* dma_attr_align */
	0x000007fc,		/* dma_attr_burstsizes */
	1,			/* dma_attr_minxfer */
	0x00000000ffffffffull,	/* dma_attr_maxxfer */
	0x00000000ffffffffull,	/* dma_attr_seg */
	1,			/* dma_attr_sgllen */
	1,			/* dma_attr_granular */
	0			/* dma_attr_flags */
};

/*
 * DMA attributes for packet buffers: up to 8 cookies, 32-bit
 * addresses, transfers capped at 64 KB.
 */
static ddi_dma_attr_t yge_buf_dma_attr = {
	DMA_ATTR_V0,		/* dma_attr_version */
	0,			/* dma_attr_addr_lo */
	0x00000000ffffffffull,	/* dma_attr_addr_hi */
	0x00000000ffffffffull,	/* dma_attr_count_max */
	1,			/* dma_attr_align */
	0x0000fffc,		/* dma_attr_burstsizes */
	1,			/* dma_attr_minxfer */
	0x000000000000ffffull,	/* dma_attr_maxxfer */
	0x00000000ffffffffull,	/* dma_attr_seg */
	8,			/* dma_attr_sgllen */
	1,			/* dma_attr_granular */
	0			/* dma_attr_flags */
};
168 
169 
/* Autoconfiguration: attach/detach and suspend/resume. */
static int yge_attach(yge_dev_t *);
static void yge_detach(yge_dev_t *);
static int yge_suspend(yge_dev_t *);
static int yge_resume(yge_dev_t *);

/* Device-wide reset and internal SRAM packet buffer carving. */
static void yge_reset(yge_dev_t *);
static void yge_setup_rambuffer(yge_dev_t *);

/* Per-port setup/teardown and MAC (GLDv3) registration. */
static int yge_init_port(yge_port_t *);
static void yge_uninit_port(yge_port_t *);
static int yge_register_port(yge_port_t *);
static int yge_unregister_port(yge_port_t *);

/* Interrupt handling and the Tx/Rx data path. */
static void yge_tick(void *);
static uint_t yge_intr(caddr_t, caddr_t);
static int yge_intr_gmac(yge_port_t *);
static void yge_intr_enable(yge_dev_t *);
static void yge_intr_disable(yge_dev_t *);
static boolean_t yge_handle_events(yge_dev_t *, mblk_t **, mblk_t **, int *);
static void yge_handle_hwerr(yge_port_t *, uint32_t);
static void yge_intr_hwerr(yge_dev_t *);
static mblk_t *yge_rxeof(yge_port_t *, uint32_t, int);
static void yge_txeof(yge_port_t *, int);
static boolean_t yge_send(yge_port_t *, mblk_t *);
static void yge_set_prefetch(yge_dev_t *, int, yge_ring_t *);
static void yge_set_rambuffer(yge_port_t *);
static void yge_start_port(yge_port_t *);
static void yge_stop_port(yge_port_t *);
static void yge_phy_power(yge_dev_t *, boolean_t);
static int yge_alloc_ring(yge_port_t *, yge_dev_t *, yge_ring_t *, uint32_t);
static void yge_free_ring(yge_ring_t *);
static uint8_t yge_find_capability(yge_dev_t *, uint8_t);

/* DMA resource management and ring initialization. */
static int yge_txrx_dma_alloc(yge_port_t *);
static void yge_txrx_dma_free(yge_port_t *);
static void yge_init_rx_ring(yge_port_t *);
static void yge_init_tx_ring(yge_port_t *);

/* Low-level PHY register accessors (caller holds the PHY lock). */
static uint16_t yge_mii_readreg(yge_port_t *, uint8_t, uint8_t);
static void yge_mii_writereg(yge_port_t *, uint8_t, uint8_t, uint16_t);

/* Hooks for the common MII framework. */
static uint16_t yge_mii_read(void *, uint8_t, uint8_t);
static void yge_mii_write(void *, uint8_t, uint8_t, uint16_t);
static void yge_mii_notify(void *, link_state_t);

/* Receive filtering and deferred (taskq) restart handling. */
static void yge_setrxfilt(yge_port_t *);
static void yge_restart_task(yge_dev_t *);
static void yge_task(void *);
static void yge_dispatch(yge_dev_t *, int);

/* Statistics maintenance and multicast hashing. */
static void yge_stats_clear(yge_port_t *);
static void yge_stats_update(yge_port_t *);
static uint32_t yge_hashbit(const uint8_t *);

/* GLDv3 (mac) entry points. */
static int yge_m_unicst(void *, const uint8_t *);
static int yge_m_multicst(void *, boolean_t, const uint8_t *);
static int yge_m_promisc(void *, boolean_t);
static mblk_t *yge_m_tx(void *, mblk_t *);
static int yge_m_stat(void *, uint_t, uint64_t *);
static int yge_m_start(void *);
static void yge_m_stop(void *);
static int yge_m_getprop(void *, const char *, mac_prop_id_t, uint_t, void *);
static void yge_m_propinfo(void *, const char *, mac_prop_id_t,
    mac_prop_info_handle_t);
static int yge_m_setprop(void *, const char *, mac_prop_id_t, uint_t,
    const void *);
static void yge_m_ioctl(void *, queue_t *, mblk_t *);

/* Error reporting, and PHY support routines implemented elsewhere. */
void yge_error(yge_dev_t *, yge_port_t *, char *, ...);
extern void yge_phys_update(yge_port_t *);
extern int yge_phys_restart(yge_port_t *, boolean_t);
extern int yge_phys_init(yge_port_t *, phy_readreg_t, phy_writereg_t);
242 
/*
 * GLDv3 entry point vector.  This is a positional initializer; the
 * NULL slots are optional callbacks this driver does not implement.
 */
static mac_callbacks_t yge_m_callbacks = {
	MC_IOCTL | MC_SETPROP | MC_GETPROP | MC_PROPINFO,	/* mc_callbacks */
	yge_m_stat,	/* mc_getstat */
	yge_m_start,	/* mc_start */
	yge_m_stop,	/* mc_stop */
	yge_m_promisc,	/* mc_setpromisc */
	yge_m_multicst,	/* mc_multicst */
	yge_m_unicst,	/* mc_unicst */
	yge_m_tx,	/* mc_tx */
	NULL,
	yge_m_ioctl,	/* mc_ioctl */
	NULL,		/* mc_getcapab */
	NULL,		/* mc_open */
	NULL,		/* mc_close */
	yge_m_setprop,	/* mc_setprop */
	yge_m_getprop,	/* mc_getprop */
	yge_m_propinfo	/* mc_propinfo */
};
261 
/* Accessors handed to the common MII framework for driving our PHY. */
static mii_ops_t yge_mii_ops = {
	MII_OPS_VERSION,
	yge_mii_read,	/* PHY register read (takes the PHY lock) */
	yge_mii_write,	/* PHY register write (takes the PHY lock) */
	yge_mii_notify,	/* link state change notification */
	NULL	/* reset */
};
269 
270 /*
271  * This is the low level interface routine to read from the PHY
272  * MII registers. There is multiple steps to these accesses. First
273  * the register number is written to an address register. Then after
274  * a specified delay status is checked until the data is present.
275  */
276 static uint16_t
277 yge_mii_readreg(yge_port_t *port, uint8_t phy, uint8_t reg)
278 {
279 	yge_dev_t *dev = port->p_dev;
280 	int pnum = port->p_port;
281 	uint16_t val;
282 
283 	GMAC_WRITE_2(dev, pnum, GM_SMI_CTRL,
284 	    GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD);
285 
286 	for (int i = 0; i < YGE_TIMEOUT; i += 10) {
287 		drv_usecwait(10);
288 		val = GMAC_READ_2(dev, pnum, GM_SMI_CTRL);
289 		if ((val & GM_SMI_CT_RD_VAL) != 0) {
290 			val = GMAC_READ_2(dev, pnum, GM_SMI_DATA);
291 			return (val);
292 		}
293 	}
294 
295 	return (0xffff);
296 }
297 
298 /*
299  * This is the low level interface routine to write to the PHY
300  * MII registers. There is multiple steps to these accesses. The
301  * data and the target registers address are written to the PHY.
302  * Then the PHY is polled until it is done with the write. Note
303  * that the delays are specified and required!
304  */
305 static void
306 yge_mii_writereg(yge_port_t *port, uint8_t phy, uint8_t reg, uint16_t val)
307 {
308 	yge_dev_t *dev = port->p_dev;
309 	int pnum = port->p_port;
310 
311 	GMAC_WRITE_2(dev, pnum, GM_SMI_DATA, val);
312 	GMAC_WRITE_2(dev, pnum, GM_SMI_CTRL,
313 	    GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg));
314 
315 	for (int i = 0; i < YGE_TIMEOUT; i += 10) {
316 		drv_usecwait(10);
317 		if ((GMAC_READ_2(dev, pnum, GM_SMI_CTRL) & GM_SMI_CT_BUSY) == 0)
318 			return;
319 	}
320 
321 	yge_error(NULL, port, "phy write timeout");
322 }
323 
324 static uint16_t
325 yge_mii_read(void *arg, uint8_t phy, uint8_t reg)
326 {
327 	yge_port_t *port = arg;
328 	uint16_t rv;
329 
330 	PHY_LOCK(port->p_dev);
331 	rv = yge_mii_readreg(port, phy, reg);
332 	PHY_UNLOCK(port->p_dev);
333 	return (rv);
334 }
335 
336 static void
337 yge_mii_write(void *arg, uint8_t phy, uint8_t reg, uint16_t val)
338 {
339 	yge_port_t *port = arg;
340 
341 	PHY_LOCK(port->p_dev);
342 	yge_mii_writereg(port, phy, reg, val);
343 	PHY_UNLOCK(port->p_dev);
344 }
345 
346 /*
347  * The MII common code calls this function to let the MAC driver
348  * know when there has been a change in status.
349  */
350 void
351 yge_mii_notify(void *arg, link_state_t link)
352 {
353 	yge_port_t *port = arg;
354 	yge_dev_t *dev = port->p_dev;
355 	uint32_t gmac;
356 	uint32_t gpcr;
357 	link_flowctrl_t	fc;
358 	link_duplex_t duplex;
359 	int speed;
360 
361 	fc = mii_get_flowctrl(port->p_mii);
362 	duplex = mii_get_duplex(port->p_mii);
363 	speed = mii_get_speed(port->p_mii);
364 
365 	DEV_LOCK(dev);
366 
367 	if (link == LINK_STATE_UP) {
368 
369 		/* Enable Tx FIFO Underrun. */
370 		CSR_WRITE_1(dev, MR_ADDR(port->p_port, GMAC_IRQ_MSK),
371 		    GM_IS_TX_FF_UR |	/* TX FIFO underflow */
372 		    GM_IS_RX_FF_OR);	/* RX FIFO overflow */
373 
374 		gpcr = GM_GPCR_AU_ALL_DIS;
375 
376 		switch (fc) {
377 		case LINK_FLOWCTRL_BI:
378 			gmac = GMC_PAUSE_ON;
379 			gpcr &= ~(GM_GPCR_FC_RX_DIS | GM_GPCR_FC_TX_DIS);
380 			break;
381 		case LINK_FLOWCTRL_TX:
382 			gmac = GMC_PAUSE_ON;
383 			gpcr |= GM_GPCR_FC_RX_DIS;
384 			break;
385 		case LINK_FLOWCTRL_RX:
386 			gmac = GMC_PAUSE_ON;
387 			gpcr |= GM_GPCR_FC_TX_DIS;
388 			break;
389 		case LINK_FLOWCTRL_NONE:
390 		default:
391 			gmac = GMC_PAUSE_OFF;
392 			gpcr |= GM_GPCR_FC_RX_DIS;
393 			gpcr |= GM_GPCR_FC_TX_DIS;
394 			break;
395 		}
396 
397 		gpcr &= ~((GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100));
398 		switch (speed) {
399 		case 1000:
400 			gpcr |= GM_GPCR_SPEED_1000;
401 			break;
402 		case 100:
403 			gpcr |= GM_GPCR_SPEED_100;
404 			break;
405 		case 10:
406 		default:
407 			break;
408 		}
409 
410 		if (duplex == LINK_DUPLEX_FULL) {
411 			gpcr |= GM_GPCR_DUP_FULL;
412 		} else {
413 			gpcr &= ~(GM_GPCR_DUP_FULL);
414 			gmac = GMC_PAUSE_OFF;
415 			gpcr |= GM_GPCR_FC_RX_DIS;
416 			gpcr |= GM_GPCR_FC_TX_DIS;
417 		}
418 
419 		gpcr |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
420 		GMAC_WRITE_2(dev, port->p_port, GM_GP_CTRL, gpcr);
421 
422 		/* Read again to ensure writing. */
423 		(void) GMAC_READ_2(dev, port->p_port, GM_GP_CTRL);
424 
425 		/* write out the flow control gmac setting */
426 		CSR_WRITE_4(dev, MR_ADDR(port->p_port, GMAC_CTRL), gmac);
427 
428 	} else {
429 		/* Disable Rx/Tx MAC. */
430 		gpcr = GMAC_READ_2(dev, port->p_port, GM_GP_CTRL);
431 		gpcr &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
432 		GMAC_WRITE_2(dev, port->p_port, GM_GP_CTRL, gpcr);
433 
434 		/* Read again to ensure writing. */
435 		(void) GMAC_READ_2(dev, port->p_port, GM_GP_CTRL);
436 	}
437 
438 	DEV_UNLOCK(dev);
439 
440 	mac_link_update(port->p_mh, link);
441 
442 	if (port->p_running && (link == LINK_STATE_UP)) {
443 		mac_tx_update(port->p_mh);
444 	}
445 }
446 
/*
 * Program the port's receive filter: station address, promiscuous
 * mode, and the 64-bit multicast hash filter.  Does nothing while
 * the device is suspended.
 */
static void
yge_setrxfilt(yge_port_t *port)
{
	yge_dev_t	*dev;
	uint16_t	mode;
	uint8_t		*ea;
	uint32_t	*mchash;
	int		pnum;

	dev = port->p_dev;
	pnum = port->p_port;
	ea = port->p_curraddr;	/* current station (unicast) address */
	mchash = port->p_mchash;	/* multicast hash: 2 x 32-bit words */

	if (dev->d_suspended)
		return;

	/* Set station address. */
	for (int i = 0; i < (ETHERADDRL / 2); i++) {
		GMAC_WRITE_2(dev, pnum, GM_SRC_ADDR_1L + i * 4,
		    ((uint16_t)ea[i * 2] | ((uint16_t)ea[(i * 2) + 1] << 8)));
	}
	/*
	 * NOTE(review): the second station address register set is loaded
	 * with the same address as the first -- presumably intentional;
	 * confirm against the chip documentation.
	 */
	for (int i = 0; i < (ETHERADDRL / 2); i++) {
		GMAC_WRITE_2(dev, pnum, GM_SRC_ADDR_2L + i * 4,
		    ((uint16_t)ea[i * 2] | ((uint16_t)ea[(i * 2) + 1] << 8)));
	}

	/* Figure out receive filtering mode. */
	mode = GMAC_READ_2(dev, pnum, GM_RX_CTRL);
	if (port->p_promisc) {
		/* Promiscuous: disable both unicast and multicast filters. */
		mode &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
	} else {
		mode |= (GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
	}
	/* Write the multicast filter. */
	GMAC_WRITE_2(dev, pnum, GM_MC_ADDR_H1, mchash[0] & 0xffff);
	GMAC_WRITE_2(dev, pnum, GM_MC_ADDR_H2, (mchash[0] >> 16) & 0xffff);
	GMAC_WRITE_2(dev, pnum, GM_MC_ADDR_H3, mchash[1] & 0xffff);
	GMAC_WRITE_2(dev, pnum, GM_MC_ADDR_H4, (mchash[1] >> 16) & 0xffff);
	/* Write the receive filtering mode. */
	GMAC_WRITE_2(dev, pnum, GM_RX_CTRL, mode);
}
489 
/*
 * Initialize the receive descriptor ring: clear it, hand every
 * receive buffer's physical address to the hardware (HW_OWNER),
 * then program the prefetch unit and its put index.
 */
static void
yge_init_rx_ring(yge_port_t *port)
{
	yge_buf_t *rxb;
	yge_ring_t *ring;
	int prod;

	port->p_rx_cons = 0;
	port->p_rx_putwm = YGE_PUT_WM;	/* put-index watermark */
	ring = &port->p_rx_ring;

	/* ala bzero, but uses safer acch access */
	CLEARRING(ring);

	for (prod = 0; prod < YGE_RX_RING_CNT; prod++) {
		/* Hang out receive buffers. */
		rxb = &port->p_rx_buf[prod];

		PUTADDR(ring, prod, rxb->b_paddr);
		PUTCTRL(ring, prod, port->p_framesize | OP_PACKET | HW_OWNER);
	}

	/* Make the descriptors visible to the device before starting it. */
	SYNCRING(ring, DDI_DMA_SYNC_FORDEV);

	yge_set_prefetch(port->p_dev, port->p_rxq, ring);

	/* Update prefetch unit. */
	CSR_WRITE_2(port->p_dev,
	    Y2_PREF_Q_ADDR(port->p_rxq, PREF_UNIT_PUT_IDX_REG),
	    YGE_RX_RING_CNT - 1);
}
521 
/*
 * Initialize the transmit descriptor ring: reset the software
 * producer/consumer state, clear the ring, and point the prefetch
 * unit at it.
 */
static void
yge_init_tx_ring(yge_port_t *port)
{
	yge_ring_t *ring = &port->p_tx_ring;

	port->p_tx_prod = 0;
	port->p_tx_cons = 0;
	port->p_tx_cnt = 0;

	CLEARRING(ring);
	/* Flush the cleared ring to the device before enabling prefetch. */
	SYNCRING(ring, DDI_DMA_SYNC_FORDEV);

	yge_set_prefetch(port->p_dev, port->p_txq, ring);
}
536 
537 static void
538 yge_setup_rambuffer(yge_dev_t *dev)
539 {
540 	int next;
541 	int i;
542 
543 	/* Get adapter SRAM size. */
544 	dev->d_ramsize = CSR_READ_1(dev, B2_E_0) * 4;
545 	if (dev->d_ramsize == 0)
546 		return;
547 
548 	dev->d_pflags |= PORT_FLAG_RAMBUF;
549 	/*
550 	 * Give receiver 2/3 of memory and round down to the multiple
551 	 * of 1024. Tx/Rx RAM buffer size of Yukon 2 should be multiple
552 	 * of 1024.
553 	 */
554 	dev->d_rxqsize = (((dev->d_ramsize * 1024 * 2) / 3) & ~(1024 - 1));
555 	dev->d_txqsize = (dev->d_ramsize * 1024) - dev->d_rxqsize;
556 
557 	for (i = 0, next = 0; i < dev->d_num_port; i++) {
558 		dev->d_rxqstart[i] = next;
559 		dev->d_rxqend[i] = next + dev->d_rxqsize - 1;
560 		next = dev->d_rxqend[i] + 1;
561 		dev->d_txqstart[i] = next;
562 		dev->d_txqend[i] = next + dev->d_txqsize - 1;
563 		next = dev->d_txqend[i] + 1;
564 	}
565 }
566 
/*
 * Power the PHY(s) up or down.  The power-up path switches chip
 * power from VAUX to VCC, ungates the PCI/core clocks, releases
 * the PHYs from PowerDown/COMA mode (applying chip-specific
 * workarounds for EC Ultra/Extreme/FE+), and pulses the GMAC link
 * control reset on every port.  The power-down path reverses this:
 * PHYs into power-down, clocks gated, and power back to VAUX.
 */
static void
yge_phy_power(yge_dev_t *dev, boolean_t powerup)
{
	uint32_t val;
	int i;

	if (powerup) {
		/* Switch power to VCC (WA for VAUX problem). */
		CSR_WRITE_1(dev, B0_POWER_CTRL,
		    PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);
		/* Disable Core Clock Division, set Clock Select to 0. */
		CSR_WRITE_4(dev, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS);

		val = 0;
		if (dev->d_hw_id == CHIP_ID_YUKON_XL &&
		    dev->d_hw_rev > CHIP_REV_YU_XL_A1) {
			/* Enable bits are inverted. */
			val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
			    Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
			    Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS;
		}
		/*
		 * Enable PCI & Core Clock, enable clock gating for both Links.
		 */
		CSR_WRITE_1(dev, B2_Y2_CLK_GATE, val);

		val = pci_config_get32(dev->d_pcih, PCI_OUR_REG_1);
		val &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
		if (dev->d_hw_id == CHIP_ID_YUKON_XL &&
		    dev->d_hw_rev > CHIP_REV_YU_XL_A1) {
			/* Deassert Low Power for 1st PHY. */
			val |= PCI_Y2_PHY1_COMA;
			if (dev->d_num_port > 1)
				val |= PCI_Y2_PHY2_COMA;
		}

		/* Release PHY from PowerDown/COMA mode. */
		pci_config_put32(dev->d_pcih, PCI_OUR_REG_1, val);

		/* Chip-specific power-up workarounds. */
		switch (dev->d_hw_id) {
		case CHIP_ID_YUKON_EC_U:
		case CHIP_ID_YUKON_EX:
		case CHIP_ID_YUKON_FE_P: {
			uint32_t our;

			CSR_WRITE_2(dev, B0_CTST, Y2_HW_WOL_OFF);

			/* Enable all clocks. */
			pci_config_put32(dev->d_pcih, PCI_OUR_REG_3, 0);

			our = pci_config_get32(dev->d_pcih, PCI_OUR_REG_4);
			our &= (PCI_FORCE_ASPM_REQUEST|PCI_ASPM_GPHY_LINK_DOWN|
			    PCI_ASPM_INT_FIFO_EMPTY|PCI_ASPM_CLKRUN_REQUEST);
			/* Set all bits to 0 except bits 15..12. */
			pci_config_put32(dev->d_pcih, PCI_OUR_REG_4, our);

			/* Set to default value. */
			our = pci_config_get32(dev->d_pcih, PCI_OUR_REG_5);
			our &= P_CTL_TIM_VMAIN_AV_MSK;
			pci_config_put32(dev->d_pcih, PCI_OUR_REG_5, our);

			pci_config_put32(dev->d_pcih, PCI_OUR_REG_1, 0);

			/*
			 * Enable workaround for dev 4.107 on Yukon-Ultra
			 * and Extreme
			 */
			our = CSR_READ_4(dev, B2_GP_IO);
			our |= GLB_GPIO_STAT_RACE_DIS;
			CSR_WRITE_4(dev, B2_GP_IO, our);

			/* Flush the write. */
			(void) CSR_READ_4(dev, B2_GP_IO);
			break;
		}
		default:
			break;
		}

		/* Pulse the GMAC link control reset on every port. */
		for (i = 0; i < dev->d_num_port; i++) {
			CSR_WRITE_2(dev, MR_ADDR(i, GMAC_LINK_CTRL),
			    GMLC_RST_SET);
			CSR_WRITE_2(dev, MR_ADDR(i, GMAC_LINK_CTRL),
			    GMLC_RST_CLR);
		}
	} else {
		/* Put the PHY(s) into power-down mode. */
		val = pci_config_get32(dev->d_pcih, PCI_OUR_REG_1);
		if (dev->d_hw_id == CHIP_ID_YUKON_XL &&
		    dev->d_hw_rev > CHIP_REV_YU_XL_A1) {
			val &= ~PCI_Y2_PHY1_COMA;
			if (dev->d_num_port > 1)
				val &= ~PCI_Y2_PHY2_COMA;
			val &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
		} else {
			val |= (PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
		}
		pci_config_put32(dev->d_pcih, PCI_OUR_REG_1, val);

		val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
		    Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
		    Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS;
		if (dev->d_hw_id == CHIP_ID_YUKON_XL &&
		    dev->d_hw_rev > CHIP_REV_YU_XL_A1) {
			/* Enable bits are inverted. */
			val = 0;
		}
		/*
		 * Disable PCI & Core Clock, disable clock gating for
		 * both Links.
		 */
		CSR_WRITE_1(dev, B2_Y2_CLK_GATE, val);
		/* Switch power back to VAUX. */
		CSR_WRITE_1(dev, B0_POWER_CTRL,
		    PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_ON | PC_VCC_OFF);
	}
}
681 
/*
 * Full controller reset and global (re)initialization: disable ASF,
 * perform a software reset, clear PCI/PCI-X/PCIe error state, power
 * up the PHYs, reset the GPHY/GMAC blocks, program RAM interface
 * timeouts, apply bus-specific workarounds, and bring up the status
 * list unit and its timers.
 */
static void
yge_reset(yge_dev_t *dev)
{
	uint64_t addr;
	uint16_t status;
	uint32_t val;
	int i;
	ddi_acc_handle_t	pcih = dev->d_pcih;

	/* Turn off ASF */
	if (dev->d_hw_id == CHIP_ID_YUKON_EX) {
		status = CSR_READ_2(dev, B28_Y2_ASF_STAT_CMD);
		/* Clear AHB bridge & microcontroller reset */
		status &= ~Y2_ASF_CPU_MODE;
		status &= ~Y2_ASF_AHB_RST;
		/* Clear ASF microcontroller state */
		status &= ~Y2_ASF_STAT_MSK;
		CSR_WRITE_2(dev, B28_Y2_ASF_STAT_CMD, status);
	} else {
		CSR_WRITE_1(dev, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
	}
	CSR_WRITE_2(dev, B0_CTST, Y2_ASF_DISABLE);

	/*
	 * Since we disabled ASF, S/W reset is required for Power Management.
	 */
	CSR_WRITE_1(dev, B0_CTST, CS_RST_SET);
	CSR_WRITE_1(dev, B0_CTST, CS_RST_CLR);

	/* Allow writes to PCI config space */
	CSR_WRITE_1(dev, B2_TST_CTRL1, TST_CFG_WRITE_ON);

	/* Clear all error bits in the PCI status register. */
	status = pci_config_get16(pcih, PCI_CONF_STAT);
	/*
	 * NOTE(review): TST_CFG_WRITE_ON is written a second time here;
	 * this matches the FreeBSD driver this code derives from.
	 */
	CSR_WRITE_1(dev, B2_TST_CTRL1, TST_CFG_WRITE_ON);

	/* The status error bits are write-one-to-clear. */
	status |= (PCI_STAT_S_PERROR | PCI_STAT_S_SYSERR | PCI_STAT_R_MAST_AB |
	    PCI_STAT_R_TARG_AB | PCI_STAT_PERROR);
	pci_config_put16(pcih, PCI_CONF_STAT, status);

	CSR_WRITE_1(dev, B0_CTST, CS_MRST_CLR);

	/* Bus-specific error cleanup and tuning. */
	switch (dev->d_bustype) {
	case PEX_BUS:
		/* Clear all PEX errors. */
		CSR_PCI_WRITE_4(dev, Y2_CFG_AER + AER_UNCOR_ERR, 0xffffffff);

		/* is error bit status stuck? */
		val = CSR_PCI_READ_4(dev, PEX_UNC_ERR_STAT);
		if ((val & PEX_RX_OV) != 0) {
			/* Mask the interrupts we cannot clear. */
			dev->d_intrmask &= ~Y2_IS_HW_ERR;
			dev->d_intrhwemask &= ~Y2_IS_PCI_EXP;
		}
		break;
	case PCI_BUS:
		/* Set Cache Line Size to 2 (8 bytes) if configured to 0. */
		if (pci_config_get8(pcih, PCI_CONF_CACHE_LINESZ) == 0)
			pci_config_put16(pcih, PCI_CONF_CACHE_LINESZ, 2);
		break;
	case PCIX_BUS:
		/* Set Cache Line Size to 2 (8 bytes) if configured to 0. */
		if (pci_config_get8(pcih, PCI_CONF_CACHE_LINESZ) == 0)
			pci_config_put16(pcih, PCI_CONF_CACHE_LINESZ, 2);

		/* Set Cache Line Size opt. */
		val = pci_config_get32(pcih, PCI_OUR_REG_1);
		val |= PCI_CLS_OPT;
		pci_config_put32(pcih, PCI_OUR_REG_1, val);
		break;
	}

	/* Set PHY power state. */
	yge_phy_power(dev, B_TRUE);

	/* Reset GPHY/GMAC Control */
	for (i = 0; i < dev->d_num_port; i++) {
		/* GPHY Control reset. */
		CSR_WRITE_4(dev, MR_ADDR(i, GPHY_CTRL), GPC_RST_SET);
		CSR_WRITE_4(dev, MR_ADDR(i, GPHY_CTRL), GPC_RST_CLR);
		/* GMAC Control reset. */
		CSR_WRITE_4(dev, MR_ADDR(i, GMAC_CTRL), GMC_RST_SET);
		CSR_WRITE_4(dev, MR_ADDR(i, GMAC_CTRL), GMC_RST_CLR);
		if (dev->d_hw_id == CHIP_ID_YUKON_EX ||
		    dev->d_hw_id == CHIP_ID_YUKON_SUPR) {
			/* Bypass MACsec blocks on Extreme/Supreme. */
			CSR_WRITE_2(dev, MR_ADDR(i, GMAC_CTRL),
			    (GMC_BYP_RETR_ON | GMC_BYP_MACSECRX_ON |
			    GMC_BYP_MACSECTX_ON));
		}
		CSR_WRITE_2(dev, MR_ADDR(i, GMAC_CTRL), GMC_F_LOOPB_OFF);

	}
	CSR_WRITE_1(dev, B2_TST_CTRL1, TST_CFG_WRITE_OFF);

	/* LED On. */
	CSR_WRITE_2(dev, B0_CTST, Y2_LED_STAT_ON);

	/* Clear TWSI IRQ. */
	CSR_WRITE_4(dev, B2_I2C_IRQ, I2C_CLR_IRQ);

	/* Turn off hardware timer. */
	CSR_WRITE_1(dev, B2_TI_CTRL, TIM_STOP);
	CSR_WRITE_1(dev, B2_TI_CTRL, TIM_CLR_IRQ);

	/* Turn off descriptor polling. */
	CSR_WRITE_1(dev, B28_DPT_CTRL, DPT_STOP);

	/* Turn off time stamps. */
	CSR_WRITE_1(dev, GMAC_TI_ST_CTRL, GMT_ST_STOP);
	CSR_WRITE_1(dev, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);

	/* Don't permit config space writing */
	CSR_WRITE_1(dev, B2_TST_CTRL1, TST_CFG_WRITE_OFF);

	/* enable TX Arbiters */
	for (i = 0; i < dev->d_num_port; i++)
		CSR_WRITE_1(dev, MR_ADDR(i, TXA_CTRL), TXA_ENA_ARB);

	/* Configure timeout values. */
	for (i = 0; i < dev->d_num_port; i++) {
		CSR_WRITE_1(dev, SELECT_RAM_BUFFER(i, B3_RI_CTRL), RI_RST_CLR);

		CSR_WRITE_1(dev, SELECT_RAM_BUFFER(i, B3_RI_WTO_R1), RI_TO_53);
		CSR_WRITE_1(dev, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA1), RI_TO_53);
		CSR_WRITE_1(dev, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS1), RI_TO_53);
		CSR_WRITE_1(dev, SELECT_RAM_BUFFER(i, B3_RI_RTO_R1), RI_TO_53);
		CSR_WRITE_1(dev, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA1), RI_TO_53);
		CSR_WRITE_1(dev, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS1), RI_TO_53);
		CSR_WRITE_1(dev, SELECT_RAM_BUFFER(i, B3_RI_WTO_R2), RI_TO_53);
		CSR_WRITE_1(dev, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA2), RI_TO_53);
		CSR_WRITE_1(dev, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS2), RI_TO_53);
		CSR_WRITE_1(dev, SELECT_RAM_BUFFER(i, B3_RI_RTO_R2), RI_TO_53);
		CSR_WRITE_1(dev, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA2), RI_TO_53);
		CSR_WRITE_1(dev, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS2), RI_TO_53);
	}

	/* Disable all interrupts. */
	CSR_WRITE_4(dev, B0_HWE_IMSK, 0);
	(void) CSR_READ_4(dev, B0_HWE_IMSK);
	CSR_WRITE_4(dev, B0_IMSK, 0);
	(void) CSR_READ_4(dev, B0_IMSK);

	/*
	 * On dual port PCI-X card, there is an problem where status
	 * can be received out of order due to split transactions.
	 */
	if (dev->d_bustype == PCIX_BUS && dev->d_num_port > 1) {
		int pcix;
		uint16_t pcix_cmd;

		if ((pcix = yge_find_capability(dev, PCI_CAP_ID_PCIX)) != 0) {
			pcix_cmd = pci_config_get16(pcih, pcix + 2);
			/* Clear Max Outstanding Split Transactions. */
			pcix_cmd &= ~0x70;
			CSR_WRITE_1(dev, B2_TST_CTRL1, TST_CFG_WRITE_ON);
			pci_config_put16(pcih, pcix + 2, pcix_cmd);
			CSR_WRITE_1(dev, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
		}
	}
	if (dev->d_bustype == PEX_BUS) {
		uint16_t v, width;

		v = pci_config_get16(pcih, PEX_DEV_CTRL);
		/* Change Max. Read Request Size to 4096 bytes. */
		v &= ~PEX_DC_MAX_RRS_MSK;
		v |= PEX_DC_MAX_RD_RQ_SIZE(5);
		pci_config_put16(pcih, PEX_DEV_CTRL, v);
		/* Warn if the negotiated link width is below the maximum. */
		width = pci_config_get16(pcih, PEX_LNK_STAT);
		width = (width & PEX_LS_LINK_WI_MSK) >> 4;
		v = pci_config_get16(pcih, PEX_LNK_CAP);
		v = (v & PEX_LS_LINK_WI_MSK) >> 4;
		if (v != width)
			yge_error(dev, NULL,
			    "Negotiated width of PCIe link(x%d) != "
			    "max. width of link(x%d)\n", width, v);
	}

	/* Clear status list. */
	CLEARRING(&dev->d_status_ring);
	SYNCRING(&dev->d_status_ring, DDI_DMA_SYNC_FORDEV);

	dev->d_stat_cons = 0;

	CSR_WRITE_4(dev, STAT_CTRL, SC_STAT_RST_SET);
	CSR_WRITE_4(dev, STAT_CTRL, SC_STAT_RST_CLR);

	/* Set the status list base address. */
	addr = dev->d_status_ring.r_paddr;
	CSR_WRITE_4(dev, STAT_LIST_ADDR_LO, YGE_ADDR_LO(addr));
	CSR_WRITE_4(dev, STAT_LIST_ADDR_HI, YGE_ADDR_HI(addr));

	/* Set the status list last index. */
	CSR_WRITE_2(dev, STAT_LAST_IDX, YGE_STAT_RING_CNT - 1);
	CSR_WRITE_2(dev, STAT_PUT_IDX, 0);

	if (dev->d_hw_id == CHIP_ID_YUKON_EC &&
	    dev->d_hw_rev == CHIP_REV_YU_EC_A1) {
		/* WA for dev. #4.3 */
		CSR_WRITE_2(dev, STAT_TX_IDX_TH, ST_TXTH_IDX_MASK);
		/* WA for dev #4.18 */
		CSR_WRITE_1(dev, STAT_FIFO_WM, 0x21);
		CSR_WRITE_1(dev, STAT_FIFO_ISR_WM, 7);
	} else {
		CSR_WRITE_2(dev, STAT_TX_IDX_TH, 10);
		CSR_WRITE_1(dev, STAT_FIFO_WM, 16);

		/* ISR status FIFO watermark */
		if (dev->d_hw_id == CHIP_ID_YUKON_XL &&
		    dev->d_hw_rev == CHIP_REV_YU_XL_A0)
			CSR_WRITE_1(dev, STAT_FIFO_ISR_WM, 4);
		else
			CSR_WRITE_1(dev, STAT_FIFO_ISR_WM, 16);

		CSR_WRITE_4(dev, STAT_ISR_TIMER_INI, 0x0190);
	}

	/*
	 * Use default value for STAT_ISR_TIMER_INI, STAT_LEV_TIMER_INI.
	 */
	CSR_WRITE_4(dev, STAT_TX_TIMER_INI, YGE_USECS(dev, 1000));

	/* Enable status unit. */
	CSR_WRITE_4(dev, STAT_CTRL, SC_STAT_OP_ON);

	CSR_WRITE_1(dev, STAT_TX_TIMER_CTRL, TIM_START);
	CSR_WRITE_1(dev, STAT_LEV_TIMER_CTRL, TIM_START);
	CSR_WRITE_1(dev, STAT_ISR_TIMER_CTRL, TIM_START);
}
909 
/*
 * Per-port software initialization: allocate Tx/Rx buffer arrays,
 * select queue register offsets, read the factory station address,
 * set up the MII handle, and fill in (but do not yet register) the
 * GLDv3 mac_register_t.  Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
yge_init_port(yge_port_t *port)
{
	yge_dev_t *dev = port->p_dev;
	int i;
	mac_register_t *macp;

	port->p_flags = dev->d_pflags;
	/* Encode the port number into the PPA so both ports are distinct. */
	port->p_ppa = ddi_get_instance(dev->d_dip) + (port->p_port * 100);

	port->p_tx_buf = kmem_zalloc(sizeof (yge_buf_t) * YGE_TX_RING_CNT,
	    KM_SLEEP);
	port->p_rx_buf = kmem_zalloc(sizeof (yge_buf_t) * YGE_RX_RING_CNT,
	    KM_SLEEP);

	/* Setup Tx/Rx queue register offsets. */
	if (port->p_port == YGE_PORT_A) {
		port->p_txq = Q_XA1;
		port->p_txsq = Q_XS1;
		port->p_rxq = Q_R1;
	} else {
		port->p_txq = Q_XA2;
		port->p_txsq = Q_XS2;
		port->p_rxq = Q_R2;
	}

	/* Disable jumbo frame for Yukon FE. */
	if (dev->d_hw_id == CHIP_ID_YUKON_FE)
		port->p_flags |= PORT_FLAG_NOJUMBO;

	/*
	 * Start out assuming a regular MTU.  Users can change this
	 * with dladm.  The dladm daemon is supposed to issue commands
	 * to change the default MTU using m_setprop during early boot
	 * (before the interface is plumbed) if the user has so
	 * requested.
	 */
	port->p_mtu = ETHERMTU;

	port->p_mii = mii_alloc(port, dev->d_dip, &yge_mii_ops);
	if (port->p_mii == NULL) {
		/*
		 * NOTE(review): p_tx_buf/p_rx_buf are not freed on this
		 * path -- presumably the caller tears down the port via
		 * yge_uninit_port() on failure; confirm.
		 */
		yge_error(NULL, port, "MII handle allocation failed");
		return (DDI_FAILURE);
	}
	/* We assume all parts support asymmetric pause */
	mii_set_pauseable(port->p_mii, B_TRUE, B_TRUE);

	/*
	 * Get station address for this interface. Note that
	 * dual port cards actually come with three station
	 * addresses: one for each port, plus an extra. The
	 * extra one is used by the SysKonnect driver software
	 * as a 'virtual' station address for when both ports
	 * are operating in failover mode. Currently we don't
	 * use this extra address.
	 */
	for (i = 0; i < ETHERADDRL; i++) {
		port->p_curraddr[i] =
		    CSR_READ_1(dev, B2_MAC_1 + (port->p_port * 8) + i);
	}

	/* Register with Nemo. */
	if ((macp = mac_alloc(MAC_VERSION)) == NULL) {
		yge_error(NULL, port, "MAC handle allocation failed");
		return (DDI_FAILURE);
	}
	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	macp->m_driver = port;
	macp->m_dip = dev->d_dip;
	macp->m_src_addr = port->p_curraddr;
	macp->m_callbacks = &yge_m_callbacks;
	macp->m_min_sdu = 0;
	macp->m_max_sdu = port->p_mtu;
	macp->m_instance = port->p_ppa;
	macp->m_margin = VLAN_TAGSZ;

	port->p_mreg = macp;

	return (DDI_SUCCESS);
}
990 
991 static int
992 yge_add_intr(yge_dev_t *dev, int intr_type)
993 {
994 	dev_info_t		*dip;
995 	int			count;
996 	int			actual;
997 	int			rv;
998 	int 			i, j;
999 
1000 	dip = dev->d_dip;
1001 
1002 	rv = ddi_intr_get_nintrs(dip, intr_type, &count);
1003 	if ((rv != DDI_SUCCESS) || (count == 0)) {
1004 		yge_error(dev, NULL,
1005 		    "ddi_intr_get_nintrs failed, rv %d, count %d", rv, count);
1006 		return (DDI_FAILURE);
1007 	}
1008 
1009 	/*
1010 	 * Allocate the interrupt.  Note that we only bother with a single
1011 	 * interrupt.  One could argue that for MSI devices with dual ports,
1012 	 * it would be nice to have a separate interrupt per port.  But right
1013 	 * now I don't know how to configure that, so we'll just settle for
1014 	 * a single interrupt.
1015 	 */
1016 	dev->d_intrcnt = 1;
1017 
1018 	dev->d_intrsize = count * sizeof (ddi_intr_handle_t);
1019 	dev->d_intrh = kmem_zalloc(dev->d_intrsize, KM_SLEEP);
1020 	if (dev->d_intrh == NULL) {
1021 		yge_error(dev, NULL, "Unable to allocate interrupt handle");
1022 		return (DDI_FAILURE);
1023 	}
1024 
1025 	rv = ddi_intr_alloc(dip, dev->d_intrh, intr_type, 0, dev->d_intrcnt,
1026 	    &actual, DDI_INTR_ALLOC_STRICT);
1027 	if ((rv != DDI_SUCCESS) || (actual == 0)) {
1028 		yge_error(dev, NULL,
1029 		    "Unable to allocate interrupt, %d, count %d",
1030 		    rv, actual);
1031 		kmem_free(dev->d_intrh, dev->d_intrsize);
1032 		return (DDI_FAILURE);
1033 	}
1034 
1035 	if ((rv = ddi_intr_get_pri(dev->d_intrh[0], &dev->d_intrpri)) !=
1036 	    DDI_SUCCESS) {
1037 		for (i = 0; i < dev->d_intrcnt; i++)
1038 			(void) ddi_intr_free(dev->d_intrh[i]);
1039 		yge_error(dev, NULL,
1040 		    "Unable to get interrupt priority, %d", rv);
1041 		kmem_free(dev->d_intrh, dev->d_intrsize);
1042 		return (DDI_FAILURE);
1043 	}
1044 
1045 	if ((rv = ddi_intr_get_cap(dev->d_intrh[0], &dev->d_intrcap)) !=
1046 	    DDI_SUCCESS) {
1047 		yge_error(dev, NULL,
1048 		    "Unable to get interrupt capabilities, %d", rv);
1049 		for (i = 0; i < dev->d_intrcnt; i++)
1050 			(void) ddi_intr_free(dev->d_intrh[i]);
1051 		kmem_free(dev->d_intrh, dev->d_intrsize);
1052 		return (DDI_FAILURE);
1053 	}
1054 
1055 	/* register interrupt handler to kernel */
1056 	for (i = 0; i < dev->d_intrcnt; i++) {
1057 		if ((rv = ddi_intr_add_handler(dev->d_intrh[i], yge_intr,
1058 		    dev, NULL)) != DDI_SUCCESS) {
1059 			yge_error(dev, NULL,
1060 			    "Unable to add interrupt handler, %d", rv);
1061 			for (j = 0; j < i; j++)
1062 				(void) ddi_intr_remove_handler(dev->d_intrh[j]);
1063 			for (i = 0; i < dev->d_intrcnt; i++)
1064 				(void) ddi_intr_free(dev->d_intrh[i]);
1065 			kmem_free(dev->d_intrh, dev->d_intrsize);
1066 			return (DDI_FAILURE);
1067 		}
1068 	}
1069 
1070 	mutex_init(&dev->d_rxlock, NULL, MUTEX_DRIVER,
1071 	    DDI_INTR_PRI(dev->d_intrpri));
1072 	mutex_init(&dev->d_txlock, NULL, MUTEX_DRIVER,
1073 	    DDI_INTR_PRI(dev->d_intrpri));
1074 	mutex_init(&dev->d_phylock, NULL, MUTEX_DRIVER,
1075 	    DDI_INTR_PRI(dev->d_intrpri));
1076 	mutex_init(&dev->d_task_mtx, NULL, MUTEX_DRIVER,
1077 	    DDI_INTR_PRI(dev->d_intrpri));
1078 
1079 	return (DDI_SUCCESS);
1080 }
1081 
1082 static int
1083 yge_attach_intr(yge_dev_t *dev)
1084 {
1085 	dev_info_t *dip = dev->d_dip;
1086 	int intr_types;
1087 	int rv;
1088 
1089 	/* Allocate IRQ resources. */
1090 	rv = ddi_intr_get_supported_types(dip, &intr_types);
1091 	if (rv != DDI_SUCCESS) {
1092 		yge_error(dev, NULL,
1093 		    "Unable to determine supported interrupt types, %d", rv);
1094 		return (DDI_FAILURE);
1095 	}
1096 
1097 	/*
1098 	 * We default to not supporting MSI.  We've found some device
1099 	 * and motherboard combinations don't always work well with
1100 	 * MSI interrupts.  Users may override this if they choose.
1101 	 */
1102 	if (ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, "msi_enable", 0) == 0) {
1103 		/* If msi disable property present, disable both msix/msi. */
1104 		if (intr_types & DDI_INTR_TYPE_FIXED) {
1105 			intr_types &= ~(DDI_INTR_TYPE_MSI | DDI_INTR_TYPE_MSIX);
1106 		}
1107 	}
1108 
1109 	if (intr_types & DDI_INTR_TYPE_MSIX) {
1110 		if ((rv = yge_add_intr(dev, DDI_INTR_TYPE_MSIX)) ==
1111 		    DDI_SUCCESS)
1112 			return (DDI_SUCCESS);
1113 	}
1114 
1115 	if (intr_types & DDI_INTR_TYPE_MSI) {
1116 		if ((rv = yge_add_intr(dev, DDI_INTR_TYPE_MSI)) ==
1117 		    DDI_SUCCESS)
1118 			return (DDI_SUCCESS);
1119 	}
1120 
1121 	if (intr_types & DDI_INTR_TYPE_FIXED) {
1122 		if ((rv = yge_add_intr(dev, DDI_INTR_TYPE_FIXED)) ==
1123 		    DDI_SUCCESS)
1124 			return (DDI_SUCCESS);
1125 	}
1126 
1127 	yge_error(dev, NULL, "Unable to configure any interrupts");
1128 	return (DDI_FAILURE);
1129 }
1130 
1131 static void
1132 yge_intr_enable(yge_dev_t *dev)
1133 {
1134 	int i;
1135 	if (dev->d_intrcap & DDI_INTR_FLAG_BLOCK) {
1136 		/* Call ddi_intr_block_enable() for MSI interrupts */
1137 		(void) ddi_intr_block_enable(dev->d_intrh, dev->d_intrcnt);
1138 	} else {
1139 		/* Call ddi_intr_enable for FIXED interrupts */
1140 		for (i = 0; i < dev->d_intrcnt; i++)
1141 			(void) ddi_intr_enable(dev->d_intrh[i]);
1142 	}
1143 }
1144 
1145 void
1146 yge_intr_disable(yge_dev_t *dev)
1147 {
1148 	int i;
1149 
1150 	if (dev->d_intrcap & DDI_INTR_FLAG_BLOCK) {
1151 		(void) ddi_intr_block_disable(dev->d_intrh, dev->d_intrcnt);
1152 	} else {
1153 		for (i = 0; i < dev->d_intrcnt; i++)
1154 			(void) ddi_intr_disable(dev->d_intrh[i]);
1155 	}
1156 }
1157 
1158 static uint8_t
1159 yge_find_capability(yge_dev_t *dev, uint8_t cap)
1160 {
1161 	uint8_t ptr;
1162 	uint16_t capit;
1163 	ddi_acc_handle_t pcih = dev->d_pcih;
1164 
1165 	if ((pci_config_get16(pcih, PCI_CONF_STAT) & PCI_STAT_CAP) == 0) {
1166 		return (0);
1167 	}
1168 	/* This assumes PCI, and not CardBus. */
1169 	ptr = pci_config_get8(pcih, PCI_CONF_CAP_PTR);
1170 	while (ptr != 0) {
1171 		capit = pci_config_get8(pcih, ptr + PCI_CAP_ID);
1172 		if (capit == cap) {
1173 			return (ptr);
1174 		}
1175 		ptr = pci_config_get8(pcih, ptr + PCI_CAP_NEXT_PTR);
1176 	}
1177 	return (0);
1178 }
1179 
/*
 * Attach the device: map PCI config and register space, bring the chip
 * out of reset, identify the hardware, allocate shared resources
 * (status ring, taskq, interrupts), initialize and register each MAC
 * port, and start the periodic timer.  On any failure we jump to
 * "fail", which unwinds all partial state via yge_detach().
 */
static int
yge_attach(yge_dev_t *dev)
{
	dev_info_t	*dip = dev->d_dip;
	int		rv;
	int		nattached;
	uint8_t		pm_cap;

	if (pci_config_setup(dip, &dev->d_pcih) != DDI_SUCCESS) {
		yge_error(dev, NULL, "Unable to map PCI configuration space");
		goto fail;
	}

	/*
	 * Map control/status registers.
	 */

	/* ensure the pmcsr status is D0 state */
	pm_cap = yge_find_capability(dev, PCI_CAP_ID_PM);
	if (pm_cap != 0) {
		uint16_t pmcsr;
		pmcsr = pci_config_get16(dev->d_pcih, pm_cap + PCI_PMCSR);
		pmcsr &= ~PCI_PMCSR_STATE_MASK;
		pci_config_put16(dev->d_pcih, pm_cap + PCI_PMCSR,
		    pmcsr | PCI_PMCSR_D0);
	}

	/* Enable PCI access and bus master. */
	pci_config_put16(dev->d_pcih, PCI_CONF_COMM,
	    pci_config_get16(dev->d_pcih, PCI_CONF_COMM) |
	    PCI_COMM_IO | PCI_COMM_MAE | PCI_COMM_ME);


	/* Allocate I/O resource */
	rv = ddi_regs_map_setup(dip, 1, &dev->d_regs, 0, 0, &yge_regs_attr,
	    &dev->d_regsh);
	if (rv != DDI_SUCCESS) {
		yge_error(dev, NULL, "Unable to map device registers");
		goto fail;
	}


	/* Enable all clocks. */
	/* Config writes must be bracketed by TST_CFG_WRITE_ON/OFF. */
	CSR_WRITE_1(dev, B2_TST_CTRL1, TST_CFG_WRITE_ON);
	pci_config_put32(dev->d_pcih, PCI_OUR_REG_3, 0);
	CSR_WRITE_1(dev, B2_TST_CTRL1, TST_CFG_WRITE_OFF);

	CSR_WRITE_2(dev, B0_CTST, CS_RST_CLR);
	dev->d_hw_id = CSR_READ_1(dev, B2_CHIP_ID);
	/* Chip revision lives in the upper nibble of B2_MAC_CFG. */
	dev->d_hw_rev = (CSR_READ_1(dev, B2_MAC_CFG) >> 4) & 0x0f;


	/*
	 * Bail out if chip is not recognized.  Note that we only enforce
	 * this in production builds.  The Ultra-2 (88e8057) has a problem
	 * right now where TX works fine, but RX seems not to.  So we've
	 * disabled that for now.
	 */
	if (dev->d_hw_id < CHIP_ID_YUKON_XL ||
	    dev->d_hw_id >= CHIP_ID_YUKON_UL_2) {
		yge_error(dev, NULL, "Unknown device: id=0x%02x, rev=0x%02x",
		    dev->d_hw_id, dev->d_hw_rev);
#ifndef	DEBUG
		goto fail;
#endif
	}

	/* Soft reset. */
	CSR_WRITE_2(dev, B0_CTST, CS_RST_SET);
	CSR_WRITE_2(dev, B0_CTST, CS_RST_CLR);
	/* PMD type 'L'/'S'/'P' indicates fiber; anything else is copper. */
	dev->d_pmd = CSR_READ_1(dev, B2_PMD_TYP);
	if (dev->d_pmd == 'L' || dev->d_pmd == 'S' || dev->d_pmd == 'P')
		dev->d_coppertype = 0;
	else
		dev->d_coppertype = 1;
	/* Check number of MACs. */
	dev->d_num_port = 1;
	if ((CSR_READ_1(dev, B2_Y2_HW_RES) & CFG_DUAL_MAC_MSK) ==
	    CFG_DUAL_MAC_MSK) {
		/* Second MAC only counts if its link is not inactive. */
		if (!(CSR_READ_1(dev, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC))
			dev->d_num_port++;
	}

	/* Check bus type. */
	if (yge_find_capability(dev, PCI_CAP_ID_PCI_E) != 0) {
		dev->d_bustype = PEX_BUS;
	} else if (yge_find_capability(dev, PCI_CAP_ID_PCIX) != 0) {
		dev->d_bustype = PCIX_BUS;
	} else {
		dev->d_bustype = PCI_BUS;
	}

	/*
	 * Record the core clock frequency (MHz) for this chip family;
	 * it is used for timer calculations (see YGE_USECS usage).
	 */
	switch (dev->d_hw_id) {
	case CHIP_ID_YUKON_EC:
		dev->d_clock = 125;	/* 125 Mhz */
		break;
	case CHIP_ID_YUKON_UL_2:
		dev->d_clock = 125;	/* 125 Mhz */
		break;
	case CHIP_ID_YUKON_SUPR:
		dev->d_clock = 125;	/* 125 Mhz */
		break;
	case CHIP_ID_YUKON_EC_U:
		dev->d_clock = 125;	/* 125 Mhz */
		break;
	case CHIP_ID_YUKON_EX:
		dev->d_clock = 125;	/* 125 Mhz */
		break;
	case CHIP_ID_YUKON_FE:
		dev->d_clock = 100;	/* 100 Mhz */
		break;
	case CHIP_ID_YUKON_FE_P:
		dev->d_clock = 50;	/* 50 Mhz */
		break;
	case CHIP_ID_YUKON_XL:
		dev->d_clock = 156;	/* 156 Mhz */
		break;
	default:
		dev->d_clock = 156;	/* 156 Mhz */
		break;
	}

	/* Limit how many Rx frames one interrupt pass may process. */
	dev->d_process_limit = YGE_RX_RING_CNT/2;

	rv = yge_alloc_ring(NULL, dev, &dev->d_status_ring, YGE_STAT_RING_CNT);
	if (rv != DDI_SUCCESS)
		goto fail;

	/* Setup event taskq. */
	dev->d_task_q = ddi_taskq_create(dip, "tq", 1, TASKQ_DEFAULTPRI, 0);
	if (dev->d_task_q == NULL) {
		yge_error(dev, NULL, "failed to create taskq");
		goto fail;
	}

	/* Init the condition variable */
	cv_init(&dev->d_task_cv, NULL, CV_DRIVER, NULL);

	/* Allocate IRQ resources. */
	if ((rv = yge_attach_intr(dev)) != DDI_SUCCESS) {
		goto fail;
	}

	/* Set base interrupt mask. */
	dev->d_intrmask = Y2_IS_HW_ERR | Y2_IS_STAT_BMU;
	dev->d_intrhwemask = Y2_IS_TIST_OV | Y2_IS_MST_ERR |
	    Y2_IS_IRQ_STAT | Y2_IS_PCI_EXP | Y2_IS_PCI_NEXP;

	/* Reset the adapter. */
	yge_reset(dev);

	yge_setup_rambuffer(dev);

	/* First pass: set up port software state (no MAC registration yet). */
	nattached = 0;
	for (int i = 0; i < dev->d_num_port; i++) {
		yge_port_t *port = dev->d_port[i];
		if (yge_init_port(port) != DDI_SUCCESS) {
			goto fail;
		}
	}

	yge_intr_enable(dev);

	/* set up the periodic to run once per second */
	dev->d_periodic = ddi_periodic_add(yge_tick, dev, 1000000000, 0);

	/* Second pass: register the ports with the MAC layer. */
	for (int i = 0; i < dev->d_num_port; i++) {
		yge_port_t *port = dev->d_port[i];
		if (yge_register_port(port) == DDI_SUCCESS) {
			nattached++;
		}
	}

	/* Attach fails only if no port at all could be registered. */
	if (nattached == 0) {
		goto fail;
	}

	/* Dispatch the taskq */
	if (ddi_taskq_dispatch(dev->d_task_q, yge_task, dev, DDI_SLEEP) !=
	    DDI_SUCCESS) {
		yge_error(dev, NULL, "failed to start taskq");
		goto fail;
	}

	ddi_report_dev(dip);

	return (DDI_SUCCESS);

fail:
	/* yge_detach() tolerates partially-initialized state. */
	yge_detach(dev);
	return (DDI_FAILURE);
}
1372 
1373 static int
1374 yge_register_port(yge_port_t *port)
1375 {
1376 	if (mac_register(port->p_mreg, &port->p_mh) != DDI_SUCCESS) {
1377 		yge_error(NULL, port, "MAC registration failed");
1378 		return (DDI_FAILURE);
1379 	}
1380 
1381 	return (DDI_SUCCESS);
1382 }
1383 
1384 static int
1385 yge_unregister_port(yge_port_t *port)
1386 {
1387 	if ((port->p_mh) && (mac_unregister(port->p_mh) != 0)) {
1388 		return (DDI_FAILURE);
1389 	}
1390 	port->p_mh = NULL;
1391 	return (DDI_SUCCESS);
1392 }
1393 
1394 /*
1395  * Free up port specific resources. This is called only when the
1396  * port is not registered (and hence not running).
1397  */
1398 static void
1399 yge_uninit_port(yge_port_t *port)
1400 {
1401 	ASSERT(!port->p_running);
1402 
1403 	if (port->p_mreg)
1404 		mac_free(port->p_mreg);
1405 
1406 	if (port->p_mii)
1407 		mii_free(port->p_mii);
1408 
1409 	yge_txrx_dma_free(port);
1410 
1411 	if (port->p_tx_buf)
1412 		kmem_free(port->p_tx_buf,
1413 		    sizeof (yge_buf_t) * YGE_TX_RING_CNT);
1414 	if (port->p_rx_buf)
1415 		kmem_free(port->p_rx_buf,
1416 		    sizeof (yge_buf_t) * YGE_RX_RING_CNT);
1417 }
1418 
/*
 * Tear down all device state.  This also serves as the error-unwind
 * path for yge_attach(), so every step checks whether its resource
 * was actually set up before releasing it.
 */
static void
yge_detach(yge_dev_t *dev)
{
	/*
	 * Turn off the periodic.
	 */
	if (dev->d_periodic)
		ddi_periodic_delete(dev->d_periodic);

	for (int i = 0; i < dev->d_num_port; i++) {
		yge_uninit_port(dev->d_port[i]);
	}

	/*
	 * Make sure all interrupts are disabled.
	 */
	/* NOTE(review): the read-backs presumably flush posted PCI writes. */
	CSR_WRITE_4(dev, B0_IMSK, 0);
	(void) CSR_READ_4(dev, B0_IMSK);
	CSR_WRITE_4(dev, B0_HWE_IMSK, 0);
	(void) CSR_READ_4(dev, B0_HWE_IMSK);

	/* LED Off. */
	CSR_WRITE_2(dev, B0_CTST, Y2_LED_STAT_OFF);

	/* Put hardware reset. */
	CSR_WRITE_2(dev, B0_CTST, CS_RST_SET);

	yge_free_ring(&dev->d_status_ring);

	if (dev->d_task_q != NULL) {
		/* Signal the task thread to exit before destroying the taskq. */
		yge_dispatch(dev, YGE_TASK_EXIT);
		ddi_taskq_destroy(dev->d_task_q);
		dev->d_task_q = NULL;
	}

	cv_destroy(&dev->d_task_cv);

	/* Harmless when no interrupts were allocated (d_intrcnt == 0). */
	yge_intr_disable(dev);

	/* The mutexes exist only if yge_add_intr() completed. */
	if (dev->d_intrh != NULL) {
		for (int i = 0; i < dev->d_intrcnt; i++) {
			(void) ddi_intr_remove_handler(dev->d_intrh[i]);
			(void) ddi_intr_free(dev->d_intrh[i]);
		}
		kmem_free(dev->d_intrh, dev->d_intrsize);
		mutex_destroy(&dev->d_phylock);
		mutex_destroy(&dev->d_txlock);
		mutex_destroy(&dev->d_rxlock);
		mutex_destroy(&dev->d_task_mtx);
	}
	if (dev->d_regsh != NULL)
		ddi_regs_map_free(&dev->d_regsh);

	if (dev->d_pcih != NULL)
		pci_config_teardown(&dev->d_pcih);
}
1475 
/*
 * Allocate and DMA-map a descriptor ring of `num' entries.  Callers
 * pass either the port or the device; when only the port is given,
 * the device is derived from it.  On failure, any partially allocated
 * resources remain recorded in the ring structure and are reclaimed
 * later by yge_free_ring() (called from the detach/uninit paths), so
 * no unwinding is done here.
 */
static int
yge_alloc_ring(yge_port_t *port, yge_dev_t *dev, yge_ring_t *ring, uint32_t num)
{
	dev_info_t		*dip;
	caddr_t			kaddr;
	size_t			len;
	int			rv;
	ddi_dma_cookie_t	dmac;
	unsigned		ndmac;

	if (port && !dev)
		dev = port->p_dev;
	dip = dev->d_dip;

	ring->r_num = num;

	rv = ddi_dma_alloc_handle(dip, &yge_ring_dma_attr, DDI_DMA_DONTWAIT,
	    NULL, &ring->r_dmah);
	if (rv != DDI_SUCCESS) {
		yge_error(dev, port, "Unable to allocate ring DMA handle");
		return (DDI_FAILURE);
	}

	rv = ddi_dma_mem_alloc(ring->r_dmah, num * sizeof (yge_desc_t),
	    &yge_ring_attr, DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
	    &kaddr, &len, &ring->r_acch);
	if (rv != DDI_SUCCESS) {
		yge_error(dev, port, "Unable to allocate ring DMA memory");
		return (DDI_FAILURE);
	}
	/* len may exceed the requested size due to rounding. */
	ring->r_size = len;
	ring->r_kaddr = (void *)kaddr;

	bzero(kaddr, len);

	/*
	 * NOTE(review): the handle was allocated with DDI_DMA_DONTWAIT but
	 * the bind uses DDI_DMA_SLEEP -- confirm the inconsistency is
	 * intentional.
	 */
	rv = ddi_dma_addr_bind_handle(ring->r_dmah, NULL, kaddr,
	    len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
	    &dmac, &ndmac);
	if (rv != DDI_DMA_MAPPED) {
		yge_error(dev, port, "Unable to bind ring DMA handle");
		return (DDI_FAILURE);
	}
	/* A single cookie is expected; presumably yge_ring_dma_attr enforces it. */
	ASSERT(ndmac == 1);
	/* 32-bit DMA address; see the 32-bit-only note in yge_txrx_dma_alloc(). */
	ring->r_paddr = dmac.dmac_address;

	return (DDI_SUCCESS);
}
1523 
1524 static void
1525 yge_free_ring(yge_ring_t *ring)
1526 {
1527 	if (ring->r_paddr)
1528 		(void) ddi_dma_unbind_handle(ring->r_dmah);
1529 	ring->r_paddr = 0;
1530 	if (ring->r_acch)
1531 		ddi_dma_mem_free(&ring->r_acch);
1532 	ring->r_kaddr = NULL;
1533 	ring->r_acch = NULL;
1534 	if (ring->r_dmah)
1535 		ddi_dma_free_handle(&ring->r_dmah);
1536 	ring->r_dmah = NULL;
1537 }
1538 
/*
 * Allocate and DMA-map a single packet buffer of `bufsz' bytes.
 * `flag' carries both the sync model (DDI_DMA_STREAMING or
 * DDI_DMA_CONSISTENT, used for the memory allocation) and the
 * direction bits used for the bind.  As with yge_alloc_ring(),
 * failure leaves partial state in *b for yge_free_buf() to reclaim,
 * so no unwinding is done here.
 */
static int
yge_alloc_buf(yge_port_t *port, yge_buf_t *b, size_t bufsz, int flag)
{
	yge_dev_t	*dev = port->p_dev;
	size_t		l;
	int		sflag;
	int 		rv;
	ddi_dma_cookie_t	dmac;
	unsigned		ndmac;

	/* Strip the direction bits; only the sync model goes to mem_alloc. */
	sflag = flag & (DDI_DMA_STREAMING | DDI_DMA_CONSISTENT);

	/* Now allocate Tx buffers. */
	rv = ddi_dma_alloc_handle(dev->d_dip, &yge_buf_dma_attr,
	    DDI_DMA_DONTWAIT, NULL, &b->b_dmah);
	if (rv != DDI_SUCCESS) {
		yge_error(NULL, port, "Unable to alloc DMA handle for buffer");
		return (DDI_FAILURE);
	}

	rv = ddi_dma_mem_alloc(b->b_dmah, bufsz, &yge_buf_attr,
	    sflag, DDI_DMA_DONTWAIT, NULL, &b->b_buf, &l, &b->b_acch);
	if (rv != DDI_SUCCESS) {
		yge_error(NULL, port, "Unable to alloc DMA memory for buffer");
		return (DDI_FAILURE);
	}

	rv = ddi_dma_addr_bind_handle(b->b_dmah, NULL, b->b_buf, l, flag,
	    DDI_DMA_DONTWAIT, NULL, &dmac, &ndmac);
	if (rv != DDI_DMA_MAPPED) {
		yge_error(NULL, port, "Unable to bind DMA handle for buffer");
		return (DDI_FAILURE);
	}
	/* A single cookie is expected; presumably yge_buf_dma_attr enforces it. */
	ASSERT(ndmac == 1);
	b->b_paddr = dmac.dmac_address;
	return (DDI_SUCCESS);
}
1576 
1577 static void
1578 yge_free_buf(yge_buf_t *b)
1579 {
1580 	if (b->b_paddr)
1581 		(void) ddi_dma_unbind_handle(b->b_dmah);
1582 	b->b_paddr = 0;
1583 	if (b->b_acch)
1584 		ddi_dma_mem_free(&b->b_acch);
1585 	b->b_buf = NULL;
1586 	b->b_acch = NULL;
1587 	if (b->b_dmah)
1588 		ddi_dma_free_handle(&b->b_dmah);
1589 	b->b_dmah = NULL;
1590 }
1591 
1592 static int
1593 yge_txrx_dma_alloc(yge_port_t *port)
1594 {
1595 	uint32_t		bufsz;
1596 	int			rv;
1597 	int			i;
1598 	yge_buf_t		*b;
1599 
1600 	/*
1601 	 * It seems that Yukon II supports full 64 bit DMA operations.
1602 	 * But we limit it to 32 bits only for now.  The 64 bit
1603 	 * operation would require substantially more complex
1604 	 * descriptor handling, since in such a case we would need two
1605 	 * LEs to represent a single physical address.
1606 	 *
1607 	 * If we find that this is limiting us, then we should go back
1608 	 * and re-examine it.
1609 	 */
1610 
1611 	/* Note our preferred buffer size. */
1612 	bufsz = port->p_mtu;
1613 
1614 	/* Allocate Tx ring. */
1615 	rv = yge_alloc_ring(port, NULL, &port->p_tx_ring, YGE_TX_RING_CNT);
1616 	if (rv != DDI_SUCCESS) {
1617 		return (DDI_FAILURE);
1618 	}
1619 
1620 	/* Now allocate Tx buffers. */
1621 	b = port->p_tx_buf;
1622 	for (i = 0; i < YGE_TX_RING_CNT; i++) {
1623 		rv = yge_alloc_buf(port, b, bufsz,
1624 		    DDI_DMA_STREAMING | DDI_DMA_WRITE);
1625 		if (rv != DDI_SUCCESS) {
1626 			return (DDI_FAILURE);
1627 		}
1628 		b++;
1629 	}
1630 
1631 	/* Allocate Rx ring. */
1632 	rv = yge_alloc_ring(port, NULL, &port->p_rx_ring, YGE_RX_RING_CNT);
1633 	if (rv != DDI_SUCCESS) {
1634 		return (DDI_FAILURE);
1635 	}
1636 
1637 	/* Now allocate Rx buffers. */
1638 	b = port->p_rx_buf;
1639 	for (i = 0; i < YGE_RX_RING_CNT; i++) {
1640 		rv =  yge_alloc_buf(port, b, bufsz,
1641 		    DDI_DMA_STREAMING | DDI_DMA_READ);
1642 		if (rv != DDI_SUCCESS) {
1643 			return (DDI_FAILURE);
1644 		}
1645 		b++;
1646 	}
1647 
1648 	return (DDI_SUCCESS);
1649 }
1650 
1651 static void
1652 yge_txrx_dma_free(yge_port_t *port)
1653 {
1654 	yge_buf_t	*b;
1655 
1656 	/* Tx ring. */
1657 	yge_free_ring(&port->p_tx_ring);
1658 
1659 	/* Rx ring. */
1660 	yge_free_ring(&port->p_rx_ring);
1661 
1662 	/* Tx buffers. */
1663 	b = port->p_tx_buf;
1664 	for (int i = 0; i < YGE_TX_RING_CNT; i++, b++) {
1665 		yge_free_buf(b);
1666 	}
1667 	/* Rx buffers. */
1668 	b = port->p_rx_buf;
1669 	for (int i = 0; i < YGE_RX_RING_CNT; i++, b++) {
1670 		yge_free_buf(b);
1671 	}
1672 }
1673 
/*
 * Transmit a single message by copying it into the next preallocated
 * Tx DMA buffer and handing the list element to the hardware.
 *
 * Returns B_TRUE if the message was consumed (transmitted or dropped),
 * B_FALSE if the ring is too full -- in that case p_wantw is set so
 * that Tx completion (yge_txeof) reschedules the MAC layer, and the
 * caller retains ownership of mp.
 */
boolean_t
yge_send(yge_port_t *port, mblk_t *mp)
{
	yge_ring_t *ring = &port->p_tx_ring;
	yge_buf_t *txb;
	int16_t prod;
	size_t len;

	/*
	 * For now we're not going to support checksum offload or LSO.
	 */

	len = msgsize(mp);
	if (len > port->p_framesize) {
		/* too big! */
		/* Drop silently; B_TRUE tells the caller it was consumed. */
		freemsg(mp);
		return (B_TRUE);
	}

	/* Check number of available descriptors. */
	if (port->p_tx_cnt + 1 >=
	    (YGE_TX_RING_CNT - YGE_RESERVED_TX_DESC_CNT)) {
		port->p_wantw = B_TRUE;
		return (B_FALSE);
	}

	prod = port->p_tx_prod;

	/* Copy the whole message into the slot's DMA buffer (frees mp). */
	txb = &port->p_tx_buf[prod];
	mcopymsg(mp, txb->b_buf);
	SYNCBUF(txb, DDI_DMA_SYNC_FORDEV);

	/* Build the list element and pass ownership to the hardware. */
	PUTADDR(ring, prod, txb->b_paddr);
	PUTCTRL(ring, prod, len | OP_PACKET | HW_OWNER | EOP);
	SYNCENTRY(ring, prod, DDI_DMA_SYNC_FORDEV);
	port->p_tx_cnt++;

	YGE_INC(prod, YGE_TX_RING_CNT);

	/* Update producer index. */
	port->p_tx_prod = prod;

	return (B_TRUE);
}
1718 
/*
 * Suspend the device (DR / power management).  MII is suspended first,
 * outside the device lock; then, under the lock, all running ports are
 * stopped, interrupts are masked, the PHY powered down, and the chip
 * left held in reset with d_suspended set so that yge_tick() and
 * friends stay away until yge_resume().
 */
static int
yge_suspend(yge_dev_t *dev)
{
	for (int i = 0; i < dev->d_num_port; i++) {
		yge_port_t *port = dev->d_port[i];
		mii_suspend(port->p_mii);
	}


	DEV_LOCK(dev);

	for (int i = 0; i < dev->d_num_port; i++) {
		yge_port_t *port = dev->d_port[i];

		if (port->p_running) {
			/* p_running stays set so yge_resume() can restart it. */
			yge_stop_port(port);
		}
	}

	/* Disable all interrupts. */
	/* NOTE(review): the read-backs presumably flush posted PCI writes. */
	CSR_WRITE_4(dev, B0_IMSK, 0);
	(void) CSR_READ_4(dev, B0_IMSK);
	CSR_WRITE_4(dev, B0_HWE_IMSK, 0);
	(void) CSR_READ_4(dev, B0_HWE_IMSK);

	yge_phy_power(dev, B_FALSE);

	/* Put hardware reset. */
	CSR_WRITE_2(dev, B0_CTST, CS_RST_SET);
	dev->d_suspended = B_TRUE;

	DEV_UNLOCK(dev);

	return (DDI_SUCCESS);
}
1754 
/*
 * Resume the device after yge_suspend(): restore PCI power state and
 * bus mastering, re-enable clocks, reset and reprogram the chip,
 * unmask interrupts, and restart any ports that were running at
 * suspend time.  MII resume and MAC notification happen last, outside
 * the device lock.
 */
static int
yge_resume(yge_dev_t *dev)
{
	uint8_t pm_cap;

	DEV_LOCK(dev);

	/* ensure the pmcsr status is D0 state */
	/* Config writes must be bracketed by TST_CFG_WRITE_ON/OFF. */
	CSR_WRITE_1(dev, B2_TST_CTRL1, TST_CFG_WRITE_ON);

	if ((pm_cap = yge_find_capability(dev, PCI_CAP_ID_PM)) != 0) {
		uint16_t pmcsr;
		pmcsr = pci_config_get16(dev->d_pcih, pm_cap + PCI_PMCSR);
		pmcsr &= ~PCI_PMCSR_STATE_MASK;
		pci_config_put16(dev->d_pcih, pm_cap + PCI_PMCSR,
		    pmcsr | PCI_PMCSR_D0);
	}

	/* Enable PCI access and bus master. */
	pci_config_put16(dev->d_pcih, PCI_CONF_COMM,
	    pci_config_get16(dev->d_pcih, PCI_CONF_COMM) |
	    PCI_COMM_IO | PCI_COMM_MAE | PCI_COMM_ME);

	/* Enable all clocks. */
	/* Only these chip families need the explicit clock ungating. */
	switch (dev->d_hw_id) {
	case CHIP_ID_YUKON_EX:
	case CHIP_ID_YUKON_EC_U:
	case CHIP_ID_YUKON_FE_P:
		pci_config_put32(dev->d_pcih, PCI_OUR_REG_3, 0);
		break;
	}

	CSR_WRITE_1(dev, B2_TST_CTRL1, TST_CFG_WRITE_OFF);

	yge_reset(dev);

	/* Make sure interrupts are reenabled */
	CSR_WRITE_4(dev, B0_IMSK, 0);
	CSR_WRITE_4(dev, B0_IMSK, Y2_IS_HW_ERR | Y2_IS_STAT_BMU);
	CSR_WRITE_4(dev, B0_HWE_IMSK,
	    Y2_IS_TIST_OV | Y2_IS_MST_ERR |
	    Y2_IS_IRQ_STAT | Y2_IS_PCI_EXP | Y2_IS_PCI_NEXP);

	/* Restart ports that were running when we suspended. */
	for (int i = 0; i < dev->d_num_port; i++) {
		yge_port_t *port = dev->d_port[i];

		if (port != NULL && port->p_running) {
			yge_start_port(port);
		}
	}
	dev->d_suspended = B_FALSE;

	DEV_UNLOCK(dev);

	/* Reset MII layer */
	for (int i = 0; i < dev->d_num_port; i++) {
		yge_port_t *port = dev->d_port[i];

		if (port->p_running) {
			mii_resume(port->p_mii);
			/* Tell the MAC layer transmit may proceed again. */
			mac_tx_update(port->p_mh);
		}
	}

	return (DDI_SUCCESS);
}
1821 
/*
 * Process one received frame at the current Rx consumer index.
 * `status' is the hardware status word for the frame (its upper 16
 * bits carry the byte count, cross-checked against `len'); `len' is
 * the reported frame length.  The data is copied out into a fresh
 * mblk, and the descriptor is always recycled back to the hardware,
 * whether the frame was good or bad.
 *
 * Returns the mblk for a good frame, or NULL if the frame failed the
 * checks, allocb() failed, or the port is not running.  Caller must
 * hold d_rxlock.
 */
static mblk_t *
yge_rxeof(yge_port_t *port, uint32_t status, int len)
{
	yge_dev_t *dev = port->p_dev;
	mblk_t	*mp;
	int cons, rxlen;
	yge_buf_t *rxb;
	yge_ring_t *ring;

	ASSERT(mutex_owned(&dev->d_rxlock));

	if (!port->p_running)
		return (NULL);

	ring = &port->p_rx_ring;
	cons = port->p_rx_cons;
	rxlen = status >> 16;
	rxb = &port->p_rx_buf[cons];
	mp = NULL;


	if ((dev->d_hw_id == CHIP_ID_YUKON_FE_P) &&
	    (dev->d_hw_rev == CHIP_REV_YU_FE2_A0)) {
		/*
		 * Apparently the status for this chip is not reliable.
		 * Only perform minimal consistency checking; the MAC
		 * and upper protocols will have to filter any garbage.
		 */
		if ((len > port->p_framesize) || (rxlen != len)) {
			goto bad;
		}
	} else {
		/* Full check: length sanity plus the hardware error bits. */
		if ((len > port->p_framesize) || (rxlen != len) ||
		    ((status & GMR_FS_ANY_ERR) != 0) ||
		    ((status & GMR_FS_RX_OK) == 0)) {
			goto bad;
		}
	}

	if ((mp = allocb(len + YGE_HEADROOM, BPRI_HI)) != NULL) {

		/* good packet - yay */
		/* Leave headroom for upper layers, then copy the frame out. */
		mp->b_rptr += YGE_HEADROOM;
		SYNCBUF(rxb, DDI_DMA_SYNC_FORKERNEL);
		bcopy(rxb->b_buf, mp->b_rptr, len);
		mp->b_wptr = mp->b_rptr + len;
	} else {
		port->p_stats.rx_nobuf++;
	}

bad:

	/* Recycle the descriptor: give the slot back to the hardware. */
	PUTCTRL(ring, cons, port->p_framesize | OP_PACKET | HW_OWNER);
	SYNCENTRY(ring, cons, DDI_DMA_SYNC_FORDEV);

	/* Advance the prefetch unit's put index past this slot. */
	CSR_WRITE_2(dev,
	    Y2_PREF_Q_ADDR(port->p_rxq, PREF_UNIT_PUT_IDX_REG),
	    cons);

	YGE_INC(port->p_rx_cons, YGE_RX_RING_CNT);

	return (mp);
}
1885 
1886 static boolean_t
1887 yge_txeof_locked(yge_port_t *port, int idx)
1888 {
1889 	int prog;
1890 	int16_t cons;
1891 	boolean_t resched;
1892 
1893 	if (!port->p_running) {
1894 		return (B_FALSE);
1895 	}
1896 
1897 	cons = port->p_tx_cons;
1898 	prog = 0;
1899 	for (; cons != idx; YGE_INC(cons, YGE_TX_RING_CNT)) {
1900 		if (port->p_tx_cnt <= 0)
1901 			break;
1902 		prog++;
1903 		port->p_tx_cnt--;
1904 		/* No need to sync LEs as we didn't update LEs. */
1905 	}
1906 
1907 	port->p_tx_cons = cons;
1908 
1909 	if (prog > 0) {
1910 		resched = port->p_wantw;
1911 		port->p_tx_wdog = 0;
1912 		port->p_wantw = B_FALSE;
1913 		return (resched);
1914 	} else {
1915 		return (B_FALSE);
1916 	}
1917 }
1918 
1919 static void
1920 yge_txeof(yge_port_t *port, int idx)
1921 {
1922 	boolean_t resched;
1923 
1924 	TX_LOCK(port->p_dev);
1925 
1926 	resched = yge_txeof_locked(port, idx);
1927 
1928 	TX_UNLOCK(port->p_dev);
1929 
1930 	if (resched && port->p_running) {
1931 		mac_tx_update(port->p_mh);
1932 	}
1933 }
1934 
1935 static void
1936 yge_restart_task(yge_dev_t *dev)
1937 {
1938 	yge_port_t *port;
1939 
1940 	DEV_LOCK(dev);
1941 
1942 	/* Cancel pending I/O and free all Rx/Tx buffers. */
1943 	for (int i = 0; i < dev->d_num_port; i++) {
1944 		port = dev->d_port[i];
1945 		if (port->p_running)
1946 			yge_stop_port(dev->d_port[i]);
1947 	}
1948 	yge_reset(dev);
1949 	for (int i = 0; i < dev->d_num_port; i++) {
1950 		port = dev->d_port[i];
1951 
1952 		if (port->p_running)
1953 			yge_start_port(port);
1954 	}
1955 
1956 	DEV_UNLOCK(dev);
1957 
1958 	for (int i = 0; i < dev->d_num_port; i++) {
1959 		port = dev->d_port[i];
1960 
1961 		mii_reset(port->p_mii);
1962 		if (port->p_running)
1963 			mac_tx_update(port->p_mh);
1964 	}
1965 }
1966 
/*
 * Once-per-second periodic (installed in yge_attach()).  For each
 * running port with outstanding Tx descriptors, reclaim completed
 * descriptors -- Tx completion interrupts can be lost -- and advance a
 * watchdog counter when no progress is seen; after YGE_TX_TIMEOUT
 * stalled ticks, schedule a full device restart.
 */
static void
yge_tick(void *arg)
{
	yge_dev_t *dev = arg;
	yge_port_t *port;
	boolean_t restart = B_FALSE;
	boolean_t resched = B_FALSE;
	int idx;

	DEV_LOCK(dev);

	if (dev->d_suspended) {
		DEV_UNLOCK(dev);
		return;
	}

	for (int i = 0; i < dev->d_num_port; i++) {
		port = dev->d_port[i];

		if (!port->p_running)
			continue;

		if (port->p_tx_cnt) {
			uint32_t ridx;

			/*
			 * Reclaim first as there is a possibility of losing
			 * Tx completion interrupts.
			 */
			ridx = port->p_port == YGE_PORT_A ?
			    STAT_TXA1_RIDX : STAT_TXA2_RIDX;
			idx = CSR_READ_2(dev, ridx);
			if (port->p_tx_cons != idx) {
				resched = yge_txeof_locked(port, idx);

			} else {

				/* detect TX hang */
				port->p_tx_wdog++;
				if (port->p_tx_wdog > YGE_TX_TIMEOUT) {
					port->p_tx_wdog = 0;
					yge_error(NULL, port,
					    "TX hang detected!");
					restart = B_TRUE;
				}
			}
		}
	}

	DEV_UNLOCK(dev);
	/* Restart and MAC notification must happen without the lock held. */
	if (restart) {
		yge_dispatch(dev, YGE_TASK_RESTART);
	} else {
		if (resched) {
			for (int i = 0; i < dev->d_num_port; i++) {
				port = dev->d_port[i];

				if (port->p_running)
					mac_tx_update(port->p_mh);
			}
		}
	}
}
2030 
2031 static int
2032 yge_intr_gmac(yge_port_t *port)
2033 {
2034 	yge_dev_t *dev = port->p_dev;
2035 	int pnum = port->p_port;
2036 	uint8_t status;
2037 	int dispatch_wrk = 0;
2038 
2039 	status = CSR_READ_1(dev, MR_ADDR(pnum, GMAC_IRQ_SRC));
2040 
2041 	/* GMAC Rx FIFO overrun. */
2042 	if ((status & GM_IS_RX_FF_OR) != 0) {
2043 		CSR_WRITE_4(dev, MR_ADDR(pnum, RX_GMF_CTRL_T), GMF_CLI_RX_FO);
2044 		yge_error(NULL, port, "Rx FIFO overrun!");
2045 		dispatch_wrk |= YGE_TASK_RESTART;
2046 	}
2047 	/* GMAC Tx FIFO underrun. */
2048 	if ((status & GM_IS_TX_FF_UR) != 0) {
2049 		CSR_WRITE_4(dev, MR_ADDR(pnum, TX_GMF_CTRL_T), GMF_CLI_TX_FU);
2050 		yge_error(NULL, port, "Tx FIFO underrun!");
2051 		/*
2052 		 * In case of Tx underrun, we may need to flush/reset
2053 		 * Tx MAC but that would also require
2054 		 * resynchronization with status LEs. Reinitializing
2055 		 * status LEs would affect the other port in dual MAC
2056 		 * configuration so it should be avoided if we can.
2057 		 * Due to lack of documentation it's all vague guess
2058 		 * but it needs more investigation.
2059 		 */
2060 	}
2061 	return (dispatch_wrk);
2062 }
2063 
/*
 * Handle per-port hardware error causes reported in `status'.  Each
 * recognized condition is logged and its interrupt source cleared; no
 * recovery beyond that is attempted here.
 *
 * NOTE(review): the Y2_IS_*1 masks are the port-A bit names;
 * presumably the caller shifts/normalizes `status' for port B --
 * confirm against the caller.
 */
static void
yge_handle_hwerr(yge_port_t *port, uint32_t status)
{
	yge_dev_t	*dev = port->p_dev;

	if ((status & Y2_IS_PAR_RD1) != 0) {
		yge_error(NULL, port, "RAM buffer read parity error");
		/* Clear IRQ. */
		CSR_WRITE_2(dev, SELECT_RAM_BUFFER(port->p_port, B3_RI_CTRL),
		    RI_CLR_RD_PERR);
	}
	if ((status & Y2_IS_PAR_WR1) != 0) {
		yge_error(NULL, port, "RAM buffer write parity error");
		/* Clear IRQ. */
		CSR_WRITE_2(dev, SELECT_RAM_BUFFER(port->p_port, B3_RI_CTRL),
		    RI_CLR_WR_PERR);
	}
	if ((status & Y2_IS_PAR_MAC1) != 0) {
		yge_error(NULL, port, "Tx MAC parity error");
		/* Clear IRQ. */
		CSR_WRITE_4(dev, MR_ADDR(port->p_port, TX_GMF_CTRL_T),
		    GMF_CLI_TX_PE);
	}
	if ((status & Y2_IS_PAR_RX1) != 0) {
		yge_error(NULL, port, "Rx parity error");
		/* Clear IRQ. */
		CSR_WRITE_4(dev, Q_ADDR(port->p_rxq, Q_CSR), BMU_CLR_IRQ_PAR);
	}
	if ((status & (Y2_IS_TCP_TXS1 | Y2_IS_TCP_TXA1)) != 0) {
		yge_error(NULL, port, "TCP segmentation error");
		/* Clear IRQ. */
		CSR_WRITE_4(dev, Q_ADDR(port->p_txq, Q_CSR), BMU_CLR_IRQ_TCP);
	}
}
2098 
/*
 * Handle device-level hardware error interrupts (B0_HWE_ISRC): timestamp
 * overflow, PCI/PCI Express faults, and per-port RAM/MAC/BMU parity
 * errors.  Each cause is acknowledged at its source; fatal PEX causes
 * that keep recurring are masked off entirely.  Per-port causes are
 * fanned out to yge_handle_hwerr().
 */
static void
yge_intr_hwerr(yge_dev_t *dev)
{
	uint32_t status;
	uint32_t tlphead[4];

	status = CSR_READ_4(dev, B0_HWE_ISRC);
	/* Time Stamp timer overflow. */
	if ((status & Y2_IS_TIST_OV) != 0)
		CSR_WRITE_1(dev, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
	if ((status & Y2_IS_PCI_NEXP) != 0) {
		/*
		 * PCI Express Error occurred which is not described in PEX
		 * spec.
		 * This error is also mapped either to Master Abort(
		 * Y2_IS_MST_ERR) or Target Abort (Y2_IS_IRQ_STAT) bit and
		 * can only be cleared there.
		 */
		yge_error(dev, NULL, "PCI Express protocol violation error");
	}

	if ((status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) != 0) {
		uint16_t v16;

		if ((status & Y2_IS_IRQ_STAT) != 0)
			yge_error(dev, NULL, "Unexpected IRQ Status error");
		if ((status & Y2_IS_MST_ERR) != 0)
			yge_error(dev, NULL, "Unexpected IRQ Master error");
		/* Reset all bits in the PCI status register. */
		v16 = pci_config_get16(dev->d_pcih, PCI_CONF_STAT);
		CSR_WRITE_1(dev, B2_TST_CTRL1, TST_CFG_WRITE_ON);
		pci_config_put16(dev->d_pcih, PCI_CONF_STAT, v16 |
		    PCI_STAT_S_PERROR | PCI_STAT_S_SYSERR | PCI_STAT_R_MAST_AB |
		    PCI_STAT_R_TARG_AB | PCI_STAT_PERROR);
		CSR_WRITE_1(dev, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
	}

	/* Check for PCI Express Uncorrectable Error. */
	if ((status & Y2_IS_PCI_EXP) != 0) {
		uint32_t v32;

		/*
		 * On PCI Express bus bridges are called root complexes (RC).
		 * PCI Express errors are recognized by the root complex too,
		 * which requests the system to handle the problem. After
		 * error occurrence it may be that no access to the adapter
		 * may be performed any longer.
		 */

		v32 = CSR_PCI_READ_4(dev, PEX_UNC_ERR_STAT);
		if ((v32 & PEX_UNSUP_REQ) != 0) {
			/*
			 * Ignore unsupported request error.
			 * NOTE(review): the log message below fires only on
			 * this supposedly-ignorable cause; it looks as if it
			 * may have been intended for the fatal branch below
			 * instead -- confirm before changing behavior.
			 */
			yge_error(dev, NULL,
			    "Uncorrectable PCI Express error");
		}
		if ((v32 & (PEX_FATAL_ERRORS | PEX_POIS_TLP)) != 0) {
			int i;

			/* Get TLP header form Log Registers. */
			for (i = 0; i < 4; i++)
				tlphead[i] = CSR_PCI_READ_4(dev,
				    PEX_HEADER_LOG + i * 4);
			/* Check for vendor defined broadcast message. */
			if (!(tlphead[0] == 0x73004001 && tlphead[1] == 0x7f)) {
				/*
				 * Not the benign broadcast message: mask the
				 * PEX cause so it cannot interrupt again.
				 */
				dev->d_intrhwemask &= ~Y2_IS_PCI_EXP;
				CSR_WRITE_4(dev, B0_HWE_IMSK,
				    dev->d_intrhwemask);
				/* Flush the write. */
				(void) CSR_READ_4(dev, B0_HWE_IMSK);
			}
		}
		/* Clear the interrupt. */
		CSR_WRITE_1(dev, B2_TST_CTRL1, TST_CFG_WRITE_ON);
		CSR_PCI_WRITE_4(dev, PEX_UNC_ERR_STAT, 0xffffffff);
		CSR_WRITE_1(dev, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
	}

	/* Fan out per-port causes; port B's bits live 8 positions higher. */
	if ((status & Y2_HWE_L1_MASK) != 0 && dev->d_port[YGE_PORT_A] != NULL)
		yge_handle_hwerr(dev->d_port[YGE_PORT_A], status);
	if ((status & Y2_HWE_L2_MASK) != 0 && dev->d_port[YGE_PORT_B] != NULL)
		yge_handle_hwerr(dev->d_port[YGE_PORT_B], status >> 8);
}
2180 
2181 /*
2182  * Returns B_TRUE if there is potentially more work to do.
2183  */
2184 static boolean_t
2185 yge_handle_events(yge_dev_t *dev, mblk_t **heads, mblk_t **tails, int *txindex)
2186 {
2187 	yge_port_t *port;
2188 	yge_ring_t *ring;
2189 	uint32_t control, status;
2190 	int cons, idx, len, pnum;
2191 	mblk_t *mp;
2192 	uint32_t rxprogs[2];
2193 
2194 	rxprogs[0] = rxprogs[1] = 0;
2195 
2196 	idx = CSR_READ_2(dev, STAT_PUT_IDX);
2197 	if (idx == dev->d_stat_cons) {
2198 		return (B_FALSE);
2199 	}
2200 
2201 	ring = &dev->d_status_ring;
2202 
2203 	for (cons = dev->d_stat_cons; cons != idx; ) {
2204 		/* Sync status LE. */
2205 		SYNCENTRY(ring, cons, DDI_DMA_SYNC_FORKERNEL);
2206 		control = GETCTRL(ring, cons);
2207 		if ((control & HW_OWNER) == 0) {
2208 			yge_error(dev, NULL, "Status descriptor error: "
2209 			    "index %d, control %x", cons, control);
2210 			break;
2211 		}
2212 
2213 		status = GETSTAT(ring, cons);
2214 
2215 		control &= ~HW_OWNER;
2216 		len = control & STLE_LEN_MASK;
2217 		pnum = ((control >> 16) & 0x01);
2218 		port = dev->d_port[pnum];
2219 		if (port == NULL) {
2220 			yge_error(dev, NULL, "Invalid port opcode: 0x%08x",
2221 			    control & STLE_OP_MASK);
2222 			goto finish;
2223 		}
2224 
2225 		switch (control & STLE_OP_MASK) {
2226 		case OP_RXSTAT:
2227 			mp = yge_rxeof(port, status, len);
2228 			if (mp != NULL) {
2229 				if (heads[pnum] == NULL)
2230 					heads[pnum] = mp;
2231 				else
2232 					tails[pnum]->b_next = mp;
2233 				tails[pnum] = mp;
2234 			}
2235 
2236 			rxprogs[pnum]++;
2237 			break;
2238 
2239 		case OP_TXINDEXLE:
2240 			txindex[0] = status & STLE_TXA1_MSKL;
2241 			txindex[1] =
2242 			    ((status & STLE_TXA2_MSKL) >> STLE_TXA2_SHIFTL) |
2243 			    ((len & STLE_TXA2_MSKH) << STLE_TXA2_SHIFTH);
2244 			break;
2245 		default:
2246 			yge_error(dev, NULL, "Unhandled opcode: 0x%08x",
2247 			    control & STLE_OP_MASK);
2248 			break;
2249 		}
2250 finish:
2251 
2252 		/* Give it back to HW. */
2253 		PUTCTRL(ring, cons, control);
2254 		SYNCENTRY(ring, cons, DDI_DMA_SYNC_FORDEV);
2255 
2256 		YGE_INC(cons, YGE_STAT_RING_CNT);
2257 		if (rxprogs[pnum] > dev->d_process_limit) {
2258 			break;
2259 		}
2260 	}
2261 
2262 	dev->d_stat_cons = cons;
2263 	if (dev->d_stat_cons != CSR_READ_2(dev, STAT_PUT_IDX))
2264 		return (B_TRUE);
2265 	else
2266 		return (B_FALSE);
2267 }
2268 
2269 /*ARGSUSED1*/
2270 static uint_t
2271 yge_intr(caddr_t arg1, caddr_t arg2)
2272 {
2273 	yge_dev_t	*dev;
2274 	yge_port_t	*port1;
2275 	yge_port_t	*port2;
2276 	uint32_t	status;
2277 	mblk_t		*heads[2], *tails[2];
2278 	int		txindex[2];
2279 	int		dispatch_wrk;
2280 
2281 	dev = (void *)arg1;
2282 
2283 	heads[0] = heads[1] = NULL;
2284 	tails[0] = tails[1] = NULL;
2285 	txindex[0] = txindex[1] = -1;
2286 	dispatch_wrk = 0;
2287 
2288 	port1 = dev->d_port[YGE_PORT_A];
2289 	port2 = dev->d_port[YGE_PORT_B];
2290 
2291 	RX_LOCK(dev);
2292 
2293 	if (dev->d_suspended) {
2294 		RX_UNLOCK(dev);
2295 		return (DDI_INTR_UNCLAIMED);
2296 	}
2297 
2298 	/* Get interrupt source. */
2299 	status = CSR_READ_4(dev, B0_Y2_SP_ISRC2);
2300 	if (status == 0 || status == 0xffffffff ||
2301 	    (status & dev->d_intrmask) == 0) { /* Stray interrupt ? */
2302 		/* Reenable interrupts. */
2303 		CSR_WRITE_4(dev, B0_Y2_SP_ICR, 2);
2304 		RX_UNLOCK(dev);
2305 		return (DDI_INTR_UNCLAIMED);
2306 	}
2307 
2308 	if ((status & Y2_IS_HW_ERR) != 0) {
2309 		yge_intr_hwerr(dev);
2310 	}
2311 
2312 	if (status & Y2_IS_IRQ_MAC1) {
2313 		dispatch_wrk |= yge_intr_gmac(port1);
2314 	}
2315 	if (status & Y2_IS_IRQ_MAC2) {
2316 		dispatch_wrk |= yge_intr_gmac(port2);
2317 	}
2318 
2319 	if ((status & (Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2)) != 0) {
2320 		yge_error(NULL, status & Y2_IS_CHK_RX1 ? port1 : port2,
2321 		    "Rx descriptor error");
2322 		dev->d_intrmask &= ~(Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2);
2323 		CSR_WRITE_4(dev, B0_IMSK, dev->d_intrmask);
2324 		(void) CSR_READ_4(dev, B0_IMSK);
2325 	}
2326 	if ((status & (Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2)) != 0) {
2327 		yge_error(NULL, status & Y2_IS_CHK_TXA1 ? port1 : port2,
2328 		    "Tx descriptor error");
2329 		dev->d_intrmask &= ~(Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2);
2330 		CSR_WRITE_4(dev, B0_IMSK, dev->d_intrmask);
2331 		(void) CSR_READ_4(dev, B0_IMSK);
2332 	}
2333 
2334 	/* handle events until it returns false */
2335 	while (yge_handle_events(dev, heads, tails, txindex))
2336 		/* NOP */;
2337 
2338 	/* Do receive/transmit events */
2339 	if ((status & Y2_IS_STAT_BMU)) {
2340 		CSR_WRITE_4(dev, STAT_CTRL, SC_STAT_CLR_IRQ);
2341 	}
2342 
2343 	/* Reenable interrupts. */
2344 	CSR_WRITE_4(dev, B0_Y2_SP_ICR, 2);
2345 
2346 	RX_UNLOCK(dev);
2347 
2348 	if (dispatch_wrk) {
2349 		yge_dispatch(dev, dispatch_wrk);
2350 	}
2351 
2352 	if (port1->p_running) {
2353 		if (txindex[0] >= 0) {
2354 			yge_txeof(port1, txindex[0]);
2355 		}
2356 		if (heads[0])
2357 			mac_rx(port1->p_mh, NULL, heads[0]);
2358 	} else {
2359 		if (heads[0]) {
2360 			mblk_t *mp;
2361 			while ((mp = heads[0]) != NULL) {
2362 				heads[0] = mp->b_next;
2363 				freemsg(mp);
2364 			}
2365 		}
2366 	}
2367 
2368 	if (port2->p_running) {
2369 		if (txindex[1] >= 0) {
2370 			yge_txeof(port2, txindex[1]);
2371 		}
2372 		if (heads[1])
2373 			mac_rx(port2->p_mh, NULL, heads[1]);
2374 	} else {
2375 		if (heads[1]) {
2376 			mblk_t *mp;
2377 			while ((mp = heads[1]) != NULL) {
2378 				heads[1] = mp->b_next;
2379 				freemsg(mp);
2380 			}
2381 		}
2382 	}
2383 
2384 	return (DDI_INTR_CLAIMED);
2385 }
2386 
2387 static void
2388 yge_set_tx_stfwd(yge_port_t *port)
2389 {
2390 	yge_dev_t *dev = port->p_dev;
2391 	int pnum = port->p_port;
2392 
2393 	switch (dev->d_hw_id) {
2394 	case CHIP_ID_YUKON_EX:
2395 		if (dev->d_hw_rev == CHIP_REV_YU_EX_A0)
2396 			goto yukon_ex_workaround;
2397 
2398 		if (port->p_mtu > ETHERMTU)
2399 			CSR_WRITE_4(dev, MR_ADDR(pnum, TX_GMF_CTRL_T),
2400 			    TX_JUMBO_ENA | TX_STFW_ENA);
2401 		else
2402 			CSR_WRITE_4(dev, MR_ADDR(pnum, TX_GMF_CTRL_T),
2403 			    TX_JUMBO_DIS | TX_STFW_ENA);
2404 		break;
2405 	default:
2406 yukon_ex_workaround:
2407 		if (port->p_mtu > ETHERMTU) {
2408 			/* Set Tx GMAC FIFO Almost Empty Threshold. */
2409 			CSR_WRITE_4(dev, MR_ADDR(pnum, TX_GMF_AE_THR),
2410 			    MSK_ECU_JUMBO_WM << 16 | MSK_ECU_AE_THR);
2411 			/* Disable Store & Forward mode for Tx. */
2412 			CSR_WRITE_4(dev, MR_ADDR(pnum, TX_GMF_CTRL_T),
2413 			    TX_JUMBO_ENA | TX_STFW_DIS);
2414 		} else {
2415 			/* Enable Store & Forward mode for Tx. */
2416 			CSR_WRITE_4(dev, MR_ADDR(pnum, TX_GMF_CTRL_T),
2417 			    TX_JUMBO_DIS | TX_STFW_ENA);
2418 		}
2419 		break;
2420 	}
2421 }
2422 
2423 static void
2424 yge_start_port(yge_port_t *port)
2425 {
2426 	yge_dev_t *dev = port->p_dev;
2427 	uint16_t gmac;
2428 	int32_t pnum;
2429 	int32_t rxq;
2430 	int32_t txq;
2431 	uint32_t reg;
2432 
2433 	pnum = port->p_port;
2434 	txq = port->p_txq;
2435 	rxq = port->p_rxq;
2436 
2437 	if (port->p_mtu < ETHERMTU)
2438 		port->p_framesize = ETHERMTU;
2439 	else
2440 		port->p_framesize = port->p_mtu;
2441 	port->p_framesize += sizeof (struct ether_vlan_header);
2442 
2443 	/*
2444 	 * Note for the future, if we enable offloads:
2445 	 * In Yukon EC Ultra, TSO & checksum offload is not
2446 	 * supported for jumbo frame.
2447 	 */
2448 
2449 	/* GMAC Control reset */
2450 	CSR_WRITE_4(dev, MR_ADDR(pnum, GMAC_CTRL), GMC_RST_SET);
2451 	CSR_WRITE_4(dev, MR_ADDR(pnum, GMAC_CTRL), GMC_RST_CLR);
2452 	CSR_WRITE_4(dev, MR_ADDR(pnum, GMAC_CTRL), GMC_F_LOOPB_OFF);
2453 	if (dev->d_hw_id == CHIP_ID_YUKON_EX)
2454 		CSR_WRITE_4(dev, MR_ADDR(pnum, GMAC_CTRL),
2455 		    GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON |
2456 		    GMC_BYP_RETR_ON);
2457 	/*
2458 	 * Initialize GMAC first such that speed/duplex/flow-control
2459 	 * parameters are renegotiated with the interface is brought up.
2460 	 */
2461 	GMAC_WRITE_2(dev, pnum, GM_GP_CTRL, 0);
2462 
2463 	/* Dummy read the Interrupt Source Register. */
2464 	(void) CSR_READ_1(dev, MR_ADDR(pnum, GMAC_IRQ_SRC));
2465 
2466 	/* Clear MIB stats. */
2467 	yge_stats_clear(port);
2468 
2469 	/* Disable FCS. */
2470 	GMAC_WRITE_2(dev, pnum, GM_RX_CTRL, GM_RXCR_CRC_DIS);
2471 
2472 	/* Setup Transmit Control Register. */
2473 	GMAC_WRITE_2(dev, pnum, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF));
2474 
2475 	/* Setup Transmit Flow Control Register. */
2476 	GMAC_WRITE_2(dev, pnum, GM_TX_FLOW_CTRL, 0xffff);
2477 
2478 	/* Setup Transmit Parameter Register. */
2479 	GMAC_WRITE_2(dev, pnum, GM_TX_PARAM,
2480 	    TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) | TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
2481 	    TX_IPG_JAM_DATA(TX_IPG_JAM_DEF) | TX_BACK_OFF_LIM(TX_BOF_LIM_DEF));
2482 
2483 	gmac = DATA_BLIND_VAL(DATA_BLIND_DEF) |
2484 	    GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF);
2485 
2486 	if (port->p_mtu > ETHERMTU)
2487 		gmac |= GM_SMOD_JUMBO_ENA;
2488 	GMAC_WRITE_2(dev, pnum, GM_SERIAL_MODE, gmac);
2489 
2490 	/* Disable interrupts for counter overflows. */
2491 	GMAC_WRITE_2(dev, pnum, GM_TX_IRQ_MSK, 0);
2492 	GMAC_WRITE_2(dev, pnum, GM_RX_IRQ_MSK, 0);
2493 	GMAC_WRITE_2(dev, pnum, GM_TR_IRQ_MSK, 0);
2494 
2495 	/* Configure Rx MAC FIFO. */
2496 	CSR_WRITE_4(dev, MR_ADDR(pnum, RX_GMF_CTRL_T), GMF_RST_SET);
2497 	CSR_WRITE_4(dev, MR_ADDR(pnum, RX_GMF_CTRL_T), GMF_RST_CLR);
2498 	reg = GMF_OPER_ON | GMF_RX_F_FL_ON;
2499 	if (dev->d_hw_id == CHIP_ID_YUKON_FE_P ||
2500 	    dev->d_hw_id == CHIP_ID_YUKON_EX)
2501 		reg |= GMF_RX_OVER_ON;
2502 	CSR_WRITE_4(dev, MR_ADDR(pnum, RX_GMF_CTRL_T), reg);
2503 
2504 	/* Set receive filter. */
2505 	yge_setrxfilt(port);
2506 
2507 	/* Flush Rx MAC FIFO on any flow control or error. */
2508 	CSR_WRITE_4(dev, MR_ADDR(pnum, RX_GMF_FL_MSK), GMR_FS_ANY_ERR);
2509 
2510 	/*
2511 	 * Set Rx FIFO flush threshold to 64 bytes + 1 FIFO word
2512 	 * due to hardware hang on receipt of pause frames.
2513 	 */
2514 	reg = RX_GMF_FL_THR_DEF + 1;
2515 	/* FE+ magic */
2516 	if ((dev->d_hw_id == CHIP_ID_YUKON_FE_P) &&
2517 	    (dev->d_hw_rev == CHIP_REV_YU_FE2_A0))
2518 		reg = 0x178;
2519 
2520 	CSR_WRITE_4(dev, MR_ADDR(pnum, RX_GMF_FL_THR), reg);
2521 
2522 	/* Configure Tx MAC FIFO. */
2523 	CSR_WRITE_4(dev, MR_ADDR(pnum, TX_GMF_CTRL_T), GMF_RST_SET);
2524 	CSR_WRITE_4(dev, MR_ADDR(pnum, TX_GMF_CTRL_T), GMF_RST_CLR);
2525 	CSR_WRITE_4(dev, MR_ADDR(pnum, TX_GMF_CTRL_T), GMF_OPER_ON);
2526 
2527 	/* Disable hardware VLAN tag insertion/stripping. */
2528 	CSR_WRITE_4(dev, MR_ADDR(pnum, RX_GMF_CTRL_T), RX_VLAN_STRIP_OFF);
2529 	CSR_WRITE_4(dev, MR_ADDR(pnum, TX_GMF_CTRL_T), TX_VLAN_TAG_OFF);
2530 
2531 	if ((port->p_flags & PORT_FLAG_RAMBUF) == 0) {
2532 		/* Set Rx Pause threshold. */
2533 		if ((dev->d_hw_id == CHIP_ID_YUKON_FE_P) &&
2534 		    (dev->d_hw_rev == CHIP_REV_YU_FE2_A0)) {
2535 			CSR_WRITE_1(dev, MR_ADDR(pnum, RX_GMF_LP_THR),
2536 			    MSK_ECU_LLPP);
2537 			CSR_WRITE_1(dev, MR_ADDR(pnum, RX_GMF_UP_THR),
2538 			    MSK_FEP_ULPP);
2539 		} else {
2540 			CSR_WRITE_1(dev, MR_ADDR(pnum, RX_GMF_LP_THR),
2541 			    MSK_ECU_LLPP);
2542 			CSR_WRITE_1(dev, MR_ADDR(pnum, RX_GMF_UP_THR),
2543 			    MSK_ECU_ULPP);
2544 		}
2545 		/* Configure store-and-forward for TX */
2546 		yge_set_tx_stfwd(port);
2547 	}
2548 
2549 	if ((dev->d_hw_id == CHIP_ID_YUKON_FE_P) &&
2550 	    (dev->d_hw_rev == CHIP_REV_YU_FE2_A0)) {
2551 		/* Disable dynamic watermark */
2552 		reg = CSR_READ_4(dev, MR_ADDR(pnum, TX_GMF_EA));
2553 		reg &= ~TX_DYN_WM_ENA;
2554 		CSR_WRITE_4(dev, MR_ADDR(pnum, TX_GMF_EA), reg);
2555 	}
2556 
2557 	/*
2558 	 * Disable Force Sync bit and Alloc bit in Tx RAM interface
2559 	 * arbiter as we don't use Sync Tx queue.
2560 	 */
2561 	CSR_WRITE_1(dev, MR_ADDR(pnum, TXA_CTRL),
2562 	    TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC);
2563 	/* Enable the RAM Interface Arbiter. */
2564 	CSR_WRITE_1(dev, MR_ADDR(pnum, TXA_CTRL), TXA_ENA_ARB);
2565 
2566 	/* Setup RAM buffer. */
2567 	yge_set_rambuffer(port);
2568 
2569 	/* Disable Tx sync Queue. */
2570 	CSR_WRITE_1(dev, RB_ADDR(port->p_txsq, RB_CTRL), RB_RST_SET);
2571 
2572 	/* Setup Tx Queue Bus Memory Interface. */
2573 	CSR_WRITE_4(dev, Q_ADDR(txq, Q_CSR), BMU_CLR_RESET);
2574 	CSR_WRITE_4(dev, Q_ADDR(txq, Q_CSR), BMU_OPER_INIT);
2575 	CSR_WRITE_4(dev, Q_ADDR(txq, Q_CSR), BMU_FIFO_OP_ON);
2576 	CSR_WRITE_2(dev, Q_ADDR(txq, Q_WM), MSK_BMU_TX_WM);
2577 
2578 	switch (dev->d_hw_id) {
2579 	case CHIP_ID_YUKON_EC_U:
2580 		if (dev->d_hw_rev == CHIP_REV_YU_EC_U_A0) {
2581 			/* Fix for Yukon-EC Ultra: set BMU FIFO level */
2582 			CSR_WRITE_2(dev, Q_ADDR(txq, Q_AL), MSK_ECU_TXFF_LEV);
2583 		}
2584 		break;
2585 	case CHIP_ID_YUKON_EX:
2586 		/*
2587 		 * Yukon Extreme seems to have silicon bug for
2588 		 * automatic Tx checksum calculation capability.
2589 		 */
2590 		if (dev->d_hw_rev == CHIP_REV_YU_EX_B0)
2591 			CSR_WRITE_4(dev, Q_ADDR(txq, Q_F), F_TX_CHK_AUTO_OFF);
2592 		break;
2593 	}
2594 
2595 	/* Setup Rx Queue Bus Memory Interface. */
2596 	CSR_WRITE_4(dev, Q_ADDR(rxq, Q_CSR), BMU_CLR_RESET);
2597 	CSR_WRITE_4(dev, Q_ADDR(rxq, Q_CSR), BMU_OPER_INIT);
2598 	CSR_WRITE_4(dev, Q_ADDR(rxq, Q_CSR), BMU_FIFO_OP_ON);
2599 	if (dev->d_bustype == PEX_BUS) {
2600 		CSR_WRITE_2(dev, Q_ADDR(rxq, Q_WM), 0x80);
2601 	} else {
2602 		CSR_WRITE_2(dev, Q_ADDR(rxq, Q_WM), MSK_BMU_RX_WM);
2603 	}
2604 	if (dev->d_hw_id == CHIP_ID_YUKON_EC_U &&
2605 	    dev->d_hw_rev >= CHIP_REV_YU_EC_U_A1) {
2606 		/* MAC Rx RAM Read is controlled by hardware. */
2607 		CSR_WRITE_4(dev, Q_ADDR(rxq, Q_F), F_M_RX_RAM_DIS);
2608 	}
2609 
2610 	yge_init_tx_ring(port);
2611 
2612 	/* Disable Rx checksum offload and RSS hash. */
2613 	CSR_WRITE_4(dev, Q_ADDR(rxq, Q_CSR),
2614 	    BMU_DIS_RX_CHKSUM | BMU_DIS_RX_RSS_HASH);
2615 
2616 	yge_init_rx_ring(port);
2617 
2618 	/* Configure interrupt handling. */
2619 	if (port == dev->d_port[YGE_PORT_A]) {
2620 		dev->d_intrmask |= Y2_IS_PORT_A;
2621 		dev->d_intrhwemask |= Y2_HWE_L1_MASK;
2622 	} else if (port == dev->d_port[YGE_PORT_B]) {
2623 		dev->d_intrmask |= Y2_IS_PORT_B;
2624 		dev->d_intrhwemask |= Y2_HWE_L2_MASK;
2625 	}
2626 	CSR_WRITE_4(dev, B0_HWE_IMSK, dev->d_intrhwemask);
2627 	(void) CSR_READ_4(dev, B0_HWE_IMSK);
2628 	CSR_WRITE_4(dev, B0_IMSK, dev->d_intrmask);
2629 	(void) CSR_READ_4(dev, B0_IMSK);
2630 
2631 	/* Enable RX/TX GMAC */
2632 	gmac = GMAC_READ_2(dev, pnum, GM_GP_CTRL);
2633 	gmac |= (GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
2634 	GMAC_WRITE_2(port->p_dev, port->p_port, GM_GP_CTRL, gmac);
2635 	/* Read again to ensure writing. */
2636 	(void) GMAC_READ_2(dev, pnum, GM_GP_CTRL);
2637 
2638 	/* Reset TX timer */
2639 	port->p_tx_wdog = 0;
2640 }
2641 
/*
 * Carve up and enable the port's pre-computed share of the adapter's
 * internal RAM buffer for its Rx and Tx queues.  Ports without
 * PORT_FLAG_RAMBUF skip this entirely.  All RAM addresses are written
 * in units of 8 bytes (hence the "/ 8" scaling throughout).
 */
static void
yge_set_rambuffer(yge_port_t *port)
{
	yge_dev_t *dev;
	int ltpp, utpp;
	int pnum;
	uint32_t rxq;
	uint32_t txq;

	dev = port->p_dev;
	pnum = port->p_port;
	rxq = port->p_rxq;
	txq = port->p_txq;

	if ((port->p_flags & PORT_FLAG_RAMBUF) == 0)
		return;

	/* Setup Rx Queue. */
	CSR_WRITE_1(dev, RB_ADDR(rxq, RB_CTRL), RB_RST_CLR);
	CSR_WRITE_4(dev, RB_ADDR(rxq, RB_START), dev->d_rxqstart[pnum] / 8);
	CSR_WRITE_4(dev, RB_ADDR(rxq, RB_END), dev->d_rxqend[pnum] / 8);
	CSR_WRITE_4(dev, RB_ADDR(rxq, RB_WP), dev->d_rxqstart[pnum] / 8);
	CSR_WRITE_4(dev, RB_ADDR(rxq, RB_RP), dev->d_rxqstart[pnum] / 8);

	/*
	 * Upper/lower thresholds within the Rx RAM segment -- presumably
	 * the flow-control (pause) on/off watermarks, judging from the
	 * RB_ULPP/RB_LLPP_* names; confirm against chip documentation.
	 */
	utpp =
	    (dev->d_rxqend[pnum] + 1 - dev->d_rxqstart[pnum] - RB_ULPP) / 8;
	ltpp =
	    (dev->d_rxqend[pnum] + 1 - dev->d_rxqstart[pnum] - RB_LLPP_B) / 8;

	if (dev->d_rxqsize < MSK_MIN_RXQ_SIZE)
		ltpp += (RB_LLPP_B - RB_LLPP_S) / 8;

	CSR_WRITE_4(dev, RB_ADDR(rxq, RB_RX_UTPP), utpp);
	CSR_WRITE_4(dev, RB_ADDR(rxq, RB_RX_LTPP), ltpp);
	/* Set Rx priority(RB_RX_UTHP/RB_RX_LTHP) thresholds? */

	CSR_WRITE_1(dev, RB_ADDR(rxq, RB_CTRL), RB_ENA_OP_MD);
	(void) CSR_READ_1(dev, RB_ADDR(rxq, RB_CTRL));

	/* Setup Tx Queue. */
	CSR_WRITE_1(dev, RB_ADDR(txq, RB_CTRL), RB_RST_CLR);
	CSR_WRITE_4(dev, RB_ADDR(txq, RB_START), dev->d_txqstart[pnum] / 8);
	CSR_WRITE_4(dev, RB_ADDR(txq, RB_END),  dev->d_txqend[pnum] / 8);
	CSR_WRITE_4(dev, RB_ADDR(txq, RB_WP), dev->d_txqstart[pnum] / 8);
	CSR_WRITE_4(dev, RB_ADDR(txq, RB_RP), dev->d_txqstart[pnum] / 8);
	/* Enable Store & Forward for Tx side. */
	CSR_WRITE_1(dev, RB_ADDR(txq, RB_CTRL), RB_ENA_STFWD);
	CSR_WRITE_1(dev, RB_ADDR(txq, RB_CTRL), RB_ENA_OP_MD);
	/* Read back to flush the writes. */
	(void) CSR_READ_1(dev, RB_ADDR(txq, RB_CTRL));
}
2692 
/*
 * Reset, program and start the list-element prefetch unit for one queue,
 * pointing it at the given descriptor ring's DMA address and size.
 */
static void
yge_set_prefetch(yge_dev_t *dev, int qaddr, yge_ring_t *ring)
{
	/* Reset the prefetch unit. */
	CSR_WRITE_4(dev, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
	    PREF_UNIT_RST_SET);
	CSR_WRITE_4(dev, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
	    PREF_UNIT_RST_CLR);
	/* Set LE base address. */
	CSR_WRITE_4(dev, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_LOW_REG),
	    YGE_ADDR_LO(ring->r_paddr));
	CSR_WRITE_4(dev, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_HI_REG),
	    YGE_ADDR_HI(ring->r_paddr));
	/* Set the list last index. */
	CSR_WRITE_2(dev, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_LAST_IDX_REG),
	    ring->r_num - 1);
	/* Turn on prefetch unit. */
	CSR_WRITE_4(dev, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
	    PREF_UNIT_OP_ON);
	/* Dummy read to ensure write. */
	(void) CSR_READ_4(dev, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG));
}
2715 
2716 static void
2717 yge_stop_port(yge_port_t *port)
2718 {
2719 	yge_dev_t *dev = port->p_dev;
2720 	int pnum = port->p_port;
2721 	uint32_t txq = port->p_txq;
2722 	uint32_t rxq = port->p_rxq;
2723 	uint32_t val;
2724 	int i;
2725 
2726 	dev = port->p_dev;
2727 
2728 	/*
2729 	 * shutdown timeout
2730 	 */
2731 	port->p_tx_wdog = 0;
2732 
2733 	/* Disable interrupts. */
2734 	if (pnum == YGE_PORT_A) {
2735 		dev->d_intrmask &= ~Y2_IS_PORT_A;
2736 		dev->d_intrhwemask &= ~Y2_HWE_L1_MASK;
2737 	} else {
2738 		dev->d_intrmask &= ~Y2_IS_PORT_B;
2739 		dev->d_intrhwemask &= ~Y2_HWE_L2_MASK;
2740 	}
2741 	CSR_WRITE_4(dev, B0_HWE_IMSK, dev->d_intrhwemask);
2742 	(void) CSR_READ_4(dev, B0_HWE_IMSK);
2743 	CSR_WRITE_4(dev, B0_IMSK, dev->d_intrmask);
2744 	(void) CSR_READ_4(dev, B0_IMSK);
2745 
2746 	/* Disable Tx/Rx MAC. */
2747 	val = GMAC_READ_2(dev, pnum, GM_GP_CTRL);
2748 	val &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
2749 	GMAC_WRITE_2(dev, pnum, GM_GP_CTRL, val);
2750 	/* Read again to ensure writing. */
2751 	(void) GMAC_READ_2(dev, pnum, GM_GP_CTRL);
2752 
2753 	/* Update stats and clear counters. */
2754 	yge_stats_update(port);
2755 
2756 	/* Stop Tx BMU. */
2757 	CSR_WRITE_4(dev, Q_ADDR(txq, Q_CSR), BMU_STOP);
2758 	val = CSR_READ_4(dev, Q_ADDR(txq, Q_CSR));
2759 	for (i = 0; i < YGE_TIMEOUT; i += 10) {
2760 		if ((val & (BMU_STOP | BMU_IDLE)) == 0) {
2761 			CSR_WRITE_4(dev, Q_ADDR(txq, Q_CSR), BMU_STOP);
2762 			val = CSR_READ_4(dev, Q_ADDR(txq, Q_CSR));
2763 		} else
2764 			break;
2765 		drv_usecwait(10);
2766 	}
2767 	/* This is probably fairly catastrophic. */
2768 	if ((val & (BMU_STOP | BMU_IDLE)) == 0)
2769 		yge_error(NULL, port, "Tx BMU stop failed");
2770 
2771 	CSR_WRITE_1(dev, RB_ADDR(txq, RB_CTRL), RB_RST_SET | RB_DIS_OP_MD);
2772 
2773 	/* Disable all GMAC interrupt. */
2774 	CSR_WRITE_1(dev, MR_ADDR(pnum, GMAC_IRQ_MSK), 0);
2775 
2776 	/* Disable the RAM Interface Arbiter. */
2777 	CSR_WRITE_1(dev, MR_ADDR(pnum, TXA_CTRL), TXA_DIS_ARB);
2778 
2779 	/* Reset the PCI FIFO of the async Tx queue */
2780 	CSR_WRITE_4(dev, Q_ADDR(txq, Q_CSR), BMU_RST_SET | BMU_FIFO_RST);
2781 
2782 	/* Reset the Tx prefetch units. */
2783 	CSR_WRITE_4(dev, Y2_PREF_Q_ADDR(txq, PREF_UNIT_CTRL_REG),
2784 	    PREF_UNIT_RST_SET);
2785 
2786 	/* Reset the RAM Buffer async Tx queue. */
2787 	CSR_WRITE_1(dev, RB_ADDR(txq, RB_CTRL), RB_RST_SET);
2788 
2789 	/* Reset Tx MAC FIFO. */
2790 	CSR_WRITE_4(dev, MR_ADDR(pnum, TX_GMF_CTRL_T), GMF_RST_SET);
2791 	/* Set Pause Off. */
2792 	CSR_WRITE_4(dev, MR_ADDR(pnum, GMAC_CTRL), GMC_PAUSE_OFF);
2793 
2794 	/*
2795 	 * The Rx Stop command will not work for Yukon-2 if the BMU does not
2796 	 * reach the end of packet and since we can't make sure that we have
2797 	 * incoming data, we must reset the BMU while it is not during a DMA
2798 	 * transfer. Since it is possible that the Rx path is still active,
2799 	 * the Rx RAM buffer will be stopped first, so any possible incoming
2800 	 * data will not trigger a DMA. After the RAM buffer is stopped, the
2801 	 * BMU is polled until any DMA in progress is ended and only then it
2802 	 * will be reset.
2803 	 */
2804 
2805 	/* Disable the RAM Buffer receive queue. */
2806 	CSR_WRITE_1(dev, RB_ADDR(rxq, RB_CTRL), RB_DIS_OP_MD);
2807 	for (i = 0; i < YGE_TIMEOUT; i += 10) {
2808 		if (CSR_READ_1(dev, RB_ADDR(rxq, Q_RSL)) ==
2809 		    CSR_READ_1(dev, RB_ADDR(rxq, Q_RL)))
2810 			break;
2811 		drv_usecwait(10);
2812 	}
2813 	/* This is probably nearly a fatal error. */
2814 	if (i == YGE_TIMEOUT)
2815 		yge_error(NULL, port, "Rx BMU stop failed");
2816 
2817 	CSR_WRITE_4(dev, Q_ADDR(rxq, Q_CSR), BMU_RST_SET | BMU_FIFO_RST);
2818 	/* Reset the Rx prefetch unit. */
2819 	CSR_WRITE_4(dev, Y2_PREF_Q_ADDR(rxq, PREF_UNIT_CTRL_REG),
2820 	    PREF_UNIT_RST_SET);
2821 	/* Reset the RAM Buffer receive queue. */
2822 	CSR_WRITE_1(dev, RB_ADDR(rxq, RB_CTRL), RB_RST_SET);
2823 	/* Reset Rx MAC FIFO. */
2824 	CSR_WRITE_4(dev, MR_ADDR(pnum, RX_GMF_CTRL_T), GMF_RST_SET);
2825 }
2826 
2827 /*
2828  * When GM_PAR_MIB_CLR bit of GM_PHY_ADDR is set, reading lower
2829  * counter clears high 16 bits of the counter such that accessing
2830  * lower 16 bits should be the last operation.
2831  */
2832 #define	YGE_READ_MIB32(x, y)					\
2833 	GMAC_READ_4(dev, x, y)
2834 
2835 #define	YGE_READ_MIB64(x, y)					\
2836 	((((uint64_t)YGE_READ_MIB32(x, (y) + 8)) << 32) +	\
2837 	    (uint64_t)YGE_READ_MIB32(x, y))
2838 
2839 static void
2840 yge_stats_clear(yge_port_t *port)
2841 {
2842 	yge_dev_t *dev;
2843 	uint16_t gmac;
2844 	int32_t pnum;
2845 
2846 	pnum = port->p_port;
2847 	dev = port->p_dev;
2848 
2849 	/* Set MIB Clear Counter Mode. */
2850 	gmac = GMAC_READ_2(dev, pnum, GM_PHY_ADDR);
2851 	GMAC_WRITE_2(dev, pnum, GM_PHY_ADDR, gmac | GM_PAR_MIB_CLR);
2852 	/* Read all MIB Counters with Clear Mode set. */
2853 	for (int i = GM_RXF_UC_OK; i <= GM_TXE_FIFO_UR; i += 4)
2854 		(void) YGE_READ_MIB32(pnum, i);
2855 	/* Clear MIB Clear Counter Mode. */
2856 	gmac &= ~GM_PAR_MIB_CLR;
2857 	GMAC_WRITE_2(dev, pnum, GM_PHY_ADDR, gmac);
2858 }
2859 
/*
 * Harvest the hardware MIB counters into the port's accumulated soft
 * statistics.  Counters are read with GM_PAR_MIB_CLR set, so each one is
 * cleared as it is read; the YGE_READ_MIB32/64 helpers read in the order
 * required by the clear-on-read note above (low 16 bits last).  Skipped
 * when the device is suspended or the port is not running, since the
 * hardware may not be accessible then.
 */
static void
yge_stats_update(yge_port_t *port)
{
	yge_dev_t *dev;
	struct yge_hw_stats *stats;
	uint16_t gmac;
	int32_t	pnum;

	dev = port->p_dev;
	pnum = port->p_port;

	if (dev->d_suspended || !port->p_running) {
		return;
	}
	stats = &port->p_stats;
	/* Set MIB Clear Counter Mode. */
	gmac = GMAC_READ_2(dev, pnum, GM_PHY_ADDR);
	GMAC_WRITE_2(dev, pnum, GM_PHY_ADDR, gmac | GM_PAR_MIB_CLR);

	/* Rx stats.  The (void) reads step over spare/unused counters. */
	stats->rx_ucast_frames +=	YGE_READ_MIB32(pnum, GM_RXF_UC_OK);
	stats->rx_bcast_frames +=	YGE_READ_MIB32(pnum, GM_RXF_BC_OK);
	stats->rx_pause_frames +=	YGE_READ_MIB32(pnum, GM_RXF_MPAUSE);
	stats->rx_mcast_frames +=	YGE_READ_MIB32(pnum, GM_RXF_MC_OK);
	stats->rx_crc_errs +=		YGE_READ_MIB32(pnum, GM_RXF_FCS_ERR);
	(void) YGE_READ_MIB32(pnum, GM_RXF_SPARE1);
	stats->rx_good_octets +=	YGE_READ_MIB64(pnum, GM_RXO_OK_LO);
	stats->rx_bad_octets +=		YGE_READ_MIB64(pnum, GM_RXO_ERR_LO);
	stats->rx_runts +=		YGE_READ_MIB32(pnum, GM_RXF_SHT);
	stats->rx_runt_errs +=		YGE_READ_MIB32(pnum, GM_RXE_FRAG);
	stats->rx_pkts_64 +=		YGE_READ_MIB32(pnum, GM_RXF_64B);
	stats->rx_pkts_65_127 +=	YGE_READ_MIB32(pnum, GM_RXF_127B);
	stats->rx_pkts_128_255 +=	YGE_READ_MIB32(pnum, GM_RXF_255B);
	stats->rx_pkts_256_511 +=	YGE_READ_MIB32(pnum, GM_RXF_511B);
	stats->rx_pkts_512_1023 +=	YGE_READ_MIB32(pnum, GM_RXF_1023B);
	stats->rx_pkts_1024_1518 +=	YGE_READ_MIB32(pnum, GM_RXF_1518B);
	stats->rx_pkts_1519_max +=	YGE_READ_MIB32(pnum, GM_RXF_MAX_SZ);
	stats->rx_pkts_too_long +=	YGE_READ_MIB32(pnum, GM_RXF_LNG_ERR);
	stats->rx_pkts_jabbers +=	YGE_READ_MIB32(pnum, GM_RXF_JAB_PKT);
	(void) YGE_READ_MIB32(pnum, GM_RXF_SPARE2);
	stats->rx_fifo_oflows +=	YGE_READ_MIB32(pnum, GM_RXE_FIFO_OV);
	(void) YGE_READ_MIB32(pnum, GM_RXF_SPARE3);

	/* Tx stats. */
	stats->tx_ucast_frames +=	YGE_READ_MIB32(pnum, GM_TXF_UC_OK);
	stats->tx_bcast_frames +=	YGE_READ_MIB32(pnum, GM_TXF_BC_OK);
	stats->tx_pause_frames +=	YGE_READ_MIB32(pnum, GM_TXF_MPAUSE);
	stats->tx_mcast_frames +=	YGE_READ_MIB32(pnum, GM_TXF_MC_OK);
	stats->tx_octets +=		YGE_READ_MIB64(pnum, GM_TXO_OK_LO);
	stats->tx_pkts_64 +=		YGE_READ_MIB32(pnum, GM_TXF_64B);
	stats->tx_pkts_65_127 +=	YGE_READ_MIB32(pnum, GM_TXF_127B);
	stats->tx_pkts_128_255 +=	YGE_READ_MIB32(pnum, GM_TXF_255B);
	stats->tx_pkts_256_511 +=	YGE_READ_MIB32(pnum, GM_TXF_511B);
	stats->tx_pkts_512_1023 +=	YGE_READ_MIB32(pnum, GM_TXF_1023B);
	stats->tx_pkts_1024_1518 +=	YGE_READ_MIB32(pnum, GM_TXF_1518B);
	stats->tx_pkts_1519_max +=	YGE_READ_MIB32(pnum, GM_TXF_MAX_SZ);
	(void) YGE_READ_MIB32(pnum, GM_TXF_SPARE1);
	stats->tx_colls +=		YGE_READ_MIB32(pnum, GM_TXF_COL);
	stats->tx_late_colls +=		YGE_READ_MIB32(pnum, GM_TXF_LAT_COL);
	stats->tx_excess_colls +=	YGE_READ_MIB32(pnum, GM_TXF_ABO_COL);
	stats->tx_multi_colls +=	YGE_READ_MIB32(pnum, GM_TXF_MUL_COL);
	stats->tx_single_colls +=	YGE_READ_MIB32(pnum, GM_TXF_SNG_COL);
	stats->tx_underflows +=		YGE_READ_MIB32(pnum, GM_TXE_FIFO_UR);
	/* Clear MIB Clear Counter Mode. */
	gmac &= ~GM_PAR_MIB_CLR;
	GMAC_WRITE_2(dev, pnum, GM_PHY_ADDR, gmac);
}
2927 
2928 #undef YGE_READ_MIB32
2929 #undef YGE_READ_MIB64
2930 
/*
 * Compute the multicast hash-filter bin (0-63) for an Ethernet address.
 * This runs the 6 address bytes, LSB first, through a big-endian CRC-32
 * LFSR (polynomial 0x04c11db7, initial value 0xffffffff) and keeps the
 * low six bits of the result.
 */
uint32_t
yge_hashbit(const uint8_t *addr)
{
	const uint32_t poly = 0x04c11db7;
	uint32_t crc = 0xffffffff;
	int byte;
	int bit;

	for (byte = 0; byte < 6; byte++) {
		uint32_t octet = addr[byte];

		for (bit = 0; bit < 8; bit++) {
			uint32_t feedback = ((crc >> 31) ^ octet) & 1;

			crc = (crc << 1) ^ (feedback ? poly : 0);
			octet >>= 1;
		}
	}

	/* The hardware hash table has 64 bins; keep the low 6 bits. */
	return (crc & 63);
}
2951 
/*
 * GLDv3 statistics entry point.  Link/MII statistics are delegated to
 * mii_m_getstat(); everything else is served from the soft copy of the
 * hardware MIB counters.  The counters are refreshed from hardware only
 * when MAC_STAT_IFSPEED is queried (the first stat the framework asks
 * for) to avoid paying the refresh cost once per stat.
 *
 * Returns 0 on success, ENOTSUP for stats neither MII nor this driver
 * maintains.
 */
int
yge_m_stat(void *arg, uint_t stat, uint64_t *val)
{
	yge_port_t	*port = arg;
	struct yge_hw_stats *stats = &port->p_stats;

	if (stat == MAC_STAT_IFSPEED) {
		/*
		 * This is the first stat we are asked about.  We update only
		 * for this stat, to avoid paying the hefty cost of the update
		 * once for each stat.
		 */
		DEV_LOCK(port->p_dev);
		yge_stats_update(port);
		DEV_UNLOCK(port->p_dev);
	}

	if (mii_m_getstat(port->p_mii, stat, val) == 0) {
		return (0);
	}

	switch (stat) {
	case MAC_STAT_MULTIRCV:
		*val = stats->rx_mcast_frames;
		break;

	case MAC_STAT_BRDCSTRCV:
		*val = stats->rx_bcast_frames;
		break;

	case MAC_STAT_MULTIXMT:
		*val = stats->tx_mcast_frames;
		break;

	case MAC_STAT_BRDCSTXMT:
		*val = stats->tx_bcast_frames;
		break;

	case MAC_STAT_IPACKETS:
		*val = stats->rx_ucast_frames;
		break;

	case MAC_STAT_RBYTES:
		*val = stats->rx_good_octets;
		break;

	case MAC_STAT_OPACKETS:
		*val = stats->tx_ucast_frames;
		break;

	case MAC_STAT_OBYTES:
		*val = stats->tx_octets;
		break;

	case MAC_STAT_NORCVBUF:
		*val = stats->rx_nobuf;
		break;

	case MAC_STAT_COLLISIONS:
		*val = stats->tx_colls;
		break;

	case ETHER_STAT_ALIGN_ERRORS:
		*val = stats->rx_runt_errs;
		break;

	case ETHER_STAT_FCS_ERRORS:
		*val = stats->rx_crc_errs;
		break;

	case ETHER_STAT_FIRST_COLLISIONS:
		*val  = stats->tx_single_colls;
		break;

	case ETHER_STAT_MULTI_COLLISIONS:
		*val = stats->tx_multi_colls;
		break;

	case ETHER_STAT_TX_LATE_COLLISIONS:
		*val = stats->tx_late_colls;
		break;

	case ETHER_STAT_EX_COLLISIONS:
		*val = stats->tx_excess_colls;
		break;

	case ETHER_STAT_TOOLONG_ERRORS:
		*val = stats->rx_pkts_too_long;
		break;

	case MAC_STAT_OVERFLOWS:
		*val = stats->rx_fifo_oflows;
		break;

	case MAC_STAT_UNDERFLOWS:
		*val = stats->tx_underflows;
		break;

	case ETHER_STAT_TOOSHORT_ERRORS:
		*val = stats->rx_runts;
		break;

	case ETHER_STAT_JABBER_ERRORS:
		*val = stats->rx_pkts_jabbers;
		break;

	default:
		return (ENOTSUP);
	}
	return (0);
}
3063 
3064 int
3065 yge_m_start(void *arg)
3066 {
3067 	yge_port_t	*port = arg;
3068 
3069 	DEV_LOCK(port->p_dev);
3070 
3071 	/*
3072 	 * We defer resource allocation to this point, because we
3073 	 * don't want to waste DMA resources that might better be used
3074 	 * elsewhere, if the port is not actually being used.
3075 	 *
3076 	 * Furthermore, this gives us a more graceful handling of dynamic
3077 	 * MTU modification.
3078 	 */
3079 	if (yge_txrx_dma_alloc(port) != DDI_SUCCESS) {
3080 		/* Make sure we free up partially allocated resources. */
3081 		yge_txrx_dma_free(port);
3082 		DEV_UNLOCK(port->p_dev);
3083 		return (ENOMEM);
3084 	}
3085 
3086 	if (!port->p_dev->d_suspended)
3087 		yge_start_port(port);
3088 	port->p_running = B_TRUE;
3089 	DEV_UNLOCK(port->p_dev);
3090 
3091 	mii_start(port->p_mii);
3092 
3093 	return (0);
3094 }
3095 
3096 void
3097 yge_m_stop(void *arg)
3098 {
3099 	yge_port_t	*port = arg;
3100 	yge_dev_t	*dev = port->p_dev;
3101 
3102 	DEV_LOCK(dev);
3103 	if (!dev->d_suspended)
3104 		yge_stop_port(port);
3105 
3106 	port->p_running = B_FALSE;
3107 
3108 	/* Release resources we don't need */
3109 	yge_txrx_dma_free(port);
3110 	DEV_UNLOCK(dev);
3111 }
3112 
3113 int
3114 yge_m_promisc(void *arg, boolean_t on)
3115 {
3116 	yge_port_t	*port = arg;
3117 
3118 	DEV_LOCK(port->p_dev);
3119 
3120 	/* Save current promiscuous mode. */
3121 	port->p_promisc = on;
3122 	yge_setrxfilt(port);
3123 
3124 	DEV_UNLOCK(port->p_dev);
3125 
3126 	return (0);
3127 }
3128 
3129 int
3130 yge_m_multicst(void *arg, boolean_t add, const uint8_t *addr)
3131 {
3132 	yge_port_t	*port = arg;
3133 	int		bit;
3134 	boolean_t	update;
3135 
3136 	bit = yge_hashbit(addr);
3137 	ASSERT(bit < 64);
3138 
3139 	DEV_LOCK(port->p_dev);
3140 	if (add) {
3141 		if (port->p_mccount[bit] == 0) {
3142 			/* Set the corresponding bit in the hash table. */
3143 			port->p_mchash[bit / 32] |= (1 << (bit % 32));
3144 			update = B_TRUE;
3145 		}
3146 		port->p_mccount[bit]++;
3147 	} else {
3148 		ASSERT(port->p_mccount[bit] > 0);
3149 		port->p_mccount[bit]--;
3150 		if (port->p_mccount[bit] == 0) {
3151 			port->p_mchash[bit / 32] &= ~(1 << (bit % 32));
3152 			update = B_TRUE;
3153 		}
3154 	}
3155 
3156 	if (update) {
3157 		yge_setrxfilt(port);
3158 	}
3159 	DEV_UNLOCK(port->p_dev);
3160 	return (0);
3161 }
3162 
3163 int
3164 yge_m_unicst(void *arg, const uint8_t *macaddr)
3165 {
3166 	yge_port_t	*port = arg;
3167 
3168 	DEV_LOCK(port->p_dev);
3169 
3170 	bcopy(macaddr, port->p_curraddr, ETHERADDRL);
3171 	yge_setrxfilt(port);
3172 
3173 	DEV_UNLOCK(port->p_dev);
3174 
3175 	return (0);
3176 }
3177 
3178 mblk_t *
3179 yge_m_tx(void *arg, mblk_t *mp)
3180 {
3181 	yge_port_t	*port = arg;
3182 	mblk_t		*nmp;
3183 	int		enq = 0;
3184 	uint32_t	ridx;
3185 	int		idx;
3186 	boolean_t	resched = B_FALSE;
3187 
3188 	TX_LOCK(port->p_dev);
3189 
3190 	if (port->p_dev->d_suspended) {
3191 
3192 		TX_UNLOCK(port->p_dev);
3193 
3194 		while ((nmp = mp) != NULL) {
3195 			/* carrier_errors++; */
3196 			mp = mp->b_next;
3197 			freemsg(nmp);
3198 		}
3199 		return (NULL);
3200 	}
3201 
3202 	/* attempt a reclaim */
3203 	ridx = port->p_port == YGE_PORT_A ?
3204 	    STAT_TXA1_RIDX : STAT_TXA2_RIDX;
3205 	idx = CSR_READ_2(port->p_dev, ridx);
3206 	if (port->p_tx_cons != idx)
3207 		resched = yge_txeof_locked(port, idx);
3208 
3209 	while (mp != NULL) {
3210 		nmp = mp->b_next;
3211 		mp->b_next = NULL;
3212 
3213 		if (!yge_send(port, mp)) {
3214 			mp->b_next = nmp;
3215 			break;
3216 		}
3217 		enq++;
3218 		mp = nmp;
3219 
3220 	}
3221 	if (enq > 0) {
3222 		/* Transmit */
3223 		CSR_WRITE_2(port->p_dev,
3224 		    Y2_PREF_Q_ADDR(port->p_txq, PREF_UNIT_PUT_IDX_REG),
3225 		    port->p_tx_prod);
3226 	}
3227 
3228 	TX_UNLOCK(port->p_dev);
3229 
3230 	if (resched)
3231 		mac_tx_update(port->p_mh);
3232 
3233 	return (mp);
3234 }
3235 
/*
 * GLDv3 mc_ioctl(9E) entry point.  Unless the (normally disabled)
 * YGE_MII_LOOPBACK support is compiled in, every ioctl is rejected
 * with EINVAL.
 */
void
yge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
{
#ifdef	YGE_MII_LOOPBACK
	/* LINTED E_FUNC_SET_NOT_USED */
	yge_port_t	*port = arg;

	/*
	 * Right now, the MII common layer does not properly handle
	 * loopback on these PHYs.  Fixing this should be done at some
	 * point in the future.
	 */
	if (mii_m_loop_ioctl(port->p_mii, wq, mp))
		return;
#else
	_NOTE(ARGUNUSED(arg));
#endif

	/* Not a request we recognize; NAK it. */
	miocnak(wq, mp, 0, EINVAL);
}
3256 
3257 int
3258 yge_m_setprop(void *arg, const char *pr_name, mac_prop_id_t pr_num,
3259     uint_t pr_valsize, const void *pr_val)
3260 {
3261 	yge_port_t	*port = arg;
3262 	uint32_t	new_mtu;
3263 	int err = 0;
3264 
3265 	err = mii_m_setprop(port->p_mii, pr_name, pr_num, pr_valsize, pr_val);
3266 	if (err != ENOTSUP) {
3267 		return (err);
3268 	}
3269 
3270 	DEV_LOCK(port->p_dev);
3271 
3272 	switch (pr_num) {
3273 	case MAC_PROP_MTU:
3274 		if (pr_valsize < sizeof (new_mtu)) {
3275 			err = EINVAL;
3276 			break;
3277 		}
3278 		bcopy(pr_val, &new_mtu, sizeof (new_mtu));
3279 		if (new_mtu == port->p_mtu) {
3280 			/* no change */
3281 			err = 0;
3282 			break;
3283 		}
3284 		if (new_mtu < ETHERMTU) {
3285 			yge_error(NULL, port,
3286 			    "Maximum MTU size too small: %d", new_mtu);
3287 			err = EINVAL;
3288 			break;
3289 		}
3290 		if (new_mtu > (port->p_flags & PORT_FLAG_NOJUMBO ?
3291 		    ETHERMTU : YGE_JUMBO_MTU)) {
3292 			yge_error(NULL, port,
3293 			    "Maximum MTU size too big: %d", new_mtu);
3294 			err = EINVAL;
3295 			break;
3296 		}
3297 		if (port->p_running) {
3298 			yge_error(NULL, port,
3299 			    "Unable to change maximum MTU while running");
3300 			err = EBUSY;
3301 			break;
3302 		}
3303 
3304 
3305 		/*
3306 		 * NB: It would probably be better not to hold the
3307 		 * DEVLOCK, but releasing it creates a potential race
3308 		 * if m_start is called concurrently.
3309 		 *
3310 		 * It turns out that the MAC layer guarantees safety
3311 		 * for us here by using a cut out for this kind of
3312 		 * notification call back anyway.
3313 		 *
3314 		 * See R8. and R14. in mac.c locking comments, which read
3315 		 * as follows:
3316 		 *
3317 		 * R8. Since it is not guaranteed (see R14) that
3318 		 * drivers won't hold locks across mac driver
3319 		 * interfaces, the MAC layer must provide a cut out
3320 		 * for control interfaces like upcall notifications
3321 		 * and start them in a separate thread.
3322 		 *
3323 		 * R14. It would be preferable if MAC drivers don't
3324 		 * hold any locks across any mac call. However at a
3325 		 * minimum they must not hold any locks across data
3326 		 * upcalls. They must also make sure that all
3327 		 * references to mac data structures are cleaned up
3328 		 * and that it is single threaded at mac_unregister
3329 		 * time.
3330 		 */
3331 		err = mac_maxsdu_update(port->p_mh, new_mtu);
3332 		if (err != 0) {
3333 			/* This should never occur! */
3334 			yge_error(NULL, port,
3335 			    "Failed notifying GLDv3 of new maximum MTU");
3336 		} else {
3337 			port->p_mtu = new_mtu;
3338 		}
3339 		break;
3340 
3341 	default:
3342 		err = ENOTSUP;
3343 		break;
3344 	}
3345 
3346 err:
3347 	DEV_UNLOCK(port->p_dev);
3348 
3349 	return (err);
3350 }
3351 
3352 int
3353 yge_m_getprop(void *arg, const char *pr_name, mac_prop_id_t pr_num,
3354     uint_t pr_valsize, void *pr_val)
3355 {
3356 	yge_port_t	*port = arg;
3357 
3358 	return (mii_m_getprop(port->p_mii, pr_name, pr_num, pr_valsize,
3359 	    pr_val));
3360 }
3361 
3362 static void
3363 yge_m_propinfo(void *arg, const char *pr_name, mac_prop_id_t pr_num,
3364     mac_prop_info_handle_t prh)
3365 {
3366 	yge_port_t	*port = arg;
3367 
3368 	switch (pr_num) {
3369 	case MAC_PROP_MTU:
3370 		mac_prop_info_set_range_uint32(prh, ETHERMTU,
3371 		    port->p_flags & PORT_FLAG_NOJUMBO ?
3372 		    ETHERMTU : YGE_JUMBO_MTU);
3373 		break;
3374 	default:
3375 		mii_m_propinfo(port->p_mii, pr_name, pr_num, prh);
3376 		break;
3377 	}
3378 }
3379 
/*
 * Post work flag(s) (YGE_TASK_*) for the worker thread and wake it.
 * Flags accumulate under TASK_LOCK and are consumed by yge_task().
 */
void
yge_dispatch(yge_dev_t *dev, int flag)
{
	TASK_LOCK(dev);
	dev->d_task_flags |= flag;
	TASK_SIGNAL(dev);
	TASK_UNLOCK(dev);
}
3388 
/*
 * Worker thread body.  Sleeps until yge_dispatch() posts one or more
 * YGE_TASK_* flags, then performs the requested work without holding
 * any locks.  Exits when YGE_TASK_EXIT is posted.
 */
void
yge_task(void *arg)
{
	yge_dev_t	*dev = arg;
	int		flags;

	for (;;) {

		TASK_LOCK(dev);
		while ((flags = dev->d_task_flags) == 0)
			TASK_WAIT(dev);

		/* Consume all pending flags in one batch. */
		dev->d_task_flags = 0;
		TASK_UNLOCK(dev);

		/*
		 * This should be the first thing after the sleep so if we are
		 * requested to exit we do that and not waste time doing work
		 * we will then abandon.
		 */
		if (flags & YGE_TASK_EXIT)
			break;

		/* all processing done without holding locks */
		if (flags & YGE_TASK_RESTART)
			yge_restart_task(dev);
	}
}
3417 
3418 void
3419 yge_error(yge_dev_t *dev, yge_port_t *port, char *fmt, ...)
3420 {
3421 	va_list		ap;
3422 	char		buf[256];
3423 	int		ppa;
3424 
3425 	va_start(ap, fmt);
3426 	(void) vsnprintf(buf, sizeof (buf), fmt, ap);
3427 	va_end(ap);
3428 
3429 	if (dev == NULL && port == NULL) {
3430 		cmn_err(CE_WARN, "yge: %s", buf);
3431 	} else {
3432 		if (port != NULL)
3433 			ppa = port->p_ppa;
3434 		else
3435 			ppa = ddi_get_instance(dev->d_dip);
3436 		cmn_err(CE_WARN, "yge%d: %s", ppa, buf);
3437 	}
3438 }
3439 
3440 static int
3441 yge_ddi_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
3442 {
3443 	yge_dev_t	*dev;
3444 	int		rv;
3445 
3446 	switch (cmd) {
3447 	case DDI_ATTACH:
3448 		dev = kmem_zalloc(sizeof (*dev), KM_SLEEP);
3449 		dev->d_port[0] = kmem_zalloc(sizeof (yge_port_t), KM_SLEEP);
3450 		dev->d_port[1] = kmem_zalloc(sizeof (yge_port_t), KM_SLEEP);
3451 		dev->d_dip = dip;
3452 		ddi_set_driver_private(dip, dev);
3453 
3454 		dev->d_port[0]->p_port = 0;
3455 		dev->d_port[0]->p_dev = dev;
3456 		dev->d_port[1]->p_port = 0;
3457 		dev->d_port[1]->p_dev = dev;
3458 
3459 		rv = yge_attach(dev);
3460 		if (rv != DDI_SUCCESS) {
3461 			ddi_set_driver_private(dip, 0);
3462 			kmem_free(dev->d_port[1], sizeof (yge_port_t));
3463 			kmem_free(dev->d_port[0], sizeof (yge_port_t));
3464 			kmem_free(dev, sizeof (*dev));
3465 		}
3466 		return (rv);
3467 
3468 	case DDI_RESUME:
3469 		dev = ddi_get_driver_private(dip);
3470 		ASSERT(dev != NULL);
3471 		return (yge_resume(dev));
3472 
3473 	default:
3474 		return (DDI_FAILURE);
3475 	}
3476 }
3477 
3478 static int
3479 yge_ddi_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
3480 {
3481 	yge_dev_t	*dev;
3482 	int		rv;
3483 
3484 	switch (cmd) {
3485 	case DDI_DETACH:
3486 
3487 		dev = ddi_get_driver_private(dip);
3488 
3489 		/* attempt to unregister MACs from Nemo */
3490 		for (int i = 0; i < dev->d_num_port; i++) {
3491 			rv = yge_unregister_port(dev->d_port[i]);
3492 			if (rv != DDI_SUCCESS) {
3493 				return (DDI_FAILURE);
3494 			}
3495 		}
3496 
3497 		ASSERT(dip == dev->d_dip);
3498 		yge_detach(dev);
3499 		ddi_set_driver_private(dip, 0);
3500 		kmem_free(dev->d_port[1], sizeof (yge_port_t));
3501 		kmem_free(dev->d_port[0], sizeof (yge_port_t));
3502 		kmem_free(dev, sizeof (*dev));
3503 		return (DDI_SUCCESS);
3504 
3505 	case DDI_SUSPEND:
3506 		dev = ddi_get_driver_private(dip);
3507 		ASSERT(dev != NULL);
3508 		return (yge_suspend(dev));
3509 
3510 	default:
3511 		return (DDI_FAILURE);
3512 	}
3513 }
3514 
3515 static int
3516 yge_quiesce(dev_info_t *dip)
3517 {
3518 	yge_dev_t *dev;
3519 
3520 	dev = ddi_get_driver_private(dip);
3521 	ASSERT(dev != NULL);
3522 
3523 	/* NB: No locking!  We are called in single threaded context */
3524 	for (int i = 0; i < dev->d_num_port; i++) {
3525 		yge_port_t *port = dev->d_port[i];
3526 		if (port->p_running)
3527 			yge_stop_port(port);
3528 	}
3529 
3530 	/* Disable all interrupts. */
3531 	CSR_WRITE_4(dev, B0_IMSK, 0);
3532 	(void) CSR_READ_4(dev, B0_IMSK);
3533 	CSR_WRITE_4(dev, B0_HWE_IMSK, 0);
3534 	(void) CSR_READ_4(dev, B0_HWE_IMSK);
3535 
3536 	/* Put hardware into reset. */
3537 	CSR_WRITE_2(dev, B0_CTST, CS_RST_SET);
3538 
3539 	return (DDI_SUCCESS);
3540 }
3541 
/*
 * Device operations.  No character/block entry points of our own;
 * mac_init_ops() (see _init()) wires in the GLDv3 plumbing.
 */
DDI_DEFINE_STREAM_OPS(yge_devops, nulldev, nulldev, yge_ddi_attach,
    yge_ddi_detach, nodev, NULL, D_MP, NULL, yge_quiesce);
3547 
/*
 * Module linkage information, consumed by mod_install()/mod_remove()
 * in _init()/_fini() below.
 */

static struct modldrv yge_modldrv = {
	&mod_driverops,			/* drv_modops */
	"Yukon 2 Ethernet",		/* drv_linkinfo */
	&yge_devops			/* drv_dev_ops */
};

static struct modlinkage yge_modlinkage = {
	MODREV_1,		/* ml_rev */
	&yge_modldrv,		/* ml_linkage */
	NULL
};
3563 
3564 /*
3565  * DDI entry points.
3566  */
3567 int
3568 _init(void)
3569 {
3570 	int	rv;
3571 	mac_init_ops(&yge_devops, "yge");
3572 	if ((rv = mod_install(&yge_modlinkage)) != DDI_SUCCESS) {
3573 		mac_fini_ops(&yge_devops);
3574 	}
3575 	return (rv);
3576 }
3577 
3578 int
3579 _fini(void)
3580 {
3581 	int	rv;
3582 	if ((rv = mod_remove(&yge_modlinkage)) == DDI_SUCCESS) {
3583 		mac_fini_ops(&yge_devops);
3584 	}
3585 	return (rv);
3586 }
3587 
/*
 * _info(9E): report module information from the modlinkage.
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&yge_modlinkage, modinfop));
}
3593