xref: /titanic_41/usr/src/uts/common/io/bge/bge_chip2.c (revision 989f28072d20c73ae0955d6a1e3e2fc74831cb39)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
24  */
25 
26 /*
27  * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
28  */
29 
30 #include "bge_impl.h"
31 
32 #define	PIO_ADDR(bgep, offset)	((void *)((caddr_t)(bgep)->io_regs+(offset)))
33 
34 /*
35  * Future features ... ?
36  */
37 #define	BGE_CFG_IO8	1	/* 8/16-bit cfg space BIS/BIC	*/
38 #define	BGE_IND_IO32	1	/* indirect access code		*/
39 #define	BGE_SEE_IO32	1	/* SEEPROM access code		*/
40 #define	BGE_FLASH_IO32	1	/* FLASH access code		*/
41 
/*
 * BGE MSI tunable:
 *
 * By default MSI is enabled on all supported platforms but it is disabled
 * for some Broadcom chips due to known MSI hardware issues. Currently MSI
 * is enabled only for 5714C A2 and 5715C A2 Broadcom chips.
 */
boolean_t bge_enable_msi = B_TRUE;

/*
 * PCI-X/PCI-E relaxed ordering tunable for OS/Nexus driver
 */
boolean_t bge_relaxed_ordering = B_TRUE;

/*
 * Property names
 */
static char knownids_propname[] = "bge-known-subsystems";
60 
/*
 * Patchable globals:
 *
 *	bge_autorecover
 *		Enables/disables automatic recovery after fault detection
 *
 *	bge_mlcr_default
 *		Value to program into the MLCR; controls the chip's GPIO pins
 *
 *	bge_dma_{rd,wr}prio
 *		Relative priorities of DMA reads & DMA writes respectively.
 *		These may each be patched to any value 0-3.  Equal values
 *		will give "fair" (round-robin) arbitration for PCI access.
 *		Unequal values will give one or the other function priority.
 *
 *	bge_dma_rwctrl
 *		Value to put in the Read/Write DMA control register.  See
 *		the Broadcom PRM for things you can fiddle with in this
 *		register ...
 *
 *	bge_{tx,rx}_{count,ticks}_{norm,intr}
 *		Send/receive interrupt coalescing parameters.  Counts are
 *		#s of descriptors, ticks are in microseconds.  *norm* values
 *		apply between status updates/interrupts; the *intr* values
 *		refer to the 'during-interrupt' versions - see the PRM.
 *
 *		NOTE: these values have been determined by measurement. They
 *		differ significantly from the values recommended in the PRM.
 */
static uint32_t bge_autorecover = 1;
static uint32_t bge_mlcr_default_5714 = MLCR_DEFAULT_5714;

static uint32_t bge_dma_rdprio = 1;		/* 0-3, see above	*/
static uint32_t bge_dma_wrprio = 0;		/* 0-3, see above	*/
static uint32_t bge_dma_rwctrl = PDRWCR_VAR_DEFAULT;
static uint32_t bge_dma_rwctrl_5721 = PDRWCR_VAR_5721;
static uint32_t bge_dma_rwctrl_5714 = PDRWCR_VAR_5714;
static uint32_t bge_dma_rwctrl_5715 = PDRWCR_VAR_5715;

uint32_t bge_rx_ticks_norm = 128;		/* microseconds		*/
uint32_t bge_tx_ticks_norm = 2048;		/* 8 for FJ2+ !?!?	*/
uint32_t bge_rx_count_norm = 8;			/* descriptors		*/
uint32_t bge_tx_count_norm = 128;		/* descriptors		*/

static uint32_t bge_rx_ticks_intr = 128;	/* microseconds		*/
static uint32_t bge_tx_ticks_intr = 0;		/* 8 for FJ2+ !?!?	*/
static uint32_t bge_rx_count_intr = 2;		/* descriptors		*/
static uint32_t bge_tx_count_intr = 0;		/* descriptors		*/
109 
/*
 * Memory pool configuration parameters.
 *
 * These are generally specific to each member of the chip family, since
 * each one may have a different memory size/configuration.
 *
 * Setting the mbuf pool length for a specific type of chip to 0 inhibits
 * the driver from programming the various registers; instead they are left
 * at their hardware defaults.  This is the preferred option for later chips
 * (5705+), whereas the older chips *required* these registers to be set,
 * since the h/w default was 0 ;-(
 */
static uint32_t bge_mbuf_pool_base	= MBUF_POOL_BASE_DEFAULT;
static uint32_t bge_mbuf_pool_base_5704	= MBUF_POOL_BASE_5704;
static uint32_t bge_mbuf_pool_base_5705	= MBUF_POOL_BASE_5705;
static uint32_t bge_mbuf_pool_base_5721 = MBUF_POOL_BASE_5721;
static uint32_t bge_mbuf_pool_len	= MBUF_POOL_LENGTH_DEFAULT;
static uint32_t bge_mbuf_pool_len_5704	= MBUF_POOL_LENGTH_5704;
static uint32_t bge_mbuf_pool_len_5705	= 0;	/* use h/w default	*/
static uint32_t bge_mbuf_pool_len_5721	= 0;	/* use h/w default	*/
130 
/*
 * Various high and low water marks, thresholds, etc ...
 *
 * Note: these are taken from revision 7 of the PRM, and some are different
 * from both the values in earlier PRMs *and* those determined experimentally
 * and used in earlier versions of this driver ...
 */
static uint32_t bge_mbuf_hi_water	= MBUF_HIWAT_DEFAULT;
static uint32_t bge_mbuf_lo_water_rmac	= MAC_RX_MBUF_LOWAT_DEFAULT;
static uint32_t bge_mbuf_lo_water_rdma	= RDMA_MBUF_LOWAT_DEFAULT;

static uint32_t bge_dmad_lo_water	= DMAD_POOL_LOWAT_DEFAULT;
static uint32_t bge_dmad_hi_water	= DMAD_POOL_HIWAT_DEFAULT;
static uint32_t bge_lowat_recv_frames	= LOWAT_MAX_RECV_FRAMES_DEFAULT;

static uint32_t bge_replenish_std	= STD_RCV_BD_REPLENISH_DEFAULT;
static uint32_t bge_replenish_mini	= MINI_RCV_BD_REPLENISH_DEFAULT;
static uint32_t bge_replenish_jumbo	= JUMBO_RCV_BD_REPLENISH_DEFAULT;

static uint32_t	bge_watchdog_count	= 1 << 16;
static uint16_t bge_dma_miss_limit	= 20;

static uint32_t bge_stop_start_on_sync	= 0;

/*
 * bge_intr_max_loop controls the maximum number of passes through the
 * service loop within bge_intr().  It is useful when loading the NIC
 * with heavy network traffic: increasing this value can improve
 * throughput, but it also increases the time a single bge ISR can stay
 * on a CPU, which can hurt interactive responsiveness.  So tune this
 * with caution.
 */
static int bge_intr_max_loop = 1;
163 
164 /*
165  * ========== Low-level chip & ring buffer manipulation ==========
166  */
167 
168 #define	BGE_DBG		BGE_DBG_REGS	/* debug flag for this code	*/
169 
170 
171 /*
172  * Config space read-modify-write routines
173  */
174 
#if	BGE_CFG_IO8

static void bge_cfg_clr16(bge_t *bgep, bge_regno_t regno, uint16_t bits);
#pragma	inline(bge_cfg_clr16)

/*
 * Read-modify-write: clear <bits> in the 16-bit config-space register
 * at offset <regno>.
 */
static void
bge_cfg_clr16(bge_t *bgep, bge_regno_t regno, uint16_t bits)
{
	uint16_t oldval;
	uint16_t newval;

	BGE_TRACE(("bge_cfg_clr16($%p, 0x%lx, 0x%x)",
	    (void *)bgep, regno, bits));

	oldval = pci_config_get16(bgep->cfg_handle, regno);
	newval = oldval & ~bits;

	BGE_DEBUG(("bge_cfg_clr16($%p, 0x%lx, 0x%x): 0x%x => 0x%x",
	    (void *)bgep, regno, bits, oldval, newval));

	pci_config_put16(bgep->cfg_handle, regno, newval);
}

#endif	/* BGE_CFG_IO8 */
198 
199 static void bge_cfg_clr32(bge_t *bgep, bge_regno_t regno, uint32_t bits);
200 #pragma	inline(bge_cfg_clr32)
201 
202 static void
203 bge_cfg_clr32(bge_t *bgep, bge_regno_t regno, uint32_t bits)
204 {
205 	uint32_t regval;
206 
207 	BGE_TRACE(("bge_cfg_clr32($%p, 0x%lx, 0x%x)",
208 	    (void *)bgep, regno, bits));
209 
210 	regval = pci_config_get32(bgep->cfg_handle, regno);
211 
212 	BGE_DEBUG(("bge_cfg_clr32($%p, 0x%lx, 0x%x): 0x%x => 0x%x",
213 	    (void *)bgep, regno, bits, regval, regval & ~bits));
214 
215 	regval &= ~bits;
216 	pci_config_put32(bgep->cfg_handle, regno, regval);
217 }
218 
219 #if	BGE_IND_IO32
220 
221 /*
222  * Indirect access to registers & RISC scratchpads, using config space
223  * accesses only.
224  *
225  * This isn't currently used, but someday we might want to use it for
226  * restoring the Subsystem Device/Vendor registers (which aren't directly
227  * writable in Config Space), or for downloading firmware into the RISCs
228  *
229  * In any case there are endian issues to be resolved before this code is
230  * enabled; the bizarre way that bytes get twisted by this chip AND by
231  * the PCI bridge in SPARC systems mean that we shouldn't enable it until
232  * it's been thoroughly tested for all access sizes on all supported
233  * architectures (SPARC *and* x86!).
234  */
235 uint32_t bge_ind_get32(bge_t *bgep, bge_regno_t regno);
236 #pragma	inline(bge_ind_get32)
237 
238 uint32_t
239 bge_ind_get32(bge_t *bgep, bge_regno_t regno)
240 {
241 	uint32_t val;
242 
243 	BGE_TRACE(("bge_ind_get32($%p, 0x%lx)", (void *)bgep, regno));
244 
245 #ifdef __sparc
246 	if (DEVICE_5717_SERIES_CHIPSETS(bgep))
247 		regno = LE_32(regno);
248 #endif
249 	pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_RIAAR, regno);
250 	val = pci_config_get32(bgep->cfg_handle, PCI_CONF_BGE_RIADR);
251 
252 	BGE_DEBUG(("bge_ind_get32($%p, 0x%lx) => 0x%x",
253 	    (void *)bgep, regno, val));
254 
255 	val = LE_32(val);
256 
257 	return (val);
258 }
259 
260 void bge_ind_put32(bge_t *bgep, bge_regno_t regno, uint32_t val);
261 #pragma	inline(bge_ind_put32)
262 
263 void
264 bge_ind_put32(bge_t *bgep, bge_regno_t regno, uint32_t val)
265 {
266 	BGE_TRACE(("bge_ind_put32($%p, 0x%lx, 0x%x)",
267 	    (void *)bgep, regno, val));
268 
269 	val = LE_32(val);
270 #ifdef __sparc
271 	if (DEVICE_5717_SERIES_CHIPSETS(bgep))
272 		regno = LE_32(regno);
273 #endif
274 	pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_RIAAR, regno);
275 	pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_RIADR, val);
276 }
277 
278 #endif	/* BGE_IND_IO32 */
279 
#if	BGE_DEBUGGING

static void bge_pci_check(bge_t *bgep);
#pragma	no_inline(bge_pci_check)

/*
 * Debug aid: sample the PCI status register and log a message if a
 * received master-abort or target-abort is flagged.
 */
static void
bge_pci_check(bge_t *bgep)
{
	uint16_t status;

	status = pci_config_get16(bgep->cfg_handle, PCI_CONF_STAT);
	if (status & (PCI_STAT_R_MAST_AB | PCI_STAT_R_TARG_AB))
		BGE_DEBUG(("bge_pci_check($%p): PCI status 0x%x",
		    (void *)bgep, status));
}

#endif	/* BGE_DEBUGGING */
297 
/*
 * Perform first-stage chip (re-)initialisation, using only config-space
 * accesses:
 *
 * + Read the vendor/device/revision/subsystem/cache-line-size registers,
 *   returning the data in the structure pointed to by <cidp>.
 * + Configure the target-mode endianness (swap) options.
 * + Disable interrupts and enable Memory Space accesses.
 * + Enable or disable Bus Mastering according to the <enable_dma> flag.
 *
 * This sequence is adapted from Broadcom document 570X-PG102-R,
 * page 102, steps 1-3, 6-8 and 11-13.  The omitted parts of the sequence
 * are 4 and 5 (Reset Core and wait) which are handled elsewhere.
 *
 * This function MUST be called before any non-config-space accesses
 * are made; on this first call <enable_dma> is B_FALSE, and it
 * effectively performs steps 3-1(!) of the initialisation sequence
 * (the rest are not required but should be harmless).
 *
 * It MUST also be called after a chip reset, as this disables
 * Memory Space cycles!  In this case, <enable_dma> is B_TRUE, and
 * it is effectively performing steps 6-8.
 */
void bge_chip_cfg_init(bge_t *bgep, chip_id_t *cidp, boolean_t enable_dma);
#pragma	no_inline(bge_chip_cfg_init)

void
bge_chip_cfg_init(bge_t *bgep, chip_id_t *cidp, boolean_t enable_dma)
{
	ddi_acc_handle_t handle;
	uint16_t command;
	uint32_t mhcr;
	uint16_t value16;
	int i;

	BGE_TRACE(("bge_chip_cfg_init($%p, $%p, %d)",
	    (void *)bgep, (void *)cidp, enable_dma));

	/*
	 * Step 3: save PCI cache line size and subsystem vendor ID
	 *
	 * Read all the config-space registers that characterise the
	 * chip, specifically vendor/device/revision/subsystem vendor
	 * and subsystem device id.  We expect (but don't check) that
	 * (vendor == VENDOR_ID_BROADCOM) && (device == DEVICE_ID_5704)
	 *
	 * Also save all bus-transaction related registers (cache-line
	 * size, bus-grant/latency parameters, etc).  Some of these are
	 * cleared by reset, so we'll have to restore them later.  This
	 * comes from the Broadcom document 570X-PG102-R ...
	 *
	 * Note: Broadcom document 570X-PG102-R seems to be in error
	 * here w.r.t. the offsets of the Subsystem Vendor ID and
	 * Subsystem (Device) ID registers, which are the opposite way
	 * round according to the PCI standard.  For good measure, we
	 * save/restore both anyway.
	 */
	handle = bgep->cfg_handle;

	/*
	 * For some chipsets (e.g., BCM5718), if MHCR_ENABLE_ENDIAN_BYTE_SWAP
	 * has been set in PCI_CONF_COMM already, we need to write the
	 * byte-swapped value to it. So we just write zero first for simplicity.
	 */
	cidp->device = pci_config_get16(handle, PCI_CONF_DEVID);
	if (DEVICE_5717_SERIES_CHIPSETS(bgep))
		pci_config_put32(handle, PCI_CONF_BGE_MHCR, 0);
	mhcr = pci_config_get32(handle, PCI_CONF_BGE_MHCR);
	cidp->asic_rev = mhcr & MHCR_CHIP_REV_MASK;
	cidp->businfo = pci_config_get32(handle, PCI_CONF_BGE_PCISTATE);
	cidp->command = pci_config_get16(handle, PCI_CONF_COMM);

	cidp->vendor = pci_config_get16(handle, PCI_CONF_VENID);
	cidp->subven = pci_config_get16(handle, PCI_CONF_SUBVENID);
	cidp->subdev = pci_config_get16(handle, PCI_CONF_SUBSYSID);
	cidp->revision = pci_config_get8(handle, PCI_CONF_REVID);
	cidp->clsize = pci_config_get8(handle, PCI_CONF_CACHE_LINESZ);
	cidp->latency = pci_config_get8(handle, PCI_CONF_LATENCY_TIMER);

	BGE_DEBUG(("bge_chip_cfg_init: %s bus is %s and %s; #INTA is %s",
	    cidp->businfo & PCISTATE_BUS_IS_PCI ? "PCI" : "PCI-X",
	    cidp->businfo & PCISTATE_BUS_IS_FAST ? "fast" : "slow",
	    cidp->businfo & PCISTATE_BUS_IS_32_BIT ? "narrow" : "wide",
	    cidp->businfo & PCISTATE_INTA_STATE ? "high" : "low"));
	BGE_DEBUG(("bge_chip_cfg_init: vendor 0x%x device 0x%x revision 0x%x",
	    cidp->vendor, cidp->device, cidp->revision));
	BGE_DEBUG(("bge_chip_cfg_init: subven 0x%x subdev 0x%x asic_rev 0x%x",
	    cidp->subven, cidp->subdev, cidp->asic_rev));
	BGE_DEBUG(("bge_chip_cfg_init: clsize %d latency %d command 0x%x",
	    cidp->clsize, cidp->latency, cidp->command));

	/*
	 * Step 2 (also step 6): disable and clear interrupts.
	 * Steps 11-13: configure PIO endianness options, and enable
	 * indirect register access.  We'll also select any other
	 * options controlled by the MHCR (e.g. tagged status, mask
	 * interrupt mode) at this stage ...
	 *
	 * Note: internally, the chip is 64-bit and BIG-endian, but
	 * since it talks to the host over a (LITTLE-endian) PCI bus,
	 * it normally swaps bytes around at the PCI interface.
	 * However, the PCI host bridge on SPARC systems normally
	 * swaps the byte lanes around too, since SPARCs are also
	 * BIG-endian.  So it turns out that on SPARC, the right
	 * option is to tell the chip to swap (and the host bridge
	 * will swap back again), whereas on x86 we ask the chip
	 * NOT to swap, so the natural little-endianness of the
	 * PCI bus is assumed.  Then the only thing that doesn't
	 * automatically work right is access to an 8-byte register
	 * by a little-endian host; but we don't want to set the
	 * MHCR_ENABLE_REGISTER_WORD_SWAP bit because then 4-byte
	 * accesses don't go where expected ;-(  So we live with
	 * that, and perform word-swaps in software in the few cases
	 * where a chip register is defined as an 8-byte value --
	 * see the code below for details ...
	 *
	 * Note: the meaning of the 'MASK_INTERRUPT_MODE' bit isn't
	 * very clear in the register description in the PRM, but
	 * Broadcom document 570X-PG104-R page 248 explains a little
	 * more (under "Broadcom Mask Mode").  The bit changes the way
	 * the MASK_PCI_INT_OUTPUT bit works: with MASK_INTERRUPT_MODE
	 * clear, the chip interprets MASK_PCI_INT_OUTPUT in the same
	 * way as the 5700 did, which isn't very convenient.  Setting
	 * the MASK_INTERRUPT_MODE bit makes the MASK_PCI_INT_OUTPUT
	 * bit do just what its name says -- MASK the PCI #INTA output
	 * (i.e. deassert the signal at the pin) leaving all internal
	 * state unchanged.  This is much more convenient for our
	 * interrupt handler, so we set MASK_INTERRUPT_MODE here.
	 *
	 * Note: the inconvenient semantics of the interrupt mailbox
	 * (nonzero disables and acknowledges/clears the interrupt,
	 * zero enables AND CLEARS it) would make race conditions
	 * likely in the interrupt handler:
	 *
	 * (1)	acknowledge & disable interrupts
	 * (2)	while (more to do)
	 * 		process packets
	 * (3)	enable interrupts -- also clears pending
	 *
	 * If the chip received more packets and internally generated
	 * an interrupt between the check at (2) and the mbox write
	 * at (3), this interrupt would be lost :-(
	 *
	 * The best way to avoid this is to use TAGGED STATUS mode,
	 * where the chip includes a unique tag in each status block
	 * update, and the host, when re-enabling interrupts, passes
	 * the last tag it saw back to the chip; then the chip can
	 * see whether the host is truly up to date, and regenerate
	 * its interrupt if not.
	 */
	mhcr =	MHCR_ENABLE_INDIRECT_ACCESS |
	    MHCR_ENABLE_TAGGED_STATUS_MODE |
	    MHCR_MASK_INTERRUPT_MODE |
	    MHCR_CLEAR_INTERRUPT_INTA;

	if (bgep->intr_type == DDI_INTR_TYPE_FIXED)
		mhcr |= MHCR_MASK_PCI_INT_OUTPUT;

#ifdef	_BIG_ENDIAN
	mhcr |= MHCR_ENABLE_ENDIAN_WORD_SWAP | MHCR_ENABLE_ENDIAN_BYTE_SWAP;
#endif	/* _BIG_ENDIAN */

	/* as above: 5717-series parts want MHCR cleared before rewriting it */
	if (DEVICE_5717_SERIES_CHIPSETS(bgep))
		pci_config_put32(handle, PCI_CONF_BGE_MHCR, 0);
	pci_config_put32(handle, PCI_CONF_BGE_MHCR, mhcr);

#ifdef BGE_IPMI_ASF
	bgep->asf_wordswapped = B_FALSE;
#endif
	/*
	 * Step 1 (also step 7): Enable PCI Memory Space accesses
	 *			 Disable Memory Write/Invalidate
	 *			 Enable or disable Bus Mastering
	 *
	 * Note that all other bits are taken from the original value saved
	 * the first time through here, rather than from the current register
	 * value, 'cos that will have been cleared by a soft RESET since.
	 * In this way we preserve the OBP/nexus-parent's preferred settings
	 * of the parity-error and system-error enable bits across multiple
	 * chip RESETs.
	 */
	command = bgep->chipid.command | PCI_COMM_MAE;
	command &= ~(PCI_COMM_ME|PCI_COMM_MEMWR_INVAL);
	if (enable_dma)
		command |= PCI_COMM_ME;
	/*
	 * on BCM5714 revision A0, false parity error gets generated
	 * due to a logic bug. Provide a workaround by disabling parity
	 * error.
	 */
	if (((cidp->device == DEVICE_ID_5714C) ||
	    (cidp->device == DEVICE_ID_5714S)) &&
	    (cidp->revision == REVISION_ID_5714_A0)) {
		command &= ~PCI_COMM_PARITY_DETECT;
	}
	pci_config_put16(handle, PCI_CONF_COMM, command);

	/*
	 * On some PCI-E device, there were instances when
	 * the device was still link training; poll for the command
	 * register to take (up to 100 x 200us = 20ms).
	 */
	if (bgep->chipid.pci_type == BGE_PCI_E) {
		i = 0;
		value16 = pci_config_get16(handle, PCI_CONF_COMM);
		while ((value16 != command) && (i < 100)) {
			drv_usecwait(200);
			value16 = pci_config_get16(handle, PCI_CONF_COMM);
			++i;
		}
	}

	/*
	 * Clear any remaining error status bits (write-1-to-clear)
	 */
	pci_config_put16(handle, PCI_CONF_STAT, ~0);

	/*
	 * Do following if and only if the device is NOT BCM5714C OR
	 * BCM5715C
	 */
	if (!((cidp->device == DEVICE_ID_5714C) ||
	    (cidp->device == DEVICE_ID_5715C))) {
		/*
		 * Make sure these indirect-access registers are sane
		 * rather than random after power-up or reset
		 */
		pci_config_put32(handle, PCI_CONF_BGE_RIAAR, 0);
		pci_config_put32(handle, PCI_CONF_BGE_MWBAR, 0);
	}
	/*
	 * Step 8: Disable PCI-X/PCI-E Relaxed Ordering
	 */
	bge_cfg_clr16(bgep, PCIX_CONF_COMM, PCIX_COMM_RELAXED);

	if (cidp->pci_type == BGE_PCI_E) {
		if (DEVICE_5723_SERIES_CHIPSETS(bgep)) {
			bge_cfg_clr16(bgep, PCI_CONF_DEV_CTRL_5723,
			    DEV_CTRL_NO_SNOOP | DEV_CTRL_RELAXED);
		} else
			bge_cfg_clr16(bgep, PCI_CONF_DEV_CTRL,
			    DEV_CTRL_NO_SNOOP | DEV_CTRL_RELAXED);
	}
}
541 
542 #ifdef __amd64
543 /*
544  * Distinguish CPU types
545  *
546  * These use to  distinguish AMD64 or Intel EM64T of CPU running mode.
547  * If CPU runs on Intel EM64T mode,the 64bit operation cannot works fine
548  * for PCI-Express based network interface card. This is the work-around
549  * for those nics.
550  */
551 static boolean_t bge_get_em64t_type(void);
552 #pragma	inline(bge_get_em64t_type)
553 
554 static boolean_t
555 bge_get_em64t_type(void)
556 {
557 
558 	return (x86_vendor == X86_VENDOR_Intel);
559 }
560 #endif
561 
562 /*
563  * Operating register get/set access routines
564  */
565 
566 uint32_t bge_reg_get32(bge_t *bgep, bge_regno_t regno);
567 #pragma	inline(bge_reg_get32)
568 
569 uint32_t
570 bge_reg_get32(bge_t *bgep, bge_regno_t regno)
571 {
572 	BGE_TRACE(("bge_reg_get32($%p, 0x%lx)",
573 	    (void *)bgep, regno));
574 
575 	return (ddi_get32(bgep->io_handle, PIO_ADDR(bgep, regno)));
576 }
577 
578 void bge_reg_put32(bge_t *bgep, bge_regno_t regno, uint32_t data);
579 #pragma	inline(bge_reg_put32)
580 
581 void
582 bge_reg_put32(bge_t *bgep, bge_regno_t regno, uint32_t data)
583 {
584 	BGE_TRACE(("bge_reg_put32($%p, 0x%lx, 0x%x)",
585 	    (void *)bgep, regno, data));
586 
587 	ddi_put32(bgep->io_handle, PIO_ADDR(bgep, regno), data);
588 	BGE_PCICHK(bgep);
589 }
590 
591 void bge_reg_set32(bge_t *bgep, bge_regno_t regno, uint32_t bits);
592 #pragma	inline(bge_reg_set32)
593 
594 void
595 bge_reg_set32(bge_t *bgep, bge_regno_t regno, uint32_t bits)
596 {
597 	uint32_t regval;
598 
599 	BGE_TRACE(("bge_reg_set32($%p, 0x%lx, 0x%x)",
600 	    (void *)bgep, regno, bits));
601 
602 	regval = bge_reg_get32(bgep, regno);
603 	regval |= bits;
604 	bge_reg_put32(bgep, regno, regval);
605 }
606 
607 void bge_reg_clr32(bge_t *bgep, bge_regno_t regno, uint32_t bits);
608 #pragma	inline(bge_reg_clr32)
609 
610 void
611 bge_reg_clr32(bge_t *bgep, bge_regno_t regno, uint32_t bits)
612 {
613 	uint32_t regval;
614 
615 	BGE_TRACE(("bge_reg_clr32($%p, 0x%lx, 0x%x)",
616 	    (void *)bgep, regno, bits));
617 
618 	regval = bge_reg_get32(bgep, regno);
619 	regval &= ~bits;
620 	bge_reg_put32(bgep, regno, regval);
621 }
622 
static uint64_t bge_reg_get64(bge_t *bgep, bge_regno_t regno);
#pragma	inline(bge_reg_get64)

/*
 * Read a (nominally) 64-bit operating register.
 *
 * On chips/platforms where a single 64-bit PIO access doesn't work
 * (5723/5717 series, and Intel EM64T hosts -- see bge_get_em64t_type()),
 * the register is read as two 32-bit halves instead; note that the half
 * read first differs between amd64 and sparc.
 */
static uint64_t
bge_reg_get64(bge_t *bgep, bge_regno_t regno)
{
	uint64_t regval;

#ifdef	__amd64
	if (DEVICE_5723_SERIES_CHIPSETS(bgep) || bge_get_em64t_type() ||
	    DEVICE_5717_SERIES_CHIPSETS(bgep)) {
		/* two 32-bit reads: upper half from regno+4, lower from regno */
		regval = ddi_get32(bgep->io_handle, PIO_ADDR(bgep, regno + 4));
		regval <<= 32;
		regval |= ddi_get32(bgep->io_handle, PIO_ADDR(bgep, regno));
	} else {
		regval = ddi_get64(bgep->io_handle, PIO_ADDR(bgep, regno));
	}
#elif defined(__sparc)
	if (DEVICE_5723_SERIES_CHIPSETS(bgep) ||
	    DEVICE_5717_SERIES_CHIPSETS(bgep)) {
		/* two 32-bit reads: upper half from regno, lower from regno+4 */
		regval = ddi_get32(bgep->io_handle, PIO_ADDR(bgep, regno));
		regval <<= 32;
		regval |= ddi_get32(bgep->io_handle, PIO_ADDR(bgep, regno + 4));
	} else {
		regval = ddi_get64(bgep->io_handle, PIO_ADDR(bgep, regno));
	}
#else
	regval = ddi_get64(bgep->io_handle, PIO_ADDR(bgep, regno));
#endif

#ifdef	_LITTLE_ENDIAN
	/*
	 * Swap the two 32-bit words: the chip is big-endian internally,
	 * so 8-byte registers arrive word-swapped on little-endian hosts
	 * (see the endianness discussion in bge_chip_cfg_init()).
	 */
	regval = (regval >> 32) | (regval << 32);
#endif	/* _LITTLE_ENDIAN */

	BGE_TRACE(("bge_reg_get64($%p, 0x%lx) = 0x%016llx",
	    (void *)bgep, regno, regval));

	return (regval);
}
662 
static void bge_reg_put64(bge_t *bgep, bge_regno_t regno, uint64_t data);
#pragma	inline(bge_reg_put64)

/*
 * Write a (nominally) 64-bit operating register.
 *
 * On chips/platforms where a single 64-bit PIO access doesn't work
 * (5723/5717 series, and Intel EM64T hosts -- see bge_get_em64t_type()),
 * the value is written as two 32-bit halves instead; note that the half
 * written first differs between amd64 and sparc.
 */
static void
bge_reg_put64(bge_t *bgep, bge_regno_t regno, uint64_t data)
{
	BGE_TRACE(("bge_reg_put64($%p, 0x%lx, 0x%016llx)",
	    (void *)bgep, regno, data));

#ifdef	_LITTLE_ENDIAN
	/*
	 * Pre-swap the two 32-bit words: the chip is big-endian
	 * internally (see the endianness discussion in
	 * bge_chip_cfg_init()).
	 */
	data = ((data >> 32) | (data << 32));
#endif	/* _LITTLE_ENDIAN */

#ifdef	__amd64
	if (DEVICE_5723_SERIES_CHIPSETS(bgep) || bge_get_em64t_type() ||
	    DEVICE_5717_SERIES_CHIPSETS(bgep)) {
		ddi_put32(bgep->io_handle,
		    PIO_ADDR(bgep, regno), (uint32_t)data);
		BGE_PCICHK(bgep);
		ddi_put32(bgep->io_handle,
		    PIO_ADDR(bgep, regno + 4), (uint32_t)(data >> 32));

	} else {
		ddi_put64(bgep->io_handle, PIO_ADDR(bgep, regno), data);
	}
#elif defined(__sparc)
	if (DEVICE_5723_SERIES_CHIPSETS(bgep) ||
	    DEVICE_5717_SERIES_CHIPSETS(bgep)) {
		ddi_put32(bgep->io_handle,
		    PIO_ADDR(bgep, regno + 4), (uint32_t)data);
		BGE_PCICHK(bgep);
		ddi_put32(bgep->io_handle,
		    PIO_ADDR(bgep, regno), (uint32_t)(data >> 32));
	} else {
		ddi_put64(bgep->io_handle, PIO_ADDR(bgep, regno), data);
	}
#else
	ddi_put64(bgep->io_handle, PIO_ADDR(bgep, regno), data);
#endif

	BGE_PCICHK(bgep);
}
705 
706 /*
707  * The DDI doesn't provide get/put functions for 128 bit data
708  * so we put RCBs out as two 64-bit chunks instead.
709  */
710 static void bge_reg_putrcb(bge_t *bgep, bge_regno_t addr, bge_rcb_t *rcbp);
711 #pragma	inline(bge_reg_putrcb)
712 
713 static void
714 bge_reg_putrcb(bge_t *bgep, bge_regno_t addr, bge_rcb_t *rcbp)
715 {
716 	uint64_t *p;
717 
718 	BGE_TRACE(("bge_reg_putrcb($%p, 0x%lx, 0x%016llx:%04x:%04x:%08x)",
719 	    (void *)bgep, addr, rcbp->host_ring_addr,
720 	    rcbp->max_len, rcbp->flags, rcbp->nic_ring_addr));
721 
722 	ASSERT((addr % sizeof (*rcbp)) == 0);
723 
724 	p = (void *)rcbp;
725 	bge_reg_put64(bgep, addr, *p++);
726 	bge_reg_put64(bgep, addr+8, *p);
727 }
728 
729 void bge_mbx_put(bge_t *bgep, bge_regno_t regno, uint64_t data);
730 #pragma	inline(bge_mbx_put)
731 
732 void
733 bge_mbx_put(bge_t *bgep, bge_regno_t regno, uint64_t data)
734 {
735 	if (DEVICE_5906_SERIES_CHIPSETS(bgep))
736 		regno += INTERRUPT_LP_MBOX_0_REG - INTERRUPT_MBOX_0_REG + 4;
737 
738 	BGE_TRACE(("bge_mbx_put($%p, 0x%lx, 0x%016llx)",
739 	    (void *)bgep, regno, data));
740 
741 	/*
742 	 * Mailbox registers are nominally 64 bits on the 5701, but
743 	 * the MSW isn't used.  On the 5703, they're only 32 bits
744 	 * anyway.  So here we just write the lower(!) 32 bits -
745 	 * remembering that the chip is big-endian, even though the
746 	 * PCI bus is little-endian ...
747 	 */
748 #ifdef	_BIG_ENDIAN
749 	ddi_put32(bgep->io_handle, PIO_ADDR(bgep, regno+4), (uint32_t)data);
750 #else
751 	ddi_put32(bgep->io_handle, PIO_ADDR(bgep, regno), (uint32_t)data);
752 #endif	/* _BIG_ENDIAN */
753 	BGE_PCICHK(bgep);
754 }
755 
756 uint32_t bge_mbx_get(bge_t *bgep, bge_regno_t regno);
757 #pragma inline(bge_mbx_get)
758 
759 uint32_t
760 bge_mbx_get(bge_t *bgep, bge_regno_t regno)
761 {
762 	uint32_t val32;
763 
764 	if (DEVICE_5906_SERIES_CHIPSETS(bgep))
765 		regno += INTERRUPT_LP_MBOX_0_REG - INTERRUPT_MBOX_0_REG + 4;
766 
767 	BGE_TRACE(("bge_mbx_get($%p, 0x%lx)",
768 	    (void *)bgep, regno));
769 
770 #ifdef	_BIG_ENDIAN
771 	val32 = ddi_get32(bgep->io_handle, PIO_ADDR(bgep, regno+4));
772 #else
773 	val32 = ddi_get32(bgep->io_handle, PIO_ADDR(bgep, regno));
774 #endif	/* _BIG_ENDIAN */
775 	BGE_PCICHK(bgep);
776 
777 	BGE_DEBUG(("bge_mbx_get($%p, 0x%lx) => 0x%08x",
778 	    (void *)bgep, regno, val32));
779 
780 	return (val32);
781 }
782 
783 
#if	BGE_DEBUGGING

void bge_led_mark(bge_t *bgep);
#pragma	no_inline(bge_led_mark)

void
bge_led_mark(bge_t *bgep)
{
	uint32_t led_ctrl = LED_CONTROL_OVERRIDE_LINK |
	    LED_CONTROL_1000MBPS_LED |
	    LED_CONTROL_100MBPS_LED |
	    LED_CONTROL_10MBPS_LED;

	/*
	 * Blink all three LINK LEDs on simultaneously, then all off,
	 * then restore to automatic hardware control.  This is used
	 * in laboratory testing to trigger a logic analyser or scope.
	 */
	/* set override + all three LED bits: all LEDs lit */
	bge_reg_set32(bgep, ETHERNET_MAC_LED_CONTROL_REG, led_ctrl);
	/* clear just the three LED bits (override stays set): all off */
	led_ctrl ^= LED_CONTROL_OVERRIDE_LINK;
	bge_reg_clr32(bgep, ETHERNET_MAC_LED_CONTROL_REG, led_ctrl);
	/* finally clear the override bit: back to hardware control */
	led_ctrl = LED_CONTROL_OVERRIDE_LINK;
	bge_reg_clr32(bgep, ETHERNET_MAC_LED_CONTROL_REG, led_ctrl);
}

#endif	/* BGE_DEBUGGING */
810 
811 /*
812  * NIC on-chip memory access routines
813  *
814  * Only 32K of NIC memory is visible at a time, controlled by the
815  * Memory Window Base Address Register (in PCI config space).  Once
816  * this is set, the 32K region of NIC-local memory that it refers
817  * to can be directly addressed in the upper 32K of the 64K of PCI
818  * memory space used for the device.
819  */
820 
821 static void bge_nic_setwin(bge_t *bgep, bge_regno_t base);
822 #pragma	inline(bge_nic_setwin)
823 
824 static void
825 bge_nic_setwin(bge_t *bgep, bge_regno_t base)
826 {
827 	chip_id_t *cidp;
828 
829 	BGE_TRACE(("bge_nic_setwin($%p, 0x%lx)",
830 	    (void *)bgep, base));
831 
832 	ASSERT((base & MWBAR_GRANULE_MASK) == 0);
833 
834 	/*
835 	 * Don't do repeated zero data writes,
836 	 * if the device is BCM5714C/15C.
837 	 */
838 	cidp = &bgep->chipid;
839 	if ((cidp->device == DEVICE_ID_5714C) ||
840 	    (cidp->device == DEVICE_ID_5715C)) {
841 		if (bgep->lastWriteZeroData && (base == (bge_regno_t)0))
842 			return;
843 		/* Adjust lastWriteZeroData */
844 		bgep->lastWriteZeroData = ((base == (bge_regno_t)0) ?
845 		    B_TRUE : B_FALSE);
846 	}
847 #ifdef __sparc
848 	if (DEVICE_5717_SERIES_CHIPSETS(bgep))
849 		base = LE_32(base);
850 #endif
851 	pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_MWBAR, base);
852 }
853 
static uint32_t bge_nic_get32(bge_t *bgep, bge_regno_t addr);
#pragma	inline(bge_nic_get32)

/*
 * Read a 32-bit word of NIC-local memory at byte offset <addr>.
 */
static uint32_t
bge_nic_get32(bge_t *bgep, bge_regno_t addr)
{
	uint32_t data;

#if defined(BGE_IPMI_ASF) && !defined(__sparc)
	if (bgep->asf_enabled && !bgep->asf_wordswapped) {
		/*
		 * workaround for word swap error: flip address bit 2 so
		 * that the other word of each 64-bit pair is addressed
		 */
		if (addr & 4)
			addr = addr - 4;
		else
			addr = addr + 4;
	}
#endif

#ifdef __sparc
	/* on SPARC, NIC memory is read indirectly via config space */
	data = bge_nic_read32(bgep, addr);
#else
	/* point the 32K window at the region containing <addr> ... */
	bge_nic_setwin(bgep, addr & ~MWBAR_GRANULE_MASK);
	addr &= MWBAR_GRANULE_MASK;
	addr += NIC_MEM_WINDOW_OFFSET;

	/* ... and read through it */
	data = ddi_get32(bgep->io_handle, PIO_ADDR(bgep, addr));
#endif

	BGE_TRACE(("bge_nic_get32($%p, 0x%lx) = 0x%08x",
	    (void *)bgep, addr, data));

	return (data);
}
887 
void bge_nic_put32(bge_t *bgep, bge_regno_t addr, uint32_t data);
#pragma inline(bge_nic_put32)

/*
 * Write a 32-bit word of NIC-local memory at byte offset <addr>.
 */
void
bge_nic_put32(bge_t *bgep, bge_regno_t addr, uint32_t data)
{
	BGE_TRACE(("bge_nic_put32($%p, 0x%lx, 0x%08x)",
	    (void *)bgep, addr, data));

#if defined(BGE_IPMI_ASF) && !defined(__sparc)
	if (bgep->asf_enabled && !bgep->asf_wordswapped) {
		/*
		 * workaround for word swap error: flip address bit 2 so
		 * that the other word of each 64-bit pair is addressed
		 */
		if (addr & 4)
			addr = addr - 4;
		else
			addr = addr + 4;
	}
#endif

#ifdef __sparc
	/*
	 * On SPARC, write indirectly via the config-space window
	 * address/data (MWBAR/MWDAR) registers, byte-swapping the data,
	 * and park the window back at zero afterwards.
	 */
	if (DEVICE_5717_SERIES_CHIPSETS(bgep))
		addr = LE_32(addr);
	pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_MWBAR, addr);
	data = LE_32(data);
	pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_MWDAR, data);
	pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_MWBAR, 0);
#else
	/* point the 32K window at the region containing <addr> ... */
	bge_nic_setwin(bgep, addr & ~MWBAR_GRANULE_MASK);
	addr &= MWBAR_GRANULE_MASK;
	addr += NIC_MEM_WINDOW_OFFSET;
	/* ... and write through it */
	ddi_put32(bgep->io_handle, PIO_ADDR(bgep, addr), data);
	BGE_PCICHK(bgep);
#endif
}
922 
923 static uint64_t bge_nic_get64(bge_t *bgep, bge_regno_t addr);
924 #pragma	inline(bge_nic_get64)
925 
/*
 * Read a 64-bit value from NIC-internal memory at <addr>.
 *
 * On 5723/5717-series chips (and EM64T hosts on amd64) the value is
 * assembled from two 32-bit reads instead of one 64-bit PIO access;
 * the half ordering differs between amd64 and SPARC.
 */
static uint64_t
bge_nic_get64(bge_t *bgep, bge_regno_t addr)
{
	uint64_t data;

	bge_nic_setwin(bgep, addr & ~MWBAR_GRANULE_MASK);
	addr &= MWBAR_GRANULE_MASK;
	addr += NIC_MEM_WINDOW_OFFSET;

#ifdef	__amd64
	if (DEVICE_5723_SERIES_CHIPSETS(bgep) || bge_get_em64t_type() ||
	    DEVICE_5717_SERIES_CHIPSETS(bgep)) {
		/*
		 * NOTE(review): here <addr + 4> supplies the HIGH half,
		 * which is the opposite of bge_nic_put64()'s split-write
		 * ordering on the same chips -- verify the two paths are
		 * used on correspondingly-laid-out NIC structures.
		 */
		data = ddi_get32(bgep->io_handle,
		    PIO_ADDR(bgep, addr + 4));
		data <<= 32;
		data |= ddi_get32(bgep->io_handle, PIO_ADDR(bgep, addr));
	} else {
		data = ddi_get64(bgep->io_handle, PIO_ADDR(bgep, addr));
	}
#elif defined(__sparc)
	if (DEVICE_5723_SERIES_CHIPSETS(bgep) ||
	    DEVICE_5717_SERIES_CHIPSETS(bgep)) {
		/* big-endian host: <addr> supplies the high half */
		data = ddi_get32(bgep->io_handle, PIO_ADDR(bgep, addr));
		data <<= 32;
		data |= ddi_get32(bgep->io_handle,
		    PIO_ADDR(bgep, addr + 4));
	} else {
		data = ddi_get64(bgep->io_handle, PIO_ADDR(bgep, addr));
	}
#else
	data = ddi_get64(bgep->io_handle, PIO_ADDR(bgep, addr));
#endif

	BGE_TRACE(("bge_nic_get64($%p, 0x%lx) = 0x%016llx",
	    (void *)bgep, addr, data));

	return (data);
}
964 
965 static void bge_nic_put64(bge_t *bgep, bge_regno_t addr, uint64_t data);
966 #pragma	inline(bge_nic_put64)
967 
/*
 * Write a 64-bit value to NIC-internal memory at <addr>.
 *
 * On 5723/5717-series chips (and EM64T hosts on amd64) the value is
 * written as two 32-bit stores instead of one 64-bit PIO access.
 */
static void
bge_nic_put64(bge_t *bgep, bge_regno_t addr, uint64_t data)
{
	BGE_TRACE(("bge_nic_put64($%p, 0x%lx, 0x%016llx)",
	    (void *)bgep, addr, data));

	bge_nic_setwin(bgep, addr & ~MWBAR_GRANULE_MASK);
	addr &= MWBAR_GRANULE_MASK;
	addr += NIC_MEM_WINDOW_OFFSET;

#ifdef	__amd64
	if (DEVICE_5723_SERIES_CHIPSETS(bgep) || bge_get_em64t_type() ||
	    DEVICE_5717_SERIES_CHIPSETS(bgep)) {
		/* low half to <addr + 4>, high half to <addr> */
		ddi_put32(bgep->io_handle,
		    PIO_ADDR(bgep, addr + 4), (uint32_t)data);
		BGE_PCICHK(bgep);
		ddi_put32(bgep->io_handle,
		    PIO_ADDR(bgep, addr), (uint32_t)(data >> 32));
	} else {
		ddi_put64(bgep->io_handle, PIO_ADDR(bgep, addr), data);
	}
#elif defined(__sparc)
	if (DEVICE_5723_SERIES_CHIPSETS(bgep) ||
	    DEVICE_5717_SERIES_CHIPSETS(bgep)) {
		ddi_put32(bgep->io_handle,
		    PIO_ADDR(bgep, addr + 4), (uint32_t)data);
		BGE_PCICHK(bgep);
		ddi_put32(bgep->io_handle,
		    PIO_ADDR(bgep, addr), (uint32_t)(data >> 32));
	} else {
		ddi_put64(bgep->io_handle, PIO_ADDR(bgep, addr), data);
	}
#else
	ddi_put64(bgep->io_handle, PIO_ADDR(bgep, addr), data);
#endif

	BGE_PCICHK(bgep);
}
1006 
1007 /*
1008  * The DDI doesn't provide get/put functions for 128 bit data
1009  * so we put RCBs out as two 64-bit chunks instead.
1010  */
1011 static void bge_nic_putrcb(bge_t *bgep, bge_regno_t addr, bge_rcb_t *rcbp);
1012 #pragma	inline(bge_nic_putrcb)
1013 
static void
bge_nic_putrcb(bge_t *bgep, bge_regno_t addr, bge_rcb_t *rcbp)
{
	uint64_t *p;

	BGE_TRACE(("bge_nic_putrcb($%p, 0x%lx, 0x%016llx:%04x:%04x:%08x)",
	    (void *)bgep, addr, rcbp->host_ring_addr,
	    rcbp->max_len, rcbp->flags, rcbp->nic_ring_addr));

	/* RCBs must land at RCB-size-aligned NIC addresses */
	ASSERT((addr % sizeof (*rcbp)) == 0);

	bge_nic_setwin(bgep, addr & ~MWBAR_GRANULE_MASK);
	addr &= MWBAR_GRANULE_MASK;
	addr += NIC_MEM_WINDOW_OFFSET;

	/* view the RCB as two consecutive 64-bit words */
	p = (void *)rcbp;
#ifdef	__amd64
	if (DEVICE_5723_SERIES_CHIPSETS(bgep) || bge_get_em64t_type() ||
	    DEVICE_5717_SERIES_CHIPSETS(bgep)) {
		/* split each 64-bit word into two 32-bit stores */
		ddi_put32(bgep->io_handle, PIO_ADDR(bgep, addr),
		    (uint32_t)(*p));
		ddi_put32(bgep->io_handle, PIO_ADDR(bgep, addr + 4),
		    (uint32_t)(*p++ >> 32));
		ddi_put32(bgep->io_handle, PIO_ADDR(bgep, addr + 8),
		    (uint32_t)(*p));
		ddi_put32(bgep->io_handle, PIO_ADDR(bgep, addr + 12),
		    (uint32_t)(*p >> 32));

	} else {
		ddi_put64(bgep->io_handle, PIO_ADDR(bgep, addr), *p++);
		ddi_put64(bgep->io_handle, PIO_ADDR(bgep, addr+8), *p);
	}
#elif defined(__sparc)
	if (DEVICE_5723_SERIES_CHIPSETS(bgep) ||
	    DEVICE_5717_SERIES_CHIPSETS(bgep)) {
		/* big-endian host: 32-bit halves go out in swapped order */
		ddi_put32(bgep->io_handle, PIO_ADDR(bgep, addr + 4),
		    (uint32_t)(*p));
		ddi_put32(bgep->io_handle, PIO_ADDR(bgep, addr),
		    (uint32_t)(*p++ >> 32));
		ddi_put32(bgep->io_handle, PIO_ADDR(bgep, addr + 12),
		    (uint32_t)(*p));
		ddi_put32(bgep->io_handle, PIO_ADDR(bgep, addr + 8),
		    (uint32_t)(*p >> 32));
	} else {
		ddi_put64(bgep->io_handle, PIO_ADDR(bgep, addr), *p++);
		ddi_put64(bgep->io_handle, PIO_ADDR(bgep, addr + 8), *p);
	}
#else
	ddi_put64(bgep->io_handle, PIO_ADDR(bgep, addr), *p++);
	ddi_put64(bgep->io_handle, PIO_ADDR(bgep, addr + 8), *p);
#endif

	BGE_PCICHK(bgep);
}
1068 
1069 static void bge_nic_zero(bge_t *bgep, bge_regno_t addr, uint32_t nbytes);
1070 #pragma	inline(bge_nic_zero)
1071 
/*
 * Zero a range of NIC-internal memory.
 */
static void
bge_nic_zero(bge_t *bgep, bge_regno_t addr, uint32_t nbytes)
{
	BGE_TRACE(("bge_nic_zero($%p, 0x%lx, 0x%x)",
	    (void *)bgep, addr, nbytes));

	/*
	 * The range must not cross a window granule boundary, since
	 * we only position the window once.
	 */
	ASSERT((addr & ~MWBAR_GRANULE_MASK) ==
	    ((addr+nbytes) & ~MWBAR_GRANULE_MASK));

	bge_nic_setwin(bgep, addr & ~MWBAR_GRANULE_MASK);
	addr &= MWBAR_GRANULE_MASK;
	addr += NIC_MEM_WINDOW_OFFSET;

	/* byte-wide zero fill through the PIO window */
	(void) ddi_device_zero(bgep->io_handle, PIO_ADDR(bgep, addr),
	    nbytes, 1, DDI_DATA_SZ08_ACC);
	BGE_PCICHK(bgep);
}
1089 
1090 /*
1091  * MII (PHY) register get/set access routines
1092  *
1093  * These use the chip's MII auto-access method, controlled by the
1094  * MII Communication register at 0x044c, so the CPU doesn't have
1095  * to fiddle with the individual bits.
1096  */
1097 
1098 #undef	BGE_DBG
1099 #define	BGE_DBG		BGE_DBG_MII	/* debug flag for this code	*/
1100 
1101 static uint16_t bge_mii_access(bge_t *bgep, bge_regno_t regno,
1102 				uint16_t data, uint32_t cmd);
1103 #pragma	no_inline(bge_mii_access)
1104 
/*
 * Perform a single MII (MDIO) transaction through the MI Communication
 * register.
 *
 * <cmd> is MI_COMMS_COMMAND_READ or MI_COMMS_COMMAND_WRITE; <data> is
 * the value to write (ignored on reads).  Returns the 16-bit data from
 * the transaction, or (uint16_t)~0u on timeout or read failure.
 * Caller must hold <genlock>.
 */
static uint16_t
bge_mii_access(bge_t *bgep, bge_regno_t regno, uint16_t data, uint32_t cmd)
{
	uint32_t timeout;
	uint32_t regval1;
	uint32_t regval2;

	BGE_TRACE(("bge_mii_access($%p, 0x%lx, 0x%x, 0x%x)",
	    (void *)bgep, regno, data, cmd));

	ASSERT(mutex_owned(bgep->genlock));

	/*
	 * Assemble the command ...
	 */
	cmd |= data << MI_COMMS_DATA_SHIFT;
	cmd |= regno << MI_COMMS_REGISTER_SHIFT;
	cmd |= bgep->phy_mii_addr << MI_COMMS_ADDRESS_SHIFT;
	cmd |= MI_COMMS_START;

	/*
	 * Wait for any command already in progress ...
	 *
	 * Note: this *shouldn't* ever find that there is a command
	 * in progress, because we already hold the <genlock> mutex.
	 * Nonetheless, we have sometimes seen the MI_COMMS_START
	 * bit set here -- it seems that the chip can initiate MII
	 * accesses internally, even with polling OFF.
	 */
	regval1 = regval2 = bge_reg_get32(bgep, MI_COMMS_REG);
	for (timeout = 100; ; ) {
		if ((regval2 & MI_COMMS_START) == 0) {
			bge_reg_put32(bgep, MI_COMMS_REG, cmd);
			break;
		}
		if (--timeout == 0)
			break;
		drv_usecwait(10);
		regval2 = bge_reg_get32(bgep, MI_COMMS_REG);
	}

	if (timeout == 0)
		return ((uint16_t)~0u);

	if (timeout != 100)
		BGE_REPORT((bgep, "bge_mii_access: cmd 0x%x -- "
		    "MI_COMMS_START set for %d us; 0x%x->0x%x",
		    cmd, 10*(100-timeout), regval1, regval2));

	/*
	 * Now poll for our own command to complete (START clears).
	 */
	regval1 = bge_reg_get32(bgep, MI_COMMS_REG);
	for (timeout = 1000; ; ) {
		if ((regval1 & MI_COMMS_START) == 0)
			break;
		if (--timeout == 0)
			break;
		drv_usecwait(10);
		regval1 = bge_reg_get32(bgep, MI_COMMS_REG);
	}

	/*
	 * Drop out early if the READ FAILED bit is set -- this chip
	 * could be a 5703/4S, with a SerDes instead of a PHY!
	 *
	 * NOTE(review): <regval2> here is the sample taken *before* our
	 * command was issued (from the first wait loop above), not the
	 * result of this access -- confirm that is the intended check.
	 */
	if (regval2 & MI_COMMS_READ_FAILED)
		return ((uint16_t)~0u);

	if (timeout == 0)
		return ((uint16_t)~0u);

	/*
	 * The PRM says to wait 5us after seeing the START bit clear
	 * and then re-read the register to get the final value of the
	 * data field, in order to avoid a race condition where the
	 * START bit is clear but the data field isn't yet valid.
	 *
	 * Note: we don't actually seem to encounter this race;
	 * except when the START bit is seen set again (see below),
	 * the data field doesn't change during this 5us interval.
	 */
	drv_usecwait(5);
	regval2 = bge_reg_get32(bgep, MI_COMMS_REG);

	/*
	 * Unfortunately, when following the PRM's instructions above,
	 * we have occasionally seen the START bit set again(!) in the
	 * value read after the 5us delay. This seems to be due to the
	 * chip autonomously starting another MII access internally.
	 * In such cases, the command/data/etc fields relate to the
	 * internal command, rather than the one that we thought had
	 * just finished.  So in this case, we fall back to returning
	 * the data from the original read that showed START clear.
	 */
	if (regval2 & MI_COMMS_START) {
		BGE_REPORT((bgep, "bge_mii_access: cmd 0x%x -- "
		    "MI_COMMS_START set after transaction; 0x%x->0x%x",
		    cmd, regval1, regval2));
		regval2 = regval1;
	}

	if (regval2 & MI_COMMS_START)
		return ((uint16_t)~0u);

	if (regval2 & MI_COMMS_READ_FAILED)
		return ((uint16_t)~0u);

	return ((regval2 & MI_COMMS_DATA_MASK) >> MI_COMMS_DATA_SHIFT);
}
1212 
1213 uint16_t bge_mii_get16(bge_t *bgep, bge_regno_t regno);
1214 #pragma	no_inline(bge_mii_get16)
1215 
1216 uint16_t
1217 bge_mii_get16(bge_t *bgep, bge_regno_t regno)
1218 {
1219 	BGE_TRACE(("bge_mii_get16($%p, 0x%lx)",
1220 	    (void *)bgep, regno));
1221 
1222 	ASSERT(mutex_owned(bgep->genlock));
1223 
1224 	if (DEVICE_5906_SERIES_CHIPSETS(bgep) && ((regno == MII_AUX_CONTROL) ||
1225 	    (regno == MII_MSCONTROL)))
1226 		return (0);
1227 
1228 	return (bge_mii_access(bgep, regno, 0, MI_COMMS_COMMAND_READ));
1229 }
1230 
1231 void bge_mii_put16(bge_t *bgep, bge_regno_t regno, uint16_t data);
1232 #pragma	no_inline(bge_mii_put16)
1233 
1234 void
1235 bge_mii_put16(bge_t *bgep, bge_regno_t regno, uint16_t data)
1236 {
1237 	BGE_TRACE(("bge_mii_put16($%p, 0x%lx, 0x%x)",
1238 	    (void *)bgep, regno, data));
1239 
1240 	ASSERT(mutex_owned(bgep->genlock));
1241 
1242 	if (DEVICE_5906_SERIES_CHIPSETS(bgep) && ((regno == MII_AUX_CONTROL) ||
1243 	    (regno == MII_MSCONTROL)))
1244 		return;
1245 
1246 	(void) bge_mii_access(bgep, regno, data, MI_COMMS_COMMAND_WRITE);
1247 }
1248 
1249 #undef	BGE_DBG
1250 #define	BGE_DBG		BGE_DBG_SEEPROM	/* debug flag for this code	*/
1251 
1252 #if	BGE_SEE_IO32 || BGE_FLASH_IO32
1253 
1254 /*
1255  * Basic SEEPROM get/set access routine
1256  *
1257  * This uses the chip's SEEPROM auto-access method, controlled by the
1258  * Serial EEPROM Address/Data Registers at 0x6838/683c, so the CPU
1259  * doesn't have to fiddle with the individual bits.
1260  *
1261  * The caller should hold <genlock> and *also* have already acquired
1262  * the right to access the SEEPROM, via bge_nvmem_acquire() above.
1263  *
1264  * Return value:
1265  *	0 on success,
1266  *	ENODATA on access timeout (maybe retryable: device may just be busy)
1267  *	EPROTO on other h/w or s/w errors.
1268  *
1269  * <*dp> is an input to a SEEPROM_ACCESS_WRITE operation, or an output
1270  * from a (successful) SEEPROM_ACCESS_READ.
1271  */
1272 static int bge_seeprom_access(bge_t *bgep, uint32_t cmd, bge_regno_t addr,
1273 				uint32_t *dp);
1274 #pragma	no_inline(bge_seeprom_access)
1275 
static int
bge_seeprom_access(bge_t *bgep, uint32_t cmd, bge_regno_t addr, uint32_t *dp)
{
	uint32_t tries;
	uint32_t regval;

	ASSERT(mutex_owned(bgep->genlock));

	/*
	 * On the newer chips that support both SEEPROM & Flash, we need
	 * to specifically enable SEEPROM access (Flash is the default).
	 * On older chips, we don't; SEEPROM is the only NVtype supported,
	 * and the NVM control registers don't exist ...
	 */
	switch (bgep->chipid.nvtype) {
	case BGE_NVTYPE_NONE:
	case BGE_NVTYPE_UNKNOWN:
		_NOTE(NOTREACHED)
	case BGE_NVTYPE_SEEPROM:
		/* older chips: nothing to enable */
		break;

	case BGE_NVTYPE_LEGACY_SEEPROM:
	case BGE_NVTYPE_UNBUFFERED_FLASH:
	case BGE_NVTYPE_BUFFERED_FLASH:
	default:
		bge_reg_set32(bgep, NVM_CONFIG1_REG,
		    NVM_CFG1_LEGACY_SEEPROM_MODE);
		break;
	}

	/*
	 * Check there's no command in progress.
	 *
	 * Note: this *shouldn't* ever find that there is a command
	 * in progress, because we already hold the <genlock> mutex.
	 * Also, to ensure we don't have a conflict with the chip's
	 * internal firmware or a process accessing the same (shared)
	 * SEEPROM through the other port of a 5704, we've already
	 * been through the "software arbitration" protocol.
	 * So this is just a final consistency check: we shouldn't
	 * see EITHER the START bit (command started but not complete)
	 * OR the COMPLETE bit (command completed but not cleared).
	 */
	regval = bge_reg_get32(bgep, SERIAL_EEPROM_ADDRESS_REG);
	if (regval & SEEPROM_ACCESS_START)
		return (EPROTO);
	if (regval & SEEPROM_ACCESS_COMPLETE)
		return (EPROTO);

	/*
	 * Assemble the command: low address bits, device-select bits
	 * derived from the high address bits, START/COMPLETE control
	 * bits, and the preserved half-clock setting.
	 */
	cmd |= addr & SEEPROM_ACCESS_ADDRESS_MASK;
	addr >>= SEEPROM_ACCESS_ADDRESS_SIZE;
	addr <<= SEEPROM_ACCESS_DEVID_SHIFT;
	cmd |= addr & SEEPROM_ACCESS_DEVID_MASK;
	cmd |= SEEPROM_ACCESS_START;
	cmd |= SEEPROM_ACCESS_COMPLETE;
	cmd |= regval & SEEPROM_ACCESS_HALFCLOCK_MASK;

	bge_reg_put32(bgep, SERIAL_EEPROM_DATA_REG, *dp);
	bge_reg_put32(bgep, SERIAL_EEPROM_ADDRESS_REG, cmd);

	/*
	 * By observation, a successful access takes ~20us on a 5703/4,
	 * but apparently much longer (up to 1000us) on the obsolescent
	 * BCM5700/BCM5701.  We want to be sure we don't get any false
	 * timeouts here; but OTOH, we don't want a bogus access to lock
	 * out interrupts for longer than necessary. So we'll allow up
	 * to 1000us ...
	 */
	for (tries = 0; tries < 1000; ++tries) {
		regval = bge_reg_get32(bgep, SERIAL_EEPROM_ADDRESS_REG);
		if (regval & SEEPROM_ACCESS_COMPLETE)
			break;
		drv_usecwait(1);
	}

	if (regval & SEEPROM_ACCESS_COMPLETE) {
		/*
		 * All OK; read the SEEPROM data register, then write back
		 * the value read from the address register in order to
		 * clear the <complete> bit and leave the SEEPROM access
		 * state machine idle, ready for the next access ...
		 */
		BGE_DEBUG(("bge_seeprom_access: complete after %d us", tries));
		*dp = bge_reg_get32(bgep, SERIAL_EEPROM_DATA_REG);
		bge_reg_put32(bgep, SERIAL_EEPROM_ADDRESS_REG, regval);
		return (0);
	}

	/*
	 * Hmm ... what happened here?
	 *
	 * Most likely, the user addressed a non-existent SEEPROM. Or
	 * maybe the SEEPROM was busy internally (e.g. processing a write)
	 * and didn't respond to being addressed. Either way, it's left
	 * the SEEPROM access state machine wedged. So we'll reset it
	 * before we leave, so it's ready for next time ...
	 */
	BGE_DEBUG(("bge_seeprom_access: timed out after %d us", tries));
	bge_reg_set32(bgep, SERIAL_EEPROM_ADDRESS_REG, SEEPROM_ACCESS_INIT);
	return (ENODATA);
}
1380 
1381 /*
1382  * Basic Flash get/set access routine
1383  *
1384  * These use the chip's Flash auto-access method, controlled by the
1385  * Flash Access Registers at 0x7000-701c, so the CPU doesn't have to
1386  * fiddle with the individual bits.
1387  *
1388  * The caller should hold <genlock> and *also* have already acquired
1389  * the right to access the Flash, via bge_nvmem_acquire() above.
1390  *
1391  * Return value:
1392  *	0 on success,
1393  *	ENODATA on access timeout (maybe retryable: device may just be busy)
1394  *	ENODEV if the NVmem device is missing or otherwise unusable
1395  *
1396  * <*dp> is an input to a NVM_FLASH_CMD_WR operation, or an output
1397  * from a (successful) NVM_FLASH_CMD_RD.
1398  */
1399 static int bge_flash_access(bge_t *bgep, uint32_t cmd, bge_regno_t addr,
1400 				uint32_t *dp);
1401 #pragma	no_inline(bge_flash_access)
1402 
static int
bge_flash_access(bge_t *bgep, uint32_t cmd, bge_regno_t addr, uint32_t *dp)
{
	uint32_t tries;
	uint32_t regval;

	ASSERT(mutex_owned(bgep->genlock));

	/*
	 * On the newer chips that support both SEEPROM & Flash, we need
	 * to specifically disable SEEPROM access while accessing Flash.
	 * The older chips don't support Flash, and the NVM registers don't
	 * exist, so we shouldn't be here at all!
	 */
	switch (bgep->chipid.nvtype) {
	case BGE_NVTYPE_NONE:
	case BGE_NVTYPE_UNKNOWN:
		_NOTE(NOTREACHED)
	case BGE_NVTYPE_SEEPROM:
		return (ENODEV);

	case BGE_NVTYPE_LEGACY_SEEPROM:
	case BGE_NVTYPE_UNBUFFERED_FLASH:
	case BGE_NVTYPE_BUFFERED_FLASH:
	default:
		bge_reg_clr32(bgep, NVM_CONFIG1_REG,
		    NVM_CFG1_LEGACY_SEEPROM_MODE);
		break;
	}

	/*
	 * Assemble the command: a single whole-word transfer
	 * (FIRST and LAST both set), with DONE pre-set so the
	 * state machine's completion flag starts clean.
	 */
	addr &= NVM_FLASH_ADDR_MASK;
	cmd |= NVM_FLASH_CMD_DOIT;
	cmd |= NVM_FLASH_CMD_FIRST;
	cmd |= NVM_FLASH_CMD_LAST;
	cmd |= NVM_FLASH_CMD_DONE;

	bge_reg_put32(bgep, NVM_FLASH_WRITE_REG, *dp);
	bge_reg_put32(bgep, NVM_FLASH_ADDR_REG, addr);
	bge_reg_put32(bgep, NVM_FLASH_CMD_REG, cmd);

	/*
	 * Allow up to 1000ms ...
	 */
	for (tries = 0; tries < 1000; ++tries) {
		regval = bge_reg_get32(bgep, NVM_FLASH_CMD_REG);
		if (regval & NVM_FLASH_CMD_DONE)
			break;
		drv_usecwait(1);
	}

	if (regval & NVM_FLASH_CMD_DONE) {
		/*
		 * All OK; read the data from the Flash read register
		 */
		BGE_DEBUG(("bge_flash_access: complete after %d us", tries));
		*dp = bge_reg_get32(bgep, NVM_FLASH_READ_REG);
		return (0);
	}

	/*
	 * Hmm ... what happened here?
	 *
	 * Most likely, the user addressed a non-existent Flash. Or
	 * maybe the Flash was busy internally (e.g. processing a write)
	 * and didn't respond to being addressed. Either way, there's
	 * nothing we can do here ...
	 */
	BGE_DEBUG(("bge_flash_access: timed out after %d us", tries));
	return (ENODATA);
}
1476 
1477 /*
1478  * The next two functions regulate access to the NVram (if fitted).
1479  *
1480  * On a 5704 (dual core) chip, there's only one SEEPROM and one Flash
1481  * (SPI) interface, but they can be accessed through either port. These
1482  * are managed by different instance of this driver and have no software
1483  * state in common.
1484  *
1485  * In addition (and even on a single core chip) the chip's internal
1486  * firmware can access the SEEPROM/Flash, most notably after a RESET
1487  * when it may download code to run internally.
1488  *
1489  * So we need to arbitrate between these various software agents.  For
1490  * this purpose, the chip provides the Software Arbitration Register,
1491  * which implements hardware(!) arbitration.
1492  *
1493  * This functionality didn't exist on older (5700/5701) chips, so there's
1494  * nothing we can do by way of arbitration on those; also, if there's no
1495  * SEEPROM/Flash fitted (or we couldn't determine what type), there's also
1496  * nothing to do.
1497  *
1498  * The internal firmware appears to use Request 0, which is the highest
1499  * priority.  So we'd like to use Request 2, leaving one higher and one
1500  * lower for any future developments ... but apparently this doesn't
1501  * always work.  So for now, the code uses Request 1 ;-(
1502  */
1503 
1504 #define	NVM_READ_REQ	NVM_READ_REQ1
1505 #define	NVM_RESET_REQ	NVM_RESET_REQ1
1506 #define	NVM_SET_REQ	NVM_SET_REQ1
1507 
1508 static void bge_nvmem_relinquish(bge_t *bgep);
1509 #pragma	no_inline(bge_nvmem_relinquish)
1510 
/*
 * Release this port's claim on the NVmem by withdrawing its request
 * from the (hardware) Software Arbitration Register.  Safe to call
 * whether or not the request was ever granted.  Caller holds <genlock>.
 */
static void
bge_nvmem_relinquish(bge_t *bgep)
{
	ASSERT(mutex_owned(bgep->genlock));

	switch (bgep->chipid.nvtype) {
	case BGE_NVTYPE_NONE:
	case BGE_NVTYPE_UNKNOWN:
		_NOTE(NOTREACHED)
		return;

	case BGE_NVTYPE_SEEPROM:
		/*
		 * No arbitration performed, no release needed
		 */
		return;

	case BGE_NVTYPE_LEGACY_SEEPROM:
	case BGE_NVTYPE_UNBUFFERED_FLASH:
	case BGE_NVTYPE_BUFFERED_FLASH:
	default:
		break;
	}

	/*
	 * Our own request should be present (whether or not granted) ...
	 */
	(void) bge_reg_get32(bgep, NVM_SW_ARBITRATION_REG);

	/*
	 * ... this will make it go away.
	 */
	bge_reg_put32(bgep, NVM_SW_ARBITRATION_REG, NVM_RESET_REQ);
	(void) bge_reg_get32(bgep, NVM_SW_ARBITRATION_REG);
}
1546 
1547 /*
1548  * Arbitrate for access to the NVmem, if necessary
1549  *
1550  * Return value:
1551  *	0 on success
1552  *	EAGAIN if the device is in use (retryable)
1553  *	ENODEV if the NVmem device is missing or otherwise unusable
1554  */
1555 static int bge_nvmem_acquire(bge_t *bgep);
1556 #pragma	no_inline(bge_nvmem_acquire)
1557 
static int
bge_nvmem_acquire(bge_t *bgep)
{
	uint32_t regval;
	uint32_t tries;

	ASSERT(mutex_owned(bgep->genlock));

	switch (bgep->chipid.nvtype) {
	case BGE_NVTYPE_NONE:
	case BGE_NVTYPE_UNKNOWN:
		/*
		 * Access denied: no (recognisable) device fitted
		 */
		return (ENODEV);

	case BGE_NVTYPE_SEEPROM:
		/*
		 * Access granted: no arbitration needed (or possible)
		 */
		return (0);

	case BGE_NVTYPE_LEGACY_SEEPROM:
	case BGE_NVTYPE_UNBUFFERED_FLASH:
	case BGE_NVTYPE_BUFFERED_FLASH:
	default:
		/*
		 * Access conditional: conduct arbitration protocol
		 */
		break;
	}

	/*
	 * We're holding the per-port mutex <genlock>, so no-one other
	 * thread can be attempting to access the NVmem through *this*
	 * port. But it could be in use by the *other* port (of a 5704),
	 * or by the chip's internal firmware, so we have to go through
	 * the full (hardware) arbitration protocol ...
	 *
	 * Note that *because* we're holding <genlock>, the interrupt handler
	 * won't be able to progress.  So we're only willing to spin for a
	 * fairly short time.  Specifically:
	 *
	 *	We *must* wait long enough for the hardware to resolve all
	 *	requests and determine the winner.  Fortunately, this is
	 *	"almost instantaneous", even as observed by GHz CPUs.
	 *
	 *	A successful access by another Solaris thread (via either
	 *	port) typically takes ~20us.  So waiting a bit longer than
	 *	that will give a good chance of success, if the other user
	 *	*is* another thread on the other port.
	 *
	 *	However, the internal firmware can hold on to the NVmem
	 *	for *much* longer: at least 10 milliseconds just after a
	 *	RESET, and maybe even longer if the NVmem actually contains
	 *	code to download and run on the internal CPUs.
	 *
	 * So, we'll allow 50us; if that's not enough then it's up to the
	 * caller to retry later (hence the choice of return code EAGAIN).
	 */
	regval = bge_reg_get32(bgep, NVM_SW_ARBITRATION_REG);
	bge_reg_put32(bgep, NVM_SW_ARBITRATION_REG, NVM_SET_REQ);

	/*
	 * NB: the WON bit polled here must correspond to the request
	 * selected by NVM_SET_REQ (currently request 1) above.
	 */
	for (tries = 0; tries < 50; ++tries) {
		regval = bge_reg_get32(bgep, NVM_SW_ARBITRATION_REG);
		if (regval & NVM_WON_REQ1)
			break;
		drv_usecwait(1);
	}

	if (regval & NVM_WON_REQ1) {
		BGE_DEBUG(("bge_nvmem_acquire: won after %d us", tries));
		return (0);
	}

	/*
	 * Somebody else must be accessing the NVmem, so abandon our
	 * attempt to take control of it.  The caller can try again later ...
	 */
	BGE_DEBUG(("bge_nvmem_acquire: lost after %d us", tries));
	bge_nvmem_relinquish(bgep);
	return (EAGAIN);
}
1641 
1642 /*
1643  * This code assumes that the GPIO1 bit has been wired up to the NVmem
1644  * write protect line in such a way that the NVmem is protected when
1645  * GPIO1 is an input, or is an output but driven high.  Thus, to make the
1646  * NVmem writable we have to change GPIO1 to an output AND drive it low.
1647  *
1648  * Note: there's only one set of GPIO pins on a 5704, even though they
1649  * can be accessed through either port.  So the chip has to resolve what
1650  * happens if the two ports program a single pin differently ... the rule
1651  * it uses is that if the ports disagree about the *direction* of a pin,
1652  * "output" wins over "input", but if they disagree about its *value* as
1653  * an output, then the pin is TRISTATED instead!  In such a case, no-one
1654  * wins, and the external signal does whatever the external circuitry
1655  * defines as the default -- which we've assumed is the PROTECTED state.
1656  * So, we always change GPIO1 back to being an *input* whenever we're not
1657  * specifically using it to unprotect the NVmem. This allows either port
1658  * to update the NVmem, although obviously only one at a time!
1659  *
1660  * The caller should hold <genlock> and *also* have already acquired the
1661  * right to access the NVmem, via bge_nvmem_acquire() above.
1662  */
1663 static void bge_nvmem_protect(bge_t *bgep, boolean_t protect);
1664 #pragma	inline(bge_nvmem_protect)
1665 
1666 static void
1667 bge_nvmem_protect(bge_t *bgep, boolean_t protect)
1668 {
1669 	uint32_t regval;
1670 
1671 	ASSERT(mutex_owned(bgep->genlock));
1672 
1673 	regval = bge_reg_get32(bgep, MISC_LOCAL_CONTROL_REG);
1674 	if (protect) {
1675 		regval |= MLCR_MISC_PINS_OUTPUT_1;
1676 		regval &= ~MLCR_MISC_PINS_OUTPUT_ENABLE_1;
1677 	} else {
1678 		regval &= ~MLCR_MISC_PINS_OUTPUT_1;
1679 		regval |= MLCR_MISC_PINS_OUTPUT_ENABLE_1;
1680 	}
1681 	bge_reg_put32(bgep, MISC_LOCAL_CONTROL_REG, regval);
1682 }
1683 
1684 /*
1685  * Now put it all together ...
1686  *
1687  * Try to acquire control of the NVmem; if successful, then:
1688  *	unprotect it (if we want to write to it)
1689  *	perform the requested access
1690  *	reprotect it (after a write)
1691  *	relinquish control
1692  *
1693  * Return value:
1694  *	0 on success,
1695  *	EAGAIN if the device is in use (retryable)
1696  *	ENODATA on access timeout (maybe retryable: device may just be busy)
1697  *	ENODEV if the NVmem device is missing or otherwise unusable
1698  *	EPROTO on other h/w or s/w errors.
1699  */
1700 static int
1701 bge_nvmem_rw32(bge_t *bgep, uint32_t cmd, bge_regno_t addr, uint32_t *dp)
1702 {
1703 	int err;
1704 
1705 	if ((err = bge_nvmem_acquire(bgep)) == 0) {
1706 		switch (cmd) {
1707 		case BGE_SEE_READ:
1708 			err = bge_seeprom_access(bgep,
1709 			    SEEPROM_ACCESS_READ, addr, dp);
1710 			break;
1711 
1712 		case BGE_SEE_WRITE:
1713 			bge_nvmem_protect(bgep, B_FALSE);
1714 			err = bge_seeprom_access(bgep,
1715 			    SEEPROM_ACCESS_WRITE, addr, dp);
1716 			bge_nvmem_protect(bgep, B_TRUE);
1717 			break;
1718 
1719 		case BGE_FLASH_READ:
1720 			if (DEVICE_5721_SERIES_CHIPSETS(bgep) ||
1721 			    DEVICE_5723_SERIES_CHIPSETS(bgep) ||
1722 			    DEVICE_5717_SERIES_CHIPSETS(bgep) ||
1723 			    DEVICE_5714_SERIES_CHIPSETS(bgep)) {
1724 				bge_reg_set32(bgep, NVM_ACCESS_REG,
1725 				    NVM_ACCESS_ENABLE);
1726 			}
1727 			err = bge_flash_access(bgep,
1728 			    NVM_FLASH_CMD_RD, addr, dp);
1729 			if (DEVICE_5721_SERIES_CHIPSETS(bgep) ||
1730 			    DEVICE_5723_SERIES_CHIPSETS(bgep) ||
1731 			    DEVICE_5717_SERIES_CHIPSETS(bgep) ||
1732 			    DEVICE_5714_SERIES_CHIPSETS(bgep)) {
1733 				bge_reg_clr32(bgep, NVM_ACCESS_REG,
1734 				    NVM_ACCESS_ENABLE);
1735 			}
1736 			break;
1737 
1738 		case BGE_FLASH_WRITE:
1739 			if (DEVICE_5721_SERIES_CHIPSETS(bgep) ||
1740 			    DEVICE_5723_SERIES_CHIPSETS(bgep) ||
1741 			    DEVICE_5717_SERIES_CHIPSETS(bgep) ||
1742 			    DEVICE_5714_SERIES_CHIPSETS(bgep)) {
1743 				bge_reg_set32(bgep, NVM_ACCESS_REG,
1744 				    NVM_WRITE_ENABLE|NVM_ACCESS_ENABLE);
1745 			}
1746 			bge_nvmem_protect(bgep, B_FALSE);
1747 			err = bge_flash_access(bgep,
1748 			    NVM_FLASH_CMD_WR, addr, dp);
1749 			bge_nvmem_protect(bgep, B_TRUE);
1750 			if (DEVICE_5721_SERIES_CHIPSETS(bgep) ||
1751 			    DEVICE_5723_SERIES_CHIPSETS(bgep) ||
1752 			    DEVICE_5717_SERIES_CHIPSETS(bgep) ||
1753 			    DEVICE_5714_SERIES_CHIPSETS(bgep)) {
1754 				bge_reg_clr32(bgep, NVM_ACCESS_REG,
1755 				    NVM_WRITE_ENABLE|NVM_ACCESS_ENABLE);
1756 			}
1757 
1758 			break;
1759 
1760 		default:
1761 			_NOTE(NOTREACHED)
1762 			break;
1763 		}
1764 		bge_nvmem_relinquish(bgep);
1765 	}
1766 
1767 	BGE_DEBUG(("bge_nvmem_rw32: err %d", err));
1768 	return (err);
1769 }
1770 
1771 /*
1772  * Attempt to get a MAC address from the SEEPROM or Flash, if any
1773  */
1774 static uint64_t bge_get_nvmac(bge_t *bgep);
1775 #pragma no_inline(bge_get_nvmac)
1776 
/*
 * Read the factory MAC address out of NVmem.
 *
 * Returns the 48-bit MAC address in the low bits of a uint64_t in
 * host byte order, or 0 if no NVmem is fitted/recognised or either
 * 32-bit read fails.
 */
static uint64_t
bge_get_nvmac(bge_t *bgep)
{
	uint32_t mac_high;
	uint32_t mac_low;
	uint32_t addr;
	uint32_t cmd;
	uint64_t mac;

	BGE_TRACE(("bge_get_nvmac($%p)",
	    (void *)bgep));

	/* pick the access method matching the fitted NVmem type */
	switch (bgep->chipid.nvtype) {
	case BGE_NVTYPE_NONE:
	case BGE_NVTYPE_UNKNOWN:
	default:
		return (0ULL);

	case BGE_NVTYPE_SEEPROM:
	case BGE_NVTYPE_LEGACY_SEEPROM:
		cmd = BGE_SEE_READ;
		break;

	case BGE_NVTYPE_UNBUFFERED_FLASH:
	case BGE_NVTYPE_BUFFERED_FLASH:
		cmd = BGE_FLASH_READ;
		break;
	}

	/* the 5906 family stores the MAC address at a different offset */
	if (DEVICE_5906_SERIES_CHIPSETS(bgep))
		addr = NVMEM_DATA_MAC_ADDRESS_5906;
	else
		addr = NVMEM_DATA_MAC_ADDRESS;

	if (bge_nvmem_rw32(bgep, cmd, addr, &mac_high))
		return (0ULL);
	addr += 4;
	if (bge_nvmem_rw32(bgep, cmd, addr, &mac_low))
		return (0ULL);

	/*
	 * The Broadcom chip is natively BIG-endian, so that's how the
	 * MAC address is represented in NVmem.  We may need to swap it
	 * around on a little-endian host ...
	 */
#ifdef	_BIG_ENDIAN
	mac = mac_high;
	mac = mac << 32;
	mac |= mac_low;
#else
	mac = BGE_BSWAP_32(mac_high);
	mac = mac << 32;
	mac |= BGE_BSWAP_32(mac_low);
#endif	/* _BIG_ENDIAN */

	return (mac);
}
1834 
1835 #else	/* BGE_SEE_IO32 || BGE_FLASH_IO32 */
1836 
1837 /*
1838  * Dummy version for when we're not supporting NVmem access
1839  */
1840 static uint64_t bge_get_nvmac(bge_t *bgep);
1841 #pragma inline(bge_get_nvmac)
1842 
1843 static uint64_t
1844 bge_get_nvmac(bge_t *bgep)
1845 {
1846 	_NOTE(ARGUNUSED(bgep))
1847 	return (0ULL);
1848 }
1849 
1850 #endif	/* BGE_SEE_IO32 || BGE_FLASH_IO32 */
1851 
1852 /*
1853  * Determine the type of NVmem that is (or may be) attached to this chip,
1854  */
1855 static enum bge_nvmem_type bge_nvmem_id(bge_t *bgep);
1856 #pragma no_inline(bge_nvmem_id)
1857 
1858 static enum bge_nvmem_type
1859 bge_nvmem_id(bge_t *bgep)
1860 {
1861 	enum bge_nvmem_type nvtype;
1862 	uint32_t config1;
1863 
1864 	BGE_TRACE(("bge_nvmem_id($%p)",
1865 	    (void *)bgep));
1866 
1867 	switch (bgep->chipid.device) {
1868 	default:
1869 		/*
1870 		 * We shouldn't get here; it means we don't recognise
1871 		 * the chip, which means we don't know how to determine
1872 		 * what sort of NVmem (if any) it has.  So we'll say
1873 		 * NONE, to disable the NVmem access code ...
1874 		 */
1875 		nvtype = BGE_NVTYPE_NONE;
1876 		break;
1877 
1878 	case DEVICE_ID_5700:
1879 	case DEVICE_ID_5700x:
1880 	case DEVICE_ID_5701:
1881 		/*
1882 		 * These devices support *only* SEEPROMs
1883 		 */
1884 		nvtype = BGE_NVTYPE_SEEPROM;
1885 		break;
1886 
1887 	case DEVICE_ID_5702:
1888 	case DEVICE_ID_5702fe:
1889 	case DEVICE_ID_5703C:
1890 	case DEVICE_ID_5703S:
1891 	case DEVICE_ID_5704C:
1892 	case DEVICE_ID_5704S:
1893 	case DEVICE_ID_5704:
1894 	case DEVICE_ID_5705M:
1895 	case DEVICE_ID_5705C:
1896 	case DEVICE_ID_5705_2:
1897 	case DEVICE_ID_5717:
1898 	case DEVICE_ID_5718:
1899 	case DEVICE_ID_5724:
1900 	case DEVICE_ID_57780:
1901 	case DEVICE_ID_5780:
1902 	case DEVICE_ID_5782:
1903 	case DEVICE_ID_5785:
1904 	case DEVICE_ID_5787:
1905 	case DEVICE_ID_5787M:
1906 	case DEVICE_ID_5788:
1907 	case DEVICE_ID_5789:
1908 	case DEVICE_ID_5751:
1909 	case DEVICE_ID_5751M:
1910 	case DEVICE_ID_5752:
1911 	case DEVICE_ID_5752M:
1912 	case DEVICE_ID_5754:
1913 	case DEVICE_ID_5755:
1914 	case DEVICE_ID_5755M:
1915 	case DEVICE_ID_5756M:
1916 	case DEVICE_ID_5721:
1917 	case DEVICE_ID_5722:
1918 	case DEVICE_ID_5723:
1919 	case DEVICE_ID_5761:
1920 	case DEVICE_ID_5761E:
1921 	case DEVICE_ID_5764:
1922 	case DEVICE_ID_5714C:
1923 	case DEVICE_ID_5714S:
1924 	case DEVICE_ID_5715C:
1925 	case DEVICE_ID_5715S:
1926 		config1 = bge_reg_get32(bgep, NVM_CONFIG1_REG);
1927 		if (config1 & NVM_CFG1_FLASH_MODE)
1928 			if (config1 & NVM_CFG1_BUFFERED_MODE)
1929 				nvtype = BGE_NVTYPE_BUFFERED_FLASH;
1930 			else
1931 				nvtype = BGE_NVTYPE_UNBUFFERED_FLASH;
1932 		else
1933 			nvtype = BGE_NVTYPE_LEGACY_SEEPROM;
1934 		break;
1935 	case DEVICE_ID_5906:
1936 	case DEVICE_ID_5906M:
1937 		nvtype = BGE_NVTYPE_BUFFERED_FLASH;
1938 		break;
1939 	}
1940 
1941 	return (nvtype);
1942 }
1943 
1944 #undef	BGE_DBG
1945 #define	BGE_DBG		BGE_DBG_CHIP	/* debug flag for this code	*/
1946 
1947 static void
1948 bge_init_recv_rule(bge_t *bgep)
1949 {
1950 	bge_recv_rule_t *rulep = bgep->recv_rules;
1951 	uint32_t i;
1952 
1953 	/*
1954 	 * Initialize receive rule registers.
1955 	 * Note that rules may persist across each bge_m_start/stop() call.
1956 	 */
1957 	for (i = 0; i < RECV_RULES_NUM_MAX; i++, rulep++) {
1958 		bge_reg_put32(bgep, RECV_RULE_MASK_REG(i), rulep->mask_value);
1959 		bge_reg_put32(bgep, RECV_RULE_CONTROL_REG(i), rulep->control);
1960 	}
1961 }
1962 
1963 /*
1964  * Using the values captured by bge_chip_cfg_init(), and additional probes
1965  * as required, characterise the chip fully: determine the label by which
1966  * to refer to this chip, the correct settings for various registers, and
1967  * of course whether the device and/or subsystem are supported!
1968  */
1969 int bge_chip_id_init(bge_t *bgep);
1970 #pragma	no_inline(bge_chip_id_init)
1971 
1972 int
1973 bge_chip_id_init(bge_t *bgep)
1974 {
1975 	char buf[MAXPATHLEN];		/* any risk of stack overflow?	*/
1976 	boolean_t sys_ok;
1977 	boolean_t dev_ok;
1978 	chip_id_t *cidp;
1979 	uint32_t subid;
1980 	char *devname;
1981 	char *sysname;
1982 	int *ids;
1983 	int err;
1984 	uint_t i;
1985 
1986 	sys_ok = dev_ok = B_FALSE;
1987 	cidp = &bgep->chipid;
1988 
1989 	/*
1990 	 * Check the PCI device ID to determine the generic chip type and
1991 	 * select parameters that depend on this.
1992 	 *
1993 	 * Note: because the SPARC platforms in general don't fit the
1994 	 * SEEPROM 'behind' the chip, the PCI revision ID register reads
1995 	 * as zero - which is why we use <asic_rev> rather than <revision>
1996 	 * below ...
1997 	 *
1998 	 * Note: in general we can't distinguish between the Copper/SerDes
1999 	 * versions by ID alone, as some Copper devices (e.g. some but not
2000 	 * all 5703Cs) have the same ID as the SerDes equivalents.  So we
2001 	 * treat them the same here, and the MII code works out the media
2002 	 * type later on ...
2003 	 */
2004 	cidp->mbuf_base = bge_mbuf_pool_base;
2005 	cidp->mbuf_length = bge_mbuf_pool_len;
2006 	cidp->recv_slots = BGE_RECV_SLOTS_USED;
2007 	cidp->bge_dma_rwctrl = bge_dma_rwctrl;
2008 	cidp->pci_type = BGE_PCI_X;
2009 	cidp->statistic_type = BGE_STAT_BLK;
2010 	cidp->mbuf_lo_water_rdma = bge_mbuf_lo_water_rdma;
2011 	cidp->mbuf_lo_water_rmac = bge_mbuf_lo_water_rmac;
2012 	cidp->mbuf_hi_water = bge_mbuf_hi_water;
2013 	cidp->rx_ticks_norm = bge_rx_ticks_norm;
2014 	cidp->rx_count_norm = bge_rx_count_norm;
2015 	cidp->tx_ticks_norm = bge_tx_ticks_norm;
2016 	cidp->tx_count_norm = bge_tx_count_norm;
2017 	cidp->mask_pci_int = MHCR_MASK_PCI_INT_OUTPUT;
2018 
2019 	if (cidp->rx_rings == 0 || cidp->rx_rings > BGE_RECV_RINGS_MAX)
2020 		cidp->rx_rings = BGE_RECV_RINGS_DEFAULT;
2021 	if (cidp->tx_rings == 0 || cidp->tx_rings > BGE_SEND_RINGS_MAX)
2022 		cidp->tx_rings = BGE_SEND_RINGS_DEFAULT;
2023 
2024 	cidp->msi_enabled = B_FALSE;
2025 
2026 	switch (cidp->device) {
2027 	case DEVICE_ID_5717:
2028 	case DEVICE_ID_5718:
2029 	case DEVICE_ID_5724:
2030 		if (cidp->device == DEVICE_ID_5717)
2031 			cidp->chip_label = 5717;
2032 		else if (cidp->device == DEVICE_ID_5718)
2033 			cidp->chip_label = 5718;
2034 		else
2035 			cidp->chip_label = 5724;
2036 		cidp->msi_enabled = bge_enable_msi;
2037 #ifdef __sparc
2038 		cidp->mask_pci_int = LE_32(MHCR_MASK_PCI_INT_OUTPUT);
2039 #endif
2040 		cidp->bge_dma_rwctrl = LE_32(PDRWCR_VAR_5717);
2041 		cidp->pci_type = BGE_PCI_E;
2042 		cidp->mbuf_lo_water_rdma = RDMA_MBUF_LOWAT_5705;
2043 		cidp->mbuf_lo_water_rmac = MAC_RX_MBUF_LOWAT_5717;
2044 		cidp->mbuf_hi_water = MBUF_HIWAT_5717;
2045 		cidp->mbuf_base = bge_mbuf_pool_base_5705;
2046 		cidp->mbuf_length = bge_mbuf_pool_len_5705;
2047 		cidp->recv_slots = BGE_RECV_SLOTS_5705;
2048 		cidp->bge_mlcr_default = MLCR_DEFAULT_5717;
2049 		cidp->rx_rings = BGE_RECV_RINGS_MAX_5705;
2050 		cidp->tx_rings = BGE_SEND_RINGS_MAX_5705;
2051 		cidp->flags |= CHIP_FLAG_NO_JUMBO;
2052 		cidp->statistic_type = BGE_STAT_REG;
2053 		dev_ok = B_TRUE;
2054 		break;
2055 
2056 	case DEVICE_ID_5700:
2057 	case DEVICE_ID_5700x:
2058 		cidp->chip_label = 5700;
2059 		cidp->flags |= CHIP_FLAG_PARTIAL_CSUM;
2060 		break;
2061 
2062 	case DEVICE_ID_5701:
2063 		cidp->chip_label = 5701;
2064 		dev_ok = B_TRUE;
2065 		cidp->flags |= CHIP_FLAG_PARTIAL_CSUM;
2066 		break;
2067 
2068 	case DEVICE_ID_5702:
2069 	case DEVICE_ID_5702fe:
2070 		cidp->chip_label = 5702;
2071 		dev_ok = B_TRUE;
2072 		cidp->flags |= CHIP_FLAG_PARTIAL_CSUM;
2073 		cidp->pci_type = BGE_PCI;
2074 		break;
2075 
2076 	case DEVICE_ID_5703C:
2077 	case DEVICE_ID_5703S:
2078 	case DEVICE_ID_5703:
2079 		/*
2080 		 * Revision A0 of the 5703/5793 had various errata
2081 		 * that we can't or don't work around, so it's not
2082 		 * supported, but all later versions are
2083 		 */
2084 		cidp->chip_label = cidp->subven == VENDOR_ID_SUN ? 5793 : 5703;
2085 		if (bgep->chipid.asic_rev != MHCR_CHIP_REV_5703_A0)
2086 			dev_ok = B_TRUE;
2087 		cidp->flags |= CHIP_FLAG_PARTIAL_CSUM;
2088 		break;
2089 
2090 	case DEVICE_ID_5704C:
2091 	case DEVICE_ID_5704S:
2092 	case DEVICE_ID_5704:
2093 		cidp->chip_label = cidp->subven == VENDOR_ID_SUN ? 5794 : 5704;
2094 		cidp->mbuf_base = bge_mbuf_pool_base_5704;
2095 		cidp->mbuf_length = bge_mbuf_pool_len_5704;
2096 		dev_ok = B_TRUE;
2097 		cidp->flags |= CHIP_FLAG_PARTIAL_CSUM;
2098 		break;
2099 
2100 	case DEVICE_ID_5705C:
2101 	case DEVICE_ID_5705M:
2102 	case DEVICE_ID_5705MA3:
2103 	case DEVICE_ID_5705F:
2104 	case DEVICE_ID_5705_2:
2105 	case DEVICE_ID_5754:
2106 		if (cidp->device == DEVICE_ID_5754) {
2107 			cidp->chip_label = 5754;
2108 			cidp->pci_type = BGE_PCI_E;
2109 		} else {
2110 			cidp->chip_label = 5705;
2111 			cidp->pci_type = BGE_PCI;
2112 			cidp->flags |= CHIP_FLAG_PARTIAL_CSUM;
2113 		}
2114 		cidp->mbuf_lo_water_rdma = RDMA_MBUF_LOWAT_5705;
2115 		cidp->mbuf_lo_water_rmac = MAC_RX_MBUF_LOWAT_5705;
2116 		cidp->mbuf_hi_water = MBUF_HIWAT_5705;
2117 		cidp->mbuf_base = bge_mbuf_pool_base_5705;
2118 		cidp->mbuf_length = bge_mbuf_pool_len_5705;
2119 		cidp->recv_slots = BGE_RECV_SLOTS_5705;
2120 		cidp->rx_rings = BGE_RECV_RINGS_MAX_5705;
2121 		cidp->tx_rings = BGE_SEND_RINGS_MAX_5705;
2122 		cidp->flags |= CHIP_FLAG_NO_JUMBO;
2123 		cidp->statistic_type = BGE_STAT_REG;
2124 		dev_ok = B_TRUE;
2125 		break;
2126 
2127 	case DEVICE_ID_5906:
2128 	case DEVICE_ID_5906M:
2129 		cidp->chip_label = 5906;
2130 		cidp->pci_type = BGE_PCI_E;
2131 		cidp->mbuf_lo_water_rdma = RDMA_MBUF_LOWAT_5906;
2132 		cidp->mbuf_lo_water_rmac = MAC_RX_MBUF_LOWAT_5906;
2133 		cidp->mbuf_hi_water = MBUF_HIWAT_5906;
2134 		cidp->mbuf_base = bge_mbuf_pool_base;
2135 		cidp->mbuf_length = bge_mbuf_pool_len;
2136 		cidp->recv_slots = BGE_RECV_SLOTS_5705;
2137 		cidp->rx_rings = BGE_RECV_RINGS_MAX_5705;
2138 		cidp->tx_rings = BGE_SEND_RINGS_MAX_5705;
2139 		cidp->flags |= CHIP_FLAG_NO_JUMBO;
2140 		cidp->statistic_type = BGE_STAT_REG;
2141 		dev_ok = B_TRUE;
2142 		break;
2143 
2144 	case DEVICE_ID_5753:
2145 		cidp->chip_label = 5753;
2146 		cidp->pci_type = BGE_PCI_E;
2147 		cidp->mbuf_lo_water_rdma = RDMA_MBUF_LOWAT_5705;
2148 		cidp->mbuf_lo_water_rmac = MAC_RX_MBUF_LOWAT_5705;
2149 		cidp->mbuf_hi_water = MBUF_HIWAT_5705;
2150 		cidp->mbuf_base = bge_mbuf_pool_base_5705;
2151 		cidp->mbuf_length = bge_mbuf_pool_len_5705;
2152 		cidp->recv_slots = BGE_RECV_SLOTS_5705;
2153 		cidp->bge_mlcr_default |= MLCR_MISC_PINS_OUTPUT_ENABLE_1;
2154 		cidp->rx_rings = BGE_RECV_RINGS_MAX_5705;
2155 		cidp->tx_rings = BGE_SEND_RINGS_MAX_5705;
2156 		cidp->flags |= CHIP_FLAG_NO_JUMBO;
2157 		cidp->statistic_type = BGE_STAT_REG;
2158 		dev_ok = B_TRUE;
2159 		break;
2160 
2161 	case DEVICE_ID_5755:
2162 	case DEVICE_ID_5755M:
2163 		cidp->chip_label = 5755;
2164 		cidp->pci_type = BGE_PCI_E;
2165 		cidp->mbuf_lo_water_rdma = RDMA_MBUF_LOWAT_5705;
2166 		cidp->mbuf_lo_water_rmac = MAC_RX_MBUF_LOWAT_5705;
2167 		cidp->mbuf_hi_water = MBUF_HIWAT_5705;
2168 		cidp->mbuf_base = bge_mbuf_pool_base_5705;
2169 		cidp->mbuf_length = bge_mbuf_pool_len_5705;
2170 		cidp->recv_slots = BGE_RECV_SLOTS_5705;
2171 		cidp->bge_mlcr_default |= MLCR_MISC_PINS_OUTPUT_ENABLE_1;
2172 		cidp->rx_rings = BGE_RECV_RINGS_MAX_5705;
2173 		cidp->tx_rings = BGE_SEND_RINGS_MAX_5705;
2174 		cidp->flags |= CHIP_FLAG_NO_JUMBO;
2175 		if (cidp->device == DEVICE_ID_5755M)
2176 			cidp->flags |= CHIP_FLAG_PARTIAL_CSUM;
2177 		cidp->statistic_type = BGE_STAT_REG;
2178 		dev_ok = B_TRUE;
2179 		break;
2180 
2181 	case DEVICE_ID_5756M:
2182 		/*
2183 		 * This is nearly identical to the 5755M.
2184 		 * (Actually reports the 5755 chip ID.)
2185 		 */
2186 		cidp->chip_label = 5756;
2187 		cidp->pci_type = BGE_PCI_E;
2188 		cidp->mbuf_lo_water_rdma = RDMA_MBUF_LOWAT_5705;
2189 		cidp->mbuf_lo_water_rmac = MAC_RX_MBUF_LOWAT_5705;
2190 		cidp->mbuf_hi_water = MBUF_HIWAT_5705;
2191 		cidp->mbuf_base = bge_mbuf_pool_base_5705;
2192 		cidp->mbuf_length = bge_mbuf_pool_len_5705;
2193 		cidp->recv_slots = BGE_RECV_SLOTS_5705;
2194 		cidp->bge_mlcr_default |= MLCR_MISC_PINS_OUTPUT_ENABLE_1;
2195 		cidp->rx_rings = BGE_RECV_RINGS_MAX_5705;
2196 		cidp->tx_rings = BGE_SEND_RINGS_MAX_5705;
2197 		cidp->flags |= CHIP_FLAG_NO_JUMBO;
2198 		cidp->statistic_type = BGE_STAT_REG;
2199 		dev_ok = B_TRUE;
2200 		break;
2201 
2202 	case DEVICE_ID_5787:
2203 	case DEVICE_ID_5787M:
2204 		cidp->chip_label = 5787;
2205 		cidp->pci_type = BGE_PCI_E;
2206 		cidp->mbuf_lo_water_rdma = RDMA_MBUF_LOWAT_5705;
2207 		cidp->mbuf_lo_water_rmac = MAC_RX_MBUF_LOWAT_5705;
2208 		cidp->mbuf_hi_water = MBUF_HIWAT_5705;
2209 		cidp->mbuf_base = bge_mbuf_pool_base_5705;
2210 		cidp->mbuf_length = bge_mbuf_pool_len_5705;
2211 		cidp->recv_slots = BGE_RECV_SLOTS_5705;
2212 		cidp->bge_mlcr_default |= MLCR_MISC_PINS_OUTPUT_ENABLE_1;
2213 		cidp->rx_rings = BGE_RECV_RINGS_MAX_5705;
2214 		cidp->tx_rings = BGE_SEND_RINGS_MAX_5705;
2215 		cidp->flags |= CHIP_FLAG_NO_JUMBO;
2216 		cidp->statistic_type = BGE_STAT_REG;
2217 		dev_ok = B_TRUE;
2218 		break;
2219 
2220 	case DEVICE_ID_5723:
2221 	case DEVICE_ID_5761:
2222 	case DEVICE_ID_5761E:
2223 	case DEVICE_ID_57780:
2224 		cidp->msi_enabled = bge_enable_msi;
2225 		/*
2226 		 * We don't use MSI for BCM5764 and BCM5785, as the
2227 		 * status block may fail to update when the network
2228 		 * traffic is heavy.
2229 		 */
2230 		/* FALLTHRU */
2231 	case DEVICE_ID_5785:
2232 	case DEVICE_ID_5764:
2233 		if (cidp->device == DEVICE_ID_5723)
2234 			cidp->chip_label = 5723;
2235 		else if (cidp->device == DEVICE_ID_5764)
2236 			cidp->chip_label = 5764;
2237 		else if (cidp->device == DEVICE_ID_5785)
2238 			cidp->chip_label = 5785;
2239 		else if (cidp->device == DEVICE_ID_57780)
2240 			cidp->chip_label = 57780;
2241 		else
2242 			cidp->chip_label = 5761;
2243 		cidp->bge_dma_rwctrl = bge_dma_rwctrl_5721;
2244 		cidp->pci_type = BGE_PCI_E;
2245 		cidp->mbuf_lo_water_rdma = RDMA_MBUF_LOWAT_5705;
2246 		cidp->mbuf_lo_water_rmac = MAC_RX_MBUF_LOWAT_5705;
2247 		cidp->mbuf_hi_water = MBUF_HIWAT_5705;
2248 		cidp->mbuf_base = bge_mbuf_pool_base_5705;
2249 		cidp->mbuf_length = bge_mbuf_pool_len_5705;
2250 		cidp->recv_slots = BGE_RECV_SLOTS_5705;
2251 		cidp->bge_mlcr_default |= MLCR_MISC_PINS_OUTPUT_ENABLE_1;
2252 		cidp->rx_rings = BGE_RECV_RINGS_MAX_5705;
2253 		cidp->tx_rings = BGE_SEND_RINGS_MAX_5705;
2254 		cidp->flags |= CHIP_FLAG_NO_JUMBO;
2255 		cidp->statistic_type = BGE_STAT_REG;
2256 		dev_ok = B_TRUE;
2257 		break;
2258 
2259 	/* PCI-X device, identical to 5714 */
2260 	case DEVICE_ID_5780:
2261 		cidp->chip_label = 5780;
2262 		cidp->mbuf_lo_water_rdma = RDMA_MBUF_LOWAT_5705;
2263 		cidp->mbuf_lo_water_rmac = MAC_RX_MBUF_LOWAT_5705;
2264 		cidp->mbuf_hi_water = MBUF_HIWAT_5705;
2265 		cidp->mbuf_base = bge_mbuf_pool_base_5721;
2266 		cidp->mbuf_length = bge_mbuf_pool_len_5721;
2267 		cidp->recv_slots = BGE_RECV_SLOTS_5721;
2268 		cidp->rx_rings = BGE_RECV_RINGS_MAX_5705;
2269 		cidp->tx_rings = BGE_SEND_RINGS_MAX_5705;
2270 		cidp->statistic_type = BGE_STAT_REG;
2271 		dev_ok = B_TRUE;
2272 		break;
2273 
2274 	case DEVICE_ID_5782:
2275 		/*
2276 		 * Apart from the label, we treat this as a 5705(?)
2277 		 */
2278 		cidp->chip_label = 5782;
2279 		cidp->mbuf_lo_water_rdma = RDMA_MBUF_LOWAT_5705;
2280 		cidp->mbuf_lo_water_rmac = MAC_RX_MBUF_LOWAT_5705;
2281 		cidp->mbuf_hi_water = MBUF_HIWAT_5705;
2282 		cidp->mbuf_base = bge_mbuf_pool_base_5705;
2283 		cidp->mbuf_length = bge_mbuf_pool_len_5705;
2284 		cidp->recv_slots = BGE_RECV_SLOTS_5705;
2285 		cidp->rx_rings = BGE_RECV_RINGS_MAX_5705;
2286 		cidp->tx_rings = BGE_SEND_RINGS_MAX_5705;
2287 		cidp->flags |= CHIP_FLAG_NO_JUMBO;
2288 		cidp->flags |= CHIP_FLAG_PARTIAL_CSUM;
2289 		cidp->statistic_type = BGE_STAT_REG;
2290 		dev_ok = B_TRUE;
2291 		break;
2292 
2293 	case DEVICE_ID_5788:
2294 		/*
2295 		 * Apart from the label, we treat this as a 5705(?)
2296 		 */
2297 		cidp->chip_label = 5788;
2298 		cidp->mbuf_lo_water_rdma = RDMA_MBUF_LOWAT_5705;
2299 		cidp->mbuf_lo_water_rmac = MAC_RX_MBUF_LOWAT_5705;
2300 		cidp->mbuf_hi_water = MBUF_HIWAT_5705;
2301 		cidp->mbuf_base = bge_mbuf_pool_base_5705;
2302 		cidp->mbuf_length = bge_mbuf_pool_len_5705;
2303 		cidp->recv_slots = BGE_RECV_SLOTS_5705;
2304 		cidp->rx_rings = BGE_RECV_RINGS_MAX_5705;
2305 		cidp->tx_rings = BGE_SEND_RINGS_MAX_5705;
2306 		cidp->statistic_type = BGE_STAT_REG;
2307 		cidp->flags |= CHIP_FLAG_NO_JUMBO;
2308 		dev_ok = B_TRUE;
2309 		break;
2310 
2311 	case DEVICE_ID_5714C:
2312 		if (cidp->revision >= REVISION_ID_5714_A2)
2313 			cidp->msi_enabled = bge_enable_msi;
2314 		/* FALLTHRU */
2315 	case DEVICE_ID_5714S:
2316 		cidp->chip_label = 5714;
2317 		cidp->mbuf_lo_water_rdma = RDMA_MBUF_LOWAT_5705;
2318 		cidp->mbuf_lo_water_rmac = MAC_RX_MBUF_LOWAT_5705;
2319 		cidp->mbuf_hi_water = MBUF_HIWAT_5705;
2320 		cidp->mbuf_base = bge_mbuf_pool_base_5721;
2321 		cidp->mbuf_length = bge_mbuf_pool_len_5721;
2322 		cidp->recv_slots = BGE_RECV_SLOTS_5721;
2323 		cidp->bge_dma_rwctrl = bge_dma_rwctrl_5714;
2324 		cidp->bge_mlcr_default = bge_mlcr_default_5714;
2325 		cidp->rx_rings = BGE_RECV_RINGS_MAX_5705;
2326 		cidp->tx_rings = BGE_SEND_RINGS_MAX_5705;
2327 		cidp->pci_type = BGE_PCI_E;
2328 		cidp->statistic_type = BGE_STAT_REG;
2329 		dev_ok = B_TRUE;
2330 		break;
2331 
2332 	case DEVICE_ID_5715C:
2333 	case DEVICE_ID_5715S:
2334 		cidp->chip_label = 5715;
2335 		cidp->mbuf_lo_water_rdma = RDMA_MBUF_LOWAT_5705;
2336 		cidp->mbuf_lo_water_rmac = MAC_RX_MBUF_LOWAT_5705;
2337 		cidp->mbuf_hi_water = MBUF_HIWAT_5705;
2338 		cidp->mbuf_base = bge_mbuf_pool_base_5721;
2339 		cidp->mbuf_length = bge_mbuf_pool_len_5721;
2340 		cidp->recv_slots = BGE_RECV_SLOTS_5721;
2341 		cidp->bge_dma_rwctrl = bge_dma_rwctrl_5715;
2342 		cidp->bge_mlcr_default = bge_mlcr_default_5714;
2343 		cidp->rx_rings = BGE_RECV_RINGS_MAX_5705;
2344 		cidp->tx_rings = BGE_SEND_RINGS_MAX_5705;
2345 		cidp->pci_type = BGE_PCI_E;
2346 		cidp->statistic_type = BGE_STAT_REG;
2347 		if (cidp->revision >= REVISION_ID_5715_A2)
2348 			cidp->msi_enabled = bge_enable_msi;
2349 		dev_ok = B_TRUE;
2350 		break;
2351 
2352 	case DEVICE_ID_5721:
2353 		cidp->chip_label = 5721;
2354 		cidp->mbuf_lo_water_rdma = RDMA_MBUF_LOWAT_5705;
2355 		cidp->mbuf_lo_water_rmac = MAC_RX_MBUF_LOWAT_5705;
2356 		cidp->mbuf_hi_water = MBUF_HIWAT_5705;
2357 		cidp->mbuf_base = bge_mbuf_pool_base_5721;
2358 		cidp->mbuf_length = bge_mbuf_pool_len_5721;
2359 		cidp->recv_slots = BGE_RECV_SLOTS_5721;
2360 		cidp->bge_dma_rwctrl = bge_dma_rwctrl_5721;
2361 		cidp->rx_rings = BGE_RECV_RINGS_MAX_5705;
2362 		cidp->tx_rings = BGE_SEND_RINGS_MAX_5705;
2363 		cidp->pci_type = BGE_PCI_E;
2364 		cidp->statistic_type = BGE_STAT_REG;
2365 		cidp->flags |= CHIP_FLAG_NO_JUMBO;
2366 		dev_ok = B_TRUE;
2367 		break;
2368 
2369 	case DEVICE_ID_5722:
2370 		cidp->chip_label = 5722;
2371 		cidp->pci_type = BGE_PCI_E;
2372 		cidp->mbuf_lo_water_rdma = RDMA_MBUF_LOWAT_5705;
2373 		cidp->mbuf_lo_water_rmac = MAC_RX_MBUF_LOWAT_5705;
2374 		cidp->mbuf_hi_water = MBUF_HIWAT_5705;
2375 		cidp->mbuf_base = bge_mbuf_pool_base_5705;
2376 		cidp->mbuf_length = bge_mbuf_pool_len_5705;
2377 		cidp->recv_slots = BGE_RECV_SLOTS_5705;
2378 		cidp->bge_mlcr_default |= MLCR_MISC_PINS_OUTPUT_ENABLE_1;
2379 		cidp->rx_rings = BGE_RECV_RINGS_MAX_5705;
2380 		cidp->tx_rings = BGE_SEND_RINGS_MAX_5705;
2381 		cidp->flags |= CHIP_FLAG_NO_JUMBO;
2382 		cidp->statistic_type = BGE_STAT_REG;
2383 		dev_ok = B_TRUE;
2384 		break;
2385 
2386 	case DEVICE_ID_5751:
2387 	case DEVICE_ID_5751M:
2388 		cidp->chip_label = 5751;
2389 		cidp->mbuf_lo_water_rdma = RDMA_MBUF_LOWAT_5705;
2390 		cidp->mbuf_lo_water_rmac = MAC_RX_MBUF_LOWAT_5705;
2391 		cidp->mbuf_hi_water = MBUF_HIWAT_5705;
2392 		cidp->mbuf_base = bge_mbuf_pool_base_5721;
2393 		cidp->mbuf_length = bge_mbuf_pool_len_5721;
2394 		cidp->recv_slots = BGE_RECV_SLOTS_5721;
2395 		cidp->bge_dma_rwctrl = bge_dma_rwctrl_5721;
2396 		cidp->rx_rings = BGE_RECV_RINGS_MAX_5705;
2397 		cidp->tx_rings = BGE_SEND_RINGS_MAX_5705;
2398 		cidp->pci_type = BGE_PCI_E;
2399 		cidp->statistic_type = BGE_STAT_REG;
2400 		cidp->flags |= CHIP_FLAG_NO_JUMBO;
2401 		dev_ok = B_TRUE;
2402 		break;
2403 
2404 	case DEVICE_ID_5752:
2405 	case DEVICE_ID_5752M:
2406 		cidp->chip_label = 5752;
2407 		cidp->mbuf_lo_water_rdma = RDMA_MBUF_LOWAT_5705;
2408 		cidp->mbuf_lo_water_rmac = MAC_RX_MBUF_LOWAT_5705;
2409 		cidp->mbuf_hi_water = MBUF_HIWAT_5705;
2410 		cidp->mbuf_base = bge_mbuf_pool_base_5721;
2411 		cidp->mbuf_length = bge_mbuf_pool_len_5721;
2412 		cidp->recv_slots = BGE_RECV_SLOTS_5721;
2413 		cidp->bge_dma_rwctrl = bge_dma_rwctrl_5721;
2414 		cidp->rx_rings = BGE_RECV_RINGS_MAX_5705;
2415 		cidp->tx_rings = BGE_SEND_RINGS_MAX_5705;
2416 		cidp->pci_type = BGE_PCI_E;
2417 		cidp->statistic_type = BGE_STAT_REG;
2418 		cidp->flags |= CHIP_FLAG_NO_JUMBO;
2419 		dev_ok = B_TRUE;
2420 		break;
2421 
2422 	case DEVICE_ID_5789:
2423 		cidp->chip_label = 5789;
2424 		cidp->mbuf_base = bge_mbuf_pool_base_5721;
2425 		cidp->mbuf_length = bge_mbuf_pool_len_5721;
2426 		cidp->recv_slots = BGE_RECV_SLOTS_5721;
2427 		cidp->bge_dma_rwctrl = bge_dma_rwctrl_5721;
2428 		cidp->rx_rings = BGE_RECV_RINGS_MAX_5705;
2429 		cidp->tx_rings = BGE_RECV_RINGS_MAX_5705;
2430 		cidp->pci_type = BGE_PCI_E;
2431 		cidp->statistic_type = BGE_STAT_REG;
2432 		cidp->flags |= CHIP_FLAG_PARTIAL_CSUM;
2433 		cidp->flags |= CHIP_FLAG_NO_JUMBO;
2434 		cidp->msi_enabled = B_TRUE;
2435 		dev_ok = B_TRUE;
2436 		break;
2437 
2438 	}
2439 
2440 	/*
2441 	 * Setup the default jumbo parameter.
2442 	 */
2443 	cidp->ethmax_size = ETHERMAX;
2444 	cidp->snd_buff_size = BGE_SEND_BUFF_SIZE_DEFAULT;
2445 	cidp->std_buf_size = BGE_STD_BUFF_SIZE;
2446 
2447 	/*
2448 	 * If jumbo is enabled and this kind of chipset supports jumbo feature,
2449 	 * setup below jumbo specific parameters.
2450 	 *
2451 	 * For BCM5714/5715, there is only one standard receive ring. So the
2452 	 * std buffer size should be set to BGE_JUMBO_BUFF_SIZE when jumbo
2453 	 * feature is enabled.
2454 	 */
2455 	if (!(cidp->flags & CHIP_FLAG_NO_JUMBO) &&
2456 	    (cidp->default_mtu > BGE_DEFAULT_MTU)) {
2457 		if (DEVICE_5714_SERIES_CHIPSETS(bgep)) {
2458 			cidp->mbuf_lo_water_rdma =
2459 			    RDMA_MBUF_LOWAT_5714_JUMBO;
2460 			cidp->mbuf_lo_water_rmac =
2461 			    MAC_RX_MBUF_LOWAT_5714_JUMBO;
2462 			cidp->mbuf_hi_water = MBUF_HIWAT_5714_JUMBO;
2463 			cidp->jumbo_slots = 0;
2464 			cidp->std_buf_size = BGE_JUMBO_BUFF_SIZE;
2465 		} else {
2466 			cidp->mbuf_lo_water_rdma =
2467 			    RDMA_MBUF_LOWAT_JUMBO;
2468 			cidp->mbuf_lo_water_rmac =
2469 			    MAC_RX_MBUF_LOWAT_JUMBO;
2470 			cidp->mbuf_hi_water = MBUF_HIWAT_JUMBO;
2471 			cidp->jumbo_slots = BGE_JUMBO_SLOTS_USED;
2472 		}
2473 		cidp->recv_jumbo_size = BGE_JUMBO_BUFF_SIZE;
2474 		cidp->snd_buff_size = BGE_SEND_BUFF_SIZE_JUMBO;
2475 		cidp->ethmax_size = cidp->default_mtu +
2476 		    sizeof (struct ether_header);
2477 	}
2478 
2479 	/*
2480 	 * Identify the NV memory type: SEEPROM or Flash?
2481 	 */
2482 	cidp->nvtype = bge_nvmem_id(bgep);
2483 
2484 	/*
2485 	 * Now, we want to check whether this device is part of a
2486 	 * supported subsystem (e.g., on the motherboard of a Sun
2487 	 * branded platform).
2488 	 *
2489 	 * Rule 1: If the Subsystem Vendor ID is "Sun", then it's OK ;-)
2490 	 */
2491 	if (cidp->subven == VENDOR_ID_SUN)
2492 		sys_ok = B_TRUE;
2493 
2494 	/*
2495 	 * Rule 2: If it's on the list on known subsystems, then it's OK.
2496 	 * Note: 0x14e41647 should *not* appear in the list, but the code
2497 	 * doesn't enforce that.
2498 	 */
2499 	err = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, bgep->devinfo,
2500 	    DDI_PROP_DONTPASS, knownids_propname, &ids, &i);
2501 	if (err == DDI_PROP_SUCCESS) {
2502 		/*
2503 		 * Got the list; scan for a matching subsystem vendor/device
2504 		 */
2505 		subid = (cidp->subven << 16) | cidp->subdev;
2506 		while (i--)
2507 			if (ids[i] == subid)
2508 				sys_ok = B_TRUE;
2509 		ddi_prop_free(ids);
2510 	}
2511 
2512 	/*
2513 	 * Rule 3: If it's a Taco/ENWS motherboard device, then it's OK
2514 	 *
2515 	 * Unfortunately, early SunBlade 1500s and 2500s didn't reprogram
2516 	 * the Subsystem Vendor ID, so it defaults to Broadcom.  Therefore,
2517 	 * we have to check specially for the exact device paths to the
2518 	 * motherboard devices on those platforms ;-(
2519 	 *
2520 	 * Note: we can't just use the "supported-subsystems" mechanism
2521 	 * above, because the entry would have to be 0x14e41647 -- which
2522 	 * would then accept *any* plugin card that *didn't* contain a
2523 	 * (valid) SEEPROM ;-(
2524 	 */
2525 	sysname = ddi_node_name(ddi_root_node());
2526 	devname = ddi_pathname(bgep->devinfo, buf);
2527 	ASSERT(strlen(devname) > 0);
2528 	if (strcmp(sysname, "SUNW,Sun-Blade-1500") == 0)	/* Taco */
2529 		if (strcmp(devname, "/pci@1f,700000/network@2") == 0)
2530 			sys_ok = B_TRUE;
2531 	if (strcmp(sysname, "SUNW,Sun-Blade-2500") == 0)	/* ENWS */
2532 		if (strcmp(devname, "/pci@1c,600000/network@3") == 0)
2533 			sys_ok = B_TRUE;
2534 
2535 	/*
2536 	 * Now check what we've discovered: is this truly a supported
2537 	 * chip on (the motherboard of) a supported platform?
2538 	 *
2539 	 * Possible problems here:
2540 	 * 1)	it's a completely unheard-of chip
2541 	 * 2)	it's a recognised but unsupported chip (e.g. 5701, 5703C-A0)
2542 	 * 3)	it's a chip we would support if it were on the motherboard
2543 	 *	of a Sun platform, but this one isn't ;-(
2544 	 */
2545 	if (cidp->chip_label == 0)
2546 		bge_problem(bgep,
2547 		    "Device 'pci%04x,%04x' not recognized (%d?)",
2548 		    cidp->vendor, cidp->device, cidp->device);
2549 	else if (!dev_ok)
2550 		bge_problem(bgep,
2551 		    "Device 'pci%04x,%04x' (%d) revision %d not supported",
2552 		    cidp->vendor, cidp->device, cidp->chip_label,
2553 		    cidp->revision);
2554 #if	BGE_DEBUGGING
2555 	else if (!sys_ok)
2556 		bge_problem(bgep,
2557 		    "%d-based subsystem 'pci%04x,%04x' not validated",
2558 		    cidp->chip_label, cidp->subven, cidp->subdev);
2559 #endif
2560 	else
2561 		cidp->flags |= CHIP_FLAG_SUPPORTED;
2562 	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK)
2563 		return (EIO);
2564 	return (0);
2565 }
2566 
2567 void
2568 bge_chip_msi_trig(bge_t *bgep)
2569 {
2570 	uint32_t	regval;
2571 
2572 	regval = bgep->param_msi_cnt<<4;
2573 	bge_reg_set32(bgep, HOST_COALESCE_MODE_REG, regval);
2574 	BGE_DEBUG(("bge_chip_msi_trig:data = %d", regval));
2575 }
2576 
2577 /*
2578  * Various registers that control the chip's internal engines (state
2579  * machines) have a <reset> and <enable> bits (fortunately, in the
2580  * same place in each such register :-).
2581  *
2582  * To reset the state machine, the <reset> bit must be written with 1;
2583  * it will then read back as 1 while the reset is in progress, but
2584  * self-clear to 0 when the reset completes.
2585  *
2586  * To enable a state machine, one must set the <enable> bit, which
2587  * will continue to read back as 0 until the state machine is running.
2588  *
2589  * To disable a state machine, the <enable> bit must be cleared, but
2590  * it will continue to read back as 1 until the state machine actually
2591  * stops.
2592  *
2593  * This routine implements polling for completion of a reset, enable
2594  * or disable operation, returning B_TRUE on success (bit reached the
2595  * required state) or B_FALSE on timeout (200*100us == 20ms).
2596  */
2597 static boolean_t bge_chip_poll_engine(bge_t *bgep, bge_regno_t regno,
2598 					uint32_t mask, uint32_t val);
2599 #pragma	no_inline(bge_chip_poll_engine)
2600 
2601 static boolean_t
2602 bge_chip_poll_engine(bge_t *bgep, bge_regno_t regno,
2603 	uint32_t mask, uint32_t val)
2604 {
2605 	uint32_t regval;
2606 	uint32_t n;
2607 
2608 	BGE_TRACE(("bge_chip_poll_engine($%p, 0x%lx, 0x%x, 0x%x)",
2609 	    (void *)bgep, regno, mask, val));
2610 
2611 	for (n = 200; n; --n) {
2612 		regval = bge_reg_get32(bgep, regno);
2613 		if ((regval & mask) == val)
2614 			return (B_TRUE);
2615 		drv_usecwait(100);
2616 	}
2617 
2618 	bge_problem(bgep, "bge_chip_poll_engine failed: regno = 0x%lx", regno);
2619 	bge_fm_ereport(bgep, DDI_FM_DEVICE_NO_RESPONSE);
2620 	return (B_FALSE);
2621 }
2622 
2623 /*
2624  * Various registers that control the chip's internal engines (state
2625  * machines) have a <reset> bit (fortunately, in the same place in
2626  * each such register :-).  To reset the state machine, this bit must
2627  * be written with 1; it will then read back as 1 while the reset is
2628  * in progress, but self-clear to 0 when the reset completes.
2629  *
2630  * This code sets the bit, then polls for it to read back as zero.
2631  * The return value is B_TRUE on success (reset bit cleared itself),
2632  * or B_FALSE if the state machine didn't recover :(
2633  *
2634  * NOTE: the Core reset is similar to other resets, except that we
2635  * can't poll for completion, since the Core reset disables memory
2636  * access!  So we just have to assume that it will all complete in
2637  * 100us.  See Broadcom document 570X-PG102-R, p102, steps 4-5.
2638  */
static boolean_t bge_chip_reset_engine(bge_t *bgep, bge_regno_t regno);
#pragma	no_inline(bge_chip_reset_engine)

static boolean_t
bge_chip_reset_engine(bge_t *bgep, bge_regno_t regno)
{
	uint32_t regval;
	uint32_t val32;

	regval = bge_reg_get32(bgep, regno);

	BGE_TRACE(("bge_chip_reset_engine($%p, 0x%lx)",
	    (void *)bgep, regno));
	BGE_DEBUG(("bge_chip_reset_engine: 0x%lx before reset = 0x%08x",
	    regno, regval));

	/* Set the self-clearing <reset> bit in the value to write back */
	regval |= STATE_MACHINE_RESET_BIT;

	switch (regno) {
	case MISC_CONFIG_REG:
		/*
		 * BCM5714/5721/5751 pcie chip special case. In order to avoid
		 * resetting PCIE block and bringing PCIE link down, bit 29
		 * in the register needs to be set first, and then set it again
		 * while the reset bit is written.
		 * See:P500 of 57xx-PG102-RDS.pdf.
		 */
		if (DEVICE_5705_SERIES_CHIPSETS(bgep)||
		    DEVICE_5717_SERIES_CHIPSETS(bgep)||
		    DEVICE_5721_SERIES_CHIPSETS(bgep)||
		    DEVICE_5723_SERIES_CHIPSETS(bgep)||
		    DEVICE_5714_SERIES_CHIPSETS(bgep)||
		    DEVICE_5906_SERIES_CHIPSETS(bgep)) {
			regval |= MISC_CONFIG_GPHY_POWERDOWN_OVERRIDE;
			if (bgep->chipid.pci_type == BGE_PCI_E) {
				/*
				 * A0-stepping PCIe parts need a PHY
				 * scrambler and clock-control workaround
				 * before the core reset is issued.
				 */
				if (bgep->chipid.asic_rev ==
				    MHCR_CHIP_REV_5751_A0 ||
				    bgep->chipid.asic_rev ==
				    MHCR_CHIP_REV_5721_A0 ||
				    bgep->chipid.asic_rev ==
				    MHCR_CHIP_REV_5755_A0) {
					val32 = bge_reg_get32(bgep,
					    PHY_TEST_CTRL_REG);
					if (val32 == (PHY_PCIE_SCRAM_MODE |
					    PHY_PCIE_LTASS_MODE))
						bge_reg_put32(bgep,
						    PHY_TEST_CTRL_REG,
						    PHY_PCIE_SCRAM_MODE);
					val32 = pci_config_get32
					    (bgep->cfg_handle,
					    PCI_CONF_BGE_CLKCTL);
					val32 |= CLKCTL_PCIE_A0_FIX;
					pci_config_put32(bgep->cfg_handle,
					    PCI_CONF_BGE_CLKCTL, val32);
				}
				bge_reg_set32(bgep, regno,
				    MISC_CONFIG_GRC_RESET_DISABLE);
				regval |= MISC_CONFIG_GRC_RESET_DISABLE;
			}
		}

		/*
		 * Special case - causes Core reset
		 *
		 * On SPARC v9 we want to ensure that we don't start
		 * timing until the I/O access has actually reached
		 * the chip, otherwise we might make the next access
		 * too early.  And we can't just force the write out
		 * by following it with a read (even to config space)
		 * because that would cause the fault we're trying
		 * to avoid.  Hence the need for membar_sync() here.
		 */
		ddi_put32(bgep->io_handle, PIO_ADDR(bgep, regno), regval);
#ifdef	__sparcv9
		membar_sync();
#endif	/* __sparcv9 */
		/*
		 * On some platforms,system need about 300us for
		 * link setup.
		 */
		drv_usecwait(300);
		if (DEVICE_5906_SERIES_CHIPSETS(bgep)) {
			/* Release the 5906's internal VCPU after reset */
			bge_reg_set32(bgep, VCPU_STATUS_REG, VCPU_DRV_RESET);
			bge_reg_clr32(
			    bgep, VCPU_EXT_CTL, VCPU_EXT_CTL_HALF);
		}

		if (bgep->chipid.pci_type == BGE_PCI_E) {
			/* PCI-E device need more reset time */
			drv_usecwait(120000);

			/* Set PCIE max payload size and clear error status. */
			if ((bgep->chipid.chip_label == 5721) ||
			    (bgep->chipid.chip_label == 5751) ||
			    (bgep->chipid.chip_label == 5752) ||
			    (bgep->chipid.chip_label == 5789) ||
			    (bgep->chipid.chip_label == 5906)) {
				pci_config_put16(bgep->cfg_handle,
				    PCI_CONF_DEV_CTRL, READ_REQ_SIZE_MAX);
				pci_config_put16(bgep->cfg_handle,
				    PCI_CONF_DEV_STUS, DEVICE_ERROR_STUS);
			}

			/* 5723/5761 keep these registers at a different offset */
			if ((bgep->chipid.chip_label == 5723) ||
			    (bgep->chipid.chip_label == 5761)) {
				pci_config_put16(bgep->cfg_handle,
				    PCI_CONF_DEV_CTRL_5723, READ_REQ_SIZE_MAX);
				pci_config_put16(bgep->cfg_handle,
				    PCI_CONF_DEV_STUS_5723, DEVICE_ERROR_STUS);
			}
		}

		BGE_PCICHK(bgep);
		return (B_TRUE);

	default:
		/*
		 * Ordinary engine register: write the reset bit, then
		 * poll for it to self-clear (see bge_chip_poll_engine).
		 */
		bge_reg_put32(bgep, regno, regval);
		return (bge_chip_poll_engine(bgep, regno,
		    STATE_MACHINE_RESET_BIT, 0));
	}
}
2760 
2761 /*
2762  * Various registers that control the chip's internal engines (state
2763  * machines) have an <enable> bit (fortunately, in the same place in
2764  * each such register :-).  To stop the state machine, this bit must
2765  * be written with 0, then polled to see when the state machine has
2766  * actually stopped.
2767  *
2768  * The return value is B_TRUE on success (enable bit cleared), or
2769  * B_FALSE if the state machine didn't stop :(
2770  */
2771 static boolean_t bge_chip_disable_engine(bge_t *bgep, bge_regno_t regno,
2772 						uint32_t morebits);
2773 #pragma	no_inline(bge_chip_disable_engine)
2774 
2775 static boolean_t
2776 bge_chip_disable_engine(bge_t *bgep, bge_regno_t regno, uint32_t morebits)
2777 {
2778 	uint32_t regval;
2779 
2780 	BGE_TRACE(("bge_chip_disable_engine($%p, 0x%lx, 0x%x)",
2781 	    (void *)bgep, regno, morebits));
2782 
2783 	switch (regno) {
2784 	case FTQ_RESET_REG:
2785 		/*
2786 		 * For Schumacher's bugfix CR6490108
2787 		 */
2788 #ifdef BGE_IPMI_ASF
2789 #ifdef BGE_NETCONSOLE
2790 		if (bgep->asf_enabled)
2791 			return (B_TRUE);
2792 #endif
2793 #endif
2794 		/*
2795 		 * Not quite like the others; it doesn't
2796 		 * have an <enable> bit, but instead we
2797 		 * have to set and then clear all the bits
2798 		 */
2799 		bge_reg_put32(bgep, regno, ~(uint32_t)0);
2800 		drv_usecwait(100);
2801 		bge_reg_put32(bgep, regno, 0);
2802 		return (B_TRUE);
2803 
2804 	default:
2805 		regval = bge_reg_get32(bgep, regno);
2806 		regval &= ~STATE_MACHINE_ENABLE_BIT;
2807 		regval &= ~morebits;
2808 		bge_reg_put32(bgep, regno, regval);
2809 		return (bge_chip_poll_engine(bgep, regno,
2810 		    STATE_MACHINE_ENABLE_BIT, 0));
2811 	}
2812 }
2813 
2814 /*
2815  * Various registers that control the chip's internal engines (state
2816  * machines) have an <enable> bit (fortunately, in the same place in
2817  * each such register :-).  To start the state machine, this bit must
2818  * be written with 1, then polled to see when the state machine has
2819  * actually started.
2820  *
2821  * The return value is B_TRUE on success (enable bit set), or
2822  * B_FALSE if the state machine didn't start :(
2823  */
2824 static boolean_t bge_chip_enable_engine(bge_t *bgep, bge_regno_t regno,
2825 					uint32_t morebits);
2826 #pragma	no_inline(bge_chip_enable_engine)
2827 
2828 static boolean_t
2829 bge_chip_enable_engine(bge_t *bgep, bge_regno_t regno, uint32_t morebits)
2830 {
2831 	uint32_t regval;
2832 
2833 	BGE_TRACE(("bge_chip_enable_engine($%p, 0x%lx, 0x%x)",
2834 	    (void *)bgep, regno, morebits));
2835 
2836 	switch (regno) {
2837 	case FTQ_RESET_REG:
2838 #ifdef BGE_IPMI_ASF
2839 #ifdef BGE_NETCONSOLE
2840 		if (bgep->asf_enabled)
2841 			return (B_TRUE);
2842 #endif
2843 #endif
2844 		/*
2845 		 * Not quite like the others; it doesn't
2846 		 * have an <enable> bit, but instead we
2847 		 * have to set and then clear all the bits
2848 		 */
2849 		bge_reg_put32(bgep, regno, ~(uint32_t)0);
2850 		drv_usecwait(100);
2851 		bge_reg_put32(bgep, regno, 0);
2852 		return (B_TRUE);
2853 
2854 	default:
2855 		regval = bge_reg_get32(bgep, regno);
2856 		regval |= STATE_MACHINE_ENABLE_BIT;
2857 		regval |= morebits;
2858 		bge_reg_put32(bgep, regno, regval);
2859 		return (bge_chip_poll_engine(bgep, regno,
2860 		    STATE_MACHINE_ENABLE_BIT, STATE_MACHINE_ENABLE_BIT));
2861 	}
2862 }
2863 
2864 /*
2865  * Reprogram the Ethernet, Transmit, and Receive MAC
2866  * modes to match the param_* variables
2867  */
2868 void bge_sync_mac_modes(bge_t *bgep);
2869 #pragma	no_inline(bge_sync_mac_modes)
2870 
2871 void
2872 bge_sync_mac_modes(bge_t *bgep)
2873 {
2874 	uint32_t macmode;
2875 	uint32_t regval;
2876 
2877 	ASSERT(mutex_owned(bgep->genlock));
2878 
2879 	/*
2880 	 * Reprogram the Ethernet MAC mode ...
2881 	 */
2882 	macmode = regval = bge_reg_get32(bgep, ETHERNET_MAC_MODE_REG);
2883 	if ((bgep->chipid.flags & CHIP_FLAG_SERDES) &&
2884 	    (bgep->param_loop_mode != BGE_LOOP_INTERNAL_MAC))
2885 		if (DEVICE_5714_SERIES_CHIPSETS(bgep))
2886 			macmode |= ETHERNET_MODE_LINK_POLARITY;
2887 		else
2888 			macmode &= ~ETHERNET_MODE_LINK_POLARITY;
2889 	else
2890 		macmode |= ETHERNET_MODE_LINK_POLARITY;
2891 	macmode &= ~ETHERNET_MODE_PORTMODE_MASK;
2892 	if ((bgep->chipid.flags & CHIP_FLAG_SERDES) &&
2893 	    (bgep->param_loop_mode != BGE_LOOP_INTERNAL_MAC)) {
2894 		if (DEVICE_5714_SERIES_CHIPSETS(bgep))
2895 			macmode |= ETHERNET_MODE_PORTMODE_GMII;
2896 		else
2897 			macmode |= ETHERNET_MODE_PORTMODE_TBI;
2898 	} else if (bgep->param_link_speed == 10 ||
2899 	    bgep->param_link_speed == 100)
2900 		macmode |= ETHERNET_MODE_PORTMODE_MII;
2901 	else
2902 		macmode |= ETHERNET_MODE_PORTMODE_GMII;
2903 	if (bgep->param_link_duplex == LINK_DUPLEX_HALF)
2904 		macmode |= ETHERNET_MODE_HALF_DUPLEX;
2905 	else
2906 		macmode &= ~ETHERNET_MODE_HALF_DUPLEX;
2907 	if (bgep->param_loop_mode == BGE_LOOP_INTERNAL_MAC)
2908 		macmode |= ETHERNET_MODE_MAC_LOOPBACK;
2909 	else
2910 		macmode &= ~ETHERNET_MODE_MAC_LOOPBACK;
2911 	bge_reg_put32(bgep, ETHERNET_MAC_MODE_REG, macmode);
2912 	BGE_DEBUG(("bge_sync_mac_modes($%p) Ethernet MAC mode 0x%x => 0x%x",
2913 	    (void *)bgep, regval, macmode));
2914 
2915 	/*
2916 	 * ... the Transmit MAC mode ...
2917 	 */
2918 	macmode = regval = bge_reg_get32(bgep, TRANSMIT_MAC_MODE_REG);
2919 	if (bgep->param_link_tx_pause)
2920 		macmode |= TRANSMIT_MODE_FLOW_CONTROL;
2921 	else
2922 		macmode &= ~TRANSMIT_MODE_FLOW_CONTROL;
2923 	bge_reg_put32(bgep, TRANSMIT_MAC_MODE_REG, macmode);
2924 	BGE_DEBUG(("bge_sync_mac_modes($%p) Transmit MAC mode 0x%x => 0x%x",
2925 	    (void *)bgep, regval, macmode));
2926 
2927 	/*
2928 	 * ... and the Receive MAC mode
2929 	 */
2930 	macmode = regval = bge_reg_get32(bgep, RECEIVE_MAC_MODE_REG);
2931 	if (bgep->param_link_rx_pause)
2932 		macmode |= RECEIVE_MODE_FLOW_CONTROL;
2933 	else
2934 		macmode &= ~RECEIVE_MODE_FLOW_CONTROL;
2935 	bge_reg_put32(bgep, RECEIVE_MAC_MODE_REG, macmode);
2936 	BGE_DEBUG(("bge_sync_mac_modes($%p) Receive MAC mode 0x%x => 0x%x",
2937 	    (void *)bgep, regval, macmode));
2938 
2939 	/*
2940 	 * For BCM5785, we need to configure the link status in the MI Status
2941 	 * register with a write command when auto-polling is disabled.
2942 	 */
2943 	if (bgep->chipid.device == DEVICE_ID_5785)
2944 		if (bgep->param_link_speed == 10)
2945 			bge_reg_put32(bgep, MI_STATUS_REG, MI_STATUS_LINK
2946 			    | MI_STATUS_10MBPS);
2947 		else
2948 			bge_reg_put32(bgep, MI_STATUS_REG, MI_STATUS_LINK);
2949 }
2950 
2951 /*
2952  * bge_chip_sync() -- program the chip with the unicast MAC address,
2953  * the multicast hash table, the required level of promiscuity, and
2954  * the current loopback mode ...
2955  */
2956 #ifdef BGE_IPMI_ASF
2957 int bge_chip_sync(bge_t *bgep, boolean_t asf_keeplive);
2958 #else
2959 int bge_chip_sync(bge_t *bgep);
2960 #endif
2961 #pragma	no_inline(bge_chip_sync)
2962 
int
#ifdef BGE_IPMI_ASF
bge_chip_sync(bge_t *bgep, boolean_t asf_keeplive)
#else
bge_chip_sync(bge_t *bgep)
#endif
{
	void (*opfn)(bge_t *bgep, bge_regno_t reg, uint32_t bits);
	boolean_t promisc;
	uint64_t macaddr;
	uint32_t fill = 0;
	int i, j;
	int retval = DDI_SUCCESS;

	BGE_TRACE(("bge_chip_sync($%p)",
	    (void *)bgep));

	/* Caller must hold the general lock for the whole sync */
	ASSERT(mutex_owned(bgep->genlock));

	/*
	 * In promiscuous mode the hash table is filled with all-ones
	 * (so every multicast address "matches"); otherwise <fill> is
	 * zero and only the bits in mcast_hash[] are programmed.
	 */
	promisc = B_FALSE;
	fill = ~(uint32_t)0;

	if (bgep->promisc)
		promisc = B_TRUE;
	else
		fill = (uint32_t)0;

	/*
	 * If the TX/RX MAC engines are already running, we should stop
	 * them (and reset the RX engine) before changing the parameters.
	 * If they're not running, this will have no effect ...
	 *
	 * NOTE: this is currently disabled by default because stopping
	 * and restarting the Tx engine may cause an outgoing packet in
	 * transit to be truncated.  Also, stopping and restarting the
	 * Rx engine seems to not work correctly on the 5705.  Testing
	 * has not (yet!) revealed any problems with NOT stopping and
	 * restarting these engines (and Broadcom say their drivers don't
	 * do this), but if it is found to cause problems, this variable
	 * can be patched to re-enable the old behaviour ...
	 */
	if (bge_stop_start_on_sync) {
#ifdef BGE_IPMI_ASF
		/*
		 * With ASF active the VLAN-tag-keep bit must be left
		 * alone, so disable the engine without extra bits.
		 */
		if (!bgep->asf_enabled) {
			if (!bge_chip_disable_engine(bgep,
			    RECEIVE_MAC_MODE_REG, RECEIVE_MODE_KEEP_VLAN_TAG))
				retval = DDI_FAILURE;
		} else {
			if (!bge_chip_disable_engine(bgep,
			    RECEIVE_MAC_MODE_REG, 0))
				retval = DDI_FAILURE;
		}
#else
		if (!bge_chip_disable_engine(bgep, RECEIVE_MAC_MODE_REG,
		    RECEIVE_MODE_KEEP_VLAN_TAG))
			retval = DDI_FAILURE;
#endif
		if (!bge_chip_disable_engine(bgep, TRANSMIT_MAC_MODE_REG, 0))
			retval = DDI_FAILURE;
		if (!bge_chip_reset_engine(bgep, RECEIVE_MAC_MODE_REG))
			retval = DDI_FAILURE;
	}

	/*
	 * Reprogram the hashed multicast address table:
	 * first clear it, then write the hash words OR'd with <fill>.
	 */
	for (i = 0; i < BGE_HASH_TABLE_SIZE/32; ++i)
		bge_reg_put32(bgep, MAC_HASH_REG(i), 0);

	for (i = 0; i < BGE_HASH_TABLE_SIZE/32; ++i)
		bge_reg_put32(bgep, MAC_HASH_REG(i),
			bgep->mcast_hash[i] | fill);

#ifdef BGE_IPMI_ASF
	/* When ASF keep-alive is active, leave the MAC address alone */
	if (!bgep->asf_enabled || !asf_keeplive) {
#endif
		/*
		 * Transform the MAC address(es) from host to chip format, then
		 * reprogram the transmit random backoff seed and the unicast
		 * MAC address(es) ...
		 */
		for (j = 0; j < MAC_ADDRESS_REGS_MAX; j++) {
			for (i = 0, macaddr = 0ull;
			    i < ETHERADDRL; ++i) {
				macaddr <<= 8;
				macaddr |= bgep->curr_addr[j].addr[i];
			}
			/* accumulate the address into the backoff seed */
			fill += (macaddr >> 16) + (macaddr & 0xffffffff);
			bge_reg_put64(bgep, MAC_ADDRESS_REG(j), macaddr);

			BGE_DEBUG(("bge_chip_sync($%p) "
			    "setting MAC address %012llx",
			    (void *)bgep, macaddr));
		}
#ifdef BGE_IPMI_ASF
	}
#endif
	/*
	 * Set random seed of backoff interval
	 *   - Writing zero means no backoff interval
	 */
	fill = ((fill >> 20) + (fill >> 10) + fill) & 0x3ff;
	if (fill == 0)
		fill = 1;
	bge_reg_put32(bgep, MAC_TX_RANDOM_BACKOFF_REG, fill);

	/*
	 * Set or clear the PROMISCUOUS mode bit
	 */
	opfn = promisc ? bge_reg_set32 : bge_reg_clr32;
	(*opfn)(bgep, RECEIVE_MAC_MODE_REG, RECEIVE_MODE_PROMISCUOUS);

	/*
	 * Sync the rest of the MAC modes too ...
	 */
	bge_sync_mac_modes(bgep);

	/*
	 * Restart RX/TX MAC engines if required ...
	 */
	if (bgep->bge_chip_state == BGE_CHIP_RUNNING) {
		if (!bge_chip_enable_engine(bgep, TRANSMIT_MAC_MODE_REG, 0))
			retval = DDI_FAILURE;
#ifdef BGE_IPMI_ASF
		/* as above: don't set KEEP_VLAN_TAG when ASF is active */
		if (!bgep->asf_enabled) {
			if (!bge_chip_enable_engine(bgep,
			    RECEIVE_MAC_MODE_REG, RECEIVE_MODE_KEEP_VLAN_TAG))
				retval = DDI_FAILURE;
		} else {
			if (!bge_chip_enable_engine(bgep,
			    RECEIVE_MAC_MODE_REG, 0))
				retval = DDI_FAILURE;
		}
#else
		if (!bge_chip_enable_engine(bgep, RECEIVE_MAC_MODE_REG,
		    RECEIVE_MODE_KEEP_VLAN_TAG))
			retval = DDI_FAILURE;
#endif
	}
	return (retval);
}
3104 
3105 /*
3106  * This array defines the sequence of state machine control registers
3107  * in which the <enable> bit must be cleared to bring the chip to a
3108  * clean stop.  Taken from Broadcom document 570X-PG102-R, p116.
3109  */
static bge_regno_t shutdown_engine_regs[] = {
	/* Receive path, outermost first */
	RECEIVE_MAC_MODE_REG,
	RCV_BD_INITIATOR_MODE_REG,
	RCV_LIST_PLACEMENT_MODE_REG,
	RCV_LIST_SELECTOR_MODE_REG,		/* BCM5704 series only	*/
	RCV_DATA_BD_INITIATOR_MODE_REG,
	RCV_DATA_COMPLETION_MODE_REG,
	RCV_BD_COMPLETION_MODE_REG,

	/* Transmit path */
	SEND_BD_SELECTOR_MODE_REG,
	SEND_BD_INITIATOR_MODE_REG,
	SEND_DATA_INITIATOR_MODE_REG,
	READ_DMA_MODE_REG,
	SEND_DATA_COMPLETION_MODE_REG,
	DMA_COMPLETION_MODE_REG,		/* BCM5704 series only	*/
	SEND_BD_COMPLETION_MODE_REG,
	TRANSMIT_MAC_MODE_REG,

	/* Shared host-interface machinery, stopped last */
	HOST_COALESCE_MODE_REG,
	WRITE_DMA_MODE_REG,
	MBUF_CLUSTER_FREE_MODE_REG,		/* BCM5704 series only	*/
	FTQ_RESET_REG,		/* special - see code	*/
	BUFFER_MANAGER_MODE_REG,		/* BCM5704 series only	*/
	MEMORY_ARBITER_MODE_REG,		/* BCM5704 series only	*/
	BGE_REGNO_NONE		/* terminator		*/
};
3136 
3137 #ifndef __sparc
/*
 * DMA engines that must be stopped (without blocking or taking locks)
 * to quiesce the chip for fast reboot; see bge_chip_stop_nonblocking().
 */
static bge_regno_t quiesce_regs[] = {
	READ_DMA_MODE_REG,
	DMA_COMPLETION_MODE_REG,
	WRITE_DMA_MODE_REG,
	BGE_REGNO_NONE
};
3144 
3145 void bge_chip_stop_nonblocking(bge_t *bgep);
3146 #pragma no_inline(bge_chip_stop_nonblocking)
3147 
3148 /*
3149  * This function is called by bge_quiesce(). We
3150  * turn off all the DMA engines here.
3151  */
3152 void
3153 bge_chip_stop_nonblocking(bge_t *bgep)
3154 {
3155 	bge_regno_t *rbp;
3156 
3157 	/*
3158 	 * Flag that no more activity may be initiated
3159 	 */
3160 	bgep->progress &= ~PROGRESS_READY;
3161 
3162 	rbp = quiesce_regs;
3163 	while (*rbp != BGE_REGNO_NONE) {
3164 		(void) bge_chip_disable_engine(bgep, *rbp, 0);
3165 		++rbp;
3166 	}
3167 
3168 	bgep->bge_chip_state = BGE_CHIP_STOPPED;
3169 }
3170 
3171 #endif
3172 
3173 /*
3174  * bge_chip_stop() -- stop all chip processing
3175  *
3176  * If the <fault> parameter is B_TRUE, we're stopping the chip because
3177  * we've detected a problem internally; otherwise, this is a normal
3178  * (clean) stop (at user request i.e. the last STREAM has been closed).
3179  */
3180 void bge_chip_stop(bge_t *bgep, boolean_t fault);
3181 #pragma	no_inline(bge_chip_stop)
3182 
void
bge_chip_stop(bge_t *bgep, boolean_t fault)
{
	bge_regno_t regno;
	bge_regno_t *rbp;
	boolean_t ok;

	BGE_TRACE(("bge_chip_stop($%p)",
	    (void *)bgep));

	ASSERT(mutex_owned(bgep->genlock));

	rbp = shutdown_engine_regs;
	/*
	 * Walk the shutdown list, disabling each engine in turn.
	 *
	 * When shutting down the BCM5705/5788/5721/5751/5752/5714/5715
	 * chipsets (i.e. everything except the 5704 series), the buffer
	 * manager, memory arbiter, and the other "BCM5704 series only"
	 * engines must NOT be disabled, so those registers are skipped.
	 */
	for (ok = B_TRUE; (regno = *rbp) != BGE_REGNO_NONE; ++rbp) {
			if (DEVICE_5704_SERIES_CHIPSETS(bgep))
				ok &= bge_chip_disable_engine(bgep, regno, 0);
			else if ((regno != RCV_LIST_SELECTOR_MODE_REG) &&
			    (regno != DMA_COMPLETION_MODE_REG) &&
			    (regno != MBUF_CLUSTER_FREE_MODE_REG)&&
			    (regno != BUFFER_MANAGER_MODE_REG) &&
			    (regno != MEMORY_ARBITER_MODE_REG))
				ok &= bge_chip_disable_engine(bgep,
				    regno, 0);
	}

	/*
	 * If an engine failed to stop on a clean (non-fault) shutdown,
	 * tell the FM framework but keep going - service is unaffected.
	 */
	if (!ok && !fault)
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_UNAFFECTED);

	/*
	 * Finally, disable (all) MAC events & clear the MAC status
	 */
	bge_reg_put32(bgep, ETHERNET_MAC_EVENT_ENABLE_REG, 0);
	bge_reg_put32(bgep, ETHERNET_MAC_STATUS_REG, ~0);

	/*
	 * if we're stopping the chip because of a detected fault then do
	 * appropriate actions
	 */
	if (fault) {
		if (bgep->bge_chip_state != BGE_CHIP_FAULT) {
			bgep->bge_chip_state = BGE_CHIP_FAULT;
			if (!bgep->manual_reset)
				ddi_fm_service_impact(bgep->devinfo,
				    DDI_SERVICE_LOST);
			if (bgep->bge_dma_error) {
				/*
				 * need to free buffers in case the fault was
				 * due to a memory error in a buffer - got to
				 * do a fair bit of tidying first
				 *
				 * Teardown order matters here: kstats first
				 * (they reference the rings), then interrupts
				 * and rings, then the buffers themselves.
				 */
				if (bgep->progress & PROGRESS_KSTATS) {
					bge_fini_kstats(bgep);
					bgep->progress &= ~PROGRESS_KSTATS;
				}
				if (bgep->progress & PROGRESS_INTR) {
					bge_intr_disable(bgep);
					rw_enter(bgep->errlock, RW_WRITER);
					bge_fini_rings(bgep);
					rw_exit(bgep->errlock);
					bgep->progress &= ~PROGRESS_INTR;
				}
				if (bgep->progress & PROGRESS_BUFS) {
					bge_free_bufs(bgep);
					bgep->progress &= ~PROGRESS_BUFS;
				}
				bgep->bge_dma_error = B_FALSE;
			}
		}
	} else
		bgep->bge_chip_state = BGE_CHIP_STOPPED;
}
3259 
3260 /*
3261  * Poll for completion of chip's ROM firmware; also, at least on the
3262  * first time through, find and return the hardware MAC address, if any.
3263  */
3264 static uint64_t bge_poll_firmware(bge_t *bgep);
3265 #pragma	no_inline(bge_poll_firmware)
3266 
3267 static uint64_t
3268 bge_poll_firmware(bge_t *bgep)
3269 {
3270 	uint64_t magic;
3271 	uint64_t mac;
3272 	uint32_t gen, val;
3273 	uint32_t i;
3274 
3275 	/*
3276 	 * Step 19: poll for firmware completion (GENCOMM port set
3277 	 * to the ones complement of T3_MAGIC_NUMBER).
3278 	 *
3279 	 * While we're at it, we also read the MAC address register;
3280 	 * at some stage the firmware will load this with the
3281 	 * factory-set value.
3282 	 *
3283 	 * When both the magic number and the MAC address are set,
3284 	 * we're done; but we impose a time limit of one second
3285 	 * (1000*1000us) in case the firmware fails in some fashion
3286 	 * or the SEEPROM that provides that MAC address isn't fitted.
3287 	 *
3288 	 * After the first time through (chip state != INITIAL), we
3289 	 * don't need the MAC address to be set (we've already got it
3290 	 * or not, from the first time), so we don't wait for it, but
3291 	 * we still have to wait for the T3_MAGIC_NUMBER.
3292 	 *
3293 	 * Note: the magic number is only a 32-bit quantity, but the NIC
3294 	 * memory is 64-bit (and big-endian) internally.  Addressing the
3295 	 * GENCOMM word as "the upper half of a 64-bit quantity" makes
3296 	 * it work correctly on both big- and little-endian hosts.
3297 	 */
3298 	if (MHCR_CHIP_ASIC_REV(bgep->chipid.asic_rev) ==
3299 	    MHCR_CHIP_ASIC_REV_5906) {
3300 		for (i = 0; i < 1000; ++i) {
3301 			drv_usecwait(1000);
3302 			val = bge_reg_get32(bgep, VCPU_STATUS_REG);
3303 			if (val & VCPU_INIT_DONE)
3304 				break;
3305 		}
3306 		BGE_DEBUG(("bge_poll_firmware($%p): return after %d loops",
3307 		    (void *)bgep, i));
3308 		mac = bge_reg_get64(bgep, MAC_ADDRESS_REG(0));
3309 	} else {
3310 		for (i = 0; i < 1000; ++i) {
3311 			drv_usecwait(1000);
3312 			gen = bge_nic_get64(bgep, NIC_MEM_GENCOMM) >> 32;
3313 #ifdef BGE_IPMI_ASF
3314 			if (!bgep->asf_enabled) {
3315 #endif
3316 				if (gen != ~T3_MAGIC_NUMBER)
3317 					continue;
3318 #ifdef BGE_IPMI_ASF
3319 			}
3320 #endif
3321 			mac = bge_reg_get64(bgep, MAC_ADDRESS_REG(0));
3322 			if (mac != 0ULL)
3323 				break;
3324 			if (bgep->bge_chip_state != BGE_CHIP_INITIAL)
3325 				break;
3326 		}
3327 	}
3328 
3329 	magic = bge_nic_get64(bgep, NIC_MEM_GENCOMM);
3330 	BGE_DEBUG(("bge_poll_firmware($%p): PXE magic 0x%x after %d loops",
3331 	    (void *)bgep, gen, i));
3332 	BGE_DEBUG(("bge_poll_firmware: MAC %016llx, GENCOMM %016llx",
3333 	    mac, magic));
3334 
3335 	return (mac);
3336 }
3337 
3338 /*
3339  * Maximum times of trying to get the NVRAM access lock
3340  * by calling bge_nvmem_acquire()
3341  */
3342 #define	MAX_TRY_NVMEM_ACQUIRE	10000
3343 
3344 #ifdef BGE_IPMI_ASF
3345 int bge_chip_reset(bge_t *bgep, boolean_t enable_dma, uint_t asf_mode);
3346 #else
3347 int bge_chip_reset(bge_t *bgep, boolean_t enable_dma);
3348 #endif
3349 #pragma	no_inline(bge_chip_reset)
3350 
/*
 * Full chip reset sequence (Broadcom 570X-PG102-R / 570X-PG104-R).
 * Stops the chip if running, issues the core-clock reset, waits for
 * firmware, then restores the PCI config state clobbered by the reset.
 * Returns DDI_SUCCESS, or DDI_FAILURE if any engine/reset step failed.
 */
int
#ifdef BGE_IPMI_ASF
bge_chip_reset(bge_t *bgep, boolean_t enable_dma, uint_t asf_mode)
#else
bge_chip_reset(bge_t *bgep, boolean_t enable_dma)
#endif
{
	chip_id_t chipid;
	uint64_t mac;
	uint64_t magic;
	uint32_t modeflags;
	uint32_t mhcr;
	uint32_t sx0;
	uint32_t i, tries;
#ifdef BGE_IPMI_ASF
	uint32_t mailbox;
#endif
	int retval = DDI_SUCCESS;

	BGE_TRACE(("bge_chip_reset($%p, %d)",
		(void *)bgep, enable_dma));

	ASSERT(mutex_owned(bgep->genlock));

	BGE_DEBUG(("bge_chip_reset($%p, %d): current state is %d",
		(void *)bgep, enable_dma, bgep->bge_chip_state));

	/*
	 * Do we need to stop the chip cleanly before resetting?
	 */
	switch (bgep->bge_chip_state) {
	default:
		_NOTE(NOTREACHED)
		return (DDI_FAILURE);

	case BGE_CHIP_INITIAL:
	case BGE_CHIP_STOPPED:
	case BGE_CHIP_RESET:
		break;

	case BGE_CHIP_RUNNING:
	case BGE_CHIP_ERROR:
	case BGE_CHIP_FAULT:
		bge_chip_stop(bgep, B_FALSE);
		break;
	}

#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled) {
#ifdef __sparc
		/*
		 * On SPARC with ASF, the MHCR and memory arbiter must be
		 * set up before the pre-reset ASF handshake below.
		 */
		mhcr = MHCR_ENABLE_INDIRECT_ACCESS |
			MHCR_ENABLE_TAGGED_STATUS_MODE |
			MHCR_MASK_INTERRUPT_MODE |
			MHCR_MASK_PCI_INT_OUTPUT |
			MHCR_CLEAR_INTERRUPT_INTA |
			MHCR_ENABLE_ENDIAN_WORD_SWAP |
			MHCR_ENABLE_ENDIAN_BYTE_SWAP;
		if (DEVICE_5717_SERIES_CHIPSETS(bgep))
			pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_MHCR,
					0);
		pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_MHCR, mhcr);
		bge_reg_put32(bgep, MEMORY_ARBITER_MODE_REG,
			bge_reg_get32(bgep, MEMORY_ARBITER_MODE_REG) |
			MEMORY_ARBITER_ENABLE);
#endif
		if (asf_mode == ASF_MODE_INIT) {
			bge_asf_pre_reset_operations(bgep, BGE_INIT_RESET);
		} else if (asf_mode == ASF_MODE_SHUTDOWN) {
			bge_asf_pre_reset_operations(bgep, BGE_SHUTDOWN_RESET);
		}
	}
#endif
	/*
	 * Adapted from Broadcom document 570X-PG102-R, pp 102-116.
	 * Updated to reflect Broadcom document 570X-PG104-R, pp 146-159.
	 *
	 * Before resetting the core clock, we must also initialise the
	 * Memory Arbiter (step 9) and the Misc Host Control Register
	 * (step 13):
	 * Step 4-5: reset Core clock & wait for completion
	 * Steps 6-8: are done by bge_chip_cfg_init()
	 * put the T3_MAGIC_NUMBER into the GENCOMM port before reset
	 */
	if (!bge_chip_enable_engine(bgep, MEMORY_ARBITER_MODE_REG, 0))
		retval = DDI_FAILURE;

	mhcr = MHCR_ENABLE_INDIRECT_ACCESS |
	    MHCR_ENABLE_TAGGED_STATUS_MODE |
	    MHCR_MASK_INTERRUPT_MODE |
	    MHCR_MASK_PCI_INT_OUTPUT |
	    MHCR_CLEAR_INTERRUPT_INTA;
#ifdef  _BIG_ENDIAN
	mhcr |= MHCR_ENABLE_ENDIAN_WORD_SWAP | MHCR_ENABLE_ENDIAN_BYTE_SWAP;
#endif  /* _BIG_ENDIAN */
	if (DEVICE_5717_SERIES_CHIPSETS(bgep))
		pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_MHCR, 0);
	pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_MHCR, mhcr);
#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled)
		bgep->asf_wordswapped = B_FALSE;
#endif
	/*
	 * NVRAM Corruption Workaround: take the NVRAM access lock
	 * (retrying up to MAX_TRY_NVMEM_ACQUIRE times) so that the
	 * reset cannot interrupt a firmware NVRAM transaction.
	 */
	for (tries = 0; tries < MAX_TRY_NVMEM_ACQUIRE; tries++)
		if (bge_nvmem_acquire(bgep) != EAGAIN)
			break;
	if (tries >= MAX_TRY_NVMEM_ACQUIRE)
		BGE_DEBUG(("%s: fail to acquire nvram lock",
			bgep->ifname));

#ifdef BGE_IPMI_ASF
	if (!bgep->asf_enabled) {
#endif
		/* tell the (non-ASF) firmware that a reset is coming */
		magic = (uint64_t)T3_MAGIC_NUMBER << 32;
		bge_nic_put64(bgep, NIC_MEM_GENCOMM, magic);
#ifdef BGE_IPMI_ASF
	}
#endif

	if (!bge_chip_reset_engine(bgep, MISC_CONFIG_REG))
		retval = DDI_FAILURE;
	bge_chip_cfg_init(bgep, &chipid, enable_dma);

	/*
	 * Step 8a: This may belong elsewhere, but BCM5721 needs
	 * a bit set to avoid a fifo overflow/underflow bug.
	 */
	if ((bgep->chipid.chip_label == 5721) ||
		(bgep->chipid.chip_label == 5751) ||
		(bgep->chipid.chip_label == 5752) ||
		(bgep->chipid.chip_label == 5755) ||
		(bgep->chipid.chip_label == 5756) ||
		(bgep->chipid.chip_label == 5789) ||
		(bgep->chipid.chip_label == 5906))
		bge_reg_set32(bgep, TLP_CONTROL_REG, TLP_DATA_FIFO_PROTECT);


	/*
	 * Step 9: enable MAC memory arbiter; bits 30 and 31 of the
	 * 5714/5715 register should not be changed.
	 */
	if (!bge_chip_enable_engine(bgep, MEMORY_ARBITER_MODE_REG, 0))
		retval = DDI_FAILURE;

	/*
	 * Steps 10-11: configure PIO endianness options and
	 * enable indirect register access -- already done
	 * Steps 12-13: enable writing to the PCI state & clock
	 * control registers -- not required; we aren't going to
	 * use those features.
	 * Steps 14-15: Configure DMA endianness options.  See
	 * the comments on the setting of the MHCR above.
	 */
#ifdef	_BIG_ENDIAN
	modeflags = MODE_WORD_SWAP_FRAME | MODE_BYTE_SWAP_FRAME |
		    MODE_WORD_SWAP_NONFRAME | MODE_BYTE_SWAP_NONFRAME;
#else
	modeflags = MODE_WORD_SWAP_FRAME | MODE_BYTE_SWAP_FRAME;
#endif	/* _BIG_ENDIAN */
#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled)
		modeflags |= MODE_HOST_STACK_UP;
#endif
	bge_reg_put32(bgep, MODE_CONTROL_REG, modeflags);

#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled) {
#ifdef __sparc
		/* re-enable the memory arbiter clobbered by the reset */
		bge_reg_put32(bgep, MEMORY_ARBITER_MODE_REG,
			MEMORY_ARBITER_ENABLE |
			bge_reg_get32(bgep, MEMORY_ARBITER_MODE_REG));
#endif

#ifdef  BGE_NETCONSOLE
		/*
		 * Old-style ASF handshake.  With netconsole this must
		 * happen BEFORE the NVRAM-init wait below; without it
		 * (see #ifndef further down) it happens after.
		 */
		if (!bgep->asf_newhandshake) {
			if ((asf_mode == ASF_MODE_INIT) ||
			(asf_mode == ASF_MODE_POST_INIT)) {
				bge_asf_post_reset_old_mode(bgep,
					BGE_INIT_RESET);
			} else {
				bge_asf_post_reset_old_mode(bgep,
					BGE_SHUTDOWN_RESET);
			}
		}
#endif

		/* Wait for NVRAM init */
		i = 0;
		drv_usecwait(5000);
		mailbox = bge_nic_get32(bgep, BGE_FIRMWARE_MAILBOX);

		/* poll the firmware mailbox, ~1s max (10000 * 100us) */
		while ((mailbox != (uint32_t)
			~BGE_MAGIC_NUM_FIRMWARE_INIT_DONE) &&
			(i < 10000)) {
			drv_usecwait(100);
			mailbox = bge_nic_get32(bgep,
				BGE_FIRMWARE_MAILBOX);
			i++;
		}

#ifndef BGE_NETCONSOLE
		if (!bgep->asf_newhandshake) {
			if ((asf_mode == ASF_MODE_INIT) ||
				(asf_mode == ASF_MODE_POST_INIT)) {

				bge_asf_post_reset_old_mode(bgep,
					BGE_INIT_RESET);
			} else {
				bge_asf_post_reset_old_mode(bgep,
					BGE_SHUTDOWN_RESET);
			}
		}
#endif
	}
#endif
	/*
	 * Steps 16-17: poll for firmware completion
	 */
	mac = bge_poll_firmware(bgep);

	/*
	 * Step 18: enable external memory -- doesn't apply.
	 *
	 * However we take the opportunity to set the MLCR anyway, as
	 * this register also controls the SEEPROM auto-access method
	 * which we may want to use later ...
	 *
	 * The proper value here depends on the way the chip is wired
	 * into the circuit board, as this register *also* controls which
	 * of the "Miscellaneous I/O" pins are driven as outputs and the
	 * values driven onto those pins!
	 *
	 * See also step 74 in the PRM ...
	 */
	bge_reg_put32(bgep, MISC_LOCAL_CONTROL_REG,
	    bgep->chipid.bge_mlcr_default);
	bge_reg_set32(bgep, SERIAL_EEPROM_ADDRESS_REG, SEEPROM_ACCESS_INIT);

	/*
	 * Step 20: clear the Ethernet MAC mode register
	 */
	bge_reg_put32(bgep, ETHERNET_MAC_MODE_REG, 0);

	/*
	 * Step 21: restore cache-line-size, latency timer, and
	 * subsystem ID registers to their original values (not
	 * those read into the local structure <chipid>, 'cos
	 * that was after they were cleared by the RESET).
	 *
	 * Note: the Subsystem Vendor/Device ID registers are not
	 * directly writable in config space, so we use the shadow
	 * copy in "Page Zero" of register space to restore them
	 * both in one go ...
	 */
	pci_config_put8(bgep->cfg_handle, PCI_CONF_CACHE_LINESZ,
		bgep->chipid.clsize);
	pci_config_put8(bgep->cfg_handle, PCI_CONF_LATENCY_TIMER,
		bgep->chipid.latency);
	bge_reg_put32(bgep, PCI_CONF_SUBVENID,
		(bgep->chipid.subdev << 16) | bgep->chipid.subven);

	/*
	 * The SEND INDEX registers should be reset to zero by the
	 * global chip reset; if they're not, there'll be trouble
	 * later on.
	 */
	sx0 = bge_reg_get32(bgep, NIC_DIAG_SEND_INDEX_REG(0));
	if (sx0 != 0) {
		BGE_REPORT((bgep, "SEND INDEX - device didn't RESET"));
		bge_fm_ereport(bgep, DDI_FM_DEVICE_INVAL_STATE);
		retval = DDI_FAILURE;
	}

	/* Enable MSI code */
	if (bgep->intr_type == DDI_INTR_TYPE_MSI)
		bge_reg_set32(bgep, MSI_MODE_REG,
		    MSI_PRI_HIGHEST|MSI_MSI_ENABLE|MSI_ERROR_ATTENTION);

	/*
	 * On the first time through, save the factory-set MAC address
	 * (if any).  If bge_poll_firmware() above didn't return one
	 * (from a chip register) consider looking in the attached NV
	 * memory device, if any.  Once we have it, we save it in both
	 * register-image (64-bit) and byte-array forms.  All-zero and
	 * all-one addresses are not valid, and we refuse to stash those.
	 */
	if (bgep->bge_chip_state == BGE_CHIP_INITIAL) {
		if (mac == 0ULL)
			mac = bge_get_nvmac(bgep);
		if (mac != 0ULL && mac != ~0ULL) {
			bgep->chipid.hw_mac_addr = mac;
			for (i = ETHERADDRL; i-- != 0; ) {
				bgep->chipid.vendor_addr.addr[i] = (uchar_t)mac;
				mac >>= 8;
			}
			bgep->chipid.vendor_addr.set = B_TRUE;
		}
	}

#ifdef BGE_IPMI_ASF
	/* new-style ASF handshake happens after everything else */
	if (bgep->asf_enabled && bgep->asf_newhandshake) {
		if (asf_mode != ASF_MODE_NONE) {
			if ((asf_mode == ASF_MODE_INIT) ||
				(asf_mode == ASF_MODE_POST_INIT)) {

				bge_asf_post_reset_new_mode(bgep,
					BGE_INIT_RESET);
			} else {
				bge_asf_post_reset_new_mode(bgep,
					BGE_SHUTDOWN_RESET);
			}
		}
	}
#endif

	/*
	 * Record the new state
	 */
	bgep->chip_resets += 1;
	bgep->bge_chip_state = BGE_CHIP_RESET;
	return (retval);
}
3674 
3675 /*
3676  * bge_chip_start() -- start the chip transmitting and/or receiving,
3677  * including enabling interrupts
3678  */
3679 int bge_chip_start(bge_t *bgep, boolean_t reset_phys);
3680 #pragma	no_inline(bge_chip_start)
3681 
/*
 * bge_chip_coalesce_update() -- program the host coalescing engine's
 * "normal" (non-interrupt-context) send/receive thresholds from the
 * values cached in bgep->chipid.  Used during bge_chip_start() (steps
 * 59-62 of the init sequence).
 */
void
bge_chip_coalesce_update(bge_t *bgep)
{
	/* Tx: coalesce after this many send BDs or this many ticks */
	bge_reg_put32(bgep, SEND_COALESCE_MAX_BD_REG,
	    bgep->chipid.tx_count_norm);
	bge_reg_put32(bgep, SEND_COALESCE_TICKS_REG,
	    bgep->chipid.tx_ticks_norm);
	/* Rx: coalesce after this many receive BDs or this many ticks */
	bge_reg_put32(bgep, RCV_COALESCE_MAX_BD_REG,
	    bgep->chipid.rx_count_norm);
	bge_reg_put32(bgep, RCV_COALESCE_TICKS_REG,
	    bgep->chipid.rx_ticks_norm);
}
3694 
/*
 * bge_chip_start() -- bring the (already reset) chip up to the RUNNING
 * state: program memory pools, rings, host coalescing, the DMA and MAC
 * engines, and finally enable interrupts.  Implements steps 22-97 of
 * the initialisation sequence in Broadcom document 570X-PG102-R.
 *
 *	bgep:		per-device softstate (genlock must be held, and
 *			the chip must be in the BGE_CHIP_RESET state)
 *	reset_phys:	if B_TRUE, also set up the PHY/SerDes and
 *			restart autonegotiation if required (step 95)
 *
 * Returns DDI_SUCCESS, or DDI_FAILURE if any engine failed to enable
 * or a sub-operation failed.  Note that bge_chip_state is advanced to
 * BGE_CHIP_RUNNING even on failure.
 */
int
bge_chip_start(bge_t *bgep, boolean_t reset_phys)
{
	uint32_t coalmode;
	uint32_t ledctl;
	uint32_t mtu;
	uint32_t maxring;
	uint32_t stats_mask;
	uint32_t dma_wrprio;
	uint64_t ring;
	uint32_t regval;
	int retval = DDI_SUCCESS;

	BGE_TRACE(("bge_chip_start($%p)",
	    (void *)bgep));

	ASSERT(mutex_owned(bgep->genlock));
	ASSERT(bgep->bge_chip_state == BGE_CHIP_RESET);

	/*
	 * Taken from Broadcom document 570X-PG102-R, pp 102-116.
	 * The document specifies 95 separate steps to fully
	 * initialise the chip!!!!
	 *
	 * The reset code above has already got us as far as step
	 * 21, so we continue with ...
	 *
	 * Step 22: clear the MAC statistics block
	 * (0x0300-0x0aff in NIC-local memory)
	 */
	if (bgep->chipid.statistic_type == BGE_STAT_BLK)
		bge_nic_zero(bgep, NIC_MEM_STATISTICS,
		    NIC_MEM_STATISTICS_SIZE);

	/*
	 * Step 23: clear the status block (in host memory)
	 */
	DMA_ZERO(bgep->status_block);

	/*
	 * Step 24: set DMA read/write control register
	 */
	pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_PDRWCR,
	    bgep->chipid.bge_dma_rwctrl);

	/*
	 * Step 25: Configure DMA endianness -- already done (16/17)
	 * Step 26: Configure Host-Based Send Rings
	 * Step 27: Indicate Host Stack Up
	 */
	bge_reg_set32(bgep, MODE_CONTROL_REG,
	    MODE_HOST_SEND_BDS |
	    MODE_HOST_STACK_UP);

	/*
	 * Step 28: Configure checksum options:
	 *	Solaris supports the hardware default checksum options.
	 *
	 *	Workaround for Incorrect pseudo-header checksum calculation.
	 */
	if (bgep->chipid.flags & CHIP_FLAG_PARTIAL_CSUM)
		bge_reg_set32(bgep, MODE_CONTROL_REG,
		    MODE_SEND_NO_PSEUDO_HDR_CSUM);

	/*
	 * Step 29: configure Timer Prescaler.  The value is always the
	 * same: the Core Clock frequency in MHz (66), minus 1, shifted
	 * into bits 7-1.  Don't set bit 0, 'cos that's the RESET bit
	 * for the whole chip!
	 */
	regval = bge_reg_get32(bgep, MISC_CONFIG_REG);
	regval = (regval & 0xffffff00) | MISC_CONFIG_DEFAULT;
	bge_reg_put32(bgep, MISC_CONFIG_REG, regval);

	if (DEVICE_5906_SERIES_CHIPSETS(bgep)) {
		drv_usecwait(40);
		/* put PHY into ready state */
		bge_reg_clr32(bgep, MISC_CONFIG_REG, MISC_CONFIG_EPHY_IDDQ);
		(void) bge_reg_get32(bgep, MISC_CONFIG_REG); /* flush */
		drv_usecwait(40);
	}

	/*
	 * Steps 30-31: Configure MAC local memory pool & DMA pool registers
	 *
	 * If the mbuf_length is specified as 0, we just leave these at
	 * their hardware defaults, rather than explicitly setting them.
	 * Per the Broadcom HRM, the driver had better not change these
	 * parameters on the 5705/5788/5721/5751/5714 and 5715 chipsets.
	 */
	if ((bgep->chipid.mbuf_length != 0) &&
	    (DEVICE_5704_SERIES_CHIPSETS(bgep))) {
			bge_reg_put32(bgep, MBUF_POOL_BASE_REG,
			    bgep->chipid.mbuf_base);
			bge_reg_put32(bgep, MBUF_POOL_LENGTH_REG,
			    bgep->chipid.mbuf_length);
			bge_reg_put32(bgep, DMAD_POOL_BASE_REG,
			    DMAD_POOL_BASE_DEFAULT);
			bge_reg_put32(bgep, DMAD_POOL_LENGTH_REG,
			    DMAD_POOL_LENGTH_DEFAULT);
	}

	/*
	 * Step 32: configure MAC memory pool watermarks
	 */
	bge_reg_put32(bgep, RDMA_MBUF_LOWAT_REG,
	    bgep->chipid.mbuf_lo_water_rdma);
	bge_reg_put32(bgep, MAC_RX_MBUF_LOWAT_REG,
	    bgep->chipid.mbuf_lo_water_rmac);
	bge_reg_put32(bgep, MBUF_HIWAT_REG,
	    bgep->chipid.mbuf_hi_water);

	/*
	 * Step 33: configure DMA resource watermarks
	 */
	if (DEVICE_5704_SERIES_CHIPSETS(bgep)) {
		bge_reg_put32(bgep, DMAD_POOL_LOWAT_REG,
		    bge_dmad_lo_water);
		bge_reg_put32(bgep, DMAD_POOL_HIWAT_REG,
		    bge_dmad_hi_water);
	}
	bge_reg_put32(bgep, LOWAT_MAX_RECV_FRAMES_REG, bge_lowat_recv_frames);

	/*
	 * Steps 34-36: enable buffer manager & internal h/w queues
	 */
	if (!bge_chip_enable_engine(bgep, BUFFER_MANAGER_MODE_REG,
	    STATE_MACHINE_ATTN_ENABLE_BIT))
		retval = DDI_FAILURE;
	if (!bge_chip_enable_engine(bgep, FTQ_RESET_REG, 0))
		retval = DDI_FAILURE;

	/*
	 * Steps 37-39: initialise Receive Buffer (Producer) RCBs
	 */
	if (DEVICE_5717_SERIES_CHIPSETS(bgep)) {
		/* 5717-series: program the std ring RCB fields directly */
		buff_ring_t *brp = &bgep->buff[BGE_STD_BUFF_RING];
		bge_reg_put64(bgep, STD_RCV_BD_RING_RCB_REG,
		    brp->desc.cookie.dmac_laddress);
		bge_reg_put32(bgep, STD_RCV_BD_RING_RCB_REG + 8,
		    (brp->desc.nslots) << 16 | brp->buf[0].size << 2);
		bge_reg_put32(bgep, STD_RCV_BD_RING_RCB_REG + 0xc,
		    NIC_MEM_SHADOW_BUFF_STD_5717);
	} else
		bge_reg_putrcb(bgep, STD_RCV_BD_RING_RCB_REG,
		    &bgep->buff[BGE_STD_BUFF_RING].hw_rcb);

	if (DEVICE_5704_SERIES_CHIPSETS(bgep)) {
		bge_reg_putrcb(bgep, JUMBO_RCV_BD_RING_RCB_REG,
		    &bgep->buff[BGE_JUMBO_BUFF_RING].hw_rcb);
		bge_reg_putrcb(bgep, MINI_RCV_BD_RING_RCB_REG,
		    &bgep->buff[BGE_MINI_BUFF_RING].hw_rcb);
	}

	/*
	 * Step 40: set Receive Buffer Descriptor Ring replenish thresholds
	 */
	bge_reg_put32(bgep, STD_RCV_BD_REPLENISH_REG, bge_replenish_std);
	if (DEVICE_5704_SERIES_CHIPSETS(bgep)) {
		bge_reg_put32(bgep, JUMBO_RCV_BD_REPLENISH_REG,
		    bge_replenish_jumbo);
		bge_reg_put32(bgep, MINI_RCV_BD_REPLENISH_REG,
		    bge_replenish_mini);
	}

	/*
	 * Steps 41-43: clear Send Ring Producer Indices and initialise
	 * Send Producer Rings (0x0100-0x01ff in NIC-local memory)
	 */
	if (DEVICE_5704_SERIES_CHIPSETS(bgep))
		maxring = BGE_SEND_RINGS_MAX;
	else
		maxring = BGE_SEND_RINGS_MAX_5705;
	for (ring = 0; ring < maxring; ++ring) {
		bge_mbx_put(bgep, SEND_RING_HOST_INDEX_REG(ring), 0);
		bge_mbx_put(bgep, SEND_RING_NIC_INDEX_REG(ring), 0);
		bge_nic_putrcb(bgep, NIC_MEM_SEND_RING(ring),
		    &bgep->send[ring].hw_rcb);
	}

	/*
	 * Steps 44-45: initialise Receive Return Rings
	 * (0x0200-0x02ff in NIC-local memory)
	 */
	if (DEVICE_5704_SERIES_CHIPSETS(bgep))
		maxring = BGE_RECV_RINGS_MAX;
	else
		maxring = BGE_RECV_RINGS_MAX_5705;
	for (ring = 0; ring < maxring; ++ring)
		bge_nic_putrcb(bgep, NIC_MEM_RECV_RING(ring),
		    &bgep->recv[ring].hw_rcb);

	/*
	 * Step 46: initialise Receive Buffer (Producer) Ring indexes
	 */
	bge_mbx_put(bgep, RECV_STD_PROD_INDEX_REG, 0);
	if (DEVICE_5704_SERIES_CHIPSETS(bgep)) {
		bge_mbx_put(bgep, RECV_JUMBO_PROD_INDEX_REG, 0);
		bge_mbx_put(bgep, RECV_MINI_PROD_INDEX_REG, 0);
	}
	/*
	 * Step 47: configure the MAC unicast address
	 * Step 48: configure the random backoff seed
	 * Step 96: set up multicast filters
	 */
#ifdef BGE_IPMI_ASF
	if (bge_chip_sync(bgep, B_FALSE) == DDI_FAILURE)
#else
	if (bge_chip_sync(bgep) == DDI_FAILURE)
#endif
		retval = DDI_FAILURE;

	/*
	 * Step 49: configure the MTU
	 */
	mtu = bgep->chipid.ethmax_size+ETHERFCSL+VLAN_TAGSZ;
	bge_reg_put32(bgep, MAC_RX_MTU_SIZE_REG, mtu);

	/*
	 * Step 50: configure the IPG et al
	 */
	bge_reg_put32(bgep, MAC_TX_LENGTHS_REG, MAC_TX_LENGTHS_DEFAULT);

	/*
	 * Step 51: configure the default Rx Return Ring
	 */
	bge_reg_put32(bgep, RCV_RULES_CONFIG_REG, RCV_RULES_CONFIG_DEFAULT);

	/*
	 * Steps 52-54: configure Receive List Placement,
	 * and enable Receive List Placement Statistics
	 */
	bge_reg_put32(bgep, RCV_LP_CONFIG_REG,
	    RCV_LP_CONFIG(bgep->chipid.rx_rings));
	switch (MHCR_CHIP_ASIC_REV(bgep->chipid.asic_rev)) {
	case MHCR_CHIP_ASIC_REV_5700:
	case MHCR_CHIP_ASIC_REV_5701:
	case MHCR_CHIP_ASIC_REV_5703:
	case MHCR_CHIP_ASIC_REV_5704:
		bge_reg_put32(bgep, RCV_LP_STATS_ENABLE_MASK_REG, ~0);
		break;
	case MHCR_CHIP_ASIC_REV_5705:
		break;
	default:
		stats_mask = bge_reg_get32(bgep, RCV_LP_STATS_ENABLE_MASK_REG);
		stats_mask &= ~RCV_LP_STATS_DISABLE_MACTQ;
		bge_reg_put32(bgep, RCV_LP_STATS_ENABLE_MASK_REG, stats_mask);
		break;
	}
	bge_reg_set32(bgep, RCV_LP_STATS_CONTROL_REG, RCV_LP_STATS_ENABLE);

	if (bgep->chipid.rx_rings > 1)
		bge_init_recv_rule(bgep);

	/*
	 * Steps 55-56: enable Send Data Initiator Statistics
	 */
	bge_reg_put32(bgep, SEND_INIT_STATS_ENABLE_MASK_REG, ~0);
	if (DEVICE_5704_SERIES_CHIPSETS(bgep)) {
		bge_reg_put32(bgep, SEND_INIT_STATS_CONTROL_REG,
		    SEND_INIT_STATS_ENABLE | SEND_INIT_STATS_FASTER);
	} else {
		bge_reg_put32(bgep, SEND_INIT_STATS_CONTROL_REG,
		    SEND_INIT_STATS_ENABLE);
	}
	/*
	 * Steps 57-58: stop (?) the Host Coalescing Engine
	 */
	if (!bge_chip_disable_engine(bgep, HOST_COALESCE_MODE_REG, ~0))
		retval = DDI_FAILURE;

	/*
	 * Steps 59-62: initialise Host Coalescing parameters
	 */
	bge_chip_coalesce_update(bgep);
	if (DEVICE_5704_SERIES_CHIPSETS(bgep)) {
		bge_reg_put32(bgep, SEND_COALESCE_INT_BD_REG,
		    bge_tx_count_intr);
		bge_reg_put32(bgep, SEND_COALESCE_INT_TICKS_REG,
		    bge_tx_ticks_intr);
		bge_reg_put32(bgep, RCV_COALESCE_INT_BD_REG,
		    bge_rx_count_intr);
		bge_reg_put32(bgep, RCV_COALESCE_INT_TICKS_REG,
		    bge_rx_ticks_intr);
	}

	/*
	 * Steps 63-64: initialise status block & statistics
	 * host memory addresses
	 * The statistic block does not exist in some chipsets
	 * Step 65: initialise Statistics Coalescing Tick Counter
	 */
	bge_reg_put64(bgep, STATUS_BLOCK_HOST_ADDR_REG,
	    bgep->status_block.cookie.dmac_laddress);

	/*
	 * Steps 66-67: initialise status block & statistics
	 * NIC-local memory addresses
	 */
	if (DEVICE_5704_SERIES_CHIPSETS(bgep)) {
		bge_reg_put64(bgep, STATISTICS_HOST_ADDR_REG,
		    bgep->statistics.cookie.dmac_laddress);
		bge_reg_put32(bgep, STATISTICS_TICKS_REG,
		    STATISTICS_TICKS_DEFAULT);
		bge_reg_put32(bgep, STATUS_BLOCK_BASE_ADDR_REG,
		    NIC_MEM_STATUS_BLOCK);
		bge_reg_put32(bgep, STATISTICS_BASE_ADDR_REG,
		    NIC_MEM_STATISTICS);
	}

	/*
	 * Steps 68-71: start the Host Coalescing Engine, the Receive BD
	 * Completion Engine, the Receive List Placement Engine, and the
	 * Receive List Selector.  Note that the Receive List Selector
	 * register (0x3400) does not exist on the BCM5714 and BCM5715.
	 */
	if (bgep->chipid.tx_rings <= COALESCE_64_BYTE_RINGS &&
	    bgep->chipid.rx_rings <= COALESCE_64_BYTE_RINGS)
		coalmode = COALESCE_64_BYTE_STATUS;
	else
		coalmode = 0;
	if (DEVICE_5717_SERIES_CHIPSETS(bgep))
		coalmode = COALESCE_CLR_TICKS_RX;
	if (!bge_chip_enable_engine(bgep, HOST_COALESCE_MODE_REG, coalmode))
		retval = DDI_FAILURE;
	if (!bge_chip_enable_engine(bgep, RCV_BD_COMPLETION_MODE_REG,
	    STATE_MACHINE_ATTN_ENABLE_BIT))
		retval = DDI_FAILURE;
	if (!bge_chip_enable_engine(bgep, RCV_LIST_PLACEMENT_MODE_REG, 0))
		retval = DDI_FAILURE;

	if (DEVICE_5704_SERIES_CHIPSETS(bgep))
		if (!bge_chip_enable_engine(bgep, RCV_LIST_SELECTOR_MODE_REG,
		    STATE_MACHINE_ATTN_ENABLE_BIT))
			retval = DDI_FAILURE;

	/*
	 * Step 72: Enable MAC DMA engines
	 * Step 73: Clear & enable MAC statistics
	 */
	bge_reg_set32(bgep, ETHERNET_MAC_MODE_REG,
	    ETHERNET_MODE_ENABLE_FHDE |
	    ETHERNET_MODE_ENABLE_RDE |
	    ETHERNET_MODE_ENABLE_TDE);
	bge_reg_set32(bgep, ETHERNET_MAC_MODE_REG,
	    ETHERNET_MODE_ENABLE_TX_STATS |
	    ETHERNET_MODE_ENABLE_RX_STATS |
	    ETHERNET_MODE_CLEAR_TX_STATS |
	    ETHERNET_MODE_CLEAR_RX_STATS);

	/*
	 * Step 74: configure the MLCR (Miscellaneous Local Control
	 * Register); not required, as we set up the MLCR in step 10
	 * (part of the reset code) above.
	 *
	 * Step 75: clear Interrupt Mailbox 0
	 */
	bge_mbx_put(bgep, INTERRUPT_MBOX_0_REG, 0);

	/*
	 * Steps 76-87: Gentlemen, start your engines ...
	 *
	 * Enable the DMA Completion Engine, the Write DMA Engine,
	 * the Read DMA Engine, Receive Data Completion Engine,
	 * the MBuf Cluster Free Engine, the Send Data Completion Engine,
	 * the Send BD Completion Engine, the Receive BD Initiator Engine,
	 * the Receive Data Initiator Engine, the Send Data Initiator Engine,
	 * the Send BD Initiator Engine, and the Send BD Selector Engine.
	 *
	 * Beware exhaust fumes?
	 */
	if (DEVICE_5704_SERIES_CHIPSETS(bgep))
		if (!bge_chip_enable_engine(bgep, DMA_COMPLETION_MODE_REG, 0))
			retval = DDI_FAILURE;
	dma_wrprio = (bge_dma_wrprio << DMA_PRIORITY_SHIFT) |
	    ALL_DMA_ATTN_BITS;
	/* workaround for CQ12384: these revs need the status tag fix bit */
	if ((MHCR_CHIP_ASIC_REV(bgep->chipid.asic_rev) ==
	    MHCR_CHIP_ASIC_REV_5755) ||
	    (MHCR_CHIP_ASIC_REV(bgep->chipid.asic_rev) ==
	    MHCR_CHIP_ASIC_REV_5723) ||
	    (MHCR_CHIP_ASIC_REV(bgep->chipid.asic_rev) ==
	    MHCR_CHIP_ASIC_REV_5906)) {
		dma_wrprio |= DMA_STATUS_TAG_FIX_CQ12384;
	}
	if (!bge_chip_enable_engine(bgep, WRITE_DMA_MODE_REG,
	    dma_wrprio))
		retval = DDI_FAILURE;
	/*
	 * NOTE(review): this clears what appears to be the file-global
	 * bge_dma_rdprio tunable, so it would affect all bge instances
	 * once any 5723/5717-series device starts -- confirm intended.
	 */
	if (DEVICE_5723_SERIES_CHIPSETS(bgep) ||
	    DEVICE_5717_SERIES_CHIPSETS(bgep))
		bge_dma_rdprio = 0;
	if (!bge_chip_enable_engine(bgep, READ_DMA_MODE_REG,
	    (bge_dma_rdprio << DMA_PRIORITY_SHIFT) | ALL_DMA_ATTN_BITS))
		retval = DDI_FAILURE;
	if (!bge_chip_enable_engine(bgep, RCV_DATA_COMPLETION_MODE_REG,
	    STATE_MACHINE_ATTN_ENABLE_BIT))
		retval = DDI_FAILURE;
	if (DEVICE_5704_SERIES_CHIPSETS(bgep))
		if (!bge_chip_enable_engine(bgep,
		    MBUF_CLUSTER_FREE_MODE_REG, 0))
			retval = DDI_FAILURE;
	if (!bge_chip_enable_engine(bgep, SEND_DATA_COMPLETION_MODE_REG, 0))
		retval = DDI_FAILURE;
	if (!bge_chip_enable_engine(bgep, SEND_BD_COMPLETION_MODE_REG,
	    STATE_MACHINE_ATTN_ENABLE_BIT))
		retval = DDI_FAILURE;
	if (!bge_chip_enable_engine(bgep, RCV_BD_INITIATOR_MODE_REG,
	    RCV_BD_DISABLED_RING_ATTN))
		retval = DDI_FAILURE;
	if (!bge_chip_enable_engine(bgep, RCV_DATA_BD_INITIATOR_MODE_REG,
	    RCV_DATA_BD_ILL_RING_ATTN))
		retval = DDI_FAILURE;
	if (!bge_chip_enable_engine(bgep, SEND_DATA_INITIATOR_MODE_REG, 0))
		retval = DDI_FAILURE;
	if (!bge_chip_enable_engine(bgep, SEND_BD_INITIATOR_MODE_REG,
	    STATE_MACHINE_ATTN_ENABLE_BIT))
		retval = DDI_FAILURE;
	if (!bge_chip_enable_engine(bgep, SEND_BD_SELECTOR_MODE_REG,
	    STATE_MACHINE_ATTN_ENABLE_BIT))
		retval = DDI_FAILURE;

	/*
	 * Step 88: download firmware -- doesn't apply
	 * Steps 89-90: enable Transmit & Receive MAC Engines
	 */
	if (!bge_chip_enable_engine(bgep, TRANSMIT_MAC_MODE_REG, 0))
		retval = DDI_FAILURE;
#ifdef BGE_IPMI_ASF
	if (!bgep->asf_enabled) {
		if (!bge_chip_enable_engine(bgep, RECEIVE_MAC_MODE_REG,
		    RECEIVE_MODE_KEEP_VLAN_TAG))
			retval = DDI_FAILURE;
	} else {
		if (!bge_chip_enable_engine(bgep, RECEIVE_MAC_MODE_REG, 0))
			retval = DDI_FAILURE;
	}
#else
	if (!bge_chip_enable_engine(bgep, RECEIVE_MAC_MODE_REG,
	    RECEIVE_MODE_KEEP_VLAN_TAG))
		retval = DDI_FAILURE;
#endif

	/*
	 * Step 91: disable auto-polling of PHY status
	 */
	bge_reg_put32(bgep, MI_MODE_REG, MI_MODE_DEFAULT);

	/*
	 * Step 92: configure D0 power state (not required)
	 * Step 93: initialise LED control register ()
	 */
	ledctl = LED_CONTROL_DEFAULT;
	switch (bgep->chipid.device) {
	case DEVICE_ID_5700:
	case DEVICE_ID_5700x:
	case DEVICE_ID_5701:
		/*
		 * Switch to 5700 (MAC) mode on these older chips
		 */
		ledctl &= ~LED_CONTROL_LED_MODE_MASK;
		ledctl |= LED_CONTROL_LED_MODE_5700;
		break;

	default:
		break;
	}
	bge_reg_put32(bgep, ETHERNET_MAC_LED_CONTROL_REG, ledctl);

	/*
	 * Step 94: activate link
	 */
	bge_reg_put32(bgep, MI_STATUS_REG, MI_STATUS_LINK);

	/*
	 * Step 95: set up physical layer (PHY/SerDes)
	 * restart autoneg (if required)
	 */
	if (reset_phys)
		if (bge_phys_update(bgep) == DDI_FAILURE)
			retval = DDI_FAILURE;

	/*
	 * Extra step (DSG): hand over all the Receive Buffers to the chip
	 */
	for (ring = 0; ring < BGE_BUFF_RINGS_USED; ++ring)
		bge_mbx_put(bgep, bgep->buff[ring].chip_mbx_reg,
		    bgep->buff[ring].rf_next);

	/*
	 * MSI bits: set extra bits (0x70) in the least significant 16-bit
	 * word of the host coalescing mode register; with MSI the ISR is
	 * triggered differently than with #INTA (see the Broadcom HRM
	 * for the exact encoding -- TODO confirm).
	 */
	if (bgep->intr_type == DDI_INTR_TYPE_MSI)
		bge_reg_set32(bgep, HOST_COALESCE_MODE_REG, 0x70);

	/*
	 * Extra step (DSG): select which interrupts are enabled
	 *
	 * Program the Ethernet MAC engine to signal attention on
	 * Link Change events, then enable interrupts on MAC, DMA,
	 * and FLOW attention signals.
	 */
	bge_reg_set32(bgep, ETHERNET_MAC_EVENT_ENABLE_REG,
	    ETHERNET_EVENT_LINK_INT |
	    ETHERNET_STATUS_PCS_ERROR_INT);
#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled) {
		bge_reg_set32(bgep, MODE_CONTROL_REG,
		    MODE_INT_ON_FLOW_ATTN |
		    MODE_INT_ON_DMA_ATTN |
		    MODE_HOST_STACK_UP|
		    MODE_INT_ON_MAC_ATTN);
	} else {
#endif
		bge_reg_set32(bgep, MODE_CONTROL_REG,
		    MODE_INT_ON_FLOW_ATTN |
		    MODE_INT_ON_DMA_ATTN |
		    MODE_INT_ON_MAC_ATTN);
#ifdef BGE_IPMI_ASF
	}
#endif

	/*
	 * Step 97: enable PCI interrupts!!!
	 */
	if (bgep->intr_type == DDI_INTR_TYPE_FIXED)
		bge_cfg_clr32(bgep, PCI_CONF_BGE_MHCR,
		    bgep->chipid.mask_pci_int);

	/*
	 * All done!
	 */
	bgep->bge_chip_state = BGE_CHIP_RUNNING;
	return (retval);
}
4229 
4230 
4231 /*
4232  * ========== Hardware interrupt handler ==========
4233  */
4234 
4235 #undef	BGE_DBG
4236 #define	BGE_DBG		BGE_DBG_INT	/* debug flag for this code	*/
4237 
4238 /*
4239  * Sync the status block, then atomically clear the specified bits in
4240  * the <flags-and-tag> field of the status block.
4241  * the <flags> word of the status block, returning the value of the
4242  * <tag> and the <flags> before the bits were cleared.
4243  */
4244 static int bge_status_sync(bge_t *bgep, uint64_t bits, uint64_t *flags);
4245 #pragma	inline(bge_status_sync)
4246 
4247 static int
4248 bge_status_sync(bge_t *bgep, uint64_t bits, uint64_t *flags)
4249 {
4250 	bge_status_t *bsp;
4251 	int retval;
4252 
4253 	BGE_TRACE(("bge_status_sync($%p, 0x%llx)",
4254 	    (void *)bgep, bits));
4255 
4256 	ASSERT(bgep->bge_guard == BGE_GUARD);
4257 
4258 	DMA_SYNC(bgep->status_block, DDI_DMA_SYNC_FORKERNEL);
4259 	retval = bge_check_dma_handle(bgep, bgep->status_block.dma_hdl);
4260 	if (retval != DDI_FM_OK)
4261 		return (retval);
4262 
4263 	bsp = DMA_VPTR(bgep->status_block);
4264 	*flags = bge_atomic_clr64(&bsp->flags_n_tag, bits);
4265 
4266 	BGE_DEBUG(("bge_status_sync($%p, 0x%llx) returning 0x%llx",
4267 	    (void *)bgep, bits, *flags));
4268 
4269 	return (retval);
4270 }
4271 
4272 void bge_wake_factotum(bge_t *bgep);
4273 #pragma	inline(bge_wake_factotum)
4274 
4275 void
4276 bge_wake_factotum(bge_t *bgep)
4277 {
4278 	mutex_enter(bgep->softintrlock);
4279 	if (bgep->factotum_flag == 0) {
4280 		bgep->factotum_flag = 1;
4281 		ddi_trigger_softintr(bgep->factotum_id);
4282 	}
4283 	mutex_exit(bgep->softintrlock);
4284 }
4285 
4286 /*
4287  *	bge_intr() -- handle chip interrupts
4288  */
uint_t bge_intr(caddr_t arg1, caddr_t arg2);
#pragma	no_inline(bge_intr)

/*
 * Hardware interrupt handler.
 *
 *	arg1:	the bge_t softstate
 *	arg2:	unused
 *
 * Returns DDI_INTR_CLAIMED or DDI_INTR_UNCLAIMED; fixed interrupts are
 * unclaimed when the chip isn't asserting #INTA.  On a fatal access or
 * DMA fault the chip is stopped via the chip_stop path below.
 */
uint_t
bge_intr(caddr_t arg1, caddr_t arg2)
{
	bge_t *bgep = (void *)arg1;		/* private device info	*/
	bge_status_t *bsp;
	uint64_t flags;
	uint32_t regval;
	uint_t result;
	int retval, loop_cnt = 0;

	BGE_TRACE(("bge_intr($%p) ($%p)", arg1, arg2));

	/*
	 * GLD v2 checks that s/w setup is complete before passing
	 * interrupts to this routine, thus eliminating the old
	 * (and well-known) race condition around ddi_add_intr()
	 */
	ASSERT(bgep->progress & PROGRESS_HWINT);

	result = DDI_INTR_UNCLAIMED;
	mutex_enter(bgep->genlock);

	if (bgep->intr_type == DDI_INTR_TYPE_FIXED) {
		/*
		 * Check whether the chip says it's asserting #INTA;
		 * if not, don't process or claim the interrupt.
		 *
		 * Note that the PCI signal is active low, so the
		 * bit is *zero* when the interrupt is asserted.
		 */
		regval = bge_reg_get32(bgep, MISC_LOCAL_CONTROL_REG);
		if (!(DEVICE_5717_SERIES_CHIPSETS(bgep)) &&
		    (regval & MLCR_INTA_STATE)) {
			if (bge_check_acc_handle(bgep, bgep->io_handle)
			    != DDI_FM_OK)
				goto chip_stop;
			mutex_exit(bgep->genlock);
			return (result);
		}

		/*
		 * Block further PCI interrupts ...
		 */
		bge_reg_set32(bgep, PCI_CONF_BGE_MHCR,
		    bgep->chipid.mask_pci_int);

	} else {
		/*
		 * Check MSI status
		 */
		regval = bge_reg_get32(bgep, MSI_STATUS_REG);
		if (regval & MSI_ERROR_ATTENTION) {
			BGE_REPORT((bgep, "msi error attention,"
			    " status=0x%x", regval));
			/* write back to clear the error attention */
			bge_reg_put32(bgep, MSI_STATUS_REG, regval);
		}
	}

	result = DDI_INTR_CLAIMED;

	BGE_DEBUG(("bge_intr($%p) ($%p) regval 0x%08x", arg1, arg2, regval));

	/*
	 * Sync the status block and grab the flags-n-tag from it.
	 * We count the number of interrupts where there doesn't
	 * seem to have been a DMA update of the status block; if
	 * it *has* been updated, the counter will be cleared in
	 * the while() loop below ...
	 */
	bgep->missed_dmas += 1;
	bsp = DMA_VPTR(bgep->status_block);
	for (loop_cnt = 0; loop_cnt < bge_intr_max_loop; loop_cnt++) {
		if (bgep->bge_chip_state != BGE_CHIP_RUNNING) {
			/*
			 * bge_chip_stop() may have freed dma area etc
			 * while we were in this interrupt handler -
			 * better not call bge_status_sync()
			 */
			(void) bge_check_acc_handle(bgep,
			    bgep->io_handle);
			mutex_exit(bgep->genlock);
			return (DDI_INTR_CLAIMED);
		}
		retval = bge_status_sync(bgep, STATUS_FLAG_UPDATED,
		    &flags);
		if (retval != DDI_FM_OK) {
			bgep->bge_dma_error = B_TRUE;
			goto chip_stop;
		}

		/* no new status block update -- we're done looping */
		if (!(flags & STATUS_FLAG_UPDATED))
			break;

		/*
		 * Tell the chip that we're processing the interrupt
		 */
		bge_mbx_put(bgep, INTERRUPT_MBOX_0_REG,
		    INTERRUPT_MBOX_DISABLE(flags));
		if (bge_check_acc_handle(bgep, bgep->io_handle) !=
		    DDI_FM_OK)
			goto chip_stop;

		/*
		 * Drop the mutex while we:
		 * 	Receive any newly-arrived packets
		 *	Recycle any newly-finished send buffers
		 */
		bgep->bge_intr_running = B_TRUE;
		mutex_exit(bgep->genlock);
		bge_receive(bgep, bsp);
		(void) bge_recycle(bgep, bsp);
		mutex_enter(bgep->genlock);
		bgep->bge_intr_running = B_FALSE;

		/*
		 * Tell the chip we've finished processing, and
		 * give it the tag that we got from the status
		 * block earlier, so that it knows just how far
		 * we've gone.  If it's got more for us to do,
		 * it will now update the status block and try
		 * to assert an interrupt (but we've got the
		 * #INTA blocked at present).  If we see the
		 * update, we'll loop around to do some more.
		 * Eventually we'll get out of here ...
		 */
		bge_mbx_put(bgep, INTERRUPT_MBOX_0_REG,
		    INTERRUPT_MBOX_ENABLE(flags));
		/* PCI-E: read back to flush the mailbox write */
		if (bgep->chipid.pci_type == BGE_PCI_E)
			(void) bge_mbx_get(bgep, INTERRUPT_MBOX_0_REG);
		bgep->missed_dmas = 0;
	}

	/*
	 * Check for exceptional conditions that we need to handle
	 *
	 * Link status changed
	 * Status block not updated
	 */
	if (flags & STATUS_FLAG_LINK_CHANGED)
		bge_wake_factotum(bgep);

	if (bgep->missed_dmas) {
		/*
		 * Probably due to the internal status tag not
		 * being reset.  Force a status block update now;
		 * this should ensure that we get an update and
		 * a new interrupt.  After that, we should be in
		 * sync again ...
		 */
		BGE_REPORT((bgep, "interrupt: flags 0x%llx - "
		    "not updated?", flags));
		bgep->missed_updates++;
		bge_reg_set32(bgep, HOST_COALESCE_MODE_REG,
		    COALESCE_NOW);

		if (bgep->missed_dmas >= bge_dma_miss_limit) {
			/*
			 * If this happens multiple times in a row,
			 * it means DMA is just not working.  Maybe
			 * the chip's failed, or maybe there's a
			 * problem on the PCI bus or in the host-PCI
			 * bridge (Tomatillo).
			 *
			 * At all events, we want to stop further
			 * interrupts and let the recovery code take
			 * over to see whether anything can be done
			 * about it ...
			 */
			bge_fm_ereport(bgep,
			    DDI_FM_DEVICE_BADINT_LIMIT);
			goto chip_stop;
		}
	}

	/*
	 * Reenable assertion of #INTA, unless there's a DMA fault
	 */
	if (bgep->intr_type == DDI_INTR_TYPE_FIXED) {
		bge_reg_clr32(bgep, PCI_CONF_BGE_MHCR,
		    bgep->chipid.mask_pci_int);
		if (bge_check_acc_handle(bgep, bgep->cfg_handle) !=
		    DDI_FM_OK)
			goto chip_stop;
	}

	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK)
		goto chip_stop;

	mutex_exit(bgep->genlock);
	return (result);

chip_stop:
#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled && bgep->asf_status == ASF_STAT_RUN) {
		/*
		 * We must stop ASF heart beat before
		 * bge_chip_stop(), otherwise some
		 * computers (ex. IBM HS20 blade
		 * server) may crash.
		 */
		bge_asf_update_status(bgep);
		bge_asf_stop_timer(bgep);
		bgep->asf_status = ASF_STAT_STOP;

		bge_asf_pre_reset_operations(bgep, BGE_INIT_RESET);
		(void) bge_check_acc_handle(bgep, bgep->cfg_handle);
	}
#endif
	bge_chip_stop(bgep, B_TRUE);
	(void) bge_check_acc_handle(bgep, bgep->io_handle);
	mutex_exit(bgep->genlock);
	return (result);
}
4505 
4506 /*
4507  * ========== Factotum, implemented as a softint handler ==========
4508  */
4509 
4510 #undef	BGE_DBG
4511 #define	BGE_DBG		BGE_DBG_FACT	/* debug flag for this code	*/
4512 
static void bge_factotum_error_handler(bge_t *bgep);
#pragma	no_inline(bge_factotum_error_handler)

/*
 * Diagnose (and for now simply clear) whatever caused the ERROR bit
 * in the status block to be asserted.  Must be called with genlock
 * held.  The Tx RISC state register is read/cleared only on
 * 5704-series chipsets, hence txrs's 0 default.
 */
static void
bge_factotum_error_handler(bge_t *bgep)
{
	uint32_t flow;
	uint32_t rdma;
	uint32_t wdma;
	uint32_t tmac;
	uint32_t rmac;
	uint32_t rxrs;
	uint32_t txrs = 0;

	ASSERT(mutex_owned(bgep->genlock));

	/*
	 * Read all the registers that show the possible
	 * reasons for the ERROR bit to be asserted
	 */
	flow = bge_reg_get32(bgep, FLOW_ATTN_REG);
	rdma = bge_reg_get32(bgep, READ_DMA_STATUS_REG);
	wdma = bge_reg_get32(bgep, WRITE_DMA_STATUS_REG);
	tmac = bge_reg_get32(bgep, TRANSMIT_MAC_STATUS_REG);
	rmac = bge_reg_get32(bgep, RECEIVE_MAC_STATUS_REG);
	rxrs = bge_reg_get32(bgep, RX_RISC_STATE_REG);
	if (DEVICE_5704_SERIES_CHIPSETS(bgep))
		txrs = bge_reg_get32(bgep, TX_RISC_STATE_REG);

	BGE_DEBUG(("factotum($%p) flow 0x%x rdma 0x%x wdma 0x%x",
	    (void *)bgep, flow, rdma, wdma));
	BGE_DEBUG(("factotum($%p) tmac 0x%x rmac 0x%x rxrs 0x%08x txrs 0x%08x",
	    (void *)bgep, tmac, rmac, rxrs, txrs));

	/*
	 * For now, just clear all the errors ...
	 * (writing ~0 clears each attention/status register)
	 */
	if (DEVICE_5704_SERIES_CHIPSETS(bgep))
		bge_reg_put32(bgep, TX_RISC_STATE_REG, ~0);
	bge_reg_put32(bgep, RX_RISC_STATE_REG, ~0);
	bge_reg_put32(bgep, RECEIVE_MAC_STATUS_REG, ~0);
	bge_reg_put32(bgep, WRITE_DMA_STATUS_REG, ~0);
	bge_reg_put32(bgep, READ_DMA_STATUS_REG, ~0);
	bge_reg_put32(bgep, FLOW_ATTN_REG, ~0);
}
4558 
4559 /*
4560  * Handler for hardware link state change.
4561  *
4562  * When this routine is called, the hardware link state has changed
4563  * and the new state is reflected in the param_* variables.  Here
4564  * we must update the softstate and reprogram the MAC to match.
4565  */
4566 static void bge_factotum_link_handler(bge_t *bgep);
4567 #pragma	no_inline(bge_factotum_link_handler)
4568 
4569 static void
4570 bge_factotum_link_handler(bge_t *bgep)
4571 {
4572 	ASSERT(mutex_owned(bgep->genlock));
4573 
4574 	/*
4575 	 * Update the s/w link_state
4576 	 */
4577 	if (bgep->param_link_up)
4578 		bgep->link_state = LINK_STATE_UP;
4579 	else
4580 		bgep->link_state = LINK_STATE_DOWN;
4581 
4582 	/*
4583 	 * Reprogram the MAC modes to match
4584 	 */
4585 	bge_sync_mac_modes(bgep);
4586 }
4587 
static boolean_t bge_factotum_link_check(bge_t *bgep, int *dma_state);
#pragma	no_inline(bge_factotum_link_check)

/*
 * Determine whether the link state has changed, updating the softstate
 * and MAC via bge_factotum_link_handler() if it has.  *dma_state is set
 * to the DDI FM status of the status-block sync; on failure, B_FALSE is
 * returned and no further checking is done.  Caller holds genlock.
 */
static boolean_t
bge_factotum_link_check(bge_t *bgep, int *dma_state)
{
	boolean_t check;
	uint64_t flags;
	uint32_t tmac_status;

	ASSERT(mutex_owned(bgep->genlock));

	/*
	 * Get & clear the writable status bits in the Tx status register
	 * (some bits are write-1-to-clear, others are just readonly).
	 */
	tmac_status = bge_reg_get32(bgep, TRANSMIT_MAC_STATUS_REG);
	bge_reg_put32(bgep, TRANSMIT_MAC_STATUS_REG, tmac_status);

	/*
	 * Get & clear the ERROR and LINK_CHANGED bits from the status block
	 */
	*dma_state = bge_status_sync(bgep, STATUS_FLAG_ERROR |
	    STATUS_FLAG_LINK_CHANGED, &flags);
	if (*dma_state != DDI_FM_OK)
		return (B_FALSE);

	/*
	 * Clear any errors flagged in the status block ...
	 */
	if (flags & STATUS_FLAG_ERROR)
		bge_factotum_error_handler(bgep);

	/*
	 * We need to check the link status if:
	 *	the status block says there's been a link change
	 *	or there's any discrepancy between the various
	 *	flags indicating the link state (link_state,
	 *	param_link_up, and the LINK STATE bit in the
	 *	Transmit MAC status register).
	 */
	check = (flags & STATUS_FLAG_LINK_CHANGED) != 0;
	switch (bgep->link_state) {
	case LINK_STATE_UP:
		check |= (bgep->param_link_up == B_FALSE);
		check |= ((tmac_status & TRANSMIT_STATUS_LINK_UP) == 0);
		break;

	case LINK_STATE_DOWN:
		check |= (bgep->param_link_up != B_FALSE);
		check |= ((tmac_status & TRANSMIT_STATUS_LINK_UP) != 0);
		break;

	default:
		/* unknown state -- always recheck */
		check = B_TRUE;
		break;
	}

	/*
	 * If <check> is false, we're sure the link hasn't changed.
	 * If true, however, it's not yet definitive; we have to call
	 * bge_phys_check() to determine whether the link has settled
	 * into a new state yet ... and if it has, then call the link
	 * state change handler.  However, on a 5700 chip (e.g. in a
	 * Dell 6650) the link may have changed even when <check> is
	 * false, so in that case we always call bge_phys_check() to
	 * determine the link state.
	 */
	if (check || bgep->chipid.device == DEVICE_ID_5700) {
		check = bge_phys_check(bgep);
		if (check)
			bge_factotum_link_handler(bgep);
	}

	return (check);
}
4663 
4664 /*
4665  * Factotum routine to check for Tx stall, using the 'watchdog' counter
4666  */
4667 static boolean_t bge_factotum_stall_check(bge_t *bgep);
4668 #pragma	no_inline(bge_factotum_stall_check)
4669 
4670 static boolean_t
4671 bge_factotum_stall_check(bge_t *bgep)
4672 {
4673 	uint32_t dogval;
4674 	bge_status_t *bsp;
4675 	uint64_t now = gethrtime();
4676 
4677 	if ((now - bgep->timestamp) < BGE_CYCLIC_PERIOD)
4678 		return (B_FALSE);
4679 
4680 	bgep->timestamp = now;
4681 
4682 	ASSERT(mutex_owned(bgep->genlock));
4683 
4684 	/*
4685 	 * Specific check for Tx stall ...
4686 	 *
4687 	 * The 'watchdog' counter is incremented whenever a packet
4688 	 * is queued, reset to 1 when some (but not all) buffers
4689 	 * are reclaimed, reset to 0 (disabled) when all buffers
4690 	 * are reclaimed, and shifted left here.  If it exceeds the
4691 	 * threshold value, the chip is assumed to have stalled and
4692 	 * is put into the ERROR state.  The factotum will then reset
4693 	 * it on the next pass.
4694 	 *
4695 	 * All of which should ensure that we don't get into a state
4696 	 * where packets are left pending indefinitely!
4697 	 */
4698 	dogval = bge_atomic_shl32(&bgep->watchdog, 1);
4699 	bsp = DMA_VPTR(bgep->status_block);
4700 	if (dogval < bge_watchdog_count || bge_recycle(bgep, bsp))
4701 		return (B_FALSE);
4702 
4703 #if !defined(BGE_NETCONSOLE)
4704 	BGE_REPORT((bgep, "Tx stall detected, watchdog code 0x%x", dogval));
4705 #endif
4706 	bge_fm_ereport(bgep, DDI_FM_DEVICE_STALL);
4707 	return (B_TRUE);
4708 }
4709 
4710 /*
4711  * The factotum is woken up when there's something to do that we'd rather
4712  * not do from inside a hardware interrupt handler or high-level cyclic.
4713  * Its two main tasks are:
4714  *	reset & restart the chip after an error
4715  *	check the link status whenever necessary
4716  */
4717 uint_t bge_chip_factotum(caddr_t arg);
4718 #pragma	no_inline(bge_chip_factotum)
4719 
4720 uint_t
4721 bge_chip_factotum(caddr_t arg)
4722 {
4723 	bge_t *bgep;
4724 	uint_t result;
4725 	boolean_t error;
4726 	boolean_t linkchg;
4727 	int dma_state;
4728 
4729 	bgep = (void *)arg;
4730 
4731 	BGE_TRACE(("bge_chip_factotum($%p)", (void *)bgep));
4732 
4733 	mutex_enter(bgep->softintrlock);
4734 	if (bgep->factotum_flag == 0) {
4735 		mutex_exit(bgep->softintrlock);
4736 		return (DDI_INTR_UNCLAIMED);
4737 	}
4738 	bgep->factotum_flag = 0;
4739 	mutex_exit(bgep->softintrlock);
4740 
4741 	result = DDI_INTR_CLAIMED;
4742 	error = B_FALSE;
4743 	linkchg = B_FALSE;
4744 
4745 	mutex_enter(bgep->genlock);
4746 	switch (bgep->bge_chip_state) {
4747 	default:
4748 		break;
4749 
4750 	case BGE_CHIP_RUNNING:
4751 		linkchg = bge_factotum_link_check(bgep, &dma_state);
4752 		error = bge_factotum_stall_check(bgep);
4753 		if (dma_state != DDI_FM_OK) {
4754 			bgep->bge_dma_error = B_TRUE;
4755 			error = B_TRUE;
4756 		}
4757 		if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK)
4758 			error = B_TRUE;
4759 		if (error)
4760 			bgep->bge_chip_state = BGE_CHIP_ERROR;
4761 		break;
4762 
4763 	case BGE_CHIP_ERROR:
4764 		error = B_TRUE;
4765 		break;
4766 
4767 	case BGE_CHIP_FAULT:
4768 		/*
4769 		 * Fault detected, time to reset ...
4770 		 */
4771 		if (bge_autorecover) {
4772 			if (!(bgep->progress & PROGRESS_BUFS)) {
4773 				/*
4774 				 * if we can't allocate the ring buffers,
4775 				 * try later
4776 				 */
4777 				if (bge_alloc_bufs(bgep) != DDI_SUCCESS) {
4778 					mutex_exit(bgep->genlock);
4779 					return (result);
4780 				}
4781 				bgep->progress |= PROGRESS_BUFS;
4782 			}
4783 			if (!(bgep->progress & PROGRESS_INTR)) {
4784 				bge_init_rings(bgep);
4785 				bge_intr_enable(bgep);
4786 				bgep->progress |= PROGRESS_INTR;
4787 			}
4788 			if (!(bgep->progress & PROGRESS_KSTATS)) {
4789 				bge_init_kstats(bgep,
4790 				    ddi_get_instance(bgep->devinfo));
4791 				bgep->progress |= PROGRESS_KSTATS;
4792 			}
4793 
4794 			BGE_REPORT((bgep, "automatic recovery activated"));
4795 
4796 			if (bge_restart(bgep, B_FALSE) != DDI_SUCCESS) {
4797 				bgep->bge_chip_state = BGE_CHIP_ERROR;
4798 				error = B_TRUE;
4799 			}
4800 			if (bge_check_acc_handle(bgep, bgep->cfg_handle) !=
4801 			    DDI_FM_OK) {
4802 				bgep->bge_chip_state = BGE_CHIP_ERROR;
4803 				error = B_TRUE;
4804 			}
4805 			if (bge_check_acc_handle(bgep, bgep->io_handle) !=
4806 			    DDI_FM_OK) {
4807 				bgep->bge_chip_state = BGE_CHIP_ERROR;
4808 				error = B_TRUE;
4809 			}
4810 			if (error == B_FALSE) {
4811 #ifdef BGE_IPMI_ASF
4812 				if (bgep->asf_enabled &&
4813 				    bgep->asf_status != ASF_STAT_RUN) {
4814 					bgep->asf_timeout_id = timeout(
4815 					    bge_asf_heartbeat, (void *)bgep,
4816 					    drv_usectohz(
4817 					    BGE_ASF_HEARTBEAT_INTERVAL));
4818 					bgep->asf_status = ASF_STAT_RUN;
4819 				}
4820 #endif
4821 				if (!bgep->manual_reset) {
4822 					ddi_fm_service_impact(bgep->devinfo,
4823 					    DDI_SERVICE_RESTORED);
4824 				}
4825 			}
4826 		}
4827 		break;
4828 	}
4829 
4830 
4831 	/*
4832 	 * If an error is detected, stop the chip now, marking it as
4833 	 * faulty, so that it will be reset next time through ...
4834 	 *
4835 	 * Note that if intr_running is set, then bge_intr() has dropped
4836 	 * genlock to call bge_receive/bge_recycle. Can't stop the chip at
4837 	 * this point so have to wait until the next time the factotum runs.
4838 	 */
4839 	if (error && !bgep->bge_intr_running) {
4840 #ifdef BGE_IPMI_ASF
4841 		if (bgep->asf_enabled && (bgep->asf_status == ASF_STAT_RUN)) {
4842 			/*
4843 			 * We must stop ASF heart beat before bge_chip_stop(),
4844 			 * otherwise some computers (ex. IBM HS20 blade server)
4845 			 * may crash.
4846 			 */
4847 			bge_asf_update_status(bgep);
4848 			bge_asf_stop_timer(bgep);
4849 			bgep->asf_status = ASF_STAT_STOP;
4850 
4851 			bge_asf_pre_reset_operations(bgep, BGE_INIT_RESET);
4852 			(void) bge_check_acc_handle(bgep, bgep->cfg_handle);
4853 		}
4854 #endif
4855 		bge_chip_stop(bgep, B_TRUE);
4856 		(void) bge_check_acc_handle(bgep, bgep->io_handle);
4857 	}
4858 	mutex_exit(bgep->genlock);
4859 
4860 	/*
4861 	 * If the link state changed, tell the world about it.
4862 	 * Note: can't do this while still holding the mutex.
4863 	 */
4864 	if (bgep->link_update_timer == BGE_LINK_UPDATE_TIMEOUT &&
4865 	    bgep->link_state != LINK_STATE_UNKNOWN)
4866 		linkchg = B_TRUE;
4867 	else if (bgep->link_update_timer < BGE_LINK_UPDATE_TIMEOUT &&
4868 	    bgep->link_state == LINK_STATE_DOWN)
4869 		linkchg = B_FALSE;
4870 
4871 	if (linkchg) {
4872 		mac_link_update(bgep->mh, bgep->link_state);
4873 		bgep->link_update_timer = BGE_LINK_UPDATE_DONE;
4874 	}
4875 	if (bgep->manual_reset) {
4876 		bgep->manual_reset = B_FALSE;
4877 	}
4878 
4879 	return (result);
4880 }
4881 
4882 /*
4883  * High-level cyclic handler
4884  *
4885  * This routine schedules a (low-level) softint callback to the
4886  * factotum, and prods the chip to update the status block (which
4887  * will cause a hardware interrupt when complete).
4888  */
4889 void bge_chip_cyclic(void *arg);
4890 #pragma	no_inline(bge_chip_cyclic)
4891 
4892 void
4893 bge_chip_cyclic(void *arg)
4894 {
4895 	bge_t *bgep;
4896 
4897 	bgep = arg;
4898 
4899 	switch (bgep->bge_chip_state) {
4900 	default:
4901 		return;
4902 
4903 	case BGE_CHIP_RUNNING:
4904 		bge_reg_set32(bgep, HOST_COALESCE_MODE_REG, COALESCE_NOW);
4905 		if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK)
4906 			ddi_fm_service_impact(bgep->devinfo,
4907 			    DDI_SERVICE_UNAFFECTED);
4908 
4909 		if (bgep->link_update_timer < BGE_LINK_UPDATE_TIMEOUT)
4910 			bgep->link_update_timer++;
4911 
4912 		break;
4913 
4914 	case BGE_CHIP_FAULT:
4915 	case BGE_CHIP_ERROR:
4916 		break;
4917 	}
4918 
4919 	bge_wake_factotum(bgep);
4920 }
4921 
4922 
4923 /*
4924  * ========== Ioctl subfunctions ==========
4925  */
4926 
4927 #undef	BGE_DBG
4928 #define	BGE_DBG		BGE_DBG_PPIO	/* debug flag for this code	*/
4929 
4930 #if	BGE_DEBUGGING || BGE_DO_PPIO
4931 
4932 static void bge_chip_peek_cfg(bge_t *bgep, bge_peekpoke_t *ppd);
4933 #pragma	no_inline(bge_chip_peek_cfg)
4934 
4935 static void
4936 bge_chip_peek_cfg(bge_t *bgep, bge_peekpoke_t *ppd)
4937 {
4938 	uint64_t regval;
4939 	uint64_t regno;
4940 
4941 	BGE_TRACE(("bge_chip_peek_cfg($%p, $%p)",
4942 	    (void *)bgep, (void *)ppd));
4943 
4944 	regno = ppd->pp_acc_offset;
4945 
4946 	switch (ppd->pp_acc_size) {
4947 	case 1:
4948 		regval = pci_config_get8(bgep->cfg_handle, regno);
4949 		break;
4950 
4951 	case 2:
4952 		regval = pci_config_get16(bgep->cfg_handle, regno);
4953 		break;
4954 
4955 	case 4:
4956 		regval = pci_config_get32(bgep->cfg_handle, regno);
4957 		break;
4958 
4959 	case 8:
4960 		regval = pci_config_get64(bgep->cfg_handle, regno);
4961 		break;
4962 	}
4963 
4964 	ppd->pp_acc_data = regval;
4965 }
4966 
4967 static void bge_chip_poke_cfg(bge_t *bgep, bge_peekpoke_t *ppd);
4968 #pragma	no_inline(bge_chip_poke_cfg)
4969 
4970 static void
4971 bge_chip_poke_cfg(bge_t *bgep, bge_peekpoke_t *ppd)
4972 {
4973 	uint64_t regval;
4974 	uint64_t regno;
4975 
4976 	BGE_TRACE(("bge_chip_poke_cfg($%p, $%p)",
4977 	    (void *)bgep, (void *)ppd));
4978 
4979 	regno = ppd->pp_acc_offset;
4980 	regval = ppd->pp_acc_data;
4981 
4982 	switch (ppd->pp_acc_size) {
4983 	case 1:
4984 		pci_config_put8(bgep->cfg_handle, regno, regval);
4985 		break;
4986 
4987 	case 2:
4988 		pci_config_put16(bgep->cfg_handle, regno, regval);
4989 		break;
4990 
4991 	case 4:
4992 		pci_config_put32(bgep->cfg_handle, regno, regval);
4993 		break;
4994 
4995 	case 8:
4996 		pci_config_put64(bgep->cfg_handle, regno, regval);
4997 		break;
4998 	}
4999 }
5000 
5001 static void bge_chip_peek_reg(bge_t *bgep, bge_peekpoke_t *ppd);
5002 #pragma	no_inline(bge_chip_peek_reg)
5003 
5004 static void
5005 bge_chip_peek_reg(bge_t *bgep, bge_peekpoke_t *ppd)
5006 {
5007 	uint64_t regval;
5008 	void *regaddr;
5009 
5010 	BGE_TRACE(("bge_chip_peek_reg($%p, $%p)",
5011 	    (void *)bgep, (void *)ppd));
5012 
5013 	regaddr = PIO_ADDR(bgep, ppd->pp_acc_offset);
5014 
5015 	switch (ppd->pp_acc_size) {
5016 	case 1:
5017 		regval = ddi_get8(bgep->io_handle, regaddr);
5018 		break;
5019 
5020 	case 2:
5021 		regval = ddi_get16(bgep->io_handle, regaddr);
5022 		break;
5023 
5024 	case 4:
5025 		regval = ddi_get32(bgep->io_handle, regaddr);
5026 		break;
5027 
5028 	case 8:
5029 		regval = ddi_get64(bgep->io_handle, regaddr);
5030 		break;
5031 	}
5032 
5033 	ppd->pp_acc_data = regval;
5034 }
5035 
5036 static void bge_chip_poke_reg(bge_t *bgep, bge_peekpoke_t *ppd);
5037 #pragma	no_inline(bge_chip_peek_reg)
5038 
5039 static void
5040 bge_chip_poke_reg(bge_t *bgep, bge_peekpoke_t *ppd)
5041 {
5042 	uint64_t regval;
5043 	void *regaddr;
5044 
5045 	BGE_TRACE(("bge_chip_poke_reg($%p, $%p)",
5046 	    (void *)bgep, (void *)ppd));
5047 
5048 	regaddr = PIO_ADDR(bgep, ppd->pp_acc_offset);
5049 	regval = ppd->pp_acc_data;
5050 
5051 	switch (ppd->pp_acc_size) {
5052 	case 1:
5053 		ddi_put8(bgep->io_handle, regaddr, regval);
5054 		break;
5055 
5056 	case 2:
5057 		ddi_put16(bgep->io_handle, regaddr, regval);
5058 		break;
5059 
5060 	case 4:
5061 		ddi_put32(bgep->io_handle, regaddr, regval);
5062 		break;
5063 
5064 	case 8:
5065 		ddi_put64(bgep->io_handle, regaddr, regval);
5066 		break;
5067 	}
5068 	BGE_PCICHK(bgep);
5069 }
5070 
5071 static void bge_chip_peek_nic(bge_t *bgep, bge_peekpoke_t *ppd);
5072 #pragma	no_inline(bge_chip_peek_nic)
5073 
5074 static void
5075 bge_chip_peek_nic(bge_t *bgep, bge_peekpoke_t *ppd)
5076 {
5077 	uint64_t regoff;
5078 	uint64_t regval;
5079 	void *regaddr;
5080 
5081 	BGE_TRACE(("bge_chip_peek_nic($%p, $%p)",
5082 	    (void *)bgep, (void *)ppd));
5083 
5084 	regoff = ppd->pp_acc_offset;
5085 	bge_nic_setwin(bgep, regoff & ~MWBAR_GRANULE_MASK);
5086 	regoff &= MWBAR_GRANULE_MASK;
5087 	regoff += NIC_MEM_WINDOW_OFFSET;
5088 	regaddr = PIO_ADDR(bgep, regoff);
5089 
5090 	switch (ppd->pp_acc_size) {
5091 	case 1:
5092 		regval = ddi_get8(bgep->io_handle, regaddr);
5093 		break;
5094 
5095 	case 2:
5096 		regval = ddi_get16(bgep->io_handle, regaddr);
5097 		break;
5098 
5099 	case 4:
5100 		regval = ddi_get32(bgep->io_handle, regaddr);
5101 		break;
5102 
5103 	case 8:
5104 		regval = ddi_get64(bgep->io_handle, regaddr);
5105 		break;
5106 	}
5107 
5108 	ppd->pp_acc_data = regval;
5109 }
5110 
5111 static void bge_chip_poke_nic(bge_t *bgep, bge_peekpoke_t *ppd);
5112 #pragma	no_inline(bge_chip_poke_nic)
5113 
5114 static void
5115 bge_chip_poke_nic(bge_t *bgep, bge_peekpoke_t *ppd)
5116 {
5117 	uint64_t regoff;
5118 	uint64_t regval;
5119 	void *regaddr;
5120 
5121 	BGE_TRACE(("bge_chip_poke_nic($%p, $%p)",
5122 	    (void *)bgep, (void *)ppd));
5123 
5124 	regoff = ppd->pp_acc_offset;
5125 	bge_nic_setwin(bgep, regoff & ~MWBAR_GRANULE_MASK);
5126 	regoff &= MWBAR_GRANULE_MASK;
5127 	regoff += NIC_MEM_WINDOW_OFFSET;
5128 	regaddr = PIO_ADDR(bgep, regoff);
5129 	regval = ppd->pp_acc_data;
5130 
5131 	switch (ppd->pp_acc_size) {
5132 	case 1:
5133 		ddi_put8(bgep->io_handle, regaddr, regval);
5134 		break;
5135 
5136 	case 2:
5137 		ddi_put16(bgep->io_handle, regaddr, regval);
5138 		break;
5139 
5140 	case 4:
5141 		ddi_put32(bgep->io_handle, regaddr, regval);
5142 		break;
5143 
5144 	case 8:
5145 		ddi_put64(bgep->io_handle, regaddr, regval);
5146 		break;
5147 	}
5148 	BGE_PCICHK(bgep);
5149 }
5150 
5151 static void bge_chip_peek_mii(bge_t *bgep, bge_peekpoke_t *ppd);
5152 #pragma	no_inline(bge_chip_peek_mii)
5153 
5154 static void
5155 bge_chip_peek_mii(bge_t *bgep, bge_peekpoke_t *ppd)
5156 {
5157 	BGE_TRACE(("bge_chip_peek_mii($%p, $%p)",
5158 	    (void *)bgep, (void *)ppd));
5159 
5160 	ppd->pp_acc_data = bge_mii_get16(bgep, ppd->pp_acc_offset/2);
5161 }
5162 
5163 static void bge_chip_poke_mii(bge_t *bgep, bge_peekpoke_t *ppd);
5164 #pragma	no_inline(bge_chip_poke_mii)
5165 
5166 static void
5167 bge_chip_poke_mii(bge_t *bgep, bge_peekpoke_t *ppd)
5168 {
5169 	BGE_TRACE(("bge_chip_poke_mii($%p, $%p)",
5170 	    (void *)bgep, (void *)ppd));
5171 
5172 	bge_mii_put16(bgep, ppd->pp_acc_offset/2, ppd->pp_acc_data);
5173 }
5174 
5175 #if	BGE_SEE_IO32
5176 
5177 static void bge_chip_peek_seeprom(bge_t *bgep, bge_peekpoke_t *ppd);
5178 #pragma	no_inline(bge_chip_peek_seeprom)
5179 
5180 static void
5181 bge_chip_peek_seeprom(bge_t *bgep, bge_peekpoke_t *ppd)
5182 {
5183 	uint32_t data;
5184 	int err;
5185 
5186 	BGE_TRACE(("bge_chip_peek_seeprom($%p, $%p)",
5187 	    (void *)bgep, (void *)ppd));
5188 
5189 	err = bge_nvmem_rw32(bgep, BGE_SEE_READ, ppd->pp_acc_offset, &data);
5190 	ppd->pp_acc_data = err ? ~0ull : data;
5191 }
5192 
5193 static void bge_chip_poke_seeprom(bge_t *bgep, bge_peekpoke_t *ppd);
5194 #pragma	no_inline(bge_chip_poke_seeprom)
5195 
5196 static void
5197 bge_chip_poke_seeprom(bge_t *bgep, bge_peekpoke_t *ppd)
5198 {
5199 	uint32_t data;
5200 
5201 	BGE_TRACE(("bge_chip_poke_seeprom($%p, $%p)",
5202 	    (void *)bgep, (void *)ppd));
5203 
5204 	data = ppd->pp_acc_data;
5205 	(void) bge_nvmem_rw32(bgep, BGE_SEE_WRITE, ppd->pp_acc_offset, &data);
5206 }
5207 #endif	/* BGE_SEE_IO32 */
5208 
5209 #if	BGE_FLASH_IO32
5210 
5211 static void bge_chip_peek_flash(bge_t *bgep, bge_peekpoke_t *ppd);
5212 #pragma	no_inline(bge_chip_peek_flash)
5213 
5214 static void
5215 bge_chip_peek_flash(bge_t *bgep, bge_peekpoke_t *ppd)
5216 {
5217 	uint32_t data;
5218 	int err;
5219 
5220 	BGE_TRACE(("bge_chip_peek_flash($%p, $%p)",
5221 	    (void *)bgep, (void *)ppd));
5222 
5223 	err = bge_nvmem_rw32(bgep, BGE_FLASH_READ, ppd->pp_acc_offset, &data);
5224 	ppd->pp_acc_data = err ? ~0ull : data;
5225 }
5226 
5227 static void bge_chip_poke_flash(bge_t *bgep, bge_peekpoke_t *ppd);
5228 #pragma	no_inline(bge_chip_poke_flash)
5229 
5230 static void
5231 bge_chip_poke_flash(bge_t *bgep, bge_peekpoke_t *ppd)
5232 {
5233 	uint32_t data;
5234 
5235 	BGE_TRACE(("bge_chip_poke_flash($%p, $%p)",
5236 	    (void *)bgep, (void *)ppd));
5237 
5238 	data = ppd->pp_acc_data;
5239 	(void) bge_nvmem_rw32(bgep, BGE_FLASH_WRITE,
5240 	    ppd->pp_acc_offset, &data);
5241 }
5242 #endif	/* BGE_FLASH_IO32 */
5243 
5244 static void bge_chip_peek_mem(bge_t *bgep, bge_peekpoke_t *ppd);
5245 #pragma	no_inline(bge_chip_peek_mem)
5246 
5247 static void
5248 bge_chip_peek_mem(bge_t *bgep, bge_peekpoke_t *ppd)
5249 {
5250 	uint64_t regval;
5251 	void *vaddr;
5252 
5253 	BGE_TRACE(("bge_chip_peek_bge($%p, $%p)",
5254 	    (void *)bgep, (void *)ppd));
5255 
5256 	vaddr = (void *)(uintptr_t)ppd->pp_acc_offset;
5257 
5258 	switch (ppd->pp_acc_size) {
5259 	case 1:
5260 		regval = *(uint8_t *)vaddr;
5261 		break;
5262 
5263 	case 2:
5264 		regval = *(uint16_t *)vaddr;
5265 		break;
5266 
5267 	case 4:
5268 		regval = *(uint32_t *)vaddr;
5269 		break;
5270 
5271 	case 8:
5272 		regval = *(uint64_t *)vaddr;
5273 		break;
5274 	}
5275 
5276 	BGE_DEBUG(("bge_chip_peek_mem($%p, $%p) peeked 0x%llx from $%p",
5277 	    (void *)bgep, (void *)ppd, regval, vaddr));
5278 
5279 	ppd->pp_acc_data = regval;
5280 }
5281 
5282 static void bge_chip_poke_mem(bge_t *bgep, bge_peekpoke_t *ppd);
5283 #pragma	no_inline(bge_chip_poke_mem)
5284 
5285 static void
5286 bge_chip_poke_mem(bge_t *bgep, bge_peekpoke_t *ppd)
5287 {
5288 	uint64_t regval;
5289 	void *vaddr;
5290 
5291 	BGE_TRACE(("bge_chip_poke_mem($%p, $%p)",
5292 	    (void *)bgep, (void *)ppd));
5293 
5294 	vaddr = (void *)(uintptr_t)ppd->pp_acc_offset;
5295 	regval = ppd->pp_acc_data;
5296 
5297 	BGE_DEBUG(("bge_chip_poke_mem($%p, $%p) poking 0x%llx at $%p",
5298 	    (void *)bgep, (void *)ppd, regval, vaddr));
5299 
5300 	switch (ppd->pp_acc_size) {
5301 	case 1:
5302 		*(uint8_t *)vaddr = (uint8_t)regval;
5303 		break;
5304 
5305 	case 2:
5306 		*(uint16_t *)vaddr = (uint16_t)regval;
5307 		break;
5308 
5309 	case 4:
5310 		*(uint32_t *)vaddr = (uint32_t)regval;
5311 		break;
5312 
5313 	case 8:
5314 		*(uint64_t *)vaddr = (uint64_t)regval;
5315 		break;
5316 	}
5317 }
5318 
5319 static enum ioc_reply bge_pp_ioctl(bge_t *bgep, int cmd, mblk_t *mp,
5320 					struct iocblk *iocp);
5321 #pragma	no_inline(bge_pp_ioctl)
5322 
5323 static enum ioc_reply
5324 bge_pp_ioctl(bge_t *bgep, int cmd, mblk_t *mp, struct iocblk *iocp)
5325 {
5326 	void (*ppfn)(bge_t *bgep, bge_peekpoke_t *ppd);
5327 	bge_peekpoke_t *ppd;
5328 	dma_area_t *areap;
5329 	uint64_t sizemask;
5330 	uint64_t mem_va;
5331 	uint64_t maxoff;
5332 	boolean_t peek;
5333 
5334 	switch (cmd) {
5335 	default:
5336 		/* NOTREACHED */
5337 		bge_error(bgep, "bge_pp_ioctl: invalid cmd 0x%x", cmd);
5338 		return (IOC_INVAL);
5339 
5340 	case BGE_PEEK:
5341 		peek = B_TRUE;
5342 		break;
5343 
5344 	case BGE_POKE:
5345 		peek = B_FALSE;
5346 		break;
5347 	}
5348 
5349 	/*
5350 	 * Validate format of ioctl
5351 	 */
5352 	if (iocp->ioc_count != sizeof (bge_peekpoke_t))
5353 		return (IOC_INVAL);
5354 	if (mp->b_cont == NULL)
5355 		return (IOC_INVAL);
5356 	ppd = (void *)mp->b_cont->b_rptr;
5357 
5358 	/*
5359 	 * Validate request parameters
5360 	 */
5361 	switch (ppd->pp_acc_space) {
5362 	default:
5363 		return (IOC_INVAL);
5364 
5365 	case BGE_PP_SPACE_CFG:
5366 		/*
5367 		 * Config space
5368 		 */
5369 		sizemask = 8|4|2|1;
5370 		mem_va = 0;
5371 		maxoff = PCI_CONF_HDR_SIZE;
5372 		ppfn = peek ? bge_chip_peek_cfg : bge_chip_poke_cfg;
5373 		break;
5374 
5375 	case BGE_PP_SPACE_REG:
5376 		/*
5377 		 * Memory-mapped I/O space
5378 		 */
5379 		sizemask = 8|4|2|1;
5380 		mem_va = 0;
5381 		maxoff = RIAAR_REGISTER_MAX;
5382 		ppfn = peek ? bge_chip_peek_reg : bge_chip_poke_reg;
5383 		break;
5384 
5385 	case BGE_PP_SPACE_NIC:
5386 		/*
5387 		 * NIC on-chip memory
5388 		 */
5389 		sizemask = 8|4|2|1;
5390 		mem_va = 0;
5391 		maxoff = MWBAR_ONCHIP_MAX;
5392 		ppfn = peek ? bge_chip_peek_nic : bge_chip_poke_nic;
5393 		break;
5394 
5395 	case BGE_PP_SPACE_MII:
5396 		/*
5397 		 * PHY's MII registers
5398 		 * NB: all PHY registers are two bytes, but the
5399 		 * addresses increment in ones (word addressing).
5400 		 * So we scale the address here, then undo the
5401 		 * transformation inside the peek/poke functions.
5402 		 */
5403 		ppd->pp_acc_offset *= 2;
5404 		sizemask = 2;
5405 		mem_va = 0;
5406 		maxoff = (MII_MAXREG+1)*2;
5407 		ppfn = peek ? bge_chip_peek_mii : bge_chip_poke_mii;
5408 		break;
5409 
5410 #if	BGE_SEE_IO32
5411 	case BGE_PP_SPACE_SEEPROM:
5412 		/*
5413 		 * Attached SEEPROM(s), if any.
5414 		 * NB: we use the high-order bits of the 'address' as
5415 		 * a device select to accommodate multiple SEEPROMS,
5416 		 * If each one is the maximum size (64kbytes), this
5417 		 * makes them appear contiguous.  Otherwise, there may
5418 		 * be holes in the mapping.  ENxS doesn't have any
5419 		 * SEEPROMs anyway ...
5420 		 */
5421 		sizemask = 4;
5422 		mem_va = 0;
5423 		maxoff = SEEPROM_DEV_AND_ADDR_MASK;
5424 		ppfn = peek ? bge_chip_peek_seeprom : bge_chip_poke_seeprom;
5425 		break;
5426 #endif	/* BGE_SEE_IO32 */
5427 
5428 #if	BGE_FLASH_IO32
5429 	case BGE_PP_SPACE_FLASH:
5430 		/*
5431 		 * Attached Flash device (if any); a maximum of one device
5432 		 * is currently supported.  But it can be up to 1MB (unlike
5433 		 * the 64k limit on SEEPROMs) so why would you need more ;-)
5434 		 */
5435 		sizemask = 4;
5436 		mem_va = 0;
5437 		maxoff = NVM_FLASH_ADDR_MASK;
5438 		ppfn = peek ? bge_chip_peek_flash : bge_chip_poke_flash;
5439 		break;
5440 #endif	/* BGE_FLASH_IO32 */
5441 
5442 	case BGE_PP_SPACE_BGE:
5443 		/*
5444 		 * BGE data structure!
5445 		 */
5446 		sizemask = 8|4|2|1;
5447 		mem_va = (uintptr_t)bgep;
5448 		maxoff = sizeof (*bgep);
5449 		ppfn = peek ? bge_chip_peek_mem : bge_chip_poke_mem;
5450 		break;
5451 
5452 	case BGE_PP_SPACE_STATUS:
5453 	case BGE_PP_SPACE_STATISTICS:
5454 	case BGE_PP_SPACE_TXDESC:
5455 	case BGE_PP_SPACE_TXBUFF:
5456 	case BGE_PP_SPACE_RXDESC:
5457 	case BGE_PP_SPACE_RXBUFF:
5458 		/*
5459 		 * Various DMA_AREAs
5460 		 */
5461 		switch (ppd->pp_acc_space) {
5462 		case BGE_PP_SPACE_TXDESC:
5463 			areap = &bgep->tx_desc;
5464 			break;
5465 		case BGE_PP_SPACE_TXBUFF:
5466 			areap = &bgep->tx_buff[0];
5467 			break;
5468 		case BGE_PP_SPACE_RXDESC:
5469 			areap = &bgep->rx_desc[0];
5470 			break;
5471 		case BGE_PP_SPACE_RXBUFF:
5472 			areap = &bgep->rx_buff[0];
5473 			break;
5474 		case BGE_PP_SPACE_STATUS:
5475 			areap = &bgep->status_block;
5476 			break;
5477 		case BGE_PP_SPACE_STATISTICS:
5478 			if (bgep->chipid.statistic_type == BGE_STAT_BLK)
5479 				areap = &bgep->statistics;
5480 			break;
5481 		}
5482 
5483 		sizemask = 8|4|2|1;
5484 		mem_va = (uintptr_t)areap->mem_va;
5485 		maxoff = areap->alength;
5486 		ppfn = peek ? bge_chip_peek_mem : bge_chip_poke_mem;
5487 		break;
5488 	}
5489 
5490 	switch (ppd->pp_acc_size) {
5491 	default:
5492 		return (IOC_INVAL);
5493 
5494 	case 8:
5495 	case 4:
5496 	case 2:
5497 	case 1:
5498 		if ((ppd->pp_acc_size & sizemask) == 0)
5499 			return (IOC_INVAL);
5500 		break;
5501 	}
5502 
5503 	if ((ppd->pp_acc_offset % ppd->pp_acc_size) != 0)
5504 		return (IOC_INVAL);
5505 
5506 	if (ppd->pp_acc_offset >= maxoff)
5507 		return (IOC_INVAL);
5508 
5509 	if (ppd->pp_acc_offset+ppd->pp_acc_size > maxoff)
5510 		return (IOC_INVAL);
5511 
5512 	/*
5513 	 * All OK - go do it!
5514 	 */
5515 	ppd->pp_acc_offset += mem_va;
5516 	(*ppfn)(bgep, ppd);
5517 	return (peek ? IOC_REPLY : IOC_ACK);
5518 }
5519 
5520 static enum ioc_reply bge_diag_ioctl(bge_t *bgep, int cmd, mblk_t *mp,
5521 					struct iocblk *iocp);
5522 #pragma	no_inline(bge_diag_ioctl)
5523 
5524 static enum ioc_reply
5525 bge_diag_ioctl(bge_t *bgep, int cmd, mblk_t *mp, struct iocblk *iocp)
5526 {
5527 	ASSERT(mutex_owned(bgep->genlock));
5528 
5529 	switch (cmd) {
5530 	default:
5531 		/* NOTREACHED */
5532 		bge_error(bgep, "bge_diag_ioctl: invalid cmd 0x%x", cmd);
5533 		return (IOC_INVAL);
5534 
5535 	case BGE_DIAG:
5536 		/*
5537 		 * Currently a no-op
5538 		 */
5539 		return (IOC_ACK);
5540 
5541 	case BGE_PEEK:
5542 	case BGE_POKE:
5543 		return (bge_pp_ioctl(bgep, cmd, mp, iocp));
5544 
5545 	case BGE_PHY_RESET:
5546 		return (IOC_RESTART_ACK);
5547 
5548 	case BGE_SOFT_RESET:
5549 	case BGE_HARD_RESET:
5550 		/*
5551 		 * Reset and reinitialise the 570x hardware
5552 		 */
5553 		bgep->bge_chip_state = BGE_CHIP_FAULT;
5554 		ddi_trigger_softintr(bgep->factotum_id);
5555 		(void) bge_restart(bgep, cmd == BGE_HARD_RESET);
5556 		return (IOC_ACK);
5557 	}
5558 
5559 	/* NOTREACHED */
5560 }
5561 
5562 #endif	/* BGE_DEBUGGING || BGE_DO_PPIO */
5563 
5564 static enum ioc_reply bge_mii_ioctl(bge_t *bgep, int cmd, mblk_t *mp,
5565 				    struct iocblk *iocp);
5566 #pragma	no_inline(bge_mii_ioctl)
5567 
5568 static enum ioc_reply
5569 bge_mii_ioctl(bge_t *bgep, int cmd, mblk_t *mp, struct iocblk *iocp)
5570 {
5571 	struct bge_mii_rw *miirwp;
5572 
5573 	/*
5574 	 * Validate format of ioctl
5575 	 */
5576 	if (iocp->ioc_count != sizeof (struct bge_mii_rw))
5577 		return (IOC_INVAL);
5578 	if (mp->b_cont == NULL)
5579 		return (IOC_INVAL);
5580 	miirwp = (void *)mp->b_cont->b_rptr;
5581 
5582 	/*
5583 	 * Validate request parameters ...
5584 	 */
5585 	if (miirwp->mii_reg > MII_MAXREG)
5586 		return (IOC_INVAL);
5587 
5588 	switch (cmd) {
5589 	default:
5590 		/* NOTREACHED */
5591 		bge_error(bgep, "bge_mii_ioctl: invalid cmd 0x%x", cmd);
5592 		return (IOC_INVAL);
5593 
5594 	case BGE_MII_READ:
5595 		miirwp->mii_data = bge_mii_get16(bgep, miirwp->mii_reg);
5596 		return (IOC_REPLY);
5597 
5598 	case BGE_MII_WRITE:
5599 		bge_mii_put16(bgep, miirwp->mii_reg, miirwp->mii_data);
5600 		return (IOC_ACK);
5601 	}
5602 
5603 	/* NOTREACHED */
5604 }
5605 
5606 #if	BGE_SEE_IO32
5607 
5608 static enum ioc_reply bge_see_ioctl(bge_t *bgep, int cmd, mblk_t *mp,
5609 				    struct iocblk *iocp);
5610 #pragma	no_inline(bge_see_ioctl)
5611 
5612 static enum ioc_reply
5613 bge_see_ioctl(bge_t *bgep, int cmd, mblk_t *mp, struct iocblk *iocp)
5614 {
5615 	struct bge_see_rw *seerwp;
5616 
5617 	/*
5618 	 * Validate format of ioctl
5619 	 */
5620 	if (iocp->ioc_count != sizeof (struct bge_see_rw))
5621 		return (IOC_INVAL);
5622 	if (mp->b_cont == NULL)
5623 		return (IOC_INVAL);
5624 	seerwp = (void *)mp->b_cont->b_rptr;
5625 
5626 	/*
5627 	 * Validate request parameters ...
5628 	 */
5629 	if (seerwp->see_addr & ~SEEPROM_DEV_AND_ADDR_MASK)
5630 		return (IOC_INVAL);
5631 
5632 	switch (cmd) {
5633 	default:
5634 		/* NOTREACHED */
5635 		bge_error(bgep, "bge_see_ioctl: invalid cmd 0x%x", cmd);
5636 		return (IOC_INVAL);
5637 
5638 	case BGE_SEE_READ:
5639 	case BGE_SEE_WRITE:
5640 		iocp->ioc_error = bge_nvmem_rw32(bgep, cmd,
5641 		    seerwp->see_addr, &seerwp->see_data);
5642 		return (IOC_REPLY);
5643 	}
5644 
5645 	/* NOTREACHED */
5646 }
5647 
5648 #endif	/* BGE_SEE_IO32 */
5649 
5650 #if	BGE_FLASH_IO32
5651 
5652 static enum ioc_reply bge_flash_ioctl(bge_t *bgep, int cmd, mblk_t *mp,
5653 				    struct iocblk *iocp);
5654 #pragma	no_inline(bge_flash_ioctl)
5655 
5656 static enum ioc_reply
5657 bge_flash_ioctl(bge_t *bgep, int cmd, mblk_t *mp, struct iocblk *iocp)
5658 {
5659 	struct bge_flash_rw *flashrwp;
5660 
5661 	/*
5662 	 * Validate format of ioctl
5663 	 */
5664 	if (iocp->ioc_count != sizeof (struct bge_flash_rw))
5665 		return (IOC_INVAL);
5666 	if (mp->b_cont == NULL)
5667 		return (IOC_INVAL);
5668 	flashrwp = (void *)mp->b_cont->b_rptr;
5669 
5670 	/*
5671 	 * Validate request parameters ...
5672 	 */
5673 	if (flashrwp->flash_addr & ~NVM_FLASH_ADDR_MASK)
5674 		return (IOC_INVAL);
5675 
5676 	switch (cmd) {
5677 	default:
5678 		/* NOTREACHED */
5679 		bge_error(bgep, "bge_flash_ioctl: invalid cmd 0x%x", cmd);
5680 		return (IOC_INVAL);
5681 
5682 	case BGE_FLASH_READ:
5683 	case BGE_FLASH_WRITE:
5684 		iocp->ioc_error = bge_nvmem_rw32(bgep, cmd,
5685 		    flashrwp->flash_addr, &flashrwp->flash_data);
5686 		return (IOC_REPLY);
5687 	}
5688 
5689 	/* NOTREACHED */
5690 }
5691 
5692 #endif	/* BGE_FLASH_IO32 */
5693 
enum ioc_reply bge_chip_ioctl(bge_t *bgep, queue_t *wq, mblk_t *mp,
				struct iocblk *iocp);
#pragma	no_inline(bge_chip_ioctl)

/*
 * Top-level dispatcher for the driver-private ioctls.  Routes each
 * command to the diagnostic, MII, SEEPROM or flash handler; commands
 * whose support was compiled out return IOC_INVAL.  Caller must hold
 * genlock.
 */
enum ioc_reply
bge_chip_ioctl(bge_t *bgep, queue_t *wq, mblk_t *mp, struct iocblk *iocp)
{
	int cmd;

	BGE_TRACE(("bge_chip_ioctl($%p, $%p, $%p, $%p)",
	    (void *)bgep, (void *)wq, (void *)mp, (void *)iocp));

	ASSERT(mutex_owned(bgep->genlock));

	cmd = iocp->ioc_cmd;
	switch (cmd) {
	default:
		/* NOTREACHED */
		bge_error(bgep, "bge_chip_ioctl: invalid cmd 0x%x", cmd);
		return (IOC_INVAL);

	case BGE_DIAG:
	case BGE_PEEK:
	case BGE_POKE:
	case BGE_PHY_RESET:
	case BGE_SOFT_RESET:
	case BGE_HARD_RESET:
#if	BGE_DEBUGGING || BGE_DO_PPIO
		return (bge_diag_ioctl(bgep, cmd, mp, iocp));
#else
		return (IOC_INVAL);
#endif	/* BGE_DEBUGGING || BGE_DO_PPIO */

	case BGE_MII_READ:
	case BGE_MII_WRITE:
		return (bge_mii_ioctl(bgep, cmd, mp, iocp));

#if	BGE_SEE_IO32
	case BGE_SEE_READ:
	case BGE_SEE_WRITE:
		return (bge_see_ioctl(bgep, cmd, mp, iocp));
#endif	/* BGE_SEE_IO32 */

#if	BGE_FLASH_IO32
	case BGE_FLASH_READ:
	case BGE_FLASH_WRITE:
		return (bge_flash_ioctl(bgep, cmd, mp, iocp));
#endif	/* BGE_FLASH_IO32 */
	}

	/* NOTREACHED */
}
5746 
/*
 * MAC-layer interrupt-blanking / rx-polling entry point: records the
 * polling flag for the given receive ring under genlock.  The hardware
 * coalescing update is compiled out (see NOT_YET below).
 */
/* ARGSUSED */
void
bge_chip_blank(void *arg, time_t ticks, uint_t count, int flag)
{
	recv_ring_t *rrp = arg;
	bge_t *bgep = rrp->bgep;

	mutex_enter(bgep->genlock);
	rrp->poll_flag = flag;
#ifdef NOT_YET
	/*
	 * XXX-Sunay: Since most broadcom cards support only one
	 * interrupt but multiple rx rings, we can't disable the
	 * physical interrupt. This need to be done via capability
	 * negotiation depending on the NIC.
	 */
	bge_reg_put32(bgep, RCV_COALESCE_TICKS_REG, ticks);
	bge_reg_put32(bgep, RCV_COALESCE_MAX_BD_REG, count);
#endif
	/* report (but don't act on) any register access fault */
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK)
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_UNAFFECTED);
	mutex_exit(bgep->genlock);
}
5770 
5771 #ifdef BGE_IPMI_ASF
5772 
/*
 * Read a 32-bit word of NIC on-chip memory via the PCI config-space
 * memory window (MWBAR/MWDAR), rather than the mapped register space.
 * The window is opened, read, and closed again before returning.
 */
uint32_t
bge_nic_read32(bge_t *bgep, bge_regno_t addr)
{
	uint32_t data;

#ifndef __sparc
	if (!bgep->asf_wordswapped) {
		/* a workaround word swap error */
		if (addr & 4)
			addr = addr - 4;
		else
			addr = addr + 4;
	}
#else
	/* 5717-series parts need the window address byte-swapped */
	if (DEVICE_5717_SERIES_CHIPSETS(bgep))
		addr = LE_32(addr);
#endif

	/* open the window at <addr>, read the data, then close it */
	pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_MWBAR, addr);
	data = pci_config_get32(bgep->cfg_handle, PCI_CONF_BGE_MWDAR);
	pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_MWBAR, 0);

	data = LE_32(data);

	BGE_DEBUG(("bge_nic_read32($%p, 0x%x) => 0x%x",
	    (void *)bgep, addr, data));

	return (data);
}
5802 
/*
 * Post the "driver alive" message to the ASF firmware: write the ALIVE
 * command (length 4, data 3) into the NIC command mailboxes, then raise
 * the ASF event bit in the RX RISC event register so the firmware
 * notices the new command.
 */
void
bge_asf_update_status(bge_t *bgep)
{
	uint32_t event;

	bge_nic_put32(bgep, BGE_CMD_MAILBOX, BGE_CMD_NICDRV_ALIVE);
	bge_nic_put32(bgep, BGE_CMD_LENGTH_MAILBOX, 4);
	bge_nic_put32(bgep, BGE_CMD_DATA_MAILBOX,   3);

	event = bge_reg_get32(bgep, RX_RISC_EVENT_REG);
	bge_reg_put32(bgep, RX_RISC_EVENT_REG, event | RRER_ASF_EVENT);
}
5815 
5816 
5817 /*
5818  * The driver is supposed to notify ASF that the OS is still running
5819  * every three seconds, otherwise the management server may attempt
5820  * to reboot the machine.  If it hasn't actually failed, this is
5821  * not a desirable result.  However, this isn't running as a real-time
5822  * thread, and even if it were, it might not be able to generate the
5823  * heartbeat in a timely manner due to system load.  As it isn't a
5824  * significant strain on the machine, we will set the interval to half
5825  * of the required value.
5826  */
5827 void
5828 bge_asf_heartbeat(void *arg)
5829 {
5830 	bge_t *bgep = (bge_t *)arg;
5831 
5832 	mutex_enter(bgep->genlock);
5833 	bge_asf_update_status((bge_t *)bgep);
5834 	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK)
5835 		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
5836 	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK)
5837 		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
5838 	mutex_exit(bgep->genlock);
5839 	((bge_t *)bgep)->asf_timeout_id = timeout(bge_asf_heartbeat, bgep,
5840 	    drv_usectohz(BGE_ASF_HEARTBEAT_INTERVAL));
5841 }
5842 
5843 
5844 void
5845 bge_asf_stop_timer(bge_t *bgep)
5846 {
5847 	timeout_id_t tmp_id = 0;
5848 
5849 	while ((bgep->asf_timeout_id != 0) &&
5850 	    (tmp_id != bgep->asf_timeout_id)) {
5851 		tmp_id = bgep->asf_timeout_id;
5852 		(void) untimeout(tmp_id);
5853 	}
5854 	bgep->asf_timeout_id = 0;
5855 }
5856 
5857 
5858 
5859 /*
5860  * This function should be placed at the earliest position of bge_attach().
5861  */
5862 void
5863 bge_asf_get_config(bge_t *bgep)
5864 {
5865 	uint32_t nicsig;
5866 	uint32_t niccfg;
5867 
5868 	bgep->asf_enabled = B_FALSE;
5869 	nicsig = bge_nic_read32(bgep, BGE_NIC_DATA_SIG_ADDR);
5870 	if (nicsig == BGE_NIC_DATA_SIG) {
5871 		niccfg = bge_nic_read32(bgep, BGE_NIC_DATA_NIC_CFG_ADDR);
5872 		if (niccfg & BGE_NIC_CFG_ENABLE_ASF)
5873 			/*
5874 			 * Here, we don't consider BAXTER, because BGE haven't
5875 			 * supported BAXTER (that is 5752). Also, as I know,
5876 			 * BAXTER doesn't support ASF feature.
5877 			 */
5878 			bgep->asf_enabled = B_TRUE;
5879 		else
5880 			bgep->asf_enabled = B_FALSE;
5881 	} else
5882 		bgep->asf_enabled = B_FALSE;
5883 }
5884 
5885 
/*
 * Tell the ASF firmware that a chip reset is imminent: pause the
 * firmware, wait (bounded) for it to acknowledge, post the magic
 * "firmware init done" number, and -- under the new handshake
 * protocol -- record the driver state matching the kind of reset.
 *
 * mode: BGE_INIT_RESET, BGE_SHUTDOWN_RESET or BGE_SUSPEND_RESET.
 */
void
bge_asf_pre_reset_operations(bge_t *bgep, uint32_t mode)
{
	uint32_t tries;
	uint32_t event;

	ASSERT(bgep->asf_enabled);

	/* Issues "pause firmware" command and wait for ACK */
	bge_nic_put32(bgep, BGE_CMD_MAILBOX, BGE_CMD_NICDRV_PAUSE_FW);
	event = bge_reg_get32(bgep, RX_RISC_EVENT_REG);
	bge_reg_put32(bgep, RX_RISC_EVENT_REG, event | RRER_ASF_EVENT);

	/*
	 * Poll for RRER_ASF_EVENT to clear (presumably the firmware
	 * clears it when it has consumed the command -- TODO confirm
	 * against the chip manual), but give up after ~100us so a
	 * wedged firmware cannot stall the reset path.
	 */
	event = bge_reg_get32(bgep, RX_RISC_EVENT_REG);
	tries = 0;
	while ((event & RRER_ASF_EVENT) && (tries < 100)) {
		drv_usecwait(1);
		tries ++;
		event = bge_reg_get32(bgep, RX_RISC_EVENT_REG);
	}

	bge_nic_put32(bgep, BGE_FIRMWARE_MAILBOX,
	    BGE_MAGIC_NUM_FIRMWARE_INIT_DONE);

	if (bgep->asf_newhandshake) {
		/* New protocol: advertise the pending driver state */
		switch (mode) {
		case BGE_INIT_RESET:
			bge_nic_put32(bgep, BGE_DRV_STATE_MAILBOX,
			    BGE_DRV_STATE_START);
			break;
		case BGE_SHUTDOWN_RESET:
			bge_nic_put32(bgep, BGE_DRV_STATE_MAILBOX,
			    BGE_DRV_STATE_UNLOAD);
			break;
		case BGE_SUSPEND_RESET:
			bge_nic_put32(bgep, BGE_DRV_STATE_MAILBOX,
			    BGE_DRV_STATE_SUSPEND);
			break;
		default:
			break;
		}
	}
}
5929 
5930 
5931 void
5932 bge_asf_post_reset_old_mode(bge_t *bgep, uint32_t mode)
5933 {
5934 	switch (mode) {
5935 	case BGE_INIT_RESET:
5936 		bge_nic_put32(bgep, BGE_DRV_STATE_MAILBOX,
5937 		    BGE_DRV_STATE_START);
5938 		break;
5939 	case BGE_SHUTDOWN_RESET:
5940 		bge_nic_put32(bgep, BGE_DRV_STATE_MAILBOX,
5941 		    BGE_DRV_STATE_UNLOAD);
5942 		break;
5943 	case BGE_SUSPEND_RESET:
5944 		bge_nic_put32(bgep, BGE_DRV_STATE_MAILBOX,
5945 		    BGE_DRV_STATE_SUSPEND);
5946 		break;
5947 	default:
5948 		break;
5949 	}
5950 }
5951 
5952 
5953 void
5954 bge_asf_post_reset_new_mode(bge_t *bgep, uint32_t mode)
5955 {
5956 	switch (mode) {
5957 	case BGE_INIT_RESET:
5958 		bge_nic_put32(bgep, BGE_DRV_STATE_MAILBOX,
5959 		    BGE_DRV_STATE_START_DONE);
5960 		break;
5961 	case BGE_SHUTDOWN_RESET:
5962 		bge_nic_put32(bgep, BGE_DRV_STATE_MAILBOX,
5963 		    BGE_DRV_STATE_UNLOAD_DONE);
5964 		break;
5965 	default:
5966 		break;
5967 	}
5968 }
5969 
5970 #endif /* BGE_IPMI_ASF */
5971