xref: /illumos-gate/usr/src/uts/common/io/bge/bge_chip2.c (revision fcdb3229a31dd4ff700c69238814e326aad49098)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright (c) 2010-2013, by Broadcom, Inc.
24  * All Rights Reserved.
25  */
26 
27 /*
28  * Copyright (c) 2002, 2010, Oracle and/or its affiliates.
29  * All rights reserved.
30  * Copyright 2016 Nexenta Systems, Inc.  All rights reserved.
31  */
32 
33 #include "bge_impl.h"
34 
35 #define	PIO_ADDR(bgep, offset)	((void *)((caddr_t)(bgep)->io_regs+(offset)))
36 #define	APE_ADDR(bgep, offset)	((void *)((caddr_t)(bgep)->ape_regs+(offset)))
37 
38 /*
39  * Future features ... ?
40  */
41 #define	BGE_CFG_IO8	1	/* 8/16-bit cfg space BIS/BIC	*/
42 #define	BGE_IND_IO32	1	/* indirect access code		*/
43 #define	BGE_SEE_IO32	1	/* SEEPROM access code		*/
44 #define	BGE_FLASH_IO32	1	/* FLASH access code		*/
45 
/*
 * BGE MSI tunable:
 *
 * By default MSI is enabled on all supported platforms but it is disabled
 * for some Broadcom chips due to known MSI hardware issues. Currently MSI
 * is enabled only for 5714C A2 and 5715C A2 broadcom chips.
 */
boolean_t bge_enable_msi = B_TRUE;

/*
 * PCI-X/PCI-E relaxed ordering tunable for OS/Nexus driver
 */
boolean_t bge_relaxed_ordering = B_TRUE;

/*
 * Patchable globals:
 *
 *	bge_autorecover
 *		Enables/disables automatic recovery after fault detection
 *
 *	bge_mlcr_default
 *		Value to program into the MLCR; controls the chip's GPIO pins
 *
 *	bge_dma_{rd,wr}prio
 *		Relative priorities of DMA reads & DMA writes respectively.
 *		These may each be patched to any value 0-3.  Equal values
 *		will give "fair" (round-robin) arbitration for PCI access.
 *		Unequal values will give one or the other function priority.
 *
 *	bge_dma_rwctrl
 *		Value to put in the Read/Write DMA control register.  See
 *		the Broadcom PRM for things you can fiddle with in this
 *		register ...
 *
 *	bge_{tx,rx}_{count,ticks}_{norm,intr}
 *		Send/receive interrupt coalescing parameters.  Counts are
 *		#s of descriptors, ticks are in microseconds.  *norm* values
 *		apply between status updates/interrupts; the *intr* values
 *		refer to the 'during-interrupt' versions - see the PRM.
 *
 *		NOTE: these values have been determined by measurement. They
 *		differ significantly from the values recommended in the PRM.
 */
static uint32_t bge_autorecover = 1;
static uint32_t bge_mlcr_default_5714 = MLCR_DEFAULT_5714;

static uint32_t bge_dma_rdprio = 1;
static uint32_t bge_dma_wrprio = 0;
static uint32_t bge_dma_rwctrl = PDRWCR_VAR_DEFAULT;
/* Per-chip variants of the DMA read/write control register value */
static uint32_t bge_dma_rwctrl_5721 = PDRWCR_VAR_5721;
static uint32_t bge_dma_rwctrl_5714 = PDRWCR_VAR_5714;
static uint32_t bge_dma_rwctrl_5715 = PDRWCR_VAR_5715;

uint32_t bge_rx_ticks_norm = 128;
uint32_t bge_tx_ticks_norm = 512;
uint32_t bge_rx_count_norm = 8;
uint32_t bge_tx_count_norm = 128;

static uint32_t bge_rx_ticks_intr = 128;
static uint32_t bge_tx_ticks_intr = 0;		/* 8 for FJ2+ !?!?	*/
static uint32_t bge_rx_count_intr = 2;
static uint32_t bge_tx_count_intr = 0;

/*
 * Memory pool configuration parameters.
 *
 * These are generally specific to each member of the chip family, since
 * each one may have a different memory size/configuration.
 *
 * Setting the mbuf pool length for a specific type of chip to 0 inhibits
 * the driver from programming the various registers; instead they are left
 * at their hardware defaults.  This is the preferred option for later chips
 * (5705+), whereas the older chips *required* these registers to be set,
 * since the h/w default was 0 ;-(
 */
static uint32_t bge_mbuf_pool_base	= MBUF_POOL_BASE_DEFAULT;
static uint32_t bge_mbuf_pool_base_5704	= MBUF_POOL_BASE_5704;
static uint32_t bge_mbuf_pool_base_5705	= MBUF_POOL_BASE_5705;
static uint32_t bge_mbuf_pool_base_5721 = MBUF_POOL_BASE_5721;
static uint32_t bge_mbuf_pool_len	= MBUF_POOL_LENGTH_DEFAULT;
static uint32_t bge_mbuf_pool_len_5704	= MBUF_POOL_LENGTH_5704;
static uint32_t bge_mbuf_pool_len_5705	= 0;	/* use h/w default	*/
static uint32_t bge_mbuf_pool_len_5721	= 0;

/*
 * Various high and low water marks, thresholds, etc ...
 *
 * Note: these are taken from revision 7 of the PRM, and some are different
 * from both the values in earlier PRMs *and* those determined experimentally
 * and used in earlier versions of this driver ...
 */
static uint32_t bge_mbuf_hi_water	= MBUF_HIWAT_DEFAULT;
static uint32_t bge_mbuf_lo_water_rmac	= MAC_RX_MBUF_LOWAT_DEFAULT;
static uint32_t bge_mbuf_lo_water_rdma	= RDMA_MBUF_LOWAT_DEFAULT;

static uint32_t bge_dmad_lo_water	= DMAD_POOL_LOWAT_DEFAULT;
static uint32_t bge_dmad_hi_water	= DMAD_POOL_HIWAT_DEFAULT;
static uint32_t bge_lowat_recv_frames	= LOWAT_MAX_RECV_FRAMES_DEFAULT;

static uint32_t bge_replenish_std	= STD_RCV_BD_REPLENISH_DEFAULT;
static uint32_t bge_replenish_mini	= MINI_RCV_BD_REPLENISH_DEFAULT;
static uint32_t bge_replenish_jumbo	= JUMBO_RCV_BD_REPLENISH_DEFAULT;

static uint32_t	bge_watchdog_count	= 1 << 16;
static uint16_t bge_dma_miss_limit	= 20;

/* When nonzero, stop/restart the chip around (re)sync operations */
static uint32_t bge_stop_start_on_sync	= 0;

/*
 * bge_intr_max_loop controls the maximum loop number within bge_intr.
 * When loading NIC with heavy network traffic, it is useful.
 * Increasing this value could have positive effect to throughput,
 * but it might also increase ticks of a bge ISR stick on CPU, which might
 * lead to bad UI interactive experience. So tune this with caution.
 */
static int bge_intr_max_loop = 1;
162 
163 /*
164  * ========== Low-level chip & ring buffer manipulation ==========
165  */
166 
167 #define	BGE_DBG		BGE_DBG_REGS	/* debug flag for this code	*/
168 
169 
170 /*
171  * Config space read-modify-write routines
172  */
173 
174 #if	BGE_CFG_IO8
175 
176 static void
bge_cfg_clr16(bge_t * bgep,bge_regno_t regno,uint16_t bits)177 bge_cfg_clr16(bge_t *bgep, bge_regno_t regno, uint16_t bits)
178 {
179 	uint16_t regval;
180 
181 	BGE_TRACE(("bge_cfg_clr16($%p, 0x%lx, 0x%x)",
182 	    (void *)bgep, regno, bits));
183 
184 	regval = pci_config_get16(bgep->cfg_handle, regno);
185 
186 	BGE_DEBUG(("bge_cfg_clr16($%p, 0x%lx, 0x%x): 0x%x => 0x%x",
187 	    (void *)bgep, regno, bits, regval, regval & ~bits));
188 
189 	regval &= ~bits;
190 	pci_config_put16(bgep->cfg_handle, regno, regval);
191 }
192 
193 #endif	/* BGE_CFG_IO8 */
194 
195 static void
bge_cfg_clr32(bge_t * bgep,bge_regno_t regno,uint32_t bits)196 bge_cfg_clr32(bge_t *bgep, bge_regno_t regno, uint32_t bits)
197 {
198 	uint32_t regval;
199 
200 	BGE_TRACE(("bge_cfg_clr32($%p, 0x%lx, 0x%x)",
201 	    (void *)bgep, regno, bits));
202 
203 	regval = pci_config_get32(bgep->cfg_handle, regno);
204 
205 	BGE_DEBUG(("bge_cfg_clr32($%p, 0x%lx, 0x%x): 0x%x => 0x%x",
206 	    (void *)bgep, regno, bits, regval, regval & ~bits));
207 
208 	regval &= ~bits;
209 	pci_config_put32(bgep->cfg_handle, regno, regval);
210 }
211 
212 #if	BGE_IND_IO32
213 
214 /*
215  * Indirect access to registers & RISC scratchpads, using config space
216  * accesses only.
217  *
218  * This isn't currently used, but someday we might want to use it for
219  * restoring the Subsystem Device/Vendor registers (which aren't directly
220  * writable in Config Space), or for downloading firmware into the RISCs
221  *
222  * In any case there are endian issues to be resolved before this code is
223  * enabled; the bizarre way that bytes get twisted by this chip AND by
224  * the PCI bridge in SPARC systems mean that we shouldn't enable it until
225  * it's been thoroughly tested for all access sizes on all supported
226  * architectures (SPARC *and* x86!).
227  */
228 uint32_t
bge_ind_get32(bge_t * bgep,bge_regno_t regno)229 bge_ind_get32(bge_t *bgep, bge_regno_t regno)
230 {
231 	uint32_t val;
232 
233 	BGE_TRACE(("bge_ind_get32($%p, 0x%lx)", (void *)bgep, regno));
234 
235 #ifdef __sparc
236 	if (DEVICE_5717_SERIES_CHIPSETS(bgep) ||
237 	    DEVICE_5725_SERIES_CHIPSETS(bgep) ||
238 	    DEVICE_57765_SERIES_CHIPSETS(bgep)) {
239 		regno = LE_32(regno);
240 	}
241 #endif
242 	pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_RIAAR, regno);
243 	val = pci_config_get32(bgep->cfg_handle, PCI_CONF_BGE_RIADR);
244 
245 	BGE_DEBUG(("bge_ind_get32($%p, 0x%lx) => 0x%x",
246 	    (void *)bgep, regno, val));
247 
248 	val = LE_32(val);
249 
250 	return (val);
251 }
252 
/*
 * Indirect register write: convert <val> to the chip's little-endian
 * layout, latch <regno> into the RIAAR window register, then store the
 * value through RIADR.  Counterpart of bge_ind_get32() above; see the
 * endianness caveats in the comment preceding that function.
 */
void
bge_ind_put32(bge_t *bgep, bge_regno_t regno, uint32_t val)
{
	BGE_TRACE(("bge_ind_put32($%p, 0x%lx, 0x%x)",
	    (void *)bgep, regno, val));

	val = LE_32(val);
#ifdef __sparc
	/* These chip families want the window address byte-swapped too */
	if (DEVICE_5717_SERIES_CHIPSETS(bgep) ||
	    DEVICE_5725_SERIES_CHIPSETS(bgep) ||
	    DEVICE_57765_SERIES_CHIPSETS(bgep)) {
		regno = LE_32(regno);
	}
#endif
	pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_RIAAR, regno);
	pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_RIADR, val);
}
270 
271 #endif	/* BGE_IND_IO32 */
272 
273 #if	BGE_DEBUGGING
274 
275 static void
bge_pci_check(bge_t * bgep)276 bge_pci_check(bge_t *bgep)
277 {
278 	uint16_t pcistatus;
279 
280 	pcistatus = pci_config_get16(bgep->cfg_handle, PCI_CONF_STAT);
281 	if ((pcistatus & (PCI_STAT_R_MAST_AB | PCI_STAT_R_TARG_AB)) != 0)
282 		BGE_DEBUG(("bge_pci_check($%p): PCI status 0x%x",
283 		    (void *)bgep, pcistatus));
284 }
285 
286 #endif	/* BGE_DEBUGGING */
287 
288 /*
289  * Perform first-stage chip (re-)initialisation, using only config-space
290  * accesses:
291  *
292  * + Read the vendor/device/revision/subsystem/cache-line-size registers,
293  *   returning the data in the structure pointed to by <idp>.
294  * + Configure the target-mode endianness (swap) options.
295  * + Disable interrupts and enable Memory Space accesses.
296  * + Enable or disable Bus Mastering according to the <enable_dma> flag.
297  *
298  * This sequence is adapted from Broadcom document 570X-PG102-R,
299  * page 102, steps 1-3, 6-8 and 11-13.  The omitted parts of the sequence
300  * are 4 and 5 (Reset Core and wait) which are handled elsewhere.
301  *
302  * This function MUST be called before any non-config-space accesses
303  * are made; on this first call <enable_dma> is B_FALSE, and it
304  * effectively performs steps 3-1(!) of the initialisation sequence
305  * (the rest are not required but should be harmless).
306  *
307  * It MUST also be called after a chip reset, as this disables
308  * Memory Space cycles!  In this case, <enable_dma> is B_TRUE, and
309  * it is effectively performing steps 6-8.
310  */
void
bge_chip_cfg_init(bge_t *bgep, chip_id_t *cidp, boolean_t enable_dma)
{
	ddi_acc_handle_t handle;
	uint16_t command;
	uint32_t mhcr;
	uint32_t prodid;
	uint32_t pci_state;
	uint16_t value16;
	int i;

	BGE_TRACE(("bge_chip_cfg_init($%p, $%p, %d)",
	    (void *)bgep, (void *)cidp, enable_dma));

	/*
	 * Step 3: save PCI cache line size and subsystem vendor ID
	 *
	 * Read all the config-space registers that characterise the
	 * chip, specifically vendor/device/revision/subsystem vendor
	 * and subsystem device id.  We expect (but don't check) that
	 * (vendor == VENDOR_ID_BROADCOM) && (device == DEVICE_ID_5704)
	 *
	 * Also save all bus-transaction related registers (cache-line
	 * size, bus-grant/latency parameters, etc).  Some of these are
	 * cleared by reset, so we'll have to restore them later.  This
	 * comes from the Broadcom document 570X-PG102-R ...
	 *
	 * Note: Broadcom document 570X-PG102-R seems to be in error
	 * here w.r.t. the offsets of the Subsystem Vendor ID and
	 * Subsystem (Device) ID registers, which are the opposite way
	 * round according to the PCI standard.  For good measure, we
	 * save/restore both anyway.
	 */
	handle = bgep->cfg_handle;

	/*
	 * For some chipsets (e.g., BCM5718), if MHCR_ENABLE_ENDIAN_BYTE_SWAP
	 * has been set in PCI_CONF_COMM already, we need to write the
	 * byte-swapped value to it. So we just write zero first for simplicity.
	 */
	cidp->device = pci_config_get16(handle, PCI_CONF_DEVID);
	if (DEVICE_5717_SERIES_CHIPSETS(bgep) ||
	    DEVICE_5725_SERIES_CHIPSETS(bgep) ||
	    DEVICE_57765_SERIES_CHIPSETS(bgep)) {
		pci_config_put32(handle, PCI_CONF_BGE_MHCR, 0);
	}

	mhcr = pci_config_get32(handle, PCI_CONF_BGE_MHCR);
	cidp->asic_rev = (mhcr & MHCR_CHIP_REV_MASK);
	cidp->asic_rev_prod_id = 0;
	/*
	 * Newer chips don't encode the revision in the MHCR; they flag
	 * that the revision lives in a separate product-ID register,
	 * whose offset varies by chip generation.
	 */
	if ((cidp->asic_rev & 0xf0000000) == CHIP_ASIC_REV_USE_PROD_ID_REG) {
		prodid = CHIP_ASIC_REV_PROD_ID_REG;
		if (DEVICE_5717_SERIES_CHIPSETS(bgep) ||
		    DEVICE_5725_SERIES_CHIPSETS(bgep)) {
			prodid = CHIP_ASIC_REV_PROD_ID_GEN2_REG;
		} else if (DEVICE_57765_SERIES_CHIPSETS(bgep)) {
			prodid = CHIP_ASIC_REV_PROD_ID_GEN15_REG;
		}
		cidp->asic_rev_prod_id = pci_config_get32(handle, prodid);
	}

	cidp->businfo = pci_config_get32(handle, PCI_CONF_BGE_PCISTATE);
	cidp->command = pci_config_get16(handle, PCI_CONF_COMM);

	cidp->vendor = pci_config_get16(handle, PCI_CONF_VENID);
	cidp->subven = pci_config_get16(handle, PCI_CONF_SUBVENID);
	cidp->subdev = pci_config_get16(handle, PCI_CONF_SUBSYSID);
	cidp->revision = pci_config_get8(handle, PCI_CONF_REVID);
	cidp->clsize = pci_config_get8(handle, PCI_CONF_CACHE_LINESZ);
	cidp->latency = pci_config_get8(handle, PCI_CONF_LATENCY_TIMER);

	/* 5717 C0 is treated just like 5720 A0 */
	if (pci_config_get16(bgep->cfg_handle, PCI_CONF_DEVID) ==
	    DEVICE_ID_5717_C0) {
		cidp->device = DEVICE_ID_5720;
	}

	BGE_DEBUG(("bge_chip_cfg_init: %s bus is %s and %s; #INTA is %s",
	    cidp->businfo & PCISTATE_BUS_IS_PCI ? "PCI" : "PCI-X",
	    cidp->businfo & PCISTATE_BUS_IS_FAST ? "fast" : "slow",
	    cidp->businfo & PCISTATE_BUS_IS_32_BIT ? "narrow" : "wide",
	    cidp->businfo & PCISTATE_INTA_STATE ? "high" : "low"));
	BGE_DEBUG(("bge_chip_cfg_init: vendor 0x%x device 0x%x revision 0x%x",
	    cidp->vendor, cidp->device, cidp->revision));
	BGE_DEBUG(("bge_chip_cfg_init: subven 0x%x subdev 0x%x asic_rev 0x%x",
	    cidp->subven, cidp->subdev, cidp->asic_rev));
	BGE_DEBUG(("bge_chip_cfg_init: clsize %d latency %d command 0x%x",
	    cidp->clsize, cidp->latency, cidp->command));

	/*
	 * Step 2 (also step 6): disable and clear interrupts.
	 * Steps 11-13: configure PIO endianness options, and enable
	 * indirect register access.  We'll also select any other
	 * options controlled by the MHCR (e.g. tagged status, mask
	 * interrupt mode) at this stage ...
	 *
	 * Note: internally, the chip is 64-bit and BIG-endian, but
	 * since it talks to the host over a (LITTLE-endian) PCI bus,
	 * it normally swaps bytes around at the PCI interface.
	 * However, the PCI host bridge on SPARC systems normally
	 * swaps the byte lanes around too, since SPARCs are also
	 * BIG-endian.  So it turns out that on SPARC, the right
	 * option is to tell the chip to swap (and the host bridge
	 * will swap back again), whereas on x86 we ask the chip
	 * NOT to swap, so the natural little-endianness of the
	 * PCI bus is assumed.  Then the only thing that doesn't
	 * automatically work right is access to an 8-byte register
	 * by a little-endian host; but we don't want to set the
	 * MHCR_ENABLE_REGISTER_WORD_SWAP bit because then 4-byte
	 * accesses don't go where expected ;-(  So we live with
	 * that, and perform word-swaps in software in the few cases
	 * where a chip register is defined as an 8-byte value --
	 * see the code below for details ...
	 *
	 * Note: the meaning of the 'MASK_INTERRUPT_MODE' bit isn't
	 * very clear in the register description in the PRM, but
	 * Broadcom document 570X-PG104-R page 248 explains a little
	 * more (under "Broadcom Mask Mode").  The bit changes the way
	 * the MASK_PCI_INT_OUTPUT bit works: with MASK_INTERRUPT_MODE
	 * clear, the chip interprets MASK_PCI_INT_OUTPUT in the same
	 * way as the 5700 did, which isn't very convenient.  Setting
	 * the MASK_INTERRUPT_MODE bit makes the MASK_PCI_INT_OUTPUT
	 * bit do just what its name says -- MASK the PCI #INTA output
	 * (i.e. deassert the signal at the pin) leaving all internal
	 * state unchanged.  This is much more convenient for our
	 * interrupt handler, so we set MASK_INTERRUPT_MODE here.
	 *
	 * Note: the inconvenient semantics of the interrupt mailbox
	 * (nonzero disables and acknowledges/clears the interrupt,
	 * zero enables AND CLEARS it) would make race conditions
	 * likely in the interrupt handler:
	 *
	 * (1)	acknowledge & disable interrupts
	 * (2)	while (more to do)
	 *		process packets
	 * (3)	enable interrupts -- also clears pending
	 *
	 * If the chip received more packets and internally generated
	 * an interrupt between the check at (2) and the mbox write
	 * at (3), this interrupt would be lost :-(
	 *
	 * The best way to avoid this is to use TAGGED STATUS mode,
	 * where the chip includes a unique tag in each status block
	 * update, and the host, when re-enabling interrupts, passes
	 * the last tag it saw back to the chip; then the chip can
	 * see whether the host is truly up to date, and regenerate
	 * its interrupt if not.
	 */
	mhcr = MHCR_ENABLE_INDIRECT_ACCESS |
	       MHCR_ENABLE_PCI_STATE_RW |
	       MHCR_ENABLE_TAGGED_STATUS_MODE |
	       MHCR_MASK_INTERRUPT_MODE |
	       MHCR_CLEAR_INTERRUPT_INTA;
	if (bgep->intr_type == DDI_INTR_TYPE_FIXED)
		mhcr |= MHCR_MASK_PCI_INT_OUTPUT;

#ifdef	_BIG_ENDIAN
	mhcr |= MHCR_ENABLE_ENDIAN_WORD_SWAP | MHCR_ENABLE_ENDIAN_BYTE_SWAP;
#endif	/* _BIG_ENDIAN */
	pci_config_put32(handle, PCI_CONF_BGE_MHCR, mhcr);

#ifdef BGE_IPMI_ASF
	bgep->asf_wordswapped = B_FALSE;
#endif

	pci_state = (PCISTATE_EXT_ROM_ENABLE | PCISTATE_EXT_ROM_RETRY);
	/* allow reads and writes to the APE register and memory space */
	if (bgep->ape_enabled) {
		pci_state |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		    PCISTATE_ALLOW_APE_SHMEM_WR | PCISTATE_ALLOW_APE_PSPACE_WR;
	}
	pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_PCISTATE, pci_state);

	/*
	 * Step 1 (also step 7): Enable PCI Memory Space accesses
	 *			 Disable Memory Write/Invalidate
	 *			 Enable or disable Bus Mastering
	 *
	 * Note that all other bits are taken from the original value saved
	 * the first time through here, rather than from the current register
	 * value, 'cos that will have been cleared by a soft RESET since.
	 * In this way we preserve the OBP/nexus-parent's preferred settings
	 * of the parity-error and system-error enable bits across multiple
	 * chip RESETs.
	 */
	command = bgep->chipid.command | PCI_COMM_MAE;
	command &= ~(PCI_COMM_ME|PCI_COMM_MEMWR_INVAL);
	if (enable_dma)
		command |= PCI_COMM_ME;
	/*
	 * on BCM5714 revision A0, false parity error gets generated
	 * due to a logic bug. Provide a workaround by disabling parity
	 * error.
	 */
	if (((cidp->device == DEVICE_ID_5714C) ||
	    (cidp->device == DEVICE_ID_5714S)) &&
	    (cidp->revision == REVISION_ID_5714_A0)) {
		command &= ~PCI_COMM_PARITY_DETECT;
	}
	pci_config_put16(handle, PCI_CONF_COMM, command);

	/*
	 * On some PCI-E device, there were instances when
	 * the device was still link training.  Poll until the write
	 * sticks, waiting at most 100 x 200us (~20ms) total.
	 */
	if (bgep->chipid.pci_type == BGE_PCI_E) {
		i = 0;
		value16 = pci_config_get16(handle, PCI_CONF_COMM);
		while ((value16 != command) && (i < 100)) {
			drv_usecwait(200);
			value16 = pci_config_get16(handle, PCI_CONF_COMM);
			++i;
		}
	}

	/*
	 * Clear any remaining error status bits (the PCI status
	 * register's error bits are write-1-to-clear).
	 */
	pci_config_put16(handle, PCI_CONF_STAT, ~0);

	/*
	 * Do following if and only if the device is NOT BCM5714C OR
	 * BCM5715C
	 */
	if (!((cidp->device == DEVICE_ID_5714C) ||
	    (cidp->device == DEVICE_ID_5715C))) {
		/*
		 * Make sure these indirect-access registers are sane
		 * rather than random after power-up or reset
		 */
		pci_config_put32(handle, PCI_CONF_BGE_RIAAR, 0);
		pci_config_put32(handle, PCI_CONF_BGE_MWBAR, 0);
	}
	/*
	 * Step 8: Disable PCI-X/PCI-E Relaxed Ordering
	 */
	bge_cfg_clr16(bgep, PCIX_CONF_COMM, PCIX_COMM_RELAXED);

	/* PCI-E chips keep the device-control register at varying offsets */
	if (cidp->pci_type == BGE_PCI_E) {
		if (DEVICE_5723_SERIES_CHIPSETS(bgep)) {
			bge_cfg_clr16(bgep, PCI_CONF_DEV_CTRL_5723,
			    DEV_CTRL_NO_SNOOP | DEV_CTRL_RELAXED);
		} else if (DEVICE_5717_SERIES_CHIPSETS(bgep) ||
		           DEVICE_5725_SERIES_CHIPSETS(bgep) ||
			   DEVICE_57765_SERIES_CHIPSETS(bgep)) {
			bge_cfg_clr16(bgep, PCI_CONF_DEV_CTRL_5717,
			    DEV_CTRL_NO_SNOOP | DEV_CTRL_RELAXED);
		} else {
			bge_cfg_clr16(bgep, PCI_CONF_DEV_CTRL,
			    DEV_CTRL_NO_SNOOP | DEV_CTRL_RELAXED);
		}
	}
}
564 
565 #ifdef __amd64
566 /*
567  * Distinguish CPU types
568  *
569  * These use to  distinguish AMD64 or Intel EM64T of CPU running mode.
570  * If CPU runs on Intel EM64T mode,the 64bit operation cannot works fine
571  * for PCI-Express based network interface card. This is the work-around
572  * for those nics.
573  */
static boolean_t
bge_get_em64t_type(void)
{
	/* B_TRUE on Intel EM64T hosts, B_FALSE on AMD64 -- see above */
	return (x86_vendor == X86_VENDOR_Intel);
}
580 #endif
581 
582 /*
583  * Operating register get/set access routines
584  */
585 uint32_t
bge_reg_get32(bge_t * bgep,bge_regno_t regno)586 bge_reg_get32(bge_t *bgep, bge_regno_t regno)
587 {
588 	BGE_TRACE(("bge_reg_get32($%p, 0x%lx)",
589 	    (void *)bgep, regno));
590 
591 	return (ddi_get32(bgep->io_handle, PIO_ADDR(bgep, regno)));
592 }
593 
594 void
bge_reg_put32(bge_t * bgep,bge_regno_t regno,uint32_t data)595 bge_reg_put32(bge_t *bgep, bge_regno_t regno, uint32_t data)
596 {
597 	BGE_TRACE(("bge_reg_put32($%p, 0x%lx, 0x%x)",
598 	    (void *)bgep, regno, data));
599 
600 	ddi_put32(bgep->io_handle, PIO_ADDR(bgep, regno), data);
601 	BGE_PCICHK(bgep);
602 }
603 
604 void
bge_reg_set32(bge_t * bgep,bge_regno_t regno,uint32_t bits)605 bge_reg_set32(bge_t *bgep, bge_regno_t regno, uint32_t bits)
606 {
607 	uint32_t regval;
608 
609 	BGE_TRACE(("bge_reg_set32($%p, 0x%lx, 0x%x)",
610 	    (void *)bgep, regno, bits));
611 
612 	regval = bge_reg_get32(bgep, regno);
613 	regval |= bits;
614 	bge_reg_put32(bgep, regno, regval);
615 }
616 
617 void
bge_reg_clr32(bge_t * bgep,bge_regno_t regno,uint32_t bits)618 bge_reg_clr32(bge_t *bgep, bge_regno_t regno, uint32_t bits)
619 {
620 	uint32_t regval;
621 
622 	BGE_TRACE(("bge_reg_clr32($%p, 0x%lx, 0x%x)",
623 	    (void *)bgep, regno, bits));
624 
625 	regval = bge_reg_get32(bgep, regno);
626 	regval &= ~bits;
627 	bge_reg_put32(bgep, regno, regval);
628 }
629 
/*
 * Read a 64-bit operating register.  Some chip families (and Intel
 * EM64T hosts) don't handle a single 64-bit PIO access reliably, so
 * for those the register is read as two 32-bit halves; the half read
 * first differs between amd64 and SPARC (see the endianness discussion
 * in bge_chip_cfg_init()).
 */
static uint64_t
bge_reg_get64(bge_t *bgep, bge_regno_t regno)
{
	uint64_t regval;

#ifdef	__amd64
	if (DEVICE_5723_SERIES_CHIPSETS(bgep) ||
	    bge_get_em64t_type() ||
	    DEVICE_5717_SERIES_CHIPSETS(bgep) ||
	    DEVICE_5725_SERIES_CHIPSETS(bgep) ||
	    DEVICE_57765_SERIES_CHIPSETS(bgep)) {
		/* high word lives at regno+4, low word at regno */
		regval = ddi_get32(bgep->io_handle, PIO_ADDR(bgep, regno + 4));
		regval <<= 32;
		regval |= ddi_get32(bgep->io_handle, PIO_ADDR(bgep, regno));
	} else {
		regval = ddi_get64(bgep->io_handle, PIO_ADDR(bgep, regno));
	}
#elif defined(__sparc)
	if (DEVICE_5723_SERIES_CHIPSETS(bgep) ||
	    DEVICE_5717_SERIES_CHIPSETS(bgep) ||
	    DEVICE_5725_SERIES_CHIPSETS(bgep) ||
	    DEVICE_57765_SERIES_CHIPSETS(bgep)) {
		regval = ddi_get32(bgep->io_handle, PIO_ADDR(bgep, regno));
		regval <<= 32;
		regval |= ddi_get32(bgep->io_handle, PIO_ADDR(bgep, regno + 4));
	} else {
		regval = ddi_get64(bgep->io_handle, PIO_ADDR(bgep, regno));
	}
#else
	regval = ddi_get64(bgep->io_handle, PIO_ADDR(bgep, regno));
#endif

#ifdef	_LITTLE_ENDIAN
	/* software word-swap: the chip's 8-byte registers are big-endian */
	regval = (regval >> 32) | (regval << 32);
#endif	/* _LITTLE_ENDIAN */

	BGE_TRACE(("bge_reg_get64($%p, 0x%lx) = 0x%016llx",
	    (void *)bgep, regno, regval));

	return (regval);
}
671 
/*
 * Write a 64-bit operating register.  Mirror image of bge_reg_get64()
 * above: on chips/hosts that can't take a single 64-bit PIO access the
 * value is written as two 32-bit halves, with a PCI-abort check between
 * them; the half written first differs between amd64 and SPARC.
 */
static void
bge_reg_put64(bge_t *bgep, bge_regno_t regno, uint64_t data)
{
	BGE_TRACE(("bge_reg_put64($%p, 0x%lx, 0x%016llx)",
	    (void *)bgep, regno, data));

#ifdef	_LITTLE_ENDIAN
	/* software word-swap: the chip's 8-byte registers are big-endian */
	data = ((data >> 32) | (data << 32));
#endif	/* _LITTLE_ENDIAN */

#ifdef	__amd64
	if (DEVICE_5723_SERIES_CHIPSETS(bgep) ||
	    bge_get_em64t_type() ||
	    DEVICE_5717_SERIES_CHIPSETS(bgep) ||
	    DEVICE_5725_SERIES_CHIPSETS(bgep) ||
	    DEVICE_57765_SERIES_CHIPSETS(bgep)) {
		ddi_put32(bgep->io_handle,
		    PIO_ADDR(bgep, regno), (uint32_t)data);
		BGE_PCICHK(bgep);
		ddi_put32(bgep->io_handle,
		    PIO_ADDR(bgep, regno + 4), (uint32_t)(data >> 32));

	} else {
		ddi_put64(bgep->io_handle, PIO_ADDR(bgep, regno), data);
	}
#elif defined(__sparc)
	if (DEVICE_5723_SERIES_CHIPSETS(bgep) ||
	    DEVICE_5717_SERIES_CHIPSETS(bgep) ||
	    DEVICE_5725_SERIES_CHIPSETS(bgep) ||
	    DEVICE_57765_SERIES_CHIPSETS(bgep)) {
		ddi_put32(bgep->io_handle,
		    PIO_ADDR(bgep, regno + 4), (uint32_t)data);
		BGE_PCICHK(bgep);
		ddi_put32(bgep->io_handle,
		    PIO_ADDR(bgep, regno), (uint32_t)(data >> 32));
	} else {
		ddi_put64(bgep->io_handle, PIO_ADDR(bgep, regno), data);
	}
#else
	ddi_put64(bgep->io_handle, PIO_ADDR(bgep, regno), data);
#endif

	BGE_PCICHK(bgep);
}
716 
717 /*
718  * The DDI doesn't provide get/put functions for 128 bit data
719  * so we put RCBs out as two 64-bit chunks instead.
720  */
/*
 * Write a 16-byte Ring Control Block to chip registers at <addr>,
 * as two consecutive 64-bit stores (the DDI has no 128-bit accessor).
 * <addr> must be RCB-size aligned.  NOTE(review): the RCB is accessed
 * through a uint64_t pointer; this presumes bge_rcb_t's layout packs
 * into exactly two 64-bit words with no padding -- confirm in bge_hw.h.
 */
static void
bge_reg_putrcb(bge_t *bgep, bge_regno_t addr, bge_rcb_t *rcbp)
{
	uint64_t *p;

	BGE_TRACE(("bge_reg_putrcb($%p, 0x%lx, 0x%016llx:%04x:%04x:%08x)",
	    (void *)bgep, addr, rcbp->host_ring_addr,
	    rcbp->max_len, rcbp->flags, rcbp->nic_ring_addr));

	ASSERT((addr % sizeof (*rcbp)) == 0);

	p = (void *)rcbp;
	bge_reg_put64(bgep, addr, *p++);
	bge_reg_put64(bgep, addr+8, *p);
}
736 
/*
 * Write a mailbox register.  Only the low-order 32 bits of <data> are
 * actually stored (see the comment below); on 5906-family chips the
 * access is redirected to the low-priority mailbox block.
 */
void
bge_mbx_put(bge_t *bgep, bge_regno_t regno, uint64_t data)
{
	if (DEVICE_5906_SERIES_CHIPSETS(bgep))
		regno += INTERRUPT_LP_MBOX_0_REG - INTERRUPT_MBOX_0_REG + 4;

	BGE_TRACE(("bge_mbx_put($%p, 0x%lx, 0x%016llx)",
	    (void *)bgep, regno, data));

	/*
	 * Mailbox registers are nominally 64 bits on the 5701, but
	 * the MSW isn't used.  On the 5703, they're only 32 bits
	 * anyway.  So here we just write the lower(!) 32 bits -
	 * remembering that the chip is big-endian, even though the
	 * PCI bus is little-endian ...
	 */
#ifdef	_BIG_ENDIAN
	ddi_put32(bgep->io_handle, PIO_ADDR(bgep, regno+4), (uint32_t)data);
#else
	ddi_put32(bgep->io_handle, PIO_ADDR(bgep, regno), (uint32_t)data);
#endif	/* _BIG_ENDIAN */
	BGE_PCICHK(bgep);
}
760 
/*
 * Read a mailbox register.  Only the low-order 32 bits are meaningful
 * (see bge_mbx_put() above for the endianness reasoning); on 5906-family
 * chips the access is redirected to the low-priority mailbox block.
 */
uint32_t
bge_mbx_get(bge_t *bgep, bge_regno_t regno)
{
	uint32_t val32;

	if (DEVICE_5906_SERIES_CHIPSETS(bgep))
		regno += INTERRUPT_LP_MBOX_0_REG - INTERRUPT_MBOX_0_REG + 4;

	BGE_TRACE(("bge_mbx_get($%p, 0x%lx)",
	    (void *)bgep, regno));

	/* the in-use 32 bits sit at regno+4 on big-endian hosts */
#ifdef	_BIG_ENDIAN
	val32 = ddi_get32(bgep->io_handle, PIO_ADDR(bgep, regno+4));
#else
	val32 = ddi_get32(bgep->io_handle, PIO_ADDR(bgep, regno));
#endif	/* _BIG_ENDIAN */
	BGE_PCICHK(bgep);

	BGE_DEBUG(("bge_mbx_get($%p, 0x%lx) => 0x%08x",
	    (void *)bgep, regno, val32));

	return (val32);
}
784 
785 
786 #if	BGE_DEBUGGING
787 
788 void
bge_led_mark(bge_t * bgep)789 bge_led_mark(bge_t *bgep)
790 {
791 	uint32_t led_ctrl = LED_CONTROL_OVERRIDE_LINK |
792 	    LED_CONTROL_1000MBPS_LED |
793 	    LED_CONTROL_100MBPS_LED |
794 	    LED_CONTROL_10MBPS_LED;
795 
796 	/*
797 	 * Blink all three LINK LEDs on simultaneously, then all off,
798 	 * then restore to automatic hardware control.  This is used
799 	 * in laboratory testing to trigger a logic analyser or scope.
800 	 */
801 	bge_reg_set32(bgep, ETHERNET_MAC_LED_CONTROL_REG, led_ctrl);
802 	led_ctrl ^= LED_CONTROL_OVERRIDE_LINK;
803 	bge_reg_clr32(bgep, ETHERNET_MAC_LED_CONTROL_REG, led_ctrl);
804 	led_ctrl = LED_CONTROL_OVERRIDE_LINK;
805 	bge_reg_clr32(bgep, ETHERNET_MAC_LED_CONTROL_REG, led_ctrl);
806 }
807 
808 #endif	/* BGE_DEBUGGING */
809 
810 /*
811  * NIC on-chip memory access routines
812  *
813  * Only 32K of NIC memory is visible at a time, controlled by the
814  * Memory Window Base Address Register (in PCI config space).  Once
815  * this is set, the 32K region of NIC-local memory that it refers
816  * to can be directly addressed in the upper 32K of the 64K of PCI
817  * memory space used for the device.
818  */
/*
 * Position the 32K NIC-memory window at <base>, which must be aligned
 * to the window granule size.  See the block comment above for how the
 * window maps into PCI memory space.
 */
static void
bge_nic_setwin(bge_t *bgep, bge_regno_t base)
{
	chip_id_t *cidp;

	BGE_TRACE(("bge_nic_setwin($%p, 0x%lx)",
	    (void *)bgep, base));

	ASSERT((base & MWBAR_GRANULE_MASK) == 0);

	/*
	 * Don't do repeated zero data writes,
	 * if the device is BCM5714C/15C.
	 */
	cidp = &bgep->chipid;
	if ((cidp->device == DEVICE_ID_5714C) ||
	    (cidp->device == DEVICE_ID_5715C)) {
		if (bgep->lastWriteZeroData && (base == (bge_regno_t)0))
			return;
		/* Adjust lastWriteZeroData */
		bgep->lastWriteZeroData = ((base == (bge_regno_t)0) ?
		    B_TRUE : B_FALSE);
	}
#ifdef __sparc
	/* these chip families want the window base byte-swapped */
	if (DEVICE_5717_SERIES_CHIPSETS(bgep) ||
	    DEVICE_5725_SERIES_CHIPSETS(bgep) ||
	    DEVICE_57765_SERIES_CHIPSETS(bgep)) {
		base = LE_32(base);
	}
#endif
	pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_MWBAR, base);
}
851 
/*
 * Read a 32-bit word of NIC on-chip memory at <addr>.  On SPARC this
 * goes through the indirect config-space path; elsewhere the memory
 * window is repositioned and the word read through the PIO aperture.
 */
static uint32_t
bge_nic_get32(bge_t *bgep, bge_regno_t addr)
{
	uint32_t data;

#if defined(BGE_IPMI_ASF) && !defined(__sparc)
	if (bgep->asf_enabled && !bgep->asf_wordswapped) {
		/*
		 * workaround for word swap error: flip bit 2 of the
		 * address to compensate until the swap is configured
		 */
		if (addr & 4)
			addr = addr - 4;
		else
			addr = addr + 4;
	}
#endif

#ifdef __sparc
	data = bge_nic_read32(bgep, addr);
#else
	/* position the 32K window over <addr>, then read within it */
	bge_nic_setwin(bgep, addr & ~MWBAR_GRANULE_MASK);
	addr &= MWBAR_GRANULE_MASK;
	addr += NIC_MEM_WINDOW_OFFSET;

	data = ddi_get32(bgep->io_handle, PIO_ADDR(bgep, addr));
#endif

	BGE_TRACE(("bge_nic_get32($%p, 0x%lx) = 0x%08x",
	    (void *)bgep, addr, data));

	return (data);
}
882 
/*
 * Write one 32-bit word into NIC-local memory.
 *
 * On SPARC the access is driven entirely through PCI config space
 * (MWBAR/MWDAR), with the window base restored to zero afterwards;
 * elsewhere the shared 32K memory window is used.
 */
void
bge_nic_put32(bge_t *bgep, bge_regno_t addr, uint32_t data)
{
	BGE_TRACE(("bge_nic_put32($%p, 0x%lx, 0x%08x)",
	    (void *)bgep, addr, data));

#if defined(BGE_IPMI_ASF) && !defined(__sparc)
	if (bgep->asf_enabled && !bgep->asf_wordswapped) {
		/*
		 * Workaround for word swap error: toggle bit 2 of the
		 * address (see matching code in bge_nic_get32()).
		 */
		if (addr & 4)
			addr = addr - 4;
		else
			addr = addr + 4;
	}
#endif

#ifdef __sparc
	/*
	 * NOTE(review): only the address is byte-swapped for the
	 * 5717/5725/57765 series, while the data is always swapped --
	 * presumably matching each chip family's endian config; confirm
	 * against the programmer's reference before changing.
	 */
	if (DEVICE_5717_SERIES_CHIPSETS(bgep) ||
	    DEVICE_5725_SERIES_CHIPSETS(bgep) ||
	    DEVICE_57765_SERIES_CHIPSETS(bgep)) {
		addr = LE_32(addr);
	}
	pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_MWBAR, addr);
	data = LE_32(data);
	pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_MWDAR, data);
	pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_MWBAR, 0);
#else
	bge_nic_setwin(bgep, addr & ~MWBAR_GRANULE_MASK);
	addr &= MWBAR_GRANULE_MASK;
	addr += NIC_MEM_WINDOW_OFFSET;
	ddi_put32(bgep->io_handle, PIO_ADDR(bgep, addr), data);
	BGE_PCICHK(bgep);
#endif
}
917 
/*
 * Read a 64-bit quantity from NIC-local memory through the PCI
 * memory window.
 *
 * On the listed newer chipsets (and EM64T hosts) the value is
 * assembled from two 32-bit reads rather than one 64-bit access.
 * Note the halves are fetched in opposite address order on amd64
 * (high word at addr+4) versus sparc (high word at addr) -- this
 * appears to compensate for per-platform word-swap configuration;
 * TODO(review): confirm against the chip's endianness setup.
 */
static uint64_t
bge_nic_get64(bge_t *bgep, bge_regno_t addr)
{
	uint64_t data;

	bge_nic_setwin(bgep, addr & ~MWBAR_GRANULE_MASK);
	addr &= MWBAR_GRANULE_MASK;
	addr += NIC_MEM_WINDOW_OFFSET;

#ifdef	__amd64
	if (DEVICE_5723_SERIES_CHIPSETS(bgep) ||
	    bge_get_em64t_type() ||
	    DEVICE_5717_SERIES_CHIPSETS(bgep) ||
	    DEVICE_5725_SERIES_CHIPSETS(bgep) ||
	    DEVICE_57765_SERIES_CHIPSETS(bgep)) {
		data = ddi_get32(bgep->io_handle,
		    PIO_ADDR(bgep, addr + 4));
		data <<= 32;
		data |= ddi_get32(bgep->io_handle, PIO_ADDR(bgep, addr));
	} else {
		data = ddi_get64(bgep->io_handle, PIO_ADDR(bgep, addr));
	}
#elif defined(__sparc)
	if (DEVICE_5723_SERIES_CHIPSETS(bgep) ||
	    DEVICE_5717_SERIES_CHIPSETS(bgep) ||
	    DEVICE_5725_SERIES_CHIPSETS(bgep) ||
	    DEVICE_57765_SERIES_CHIPSETS(bgep)) {
		data = ddi_get32(bgep->io_handle, PIO_ADDR(bgep, addr));
		data <<= 32;
		data |= ddi_get32(bgep->io_handle,
		    PIO_ADDR(bgep, addr + 4));
	} else {
		data = ddi_get64(bgep->io_handle, PIO_ADDR(bgep, addr));
	}
#else
	data = ddi_get64(bgep->io_handle, PIO_ADDR(bgep, addr));
#endif

	BGE_TRACE(("bge_nic_get64($%p, 0x%lx) = 0x%016llx",
	    (void *)bgep, addr, data));

	return (data);
}
961 
/*
 * Write a 64-bit quantity into NIC-local memory through the PCI
 * memory window.
 *
 * Mirrors bge_nic_get64(): on the listed newer chipsets (and EM64T
 * hosts) the value is written as two 32-bit halves, low 32 bits at
 * addr+4 and high 32 bits at addr, with a PCI check between the two
 * writes; otherwise a single 64-bit access is used.
 */
static void
bge_nic_put64(bge_t *bgep, bge_regno_t addr, uint64_t data)
{
	BGE_TRACE(("bge_nic_put64($%p, 0x%lx, 0x%016llx)",
	    (void *)bgep, addr, data));

	bge_nic_setwin(bgep, addr & ~MWBAR_GRANULE_MASK);
	addr &= MWBAR_GRANULE_MASK;
	addr += NIC_MEM_WINDOW_OFFSET;

#ifdef	__amd64
	if (DEVICE_5723_SERIES_CHIPSETS(bgep) ||
	    bge_get_em64t_type() ||
	    DEVICE_5717_SERIES_CHIPSETS(bgep) ||
	    DEVICE_5725_SERIES_CHIPSETS(bgep) ||
	    DEVICE_57765_SERIES_CHIPSETS(bgep)) {
		ddi_put32(bgep->io_handle,
		    PIO_ADDR(bgep, addr + 4), (uint32_t)data);
		BGE_PCICHK(bgep);
		ddi_put32(bgep->io_handle,
		    PIO_ADDR(bgep, addr), (uint32_t)(data >> 32));
	} else {
		ddi_put64(bgep->io_handle, PIO_ADDR(bgep, addr), data);
	}
#elif defined(__sparc)
	if (DEVICE_5723_SERIES_CHIPSETS(bgep) ||
	    DEVICE_5717_SERIES_CHIPSETS(bgep) ||
	    DEVICE_5725_SERIES_CHIPSETS(bgep) ||
	    DEVICE_57765_SERIES_CHIPSETS(bgep)) {
		ddi_put32(bgep->io_handle,
		    PIO_ADDR(bgep, addr + 4), (uint32_t)data);
		BGE_PCICHK(bgep);
		ddi_put32(bgep->io_handle,
		    PIO_ADDR(bgep, addr), (uint32_t)(data >> 32));
	} else {
		ddi_put64(bgep->io_handle, PIO_ADDR(bgep, addr), data);
	}
#else
	ddi_put64(bgep->io_handle, PIO_ADDR(bgep, addr), data);
#endif

	BGE_PCICHK(bgep);
}
1005 
1006 /*
1007  * The DDI doesn't provide get/put functions for 128 bit data
1008  * so we put RCBs out as two 64-bit chunks instead.
1009  */
/*
 * Write a 16-byte Ring Control Block (RCB) into NIC-local memory
 * as two 64-bit chunks (or four 32-bit writes on the chipsets that
 * don't support 64-bit PIO).
 *
 * <addr> must be RCB-aligned; the destination window is mapped first.
 * The per-arch 32-bit write ordering mirrors bge_nic_put64() -- note
 * the embedded *p++ means the first pair of writes consumes the first
 * 64-bit half of the RCB and the second pair the second half.
 */
static void
bge_nic_putrcb(bge_t *bgep, bge_regno_t addr, bge_rcb_t *rcbp)
{
	uint64_t *p;

	BGE_TRACE(("bge_nic_putrcb($%p, 0x%lx, 0x%016llx:%04x:%04x:%08x)",
	    (void *)bgep, addr, rcbp->host_ring_addr,
	    rcbp->max_len, rcbp->flags, rcbp->nic_ring_addr));

	ASSERT((addr % sizeof (*rcbp)) == 0);

	bge_nic_setwin(bgep, addr & ~MWBAR_GRANULE_MASK);
	addr &= MWBAR_GRANULE_MASK;
	addr += NIC_MEM_WINDOW_OFFSET;

	p = (void *)rcbp;
#ifdef	__amd64
	if (DEVICE_5723_SERIES_CHIPSETS(bgep) ||
	    bge_get_em64t_type() ||
	    DEVICE_5717_SERIES_CHIPSETS(bgep) ||
	    DEVICE_5725_SERIES_CHIPSETS(bgep) ||
	    DEVICE_57765_SERIES_CHIPSETS(bgep)) {
		ddi_put32(bgep->io_handle, PIO_ADDR(bgep, addr),
		    (uint32_t)(*p));
		ddi_put32(bgep->io_handle, PIO_ADDR(bgep, addr + 4),
		    (uint32_t)(*p++ >> 32));
		ddi_put32(bgep->io_handle, PIO_ADDR(bgep, addr + 8),
		    (uint32_t)(*p));
		ddi_put32(bgep->io_handle, PIO_ADDR(bgep, addr + 12),
		    (uint32_t)(*p >> 32));

	} else {
		ddi_put64(bgep->io_handle, PIO_ADDR(bgep, addr), *p++);
		ddi_put64(bgep->io_handle, PIO_ADDR(bgep, addr+8), *p);
	}
#elif defined(__sparc)
	/* sparc: the 32-bit halves go out in the opposite address order */
	if (DEVICE_5723_SERIES_CHIPSETS(bgep) ||
	    DEVICE_5717_SERIES_CHIPSETS(bgep) ||
	    DEVICE_5725_SERIES_CHIPSETS(bgep) ||
	    DEVICE_57765_SERIES_CHIPSETS(bgep)) {
		ddi_put32(bgep->io_handle, PIO_ADDR(bgep, addr + 4),
		    (uint32_t)(*p));
		ddi_put32(bgep->io_handle, PIO_ADDR(bgep, addr),
		    (uint32_t)(*p++ >> 32));
		ddi_put32(bgep->io_handle, PIO_ADDR(bgep, addr + 12),
		    (uint32_t)(*p));
		ddi_put32(bgep->io_handle, PIO_ADDR(bgep, addr + 8),
		    (uint32_t)(*p >> 32));
	} else {
		ddi_put64(bgep->io_handle, PIO_ADDR(bgep, addr), *p++);
		ddi_put64(bgep->io_handle, PIO_ADDR(bgep, addr + 8), *p);
	}
#else
	ddi_put64(bgep->io_handle, PIO_ADDR(bgep, addr), *p++);
	ddi_put64(bgep->io_handle, PIO_ADDR(bgep, addr + 8), *p);
#endif

	BGE_PCICHK(bgep);
}
1069 
1070 static void
bge_nic_zero(bge_t * bgep,bge_regno_t addr,uint32_t nbytes)1071 bge_nic_zero(bge_t *bgep, bge_regno_t addr, uint32_t nbytes)
1072 {
1073 	BGE_TRACE(("bge_nic_zero($%p, 0x%lx, 0x%x)",
1074 	    (void *)bgep, addr, nbytes));
1075 
1076 	ASSERT((addr & ~MWBAR_GRANULE_MASK) ==
1077 	    ((addr+nbytes) & ~MWBAR_GRANULE_MASK));
1078 
1079 	bge_nic_setwin(bgep, addr & ~MWBAR_GRANULE_MASK);
1080 	addr &= MWBAR_GRANULE_MASK;
1081 	addr += NIC_MEM_WINDOW_OFFSET;
1082 
1083 	(void) ddi_device_zero(bgep->io_handle, PIO_ADDR(bgep, addr),
1084 	    nbytes, 1, DDI_DATA_SZ08_ACC);
1085 	BGE_PCICHK(bgep);
1086 }
1087 
1088 /*
1089  * MII (PHY) register get/set access routines
1090  *
1091  * These use the chip's MII auto-access method, controlled by the
1092  * MII Communication register at 0x044c, so the CPU doesn't have
1093  * to fiddle with the individual bits.
1094  */
1095 
1096 #undef	BGE_DBG
1097 #define	BGE_DBG		BGE_DBG_MII	/* debug flag for this code	*/
1098 
/*
 * Perform one MII (PHY) transaction via the MII Communication
 * register's auto-access state machine.
 *
 * <regno> is the PHY register number, <data> the value for a WRITE
 * command (ignored on READ), and <cmd> the MI_COMMS command code.
 * Returns the data field of the completed transaction, or all-ones
 * ((uint16_t)~0u) on timeout or read failure.  Caller must hold
 * <genlock>.
 */
static uint16_t
bge_mii_access(bge_t *bgep, bge_regno_t regno, uint16_t data, uint32_t cmd)
{
	uint32_t timeout;
	uint32_t regval1;
	uint32_t regval2;

	BGE_TRACE(("bge_mii_access($%p, 0x%lx, 0x%x, 0x%x)",
	    (void *)bgep, regno, data, cmd));

	ASSERT(mutex_owned(bgep->genlock));

	/*
	 * Assemble the command ...
	 */
	cmd |= data << MI_COMMS_DATA_SHIFT;
	cmd |= regno << MI_COMMS_REGISTER_SHIFT;
	cmd |= bgep->phy_mii_addr << MI_COMMS_ADDRESS_SHIFT;
	cmd |= MI_COMMS_START;

	/*
	 * Wait for any command already in progress ...
	 *
	 * Note: this *shouldn't* ever find that there is a command
	 * in progress, because we already hold the <genlock> mutex.
	 * Nonetheless, we have sometimes seen the MI_COMMS_START
	 * bit set here -- it seems that the chip can initiate MII
	 * accesses internally, even with polling OFF.
	 */
	regval1 = regval2 = bge_reg_get32(bgep, MI_COMMS_REG);
	for (timeout = 100; ; ) {
		if ((regval2 & MI_COMMS_START) == 0) {
			bge_reg_put32(bgep, MI_COMMS_REG, cmd);
			break;
		}
		if (--timeout == 0)
			break;
		drv_usecwait(10);
		regval2 = bge_reg_get32(bgep, MI_COMMS_REG);
	}

	/* Never got to issue our command: give up (up to ~1ms elapsed) */
	if (timeout == 0)
		return ((uint16_t)~0u);

	if (timeout != 100)
		BGE_REPORT((bgep, "bge_mii_access: cmd 0x%x -- "
		    "MI_COMMS_START set for %d us; 0x%x->0x%x",
		    cmd, 10*(100-timeout), regval1, regval2));

	/* Now wait (up to ~10ms) for our own command to complete */
	regval1 = bge_reg_get32(bgep, MI_COMMS_REG);
	for (timeout = 1000; ; ) {
		if ((regval1 & MI_COMMS_START) == 0)
			break;
		if (--timeout == 0)
			break;
		drv_usecwait(10);
		regval1 = bge_reg_get32(bgep, MI_COMMS_REG);
	}

	/*
	 * Drop out early if the READ FAILED bit is set -- this chip
	 * could be a 5703/4S, with a SerDes instead of a PHY!
	 */
	if (regval2 & MI_COMMS_READ_FAILED)
		return ((uint16_t)~0u);

	if (timeout == 0)
		return ((uint16_t)~0u);

	/*
	 * The PRM says to wait 5us after seeing the START bit clear
	 * and then re-read the register to get the final value of the
	 * data field, in order to avoid a race condition where the
	 * START bit is clear but the data field isn't yet valid.
	 *
	 * Note: we don't actually seem to encounter this race;
	 * except when the START bit is seen set again (see below),
	 * the data field doesn't change during this 5us interval.
	 */
	drv_usecwait(5);
	regval2 = bge_reg_get32(bgep, MI_COMMS_REG);

	/*
	 * Unfortunately, when following the PRMs instructions above,
	 * we have occasionally seen the START bit set again(!) in the
	 * value read after the 5us delay. This seems to be due to the
	 * chip autonomously starting another MII access internally.
	 * In such cases, the command/data/etc fields relate to the
	 * internal command, rather than the one that we thought had
	 * just finished.  So in this case, we fall back to returning
	 * the data from the original read that showed START clear.
	 */
	if (regval2 & MI_COMMS_START) {
		BGE_REPORT((bgep, "bge_mii_access: cmd 0x%x -- "
		    "MI_COMMS_START set after transaction; 0x%x->0x%x",
		    cmd, regval1, regval2));
		regval2 = regval1;
	}

	if (regval2 & MI_COMMS_START)
		return ((uint16_t)~0u);

	if (regval2 & MI_COMMS_READ_FAILED)
		return ((uint16_t)~0u);

	return ((regval2 & MI_COMMS_DATA_MASK) >> MI_COMMS_DATA_SHIFT);
}
1206 
1207 uint16_t
bge_mii_get16(bge_t * bgep,bge_regno_t regno)1208 bge_mii_get16(bge_t *bgep, bge_regno_t regno)
1209 {
1210 	BGE_TRACE(("bge_mii_get16($%p, 0x%lx)",
1211 	    (void *)bgep, regno));
1212 
1213 	ASSERT(mutex_owned(bgep->genlock));
1214 
1215 	if (DEVICE_5906_SERIES_CHIPSETS(bgep) && ((regno == MII_AUX_CONTROL) ||
1216 	    (regno == MII_MSCONTROL)))
1217 		return (0);
1218 
1219 	return (bge_mii_access(bgep, regno, 0, MI_COMMS_COMMAND_READ));
1220 }
1221 
1222 void
bge_mii_put16(bge_t * bgep,bge_regno_t regno,uint16_t data)1223 bge_mii_put16(bge_t *bgep, bge_regno_t regno, uint16_t data)
1224 {
1225 	BGE_TRACE(("bge_mii_put16($%p, 0x%lx, 0x%x)",
1226 	    (void *)bgep, regno, data));
1227 
1228 	ASSERT(mutex_owned(bgep->genlock));
1229 
1230 	if (DEVICE_5906_SERIES_CHIPSETS(bgep) && ((regno == MII_AUX_CONTROL) ||
1231 	    (regno == MII_MSCONTROL)))
1232 		return;
1233 
1234 	(void) bge_mii_access(bgep, regno, data, MI_COMMS_COMMAND_WRITE);
1235 }
1236 
1237 uint16_t
bge_phydsp_read(bge_t * bgep,bge_regno_t regno)1238 bge_phydsp_read(bge_t *bgep, bge_regno_t regno)
1239 {
1240 	BGE_TRACE(("bge_phydsp_read($%p, 0x%lx)",
1241 	          (void *)bgep, regno));
1242 
1243 	ASSERT(mutex_owned(bgep->genlock));
1244 
1245 	bge_mii_put16(bgep, MII_DSP_ADDRESS, regno);
1246 	return bge_mii_get16(bgep, MII_DSP_RW_PORT);
1247 }
1248 
1249 void
bge_phydsp_write(bge_t * bgep,bge_regno_t regno,uint16_t data)1250 bge_phydsp_write(bge_t *bgep, bge_regno_t regno, uint16_t data)
1251 {
1252 	BGE_TRACE(("bge_phydsp_write($%p, 0x%lx, 0x%x)",
1253 	          (void *)bgep, regno, data));
1254 
1255 	ASSERT(mutex_owned(bgep->genlock));
1256 
1257 	bge_mii_put16(bgep, MII_DSP_ADDRESS, regno);
1258 	bge_mii_put16(bgep, MII_DSP_RW_PORT, data);
1259 }
1260 
1261 #undef	BGE_DBG
1262 #define	BGE_DBG		BGE_DBG_SEEPROM	/* debug flag for this code	*/
1263 
1264 #if	BGE_SEE_IO32 || BGE_FLASH_IO32
1265 
1266 /*
1267  * Basic SEEPROM get/set access routine
1268  *
1269  * This uses the chip's SEEPROM auto-access method, controlled by the
1270  * Serial EEPROM Address/Data Registers at 0x6838/683c, so the CPU
1271  * doesn't have to fiddle with the individual bits.
1272  *
1273  * The caller should hold <genlock> and *also* have already acquired
1274  * the right to access the SEEPROM, via bge_nvmem_acquire() above.
1275  *
1276  * Return value:
1277  *	0 on success,
1278  *	ENODATA on access timeout (maybe retryable: device may just be busy)
1279  *	EPROTO on other h/w or s/w errors.
1280  *
1281  * <*dp> is an input to a SEEPROM_ACCESS_WRITE operation, or an output
1282  * from a (successful) SEEPROM_ACCESS_READ.
1283  */
static int
bge_seeprom_access(bge_t *bgep, uint32_t cmd, bge_regno_t addr, uint32_t *dp)
{
	uint32_t tries;
	uint32_t regval;

	ASSERT(mutex_owned(bgep->genlock));

	/*
	 * On the newer chips that support both SEEPROM & Flash, we need
	 * to specifically enable SEEPROM access (Flash is the default).
	 * On older chips, we don't; SEEPROM is the only NVtype supported,
	 * and the NVM control registers don't exist ...
	 */
	switch (bgep->chipid.nvtype) {
	case BGE_NVTYPE_NONE:
	case BGE_NVTYPE_UNKNOWN:
		_NOTE(NOTREACHED)
	case BGE_NVTYPE_SEEPROM:
		break;

	case BGE_NVTYPE_LEGACY_SEEPROM:
	case BGE_NVTYPE_UNBUFFERED_FLASH:
	case BGE_NVTYPE_BUFFERED_FLASH:
	default:
		bge_reg_set32(bgep, NVM_CONFIG1_REG,
		    NVM_CFG1_LEGACY_SEEPROM_MODE);
		break;
	}

	/*
	 * Check there's no command in progress.
	 *
	 * Note: this *shouldn't* ever find that there is a command
	 * in progress, because we already hold the <genlock> mutex.
	 * Also, to ensure we don't have a conflict with the chip's
	 * internal firmware or a process accessing the same (shared)
	 * SEEPROM through the other port of a 5704, we've already
	 * been through the "software arbitration" protocol.
	 * So this is just a final consistency check: we shouldn't
	 * see EITHER the START bit (command started but not complete)
	 * OR the COMPLETE bit (command completed but not cleared).
	 */
	regval = bge_reg_get32(bgep, SERIAL_EEPROM_ADDRESS_REG);
	if (regval & SEEPROM_ACCESS_START)
		return (EPROTO);
	if (regval & SEEPROM_ACCESS_COMPLETE)
		return (EPROTO);

	/*
	 * Assemble the command: low address bits in the ADDRESS field,
	 * the remaining high bits select the device ID; preserve the
	 * current half-clock setting from the register we just read.
	 */
	cmd |= addr & SEEPROM_ACCESS_ADDRESS_MASK;
	addr >>= SEEPROM_ACCESS_ADDRESS_SIZE;
	addr <<= SEEPROM_ACCESS_DEVID_SHIFT;
	cmd |= addr & SEEPROM_ACCESS_DEVID_MASK;
	cmd |= SEEPROM_ACCESS_START;
	cmd |= SEEPROM_ACCESS_COMPLETE;
	cmd |= regval & SEEPROM_ACCESS_HALFCLOCK_MASK;

	/* Write the data first (input for a WRITE), then kick off */
	bge_reg_put32(bgep, SERIAL_EEPROM_DATA_REG, *dp);
	bge_reg_put32(bgep, SERIAL_EEPROM_ADDRESS_REG, cmd);

	/*
	 * By observation, a successful access takes ~20us on a 5703/4,
	 * but apparently much longer (up to 1000us) on the obsolescent
	 * BCM5700/BCM5701.  We want to be sure we don't get any false
	 * timeouts here; but OTOH, we don't want a bogus access to lock
	 * out interrupts for longer than necessary. So we'll allow up
	 * to 1000us ...
	 */
	for (tries = 0; tries < 1000; ++tries) {
		regval = bge_reg_get32(bgep, SERIAL_EEPROM_ADDRESS_REG);
		if (regval & SEEPROM_ACCESS_COMPLETE)
			break;
		drv_usecwait(1);
	}

	if (regval & SEEPROM_ACCESS_COMPLETE) {
		/*
		 * All OK; read the SEEPROM data register, then write back
		 * the value read from the address register in order to
		 * clear the <complete> bit and leave the SEEPROM access
		 * state machine idle, ready for the next access ...
		 */
		BGE_DEBUG(("bge_seeprom_access: complete after %d us", tries));
		*dp = bge_reg_get32(bgep, SERIAL_EEPROM_DATA_REG);
		bge_reg_put32(bgep, SERIAL_EEPROM_ADDRESS_REG, regval);
		return (0);
	}

	/*
	 * Hmm ... what happened here?
	 *
	 * Most likely, the user addressed a non-existent SEEPROM. Or
	 * maybe the SEEPROM was busy internally (e.g. processing a write)
	 * and didn't respond to being addressed. Either way, it's left
	 * the SEEPROM access state machine wedged. So we'll reset it
	 * before we leave, so it's ready for next time ...
	 */
	BGE_DEBUG(("bge_seeprom_access: timed out after %d us", tries));
	bge_reg_set32(bgep, SERIAL_EEPROM_ADDRESS_REG, SEEPROM_ACCESS_INIT);
	return (ENODATA);
}
1388 
1389 /*
1390  * Basic Flash get/set access routine
1391  *
1392  * These use the chip's Flash auto-access method, controlled by the
1393  * Flash Access Registers at 0x7000-701c, so the CPU doesn't have to
1394  * fiddle with the individual bits.
1395  *
1396  * The caller should hold <genlock> and *also* have already acquired
1397  * the right to access the Flash, via bge_nvmem_acquire() above.
1398  *
1399  * Return value:
1400  *	0 on success,
1401  *	ENODATA on access timeout (maybe retryable: device may just be busy)
1402  *	ENODEV if the NVmem device is missing or otherwise unusable
1403  *
1404  * <*dp> is an input to a NVM_FLASH_CMD_WR operation, or an output
1405  * from a (successful) NVM_FLASH_CMD_RD.
1406  */
static int
bge_flash_access(bge_t *bgep, uint32_t cmd, bge_regno_t addr, uint32_t *dp)
{
	uint32_t tries;
	uint32_t regval;

	ASSERT(mutex_owned(bgep->genlock));

	/*
	 * On the newer chips that support both SEEPROM & Flash, we need
	 * to specifically disable SEEPROM access while accessing Flash.
	 * The older chips don't support Flash, and the NVM registers don't
	 * exist, so we shouldn't be here at all!
	 */
	switch (bgep->chipid.nvtype) {
	case BGE_NVTYPE_NONE:
	case BGE_NVTYPE_UNKNOWN:
		_NOTE(NOTREACHED)
	case BGE_NVTYPE_SEEPROM:
		return (ENODEV);

	case BGE_NVTYPE_LEGACY_SEEPROM:
	case BGE_NVTYPE_UNBUFFERED_FLASH:
	case BGE_NVTYPE_BUFFERED_FLASH:
	default:
		bge_reg_clr32(bgep, NVM_CONFIG1_REG,
		    NVM_CFG1_LEGACY_SEEPROM_MODE);
		break;
	}

	/*
	 * Assemble the command: a single-word (FIRST+LAST) transfer,
	 * with DONE preset so the completion bit is cleared on start.
	 */
	addr &= NVM_FLASH_ADDR_MASK;
	cmd |= NVM_FLASH_CMD_DOIT;
	cmd |= NVM_FLASH_CMD_FIRST;
	cmd |= NVM_FLASH_CMD_LAST;
	cmd |= NVM_FLASH_CMD_DONE;

	/* Write the data first (input for a WR), then kick off */
	bge_reg_put32(bgep, NVM_FLASH_WRITE_REG, *dp);
	bge_reg_put32(bgep, NVM_FLASH_ADDR_REG, addr);
	bge_reg_put32(bgep, NVM_FLASH_CMD_REG, cmd);

	/*
	 * Allow up to 1000us (1000 polls at 1us each) ...
	 */
	for (tries = 0; tries < 1000; ++tries) {
		regval = bge_reg_get32(bgep, NVM_FLASH_CMD_REG);
		if (regval & NVM_FLASH_CMD_DONE)
			break;
		drv_usecwait(1);
	}

	if (regval & NVM_FLASH_CMD_DONE) {
		/*
		 * All OK; read the data from the Flash read register
		 */
		BGE_DEBUG(("bge_flash_access: complete after %d us", tries));
		*dp = bge_reg_get32(bgep, NVM_FLASH_READ_REG);
		return (0);
	}

	/*
	 * Hmm ... what happened here?
	 *
	 * Most likely, the user addressed a non-existent Flash. Or
	 * maybe the Flash was busy internally (e.g. processing a write)
	 * and didn't respond to being addressed. Either way, there's
	 * nothing we can do here ...
	 */
	BGE_DEBUG(("bge_flash_access: timed out after %d us", tries));
	return (ENODATA);
}
1480 
1481 /*
1482  * The next two functions regulate access to the NVram (if fitted).
1483  *
1484  * On a 5704 (dual core) chip, there's only one SEEPROM and one Flash
1485  * (SPI) interface, but they can be accessed through either port. These
1486  * are managed by different instance of this driver and have no software
1487  * state in common.
1488  *
1489  * In addition (and even on a single core chip) the chip's internal
1490  * firmware can access the SEEPROM/Flash, most notably after a RESET
1491  * when it may download code to run internally.
1492  *
1493  * So we need to arbitrate between these various software agents.  For
1494  * this purpose, the chip provides the Software Arbitration Register,
1495  * which implements hardware(!) arbitration.
1496  *
1497  * This functionality didn't exist on older (5700/5701) chips, so there's
1498  * nothing we can do by way of arbitration on those; also, if there's no
1499  * SEEPROM/Flash fitted (or we couldn't determine what type), there's also
1500  * nothing to do.
1501  *
1502  * The internal firmware appears to use Request 0, which is the highest
1503  * priority.  So we'd like to use Request 2, leaving one higher and one
1504  * lower for any future developments ... but apparently this doesn't
1505  * always work.  So for now, the code uses Request 1 ;-(
1506  */
1507 
1508 #define	NVM_READ_REQ	NVM_READ_REQ1
1509 #define	NVM_RESET_REQ	NVM_RESET_REQ1
1510 #define	NVM_SET_REQ	NVM_SET_REQ1
1511 
1512 static void
bge_nvmem_relinquish(bge_t * bgep)1513 bge_nvmem_relinquish(bge_t *bgep)
1514 {
1515 	ASSERT(mutex_owned(bgep->genlock));
1516 
1517 	switch (bgep->chipid.nvtype) {
1518 	case BGE_NVTYPE_NONE:
1519 	case BGE_NVTYPE_UNKNOWN:
1520 		_NOTE(NOTREACHED)
1521 		return;
1522 
1523 	case BGE_NVTYPE_SEEPROM:
1524 		/*
1525 		 * No arbitration performed, no release needed
1526 		 */
1527 		return;
1528 
1529 	case BGE_NVTYPE_LEGACY_SEEPROM:
1530 	case BGE_NVTYPE_UNBUFFERED_FLASH:
1531 	case BGE_NVTYPE_BUFFERED_FLASH:
1532 	default:
1533 		break;
1534 	}
1535 
1536 	/*
1537 	 * Our own request should be present (whether or not granted) ...
1538 	 */
1539 	(void) bge_reg_get32(bgep, NVM_SW_ARBITRATION_REG);
1540 
1541 	/*
1542 	 * ... this will make it go away.
1543 	 */
1544 	bge_reg_put32(bgep, NVM_SW_ARBITRATION_REG, NVM_RESET_REQ);
1545 	(void) bge_reg_get32(bgep, NVM_SW_ARBITRATION_REG);
1546 }
1547 
1548 /*
1549  * Arbitrate for access to the NVmem, if necessary
1550  *
1551  * Return value:
1552  *	0 on success
1553  *	EAGAIN if the device is in use (retryable)
1554  *	ENODEV if the NVmem device is missing or otherwise unusable
1555  */
static int
bge_nvmem_acquire(bge_t *bgep)
{
	uint32_t regval;
	uint32_t tries;

	ASSERT(mutex_owned(bgep->genlock));

	switch (bgep->chipid.nvtype) {
	case BGE_NVTYPE_NONE:
	case BGE_NVTYPE_UNKNOWN:
		/*
		 * Access denied: no (recognisable) device fitted
		 */
		return (ENODEV);

	case BGE_NVTYPE_SEEPROM:
		/*
		 * Access granted: no arbitration needed (or possible)
		 */
		return (0);

	case BGE_NVTYPE_LEGACY_SEEPROM:
	case BGE_NVTYPE_UNBUFFERED_FLASH:
	case BGE_NVTYPE_BUFFERED_FLASH:
	default:
		/*
		 * Access conditional: conduct arbitration protocol
		 */
		break;
	}

	/*
	 * We're holding the per-port mutex <genlock>, so no-one other
	 * thread can be attempting to access the NVmem through *this*
	 * port. But it could be in use by the *other* port (of a 5704),
	 * or by the chip's internal firmware, so we have to go through
	 * the full (hardware) arbitration protocol ...
	 *
	 * Note that *because* we're holding <genlock>, the interrupt handler
	 * won't be able to progress.  So we're only willing to spin for a
	 * fairly short time.  Specifically:
	 *
	 *	We *must* wait long enough for the hardware to resolve all
	 *	requests and determine the winner.  Fortunately, this is
	 *	"almost instantaneous", even as observed by GHz CPUs.
	 *
	 *	A successful access by another Solaris thread (via either
	 *	port) typically takes ~20us.  So waiting a bit longer than
	 *	that will give a good chance of success, if the other user
	 *	*is* another thread on the other port.
	 *
	 *	However, the internal firmware can hold on to the NVmem
	 *	for *much* longer: at least 10 milliseconds just after a
	 *	RESET, and maybe even longer if the NVmem actually contains
	 *	code to download and run on the internal CPUs.
	 *
	 * So, we'll allow 50us; if that's not enough then it's up to the
	 * caller to retry later (hence the choice of return code EAGAIN).
	 */
	regval = bge_reg_get32(bgep, NVM_SW_ARBITRATION_REG);
	/* Post our request (REQ1 -- see NVM_SET_REQ definition above) */
	bge_reg_put32(bgep, NVM_SW_ARBITRATION_REG, NVM_SET_REQ);

	for (tries = 0; tries < 50; ++tries) {
		regval = bge_reg_get32(bgep, NVM_SW_ARBITRATION_REG);
		if (regval & NVM_WON_REQ1)
			break;
		drv_usecwait(1);
	}

	if (regval & NVM_WON_REQ1) {
		BGE_DEBUG(("bge_nvmem_acquire: won after %d us", tries));
		return (0);
	}

	/*
	 * Somebody else must be accessing the NVmem, so abandon our
	 * attempt to take control of it.  The caller can try again later ...
	 */
	BGE_DEBUG(("bge_nvmem_acquire: lost after %d us", tries));
	bge_nvmem_relinquish(bgep);
	return (EAGAIN);
}
1639 
1640 /*
1641  * This code assumes that the GPIO1 bit has been wired up to the NVmem
1642  * write protect line in such a way that the NVmem is protected when
1643  * GPIO1 is an input, or is an output but driven high.  Thus, to make the
1644  * NVmem writable we have to change GPIO1 to an output AND drive it low.
1645  *
1646  * Note: there's only one set of GPIO pins on a 5704, even though they
1647  * can be accessed through either port.  So the chip has to resolve what
1648  * happens if the two ports program a single pin differently ... the rule
1649  * it uses is that if the ports disagree about the *direction* of a pin,
1650  * "output" wins over "input", but if they disagree about its *value* as
1651  * an output, then the pin is TRISTATED instead!  In such a case, no-one
1652  * wins, and the external signal does whatever the external circuitry
1653  * defines as the default -- which we've assumed is the PROTECTED state.
1654  * So, we always change GPIO1 back to being an *input* whenever we're not
1655  * specifically using it to unprotect the NVmem. This allows either port
1656  * to update the NVmem, although obviously only one at a time!
1657  *
1658  * The caller should hold <genlock> and *also* have already acquired the
1659  * right to access the NVmem, via bge_nvmem_acquire() above.
1660  */
1661 static void
bge_nvmem_protect(bge_t * bgep,boolean_t protect)1662 bge_nvmem_protect(bge_t *bgep, boolean_t protect)
1663 {
1664 	uint32_t regval;
1665 
1666 	ASSERT(mutex_owned(bgep->genlock));
1667 
1668 	regval = bge_reg_get32(bgep, MISC_LOCAL_CONTROL_REG);
1669 	if (protect) {
1670 		regval |= MLCR_MISC_PINS_OUTPUT_1;
1671 		regval &= ~MLCR_MISC_PINS_OUTPUT_ENABLE_1;
1672 	} else {
1673 		regval &= ~MLCR_MISC_PINS_OUTPUT_1;
1674 		regval |= MLCR_MISC_PINS_OUTPUT_ENABLE_1;
1675 	}
1676 	bge_reg_put32(bgep, MISC_LOCAL_CONTROL_REG, regval);
1677 }
1678 
1679 /*
1680  * Now put it all together ...
1681  *
1682  * Try to acquire control of the NVmem; if successful, then:
1683  *	unprotect it (if we want to write to it)
1684  *	perform the requested access
1685  *	reprotect it (after a write)
1686  *	relinquish control
1687  *
1688  * Return value:
1689  *	0 on success,
1690  *	EAGAIN if the device is in use (retryable)
1691  *	ENODATA on access timeout (maybe retryable: device may just be busy)
1692  *	ENODEV if the NVmem device is missing or otherwise unusable
1693  *	EPROTO on other h/w or s/w errors.
1694  */
static int
bge_nvmem_rw32(bge_t *bgep, uint32_t cmd, bge_regno_t addr, uint32_t *dp)
{
	int err;

	if ((err = bge_nvmem_acquire(bgep)) == 0) {
		switch (cmd) {
		case BGE_SEE_READ:
			err = bge_seeprom_access(bgep,
			    SEEPROM_ACCESS_READ, addr, dp);
			break;

		case BGE_SEE_WRITE:
			/* Drop write-protect only for the duration */
			bge_nvmem_protect(bgep, B_FALSE);
			err = bge_seeprom_access(bgep,
			    SEEPROM_ACCESS_WRITE, addr, dp);
			bge_nvmem_protect(bgep, B_TRUE);
			break;

		case BGE_FLASH_READ:
			/*
			 * These chip families gate Flash access behind
			 * the NVM access-enable bit; set it around the
			 * access and clear it again afterwards.
			 */
			if (DEVICE_5721_SERIES_CHIPSETS(bgep) ||
			    DEVICE_5723_SERIES_CHIPSETS(bgep) ||
			    DEVICE_5717_SERIES_CHIPSETS(bgep) ||
			    DEVICE_5725_SERIES_CHIPSETS(bgep) ||
			    DEVICE_5714_SERIES_CHIPSETS(bgep) ||
			    DEVICE_57765_SERIES_CHIPSETS(bgep)) {
				bge_reg_set32(bgep, NVM_ACCESS_REG,
				    NVM_ACCESS_ENABLE);
			}
			err = bge_flash_access(bgep,
			    NVM_FLASH_CMD_RD, addr, dp);
			if (DEVICE_5721_SERIES_CHIPSETS(bgep) ||
			    DEVICE_5723_SERIES_CHIPSETS(bgep) ||
			    DEVICE_5717_SERIES_CHIPSETS(bgep) ||
			    DEVICE_5725_SERIES_CHIPSETS(bgep) ||
			    DEVICE_5714_SERIES_CHIPSETS(bgep) ||
			    DEVICE_57765_SERIES_CHIPSETS(bgep)) {
				bge_reg_clr32(bgep, NVM_ACCESS_REG,
				    NVM_ACCESS_ENABLE);
			}
			break;

		case BGE_FLASH_WRITE:
			/*
			 * As for READ, but also set write-enable and
			 * drop the GPIO write-protect around the access.
			 */
			if (DEVICE_5721_SERIES_CHIPSETS(bgep) ||
			    DEVICE_5723_SERIES_CHIPSETS(bgep) ||
			    DEVICE_5717_SERIES_CHIPSETS(bgep) ||
			    DEVICE_5725_SERIES_CHIPSETS(bgep) ||
			    DEVICE_5714_SERIES_CHIPSETS(bgep) ||
			    DEVICE_57765_SERIES_CHIPSETS(bgep)) {
				bge_reg_set32(bgep, NVM_ACCESS_REG,
				    NVM_WRITE_ENABLE|NVM_ACCESS_ENABLE);
			}
			bge_nvmem_protect(bgep, B_FALSE);
			err = bge_flash_access(bgep,
			    NVM_FLASH_CMD_WR, addr, dp);
			bge_nvmem_protect(bgep, B_TRUE);
			if (DEVICE_5721_SERIES_CHIPSETS(bgep) ||
			    DEVICE_5723_SERIES_CHIPSETS(bgep) ||
			    DEVICE_5717_SERIES_CHIPSETS(bgep) ||
			    DEVICE_5725_SERIES_CHIPSETS(bgep) ||
			    DEVICE_5714_SERIES_CHIPSETS(bgep) ||
			    DEVICE_57765_SERIES_CHIPSETS(bgep)) {
				bge_reg_clr32(bgep, NVM_ACCESS_REG,
				    NVM_WRITE_ENABLE|NVM_ACCESS_ENABLE);
			}

			break;

		default:
			_NOTE(NOTREACHED)
			break;
		}
		bge_nvmem_relinquish(bgep);
	}

	BGE_DEBUG(("bge_nvmem_rw32: err %d", err));
	return (err);
}
1773 
1774 static uint32_t
bge_nvmem_access_cmd(bge_t * bgep,boolean_t read)1775 bge_nvmem_access_cmd(bge_t *bgep, boolean_t read)
1776 {
1777 	switch (bgep->chipid.nvtype) {
1778 	case BGE_NVTYPE_NONE:
1779 	case BGE_NVTYPE_UNKNOWN:
1780 	default:
1781 		return 0;
1782 
1783 	case BGE_NVTYPE_SEEPROM:
1784 	case BGE_NVTYPE_LEGACY_SEEPROM:
1785 		return (read ? BGE_SEE_READ : BGE_SEE_WRITE);
1786 
1787 	case BGE_NVTYPE_UNBUFFERED_FLASH:
1788 	case BGE_NVTYPE_BUFFERED_FLASH:
1789 		return (read ? BGE_FLASH_READ : BGE_FLASH_WRITE);
1790 	}
1791 }
1792 
1793 
1794 int
bge_nvmem_read32(bge_t * bgep,bge_regno_t addr,uint32_t * dp)1795 bge_nvmem_read32(bge_t *bgep, bge_regno_t addr, uint32_t *dp)
1796 {
1797 	return (bge_nvmem_rw32(bgep, bge_nvmem_access_cmd(bgep, B_TRUE),
1798 	    addr, dp));
1799 }
1800 
1801 
1802 int
bge_nvmem_write32(bge_t * bgep,bge_regno_t addr,uint32_t * dp)1803 bge_nvmem_write32(bge_t *bgep, bge_regno_t addr, uint32_t *dp)
1804 {
1805 	return (bge_nvmem_rw32(bgep, bge_nvmem_access_cmd(bgep, B_FALSE),
1806 	    addr, dp));
1807 }
1808 
1809 
1810 /*
1811  * Attempt to get a MAC address from the SEEPROM or Flash, if any
1812  */
1813 static uint64_t
bge_get_nvmac(bge_t * bgep)1814 bge_get_nvmac(bge_t *bgep)
1815 {
1816 	uint32_t mac_high;
1817 	uint32_t mac_low;
1818 	uint32_t addr;
1819 	uint32_t cmd;
1820 	uint64_t mac;
1821 
1822 	BGE_TRACE(("bge_get_nvmac($%p)",
1823 	    (void *)bgep));
1824 
1825 	switch (bgep->chipid.nvtype) {
1826 	case BGE_NVTYPE_NONE:
1827 	case BGE_NVTYPE_UNKNOWN:
1828 	default:
1829 		return (0ULL);
1830 
1831 	case BGE_NVTYPE_SEEPROM:
1832 	case BGE_NVTYPE_LEGACY_SEEPROM:
1833 		cmd = BGE_SEE_READ;
1834 		break;
1835 
1836 	case BGE_NVTYPE_UNBUFFERED_FLASH:
1837 	case BGE_NVTYPE_BUFFERED_FLASH:
1838 		cmd = BGE_FLASH_READ;
1839 		break;
1840 	}
1841 
1842 	if (DEVICE_5906_SERIES_CHIPSETS(bgep))
1843 		addr = NVMEM_DATA_MAC_ADDRESS_5906;
1844 	else
1845 		addr = NVMEM_DATA_MAC_ADDRESS;
1846 
1847 	if (bge_nvmem_rw32(bgep, cmd, addr, &mac_high))
1848 		return (0ULL);
1849 	addr += 4;
1850 	if (bge_nvmem_rw32(bgep, cmd, addr, &mac_low))
1851 		return (0ULL);
1852 
1853 	/*
1854 	 * The Broadcom chip is natively BIG-endian, so that's how the
1855 	 * MAC address is represented in NVmem.  We may need to swap it
1856 	 * around on a little-endian host ...
1857 	 */
1858 #ifdef	_BIG_ENDIAN
1859 	mac = mac_high;
1860 	mac = mac << 32;
1861 	mac |= mac_low;
1862 #else
1863 	mac = BGE_BSWAP_32(mac_high);
1864 	mac = mac << 32;
1865 	mac |= BGE_BSWAP_32(mac_low);
1866 #endif	/* _BIG_ENDIAN */
1867 
1868 	return (mac);
1869 }
1870 
1871 #else	/* BGE_SEE_IO32 || BGE_FLASH_IO32 */
1872 
1873 /*
1874  * Dummy version for when we're not supporting NVmem access
1875  */
static uint64_t
bge_get_nvmac(bge_t *bgep)
{
	/* No NVmem access compiled in, so there's no MAC to fetch */
	_NOTE(ARGUNUSED(bgep))
	return (0ULL);
}
1882 
1883 #endif	/* BGE_SEE_IO32 || BGE_FLASH_IO32 */
1884 
1885 /*
1886  * Determine the type of NVmem that is (or may be) attached to this chip,
1887  */
1888 static enum bge_nvmem_type
bge_nvmem_id(bge_t * bgep)1889 bge_nvmem_id(bge_t *bgep)
1890 {
1891 	enum bge_nvmem_type nvtype;
1892 	uint32_t config1;
1893 
1894 	BGE_TRACE(("bge_nvmem_id($%p)",
1895 	    (void *)bgep));
1896 
1897 	switch (bgep->chipid.device) {
1898 	default:
1899 		/*
1900 		 * We shouldn't get here; it means we don't recognise
1901 		 * the chip, which means we don't know how to determine
1902 		 * what sort of NVmem (if any) it has.  So we'll say
1903 		 * NONE, to disable the NVmem access code ...
1904 		 */
1905 		nvtype = BGE_NVTYPE_NONE;
1906 		break;
1907 
1908 	case DEVICE_ID_5700:
1909 	case DEVICE_ID_5700x:
1910 	case DEVICE_ID_5701:
1911 		/*
1912 		 * These devices support *only* SEEPROMs
1913 		 */
1914 		nvtype = BGE_NVTYPE_SEEPROM;
1915 		break;
1916 
1917 	case DEVICE_ID_5702:
1918 	case DEVICE_ID_5702fe:
1919 	case DEVICE_ID_5703C:
1920 	case DEVICE_ID_5703S:
1921 	case DEVICE_ID_5704C:
1922 	case DEVICE_ID_5704S:
1923 	case DEVICE_ID_5704:
1924 	case DEVICE_ID_5705M:
1925 	case DEVICE_ID_5705C:
1926 	case DEVICE_ID_5705_2:
1927 	case DEVICE_ID_5717:
1928 	case DEVICE_ID_5718:
1929 	case DEVICE_ID_5719:
1930 	case DEVICE_ID_5720:
1931 	case DEVICE_ID_5724:
1932 	case DEVICE_ID_5725:
1933 	case DEVICE_ID_5727:
1934 	case DEVICE_ID_57780:
1935 	case DEVICE_ID_5780:
1936 	case DEVICE_ID_5782:
1937 	case DEVICE_ID_5785:
1938 	case DEVICE_ID_5787:
1939 	case DEVICE_ID_5787M:
1940 	case DEVICE_ID_5788:
1941 	case DEVICE_ID_5789:
1942 	case DEVICE_ID_5751:
1943 	case DEVICE_ID_5751M:
1944 	case DEVICE_ID_5752:
1945 	case DEVICE_ID_5752M:
1946 	case DEVICE_ID_5754:
1947 	case DEVICE_ID_5755:
1948 	case DEVICE_ID_5755M:
1949 	case DEVICE_ID_5756M:
1950 	case DEVICE_ID_5721:
1951 	case DEVICE_ID_5722:
1952 	case DEVICE_ID_5723:
1953 	case DEVICE_ID_5761:
1954 	case DEVICE_ID_5761E:
1955 	case DEVICE_ID_5764:
1956 	case DEVICE_ID_5714C:
1957 	case DEVICE_ID_5714S:
1958 	case DEVICE_ID_5715C:
1959 	case DEVICE_ID_5715S:
1960 	case DEVICE_ID_57761:
1961 	case DEVICE_ID_57762:
1962 	case DEVICE_ID_57765:
1963 	case DEVICE_ID_57766:
1964 	case DEVICE_ID_57781:
1965 	case DEVICE_ID_57782:
1966 	case DEVICE_ID_57785:
1967 	case DEVICE_ID_57786:
1968 	case DEVICE_ID_57791:
1969 	case DEVICE_ID_57795:
1970 		config1 = bge_reg_get32(bgep, NVM_CONFIG1_REG);
1971 		if (config1 & NVM_CFG1_FLASH_MODE)
1972 			if (config1 & NVM_CFG1_BUFFERED_MODE)
1973 				nvtype = BGE_NVTYPE_BUFFERED_FLASH;
1974 			else
1975 				nvtype = BGE_NVTYPE_UNBUFFERED_FLASH;
1976 		else
1977 			nvtype = BGE_NVTYPE_LEGACY_SEEPROM;
1978 		break;
1979 	case DEVICE_ID_5906:
1980 	case DEVICE_ID_5906M:
1981 		nvtype = BGE_NVTYPE_BUFFERED_FLASH;
1982 		break;
1983 	}
1984 
1985 	return (nvtype);
1986 }
1987 
1988 #undef	BGE_DBG
1989 #define	BGE_DBG		BGE_DBG_APE	/* debug flag for this code	*/
1990 
/*
 * Read a 32-bit APE register at offset <regno>, via the separately
 * mapped APE register space (ape_regs/ape_handle).
 */
uint32_t
bge_ape_get32(bge_t *bgep, bge_regno_t regno)
{
	BGE_TRACE(("bge_ape_get32($%p, 0x%lx)",
	    (void *)bgep, regno));

	return (ddi_get32(bgep->ape_handle, APE_ADDR(bgep, regno)));
}
1999 
/*
 * Write a 32-bit value to the APE register at offset <regno>, then
 * run the post-access check (BGE_PCICHK) as the other register-write
 * paths in this file do.
 */
void
bge_ape_put32(bge_t *bgep, bge_regno_t regno, uint32_t data)
{
	BGE_TRACE(("bge_ape_put32($%p, 0x%lx, 0x%x)",
	    (void *)bgep, regno, data));

	ddi_put32(bgep->ape_handle, APE_ADDR(bgep, regno), data);
	BGE_PCICHK(bgep);
}
2009 
2010 void
bge_ape_lock_init(bge_t * bgep)2011 bge_ape_lock_init(bge_t *bgep)
2012 {
2013 	int i;
2014 	uint32_t regbase;
2015 	uint32_t bit;
2016 
2017 	BGE_TRACE(("bge_ape_lock_init($%p)", (void *)bgep));
2018 
2019 	if (bgep->chipid.device == DEVICE_ID_5761)
2020 		regbase = BGE_APE_LOCK_GRANT;
2021 	else
2022 		regbase = BGE_APE_PER_LOCK_GRANT;
2023 
2024 	/* Make sure the driver hasn't any stale locks. */
2025 	for (i = BGE_APE_LOCK_PHY0; i <= BGE_APE_LOCK_GPIO; i++) {
2026 		switch (i) {
2027 		case BGE_APE_LOCK_PHY0:
2028 		case BGE_APE_LOCK_PHY1:
2029 		case BGE_APE_LOCK_PHY2:
2030 		case BGE_APE_LOCK_PHY3:
2031 			bit = APE_LOCK_GRANT_DRIVER;
2032 			break;
2033 		default:
2034 			if (!bgep->pci_func)
2035 				bit = APE_LOCK_GRANT_DRIVER;
2036 			else
2037 				bit = 1 << bgep->pci_func;
2038 		}
2039 		bge_ape_put32(bgep, regbase + 4 * i, bit);
2040 	}
2041 }
2042 
2043 static int
bge_ape_lock(bge_t * bgep,int locknum)2044 bge_ape_lock(bge_t *bgep, int locknum)
2045 {
2046 	int i, off;
2047 	int ret = 0;
2048 	uint32_t status;
2049 	uint32_t req;
2050 	uint32_t gnt;
2051 	uint32_t bit;
2052 
2053 	BGE_TRACE(("bge_ape_lock($%p, 0x%x)", (void *)bgep, locknum));
2054 
2055 	if (!bgep->ape_enabled)
2056 		return (0);
2057 
2058 	switch (locknum) {
2059 	case BGE_APE_LOCK_GPIO:
2060 		if (bgep->chipid.device == DEVICE_ID_5761)
2061 			return (0);
2062 		/* FALLTHROUGH */
2063 	case BGE_APE_LOCK_GRC:
2064 	case BGE_APE_LOCK_MEM:
2065 		if (!bgep->pci_func)
2066 			bit = APE_LOCK_REQ_DRIVER;
2067 		else
2068 			bit = 1 << bgep->pci_func;
2069 		break;
2070 	case BGE_APE_LOCK_PHY0:
2071 	case BGE_APE_LOCK_PHY1:
2072 	case BGE_APE_LOCK_PHY2:
2073 	case BGE_APE_LOCK_PHY3:
2074 		bit = APE_LOCK_REQ_DRIVER;
2075 		break;
2076 	default:
2077 		return (-1);
2078 	}
2079 
2080 	if (bgep->chipid.device == DEVICE_ID_5761) {
2081 		req = BGE_APE_LOCK_REQ;
2082 		gnt = BGE_APE_LOCK_GRANT;
2083 	} else {
2084 		req = BGE_APE_PER_LOCK_REQ;
2085 		gnt = BGE_APE_PER_LOCK_GRANT;
2086 	}
2087 
2088 	off = 4 * locknum;
2089 
2090 	bge_ape_put32(bgep, req + off, bit);
2091 
2092 	/* Wait for up to 1 millisecond to acquire lock. */
2093 	for (i = 0; i < 100; i++) {
2094 		status = bge_ape_get32(bgep, gnt + off);
2095 		if (status == bit)
2096 			break;
2097 		drv_usecwait(10);
2098 	}
2099 
2100 	if (status != bit) {
2101 		/* Revoke the lock request. */
2102 		bge_ape_put32(bgep, gnt + off, bit);
2103 		ret = -1;
2104 	}
2105 
2106 	return (ret);
2107 }
2108 
2109 static void
bge_ape_unlock(bge_t * bgep,int locknum)2110 bge_ape_unlock(bge_t *bgep, int locknum)
2111 {
2112 	uint32_t gnt;
2113 	uint32_t bit;
2114 
2115 	BGE_TRACE(("bge_ape_unlock($%p, 0x%x)", (void *)bgep, locknum));
2116 
2117 	if (!bgep->ape_enabled)
2118 		return;
2119 
2120 	switch (locknum) {
2121 	case BGE_APE_LOCK_GPIO:
2122 		if (bgep->chipid.device == DEVICE_ID_5761)
2123 			return;
2124 		/* FALLTHROUGH */
2125 	case BGE_APE_LOCK_GRC:
2126 	case BGE_APE_LOCK_MEM:
2127 		if (!bgep->pci_func)
2128 			bit = APE_LOCK_GRANT_DRIVER;
2129 		else
2130 			bit = 1 << bgep->pci_func;
2131 		break;
2132 	case BGE_APE_LOCK_PHY0:
2133 	case BGE_APE_LOCK_PHY1:
2134 	case BGE_APE_LOCK_PHY2:
2135 	case BGE_APE_LOCK_PHY3:
2136 		bit = APE_LOCK_GRANT_DRIVER;
2137 		break;
2138 	default:
2139 		return;
2140 	}
2141 
2142 	if (bgep->chipid.device == DEVICE_ID_5761)
2143 		gnt = BGE_APE_LOCK_GRANT;
2144 	else
2145 		gnt = BGE_APE_PER_LOCK_GRANT;
2146 
2147 	bge_ape_put32(bgep, gnt + 4 * locknum, bit);
2148 }
2149 
2150 /* wait for pending event to finish, if successful returns with MEM locked */
2151 static int
bge_ape_event_lock(bge_t * bgep,uint32_t timeout_us)2152 bge_ape_event_lock(bge_t *bgep, uint32_t timeout_us)
2153 {
2154 	uint32_t apedata;
2155 
2156 	BGE_TRACE(("bge_ape_event_lock($%p, %d)", (void *)bgep, timeout_us));
2157 
2158 	ASSERT(timeout_us > 0);
2159 
2160 	while (timeout_us) {
2161 		if (bge_ape_lock(bgep, BGE_APE_LOCK_MEM))
2162 			return (-1);
2163 
2164 		apedata = bge_ape_get32(bgep, BGE_APE_EVENT_STATUS);
2165 		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
2166 			break;
2167 
2168 		bge_ape_unlock(bgep, BGE_APE_LOCK_MEM);
2169 
2170 		drv_usecwait(10);
2171 		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
2172 	}
2173 
2174 	return (timeout_us ? 0 : -1);
2175 }
2176 
2177 /* wait for pending event to finish, returns non-zero if not finished */
2178 static int
bge_ape_wait_for_event(bge_t * bgep,uint32_t timeout_us)2179 bge_ape_wait_for_event(bge_t *bgep, uint32_t timeout_us)
2180 {
2181 	uint32_t i;
2182 	uint32_t apedata;
2183 
2184 	BGE_TRACE(("bge_ape_wait_for_event($%p, %d)", (void *)bgep, timeout_us));
2185 
2186 	ASSERT(timeout_us > 0);
2187 
2188 	for (i = 0; i < timeout_us / 10; i++) {
2189 		apedata = bge_ape_get32(bgep, BGE_APE_EVENT_STATUS);
2190 
2191 		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
2192 			break;
2193 
2194 		drv_usecwait(10);
2195 	}
2196 
2197 	return (i == timeout_us / 10);
2198 }
2199 
/*
 * Read <lenToRead> bytes from the APE scratchpad at <base_off> into
 * <data>, one message-buffer-sized chunk at a time, by posting
 * SCRTCHPD_READ events to the APE firmware and copying the results
 * out of the shared message buffer.
 *
 * Returns 0 on success (or trivially when NCSI isn't present), -1 on
 * any signature/ready-status failure or event timeout, or the error
 * from bge_ape_event_lock().
 *
 * NOTE(review): the copy loop assumes transfer lengths are multiples
 * of 4 bytes -- confirm against callers.
 */
int
bge_ape_scratchpad_read(bge_t *bgep, uint32_t *data, uint32_t base_off,
    uint32_t lenToRead)
{
	int err;
	uint32_t i;
	uint32_t bufoff;
	uint32_t msgoff;
	uint32_t maxlen;
	uint32_t apedata;

	BGE_TRACE(("bge_ape_scratchpad_read($%p, %p, 0x%0x, %d)",
	    (void *)bgep, (void*)data, base_off, lenToRead));

	if (!bgep->ape_has_ncsi)
		return (0);

	/* The APE segment signature and firmware-ready flag must check out */
	apedata = bge_ape_get32(bgep, BGE_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return (-1);

	apedata = bge_ape_get32(bgep, BGE_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return (-1);

	/* Locate the shared message buffer: two header words, then data */
	bufoff = (bge_ape_get32(bgep, BGE_APE_SEG_MSG_BUF_OFF) +
	          BGE_APE_SHMEM_BASE);
	msgoff = bufoff + 2 * sizeof(uint32_t);
	maxlen = bge_ape_get32(bgep, BGE_APE_SEG_MSG_BUF_LEN);

	while (lenToRead) {
		uint32_t transferLen;

		/* Cap xfer sizes to scratchpad limits. */
		transferLen = (lenToRead > maxlen) ? maxlen : lenToRead;
		lenToRead -= transferLen;

		apedata = bge_ape_get32(bgep, BGE_APE_FW_STATUS);
		if (!(apedata & APE_FW_STATUS_READY))
			return (-1);

		/* Wait for up to 1 millisecond for APE to service previous event. */
		err = bge_ape_event_lock(bgep, 1000);
		if (err)
			return (err);

		/* On success, bge_ape_event_lock() returned with MEM locked */
		apedata = (APE_EVENT_STATUS_DRIVER_EVNT |
		           APE_EVENT_STATUS_SCRTCHPD_READ |
		           APE_EVENT_STATUS_EVENT_PENDING);
		bge_ape_put32(bgep, BGE_APE_EVENT_STATUS, apedata);

		/* Tell the firmware where to read from and how much */
		bge_ape_put32(bgep, bufoff, base_off);
		bge_ape_put32(bgep, bufoff + sizeof(uint32_t), transferLen);

		/* Release the MEM lock, then ring the event doorbell */
		bge_ape_unlock(bgep, BGE_APE_LOCK_MEM);
		bge_ape_put32(bgep, BGE_APE_EVENT, APE_EVENT_1);

		base_off += transferLen;

		if (bge_ape_wait_for_event(bgep, 30000))
			return (-1);

		/* Copy this chunk out of the shared message buffer */
		for (i = 0; transferLen; i += 4, transferLen -= 4) {
			uint32_t val = bge_ape_get32(bgep, msgoff + i);
			memcpy(data, &val, sizeof(uint32_t));
			data++;
		}
	}

	return (0);
}
2271 
/*
 * Write <lenToWrite> bytes from <data> to the APE scratchpad at
 * <dstoff>, one message-buffer-sized chunk at a time: fill the shared
 * message buffer, then post a SCRTCHPD_WRITE event to the APE
 * firmware for each chunk.
 *
 * Returns 0 on success (or trivially when NCSI isn't present), -1 on
 * signature/ready-status failure, or the error from
 * bge_ape_event_lock().
 *
 * NOTE(review): as with the read side, transfer lengths are assumed
 * to be multiples of 4 bytes -- confirm against callers.
 */
int
bge_ape_scratchpad_write(bge_t *bgep, uint32_t dstoff, uint32_t *data,
    uint32_t lenToWrite)
{
	int err;
	uint32_t i;
	uint32_t bufoff;
	uint32_t msgoff;
	uint32_t maxlen;
	uint32_t apedata;

	BGE_TRACE(("bge_ape_scratchpad_write($%p, %d, %p, %d)",
	    (void *)bgep, dstoff, data, lenToWrite));

	if (!bgep->ape_has_ncsi)
		return (0);

	/* The APE segment signature and firmware-ready flag must check out */
	apedata = bge_ape_get32(bgep, BGE_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return (-1);

	apedata = bge_ape_get32(bgep, BGE_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return (-1);

	/* Locate the shared message buffer: two header words, then data */
	bufoff = (bge_ape_get32(bgep, BGE_APE_SEG_MSG_BUF_OFF) +
	          BGE_APE_SHMEM_BASE);
	msgoff = bufoff + 2 * sizeof(uint32_t);
	maxlen = bge_ape_get32(bgep, BGE_APE_SEG_MSG_BUF_LEN);

	while (lenToWrite) {
		uint32_t transferLen;

		/* Cap xfer sizes to scratchpad limits. */
		transferLen = (lenToWrite > maxlen) ? maxlen : lenToWrite;
		lenToWrite -= transferLen;

		/* Wait for up to 1 millisecond for
		 * APE to service previous event.
		 */
		err = bge_ape_event_lock(bgep, 1000);
		if (err)
			return (err);

		/* Header words: destination offset and chunk length */
		bge_ape_put32(bgep, bufoff, dstoff);
		bge_ape_put32(bgep, bufoff + sizeof(uint32_t), transferLen);
		/* apedata is reused here as a write cursor into the buffer */
		apedata = msgoff;

		dstoff += transferLen;

		/* Fill the message buffer with this chunk's data */
		for (i = 0; transferLen; i += 4, transferLen -= 4) {
			bge_ape_put32(bgep, apedata, *data++);
			apedata += sizeof(uint32_t);
		}

		/* bge_ape_event_lock() left MEM locked: post the event ... */
		apedata = (APE_EVENT_STATUS_DRIVER_EVNT |
		           APE_EVENT_STATUS_SCRTCHPD_WRITE |
		           APE_EVENT_STATUS_EVENT_PENDING);
		bge_ape_put32(bgep, BGE_APE_EVENT_STATUS, apedata);

		/* ... then unlock and ring the doorbell */
		bge_ape_unlock(bgep, BGE_APE_LOCK_MEM);
		bge_ape_put32(bgep, BGE_APE_EVENT, APE_EVENT_1);
	}

	return (0);
}
2338 
2339 static int
bge_ape_send_event(bge_t * bgep,uint32_t event)2340 bge_ape_send_event(bge_t *bgep, uint32_t event)
2341 {
2342 	int err;
2343 	uint32_t apedata;
2344 
2345 	BGE_TRACE(("bge_ape_send_event($%p, %d)", (void *)bgep, event));
2346 
2347 	apedata = bge_ape_get32(bgep, BGE_APE_SEG_SIG);
2348 	if (apedata != APE_SEG_SIG_MAGIC)
2349 		return (-1);
2350 
2351 	apedata = bge_ape_get32(bgep, BGE_APE_FW_STATUS);
2352 	if (!(apedata & APE_FW_STATUS_READY))
2353 		return (-1);
2354 
2355 	/* Wait for up to 1 millisecond for APE to service previous event. */
2356 	err = bge_ape_event_lock(bgep, 1000);
2357 	if (err)
2358 		return (err);
2359 
2360 	bge_ape_put32(bgep, BGE_APE_EVENT_STATUS,
2361 	              event | APE_EVENT_STATUS_EVENT_PENDING);
2362 
2363 	bge_ape_unlock(bgep, BGE_APE_LOCK_MEM);
2364 	bge_ape_put32(bgep, BGE_APE_EVENT, APE_EVENT_1);
2365 
2366 	return 0;
2367 }
2368 
/*
 * Inform the APE firmware of a driver state transition.
 *
 * <mode> is one of:
 *   BGE_INIT_RESET     - driver starting: (re)write the host segment
 *                        signature/length, bump the init count, record
 *                        our driver ID and behaviour flags, and mark
 *                        the driver state as STARTed;
 *   BGE_SHUTDOWN_RESET - driver unloading: wipe the host segment
 *                        signature and mark the state UNLOADed;
 *   BGE_SUSPEND_RESET  - driver suspending.
 * Any other mode is ignored.  The corresponding STATE_CHNGE driver
 * event is then posted to the firmware via bge_ape_send_event()
 * (whose return value is deliberately not checked -- best-effort).
 */
static void
bge_ape_driver_state_change(bge_t *bgep, int mode)
{
	uint32_t event;
	uint32_t apedata;

	BGE_TRACE(("bge_ape_driver_state_change($%p, %d)",
	    (void *)bgep, mode));

	if (!bgep->ape_enabled)
		return;

	switch (mode) {
	case BGE_INIT_RESET:
		bge_ape_put32(bgep, BGE_APE_HOST_SEG_SIG,
		              APE_HOST_SEG_SIG_MAGIC);
		bge_ape_put32(bgep, BGE_APE_HOST_SEG_LEN,
		              APE_HOST_SEG_LEN_MAGIC);
		/* Count how many times the host driver has initialised */
		apedata = bge_ape_get32(bgep, BGE_APE_HOST_INIT_COUNT);
		bge_ape_put32(bgep, BGE_APE_HOST_INIT_COUNT, ++apedata);
		bge_ape_put32(bgep, BGE_APE_HOST_DRIVER_ID,
		              APE_HOST_DRIVER_ID_MAGIC(1, 0));
		bge_ape_put32(bgep, BGE_APE_HOST_BEHAVIOR,
		              APE_HOST_BEHAV_NO_PHYLOCK);
		bge_ape_put32(bgep, BGE_APE_HOST_DRVR_STATE,
		              BGE_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case BGE_SHUTDOWN_RESET:
		/* With the interface we are currently using,
		 * APE does not track driver state.  Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
		bge_ape_put32(bgep, BGE_APE_HOST_SEG_SIG, 0x0);

#if 0
		if (WOL supported) {
			bge_ape_put32(bgep, BGE_APE_HOST_WOL_SPEED,
			              BGE_APE_HOST_WOL_SPEED_AUTO);
			apedata = BGE_APE_HOST_DRVR_STATE_WOL;
		} else
#endif
			apedata = BGE_APE_HOST_DRVR_STATE_UNLOAD;

		bge_ape_put32(bgep, BGE_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	case BGE_SUSPEND_RESET:
		event = APE_EVENT_STATUS_STATE_SUSPEND;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	bge_ape_send_event(bgep, event);
}
2430 
2431 #undef	BGE_DBG
2432 #define	BGE_DBG		BGE_DBG_CHIP	/* debug flag for this code	*/
2433 
2434 static void
bge_init_recv_rule(bge_t * bgep)2435 bge_init_recv_rule(bge_t *bgep)
2436 {
2437 	bge_recv_rule_t *rulep = bgep->recv_rules;
2438 	uint32_t i;
2439 
2440 	/*
2441 	 * Initialize receive rule registers.
2442 	 * Note that rules may persist across each bge_m_start/stop() call.
2443 	 */
2444 	for (i = 0; i < RECV_RULES_NUM_MAX; i++, rulep++) {
2445 		bge_reg_put32(bgep, RECV_RULE_MASK_REG(i), rulep->mask_value);
2446 		bge_reg_put32(bgep, RECV_RULE_CONTROL_REG(i), rulep->control);
2447 	}
2448 }
2449 
2450 /*
2451  * Using the values captured by bge_chip_cfg_init(), and additional probes
2452  * as required, characterise the chip fully: determine the label by which
2453  * to refer to this chip, the correct settings for various registers, and
2454  * of course whether the device and/or subsystem are supported!
2455  */
2456 int
bge_chip_id_init(bge_t * bgep)2457 bge_chip_id_init(bge_t *bgep)
2458 {
2459 	char buf[MAXPATHLEN];		/* any risk of stack overflow?	*/
2460 	boolean_t dev_ok;
2461 	chip_id_t *cidp;
2462 	uint32_t subid;
2463 	char *devname;
2464 	char *sysname;
2465 	int *ids;
2466 	int err;
2467 	uint_t i;
2468 
2469 	dev_ok = B_FALSE;
2470 	cidp = &bgep->chipid;
2471 
2472 	/*
2473 	 * Check the PCI device ID to determine the generic chip type and
2474 	 * select parameters that depend on this.
2475 	 *
2476 	 * Note: because the SPARC platforms in general don't fit the
2477 	 * SEEPROM 'behind' the chip, the PCI revision ID register reads
2478 	 * as zero - which is why we use <asic_rev> rather than <revision>
2479 	 * below ...
2480 	 *
2481 	 * Note: in general we can't distinguish between the Copper/SerDes
2482 	 * versions by ID alone, as some Copper devices (e.g. some but not
2483 	 * all 5703Cs) have the same ID as the SerDes equivalents.  So we
2484 	 * treat them the same here, and the MII code works out the media
2485 	 * type later on ...
2486 	 */
2487 	cidp->mbuf_base = bge_mbuf_pool_base;
2488 	cidp->mbuf_length = bge_mbuf_pool_len;
2489 	cidp->recv_slots = BGE_RECV_SLOTS_USED;
2490 	cidp->bge_dma_rwctrl = bge_dma_rwctrl;
2491 	cidp->pci_type = BGE_PCI_X;
2492 	cidp->statistic_type = BGE_STAT_BLK;
2493 	cidp->mbuf_lo_water_rdma = bge_mbuf_lo_water_rdma;
2494 	cidp->mbuf_lo_water_rmac = bge_mbuf_lo_water_rmac;
2495 	cidp->mbuf_hi_water = bge_mbuf_hi_water;
2496 	cidp->rx_ticks_norm = bge_rx_ticks_norm;
2497 	cidp->rx_count_norm = bge_rx_count_norm;
2498 	cidp->tx_ticks_norm = bge_tx_ticks_norm;
2499 	cidp->tx_count_norm = bge_tx_count_norm;
2500 	cidp->mask_pci_int = MHCR_MASK_PCI_INT_OUTPUT;
2501 
2502 	if (cidp->rx_rings == 0 || cidp->rx_rings > BGE_RECV_RINGS_MAX)
2503 		cidp->rx_rings = BGE_RECV_RINGS_DEFAULT;
2504 	if (cidp->tx_rings == 0 || cidp->tx_rings > BGE_SEND_RINGS_MAX)
2505 		cidp->tx_rings = BGE_SEND_RINGS_DEFAULT;
2506 
2507 	cidp->msi_enabled = B_FALSE;
2508 
2509 	switch (cidp->device) {
2510 	case DEVICE_ID_5717:
2511 	case DEVICE_ID_5718:
2512 	case DEVICE_ID_5719:
2513 	case DEVICE_ID_5720:
2514 	case DEVICE_ID_5724:
2515 	case DEVICE_ID_5725:
2516 	case DEVICE_ID_5727:
2517 	case DEVICE_ID_57761:
2518 	case DEVICE_ID_57762:
2519 	case DEVICE_ID_57765:
2520 	case DEVICE_ID_57766:
2521 	case DEVICE_ID_57781:
2522 	case DEVICE_ID_57782:
2523 	case DEVICE_ID_57785:
2524 	case DEVICE_ID_57786:
2525 	case DEVICE_ID_57791:
2526 	case DEVICE_ID_57795:
2527 		if (cidp->device == DEVICE_ID_5717) {
2528 			cidp->chip_label = 5717;
2529 		} else if (cidp->device == DEVICE_ID_5718) {
2530 			cidp->chip_label = 5718;
2531 		} else if (cidp->device == DEVICE_ID_5719) {
2532 			cidp->chip_label = 5719;
2533 		} else if (cidp->device == DEVICE_ID_5720) {
2534 			if (pci_config_get16(bgep->cfg_handle, PCI_CONF_DEVID) ==
2535 			    DEVICE_ID_5717_C0) {
2536 				cidp->chip_label = 5717;
2537 			} else {
2538 				cidp->chip_label = 5720;
2539 			}
2540 		} else if (cidp->device == DEVICE_ID_5724) {
2541 			cidp->chip_label = 5724;
2542 		} else if (cidp->device == DEVICE_ID_5725) {
2543 			cidp->chip_label = 5725;
2544 		} else if (cidp->device == DEVICE_ID_5727) {
2545 			cidp->chip_label = 5727;
2546 		} else if (cidp->device == DEVICE_ID_57761) {
2547 			cidp->chip_label = 57761;
2548 		} else if (cidp->device == DEVICE_ID_57762) {
2549 			cidp->chip_label = 57762;
2550 		} else if (cidp->device == DEVICE_ID_57765) {
2551 			cidp->chip_label = 57765;
2552 		} else if (cidp->device == DEVICE_ID_57766) {
2553 			cidp->chip_label = 57766;
2554 		} else if (cidp->device == DEVICE_ID_57781) {
2555 			cidp->chip_label = 57781;
2556 		} else if (cidp->device == DEVICE_ID_57782) {
2557 			cidp->chip_label = 57782;
2558 		} else if (cidp->device == DEVICE_ID_57785) {
2559 			cidp->chip_label = 57785;
2560 		} else if (cidp->device == DEVICE_ID_57786) {
2561 			cidp->chip_label = 57786;
2562 		} else if (cidp->device == DEVICE_ID_57791) {
2563 			cidp->chip_label = 57791;
2564 		} else if (cidp->device == DEVICE_ID_57795) {
2565 			cidp->chip_label = 57795;
2566 		}
2567 
2568 		cidp->msi_enabled = bge_enable_msi;
2569 #ifdef __sparc
2570 		cidp->mask_pci_int = LE_32(MHCR_MASK_PCI_INT_OUTPUT);
2571 #endif
2572 		cidp->bge_dma_rwctrl = LE_32(PDRWCR_VAR_5717);
2573 		cidp->pci_type = BGE_PCI_E;
2574 		cidp->mbuf_lo_water_rdma = RDMA_MBUF_LOWAT_5705;
2575 		cidp->mbuf_lo_water_rmac = MAC_RX_MBUF_LOWAT_5717;
2576 		cidp->mbuf_hi_water = MBUF_HIWAT_5717;
2577 		cidp->mbuf_base = bge_mbuf_pool_base_5705;
2578 		cidp->mbuf_length = bge_mbuf_pool_len_5705;
2579 		cidp->recv_slots = BGE_RECV_SLOTS_5705;
2580 		if (DEVICE_57765_SERIES_CHIPSETS(bgep)) {
2581 			cidp->bge_mlcr_default = MLCR_DEFAULT_57765;
2582 		} else {
2583 			cidp->bge_mlcr_default = MLCR_DEFAULT_5717;
2584 		}
2585 		cidp->rx_rings = BGE_RECV_RINGS_MAX_5705;
2586 		cidp->tx_rings = BGE_SEND_RINGS_MAX_5705;
2587 		cidp->statistic_type = BGE_STAT_REG;
2588 		dev_ok = B_TRUE;
2589 		break;
2590 
2591 	case DEVICE_ID_5700:
2592 	case DEVICE_ID_5700x:
2593 		cidp->chip_label = 5700;
2594 		cidp->flags |= CHIP_FLAG_PARTIAL_CSUM;
2595 		break;
2596 
2597 	case DEVICE_ID_5701:
2598 		cidp->chip_label = 5701;
2599 		dev_ok = B_TRUE;
2600 		cidp->flags |= CHIP_FLAG_PARTIAL_CSUM;
2601 		break;
2602 
2603 	case DEVICE_ID_5702:
2604 	case DEVICE_ID_5702fe:
2605 		cidp->chip_label = 5702;
2606 		dev_ok = B_TRUE;
2607 		cidp->flags |= CHIP_FLAG_PARTIAL_CSUM;
2608 		cidp->pci_type = BGE_PCI;
2609 		break;
2610 
2611 	case DEVICE_ID_5703C:
2612 	case DEVICE_ID_5703S:
2613 	case DEVICE_ID_5703:
2614 		/*
2615 		 * Revision A0 of the 5703/5793 had various errata
2616 		 * that we can't or don't work around, so it's not
2617 		 * supported, but all later versions are
2618 		 */
2619 		cidp->chip_label = cidp->subven == VENDOR_ID_SUN ? 5793 : 5703;
2620 		if (bgep->chipid.asic_rev != MHCR_CHIP_REV_5703_A0)
2621 			dev_ok = B_TRUE;
2622 		cidp->flags |= CHIP_FLAG_PARTIAL_CSUM;
2623 		break;
2624 
2625 	case DEVICE_ID_5704C:
2626 	case DEVICE_ID_5704S:
2627 	case DEVICE_ID_5704:
2628 		cidp->chip_label = cidp->subven == VENDOR_ID_SUN ? 5794 : 5704;
2629 		cidp->mbuf_base = bge_mbuf_pool_base_5704;
2630 		cidp->mbuf_length = bge_mbuf_pool_len_5704;
2631 		dev_ok = B_TRUE;
2632 		cidp->flags |= CHIP_FLAG_PARTIAL_CSUM;
2633 		break;
2634 
2635 	case DEVICE_ID_5705C:
2636 	case DEVICE_ID_5705M:
2637 	case DEVICE_ID_5705MA3:
2638 	case DEVICE_ID_5705F:
2639 	case DEVICE_ID_5705_2:
2640 	case DEVICE_ID_5754:
2641 		if (cidp->device == DEVICE_ID_5754) {
2642 			cidp->chip_label = 5754;
2643 			cidp->pci_type = BGE_PCI_E;
2644 		} else {
2645 			cidp->chip_label = 5705;
2646 			cidp->pci_type = BGE_PCI;
2647 			cidp->flags |= CHIP_FLAG_PARTIAL_CSUM;
2648 		}
2649 		cidp->mbuf_lo_water_rdma = RDMA_MBUF_LOWAT_5705;
2650 		cidp->mbuf_lo_water_rmac = MAC_RX_MBUF_LOWAT_5705;
2651 		cidp->mbuf_hi_water = MBUF_HIWAT_5705;
2652 		cidp->mbuf_base = bge_mbuf_pool_base_5705;
2653 		cidp->mbuf_length = bge_mbuf_pool_len_5705;
2654 		cidp->recv_slots = BGE_RECV_SLOTS_5705;
2655 		cidp->rx_rings = BGE_RECV_RINGS_MAX_5705;
2656 		cidp->tx_rings = BGE_SEND_RINGS_MAX_5705;
2657 		cidp->flags |= CHIP_FLAG_NO_JUMBO;
2658 		cidp->statistic_type = BGE_STAT_REG;
2659 		dev_ok = B_TRUE;
2660 		break;
2661 
2662 	case DEVICE_ID_5906:
2663 	case DEVICE_ID_5906M:
2664 		cidp->chip_label = 5906;
2665 		cidp->pci_type = BGE_PCI_E;
2666 		cidp->mbuf_lo_water_rdma = RDMA_MBUF_LOWAT_5906;
2667 		cidp->mbuf_lo_water_rmac = MAC_RX_MBUF_LOWAT_5906;
2668 		cidp->mbuf_hi_water = MBUF_HIWAT_5906;
2669 		cidp->mbuf_base = bge_mbuf_pool_base;
2670 		cidp->mbuf_length = bge_mbuf_pool_len;
2671 		cidp->recv_slots = BGE_RECV_SLOTS_5705;
2672 		cidp->rx_rings = BGE_RECV_RINGS_MAX_5705;
2673 		cidp->tx_rings = BGE_SEND_RINGS_MAX_5705;
2674 		cidp->flags |= CHIP_FLAG_NO_JUMBO;
2675 		cidp->statistic_type = BGE_STAT_REG;
2676 		dev_ok = B_TRUE;
2677 		break;
2678 
2679 	case DEVICE_ID_5753:
2680 		cidp->chip_label = 5753;
2681 		cidp->pci_type = BGE_PCI_E;
2682 		cidp->mbuf_lo_water_rdma = RDMA_MBUF_LOWAT_5705;
2683 		cidp->mbuf_lo_water_rmac = MAC_RX_MBUF_LOWAT_5705;
2684 		cidp->mbuf_hi_water = MBUF_HIWAT_5705;
2685 		cidp->mbuf_base = bge_mbuf_pool_base_5705;
2686 		cidp->mbuf_length = bge_mbuf_pool_len_5705;
2687 		cidp->recv_slots = BGE_RECV_SLOTS_5705;
2688 		cidp->bge_mlcr_default |= MLCR_MISC_PINS_OUTPUT_ENABLE_1;
2689 		cidp->rx_rings = BGE_RECV_RINGS_MAX_5705;
2690 		cidp->tx_rings = BGE_SEND_RINGS_MAX_5705;
2691 		cidp->flags |= CHIP_FLAG_NO_JUMBO;
2692 		cidp->statistic_type = BGE_STAT_REG;
2693 		dev_ok = B_TRUE;
2694 		break;
2695 
2696 	case DEVICE_ID_5755:
2697 	case DEVICE_ID_5755M:
2698 		cidp->chip_label = 5755;
2699 		cidp->pci_type = BGE_PCI_E;
2700 		cidp->mbuf_lo_water_rdma = RDMA_MBUF_LOWAT_5705;
2701 		cidp->mbuf_lo_water_rmac = MAC_RX_MBUF_LOWAT_5705;
2702 		cidp->mbuf_hi_water = MBUF_HIWAT_5705;
2703 		cidp->mbuf_base = bge_mbuf_pool_base_5705;
2704 		cidp->mbuf_length = bge_mbuf_pool_len_5705;
2705 		cidp->recv_slots = BGE_RECV_SLOTS_5705;
2706 		cidp->bge_mlcr_default |= MLCR_MISC_PINS_OUTPUT_ENABLE_1;
2707 		cidp->rx_rings = BGE_RECV_RINGS_MAX_5705;
2708 		cidp->tx_rings = BGE_SEND_RINGS_MAX_5705;
2709 		cidp->flags |= CHIP_FLAG_NO_JUMBO;
2710 		if (cidp->device == DEVICE_ID_5755M)
2711 			cidp->flags |= CHIP_FLAG_PARTIAL_CSUM;
2712 		cidp->statistic_type = BGE_STAT_REG;
2713 		dev_ok = B_TRUE;
2714 		break;
2715 
2716 	case DEVICE_ID_5756M:
2717 		/*
2718 		 * This is nearly identical to the 5755M.
2719 		 * (Actually reports the 5755 chip ID.)
2720 		 */
2721 		cidp->chip_label = 5756;
2722 		cidp->pci_type = BGE_PCI_E;
2723 		cidp->mbuf_lo_water_rdma = RDMA_MBUF_LOWAT_5705;
2724 		cidp->mbuf_lo_water_rmac = MAC_RX_MBUF_LOWAT_5705;
2725 		cidp->mbuf_hi_water = MBUF_HIWAT_5705;
2726 		cidp->mbuf_base = bge_mbuf_pool_base_5705;
2727 		cidp->mbuf_length = bge_mbuf_pool_len_5705;
2728 		cidp->recv_slots = BGE_RECV_SLOTS_5705;
2729 		cidp->bge_mlcr_default |= MLCR_MISC_PINS_OUTPUT_ENABLE_1;
2730 		cidp->rx_rings = BGE_RECV_RINGS_MAX_5705;
2731 		cidp->tx_rings = BGE_SEND_RINGS_MAX_5705;
2732 		cidp->flags |= CHIP_FLAG_NO_JUMBO;
2733 		cidp->statistic_type = BGE_STAT_REG;
2734 		dev_ok = B_TRUE;
2735 		break;
2736 
2737 	case DEVICE_ID_5787:
2738 	case DEVICE_ID_5787M:
2739 		cidp->chip_label = 5787;
2740 		cidp->pci_type = BGE_PCI_E;
2741 		cidp->mbuf_lo_water_rdma = RDMA_MBUF_LOWAT_5705;
2742 		cidp->mbuf_lo_water_rmac = MAC_RX_MBUF_LOWAT_5705;
2743 		cidp->mbuf_hi_water = MBUF_HIWAT_5705;
2744 		cidp->mbuf_base = bge_mbuf_pool_base_5705;
2745 		cidp->mbuf_length = bge_mbuf_pool_len_5705;
2746 		cidp->recv_slots = BGE_RECV_SLOTS_5705;
2747 		cidp->bge_mlcr_default |= MLCR_MISC_PINS_OUTPUT_ENABLE_1;
2748 		cidp->rx_rings = BGE_RECV_RINGS_MAX_5705;
2749 		cidp->tx_rings = BGE_SEND_RINGS_MAX_5705;
2750 		cidp->flags |= CHIP_FLAG_NO_JUMBO;
2751 		cidp->statistic_type = BGE_STAT_REG;
2752 		dev_ok = B_TRUE;
2753 		break;
2754 
2755 	case DEVICE_ID_5723:
2756 	case DEVICE_ID_5761:
2757 	case DEVICE_ID_5761E:
2758 	case DEVICE_ID_57780:
2759 		cidp->msi_enabled = bge_enable_msi;
2760 		/*
2761 		 * We don't use MSI for BCM5764 and BCM5785, as the
2762 		 * status block may fail to update when the network
2763 		 * traffic is heavy.
2764 		 */
2765 		/* FALLTHRU */
2766 	case DEVICE_ID_5785:
2767 	case DEVICE_ID_5764:
2768 		if (cidp->device == DEVICE_ID_5723)
2769 			cidp->chip_label = 5723;
2770 		else if (cidp->device == DEVICE_ID_5764)
2771 			cidp->chip_label = 5764;
2772 		else if (cidp->device == DEVICE_ID_5785)
2773 			cidp->chip_label = 5785;
2774 		else if (cidp->device == DEVICE_ID_57780)
2775 			cidp->chip_label = 57780;
2776 		else
2777 			cidp->chip_label = 5761;
2778 		cidp->bge_dma_rwctrl = bge_dma_rwctrl_5721;
2779 		cidp->pci_type = BGE_PCI_E;
2780 		cidp->mbuf_lo_water_rdma = RDMA_MBUF_LOWAT_5705;
2781 		cidp->mbuf_lo_water_rmac = MAC_RX_MBUF_LOWAT_5705;
2782 		cidp->mbuf_hi_water = MBUF_HIWAT_5705;
2783 		cidp->mbuf_base = bge_mbuf_pool_base_5705;
2784 		cidp->mbuf_length = bge_mbuf_pool_len_5705;
2785 		cidp->recv_slots = BGE_RECV_SLOTS_5705;
2786 		cidp->bge_mlcr_default |= MLCR_MISC_PINS_OUTPUT_ENABLE_1;
2787 		cidp->rx_rings = BGE_RECV_RINGS_MAX_5705;
2788 		cidp->tx_rings = BGE_SEND_RINGS_MAX_5705;
2789 		cidp->flags |= CHIP_FLAG_NO_JUMBO;
2790 		cidp->statistic_type = BGE_STAT_REG;
2791 		dev_ok = B_TRUE;
2792 		break;
2793 
2794 	/* PCI-X device, identical to 5714 */
2795 	case DEVICE_ID_5780:
2796 		cidp->chip_label = 5780;
2797 		cidp->mbuf_lo_water_rdma = RDMA_MBUF_LOWAT_5705;
2798 		cidp->mbuf_lo_water_rmac = MAC_RX_MBUF_LOWAT_5705;
2799 		cidp->mbuf_hi_water = MBUF_HIWAT_5705;
2800 		cidp->mbuf_base = bge_mbuf_pool_base_5721;
2801 		cidp->mbuf_length = bge_mbuf_pool_len_5721;
2802 		cidp->recv_slots = BGE_RECV_SLOTS_5721;
2803 		cidp->rx_rings = BGE_RECV_RINGS_MAX_5705;
2804 		cidp->tx_rings = BGE_SEND_RINGS_MAX_5705;
2805 		cidp->statistic_type = BGE_STAT_REG;
2806 		dev_ok = B_TRUE;
2807 		break;
2808 
2809 	case DEVICE_ID_5782:
2810 		/*
2811 		 * Apart from the label, we treat this as a 5705(?)
2812 		 */
2813 		cidp->chip_label = 5782;
2814 		cidp->mbuf_lo_water_rdma = RDMA_MBUF_LOWAT_5705;
2815 		cidp->mbuf_lo_water_rmac = MAC_RX_MBUF_LOWAT_5705;
2816 		cidp->mbuf_hi_water = MBUF_HIWAT_5705;
2817 		cidp->mbuf_base = bge_mbuf_pool_base_5705;
2818 		cidp->mbuf_length = bge_mbuf_pool_len_5705;
2819 		cidp->recv_slots = BGE_RECV_SLOTS_5705;
2820 		cidp->rx_rings = BGE_RECV_RINGS_MAX_5705;
2821 		cidp->tx_rings = BGE_SEND_RINGS_MAX_5705;
2822 		cidp->flags |= CHIP_FLAG_NO_JUMBO;
2823 		cidp->flags |= CHIP_FLAG_PARTIAL_CSUM;
2824 		cidp->statistic_type = BGE_STAT_REG;
2825 		dev_ok = B_TRUE;
2826 		break;
2827 
2828 	case DEVICE_ID_5788:
2829 		/*
2830 		 * Apart from the label, we treat this as a 5705(?)
2831 		 */
2832 		cidp->chip_label = 5788;
2833 		cidp->mbuf_lo_water_rdma = RDMA_MBUF_LOWAT_5705;
2834 		cidp->mbuf_lo_water_rmac = MAC_RX_MBUF_LOWAT_5705;
2835 		cidp->mbuf_hi_water = MBUF_HIWAT_5705;
2836 		cidp->mbuf_base = bge_mbuf_pool_base_5705;
2837 		cidp->mbuf_length = bge_mbuf_pool_len_5705;
2838 		cidp->recv_slots = BGE_RECV_SLOTS_5705;
2839 		cidp->rx_rings = BGE_RECV_RINGS_MAX_5705;
2840 		cidp->tx_rings = BGE_SEND_RINGS_MAX_5705;
2841 		cidp->statistic_type = BGE_STAT_REG;
2842 		cidp->flags |= CHIP_FLAG_NO_JUMBO;
2843 		dev_ok = B_TRUE;
2844 		break;
2845 
2846 	case DEVICE_ID_5714C:
2847 		if (cidp->revision >= REVISION_ID_5714_A2)
2848 			cidp->msi_enabled = bge_enable_msi;
2849 		/* FALLTHRU */
2850 	case DEVICE_ID_5714S:
2851 		cidp->chip_label = 5714;
2852 		cidp->mbuf_lo_water_rdma = RDMA_MBUF_LOWAT_5705;
2853 		cidp->mbuf_lo_water_rmac = MAC_RX_MBUF_LOWAT_5705;
2854 		cidp->mbuf_hi_water = MBUF_HIWAT_5705;
2855 		cidp->mbuf_base = bge_mbuf_pool_base_5721;
2856 		cidp->mbuf_length = bge_mbuf_pool_len_5721;
2857 		cidp->recv_slots = BGE_RECV_SLOTS_5721;
2858 		cidp->bge_dma_rwctrl = bge_dma_rwctrl_5714;
2859 		cidp->bge_mlcr_default = bge_mlcr_default_5714;
2860 		cidp->rx_rings = BGE_RECV_RINGS_MAX_5705;
2861 		cidp->tx_rings = BGE_SEND_RINGS_MAX_5705;
2862 		cidp->pci_type = BGE_PCI_E;
2863 		cidp->statistic_type = BGE_STAT_REG;
2864 		dev_ok = B_TRUE;
2865 		break;
2866 
2867 	case DEVICE_ID_5715C:
2868 	case DEVICE_ID_5715S:
2869 		cidp->chip_label = 5715;
2870 		cidp->mbuf_lo_water_rdma = RDMA_MBUF_LOWAT_5705;
2871 		cidp->mbuf_lo_water_rmac = MAC_RX_MBUF_LOWAT_5705;
2872 		cidp->mbuf_hi_water = MBUF_HIWAT_5705;
2873 		cidp->mbuf_base = bge_mbuf_pool_base_5721;
2874 		cidp->mbuf_length = bge_mbuf_pool_len_5721;
2875 		cidp->recv_slots = BGE_RECV_SLOTS_5721;
2876 		cidp->bge_dma_rwctrl = bge_dma_rwctrl_5715;
2877 		cidp->bge_mlcr_default = bge_mlcr_default_5714;
2878 		cidp->rx_rings = BGE_RECV_RINGS_MAX_5705;
2879 		cidp->tx_rings = BGE_SEND_RINGS_MAX_5705;
2880 		cidp->pci_type = BGE_PCI_E;
2881 		cidp->statistic_type = BGE_STAT_REG;
2882 		if (cidp->revision >= REVISION_ID_5715_A2)
2883 			cidp->msi_enabled = bge_enable_msi;
2884 		dev_ok = B_TRUE;
2885 		break;
2886 
2887 	case DEVICE_ID_5721:
2888 		cidp->chip_label = 5721;
2889 		cidp->mbuf_lo_water_rdma = RDMA_MBUF_LOWAT_5705;
2890 		cidp->mbuf_lo_water_rmac = MAC_RX_MBUF_LOWAT_5705;
2891 		cidp->mbuf_hi_water = MBUF_HIWAT_5705;
2892 		cidp->mbuf_base = bge_mbuf_pool_base_5721;
2893 		cidp->mbuf_length = bge_mbuf_pool_len_5721;
2894 		cidp->recv_slots = BGE_RECV_SLOTS_5721;
2895 		cidp->bge_dma_rwctrl = bge_dma_rwctrl_5721;
2896 		cidp->rx_rings = BGE_RECV_RINGS_MAX_5705;
2897 		cidp->tx_rings = BGE_SEND_RINGS_MAX_5705;
2898 		cidp->pci_type = BGE_PCI_E;
2899 		cidp->statistic_type = BGE_STAT_REG;
2900 		cidp->flags |= CHIP_FLAG_NO_JUMBO;
2901 		dev_ok = B_TRUE;
2902 		break;
2903 
2904 	case DEVICE_ID_5722:
2905 		cidp->chip_label = 5722;
2906 		cidp->pci_type = BGE_PCI_E;
2907 		cidp->mbuf_lo_water_rdma = RDMA_MBUF_LOWAT_5705;
2908 		cidp->mbuf_lo_water_rmac = MAC_RX_MBUF_LOWAT_5705;
2909 		cidp->mbuf_hi_water = MBUF_HIWAT_5705;
2910 		cidp->mbuf_base = bge_mbuf_pool_base_5705;
2911 		cidp->mbuf_length = bge_mbuf_pool_len_5705;
2912 		cidp->recv_slots = BGE_RECV_SLOTS_5705;
2913 		cidp->bge_mlcr_default |= MLCR_MISC_PINS_OUTPUT_ENABLE_1;
2914 		cidp->rx_rings = BGE_RECV_RINGS_MAX_5705;
2915 		cidp->tx_rings = BGE_SEND_RINGS_MAX_5705;
2916 		cidp->flags |= CHIP_FLAG_NO_JUMBO;
2917 		cidp->statistic_type = BGE_STAT_REG;
2918 		dev_ok = B_TRUE;
2919 		break;
2920 
2921 	case DEVICE_ID_5751:
2922 	case DEVICE_ID_5751M:
2923 		cidp->chip_label = 5751;
2924 		cidp->mbuf_lo_water_rdma = RDMA_MBUF_LOWAT_5705;
2925 		cidp->mbuf_lo_water_rmac = MAC_RX_MBUF_LOWAT_5705;
2926 		cidp->mbuf_hi_water = MBUF_HIWAT_5705;
2927 		cidp->mbuf_base = bge_mbuf_pool_base_5721;
2928 		cidp->mbuf_length = bge_mbuf_pool_len_5721;
2929 		cidp->recv_slots = BGE_RECV_SLOTS_5721;
2930 		cidp->bge_dma_rwctrl = bge_dma_rwctrl_5721;
2931 		cidp->rx_rings = BGE_RECV_RINGS_MAX_5705;
2932 		cidp->tx_rings = BGE_SEND_RINGS_MAX_5705;
2933 		cidp->pci_type = BGE_PCI_E;
2934 		cidp->statistic_type = BGE_STAT_REG;
2935 		cidp->flags |= CHIP_FLAG_NO_JUMBO;
2936 		dev_ok = B_TRUE;
2937 		break;
2938 
2939 	case DEVICE_ID_5752:
2940 	case DEVICE_ID_5752M:
2941 		cidp->chip_label = 5752;
2942 		cidp->mbuf_lo_water_rdma = RDMA_MBUF_LOWAT_5705;
2943 		cidp->mbuf_lo_water_rmac = MAC_RX_MBUF_LOWAT_5705;
2944 		cidp->mbuf_hi_water = MBUF_HIWAT_5705;
2945 		cidp->mbuf_base = bge_mbuf_pool_base_5721;
2946 		cidp->mbuf_length = bge_mbuf_pool_len_5721;
2947 		cidp->recv_slots = BGE_RECV_SLOTS_5721;
2948 		cidp->bge_dma_rwctrl = bge_dma_rwctrl_5721;
2949 		cidp->rx_rings = BGE_RECV_RINGS_MAX_5705;
2950 		cidp->tx_rings = BGE_SEND_RINGS_MAX_5705;
2951 		cidp->pci_type = BGE_PCI_E;
2952 		cidp->statistic_type = BGE_STAT_REG;
2953 		cidp->flags |= CHIP_FLAG_NO_JUMBO;
2954 		dev_ok = B_TRUE;
2955 		break;
2956 
2957 	case DEVICE_ID_5789:
2958 		cidp->chip_label = 5789;
2959 		cidp->mbuf_base = bge_mbuf_pool_base_5721;
2960 		cidp->mbuf_length = bge_mbuf_pool_len_5721;
2961 		cidp->recv_slots = BGE_RECV_SLOTS_5721;
2962 		cidp->bge_dma_rwctrl = bge_dma_rwctrl_5721;
2963 		cidp->rx_rings = BGE_RECV_RINGS_MAX_5705;
2964 		cidp->tx_rings = BGE_RECV_RINGS_MAX_5705;
2965 		cidp->pci_type = BGE_PCI_E;
2966 		cidp->statistic_type = BGE_STAT_REG;
2967 		cidp->flags |= CHIP_FLAG_PARTIAL_CSUM;
2968 		cidp->flags |= CHIP_FLAG_NO_JUMBO;
2969 		cidp->msi_enabled = B_TRUE;
2970 		dev_ok = B_TRUE;
2971 		break;
2972 
2973 	}
2974 
2975 	/*
2976 	 * Setup the default jumbo parameter.
2977 	 */
2978 	cidp->ethmax_size = ETHERMAX;
2979 	cidp->snd_buff_size = BGE_SEND_BUFF_SIZE_DEFAULT;
2980 	cidp->std_buf_size = BGE_STD_BUFF_SIZE;
2981 
2982 	/*
2983 	 * If jumbo is enabled and this kind of chipset supports jumbo feature,
2984 	 * setup below jumbo specific parameters.
2985 	 *
2986 	 * For BCM5714/5715, there is only one standard receive ring. So the
2987 	 * std buffer size should be set to BGE_JUMBO_BUFF_SIZE when jumbo
2988 	 * feature is enabled.
2989 	 *
2990 	 * For the BCM5718 family we hijack the standard receive ring for
2991 	 * the jumboframe traffic, keeps it simple.
2992 	 */
2993 	if (!(cidp->flags & CHIP_FLAG_NO_JUMBO) &&
2994 	    (cidp->default_mtu > BGE_DEFAULT_MTU)) {
2995 		if (DEVICE_5714_SERIES_CHIPSETS(bgep) ||
2996 		    DEVICE_5717_SERIES_CHIPSETS(bgep) ||
2997 		    DEVICE_5725_SERIES_CHIPSETS(bgep) ||
2998 		    DEVICE_57765_SERIES_CHIPSETS(bgep)) {
2999 			cidp->mbuf_lo_water_rdma =
3000 			    RDMA_MBUF_LOWAT_5714_JUMBO;
3001 			cidp->mbuf_lo_water_rmac =
3002 			    MAC_RX_MBUF_LOWAT_5714_JUMBO;
3003 			cidp->mbuf_hi_water = MBUF_HIWAT_5714_JUMBO;
3004 			cidp->jumbo_slots = 0;
3005 			cidp->std_buf_size = BGE_JUMBO_BUFF_SIZE;
3006 		} else {
3007 			cidp->mbuf_lo_water_rdma =
3008 			    RDMA_MBUF_LOWAT_JUMBO;
3009 			cidp->mbuf_lo_water_rmac =
3010 			    MAC_RX_MBUF_LOWAT_JUMBO;
3011 			cidp->mbuf_hi_water = MBUF_HIWAT_JUMBO;
3012 			cidp->jumbo_slots = BGE_JUMBO_SLOTS_USED;
3013 		}
3014 		cidp->recv_jumbo_size = BGE_JUMBO_BUFF_SIZE;
3015 		cidp->snd_buff_size = BGE_SEND_BUFF_SIZE_JUMBO;
3016 		cidp->ethmax_size = cidp->default_mtu +
3017 		    sizeof (struct ether_header);
3018 	}
3019 
3020 	/*
3021 	 * Identify the NV memory type: SEEPROM or Flash?
3022 	 */
3023 	cidp->nvtype = bge_nvmem_id(bgep);
3024 
3025 	/*
3026 	 * Now check what we've discovered: is this truly a supported
3027 	 * chip on (the motherboard of) a supported platform?
3028 	 *
3029 	 * Possible problems here:
3030 	 * 1)	it's a completely unheard-of chip
3031 	 * 2)	it's a recognised but unsupported chip (e.g. 5701, 5703C-A0)
3032 	 * 3)	it's a chip we would support if it were on the motherboard
3033 	 *	of a Sun platform, but this one isn't ;-(
3034 	 */
3035 	if (cidp->chip_label == 0)
3036 		bge_problem(bgep,
3037 		    "Device 'pci%04x,%04x' not recognized (%d?)",
3038 		    cidp->vendor, cidp->device, cidp->device);
3039 	else if (!dev_ok)
3040 		bge_problem(bgep,
3041 		    "Device 'pci%04x,%04x' (%d) revision %d not supported",
3042 		    cidp->vendor, cidp->device, cidp->chip_label,
3043 		    cidp->revision);
3044 	else
3045 		cidp->flags |= CHIP_FLAG_SUPPORTED;
3046 
3047 	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK)
3048 		return (EIO);
3049 
3050 	return (0);
3051 }
3052 
3053 void
bge_chip_msi_trig(bge_t * bgep)3054 bge_chip_msi_trig(bge_t *bgep)
3055 {
3056 	uint32_t	regval;
3057 
3058 	regval = bgep->param_msi_cnt<<4;
3059 	bge_reg_set32(bgep, HOST_COALESCE_MODE_REG, regval);
3060 	BGE_DEBUG(("bge_chip_msi_trig:data = %d", regval));
3061 }
3062 
3063 /*
3064  * Various registers that control the chip's internal engines (state
3065  * machines) have a <reset> and <enable> bits (fortunately, in the
3066  * same place in each such register :-).
3067  *
3068  * To reset the state machine, the <reset> bit must be written with 1;
3069  * it will then read back as 1 while the reset is in progress, but
3070  * self-clear to 0 when the reset completes.
3071  *
3072  * To enable a state machine, one must set the <enable> bit, which
3073  * will continue to read back as 0 until the state machine is running.
3074  *
3075  * To disable a state machine, the <enable> bit must be cleared, but
3076  * it will continue to read back as 1 until the state machine actually
3077  * stops.
3078  *
3079  * This routine implements polling for completion of a reset, enable
3080  * or disable operation, returning B_TRUE on success (bit reached the
3081  * required state) or B_FALSE on timeout (200*100us == 20ms).
3082  */
3083 static boolean_t
bge_chip_poll_engine(bge_t * bgep,bge_regno_t regno,uint32_t mask,uint32_t val)3084 bge_chip_poll_engine(bge_t *bgep, bge_regno_t regno,
3085 	uint32_t mask, uint32_t val)
3086 {
3087 	uint32_t regval;
3088 	uint32_t n;
3089 
3090 	BGE_TRACE(("bge_chip_poll_engine($%p, 0x%lx, 0x%x, 0x%x)",
3091 	    (void *)bgep, regno, mask, val));
3092 
3093 	for (n = 200; n; --n) {
3094 		regval = bge_reg_get32(bgep, regno);
3095 		if ((regval & mask) == val)
3096 			return (B_TRUE);
3097 		drv_usecwait(100);
3098 	}
3099 
3100 	bge_problem(bgep, "bge_chip_poll_engine failed: regno = 0x%lx", regno);
3101 	bge_fm_ereport(bgep, DDI_FM_DEVICE_NO_RESPONSE);
3102 	return (B_FALSE);
3103 }
3104 
3105 /*
3106  * Various registers that control the chip's internal engines (state
3107  * machines) have a <reset> bit (fortunately, in the same place in
3108  * each such register :-).  To reset the state machine, this bit must
3109  * be written with 1; it will then read back as 1 while the reset is
3110  * in progress, but self-clear to 0 when the reset completes.
3111  *
3112  * This code sets the bit, then polls for it to read back as zero.
3113  * The return value is B_TRUE on success (reset bit cleared itself),
3114  * or B_FALSE if the state machine didn't recover :(
3115  *
3116  * NOTE: the Core reset is similar to other resets, except that we
3117  * can't poll for completion, since the Core reset disables memory
3118  * access!  So we just have to assume that it will all complete in
3119  * 100us.  See Broadcom document 570X-PG102-R, p102, steps 4-5.
3120  */
static boolean_t
bge_chip_reset_engine(bge_t *bgep, bge_regno_t regno)
{
	uint32_t regval;
	uint16_t val16;
	uint32_t val32;
	uint32_t mhcr;

	/* Read the current mode bits so the reset preserves them */
	regval = bge_reg_get32(bgep, regno);

	BGE_TRACE(("bge_chip_reset_engine($%p, 0x%lx)",
	    (void *)bgep, regno));
	BGE_DEBUG(("bge_chip_reset_engine: 0x%lx before reset = 0x%08x",
	    regno, regval));

	regval |= STATE_MACHINE_RESET_BIT;

	switch (regno) {
	case MISC_CONFIG_REG:
		/*
		 * BCM5714/5721/5751 pcie chip special case. In order to avoid
		 * resetting PCIE block and bringing PCIE link down, bit 29
		 * in the register needs to be set first, and then set it again
		 * while the reset bit is written.
		 * See:P500 of 57xx-PG102-RDS.pdf.
		 */
		if (DEVICE_5705_SERIES_CHIPSETS(bgep) ||
		    DEVICE_5717_SERIES_CHIPSETS(bgep) ||
		    DEVICE_5725_SERIES_CHIPSETS(bgep) ||
		    DEVICE_5721_SERIES_CHIPSETS(bgep) ||
		    DEVICE_5723_SERIES_CHIPSETS(bgep) ||
		    DEVICE_5714_SERIES_CHIPSETS(bgep) ||
		    DEVICE_5906_SERIES_CHIPSETS(bgep) ||
		    DEVICE_57765_SERIES_CHIPSETS(bgep)) {
			regval |= MISC_CONFIG_GPHY_POWERDOWN_OVERRIDE;
			if (bgep->chipid.pci_type == BGE_PCI_E) {
				/*
				 * A0-revision silicon workarounds: drop the
				 * PCIe LTASS scrambler-mode bit if both mode
				 * bits are set, and apply the A0 clock fix
				 * in PCI config space before the core reset.
				 */
				if (bgep->chipid.asic_rev ==
				    MHCR_CHIP_REV_5751_A0 ||
				    bgep->chipid.asic_rev ==
				    MHCR_CHIP_REV_5721_A0 ||
				    bgep->chipid.asic_rev ==
				    MHCR_CHIP_REV_5755_A0) {
					val32 = bge_reg_get32(bgep,
					    PHY_TEST_CTRL_REG);
					if (val32 == (PHY_PCIE_SCRAM_MODE |
					    PHY_PCIE_LTASS_MODE))
						bge_reg_put32(bgep,
						    PHY_TEST_CTRL_REG,
						    PHY_PCIE_SCRAM_MODE);
					val32 = pci_config_get32
					    (bgep->cfg_handle,
					    PCI_CONF_BGE_CLKCTL);
					val32 |= CLKCTL_PCIE_A0_FIX;
					pci_config_put32(bgep->cfg_handle,
					    PCI_CONF_BGE_CLKCTL, val32);
				}
				/*
				 * Set GRC_RESET_DISABLE first (on its own),
				 * then include it in the value written with
				 * the reset bit, per the two-step sequence
				 * described above.
				 */
				bge_reg_set32(bgep, regno,
				    MISC_CONFIG_GRC_RESET_DISABLE);
				regval |= MISC_CONFIG_GRC_RESET_DISABLE;
			}
		}

		/*
		 * Special case - causes Core reset
		 *
		 * On SPARC v9 we want to ensure that we don't start
		 * timing until the I/O access has actually reached
		 * the chip, otherwise we might make the next access
		 * too early.  And we can't just force the write out
		 * by following it with a read (even to config space)
		 * because that would cause the fault we're trying
		 * to avoid.  Hence the need for membar_sync() here.
		 */
		ddi_put32(bgep->io_handle, PIO_ADDR(bgep, regno), regval);
#ifdef	__sparcv9
		membar_sync();
#endif	/* __sparcv9 */
		/*
		 * On some platforms,system need about 300us for
		 * link setup.
		 */
		drv_usecwait(300);
		if (DEVICE_5906_SERIES_CHIPSETS(bgep)) {
			/* 5906: kick the internal VCPU out of reset/halt */
			bge_reg_set32(bgep, VCPU_STATUS_REG, VCPU_DRV_RESET);
			bge_reg_clr32(
			    bgep, VCPU_EXT_CTL, VCPU_EXT_CTL_HALF);
		}

		if (bgep->chipid.pci_type == BGE_PCI_E) {
			/* PCI-E device need more reset time */
			drv_usecwait(120000);

			/*
			 * (re)Disable interrupts as the bit can be reset after a
			 * core clock reset.
			 */
			mhcr = pci_config_get32(bgep->cfg_handle, PCI_CONF_BGE_MHCR);
			pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_MHCR,
			    mhcr | MHCR_MASK_PCI_INT_OUTPUT);

			/* Set PCIE max payload size and clear error status. */
			if ((bgep->chipid.chip_label == 5721) ||
			    (bgep->chipid.chip_label == 5751) ||
			    (bgep->chipid.chip_label == 5752) ||
			    (bgep->chipid.chip_label == 5789) ||
			    (bgep->chipid.chip_label == 5906)) {
				pci_config_put16(bgep->cfg_handle,
				    PCI_CONF_DEV_CTRL, READ_REQ_SIZE_MAX);
				pci_config_put16(bgep->cfg_handle,
				    PCI_CONF_DEV_STUS, DEVICE_ERROR_STUS);
			}

			/* 5723/5761 keep the device control regs elsewhere */
			if ((bgep->chipid.chip_label == 5723) ||
			    (bgep->chipid.chip_label == 5761)) {
				pci_config_put16(bgep->cfg_handle,
				    PCI_CONF_DEV_CTRL_5723, READ_REQ_SIZE_MAX);
				pci_config_put16(bgep->cfg_handle,
				    PCI_CONF_DEV_STUS_5723, DEVICE_ERROR_STUS);
			}

			/* 5717/5725/57765 families: cap read request at 2K */
			if (DEVICE_5717_SERIES_CHIPSETS(bgep) ||
			    DEVICE_5725_SERIES_CHIPSETS(bgep) ||
			    DEVICE_57765_SERIES_CHIPSETS(bgep)) {
				val16 = pci_config_get16(bgep->cfg_handle,
				                         PCI_CONF_DEV_CTRL_5717);
				val16 &= ~READ_REQ_SIZE_MASK;
				val16 |= READ_REQ_SIZE_2K;
				pci_config_put16(bgep->cfg_handle,
				    PCI_CONF_DEV_CTRL_5717, val16);
			}
		}

		BGE_PCICHK(bgep);
		/*
		 * The Core reset can't be polled for completion (memory
		 * access is disabled during it), so success is assumed
		 * after the waits above.
		 */
		return (B_TRUE);

	default:
		/*
		 * Ordinary engine: write the reset bit and poll for it
		 * to self-clear, indicating the reset has completed.
		 */
		bge_reg_put32(bgep, regno, regval);
		return (bge_chip_poll_engine(bgep, regno,
		    STATE_MACHINE_RESET_BIT, 0));
	}
}
3262 
3263 /*
3264  * Various registers that control the chip's internal engines (state
3265  * machines) have an <enable> bit (fortunately, in the same place in
3266  * each such register :-).  To stop the state machine, this bit must
3267  * be written with 0, then polled to see when the state machine has
3268  * actually stopped.
3269  *
3270  * The return value is B_TRUE on success (enable bit cleared), or
3271  * B_FALSE if the state machine didn't stop :(
3272  */
3273 static boolean_t
bge_chip_disable_engine(bge_t * bgep,bge_regno_t regno,uint32_t morebits)3274 bge_chip_disable_engine(bge_t *bgep, bge_regno_t regno, uint32_t morebits)
3275 {
3276 	uint32_t regval;
3277 
3278 	BGE_TRACE(("bge_chip_disable_engine($%p, 0x%lx, 0x%x)",
3279 	    (void *)bgep, regno, morebits));
3280 
3281 	switch (regno) {
3282 	case FTQ_RESET_REG:
3283 		/*
3284 		 * For Schumacher's bugfix CR6490108
3285 		 */
3286 #ifdef BGE_IPMI_ASF
3287 #ifdef BGE_NETCONSOLE
3288 		if (bgep->asf_enabled)
3289 			return (B_TRUE);
3290 #endif
3291 #endif
3292 		/*
3293 		 * Not quite like the others; it doesn't
3294 		 * have an <enable> bit, but instead we
3295 		 * have to set and then clear all the bits
3296 		 */
3297 		bge_reg_put32(bgep, regno, ~(uint32_t)0);
3298 		drv_usecwait(100);
3299 		bge_reg_put32(bgep, regno, 0);
3300 		return (B_TRUE);
3301 
3302 	default:
3303 		if (DEVICE_5704_SERIES_CHIPSETS(bgep)) {
3304 			break;
3305 		}
3306 
3307 		if ((regno == RCV_LIST_SELECTOR_MODE_REG) ||
3308 		    (regno == DMA_COMPLETION_MODE_REG) ||
3309 		    (regno == MBUF_CLUSTER_FREE_MODE_REG) ||
3310 		    (regno == BUFFER_MANAGER_MODE_REG) ||
3311 		    (regno == MEMORY_ARBITER_MODE_REG)) {
3312 			return B_TRUE;
3313 		}
3314 
3315 		break;
3316 	}
3317 
3318 	regval = bge_reg_get32(bgep, regno);
3319 	regval &= ~STATE_MACHINE_ENABLE_BIT;
3320 	regval &= ~morebits;
3321 	bge_reg_put32(bgep, regno, regval);
3322 
3323 	return bge_chip_poll_engine(bgep, regno, STATE_MACHINE_ENABLE_BIT, 0);
3324 }
3325 
3326 /*
3327  * Various registers that control the chip's internal engines (state
3328  * machines) have an <enable> bit (fortunately, in the same place in
3329  * each such register :-).  To start the state machine, this bit must
3330  * be written with 1, then polled to see when the state machine has
3331  * actually started.
3332  *
3333  * The return value is B_TRUE on success (enable bit set), or
3334  * B_FALSE if the state machine didn't start :(
3335  */
3336 static boolean_t
bge_chip_enable_engine(bge_t * bgep,bge_regno_t regno,uint32_t morebits)3337 bge_chip_enable_engine(bge_t *bgep, bge_regno_t regno, uint32_t morebits)
3338 {
3339 	uint32_t regval;
3340 
3341 	BGE_TRACE(("bge_chip_enable_engine($%p, 0x%lx, 0x%x)",
3342 	    (void *)bgep, regno, morebits));
3343 
3344 	switch (regno) {
3345 	case FTQ_RESET_REG:
3346 #ifdef BGE_IPMI_ASF
3347 #ifdef BGE_NETCONSOLE
3348 		if (bgep->asf_enabled)
3349 			return (B_TRUE);
3350 #endif
3351 #endif
3352 		/*
3353 		 * Not quite like the others; it doesn't
3354 		 * have an <enable> bit, but instead we
3355 		 * have to set and then clear all the bits
3356 		 */
3357 		bge_reg_put32(bgep, regno, ~(uint32_t)0);
3358 		drv_usecwait(100);
3359 		bge_reg_put32(bgep, regno, 0);
3360 		return (B_TRUE);
3361 
3362 	default:
3363 		regval = bge_reg_get32(bgep, regno);
3364 		regval |= STATE_MACHINE_ENABLE_BIT;
3365 		regval |= morebits;
3366 		bge_reg_put32(bgep, regno, regval);
3367 		return (bge_chip_poll_engine(bgep, regno,
3368 		    STATE_MACHINE_ENABLE_BIT, STATE_MACHINE_ENABLE_BIT));
3369 	}
3370 }
3371 
3372 /*
3373  * Reprogram the Ethernet, Transmit, and Receive MAC
3374  * modes to match the param_* variables
3375  */
3376 void
bge_sync_mac_modes(bge_t * bgep)3377 bge_sync_mac_modes(bge_t *bgep)
3378 {
3379 	uint32_t macmode;
3380 	uint32_t regval;
3381 
3382 	ASSERT(mutex_owned(bgep->genlock));
3383 
3384 	/*
3385 	 * Reprogram the Ethernet MAC mode ...
3386 	 */
3387 	macmode = regval = bge_reg_get32(bgep, ETHERNET_MAC_MODE_REG);
3388 	macmode &= ~ETHERNET_MODE_LINK_POLARITY;
3389 	macmode &= ~ETHERNET_MODE_PORTMODE_MASK;
3390 	if ((bgep->chipid.flags & CHIP_FLAG_SERDES) &&
3391 	    (bgep->param_loop_mode != BGE_LOOP_INTERNAL_MAC)) {
3392 		if (DEVICE_5717_SERIES_CHIPSETS(bgep) ||
3393 		    DEVICE_5725_SERIES_CHIPSETS(bgep) ||
3394 		    DEVICE_5714_SERIES_CHIPSETS(bgep) ||
3395 		    DEVICE_57765_SERIES_CHIPSETS(bgep))
3396 			macmode |= ETHERNET_MODE_PORTMODE_GMII;
3397 		else
3398 			macmode |= ETHERNET_MODE_PORTMODE_TBI;
3399 	} else if (bgep->param_link_speed == 10 ||
3400 	    bgep->param_link_speed == 100)
3401 		macmode |= ETHERNET_MODE_PORTMODE_MII;
3402 	else
3403 		macmode |= ETHERNET_MODE_PORTMODE_GMII;
3404 	if (bgep->param_link_duplex == LINK_DUPLEX_HALF)
3405 		macmode |= ETHERNET_MODE_HALF_DUPLEX;
3406 	else
3407 		macmode &= ~ETHERNET_MODE_HALF_DUPLEX;
3408 	if (bgep->param_loop_mode == BGE_LOOP_INTERNAL_MAC)
3409 		macmode |= ETHERNET_MODE_MAC_LOOPBACK;
3410 	else
3411 		macmode &= ~ETHERNET_MODE_MAC_LOOPBACK;
3412 	bge_reg_put32(bgep, ETHERNET_MAC_MODE_REG, macmode);
3413 	BGE_DEBUG(("bge_sync_mac_modes($%p) Ethernet MAC mode 0x%x => 0x%x",
3414 	    (void *)bgep, regval, macmode));
3415 
3416 	/*
3417 	 * ... the Transmit MAC mode ...
3418 	 */
3419 	macmode = regval = bge_reg_get32(bgep, TRANSMIT_MAC_MODE_REG);
3420 	if (bgep->param_link_tx_pause)
3421 		macmode |= TRANSMIT_MODE_FLOW_CONTROL;
3422 	else
3423 		macmode &= ~TRANSMIT_MODE_FLOW_CONTROL;
3424 	bge_reg_put32(bgep, TRANSMIT_MAC_MODE_REG, macmode);
3425 	BGE_DEBUG(("bge_sync_mac_modes($%p) Transmit MAC mode 0x%x => 0x%x",
3426 	    (void *)bgep, regval, macmode));
3427 
3428 	/*
3429 	 * ... and the Receive MAC mode
3430 	 */
3431 	macmode = regval = bge_reg_get32(bgep, RECEIVE_MAC_MODE_REG);
3432 	if (bgep->param_link_rx_pause)
3433 		macmode |= RECEIVE_MODE_FLOW_CONTROL;
3434 	else
3435 		macmode &= ~RECEIVE_MODE_FLOW_CONTROL;
3436 	bge_reg_put32(bgep, RECEIVE_MAC_MODE_REG, macmode);
3437 	BGE_DEBUG(("bge_sync_mac_modes($%p) Receive MAC mode 0x%x => 0x%x",
3438 	    (void *)bgep, regval, macmode));
3439 
3440 	/*
3441 	 * For BCM5785, we need to configure the link status in the MI Status
3442 	 * register with a write command when auto-polling is disabled.
3443 	 */
3444 	if (bgep->chipid.device == DEVICE_ID_5785)
3445 		if (bgep->param_link_speed == 10)
3446 			bge_reg_put32(bgep, MI_STATUS_REG, MI_STATUS_LINK
3447 			    | MI_STATUS_10MBPS);
3448 		else
3449 			bge_reg_put32(bgep, MI_STATUS_REG, MI_STATUS_LINK);
3450 }
3451 
3452 /*
3453  * bge_chip_sync() -- program the chip with the unicast MAC address,
3454  * the multicast hash table, the required level of promiscuity, and
3455  * the current loopback mode ...
3456  */
int
#ifdef BGE_IPMI_ASF
bge_chip_sync(bge_t *bgep, boolean_t asf_keeplive)
#else
bge_chip_sync(bge_t *bgep)
#endif
{
	void (*opfn)(bge_t *bgep, bge_regno_t reg, uint32_t bits);
	boolean_t promisc;
	uint64_t macaddr;
	uint32_t fill = 0;
	int i, j;
	int retval = DDI_SUCCESS;

	BGE_TRACE(("bge_chip_sync($%p)",
	    (void *)bgep));

	ASSERT(mutex_owned(bgep->genlock));

	/*
	 * In promiscuous mode <fill> is all-ones; it is OR'd into every
	 * multicast hash word below so that all multicast addresses pass.
	 */
	promisc = B_FALSE;
	fill = ~(uint32_t)0;

	if (bgep->promisc)
		promisc = B_TRUE;
	else
		fill = (uint32_t)0;

	/*
	 * If the TX/RX MAC engines are already running, we should stop
	 * them (and reset the RX engine) before changing the parameters.
	 * If they're not running, this will have no effect ...
	 *
	 * NOTE: this is currently disabled by default because stopping
	 * and restarting the Tx engine may cause an outgoing packet in
	 * transit to be truncated.  Also, stopping and restarting the
	 * Rx engine seems to not work correctly on the 5705.  Testing
	 * has not (yet!) revealed any problems with NOT stopping and
	 * restarting these engines (and Broadcom say their drivers don't
	 * do this), but if it is found to cause problems, this variable
	 * can be patched to re-enable the old behaviour ...
	 */
	if (bge_stop_start_on_sync) {
#ifdef BGE_IPMI_ASF
		if (!bgep->asf_enabled) {
			if (!bge_chip_disable_engine(bgep,
			    RECEIVE_MAC_MODE_REG, RECEIVE_MODE_KEEP_VLAN_TAG))
				retval = DDI_FAILURE;
		} else {
			if (!bge_chip_disable_engine(bgep,
			    RECEIVE_MAC_MODE_REG, 0))
				retval = DDI_FAILURE;
		}
#else
		if (!bge_chip_disable_engine(bgep, RECEIVE_MAC_MODE_REG,
		    RECEIVE_MODE_KEEP_VLAN_TAG))
			retval = DDI_FAILURE;
#endif
		if (!bge_chip_disable_engine(bgep, TRANSMIT_MAC_MODE_REG, 0))
			retval = DDI_FAILURE;
		if (!bge_chip_reset_engine(bgep, RECEIVE_MAC_MODE_REG))
			retval = DDI_FAILURE;
	}

	/*
	 * Reprogram the hashed multicast address table ...
	 * (clear first, then write the hash words OR'd with <fill>)
	 */
	for (i = 0; i < BGE_HASH_TABLE_SIZE/32; ++i)
		bge_reg_put32(bgep, MAC_HASH_REG(i), 0);

	for (i = 0; i < BGE_HASH_TABLE_SIZE/32; ++i)
		bge_reg_put32(bgep, MAC_HASH_REG(i),
			bgep->mcast_hash[i] | fill);

#ifdef BGE_IPMI_ASF
	if (!bgep->asf_enabled || !asf_keeplive) {
#endif
		/*
		 * Transform the MAC address(es) from host to chip format, then
		 * reprogram the transmit random backoff seed and the unicast
		 * MAC address(es) ...
		 *
		 * Note that <fill> also accumulates MAC-address entropy here,
		 * which seeds the backoff register below.
		 */
		for (j = 0; j < MAC_ADDRESS_REGS_MAX; j++) {
			for (i = 0, macaddr = 0ull;
			    i < ETHERADDRL; ++i) {
				macaddr <<= 8;
				macaddr |= bgep->curr_addr[j].addr[i];
			}
			fill += (macaddr >> 16) + (macaddr & 0xffffffff);
			bge_reg_put64(bgep, MAC_ADDRESS_REG(j), macaddr);

			BGE_DEBUG(("bge_chip_sync($%p) "
			    "setting MAC address %012llx",
			    (void *)bgep, macaddr));
		}
#ifdef BGE_IPMI_ASF
	}
#endif
	/*
	 * Set random seed of backoff interval
	 *   - Writing zero means no backoff interval
	 */
	fill = ((fill >> 20) + (fill >> 10) + fill) & 0x3ff;
	if (fill == 0)
		fill = 1;
	bge_reg_put32(bgep, MAC_TX_RANDOM_BACKOFF_REG, fill);

	/*
	 * Set or clear the PROMISCUOUS mode bit
	 */
	opfn = promisc ? bge_reg_set32 : bge_reg_clr32;
	(*opfn)(bgep, RECEIVE_MAC_MODE_REG, RECEIVE_MODE_PROMISCUOUS);

	/*
	 * Sync the rest of the MAC modes too ...
	 */
	bge_sync_mac_modes(bgep);

	/*
	 * Restart RX/TX MAC engines if required ...
	 */
	if (bgep->bge_chip_state == BGE_CHIP_RUNNING) {
		if (!bge_chip_enable_engine(bgep, TRANSMIT_MAC_MODE_REG, 0))
			retval = DDI_FAILURE;
#ifdef BGE_IPMI_ASF
		if (!bgep->asf_enabled) {
			if (!bge_chip_enable_engine(bgep,
			    RECEIVE_MAC_MODE_REG, RECEIVE_MODE_KEEP_VLAN_TAG))
				retval = DDI_FAILURE;
		} else {
			if (!bge_chip_enable_engine(bgep,
			    RECEIVE_MAC_MODE_REG, 0))
				retval = DDI_FAILURE;
		}
#else
		if (!bge_chip_enable_engine(bgep, RECEIVE_MAC_MODE_REG,
		    RECEIVE_MODE_KEEP_VLAN_TAG))
			retval = DDI_FAILURE;
#endif
	}
	/* DDI_SUCCESS unless any engine stop/reset/start above failed */
	return (retval);
}
3598 
3599 #ifndef __sparc
/*
 * BGE_REGNO_NONE-terminated list of DMA-engine mode registers to be
 * disabled by bge_chip_stop_nonblocking() during quiesce.
 */
static bge_regno_t quiesce_regs[] = {
	READ_DMA_MODE_REG,
	DMA_COMPLETION_MODE_REG,
	WRITE_DMA_MODE_REG,
	BGE_REGNO_NONE
};
3606 
3607 /*
3608  * This function is called by bge_quiesce(). We
3609  * turn off all the DMA engines here.
3610  */
3611 void
bge_chip_stop_nonblocking(bge_t * bgep)3612 bge_chip_stop_nonblocking(bge_t *bgep)
3613 {
3614 	bge_regno_t *rbp;
3615 
3616 	/*
3617 	 * Flag that no more activity may be initiated
3618 	 */
3619 	bgep->progress &= ~PROGRESS_READY;
3620 
3621 	rbp = quiesce_regs;
3622 	while (*rbp != BGE_REGNO_NONE) {
3623 		(void) bge_chip_disable_engine(bgep, *rbp, 0);
3624 		++rbp;
3625 	}
3626 
3627 	bgep->bge_chip_state = BGE_CHIP_STOPPED;
3628 }
3629 
3630 #endif
3631 
3632 /*
3633  * bge_chip_stop() -- stop all chip processing
3634  *
3635  * If the <fault> parameter is B_TRUE, we're stopping the chip because
3636  * we've detected a problem internally; otherwise, this is a normal
3637  * (clean) stop (at user request i.e. the last STREAM has been closed).
3638  */
3639 void
bge_chip_stop(bge_t * bgep,boolean_t fault)3640 bge_chip_stop(bge_t *bgep, boolean_t fault)
3641 {
3642 	bge_regno_t regno;
3643 	bge_regno_t *rbp;
3644 	boolean_t ok = B_TRUE;
3645 
3646 	BGE_TRACE(("bge_chip_stop($%p)",
3647 	    (void *)bgep));
3648 
3649 	ASSERT(mutex_owned(bgep->genlock));
3650 
3651 	pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_MHCR,
3652 	    (pci_config_get32(bgep->cfg_handle, PCI_CONF_BGE_MHCR) |
3653 	     MHCR_MASK_PCI_INT_OUTPUT));
3654 
3655 	ok &= bge_chip_disable_engine(bgep, RECEIVE_MAC_MODE_REG, 0);
3656 	ok &= bge_chip_disable_engine(bgep, RCV_BD_INITIATOR_MODE_REG, 0);
3657 	ok &= bge_chip_disable_engine(bgep, RCV_LIST_PLACEMENT_MODE_REG, 0);
3658 	ok &= bge_chip_disable_engine(bgep, RCV_LIST_SELECTOR_MODE_REG, 0);
3659 	ok &= bge_chip_disable_engine(bgep, RCV_DATA_BD_INITIATOR_MODE_REG, 0);
3660 	ok &= bge_chip_disable_engine(bgep, RCV_DATA_COMPLETION_MODE_REG, 0);
3661 	ok &= bge_chip_disable_engine(bgep, RCV_BD_COMPLETION_MODE_REG, 0);
3662 
3663 	ok &= bge_chip_disable_engine(bgep, SEND_BD_SELECTOR_MODE_REG, 0);
3664 	ok &= bge_chip_disable_engine(bgep, SEND_BD_INITIATOR_MODE_REG, 0);
3665 	ok &= bge_chip_disable_engine(bgep, SEND_DATA_INITIATOR_MODE_REG, 0);
3666 	ok &= bge_chip_disable_engine(bgep, READ_DMA_MODE_REG, 0);
3667 	ok &= bge_chip_disable_engine(bgep, SEND_DATA_COMPLETION_MODE_REG, 0);
3668 	ok &= bge_chip_disable_engine(bgep, DMA_COMPLETION_MODE_REG, 0);
3669 	ok &= bge_chip_disable_engine(bgep, SEND_BD_COMPLETION_MODE_REG, 0);
3670 	ok &= bge_chip_disable_engine(bgep, TRANSMIT_MAC_MODE_REG, 0);
3671 
3672 	bge_reg_clr32(bgep, ETHERNET_MAC_MODE_REG, ETHERNET_MODE_ENABLE_TDE);
3673 	drv_usecwait(40);
3674 
3675 	ok &= bge_chip_disable_engine(bgep, HOST_COALESCE_MODE_REG, 0);
3676 	ok &= bge_chip_disable_engine(bgep, WRITE_DMA_MODE_REG, 0);
3677 	ok &= bge_chip_disable_engine(bgep, MBUF_CLUSTER_FREE_MODE_REG, 0);
3678 	ok &= bge_chip_disable_engine(bgep, FTQ_RESET_REG, 0);
3679 	ok &= bge_chip_disable_engine(bgep, BUFFER_MANAGER_MODE_REG, 0);
3680 	ok &= bge_chip_disable_engine(bgep, MEMORY_ARBITER_MODE_REG, 0);
3681 	ok &= bge_chip_disable_engine(bgep, MEMORY_ARBITER_MODE_REG, 0);
3682 
3683 	if (!ok && !fault)
3684 		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_UNAFFECTED);
3685 
3686 	/*
3687 	 * Finally, disable (all) MAC events & clear the MAC status
3688 	 */
3689 	bge_reg_put32(bgep, ETHERNET_MAC_EVENT_ENABLE_REG, 0);
3690 	bge_reg_put32(bgep, ETHERNET_MAC_STATUS_REG, ~0);
3691 
3692 	/*
3693 	 * if we're stopping the chip because of a detected fault then do
3694 	 * appropriate actions
3695 	 */
3696 	if (fault) {
3697 		if (bgep->bge_chip_state != BGE_CHIP_FAULT) {
3698 			bgep->bge_chip_state = BGE_CHIP_FAULT;
3699 			if (!bgep->manual_reset)
3700 				ddi_fm_service_impact(bgep->devinfo,
3701 				    DDI_SERVICE_LOST);
3702 			if (bgep->bge_dma_error) {
3703 				/*
3704 				 * need to free buffers in case the fault was
3705 				 * due to a memory error in a buffer - got to
3706 				 * do a fair bit of tidying first
3707 				 */
3708 				if (bgep->progress & PROGRESS_KSTATS) {
3709 					bge_fini_kstats(bgep);
3710 					bgep->progress &= ~PROGRESS_KSTATS;
3711 				}
3712 				if (bgep->progress & PROGRESS_INTR) {
3713 					bge_intr_disable(bgep);
3714 					rw_enter(bgep->errlock, RW_WRITER);
3715 					bge_fini_rings(bgep);
3716 					rw_exit(bgep->errlock);
3717 					bgep->progress &= ~PROGRESS_INTR;
3718 				}
3719 				if (bgep->progress & PROGRESS_BUFS) {
3720 					bge_free_bufs(bgep);
3721 					bgep->progress &= ~PROGRESS_BUFS;
3722 				}
3723 				bgep->bge_dma_error = B_FALSE;
3724 			}
3725 		}
3726 	} else
3727 		bgep->bge_chip_state = BGE_CHIP_STOPPED;
3728 }
3729 
3730 /*
3731  * Poll for completion of chip's ROM firmware; also, at least on the
3732  * first time through, find and return the hardware MAC address, if any.
3733  */
3734 static uint64_t
bge_poll_firmware(bge_t * bgep)3735 bge_poll_firmware(bge_t *bgep)
3736 {
3737 	uint64_t magic;
3738 	uint64_t mac;
3739 	uint32_t gen, val;
3740 	uint32_t i;
3741 
3742 	/*
3743 	 * Step 19: poll for firmware completion (GENCOMM port set
3744 	 * to the ones complement of T3_MAGIC_NUMBER).
3745 	 *
3746 	 * While we're at it, we also read the MAC address register;
3747 	 * at some stage the firmware will load this with the
3748 	 * factory-set value.
3749 	 *
3750 	 * When both the magic number and the MAC address are set,
3751 	 * we're done; but we impose a time limit of one second
3752 	 * (1000*1000us) in case the firmware fails in some fashion
3753 	 * or the SEEPROM that provides that MAC address isn't fitted.
3754 	 *
3755 	 * After the first time through (chip state != INITIAL), we
3756 	 * don't need the MAC address to be set (we've already got it
3757 	 * or not, from the first time), so we don't wait for it, but
3758 	 * we still have to wait for the T3_MAGIC_NUMBER.
3759 	 *
3760 	 * Note: the magic number is only a 32-bit quantity, but the NIC
3761 	 * memory is 64-bit (and big-endian) internally.  Addressing the
3762 	 * GENCOMM word as "the upper half of a 64-bit quantity" makes
3763 	 * it work correctly on both big- and little-endian hosts.
3764 	 */
3765 	if (MHCR_CHIP_ASIC_REV(bgep) == MHCR_CHIP_ASIC_REV_5906) {
3766 		for (i = 0; i < 1000; ++i) {
3767 			drv_usecwait(1000);
3768 			val = bge_reg_get32(bgep, VCPU_STATUS_REG);
3769 			if (val & VCPU_INIT_DONE)
3770 				break;
3771 		}
3772 		BGE_DEBUG(("bge_poll_firmware($%p): return after %d loops",
3773 		    (void *)bgep, i));
3774 		mac = bge_reg_get64(bgep, MAC_ADDRESS_REG(0));
3775 	} else {
3776 		for (i = 0; i < 1000; ++i) {
3777 			drv_usecwait(1000);
3778 			gen = bge_nic_get64(bgep, NIC_MEM_GENCOMM) >> 32;
3779 			if (i == 0 && DEVICE_5704_SERIES_CHIPSETS(bgep))
3780 				drv_usecwait(100000);
3781 			mac = bge_reg_get64(bgep, MAC_ADDRESS_REG(0));
3782 #ifdef BGE_IPMI_ASF
3783 			if (!bgep->asf_enabled) {
3784 #endif
3785 				if (gen != ~T3_MAGIC_NUMBER)
3786 					continue;
3787 #ifdef BGE_IPMI_ASF
3788 			}
3789 #endif
3790 			if (mac != 0ULL)
3791 				break;
3792 			if (bgep->bge_chip_state != BGE_CHIP_INITIAL)
3793 				break;
3794 		}
3795 	}
3796 
3797 	magic = bge_nic_get64(bgep, NIC_MEM_GENCOMM);
3798 	BGE_DEBUG(("bge_poll_firmware($%p): PXE magic 0x%x after %d loops",
3799 	    (void *)bgep, gen, i));
3800 	BGE_DEBUG(("bge_poll_firmware: MAC %016llx, GENCOMM %016llx",
3801 	    mac, magic));
3802 
3803 	return (mac);
3804 }
3805 
3806 /*
3807  * Maximum times of trying to get the NVRAM access lock
3808  * by calling bge_nvmem_acquire()
3809  */
3810 #define	MAX_TRY_NVMEM_ACQUIRE	10000
3811 
/*
 * bge_chip_reset() -- reset the chip into the (known) RESET state
 *
 * Implements the chip reset sequence from the Broadcom programming
 * guides (570X-PG102-R / 570X-PG104-R; the "Step N" comments below
 * refer to those documents).  If the chip is currently running or
 * faulted it is stopped cleanly first.  <enable_dma> is passed
 * through to bge_chip_cfg_init().  Under BGE_IPMI_ASF, <asf_mode>
 * selects the ASF firmware handshake performed around the reset
 * (ASF_MODE_NONE/INIT/POST_INIT/SHUTDOWN).
 *
 * Returns DDI_SUCCESS, or DDI_FAILURE if any engine failed to
 * enable/reset or the SEND INDEX register didn't clear.  Must be
 * called with genlock held.
 */
int
#ifdef BGE_IPMI_ASF
bge_chip_reset(bge_t *bgep, boolean_t enable_dma, uint_t asf_mode)
#else
bge_chip_reset(bge_t *bgep, boolean_t enable_dma)
#endif
{
	chip_id_t chipid;
	uint64_t mac;
	uint64_t magic;
	uint32_t tmp;
	uint32_t mhcr_base;
	uint32_t mhcr;
	uint32_t sx0;
	uint32_t i, tries;
#ifdef BGE_IPMI_ASF
	uint32_t mailbox;
#endif
	int retval = DDI_SUCCESS;

	BGE_TRACE(("bge_chip_reset($%p, %d)",
		(void *)bgep, enable_dma));

	ASSERT(mutex_owned(bgep->genlock));

	BGE_DEBUG(("bge_chip_reset($%p, %d): current state is %d",
		(void *)bgep, enable_dma, bgep->bge_chip_state));

	/*
	 * Do we need to stop the chip cleanly before resetting?
	 */
	switch (bgep->bge_chip_state) {
	default:
		_NOTE(NOTREACHED)
		return (DDI_FAILURE);

	case BGE_CHIP_INITIAL:
	case BGE_CHIP_STOPPED:
	case BGE_CHIP_RESET:
		break;

	case BGE_CHIP_RUNNING:
	case BGE_CHIP_ERROR:
	case BGE_CHIP_FAULT:
		bge_chip_stop(bgep, B_FALSE);
		break;
	}

	/*
	 * Base MHCR value used for every MHCR write below; the
	 * endianness-swap bits are ORed in separately on big-endian
	 * hosts.  Interrupts stay masked throughout the reset.
	 */
	mhcr_base = MHCR_ENABLE_INDIRECT_ACCESS |
	            MHCR_ENABLE_PCI_STATE_RW |
	            MHCR_ENABLE_TAGGED_STATUS_MODE |
	            MHCR_MASK_INTERRUPT_MODE |
	            MHCR_MASK_PCI_INT_OUTPUT |
	            MHCR_CLEAR_INTERRUPT_INTA;

#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled) {
		/*
		 * With ASF firmware present, the memory arbiter must be
		 * running and the pre-reset handshake performed before
		 * we touch the core clock.
		 */
		mhcr = mhcr_base;
#ifdef _BIG_ENDIAN
		mhcr |= (MHCR_ENABLE_ENDIAN_WORD_SWAP |
		         MHCR_ENABLE_ENDIAN_BYTE_SWAP);
#endif
		pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_MHCR, mhcr);

		bge_reg_put32(bgep, MEMORY_ARBITER_MODE_REG,
			bge_reg_get32(bgep, MEMORY_ARBITER_MODE_REG) |
			MEMORY_ARBITER_ENABLE);

		if (asf_mode == ASF_MODE_INIT) {
			bge_asf_pre_reset_operations(bgep, BGE_INIT_RESET);
		} else if (asf_mode == ASF_MODE_SHUTDOWN) {
			bge_asf_pre_reset_operations(bgep, BGE_SHUTDOWN_RESET);
		}
	}
#endif

	/*
	 * Adapted from Broadcom document 570X-PG102-R, pp 102-116.
	 * Updated to reflect Broadcom document 570X-PG104-R, pp 146-159.
	 *
	 * Before reset Core clock,it is
	 * also required to initialize the Memory Arbiter as specified in step9
	 * and Misc Host Control Register as specified in step-13
	 * Step 4-5: reset Core clock & wait for completion
	 * Steps 6-8: are done by bge_chip_cfg_init()
	 * put the T3_MAGIC_NUMBER into the GENCOMM port before reset
	 */
	if (!bge_chip_enable_engine(bgep, MEMORY_ARBITER_MODE_REG, 0))
		retval = DDI_FAILURE;

	mhcr = mhcr_base;
#ifdef _BIG_ENDIAN
	mhcr |= (MHCR_ENABLE_ENDIAN_WORD_SWAP |
	         MHCR_ENABLE_ENDIAN_BYTE_SWAP);
#endif
	pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_MHCR, mhcr);

#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled)
		bgep->asf_wordswapped = B_FALSE;
#endif
	/*
	 * NVRAM Corruption Workaround: take the NVRAM access lock (with
	 * a bounded number of retries) so the reset can't corrupt an
	 * in-progress NVRAM transaction.
	 */
	for (tries = 0; tries < MAX_TRY_NVMEM_ACQUIRE; tries++)
		if (bge_nvmem_acquire(bgep) != EAGAIN)
			break;
	if (tries >= MAX_TRY_NVMEM_ACQUIRE)
		BGE_DEBUG(("%s: fail to acquire nvram lock",
			bgep->ifname));

	/* Hold the GRC APE lock across the reset itself */
	bge_ape_lock(bgep, BGE_APE_LOCK_GRC);

#ifdef BGE_IPMI_ASF
	if (!bgep->asf_enabled) {
#endif
		/*
		 * Write T3_MAGIC_NUMBER into the upper half of the
		 * GENCOMM word; the bootcode replaces it with its
		 * ones-complement when initialisation is complete
		 * (see bge_poll_firmware()).
		 */
		magic = (uint64_t)T3_MAGIC_NUMBER << 32;
		bge_nic_put64(bgep, NIC_MEM_GENCOMM, magic);
#ifdef BGE_IPMI_ASF
	}
#endif

	if (DEVICE_5717_SERIES_CHIPSETS(bgep) ||
	    DEVICE_5725_SERIES_CHIPSETS(bgep) ||
	    DEVICE_57765_SERIES_CHIPSETS(bgep)) {
		/*
		 * NOTE(review): bge_reg_set32(..., 0) reads as a
		 * read-modify-write no-op on FAST_BOOT_PC; other drivers
		 * clear this register outright before reset -- confirm
		 * the intended semantics against the Broadcom docs.
		 */
		bge_reg_set32(bgep, FAST_BOOT_PC, 0);
		if (!bge_chip_enable_engine(bgep, MEMORY_ARBITER_MODE_REG, 0))
			retval = DDI_FAILURE;
	}

	/* Re-assert the MHCR; the core-clock reset clobbers config space */
	mhcr = mhcr_base;
#ifdef _BIG_ENDIAN
	mhcr |= (MHCR_ENABLE_ENDIAN_WORD_SWAP |
	         MHCR_ENABLE_ENDIAN_BYTE_SWAP);
#endif
	pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_MHCR, mhcr);

	/* Steps 4-5: reset the core clock & wait for completion */
	if (!bge_chip_reset_engine(bgep, MISC_CONFIG_REG))
		retval = DDI_FAILURE;

	/* Steps 6-8: re-initialise PCI config space after the reset */
	bge_chip_cfg_init(bgep, &chipid, enable_dma);

	/*
	 * Step 8a: This may belong elsewhere, but BCM5721 needs
	 * a bit set to avoid a fifo overflow/underflow bug.
	 */
	if ((bgep->chipid.chip_label == 5721) ||
		(bgep->chipid.chip_label == 5751) ||
		(bgep->chipid.chip_label == 5752) ||
		(bgep->chipid.chip_label == 5755) ||
		(bgep->chipid.chip_label == 5756) ||
		(bgep->chipid.chip_label == 5789) ||
		(bgep->chipid.chip_label == 5906))
		bge_reg_set32(bgep, TLP_CONTROL_REG, TLP_DATA_FIFO_PROTECT);

	/*
	 * In the 57765 family of devices we need to work around an apparent
	 * transmit hang by dorking with the PCIe serdes training clocks.
	 */
	if (DEVICE_57765_SERIES_CHIPSETS(bgep) &&
	    (CHIP_ASIC_REV_PROD_ID(bgep) >> 8) != CHIP_ASIC_REV_57765_AX) {
		tmp = bge_reg_get32(bgep, CPMU_PADRNG_CTL_REG);
		tmp |= CPMU_PADRNG_CTL_RDIV2;
		bge_reg_set32(bgep, CPMU_PADRNG_CTL_REG, tmp);
	}


	/*
	 * Step 9: enable MAC memory arbiter,bit30 and bit31 of 5714/5715 should
	 * not be changed.
	 */
	if (!bge_chip_enable_engine(bgep, MEMORY_ARBITER_MODE_REG, 0))
		retval = DDI_FAILURE;

	/*
	 * Steps 10-11: configure PIO endianness options and
	 * enable indirect register access -- already done
	 * Steps 12-13: enable writing to the PCI state & clock
	 * control registers -- not required; we aren't going to
	 * use those features.
	 * Steps 14-15: Configure DMA endianness options.  See
	 * the comments on the setting of the MHCR above.
	 */
	tmp = MODE_WORD_SWAP_FRAME | MODE_BYTE_SWAP_FRAME;
#ifdef _BIG_ENDIAN
	tmp |= (MODE_WORD_SWAP_NONFRAME | MODE_BYTE_SWAP_NONFRAME);
#endif
#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled)
		tmp |= MODE_HOST_STACK_UP;
#endif
	bge_reg_put32(bgep, MODE_CONTROL_REG, tmp);

#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled) {
#ifdef __sparc
		bge_reg_put32(bgep, MEMORY_ARBITER_MODE_REG,
			MEMORY_ARBITER_ENABLE |
			bge_reg_get32(bgep, MEMORY_ARBITER_MODE_REG));
#endif

#ifdef  BGE_NETCONSOLE
		/* Old-style ASF handshake runs BEFORE the NVRAM wait here */
		if (!bgep->asf_newhandshake) {
			if ((asf_mode == ASF_MODE_INIT) ||
			(asf_mode == ASF_MODE_POST_INIT)) {
				bge_asf_post_reset_old_mode(bgep,
					BGE_INIT_RESET);
			} else {
				bge_asf_post_reset_old_mode(bgep,
					BGE_SHUTDOWN_RESET);
			}
		}
#endif

		/* Wait for NVRAM init */
		i = 0;
		drv_usecwait(5000);
		mailbox = bge_nic_get32(bgep, BGE_FIRMWARE_MAILBOX);

		/* Poll up to 10000 * 100us (~1s) for the firmware ack */
		while ((mailbox != (uint32_t)
			~BGE_MAGIC_NUM_FIRMWARE_INIT_DONE) &&
			(i < 10000)) {
			drv_usecwait(100);
			mailbox = bge_nic_get32(bgep,
				BGE_FIRMWARE_MAILBOX);
			i++;
		}

#ifndef BGE_NETCONSOLE
		/* ... and AFTER the NVRAM wait when netconsole is disabled */
		if (!bgep->asf_newhandshake) {
			if ((asf_mode == ASF_MODE_INIT) ||
				(asf_mode == ASF_MODE_POST_INIT)) {

				bge_asf_post_reset_old_mode(bgep,
					BGE_INIT_RESET);
			} else {
				bge_asf_post_reset_old_mode(bgep,
					BGE_SHUTDOWN_RESET);
			}
		}
#endif
	}
#endif

	bge_ape_unlock(bgep, BGE_APE_LOCK_GRC);

	/*
	 * Steps 16-17: poll for firmware completion
	 */
	mac = bge_poll_firmware(bgep);

	/* 5720 only: drop the MAC clock override once firmware is up */
	if (bgep->chipid.device == DEVICE_ID_5720) {
		tmp = bge_reg_get32(bgep, CPMU_CLCK_ORIDE_REG);
		bge_reg_put32(bgep, CPMU_CLCK_ORIDE_REG,
		              (tmp & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN));
	}

	/*
	 * Step 18: enable external memory -- doesn't apply.
	 *
	 * However we take the opportunity to set the MLCR anyway, as
	 * this register also controls the SEEPROM auto-access method
	 * which we may want to use later ...
	 *
	 * The proper value here depends on the way the chip is wired
	 * into the circuit board, as this register *also* controls which
	 * of the "Miscellaneous I/O" pins are driven as outputs and the
	 * values driven onto those pins!
	 *
	 * See also step 74 in the PRM ...
	 */
	bge_reg_put32(bgep, MISC_LOCAL_CONTROL_REG,
	    bgep->chipid.bge_mlcr_default);

	/* 5714-family SerDes: force the signal-detect bit on */
	if ((bgep->chipid.flags & CHIP_FLAG_SERDES) &&
	    DEVICE_5714_SERIES_CHIPSETS(bgep)) {
		tmp = bge_reg_get32(bgep, SERDES_RX_CONTROL);
		tmp |= SERDES_RX_CONTROL_SIG_DETECT;
		bge_reg_put32(bgep, SERDES_RX_CONTROL, tmp);
	}

	bge_reg_set32(bgep, SERIAL_EEPROM_ADDRESS_REG, SEEPROM_ACCESS_INIT);

	/*
	 * Step 20: clear the Ethernet MAC mode register
	 * (keeping the APE TX/RX paths enabled when APE firmware is
	 * present, so the management firmware retains network access)
	 */
	if (bgep->ape_enabled)
		bge_reg_put32(bgep, ETHERNET_MAC_MODE_REG,
		    ETHERNET_MODE_APE_TX_EN | ETHERNET_MODE_APE_RX_EN);
	else
		bge_reg_put32(bgep, ETHERNET_MAC_MODE_REG, 0);

	/*
	 * Step 21: restore cache-line-size, latency timer, and
	 * subsystem ID registers to their original values (not
	 * those read into the local structure <chipid>, 'cos
	 * that was after they were cleared by the RESET).
	 *
	 * Note: the Subsystem Vendor/Device ID registers are not
	 * directly writable in config space, so we use the shadow
	 * copy in "Page Zero" of register space to restore them
	 * both in one go ...
	 */
	pci_config_put8(bgep->cfg_handle, PCI_CONF_CACHE_LINESZ,
		bgep->chipid.clsize);
	pci_config_put8(bgep->cfg_handle, PCI_CONF_LATENCY_TIMER,
		bgep->chipid.latency);
	bge_reg_put32(bgep, PCI_CONF_SUBVENID,
		(bgep->chipid.subdev << 16) | bgep->chipid.subven);

	/*
	 * The SEND INDEX registers should be reset to zero by the
	 * global chip reset; if they're not, there'll be trouble
	 * later on.
	 */
	sx0 = bge_reg_get32(bgep, NIC_DIAG_SEND_INDEX_REG(0));
	if (sx0 != 0) {
		BGE_REPORT((bgep, "SEND INDEX - device didn't RESET"));
		bge_fm_ereport(bgep, DDI_FM_DEVICE_INVAL_STATE);
		retval = DDI_FAILURE;
	}

	/* Enable MSI code */
	if (bgep->intr_type == DDI_INTR_TYPE_MSI)
		bge_reg_set32(bgep, MSI_MODE_REG,
		    MSI_PRI_HIGHEST|MSI_MSI_ENABLE|MSI_ERROR_ATTENTION);

	/*
	 * On the first time through, save the factory-set MAC address
	 * (if any).  If bge_poll_firmware() above didn't return one
	 * (from a chip register) consider looking in the attached NV
	 * memory device, if any.  Once we have it, we save it in both
	 * register-image (64-bit) and byte-array forms.  All-zero and
	 * all-one addresses are not valid, and we refuse to stash those.
	 */
	if (bgep->bge_chip_state == BGE_CHIP_INITIAL) {
		if (mac == 0ULL)
			mac = bge_get_nvmac(bgep);
		if (mac != 0ULL && mac != ~0ULL) {
			bgep->chipid.hw_mac_addr = mac;
			/* unpack low-order bytes, last octet first */
			for (i = ETHERADDRL; i-- != 0; ) {
				bgep->chipid.vendor_addr.addr[i] = (uchar_t)mac;
				mac >>= 8;
			}
			bgep->chipid.vendor_addr.set = B_TRUE;
		}
	}

#ifdef BGE_IPMI_ASF
	/* New-style ASF handshake happens after everything else */
	if (bgep->asf_enabled && bgep->asf_newhandshake) {
		if (asf_mode != ASF_MODE_NONE) {
			if ((asf_mode == ASF_MODE_INIT) ||
				(asf_mode == ASF_MODE_POST_INIT)) {

				bge_asf_post_reset_new_mode(bgep,
					BGE_INIT_RESET);
			} else {
				bge_asf_post_reset_new_mode(bgep,
					BGE_SHUTDOWN_RESET);
			}
		}
	}
#endif

	/*
	 * Record the new state
	 */
	bgep->chip_resets += 1;
	bgep->bge_chip_state = BGE_CHIP_RESET;
	return (retval);
}
4183 
4184 /*
4185  * bge_chip_start() -- start the chip transmitting and/or receiving,
4186  * including enabling interrupts
4187  */
4188 
4189 void
bge_chip_coalesce_update(bge_t * bgep)4190 bge_chip_coalesce_update(bge_t *bgep)
4191 {
4192 	bge_reg_put32(bgep, SEND_COALESCE_MAX_BD_REG,
4193 	    bgep->chipid.tx_count_norm);
4194 	bge_reg_put32(bgep, SEND_COALESCE_TICKS_REG,
4195 	    bgep->chipid.tx_ticks_norm);
4196 	bge_reg_put32(bgep, RCV_COALESCE_MAX_BD_REG,
4197 	    bgep->chipid.rx_count_norm);
4198 	bge_reg_put32(bgep, RCV_COALESCE_TICKS_REG,
4199 	    bgep->chipid.rx_ticks_norm);
4200 }
4201 
4202 int
bge_chip_start(bge_t * bgep,boolean_t reset_phys)4203 bge_chip_start(bge_t *bgep, boolean_t reset_phys)
4204 {
4205 	uint32_t coalmode;
4206 	uint32_t ledctl;
4207 	uint32_t mtu;
4208 	uint32_t maxring;
4209 	uint32_t stats_mask;
4210 	uint32_t dma_wrprio;
4211 	uint64_t ring;
4212 	uint32_t reg;
4213 	uint32_t regval;
4214 	uint32_t mhcr;
4215 	int retval = DDI_SUCCESS;
4216 	int i;
4217 
4218 	BGE_TRACE(("bge_chip_start($%p)",
4219 	    (void *)bgep));
4220 
4221 	ASSERT(mutex_owned(bgep->genlock));
4222 	ASSERT(bgep->bge_chip_state == BGE_CHIP_RESET);
4223 
4224 	/* Initialize EEE, enable MAC control of LPI */
4225 	bge_eee_init(bgep);
4226 
4227 	if (bgep->ape_enabled) {
4228 		/*
4229 		 * Allow reads and writes to the
4230 		 * APE register and memory space.
4231 		 */
4232 		regval = pci_config_get32(bgep->cfg_handle,
4233 		    PCI_CONF_BGE_PCISTATE);
4234 		regval |= PCISTATE_ALLOW_APE_CTLSPC_WR |
4235 		    PCISTATE_ALLOW_APE_SHMEM_WR | PCISTATE_ALLOW_APE_PSPACE_WR;
4236 		pci_config_put32(bgep->cfg_handle,
4237 		    PCI_CONF_BGE_PCISTATE, regval);
4238 	}
4239 
4240 	/*
4241 	 * Taken from Broadcom document 570X-PG102-R, pp 102-116.
4242 	 * The document specifies 95 separate steps to fully
4243 	 * initialise the chip!!!!
4244 	 *
4245 	 * The reset code above has already got us as far as step
4246 	 * 21, so we continue with ...
4247 	 *
4248 	 * Step 22: clear the MAC statistics block
4249 	 * (0x0300-0x0aff in NIC-local memory)
4250 	 */
4251 	if (bgep->chipid.statistic_type == BGE_STAT_BLK)
4252 		bge_nic_zero(bgep, NIC_MEM_STATISTICS,
4253 		    NIC_MEM_STATISTICS_SIZE);
4254 
4255 	/*
4256 	 * Step 23: clear the status block (in host memory)
4257 	 */
4258 	DMA_ZERO(bgep->status_block);
4259 
4260 	/*
4261 	 * Step 24: set DMA read/write control register
4262 	 */
4263 	pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_PDRWCR,
4264 	    bgep->chipid.bge_dma_rwctrl);
4265 
4266 	/*
4267 	 * Step 25: Configure DMA endianness -- already done (16/17)
4268 	 * Step 26: Configure Host-Based Send Rings
4269 	 * Step 27: Indicate Host Stack Up
4270 	 */
4271 	bge_reg_set32(bgep, MODE_CONTROL_REG,
4272 	    MODE_HOST_SEND_BDS |
4273 	    MODE_HOST_STACK_UP);
4274 
4275 	if (DEVICE_5717_SERIES_CHIPSETS(bgep) ||
4276 	    DEVICE_5725_SERIES_CHIPSETS(bgep) ||
4277 	    DEVICE_57765_SERIES_CHIPSETS(bgep)) {
4278 		reg = (CHIP_ASIC_REV(bgep) == CHIP_ASIC_REV_5762)
4279 		          ? RDMA_RSRV_CTRL_REG2 : RDMA_RSRV_CTRL_REG;
4280 		regval = bge_reg_get32(bgep, reg);
4281 		if ((bgep->chipid.device == DEVICE_ID_5719) ||
4282 		    (bgep->chipid.device == DEVICE_ID_5720) ||
4283 		    (CHIP_ASIC_REV(bgep) == CHIP_ASIC_REV_5762)) {
4284 			regval &= ~(RDMA_RSRV_CTRL_TXMRGN_MASK |
4285 			            RDMA_RSRV_CTRL_FIFO_LWM_MASK |
4286 			            RDMA_RSRV_CTRL_FIFO_HWM_MASK);
4287 			regval |= (RDMA_RSRV_CTRL_TXMRGN_320B |
4288 			           RDMA_RSRV_CTRL_FIFO_LWM_1_5K |
4289 			           RDMA_RSRV_CTRL_FIFO_HWM_1_5K);
4290 		}
4291 		/* Enable the DMA FIFO Overrun fix. */
4292 		bge_reg_put32(bgep, reg,
4293 		    (regval | RDMA_RSRV_CTRL_FIFO_OFLW_FIX));
4294 
4295 		if ((CHIP_ASIC_REV(bgep) == CHIP_ASIC_REV_5719) ||
4296 		    (CHIP_ASIC_REV(bgep) == CHIP_ASIC_REV_5720) ||
4297 		    (CHIP_ASIC_REV(bgep) == CHIP_ASIC_REV_5762)) {
4298 			reg = (CHIP_ASIC_REV(bgep) == CHIP_ASIC_REV_5762)
4299 			          ? RDMA_CORR_CTRL_REG2 : RDMA_CORR_CTRL_REG;
4300 			regval = bge_reg_get32(bgep, reg);
4301 			bge_reg_put32(bgep, reg, (regval |
4302 			                          RDMA_CORR_CTRL_BLEN_BD_4K |
4303 			                          RDMA_CORR_CTRL_BLEN_LSO_4K));
4304 		}
4305 	}
4306 
4307 	/*
4308 	 * Step 28: Configure checksum options:
4309 	 *	Solaris supports the hardware default checksum options.
4310 	 *
4311 	 *	Workaround for Incorrect pseudo-header checksum calculation.
4312 	 */
4313 	if (bgep->chipid.flags & CHIP_FLAG_PARTIAL_CSUM)
4314 		bge_reg_set32(bgep, MODE_CONTROL_REG,
4315 		    MODE_SEND_NO_PSEUDO_HDR_CSUM);
4316 
4317 	/*
4318 	 * Step 29: configure Timer Prescaler.  The value is always the
4319 	 * same: the Core Clock frequency in MHz (66), minus 1, shifted
4320 	 * into bits 7-1.  Don't set bit 0, 'cos that's the RESET bit
4321 	 * for the whole chip!
4322 	 */
4323 	regval = bge_reg_get32(bgep, MISC_CONFIG_REG);
4324 	regval = (regval & 0xffffff00) | MISC_CONFIG_DEFAULT;
4325 	bge_reg_put32(bgep, MISC_CONFIG_REG, regval);
4326 
4327 	if (DEVICE_5906_SERIES_CHIPSETS(bgep)) {
4328 		drv_usecwait(40);
4329 		/* put PHY into ready state */
4330 		bge_reg_clr32(bgep, MISC_CONFIG_REG, MISC_CONFIG_EPHY_IDDQ);
4331 		(void) bge_reg_get32(bgep, MISC_CONFIG_REG); /* flush */
4332 		drv_usecwait(40);
4333 	}
4334 
4335 	/*
4336 	 * Steps 30-31: Configure MAC local memory pool & DMA pool registers
4337 	 *
4338 	 * If the mbuf_length is specified as 0, we just leave these at
4339 	 * their hardware defaults, rather than explicitly setting them.
4340 	 * As the Broadcom HRM,driver better not change the parameters
4341 	 * when the chipsets is 5705/5788/5721/5751/5714 and 5715.
4342 	 */
4343 	if ((bgep->chipid.mbuf_length != 0) &&
4344 	    (DEVICE_5704_SERIES_CHIPSETS(bgep))) {
4345 			bge_reg_put32(bgep, MBUF_POOL_BASE_REG,
4346 			    bgep->chipid.mbuf_base);
4347 			bge_reg_put32(bgep, MBUF_POOL_LENGTH_REG,
4348 			    bgep->chipid.mbuf_length);
4349 			bge_reg_put32(bgep, DMAD_POOL_BASE_REG,
4350 			    DMAD_POOL_BASE_DEFAULT);
4351 			bge_reg_put32(bgep, DMAD_POOL_LENGTH_REG,
4352 			    DMAD_POOL_LENGTH_DEFAULT);
4353 	}
4354 
4355 	/*
4356 	 * Step 32: configure MAC memory pool watermarks
4357 	 */
4358 	bge_reg_put32(bgep, RDMA_MBUF_LOWAT_REG,
4359 	    bgep->chipid.mbuf_lo_water_rdma);
4360 	bge_reg_put32(bgep, MAC_RX_MBUF_LOWAT_REG,
4361 	    bgep->chipid.mbuf_lo_water_rmac);
4362 	bge_reg_put32(bgep, MBUF_HIWAT_REG,
4363 	    bgep->chipid.mbuf_hi_water);
4364 
4365 	/*
4366 	 * Step 33: configure DMA resource watermarks
4367 	 */
4368 	if (DEVICE_5704_SERIES_CHIPSETS(bgep)) {
4369 		bge_reg_put32(bgep, DMAD_POOL_LOWAT_REG,
4370 		    bge_dmad_lo_water);
4371 		bge_reg_put32(bgep, DMAD_POOL_HIWAT_REG,
4372 		    bge_dmad_hi_water);
4373 	}
4374 	bge_reg_put32(bgep, LOWAT_MAX_RECV_FRAMES_REG, bge_lowat_recv_frames);
4375 
4376 	/*
4377 	 * Steps 34-36: enable buffer manager & internal h/w queues
4378 	 */
4379 	regval = STATE_MACHINE_ATTN_ENABLE_BIT;
4380 	if (bgep->chipid.device == DEVICE_ID_5719)
4381 		regval |= BUFFER_MANAGER_MODE_NO_TX_UNDERRUN;
4382 	if (DEVICE_5717_SERIES_CHIPSETS(bgep) ||
4383 	    DEVICE_5725_SERIES_CHIPSETS(bgep) ||
4384 	    DEVICE_57765_SERIES_CHIPSETS(bgep))
4385 		regval |= BUFFER_MANAGER_MODE_MBLOW_ATTN_ENABLE;
4386 	if (!bge_chip_enable_engine(bgep, BUFFER_MANAGER_MODE_REG, regval))
4387 		retval = DDI_FAILURE;
4388 
4389 	if (!bge_chip_enable_engine(bgep, FTQ_RESET_REG, 0))
4390 		retval = DDI_FAILURE;
4391 
4392 	/*
4393 	 * Steps 37-39: initialise Receive Buffer (Producer) RCBs
4394 	 */
4395 	if (DEVICE_5717_SERIES_CHIPSETS(bgep) ||
4396 	    DEVICE_5725_SERIES_CHIPSETS(bgep) ||
4397 	    DEVICE_57765_SERIES_CHIPSETS(bgep)) {
4398 		buff_ring_t *brp = &bgep->buff[BGE_STD_BUFF_RING];
4399 		bge_reg_put64(bgep, STD_RCV_BD_RING_RCB_REG,
4400 		    brp->desc.cookie.dmac_laddress);
4401 		bge_reg_put32(bgep, STD_RCV_BD_RING_RCB_REG + 8,
4402 		    (brp->desc.nslots) << 16 | brp->buf[0].size << 2);
4403 		if (DEVICE_57765_SERIES_CHIPSETS(bgep)) {
4404 			bge_reg_put32(bgep, STD_RCV_BD_RING_RCB_REG + 0xc,
4405 			    NIC_MEM_SHADOW_BUFF_STD);
4406 		} else {
4407 			bge_reg_put32(bgep, STD_RCV_BD_RING_RCB_REG + 0xc,
4408 			    NIC_MEM_SHADOW_BUFF_STD_5717);
4409 		}
4410 	} else {
4411 		bge_reg_putrcb(bgep, STD_RCV_BD_RING_RCB_REG,
4412 		    &bgep->buff[BGE_STD_BUFF_RING].hw_rcb);
4413 	}
4414 
4415 	if (DEVICE_5704_SERIES_CHIPSETS(bgep)) {
4416 		bge_reg_putrcb(bgep, JUMBO_RCV_BD_RING_RCB_REG,
4417 		    &bgep->buff[BGE_JUMBO_BUFF_RING].hw_rcb);
4418 		bge_reg_putrcb(bgep, MINI_RCV_BD_RING_RCB_REG,
4419 		    &bgep->buff[BGE_MINI_BUFF_RING].hw_rcb);
4420 	}
4421 
4422 	/*
4423 	 * Step 40: set Receive Buffer Descriptor Ring replenish thresholds
4424 	 */
4425 	bge_reg_put32(bgep, STD_RCV_BD_REPLENISH_REG, bge_replenish_std);
4426 	if (DEVICE_5704_SERIES_CHIPSETS(bgep)) {
4427 		bge_reg_put32(bgep, JUMBO_RCV_BD_REPLENISH_REG,
4428 		    bge_replenish_jumbo);
4429 		bge_reg_put32(bgep, MINI_RCV_BD_REPLENISH_REG,
4430 		    bge_replenish_mini);
4431 	}
4432 
4433 	/*
4434 	 * Steps 41-43: clear Send Ring Producer Indices and initialise
4435 	 * Send Producer Rings (0x0100-0x01ff in NIC-local memory)
4436 	 */
4437 	if (DEVICE_5704_SERIES_CHIPSETS(bgep))
4438 		maxring = BGE_SEND_RINGS_MAX;
4439 	else
4440 		maxring = BGE_SEND_RINGS_MAX_5705;
4441 	for (ring = 0; ring < maxring; ++ring) {
4442 		bge_mbx_put(bgep, SEND_RING_HOST_INDEX_REG(ring), 0);
4443 		bge_mbx_put(bgep, SEND_RING_NIC_INDEX_REG(ring), 0);
4444 		bge_nic_putrcb(bgep, NIC_MEM_SEND_RING(ring),
4445 		    &bgep->send[ring].hw_rcb);
4446 	}
4447 
4448 	/*
4449 	 * Steps 44-45: initialise Receive Return Rings
4450 	 * (0x0200-0x02ff in NIC-local memory)
4451 	 */
4452 	if (DEVICE_5704_SERIES_CHIPSETS(bgep))
4453 		maxring = BGE_RECV_RINGS_MAX;
4454 	else
4455 		maxring = BGE_RECV_RINGS_MAX_5705;
4456 	for (ring = 0; ring < maxring; ++ring)
4457 		bge_nic_putrcb(bgep, NIC_MEM_RECV_RING(ring),
4458 		    &bgep->recv[ring].hw_rcb);
4459 
4460 	/*
4461 	 * Step 46: initialise Receive Buffer (Producer) Ring indexes
4462 	 */
4463 	bge_mbx_put(bgep, RECV_STD_PROD_INDEX_REG, 0);
4464 	if (DEVICE_5704_SERIES_CHIPSETS(bgep)) {
4465 		bge_mbx_put(bgep, RECV_JUMBO_PROD_INDEX_REG, 0);
4466 		bge_mbx_put(bgep, RECV_MINI_PROD_INDEX_REG, 0);
4467 	}
4468 	/*
4469 	 * Step 47: configure the MAC unicast address
4470 	 * Step 48: configure the random backoff seed
4471 	 * Step 96: set up multicast filters
4472 	 */
4473 #ifdef BGE_IPMI_ASF
4474 	if (bge_chip_sync(bgep, B_FALSE) == DDI_FAILURE)
4475 #else
4476 	if (bge_chip_sync(bgep) == DDI_FAILURE)
4477 #endif
4478 		retval = DDI_FAILURE;
4479 
4480 	/*
4481 	 * Step 49: configure the MTU
4482 	 */
4483 	mtu = bgep->chipid.ethmax_size+ETHERFCSL+VLAN_TAGSZ;
4484 	bge_reg_put32(bgep, MAC_RX_MTU_SIZE_REG, mtu);
4485 
4486 	/*
4487 	 * Step 50: configure the IPG et al
4488 	 */
4489 	bge_reg_put32(bgep, MAC_TX_LENGTHS_REG, MAC_TX_LENGTHS_DEFAULT);
4490 
4491 	/*
4492 	 * Step 51: configure the default Rx Return Ring
4493 	 */
4494 	bge_reg_put32(bgep, RCV_RULES_CONFIG_REG, RCV_RULES_CONFIG_DEFAULT);
4495 
4496 	/*
4497 	 * Steps 52-54: configure Receive List Placement,
4498 	 * and enable Receive List Placement Statistics
4499 	 */
4500 	bge_reg_put32(bgep, RCV_LP_CONFIG_REG,
4501 	    RCV_LP_CONFIG(bgep->chipid.rx_rings));
4502 	switch (MHCR_CHIP_ASIC_REV(bgep)) {
4503 	case MHCR_CHIP_ASIC_REV_5700:
4504 	case MHCR_CHIP_ASIC_REV_5701:
4505 	case MHCR_CHIP_ASIC_REV_5703:
4506 	case MHCR_CHIP_ASIC_REV_5704:
4507 		bge_reg_put32(bgep, RCV_LP_STATS_ENABLE_MASK_REG, ~0);
4508 		break;
4509 	case MHCR_CHIP_ASIC_REV_5705:
4510 		break;
4511 	default:
4512 		stats_mask = bge_reg_get32(bgep, RCV_LP_STATS_ENABLE_MASK_REG);
4513 		stats_mask &= ~RCV_LP_STATS_DISABLE_MACTQ;
4514 		bge_reg_put32(bgep, RCV_LP_STATS_ENABLE_MASK_REG, stats_mask);
4515 		break;
4516 	}
4517 	bge_reg_set32(bgep, RCV_LP_STATS_CONTROL_REG, RCV_LP_STATS_ENABLE);
4518 
4519 	if (bgep->chipid.rx_rings > 1)
4520 		bge_init_recv_rule(bgep);
4521 
4522 	/*
4523 	 * Steps 55-56: enable Send Data Initiator Statistics
4524 	 */
4525 	bge_reg_put32(bgep, SEND_INIT_STATS_ENABLE_MASK_REG, ~0);
4526 	if (DEVICE_5704_SERIES_CHIPSETS(bgep)) {
4527 		bge_reg_put32(bgep, SEND_INIT_STATS_CONTROL_REG,
4528 		    SEND_INIT_STATS_ENABLE | SEND_INIT_STATS_FASTER);
4529 	} else {
4530 		bge_reg_put32(bgep, SEND_INIT_STATS_CONTROL_REG,
4531 		    SEND_INIT_STATS_ENABLE);
4532 	}
4533 	/*
4534 	 * Steps 57-58: stop (?) the Host Coalescing Engine
4535 	 */
4536 	if (!bge_chip_disable_engine(bgep, HOST_COALESCE_MODE_REG, ~0))
4537 		retval = DDI_FAILURE;
4538 
4539 	/*
4540 	 * Steps 59-62: initialise Host Coalescing parameters
4541 	 */
4542 	bge_chip_coalesce_update(bgep);
4543 	if (DEVICE_5704_SERIES_CHIPSETS(bgep)) {
4544 		bge_reg_put32(bgep, SEND_COALESCE_INT_BD_REG,
4545 		    bge_tx_count_intr);
4546 		bge_reg_put32(bgep, SEND_COALESCE_INT_TICKS_REG,
4547 		    bge_tx_ticks_intr);
4548 		bge_reg_put32(bgep, RCV_COALESCE_INT_BD_REG,
4549 		    bge_rx_count_intr);
4550 		bge_reg_put32(bgep, RCV_COALESCE_INT_TICKS_REG,
4551 		    bge_rx_ticks_intr);
4552 	}
4553 
4554 	/*
4555 	 * Steps 63-64: initialise status block & statistics
4556 	 * host memory addresses
4557 	 * The statistic block does not exist in some chipsets
4558 	 * Step 65: initialise Statistics Coalescing Tick Counter
4559 	 */
4560 	bge_reg_put64(bgep, STATUS_BLOCK_HOST_ADDR_REG,
4561 	    bgep->status_block.cookie.dmac_laddress);
4562 
4563 	/*
4564 	 * Steps 66-67: initialise status block & statistics
4565 	 * NIC-local memory addresses
4566 	 */
4567 	if (DEVICE_5704_SERIES_CHIPSETS(bgep)) {
4568 		bge_reg_put64(bgep, STATISTICS_HOST_ADDR_REG,
4569 		    bgep->statistics.cookie.dmac_laddress);
4570 		bge_reg_put32(bgep, STATISTICS_TICKS_REG,
4571 		    STATISTICS_TICKS_DEFAULT);
4572 		bge_reg_put32(bgep, STATUS_BLOCK_BASE_ADDR_REG,
4573 		    NIC_MEM_STATUS_BLOCK);
4574 		bge_reg_put32(bgep, STATISTICS_BASE_ADDR_REG,
4575 		    NIC_MEM_STATISTICS);
4576 	}
4577 
4578 	/*
4579 	 * Steps 68-71: start the Host Coalescing Engine, the Receive BD
4580 	 * Completion Engine, the Receive List Placement Engine, and the
4581 	 * Receive List selector.Pay attention:0x3400 is not exist in BCM5714
4582 	 * and BCM5715.
4583 	 */
4584 
4585 	if (bgep->chipid.device == DEVICE_ID_5719) {
4586 		for (i = 0; i < BGE_NUM_RDMA_CHANNELS; i++) {
4587 			if (bge_reg_get32(bgep, (BGE_RDMA_LENGTH + (i << 2))) >
4588 			    bgep->chipid.default_mtu)
4589 				break;
4590 		}
4591 		if (i < BGE_NUM_RDMA_CHANNELS) {
4592 			regval = bge_reg_get32(bgep, RDMA_CORR_CTRL_REG);
4593 			regval |= RDMA_CORR_CTRL_TX_LENGTH_WA;
4594 			bge_reg_put32(bgep, RDMA_CORR_CTRL_REG, regval);
4595 			bgep->rdma_length_bug_on_5719 = B_TRUE;
4596 		}
4597 	}
4598 
4599 	if (bgep->chipid.tx_rings <= COALESCE_64_BYTE_RINGS &&
4600 	    bgep->chipid.rx_rings <= COALESCE_64_BYTE_RINGS)
4601 		coalmode = COALESCE_64_BYTE_STATUS;
4602 	else
4603 		coalmode = 0;
4604 	if (DEVICE_5717_SERIES_CHIPSETS(bgep) ||
4605 	    DEVICE_5725_SERIES_CHIPSETS(bgep) ||
4606 	    DEVICE_57765_SERIES_CHIPSETS(bgep))
4607 		coalmode = COALESCE_CLR_TICKS_RX;
4608 	if (!bge_chip_enable_engine(bgep, HOST_COALESCE_MODE_REG, coalmode))
4609 		retval = DDI_FAILURE;
4610 	if (!bge_chip_enable_engine(bgep, RCV_BD_COMPLETION_MODE_REG,
4611 	    STATE_MACHINE_ATTN_ENABLE_BIT))
4612 		retval = DDI_FAILURE;
4613 	if (!bge_chip_enable_engine(bgep, RCV_LIST_PLACEMENT_MODE_REG, 0))
4614 		retval = DDI_FAILURE;
4615 
4616 	if (DEVICE_5704_SERIES_CHIPSETS(bgep))
4617 		if (!bge_chip_enable_engine(bgep, RCV_LIST_SELECTOR_MODE_REG,
4618 		    STATE_MACHINE_ATTN_ENABLE_BIT))
4619 			retval = DDI_FAILURE;
4620 
4621 	/*
4622 	 * Step 72: Enable MAC DMA engines
4623 	 * Step 73: Clear & enable MAC statistics
4624 	 */
4625 	if (bgep->ape_enabled) {
4626 		/* XXX put32 instead of set32 ? */
4627 		bge_reg_put32(bgep, ETHERNET_MAC_MODE_REG,
4628 		    ETHERNET_MODE_APE_TX_EN | ETHERNET_MODE_APE_RX_EN);
4629 	}
4630 	bge_reg_set32(bgep, ETHERNET_MAC_MODE_REG,
4631 	    ETHERNET_MODE_ENABLE_FHDE |
4632 	    ETHERNET_MODE_ENABLE_RDE |
4633 	    ETHERNET_MODE_ENABLE_TDE);
4634 	bge_reg_set32(bgep, ETHERNET_MAC_MODE_REG,
4635 	    ETHERNET_MODE_ENABLE_TX_STATS |
4636 	    ETHERNET_MODE_ENABLE_RX_STATS |
4637 	    ETHERNET_MODE_CLEAR_TX_STATS |
4638 	    ETHERNET_MODE_CLEAR_RX_STATS);
4639 
4640 	drv_usecwait(140);
4641 
4642 	if (bgep->ape_enabled) {
4643 		/* Write our heartbeat update interval to APE. */
4644 		bge_ape_put32(bgep, BGE_APE_HOST_HEARTBEAT_INT_MS,
4645 		    APE_HOST_HEARTBEAT_INT_DISABLE);
4646 	}
4647 
4648 	/*
4649 	 * Step 74: configure the MLCR (Miscellaneous Local Control
4650 	 * Register); not required, as we set up the MLCR in step 10
4651 	 * (part of the reset code) above.
4652 	 *
4653 	 * Step 75: clear Interrupt Mailbox 0
4654 	 */
4655 	bge_mbx_put(bgep, INTERRUPT_MBOX_0_REG, 0);
4656 
4657 	/*
4658 	 * Steps 76-87: Gentlemen, start your engines ...
4659 	 *
4660 	 * Enable the DMA Completion Engine, the Write DMA Engine,
4661 	 * the Read DMA Engine, Receive Data Completion Engine,
4662 	 * the MBuf Cluster Free Engine, the Send Data Completion Engine,
4663 	 * the Send BD Completion Engine, the Receive BD Initiator Engine,
4664 	 * the Receive Data Initiator Engine, the Send Data Initiator Engine,
4665 	 * the Send BD Initiator Engine, and the Send BD Selector Engine.
4666 	 *
4667 	 * Beware exhaust fumes?
4668 	 */
4669 	if (DEVICE_5704_SERIES_CHIPSETS(bgep))
4670 		if (!bge_chip_enable_engine(bgep, DMA_COMPLETION_MODE_REG, 0))
4671 			retval = DDI_FAILURE;
4672 	dma_wrprio = (bge_dma_wrprio << DMA_PRIORITY_SHIFT) |
4673 	    ALL_DMA_ATTN_BITS;
4674 	/* the 5723 check here covers all newer chip families (OK) */
4675 	if ((MHCR_CHIP_ASIC_REV(bgep) == MHCR_CHIP_ASIC_REV_5755) ||
4676 	    (MHCR_CHIP_ASIC_REV(bgep) == MHCR_CHIP_ASIC_REV_5723) ||
4677 	    (MHCR_CHIP_ASIC_REV(bgep) == MHCR_CHIP_ASIC_REV_5906)) {
4678 		dma_wrprio |= DMA_STATUS_TAG_FIX_CQ12384;
4679 	}
4680 	if (!bge_chip_enable_engine(bgep, WRITE_DMA_MODE_REG,
4681 	    dma_wrprio))
4682 		retval = DDI_FAILURE;
4683 
4684 	drv_usecwait(40);
4685 
4686 	/*
4687 	 * These chipsets no longer use the rdprio logic (bits 31:30 are
4688 	 * reserved).
4689 	 */
4690 	if (DEVICE_5723_SERIES_CHIPSETS(bgep) ||
4691 	    DEVICE_5717_SERIES_CHIPSETS(bgep) ||
4692 	    DEVICE_5725_SERIES_CHIPSETS(bgep))
4693 		bge_dma_rdprio = 0;
4694 	if (!bge_chip_enable_engine(bgep, READ_DMA_MODE_REG,
4695 	    (bge_dma_rdprio << DMA_PRIORITY_SHIFT) | ALL_DMA_ATTN_BITS))
4696 		retval = DDI_FAILURE;
4697 
4698 	drv_usecwait(40);
4699 
4700 	if (!bge_chip_enable_engine(bgep, RCV_DATA_COMPLETION_MODE_REG,
4701 	    STATE_MACHINE_ATTN_ENABLE_BIT))
4702 		retval = DDI_FAILURE;
4703 	if (DEVICE_5704_SERIES_CHIPSETS(bgep))
4704 		if (!bge_chip_enable_engine(bgep,
4705 		    MBUF_CLUSTER_FREE_MODE_REG, 0))
4706 			retval = DDI_FAILURE;
4707 	if (!bge_chip_enable_engine(bgep, SEND_DATA_COMPLETION_MODE_REG, 0))
4708 		retval = DDI_FAILURE;
4709 	if (!bge_chip_enable_engine(bgep, SEND_BD_COMPLETION_MODE_REG,
4710 	    STATE_MACHINE_ATTN_ENABLE_BIT))
4711 		retval = DDI_FAILURE;
4712 	if (!bge_chip_enable_engine(bgep, RCV_BD_INITIATOR_MODE_REG,
4713 	    RCV_BD_DISABLED_RING_ATTN))
4714 		retval = DDI_FAILURE;
4715 	if (!bge_chip_enable_engine(bgep, RCV_DATA_BD_INITIATOR_MODE_REG,
4716 	    RCV_DATA_BD_ILL_RING_ATTN))
4717 		retval = DDI_FAILURE;
4718 	if (!bge_chip_enable_engine(bgep, SEND_DATA_INITIATOR_MODE_REG, 0))
4719 		retval = DDI_FAILURE;
4720 	if (!bge_chip_enable_engine(bgep, SEND_BD_INITIATOR_MODE_REG,
4721 	    STATE_MACHINE_ATTN_ENABLE_BIT))
4722 		retval = DDI_FAILURE;
4723 	if (!bge_chip_enable_engine(bgep, SEND_BD_SELECTOR_MODE_REG,
4724 	    STATE_MACHINE_ATTN_ENABLE_BIT))
4725 		retval = DDI_FAILURE;
4726 
4727 	drv_usecwait(40);
4728 
4729 	/*
4730 	 * Step 88: download firmware -- doesn't apply
4731 	 * Steps 89-90: enable Transmit & Receive MAC Engines
4732 	 */
4733 	regval = 0;
4734 	if (DEVICE_5717_SERIES_CHIPSETS(bgep) ||
4735 	    DEVICE_57765_SERIES_CHIPSETS(bgep)) {
4736 		regval |= TRANSMIT_MODE_MBUF_LOCKUP_FIX;
4737 	}
4738 	if (!bge_chip_enable_engine(bgep, TRANSMIT_MAC_MODE_REG, regval))
4739 		retval = DDI_FAILURE;
4740 
4741 	drv_usecwait(100);
4742 
4743 #ifdef BGE_IPMI_ASF
4744 	if (!bgep->asf_enabled) {
4745 		if (!bge_chip_enable_engine(bgep, RECEIVE_MAC_MODE_REG,
4746 		    RECEIVE_MODE_KEEP_VLAN_TAG))
4747 			retval = DDI_FAILURE;
4748 	} else {
4749 		if (!bge_chip_enable_engine(bgep, RECEIVE_MAC_MODE_REG, 0))
4750 			retval = DDI_FAILURE;
4751 	}
4752 #else
4753 	if (!bge_chip_enable_engine(bgep, RECEIVE_MAC_MODE_REG,
4754 	    RECEIVE_MODE_KEEP_VLAN_TAG))
4755 		retval = DDI_FAILURE;
4756 #endif
4757 
4758 	drv_usecwait(100);
4759 
4760 	/*
4761 	 * Step 91: disable auto-polling of PHY status
4762 	 */
4763 	bge_reg_put32(bgep, MI_MODE_REG, MI_MODE_DEFAULT);
4764 
4765 	/*
4766 	 * Step 92: configure D0 power state (not required)
4767 	 * Step 93: initialise LED control register ()
4768 	 */
4769 	ledctl = LED_CONTROL_DEFAULT;
4770 	switch (bgep->chipid.device) {
4771 	case DEVICE_ID_5700:
4772 	case DEVICE_ID_5700x:
4773 	case DEVICE_ID_5701:
4774 		/*
4775 		 * Switch to 5700 (MAC) mode on these older chips
4776 		 */
4777 		ledctl &= ~LED_CONTROL_LED_MODE_MASK;
4778 		ledctl |= LED_CONTROL_LED_MODE_5700;
4779 		break;
4780 
4781 	default:
4782 		break;
4783 	}
4784 	bge_reg_put32(bgep, ETHERNET_MAC_LED_CONTROL_REG, ledctl);
4785 
4786 	/*
4787 	 * Step 94: activate link
4788 	 */
4789 	bge_reg_put32(bgep, MI_STATUS_REG, MI_STATUS_LINK);
4790 
4791 	/*
4792 	 * Step 95: set up physical layer (PHY/SerDes)
4793 	 * restart autoneg (if required)
4794 	 */
4795 	if (reset_phys)
4796 	{
4797 		if (bge_phys_update(bgep) == DDI_FAILURE)
4798 			retval = DDI_FAILURE;
4799 		/* forcing a mac link update here */
4800 		bge_phys_check(bgep);
4801 		bgep->link_state = (bgep->param_link_up) ? LINK_STATE_UP :
4802 		                                           LINK_STATE_DOWN;
4803 		bge_sync_mac_modes(bgep);
4804 		mac_link_update(bgep->mh, bgep->link_state);
4805 	}
4806 
4807 	/*
4808 	 * Extra step (DSG): hand over all the Receive Buffers to the chip
4809 	 */
4810 	for (ring = 0; ring < BGE_BUFF_RINGS_USED; ++ring)
4811 		bge_mbx_put(bgep, bgep->buff[ring].chip_mbx_reg,
4812 		    bgep->buff[ring].rf_next);
4813 
	/*
	 * MSI mode: set the least-significant bits of the Host Coalescing
	 * mode register, since the interrupt is signalled differently
	 * when using MSI rather than a shared #INTA line.
	 */
4818 	if (bgep->intr_type == DDI_INTR_TYPE_MSI)
4819 		bge_reg_set32(bgep, HOST_COALESCE_MODE_REG, 0x70);
4820 
4821 	/*
4822 	 * Extra step (DSG): select which interrupts are enabled
4823 	 *
4824 	 * Program the Ethernet MAC engine to signal attention on
4825 	 * Link Change events, then enable interrupts on MAC, DMA,
4826 	 * and FLOW attention signals.
4827 	 */
4828 	bge_reg_set32(bgep, ETHERNET_MAC_EVENT_ENABLE_REG,
4829 	    ETHERNET_EVENT_LINK_INT |
4830 	    ETHERNET_STATUS_PCS_ERROR_INT);
4831 #ifdef BGE_IPMI_ASF
4832 	if (bgep->asf_enabled) {
4833 		bge_reg_set32(bgep, MODE_CONTROL_REG,
4834 		    MODE_INT_ON_FLOW_ATTN |
4835 		    MODE_INT_ON_DMA_ATTN |
4836 		    MODE_HOST_STACK_UP|
4837 		    MODE_INT_ON_MAC_ATTN);
4838 	} else {
4839 #endif
4840 		bge_reg_set32(bgep, MODE_CONTROL_REG,
4841 		    MODE_INT_ON_FLOW_ATTN |
4842 		    MODE_INT_ON_DMA_ATTN |
4843 		    MODE_INT_ON_MAC_ATTN);
4844 #ifdef BGE_IPMI_ASF
4845 	}
4846 #endif
4847 
4848 	if (DEVICE_5717_SERIES_CHIPSETS(bgep) ||
4849 	    DEVICE_5725_SERIES_CHIPSETS(bgep) ||
4850 	    DEVICE_57765_SERIES_CHIPSETS(bgep)) {
4851 		bge_cfg_clr16(bgep, PCI_CONF_DEV_CTRL_5717,
4852 		    DEV_CTRL_NO_SNOOP | DEV_CTRL_RELAXED);
4853 #if 0
4854 		mhcr = pci_config_get32(bgep->cfg_handle, PCI_CONF_BGE_MHCR);
4855 		pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_MHCR,
4856 		                 (mhcr | MHCR_TLP_MINOR_ERR_TOLERANCE));
4857 #endif
4858 	}
4859 
4860 	/*
4861 	 * Step 97: enable PCI interrupts!!!
4862 	 */
4863 	if (bgep->intr_type == DDI_INTR_TYPE_FIXED)
4864 		bge_cfg_clr32(bgep, PCI_CONF_BGE_MHCR,
4865 		    bgep->chipid.mask_pci_int);
4866 
4867 	/*
4868 	 * All done!
4869 	 */
4870 	bgep->bge_chip_state = BGE_CHIP_RUNNING;
4871 	return (retval);
4872 }
4873 
4874 
4875 /*
4876  * ========== Hardware interrupt handler ==========
4877  */
4878 
4879 #undef	BGE_DBG
4880 #define	BGE_DBG		BGE_DBG_INT	/* debug flag for this code	*/
4881 
/*
 * Sync the status block, then atomically clear the specified bits in
 * the <flags-and-tag> field of the status block, returning the value
 * of the <tag> and the <flags> before the bits were cleared.
 */
4888 static int
bge_status_sync(bge_t * bgep,uint64_t bits,uint64_t * flags)4889 bge_status_sync(bge_t *bgep, uint64_t bits, uint64_t *flags)
4890 {
4891 	bge_status_t *bsp;
4892 	int retval;
4893 
4894 	BGE_TRACE(("bge_status_sync($%p, 0x%llx)",
4895 	    (void *)bgep, bits));
4896 
4897 	ASSERT(bgep->bge_guard == BGE_GUARD);
4898 
4899 	DMA_SYNC(bgep->status_block, DDI_DMA_SYNC_FORKERNEL);
4900 	retval = bge_check_dma_handle(bgep, bgep->status_block.dma_hdl);
4901 	if (retval != DDI_FM_OK)
4902 		return (retval);
4903 
4904 	bsp = DMA_VPTR(bgep->status_block);
4905 	*flags = bge_atomic_clr64(&bsp->flags_n_tag, bits);
4906 
4907 	BGE_DEBUG(("bge_status_sync($%p, 0x%llx) returning 0x%llx",
4908 	    (void *)bgep, bits, *flags));
4909 
4910 	return (retval);
4911 }
4912 
4913 void
bge_wake_factotum(bge_t * bgep)4914 bge_wake_factotum(bge_t *bgep)
4915 {
4916 	mutex_enter(bgep->softintrlock);
4917 	if (bgep->factotum_flag == 0) {
4918 		bgep->factotum_flag = 1;
4919 		ddi_trigger_softintr(bgep->factotum_id);
4920 	}
4921 	mutex_exit(bgep->softintrlock);
4922 }
4923 
/*
 * Called (with genlock held) when the status block's ERROR flag is
 * asserted: read out each status register that can contribute to the
 * ERROR indication (the values are only used for debug logging), then
 * clear them all by writing all-ones back.  The TX RISC state register
 * is read/cleared only on 5704-series chipsets.
 */
static void
bge_intr_error_handler(bge_t *bgep)
{
	uint32_t flow;		/* flow attention			*/
	uint32_t rdma;		/* read-DMA engine status		*/
	uint32_t wdma;		/* write-DMA engine status		*/
	uint32_t tmac;		/* transmit MAC status			*/
	uint32_t rmac;		/* receive MAC status			*/
	uint32_t rxrs;		/* RX RISC state			*/
	uint32_t emac;		/* ethernet MAC status			*/
	uint32_t msis;		/* MSI status				*/
	uint32_t txrs = 0;	/* TX RISC state (5704 series only)	*/

	ASSERT(mutex_owned(bgep->genlock));

	/*
	 * Read all the registers that show the possible
	 * reasons for the ERROR bit to be asserted
	 */
	flow = bge_reg_get32(bgep, FLOW_ATTN_REG);
	rdma = bge_reg_get32(bgep, READ_DMA_STATUS_REG);
	wdma = bge_reg_get32(bgep, WRITE_DMA_STATUS_REG);
	tmac = bge_reg_get32(bgep, TRANSMIT_MAC_STATUS_REG);
	rmac = bge_reg_get32(bgep, RECEIVE_MAC_STATUS_REG);
	rxrs = bge_reg_get32(bgep, RX_RISC_STATE_REG);
	emac = bge_reg_get32(bgep, ETHERNET_MAC_STATUS_REG);
	msis = bge_reg_get32(bgep, MSI_STATUS_REG);
	if (DEVICE_5704_SERIES_CHIPSETS(bgep))
		txrs = bge_reg_get32(bgep, TX_RISC_STATE_REG);

	BGE_DEBUG(("factotum($%p) flow 0x%x rdma 0x%x wdma 0x%x emac 0x%x msis 0x%x",
	    (void *)bgep, flow, rdma, wdma, emac, msis));
	BGE_DEBUG(("factotum($%p) tmac 0x%x rmac 0x%x rxrs 0x%08x txrs 0x%08x",
	    (void *)bgep, tmac, rmac, rxrs, txrs));

	/*
	 * For now, just clear all the errors ...
	 */
	if (DEVICE_5704_SERIES_CHIPSETS(bgep))
		bge_reg_put32(bgep, TX_RISC_STATE_REG, ~0);
	bge_reg_put32(bgep, RX_RISC_STATE_REG, ~0);
	bge_reg_put32(bgep, RECEIVE_MAC_STATUS_REG, ~0);
	bge_reg_put32(bgep, WRITE_DMA_STATUS_REG, ~0);
	bge_reg_put32(bgep, READ_DMA_STATUS_REG, ~0);
	bge_reg_put32(bgep, FLOW_ATTN_REG, ~0);
}
4970 
/*
 * bge_intr() -- handle chip interrupts
 *
 * Hardware interrupt handler for both fixed (#INTA) and MSI interrupt
 * types.  It decides whether to claim the interrupt, masks further chip
 * interrupts, then repeatedly syncs the status block and services
 * whatever it advertises (link changes, error attentions, received
 * packets, completed transmits) until no further update is posted or
 * bge_intr_max_loop iterations have run.  Any FMA access/DMA fault
 * detected along the way diverts to the chip_stop path, which shuts the
 * chip down and leaves recovery to the factotum.
 */
uint_t
bge_intr(caddr_t arg1, caddr_t arg2)
{
	bge_t *bgep = (void *)arg1;		/* private device info	*/
	bge_status_t *bsp;
	uint64_t flags;
	uint32_t regval;
	uint_t result;
	int retval, loop_cnt = 0;

	BGE_TRACE(("bge_intr($%p) ($%p)", arg1, arg2));

	/*
	 * GLD v2 checks that s/w setup is complete before passing
	 * interrupts to this routine, thus eliminating the old
	 * (and well-known) race condition around ddi_add_intr()
	 */
	ASSERT(bgep->progress & PROGRESS_HWINT);

	result = DDI_INTR_UNCLAIMED;
	mutex_enter(bgep->genlock);

	if (bgep->intr_type == DDI_INTR_TYPE_FIXED) {
		/*
		 * Check whether chip's says it's asserting #INTA;
		 * if not, don't process or claim the interrupt.
		 *
		 * Note that the PCI signal is active low, so the
		 * bit is *zero* when the interrupt is asserted.
		 */
		regval = bge_reg_get32(bgep, MISC_LOCAL_CONTROL_REG);
		if (!(DEVICE_5717_SERIES_CHIPSETS(bgep) ||
		      DEVICE_5725_SERIES_CHIPSETS(bgep) ||
		      DEVICE_57765_SERIES_CHIPSETS(bgep)) &&
		    (regval & MLCR_INTA_STATE)) {
			if (bge_check_acc_handle(bgep, bgep->io_handle)
			    != DDI_FM_OK)
				goto chip_stop;
			mutex_exit(bgep->genlock);
			return (result);
		}

		/*
		 * Block further PCI interrupts ...
		 */
		bge_reg_set32(bgep, PCI_CONF_BGE_MHCR,
		    bgep->chipid.mask_pci_int);

	} else {
		/*
		 * Check MSI status; an error attention is logged and
		 * acknowledged by writing the status value back.
		 */
		regval = bge_reg_get32(bgep, MSI_STATUS_REG);
		if (regval & MSI_ERROR_ATTENTION) {
			BGE_REPORT((bgep, "msi error attention,"
			    " status=0x%x", regval));
			bge_reg_put32(bgep, MSI_STATUS_REG, regval);
		}
	}

	/* From this point on, the interrupt is ours */
	result = DDI_INTR_CLAIMED;

	BGE_DEBUG(("bge_intr($%p) ($%p) regval 0x%08x", arg1, arg2, regval));

	/*
	 * Sync the status block and grab the flags-n-tag from it.
	 * We count the number of interrupts where there doesn't
	 * seem to have been a DMA update of the status block; if
	 * it *has* been updated, the counter will be cleared in
	 * the while() loop below ...
	 */
	bgep->missed_dmas += 1;
	bsp = DMA_VPTR(bgep->status_block);
	for (loop_cnt = 0; loop_cnt < bge_intr_max_loop; loop_cnt++) {
		if (bgep->bge_chip_state != BGE_CHIP_RUNNING) {
			/*
			 * bge_chip_stop() may have freed dma area etc
			 * while we were in this interrupt handler -
			 * better not call bge_status_sync()
			 */
			(void) bge_check_acc_handle(bgep,
			    bgep->io_handle);
			mutex_exit(bgep->genlock);
			return (DDI_INTR_CLAIMED);
		}

		retval = bge_status_sync(bgep, STATUS_FLAG_UPDATED |
		    STATUS_FLAG_LINK_CHANGED | STATUS_FLAG_ERROR, &flags);
		if (retval != DDI_FM_OK) {
			bgep->bge_dma_error = B_TRUE;
			goto chip_stop;
		}

		/* No fresh status-block update means we're done looping */
		if (!(flags & STATUS_FLAG_UPDATED))
			break;

		/*
		 * Tell the chip that we're processing the interrupt
		 */
		bge_mbx_put(bgep, INTERRUPT_MBOX_0_REG,
		    INTERRUPT_MBOX_DISABLE(flags));
		if (bge_check_acc_handle(bgep, bgep->io_handle) !=
		    DDI_FM_OK)
			goto chip_stop;

		if (flags & STATUS_FLAG_LINK_CHANGED) {
			BGE_DEBUG(("bge_intr($%p) ($%p) link event", arg1, arg2));
			if (bge_phys_check(bgep)) {
				bgep->link_state = bgep->param_link_up ?
				    LINK_STATE_UP : LINK_STATE_DOWN;
				bge_sync_mac_modes(bgep);
				mac_link_update(bgep->mh, bgep->link_state);
			}

			if (bge_check_acc_handle(bgep, bgep->io_handle) !=
			    DDI_FM_OK)
				goto chip_stop;
		}

		if (flags & STATUS_FLAG_ERROR) {
			bge_intr_error_handler(bgep);

			if (bge_check_acc_handle(bgep, bgep->io_handle) !=
			    DDI_FM_OK)
				goto chip_stop;
		}

		/*
		 * Drop the mutex while we:
		 *	Receive any newly-arrived packets
		 *	Recycle any newly-finished send buffers
		 */
		bgep->bge_intr_running = B_TRUE;
		mutex_exit(bgep->genlock);
		bge_receive(bgep, bsp);
		(void) bge_recycle(bgep, bsp);
		mutex_enter(bgep->genlock);
		bgep->bge_intr_running = B_FALSE;

		/*
		 * Tell the chip we've finished processing, and
		 * give it the tag that we got from the status
		 * block earlier, so that it knows just how far
		 * we've gone.  If it's got more for us to do,
		 * it will now update the status block and try
		 * to assert an interrupt (but we've got the
		 * #INTA blocked at present).  If we see the
		 * update, we'll loop around to do some more.
		 * Eventually we'll get out of here ...
		 */
		bge_mbx_put(bgep, INTERRUPT_MBOX_0_REG,
		    INTERRUPT_MBOX_ENABLE(flags));
		if (bgep->chipid.pci_type == BGE_PCI_E)
			(void) bge_mbx_get(bgep, INTERRUPT_MBOX_0_REG);
		bgep->missed_dmas = 0;
	}

	if (bgep->missed_dmas) {
		/*
		 * Probably due to the internal status tag not
		 * being reset.  Force a status block update now;
		 * this should ensure that we get an update and
		 * a new interrupt.  After that, we should be in
		 * sync again ...
		 */
		BGE_REPORT((bgep, "interrupt: flags 0x%llx - "
		    "not updated?", flags));
		bgep->missed_updates++;
		bge_reg_set32(bgep, HOST_COALESCE_MODE_REG,
		    COALESCE_NOW);

		if (bgep->missed_dmas >= bge_dma_miss_limit) {
			/*
			 * If this happens multiple times in a row,
			 * it means DMA is just not working.  Maybe
			 * the chip's failed, or maybe there's a
			 * problem on the PCI bus or in the host-PCI
			 * bridge (Tomatillo).
			 *
			 * At all events, we want to stop further
			 * interrupts and let the recovery code take
			 * over to see whether anything can be done
			 * about it ...
			 */
			bge_fm_ereport(bgep,
			    DDI_FM_DEVICE_BADINT_LIMIT);
			goto chip_stop;
		}
	}

	/*
	 * Reenable assertion of #INTA, unless there's a DMA fault
	 */
	if (bgep->intr_type == DDI_INTR_TYPE_FIXED) {
		bge_reg_clr32(bgep, PCI_CONF_BGE_MHCR,
		    bgep->chipid.mask_pci_int);
		if (bge_check_acc_handle(bgep, bgep->cfg_handle) !=
		    DDI_FM_OK)
			goto chip_stop;
	}

	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK)
		goto chip_stop;

	mutex_exit(bgep->genlock);
	return (result);

chip_stop:

#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled && bgep->asf_status == ASF_STAT_RUN) {
		/*
		 * We must stop ASF heart beat before
		 * bge_chip_stop(), otherwise some
		 * computers (ex. IBM HS20 blade
		 * server) may crash.
		 */
		bge_asf_update_status(bgep);
		bge_asf_stop_timer(bgep);
		bgep->asf_status = ASF_STAT_STOP;

		bge_asf_pre_reset_operations(bgep, BGE_INIT_RESET);
		(void) bge_check_acc_handle(bgep, bgep->cfg_handle);
	}
#endif
	bge_chip_stop(bgep, B_TRUE);
	(void) bge_check_acc_handle(bgep, bgep->io_handle);
	mutex_exit(bgep->genlock);
	return (result);
}
5204 
5205 /*
5206  * ========== Factotum, implemented as a softint handler ==========
5207  */
5208 
5209 #undef	BGE_DBG
5210 #define	BGE_DBG		BGE_DBG_FACT	/* debug flag for this code	*/
5211 
5212 /*
5213  * Factotum routine to check for Tx stall, using the 'watchdog' counter
5214  */
5215 static boolean_t
bge_factotum_stall_check(bge_t * bgep)5216 bge_factotum_stall_check(bge_t *bgep)
5217 {
5218 	uint32_t dogval;
5219 	bge_status_t *bsp;
5220 	uint64_t now = gethrtime();
5221 
5222 	if ((now - bgep->timestamp) < BGE_CYCLIC_PERIOD)
5223 		return (B_FALSE);
5224 
5225 	bgep->timestamp = now;
5226 
5227 	ASSERT(mutex_owned(bgep->genlock));
5228 
5229 	/*
5230 	 * Specific check for Tx stall ...
5231 	 *
5232 	 * The 'watchdog' counter is incremented whenever a packet
5233 	 * is queued, reset to 1 when some (but not all) buffers
5234 	 * are reclaimed, reset to 0 (disabled) when all buffers
5235 	 * are reclaimed, and shifted left here.  If it exceeds the
5236 	 * threshold value, the chip is assumed to have stalled and
5237 	 * is put into the ERROR state.  The factotum will then reset
5238 	 * it on the next pass.
5239 	 *
5240 	 * All of which should ensure that we don't get into a state
5241 	 * where packets are left pending indefinitely!
5242 	 */
5243 	dogval = bge_atomic_shl32(&bgep->watchdog, 1);
5244 	bsp = DMA_VPTR(bgep->status_block);
5245 	if (dogval < bge_watchdog_count || bge_recycle(bgep, bsp))
5246 		return (B_FALSE);
5247 
5248 #if !defined(BGE_NETCONSOLE)
5249 	BGE_REPORT((bgep, "Tx stall detected, watchdog code 0x%x", dogval));
5250 #endif
5251 	bge_fm_ereport(bgep, DDI_FM_DEVICE_STALL);
5252 	return (B_TRUE);
5253 }
5254 
5255 /*
5256  * The factotum is woken up when there's something to do that we'd rather
5257  * not do from inside a hardware interrupt handler or high-level cyclic.
5258  * Its main task is to reset & restart the chip after an error.
5259  */
5260 uint_t
bge_chip_factotum(caddr_t arg)5261 bge_chip_factotum(caddr_t arg)
5262 {
5263 	bge_t *bgep;
5264 	uint_t result;
5265 	boolean_t error;
5266 	int dma_state;
5267 
5268 	bgep = (void *)arg;
5269 
5270 	BGE_TRACE(("bge_chip_factotum($%p)", (void *)bgep));
5271 
5272 	mutex_enter(bgep->softintrlock);
5273 	if (bgep->factotum_flag == 0) {
5274 		mutex_exit(bgep->softintrlock);
5275 		return (DDI_INTR_UNCLAIMED);
5276 	}
5277 	bgep->factotum_flag = 0;
5278 	mutex_exit(bgep->softintrlock);
5279 
5280 	result = DDI_INTR_CLAIMED;
5281 	error = B_FALSE;
5282 
5283 	mutex_enter(bgep->genlock);
5284 	switch (bgep->bge_chip_state) {
5285 	default:
5286 		break;
5287 
5288 	case BGE_CHIP_RUNNING:
5289 
5290 		if (bgep->chipid.device == DEVICE_ID_5700) {
5291 			if (bge_phys_check(bgep)) {
5292 				bgep->link_state = (bgep->param_link_up) ?
5293 				    LINK_STATE_UP : LINK_STATE_DOWN;
5294 				bge_sync_mac_modes(bgep);
5295 				mac_link_update(bgep->mh, bgep->link_state);
5296 			}
5297 		}
5298 
5299 		error = bge_factotum_stall_check(bgep);
5300 		if (dma_state != DDI_FM_OK) {
5301 			bgep->bge_dma_error = B_TRUE;
5302 			error = B_TRUE;
5303 		}
5304 		if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK)
5305 			error = B_TRUE;
5306 		if (error)
5307 			bgep->bge_chip_state = BGE_CHIP_ERROR;
5308 		break;
5309 
5310 	case BGE_CHIP_ERROR:
5311 		error = B_TRUE;
5312 		break;
5313 
5314 	case BGE_CHIP_FAULT:
5315 		/*
5316 		 * Fault detected, time to reset ...
5317 		 */
5318 		if (bge_autorecover) {
5319 			if (!(bgep->progress & PROGRESS_BUFS)) {
5320 				/*
5321 				 * if we can't allocate the ring buffers,
5322 				 * try later
5323 				 */
5324 				if (bge_alloc_bufs(bgep) != DDI_SUCCESS) {
5325 					mutex_exit(bgep->genlock);
5326 					return (result);
5327 				}
5328 				bgep->progress |= PROGRESS_BUFS;
5329 			}
5330 			if (!(bgep->progress & PROGRESS_INTR)) {
5331 				bge_init_rings(bgep);
5332 				bge_intr_enable(bgep);
5333 				bgep->progress |= PROGRESS_INTR;
5334 			}
5335 			if (!(bgep->progress & PROGRESS_KSTATS)) {
5336 				bge_init_kstats(bgep,
5337 				    ddi_get_instance(bgep->devinfo));
5338 				bgep->progress |= PROGRESS_KSTATS;
5339 			}
5340 
5341 			BGE_REPORT((bgep, "automatic recovery activated"));
5342 
5343 			if (bge_restart(bgep, B_FALSE) != DDI_SUCCESS) {
5344 				bgep->bge_chip_state = BGE_CHIP_ERROR;
5345 				error = B_TRUE;
5346 			}
5347 			if (bge_check_acc_handle(bgep, bgep->cfg_handle) !=
5348 			    DDI_FM_OK) {
5349 				bgep->bge_chip_state = BGE_CHIP_ERROR;
5350 				error = B_TRUE;
5351 			}
5352 			if (bge_check_acc_handle(bgep, bgep->io_handle) !=
5353 			    DDI_FM_OK) {
5354 				bgep->bge_chip_state = BGE_CHIP_ERROR;
5355 				error = B_TRUE;
5356 			}
5357 			if (error == B_FALSE) {
5358 #ifdef BGE_IPMI_ASF
5359 				if (bgep->asf_enabled &&
5360 				    bgep->asf_status != ASF_STAT_RUN) {
5361 					bgep->asf_timeout_id = timeout(
5362 					    bge_asf_heartbeat, (void *)bgep,
5363 					    drv_usectohz(
5364 					    BGE_ASF_HEARTBEAT_INTERVAL));
5365 					bgep->asf_status = ASF_STAT_RUN;
5366 				}
5367 #endif
5368 				if (!bgep->manual_reset) {
5369 					ddi_fm_service_impact(bgep->devinfo,
5370 					    DDI_SERVICE_RESTORED);
5371 				}
5372 			}
5373 		}
5374 		break;
5375 	}
5376 
5377 	/*
5378 	 * If an error is detected, stop the chip now, marking it as
5379 	 * faulty, so that it will be reset next time through ...
5380 	 *
5381 	 * Note that if intr_running is set, then bge_intr() has dropped
5382 	 * genlock to call bge_receive/bge_recycle. Can't stop the chip at
5383 	 * this point so have to wait until the next time the factotum runs.
5384 	 */
5385 	if (error && !bgep->bge_intr_running) {
5386 #ifdef BGE_IPMI_ASF
5387 		if (bgep->asf_enabled && (bgep->asf_status == ASF_STAT_RUN)) {
5388 			/*
5389 			 * We must stop ASF heart beat before bge_chip_stop(),
5390 			 * otherwise some computers (ex. IBM HS20 blade server)
5391 			 * may crash.
5392 			 */
5393 			bge_asf_update_status(bgep);
5394 			bge_asf_stop_timer(bgep);
5395 			bgep->asf_status = ASF_STAT_STOP;
5396 
5397 			bge_asf_pre_reset_operations(bgep, BGE_INIT_RESET);
5398 			(void) bge_check_acc_handle(bgep, bgep->cfg_handle);
5399 		}
5400 #endif
5401 		bge_chip_stop(bgep, B_TRUE);
5402 		(void) bge_check_acc_handle(bgep, bgep->io_handle);
5403 	}
5404 	mutex_exit(bgep->genlock);
5405 
5406 	return (result);
5407 }
5408 
5409 /*
5410  * High-level cyclic handler
5411  *
5412  * This routine schedules a (low-level) softint callback to the
5413  * factotum, and prods the chip to update the status block (which
5414  * will cause a hardware interrupt when complete).
5415  */
5416 void
bge_chip_cyclic(void * arg)5417 bge_chip_cyclic(void *arg)
5418 {
5419 	bge_t *bgep;
5420 	uint32_t regval;
5421 
5422 	bgep = arg;
5423 
5424 	switch (bgep->bge_chip_state) {
5425 	default:
5426 		return;
5427 
5428 	case BGE_CHIP_RUNNING:
5429 
5430 		/* XXX I really don't like this forced interrupt... */
5431 		bge_reg_set32(bgep, HOST_COALESCE_MODE_REG, COALESCE_NOW);
5432 		if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK)
5433 			ddi_fm_service_impact(bgep->devinfo,
5434 			    DDI_SERVICE_UNAFFECTED);
5435 
5436 		break;
5437 
5438 	case BGE_CHIP_FAULT:
5439 	case BGE_CHIP_ERROR:
5440 
5441 		break;
5442 	}
5443 
5444 	mutex_enter(bgep->genlock);
5445 
5446 	if (bgep->eee_lpi_wait && !--bgep->eee_lpi_wait) {
5447 		BGE_DEBUG(("eee cyclic, lpi enabled"));
5448 		bge_eee_enable(bgep);
5449 	}
5450 
5451 	if (bgep->rdma_length_bug_on_5719) {
5452 		if ((bge_reg_get32(bgep, STAT_IFHCOUT_UPKGS_REG) +
5453 		     bge_reg_get32(bgep, STAT_IFHCOUT_MPKGS_REG) +
5454 		     bge_reg_get32(bgep, STAT_IFHCOUT_BPKGS_REG)) >
5455 		    BGE_NUM_RDMA_CHANNELS) {
5456 			regval = bge_reg_get32(bgep, RDMA_CORR_CTRL_REG);
5457 			regval &= ~RDMA_CORR_CTRL_TX_LENGTH_WA;
5458 			bge_reg_put32(bgep, RDMA_CORR_CTRL_REG, regval);
5459 			bgep->rdma_length_bug_on_5719 = B_FALSE;
5460 		}
5461 	}
5462 
5463 	mutex_exit(bgep->genlock);
5464 
5465 	bge_wake_factotum(bgep);
5466 
5467 }
5468 
5469 
5470 /*
5471  * ========== Ioctl subfunctions ==========
5472  */
5473 
5474 #undef	BGE_DBG
5475 #define	BGE_DBG		BGE_DBG_PPIO	/* debug flag for this code	*/
5476 
5477 #if	BGE_DEBUGGING || BGE_DO_PPIO
5478 
5479 static void
bge_chip_peek_cfg(bge_t * bgep,bge_peekpoke_t * ppd)5480 bge_chip_peek_cfg(bge_t *bgep, bge_peekpoke_t *ppd)
5481 {
5482 	uint64_t regval;
5483 	uint64_t regno;
5484 
5485 	BGE_TRACE(("bge_chip_peek_cfg($%p, $%p)",
5486 	    (void *)bgep, (void *)ppd));
5487 
5488 	regno = ppd->pp_acc_offset;
5489 
5490 	switch (ppd->pp_acc_size) {
5491 	case 1:
5492 		regval = pci_config_get8(bgep->cfg_handle, regno);
5493 		break;
5494 
5495 	case 2:
5496 		regval = pci_config_get16(bgep->cfg_handle, regno);
5497 		break;
5498 
5499 	case 4:
5500 		regval = pci_config_get32(bgep->cfg_handle, regno);
5501 		break;
5502 
5503 	case 8:
5504 		regval = pci_config_get64(bgep->cfg_handle, regno);
5505 		break;
5506 	}
5507 
5508 	ppd->pp_acc_data = regval;
5509 }
5510 
5511 static void
bge_chip_poke_cfg(bge_t * bgep,bge_peekpoke_t * ppd)5512 bge_chip_poke_cfg(bge_t *bgep, bge_peekpoke_t *ppd)
5513 {
5514 	uint64_t regval;
5515 	uint64_t regno;
5516 
5517 	BGE_TRACE(("bge_chip_poke_cfg($%p, $%p)",
5518 	    (void *)bgep, (void *)ppd));
5519 
5520 	regno = ppd->pp_acc_offset;
5521 	regval = ppd->pp_acc_data;
5522 
5523 	switch (ppd->pp_acc_size) {
5524 	case 1:
5525 		pci_config_put8(bgep->cfg_handle, regno, regval);
5526 		break;
5527 
5528 	case 2:
5529 		pci_config_put16(bgep->cfg_handle, regno, regval);
5530 		break;
5531 
5532 	case 4:
5533 		pci_config_put32(bgep->cfg_handle, regno, regval);
5534 		break;
5535 
5536 	case 8:
5537 		pci_config_put64(bgep->cfg_handle, regno, regval);
5538 		break;
5539 	}
5540 }
5541 
5542 static void
bge_chip_peek_reg(bge_t * bgep,bge_peekpoke_t * ppd)5543 bge_chip_peek_reg(bge_t *bgep, bge_peekpoke_t *ppd)
5544 {
5545 	uint64_t regval;
5546 	void *regaddr;
5547 
5548 	BGE_TRACE(("bge_chip_peek_reg($%p, $%p)",
5549 	    (void *)bgep, (void *)ppd));
5550 
5551 	regaddr = PIO_ADDR(bgep, ppd->pp_acc_offset);
5552 
5553 	switch (ppd->pp_acc_size) {
5554 	case 1:
5555 		regval = ddi_get8(bgep->io_handle, regaddr);
5556 		break;
5557 
5558 	case 2:
5559 		regval = ddi_get16(bgep->io_handle, regaddr);
5560 		break;
5561 
5562 	case 4:
5563 		regval = ddi_get32(bgep->io_handle, regaddr);
5564 		break;
5565 
5566 	case 8:
5567 		regval = ddi_get64(bgep->io_handle, regaddr);
5568 		break;
5569 	}
5570 
5571 	ppd->pp_acc_data = regval;
5572 }
5573 
5574 static void
bge_chip_poke_reg(bge_t * bgep,bge_peekpoke_t * ppd)5575 bge_chip_poke_reg(bge_t *bgep, bge_peekpoke_t *ppd)
5576 {
5577 	uint64_t regval;
5578 	void *regaddr;
5579 
5580 	BGE_TRACE(("bge_chip_poke_reg($%p, $%p)",
5581 	    (void *)bgep, (void *)ppd));
5582 
5583 	regaddr = PIO_ADDR(bgep, ppd->pp_acc_offset);
5584 	regval = ppd->pp_acc_data;
5585 
5586 	switch (ppd->pp_acc_size) {
5587 	case 1:
5588 		ddi_put8(bgep->io_handle, regaddr, regval);
5589 		break;
5590 
5591 	case 2:
5592 		ddi_put16(bgep->io_handle, regaddr, regval);
5593 		break;
5594 
5595 	case 4:
5596 		ddi_put32(bgep->io_handle, regaddr, regval);
5597 		break;
5598 
5599 	case 8:
5600 		ddi_put64(bgep->io_handle, regaddr, regval);
5601 		break;
5602 	}
5603 	BGE_PCICHK(bgep);
5604 }
5605 
/*
 * Read a value from NIC on-chip memory.  The memory is reached through
 * the movable window (MWBAR): position the window on the granule that
 * contains the requested offset, then access the residual offset within
 * the window region of the register BAR.
 */
static void
bge_chip_peek_nic(bge_t *bgep, bge_peekpoke_t *ppd)
{
	uint64_t regoff;
	uint64_t regval;
	void *regaddr;

	BGE_TRACE(("bge_chip_peek_nic($%p, $%p)",
	    (void *)bgep, (void *)ppd));

	/* Window must be set *before* computing the in-window address */
	regoff = ppd->pp_acc_offset;
	bge_nic_setwin(bgep, regoff & ~MWBAR_GRANULE_MASK);
	regoff &= MWBAR_GRANULE_MASK;
	regoff += NIC_MEM_WINDOW_OFFSET;
	regaddr = PIO_ADDR(bgep, regoff);

	/* Access width was validated by the caller (bge_pp_ioctl) */
	switch (ppd->pp_acc_size) {
	case 1:
		regval = ddi_get8(bgep->io_handle, regaddr);
		break;

	case 2:
		regval = ddi_get16(bgep->io_handle, regaddr);
		break;

	case 4:
		regval = ddi_get32(bgep->io_handle, regaddr);
		break;

	case 8:
		regval = ddi_get64(bgep->io_handle, regaddr);
		break;
	}

	ppd->pp_acc_data = regval;
}
5642 
/*
 * Write a value to NIC on-chip memory, via the movable memory window
 * (see bge_chip_peek_nic above for the windowing scheme), then check
 * for PCI faults.
 */
static void
bge_chip_poke_nic(bge_t *bgep, bge_peekpoke_t *ppd)
{
	uint64_t regoff;
	uint64_t regval;
	void *regaddr;

	BGE_TRACE(("bge_chip_poke_nic($%p, $%p)",
	    (void *)bgep, (void *)ppd));

	/* Window must be set *before* computing the in-window address */
	regoff = ppd->pp_acc_offset;
	bge_nic_setwin(bgep, regoff & ~MWBAR_GRANULE_MASK);
	regoff &= MWBAR_GRANULE_MASK;
	regoff += NIC_MEM_WINDOW_OFFSET;
	regaddr = PIO_ADDR(bgep, regoff);
	regval = ppd->pp_acc_data;

	/* Access width was validated by the caller (bge_pp_ioctl) */
	switch (ppd->pp_acc_size) {
	case 1:
		ddi_put8(bgep->io_handle, regaddr, regval);
		break;

	case 2:
		ddi_put16(bgep->io_handle, regaddr, regval);
		break;

	case 4:
		ddi_put32(bgep->io_handle, regaddr, regval);
		break;

	case 8:
		ddi_put64(bgep->io_handle, regaddr, regval);
		break;
	}
	BGE_PCICHK(bgep);
}
5679 
/*
 * Read a 16-bit MII (PHY) register.  The caller (bge_pp_ioctl) scaled
 * the register number by 2 to fake byte addressing; undo that here.
 */
static void
bge_chip_peek_mii(bge_t *bgep, bge_peekpoke_t *ppd)
{
	BGE_TRACE(("bge_chip_peek_mii($%p, $%p)",
	    (void *)bgep, (void *)ppd));

	ppd->pp_acc_data = bge_mii_get16(bgep, ppd->pp_acc_offset/2);
}
5688 
/*
 * Write a 16-bit MII (PHY) register.  The caller (bge_pp_ioctl) scaled
 * the register number by 2 to fake byte addressing; undo that here.
 */
static void
bge_chip_poke_mii(bge_t *bgep, bge_peekpoke_t *ppd)
{
	BGE_TRACE(("bge_chip_poke_mii($%p, $%p)",
	    (void *)bgep, (void *)ppd));

	bge_mii_put16(bgep, ppd->pp_acc_offset/2, ppd->pp_acc_data);
}
5697 
#if	BGE_SEE_IO32

/*
 * Read a 32-bit word from the attached SEEPROM.  On NVMEM access
 * failure, all-ones is returned as an in-band error sentinel.
 */
static void
bge_chip_peek_seeprom(bge_t *bgep, bge_peekpoke_t *ppd)
{
	uint32_t data;
	int err;

	BGE_TRACE(("bge_chip_peek_seeprom($%p, $%p)",
	    (void *)bgep, (void *)ppd));

	err = bge_nvmem_rw32(bgep, BGE_SEE_READ, ppd->pp_acc_offset, &data);
	ppd->pp_acc_data = err ? ~0ull : data;
}
5712 
5713 static void
bge_chip_poke_seeprom(bge_t * bgep,bge_peekpoke_t * ppd)5714 bge_chip_poke_seeprom(bge_t *bgep, bge_peekpoke_t *ppd)
5715 {
5716 	uint32_t data;
5717 
5718 	BGE_TRACE(("bge_chip_poke_seeprom($%p, $%p)",
5719 	    (void *)bgep, (void *)ppd));
5720 
5721 	data = ppd->pp_acc_data;
5722 	(void) bge_nvmem_rw32(bgep, BGE_SEE_WRITE, ppd->pp_acc_offset, &data);
5723 }
5724 #endif	/* BGE_SEE_IO32 */
5725 
#if	BGE_FLASH_IO32

/*
 * Read a 32-bit word from the attached flash device.  On NVMEM access
 * failure, all-ones is returned as an in-band error sentinel.
 */
static void
bge_chip_peek_flash(bge_t *bgep, bge_peekpoke_t *ppd)
{
	uint32_t data;
	int err;

	BGE_TRACE(("bge_chip_peek_flash($%p, $%p)",
	    (void *)bgep, (void *)ppd));

	err = bge_nvmem_rw32(bgep, BGE_FLASH_READ, ppd->pp_acc_offset, &data);
	ppd->pp_acc_data = err ? ~0ull : data;
}
5740 
5741 static void
bge_chip_poke_flash(bge_t * bgep,bge_peekpoke_t * ppd)5742 bge_chip_poke_flash(bge_t *bgep, bge_peekpoke_t *ppd)
5743 {
5744 	uint32_t data;
5745 
5746 	BGE_TRACE(("bge_chip_poke_flash($%p, $%p)",
5747 	    (void *)bgep, (void *)ppd));
5748 
5749 	data = ppd->pp_acc_data;
5750 	(void) bge_nvmem_rw32(bgep, BGE_FLASH_WRITE,
5751 	    ppd->pp_acc_offset, &data);
5752 }
5753 #endif	/* BGE_FLASH_IO32 */
5754 
/*
 * Read a value from an arbitrary kernel virtual address.  By the time
 * we get here, bge_pp_ioctl has already added the base VA of the target
 * area (the bge_t itself or a DMA area) into pp_acc_offset, so the
 * "offset" is really a full kernel VA — treat with due care.
 */
static void
bge_chip_peek_mem(bge_t *bgep, bge_peekpoke_t *ppd)
{
	uint64_t regval;
	void *vaddr;

	BGE_TRACE(("bge_chip_peek_bge($%p, $%p)",
	    (void *)bgep, (void *)ppd));

	vaddr = (void *)(uintptr_t)ppd->pp_acc_offset;

	/* Access width was validated by the caller (bge_pp_ioctl) */
	switch (ppd->pp_acc_size) {
	case 1:
		regval = *(uint8_t *)vaddr;
		break;

	case 2:
		regval = *(uint16_t *)vaddr;
		break;

	case 4:
		regval = *(uint32_t *)vaddr;
		break;

	case 8:
		regval = *(uint64_t *)vaddr;
		break;
	}

	BGE_DEBUG(("bge_chip_peek_mem($%p, $%p) peeked 0x%llx from $%p",
	    (void *)bgep, (void *)ppd, regval, vaddr));

	ppd->pp_acc_data = regval;
}
5789 
/*
 * Write a value to an arbitrary kernel virtual address.  As with
 * bge_chip_peek_mem, pp_acc_offset already incorporates the base VA
 * added by bge_pp_ioctl, so this stores directly into driver memory.
 */
static void
bge_chip_poke_mem(bge_t *bgep, bge_peekpoke_t *ppd)
{
	uint64_t regval;
	void *vaddr;

	BGE_TRACE(("bge_chip_poke_mem($%p, $%p)",
	    (void *)bgep, (void *)ppd));

	vaddr = (void *)(uintptr_t)ppd->pp_acc_offset;
	regval = ppd->pp_acc_data;

	BGE_DEBUG(("bge_chip_poke_mem($%p, $%p) poking 0x%llx at $%p",
	    (void *)bgep, (void *)ppd, regval, vaddr));

	/* Access width was validated by the caller (bge_pp_ioctl) */
	switch (ppd->pp_acc_size) {
	case 1:
		*(uint8_t *)vaddr = (uint8_t)regval;
		break;

	case 2:
		*(uint16_t *)vaddr = (uint16_t)regval;
		break;

	case 4:
		*(uint32_t *)vaddr = (uint32_t)regval;
		break;

	case 8:
		*(uint64_t *)vaddr = (uint64_t)regval;
		break;
	}
}
5823 
5824 static enum ioc_reply
bge_pp_ioctl(bge_t * bgep,int cmd,mblk_t * mp,struct iocblk * iocp)5825 bge_pp_ioctl(bge_t *bgep, int cmd, mblk_t *mp, struct iocblk *iocp)
5826 {
5827 	void (*ppfn)(bge_t *bgep, bge_peekpoke_t *ppd);
5828 	bge_peekpoke_t *ppd;
5829 	dma_area_t *areap;
5830 	uint64_t sizemask;
5831 	uint64_t mem_va;
5832 	uint64_t maxoff;
5833 	boolean_t peek;
5834 
5835 	switch (cmd) {
5836 	default:
5837 		/* NOTREACHED */
5838 		bge_error(bgep, "bge_pp_ioctl: invalid cmd 0x%x", cmd);
5839 		return (IOC_INVAL);
5840 
5841 	case BGE_PEEK:
5842 		peek = B_TRUE;
5843 		break;
5844 
5845 	case BGE_POKE:
5846 		peek = B_FALSE;
5847 		break;
5848 	}
5849 
5850 	/*
5851 	 * Validate format of ioctl
5852 	 */
5853 	if (iocp->ioc_count != sizeof (bge_peekpoke_t))
5854 		return (IOC_INVAL);
5855 	if (mp->b_cont == NULL)
5856 		return (IOC_INVAL);
5857 	ppd = (void *)mp->b_cont->b_rptr;
5858 
5859 	/*
5860 	 * Validate request parameters
5861 	 */
5862 	switch (ppd->pp_acc_space) {
5863 	default:
5864 		return (IOC_INVAL);
5865 
5866 	case BGE_PP_SPACE_CFG:
5867 		/*
5868 		 * Config space
5869 		 */
5870 		sizemask = 8|4|2|1;
5871 		mem_va = 0;
5872 		maxoff = PCI_CONF_HDR_SIZE;
5873 		ppfn = peek ? bge_chip_peek_cfg : bge_chip_poke_cfg;
5874 		break;
5875 
5876 	case BGE_PP_SPACE_REG:
5877 		/*
5878 		 * Memory-mapped I/O space
5879 		 */
5880 		sizemask = 8|4|2|1;
5881 		mem_va = 0;
5882 		maxoff = RIAAR_REGISTER_MAX;
5883 		ppfn = peek ? bge_chip_peek_reg : bge_chip_poke_reg;
5884 		break;
5885 
5886 	case BGE_PP_SPACE_NIC:
5887 		/*
5888 		 * NIC on-chip memory
5889 		 */
5890 		sizemask = 8|4|2|1;
5891 		mem_va = 0;
5892 		maxoff = MWBAR_ONCHIP_MAX;
5893 		ppfn = peek ? bge_chip_peek_nic : bge_chip_poke_nic;
5894 		break;
5895 
5896 	case BGE_PP_SPACE_MII:
5897 		/*
5898 		 * PHY's MII registers
5899 		 * NB: all PHY registers are two bytes, but the
5900 		 * addresses increment in ones (word addressing).
5901 		 * So we scale the address here, then undo the
5902 		 * transformation inside the peek/poke functions.
5903 		 */
5904 		ppd->pp_acc_offset *= 2;
5905 		sizemask = 2;
5906 		mem_va = 0;
5907 		maxoff = (MII_MAXREG+1)*2;
5908 		ppfn = peek ? bge_chip_peek_mii : bge_chip_poke_mii;
5909 		break;
5910 
5911 #if	BGE_SEE_IO32
5912 	case BGE_PP_SPACE_SEEPROM:
5913 		/*
5914 		 * Attached SEEPROM(s), if any.
5915 		 * NB: we use the high-order bits of the 'address' as
5916 		 * a device select to accommodate multiple SEEPROMS,
5917 		 * If each one is the maximum size (64kbytes), this
5918 		 * makes them appear contiguous.  Otherwise, there may
5919 		 * be holes in the mapping.  ENxS doesn't have any
5920 		 * SEEPROMs anyway ...
5921 		 */
5922 		sizemask = 4;
5923 		mem_va = 0;
5924 		maxoff = SEEPROM_DEV_AND_ADDR_MASK;
5925 		ppfn = peek ? bge_chip_peek_seeprom : bge_chip_poke_seeprom;
5926 		break;
5927 #endif	/* BGE_SEE_IO32 */
5928 
5929 #if	BGE_FLASH_IO32
5930 	case BGE_PP_SPACE_FLASH:
5931 		/*
5932 		 * Attached Flash device (if any); a maximum of one device
5933 		 * is currently supported.  But it can be up to 1MB (unlike
5934 		 * the 64k limit on SEEPROMs) so why would you need more ;-)
5935 		 */
5936 		sizemask = 4;
5937 		mem_va = 0;
5938 		maxoff = NVM_FLASH_ADDR_MASK;
5939 		ppfn = peek ? bge_chip_peek_flash : bge_chip_poke_flash;
5940 		break;
5941 #endif	/* BGE_FLASH_IO32 */
5942 
5943 	case BGE_PP_SPACE_BGE:
5944 		/*
5945 		 * BGE data structure!
5946 		 */
5947 		sizemask = 8|4|2|1;
5948 		mem_va = (uintptr_t)bgep;
5949 		maxoff = sizeof (*bgep);
5950 		ppfn = peek ? bge_chip_peek_mem : bge_chip_poke_mem;
5951 		break;
5952 
5953 	case BGE_PP_SPACE_STATUS:
5954 	case BGE_PP_SPACE_STATISTICS:
5955 	case BGE_PP_SPACE_TXDESC:
5956 	case BGE_PP_SPACE_TXBUFF:
5957 	case BGE_PP_SPACE_RXDESC:
5958 	case BGE_PP_SPACE_RXBUFF:
5959 		/*
5960 		 * Various DMA_AREAs
5961 		 */
5962 		switch (ppd->pp_acc_space) {
5963 		case BGE_PP_SPACE_TXDESC:
5964 			areap = &bgep->tx_desc;
5965 			break;
5966 		case BGE_PP_SPACE_TXBUFF:
5967 			areap = &bgep->tx_buff[0];
5968 			break;
5969 		case BGE_PP_SPACE_RXDESC:
5970 			areap = &bgep->rx_desc[0];
5971 			break;
5972 		case BGE_PP_SPACE_RXBUFF:
5973 			areap = &bgep->rx_buff[0];
5974 			break;
5975 		case BGE_PP_SPACE_STATUS:
5976 			areap = &bgep->status_block;
5977 			break;
5978 		case BGE_PP_SPACE_STATISTICS:
5979 			if (bgep->chipid.statistic_type == BGE_STAT_BLK)
5980 				areap = &bgep->statistics;
5981 			break;
5982 		}
5983 
5984 		sizemask = 8|4|2|1;
5985 		mem_va = (uintptr_t)areap->mem_va;
5986 		maxoff = areap->alength;
5987 		ppfn = peek ? bge_chip_peek_mem : bge_chip_poke_mem;
5988 		break;
5989 	}
5990 
5991 	switch (ppd->pp_acc_size) {
5992 	default:
5993 		return (IOC_INVAL);
5994 
5995 	case 8:
5996 	case 4:
5997 	case 2:
5998 	case 1:
5999 		if ((ppd->pp_acc_size & sizemask) == 0)
6000 			return (IOC_INVAL);
6001 		break;
6002 	}
6003 
6004 	if ((ppd->pp_acc_offset % ppd->pp_acc_size) != 0)
6005 		return (IOC_INVAL);
6006 
6007 	if (ppd->pp_acc_offset >= maxoff)
6008 		return (IOC_INVAL);
6009 
6010 	if (ppd->pp_acc_offset+ppd->pp_acc_size > maxoff)
6011 		return (IOC_INVAL);
6012 
6013 	/*
6014 	 * All OK - go do it!
6015 	 */
6016 	ppd->pp_acc_offset += mem_va;
6017 	(*ppfn)(bgep, ppd);
6018 	return (peek ? IOC_REPLY : IOC_ACK);
6019 }
6020 
/*
 * Dispatcher for the diagnostic ioctls (BGE_DIAG, BGE_PEEK, BGE_POKE,
 * BGE_PHY_RESET, BGE_SOFT_RESET, BGE_HARD_RESET).  Must be called with
 * the per-instance genlock held.
 */
static enum ioc_reply
bge_diag_ioctl(bge_t *bgep, int cmd, mblk_t *mp, struct iocblk *iocp)
{
	ASSERT(mutex_owned(bgep->genlock));

	switch (cmd) {
	default:
		/* NOTREACHED */
		bge_error(bgep, "bge_diag_ioctl: invalid cmd 0x%x", cmd);
		return (IOC_INVAL);

	case BGE_DIAG:
		/*
		 * Currently a no-op
		 */
		return (IOC_ACK);

	case BGE_PEEK:
	case BGE_POKE:
		return (bge_pp_ioctl(bgep, cmd, mp, iocp));

	case BGE_PHY_RESET:
		return (IOC_RESTART_ACK);

	case BGE_SOFT_RESET:
	case BGE_HARD_RESET:
		/*
		 * Reset and reinitialise the 570x hardware.  Mark the
		 * chip faulted and kick the factotum before restarting
		 * so fault handling runs consistently.
		 */
		bgep->bge_chip_state = BGE_CHIP_FAULT;
		ddi_trigger_softintr(bgep->factotum_id);
		(void) bge_restart(bgep, cmd == BGE_HARD_RESET);
		return (IOC_ACK);
	}

	/* NOTREACHED */
}
6058 
6059 #endif	/* BGE_DEBUGGING || BGE_DO_PPIO */
6060 
6061 static enum ioc_reply
bge_mii_ioctl(bge_t * bgep,int cmd,mblk_t * mp,struct iocblk * iocp)6062 bge_mii_ioctl(bge_t *bgep, int cmd, mblk_t *mp, struct iocblk *iocp)
6063 {
6064 	struct bge_mii_rw *miirwp;
6065 
6066 	/*
6067 	 * Validate format of ioctl
6068 	 */
6069 	if (iocp->ioc_count != sizeof (struct bge_mii_rw))
6070 		return (IOC_INVAL);
6071 	if (mp->b_cont == NULL)
6072 		return (IOC_INVAL);
6073 	miirwp = (void *)mp->b_cont->b_rptr;
6074 
6075 	/*
6076 	 * Validate request parameters ...
6077 	 */
6078 	if (miirwp->mii_reg > MII_MAXREG)
6079 		return (IOC_INVAL);
6080 
6081 	switch (cmd) {
6082 	default:
6083 		/* NOTREACHED */
6084 		bge_error(bgep, "bge_mii_ioctl: invalid cmd 0x%x", cmd);
6085 		return (IOC_INVAL);
6086 
6087 	case BGE_MII_READ:
6088 		miirwp->mii_data = bge_mii_get16(bgep, miirwp->mii_reg);
6089 		return (IOC_REPLY);
6090 
6091 	case BGE_MII_WRITE:
6092 		bge_mii_put16(bgep, miirwp->mii_reg, miirwp->mii_data);
6093 		return (IOC_ACK);
6094 	}
6095 
6096 	/* NOTREACHED */
6097 }
6098 
#if	BGE_SEE_IO32

/*
 * Handler for BGE_SEE_READ/BGE_SEE_WRITE ioctls: validates the message
 * format and SEEPROM address, then performs the NVMEM access.  The
 * result of the access is returned to the caller via ioc_error.
 */
static enum ioc_reply
bge_see_ioctl(bge_t *bgep, int cmd, mblk_t *mp, struct iocblk *iocp)
{
	struct bge_see_rw *seerwp;

	/*
	 * Validate format of ioctl
	 */
	if (iocp->ioc_count != sizeof (struct bge_see_rw))
		return (IOC_INVAL);
	if (mp->b_cont == NULL)
		return (IOC_INVAL);
	seerwp = (void *)mp->b_cont->b_rptr;

	/*
	 * Validate request parameters ...
	 */
	if (seerwp->see_addr & ~SEEPROM_DEV_AND_ADDR_MASK)
		return (IOC_INVAL);

	switch (cmd) {
	default:
		/* NOTREACHED */
		bge_error(bgep, "bge_see_ioctl: invalid cmd 0x%x", cmd);
		return (IOC_INVAL);

	case BGE_SEE_READ:
	case BGE_SEE_WRITE:
		/* cmd doubles as the NVMEM access direction selector */
		iocp->ioc_error = bge_nvmem_rw32(bgep, cmd,
		    seerwp->see_addr, &seerwp->see_data);
		return (IOC_REPLY);
	}

	/* NOTREACHED */
}
6136 
6137 #endif	/* BGE_SEE_IO32 */
6138 
#if	BGE_FLASH_IO32

/*
 * Handler for BGE_FLASH_READ/BGE_FLASH_WRITE ioctls: validates the
 * message format and flash address, then performs the NVMEM access.
 * The result of the access is returned to the caller via ioc_error.
 */
static enum ioc_reply
bge_flash_ioctl(bge_t *bgep, int cmd, mblk_t *mp, struct iocblk *iocp)
{
	struct bge_flash_rw *flashrwp;

	/*
	 * Validate format of ioctl
	 */
	if (iocp->ioc_count != sizeof (struct bge_flash_rw))
		return (IOC_INVAL);
	if (mp->b_cont == NULL)
		return (IOC_INVAL);
	flashrwp = (void *)mp->b_cont->b_rptr;

	/*
	 * Validate request parameters ...
	 */
	if (flashrwp->flash_addr & ~NVM_FLASH_ADDR_MASK)
		return (IOC_INVAL);

	switch (cmd) {
	default:
		/* NOTREACHED */
		bge_error(bgep, "bge_flash_ioctl: invalid cmd 0x%x", cmd);
		return (IOC_INVAL);

	case BGE_FLASH_READ:
	case BGE_FLASH_WRITE:
		/* cmd doubles as the NVMEM access direction selector */
		iocp->ioc_error = bge_nvmem_rw32(bgep, cmd,
		    flashrwp->flash_addr, &flashrwp->flash_data);
		return (IOC_REPLY);
	}

	/* NOTREACHED */
}
6176 
6177 #endif	/* BGE_FLASH_IO32 */
6178 
/*
 * Top-level dispatcher for all bge chip-specific ioctls.  Routes each
 * command to the appropriate sub-handler; diagnostic commands are only
 * available when built with BGE_DEBUGGING or BGE_DO_PPIO.  Must be
 * called with the per-instance genlock held.
 */
enum ioc_reply
bge_chip_ioctl(bge_t *bgep, queue_t *wq, mblk_t *mp, struct iocblk *iocp)
{
	int cmd;

	BGE_TRACE(("bge_chip_ioctl($%p, $%p, $%p, $%p)",
	    (void *)bgep, (void *)wq, (void *)mp, (void *)iocp));

	ASSERT(mutex_owned(bgep->genlock));

	cmd = iocp->ioc_cmd;
	switch (cmd) {
	default:
		/* NOTREACHED */
		bge_error(bgep, "bge_chip_ioctl: invalid cmd 0x%x", cmd);
		return (IOC_INVAL);

	case BGE_DIAG:
	case BGE_PEEK:
	case BGE_POKE:
	case BGE_PHY_RESET:
	case BGE_SOFT_RESET:
	case BGE_HARD_RESET:
#if	BGE_DEBUGGING || BGE_DO_PPIO
		return (bge_diag_ioctl(bgep, cmd, mp, iocp));
#else
		return (IOC_INVAL);
#endif	/* BGE_DEBUGGING || BGE_DO_PPIO */

	case BGE_MII_READ:
	case BGE_MII_WRITE:
		return (bge_mii_ioctl(bgep, cmd, mp, iocp));

#if	BGE_SEE_IO32
	case BGE_SEE_READ:
	case BGE_SEE_WRITE:
		return (bge_see_ioctl(bgep, cmd, mp, iocp));
#endif	/* BGE_SEE_IO32 */

#if	BGE_FLASH_IO32
	case BGE_FLASH_READ:
	case BGE_FLASH_WRITE:
		return (bge_flash_ioctl(bgep, cmd, mp, iocp));
#endif	/* BGE_FLASH_IO32 */
	}

	/* NOTREACHED */
}
6227 
/*
 * Interrupt-blanking / polling-mode callback for a receive ring.
 * Currently only records the poll flag; the coalescing-register updates
 * (which would use <ticks> and <count>) are compiled out — see the
 * NOT_YET comment below — hence the ARGSUSED annotation.
 */
/* ARGSUSED */
void
bge_chip_blank(void *arg, time_t ticks, uint_t count, int flag)
{
	recv_ring_t *rrp = arg;
	bge_t *bgep = rrp->bgep;

	mutex_enter(bgep->genlock);
	rrp->poll_flag = flag;
#ifdef NOT_YET
	/*
	 * XXX-Sunay: Since most broadcom cards support only one
	 * interrupt but multiple rx rings, we can't disable the
	 * physical interrupt. This need to be done via capability
	 * negotiation depending on the NIC.
	 */
	bge_reg_put32(bgep, RCV_COALESCE_TICKS_REG, ticks);
	bge_reg_put32(bgep, RCV_COALESCE_MAX_BD_REG, count);
#endif
	/* Report (but don't act on) any access-handle fault */
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK)
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_UNAFFECTED);
	mutex_exit(bgep->genlock);
}
6251 
6252 #ifdef BGE_IPMI_ASF
6253 
/*
 * Read a 32-bit word of NIC on-chip memory indirectly, by programming
 * the memory-window base address register (MWBAR) in config space and
 * then reading through the memory-window data register (MWDAR).
 * Used by the ASF support code below.
 */
uint32_t
bge_nic_read32(bge_t *bgep, bge_regno_t addr)
{
	uint32_t data;

#ifndef __sparc
	if (!bgep->asf_wordswapped) {
		/*
		 * a workaround word swap error: flip to the other word
		 * of the 64-bit pair (NOTE(review): appears to
		 * compensate for firmware/host word-order mismatch —
		 * confirm against chip documentation)
		 */
		if (addr & 4)
			addr = addr - 4;
		else
			addr = addr + 4;
	}
#else
	/* Newer chip families expect the address in little-endian form */
	if (DEVICE_5717_SERIES_CHIPSETS(bgep) ||
	    DEVICE_5725_SERIES_CHIPSETS(bgep) ||
	    DEVICE_57765_SERIES_CHIPSETS(bgep)) {
		addr = LE_32(addr);
	}
#endif

	/* Point the window, read the data, then park the window at 0 */
	pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_MWBAR, addr);
	data = pci_config_get32(bgep->cfg_handle, PCI_CONF_BGE_MWDAR);
	pci_config_put32(bgep->cfg_handle, PCI_CONF_BGE_MWBAR, 0);

	data = LE_32(data);

	BGE_DEBUG(("bge_nic_read32($%p, 0x%x) => 0x%x",
	    (void *)bgep, addr, data));

	return (data);
}
6286 
/*
 * Post an "OS alive" heartbeat message to the ASF firmware: write the
 * command, length and data into the command mailboxes, then raise the
 * ASF event bit in the RX RISC event register to notify the firmware.
 * The mailbox writes must precede the event trigger.
 */
void
bge_asf_update_status(bge_t *bgep)
{
	uint32_t event;

	bge_nic_put32(bgep, BGE_CMD_MAILBOX, BGE_CMD_NICDRV_ALIVE);
	bge_nic_put32(bgep, BGE_CMD_LENGTH_MAILBOX, 4);
	bge_nic_put32(bgep, BGE_CMD_DATA_MAILBOX,   3);

	event = bge_reg_get32(bgep, RX_RISC_EVENT_REG);
	bge_reg_put32(bgep, RX_RISC_EVENT_REG, event | RRER_ASF_EVENT);
}
6299 
6300 
6301 /*
6302  * The driver is supposed to notify ASF that the OS is still running
6303  * every three seconds, otherwise the management server may attempt
6304  * to reboot the machine.  If it hasn't actually failed, this is
6305  * not a desirable result.  However, this isn't running as a real-time
6306  * thread, and even if it were, it might not be able to generate the
6307  * heartbeat in a timely manner due to system load.  As it isn't a
6308  * significant strain on the machine, we will set the interval to half
6309  * of the required value.
6310  */
6311 void
bge_asf_heartbeat(void * arg)6312 bge_asf_heartbeat(void *arg)
6313 {
6314 	bge_t *bgep = (bge_t *)arg;
6315 
6316 	mutex_enter(bgep->genlock);
6317 	bge_asf_update_status((bge_t *)bgep);
6318 	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK)
6319 		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
6320 	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK)
6321 		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
6322 	mutex_exit(bgep->genlock);
6323 	((bge_t *)bgep)->asf_timeout_id = timeout(bge_asf_heartbeat, bgep,
6324 	    drv_usectohz(BGE_ASF_HEARTBEAT_INTERVAL));
6325 }
6326 
6327 
/*
 * Cancel the ASF heartbeat timeout.  Because bge_asf_heartbeat()
 * rearms itself, a single untimeout() can race with a rearm; loop
 * until the id we cancelled matches the currently-recorded id,
 * i.e. no new timeout was scheduled while we were cancelling.
 */
void
bge_asf_stop_timer(bge_t *bgep)
{
	timeout_id_t tmp_id = 0;

	while ((bgep->asf_timeout_id != 0) &&
	    (tmp_id != bgep->asf_timeout_id)) {
		tmp_id = bgep->asf_timeout_id;
		(void) untimeout(tmp_id);
	}
	bgep->asf_timeout_id = 0;
}
6340 
6341 
6342 
6343 /*
6344  * This function should be placed at the earliest position of bge_attach().
6345  */
6346 void
bge_asf_get_config(bge_t * bgep)6347 bge_asf_get_config(bge_t *bgep)
6348 {
6349 	uint32_t nicsig;
6350 	uint32_t niccfg;
6351 
6352 	bgep->asf_enabled = B_FALSE;
6353 
6354 	/* No ASF if APE present. */
6355 	if (bgep->ape_enabled)
6356 		return;
6357 
6358 	nicsig = bge_nic_read32(bgep, BGE_NIC_DATA_SIG_ADDR);
6359 	if (nicsig == BGE_NIC_DATA_SIG) {
6360 		niccfg = bge_nic_read32(bgep, BGE_NIC_DATA_NIC_CFG_ADDR);
6361 		if (niccfg & BGE_NIC_CFG_ENABLE_ASF)
6362 			/*
6363 			 * Here, we don't consider BAXTER, because BGE haven't
6364 			 * supported BAXTER (that is 5752). Also, as I know,
6365 			 * BAXTER doesn't support ASF feature.
6366 			 */
6367 			bgep->asf_enabled = B_TRUE;
6368 		else
6369 			bgep->asf_enabled = B_FALSE;
6370 	} else
6371 		bgep->asf_enabled = B_FALSE;
6372 }
6373 
6374 
/*
 * Notify the ASF firmware that a chip reset is about to happen:
 * pause the firmware (waiting briefly for its ACK), post the
 * "init done" magic number, and — with the new handshake protocol —
 * post the driver state corresponding to the kind of reset.
 * Finally, inform the APE of the state change where applicable.
 */
void
bge_asf_pre_reset_operations(bge_t *bgep, uint32_t mode)
{
	uint32_t tries;
	uint32_t event;

	ASSERT(bgep->asf_enabled);

	/* Issues "pause firmware" command and wait for ACK */
	bge_nic_put32(bgep, BGE_CMD_MAILBOX, BGE_CMD_NICDRV_PAUSE_FW);
	event = bge_reg_get32(bgep, RX_RISC_EVENT_REG);
	bge_reg_put32(bgep, RX_RISC_EVENT_REG, event | RRER_ASF_EVENT);

	/*
	 * Firmware clears RRER_ASF_EVENT when it has consumed the
	 * command; poll for up to ~100us, then proceed regardless.
	 */
	event = bge_reg_get32(bgep, RX_RISC_EVENT_REG);
	tries = 0;
	while ((event & RRER_ASF_EVENT) && (tries < 100)) {
		drv_usecwait(1);
		tries ++;
		event = bge_reg_get32(bgep, RX_RISC_EVENT_REG);
	}

	bge_nic_put32(bgep, BGE_FIRMWARE_MAILBOX,
	    BGE_MAGIC_NUM_FIRMWARE_INIT_DONE);

	if (bgep->asf_newhandshake) {
		switch (mode) {
		case BGE_INIT_RESET:
			bge_nic_put32(bgep, BGE_DRV_STATE_MAILBOX,
			    BGE_DRV_STATE_START);
			break;
		case BGE_SHUTDOWN_RESET:
			bge_nic_put32(bgep, BGE_DRV_STATE_MAILBOX,
			    BGE_DRV_STATE_UNLOAD);
			break;
		case BGE_SUSPEND_RESET:
			bge_nic_put32(bgep, BGE_DRV_STATE_MAILBOX,
			    BGE_DRV_STATE_SUSPEND);
			break;
		default:
			break;
		}
	}

	/* APE is told on init/suspend here; shutdown is handled post-reset */
	if (mode == BGE_INIT_RESET ||
	    mode == BGE_SUSPEND_RESET)
		bge_ape_driver_state_change(bgep, mode);
}
6422 
6423 
6424 void
bge_asf_post_reset_old_mode(bge_t * bgep,uint32_t mode)6425 bge_asf_post_reset_old_mode(bge_t *bgep, uint32_t mode)
6426 {
6427 	switch (mode) {
6428 	case BGE_INIT_RESET:
6429 		bge_nic_put32(bgep, BGE_DRV_STATE_MAILBOX,
6430 		    BGE_DRV_STATE_START);
6431 		break;
6432 	case BGE_SHUTDOWN_RESET:
6433 		bge_nic_put32(bgep, BGE_DRV_STATE_MAILBOX,
6434 		    BGE_DRV_STATE_UNLOAD);
6435 		break;
6436 	case BGE_SUSPEND_RESET:
6437 		bge_nic_put32(bgep, BGE_DRV_STATE_MAILBOX,
6438 		    BGE_DRV_STATE_SUSPEND);
6439 		break;
6440 	default:
6441 		break;
6442 	}
6443 }
6444 
6445 
/*
 * Post-reset notification to the ASF firmware, new handshake protocol:
 * write the "done" driver state matching the reset type into the state
 * mailbox, then inform the APE on shutdown.  Note that (unlike the old
 * protocol) BGE_SUSPEND_RESET has no post-reset state here.
 */
void
bge_asf_post_reset_new_mode(bge_t *bgep, uint32_t mode)
{
	switch (mode) {
	case BGE_INIT_RESET:
		bge_nic_put32(bgep, BGE_DRV_STATE_MAILBOX,
		    BGE_DRV_STATE_START_DONE);
		break;
	case BGE_SHUTDOWN_RESET:
		bge_nic_put32(bgep, BGE_DRV_STATE_MAILBOX,
		    BGE_DRV_STATE_UNLOAD_DONE);
		break;
	default:
		break;
	}

	if (mode == BGE_SHUTDOWN_RESET)
		bge_ape_driver_state_change(bgep, mode);
}
6465 
6466 #endif /* BGE_IPMI_ASF */
6467