xref: /illumos-gate/usr/src/uts/common/io/nge/nge_chip.c (revision ae5a8bed14db6c16225cac733ea042c27e242d18)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 /*
28  * Copyright (c) 2018, Joyent, Inc.
29  */
30 
31 #include "nge.h"
32 static uint32_t	nge_watchdog_count	= 1 << 5;
33 static uint32_t	nge_watchdog_check	= 1 << 3;
34 extern boolean_t nge_enable_msi;
35 static void nge_sync_mac_modes(nge_t *);
36 
37 #undef NGE_DBG
38 #define	NGE_DBG		NGE_DBG_CHIP
39 
40 /*
41  * Operating register get/set access routines
42  */
43 uint8_t nge_reg_get8(nge_t *ngep, nge_regno_t regno);
44 #pragma	inline(nge_reg_get8)
45 
46 uint8_t
47 nge_reg_get8(nge_t *ngep, nge_regno_t regno)
48 {
49 	NGE_TRACE(("nge_reg_get8($%p, 0x%lx)", (void *)ngep, regno));
50 
51 	return (ddi_get8(ngep->io_handle, PIO_ADDR(ngep, regno)));
52 }
53 
54 void nge_reg_put8(nge_t *ngep, nge_regno_t regno, uint8_t data);
55 #pragma	inline(nge_reg_put8)
56 
57 void
58 nge_reg_put8(nge_t *ngep, nge_regno_t regno, uint8_t data)
59 {
60 	NGE_TRACE(("nge_reg_put8($%p, 0x%lx, 0x%x)",
61 	    (void *)ngep, regno, data));
62 	ddi_put8(ngep->io_handle, PIO_ADDR(ngep, regno), data);
63 
64 }
65 
66 uint16_t nge_reg_get16(nge_t *ngep, nge_regno_t regno);
67 #pragma	inline(nge_reg_get16)
68 
69 uint16_t
70 nge_reg_get16(nge_t *ngep, nge_regno_t regno)
71 {
72 	NGE_TRACE(("nge_reg_get16($%p, 0x%lx)", (void *)ngep, regno));
73 	return (ddi_get16(ngep->io_handle, PIO_ADDR(ngep, regno)));
74 }
75 
76 void nge_reg_put16(nge_t *ngep, nge_regno_t regno, uint16_t data);
77 #pragma	inline(nge_reg_put16)
78 
79 void
80 nge_reg_put16(nge_t *ngep, nge_regno_t regno, uint16_t data)
81 {
82 	NGE_TRACE(("nge_reg_put16($%p, 0x%lx, 0x%x)",
83 	    (void *)ngep, regno, data));
84 	ddi_put16(ngep->io_handle, PIO_ADDR(ngep, regno), data);
85 
86 }
87 
88 uint32_t nge_reg_get32(nge_t *ngep, nge_regno_t regno);
89 #pragma	inline(nge_reg_get32)
90 
91 uint32_t
92 nge_reg_get32(nge_t *ngep, nge_regno_t regno)
93 {
94 	NGE_TRACE(("nge_reg_get32($%p, 0x%lx)", (void *)ngep, regno));
95 	return (ddi_get32(ngep->io_handle, PIO_ADDR(ngep, regno)));
96 }
97 
98 void nge_reg_put32(nge_t *ngep, nge_regno_t regno, uint32_t data);
99 #pragma	inline(nge_reg_put32)
100 
101 void
102 nge_reg_put32(nge_t *ngep, nge_regno_t regno, uint32_t data)
103 {
104 	NGE_TRACE(("nge_reg_put32($%p, 0x%lx, 0x%x)",
105 	    (void *)ngep, regno, data));
106 	ddi_put32(ngep->io_handle, PIO_ADDR(ngep, regno), data);
107 
108 }
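/*
 * Usage sketch (illustrative only): the accessors above are normally
 * used for read-modify-write sequences on the chip's register unions,
 * e.g. clearing one bit of the mode control register:
 *
 *	nge_mode_cntl mode;
 *
 *	mode.mode_val = nge_reg_get32(ngep, NGE_MODE_CNTL);
 *	mode.mode_bits.dma_dis = NGE_CLEAR;
 *	nge_reg_put32(ngep, NGE_MODE_CNTL, mode.mode_val);
 */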
109 
110 #if	NGE_DEBUGGING
111 static int nge_chip_peek_cfg(nge_t *ngep, nge_peekpoke_t *ppd);
112 #pragma	no_inline(nge_chip_peek_cfg)
113 
114 static int
115 nge_chip_peek_cfg(nge_t *ngep, nge_peekpoke_t *ppd)
116 {
117 	int err;
118 	uint64_t regval;
119 	uint64_t regno;
120 
121 	NGE_TRACE(("nge_chip_peek_cfg($%p, $%p)",
122 	    (void *)ngep, (void *)ppd));
123 
124 	err = DDI_SUCCESS;
125 	regno = ppd->pp_acc_offset;
126 
127 	switch (ppd->pp_acc_size) {
128 	case 1:
129 		regval = pci_config_get8(ngep->cfg_handle, regno);
130 		break;
131 
132 	case 2:
133 		regval = pci_config_get16(ngep->cfg_handle, regno);
134 		break;
135 
136 	case 4:
137 		regval = pci_config_get32(ngep->cfg_handle, regno);
138 		break;
139 
140 	case 8:
141 		regval = pci_config_get64(ngep->cfg_handle, regno);
142 		break;
143 	}
144 	ppd->pp_acc_data = regval;
145 	return (err);
146 }
147 
148 static int nge_chip_poke_cfg(nge_t *ngep, nge_peekpoke_t *ppd);
149 
150 static int
151 nge_chip_poke_cfg(nge_t *ngep, nge_peekpoke_t *ppd)
152 {
153 	int err;
154 	uint64_t regval;
155 	uint64_t regno;
156 
157 	NGE_TRACE(("nge_chip_poke_cfg($%p, $%p)",
158 	    (void *)ngep, (void *)ppd));
159 
160 	err = DDI_SUCCESS;
161 	regno = ppd->pp_acc_offset;
162 	regval = ppd->pp_acc_data;
163 
164 	switch (ppd->pp_acc_size) {
165 	case 1:
166 		pci_config_put8(ngep->cfg_handle, regno, regval);
167 		break;
168 
169 	case 2:
170 		pci_config_put16(ngep->cfg_handle, regno, regval);
171 		break;
172 
173 	case 4:
174 		pci_config_put32(ngep->cfg_handle, regno, regval);
175 		break;
176 
177 	case 8:
178 		pci_config_put64(ngep->cfg_handle, regno, regval);
179 		break;
180 	}
181 
182 	return (err);
183 
184 }
185 
186 static int nge_chip_peek_reg(nge_t *ngep, nge_peekpoke_t *ppd);
187 
188 static int
189 nge_chip_peek_reg(nge_t *ngep, nge_peekpoke_t *ppd)
190 {
191 	int err;
192 	uint64_t regval;
193 	void *regaddr;
194 
195 	NGE_TRACE(("nge_chip_peek_reg($%p, $%p)",
196 	    (void *)ngep, (void *)ppd));
197 
198 	err = DDI_SUCCESS;
199 	regaddr = PIO_ADDR(ngep, ppd->pp_acc_offset);
200 
201 	switch (ppd->pp_acc_size) {
202 	case 1:
203 		regval = ddi_get8(ngep->io_handle, regaddr);
204 	break;
205 
206 	case 2:
207 		regval = ddi_get16(ngep->io_handle, regaddr);
208 	break;
209 
210 	case 4:
211 		regval = ddi_get32(ngep->io_handle, regaddr);
212 	break;
213 
214 	case 8:
215 		regval = ddi_get64(ngep->io_handle, regaddr);
216 	break;
217 
218 	default:
219 		regval = 0x0ull;
220 	break;
221 	}
222 	ppd->pp_acc_data = regval;
223 	return (err);
224 }
225 
226 static int nge_chip_poke_reg(nge_t *ngep, nge_peekpoke_t *ppd);
227 
228 static int
229 nge_chip_poke_reg(nge_t *ngep, nge_peekpoke_t *ppd)
230 {
231 	int err;
232 	uint64_t regval;
233 	void *regaddr;
234 
235 	NGE_TRACE(("nge_chip_poke_reg($%p, $%p)",
236 	    (void *)ngep, (void *)ppd));
237 
238 	err = DDI_SUCCESS;
239 	regaddr = PIO_ADDR(ngep, ppd->pp_acc_offset);
240 	regval = ppd->pp_acc_data;
241 
242 	switch (ppd->pp_acc_size) {
243 	case 1:
244 		ddi_put8(ngep->io_handle, regaddr, regval);
245 		break;
246 
247 	case 2:
248 		ddi_put16(ngep->io_handle, regaddr, regval);
249 		break;
250 
251 	case 4:
252 		ddi_put32(ngep->io_handle, regaddr, regval);
253 		break;
254 
255 	case 8:
256 		ddi_put64(ngep->io_handle, regaddr, regval);
257 		break;
258 	}
259 	return (err);
260 }
261 
262 static int nge_chip_peek_mii(nge_t *ngep, nge_peekpoke_t *ppd);
263 #pragma	no_inline(nge_chip_peek_mii)
264 
265 static int
266 nge_chip_peek_mii(nge_t *ngep, nge_peekpoke_t *ppd)
267 {
268 	int err;
269 
270 	err = DDI_SUCCESS;
271 	ppd->pp_acc_data = nge_mii_get16(ngep, ppd->pp_acc_offset/2);
272 	return (err);
273 }
274 
275 static int nge_chip_poke_mii(nge_t *ngep, nge_peekpoke_t *ppd);
276 #pragma	no_inline(nge_chip_poke_mii)
277 
278 static int
279 nge_chip_poke_mii(nge_t *ngep, nge_peekpoke_t *ppd)
280 {
281 	int err;
282 	err = DDI_SUCCESS;
283 	nge_mii_put16(ngep, ppd->pp_acc_offset/2, ppd->pp_acc_data);
284 	return (err);
285 }
286 
287 /*
288  * Basic SEEPROM get/set access routine
289  *
290  * This uses the chip's SEEPROM auto-access method, controlled by the
291  * Serial EEPROM Address/Data Registers at 0x504, so the CPU
292  * doesn't have to fiddle with the individual bits.
293  *
294  * The caller should hold <genlock> and *also* have already acquired
295  * the right to access the SEEPROM.
296  *
297  * Return value:
298  *	DDI_SUCCESS on success,
299  *	DDI_FAILURE on access timeout (maybe retryable: the device may
300  *	just be busy).
301  *
302  * <*dp> is an input to a SEEPROM_CMD_WRITE operation, or an output
303  * from a (successful) SEEPROM_CMD_READ.
304  */
305 
306 static int
307 nge_seeprom_access(nge_t *ngep, uint32_t cmd, nge_regno_t addr, uint16_t *dp)
308 {
309 	uint32_t tries;
310 	nge_ep_cmd cmd_reg;
311 	nge_ep_data data_reg;
312 
313 	NGE_TRACE(("nge_seeprom_access($%p, %d, %x, $%p)",
314 	    (void *)ngep, cmd, addr, (void *)dp));
315 
316 	ASSERT(mutex_owned(ngep->genlock));
317 
318 	/*
319 	 * Check there's no command in progress.
320 	 *
321 	 * Note: this *shouldn't* ever find that there is a command
322 	 * in progress, because we already hold the <genlock> mutex,
323 	 * which ensures we don't conflict with the chip's internal
324 	 * firmware or another process accessing the same (shared)
325 	 * interface.  So this is just a final consistency check: we
326 	 * poll until the status field reads SEEPROM_READY, i.e. no
327 	 * command is started-but-incomplete or completed-but-uncleared.
328 	 */
329 	cmd_reg.cmd_val = nge_reg_get32(ngep, NGE_EP_CMD);
330 	for (tries = 0; tries < 30; tries++) {
331 		if (cmd_reg.cmd_bits.sts == SEEPROM_READY)
332 			break;
333 		drv_usecwait(10);
334 		cmd_reg.cmd_val = nge_reg_get32(ngep, NGE_EP_CMD);
335 	}
336 
337 	/*
338 	 * This should not happen.  If it does, we have to restart the
339 	 * eeprom state machine.
340 	 */
341 	if (tries == 30) {
342 		cmd_reg.cmd_bits.sts = SEEPROM_READY;
343 		nge_reg_put32(ngep, NGE_EP_CMD, cmd_reg.cmd_val);
344 		drv_usecwait(10);
345 		/*
346 		 * Poll the status bit to make sure the eeprom is ready
347 		 */
348 		cmd_reg.cmd_val = nge_reg_get32(ngep, NGE_EP_CMD);
349 		for (tries = 0; tries < 30; tries++) {
350 			if (cmd_reg.cmd_bits.sts == SEEPROM_READY)
351 				break;
352 			drv_usecwait(10);
353 			cmd_reg.cmd_val = nge_reg_get32(ngep, NGE_EP_CMD);
354 		}
355 	}
356 
357 	/*
358 	 * Assemble the command ...
359 	 */
360 	cmd_reg.cmd_bits.addr = (uint32_t)addr;
361 	cmd_reg.cmd_bits.cmd = cmd;
362 	cmd_reg.cmd_bits.sts = 0;
363 
364 	nge_reg_put32(ngep, NGE_EP_CMD, cmd_reg.cmd_val);
365 
366 	/*
367 	 * Poll until the access has completed.
368 	 *
369 	 */
370 	cmd_reg.cmd_val = nge_reg_get32(ngep, NGE_EP_CMD);
371 	for (tries = 0; tries < 30; tries++) {
372 		if (cmd_reg.cmd_bits.sts == SEEPROM_READY)
373 			break;
374 		drv_usecwait(10);
375 		cmd_reg.cmd_val = nge_reg_get32(ngep, NGE_EP_CMD);
376 	}
377 
378 	if (tries == 30) {
379 		nge_report(ngep, NGE_HW_ROM);
380 		return (DDI_FAILURE);
381 	}
382 	switch (cmd) {
383 	default:
384 	case SEEPROM_CMD_WRITE_ENABLE:
385 	case SEEPROM_CMD_ERASE:
386 	case SEEPROM_CMD_ERALSE_ALL:
387 	case SEEPROM_CMD_WRITE_DIS:
388 	break;
389 
390 	case SEEPROM_CMD_READ:
391 		data_reg.data_val = nge_reg_get32(ngep, NGE_EP_DATA);
392 		*dp = data_reg.data_bits.data;
393 	break;
394 
395 	case SEEPROM_CMD_WRITE:
396 		data_reg.data_val = nge_reg_get32(ngep, NGE_EP_DATA);
397 		data_reg.data_bits.data = *dp;
398 		nge_reg_put32(ngep, NGE_EP_DATA, data_reg.data_val);
399 	break;
400 	}
401 
402 	return (DDI_SUCCESS);
403 }
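/*
 * Usage sketch (illustrative only): reading one 16-bit SEEPROM word.
 * The caller must hold <genlock>; the offset 0 here is just an example.
 *
 *	uint16_t word;
 *
 *	ASSERT(mutex_owned(ngep->genlock));
 *	if (nge_seeprom_access(ngep, SEEPROM_CMD_READ, 0, &word) !=
 *	    DDI_SUCCESS)
 *		word = 0xffff;
 */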
404 
405 
406 static int
407 nge_chip_peek_seeprom(nge_t *ngep, nge_peekpoke_t *ppd)
408 {
409 	uint16_t data;
410 	int err;
411 
412 	err = nge_seeprom_access(ngep, SEEPROM_CMD_READ,
413 	    ppd->pp_acc_offset, &data);
414 	ppd->pp_acc_data =  data;
415 	return (err);
416 }
417 
418 static int
419 nge_chip_poke_seeprom(nge_t *ngep, nge_peekpoke_t *ppd)
420 {
421 	uint16_t data;
422 	int err;
423 
424 	data = ppd->pp_acc_data;
425 	err = nge_seeprom_access(ngep, SEEPROM_CMD_WRITE,
426 	    ppd->pp_acc_offset, &data);
427 	return (err);
428 }
429 #endif /* NGE_DEBUGGING */
430 
431 void
432 nge_init_dev_spec_param(nge_t *ngep)
433 {
434 	nge_dev_spec_param_t	*dev_param_p;
435 	chip_info_t	*infop;
436 
437 	dev_param_p = &ngep->dev_spec_param;
438 	infop = (chip_info_t *)&ngep->chipinfo;
439 
440 	switch (infop->device) {
441 	case DEVICE_ID_NF3_E6:
442 	case DEVICE_ID_NF3_DF:
443 	case DEVICE_ID_MCP04_37:
444 	case DEVICE_ID_MCP04_38:
445 		dev_param_p->msi = B_FALSE;
446 		dev_param_p->msi_x = B_FALSE;
447 		dev_param_p->vlan = B_FALSE;
448 		dev_param_p->advanced_pm = B_FALSE;
449 		dev_param_p->mac_addr_order = B_FALSE;
450 		dev_param_p->tx_pause_frame = B_FALSE;
451 		dev_param_p->rx_pause_frame = B_FALSE;
452 		dev_param_p->jumbo = B_FALSE;
453 		dev_param_p->tx_rx_64byte = B_FALSE;
454 		dev_param_p->rx_hw_checksum = B_FALSE;
455 		dev_param_p->tx_hw_checksum = 0;
456 		dev_param_p->desc_type = DESC_OFFLOAD;
457 		dev_param_p->rx_desc_num = NGE_RECV_SLOTS_DESC_1024;
458 		dev_param_p->tx_desc_num = NGE_SEND_SLOTS_DESC_1024;
459 		dev_param_p->nge_split = NGE_SPLIT_32;
460 		break;
461 
462 	case DEVICE_ID_CK804_56:
463 	case DEVICE_ID_CK804_57:
464 		dev_param_p->msi = B_TRUE;
465 		dev_param_p->msi_x = B_TRUE;
466 		dev_param_p->vlan = B_FALSE;
467 		dev_param_p->advanced_pm = B_FALSE;
468 		dev_param_p->mac_addr_order = B_FALSE;
469 		dev_param_p->tx_pause_frame = B_FALSE;
470 		dev_param_p->rx_pause_frame = B_TRUE;
471 		dev_param_p->jumbo = B_TRUE;
472 		dev_param_p->tx_rx_64byte = B_FALSE;
473 		dev_param_p->rx_hw_checksum = B_TRUE;
474 		dev_param_p->tx_hw_checksum = HCKSUM_IPHDRCKSUM;
475 		dev_param_p->desc_type = DESC_HOT;
476 		dev_param_p->rx_desc_num = NGE_RECV_SLOTS_DESC_3072;
477 		dev_param_p->tx_desc_num = NGE_SEND_SLOTS_DESC_3072;
478 		dev_param_p->nge_split = NGE_SPLIT_96;
479 		break;
480 
481 	case DEVICE_ID_MCP51_268:
482 	case DEVICE_ID_MCP51_269:
483 		dev_param_p->msi = B_FALSE;
484 		dev_param_p->msi_x = B_FALSE;
485 		dev_param_p->vlan = B_FALSE;
486 		dev_param_p->advanced_pm = B_TRUE;
487 		dev_param_p->mac_addr_order = B_FALSE;
488 		dev_param_p->tx_pause_frame = B_FALSE;
489 		dev_param_p->rx_pause_frame = B_FALSE;
490 		dev_param_p->jumbo = B_FALSE;
491 		dev_param_p->tx_rx_64byte = B_TRUE;
492 		dev_param_p->rx_hw_checksum = B_FALSE;
493 		dev_param_p->tx_hw_checksum = 0;
494 		dev_param_p->desc_type = DESC_OFFLOAD;
495 		dev_param_p->rx_desc_num = NGE_RECV_SLOTS_DESC_1024;
496 		dev_param_p->tx_desc_num = NGE_SEND_SLOTS_DESC_1024;
497 		dev_param_p->nge_split = NGE_SPLIT_32;
498 		break;
499 
500 	case DEVICE_ID_MCP55_372:
501 	case DEVICE_ID_MCP55_373:
502 		dev_param_p->msi = B_TRUE;
503 		dev_param_p->msi_x = B_TRUE;
504 		dev_param_p->vlan = B_TRUE;
505 		dev_param_p->advanced_pm = B_TRUE;
506 		dev_param_p->mac_addr_order = B_FALSE;
507 		dev_param_p->tx_pause_frame = B_TRUE;
508 		dev_param_p->rx_pause_frame = B_TRUE;
509 		dev_param_p->jumbo = B_TRUE;
510 		dev_param_p->tx_rx_64byte = B_TRUE;
511 		dev_param_p->rx_hw_checksum = B_TRUE;
512 		dev_param_p->tx_hw_checksum = HCKSUM_IPHDRCKSUM;
513 		dev_param_p->desc_type = DESC_HOT;
514 		dev_param_p->rx_desc_num = NGE_RECV_SLOTS_DESC_3072;
515 		dev_param_p->tx_desc_num = NGE_SEND_SLOTS_DESC_3072;
516 		dev_param_p->nge_split = NGE_SPLIT_96;
517 		break;
518 
519 	case DEVICE_ID_MCP61_3EE:
520 	case DEVICE_ID_MCP61_3EF:
521 		dev_param_p->msi = B_FALSE;
522 		dev_param_p->msi_x = B_FALSE;
523 		dev_param_p->vlan = B_FALSE;
524 		dev_param_p->advanced_pm = B_TRUE;
525 		dev_param_p->mac_addr_order = B_TRUE;
526 		dev_param_p->tx_pause_frame = B_FALSE;
527 		dev_param_p->rx_pause_frame = B_FALSE;
528 		dev_param_p->jumbo = B_FALSE;
529 		dev_param_p->tx_rx_64byte = B_TRUE;
530 		dev_param_p->rx_hw_checksum = B_FALSE;
531 		dev_param_p->tx_hw_checksum = 0;
532 		dev_param_p->desc_type = DESC_OFFLOAD;
533 		dev_param_p->rx_desc_num = NGE_RECV_SLOTS_DESC_1024;
534 		dev_param_p->tx_desc_num = NGE_SEND_SLOTS_DESC_1024;
535 		dev_param_p->nge_split = NGE_SPLIT_32;
536 		break;
537 
538 	case DEVICE_ID_MCP77_760:
539 	case DEVICE_ID_MCP79_AB0:
540 		dev_param_p->msi = B_FALSE;
541 		dev_param_p->msi_x = B_FALSE;
542 		dev_param_p->vlan = B_FALSE;
543 		dev_param_p->advanced_pm = B_TRUE;
544 		dev_param_p->mac_addr_order = B_TRUE;
545 		dev_param_p->tx_pause_frame = B_FALSE;
546 		dev_param_p->rx_pause_frame = B_FALSE;
547 		dev_param_p->jumbo = B_FALSE;
548 		dev_param_p->tx_rx_64byte = B_TRUE;
549 		dev_param_p->rx_hw_checksum = B_FALSE;
550 		dev_param_p->tx_hw_checksum = 0;
551 		dev_param_p->desc_type = DESC_HOT;
552 		dev_param_p->rx_desc_num = NGE_RECV_SLOTS_DESC_1024;
553 		dev_param_p->tx_desc_num = NGE_SEND_SLOTS_DESC_1024;
554 		dev_param_p->nge_split = NGE_SPLIT_32;
555 		break;
556 
557 	default:
558 		dev_param_p->msi = B_FALSE;
559 		dev_param_p->msi_x = B_FALSE;
560 		dev_param_p->vlan = B_FALSE;
561 		dev_param_p->advanced_pm = B_FALSE;
562 		dev_param_p->mac_addr_order = B_FALSE;
563 		dev_param_p->tx_pause_frame = B_FALSE;
564 		dev_param_p->rx_pause_frame = B_FALSE;
565 		dev_param_p->jumbo = B_FALSE;
566 		dev_param_p->tx_rx_64byte = B_FALSE;
567 		dev_param_p->rx_hw_checksum = B_FALSE;
568 		dev_param_p->tx_hw_checksum = 0;
569 		dev_param_p->desc_type = DESC_OFFLOAD;
570 		dev_param_p->rx_desc_num = NGE_RECV_SLOTS_DESC_1024;
571 		dev_param_p->tx_desc_num = NGE_SEND_SLOTS_DESC_1024;
572 		dev_param_p->nge_split = NGE_SPLIT_32;
573 		return;
574 	}
575 }
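/*
 * Illustrative note: these per-device defaults are consulted later in
 * this file, e.g. nge_chip_start() only sets the Rx/Tx pause-enable
 * bits when rx_pause_frame/tx_pause_frame are B_TRUE, and
 * nge_buff_setup() only enables hardware receive checksum when
 * rx_hw_checksum is B_TRUE.
 */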
576 /*
577  * Perform first-stage chip (re-)initialisation, using only config-space
578  * accesses:
579  *
580  * + Read the vendor/device/revision/subsystem/cache-line-size registers,
581  *   returning the data in the structure pointed to by <infop>.
582  */
583 void nge_chip_cfg_init(nge_t *ngep, chip_info_t *infop, boolean_t reset);
584 #pragma	no_inline(nge_chip_cfg_init)
585 
586 void
587 nge_chip_cfg_init(nge_t *ngep, chip_info_t *infop, boolean_t reset)
588 {
589 	uint16_t command;
590 	ddi_acc_handle_t handle;
591 	nge_interbus_conf interbus_conf;
592 	nge_msi_mask_conf msi_mask_conf;
593 	nge_msi_map_cap_conf cap_conf;
594 
595 	NGE_TRACE(("nge_chip_cfg_init($%p, $%p, %d)",
596 	    (void *)ngep, (void *)infop, reset));
597 
598 	/*
599 	 * save PCI cache line size and subsystem vendor ID
600 	 *
601 	 * Read all the config-space registers that characterise the
602 	 * chip, specifically vendor/device/revision/subsystem vendor
603 	 * and subsystem device id.  We expect (but don't check) that
604  * and subsystem device id.
605 	handle = ngep->cfg_handle;
606 	/* read the vendor information only once */
607 	if (reset == B_FALSE) {
608 		infop->command = pci_config_get16(handle,
609 		    PCI_CONF_COMM);
610 		infop->vendor = pci_config_get16(handle,
611 		    PCI_CONF_VENID);
612 		infop->device = pci_config_get16(handle,
613 		    PCI_CONF_DEVID);
614 		infop->subven = pci_config_get16(handle,
615 		    PCI_CONF_SUBVENID);
616 		infop->subdev = pci_config_get16(handle,
617 		    PCI_CONF_SUBSYSID);
618 		infop->class_code = pci_config_get8(handle,
619 		    PCI_CONF_BASCLASS);
620 		infop->revision = pci_config_get8(handle,
621 		    PCI_CONF_REVID);
622 		infop->clsize = pci_config_get8(handle,
623 		    PCI_CONF_CACHE_LINESZ);
624 		infop->latency = pci_config_get8(handle,
625 		    PCI_CONF_LATENCY_TIMER);
626 	}
627 	if (nge_enable_msi) {
628 		/* Clear the hidden MSI-disable bit so MSI can be used */
629 		interbus_conf.conf_val = pci_config_get32(handle,
630 		    PCI_CONF_HT_INTERNAL);
631 		if ((infop->device == DEVICE_ID_MCP55_373) ||
632 		    (infop->device == DEVICE_ID_MCP55_372))
633 			interbus_conf.conf_bits.msix_off = NGE_SET;
634 		interbus_conf.conf_bits.msi_off = NGE_CLEAR;
635 		pci_config_put32(handle, PCI_CONF_HT_INTERNAL,
636 		    interbus_conf.conf_val);
637 
638 		if ((infop->device == DEVICE_ID_MCP55_373) ||
639 		    (infop->device == DEVICE_ID_MCP55_372)) {
640 
641 			/* Clear the per-vector mask-off bits for mcp55 */
642 			msi_mask_conf.msi_mask_conf_val =
643 			    pci_config_get32(handle, PCI_CONF_HT_MSI_MASK);
644 			msi_mask_conf.msi_mask_bits.vec0_off = NGE_CLEAR;
645 			msi_mask_conf.msi_mask_bits.vec1_off = NGE_CLEAR;
646 			msi_mask_conf.msi_mask_bits.vec2_off = NGE_CLEAR;
647 			msi_mask_conf.msi_mask_bits.vec3_off = NGE_CLEAR;
648 			msi_mask_conf.msi_mask_bits.vec4_off = NGE_CLEAR;
649 			msi_mask_conf.msi_mask_bits.vec5_off = NGE_CLEAR;
650 			msi_mask_conf.msi_mask_bits.vec6_off = NGE_CLEAR;
651 			msi_mask_conf.msi_mask_bits.vec7_off = NGE_CLEAR;
652 			pci_config_put32(handle, PCI_CONF_HT_MSI_MASK,
653 			    msi_mask_conf.msi_mask_conf_val);
654 
655 			/* Enable the MSI mapping */
656 			cap_conf.msi_map_cap_conf_val =
657 			    pci_config_get32(handle, PCI_CONF_HT_MSI_MAP_CAP);
658 			cap_conf.map_cap_conf_bits.map_en = NGE_SET;
659 			pci_config_put32(handle, PCI_CONF_HT_MSI_MAP_CAP,
660 			    cap_conf.msi_map_cap_conf_val);
661 		}
662 	} else {
663 		interbus_conf.conf_val = pci_config_get32(handle,
664 		    PCI_CONF_HT_INTERNAL);
665 		interbus_conf.conf_bits.msi_off = NGE_SET;
666 		pci_config_put32(handle, PCI_CONF_HT_INTERNAL,
667 		    interbus_conf.conf_val);
668 	}
669 	command = infop->command | PCI_COMM_MAE;
670 	command &= ~PCI_COMM_MEMWR_INVAL;
671 	command |= PCI_COMM_ME;
672 	pci_config_put16(handle, PCI_CONF_COMM, command);
673 	pci_config_put16(handle, PCI_CONF_STAT, ~0);
674 
675 }
676 
677 int
678 nge_chip_stop(nge_t *ngep, boolean_t fault)
679 {
680 	int err;
681 	uint32_t reg_val;
682 	uint32_t	tries;
683 	nge_mintr_src mintr_src;
684 	nge_mii_cs mii_cs;
685 	nge_rx_poll rx_poll;
686 	nge_tx_poll tx_poll;
687 	nge_rx_en rx_en;
688 	nge_tx_en tx_en;
689 	nge_tx_sta tx_sta;
690 	nge_rx_sta rx_sta;
691 	nge_mode_cntl mode;
692 	nge_pmu_cntl2 pmu_cntl2;
693 
694 	NGE_TRACE(("nge_chip_stop($%p, %d)", (void *)ngep, fault));
695 
696 	err = DDI_SUCCESS;
697 
698 	/* Clear any pending PHY interrupt */
699 	mintr_src.src_val = nge_reg_get8(ngep, NGE_MINTR_SRC);
700 	nge_reg_put8(ngep, NGE_MINTR_SRC, mintr_src.src_val);
701 
702 	/* Mask all interrupts */
703 	reg_val = nge_reg_get32(ngep, NGE_INTR_MASK);
704 	reg_val &= ~NGE_INTR_ALL_EN;
705 	nge_reg_put32(ngep, NGE_INTR_MASK, reg_val);
706 
707 	/* Disable auto-polling of phy */
708 	mii_cs.cs_val = nge_reg_get32(ngep, NGE_MII_CS);
709 	mii_cs.cs_bits.ap_en = NGE_CLEAR;
710 	nge_reg_put32(ngep, NGE_MII_CS, mii_cs.cs_val);
711 
712 	/* Reset buffer management & DMA */
713 	mode.mode_val = nge_reg_get32(ngep, NGE_MODE_CNTL);
714 	mode.mode_bits.dma_dis = NGE_SET;
715 	mode.mode_bits.desc_type = ngep->desc_mode;
716 	nge_reg_put32(ngep, NGE_MODE_CNTL, mode.mode_val);
717 
718 	for (tries = 0; tries < 10000; tries++) {
719 		drv_usecwait(10);
720 		mode.mode_val = nge_reg_get32(ngep, NGE_MODE_CNTL);
721 		if (mode.mode_bits.dma_status == NGE_SET)
722 			break;
723 	}
724 	if (tries == 10000) {
725 		ngep->nge_chip_state = NGE_CHIP_ERROR;
726 		return (DDI_FAILURE);
727 	}
728 
729 	/* Disable rx's machine */
730 	rx_en.val = nge_reg_get8(ngep, NGE_RX_EN);
731 	rx_en.bits.rx_en = NGE_CLEAR;
732 	nge_reg_put8(ngep, NGE_RX_EN, rx_en.val);
733 
734 	/* Disable tx's machine */
735 	tx_en.val = nge_reg_get8(ngep, NGE_TX_EN);
736 	tx_en.bits.tx_en = NGE_CLEAR;
737 	nge_reg_put8(ngep, NGE_TX_EN, tx_en.val);
738 
739 	/*
740 	 * Clear the status of the tx state machine
741 	 * and make sure the tx channel is idle
742 	 */
743 	tx_sta.sta_val = nge_reg_get32(ngep, NGE_TX_STA);
744 	for (tries = 0; tries < 1000; tries++) {
745 		if (tx_sta.sta_bits.tx_chan_sta == NGE_CLEAR)
746 			break;
747 		drv_usecwait(10);
748 		tx_sta.sta_val = nge_reg_get32(ngep, NGE_TX_STA);
749 	}
750 	if (tries == 1000) {
751 		ngep->nge_chip_state = NGE_CHIP_ERROR;
752 		return (DDI_FAILURE);
753 	}
754 	nge_reg_put32(ngep, NGE_TX_STA,  tx_sta.sta_val);
755 
756 	/*
757 	 * Clear the status of the rx state machine
758 	 * and make sure the rx channel is idle
759 	 */
760 	rx_sta.sta_val = nge_reg_get32(ngep, NGE_RX_STA);
761 	for (tries = 0; tries < 1000; tries++) {
762 		if (rx_sta.sta_bits.rx_chan_sta == NGE_CLEAR)
763 			break;
764 		drv_usecwait(10);
765 		rx_sta.sta_val = nge_reg_get32(ngep, NGE_RX_STA);
766 	}
767 	if (tries == 1000) {
768 		ngep->nge_chip_state = NGE_CHIP_ERROR;
769 		return (DDI_FAILURE);
770 	}
771 	nge_reg_put32(ngep, NGE_RX_STA, rx_sta.sta_val);
772 
773 	/* Disable auto-poll of rx's state machine */
774 	rx_poll.poll_val = nge_reg_get32(ngep, NGE_RX_POLL);
775 	rx_poll.poll_bits.rpen = NGE_CLEAR;
776 	rx_poll.poll_bits.rpi = NGE_CLEAR;
777 	nge_reg_put32(ngep, NGE_RX_POLL, rx_poll.poll_val);
778 
779 	/* Disable auto-polling of tx's  state machine */
780 	tx_poll.poll_val = nge_reg_get32(ngep, NGE_TX_POLL);
781 	tx_poll.poll_bits.tpen = NGE_CLEAR;
782 	tx_poll.poll_bits.tpi = NGE_CLEAR;
783 	nge_reg_put32(ngep, NGE_TX_POLL, tx_poll.poll_val);
784 
785 	/* Reset buffer management */
786 	mode.mode_val = nge_reg_get32(ngep, NGE_MODE_CNTL);
787 	mode.mode_bits.bm_reset = NGE_SET;
788 	mode.mode_bits.tx_rcom_en = NGE_SET;
789 	nge_reg_put32(ngep, NGE_MODE_CNTL, mode.mode_val);
790 
791 	if (ngep->dev_spec_param.advanced_pm) {
792 
793 		nge_reg_put32(ngep, NGE_PMU_CIDLE_LIMIT, 0);
794 		nge_reg_put32(ngep, NGE_PMU_DIDLE_LIMIT, 0);
795 
796 		pmu_cntl2.cntl2_val = nge_reg_get32(ngep, NGE_PMU_CNTL2);
797 		pmu_cntl2.cntl2_bits.cidle_timer = NGE_CLEAR;
798 		pmu_cntl2.cntl2_bits.didle_timer = NGE_CLEAR;
799 		nge_reg_put32(ngep, NGE_PMU_CNTL2, pmu_cntl2.cntl2_val);
800 	}
801 	if (fault)
802 		ngep->nge_chip_state = NGE_CHIP_FAULT;
803 	else
804 		ngep->nge_chip_state = NGE_CHIP_STOPPED;
805 
806 	return (err);
807 }
808 
809 static void
810 nge_rx_setup(nge_t *ngep)
811 {
812 	uint64_t desc_addr;
813 	nge_rxtx_dlen dlen;
814 	nge_rx_poll rx_poll;
815 
816 	/*
817 	 * Fill in the address and length of the rx descriptor ring
818 	 */
819 	desc_addr = ngep->recv->desc.cookie.dmac_laddress;
820 	nge_reg_put32(ngep, NGE_RX_DADR, desc_addr);
821 	nge_reg_put32(ngep, NGE_RX_DADR_HI, desc_addr >> 32);
822 	dlen.dlen_val = nge_reg_get32(ngep, NGE_RXTX_DLEN);
823 	dlen.dlen_bits.rdlen = ngep->recv->desc.nslots - 1;
824 	nge_reg_put32(ngep, NGE_RXTX_DLEN, dlen.dlen_val);
825 
826 	rx_poll.poll_val = nge_reg_get32(ngep, NGE_RX_POLL);
827 	rx_poll.poll_bits.rpi = RX_POLL_INTV_1G;
828 	rx_poll.poll_bits.rpen = NGE_SET;
829 	nge_reg_put32(ngep, NGE_RX_POLL, rx_poll.poll_val);
830 }
831 
832 static void
833 nge_tx_setup(nge_t *ngep)
834 {
835 	uint64_t desc_addr;
836 	nge_rxtx_dlen dlen;
837 
838 	/*
839 	 * Fill in the address and length of the tx descriptor ring
840 	 */
841 	desc_addr = ngep->send->desc.cookie.dmac_laddress;
842 	nge_reg_put32(ngep, NGE_TX_DADR, desc_addr);
843 	nge_reg_put32(ngep, NGE_TX_DADR_HI, desc_addr >> 32);
844 	dlen.dlen_val = nge_reg_get32(ngep, NGE_RXTX_DLEN);
845 	dlen.dlen_bits.tdlen = ngep->send->desc.nslots - 1;
846 	nge_reg_put32(ngep, NGE_RXTX_DLEN, dlen.dlen_val);
847 }
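/*
 * Illustrative note: both rings above publish a 64-bit DMA cookie
 * address as a low/high register pair.  For a (hypothetical) cookie
 * address 0x0000000123456780, the low register receives 0x23456780
 * and the *_HI register receives 0x00000001; nge_reg_put32() simply
 * truncates the 64-bit value to its low 32 bits on the first write.
 */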
848 
849 static int
850 nge_buff_setup(nge_t *ngep)
851 {
852 	nge_mode_cntl mode_cntl;
853 	nge_dev_spec_param_t	*dev_param_p;
854 
855 	dev_param_p = &ngep->dev_spec_param;
856 
857 	/*
858 	 * Configure the Rx and Tx buffer rings
859 	 */
860 	nge_rx_setup(ngep);
861 	nge_tx_setup(ngep);
862 
863 	/*
864 	 * Configure buffer attribute
865 	 */
866 	mode_cntl.mode_val = nge_reg_get32(ngep, NGE_MODE_CNTL);
867 
868 	/*
869 	 * Enable DMA access requests
870 	 */
871 	mode_cntl.mode_bits.dma_dis = NGE_CLEAR;
872 
873 	/*
874 	 * Enable Buffer management
875 	 */
876 	mode_cntl.mode_bits.bm_reset = NGE_CLEAR;
877 
878 	/*
879 	 * Select the descriptor type (offload or hot)
880 	 */
881 	mode_cntl.mode_bits.desc_type = ngep->desc_mode;
882 
883 	/*
884 	 * Support receive hardware checksum
885 	 */
886 	if (dev_param_p->rx_hw_checksum) {
887 		mode_cntl.mode_bits.rx_sum_en = NGE_SET;
888 	} else
889 		mode_cntl.mode_bits.rx_sum_en = NGE_CLEAR;
890 
891 	/*
892 	 * Disable Tx PRD coarse update
893 	 */
894 	mode_cntl.mode_bits.tx_prd_cu_en = NGE_CLEAR;
895 
896 	/*
897 	 * Disable 64-byte access
898 	 */
899 	mode_cntl.mode_bits.w64_dis = NGE_SET;
900 
901 	/*
902 	 * Skipping Rx error frames is not supported; if it is
903 	 * enabled, jumbo frames no longer work.
904 	 */
905 	mode_cntl.mode_bits.rx_filter_en = NGE_CLEAR;
906 
907 	/*
908 	 * Hot mode is not supported for now
909 	 */
910 	mode_cntl.mode_bits.resv15 = NGE_CLEAR;
911 
912 	if (dev_param_p->vlan) {
913 		/* Disable the vlan strip for devices which support vlan */
914 		mode_cntl.mode_bits.vlan_strip = NGE_CLEAR;
915 
916 		/* Disable the vlan insert for devices which support vlan */
917 		mode_cntl.mode_bits.vlan_ins = NGE_CLEAR;
918 	}
919 
920 	if (dev_param_p->tx_rx_64byte) {
921 
922 		/* Set the maximum TX PRD fetch size to 64 bytes */
923 		mode_cntl.mode_bits.tx_fetch_prd = NGE_SET;
924 
925 		/* Set the maximum RX PRD fetch size to 64 bytes */
926 		mode_cntl.mode_bits.rx_fetch_prd = NGE_SET;
927 	}
928 	/*
929 	 * Upload Rx data as it arrives, rather than waiting for full frame
930 	 */
931 	mode_cntl.mode_bits.resv16 = NGE_CLEAR;
932 
933 	/*
934 	 * Normal HOT table accesses
935 	 */
936 	mode_cntl.mode_bits.resv17 = NGE_CLEAR;
937 
938 	/*
939 	 * Normal HOT buffer requesting
940 	 */
941 	mode_cntl.mode_bits.resv18 = NGE_CLEAR;
942 	nge_reg_put32(ngep, NGE_MODE_CNTL, mode_cntl.mode_val);
943 
944 	/*
945 	 * Signal controller to check for new Rx descriptors
946 	 */
947 	mode_cntl.mode_val = nge_reg_get32(ngep, NGE_MODE_CNTL);
948 	mode_cntl.mode_bits.rxdm = NGE_SET;
949 	mode_cntl.mode_bits.tx_rcom_en = NGE_SET;
950 	nge_reg_put32(ngep, NGE_MODE_CNTL, mode_cntl.mode_val);
951 
952 
953 	return (DDI_SUCCESS);
954 }
955 
956 /*
957  * When the chipset resets, it cannot restore the original
958  * mac address to the mac address registers.
959  *
960  * When the driver is detached, this function writes the original
961  * mac address back to the mac address registers.
962  */
963 
964 void
965 nge_restore_mac_addr(nge_t *ngep)
966 {
967 	uint32_t mac_addr;
968 
969 	mac_addr = (uint32_t)ngep->chipinfo.hw_mac_addr;
970 	nge_reg_put32(ngep, NGE_UNI_ADDR0, mac_addr);
971 	mac_addr = (uint32_t)(ngep->chipinfo.hw_mac_addr >> 32);
972 	nge_reg_put32(ngep, NGE_UNI_ADDR1, mac_addr);
973 }
974 
975 int
976 nge_chip_reset(nge_t *ngep)
977 {
978 	int err;
979 	uint8_t i;
980 	uint32_t regno;
981 	uint64_t mac = 0;
982 	nge_uni_addr1 uaddr1;
983 	nge_cp_cntl ee_cntl;
984 	nge_soft_misc soft_misc;
985 	nge_pmu_cntl0 pmu_cntl0;
986 	nge_pmu_cntl2 pmu_cntl2;
987 	nge_pm_cntl2 pm_cntl2;
988 	const nge_ksindex_t *ksip;
989 
990 	NGE_TRACE(("nge_chip_reset($%p)", (void *)ngep));
991 
992 	/*
993 	 * Clear the statistics by reading the statistics registers
994 	 */
995 	for (ksip = nge_statistics; ksip->name != NULL; ++ksip) {
996 		regno = KS_BASE + ksip->index * sizeof (uint32_t);
997 		(void) nge_reg_get32(ngep, regno);
998 	}
999 
1000 	/*
1001 	 * Setup seeprom control
1002 	 */
1003 	ee_cntl.cntl_val = nge_reg_get32(ngep, NGE_EP_CNTL);
1004 	ee_cntl.cntl_bits.clkdiv = EEPROM_CLKDIV;
1005 	ee_cntl.cntl_bits.rom_size = EEPROM_32K;
1006 	ee_cntl.cntl_bits.word_wid = ACCESS_16BIT;
1007 	ee_cntl.cntl_bits.wait_slots = EEPROM_WAITCLK;
1008 	nge_reg_put32(ngep, NGE_EP_CNTL, ee_cntl.cntl_val);
1009 
1010 	/*
1011 	 * Reading the unicast mac address table
1012 	 */
1013 	if (ngep->nge_chip_state == NGE_CHIP_INITIAL) {
1014 		uaddr1.addr_val = nge_reg_get32(ngep, NGE_UNI_ADDR1);
1015 		mac = uaddr1.addr_bits.addr;
1016 		mac <<= 32;
1017 		mac |= nge_reg_get32(ngep, NGE_UNI_ADDR0);
1018 		ngep->chipinfo.hw_mac_addr = mac;
1019 		if (ngep->dev_spec_param.mac_addr_order) {
1020 			for (i = 0; i < ETHERADDRL; i++) {
1021 				ngep->chipinfo.vendor_addr.addr[i] =
1022 				    (uchar_t)mac;
1023 				ngep->cur_uni_addr.addr[i] =
1024 				    (uchar_t)mac;
1025 				mac >>= 8;
1026 			}
1027 		} else {
1028 			for (i = ETHERADDRL; i-- != 0; ) {
1029 				ngep->chipinfo.vendor_addr.addr[i] =
1030 				    (uchar_t)mac;
1031 				ngep->cur_uni_addr.addr[i] =
1032 				    (uchar_t)mac;
1033 				mac >>= 8;
1034 			}
1035 		}
1036 		ngep->chipinfo.vendor_addr.set = 1;
1037 	}
1038 	pci_config_put8(ngep->cfg_handle, PCI_CONF_CACHE_LINESZ,
1039 	    ngep->chipinfo.clsize);
1040 	pci_config_put8(ngep->cfg_handle, PCI_CONF_LATENCY_TIMER,
1041 	    ngep->chipinfo.latency);
1042 
1043 
1044 	if (ngep->dev_spec_param.advanced_pm) {
1045 
1046 		/* Program software misc register */
1047 		soft_misc.misc_val = nge_reg_get32(ngep, NGE_SOFT_MISC);
1048 		soft_misc.misc_bits.rx_clk_vx_rst = NGE_SET;
1049 		soft_misc.misc_bits.tx_clk_vx_rst = NGE_SET;
1050 		soft_misc.misc_bits.clk12m_vx_rst = NGE_SET;
1051 		soft_misc.misc_bits.fpci_clk_vx_rst = NGE_SET;
1052 		soft_misc.misc_bits.rx_clk_vc_rst = NGE_SET;
1053 		soft_misc.misc_bits.tx_clk_vc_rst = NGE_SET;
1054 		soft_misc.misc_bits.fs_clk_vc_rst = NGE_SET;
1055 		soft_misc.misc_bits.rst_ex_m2pintf = NGE_SET;
1056 		nge_reg_put32(ngep, NGE_SOFT_MISC, soft_misc.misc_val);
1057 
1058 		/* wait for 32 us */
1059 		drv_usecwait(32);
1060 
1061 		soft_misc.misc_val = nge_reg_get32(ngep, NGE_SOFT_MISC);
1062 		soft_misc.misc_bits.rx_clk_vx_rst = NGE_CLEAR;
1063 		soft_misc.misc_bits.tx_clk_vx_rst = NGE_CLEAR;
1064 		soft_misc.misc_bits.clk12m_vx_rst = NGE_CLEAR;
1065 		soft_misc.misc_bits.fpci_clk_vx_rst = NGE_CLEAR;
1066 		soft_misc.misc_bits.rx_clk_vc_rst = NGE_CLEAR;
1067 		soft_misc.misc_bits.tx_clk_vc_rst = NGE_CLEAR;
1068 		soft_misc.misc_bits.fs_clk_vc_rst = NGE_CLEAR;
1069 		soft_misc.misc_bits.rst_ex_m2pintf = NGE_CLEAR;
1070 		nge_reg_put32(ngep, NGE_SOFT_MISC, soft_misc.misc_val);
1071 
1072 		/* Program PMU registers */
1073 		pmu_cntl0.cntl0_val = nge_reg_get32(ngep, NGE_PMU_CNTL0);
1074 		pmu_cntl0.cntl0_bits.core_spd10_fp =
1075 		    NGE_PMU_CORE_SPD10_BUSY;
1076 		pmu_cntl0.cntl0_bits.core_spd10_idle =
1077 		    NGE_PMU_CORE_SPD10_IDLE;
1078 		pmu_cntl0.cntl0_bits.core_spd100_fp =
1079 		    NGE_PMU_CORE_SPD100_BUSY;
1080 		pmu_cntl0.cntl0_bits.core_spd100_idle =
1081 		    NGE_PMU_CORE_SPD100_IDLE;
1082 		pmu_cntl0.cntl0_bits.core_spd1000_fp =
1083 		    NGE_PMU_CORE_SPD1000_BUSY;
1084 		pmu_cntl0.cntl0_bits.core_spd1000_idle =
1085 		    NGE_PMU_CORE_SPD100_IDLE;
1086 		pmu_cntl0.cntl0_bits.core_spd10_idle =
1087 		    NGE_PMU_CORE_SPD10_IDLE;
1088 		nge_reg_put32(ngep, NGE_PMU_CNTL0, pmu_cntl0.cntl0_val);
1089 
1090 		/* Set the core idle limit value */
1091 		nge_reg_put32(ngep, NGE_PMU_CIDLE_LIMIT,
1092 		    NGE_PMU_CIDLE_LIMIT_DEF);
1093 
1094 		/* Set the device idle limit value */
1095 		nge_reg_put32(ngep, NGE_PMU_DIDLE_LIMIT,
1096 		    NGE_PMU_DIDLE_LIMIT_DEF);
1097 
1098 		/* Enable the core/device idle timer in PMU control 2 */
1099 		pmu_cntl2.cntl2_val = nge_reg_get32(ngep, NGE_PMU_CNTL2);
1100 		pmu_cntl2.cntl2_bits.cidle_timer = NGE_SET;
1101 		pmu_cntl2.cntl2_bits.didle_timer = NGE_SET;
1102 		pmu_cntl2.cntl2_bits.core_enable = NGE_SET;
1103 		pmu_cntl2.cntl2_bits.dev_enable = NGE_SET;
1104 		nge_reg_put32(ngep, NGE_PMU_CNTL2, pmu_cntl2.cntl2_val);
1105 	}
1106 	/*
1107 	 * Stop the chipset and clear buffer management
1108 	 */
1109 	err = nge_chip_stop(ngep, B_FALSE);
1110 	if (err == DDI_FAILURE)
1111 		return (err);
1112 	/*
1113 	 * Clear the power state bits for phy since interface no longer
1114 	 * works after rebooting from Windows on a multi-boot machine
1115 	 */
1116 	if (ngep->chipinfo.device == DEVICE_ID_MCP51_268 ||
1117 	    ngep->chipinfo.device == DEVICE_ID_MCP51_269 ||
1118 	    ngep->chipinfo.device == DEVICE_ID_MCP55_372 ||
1119 	    ngep->chipinfo.device == DEVICE_ID_MCP55_373 ||
1120 	    ngep->chipinfo.device == DEVICE_ID_MCP61_3EE ||
1121 	    ngep->chipinfo.device == DEVICE_ID_MCP61_3EF ||
1122 	    ngep->chipinfo.device == DEVICE_ID_MCP77_760 ||
1123 	    ngep->chipinfo.device == DEVICE_ID_MCP79_AB0) {
1124 
1125 		pm_cntl2.cntl_val = nge_reg_get32(ngep, NGE_PM_CNTL2);
1126 		/* bring phy out of coma mode */
1127 		pm_cntl2.cntl_bits.phy_coma_set = NGE_CLEAR;
1128 		/* disable auto reset coma bits */
1129 		pm_cntl2.cntl_bits.resv4 = NGE_CLEAR;
1130 		/* restore power to gated clocks */
1131 		pm_cntl2.cntl_bits.resv8_11 = NGE_CLEAR;
1132 		nge_reg_put32(ngep, NGE_PM_CNTL2, pm_cntl2.cntl_val);
1133 	}
1134 
1135 	ngep->nge_chip_state = NGE_CHIP_RESET;
1136 	return (DDI_SUCCESS);
1137 }
1138 
1139 int
1140 nge_chip_start(nge_t *ngep)
1141 {
1142 	int err;
1143 	nge_itc itc;
1144 	nge_tx_cntl tx_cntl;
1145 	nge_rx_cntrl0 rx_cntl0;
1146 	nge_rx_cntl1 rx_cntl1;
1147 	nge_tx_en tx_en;
1148 	nge_rx_en rx_en;
1149 	nge_mii_cs mii_cs;
1150 	nge_swtr_cntl swtr_cntl;
1151 	nge_rx_fifo_wm rx_fifo;
1152 	nge_intr_mask intr_mask;
1153 	nge_mintr_mask mintr_mask;
1154 	nge_dev_spec_param_t	*dev_param_p;
1155 
1156 	NGE_TRACE(("nge_chip_start($%p)", (void *)ngep));
1157 
1158 	/*
1159 	 * Setup buffer management
1160 	 */
1161 	err = nge_buff_setup(ngep);
1162 	if (err == DDI_FAILURE)
1163 		return (err);
1164 
1165 	dev_param_p = &ngep->dev_spec_param;
1166 
1167 	/*
1168 	 * Enable PHY auto-polling
1169 	 */
1170 	mii_cs.cs_val = nge_reg_get32(ngep, NGE_MII_CS);
1171 	mii_cs.cs_bits.ap_paddr = ngep->phy_xmii_addr;
1172 	mii_cs.cs_bits.ap_en = NGE_SET;
1173 	mii_cs.cs_bits.ap_intv = MII_POLL_INTV;
1174 	nge_reg_put32(ngep, NGE_MII_CS, mii_cs.cs_val);
1175 
1176 	/*
1177 	 * Setup link
1178 	 */
1179 	(*ngep->physops->phys_update)(ngep);
1180 
1181 	/*
1182 	 * Configure the tx's parameters
1183 	 */
1184 	tx_cntl.cntl_val = nge_reg_get32(ngep, NGE_TX_CNTL);
1185 	if (dev_param_p->tx_pause_frame)
1186 		tx_cntl.cntl_bits.paen = NGE_SET;
1187 	else
1188 		tx_cntl.cntl_bits.paen = NGE_CLEAR;
1189 	tx_cntl.cntl_bits.retry_en = NGE_SET;
1190 	tx_cntl.cntl_bits.pad_en = NGE_SET;
1191 	tx_cntl.cntl_bits.fappend_en = NGE_SET;
1192 	tx_cntl.cntl_bits.two_def_en = NGE_SET;
1193 	tx_cntl.cntl_bits.max_retry = 15;
1194 	tx_cntl.cntl_bits.burst_en = NGE_CLEAR;
1195 	tx_cntl.cntl_bits.uflo_err_mask = NGE_CLEAR;
1196 	tx_cntl.cntl_bits.tlcol_mask = NGE_CLEAR;
1197 	tx_cntl.cntl_bits.lcar_mask = NGE_CLEAR;
1198 	tx_cntl.cntl_bits.def_mask = NGE_CLEAR;
1199 	tx_cntl.cntl_bits.exdef_mask = NGE_SET;
1200 	tx_cntl.cntl_bits.lcar_mask = NGE_SET;
1201 	tx_cntl.cntl_bits.tlcol_mask = NGE_SET;
1202 	tx_cntl.cntl_bits.uflo_err_mask = NGE_SET;
1203 	tx_cntl.cntl_bits.jam_seq_en = NGE_CLEAR;
1204 	nge_reg_put32(ngep, NGE_TX_CNTL, tx_cntl.cntl_val);
1205 
1206 
1207 	/*
1208 	 * Configure the parameters of Rx's state machine
1209 	 * Enable the following parameters:
1210 	 * 1). Pad Strip
1211 	 * 2). FCS Relay
1212 	 * 3). Pause
1213 	 * 4). Address filter
1214 	 * 5). Runt Packet receive
1215 	 * 6). Broadcast
1216 	 * 7). Receive Deferral
1217 	 *
1218 	 * Disable the following parameters for decreasing
1219 	 * the number of interrupts:
1220 	 * 1). Runt Interrupt.
1221 	 * 2). Rx's Late Collision interrupt.
1222 	 * 3). Rx's Max length Error Interrupt.
1223 	 * 4). Rx's Length Field error Interrupt.
1224 	 * 5). Rx's FCS error interrupt.
1225 	 * 6). Rx's overflow error interrupt.
1226 	 * 7). Rx's Frame alignment error interrupt.
1227 	 */
1228 	rx_cntl0.cntl_val = nge_reg_get32(ngep, NGE_RX_CNTL0);
1229 	rx_cntl0.cntl_bits.padsen = NGE_CLEAR;
1230 	rx_cntl0.cntl_bits.fcsren = NGE_CLEAR;
1231 	if (dev_param_p->rx_pause_frame)
1232 		rx_cntl0.cntl_bits.paen = NGE_SET;
1233 	else
1234 		rx_cntl0.cntl_bits.paen = NGE_CLEAR;
1235 	rx_cntl0.cntl_bits.lben = NGE_CLEAR;
1236 	rx_cntl0.cntl_bits.afen = NGE_SET;
1237 	rx_cntl0.cntl_bits.runten = NGE_CLEAR;
1238 	rx_cntl0.cntl_bits.brdis = NGE_CLEAR;
1239 	rx_cntl0.cntl_bits.rdfen = NGE_CLEAR;
1240 	rx_cntl0.cntl_bits.runtm = NGE_CLEAR;
1241 	rx_cntl0.cntl_bits.slfb = NGE_CLEAR;
1242 	rx_cntl0.cntl_bits.rlcolm = NGE_CLEAR;
1243 	rx_cntl0.cntl_bits.maxerm = NGE_CLEAR;
1244 	rx_cntl0.cntl_bits.lferm = NGE_CLEAR;
1245 	rx_cntl0.cntl_bits.crcm = NGE_CLEAR;
1246 	rx_cntl0.cntl_bits.ofolm = NGE_CLEAR;
1247 	rx_cntl0.cntl_bits.framerm = NGE_CLEAR;
1248 	nge_reg_put32(ngep, NGE_RX_CNTL0, rx_cntl0.cntl_val);
1249 
1250 	/*
1251 	 * Configure the watermarks for the rx state machine
1252 	 */
1253 	rx_fifo.wm_val = nge_reg_get32(ngep, NGE_RX_FIFO_WM);
1254 	rx_fifo.wm_bits.data_hwm = ngep->rx_datahwm;
1255 	rx_fifo.wm_bits.prd_lwm = ngep->rx_prdlwm;
1256 	rx_fifo.wm_bits.prd_hwm = ngep->rx_prdhwm;
1257 	nge_reg_put32(ngep, NGE_RX_FIFO_WM, rx_fifo.wm_val);
1258 
1259 	/*
1260 	 * Configure the deferral time slot for the rx state machine
1261 	 */
1262 	nge_reg_put8(ngep, NGE_RX_DEf, ngep->rx_def);
1263 
1264 	/*
1265 	 * Configure the maximum rx packet length
1266 	 */
1267 	rx_cntl1.cntl_val = nge_reg_get32(ngep, NGE_RX_CNTL1);
1268 	rx_cntl1.cntl_bits.length = ngep->max_sdu;
1269 	nge_reg_put32(ngep, NGE_RX_CNTL1, rx_cntl1.cntl_val);
1270 	/*
1271 	 * Enable Tx's state machine
1272 	 */
1273 	tx_en.val = nge_reg_get8(ngep, NGE_TX_EN);
1274 	tx_en.bits.tx_en = NGE_SET;
1275 	nge_reg_put8(ngep, NGE_TX_EN, tx_en.val);
1276 
1277 	/*
1278 	 * Enable Rx's state machine
1279 	 */
1280 	rx_en.val = nge_reg_get8(ngep, NGE_RX_EN);
1281 	rx_en.bits.rx_en = NGE_SET;
1282 	nge_reg_put8(ngep, NGE_RX_EN, rx_en.val);
1283 
1284 	itc.itc_val = nge_reg_get32(ngep, NGE_SWTR_ITC);
1285 	itc.itc_bits.sw_intv = ngep->sw_intr_intv;
1286 	nge_reg_put32(ngep, NGE_SWTR_ITC, itc.itc_val);
1287 
1288 	swtr_cntl.ctrl_val = nge_reg_get8(ngep, NGE_SWTR_CNTL);
1289 	swtr_cntl.cntl_bits.sten = NGE_SET;
1290 	swtr_cntl.cntl_bits.stren = NGE_SET;
1291 	nge_reg_put32(ngep, NGE_SWTR_CNTL, swtr_cntl.ctrl_val);
1292 
1293 	/*
1294 	 * Disable all mii read/write operation interrupts
1295 	 */
1296 	mintr_mask.mask_val = nge_reg_get8(ngep, NGE_MINTR_MASK);
1297 	mintr_mask.mask_bits.mrei = NGE_CLEAR;
1298 	mintr_mask.mask_bits.mcc2 = NGE_CLEAR;
1299 	mintr_mask.mask_bits.mcc1 = NGE_CLEAR;
1300 	mintr_mask.mask_bits.mapi = NGE_SET;
1301 	mintr_mask.mask_bits.mpdi = NGE_SET;
1302 	nge_reg_put8(ngep, NGE_MINTR_MASK, mintr_mask.mask_val);
1303 
1304 	/*
1305 	 * Enable the interrupt events we want to service
1306 	 */
1307 	intr_mask.mask_val = nge_reg_get32(ngep, NGE_INTR_MASK);
1308 	intr_mask.mask_bits.reint = NGE_SET;
1309 	intr_mask.mask_bits.rcint = NGE_SET;
1310 	intr_mask.mask_bits.miss = NGE_SET;
1311 	intr_mask.mask_bits.teint = NGE_SET;
1312 	intr_mask.mask_bits.tcint = NGE_CLEAR;
1313 	intr_mask.mask_bits.stint = NGE_CLEAR;
1314 	intr_mask.mask_bits.mint = NGE_CLEAR;
1315 	intr_mask.mask_bits.rfint = NGE_CLEAR;
1316 	intr_mask.mask_bits.tfint = NGE_SET;
1317 	intr_mask.mask_bits.feint = NGE_SET;
1318 	intr_mask.mask_bits.resv10 = NGE_CLEAR;
1319 	intr_mask.mask_bits.resv11 = NGE_CLEAR;
1320 	intr_mask.mask_bits.resv12 = NGE_CLEAR;
1321 	intr_mask.mask_bits.resv13 = NGE_CLEAR;
1322 	intr_mask.mask_bits.phyint = NGE_CLEAR;
1323 	ngep->intr_masks = intr_mask.mask_val;
1324 	nge_reg_put32(ngep, NGE_INTR_MASK, intr_mask.mask_val);
1325 	ngep->nge_chip_state = NGE_CHIP_RUNNING;
1326 	return (DDI_SUCCESS);
1327 }
1328 
1329 /*
1330  * nge_chip_sync() -- program the chip with the unicast MAC address,
1331  * the multicast hash table, and the required level of promiscuity.
1332  */
1333 void
1334 nge_chip_sync(nge_t *ngep)
1335 {
1336 	uint8_t i;
1337 	uint64_t macaddr;
1338 	uint64_t mul_addr;
1339 	uint64_t mul_mask;
1340 	nge_rx_cntrl0 rx_cntl;
1341 	nge_uni_addr1 uni_adr1;
1342 
1343 	NGE_TRACE(("nge_chip_sync($%p)", (void *)ngep));
1344 
1345 	macaddr = 0x0ull;
1346 	mul_addr = 0x0ull;
1347 	mul_mask = 0x0ull;
1348 	rx_cntl.cntl_val = nge_reg_get32(ngep, NGE_RX_CNTL0);
1349 
1350 	if (ngep->promisc) {
1351 		rx_cntl.cntl_bits.afen = NGE_CLEAR;
1352 		rx_cntl.cntl_bits.brdis = NGE_SET;
1353 	} else {
1354 		rx_cntl.cntl_bits.afen = NGE_SET;
1355 		rx_cntl.cntl_bits.brdis = NGE_CLEAR;
1356 	}
1357 
1358 	/*
1359 	 * Transform the unicast MAC address(es) from host to
1360 	 * chip format ...
1361 	 */
1362 	for (i = ETHERADDRL, macaddr = 0ull; i != 0; --i) {
1363 		macaddr |= ngep->cur_uni_addr.addr[i-1];
1364 		macaddr <<= (i > 1) ? 8 : 0;
1365 	}
1366 
1367 	nge_reg_put32(ngep, NGE_UNI_ADDR0, (uint32_t)macaddr);
1368 	macaddr = macaddr >> 32;
1369 	uni_adr1.addr_val = nge_reg_get32(ngep, NGE_UNI_ADDR1);
1370 	uni_adr1.addr_bits.addr = (uint16_t)macaddr;
1371 	uni_adr1.addr_bits.resv16_31 = (uint16_t)0;
1372 	nge_reg_put32(ngep, NGE_UNI_ADDR1, uni_adr1.addr_val);
1373 
1374 	/*
1375 	 * Reprogram the  multicast address table ...
1376 	 */
1377 	for (i = ETHERADDRL, mul_addr = 0ull; i != 0; --i) {
1378 		mul_addr |= ngep->cur_mul_addr.addr[i-1];
1379 		mul_addr <<= (i > 1) ? 8 : 0;
1380 		mul_mask |= ngep->cur_mul_mask.addr[i-1];
1381 		mul_mask <<= (i > 1) ? 8 : 0;
1382 	}
1383 	nge_reg_put32(ngep, NGE_MUL_ADDR0, (uint32_t)mul_addr);
1384 	mul_addr >>= 32;
1385 	nge_reg_put32(ngep, NGE_MUL_ADDR1, mul_addr);
1386 	nge_reg_put32(ngep, NGE_MUL_MASK, (uint32_t)mul_mask);
1387 	mul_mask >>= 32;
1388 	nge_reg_put32(ngep, NGE_MUL_MASK1, mul_mask);
1389 	/*
1390 	 * Set or clear the PROMISCUOUS mode bit
1391 	 */
1392 	nge_reg_put32(ngep, NGE_RX_CNTL0, rx_cntl.cntl_val);
1393 	/*
1394 	 * For internal PHY loopback, the link will not be up,
1395 	 * so we need to sync the mac modes directly.
1396 	 */
1397 	if (ngep->param_loop_mode == NGE_LOOP_INTERNAL_PHY)
1398 		nge_sync_mac_modes(ngep);
1399 }
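/*
 * Worked example (illustrative only): for a hypothetical host-order
 * address 00:0c:29:aa:bb:cc, the packing loops above place addr[0]
 * in the least-significant byte, so the chip ends up programmed with
 *
 *	NGE_UNI_ADDR0      = 0xaa290c00
 *	NGE_UNI_ADDR1.addr = 0xccbb
 */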
1400 
1401 static void
1402 nge_chip_err(nge_t *ngep)
1403 {
1404 	nge_reg010 reg010_ins;
1405 	nge_sw_statistics_t *psw_stat;
1406 	nge_intr_mask intr_mask;
1407 
1408 	NGE_TRACE(("nge_chip_err($%p)", (void *)ngep));
1409 
1410 	psw_stat = (nge_sw_statistics_t *)&ngep->statistics.sw_statistics;
1411 	reg010_ins.reg010_val = nge_reg_get32(ngep, NGE_REG010);
1412 	if (reg010_ins.reg010_bits.resv0)
1413 		psw_stat->fe_err.tso_err_mss ++;
1414 
1415 	if (reg010_ins.reg010_bits.resv1)
1416 		psw_stat->fe_err.tso_dis ++;
1417 
1418 	if (reg010_ins.reg010_bits.resv2)
1419 		psw_stat->fe_err.tso_err_nosum ++;
1420 
1421 	if (reg010_ins.reg010_bits.resv3)
1422 		psw_stat->fe_err.tso_err_hov ++;
1423 
1424 	if (reg010_ins.reg010_bits.resv4)
1425 		psw_stat->fe_err.tso_err_huf ++;
1426 
1427 	if (reg010_ins.reg010_bits.resv5)
1428 		psw_stat->fe_err.tso_err_l2 ++;
1429 
1430 	if (reg010_ins.reg010_bits.resv6)
1431 		psw_stat->fe_err.tso_err_ip ++;
1432 
1433 	if (reg010_ins.reg010_bits.resv7)
1434 		psw_stat->fe_err.tso_err_l4 ++;
1435 
1436 	if (reg010_ins.reg010_bits.resv8)
1437 		psw_stat->fe_err.tso_err_tcp ++;
1438 
1439 	if (reg010_ins.reg010_bits.resv9)
1440 		psw_stat->fe_err.hsum_err_ip ++;
1441 
1442 	if (reg010_ins.reg010_bits.resv10)
1443 		psw_stat->fe_err.hsum_err_l4 ++;
1444 
1445 	if (reg010_ins.reg010_val != 0) {
1446 
1447 		/*
1448 		 * Fatal error is triggered by malformed driver commands.
1449 		 * Disable unless debugging.
1450 		 */
1451 		intr_mask.mask_val = nge_reg_get32(ngep, NGE_INTR_MASK);
1452 		intr_mask.mask_bits.feint = NGE_CLEAR;
1453 		nge_reg_put32(ngep, NGE_INTR_MASK, intr_mask.mask_val);
1454 		ngep->intr_masks = intr_mask.mask_val;
1455 
1456 	}
1457 }
1458 
1459 static void
1460 nge_sync_mac_modes(nge_t *ngep)
1461 {
1462 	nge_tx_def tx_def;
1463 	nge_tx_fifo_wm tx_fifo;
1464 	nge_bkoff_cntl bk_cntl;
1465 	nge_mac2phy m2p;
1466 	nge_rx_cntrl0 rx_cntl0;
1467 	nge_tx_cntl tx_cntl;
1468 	nge_dev_spec_param_t	*dev_param_p;
1469 
1470 	dev_param_p = &ngep->dev_spec_param;
1471 
1472 	tx_def.def_val = nge_reg_get32(ngep, NGE_TX_DEF);
1473 	m2p.m2p_val = nge_reg_get32(ngep, NGE_MAC2PHY);
1474 	tx_fifo.wm_val = nge_reg_get32(ngep, NGE_TX_FIFO_WM);
1475 	bk_cntl.cntl_val = nge_reg_get32(ngep, NGE_BKOFF_CNTL);
1476 	bk_cntl.bkoff_bits.rseed = BKOFF_RSEED;
1477 	switch (ngep->param_link_speed) {
1478 	case 10:
1479 		m2p.m2p_bits.speed = low_speed;
1480 		tx_def.def_bits.ifg1_def = TX_IFG1_DEFAULT;
1481 		if (ngep->phy_mode == RGMII_IN) {
1482 			tx_def.def_bits.ifg2_def = TX_IFG2_RGMII_10_100;
1483 			tx_def.def_bits.if_def = TX_IFG_RGMII_OTHER;
1484 		} else {
1485 			tx_def.def_bits.if_def = TX_TIFG_MII;
1486 			tx_def.def_bits.ifg2_def = TX_IFG2_MII;
1487 		}
1488 		tx_fifo.wm_bits.nbfb_wm = TX_FIFO_NOB_WM_MII;
1489 		bk_cntl.bkoff_bits.sltm = BKOFF_SLIM_MII;
1490 		break;
1491 
1492 	case 100:
1493 		m2p.m2p_bits.speed = fast_speed;
1494 		tx_def.def_bits.ifg1_def = TX_IFG1_DEFAULT;
1495 		if (ngep->phy_mode == RGMII_IN) {
1496 			tx_def.def_bits.ifg2_def = TX_IFG2_RGMII_10_100;
1497 			tx_def.def_bits.if_def = TX_IFG_RGMII_OTHER;
1498 		} else {
1499 			tx_def.def_bits.if_def = TX_TIFG_MII;
1500 			tx_def.def_bits.ifg2_def = TX_IFG2_MII;
1501 		}
1502 		tx_fifo.wm_bits.nbfb_wm = TX_FIFO_NOB_WM_MII;
1503 		bk_cntl.bkoff_bits.sltm = BKOFF_SLIM_MII;
1504 		break;
1505 
1506 	case 1000:
1507 		m2p.m2p_bits.speed = giga_speed;
1508 		tx_def.def_bits.ifg1_def = TX_IFG1_DEFAULT;
1509 		if (ngep->param_link_duplex == LINK_DUPLEX_FULL) {
1510 			tx_def.def_bits.ifg2_def = TX_IFG2_RGMII_1000;
1511 			tx_def.def_bits.if_def = TX_IFG_RGMII_1000_FD;
1512 		} else {
1513 			tx_def.def_bits.ifg2_def = TX_IFG2_RGMII_1000;
1514 			tx_def.def_bits.if_def = TX_IFG_RGMII_OTHER;
1515 		}
1516 
1517 		tx_fifo.wm_bits.nbfb_wm = TX_FIFO_NOB_WM_GMII;
1518 		bk_cntl.bkoff_bits.sltm = BKOFF_SLIM_GMII;
1519 		break;
1520 	}
1521 
1522 	if (ngep->chipinfo.device == DEVICE_ID_MCP55_373 ||
1523 	    ngep->chipinfo.device == DEVICE_ID_MCP55_372) {
1524 		m2p.m2p_bits.phyintr = NGE_CLEAR;
1525 		m2p.m2p_bits.phyintrlvl = NGE_CLEAR;
1526 	}
1527 	if (ngep->param_link_duplex == LINK_DUPLEX_HALF) {
1528 		m2p.m2p_bits.hdup_en = NGE_SET;
1529 	} else {
1530 		m2p.m2p_bits.hdup_en = NGE_CLEAR;
1531 	}
1532 	nge_reg_put32(ngep, NGE_MAC2PHY, m2p.m2p_val);
1533 	nge_reg_put32(ngep, NGE_TX_DEF, tx_def.def_val);
1534 
1535 	tx_fifo.wm_bits.data_lwm = TX_FIFO_DATA_LWM;
1536 	tx_fifo.wm_bits.prd_lwm = TX_FIFO_PRD_LWM;
1537 	tx_fifo.wm_bits.uprd_hwm = TX_FIFO_PRD_HWM;
1538 	tx_fifo.wm_bits.fb_wm = TX_FIFO_TBFW;
1539 	nge_reg_put32(ngep, NGE_TX_FIFO_WM, tx_fifo.wm_val);
1540 
1541 	nge_reg_put32(ngep, NGE_BKOFF_CNTL, bk_cntl.cntl_val);
1542 
1543 	rx_cntl0.cntl_val = nge_reg_get32(ngep, NGE_RX_CNTL0);
1544 	if (ngep->param_link_rx_pause && dev_param_p->rx_pause_frame) {
1545 		if (rx_cntl0.cntl_bits.paen == NGE_CLEAR) {
1546 			rx_cntl0.cntl_bits.paen = NGE_SET;
1547 			nge_reg_put32(ngep, NGE_RX_CNTL0, rx_cntl0.cntl_val);
1548 		}
1549 	} else {
1550 		if (rx_cntl0.cntl_bits.paen == NGE_SET) {
1551 			rx_cntl0.cntl_bits.paen = NGE_CLEAR;
1552 			nge_reg_put32(ngep, NGE_RX_CNTL0, rx_cntl0.cntl_val);
1553 		}
1554 	}
1555 
1556 	tx_cntl.cntl_val = nge_reg_get32(ngep, NGE_TX_CNTL);
1557 	if (ngep->param_link_tx_pause && dev_param_p->tx_pause_frame) {
1558 		if (tx_cntl.cntl_bits.paen == NGE_CLEAR) {
1559 			tx_cntl.cntl_bits.paen = NGE_SET;
1560 			nge_reg_put32(ngep, NGE_TX_CNTL, tx_cntl.cntl_val);
1561 		}
1562 	} else {
1563 		if (tx_cntl.cntl_bits.paen == NGE_SET) {
1564 			tx_cntl.cntl_bits.paen = NGE_CLEAR;
1565 			nge_reg_put32(ngep, NGE_TX_CNTL, tx_cntl.cntl_val);
1566 		}
1567 	}
1568 }
1569 
1570 /*
1571  * Handler for hardware link state change.
1572  *
1573  * When this routine is called, the hardware link state has changed
1574  * and the new state is reflected in the param_* variables.  Here
1575  * we must update the softstate, reprogram the MAC to match, and
1576  * record the change in the log and/or on the console.
1577  */
1578 static void
1579 nge_factotum_link_handler(nge_t *ngep)
1580 {
1581 	/*
1582 	 * Update the s/w link_state
1583 	 */
1584 	if (ngep->param_link_up)
1585 		ngep->link_state = LINK_STATE_UP;
1586 	else
1587 		ngep->link_state = LINK_STATE_DOWN;
1588 
1589 	/*
1590 	 * Reprogram the MAC modes to match
1591 	 */
1592 	nge_sync_mac_modes(ngep);
1593 }
1594 
1595 static boolean_t
1596 nge_factotum_link_check(nge_t *ngep)
1597 {
1598 	boolean_t lchg;
1599 	boolean_t check;
1600 
1601 	ASSERT(mutex_owned(ngep->genlock));
1602 
1603 	(*ngep->physops->phys_check)(ngep);
1604 	switch (ngep->link_state) {
1605 	case LINK_STATE_UP:
1606 		lchg = (ngep->param_link_up == B_FALSE);
1607 		check = (ngep->param_link_up == B_FALSE);
1608 		break;
1609 
1610 	case LINK_STATE_DOWN:
1611 		lchg = (ngep->param_link_up == B_TRUE);
1612 		check = (ngep->param_link_up == B_TRUE);
1613 		break;
1614 
1615 	default:
1616 		check = B_TRUE;
1617 		break;
1618 	}
1619 
1620 	/*
1621 	 * If <check> is false, we're sure the link hasn't changed.
1622 	 * If true, however, it's not yet definitive; we have to call
1623 	 * nge_phys_check() to determine whether the link has settled
1624 	 * state change handler.  But when the chip is a 5700 in a Dell
1625 	 * 6650, the link may have changed even if check is false, so we
1626 	 * have to call nge_phys_check() to determine the link state.
1627 	 * have to call nge_phys_check() to determine the link state.
1628 	 */
1629 	if (check)
1630 		nge_factotum_link_handler(ngep);
1631 
1632 	return (lchg);
1633 }
1634 
1635 /*
1636  * Factotum routine to check for Tx stall, using the 'watchdog' counter
1637  */
1638 static boolean_t nge_factotum_stall_check(nge_t *ngep);
1639 
1640 static boolean_t
1641 nge_factotum_stall_check(nge_t *ngep)
1642 {
1643 	uint32_t dogval;
1644 	send_ring_t *srp;
1645 	srp = ngep->send;
1646 	/*
1647 	 * Specific check for Tx stall ...
1648 	 *
1649 	 * The 'watchdog' counter is incremented whenever a packet
1650 	 * is queued, reset to 1 when some (but not all) buffers
1651 	 * are reclaimed, reset to 0 (disabled) when all buffers
1652 	 * are reclaimed, and shifted left here.  If it exceeds the
1653 	 * threshold value, the chip is assumed to have stalled and
1654 	 * is put into the ERROR state.  The factotum will then reset
1655 	 * it on the next pass.
1656 	 *
1657 	 * All of which should ensure that we don't get into a state
1658 	 * where packets are left pending indefinitely!
1659 	 */
1660 	if (ngep->watchdog == 0 &&
1661 	    srp->tx_free < srp->desc.nslots)
1662 		ngep->watchdog = 1;
1663 	dogval = nge_atomic_shl32(&ngep->watchdog, 1);
1664 	if (dogval >= nge_watchdog_check)
1665 		nge_tx_recycle(ngep, B_FALSE);
1666 	if (dogval < nge_watchdog_count)
1667 		return (B_FALSE);
1668 	else {
1669 		ngep->statistics.sw_statistics.tx_stall++;
1670 		return (B_TRUE);
1671 	}
1672 }
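/*
 * Illustrative numbers: with the defaults at the top of this file
 * (nge_watchdog_check = 1 << 3, nge_watchdog_count = 1 << 5), a ring
 * that stays partially unreclaimed triggers a forced nge_tx_recycle()
 * once the shifted counter reaches 8, and is reported as a Tx stall
 * once it reaches 32 -- i.e. a couple of factotum passes later.
 */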
1673 
1674 
1675 /*
1676  * The factotum is woken up when there's something to do that we'd rather
1677  * not do from inside a hardware interrupt handler or high-level cyclic.
1678  * Its two main tasks are:
1679  *	reset & restart the chip after an error
1680  *	check the link status whenever necessary
1681  */
1682 /* ARGSUSED */
1683 uint_t
1684 nge_chip_factotum(caddr_t args1, caddr_t args2)
1685 {
1686 	uint_t result;
1687 	nge_t *ngep;
1688 	boolean_t err;
1689 	boolean_t linkchg;
1690 
1691 	ngep = (nge_t *)args1;
1692 
1693 	NGE_TRACE(("nge_chip_factotum($%p)", (void *)ngep));
1694 
1695 	mutex_enter(ngep->softlock);
1696 	if (ngep->factotum_flag == 0) {
1697 		mutex_exit(ngep->softlock);
1698 		return (DDI_INTR_UNCLAIMED);
1699 	}
1700 	ngep->factotum_flag = 0;
1701 	mutex_exit(ngep->softlock);
1702 	err = B_FALSE;
1703 	linkchg = B_FALSE;
1704 	result = DDI_INTR_CLAIMED;
1705 
1706 	mutex_enter(ngep->genlock);
1707 	switch (ngep->nge_chip_state) {
1708 	default:
1709 		break;
1710 
1711 	case NGE_CHIP_RUNNING:
1712 		linkchg = nge_factotum_link_check(ngep);
1713 		err = nge_factotum_stall_check(ngep);
1714 		break;
1715 
1716 	case NGE_CHIP_FAULT:
1717 		(void) nge_restart(ngep);
1718 		NGE_REPORT((ngep, "automatic recovery activated"));
1719 		break;
1720 	}
1721 
1722 	if (err)
1723 		(void) nge_chip_stop(ngep, B_TRUE);
1724 	mutex_exit(ngep->genlock);
1725 
1726 	/*
1727 	 * If the link state changed, tell the world about it (if
1728 	 * this version of MAC supports link state notification).
1729 	 * Note: can't do this while still holding the mutex.
1730 	 */
1731 	if (linkchg)
1732 		mac_link_update(ngep->mh, ngep->link_state);
1733 
1734 	return (result);
1735 
1736 }
1737 
1738 static void
1739 nge_intr_handle(nge_t *ngep, nge_intr_src *pintr_src)
1740 {
1741 	boolean_t brx;
1742 	boolean_t btx;
1743 	nge_mintr_src mintr_src;
1744 
1745 	brx = B_FALSE;
1746 	btx = B_FALSE;
1747 	ngep->statistics.sw_statistics.intr_count++;
1748 	ngep->statistics.sw_statistics.intr_lval = pintr_src->intr_val;
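	/*
	 * Classify the attention bits: any of reint/miss/rcint/stint
	 * indicates receive-side work, while teint/tfint indicates
	 * that transmit descriptors can be recycled; the error bits
	 * are also folded into the software statistics below.
	 */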
1749 	brx = (pintr_src->int_bits.reint | pintr_src->int_bits.miss
1750 	    | pintr_src->int_bits.rcint | pintr_src->int_bits.stint)
1751 	    != 0 ? B_TRUE : B_FALSE;
1752 	if (pintr_src->int_bits.reint)
1753 		ngep->statistics.sw_statistics.rx_err++;
1754 	if (pintr_src->int_bits.miss)
1755 		ngep->statistics.sw_statistics.rx_nobuffer++;
1756 
1757 	btx = (pintr_src->int_bits.teint | pintr_src->int_bits.tfint)
1758 	    != 0 ? B_TRUE : B_FALSE;
1759 	if (btx)
1760 		nge_tx_recycle(ngep, B_TRUE);
1761 	if (brx)
1762 		nge_receive(ngep);
1763 	if (pintr_src->int_bits.teint)
1764 		ngep->statistics.sw_statistics.tx_stop_err++;
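	/*
	 * Adaptive interrupt moderation: while polling, a run of
	 * param_poll_quiet_time consecutive "quiet" interrupts (fewer
	 * than param_rx_intr_hwater packets received) drops us back to
	 * per-packet interrupts; while not polling, a run of
	 * param_poll_busy_time consecutive "busy" interrupts (more
	 * than param_rx_intr_lwater packets) switches us into poll
	 * mode.
	 */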
1765 	if (ngep->intr_moderation && brx) {
1766 		if (ngep->poll) {
1767 			if (ngep->recv_count < ngep->param_rx_intr_hwater) {
1768 				ngep->quiet_time++;
1769 				if (ngep->quiet_time ==
1770 				    ngep->param_poll_quiet_time) {
1771 					ngep->poll = B_FALSE;
1772 					ngep->quiet_time = 0;
1773 				}
1774 			} else
1775 				ngep->quiet_time = 0;
1776 		} else {
1777 			if (ngep->recv_count > ngep->param_rx_intr_lwater) {
1778 				ngep->busy_time++;
1779 				if (ngep->busy_time ==
1780 				    ngep->param_poll_busy_time) {
1781 					ngep->poll = B_TRUE;
1782 					ngep->busy_time = 0;
1783 				}
1784 			} else
1785 				ngep->busy_time = 0;
1786 		}
1787 	}
1788 	ngep->recv_count = 0;
1789 	if (pintr_src->int_bits.feint)
1790 		nge_chip_err(ngep);
1791 	/* link interrupt, check the link state */
1792 	if (pintr_src->int_bits.mint) {
1793 		mintr_src.src_val = nge_reg_get32(ngep, NGE_MINTR_SRC);
1794 		nge_reg_put32(ngep, NGE_MINTR_SRC, mintr_src.src_val);
1795 		nge_wake_factotum(ngep);
1796 	}
1797 }
1798 
1799 /*
1800  *	nge_chip_intr() -- handle chip interrupts
1801  */
1802 /* ARGSUSED */
1803 uint_t
nge_chip_intr(caddr_t arg1,caddr_t arg2)1804 nge_chip_intr(caddr_t arg1, caddr_t arg2)
1805 {
1806 	nge_t *ngep = (nge_t *)arg1;
1807 	nge_intr_src intr_src;
1808 	nge_intr_mask intr_mask;
1809 
1810 	mutex_enter(ngep->genlock);
1811 
1812 	if (ngep->suspended) {
1813 		mutex_exit(ngep->genlock);
1814 		return (DDI_INTR_UNCLAIMED);
1815 	}
1816 
1817 	/*
1818 	 * Check whether the chip says it's asserting #INTA;
1819 	 * if not, don't process or claim the interrupt.
1820 	 */
1821 	intr_src.intr_val = nge_reg_get32(ngep, NGE_INTR_SRC);
1822 	if (intr_src.intr_val == 0) {
1823 		mutex_exit(ngep->genlock);
1824 		return (DDI_INTR_UNCLAIMED);
1825 	}
1826 	/*
1827 	 * Ack the interrupt
1828 	 */
1829 	nge_reg_put32(ngep, NGE_INTR_SRC, intr_src.intr_val);
1830 
1831 	if (ngep->nge_chip_state != NGE_CHIP_RUNNING) {
1832 		mutex_exit(ngep->genlock);
1833 		return (DDI_INTR_CLAIMED);
1834 	}
1835 	nge_intr_handle(ngep, &intr_src);
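	/*
	 * Switch interrupt masks to match the polling state: in poll
	 * mode the soft timer interrupt (stint) drives receive
	 * processing instead of the per-packet rcint; when leaving
	 * poll mode, restore the originally programmed mask.
	 */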
1836 	if (ngep->poll && !ngep->ch_intr_mode) {
1837 		intr_mask.mask_val = nge_reg_get32(ngep, NGE_INTR_MASK);
1838 		intr_mask.mask_bits.stint = NGE_SET;
1839 		intr_mask.mask_bits.rcint = NGE_CLEAR;
1840 		nge_reg_put32(ngep, NGE_INTR_MASK, intr_mask.mask_val);
1841 		ngep->ch_intr_mode = B_TRUE;
1842 	} else if ((ngep->ch_intr_mode) && (!ngep->poll)) {
1843 		nge_reg_put32(ngep, NGE_INTR_MASK, ngep->intr_masks);
1844 		ngep->ch_intr_mode = B_FALSE;
1845 	}
1846 	mutex_exit(ngep->genlock);
1847 	return (DDI_INTR_CLAIMED);
1848 }
1849 
1850 #if	NGE_DEBUGGING
1851 static enum ioc_reply
nge_pp_ioctl(nge_t * ngep,int cmd,mblk_t * mp,struct iocblk * iocp)1852 nge_pp_ioctl(nge_t *ngep, int cmd, mblk_t *mp, struct iocblk *iocp)
1853 {
1854 	int err = DDI_FAILURE;
1855 	uint64_t sizemask;
1856 	uint64_t mem_va;
1857 	uint64_t maxoff;
1858 	boolean_t peek;
1859 	nge_peekpoke_t *ppd;
1860 	int (*ppfn)(nge_t *ngep, nge_peekpoke_t *ppd);
1861 
1862 	switch (cmd) {
1863 	default:
1864 		return (IOC_INVAL);
1865 
1866 	case NGE_PEEK:
1867 		peek = B_TRUE;
1868 		break;
1869 
1870 	case NGE_POKE:
1871 		peek = B_FALSE;
1872 		break;
1873 	}
1874 
1875 	/*
1876 	 * Validate format of ioctl
1877 	 */
1878 	if (iocp->ioc_count != sizeof (nge_peekpoke_t))
1879 		return (IOC_INVAL);
1880 	if (mp->b_cont == NULL)
1881 		return (IOC_INVAL);
1882 	ppd = (nge_peekpoke_t *)mp->b_cont->b_rptr;
1883 
1884 	/*
1885 	 * Validate request parameters
1886 	 */
1887 	switch (ppd->pp_acc_space) {
1888 	default:
1889 		return (IOC_INVAL);
1890 
1891 	case NGE_PP_SPACE_CFG:
1892 		/*
1893 		 * Config space
1894 		 */
1895 		sizemask = 8|4|2|1;
1896 		mem_va = 0;
1897 		maxoff = PCI_CONF_HDR_SIZE;
1898 		ppfn = peek ? nge_chip_peek_cfg : nge_chip_poke_cfg;
1899 		break;
1900 
1901 	case NGE_PP_SPACE_REG:
1902 		/*
1903 		 * Memory-mapped I/O space
1904 		 */
1905 		sizemask = 8|4|2|1;
1906 		mem_va = 0;
1907 		maxoff = NGE_REG_SIZE;
1908 		ppfn = peek ? nge_chip_peek_reg : nge_chip_poke_reg;
1909 		break;
1910 
1911 	case NGE_PP_SPACE_MII:
1912 		sizemask = 4|2|1;
1913 		mem_va = 0;
1914 		maxoff = NGE_MII_SIZE;
1915 		ppfn = peek ? nge_chip_peek_mii : nge_chip_poke_mii;
1916 		break;
1917 
1918 	case NGE_PP_SPACE_SEEPROM:
1919 		sizemask = 4|2|1;
1920 		mem_va = 0;
1921 		maxoff = NGE_SEEROM_SIZE;
1922 		ppfn = peek ? nge_chip_peek_seeprom : nge_chip_poke_seeprom;
1923 		break;
1924 	}
1925 
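	/*
	 * The requested access size must be one the selected space
	 * supports, and the offset must be size-aligned and lie
	 * entirely within that space.
	 */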
1926 	switch (ppd->pp_acc_size) {
1927 	default:
1928 		return (IOC_INVAL);
1929 
1930 	case 8:
1931 	case 4:
1932 	case 2:
1933 	case 1:
1934 		if ((ppd->pp_acc_size & sizemask) == 0)
1935 			return (IOC_INVAL);
1936 		break;
1937 	}
1938 
1939 	if ((ppd->pp_acc_offset % ppd->pp_acc_size) != 0)
1940 		return (IOC_INVAL);
1941 
1942 	if (ppd->pp_acc_offset >= maxoff)
1943 		return (IOC_INVAL);
1944 
1945 	if (ppd->pp_acc_offset+ppd->pp_acc_size > maxoff)
1946 		return (IOC_INVAL);
1947 
1948 	/*
1949 	 * All OK - go do it!
1950 	 */
1951 	ppd->pp_acc_offset += mem_va;
1952 	if (ppfn)
1953 		err = (*ppfn)(ngep, ppd);
1954 	if (err != DDI_SUCCESS)
1955 		return (IOC_INVAL);
1956 	return (peek ? IOC_REPLY : IOC_ACK);
1957 }
1958 
1959 static enum ioc_reply nge_diag_ioctl(nge_t *ngep, int cmd, mblk_t *mp,
1960 					struct iocblk *iocp);
1961 #pragma	no_inline(nge_diag_ioctl)
1962 
1963 static enum ioc_reply
nge_diag_ioctl(nge_t * ngep,int cmd,mblk_t * mp,struct iocblk * iocp)1964 nge_diag_ioctl(nge_t *ngep, int cmd, mblk_t *mp, struct iocblk *iocp)
1965 {
1966 	ASSERT(mutex_owned(ngep->genlock));
1967 
1968 	switch (cmd) {
1969 	default:
1970 		nge_error(ngep, "nge_diag_ioctl: invalid cmd 0x%x", cmd);
1971 		return (IOC_INVAL);
1972 
1973 	case NGE_DIAG:
1974 		return (IOC_ACK);
1975 
1976 	case NGE_PEEK:
1977 	case NGE_POKE:
1978 		return (nge_pp_ioctl(ngep, cmd, mp, iocp));
1979 
1980 	case NGE_PHY_RESET:
1981 		return (IOC_RESTART_ACK);
1982 
1983 	case NGE_SOFT_RESET:
1984 	case NGE_HARD_RESET:
1985 		return (IOC_ACK);
1986 	}
1987 
1988 	/* NOTREACHED */
1989 }
1990 #endif /* NGE_DEBUGGING */
1991 
1992 enum ioc_reply
nge_chip_ioctl(nge_t * ngep,mblk_t * mp,struct iocblk * iocp)1993 nge_chip_ioctl(nge_t *ngep, mblk_t *mp, struct iocblk *iocp)
1994 {
1995 	int cmd;
1996 
1997 	ASSERT(mutex_owned(ngep->genlock));
1998 
1999 	cmd = iocp->ioc_cmd;
2000 
2001 	switch (cmd) {
2002 	default:
2003 		return (IOC_INVAL);
2004 
2005 	case NGE_DIAG:
2006 	case NGE_PEEK:
2007 	case NGE_POKE:
2008 	case NGE_PHY_RESET:
2009 	case NGE_SOFT_RESET:
2010 	case NGE_HARD_RESET:
2011 #if	NGE_DEBUGGING
2012 		return (nge_diag_ioctl(ngep, cmd, mp, iocp));
2013 #else
2014 		return (IOC_INVAL);
2015 #endif
2016 
2017 	case NGE_MII_READ:
2018 	case NGE_MII_WRITE:
2019 		return (IOC_INVAL);
2020 
2021 #if	NGE_SEE_IO32
2022 	case NGE_SEE_READ:
2023 	case NGE_SEE_WRITE:
2024 		return (IOC_INVAL);
2025 #endif
2026 
2027 #if	NGE_FLASH_IO32
2028 	case NGE_FLASH_READ:
2029 	case NGE_FLASH_WRITE:
2030 		return (IOC_INVAL);
2031 #endif
2032 	}
2033 }
2034