1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
25 */
26
27 #include "nge.h"
28 static uint32_t nge_watchdog_count = 1 << 5;
29 static uint32_t nge_watchdog_check = 1 << 3;
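/*
 * Tx stall-detection tunables: the <watchdog> counter is shifted left
 * once per factotum pass (see nge_factotum_stall_check() below); when
 * the shifted value reaches nge_watchdog_check (8) a Tx recycle is
 * forced, and when it reaches nge_watchdog_count (32) the chip is
 * assumed to have stalled.
 */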
30 extern boolean_t nge_enable_msi;
31 static void nge_sync_mac_modes(nge_t *);
32
33 #undef NGE_DBG
34 #define NGE_DBG NGE_DBG_CHIP
35
36 /*
37 * Operating register get/set access routines
38 */
39 uint8_t nge_reg_get8(nge_t *ngep, nge_regno_t regno);
40 #pragma inline(nge_reg_get8)
41
42 uint8_t
43 nge_reg_get8(nge_t *ngep, nge_regno_t regno)
44 {
45 NGE_TRACE(("nge_reg_get8($%p, 0x%lx)", (void *)ngep, regno));
46
47 return (ddi_get8(ngep->io_handle, PIO_ADDR(ngep, regno)));
48 }
49
50 void nge_reg_put8(nge_t *ngep, nge_regno_t regno, uint8_t data);
51 #pragma inline(nge_reg_put8)
52
53 void
54 nge_reg_put8(nge_t *ngep, nge_regno_t regno, uint8_t data)
55 {
56 NGE_TRACE(("nge_reg_put8($%p, 0x%lx, 0x%x)",
57 (void *)ngep, regno, data));
58 ddi_put8(ngep->io_handle, PIO_ADDR(ngep, regno), data);
59
60 }
61
62 uint16_t nge_reg_get16(nge_t *ngep, nge_regno_t regno);
63 #pragma inline(nge_reg_get16)
64
65 uint16_t
66 nge_reg_get16(nge_t *ngep, nge_regno_t regno)
67 {
68 NGE_TRACE(("nge_reg_get16($%p, 0x%lx)", (void *)ngep, regno));
69 return (ddi_get16(ngep->io_handle, PIO_ADDR(ngep, regno)));
70 }
71
72 void nge_reg_put16(nge_t *ngep, nge_regno_t regno, uint16_t data);
73 #pragma inline(nge_reg_put16)
74
75 void
76 nge_reg_put16(nge_t *ngep, nge_regno_t regno, uint16_t data)
77 {
78 NGE_TRACE(("nge_reg_put16($%p, 0x%lx, 0x%x)",
79 (void *)ngep, regno, data));
80 ddi_put16(ngep->io_handle, PIO_ADDR(ngep, regno), data);
81
82 }
83
84 uint32_t nge_reg_get32(nge_t *ngep, nge_regno_t regno);
85 #pragma inline(nge_reg_get32)
86
87 uint32_t
88 nge_reg_get32(nge_t *ngep, nge_regno_t regno)
89 {
90 NGE_TRACE(("nge_reg_get32($%p, 0x%lx)", (void *)ngep, regno));
91 return (ddi_get32(ngep->io_handle, PIO_ADDR(ngep, regno)));
92 }
93
94 void nge_reg_put32(nge_t *ngep, nge_regno_t regno, uint32_t data);
95 #pragma inline(nge_reg_put32)
96
97 void
98 nge_reg_put32(nge_t *ngep, nge_regno_t regno, uint32_t data)
99 {
100 NGE_TRACE(("nge_reg_put32($%p, 0x%lx, 0x%x)",
101 (void *)ngep, regno, data));
102 ddi_put32(ngep->io_handle, PIO_ADDR(ngep, regno), data);
103
104 }
105
106
107 static int nge_chip_peek_cfg(nge_t *ngep, nge_peekpoke_t *ppd);
108 #pragma no_inline(nge_chip_peek_cfg)
109
110 static int
111 nge_chip_peek_cfg(nge_t *ngep, nge_peekpoke_t *ppd)
112 {
113 int err;
114 uint64_t regval;
115 uint64_t regno;
116
117 NGE_TRACE(("nge_chip_peek_cfg($%p, $%p)",
118 (void *)ngep, (void *)ppd));
119
120 err = DDI_SUCCESS;
121 regno = ppd->pp_acc_offset;
122
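/*
 * Note: pp_acc_size has already been validated (1/2/4/8 only) by
 * nge_pp_ioctl() before this routine is called, so no default case
 * is needed in the switch below.
 */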
123 switch (ppd->pp_acc_size) {
124 case 1:
125 regval = pci_config_get8(ngep->cfg_handle, regno);
126 break;
127
128 case 2:
129 regval = pci_config_get16(ngep->cfg_handle, regno);
130 break;
131
132 case 4:
133 regval = pci_config_get32(ngep->cfg_handle, regno);
134 break;
135
136 case 8:
137 regval = pci_config_get64(ngep->cfg_handle, regno);
138 break;
139 }
140 ppd->pp_acc_data = regval;
141 return (err);
142 }
143
144 static int nge_chip_poke_cfg(nge_t *ngep, nge_peekpoke_t *ppd);
145
146 static int
147 nge_chip_poke_cfg(nge_t *ngep, nge_peekpoke_t *ppd)
148 {
149 int err;
150 uint64_t regval;
151 uint64_t regno;
152
153 NGE_TRACE(("nge_chip_poke_cfg($%p, $%p)",
154 (void *)ngep, (void *)ppd));
155
156 err = DDI_SUCCESS;
157 regno = ppd->pp_acc_offset;
158 regval = ppd->pp_acc_data;
159
160 switch (ppd->pp_acc_size) {
161 case 1:
162 pci_config_put8(ngep->cfg_handle, regno, regval);
163 break;
164
165 case 2:
166 pci_config_put16(ngep->cfg_handle, regno, regval);
167 break;
168
169 case 4:
170 pci_config_put32(ngep->cfg_handle, regno, regval);
171 break;
172
173 case 8:
174 pci_config_put64(ngep->cfg_handle, regno, regval);
175 break;
176 }
177
178 return (err);
179
180 }
181
182 static int nge_chip_peek_reg(nge_t *ngep, nge_peekpoke_t *ppd);
183
184 static int
185 nge_chip_peek_reg(nge_t *ngep, nge_peekpoke_t *ppd)
186 {
187 int err;
188 uint64_t regval;
189 void *regaddr;
190
191 NGE_TRACE(("nge_chip_peek_reg($%p, $%p)",
192 (void *)ngep, (void *)ppd));
193
194 err = DDI_SUCCESS;
195 regaddr = PIO_ADDR(ngep, ppd->pp_acc_offset);
196
197 switch (ppd->pp_acc_size) {
198 case 1:
199 regval = ddi_get8(ngep->io_handle, regaddr);
200 break;
201
202 case 2:
203 regval = ddi_get16(ngep->io_handle, regaddr);
204 break;
205
206 case 4:
207 regval = ddi_get32(ngep->io_handle, regaddr);
208 break;
209
210 case 8:
211 regval = ddi_get64(ngep->io_handle, regaddr);
212 break;
213
214 default:
215 regval = 0x0ull;
216 break;
217 }
218 ppd->pp_acc_data = regval;
219 return (err);
220 }
221
222 static int nge_chip_poke_reg(nge_t *ngep, nge_peekpoke_t *ppd);
223
224 static int
225 nge_chip_poke_reg(nge_t *ngep, nge_peekpoke_t *ppd)
226 {
227 int err;
228 uint64_t regval;
229 void *regaddr;
230
231 NGE_TRACE(("nge_chip_poke_reg($%p, $%p)",
232 (void *)ngep, (void *)ppd));
233
234 err = DDI_SUCCESS;
235 regaddr = PIO_ADDR(ngep, ppd->pp_acc_offset);
236 regval = ppd->pp_acc_data;
237
238 switch (ppd->pp_acc_size) {
239 case 1:
240 ddi_put8(ngep->io_handle, regaddr, regval);
241 break;
242
243 case 2:
244 ddi_put16(ngep->io_handle, regaddr, regval);
245 break;
246
247 case 4:
248 ddi_put32(ngep->io_handle, regaddr, regval);
249 break;
250
251 case 8:
252 ddi_put64(ngep->io_handle, regaddr, regval);
253 break;
254 }
255 return (err);
256 }
257
258 static int nge_chip_peek_mii(nge_t *ngep, nge_peekpoke_t *ppd);
259 #pragma no_inline(nge_chip_peek_mii)
260
261 static int
262 nge_chip_peek_mii(nge_t *ngep, nge_peekpoke_t *ppd)
263 {
264 int err;
265
266 err = DDI_SUCCESS;
267 ppd->pp_acc_data = nge_mii_get16(ngep, ppd->pp_acc_offset/2);
268 return (err);
269 }
270
271 static int nge_chip_poke_mii(nge_t *ngep, nge_peekpoke_t *ppd);
272 #pragma no_inline(nge_chip_poke_mii)
273
274 static int
275 nge_chip_poke_mii(nge_t *ngep, nge_peekpoke_t *ppd)
276 {
277 int err;
278 err = DDI_SUCCESS;
279 nge_mii_put16(ngep, ppd->pp_acc_offset/2, ppd->pp_acc_data);
280 return (err);
281 }
282
283 /*
284 * Basic SEEPROM get/set access routine
285 *
286 * This uses the chip's SEEPROM auto-access method, controlled by the
287 * Serial EEPROM Address/Data Registers at 0x504, so the CPU
288 * doesn't have to fiddle with the individual bits.
289 *
290 * The caller should hold <genlock> and *also* have already acquired
291 * the right to access the SEEPROM.
292 *
293 * Return value:
294 * DDI_SUCCESS on success,
295 * DDI_FAILURE on access timeout (maybe retryable: the device
296 * may just be busy).
297 *
298 * <*dp> is an input to a SEEPROM_ACCESS_WRITE operation, or an output
299 * from a (successful) SEEPROM_ACCESS_READ.
300 */
301
302 static int
303 nge_seeprom_access(nge_t *ngep, uint32_t cmd, nge_regno_t addr, uint16_t *dp)
304 {
305 uint32_t tries;
306 nge_ep_cmd cmd_reg;
307 nge_ep_data data_reg;
308
309 NGE_TRACE(("nge_seeprom_access($%p, %d, %x, $%p)",
310 (void *)ngep, cmd, addr, (void *)dp));
311
312 ASSERT(mutex_owned(ngep->genlock));
313
314 /*
315 * Check there's no command in progress.
316 *
317 * Note: this *shouldn't* ever find that there is a command
318 * in progress, because we already hold the <genlock> mutex.
319 * Also, the caller must already have acquired the right to access
320 * the (shared) SEEPROM, so nothing else should conflict with us.
321 * So this is just a final consistency check: we shouldn't
322 * see EITHER the START bit (command started but not complete)
323 * OR the COMPLETE bit (command completed but not cleared).
324 */
325 cmd_reg.cmd_val = nge_reg_get32(ngep, NGE_EP_CMD);
326 for (tries = 0; tries < 30; tries++) {
327 if (cmd_reg.cmd_bits.sts == SEEPROM_READY)
328 break;
329 drv_usecwait(10);
330 cmd_reg.cmd_val = nge_reg_get32(ngep, NGE_EP_CMD);
331 }
332
333 /*
334 * This should not happen. If it does, we have to restart the
335 * eeprom state machine.
336 */
337 if (tries == 30) {
338 cmd_reg.cmd_bits.sts = SEEPROM_READY;
339 nge_reg_put32(ngep, NGE_EP_CMD, cmd_reg.cmd_val);
340 drv_usecwait(10);
341 /*
342 * Poll the status bit to make sure the eeprom is ready
343 */
344 cmd_reg.cmd_val = nge_reg_get32(ngep, NGE_EP_CMD);
345 for (tries = 0; tries < 30; tries++) {
346 if (cmd_reg.cmd_bits.sts == SEEPROM_READY)
347 break;
348 drv_usecwait(10);
349 cmd_reg.cmd_val = nge_reg_get32(ngep, NGE_EP_CMD);
350 }
351 }
352
353 /*
354 * Assemble the command ...
355 */
356 cmd_reg.cmd_bits.addr = (uint32_t)addr;
357 cmd_reg.cmd_bits.cmd = cmd;
358 cmd_reg.cmd_bits.sts = 0;
359
360 nge_reg_put32(ngep, NGE_EP_CMD, cmd_reg.cmd_val);
361
362 /*
363 * Poll until the access completes.
364 *
365 */
366 cmd_reg.cmd_val = nge_reg_get32(ngep, NGE_EP_CMD);
367 for (tries = 0; tries < 30; tries++) {
368 if (cmd_reg.cmd_bits.sts == SEEPROM_READY)
369 break;
370 drv_usecwait(10);
371 cmd_reg.cmd_val = nge_reg_get32(ngep, NGE_EP_CMD);
372 }
373
374 if (tries == 30) {
375 nge_report(ngep, NGE_HW_ROM);
376 return (DDI_FAILURE);
377 }
378 switch (cmd) {
379 default:
380 case SEEPROM_CMD_WRITE_ENABLE:
381 case SEEPROM_CMD_ERASE:
382 case SEEPROM_CMD_ERALSE_ALL:
383 case SEEPROM_CMD_WRITE_DIS:
384 break;
385
386 case SEEPROM_CMD_READ:
387 data_reg.data_val = nge_reg_get32(ngep, NGE_EP_DATA);
388 *dp = data_reg.data_bits.data;
389 break;
390
391 case SEEPROM_CMD_WRITE:
392 data_reg.data_val = nge_reg_get32(ngep, NGE_EP_DATA);
393 data_reg.data_bits.data = *dp;
394 nge_reg_put32(ngep, NGE_EP_DATA, data_reg.data_val);
395 break;
396 }
397
398 return (DDI_SUCCESS);
399 }
400
401
402 static int
403 nge_chip_peek_seeprom(nge_t *ngep, nge_peekpoke_t *ppd)
404 {
405 uint16_t data;
406 int err;
407
408 err = nge_seeprom_access(ngep, SEEPROM_CMD_READ,
409 ppd->pp_acc_offset, &data);
410 ppd->pp_acc_data = data;
411 return (err);
412 }
413
414 static int
415 nge_chip_poke_seeprom(nge_t *ngep, nge_peekpoke_t *ppd)
416 {
417 uint16_t data;
418 int err;
419
420 data = ppd->pp_acc_data;
421 err = nge_seeprom_access(ngep, SEEPROM_CMD_WRITE,
422 ppd->pp_acc_offset, &data);
423 return (err);
424 }
425
426 void
427 nge_init_dev_spec_param(nge_t *ngep)
428 {
429 nge_dev_spec_param_t *dev_param_p;
430 chip_info_t *infop;
431
432 dev_param_p = &ngep->dev_spec_param;
433 infop = (chip_info_t *)&ngep->chipinfo;
434
435 switch (infop->device) {
436 case DEVICE_ID_NF3_E6:
437 case DEVICE_ID_NF3_DF:
438 case DEVICE_ID_MCP04_37:
439 case DEVICE_ID_MCP04_38:
440 dev_param_p->msi = B_FALSE;
441 dev_param_p->msi_x = B_FALSE;
442 dev_param_p->vlan = B_FALSE;
443 dev_param_p->advanced_pm = B_FALSE;
444 dev_param_p->mac_addr_order = B_FALSE;
445 dev_param_p->tx_pause_frame = B_FALSE;
446 dev_param_p->rx_pause_frame = B_FALSE;
447 dev_param_p->jumbo = B_FALSE;
448 dev_param_p->tx_rx_64byte = B_FALSE;
449 dev_param_p->rx_hw_checksum = B_FALSE;
450 dev_param_p->tx_hw_checksum = 0;
451 dev_param_p->desc_type = DESC_OFFLOAD;
452 dev_param_p->rx_desc_num = NGE_RECV_SLOTS_DESC_1024;
453 dev_param_p->tx_desc_num = NGE_SEND_SLOTS_DESC_1024;
454 dev_param_p->nge_split = NGE_SPLIT_32;
455 break;
456
457 case DEVICE_ID_CK804_56:
458 case DEVICE_ID_CK804_57:
459 dev_param_p->msi = B_TRUE;
460 dev_param_p->msi_x = B_TRUE;
461 dev_param_p->vlan = B_FALSE;
462 dev_param_p->advanced_pm = B_FALSE;
463 dev_param_p->mac_addr_order = B_FALSE;
464 dev_param_p->tx_pause_frame = B_FALSE;
465 dev_param_p->rx_pause_frame = B_TRUE;
466 dev_param_p->jumbo = B_TRUE;
467 dev_param_p->tx_rx_64byte = B_FALSE;
468 dev_param_p->rx_hw_checksum = B_TRUE;
469 dev_param_p->tx_hw_checksum = HCKSUM_IPHDRCKSUM;
470 dev_param_p->desc_type = DESC_HOT;
471 dev_param_p->rx_desc_num = NGE_RECV_SLOTS_DESC_3072;
472 dev_param_p->tx_desc_num = NGE_SEND_SLOTS_DESC_3072;
473 dev_param_p->nge_split = NGE_SPLIT_96;
474 break;
475
476 case DEVICE_ID_MCP51_268:
477 case DEVICE_ID_MCP51_269:
478 dev_param_p->msi = B_FALSE;
479 dev_param_p->msi_x = B_FALSE;
480 dev_param_p->vlan = B_FALSE;
481 dev_param_p->advanced_pm = B_TRUE;
482 dev_param_p->mac_addr_order = B_FALSE;
483 dev_param_p->tx_pause_frame = B_FALSE;
484 dev_param_p->rx_pause_frame = B_FALSE;
485 dev_param_p->jumbo = B_FALSE;
486 dev_param_p->tx_rx_64byte = B_TRUE;
487 dev_param_p->rx_hw_checksum = B_FALSE;
488 dev_param_p->tx_hw_checksum = 0;
489 dev_param_p->desc_type = DESC_OFFLOAD;
490 dev_param_p->rx_desc_num = NGE_RECV_SLOTS_DESC_1024;
491 dev_param_p->tx_desc_num = NGE_SEND_SLOTS_DESC_1024;
492 dev_param_p->nge_split = NGE_SPLIT_32;
493 break;
494
495 case DEVICE_ID_MCP55_372:
496 case DEVICE_ID_MCP55_373:
497 dev_param_p->msi = B_TRUE;
498 dev_param_p->msi_x = B_TRUE;
499 dev_param_p->vlan = B_TRUE;
500 dev_param_p->advanced_pm = B_TRUE;
501 dev_param_p->mac_addr_order = B_FALSE;
502 dev_param_p->tx_pause_frame = B_TRUE;
503 dev_param_p->rx_pause_frame = B_TRUE;
504 dev_param_p->jumbo = B_TRUE;
505 dev_param_p->tx_rx_64byte = B_TRUE;
506 dev_param_p->rx_hw_checksum = B_TRUE;
507 dev_param_p->tx_hw_checksum = HCKSUM_IPHDRCKSUM;
508 dev_param_p->desc_type = DESC_HOT;
509 dev_param_p->rx_desc_num = NGE_RECV_SLOTS_DESC_3072;
510 dev_param_p->tx_desc_num = NGE_SEND_SLOTS_DESC_3072;
511 dev_param_p->nge_split = NGE_SPLIT_96;
512 break;
513
514 case DEVICE_ID_MCP61_3EE:
515 case DEVICE_ID_MCP61_3EF:
516 dev_param_p->msi = B_FALSE;
517 dev_param_p->msi_x = B_FALSE;
518 dev_param_p->vlan = B_FALSE;
519 dev_param_p->advanced_pm = B_TRUE;
520 dev_param_p->mac_addr_order = B_TRUE;
521 dev_param_p->tx_pause_frame = B_FALSE;
522 dev_param_p->rx_pause_frame = B_FALSE;
523 dev_param_p->jumbo = B_FALSE;
524 dev_param_p->tx_rx_64byte = B_TRUE;
525 dev_param_p->rx_hw_checksum = B_FALSE;
526 dev_param_p->tx_hw_checksum = 0;
527 dev_param_p->desc_type = DESC_OFFLOAD;
528 dev_param_p->rx_desc_num = NGE_RECV_SLOTS_DESC_1024;
529 dev_param_p->tx_desc_num = NGE_SEND_SLOTS_DESC_1024;
530 dev_param_p->nge_split = NGE_SPLIT_32;
531 break;
532
533 case DEVICE_ID_MCP77_760:
534 case DEVICE_ID_MCP79_AB0:
535 dev_param_p->msi = B_FALSE;
536 dev_param_p->msi_x = B_FALSE;
537 dev_param_p->vlan = B_FALSE;
538 dev_param_p->advanced_pm = B_TRUE;
539 dev_param_p->mac_addr_order = B_TRUE;
540 dev_param_p->tx_pause_frame = B_FALSE;
541 dev_param_p->rx_pause_frame = B_FALSE;
542 dev_param_p->jumbo = B_FALSE;
543 dev_param_p->tx_rx_64byte = B_TRUE;
544 dev_param_p->rx_hw_checksum = B_FALSE;
545 dev_param_p->tx_hw_checksum = 0;
546 dev_param_p->desc_type = DESC_HOT;
547 dev_param_p->rx_desc_num = NGE_RECV_SLOTS_DESC_1024;
548 dev_param_p->tx_desc_num = NGE_SEND_SLOTS_DESC_1024;
549 dev_param_p->nge_split = NGE_SPLIT_32;
550 break;
551
552 default:
553 dev_param_p->msi = B_FALSE;
554 dev_param_p->msi_x = B_FALSE;
555 dev_param_p->vlan = B_FALSE;
556 dev_param_p->advanced_pm = B_FALSE;
557 dev_param_p->mac_addr_order = B_FALSE;
558 dev_param_p->tx_pause_frame = B_FALSE;
559 dev_param_p->rx_pause_frame = B_FALSE;
560 dev_param_p->jumbo = B_FALSE;
561 dev_param_p->tx_rx_64byte = B_FALSE;
562 dev_param_p->rx_hw_checksum = B_FALSE;
563 dev_param_p->tx_hw_checksum = 0;
564 dev_param_p->desc_type = DESC_OFFLOAD;
565 dev_param_p->rx_desc_num = NGE_RECV_SLOTS_DESC_1024;
566 dev_param_p->tx_desc_num = NGE_SEND_SLOTS_DESC_1024;
567 dev_param_p->nge_split = NGE_SPLIT_32;
568 return;
569 }
570 }
571 /*
572 * Perform first-stage chip (re-)initialisation, using only config-space
573 * accesses:
574 *
575 * + Read the vendor/device/revision/subsystem/cache-line-size registers,
576 * returning the data in the structure pointed to by <infop>.
577 */
578 void nge_chip_cfg_init(nge_t *ngep, chip_info_t *infop, boolean_t reset);
579 #pragma no_inline(nge_chip_cfg_init)
580
581 void
582 nge_chip_cfg_init(nge_t *ngep, chip_info_t *infop, boolean_t reset)
583 {
584 uint16_t command;
585 ddi_acc_handle_t handle;
586 nge_interbus_conf interbus_conf;
587 nge_msi_mask_conf msi_mask_conf;
588 nge_msi_map_cap_conf cap_conf;
589
590 NGE_TRACE(("nge_chip_cfg_init($%p, $%p, %d)",
591 (void *)ngep, (void *)infop, reset));
592
593 /*
594 * save PCI cache line size and subsystem vendor ID
595 *
596 * Read all the config-space registers that characterise the
597 * chip, specifically vendor/device/revision/subsystem vendor
598 * and subsystem device id; these are read only once, not on reset.
599 */
600 handle = ngep->cfg_handle;
601 /* reading the vendor information once */
602 if (reset == B_FALSE) {
603 infop->command = pci_config_get16(handle,
604 PCI_CONF_COMM);
605 infop->vendor = pci_config_get16(handle,
606 PCI_CONF_VENID);
607 infop->device = pci_config_get16(handle,
608 PCI_CONF_DEVID);
609 infop->subven = pci_config_get16(handle,
610 PCI_CONF_SUBVENID);
611 infop->subdev = pci_config_get16(handle,
612 PCI_CONF_SUBSYSID);
613 infop->class_code = pci_config_get8(handle,
614 PCI_CONF_BASCLASS);
615 infop->revision = pci_config_get8(handle,
616 PCI_CONF_REVID);
617 infop->clsize = pci_config_get8(handle,
618 PCI_CONF_CACHE_LINESZ);
619 infop->latency = pci_config_get8(handle,
620 PCI_CONF_LATENCY_TIMER);
621 }
622 if (nge_enable_msi) {
623 /* Enable MSI by clearing the hidden MSI-off bit (MSI-X stays off on MCP55) */
624 interbus_conf.conf_val = pci_config_get32(handle,
625 PCI_CONF_HT_INTERNAL);
626 if ((infop->device == DEVICE_ID_MCP55_373) ||
627 (infop->device == DEVICE_ID_MCP55_372))
628 interbus_conf.conf_bits.msix_off = NGE_SET;
629 interbus_conf.conf_bits.msi_off = NGE_CLEAR;
630 pci_config_put32(handle, PCI_CONF_HT_INTERNAL,
631 interbus_conf.conf_val);
632
633 if ((infop->device == DEVICE_ID_MCP55_373) ||
634 (infop->device == DEVICE_ID_MCP55_372)) {
635
636 /* Enable all eight MSI vectors for MCP55 (clear the vector-off bits) */
637 msi_mask_conf.msi_mask_conf_val =
638 pci_config_get32(handle, PCI_CONF_HT_MSI_MASK);
639 msi_mask_conf.msi_mask_bits.vec0_off = NGE_CLEAR;
640 msi_mask_conf.msi_mask_bits.vec1_off = NGE_CLEAR;
641 msi_mask_conf.msi_mask_bits.vec2_off = NGE_CLEAR;
642 msi_mask_conf.msi_mask_bits.vec3_off = NGE_CLEAR;
643 msi_mask_conf.msi_mask_bits.vec4_off = NGE_CLEAR;
644 msi_mask_conf.msi_mask_bits.vec5_off = NGE_CLEAR;
645 msi_mask_conf.msi_mask_bits.vec6_off = NGE_CLEAR;
646 msi_mask_conf.msi_mask_bits.vec7_off = NGE_CLEAR;
647 pci_config_put32(handle, PCI_CONF_HT_MSI_MASK,
648 msi_mask_conf.msi_mask_conf_val);
649
650 /* Enable the MSI mapping */
651 cap_conf.msi_map_cap_conf_val =
652 pci_config_get32(handle, PCI_CONF_HT_MSI_MAP_CAP);
653 cap_conf.map_cap_conf_bits.map_en = NGE_SET;
654 pci_config_put32(handle, PCI_CONF_HT_MSI_MAP_CAP,
655 cap_conf.msi_map_cap_conf_val);
656 }
657 } else {
658 interbus_conf.conf_val = pci_config_get32(handle,
659 PCI_CONF_HT_INTERNAL);
660 interbus_conf.conf_bits.msi_off = NGE_SET;
661 pci_config_put32(handle, PCI_CONF_HT_INTERNAL,
662 interbus_conf.conf_val);
663 }
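/*
 * Enable memory-space access and bus mastering, disable
 * memory-write-invalidate, and clear any latched status bits by
 * writing them back as all-ones.
 */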
664 command = infop->command | PCI_COMM_MAE;
665 command &= ~PCI_COMM_MEMWR_INVAL;
666 command |= PCI_COMM_ME;
667 pci_config_put16(handle, PCI_CONF_COMM, command);
668 pci_config_put16(handle, PCI_CONF_STAT, ~0);
669
670 }
671
672 int
673 nge_chip_stop(nge_t *ngep, boolean_t fault)
674 {
675 int err;
676 uint32_t reg_val;
677 uint32_t tries;
678 nge_mintr_src mintr_src;
679 nge_mii_cs mii_cs;
680 nge_rx_poll rx_poll;
681 nge_tx_poll tx_poll;
682 nge_rx_en rx_en;
683 nge_tx_en tx_en;
684 nge_tx_sta tx_sta;
685 nge_rx_sta rx_sta;
686 nge_mode_cntl mode;
687 nge_pmu_cntl2 pmu_cntl2;
688
689 NGE_TRACE(("nge_chip_stop($%p, %d)", (void *)ngep, fault));
690
691 err = DDI_SUCCESS;
692
693 /* Clear any pending PHY interrupt */
694 mintr_src.src_val = nge_reg_get8(ngep, NGE_MINTR_SRC);
695 nge_reg_put8(ngep, NGE_MINTR_SRC, mintr_src.src_val);
696
697 /* Mask all interrupts */
698 reg_val = nge_reg_get32(ngep, NGE_INTR_MASK);
699 reg_val &= ~NGE_INTR_ALL_EN;
700 nge_reg_put32(ngep, NGE_INTR_MASK, reg_val);
701
702 /* Disable auto-polling of phy */
703 mii_cs.cs_val = nge_reg_get32(ngep, NGE_MII_CS);
704 mii_cs.cs_bits.ap_en = NGE_CLEAR;
705 nge_reg_put32(ngep, NGE_MII_CS, mii_cs.cs_val);
706
707 /* Reset buffer management & DMA */
708 mode.mode_val = nge_reg_get32(ngep, NGE_MODE_CNTL);
709 mode.mode_bits.dma_dis = NGE_SET;
710 mode.mode_bits.desc_type = ngep->desc_mode;
711 nge_reg_put32(ngep, NGE_MODE_CNTL, mode.mode_val);
712
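/*
 * Wait up to ~100ms (10000 x 10us) for the DMA engine to report
 * that it has stopped.
 */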
713 for (tries = 0; tries < 10000; tries++) {
714 drv_usecwait(10);
715 mode.mode_val = nge_reg_get32(ngep, NGE_MODE_CNTL);
716 if (mode.mode_bits.dma_status == NGE_SET)
717 break;
718 }
719 if (tries == 10000) {
720 ngep->nge_chip_state = NGE_CHIP_ERROR;
721 return (DDI_FAILURE);
722 }
723
724 /* Disable the rx state machine */
725 rx_en.val = nge_reg_get8(ngep, NGE_RX_EN);
726 rx_en.bits.rx_en = NGE_CLEAR;
727 nge_reg_put8(ngep, NGE_RX_EN, rx_en.val);
728
729 /* Disable the tx state machine */
730 tx_en.val = nge_reg_get8(ngep, NGE_TX_EN);
731 tx_en.bits.tx_en = NGE_CLEAR;
732 nge_reg_put8(ngep, NGE_TX_EN, tx_en.val);
733
734 /*
735 * Clear the status of the tx state machine
736 * and make sure the tx channel is idle
737 */
738 tx_sta.sta_val = nge_reg_get32(ngep, NGE_TX_STA);
739 for (tries = 0; tries < 1000; tries++) {
740 if (tx_sta.sta_bits.tx_chan_sta == NGE_CLEAR)
741 break;
742 drv_usecwait(10);
743 tx_sta.sta_val = nge_reg_get32(ngep, NGE_TX_STA);
744 }
745 if (tries == 1000) {
746 ngep->nge_chip_state = NGE_CHIP_ERROR;
747 return (DDI_FAILURE);
748 }
749 nge_reg_put32(ngep, NGE_TX_STA, tx_sta.sta_val);
750
751 /*
752 * Clear the status of the rx state machine
753 * and make sure the rx channel is idle
754 */
755 rx_sta.sta_val = nge_reg_get32(ngep, NGE_RX_STA);
756 for (tries = 0; tries < 1000; tries++) {
757 if (rx_sta.sta_bits.rx_chan_sta == NGE_CLEAR)
758 break;
759 drv_usecwait(10);
760 rx_sta.sta_val = nge_reg_get32(ngep, NGE_RX_STA);
761 }
762 if (tries == 1000) {
763 ngep->nge_chip_state = NGE_CHIP_ERROR;
764 return (DDI_FAILURE);
765 }
766 nge_reg_put32(ngep, NGE_RX_STA, rx_sta.sta_val);
767
768 /* Disable auto-poll of rx's state machine */
769 rx_poll.poll_val = nge_reg_get32(ngep, NGE_RX_POLL);
770 rx_poll.poll_bits.rpen = NGE_CLEAR;
771 rx_poll.poll_bits.rpi = NGE_CLEAR;
772 nge_reg_put32(ngep, NGE_RX_POLL, rx_poll.poll_val);
773
774 /* Disable auto-polling of tx's state machine */
775 tx_poll.poll_val = nge_reg_get32(ngep, NGE_TX_POLL);
776 tx_poll.poll_bits.tpen = NGE_CLEAR;
777 tx_poll.poll_bits.tpi = NGE_CLEAR;
778 nge_reg_put32(ngep, NGE_TX_POLL, tx_poll.poll_val);
779
780 /* Restore buffer management */
781 mode.mode_val = nge_reg_get32(ngep, NGE_MODE_CNTL);
782 mode.mode_bits.bm_reset = NGE_SET;
783 mode.mode_bits.tx_rcom_en = NGE_SET;
784 nge_reg_put32(ngep, NGE_MODE_CNTL, mode.mode_val);
785
786 if (ngep->dev_spec_param.advanced_pm) {
787
788 nge_reg_put32(ngep, NGE_PMU_CIDLE_LIMIT, 0);
789 nge_reg_put32(ngep, NGE_PMU_DIDLE_LIMIT, 0);
790
791 pmu_cntl2.cntl2_val = nge_reg_get32(ngep, NGE_PMU_CNTL2);
792 pmu_cntl2.cntl2_bits.cidle_timer = NGE_CLEAR;
793 pmu_cntl2.cntl2_bits.didle_timer = NGE_CLEAR;
794 nge_reg_put32(ngep, NGE_PMU_CNTL2, pmu_cntl2.cntl2_val);
795 }
796 if (fault)
797 ngep->nge_chip_state = NGE_CHIP_FAULT;
798 else
799 ngep->nge_chip_state = NGE_CHIP_STOPPED;
800
801 return (err);
802 }
803
804 static void
805 nge_rx_setup(nge_t *ngep)
806 {
807 uint64_t desc_addr;
808 nge_rxtx_dlen dlen;
809 nge_rx_poll rx_poll;
810
811 /*
812 * Fill in the base address and length of the rx descriptor ring
813 */
814 desc_addr = ngep->recv->desc.cookie.dmac_laddress;
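/*
 * The 64-bit descriptor base address is split across two 32-bit
 * registers: the low word first, then the high word.
 */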
815 nge_reg_put32(ngep, NGE_RX_DADR, desc_addr);
816 nge_reg_put32(ngep, NGE_RX_DADR_HI, desc_addr >> 32);
817 dlen.dlen_val = nge_reg_get32(ngep, NGE_RXTX_DLEN);
818 dlen.dlen_bits.rdlen = ngep->recv->desc.nslots - 1;
819 nge_reg_put32(ngep, NGE_RXTX_DLEN, dlen.dlen_val);
820
821 rx_poll.poll_val = nge_reg_get32(ngep, NGE_RX_POLL);
822 rx_poll.poll_bits.rpi = RX_POLL_INTV_1G;
823 rx_poll.poll_bits.rpen = NGE_SET;
824 nge_reg_put32(ngep, NGE_RX_POLL, rx_poll.poll_val);
825 }
826
827 static void
828 nge_tx_setup(nge_t *ngep)
829 {
830 uint64_t desc_addr;
831 nge_rxtx_dlen dlen;
832
833 /*
834 * Fill in the base address and length of the tx descriptor ring
835 */
836 desc_addr = ngep->send->desc.cookie.dmac_laddress;
837 nge_reg_put32(ngep, NGE_TX_DADR, desc_addr);
838 nge_reg_put32(ngep, NGE_TX_DADR_HI, desc_addr >> 32);
839 dlen.dlen_val = nge_reg_get32(ngep, NGE_RXTX_DLEN);
840 dlen.dlen_bits.tdlen = ngep->send->desc.nslots - 1;
841 nge_reg_put32(ngep, NGE_RXTX_DLEN, dlen.dlen_val);
842 }
843
844 static int
845 nge_buff_setup(nge_t *ngep)
846 {
847 nge_mode_cntl mode_cntl;
848 nge_dev_spec_param_t *dev_param_p;
849
850 dev_param_p = &ngep->dev_spec_param;
851
852 /*
853 * Configure the Rx and Tx buffers
854 */
855 nge_rx_setup(ngep);
856 nge_tx_setup(ngep);
857
858 /*
859 * Configure buffer attribute
860 */
861 mode_cntl.mode_val = nge_reg_get32(ngep, NGE_MODE_CNTL);
862
863 /*
864 * Enable DMA access requests
865 */
866 mode_cntl.mode_bits.dma_dis = NGE_CLEAR;
867
868 /*
869 * Enable buffer management
870 */
871 mode_cntl.mode_bits.bm_reset = NGE_CLEAR;
872
873 /*
874 * Select the descriptor type (standard offload or hot)
875 */
876 mode_cntl.mode_bits.desc_type = ngep->desc_mode;
877
878 /*
879 * Support receive hardware checksum
880 */
881 if (dev_param_p->rx_hw_checksum)
882 mode_cntl.mode_bits.rx_sum_en = NGE_SET;
883 else
884 mode_cntl.mode_bits.rx_sum_en = NGE_CLEAR;
885
886 /*
887 * Disable Tx PRD coarse update
888 */
889 mode_cntl.mode_bits.tx_prd_cu_en = NGE_CLEAR;
890
891 /*
892 * Disable 64-byte access
893 */
894 mode_cntl.mode_bits.w64_dis = NGE_SET;
895
896 /*
897 * Skipping Rx error frames is not supported; if it is
898 * enabled, jumbo frames no longer work.
899 */
900 mode_cntl.mode_bits.rx_filter_en = NGE_CLEAR;
901
902 /*
903 * Hot mode is not currently supported
904 */
905 mode_cntl.mode_bits.resv15 = NGE_CLEAR;
906
907 if (dev_param_p->vlan) {
908 /* Disable the vlan strip for devices which support vlan */
909 mode_cntl.mode_bits.vlan_strip = NGE_CLEAR;
910
911 /* Disable the vlan insert for devices which support vlan */
912 mode_cntl.mode_bits.vlan_ins = NGE_CLEAR;
913 }
914
915 if (dev_param_p->tx_rx_64byte) {
916
917 /* Set the maximum TX PRD fetch size to 64 bytes */
918 mode_cntl.mode_bits.tx_fetch_prd = NGE_SET;
919
920 /* Set the maximum RX PRD fetch size to 64 bytes */
921 mode_cntl.mode_bits.rx_fetch_prd = NGE_SET;
922 }
923 /*
924 * Upload Rx data as it arrives, rather than waiting for full frame
925 */
926 mode_cntl.mode_bits.resv16 = NGE_CLEAR;
927
928 /*
929 * Normal HOT table accesses
930 */
931 mode_cntl.mode_bits.resv17 = NGE_CLEAR;
932
933 /*
934 * Normal HOT buffer requesting
935 */
936 mode_cntl.mode_bits.resv18 = NGE_CLEAR;
937 nge_reg_put32(ngep, NGE_MODE_CNTL, mode_cntl.mode_val);
938
939 /*
940 * Signal controller to check for new Rx descriptors
941 */
942 mode_cntl.mode_val = nge_reg_get32(ngep, NGE_MODE_CNTL);
943 mode_cntl.mode_bits.rxdm = NGE_SET;
944 mode_cntl.mode_bits.tx_rcom_en = NGE_SET;
945 nge_reg_put32(ngep, NGE_MODE_CNTL, mode_cntl.mode_val);
946
947
948 return (DDI_SUCCESS);
949 }
950
951 /*
952 * When the chipset resets, it cannot restore the original
953 * mac address to the mac address registers.
954 *
955 * When the driver is detached, this function writes the original
956 * mac address back to the mac address registers.
957 */
958
959 void
960 nge_restore_mac_addr(nge_t *ngep)
961 {
962 uint32_t mac_addr;
963
964 mac_addr = (uint32_t)ngep->chipinfo.hw_mac_addr;
965 nge_reg_put32(ngep, NGE_UNI_ADDR0, mac_addr);
966 mac_addr = (uint32_t)(ngep->chipinfo.hw_mac_addr >> 32);
967 nge_reg_put32(ngep, NGE_UNI_ADDR1, mac_addr);
968 }
969
970 int
971 nge_chip_reset(nge_t *ngep)
972 {
973 int err;
974 uint8_t i;
975 uint32_t regno;
976 uint64_t mac = 0;
977 nge_uni_addr1 uaddr1;
978 nge_cp_cntl ee_cntl;
979 nge_soft_misc soft_misc;
980 nge_pmu_cntl0 pmu_cntl0;
981 nge_pmu_cntl2 pmu_cntl2;
982 nge_pm_cntl2 pm_cntl2;
983 const nge_ksindex_t *ksip;
984
985 NGE_TRACE(("nge_chip_reset($%p)", (void *)ngep));
986
987 /*
988 * Clear the statistics by reading the statistics register
989 */
990 for (ksip = nge_statistics; ksip->name != NULL; ++ksip) {
991 regno = KS_BASE + ksip->index * sizeof (uint32_t);
992 (void) nge_reg_get32(ngep, regno);
993 }
994
995 /*
996 * Setup seeprom control
997 */
998 ee_cntl.cntl_val = nge_reg_get32(ngep, NGE_EP_CNTL);
999 ee_cntl.cntl_bits.clkdiv = EEPROM_CLKDIV;
1000 ee_cntl.cntl_bits.rom_size = EEPROM_32K;
1001 ee_cntl.cntl_bits.word_wid = ACCESS_16BIT;
1002 ee_cntl.cntl_bits.wait_slots = EEPROM_WAITCLK;
1003 nge_reg_put32(ngep, NGE_EP_CNTL, ee_cntl.cntl_val);
1004
1005 /*
1006 * Reading the unicast mac address table
1007 */
1008 if (ngep->nge_chip_state == NGE_CHIP_INITIAL) {
1009 uaddr1.addr_val = nge_reg_get32(ngep, NGE_UNI_ADDR1);
1010 mac = uaddr1.addr_bits.addr;
1011 mac <<= 32;
1012 mac |= nge_reg_get32(ngep, NGE_UNI_ADDR0);
1013 ngep->chipinfo.hw_mac_addr = mac;
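/*
 * mac_addr_order (set for MCP61/MCP77/MCP79 above) selects how the
 * factory MAC address is laid out in the UNI_ADDR registers, so the
 * two loops below unpack the 48-bit value in opposite byte orders.
 */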
1014 if (ngep->dev_spec_param.mac_addr_order) {
1015 for (i = 0; i < ETHERADDRL; i++) {
1016 ngep->chipinfo.vendor_addr.addr[i] =
1017 (uchar_t)mac;
1018 ngep->cur_uni_addr.addr[i] =
1019 (uchar_t)mac;
1020 mac >>= 8;
1021 }
1022 } else {
1023 for (i = ETHERADDRL; i-- != 0; ) {
1024 ngep->chipinfo.vendor_addr.addr[i] =
1025 (uchar_t)mac;
1026 ngep->cur_uni_addr.addr[i] =
1027 (uchar_t)mac;
1028 mac >>= 8;
1029 }
1030 }
1031 ngep->chipinfo.vendor_addr.set = 1;
1032 }
1033 pci_config_put8(ngep->cfg_handle, PCI_CONF_CACHE_LINESZ,
1034 ngep->chipinfo.clsize);
1035 pci_config_put8(ngep->cfg_handle, PCI_CONF_LATENCY_TIMER,
1036 ngep->chipinfo.latency);
1037
1038
1039 if (ngep->dev_spec_param.advanced_pm) {
1040
1041 /* Program software misc register */
1042 soft_misc.misc_val = nge_reg_get32(ngep, NGE_SOFT_MISC);
1043 soft_misc.misc_bits.rx_clk_vx_rst = NGE_SET;
1044 soft_misc.misc_bits.tx_clk_vx_rst = NGE_SET;
1045 soft_misc.misc_bits.clk12m_vx_rst = NGE_SET;
1046 soft_misc.misc_bits.fpci_clk_vx_rst = NGE_SET;
1047 soft_misc.misc_bits.rx_clk_vc_rst = NGE_SET;
1048 soft_misc.misc_bits.tx_clk_vc_rst = NGE_SET;
1049 soft_misc.misc_bits.fs_clk_vc_rst = NGE_SET;
1050 soft_misc.misc_bits.rst_ex_m2pintf = NGE_SET;
1051 nge_reg_put32(ngep, NGE_SOFT_MISC, soft_misc.misc_val);
1052
1053 /* wait for 32 us */
1054 drv_usecwait(32);
1055
1056 soft_misc.misc_val = nge_reg_get32(ngep, NGE_SOFT_MISC);
1057 soft_misc.misc_bits.rx_clk_vx_rst = NGE_CLEAR;
1058 soft_misc.misc_bits.tx_clk_vx_rst = NGE_CLEAR;
1059 soft_misc.misc_bits.clk12m_vx_rst = NGE_CLEAR;
1060 soft_misc.misc_bits.fpci_clk_vx_rst = NGE_CLEAR;
1061 soft_misc.misc_bits.rx_clk_vc_rst = NGE_CLEAR;
1062 soft_misc.misc_bits.tx_clk_vc_rst = NGE_CLEAR;
1063 soft_misc.misc_bits.fs_clk_vc_rst = NGE_CLEAR;
1064 soft_misc.misc_bits.rst_ex_m2pintf = NGE_CLEAR;
1065 nge_reg_put32(ngep, NGE_SOFT_MISC, soft_misc.misc_val);
1066
1067 /* Program PMU registers */
1068 pmu_cntl0.cntl0_val = nge_reg_get32(ngep, NGE_PMU_CNTL0);
1069 pmu_cntl0.cntl0_bits.core_spd10_fp =
1070 NGE_PMU_CORE_SPD10_BUSY;
1071 pmu_cntl0.cntl0_bits.core_spd10_idle =
1072 NGE_PMU_CORE_SPD10_IDLE;
1073 pmu_cntl0.cntl0_bits.core_spd100_fp =
1074 NGE_PMU_CORE_SPD100_BUSY;
1075 pmu_cntl0.cntl0_bits.core_spd100_idle =
1076 NGE_PMU_CORE_SPD100_IDLE;
1077 pmu_cntl0.cntl0_bits.core_spd1000_fp =
1078 NGE_PMU_CORE_SPD1000_BUSY;
1079 pmu_cntl0.cntl0_bits.core_spd1000_idle =
1080 NGE_PMU_CORE_SPD100_IDLE;
1081 pmu_cntl0.cntl0_bits.core_spd10_idle =
1082 NGE_PMU_CORE_SPD10_IDLE;
1083 nge_reg_put32(ngep, NGE_PMU_CNTL0, pmu_cntl0.cntl0_val);
1084
1085 /* Set the core idle limit value */
1086 nge_reg_put32(ngep, NGE_PMU_CIDLE_LIMIT,
1087 NGE_PMU_CIDLE_LIMIT_DEF);
1088
1089 /* Set the device idle limit value */
1090 nge_reg_put32(ngep, NGE_PMU_DIDLE_LIMIT,
1091 NGE_PMU_DIDLE_LIMIT_DEF);
1092
1093 /* Enable the core/device idle timer in PMU control 2 */
1094 pmu_cntl2.cntl2_val = nge_reg_get32(ngep, NGE_PMU_CNTL2);
1095 pmu_cntl2.cntl2_bits.cidle_timer = NGE_SET;
1096 pmu_cntl2.cntl2_bits.didle_timer = NGE_SET;
1097 pmu_cntl2.cntl2_bits.core_enable = NGE_SET;
1098 pmu_cntl2.cntl2_bits.dev_enable = NGE_SET;
1099 nge_reg_put32(ngep, NGE_PMU_CNTL2, pmu_cntl2.cntl2_val);
1100 }
1101 /*
1102 * Stop the chipset and clear buffer management
1103 */
1104 err = nge_chip_stop(ngep, B_FALSE);
1105 if (err == DDI_FAILURE)
1106 return (err);
1107 /*
1108 * Clear the power state bits for phy since interface no longer
1109 * works after rebooting from Windows on a multi-boot machine
1110 */
1111 if (ngep->chipinfo.device == DEVICE_ID_MCP51_268 ||
1112 ngep->chipinfo.device == DEVICE_ID_MCP51_269 ||
1113 ngep->chipinfo.device == DEVICE_ID_MCP55_372 ||
1114 ngep->chipinfo.device == DEVICE_ID_MCP55_373 ||
1115 ngep->chipinfo.device == DEVICE_ID_MCP61_3EE ||
1116 ngep->chipinfo.device == DEVICE_ID_MCP61_3EF ||
1117 ngep->chipinfo.device == DEVICE_ID_MCP77_760 ||
1118 ngep->chipinfo.device == DEVICE_ID_MCP79_AB0) {
1119
1120 pm_cntl2.cntl_val = nge_reg_get32(ngep, NGE_PM_CNTL2);
1121 /* bring phy out of coma mode */
1122 pm_cntl2.cntl_bits.phy_coma_set = NGE_CLEAR;
1123 /* disable auto reset coma bits */
1124 pm_cntl2.cntl_bits.resv4 = NGE_CLEAR;
1125 /* restore power to gated clocks */
1126 pm_cntl2.cntl_bits.resv8_11 = NGE_CLEAR;
1127 nge_reg_put32(ngep, NGE_PM_CNTL2, pm_cntl2.cntl_val);
1128 }
1129
1130 ngep->nge_chip_state = NGE_CHIP_RESET;
1131 return (DDI_SUCCESS);
1132 }
1133
1134 int
1135 nge_chip_start(nge_t *ngep)
1136 {
1137 int err;
1138 nge_itc itc;
1139 nge_tx_cntl tx_cntl;
1140 nge_rx_cntrl0 rx_cntl0;
1141 nge_rx_cntl1 rx_cntl1;
1142 nge_tx_en tx_en;
1143 nge_rx_en rx_en;
1144 nge_mii_cs mii_cs;
1145 nge_swtr_cntl swtr_cntl;
1146 nge_rx_fifo_wm rx_fifo;
1147 nge_intr_mask intr_mask;
1148 nge_mintr_mask mintr_mask;
1149 nge_dev_spec_param_t *dev_param_p;
1150
1151 NGE_TRACE(("nge_chip_start($%p)", (void *)ngep));
1152
1153 /*
1154 * Setup buffer management
1155 */
1156 err = nge_buff_setup(ngep);
1157 if (err == DDI_FAILURE)
1158 return (err);
1159
1160 dev_param_p = &ngep->dev_spec_param;
1161
1162 /*
1163 * Enable PHY auto-polling
1164 */
1165 mii_cs.cs_val = nge_reg_get32(ngep, NGE_MII_CS);
1166 mii_cs.cs_bits.ap_paddr = ngep->phy_xmii_addr;
1167 mii_cs.cs_bits.ap_en = NGE_SET;
1168 mii_cs.cs_bits.ap_intv = MII_POLL_INTV;
1169 nge_reg_put32(ngep, NGE_MII_CS, mii_cs.cs_val);
1170
1171 /*
1172 * Setup link
1173 */
1174 (*ngep->physops->phys_update)(ngep);
1175
1176 /*
1177 * Configure the tx's parameters
1178 */
1179 tx_cntl.cntl_val = nge_reg_get32(ngep, NGE_TX_CNTL);
1180 if (dev_param_p->tx_pause_frame)
1181 tx_cntl.cntl_bits.paen = NGE_SET;
1182 else
1183 tx_cntl.cntl_bits.paen = NGE_CLEAR;
1184 tx_cntl.cntl_bits.retry_en = NGE_SET;
1185 tx_cntl.cntl_bits.pad_en = NGE_SET;
1186 tx_cntl.cntl_bits.fappend_en = NGE_SET;
1187 tx_cntl.cntl_bits.two_def_en = NGE_SET;
1188 tx_cntl.cntl_bits.max_retry = 15;
1189 tx_cntl.cntl_bits.burst_en = NGE_CLEAR;
1190 tx_cntl.cntl_bits.uflo_err_mask = NGE_CLEAR;
1191 tx_cntl.cntl_bits.tlcol_mask = NGE_CLEAR;
1192 tx_cntl.cntl_bits.lcar_mask = NGE_CLEAR;
1193 tx_cntl.cntl_bits.def_mask = NGE_CLEAR;
1194 tx_cntl.cntl_bits.exdef_mask = NGE_SET;
1195 tx_cntl.cntl_bits.lcar_mask = NGE_SET;
1196 tx_cntl.cntl_bits.tlcol_mask = NGE_SET;
1197 tx_cntl.cntl_bits.uflo_err_mask = NGE_SET;
1198 tx_cntl.cntl_bits.jam_seq_en = NGE_CLEAR;
1199 nge_reg_put32(ngep, NGE_TX_CNTL, tx_cntl.cntl_val);
1200
1201
1202 /*
1203 * Configure the parameters of Rx's state machine
1204 * Enable the following parameters:
1205 * 1). Pad Strip
1206 * 2). FCS Relay
1207 * 3). Pause
1208 * 4). Address filter
1209 * 5). Runt Packet receive
1210 * 6). Broadcast
1211 * 7). Receive Deferral
1212 *
1213 * Disable the following parameters for decreasing
1214 * the number of interrupts:
1215 * 1). Runt Interrupt.
1216 * 2). Rx's Late Collision interrupt.
1217 * 3). Rx's Max length Error Interrupt.
1218 * 4). Rx's Length Field error Interrupt.
1219 * 5). Rx's FCS error interrupt.
1220 * 6). Rx's overflow error interrupt.
1221 * 7). Rx's Frame alignment error interrupt.
1222 */
1223 rx_cntl0.cntl_val = nge_reg_get32(ngep, NGE_RX_CNTL0);
1224 rx_cntl0.cntl_bits.padsen = NGE_CLEAR;
1225 rx_cntl0.cntl_bits.fcsren = NGE_CLEAR;
1226 if (dev_param_p->rx_pause_frame)
1227 rx_cntl0.cntl_bits.paen = NGE_SET;
1228 else
1229 rx_cntl0.cntl_bits.paen = NGE_CLEAR;
1230 rx_cntl0.cntl_bits.lben = NGE_CLEAR;
1231 rx_cntl0.cntl_bits.afen = NGE_SET;
1232 rx_cntl0.cntl_bits.runten = NGE_CLEAR;
1233 rx_cntl0.cntl_bits.brdis = NGE_CLEAR;
1234 rx_cntl0.cntl_bits.rdfen = NGE_CLEAR;
1235 rx_cntl0.cntl_bits.runtm = NGE_CLEAR;
1236 rx_cntl0.cntl_bits.slfb = NGE_CLEAR;
1237 rx_cntl0.cntl_bits.rlcolm = NGE_CLEAR;
1238 rx_cntl0.cntl_bits.maxerm = NGE_CLEAR;
1239 rx_cntl0.cntl_bits.lferm = NGE_CLEAR;
1240 rx_cntl0.cntl_bits.crcm = NGE_CLEAR;
1241 rx_cntl0.cntl_bits.ofolm = NGE_CLEAR;
1242 rx_cntl0.cntl_bits.framerm = NGE_CLEAR;
1243 nge_reg_put32(ngep, NGE_RX_CNTL0, rx_cntl0.cntl_val);
1244
1245 /*
1246 * Configure the watermarks for the rx state machine
1247 */
1248 rx_fifo.wm_val = nge_reg_get32(ngep, NGE_RX_FIFO_WM);
1249 rx_fifo.wm_bits.data_hwm = ngep->rx_datahwm;
1250 rx_fifo.wm_bits.prd_lwm = ngep->rx_prdlwm;
1251 rx_fifo.wm_bits.prd_hwm = ngep->rx_prdhwm;
1252 nge_reg_put32(ngep, NGE_RX_FIFO_WM, rx_fifo.wm_val);
1253
1254 /*
1255 * Configure the deferral time slot for the rx state machine
1256 */
1257 nge_reg_put8(ngep, NGE_RX_DEf, ngep->rx_def);
1258
1259 /*
1260 * Configure the maximum rx packet length
1261 */
1262 rx_cntl1.cntl_val = nge_reg_get32(ngep, NGE_RX_CNTL1);
1263 rx_cntl1.cntl_bits.length = ngep->max_sdu;
1264 nge_reg_put32(ngep, NGE_RX_CNTL1, rx_cntl1.cntl_val);
1265 /*
1266 * Enable Tx's state machine
1267 */
1268 tx_en.val = nge_reg_get8(ngep, NGE_TX_EN);
1269 tx_en.bits.tx_en = NGE_SET;
1270 nge_reg_put8(ngep, NGE_TX_EN, tx_en.val);
1271
1272 /*
1273 * Enable Rx's state machine
1274 */
1275 rx_en.val = nge_reg_get8(ngep, NGE_RX_EN);
1276 rx_en.bits.rx_en = NGE_SET;
1277 nge_reg_put8(ngep, NGE_RX_EN, rx_en.val);
1278
1279 itc.itc_val = nge_reg_get32(ngep, NGE_SWTR_ITC);
1280 itc.itc_bits.sw_intv = ngep->sw_intr_intv;
1281 nge_reg_put32(ngep, NGE_SWTR_ITC, itc.itc_val);
1282
1283 swtr_cntl.ctrl_val = nge_reg_get8(ngep, NGE_SWTR_CNTL);
1284 swtr_cntl.cntl_bits.sten = NGE_SET;
1285 swtr_cntl.cntl_bits.stren = NGE_SET;
1286 nge_reg_put32(ngep, NGE_SWTR_CNTL, swtr_cntl.ctrl_val);
1287
1288 /*
1289 * Disable all MII read/write operation interrupts
1290 */
1291 mintr_mask.mask_val = nge_reg_get8(ngep, NGE_MINTR_MASK);
1292 mintr_mask.mask_bits.mrei = NGE_CLEAR;
1293 mintr_mask.mask_bits.mcc2 = NGE_CLEAR;
1294 mintr_mask.mask_bits.mcc1 = NGE_CLEAR;
1295 mintr_mask.mask_bits.mapi = NGE_SET;
1296 mintr_mask.mask_bits.mpdi = NGE_SET;
1297 nge_reg_put8(ngep, NGE_MINTR_MASK, mintr_mask.mask_val);
1298
1299 /*
1300 * Enable the needed interrupt events
1301 */
1302 intr_mask.mask_val = nge_reg_get32(ngep, NGE_INTR_MASK);
1303 intr_mask.mask_bits.reint = NGE_SET;
1304 intr_mask.mask_bits.rcint = NGE_SET;
1305 intr_mask.mask_bits.miss = NGE_SET;
1306 intr_mask.mask_bits.teint = NGE_SET;
1307 intr_mask.mask_bits.tcint = NGE_CLEAR;
1308 intr_mask.mask_bits.stint = NGE_CLEAR;
1309 intr_mask.mask_bits.mint = NGE_CLEAR;
1310 intr_mask.mask_bits.rfint = NGE_CLEAR;
1311 intr_mask.mask_bits.tfint = NGE_SET;
1312 intr_mask.mask_bits.feint = NGE_SET;
1313 intr_mask.mask_bits.resv10 = NGE_CLEAR;
1314 intr_mask.mask_bits.resv11 = NGE_CLEAR;
1315 intr_mask.mask_bits.resv12 = NGE_CLEAR;
1316 intr_mask.mask_bits.resv13 = NGE_CLEAR;
1317 intr_mask.mask_bits.phyint = NGE_CLEAR;
1318 ngep->intr_masks = intr_mask.mask_val;
1319 nge_reg_put32(ngep, NGE_INTR_MASK, intr_mask.mask_val);
1320 ngep->nge_chip_state = NGE_CHIP_RUNNING;
1321 return (DDI_SUCCESS);
1322 }
1323
1324 /*
1325 * nge_chip_sync() -- program the chip with the unicast MAC address,
1326 * the multicast hash table, the required level of promiscuity.
1327 */
1328 void
1329 nge_chip_sync(nge_t *ngep)
1330 {
1331 uint8_t i;
1332 uint64_t macaddr;
1333 uint64_t mul_addr;
1334 uint64_t mul_mask;
1335 nge_rx_cntrl0 rx_cntl;
1336 nge_uni_addr1 uni_adr1;
1337
1338 NGE_TRACE(("nge_chip_sync($%p)", (void *)ngep));
1339
1340 macaddr = 0x0ull;
1341 mul_addr = 0x0ull;
1342 mul_mask = 0x0ull;
1343 rx_cntl.cntl_val = nge_reg_get32(ngep, NGE_RX_CNTL0);
1344
1345 if (ngep->promisc) {
1346 rx_cntl.cntl_bits.afen = NGE_CLEAR;
1347 rx_cntl.cntl_bits.brdis = NGE_SET;
1348 } else {
1349 rx_cntl.cntl_bits.afen = NGE_SET;
1350 rx_cntl.cntl_bits.brdis = NGE_CLEAR;
1351 }
1352
1353 /*
1354 * Transform the unicast MAC address(es) from host to
1355 * chip format ...
1356 */
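/*
 * The loop below packs addr[0]..addr[5] into a 64-bit value with
 * addr[0] in the least significant byte, which is the layout
 * written to UNI_ADDR0/UNI_ADDR1.
 */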
1357 for (i = ETHERADDRL, macaddr = 0ull; i != 0; --i) {
1358 macaddr |= ngep->cur_uni_addr.addr[i-1];
1359 macaddr <<= (i > 1) ? 8 : 0;
1360 }
1361
1362 nge_reg_put32(ngep, NGE_UNI_ADDR0, (uint32_t)macaddr);
1363 macaddr >>= 32;
1364 uni_adr1.addr_val = nge_reg_get32(ngep, NGE_UNI_ADDR1);
1365 uni_adr1.addr_bits.addr = (uint16_t)macaddr;
1366 uni_adr1.addr_bits.resv16_31 = (uint16_t)0;
1367 nge_reg_put32(ngep, NGE_UNI_ADDR1, uni_adr1.addr_val);
1368
1369 /*
1370 * Reprogram the multicast address table ...
1371 */
1372 for (i = ETHERADDRL, mul_addr = 0ull; i != 0; --i) {
1373 mul_addr |= ngep->cur_mul_addr.addr[i-1];
1374 mul_addr <<= (i > 1) ? 8 : 0;
1375 mul_mask |= ngep->cur_mul_mask.addr[i-1];
1376 mul_mask <<= (i > 1) ? 8 : 0;
1377 }
1378 nge_reg_put32(ngep, NGE_MUL_ADDR0, (uint32_t)mul_addr);
1379 mul_addr >>= 32;
1380 nge_reg_put32(ngep, NGE_MUL_ADDR1, mul_addr);
1381 nge_reg_put32(ngep, NGE_MUL_MASK, (uint32_t)mul_mask);
1382 mul_mask >>= 32;
1383 nge_reg_put32(ngep, NGE_MUL_MASK1, mul_mask);
1384 /*
1385 * Set or clear the PROMISCUOUS mode bit
1386 */
1387 nge_reg_put32(ngep, NGE_RX_CNTL0, rx_cntl.cntl_val);
1388 /*
1389 * For internal PHY loopback, the link will not come up,
1390 * so the mac modes need to be synced directly.
1391 */
1392 if (ngep->param_loop_mode == NGE_LOOP_INTERNAL_PHY)
1393 nge_sync_mac_modes(ngep);
1394 }
1395
1396 static void
1397 nge_chip_err(nge_t *ngep)
1398 {
1399 nge_reg010 reg010_ins;
1400 nge_sw_statistics_t *psw_stat;
1401 nge_intr_mask intr_mask;
1402
1403 NGE_TRACE(("nge_chip_err($%p)", (void *)ngep));
1404
1405 psw_stat = (nge_sw_statistics_t *)&ngep->statistics.sw_statistics;
1406 reg010_ins.reg010_val = nge_reg_get32(ngep, NGE_REG010);
1407 if (reg010_ins.reg010_bits.resv0)
1408 psw_stat->fe_err.tso_err_mss ++;
1409
1410 if (reg010_ins.reg010_bits.resv1)
1411 psw_stat->fe_err.tso_dis ++;
1412
1413 if (reg010_ins.reg010_bits.resv2)
1414 psw_stat->fe_err.tso_err_nosum ++;
1415
1416 if (reg010_ins.reg010_bits.resv3)
1417 psw_stat->fe_err.tso_err_hov ++;
1418
1419 if (reg010_ins.reg010_bits.resv4)
1420 psw_stat->fe_err.tso_err_huf ++;
1421
1422 if (reg010_ins.reg010_bits.resv5)
1423 psw_stat->fe_err.tso_err_l2 ++;
1424
1425 if (reg010_ins.reg010_bits.resv6)
1426 psw_stat->fe_err.tso_err_ip ++;
1427
1428 if (reg010_ins.reg010_bits.resv7)
1429 psw_stat->fe_err.tso_err_l4 ++;
1430
1431 if (reg010_ins.reg010_bits.resv8)
1432 psw_stat->fe_err.tso_err_tcp ++;
1433
1434 if (reg010_ins.reg010_bits.resv9)
1435 psw_stat->fe_err.hsum_err_ip ++;
1436
1437 if (reg010_ins.reg010_bits.resv10)
1438 psw_stat->fe_err.hsum_err_l4 ++;
1439
1440 if (reg010_ins.reg010_val != 0) {
1441
1442 /*
1443 * A fatal error interrupt is triggered by malformed driver
1444 * commands; disable it here unless debugging.
1445 */
1446 intr_mask.mask_val = nge_reg_get32(ngep, NGE_INTR_MASK);
1447 intr_mask.mask_bits.feint = NGE_CLEAR;
1448 nge_reg_put32(ngep, NGE_INTR_MASK, intr_mask.mask_val);
1449 ngep->intr_masks = intr_mask.mask_val;
1450
1451 }
1452 }
1453
1454 static void
1455 nge_sync_mac_modes(nge_t *ngep)
1456 {
1457 nge_tx_def tx_def;
1458 nge_tx_fifo_wm tx_fifo;
1459 nge_bkoff_cntl bk_cntl;
1460 nge_mac2phy m2p;
1461 nge_rx_cntrl0 rx_cntl0;
1462 nge_tx_cntl tx_cntl;
1463 nge_dev_spec_param_t *dev_param_p;
1464
1465 dev_param_p = &ngep->dev_spec_param;
1466
1467 tx_def.def_val = nge_reg_get32(ngep, NGE_TX_DEF);
1468 m2p.m2p_val = nge_reg_get32(ngep, NGE_MAC2PHY);
1469 tx_fifo.wm_val = nge_reg_get32(ngep, NGE_TX_FIFO_WM);
1470 bk_cntl.cntl_val = nge_reg_get32(ngep, NGE_BKOFF_CNTL);
1471 bk_cntl.bkoff_bits.rseed = BKOFF_RSEED;
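/*
 * Program the MAC-to-PHY speed, inter-frame gap timings, Tx FIFO
 * watermark and back-off slot time according to the negotiated
 * link speed (10/100/1000 Mbps).
 */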
1472 switch (ngep->param_link_speed) {
1473 case 10:
1474 m2p.m2p_bits.speed = low_speed;
1475 tx_def.def_bits.ifg1_def = TX_IFG1_DEFAULT;
1476 if (ngep->phy_mode == RGMII_IN) {
1477 tx_def.def_bits.ifg2_def = TX_IFG2_RGMII_10_100;
1478 tx_def.def_bits.if_def = TX_IFG_RGMII_OTHER;
1479 } else {
1480 tx_def.def_bits.if_def = TX_TIFG_MII;
1481 tx_def.def_bits.ifg2_def = TX_IFG2_MII;
1482 }
1483 tx_fifo.wm_bits.nbfb_wm = TX_FIFO_NOB_WM_MII;
1484 bk_cntl.bkoff_bits.sltm = BKOFF_SLIM_MII;
1485 break;
1486
1487 case 100:
1488 m2p.m2p_bits.speed = fast_speed;
1489 tx_def.def_bits.ifg1_def = TX_IFG1_DEFAULT;
1490 if (ngep->phy_mode == RGMII_IN) {
1491 tx_def.def_bits.ifg2_def = TX_IFG2_RGMII_10_100;
1492 tx_def.def_bits.if_def = TX_IFG_RGMII_OTHER;
1493 } else {
1494 tx_def.def_bits.if_def = TX_TIFG_MII;
1495 tx_def.def_bits.ifg2_def = TX_IFG2_MII;
1496 }
1497 tx_fifo.wm_bits.nbfb_wm = TX_FIFO_NOB_WM_MII;
1498 bk_cntl.bkoff_bits.sltm = BKOFF_SLIM_MII;
1499 break;
1500
1501 case 1000:
1502 m2p.m2p_bits.speed = giga_speed;
1503 tx_def.def_bits.ifg1_def = TX_IFG1_DEFAULT;
1504 if (ngep->param_link_duplex == LINK_DUPLEX_FULL) {
1505 tx_def.def_bits.ifg2_def = TX_IFG2_RGMII_1000;
1506 tx_def.def_bits.if_def = TX_IFG_RGMII_1000_FD;
1507 } else {
1508 tx_def.def_bits.ifg2_def = TX_IFG2_RGMII_1000;
1509 tx_def.def_bits.if_def = TX_IFG_RGMII_OTHER;
1510 }
1511
1512 tx_fifo.wm_bits.nbfb_wm = TX_FIFO_NOB_WM_GMII;
1513 bk_cntl.bkoff_bits.sltm = BKOFF_SLIM_GMII;
1514 break;
1515 }
1516
1517 if (ngep->chipinfo.device == DEVICE_ID_MCP55_373 ||
1518 ngep->chipinfo.device == DEVICE_ID_MCP55_372) {
1519 m2p.m2p_bits.phyintr = NGE_CLEAR;
1520 m2p.m2p_bits.phyintrlvl = NGE_CLEAR;
1521 }
1522 if (ngep->param_link_duplex == LINK_DUPLEX_HALF) {
1523 m2p.m2p_bits.hdup_en = NGE_SET;
1524 } else {
1525 m2p.m2p_bits.hdup_en = NGE_CLEAR;
1526 }
1527 nge_reg_put32(ngep, NGE_MAC2PHY, m2p.m2p_val);
1528 nge_reg_put32(ngep, NGE_TX_DEF, tx_def.def_val);
1529
1530 tx_fifo.wm_bits.data_lwm = TX_FIFO_DATA_LWM;
1531 tx_fifo.wm_bits.prd_lwm = TX_FIFO_PRD_LWM;
1532 tx_fifo.wm_bits.uprd_hwm = TX_FIFO_PRD_HWM;
1533 tx_fifo.wm_bits.fb_wm = TX_FIFO_TBFW;
1534 nge_reg_put32(ngep, NGE_TX_FIFO_WM, tx_fifo.wm_val);
1535
1536 nge_reg_put32(ngep, NGE_BKOFF_CNTL, bk_cntl.cntl_val);
1537
1538 rx_cntl0.cntl_val = nge_reg_get32(ngep, NGE_RX_CNTL0);
1539 if (ngep->param_link_rx_pause && dev_param_p->rx_pause_frame) {
1540 if (rx_cntl0.cntl_bits.paen == NGE_CLEAR) {
1541 rx_cntl0.cntl_bits.paen = NGE_SET;
1542 nge_reg_put32(ngep, NGE_RX_CNTL0, rx_cntl0.cntl_val);
1543 }
1544 } else {
1545 if (rx_cntl0.cntl_bits.paen == NGE_SET) {
1546 rx_cntl0.cntl_bits.paen = NGE_CLEAR;
1547 nge_reg_put32(ngep, NGE_RX_CNTL0, rx_cntl0.cntl_val);
1548 }
1549 }
1550
1551 tx_cntl.cntl_val = nge_reg_get32(ngep, NGE_TX_CNTL);
1552 if (ngep->param_link_tx_pause && dev_param_p->tx_pause_frame) {
1553 if (tx_cntl.cntl_bits.paen == NGE_CLEAR) {
1554 tx_cntl.cntl_bits.paen = NGE_SET;
1555 nge_reg_put32(ngep, NGE_TX_CNTL, tx_cntl.cntl_val);
1556 }
1557 } else {
1558 if (tx_cntl.cntl_bits.paen == NGE_SET) {
1559 tx_cntl.cntl_bits.paen = NGE_CLEAR;
1560 nge_reg_put32(ngep, NGE_TX_CNTL, tx_cntl.cntl_val);
1561 }
1562 }
1563 }
1564
1565 /*
1566 * Handler for hardware link state change.
1567 *
1568 * When this routine is called, the hardware link state has changed
1569 * and the new state is reflected in the param_* variables. Here
1570 * we must update the softstate, reprogram the MAC to match, and
1571 * record the change in the log and/or on the console.
1572 */
1573 static void
1574 nge_factotum_link_handler(nge_t *ngep)
1575 {
1576 /*
1577 * Update the s/w link_state
1578 */
1579 if (ngep->param_link_up)
1580 ngep->link_state = LINK_STATE_UP;
1581 else
1582 ngep->link_state = LINK_STATE_DOWN;
1583
1584 /*
1585 * Reprogram the MAC modes to match
1586 */
1587 nge_sync_mac_modes(ngep);
1588 }
1589
1590 static boolean_t
1591 nge_factotum_link_check(nge_t *ngep)
1592 {
1593 boolean_t lchg;
1594 boolean_t check;
1595
1596 ASSERT(mutex_owned(ngep->genlock));
1597
1598 (*ngep->physops->phys_check)(ngep);
1599 switch (ngep->link_state) {
1600 case LINK_STATE_UP:
1601 lchg = (ngep->param_link_up == B_FALSE);
1602 check = (ngep->param_link_up == B_FALSE);
1603 break;
1604
1605 case LINK_STATE_DOWN:
1606 lchg = (ngep->param_link_up == B_TRUE);
1607 check = (ngep->param_link_up == B_TRUE);
1608 break;
1609
1610 default:
1611 check = B_TRUE;
1612 break;
1613 }
1614
1615 /*
1616 * If <check> is false, we're sure the link hasn't changed.
1617 * If true, however, it's not yet definitive; we have to call
1618 * nge_phys_check() to determine whether the link has settled
1619 * into a new state yet ... and if it has, then call the link
1620 * state change handler. But when the chip is a 5700 in a Dell 6650,
1621 * even if <check> is false, the link may have changed, so we
1622 * have to call nge_phys_check() to determine the link state.
1623 */
1624 if (check)
1625 nge_factotum_link_handler(ngep);
1626
1627 return (lchg);
1628 }
1629
1630 /*
1631 * Factotum routine to check for Tx stall, using the 'watchdog' counter
1632 */
1633 static boolean_t nge_factotum_stall_check(nge_t *ngep);
1634
1635 static boolean_t
1636 nge_factotum_stall_check(nge_t *ngep)
1637 {
1638 uint32_t dogval;
1639 send_ring_t *srp;
1640 srp = ngep->send;
1641 /*
1642 * Specific check for Tx stall ...
1643 *
1644 * The 'watchdog' counter is incremented whenever a packet
1645 * is queued, reset to 1 when some (but not all) buffers
1646 * are reclaimed, reset to 0 (disabled) when all buffers
1647 * are reclaimed, and shifted left here. If it exceeds the
1648 * threshold value, the chip is assumed to have stalled and
1649 * is put into the ERROR state. The factotum will then reset
1650 * it on the next pass.
1651 *
1652 * All of which should ensure that we don't get into a state
1653 * where packets are left pending indefinitely!
1654 */
1655 if (ngep->watchdog == 0 &&
1656 srp->tx_free < srp->desc.nslots)
1657 ngep->watchdog = 1;
1658 dogval = nge_atomic_shl32(&ngep->watchdog, 1);
1659 if (dogval >= nge_watchdog_check)
1660 nge_tx_recycle(ngep, B_FALSE);
1661 if (dogval < nge_watchdog_count)
1662 return (B_FALSE);
1663 else {
1664 ngep->statistics.sw_statistics.tx_stall++;
1665 return (B_TRUE);
1666 }
1667 }
1668
1669
1670 /*
1671 * The factotum is woken up when there's something to do that we'd rather
1672 * not do from inside a hardware interrupt handler or high-level cyclic.
1673 * Its two main tasks are:
1674 * reset & restart the chip after an error
1675 * check the link status whenever necessary
1676 */
1677 /* ARGSUSED */
1678 uint_t
1679 nge_chip_factotum(caddr_t args1, caddr_t args2)
1680 {
1681 uint_t result;
1682 nge_t *ngep;
1683 boolean_t err;
1684 boolean_t linkchg;
1685
1686 ngep = (nge_t *)args1;
1687
1688 NGE_TRACE(("nge_chip_factotum($%p)", (void *)ngep));
1689
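/*
 * Claim the soft interrupt only if factotum_flag was set; the flag
 * is checked and cleared under <softlock>.
 */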
1690 mutex_enter(ngep->softlock);
1691 if (ngep->factotum_flag == 0) {
1692 mutex_exit(ngep->softlock);
1693 return (DDI_INTR_UNCLAIMED);
1694 }
1695 ngep->factotum_flag = 0;
1696 mutex_exit(ngep->softlock);
1697 err = B_FALSE;
1698 linkchg = B_FALSE;
1699 result = DDI_INTR_CLAIMED;
1700
1701 mutex_enter(ngep->genlock);
1702 switch (ngep->nge_chip_state) {
1703 default:
1704 break;
1705
1706 case NGE_CHIP_RUNNING:
1707 linkchg = nge_factotum_link_check(ngep);
1708 err = nge_factotum_stall_check(ngep);
1709 break;
1710
1711 case NGE_CHIP_FAULT:
1712 (void) nge_restart(ngep);
1713 NGE_REPORT((ngep, "automatic recovery activated"));
1714 break;
1715 }
1716
1717 if (err)
1718 (void) nge_chip_stop(ngep, B_TRUE);
1719 mutex_exit(ngep->genlock);
1720
1721 /*
1722 * If the link state changed, tell the world about it (if
1723 * this version of MAC supports link state notification).
1724 * Note: can't do this while still holding the mutex.
1725 */
1726 if (linkchg)
1727 mac_link_update(ngep->mh, ngep->link_state);
1728
1729 return (result);
1730
1731 }
1732
1733 static void
1734 nge_intr_handle(nge_t *ngep, nge_intr_src *pintr_src)
1735 {
1736 boolean_t brx;
1737 boolean_t btx;
1738 nge_mintr_src mintr_src;
1739
1740 brx = B_FALSE;
1741 btx = B_FALSE;
1742 ngep->statistics.sw_statistics.intr_count++;
1743 ngep->statistics.sw_statistics.intr_lval = pintr_src->intr_val;
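/*
 * Classify the interrupt sources: rx work is indicated by the
 * reint, miss, rcint and stint bits; tx work by the teint and
 * tfint bits.
 */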
1744 brx = (pintr_src->int_bits.reint | pintr_src->int_bits.miss
1745 | pintr_src->int_bits.rcint | pintr_src->int_bits.stint)
1746 != 0 ? B_TRUE : B_FALSE;
1747 if (pintr_src->int_bits.reint)
1748 ngep->statistics.sw_statistics.rx_err++;
1749 if (pintr_src->int_bits.miss)
1750 ngep->statistics.sw_statistics.rx_nobuffer++;
1751
1752 btx = (pintr_src->int_bits.teint | pintr_src->int_bits.tfint)
1753 != 0 ? B_TRUE : B_FALSE;
1754 if (btx)
1755 nge_tx_recycle(ngep, B_TRUE);
1756 if (brx)
1757 nge_receive(ngep);
1758 if (pintr_src->int_bits.teint)
1759 ngep->statistics.sw_statistics.tx_stop_err++;
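/*
 * Adaptive interrupt moderation: after param_poll_busy_time
 * consecutive busy intervals (recv_count above the low watermark)
 * switch to poll mode; after param_poll_quiet_time consecutive
 * quiet intervals (recv_count below the high watermark) switch
 * back to per-packet interrupts.
 */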
1760 if (ngep->intr_moderation && brx) {
1761 if (ngep->poll) {
1762 if (ngep->recv_count < ngep->param_rx_intr_hwater) {
1763 ngep->quiet_time++;
1764 if (ngep->quiet_time ==
1765 ngep->param_poll_quiet_time) {
1766 ngep->poll = B_FALSE;
1767 ngep->quiet_time = 0;
1768 }
1769 } else
1770 ngep->quiet_time = 0;
1771 } else {
1772 if (ngep->recv_count > ngep->param_rx_intr_lwater) {
1773 ngep->busy_time++;
1774 if (ngep->busy_time ==
1775 ngep->param_poll_busy_time) {
1776 ngep->poll = B_TRUE;
1777 ngep->busy_time = 0;
1778 }
1779 } else
1780 ngep->busy_time = 0;
1781 }
1782 }
1783 ngep->recv_count = 0;
1784 if (pintr_src->int_bits.feint)
1785 nge_chip_err(ngep);
1786 /* link interrupt, check the link state */
1787 if (pintr_src->int_bits.mint) {
1788 mintr_src.src_val = nge_reg_get32(ngep, NGE_MINTR_SRC);
1789 nge_reg_put32(ngep, NGE_MINTR_SRC, mintr_src.src_val);
1790 nge_wake_factotum(ngep);
1791 }
1792 }
1793
1794 /*
1795 * nge_chip_intr() -- handle chip interrupts
1796 */
1797 /* ARGSUSED */
1798 uint_t
1799 nge_chip_intr(caddr_t arg1, caddr_t arg2)
1800 {
1801 nge_t *ngep = (nge_t *)arg1;
1802 nge_intr_src intr_src;
1803 nge_intr_mask intr_mask;
1804
1805 mutex_enter(ngep->genlock);
1806
1807 if (ngep->suspended) {
1808 mutex_exit(ngep->genlock);
1809 return (DDI_INTR_UNCLAIMED);
1810 }
1811
1812 /*
1813 * Check whether the chip says it's asserting #INTA;
1814 * if not, don't process or claim the interrupt.
1815 */
1816 intr_src.intr_val = nge_reg_get32(ngep, NGE_INTR_SRC);
1817 if (intr_src.intr_val == 0) {
1818 mutex_exit(ngep->genlock);
1819 return (DDI_INTR_UNCLAIMED);
1820 }
1821 /*
1822 * Ack the interrupt
1823 */
1824 nge_reg_put32(ngep, NGE_INTR_SRC, intr_src.intr_val);
1825
1826 if (ngep->nge_chip_state != NGE_CHIP_RUNNING) {
1827 mutex_exit(ngep->genlock);
1828 return (DDI_INTR_CLAIMED);
1829 }
1830 nge_intr_handle(ngep, &intr_src);
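/*
 * In poll mode, enable the soft timer interrupt (stint) and mask
 * the per-packet rx-complete interrupt (rcint); restore the normal
 * interrupt mask when leaving poll mode.
 */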
1831 if (ngep->poll && !ngep->ch_intr_mode) {
1832 intr_mask.mask_val = nge_reg_get32(ngep, NGE_INTR_MASK);
1833 intr_mask.mask_bits.stint = NGE_SET;
1834 intr_mask.mask_bits.rcint = NGE_CLEAR;
1835 nge_reg_put32(ngep, NGE_INTR_MASK, intr_mask.mask_val);
1836 ngep->ch_intr_mode = B_TRUE;
1837 } else if ((ngep->ch_intr_mode) && (!ngep->poll)) {
1838 nge_reg_put32(ngep, NGE_INTR_MASK, ngep->intr_masks);
1839 ngep->ch_intr_mode = B_FALSE;
1840 }
1841 mutex_exit(ngep->genlock);
1842 return (DDI_INTR_CLAIMED);
1843 }
1844
1845 static enum ioc_reply
1846 nge_pp_ioctl(nge_t *ngep, int cmd, mblk_t *mp, struct iocblk *iocp)
1847 {
1848 int err;
1849 uint64_t sizemask;
1850 uint64_t mem_va;
1851 uint64_t maxoff;
1852 boolean_t peek;
1853 nge_peekpoke_t *ppd;
1854 int (*ppfn)(nge_t *ngep, nge_peekpoke_t *ppd);
1855
1856 switch (cmd) {
1857 default:
1858 return (IOC_INVAL);
1859
1860 case NGE_PEEK:
1861 peek = B_TRUE;
1862 break;
1863
1864 case NGE_POKE:
1865 peek = B_FALSE;
1866 break;
1867 }
1868
1869 /*
1870 * Validate format of ioctl
1871 */
1872 if (iocp->ioc_count != sizeof (nge_peekpoke_t))
1873 return (IOC_INVAL);
1874 if (mp->b_cont == NULL)
1875 return (IOC_INVAL);
1876 ppd = (nge_peekpoke_t *)mp->b_cont->b_rptr;
1877
1878 /*
1879 * Validate request parameters
1880 */
1881 switch (ppd->pp_acc_space) {
1882 default:
1883 return (IOC_INVAL);
1884
1885 case NGE_PP_SPACE_CFG:
1886 /*
1887 * Config space
1888 */
1889 sizemask = 8|4|2|1;
1890 mem_va = 0;
1891 maxoff = PCI_CONF_HDR_SIZE;
1892 ppfn = peek ? nge_chip_peek_cfg : nge_chip_poke_cfg;
1893 break;
1894
1895 case NGE_PP_SPACE_REG:
1896 /*
1897 * Memory-mapped I/O space
1898 */
1899 sizemask = 8|4|2|1;
1900 mem_va = 0;
1901 maxoff = NGE_REG_SIZE;
1902 ppfn = peek ? nge_chip_peek_reg : nge_chip_poke_reg;
1903 break;
1904
1905 case NGE_PP_SPACE_MII:
1906 sizemask = 4|2|1;
1907 mem_va = 0;
1908 maxoff = NGE_MII_SIZE;
1909 ppfn = peek ? nge_chip_peek_mii : nge_chip_poke_mii;
1910 break;
1911
1912 case NGE_PP_SPACE_SEEPROM:
1913 sizemask = 4|2|1;
1914 mem_va = 0;
1915 maxoff = NGE_SEEROM_SIZE;
1916 ppfn = peek ? nge_chip_peek_seeprom : nge_chip_poke_seeprom;
1917 break;
1918 }
1919
1920 switch (ppd->pp_acc_size) {
1921 default:
1922 return (IOC_INVAL);
1923
1924 case 8:
1925 case 4:
1926 case 2:
1927 case 1:
1928 if ((ppd->pp_acc_size & sizemask) == 0)
1929 return (IOC_INVAL);
1930 break;
1931 }
1932
1933 if ((ppd->pp_acc_offset % ppd->pp_acc_size) != 0)
1934 return (IOC_INVAL);
1935
1936 if (ppd->pp_acc_offset >= maxoff)
1937 return (IOC_INVAL);
1938
1939 if (ppd->pp_acc_offset+ppd->pp_acc_size > maxoff)
1940 return (IOC_INVAL);
1941
1942 /*
1943 * All OK - go do it!
1944 */
1945 ppd->pp_acc_offset += mem_va;
1946 if (ppfn)
1947 err = (*ppfn)(ngep, ppd);
1948 if (err != DDI_SUCCESS)
1949 return (IOC_INVAL);
1950 return (peek ? IOC_REPLY : IOC_ACK);
1951 }
1952
1953 static enum ioc_reply nge_diag_ioctl(nge_t *ngep, int cmd, mblk_t *mp,
1954 struct iocblk *iocp);
1955 #pragma no_inline(nge_diag_ioctl)
1956
1957 static enum ioc_reply
1958 nge_diag_ioctl(nge_t *ngep, int cmd, mblk_t *mp, struct iocblk *iocp)
1959 {
1960 ASSERT(mutex_owned(ngep->genlock));
1961
1962 switch (cmd) {
1963 default:
1964 nge_error(ngep, "nge_diag_ioctl: invalid cmd 0x%x", cmd);
1965 return (IOC_INVAL);
1966
1967 case NGE_DIAG:
1968 return (IOC_ACK);
1969
1970 case NGE_PEEK:
1971 case NGE_POKE:
1972 return (nge_pp_ioctl(ngep, cmd, mp, iocp));
1973
1974 case NGE_PHY_RESET:
1975 return (IOC_RESTART_ACK);
1976
1977 case NGE_SOFT_RESET:
1978 case NGE_HARD_RESET:
1979 return (IOC_ACK);
1980 }
1981
1982 /* NOTREACHED */
1983 }
1984
1985 enum ioc_reply
1986 nge_chip_ioctl(nge_t *ngep, mblk_t *mp, struct iocblk *iocp)
1987 {
1988 int cmd;
1989
1990 ASSERT(mutex_owned(ngep->genlock));
1991
1992 cmd = iocp->ioc_cmd;
1993
1994 switch (cmd) {
1995 default:
1996 return (IOC_INVAL);
1997
1998 case NGE_DIAG:
1999 case NGE_PEEK:
2000 case NGE_POKE:
2001 case NGE_PHY_RESET:
2002 case NGE_SOFT_RESET:
2003 case NGE_HARD_RESET:
2004 #if NGE_DEBUGGING
2005 return (nge_diag_ioctl(ngep, cmd, mp, iocp));
2006 #else
2007 return (IOC_INVAL);
2008 #endif
2009
2010 case NGE_MII_READ:
2011 case NGE_MII_WRITE:
2012 return (IOC_INVAL);
2013
2014 #if NGE_SEE_IO32
2015 case NGE_SEE_READ:
2016 case NGE_SEE_WRITE:
2017 return (IOC_INVAL);
2018 #endif
2019
2020 #if NGE_FLASH_IO32
2021 case NGE_FLASH_READ:
2022 case NGE_FLASH_WRITE:
2023 return (IOC_INVAL);
2024 #endif
2025 }
2026 }
2027