1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
23 */
24
25 #include <sys/nxge/nxge_impl.h>
26
27 /*
28 * Tunable Receive Completion Ring Configuration B parameters.
29 */
30 uint16_t nxge_rx_pkt_thres; /* 16 bits */
31 uint8_t nxge_rx_pkt_timeout; /* 6 bits based on DMA clock divider */
32
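/*
 * Loopback property table.  Each entry pairs a loopback class (normal,
 * internal or external) with the key string reported through the
 * loopback ioctls and the nxge_lb_t value matched in nxge_set_lb().
 * The LB_GET_INFO_SIZE/LB_GET_INFO cases in nxge_loopback_ioctl()
 * publish only the subset supported by the current port mode and
 * link capabilities.
 */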
33 lb_property_t lb_normal = {normal, "normal", nxge_lb_normal};
34 lb_property_t lb_external10g = {external, "external10g", nxge_lb_ext10g};
35 lb_property_t lb_external1000 = {external, "external1000", nxge_lb_ext1000};
36 lb_property_t lb_external100 = {external, "external100", nxge_lb_ext100};
37 lb_property_t lb_external10 = {external, "external10", nxge_lb_ext10};
38 lb_property_t lb_phy10g = {internal, "phy10g", nxge_lb_phy10g};
39 lb_property_t lb_phy1000 = {internal, "phy1000", nxge_lb_phy1000};
40 lb_property_t lb_phy = {internal, "phy", nxge_lb_phy};
41 lb_property_t lb_serdes10g = {internal, "serdes10g", nxge_lb_serdes10g};
42 lb_property_t lb_serdes1000 = {internal, "serdes", nxge_lb_serdes1000};
43 lb_property_t lb_mac10g = {internal, "mac10g", nxge_lb_mac10g};
44 lb_property_t lb_mac1000 = {internal, "mac1000", nxge_lb_mac1000};
45 lb_property_t lb_mac = {internal, "mac10/100", nxge_lb_mac};
46
47 uint32_t nxge_lb_dbg = 1;
48 void nxge_get_mii(p_nxge_t nxgep, p_mblk_t mp);
49 void nxge_put_mii(p_nxge_t nxgep, p_mblk_t mp);
50 static nxge_status_t nxge_check_xaui_xfp(p_nxge_t nxgep);
51
52 extern uint32_t nxge_rx_mode;
53 extern uint32_t nxge_jumbo_mtu;
54 extern uint16_t nxge_rdc_buf_offset;
55
56 static void
57 nxge_rtrace_ioctl(p_nxge_t, queue_t *, mblk_t *, struct iocblk *);
58
59 /* ARGSUSED */
60 nxge_status_t
61 nxge_global_reset(p_nxge_t nxgep)
62 {
63 nxge_status_t status = NXGE_OK;
64
65 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_global_reset"));
66
67 if ((status = nxge_link_monitor(nxgep, LINK_MONITOR_STOP)) != NXGE_OK)
68 return (status);
69 (void) nxge_intr_hw_disable(nxgep);
70
71 if ((nxgep->suspended) ||
72 ((nxgep->statsp->port_stats.lb_mode ==
73 nxge_lb_phy1000) ||
74 (nxgep->statsp->port_stats.lb_mode ==
75 nxge_lb_phy10g) ||
76 (nxgep->statsp->port_stats.lb_mode ==
77 nxge_lb_serdes1000) ||
78 (nxgep->statsp->port_stats.lb_mode ==
79 nxge_lb_serdes10g))) {
80 if ((status = nxge_link_init(nxgep)) != NXGE_OK)
81 return (status);
82 }
83
84 if ((status = nxge_link_monitor(nxgep, LINK_MONITOR_START)) != NXGE_OK)
85 return (status);
86 if ((status = nxge_mac_init(nxgep)) != NXGE_OK)
87 return (status);
88 (void) nxge_intr_hw_enable(nxgep);
89
90 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_global_reset"));
91 return (status);
92 }
93
94 /* ARGSUSED */
95 void
96 nxge_hw_id_init(p_nxge_t nxgep)
97 {
98 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_hw_id_init"));
99
100 /*
101 * Set up required initial hardware parameters, such as the MAC MTU size.
102 */
103 nxgep->mac.is_jumbo = B_FALSE;
104
105 /*
106 * Set the maxframe size to 1522 (1518 + 4) to account for
107 * VLAN tagged packets.
108 */
109 nxgep->mac.minframesize = NXGE_MIN_MAC_FRAMESIZE; /* 64 */
110 nxgep->mac.maxframesize = NXGE_MAX_MAC_FRAMESIZE; /* 1522 */
111
112 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_hw_id_init: maxframesize %d",
113 nxgep->mac.maxframesize));
114 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_hw_id_init"));
115 }
116
117 /* ARGSUSED */
118 void
119 nxge_hw_init_niu_common(p_nxge_t nxgep)
120 {
121 p_nxge_hw_list_t hw_p;
122
123 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_hw_init_niu_common"));
124
125 if ((hw_p = nxgep->nxge_hw_p) == NULL) {
126 return;
127 }
128 MUTEX_ENTER(&hw_p->nxge_cfg_lock);
129 if (hw_p->flags & COMMON_INIT_DONE) {
130 NXGE_DEBUG_MSG((nxgep, MOD_CTL,
131 "nxge_hw_init_niu_common"
132 " already done for dip $%p function %d exiting",
133 hw_p->parent_devp, nxgep->function_num));
134 MUTEX_EXIT(&hw_p->nxge_cfg_lock);
135 return;
136 }
137
138 hw_p->flags = COMMON_INIT_START;
139 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "nxge_hw_init_niu_common"
140 " Started for device id %x with function %d",
141 hw_p->parent_devp, nxgep->function_num));
142
143 /* per neptune common block init */
144 (void) nxge_fflp_hw_reset(nxgep);
145
146 if (nxgep->niu_hw_type != NIU_HW_TYPE_RF) {
147 switch (nxge_rdc_buf_offset) {
148 case SW_OFFSET_NO_OFFSET:
149 case SW_OFFSET_64:
150 case SW_OFFSET_128:
151 break;
152 default:
153 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
154 "nxge_hw_init_niu_common: Unsupported RDC buffer"
155 " offset code %d, setting to %d",
156 nxge_rdc_buf_offset, SW_OFFSET_NO_OFFSET));
157 nxge_rdc_buf_offset = SW_OFFSET_NO_OFFSET;
158 break;
159 }
160 } else {
161 switch (nxge_rdc_buf_offset) {
162 case SW_OFFSET_NO_OFFSET:
163 case SW_OFFSET_64:
164 case SW_OFFSET_128:
165 case SW_OFFSET_192:
166 case SW_OFFSET_256:
167 case SW_OFFSET_320:
168 case SW_OFFSET_384:
169 case SW_OFFSET_448:
170 break;
171 default:
172 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
173 "nxge_hw_init_niu_common: Unsupported RDC buffer"
174 " offset code %d, setting to %d",
175 nxge_rdc_buf_offset, SW_OFFSET_NO_OFFSET));
176 nxge_rdc_buf_offset = SW_OFFSET_NO_OFFSET;
177 break;
178 }
179 }
180
181 hw_p->flags = COMMON_INIT_DONE;
182 MUTEX_EXIT(&hw_p->nxge_cfg_lock);
183
184 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "nxge_hw_init_niu_common"
185 " Done for device id %x with function %d",
186 hw_p->parent_devp, nxgep->function_num));
187 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_hw_init_niu_common"));
188 }
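/*
 * Illustration only: nxge_rdc_buf_offset is a driver tunable, so a
 * non-default RDC buffer offset would normally be selected from
 * /etc/system before the driver loads, e.g.
 *
 *	set nxge:nxge_rdc_buf_offset = <SW_OFFSET_* code>
 *
 * The switch statements above then clamp any code the detected NIU
 * hardware type does not support back to SW_OFFSET_NO_OFFSET.
 */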
189
190 /* ARGSUSED */
191 uint_t
192 nxge_intr(void *arg1, void *arg2)
193 {
194 p_nxge_ldv_t ldvp = (p_nxge_ldv_t)arg1;
195 p_nxge_t nxgep = (p_nxge_t)arg2;
196 uint_t serviced = DDI_INTR_UNCLAIMED;
197 uint8_t ldv;
198 npi_handle_t handle;
199 p_nxge_ldgv_t ldgvp;
200 p_nxge_ldg_t ldgp, t_ldgp;
201 p_nxge_ldv_t t_ldvp;
202 uint64_t vector0 = 0, vector1 = 0, vector2 = 0;
203 int i, j, nldvs, nintrs = 1;
204 npi_status_t rs = NPI_SUCCESS;
205
206 /* DDI interface returns second arg as NULL (n2 niumx driver) !!! */
207 if (arg2 == NULL || (void *) ldvp->nxgep != arg2) {
208 nxgep = ldvp->nxgep;
209 }
210 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr"));
211
212 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
213 NXGE_ERROR_MSG((nxgep, INT_CTL,
214 "<== nxge_intr: not initialized 0x%x", serviced));
215 return (serviced);
216 }
217
218 ldgvp = nxgep->ldgvp;
219 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr: ldgvp $%p", ldgvp));
220 if (ldvp == NULL && ldgvp) {
221 t_ldvp = ldvp = ldgvp->ldvp;
222 }
223 if (ldvp) {
224 ldgp = t_ldgp = ldvp->ldgp;
225 }
226 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr: "
227 "ldgvp $%p ldvp $%p ldgp $%p", ldgvp, ldvp, ldgp));
228 if (ldgvp == NULL || ldvp == NULL || ldgp == NULL) {
229 NXGE_ERROR_MSG((nxgep, INT_CTL, "==> nxge_intr: "
230 "ldgvp $%p ldvp $%p ldgp $%p", ldgvp, ldvp, ldgp));
231 NXGE_ERROR_MSG((nxgep, INT_CTL, "<== nxge_intr: not ready"));
232 return (DDI_INTR_UNCLAIMED);
233 }
234 /*
235 * This interrupt handler has to go through all the logical
236 * devices to find out which logical device interrupted us and then
237 * call its handler to process the events.
238 */
239 handle = NXGE_DEV_NPI_HANDLE(nxgep);
240 t_ldgp = ldgp;
241 t_ldvp = ldgp->ldvp;
242
243 nldvs = ldgp->nldvs;
244
245 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr: #ldvs %d #intrs %d",
246 nldvs, ldgvp->ldg_intrs));
247
248 serviced = DDI_INTR_CLAIMED;
249 for (i = 0; i < nintrs; i++, t_ldgp++) {
250 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr(%d): #ldvs %d "
251 " #intrs %d", i, nldvs, nintrs));
252 /* Get this group's flag bits. */
253 rs = npi_ldsv_ldfs_get(handle, t_ldgp->ldg,
254 &vector0, &vector1, &vector2);
255 if (rs) {
256 continue;
257 }
258 if (!vector0 && !vector1 && !vector2) {
259 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr: "
260 "no interrupts on group %d", t_ldgp->ldg));
261 continue;
262 }
263 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr: "
264 "vector0 0x%llx vector1 0x%llx vector2 0x%llx",
265 vector0, vector1, vector2));
266 nldvs = t_ldgp->nldvs;
267 for (j = 0; j < nldvs; j++, t_ldvp++) {
268 /*
269 * Call device's handler if flag bits are on.
270 */
271 ldv = t_ldvp->ldv;
272 if (((ldv < NXGE_MAC_LD_START) &&
273 (LDV_ON(ldv, vector0) |
274 (LDV_ON(ldv, vector1)))) ||
275 (ldv >= NXGE_MAC_LD_START &&
276 ((LDV2_ON_1(ldv, vector2)) ||
277 (LDV2_ON_2(ldv, vector2))))) {
278 (void) (t_ldvp->ldv_intr_handler)(
279 (caddr_t)t_ldvp, arg2);
280 NXGE_DEBUG_MSG((nxgep, INT_CTL,
281 "==> nxge_intr: "
282 "calling device %d #ldvs %d #intrs %d",
283 j, nldvs, nintrs));
284 }
285 }
286 }
287
288 t_ldgp = ldgp;
289 for (i = 0; i < nintrs; i++, t_ldgp++) {
290 /* rearm group interrupts */
291 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr: arm "
292 "group %d", t_ldgp->ldg));
293 (void) npi_intr_ldg_mgmt_set(handle, t_ldgp->ldg,
294 t_ldgp->arm, t_ldgp->ldg_timer);
295 }
296
297 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intr: serviced 0x%x",
298 serviced));
299 return (serviced);
300 }
301
302
303 /*
304 * XFP Related Status Register Values Under 3 Different Conditions
305 *
306 * -------------+-------------------------+-------------------------
307 * | Intel XFP and Avago | Picolight XFP
308 * -------------+---------+---------------+---------+---------------
309 * | STATUS0 | TX_ALARM_STAT | STATUS0 | TX_ALARM_STAT
310 * -------------+---------+---------------+---------+---------------
311 * No XFP | 0x639C | 0x40 | 0x639C | 0x40
312 * -------------+---------+---------------+---------+---------------
313 * XFP,linkdown | 0x43BC | 0x40 | 0x639C | 0x40
314 * -------------+---------+---------------+---------+---------------
315 * XFP,linkup | 0x03FC | 0x0 | 0x03FC | 0x0
316 * -------------+---------+---------------+---------+---------------
317 * Note:
318 * STATUS0 = BCM8704_USER_ANALOG_STATUS0_REG
319 * TX_ALARM_STAT = BCM8704_USER_TX_ALARM_STATUS_REG
320 */
321 /* ARGSUSED */
322 static nxge_status_t
323 nxge_check_xaui_xfp(p_nxge_t nxgep)
324 {
325 nxge_status_t status = NXGE_OK;
326 uint8_t phy_port_addr;
327 uint16_t val;
328 uint16_t val1;
329 uint8_t portn;
330
331 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_check_xaui_xfp"));
332
333 portn = nxgep->mac.portnum;
334 phy_port_addr = nxgep->statsp->mac_stats.xcvr_portn;
335
336 /*
337 * Keep the val1 code even though it is not used. It could be
338 * used to differentiate the "No XFP" case from the "XFP,linkdown"
339 * case when an Intel XFP is used.
340 */
341 if ((status = nxge_mdio_read(nxgep, phy_port_addr,
342 BCM8704_USER_DEV3_ADDR,
343 BCM8704_USER_ANALOG_STATUS0_REG, &val)) == NXGE_OK) {
344 status = nxge_mdio_read(nxgep, phy_port_addr,
345 BCM8704_USER_DEV3_ADDR,
346 BCM8704_USER_TX_ALARM_STATUS_REG, &val1);
347 }
348
349 if (status != NXGE_OK) {
350 NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
351 NXGE_FM_EREPORT_XAUI_ERR);
352 if (DDI_FM_EREPORT_CAP(nxgep->fm_capabilities)) {
353 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
354 "XAUI is bad or absent on port<%d>\n", portn));
355 }
356 #ifdef NXGE_DEBUG
357 /*
358 * As a workaround for CR6693529, do not execute this block of
359 * code in the non-debug driver. When a Picolight XFP transceiver
360 * is used, register BCM8704_USER_ANALOG_STATUS0_REG returns
361 * the same 0x639C value in the normal link-down case, which causes
362 * false FMA messages and link reconnection problems.
363 */
364 } else if (nxgep->mac.portmode == PORT_10G_FIBER) {
365 /*
366 * 0x03FC = 0000 0011 1111 1100 (XFP is normal)
367 * 0x639C = 0110 0011 1001 1100 (XFP has problem)
368 * bit14 = 1: PMD loss-of-light indicator
369 * bit13 = 1: PMD Rx loss-of-signal
370 * bit6 = 0: Light is NOT ok
371 * bit5 = 0: PMD Rx signal is NOT ok
372 */
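/*
 * Decode sketch for the status word above (the mask names are
 * illustrative only, not symbols from the PRM or this driver):
 *
 *	#define	XFP_PMD_LOL	(1 << 14)	// PMD loss of light
 *	#define	XFP_PMD_RX_LOS	(1 << 13)	// PMD Rx loss of signal
 *	#define	XFP_LIGHT_OK	(1 << 6)
 *	#define	XFP_RX_SIG_OK	(1 << 5)
 *
 * 0x639C: LOL and RX_LOS set, LIGHT_OK and RX_SIG_OK clear -> XFP bad/absent
 * 0x03FC: LOL and RX_LOS clear, LIGHT_OK and RX_SIG_OK set -> XFP normal
 */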
373 if (val == 0x639C) {
374 NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
375 NXGE_FM_EREPORT_XFP_ERR);
376 if (DDI_FM_EREPORT_CAP(nxgep->fm_capabilities)) {
377 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
378 "XFP is bad or absent on port<%d>\n",
379 portn));
380 }
381 status = NXGE_ERROR;
382 }
383 #endif
384 }
385 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_check_xaui_xfp"));
386 return (status);
387 }
388
389
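/*
 * System error interrupt handler.  Entered either directly from the
 * system error logical device's hardware interrupt or, when
 * ldvp->use_timer is set, from the nxge_check_hw_state() timer below.
 */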
390 /* ARGSUSED */
391 uint_t
392 nxge_syserr_intr(void *arg1, void *arg2)
393 {
394 p_nxge_ldv_t ldvp = (p_nxge_ldv_t)arg1;
395 p_nxge_t nxgep = (p_nxge_t)arg2;
396 p_nxge_ldg_t ldgp = NULL;
397 npi_handle_t handle;
398 sys_err_stat_t estat;
399 uint_t serviced = DDI_INTR_UNCLAIMED;
400
401 if (arg1 == NULL && arg2 == NULL) {
402 return (serviced);
403 }
404 if (arg2 == NULL || ((ldvp != NULL && (void *) ldvp->nxgep != arg2))) {
405 if (ldvp != NULL) {
406 nxgep = ldvp->nxgep;
407 }
408 }
409 NXGE_DEBUG_MSG((nxgep, SYSERR_CTL,
410 "==> nxge_syserr_intr: arg2 $%p arg1 $%p", nxgep, ldvp));
411 if (ldvp != NULL && ldvp->use_timer == B_FALSE) {
412 ldgp = ldvp->ldgp;
413 if (ldgp == NULL) {
414 NXGE_ERROR_MSG((nxgep, SYSERR_CTL,
415 "<== nxge_syserrintr(no logical group): "
416 "arg2 $%p arg1 $%p", nxgep, ldvp));
417 return (DDI_INTR_UNCLAIMED);
418 }
419 /*
420 * Get the logical device state if the function uses interrupts rather than the timer.
421 */
422 }
423
424 /* This interrupt handler is for system error interrupts. */
425 handle = NXGE_DEV_NPI_HANDLE(nxgep);
426 estat.value = 0;
427 (void) npi_fzc_sys_err_stat_get(handle, &estat);
428 NXGE_DEBUG_MSG((nxgep, SYSERR_CTL,
429 "==> nxge_syserr_intr: device error 0x%016llx", estat.value));
430
431 if (estat.bits.ldw.smx) {
432 /* SMX */
433 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
434 "==> nxge_syserr_intr: device error - SMX"));
435 } else if (estat.bits.ldw.mac) {
436 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
437 "==> nxge_syserr_intr: device error - MAC"));
438 /*
439 * There is nothing to be done here. All MAC errors go to the per-MAC
440 * port interrupt. The MIF interrupt is the only MAC sub-block that
441 * can generate status here, and MIF status reported here is ignored;
442 * it is checked by the per-port timer instead.
443 */
444 } else if (estat.bits.ldw.ipp) {
445 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
446 "==> nxge_syserr_intr: device error - IPP"));
447 (void) nxge_ipp_handle_sys_errors(nxgep);
448 } else if (estat.bits.ldw.zcp) {
449 /* ZCP */
450 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
451 "==> nxge_syserr_intr: device error - ZCP"));
452 (void) nxge_zcp_handle_sys_errors(nxgep);
453 } else if (estat.bits.ldw.tdmc) {
454 /* TDMC */
455 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
456 "==> nxge_syserr_intr: device error - TDMC"));
457 /*
458 * There are no TDMC system errors defined in the PRM. All TDMC
459 * channel-specific errors are reported on a per-channel basis.
460 */
461 } else if (estat.bits.ldw.rdmc) {
462 /* RDMC */
463 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
464 "==> nxge_syserr_intr: device error - RDMC"));
465 (void) nxge_rxdma_handle_sys_errors(nxgep);
466 } else if (estat.bits.ldw.txc) {
467 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
468 "==> nxge_syserr_intr: device error - TXC"));
469 (void) nxge_txc_handle_sys_errors(nxgep);
470 } else if ((nxgep->niu_type != N2_NIU) && estat.bits.ldw.peu) {
471 /* PCI-E */
472 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
473 "==> nxge_syserr_intr: device error - PCI-E"));
474 } else if (estat.bits.ldw.meta1) {
475 /* META1 */
476 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
477 "==> nxge_syserr_intr: device error - META1"));
478 } else if (estat.bits.ldw.meta2) {
479 /* META2 */
480 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
481 "==> nxge_syserr_intr: device error - META2"));
482 } else if (estat.bits.ldw.fflp) {
483 /* FFLP */
484 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
485 "==> nxge_syserr_intr: device error - FFLP"));
486 (void) nxge_fflp_handle_sys_errors(nxgep);
487 }
488
489 /*
490 * nxge_check_xaui_xfp checks XAUI for all of the following
491 * portmodes, but checks XFP only if portmode == PORT_10G_FIBER.
492 */
493 if (nxgep->mac.portmode == PORT_10G_FIBER ||
494 nxgep->mac.portmode == PORT_10G_COPPER ||
495 nxgep->mac.portmode == PORT_10G_TN1010 ||
496 nxgep->mac.portmode == PORT_1G_TN1010) {
497 if (nxge_check_xaui_xfp(nxgep) != NXGE_OK) {
498 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
499 "==> nxge_syserr_intr: device error - XAUI"));
500 }
501 }
502
503 serviced = DDI_INTR_CLAIMED;
504
505 if (ldgp != NULL && ldvp != NULL && ldgp->nldvs == 1 &&
506 !ldvp->use_timer) {
507 (void) npi_intr_ldg_mgmt_set(handle, ldgp->ldg,
508 B_TRUE, ldgp->ldg_timer);
509 }
510 NXGE_DEBUG_MSG((nxgep, SYSERR_CTL, "<== nxge_syserr_intr"));
511 return (serviced);
512 }
513
514 /* ARGSUSED */
515 void
516 nxge_intr_hw_enable(p_nxge_t nxgep)
517 {
518 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr_hw_enable"));
519 (void) nxge_intr_mask_mgmt_set(nxgep, B_TRUE);
520 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intr_hw_enable"));
521 }
522
523 /* ARGSUSED */
524 void
525 nxge_intr_hw_disable(p_nxge_t nxgep)
526 {
527 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr_hw_disable"));
528 (void) nxge_intr_mask_mgmt_set(nxgep, B_FALSE);
529 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intr_hw_disable"));
530 }
531
532 /* ARGSUSED */
533 void
534 nxge_rx_hw_blank(void *arg, time_t ticks, uint_t count)
535 {
536 p_nxge_t nxgep = (p_nxge_t)arg;
537 uint8_t channel;
538 npi_handle_t handle;
539 p_nxge_ldgv_t ldgvp;
540 p_nxge_ldv_t ldvp;
541 int i;
542
543 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_rx_hw_blank"));
544 handle = NXGE_DEV_NPI_HANDLE(nxgep);
545
546 if ((ldgvp = nxgep->ldgvp) == NULL) {
547 NXGE_ERROR_MSG((nxgep, INT_CTL,
548 "<== nxge_rx_hw_blank (not enabled)"));
549 return;
550 }
551 ldvp = nxgep->ldgvp->ldvp;
552 if (ldvp == NULL) {
553 return;
554 }
555 for (i = 0; i < ldgvp->nldvs; i++, ldvp++) {
556 if (ldvp->is_rxdma) {
557 channel = ldvp->channel;
558 (void) npi_rxdma_cfg_rdc_rcr_threshold(handle,
559 channel, count);
560 (void) npi_rxdma_cfg_rdc_rcr_timeout(handle,
561 channel, ticks);
562 }
563 }
564
565 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_rx_hw_blank"));
566 }
567
568 /* ARGSUSED */
569 void
570 nxge_hw_stop(p_nxge_t nxgep)
571 {
572 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_hw_stop"));
573
574 (void) nxge_tx_mac_disable(nxgep);
575 (void) nxge_rx_mac_disable(nxgep);
576 (void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_STOP);
577 (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP);
578
579 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_hw_stop"));
580 }
581
582 /* ARGSUSED */
583 void
584 nxge_hw_ioctl(p_nxge_t nxgep, queue_t *wq, mblk_t *mp, struct iocblk *iocp)
585 {
586 int cmd;
587
588 NXGE_DEBUG_MSG((nxgep, IOC_CTL, "==> nxge_hw_ioctl"));
589
590 if (nxgep == NULL) {
591 miocnak(wq, mp, 0, EINVAL);
592 return;
593 }
594 iocp->ioc_error = 0;
595 cmd = iocp->ioc_cmd;
596
597 switch (cmd) {
598 default:
599 miocnak(wq, mp, 0, EINVAL);
600 return;
601
602 case NXGE_GET_MII:
603 nxge_get_mii(nxgep, mp->b_cont);
604 miocack(wq, mp, sizeof (uint16_t), 0);
605 break;
606
607 case NXGE_PUT_MII:
608 nxge_put_mii(nxgep, mp->b_cont);
609 miocack(wq, mp, 0, 0);
610 break;
611
612 case NXGE_GET64:
613 nxge_get64(nxgep, mp->b_cont);
614 miocack(wq, mp, sizeof (uint32_t), 0);
615 break;
616
617 case NXGE_PUT64:
618 nxge_put64(nxgep, mp->b_cont);
619 miocack(wq, mp, 0, 0);
620 break;
621
622 case NXGE_PUT_TCAM:
623 nxge_put_tcam(nxgep, mp->b_cont);
624 miocack(wq, mp, 0, 0);
625 break;
626
627 case NXGE_GET_TCAM:
628 nxge_get_tcam(nxgep, mp->b_cont);
629 miocack(wq, mp, 0, 0);
630 break;
631
632 case NXGE_TX_REGS_DUMP:
633 nxge_txdma_regs_dump_channels(nxgep);
634 miocack(wq, mp, 0, 0);
635 break;
636 case NXGE_RX_REGS_DUMP:
637 nxge_rxdma_regs_dump_channels(nxgep);
638 miocack(wq, mp, 0, 0);
639 break;
640 case NXGE_VIR_INT_REGS_DUMP:
641 case NXGE_INT_REGS_DUMP:
642 nxge_virint_regs_dump(nxgep);
643 miocack(wq, mp, 0, 0);
644 break;
645 case NXGE_RTRACE:
646 nxge_rtrace_ioctl(nxgep, wq, mp, iocp);
647 break;
648 }
649 }
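/*
 * Illustration only: a user-level consumer would typically reach the
 * cases above through a STREAMS I_STR ioctl (<stropts.h>) on the nxge
 * device node.  The device path below is an assumption and error
 * handling is omitted:
 *
 *	struct strioctl str;
 *	uint16_t mii_reg = 0;		// register index in, value out
 *	int fd = open("/dev/nxge0", O_RDWR);
 *
 *	str.ic_cmd = NXGE_GET_MII;
 *	str.ic_timout = 0;
 *	str.ic_len = sizeof (mii_reg);
 *	str.ic_dp = (char *)&mii_reg;
 *	(void) ioctl(fd, I_STR, &str);
 */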
650
651 /* ARGSUSED */
652 void
653 nxge_loopback_ioctl(p_nxge_t nxgep, queue_t *wq, mblk_t *mp,
654 struct iocblk *iocp)
655 {
656 p_lb_property_t lb_props;
657
658 size_t size;
659 int i;
660
661 if (mp->b_cont == NULL) {
662 miocnak(wq, mp, 0, EINVAL);
663 }
664 switch (iocp->ioc_cmd) {
665 case LB_GET_MODE:
666 NXGE_DEBUG_MSG((nxgep, IOC_CTL, "NXGE_GET_LB_MODE command"));
667 if (nxgep != NULL) {
668 *(lb_info_sz_t *)mp->b_cont->b_rptr =
669 nxgep->statsp->port_stats.lb_mode;
670 miocack(wq, mp, sizeof (nxge_lb_t), 0);
671 } else {
672 miocnak(wq, mp, 0, EINVAL);
673 }
674 break;
675 case LB_SET_MODE:
676 NXGE_DEBUG_MSG((nxgep, IOC_CTL, "NXGE_SET_LB_MODE command"));
677 if (iocp->ioc_count != sizeof (uint32_t)) {
678 miocack(wq, mp, 0, 0);
679 break;
680 }
681 if ((nxgep != NULL) && nxge_set_lb(nxgep, wq, mp->b_cont)) {
682 miocack(wq, mp, 0, 0);
683 } else {
684 miocnak(wq, mp, 0, EPROTO);
685 }
686 break;
687 case LB_GET_INFO_SIZE:
688 NXGE_DEBUG_MSG((nxgep, IOC_CTL, "LB_GET_INFO_SIZE command"));
689 if (nxgep != NULL) {
690 size = sizeof (lb_normal);
691 if (nxgep->statsp->mac_stats.cap_10gfdx) {
692 /* TN1010 does not support external loopback */
693 if (nxgep->mac.portmode != PORT_1G_TN1010 &&
694 nxgep->mac.portmode != PORT_10G_TN1010) {
695 size += sizeof (lb_external10g);
696 }
697 size += sizeof (lb_mac10g);
698 /* Publish PHY loopback if PHY is present */
699 if (nxgep->mac.portmode == PORT_10G_COPPER ||
700 nxgep->mac.portmode == PORT_10G_TN1010 ||
701 nxgep->mac.portmode == PORT_10G_FIBER)
702 size += sizeof (lb_phy10g);
703 }
704
705 /*
706 * Even if cap_10gfdx is false, we still do 10G
707 * serdes loopback as part of the SunVTS xnetlbtest
708 * internal loopback test.
709 */
710 if (nxgep->mac.portmode == PORT_10G_FIBER ||
711 nxgep->mac.portmode == PORT_10G_COPPER ||
712 nxgep->mac.portmode == PORT_10G_TN1010 ||
713 nxgep->mac.portmode == PORT_10G_SERDES)
714 size += sizeof (lb_serdes10g);
715
716 if (nxgep->statsp->mac_stats.cap_1000fdx) {
717 /* TN1010 does not support external loopback */
718 if (nxgep->mac.portmode != PORT_1G_TN1010 &&
719 nxgep->mac.portmode != PORT_10G_TN1010) {
720 size += sizeof (lb_external1000);
721 }
722 size += sizeof (lb_mac1000);
723 if (nxgep->mac.portmode == PORT_1G_COPPER ||
724 nxgep->mac.portmode == PORT_1G_TN1010 ||
725 nxgep->mac.portmode ==
726 PORT_1G_RGMII_FIBER)
727 size += sizeof (lb_phy1000);
728 }
729 if (nxgep->statsp->mac_stats.cap_100fdx)
730 size += sizeof (lb_external100);
731 if (nxgep->statsp->mac_stats.cap_10fdx)
732 size += sizeof (lb_external10);
733 if (nxgep->mac.portmode == PORT_1G_FIBER ||
734 nxgep->mac.portmode == PORT_1G_TN1010 ||
735 nxgep->mac.portmode == PORT_1G_SERDES)
736 size += sizeof (lb_serdes1000);
737
738 *(lb_info_sz_t *)mp->b_cont->b_rptr = size;
739
740 NXGE_DEBUG_MSG((nxgep, IOC_CTL,
741 "NXGE_GET_LB_INFO command: size %d", size));
742 miocack(wq, mp, sizeof (lb_info_sz_t), 0);
743 } else
744 miocnak(wq, mp, 0, EINVAL);
745 break;
746
747 case LB_GET_INFO:
748 NXGE_DEBUG_MSG((nxgep, IOC_CTL, "NXGE_GET_LB_INFO command"));
749 if (nxgep != NULL) {
750 size = sizeof (lb_normal);
751 if (nxgep->statsp->mac_stats.cap_10gfdx) {
752 /* TN1010 does not support external loopback */
753 if (nxgep->mac.portmode != PORT_1G_TN1010 &&
754 nxgep->mac.portmode != PORT_10G_TN1010) {
755 size += sizeof (lb_external10g);
756 }
757 size += sizeof (lb_mac10g);
758 /* Publish PHY loopback if PHY is present */
759 if (nxgep->mac.portmode == PORT_10G_COPPER ||
760 nxgep->mac.portmode == PORT_10G_TN1010 ||
761 nxgep->mac.portmode == PORT_10G_FIBER)
762 size += sizeof (lb_phy10g);
763 }
764 if (nxgep->mac.portmode == PORT_10G_FIBER ||
765 nxgep->mac.portmode == PORT_10G_COPPER ||
766 nxgep->mac.portmode == PORT_10G_TN1010 ||
767 nxgep->mac.portmode == PORT_10G_SERDES)
768 size += sizeof (lb_serdes10g);
769
770 if (nxgep->statsp->mac_stats.cap_1000fdx) {
771 /* TN1010 does not support external loopback */
772 if (nxgep->mac.portmode != PORT_1G_TN1010 &&
773 nxgep->mac.portmode != PORT_10G_TN1010) {
774 size += sizeof (lb_external1000);
775 }
776 size += sizeof (lb_mac1000);
777 if (nxgep->mac.portmode == PORT_1G_COPPER ||
778 nxgep->mac.portmode == PORT_1G_TN1010 ||
779 nxgep->mac.portmode ==
780 PORT_1G_RGMII_FIBER)
781 size += sizeof (lb_phy1000);
782 }
783 if (nxgep->statsp->mac_stats.cap_100fdx)
784 size += sizeof (lb_external100);
785
786 if (nxgep->statsp->mac_stats.cap_10fdx)
787 size += sizeof (lb_external10);
788
789 if (nxgep->mac.portmode == PORT_1G_FIBER ||
790 nxgep->mac.portmode == PORT_1G_TN1010 ||
791 nxgep->mac.portmode == PORT_1G_SERDES)
792 size += sizeof (lb_serdes1000);
793
794 NXGE_DEBUG_MSG((nxgep, IOC_CTL,
795 "NXGE_GET_LB_INFO command: size %d", size));
796 if (size == iocp->ioc_count) {
797 i = 0;
798 lb_props = (p_lb_property_t)mp->b_cont->b_rptr;
799 lb_props[i++] = lb_normal;
800
801 if (nxgep->statsp->mac_stats.cap_10gfdx) {
802 lb_props[i++] = lb_mac10g;
803 if (nxgep->mac.portmode ==
804 PORT_10G_COPPER ||
805 nxgep->mac.portmode ==
806 PORT_10G_TN1010 ||
807 nxgep->mac.portmode ==
808 PORT_10G_FIBER) {
809 lb_props[i++] = lb_phy10g;
810 }
811 /* TN1010 does not support ext lb */
812 if (nxgep->mac.portmode !=
813 PORT_10G_TN1010 &&
814 nxgep->mac.portmode !=
815 PORT_1G_TN1010) {
816 lb_props[i++] = lb_external10g;
817 }
818 }
819
820 if (nxgep->mac.portmode == PORT_10G_FIBER ||
821 nxgep->mac.portmode == PORT_10G_COPPER ||
822 nxgep->mac.portmode == PORT_10G_TN1010 ||
823 nxgep->mac.portmode == PORT_10G_SERDES)
824 lb_props[i++] = lb_serdes10g;
825
826 if (nxgep->statsp->mac_stats.cap_1000fdx) {
827 /* TN1010 does not support ext lb */
828 if (nxgep->mac.portmode !=
829 PORT_10G_TN1010 &&
830 nxgep->mac.portmode !=
831 PORT_1G_TN1010) {
832 lb_props[i++] = lb_external1000;
833 }
834 }
835
836 if (nxgep->statsp->mac_stats.cap_100fdx)
837 lb_props[i++] = lb_external100;
838
839 if (nxgep->statsp->mac_stats.cap_10fdx)
840 lb_props[i++] = lb_external10;
841
842 if (nxgep->statsp->mac_stats.cap_1000fdx)
843 lb_props[i++] = lb_mac1000;
844
845 if (nxgep->mac.portmode == PORT_1G_COPPER ||
846 nxgep->mac.portmode == PORT_1G_TN1010 ||
847 nxgep->mac.portmode ==
848 PORT_1G_RGMII_FIBER) {
849 if (nxgep->statsp->mac_stats.
850 cap_1000fdx)
851 lb_props[i++] = lb_phy1000;
852 } else if (nxgep->mac.portmode ==
853 PORT_1G_FIBER ||
854 nxgep->mac.portmode == PORT_1G_TN1010 ||
855 nxgep->mac.portmode == PORT_1G_SERDES) {
856 lb_props[i++] = lb_serdes1000;
857 }
858 miocack(wq, mp, size, 0);
859 } else
860 miocnak(wq, mp, 0, EINVAL);
861 } else {
862 miocnak(wq, mp, 0, EINVAL);
863 cmn_err(CE_NOTE, "!nxge_loopback_ioctl: invalid command 0x%x",
864 iocp->ioc_cmd);
865 }
866 break;
867 }
868 }
869
870 /*
871 * DMA channel interfaces to access various channel specific
872 * hardware functions.
873 */
874 /* ARGSUSED */
875 void
876 nxge_rxdma_channel_put64(nxge_os_acc_handle_t handle, void *reg_addrp,
877 uint32_t reg_base, uint16_t channel, uint64_t reg_data)
878 {
879 uint64_t reg_offset;
880
881 NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_rxdma_channel_put64"));
882
883 /*
884 * Channel is assumed to be from 0 to the maximum DMA channel #. If we
885 * use the virtual DMA CSR address space from the config space (in the
886 * PCI case), then the following code needs to use a different offset
887 * computation macro.
888 */
889 reg_offset = reg_base + DMC_OFFSET(channel);
890 NXGE_PIO_WRITE64(handle, reg_addrp, reg_offset, reg_data);
891
892 NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_rxdma_channel_put64"));
893 }
894
895 /* ARGSUSED */
896 uint64_t
897 nxge_rxdma_channel_get64(nxge_os_acc_handle_t handle, void *reg_addrp,
898 uint32_t reg_base, uint16_t channel)
899 {
900 uint64_t reg_offset;
901
902 NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_rxdma_channel_get64"));
903
904 /*
905 * Channel is assumed to be from 0 to the maximum DMA channel #. If we
906 * use the virtual DMA CSR address space from the config space (in the
907 * PCI case), then the following code needs to use a different offset
908 * computation macro.
909 */
910 reg_offset = reg_base + DMC_OFFSET(channel);
911
912 NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_rxdma_channel_get64"));
913
914 return (NXGE_PIO_READ64(handle, reg_addrp, reg_offset));
915 }
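/*
 * Usage sketch (illustration only; RDC_REG_BASE and RDC_SOME_BIT are
 * stand-ins, not symbols defined by this driver):
 *
 *	uint64_t csr;
 *
 *	csr = nxge_rxdma_channel_get64(handle, reg_addrp,
 *	    RDC_REG_BASE, channel);
 *	csr |= RDC_SOME_BIT;
 *	nxge_rxdma_channel_put64(handle, reg_addrp,
 *	    RDC_REG_BASE, channel, csr);
 *
 * Both helpers resolve the final PIO offset as
 * reg_base + DMC_OFFSET(channel).
 */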
916
917 /* ARGSUSED */
918 void
919 nxge_get32(p_nxge_t nxgep, p_mblk_t mp)
920 {
921 nxge_os_acc_handle_t nxge_regh;
922
923 NXGE_DEBUG_MSG((nxgep, IOC_CTL, "nxge_get32"));
924 nxge_regh = nxgep->dev_regs->nxge_regh;
925
926 *(uint32_t *)mp->b_rptr = NXGE_PIO_READ32(nxge_regh,
927 nxgep->dev_regs->nxge_regp, *(uint32_t *)mp->b_rptr);
928
929 NXGE_DEBUG_MSG((nxgep, IOC_CTL, "value = 0x%08X",
930 *(uint32_t *)mp->b_rptr));
931 NXGE_DEBUG_MSG((nxgep, IOC_CTL, "nxge_get32"));
932 }
933
934 /* ARGSUSED */
935 void
936 nxge_put32(p_nxge_t nxgep, p_mblk_t mp)
937 {
938 nxge_os_acc_handle_t nxge_regh;
939 uint32_t *buf;
940 uint8_t *reg;
941
942 NXGE_DEBUG_MSG((nxgep, IOC_CTL, "nxge_put32"));
943 nxge_regh = nxgep->dev_regs->nxge_regh;
944
945 buf = (uint32_t *)mp->b_rptr;
946 reg = (uint8_t *)(nxgep->dev_regs->nxge_regp) + buf[0];
947 NXGE_DEBUG_MSG((nxgep, IOC_CTL,
948 "reg = 0x%016llX index = 0x%08X value = 0x%08X",
949 reg, buf[0], buf[1]));
950 NXGE_PIO_WRITE32(nxge_regh, (uint32_t *)reg, 0, buf[1]);
951 NXGE_DEBUG_MSG((nxgep, IOC_CTL, "nxge_put32"));
952 }
953
954 /*ARGSUSED*/
955 boolean_t
956 nxge_set_lb(p_nxge_t nxgep, queue_t *wq, p_mblk_t mp)
957 {
958 boolean_t status = B_TRUE;
959 uint32_t lb_mode;
960 lb_property_t *lb_info;
961
962 NXGE_DEBUG_MSG((nxgep, IOC_CTL, "==> nxge_set_lb"));
963 lb_mode = nxgep->statsp->port_stats.lb_mode;
964 if (lb_mode == *(uint32_t *)mp->b_rptr) {
965 cmn_err(CE_NOTE,
966 "!nxge%d: Loopback mode already set (lb_mode %d).\n",
967 nxgep->instance, lb_mode);
968 status = B_FALSE;
969 goto nxge_set_lb_exit;
970 }
971 lb_mode = *(uint32_t *)mp->b_rptr;
972 lb_info = NULL;
973 if (lb_mode == lb_normal.value)
974 lb_info = &lb_normal;
975 else if ((lb_mode == lb_external10g.value) &&
976 (nxgep->statsp->mac_stats.cap_10gfdx))
977 lb_info = &lb_external10g;
978 else if ((lb_mode == lb_external1000.value) &&
979 (nxgep->statsp->mac_stats.cap_1000fdx))
980 lb_info = &lb_external1000;
981 else if ((lb_mode == lb_external100.value) &&
982 (nxgep->statsp->mac_stats.cap_100fdx))
983 lb_info = &lb_external100;
984 else if ((lb_mode == lb_external10.value) &&
985 (nxgep->statsp->mac_stats.cap_10fdx))
986 lb_info = &lb_external10;
987 else if ((lb_mode == lb_phy10g.value) &&
988 (nxgep->mac.portmode == PORT_10G_COPPER ||
989 nxgep->mac.portmode == PORT_10G_TN1010 ||
990 nxgep->mac.portmode == PORT_10G_FIBER))
991 lb_info = &lb_phy10g;
992 else if ((lb_mode == lb_phy1000.value) &&
993 (nxgep->mac.portmode == PORT_1G_COPPER ||
994 nxgep->mac.portmode == PORT_1G_TN1010 ||
995 nxgep->mac.portmode == PORT_1G_RGMII_FIBER))
996 lb_info = &lb_phy1000;
997 else if ((lb_mode == lb_phy.value) &&
998 (nxgep->mac.portmode == PORT_1G_COPPER))
999 lb_info = &lb_phy;
1000 else if ((lb_mode == lb_serdes10g.value) &&
1001 (nxgep->mac.portmode == PORT_10G_FIBER ||
1002 nxgep->mac.portmode == PORT_10G_COPPER ||
1003 nxgep->mac.portmode == PORT_10G_TN1010 ||
1004 nxgep->mac.portmode == PORT_10G_SERDES))
1005 lb_info = &lb_serdes10g;
1006 else if ((lb_mode == lb_serdes1000.value) &&
1007 (nxgep->mac.portmode == PORT_1G_FIBER ||
1008 nxgep->mac.portmode == PORT_1G_TN1010 ||
1009 nxgep->mac.portmode == PORT_1G_SERDES))
1010 lb_info = &lb_serdes1000;
1011 else if (lb_mode == lb_mac10g.value)
1012 lb_info = &lb_mac10g;
1013 else if (lb_mode == lb_mac1000.value)
1014 lb_info = &lb_mac1000;
1015 else if (lb_mode == lb_mac.value)
1016 lb_info = &lb_mac;
1017 else {
1018 cmn_err(CE_NOTE,
1019 "!nxge%d: Loopback mode not supported(mode %d).\n",
1020 nxgep->instance, lb_mode);
1021 status = B_FALSE;
1022 goto nxge_set_lb_exit;
1023 }
1024
1025 if (lb_mode == nxge_lb_normal) {
1026 if (nxge_lb_dbg) {
1027 cmn_err(CE_NOTE,
1028 "!nxge%d: Returning to normal operation",
1029 nxgep->instance);
1030 }
1031 if (nxge_set_lb_normal(nxgep) != NXGE_OK) {
1032 status = B_FALSE;
1033 cmn_err(CE_NOTE,
1034 "!nxge%d: Failed to return to normal operation",
1035 nxgep->instance);
1036 }
1037 goto nxge_set_lb_exit;
1038 }
1039 nxgep->statsp->port_stats.lb_mode = lb_mode;
1040
1041 if (nxge_lb_dbg)
1042 cmn_err(CE_NOTE,
1043 "!nxge%d: Adapter now in %s loopback mode",
1044 nxgep->instance, lb_info->key);
1045 nxgep->param_arr[param_autoneg].value = 0;
1046 nxgep->param_arr[param_anar_10gfdx].value =
1047 (nxgep->statsp->port_stats.lb_mode == nxge_lb_ext10g) ||
1048 (nxgep->statsp->port_stats.lb_mode == nxge_lb_mac10g) ||
1049 (nxgep->statsp->port_stats.lb_mode == nxge_lb_phy10g) ||
1050 (nxgep->statsp->port_stats.lb_mode == nxge_lb_serdes10g);
1051 nxgep->param_arr[param_anar_10ghdx].value = 0;
1052 nxgep->param_arr[param_anar_1000fdx].value =
1053 (nxgep->statsp->port_stats.lb_mode == nxge_lb_ext1000) ||
1054 (nxgep->statsp->port_stats.lb_mode == nxge_lb_mac1000) ||
1055 (nxgep->statsp->port_stats.lb_mode == nxge_lb_phy1000) ||
1056 (nxgep->statsp->port_stats.lb_mode == nxge_lb_serdes1000);
1057 nxgep->param_arr[param_anar_1000hdx].value = 0;
1058 nxgep->param_arr[param_anar_100fdx].value =
1059 (nxgep->statsp->port_stats.lb_mode == nxge_lb_phy) ||
1060 (nxgep->statsp->port_stats.lb_mode == nxge_lb_mac) ||
1061 (nxgep->statsp->port_stats.lb_mode == nxge_lb_ext100);
1062 nxgep->param_arr[param_anar_100hdx].value = 0;
1063 nxgep->param_arr[param_anar_10fdx].value =
1064 (nxgep->statsp->port_stats.lb_mode == nxge_lb_mac) ||
1065 (nxgep->statsp->port_stats.lb_mode == nxge_lb_ext10);
1066 if (nxgep->statsp->port_stats.lb_mode == nxge_lb_ext1000) {
1067 nxgep->param_arr[param_master_cfg_enable].value = 1;
1068 nxgep->param_arr[param_master_cfg_value].value = 1;
1069 }
1070 if ((nxgep->statsp->port_stats.lb_mode == nxge_lb_ext10g) ||
1071 (nxgep->statsp->port_stats.lb_mode == nxge_lb_ext1000) ||
1072 (nxgep->statsp->port_stats.lb_mode == nxge_lb_ext100) ||
1073 (nxgep->statsp->port_stats.lb_mode == nxge_lb_ext10) ||
1074 (nxgep->statsp->port_stats.lb_mode == nxge_lb_phy10g) ||
1075 (nxgep->statsp->port_stats.lb_mode == nxge_lb_phy1000) ||
1076 (nxgep->statsp->port_stats.lb_mode == nxge_lb_phy)) {
1077
1078 if (nxge_link_monitor(nxgep, LINK_MONITOR_STOP) != NXGE_OK)
1079 goto nxge_set_lb_err;
1080 if (nxge_xcvr_find(nxgep) != NXGE_OK)
1081 goto nxge_set_lb_err;
1082 if (nxge_link_init(nxgep) != NXGE_OK)
1083 goto nxge_set_lb_err;
1084 if (nxge_link_monitor(nxgep, LINK_MONITOR_START) != NXGE_OK)
1085 goto nxge_set_lb_err;
1086 }
1087 if (lb_info->lb_type == internal) {
1088 if ((nxgep->statsp->port_stats.lb_mode == nxge_lb_mac10g) ||
1089 (nxgep->statsp->port_stats.lb_mode ==
1090 nxge_lb_phy10g) ||
1091 (nxgep->statsp->port_stats.lb_mode ==
1092 nxge_lb_serdes10g)) {
1093 nxgep->statsp->mac_stats.link_speed = 10000;
1094 } else if ((nxgep->statsp->port_stats.lb_mode
1095 == nxge_lb_mac1000) ||
1096 (nxgep->statsp->port_stats.lb_mode ==
1097 nxge_lb_phy1000) ||
1098 (nxgep->statsp->port_stats.lb_mode ==
1099 nxge_lb_serdes1000)) {
1100 nxgep->statsp->mac_stats.link_speed = 1000;
1101 } else {
1102 nxgep->statsp->mac_stats.link_speed = 100;
1103 }
1104 nxgep->statsp->mac_stats.link_duplex = 2;
1105 nxgep->statsp->mac_stats.link_up = 1;
1106 }
1107 if (nxge_global_reset(nxgep) != NXGE_OK)
1108 goto nxge_set_lb_err;
1109
1110 nxge_set_lb_exit:
1111 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1112 "<== nxge_set_lb status = 0x%08x", status));
1113 return (status);
1114 nxge_set_lb_err:
1115 status = B_FALSE;
1116 cmn_err(CE_NOTE,
1117 "!nxge%d: Failed to put adapter in %s loopback mode",
1118 nxgep->instance, lb_info->key);
1119 return (status);
1120 }
1121
1122 /* Return to normal (no loopback) mode */
1123 /* ARGSUSED */
1124 nxge_status_t
1125 nxge_set_lb_normal(p_nxge_t nxgep)
1126 {
1127 nxge_status_t status = NXGE_OK;
1128
1129 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_set_lb_normal"));
1130
1131 nxgep->statsp->port_stats.lb_mode = nxge_lb_normal;
1132 nxgep->param_arr[param_autoneg].value =
1133 nxgep->param_arr[param_autoneg].old_value;
1134 nxgep->param_arr[param_anar_1000fdx].value =
1135 nxgep->param_arr[param_anar_1000fdx].old_value;
1136 nxgep->param_arr[param_anar_1000hdx].value =
1137 nxgep->param_arr[param_anar_1000hdx].old_value;
1138 nxgep->param_arr[param_anar_100fdx].value =
1139 nxgep->param_arr[param_anar_100fdx].old_value;
1140 nxgep->param_arr[param_anar_100hdx].value =
1141 nxgep->param_arr[param_anar_100hdx].old_value;
1142 nxgep->param_arr[param_anar_10fdx].value =
1143 nxgep->param_arr[param_anar_10fdx].old_value;
1144 nxgep->param_arr[param_master_cfg_enable].value =
1145 nxgep->param_arr[param_master_cfg_enable].old_value;
1146 nxgep->param_arr[param_master_cfg_value].value =
1147 nxgep->param_arr[param_master_cfg_value].old_value;
1148
1149 if ((status = nxge_global_reset(nxgep)) != NXGE_OK)
1150 return (status);
1151
1152 if ((status = nxge_link_monitor(nxgep, LINK_MONITOR_STOP)) != NXGE_OK)
1153 return (status);
1154 if ((status = nxge_xcvr_find(nxgep)) != NXGE_OK)
1155 return (status);
1156 if ((status = nxge_link_init(nxgep)) != NXGE_OK)
1157 return (status);
1158 status = nxge_link_monitor(nxgep, LINK_MONITOR_START);
1159
1160 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_set_lb_normal"));
1161
1162 return (status);
1163 }
1164
1165 /* ARGSUSED */
1166 void
1167 nxge_get_mii(p_nxge_t nxgep, p_mblk_t mp)
1168 {
1169 uint16_t reg;
1170
1171 NXGE_DEBUG_MSG((nxgep, IOC_CTL, "==> nxge_get_mii"));
1172
1173 reg = *(uint16_t *)mp->b_rptr;
1174 (void) nxge_mii_read(nxgep, nxgep->statsp->mac_stats.xcvr_portn, reg,
1175 (uint16_t *)mp->b_rptr);
1176 NXGE_DEBUG_MSG((nxgep, IOC_CTL, "reg = 0x%08X value = 0x%04X",
1177 reg, *(uint16_t *)mp->b_rptr));
1178 NXGE_DEBUG_MSG((nxgep, IOC_CTL, "<== nxge_get_mii"));
1179 }
1180
1181 /* ARGSUSED */
1182 void
1183 nxge_put_mii(p_nxge_t nxgep, p_mblk_t mp)
1184 {
1185 uint16_t *buf;
1186 uint8_t reg;
1187
1188 NXGE_DEBUG_MSG((nxgep, IOC_CTL, "==> nxge_put_mii"));
1189 buf = (uint16_t *)mp->b_rptr;
1190 reg = (uint8_t)buf[0];
1191 NXGE_DEBUG_MSG((nxgep, IOC_CTL,
1192 "reg = 0x%08X index = 0x%08X value = 0x%08X",
1193 reg, buf[0], buf[1]));
1194 (void) nxge_mii_write(nxgep, nxgep->statsp->mac_stats.xcvr_portn,
1195 reg, buf[1]);
1196 NXGE_DEBUG_MSG((nxgep, IOC_CTL, "<== nxge_put_mii"));
1197 }
1198
1199 /* ARGSUSED */
1200 void
1201 nxge_check_hw_state(p_nxge_t nxgep)
1202 {
1203 p_nxge_ldgv_t ldgvp;
1204 p_nxge_ldv_t t_ldvp;
1205
1206 NXGE_DEBUG_MSG((nxgep, SYSERR_CTL, "==> nxge_check_hw_state"));
1207
1208 MUTEX_ENTER(nxgep->genlock);
1209 nxgep->nxge_timerid = 0;
1210 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
1211 goto nxge_check_hw_state_exit;
1212 }
1213 nxge_check_tx_hang(nxgep);
1214
1215 ldgvp = nxgep->ldgvp;
1216 if (ldgvp == NULL || (ldgvp->ldvp_syserr == NULL)) {
1217 NXGE_ERROR_MSG((nxgep, SYSERR_CTL, "<== nxge_check_hw_state: "
1218 "NULL ldgvp (interrupt not ready)."));
1219 goto nxge_check_hw_state_exit;
1220 }
1221 t_ldvp = ldgvp->ldvp_syserr;
1222 if (!t_ldvp->use_timer) {
1223 NXGE_DEBUG_MSG((nxgep, SYSERR_CTL, "<== nxge_check_hw_state: "
1224 "ldgvp $%p t_ldvp $%p use_timer flag %d",
1225 ldgvp, t_ldvp, t_ldvp->use_timer));
1226 goto nxge_check_hw_state_exit;
1227 }
1228 if (fm_check_acc_handle(nxgep->dev_regs->nxge_regh) != DDI_FM_OK) {
1229 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1230 "port%d Bad register acc handle", nxgep->mac.portnum));
1231 }
1232 (void) nxge_syserr_intr((void *) t_ldvp, (void *) nxgep);
1233
1234 nxgep->nxge_timerid = nxge_start_timer(nxgep, nxge_check_hw_state,
1235 NXGE_CHECK_TIMER);
1236
1237 nxge_check_hw_state_exit:
1238 MUTEX_EXIT(nxgep->genlock);
1239 NXGE_DEBUG_MSG((nxgep, SYSERR_CTL, "<== nxge_check_hw_state"));
1240 }
1241
1242 /*ARGSUSED*/
1243 static void
1244 nxge_rtrace_ioctl(p_nxge_t nxgep, queue_t *wq, mblk_t *mp,
1245 struct iocblk *iocp)
1246 {
1247 ssize_t size;
1248 rtrace_t *rtp;
1249 mblk_t *nmp;
1250 uint32_t i, j;
1251 uint32_t start_blk;
1252 uint32_t base_entry;
1253 uint32_t num_entries;
1254
1255 NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_rtrace_ioctl"));
1256
1257 size = 1024;
1258 if (mp->b_cont == NULL || MBLKL(mp->b_cont) < size) {
1259 NXGE_DEBUG_MSG((nxgep, STR_CTL,
1260 "malformed M_IOCTL MBLKL = %d size = %d",
1261 MBLKL(mp->b_cont), size));
1262 miocnak(wq, mp, 0, EINVAL);
1263 return;
1264 }
1265 nmp = mp->b_cont;
1266 rtp = (rtrace_t *)nmp->b_rptr;
1267 start_blk = rtp->next_idx;
1268 num_entries = rtp->last_idx;
1269 base_entry = start_blk * MAX_RTRACE_IOC_ENTRIES;
1270
1271 NXGE_DEBUG_MSG((nxgep, STR_CTL, "start_blk = %d\n", start_blk));
1272 NXGE_DEBUG_MSG((nxgep, STR_CTL, "num_entries = %d\n", num_entries));
1273 NXGE_DEBUG_MSG((nxgep, STR_CTL, "base_entry = %d\n", base_entry));
1274
1275 rtp->next_idx = npi_rtracebuf.next_idx;
1276 rtp->last_idx = npi_rtracebuf.last_idx;
1277 rtp->wrapped = npi_rtracebuf.wrapped;
1278 for (i = 0, j = base_entry; i < num_entries; i++, j++) {
1279 rtp->buf[i].ctl_addr = npi_rtracebuf.buf[j].ctl_addr;
1280 rtp->buf[i].val_l32 = npi_rtracebuf.buf[j].val_l32;
1281 rtp->buf[i].val_h32 = npi_rtracebuf.buf[j].val_h32;
1282 }
1283
1284 nmp->b_wptr = nmp->b_rptr + size;
1285 NXGE_DEBUG_MSG((nxgep, STR_CTL, "<== nxge_rtrace_ioctl"));
1286 miocack(wq, mp, (int)size, 0);
1287 }
1288