1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
23 */
24
25 #include <sys/nxge/nxge_impl.h>
26
27 /*
28 * Tunable Receive Completion Ring Configuration B parameters.
29 */
30 uint16_t nxge_rx_pkt_thres; /* 16 bits */
31 uint8_t nxge_rx_pkt_timeout; /* 6 bits based on DMA clock divider */
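/*
 * A sketch of how these tunables might be overridden at boot time via
 * /etc/system (the values shown are illustrative only, not recommendations):
 *
 *	set nxge:nxge_rx_pkt_thres = 0x200
 *	set nxge:nxge_rx_pkt_timeout = 0x10
 *
 * nxge_rx_pkt_thres is a 16-bit packet count; nxge_rx_pkt_timeout is
 * limited to 6 bits of DMA-clock-divider ticks, per the comments above.
 */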
32
33 lb_property_t lb_normal = {normal, "normal", nxge_lb_normal};
34 lb_property_t lb_external10g = {external, "external10g", nxge_lb_ext10g};
35 lb_property_t lb_external1000 = {external, "external1000", nxge_lb_ext1000};
36 lb_property_t lb_external100 = {external, "external100", nxge_lb_ext100};
37 lb_property_t lb_external10 = {external, "external10", nxge_lb_ext10};
38 lb_property_t lb_phy10g = {internal, "phy10g", nxge_lb_phy10g};
39 lb_property_t lb_phy1000 = {internal, "phy1000", nxge_lb_phy1000};
40 lb_property_t lb_phy = {internal, "phy", nxge_lb_phy};
41 lb_property_t lb_serdes10g = {internal, "serdes10g", nxge_lb_serdes10g};
42 lb_property_t lb_serdes1000 = {internal, "serdes", nxge_lb_serdes1000};
43 lb_property_t lb_mac10g = {internal, "mac10g", nxge_lb_mac10g};
44 lb_property_t lb_mac1000 = {internal, "mac1000", nxge_lb_mac1000};
45 lb_property_t lb_mac = {internal, "mac10/100", nxge_lb_mac};
46
47 uint32_t nxge_lb_dbg = 1;
48 void nxge_get_mii(p_nxge_t nxgep, p_mblk_t mp);
49 void nxge_put_mii(p_nxge_t nxgep, p_mblk_t mp);
50 static nxge_status_t nxge_check_xaui_xfp(p_nxge_t nxgep);
51
52 extern uint32_t nxge_rx_mode;
53 extern uint32_t nxge_jumbo_mtu;
54 extern uint16_t nxge_rdc_buf_offset;
55
56 static void
57 nxge_rtrace_ioctl(p_nxge_t, queue_t *, mblk_t *, struct iocblk *);
58
59 /* ARGSUSED */
60 nxge_status_t
nxge_global_reset(p_nxge_t nxgep)
62 {
63 nxge_status_t status = NXGE_OK;
64
65 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_global_reset"));
66
67 if ((status = nxge_link_monitor(nxgep, LINK_MONITOR_STOP)) != NXGE_OK)
68 return (status);
69 (void) nxge_intr_hw_disable(nxgep);
70
71 if ((nxgep->suspended) ||
72 ((nxgep->statsp->port_stats.lb_mode ==
73 nxge_lb_phy1000) ||
74 (nxgep->statsp->port_stats.lb_mode ==
75 nxge_lb_phy10g) ||
76 (nxgep->statsp->port_stats.lb_mode ==
77 nxge_lb_serdes1000) ||
78 (nxgep->statsp->port_stats.lb_mode ==
79 nxge_lb_serdes10g))) {
80 if ((status = nxge_link_init(nxgep)) != NXGE_OK)
81 return (status);
82 }
83
84 if ((status = nxge_link_monitor(nxgep, LINK_MONITOR_START)) != NXGE_OK)
85 return (status);
86 if ((status = nxge_mac_init(nxgep)) != NXGE_OK)
87 return (status);
88 (void) nxge_intr_hw_enable(nxgep);
89
90 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_global_reset"));
91 return (status);
92 }
93
94 /* ARGSUSED */
95 void
nxge_hw_id_init(p_nxge_t nxgep)
97 {
98 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_hw_id_init"));
99
100 /*
	 * Set up the required initial hardware parameters, such as the
	 * MAC MTU size.
102 */
103 nxgep->mac.is_jumbo = B_FALSE;
104
105 /*
106 * Set the maxframe size to 1522 (1518 + 4) to account for
107 * VLAN tagged packets.
108 */
109 nxgep->mac.minframesize = NXGE_MIN_MAC_FRAMESIZE; /* 64 */
110 nxgep->mac.maxframesize = NXGE_MAX_MAC_FRAMESIZE; /* 1522 */
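	/*
	 * For reference, the 1522-byte maximum breaks down as 1500 (payload)
	 * + 14 (Ethernet header) + 4 (FCS) = 1518, plus 4 bytes for an
	 * 802.1Q VLAN tag.
	 */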
111
112 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_hw_id_init: maxframesize %d",
113 nxgep->mac.maxframesize));
114 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_hw_id_init"));
115 }
116
117 /* ARGSUSED */
118 void
nxge_hw_init_niu_common(p_nxge_t nxgep)
120 {
121 p_nxge_hw_list_t hw_p;
122
123 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_hw_init_niu_common"));
124
125 if ((hw_p = nxgep->nxge_hw_p) == NULL) {
126 return;
127 }
128 MUTEX_ENTER(&hw_p->nxge_cfg_lock);
129 if (hw_p->flags & COMMON_INIT_DONE) {
130 NXGE_DEBUG_MSG((nxgep, MOD_CTL,
131 "nxge_hw_init_niu_common"
132 " already done for dip $%p function %d exiting",
133 hw_p->parent_devp, nxgep->function_num));
134 MUTEX_EXIT(&hw_p->nxge_cfg_lock);
135 return;
136 }
137
138 hw_p->flags = COMMON_INIT_START;
139 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "nxge_hw_init_niu_common"
140 " Started for device id %x with function %d",
141 hw_p->parent_devp, nxgep->function_num));
142
143 /* per neptune common block init */
144 (void) nxge_fflp_hw_reset(nxgep);
145
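	/*
	 * Validate the tunable RDC packet-buffer offset code below: the
	 * original NIU accepts only the no-offset, 64-byte and 128-byte
	 * codes, while the RF-type NIU (NIU_HW_TYPE_RF) extends the range
	 * in 64-byte steps up to 448 bytes.  Any other value falls back
	 * to no offset.
	 */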
146 if (nxgep->niu_hw_type != NIU_HW_TYPE_RF) {
147 switch (nxge_rdc_buf_offset) {
148 case SW_OFFSET_NO_OFFSET:
149 case SW_OFFSET_64:
150 case SW_OFFSET_128:
151 break;
152 default:
153 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
154 "nxge_hw_init_niu_common: Unsupported RDC buffer"
155 " offset code %d, setting to %d",
156 nxge_rdc_buf_offset, SW_OFFSET_NO_OFFSET));
157 nxge_rdc_buf_offset = SW_OFFSET_NO_OFFSET;
158 break;
159 }
160 } else {
161 switch (nxge_rdc_buf_offset) {
162 case SW_OFFSET_NO_OFFSET:
163 case SW_OFFSET_64:
164 case SW_OFFSET_128:
165 case SW_OFFSET_192:
166 case SW_OFFSET_256:
167 case SW_OFFSET_320:
168 case SW_OFFSET_384:
169 case SW_OFFSET_448:
170 break;
171 default:
172 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
173 "nxge_hw_init_niu_common: Unsupported RDC buffer"
174 " offset code %d, setting to %d",
175 nxge_rdc_buf_offset, SW_OFFSET_NO_OFFSET));
176 nxge_rdc_buf_offset = SW_OFFSET_NO_OFFSET;
177 break;
178 }
179 }
180
181 hw_p->flags = COMMON_INIT_DONE;
182 MUTEX_EXIT(&hw_p->nxge_cfg_lock);
183
184 NXGE_DEBUG_MSG((nxgep, MOD_CTL, "nxge_hw_init_niu_common"
185 " Done for device id %x with function %d",
186 hw_p->parent_devp, nxgep->function_num));
187 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_hw_init_niu_common"));
188 }
189
190 uint_t
nxge_intr(char *arg1, char *arg2)
192 {
193 p_nxge_ldv_t ldvp = (p_nxge_ldv_t)arg1;
194 p_nxge_t nxgep = (p_nxge_t)arg2;
195 uint_t serviced = DDI_INTR_UNCLAIMED;
196 uint8_t ldv;
197 npi_handle_t handle;
198 p_nxge_ldgv_t ldgvp;
199 p_nxge_ldg_t ldgp, t_ldgp;
200 p_nxge_ldv_t t_ldvp;
201 uint64_t vector0 = 0, vector1 = 0, vector2 = 0;
202 int i, j, nldvs, nintrs = 1;
203 npi_status_t rs = NPI_SUCCESS;
204
205 VERIFY(ldvp != NULL);
206
207 /* DDI interface returns second arg as NULL (n2 niumx driver) !!! */
208 if (arg2 == NULL || (void *) ldvp->nxgep != arg2) {
209 nxgep = ldvp->nxgep;
210 }
211 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr"));
212
213 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
214 NXGE_ERROR_MSG((nxgep, INT_CTL,
215 "<== nxge_intr: not initialized 0x%x", serviced));
216 return (serviced);
217 }
218
219 ldgvp = nxgep->ldgvp;
220 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr: ldgvp $%p", ldgvp));
221 if (ldvp == NULL && ldgvp) {
222 t_ldvp = ldvp = ldgvp->ldvp;
223 }
224 if (ldvp) {
225 ldgp = t_ldgp = ldvp->ldgp;
226 }
227 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr: "
228 "ldgvp $%p ldvp $%p ldgp $%p", ldgvp, ldvp, ldgp));
229 if (ldgvp == NULL || ldvp == NULL || ldgp == NULL) {
230 NXGE_ERROR_MSG((nxgep, INT_CTL, "==> nxge_intr: "
231 "ldgvp $%p ldvp $%p ldgp $%p", ldgvp, ldvp, ldgp));
232 NXGE_ERROR_MSG((nxgep, INT_CTL, "<== nxge_intr: not ready"));
233 return (DDI_INTR_UNCLAIMED);
234 }
235 /*
236 * This interrupt handler will have to go through all the logical
237 * devices to find out which logical device interrupts us and then call
238 * its handler to process the events.
239 */
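	/*
	 * Roughly: LDSV0/LDSV1 (vector0/vector1 below) carry the flag bits
	 * for logical devices numbered below NXGE_MAC_LD_START (the RX/TX
	 * DMA channels), while LDSV2 (vector2) carries the flags for the
	 * remaining devices (MAC, MIF, system error); that is what the ldv
	 * tests in the inner loop distinguish.
	 */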
240 handle = NXGE_DEV_NPI_HANDLE(nxgep);
241 t_ldgp = ldgp;
242 t_ldvp = ldgp->ldvp;
243
244 nldvs = ldgp->nldvs;
245
246 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr: #ldvs %d #intrs %d",
247 nldvs, ldgvp->ldg_intrs));
248
249 serviced = DDI_INTR_CLAIMED;
250 for (i = 0; i < nintrs; i++, t_ldgp++) {
251 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr(%d): #ldvs %d "
252 " #intrs %d", i, nldvs, nintrs));
253 /* Get this group's flag bits. */
254 rs = npi_ldsv_ldfs_get(handle, t_ldgp->ldg,
255 &vector0, &vector1, &vector2);
256 if (rs) {
257 continue;
258 }
259 if (!vector0 && !vector1 && !vector2) {
260 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr: "
261 "no interrupts on group %d", t_ldgp->ldg));
262 continue;
263 }
264 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr: "
265 "vector0 0x%llx vector1 0x%llx vector2 0x%llx",
266 vector0, vector1, vector2));
267 nldvs = t_ldgp->nldvs;
268 for (j = 0; j < nldvs; j++, t_ldvp++) {
269 /*
270 * Call device's handler if flag bits are on.
271 */
272 ldv = t_ldvp->ldv;
273 if (((ldv < NXGE_MAC_LD_START) &&
274 (LDV_ON(ldv, vector0) |
275 (LDV_ON(ldv, vector1)))) ||
276 (ldv >= NXGE_MAC_LD_START &&
277 ((LDV2_ON_1(ldv, vector2)) ||
278 (LDV2_ON_2(ldv, vector2))))) {
279 (void) (t_ldvp->ldv_intr_handler)(
280 (caddr_t)t_ldvp, arg2);
281 NXGE_DEBUG_MSG((nxgep, INT_CTL,
282 "==> nxge_intr: "
283 "calling device %d #ldvs %d #intrs %d",
284 j, nldvs, nintrs));
285 }
286 }
287 }
288
289 t_ldgp = ldgp;
290 for (i = 0; i < nintrs; i++, t_ldgp++) {
291 /* rearm group interrupts */
292 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr: arm "
293 "group %d", t_ldgp->ldg));
294 (void) npi_intr_ldg_mgmt_set(handle, t_ldgp->ldg,
295 t_ldgp->arm, t_ldgp->ldg_timer);
296 }
297
298 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intr: serviced 0x%x",
299 serviced));
300 return (serviced);
301 }
302
303
304 /*
305 * XFP Related Status Register Values Under 3 Different Conditions
306 *
307 * -------------+-------------------------+-------------------------
308 * | Intel XFP and Avago | Picolight XFP
309 * -------------+---------+---------------+---------+---------------
310 * | STATUS0 | TX_ALARM_STAT | STATUS0 | TX_ALARM_STAT
311 * -------------+---------+---------------+---------+---------------
312 * No XFP | 0x639C | 0x40 | 0x639C | 0x40
313 * -------------+---------+---------------+---------+---------------
314 * XFP,linkdown | 0x43BC | 0x40 | 0x639C | 0x40
315 * -------------+---------+---------------+---------+---------------
316 * XFP,linkup | 0x03FC | 0x0 | 0x03FC | 0x0
317 * -------------+---------+---------------+---------+---------------
318 * Note:
319 * STATUS0 = BCM8704_USER_ANALOG_STATUS0_REG
320 * TX_ALARM_STAT = BCM8704_USER_TX_ALARM_STATUS_REG
321 */
322 /* ARGSUSED */
323 static nxge_status_t
nxge_check_xaui_xfp(p_nxge_t nxgep)
325 {
326 nxge_status_t status = NXGE_OK;
327 uint8_t phy_port_addr;
328 uint16_t val;
329 uint16_t val1;
330 uint8_t portn;
331
332 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_check_xaui_xfp"));
333
334 portn = nxgep->mac.portnum;
335 phy_port_addr = nxgep->statsp->mac_stats.xcvr_portn;
336
337 /*
	 * Keep the val1 code even though it is not used. It could be
	 * used to differentiate the "No XFP" case from the "XFP,linkdown"
	 * case when an Intel XFP is used.
341 */
342 if ((status = nxge_mdio_read(nxgep, phy_port_addr,
343 BCM8704_USER_DEV3_ADDR,
344 BCM8704_USER_ANALOG_STATUS0_REG, &val)) == NXGE_OK) {
345 status = nxge_mdio_read(nxgep, phy_port_addr,
346 BCM8704_USER_DEV3_ADDR,
347 BCM8704_USER_TX_ALARM_STATUS_REG, &val1);
348 }
349
350 if (status != NXGE_OK) {
351 NXGE_FM_REPORT_ERROR(nxgep, portn, 0,
352 NXGE_FM_EREPORT_XAUI_ERR);
353 if (DDI_FM_EREPORT_CAP(nxgep->fm_capabilities)) {
354 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
355 "XAUI is bad or absent on port<%d>\n", portn));
356 }
357 #ifdef NXGE_DEBUG
358 /*
	 * As a workaround for CR6693529, do not execute this block of
	 * code in the non-debug driver. When a Picolight XFP transceiver
	 * is used, register BCM8704_USER_ANALOG_STATUS0_REG returns the
	 * same 0x639C value in the normal link-down case, which causes
	 * false FMA messages and link reconnection problems.
364 */
365 } else if (nxgep->mac.portmode == PORT_10G_FIBER) {
366 /*
367 * 0x03FC = 0000 0011 1111 1100 (XFP is normal)
368 * 0x639C = 0110 0011 1001 1100 (XFP has problem)
		 * bit14 = 1: PMD loss-of-light indicator
		 * bit13 = 1: PMD Rx loss-of-signal
371 * bit6 = 0: Light is NOT ok
372 * bit5 = 0: PMD Rx signal is NOT ok
373 */
374 if (val == 0x639C) {
375 NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
376 NXGE_FM_EREPORT_XFP_ERR);
377 if (DDI_FM_EREPORT_CAP(nxgep->fm_capabilities)) {
378 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
379 "XFP is bad or absent on port<%d>\n",
380 portn));
381 }
382 status = NXGE_ERROR;
383 }
384 #endif
385 }
386 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_check_xaui_xfp"));
387 return (status);
388 }
389
390
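/*
 * nxge_syserr_intr() reads the chip-wide system error status register and
 * dispatches to the per-block error handlers (IPP, ZCP, RDMC, TXC, FFLP).
 * It is invoked either as a regular interrupt handler or, when use_timer
 * is set, from the nxge_check_hw_state() timer below.
 */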
391 /* ARGSUSED */
392 uint_t
nxge_syserr_intr(void *arg1, void *arg2)
394 {
395 p_nxge_ldv_t ldvp = (p_nxge_ldv_t)arg1;
396 p_nxge_t nxgep = (p_nxge_t)arg2;
397 p_nxge_ldg_t ldgp = NULL;
398 npi_handle_t handle;
399 sys_err_stat_t estat;
400 uint_t serviced = DDI_INTR_UNCLAIMED;
401
402 if (arg1 == NULL && arg2 == NULL) {
403 return (serviced);
404 }
405 if (arg2 == NULL || ((ldvp != NULL && (void *) ldvp->nxgep != arg2))) {
406 if (ldvp != NULL) {
407 nxgep = ldvp->nxgep;
408 }
409 }
410 NXGE_DEBUG_MSG((nxgep, SYSERR_CTL,
411 "==> nxge_syserr_intr: arg2 $%p arg1 $%p", nxgep, ldvp));
412 if (ldvp != NULL && ldvp->use_timer == B_FALSE) {
413 ldgp = ldvp->ldgp;
414 if (ldgp == NULL) {
415 NXGE_ERROR_MSG((nxgep, SYSERR_CTL,
416 "<== nxge_syserrintr(no logical group): "
417 "arg2 $%p arg1 $%p", nxgep, ldvp));
418 return (DDI_INTR_UNCLAIMED);
419 }
420 /*
		 * Get the logical device state if the function uses interrupts.
422 */
423 }
424
425 /* This interrupt handler is for system error interrupts. */
426 handle = NXGE_DEV_NPI_HANDLE(nxgep);
427 estat.value = 0;
428 (void) npi_fzc_sys_err_stat_get(handle, &estat);
429 NXGE_DEBUG_MSG((nxgep, SYSERR_CTL,
430 "==> nxge_syserr_intr: device error 0x%016llx", estat.value));
431
432 if (estat.bits.ldw.smx) {
433 /* SMX */
434 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
435 "==> nxge_syserr_intr: device error - SMX"));
436 } else if (estat.bits.ldw.mac) {
437 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
438 "==> nxge_syserr_intr: device error - MAC"));
439 /*
		 * There is nothing to be done here. All MAC errors go to the
		 * per-MAC-port interrupt. The MIF interrupt is the only MAC
		 * sub-block that can generate status here; the MIF status
		 * reported here is ignored, as it is checked by the per-port
		 * timer instead.
444 */
445 } else if (estat.bits.ldw.ipp) {
446 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
447 "==> nxge_syserr_intr: device error - IPP"));
448 (void) nxge_ipp_handle_sys_errors(nxgep);
449 } else if (estat.bits.ldw.zcp) {
450 /* ZCP */
451 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
452 "==> nxge_syserr_intr: device error - ZCP"));
453 (void) nxge_zcp_handle_sys_errors(nxgep);
454 } else if (estat.bits.ldw.tdmc) {
455 /* TDMC */
456 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
457 "==> nxge_syserr_intr: device error - TDMC"));
458 /*
		 * There are no TDMC system errors defined in the PRM. All TDMC
		 * channel-specific errors are reported on a per-channel basis.
461 */
462 } else if (estat.bits.ldw.rdmc) {
463 /* RDMC */
464 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
465 "==> nxge_syserr_intr: device error - RDMC"));
466 (void) nxge_rxdma_handle_sys_errors(nxgep);
467 } else if (estat.bits.ldw.txc) {
468 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
469 "==> nxge_syserr_intr: device error - TXC"));
470 (void) nxge_txc_handle_sys_errors(nxgep);
471 } else if ((nxgep->niu_type != N2_NIU) && estat.bits.ldw.peu) {
472 /* PCI-E */
473 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
474 "==> nxge_syserr_intr: device error - PCI-E"));
475 } else if (estat.bits.ldw.meta1) {
476 /* META1 */
477 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
478 "==> nxge_syserr_intr: device error - META1"));
479 } else if (estat.bits.ldw.meta2) {
480 /* META2 */
481 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
482 "==> nxge_syserr_intr: device error - META2"));
483 } else if (estat.bits.ldw.fflp) {
484 /* FFLP */
485 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
486 "==> nxge_syserr_intr: device error - FFLP"));
487 (void) nxge_fflp_handle_sys_errors(nxgep);
488 }
489
490 /*
	 * nxge_check_xaui_xfp checks XAUI for all of the following
	 * portmodes, but checks XFP only if portmode == PORT_10G_FIBER.
493 */
494 if (nxgep->mac.portmode == PORT_10G_FIBER ||
495 nxgep->mac.portmode == PORT_10G_COPPER ||
496 nxgep->mac.portmode == PORT_10G_TN1010 ||
497 nxgep->mac.portmode == PORT_1G_TN1010) {
498 if (nxge_check_xaui_xfp(nxgep) != NXGE_OK) {
499 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
500 "==> nxge_syserr_intr: device error - XAUI"));
501 }
502 }
503
504 serviced = DDI_INTR_CLAIMED;
505
506 if (ldgp != NULL && ldvp != NULL && ldgp->nldvs == 1 &&
507 !ldvp->use_timer) {
508 (void) npi_intr_ldg_mgmt_set(handle, ldgp->ldg,
509 B_TRUE, ldgp->ldg_timer);
510 }
511 NXGE_DEBUG_MSG((nxgep, SYSERR_CTL, "<== nxge_syserr_intr"));
512 return (serviced);
513 }
514
515 /* ARGSUSED */
516 void
nxge_intr_hw_enable(p_nxge_t nxgep)
518 {
519 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr_hw_enable"));
520 (void) nxge_intr_mask_mgmt_set(nxgep, B_TRUE);
521 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intr_hw_enable"));
522 }
523
524 /* ARGSUSED */
525 void
nxge_intr_hw_disable(p_nxge_t nxgep)
527 {
528 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr_hw_disable"));
529 (void) nxge_intr_mask_mgmt_set(nxgep, B_FALSE);
530 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intr_hw_disable"));
531 }
532
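/*
 * nxge_rx_hw_blank() reprograms the receive interrupt blanking (coalescing)
 * parameters: 'count' is written as the RCR packet threshold and 'ticks'
 * as the RCR timeout for every receive DMA channel owned by this instance.
 */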
533 /* ARGSUSED */
534 void
nxge_rx_hw_blank(void *arg, time_t ticks, uint_t count)
536 {
537 p_nxge_t nxgep = (p_nxge_t)arg;
538 uint8_t channel;
539 npi_handle_t handle;
540 p_nxge_ldgv_t ldgvp;
541 p_nxge_ldv_t ldvp;
542 int i;
543
544 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_rx_hw_blank"));
545 handle = NXGE_DEV_NPI_HANDLE(nxgep);
546
547 if ((ldgvp = nxgep->ldgvp) == NULL) {
548 NXGE_ERROR_MSG((nxgep, INT_CTL,
549 "<== nxge_rx_hw_blank (not enabled)"));
550 return;
551 }
552 ldvp = nxgep->ldgvp->ldvp;
553 if (ldvp == NULL) {
554 return;
555 }
556 for (i = 0; i < ldgvp->nldvs; i++, ldvp++) {
557 if (ldvp->is_rxdma) {
558 channel = ldvp->channel;
559 (void) npi_rxdma_cfg_rdc_rcr_threshold(handle,
560 channel, count);
561 (void) npi_rxdma_cfg_rdc_rcr_timeout(handle,
562 channel, ticks);
563 }
564 }
565
566 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_rx_hw_blank"));
567 }
568
569 /* ARGSUSED */
570 void
nxge_hw_stop(p_nxge_t nxgep)
572 {
573 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_hw_stop"));
574
575 (void) nxge_tx_mac_disable(nxgep);
576 (void) nxge_rx_mac_disable(nxgep);
577 (void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_STOP);
578 (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP);
579
580 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_hw_stop"));
581 }
582
583 /* ARGSUSED */
584 void
nxge_hw_ioctl(p_nxge_t nxgep, queue_t *wq, mblk_t *mp, struct iocblk *iocp)
586 {
587 int cmd;
588
589 NXGE_DEBUG_MSG((nxgep, IOC_CTL, "==> nxge_hw_ioctl"));
590
591 if (nxgep == NULL) {
592 miocnak(wq, mp, 0, EINVAL);
593 return;
594 }
595 iocp->ioc_error = 0;
596 cmd = iocp->ioc_cmd;
597
598 switch (cmd) {
599 default:
600 miocnak(wq, mp, 0, EINVAL);
601 return;
602
603 case NXGE_GET_MII:
604 nxge_get_mii(nxgep, mp->b_cont);
605 miocack(wq, mp, sizeof (uint16_t), 0);
606 break;
607
608 case NXGE_PUT_MII:
609 nxge_put_mii(nxgep, mp->b_cont);
610 miocack(wq, mp, 0, 0);
611 break;
612
613 case NXGE_GET64:
614 nxge_get64(nxgep, mp->b_cont);
615 miocack(wq, mp, sizeof (uint32_t), 0);
616 break;
617
618 case NXGE_PUT64:
619 nxge_put64(nxgep, mp->b_cont);
620 miocack(wq, mp, 0, 0);
621 break;
622
623 case NXGE_PUT_TCAM:
624 nxge_put_tcam(nxgep, mp->b_cont);
625 miocack(wq, mp, 0, 0);
626 break;
627
628 case NXGE_GET_TCAM:
629 nxge_get_tcam(nxgep, mp->b_cont);
630 miocack(wq, mp, 0, 0);
631 break;
632
633 case NXGE_TX_REGS_DUMP:
634 nxge_txdma_regs_dump_channels(nxgep);
635 miocack(wq, mp, 0, 0);
636 break;
637 case NXGE_RX_REGS_DUMP:
638 nxge_rxdma_regs_dump_channels(nxgep);
639 miocack(wq, mp, 0, 0);
640 break;
641 case NXGE_VIR_INT_REGS_DUMP:
642 case NXGE_INT_REGS_DUMP:
643 nxge_virint_regs_dump(nxgep);
644 miocack(wq, mp, 0, 0);
645 break;
646 case NXGE_RTRACE:
647 nxge_rtrace_ioctl(nxgep, wq, mp, iocp);
648 break;
649 }
650 }
651
652 /* ARGSUSED */
653 void
nxge_loopback_ioctl(p_nxge_t nxgep, queue_t *wq, mblk_t *mp,
655 struct iocblk *iocp)
656 {
657 p_lb_property_t lb_props;
658
659 size_t size;
660 int i;
661
	if (mp->b_cont == NULL) {
		miocnak(wq, mp, 0, EINVAL);
		return;
	}
665 switch (iocp->ioc_cmd) {
666 case LB_GET_MODE:
667 NXGE_DEBUG_MSG((nxgep, IOC_CTL, "NXGE_GET_LB_MODE command"));
668 if (nxgep != NULL) {
669 *(lb_info_sz_t *)mp->b_cont->b_rptr =
670 nxgep->statsp->port_stats.lb_mode;
671 miocack(wq, mp, sizeof (nxge_lb_t), 0);
672 } else {
673 miocnak(wq, mp, 0, EINVAL);
674 }
675 break;
676 case LB_SET_MODE:
677 NXGE_DEBUG_MSG((nxgep, IOC_CTL, "NXGE_SET_LB_MODE command"));
678 if (iocp->ioc_count != sizeof (uint32_t)) {
679 miocack(wq, mp, 0, 0);
680 break;
681 }
682 if ((nxgep != NULL) && nxge_set_lb(nxgep, wq, mp->b_cont)) {
683 miocack(wq, mp, 0, 0);
684 } else {
685 miocnak(wq, mp, 0, EPROTO);
686 }
687 break;
688 case LB_GET_INFO_SIZE:
689 NXGE_DEBUG_MSG((nxgep, IOC_CTL, "LB_GET_INFO_SIZE command"));
690 if (nxgep != NULL) {
691 size = sizeof (lb_normal);
692 if (nxgep->statsp->mac_stats.cap_10gfdx) {
693 /* TN1010 does not support external loopback */
694 if (nxgep->mac.portmode != PORT_1G_TN1010 &&
695 nxgep->mac.portmode != PORT_10G_TN1010) {
696 size += sizeof (lb_external10g);
697 }
698 size += sizeof (lb_mac10g);
699 /* Publish PHY loopback if PHY is present */
700 if (nxgep->mac.portmode == PORT_10G_COPPER ||
701 nxgep->mac.portmode == PORT_10G_TN1010 ||
702 nxgep->mac.portmode == PORT_10G_FIBER)
703 size += sizeof (lb_phy10g);
704 }
705
706 /*
707 * Even if cap_10gfdx is false, we still do 10G
708 * serdes loopback as a part of SunVTS xnetlbtest
709 * internal loopback test.
710 */
711 if (nxgep->mac.portmode == PORT_10G_FIBER ||
712 nxgep->mac.portmode == PORT_10G_COPPER ||
713 nxgep->mac.portmode == PORT_10G_TN1010 ||
714 nxgep->mac.portmode == PORT_10G_SERDES)
715 size += sizeof (lb_serdes10g);
716
717 if (nxgep->statsp->mac_stats.cap_1000fdx) {
718 /* TN1010 does not support external loopback */
719 if (nxgep->mac.portmode != PORT_1G_TN1010 &&
720 nxgep->mac.portmode != PORT_10G_TN1010) {
721 size += sizeof (lb_external1000);
722 }
723 size += sizeof (lb_mac1000);
724 if (nxgep->mac.portmode == PORT_1G_COPPER ||
725 nxgep->mac.portmode == PORT_1G_TN1010 ||
726 nxgep->mac.portmode ==
727 PORT_1G_RGMII_FIBER)
728 size += sizeof (lb_phy1000);
729 }
730 if (nxgep->statsp->mac_stats.cap_100fdx)
731 size += sizeof (lb_external100);
732 if (nxgep->statsp->mac_stats.cap_10fdx)
733 size += sizeof (lb_external10);
734 if (nxgep->mac.portmode == PORT_1G_FIBER ||
735 nxgep->mac.portmode == PORT_1G_TN1010 ||
736 nxgep->mac.portmode == PORT_1G_SERDES)
737 size += sizeof (lb_serdes1000);
738
739 *(lb_info_sz_t *)mp->b_cont->b_rptr = size;
740
741 NXGE_DEBUG_MSG((nxgep, IOC_CTL,
742 "NXGE_GET_LB_INFO command: size %d", size));
743 miocack(wq, mp, sizeof (lb_info_sz_t), 0);
744 } else
745 miocnak(wq, mp, 0, EINVAL);
746 break;
747
748 case LB_GET_INFO:
749 NXGE_DEBUG_MSG((nxgep, IOC_CTL, "NXGE_GET_LB_INFO command"));
750 if (nxgep != NULL) {
751 size = sizeof (lb_normal);
752 if (nxgep->statsp->mac_stats.cap_10gfdx) {
753 /* TN1010 does not support external loopback */
754 if (nxgep->mac.portmode != PORT_1G_TN1010 &&
755 nxgep->mac.portmode != PORT_10G_TN1010) {
756 size += sizeof (lb_external10g);
757 }
758 size += sizeof (lb_mac10g);
759 /* Publish PHY loopback if PHY is present */
760 if (nxgep->mac.portmode == PORT_10G_COPPER ||
761 nxgep->mac.portmode == PORT_10G_TN1010 ||
762 nxgep->mac.portmode == PORT_10G_FIBER)
763 size += sizeof (lb_phy10g);
764 }
765 if (nxgep->mac.portmode == PORT_10G_FIBER ||
766 nxgep->mac.portmode == PORT_10G_COPPER ||
767 nxgep->mac.portmode == PORT_10G_TN1010 ||
768 nxgep->mac.portmode == PORT_10G_SERDES)
769 size += sizeof (lb_serdes10g);
770
771 if (nxgep->statsp->mac_stats.cap_1000fdx) {
772 /* TN1010 does not support external loopback */
773 if (nxgep->mac.portmode != PORT_1G_TN1010 &&
774 nxgep->mac.portmode != PORT_10G_TN1010) {
775 size += sizeof (lb_external1000);
776 }
777 size += sizeof (lb_mac1000);
778 if (nxgep->mac.portmode == PORT_1G_COPPER ||
779 nxgep->mac.portmode == PORT_1G_TN1010 ||
780 nxgep->mac.portmode ==
781 PORT_1G_RGMII_FIBER)
782 size += sizeof (lb_phy1000);
783 }
784 if (nxgep->statsp->mac_stats.cap_100fdx)
785 size += sizeof (lb_external100);
786
787 if (nxgep->statsp->mac_stats.cap_10fdx)
788 size += sizeof (lb_external10);
789
790 if (nxgep->mac.portmode == PORT_1G_FIBER ||
791 nxgep->mac.portmode == PORT_1G_TN1010 ||
792 nxgep->mac.portmode == PORT_1G_SERDES)
793 size += sizeof (lb_serdes1000);
794
795 NXGE_DEBUG_MSG((nxgep, IOC_CTL,
796 "NXGE_GET_LB_INFO command: size %d", size));
797 if (size == iocp->ioc_count) {
798 i = 0;
799 lb_props = (p_lb_property_t)mp->b_cont->b_rptr;
800 lb_props[i++] = lb_normal;
801
802 if (nxgep->statsp->mac_stats.cap_10gfdx) {
803 lb_props[i++] = lb_mac10g;
804 if (nxgep->mac.portmode ==
805 PORT_10G_COPPER ||
806 nxgep->mac.portmode ==
807 PORT_10G_TN1010 ||
808 nxgep->mac.portmode ==
809 PORT_10G_FIBER) {
810 lb_props[i++] = lb_phy10g;
811 }
812 /* TN1010 does not support ext lb */
813 if (nxgep->mac.portmode !=
814 PORT_10G_TN1010 &&
815 nxgep->mac.portmode !=
816 PORT_1G_TN1010) {
817 lb_props[i++] = lb_external10g;
818 }
819 }
820
821 if (nxgep->mac.portmode == PORT_10G_FIBER ||
822 nxgep->mac.portmode == PORT_10G_COPPER ||
823 nxgep->mac.portmode == PORT_10G_TN1010 ||
824 nxgep->mac.portmode == PORT_10G_SERDES)
825 lb_props[i++] = lb_serdes10g;
826
827 if (nxgep->statsp->mac_stats.cap_1000fdx) {
828 /* TN1010 does not support ext lb */
829 if (nxgep->mac.portmode !=
830 PORT_10G_TN1010 &&
831 nxgep->mac.portmode !=
832 PORT_1G_TN1010) {
833 lb_props[i++] = lb_external1000;
834 }
835 }
836
837 if (nxgep->statsp->mac_stats.cap_100fdx)
838 lb_props[i++] = lb_external100;
839
840 if (nxgep->statsp->mac_stats.cap_10fdx)
841 lb_props[i++] = lb_external10;
842
843 if (nxgep->statsp->mac_stats.cap_1000fdx)
844 lb_props[i++] = lb_mac1000;
845
846 if (nxgep->mac.portmode == PORT_1G_COPPER ||
847 nxgep->mac.portmode == PORT_1G_TN1010 ||
848 nxgep->mac.portmode ==
849 PORT_1G_RGMII_FIBER) {
850 if (nxgep->statsp->mac_stats.
851 cap_1000fdx)
852 lb_props[i++] = lb_phy1000;
853 } else if (nxgep->mac.portmode ==
854 PORT_1G_FIBER ||
855 nxgep->mac.portmode == PORT_1G_TN1010 ||
856 nxgep->mac.portmode == PORT_1G_SERDES) {
857 lb_props[i++] = lb_serdes1000;
858 }
859 miocack(wq, mp, size, 0);
860 } else
861 miocnak(wq, mp, 0, EINVAL);
862 } else {
863 miocnak(wq, mp, 0, EINVAL);
			cmn_err(CE_NOTE,
			    "!nxge_loopback_ioctl: invalid command 0x%x",
			    iocp->ioc_cmd);
866 }
867 break;
868 }
869 }
870
871 /*
872 * DMA channel interfaces to access various channel specific
873 * hardware functions.
874 */
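/*
 * A usage sketch (the handle, base pointer, CSR offset and channel number
 * below are placeholders, not values taken from this file):
 *
 *	nxge_rxdma_channel_put64(regh, regp, csr_base, chan, val);
 *	val = nxge_rxdma_channel_get64(regh, regp, csr_base, chan);
 *
 * Both helpers add DMC_OFFSET(channel) to the supplied base to reach the
 * per-channel copy of the register.
 */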
875 /* ARGSUSED */
876 void
nxge_rxdma_channel_put64(nxge_os_acc_handle_t handle, void *reg_addrp,
878 uint32_t reg_base, uint16_t channel, uint64_t reg_data)
879 {
880 uint64_t reg_offset;
881
	NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_rxdma_channel_put64"));
883
884 /*
	 * Channel is assumed to range from 0 to the maximum DMA channel
	 * number. If we use the virtual DMA CSR address space from the
	 * config space (in the PCI case), then the following code needs
	 * to use a different offset computation macro.
889 */
890 reg_offset = reg_base + DMC_OFFSET(channel);
891 NXGE_PIO_WRITE64(handle, reg_addrp, reg_offset, reg_data);
892
893 NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_rxdma_channel_put64"));
894 }
895
896 /* ARGSUSED */
897 uint64_t
nxge_rxdma_channel_get64(nxge_os_acc_handle_t handle, void *reg_addrp,
899 uint32_t reg_base, uint16_t channel)
900 {
901 uint64_t reg_offset;
902
	NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_rxdma_channel_get64"));
904
905 /*
	 * Channel is assumed to range from 0 to the maximum DMA channel
	 * number. If we use the virtual DMA CSR address space from the
	 * config space (in the PCI case), then the following code needs
	 * to use a different offset computation macro.
910 */
911 reg_offset = reg_base + DMC_OFFSET(channel);
912
913 NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_rxdma_channel_get64"));
914
915 return (NXGE_PIO_READ64(handle, reg_addrp, reg_offset));
916 }
917
918 /* ARGSUSED */
919 void
nxge_get32(p_nxge_t nxgep, p_mblk_t mp)
921 {
922 nxge_os_acc_handle_t nxge_regh;
923
924 NXGE_DEBUG_MSG((nxgep, IOC_CTL, "nxge_get32"));
925 nxge_regh = nxgep->dev_regs->nxge_regh;
926
927 *(uint32_t *)mp->b_rptr = NXGE_PIO_READ32(nxge_regh,
928 nxgep->dev_regs->nxge_regp, *(uint32_t *)mp->b_rptr);
929
930 NXGE_DEBUG_MSG((nxgep, IOC_CTL, "value = 0x%08X",
931 *(uint32_t *)mp->b_rptr));
932 NXGE_DEBUG_MSG((nxgep, IOC_CTL, "nxge_get32"));
933 }
934
935 /* ARGSUSED */
936 void
nxge_put32(p_nxge_t nxgep, p_mblk_t mp)
938 {
939 nxge_os_acc_handle_t nxge_regh;
940 uint32_t *buf;
941 uint8_t *reg;
942
943 NXGE_DEBUG_MSG((nxgep, IOC_CTL, "nxge_put32"));
944 nxge_regh = nxgep->dev_regs->nxge_regh;
945
946 buf = (uint32_t *)mp->b_rptr;
947 reg = (uint8_t *)(nxgep->dev_regs->nxge_regp) + buf[0];
948 NXGE_DEBUG_MSG((nxgep, IOC_CTL,
949 "reg = 0x%016llX index = 0x%08X value = 0x%08X",
950 reg, buf[0], buf[1]));
951 NXGE_PIO_WRITE32(nxge_regh, (uint32_t *)reg, 0, buf[1]);
952 NXGE_DEBUG_MSG((nxgep, IOC_CTL, "nxge_put32"));
953 }
954
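/*
 * nxge_set_lb() validates the requested loopback mode against the port's
 * capabilities and portmode, updates the link parameters accordingly,
 * forces link speed/duplex/up state for the internal modes, and finally
 * performs a global reset to apply the new mode.
 */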
955 /*ARGSUSED*/
956 boolean_t
nxge_set_lb(p_nxge_t nxgep, queue_t *wq, p_mblk_t mp)
958 {
959 boolean_t status = B_TRUE;
960 uint32_t lb_mode;
961 lb_property_t *lb_info;
962
963 NXGE_DEBUG_MSG((nxgep, IOC_CTL, "<== nxge_set_lb"));
964 lb_mode = nxgep->statsp->port_stats.lb_mode;
965 if (lb_mode == *(uint32_t *)mp->b_rptr) {
966 cmn_err(CE_NOTE,
967 "!nxge%d: Loopback mode already set (lb_mode %d).\n",
968 nxgep->instance, lb_mode);
969 status = B_FALSE;
970 goto nxge_set_lb_exit;
971 }
972 lb_mode = *(uint32_t *)mp->b_rptr;
973 lb_info = NULL;
974 if (lb_mode == lb_normal.value)
975 lb_info = &lb_normal;
976 else if ((lb_mode == lb_external10g.value) &&
977 (nxgep->statsp->mac_stats.cap_10gfdx))
978 lb_info = &lb_external10g;
979 else if ((lb_mode == lb_external1000.value) &&
980 (nxgep->statsp->mac_stats.cap_1000fdx))
981 lb_info = &lb_external1000;
982 else if ((lb_mode == lb_external100.value) &&
983 (nxgep->statsp->mac_stats.cap_100fdx))
984 lb_info = &lb_external100;
985 else if ((lb_mode == lb_external10.value) &&
986 (nxgep->statsp->mac_stats.cap_10fdx))
987 lb_info = &lb_external10;
988 else if ((lb_mode == lb_phy10g.value) &&
989 (nxgep->mac.portmode == PORT_10G_COPPER ||
990 nxgep->mac.portmode == PORT_10G_TN1010 ||
991 nxgep->mac.portmode == PORT_10G_FIBER))
992 lb_info = &lb_phy10g;
993 else if ((lb_mode == lb_phy1000.value) &&
994 (nxgep->mac.portmode == PORT_1G_COPPER ||
995 nxgep->mac.portmode == PORT_1G_TN1010 ||
996 nxgep->mac.portmode == PORT_1G_RGMII_FIBER))
997 lb_info = &lb_phy1000;
998 else if ((lb_mode == lb_phy.value) &&
999 (nxgep->mac.portmode == PORT_1G_COPPER))
1000 lb_info = &lb_phy;
1001 else if ((lb_mode == lb_serdes10g.value) &&
1002 (nxgep->mac.portmode == PORT_10G_FIBER ||
1003 nxgep->mac.portmode == PORT_10G_COPPER ||
1004 nxgep->mac.portmode == PORT_10G_TN1010 ||
1005 nxgep->mac.portmode == PORT_10G_SERDES))
1006 lb_info = &lb_serdes10g;
1007 else if ((lb_mode == lb_serdes1000.value) &&
1008 (nxgep->mac.portmode == PORT_1G_FIBER ||
1009 nxgep->mac.portmode == PORT_1G_TN1010 ||
1010 nxgep->mac.portmode == PORT_1G_SERDES))
1011 lb_info = &lb_serdes1000;
1012 else if (lb_mode == lb_mac10g.value)
1013 lb_info = &lb_mac10g;
1014 else if (lb_mode == lb_mac1000.value)
1015 lb_info = &lb_mac1000;
1016 else if (lb_mode == lb_mac.value)
1017 lb_info = &lb_mac;
1018 else {
1019 cmn_err(CE_NOTE,
1020 "!nxge%d: Loopback mode not supported(mode %d).\n",
1021 nxgep->instance, lb_mode);
1022 status = B_FALSE;
1023 goto nxge_set_lb_exit;
1024 }
1025
1026 if (lb_mode == nxge_lb_normal) {
1027 if (nxge_lb_dbg) {
1028 cmn_err(CE_NOTE,
1029 "!nxge%d: Returning to normal operation",
1030 nxgep->instance);
1031 }
1032 if (nxge_set_lb_normal(nxgep) != NXGE_OK) {
1033 status = B_FALSE;
1034 cmn_err(CE_NOTE,
1035 "!nxge%d: Failed to return to normal operation",
1036 nxgep->instance);
1037 }
1038 goto nxge_set_lb_exit;
1039 }
1040 nxgep->statsp->port_stats.lb_mode = lb_mode;
1041
1042 if (nxge_lb_dbg)
1043 cmn_err(CE_NOTE,
1044 "!nxge%d: Adapter now in %s loopback mode",
1045 nxgep->instance, lb_info->key);
1046 nxgep->param_arr[param_autoneg].value = 0;
1047 nxgep->param_arr[param_anar_10gfdx].value =
1048 (nxgep->statsp->port_stats.lb_mode == nxge_lb_ext10g) ||
1049 (nxgep->statsp->port_stats.lb_mode == nxge_lb_mac10g) ||
1050 (nxgep->statsp->port_stats.lb_mode == nxge_lb_phy10g) ||
1051 (nxgep->statsp->port_stats.lb_mode == nxge_lb_serdes10g);
1052 nxgep->param_arr[param_anar_10ghdx].value = 0;
1053 nxgep->param_arr[param_anar_1000fdx].value =
1054 (nxgep->statsp->port_stats.lb_mode == nxge_lb_ext1000) ||
1055 (nxgep->statsp->port_stats.lb_mode == nxge_lb_mac1000) ||
1056 (nxgep->statsp->port_stats.lb_mode == nxge_lb_phy1000) ||
1057 (nxgep->statsp->port_stats.lb_mode == nxge_lb_serdes1000);
1058 nxgep->param_arr[param_anar_1000hdx].value = 0;
1059 nxgep->param_arr[param_anar_100fdx].value =
1060 (nxgep->statsp->port_stats.lb_mode == nxge_lb_phy) ||
1061 (nxgep->statsp->port_stats.lb_mode == nxge_lb_mac) ||
1062 (nxgep->statsp->port_stats.lb_mode == nxge_lb_ext100);
1063 nxgep->param_arr[param_anar_100hdx].value = 0;
1064 nxgep->param_arr[param_anar_10fdx].value =
1065 (nxgep->statsp->port_stats.lb_mode == nxge_lb_mac) ||
1066 (nxgep->statsp->port_stats.lb_mode == nxge_lb_ext10);
1067 if (nxgep->statsp->port_stats.lb_mode == nxge_lb_ext1000) {
1068 nxgep->param_arr[param_master_cfg_enable].value = 1;
1069 nxgep->param_arr[param_master_cfg_value].value = 1;
1070 }
1071 if ((nxgep->statsp->port_stats.lb_mode == nxge_lb_ext10g) ||
1072 (nxgep->statsp->port_stats.lb_mode == nxge_lb_ext1000) ||
1073 (nxgep->statsp->port_stats.lb_mode == nxge_lb_ext100) ||
1074 (nxgep->statsp->port_stats.lb_mode == nxge_lb_ext10) ||
1075 (nxgep->statsp->port_stats.lb_mode == nxge_lb_phy10g) ||
1076 (nxgep->statsp->port_stats.lb_mode == nxge_lb_phy1000) ||
1077 (nxgep->statsp->port_stats.lb_mode == nxge_lb_phy)) {
1078
1079 if (nxge_link_monitor(nxgep, LINK_MONITOR_STOP) != NXGE_OK)
1080 goto nxge_set_lb_err;
1081 if (nxge_xcvr_find(nxgep) != NXGE_OK)
1082 goto nxge_set_lb_err;
1083 if (nxge_link_init(nxgep) != NXGE_OK)
1084 goto nxge_set_lb_err;
1085 if (nxge_link_monitor(nxgep, LINK_MONITOR_START) != NXGE_OK)
1086 goto nxge_set_lb_err;
1087 }
1088 if (lb_info->lb_type == internal) {
1089 if ((nxgep->statsp->port_stats.lb_mode == nxge_lb_mac10g) ||
1090 (nxgep->statsp->port_stats.lb_mode ==
1091 nxge_lb_phy10g) ||
1092 (nxgep->statsp->port_stats.lb_mode ==
1093 nxge_lb_serdes10g)) {
1094 nxgep->statsp->mac_stats.link_speed = 10000;
1095 } else if ((nxgep->statsp->port_stats.lb_mode
1096 == nxge_lb_mac1000) ||
1097 (nxgep->statsp->port_stats.lb_mode ==
1098 nxge_lb_phy1000) ||
1099 (nxgep->statsp->port_stats.lb_mode ==
1100 nxge_lb_serdes1000)) {
1101 nxgep->statsp->mac_stats.link_speed = 1000;
1102 } else {
1103 nxgep->statsp->mac_stats.link_speed = 100;
1104 }
1105 nxgep->statsp->mac_stats.link_duplex = 2;
1106 nxgep->statsp->mac_stats.link_up = 1;
1107 }
1108 if (nxge_global_reset(nxgep) != NXGE_OK)
1109 goto nxge_set_lb_err;
1110
1111 nxge_set_lb_exit:
1112 NXGE_DEBUG_MSG((nxgep, DDI_CTL,
1113 "<== nxge_set_lb status = 0x%08x", status));
1114 return (status);
1115 nxge_set_lb_err:
1116 status = B_FALSE;
1117 cmn_err(CE_NOTE,
1118 "!nxge%d: Failed to put adapter in %s loopback mode",
1119 nxgep->instance, lb_info->key);
1120 return (status);
1121 }
1122
1123 /* Return to normal (no loopback) mode */
1124 /* ARGSUSED */
1125 nxge_status_t
nxge_set_lb_normal(p_nxge_t nxgep)
1127 {
1128 nxge_status_t status = NXGE_OK;
1129
1130 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_set_lb_normal"));
1131
1132 nxgep->statsp->port_stats.lb_mode = nxge_lb_normal;
1133 nxgep->param_arr[param_autoneg].value =
1134 nxgep->param_arr[param_autoneg].old_value;
1135 nxgep->param_arr[param_anar_1000fdx].value =
1136 nxgep->param_arr[param_anar_1000fdx].old_value;
1137 nxgep->param_arr[param_anar_1000hdx].value =
1138 nxgep->param_arr[param_anar_1000hdx].old_value;
1139 nxgep->param_arr[param_anar_100fdx].value =
1140 nxgep->param_arr[param_anar_100fdx].old_value;
1141 nxgep->param_arr[param_anar_100hdx].value =
1142 nxgep->param_arr[param_anar_100hdx].old_value;
1143 nxgep->param_arr[param_anar_10fdx].value =
1144 nxgep->param_arr[param_anar_10fdx].old_value;
1145 nxgep->param_arr[param_master_cfg_enable].value =
1146 nxgep->param_arr[param_master_cfg_enable].old_value;
1147 nxgep->param_arr[param_master_cfg_value].value =
1148 nxgep->param_arr[param_master_cfg_value].old_value;
1149
1150 if ((status = nxge_global_reset(nxgep)) != NXGE_OK)
1151 return (status);
1152
1153 if ((status = nxge_link_monitor(nxgep, LINK_MONITOR_STOP)) != NXGE_OK)
1154 return (status);
1155 if ((status = nxge_xcvr_find(nxgep)) != NXGE_OK)
1156 return (status);
1157 if ((status = nxge_link_init(nxgep)) != NXGE_OK)
1158 return (status);
1159 status = nxge_link_monitor(nxgep, LINK_MONITOR_START);
1160
1161 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_set_lb_normal"));
1162
1163 return (status);
1164 }
1165
1166 /* ARGSUSED */
1167 void
nxge_get_mii(p_nxge_t nxgep, p_mblk_t mp)
1169 {
1170 uint16_t reg;
1171
1172 NXGE_DEBUG_MSG((nxgep, IOC_CTL, "==> nxge_get_mii"));
1173
1174 reg = *(uint16_t *)mp->b_rptr;
1175 (void) nxge_mii_read(nxgep, nxgep->statsp->mac_stats.xcvr_portn, reg,
1176 (uint16_t *)mp->b_rptr);
1177 NXGE_DEBUG_MSG((nxgep, IOC_CTL, "reg = 0x%08X value = 0x%04X",
1178 reg, *(uint16_t *)mp->b_rptr));
1179 NXGE_DEBUG_MSG((nxgep, IOC_CTL, "<== nxge_get_mii"));
1180 }
1181
1182 /* ARGSUSED */
1183 void
nxge_put_mii(p_nxge_t nxgep, p_mblk_t mp)
1185 {
1186 uint16_t *buf;
1187 uint8_t reg;
1188
1189 NXGE_DEBUG_MSG((nxgep, IOC_CTL, "==> nxge_put_mii"));
1190 buf = (uint16_t *)mp->b_rptr;
1191 reg = (uint8_t)buf[0];
1192 NXGE_DEBUG_MSG((nxgep, IOC_CTL,
1193 "reg = 0x%08X index = 0x%08X value = 0x%08X",
1194 reg, buf[0], buf[1]));
1195 (void) nxge_mii_write(nxgep, nxgep->statsp->mac_stats.xcvr_portn,
1196 reg, buf[1]);
1197 NXGE_DEBUG_MSG((nxgep, IOC_CTL, "<== nxge_put_mii"));
1198 }
1199
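/*
 * nxge_check_hw_state() runs from a periodic timer: it checks for transmit
 * hangs, verifies the register access handle, runs the system error check
 * via nxge_syserr_intr(), and then re-arms itself with nxge_start_timer().
 */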
1200 /* ARGSUSED */
1201 void
nxge_check_hw_state(p_nxge_t nxgep)
1203 {
1204 p_nxge_ldgv_t ldgvp;
1205 p_nxge_ldv_t t_ldvp;
1206
1207 NXGE_DEBUG_MSG((nxgep, SYSERR_CTL, "==> nxge_check_hw_state"));
1208
1209 MUTEX_ENTER(nxgep->genlock);
1210 nxgep->nxge_timerid = 0;
1211 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
1212 goto nxge_check_hw_state_exit;
1213 }
1214 nxge_check_tx_hang(nxgep);
1215
1216 ldgvp = nxgep->ldgvp;
1217 if (ldgvp == NULL || (ldgvp->ldvp_syserr == NULL)) {
1218 NXGE_ERROR_MSG((nxgep, SYSERR_CTL, "<== nxge_check_hw_state: "
1219 "NULL ldgvp (interrupt not ready)."));
1220 goto nxge_check_hw_state_exit;
1221 }
1222 t_ldvp = ldgvp->ldvp_syserr;
1223 if (!t_ldvp->use_timer) {
1224 NXGE_DEBUG_MSG((nxgep, SYSERR_CTL, "<== nxge_check_hw_state: "
1225 "ldgvp $%p t_ldvp $%p use_timer flag %d",
1226 ldgvp, t_ldvp, t_ldvp->use_timer));
1227 goto nxge_check_hw_state_exit;
1228 }
1229 if (fm_check_acc_handle(nxgep->dev_regs->nxge_regh) != DDI_FM_OK) {
1230 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1231 "port%d Bad register acc handle", nxgep->mac.portnum));
1232 }
1233 (void) nxge_syserr_intr((void *) t_ldvp, (void *) nxgep);
1234
1235 nxgep->nxge_timerid = nxge_start_timer(nxgep, nxge_check_hw_state,
1236 NXGE_CHECK_TIMER);
1237
1238 nxge_check_hw_state_exit:
1239 MUTEX_EXIT(nxgep->genlock);
1240 NXGE_DEBUG_MSG((nxgep, SYSERR_CTL, "<== nxge_check_hw_state"));
1241 }
1242
1243 /*ARGSUSED*/
1244 static void
nxge_rtrace_ioctl(p_nxge_t nxgep, queue_t *wq, mblk_t *mp,
1246 struct iocblk *iocp)
1247 {
1248 ssize_t size;
1249 rtrace_t *rtp;
1250 mblk_t *nmp;
1251 uint32_t i, j;
1252 uint32_t start_blk;
1253 uint32_t base_entry;
1254 uint32_t num_entries;
1255
1256 NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_rtrace_ioctl"));
1257
1258 size = 1024;
1259 if (mp->b_cont == NULL || MBLKL(mp->b_cont) < size) {
1260 NXGE_DEBUG_MSG((nxgep, STR_CTL,
1261 "malformed M_IOCTL MBLKL = %d size = %d",
1262 MBLKL(mp->b_cont), size));
1263 miocnak(wq, mp, 0, EINVAL);
1264 return;
1265 }
1266 nmp = mp->b_cont;
1267 rtp = (rtrace_t *)nmp->b_rptr;
1268 start_blk = rtp->next_idx;
1269 num_entries = rtp->last_idx;
1270 base_entry = start_blk * MAX_RTRACE_IOC_ENTRIES;
1271
1272 NXGE_DEBUG_MSG((nxgep, STR_CTL, "start_blk = %d\n", start_blk));
1273 NXGE_DEBUG_MSG((nxgep, STR_CTL, "num_entries = %d\n", num_entries));
1274 NXGE_DEBUG_MSG((nxgep, STR_CTL, "base_entry = %d\n", base_entry));
1275
1276 rtp->next_idx = npi_rtracebuf.next_idx;
1277 rtp->last_idx = npi_rtracebuf.last_idx;
1278 rtp->wrapped = npi_rtracebuf.wrapped;
1279 for (i = 0, j = base_entry; i < num_entries; i++, j++) {
1280 rtp->buf[i].ctl_addr = npi_rtracebuf.buf[j].ctl_addr;
1281 rtp->buf[i].val_l32 = npi_rtracebuf.buf[j].val_l32;
1282 rtp->buf[i].val_h32 = npi_rtracebuf.buf[j].val_h32;
1283 }
1284
1285 nmp->b_wptr = nmp->b_rptr + size;
1286 NXGE_DEBUG_MSG((nxgep, STR_CTL, "<== nxge_rtrace_ioctl"));
1287 miocack(wq, mp, (int)size, 0);
1288 }
1289