/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#include <hxge_impl.h>

lb_property_t lb_normal = {normal, "normal", hxge_lb_normal};
lb_property_t lb_mac10g = {internal, "mac10g", hxge_lb_mac10g};

uint32_t hxge_lb_dbg = 1;

extern uint32_t hxge_jumbo_frame_size;

static void hxge_rtrace_ioctl(p_hxge_t, queue_t *, mblk_t *, struct iocblk *);

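/*
 * Reset the device: quiesce hardware interrupts, reinitialize the link
 * (when resuming from a suspend) and the VMAC, then re-enable interrupts.
 */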
void
hxge_global_reset(p_hxge_t hxgep)
{
	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_global_reset"));

	(void) hxge_intr_hw_disable(hxgep);

	if (hxgep->suspended)
		(void) hxge_link_init(hxgep);

	(void) hxge_vmac_init(hxgep);

	(void) hxge_intr_hw_enable(hxgep);

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_global_reset"));
}

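/*
 * Initialize the VMAC frame size limits.
 */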
void
hxge_hw_id_init(p_hxge_t hxgep)
{
	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_hw_id_init"));

	/*
	 * Initialize the frame size to either the standard "1500 + 38"
	 * or jumbo. The user may tune the frame size through the "mtu"
	 * parameter using "dladm set-linkprop".
	 */
	hxgep->vmac.minframesize = MIN_FRAME_SIZE;
	hxgep->vmac.maxframesize = HXGE_DEFAULT_MTU + MTU_TO_FRAME_SIZE;
	if (hxgep->param_arr[param_accept_jumbo].value)
		hxgep->vmac.maxframesize = (uint16_t)hxge_jumbo_frame_size;

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_hw_id_init: maxframesize %d",
	    hxgep->vmac.maxframesize));
	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_hw_id_init"));
}

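/*
 * Perform the one-time hardware initialization shared by all instances
 * on the same device. The first caller does the work (a PFC reset);
 * later callers see COMMON_INIT_DONE and return immediately.
 */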
void
hxge_hw_init_niu_common(p_hxge_t hxgep)
{
	p_hxge_hw_list_t hw_p;

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_hw_init_niu_common"));

	if ((hw_p = hxgep->hxge_hw_p) == NULL) {
		return;
	}

	MUTEX_ENTER(&hw_p->hxge_cfg_lock);
	if (hw_p->flags & COMMON_INIT_DONE) {
		HXGE_DEBUG_MSG((hxgep, MOD_CTL, "hxge_hw_init_niu_common"
		    " already done for dip $%p exiting", hw_p->parent_devp));
		MUTEX_EXIT(&hw_p->hxge_cfg_lock);
		return;
	}

	hw_p->flags = COMMON_INIT_START;
	HXGE_DEBUG_MSG((hxgep, MOD_CTL,
	    "hxge_hw_init_niu_common Started for device id %x",
	    hw_p->parent_devp));

	(void) hxge_pfc_hw_reset(hxgep);
	hw_p->flags = COMMON_INIT_DONE;
	MUTEX_EXIT(&hw_p->hxge_cfg_lock);

	HXGE_DEBUG_MSG((hxgep, MOD_CTL,
	    "hxge_hw_init_niu_common Done for device id %x",
	    hw_p->parent_devp));
	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_hw_init_niu_common"));
}

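/*
 * Main interrupt service routine. Read the flag bits for the logical
 * device group that raised the interrupt, dispatch to the handler of
 * each logical device whose bit is set, and then re-arm the group.
 */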
uint_t
hxge_intr(caddr_t arg1, caddr_t arg2)
{
	p_hxge_ldv_t ldvp = (p_hxge_ldv_t)arg1;
	p_hxge_t hxgep = (p_hxge_t)arg2;
	uint8_t ldv;
	hpi_handle_t handle;
	p_hxge_ldgv_t ldgvp;
	p_hxge_ldg_t ldgp, t_ldgp;
	p_hxge_ldv_t t_ldvp;
	uint32_t vector0 = 0, vector1 = 0;
	int j, nldvs;
	hpi_status_t rs = HPI_SUCCESS;

	/*
	 * The DDI interface may pass NULL as the second argument.
	 */
	if ((arg2 == NULL) || ((void *)ldvp->hxgep != arg2)) {
		hxgep = ldvp->hxgep;
	}

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intr"));

	if (hxgep->hxge_mac_state != HXGE_MAC_STARTED) {
		HXGE_ERROR_MSG((hxgep, INT_CTL,
		    "<== hxge_intr: not initialized"));
		return (DDI_INTR_UNCLAIMED);
	}

	ldgvp = hxgep->ldgvp;

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intr: ldgvp $%p", ldgvp));

	if (ldvp == NULL && ldgvp)
		t_ldvp = ldvp = ldgvp->ldvp;
	if (ldvp)
		ldgp = t_ldgp = ldvp->ldgp;

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intr: "
	    "ldgvp $%p ldvp $%p ldgp $%p", ldgvp, ldvp, ldgp));

	if (ldgvp == NULL || ldvp == NULL || ldgp == NULL) {
		HXGE_ERROR_MSG((hxgep, INT_CTL, "==> hxge_intr: "
		    "ldgvp $%p ldvp $%p ldgp $%p", ldgvp, ldvp, ldgp));
		HXGE_ERROR_MSG((hxgep, INT_CTL, "<== hxge_intr: not ready"));
		return (DDI_INTR_UNCLAIMED);
	}

	/*
	 * This interrupt handler will have to go through all the logical
	 * devices to find out which logical device interrupts us and then
	 * call its handler to process the events.
	 */
	handle = HXGE_DEV_HPI_HANDLE(hxgep);
	t_ldgp = ldgp;
	t_ldvp = ldgp->ldvp;
	nldvs = ldgp->nldvs;

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intr: #ldvs %d #intrs %d",
	    nldvs, ldgvp->ldg_intrs));

	/*
	 * Get this group's flag bits.
	 */
	t_ldgp->interrupted = B_FALSE;
	rs = hpi_ldsv_ldfs_get(handle, t_ldgp->ldg, &vector0, &vector1);
	if (rs != HPI_SUCCESS)
		return (DDI_INTR_UNCLAIMED);

	if (!vector0 && !vector1) {
		HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intr: "
		    "no interrupts on group %d", t_ldgp->ldg));
		return (DDI_INTR_UNCLAIMED);
	}

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intr: "
	    "vector0 0x%llx vector1 0x%llx", vector0, vector1));

	t_ldgp->interrupted = B_TRUE;
	nldvs = t_ldgp->nldvs;

	/*
	 * Process all devices that share this group.
	 */
	for (j = 0; j < nldvs; j++, t_ldvp++) {
		/*
		 * Call the device's handler if its flag bits are on.
		 */
		ldv = t_ldvp->ldv;
		if (LDV_ON(ldv, vector0) || LDV_ON(ldv, vector1)) {
			HXGE_DEBUG_MSG((hxgep, INT_CTL,
			    "==> hxge_intr: calling device %d"
			    " #ldvs %d", j, nldvs));
			(void) (t_ldvp->ldv_intr_handler)(
			    (caddr_t)t_ldvp, arg2);
		}
	}

	/*
	 * Re-arm group interrupts
	 */
	if (t_ldgp->interrupted) {
		HXGE_DEBUG_MSG((hxgep, INT_CTL,
		    "==> hxge_intr: arm group %d", t_ldgp->ldg));
		(void) hpi_intr_ldg_mgmt_set(handle, t_ldgp->ldg,
		    t_ldgp->arm, t_ldgp->ldg_timer);
	}

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intr"));
	return (DDI_INTR_CLAIMED);
}

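/*
 * Handle fatal PEU (PCI Express Unit) errors: mask further PEU
 * interrupts, bump a kstat counter and log a message for each error
 * bit that is set, then post an FMA ereport.
 */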
hxge_status_t
hxge_peu_handle_sys_errors(p_hxge_t hxgep)
{
	hpi_handle_t handle;
	p_hxge_peu_sys_stats_t statsp;
	peu_intr_stat_t stat;

	handle = hxgep->hpi_handle;
	statsp = (p_hxge_peu_sys_stats_t)&hxgep->statsp->peu_sys_stats;

	HXGE_REG_RD32(handle, PEU_INTR_STAT, &stat.value);

	/*
	 * The PCIE errors are unrecoverable and cannot be cleared.
	 * The only thing we can do here is to mask them off to prevent
	 * continued interrupts.
	 */
	HXGE_REG_WR32(handle, PEU_INTR_MASK, 0xffffffff);

	if (stat.bits.spc_acc_err) {
		statsp->spc_acc_err++;
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_peu_handle_sys_errors: spc_acc_err"));
	}

	if (stat.bits.tdc_pioacc_err) {
		statsp->tdc_pioacc_err++;
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_peu_handle_sys_errors: tdc_pioacc_err"));
	}

	if (stat.bits.rdc_pioacc_err) {
		statsp->rdc_pioacc_err++;
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_peu_handle_sys_errors: rdc_pioacc_err"));
	}

	if (stat.bits.pfc_pioacc_err) {
		statsp->pfc_pioacc_err++;
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_peu_handle_sys_errors: pfc_pioacc_err"));
	}

	if (stat.bits.vmac_pioacc_err) {
		statsp->vmac_pioacc_err++;
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_peu_handle_sys_errors: vmac_pioacc_err"));
	}

	if (stat.bits.cpl_hdrq_parerr) {
		statsp->cpl_hdrq_parerr++;
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_peu_handle_sys_errors: cpl_hdrq_parerr"));
	}

	if (stat.bits.cpl_dataq_parerr) {
		statsp->cpl_dataq_parerr++;
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_peu_handle_sys_errors: cpl_dataq_parerr"));
	}

	if (stat.bits.retryram_xdlh_parerr) {
		statsp->retryram_xdlh_parerr++;
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_peu_handle_sys_errors: retryram_xdlh_parerr"));
	}

	if (stat.bits.retrysotram_xdlh_parerr) {
		statsp->retrysotram_xdlh_parerr++;
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_peu_handle_sys_errors: retrysotram_xdlh_parerr"));
	}

	if (stat.bits.p_hdrq_parerr) {
		statsp->p_hdrq_parerr++;
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_peu_handle_sys_errors: p_hdrq_parerr"));
	}

	if (stat.bits.p_dataq_parerr) {
		statsp->p_dataq_parerr++;
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_peu_handle_sys_errors: p_dataq_parerr"));
	}

	if (stat.bits.np_hdrq_parerr) {
		statsp->np_hdrq_parerr++;
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_peu_handle_sys_errors: np_hdrq_parerr"));
	}

	if (stat.bits.np_dataq_parerr) {
		statsp->np_dataq_parerr++;
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_peu_handle_sys_errors: np_dataq_parerr"));
	}

	if (stat.bits.eic_msix_parerr) {
		statsp->eic_msix_parerr++;
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_peu_handle_sys_errors: eic_msix_parerr"));
	}

	if (stat.bits.hcr_parerr) {
		statsp->hcr_parerr++;
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_peu_handle_sys_errors: hcr_parerr"));
	}

	HXGE_FM_REPORT_ERROR(hxgep, 0, HXGE_FM_EREPORT_PEU_ERR);
	return (HXGE_OK);
}

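/*
 * System error interrupt handler. Read the device error status
 * register and dispatch to the TDC, RDC, or PEU error handlers as
 * appropriate, then re-arm the group when this handler owns it.
 */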
/*ARGSUSED*/
uint_t
hxge_syserr_intr(caddr_t arg1, caddr_t arg2)
{
	p_hxge_ldv_t ldvp = (p_hxge_ldv_t)arg1;
	p_hxge_t hxgep = (p_hxge_t)arg2;
	p_hxge_ldg_t ldgp = NULL;
	hpi_handle_t handle;
	dev_err_stat_t estat;

	if ((arg1 == NULL) && (arg2 == NULL)) {
		return (DDI_INTR_UNCLAIMED);
	}

	if ((arg2 == NULL) ||
	    ((ldvp != NULL) && ((void *)ldvp->hxgep != arg2))) {
		if (ldvp != NULL) {
			hxgep = ldvp->hxgep;
		}
	}

	HXGE_DEBUG_MSG((hxgep, SYSERR_CTL,
	    "==> hxge_syserr_intr: arg2 $%p arg1 $%p", hxgep, ldvp));

	if (ldvp != NULL && ldvp->use_timer == B_FALSE) {
		ldgp = ldvp->ldgp;
		if (ldgp == NULL) {
			HXGE_ERROR_MSG((hxgep, SYSERR_CTL,
			    "<== hxge_syserr_intr(no logical group): "
			    "arg2 $%p arg1 $%p", hxgep, ldvp));
			return (DDI_INTR_UNCLAIMED);
		}
	}

	/*
	 * This interrupt handler is for system error interrupts.
	 */
	handle = HXGE_DEV_HPI_HANDLE(hxgep);
	estat.value = 0;
	(void) hpi_fzc_sys_err_stat_get(handle, &estat);
	HXGE_DEBUG_MSG((hxgep, SYSERR_CTL,
	    "==> hxge_syserr_intr: device error 0x%016llx", estat.value));

	if (estat.bits.tdc_err0 || estat.bits.tdc_err1) {
		/* TDMC */
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_syserr_intr: device error - TDMC"));
		(void) hxge_txdma_handle_sys_errors(hxgep);
	} else if (estat.bits.rdc_err0 || estat.bits.rdc_err1) {
		/* RDMC */
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_syserr_intr: device error - RDMC"));
		(void) hxge_rxdma_handle_sys_errors(hxgep);
	} else if (estat.bits.vnm_pio_err1 || estat.bits.peu_err1) {
		/* PCI-E */
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_syserr_intr: device error - PCI-E"));

		/* kstats are updated here */
		(void) hxge_peu_handle_sys_errors(hxgep);

		if (estat.bits.peu_err1)
			HXGE_FM_REPORT_ERROR(hxgep, 0,
			    HXGE_FM_EREPORT_PEU_ERR);

		if (estat.bits.vnm_pio_err1)
			HXGE_FM_REPORT_ERROR(hxgep, 0,
			    HXGE_FM_EREPORT_PEU_VNM_PIO_ERR);
	} else if (estat.value != 0) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_syserr_intr: device error - unknown"));
	}

	if ((ldgp != NULL) && (ldvp != NULL) &&
	    (ldgp->nldvs == 1) && !ldvp->use_timer) {
		(void) hpi_intr_ldg_mgmt_set(handle, ldgp->ldg,
		    B_TRUE, ldgp->ldg_timer);
	}

	HXGE_DEBUG_MSG((hxgep, SYSERR_CTL, "<== hxge_syserr_intr"));
	return (DDI_INTR_CLAIMED);
}

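/*
 * Unmask (enable) the hardware interrupts for this instance.
 */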
void
hxge_intr_hw_enable(p_hxge_t hxgep)
{
	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intr_hw_enable"));

	(void) hxge_intr_mask_mgmt_set(hxgep, B_TRUE);

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intr_hw_enable"));
}

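/*
 * Mask (disable) the hardware interrupts for this instance.
 */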
void
hxge_intr_hw_disable(p_hxge_t hxgep)
{
	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intr_hw_disable"));

	(void) hxge_intr_mask_mgmt_set(hxgep, B_FALSE);

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intr_hw_disable"));
}

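/*
 * Update the receive interrupt blanking interval. Only the tick
 * value is recorded here; the count argument is currently unused.
 */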
/*ARGSUSED*/
void
hxge_rx_hw_blank(void *arg, time_t ticks, uint_t count)
{
	p_hxge_t hxgep = (p_hxge_t)arg;

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_rx_hw_blank"));

	/*
	 * Save the current tick value for later processing by the
	 * receive packet interrupt routines.
	 */
	hxgep->intr_timeout = (uint16_t)ticks;

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_rx_hw_blank"));
}

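/*
 * Stop the hardware: disable the transmit and receive VMACs and stop
 * the transmit and receive DMA channels.
 */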
void
hxge_hw_stop(p_hxge_t hxgep)
{
	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_hw_stop"));

	(void) hxge_tx_vmac_disable(hxgep);
	(void) hxge_rx_vmac_disable(hxgep);
	(void) hxge_txdma_hw_mode(hxgep, HXGE_DMA_STOP);
	(void) hxge_rxdma_hw_mode(hxgep, HXGE_DMA_STOP);

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_hw_stop"));
}

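/*
 * Dispatch the hardware-specific ioctls (TCAM get/put and register
 * trace retrieval); anything else is nak'd with EINVAL.
 */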
void
hxge_hw_ioctl(p_hxge_t hxgep, queue_t *wq, mblk_t *mp, struct iocblk *iocp)
{
	int cmd;

	HXGE_DEBUG_MSG((hxgep, IOC_CTL, "==> hxge_hw_ioctl"));

	if (hxgep == NULL) {
		miocnak(wq, mp, 0, EINVAL);
		return;
	}

	iocp->ioc_error = 0;
	cmd = iocp->ioc_cmd;

	switch (cmd) {
	default:
		miocnak(wq, mp, 0, EINVAL);
		return;

	case HXGE_PUT_TCAM:
		hxge_put_tcam(hxgep, mp->b_cont);
		miocack(wq, mp, 0, 0);
		break;

	case HXGE_GET_TCAM:
		hxge_get_tcam(hxgep, mp->b_cont);
		miocack(wq, mp, 0, 0);
		break;

	case HXGE_RTRACE:
		hxge_rtrace_ioctl(hxgep, wq, mp, iocp);
		break;
	}
}

/*
 * 10G is the only loopback mode for Hydra.
 */
void
hxge_loopback_ioctl(p_hxge_t hxgep, queue_t *wq, mblk_t *mp,
    struct iocblk *iocp)
{
	p_lb_property_t lb_props;
	size_t size;
	int i;

	if (mp->b_cont == NULL) {
		miocnak(wq, mp, 0, EINVAL);
		return;
	}

	switch (iocp->ioc_cmd) {
	case LB_GET_MODE:
		HXGE_DEBUG_MSG((hxgep, IOC_CTL, "HXGE_GET_LB_MODE command"));
		if (hxgep != NULL) {
			*(lb_info_sz_t *)mp->b_cont->b_rptr =
			    hxgep->statsp->port_stats.lb_mode;
			miocack(wq, mp, sizeof (hxge_lb_t), 0);
		} else
			miocnak(wq, mp, 0, EINVAL);
		break;

	case LB_SET_MODE:
		HXGE_DEBUG_MSG((hxgep, IOC_CTL, "HXGE_SET_LB_MODE command"));
		if (iocp->ioc_count != sizeof (uint32_t)) {
			miocack(wq, mp, 0, 0);
			break;
		}
		if ((hxgep != NULL) && hxge_set_lb(hxgep, wq, mp->b_cont)) {
			miocack(wq, mp, 0, 0);
		} else {
			miocnak(wq, mp, 0, EPROTO);
		}
		break;

	case LB_GET_INFO_SIZE:
		HXGE_DEBUG_MSG((hxgep, IOC_CTL, "LB_GET_INFO_SIZE command"));
		if (hxgep != NULL) {
			size = sizeof (lb_normal) + sizeof (lb_mac10g);

			*(lb_info_sz_t *)mp->b_cont->b_rptr = size;

			HXGE_DEBUG_MSG((hxgep, IOC_CTL,
			    "HXGE_GET_LB_INFO command: size %d", size));
			miocack(wq, mp, sizeof (lb_info_sz_t), 0);
		} else
			miocnak(wq, mp, 0, EINVAL);
		break;

	case LB_GET_INFO:
		HXGE_DEBUG_MSG((hxgep, IOC_CTL, "HXGE_GET_LB_INFO command"));
		if (hxgep != NULL) {
			size = sizeof (lb_normal) + sizeof (lb_mac10g);
			HXGE_DEBUG_MSG((hxgep, IOC_CTL,
			    "HXGE_GET_LB_INFO command: size %d", size));
			if (size == iocp->ioc_count) {
				i = 0;
				lb_props = (p_lb_property_t)mp->b_cont->b_rptr;
				lb_props[i++] = lb_normal;
				lb_props[i++] = lb_mac10g;

				miocack(wq, mp, size, 0);
			} else
				miocnak(wq, mp, 0, EINVAL);
		} else {
			miocnak(wq, mp, 0, EINVAL);
			cmn_err(CE_NOTE, "hxge_loopback_ioctl: "
			    "invalid command 0x%x", iocp->ioc_cmd);
		}
		break;
	}
}

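/*
 * Validate and apply a requested loopback mode (normal or mac10g),
 * then reset the device so the new mode takes effect.
 */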
/*ARGSUSED*/
boolean_t
hxge_set_lb(p_hxge_t hxgep, queue_t *wq, p_mblk_t mp)
{
	boolean_t status = B_TRUE;
	uint32_t lb_mode;
	lb_property_t *lb_info;

	HXGE_DEBUG_MSG((hxgep, IOC_CTL, "==> hxge_set_lb"));
	lb_mode = hxgep->statsp->port_stats.lb_mode;
	if (lb_mode == *(uint32_t *)mp->b_rptr) {
		cmn_err(CE_NOTE,
		    "hxge%d: Loopback mode already set (lb_mode %d).\n",
		    hxgep->instance, lb_mode);
		status = B_FALSE;
		goto hxge_set_lb_exit;
	}

	lb_mode = *(uint32_t *)mp->b_rptr;
	lb_info = NULL;

	/* 10G is the only loopback mode for Hydra */
	if (lb_mode == lb_normal.value)
		lb_info = &lb_normal;
	else if (lb_mode == lb_mac10g.value)
		lb_info = &lb_mac10g;
	else {
		cmn_err(CE_NOTE,
		    "hxge%d: Loopback mode not supported (mode %d).\n",
		    hxgep->instance, lb_mode);
		status = B_FALSE;
		goto hxge_set_lb_exit;
	}

	if (lb_mode == hxge_lb_normal) {
		if (hxge_lb_dbg) {
			cmn_err(CE_NOTE,
			    "!hxge%d: Returning to normal operation",
			    hxgep->instance);
		}

		hxgep->statsp->port_stats.lb_mode = hxge_lb_normal;
		hxge_global_reset(hxgep);

		goto hxge_set_lb_exit;
	}

	hxgep->statsp->port_stats.lb_mode = lb_mode;

	if (hxge_lb_dbg)
		cmn_err(CE_NOTE, "!hxge%d: Adapter now in %s loopback mode",
		    hxgep->instance, lb_info->key);

	if (lb_info->lb_type == internal) {
		if (hxgep->statsp->port_stats.lb_mode == hxge_lb_mac10g)
			hxgep->statsp->mac_stats.link_speed = 10000;
		else {
			cmn_err(CE_NOTE,
			    "hxge%d: Loopback mode not supported (mode %d).\n",
			    hxgep->instance, lb_mode);
			status = B_FALSE;
			goto hxge_set_lb_exit;
		}
		hxgep->statsp->mac_stats.link_duplex = 2;
		hxgep->statsp->mac_stats.link_up = 1;
	}

	hxge_global_reset(hxgep);

hxge_set_lb_exit:
	HXGE_DEBUG_MSG((hxgep, IOC_CTL,
	    "<== hxge_set_lb status = 0x%08x", status));

	return (status);
}

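/*
 * Timer-driven health check: look for hung transmits, verify the
 * register access handle, poll for system errors, and reschedule
 * itself while the hardware remains initialized.
 */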
void
hxge_check_hw_state(p_hxge_t hxgep)
{
	p_hxge_ldgv_t ldgvp;
	p_hxge_ldv_t t_ldvp;

	HXGE_DEBUG_MSG((hxgep, SYSERR_CTL, "==> hxge_check_hw_state"));

	MUTEX_ENTER(hxgep->genlock);

	hxgep->hxge_timerid = 0;
	if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
		goto hxge_check_hw_state_exit;
	}

	hxge_check_tx_hang(hxgep);

	ldgvp = hxgep->ldgvp;
	if (ldgvp == NULL || ldgvp->ldvp_syserr == NULL) {
		HXGE_ERROR_MSG((hxgep, SYSERR_CTL, "<== hxge_check_hw_state: "
		    "NULL ldgvp (interrupt not ready)."));
		goto hxge_check_hw_state_exit;
	}

	t_ldvp = ldgvp->ldvp_syserr;
	if (!t_ldvp->use_timer) {
		HXGE_DEBUG_MSG((hxgep, SYSERR_CTL, "<== hxge_check_hw_state: "
		    "ldgvp $%p t_ldvp $%p use_timer flag %d",
		    ldgvp, t_ldvp, t_ldvp->use_timer));
		goto hxge_check_hw_state_exit;
	}

	if (fm_check_acc_handle(hxgep->dev_regs->hxge_regh) != DDI_FM_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "Bad register acc handle"));
	}

	(void) hxge_syserr_intr((caddr_t)t_ldvp, (caddr_t)hxgep);

	hxgep->hxge_timerid = hxge_start_timer(hxgep, hxge_check_hw_state,
	    HXGE_CHECK_TIMER);

hxge_check_hw_state_exit:
	MUTEX_EXIT(hxgep->genlock);

	HXGE_DEBUG_MSG((hxgep, SYSERR_CTL, "<== hxge_check_hw_state"));
}

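/*
 * Copy a block of entries from the register trace buffer
 * (hpi_rtracebuf) back to the caller in response to an HXGE_RTRACE
 * ioctl. The request identifies the starting block and entry count.
 */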
/*ARGSUSED*/
static void
hxge_rtrace_ioctl(p_hxge_t hxgep, queue_t *wq, mblk_t *mp,
    struct iocblk *iocp)
{
	ssize_t size;
	rtrace_t *rtp;
	mblk_t *nmp;
	uint32_t i, j;
	uint32_t start_blk;
	uint32_t base_entry;
	uint32_t num_entries;

	HXGE_DEBUG_MSG((hxgep, STR_CTL, "==> hxge_rtrace_ioctl"));

	size = 1024;
	if (mp->b_cont == NULL || MBLKL(mp->b_cont) < size) {
		HXGE_DEBUG_MSG((hxgep, STR_CTL,
		    "malformed M_IOCTL MBLKL = %d size = %d",
		    MBLKL(mp->b_cont), size));
		miocnak(wq, mp, 0, EINVAL);
		return;
	}

	nmp = mp->b_cont;
	rtp = (rtrace_t *)nmp->b_rptr;
	start_blk = rtp->next_idx;
	num_entries = rtp->last_idx;
	base_entry = start_blk * MAX_RTRACE_IOC_ENTRIES;

	HXGE_DEBUG_MSG((hxgep, STR_CTL, "start_blk = %d\n", start_blk));
	HXGE_DEBUG_MSG((hxgep, STR_CTL, "num_entries = %d\n", num_entries));
	HXGE_DEBUG_MSG((hxgep, STR_CTL, "base_entry = %d\n", base_entry));

	rtp->next_idx = hpi_rtracebuf.next_idx;
	rtp->last_idx = hpi_rtracebuf.last_idx;
	rtp->wrapped = hpi_rtracebuf.wrapped;
	for (i = 0, j = base_entry; i < num_entries; i++, j++) {
		rtp->buf[i].ctl_addr = hpi_rtracebuf.buf[j].ctl_addr;
		rtp->buf[i].val_l32 = hpi_rtracebuf.buf[j].val_l32;
		rtp->buf[i].val_h32 = hpi_rtracebuf.buf[j].val_h32;
	}

	nmp->b_wptr = nmp->b_rptr + size;
	HXGE_DEBUG_MSG((hxgep, STR_CTL, "<== hxge_rtrace_ioctl"));
	miocack(wq, mp, (int)size, 0);
}