1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
25 */
26
27 #include <sys/nxge/nxge_impl.h>
28 #include <sys/nxge/nxge_rxdma.h>
29 #include <sys/nxge/nxge_hio.h>
30
31 #if !defined(_BIG_ENDIAN)
32 #include <npi_rx_rd32.h>
33 #endif
34 #include <npi_rx_rd64.h>
35 #include <npi_rx_wr64.h>
36
37 #define NXGE_ACTUAL_RDCGRP(nxgep, rdcgrp) \
38 (rdcgrp + nxgep->pt_config.hw_config.def_mac_rxdma_grpid)
39 #define NXGE_ACTUAL_RDC(nxgep, rdc) \
40 (rdc + nxgep->pt_config.hw_config.start_rdc)
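/*
 * Added note: these macros translate the driver's logical, zero-based RDC
 * group and RDC channel numbers into the absolute hardware indices assigned
 * to this port/partition.  For example (hypothetical values), with
 * def_mac_rxdma_grpid == 2 and start_rdc == 8, logical group 1 maps to
 * hardware RDC table 3 and logical channel 0 maps to hardware RDC 8.
 */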
41
42 /*
43 * Globals: tunable parameters (/etc/system or adb)
44 *
45 */
46 extern uint32_t nxge_rbr_size;
47 extern uint32_t nxge_rcr_size;
48 extern uint32_t nxge_rbr_spare_size;
49 extern uint16_t nxge_rdc_buf_offset;
50
51 extern uint32_t nxge_mblks_pending;
52
53 /*
54 * Tunable to reduce the amount of time spent in the
55 * ISR doing Rx Processing.
56 */
57 extern uint32_t nxge_max_rx_pkts;
58
59 /*
60 * Tunables to manage the receive buffer blocks.
61 *
62 * nxge_rx_threshold_hi: copy all buffers.
 63  * nxge_rx_buf_size_type: receive buffer block size type.
64 * nxge_rx_threshold_lo: copy only up to tunable block size type.
65 */
66 extern nxge_rxbuf_threshold_t nxge_rx_threshold_hi;
67 extern nxge_rxbuf_type_t nxge_rx_buf_size_type;
68 extern nxge_rxbuf_threshold_t nxge_rx_threshold_lo;
69
70 extern uint32_t nxge_cksum_offload;
71
72 static nxge_status_t nxge_map_rxdma(p_nxge_t, int);
73 static void nxge_unmap_rxdma(p_nxge_t, int);
74
75 static nxge_status_t nxge_rxdma_hw_start_common(p_nxge_t);
76
77 static nxge_status_t nxge_rxdma_hw_start(p_nxge_t, int);
78 static void nxge_rxdma_hw_stop(p_nxge_t, int);
79
80 static nxge_status_t nxge_map_rxdma_channel(p_nxge_t, uint16_t,
81 p_nxge_dma_common_t *, p_rx_rbr_ring_t *,
82 uint32_t,
83 p_nxge_dma_common_t *, p_rx_rcr_ring_t *,
84 p_rx_mbox_t *);
85 static void nxge_unmap_rxdma_channel(p_nxge_t, uint16_t,
86 p_rx_rbr_ring_t, p_rx_rcr_ring_t, p_rx_mbox_t);
87
88 static nxge_status_t nxge_map_rxdma_channel_cfg_ring(p_nxge_t,
89 uint16_t,
90 p_nxge_dma_common_t *, p_rx_rbr_ring_t *,
91 p_rx_rcr_ring_t *, p_rx_mbox_t *);
92 static void nxge_unmap_rxdma_channel_cfg_ring(p_nxge_t,
93 p_rx_rcr_ring_t, p_rx_mbox_t);
94
95 static nxge_status_t nxge_map_rxdma_channel_buf_ring(p_nxge_t,
96 uint16_t,
97 p_nxge_dma_common_t *,
98 p_rx_rbr_ring_t *, uint32_t);
99 static void nxge_unmap_rxdma_channel_buf_ring(p_nxge_t,
100 p_rx_rbr_ring_t);
101
102 static nxge_status_t nxge_rxdma_start_channel(p_nxge_t, uint16_t,
103 p_rx_rbr_ring_t, p_rx_rcr_ring_t, p_rx_mbox_t);
104 static nxge_status_t nxge_rxdma_stop_channel(p_nxge_t, uint16_t);
105
106 static mblk_t *
107 nxge_rx_pkts(p_nxge_t, p_rx_rcr_ring_t, rx_dma_ctl_stat_t, int);
108
109 static void nxge_receive_packet(p_nxge_t,
110 p_rx_rcr_ring_t,
111 p_rcr_entry_t,
112 boolean_t *,
113 mblk_t **, mblk_t **);
114
115 nxge_status_t nxge_disable_rxdma_channel(p_nxge_t, uint16_t);
116
117 static p_rx_msg_t nxge_allocb(size_t, uint32_t, p_nxge_dma_common_t);
118 static void nxge_freeb(p_rx_msg_t);
119 static nxge_status_t nxge_rx_err_evnts(p_nxge_t, int, rx_dma_ctl_stat_t);
120
121 static nxge_status_t nxge_rxdma_handle_port_errors(p_nxge_t,
122 uint32_t, uint32_t);
123
124 static nxge_status_t nxge_rxbuf_index_info_init(p_nxge_t,
125 p_rx_rbr_ring_t);
126
127
128 static nxge_status_t
129 nxge_rxdma_fatal_err_recover(p_nxge_t, uint16_t);
130
131 nxge_status_t
132 nxge_rx_port_fatal_err_recover(p_nxge_t);
133
134 static void nxge_rxdma_databuf_free(p_rx_rbr_ring_t);
135
136 nxge_status_t
 137 nxge_init_rxdma_channels(p_nxge_t nxgep)
138 {
139 nxge_grp_set_t *set = &nxgep->rx_set;
140 int i, count, channel;
141 nxge_grp_t *group;
142 dc_map_t map;
143 int dev_gindex;
144
145 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_init_rxdma_channels"));
146
147 if (!isLDOMguest(nxgep)) {
148 if (nxge_rxdma_hw_start_common(nxgep) != NXGE_OK) {
149 cmn_err(CE_NOTE, "hw_start_common");
150 return (NXGE_ERROR);
151 }
152 }
153
154 /*
155 * NXGE_LOGICAL_GROUP_MAX > NXGE_MAX_RDC_GROUPS (8)
156 * We only have 8 hardware RDC tables, but we may have
 157  * up to 16 logical (software-defined) groups of RDCs,
158 * if we make use of layer 3 & 4 hardware classification.
159 */
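	/*
	 * Illustrative example (hypothetical values): with
	 * def_mac_rxdma_grpid == 4, logical group i == 1 resolves to
	 * dev_gindex 5 below, and only the channels set in
	 * rdc_grps[5].map are added to that group via nxge_grp_dc_add().
	 */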
160 for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
161 if ((1 << i) & set->lg.map) {
162 group = set->group[i];
163 dev_gindex =
164 nxgep->pt_config.hw_config.def_mac_rxdma_grpid + i;
165 map = nxgep->pt_config.rdc_grps[dev_gindex].map;
166 for (channel = 0; channel < NXGE_MAX_RDCS; channel++) {
167 if ((1 << channel) & map) {
168 if ((nxge_grp_dc_add(nxgep,
169 group, VP_BOUND_RX, channel)))
170 goto init_rxdma_channels_exit;
171 }
172 }
173 }
174 if (++count == set->lg.count)
175 break;
176 }
177
178 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_init_rxdma_channels"));
179 return (NXGE_OK);
180
181 init_rxdma_channels_exit:
182 for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
183 if ((1 << i) & set->lg.map) {
184 group = set->group[i];
185 dev_gindex =
186 nxgep->pt_config.hw_config.def_mac_rxdma_grpid + i;
187 map = nxgep->pt_config.rdc_grps[dev_gindex].map;
188 for (channel = 0; channel < NXGE_MAX_RDCS; channel++) {
189 if ((1 << channel) & map) {
190 nxge_grp_dc_remove(nxgep,
191 VP_BOUND_RX, channel);
192 }
193 }
194 }
195 if (++count == set->lg.count)
196 break;
197 }
198
199 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_init_rxdma_channels"));
200 return (NXGE_ERROR);
201 }
202
203 nxge_status_t
 204 nxge_init_rxdma_channel(p_nxge_t nxge, int channel)
205 {
206 nxge_status_t status;
207
208 NXGE_DEBUG_MSG((nxge, MEM2_CTL, "==> nxge_init_rxdma_channel"));
209
210 status = nxge_map_rxdma(nxge, channel);
211 if (status != NXGE_OK) {
212 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
213 "<== nxge_init_rxdma: status 0x%x", status));
214 return (status);
215 }
216
217 #if defined(sun4v)
218 if (isLDOMguest(nxge)) {
219 /* set rcr_ring */
220 p_rx_rcr_ring_t ring = nxge->rx_rcr_rings->rcr_rings[channel];
221
222 status = nxge_hio_rxdma_bind_intr(nxge, ring, channel);
223 if (status != NXGE_OK) {
224 nxge_unmap_rxdma(nxge, channel);
225 return (status);
226 }
227 }
228 #endif
229
230 status = nxge_rxdma_hw_start(nxge, channel);
231 if (status != NXGE_OK) {
232 nxge_unmap_rxdma(nxge, channel);
233 }
234
235 if (!nxge->statsp->rdc_ksp[channel])
236 nxge_setup_rdc_kstats(nxge, channel);
237
238 NXGE_DEBUG_MSG((nxge, MEM2_CTL,
239 "<== nxge_init_rxdma_channel: status 0x%x", status));
240
241 return (status);
242 }
243
244 void
 245 nxge_uninit_rxdma_channels(p_nxge_t nxgep)
246 {
247 nxge_grp_set_t *set = &nxgep->rx_set;
248 int rdc;
249
250 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_rxdma_channels"));
251
252 if (set->owned.map == 0) {
253 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
254 "nxge_uninit_rxdma_channels: no channels"));
255 return;
256 }
257
258 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
259 if ((1 << rdc) & set->owned.map) {
260 nxge_grp_dc_remove(nxgep, VP_BOUND_RX, rdc);
261 }
262 }
263
264 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_uninit_rxdma_channels"));
265 }
266
267 void
 268 nxge_uninit_rxdma_channel(p_nxge_t nxgep, int channel)
269 {
270 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_rxdma_channel"));
271
272 if (nxgep->statsp->rdc_ksp[channel]) {
273 kstat_delete(nxgep->statsp->rdc_ksp[channel]);
274 nxgep->statsp->rdc_ksp[channel] = 0;
275 }
276
277 nxge_rxdma_hw_stop(nxgep, channel);
278 nxge_unmap_rxdma(nxgep, channel);
279
 280 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_uninit_rxdma_channel"));
281 }
282
283 nxge_status_t
 284 nxge_reset_rxdma_channel(p_nxge_t nxgep, uint16_t channel)
285 {
286 npi_handle_t handle;
287 npi_status_t rs = NPI_SUCCESS;
288 nxge_status_t status = NXGE_OK;
289
290 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_reset_rxdma_channel"));
291
292 handle = NXGE_DEV_NPI_HANDLE(nxgep);
293 rs = npi_rxdma_cfg_rdc_reset(handle, channel);
294
295 if (rs != NPI_SUCCESS) {
296 status = NXGE_ERROR | rs;
297 }
298
299 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_reset_rxdma_channel"));
300
301 return (status);
302 }
303
304 void
 305 nxge_rxdma_regs_dump_channels(p_nxge_t nxgep)
306 {
307 nxge_grp_set_t *set = &nxgep->rx_set;
308 int rdc;
309
310 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_regs_dump_channels"));
311
312 if (!isLDOMguest(nxgep)) {
313 npi_handle_t handle = NXGE_DEV_NPI_HANDLE(nxgep);
314 (void) npi_rxdma_dump_fzc_regs(handle);
315 }
316
317 if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) {
318 NXGE_DEBUG_MSG((nxgep, TX_CTL,
319 "nxge_rxdma_regs_dump_channels: "
320 "NULL ring pointer(s)"));
321 return;
322 }
323
324 if (set->owned.map == 0) {
325 NXGE_DEBUG_MSG((nxgep, RX_CTL,
326 "nxge_rxdma_regs_dump_channels: no channels"));
327 return;
328 }
329
330 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
331 if ((1 << rdc) & set->owned.map) {
332 rx_rbr_ring_t *ring =
333 nxgep->rx_rbr_rings->rbr_rings[rdc];
334 if (ring) {
335 (void) nxge_dump_rxdma_channel(nxgep, rdc);
336 }
337 }
338 }
339
340 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_regs_dump"));
341 }
342
343 nxge_status_t
 344 nxge_dump_rxdma_channel(p_nxge_t nxgep, uint8_t channel)
345 {
346 npi_handle_t handle;
347 npi_status_t rs = NPI_SUCCESS;
348 nxge_status_t status = NXGE_OK;
349
350 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_dump_rxdma_channel"));
351
352 handle = NXGE_DEV_NPI_HANDLE(nxgep);
353 rs = npi_rxdma_dump_rdc_regs(handle, channel);
354
355 if (rs != NPI_SUCCESS) {
356 status = NXGE_ERROR | rs;
357 }
358 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dump_rxdma_channel"));
359 return (status);
360 }
361
362 nxge_status_t
 363 nxge_init_rxdma_channel_event_mask(p_nxge_t nxgep, uint16_t channel,
364 p_rx_dma_ent_msk_t mask_p)
365 {
366 npi_handle_t handle;
367 npi_status_t rs = NPI_SUCCESS;
368 nxge_status_t status = NXGE_OK;
369
370 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
371 "<== nxge_init_rxdma_channel_event_mask"));
372
373 handle = NXGE_DEV_NPI_HANDLE(nxgep);
374 rs = npi_rxdma_event_mask(handle, OP_SET, channel, mask_p);
375 if (rs != NPI_SUCCESS) {
376 status = NXGE_ERROR | rs;
377 }
378
379 return (status);
380 }
381
382 nxge_status_t
 383 nxge_init_rxdma_channel_cntl_stat(p_nxge_t nxgep, uint16_t channel,
384 p_rx_dma_ctl_stat_t cs_p)
385 {
386 npi_handle_t handle;
387 npi_status_t rs = NPI_SUCCESS;
388 nxge_status_t status = NXGE_OK;
389
390 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
391 "<== nxge_init_rxdma_channel_cntl_stat"));
392
393 handle = NXGE_DEV_NPI_HANDLE(nxgep);
394 rs = npi_rxdma_control_status(handle, OP_SET, channel, cs_p);
395
396 if (rs != NPI_SUCCESS) {
397 status = NXGE_ERROR | rs;
398 }
399
400 return (status);
401 }
402
403 /*
404 * nxge_rxdma_cfg_rdcgrp_default_rdc
405 *
406 * Set the default RDC for an RDC Group (Table)
407 *
408 * Arguments:
409 * nxgep
410 * rdcgrp The group to modify
411 * rdc The new default RDC.
412 *
413 * Notes:
414 *
415 * NPI/NXGE function calls:
416 * npi_rxdma_cfg_rdc_table_default_rdc()
417 *
418 * Registers accessed:
419 * RDC_TBL_REG: FZC_ZCP + 0x10000
420 *
421 * Context:
422 * Service domain
423 */
424 nxge_status_t
 425 nxge_rxdma_cfg_rdcgrp_default_rdc(
426 p_nxge_t nxgep,
427 uint8_t rdcgrp,
428 uint8_t rdc)
429 {
430 npi_handle_t handle;
431 npi_status_t rs = NPI_SUCCESS;
432 p_nxge_dma_pt_cfg_t p_dma_cfgp;
433 p_nxge_rdc_grp_t rdc_grp_p;
434 uint8_t actual_rdcgrp, actual_rdc;
435
436 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
437 " ==> nxge_rxdma_cfg_rdcgrp_default_rdc"));
438 p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
439
440 handle = NXGE_DEV_NPI_HANDLE(nxgep);
441
442 /*
443 * This has to be rewritten. Do we even allow this anymore?
444 */
445 rdc_grp_p = &p_dma_cfgp->rdc_grps[rdcgrp];
446 RDC_MAP_IN(rdc_grp_p->map, rdc);
447 rdc_grp_p->def_rdc = rdc;
448
449 actual_rdcgrp = NXGE_ACTUAL_RDCGRP(nxgep, rdcgrp);
450 actual_rdc = NXGE_ACTUAL_RDC(nxgep, rdc);
451
452 rs = npi_rxdma_cfg_rdc_table_default_rdc(
453 handle, actual_rdcgrp, actual_rdc);
454
455 if (rs != NPI_SUCCESS) {
456 return (NXGE_ERROR | rs);
457 }
458 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
459 " <== nxge_rxdma_cfg_rdcgrp_default_rdc"));
460 return (NXGE_OK);
461 }
462
463 nxge_status_t
 464 nxge_rxdma_cfg_port_default_rdc(p_nxge_t nxgep, uint8_t port, uint8_t rdc)
465 {
466 npi_handle_t handle;
467
468 uint8_t actual_rdc;
469 npi_status_t rs = NPI_SUCCESS;
470
471 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
472 " ==> nxge_rxdma_cfg_port_default_rdc"));
473
474 handle = NXGE_DEV_NPI_HANDLE(nxgep);
475 actual_rdc = rdc; /* XXX Hack! */
476 rs = npi_rxdma_cfg_default_port_rdc(handle, port, actual_rdc);
477
478
479 if (rs != NPI_SUCCESS) {
480 return (NXGE_ERROR | rs);
481 }
482 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
483 " <== nxge_rxdma_cfg_port_default_rdc"));
484
485 return (NXGE_OK);
486 }
487
488 nxge_status_t
 489 nxge_rxdma_cfg_rcr_threshold(p_nxge_t nxgep, uint8_t channel,
490 uint16_t pkts)
491 {
492 npi_status_t rs = NPI_SUCCESS;
493 npi_handle_t handle;
494 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
495 " ==> nxge_rxdma_cfg_rcr_threshold"));
496 handle = NXGE_DEV_NPI_HANDLE(nxgep);
497
498 rs = npi_rxdma_cfg_rdc_rcr_threshold(handle, channel, pkts);
499
500 if (rs != NPI_SUCCESS) {
501 return (NXGE_ERROR | rs);
502 }
503 NXGE_DEBUG_MSG((nxgep, RX2_CTL, " <== nxge_rxdma_cfg_rcr_threshold"));
504 return (NXGE_OK);
505 }
506
507 nxge_status_t
 508 nxge_rxdma_cfg_rcr_timeout(p_nxge_t nxgep, uint8_t channel,
509 uint16_t tout, uint8_t enable)
510 {
511 npi_status_t rs = NPI_SUCCESS;
512 npi_handle_t handle;
513 NXGE_DEBUG_MSG((nxgep, RX2_CTL, " ==> nxge_rxdma_cfg_rcr_timeout"));
514 handle = NXGE_DEV_NPI_HANDLE(nxgep);
515 if (enable == 0) {
516 rs = npi_rxdma_cfg_rdc_rcr_timeout_disable(handle, channel);
517 } else {
518 rs = npi_rxdma_cfg_rdc_rcr_timeout(handle, channel,
519 tout);
520 }
521
522 if (rs != NPI_SUCCESS) {
523 return (NXGE_ERROR | rs);
524 }
525 NXGE_DEBUG_MSG((nxgep, RX2_CTL, " <== nxge_rxdma_cfg_rcr_timeout"));
526 return (NXGE_OK);
527 }
528
529 nxge_status_t
 530 nxge_enable_rxdma_channel(p_nxge_t nxgep, uint16_t channel,
531 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p)
532 {
533 npi_handle_t handle;
534 rdc_desc_cfg_t rdc_desc;
535 p_rcrcfig_b_t cfgb_p;
536 npi_status_t rs = NPI_SUCCESS;
537
538 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel"));
539 handle = NXGE_DEV_NPI_HANDLE(nxgep);
540 /*
541 * Use configuration data composed at init time.
542 * Write to hardware the receive ring configurations.
543 */
544 rdc_desc.mbox_enable = 1;
545 rdc_desc.mbox_addr = mbox_p->mbox_addr;
546 NXGE_DEBUG_MSG((nxgep, RX_CTL,
547 "==> nxge_enable_rxdma_channel: mboxp $%p($%p)",
548 mbox_p->mbox_addr, rdc_desc.mbox_addr));
549
550 rdc_desc.rbr_len = rbr_p->rbb_max;
551 rdc_desc.rbr_addr = rbr_p->rbr_addr;
552
553 switch (nxgep->rx_bksize_code) {
554 case RBR_BKSIZE_4K:
555 rdc_desc.page_size = SIZE_4KB;
556 break;
557 case RBR_BKSIZE_8K:
558 rdc_desc.page_size = SIZE_8KB;
559 break;
560 case RBR_BKSIZE_16K:
561 rdc_desc.page_size = SIZE_16KB;
562 break;
563 case RBR_BKSIZE_32K:
564 rdc_desc.page_size = SIZE_32KB;
565 break;
566 }
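	/*
	 * Added note: rx_bksize_code is assumed to be one of the four
	 * supported RBR block-size codes (4KB-32KB) established at
	 * configuration time, so no default case is provided here.
	 */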
567
568 rdc_desc.size0 = rbr_p->npi_pkt_buf_size0;
569 rdc_desc.valid0 = 1;
570
571 rdc_desc.size1 = rbr_p->npi_pkt_buf_size1;
572 rdc_desc.valid1 = 1;
573
574 rdc_desc.size2 = rbr_p->npi_pkt_buf_size2;
575 rdc_desc.valid2 = 1;
576
577 rdc_desc.full_hdr = rcr_p->full_hdr_flag;
578 rdc_desc.offset = rcr_p->sw_priv_hdr_len;
579
580 rdc_desc.rcr_len = rcr_p->comp_size;
581 rdc_desc.rcr_addr = rcr_p->rcr_addr;
582
583 cfgb_p = &(rcr_p->rcr_cfgb);
584 rdc_desc.rcr_threshold = cfgb_p->bits.ldw.pthres;
585 /* For now, disable this timeout in a guest domain. */
586 if (isLDOMguest(nxgep)) {
587 rdc_desc.rcr_timeout = 0;
588 rdc_desc.rcr_timeout_enable = 0;
589 } else {
590 rdc_desc.rcr_timeout = cfgb_p->bits.ldw.timeout;
591 rdc_desc.rcr_timeout_enable = cfgb_p->bits.ldw.entout;
592 }
593
594 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel: "
595 "rbr_len qlen %d pagesize code %d rcr_len %d",
596 rdc_desc.rbr_len, rdc_desc.page_size, rdc_desc.rcr_len));
597 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel: "
598 "size 0 %d size 1 %d size 2 %d",
599 rbr_p->npi_pkt_buf_size0, rbr_p->npi_pkt_buf_size1,
600 rbr_p->npi_pkt_buf_size2));
601
602 if (nxgep->niu_hw_type == NIU_HW_TYPE_RF)
603 rs = npi_rxdma_cfg_rdc_ring(handle, rbr_p->rdc,
604 &rdc_desc, B_TRUE);
605 else
606 rs = npi_rxdma_cfg_rdc_ring(handle, rbr_p->rdc,
607 &rdc_desc, B_FALSE);
608 if (rs != NPI_SUCCESS) {
609 return (NXGE_ERROR | rs);
610 }
611
612 /*
613 * Enable the timeout and threshold.
614 */
615 rs = npi_rxdma_cfg_rdc_rcr_threshold(handle, channel,
616 rdc_desc.rcr_threshold);
617 if (rs != NPI_SUCCESS) {
618 return (NXGE_ERROR | rs);
619 }
620
621 rs = npi_rxdma_cfg_rdc_rcr_timeout(handle, channel,
622 rdc_desc.rcr_timeout);
623 if (rs != NPI_SUCCESS) {
624 return (NXGE_ERROR | rs);
625 }
626
627 if (!isLDOMguest(nxgep)) {
628 /* Enable the DMA */
629 rs = npi_rxdma_cfg_rdc_enable(handle, channel);
630 if (rs != NPI_SUCCESS) {
631 return (NXGE_ERROR | rs);
632 }
633 }
634
635 /* Kick the DMA engine. */
636 npi_rxdma_rdc_rbr_kick(handle, channel, rbr_p->rbb_max);
637
638 if (!isLDOMguest(nxgep)) {
639 /* Clear the rbr empty bit */
640 (void) npi_rxdma_channel_rbr_empty_clear(handle, channel);
641 }
642
643 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_enable_rxdma_channel"));
644
645 return (NXGE_OK);
646 }
647
648 nxge_status_t
 649 nxge_disable_rxdma_channel(p_nxge_t nxgep, uint16_t channel)
650 {
651 npi_handle_t handle;
652 npi_status_t rs = NPI_SUCCESS;
653
654 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_disable_rxdma_channel"));
655 handle = NXGE_DEV_NPI_HANDLE(nxgep);
656
657 /* disable the DMA */
658 rs = npi_rxdma_cfg_rdc_disable(handle, channel);
659 if (rs != NPI_SUCCESS) {
660 NXGE_DEBUG_MSG((nxgep, RX_CTL,
661 "<== nxge_disable_rxdma_channel:failed (0x%x)",
662 rs));
663 return (NXGE_ERROR | rs);
664 }
665
666 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_disable_rxdma_channel"));
667 return (NXGE_OK);
668 }
669
670 nxge_status_t
 671 nxge_rxdma_channel_rcrflush(p_nxge_t nxgep, uint8_t channel)
672 {
673 npi_handle_t handle;
674 nxge_status_t status = NXGE_OK;
675
676 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
677 "<== nxge_init_rxdma_channel_rcrflush"));
678
679 handle = NXGE_DEV_NPI_HANDLE(nxgep);
680 npi_rxdma_rdc_rcr_flush(handle, channel);
681
682 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
683 "<== nxge_init_rxdma_channel_rcrflsh"));
684 return (status);
685
686 }
687
688 #define MID_INDEX(l, r) ((r + l + 1) >> 1)
689
690 #define TO_LEFT -1
691 #define TO_RIGHT 1
692 #define BOTH_RIGHT (TO_RIGHT + TO_RIGHT)
693 #define BOTH_LEFT (TO_LEFT + TO_LEFT)
694 #define IN_MIDDLE (TO_RIGHT + TO_LEFT)
695 #define NO_HINT 0xffffffff
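/*
 * Added note on the direction encoding used by the binary search in
 * nxge_rxbuf_pp_to_vp(): base_side and end_side are each TO_LEFT (-1) or
 * TO_RIGHT (+1), so their sum is BOTH_LEFT (-2) when the address lies below
 * the anchor chunk, BOTH_RIGHT (+2) when it lies above it, and
 * IN_MIDDLE (0) when it falls inside the anchor chunk (i.e. found).
 */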
696
697 /*ARGSUSED*/
698 nxge_status_t
 699 nxge_rxbuf_pp_to_vp(p_nxge_t nxgep, p_rx_rbr_ring_t rbr_p,
700 uint8_t pktbufsz_type, uint64_t *pkt_buf_addr_pp,
701 uint64_t **pkt_buf_addr_p, uint32_t *bufoffset, uint32_t *msg_index)
702 {
703 int bufsize;
704 uint64_t pktbuf_pp;
705 uint64_t dvma_addr;
706 rxring_info_t *ring_info;
707 int base_side, end_side;
708 int r_index, l_index, anchor_index;
709 int found, search_done;
710 uint32_t offset, chunk_size, block_size, page_size_mask;
711 uint32_t chunk_index, block_index, total_index;
712 int max_iterations, iteration;
713 rxbuf_index_info_t *bufinfo;
714
715 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_rxbuf_pp_to_vp"));
716
717 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
718 "==> nxge_rxbuf_pp_to_vp: buf_pp $%p btype %d",
719 pkt_buf_addr_pp,
720 pktbufsz_type));
721 #if defined(__i386)
722 pktbuf_pp = (uint64_t)(uint32_t)pkt_buf_addr_pp;
723 #else
724 pktbuf_pp = (uint64_t)pkt_buf_addr_pp;
725 #endif
726
727 switch (pktbufsz_type) {
728 case 0:
729 bufsize = rbr_p->pkt_buf_size0;
730 break;
731 case 1:
732 bufsize = rbr_p->pkt_buf_size1;
733 break;
734 case 2:
735 bufsize = rbr_p->pkt_buf_size2;
736 break;
737 case RCR_SINGLE_BLOCK:
738 bufsize = 0;
739 anchor_index = 0;
740 break;
741 default:
742 return (NXGE_ERROR);
743 }
744
745 if (rbr_p->num_blocks == 1) {
746 anchor_index = 0;
747 ring_info = rbr_p->ring_info;
748 bufinfo = (rxbuf_index_info_t *)ring_info->buffer;
749 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
750 "==> nxge_rxbuf_pp_to_vp: (found, 1 block) "
751 "buf_pp $%p btype %d anchor_index %d "
752 "bufinfo $%p",
753 pkt_buf_addr_pp,
754 pktbufsz_type,
755 anchor_index,
756 bufinfo));
757
758 goto found_index;
759 }
760
761 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
762 "==> nxge_rxbuf_pp_to_vp: "
763 "buf_pp $%p btype %d anchor_index %d",
764 pkt_buf_addr_pp,
765 pktbufsz_type,
766 anchor_index));
767
768 ring_info = rbr_p->ring_info;
769 found = B_FALSE;
770 bufinfo = (rxbuf_index_info_t *)ring_info->buffer;
771 iteration = 0;
772 max_iterations = ring_info->max_iterations;
773 /*
774 * First check if this block has been seen
775 * recently. This is indicated by a hint which
776 * is initialized when the first buffer of the block
777 * is seen. The hint is reset when the last buffer of
778 * the block has been processed.
779 * As three block sizes are supported, three hints
780 * are kept. The idea behind the hints is that once
781 * the hardware uses a block for a buffer of that
782 * size, it will use it exclusively for that size
 783 	 * and will use it until it is exhausted. It is assumed
 784 	 * that there will be a single block in use for a given
 785 	 * buffer size at any given time.
786 */
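	/*
	 * Added note on the fast path: if the hint for this pktbufsz_type
	 * points at chunk k and pktbuf_pp falls within
	 * [dvma_addr, dvma_addr + chunk_size) of that chunk, the binary
	 * search further below is skipped entirely.
	 */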
787 if (ring_info->hint[pktbufsz_type] != NO_HINT) {
788 anchor_index = ring_info->hint[pktbufsz_type];
789 dvma_addr = bufinfo[anchor_index].dvma_addr;
790 chunk_size = bufinfo[anchor_index].buf_size;
791 if ((pktbuf_pp >= dvma_addr) &&
792 (pktbuf_pp < (dvma_addr + chunk_size))) {
793 found = B_TRUE;
794 /*
795 * check if this is the last buffer in the block
796 * If so, then reset the hint for the size;
797 */
798
799 if ((pktbuf_pp + bufsize) >= (dvma_addr + chunk_size))
800 ring_info->hint[pktbufsz_type] = NO_HINT;
801 }
802 }
803
804 if (found == B_FALSE) {
805 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
806 "==> nxge_rxbuf_pp_to_vp: (!found)"
807 "buf_pp $%p btype %d anchor_index %d",
808 pkt_buf_addr_pp,
809 pktbufsz_type,
810 anchor_index));
811
812 /*
813 * This is the first buffer of the block of this
814 * size. Need to search the whole information
815 * array.
816 * the search algorithm uses a binary tree search
817 * algorithm. It assumes that the information is
818 * already sorted with increasing order
819 * info[0] < info[1] < info[2] .... < info[n-1]
820 * where n is the size of the information array
821 */
822 r_index = rbr_p->num_blocks - 1;
823 l_index = 0;
824 search_done = B_FALSE;
825 anchor_index = MID_INDEX(r_index, l_index);
826 while (search_done == B_FALSE) {
827 if ((r_index == l_index) ||
828 (iteration >= max_iterations))
829 search_done = B_TRUE;
830 end_side = TO_RIGHT; /* to the right */
831 base_side = TO_LEFT; /* to the left */
832 /* read the DVMA address information and sort it */
833 dvma_addr = bufinfo[anchor_index].dvma_addr;
834 chunk_size = bufinfo[anchor_index].buf_size;
835 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
836 "==> nxge_rxbuf_pp_to_vp: (searching)"
837 "buf_pp $%p btype %d "
838 "anchor_index %d chunk_size %d dvmaaddr $%p",
839 pkt_buf_addr_pp,
840 pktbufsz_type,
841 anchor_index,
842 chunk_size,
843 dvma_addr));
844
845 if (pktbuf_pp >= dvma_addr)
846 base_side = TO_RIGHT; /* to the right */
847 if (pktbuf_pp < (dvma_addr + chunk_size))
848 end_side = TO_LEFT; /* to the left */
849
850 switch (base_side + end_side) {
851 case IN_MIDDLE:
852 /* found */
853 found = B_TRUE;
854 search_done = B_TRUE;
855 if ((pktbuf_pp + bufsize) <
856 (dvma_addr + chunk_size))
857 ring_info->hint[pktbufsz_type] =
858 bufinfo[anchor_index].buf_index;
859 break;
860 case BOTH_RIGHT:
861 /* not found: go to the right */
862 l_index = anchor_index + 1;
863 anchor_index = MID_INDEX(r_index, l_index);
864 break;
865
866 case BOTH_LEFT:
867 /* not found: go to the left */
868 r_index = anchor_index - 1;
869 anchor_index = MID_INDEX(r_index, l_index);
870 break;
871 default: /* should not come here */
872 return (NXGE_ERROR);
873 }
874 iteration++;
875 }
876
877 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
878 "==> nxge_rxbuf_pp_to_vp: (search done)"
879 "buf_pp $%p btype %d anchor_index %d",
880 pkt_buf_addr_pp,
881 pktbufsz_type,
882 anchor_index));
883 }
884
885 if (found == B_FALSE) {
886 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
887 "==> nxge_rxbuf_pp_to_vp: (search failed)"
888 "buf_pp $%p btype %d anchor_index %d",
889 pkt_buf_addr_pp,
890 pktbufsz_type,
891 anchor_index));
892 return (NXGE_ERROR);
893 }
894
895 found_index:
896 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
897 "==> nxge_rxbuf_pp_to_vp: (FOUND1)"
898 "buf_pp $%p btype %d bufsize %d anchor_index %d",
899 pkt_buf_addr_pp,
900 pktbufsz_type,
901 bufsize,
902 anchor_index));
903
904 /* index of the first block in this chunk */
905 chunk_index = bufinfo[anchor_index].start_index;
906 dvma_addr = bufinfo[anchor_index].dvma_addr;
907 page_size_mask = ring_info->block_size_mask;
908
909 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
910 "==> nxge_rxbuf_pp_to_vp: (FOUND3), get chunk)"
911 "buf_pp $%p btype %d bufsize %d "
912 "anchor_index %d chunk_index %d dvma $%p",
913 pkt_buf_addr_pp,
914 pktbufsz_type,
915 bufsize,
916 anchor_index,
917 chunk_index,
918 dvma_addr));
919
920 offset = pktbuf_pp - dvma_addr; /* offset within the chunk */
921 block_size = rbr_p->block_size; /* System block(page) size */
922
923 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
924 "==> nxge_rxbuf_pp_to_vp: (FOUND4), get chunk)"
925 "buf_pp $%p btype %d bufsize %d "
926 "anchor_index %d chunk_index %d dvma $%p "
927 "offset %d block_size %d",
928 pkt_buf_addr_pp,
929 pktbufsz_type,
930 bufsize,
931 anchor_index,
932 chunk_index,
933 dvma_addr,
934 offset,
935 block_size));
936
937 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> getting total index"));
938
939 block_index = (offset / block_size); /* index within chunk */
940 total_index = chunk_index + block_index;
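	/*
	 * Worked example (illustrative numbers only): with a 4 KB system
	 * block size, an offset of 20 KB into the chunk gives
	 * block_index == 5; if the chunk starts at message index 64
	 * (chunk_index), then total_index == 69.
	 */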
941
942
943 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
944 "==> nxge_rxbuf_pp_to_vp: "
945 "total_index %d dvma_addr $%p "
946 "offset %d block_size %d "
947 "block_index %d ",
948 total_index, dvma_addr,
949 offset, block_size,
950 block_index));
951 #if defined(__i386)
952 *pkt_buf_addr_p = (uint64_t *)((uint32_t)bufinfo[anchor_index].kaddr +
953 (uint32_t)offset);
954 #else
955 *pkt_buf_addr_p = (uint64_t *)((uint64_t)bufinfo[anchor_index].kaddr +
956 (uint64_t)offset);
957 #endif
958
959 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
960 "==> nxge_rxbuf_pp_to_vp: "
961 "total_index %d dvma_addr $%p "
962 "offset %d block_size %d "
963 "block_index %d "
964 "*pkt_buf_addr_p $%p",
965 total_index, dvma_addr,
966 offset, block_size,
967 block_index,
968 *pkt_buf_addr_p));
969
970
971 *msg_index = total_index;
972 *bufoffset = (offset & page_size_mask);
973
974 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
975 "==> nxge_rxbuf_pp_to_vp: get msg index: "
976 "msg_index %d bufoffset_index %d",
977 *msg_index,
978 *bufoffset));
979
980 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_rxbuf_pp_to_vp"));
981
982 return (NXGE_OK);
983 }
984
985 /*
986 * used by quick sort (qsort) function
987 * to perform comparison
988 */
989 static int
 990 nxge_sort_compare(const void *p1, const void *p2)
991 {
992
993 rxbuf_index_info_t *a, *b;
994
995 a = (rxbuf_index_info_t *)p1;
996 b = (rxbuf_index_info_t *)p2;
997
998 if (a->dvma_addr > b->dvma_addr)
999 return (1);
1000 if (a->dvma_addr < b->dvma_addr)
1001 return (-1);
1002 return (0);
1003 }
1004
1005
1006
1007 /*
1008 * grabbed this sort implementation from common/syscall/avl.c
1009 *
1010 */
1011 /*
1012 * Generic shellsort, from K&R (1st ed, p 58.), somewhat modified.
1013 * v = Ptr to array/vector of objs
1014 * n = # objs in the array
 1015  * s = size of each obj (must be a multiple of the word size)
 1016  * f = ptr to function to compare two objs
 1017  * returns (-1 = less than, 0 = equal, 1 = greater than)
1018 */
1019 void
 1020 nxge_ksort(caddr_t v, int n, int s, int (*f)())
1021 {
1022 int g, i, j, ii;
1023 unsigned int *p1, *p2;
1024 unsigned int tmp;
1025
1026 /* No work to do */
1027 if (v == NULL || n <= 1)
1028 return;
1029 /* Sanity check on arguments */
1030 ASSERT(((uintptr_t)v & 0x3) == 0 && (s & 0x3) == 0);
1031 ASSERT(s > 0);
1032
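	/*
	 * Classic shellsort gap sequence (n/2, n/4, ..., 1); out-of-order
	 * elements g apart are swapped word by word, which is why s must be
	 * a multiple of the word size (asserted above).
	 */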
1033 for (g = n / 2; g > 0; g /= 2) {
1034 for (i = g; i < n; i++) {
1035 for (j = i - g; j >= 0 &&
1036 (*f)(v + j * s, v + (j + g) * s) == 1;
1037 j -= g) {
1038 p1 = (unsigned *)(v + j * s);
1039 p2 = (unsigned *)(v + (j + g) * s);
1040 for (ii = 0; ii < s / 4; ii++) {
1041 tmp = *p1;
1042 *p1++ = *p2;
1043 *p2++ = tmp;
1044 }
1045 }
1046 }
1047 }
1048 }
1049
1050 /*
1051 * Initialize data structures required for rxdma
1052 * buffer dvma->vmem address lookup
1053 */
1054 /*ARGSUSED*/
1055 static nxge_status_t
 1056 nxge_rxbuf_index_info_init(p_nxge_t nxgep, p_rx_rbr_ring_t rbrp)
1057 {
1058
1059 int index;
1060 rxring_info_t *ring_info;
1061 int max_iteration = 0, max_index = 0;
1062
1063 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_rxbuf_index_info_init"));
1064
1065 ring_info = rbrp->ring_info;
1066 ring_info->hint[0] = NO_HINT;
1067 ring_info->hint[1] = NO_HINT;
1068 ring_info->hint[2] = NO_HINT;
1069 max_index = rbrp->num_blocks;
1070
1071 /* read the DVMA address information and sort it */
1072 /* do init of the information array */
1073
1074
1075 NXGE_DEBUG_MSG((nxgep, DMA2_CTL,
1076 " nxge_rxbuf_index_info_init Sort ptrs"));
1077
1078 /* sort the array */
1079 nxge_ksort((void *)ring_info->buffer, max_index,
1080 sizeof (rxbuf_index_info_t), nxge_sort_compare);
1081
1082
1083
1084 for (index = 0; index < max_index; index++) {
1085 NXGE_DEBUG_MSG((nxgep, DMA2_CTL,
1086 " nxge_rxbuf_index_info_init: sorted chunk %d "
1087 " ioaddr $%p kaddr $%p size %x",
1088 index, ring_info->buffer[index].dvma_addr,
1089 ring_info->buffer[index].kaddr,
1090 ring_info->buffer[index].buf_size));
1091 }
1092
1093 max_iteration = 0;
1094 while (max_index >= (1ULL << max_iteration))
1095 max_iteration++;
1096 ring_info->max_iterations = max_iteration + 1;
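	/*
	 * Added example: with 8 buffer chunks the loop above stops at
	 * max_iteration == 4 (the smallest power of two strictly greater
	 * than 8 is 2^4), so max_iterations == 5, which bounds the binary
	 * search in nxge_rxbuf_pp_to_vp().
	 */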
1097 NXGE_DEBUG_MSG((nxgep, DMA2_CTL,
1098 " nxge_rxbuf_index_info_init Find max iter %d",
1099 ring_info->max_iterations));
1100
1101 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxbuf_index_info_init"));
1102 return (NXGE_OK);
1103 }
1104
1105 /* ARGSUSED */
1106 void
 1107 nxge_dump_rcr_entry(p_nxge_t nxgep, p_rcr_entry_t entry_p)
1108 {
1109 #ifdef NXGE_DEBUG
1110
1111 uint32_t bptr;
1112 uint64_t pp;
1113
1114 bptr = entry_p->bits.hdw.pkt_buf_addr;
1115
1116 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1117 "\trcr entry $%p "
1118 "\trcr entry 0x%0llx "
1119 "\trcr entry 0x%08x "
1120 "\trcr entry 0x%08x "
1121 "\tvalue 0x%0llx\n"
1122 "\tmulti = %d\n"
1123 "\tpkt_type = 0x%x\n"
1124 "\tzero_copy = %d\n"
1125 "\tnoport = %d\n"
1126 "\tpromis = %d\n"
1127 "\terror = 0x%04x\n"
1128 "\tdcf_err = 0x%01x\n"
1129 "\tl2_len = %d\n"
1130 "\tpktbufsize = %d\n"
1131 "\tpkt_buf_addr = $%p\n"
1132 "\tpkt_buf_addr (<< 6) = $%p\n",
1133 entry_p,
1134 *(int64_t *)entry_p,
1135 *(int32_t *)entry_p,
1136 *(int32_t *)((char *)entry_p + 32),
1137 entry_p->value,
1138 entry_p->bits.hdw.multi,
1139 entry_p->bits.hdw.pkt_type,
1140 entry_p->bits.hdw.zero_copy,
1141 entry_p->bits.hdw.noport,
1142 entry_p->bits.hdw.promis,
1143 entry_p->bits.hdw.error,
1144 entry_p->bits.hdw.dcf_err,
1145 entry_p->bits.hdw.l2_len,
1146 entry_p->bits.hdw.pktbufsz,
1147 bptr,
1148 entry_p->bits.ldw.pkt_buf_addr));
1149
1150 pp = (entry_p->value & RCR_PKT_BUF_ADDR_MASK) <<
1151 RCR_PKT_BUF_ADDR_SHIFT;
1152
1153 NXGE_DEBUG_MSG((nxgep, RX_CTL, "rcr pp 0x%llx l2 len %d",
1154 pp, (*(int64_t *)entry_p >> 40) & 0x3fff));
1155 #endif
1156 }
1157
1158 void
 1159 nxge_rxdma_regs_dump(p_nxge_t nxgep, int rdc)
1160 {
1161 npi_handle_t handle;
1162 rbr_stat_t rbr_stat;
1163 addr44_t hd_addr;
1164 addr44_t tail_addr;
1165 uint16_t qlen;
1166
1167 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1168 "==> nxge_rxdma_regs_dump: rdc channel %d", rdc));
1169
1170 handle = NXGE_DEV_NPI_HANDLE(nxgep);
1171
1172 /* RBR head */
1173 hd_addr.addr = 0;
1174 (void) npi_rxdma_rdc_rbr_head_get(handle, rdc, &hd_addr);
1175 #if defined(__i386)
1176 printf("nxge_rxdma_regs_dump: got hdptr $%p \n",
1177 (void *)(uint32_t)hd_addr.addr);
1178 #else
1179 printf("nxge_rxdma_regs_dump: got hdptr $%p \n",
1180 (void *)hd_addr.addr);
1181 #endif
1182
1183 /* RBR stats */
1184 (void) npi_rxdma_rdc_rbr_stat_get(handle, rdc, &rbr_stat);
1185 printf("nxge_rxdma_regs_dump: rbr len %d \n", rbr_stat.bits.ldw.qlen);
1186
1187 /* RCR tail */
1188 tail_addr.addr = 0;
1189 (void) npi_rxdma_rdc_rcr_tail_get(handle, rdc, &tail_addr);
1190 #if defined(__i386)
1191 printf("nxge_rxdma_regs_dump: got tail ptr $%p \n",
1192 (void *)(uint32_t)tail_addr.addr);
1193 #else
1194 printf("nxge_rxdma_regs_dump: got tail ptr $%p \n",
1195 (void *)tail_addr.addr);
1196 #endif
1197
1198 /* RCR qlen */
1199 (void) npi_rxdma_rdc_rcr_qlen_get(handle, rdc, &qlen);
1200 printf("nxge_rxdma_regs_dump: rcr len %x \n", qlen);
1201
1202 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1203 "<== nxge_rxdma_regs_dump: rdc rdc %d", rdc));
1204 }
1205
1206 nxge_status_t
 1207 nxge_rxdma_hw_mode(p_nxge_t nxgep, boolean_t enable)
1208 {
1209 nxge_grp_set_t *set = &nxgep->rx_set;
1210 nxge_status_t status;
1211 npi_status_t rs;
1212 int rdc;
1213
1214 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
1215 "==> nxge_rxdma_hw_mode: mode %d", enable));
1216
1217 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
1218 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1219 "<== nxge_rxdma_mode: not initialized"));
1220 return (NXGE_ERROR);
1221 }
1222
1223 if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) {
1224 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1225 "<== nxge_tx_port_fatal_err_recover: "
1226 "NULL ring pointer(s)"));
1227 return (NXGE_ERROR);
1228 }
1229
1230 if (set->owned.map == 0) {
1231 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1232 "nxge_rxdma_regs_dump_channels: no channels"));
1233 return (NULL);
1234 }
1235
1236 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
1237 if ((1 << rdc) & set->owned.map) {
1238 rx_rbr_ring_t *ring =
1239 nxgep->rx_rbr_rings->rbr_rings[rdc];
1240 npi_handle_t handle = NXGE_DEV_NPI_HANDLE(nxgep);
1241 if (ring) {
1242 if (enable) {
1243 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
1244 "==> nxge_rxdma_hw_mode: "
1245 "channel %d (enable)", rdc));
1246 rs = npi_rxdma_cfg_rdc_enable
1247 (handle, rdc);
1248 } else {
1249 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
1250 "==> nxge_rxdma_hw_mode: "
1251 "channel %d disable)", rdc));
1252 rs = npi_rxdma_cfg_rdc_disable
1253 (handle, rdc);
1254 }
1255 }
1256 }
1257 }
1258
1259 status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs);
1260
1261 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
1262 "<== nxge_rxdma_hw_mode: status 0x%x", status));
1263
1264 return (status);
1265 }
1266
1267 void
 1268 nxge_rxdma_enable_channel(p_nxge_t nxgep, uint16_t channel)
1269 {
1270 npi_handle_t handle;
1271
1272 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
1273 "==> nxge_rxdma_enable_channel: channel %d", channel));
1274
1275 handle = NXGE_DEV_NPI_HANDLE(nxgep);
1276 (void) npi_rxdma_cfg_rdc_enable(handle, channel);
1277
1278 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxdma_enable_channel"));
1279 }
1280
1281 void
 1282 nxge_rxdma_disable_channel(p_nxge_t nxgep, uint16_t channel)
1283 {
1284 npi_handle_t handle;
1285
1286 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
1287 "==> nxge_rxdma_disable_channel: channel %d", channel));
1288
1289 handle = NXGE_DEV_NPI_HANDLE(nxgep);
1290 (void) npi_rxdma_cfg_rdc_disable(handle, channel);
1291
1292 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxdma_disable_channel"));
1293 }
1294
1295 void
 1296 nxge_hw_start_rx(p_nxge_t nxgep)
1297 {
1298 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_hw_start_rx"));
1299
1300 (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_START);
1301 (void) nxge_rx_mac_enable(nxgep);
1302
1303 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_hw_start_rx"));
1304 }
1305
1306 /*ARGSUSED*/
1307 void
 1308 nxge_fixup_rxdma_rings(p_nxge_t nxgep)
1309 {
1310 nxge_grp_set_t *set = &nxgep->rx_set;
1311 int rdc;
1312
1313 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_fixup_rxdma_rings"));
1314
1315 if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) {
1316 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1317 "<== nxge_tx_port_fatal_err_recover: "
1318 "NULL ring pointer(s)"));
1319 return;
1320 }
1321
1322 if (set->owned.map == 0) {
1323 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1324 "nxge_rxdma_regs_dump_channels: no channels"));
1325 return;
1326 }
1327
1328 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
1329 if ((1 << rdc) & set->owned.map) {
1330 rx_rbr_ring_t *ring =
1331 nxgep->rx_rbr_rings->rbr_rings[rdc];
1332 if (ring) {
1333 nxge_rxdma_hw_stop(nxgep, rdc);
1334 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1335 "==> nxge_fixup_rxdma_rings: "
1336 "channel %d ring $%px",
1337 rdc, ring));
1338 (void) nxge_rxdma_fix_channel(nxgep, rdc);
1339 }
1340 }
1341 }
1342
1343 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_fixup_rxdma_rings"));
1344 }
1345
1346 void
 1347 nxge_rxdma_fix_channel(p_nxge_t nxgep, uint16_t channel)
1348 {
1349 int ndmas;
1350 p_rx_rbr_rings_t rx_rbr_rings;
1351 p_rx_rbr_ring_t *rbr_rings;
1352 p_rx_rcr_rings_t rx_rcr_rings;
1353 p_rx_rcr_ring_t *rcr_rings;
1354 p_rx_mbox_areas_t rx_mbox_areas_p;
1355 p_rx_mbox_t *rx_mbox_p;
1356 p_nxge_dma_pool_t dma_buf_poolp;
1357 p_nxge_dma_pool_t dma_cntl_poolp;
1358 p_rx_rbr_ring_t rbrp;
1359 p_rx_rcr_ring_t rcrp;
1360 p_rx_mbox_t mboxp;
1361 p_nxge_dma_common_t dmap;
1362 nxge_status_t status = NXGE_OK;
1363
1364 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fix_channel"));
1365
1366 (void) nxge_rxdma_stop_channel(nxgep, channel);
1367
1368 dma_buf_poolp = nxgep->rx_buf_pool_p;
1369 dma_cntl_poolp = nxgep->rx_cntl_pool_p;
1370
1371 if (!dma_buf_poolp->buf_allocated || !dma_cntl_poolp->buf_allocated) {
1372 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
1373 "<== nxge_rxdma_fix_channel: buf not allocated"));
1374 return;
1375 }
1376
1377 ndmas = dma_buf_poolp->ndmas;
1378 if (!ndmas) {
1379 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
1380 "<== nxge_rxdma_fix_channel: no dma allocated"));
1381 return;
1382 }
1383
1384 rx_rbr_rings = nxgep->rx_rbr_rings;
1385 rx_rcr_rings = nxgep->rx_rcr_rings;
1386 rbr_rings = rx_rbr_rings->rbr_rings;
1387 rcr_rings = rx_rcr_rings->rcr_rings;
1388 rx_mbox_areas_p = nxgep->rx_mbox_areas_p;
1389 rx_mbox_p = rx_mbox_areas_p->rxmbox_areas;
1390
1391 /* Reinitialize the receive block and completion rings */
1392 rbrp = (p_rx_rbr_ring_t)rbr_rings[channel],
1393 rcrp = (p_rx_rcr_ring_t)rcr_rings[channel],
1394 mboxp = (p_rx_mbox_t)rx_mbox_p[channel];
1395
1396 rbrp->rbr_wr_index = (rbrp->rbb_max - 1);
1397 rbrp->rbr_rd_index = 0;
1398 rcrp->comp_rd_index = 0;
1399 rcrp->comp_wt_index = 0;
1400
1401 dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc;
1402 bzero((caddr_t)dmap->kaddrp, dmap->alength);
1403
1404 status = nxge_rxdma_start_channel(nxgep, channel,
1405 rbrp, rcrp, mboxp);
1406 if (status != NXGE_OK) {
1407 goto nxge_rxdma_fix_channel_fail;
1408 }
1409
1410 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1411 "<== nxge_rxdma_fix_channel: success (0x%08x)", status));
1412 return;
1413
1414 nxge_rxdma_fix_channel_fail:
1415 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1416 "<== nxge_rxdma_fix_channel: failed (0x%08x)", status));
1417 }
1418
1419 p_rx_rbr_ring_t
 1420 nxge_rxdma_get_rbr_ring(p_nxge_t nxgep, uint16_t channel)
1421 {
1422 nxge_grp_set_t *set = &nxgep->rx_set;
1423 nxge_channel_t rdc;
1424
1425 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1426 "==> nxge_rxdma_get_rbr_ring: channel %d", channel));
1427
1428 if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) {
1429 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1430 "<== nxge_rxdma_get_rbr_ring: "
1431 "NULL ring pointer(s)"));
1432 return (NULL);
1433 }
1434
1435 if (set->owned.map == 0) {
1436 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1437 "<== nxge_rxdma_get_rbr_ring: no channels"));
1438 return (NULL);
1439 }
1440
1441 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
1442 if ((1 << rdc) & set->owned.map) {
1443 rx_rbr_ring_t *ring =
1444 nxgep->rx_rbr_rings->rbr_rings[rdc];
1445 if (ring) {
1446 if (channel == ring->rdc) {
1447 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1448 "==> nxge_rxdma_get_rbr_ring: "
1449 "channel %d ring $%p", rdc, ring));
1450 return (ring);
1451 }
1452 }
1453 }
1454 }
1455
1456 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1457 "<== nxge_rxdma_get_rbr_ring: not found"));
1458
1459 return (NULL);
1460 }
1461
1462 p_rx_rcr_ring_t
 1463 nxge_rxdma_get_rcr_ring(p_nxge_t nxgep, uint16_t channel)
1464 {
1465 nxge_grp_set_t *set = &nxgep->rx_set;
1466 nxge_channel_t rdc;
1467
1468 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1469 "==> nxge_rxdma_get_rcr_ring: channel %d", channel));
1470
1471 if (nxgep->rx_rcr_rings == 0 || nxgep->rx_rcr_rings->rcr_rings == 0) {
1472 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1473 "<== nxge_rxdma_get_rcr_ring: "
1474 "NULL ring pointer(s)"));
1475 return (NULL);
1476 }
1477
1478 if (set->owned.map == 0) {
1479 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1480 "<== nxge_rxdma_get_rbr_ring: no channels"));
1481 return (NULL);
1482 }
1483
1484 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
1485 if ((1 << rdc) & set->owned.map) {
1486 rx_rcr_ring_t *ring =
1487 nxgep->rx_rcr_rings->rcr_rings[rdc];
1488 if (ring) {
1489 if (channel == ring->rdc) {
1490 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1491 "==> nxge_rxdma_get_rcr_ring: "
1492 "channel %d ring $%p", rdc, ring));
1493 return (ring);
1494 }
1495 }
1496 }
1497 }
1498
1499 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1500 "<== nxge_rxdma_get_rcr_ring: not found"));
1501
1502 return (NULL);
1503 }
1504
1505 /*
1506 * Static functions start here.
1507 */
1508 static p_rx_msg_t
 1509 nxge_allocb(size_t size, uint32_t pri, p_nxge_dma_common_t dmabuf_p)
1510 {
1511 p_rx_msg_t nxge_mp = NULL;
1512 p_nxge_dma_common_t dmamsg_p;
1513 uchar_t *buffer;
1514
1515 nxge_mp = KMEM_ZALLOC(sizeof (rx_msg_t), KM_NOSLEEP);
1516 if (nxge_mp == NULL) {
1517 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
1518 "Allocation of a rx msg failed."));
1519 goto nxge_allocb_exit;
1520 }
1521
1522 nxge_mp->use_buf_pool = B_FALSE;
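	/*
	 * When a pre-allocated DMA chunk is supplied, carve a 'size'-byte
	 * slice off its front for this message and advance the chunk's
	 * kernel address, I/O address, cookie and length bookkeeping;
	 * otherwise fall back to a plain kmem buffer below.
	 */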
1523 if (dmabuf_p) {
1524 nxge_mp->use_buf_pool = B_TRUE;
1525 dmamsg_p = (p_nxge_dma_common_t)&nxge_mp->buf_dma;
1526 *dmamsg_p = *dmabuf_p;
1527 dmamsg_p->nblocks = 1;
1528 dmamsg_p->block_size = size;
1529 dmamsg_p->alength = size;
1530 buffer = (uchar_t *)dmabuf_p->kaddrp;
1531
1532 dmabuf_p->kaddrp = (void *)
1533 ((char *)dmabuf_p->kaddrp + size);
1534 dmabuf_p->ioaddr_pp = (void *)
1535 ((char *)dmabuf_p->ioaddr_pp + size);
1536 dmabuf_p->alength -= size;
1537 dmabuf_p->offset += size;
1538 dmabuf_p->dma_cookie.dmac_laddress += size;
1539 dmabuf_p->dma_cookie.dmac_size -= size;
1540
1541 } else {
1542 buffer = KMEM_ALLOC(size, KM_NOSLEEP);
1543 if (buffer == NULL) {
1544 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
1545 "Allocation of a receive page failed."));
1546 goto nxge_allocb_fail1;
1547 }
1548 }
1549
1550 nxge_mp->rx_mblk_p = desballoc(buffer, size, pri, &nxge_mp->freeb);
1551 if (nxge_mp->rx_mblk_p == NULL) {
1552 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "desballoc failed."));
1553 goto nxge_allocb_fail2;
1554 }
1555
1556 nxge_mp->buffer = buffer;
1557 nxge_mp->block_size = size;
1558 nxge_mp->freeb.free_func = (void (*)())nxge_freeb;
1559 nxge_mp->freeb.free_arg = (caddr_t)nxge_mp;
1560 nxge_mp->ref_cnt = 1;
1561 nxge_mp->free = B_TRUE;
1562 nxge_mp->rx_use_bcopy = B_FALSE;
1563
1564 atomic_inc_32(&nxge_mblks_pending);
1565
1566 goto nxge_allocb_exit;
1567
1568 nxge_allocb_fail2:
1569 if (!nxge_mp->use_buf_pool) {
1570 KMEM_FREE(buffer, size);
1571 }
1572
1573 nxge_allocb_fail1:
1574 KMEM_FREE(nxge_mp, sizeof (rx_msg_t));
1575 nxge_mp = NULL;
1576
1577 nxge_allocb_exit:
1578 return (nxge_mp);
1579 }
1580
1581 p_mblk_t
 1582 nxge_dupb(p_rx_msg_t nxge_mp, uint_t offset, size_t size)
1583 {
1584 p_mblk_t mp;
1585
1586 NXGE_DEBUG_MSG((NULL, MEM_CTL, "==> nxge_dupb"));
1587 NXGE_DEBUG_MSG((NULL, MEM_CTL, "nxge_mp = $%p "
1588 "offset = 0x%08X "
1589 "size = 0x%08X",
1590 nxge_mp, offset, size));
1591
1592 mp = desballoc(&nxge_mp->buffer[offset], size,
1593 0, &nxge_mp->freeb);
1594 if (mp == NULL) {
1595 NXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed"));
1596 goto nxge_dupb_exit;
1597 }
1598 atomic_inc_32(&nxge_mp->ref_cnt);
1599
1600
1601 nxge_dupb_exit:
1602 NXGE_DEBUG_MSG((NULL, MEM_CTL, "<== nxge_dupb mp = $%p",
1603 nxge_mp));
1604 return (mp);
1605 }
1606
1607 p_mblk_t
 1608 nxge_dupb_bcopy(p_rx_msg_t nxge_mp, uint_t offset, size_t size)
1609 {
1610 p_mblk_t mp;
1611 uchar_t *dp;
1612
1613 mp = allocb(size + NXGE_RXBUF_EXTRA, 0);
1614 if (mp == NULL) {
 1615 		NXGE_DEBUG_MSG((NULL, RX_CTL, "allocb failed"));
1616 goto nxge_dupb_bcopy_exit;
1617 }
1618 dp = mp->b_rptr = mp->b_rptr + NXGE_RXBUF_EXTRA;
1619 bcopy((void *)&nxge_mp->buffer[offset], dp, size);
1620 mp->b_wptr = dp + size;
1621
1622 nxge_dupb_bcopy_exit:
1623 NXGE_DEBUG_MSG((NULL, MEM_CTL, "<== nxge_dupb mp = $%p",
1624 nxge_mp));
1625 return (mp);
1626 }
1627
1628 void nxge_post_page(p_nxge_t nxgep, p_rx_rbr_ring_t rx_rbr_p,
1629 p_rx_msg_t rx_msg_p);
1630
1631 void
 1632 nxge_post_page(p_nxge_t nxgep, p_rx_rbr_ring_t rx_rbr_p, p_rx_msg_t rx_msg_p)
1633 {
1634 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_post_page"));
1635
1636 /* Reuse this buffer */
1637 rx_msg_p->free = B_FALSE;
1638 rx_msg_p->cur_usage_cnt = 0;
1639 rx_msg_p->max_usage_cnt = 0;
1640 rx_msg_p->pkt_buf_size = 0;
1641
1642 if (rx_rbr_p->rbr_use_bcopy) {
1643 rx_msg_p->rx_use_bcopy = B_FALSE;
1644 atomic_dec_32(&rx_rbr_p->rbr_consumed);
1645 }
1646
1647 /*
 1648 	 * Advance the RBR write index (with wrap) and repost this buffer.
1649 */
1650 MUTEX_ENTER(&rx_rbr_p->post_lock);
1651 rx_rbr_p->rbr_wr_index = ((rx_rbr_p->rbr_wr_index + 1) &
1652 rx_rbr_p->rbr_wrap_mask);
1653 rx_rbr_p->rbr_desc_vp[rx_rbr_p->rbr_wr_index] = rx_msg_p->shifted_addr;
1654 MUTEX_EXIT(&rx_rbr_p->post_lock);
1655 npi_rxdma_rdc_rbr_kick(NXGE_DEV_NPI_HANDLE(nxgep),
1656 rx_rbr_p->rdc, 1);
1657
1658 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1659 "<== nxge_post_page (channel %d post_next_index %d)",
1660 rx_rbr_p->rdc, rx_rbr_p->rbr_wr_index));
1661
1662 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_post_page"));
1663 }
1664
1665 void
 1666 nxge_freeb(p_rx_msg_t rx_msg_p)
1667 {
1668 size_t size;
1669 uchar_t *buffer = NULL;
1670 int ref_cnt;
1671 boolean_t free_state = B_FALSE;
1672
1673 rx_rbr_ring_t *ring = rx_msg_p->rx_rbr_p;
1674
1675 NXGE_DEBUG_MSG((NULL, MEM2_CTL, "==> nxge_freeb"));
1676 NXGE_DEBUG_MSG((NULL, MEM2_CTL,
1677 "nxge_freeb:rx_msg_p = $%p (block pending %d)",
1678 rx_msg_p, nxge_mblks_pending));
1679
1680 /*
1681 * First we need to get the free state, then
1682 * atomic decrement the reference count to prevent
1683 * the race condition with the interrupt thread that
1684 * is processing a loaned up buffer block.
1685 */
1686 free_state = rx_msg_p->free;
1687 ref_cnt = atomic_dec_32_nv(&rx_msg_p->ref_cnt);
1688 if (!ref_cnt) {
1689 atomic_dec_32(&nxge_mblks_pending);
1690 buffer = rx_msg_p->buffer;
1691 size = rx_msg_p->block_size;
1692 NXGE_DEBUG_MSG((NULL, MEM2_CTL, "nxge_freeb: "
1693 "will free: rx_msg_p = $%p (block pending %d)",
1694 rx_msg_p, nxge_mblks_pending));
1695
1696 if (!rx_msg_p->use_buf_pool) {
1697 KMEM_FREE(buffer, size);
1698 }
1699
1700 KMEM_FREE(rx_msg_p, sizeof (rx_msg_t));
1701
1702 if (ring) {
1703 /*
1704 * Decrement the receive buffer ring's reference
1705 * count, too.
1706 */
1707 atomic_dec_32(&ring->rbr_ref_cnt);
1708
1709 /*
1710 * Free the receive buffer ring, if
1711 * 1. all the receive buffers have been freed
1712 * 2. and we are in the proper state (that is,
1713 * we are not UNMAPPING).
1714 */
1715 if (ring->rbr_ref_cnt == 0 &&
1716 ring->rbr_state == RBR_UNMAPPED) {
1717 /*
1718 * Free receive data buffers,
1719 * buffer index information
1720 * (rxring_info) and
1721 * the message block ring.
1722 */
1723 NXGE_DEBUG_MSG((NULL, RX_CTL,
1724 "nxge_freeb:rx_msg_p = $%p "
1725 "(block pending %d) free buffers",
1726 rx_msg_p, nxge_mblks_pending));
1727 nxge_rxdma_databuf_free(ring);
1728 if (ring->ring_info) {
1729 KMEM_FREE(ring->ring_info,
1730 sizeof (rxring_info_t));
1731 }
1732
1733 if (ring->rx_msg_ring) {
1734 KMEM_FREE(ring->rx_msg_ring,
1735 ring->tnblocks *
1736 sizeof (p_rx_msg_t));
1737 }
1738 KMEM_FREE(ring, sizeof (*ring));
1739 }
1740 }
1741 return;
1742 }
1743
1744 /*
1745 * Repost buffer.
1746 */
1747 if (free_state && (ref_cnt == 1) && ring) {
1748 NXGE_DEBUG_MSG((NULL, RX_CTL,
1749 "nxge_freeb: post page $%p:", rx_msg_p));
1750 if (ring->rbr_state == RBR_POSTING)
1751 nxge_post_page(rx_msg_p->nxgep, ring, rx_msg_p);
1752 }
1753
1754 NXGE_DEBUG_MSG((NULL, MEM2_CTL, "<== nxge_freeb"));
1755 }
1756
1757 uint_t
 1758 nxge_rx_intr(void *arg1, void *arg2)
1759 {
1760 p_nxge_ldv_t ldvp = (p_nxge_ldv_t)arg1;
1761 p_nxge_t nxgep = (p_nxge_t)arg2;
1762 p_nxge_ldg_t ldgp;
1763 uint8_t channel;
1764 npi_handle_t handle;
1765 rx_dma_ctl_stat_t cs;
1766 p_rx_rcr_ring_t rcrp;
1767 mblk_t *mp = NULL;
1768
1769 if (ldvp == NULL) {
1770 NXGE_DEBUG_MSG((NULL, INT_CTL,
1771 "<== nxge_rx_intr: arg2 $%p arg1 $%p",
1772 nxgep, ldvp));
1773 return (DDI_INTR_CLAIMED);
1774 }
1775
1776 if (arg2 == NULL || (void *)ldvp->nxgep != arg2) {
1777 nxgep = ldvp->nxgep;
1778 }
1779
1780 if ((!(nxgep->drv_state & STATE_HW_INITIALIZED)) ||
1781 (nxgep->nxge_mac_state != NXGE_MAC_STARTED)) {
1782 NXGE_DEBUG_MSG((nxgep, INT_CTL,
1783 "<== nxge_rx_intr: interface not started or intialized"));
1784 return (DDI_INTR_CLAIMED);
1785 }
1786
1787 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1788 "==> nxge_rx_intr: arg2 $%p arg1 $%p",
1789 nxgep, ldvp));
1790
1791 /*
1792 * Get the PIO handle.
1793 */
1794 handle = NXGE_DEV_NPI_HANDLE(nxgep);
1795
1796 /*
1797 * Get the ring to enable us to process packets.
1798 */
1799 rcrp = nxgep->rx_rcr_rings->rcr_rings[ldvp->vdma_index];
1800
1801 /*
1802 * The RCR ring lock must be held when packets
1803 * are being processed and the hardware registers are
1804 * being read or written to prevent race condition
1805 * among the interrupt thread, the polling thread
1806 * (will cause fatal errors such as rcrincon bit set)
1807 * and the setting of the poll_flag.
1808 */
1809 MUTEX_ENTER(&rcrp->lock);
1810
1811 /*
1812 * Get the control and status for this channel.
1813 */
1814 channel = ldvp->channel;
1815 ldgp = ldvp->ldgp;
1816
1817 if (!isLDOMguest(nxgep) && (!rcrp->started)) {
1818 NXGE_DEBUG_MSG((nxgep, INT_CTL,
1819 "<== nxge_rx_intr: channel is not started"));
1820
1821 /*
1822 * We received an interrupt before the ring is started.
1823 */
1824 RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
1825 &cs.value);
1826 cs.value &= RX_DMA_CTL_STAT_WR1C;
1827 cs.bits.hdw.mex = 1;
1828 RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
1829 cs.value);
1830
1831 /*
1832 * Rearm this logical group if this is a single device
1833 * group.
1834 */
1835 if (ldgp->nldvs == 1) {
1836 if (isLDOMguest(nxgep)) {
1837 nxge_hio_ldgimgn(nxgep, ldgp);
1838 } else {
1839 ldgimgm_t mgm;
1840
1841 mgm.value = 0;
1842 mgm.bits.ldw.arm = 1;
1843 mgm.bits.ldw.timer = ldgp->ldg_timer;
1844
1845 NXGE_REG_WR64(handle,
1846 LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg),
1847 mgm.value);
1848 }
1849 }
1850 MUTEX_EXIT(&rcrp->lock);
1851 return (DDI_INTR_CLAIMED);
1852 }
1853
1854 ASSERT(rcrp->ldgp == ldgp);
1855 ASSERT(rcrp->ldvp == ldvp);
1856
1857 RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel, &cs.value);
1858
1859 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_intr:channel %d "
1860 "cs 0x%016llx rcrto 0x%x rcrthres %x",
1861 channel,
1862 cs.value,
1863 cs.bits.hdw.rcrto,
1864 cs.bits.hdw.rcrthres));
1865
1866 if (!rcrp->poll_flag) {
1867 mp = nxge_rx_pkts(nxgep, rcrp, cs, -1);
1868 }
1869
1870 /* error events. */
1871 if (cs.value & RX_DMA_CTL_STAT_ERROR) {
1872 (void) nxge_rx_err_evnts(nxgep, channel, cs);
1873 }
1874
1875 /*
1876 * Enable the mailbox update interrupt if we want
1877 * to use mailbox. We probably don't need to use
1878 * mailbox as it only saves us one pio read.
1879 * Also write 1 to rcrthres and rcrto to clear
1880 * these two edge triggered bits.
1881 */
1882 cs.value &= RX_DMA_CTL_STAT_WR1C;
1883 cs.bits.hdw.mex = rcrp->poll_flag ? 0 : 1;
1884 RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
1885 cs.value);
1886
1887 /*
1888 * If the polling mode is enabled, disable the interrupt.
1889 */
1890 if (rcrp->poll_flag) {
1891 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
1892 "==> nxge_rx_intr: rdc %d ldgp $%p ldvp $%p "
1893 "(disabling interrupts)", channel, ldgp, ldvp));
1894
1895 /*
1896 * Disarm this logical group if this is a single device
1897 * group.
1898 */
1899 if (ldgp->nldvs == 1) {
1900 if (isLDOMguest(nxgep)) {
1901 ldgp->arm = B_FALSE;
1902 nxge_hio_ldgimgn(nxgep, ldgp);
1903 } else {
1904 ldgimgm_t mgm;
1905 mgm.value = 0;
1906 mgm.bits.ldw.arm = 0;
1907 NXGE_REG_WR64(handle,
1908 LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg),
1909 mgm.value);
1910 }
1911 }
1912 } else {
1913 /*
1914 * Rearm this logical group if this is a single device
1915 * group.
1916 */
1917 if (ldgp->nldvs == 1) {
1918 if (isLDOMguest(nxgep)) {
1919 nxge_hio_ldgimgn(nxgep, ldgp);
1920 } else {
1921 ldgimgm_t mgm;
1922
1923 mgm.value = 0;
1924 mgm.bits.ldw.arm = 1;
1925 mgm.bits.ldw.timer = ldgp->ldg_timer;
1926
1927 NXGE_REG_WR64(handle,
1928 LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg),
1929 mgm.value);
1930 }
1931 }
1932
1933 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
1934 "==> nxge_rx_intr: rdc %d ldgp $%p "
1935 "exiting ISR (and call mac_rx_ring)", channel, ldgp));
1936 }
1937 MUTEX_EXIT(&rcrp->lock);
1938
1939 if (mp != NULL) {
1940 mac_rx_ring(nxgep->mach, rcrp->rcr_mac_handle, mp,
1941 rcrp->rcr_gen_num);
1942 }
1943 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rx_intr: DDI_INTR_CLAIMED"));
1944 return (DDI_INTR_CLAIMED);
1945 }
1946
1947 /*
1948 * This routine is the main packet receive processing function.
1949 * It gets the packet type, error code, and buffer related
1950 * information from the receive completion entry.
1951 * How many completion entries to process is based on the number of packets
1952 * queued by the hardware, a hardware maintained tail pointer
1953 * and a configurable receive packet count.
1954 *
1955  * A chain of message blocks will be created as a result of processing
1956  * the completion entries. This chain of message blocks will be returned and
1957  * a hardware control status register will be updated with the number of
1958  * packets that were removed from the hardware queue.
1959 *
1960 * The RCR ring lock is held when entering this function.
1961 */
1962 static mblk_t *
1963 nxge_rx_pkts(p_nxge_t nxgep, p_rx_rcr_ring_t rcr_p, rx_dma_ctl_stat_t cs,
1964 int bytes_to_pickup)
1965 {
1966 npi_handle_t handle;
1967 uint8_t channel;
1968 uint32_t comp_rd_index;
1969 p_rcr_entry_t rcr_desc_rd_head_p;
1970 p_rcr_entry_t rcr_desc_rd_head_pp;
1971 p_mblk_t nmp, mp_cont, head_mp, *tail_mp;
1972 uint16_t qlen, nrcr_read, npkt_read;
1973 uint32_t qlen_hw;
1974 boolean_t multi;
1975 rcrcfig_b_t rcr_cfg_b;
1976 int totallen = 0;
1977 #if defined(_BIG_ENDIAN)
1978 npi_status_t rs = NPI_SUCCESS;
1979 #endif
1980
1981 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_rx_pkts: "
1982 "channel %d", rcr_p->rdc));
1983
1984 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
1985 return (NULL);
1986 }
1987 handle = NXGE_DEV_NPI_HANDLE(nxgep);
1988 channel = rcr_p->rdc;
1989
1990 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1991 "==> nxge_rx_pkts: START: rcr channel %d "
1992 "head_p $%p head_pp $%p index %d ",
1993 channel, rcr_p->rcr_desc_rd_head_p,
1994 rcr_p->rcr_desc_rd_head_pp,
1995 rcr_p->comp_rd_index));
1996
1997
1998 #if !defined(_BIG_ENDIAN)
1999 qlen = RXDMA_REG_READ32(handle, RCRSTAT_A_REG, channel) & 0xffff;
2000 #else
2001 rs = npi_rxdma_rdc_rcr_qlen_get(handle, channel, &qlen);
2002 if (rs != NPI_SUCCESS) {
2003 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts: "
2004 "channel %d, get qlen failed 0x%08x",
2005 channel, rs));
2006 return (NULL);
2007 }
2008 #endif
2009 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts:rcr channel %d "
2010 "qlen %d", channel, qlen));
2011
2012
2013
2014 if (!qlen) {
2015 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
2016 "==> nxge_rx_pkts:rcr channel %d "
2017 "qlen %d (no pkts)", channel, qlen));
2018
2019 return (NULL);
2020 }
2021
2022 comp_rd_index = rcr_p->comp_rd_index;
2023
2024 rcr_desc_rd_head_p = rcr_p->rcr_desc_rd_head_p;
2025 rcr_desc_rd_head_pp = rcr_p->rcr_desc_rd_head_pp;
2026 nrcr_read = npkt_read = 0;
2027
2028 /*
2029 * Number of packets queued
2030 	 * (A jumbo or multi-segment packet is counted as only one
2031 	 *  packet, though it may take up more than one completion entry).
2032 */
2033 qlen_hw = (qlen < nxge_max_rx_pkts) ?
2034 qlen : nxge_max_rx_pkts;
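	/*
	 * Illustration (numbers are hypothetical): if the hardware
	 * reports qlen = 512 but nxge_max_rx_pkts is tuned down to 256,
	 * at most 256 packets are pulled off the RCR in this pass; the
	 * remainder stay queued for the next interrupt or poll.
	 */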
2035 head_mp = NULL;
2036 tail_mp = &head_mp;
2037 nmp = mp_cont = NULL;
2038 multi = B_FALSE;
2039
2040 while (qlen_hw) {
2041
2042 #ifdef NXGE_DEBUG
2043 nxge_dump_rcr_entry(nxgep, rcr_desc_rd_head_p);
2044 #endif
2045 /*
2046 * Process one completion ring entry.
2047 */
2048 nxge_receive_packet(nxgep,
2049 rcr_p, rcr_desc_rd_head_p, &multi, &nmp, &mp_cont);
2050
2051 /*
2052 * message chaining modes
2053 */
2054 if (nmp) {
2055 nmp->b_next = NULL;
2056 if (!multi && !mp_cont) { /* frame fits a partition */
2057 *tail_mp = nmp;
2058 tail_mp = &nmp->b_next;
2059 totallen += MBLKL(nmp);
2060 nmp = NULL;
2061 } else if (multi && !mp_cont) { /* first segment */
2062 *tail_mp = nmp;
2063 tail_mp = &nmp->b_cont;
2064 totallen += MBLKL(nmp);
2065 } else if (multi && mp_cont) { /* mid of multi segs */
2066 *tail_mp = mp_cont;
2067 tail_mp = &mp_cont->b_cont;
2068 totallen += MBLKL(mp_cont);
2069 } else if (!multi && mp_cont) { /* last segment */
2070 *tail_mp = mp_cont;
2071 tail_mp = &nmp->b_next;
2072 totallen += MBLKL(mp_cont);
2073 nmp = NULL;
2074 }
2075 }
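		/*
		 * Chaining summary: complete single-buffer frames are
		 * appended to the return chain via b_next; a multi-buffer
		 * frame links its continuation buffers off its first
		 * mblk via b_cont, and only advances the b_next chain
		 * once its last segment has been processed.
		 */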
2076 NXGE_DEBUG_MSG((nxgep, RX_CTL,
2077 "==> nxge_rx_pkts: loop: rcr channel %d "
2078 "before updating: multi %d "
2079 "nrcr_read %d "
2080 "npk read %d "
2081 "head_pp $%p index %d ",
2082 channel,
2083 multi,
2084 nrcr_read, npkt_read, rcr_desc_rd_head_pp,
2085 comp_rd_index));
2086
2087 if (!multi) {
2088 qlen_hw--;
2089 npkt_read++;
2090 }
2091
2092 /*
2093 * Update the next read entry.
2094 */
2095 comp_rd_index = NEXT_ENTRY(comp_rd_index,
2096 rcr_p->comp_wrap_mask);
2097
2098 rcr_desc_rd_head_p = NEXT_ENTRY_PTR(rcr_desc_rd_head_p,
2099 rcr_p->rcr_desc_first_p,
2100 rcr_p->rcr_desc_last_p);
2101
2102 nrcr_read++;
2103
2104 NXGE_DEBUG_MSG((nxgep, RX_CTL,
2105 "<== nxge_rx_pkts: (SAM, process one packet) "
2106 "nrcr_read %d",
2107 nrcr_read));
2108 NXGE_DEBUG_MSG((nxgep, RX_CTL,
2109 "==> nxge_rx_pkts: loop: rcr channel %d "
2110 "multi %d "
2111 "nrcr_read %d "
2112 "npk read %d "
2113 "head_pp $%p index %d ",
2114 channel,
2115 multi,
2116 nrcr_read, npkt_read, rcr_desc_rd_head_pp,
2117 comp_rd_index));
2118
2119 if ((bytes_to_pickup != -1) &&
2120 (totallen >= bytes_to_pickup)) {
2121 break;
2122 }
2123 }
2124
2125 rcr_p->rcr_desc_rd_head_pp = rcr_desc_rd_head_pp;
2126 rcr_p->comp_rd_index = comp_rd_index;
2127 rcr_p->rcr_desc_rd_head_p = rcr_desc_rd_head_p;
2128 if ((nxgep->intr_timeout != rcr_p->intr_timeout) ||
2129 (nxgep->intr_threshold != rcr_p->intr_threshold)) {
2130
2131 rcr_p->intr_timeout = (nxgep->intr_timeout <
2132 NXGE_RDC_RCR_TIMEOUT_MIN) ? NXGE_RDC_RCR_TIMEOUT_MIN :
2133 nxgep->intr_timeout;
2134
2135 rcr_p->intr_threshold = (nxgep->intr_threshold <
2136 NXGE_RDC_RCR_THRESHOLD_MIN) ? NXGE_RDC_RCR_THRESHOLD_MIN :
2137 nxgep->intr_threshold;
2138
2139 rcr_cfg_b.value = 0x0ULL;
2140 rcr_cfg_b.bits.ldw.entout = 1;
2141 rcr_cfg_b.bits.ldw.timeout = rcr_p->intr_timeout;
2142 rcr_cfg_b.bits.ldw.pthres = rcr_p->intr_threshold;
2143
2144 RXDMA_REG_WRITE64(handle, RCRCFIG_B_REG,
2145 channel, rcr_cfg_b.value);
2146 }
2147
2148 cs.bits.ldw.pktread = npkt_read;
2149 cs.bits.ldw.ptrread = nrcr_read;
2150 RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG,
2151 channel, cs.value);
2152 NXGE_DEBUG_MSG((nxgep, RX_CTL,
2153 "==> nxge_rx_pkts: EXIT: rcr channel %d "
2154 "head_pp $%p index %016llx ",
2155 channel,
2156 rcr_p->rcr_desc_rd_head_pp,
2157 rcr_p->comp_rd_index));
2158 /*
2159 * Update RCR buffer pointer read and number of packets
2160 * read.
2161 */
2162
2163 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_rx_pkts: return"
2164 "channel %d", rcr_p->rdc));
2165
2166 return (head_mp);
2167 }
2168
2169 void
2170 nxge_receive_packet(p_nxge_t nxgep,
2171 p_rx_rcr_ring_t rcr_p, p_rcr_entry_t rcr_desc_rd_head_p,
2172 boolean_t *multi_p, mblk_t **mp, mblk_t **mp_cont)
2173 {
2174 p_mblk_t nmp = NULL;
2175 uint64_t multi;
2176 uint64_t dcf_err;
2177 uint8_t channel;
2178
2179 boolean_t first_entry = B_TRUE;
2180 boolean_t is_tcp_udp = B_FALSE;
2181 boolean_t buffer_free = B_FALSE;
2182 boolean_t error_send_up = B_FALSE;
2183 uint8_t error_type;
2184 uint16_t l2_len;
2185 uint16_t skip_len;
2186 uint8_t pktbufsz_type;
2187 uint64_t rcr_entry;
2188 uint64_t *pkt_buf_addr_pp;
2189 uint64_t *pkt_buf_addr_p;
2190 uint32_t buf_offset;
2191 uint32_t bsize;
2192 uint32_t error_disp_cnt;
2193 uint32_t msg_index;
2194 p_rx_rbr_ring_t rx_rbr_p;
2195 p_rx_msg_t *rx_msg_ring_p;
2196 p_rx_msg_t rx_msg_p;
2197 uint16_t sw_offset_bytes = 0, hdr_size = 0;
2198 nxge_status_t status = NXGE_OK;
2199 boolean_t is_valid = B_FALSE;
2200 p_nxge_rx_ring_stats_t rdc_stats;
2201 uint32_t bytes_read;
2202 uint64_t pkt_type;
2203 uint64_t frag;
2204 boolean_t pkt_too_long_err = B_FALSE;
2205 #ifdef NXGE_DEBUG
2206 int dump_len;
2207 #endif
2208 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_receive_packet"));
2209 first_entry = (*mp == NULL) ? B_TRUE : B_FALSE;
2210
2211 rcr_entry = *((uint64_t *)rcr_desc_rd_head_p);
2212
2213 multi = (rcr_entry & RCR_MULTI_MASK);
2214 dcf_err = (rcr_entry & RCR_DCF_ERROR_MASK);
2215 pkt_type = (rcr_entry & RCR_PKT_TYPE_MASK);
2216
2217 error_type = ((rcr_entry & RCR_ERROR_MASK) >> RCR_ERROR_SHIFT);
2218 frag = (rcr_entry & RCR_FRAG_MASK);
2219
2220 l2_len = ((rcr_entry & RCR_L2_LEN_MASK) >> RCR_L2_LEN_SHIFT);
2221
2222 pktbufsz_type = ((rcr_entry & RCR_PKTBUFSZ_MASK) >>
2223 RCR_PKTBUFSZ_SHIFT);
2224 #if defined(__i386)
2225 pkt_buf_addr_pp = (uint64_t *)(uint32_t)((rcr_entry &
2226 RCR_PKT_BUF_ADDR_MASK) << RCR_PKT_BUF_ADDR_SHIFT);
2227 #else
2228 pkt_buf_addr_pp = (uint64_t *)((rcr_entry & RCR_PKT_BUF_ADDR_MASK) <<
2229 RCR_PKT_BUF_ADDR_SHIFT);
2230 #endif
2231
2232 channel = rcr_p->rdc;
2233
2234 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2235 "==> nxge_receive_packet: entryp $%p entry 0x%0llx "
2236 "pkt_buf_addr_pp $%p l2_len %d multi 0x%llx "
2237 "error_type 0x%x pkt_type 0x%x "
2238 "pktbufsz_type %d ",
2239 rcr_desc_rd_head_p,
2240 rcr_entry, pkt_buf_addr_pp, l2_len,
2241 multi,
2242 error_type,
2243 pkt_type,
2244 pktbufsz_type));
2245
2246 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2247 "==> nxge_receive_packet: entryp $%p entry 0x%0llx "
2248 "pkt_buf_addr_pp $%p l2_len %d multi 0x%llx "
2249 "error_type 0x%x pkt_type 0x%x ", rcr_desc_rd_head_p,
2250 rcr_entry, pkt_buf_addr_pp, l2_len,
2251 multi,
2252 error_type,
2253 pkt_type));
2254
2255 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2256 "==> (rbr) nxge_receive_packet: entry 0x%0llx "
2257 "full pkt_buf_addr_pp $%p l2_len %d",
2258 rcr_entry, pkt_buf_addr_pp, l2_len));
2259
2260 /* get the stats ptr */
2261 rdc_stats = rcr_p->rdc_stats;
2262
2263 if (!l2_len) {
2264
2265 NXGE_DEBUG_MSG((nxgep, RX_CTL,
2266 "<== nxge_receive_packet: failed: l2 length is 0."));
2267 return;
2268 }
2269
2270 /*
2271 * Software workaround for BMAC hardware limitation that allows
2272 * maxframe size of 1526, instead of 1522 for non-jumbo and 0x2406
2273 * instead of 0x2400 for jumbo.
2274 */
2275 if (l2_len > nxgep->mac.maxframesize) {
2276 pkt_too_long_err = B_TRUE;
2277 }
2278
2279 /* Hardware sends us 4 bytes of CRC as no stripping is done. */
2280 l2_len -= ETHERFCSL;
2281
2282 /* shift 6 bits to get the full io address */
2283 #if defined(__i386)
2284 pkt_buf_addr_pp = (uint64_t *)((uint32_t)pkt_buf_addr_pp <<
2285 RCR_PKT_BUF_ADDR_SHIFT_FULL);
2286 #else
2287 pkt_buf_addr_pp = (uint64_t *)((uint64_t)pkt_buf_addr_pp <<
2288 RCR_PKT_BUF_ADDR_SHIFT_FULL);
2289 #endif
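	/*
	 * For illustration only: the completion entry stores the buffer
	 * address in 64-byte units, so a stored value of 0x12345 (a
	 * made-up number) becomes I/O address 0x12345 << 6 = 0x48d140
	 * after the full shift above.
	 */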
2290 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2291 "==> (rbr) nxge_receive_packet: entry 0x%0llx "
2292 "full pkt_buf_addr_pp $%p l2_len %d",
2293 rcr_entry, pkt_buf_addr_pp, l2_len));
2294
2295 rx_rbr_p = rcr_p->rx_rbr_p;
2296 rx_msg_ring_p = rx_rbr_p->rx_msg_ring;
2297
2298 if (first_entry) {
2299 hdr_size = (rcr_p->full_hdr_flag ? RXDMA_HDR_SIZE_FULL :
2300 RXDMA_HDR_SIZE_DEFAULT);
2301
2302 NXGE_DEBUG_MSG((nxgep, RX_CTL,
2303 "==> nxge_receive_packet: first entry 0x%016llx "
2304 "pkt_buf_addr_pp $%p l2_len %d hdr %d",
2305 rcr_entry, pkt_buf_addr_pp, l2_len,
2306 hdr_size));
2307 }
2308
2309 MUTEX_ENTER(&rx_rbr_p->lock);
2310
2311 NXGE_DEBUG_MSG((nxgep, RX_CTL,
2312 "==> (rbr 1) nxge_receive_packet: entry 0x%0llx "
2313 "full pkt_buf_addr_pp $%p l2_len %d",
2314 rcr_entry, pkt_buf_addr_pp, l2_len));
2315
2316 /*
2317 * Packet buffer address in the completion entry points
2318 * to the starting buffer address (offset 0).
2319 * Use the starting buffer address to locate the corresponding
2320 * kernel address.
2321 */
2322 status = nxge_rxbuf_pp_to_vp(nxgep, rx_rbr_p,
2323 pktbufsz_type, pkt_buf_addr_pp, &pkt_buf_addr_p,
2324 &buf_offset,
2325 &msg_index);
2326
2327 NXGE_DEBUG_MSG((nxgep, RX_CTL,
2328 "==> (rbr 2) nxge_receive_packet: entry 0x%0llx "
2329 "full pkt_buf_addr_pp $%p l2_len %d",
2330 rcr_entry, pkt_buf_addr_pp, l2_len));
2331
2332 if (status != NXGE_OK) {
2333 MUTEX_EXIT(&rx_rbr_p->lock);
2334 NXGE_DEBUG_MSG((nxgep, RX_CTL,
2335 "<== nxge_receive_packet: found vaddr failed %d",
2336 status));
2337 return;
2338 }
2339
2340 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2341 "==> (rbr 3) nxge_receive_packet: entry 0x%0llx "
2342 "full pkt_buf_addr_pp $%p l2_len %d",
2343 rcr_entry, pkt_buf_addr_pp, l2_len));
2344
2345 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2346 "==> (rbr 4 msgindex %d) nxge_receive_packet: entry 0x%0llx "
2347 "full pkt_buf_addr_pp $%p l2_len %d",
2348 msg_index, rcr_entry, pkt_buf_addr_pp, l2_len));
2349
2350 rx_msg_p = rx_msg_ring_p[msg_index];
2351
2352 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2353 "==> (rbr 4 msgindex %d) nxge_receive_packet: entry 0x%0llx "
2354 "full pkt_buf_addr_pp $%p l2_len %d",
2355 msg_index, rcr_entry, pkt_buf_addr_pp, l2_len));
2356
2357 switch (pktbufsz_type) {
2358 case RCR_PKTBUFSZ_0:
2359 bsize = rx_rbr_p->pkt_buf_size0_bytes;
2360 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2361 "==> nxge_receive_packet: 0 buf %d", bsize));
2362 break;
2363 case RCR_PKTBUFSZ_1:
2364 bsize = rx_rbr_p->pkt_buf_size1_bytes;
2365 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2366 "==> nxge_receive_packet: 1 buf %d", bsize));
2367 break;
2368 case RCR_PKTBUFSZ_2:
2369 bsize = rx_rbr_p->pkt_buf_size2_bytes;
2370 NXGE_DEBUG_MSG((nxgep, RX_CTL,
2371 "==> nxge_receive_packet: 2 buf %d", bsize));
2372 break;
2373 case RCR_SINGLE_BLOCK:
2374 bsize = rx_msg_p->block_size;
2375 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2376 "==> nxge_receive_packet: single %d", bsize));
2377
2378 break;
2379 default:
2380 MUTEX_EXIT(&rx_rbr_p->lock);
2381 return;
2382 }
2383
2384 switch (nxge_rdc_buf_offset) {
2385 case SW_OFFSET_NO_OFFSET:
2386 sw_offset_bytes = 0;
2387 break;
2388 case SW_OFFSET_64:
2389 sw_offset_bytes = 64;
2390 break;
2391 case SW_OFFSET_128:
2392 sw_offset_bytes = 128;
2393 break;
2394 case SW_OFFSET_192:
2395 sw_offset_bytes = 192;
2396 break;
2397 case SW_OFFSET_256:
2398 sw_offset_bytes = 256;
2399 break;
2400 case SW_OFFSET_320:
2401 sw_offset_bytes = 320;
2402 break;
2403 case SW_OFFSET_384:
2404 sw_offset_bytes = 384;
2405 break;
2406 case SW_OFFSET_448:
2407 sw_offset_bytes = 448;
2408 break;
2409 default:
2410 sw_offset_bytes = 0;
2411 break;
2412 }
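	/*
	 * nxge_rdc_buf_offset holds an SW_OFFSET_* code, not a byte
	 * count, so it is translated here into the number of bytes the
	 * hardware skipped ahead of the packet data.  This should agree
	 * with the offset programmed into the RXDMA configuration at
	 * ring setup time (see nxge_map_rxdma_channel_cfg_ring below).
	 */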
2413
2414 DMA_COMMON_SYNC_OFFSET(rx_msg_p->buf_dma,
2415 (buf_offset + sw_offset_bytes),
2416 (hdr_size + l2_len),
2417 DDI_DMA_SYNC_FORCPU);
2418
2419 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2420 "==> nxge_receive_packet: after first dump:usage count"));
2421
2422 if (rx_msg_p->cur_usage_cnt == 0) {
2423 if (rx_rbr_p->rbr_use_bcopy) {
2424 atomic_inc_32(&rx_rbr_p->rbr_consumed);
2425 if (rx_rbr_p->rbr_consumed <
2426 rx_rbr_p->rbr_threshold_hi) {
2427 if (rx_rbr_p->rbr_threshold_lo == 0 ||
2428 ((rx_rbr_p->rbr_consumed >=
2429 rx_rbr_p->rbr_threshold_lo) &&
2430 (rx_rbr_p->rbr_bufsize_type >=
2431 pktbufsz_type))) {
2432 rx_msg_p->rx_use_bcopy = B_TRUE;
2433 }
2434 } else {
2435 rx_msg_p->rx_use_bcopy = B_TRUE;
2436 }
2437 }
2438 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2439 "==> nxge_receive_packet: buf %d (new block) ",
2440 bsize));
2441
2442 rx_msg_p->pkt_buf_size_code = pktbufsz_type;
2443 rx_msg_p->pkt_buf_size = bsize;
2444 rx_msg_p->cur_usage_cnt = 1;
2445 if (pktbufsz_type == RCR_SINGLE_BLOCK) {
2446 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2447 "==> nxge_receive_packet: buf %d "
2448 "(single block) ",
2449 bsize));
2450 /*
2451 * Buffer can be reused once the free function
2452 * is called.
2453 */
2454 rx_msg_p->max_usage_cnt = 1;
2455 buffer_free = B_TRUE;
2456 } else {
2457 rx_msg_p->max_usage_cnt = rx_msg_p->block_size/bsize;
2458 if (rx_msg_p->max_usage_cnt == 1) {
2459 buffer_free = B_TRUE;
2460 }
2461 }
2462 } else {
2463 rx_msg_p->cur_usage_cnt++;
2464 if (rx_msg_p->cur_usage_cnt == rx_msg_p->max_usage_cnt) {
2465 buffer_free = B_TRUE;
2466 }
2467 }
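	/*
	 * Worked example (sizes are hypothetical): with a 4 KB buffer
	 * block and a 2 KB packet buffer size, max_usage_cnt is
	 * 4096 / 2048 = 2, so the block becomes eligible for reposting
	 * after two packets have consumed it.
	 */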
2468
2469 NXGE_DEBUG_MSG((nxgep, RX_CTL,
2470 "msgbuf index = %d l2len %d bytes usage %d max_usage %d ",
2471 msg_index, l2_len,
2472 rx_msg_p->cur_usage_cnt, rx_msg_p->max_usage_cnt));
2473
2474 if ((error_type) || (dcf_err) || (pkt_too_long_err)) {
2475 rdc_stats->ierrors++;
2476 if (dcf_err) {
2477 rdc_stats->dcf_err++;
2478 #ifdef NXGE_DEBUG
2479 if (!rdc_stats->dcf_err) {
2480 NXGE_DEBUG_MSG((nxgep, RX_CTL,
2481 "nxge_receive_packet: channel %d dcf_err rcr"
2482 " 0x%llx", channel, rcr_entry));
2483 }
2484 #endif
2485 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, NULL,
2486 NXGE_FM_EREPORT_RDMC_DCF_ERR);
2487 } else if (pkt_too_long_err) {
2488 rdc_stats->pkt_too_long_err++;
2489 NXGE_DEBUG_MSG((nxgep, RX_CTL, " nxge_receive_packet:"
2490 " channel %d packet length [%d] > "
2491 "maxframesize [%d]", channel, l2_len + ETHERFCSL,
2492 nxgep->mac.maxframesize));
2493 } else {
2494 /* Update error stats */
2495 error_disp_cnt = NXGE_ERROR_SHOW_MAX;
2496 rdc_stats->errlog.compl_err_type = error_type;
2497
2498 switch (error_type) {
2499 /*
2500 * Do not send FMA ereport for RCR_L2_ERROR and
2501 * RCR_L4_CSUM_ERROR because most likely they indicate
2502 * back pressure rather than HW failures.
2503 */
2504 case RCR_L2_ERROR:
2505 rdc_stats->l2_err++;
2506 if (rdc_stats->l2_err <
2507 error_disp_cnt) {
2508 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2509 " nxge_receive_packet:"
2510 " channel %d RCR L2_ERROR",
2511 channel));
2512 }
2513 break;
2514 case RCR_L4_CSUM_ERROR:
2515 error_send_up = B_TRUE;
2516 rdc_stats->l4_cksum_err++;
2517 if (rdc_stats->l4_cksum_err <
2518 error_disp_cnt) {
2519 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2520 " nxge_receive_packet:"
2521 " channel %d"
2522 " RCR L4_CSUM_ERROR", channel));
2523 }
2524 break;
2525 /*
2526 * Do not send FMA ereport for RCR_FFLP_SOFT_ERROR and
2527 * RCR_ZCP_SOFT_ERROR because they reflect the same
2528 * FFLP and ZCP errors that have been reported by
2529 * nxge_fflp.c and nxge_zcp.c.
2530 */
2531 case RCR_FFLP_SOFT_ERROR:
2532 error_send_up = B_TRUE;
2533 rdc_stats->fflp_soft_err++;
2534 if (rdc_stats->fflp_soft_err <
2535 error_disp_cnt) {
2536 NXGE_ERROR_MSG((nxgep,
2537 NXGE_ERR_CTL,
2538 " nxge_receive_packet:"
2539 " channel %d"
2540 " RCR FFLP_SOFT_ERROR", channel));
2541 }
2542 break;
2543 case RCR_ZCP_SOFT_ERROR:
2544 error_send_up = B_TRUE;
2545 			rdc_stats->zcp_soft_err++;
2546 if (rdc_stats->zcp_soft_err <
2547 error_disp_cnt)
2548 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2549 " nxge_receive_packet: Channel %d"
2550 " RCR ZCP_SOFT_ERROR", channel));
2551 break;
2552 default:
2553 rdc_stats->rcr_unknown_err++;
2554 if (rdc_stats->rcr_unknown_err
2555 < error_disp_cnt) {
2556 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2557 " nxge_receive_packet: Channel %d"
2558 " RCR entry 0x%llx error 0x%x",
2559 					channel, rcr_entry, error_type));
2560 }
2561 break;
2562 }
2563 }
2564
2565 /*
2566 * Update and repost buffer block if max usage
2567 * count is reached.
2568 */
2569 if (error_send_up == B_FALSE) {
2570 atomic_inc_32(&rx_msg_p->ref_cnt);
2571 if (buffer_free == B_TRUE) {
2572 rx_msg_p->free = B_TRUE;
2573 }
2574
2575 MUTEX_EXIT(&rx_rbr_p->lock);
2576 nxge_freeb(rx_msg_p);
2577 return;
2578 }
2579 }
2580
2581 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2582 "==> nxge_receive_packet: DMA sync second "));
2583
2584 bytes_read = rcr_p->rcvd_pkt_bytes;
2585 skip_len = sw_offset_bytes + hdr_size;
2586 if (!rx_msg_p->rx_use_bcopy) {
2587 /*
2588 		 * For loaned-up buffers, the driver reference count
2589 		 * is incremented first and then the free state is set.
2590 */
2591 if ((nmp = nxge_dupb(rx_msg_p, buf_offset, bsize)) != NULL) {
2592 if (first_entry) {
2593 nmp->b_rptr = &nmp->b_rptr[skip_len];
2594 if (l2_len < bsize - skip_len) {
2595 nmp->b_wptr = &nmp->b_rptr[l2_len];
2596 } else {
2597 nmp->b_wptr = &nmp->b_rptr[bsize
2598 - skip_len];
2599 }
2600 } else {
2601 if (l2_len - bytes_read < bsize) {
2602 nmp->b_wptr =
2603 &nmp->b_rptr[l2_len - bytes_read];
2604 } else {
2605 nmp->b_wptr = &nmp->b_rptr[bsize];
2606 }
2607 }
2608 }
2609 } else {
2610 if (first_entry) {
2611 nmp = nxge_dupb_bcopy(rx_msg_p, buf_offset + skip_len,
2612 l2_len < bsize - skip_len ?
2613 l2_len : bsize - skip_len);
2614 } else {
2615 nmp = nxge_dupb_bcopy(rx_msg_p, buf_offset,
2616 l2_len - bytes_read < bsize ?
2617 l2_len - bytes_read : bsize);
2618 }
2619 }
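	/*
	 * In short: when rx_use_bcopy is set the payload is copied into
	 * a fresh mblk so the DMA buffer can be reposted right away;
	 * otherwise the DMA buffer itself is loaned up the stack via
	 * nxge_dupb() and is not reusable until it is freed back.
	 */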
2620 if (nmp != NULL) {
2621 if (first_entry) {
2622 /*
2623 * Jumbo packets may be received with more than one
2624 * buffer, increment ipackets for the first entry only.
2625 */
2626 rdc_stats->ipackets++;
2627
2628 /* Update ibytes for kstat. */
2629 rdc_stats->ibytes += skip_len
2630 + l2_len < bsize ? l2_len : bsize;
2631 /*
2632 * Update the number of bytes read so far for the
2633 * current frame.
2634 */
2635 bytes_read = nmp->b_wptr - nmp->b_rptr;
2636 } else {
2637 rdc_stats->ibytes += l2_len - bytes_read < bsize ?
2638 l2_len - bytes_read : bsize;
2639 bytes_read += nmp->b_wptr - nmp->b_rptr;
2640 }
2641
2642 NXGE_DEBUG_MSG((nxgep, RX_CTL,
2643 "==> nxge_receive_packet after dupb: "
2644 "rbr consumed %d "
2645 "pktbufsz_type %d "
2646 "nmp $%p rptr $%p wptr $%p "
2647 		    "buf_offset %d bsize %d l2_len %d skip_len %d",
2648 rx_rbr_p->rbr_consumed,
2649 pktbufsz_type,
2650 nmp, nmp->b_rptr, nmp->b_wptr,
2651 buf_offset, bsize, l2_len, skip_len));
2652 } else {
2653 cmn_err(CE_WARN, "!nxge_receive_packet: "
2654 "update stats (error)");
2655 atomic_inc_32(&rx_msg_p->ref_cnt);
2656 if (buffer_free == B_TRUE) {
2657 rx_msg_p->free = B_TRUE;
2658 }
2659 MUTEX_EXIT(&rx_rbr_p->lock);
2660 nxge_freeb(rx_msg_p);
2661 return;
2662 }
2663
2664 if (buffer_free == B_TRUE) {
2665 rx_msg_p->free = B_TRUE;
2666 }
2667
2668 is_valid = (nmp != NULL);
2669
2670 rcr_p->rcvd_pkt_bytes = bytes_read;
2671
2672 MUTEX_EXIT(&rx_rbr_p->lock);
2673
2674 if (rx_msg_p->free && rx_msg_p->rx_use_bcopy) {
2675 atomic_inc_32(&rx_msg_p->ref_cnt);
2676 nxge_freeb(rx_msg_p);
2677 }
2678
2679 if (is_valid) {
2680 nmp->b_cont = NULL;
2681 if (first_entry) {
2682 *mp = nmp;
2683 *mp_cont = NULL;
2684 } else {
2685 *mp_cont = nmp;
2686 }
2687 }
2688
2689 /*
2690 * ERROR, FRAG and PKT_TYPE are only reported in the first entry.
2691 * If a packet is not fragmented and no error bit is set, then
2692 * L4 checksum is OK.
2693 */
2694
2695 if (is_valid && !multi) {
2696 /*
2697 		 * If the checksum flag nxge_cksum_offload
2698 		 * is 1, both TCP and UDP packets can be sent
2699 		 * up with a good checksum. If the checksum flag
2700 		 * is set to 0, checksum reporting applies to
2701 		 * TCP packets only (workaround for a hardware bug).
2702 		 * If the checksum flag nxge_cksum_offload is
2703 		 * greater than 1, neither TCP nor UDP packets
2704 		 * have their hardware checksum results reported.
2705 */
2706 if (nxge_cksum_offload == 1) {
2707 is_tcp_udp = ((pkt_type == RCR_PKT_IS_TCP ||
2708 pkt_type == RCR_PKT_IS_UDP) ?
2709 B_TRUE: B_FALSE);
2710 } else if (!nxge_cksum_offload) {
2711 /* TCP checksum only. */
2712 is_tcp_udp = ((pkt_type == RCR_PKT_IS_TCP) ?
2713 B_TRUE: B_FALSE);
2714 }
2715
2716 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_receive_packet: "
2717 "is_valid 0x%x multi 0x%llx pkt %d frag %d error %d",
2718 is_valid, multi, is_tcp_udp, frag, error_type));
2719
2720 if (is_tcp_udp && !frag && !error_type) {
2721 mac_hcksum_set(nmp, 0, 0, 0, 0, HCK_FULLCKSUM_OK);
2722 NXGE_DEBUG_MSG((nxgep, RX_CTL,
2723 "==> nxge_receive_packet: Full tcp/udp cksum "
2724 "is_valid 0x%x multi 0x%llx pkt %d frag %d "
2725 "error %d",
2726 is_valid, multi, is_tcp_udp, frag, error_type));
2727 }
2728 }
2729
2730 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2731 "==> nxge_receive_packet: *mp 0x%016llx", *mp));
2732
2733 *multi_p = (multi == RCR_MULTI_MASK);
2734 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_receive_packet: "
2735 "multi %d nmp 0x%016llx *mp 0x%016llx *mp_cont 0x%016llx",
2736 *multi_p, nmp, *mp, *mp_cont));
2737 }
2738
2739 /*
2740 * Enable polling for a ring. Interrupt for the ring is disabled when
2741 * the nxge interrupt comes (see nxge_rx_intr).
2742 */
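/*
 * Rough sequence of the poll handshake: nxge_enable_poll() only sets
 * poll_flag; the next nxge_rx_intr() sees the flag, skips packet
 * processing, and disarms the logical group, after which the MAC layer
 * drains packets through nxge_rx_poll() until nxge_disable_poll()
 * clears the flag and rearms the interrupt.
 */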
2743 int
2744 nxge_enable_poll(void *arg)
2745 {
2746 p_nxge_ring_handle_t ring_handle = (p_nxge_ring_handle_t)arg;
2747 p_rx_rcr_ring_t ringp;
2748 p_nxge_t nxgep;
2749 p_nxge_ldg_t ldgp;
2750 uint32_t channel;
2751
2752 if (ring_handle == NULL) {
2753 ASSERT(ring_handle != NULL);
2754 return (0);
2755 }
2756
2757 nxgep = ring_handle->nxgep;
2758 channel = nxgep->pt_config.hw_config.start_rdc + ring_handle->index;
2759 ringp = nxgep->rx_rcr_rings->rcr_rings[channel];
2760 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
2761 "==> nxge_enable_poll: rdc %d ", ringp->rdc));
2762 ldgp = ringp->ldgp;
2763 if (ldgp == NULL) {
2764 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
2765 "==> nxge_enable_poll: rdc %d NULL ldgp: no change",
2766 ringp->rdc));
2767 return (0);
2768 }
2769
2770 MUTEX_ENTER(&ringp->lock);
2771 /* enable polling */
2772 if (ringp->poll_flag == 0) {
2773 ringp->poll_flag = 1;
2774 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
2775 "==> nxge_enable_poll: rdc %d set poll flag to 1",
2776 ringp->rdc));
2777 }
2778
2779 MUTEX_EXIT(&ringp->lock);
2780 return (0);
2781 }
2782 /*
2783 * Disable polling for a ring and enable its interrupt.
2784 */
2785 int
2786 nxge_disable_poll(void *arg)
2787 {
2788 p_nxge_ring_handle_t ring_handle = (p_nxge_ring_handle_t)arg;
2789 p_rx_rcr_ring_t ringp;
2790 p_nxge_t nxgep;
2791 uint32_t channel;
2792
2793 if (ring_handle == NULL) {
2794 ASSERT(ring_handle != NULL);
2795 return (0);
2796 }
2797
2798 nxgep = ring_handle->nxgep;
2799 channel = nxgep->pt_config.hw_config.start_rdc + ring_handle->index;
2800 ringp = nxgep->rx_rcr_rings->rcr_rings[channel];
2801
2802 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
2803 	    "==> nxge_disable_poll: rdc %d poll_flag %d", ringp->rdc, ringp->poll_flag));
2804
2805 MUTEX_ENTER(&ringp->lock);
2806
2807 /* disable polling: enable interrupt */
2808 if (ringp->poll_flag) {
2809 npi_handle_t handle;
2810 rx_dma_ctl_stat_t cs;
2811 uint8_t channel;
2812 p_nxge_ldg_t ldgp;
2813
2814 /*
2815 * Get the control and status for this channel.
2816 */
2817 handle = NXGE_DEV_NPI_HANDLE(nxgep);
2818 channel = ringp->rdc;
2819 RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG,
2820 channel, &cs.value);
2821
2822 /*
2823 * Enable mailbox update
2824 * Since packets were not read and the hardware uses
2825 * bits pktread and ptrread to update the queue
2826 * length, we need to set both bits to 0.
2827 */
2828 cs.bits.ldw.pktread = 0;
2829 cs.bits.ldw.ptrread = 0;
2830 cs.bits.hdw.mex = 1;
2831 RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
2832 cs.value);
2833
2834 /*
2835 * Rearm this logical group if this is a single device
2836 * group.
2837 */
2838 ldgp = ringp->ldgp;
2839 if (ldgp == NULL) {
2840 ringp->poll_flag = 0;
2841 MUTEX_EXIT(&ringp->lock);
2842 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
2843 "==> nxge_disable_poll: no ldgp rdc %d "
2844 			    "(still set poll to 0)", ringp->rdc));
2845 return (0);
2846 }
2847 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
2848 "==> nxge_disable_poll: rdc %d ldgp $%p (enable intr)",
2849 ringp->rdc, ldgp));
2850 if (ldgp->nldvs == 1) {
2851 if (isLDOMguest(nxgep)) {
2852 ldgp->arm = B_TRUE;
2853 nxge_hio_ldgimgn(nxgep, ldgp);
2854 } else {
2855 ldgimgm_t mgm;
2856 mgm.value = 0;
2857 mgm.bits.ldw.arm = 1;
2858 mgm.bits.ldw.timer = ldgp->ldg_timer;
2859 NXGE_REG_WR64(handle,
2860 LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg),
2861 mgm.value);
2862 }
2863 }
2864 ringp->poll_flag = 0;
2865 }
2866
2867 MUTEX_EXIT(&ringp->lock);
2868 return (0);
2869 }
2870
2871 /*
2872 * Poll 'bytes_to_pickup' bytes of message from the rx ring.
2873 */
2874 mblk_t *
2875 nxge_rx_poll(void *arg, int bytes_to_pickup)
2876 {
2877 p_nxge_ring_handle_t ring_handle = (p_nxge_ring_handle_t)arg;
2878 p_rx_rcr_ring_t rcr_p;
2879 p_nxge_t nxgep;
2880 npi_handle_t handle;
2881 rx_dma_ctl_stat_t cs;
2882 mblk_t *mblk;
2883 p_nxge_ldv_t ldvp;
2884 uint32_t channel;
2885
2886 nxgep = ring_handle->nxgep;
2887
2888 /*
2889 * Get the control and status for this channel.
2890 */
2891 handle = NXGE_DEV_NPI_HANDLE(nxgep);
2892 channel = nxgep->pt_config.hw_config.start_rdc + ring_handle->index;
2893 rcr_p = nxgep->rx_rcr_rings->rcr_rings[channel];
2894 MUTEX_ENTER(&rcr_p->lock);
2895 ASSERT(rcr_p->poll_flag == 1);
2896
2897 RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, rcr_p->rdc, &cs.value);
2898
2899 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
2900 "==> nxge_rx_poll: calling nxge_rx_pkts: rdc %d poll_flag %d",
2901 rcr_p->rdc, rcr_p->poll_flag));
2902 mblk = nxge_rx_pkts(nxgep, rcr_p, cs, bytes_to_pickup);
2903
2904 ldvp = rcr_p->ldvp;
2905 /* error events. */
2906 if (ldvp && (cs.value & RX_DMA_CTL_STAT_ERROR)) {
2907 (void) nxge_rx_err_evnts(nxgep, ldvp->vdma_index, cs);
2908 }
2909
2910 MUTEX_EXIT(&rcr_p->lock);
2911
2912 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
2913 "<== nxge_rx_poll: rdc %d mblk $%p", rcr_p->rdc, mblk));
2914 return (mblk);
2915 }
2916
2917
2918 /*ARGSUSED*/
2919 static nxge_status_t
2920 nxge_rx_err_evnts(p_nxge_t nxgep, int channel, rx_dma_ctl_stat_t cs)
2921 {
2922 p_nxge_rx_ring_stats_t rdc_stats;
2923 npi_handle_t handle;
2924 npi_status_t rs;
2925 boolean_t rxchan_fatal = B_FALSE;
2926 boolean_t rxport_fatal = B_FALSE;
2927 uint8_t portn;
2928 nxge_status_t status = NXGE_OK;
2929 uint32_t error_disp_cnt = NXGE_ERROR_SHOW_MAX;
2930 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_rx_err_evnts"));
2931
2932 handle = NXGE_DEV_NPI_HANDLE(nxgep);
2933 portn = nxgep->mac.portnum;
2934 rdc_stats = &nxgep->statsp->rdc_stats[channel];
2935
2936 if (cs.bits.hdw.rbr_tmout) {
2937 rdc_stats->rx_rbr_tmout++;
2938 NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
2939 NXGE_FM_EREPORT_RDMC_RBR_TMOUT);
2940 rxchan_fatal = B_TRUE;
2941 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2942 "==> nxge_rx_err_evnts: rx_rbr_timeout"));
2943 }
2944 if (cs.bits.hdw.rsp_cnt_err) {
2945 rdc_stats->rsp_cnt_err++;
2946 NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
2947 NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR);
2948 rxchan_fatal = B_TRUE;
2949 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2950 "==> nxge_rx_err_evnts(channel %d): "
2951 "rsp_cnt_err", channel));
2952 }
2953 if (cs.bits.hdw.byte_en_bus) {
2954 rdc_stats->byte_en_bus++;
2955 NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
2956 NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS);
2957 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2958 "==> nxge_rx_err_evnts(channel %d): "
2959 "fatal error: byte_en_bus", channel));
2960 rxchan_fatal = B_TRUE;
2961 }
2962 if (cs.bits.hdw.rsp_dat_err) {
2963 rdc_stats->rsp_dat_err++;
2964 NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
2965 NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR);
2966 rxchan_fatal = B_TRUE;
2967 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2968 "==> nxge_rx_err_evnts(channel %d): "
2969 "fatal error: rsp_dat_err", channel));
2970 }
2971 if (cs.bits.hdw.rcr_ack_err) {
2972 rdc_stats->rcr_ack_err++;
2973 NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
2974 NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR);
2975 rxchan_fatal = B_TRUE;
2976 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2977 "==> nxge_rx_err_evnts(channel %d): "
2978 "fatal error: rcr_ack_err", channel));
2979 }
2980 if (cs.bits.hdw.dc_fifo_err) {
2981 rdc_stats->dc_fifo_err++;
2982 NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
2983 NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR);
2984 /* This is not a fatal error! */
2985 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2986 "==> nxge_rx_err_evnts(channel %d): "
2987 "dc_fifo_err", channel));
2988 rxport_fatal = B_TRUE;
2989 }
2990 if ((cs.bits.hdw.rcr_sha_par) || (cs.bits.hdw.rbr_pre_par)) {
2991 if ((rs = npi_rxdma_ring_perr_stat_get(handle,
2992 &rdc_stats->errlog.pre_par,
2993 &rdc_stats->errlog.sha_par))
2994 != NPI_SUCCESS) {
2995 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2996 "==> nxge_rx_err_evnts(channel %d): "
2997 "rcr_sha_par: get perr", channel));
2998 return (NXGE_ERROR | rs);
2999 }
3000 if (cs.bits.hdw.rcr_sha_par) {
3001 rdc_stats->rcr_sha_par++;
3002 NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
3003 NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR);
3004 rxchan_fatal = B_TRUE;
3005 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3006 "==> nxge_rx_err_evnts(channel %d): "
3007 "fatal error: rcr_sha_par", channel));
3008 }
3009 if (cs.bits.hdw.rbr_pre_par) {
3010 rdc_stats->rbr_pre_par++;
3011 NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
3012 NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR);
3013 rxchan_fatal = B_TRUE;
3014 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3015 "==> nxge_rx_err_evnts(channel %d): "
3016 "fatal error: rbr_pre_par", channel));
3017 }
3018 }
3019 /*
3020 	 * The following 4 status bits are for information; the system
3021 * is running fine. There is no need to send FMA ereports or
3022 * log messages.
3023 */
3024 if (cs.bits.hdw.port_drop_pkt) {
3025 rdc_stats->port_drop_pkt++;
3026 }
3027 if (cs.bits.hdw.wred_drop) {
3028 rdc_stats->wred_drop++;
3029 }
3030 if (cs.bits.hdw.rbr_pre_empty) {
3031 rdc_stats->rbr_pre_empty++;
3032 }
3033 if (cs.bits.hdw.rcr_shadow_full) {
3034 rdc_stats->rcr_shadow_full++;
3035 }
3036 if (cs.bits.hdw.config_err) {
3037 rdc_stats->config_err++;
3038 NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
3039 NXGE_FM_EREPORT_RDMC_CONFIG_ERR);
3040 rxchan_fatal = B_TRUE;
3041 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3042 "==> nxge_rx_err_evnts(channel %d): "
3043 "config error", channel));
3044 }
3045 if (cs.bits.hdw.rcrincon) {
3046 rdc_stats->rcrincon++;
3047 NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
3048 NXGE_FM_EREPORT_RDMC_RCRINCON);
3049 rxchan_fatal = B_TRUE;
3050 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3051 "==> nxge_rx_err_evnts(channel %d): "
3052 "fatal error: rcrincon error", channel));
3053 }
3054 if (cs.bits.hdw.rcrfull) {
3055 rdc_stats->rcrfull++;
3056 NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
3057 NXGE_FM_EREPORT_RDMC_RCRFULL);
3058 rxchan_fatal = B_TRUE;
3059 if (rdc_stats->rcrfull < error_disp_cnt)
3060 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3061 "==> nxge_rx_err_evnts(channel %d): "
3062 "fatal error: rcrfull error", channel));
3063 }
3064 if (cs.bits.hdw.rbr_empty) {
3065 /*
3066 		 * This bit is for information only; there is no need
3067 		 * to send an FMA ereport or log a message.
3068 */
3069 rdc_stats->rbr_empty++;
3070 }
3071 if (cs.bits.hdw.rbrfull) {
3072 rdc_stats->rbrfull++;
3073 NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
3074 NXGE_FM_EREPORT_RDMC_RBRFULL);
3075 rxchan_fatal = B_TRUE;
3076 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3077 "==> nxge_rx_err_evnts(channel %d): "
3078 "fatal error: rbr_full error", channel));
3079 }
3080 if (cs.bits.hdw.rbrlogpage) {
3081 rdc_stats->rbrlogpage++;
3082 NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
3083 NXGE_FM_EREPORT_RDMC_RBRLOGPAGE);
3084 rxchan_fatal = B_TRUE;
3085 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3086 "==> nxge_rx_err_evnts(channel %d): "
3087 "fatal error: rbr logical page error", channel));
3088 }
3089 if (cs.bits.hdw.cfiglogpage) {
3090 rdc_stats->cfiglogpage++;
3091 NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
3092 NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE);
3093 rxchan_fatal = B_TRUE;
3094 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3095 "==> nxge_rx_err_evnts(channel %d): "
3096 "fatal error: cfig logical page error", channel));
3097 }
3098
3099 if (rxport_fatal) {
3100 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3101 " nxge_rx_err_evnts: fatal error on Port #%d\n",
3102 portn));
3103 if (isLDOMguest(nxgep)) {
3104 status = NXGE_ERROR;
3105 } else {
3106 status = nxge_ipp_fatal_err_recover(nxgep);
3107 if (status == NXGE_OK) {
3108 FM_SERVICE_RESTORED(nxgep);
3109 }
3110 }
3111 }
3112
3113 if (rxchan_fatal) {
3114 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3115 " nxge_rx_err_evnts: fatal error on Channel #%d\n",
3116 channel));
3117 if (isLDOMguest(nxgep)) {
3118 status = NXGE_ERROR;
3119 } else {
3120 status = nxge_rxdma_fatal_err_recover(nxgep, channel);
3121 if (status == NXGE_OK) {
3122 FM_SERVICE_RESTORED(nxgep);
3123 }
3124 }
3125 }
3126
3127 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_rx_err_evnts"));
3128
3129 return (status);
3130 }
3131
3132 /*
3133 * nxge_rdc_hvio_setup
3134 *
3135  * This code appears to set up some Hypervisor variables.
3136 *
3137 * Arguments:
3138 * nxgep
3139 * channel
3140 *
3141 * Notes:
3142 * What does NIU_LP_WORKAROUND mean?
3143 *
3144 * NPI/NXGE function calls:
3145 * na
3146 *
3147 * Context:
3148 * Any domain
3149 */
3150 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
3151 static void
3152 nxge_rdc_hvio_setup(
3153 nxge_t *nxgep, int channel)
3154 {
3155 nxge_dma_common_t *dma_common;
3156 nxge_dma_common_t *dma_control;
3157 rx_rbr_ring_t *ring;
3158
3159 ring = nxgep->rx_rbr_rings->rbr_rings[channel];
3160 dma_common = nxgep->rx_buf_pool_p->dma_buf_pool_p[channel];
3161
3162 ring->hv_set = B_FALSE;
3163
3164 ring->hv_rx_buf_base_ioaddr_pp = (uint64_t)
3165 dma_common->orig_ioaddr_pp;
3166 ring->hv_rx_buf_ioaddr_size = (uint64_t)
3167 dma_common->orig_alength;
3168
3169 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma_channel: "
3170 "channel %d data buf base io $%lx ($%p) size 0x%lx (%ld 0x%lx)",
3171 channel, ring->hv_rx_buf_base_ioaddr_pp,
3172 dma_common->ioaddr_pp, ring->hv_rx_buf_ioaddr_size,
3173 dma_common->orig_alength, dma_common->orig_alength));
3174
3175 dma_control = nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel];
3176
3177 ring->hv_rx_cntl_base_ioaddr_pp =
3178 (uint64_t)dma_control->orig_ioaddr_pp;
3179 ring->hv_rx_cntl_ioaddr_size =
3180 (uint64_t)dma_control->orig_alength;
3181
3182 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma_channel: "
3183 "channel %d cntl base io $%p ($%p) size 0x%llx (%d 0x%x)",
3184 channel, ring->hv_rx_cntl_base_ioaddr_pp,
3185 dma_control->ioaddr_pp, ring->hv_rx_cntl_ioaddr_size,
3186 dma_control->orig_alength, dma_control->orig_alength));
3187 }
3188 #endif
3189
3190 /*
3191 * nxge_map_rxdma
3192 *
3193 * Map an RDC into our kernel space.
3194 *
3195 * Arguments:
3196 * nxgep
3197 * channel The channel to map.
3198 *
3199 * Notes:
3200 * 1. Allocate & initialise a memory pool, if necessary.
3201 * 2. Allocate however many receive buffers are required.
3202 * 3. Setup buffers, descriptors, and mailbox.
3203 *
3204 * NPI/NXGE function calls:
3205 * nxge_alloc_rx_mem_pool()
3206 * nxge_alloc_rbb()
3207 * nxge_map_rxdma_channel()
3208 *
3209 * Registers accessed:
3210 *
3211 * Context:
3212 * Any domain
3213 */
3214 static nxge_status_t
3215 nxge_map_rxdma(p_nxge_t nxgep, int channel)
3216 {
3217 nxge_dma_common_t **data;
3218 nxge_dma_common_t **control;
3219 rx_rbr_ring_t **rbr_ring;
3220 rx_rcr_ring_t **rcr_ring;
3221 rx_mbox_t **mailbox;
3222 uint32_t chunks;
3223
3224 nxge_status_t status;
3225
3226 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma"));
3227
3228 if (!nxgep->rx_buf_pool_p) {
3229 if (nxge_alloc_rx_mem_pool(nxgep) != NXGE_OK) {
3230 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3231 "<== nxge_map_rxdma: buf not allocated"));
3232 return (NXGE_ERROR);
3233 }
3234 }
3235
3236 if (nxge_alloc_rxb(nxgep, channel) != NXGE_OK)
3237 return (NXGE_ERROR);
3238
3239 /*
3240 	 * Map descriptors from the buffer pools for each dma channel.
3241 */
3242
3243 /*
3244 * Set up and prepare buffer blocks, descriptors
3245 * and mailbox.
3246 */
3247 data = &nxgep->rx_buf_pool_p->dma_buf_pool_p[channel];
3248 rbr_ring = &nxgep->rx_rbr_rings->rbr_rings[channel];
3249 chunks = nxgep->rx_buf_pool_p->num_chunks[channel];
3250
3251 control = &nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel];
3252 rcr_ring = &nxgep->rx_rcr_rings->rcr_rings[channel];
3253
3254 mailbox = &nxgep->rx_mbox_areas_p->rxmbox_areas[channel];
3255
3256 status = nxge_map_rxdma_channel(nxgep, channel, data, rbr_ring,
3257 chunks, control, rcr_ring, mailbox);
3258 if (status != NXGE_OK) {
3259 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3260 "==> nxge_map_rxdma: nxge_map_rxdma_channel(%d) "
3261 "returned 0x%x",
3262 channel, status));
3263 return (status);
3264 }
3265 nxgep->rx_rbr_rings->rbr_rings[channel]->index = (uint16_t)channel;
3266 nxgep->rx_rcr_rings->rcr_rings[channel]->index = (uint16_t)channel;
3267 nxgep->rx_rcr_rings->rcr_rings[channel]->rdc_stats =
3268 &nxgep->statsp->rdc_stats[channel];
3269
3270 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
3271 if (!isLDOMguest(nxgep))
3272 nxge_rdc_hvio_setup(nxgep, channel);
3273 #endif
3274
3275 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3276 "<== nxge_map_rxdma: (status 0x%x channel %d)", status, channel));
3277
3278 return (status);
3279 }
3280
3281 static void
3282 nxge_unmap_rxdma(p_nxge_t nxgep, int channel)
3283 {
3284 rx_rbr_ring_t *rbr_ring;
3285 rx_rcr_ring_t *rcr_ring;
3286 rx_mbox_t *mailbox;
3287
3288 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_unmap_rxdma(%d)", channel));
3289
3290 if (!nxgep->rx_rbr_rings || !nxgep->rx_rcr_rings ||
3291 !nxgep->rx_mbox_areas_p)
3292 return;
3293
3294 rbr_ring = nxgep->rx_rbr_rings->rbr_rings[channel];
3295 rcr_ring = nxgep->rx_rcr_rings->rcr_rings[channel];
3296 mailbox = nxgep->rx_mbox_areas_p->rxmbox_areas[channel];
3297
3298 if (!rbr_ring || !rcr_ring || !mailbox)
3299 return;
3300
3301 (void) nxge_unmap_rxdma_channel(
3302 nxgep, channel, rbr_ring, rcr_ring, mailbox);
3303
3304 nxge_free_rxb(nxgep, channel);
3305
3306 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_unmap_rxdma"));
3307 }
3308
3309 nxge_status_t
3310 nxge_map_rxdma_channel(p_nxge_t nxgep, uint16_t channel,
3311 p_nxge_dma_common_t *dma_buf_p, p_rx_rbr_ring_t *rbr_p,
3312 uint32_t num_chunks,
3313 p_nxge_dma_common_t *dma_cntl_p, p_rx_rcr_ring_t *rcr_p,
3314 p_rx_mbox_t *rx_mbox_p)
3315 {
3316 int status = NXGE_OK;
3317
3318 /*
3319 * Set up and prepare buffer blocks, descriptors
3320 * and mailbox.
3321 */
3322 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3323 "==> nxge_map_rxdma_channel (channel %d)", channel));
3324 /*
3325 * Receive buffer blocks
3326 */
3327 status = nxge_map_rxdma_channel_buf_ring(nxgep, channel,
3328 dma_buf_p, rbr_p, num_chunks);
3329 if (status != NXGE_OK) {
3330 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3331 "==> nxge_map_rxdma_channel (channel %d): "
3332 "map buffer failed 0x%x", channel, status));
3333 goto nxge_map_rxdma_channel_exit;
3334 }
3335
3336 /*
3337 * Receive block ring, completion ring and mailbox.
3338 */
3339 status = nxge_map_rxdma_channel_cfg_ring(nxgep, channel,
3340 dma_cntl_p, rbr_p, rcr_p, rx_mbox_p);
3341 if (status != NXGE_OK) {
3342 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3343 "==> nxge_map_rxdma_channel (channel %d): "
3344 "map config failed 0x%x", channel, status));
3345 goto nxge_map_rxdma_channel_fail2;
3346 }
3347
3348 goto nxge_map_rxdma_channel_exit;
3349
3350 nxge_map_rxdma_channel_fail3:
3351 /* Free rbr, rcr */
3352 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3353 "==> nxge_map_rxdma_channel: free rbr/rcr "
3354 "(status 0x%x channel %d)",
3355 status, channel));
3356 nxge_unmap_rxdma_channel_cfg_ring(nxgep,
3357 *rcr_p, *rx_mbox_p);
3358
3359 nxge_map_rxdma_channel_fail2:
3360 /* Free buffer blocks */
3361 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3362 "==> nxge_map_rxdma_channel: free rx buffers"
3363 "(nxgep 0x%x status 0x%x channel %d)",
3364 nxgep, status, channel));
3365 nxge_unmap_rxdma_channel_buf_ring(nxgep, *rbr_p);
3366
3367 status = NXGE_ERROR;
3368
3369 nxge_map_rxdma_channel_exit:
3370 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3371 "<== nxge_map_rxdma_channel: "
3372 "(nxgep 0x%x status 0x%x channel %d)",
3373 nxgep, status, channel));
3374
3375 return (status);
3376 }
3377
3378 /*ARGSUSED*/
3379 static void
3380 nxge_unmap_rxdma_channel(p_nxge_t nxgep, uint16_t channel,
3381 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p)
3382 {
3383 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3384 "==> nxge_unmap_rxdma_channel (channel %d)", channel));
3385
3386 /*
3387 * unmap receive block ring, completion ring and mailbox.
3388 */
3389 (void) nxge_unmap_rxdma_channel_cfg_ring(nxgep,
3390 rcr_p, rx_mbox_p);
3391
3392 /* unmap buffer blocks */
3393 (void) nxge_unmap_rxdma_channel_buf_ring(nxgep, rbr_p);
3394
3395 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_unmap_rxdma_channel"));
3396 }
3397
3398 /*ARGSUSED*/
3399 static nxge_status_t
3400 nxge_map_rxdma_channel_cfg_ring(p_nxge_t nxgep, uint16_t dma_channel,
3401 p_nxge_dma_common_t *dma_cntl_p, p_rx_rbr_ring_t *rbr_p,
3402 p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p)
3403 {
3404 p_rx_rbr_ring_t rbrp;
3405 p_rx_rcr_ring_t rcrp;
3406 p_rx_mbox_t mboxp;
3407 p_nxge_dma_common_t cntl_dmap;
3408 p_nxge_dma_common_t dmap;
3409 p_rx_msg_t *rx_msg_ring;
3410 p_rx_msg_t rx_msg_p;
3411 p_rbr_cfig_a_t rcfga_p;
3412 p_rbr_cfig_b_t rcfgb_p;
3413 p_rcrcfig_a_t cfga_p;
3414 p_rcrcfig_b_t cfgb_p;
3415 p_rxdma_cfig1_t cfig1_p;
3416 p_rxdma_cfig2_t cfig2_p;
3417 p_rbr_kick_t kick_p;
3418 uint32_t dmaaddrp;
3419 uint32_t *rbr_vaddrp;
3420 uint32_t bkaddr;
3421 nxge_status_t status = NXGE_OK;
3422 int i;
3423 uint32_t nxge_port_rcr_size;
3424
3425 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3426 "==> nxge_map_rxdma_channel_cfg_ring"));
3427
3428 cntl_dmap = *dma_cntl_p;
3429
3430 /* Map in the receive block ring */
3431 rbrp = *rbr_p;
3432 dmap = (p_nxge_dma_common_t)&rbrp->rbr_desc;
3433 nxge_setup_dma_common(dmap, cntl_dmap, rbrp->rbb_max, 4);
3434 /*
3435 * Zero out buffer block ring descriptors.
3436 */
3437 bzero((caddr_t)dmap->kaddrp, dmap->alength);
3438
3439 rcfga_p = &(rbrp->rbr_cfga);
3440 rcfgb_p = &(rbrp->rbr_cfgb);
3441 kick_p = &(rbrp->rbr_kick);
3442 rcfga_p->value = 0;
3443 rcfgb_p->value = 0;
3444 kick_p->value = 0;
3445 rbrp->rbr_addr = dmap->dma_cookie.dmac_laddress;
3446 rcfga_p->value = (rbrp->rbr_addr &
3447 (RBR_CFIG_A_STDADDR_MASK |
3448 RBR_CFIG_A_STDADDR_BASE_MASK));
3449 rcfga_p->value |= ((uint64_t)rbrp->rbb_max << RBR_CFIG_A_LEN_SHIFT);
3450
3451 rcfgb_p->bits.ldw.bufsz0 = rbrp->pkt_buf_size0;
3452 rcfgb_p->bits.ldw.vld0 = 1;
3453 rcfgb_p->bits.ldw.bufsz1 = rbrp->pkt_buf_size1;
3454 rcfgb_p->bits.ldw.vld1 = 1;
3455 rcfgb_p->bits.ldw.bufsz2 = rbrp->pkt_buf_size2;
3456 rcfgb_p->bits.ldw.vld2 = 1;
3457 rcfgb_p->bits.ldw.bksize = nxgep->rx_bksize_code;
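	/*
	 * At this point rbr_cfga carries the block-aligned base address
	 * and length of the descriptor ring, while rbr_cfgb carries the
	 * three packet buffer size codes (with their valid bits) and the
	 * buffer block size code.
	 */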
3458
3459 /*
3460 * For each buffer block, enter receive block address to the ring.
3461 */
3462 rbr_vaddrp = (uint32_t *)dmap->kaddrp;
3463 rbrp->rbr_desc_vp = (uint32_t *)dmap->kaddrp;
3464 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3465 "==> nxge_map_rxdma_channel_cfg_ring: channel %d "
3466 "rbr_vaddrp $%p", dma_channel, rbr_vaddrp));
3467
3468 rx_msg_ring = rbrp->rx_msg_ring;
3469 for (i = 0; i < rbrp->tnblocks; i++) {
3470 rx_msg_p = rx_msg_ring[i];
3471 rx_msg_p->nxgep = nxgep;
3472 rx_msg_p->rx_rbr_p = rbrp;
3473 bkaddr = (uint32_t)
3474 ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress
3475 >> RBR_BKADDR_SHIFT));
3476 rx_msg_p->free = B_FALSE;
3477 rx_msg_p->max_usage_cnt = 0xbaddcafe;
3478
3479 *rbr_vaddrp++ = bkaddr;
3480 }
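	/*
	 * Each 32-bit RBR descriptor written above holds a buffer
	 * block's DMA address shifted right by RBR_BKADDR_SHIFT, i.e.
	 * in block-aligned units that the hardware expands back to a
	 * full address when it hands out buffers.
	 */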
3481
3482 kick_p->bits.ldw.bkadd = rbrp->rbb_max;
3483 rbrp->rbr_wr_index = (rbrp->rbb_max - 1);
3484
3485 rbrp->rbr_rd_index = 0;
3486
3487 rbrp->rbr_consumed = 0;
3488 rbrp->rbr_use_bcopy = B_TRUE;
3489 rbrp->rbr_bufsize_type = RCR_PKTBUFSZ_0;
3490 /*
3491 * Do bcopy on packets greater than bcopy size once
3492 * the lo threshold is reached.
3493 * This lo threshold should be less than the hi threshold.
3494 *
3495 * Do bcopy on every packet once the hi threshold is reached.
3496 */
3497 if (nxge_rx_threshold_lo >= nxge_rx_threshold_hi) {
3498 /* default it to use hi */
3499 nxge_rx_threshold_lo = nxge_rx_threshold_hi;
3500 }
3501
3502 if (nxge_rx_buf_size_type > NXGE_RBR_TYPE2) {
3503 nxge_rx_buf_size_type = NXGE_RBR_TYPE2;
3504 }
3505 rbrp->rbr_bufsize_type = nxge_rx_buf_size_type;
3506
3507 switch (nxge_rx_threshold_hi) {
3508 default:
3509 case NXGE_RX_COPY_NONE:
3510 /* Do not do bcopy at all */
3511 rbrp->rbr_use_bcopy = B_FALSE;
3512 rbrp->rbr_threshold_hi = rbrp->rbb_max;
3513 break;
3514
3515 case NXGE_RX_COPY_1:
3516 case NXGE_RX_COPY_2:
3517 case NXGE_RX_COPY_3:
3518 case NXGE_RX_COPY_4:
3519 case NXGE_RX_COPY_5:
3520 case NXGE_RX_COPY_6:
3521 case NXGE_RX_COPY_7:
3522 rbrp->rbr_threshold_hi =
3523 rbrp->rbb_max *
3524 (nxge_rx_threshold_hi)/NXGE_RX_BCOPY_SCALE;
3525 break;
3526
3527 case NXGE_RX_COPY_ALL:
3528 rbrp->rbr_threshold_hi = 0;
3529 break;
3530 }
3531
3532 switch (nxge_rx_threshold_lo) {
3533 default:
3534 case NXGE_RX_COPY_NONE:
3535 /* Do not do bcopy at all */
3536 if (rbrp->rbr_use_bcopy) {
3537 rbrp->rbr_use_bcopy = B_FALSE;
3538 }
3539 rbrp->rbr_threshold_lo = rbrp->rbb_max;
3540 break;
3541
3542 case NXGE_RX_COPY_1:
3543 case NXGE_RX_COPY_2:
3544 case NXGE_RX_COPY_3:
3545 case NXGE_RX_COPY_4:
3546 case NXGE_RX_COPY_5:
3547 case NXGE_RX_COPY_6:
3548 case NXGE_RX_COPY_7:
3549 rbrp->rbr_threshold_lo =
3550 rbrp->rbb_max *
3551 (nxge_rx_threshold_lo)/NXGE_RX_BCOPY_SCALE;
3552 break;
3553
3554 case NXGE_RX_COPY_ALL:
3555 rbrp->rbr_threshold_lo = 0;
3556 break;
3557 }
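	/*
	 * Example (assuming NXGE_RX_BCOPY_SCALE is 8): with
	 * nxge_rx_threshold_hi set to NXGE_RX_COPY_6 and an rbb_max of
	 * 4096, rbr_threshold_hi works out to 4096 * 6 / 8 = 3072
	 * consumed buffers before every packet is bcopied.
	 */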
3558
3559 NXGE_DEBUG_MSG((nxgep, RX_CTL,
3560 "nxge_map_rxdma_channel_cfg_ring: channel %d "
3561 "rbb_max %d "
3562 "rbrp->rbr_bufsize_type %d "
3563 "rbb_threshold_hi %d "
3564 "rbb_threshold_lo %d",
3565 dma_channel,
3566 rbrp->rbb_max,
3567 rbrp->rbr_bufsize_type,
3568 rbrp->rbr_threshold_hi,
3569 rbrp->rbr_threshold_lo));
3570
3571 rbrp->page_valid.value = 0;
3572 rbrp->page_mask_1.value = rbrp->page_mask_2.value = 0;
3573 rbrp->page_value_1.value = rbrp->page_value_2.value = 0;
3574 rbrp->page_reloc_1.value = rbrp->page_reloc_2.value = 0;
3575 rbrp->page_hdl.value = 0;
3576
3577 rbrp->page_valid.bits.ldw.page0 = 1;
3578 rbrp->page_valid.bits.ldw.page1 = 1;
3579
3580 /* Map in the receive completion ring */
3581 rcrp = (p_rx_rcr_ring_t)
3582 KMEM_ZALLOC(sizeof (rx_rcr_ring_t), KM_SLEEP);
3583 rcrp->rdc = dma_channel;
3584
3585 nxge_port_rcr_size = nxgep->nxge_port_rcr_size;
3586 rcrp->comp_size = nxge_port_rcr_size;
3587 rcrp->comp_wrap_mask = nxge_port_rcr_size - 1;
3588
3589 rcrp->max_receive_pkts = nxge_max_rx_pkts;
3590
3591 dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc;
3592 nxge_setup_dma_common(dmap, cntl_dmap, rcrp->comp_size,
3593 sizeof (rcr_entry_t));
3594 rcrp->comp_rd_index = 0;
3595 rcrp->comp_wt_index = 0;
3596 rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p =
3597 (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc);
3598 #if defined(__i386)
3599 rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp =
3600 (p_rcr_entry_t)(uint32_t)DMA_COMMON_IOADDR(rcrp->rcr_desc);
3601 #else
3602 rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp =
3603 (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc);
3604 #endif
3605
3606 rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p +
3607 (nxge_port_rcr_size - 1);
3608 rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp +
3609 (nxge_port_rcr_size - 1);
3610
3611 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3612 "==> nxge_map_rxdma_channel_cfg_ring: "
3613 "channel %d "
3614 "rbr_vaddrp $%p "
3615 "rcr_desc_rd_head_p $%p "
3616 "rcr_desc_rd_head_pp $%p "
3617 "rcr_desc_rd_last_p $%p "
3618 "rcr_desc_rd_last_pp $%p ",
3619 dma_channel,
3620 rbr_vaddrp,
3621 rcrp->rcr_desc_rd_head_p,
3622 rcrp->rcr_desc_rd_head_pp,
3623 rcrp->rcr_desc_last_p,
3624 rcrp->rcr_desc_last_pp));
3625
3626 /*
3627 * Zero out buffer block ring descriptors.
3628 */
3629 bzero((caddr_t)dmap->kaddrp, dmap->alength);
3630
3631 rcrp->intr_timeout = (nxgep->intr_timeout <
3632 NXGE_RDC_RCR_TIMEOUT_MIN) ? NXGE_RDC_RCR_TIMEOUT_MIN :
3633 nxgep->intr_timeout;
3634
3635 rcrp->intr_threshold = (nxgep->intr_threshold <
3636 NXGE_RDC_RCR_THRESHOLD_MIN) ? NXGE_RDC_RCR_THRESHOLD_MIN :
3637 nxgep->intr_threshold;
3638
3639 rcrp->full_hdr_flag = B_FALSE;
3640
3641 rcrp->sw_priv_hdr_len = nxge_rdc_buf_offset;
3642
3643
3644 cfga_p = &(rcrp->rcr_cfga);
3645 cfgb_p = &(rcrp->rcr_cfgb);
3646 cfga_p->value = 0;
3647 cfgb_p->value = 0;
3648 rcrp->rcr_addr = dmap->dma_cookie.dmac_laddress;
3649 cfga_p->value = (rcrp->rcr_addr &
3650 (RCRCFIG_A_STADDR_MASK |
3651 RCRCFIG_A_STADDR_BASE_MASK));
3652
3653 	cfga_p->value |= ((uint64_t)rcrp->comp_size <<
3654 RCRCFIG_A_LEN_SHIF);
3655
3656 /*
3657 * Timeout should be set based on the system clock divider.
3658 * A timeout value of 1 assumes that the
3659 * granularity (1000) is 3 microseconds running at 300MHz.
3660 */
3661 cfgb_p->bits.ldw.pthres = rcrp->intr_threshold;
3662 cfgb_p->bits.ldw.timeout = rcrp->intr_timeout;
3663 cfgb_p->bits.ldw.entout = 1;
3664
3665 /* Map in the mailbox */
3666 mboxp = (p_rx_mbox_t)
3667 KMEM_ZALLOC(sizeof (rx_mbox_t), KM_SLEEP);
3668 dmap = (p_nxge_dma_common_t)&mboxp->rx_mbox;
3669 nxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (rxdma_mailbox_t));
3670 cfig1_p = (p_rxdma_cfig1_t)&mboxp->rx_cfg1;
3671 cfig2_p = (p_rxdma_cfig2_t)&mboxp->rx_cfg2;
3672 cfig1_p->value = cfig2_p->value = 0;
3673
3674 mboxp->mbox_addr = dmap->dma_cookie.dmac_laddress;
3675 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3676 "==> nxge_map_rxdma_channel_cfg_ring: "
3677 "channel %d cfg1 0x%016llx cfig2 0x%016llx cookie 0x%016llx",
3678 dma_channel, cfig1_p->value, cfig2_p->value,
3679 mboxp->mbox_addr));
3680
3681 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress >> 32
3682 & 0xfff);
3683 cfig1_p->bits.ldw.mbaddr_h = dmaaddrp;
3684
3685
3686 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress & 0xffffffff);
3687 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress &
3688 RXDMA_CFIG2_MBADDR_L_MASK);
3689
3690 cfig2_p->bits.ldw.mbaddr = (dmaaddrp >> RXDMA_CFIG2_MBADDR_L_SHIFT);
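	/*
	 * The mailbox DMA address is split across two registers: cfig1
	 * takes the bits above bit 31 (masked to 12 bits here), and
	 * cfig2 takes the low 32 bits with the alignment bits dropped
	 * via RXDMA_CFIG2_MBADDR_L_SHIFT.
	 */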
3691
3692 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3693 "==> nxge_map_rxdma_channel_cfg_ring: "
3694 "channel %d damaddrp $%p "
3695 "cfg1 0x%016llx cfig2 0x%016llx",
3696 dma_channel, dmaaddrp,
3697 cfig1_p->value, cfig2_p->value));
3698
3699 cfig2_p->bits.ldw.full_hdr = rcrp->full_hdr_flag;
3700 if (nxgep->niu_hw_type == NIU_HW_TYPE_RF) {
3701 switch (rcrp->sw_priv_hdr_len) {
3702 case SW_OFFSET_NO_OFFSET:
3703 case SW_OFFSET_64:
3704 case SW_OFFSET_128:
3705 case SW_OFFSET_192:
3706 cfig2_p->bits.ldw.offset =
3707 rcrp->sw_priv_hdr_len;
3708 cfig2_p->bits.ldw.offset256 = 0;
3709 break;
3710 case SW_OFFSET_256:
3711 case SW_OFFSET_320:
3712 case SW_OFFSET_384:
3713 case SW_OFFSET_448:
3714 cfig2_p->bits.ldw.offset =
3715 rcrp->sw_priv_hdr_len & 0x3;
3716 cfig2_p->bits.ldw.offset256 = 1;
3717 break;
3718 default:
3719 cfig2_p->bits.ldw.offset = SW_OFFSET_NO_OFFSET;
3720 cfig2_p->bits.ldw.offset256 = 0;
3721 }
3722 } else {
3723 cfig2_p->bits.ldw.offset = rcrp->sw_priv_hdr_len;
3724 }
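	/*
	 * Example of the RF encoding above (byte values as decoded in
	 * nxge_receive_packet): SW_OFFSET_320 is programmed as offset =
	 * (code & 0x3) with offset256 = 1, presumably interpreted by the
	 * hardware as 256 + 64 = 320 bytes of software-private header.
	 */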
3725
3726 rbrp->rx_rcr_p = rcrp;
3727 rcrp->rx_rbr_p = rbrp;
3728 *rcr_p = rcrp;
3729 *rx_mbox_p = mboxp;
3730
3731 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3732 "<== nxge_map_rxdma_channel_cfg_ring status 0x%08x", status));
3733
3734 return (status);
3735 }
3736
3737 /*ARGSUSED*/
3738 static void
3739 nxge_unmap_rxdma_channel_cfg_ring(p_nxge_t nxgep,
3740 p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p)
3741 {
3742 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3743 "==> nxge_unmap_rxdma_channel_cfg_ring: channel %d",
3744 rcr_p->rdc));
3745
3746 KMEM_FREE(rcr_p, sizeof (rx_rcr_ring_t));
3747 KMEM_FREE(rx_mbox_p, sizeof (rx_mbox_t));
3748
3749 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3750 "<== nxge_unmap_rxdma_channel_cfg_ring"));
3751 }
3752
3753 static nxge_status_t
3754 nxge_map_rxdma_channel_buf_ring(p_nxge_t nxgep, uint16_t channel,
3755 p_nxge_dma_common_t *dma_buf_p,
3756 p_rx_rbr_ring_t *rbr_p, uint32_t num_chunks)
3757 {
3758 p_rx_rbr_ring_t rbrp;
3759 p_nxge_dma_common_t dma_bufp, tmp_bufp;
3760 p_rx_msg_t *rx_msg_ring;
3761 p_rx_msg_t rx_msg_p;
3762 p_mblk_t mblk_p;
3763
3764 rxring_info_t *ring_info;
3765 nxge_status_t status = NXGE_OK;
3766 int i, j, index;
3767 uint32_t size, bsize, nblocks, nmsgs;
3768
3769 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3770 "==> nxge_map_rxdma_channel_buf_ring: channel %d",
3771 channel));
3772
3773 dma_bufp = tmp_bufp = *dma_buf_p;
3774 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3775 " nxge_map_rxdma_channel_buf_ring: channel %d to map %d "
3776 "chunks bufp 0x%016llx",
3777 channel, num_chunks, dma_bufp));
3778
3779 nmsgs = 0;
3780 for (i = 0; i < num_chunks; i++, tmp_bufp++) {
3781 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3782 "==> nxge_map_rxdma_channel_buf_ring: channel %d "
3783 "bufp 0x%016llx nblocks %d nmsgs %d",
3784 channel, tmp_bufp, tmp_bufp->nblocks, nmsgs));
3785 nmsgs += tmp_bufp->nblocks;
3786 }
3787 if (!nmsgs) {
3788 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3789 "<== nxge_map_rxdma_channel_buf_ring: channel %d "
3790 "no msg blocks",
3791 channel));
3792 status = NXGE_ERROR;
3793 goto nxge_map_rxdma_channel_buf_ring_exit;
3794 }
3795
3796 rbrp = (p_rx_rbr_ring_t)KMEM_ZALLOC(sizeof (*rbrp), KM_SLEEP);
3797
3798 size = nmsgs * sizeof (p_rx_msg_t);
3799 rx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP);
3800 ring_info = (rxring_info_t *)KMEM_ZALLOC(sizeof (rxring_info_t),
3801 KM_SLEEP);
3802
3803 MUTEX_INIT(&rbrp->lock, NULL, MUTEX_DRIVER,
3804 (void *)nxgep->interrupt_cookie);
3805 MUTEX_INIT(&rbrp->post_lock, NULL, MUTEX_DRIVER,
3806 (void *)nxgep->interrupt_cookie);
3807 rbrp->rdc = channel;
3808 rbrp->num_blocks = num_chunks;
3809 rbrp->tnblocks = nmsgs;
3810 rbrp->rbb_max = nmsgs;
3811 rbrp->rbr_max_size = nmsgs;
3812 rbrp->rbr_wrap_mask = (rbrp->rbb_max - 1);
3813
3814 	/*
3815 	 * Hardware packet buffer sizes used here: 256 bytes, 1K and
3816 	 * 2K (4K or 8K when jumbo frames are enabled).
3817 	 */
3818
3819 rbrp->pkt_buf_size0 = RBR_BUFSZ0_256B;
3820 rbrp->pkt_buf_size0_bytes = RBR_BUFSZ0_256_BYTES;
3821 rbrp->npi_pkt_buf_size0 = SIZE_256B;
3822
3823 rbrp->pkt_buf_size1 = RBR_BUFSZ1_1K;
3824 rbrp->pkt_buf_size1_bytes = RBR_BUFSZ1_1K_BYTES;
3825 rbrp->npi_pkt_buf_size1 = SIZE_1KB;
3826
3827 rbrp->block_size = nxgep->rx_default_block_size;
3828
3829 if (!nxgep->mac.is_jumbo) {
3830 rbrp->pkt_buf_size2 = RBR_BUFSZ2_2K;
3831 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_2K_BYTES;
3832 rbrp->npi_pkt_buf_size2 = SIZE_2KB;
3833 } else {
3834 if (rbrp->block_size >= 0x2000) {
3835 rbrp->pkt_buf_size2 = RBR_BUFSZ2_8K;
3836 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_8K_BYTES;
3837 rbrp->npi_pkt_buf_size2 = SIZE_8KB;
3838 } else {
3839 rbrp->pkt_buf_size2 = RBR_BUFSZ2_4K;
3840 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_4K_BYTES;
3841 rbrp->npi_pkt_buf_size2 = SIZE_4KB;
3842 }
3843 }
3844
3845 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3846 "==> nxge_map_rxdma_channel_buf_ring: channel %d "
3847 "actual rbr max %d rbb_max %d nmsgs %d "
3848 "rbrp->block_size %d default_block_size %d "
3849 "(config nxge_rbr_size %d nxge_rbr_spare_size %d)",
3850 channel, rbrp->rbr_max_size, rbrp->rbb_max, nmsgs,
3851 rbrp->block_size, nxgep->rx_default_block_size,
3852 nxge_rbr_size, nxge_rbr_spare_size));
3853
3854 /* Map in buffers from the buffer pool. */
3855 index = 0;
3856 for (i = 0; i < rbrp->num_blocks; i++, dma_bufp++) {
3857 bsize = dma_bufp->block_size;
3858 nblocks = dma_bufp->nblocks;
3859 #if defined(__i386)
3860 ring_info->buffer[i].dvma_addr = (uint32_t)dma_bufp->ioaddr_pp;
3861 #else
3862 ring_info->buffer[i].dvma_addr = (uint64_t)dma_bufp->ioaddr_pp;
3863 #endif
3864 ring_info->buffer[i].buf_index = i;
3865 ring_info->buffer[i].buf_size = dma_bufp->alength;
3866 ring_info->buffer[i].start_index = index;
3867 #if defined(__i386)
3868 ring_info->buffer[i].kaddr = (uint32_t)dma_bufp->kaddrp;
3869 #else
3870 ring_info->buffer[i].kaddr = (uint64_t)dma_bufp->kaddrp;
3871 #endif
3872
3873 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3874 " nxge_map_rxdma_channel_buf_ring: map channel %d "
3875 "chunk %d"
3876 " nblocks %d chunk_size %x block_size 0x%x "
3877 "dma_bufp $%p", channel, i,
3878 dma_bufp->nblocks, ring_info->buffer[i].buf_size, bsize,
3879 dma_bufp));
3880
3881 for (j = 0; j < nblocks; j++) {
3882 if ((rx_msg_p = nxge_allocb(bsize, BPRI_LO,
3883 dma_bufp)) == NULL) {
3884 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3885 "allocb failed (index %d i %d j %d)",
3886 index, i, j));
3887 goto nxge_map_rxdma_channel_buf_ring_fail1;
3888 }
3889 rx_msg_ring[index] = rx_msg_p;
3890 rx_msg_p->block_index = index;
3891 rx_msg_p->shifted_addr = (uint32_t)
3892 ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress >>
3893 RBR_BKADDR_SHIFT));
3894
3895 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3896 "index %d j %d rx_msg_p $%p mblk %p",
3897 index, j, rx_msg_p, rx_msg_p->rx_mblk_p));
3898
3899 mblk_p = rx_msg_p->rx_mblk_p;
3900 mblk_p->b_wptr = mblk_p->b_rptr + bsize;
3901
3902 rbrp->rbr_ref_cnt++;
3903 index++;
3904 rx_msg_p->buf_dma.dma_channel = channel;
3905 }
3906
3907 rbrp->rbr_alloc_type = DDI_MEM_ALLOC;
3908 if (dma_bufp->contig_alloc_type) {
3909 rbrp->rbr_alloc_type = CONTIG_MEM_ALLOC;
3910 }
3911
3912 if (dma_bufp->kmem_alloc_type) {
3913 rbrp->rbr_alloc_type = KMEM_ALLOC;
3914 }
3915
3916 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3917 " nxge_map_rxdma_channel_buf_ring: map channel %d "
3918 "chunk %d"
3919 " nblocks %d chunk_size %x block_size 0x%x "
3920 "dma_bufp $%p",
3921 channel, i,
3922 dma_bufp->nblocks, ring_info->buffer[i].buf_size, bsize,
3923 dma_bufp));
3924 }
3925 if (i < rbrp->num_blocks) {
3926 goto nxge_map_rxdma_channel_buf_ring_fail1;
3927 }
3928
3929 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3930 "nxge_map_rxdma_channel_buf_ring: done buf init "
3931 "channel %d msg block entries %d",
3932 channel, index));
3933 ring_info->block_size_mask = bsize - 1;
3934 rbrp->rx_msg_ring = rx_msg_ring;
3935 rbrp->dma_bufp = dma_buf_p;
3936 rbrp->ring_info = ring_info;
3937
3938 status = nxge_rxbuf_index_info_init(nxgep, rbrp);
3939 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3940 " nxge_map_rxdma_channel_buf_ring: "
3941 "channel %d done buf info init", channel));
3942
3943 /*
3944 * Finally, permit nxge_freeb() to call nxge_post_page().
3945 */
3946 rbrp->rbr_state = RBR_POSTING;
3947
3948 *rbr_p = rbrp;
3949 goto nxge_map_rxdma_channel_buf_ring_exit;
3950
3951 nxge_map_rxdma_channel_buf_ring_fail1:
3952 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3953 " nxge_map_rxdma_channel_buf_ring: failed channel (0x%x)",
3954 	    channel));
3955
3956 index--;
3957 for (; index >= 0; index--) {
3958 rx_msg_p = rx_msg_ring[index];
3959 if (rx_msg_p != NULL) {
3960 freeb(rx_msg_p->rx_mblk_p);
3961 rx_msg_ring[index] = NULL;
3962 }
3963 }
3964 nxge_map_rxdma_channel_buf_ring_fail:
3965 MUTEX_DESTROY(&rbrp->post_lock);
3966 MUTEX_DESTROY(&rbrp->lock);
3967 KMEM_FREE(ring_info, sizeof (rxring_info_t));
3968 KMEM_FREE(rx_msg_ring, size);
3969 KMEM_FREE(rbrp, sizeof (rx_rbr_ring_t));
3970
3971 status = NXGE_ERROR;
3972
3973 nxge_map_rxdma_channel_buf_ring_exit:
3974 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3975 "<== nxge_map_rxdma_channel_buf_ring status 0x%08x", status));
3976
3977 return (status);
3978 }
3979
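/*
 * nxge_unmap_rxdma_channel_buf_ring
 *
 *	Tear down the RBR built by nxge_map_rxdma_channel_buf_ring():
 *	free every rx_msg block and destroy the ring mutexes.  If no
 *	buffers are still outstanding, also free the data buffers and
 *	the ring structures; otherwise mark the ring RBR_UNMAPPED so
 *	that nxge_freeb() frees it later.
 *
 * Arguments:
 * 	nxgep
 * 	rbr_p		The RBR ring to unmap.
 */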
3980 /*ARGSUSED*/
3981 static void
3982 nxge_unmap_rxdma_channel_buf_ring(p_nxge_t nxgep,
3983 p_rx_rbr_ring_t rbr_p)
3984 {
3985 p_rx_msg_t *rx_msg_ring;
3986 p_rx_msg_t rx_msg_p;
3987 rxring_info_t *ring_info;
3988 int i;
3989 uint32_t size;
3990 #ifdef NXGE_DEBUG
3991 int num_chunks;
3992 #endif
3993
3994 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3995 "==> nxge_unmap_rxdma_channel_buf_ring"));
3996 if (rbr_p == NULL) {
3997 NXGE_DEBUG_MSG((nxgep, RX_CTL,
3998 "<== nxge_unmap_rxdma_channel_buf_ring: NULL rbrp"));
3999 return;
4000 }
4001 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
4002 "==> nxge_unmap_rxdma_channel_buf_ring: channel %d",
4003 rbr_p->rdc));
4004
4005 rx_msg_ring = rbr_p->rx_msg_ring;
4006 ring_info = rbr_p->ring_info;
4007
4008 if (rx_msg_ring == NULL || ring_info == NULL) {
4009 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
4010 "<== nxge_unmap_rxdma_channel_buf_ring: "
4011 "rx_msg_ring $%p ring_info $%p",
4012 		    rx_msg_ring, ring_info));
4013 return;
4014 }
4015
4016 #ifdef NXGE_DEBUG
4017 num_chunks = rbr_p->num_blocks;
4018 #endif
4019 size = rbr_p->tnblocks * sizeof (p_rx_msg_t);
4020 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
4021 " nxge_unmap_rxdma_channel_buf_ring: channel %d chunks %d "
4022 "tnblocks %d (max %d) size ptrs %d ",
4023 rbr_p->rdc, num_chunks,
4024 rbr_p->tnblocks, rbr_p->rbr_max_size, size));
4025
4026 for (i = 0; i < rbr_p->tnblocks; i++) {
4027 rx_msg_p = rx_msg_ring[i];
4028 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
4029 " nxge_unmap_rxdma_channel_buf_ring: "
4030 "rx_msg_p $%p",
4031 rx_msg_p));
4032 if (rx_msg_p != NULL) {
4033 freeb(rx_msg_p->rx_mblk_p);
4034 rx_msg_ring[i] = NULL;
4035 }
4036 }
4037
4038 /*
4039 	 * We may no longer use the <post_lock> mutex. By setting
4040 * <rbr_state> to anything but POSTING, we prevent
4041 * nxge_post_page() from accessing a dead mutex.
4042 */
4043 rbr_p->rbr_state = RBR_UNMAPPING;
4044 MUTEX_DESTROY(&rbr_p->post_lock);
4045
4046 MUTEX_DESTROY(&rbr_p->lock);
4047
4048 if (rbr_p->rbr_ref_cnt == 0) {
4049 /*
4050 * This is the normal state of affairs.
4051 * Need to free the following buffers:
4052 * - data buffers
4053 * - rx_msg ring
4054 * - ring_info
4055 * - rbr ring
4056 */
4057 NXGE_DEBUG_MSG((nxgep, RX_CTL,
4058 "unmap_rxdma_buf_ring: No outstanding - freeing "));
4059 nxge_rxdma_databuf_free(rbr_p);
4060 KMEM_FREE(ring_info, sizeof (rxring_info_t));
4061 KMEM_FREE(rx_msg_ring, size);
4062 KMEM_FREE(rbr_p, sizeof (*rbr_p));
4063 } else {
4064 /*
4065 * Some of our buffers are still being used.
4066 * Therefore, tell nxge_freeb() this ring is
4067 * unmapped, so it may free <rbr_p> for us.
4068 */
4069 rbr_p->rbr_state = RBR_UNMAPPED;
4070 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4071 "unmap_rxdma_buf_ring: %d %s outstanding.",
4072 rbr_p->rbr_ref_cnt,
4073 rbr_p->rbr_ref_cnt == 1 ? "msg" : "msgs"));
4074 }
4075
4076 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
4077 "<== nxge_unmap_rxdma_channel_buf_ring"));
4078 }
4079
4080 /*
4081 * nxge_rxdma_hw_start_common
4082 *
4083 * Arguments:
4084 * nxgep
4085 *
4086 * Notes:
4087 *
4088 * NPI/NXGE function calls:
4089 * nxge_init_fzc_rx_common();
4090 * nxge_init_fzc_rxdma_port();
4091 *
4092 * Registers accessed:
4093 *
4094 * Context:
4095 * Service domain
4096 */
4097 static nxge_status_t
4098 nxge_rxdma_hw_start_common(p_nxge_t nxgep)
4099 {
4100 nxge_status_t status = NXGE_OK;
4101
4102 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start_common"));
4103
4104 /*
4105 * Load the sharable parameters by writing to the
4106 * function zero control registers. These FZC registers
4107 * should be initialized only once for the entire chip.
4108 */
4109 (void) nxge_init_fzc_rx_common(nxgep);
4110
4111 /*
4112 	 * Initialize the RXDMA port-specific FZC control configurations.
4113 	 * These FZC registers pertain to each port.
4114 */
4115 (void) nxge_init_fzc_rxdma_port(nxgep);
4116
4117 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_hw_start_common"));
4118
4119 return (status);
4120 }
4121
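/*
 * nxge_rxdma_hw_start
 *
 *	Start one RXDMA channel: look up its RBR, RCR and mailbox and
 *	hand them to nxge_rxdma_start_channel().
 *
 * Arguments:
 * 	nxgep
 * 	channel		The channel to start.
 *
 * NPI/NXGE function calls:
 *	nxge_rxdma_start_channel()
 */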
4122 static nxge_status_t
4123 nxge_rxdma_hw_start(p_nxge_t nxgep, int channel)
4124 {
4125 int i, ndmas;
4126 p_rx_rbr_rings_t rx_rbr_rings;
4127 p_rx_rbr_ring_t *rbr_rings;
4128 p_rx_rcr_rings_t rx_rcr_rings;
4129 p_rx_rcr_ring_t *rcr_rings;
4130 p_rx_mbox_areas_t rx_mbox_areas_p;
4131 p_rx_mbox_t *rx_mbox_p;
4132 nxge_status_t status = NXGE_OK;
4133
4134 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start"));
4135
4136 rx_rbr_rings = nxgep->rx_rbr_rings;
4137 rx_rcr_rings = nxgep->rx_rcr_rings;
4138 if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) {
4139 NXGE_DEBUG_MSG((nxgep, RX_CTL,
4140 "<== nxge_rxdma_hw_start: NULL ring pointers"));
4141 return (NXGE_ERROR);
4142 }
4143 ndmas = rx_rbr_rings->ndmas;
4144 if (ndmas == 0) {
4145 NXGE_DEBUG_MSG((nxgep, RX_CTL,
4146 "<== nxge_rxdma_hw_start: no dma channel allocated"));
4147 return (NXGE_ERROR);
4148 }
4149
4150 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
4151 "==> nxge_rxdma_hw_start (ndmas %d)", ndmas));
4152
4153 rbr_rings = rx_rbr_rings->rbr_rings;
4154 rcr_rings = rx_rcr_rings->rcr_rings;
4155 rx_mbox_areas_p = nxgep->rx_mbox_areas_p;
4156 if (rx_mbox_areas_p) {
4157 rx_mbox_p = rx_mbox_areas_p->rxmbox_areas;
4158 }
4159
4160 i = channel;
4161 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
4162 "==> nxge_rxdma_hw_start (ndmas %d) channel %d",
4163 ndmas, channel));
4164 status = nxge_rxdma_start_channel(nxgep, channel,
4165 (p_rx_rbr_ring_t)rbr_rings[i],
4166 (p_rx_rcr_ring_t)rcr_rings[i],
4167 (p_rx_mbox_t)rx_mbox_p[i]);
4168 if (status != NXGE_OK) {
4169 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4170 "==> nxge_rxdma_hw_start: disable "
4171 "(status 0x%x channel %d)", status, channel));
4172 return (status);
4173 }
4174
4175 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start: "
4176 "rx_rbr_rings 0x%016llx rings 0x%016llx",
4177 rx_rbr_rings, rx_rcr_rings));
4178
4179 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
4180 "==> nxge_rxdma_hw_start: (status 0x%x)", status));
4181
4182 return (status);
4183 }
4184
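/*
 * nxge_rxdma_hw_stop
 *
 *	Stop one RXDMA channel via nxge_rxdma_stop_channel().
 *
 * Arguments:
 * 	nxgep
 * 	channel		The channel to stop.
 */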
4185 static void
4186 nxge_rxdma_hw_stop(p_nxge_t nxgep, int channel)
4187 {
4188 p_rx_rbr_rings_t rx_rbr_rings;
4189 p_rx_rcr_rings_t rx_rcr_rings;
4190
4191 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_stop"));
4192
4193 rx_rbr_rings = nxgep->rx_rbr_rings;
4194 rx_rcr_rings = nxgep->rx_rcr_rings;
4195 if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) {
4196 NXGE_DEBUG_MSG((nxgep, RX_CTL,
4197 "<== nxge_rxdma_hw_stop: NULL ring pointers"));
4198 return;
4199 }
4200
4201 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
4202 "==> nxge_rxdma_hw_stop(channel %d)",
4203 channel));
4204 (void) nxge_rxdma_stop_channel(nxgep, channel);
4205
4206 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_stop: "
4207 "rx_rbr_rings 0x%016llx rings 0x%016llx",
4208 rx_rbr_rings, rx_rcr_rings));
4209
4210 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_hw_stop"));
4211 }
4212
4213
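/*
 * nxge_rxdma_start_channel
 *
 *	Start one RXDMA channel: reset the RDC and set up its FZC
 *	registers (service domain only), program the interrupt event
 *	masks and the control/status register, then load the
 *	descriptors, buffers and mailbox and enable the channel.
 *	In a guest domain, an interrupt handler is added via
 *	nxge_hio_intr_add().
 *
 * Arguments:
 * 	nxgep
 * 	channel		The channel to start.
 * 	rbr_p		The RBR ring for this channel.
 * 	rcr_p		The RCR ring for this channel.
 * 	mbox_p		The mailbox for this channel.
 *
 * NPI/NXGE function calls:
 *	npi_rxdma_cfg_rdc_reset()
 *	npi_rxdma_event_mask()
 *	nxge_init_fzc_rxdma_channel()
 *	nxge_init_rxdma_channel_cntl_stat()
 *	nxge_enable_rxdma_channel()
 *
 * Context:
 *	Service domain or guest domain
 */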
4214 static nxge_status_t
4215 nxge_rxdma_start_channel(p_nxge_t nxgep, uint16_t channel,
4216 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p)
4217
4218 {
4219 npi_handle_t handle;
4220 npi_status_t rs = NPI_SUCCESS;
4221 rx_dma_ctl_stat_t cs;
4222 rx_dma_ent_msk_t ent_mask;
4223 nxge_status_t status = NXGE_OK;
4224
4225 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel"));
4226
4227 handle = NXGE_DEV_NPI_HANDLE(nxgep);
4228
4229 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "nxge_rxdma_start_channel: "
4230 "npi handle addr $%p acc $%p",
4231 nxgep->npi_handle.regp, nxgep->npi_handle.regh));
4232
4233 /* Reset RXDMA channel, but not if you're a guest. */
4234 if (!isLDOMguest(nxgep)) {
4235 rs = npi_rxdma_cfg_rdc_reset(handle, channel);
4236 if (rs != NPI_SUCCESS) {
4237 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4238 			    "==> nxge_rxdma_start_channel: "
4239 "npi_rxdma_cfg_rdc_reset(%d) returned 0x%08x",
4240 channel, rs));
4241 return (NXGE_ERROR | rs);
4242 }
4243
4244 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
4245 "==> nxge_rxdma_start_channel: reset done: channel %d",
4246 channel));
4247 }
4248
4249 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
4250 if (isLDOMguest(nxgep))
4251 (void) nxge_rdc_lp_conf(nxgep, channel);
4252 #endif
4253
4254 /*
4255 	 * Initialize the RXDMA channel-specific FZC control
4256 	 * configurations. These FZC registers pertain to each
4257 	 * RX channel (logical pages).
4258 */
4259 if (!isLDOMguest(nxgep)) {
4260 status = nxge_init_fzc_rxdma_channel(nxgep, channel);
4261 if (status != NXGE_OK) {
4262 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4263 "==> nxge_rxdma_start_channel: "
4264 "init fzc rxdma failed (0x%08x channel %d)",
4265 status, channel));
4266 return (status);
4267 }
4268
4269 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
4270 "==> nxge_rxdma_start_channel: fzc done"));
4271 }
4272
4273 /* Set up the interrupt event masks. */
4274 ent_mask.value = 0;
4275 ent_mask.value |= RX_DMA_ENT_MSK_RBREMPTY_MASK;
4276 rs = npi_rxdma_event_mask(handle, OP_SET, channel,
4277 &ent_mask);
4278 if (rs != NPI_SUCCESS) {
4279 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4280 "==> nxge_rxdma_start_channel: "
4281 "init rxdma event masks failed "
4282 "(0x%08x channel %d)",
4283 		    rs, channel));
4284 return (NXGE_ERROR | rs);
4285 }
4286
4287 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
4288 "==> nxge_rxdma_start_channel: "
4289 "event done: channel %d (mask 0x%016llx)",
4290 channel, ent_mask.value));
4291
4292 /* Initialize the receive DMA control and status register */
4293 cs.value = 0;
4294 cs.bits.hdw.mex = 1;
4295 cs.bits.hdw.rcrthres = 1;
4296 cs.bits.hdw.rcrto = 1;
4297 cs.bits.hdw.rbr_empty = 1;
4298 status = nxge_init_rxdma_channel_cntl_stat(nxgep, channel, &cs);
4299 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: "
4300 	    "channel %d rx_dma_cntl_stat 0x%016llx", channel, cs.value));
4301 if (status != NXGE_OK) {
4302 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4303 "==> nxge_rxdma_start_channel: "
4304 		    "init rxdma control register failed (0x%08x channel %d)",
4305 status, channel));
4306 return (status);
4307 }
4308
4309 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: "
4310 "control done - channel %d cs 0x%016llx", channel, cs.value));
4311
4312 /*
4313 * Load RXDMA descriptors, buffers, mailbox,
4314 * initialise the receive DMA channels and
4315 * enable each DMA channel.
4316 */
4317 status = nxge_enable_rxdma_channel(nxgep,
4318 channel, rbr_p, rcr_p, mbox_p);
4319
4320 if (status != NXGE_OK) {
4321 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4322 " nxge_rxdma_start_channel: "
4323 " enable rxdma failed (0x%08x channel %d)",
4324 status, channel));
4325 return (status);
4326 }
4327
4328 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
4329 	    "==> nxge_rxdma_start_channel: enabled channel %d", channel));
4330
4331 if (isLDOMguest(nxgep)) {
4332 /* Add interrupt handler for this channel. */
4333 status = nxge_hio_intr_add(nxgep, VP_BOUND_RX, channel);
4334 if (status != NXGE_OK) {
4335 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4336 " nxge_rxdma_start_channel: "
4337 " nxge_hio_intr_add failed (0x%08x channel %d)",
4338 status, channel));
4339 return (status);
4340 }
4341 }
4342
4343 ent_mask.value = 0;
4344 ent_mask.value |= (RX_DMA_ENT_MSK_WRED_DROP_MASK |
4345 RX_DMA_ENT_MSK_PTDROP_PKT_MASK);
4346 rs = npi_rxdma_event_mask(handle, OP_SET, channel,
4347 &ent_mask);
4348 if (rs != NPI_SUCCESS) {
4349 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4350 		    "==> nxge_rxdma_start_channel: "
4351 		    "init rxdma event masks failed (0x%08x channel %d)",
4352 		    rs, channel));
4353 return (NXGE_ERROR | rs);
4354 }
4355
4356 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: "
4357 "control done - channel %d cs 0x%016llx", channel, cs.value));
4358
4359 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_start_channel"));
4360
4361 return (NXGE_OK);
4362 }
4363
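/*
 * nxge_rxdma_stop_channel
 *
 *	Quiesce one RXDMA channel: disable the RxMAC and drain the IPP
 *	port, reset the RDC, mask all of its events, clear its
 *	control/status register, disable the channel, and re-enable
 *	the RxMAC.  The RxMAC and IPP steps are skipped in a guest
 *	domain.
 *
 * Arguments:
 * 	nxgep
 * 	channel		The channel to stop.
 *
 * NPI/NXGE function calls:
 *	npi_rxdma_cfg_rdc_reset()
 *	npi_rxdma_event_mask()
 *	nxge_disable_rxdma_channel()
 *
 * Context:
 *	Service domain or guest domain
 */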
4364 static nxge_status_t
4365 nxge_rxdma_stop_channel(p_nxge_t nxgep, uint16_t channel)
4366 {
4367 npi_handle_t handle;
4368 npi_status_t rs = NPI_SUCCESS;
4369 rx_dma_ctl_stat_t cs;
4370 rx_dma_ent_msk_t ent_mask;
4371 nxge_status_t status = NXGE_OK;
4372
4373 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_channel"));
4374
4375 handle = NXGE_DEV_NPI_HANDLE(nxgep);
4376
4377 NXGE_DEBUG_MSG((nxgep, RX_CTL, "nxge_rxdma_stop_channel: "
4378 "npi handle addr $%p acc $%p",
4379 nxgep->npi_handle.regp, nxgep->npi_handle.regh));
4380
4381 if (!isLDOMguest(nxgep)) {
4382 /*
4383 * Stop RxMAC = A.9.2.6
4384 */
4385 if (nxge_rx_mac_disable(nxgep) != NXGE_OK) {
4386 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4387 "nxge_rxdma_stop_channel: "
4388 "Failed to disable RxMAC"));
4389 }
4390
4391 /*
4392 * Drain IPP Port = A.9.3.6
4393 */
4394 (void) nxge_ipp_drain(nxgep);
4395 }
4396
4397 /* Reset RXDMA channel */
4398 rs = npi_rxdma_cfg_rdc_reset(handle, channel);
4399 if (rs != NPI_SUCCESS) {
4400 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4401 " nxge_rxdma_stop_channel: "
4402 " reset rxdma failed (0x%08x channel %d)",
4403 rs, channel));
4404 return (NXGE_ERROR | rs);
4405 }
4406
4407 NXGE_DEBUG_MSG((nxgep, RX_CTL,
4408 "==> nxge_rxdma_stop_channel: reset done"));
4409
4410 /* Set up the interrupt event masks. */
4411 ent_mask.value = RX_DMA_ENT_MSK_ALL;
4412 rs = npi_rxdma_event_mask(handle, OP_SET, channel,
4413 &ent_mask);
4414 if (rs != NPI_SUCCESS) {
4415 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4416 "==> nxge_rxdma_stop_channel: "
4417 "set rxdma event masks failed (0x%08x channel %d)",
4418 rs, channel));
4419 return (NXGE_ERROR | rs);
4420 }
4421
4422 NXGE_DEBUG_MSG((nxgep, RX_CTL,
4423 "==> nxge_rxdma_stop_channel: event done"));
4424
4425 /*
4426 * Initialize the receive DMA control and status register
4427 */
4428 cs.value = 0;
4429 status = nxge_init_rxdma_channel_cntl_stat(nxgep, channel, &cs);
4430 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_channel: control "
4431 " to default (all 0s) 0x%08x", cs.value));
4432 if (status != NXGE_OK) {
4433 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4434 " nxge_rxdma_stop_channel: init rxdma"
4435 		    " control register failed (0x%08x channel %d)",
4436 status, channel));
4437 return (status);
4438 }
4439
4440 NXGE_DEBUG_MSG((nxgep, RX_CTL,
4441 "==> nxge_rxdma_stop_channel: control done"));
4442
4443 /*
4444 * Make sure channel is disabled.
4445 */
4446 status = nxge_disable_rxdma_channel(nxgep, channel);
4447
4448 if (status != NXGE_OK) {
4449 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4450 " nxge_rxdma_stop_channel: "
4451 " init enable rxdma failed (0x%08x channel %d)",
4452 status, channel));
4453 return (status);
4454 }
4455
4456 if (!isLDOMguest(nxgep)) {
4457 /*
4458 * Enable RxMAC = A.9.2.10
4459 */
4460 if (nxge_rx_mac_enable(nxgep) != NXGE_OK) {
4461 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4462 "nxge_rxdma_stop_channel: Rx MAC still disabled"));
4463 }
4464 }
4465
4466 NXGE_DEBUG_MSG((nxgep,
4467 RX_CTL, "==> nxge_rxdma_stop_channel: disable done"));
4468
4469 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_stop_channel"));
4470
4471 return (NXGE_OK);
4472 }
4473
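/*
 * nxge_rxdma_handle_sys_errors
 *
 *	Read the RX control/data FIFO error status, count ID-mismatch
 *	errors, and, if a ZCP or IPP EOP error belongs to this port,
 *	hand it to nxge_rxdma_handle_port_errors().
 *
 * Arguments:
 * 	nxgep
 *
 * NPI/NXGE function calls:
 *	npi_rxdma_rxctl_fifo_error_intr_get()
 */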
4474 nxge_status_t
4475 nxge_rxdma_handle_sys_errors(p_nxge_t nxgep)
4476 {
4477 npi_handle_t handle;
4478 p_nxge_rdc_sys_stats_t statsp;
4479 rx_ctl_dat_fifo_stat_t stat;
4480 uint32_t zcp_err_status;
4481 uint32_t ipp_err_status;
4482 nxge_status_t status = NXGE_OK;
4483 npi_status_t rs = NPI_SUCCESS;
4484 boolean_t my_err = B_FALSE;
4485
4486 handle = nxgep->npi_handle;
4487 statsp = (p_nxge_rdc_sys_stats_t)&nxgep->statsp->rdc_sys_stats;
4488
4489 rs = npi_rxdma_rxctl_fifo_error_intr_get(handle, &stat);
4490
4491 if (rs != NPI_SUCCESS)
4492 return (NXGE_ERROR | rs);
4493
4494 if (stat.bits.ldw.id_mismatch) {
4495 statsp->id_mismatch++;
4496 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, NULL,
4497 NXGE_FM_EREPORT_RDMC_ID_MISMATCH);
4498 /* Global fatal error encountered */
4499 }
4500
4501 if ((stat.bits.ldw.zcp_eop_err) || (stat.bits.ldw.ipp_eop_err)) {
4502 switch (nxgep->mac.portnum) {
4503 case 0:
4504 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT0) ||
4505 (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT0)) {
4506 my_err = B_TRUE;
4507 zcp_err_status = stat.bits.ldw.zcp_eop_err;
4508 ipp_err_status = stat.bits.ldw.ipp_eop_err;
4509 }
4510 break;
4511 case 1:
4512 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT1) ||
4513 (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT1)) {
4514 my_err = B_TRUE;
4515 zcp_err_status = stat.bits.ldw.zcp_eop_err;
4516 ipp_err_status = stat.bits.ldw.ipp_eop_err;
4517 }
4518 break;
4519 case 2:
4520 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT2) ||
4521 (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT2)) {
4522 my_err = B_TRUE;
4523 zcp_err_status = stat.bits.ldw.zcp_eop_err;
4524 ipp_err_status = stat.bits.ldw.ipp_eop_err;
4525 }
4526 break;
4527 case 3:
4528 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT3) ||
4529 (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT3)) {
4530 my_err = B_TRUE;
4531 zcp_err_status = stat.bits.ldw.zcp_eop_err;
4532 ipp_err_status = stat.bits.ldw.ipp_eop_err;
4533 }
4534 break;
4535 default:
4536 return (NXGE_ERROR);
4537 }
4538 }
4539
4540 if (my_err) {
4541 status = nxge_rxdma_handle_port_errors(nxgep, ipp_err_status,
4542 zcp_err_status);
4543 if (status != NXGE_OK)
4544 return (status);
4545 }
4546
4547 return (NXGE_OK);
4548 }
4549
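/*
 * nxge_rxdma_handle_port_errors
 *
 *	Account for IPP and ZCP EOP errors on this port and, if either
 *	occurred, run the fatal Rx port recovery.
 *
 * Arguments:
 * 	nxgep
 * 	ipp_status	IPP EOP error bits (one per port).
 * 	zcp_status	ZCP EOP error bits (one per port).
 */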
4550 static nxge_status_t
4551 nxge_rxdma_handle_port_errors(p_nxge_t nxgep, uint32_t ipp_status,
4552 uint32_t zcp_status)
4553 {
4554 boolean_t rxport_fatal = B_FALSE;
4555 p_nxge_rdc_sys_stats_t statsp;
4556 nxge_status_t status = NXGE_OK;
4557 uint8_t portn;
4558
4559 portn = nxgep->mac.portnum;
4560 statsp = (p_nxge_rdc_sys_stats_t)&nxgep->statsp->rdc_sys_stats;
4561
4562 if (ipp_status & (0x1 << portn)) {
4563 statsp->ipp_eop_err++;
4564 NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
4565 NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR);
4566 rxport_fatal = B_TRUE;
4567 }
4568
4569 if (zcp_status & (0x1 << portn)) {
4570 statsp->zcp_eop_err++;
4571 NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
4572 NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR);
4573 rxport_fatal = B_TRUE;
4574 }
4575
4576 if (rxport_fatal) {
4577 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4578 		    " nxge_rxdma_handle_port_errors: "
4579 " fatal error on Port #%d\n",
4580 portn));
4581 status = nxge_rx_port_fatal_err_recover(nxgep);
4582 if (status == NXGE_OK) {
4583 FM_SERVICE_RESTORED(nxgep);
4584 }
4585 }
4586
4587 return (status);
4588 }
4589
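/*
 * nxge_rxdma_fatal_err_recover
 *
 *	Recover one RXDMA channel from a fatal error: disable and reset
 *	the RDC, reinitialize the RCR descriptor ring and RBR indexes,
 *	mark buffers that are no longer in use as free, and restart the
 *	channel with nxge_rxdma_start_channel().
 *
 * Arguments:
 * 	nxgep
 * 	channel		The channel to recover.
 */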
4590 static nxge_status_t
4591 nxge_rxdma_fatal_err_recover(p_nxge_t nxgep, uint16_t channel)
4592 {
4593 npi_handle_t handle;
4594 npi_status_t rs = NPI_SUCCESS;
4595 nxge_status_t status = NXGE_OK;
4596 p_rx_rbr_ring_t rbrp;
4597 p_rx_rcr_ring_t rcrp;
4598 p_rx_mbox_t mboxp;
4599 rx_dma_ent_msk_t ent_mask;
4600 p_nxge_dma_common_t dmap;
4601 uint32_t ref_cnt;
4602 p_rx_msg_t rx_msg_p;
4603 int i;
4604 uint32_t nxge_port_rcr_size;
4605
4606 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fatal_err_recover"));
4607 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4608 "Recovering from RxDMAChannel#%d error...", channel));
4609
4610 /*
4611 	 * Stop the DMA channel and wait for the stop-done
4612 	 * indication. If the stop-done bit is not set, then
4613 	 * flag an error.
4614 */
4615
4616 handle = NXGE_DEV_NPI_HANDLE(nxgep);
4617 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Rx DMA stop..."));
4618
4619 rbrp = (p_rx_rbr_ring_t)nxgep->rx_rbr_rings->rbr_rings[channel];
4620 rcrp = (p_rx_rcr_ring_t)nxgep->rx_rcr_rings->rcr_rings[channel];
4621
4622 MUTEX_ENTER(&rbrp->lock);
4623 MUTEX_ENTER(&rbrp->post_lock);
4624
4625 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxDMA channel..."));
4626
4627 rs = npi_rxdma_cfg_rdc_disable(handle, channel);
4628 if (rs != NPI_SUCCESS) {
4629 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4630 "nxge_disable_rxdma_channel:failed"));
4631 goto fail;
4632 }
4633
4634 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxDMA interrupt..."));
4635
4636 /* Disable interrupt */
4637 ent_mask.value = RX_DMA_ENT_MSK_ALL;
4638 rs = npi_rxdma_event_mask(handle, OP_SET, channel, &ent_mask);
4639 if (rs != NPI_SUCCESS) {
4640 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4641 "nxge_rxdma_stop_channel: "
4642 "set rxdma event masks failed (channel %d)",
4643 channel));
4644 }
4645
4646 NXGE_DEBUG_MSG((nxgep, RX_CTL, "RxDMA channel reset..."));
4647
4648 /* Reset RXDMA channel */
4649 rs = npi_rxdma_cfg_rdc_reset(handle, channel);
4650 if (rs != NPI_SUCCESS) {
4651 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4652 "nxge_rxdma_fatal_err_recover: "
4653 " reset rxdma failed (channel %d)", channel));
4654 goto fail;
4655 }
4656
4657 nxge_port_rcr_size = nxgep->nxge_port_rcr_size;
4658
4659 mboxp = (p_rx_mbox_t)nxgep->rx_mbox_areas_p->rxmbox_areas[channel];
4660
4661 rbrp->rbr_wr_index = (rbrp->rbb_max - 1);
4662 rbrp->rbr_rd_index = 0;
4663
4664 rcrp->comp_rd_index = 0;
4665 rcrp->comp_wt_index = 0;
4666 rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p =
4667 (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc);
4668 #if defined(__i386)
4669 rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp =
4670 (p_rcr_entry_t)(uint32_t)DMA_COMMON_IOADDR(rcrp->rcr_desc);
4671 #else
4672 rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp =
4673 (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc);
4674 #endif
4675
4676 rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p +
4677 (nxge_port_rcr_size - 1);
4678 rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp +
4679 (nxge_port_rcr_size - 1);
4680
4681 dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc;
4682 bzero((caddr_t)dmap->kaddrp, dmap->alength);
4683
4684 cmn_err(CE_NOTE, "!rbr entries = %d\n", rbrp->rbr_max_size);
4685
4686 for (i = 0; i < rbrp->rbr_max_size; i++) {
4687 rx_msg_p = rbrp->rx_msg_ring[i];
4688 ref_cnt = rx_msg_p->ref_cnt;
4689 if (ref_cnt != 1) {
4690 if (rx_msg_p->cur_usage_cnt !=
4691 rx_msg_p->max_usage_cnt) {
4692 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4693 "buf[%d]: cur_usage_cnt = %d "
4694 "max_usage_cnt = %d\n", i,
4695 rx_msg_p->cur_usage_cnt,
4696 rx_msg_p->max_usage_cnt));
4697 } else {
4698 /* Buffer can be re-posted */
4699 rx_msg_p->free = B_TRUE;
4700 rx_msg_p->cur_usage_cnt = 0;
4701 rx_msg_p->max_usage_cnt = 0xbaddcafe;
4702 rx_msg_p->pkt_buf_size = 0;
4703 }
4704 }
4705 }
4706
4707 NXGE_DEBUG_MSG((nxgep, RX_CTL, "RxDMA channel re-start..."));
4708
4709 status = nxge_rxdma_start_channel(nxgep, channel, rbrp, rcrp, mboxp);
4710 if (status != NXGE_OK) {
4711 goto fail;
4712 }
4713
4714 MUTEX_EXIT(&rbrp->post_lock);
4715 MUTEX_EXIT(&rbrp->lock);
4716
4717 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4718 "Recovery Successful, RxDMAChannel#%d Restored",
4719 channel));
4720 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fatal_err_recover"));
4721 return (NXGE_OK);
4722
4723 fail:
4724 MUTEX_EXIT(&rbrp->post_lock);
4725 MUTEX_EXIT(&rbrp->lock);
4726 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed"));
4727 return (NXGE_ERROR | rs);
4728 }
4729
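/*
 * nxge_rx_port_fatal_err_recover
 *
 *	Recover the whole Rx port: disable the RxMAC, recover every
 *	RXDMA channel owned by this port, reset and reinitialize the
 *	IPP and the RxMAC, then re-enable the RxMAC.
 *
 * Arguments:
 * 	nxgep
 */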
4730 nxge_status_t
4731 nxge_rx_port_fatal_err_recover(p_nxge_t nxgep)
4732 {
4733 nxge_grp_set_t *set = &nxgep->rx_set;
4734 nxge_status_t status = NXGE_OK;
4735 p_rx_rcr_ring_t rcrp;
4736 int rdc;
4737
4738 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_port_fatal_err_recover"));
4739 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4740 "Recovering from RxPort error..."));
4741 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disabling RxMAC...\n"));
4742
4743 if (nxge_rx_mac_disable(nxgep) != NXGE_OK)
4744 goto fail;
4745
4746 NXGE_DELAY(1000);
4747
4748 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Stopping all RxDMA channels..."));
4749
4750 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
4751 if ((1 << rdc) & set->owned.map) {
4752 rcrp = nxgep->rx_rcr_rings->rcr_rings[rdc];
4753 if (rcrp != NULL) {
4754 MUTEX_ENTER(&rcrp->lock);
4755 if (nxge_rxdma_fatal_err_recover(nxgep,
4756 rdc) != NXGE_OK) {
4757 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4758 "Could not recover "
4759 "channel %d", rdc));
4760 }
4761 MUTEX_EXIT(&rcrp->lock);
4762 }
4763 }
4764 }
4765
4766 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Resetting IPP..."));
4767
4768 /* Reset IPP */
4769 if (nxge_ipp_reset(nxgep) != NXGE_OK) {
4770 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4771 "nxge_rx_port_fatal_err_recover: "
4772 "Failed to reset IPP"));
4773 goto fail;
4774 }
4775
4776 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Reset RxMAC..."));
4777
4778 /* Reset RxMAC */
4779 if (nxge_rx_mac_reset(nxgep) != NXGE_OK) {
4780 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4781 "nxge_rx_port_fatal_err_recover: "
4782 "Failed to reset RxMAC"));
4783 goto fail;
4784 }
4785
4786 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-initialize IPP..."));
4787
4788 /* Re-Initialize IPP */
4789 if (nxge_ipp_init(nxgep) != NXGE_OK) {
4790 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4791 "nxge_rx_port_fatal_err_recover: "
4792 "Failed to init IPP"));
4793 goto fail;
4794 }
4795
4796 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-initialize RxMAC..."));
4797
4798 /* Re-Initialize RxMAC */
4799 if ((status = nxge_rx_mac_init(nxgep)) != NXGE_OK) {
4800 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4801 "nxge_rx_port_fatal_err_recover: "
4802 		    "Failed to initialize RxMAC"));
4803 goto fail;
4804 }
4805
4806 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-enable RxMAC..."));
4807
4808 /* Re-enable RxMAC */
4809 if ((status = nxge_rx_mac_enable(nxgep)) != NXGE_OK) {
4810 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4811 "nxge_rx_port_fatal_err_recover: "
4812 "Failed to enable RxMAC"));
4813 goto fail;
4814 }
4815
4816 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4817 "Recovery Successful, RxPort Restored"));
4818
4819 return (NXGE_OK);
4820 fail:
4821 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed"));
4822 return (status);
4823 }
4824
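/*
 * nxge_rxdma_inject_err
 *
 *	Inject an RDMC error for testing by setting the corresponding
 *	bit in the RX_DMA_CTL_STAT debug register (per-channel errors)
 *	or the RX_CTL_DAT_FIFO_STAT debug register (per-port errors).
 *
 * Arguments:
 * 	nxgep
 * 	err_id		The NXGE_FM_EREPORT_RDMC_* error to inject.
 * 	chan		The channel to inject the error on.
 */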
4825 void
4826 nxge_rxdma_inject_err(p_nxge_t nxgep, uint32_t err_id, uint8_t chan)
4827 {
4828 rx_dma_ctl_stat_t cs;
4829 rx_ctl_dat_fifo_stat_t cdfs;
4830
4831 switch (err_id) {
4832 case NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR:
4833 case NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR:
4834 case NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR:
4835 case NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR:
4836 case NXGE_FM_EREPORT_RDMC_RBR_TMOUT:
4837 case NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR:
4838 case NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS:
4839 case NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR:
4840 case NXGE_FM_EREPORT_RDMC_RCRINCON:
4841 case NXGE_FM_EREPORT_RDMC_RCRFULL:
4842 case NXGE_FM_EREPORT_RDMC_RBRFULL:
4843 case NXGE_FM_EREPORT_RDMC_RBRLOGPAGE:
4844 case NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE:
4845 case NXGE_FM_EREPORT_RDMC_CONFIG_ERR:
4846 RXDMA_REG_READ64(nxgep->npi_handle, RX_DMA_CTL_STAT_DBG_REG,
4847 chan, &cs.value);
4848 if (err_id == NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR)
4849 cs.bits.hdw.rcr_ack_err = 1;
4850 else if (err_id == NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR)
4851 cs.bits.hdw.dc_fifo_err = 1;
4852 else if (err_id == NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR)
4853 cs.bits.hdw.rcr_sha_par = 1;
4854 else if (err_id == NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR)
4855 cs.bits.hdw.rbr_pre_par = 1;
4856 else if (err_id == NXGE_FM_EREPORT_RDMC_RBR_TMOUT)
4857 cs.bits.hdw.rbr_tmout = 1;
4858 else if (err_id == NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR)
4859 cs.bits.hdw.rsp_cnt_err = 1;
4860 else if (err_id == NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS)
4861 cs.bits.hdw.byte_en_bus = 1;
4862 else if (err_id == NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR)
4863 cs.bits.hdw.rsp_dat_err = 1;
4864 else if (err_id == NXGE_FM_EREPORT_RDMC_CONFIG_ERR)
4865 cs.bits.hdw.config_err = 1;
4866 else if (err_id == NXGE_FM_EREPORT_RDMC_RCRINCON)
4867 cs.bits.hdw.rcrincon = 1;
4868 else if (err_id == NXGE_FM_EREPORT_RDMC_RCRFULL)
4869 cs.bits.hdw.rcrfull = 1;
4870 else if (err_id == NXGE_FM_EREPORT_RDMC_RBRFULL)
4871 cs.bits.hdw.rbrfull = 1;
4872 else if (err_id == NXGE_FM_EREPORT_RDMC_RBRLOGPAGE)
4873 cs.bits.hdw.rbrlogpage = 1;
4874 else if (err_id == NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE)
4875 cs.bits.hdw.cfiglogpage = 1;
4876 #if defined(__i386)
4877 cmn_err(CE_NOTE, "!Write 0x%llx to RX_DMA_CTL_STAT_DBG_REG\n",
4878 cs.value);
4879 #else
4880 cmn_err(CE_NOTE, "!Write 0x%lx to RX_DMA_CTL_STAT_DBG_REG\n",
4881 cs.value);
4882 #endif
4883 RXDMA_REG_WRITE64(nxgep->npi_handle, RX_DMA_CTL_STAT_DBG_REG,
4884 chan, cs.value);
4885 break;
4886 case NXGE_FM_EREPORT_RDMC_ID_MISMATCH:
4887 case NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR:
4888 case NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR:
4889 cdfs.value = 0;
4890 if (err_id == NXGE_FM_EREPORT_RDMC_ID_MISMATCH)
4891 cdfs.bits.ldw.id_mismatch = (1 << nxgep->mac.portnum);
4892 else if (err_id == NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR)
4893 cdfs.bits.ldw.zcp_eop_err = (1 << nxgep->mac.portnum);
4894 else if (err_id == NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR)
4895 cdfs.bits.ldw.ipp_eop_err = (1 << nxgep->mac.portnum);
4896 #if defined(__i386)
4897 cmn_err(CE_NOTE,
4898 "!Write 0x%llx to RX_CTL_DAT_FIFO_STAT_DBG_REG\n",
4899 cdfs.value);
4900 #else
4901 cmn_err(CE_NOTE,
4902 "!Write 0x%lx to RX_CTL_DAT_FIFO_STAT_DBG_REG\n",
4903 cdfs.value);
4904 #endif
4905 NXGE_REG_WR64(nxgep->npi_handle,
4906 RX_CTL_DAT_FIFO_STAT_DBG_REG, cdfs.value);
4907 break;
4908 case NXGE_FM_EREPORT_RDMC_DCF_ERR:
4909 break;
4910 case NXGE_FM_EREPORT_RDMC_RCR_ERR:
4911 break;
4912 }
4913 }
4914
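/*
 * nxge_rxdma_databuf_free
 *
 *	Free the data buffer chunks of an RBR ring.  Chunks allocated
 *	with DDI_MEM_ALLOC are not freed here; kmem and contig_mem
 *	chunks are released via nxge_free_buf().
 *
 * Arguments:
 * 	rbr_p		The RBR ring whose buffers are freed.
 */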
4915 static void
4916 nxge_rxdma_databuf_free(p_rx_rbr_ring_t rbr_p)
4917 {
4918 rxring_info_t *ring_info;
4919 int index;
4920 uint32_t chunk_size;
4921 uint64_t kaddr;
4922 uint_t num_blocks;
4923
4924 NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_rxdma_databuf_free"));
4925
4926 if (rbr_p == NULL) {
4927 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
4928 "==> nxge_rxdma_databuf_free: NULL rbr pointer"));
4929 return;
4930 }
4931
4932 if (rbr_p->rbr_alloc_type == DDI_MEM_ALLOC) {
4933 NXGE_DEBUG_MSG((NULL, DMA_CTL,
4934 "<== nxge_rxdma_databuf_free: DDI"));
4935 return;
4936 }
4937
4938 ring_info = rbr_p->ring_info;
4939 if (ring_info == NULL) {
4940 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
4941 "==> nxge_rxdma_databuf_free: NULL ring info"));
4942 return;
4943 }
4944 num_blocks = rbr_p->num_blocks;
4945 for (index = 0; index < num_blocks; index++) {
4946 kaddr = ring_info->buffer[index].kaddr;
4947 chunk_size = ring_info->buffer[index].buf_size;
4948 NXGE_DEBUG_MSG((NULL, DMA_CTL,
4949 "==> nxge_rxdma_databuf_free: free chunk %d "
4950 "kaddrp $%p chunk size %d",
4951 index, kaddr, chunk_size));
4952 		if (kaddr == 0) continue;
4953 		nxge_free_buf(rbr_p->rbr_alloc_type, kaddr, chunk_size);
4954 		ring_info->buffer[index].kaddr = 0;
4955 }
4956
4957 NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_rxdma_databuf_free"));
4958 }
4959
4960 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
4961 extern void contig_mem_free(void *, size_t);
4962 #endif
4963
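/*
 * nxge_free_buf
 *
 *	Free one receive buffer chunk according to how it was allocated
 *	(KMEM_ALLOC or, on sun4v with the NIU logical page workaround,
 *	CONTIG_MEM_ALLOC).
 *
 * Arguments:
 * 	alloc_type	How the buffer was allocated.
 * 	kaddr		Kernel virtual address of the buffer.
 * 	buf_size	Size of the buffer in bytes.
 */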
4964 void
4965 nxge_free_buf(buf_alloc_type_t alloc_type, uint64_t kaddr, uint32_t buf_size)
4966 {
4967 NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_free_buf"));
4968
4969 	if (kaddr == 0 || buf_size == 0) {
4970 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
4971 "==> nxge_free_buf: invalid kaddr $%p size to free %d",
4972 kaddr, buf_size));
4973 return;
4974 }
4975
4976 switch (alloc_type) {
4977 case KMEM_ALLOC:
4978 NXGE_DEBUG_MSG((NULL, DMA_CTL,
4979 "==> nxge_free_buf: freeing kmem $%p size %d",
4980 kaddr, buf_size));
4981 #if defined(__i386)
4982 KMEM_FREE((void *)(uint32_t)kaddr, buf_size);
4983 #else
4984 KMEM_FREE((void *)kaddr, buf_size);
4985 #endif
4986 break;
4987
4988 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
4989 case CONTIG_MEM_ALLOC:
4990 NXGE_DEBUG_MSG((NULL, DMA_CTL,
4991 "==> nxge_free_buf: freeing contig_mem kaddr $%p size %d",
4992 kaddr, buf_size));
4993 contig_mem_free((void *)kaddr, buf_size);
4994 break;
4995 #endif
4996
4997 default:
4998 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
4999 "<== nxge_free_buf: unsupported alloc type %d",
5000 alloc_type));
5001 return;
5002 }
5003
5004 NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_free_buf"));
5005 }
5006