1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
25 */
26
27 #include <sys/nxge/nxge_impl.h>
28 #include <sys/nxge/nxge_rxdma.h>
29 #include <sys/nxge/nxge_hio.h>
30
31 #if !defined(_BIG_ENDIAN)
32 #include <npi_rx_rd32.h>
33 #endif
34 #include <npi_rx_rd64.h>
35 #include <npi_rx_wr64.h>
36
37 #define NXGE_ACTUAL_RDCGRP(nxgep, rdcgrp) \
38 (rdcgrp + nxgep->pt_config.hw_config.def_mac_rxdma_grpid)
39 #define NXGE_ACTUAL_RDC(nxgep, rdc) \
40 (rdc + nxgep->pt_config.hw_config.start_rdc)
41
42 /*
43 * Globals: tunable parameters (/etc/system or adb)
44 *
45 */
46 extern uint32_t nxge_rbr_size;
47 extern uint32_t nxge_rcr_size;
48 extern uint32_t nxge_rbr_spare_size;
49 extern uint16_t nxge_rdc_buf_offset;
50
51 extern uint32_t nxge_mblks_pending;
52
53 /*
54 * Tunable to reduce the amount of time spent in the
55 * ISR doing Rx Processing.
56 */
57 extern uint32_t nxge_max_rx_pkts;
58
59 /*
60 * Tunables to manage the receive buffer blocks.
61 *
62 * nxge_rx_threshold_hi: copy all buffers.
63  * nxge_rx_buf_size_type: receive buffer block size type.
64 * nxge_rx_threshold_lo: copy only up to tunable block size type.
65 */
66 extern nxge_rxbuf_threshold_t nxge_rx_threshold_hi;
67 extern nxge_rxbuf_type_t nxge_rx_buf_size_type;
68 extern nxge_rxbuf_threshold_t nxge_rx_threshold_lo;
69
70 extern uint32_t nxge_cksum_offload;
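/*
 * Illustrative note (added; not part of the original source): the
 * externs above are the tunables referred to in the comments and are
 * defined elsewhere in the driver.  Assuming the usual Solaris
 * /etc/system syntax, one could be overridden at boot time with a
 * line such as:
 *
 *	set nxge:nxge_max_rx_pkts = 1024
 *
 * The value 1024 is only an example.
 */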
71
72 static nxge_status_t nxge_map_rxdma(p_nxge_t, int);
73 static void nxge_unmap_rxdma(p_nxge_t, int);
74
75 static nxge_status_t nxge_rxdma_hw_start_common(p_nxge_t);
76
77 static nxge_status_t nxge_rxdma_hw_start(p_nxge_t, int);
78 static void nxge_rxdma_hw_stop(p_nxge_t, int);
79
80 static nxge_status_t nxge_map_rxdma_channel(p_nxge_t, uint16_t,
81 p_nxge_dma_common_t *, p_rx_rbr_ring_t *,
82 uint32_t,
83 p_nxge_dma_common_t *, p_rx_rcr_ring_t *,
84 p_rx_mbox_t *);
85 static void nxge_unmap_rxdma_channel(p_nxge_t, uint16_t,
86 p_rx_rbr_ring_t, p_rx_rcr_ring_t, p_rx_mbox_t);
87
88 static nxge_status_t nxge_map_rxdma_channel_cfg_ring(p_nxge_t,
89 uint16_t,
90 p_nxge_dma_common_t *, p_rx_rbr_ring_t *,
91 p_rx_rcr_ring_t *, p_rx_mbox_t *);
92 static void nxge_unmap_rxdma_channel_cfg_ring(p_nxge_t,
93 p_rx_rcr_ring_t, p_rx_mbox_t);
94
95 static nxge_status_t nxge_map_rxdma_channel_buf_ring(p_nxge_t,
96 uint16_t,
97 p_nxge_dma_common_t *,
98 p_rx_rbr_ring_t *, uint32_t);
99 static void nxge_unmap_rxdma_channel_buf_ring(p_nxge_t,
100 p_rx_rbr_ring_t);
101
102 static nxge_status_t nxge_rxdma_start_channel(p_nxge_t, uint16_t,
103 p_rx_rbr_ring_t, p_rx_rcr_ring_t, p_rx_mbox_t);
104 static nxge_status_t nxge_rxdma_stop_channel(p_nxge_t, uint16_t);
105
106 static mblk_t *
107 nxge_rx_pkts(p_nxge_t, p_rx_rcr_ring_t, rx_dma_ctl_stat_t, int);
108
109 static void nxge_receive_packet(p_nxge_t,
110 p_rx_rcr_ring_t,
111 p_rcr_entry_t,
112 boolean_t *,
113 mblk_t **, mblk_t **);
114
115 nxge_status_t nxge_disable_rxdma_channel(p_nxge_t, uint16_t);
116
117 static p_rx_msg_t nxge_allocb(size_t, uint32_t, p_nxge_dma_common_t);
118 static void nxge_freeb(p_rx_msg_t);
119 static nxge_status_t nxge_rx_err_evnts(p_nxge_t, int, rx_dma_ctl_stat_t);
120
121 static nxge_status_t nxge_rxdma_handle_port_errors(p_nxge_t,
122 uint32_t, uint32_t);
123
124 static nxge_status_t nxge_rxbuf_index_info_init(p_nxge_t,
125 p_rx_rbr_ring_t);
126
127
128 static nxge_status_t
129 nxge_rxdma_fatal_err_recover(p_nxge_t, uint16_t);
130
131 nxge_status_t
132 nxge_rx_port_fatal_err_recover(p_nxge_t);
133
134 static void nxge_rxdma_databuf_free(p_rx_rbr_ring_t);
135
136 nxge_status_t
137 nxge_init_rxdma_channels(p_nxge_t nxgep)
138 {
139 nxge_grp_set_t *set = &nxgep->rx_set;
140 int i, count, channel;
141 nxge_grp_t *group;
142 dc_map_t map;
143 int dev_gindex;
144
145 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_init_rxdma_channels"));
146
147 if (!isLDOMguest(nxgep)) {
148 if (nxge_rxdma_hw_start_common(nxgep) != NXGE_OK) {
149 cmn_err(CE_NOTE, "hw_start_common");
150 return (NXGE_ERROR);
151 }
152 }
153
154 /*
155 * NXGE_LOGICAL_GROUP_MAX > NXGE_MAX_RDC_GROUPS (8)
156 * We only have 8 hardware RDC tables, but we may have
157  * up to 16 logical (software-defined) groups of RDCs,
158 * if we make use of layer 3 & 4 hardware classification.
159 */
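/*
 * Worked example (added for clarity; values are hypothetical): if
 * def_mac_rxdma_grpid is 2 and logical group i == 1 is populated,
 * then dev_gindex == 3, and the loop below walks the RDC bitmap of
 * hardware table 3, adding each mapped channel to that group.
 */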
160 for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
161 if ((1 << i) & set->lg.map) {
162 group = set->group[i];
163 dev_gindex =
164 nxgep->pt_config.hw_config.def_mac_rxdma_grpid + i;
165 map = nxgep->pt_config.rdc_grps[dev_gindex].map;
166 for (channel = 0; channel < NXGE_MAX_RDCS; channel++) {
167 if ((1 << channel) & map) {
168 if ((nxge_grp_dc_add(nxgep,
169 group, VP_BOUND_RX, channel)))
170 goto init_rxdma_channels_exit;
171 }
172 }
173 }
174 if (++count == set->lg.count)
175 break;
176 }
177
178 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_init_rxdma_channels"));
179 return (NXGE_OK);
180
181 init_rxdma_channels_exit:
182 for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
183 if ((1 << i) & set->lg.map) {
184 group = set->group[i];
185 dev_gindex =
186 nxgep->pt_config.hw_config.def_mac_rxdma_grpid + i;
187 map = nxgep->pt_config.rdc_grps[dev_gindex].map;
188 for (channel = 0; channel < NXGE_MAX_RDCS; channel++) {
189 if ((1 << channel) & map) {
190 nxge_grp_dc_remove(nxgep,
191 VP_BOUND_RX, channel);
192 }
193 }
194 }
195 if (++count == set->lg.count)
196 break;
197 }
198
199 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_init_rxdma_channels"));
200 return (NXGE_ERROR);
201 }
202
203 nxge_status_t
204 nxge_init_rxdma_channel(p_nxge_t nxge, int channel)
205 {
206 nxge_status_t status;
207
208 NXGE_DEBUG_MSG((nxge, MEM2_CTL, "==> nxge_init_rxdma_channel"));
209
210 status = nxge_map_rxdma(nxge, channel);
211 if (status != NXGE_OK) {
212 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
213 "<== nxge_init_rxdma: status 0x%x", status));
214 return (status);
215 }
216
217 #if defined(sun4v)
218 if (isLDOMguest(nxge)) {
219 /* set rcr_ring */
220 p_rx_rcr_ring_t ring = nxge->rx_rcr_rings->rcr_rings[channel];
221
222 status = nxge_hio_rxdma_bind_intr(nxge, ring, channel);
223 if (status != NXGE_OK) {
224 nxge_unmap_rxdma(nxge, channel);
225 return (status);
226 }
227 }
228 #endif
229
230 status = nxge_rxdma_hw_start(nxge, channel);
231 if (status != NXGE_OK) {
232 nxge_unmap_rxdma(nxge, channel);
233 }
234
235 if (!nxge->statsp->rdc_ksp[channel])
236 nxge_setup_rdc_kstats(nxge, channel);
237
238 NXGE_DEBUG_MSG((nxge, MEM2_CTL,
239 "<== nxge_init_rxdma_channel: status 0x%x", status));
240
241 return (status);
242 }
243
244 void
245 nxge_uninit_rxdma_channels(p_nxge_t nxgep)
246 {
247 nxge_grp_set_t *set = &nxgep->rx_set;
248 int rdc;
249
250 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_rxdma_channels"));
251
252 if (set->owned.map == 0) {
253 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
254 "nxge_uninit_rxdma_channels: no channels"));
255 return;
256 }
257
258 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
259 if ((1 << rdc) & set->owned.map) {
260 nxge_grp_dc_remove(nxgep, VP_BOUND_RX, rdc);
261 }
262 }
263
264 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_uninit_rxdma_channels"));
265 }
266
267 void
268 nxge_uninit_rxdma_channel(p_nxge_t nxgep, int channel)
269 {
270 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_rxdma_channel"));
271
272 if (nxgep->statsp->rdc_ksp[channel]) {
273 kstat_delete(nxgep->statsp->rdc_ksp[channel]);
274 nxgep->statsp->rdc_ksp[channel] = 0;
275 }
276
277 nxge_rxdma_hw_stop(nxgep, channel);
278 nxge_unmap_rxdma(nxgep, channel);
279
280 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_uninit_rxdma_channel"));
281 }
282
283 nxge_status_t
284 nxge_reset_rxdma_channel(p_nxge_t nxgep, uint16_t channel)
285 {
286 npi_handle_t handle;
287 npi_status_t rs = NPI_SUCCESS;
288 nxge_status_t status = NXGE_OK;
289
290 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_reset_rxdma_channel"));
291
292 handle = NXGE_DEV_NPI_HANDLE(nxgep);
293 rs = npi_rxdma_cfg_rdc_reset(handle, channel);
294
295 if (rs != NPI_SUCCESS) {
296 status = NXGE_ERROR | rs;
297 }
298
299 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_reset_rxdma_channel"));
300
301 return (status);
302 }
303
304 void
305 nxge_rxdma_regs_dump_channels(p_nxge_t nxgep)
306 {
307 nxge_grp_set_t *set = &nxgep->rx_set;
308 int rdc;
309
310 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_regs_dump_channels"));
311
312 if (!isLDOMguest(nxgep)) {
313 npi_handle_t handle = NXGE_DEV_NPI_HANDLE(nxgep);
314 (void) npi_rxdma_dump_fzc_regs(handle);
315 }
316
317 if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) {
318 NXGE_DEBUG_MSG((nxgep, TX_CTL,
319 "nxge_rxdma_regs_dump_channels: "
320 "NULL ring pointer(s)"));
321 return;
322 }
323
324 if (set->owned.map == 0) {
325 NXGE_DEBUG_MSG((nxgep, RX_CTL,
326 "nxge_rxdma_regs_dump_channels: no channels"));
327 return;
328 }
329
330 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
331 if ((1 << rdc) & set->owned.map) {
332 rx_rbr_ring_t *ring =
333 nxgep->rx_rbr_rings->rbr_rings[rdc];
334 if (ring) {
335 (void) nxge_dump_rxdma_channel(nxgep, rdc);
336 }
337 }
338 }
339
340 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_regs_dump_channels"));
341 }
342
343 nxge_status_t
344 nxge_dump_rxdma_channel(p_nxge_t nxgep, uint8_t channel)
345 {
346 npi_handle_t handle;
347 npi_status_t rs = NPI_SUCCESS;
348 nxge_status_t status = NXGE_OK;
349
350 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_dump_rxdma_channel"));
351
352 handle = NXGE_DEV_NPI_HANDLE(nxgep);
353 rs = npi_rxdma_dump_rdc_regs(handle, channel);
354
355 if (rs != NPI_SUCCESS) {
356 status = NXGE_ERROR | rs;
357 }
358 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dump_rxdma_channel"));
359 return (status);
360 }
361
362 nxge_status_t
363 nxge_init_rxdma_channel_event_mask(p_nxge_t nxgep, uint16_t channel,
364 p_rx_dma_ent_msk_t mask_p)
365 {
366 npi_handle_t handle;
367 npi_status_t rs = NPI_SUCCESS;
368 nxge_status_t status = NXGE_OK;
369
370 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
371 	    "==> nxge_init_rxdma_channel_event_mask"));
372
373 handle = NXGE_DEV_NPI_HANDLE(nxgep);
374 rs = npi_rxdma_event_mask(handle, OP_SET, channel, mask_p);
375 if (rs != NPI_SUCCESS) {
376 status = NXGE_ERROR | rs;
377 }
378
379 return (status);
380 }
381
382 nxge_status_t
383 nxge_init_rxdma_channel_cntl_stat(p_nxge_t nxgep, uint16_t channel,
384 p_rx_dma_ctl_stat_t cs_p)
385 {
386 npi_handle_t handle;
387 npi_status_t rs = NPI_SUCCESS;
388 nxge_status_t status = NXGE_OK;
389
390 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
391 	    "==> nxge_init_rxdma_channel_cntl_stat"));
392
393 handle = NXGE_DEV_NPI_HANDLE(nxgep);
394 rs = npi_rxdma_control_status(handle, OP_SET, channel, cs_p);
395
396 if (rs != NPI_SUCCESS) {
397 status = NXGE_ERROR | rs;
398 }
399
400 return (status);
401 }
402
403 /*
404 * nxge_rxdma_cfg_rdcgrp_default_rdc
405 *
406 * Set the default RDC for an RDC Group (Table)
407 *
408 * Arguments:
409 * nxgep
410 * rdcgrp The group to modify
411 * rdc The new default RDC.
412 *
413 * Notes:
414 *
415 * NPI/NXGE function calls:
416 * npi_rxdma_cfg_rdc_table_default_rdc()
417 *
418 * Registers accessed:
419 * RDC_TBL_REG: FZC_ZCP + 0x10000
420 *
421 * Context:
422 * Service domain
423 */
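/*
 * Illustrative call (added; hypothetical values, not from the
 * original source):
 *
 *	(void) nxge_rxdma_cfg_rdcgrp_default_rdc(nxgep, 0, 0);
 *
 * would make RDC 0 the default for RDC group 0; the NXGE_ACTUAL_*
 * macros above translate both arguments to hardware indices first.
 */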
424 nxge_status_t
425 nxge_rxdma_cfg_rdcgrp_default_rdc(
426 p_nxge_t nxgep,
427 uint8_t rdcgrp,
428 uint8_t rdc)
429 {
430 npi_handle_t handle;
431 npi_status_t rs = NPI_SUCCESS;
432 p_nxge_dma_pt_cfg_t p_dma_cfgp;
433 p_nxge_rdc_grp_t rdc_grp_p;
434 uint8_t actual_rdcgrp, actual_rdc;
435
436 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
437 " ==> nxge_rxdma_cfg_rdcgrp_default_rdc"));
438 p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
439
440 handle = NXGE_DEV_NPI_HANDLE(nxgep);
441
442 /*
443 * This has to be rewritten. Do we even allow this anymore?
444 */
445 rdc_grp_p = &p_dma_cfgp->rdc_grps[rdcgrp];
446 RDC_MAP_IN(rdc_grp_p->map, rdc);
447 rdc_grp_p->def_rdc = rdc;
448
449 actual_rdcgrp = NXGE_ACTUAL_RDCGRP(nxgep, rdcgrp);
450 actual_rdc = NXGE_ACTUAL_RDC(nxgep, rdc);
451
452 rs = npi_rxdma_cfg_rdc_table_default_rdc(
453 handle, actual_rdcgrp, actual_rdc);
454
455 if (rs != NPI_SUCCESS) {
456 return (NXGE_ERROR | rs);
457 }
458 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
459 " <== nxge_rxdma_cfg_rdcgrp_default_rdc"));
460 return (NXGE_OK);
461 }
462
463 nxge_status_t
464 nxge_rxdma_cfg_port_default_rdc(p_nxge_t nxgep, uint8_t port, uint8_t rdc)
465 {
466 npi_handle_t handle;
467
468 uint8_t actual_rdc;
469 npi_status_t rs = NPI_SUCCESS;
470
471 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
472 " ==> nxge_rxdma_cfg_port_default_rdc"));
473
474 handle = NXGE_DEV_NPI_HANDLE(nxgep);
475 actual_rdc = rdc; /* XXX Hack! */
476 rs = npi_rxdma_cfg_default_port_rdc(handle, port, actual_rdc);
477
478
479 if (rs != NPI_SUCCESS) {
480 return (NXGE_ERROR | rs);
481 }
482 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
483 " <== nxge_rxdma_cfg_port_default_rdc"));
484
485 return (NXGE_OK);
486 }
487
488 nxge_status_t
489 nxge_rxdma_cfg_rcr_threshold(p_nxge_t nxgep, uint8_t channel,
490 uint16_t pkts)
491 {
492 npi_status_t rs = NPI_SUCCESS;
493 npi_handle_t handle;
494 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
495 " ==> nxge_rxdma_cfg_rcr_threshold"));
496 handle = NXGE_DEV_NPI_HANDLE(nxgep);
497
498 rs = npi_rxdma_cfg_rdc_rcr_threshold(handle, channel, pkts);
499
500 if (rs != NPI_SUCCESS) {
501 return (NXGE_ERROR | rs);
502 }
503 NXGE_DEBUG_MSG((nxgep, RX2_CTL, " <== nxge_rxdma_cfg_rcr_threshold"));
504 return (NXGE_OK);
505 }
506
507 nxge_status_t
508 nxge_rxdma_cfg_rcr_timeout(p_nxge_t nxgep, uint8_t channel,
509 uint16_t tout, uint8_t enable)
510 {
511 npi_status_t rs = NPI_SUCCESS;
512 npi_handle_t handle;
513 NXGE_DEBUG_MSG((nxgep, RX2_CTL, " ==> nxge_rxdma_cfg_rcr_timeout"));
514 handle = NXGE_DEV_NPI_HANDLE(nxgep);
515 if (enable == 0) {
516 rs = npi_rxdma_cfg_rdc_rcr_timeout_disable(handle, channel);
517 } else {
518 rs = npi_rxdma_cfg_rdc_rcr_timeout(handle, channel,
519 tout);
520 }
521
522 if (rs != NPI_SUCCESS) {
523 return (NXGE_ERROR | rs);
524 }
525 NXGE_DEBUG_MSG((nxgep, RX2_CTL, " <== nxge_rxdma_cfg_rcr_timeout"));
526 return (NXGE_OK);
527 }
528
529 nxge_status_t
530 nxge_enable_rxdma_channel(p_nxge_t nxgep, uint16_t channel,
531 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p)
532 {
533 npi_handle_t handle;
534 rdc_desc_cfg_t rdc_desc;
535 p_rcrcfig_b_t cfgb_p;
536 npi_status_t rs = NPI_SUCCESS;
537
538 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel"));
539 handle = NXGE_DEV_NPI_HANDLE(nxgep);
540 /*
541 * Use configuration data composed at init time.
542 * Write to hardware the receive ring configurations.
543 */
544 rdc_desc.mbox_enable = 1;
545 rdc_desc.mbox_addr = mbox_p->mbox_addr;
546 NXGE_DEBUG_MSG((nxgep, RX_CTL,
547 "==> nxge_enable_rxdma_channel: mboxp $%p($%p)",
548 mbox_p->mbox_addr, rdc_desc.mbox_addr));
549
550 rdc_desc.rbr_len = rbr_p->rbb_max;
551 rdc_desc.rbr_addr = rbr_p->rbr_addr;
552
553 switch (nxgep->rx_bksize_code) {
554 case RBR_BKSIZE_4K:
555 rdc_desc.page_size = SIZE_4KB;
556 break;
557 case RBR_BKSIZE_8K:
558 rdc_desc.page_size = SIZE_8KB;
559 break;
560 case RBR_BKSIZE_16K:
561 rdc_desc.page_size = SIZE_16KB;
562 break;
563 case RBR_BKSIZE_32K:
564 rdc_desc.page_size = SIZE_32KB;
565 break;
566 }
567
568 rdc_desc.size0 = rbr_p->npi_pkt_buf_size0;
569 rdc_desc.valid0 = 1;
570
571 rdc_desc.size1 = rbr_p->npi_pkt_buf_size1;
572 rdc_desc.valid1 = 1;
573
574 rdc_desc.size2 = rbr_p->npi_pkt_buf_size2;
575 rdc_desc.valid2 = 1;
576
577 rdc_desc.full_hdr = rcr_p->full_hdr_flag;
578 rdc_desc.offset = rcr_p->sw_priv_hdr_len;
579
580 rdc_desc.rcr_len = rcr_p->comp_size;
581 rdc_desc.rcr_addr = rcr_p->rcr_addr;
582
583 cfgb_p = &(rcr_p->rcr_cfgb);
584 rdc_desc.rcr_threshold = cfgb_p->bits.ldw.pthres;
585 /* For now, disable this timeout in a guest domain. */
586 if (isLDOMguest(nxgep)) {
587 rdc_desc.rcr_timeout = 0;
588 rdc_desc.rcr_timeout_enable = 0;
589 } else {
590 rdc_desc.rcr_timeout = cfgb_p->bits.ldw.timeout;
591 rdc_desc.rcr_timeout_enable = cfgb_p->bits.ldw.entout;
592 }
593
594 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel: "
595 "rbr_len qlen %d pagesize code %d rcr_len %d",
596 rdc_desc.rbr_len, rdc_desc.page_size, rdc_desc.rcr_len));
597 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel: "
598 "size 0 %d size 1 %d size 2 %d",
599 rbr_p->npi_pkt_buf_size0, rbr_p->npi_pkt_buf_size1,
600 rbr_p->npi_pkt_buf_size2));
601
602 if (nxgep->niu_hw_type == NIU_HW_TYPE_RF)
603 rs = npi_rxdma_cfg_rdc_ring(handle, rbr_p->rdc,
604 &rdc_desc, B_TRUE);
605 else
606 rs = npi_rxdma_cfg_rdc_ring(handle, rbr_p->rdc,
607 &rdc_desc, B_FALSE);
608 if (rs != NPI_SUCCESS) {
609 return (NXGE_ERROR | rs);
610 }
611
612 /*
613 * Enable the timeout and threshold.
614 */
615 rs = npi_rxdma_cfg_rdc_rcr_threshold(handle, channel,
616 rdc_desc.rcr_threshold);
617 if (rs != NPI_SUCCESS) {
618 return (NXGE_ERROR | rs);
619 }
620
621 rs = npi_rxdma_cfg_rdc_rcr_timeout(handle, channel,
622 rdc_desc.rcr_timeout);
623 if (rs != NPI_SUCCESS) {
624 return (NXGE_ERROR | rs);
625 }
626
627 if (!isLDOMguest(nxgep)) {
628 /* Enable the DMA */
629 rs = npi_rxdma_cfg_rdc_enable(handle, channel);
630 if (rs != NPI_SUCCESS) {
631 return (NXGE_ERROR | rs);
632 }
633 }
634
635 /* Kick the DMA engine. */
636 npi_rxdma_rdc_rbr_kick(handle, channel, rbr_p->rbb_max);
637
638 if (!isLDOMguest(nxgep)) {
639 /* Clear the rbr empty bit */
640 (void) npi_rxdma_channel_rbr_empty_clear(handle, channel);
641 }
642
643 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_enable_rxdma_channel"));
644
645 return (NXGE_OK);
646 }
647
648 nxge_status_t
649 nxge_disable_rxdma_channel(p_nxge_t nxgep, uint16_t channel)
650 {
651 npi_handle_t handle;
652 npi_status_t rs = NPI_SUCCESS;
653
654 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_disable_rxdma_channel"));
655 handle = NXGE_DEV_NPI_HANDLE(nxgep);
656
657 /* disable the DMA */
658 rs = npi_rxdma_cfg_rdc_disable(handle, channel);
659 if (rs != NPI_SUCCESS) {
660 NXGE_DEBUG_MSG((nxgep, RX_CTL,
661 "<== nxge_disable_rxdma_channel:failed (0x%x)",
662 rs));
663 return (NXGE_ERROR | rs);
664 }
665
666 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_disable_rxdma_channel"));
667 return (NXGE_OK);
668 }
669
670 nxge_status_t
671 nxge_rxdma_channel_rcrflush(p_nxge_t nxgep, uint8_t channel)
672 {
673 npi_handle_t handle;
674 nxge_status_t status = NXGE_OK;
675
676 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
677 	    "==> nxge_rxdma_channel_rcrflush"));
678
679 handle = NXGE_DEV_NPI_HANDLE(nxgep);
680 npi_rxdma_rdc_rcr_flush(handle, channel);
681
682 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
683 	    "<== nxge_rxdma_channel_rcrflush"));
684 return (status);
685
686 }
687
688 #define MID_INDEX(l, r) ((r + l + 1) >> 1)
689
690 #define TO_LEFT -1
691 #define TO_RIGHT 1
692 #define BOTH_RIGHT (TO_RIGHT + TO_RIGHT)
693 #define BOTH_LEFT (TO_LEFT + TO_LEFT)
694 #define IN_MIDDLE (TO_RIGHT + TO_LEFT)
695 #define NO_HINT 0xffffffff
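/*
 * Note (added for clarity): base_side and end_side below take the
 * values TO_LEFT (-1) or TO_RIGHT (+1), so their sum classifies the
 * anchor chunk: IN_MIDDLE (0) means the address falls inside the
 * anchor, BOTH_RIGHT (+2) means search to the right of it, and
 * BOTH_LEFT (-2) means search to the left.
 */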
696
697 /*ARGSUSED*/
698 nxge_status_t
699 nxge_rxbuf_pp_to_vp(p_nxge_t nxgep, p_rx_rbr_ring_t rbr_p,
700 uint8_t pktbufsz_type, uint64_t *pkt_buf_addr_pp,
701 uint64_t **pkt_buf_addr_p, uint32_t *bufoffset, uint32_t *msg_index)
702 {
703 int bufsize;
704 uint64_t pktbuf_pp;
705 uint64_t dvma_addr;
706 rxring_info_t *ring_info;
707 int base_side, end_side;
708 int r_index, l_index, anchor_index;
709 int found, search_done;
710 uint32_t offset, chunk_size, block_size, page_size_mask;
711 uint32_t chunk_index, block_index, total_index;
712 int max_iterations, iteration;
713 rxbuf_index_info_t *bufinfo;
714
715 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_rxbuf_pp_to_vp"));
716
717 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
718 "==> nxge_rxbuf_pp_to_vp: buf_pp $%p btype %d",
719 pkt_buf_addr_pp,
720 pktbufsz_type));
721 pktbuf_pp = (uint64_t)pkt_buf_addr_pp;
722
723 switch (pktbufsz_type) {
724 case 0:
725 bufsize = rbr_p->pkt_buf_size0;
726 break;
727 case 1:
728 bufsize = rbr_p->pkt_buf_size1;
729 break;
730 case 2:
731 bufsize = rbr_p->pkt_buf_size2;
732 break;
733 case RCR_SINGLE_BLOCK:
734 bufsize = 0;
735 anchor_index = 0;
736 break;
737 default:
738 return (NXGE_ERROR);
739 }
740
741 if (rbr_p->num_blocks == 1) {
742 anchor_index = 0;
743 ring_info = rbr_p->ring_info;
744 bufinfo = (rxbuf_index_info_t *)ring_info->buffer;
745 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
746 "==> nxge_rxbuf_pp_to_vp: (found, 1 block) "
747 "buf_pp $%p btype %d anchor_index %d "
748 "bufinfo $%p",
749 pkt_buf_addr_pp,
750 pktbufsz_type,
751 anchor_index,
752 bufinfo));
753
754 goto found_index;
755 }
756
757 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
758 "==> nxge_rxbuf_pp_to_vp: "
759 "buf_pp $%p btype %d anchor_index %d",
760 pkt_buf_addr_pp,
761 pktbufsz_type,
762 anchor_index));
763
764 ring_info = rbr_p->ring_info;
765 found = B_FALSE;
766 bufinfo = (rxbuf_index_info_t *)ring_info->buffer;
767 iteration = 0;
768 max_iterations = ring_info->max_iterations;
769 /*
770 * First check if this block has been seen
771 * recently. This is indicated by a hint which
772 * is initialized when the first buffer of the block
773 * is seen. The hint is reset when the last buffer of
774 * the block has been processed.
775 * As three block sizes are supported, three hints
776 * are kept. The idea behind the hints is that once
777 * the hardware uses a block for a buffer of that
778 * size, it will use it exclusively for that size
779 * and will use it until it is exhausted. It is assumed
780  * that a single block would be in use for a given
781  * buffer size at any given time.
782 */
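/*
 * Illustrative example (added; not from the original source): when a
 * completion of, say, pktbufsz_type 1 arrives and the type-1 hint
 * already points at the chunk it came from, the lookup below is a
 * single range check; only the first buffer of a chunk (or a buffer
 * of a different size) falls through to the binary search further on.
 */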
783 if (ring_info->hint[pktbufsz_type] != NO_HINT) {
784 anchor_index = ring_info->hint[pktbufsz_type];
785 dvma_addr = bufinfo[anchor_index].dvma_addr;
786 chunk_size = bufinfo[anchor_index].buf_size;
787 if ((pktbuf_pp >= dvma_addr) &&
788 (pktbuf_pp < (dvma_addr + chunk_size))) {
789 found = B_TRUE;
790 /*
791 			 * Check if this is the last buffer in the block.
792 			 * If so, reset the hint for this size.
793 */
794
795 if ((pktbuf_pp + bufsize) >= (dvma_addr + chunk_size))
796 ring_info->hint[pktbufsz_type] = NO_HINT;
797 }
798 }
799
800 if (found == B_FALSE) {
801 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
802 "==> nxge_rxbuf_pp_to_vp: (!found)"
803 "buf_pp $%p btype %d anchor_index %d",
804 pkt_buf_addr_pp,
805 pktbufsz_type,
806 anchor_index));
807
808 /*
809 * This is the first buffer of the block of this
810 * size. Need to search the whole information
811 * array.
812 		 * The search uses a binary search algorithm. It
813 		 * assumes that the information array is already
814 		 * sorted in increasing order:
815 		 * info[0] < info[1] < info[2]  .... < info[n-1]
816 		 * where n is the size of the information array.
817 */
818 r_index = rbr_p->num_blocks - 1;
819 l_index = 0;
820 search_done = B_FALSE;
821 anchor_index = MID_INDEX(r_index, l_index);
822 while (search_done == B_FALSE) {
823 if ((r_index == l_index) ||
824 (iteration >= max_iterations))
825 search_done = B_TRUE;
826 end_side = TO_RIGHT; /* to the right */
827 base_side = TO_LEFT; /* to the left */
828 /* read the DVMA address information and sort it */
829 dvma_addr = bufinfo[anchor_index].dvma_addr;
830 chunk_size = bufinfo[anchor_index].buf_size;
831 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
832 "==> nxge_rxbuf_pp_to_vp: (searching)"
833 "buf_pp $%p btype %d "
834 "anchor_index %d chunk_size %d dvmaaddr $%p",
835 pkt_buf_addr_pp,
836 pktbufsz_type,
837 anchor_index,
838 chunk_size,
839 dvma_addr));
840
841 if (pktbuf_pp >= dvma_addr)
842 base_side = TO_RIGHT; /* to the right */
843 if (pktbuf_pp < (dvma_addr + chunk_size))
844 end_side = TO_LEFT; /* to the left */
845
846 switch (base_side + end_side) {
847 case IN_MIDDLE:
848 /* found */
849 found = B_TRUE;
850 search_done = B_TRUE;
851 if ((pktbuf_pp + bufsize) <
852 (dvma_addr + chunk_size))
853 ring_info->hint[pktbufsz_type] =
854 bufinfo[anchor_index].buf_index;
855 break;
856 case BOTH_RIGHT:
857 /* not found: go to the right */
858 l_index = anchor_index + 1;
859 anchor_index = MID_INDEX(r_index, l_index);
860 break;
861
862 case BOTH_LEFT:
863 /* not found: go to the left */
864 r_index = anchor_index - 1;
865 anchor_index = MID_INDEX(r_index, l_index);
866 break;
867 default: /* should not come here */
868 return (NXGE_ERROR);
869 }
870 iteration++;
871 }
872
873 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
874 "==> nxge_rxbuf_pp_to_vp: (search done)"
875 "buf_pp $%p btype %d anchor_index %d",
876 pkt_buf_addr_pp,
877 pktbufsz_type,
878 anchor_index));
879 }
880
881 if (found == B_FALSE) {
882 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
883 "==> nxge_rxbuf_pp_to_vp: (search failed)"
884 "buf_pp $%p btype %d anchor_index %d",
885 pkt_buf_addr_pp,
886 pktbufsz_type,
887 anchor_index));
888 return (NXGE_ERROR);
889 }
890
891 found_index:
892 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
893 "==> nxge_rxbuf_pp_to_vp: (FOUND1)"
894 "buf_pp $%p btype %d bufsize %d anchor_index %d",
895 pkt_buf_addr_pp,
896 pktbufsz_type,
897 bufsize,
898 anchor_index));
899
900 /* index of the first block in this chunk */
901 chunk_index = bufinfo[anchor_index].start_index;
902 dvma_addr = bufinfo[anchor_index].dvma_addr;
903 page_size_mask = ring_info->block_size_mask;
904
905 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
906 	    "==> nxge_rxbuf_pp_to_vp: (FOUND3, get chunk) "
907 "buf_pp $%p btype %d bufsize %d "
908 "anchor_index %d chunk_index %d dvma $%p",
909 pkt_buf_addr_pp,
910 pktbufsz_type,
911 bufsize,
912 anchor_index,
913 chunk_index,
914 dvma_addr));
915
916 offset = pktbuf_pp - dvma_addr; /* offset within the chunk */
917 block_size = rbr_p->block_size; /* System block(page) size */
918
919 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
920 	    "==> nxge_rxbuf_pp_to_vp: (FOUND4, get chunk) "
921 "buf_pp $%p btype %d bufsize %d "
922 "anchor_index %d chunk_index %d dvma $%p "
923 "offset %d block_size %d",
924 pkt_buf_addr_pp,
925 pktbufsz_type,
926 bufsize,
927 anchor_index,
928 chunk_index,
929 dvma_addr,
930 offset,
931 block_size));
932
933 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> getting total index"));
934
935 block_index = (offset / block_size); /* index within chunk */
936 total_index = chunk_index + block_index;
937
938
939 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
940 "==> nxge_rxbuf_pp_to_vp: "
941 "total_index %d dvma_addr $%p "
942 "offset %d block_size %d "
943 "block_index %d ",
944 total_index, dvma_addr,
945 offset, block_size,
946 block_index));
947 *pkt_buf_addr_p = (uint64_t *)((uint64_t)bufinfo[anchor_index].kaddr +
948 (uint64_t)offset);
949
950 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
951 "==> nxge_rxbuf_pp_to_vp: "
952 "total_index %d dvma_addr $%p "
953 "offset %d block_size %d "
954 "block_index %d "
955 "*pkt_buf_addr_p $%p",
956 total_index, dvma_addr,
957 offset, block_size,
958 block_index,
959 *pkt_buf_addr_p));
960
961
962 *msg_index = total_index;
963 *bufoffset = (offset & page_size_mask);
964
965 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
966 "==> nxge_rxbuf_pp_to_vp: get msg index: "
967 "msg_index %d bufoffset_index %d",
968 *msg_index,
969 *bufoffset));
970
971 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_rxbuf_pp_to_vp"));
972
973 return (NXGE_OK);
974 }
975
976 /*
977  * Comparison function used by the sort routine
978  * (nxge_ksort) below to compare two entries.
979 */
980 static int
981 nxge_sort_compare(const void *p1, const void *p2)
982 {
983
984 rxbuf_index_info_t *a, *b;
985
986 a = (rxbuf_index_info_t *)p1;
987 b = (rxbuf_index_info_t *)p2;
988
989 if (a->dvma_addr > b->dvma_addr)
990 return (1);
991 if (a->dvma_addr < b->dvma_addr)
992 return (-1);
993 return (0);
994 }
995
996
997
998 /*
999 * grabbed this sort implementation from common/syscall/avl.c
1000 *
1001 */
1002 /*
1003 * Generic shellsort, from K&R (1st ed, p 58.), somewhat modified.
1004 * v = Ptr to array/vector of objs
1005 * n = # objs in the array
1006  * s = size of each obj (must be a multiple of the word size)
1007  * f = ptr to function to compare two objs,
1008  *	which returns -1 (less than), 0 (equal), or 1 (greater than)
1009 */
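/*
 * Usage sketch (added; mirrors the call made in
 * nxge_rxbuf_index_info_init() below):
 *
 *	nxge_ksort((void *)ring_info->buffer, max_index,
 *	    sizeof (rxbuf_index_info_t), nxge_sort_compare);
 *
 * i.e. the element size must be word-aligned and the comparison
 * function follows the -1/0/1 convention described above.
 */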
1010 void
1011 nxge_ksort(caddr_t v, int n, int s, int (*f)())
1012 {
1013 int g, i, j, ii;
1014 unsigned int *p1, *p2;
1015 unsigned int tmp;
1016
1017 /* No work to do */
1018 if (v == NULL || n <= 1)
1019 return;
1020 /* Sanity check on arguments */
1021 ASSERT(((uintptr_t)v & 0x3) == 0 && (s & 0x3) == 0);
1022 ASSERT(s > 0);
1023
1024 for (g = n / 2; g > 0; g /= 2) {
1025 for (i = g; i < n; i++) {
1026 for (j = i - g; j >= 0 &&
1027 (*f)(v + j * s, v + (j + g) * s) == 1;
1028 j -= g) {
1029 p1 = (unsigned *)(v + j * s);
1030 p2 = (unsigned *)(v + (j + g) * s);
1031 for (ii = 0; ii < s / 4; ii++) {
1032 tmp = *p1;
1033 *p1++ = *p2;
1034 *p2++ = tmp;
1035 }
1036 }
1037 }
1038 }
1039 }
1040
1041 /*
1042 * Initialize data structures required for rxdma
1043 * buffer dvma->vmem address lookup
1044 */
1045 /*ARGSUSED*/
1046 static nxge_status_t
1047 nxge_rxbuf_index_info_init(p_nxge_t nxgep, p_rx_rbr_ring_t rbrp)
1048 {
1049
1050 int index;
1051 rxring_info_t *ring_info;
1052 int max_iteration = 0, max_index = 0;
1053
1054 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_rxbuf_index_info_init"));
1055
1056 ring_info = rbrp->ring_info;
1057 ring_info->hint[0] = NO_HINT;
1058 ring_info->hint[1] = NO_HINT;
1059 ring_info->hint[2] = NO_HINT;
1060 max_index = rbrp->num_blocks;
1061
1062 /* read the DVMA address information and sort it */
1063 /* do init of the information array */
1064
1065
1066 NXGE_DEBUG_MSG((nxgep, DMA2_CTL,
1067 " nxge_rxbuf_index_info_init Sort ptrs"));
1068
1069 /* sort the array */
1070 nxge_ksort((void *)ring_info->buffer, max_index,
1071 sizeof (rxbuf_index_info_t), nxge_sort_compare);
1072
1073
1074
1075 for (index = 0; index < max_index; index++) {
1076 NXGE_DEBUG_MSG((nxgep, DMA2_CTL,
1077 " nxge_rxbuf_index_info_init: sorted chunk %d "
1078 " ioaddr $%p kaddr $%p size %x",
1079 index, ring_info->buffer[index].dvma_addr,
1080 ring_info->buffer[index].kaddr,
1081 ring_info->buffer[index].buf_size));
1082 }
1083
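	/*
	 * Note (added for clarity): the loop below finds the number of
	 * bits needed to represent max_index (roughly log2(max_index) + 1);
	 * one more is then added as slack, giving the bound on
	 * binary-search probes used by nxge_rxbuf_pp_to_vp().
	 */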
1084 max_iteration = 0;
1085 while (max_index >= (1ULL << max_iteration))
1086 max_iteration++;
1087 ring_info->max_iterations = max_iteration + 1;
1088 NXGE_DEBUG_MSG((nxgep, DMA2_CTL,
1089 " nxge_rxbuf_index_info_init Find max iter %d",
1090 ring_info->max_iterations));
1091
1092 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxbuf_index_info_init"));
1093 return (NXGE_OK);
1094 }
1095
1096 /* ARGSUSED */
1097 void
1098 nxge_dump_rcr_entry(p_nxge_t nxgep, p_rcr_entry_t entry_p)
1099 {
1100 #ifdef NXGE_DEBUG
1101
1102 uint32_t bptr;
1103 uint64_t pp;
1104
1105 bptr = entry_p->bits.hdw.pkt_buf_addr;
1106
1107 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1108 "\trcr entry $%p "
1109 "\trcr entry 0x%0llx "
1110 "\trcr entry 0x%08x "
1111 "\trcr entry 0x%08x "
1112 "\tvalue 0x%0llx\n"
1113 "\tmulti = %d\n"
1114 "\tpkt_type = 0x%x\n"
1115 "\tzero_copy = %d\n"
1116 "\tnoport = %d\n"
1117 "\tpromis = %d\n"
1118 "\terror = 0x%04x\n"
1119 "\tdcf_err = 0x%01x\n"
1120 "\tl2_len = %d\n"
1121 "\tpktbufsize = %d\n"
1122 "\tpkt_buf_addr = $%p\n"
1123 "\tpkt_buf_addr (<< 6) = $%p\n",
1124 entry_p,
1125 *(int64_t *)entry_p,
1126 *(int32_t *)entry_p,
1127 *(int32_t *)((char *)entry_p + 32),
1128 entry_p->value,
1129 entry_p->bits.hdw.multi,
1130 entry_p->bits.hdw.pkt_type,
1131 entry_p->bits.hdw.zero_copy,
1132 entry_p->bits.hdw.noport,
1133 entry_p->bits.hdw.promis,
1134 entry_p->bits.hdw.error,
1135 entry_p->bits.hdw.dcf_err,
1136 entry_p->bits.hdw.l2_len,
1137 entry_p->bits.hdw.pktbufsz,
1138 bptr,
1139 entry_p->bits.ldw.pkt_buf_addr));
1140
1141 pp = (entry_p->value & RCR_PKT_BUF_ADDR_MASK) <<
1142 RCR_PKT_BUF_ADDR_SHIFT;
1143
1144 NXGE_DEBUG_MSG((nxgep, RX_CTL, "rcr pp 0x%llx l2 len %d",
1145 pp, (*(int64_t *)entry_p >> 40) & 0x3fff));
1146 #endif
1147 }
1148
1149 void
1150 nxge_rxdma_regs_dump(p_nxge_t nxgep, int rdc)
1151 {
1152 npi_handle_t handle;
1153 rbr_stat_t rbr_stat;
1154 addr44_t hd_addr;
1155 addr44_t tail_addr;
1156 uint16_t qlen;
1157
1158 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1159 "==> nxge_rxdma_regs_dump: rdc channel %d", rdc));
1160
1161 handle = NXGE_DEV_NPI_HANDLE(nxgep);
1162
1163 /* RBR head */
1164 hd_addr.addr = 0;
1165 (void) npi_rxdma_rdc_rbr_head_get(handle, rdc, &hd_addr);
1166 printf("nxge_rxdma_regs_dump: got hdptr $%p \n",
1167 (void *)hd_addr.addr);
1168
1169 /* RBR stats */
1170 (void) npi_rxdma_rdc_rbr_stat_get(handle, rdc, &rbr_stat);
1171 printf("nxge_rxdma_regs_dump: rbr len %d \n", rbr_stat.bits.ldw.qlen);
1172
1173 /* RCR tail */
1174 tail_addr.addr = 0;
1175 (void) npi_rxdma_rdc_rcr_tail_get(handle, rdc, &tail_addr);
1176 printf("nxge_rxdma_regs_dump: got tail ptr $%p \n",
1177 (void *)tail_addr.addr);
1178
1179 /* RCR qlen */
1180 (void) npi_rxdma_rdc_rcr_qlen_get(handle, rdc, &qlen);
1181 printf("nxge_rxdma_regs_dump: rcr len %x \n", qlen);
1182
1183 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1184 	    "<== nxge_rxdma_regs_dump: rdc channel %d", rdc));
1185 }
1186
1187 nxge_status_t
1188 nxge_rxdma_hw_mode(p_nxge_t nxgep, boolean_t enable)
1189 {
1190 nxge_grp_set_t *set = &nxgep->rx_set;
1191 nxge_status_t status;
1192 npi_status_t rs;
1193 int rdc;
1194
1195 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
1196 "==> nxge_rxdma_hw_mode: mode %d", enable));
1197
1198 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
1199 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1200 		    "<== nxge_rxdma_hw_mode: not initialized"));
1201 return (NXGE_ERROR);
1202 }
1203
1204 if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) {
1205 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1206 		    "<== nxge_rxdma_hw_mode: "
1207 "NULL ring pointer(s)"));
1208 return (NXGE_ERROR);
1209 }
1210
1211 if (set->owned.map == 0) {
1212 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1213 		    "nxge_rxdma_hw_mode: no channels"));
1214 return (0);
1215 }
1216
1217 rs = 0;
1218 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
1219 if ((1 << rdc) & set->owned.map) {
1220 rx_rbr_ring_t *ring =
1221 nxgep->rx_rbr_rings->rbr_rings[rdc];
1222 npi_handle_t handle = NXGE_DEV_NPI_HANDLE(nxgep);
1223 if (ring) {
1224 if (enable) {
1225 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
1226 "==> nxge_rxdma_hw_mode: "
1227 "channel %d (enable)", rdc));
1228 rs = npi_rxdma_cfg_rdc_enable
1229 (handle, rdc);
1230 } else {
1231 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
1232 "==> nxge_rxdma_hw_mode: "
1233 					    "channel %d (disable)", rdc));
1234 rs = npi_rxdma_cfg_rdc_disable
1235 (handle, rdc);
1236 }
1237 }
1238 }
1239 }
1240
1241 status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs);
1242
1243 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
1244 "<== nxge_rxdma_hw_mode: status 0x%x", status));
1245
1246 return (status);
1247 }
1248
1249 void
1250 nxge_rxdma_enable_channel(p_nxge_t nxgep, uint16_t channel)
1251 {
1252 npi_handle_t handle;
1253
1254 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
1255 "==> nxge_rxdma_enable_channel: channel %d", channel));
1256
1257 handle = NXGE_DEV_NPI_HANDLE(nxgep);
1258 (void) npi_rxdma_cfg_rdc_enable(handle, channel);
1259
1260 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxdma_enable_channel"));
1261 }
1262
1263 void
1264 nxge_rxdma_disable_channel(p_nxge_t nxgep, uint16_t channel)
1265 {
1266 npi_handle_t handle;
1267
1268 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
1269 "==> nxge_rxdma_disable_channel: channel %d", channel));
1270
1271 handle = NXGE_DEV_NPI_HANDLE(nxgep);
1272 (void) npi_rxdma_cfg_rdc_disable(handle, channel);
1273
1274 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxdma_disable_channel"));
1275 }
1276
1277 void
1278 nxge_hw_start_rx(p_nxge_t nxgep)
1279 {
1280 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_hw_start_rx"));
1281
1282 (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_START);
1283 (void) nxge_rx_mac_enable(nxgep);
1284
1285 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_hw_start_rx"));
1286 }
1287
1288 /*ARGSUSED*/
1289 void
1290 nxge_fixup_rxdma_rings(p_nxge_t nxgep)
1291 {
1292 nxge_grp_set_t *set = &nxgep->rx_set;
1293 int rdc;
1294
1295 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_fixup_rxdma_rings"));
1296
1297 if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) {
1298 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1299 		    "<== nxge_fixup_rxdma_rings: "
1300 "NULL ring pointer(s)"));
1301 return;
1302 }
1303
1304 if (set->owned.map == 0) {
1305 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1306 		    "nxge_fixup_rxdma_rings: no channels"));
1307 return;
1308 }
1309
1310 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
1311 if ((1 << rdc) & set->owned.map) {
1312 rx_rbr_ring_t *ring =
1313 nxgep->rx_rbr_rings->rbr_rings[rdc];
1314 if (ring) {
1315 nxge_rxdma_hw_stop(nxgep, rdc);
1316 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1317 "==> nxge_fixup_rxdma_rings: "
1318 				    "channel %d ring $%p",
1319 rdc, ring));
1320 (void) nxge_rxdma_fix_channel(nxgep, rdc);
1321 }
1322 }
1323 }
1324
1325 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_fixup_rxdma_rings"));
1326 }
1327
1328 void
1329 nxge_rxdma_fix_channel(p_nxge_t nxgep, uint16_t channel)
1330 {
1331 int ndmas;
1332 p_rx_rbr_rings_t rx_rbr_rings;
1333 p_rx_rbr_ring_t *rbr_rings;
1334 p_rx_rcr_rings_t rx_rcr_rings;
1335 p_rx_rcr_ring_t *rcr_rings;
1336 p_rx_mbox_areas_t rx_mbox_areas_p;
1337 p_rx_mbox_t *rx_mbox_p;
1338 p_nxge_dma_pool_t dma_buf_poolp;
1339 p_nxge_dma_pool_t dma_cntl_poolp;
1340 p_rx_rbr_ring_t rbrp;
1341 p_rx_rcr_ring_t rcrp;
1342 p_rx_mbox_t mboxp;
1343 p_nxge_dma_common_t dmap;
1344 nxge_status_t status = NXGE_OK;
1345
1346 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fix_channel"));
1347
1348 (void) nxge_rxdma_stop_channel(nxgep, channel);
1349
1350 dma_buf_poolp = nxgep->rx_buf_pool_p;
1351 dma_cntl_poolp = nxgep->rx_cntl_pool_p;
1352
1353 if (!dma_buf_poolp->buf_allocated || !dma_cntl_poolp->buf_allocated) {
1354 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
1355 "<== nxge_rxdma_fix_channel: buf not allocated"));
1356 return;
1357 }
1358
1359 ndmas = dma_buf_poolp->ndmas;
1360 if (!ndmas) {
1361 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
1362 "<== nxge_rxdma_fix_channel: no dma allocated"));
1363 return;
1364 }
1365
1366 rx_rbr_rings = nxgep->rx_rbr_rings;
1367 rx_rcr_rings = nxgep->rx_rcr_rings;
1368 rbr_rings = rx_rbr_rings->rbr_rings;
1369 rcr_rings = rx_rcr_rings->rcr_rings;
1370 rx_mbox_areas_p = nxgep->rx_mbox_areas_p;
1371 rx_mbox_p = rx_mbox_areas_p->rxmbox_areas;
1372
1373 /* Reinitialize the receive block and completion rings */
1374 rbrp = (p_rx_rbr_ring_t)rbr_rings[channel],
1375 rcrp = (p_rx_rcr_ring_t)rcr_rings[channel],
1376 mboxp = (p_rx_mbox_t)rx_mbox_p[channel];
1377
1378 rbrp->rbr_wr_index = (rbrp->rbb_max - 1);
1379 rbrp->rbr_rd_index = 0;
1380 rcrp->comp_rd_index = 0;
1381 rcrp->comp_wt_index = 0;
1382
1383 dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc;
1384 bzero((caddr_t)dmap->kaddrp, dmap->alength);
1385
1386 status = nxge_rxdma_start_channel(nxgep, channel,
1387 rbrp, rcrp, mboxp);
1388 if (status != NXGE_OK) {
1389 goto nxge_rxdma_fix_channel_fail;
1390 }
1391
1392 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1393 "<== nxge_rxdma_fix_channel: success (0x%08x)", status));
1394 return;
1395
1396 nxge_rxdma_fix_channel_fail:
1397 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1398 "<== nxge_rxdma_fix_channel: failed (0x%08x)", status));
1399 }
1400
1401 p_rx_rbr_ring_t
1402 nxge_rxdma_get_rbr_ring(p_nxge_t nxgep, uint16_t channel)
1403 {
1404 nxge_grp_set_t *set = &nxgep->rx_set;
1405 nxge_channel_t rdc;
1406
1407 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1408 "==> nxge_rxdma_get_rbr_ring: channel %d", channel));
1409
1410 if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) {
1411 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1412 "<== nxge_rxdma_get_rbr_ring: "
1413 "NULL ring pointer(s)"));
1414 return (NULL);
1415 }
1416
1417 if (set->owned.map == 0) {
1418 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1419 "<== nxge_rxdma_get_rbr_ring: no channels"));
1420 return (NULL);
1421 }
1422
1423 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
1424 if ((1 << rdc) & set->owned.map) {
1425 rx_rbr_ring_t *ring =
1426 nxgep->rx_rbr_rings->rbr_rings[rdc];
1427 if (ring) {
1428 if (channel == ring->rdc) {
1429 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1430 "==> nxge_rxdma_get_rbr_ring: "
1431 "channel %d ring $%p", rdc, ring));
1432 return (ring);
1433 }
1434 }
1435 }
1436 }
1437
1438 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1439 "<== nxge_rxdma_get_rbr_ring: not found"));
1440
1441 return (NULL);
1442 }
1443
1444 p_rx_rcr_ring_t
1445 nxge_rxdma_get_rcr_ring(p_nxge_t nxgep, uint16_t channel)
1446 {
1447 nxge_grp_set_t *set = &nxgep->rx_set;
1448 nxge_channel_t rdc;
1449
1450 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1451 "==> nxge_rxdma_get_rcr_ring: channel %d", channel));
1452
1453 if (nxgep->rx_rcr_rings == 0 || nxgep->rx_rcr_rings->rcr_rings == 0) {
1454 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1455 "<== nxge_rxdma_get_rcr_ring: "
1456 "NULL ring pointer(s)"));
1457 return (NULL);
1458 }
1459
1460 if (set->owned.map == 0) {
1461 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1462 		    "<== nxge_rxdma_get_rcr_ring: no channels"));
1463 return (NULL);
1464 }
1465
1466 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
1467 if ((1 << rdc) & set->owned.map) {
1468 rx_rcr_ring_t *ring =
1469 nxgep->rx_rcr_rings->rcr_rings[rdc];
1470 if (ring) {
1471 if (channel == ring->rdc) {
1472 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1473 "==> nxge_rxdma_get_rcr_ring: "
1474 "channel %d ring $%p", rdc, ring));
1475 return (ring);
1476 }
1477 }
1478 }
1479 }
1480
1481 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1482 "<== nxge_rxdma_get_rcr_ring: not found"));
1483
1484 return (NULL);
1485 }
1486
1487 /*
1488 * Static functions start here.
1489 */
1490 static p_rx_msg_t
1491 nxge_allocb(size_t size, uint32_t pri, p_nxge_dma_common_t dmabuf_p)
1492 {
1493 p_rx_msg_t nxge_mp = NULL;
1494 p_nxge_dma_common_t dmamsg_p;
1495 uchar_t *buffer;
1496
1497 nxge_mp = KMEM_ZALLOC(sizeof (rx_msg_t), KM_NOSLEEP);
1498 if (nxge_mp == NULL) {
1499 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
1500 "Allocation of a rx msg failed."));
1501 goto nxge_allocb_exit;
1502 }
1503
1504 nxge_mp->use_buf_pool = B_FALSE;
1505 if (dmabuf_p) {
1506 nxge_mp->use_buf_pool = B_TRUE;
1507 dmamsg_p = (p_nxge_dma_common_t)&nxge_mp->buf_dma;
1508 *dmamsg_p = *dmabuf_p;
1509 dmamsg_p->nblocks = 1;
1510 dmamsg_p->block_size = size;
1511 dmamsg_p->alength = size;
1512 buffer = (uchar_t *)dmabuf_p->kaddrp;
1513
1514 dmabuf_p->kaddrp = (void *)
1515 ((char *)dmabuf_p->kaddrp + size);
1516 dmabuf_p->ioaddr_pp = (void *)
1517 ((char *)dmabuf_p->ioaddr_pp + size);
1518 dmabuf_p->alength -= size;
1519 dmabuf_p->offset += size;
1520 dmabuf_p->dma_cookie.dmac_laddress += size;
1521 dmabuf_p->dma_cookie.dmac_size -= size;
1522
1523 } else {
1524 buffer = KMEM_ALLOC(size, KM_NOSLEEP);
1525 if (buffer == NULL) {
1526 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
1527 "Allocation of a receive page failed."));
1528 goto nxge_allocb_fail1;
1529 }
1530 }
1531
1532 nxge_mp->rx_mblk_p = desballoc(buffer, size, pri, &nxge_mp->freeb);
1533 if (nxge_mp->rx_mblk_p == NULL) {
1534 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "desballoc failed."));
1535 goto nxge_allocb_fail2;
1536 }
1537
1538 nxge_mp->buffer = buffer;
1539 nxge_mp->block_size = size;
1540 nxge_mp->freeb.free_func = (void (*)())nxge_freeb;
1541 nxge_mp->freeb.free_arg = (caddr_t)nxge_mp;
1542 nxge_mp->ref_cnt = 1;
1543 nxge_mp->free = B_TRUE;
1544 nxge_mp->rx_use_bcopy = B_FALSE;
1545
1546 atomic_inc_32(&nxge_mblks_pending);
1547
1548 goto nxge_allocb_exit;
1549
1550 nxge_allocb_fail2:
1551 if (!nxge_mp->use_buf_pool) {
1552 KMEM_FREE(buffer, size);
1553 }
1554
1555 nxge_allocb_fail1:
1556 KMEM_FREE(nxge_mp, sizeof (rx_msg_t));
1557 nxge_mp = NULL;
1558
1559 nxge_allocb_exit:
1560 return (nxge_mp);
1561 }
1562
1563 p_mblk_t
1564 nxge_dupb(p_rx_msg_t nxge_mp, uint_t offset, size_t size)
1565 {
1566 p_mblk_t mp;
1567
1568 NXGE_DEBUG_MSG((NULL, MEM_CTL, "==> nxge_dupb"));
1569 NXGE_DEBUG_MSG((NULL, MEM_CTL, "nxge_mp = $%p "
1570 "offset = 0x%08X "
1571 "size = 0x%08X",
1572 nxge_mp, offset, size));
1573
1574 mp = desballoc(&nxge_mp->buffer[offset], size,
1575 0, &nxge_mp->freeb);
1576 if (mp == NULL) {
1577 NXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed"));
1578 goto nxge_dupb_exit;
1579 }
1580 atomic_inc_32(&nxge_mp->ref_cnt);
1581
1582
1583 nxge_dupb_exit:
1584 NXGE_DEBUG_MSG((NULL, MEM_CTL, "<== nxge_dupb mp = $%p",
1585 nxge_mp));
1586 return (mp);
1587 }
1588
1589 p_mblk_t
1590 nxge_dupb_bcopy(p_rx_msg_t nxge_mp, uint_t offset, size_t size)
1591 {
1592 p_mblk_t mp;
1593 uchar_t *dp;
1594
1595 mp = allocb(size + NXGE_RXBUF_EXTRA, 0);
1596 if (mp == NULL) {
1597 		NXGE_DEBUG_MSG((NULL, RX_CTL, "allocb failed"));
1598 goto nxge_dupb_bcopy_exit;
1599 }
1600 dp = mp->b_rptr = mp->b_rptr + NXGE_RXBUF_EXTRA;
1601 bcopy((void *)&nxge_mp->buffer[offset], dp, size);
1602 mp->b_wptr = dp + size;
1603
1604 nxge_dupb_bcopy_exit:
1605 	NXGE_DEBUG_MSG((NULL, MEM_CTL, "<== nxge_dupb_bcopy mp = $%p",
1606 nxge_mp));
1607 return (mp);
1608 }
1609
1610 void nxge_post_page(p_nxge_t nxgep, p_rx_rbr_ring_t rx_rbr_p,
1611 p_rx_msg_t rx_msg_p);
1612
1613 void
1614 nxge_post_page(p_nxge_t nxgep, p_rx_rbr_ring_t rx_rbr_p, p_rx_msg_t rx_msg_p)
1615 {
1616 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_post_page"));
1617
1618 /* Reuse this buffer */
1619 rx_msg_p->free = B_FALSE;
1620 rx_msg_p->cur_usage_cnt = 0;
1621 rx_msg_p->max_usage_cnt = 0;
1622 rx_msg_p->pkt_buf_size = 0;
1623
1624 if (rx_rbr_p->rbr_use_bcopy) {
1625 rx_msg_p->rx_use_bcopy = B_FALSE;
1626 atomic_dec_32(&rx_rbr_p->rbr_consumed);
1627 }
1628
1629 /*
1630 * Get the rbr header pointer and its offset index.
1631 */
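	/*
	 * Note (added for clarity): the masked increment below assumes
	 * the RBR descriptor ring length is a power of two, so ANDing
	 * with rbr_wrap_mask wraps the write index back to zero at the
	 * end of the ring.
	 */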
1632 MUTEX_ENTER(&rx_rbr_p->post_lock);
1633 rx_rbr_p->rbr_wr_index = ((rx_rbr_p->rbr_wr_index + 1) &
1634 rx_rbr_p->rbr_wrap_mask);
1635 rx_rbr_p->rbr_desc_vp[rx_rbr_p->rbr_wr_index] = rx_msg_p->shifted_addr;
1636 MUTEX_EXIT(&rx_rbr_p->post_lock);
1637 npi_rxdma_rdc_rbr_kick(NXGE_DEV_NPI_HANDLE(nxgep),
1638 rx_rbr_p->rdc, 1);
1639
1640 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1641 "<== nxge_post_page (channel %d post_next_index %d)",
1642 rx_rbr_p->rdc, rx_rbr_p->rbr_wr_index));
1643
1644 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_post_page"));
1645 }
1646
1647 void
1648 nxge_freeb(p_rx_msg_t rx_msg_p)
1649 {
1650 size_t size;
1651 uchar_t *buffer = NULL;
1652 int ref_cnt;
1653 boolean_t free_state = B_FALSE;
1654
1655 rx_rbr_ring_t *ring = rx_msg_p->rx_rbr_p;
1656
1657 NXGE_DEBUG_MSG((NULL, MEM2_CTL, "==> nxge_freeb"));
1658 NXGE_DEBUG_MSG((NULL, MEM2_CTL,
1659 "nxge_freeb:rx_msg_p = $%p (block pending %d)",
1660 rx_msg_p, nxge_mblks_pending));
1661
1662 /*
1663 * First we need to get the free state, then
1664 	 * atomically decrement the reference count to prevent
1665 	 * a race condition with the interrupt thread that
1666 	 * is processing a loaned-up buffer block.
1667 */
1668 free_state = rx_msg_p->free;
1669 ref_cnt = atomic_dec_32_nv(&rx_msg_p->ref_cnt);
1670 if (!ref_cnt) {
1671 atomic_dec_32(&nxge_mblks_pending);
1672 buffer = rx_msg_p->buffer;
1673 size = rx_msg_p->block_size;
1674 NXGE_DEBUG_MSG((NULL, MEM2_CTL, "nxge_freeb: "
1675 "will free: rx_msg_p = $%p (block pending %d)",
1676 rx_msg_p, nxge_mblks_pending));
1677
1678 if (!rx_msg_p->use_buf_pool) {
1679 KMEM_FREE(buffer, size);
1680 }
1681
1682 KMEM_FREE(rx_msg_p, sizeof (rx_msg_t));
1683
1684 if (ring) {
1685 /*
1686 * Decrement the receive buffer ring's reference
1687 * count, too.
1688 */
1689 atomic_dec_32(&ring->rbr_ref_cnt);
1690
1691 /*
1692 * Free the receive buffer ring, if
1693 * 1. all the receive buffers have been freed
1694 * 2. and we are in the proper state (that is,
1695 * we are not UNMAPPING).
1696 */
1697 if (ring->rbr_ref_cnt == 0 &&
1698 ring->rbr_state == RBR_UNMAPPED) {
1699 /*
1700 * Free receive data buffers,
1701 * buffer index information
1702 * (rxring_info) and
1703 * the message block ring.
1704 */
1705 NXGE_DEBUG_MSG((NULL, RX_CTL,
1706 "nxge_freeb:rx_msg_p = $%p "
1707 "(block pending %d) free buffers",
1708 rx_msg_p, nxge_mblks_pending));
1709 nxge_rxdma_databuf_free(ring);
1710 if (ring->ring_info) {
1711 KMEM_FREE(ring->ring_info,
1712 sizeof (rxring_info_t));
1713 }
1714
1715 if (ring->rx_msg_ring) {
1716 KMEM_FREE(ring->rx_msg_ring,
1717 ring->tnblocks *
1718 sizeof (p_rx_msg_t));
1719 }
1720 KMEM_FREE(ring, sizeof (*ring));
1721 }
1722 }
1723 return;
1724 }
1725
1726 /*
1727 * Repost buffer.
1728 */
1729 if (free_state && (ref_cnt == 1) && ring) {
1730 NXGE_DEBUG_MSG((NULL, RX_CTL,
1731 "nxge_freeb: post page $%p:", rx_msg_p));
1732 if (ring->rbr_state == RBR_POSTING)
1733 nxge_post_page(rx_msg_p->nxgep, ring, rx_msg_p);
1734 }
1735
1736 NXGE_DEBUG_MSG((NULL, MEM2_CTL, "<== nxge_freeb"));
1737 }
1738
1739 uint_t
1740 nxge_rx_intr(char *arg1, char *arg2)
1741 {
1742 p_nxge_ldv_t ldvp = (p_nxge_ldv_t)arg1;
1743 p_nxge_t nxgep = (p_nxge_t)arg2;
1744 p_nxge_ldg_t ldgp;
1745 uint8_t channel;
1746 npi_handle_t handle;
1747 rx_dma_ctl_stat_t cs;
1748 p_rx_rcr_ring_t rcrp;
1749 mblk_t *mp = NULL;
1750
1751 if (ldvp == NULL) {
1752 NXGE_DEBUG_MSG((NULL, INT_CTL,
1753 "<== nxge_rx_intr: arg2 $%p arg1 $%p",
1754 nxgep, ldvp));
1755 return (DDI_INTR_CLAIMED);
1756 }
1757
1758 if (arg2 == NULL || (void *)ldvp->nxgep != arg2) {
1759 nxgep = ldvp->nxgep;
1760 }
1761
1762 if ((!(nxgep->drv_state & STATE_HW_INITIALIZED)) ||
1763 (nxgep->nxge_mac_state != NXGE_MAC_STARTED)) {
1764 NXGE_DEBUG_MSG((nxgep, INT_CTL,
1765 		    "<== nxge_rx_intr: interface not started or initialized"));
1766 return (DDI_INTR_CLAIMED);
1767 }
1768
1769 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1770 "==> nxge_rx_intr: arg2 $%p arg1 $%p",
1771 nxgep, ldvp));
1772
1773 /*
1774 * Get the PIO handle.
1775 */
1776 handle = NXGE_DEV_NPI_HANDLE(nxgep);
1777
1778 /*
1779 * Get the ring to enable us to process packets.
1780 */
1781 rcrp = nxgep->rx_rcr_rings->rcr_rings[ldvp->vdma_index];
1782
1783 /*
1784 * The RCR ring lock must be held when packets
1785 * are being processed and the hardware registers are
1786 	 * being read or written, to prevent race conditions
1787 	 * among the interrupt thread, the polling thread
1788 	 * (which would cause fatal errors such as the rcrincon
1789 	 * bit being set), and the setting of the poll_flag.
1790 */
1791 MUTEX_ENTER(&rcrp->lock);
1792
1793 /*
1794 * Get the control and status for this channel.
1795 */
1796 channel = ldvp->channel;
1797 ldgp = ldvp->ldgp;
1798
1799 if (!isLDOMguest(nxgep) && (!rcrp->started)) {
1800 NXGE_DEBUG_MSG((nxgep, INT_CTL,
1801 "<== nxge_rx_intr: channel is not started"));
1802
1803 /*
1804 * We received an interrupt before the ring is started.
1805 */
1806 RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
1807 &cs.value);
1808 cs.value &= RX_DMA_CTL_STAT_WR1C;
1809 cs.bits.hdw.mex = 1;
1810 RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
1811 cs.value);
1812
1813 /*
1814 * Rearm this logical group if this is a single device
1815 * group.
1816 */
1817 if (ldgp->nldvs == 1) {
1818 if (isLDOMguest(nxgep)) {
1819 nxge_hio_ldgimgn(nxgep, ldgp);
1820 } else {
1821 ldgimgm_t mgm;
1822
1823 mgm.value = 0;
1824 mgm.bits.ldw.arm = 1;
1825 mgm.bits.ldw.timer = ldgp->ldg_timer;
1826
1827 NXGE_REG_WR64(handle,
1828 LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg),
1829 mgm.value);
1830 }
1831 }
1832 MUTEX_EXIT(&rcrp->lock);
1833 return (DDI_INTR_CLAIMED);
1834 }
1835
1836 ASSERT(rcrp->ldgp == ldgp);
1837 ASSERT(rcrp->ldvp == ldvp);
1838
1839 RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel, &cs.value);
1840
1841 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_intr:channel %d "
1842 "cs 0x%016llx rcrto 0x%x rcrthres %x",
1843 channel,
1844 cs.value,
1845 cs.bits.hdw.rcrto,
1846 cs.bits.hdw.rcrthres));
1847
1848 if (!rcrp->poll_flag) {
1849 mp = nxge_rx_pkts(nxgep, rcrp, cs, -1);
1850 }
1851
1852 /* error events. */
1853 if (cs.value & RX_DMA_CTL_STAT_ERROR) {
1854 (void) nxge_rx_err_evnts(nxgep, channel, cs);
1855 }
1856
1857 /*
1858 * Enable the mailbox update interrupt if we want
1859 	 * to use the mailbox. We probably don't need to use
1860 	 * the mailbox as it only saves us one PIO read.
1861 	 * Also write 1 to rcrthres and rcrto to clear
1862 	 * these two edge-triggered bits.
1863 */
1864 cs.value &= RX_DMA_CTL_STAT_WR1C;
1865 cs.bits.hdw.mex = rcrp->poll_flag ? 0 : 1;
1866 RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
1867 cs.value);
1868
1869 /*
1870 * If the polling mode is enabled, disable the interrupt.
1871 */
1872 if (rcrp->poll_flag) {
1873 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
1874 "==> nxge_rx_intr: rdc %d ldgp $%p ldvp $%p "
1875 "(disabling interrupts)", channel, ldgp, ldvp));
1876
1877 /*
1878 * Disarm this logical group if this is a single device
1879 * group.
1880 */
1881 if (ldgp->nldvs == 1) {
1882 if (isLDOMguest(nxgep)) {
1883 ldgp->arm = B_FALSE;
1884 nxge_hio_ldgimgn(nxgep, ldgp);
1885 } else {
1886 ldgimgm_t mgm;
1887 mgm.value = 0;
1888 mgm.bits.ldw.arm = 0;
1889 NXGE_REG_WR64(handle,
1890 LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg),
1891 mgm.value);
1892 }
1893 }
1894 } else {
1895 /*
1896 * Rearm this logical group if this is a single device
1897 * group.
1898 */
1899 if (ldgp->nldvs == 1) {
1900 if (isLDOMguest(nxgep)) {
1901 nxge_hio_ldgimgn(nxgep, ldgp);
1902 } else {
1903 ldgimgm_t mgm;
1904
1905 mgm.value = 0;
1906 mgm.bits.ldw.arm = 1;
1907 mgm.bits.ldw.timer = ldgp->ldg_timer;
1908
1909 NXGE_REG_WR64(handle,
1910 LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg),
1911 mgm.value);
1912 }
1913 }
1914
1915 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
1916 "==> nxge_rx_intr: rdc %d ldgp $%p "
1917 "exiting ISR (and call mac_rx_ring)", channel, ldgp));
1918 }
1919 MUTEX_EXIT(&rcrp->lock);
1920
1921 if (mp != NULL) {
1922 mac_rx_ring(nxgep->mach, rcrp->rcr_mac_handle, mp,
1923 rcrp->rcr_gen_num);
1924 }
1925 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rx_intr: DDI_INTR_CLAIMED"));
1926 return (DDI_INTR_CLAIMED);
1927 }
1928
1929 /*
1930 * This routine is the main packet receive processing function.
1931  * It gets the packet type, error code, and buffer-related
1932  * information from the receive completion entry.
1933  * How many completion entries to process is based on the number of packets
1934  * queued by the hardware, a hardware-maintained tail pointer
1935  * and a configurable receive packet count.
1936  *
1937  * A chain of message blocks will be created as a result of processing
1938  * the completion entries. This chain of message blocks will be returned and
1939  * a hardware control status register will be updated with the number of
1940  * packets that were removed from the hardware queue.
1941 *
1942 * The RCR ring lock is held when entering this function.
1943 */
1944 static mblk_t *
1945 nxge_rx_pkts(p_nxge_t nxgep, p_rx_rcr_ring_t rcr_p, rx_dma_ctl_stat_t cs,
1946 int bytes_to_pickup)
1947 {
1948 npi_handle_t handle;
1949 uint8_t channel;
1950 uint32_t comp_rd_index;
1951 p_rcr_entry_t rcr_desc_rd_head_p;
1952 p_rcr_entry_t rcr_desc_rd_head_pp;
1953 p_mblk_t nmp, mp_cont, head_mp, *tail_mp;
1954 uint16_t qlen, nrcr_read, npkt_read;
1955 uint32_t qlen_hw;
1956 boolean_t multi;
1957 rcrcfig_b_t rcr_cfg_b;
1958 int totallen = 0;
1959 #if defined(_BIG_ENDIAN)
1960 npi_status_t rs = NPI_SUCCESS;
1961 #endif
1962
1963 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_rx_pkts: "
1964 "channel %d", rcr_p->rdc));
1965
1966 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
1967 return (NULL);
1968 }
1969 handle = NXGE_DEV_NPI_HANDLE(nxgep);
1970 channel = rcr_p->rdc;
1971
1972 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1973 "==> nxge_rx_pkts: START: rcr channel %d "
1974 "head_p $%p head_pp $%p index %d ",
1975 channel, rcr_p->rcr_desc_rd_head_p,
1976 rcr_p->rcr_desc_rd_head_pp,
1977 rcr_p->comp_rd_index));
1978
1979
1980 #if !defined(_BIG_ENDIAN)
1981 qlen = RXDMA_REG_READ32(handle, RCRSTAT_A_REG, channel) & 0xffff;
1982 #else
1983 rs = npi_rxdma_rdc_rcr_qlen_get(handle, channel, &qlen);
1984 if (rs != NPI_SUCCESS) {
1985 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts: "
1986 "channel %d, get qlen failed 0x%08x",
1987 channel, rs));
1988 return (NULL);
1989 }
1990 #endif
1991 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts:rcr channel %d "
1992 "qlen %d", channel, qlen));
1993
1994
1995
1996 if (!qlen) {
1997 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
1998 "==> nxge_rx_pkts:rcr channel %d "
1999 "qlen %d (no pkts)", channel, qlen));
2000
2001 return (NULL);
2002 }
2003
2004 comp_rd_index = rcr_p->comp_rd_index;
2005
2006 rcr_desc_rd_head_p = rcr_p->rcr_desc_rd_head_p;
2007 rcr_desc_rd_head_pp = rcr_p->rcr_desc_rd_head_pp;
2008 nrcr_read = npkt_read = 0;
2009
2010 /*
2011 	 * Number of packets queued.
2012 	 * (A jumbo or multi-buffer packet is counted as only one
2013 	 * packet, even though it may take up more than one completion entry.)
2014 */
2015 qlen_hw = (qlen < nxge_max_rx_pkts) ?
2016 qlen : nxge_max_rx_pkts;
2017 head_mp = NULL;
2018 tail_mp = &head_mp;
2019 nmp = mp_cont = NULL;
2020 multi = B_FALSE;
2021
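	/*
	 * Walk the completion ring one entry at a time.  Complete frames
	 * are linked with b_next; the buffers of a multi-buffer (jumbo)
	 * frame are linked with b_cont.  Stop once the packet budget or
	 * the requested byte count has been reached.
	 */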
2022 while (qlen_hw) {
2023
2024 #ifdef NXGE_DEBUG
2025 nxge_dump_rcr_entry(nxgep, rcr_desc_rd_head_p);
2026 #endif
2027 /*
2028 * Process one completion ring entry.
2029 */
2030 nxge_receive_packet(nxgep,
2031 rcr_p, rcr_desc_rd_head_p, &multi, &nmp, &mp_cont);
2032
2033 /*
2034 * message chaining modes
2035 */
2036 if (nmp) {
2037 nmp->b_next = NULL;
2038 if (!multi && !mp_cont) { /* frame fits a partition */
2039 *tail_mp = nmp;
2040 tail_mp = &nmp->b_next;
2041 totallen += MBLKL(nmp);
2042 nmp = NULL;
2043 } else if (multi && !mp_cont) { /* first segment */
2044 *tail_mp = nmp;
2045 tail_mp = &nmp->b_cont;
2046 totallen += MBLKL(nmp);
2047 } else if (multi && mp_cont) { /* mid of multi segs */
2048 *tail_mp = mp_cont;
2049 tail_mp = &mp_cont->b_cont;
2050 totallen += MBLKL(mp_cont);
2051 } else if (!multi && mp_cont) { /* last segment */
2052 *tail_mp = mp_cont;
2053 tail_mp = &nmp->b_next;
2054 totallen += MBLKL(mp_cont);
2055 nmp = NULL;
2056 }
2057 }
2058 NXGE_DEBUG_MSG((nxgep, RX_CTL,
2059 "==> nxge_rx_pkts: loop: rcr channel %d "
2060 "before updating: multi %d "
2061 "nrcr_read %d "
2062 "npk read %d "
2063 "head_pp $%p index %d ",
2064 channel,
2065 multi,
2066 nrcr_read, npkt_read, rcr_desc_rd_head_pp,
2067 comp_rd_index));
2068
2069 if (!multi) {
2070 qlen_hw--;
2071 npkt_read++;
2072 }
2073
2074 /*
2075 * Update the next read entry.
2076 */
2077 comp_rd_index = NEXT_ENTRY(comp_rd_index,
2078 rcr_p->comp_wrap_mask);
2079
2080 rcr_desc_rd_head_p = NEXT_ENTRY_PTR(rcr_desc_rd_head_p,
2081 rcr_p->rcr_desc_first_p,
2082 rcr_p->rcr_desc_last_p);
2083
2084 nrcr_read++;
2085
2086 NXGE_DEBUG_MSG((nxgep, RX_CTL,
2087 "<== nxge_rx_pkts: (SAM, process one packet) "
2088 "nrcr_read %d",
2089 nrcr_read));
2090 NXGE_DEBUG_MSG((nxgep, RX_CTL,
2091 "==> nxge_rx_pkts: loop: rcr channel %d "
2092 "multi %d "
2093 "nrcr_read %d "
2094 "npk read %d "
2095 "head_pp $%p index %d ",
2096 channel,
2097 multi,
2098 nrcr_read, npkt_read, rcr_desc_rd_head_pp,
2099 comp_rd_index));
2100
2101 if ((bytes_to_pickup != -1) &&
2102 (totallen >= bytes_to_pickup)) {
2103 break;
2104 }
2105 }
2106
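	/*
	 * Save the software view of the completion ring (read head and
	 * index), and reprogram the RCR timeout/threshold if the tunables
	 * have changed since the last pass.
	 */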
2107 rcr_p->rcr_desc_rd_head_pp = rcr_desc_rd_head_pp;
2108 rcr_p->comp_rd_index = comp_rd_index;
2109 rcr_p->rcr_desc_rd_head_p = rcr_desc_rd_head_p;
2110 if ((nxgep->intr_timeout != rcr_p->intr_timeout) ||
2111 (nxgep->intr_threshold != rcr_p->intr_threshold)) {
2112
2113 rcr_p->intr_timeout = (nxgep->intr_timeout <
2114 NXGE_RDC_RCR_TIMEOUT_MIN) ? NXGE_RDC_RCR_TIMEOUT_MIN :
2115 nxgep->intr_timeout;
2116
2117 rcr_p->intr_threshold = (nxgep->intr_threshold <
2118 NXGE_RDC_RCR_THRESHOLD_MIN) ? NXGE_RDC_RCR_THRESHOLD_MIN :
2119 nxgep->intr_threshold;
2120
2121 rcr_cfg_b.value = 0x0ULL;
2122 rcr_cfg_b.bits.ldw.entout = 1;
2123 rcr_cfg_b.bits.ldw.timeout = rcr_p->intr_timeout;
2124 rcr_cfg_b.bits.ldw.pthres = rcr_p->intr_threshold;
2125
2126 RXDMA_REG_WRITE64(handle, RCRCFIG_B_REG,
2127 channel, rcr_cfg_b.value);
2128 }
2129
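	/*
	 * Report how many packets and completion entries were consumed so
	 * that the hardware can advance its queue-length accounting.
	 */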
2130 cs.bits.ldw.pktread = npkt_read;
2131 cs.bits.ldw.ptrread = nrcr_read;
2132 RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG,
2133 channel, cs.value);
2134 NXGE_DEBUG_MSG((nxgep, RX_CTL,
2135 "==> nxge_rx_pkts: EXIT: rcr channel %d "
2136 "head_pp $%p index %016llx ",
2137 channel,
2138 rcr_p->rcr_desc_rd_head_pp,
2139 rcr_p->comp_rd_index));
2140 /*
2141 * Update RCR buffer pointer read and number of packets
2142 * read.
2143 */
2144
2145 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_rx_pkts: return"
2146 "channel %d", rcr_p->rdc));
2147
2148 return (head_mp);
2149 }
2150
2151 void
2152 nxge_receive_packet(p_nxge_t nxgep,
2153 p_rx_rcr_ring_t rcr_p, p_rcr_entry_t rcr_desc_rd_head_p,
2154 boolean_t *multi_p, mblk_t **mp, mblk_t **mp_cont)
2155 {
2156 p_mblk_t nmp = NULL;
2157 uint64_t multi;
2158 uint64_t dcf_err;
2159 uint8_t channel;
2160
2161 boolean_t first_entry = B_TRUE;
2162 boolean_t is_tcp_udp = B_FALSE;
2163 boolean_t buffer_free = B_FALSE;
2164 boolean_t error_send_up = B_FALSE;
2165 uint8_t error_type;
2166 uint16_t l2_len;
2167 uint16_t skip_len;
2168 uint8_t pktbufsz_type;
2169 uint64_t rcr_entry;
2170 uint64_t *pkt_buf_addr_pp;
2171 uint64_t *pkt_buf_addr_p;
2172 uint32_t buf_offset;
2173 uint32_t bsize;
2174 uint32_t error_disp_cnt;
2175 uint32_t msg_index;
2176 p_rx_rbr_ring_t rx_rbr_p;
2177 p_rx_msg_t *rx_msg_ring_p;
2178 p_rx_msg_t rx_msg_p;
2179 uint16_t sw_offset_bytes = 0, hdr_size = 0;
2180 nxge_status_t status = NXGE_OK;
2181 boolean_t is_valid = B_FALSE;
2182 p_nxge_rx_ring_stats_t rdc_stats;
2183 uint32_t bytes_read;
2184 uint64_t pkt_type;
2185 uint64_t frag;
2186 boolean_t pkt_too_long_err = B_FALSE;
2187 #ifdef NXGE_DEBUG
2188 int dump_len;
2189 #endif
2190 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_receive_packet"));
2191 first_entry = (*mp == NULL) ? B_TRUE : B_FALSE;
2192
2193 rcr_entry = *((uint64_t *)rcr_desc_rd_head_p);
2194
2195 multi = (rcr_entry & RCR_MULTI_MASK);
2196 dcf_err = (rcr_entry & RCR_DCF_ERROR_MASK);
2197 pkt_type = (rcr_entry & RCR_PKT_TYPE_MASK);
2198
2199 error_type = ((rcr_entry & RCR_ERROR_MASK) >> RCR_ERROR_SHIFT);
2200 frag = (rcr_entry & RCR_FRAG_MASK);
2201
2202 l2_len = ((rcr_entry & RCR_L2_LEN_MASK) >> RCR_L2_LEN_SHIFT);
2203
2204 pktbufsz_type = ((rcr_entry & RCR_PKTBUFSZ_MASK) >>
2205 RCR_PKTBUFSZ_SHIFT);
2206 pkt_buf_addr_pp = (uint64_t *)((rcr_entry & RCR_PKT_BUF_ADDR_MASK) <<
2207 RCR_PKT_BUF_ADDR_SHIFT);
2208
2209 channel = rcr_p->rdc;
2210
2211 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2212 "==> nxge_receive_packet: entryp $%p entry 0x%0llx "
2213 "pkt_buf_addr_pp $%p l2_len %d multi 0x%llx "
2214 "error_type 0x%x pkt_type 0x%x "
2215 "pktbufsz_type %d ",
2216 rcr_desc_rd_head_p,
2217 rcr_entry, pkt_buf_addr_pp, l2_len,
2218 multi,
2219 error_type,
2220 pkt_type,
2221 pktbufsz_type));
2222
2223 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2224 "==> nxge_receive_packet: entryp $%p entry 0x%0llx "
2225 "pkt_buf_addr_pp $%p l2_len %d multi 0x%llx "
2226 "error_type 0x%x pkt_type 0x%x ", rcr_desc_rd_head_p,
2227 rcr_entry, pkt_buf_addr_pp, l2_len,
2228 multi,
2229 error_type,
2230 pkt_type));
2231
2232 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2233 "==> (rbr) nxge_receive_packet: entry 0x%0llx "
2234 "full pkt_buf_addr_pp $%p l2_len %d",
2235 rcr_entry, pkt_buf_addr_pp, l2_len));
2236
2237 /* get the stats ptr */
2238 rdc_stats = rcr_p->rdc_stats;
2239
2240 if (!l2_len) {
2241
2242 NXGE_DEBUG_MSG((nxgep, RX_CTL,
2243 "<== nxge_receive_packet: failed: l2 length is 0."));
2244 return;
2245 }
2246
2247 /*
2248 	 * Software workaround for a BMAC hardware limitation that allows
2249 	 * a max frame size of 1526 instead of 1522 for non-jumbo, and
2250 	 * 0x2406 instead of 0x2400 for jumbo.
2251 */
2252 if (l2_len > nxgep->mac.maxframesize) {
2253 pkt_too_long_err = B_TRUE;
2254 }
2255
2256 /* Hardware sends us 4 bytes of CRC as no stripping is done. */
2257 l2_len -= ETHERFCSL;
2258
2259 /* shift 6 bits to get the full io address */
2260 pkt_buf_addr_pp = (uint64_t *)((uint64_t)pkt_buf_addr_pp <<
2261 RCR_PKT_BUF_ADDR_SHIFT_FULL);
2262 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2263 "==> (rbr) nxge_receive_packet: entry 0x%0llx "
2264 "full pkt_buf_addr_pp $%p l2_len %d",
2265 rcr_entry, pkt_buf_addr_pp, l2_len));
2266
2267 rx_rbr_p = rcr_p->rx_rbr_p;
2268 rx_msg_ring_p = rx_rbr_p->rx_msg_ring;
2269
2270 if (first_entry) {
2271 hdr_size = (rcr_p->full_hdr_flag ? RXDMA_HDR_SIZE_FULL :
2272 RXDMA_HDR_SIZE_DEFAULT);
2273
2274 NXGE_DEBUG_MSG((nxgep, RX_CTL,
2275 "==> nxge_receive_packet: first entry 0x%016llx "
2276 "pkt_buf_addr_pp $%p l2_len %d hdr %d",
2277 rcr_entry, pkt_buf_addr_pp, l2_len,
2278 hdr_size));
2279 }
2280
2281 MUTEX_ENTER(&rx_rbr_p->lock);
2282
2283 NXGE_DEBUG_MSG((nxgep, RX_CTL,
2284 "==> (rbr 1) nxge_receive_packet: entry 0x%0llx "
2285 "full pkt_buf_addr_pp $%p l2_len %d",
2286 rcr_entry, pkt_buf_addr_pp, l2_len));
2287
2288 /*
2289 * Packet buffer address in the completion entry points
2290 * to the starting buffer address (offset 0).
2291 * Use the starting buffer address to locate the corresponding
2292 * kernel address.
2293 */
2294 status = nxge_rxbuf_pp_to_vp(nxgep, rx_rbr_p,
2295 pktbufsz_type, pkt_buf_addr_pp, &pkt_buf_addr_p,
2296 &buf_offset,
2297 &msg_index);
2298
2299 NXGE_DEBUG_MSG((nxgep, RX_CTL,
2300 "==> (rbr 2) nxge_receive_packet: entry 0x%0llx "
2301 "full pkt_buf_addr_pp $%p l2_len %d",
2302 rcr_entry, pkt_buf_addr_pp, l2_len));
2303
2304 if (status != NXGE_OK) {
2305 MUTEX_EXIT(&rx_rbr_p->lock);
2306 NXGE_DEBUG_MSG((nxgep, RX_CTL,
2307 "<== nxge_receive_packet: found vaddr failed %d",
2308 status));
2309 return;
2310 }
2311
2312 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2313 "==> (rbr 3) nxge_receive_packet: entry 0x%0llx "
2314 "full pkt_buf_addr_pp $%p l2_len %d",
2315 rcr_entry, pkt_buf_addr_pp, l2_len));
2316
2317 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2318 "==> (rbr 4 msgindex %d) nxge_receive_packet: entry 0x%0llx "
2319 "full pkt_buf_addr_pp $%p l2_len %d",
2320 msg_index, rcr_entry, pkt_buf_addr_pp, l2_len));
2321
2322 rx_msg_p = rx_msg_ring_p[msg_index];
2323
2324 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2325 "==> (rbr 4 msgindex %d) nxge_receive_packet: entry 0x%0llx "
2326 "full pkt_buf_addr_pp $%p l2_len %d",
2327 msg_index, rcr_entry, pkt_buf_addr_pp, l2_len));
2328
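	/*
	 * The completion entry encodes which of the configured buffer
	 * sizes (or a whole block) this packet was placed in; pick the
	 * matching byte count.
	 */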
2329 switch (pktbufsz_type) {
2330 case RCR_PKTBUFSZ_0:
2331 bsize = rx_rbr_p->pkt_buf_size0_bytes;
2332 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2333 "==> nxge_receive_packet: 0 buf %d", bsize));
2334 break;
2335 case RCR_PKTBUFSZ_1:
2336 bsize = rx_rbr_p->pkt_buf_size1_bytes;
2337 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2338 "==> nxge_receive_packet: 1 buf %d", bsize));
2339 break;
2340 case RCR_PKTBUFSZ_2:
2341 bsize = rx_rbr_p->pkt_buf_size2_bytes;
2342 NXGE_DEBUG_MSG((nxgep, RX_CTL,
2343 "==> nxge_receive_packet: 2 buf %d", bsize));
2344 break;
2345 case RCR_SINGLE_BLOCK:
2346 bsize = rx_msg_p->block_size;
2347 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2348 "==> nxge_receive_packet: single %d", bsize));
2349
2350 break;
2351 default:
2352 MUTEX_EXIT(&rx_rbr_p->lock);
2353 return;
2354 }
2355
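	/*
	 * Convert the configured software buffer offset (the tunable
	 * nxge_rdc_buf_offset) into bytes for the DMA sync below.
	 */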
2356 switch (nxge_rdc_buf_offset) {
2357 case SW_OFFSET_NO_OFFSET:
2358 sw_offset_bytes = 0;
2359 break;
2360 case SW_OFFSET_64:
2361 sw_offset_bytes = 64;
2362 break;
2363 case SW_OFFSET_128:
2364 sw_offset_bytes = 128;
2365 break;
2366 case SW_OFFSET_192:
2367 sw_offset_bytes = 192;
2368 break;
2369 case SW_OFFSET_256:
2370 sw_offset_bytes = 256;
2371 break;
2372 case SW_OFFSET_320:
2373 sw_offset_bytes = 320;
2374 break;
2375 case SW_OFFSET_384:
2376 sw_offset_bytes = 384;
2377 break;
2378 case SW_OFFSET_448:
2379 sw_offset_bytes = 448;
2380 break;
2381 default:
2382 sw_offset_bytes = 0;
2383 break;
2384 }
2385
2386 DMA_COMMON_SYNC_OFFSET(rx_msg_p->buf_dma,
2387 (buf_offset + sw_offset_bytes),
2388 (hdr_size + l2_len),
2389 DDI_DMA_SYNC_FORCPU);
2390
2391 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2392 "==> nxge_receive_packet: after first dump:usage count"));
2393
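	/*
	 * Track how many packets share this buffer block.  On first use,
	 * decide from the RBR consumption thresholds whether the packet
	 * should be bcopy'd or the buffer loaned up the stack.
	 */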
2394 if (rx_msg_p->cur_usage_cnt == 0) {
2395 if (rx_rbr_p->rbr_use_bcopy) {
2396 atomic_inc_32(&rx_rbr_p->rbr_consumed);
2397 if (rx_rbr_p->rbr_consumed <
2398 rx_rbr_p->rbr_threshold_hi) {
2399 if (rx_rbr_p->rbr_threshold_lo == 0 ||
2400 ((rx_rbr_p->rbr_consumed >=
2401 rx_rbr_p->rbr_threshold_lo) &&
2402 (rx_rbr_p->rbr_bufsize_type >=
2403 pktbufsz_type))) {
2404 rx_msg_p->rx_use_bcopy = B_TRUE;
2405 }
2406 } else {
2407 rx_msg_p->rx_use_bcopy = B_TRUE;
2408 }
2409 }
2410 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2411 "==> nxge_receive_packet: buf %d (new block) ",
2412 bsize));
2413
2414 rx_msg_p->pkt_buf_size_code = pktbufsz_type;
2415 rx_msg_p->pkt_buf_size = bsize;
2416 rx_msg_p->cur_usage_cnt = 1;
2417 if (pktbufsz_type == RCR_SINGLE_BLOCK) {
2418 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2419 "==> nxge_receive_packet: buf %d "
2420 "(single block) ",
2421 bsize));
2422 /*
2423 * Buffer can be reused once the free function
2424 * is called.
2425 */
2426 rx_msg_p->max_usage_cnt = 1;
2427 buffer_free = B_TRUE;
2428 } else {
2429 rx_msg_p->max_usage_cnt = rx_msg_p->block_size/bsize;
2430 if (rx_msg_p->max_usage_cnt == 1) {
2431 buffer_free = B_TRUE;
2432 }
2433 }
2434 } else {
2435 rx_msg_p->cur_usage_cnt++;
2436 if (rx_msg_p->cur_usage_cnt == rx_msg_p->max_usage_cnt) {
2437 buffer_free = B_TRUE;
2438 }
2439 }
2440
2441 NXGE_DEBUG_MSG((nxgep, RX_CTL,
2442 "msgbuf index = %d l2len %d bytes usage %d max_usage %d ",
2443 msg_index, l2_len,
2444 rx_msg_p->cur_usage_cnt, rx_msg_p->max_usage_cnt));
2445
2446 if ((error_type) || (dcf_err) || (pkt_too_long_err)) {
2447 rdc_stats->ierrors++;
2448 if (dcf_err) {
2449 rdc_stats->dcf_err++;
2450 #ifdef NXGE_DEBUG
2451 			if (rdc_stats->dcf_err == 1) {
2452 NXGE_DEBUG_MSG((nxgep, RX_CTL,
2453 "nxge_receive_packet: channel %d dcf_err rcr"
2454 " 0x%llx", channel, rcr_entry));
2455 }
2456 #endif
2457 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, 0,
2458 NXGE_FM_EREPORT_RDMC_DCF_ERR);
2459 } else if (pkt_too_long_err) {
2460 rdc_stats->pkt_too_long_err++;
2461 NXGE_DEBUG_MSG((nxgep, RX_CTL, " nxge_receive_packet:"
2462 " channel %d packet length [%d] > "
2463 "maxframesize [%d]", channel, l2_len + ETHERFCSL,
2464 nxgep->mac.maxframesize));
2465 } else {
2466 /* Update error stats */
2467 error_disp_cnt = NXGE_ERROR_SHOW_MAX;
2468 rdc_stats->errlog.compl_err_type = error_type;
2469
2470 switch (error_type) {
2471 /*
2472 * Do not send FMA ereport for RCR_L2_ERROR and
2473 * RCR_L4_CSUM_ERROR because most likely they indicate
2474 * back pressure rather than HW failures.
2475 */
2476 case RCR_L2_ERROR:
2477 rdc_stats->l2_err++;
2478 if (rdc_stats->l2_err <
2479 error_disp_cnt) {
2480 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2481 " nxge_receive_packet:"
2482 " channel %d RCR L2_ERROR",
2483 channel));
2484 }
2485 break;
2486 case RCR_L4_CSUM_ERROR:
2487 error_send_up = B_TRUE;
2488 rdc_stats->l4_cksum_err++;
2489 if (rdc_stats->l4_cksum_err <
2490 error_disp_cnt) {
2491 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2492 " nxge_receive_packet:"
2493 " channel %d"
2494 " RCR L4_CSUM_ERROR", channel));
2495 }
2496 break;
2497 /*
2498 * Do not send FMA ereport for RCR_FFLP_SOFT_ERROR and
2499 * RCR_ZCP_SOFT_ERROR because they reflect the same
2500 * FFLP and ZCP errors that have been reported by
2501 * nxge_fflp.c and nxge_zcp.c.
2502 */
2503 case RCR_FFLP_SOFT_ERROR:
2504 error_send_up = B_TRUE;
2505 rdc_stats->fflp_soft_err++;
2506 if (rdc_stats->fflp_soft_err <
2507 error_disp_cnt) {
2508 NXGE_ERROR_MSG((nxgep,
2509 NXGE_ERR_CTL,
2510 " nxge_receive_packet:"
2511 " channel %d"
2512 " RCR FFLP_SOFT_ERROR", channel));
2513 }
2514 break;
2515 case RCR_ZCP_SOFT_ERROR:
2516 error_send_up = B_TRUE;
2517 				rdc_stats->zcp_soft_err++;
2518 if (rdc_stats->zcp_soft_err <
2519 error_disp_cnt)
2520 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2521 " nxge_receive_packet: Channel %d"
2522 " RCR ZCP_SOFT_ERROR", channel));
2523 break;
2524 default:
2525 rdc_stats->rcr_unknown_err++;
2526 if (rdc_stats->rcr_unknown_err
2527 < error_disp_cnt) {
2528 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2529 " nxge_receive_packet: Channel %d"
2530 " RCR entry 0x%llx error 0x%x",
2531 					    channel, rcr_entry, error_type));
2532 }
2533 break;
2534 }
2535 }
2536
2537 /*
2538 * Update and repost buffer block if max usage
2539 * count is reached.
2540 */
2541 if (error_send_up == B_FALSE) {
2542 atomic_inc_32(&rx_msg_p->ref_cnt);
2543 if (buffer_free == B_TRUE) {
2544 rx_msg_p->free = B_TRUE;
2545 }
2546
2547 MUTEX_EXIT(&rx_rbr_p->lock);
2548 nxge_freeb(rx_msg_p);
2549 return;
2550 }
2551 }
2552
2553 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2554 "==> nxge_receive_packet: DMA sync second "));
2555
2556 bytes_read = rcr_p->rcvd_pkt_bytes;
2557 skip_len = sw_offset_bytes + hdr_size;
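	/*
	 * Build the mblk: either loan the receive buffer up the stack
	 * (nxge_dupb) or copy the data into a new mblk (nxge_dupb_bcopy),
	 * skipping the software offset and header for the first entry.
	 */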
2558 if (!rx_msg_p->rx_use_bcopy) {
2559 /*
2560 		 * For loaned-up buffers, the driver reference count
2561 		 * is incremented first and then the free state is set.
2562 */
2563 if ((nmp = nxge_dupb(rx_msg_p, buf_offset, bsize)) != NULL) {
2564 if (first_entry) {
2565 nmp->b_rptr = &nmp->b_rptr[skip_len];
2566 if (l2_len < bsize - skip_len) {
2567 nmp->b_wptr = &nmp->b_rptr[l2_len];
2568 } else {
2569 nmp->b_wptr = &nmp->b_rptr[bsize
2570 - skip_len];
2571 }
2572 } else {
2573 if (l2_len - bytes_read < bsize) {
2574 nmp->b_wptr =
2575 &nmp->b_rptr[l2_len - bytes_read];
2576 } else {
2577 nmp->b_wptr = &nmp->b_rptr[bsize];
2578 }
2579 }
2580 }
2581 } else {
2582 if (first_entry) {
2583 nmp = nxge_dupb_bcopy(rx_msg_p, buf_offset + skip_len,
2584 l2_len < bsize - skip_len ?
2585 l2_len : bsize - skip_len);
2586 } else {
2587 nmp = nxge_dupb_bcopy(rx_msg_p, buf_offset,
2588 l2_len - bytes_read < bsize ?
2589 l2_len - bytes_read : bsize);
2590 }
2591 }
2592 if (nmp != NULL) {
2593 if (first_entry) {
2594 /*
2595 * Jumbo packets may be received with more than one
2596 * buffer, increment ipackets for the first entry only.
2597 */
2598 rdc_stats->ipackets++;
2599
2600 /* Update ibytes for kstat. */
2601 rdc_stats->ibytes += skip_len
2602 + l2_len < bsize ? l2_len : bsize;
2603 /*
2604 * Update the number of bytes read so far for the
2605 * current frame.
2606 */
2607 bytes_read = nmp->b_wptr - nmp->b_rptr;
2608 } else {
2609 rdc_stats->ibytes += l2_len - bytes_read < bsize ?
2610 l2_len - bytes_read : bsize;
2611 bytes_read += nmp->b_wptr - nmp->b_rptr;
2612 }
2613
2614 NXGE_DEBUG_MSG((nxgep, RX_CTL,
2615 "==> nxge_receive_packet after dupb: "
2616 "rbr consumed %d "
2617 "pktbufsz_type %d "
2618 "nmp $%p rptr $%p wptr $%p "
2619 		    "buf_offset %d bsize %d l2_len %d skip_len %d",
2620 rx_rbr_p->rbr_consumed,
2621 pktbufsz_type,
2622 nmp, nmp->b_rptr, nmp->b_wptr,
2623 buf_offset, bsize, l2_len, skip_len));
2624 } else {
2625 cmn_err(CE_WARN, "!nxge_receive_packet: "
2626 "update stats (error)");
2627 atomic_inc_32(&rx_msg_p->ref_cnt);
2628 if (buffer_free == B_TRUE) {
2629 rx_msg_p->free = B_TRUE;
2630 }
2631 MUTEX_EXIT(&rx_rbr_p->lock);
2632 nxge_freeb(rx_msg_p);
2633 return;
2634 }
2635
2636 if (buffer_free == B_TRUE) {
2637 rx_msg_p->free = B_TRUE;
2638 }
2639
2640 is_valid = (nmp != NULL);
2641
2642 rcr_p->rcvd_pkt_bytes = bytes_read;
2643
2644 MUTEX_EXIT(&rx_rbr_p->lock);
2645
2646 if (rx_msg_p->free && rx_msg_p->rx_use_bcopy) {
2647 atomic_inc_32(&rx_msg_p->ref_cnt);
2648 nxge_freeb(rx_msg_p);
2649 }
2650
2651 if (is_valid) {
2652 nmp->b_cont = NULL;
2653 if (first_entry) {
2654 *mp = nmp;
2655 *mp_cont = NULL;
2656 } else {
2657 *mp_cont = nmp;
2658 }
2659 }
2660
2661 /*
2662 * ERROR, FRAG and PKT_TYPE are only reported in the first entry.
2663 * If a packet is not fragmented and no error bit is set, then
2664 * L4 checksum is OK.
2665 */
2666
2667 if (is_valid && !multi) {
2668 /*
2669 		 * If the checksum flag nxge_cksum_offload
2670 		 * is 1, TCP and UDP packets can be sent
2671 		 * up with good checksum. If the checksum flag
2672 		 * is set to 0, checksum reporting will apply to
2673 		 * TCP packets only (workaround for a hardware bug).
2674 		 * If the checksum flag nxge_cksum_offload is
2675 		 * greater than 1, neither TCP nor UDP packets
2676 		 * will have their hardware checksum results reported.
2677 */
2678 if (nxge_cksum_offload == 1) {
2679 is_tcp_udp = ((pkt_type == RCR_PKT_IS_TCP ||
2680 pkt_type == RCR_PKT_IS_UDP) ?
2681 B_TRUE: B_FALSE);
2682 } else if (!nxge_cksum_offload) {
2683 /* TCP checksum only. */
2684 is_tcp_udp = ((pkt_type == RCR_PKT_IS_TCP) ?
2685 B_TRUE: B_FALSE);
2686 }
2687
2688 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_receive_packet: "
2689 "is_valid 0x%x multi 0x%llx pkt %d frag %d error %d",
2690 is_valid, multi, is_tcp_udp, frag, error_type));
2691
2692 if (is_tcp_udp && !frag && !error_type) {
2693 mac_hcksum_set(nmp, 0, 0, 0, 0, HCK_FULLCKSUM_OK);
2694 NXGE_DEBUG_MSG((nxgep, RX_CTL,
2695 "==> nxge_receive_packet: Full tcp/udp cksum "
2696 "is_valid 0x%x multi 0x%llx pkt %d frag %d "
2697 "error %d",
2698 is_valid, multi, is_tcp_udp, frag, error_type));
2699 }
2700 }
2701
2702 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2703 "==> nxge_receive_packet: *mp 0x%016llx", *mp));
2704
2705 *multi_p = (multi == RCR_MULTI_MASK);
2706 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_receive_packet: "
2707 "multi %d nmp 0x%016llx *mp 0x%016llx *mp_cont 0x%016llx",
2708 *multi_p, nmp, *mp, *mp_cont));
2709 }
2710
2711 /*
2712 * Enable polling for a ring. Interrupt for the ring is disabled when
2713 * the nxge interrupt comes (see nxge_rx_intr).
2714 */
2715 int
2716 nxge_enable_poll(void *arg)
2717 {
2718 p_nxge_ring_handle_t ring_handle = (p_nxge_ring_handle_t)arg;
2719 p_rx_rcr_ring_t ringp;
2720 p_nxge_t nxgep;
2721 p_nxge_ldg_t ldgp;
2722 uint32_t channel;
2723
2724 if (ring_handle == NULL) {
2725 ASSERT(ring_handle != NULL);
2726 return (0);
2727 }
2728
2729 nxgep = ring_handle->nxgep;
2730 channel = nxgep->pt_config.hw_config.start_rdc + ring_handle->index;
2731 ringp = nxgep->rx_rcr_rings->rcr_rings[channel];
2732 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
2733 "==> nxge_enable_poll: rdc %d ", ringp->rdc));
2734 ldgp = ringp->ldgp;
2735 if (ldgp == NULL) {
2736 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
2737 "==> nxge_enable_poll: rdc %d NULL ldgp: no change",
2738 ringp->rdc));
2739 return (0);
2740 }
2741
2742 MUTEX_ENTER(&ringp->lock);
2743 /* enable polling */
2744 if (ringp->poll_flag == 0) {
2745 ringp->poll_flag = 1;
2746 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
2747 "==> nxge_enable_poll: rdc %d set poll flag to 1",
2748 ringp->rdc));
2749 }
2750
2751 MUTEX_EXIT(&ringp->lock);
2752 return (0);
2753 }
2754 /*
2755 * Disable polling for a ring and enable its interrupt.
2756 */
2757 int
2758 nxge_disable_poll(void *arg)
2759 {
2760 p_nxge_ring_handle_t ring_handle = (p_nxge_ring_handle_t)arg;
2761 p_rx_rcr_ring_t ringp;
2762 p_nxge_t nxgep;
2763 uint32_t channel;
2764
2765 if (ring_handle == NULL) {
2766 ASSERT(ring_handle != NULL);
2767 return (0);
2768 }
2769
2770 nxgep = ring_handle->nxgep;
2771 channel = nxgep->pt_config.hw_config.start_rdc + ring_handle->index;
2772 ringp = nxgep->rx_rcr_rings->rcr_rings[channel];
2773
2774 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
2775 	    "==> nxge_disable_poll: rdc %d", ringp->rdc));
2776
2777 MUTEX_ENTER(&ringp->lock);
2778
2779 /* disable polling: enable interrupt */
2780 if (ringp->poll_flag) {
2781 npi_handle_t handle;
2782 rx_dma_ctl_stat_t cs;
2783 uint8_t channel;
2784 p_nxge_ldg_t ldgp;
2785
2786 /*
2787 * Get the control and status for this channel.
2788 */
2789 handle = NXGE_DEV_NPI_HANDLE(nxgep);
2790 channel = ringp->rdc;
2791 RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG,
2792 channel, &cs.value);
2793
2794 /*
2795 * Enable mailbox update
2796 * Since packets were not read and the hardware uses
2797 * bits pktread and ptrread to update the queue
2798 * length, we need to set both bits to 0.
2799 */
2800 cs.bits.ldw.pktread = 0;
2801 cs.bits.ldw.ptrread = 0;
2802 cs.bits.hdw.mex = 1;
2803 RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
2804 cs.value);
2805
2806 /*
2807 * Rearm this logical group if this is a single device
2808 * group.
2809 */
2810 ldgp = ringp->ldgp;
2811 if (ldgp == NULL) {
2812 ringp->poll_flag = 0;
2813 MUTEX_EXIT(&ringp->lock);
2814 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
2815 "==> nxge_disable_poll: no ldgp rdc %d "
2816 			    "(still set poll to 0)", ringp->rdc));
2817 return (0);
2818 }
2819 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
2820 "==> nxge_disable_poll: rdc %d ldgp $%p (enable intr)",
2821 ringp->rdc, ldgp));
2822 if (ldgp->nldvs == 1) {
2823 if (isLDOMguest(nxgep)) {
2824 ldgp->arm = B_TRUE;
2825 nxge_hio_ldgimgn(nxgep, ldgp);
2826 } else {
2827 ldgimgm_t mgm;
2828 mgm.value = 0;
2829 mgm.bits.ldw.arm = 1;
2830 mgm.bits.ldw.timer = ldgp->ldg_timer;
2831 NXGE_REG_WR64(handle,
2832 LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg),
2833 mgm.value);
2834 }
2835 }
2836 ringp->poll_flag = 0;
2837 }
2838
2839 MUTEX_EXIT(&ringp->lock);
2840 return (0);
2841 }
2842
2843 /*
2844 * Poll 'bytes_to_pickup' bytes of message from the rx ring.
2845 */
2846 mblk_t *
2847 nxge_rx_poll(void *arg, int bytes_to_pickup)
2848 {
2849 p_nxge_ring_handle_t ring_handle = (p_nxge_ring_handle_t)arg;
2850 p_rx_rcr_ring_t rcr_p;
2851 p_nxge_t nxgep;
2852 npi_handle_t handle;
2853 rx_dma_ctl_stat_t cs;
2854 mblk_t *mblk;
2855 p_nxge_ldv_t ldvp;
2856 uint32_t channel;
2857
2858 nxgep = ring_handle->nxgep;
2859
2860 /*
2861 * Get the control and status for this channel.
2862 */
2863 handle = NXGE_DEV_NPI_HANDLE(nxgep);
2864 channel = nxgep->pt_config.hw_config.start_rdc + ring_handle->index;
2865 rcr_p = nxgep->rx_rcr_rings->rcr_rings[channel];
2866 MUTEX_ENTER(&rcr_p->lock);
2867 ASSERT(rcr_p->poll_flag == 1);
2868
2869 RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, rcr_p->rdc, &cs.value);
2870
2871 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
2872 "==> nxge_rx_poll: calling nxge_rx_pkts: rdc %d poll_flag %d",
2873 rcr_p->rdc, rcr_p->poll_flag));
2874 mblk = nxge_rx_pkts(nxgep, rcr_p, cs, bytes_to_pickup);
2875
2876 ldvp = rcr_p->ldvp;
2877 /* error events. */
2878 if (ldvp && (cs.value & RX_DMA_CTL_STAT_ERROR)) {
2879 (void) nxge_rx_err_evnts(nxgep, ldvp->vdma_index, cs);
2880 }
2881
2882 MUTEX_EXIT(&rcr_p->lock);
2883
2884 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
2885 "<== nxge_rx_poll: rdc %d mblk $%p", rcr_p->rdc, mblk));
2886 return (mblk);
2887 }
2888
2889
2890 /*ARGSUSED*/
2891 static nxge_status_t
2892 nxge_rx_err_evnts(p_nxge_t nxgep, int channel, rx_dma_ctl_stat_t cs)
2893 {
2894 p_nxge_rx_ring_stats_t rdc_stats;
2895 npi_handle_t handle;
2896 npi_status_t rs;
2897 boolean_t rxchan_fatal = B_FALSE;
2898 boolean_t rxport_fatal = B_FALSE;
2899 uint8_t portn;
2900 nxge_status_t status = NXGE_OK;
2901 uint32_t error_disp_cnt = NXGE_ERROR_SHOW_MAX;
2902 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_rx_err_evnts"));
2903
2904 handle = NXGE_DEV_NPI_HANDLE(nxgep);
2905 portn = nxgep->mac.portnum;
2906 rdc_stats = &nxgep->statsp->rdc_stats[channel];
2907
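	/*
	 * Walk the error bits of the control/status word, update the
	 * matching statistics, and note whether channel-level or
	 * port-level recovery is required.
	 */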
2908 if (cs.bits.hdw.rbr_tmout) {
2909 rdc_stats->rx_rbr_tmout++;
2910 NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
2911 NXGE_FM_EREPORT_RDMC_RBR_TMOUT);
2912 rxchan_fatal = B_TRUE;
2913 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2914 "==> nxge_rx_err_evnts: rx_rbr_timeout"));
2915 }
2916 if (cs.bits.hdw.rsp_cnt_err) {
2917 rdc_stats->rsp_cnt_err++;
2918 NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
2919 NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR);
2920 rxchan_fatal = B_TRUE;
2921 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2922 "==> nxge_rx_err_evnts(channel %d): "
2923 "rsp_cnt_err", channel));
2924 }
2925 if (cs.bits.hdw.byte_en_bus) {
2926 rdc_stats->byte_en_bus++;
2927 NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
2928 NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS);
2929 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2930 "==> nxge_rx_err_evnts(channel %d): "
2931 "fatal error: byte_en_bus", channel));
2932 rxchan_fatal = B_TRUE;
2933 }
2934 if (cs.bits.hdw.rsp_dat_err) {
2935 rdc_stats->rsp_dat_err++;
2936 NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
2937 NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR);
2938 rxchan_fatal = B_TRUE;
2939 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2940 "==> nxge_rx_err_evnts(channel %d): "
2941 "fatal error: rsp_dat_err", channel));
2942 }
2943 if (cs.bits.hdw.rcr_ack_err) {
2944 rdc_stats->rcr_ack_err++;
2945 NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
2946 NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR);
2947 rxchan_fatal = B_TRUE;
2948 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2949 "==> nxge_rx_err_evnts(channel %d): "
2950 "fatal error: rcr_ack_err", channel));
2951 }
2952 if (cs.bits.hdw.dc_fifo_err) {
2953 rdc_stats->dc_fifo_err++;
2954 NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
2955 NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR);
2956 /* This is not a fatal error! */
2957 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2958 "==> nxge_rx_err_evnts(channel %d): "
2959 "dc_fifo_err", channel));
2960 rxport_fatal = B_TRUE;
2961 }
2962 if ((cs.bits.hdw.rcr_sha_par) || (cs.bits.hdw.rbr_pre_par)) {
2963 if ((rs = npi_rxdma_ring_perr_stat_get(handle,
2964 &rdc_stats->errlog.pre_par,
2965 &rdc_stats->errlog.sha_par))
2966 != NPI_SUCCESS) {
2967 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2968 "==> nxge_rx_err_evnts(channel %d): "
2969 "rcr_sha_par: get perr", channel));
2970 return (NXGE_ERROR | rs);
2971 }
2972 if (cs.bits.hdw.rcr_sha_par) {
2973 rdc_stats->rcr_sha_par++;
2974 NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
2975 NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR);
2976 rxchan_fatal = B_TRUE;
2977 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2978 "==> nxge_rx_err_evnts(channel %d): "
2979 "fatal error: rcr_sha_par", channel));
2980 }
2981 if (cs.bits.hdw.rbr_pre_par) {
2982 rdc_stats->rbr_pre_par++;
2983 NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
2984 NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR);
2985 rxchan_fatal = B_TRUE;
2986 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2987 "==> nxge_rx_err_evnts(channel %d): "
2988 "fatal error: rbr_pre_par", channel));
2989 }
2990 }
2991 /*
2992 	 * The following 4 status bits are informational; the system
2993 * is running fine. There is no need to send FMA ereports or
2994 * log messages.
2995 */
2996 if (cs.bits.hdw.port_drop_pkt) {
2997 rdc_stats->port_drop_pkt++;
2998 }
2999 if (cs.bits.hdw.wred_drop) {
3000 rdc_stats->wred_drop++;
3001 }
3002 if (cs.bits.hdw.rbr_pre_empty) {
3003 rdc_stats->rbr_pre_empty++;
3004 }
3005 if (cs.bits.hdw.rcr_shadow_full) {
3006 rdc_stats->rcr_shadow_full++;
3007 }
3008 if (cs.bits.hdw.config_err) {
3009 rdc_stats->config_err++;
3010 NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
3011 NXGE_FM_EREPORT_RDMC_CONFIG_ERR);
3012 rxchan_fatal = B_TRUE;
3013 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3014 "==> nxge_rx_err_evnts(channel %d): "
3015 "config error", channel));
3016 }
3017 if (cs.bits.hdw.rcrincon) {
3018 rdc_stats->rcrincon++;
3019 NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
3020 NXGE_FM_EREPORT_RDMC_RCRINCON);
3021 rxchan_fatal = B_TRUE;
3022 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3023 "==> nxge_rx_err_evnts(channel %d): "
3024 "fatal error: rcrincon error", channel));
3025 }
3026 if (cs.bits.hdw.rcrfull) {
3027 rdc_stats->rcrfull++;
3028 NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
3029 NXGE_FM_EREPORT_RDMC_RCRFULL);
3030 rxchan_fatal = B_TRUE;
3031 if (rdc_stats->rcrfull < error_disp_cnt) {
3032 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3033 "==> nxge_rx_err_evnts(channel %d): "
3034 "fatal error: rcrfull error", channel));
3035 }
3036 }
3037 if (cs.bits.hdw.rbr_empty) {
3038 /*
3039 		 * This bit is for information only; there is no need to
3040 		 * send an FMA ereport or log a message.
3041 */
3042 rdc_stats->rbr_empty++;
3043 }
3044 if (cs.bits.hdw.rbrfull) {
3045 rdc_stats->rbrfull++;
3046 NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
3047 NXGE_FM_EREPORT_RDMC_RBRFULL);
3048 rxchan_fatal = B_TRUE;
3049 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3050 "==> nxge_rx_err_evnts(channel %d): "
3051 "fatal error: rbr_full error", channel));
3052 }
3053 if (cs.bits.hdw.rbrlogpage) {
3054 rdc_stats->rbrlogpage++;
3055 NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
3056 NXGE_FM_EREPORT_RDMC_RBRLOGPAGE);
3057 rxchan_fatal = B_TRUE;
3058 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3059 "==> nxge_rx_err_evnts(channel %d): "
3060 "fatal error: rbr logical page error", channel));
3061 }
3062 if (cs.bits.hdw.cfiglogpage) {
3063 rdc_stats->cfiglogpage++;
3064 NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
3065 NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE);
3066 rxchan_fatal = B_TRUE;
3067 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3068 "==> nxge_rx_err_evnts(channel %d): "
3069 "fatal error: cfig logical page error", channel));
3070 }
3071
3072 if (rxport_fatal) {
3073 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3074 " nxge_rx_err_evnts: fatal error on Port #%d\n",
3075 portn));
3076 if (isLDOMguest(nxgep)) {
3077 status = NXGE_ERROR;
3078 } else {
3079 status = nxge_ipp_fatal_err_recover(nxgep);
3080 if (status == NXGE_OK) {
3081 FM_SERVICE_RESTORED(nxgep);
3082 }
3083 }
3084 }
3085
3086 if (rxchan_fatal) {
3087 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3088 " nxge_rx_err_evnts: fatal error on Channel #%d\n",
3089 channel));
3090 if (isLDOMguest(nxgep)) {
3091 status = NXGE_ERROR;
3092 } else {
3093 status = nxge_rxdma_fatal_err_recover(nxgep, channel);
3094 if (status == NXGE_OK) {
3095 FM_SERVICE_RESTORED(nxgep);
3096 }
3097 }
3098 }
3099
3100 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_rx_err_evnts"));
3101
3102 return (status);
3103 }
3104
3105 /*
3106 * nxge_rdc_hvio_setup
3107 *
3108  * This code appears to set up some Hypervisor variables.
3109 *
3110 * Arguments:
3111 * nxgep
3112 * channel
3113 *
3114 * Notes:
3115 * What does NIU_LP_WORKAROUND mean?
3116 *
3117 * NPI/NXGE function calls:
3118 * na
3119 *
3120 * Context:
3121 * Any domain
3122 */
3123 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
3124 static void
3125 nxge_rdc_hvio_setup(
3126 nxge_t *nxgep, int channel)
3127 {
3128 nxge_dma_common_t *dma_common;
3129 nxge_dma_common_t *dma_control;
3130 rx_rbr_ring_t *ring;
3131
3132 ring = nxgep->rx_rbr_rings->rbr_rings[channel];
3133 dma_common = nxgep->rx_buf_pool_p->dma_buf_pool_p[channel];
3134
3135 ring->hv_set = B_FALSE;
3136
3137 ring->hv_rx_buf_base_ioaddr_pp = (uint64_t)
3138 dma_common->orig_ioaddr_pp;
3139 ring->hv_rx_buf_ioaddr_size = (uint64_t)
3140 dma_common->orig_alength;
3141
3142 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma_channel: "
3143 "channel %d data buf base io $%lx ($%p) size 0x%lx (%ld 0x%lx)",
3144 channel, ring->hv_rx_buf_base_ioaddr_pp,
3145 dma_common->ioaddr_pp, ring->hv_rx_buf_ioaddr_size,
3146 dma_common->orig_alength, dma_common->orig_alength));
3147
3148 dma_control = nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel];
3149
3150 ring->hv_rx_cntl_base_ioaddr_pp =
3151 (uint64_t)dma_control->orig_ioaddr_pp;
3152 ring->hv_rx_cntl_ioaddr_size =
3153 (uint64_t)dma_control->orig_alength;
3154
3155 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma_channel: "
3156 "channel %d cntl base io $%p ($%p) size 0x%llx (%d 0x%x)",
3157 channel, ring->hv_rx_cntl_base_ioaddr_pp,
3158 dma_control->ioaddr_pp, ring->hv_rx_cntl_ioaddr_size,
3159 dma_control->orig_alength, dma_control->orig_alength));
3160 }
3161 #endif
3162
3163 /*
3164 * nxge_map_rxdma
3165 *
3166 * Map an RDC into our kernel space.
3167 *
3168 * Arguments:
3169 * nxgep
3170 * channel The channel to map.
3171 *
3172 * Notes:
3173 * 1. Allocate & initialise a memory pool, if necessary.
3174 * 2. Allocate however many receive buffers are required.
3175 * 3. Setup buffers, descriptors, and mailbox.
3176 *
3177 * NPI/NXGE function calls:
3178 * nxge_alloc_rx_mem_pool()
3179 * nxge_alloc_rbb()
3180 * nxge_map_rxdma_channel()
3181 *
3182 * Registers accessed:
3183 *
3184 * Context:
3185 * Any domain
3186 */
3187 static nxge_status_t
3188 nxge_map_rxdma(p_nxge_t nxgep, int channel)
3189 {
3190 nxge_dma_common_t **data;
3191 nxge_dma_common_t **control;
3192 rx_rbr_ring_t **rbr_ring;
3193 rx_rcr_ring_t **rcr_ring;
3194 rx_mbox_t **mailbox;
3195 uint32_t chunks;
3196
3197 nxge_status_t status;
3198
3199 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma"));
3200
3201 if (!nxgep->rx_buf_pool_p) {
3202 if (nxge_alloc_rx_mem_pool(nxgep) != NXGE_OK) {
3203 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3204 "<== nxge_map_rxdma: buf not allocated"));
3205 return (NXGE_ERROR);
3206 }
3207 }
3208
3209 if (nxge_alloc_rxb(nxgep, channel) != NXGE_OK)
3210 return (NXGE_ERROR);
3211
3212 /*
3213 	 * Map descriptors from the buffer pools for each dma channel.
3214 */
3215
3216 /*
3217 * Set up and prepare buffer blocks, descriptors
3218 * and mailbox.
3219 */
3220 data = &nxgep->rx_buf_pool_p->dma_buf_pool_p[channel];
3221 rbr_ring = &nxgep->rx_rbr_rings->rbr_rings[channel];
3222 chunks = nxgep->rx_buf_pool_p->num_chunks[channel];
3223
3224 control = &nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel];
3225 rcr_ring = &nxgep->rx_rcr_rings->rcr_rings[channel];
3226
3227 mailbox = &nxgep->rx_mbox_areas_p->rxmbox_areas[channel];
3228
3229 status = nxge_map_rxdma_channel(nxgep, channel, data, rbr_ring,
3230 chunks, control, rcr_ring, mailbox);
3231 if (status != NXGE_OK) {
3232 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3233 "==> nxge_map_rxdma: nxge_map_rxdma_channel(%d) "
3234 "returned 0x%x",
3235 channel, status));
3236 return (status);
3237 }
3238 nxgep->rx_rbr_rings->rbr_rings[channel]->index = (uint16_t)channel;
3239 nxgep->rx_rcr_rings->rcr_rings[channel]->index = (uint16_t)channel;
3240 nxgep->rx_rcr_rings->rcr_rings[channel]->rdc_stats =
3241 &nxgep->statsp->rdc_stats[channel];
3242
3243 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
3244 if (!isLDOMguest(nxgep))
3245 nxge_rdc_hvio_setup(nxgep, channel);
3246 #endif
3247
3248 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3249 "<== nxge_map_rxdma: (status 0x%x channel %d)", status, channel));
3250
3251 return (status);
3252 }
3253
3254 static void
3255 nxge_unmap_rxdma(p_nxge_t nxgep, int channel)
3256 {
3257 rx_rbr_ring_t *rbr_ring;
3258 rx_rcr_ring_t *rcr_ring;
3259 rx_mbox_t *mailbox;
3260
3261 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_unmap_rxdma(%d)", channel));
3262
3263 if (!nxgep->rx_rbr_rings || !nxgep->rx_rcr_rings ||
3264 !nxgep->rx_mbox_areas_p)
3265 return;
3266
3267 rbr_ring = nxgep->rx_rbr_rings->rbr_rings[channel];
3268 rcr_ring = nxgep->rx_rcr_rings->rcr_rings[channel];
3269 mailbox = nxgep->rx_mbox_areas_p->rxmbox_areas[channel];
3270
3271 if (!rbr_ring || !rcr_ring || !mailbox)
3272 return;
3273
3274 (void) nxge_unmap_rxdma_channel(
3275 nxgep, channel, rbr_ring, rcr_ring, mailbox);
3276
3277 nxge_free_rxb(nxgep, channel);
3278
3279 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_unmap_rxdma"));
3280 }
3281
3282 nxge_status_t
3283 nxge_map_rxdma_channel(p_nxge_t nxgep, uint16_t channel,
3284 p_nxge_dma_common_t *dma_buf_p, p_rx_rbr_ring_t *rbr_p,
3285 uint32_t num_chunks,
3286 p_nxge_dma_common_t *dma_cntl_p, p_rx_rcr_ring_t *rcr_p,
3287 p_rx_mbox_t *rx_mbox_p)
3288 {
3289 int status = NXGE_OK;
3290
3291 /*
3292 * Set up and prepare buffer blocks, descriptors
3293 * and mailbox.
3294 */
3295 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3296 "==> nxge_map_rxdma_channel (channel %d)", channel));
3297 /*
3298 * Receive buffer blocks
3299 */
3300 status = nxge_map_rxdma_channel_buf_ring(nxgep, channel,
3301 dma_buf_p, rbr_p, num_chunks);
3302 if (status != NXGE_OK) {
3303 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3304 "==> nxge_map_rxdma_channel (channel %d): "
3305 "map buffer failed 0x%x", channel, status));
3306 goto nxge_map_rxdma_channel_exit;
3307 }
3308
3309 /*
3310 * Receive block ring, completion ring and mailbox.
3311 */
3312 status = nxge_map_rxdma_channel_cfg_ring(nxgep, channel,
3313 dma_cntl_p, rbr_p, rcr_p, rx_mbox_p);
3314 if (status != NXGE_OK) {
3315 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3316 "==> nxge_map_rxdma_channel (channel %d): "
3317 "map config failed 0x%x", channel, status));
3318 goto nxge_map_rxdma_channel_fail2;
3319 }
3320
3321 goto nxge_map_rxdma_channel_exit;
3322
3323 nxge_map_rxdma_channel_fail3:
3324 /* Free rbr, rcr */
3325 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3326 "==> nxge_map_rxdma_channel: free rbr/rcr "
3327 "(status 0x%x channel %d)",
3328 status, channel));
3329 nxge_unmap_rxdma_channel_cfg_ring(nxgep,
3330 *rcr_p, *rx_mbox_p);
3331
3332 nxge_map_rxdma_channel_fail2:
3333 /* Free buffer blocks */
3334 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3335 "==> nxge_map_rxdma_channel: free rx buffers"
3336 "(nxgep 0x%x status 0x%x channel %d)",
3337 nxgep, status, channel));
3338 nxge_unmap_rxdma_channel_buf_ring(nxgep, *rbr_p);
3339
3340 status = NXGE_ERROR;
3341
3342 nxge_map_rxdma_channel_exit:
3343 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3344 "<== nxge_map_rxdma_channel: "
3345 "(nxgep 0x%x status 0x%x channel %d)",
3346 nxgep, status, channel));
3347
3348 return (status);
3349 }
3350
3351 /*ARGSUSED*/
3352 static void
3353 nxge_unmap_rxdma_channel(p_nxge_t nxgep, uint16_t channel,
3354 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p)
3355 {
3356 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3357 "==> nxge_unmap_rxdma_channel (channel %d)", channel));
3358
3359 /*
3360 * unmap receive block ring, completion ring and mailbox.
3361 */
3362 (void) nxge_unmap_rxdma_channel_cfg_ring(nxgep,
3363 rcr_p, rx_mbox_p);
3364
3365 /* unmap buffer blocks */
3366 (void) nxge_unmap_rxdma_channel_buf_ring(nxgep, rbr_p);
3367
3368 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_unmap_rxdma_channel"));
3369 }
3370
3371 /*ARGSUSED*/
3372 static nxge_status_t
3373 nxge_map_rxdma_channel_cfg_ring(p_nxge_t nxgep, uint16_t dma_channel,
3374 p_nxge_dma_common_t *dma_cntl_p, p_rx_rbr_ring_t *rbr_p,
3375 p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p)
3376 {
3377 p_rx_rbr_ring_t rbrp;
3378 p_rx_rcr_ring_t rcrp;
3379 p_rx_mbox_t mboxp;
3380 p_nxge_dma_common_t cntl_dmap;
3381 p_nxge_dma_common_t dmap;
3382 p_rx_msg_t *rx_msg_ring;
3383 p_rx_msg_t rx_msg_p;
3384 p_rbr_cfig_a_t rcfga_p;
3385 p_rbr_cfig_b_t rcfgb_p;
3386 p_rcrcfig_a_t cfga_p;
3387 p_rcrcfig_b_t cfgb_p;
3388 p_rxdma_cfig1_t cfig1_p;
3389 p_rxdma_cfig2_t cfig2_p;
3390 p_rbr_kick_t kick_p;
3391 uint32_t dmaaddrp;
3392 uint32_t *rbr_vaddrp;
3393 uint32_t bkaddr;
3394 nxge_status_t status = NXGE_OK;
3395 int i;
3396 uint32_t nxge_port_rcr_size;
3397
3398 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3399 "==> nxge_map_rxdma_channel_cfg_ring"));
3400
3401 cntl_dmap = *dma_cntl_p;
3402
3403 /* Map in the receive block ring */
3404 rbrp = *rbr_p;
3405 dmap = (p_nxge_dma_common_t)&rbrp->rbr_desc;
3406 nxge_setup_dma_common(dmap, cntl_dmap, rbrp->rbb_max, 4);
3407 /*
3408 * Zero out buffer block ring descriptors.
3409 */
3410 bzero((caddr_t)dmap->kaddrp, dmap->alength);
3411
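	/*
	 * Initialize the RBR configuration A register image (ring DMA base
	 * address and length) and the configuration B image (the three
	 * buffer sizes and the block size).
	 */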
3412 rcfga_p = &(rbrp->rbr_cfga);
3413 rcfgb_p = &(rbrp->rbr_cfgb);
3414 kick_p = &(rbrp->rbr_kick);
3415 rcfga_p->value = 0;
3416 rcfgb_p->value = 0;
3417 kick_p->value = 0;
3418 rbrp->rbr_addr = dmap->dma_cookie.dmac_laddress;
3419 rcfga_p->value = (rbrp->rbr_addr &
3420 (RBR_CFIG_A_STDADDR_MASK |
3421 RBR_CFIG_A_STDADDR_BASE_MASK));
3422 rcfga_p->value |= ((uint64_t)rbrp->rbb_max << RBR_CFIG_A_LEN_SHIFT);
3423
3424 rcfgb_p->bits.ldw.bufsz0 = rbrp->pkt_buf_size0;
3425 rcfgb_p->bits.ldw.vld0 = 1;
3426 rcfgb_p->bits.ldw.bufsz1 = rbrp->pkt_buf_size1;
3427 rcfgb_p->bits.ldw.vld1 = 1;
3428 rcfgb_p->bits.ldw.bufsz2 = rbrp->pkt_buf_size2;
3429 rcfgb_p->bits.ldw.vld2 = 1;
3430 rcfgb_p->bits.ldw.bksize = nxgep->rx_bksize_code;
3431
3432 /*
3433 * For each buffer block, enter receive block address to the ring.
3434 */
3435 rbr_vaddrp = (uint32_t *)dmap->kaddrp;
3436 rbrp->rbr_desc_vp = (uint32_t *)dmap->kaddrp;
3437 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3438 "==> nxge_map_rxdma_channel_cfg_ring: channel %d "
3439 "rbr_vaddrp $%p", dma_channel, rbr_vaddrp));
3440
3441 rx_msg_ring = rbrp->rx_msg_ring;
3442 for (i = 0; i < rbrp->tnblocks; i++) {
3443 rx_msg_p = rx_msg_ring[i];
3444 rx_msg_p->nxgep = nxgep;
3445 rx_msg_p->rx_rbr_p = rbrp;
3446 bkaddr = (uint32_t)
3447 ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress
3448 >> RBR_BKADDR_SHIFT));
3449 rx_msg_p->free = B_FALSE;
3450 rx_msg_p->max_usage_cnt = 0xbaddcafe;
3451
3452 *rbr_vaddrp++ = bkaddr;
3453 }
3454
3455 kick_p->bits.ldw.bkadd = rbrp->rbb_max;
3456 rbrp->rbr_wr_index = (rbrp->rbb_max - 1);
3457
3458 rbrp->rbr_rd_index = 0;
3459
3460 rbrp->rbr_consumed = 0;
3461 rbrp->rbr_use_bcopy = B_TRUE;
3462 rbrp->rbr_bufsize_type = RCR_PKTBUFSZ_0;
3463 /*
3464 * Do bcopy on packets greater than bcopy size once
3465 * the lo threshold is reached.
3466 * This lo threshold should be less than the hi threshold.
3467 *
3468 * Do bcopy on every packet once the hi threshold is reached.
3469 */
3470 if (nxge_rx_threshold_lo >= nxge_rx_threshold_hi) {
3471 /* default it to use hi */
3472 nxge_rx_threshold_lo = nxge_rx_threshold_hi;
3473 }
3474
3475 if (nxge_rx_buf_size_type > NXGE_RBR_TYPE2) {
3476 nxge_rx_buf_size_type = NXGE_RBR_TYPE2;
3477 }
3478 rbrp->rbr_bufsize_type = nxge_rx_buf_size_type;
3479
3480 switch (nxge_rx_threshold_hi) {
3481 default:
3482 case NXGE_RX_COPY_NONE:
3483 /* Do not do bcopy at all */
3484 rbrp->rbr_use_bcopy = B_FALSE;
3485 rbrp->rbr_threshold_hi = rbrp->rbb_max;
3486 break;
3487
3488 case NXGE_RX_COPY_1:
3489 case NXGE_RX_COPY_2:
3490 case NXGE_RX_COPY_3:
3491 case NXGE_RX_COPY_4:
3492 case NXGE_RX_COPY_5:
3493 case NXGE_RX_COPY_6:
3494 case NXGE_RX_COPY_7:
3495 rbrp->rbr_threshold_hi =
3496 rbrp->rbb_max *
3497 (nxge_rx_threshold_hi)/NXGE_RX_BCOPY_SCALE;
3498 break;
3499
3500 case NXGE_RX_COPY_ALL:
3501 rbrp->rbr_threshold_hi = 0;
3502 break;
3503 }
3504
3505 switch (nxge_rx_threshold_lo) {
3506 default:
3507 case NXGE_RX_COPY_NONE:
3508 /* Do not do bcopy at all */
3509 if (rbrp->rbr_use_bcopy) {
3510 rbrp->rbr_use_bcopy = B_FALSE;
3511 }
3512 rbrp->rbr_threshold_lo = rbrp->rbb_max;
3513 break;
3514
3515 case NXGE_RX_COPY_1:
3516 case NXGE_RX_COPY_2:
3517 case NXGE_RX_COPY_3:
3518 case NXGE_RX_COPY_4:
3519 case NXGE_RX_COPY_5:
3520 case NXGE_RX_COPY_6:
3521 case NXGE_RX_COPY_7:
3522 rbrp->rbr_threshold_lo =
3523 rbrp->rbb_max *
3524 (nxge_rx_threshold_lo)/NXGE_RX_BCOPY_SCALE;
3525 break;
3526
3527 case NXGE_RX_COPY_ALL:
3528 rbrp->rbr_threshold_lo = 0;
3529 break;
3530 }
3531
3532 NXGE_DEBUG_MSG((nxgep, RX_CTL,
3533 "nxge_map_rxdma_channel_cfg_ring: channel %d "
3534 "rbb_max %d "
3535 "rbrp->rbr_bufsize_type %d "
3536 "rbb_threshold_hi %d "
3537 "rbb_threshold_lo %d",
3538 dma_channel,
3539 rbrp->rbb_max,
3540 rbrp->rbr_bufsize_type,
3541 rbrp->rbr_threshold_hi,
3542 rbrp->rbr_threshold_lo));
3543
3544 rbrp->page_valid.value = 0;
3545 rbrp->page_mask_1.value = rbrp->page_mask_2.value = 0;
3546 rbrp->page_value_1.value = rbrp->page_value_2.value = 0;
3547 rbrp->page_reloc_1.value = rbrp->page_reloc_2.value = 0;
3548 rbrp->page_hdl.value = 0;
3549
3550 rbrp->page_valid.bits.ldw.page0 = 1;
3551 rbrp->page_valid.bits.ldw.page1 = 1;
3552
3553 /* Map in the receive completion ring */
3554 rcrp = (p_rx_rcr_ring_t)
3555 KMEM_ZALLOC(sizeof (rx_rcr_ring_t), KM_SLEEP);
3556 rcrp->rdc = dma_channel;
3557
3558 nxge_port_rcr_size = nxgep->nxge_port_rcr_size;
3559 rcrp->comp_size = nxge_port_rcr_size;
3560 rcrp->comp_wrap_mask = nxge_port_rcr_size - 1;
3561
3562 rcrp->max_receive_pkts = nxge_max_rx_pkts;
3563
3564 dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc;
3565 nxge_setup_dma_common(dmap, cntl_dmap, rcrp->comp_size,
3566 sizeof (rcr_entry_t));
3567 rcrp->comp_rd_index = 0;
3568 rcrp->comp_wt_index = 0;
3569 rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p =
3570 (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc);
3571 rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp =
3572 (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc);
3573
3574 rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p +
3575 (nxge_port_rcr_size - 1);
3576 rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp +
3577 (nxge_port_rcr_size - 1);
3578
3579 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3580 "==> nxge_map_rxdma_channel_cfg_ring: "
3581 "channel %d "
3582 "rbr_vaddrp $%p "
3583 "rcr_desc_rd_head_p $%p "
3584 "rcr_desc_rd_head_pp $%p "
3585 "rcr_desc_rd_last_p $%p "
3586 "rcr_desc_rd_last_pp $%p ",
3587 dma_channel,
3588 rbr_vaddrp,
3589 rcrp->rcr_desc_rd_head_p,
3590 rcrp->rcr_desc_rd_head_pp,
3591 rcrp->rcr_desc_last_p,
3592 rcrp->rcr_desc_last_pp));
3593
3594 /*
3595 * Zero out buffer block ring descriptors.
3596 */
3597 bzero((caddr_t)dmap->kaddrp, dmap->alength);
3598
3599 rcrp->intr_timeout = (nxgep->intr_timeout <
3600 NXGE_RDC_RCR_TIMEOUT_MIN) ? NXGE_RDC_RCR_TIMEOUT_MIN :
3601 nxgep->intr_timeout;
3602
3603 rcrp->intr_threshold = (nxgep->intr_threshold <
3604 NXGE_RDC_RCR_THRESHOLD_MIN) ? NXGE_RDC_RCR_THRESHOLD_MIN :
3605 nxgep->intr_threshold;
3606
3607 rcrp->full_hdr_flag = B_FALSE;
3608
3609 rcrp->sw_priv_hdr_len = nxge_rdc_buf_offset;
3610
3611
3612 cfga_p = &(rcrp->rcr_cfga);
3613 cfgb_p = &(rcrp->rcr_cfgb);
3614 cfga_p->value = 0;
3615 cfgb_p->value = 0;
3616 rcrp->rcr_addr = dmap->dma_cookie.dmac_laddress;
3617 cfga_p->value = (rcrp->rcr_addr &
3618 (RCRCFIG_A_STADDR_MASK |
3619 RCRCFIG_A_STADDR_BASE_MASK));
3620
3621 	cfga_p->value |= ((uint64_t)rcrp->comp_size <<
3622 RCRCFIG_A_LEN_SHIF);
3623
3624 /*
3625 * Timeout should be set based on the system clock divider.
3626 * A timeout value of 1 assumes that the
3627 * granularity (1000) is 3 microseconds running at 300MHz.
3628 */
3629 cfgb_p->bits.ldw.pthres = rcrp->intr_threshold;
3630 cfgb_p->bits.ldw.timeout = rcrp->intr_timeout;
3631 cfgb_p->bits.ldw.entout = 1;
3632
3633 /* Map in the mailbox */
3634 mboxp = (p_rx_mbox_t)
3635 KMEM_ZALLOC(sizeof (rx_mbox_t), KM_SLEEP);
3636 dmap = (p_nxge_dma_common_t)&mboxp->rx_mbox;
3637 nxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (rxdma_mailbox_t));
3638 cfig1_p = (p_rxdma_cfig1_t)&mboxp->rx_cfg1;
3639 cfig2_p = (p_rxdma_cfig2_t)&mboxp->rx_cfg2;
3640 cfig1_p->value = cfig2_p->value = 0;
3641
3642 mboxp->mbox_addr = dmap->dma_cookie.dmac_laddress;
3643 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3644 "==> nxge_map_rxdma_channel_cfg_ring: "
3645 "channel %d cfg1 0x%016llx cfig2 0x%016llx cookie 0x%016llx",
3646 dma_channel, cfig1_p->value, cfig2_p->value,
3647 mboxp->mbox_addr));
3648
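	/*
	 * Split the mailbox DMA address across the two configuration
	 * register images: the high bits go into CFIG1 (mbaddr_h) and the
	 * low 32 bits, masked and shifted, into CFIG2 (mbaddr).
	 */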
3649 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress >> 32
3650 & 0xfff);
3651 cfig1_p->bits.ldw.mbaddr_h = dmaaddrp;
3652
3653
3654 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress & 0xffffffff);
3655 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress &
3656 RXDMA_CFIG2_MBADDR_L_MASK);
3657
3658 cfig2_p->bits.ldw.mbaddr = (dmaaddrp >> RXDMA_CFIG2_MBADDR_L_SHIFT);
3659
3660 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3661 "==> nxge_map_rxdma_channel_cfg_ring: "
3662 "channel %d damaddrp $%p "
3663 "cfg1 0x%016llx cfig2 0x%016llx",
3664 dma_channel, dmaaddrp,
3665 cfig1_p->value, cfig2_p->value));
3666
3667 cfig2_p->bits.ldw.full_hdr = rcrp->full_hdr_flag;
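	/*
	 * Encode the software packet offset.  On RF-type NIU hardware the
	 * larger offsets (256 to 448 bytes) also require the offset256 bit.
	 */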
3668 if (nxgep->niu_hw_type == NIU_HW_TYPE_RF) {
3669 switch (rcrp->sw_priv_hdr_len) {
3670 case SW_OFFSET_NO_OFFSET:
3671 case SW_OFFSET_64:
3672 case SW_OFFSET_128:
3673 case SW_OFFSET_192:
3674 cfig2_p->bits.ldw.offset =
3675 rcrp->sw_priv_hdr_len;
3676 cfig2_p->bits.ldw.offset256 = 0;
3677 break;
3678 case SW_OFFSET_256:
3679 case SW_OFFSET_320:
3680 case SW_OFFSET_384:
3681 case SW_OFFSET_448:
3682 cfig2_p->bits.ldw.offset =
3683 rcrp->sw_priv_hdr_len & 0x3;
3684 cfig2_p->bits.ldw.offset256 = 1;
3685 break;
3686 default:
3687 cfig2_p->bits.ldw.offset = SW_OFFSET_NO_OFFSET;
3688 cfig2_p->bits.ldw.offset256 = 0;
3689 }
3690 } else {
3691 cfig2_p->bits.ldw.offset = rcrp->sw_priv_hdr_len;
3692 }
3693
3694 rbrp->rx_rcr_p = rcrp;
3695 rcrp->rx_rbr_p = rbrp;
3696 *rcr_p = rcrp;
3697 *rx_mbox_p = mboxp;
3698
3699 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3700 "<== nxge_map_rxdma_channel_cfg_ring status 0x%08x", status));
3701
3702 return (status);
3703 }
3704
3705 /*ARGSUSED*/
3706 static void
3707 nxge_unmap_rxdma_channel_cfg_ring(p_nxge_t nxgep,
3708 p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p)
3709 {
3710 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3711 "==> nxge_unmap_rxdma_channel_cfg_ring: channel %d",
3712 rcr_p->rdc));
3713
3714 KMEM_FREE(rcr_p, sizeof (rx_rcr_ring_t));
3715 KMEM_FREE(rx_mbox_p, sizeof (rx_mbox_t));
3716
3717 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3718 "<== nxge_unmap_rxdma_channel_cfg_ring"));
3719 }
3720
3721 static nxge_status_t
3722 nxge_map_rxdma_channel_buf_ring(p_nxge_t nxgep, uint16_t channel,
3723 p_nxge_dma_common_t *dma_buf_p,
3724 p_rx_rbr_ring_t *rbr_p, uint32_t num_chunks)
3725 {
3726 p_rx_rbr_ring_t rbrp;
3727 p_nxge_dma_common_t dma_bufp, tmp_bufp;
3728 p_rx_msg_t *rx_msg_ring;
3729 p_rx_msg_t rx_msg_p;
3730 p_mblk_t mblk_p;
3731
3732 rxring_info_t *ring_info;
3733 nxge_status_t status = NXGE_OK;
3734 int i, j, index;
3735 uint32_t size, bsize, nblocks, nmsgs;
3736
3737 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3738 "==> nxge_map_rxdma_channel_buf_ring: channel %d",
3739 channel));
3740
3741 dma_bufp = tmp_bufp = *dma_buf_p;
3742 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3743 " nxge_map_rxdma_channel_buf_ring: channel %d to map %d "
3744 "chunks bufp 0x%016llx",
3745 channel, num_chunks, dma_bufp));
3746
3747 nmsgs = 0;
3748 for (i = 0; i < num_chunks; i++, tmp_bufp++) {
3749 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3750 "==> nxge_map_rxdma_channel_buf_ring: channel %d "
3751 "bufp 0x%016llx nblocks %d nmsgs %d",
3752 channel, tmp_bufp, tmp_bufp->nblocks, nmsgs));
3753 nmsgs += tmp_bufp->nblocks;
3754 }
3755 if (!nmsgs) {
3756 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3757 "<== nxge_map_rxdma_channel_buf_ring: channel %d "
3758 "no msg blocks",
3759 channel));
3760 status = NXGE_ERROR;
3761 goto nxge_map_rxdma_channel_buf_ring_exit;
3762 }
3763
3764 rbrp = (p_rx_rbr_ring_t)KMEM_ZALLOC(sizeof (*rbrp), KM_SLEEP);
3765
3766 size = nmsgs * sizeof (p_rx_msg_t);
3767 rx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP);
3768 ring_info = (rxring_info_t *)KMEM_ZALLOC(sizeof (rxring_info_t),
3769 KM_SLEEP);
3770
3771 MUTEX_INIT(&rbrp->lock, NULL, MUTEX_DRIVER,
3772 (void *)nxgep->interrupt_cookie);
3773 MUTEX_INIT(&rbrp->post_lock, NULL, MUTEX_DRIVER,
3774 (void *)nxgep->interrupt_cookie);
3775 rbrp->rdc = channel;
3776 rbrp->num_blocks = num_chunks;
3777 rbrp->tnblocks = nmsgs;
3778 rbrp->rbb_max = nmsgs;
3779 rbrp->rbr_max_size = nmsgs;
3780 rbrp->rbr_wrap_mask = (rbrp->rbb_max - 1);
3781
3782 /*
3783 	 * Buffer sizes suggested by the NIU architect.
3784 	 * 256 bytes, 1K and 2K are used here (4K or 8K for jumbo).
3785 */
3786
3787 rbrp->pkt_buf_size0 = RBR_BUFSZ0_256B;
3788 rbrp->pkt_buf_size0_bytes = RBR_BUFSZ0_256_BYTES;
3789 rbrp->npi_pkt_buf_size0 = SIZE_256B;
3790
3791 rbrp->pkt_buf_size1 = RBR_BUFSZ1_1K;
3792 rbrp->pkt_buf_size1_bytes = RBR_BUFSZ1_1K_BYTES;
3793 rbrp->npi_pkt_buf_size1 = SIZE_1KB;
3794
3795 rbrp->block_size = nxgep->rx_default_block_size;
3796
3797 if (!nxgep->mac.is_jumbo) {
3798 rbrp->pkt_buf_size2 = RBR_BUFSZ2_2K;
3799 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_2K_BYTES;
3800 rbrp->npi_pkt_buf_size2 = SIZE_2KB;
3801 } else {
3802 if (rbrp->block_size >= 0x2000) {
3803 rbrp->pkt_buf_size2 = RBR_BUFSZ2_8K;
3804 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_8K_BYTES;
3805 rbrp->npi_pkt_buf_size2 = SIZE_8KB;
3806 } else {
3807 rbrp->pkt_buf_size2 = RBR_BUFSZ2_4K;
3808 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_4K_BYTES;
3809 rbrp->npi_pkt_buf_size2 = SIZE_4KB;
3810 }
3811 }
3812
3813 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3814 "==> nxge_map_rxdma_channel_buf_ring: channel %d "
3815 "actual rbr max %d rbb_max %d nmsgs %d "
3816 "rbrp->block_size %d default_block_size %d "
3817 "(config nxge_rbr_size %d nxge_rbr_spare_size %d)",
3818 channel, rbrp->rbr_max_size, rbrp->rbb_max, nmsgs,
3819 rbrp->block_size, nxgep->rx_default_block_size,
3820 nxge_rbr_size, nxge_rbr_spare_size));
3821
3822 /* Map in buffers from the buffer pool. */
3823 index = 0;
3824 for (i = 0; i < rbrp->num_blocks; i++, dma_bufp++) {
3825 bsize = dma_bufp->block_size;
3826 nblocks = dma_bufp->nblocks;
3827 ring_info->buffer[i].dvma_addr = (uint64_t)dma_bufp->ioaddr_pp;
3828 ring_info->buffer[i].buf_index = i;
3829 ring_info->buffer[i].buf_size = dma_bufp->alength;
3830 ring_info->buffer[i].start_index = index;
3831 ring_info->buffer[i].kaddr = (uint64_t)dma_bufp->kaddrp;
3832
3833 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3834 " nxge_map_rxdma_channel_buf_ring: map channel %d "
3835 "chunk %d"
3836 " nblocks %d chunk_size %x block_size 0x%x "
3837 "dma_bufp $%p", channel, i,
3838 dma_bufp->nblocks, ring_info->buffer[i].buf_size, bsize,
3839 dma_bufp));
3840
3841 for (j = 0; j < nblocks; j++) {
3842 if ((rx_msg_p = nxge_allocb(bsize, BPRI_LO,
3843 dma_bufp)) == NULL) {
3844 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3845 "allocb failed (index %d i %d j %d)",
3846 index, i, j));
3847 goto nxge_map_rxdma_channel_buf_ring_fail1;
3848 }
3849 rx_msg_ring[index] = rx_msg_p;
3850 rx_msg_p->block_index = index;
3851 rx_msg_p->shifted_addr = (uint32_t)
3852 ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress >>
3853 RBR_BKADDR_SHIFT));
3854
3855 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3856 "index %d j %d rx_msg_p $%p mblk %p",
3857 index, j, rx_msg_p, rx_msg_p->rx_mblk_p));
3858
3859 mblk_p = rx_msg_p->rx_mblk_p;
3860 mblk_p->b_wptr = mblk_p->b_rptr + bsize;
3861
3862 rbrp->rbr_ref_cnt++;
3863 index++;
3864 rx_msg_p->buf_dma.dma_channel = channel;
3865 }
3866
3867 rbrp->rbr_alloc_type = DDI_MEM_ALLOC;
3868 if (dma_bufp->contig_alloc_type) {
3869 rbrp->rbr_alloc_type = CONTIG_MEM_ALLOC;
3870 }
3871
3872 if (dma_bufp->kmem_alloc_type) {
3873 rbrp->rbr_alloc_type = KMEM_ALLOC;
3874 }
3875
3876 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3877 " nxge_map_rxdma_channel_buf_ring: map channel %d "
3878 "chunk %d"
3879 " nblocks %d chunk_size %x block_size 0x%x "
3880 "dma_bufp $%p",
3881 channel, i,
3882 dma_bufp->nblocks, ring_info->buffer[i].buf_size, bsize,
3883 dma_bufp));
3884 }
3885 if (i < rbrp->num_blocks) {
3886 goto nxge_map_rxdma_channel_buf_ring_fail1;
3887 }
3888
3889 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3890 "nxge_map_rxdma_channel_buf_ring: done buf init "
3891 "channel %d msg block entries %d",
3892 channel, index));
3893 ring_info->block_size_mask = bsize - 1;
3894 rbrp->rx_msg_ring = rx_msg_ring;
3895 rbrp->dma_bufp = dma_buf_p;
3896 rbrp->ring_info = ring_info;
3897
3898 status = nxge_rxbuf_index_info_init(nxgep, rbrp);
3899 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3900 " nxge_map_rxdma_channel_buf_ring: "
3901 "channel %d done buf info init", channel));
3902
3903 /*
3904 * Finally, permit nxge_freeb() to call nxge_post_page().
3905 */
3906 rbrp->rbr_state = RBR_POSTING;
3907
3908 *rbr_p = rbrp;
3909 goto nxge_map_rxdma_channel_buf_ring_exit;
3910
3911 nxge_map_rxdma_channel_buf_ring_fail1:
3912 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3913 	    " nxge_map_rxdma_channel_buf_ring: failed: channel %d (status 0x%x)",
3914 channel, status));
3915
3916 index--;
3917 for (; index >= 0; index--) {
3918 rx_msg_p = rx_msg_ring[index];
3919 if (rx_msg_p != NULL) {
3920 freeb(rx_msg_p->rx_mblk_p);
3921 rx_msg_ring[index] = NULL;
3922 }
3923 }
3924 nxge_map_rxdma_channel_buf_ring_fail:
3925 MUTEX_DESTROY(&rbrp->post_lock);
3926 MUTEX_DESTROY(&rbrp->lock);
3927 KMEM_FREE(ring_info, sizeof (rxring_info_t));
3928 KMEM_FREE(rx_msg_ring, size);
3929 KMEM_FREE(rbrp, sizeof (rx_rbr_ring_t));
3930
3931 status = NXGE_ERROR;
3932
3933 nxge_map_rxdma_channel_buf_ring_exit:
3934 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3935 "<== nxge_map_rxdma_channel_buf_ring status 0x%08x", status));
3936
3937 return (status);
3938 }
3939
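/*
 * Illustrative sketch, for exposition only (not called anywhere in the
 * driver): given the three receive buffer size tiers configured in
 * nxge_map_rxdma_channel_buf_ring() above (e.g. 256B, 1KB and 2KB, or
 * 4KB/8KB with jumbo frames), a frame lands in the smallest tier that
 * can hold it.  The helper name is hypothetical; the actual selection
 * is performed by the hardware from the programmed RBR configuration.
 */
static uint32_t
nxge_example_pick_bufsz(uint32_t len, uint32_t size0_bytes,
    uint32_t size1_bytes, uint32_t size2_bytes)
{
	if (len <= size0_bytes)		/* e.g. RBR_BUFSZ0_256_BYTES */
		return (size0_bytes);
	if (len <= size1_bytes)		/* e.g. RBR_BUFSZ1_1K_BYTES */
		return (size1_bytes);
	return (size2_bytes);		/* e.g. RBR_BUFSZ2_2K_BYTES */
}
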
3940 /*ARGSUSED*/
3941 static void
3942 nxge_unmap_rxdma_channel_buf_ring(p_nxge_t nxgep,
3943 p_rx_rbr_ring_t rbr_p)
3944 {
3945 p_rx_msg_t *rx_msg_ring;
3946 p_rx_msg_t rx_msg_p;
3947 rxring_info_t *ring_info;
3948 int i;
3949 uint32_t size;
3950 #ifdef NXGE_DEBUG
3951 int num_chunks;
3952 #endif
3953
3954 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3955 "==> nxge_unmap_rxdma_channel_buf_ring"));
3956 if (rbr_p == NULL) {
3957 NXGE_DEBUG_MSG((nxgep, RX_CTL,
3958 "<== nxge_unmap_rxdma_channel_buf_ring: NULL rbrp"));
3959 return;
3960 }
3961 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3962 "==> nxge_unmap_rxdma_channel_buf_ring: channel %d",
3963 rbr_p->rdc));
3964
3965 rx_msg_ring = rbr_p->rx_msg_ring;
3966 ring_info = rbr_p->ring_info;
3967
3968 if (rx_msg_ring == NULL || ring_info == NULL) {
3969 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3970 "<== nxge_unmap_rxdma_channel_buf_ring: "
3971 "rx_msg_ring $%p ring_info $%p",
3972 	    rx_msg_ring, ring_info));
3973 return;
3974 }
3975
3976 #ifdef NXGE_DEBUG
3977 num_chunks = rbr_p->num_blocks;
3978 #endif
3979 size = rbr_p->tnblocks * sizeof (p_rx_msg_t);
3980 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3981 " nxge_unmap_rxdma_channel_buf_ring: channel %d chunks %d "
3982 "tnblocks %d (max %d) size ptrs %d ",
3983 rbr_p->rdc, num_chunks,
3984 rbr_p->tnblocks, rbr_p->rbr_max_size, size));
3985
3986 for (i = 0; i < rbr_p->tnblocks; i++) {
3987 rx_msg_p = rx_msg_ring[i];
3988 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3989 " nxge_unmap_rxdma_channel_buf_ring: "
3990 "rx_msg_p $%p",
3991 rx_msg_p));
3992 if (rx_msg_p != NULL) {
3993 freeb(rx_msg_p->rx_mblk_p);
3994 rx_msg_ring[i] = NULL;
3995 }
3996 }
3997
3998 /*
3999 	 * We may no longer use the <post_lock> mutex. By setting
4000 * <rbr_state> to anything but POSTING, we prevent
4001 * nxge_post_page() from accessing a dead mutex.
4002 */
4003 rbr_p->rbr_state = RBR_UNMAPPING;
4004 MUTEX_DESTROY(&rbr_p->post_lock);
4005
4006 MUTEX_DESTROY(&rbr_p->lock);
4007
4008 if (rbr_p->rbr_ref_cnt == 0) {
4009 /*
4010 * This is the normal state of affairs.
4011 * Need to free the following buffers:
4012 * - data buffers
4013 * - rx_msg ring
4014 * - ring_info
4015 * - rbr ring
4016 */
4017 NXGE_DEBUG_MSG((nxgep, RX_CTL,
4018 "unmap_rxdma_buf_ring: No outstanding - freeing "));
4019 nxge_rxdma_databuf_free(rbr_p);
4020 KMEM_FREE(ring_info, sizeof (rxring_info_t));
4021 KMEM_FREE(rx_msg_ring, size);
4022 KMEM_FREE(rbr_p, sizeof (*rbr_p));
4023 } else {
4024 /*
4025 * Some of our buffers are still being used.
4026 * Therefore, tell nxge_freeb() this ring is
4027 * unmapped, so it may free <rbr_p> for us.
4028 */
4029 rbr_p->rbr_state = RBR_UNMAPPED;
4030 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4031 "unmap_rxdma_buf_ring: %d %s outstanding.",
4032 rbr_p->rbr_ref_cnt,
4033 rbr_p->rbr_ref_cnt == 1 ? "msg" : "msgs"));
4034 }
4035
4036 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
4037 "<== nxge_unmap_rxdma_channel_buf_ring"));
4038 }
4039
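/*
 * Illustrative sketch, for exposition only (not called anywhere in the
 * driver): the deferred-free convention described in
 * nxge_unmap_rxdma_channel_buf_ring() above, reduced to its essentials.
 * The example_ring_t type and example_buffer_release() helper are
 * hypothetical; in the driver the corresponding logic lives in
 * nxge_freeb().
 */
typedef struct {
	uint32_t	ref_cnt;	/* buffers still held by upper layers */
	int		state;		/* RBR_POSTING/RBR_UNMAPPING/RBR_UNMAPPED */
	void		*ring_mem;	/* ring bookkeeping to free last */
} example_ring_t;

static void
example_buffer_release(example_ring_t *rp)
{
	/*
	 * Only when the last outstanding buffer comes back, and the
	 * ring has already been unmapped, is the ring itself released.
	 */
	if (--rp->ref_cnt == 0 && rp->state == RBR_UNMAPPED) {
		/* KMEM_FREE(rp->ring_mem, ...) would go here. */
		rp->ring_mem = NULL;
	}
}
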
4040 /*
4041 * nxge_rxdma_hw_start_common
4042 *
4043 * Arguments:
4044 * nxgep
4045 *
4046 * Notes:
4047 *
4048 * NPI/NXGE function calls:
4049 * nxge_init_fzc_rx_common();
4050 * nxge_init_fzc_rxdma_port();
4051 *
4052 * Registers accessed:
4053 *
4054 * Context:
4055 * Service domain
4056 */
4057 static nxge_status_t
4058 nxge_rxdma_hw_start_common(p_nxge_t nxgep)
4059 {
4060 nxge_status_t status = NXGE_OK;
4061
4062 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start_common"));
4063
4064 /*
4065 * Load the sharable parameters by writing to the
4066 * function zero control registers. These FZC registers
4067 * should be initialized only once for the entire chip.
4068 */
4069 (void) nxge_init_fzc_rx_common(nxgep);
4070
4071 /*
4072 * Initialize the RXDMA port specific FZC control configurations.
4073 	 * These FZC registers pertain to each port.
4074 */
4075 (void) nxge_init_fzc_rxdma_port(nxgep);
4076
4077 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_hw_start_common"));
4078
4079 return (status);
4080 }
4081
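/*
 * Illustrative sketch, for exposition only (not called anywhere in the
 * driver): the ordering implied by the comments above -- the chip-wide
 * FZC setup is done once, then each RX DMA channel is brought up
 * individually.  The example_start_all_rdcs() helper and its channel
 * list are hypothetical; the real call sites are elsewhere in the driver.
 */
static nxge_status_t
example_start_all_rdcs(p_nxge_t nxgep, int *channels, int nchannels)
{
	nxge_status_t status;
	int i;

	status = nxge_rxdma_hw_start_common(nxgep);	/* once per chip */
	if (status != NXGE_OK)
		return (status);

	for (i = 0; i < nchannels; i++) {		/* once per channel */
		status = nxge_rxdma_hw_start(nxgep, channels[i]);
		if (status != NXGE_OK)
			return (status);
	}

	return (NXGE_OK);
}
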
4082 static nxge_status_t
4083 nxge_rxdma_hw_start(p_nxge_t nxgep, int channel)
4084 {
4085 int i, ndmas;
4086 p_rx_rbr_rings_t rx_rbr_rings;
4087 p_rx_rbr_ring_t *rbr_rings;
4088 p_rx_rcr_rings_t rx_rcr_rings;
4089 p_rx_rcr_ring_t *rcr_rings;
4090 p_rx_mbox_areas_t rx_mbox_areas_p;
4091 p_rx_mbox_t *rx_mbox_p;
4092 nxge_status_t status = NXGE_OK;
4093
4094 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start"));
4095
4096 rx_rbr_rings = nxgep->rx_rbr_rings;
4097 rx_rcr_rings = nxgep->rx_rcr_rings;
4098 if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) {
4099 NXGE_DEBUG_MSG((nxgep, RX_CTL,
4100 "<== nxge_rxdma_hw_start: NULL ring pointers"));
4101 return (NXGE_ERROR);
4102 }
4103 ndmas = rx_rbr_rings->ndmas;
4104 if (ndmas == 0) {
4105 NXGE_DEBUG_MSG((nxgep, RX_CTL,
4106 "<== nxge_rxdma_hw_start: no dma channel allocated"));
4107 return (NXGE_ERROR);
4108 }
4109
4110 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
4111 "==> nxge_rxdma_hw_start (ndmas %d)", ndmas));
4112
4113 rbr_rings = rx_rbr_rings->rbr_rings;
4114 rcr_rings = rx_rcr_rings->rcr_rings;
4115 rx_mbox_areas_p = nxgep->rx_mbox_areas_p;
4116 if (rx_mbox_areas_p) {
4117 rx_mbox_p = rx_mbox_areas_p->rxmbox_areas;
4118 } else {
4119 rx_mbox_p = NULL;
4120 }
4121
4122 i = channel;
4123 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
4124 "==> nxge_rxdma_hw_start (ndmas %d) channel %d",
4125 ndmas, channel));
4126 status = nxge_rxdma_start_channel(nxgep, channel,
4127 (p_rx_rbr_ring_t)rbr_rings[i],
4128 (p_rx_rcr_ring_t)rcr_rings[i],
4129 (p_rx_mbox_t)rx_mbox_p[i]);
4130 if (status != NXGE_OK) {
4131 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4132 "==> nxge_rxdma_hw_start: disable "
4133 "(status 0x%x channel %d)", status, channel));
4134 return (status);
4135 }
4136
4137 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start: "
4138 "rx_rbr_rings 0x%016llx rings 0x%016llx",
4139 rx_rbr_rings, rx_rcr_rings));
4140
4141 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
4142 	    "<== nxge_rxdma_hw_start: (status 0x%x)", status));
4143
4144 return (status);
4145 }
4146
4147 static void
4148 nxge_rxdma_hw_stop(p_nxge_t nxgep, int channel)
4149 {
4150 p_rx_rbr_rings_t rx_rbr_rings;
4151 p_rx_rcr_rings_t rx_rcr_rings;
4152
4153 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_stop"));
4154
4155 rx_rbr_rings = nxgep->rx_rbr_rings;
4156 rx_rcr_rings = nxgep->rx_rcr_rings;
4157 if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) {
4158 NXGE_DEBUG_MSG((nxgep, RX_CTL,
4159 "<== nxge_rxdma_hw_stop: NULL ring pointers"));
4160 return;
4161 }
4162
4163 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
4164 "==> nxge_rxdma_hw_stop(channel %d)",
4165 channel));
4166 (void) nxge_rxdma_stop_channel(nxgep, channel);
4167
4168 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_stop: "
4169 "rx_rbr_rings 0x%016llx rings 0x%016llx",
4170 rx_rbr_rings, rx_rcr_rings));
4171
4172 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_hw_stop"));
4173 }
4174
4175
4176 static nxge_status_t
4177 nxge_rxdma_start_channel(p_nxge_t nxgep, uint16_t channel,
4178 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p)
4179 {
4180 npi_handle_t handle;
4181 npi_status_t rs = NPI_SUCCESS;
4182 rx_dma_ctl_stat_t cs;
4183 rx_dma_ent_msk_t ent_mask;
4184 nxge_status_t status = NXGE_OK;
4185
4186 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel"));
4187
4188 handle = NXGE_DEV_NPI_HANDLE(nxgep);
4189
4190 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "nxge_rxdma_start_channel: "
4191 "npi handle addr $%p acc $%p",
4192 nxgep->npi_handle.regp, nxgep->npi_handle.regh));
4193
4194 /* Reset RXDMA channel, but not if you're a guest. */
4195 if (!isLDOMguest(nxgep)) {
4196 rs = npi_rxdma_cfg_rdc_reset(handle, channel);
4197 if (rs != NPI_SUCCESS) {
4198 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4199 	    "==> nxge_rxdma_start_channel: "
4200 "npi_rxdma_cfg_rdc_reset(%d) returned 0x%08x",
4201 channel, rs));
4202 return (NXGE_ERROR | rs);
4203 }
4204
4205 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
4206 "==> nxge_rxdma_start_channel: reset done: channel %d",
4207 channel));
4208 }
4209
4210 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
4211 if (isLDOMguest(nxgep))
4212 (void) nxge_rdc_lp_conf(nxgep, channel);
4213 #endif
4214
4215 /*
4216 * Initialize the RXDMA channel specific FZC control
4217 	 * configurations. These FZC registers pertain
4218 	 * to each RX channel (logical pages).
4219 */
4220 if (!isLDOMguest(nxgep)) {
4221 status = nxge_init_fzc_rxdma_channel(nxgep, channel);
4222 if (status != NXGE_OK) {
4223 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4224 "==> nxge_rxdma_start_channel: "
4225 "init fzc rxdma failed (0x%08x channel %d)",
4226 status, channel));
4227 return (status);
4228 }
4229
4230 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
4231 "==> nxge_rxdma_start_channel: fzc done"));
4232 }
4233
4234 /* Set up the interrupt event masks. */
4235 ent_mask.value = 0;
4236 ent_mask.value |= RX_DMA_ENT_MSK_RBREMPTY_MASK;
4237 rs = npi_rxdma_event_mask(handle, OP_SET, channel,
4238 &ent_mask);
4239 if (rs != NPI_SUCCESS) {
4240 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4241 "==> nxge_rxdma_start_channel: "
4242 "init rxdma event masks failed "
4243 "(0x%08x channel %d)",
4244 	    rs, channel));
4245 return (NXGE_ERROR | rs);
4246 }
4247
4248 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
4249 "==> nxge_rxdma_start_channel: "
4250 "event done: channel %d (mask 0x%016llx)",
4251 channel, ent_mask.value));
4252
4253 /* Initialize the receive DMA control and status register */
4254 cs.value = 0;
4255 cs.bits.hdw.mex = 1;
4256 cs.bits.hdw.rcrthres = 1;
4257 cs.bits.hdw.rcrto = 1;
4258 cs.bits.hdw.rbr_empty = 1;
4259 status = nxge_init_rxdma_channel_cntl_stat(nxgep, channel, &cs);
4260 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: "
4261 	    "channel %d rx_dma_cntl_stat 0x%016llx", channel, cs.value));
4262 if (status != NXGE_OK) {
4263 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4264 "==> nxge_rxdma_start_channel: "
4265 	    "init rxdma control register failed (0x%08x channel %d)",
4266 status, channel));
4267 return (status);
4268 }
4269
4270 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: "
4271 "control done - channel %d cs 0x%016llx", channel, cs.value));
4272
4273 /*
4274 	 * Load the RXDMA descriptors, buffers and mailbox,
4275 	 * initialize the receive DMA channel, and
4276 	 * enable the DMA channel.
4277 */
4278 status = nxge_enable_rxdma_channel(nxgep,
4279 channel, rbr_p, rcr_p, mbox_p);
4280
4281 if (status != NXGE_OK) {
4282 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4283 " nxge_rxdma_start_channel: "
4284 " enable rxdma failed (0x%08x channel %d)",
4285 status, channel));
4286 return (status);
4287 }
4288
4289 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
4290 	    "==> nxge_rxdma_start_channel: enabled channel %d", channel));
4291
4292 if (isLDOMguest(nxgep)) {
4293 /* Add interrupt handler for this channel. */
4294 status = nxge_hio_intr_add(nxgep, VP_BOUND_RX, channel);
4295 if (status != NXGE_OK) {
4296 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4297 " nxge_rxdma_start_channel: "
4298 " nxge_hio_intr_add failed (0x%08x channel %d)",
4299 status, channel));
4300 return (status);
4301 }
4302 }
4303
4304 ent_mask.value = 0;
4305 ent_mask.value |= (RX_DMA_ENT_MSK_WRED_DROP_MASK |
4306 RX_DMA_ENT_MSK_PTDROP_PKT_MASK);
4307 rs = npi_rxdma_event_mask(handle, OP_SET, channel,
4308 &ent_mask);
4309 if (rs != NPI_SUCCESS) {
4310 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
4311 "==> nxge_rxdma_start_channel: "
4312 "init rxdma event masks failed (0x%08x channel %d)",
4313 	    rs, channel));
4314 return (NXGE_ERROR | rs);
4315 }
4316
4317 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: "
4318 	    "final event mask set - channel %d cs 0x%016llx", channel, cs.value));
4319
4320 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_start_channel"));
4321
4322 return (NXGE_OK);
4323 }
4324
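/*
 * Illustrative sketch, for exposition only (not called anywhere in the
 * driver): the two-phase event masking used in nxge_rxdma_start_channel()
 * above.  During bring-up the RBR-empty event is masked (presumably so a
 * not-yet-primed ring does not raise an interrupt); once the channel is
 * enabled, the mask is narrowed so that only the WRED/port-drop events
 * stay suppressed.  This helper only composes the mask values; the
 * actual writes go through npi_rxdma_event_mask().
 */
static uint64_t
example_rdc_event_mask(boolean_t bring_up)
{
	if (bring_up)
		return (RX_DMA_ENT_MSK_RBREMPTY_MASK);

	return (RX_DMA_ENT_MSK_WRED_DROP_MASK |
	    RX_DMA_ENT_MSK_PTDROP_PKT_MASK);
}
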
4325 static nxge_status_t
4326 nxge_rxdma_stop_channel(p_nxge_t nxgep, uint16_t channel)
4327 {
4328 npi_handle_t handle;
4329 npi_status_t rs = NPI_SUCCESS;
4330 rx_dma_ctl_stat_t cs;
4331 rx_dma_ent_msk_t ent_mask;
4332 nxge_status_t status = NXGE_OK;
4333
4334 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_channel"));
4335
4336 handle = NXGE_DEV_NPI_HANDLE(nxgep);
4337
4338 NXGE_DEBUG_MSG((nxgep, RX_CTL, "nxge_rxdma_stop_channel: "
4339 "npi handle addr $%p acc $%p",
4340 nxgep->npi_handle.regp, nxgep->npi_handle.regh));
4341
4342 if (!isLDOMguest(nxgep)) {
4343 /*
4344 * Stop RxMAC = A.9.2.6
4345 */
4346 if (nxge_rx_mac_disable(nxgep) != NXGE_OK) {
4347 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4348 "nxge_rxdma_stop_channel: "
4349 "Failed to disable RxMAC"));
4350 }
4351
4352 /*
4353 * Drain IPP Port = A.9.3.6
4354 */
4355 (void) nxge_ipp_drain(nxgep);
4356 }
4357
4358 /* Reset RXDMA channel */
4359 rs = npi_rxdma_cfg_rdc_reset(handle, channel);
4360 if (rs != NPI_SUCCESS) {
4361 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4362 " nxge_rxdma_stop_channel: "
4363 " reset rxdma failed (0x%08x channel %d)",
4364 rs, channel));
4365 return (NXGE_ERROR | rs);
4366 }
4367
4368 NXGE_DEBUG_MSG((nxgep, RX_CTL,
4369 "==> nxge_rxdma_stop_channel: reset done"));
4370
4371 /* Set up the interrupt event masks. */
4372 ent_mask.value = RX_DMA_ENT_MSK_ALL;
4373 rs = npi_rxdma_event_mask(handle, OP_SET, channel,
4374 &ent_mask);
4375 if (rs != NPI_SUCCESS) {
4376 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4377 "==> nxge_rxdma_stop_channel: "
4378 "set rxdma event masks failed (0x%08x channel %d)",
4379 rs, channel));
4380 return (NXGE_ERROR | rs);
4381 }
4382
4383 NXGE_DEBUG_MSG((nxgep, RX_CTL,
4384 "==> nxge_rxdma_stop_channel: event done"));
4385
4386 /*
4387 * Initialize the receive DMA control and status register
4388 */
4389 cs.value = 0;
4390 status = nxge_init_rxdma_channel_cntl_stat(nxgep, channel, &cs);
4391 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_channel: control "
4392 	    " to default (all 0s) 0x%016llx", cs.value));
4393 if (status != NXGE_OK) {
4394 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4395 " nxge_rxdma_stop_channel: init rxdma"
4396 	    " control register failed (0x%08x channel %d)",
4397 status, channel));
4398 return (status);
4399 }
4400
4401 NXGE_DEBUG_MSG((nxgep, RX_CTL,
4402 "==> nxge_rxdma_stop_channel: control done"));
4403
4404 /*
4405 * Make sure channel is disabled.
4406 */
4407 status = nxge_disable_rxdma_channel(nxgep, channel);
4408
4409 if (status != NXGE_OK) {
4410 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4411 " nxge_rxdma_stop_channel: "
4412 	    " disable rxdma failed (0x%08x channel %d)",
4413 status, channel));
4414 return (status);
4415 }
4416
4417 if (!isLDOMguest(nxgep)) {
4418 /*
4419 * Enable RxMAC = A.9.2.10
4420 */
4421 if (nxge_rx_mac_enable(nxgep) != NXGE_OK) {
4422 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4423 "nxge_rxdma_stop_channel: Rx MAC still disabled"));
4424 }
4425 }
4426
4427 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
4428 	    "==> nxge_rxdma_stop_channel: disable done"));
4429
4430 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_stop_channel"));
4431
4432 return (NXGE_OK);
4433 }
4434
4435 nxge_status_t
4436 nxge_rxdma_handle_sys_errors(p_nxge_t nxgep)
4437 {
4438 npi_handle_t handle;
4439 p_nxge_rdc_sys_stats_t statsp;
4440 rx_ctl_dat_fifo_stat_t stat;
4441 uint32_t zcp_err_status;
4442 uint32_t ipp_err_status;
4443 nxge_status_t status = NXGE_OK;
4444 npi_status_t rs = NPI_SUCCESS;
4445 boolean_t my_err = B_FALSE;
4446
4447 handle = nxgep->npi_handle;
4448 statsp = (p_nxge_rdc_sys_stats_t)&nxgep->statsp->rdc_sys_stats;
4449
4450 rs = npi_rxdma_rxctl_fifo_error_intr_get(handle, &stat);
4451
4452 if (rs != NPI_SUCCESS)
4453 return (NXGE_ERROR | rs);
4454
4455 if (stat.bits.ldw.id_mismatch) {
4456 statsp->id_mismatch++;
4457 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, 0,
4458 NXGE_FM_EREPORT_RDMC_ID_MISMATCH);
4459 /* Global fatal error encountered */
4460 }
4461
4462 if ((stat.bits.ldw.zcp_eop_err) || (stat.bits.ldw.ipp_eop_err)) {
4463 switch (nxgep->mac.portnum) {
4464 case 0:
4465 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT0) ||
4466 (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT0)) {
4467 my_err = B_TRUE;
4468 zcp_err_status = stat.bits.ldw.zcp_eop_err;
4469 ipp_err_status = stat.bits.ldw.ipp_eop_err;
4470 }
4471 break;
4472 case 1:
4473 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT1) ||
4474 (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT1)) {
4475 my_err = B_TRUE;
4476 zcp_err_status = stat.bits.ldw.zcp_eop_err;
4477 ipp_err_status = stat.bits.ldw.ipp_eop_err;
4478 }
4479 break;
4480 case 2:
4481 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT2) ||
4482 (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT2)) {
4483 my_err = B_TRUE;
4484 zcp_err_status = stat.bits.ldw.zcp_eop_err;
4485 ipp_err_status = stat.bits.ldw.ipp_eop_err;
4486 }
4487 break;
4488 case 3:
4489 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT3) ||
4490 (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT3)) {
4491 my_err = B_TRUE;
4492 zcp_err_status = stat.bits.ldw.zcp_eop_err;
4493 ipp_err_status = stat.bits.ldw.ipp_eop_err;
4494 }
4495 break;
4496 default:
4497 return (NXGE_ERROR);
4498 }
4499 }
4500
4501 if (my_err) {
4502 status = nxge_rxdma_handle_port_errors(nxgep, ipp_err_status,
4503 zcp_err_status);
4504 if (status != NXGE_OK)
4505 return (status);
4506 }
4507
4508 return (NXGE_OK);
4509 }
4510
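/*
 * Illustrative sketch, for exposition only (not called anywhere in the
 * driver): assuming the FIFO_EOP_PORTn constants are simply (1 << n),
 * the per-port switch in nxge_rxdma_handle_sys_errors() above collapses
 * to a single bit test -- the same form nxge_rxdma_handle_port_errors()
 * below already uses.  The helper name is hypothetical.
 */
static boolean_t
example_eop_err_is_mine(uint32_t zcp_eop_err, uint32_t ipp_eop_err,
    uint8_t portnum)
{
	return (((zcp_eop_err | ipp_eop_err) & (1 << portnum)) != 0);
}
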
4511 static nxge_status_t
4512 nxge_rxdma_handle_port_errors(p_nxge_t nxgep, uint32_t ipp_status,
4513 uint32_t zcp_status)
4514 {
4515 boolean_t rxport_fatal = B_FALSE;
4516 p_nxge_rdc_sys_stats_t statsp;
4517 nxge_status_t status = NXGE_OK;
4518 uint8_t portn;
4519
4520 portn = nxgep->mac.portnum;
4521 statsp = (p_nxge_rdc_sys_stats_t)&nxgep->statsp->rdc_sys_stats;
4522
4523 if (ipp_status & (0x1 << portn)) {
4524 statsp->ipp_eop_err++;
4525 NXGE_FM_REPORT_ERROR(nxgep, portn, 0,
4526 NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR);
4527 rxport_fatal = B_TRUE;
4528 }
4529
4530 if (zcp_status & (0x1 << portn)) {
4531 statsp->zcp_eop_err++;
4532 NXGE_FM_REPORT_ERROR(nxgep, portn, 0,
4533 NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR);
4534 rxport_fatal = B_TRUE;
4535 }
4536
4537 if (rxport_fatal) {
4538 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4539 " nxge_rxdma_handle_port_error: "
4540 " fatal error on Port #%d\n",
4541 portn));
4542 status = nxge_rx_port_fatal_err_recover(nxgep);
4543 if (status == NXGE_OK) {
4544 FM_SERVICE_RESTORED(nxgep);
4545 }
4546 }
4547
4548 return (status);
4549 }
4550
4551 static nxge_status_t
4552 nxge_rxdma_fatal_err_recover(p_nxge_t nxgep, uint16_t channel)
4553 {
4554 npi_handle_t handle;
4555 npi_status_t rs = NPI_SUCCESS;
4556 nxge_status_t status = NXGE_OK;
4557 p_rx_rbr_ring_t rbrp;
4558 p_rx_rcr_ring_t rcrp;
4559 p_rx_mbox_t mboxp;
4560 rx_dma_ent_msk_t ent_mask;
4561 p_nxge_dma_common_t dmap;
4562 uint32_t ref_cnt;
4563 p_rx_msg_t rx_msg_p;
4564 int i;
4565 uint32_t nxge_port_rcr_size;
4566
4567 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fatal_err_recover"));
4568 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4569 "Recovering from RxDMAChannel#%d error...", channel));
4570
4571 /*
4572 	 * Stop the DMA channel and wait for the stop-done
4573 	 * indication. If the stop-done bit does not get set,
4574 	 * report an error.
4575 */
4576
4577 handle = NXGE_DEV_NPI_HANDLE(nxgep);
4578 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Rx DMA stop..."));
4579
4580 rbrp = (p_rx_rbr_ring_t)nxgep->rx_rbr_rings->rbr_rings[channel];
4581 rcrp = (p_rx_rcr_ring_t)nxgep->rx_rcr_rings->rcr_rings[channel];
4582
4583 MUTEX_ENTER(&rbrp->lock);
4584 MUTEX_ENTER(&rbrp->post_lock);
4585
4586 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxDMA channel..."));
4587
4588 rs = npi_rxdma_cfg_rdc_disable(handle, channel);
4589 if (rs != NPI_SUCCESS) {
4590 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4591 "nxge_disable_rxdma_channel:failed"));
4592 goto fail;
4593 }
4594
4595 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxDMA interrupt..."));
4596
4597 /* Disable interrupt */
4598 ent_mask.value = RX_DMA_ENT_MSK_ALL;
4599 rs = npi_rxdma_event_mask(handle, OP_SET, channel, &ent_mask);
4600 if (rs != NPI_SUCCESS) {
4601 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4602 "nxge_rxdma_stop_channel: "
4603 "set rxdma event masks failed (channel %d)",
4604 channel));
4605 }
4606
4607 NXGE_DEBUG_MSG((nxgep, RX_CTL, "RxDMA channel reset..."));
4608
4609 /* Reset RXDMA channel */
4610 rs = npi_rxdma_cfg_rdc_reset(handle, channel);
4611 if (rs != NPI_SUCCESS) {
4612 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4613 "nxge_rxdma_fatal_err_recover: "
4614 " reset rxdma failed (channel %d)", channel));
4615 goto fail;
4616 }
4617
4618 nxge_port_rcr_size = nxgep->nxge_port_rcr_size;
4619
4620 mboxp = (p_rx_mbox_t)nxgep->rx_mbox_areas_p->rxmbox_areas[channel];
4621
4622 rbrp->rbr_wr_index = (rbrp->rbb_max - 1);
4623 rbrp->rbr_rd_index = 0;
4624
4625 rcrp->comp_rd_index = 0;
4626 rcrp->comp_wt_index = 0;
4627 rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p =
4628 (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc);
4629 rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp =
4630 (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc);
4631
4632 rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p +
4633 (nxge_port_rcr_size - 1);
4634 rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp +
4635 (nxge_port_rcr_size - 1);
4636
4637 dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc;
4638 bzero((caddr_t)dmap->kaddrp, dmap->alength);
4639
4640 cmn_err(CE_NOTE, "!rbr entries = %d\n", rbrp->rbr_max_size);
4641
4642 for (i = 0; i < rbrp->rbr_max_size; i++) {
4643 rx_msg_p = rbrp->rx_msg_ring[i];
4644 ref_cnt = rx_msg_p->ref_cnt;
4645 if (ref_cnt != 1) {
4646 if (rx_msg_p->cur_usage_cnt !=
4647 rx_msg_p->max_usage_cnt) {
4648 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4649 "buf[%d]: cur_usage_cnt = %d "
4650 "max_usage_cnt = %d\n", i,
4651 rx_msg_p->cur_usage_cnt,
4652 rx_msg_p->max_usage_cnt));
4653 } else {
4654 /* Buffer can be re-posted */
4655 rx_msg_p->free = B_TRUE;
4656 rx_msg_p->cur_usage_cnt = 0;
4657 rx_msg_p->max_usage_cnt = 0xbaddcafe;
4658 rx_msg_p->pkt_buf_size = 0;
4659 }
4660 }
4661 }
4662
4663 NXGE_DEBUG_MSG((nxgep, RX_CTL, "RxDMA channel re-start..."));
4664
4665 status = nxge_rxdma_start_channel(nxgep, channel, rbrp, rcrp, mboxp);
4666 if (status != NXGE_OK) {
4667 goto fail;
4668 }
4669
4670 MUTEX_EXIT(&rbrp->post_lock);
4671 MUTEX_EXIT(&rbrp->lock);
4672
4673 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4674 "Recovery Successful, RxDMAChannel#%d Restored",
4675 channel));
4676 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fatal_err_recover"));
4677 return (NXGE_OK);
4678
4679 fail:
4680 MUTEX_EXIT(&rbrp->post_lock);
4681 MUTEX_EXIT(&rbrp->lock);
4682 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed"));
4683 return (NXGE_ERROR | rs);
4684 }
4685
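/*
 * Illustrative sketch, for exposition only (not called anywhere in the
 * driver): the re-post test applied in the recovery loop of
 * nxge_rxdma_fatal_err_recover() above.  A buffer whose reference count
 * shows outstanding users can only be recycled once every sub-buffer
 * usage has been consumed; otherwise it is left alone and reported.
 * The helper name is hypothetical.
 */
static boolean_t
example_buf_can_repost(uint32_t ref_cnt, uint32_t cur_usage_cnt,
    uint32_t max_usage_cnt)
{
	return (ref_cnt != 1 && cur_usage_cnt == max_usage_cnt);
}
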
4686 nxge_status_t
4687 nxge_rx_port_fatal_err_recover(p_nxge_t nxgep)
4688 {
4689 nxge_grp_set_t *set = &nxgep->rx_set;
4690 nxge_status_t status = NXGE_OK;
4691 p_rx_rcr_ring_t rcrp;
4692 int rdc;
4693
4694 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_port_fatal_err_recover"));
4695 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4696 "Recovering from RxPort error..."));
4697 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disabling RxMAC...\n"));
4698
4699 if (nxge_rx_mac_disable(nxgep) != NXGE_OK)
4700 goto fail;
4701
4702 NXGE_DELAY(1000);
4703
4704 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Stopping all RxDMA channels..."));
4705
4706 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
4707 if ((1 << rdc) & set->owned.map) {
4708 rcrp = nxgep->rx_rcr_rings->rcr_rings[rdc];
4709 if (rcrp != NULL) {
4710 MUTEX_ENTER(&rcrp->lock);
4711 if (nxge_rxdma_fatal_err_recover(nxgep,
4712 rdc) != NXGE_OK) {
4713 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4714 "Could not recover "
4715 "channel %d", rdc));
4716 }
4717 MUTEX_EXIT(&rcrp->lock);
4718 }
4719 }
4720 }
4721
4722 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Resetting IPP..."));
4723
4724 /* Reset IPP */
4725 if (nxge_ipp_reset(nxgep) != NXGE_OK) {
4726 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4727 "nxge_rx_port_fatal_err_recover: "
4728 "Failed to reset IPP"));
4729 goto fail;
4730 }
4731
4732 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Reset RxMAC..."));
4733
4734 /* Reset RxMAC */
4735 if (nxge_rx_mac_reset(nxgep) != NXGE_OK) {
4736 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4737 "nxge_rx_port_fatal_err_recover: "
4738 "Failed to reset RxMAC"));
4739 goto fail;
4740 }
4741
4742 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-initialize IPP..."));
4743
4744 /* Re-Initialize IPP */
4745 if (nxge_ipp_init(nxgep) != NXGE_OK) {
4746 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4747 "nxge_rx_port_fatal_err_recover: "
4748 "Failed to init IPP"));
4749 goto fail;
4750 }
4751
4752 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-initialize RxMAC..."));
4753
4754 /* Re-Initialize RxMAC */
4755 if ((status = nxge_rx_mac_init(nxgep)) != NXGE_OK) {
4756 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4757 "nxge_rx_port_fatal_err_recover: "
4758 	    "Failed to init RxMAC"));
4759 goto fail;
4760 }
4761
4762 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-enable RxMAC..."));
4763
4764 /* Re-enable RxMAC */
4765 if ((status = nxge_rx_mac_enable(nxgep)) != NXGE_OK) {
4766 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4767 "nxge_rx_port_fatal_err_recover: "
4768 "Failed to enable RxMAC"));
4769 goto fail;
4770 }
4771
4772 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4773 "Recovery Successful, RxPort Restored"));
4774
4775 return (NXGE_OK);
4776 fail:
4777 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed"));
4778 return (status);
4779 }
4780
4781 void
4782 nxge_rxdma_inject_err(p_nxge_t nxgep, uint32_t err_id, uint8_t chan)
4783 {
4784 rx_dma_ctl_stat_t cs;
4785 rx_ctl_dat_fifo_stat_t cdfs;
4786
4787 switch (err_id) {
4788 case NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR:
4789 case NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR:
4790 case NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR:
4791 case NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR:
4792 case NXGE_FM_EREPORT_RDMC_RBR_TMOUT:
4793 case NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR:
4794 case NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS:
4795 case NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR:
4796 case NXGE_FM_EREPORT_RDMC_RCRINCON:
4797 case NXGE_FM_EREPORT_RDMC_RCRFULL:
4798 case NXGE_FM_EREPORT_RDMC_RBRFULL:
4799 case NXGE_FM_EREPORT_RDMC_RBRLOGPAGE:
4800 case NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE:
4801 case NXGE_FM_EREPORT_RDMC_CONFIG_ERR:
4802 RXDMA_REG_READ64(nxgep->npi_handle, RX_DMA_CTL_STAT_DBG_REG,
4803 chan, &cs.value);
4804 if (err_id == NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR)
4805 cs.bits.hdw.rcr_ack_err = 1;
4806 else if (err_id == NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR)
4807 cs.bits.hdw.dc_fifo_err = 1;
4808 else if (err_id == NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR)
4809 cs.bits.hdw.rcr_sha_par = 1;
4810 else if (err_id == NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR)
4811 cs.bits.hdw.rbr_pre_par = 1;
4812 else if (err_id == NXGE_FM_EREPORT_RDMC_RBR_TMOUT)
4813 cs.bits.hdw.rbr_tmout = 1;
4814 else if (err_id == NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR)
4815 cs.bits.hdw.rsp_cnt_err = 1;
4816 else if (err_id == NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS)
4817 cs.bits.hdw.byte_en_bus = 1;
4818 else if (err_id == NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR)
4819 cs.bits.hdw.rsp_dat_err = 1;
4820 else if (err_id == NXGE_FM_EREPORT_RDMC_CONFIG_ERR)
4821 cs.bits.hdw.config_err = 1;
4822 else if (err_id == NXGE_FM_EREPORT_RDMC_RCRINCON)
4823 cs.bits.hdw.rcrincon = 1;
4824 else if (err_id == NXGE_FM_EREPORT_RDMC_RCRFULL)
4825 cs.bits.hdw.rcrfull = 1;
4826 else if (err_id == NXGE_FM_EREPORT_RDMC_RBRFULL)
4827 cs.bits.hdw.rbrfull = 1;
4828 else if (err_id == NXGE_FM_EREPORT_RDMC_RBRLOGPAGE)
4829 cs.bits.hdw.rbrlogpage = 1;
4830 else if (err_id == NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE)
4831 cs.bits.hdw.cfiglogpage = 1;
4832 cmn_err(CE_NOTE, "!Write 0x%lx to RX_DMA_CTL_STAT_DBG_REG\n",
4833 cs.value);
4834 RXDMA_REG_WRITE64(nxgep->npi_handle, RX_DMA_CTL_STAT_DBG_REG,
4835 chan, cs.value);
4836 break;
4837 case NXGE_FM_EREPORT_RDMC_ID_MISMATCH:
4838 case NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR:
4839 case NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR:
4840 cdfs.value = 0;
4841 if (err_id == NXGE_FM_EREPORT_RDMC_ID_MISMATCH)
4842 cdfs.bits.ldw.id_mismatch = (1 << nxgep->mac.portnum);
4843 else if (err_id == NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR)
4844 cdfs.bits.ldw.zcp_eop_err = (1 << nxgep->mac.portnum);
4845 else if (err_id == NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR)
4846 cdfs.bits.ldw.ipp_eop_err = (1 << nxgep->mac.portnum);
4847 cmn_err(CE_NOTE,
4848 "!Write 0x%lx to RX_CTL_DAT_FIFO_STAT_DBG_REG\n",
4849 cdfs.value);
4850 NXGE_REG_WR64(nxgep->npi_handle,
4851 RX_CTL_DAT_FIFO_STAT_DBG_REG, cdfs.value);
4852 break;
4853 case NXGE_FM_EREPORT_RDMC_DCF_ERR:
4854 break;
4855 case NXGE_FM_EREPORT_RDMC_RCR_ERR:
4856 break;
4857 }
4858 }
4859
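/*
 * Illustrative sketch, for exposition only (not called anywhere in the
 * driver): how a debug or fault-injection path might exercise
 * nxge_rxdma_inject_err() above, here forcing an RBR-timeout report on
 * RX DMA channel 0 so the corresponding FMA ereport and recovery path
 * can be tested.  The wrapper name is hypothetical.
 */
static void
example_inject_rbr_tmout(p_nxge_t nxgep)
{
	nxge_rxdma_inject_err(nxgep, NXGE_FM_EREPORT_RDMC_RBR_TMOUT, 0);
}
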
4860 static void
4861 nxge_rxdma_databuf_free(p_rx_rbr_ring_t rbr_p)
4862 {
4863 rxring_info_t *ring_info;
4864 int index;
4865 uint32_t chunk_size;
4866 uint64_t kaddr;
4867 uint_t num_blocks;
4868
4869 NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_rxdma_databuf_free"));
4870
4871 if (rbr_p == NULL) {
4872 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
4873 "==> nxge_rxdma_databuf_free: NULL rbr pointer"));
4874 return;
4875 }
4876
4877 if (rbr_p->rbr_alloc_type == DDI_MEM_ALLOC) {
4878 NXGE_DEBUG_MSG((NULL, DMA_CTL,
4879 "<== nxge_rxdma_databuf_free: DDI"));
4880 return;
4881 }
4882
4883 ring_info = rbr_p->ring_info;
4884 if (ring_info == NULL) {
4885 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
4886 "==> nxge_rxdma_databuf_free: NULL ring info"));
4887 return;
4888 }
4889 num_blocks = rbr_p->num_blocks;
4890 for (index = 0; index < num_blocks; index++) {
4891 kaddr = ring_info->buffer[index].kaddr;
4892 chunk_size = ring_info->buffer[index].buf_size;
4893 NXGE_DEBUG_MSG((NULL, DMA_CTL,
4894 "==> nxge_rxdma_databuf_free: free chunk %d "
4895 "kaddrp $%p chunk size %d",
4896 index, kaddr, chunk_size));
4897 if (kaddr == 0)
4898 continue;
4899 nxge_free_buf(rbr_p->rbr_alloc_type, kaddr, chunk_size);
4900 ring_info->buffer[index].kaddr = 0;
4901 }
4902
4903 NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_rxdma_databuf_free"));
4904 }
4905
4906 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
4907 extern void contig_mem_free(void *, size_t);
4908 #endif
4909
4910 void
4911 nxge_free_buf(buf_alloc_type_t alloc_type, uint64_t kaddr, uint32_t buf_size)
4912 {
4913 NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_free_buf"));
4914
4915 if (kaddr == 0 || !buf_size) {
4916 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
4917 "==> nxge_free_buf: invalid kaddr $%p size to free %d",
4918 kaddr, buf_size));
4919 return;
4920 }
4921
4922 switch (alloc_type) {
4923 case KMEM_ALLOC:
4924 NXGE_DEBUG_MSG((NULL, DMA_CTL,
4925 "==> nxge_free_buf: freeing kmem $%p size %d",
4926 kaddr, buf_size));
4927 KMEM_FREE((void *)kaddr, buf_size);
4928 break;
4929
4930 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
4931 case CONTIG_MEM_ALLOC:
4932 NXGE_DEBUG_MSG((NULL, DMA_CTL,
4933 "==> nxge_free_buf: freeing contig_mem kaddr $%p size %d",
4934 kaddr, buf_size));
4935 contig_mem_free((void *)kaddr, buf_size);
4936 break;
4937 #endif
4938
4939 default:
4940 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
4941 "<== nxge_free_buf: unsupported alloc type %d",
4942 alloc_type));
4943 return;
4944 }
4945
4946 NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_free_buf"));
4947 }
4948