1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
25 */
26
27 #include <sys/nxge/nxge_impl.h>
28 #include <sys/nxge/nxge_rxdma.h>
29 #include <sys/nxge/nxge_hio.h>
30
31 #if !defined(_BIG_ENDIAN)
32 #include <npi_rx_rd32.h>
33 #endif
34 #include <npi_rx_rd64.h>
35 #include <npi_rx_wr64.h>
36
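/*
 * Translate a partition-relative RDC group or channel number into the
 * absolute hardware index for this port (offset by the port's first
 * group/channel as recorded in the hardware configuration).
 */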
37 #define NXGE_ACTUAL_RDCGRP(nxgep, rdcgrp) \
38 (rdcgrp + nxgep->pt_config.hw_config.def_mac_rxdma_grpid)
39 #define NXGE_ACTUAL_RDC(nxgep, rdc) \
40 (rdc + nxgep->pt_config.hw_config.start_rdc)
41
42 /*
43 * Globals: tunable parameters (/etc/system or adb)
44 *
45 */
46 extern uint32_t nxge_rbr_size;
47 extern uint32_t nxge_rcr_size;
48 extern uint32_t nxge_rbr_spare_size;
49 extern uint16_t nxge_rdc_buf_offset;
50
51 extern uint32_t nxge_mblks_pending;
52
53 /*
54 * Tunable to reduce the amount of time spent in the
55 * ISR doing Rx Processing.
56 */
57 extern uint32_t nxge_max_rx_pkts;
58
59 /*
60 * Tunables to manage the receive buffer blocks.
61 *
62 * nxge_rx_threshold_hi: copy all buffers.
63  * nxge_rx_buf_size_type: receive buffer block size type.
64 * nxge_rx_threshold_lo: copy only up to tunable block size type.
65 */
66 extern nxge_rxbuf_threshold_t nxge_rx_threshold_hi;
67 extern nxge_rxbuf_type_t nxge_rx_buf_size_type;
68 extern nxge_rxbuf_threshold_t nxge_rx_threshold_lo;
69
70 extern uint32_t nxge_cksum_offload;
71
72 static nxge_status_t nxge_map_rxdma(p_nxge_t, int);
73 static void nxge_unmap_rxdma(p_nxge_t, int);
74
75 static nxge_status_t nxge_rxdma_hw_start_common(p_nxge_t);
76
77 static nxge_status_t nxge_rxdma_hw_start(p_nxge_t, int);
78 static void nxge_rxdma_hw_stop(p_nxge_t, int);
79
80 static nxge_status_t nxge_map_rxdma_channel(p_nxge_t, uint16_t,
81 p_nxge_dma_common_t *, p_rx_rbr_ring_t *,
82 uint32_t,
83 p_nxge_dma_common_t *, p_rx_rcr_ring_t *,
84 p_rx_mbox_t *);
85 static void nxge_unmap_rxdma_channel(p_nxge_t, uint16_t,
86 p_rx_rbr_ring_t, p_rx_rcr_ring_t, p_rx_mbox_t);
87
88 static nxge_status_t nxge_map_rxdma_channel_cfg_ring(p_nxge_t,
89 uint16_t,
90 p_nxge_dma_common_t *, p_rx_rbr_ring_t *,
91 p_rx_rcr_ring_t *, p_rx_mbox_t *);
92 static void nxge_unmap_rxdma_channel_cfg_ring(p_nxge_t,
93 p_rx_rcr_ring_t, p_rx_mbox_t);
94
95 static nxge_status_t nxge_map_rxdma_channel_buf_ring(p_nxge_t,
96 uint16_t,
97 p_nxge_dma_common_t *,
98 p_rx_rbr_ring_t *, uint32_t);
99 static void nxge_unmap_rxdma_channel_buf_ring(p_nxge_t,
100 p_rx_rbr_ring_t);
101
102 static nxge_status_t nxge_rxdma_start_channel(p_nxge_t, uint16_t,
103 p_rx_rbr_ring_t, p_rx_rcr_ring_t, p_rx_mbox_t);
104 static nxge_status_t nxge_rxdma_stop_channel(p_nxge_t, uint16_t);
105
106 static mblk_t *
107 nxge_rx_pkts(p_nxge_t, p_rx_rcr_ring_t, rx_dma_ctl_stat_t, int);
108
109 static void nxge_receive_packet(p_nxge_t,
110 p_rx_rcr_ring_t,
111 p_rcr_entry_t,
112 boolean_t *,
113 mblk_t **, mblk_t **);
114
115 nxge_status_t nxge_disable_rxdma_channel(p_nxge_t, uint16_t);
116
117 static p_rx_msg_t nxge_allocb(size_t, uint32_t, p_nxge_dma_common_t);
118 static void nxge_freeb(p_rx_msg_t);
119 static nxge_status_t nxge_rx_err_evnts(p_nxge_t, int, rx_dma_ctl_stat_t);
120
121 static nxge_status_t nxge_rxdma_handle_port_errors(p_nxge_t,
122 uint32_t, uint32_t);
123
124 static nxge_status_t nxge_rxbuf_index_info_init(p_nxge_t,
125 p_rx_rbr_ring_t);
126
127
128 static nxge_status_t
129 nxge_rxdma_fatal_err_recover(p_nxge_t, uint16_t);
130
131 nxge_status_t
132 nxge_rx_port_fatal_err_recover(p_nxge_t);
133
134 static void nxge_rxdma_databuf_free(p_rx_rbr_ring_t);
135
136 nxge_status_t
137 nxge_init_rxdma_channels(p_nxge_t nxgep)
138 {
139 nxge_grp_set_t *set = &nxgep->rx_set;
140 int i, count, channel;
141 nxge_grp_t *group;
142 dc_map_t map;
143 int dev_gindex;
144
145 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_init_rxdma_channels"));
146
147 if (!isLDOMguest(nxgep)) {
148 if (nxge_rxdma_hw_start_common(nxgep) != NXGE_OK) {
149 cmn_err(CE_NOTE, "hw_start_common");
150 return (NXGE_ERROR);
151 }
152 }
153
154 /*
155 * NXGE_LOGICAL_GROUP_MAX > NXGE_MAX_RDC_GROUPS (8)
156 * We only have 8 hardware RDC tables, but we may have
157  * up to 16 logical (software-defined) groups of RDCs,
158 * if we make use of layer 3 & 4 hardware classification.
159 */
160 for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
161 if ((1 << i) & set->lg.map) {
162 group = set->group[i];
163 dev_gindex =
164 nxgep->pt_config.hw_config.def_mac_rxdma_grpid + i;
165 map = nxgep->pt_config.rdc_grps[dev_gindex].map;
166 for (channel = 0; channel < NXGE_MAX_RDCS; channel++) {
167 if ((1 << channel) & map) {
168 if ((nxge_grp_dc_add(nxgep,
169 group, VP_BOUND_RX, channel)))
170 goto init_rxdma_channels_exit;
171 }
172 }
173 }
174 if (++count == set->lg.count)
175 break;
176 }
177
178 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_init_rxdma_channels"));
179 return (NXGE_OK);
180
181 init_rxdma_channels_exit:
182 for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
183 if ((1 << i) & set->lg.map) {
184 group = set->group[i];
185 dev_gindex =
186 nxgep->pt_config.hw_config.def_mac_rxdma_grpid + i;
187 map = nxgep->pt_config.rdc_grps[dev_gindex].map;
188 for (channel = 0; channel < NXGE_MAX_RDCS; channel++) {
189 if ((1 << channel) & map) {
190 nxge_grp_dc_remove(nxgep,
191 VP_BOUND_RX, channel);
192 }
193 }
194 }
195 if (++count == set->lg.count)
196 break;
197 }
198
199 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_init_rxdma_channels"));
200 return (NXGE_ERROR);
201 }
202
203 nxge_status_t
204 nxge_init_rxdma_channel(p_nxge_t nxge, int channel)
205 {
206 nxge_status_t status;
207
208 NXGE_DEBUG_MSG((nxge, MEM2_CTL, "==> nxge_init_rxdma_channel"));
209
210 status = nxge_map_rxdma(nxge, channel);
211 if (status != NXGE_OK) {
212 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
213 "<== nxge_init_rxdma: status 0x%x", status));
214 return (status);
215 }
216
217 #if defined(sun4v)
218 if (isLDOMguest(nxge)) {
219 /* set rcr_ring */
220 p_rx_rcr_ring_t ring = nxge->rx_rcr_rings->rcr_rings[channel];
221
222 status = nxge_hio_rxdma_bind_intr(nxge, ring, channel);
223 if (status != NXGE_OK) {
224 nxge_unmap_rxdma(nxge, channel);
225 return (status);
226 }
227 }
228 #endif
229
230 status = nxge_rxdma_hw_start(nxge, channel);
231 if (status != NXGE_OK) {
232 nxge_unmap_rxdma(nxge, channel);
233 }
234
235 if (!nxge->statsp->rdc_ksp[channel])
236 nxge_setup_rdc_kstats(nxge, channel);
237
238 NXGE_DEBUG_MSG((nxge, MEM2_CTL,
239 "<== nxge_init_rxdma_channel: status 0x%x", status));
240
241 return (status);
242 }
243
244 void
245 nxge_uninit_rxdma_channels(p_nxge_t nxgep)
246 {
247 nxge_grp_set_t *set = &nxgep->rx_set;
248 int rdc;
249
250 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_rxdma_channels"));
251
252 if (set->owned.map == 0) {
253 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
254 "nxge_uninit_rxdma_channels: no channels"));
255 return;
256 }
257
258 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
259 if ((1 << rdc) & set->owned.map) {
260 nxge_grp_dc_remove(nxgep, VP_BOUND_RX, rdc);
261 }
262 }
263
264 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_uninit_rxdma_channels"));
265 }
266
267 void
268 nxge_uninit_rxdma_channel(p_nxge_t nxgep, int channel)
269 {
270 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_rxdma_channel"));
271
272 if (nxgep->statsp->rdc_ksp[channel]) {
273 kstat_delete(nxgep->statsp->rdc_ksp[channel]);
274 nxgep->statsp->rdc_ksp[channel] = 0;
275 }
276
277 nxge_rxdma_hw_stop(nxgep, channel);
278 nxge_unmap_rxdma(nxgep, channel);
279
280 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_uninit_rxdma_channel"));
281 }
282
283 nxge_status_t
284 nxge_reset_rxdma_channel(p_nxge_t nxgep, uint16_t channel)
285 {
286 npi_handle_t handle;
287 npi_status_t rs = NPI_SUCCESS;
288 nxge_status_t status = NXGE_OK;
289
290 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_reset_rxdma_channel"));
291
292 handle = NXGE_DEV_NPI_HANDLE(nxgep);
293 rs = npi_rxdma_cfg_rdc_reset(handle, channel);
294
295 if (rs != NPI_SUCCESS) {
296 status = NXGE_ERROR | rs;
297 }
298
299 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_reset_rxdma_channel"));
300
301 return (status);
302 }
303
304 void
305 nxge_rxdma_regs_dump_channels(p_nxge_t nxgep)
306 {
307 nxge_grp_set_t *set = &nxgep->rx_set;
308 int rdc;
309
310 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_regs_dump_channels"));
311
312 if (!isLDOMguest(nxgep)) {
313 npi_handle_t handle = NXGE_DEV_NPI_HANDLE(nxgep);
314 (void) npi_rxdma_dump_fzc_regs(handle);
315 }
316
317 if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) {
318 NXGE_DEBUG_MSG((nxgep, TX_CTL,
319 "nxge_rxdma_regs_dump_channels: "
320 "NULL ring pointer(s)"));
321 return;
322 }
323
324 if (set->owned.map == 0) {
325 NXGE_DEBUG_MSG((nxgep, RX_CTL,
326 "nxge_rxdma_regs_dump_channels: no channels"));
327 return;
328 }
329
330 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
331 if ((1 << rdc) & set->owned.map) {
332 rx_rbr_ring_t *ring =
333 nxgep->rx_rbr_rings->rbr_rings[rdc];
334 if (ring) {
335 (void) nxge_dump_rxdma_channel(nxgep, rdc);
336 }
337 }
338 }
339
340 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_regs_dump_channels"));
341 }
342
343 nxge_status_t
344 nxge_dump_rxdma_channel(p_nxge_t nxgep, uint8_t channel)
345 {
346 npi_handle_t handle;
347 npi_status_t rs = NPI_SUCCESS;
348 nxge_status_t status = NXGE_OK;
349
350 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_dump_rxdma_channel"));
351
352 handle = NXGE_DEV_NPI_HANDLE(nxgep);
353 rs = npi_rxdma_dump_rdc_regs(handle, channel);
354
355 if (rs != NPI_SUCCESS) {
356 status = NXGE_ERROR | rs;
357 }
358 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dump_rxdma_channel"));
359 return (status);
360 }
361
362 nxge_status_t
363 nxge_init_rxdma_channel_event_mask(p_nxge_t nxgep, uint16_t channel,
364 p_rx_dma_ent_msk_t mask_p)
365 {
366 npi_handle_t handle;
367 npi_status_t rs = NPI_SUCCESS;
368 nxge_status_t status = NXGE_OK;
369
370 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
371 "<== nxge_init_rxdma_channel_event_mask"));
372
373 handle = NXGE_DEV_NPI_HANDLE(nxgep);
374 rs = npi_rxdma_event_mask(handle, OP_SET, channel, mask_p);
375 if (rs != NPI_SUCCESS) {
376 status = NXGE_ERROR | rs;
377 }
378
379 return (status);
380 }
381
382 nxge_status_t
383 nxge_init_rxdma_channel_cntl_stat(p_nxge_t nxgep, uint16_t channel,
384 p_rx_dma_ctl_stat_t cs_p)
385 {
386 npi_handle_t handle;
387 npi_status_t rs = NPI_SUCCESS;
388 nxge_status_t status = NXGE_OK;
389
390 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
391 "<== nxge_init_rxdma_channel_cntl_stat"));
392
393 handle = NXGE_DEV_NPI_HANDLE(nxgep);
394 rs = npi_rxdma_control_status(handle, OP_SET, channel, cs_p);
395
396 if (rs != NPI_SUCCESS) {
397 status = NXGE_ERROR | rs;
398 }
399
400 return (status);
401 }
402
403 /*
404 * nxge_rxdma_cfg_rdcgrp_default_rdc
405 *
406 * Set the default RDC for an RDC Group (Table)
407 *
408 * Arguments:
409 * nxgep
410 * rdcgrp The group to modify
411 * rdc The new default RDC.
412 *
413 * Notes:
414 *
415 * NPI/NXGE function calls:
416 * npi_rxdma_cfg_rdc_table_default_rdc()
417 *
418 * Registers accessed:
419 * RDC_TBL_REG: FZC_ZCP + 0x10000
420 *
421 * Context:
422 * Service domain
423 */
424 nxge_status_t
425 nxge_rxdma_cfg_rdcgrp_default_rdc(
426 p_nxge_t nxgep,
427 uint8_t rdcgrp,
428 uint8_t rdc)
429 {
430 npi_handle_t handle;
431 npi_status_t rs = NPI_SUCCESS;
432 p_nxge_dma_pt_cfg_t p_dma_cfgp;
433 p_nxge_rdc_grp_t rdc_grp_p;
434 uint8_t actual_rdcgrp, actual_rdc;
435
436 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
437 " ==> nxge_rxdma_cfg_rdcgrp_default_rdc"));
438 p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
439
440 handle = NXGE_DEV_NPI_HANDLE(nxgep);
441
442 /*
443 * This has to be rewritten. Do we even allow this anymore?
444 */
445 rdc_grp_p = &p_dma_cfgp->rdc_grps[rdcgrp];
446 RDC_MAP_IN(rdc_grp_p->map, rdc);
447 rdc_grp_p->def_rdc = rdc;
448
449 actual_rdcgrp = NXGE_ACTUAL_RDCGRP(nxgep, rdcgrp);
450 actual_rdc = NXGE_ACTUAL_RDC(nxgep, rdc);
451
452 rs = npi_rxdma_cfg_rdc_table_default_rdc(
453 handle, actual_rdcgrp, actual_rdc);
454
455 if (rs != NPI_SUCCESS) {
456 return (NXGE_ERROR | rs);
457 }
458 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
459 " <== nxge_rxdma_cfg_rdcgrp_default_rdc"));
460 return (NXGE_OK);
461 }
462
463 nxge_status_t
464 nxge_rxdma_cfg_port_default_rdc(p_nxge_t nxgep, uint8_t port, uint8_t rdc)
465 {
466 npi_handle_t handle;
467
468 uint8_t actual_rdc;
469 npi_status_t rs = NPI_SUCCESS;
470
471 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
472 " ==> nxge_rxdma_cfg_port_default_rdc"));
473
474 handle = NXGE_DEV_NPI_HANDLE(nxgep);
475 actual_rdc = rdc; /* XXX Hack! */
476 rs = npi_rxdma_cfg_default_port_rdc(handle, port, actual_rdc);
477
478
479 if (rs != NPI_SUCCESS) {
480 return (NXGE_ERROR | rs);
481 }
482 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
483 " <== nxge_rxdma_cfg_port_default_rdc"));
484
485 return (NXGE_OK);
486 }
487
488 nxge_status_t
489 nxge_rxdma_cfg_rcr_threshold(p_nxge_t nxgep, uint8_t channel,
490 uint16_t pkts)
491 {
492 npi_status_t rs = NPI_SUCCESS;
493 npi_handle_t handle;
494 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
495 " ==> nxge_rxdma_cfg_rcr_threshold"));
496 handle = NXGE_DEV_NPI_HANDLE(nxgep);
497
498 rs = npi_rxdma_cfg_rdc_rcr_threshold(handle, channel, pkts);
499
500 if (rs != NPI_SUCCESS) {
501 return (NXGE_ERROR | rs);
502 }
503 NXGE_DEBUG_MSG((nxgep, RX2_CTL, " <== nxge_rxdma_cfg_rcr_threshold"));
504 return (NXGE_OK);
505 }
506
507 nxge_status_t
508 nxge_rxdma_cfg_rcr_timeout(p_nxge_t nxgep, uint8_t channel,
509 uint16_t tout, uint8_t enable)
510 {
511 npi_status_t rs = NPI_SUCCESS;
512 npi_handle_t handle;
513 NXGE_DEBUG_MSG((nxgep, RX2_CTL, " ==> nxge_rxdma_cfg_rcr_timeout"));
514 handle = NXGE_DEV_NPI_HANDLE(nxgep);
515 if (enable == 0) {
516 rs = npi_rxdma_cfg_rdc_rcr_timeout_disable(handle, channel);
517 } else {
518 rs = npi_rxdma_cfg_rdc_rcr_timeout(handle, channel,
519 tout);
520 }
521
522 if (rs != NPI_SUCCESS) {
523 return (NXGE_ERROR | rs);
524 }
525 NXGE_DEBUG_MSG((nxgep, RX2_CTL, " <== nxge_rxdma_cfg_rcr_timeout"));
526 return (NXGE_OK);
527 }
528
529 nxge_status_t
530 nxge_enable_rxdma_channel(p_nxge_t nxgep, uint16_t channel,
531 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p)
532 {
533 npi_handle_t handle;
534 rdc_desc_cfg_t rdc_desc;
535 p_rcrcfig_b_t cfgb_p;
536 npi_status_t rs = NPI_SUCCESS;
537
538 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel"));
539 handle = NXGE_DEV_NPI_HANDLE(nxgep);
540 /*
541 * Use configuration data composed at init time.
542 * Write to hardware the receive ring configurations.
543 */
544 rdc_desc.mbox_enable = 1;
545 rdc_desc.mbox_addr = mbox_p->mbox_addr;
546 NXGE_DEBUG_MSG((nxgep, RX_CTL,
547 "==> nxge_enable_rxdma_channel: mboxp $%p($%p)",
548 mbox_p->mbox_addr, rdc_desc.mbox_addr));
549
550 rdc_desc.rbr_len = rbr_p->rbb_max;
551 rdc_desc.rbr_addr = rbr_p->rbr_addr;
552
553 switch (nxgep->rx_bksize_code) {
554 case RBR_BKSIZE_4K:
555 rdc_desc.page_size = SIZE_4KB;
556 break;
557 case RBR_BKSIZE_8K:
558 rdc_desc.page_size = SIZE_8KB;
559 break;
560 case RBR_BKSIZE_16K:
561 rdc_desc.page_size = SIZE_16KB;
562 break;
563 case RBR_BKSIZE_32K:
564 rdc_desc.page_size = SIZE_32KB;
565 break;
566 }
567
568 rdc_desc.size0 = rbr_p->npi_pkt_buf_size0;
569 rdc_desc.valid0 = 1;
570
571 rdc_desc.size1 = rbr_p->npi_pkt_buf_size1;
572 rdc_desc.valid1 = 1;
573
574 rdc_desc.size2 = rbr_p->npi_pkt_buf_size2;
575 rdc_desc.valid2 = 1;
576
577 rdc_desc.full_hdr = rcr_p->full_hdr_flag;
578 rdc_desc.offset = rcr_p->sw_priv_hdr_len;
579
580 rdc_desc.rcr_len = rcr_p->comp_size;
581 rdc_desc.rcr_addr = rcr_p->rcr_addr;
582
583 cfgb_p = &(rcr_p->rcr_cfgb);
584 rdc_desc.rcr_threshold = cfgb_p->bits.ldw.pthres;
585 /* For now, disable this timeout in a guest domain. */
586 if (isLDOMguest(nxgep)) {
587 rdc_desc.rcr_timeout = 0;
588 rdc_desc.rcr_timeout_enable = 0;
589 } else {
590 rdc_desc.rcr_timeout = cfgb_p->bits.ldw.timeout;
591 rdc_desc.rcr_timeout_enable = cfgb_p->bits.ldw.entout;
592 }
593
594 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel: "
595 "rbr_len qlen %d pagesize code %d rcr_len %d",
596 rdc_desc.rbr_len, rdc_desc.page_size, rdc_desc.rcr_len));
597 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel: "
598 "size 0 %d size 1 %d size 2 %d",
599 rbr_p->npi_pkt_buf_size0, rbr_p->npi_pkt_buf_size1,
600 rbr_p->npi_pkt_buf_size2));
601
602 if (nxgep->niu_hw_type == NIU_HW_TYPE_RF)
603 rs = npi_rxdma_cfg_rdc_ring(handle, rbr_p->rdc,
604 &rdc_desc, B_TRUE);
605 else
606 rs = npi_rxdma_cfg_rdc_ring(handle, rbr_p->rdc,
607 &rdc_desc, B_FALSE);
608 if (rs != NPI_SUCCESS) {
609 return (NXGE_ERROR | rs);
610 }
611
612 /*
613 * Enable the timeout and threshold.
614 */
615 rs = npi_rxdma_cfg_rdc_rcr_threshold(handle, channel,
616 rdc_desc.rcr_threshold);
617 if (rs != NPI_SUCCESS) {
618 return (NXGE_ERROR | rs);
619 }
620
621 rs = npi_rxdma_cfg_rdc_rcr_timeout(handle, channel,
622 rdc_desc.rcr_timeout);
623 if (rs != NPI_SUCCESS) {
624 return (NXGE_ERROR | rs);
625 }
626
627 if (!isLDOMguest(nxgep)) {
628 /* Enable the DMA */
629 rs = npi_rxdma_cfg_rdc_enable(handle, channel);
630 if (rs != NPI_SUCCESS) {
631 return (NXGE_ERROR | rs);
632 }
633 }
634
635 /* Kick the DMA engine. */
636 npi_rxdma_rdc_rbr_kick(handle, channel, rbr_p->rbb_max);
637
638 if (!isLDOMguest(nxgep)) {
639 /* Clear the rbr empty bit */
640 (void) npi_rxdma_channel_rbr_empty_clear(handle, channel);
641 }
642
643 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_enable_rxdma_channel"));
644
645 return (NXGE_OK);
646 }
647
648 nxge_status_t
649 nxge_disable_rxdma_channel(p_nxge_t nxgep, uint16_t channel)
650 {
651 npi_handle_t handle;
652 npi_status_t rs = NPI_SUCCESS;
653
654 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_disable_rxdma_channel"));
655 handle = NXGE_DEV_NPI_HANDLE(nxgep);
656
657 /* disable the DMA */
658 rs = npi_rxdma_cfg_rdc_disable(handle, channel);
659 if (rs != NPI_SUCCESS) {
660 NXGE_DEBUG_MSG((nxgep, RX_CTL,
661 "<== nxge_disable_rxdma_channel:failed (0x%x)",
662 rs));
663 return (NXGE_ERROR | rs);
664 }
665
666 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_disable_rxdma_channel"));
667 return (NXGE_OK);
668 }
669
670 nxge_status_t
671 nxge_rxdma_channel_rcrflush(p_nxge_t nxgep, uint8_t channel)
672 {
673 npi_handle_t handle;
674 nxge_status_t status = NXGE_OK;
675
676 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
677 "<== nxge_init_rxdma_channel_rcrflush"));
678
679 handle = NXGE_DEV_NPI_HANDLE(nxgep);
680 npi_rxdma_rdc_rcr_flush(handle, channel);
681
682 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
683 "<== nxge_init_rxdma_channel_rcrflsh"));
684 return (status);
685
686 }
687
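/*
 * Helpers for the binary search in nxge_rxbuf_pp_to_vp().  On each probe,
 * the sum base_side + end_side classifies the target address as inside
 * the anchor chunk (IN_MIDDLE), entirely above it (BOTH_RIGHT), or
 * entirely below it (BOTH_LEFT).
 */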
688 #define MID_INDEX(l, r) ((r + l + 1) >> 1)
689
690 #define TO_LEFT -1
691 #define TO_RIGHT 1
692 #define BOTH_RIGHT (TO_RIGHT + TO_RIGHT)
693 #define BOTH_LEFT (TO_LEFT + TO_LEFT)
694 #define IN_MIDDLE (TO_RIGHT + TO_LEFT)
695 #define NO_HINT 0xffffffff
696
697 /*ARGSUSED*/
698 nxge_status_t
699 nxge_rxbuf_pp_to_vp(p_nxge_t nxgep, p_rx_rbr_ring_t rbr_p,
700 uint8_t pktbufsz_type, uint64_t *pkt_buf_addr_pp,
701 uint64_t **pkt_buf_addr_p, uint32_t *bufoffset, uint32_t *msg_index)
702 {
703 int bufsize;
704 uint64_t pktbuf_pp;
705 uint64_t dvma_addr;
706 rxring_info_t *ring_info;
707 int base_side, end_side;
708 int r_index, l_index, anchor_index;
709 int found, search_done;
710 uint32_t offset, chunk_size, block_size, page_size_mask;
711 uint32_t chunk_index, block_index, total_index;
712 int max_iterations, iteration;
713 rxbuf_index_info_t *bufinfo;
714
715 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_rxbuf_pp_to_vp"));
716
717 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
718 "==> nxge_rxbuf_pp_to_vp: buf_pp $%p btype %d",
719 pkt_buf_addr_pp,
720 pktbufsz_type));
721 pktbuf_pp = (uint64_t)pkt_buf_addr_pp;
722
723 switch (pktbufsz_type) {
724 case 0:
725 bufsize = rbr_p->pkt_buf_size0;
726 break;
727 case 1:
728 bufsize = rbr_p->pkt_buf_size1;
729 break;
730 case 2:
731 bufsize = rbr_p->pkt_buf_size2;
732 break;
733 case RCR_SINGLE_BLOCK:
734 bufsize = 0;
735 anchor_index = 0;
736 break;
737 default:
738 return (NXGE_ERROR);
739 }
740
741 if (rbr_p->num_blocks == 1) {
742 anchor_index = 0;
743 ring_info = rbr_p->ring_info;
744 bufinfo = (rxbuf_index_info_t *)ring_info->buffer;
745 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
746 "==> nxge_rxbuf_pp_to_vp: (found, 1 block) "
747 "buf_pp $%p btype %d anchor_index %d "
748 "bufinfo $%p",
749 pkt_buf_addr_pp,
750 pktbufsz_type,
751 anchor_index,
752 bufinfo));
753
754 goto found_index;
755 }
756
757 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
758 "==> nxge_rxbuf_pp_to_vp: "
759 "buf_pp $%p btype %d anchor_index %d",
760 pkt_buf_addr_pp,
761 pktbufsz_type,
762 anchor_index));
763
764 ring_info = rbr_p->ring_info;
765 found = B_FALSE;
766 bufinfo = (rxbuf_index_info_t *)ring_info->buffer;
767 iteration = 0;
768 max_iterations = ring_info->max_iterations;
769 /*
770 * First check if this block has been seen
771 * recently. This is indicated by a hint which
772 * is initialized when the first buffer of the block
773 * is seen. The hint is reset when the last buffer of
774 * the block has been processed.
775 * As three block sizes are supported, three hints
776 * are kept. The idea behind the hints is that once
777 * the hardware uses a block for a buffer of that
778 * size, it will use it exclusively for that size
779 * and will use it until it is exhausted. It is assumed
780 	 * that only a single block is being used for a given
781 	 * buffer size at any one time.
782 */
783 if (ring_info->hint[pktbufsz_type] != NO_HINT) {
784 anchor_index = ring_info->hint[pktbufsz_type];
785 dvma_addr = bufinfo[anchor_index].dvma_addr;
786 chunk_size = bufinfo[anchor_index].buf_size;
787 if ((pktbuf_pp >= dvma_addr) &&
788 (pktbuf_pp < (dvma_addr + chunk_size))) {
789 found = B_TRUE;
790 /*
791 * check if this is the last buffer in the block
792 * If so, then reset the hint for the size;
793 */
794
795 if ((pktbuf_pp + bufsize) >= (dvma_addr + chunk_size))
796 ring_info->hint[pktbufsz_type] = NO_HINT;
797 }
798 }
799
800 if (found == B_FALSE) {
801 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
802 "==> nxge_rxbuf_pp_to_vp: (!found)"
803 "buf_pp $%p btype %d anchor_index %d",
804 pkt_buf_addr_pp,
805 pktbufsz_type,
806 anchor_index));
807
808 /*
809 * This is the first buffer of the block of this
810 * size. Need to search the whole information
811 * array.
812 * the search algorithm uses a binary tree search
813 * algorithm. It assumes that the information is
814 * already sorted with increasing order
815 * info[0] < info[1] < info[2] .... < info[n-1]
816 * where n is the size of the information array
817 */
818 r_index = rbr_p->num_blocks - 1;
819 l_index = 0;
820 search_done = B_FALSE;
821 anchor_index = MID_INDEX(r_index, l_index);
822 while (search_done == B_FALSE) {
823 if ((r_index == l_index) ||
824 (iteration >= max_iterations))
825 search_done = B_TRUE;
826 end_side = TO_RIGHT; /* to the right */
827 base_side = TO_LEFT; /* to the left */
828 /* read the DVMA address information and sort it */
829 dvma_addr = bufinfo[anchor_index].dvma_addr;
830 chunk_size = bufinfo[anchor_index].buf_size;
831 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
832 "==> nxge_rxbuf_pp_to_vp: (searching)"
833 "buf_pp $%p btype %d "
834 "anchor_index %d chunk_size %d dvmaaddr $%p",
835 pkt_buf_addr_pp,
836 pktbufsz_type,
837 anchor_index,
838 chunk_size,
839 dvma_addr));
840
841 if (pktbuf_pp >= dvma_addr)
842 base_side = TO_RIGHT; /* to the right */
843 if (pktbuf_pp < (dvma_addr + chunk_size))
844 end_side = TO_LEFT; /* to the left */
845
846 switch (base_side + end_side) {
847 case IN_MIDDLE:
848 /* found */
849 found = B_TRUE;
850 search_done = B_TRUE;
851 if ((pktbuf_pp + bufsize) <
852 (dvma_addr + chunk_size))
853 ring_info->hint[pktbufsz_type] =
854 bufinfo[anchor_index].buf_index;
855 break;
856 case BOTH_RIGHT:
857 /* not found: go to the right */
858 l_index = anchor_index + 1;
859 anchor_index = MID_INDEX(r_index, l_index);
860 break;
861
862 case BOTH_LEFT:
863 /* not found: go to the left */
864 r_index = anchor_index - 1;
865 anchor_index = MID_INDEX(r_index, l_index);
866 break;
867 default: /* should not come here */
868 return (NXGE_ERROR);
869 }
870 iteration++;
871 }
872
873 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
874 "==> nxge_rxbuf_pp_to_vp: (search done)"
875 "buf_pp $%p btype %d anchor_index %d",
876 pkt_buf_addr_pp,
877 pktbufsz_type,
878 anchor_index));
879 }
880
881 if (found == B_FALSE) {
882 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
883 "==> nxge_rxbuf_pp_to_vp: (search failed)"
884 "buf_pp $%p btype %d anchor_index %d",
885 pkt_buf_addr_pp,
886 pktbufsz_type,
887 anchor_index));
888 return (NXGE_ERROR);
889 }
890
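	/*
	 * Translate the packet's DMA address into a kernel virtual
	 * address: locate the block within the matched chunk, then
	 * compute the message index and the offset within the block.
	 */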
891 found_index:
892 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
893 "==> nxge_rxbuf_pp_to_vp: (FOUND1)"
894 "buf_pp $%p btype %d bufsize %d anchor_index %d",
895 pkt_buf_addr_pp,
896 pktbufsz_type,
897 bufsize,
898 anchor_index));
899
900 /* index of the first block in this chunk */
901 chunk_index = bufinfo[anchor_index].start_index;
902 dvma_addr = bufinfo[anchor_index].dvma_addr;
903 page_size_mask = ring_info->block_size_mask;
904
905 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
906 "==> nxge_rxbuf_pp_to_vp: (FOUND3), get chunk)"
907 "buf_pp $%p btype %d bufsize %d "
908 "anchor_index %d chunk_index %d dvma $%p",
909 pkt_buf_addr_pp,
910 pktbufsz_type,
911 bufsize,
912 anchor_index,
913 chunk_index,
914 dvma_addr));
915
916 offset = pktbuf_pp - dvma_addr; /* offset within the chunk */
917 block_size = rbr_p->block_size; /* System block(page) size */
918
919 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
920 "==> nxge_rxbuf_pp_to_vp: (FOUND4), get chunk)"
921 "buf_pp $%p btype %d bufsize %d "
922 "anchor_index %d chunk_index %d dvma $%p "
923 "offset %d block_size %d",
924 pkt_buf_addr_pp,
925 pktbufsz_type,
926 bufsize,
927 anchor_index,
928 chunk_index,
929 dvma_addr,
930 offset,
931 block_size));
932
933 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> getting total index"));
934
935 block_index = (offset / block_size); /* index within chunk */
936 total_index = chunk_index + block_index;
937
938
939 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
940 "==> nxge_rxbuf_pp_to_vp: "
941 "total_index %d dvma_addr $%p "
942 "offset %d block_size %d "
943 "block_index %d ",
944 total_index, dvma_addr,
945 offset, block_size,
946 block_index));
947 *pkt_buf_addr_p = (uint64_t *)((uint64_t)bufinfo[anchor_index].kaddr +
948 (uint64_t)offset);
949
950 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
951 "==> nxge_rxbuf_pp_to_vp: "
952 "total_index %d dvma_addr $%p "
953 "offset %d block_size %d "
954 "block_index %d "
955 "*pkt_buf_addr_p $%p",
956 total_index, dvma_addr,
957 offset, block_size,
958 block_index,
959 *pkt_buf_addr_p));
960
961
962 *msg_index = total_index;
963 *bufoffset = (offset & page_size_mask);
964
965 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
966 "==> nxge_rxbuf_pp_to_vp: get msg index: "
967 "msg_index %d bufoffset_index %d",
968 *msg_index,
969 *bufoffset));
970
971 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_rxbuf_pp_to_vp"));
972
973 return (NXGE_OK);
974 }
975
976 /*
977 * used by quick sort (qsort) function
978 * to perform comparison
979 */
980 static int
981 nxge_sort_compare(const void *p1, const void *p2)
982 {
983
984 rxbuf_index_info_t *a, *b;
985
986 a = (rxbuf_index_info_t *)p1;
987 b = (rxbuf_index_info_t *)p2;
988
989 if (a->dvma_addr > b->dvma_addr)
990 return (1);
991 if (a->dvma_addr < b->dvma_addr)
992 return (-1);
993 return (0);
994 }
995
996
997
998 /*
999 * grabbed this sort implementation from common/syscall/avl.c
1000 *
1001 */
1002 /*
1003 * Generic shellsort, from K&R (1st ed, p 58.), somewhat modified.
1004 * v = Ptr to array/vector of objs
1005 * n = # objs in the array
1006  * s = size of each obj (must be a multiple of the word size)
1007  * f = ptr to function to compare two objs; it returns
1008  *     -1 (less than), 0 (equal), or 1 (greater than)
1009 */
1010 void
1011 nxge_ksort(caddr_t v, int n, int s, int (*f)())
1012 {
1013 int g, i, j, ii;
1014 unsigned int *p1, *p2;
1015 unsigned int tmp;
1016
1017 /* No work to do */
1018 if (v == NULL || n <= 1)
1019 return;
1020 /* Sanity check on arguments */
1021 ASSERT(((uintptr_t)v & 0x3) == 0 && (s & 0x3) == 0);
1022 ASSERT(s > 0);
1023
1024 for (g = n / 2; g > 0; g /= 2) {
1025 for (i = g; i < n; i++) {
1026 for (j = i - g; j >= 0 &&
1027 (*f)(v + j * s, v + (j + g) * s) == 1;
1028 j -= g) {
1029 p1 = (unsigned *)(v + j * s);
1030 p2 = (unsigned *)(v + (j + g) * s);
1031 for (ii = 0; ii < s / 4; ii++) {
1032 tmp = *p1;
1033 *p1++ = *p2;
1034 *p2++ = tmp;
1035 }
1036 }
1037 }
1038 }
1039 }
1040
1041 /*
1042 * Initialize data structures required for rxdma
1043 * buffer dvma->vmem address lookup
1044 */
1045 /*ARGSUSED*/
1046 static nxge_status_t
1047 nxge_rxbuf_index_info_init(p_nxge_t nxgep, p_rx_rbr_ring_t rbrp)
1048 {
1049
1050 int index;
1051 rxring_info_t *ring_info;
1052 int max_iteration = 0, max_index = 0;
1053
1054 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_rxbuf_index_info_init"));
1055
1056 ring_info = rbrp->ring_info;
1057 ring_info->hint[0] = NO_HINT;
1058 ring_info->hint[1] = NO_HINT;
1059 ring_info->hint[2] = NO_HINT;
1060 max_index = rbrp->num_blocks;
1061
1062 /* read the DVMA address information and sort it */
1063 /* do init of the information array */
1064
1065
1066 NXGE_DEBUG_MSG((nxgep, DMA2_CTL,
1067 " nxge_rxbuf_index_info_init Sort ptrs"));
1068
1069 /* sort the array */
1070 nxge_ksort((void *)ring_info->buffer, max_index,
1071 sizeof (rxbuf_index_info_t), nxge_sort_compare);
1072
1073
1074
1075 for (index = 0; index < max_index; index++) {
1076 NXGE_DEBUG_MSG((nxgep, DMA2_CTL,
1077 " nxge_rxbuf_index_info_init: sorted chunk %d "
1078 " ioaddr $%p kaddr $%p size %x",
1079 index, ring_info->buffer[index].dvma_addr,
1080 ring_info->buffer[index].kaddr,
1081 ring_info->buffer[index].buf_size));
1082 }
1083
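	/*
	 * Cap the number of binary-search probes at roughly
	 * log2(num_blocks) plus a margin, enough to cover every entry
	 * in the sorted array.
	 */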
1084 max_iteration = 0;
1085 while (max_index >= (1ULL << max_iteration))
1086 max_iteration++;
1087 ring_info->max_iterations = max_iteration + 1;
1088 NXGE_DEBUG_MSG((nxgep, DMA2_CTL,
1089 " nxge_rxbuf_index_info_init Find max iter %d",
1090 ring_info->max_iterations));
1091
1092 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxbuf_index_info_init"));
1093 return (NXGE_OK);
1094 }
1095
1096 /* ARGSUSED */
1097 void
1098 nxge_dump_rcr_entry(p_nxge_t nxgep, p_rcr_entry_t entry_p)
1099 {
1100 #ifdef NXGE_DEBUG
1101
1102 uint32_t bptr;
1103 uint64_t pp;
1104
1105 bptr = entry_p->bits.hdw.pkt_buf_addr;
1106
1107 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1108 "\trcr entry $%p "
1109 "\trcr entry 0x%0llx "
1110 "\trcr entry 0x%08x "
1111 "\trcr entry 0x%08x "
1112 "\tvalue 0x%0llx\n"
1113 "\tmulti = %d\n"
1114 "\tpkt_type = 0x%x\n"
1115 "\tzero_copy = %d\n"
1116 "\tnoport = %d\n"
1117 "\tpromis = %d\n"
1118 "\terror = 0x%04x\n"
1119 "\tdcf_err = 0x%01x\n"
1120 "\tl2_len = %d\n"
1121 "\tpktbufsize = %d\n"
1122 "\tpkt_buf_addr = $%p\n"
1123 "\tpkt_buf_addr (<< 6) = $%p\n",
1124 entry_p,
1125 *(int64_t *)entry_p,
1126 *(int32_t *)entry_p,
1127 *(int32_t *)((char *)entry_p + 32),
1128 entry_p->value,
1129 entry_p->bits.hdw.multi,
1130 entry_p->bits.hdw.pkt_type,
1131 entry_p->bits.hdw.zero_copy,
1132 entry_p->bits.hdw.noport,
1133 entry_p->bits.hdw.promis,
1134 entry_p->bits.hdw.error,
1135 entry_p->bits.hdw.dcf_err,
1136 entry_p->bits.hdw.l2_len,
1137 entry_p->bits.hdw.pktbufsz,
1138 bptr,
1139 entry_p->bits.ldw.pkt_buf_addr));
1140
1141 pp = (entry_p->value & RCR_PKT_BUF_ADDR_MASK) <<
1142 RCR_PKT_BUF_ADDR_SHIFT;
1143
1144 NXGE_DEBUG_MSG((nxgep, RX_CTL, "rcr pp 0x%llx l2 len %d",
1145 pp, (*(int64_t *)entry_p >> 40) & 0x3fff));
1146 #endif
1147 }
1148
1149 void
1150 nxge_rxdma_regs_dump(p_nxge_t nxgep, int rdc)
1151 {
1152 npi_handle_t handle;
1153 rbr_stat_t rbr_stat;
1154 addr44_t hd_addr;
1155 addr44_t tail_addr;
1156 uint16_t qlen;
1157
1158 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1159 "==> nxge_rxdma_regs_dump: rdc channel %d", rdc));
1160
1161 handle = NXGE_DEV_NPI_HANDLE(nxgep);
1162
1163 /* RBR head */
1164 hd_addr.addr = 0;
1165 (void) npi_rxdma_rdc_rbr_head_get(handle, rdc, &hd_addr);
1166 printf("nxge_rxdma_regs_dump: got hdptr $%p \n",
1167 (void *)hd_addr.addr);
1168
1169 /* RBR stats */
1170 (void) npi_rxdma_rdc_rbr_stat_get(handle, rdc, &rbr_stat);
1171 printf("nxge_rxdma_regs_dump: rbr len %d \n", rbr_stat.bits.ldw.qlen);
1172
1173 /* RCR tail */
1174 tail_addr.addr = 0;
1175 (void) npi_rxdma_rdc_rcr_tail_get(handle, rdc, &tail_addr);
1176 printf("nxge_rxdma_regs_dump: got tail ptr $%p \n",
1177 (void *)tail_addr.addr);
1178
1179 /* RCR qlen */
1180 (void) npi_rxdma_rdc_rcr_qlen_get(handle, rdc, &qlen);
1181 printf("nxge_rxdma_regs_dump: rcr len %x \n", qlen);
1182
1183 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1184 "<== nxge_rxdma_regs_dump: rdc rdc %d", rdc));
1185 }
1186
1187 nxge_status_t
1188 nxge_rxdma_hw_mode(p_nxge_t nxgep, boolean_t enable)
1189 {
1190 nxge_grp_set_t *set = &nxgep->rx_set;
1191 nxge_status_t status;
1192 npi_status_t rs;
1193 int rdc;
1194
1195 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
1196 "==> nxge_rxdma_hw_mode: mode %d", enable));
1197
1198 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
1199 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1200 "<== nxge_rxdma_mode: not initialized"));
1201 return (NXGE_ERROR);
1202 }
1203
1204 if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) {
1205 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1206 "<== nxge_tx_port_fatal_err_recover: "
1207 "NULL ring pointer(s)"));
1208 return (NXGE_ERROR);
1209 }
1210
1211 if (set->owned.map == 0) {
1212 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1213 "nxge_rxdma_regs_dump_channels: no channels"));
1214 return (0);
1215 }
1216
1217 rs = 0;
1218 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
1219 if ((1 << rdc) & set->owned.map) {
1220 rx_rbr_ring_t *ring =
1221 nxgep->rx_rbr_rings->rbr_rings[rdc];
1222 npi_handle_t handle = NXGE_DEV_NPI_HANDLE(nxgep);
1223 if (ring) {
1224 if (enable) {
1225 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
1226 "==> nxge_rxdma_hw_mode: "
1227 "channel %d (enable)", rdc));
1228 rs = npi_rxdma_cfg_rdc_enable
1229 (handle, rdc);
1230 } else {
1231 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
1232 "==> nxge_rxdma_hw_mode: "
1233 "channel %d disable)", rdc));
1234 rs = npi_rxdma_cfg_rdc_disable
1235 (handle, rdc);
1236 }
1237 }
1238 }
1239 }
1240
1241 status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs);
1242
1243 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
1244 "<== nxge_rxdma_hw_mode: status 0x%x", status));
1245
1246 return (status);
1247 }
1248
1249 void
1250 nxge_rxdma_enable_channel(p_nxge_t nxgep, uint16_t channel)
1251 {
1252 npi_handle_t handle;
1253
1254 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
1255 "==> nxge_rxdma_enable_channel: channel %d", channel));
1256
1257 handle = NXGE_DEV_NPI_HANDLE(nxgep);
1258 (void) npi_rxdma_cfg_rdc_enable(handle, channel);
1259
1260 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxdma_enable_channel"));
1261 }
1262
1263 void
1264 nxge_rxdma_disable_channel(p_nxge_t nxgep, uint16_t channel)
1265 {
1266 npi_handle_t handle;
1267
1268 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
1269 "==> nxge_rxdma_disable_channel: channel %d", channel));
1270
1271 handle = NXGE_DEV_NPI_HANDLE(nxgep);
1272 (void) npi_rxdma_cfg_rdc_disable(handle, channel);
1273
1274 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxdma_disable_channel"));
1275 }
1276
1277 void
1278 nxge_hw_start_rx(p_nxge_t nxgep)
1279 {
1280 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_hw_start_rx"));
1281
1282 (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_START);
1283 (void) nxge_rx_mac_enable(nxgep);
1284
1285 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_hw_start_rx"));
1286 }
1287
1288 /*ARGSUSED*/
1289 void
1290 nxge_fixup_rxdma_rings(p_nxge_t nxgep)
1291 {
1292 nxge_grp_set_t *set = &nxgep->rx_set;
1293 int rdc;
1294
1295 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_fixup_rxdma_rings"));
1296
1297 if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) {
1298 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1299 "<== nxge_tx_port_fatal_err_recover: "
1300 "NULL ring pointer(s)"));
1301 return;
1302 }
1303
1304 if (set->owned.map == 0) {
1305 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1306 "nxge_rxdma_regs_dump_channels: no channels"));
1307 return;
1308 }
1309
1310 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
1311 if ((1 << rdc) & set->owned.map) {
1312 rx_rbr_ring_t *ring =
1313 nxgep->rx_rbr_rings->rbr_rings[rdc];
1314 if (ring) {
1315 nxge_rxdma_hw_stop(nxgep, rdc);
1316 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1317 "==> nxge_fixup_rxdma_rings: "
1318 "channel %d ring $%px",
1319 rdc, ring));
1320 (void) nxge_rxdma_fix_channel(nxgep, rdc);
1321 }
1322 }
1323 }
1324
1325 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_fixup_rxdma_rings"));
1326 }
1327
1328 void
1329 nxge_rxdma_fix_channel(p_nxge_t nxgep, uint16_t channel)
1330 {
1331 int ndmas;
1332 p_rx_rbr_rings_t rx_rbr_rings;
1333 p_rx_rbr_ring_t *rbr_rings;
1334 p_rx_rcr_rings_t rx_rcr_rings;
1335 p_rx_rcr_ring_t *rcr_rings;
1336 p_rx_mbox_areas_t rx_mbox_areas_p;
1337 p_rx_mbox_t *rx_mbox_p;
1338 p_nxge_dma_pool_t dma_buf_poolp;
1339 p_nxge_dma_pool_t dma_cntl_poolp;
1340 p_rx_rbr_ring_t rbrp;
1341 p_rx_rcr_ring_t rcrp;
1342 p_rx_mbox_t mboxp;
1343 p_nxge_dma_common_t dmap;
1344 nxge_status_t status = NXGE_OK;
1345
1346 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fix_channel"));
1347
1348 (void) nxge_rxdma_stop_channel(nxgep, channel);
1349
1350 dma_buf_poolp = nxgep->rx_buf_pool_p;
1351 dma_cntl_poolp = nxgep->rx_cntl_pool_p;
1352
1353 if (!dma_buf_poolp->buf_allocated || !dma_cntl_poolp->buf_allocated) {
1354 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
1355 "<== nxge_rxdma_fix_channel: buf not allocated"));
1356 return;
1357 }
1358
1359 ndmas = dma_buf_poolp->ndmas;
1360 if (!ndmas) {
1361 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
1362 "<== nxge_rxdma_fix_channel: no dma allocated"));
1363 return;
1364 }
1365
1366 rx_rbr_rings = nxgep->rx_rbr_rings;
1367 rx_rcr_rings = nxgep->rx_rcr_rings;
1368 rbr_rings = rx_rbr_rings->rbr_rings;
1369 rcr_rings = rx_rcr_rings->rcr_rings;
1370 rx_mbox_areas_p = nxgep->rx_mbox_areas_p;
1371 rx_mbox_p = rx_mbox_areas_p->rxmbox_areas;
1372
1373 /* Reinitialize the receive block and completion rings */
1374 	rbrp = (p_rx_rbr_ring_t)rbr_rings[channel];
1375 	rcrp = (p_rx_rcr_ring_t)rcr_rings[channel];
1376 	mboxp = (p_rx_mbox_t)rx_mbox_p[channel];
1377
1378 rbrp->rbr_wr_index = (rbrp->rbb_max - 1);
1379 rbrp->rbr_rd_index = 0;
1380 rcrp->comp_rd_index = 0;
1381 rcrp->comp_wt_index = 0;
1382
1383 dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc;
1384 bzero((caddr_t)dmap->kaddrp, dmap->alength);
1385
1386 status = nxge_rxdma_start_channel(nxgep, channel,
1387 rbrp, rcrp, mboxp);
1388 if (status != NXGE_OK) {
1389 goto nxge_rxdma_fix_channel_fail;
1390 }
1391
1392 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1393 "<== nxge_rxdma_fix_channel: success (0x%08x)", status));
1394 return;
1395
1396 nxge_rxdma_fix_channel_fail:
1397 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1398 "<== nxge_rxdma_fix_channel: failed (0x%08x)", status));
1399 }
1400
1401 p_rx_rbr_ring_t
1402 nxge_rxdma_get_rbr_ring(p_nxge_t nxgep, uint16_t channel)
1403 {
1404 nxge_grp_set_t *set = &nxgep->rx_set;
1405 nxge_channel_t rdc;
1406
1407 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1408 "==> nxge_rxdma_get_rbr_ring: channel %d", channel));
1409
1410 if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) {
1411 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1412 "<== nxge_rxdma_get_rbr_ring: "
1413 "NULL ring pointer(s)"));
1414 return (NULL);
1415 }
1416
1417 if (set->owned.map == 0) {
1418 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1419 "<== nxge_rxdma_get_rbr_ring: no channels"));
1420 return (NULL);
1421 }
1422
1423 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
1424 if ((1 << rdc) & set->owned.map) {
1425 rx_rbr_ring_t *ring =
1426 nxgep->rx_rbr_rings->rbr_rings[rdc];
1427 if (ring) {
1428 if (channel == ring->rdc) {
1429 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1430 "==> nxge_rxdma_get_rbr_ring: "
1431 "channel %d ring $%p", rdc, ring));
1432 return (ring);
1433 }
1434 }
1435 }
1436 }
1437
1438 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1439 "<== nxge_rxdma_get_rbr_ring: not found"));
1440
1441 return (NULL);
1442 }
1443
1444 p_rx_rcr_ring_t
1445 nxge_rxdma_get_rcr_ring(p_nxge_t nxgep, uint16_t channel)
1446 {
1447 nxge_grp_set_t *set = &nxgep->rx_set;
1448 nxge_channel_t rdc;
1449
1450 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1451 "==> nxge_rxdma_get_rcr_ring: channel %d", channel));
1452
1453 if (nxgep->rx_rcr_rings == 0 || nxgep->rx_rcr_rings->rcr_rings == 0) {
1454 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1455 "<== nxge_rxdma_get_rcr_ring: "
1456 "NULL ring pointer(s)"));
1457 return (NULL);
1458 }
1459
1460 if (set->owned.map == 0) {
1461 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1462 "<== nxge_rxdma_get_rbr_ring: no channels"));
1463 return (NULL);
1464 }
1465
1466 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
1467 if ((1 << rdc) & set->owned.map) {
1468 rx_rcr_ring_t *ring =
1469 nxgep->rx_rcr_rings->rcr_rings[rdc];
1470 if (ring) {
1471 if (channel == ring->rdc) {
1472 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1473 "==> nxge_rxdma_get_rcr_ring: "
1474 "channel %d ring $%p", rdc, ring));
1475 return (ring);
1476 }
1477 }
1478 }
1479 }
1480
1481 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1482 "<== nxge_rxdma_get_rcr_ring: not found"));
1483
1484 return (NULL);
1485 }
1486
1487 /*
1488 * Static functions start here.
1489 */
1490 static p_rx_msg_t
1491 nxge_allocb(size_t size, uint32_t pri, p_nxge_dma_common_t dmabuf_p)
1492 {
1493 p_rx_msg_t nxge_mp = NULL;
1494 p_nxge_dma_common_t dmamsg_p;
1495 uchar_t *buffer;
1496
1497 nxge_mp = KMEM_ZALLOC(sizeof (rx_msg_t), KM_NOSLEEP);
1498 if (nxge_mp == NULL) {
1499 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
1500 "Allocation of a rx msg failed."));
1501 goto nxge_allocb_exit;
1502 }
1503
1504 nxge_mp->use_buf_pool = B_FALSE;
1505 if (dmabuf_p) {
1506 nxge_mp->use_buf_pool = B_TRUE;
1507 dmamsg_p = (p_nxge_dma_common_t)&nxge_mp->buf_dma;
1508 *dmamsg_p = *dmabuf_p;
1509 dmamsg_p->nblocks = 1;
1510 dmamsg_p->block_size = size;
1511 dmamsg_p->alength = size;
1512 buffer = (uchar_t *)dmabuf_p->kaddrp;
1513
1514 dmabuf_p->kaddrp = (void *)
1515 ((char *)dmabuf_p->kaddrp + size);
1516 dmabuf_p->ioaddr_pp = (void *)
1517 ((char *)dmabuf_p->ioaddr_pp + size);
1518 dmabuf_p->alength -= size;
1519 dmabuf_p->offset += size;
1520 dmabuf_p->dma_cookie.dmac_laddress += size;
1521 dmabuf_p->dma_cookie.dmac_size -= size;
1522
1523 } else {
1524 buffer = KMEM_ALLOC(size, KM_NOSLEEP);
1525 if (buffer == NULL) {
1526 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
1527 "Allocation of a receive page failed."));
1528 goto nxge_allocb_fail1;
1529 }
1530 }
1531
1532 nxge_mp->rx_mblk_p = desballoc(buffer, size, pri, &nxge_mp->freeb);
1533 if (nxge_mp->rx_mblk_p == NULL) {
1534 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "desballoc failed."));
1535 goto nxge_allocb_fail2;
1536 }
1537
1538 nxge_mp->buffer = buffer;
1539 nxge_mp->block_size = size;
1540 nxge_mp->freeb.free_func = (void (*)())nxge_freeb;
1541 nxge_mp->freeb.free_arg = (caddr_t)nxge_mp;
1542 nxge_mp->ref_cnt = 1;
1543 nxge_mp->free = B_TRUE;
1544 nxge_mp->rx_use_bcopy = B_FALSE;
1545
1546 atomic_inc_32(&nxge_mblks_pending);
1547
1548 goto nxge_allocb_exit;
1549
1550 nxge_allocb_fail2:
1551 if (!nxge_mp->use_buf_pool) {
1552 KMEM_FREE(buffer, size);
1553 }
1554
1555 nxge_allocb_fail1:
1556 KMEM_FREE(nxge_mp, sizeof (rx_msg_t));
1557 nxge_mp = NULL;
1558
1559 nxge_allocb_exit:
1560 return (nxge_mp);
1561 }
1562
1563 p_mblk_t
1564 nxge_dupb(p_rx_msg_t nxge_mp, uint_t offset, size_t size)
1565 {
1566 p_mblk_t mp;
1567
1568 NXGE_DEBUG_MSG((NULL, MEM_CTL, "==> nxge_dupb"));
1569 NXGE_DEBUG_MSG((NULL, MEM_CTL, "nxge_mp = $%p "
1570 "offset = 0x%08X "
1571 "size = 0x%08X",
1572 nxge_mp, offset, size));
1573
1574 mp = desballoc(&nxge_mp->buffer[offset], size,
1575 0, &nxge_mp->freeb);
1576 if (mp == NULL) {
1577 NXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed"));
1578 goto nxge_dupb_exit;
1579 }
1580 atomic_inc_32(&nxge_mp->ref_cnt);
1581
1582
1583 nxge_dupb_exit:
1584 NXGE_DEBUG_MSG((NULL, MEM_CTL, "<== nxge_dupb mp = $%p",
1585 nxge_mp));
1586 return (mp);
1587 }
1588
1589 p_mblk_t
1590 nxge_dupb_bcopy(p_rx_msg_t nxge_mp, uint_t offset, size_t size)
1591 {
1592 p_mblk_t mp;
1593 uchar_t *dp;
1594
1595 mp = allocb(size + NXGE_RXBUF_EXTRA, 0);
1596 if (mp == NULL) {
1597 		NXGE_DEBUG_MSG((NULL, RX_CTL, "allocb failed"));
1598 goto nxge_dupb_bcopy_exit;
1599 }
1600 dp = mp->b_rptr = mp->b_rptr + NXGE_RXBUF_EXTRA;
1601 bcopy((void *)&nxge_mp->buffer[offset], dp, size);
1602 mp->b_wptr = dp + size;
1603
1604 nxge_dupb_bcopy_exit:
1605 	NXGE_DEBUG_MSG((NULL, MEM_CTL, "<== nxge_dupb_bcopy mp = $%p",
1606 nxge_mp));
1607 return (mp);
1608 }
1609
1610 void nxge_post_page(p_nxge_t nxgep, p_rx_rbr_ring_t rx_rbr_p,
1611 p_rx_msg_t rx_msg_p);
1612
1613 void
1614 nxge_post_page(p_nxge_t nxgep, p_rx_rbr_ring_t rx_rbr_p, p_rx_msg_t rx_msg_p)
1615 {
1616 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_post_page"));
1617
1618 /* Reuse this buffer */
1619 rx_msg_p->free = B_FALSE;
1620 rx_msg_p->cur_usage_cnt = 0;
1621 rx_msg_p->max_usage_cnt = 0;
1622 rx_msg_p->pkt_buf_size = 0;
1623
1624 if (rx_rbr_p->rbr_use_bcopy) {
1625 rx_msg_p->rx_use_bcopy = B_FALSE;
1626 atomic_dec_32(&rx_rbr_p->rbr_consumed);
1627 }
1628
1629 /*
1630 * Get the rbr header pointer and its offset index.
1631 */
1632 MUTEX_ENTER(&rx_rbr_p->post_lock);
1633 rx_rbr_p->rbr_wr_index = ((rx_rbr_p->rbr_wr_index + 1) &
1634 rx_rbr_p->rbr_wrap_mask);
1635 rx_rbr_p->rbr_desc_vp[rx_rbr_p->rbr_wr_index] = rx_msg_p->shifted_addr;
1636 MUTEX_EXIT(&rx_rbr_p->post_lock);
1637 npi_rxdma_rdc_rbr_kick(NXGE_DEV_NPI_HANDLE(nxgep),
1638 rx_rbr_p->rdc, 1);
1639
1640 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1641 "<== nxge_post_page (channel %d post_next_index %d)",
1642 rx_rbr_p->rdc, rx_rbr_p->rbr_wr_index));
1643
1644 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_post_page"));
1645 }
1646
1647 void
1648 nxge_freeb(p_rx_msg_t rx_msg_p)
1649 {
1650 size_t size;
1651 uchar_t *buffer = NULL;
1652 int ref_cnt;
1653 boolean_t free_state = B_FALSE;
1654
1655 rx_rbr_ring_t *ring = rx_msg_p->rx_rbr_p;
1656
1657 NXGE_DEBUG_MSG((NULL, MEM2_CTL, "==> nxge_freeb"));
1658 NXGE_DEBUG_MSG((NULL, MEM2_CTL,
1659 "nxge_freeb:rx_msg_p = $%p (block pending %d)",
1660 rx_msg_p, nxge_mblks_pending));
1661
1662 /*
1663 * First we need to get the free state, then
1664 * atomic decrement the reference count to prevent
1665 * the race condition with the interrupt thread that
1666 * is processing a loaned up buffer block.
1667 */
1668 free_state = rx_msg_p->free;
1669 ref_cnt = atomic_dec_32_nv(&rx_msg_p->ref_cnt);
1670 if (!ref_cnt) {
1671 atomic_dec_32(&nxge_mblks_pending);
1672 buffer = rx_msg_p->buffer;
1673 size = rx_msg_p->block_size;
1674 NXGE_DEBUG_MSG((NULL, MEM2_CTL, "nxge_freeb: "
1675 "will free: rx_msg_p = $%p (block pending %d)",
1676 rx_msg_p, nxge_mblks_pending));
1677
1678 if (!rx_msg_p->use_buf_pool) {
1679 KMEM_FREE(buffer, size);
1680 }
1681
1682 KMEM_FREE(rx_msg_p, sizeof (rx_msg_t));
1683
1684 if (ring) {
1685 /*
1686 * Decrement the receive buffer ring's reference
1687 * count, too.
1688 */
1689 atomic_dec_32(&ring->rbr_ref_cnt);
1690
1691 /*
1692 * Free the receive buffer ring, if
1693 * 1. all the receive buffers have been freed
1694 * 2. and we are in the proper state (that is,
1695 * we are not UNMAPPING).
1696 */
1697 if (ring->rbr_ref_cnt == 0 &&
1698 ring->rbr_state == RBR_UNMAPPED) {
1699 /*
1700 * Free receive data buffers,
1701 * buffer index information
1702 * (rxring_info) and
1703 * the message block ring.
1704 */
1705 NXGE_DEBUG_MSG((NULL, RX_CTL,
1706 "nxge_freeb:rx_msg_p = $%p "
1707 "(block pending %d) free buffers",
1708 rx_msg_p, nxge_mblks_pending));
1709 nxge_rxdma_databuf_free(ring);
1710 if (ring->ring_info) {
1711 KMEM_FREE(ring->ring_info,
1712 sizeof (rxring_info_t));
1713 }
1714
1715 if (ring->rx_msg_ring) {
1716 KMEM_FREE(ring->rx_msg_ring,
1717 ring->tnblocks *
1718 sizeof (p_rx_msg_t));
1719 }
1720 KMEM_FREE(ring, sizeof (*ring));
1721 }
1722 }
1723 return;
1724 }
1725
1726 /*
1727 * Repost buffer.
1728 */
1729 if (free_state && (ref_cnt == 1) && ring) {
1730 NXGE_DEBUG_MSG((NULL, RX_CTL,
1731 "nxge_freeb: post page $%p:", rx_msg_p));
1732 if (ring->rbr_state == RBR_POSTING)
1733 nxge_post_page(rx_msg_p->nxgep, ring, rx_msg_p);
1734 }
1735
1736 NXGE_DEBUG_MSG((NULL, MEM2_CTL, "<== nxge_freeb"));
1737 }
1738
1739 uint_t
nxge_rx_intr(char * arg1,char * arg2)1740 nxge_rx_intr(char *arg1, char *arg2)
1741 {
1742 p_nxge_ldv_t ldvp = (p_nxge_ldv_t)arg1;
1743 p_nxge_t nxgep = (p_nxge_t)arg2;
1744 p_nxge_ldg_t ldgp;
1745 uint8_t channel;
1746 npi_handle_t handle;
1747 rx_dma_ctl_stat_t cs;
1748 p_rx_rcr_ring_t rcrp;
1749 mblk_t *mp = NULL;
1750
1751 if (ldvp == NULL) {
1752 NXGE_DEBUG_MSG((NULL, INT_CTL,
1753 "<== nxge_rx_intr: arg2 $%p arg1 $%p",
1754 nxgep, ldvp));
1755 return (DDI_INTR_CLAIMED);
1756 }
1757
1758 if (arg2 == NULL || (void *)ldvp->nxgep != arg2) {
1759 nxgep = ldvp->nxgep;
1760 }
1761
1762 if ((!(nxgep->drv_state & STATE_HW_INITIALIZED)) ||
1763 (nxgep->nxge_mac_state != NXGE_MAC_STARTED)) {
1764 NXGE_DEBUG_MSG((nxgep, INT_CTL,
1765 "<== nxge_rx_intr: interface not started or intialized"));
1766 return (DDI_INTR_CLAIMED);
1767 }
1768
1769 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1770 "==> nxge_rx_intr: arg2 $%p arg1 $%p",
1771 nxgep, ldvp));
1772
1773 /*
1774 * Get the PIO handle.
1775 */
1776 handle = NXGE_DEV_NPI_HANDLE(nxgep);
1777
1778 /*
1779 * Get the ring to enable us to process packets.
1780 */
1781 rcrp = nxgep->rx_rcr_rings->rcr_rings[ldvp->vdma_index];
1782
1783 /*
1784 * The RCR ring lock must be held when packets
1785 * are being processed and the hardware registers are
1786 * being read or written to prevent race condition
1787 * among the interrupt thread, the polling thread
1788 * (will cause fatal errors such as rcrincon bit set)
1789 * and the setting of the poll_flag.
1790 */
1791 MUTEX_ENTER(&rcrp->lock);
1792
1793 /*
1794 * Get the control and status for this channel.
1795 */
1796 channel = ldvp->channel;
1797 ldgp = ldvp->ldgp;
1798
1799 if (!isLDOMguest(nxgep) && (!rcrp->started)) {
1800 NXGE_DEBUG_MSG((nxgep, INT_CTL,
1801 "<== nxge_rx_intr: channel is not started"));
1802
1803 /*
1804 * We received an interrupt before the ring is started.
1805 */
1806 RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
1807 &cs.value);
1808 cs.value &= RX_DMA_CTL_STAT_WR1C;
1809 cs.bits.hdw.mex = 1;
1810 RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
1811 cs.value);
1812
1813 /*
1814 * Rearm this logical group if this is a single device
1815 * group.
1816 */
1817 if (ldgp->nldvs == 1) {
1818 if (isLDOMguest(nxgep)) {
1819 nxge_hio_ldgimgn(nxgep, ldgp);
1820 } else {
1821 ldgimgm_t mgm;
1822
1823 mgm.value = 0;
1824 mgm.bits.ldw.arm = 1;
1825 mgm.bits.ldw.timer = ldgp->ldg_timer;
1826
1827 NXGE_REG_WR64(handle,
1828 LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg),
1829 mgm.value);
1830 }
1831 }
1832 MUTEX_EXIT(&rcrp->lock);
1833 return (DDI_INTR_CLAIMED);
1834 }
1835
1836 ASSERT(rcrp->ldgp == ldgp);
1837 ASSERT(rcrp->ldvp == ldvp);
1838
1839 RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel, &cs.value);
1840
1841 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_intr:channel %d "
1842 "cs 0x%016llx rcrto 0x%x rcrthres %x",
1843 channel,
1844 cs.value,
1845 cs.bits.hdw.rcrto,
1846 cs.bits.hdw.rcrthres));
1847
1848 if (!rcrp->poll_flag) {
1849 mp = nxge_rx_pkts(nxgep, rcrp, cs, -1);
1850 }
1851
1852 /* error events. */
1853 if (cs.value & RX_DMA_CTL_STAT_ERROR) {
1854 (void) nxge_rx_err_evnts(nxgep, channel, cs);
1855 }
1856
1857 /*
1858 * Enable the mailbox update interrupt if we want
1859 * to use mailbox. We probably don't need to use
1860 * mailbox as it only saves us one pio read.
1861 * Also write 1 to rcrthres and rcrto to clear
1862 * these two edge triggered bits.
1863 */
1864 cs.value &= RX_DMA_CTL_STAT_WR1C;
1865 cs.bits.hdw.mex = rcrp->poll_flag ? 0 : 1;
1866 RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
1867 cs.value);
1868
1869 /*
1870 * If the polling mode is enabled, disable the interrupt.
1871 */
1872 if (rcrp->poll_flag) {
1873 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
1874 "==> nxge_rx_intr: rdc %d ldgp $%p ldvp $%p "
1875 "(disabling interrupts)", channel, ldgp, ldvp));
1876
1877 /*
1878 * Disarm this logical group if this is a single device
1879 * group.
1880 */
1881 if (ldgp->nldvs == 1) {
1882 if (isLDOMguest(nxgep)) {
1883 ldgp->arm = B_FALSE;
1884 nxge_hio_ldgimgn(nxgep, ldgp);
1885 } else {
1886 ldgimgm_t mgm;
1887 mgm.value = 0;
1888 mgm.bits.ldw.arm = 0;
1889 NXGE_REG_WR64(handle,
1890 LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg),
1891 mgm.value);
1892 }
1893 }
1894 } else {
1895 /*
1896 * Rearm this logical group if this is a single device
1897 * group.
1898 */
1899 if (ldgp->nldvs == 1) {
1900 if (isLDOMguest(nxgep)) {
1901 nxge_hio_ldgimgn(nxgep, ldgp);
1902 } else {
1903 ldgimgm_t mgm;
1904
1905 mgm.value = 0;
1906 mgm.bits.ldw.arm = 1;
1907 mgm.bits.ldw.timer = ldgp->ldg_timer;
1908
1909 NXGE_REG_WR64(handle,
1910 LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg),
1911 mgm.value);
1912 }
1913 }
1914
1915 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
1916 "==> nxge_rx_intr: rdc %d ldgp $%p "
1917 "exiting ISR (and call mac_rx_ring)", channel, ldgp));
1918 }
1919 MUTEX_EXIT(&rcrp->lock);
1920
1921 if (mp != NULL) {
1922 mac_rx_ring(nxgep->mach, rcrp->rcr_mac_handle, mp,
1923 rcrp->rcr_gen_num);
1924 }
1925 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rx_intr: DDI_INTR_CLAIMED"));
1926 return (DDI_INTR_CLAIMED);
1927 }
1928
1929 /*
1930 * This routine is the main packet receive processing function.
1931 * It gets the packet type, error code, and buffer related
1932 * information from the receive completion entry.
1933 * How many completion entries to process is based on the number of packets
1934 * queued by the hardware, a hardware maintained tail pointer
1935 * and a configurable receive packet count.
1936 *
1937  * A chain of message blocks will be created as a result of processing
1938  * the completion entries. This chain of message blocks will be returned and
1939  * a hardware control status register will be updated with the number of
1940  * packets that were removed from the hardware queue.
1941 *
1942 * The RCR ring lock is held when entering this function.
1943 */
1944 static mblk_t *
1945 nxge_rx_pkts(p_nxge_t nxgep, p_rx_rcr_ring_t rcr_p, rx_dma_ctl_stat_t cs,
1946 int bytes_to_pickup)
1947 {
1948 npi_handle_t handle;
1949 uint8_t channel;
1950 uint32_t comp_rd_index;
1951 p_rcr_entry_t rcr_desc_rd_head_p;
1952 p_rcr_entry_t rcr_desc_rd_head_pp;
1953 p_mblk_t nmp, mp_cont, head_mp, *tail_mp;
1954 uint16_t qlen, nrcr_read, npkt_read;
1955 uint32_t qlen_hw;
1956 boolean_t multi;
1957 rcrcfig_b_t rcr_cfg_b;
1958 int totallen = 0;
1959 #if defined(_BIG_ENDIAN)
1960 npi_status_t rs = NPI_SUCCESS;
1961 #endif
1962
1963 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_rx_pkts: "
1964 "channel %d", rcr_p->rdc));
1965
1966 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
1967 return (NULL);
1968 }
1969 handle = NXGE_DEV_NPI_HANDLE(nxgep);
1970 channel = rcr_p->rdc;
1971
1972 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1973 "==> nxge_rx_pkts: START: rcr channel %d "
1974 "head_p $%p head_pp $%p index %d ",
1975 channel, rcr_p->rcr_desc_rd_head_p,
1976 rcr_p->rcr_desc_rd_head_pp,
1977 rcr_p->comp_rd_index));
1978
1979
1980 #if !defined(_BIG_ENDIAN)
1981 qlen = RXDMA_REG_READ32(handle, RCRSTAT_A_REG, channel) & 0xffff;
1982 #else
1983 rs = npi_rxdma_rdc_rcr_qlen_get(handle, channel, &qlen);
1984 if (rs != NPI_SUCCESS) {
1985 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts: "
1986 "channel %d, get qlen failed 0x%08x",
1987 channel, rs));
1988 return (NULL);
1989 }
1990 #endif
1991 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts:rcr channel %d "
1992 "qlen %d", channel, qlen));
1993
1994
1995
1996 if (!qlen) {
1997 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
1998 "==> nxge_rx_pkts:rcr channel %d "
1999 "qlen %d (no pkts)", channel, qlen));
2000
2001 return (NULL);
2002 }
2003
2004 comp_rd_index = rcr_p->comp_rd_index;
2005
2006 rcr_desc_rd_head_p = rcr_p->rcr_desc_rd_head_p;
2007 rcr_desc_rd_head_pp = rcr_p->rcr_desc_rd_head_pp;
2008 nrcr_read = npkt_read = 0;
2009
2010 /*
2011 * Number of packets queued.
2012 * (A jumbo or multi-buffer packet is counted as only one
2013 * packet, and it may take up more than one completion entry.)
2014 */
2015 qlen_hw = (qlen < nxge_max_rx_pkts) ?
2016 qlen : nxge_max_rx_pkts;
2017 head_mp = NULL;
2018 tail_mp = &head_mp;
2019 nmp = mp_cont = NULL;
2020 multi = B_FALSE;
2021
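/*
 * Each iteration of this loop consumes one RCR completion entry.
 * A frame that spans multiple buffers produces multiple entries but
 * is counted as a single packet: qlen_hw and npkt_read are only
 * updated on the final entry of a frame (when "multi" is clear).
 */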
2022 while (qlen_hw) {
2023
2024 #ifdef NXGE_DEBUG
2025 nxge_dump_rcr_entry(nxgep, rcr_desc_rd_head_p);
2026 #endif
2027 /*
2028 * Process one completion ring entry.
2029 */
2030 nxge_receive_packet(nxgep,
2031 rcr_p, rcr_desc_rd_head_p, &multi, &nmp, &mp_cont);
2032
2033 /*
2034 * Message chaining: frames link via b_next; multi-buffer segments via b_cont.
2035 */
2036 if (nmp) {
2037 nmp->b_next = NULL;
2038 if (!multi && !mp_cont) { /* frame fits a partition */
2039 *tail_mp = nmp;
2040 tail_mp = &nmp->b_next;
2041 totallen += MBLKL(nmp);
2042 nmp = NULL;
2043 } else if (multi && !mp_cont) { /* first segment */
2044 *tail_mp = nmp;
2045 tail_mp = &nmp->b_cont;
2046 totallen += MBLKL(nmp);
2047 } else if (multi && mp_cont) { /* mid of multi segs */
2048 *tail_mp = mp_cont;
2049 tail_mp = &mp_cont->b_cont;
2050 totallen += MBLKL(mp_cont);
2051 } else if (!multi && mp_cont) { /* last segment */
2052 *tail_mp = mp_cont;
2053 tail_mp = &nmp->b_next;
2054 totallen += MBLKL(mp_cont);
2055 nmp = NULL;
2056 }
2057 }
2058 NXGE_DEBUG_MSG((nxgep, RX_CTL,
2059 "==> nxge_rx_pkts: loop: rcr channel %d "
2060 "before updating: multi %d "
2061 "nrcr_read %d "
2062 "npk read %d "
2063 "head_pp $%p index %d ",
2064 channel,
2065 multi,
2066 nrcr_read, npkt_read, rcr_desc_rd_head_pp,
2067 comp_rd_index));
2068
2069 if (!multi) {
2070 qlen_hw--;
2071 npkt_read++;
2072 }
2073
2074 /*
2075 * Update the next read entry.
2076 */
2077 comp_rd_index = NEXT_ENTRY(comp_rd_index,
2078 rcr_p->comp_wrap_mask);
2079
2080 rcr_desc_rd_head_p = NEXT_ENTRY_PTR(rcr_desc_rd_head_p,
2081 rcr_p->rcr_desc_first_p,
2082 rcr_p->rcr_desc_last_p);
2083
2084 nrcr_read++;
2085
2086 NXGE_DEBUG_MSG((nxgep, RX_CTL,
2087 "<== nxge_rx_pkts: (SAM, process one packet) "
2088 "nrcr_read %d",
2089 nrcr_read));
2090 NXGE_DEBUG_MSG((nxgep, RX_CTL,
2091 "==> nxge_rx_pkts: loop: rcr channel %d "
2092 "multi %d "
2093 "nrcr_read %d "
2094 "npk read %d "
2095 "head_pp $%p index %d ",
2096 channel,
2097 multi,
2098 nrcr_read, npkt_read, rcr_desc_rd_head_pp,
2099 comp_rd_index));
2100
2101 if ((bytes_to_pickup != -1) &&
2102 (totallen >= bytes_to_pickup)) {
2103 break;
2104 }
2105 }
2106
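/*
 * Publish the updated read state back to the ring, and if the
 * interrupt timeout/threshold tunables have changed, reprogram
 * RCRCFIG_B with the new (clamped) values.
 */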
2107 rcr_p->rcr_desc_rd_head_pp = rcr_desc_rd_head_pp;
2108 rcr_p->comp_rd_index = comp_rd_index;
2109 rcr_p->rcr_desc_rd_head_p = rcr_desc_rd_head_p;
2110 if ((nxgep->intr_timeout != rcr_p->intr_timeout) ||
2111 (nxgep->intr_threshold != rcr_p->intr_threshold)) {
2112
2113 rcr_p->intr_timeout = (nxgep->intr_timeout <
2114 NXGE_RDC_RCR_TIMEOUT_MIN) ? NXGE_RDC_RCR_TIMEOUT_MIN :
2115 nxgep->intr_timeout;
2116
2117 rcr_p->intr_threshold = (nxgep->intr_threshold <
2118 NXGE_RDC_RCR_THRESHOLD_MIN) ? NXGE_RDC_RCR_THRESHOLD_MIN :
2119 nxgep->intr_threshold;
2120
2121 rcr_cfg_b.value = 0x0ULL;
2122 rcr_cfg_b.bits.ldw.entout = 1;
2123 rcr_cfg_b.bits.ldw.timeout = rcr_p->intr_timeout;
2124 rcr_cfg_b.bits.ldw.pthres = rcr_p->intr_threshold;
2125
2126 RXDMA_REG_WRITE64(handle, RCRCFIG_B_REG,
2127 channel, rcr_cfg_b.value);
2128 }
2129
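/*
 * Tell the hardware how many packets and RCR entries were consumed
 * so that it can update its queue length accordingly.
 */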
2130 cs.bits.ldw.pktread = npkt_read;
2131 cs.bits.ldw.ptrread = nrcr_read;
2132 RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG,
2133 channel, cs.value);
2134 NXGE_DEBUG_MSG((nxgep, RX_CTL,
2135 "==> nxge_rx_pkts: EXIT: rcr channel %d "
2136 "head_pp $%p index %016llx ",
2137 channel,
2138 rcr_p->rcr_desc_rd_head_pp,
2139 rcr_p->comp_rd_index));
2140 /*
2141 * Update RCR buffer pointer read and number of packets
2142 * read.
2143 */
2144
2145 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_rx_pkts: return "
2146 "channel %d", rcr_p->rdc));
2147
2148 return (head_mp);
2149 }
2150
2151 void
2152 nxge_receive_packet(p_nxge_t nxgep,
2153 p_rx_rcr_ring_t rcr_p, p_rcr_entry_t rcr_desc_rd_head_p,
2154 boolean_t *multi_p, mblk_t **mp, mblk_t **mp_cont)
2155 {
2156 p_mblk_t nmp = NULL;
2157 uint64_t multi;
2158 uint64_t dcf_err;
2159 uint8_t channel;
2160
2161 boolean_t first_entry = B_TRUE;
2162 boolean_t is_tcp_udp = B_FALSE;
2163 boolean_t buffer_free = B_FALSE;
2164 boolean_t error_send_up = B_FALSE;
2165 uint8_t error_type;
2166 uint16_t l2_len;
2167 uint16_t skip_len;
2168 uint8_t pktbufsz_type;
2169 uint64_t rcr_entry;
2170 uint64_t *pkt_buf_addr_pp;
2171 uint64_t *pkt_buf_addr_p;
2172 uint32_t buf_offset;
2173 uint32_t bsize;
2174 uint32_t error_disp_cnt;
2175 uint32_t msg_index;
2176 p_rx_rbr_ring_t rx_rbr_p;
2177 p_rx_msg_t *rx_msg_ring_p;
2178 p_rx_msg_t rx_msg_p;
2179 uint16_t sw_offset_bytes = 0, hdr_size = 0;
2180 nxge_status_t status = NXGE_OK;
2181 boolean_t is_valid = B_FALSE;
2182 p_nxge_rx_ring_stats_t rdc_stats;
2183 uint32_t bytes_read;
2184 uint64_t pkt_type;
2185 uint64_t frag;
2186 boolean_t pkt_too_long_err = B_FALSE;
2187 #ifdef NXGE_DEBUG
2188 int dump_len;
2189 #endif
2190 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_receive_packet"));
2191 first_entry = (*mp == NULL) ? B_TRUE : B_FALSE;
2192
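/*
 * Parse the completion entry: multi/error/fragment flags, packet
 * type, L2 length, packet buffer size code, and the (shifted)
 * packet buffer DMA address.
 */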
2193 rcr_entry = *((uint64_t *)rcr_desc_rd_head_p);
2194
2195 multi = (rcr_entry & RCR_MULTI_MASK);
2196 dcf_err = (rcr_entry & RCR_DCF_ERROR_MASK);
2197 pkt_type = (rcr_entry & RCR_PKT_TYPE_MASK);
2198
2199 error_type = ((rcr_entry & RCR_ERROR_MASK) >> RCR_ERROR_SHIFT);
2200 frag = (rcr_entry & RCR_FRAG_MASK);
2201
2202 l2_len = ((rcr_entry & RCR_L2_LEN_MASK) >> RCR_L2_LEN_SHIFT);
2203
2204 pktbufsz_type = ((rcr_entry & RCR_PKTBUFSZ_MASK) >>
2205 RCR_PKTBUFSZ_SHIFT);
2206 pkt_buf_addr_pp = (uint64_t *)((rcr_entry & RCR_PKT_BUF_ADDR_MASK) <<
2207 RCR_PKT_BUF_ADDR_SHIFT);
2208
2209 channel = rcr_p->rdc;
2210
2211 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2212 "==> nxge_receive_packet: entryp $%p entry 0x%0llx "
2213 "pkt_buf_addr_pp $%p l2_len %d multi 0x%llx "
2214 "error_type 0x%x pkt_type 0x%x "
2215 "pktbufsz_type %d ",
2216 rcr_desc_rd_head_p,
2217 rcr_entry, pkt_buf_addr_pp, l2_len,
2218 multi,
2219 error_type,
2220 pkt_type,
2221 pktbufsz_type));
2222
2223 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2224 "==> nxge_receive_packet: entryp $%p entry 0x%0llx "
2225 "pkt_buf_addr_pp $%p l2_len %d multi 0x%llx "
2226 "error_type 0x%x pkt_type 0x%x ", rcr_desc_rd_head_p,
2227 rcr_entry, pkt_buf_addr_pp, l2_len,
2228 multi,
2229 error_type,
2230 pkt_type));
2231
2232 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2233 "==> (rbr) nxge_receive_packet: entry 0x%0llx "
2234 "full pkt_buf_addr_pp $%p l2_len %d",
2235 rcr_entry, pkt_buf_addr_pp, l2_len));
2236
2237 /* get the stats ptr */
2238 rdc_stats = rcr_p->rdc_stats;
2239
2240 if (!l2_len) {
2241
2242 NXGE_DEBUG_MSG((nxgep, RX_CTL,
2243 "<== nxge_receive_packet: failed: l2 length is 0."));
2244 return;
2245 }
2246
2247 /*
2248 * Software workaround for BMAC hardware limitation that allows
2249 * maxframe size of 1526, instead of 1522 for non-jumbo and 0x2406
2250 * instead of 0x2400 for jumbo.
2251 */
2252 if (l2_len > nxgep->mac.maxframesize) {
2253 pkt_too_long_err = B_TRUE;
2254 }
2255
2256 /* Hardware sends us 4 bytes of CRC as no stripping is done. */
2257 l2_len -= ETHERFCSL;
2258
2259 /* shift 6 bits to get the full io address */
2260 pkt_buf_addr_pp = (uint64_t *)((uint64_t)pkt_buf_addr_pp <<
2261 RCR_PKT_BUF_ADDR_SHIFT_FULL);
2262 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2263 "==> (rbr) nxge_receive_packet: entry 0x%0llx "
2264 "full pkt_buf_addr_pp $%p l2_len %d",
2265 rcr_entry, pkt_buf_addr_pp, l2_len));
2266
2267 rx_rbr_p = rcr_p->rx_rbr_p;
2268 rx_msg_ring_p = rx_rbr_p->rx_msg_ring;
2269
2270 if (first_entry) {
2271 hdr_size = (rcr_p->full_hdr_flag ? RXDMA_HDR_SIZE_FULL :
2272 RXDMA_HDR_SIZE_DEFAULT);
2273
2274 NXGE_DEBUG_MSG((nxgep, RX_CTL,
2275 "==> nxge_receive_packet: first entry 0x%016llx "
2276 "pkt_buf_addr_pp $%p l2_len %d hdr %d",
2277 rcr_entry, pkt_buf_addr_pp, l2_len,
2278 hdr_size));
2279 }
2280
2281 MUTEX_ENTER(&rx_rbr_p->lock);
2282
2283 NXGE_DEBUG_MSG((nxgep, RX_CTL,
2284 "==> (rbr 1) nxge_receive_packet: entry 0x%0llx "
2285 "full pkt_buf_addr_pp $%p l2_len %d",
2286 rcr_entry, pkt_buf_addr_pp, l2_len));
2287
2288 /*
2289 * Packet buffer address in the completion entry points
2290 * to the starting buffer address (offset 0).
2291 * Use the starting buffer address to locate the corresponding
2292 * kernel address.
2293 */
2294 status = nxge_rxbuf_pp_to_vp(nxgep, rx_rbr_p,
2295 pktbufsz_type, pkt_buf_addr_pp, &pkt_buf_addr_p,
2296 &buf_offset,
2297 &msg_index);
2298
2299 NXGE_DEBUG_MSG((nxgep, RX_CTL,
2300 "==> (rbr 2) nxge_receive_packet: entry 0x%0llx "
2301 "full pkt_buf_addr_pp $%p l2_len %d",
2302 rcr_entry, pkt_buf_addr_pp, l2_len));
2303
2304 if (status != NXGE_OK) {
2305 MUTEX_EXIT(&rx_rbr_p->lock);
2306 NXGE_DEBUG_MSG((nxgep, RX_CTL,
2307 "<== nxge_receive_packet: found vaddr failed %d",
2308 status));
2309 return;
2310 }
2311
2312 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2313 "==> (rbr 3) nxge_receive_packet: entry 0x%0llx "
2314 "full pkt_buf_addr_pp $%p l2_len %d",
2315 rcr_entry, pkt_buf_addr_pp, l2_len));
2316
2317 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2318 "==> (rbr 4 msgindex %d) nxge_receive_packet: entry 0x%0llx "
2319 "full pkt_buf_addr_pp $%p l2_len %d",
2320 msg_index, rcr_entry, pkt_buf_addr_pp, l2_len));
2321
2322 rx_msg_p = rx_msg_ring_p[msg_index];
2323
2324 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2325 "==> (rbr 4 msgindex %d) nxge_receive_packet: entry 0x%0llx "
2326 "full pkt_buf_addr_pp $%p l2_len %d",
2327 msg_index, rcr_entry, pkt_buf_addr_pp, l2_len));
2328
2329 switch (pktbufsz_type) {
2330 case RCR_PKTBUFSZ_0:
2331 bsize = rx_rbr_p->pkt_buf_size0_bytes;
2332 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2333 "==> nxge_receive_packet: 0 buf %d", bsize));
2334 break;
2335 case RCR_PKTBUFSZ_1:
2336 bsize = rx_rbr_p->pkt_buf_size1_bytes;
2337 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2338 "==> nxge_receive_packet: 1 buf %d", bsize));
2339 break;
2340 case RCR_PKTBUFSZ_2:
2341 bsize = rx_rbr_p->pkt_buf_size2_bytes;
2342 NXGE_DEBUG_MSG((nxgep, RX_CTL,
2343 "==> nxge_receive_packet: 2 buf %d", bsize));
2344 break;
2345 case RCR_SINGLE_BLOCK:
2346 bsize = rx_msg_p->block_size;
2347 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2348 "==> nxge_receive_packet: single %d", bsize));
2349
2350 break;
2351 default:
2352 MUTEX_EXIT(&rx_rbr_p->lock);
2353 return;
2354 }
2355
2356 switch (nxge_rdc_buf_offset) {
2357 case SW_OFFSET_NO_OFFSET:
2358 sw_offset_bytes = 0;
2359 break;
2360 case SW_OFFSET_64:
2361 sw_offset_bytes = 64;
2362 break;
2363 case SW_OFFSET_128:
2364 sw_offset_bytes = 128;
2365 break;
2366 case SW_OFFSET_192:
2367 sw_offset_bytes = 192;
2368 break;
2369 case SW_OFFSET_256:
2370 sw_offset_bytes = 256;
2371 break;
2372 case SW_OFFSET_320:
2373 sw_offset_bytes = 320;
2374 break;
2375 case SW_OFFSET_384:
2376 sw_offset_bytes = 384;
2377 break;
2378 case SW_OFFSET_448:
2379 sw_offset_bytes = 448;
2380 break;
2381 default:
2382 sw_offset_bytes = 0;
2383 break;
2384 }
2385
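/*
 * Sync the portion of the buffer holding this packet (header plus
 * payload) for CPU access before it is examined or copied.
 */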
2386 DMA_COMMON_SYNC_OFFSET(rx_msg_p->buf_dma,
2387 (buf_offset + sw_offset_bytes),
2388 (hdr_size + l2_len),
2389 DDI_DMA_SYNC_FORCPU);
2390
2391 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2392 "==> nxge_receive_packet: after first dump:usage count"));
2393
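/*
 * Account for how many packets are carved out of this buffer block.
 * On first use, decide whether to bcopy (based on how many RBR
 * buffers are currently consumed relative to the thresholds) and
 * compute max_usage_cnt = block_size / bsize.  Once cur_usage_cnt
 * reaches max_usage_cnt, the block may be freed and reposted.
 */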
2394 if (rx_msg_p->cur_usage_cnt == 0) {
2395 if (rx_rbr_p->rbr_use_bcopy) {
2396 atomic_inc_32(&rx_rbr_p->rbr_consumed);
2397 if (rx_rbr_p->rbr_consumed <
2398 rx_rbr_p->rbr_threshold_hi) {
2399 if (rx_rbr_p->rbr_threshold_lo == 0 ||
2400 ((rx_rbr_p->rbr_consumed >=
2401 rx_rbr_p->rbr_threshold_lo) &&
2402 (rx_rbr_p->rbr_bufsize_type >=
2403 pktbufsz_type))) {
2404 rx_msg_p->rx_use_bcopy = B_TRUE;
2405 }
2406 } else {
2407 rx_msg_p->rx_use_bcopy = B_TRUE;
2408 }
2409 }
2410 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2411 "==> nxge_receive_packet: buf %d (new block) ",
2412 bsize));
2413
2414 rx_msg_p->pkt_buf_size_code = pktbufsz_type;
2415 rx_msg_p->pkt_buf_size = bsize;
2416 rx_msg_p->cur_usage_cnt = 1;
2417 if (pktbufsz_type == RCR_SINGLE_BLOCK) {
2418 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2419 "==> nxge_receive_packet: buf %d "
2420 "(single block) ",
2421 bsize));
2422 /*
2423 * Buffer can be reused once the free function
2424 * is called.
2425 */
2426 rx_msg_p->max_usage_cnt = 1;
2427 buffer_free = B_TRUE;
2428 } else {
2429 rx_msg_p->max_usage_cnt = rx_msg_p->block_size/bsize;
2430 if (rx_msg_p->max_usage_cnt == 1) {
2431 buffer_free = B_TRUE;
2432 }
2433 }
2434 } else {
2435 rx_msg_p->cur_usage_cnt++;
2436 if (rx_msg_p->cur_usage_cnt == rx_msg_p->max_usage_cnt) {
2437 buffer_free = B_TRUE;
2438 }
2439 }
2440
2441 NXGE_DEBUG_MSG((nxgep, RX_CTL,
2442 "msgbuf index = %d l2len %d bytes usage %d max_usage %d ",
2443 msg_index, l2_len,
2444 rx_msg_p->cur_usage_cnt, rx_msg_p->max_usage_cnt));
2445
2446 if ((error_type) || (dcf_err) || (pkt_too_long_err)) {
2447 rdc_stats->ierrors++;
2448 if (dcf_err) {
2449 rdc_stats->dcf_err++;
2450 #ifdef NXGE_DEBUG
2451 if (!rdc_stats->dcf_err) {
2452 NXGE_DEBUG_MSG((nxgep, RX_CTL,
2453 "nxge_receive_packet: channel %d dcf_err rcr"
2454 " 0x%llx", channel, rcr_entry));
2455 }
2456 #endif
2457 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, 0,
2458 NXGE_FM_EREPORT_RDMC_DCF_ERR);
2459 } else if (pkt_too_long_err) {
2460 rdc_stats->pkt_too_long_err++;
2461 NXGE_DEBUG_MSG((nxgep, RX_CTL, " nxge_receive_packet:"
2462 " channel %d packet length [%d] > "
2463 "maxframesize [%d]", channel, l2_len + ETHERFCSL,
2464 nxgep->mac.maxframesize));
2465 } else {
2466 /* Update error stats */
2467 error_disp_cnt = NXGE_ERROR_SHOW_MAX;
2468 rdc_stats->errlog.compl_err_type = error_type;
2469
2470 switch (error_type) {
2471 /*
2472 * Do not send FMA ereport for RCR_L2_ERROR and
2473 * RCR_L4_CSUM_ERROR because most likely they indicate
2474 * back pressure rather than HW failures.
2475 */
2476 case RCR_L2_ERROR:
2477 rdc_stats->l2_err++;
2478 if (rdc_stats->l2_err <
2479 error_disp_cnt) {
2480 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2481 " nxge_receive_packet:"
2482 " channel %d RCR L2_ERROR",
2483 channel));
2484 }
2485 break;
2486 case RCR_L4_CSUM_ERROR:
2487 error_send_up = B_TRUE;
2488 rdc_stats->l4_cksum_err++;
2489 if (rdc_stats->l4_cksum_err <
2490 error_disp_cnt) {
2491 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2492 " nxge_receive_packet:"
2493 " channel %d"
2494 " RCR L4_CSUM_ERROR", channel));
2495 }
2496 break;
2497 /*
2498 * Do not send FMA ereport for RCR_FFLP_SOFT_ERROR and
2499 * RCR_ZCP_SOFT_ERROR because they reflect the same
2500 * FFLP and ZCP errors that have been reported by
2501 * nxge_fflp.c and nxge_zcp.c.
2502 */
2503 case RCR_FFLP_SOFT_ERROR:
2504 error_send_up = B_TRUE;
2505 rdc_stats->fflp_soft_err++;
2506 if (rdc_stats->fflp_soft_err <
2507 error_disp_cnt) {
2508 NXGE_ERROR_MSG((nxgep,
2509 NXGE_ERR_CTL,
2510 " nxge_receive_packet:"
2511 " channel %d"
2512 " RCR FFLP_SOFT_ERROR", channel));
2513 }
2514 break;
2515 case RCR_ZCP_SOFT_ERROR:
2516 error_send_up = B_TRUE;
2517 rdc_stats->zcp_soft_err++;
2518 if (rdc_stats->zcp_soft_err <
2519 error_disp_cnt)
2520 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2521 " nxge_receive_packet: Channel %d"
2522 " RCR ZCP_SOFT_ERROR", channel));
2523 break;
2524 default:
2525 rdc_stats->rcr_unknown_err++;
2526 if (rdc_stats->rcr_unknown_err
2527 < error_disp_cnt) {
2528 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2529 " nxge_receive_packet: Channel %d"
2530 " RCR entry 0x%llx error 0x%x",
2531 channel, rcr_entry, error_type));
2532 }
2533 break;
2534 }
2535 }
2536
2537 /*
2538 * Update and repost buffer block if max usage
2539 * count is reached.
2540 */
2541 if (error_send_up == B_FALSE) {
2542 atomic_inc_32(&rx_msg_p->ref_cnt);
2543 if (buffer_free == B_TRUE) {
2544 rx_msg_p->free = B_TRUE;
2545 }
2546
2547 MUTEX_EXIT(&rx_rbr_p->lock);
2548 nxge_freeb(rx_msg_p);
2549 return;
2550 }
2551 }
2552
2553 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2554 "==> nxge_receive_packet: DMA sync second "));
2555
2556 bytes_read = rcr_p->rcvd_pkt_bytes;
2557 skip_len = sw_offset_bytes + hdr_size;
2558 if (!rx_msg_p->rx_use_bcopy) {
2559 /*
2560 * For buffers loaned up to the upper layer, the driver reference
2561 * count is incremented first and then the free state is set.
2562 */
2563 if ((nmp = nxge_dupb(rx_msg_p, buf_offset, bsize)) != NULL) {
2564 if (first_entry) {
2565 nmp->b_rptr = &nmp->b_rptr[skip_len];
2566 if (l2_len < bsize - skip_len) {
2567 nmp->b_wptr = &nmp->b_rptr[l2_len];
2568 } else {
2569 nmp->b_wptr = &nmp->b_rptr[bsize
2570 - skip_len];
2571 }
2572 } else {
2573 if (l2_len - bytes_read < bsize) {
2574 nmp->b_wptr =
2575 &nmp->b_rptr[l2_len - bytes_read];
2576 } else {
2577 nmp->b_wptr = &nmp->b_rptr[bsize];
2578 }
2579 }
2580 }
2581 } else {
2582 if (first_entry) {
2583 nmp = nxge_dupb_bcopy(rx_msg_p, buf_offset + skip_len,
2584 l2_len < bsize - skip_len ?
2585 l2_len : bsize - skip_len);
2586 } else {
2587 nmp = nxge_dupb_bcopy(rx_msg_p, buf_offset,
2588 l2_len - bytes_read < bsize ?
2589 l2_len - bytes_read : bsize);
2590 }
2591 }
2592 if (nmp != NULL) {
2593 if (first_entry) {
2594 /*
2595 * Jumbo packets may be received with more than one
2596 * buffer, increment ipackets for the first entry only.
2597 */
2598 rdc_stats->ipackets++;
2599
2600 /* Update ibytes for kstat. */
2601 rdc_stats->ibytes += skip_len
2602 + l2_len < bsize ? l2_len : bsize;
2603 /*
2604 * Update the number of bytes read so far for the
2605 * current frame.
2606 */
2607 bytes_read = nmp->b_wptr - nmp->b_rptr;
2608 } else {
2609 rdc_stats->ibytes += l2_len - bytes_read < bsize ?
2610 l2_len - bytes_read : bsize;
2611 bytes_read += nmp->b_wptr - nmp->b_rptr;
2612 }
2613
2614 NXGE_DEBUG_MSG((nxgep, RX_CTL,
2615 "==> nxge_receive_packet after dupb: "
2616 "rbr consumed %d "
2617 "pktbufsz_type %d "
2618 "nmp $%p rptr $%p wptr $%p "
2619 "buf_offset %d bzise %d l2_len %d skip_len %d",
2620 rx_rbr_p->rbr_consumed,
2621 pktbufsz_type,
2622 nmp, nmp->b_rptr, nmp->b_wptr,
2623 buf_offset, bsize, l2_len, skip_len));
2624 } else {
2625 cmn_err(CE_WARN, "!nxge_receive_packet: "
2626 "update stats (error)");
2627 atomic_inc_32(&rx_msg_p->ref_cnt);
2628 if (buffer_free == B_TRUE) {
2629 rx_msg_p->free = B_TRUE;
2630 }
2631 MUTEX_EXIT(&rx_rbr_p->lock);
2632 nxge_freeb(rx_msg_p);
2633 return;
2634 }
2635
2636 if (buffer_free == B_TRUE) {
2637 rx_msg_p->free = B_TRUE;
2638 }
2639
2640 is_valid = (nmp != NULL);
2641
2642 rcr_p->rcvd_pkt_bytes = bytes_read;
2643
2644 MUTEX_EXIT(&rx_rbr_p->lock);
2645
2646 if (rx_msg_p->free && rx_msg_p->rx_use_bcopy) {
2647 atomic_inc_32(&rx_msg_p->ref_cnt);
2648 nxge_freeb(rx_msg_p);
2649 }
2650
2651 if (is_valid) {
2652 nmp->b_cont = NULL;
2653 if (first_entry) {
2654 *mp = nmp;
2655 *mp_cont = NULL;
2656 } else {
2657 *mp_cont = nmp;
2658 }
2659 }
2660
2661 /*
2662 * ERROR, FRAG and PKT_TYPE are only reported in the first entry.
2663 * If a packet is not fragmented and no error bit is set, then
2664 * L4 checksum is OK.
2665 */
2666
2667 if (is_valid && !multi) {
2668 /*
2669 * If the checksum flag nxge_cksum_offload
2670 * is 1, TCP and UDP packets can be sent
2671 * up with a good checksum. If the checksum flag
2672 * is set to 0, checksum reporting applies to
2673 * TCP packets only (workaround for a hardware bug).
2674 * If the checksum flag nxge_cksum_offload is
2675 * greater than 1, neither TCP nor UDP packets
2676 * will have their hardware checksum results reported.
2677 */
2678 if (nxge_cksum_offload == 1) {
2679 is_tcp_udp = ((pkt_type == RCR_PKT_IS_TCP ||
2680 pkt_type == RCR_PKT_IS_UDP) ?
2681 B_TRUE: B_FALSE);
2682 } else if (!nxge_cksum_offload) {
2683 /* TCP checksum only. */
2684 is_tcp_udp = ((pkt_type == RCR_PKT_IS_TCP) ?
2685 B_TRUE: B_FALSE);
2686 }
2687
2688 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_receive_packet: "
2689 "is_valid 0x%x multi 0x%llx pkt %d frag %d error %d",
2690 is_valid, multi, is_tcp_udp, frag, error_type));
2691
2692 if (is_tcp_udp && !frag && !error_type) {
2693 mac_hcksum_set(nmp, 0, 0, 0, 0, HCK_FULLCKSUM_OK);
2694 NXGE_DEBUG_MSG((nxgep, RX_CTL,
2695 "==> nxge_receive_packet: Full tcp/udp cksum "
2696 "is_valid 0x%x multi 0x%llx pkt %d frag %d "
2697 "error %d",
2698 is_valid, multi, is_tcp_udp, frag, error_type));
2699 }
2700 }
2701
2702 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2703 "==> nxge_receive_packet: *mp 0x%016llx", *mp));
2704
2705 *multi_p = (multi == RCR_MULTI_MASK);
2706 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_receive_packet: "
2707 "multi %d nmp 0x%016llx *mp 0x%016llx *mp_cont 0x%016llx",
2708 *multi_p, nmp, *mp, *mp_cont));
2709 }
2710
2711 /*
2712 * Enable polling for a ring. The ring's interrupt is disabled
2713 * when the nxge interrupt fires (see nxge_rx_intr).
2714 */
2715 int
2716 nxge_enable_poll(void *arg)
2717 {
2718 p_nxge_ring_handle_t ring_handle = (p_nxge_ring_handle_t)arg;
2719 p_rx_rcr_ring_t ringp;
2720 p_nxge_t nxgep;
2721 p_nxge_ldg_t ldgp;
2722 uint32_t channel;
2723
2724 if (ring_handle == NULL) {
2725 ASSERT(ring_handle != NULL);
2726 return (0);
2727 }
2728
2729 nxgep = ring_handle->nxgep;
2730 channel = nxgep->pt_config.hw_config.start_rdc + ring_handle->index;
2731 ringp = nxgep->rx_rcr_rings->rcr_rings[channel];
2732 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
2733 "==> nxge_enable_poll: rdc %d ", ringp->rdc));
2734 ldgp = ringp->ldgp;
2735 if (ldgp == NULL) {
2736 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
2737 "==> nxge_enable_poll: rdc %d NULL ldgp: no change",
2738 ringp->rdc));
2739 return (0);
2740 }
2741
2742 MUTEX_ENTER(&ringp->lock);
2743 /* enable polling */
2744 if (ringp->poll_flag == 0) {
2745 ringp->poll_flag = 1;
2746 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
2747 "==> nxge_enable_poll: rdc %d set poll flag to 1",
2748 ringp->rdc));
2749 }
2750
2751 MUTEX_EXIT(&ringp->lock);
2752 return (0);
2753 }
2754 /*
2755 * Disable polling for a ring and enable its interrupt.
2756 */
2757 int
2758 nxge_disable_poll(void *arg)
2759 {
2760 p_nxge_ring_handle_t ring_handle = (p_nxge_ring_handle_t)arg;
2761 p_rx_rcr_ring_t ringp;
2762 p_nxge_t nxgep;
2763 uint32_t channel;
2764
2765 if (ring_handle == NULL) {
2766 ASSERT(ring_handle != NULL);
2767 return (0);
2768 }
2769
2770 nxgep = ring_handle->nxgep;
2771 channel = nxgep->pt_config.hw_config.start_rdc + ring_handle->index;
2772 ringp = nxgep->rx_rcr_rings->rcr_rings[channel];
2773
2774 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
2775 "==> nxge_disable_poll: rdc %d poll_flag %d", ringp->rdc));
2776
2777 MUTEX_ENTER(&ringp->lock);
2778
2779 /* disable polling: enable interrupt */
2780 if (ringp->poll_flag) {
2781 npi_handle_t handle;
2782 rx_dma_ctl_stat_t cs;
2783 uint8_t channel;
2784 p_nxge_ldg_t ldgp;
2785
2786 /*
2787 * Get the control and status for this channel.
2788 */
2789 handle = NXGE_DEV_NPI_HANDLE(nxgep);
2790 channel = ringp->rdc;
2791 RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG,
2792 channel, &cs.value);
2793
2794 /*
2795 * Enable mailbox update.
2796 * Since packets were not read and the hardware uses
2797 * bits pktread and ptrread to update the queue
2798 * length, we need to set both bits to 0.
2799 */
2800 cs.bits.ldw.pktread = 0;
2801 cs.bits.ldw.ptrread = 0;
2802 cs.bits.hdw.mex = 1;
2803 RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
2804 cs.value);
2805
2806 /*
2807 * Rearm this logical group if this is a single device
2808 * group.
2809 */
2810 ldgp = ringp->ldgp;
2811 if (ldgp == NULL) {
2812 ringp->poll_flag = 0;
2813 MUTEX_EXIT(&ringp->lock);
2814 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
2815 "==> nxge_disable_poll: no ldgp rdc %d "
2816 "(still set poll to 0", ringp->rdc));
2817 return (0);
2818 }
2819 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
2820 "==> nxge_disable_poll: rdc %d ldgp $%p (enable intr)",
2821 ringp->rdc, ldgp));
2822 if (ldgp->nldvs == 1) {
2823 if (isLDOMguest(nxgep)) {
2824 ldgp->arm = B_TRUE;
2825 nxge_hio_ldgimgn(nxgep, ldgp);
2826 } else {
2827 ldgimgm_t mgm;
2828 mgm.value = 0;
2829 mgm.bits.ldw.arm = 1;
2830 mgm.bits.ldw.timer = ldgp->ldg_timer;
2831 NXGE_REG_WR64(handle,
2832 LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg),
2833 mgm.value);
2834 }
2835 }
2836 ringp->poll_flag = 0;
2837 }
2838
2839 MUTEX_EXIT(&ringp->lock);
2840 return (0);
2841 }
2842
2843 /*
2844 * Poll 'bytes_to_pickup' bytes of message from the rx ring.
2845 */
2846 mblk_t *
2847 nxge_rx_poll(void *arg, int bytes_to_pickup)
2848 {
2849 p_nxge_ring_handle_t ring_handle = (p_nxge_ring_handle_t)arg;
2850 p_rx_rcr_ring_t rcr_p;
2851 p_nxge_t nxgep;
2852 npi_handle_t handle;
2853 rx_dma_ctl_stat_t cs;
2854 mblk_t *mblk;
2855 p_nxge_ldv_t ldvp;
2856 uint32_t channel;
2857
2858 nxgep = ring_handle->nxgep;
2859
2860 /*
2861 * Get the control and status for this channel.
2862 */
2863 handle = NXGE_DEV_NPI_HANDLE(nxgep);
2864 channel = nxgep->pt_config.hw_config.start_rdc + ring_handle->index;
2865 rcr_p = nxgep->rx_rcr_rings->rcr_rings[channel];
2866 MUTEX_ENTER(&rcr_p->lock);
2867 ASSERT(rcr_p->poll_flag == 1);
2868
2869 RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, rcr_p->rdc, &cs.value);
2870
2871 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
2872 "==> nxge_rx_poll: calling nxge_rx_pkts: rdc %d poll_flag %d",
2873 rcr_p->rdc, rcr_p->poll_flag));
2874 mblk = nxge_rx_pkts(nxgep, rcr_p, cs, bytes_to_pickup);
2875
2876 ldvp = rcr_p->ldvp;
2877 /* error events. */
2878 if (ldvp && (cs.value & RX_DMA_CTL_STAT_ERROR)) {
2879 (void) nxge_rx_err_evnts(nxgep, ldvp->vdma_index, cs);
2880 }
2881
2882 MUTEX_EXIT(&rcr_p->lock);
2883
2884 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
2885 "<== nxge_rx_poll: rdc %d mblk $%p", rcr_p->rdc, mblk));
2886 return (mblk);
2887 }
2888
2889
2890 /*ARGSUSED*/
2891 static nxge_status_t
2892 nxge_rx_err_evnts(p_nxge_t nxgep, int channel, rx_dma_ctl_stat_t cs)
2893 {
2894 p_nxge_rx_ring_stats_t rdc_stats;
2895 npi_handle_t handle;
2896 npi_status_t rs;
2897 boolean_t rxchan_fatal = B_FALSE;
2898 boolean_t rxport_fatal = B_FALSE;
2899 uint8_t portn;
2900 nxge_status_t status = NXGE_OK;
2901 uint32_t error_disp_cnt = NXGE_ERROR_SHOW_MAX;
2902 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_rx_err_evnts"));
2903
2904 handle = NXGE_DEV_NPI_HANDLE(nxgep);
2905 portn = nxgep->mac.portnum;
2906 rdc_stats = &nxgep->statsp->rdc_stats[channel];
2907
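/*
 * Walk the control/status error bits: channel-fatal errors trigger
 * an RXDMA channel recovery below, the port-fatal dc_fifo_err
 * triggers an IPP/port recovery, and informational bits only bump
 * their counters.
 */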
2908 if (cs.bits.hdw.rbr_tmout) {
2909 rdc_stats->rx_rbr_tmout++;
2910 NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
2911 NXGE_FM_EREPORT_RDMC_RBR_TMOUT);
2912 rxchan_fatal = B_TRUE;
2913 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2914 "==> nxge_rx_err_evnts: rx_rbr_timeout"));
2915 }
2916 if (cs.bits.hdw.rsp_cnt_err) {
2917 rdc_stats->rsp_cnt_err++;
2918 NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
2919 NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR);
2920 rxchan_fatal = B_TRUE;
2921 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2922 "==> nxge_rx_err_evnts(channel %d): "
2923 "rsp_cnt_err", channel));
2924 }
2925 if (cs.bits.hdw.byte_en_bus) {
2926 rdc_stats->byte_en_bus++;
2927 NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
2928 NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS);
2929 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2930 "==> nxge_rx_err_evnts(channel %d): "
2931 "fatal error: byte_en_bus", channel));
2932 rxchan_fatal = B_TRUE;
2933 }
2934 if (cs.bits.hdw.rsp_dat_err) {
2935 rdc_stats->rsp_dat_err++;
2936 NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
2937 NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR);
2938 rxchan_fatal = B_TRUE;
2939 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2940 "==> nxge_rx_err_evnts(channel %d): "
2941 "fatal error: rsp_dat_err", channel));
2942 }
2943 if (cs.bits.hdw.rcr_ack_err) {
2944 rdc_stats->rcr_ack_err++;
2945 NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
2946 NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR);
2947 rxchan_fatal = B_TRUE;
2948 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2949 "==> nxge_rx_err_evnts(channel %d): "
2950 "fatal error: rcr_ack_err", channel));
2951 }
2952 if (cs.bits.hdw.dc_fifo_err) {
2953 rdc_stats->dc_fifo_err++;
2954 NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
2955 NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR);
2956 /* Not a channel-fatal error; it is handled as a port-level error below. */
2957 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2958 "==> nxge_rx_err_evnts(channel %d): "
2959 "dc_fifo_err", channel));
2960 rxport_fatal = B_TRUE;
2961 }
2962 if ((cs.bits.hdw.rcr_sha_par) || (cs.bits.hdw.rbr_pre_par)) {
2963 if ((rs = npi_rxdma_ring_perr_stat_get(handle,
2964 &rdc_stats->errlog.pre_par,
2965 &rdc_stats->errlog.sha_par))
2966 != NPI_SUCCESS) {
2967 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2968 "==> nxge_rx_err_evnts(channel %d): "
2969 "rcr_sha_par: get perr", channel));
2970 return (NXGE_ERROR | rs);
2971 }
2972 if (cs.bits.hdw.rcr_sha_par) {
2973 rdc_stats->rcr_sha_par++;
2974 NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
2975 NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR);
2976 rxchan_fatal = B_TRUE;
2977 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2978 "==> nxge_rx_err_evnts(channel %d): "
2979 "fatal error: rcr_sha_par", channel));
2980 }
2981 if (cs.bits.hdw.rbr_pre_par) {
2982 rdc_stats->rbr_pre_par++;
2983 NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
2984 NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR);
2985 rxchan_fatal = B_TRUE;
2986 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2987 "==> nxge_rx_err_evnts(channel %d): "
2988 "fatal error: rbr_pre_par", channel));
2989 }
2990 }
2991 /*
2992 * The following 4 status bits are informational; the system
2993 * is running fine. There is no need to send FMA ereports or
2994 * log messages.
2995 */
2996 if (cs.bits.hdw.port_drop_pkt) {
2997 rdc_stats->port_drop_pkt++;
2998 }
2999 if (cs.bits.hdw.wred_drop) {
3000 rdc_stats->wred_drop++;
3001 }
3002 if (cs.bits.hdw.rbr_pre_empty) {
3003 rdc_stats->rbr_pre_empty++;
3004 }
3005 if (cs.bits.hdw.rcr_shadow_full) {
3006 rdc_stats->rcr_shadow_full++;
3007 }
3008 if (cs.bits.hdw.config_err) {
3009 rdc_stats->config_err++;
3010 NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
3011 NXGE_FM_EREPORT_RDMC_CONFIG_ERR);
3012 rxchan_fatal = B_TRUE;
3013 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3014 "==> nxge_rx_err_evnts(channel %d): "
3015 "config error", channel));
3016 }
3017 if (cs.bits.hdw.rcrincon) {
3018 rdc_stats->rcrincon++;
3019 NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
3020 NXGE_FM_EREPORT_RDMC_RCRINCON);
3021 rxchan_fatal = B_TRUE;
3022 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3023 "==> nxge_rx_err_evnts(channel %d): "
3024 "fatal error: rcrincon error", channel));
3025 }
3026 if (cs.bits.hdw.rcrfull) {
3027 rdc_stats->rcrfull++;
3028 NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
3029 NXGE_FM_EREPORT_RDMC_RCRFULL);
3030 rxchan_fatal = B_TRUE;
3031 if (rdc_stats->rcrfull < error_disp_cnt) {
3032 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3033 "==> nxge_rx_err_evnts(channel %d): "
3034 "fatal error: rcrfull error", channel));
3035 }
3036 }
3037 if (cs.bits.hdw.rbr_empty) {
3038 /*
3039 * This bit is informational; there is no need to
3040 * send an FMA ereport or log a message.
3041 */
3042 rdc_stats->rbr_empty++;
3043 }
3044 if (cs.bits.hdw.rbrfull) {
3045 rdc_stats->rbrfull++;
3046 NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
3047 NXGE_FM_EREPORT_RDMC_RBRFULL);
3048 rxchan_fatal = B_TRUE;
3049 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3050 "==> nxge_rx_err_evnts(channel %d): "
3051 "fatal error: rbr_full error", channel));
3052 }
3053 if (cs.bits.hdw.rbrlogpage) {
3054 rdc_stats->rbrlogpage++;
3055 NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
3056 NXGE_FM_EREPORT_RDMC_RBRLOGPAGE);
3057 rxchan_fatal = B_TRUE;
3058 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3059 "==> nxge_rx_err_evnts(channel %d): "
3060 "fatal error: rbr logical page error", channel));
3061 }
3062 if (cs.bits.hdw.cfiglogpage) {
3063 rdc_stats->cfiglogpage++;
3064 NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
3065 NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE);
3066 rxchan_fatal = B_TRUE;
3067 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3068 "==> nxge_rx_err_evnts(channel %d): "
3069 "fatal error: cfig logical page error", channel));
3070 }
3071
3072 if (rxport_fatal) {
3073 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3074 " nxge_rx_err_evnts: fatal error on Port #%d\n",
3075 portn));
3076 if (isLDOMguest(nxgep)) {
3077 status = NXGE_ERROR;
3078 } else {
3079 status = nxge_ipp_fatal_err_recover(nxgep);
3080 if (status == NXGE_OK) {
3081 FM_SERVICE_RESTORED(nxgep);
3082 }
3083 }
3084 }
3085
3086 if (rxchan_fatal) {
3087 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3088 " nxge_rx_err_evnts: fatal error on Channel #%d\n",
3089 channel));
3090 if (isLDOMguest(nxgep)) {
3091 status = NXGE_ERROR;
3092 } else {
3093 status = nxge_rxdma_fatal_err_recover(nxgep, channel);
3094 if (status == NXGE_OK) {
3095 FM_SERVICE_RESTORED(nxgep);
3096 }
3097 }
3098 }
3099
3100 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_rx_err_evnts"));
3101
3102 return (status);
3103 }
3104
3105 /*
3106 * nxge_rdc_hvio_setup
3107 *
3108 * This code appears to set up some Hypervisor variables.
3109 *
3110 * Arguments:
3111 * nxgep
3112 * channel
3113 *
3114 * Notes:
3115 * What does NIU_LP_WORKAROUND mean?
3116 *
3117 * NPI/NXGE function calls:
3118 * na
3119 *
3120 * Context:
3121 * Any domain
3122 */
3123 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
3124 static void
3125 nxge_rdc_hvio_setup(
3126 nxge_t *nxgep, int channel)
3127 {
3128 nxge_dma_common_t *dma_common;
3129 nxge_dma_common_t *dma_control;
3130 rx_rbr_ring_t *ring;
3131
3132 ring = nxgep->rx_rbr_rings->rbr_rings[channel];
3133 dma_common = nxgep->rx_buf_pool_p->dma_buf_pool_p[channel];
3134
3135 ring->hv_set = B_FALSE;
3136
3137 ring->hv_rx_buf_base_ioaddr_pp = (uint64_t)
3138 dma_common->orig_ioaddr_pp;
3139 ring->hv_rx_buf_ioaddr_size = (uint64_t)
3140 dma_common->orig_alength;
3141
3142 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma_channel: "
3143 "channel %d data buf base io $%lx ($%p) size 0x%lx (%ld 0x%lx)",
3144 channel, ring->hv_rx_buf_base_ioaddr_pp,
3145 dma_common->ioaddr_pp, ring->hv_rx_buf_ioaddr_size,
3146 dma_common->orig_alength, dma_common->orig_alength));
3147
3148 dma_control = nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel];
3149
3150 ring->hv_rx_cntl_base_ioaddr_pp =
3151 (uint64_t)dma_control->orig_ioaddr_pp;
3152 ring->hv_rx_cntl_ioaddr_size =
3153 (uint64_t)dma_control->orig_alength;
3154
3155 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma_channel: "
3156 "channel %d cntl base io $%p ($%p) size 0x%llx (%d 0x%x)",
3157 channel, ring->hv_rx_cntl_base_ioaddr_pp,
3158 dma_control->ioaddr_pp, ring->hv_rx_cntl_ioaddr_size,
3159 dma_control->orig_alength, dma_control->orig_alength));
3160 }
3161 #endif
3162
3163 /*
3164 * nxge_map_rxdma
3165 *
3166 * Map an RDC into our kernel space.
3167 *
3168 * Arguments:
3169 * nxgep
3170 * channel The channel to map.
3171 *
3172 * Notes:
3173 * 1. Allocate & initialise a memory pool, if necessary.
3174 * 2. Allocate however many receive buffers are required.
3175 * 3. Setup buffers, descriptors, and mailbox.
3176 *
3177 * NPI/NXGE function calls:
3178 * nxge_alloc_rx_mem_pool()
3179 * nxge_alloc_rbb()
3180 * nxge_map_rxdma_channel()
3181 *
3182 * Registers accessed:
3183 *
3184 * Context:
3185 * Any domain
3186 */
3187 static nxge_status_t
3188 nxge_map_rxdma(p_nxge_t nxgep, int channel)
3189 {
3190 nxge_dma_common_t **data;
3191 nxge_dma_common_t **control;
3192 rx_rbr_ring_t **rbr_ring;
3193 rx_rcr_ring_t **rcr_ring;
3194 rx_mbox_t **mailbox;
3195 uint32_t chunks;
3196
3197 nxge_status_t status;
3198
3199 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma"));
3200
3201 if (!nxgep->rx_buf_pool_p) {
3202 if (nxge_alloc_rx_mem_pool(nxgep) != NXGE_OK) {
3203 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3204 "<== nxge_map_rxdma: buf not allocated"));
3205 return (NXGE_ERROR);
3206 }
3207 }
3208
3209 if (nxge_alloc_rxb(nxgep, channel) != NXGE_OK)
3210 return (NXGE_ERROR);
3211
3212 /*
3213 * Map descriptors from the buffer pools for each dma channel.
3214 */
3215
3216 /*
3217 * Set up and prepare buffer blocks, descriptors
3218 * and mailbox.
3219 */
3220 data = &nxgep->rx_buf_pool_p->dma_buf_pool_p[channel];
3221 rbr_ring = &nxgep->rx_rbr_rings->rbr_rings[channel];
3222 chunks = nxgep->rx_buf_pool_p->num_chunks[channel];
3223
3224 control = &nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel];
3225 rcr_ring = &nxgep->rx_rcr_rings->rcr_rings[channel];
3226
3227 mailbox = &nxgep->rx_mbox_areas_p->rxmbox_areas[channel];
3228
3229 status = nxge_map_rxdma_channel(nxgep, channel, data, rbr_ring,
3230 chunks, control, rcr_ring, mailbox);
3231 if (status != NXGE_OK) {
3232 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3233 "==> nxge_map_rxdma: nxge_map_rxdma_channel(%d) "
3234 "returned 0x%x",
3235 channel, status));
3236 return (status);
3237 }
3238 nxgep->rx_rbr_rings->rbr_rings[channel]->index = (uint16_t)channel;
3239 nxgep->rx_rcr_rings->rcr_rings[channel]->index = (uint16_t)channel;
3240 nxgep->rx_rcr_rings->rcr_rings[channel]->rdc_stats =
3241 &nxgep->statsp->rdc_stats[channel];
3242
3243 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
3244 if (!isLDOMguest(nxgep))
3245 nxge_rdc_hvio_setup(nxgep, channel);
3246 #endif
3247
3248 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3249 "<== nxge_map_rxdma: (status 0x%x channel %d)", status, channel));
3250
3251 return (status);
3252 }
3253
3254 static void
3255 nxge_unmap_rxdma(p_nxge_t nxgep, int channel)
3256 {
3257 rx_rbr_ring_t *rbr_ring;
3258 rx_rcr_ring_t *rcr_ring;
3259 rx_mbox_t *mailbox;
3260
3261 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_unmap_rxdma(%d)", channel));
3262
3263 if (!nxgep->rx_rbr_rings || !nxgep->rx_rcr_rings ||
3264 !nxgep->rx_mbox_areas_p)
3265 return;
3266
3267 rbr_ring = nxgep->rx_rbr_rings->rbr_rings[channel];
3268 rcr_ring = nxgep->rx_rcr_rings->rcr_rings[channel];
3269 mailbox = nxgep->rx_mbox_areas_p->rxmbox_areas[channel];
3270
3271 if (!rbr_ring || !rcr_ring || !mailbox)
3272 return;
3273
3274 (void) nxge_unmap_rxdma_channel(
3275 nxgep, channel, rbr_ring, rcr_ring, mailbox);
3276
3277 nxge_free_rxb(nxgep, channel);
3278
3279 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_unmap_rxdma"));
3280 }
3281
3282 nxge_status_t
3283 nxge_map_rxdma_channel(p_nxge_t nxgep, uint16_t channel,
3284 p_nxge_dma_common_t *dma_buf_p, p_rx_rbr_ring_t *rbr_p,
3285 uint32_t num_chunks,
3286 p_nxge_dma_common_t *dma_cntl_p, p_rx_rcr_ring_t *rcr_p,
3287 p_rx_mbox_t *rx_mbox_p)
3288 {
3289 int status = NXGE_OK;
3290
3291 /*
3292 * Set up and prepare buffer blocks, descriptors
3293 * and mailbox.
3294 */
3295 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3296 "==> nxge_map_rxdma_channel (channel %d)", channel));
3297 /*
3298 * Receive buffer blocks
3299 */
3300 status = nxge_map_rxdma_channel_buf_ring(nxgep, channel,
3301 dma_buf_p, rbr_p, num_chunks);
3302 if (status != NXGE_OK) {
3303 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3304 "==> nxge_map_rxdma_channel (channel %d): "
3305 "map buffer failed 0x%x", channel, status));
3306 goto nxge_map_rxdma_channel_exit;
3307 }
3308
3309 /*
3310 * Receive block ring, completion ring and mailbox.
3311 */
3312 status = nxge_map_rxdma_channel_cfg_ring(nxgep, channel,
3313 dma_cntl_p, rbr_p, rcr_p, rx_mbox_p);
3314 if (status != NXGE_OK) {
3315 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3316 "==> nxge_map_rxdma_channel (channel %d): "
3317 "map config failed 0x%x", channel, status));
3318 goto nxge_map_rxdma_channel_fail2;
3319 }
3320
3321 goto nxge_map_rxdma_channel_exit;
3322
3323 nxge_map_rxdma_channel_fail2:
3324 /* Free buffer blocks */
3325 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3326 "==> nxge_map_rxdma_channel: free rx buffers"
3327 "(nxgep 0x%x status 0x%x channel %d)",
3328 nxgep, status, channel));
3329 nxge_unmap_rxdma_channel_buf_ring(nxgep, *rbr_p);
3330
3331 status = NXGE_ERROR;
3332
3333 nxge_map_rxdma_channel_exit:
3334 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3335 "<== nxge_map_rxdma_channel: "
3336 "(nxgep 0x%x status 0x%x channel %d)",
3337 nxgep, status, channel));
3338
3339 return (status);
3340 }
3341
3342 /*ARGSUSED*/
3343 static void
3344 nxge_unmap_rxdma_channel(p_nxge_t nxgep, uint16_t channel,
3345 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p)
3346 {
3347 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3348 "==> nxge_unmap_rxdma_channel (channel %d)", channel));
3349
3350 /*
3351 * unmap receive block ring, completion ring and mailbox.
3352 */
3353 (void) nxge_unmap_rxdma_channel_cfg_ring(nxgep,
3354 rcr_p, rx_mbox_p);
3355
3356 /* unmap buffer blocks */
3357 (void) nxge_unmap_rxdma_channel_buf_ring(nxgep, rbr_p);
3358
3359 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_unmap_rxdma_channel"));
3360 }
3361
3362 /*ARGSUSED*/
3363 static nxge_status_t
3364 nxge_map_rxdma_channel_cfg_ring(p_nxge_t nxgep, uint16_t dma_channel,
3365 p_nxge_dma_common_t *dma_cntl_p, p_rx_rbr_ring_t *rbr_p,
3366 p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p)
3367 {
3368 p_rx_rbr_ring_t rbrp;
3369 p_rx_rcr_ring_t rcrp;
3370 p_rx_mbox_t mboxp;
3371 p_nxge_dma_common_t cntl_dmap;
3372 p_nxge_dma_common_t dmap;
3373 p_rx_msg_t *rx_msg_ring;
3374 p_rx_msg_t rx_msg_p;
3375 p_rbr_cfig_a_t rcfga_p;
3376 p_rbr_cfig_b_t rcfgb_p;
3377 p_rcrcfig_a_t cfga_p;
3378 p_rcrcfig_b_t cfgb_p;
3379 p_rxdma_cfig1_t cfig1_p;
3380 p_rxdma_cfig2_t cfig2_p;
3381 p_rbr_kick_t kick_p;
3382 uint32_t dmaaddrp;
3383 uint32_t *rbr_vaddrp;
3384 uint32_t bkaddr;
3385 nxge_status_t status = NXGE_OK;
3386 int i;
3387 uint32_t nxge_port_rcr_size;
3388
3389 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3390 "==> nxge_map_rxdma_channel_cfg_ring"));
3391
3392 cntl_dmap = *dma_cntl_p;
3393
3394 /* Map in the receive block ring */
3395 rbrp = *rbr_p;
3396 dmap = (p_nxge_dma_common_t)&rbrp->rbr_desc;
3397 nxge_setup_dma_common(dmap, cntl_dmap, rbrp->rbb_max, 4);
3398 /*
3399 * Zero out buffer block ring descriptors.
3400 */
3401 bzero((caddr_t)dmap->kaddrp, dmap->alength);
3402
3403 rcfga_p = &(rbrp->rbr_cfga);
3404 rcfgb_p = &(rbrp->rbr_cfgb);
3405 kick_p = &(rbrp->rbr_kick);
3406 rcfga_p->value = 0;
3407 rcfgb_p->value = 0;
3408 kick_p->value = 0;
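/*
 * Program the cached RBR configuration: CFIG_A carries the ring
 * base DMA address and the ring length, CFIG_B carries the three
 * packet buffer size codes and the block size code, and the kick
 * register posts the initial buffer count.
 */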
3409 rbrp->rbr_addr = dmap->dma_cookie.dmac_laddress;
3410 rcfga_p->value = (rbrp->rbr_addr &
3411 (RBR_CFIG_A_STDADDR_MASK |
3412 RBR_CFIG_A_STDADDR_BASE_MASK));
3413 rcfga_p->value |= ((uint64_t)rbrp->rbb_max << RBR_CFIG_A_LEN_SHIFT);
3414
3415 rcfgb_p->bits.ldw.bufsz0 = rbrp->pkt_buf_size0;
3416 rcfgb_p->bits.ldw.vld0 = 1;
3417 rcfgb_p->bits.ldw.bufsz1 = rbrp->pkt_buf_size1;
3418 rcfgb_p->bits.ldw.vld1 = 1;
3419 rcfgb_p->bits.ldw.bufsz2 = rbrp->pkt_buf_size2;
3420 rcfgb_p->bits.ldw.vld2 = 1;
3421 rcfgb_p->bits.ldw.bksize = nxgep->rx_bksize_code;
3422
3423 /*
3424 * For each buffer block, enter the receive block address into the ring.
3425 */
3426 rbr_vaddrp = (uint32_t *)dmap->kaddrp;
3427 rbrp->rbr_desc_vp = (uint32_t *)dmap->kaddrp;
3428 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3429 "==> nxge_map_rxdma_channel_cfg_ring: channel %d "
3430 "rbr_vaddrp $%p", dma_channel, rbr_vaddrp));
3431
3432 rx_msg_ring = rbrp->rx_msg_ring;
3433 for (i = 0; i < rbrp->tnblocks; i++) {
3434 rx_msg_p = rx_msg_ring[i];
3435 rx_msg_p->nxgep = nxgep;
3436 rx_msg_p->rx_rbr_p = rbrp;
3437 bkaddr = (uint32_t)
3438 ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress
3439 >> RBR_BKADDR_SHIFT));
3440 rx_msg_p->free = B_FALSE;
3441 rx_msg_p->max_usage_cnt = 0xbaddcafe;
3442
3443 *rbr_vaddrp++ = bkaddr;
3444 }
3445
3446 kick_p->bits.ldw.bkadd = rbrp->rbb_max;
3447 rbrp->rbr_wr_index = (rbrp->rbb_max - 1);
3448
3449 rbrp->rbr_rd_index = 0;
3450
3451 rbrp->rbr_consumed = 0;
3452 rbrp->rbr_use_bcopy = B_TRUE;
3453 rbrp->rbr_bufsize_type = RCR_PKTBUFSZ_0;
3454 /*
3455 * Do bcopy on packets greater than bcopy size once
3456 * the lo threshold is reached.
3457 * This lo threshold should be less than the hi threshold.
3458 *
3459 * Do bcopy on every packet once the hi threshold is reached.
3460 */
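/*
 * The thresholds below are expressed in NXGE_RX_BCOPY_SCALE-ths of
 * the ring size (rbb_max): a setting of N maps to a threshold of
 * rbb_max * N / NXGE_RX_BCOPY_SCALE consumed buffers.  Both values
 * are tunables (nxge_rx_threshold_hi / nxge_rx_threshold_lo).
 */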
3461 if (nxge_rx_threshold_lo >= nxge_rx_threshold_hi) {
3462 /* default it to use hi */
3463 nxge_rx_threshold_lo = nxge_rx_threshold_hi;
3464 }
3465
3466 if (nxge_rx_buf_size_type > NXGE_RBR_TYPE2) {
3467 nxge_rx_buf_size_type = NXGE_RBR_TYPE2;
3468 }
3469 rbrp->rbr_bufsize_type = nxge_rx_buf_size_type;
3470
3471 switch (nxge_rx_threshold_hi) {
3472 default:
3473 case NXGE_RX_COPY_NONE:
3474 /* Do not do bcopy at all */
3475 rbrp->rbr_use_bcopy = B_FALSE;
3476 rbrp->rbr_threshold_hi = rbrp->rbb_max;
3477 break;
3478
3479 case NXGE_RX_COPY_1:
3480 case NXGE_RX_COPY_2:
3481 case NXGE_RX_COPY_3:
3482 case NXGE_RX_COPY_4:
3483 case NXGE_RX_COPY_5:
3484 case NXGE_RX_COPY_6:
3485 case NXGE_RX_COPY_7:
3486 rbrp->rbr_threshold_hi =
3487 rbrp->rbb_max *
3488 (nxge_rx_threshold_hi)/NXGE_RX_BCOPY_SCALE;
3489 break;
3490
3491 case NXGE_RX_COPY_ALL:
3492 rbrp->rbr_threshold_hi = 0;
3493 break;
3494 }
3495
3496 switch (nxge_rx_threshold_lo) {
3497 default:
3498 case NXGE_RX_COPY_NONE:
3499 /* Do not do bcopy at all */
3500 if (rbrp->rbr_use_bcopy) {
3501 rbrp->rbr_use_bcopy = B_FALSE;
3502 }
3503 rbrp->rbr_threshold_lo = rbrp->rbb_max;
3504 break;
3505
3506 case NXGE_RX_COPY_1:
3507 case NXGE_RX_COPY_2:
3508 case NXGE_RX_COPY_3:
3509 case NXGE_RX_COPY_4:
3510 case NXGE_RX_COPY_5:
3511 case NXGE_RX_COPY_6:
3512 case NXGE_RX_COPY_7:
3513 rbrp->rbr_threshold_lo =
3514 rbrp->rbb_max *
3515 (nxge_rx_threshold_lo)/NXGE_RX_BCOPY_SCALE;
3516 break;
3517
3518 case NXGE_RX_COPY_ALL:
3519 rbrp->rbr_threshold_lo = 0;
3520 break;
3521 }
3522
3523 NXGE_DEBUG_MSG((nxgep, RX_CTL,
3524 "nxge_map_rxdma_channel_cfg_ring: channel %d "
3525 "rbb_max %d "
3526 "rbrp->rbr_bufsize_type %d "
3527 "rbb_threshold_hi %d "
3528 "rbb_threshold_lo %d",
3529 dma_channel,
3530 rbrp->rbb_max,
3531 rbrp->rbr_bufsize_type,
3532 rbrp->rbr_threshold_hi,
3533 rbrp->rbr_threshold_lo));
3534
3535 rbrp->page_valid.value = 0;
3536 rbrp->page_mask_1.value = rbrp->page_mask_2.value = 0;
3537 rbrp->page_value_1.value = rbrp->page_value_2.value = 0;
3538 rbrp->page_reloc_1.value = rbrp->page_reloc_2.value = 0;
3539 rbrp->page_hdl.value = 0;
3540
3541 rbrp->page_valid.bits.ldw.page0 = 1;
3542 rbrp->page_valid.bits.ldw.page1 = 1;
3543
3544 /* Map in the receive completion ring */
3545 rcrp = (p_rx_rcr_ring_t)
3546 KMEM_ZALLOC(sizeof (rx_rcr_ring_t), KM_SLEEP);
3547 rcrp->rdc = dma_channel;
3548
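/*
 * The completion ring wrap mask below assumes nxge_port_rcr_size
 * is a power of two; comp_rd_index wraps through NEXT_ENTRY()
 * using this mask (see nxge_rx_pkts).
 */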
3549 nxge_port_rcr_size = nxgep->nxge_port_rcr_size;
3550 rcrp->comp_size = nxge_port_rcr_size;
3551 rcrp->comp_wrap_mask = nxge_port_rcr_size - 1;
3552
3553 rcrp->max_receive_pkts = nxge_max_rx_pkts;
3554
3555 dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc;
3556 nxge_setup_dma_common(dmap, cntl_dmap, rcrp->comp_size,
3557 sizeof (rcr_entry_t));
3558 rcrp->comp_rd_index = 0;
3559 rcrp->comp_wt_index = 0;
3560 rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p =
3561 (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc);
3562 rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp =
3563 (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc);
3564
3565 rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p +
3566 (nxge_port_rcr_size - 1);
3567 rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp +
3568 (nxge_port_rcr_size - 1);
3569
3570 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3571 "==> nxge_map_rxdma_channel_cfg_ring: "
3572 "channel %d "
3573 "rbr_vaddrp $%p "
3574 "rcr_desc_rd_head_p $%p "
3575 "rcr_desc_rd_head_pp $%p "
3576 "rcr_desc_rd_last_p $%p "
3577 "rcr_desc_rd_last_pp $%p ",
3578 dma_channel,
3579 rbr_vaddrp,
3580 rcrp->rcr_desc_rd_head_p,
3581 rcrp->rcr_desc_rd_head_pp,
3582 rcrp->rcr_desc_last_p,
3583 rcrp->rcr_desc_last_pp));
3584
3585 /*
3586 * Zero out buffer block ring descriptors.
3587 */
3588 bzero((caddr_t)dmap->kaddrp, dmap->alength);
3589
3590 rcrp->intr_timeout = (nxgep->intr_timeout <
3591 NXGE_RDC_RCR_TIMEOUT_MIN) ? NXGE_RDC_RCR_TIMEOUT_MIN :
3592 nxgep->intr_timeout;
3593
3594 rcrp->intr_threshold = (nxgep->intr_threshold <
3595 NXGE_RDC_RCR_THRESHOLD_MIN) ? NXGE_RDC_RCR_THRESHOLD_MIN :
3596 nxgep->intr_threshold;
3597
3598 rcrp->full_hdr_flag = B_FALSE;
3599
3600 rcrp->sw_priv_hdr_len = nxge_rdc_buf_offset;
3601
3602
3603 cfga_p = &(rcrp->rcr_cfga);
3604 cfgb_p = &(rcrp->rcr_cfgb);
3605 cfga_p->value = 0;
3606 cfgb_p->value = 0;
3607 rcrp->rcr_addr = dmap->dma_cookie.dmac_laddress;
3608 cfga_p->value = (rcrp->rcr_addr &
3609 (RCRCFIG_A_STADDR_MASK |
3610 RCRCFIG_A_STADDR_BASE_MASK));
3611
3612 cfga_p->value |= ((uint64_t)rcrp->comp_size <<
3613 RCRCFIG_A_LEN_SHIF);
3614
3615 /*
3616 * The timeout should be set based on the system clock divider.
3617 * A timeout value of 1 corresponds to a granularity of 1000
3618 * clock cycles, i.e. about 3 microseconds at 300MHz.
3619 */
3620 cfgb_p->bits.ldw.pthres = rcrp->intr_threshold;
3621 cfgb_p->bits.ldw.timeout = rcrp->intr_timeout;
3622 cfgb_p->bits.ldw.entout = 1;
3623
3624 /* Map in the mailbox */
3625 mboxp = (p_rx_mbox_t)
3626 KMEM_ZALLOC(sizeof (rx_mbox_t), KM_SLEEP);
3627 dmap = (p_nxge_dma_common_t)&mboxp->rx_mbox;
3628 nxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (rxdma_mailbox_t));
3629 cfig1_p = (p_rxdma_cfig1_t)&mboxp->rx_cfg1;
3630 cfig2_p = (p_rxdma_cfig2_t)&mboxp->rx_cfg2;
3631 cfig1_p->value = cfig2_p->value = 0;
3632
3633 mboxp->mbox_addr = dmap->dma_cookie.dmac_laddress;
3634 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3635 "==> nxge_map_rxdma_channel_cfg_ring: "
3636 "channel %d cfg1 0x%016llx cfig2 0x%016llx cookie 0x%016llx",
3637 dma_channel, cfig1_p->value, cfig2_p->value,
3638 mboxp->mbox_addr));
3639
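/*
 * Split the mailbox DMA address between the two config registers:
 * bits 43:32 go into RXDMA_CFIG1.mbaddr_h, and the low-order bits
 * (masked and shifted per RXDMA_CFIG2_MBADDR_L_MASK/SHIFT) go into
 * RXDMA_CFIG2.mbaddr.
 */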
3640 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress >> 32
3641 & 0xfff);
3642 cfig1_p->bits.ldw.mbaddr_h = dmaaddrp;
3643
3644
3645 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress & 0xffffffff);
3646 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress &
3647 RXDMA_CFIG2_MBADDR_L_MASK);
3648
3649 cfig2_p->bits.ldw.mbaddr = (dmaaddrp >> RXDMA_CFIG2_MBADDR_L_SHIFT);
3650
3651 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3652 "==> nxge_map_rxdma_channel_cfg_ring: "
3653 "channel %d damaddrp $%p "
3654 "cfg1 0x%016llx cfig2 0x%016llx",
3655 dma_channel, dmaaddrp,
3656 cfig1_p->value, cfig2_p->value));
3657
3658 cfig2_p->bits.ldw.full_hdr = rcrp->full_hdr_flag;
3659 if (nxgep->niu_hw_type == NIU_HW_TYPE_RF) {
3660 switch (rcrp->sw_priv_hdr_len) {
3661 case SW_OFFSET_NO_OFFSET:
3662 case SW_OFFSET_64:
3663 case SW_OFFSET_128:
3664 case SW_OFFSET_192:
3665 cfig2_p->bits.ldw.offset =
3666 rcrp->sw_priv_hdr_len;
3667 cfig2_p->bits.ldw.offset256 = 0;
3668 break;
3669 case SW_OFFSET_256:
3670 case SW_OFFSET_320:
3671 case SW_OFFSET_384:
3672 case SW_OFFSET_448:
3673 cfig2_p->bits.ldw.offset =
3674 rcrp->sw_priv_hdr_len & 0x3;
3675 cfig2_p->bits.ldw.offset256 = 1;
3676 break;
3677 default:
3678 cfig2_p->bits.ldw.offset = SW_OFFSET_NO_OFFSET;
3679 cfig2_p->bits.ldw.offset256 = 0;
3680 }
3681 } else {
3682 cfig2_p->bits.ldw.offset = rcrp->sw_priv_hdr_len;
3683 }
3684
3685 rbrp->rx_rcr_p = rcrp;
3686 rcrp->rx_rbr_p = rbrp;
3687 *rcr_p = rcrp;
3688 *rx_mbox_p = mboxp;
3689
3690 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3691 "<== nxge_map_rxdma_channel_cfg_ring status 0x%08x", status));
3692
3693 return (status);
3694 }
3695
3696 /*ARGSUSED*/
3697 static void
3698 nxge_unmap_rxdma_channel_cfg_ring(p_nxge_t nxgep,
3699 p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p)
3700 {
3701 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3702 "==> nxge_unmap_rxdma_channel_cfg_ring: channel %d",
3703 rcr_p->rdc));
3704
3705 KMEM_FREE(rcr_p, sizeof (rx_rcr_ring_t));
3706 KMEM_FREE(rx_mbox_p, sizeof (rx_mbox_t));
3707
3708 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3709 "<== nxge_unmap_rxdma_channel_cfg_ring"));
3710 }
3711
3712 static nxge_status_t
3713 nxge_map_rxdma_channel_buf_ring(p_nxge_t nxgep, uint16_t channel,
3714 p_nxge_dma_common_t *dma_buf_p,
3715 p_rx_rbr_ring_t *rbr_p, uint32_t num_chunks)
3716 {
3717 p_rx_rbr_ring_t rbrp;
3718 p_nxge_dma_common_t dma_bufp, tmp_bufp;
3719 p_rx_msg_t *rx_msg_ring;
3720 p_rx_msg_t rx_msg_p;
3721 p_mblk_t mblk_p;
3722
3723 rxring_info_t *ring_info;
3724 nxge_status_t status = NXGE_OK;
3725 int i, j, index;
3726 uint32_t size, bsize, nblocks, nmsgs;
3727
3728 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3729 "==> nxge_map_rxdma_channel_buf_ring: channel %d",
3730 channel));
3731
3732 dma_bufp = tmp_bufp = *dma_buf_p;
3733 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3734 " nxge_map_rxdma_channel_buf_ring: channel %d to map %d "
3735 "chunks bufp 0x%016llx",
3736 channel, num_chunks, dma_bufp));
3737
3738 nmsgs = 0;
3739 for (i = 0; i < num_chunks; i++, tmp_bufp++) {
3740 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3741 "==> nxge_map_rxdma_channel_buf_ring: channel %d "
3742 "bufp 0x%016llx nblocks %d nmsgs %d",
3743 channel, tmp_bufp, tmp_bufp->nblocks, nmsgs));
3744 nmsgs += tmp_bufp->nblocks;
3745 }
3746 if (!nmsgs) {
3747 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3748 "<== nxge_map_rxdma_channel_buf_ring: channel %d "
3749 "no msg blocks",
3750 channel));
3751 status = NXGE_ERROR;
3752 goto nxge_map_rxdma_channel_buf_ring_exit;
3753 }
3754
3755 rbrp = (p_rx_rbr_ring_t)KMEM_ZALLOC(sizeof (*rbrp), KM_SLEEP);
3756
3757 size = nmsgs * sizeof (p_rx_msg_t);
3758 rx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP);
3759 ring_info = (rxring_info_t *)KMEM_ZALLOC(sizeof (rxring_info_t),
3760 KM_SLEEP);
3761
3762 MUTEX_INIT(&rbrp->lock, NULL, MUTEX_DRIVER,
3763 (void *)nxgep->interrupt_cookie);
3764 MUTEX_INIT(&rbrp->post_lock, NULL, MUTEX_DRIVER,
3765 (void *)nxgep->interrupt_cookie);
3766 rbrp->rdc = channel;
3767 rbrp->num_blocks = num_chunks;
3768 rbrp->tnblocks = nmsgs;
3769 rbrp->rbb_max = nmsgs;
3770 rbrp->rbr_max_size = nmsgs;
3771 rbrp->rbr_wrap_mask = (rbrp->rbb_max - 1);
3772
3773 /*
3774 * Buffer sizes suggested by NIU architect:
3775 * 256B, 1KB and 2KB (4KB or 8KB when jumbo frames are enabled).
3776 */
3777
3778 rbrp->pkt_buf_size0 = RBR_BUFSZ0_256B;
3779 rbrp->pkt_buf_size0_bytes = RBR_BUFSZ0_256_BYTES;
3780 rbrp->npi_pkt_buf_size0 = SIZE_256B;
3781
3782 rbrp->pkt_buf_size1 = RBR_BUFSZ1_1K;
3783 rbrp->pkt_buf_size1_bytes = RBR_BUFSZ1_1K_BYTES;
3784 rbrp->npi_pkt_buf_size1 = SIZE_1KB;
3785
3786 rbrp->block_size = nxgep->rx_default_block_size;
3787
3788 if (!nxgep->mac.is_jumbo) {
3789 rbrp->pkt_buf_size2 = RBR_BUFSZ2_2K;
3790 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_2K_BYTES;
3791 rbrp->npi_pkt_buf_size2 = SIZE_2KB;
3792 } else {
3793 if (rbrp->block_size >= 0x2000) {
3794 rbrp->pkt_buf_size2 = RBR_BUFSZ2_8K;
3795 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_8K_BYTES;
3796 rbrp->npi_pkt_buf_size2 = SIZE_8KB;
3797 } else {
3798 rbrp->pkt_buf_size2 = RBR_BUFSZ2_4K;
3799 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_4K_BYTES;
3800 rbrp->npi_pkt_buf_size2 = SIZE_4KB;
3801 }
3802 }
3803
3804 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3805 "==> nxge_map_rxdma_channel_buf_ring: channel %d "
3806 "actual rbr max %d rbb_max %d nmsgs %d "
3807 "rbrp->block_size %d default_block_size %d "
3808 "(config nxge_rbr_size %d nxge_rbr_spare_size %d)",
3809 channel, rbrp->rbr_max_size, rbrp->rbb_max, nmsgs,
3810 rbrp->block_size, nxgep->rx_default_block_size,
3811 nxge_rbr_size, nxge_rbr_spare_size));
3812
3813 /* Map in buffers from the buffer pool. */
3814 index = 0;
3815 for (i = 0; i < rbrp->num_blocks; i++, dma_bufp++) {
3816 bsize = dma_bufp->block_size;
3817 nblocks = dma_bufp->nblocks;
3818 ring_info->buffer[i].dvma_addr = (uint64_t)dma_bufp->ioaddr_pp;
3819 ring_info->buffer[i].buf_index = i;
3820 ring_info->buffer[i].buf_size = dma_bufp->alength;
3821 ring_info->buffer[i].start_index = index;
3822 ring_info->buffer[i].kaddr = (uint64_t)dma_bufp->kaddrp;
3823
3824 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3825 " nxge_map_rxdma_channel_buf_ring: map channel %d "
3826 "chunk %d"
3827 " nblocks %d chunk_size %x block_size 0x%x "
3828 "dma_bufp $%p", channel, i,
3829 dma_bufp->nblocks, ring_info->buffer[i].buf_size, bsize,
3830 dma_bufp));
3831
3832 for (j = 0; j < nblocks; j++) {
3833 if ((rx_msg_p = nxge_allocb(bsize, BPRI_LO,
3834 dma_bufp)) == NULL) {
3835 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3836 "allocb failed (index %d i %d j %d)",
3837 index, i, j));
3838 goto nxge_map_rxdma_channel_buf_ring_fail1;
3839 }
3840 rx_msg_ring[index] = rx_msg_p;
3841 rx_msg_p->block_index = index;
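			/*
			 * The RBR stores buffer-block addresses shifted
			 * right by RBR_BKADDR_SHIFT, presumably so the
			 * full DMA address fits in a 32-bit RBR entry
			 * (inferred from the shift below).
			 */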
3842 rx_msg_p->shifted_addr = (uint32_t)
3843 ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress >>
3844 RBR_BKADDR_SHIFT));
3845
3846 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3847 "index %d j %d rx_msg_p $%p mblk %p",
3848 index, j, rx_msg_p, rx_msg_p->rx_mblk_p));
3849
3850 mblk_p = rx_msg_p->rx_mblk_p;
3851 mblk_p->b_wptr = mblk_p->b_rptr + bsize;
3852
3853 rbrp->rbr_ref_cnt++;
3854 index++;
3855 rx_msg_p->buf_dma.dma_channel = channel;
3856 }
3857
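		/*
		 * Record how this chunk's memory was allocated: kmem
		 * overrides contig, which overrides the default DDI
		 * allocation. The ring's alloc type ends up reflecting
		 * the last chunk examined (all chunks are presumably
		 * allocated the same way), and it is consulted later by
		 * nxge_rxdma_databuf_free() to decide how to release
		 * the buffers.
		 */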
3858 rbrp->rbr_alloc_type = DDI_MEM_ALLOC;
3859 if (dma_bufp->contig_alloc_type) {
3860 rbrp->rbr_alloc_type = CONTIG_MEM_ALLOC;
3861 }
3862
3863 if (dma_bufp->kmem_alloc_type) {
3864 rbrp->rbr_alloc_type = KMEM_ALLOC;
3865 }
3866
3867 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3868 " nxge_map_rxdma_channel_buf_ring: map channel %d "
3869 "chunk %d"
3870 " nblocks %d chunk_size %x block_size 0x%x "
3871 "dma_bufp $%p",
3872 channel, i,
3873 dma_bufp->nblocks, ring_info->buffer[i].buf_size, bsize,
3874 dma_bufp));
3875 }
3876 if (i < rbrp->num_blocks) {
3877 goto nxge_map_rxdma_channel_buf_ring_fail1;
3878 }
3879
3880 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3881 "nxge_map_rxdma_channel_buf_ring: done buf init "
3882 "channel %d msg block entries %d",
3883 channel, index));
3884 ring_info->block_size_mask = bsize - 1;
3885 rbrp->rx_msg_ring = rx_msg_ring;
3886 rbrp->dma_bufp = dma_buf_p;
3887 rbrp->ring_info = ring_info;
3888
3889 status = nxge_rxbuf_index_info_init(nxgep, rbrp);
3890 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3891 " nxge_map_rxdma_channel_buf_ring: "
3892 "channel %d done buf info init", channel));
3893
3894 /*
3895 * Finally, permit nxge_freeb() to call nxge_post_page().
3896 */
3897 rbrp->rbr_state = RBR_POSTING;
3898
3899 *rbr_p = rbrp;
3900 goto nxge_map_rxdma_channel_buf_ring_exit;
3901
3902 nxge_map_rxdma_channel_buf_ring_fail1:
3903 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3904 " nxge_map_rxdma_channel_buf_ring: failed channel (0x%x)",
3905 channel, status));
3906
3907 index--;
3908 for (; index >= 0; index--) {
3909 rx_msg_p = rx_msg_ring[index];
3910 if (rx_msg_p != NULL) {
3911 freeb(rx_msg_p->rx_mblk_p);
3912 rx_msg_ring[index] = NULL;
3913 }
3914 }
3915
3916 MUTEX_DESTROY(&rbrp->post_lock);
3917 MUTEX_DESTROY(&rbrp->lock);
3918 KMEM_FREE(ring_info, sizeof (rxring_info_t));
3919 KMEM_FREE(rx_msg_ring, size);
3920 KMEM_FREE(rbrp, sizeof (rx_rbr_ring_t));
3921
3922 status = NXGE_ERROR;
3923
3924 nxge_map_rxdma_channel_buf_ring_exit:
3925 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3926 "<== nxge_map_rxdma_channel_buf_ring status 0x%08x", status));
3927
3928 return (status);
3929 }
3930
3931 /*ARGSUSED*/
3932 static void
3933 nxge_unmap_rxdma_channel_buf_ring(p_nxge_t nxgep,
3934 p_rx_rbr_ring_t rbr_p)
3935 {
3936 p_rx_msg_t *rx_msg_ring;
3937 p_rx_msg_t rx_msg_p;
3938 rxring_info_t *ring_info;
3939 int i;
3940 uint32_t size;
3941 #ifdef NXGE_DEBUG
3942 int num_chunks;
3943 #endif
3944
3945 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3946 "==> nxge_unmap_rxdma_channel_buf_ring"));
3947 if (rbr_p == NULL) {
3948 NXGE_DEBUG_MSG((nxgep, RX_CTL,
3949 "<== nxge_unmap_rxdma_channel_buf_ring: NULL rbrp"));
3950 return;
3951 }
3952 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3953 "==> nxge_unmap_rxdma_channel_buf_ring: channel %d",
3954 rbr_p->rdc));
3955
3956 rx_msg_ring = rbr_p->rx_msg_ring;
3957 ring_info = rbr_p->ring_info;
3958
3959 if (rx_msg_ring == NULL || ring_info == NULL) {
3960 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3961 "<== nxge_unmap_rxdma_channel_buf_ring: "
3962 "rx_msg_ring $%p ring_info $%p",
3963 rx_msg_ring, ring_info));
3964 return;
3965 }
3966
3967 #ifdef NXGE_DEBUG
3968 num_chunks = rbr_p->num_blocks;
3969 #endif
3970 size = rbr_p->tnblocks * sizeof (p_rx_msg_t);
3971 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3972 " nxge_unmap_rxdma_channel_buf_ring: channel %d chunks %d "
3973 "tnblocks %d (max %d) size ptrs %d ",
3974 rbr_p->rdc, num_chunks,
3975 rbr_p->tnblocks, rbr_p->rbr_max_size, size));
3976
3977 for (i = 0; i < rbr_p->tnblocks; i++) {
3978 rx_msg_p = rx_msg_ring[i];
3979 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3980 " nxge_unmap_rxdma_channel_buf_ring: "
3981 "rx_msg_p $%p",
3982 rx_msg_p));
3983 if (rx_msg_p != NULL) {
3984 freeb(rx_msg_p->rx_mblk_p);
3985 rx_msg_ring[i] = NULL;
3986 }
3987 }
3988
3989 /*
3990 * We may no longer use the <post_lock> mutex. By setting
3991 * <rbr_state> to anything but POSTING, we prevent
3992 * nxge_post_page() from accessing a dead mutex.
3993 */
3994 rbr_p->rbr_state = RBR_UNMAPPING;
3995 MUTEX_DESTROY(&rbr_p->post_lock);
3996
3997 MUTEX_DESTROY(&rbr_p->lock);
3998
3999 if (rbr_p->rbr_ref_cnt == 0) {
4000 /*
4001 * This is the normal state of affairs.
4002 * Need to free the following buffers:
4003 * - data buffers
4004 * - rx_msg ring
4005 * - ring_info
4006 * - rbr ring
4007 */
4008 NXGE_DEBUG_MSG((nxgep, RX_CTL,
4009 "unmap_rxdma_buf_ring: No outstanding - freeing "));
4010 nxge_rxdma_databuf_free(rbr_p);
4011 KMEM_FREE(ring_info, sizeof (rxring_info_t));
4012 KMEM_FREE(rx_msg_ring, size);
4013 KMEM_FREE(rbr_p, sizeof (*rbr_p));
4014 } else {
4015 /*
4016 * Some of our buffers are still being used.
4017 * Therefore, tell nxge_freeb() this ring is
4018 * unmapped, so it may free <rbr_p> for us.
4019 */
4020 rbr_p->rbr_state = RBR_UNMAPPED;
4021 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4022 "unmap_rxdma_buf_ring: %d %s outstanding.",
4023 rbr_p->rbr_ref_cnt,
4024 rbr_p->rbr_ref_cnt == 1 ? "msg" : "msgs"));
4025 }
4026
4027 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
4028 "<== nxge_unmap_rxdma_channel_buf_ring"));
4029 }
4030
4031 /*
4032 * nxge_rxdma_hw_start_common
4033 *
4034 * Arguments:
4035 * nxgep
4036 *
4037 * Notes:
4038 *
4039 * NPI/NXGE function calls:
4040 * nxge_init_fzc_rx_common();
4041 * nxge_init_fzc_rxdma_port();
4042 *
4043 * Registers accessed:
4044 *
4045 * Context:
4046 * Service domain
4047 */
4048 static nxge_status_t
4049 nxge_rxdma_hw_start_common(p_nxge_t nxgep)
4050 {
4051 nxge_status_t status = NXGE_OK;
4052
4053 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start_common"));
4054
4055 /*
4056 * Load the sharable parameters by writing to the
4057 * function zero control registers. These FZC registers
4058 * should be initialized only once for the entire chip.
4059 */
4060 (void) nxge_init_fzc_rx_common(nxgep);
4061
4062 /*
4063 * Initialize the port-specific RXDMA FZC control configurations.
4064 * These FZC registers pertain to each port.
4065 */
4066 (void) nxge_init_fzc_rxdma_port(nxgep);
4067
4068 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_hw_start_common"));
4069
4070 return (status);
4071 }
4072
4073 static nxge_status_t
4074 nxge_rxdma_hw_start(p_nxge_t nxgep, int channel)
4075 {
4076 int i, ndmas;
4077 p_rx_rbr_rings_t rx_rbr_rings;
4078 p_rx_rbr_ring_t *rbr_rings;
4079 p_rx_rcr_rings_t rx_rcr_rings;
4080 p_rx_rcr_ring_t *rcr_rings;
4081 p_rx_mbox_areas_t rx_mbox_areas_p;
4082 p_rx_mbox_t *rx_mbox_p;
4083 nxge_status_t status = NXGE_OK;
4084
4085 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start"));
4086
4087 rx_rbr_rings = nxgep->rx_rbr_rings;
4088 rx_rcr_rings = nxgep->rx_rcr_rings;
4089 if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) {
4090 NXGE_DEBUG_MSG((nxgep, RX_CTL,
4091 "<== nxge_rxdma_hw_start: NULL ring pointers"));
4092 return (NXGE_ERROR);
4093 }
4094 ndmas = rx_rbr_rings->ndmas;
4095 if (ndmas == 0) {
4096 NXGE_DEBUG_MSG((nxgep, RX_CTL,
4097 "<== nxge_rxdma_hw_start: no dma channel allocated"));
4098 return (NXGE_ERROR);
4099 }
4100
4101 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
4102 "==> nxge_rxdma_hw_start (ndmas %d)", ndmas));
4103
4104 rbr_rings = rx_rbr_rings->rbr_rings;
4105 rcr_rings = rx_rcr_rings->rcr_rings;
4106 rx_mbox_areas_p = nxgep->rx_mbox_areas_p;
4107 if (rx_mbox_areas_p) {
4108 rx_mbox_p = rx_mbox_areas_p->rxmbox_areas;
4109 } else {
4110 rx_mbox_p = NULL;
4111 }
4112
4113 i = channel;
4114 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
4115 "==> nxge_rxdma_hw_start (ndmas %d) channel %d",
4116 ndmas, channel));
4117 status = nxge_rxdma_start_channel(nxgep, channel,
4118 (p_rx_rbr_ring_t)rbr_rings[i],
4119 (p_rx_rcr_ring_t)rcr_rings[i],
4120 (p_rx_mbox_t)rx_mbox_p[i]);
4121 if (status != NXGE_OK) {
4122 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4123 "==> nxge_rxdma_hw_start: disable "
4124 "(status 0x%x channel %d)", status, channel));
4125 return (status);
4126 }
4127
4128 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start: "
4129 "rx_rbr_rings 0x%016llx rings 0x%016llx",
4130 rx_rbr_rings, rx_rcr_rings));
4131
4132 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
4133 "==> nxge_rxdma_hw_start: (status 0x%x)", status));
4134
4135 return (status);
4136 }
4137
4138 static void
4139 nxge_rxdma_hw_stop(p_nxge_t nxgep, int channel)
4140 {
4141 p_rx_rbr_rings_t rx_rbr_rings;
4142 p_rx_rcr_rings_t rx_rcr_rings;
4143
4144 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_stop"));
4145
4146 rx_rbr_rings = nxgep->rx_rbr_rings;
4147 rx_rcr_rings = nxgep->rx_rcr_rings;
4148 if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) {
4149 NXGE_DEBUG_MSG((nxgep, RX_CTL,
4150 "<== nxge_rxdma_hw_stop: NULL ring pointers"));
4151 return;
4152 }
4153
4154 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
4155 "==> nxge_rxdma_hw_stop(channel %d)",
4156 channel));
4157 (void) nxge_rxdma_stop_channel(nxgep, channel);
4158
4159 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_stop: "
4160 "rx_rbr_rings 0x%016llx rings 0x%016llx",
4161 rx_rbr_rings, rx_rcr_rings));
4162
4163 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_hw_stop"));
4164 }
4165
4166
4167 static nxge_status_t
4168 nxge_rxdma_start_channel(p_nxge_t nxgep, uint16_t channel,
4169 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p)
4170 {
4171 npi_handle_t handle;
4172 npi_status_t rs = NPI_SUCCESS;
4173 rx_dma_ctl_stat_t cs;
4174 rx_dma_ent_msk_t ent_mask;
4175 nxge_status_t status = NXGE_OK;
4176
4177 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel"));
4178
4179 handle = NXGE_DEV_NPI_HANDLE(nxgep);
4180
4181 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "nxge_rxdma_start_channel: "
4182 "npi handle addr $%p acc $%p",
4183 nxgep->npi_handle.regp, nxgep->npi_handle.regh));
4184
4185 /* Reset RXDMA channel, but not if you're a guest. */
4186 if (!isLDOMguest(nxgep)) {
4187 rs = npi_rxdma_cfg_rdc_reset(handle, channel);
4188 if (rs != NPI_SUCCESS) {
4189 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4190 "==> nxge_init_fzc_rdc: "
4191 "npi_rxdma_cfg_rdc_reset(%d) returned 0x%08x",
4192 channel, rs));
4193 return (NXGE_ERROR | rs);
4194 }
4195
4196 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
4197 "==> nxge_rxdma_start_channel: reset done: channel %d",
4198 channel));
4199 }
4200
4201 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
4202 if (isLDOMguest(nxgep))
4203 (void) nxge_rdc_lp_conf(nxgep, channel);
4204 #endif
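	/*
	 * In a guest (LDoms) domain, the logical-page configuration
	 * for this RDC is presumably handled through the hypervisor
	 * interface above rather than through the FZC registers
	 * below, which a guest cannot access directly.
	 */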
4205
4206 /*
4207 * Initialize the RXDMA channel-specific FZC control
4208 * configurations. These FZC registers pertain to
4209 * each RX channel (logical pages).
4210 */
4211 if (!isLDOMguest(nxgep)) {
4212 status = nxge_init_fzc_rxdma_channel(nxgep, channel);
4213 if (status != NXGE_OK) {
4214 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4215 "==> nxge_rxdma_start_channel: "
4216 "init fzc rxdma failed (0x%08x channel %d)",
4217 status, channel));
4218 return (status);
4219 }
4220
4221 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
4222 "==> nxge_rxdma_start_channel: fzc done"));
4223 }
4224
4225 /* Set up the interrupt event masks. */
4226 ent_mask.value = 0;
4227 ent_mask.value |= RX_DMA_ENT_MSK_RBREMPTY_MASK;
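	/*
	 * Only the RBR-empty event is masked at this point; the
	 * remaining RXDMA events are left enabled for the interrupt
	 * handler while the channel is brought up.
	 */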
4228 rs = npi_rxdma_event_mask(handle, OP_SET, channel,
4229 &ent_mask);
4230 if (rs != NPI_SUCCESS) {
4231 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4232 "==> nxge_rxdma_start_channel: "
4233 "init rxdma event masks failed "
4234 "(0x%08x channel %d)",
4235 rs, channel));
4236 return (NXGE_ERROR | rs);
4237 }
4238
4239 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
4240 "==> nxge_rxdma_start_channel: "
4241 "event done: channel %d (mask 0x%016llx)",
4242 channel, ent_mask.value));
4243
4244 /* Initialize the receive DMA control and status register */
4245 cs.value = 0;
4246 cs.bits.hdw.mex = 1;
4247 cs.bits.hdw.rcrthres = 1;
4248 cs.bits.hdw.rcrto = 1;
4249 cs.bits.hdw.rbr_empty = 1;
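	/*
	 * mex enables mailbox updates; writing 1 to rcrthres, rcrto
	 * and rbr_empty presumably clears and re-arms those event
	 * bits before the channel is enabled (inferred from the bit
	 * names in the RXDMA control/status register definition).
	 */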
4250 status = nxge_init_rxdma_channel_cntl_stat(nxgep, channel, &cs);
4251 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: "
4252 "channel %d rx_dma_cntl_stat 0x%0016llx", channel, cs.value));
4253 if (status != NXGE_OK) {
4254 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4255 "==> nxge_rxdma_start_channel: "
4256 "init rxdma control register failed (0x%08x channel %d",
4257 status, channel));
4258 return (status);
4259 }
4260
4261 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: "
4262 "control done - channel %d cs 0x%016llx", channel, cs.value));
4263
4264 /*
4265 * Load the RXDMA descriptors, buffers and mailbox,
4266 * initialize the receive DMA channel, and
4267 * enable it.
4268 */
4269 status = nxge_enable_rxdma_channel(nxgep,
4270 channel, rbr_p, rcr_p, mbox_p);
4271
4272 if (status != NXGE_OK) {
4273 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4274 " nxge_rxdma_start_channel: "
4275 " enable rxdma failed (0x%08x channel %d)",
4276 status, channel));
4277 return (status);
4278 }
4279
4280 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
4281 "==> nxge_rxdma_start_channel: enabled channel %d"));
4282
4283 if (isLDOMguest(nxgep)) {
4284 /* Add interrupt handler for this channel. */
4285 status = nxge_hio_intr_add(nxgep, VP_BOUND_RX, channel);
4286 if (status != NXGE_OK) {
4287 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4288 " nxge_rxdma_start_channel: "
4289 " nxge_hio_intr_add failed (0x%08x channel %d)",
4290 status, channel));
4291 return (status);
4292 }
4293 }
4294
4295 ent_mask.value = 0;
4296 ent_mask.value |= (RX_DMA_ENT_MSK_WRED_DROP_MASK |
4297 RX_DMA_ENT_MSK_PTDROP_PKT_MASK);
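	/*
	 * For normal operation, mask only the WRED-drop and
	 * port-drop-packet events; these drops are presumably
	 * accounted for in the statistics rather than serviced
	 * per event.
	 */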
4298 rs = npi_rxdma_event_mask(handle, OP_SET, channel,
4299 &ent_mask);
4300 if (rs != NPI_SUCCESS) {
4301 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
4302 "==> nxge_rxdma_start_channel: "
4303 "init rxdma event masks failed (0x%08x channel %d)",
4304 rs, channel));
4305 return (NXGE_ERROR | rs);
4306 }
4307
4308 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: "
4309 "control done - channel %d cs 0x%016llx", channel, cs.value));
4310
4311 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_start_channel"));
4312
4313 return (NXGE_OK);
4314 }
4315
4316 static nxge_status_t
4317 nxge_rxdma_stop_channel(p_nxge_t nxgep, uint16_t channel)
4318 {
4319 npi_handle_t handle;
4320 npi_status_t rs = NPI_SUCCESS;
4321 rx_dma_ctl_stat_t cs;
4322 rx_dma_ent_msk_t ent_mask;
4323 nxge_status_t status = NXGE_OK;
4324
4325 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_channel"));
4326
4327 handle = NXGE_DEV_NPI_HANDLE(nxgep);
4328
4329 NXGE_DEBUG_MSG((nxgep, RX_CTL, "nxge_rxdma_stop_channel: "
4330 "npi handle addr $%p acc $%p",
4331 nxgep->npi_handle.regp, nxgep->npi_handle.regh));
4332
4333 if (!isLDOMguest(nxgep)) {
4334 /*
4335 * Stop RxMAC = A.9.2.6
4336 */
4337 if (nxge_rx_mac_disable(nxgep) != NXGE_OK) {
4338 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4339 "nxge_rxdma_stop_channel: "
4340 "Failed to disable RxMAC"));
4341 }
4342
4343 /*
4344 * Drain IPP Port = A.9.3.6
4345 */
4346 (void) nxge_ipp_drain(nxgep);
4347 }
4348
4349 /* Reset RXDMA channel */
4350 rs = npi_rxdma_cfg_rdc_reset(handle, channel);
4351 if (rs != NPI_SUCCESS) {
4352 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4353 " nxge_rxdma_stop_channel: "
4354 " reset rxdma failed (0x%08x channel %d)",
4355 rs, channel));
4356 return (NXGE_ERROR | rs);
4357 }
4358
4359 NXGE_DEBUG_MSG((nxgep, RX_CTL,
4360 "==> nxge_rxdma_stop_channel: reset done"));
4361
4362 /* Set up the interrupt event masks. */
4363 ent_mask.value = RX_DMA_ENT_MSK_ALL;
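	/*
	 * Mask every RXDMA event while the channel is being
	 * quiesced so that no further interrupts are raised for it.
	 */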
4364 rs = npi_rxdma_event_mask(handle, OP_SET, channel,
4365 &ent_mask);
4366 if (rs != NPI_SUCCESS) {
4367 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4368 "==> nxge_rxdma_stop_channel: "
4369 "set rxdma event masks failed (0x%08x channel %d)",
4370 rs, channel));
4371 return (NXGE_ERROR | rs);
4372 }
4373
4374 NXGE_DEBUG_MSG((nxgep, RX_CTL,
4375 "==> nxge_rxdma_stop_channel: event done"));
4376
4377 /*
4378 * Initialize the receive DMA control and status register
4379 */
4380 cs.value = 0;
4381 status = nxge_init_rxdma_channel_cntl_stat(nxgep, channel, &cs);
4382 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_channel: control "
4383 " to default (all 0s) 0x%08x", cs.value));
4384 if (status != NXGE_OK) {
4385 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4386 " nxge_rxdma_stop_channel: init rxdma"
4387 " control register failed (0x%08x channel %d",
4388 status, channel));
4389 return (status);
4390 }
4391
4392 NXGE_DEBUG_MSG((nxgep, RX_CTL,
4393 "==> nxge_rxdma_stop_channel: control done"));
4394
4395 /*
4396 * Make sure channel is disabled.
4397 */
4398 status = nxge_disable_rxdma_channel(nxgep, channel);
4399
4400 if (status != NXGE_OK) {
4401 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4402 " nxge_rxdma_stop_channel: "
4403 " init enable rxdma failed (0x%08x channel %d)",
4404 status, channel));
4405 return (status);
4406 }
4407
4408 if (!isLDOMguest(nxgep)) {
4409 /*
4410 * Enable RxMAC = A.9.2.10
4411 */
4412 if (nxge_rx_mac_enable(nxgep) != NXGE_OK) {
4413 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4414 "nxge_rxdma_stop_channel: Rx MAC still disabled"));
4415 }
4416 }
4417
4418 NXGE_DEBUG_MSG((nxgep,
4419 RX_CTL, "==> nxge_rxdma_stop_channel: disable done"));
4420
4421 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_stop_channel"));
4422
4423 return (NXGE_OK);
4424 }
4425
4426 nxge_status_t
4427 nxge_rxdma_handle_sys_errors(p_nxge_t nxgep)
4428 {
4429 npi_handle_t handle;
4430 p_nxge_rdc_sys_stats_t statsp;
4431 rx_ctl_dat_fifo_stat_t stat;
4432 uint32_t zcp_err_status;
4433 uint32_t ipp_err_status;
4434 nxge_status_t status = NXGE_OK;
4435 npi_status_t rs = NPI_SUCCESS;
4436 boolean_t my_err = B_FALSE;
4437
4438 handle = nxgep->npi_handle;
4439 statsp = (p_nxge_rdc_sys_stats_t)&nxgep->statsp->rdc_sys_stats;
4440
4441 rs = npi_rxdma_rxctl_fifo_error_intr_get(handle, &stat);
4442
4443 if (rs != NPI_SUCCESS)
4444 return (NXGE_ERROR | rs);
4445
4446 if (stat.bits.ldw.id_mismatch) {
4447 statsp->id_mismatch++;
4448 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, 0,
4449 NXGE_FM_EREPORT_RDMC_ID_MISMATCH);
4450 /* Global fatal error encountered */
4451 }
4452
4453 if ((stat.bits.ldw.zcp_eop_err) || (stat.bits.ldw.ipp_eop_err)) {
4454 switch (nxgep->mac.portnum) {
4455 case 0:
4456 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT0) ||
4457 (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT0)) {
4458 my_err = B_TRUE;
4459 zcp_err_status = stat.bits.ldw.zcp_eop_err;
4460 ipp_err_status = stat.bits.ldw.ipp_eop_err;
4461 }
4462 break;
4463 case 1:
4464 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT1) ||
4465 (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT1)) {
4466 my_err = B_TRUE;
4467 zcp_err_status = stat.bits.ldw.zcp_eop_err;
4468 ipp_err_status = stat.bits.ldw.ipp_eop_err;
4469 }
4470 break;
4471 case 2:
4472 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT2) ||
4473 (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT2)) {
4474 my_err = B_TRUE;
4475 zcp_err_status = stat.bits.ldw.zcp_eop_err;
4476 ipp_err_status = stat.bits.ldw.ipp_eop_err;
4477 }
4478 break;
4479 case 3:
4480 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT3) ||
4481 (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT3)) {
4482 my_err = B_TRUE;
4483 zcp_err_status = stat.bits.ldw.zcp_eop_err;
4484 ipp_err_status = stat.bits.ldw.ipp_eop_err;
4485 }
4486 break;
4487 default:
4488 return (NXGE_ERROR);
4489 }
4490 }
4491
4492 if (my_err) {
4493 status = nxge_rxdma_handle_port_errors(nxgep, ipp_err_status,
4494 zcp_err_status);
4495 if (status != NXGE_OK)
4496 return (status);
4497 }
4498
4499 return (NXGE_OK);
4500 }
4501
4502 static nxge_status_t
4503 nxge_rxdma_handle_port_errors(p_nxge_t nxgep, uint32_t ipp_status,
4504 uint32_t zcp_status)
4505 {
4506 boolean_t rxport_fatal = B_FALSE;
4507 p_nxge_rdc_sys_stats_t statsp;
4508 nxge_status_t status = NXGE_OK;
4509 uint8_t portn;
4510
4511 portn = nxgep->mac.portnum;
4512 statsp = (p_nxge_rdc_sys_stats_t)&nxgep->statsp->rdc_sys_stats;
4513
4514 if (ipp_status & (0x1 << portn)) {
4515 statsp->ipp_eop_err++;
4516 NXGE_FM_REPORT_ERROR(nxgep, portn, 0,
4517 NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR);
4518 rxport_fatal = B_TRUE;
4519 }
4520
4521 if (zcp_status & (0x1 << portn)) {
4522 statsp->zcp_eop_err++;
4523 NXGE_FM_REPORT_ERROR(nxgep, portn, 0,
4524 NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR);
4525 rxport_fatal = B_TRUE;
4526 }
4527
4528 if (rxport_fatal) {
4529 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4530 " nxge_rxdma_handle_port_error: "
4531 " fatal error on Port #%d\n",
4532 portn));
4533 status = nxge_rx_port_fatal_err_recover(nxgep);
4534 if (status == NXGE_OK) {
4535 FM_SERVICE_RESTORED(nxgep);
4536 }
4537 }
4538
4539 return (status);
4540 }
4541
4542 static nxge_status_t
4543 nxge_rxdma_fatal_err_recover(p_nxge_t nxgep, uint16_t channel)
4544 {
4545 npi_handle_t handle;
4546 npi_status_t rs = NPI_SUCCESS;
4547 nxge_status_t status = NXGE_OK;
4548 p_rx_rbr_ring_t rbrp;
4549 p_rx_rcr_ring_t rcrp;
4550 p_rx_mbox_t mboxp;
4551 rx_dma_ent_msk_t ent_mask;
4552 p_nxge_dma_common_t dmap;
4553 uint32_t ref_cnt;
4554 p_rx_msg_t rx_msg_p;
4555 int i;
4556 uint32_t nxge_port_rcr_size;
4557
4558 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fatal_err_recover"));
4559 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4560 "Recovering from RxDMAChannel#%d error...", channel));
4561
4562 /*
4563 * Stop the DMA channel and wait for the stop-done
4564 * indication. If the stop-done bit is not set,
4565 * report an error.
4566 */
4567
4568 handle = NXGE_DEV_NPI_HANDLE(nxgep);
4569 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Rx DMA stop..."));
4570
4571 rbrp = (p_rx_rbr_ring_t)nxgep->rx_rbr_rings->rbr_rings[channel];
4572 rcrp = (p_rx_rcr_ring_t)nxgep->rx_rcr_rings->rcr_rings[channel];
4573
4574 MUTEX_ENTER(&rbrp->lock);
4575 MUTEX_ENTER(&rbrp->post_lock);
4576
4577 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxDMA channel..."));
4578
4579 rs = npi_rxdma_cfg_rdc_disable(handle, channel);
4580 if (rs != NPI_SUCCESS) {
4581 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4582 "nxge_disable_rxdma_channel:failed"));
4583 goto fail;
4584 }
4585
4586 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxDMA interrupt..."));
4587
4588 /* Disable interrupt */
4589 ent_mask.value = RX_DMA_ENT_MSK_ALL;
4590 rs = npi_rxdma_event_mask(handle, OP_SET, channel, &ent_mask);
4591 if (rs != NPI_SUCCESS) {
4592 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4593 "nxge_rxdma_stop_channel: "
4594 "set rxdma event masks failed (channel %d)",
4595 channel));
4596 }
4597
4598 NXGE_DEBUG_MSG((nxgep, RX_CTL, "RxDMA channel reset..."));
4599
4600 /* Reset RXDMA channel */
4601 rs = npi_rxdma_cfg_rdc_reset(handle, channel);
4602 if (rs != NPI_SUCCESS) {
4603 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4604 "nxge_rxdma_fatal_err_recover: "
4605 " reset rxdma failed (channel %d)", channel));
4606 goto fail;
4607 }
4608
4609 nxge_port_rcr_size = nxgep->nxge_port_rcr_size;
4610
4611 mboxp = (p_rx_mbox_t)nxgep->rx_mbox_areas_p->rxmbox_areas[channel];
4612
4613 rbrp->rbr_wr_index = (rbrp->rbb_max - 1);
4614 rbrp->rbr_rd_index = 0;
4615
4616 rcrp->comp_rd_index = 0;
4617 rcrp->comp_wt_index = 0;
4618 rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p =
4619 (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc);
4620 rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp =
4621 (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc);
4622
4623 rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p +
4624 (nxge_port_rcr_size - 1);
4625 rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp +
4626 (nxge_port_rcr_size - 1);
4627
4628 dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc;
4629 bzero((caddr_t)dmap->kaddrp, dmap->alength);
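	/*
	 * The RCR descriptor area is zeroed and the read/write
	 * indices reset above so that no stale completion entries
	 * are processed once the channel is restarted.
	 */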
4630
4631 cmn_err(CE_NOTE, "!rbr entries = %d\n", rbrp->rbr_max_size);
4632
4633 for (i = 0; i < rbrp->rbr_max_size; i++) {
4634 rx_msg_p = rbrp->rx_msg_ring[i];
4635 ref_cnt = rx_msg_p->ref_cnt;
4636 if (ref_cnt != 1) {
4637 if (rx_msg_p->cur_usage_cnt !=
4638 rx_msg_p->max_usage_cnt) {
4639 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4640 "buf[%d]: cur_usage_cnt = %d "
4641 "max_usage_cnt = %d\n", i,
4642 rx_msg_p->cur_usage_cnt,
4643 rx_msg_p->max_usage_cnt));
4644 } else {
4645 /* Buffer can be re-posted */
4646 rx_msg_p->free = B_TRUE;
4647 rx_msg_p->cur_usage_cnt = 0;
4648 rx_msg_p->max_usage_cnt = 0xbaddcafe;
4649 rx_msg_p->pkt_buf_size = 0;
4650 }
4651 }
4652 }
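	/*
	 * Buffers still referenced outside the driver (ref_cnt != 1)
	 * are either reported (if only partially consumed) or flagged
	 * free so they can be re-posted when the channel restarts;
	 * 0xbaddcafe appears to serve as a sentinel usage count.
	 */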
4653
4654 NXGE_DEBUG_MSG((nxgep, RX_CTL, "RxDMA channel re-start..."));
4655
4656 status = nxge_rxdma_start_channel(nxgep, channel, rbrp, rcrp, mboxp);
4657 if (status != NXGE_OK) {
4658 goto fail;
4659 }
4660
4661 MUTEX_EXIT(&rbrp->post_lock);
4662 MUTEX_EXIT(&rbrp->lock);
4663
4664 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4665 "Recovery Successful, RxDMAChannel#%d Restored",
4666 channel));
4667 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fatal_err_recover"));
4668 return (NXGE_OK);
4669
4670 fail:
4671 MUTEX_EXIT(&rbrp->post_lock);
4672 MUTEX_EXIT(&rbrp->lock);
4673 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed"));
4674 return (NXGE_ERROR | rs);
4675 }
4676
4677 nxge_status_t
4678 nxge_rx_port_fatal_err_recover(p_nxge_t nxgep)
4679 {
4680 nxge_grp_set_t *set = &nxgep->rx_set;
4681 nxge_status_t status = NXGE_OK;
4682 p_rx_rcr_ring_t rcrp;
4683 int rdc;
4684
4685 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_port_fatal_err_recover"));
4686 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4687 "Recovering from RxPort error..."));
4688 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disabling RxMAC...\n"));
4689
4690 if (nxge_rx_mac_disable(nxgep) != NXGE_OK)
4691 goto fail;
4692
4693 NXGE_DELAY(1000);
4694
4695 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Stopping all RxDMA channels..."));
4696
4697 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
4698 if ((1 << rdc) & set->owned.map) {
4699 rcrp = nxgep->rx_rcr_rings->rcr_rings[rdc];
4700 if (rcrp != NULL) {
4701 MUTEX_ENTER(&rcrp->lock);
4702 if (nxge_rxdma_fatal_err_recover(nxgep,
4703 rdc) != NXGE_OK) {
4704 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4705 "Could not recover "
4706 "channel %d", rdc));
4707 }
4708 MUTEX_EXIT(&rcrp->lock);
4709 }
4710 }
4711 }
4712
4713 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Resetting IPP..."));
4714
4715 /* Reset IPP */
4716 if (nxge_ipp_reset(nxgep) != NXGE_OK) {
4717 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4718 "nxge_rx_port_fatal_err_recover: "
4719 "Failed to reset IPP"));
4720 goto fail;
4721 }
4722
4723 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Reset RxMAC..."));
4724
4725 /* Reset RxMAC */
4726 if (nxge_rx_mac_reset(nxgep) != NXGE_OK) {
4727 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4728 "nxge_rx_port_fatal_err_recover: "
4729 "Failed to reset RxMAC"));
4730 goto fail;
4731 }
4732
4733 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-initialize IPP..."));
4734
4735 /* Re-Initialize IPP */
4736 if (nxge_ipp_init(nxgep) != NXGE_OK) {
4737 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4738 "nxge_rx_port_fatal_err_recover: "
4739 "Failed to init IPP"));
4740 goto fail;
4741 }
4742
4743 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-initialize RxMAC..."));
4744
4745 /* Re-Initialize RxMAC */
4746 if ((status = nxge_rx_mac_init(nxgep)) != NXGE_OK) {
4747 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4748 "nxge_rx_port_fatal_err_recover: "
4749 "Failed to reset RxMAC"));
4750 goto fail;
4751 }
4752
4753 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-enable RxMAC..."));
4754
4755 /* Re-enable RxMAC */
4756 if ((status = nxge_rx_mac_enable(nxgep)) != NXGE_OK) {
4757 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4758 "nxge_rx_port_fatal_err_recover: "
4759 "Failed to enable RxMAC"));
4760 goto fail;
4761 }
4762
4763 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4764 "Recovery Successful, RxPort Restored"));
4765
4766 return (NXGE_OK);
4767 fail:
4768 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed"));
4769 return (status);
4770 }
4771
4772 void
4773 nxge_rxdma_inject_err(p_nxge_t nxgep, uint32_t err_id, uint8_t chan)
4774 {
4775 rx_dma_ctl_stat_t cs;
4776 rx_ctl_dat_fifo_stat_t cdfs;
4777
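	/*
	 * Error injection for FMA testing: set the requested error
	 * bit in the RXDMA debug status register (or the RX control
	 * FIFO debug register) so the driver's error-handling paths
	 * can be exercised without a real hardware fault.
	 */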
4778 switch (err_id) {
4779 case NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR:
4780 case NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR:
4781 case NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR:
4782 case NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR:
4783 case NXGE_FM_EREPORT_RDMC_RBR_TMOUT:
4784 case NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR:
4785 case NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS:
4786 case NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR:
4787 case NXGE_FM_EREPORT_RDMC_RCRINCON:
4788 case NXGE_FM_EREPORT_RDMC_RCRFULL:
4789 case NXGE_FM_EREPORT_RDMC_RBRFULL:
4790 case NXGE_FM_EREPORT_RDMC_RBRLOGPAGE:
4791 case NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE:
4792 case NXGE_FM_EREPORT_RDMC_CONFIG_ERR:
4793 RXDMA_REG_READ64(nxgep->npi_handle, RX_DMA_CTL_STAT_DBG_REG,
4794 chan, &cs.value);
4795 if (err_id == NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR)
4796 cs.bits.hdw.rcr_ack_err = 1;
4797 else if (err_id == NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR)
4798 cs.bits.hdw.dc_fifo_err = 1;
4799 else if (err_id == NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR)
4800 cs.bits.hdw.rcr_sha_par = 1;
4801 else if (err_id == NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR)
4802 cs.bits.hdw.rbr_pre_par = 1;
4803 else if (err_id == NXGE_FM_EREPORT_RDMC_RBR_TMOUT)
4804 cs.bits.hdw.rbr_tmout = 1;
4805 else if (err_id == NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR)
4806 cs.bits.hdw.rsp_cnt_err = 1;
4807 else if (err_id == NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS)
4808 cs.bits.hdw.byte_en_bus = 1;
4809 else if (err_id == NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR)
4810 cs.bits.hdw.rsp_dat_err = 1;
4811 else if (err_id == NXGE_FM_EREPORT_RDMC_CONFIG_ERR)
4812 cs.bits.hdw.config_err = 1;
4813 else if (err_id == NXGE_FM_EREPORT_RDMC_RCRINCON)
4814 cs.bits.hdw.rcrincon = 1;
4815 else if (err_id == NXGE_FM_EREPORT_RDMC_RCRFULL)
4816 cs.bits.hdw.rcrfull = 1;
4817 else if (err_id == NXGE_FM_EREPORT_RDMC_RBRFULL)
4818 cs.bits.hdw.rbrfull = 1;
4819 else if (err_id == NXGE_FM_EREPORT_RDMC_RBRLOGPAGE)
4820 cs.bits.hdw.rbrlogpage = 1;
4821 else if (err_id == NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE)
4822 cs.bits.hdw.cfiglogpage = 1;
4823 cmn_err(CE_NOTE, "!Write 0x%lx to RX_DMA_CTL_STAT_DBG_REG\n",
4824 cs.value);
4825 RXDMA_REG_WRITE64(nxgep->npi_handle, RX_DMA_CTL_STAT_DBG_REG,
4826 chan, cs.value);
4827 break;
4828 case NXGE_FM_EREPORT_RDMC_ID_MISMATCH:
4829 case NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR:
4830 case NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR:
4831 cdfs.value = 0;
4832 if (err_id == NXGE_FM_EREPORT_RDMC_ID_MISMATCH)
4833 cdfs.bits.ldw.id_mismatch = (1 << nxgep->mac.portnum);
4834 else if (err_id == NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR)
4835 cdfs.bits.ldw.zcp_eop_err = (1 << nxgep->mac.portnum);
4836 else if (err_id == NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR)
4837 cdfs.bits.ldw.ipp_eop_err = (1 << nxgep->mac.portnum);
4838 cmn_err(CE_NOTE,
4839 "!Write 0x%lx to RX_CTL_DAT_FIFO_STAT_DBG_REG\n",
4840 cdfs.value);
4841 NXGE_REG_WR64(nxgep->npi_handle,
4842 RX_CTL_DAT_FIFO_STAT_DBG_REG, cdfs.value);
4843 break;
4844 case NXGE_FM_EREPORT_RDMC_DCF_ERR:
4845 break;
4846 case NXGE_FM_EREPORT_RDMC_RCR_ERR:
4847 break;
4848 }
4849 }
4850
4851 static void
4852 nxge_rxdma_databuf_free(p_rx_rbr_ring_t rbr_p)
4853 {
4854 rxring_info_t *ring_info;
4855 int index;
4856 uint32_t chunk_size;
4857 uint64_t kaddr;
4858 uint_t num_blocks;
4859
4860 NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_rxdma_databuf_free"));
4861
4862 if (rbr_p == NULL) {
4863 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
4864 "==> nxge_rxdma_databuf_free: NULL rbr pointer"));
4865 return;
4866 }
4867
4868 if (rbr_p->rbr_alloc_type == DDI_MEM_ALLOC) {
4869 NXGE_DEBUG_MSG((NULL, DMA_CTL,
4870 "<== nxge_rxdma_databuf_free: DDI"));
4871 return;
4872 }
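	/*
	 * For KMEM_ALLOC and CONTIG_MEM_ALLOC rings, walk the chunk
	 * table kept in ring_info and free each chunk with
	 * nxge_free_buf(); DDI-allocated buffers (handled above) are
	 * presumably released when their DMA handles are torn down
	 * elsewhere.
	 */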
4873
4874 ring_info = rbr_p->ring_info;
4875 if (ring_info == NULL) {
4876 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
4877 "==> nxge_rxdma_databuf_free: NULL ring info"));
4878 return;
4879 }
4880 num_blocks = rbr_p->num_blocks;
4881 for (index = 0; index < num_blocks; index++) {
4882 kaddr = ring_info->buffer[index].kaddr;
4883 chunk_size = ring_info->buffer[index].buf_size;
4884 NXGE_DEBUG_MSG((NULL, DMA_CTL,
4885 "==> nxge_rxdma_databuf_free: free chunk %d "
4886 "kaddrp $%p chunk size %d",
4887 index, kaddr, chunk_size));
4888 if (kaddr == 0)
4889 continue;
4890 nxge_free_buf(rbr_p->rbr_alloc_type, kaddr, chunk_size);
4891 ring_info->buffer[index].kaddr = 0;
4892 }
4893
4894 NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_rxdma_databuf_free"));
4895 }
4896
4897 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
4898 extern void contig_mem_free(void *, size_t);
4899 #endif
4900
4901 void
4902 nxge_free_buf(buf_alloc_type_t alloc_type, uint64_t kaddr, uint32_t buf_size)
4903 {
4904 NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_free_buf"));
4905
4906 if (kaddr == 0 || !buf_size) {
4907 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
4908 "==> nxge_free_buf: invalid kaddr $%p size to free %d",
4909 kaddr, buf_size));
4910 return;
4911 }
4912
4913 switch (alloc_type) {
4914 case KMEM_ALLOC:
4915 NXGE_DEBUG_MSG((NULL, DMA_CTL,
4916 "==> nxge_free_buf: freeing kmem $%p size %d",
4917 kaddr, buf_size));
4918 KMEM_FREE((void *)kaddr, buf_size);
4919 break;
4920
4921 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
4922 case CONTIG_MEM_ALLOC:
4923 NXGE_DEBUG_MSG((NULL, DMA_CTL,
4924 "==> nxge_free_buf: freeing contig_mem kaddr $%p size %d",
4925 kaddr, buf_size));
4926 contig_mem_free((void *)kaddr, buf_size);
4927 break;
4928 #endif
4929
4930 default:
4931 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
4932 "<== nxge_free_buf: unsupported alloc type %d",
4933 alloc_type));
4934 return;
4935 }
4936
4937 NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_free_buf"));
4938 }
4939