xref: /illumos-gate/usr/src/uts/common/io/nxge/npi/npi_rxdma.c (revision 08516594b0e540dc0f415fa7ae31f54d943a0913)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 #include <npi_rxdma.h>
29 #include <nxge_common.h>
30 
31 #define	 RXDMA_RESET_TRY_COUNT	4
32 #define	 RXDMA_RESET_DELAY	5
33 
34 #define	 RXDMA_OP_DISABLE	0
35 #define	 RXDMA_OP_ENABLE	1
36 #define	 RXDMA_OP_RESET	2
37 
38 #define	 RCR_TIMEOUT_ENABLE	1
39 #define	 RCR_TIMEOUT_DISABLE	2
40 #define	 RCR_THRESHOLD	4
41 
42 /* assume weight is in units of byte frames */
43 #define	WEIGHT_FACTOR 3/2
44 
45 uint64_t rdc_dmc_offset[] = {
46 	RXDMA_CFIG1_REG, RXDMA_CFIG2_REG, RBR_CFIG_A_REG, RBR_CFIG_B_REG,
47 	RBR_KICK_REG, RBR_STAT_REG, RBR_HDH_REG, RBR_HDL_REG,
48 	RCRCFIG_A_REG, RCRCFIG_B_REG, RCRSTAT_A_REG, RCRSTAT_B_REG,
49 	RCRSTAT_C_REG, RX_DMA_ENT_MSK_REG, RX_DMA_CTL_STAT_REG, RCR_FLSH_REG,
50 	RXMISC_DISCARD_REG
51 };
52 
53 const char *rdc_dmc_name[] = {
54 	"RXDMA_CFIG1", "RXDMA_CFIG2", "RBR_CFIG_A", "RBR_CFIG_B",
55 	"RBR_KICK", "RBR_STAT", "RBR_HDH", "RBR_HDL",
56 	"RCRCFIG_A", "RCRCFIG_B", "RCRSTAT_A", "RCRSTAT_B",
57 	"RCRSTAT_C", "RX_DMA_ENT_MSK", "RX_DMA_CTL_STAT", "RCR_FLSH",
58 	"RXMISC_DISCARD"
59 };
60 
61 uint64_t rdc_fzc_offset [] = {
62 	RX_LOG_PAGE_VLD_REG, RX_LOG_PAGE_MASK1_REG, RX_LOG_PAGE_VAL1_REG,
63 	RX_LOG_PAGE_MASK2_REG, RX_LOG_PAGE_VAL2_REG, RX_LOG_PAGE_RELO1_REG,
64 	RX_LOG_PAGE_RELO2_REG, RX_LOG_PAGE_HDL_REG, RDC_RED_PARA_REG,
65 	RED_DIS_CNT_REG
66 };
67 
68 
69 const char *rdc_fzc_name [] = {
70 	"RX_LOG_PAGE_VLD", "RX_LOG_PAGE_MASK1", "RX_LOG_PAGE_VAL1",
71 	"RX_LOG_PAGE_MASK2", "RX_LOG_PAGE_VAL2", "RX_LOG_PAGE_RELO1",
72 	"RX_LOG_PAGE_RELO2", "RX_LOG_PAGE_HDL", "RDC_RED_PARA", "RED_DIS_CNT"
73 };
74 
75 
76 /*
77  * Dump the MEM_ADD register first so all the data registers
78  * will have valid data buffer pointers.
79  */
80 uint64_t rx_fzc_offset[] = {
81 	RX_DMA_CK_DIV_REG, DEF_PT0_RDC_REG, DEF_PT1_RDC_REG, DEF_PT2_RDC_REG,
82 	DEF_PT3_RDC_REG, RX_ADDR_MD_REG, PT_DRR_WT0_REG, PT_DRR_WT1_REG,
83 	PT_DRR_WT2_REG, PT_DRR_WT3_REG, PT_USE0_REG, PT_USE1_REG,
84 	PT_USE2_REG, PT_USE3_REG, RED_RAN_INIT_REG, RX_ADDR_MD_REG,
85 	RDMC_PRE_PAR_ERR_REG, RDMC_SHA_PAR_ERR_REG,
86 	RDMC_MEM_DATA4_REG, RDMC_MEM_DATA3_REG, RDMC_MEM_DATA2_REG,
87 	RDMC_MEM_DATA1_REG, RDMC_MEM_DATA0_REG,
88 	RDMC_MEM_ADDR_REG,
89 	RX_CTL_DAT_FIFO_STAT_REG, RX_CTL_DAT_FIFO_MASK_REG,
90 	RX_CTL_DAT_FIFO_STAT_DBG_REG,
91 	RDMC_TRAINING_VECTOR_REG,
92 };
93 
94 
95 const char *rx_fzc_name[] = {
96 	"RX_DMA_CK_DIV", "DEF_PT0_RDC", "DEF_PT1_RDC", "DEF_PT2_RDC",
97 	"DEF_PT3_RDC", "RX_ADDR_MD", "PT_DRR_WT0", "PT_DRR_WT1",
98 	"PT_DRR_WT2", "PT_DRR_WT3", "PT_USE0", "PT_USE1",
99 	"PT_USE2", "PT_USE3", "RED_RAN_INIT", "RX_ADDR_MD",
100 	"RDMC_PRE_PAR_ERR", "RDMC_SHA_PAR_ERR",
101 	"RDMC_MEM_DATA4", "RDMC_MEM_DATA3", "RDMC_MEM_DATA2",
102 	"RDMC_MEM_DATA1", "RDMC_MEM_DATA0",
103 	"RDMC_MEM_ADDR",
104 	"RX_CTL_DAT_FIFO_STAT", "RX_CTL_DAT_FIFO_MASK",
105 	"RDMC_TRAINING_VECTOR_REG",
106 	"RX_CTL_DAT_FIFO_STAT_DBG_REG"
107 };
108 
109 
110 npi_status_t
111 npi_rxdma_cfg_rdc_ctl(npi_handle_t handle, uint8_t rdc, uint8_t op);
112 npi_status_t
113 npi_rxdma_cfg_rdc_rcr_ctl(npi_handle_t handle, uint8_t rdc, uint8_t op,
114 				uint16_t param);
115 
116 
117 /*
118  * npi_rxdma_dump_rdc_regs
119  * Dumps the contents of rdc csrs and fzc registers
120  *
121  * Input:
122  *      handle:	opaque handle interpreted by the underlying OS
123  *      rdc:	RX DMA Channel number
124  *
125  * return:
126  *     NPI_SUCCESS
127  *     NPI_RXDMA_RDC_INVALID
128  *
129  */
130 npi_status_t
131 npi_rxdma_dump_rdc_regs(npi_handle_t handle, uint8_t rdc)
132 {
133 
134 	uint64_t value, offset;
135 	int num_regs, i;
136 #ifdef NPI_DEBUG
137 	extern uint64_t npi_debug_level;
138 	uint64_t old_npi_debug_level = npi_debug_level;
139 #endif
140 	ASSERT(RXDMA_CHANNEL_VALID(rdc));
141 	if (!RXDMA_CHANNEL_VALID(rdc)) {
142 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
143 			    "npi_rxdma_dump_rdc_regs"
144 			    " Illegal RDC number %d \n",
145 			    rdc));
146 		return (NPI_RXDMA_RDC_INVALID);
147 	}
148 #ifdef NPI_DEBUG
149 	npi_debug_level |= DUMP_ALWAYS;
150 #endif
151 	num_regs = sizeof (rdc_dmc_offset) / sizeof (uint64_t);
152 	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
153 			    "\nDMC Register Dump for Channel %d\n",
154 			    rdc));
155 	for (i = 0; i < num_regs; i++) {
156 		RXDMA_REG_READ64(handle, rdc_dmc_offset[i], rdc, &value);
157 		offset = NXGE_RXDMA_OFFSET(rdc_dmc_offset[i], handle.is_vraddr,
158 				rdc);
159 		NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
160 			"%08llx %s\t %08llx \n",
161 			offset, rdc_dmc_name[i], value));
162 	}
163 
164 	NPI_DEBUG_MSG((handle.function, DUMP_ALWAYS,
165 			    "\nFZC_DMC Register Dump for Channel %d\n",
166 			    rdc));
167 	num_regs = sizeof (rdc_fzc_offset) / sizeof (uint64_t);
168 
169 	for (i = 0; i < num_regs; i++) {
170 		offset = REG_FZC_RDC_OFFSET(rdc_fzc_offset[i], rdc);
171 		NXGE_REG_RD64(handle, offset, &value);
172 		NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
173 				    "%8llx %s\t %8llx \n",
174 				    rdc_fzc_offset[i], rdc_fzc_name[i],
175 				    value));
176 
177 	}
178 
179 	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
180 			    "\n Register Dump for Channel %d done\n",
181 			    rdc));
182 #ifdef NPI_DEBUG
183 	npi_debug_level = old_npi_debug_level;
184 #endif
185 	return (NPI_SUCCESS);
186 }
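
/*
 * Usage sketch (illustrative only, not taken from a caller in this
 * driver): a diagnostic path with a valid NPI handle could dump one
 * channel's registers as shown; the channel number is hypothetical.
 *
 *	npi_status_t rs;
 *
 *	rs = npi_rxdma_dump_rdc_regs(handle, 0);
 *	if (rs != NPI_SUCCESS)
 *		return (rs);
 */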
187 
188 /*
189  * npi_rxdma_dump_fzc_regs
190  * Dumps the contents of the common FZC_DMC registers
191  *
192  * Input:
193  *      handle:	opaque handle interpreted by the underlying OS
194  *
195  * return:
196  *     NPI_SUCCESS
197  */
198 npi_status_t
199 npi_rxdma_dump_fzc_regs(npi_handle_t handle)
200 {
201 
202 	uint64_t value;
203 	int num_regs, i;
204 
205 
206 	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
207 			    "\nFZC_DMC Common Register Dump\n"));
208 	num_regs = sizeof (rx_fzc_offset) / sizeof (uint64_t);
209 
210 	for (i = 0; i < num_regs; i++) {
211 		NXGE_REG_RD64(handle, rx_fzc_offset[i], &value);
212 		NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
213 			"0x%08llx %s\t 0x%08llx \n",
214 			    rx_fzc_offset[i],
215 			rx_fzc_name[i], value));
216 	}
217 	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
218 			    "\n FZC_DMC Register Dump Done \n"));
219 
220 	return (NPI_SUCCESS);
221 }
222 
223 
224 
225 /*
226  * per rdc config functions
227  */
228 npi_status_t
229 npi_rxdma_cfg_logical_page_disable(npi_handle_t handle, uint8_t rdc,
230 				    uint8_t page_num)
231 {
232 	log_page_vld_t page_vld;
233 	uint64_t valid_offset;
234 
235 	ASSERT(RXDMA_CHANNEL_VALID(rdc));
236 	if (!RXDMA_CHANNEL_VALID(rdc)) {
237 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
238 				    "rxdma_cfg_logical_page_disable"
239 				    " Illegal RDC number %d \n",
240 				    rdc));
241 		return (NPI_RXDMA_RDC_INVALID);
242 	}
243 
244 	ASSERT(RXDMA_PAGE_VALID(page_num));
245 	if (!RXDMA_PAGE_VALID(page_num)) {
246 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
247 				    "rxdma_cfg_logical_page_disable"
248 				    " Illegal page number %d \n",
249 				    page_num));
250 		return (NPI_RXDMA_PAGE_INVALID);
251 	}
252 
253 	valid_offset = REG_FZC_RDC_OFFSET(RX_LOG_PAGE_VLD_REG, rdc);
254 	NXGE_REG_RD64(handle, valid_offset, &page_vld.value);
255 
256 	if (page_num == 0)
257 		page_vld.bits.ldw.page0 = 0;
258 
259 	if (page_num == 1)
260 		page_vld.bits.ldw.page1 = 0;
261 
262 	NXGE_REG_WR64(handle, valid_offset, page_vld.value);
263 	return (NPI_SUCCESS);
264 
265 }
266 
267 npi_status_t
268 npi_rxdma_cfg_logical_page(npi_handle_t handle, uint8_t rdc,
269 			    dma_log_page_t *pg_cfg)
270 {
271 	log_page_vld_t page_vld;
272 	log_page_mask_t page_mask;
273 	log_page_value_t page_value;
274 	log_page_relo_t page_reloc;
275 	uint64_t value_offset, reloc_offset, mask_offset;
276 	uint64_t valid_offset;
277 
278 	ASSERT(RXDMA_CHANNEL_VALID(rdc));
279 	if (!RXDMA_CHANNEL_VALID(rdc)) {
280 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
281 				    " rxdma_cfg_logical_page"
282 				    " Illegal RDC number %d \n",
283 				    rdc));
284 		return (NPI_RXDMA_RDC_INVALID);
285 	}
286 
287 	ASSERT(RXDMA_PAGE_VALID(pg_cfg->page_num));
288 	if (!RXDMA_PAGE_VALID(pg_cfg->page_num)) {
289 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
290 				    " rxdma_cfg_logical_page"
291 				    " Illegal page number %d \n",
292 				    pg_cfg->page_num));
293 		return (NPI_RXDMA_PAGE_INVALID);
294 	}
295 
296 	valid_offset = REG_FZC_RDC_OFFSET(RX_LOG_PAGE_VLD_REG, rdc);
297 	NXGE_REG_RD64(handle, valid_offset, &page_vld.value);
298 
299 	if (!pg_cfg->valid) {
300 		if (pg_cfg->page_num == 0)
301 			page_vld.bits.ldw.page0 = 0;
302 
303 		if (pg_cfg->page_num == 1)
304 			page_vld.bits.ldw.page1 = 0;
305 		NXGE_REG_WR64(handle, valid_offset, page_vld.value);
306 		return (NPI_SUCCESS);
307 	}
308 
309 	if (pg_cfg->page_num == 0) {
310 		mask_offset = REG_FZC_RDC_OFFSET(RX_LOG_PAGE_MASK1_REG, rdc);
311 		value_offset = REG_FZC_RDC_OFFSET(RX_LOG_PAGE_VAL1_REG, rdc);
312 		reloc_offset = REG_FZC_RDC_OFFSET(RX_LOG_PAGE_RELO1_REG, rdc);
313 		page_vld.bits.ldw.page0 = 1;
314 	}
315 
316 	if (pg_cfg->page_num == 1) {
317 		mask_offset = REG_FZC_RDC_OFFSET(RX_LOG_PAGE_MASK2_REG, rdc);
318 		value_offset = REG_FZC_RDC_OFFSET(RX_LOG_PAGE_VAL2_REG, rdc);
319 		reloc_offset = REG_FZC_RDC_OFFSET(RX_LOG_PAGE_RELO2_REG, rdc);
320 		page_vld.bits.ldw.page1 = 1;
321 	}
322 
323 
324 	page_vld.bits.ldw.func = pg_cfg->func_num;
325 
326 	page_mask.value = 0;
327 	page_value.value = 0;
328 	page_reloc.value = 0;
329 
330 
331 	page_mask.bits.ldw.mask = pg_cfg->mask >> LOG_PAGE_ADDR_SHIFT;
332 	page_value.bits.ldw.value = pg_cfg->value >> LOG_PAGE_ADDR_SHIFT;
333 	page_reloc.bits.ldw.relo = pg_cfg->reloc >> LOG_PAGE_ADDR_SHIFT;
334 
335 
336 	NXGE_REG_WR64(handle, mask_offset, page_mask.value);
337 	NXGE_REG_WR64(handle, value_offset, page_value.value);
338 	NXGE_REG_WR64(handle, reloc_offset, page_reloc.value);
339 
340 
341 /* enable the logical page */
342 	NXGE_REG_WR64(handle, valid_offset, page_vld.value);
343 	return (NPI_SUCCESS);
344 }
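
/*
 * Usage sketch (illustrative only): configuring logical page 0 of an
 * RDC from a dma_log_page_t.  The mask/value/reloc numbers below are
 * hypothetical placeholders, not values used by this driver.
 *
 *	dma_log_page_t pg;
 *
 *	pg.page_num = 0;
 *	pg.valid = 1;
 *	pg.func_num = 0;
 *	pg.mask = 0xfffff80000ULL;
 *	pg.value = 0x0000080000ULL;
 *	pg.reloc = 0x0000100000ULL;
 *	(void) npi_rxdma_cfg_logical_page(handle, rdc, &pg);
 */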
345 
346 npi_status_t
347 npi_rxdma_cfg_logical_page_handle(npi_handle_t handle, uint8_t rdc,
348 				    uint64_t page_handle)
349 {
350 	uint64_t offset;
351 	log_page_hdl_t page_hdl;
352 
353 	ASSERT(RXDMA_CHANNEL_VALID(rdc));
354 	if (!RXDMA_CHANNEL_VALID(rdc)) {
355 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
356 		    "rxdma_cfg_logical_page_handle"
357 		    " Illegal RDC number %d \n", rdc));
358 		return (NPI_RXDMA_RDC_INVALID);
359 	}
360 
361 
362 	page_hdl.value = 0;
363 
364 	page_hdl.bits.ldw.handle = (uint32_t)page_handle;
365 	offset = REG_FZC_RDC_OFFSET(RX_LOG_PAGE_HDL_REG, rdc);
366 	NXGE_REG_WR64(handle, offset, page_hdl.value);
367 
368 	return (NPI_SUCCESS);
369 }
370 
371 /*
372  * RX DMA functions
373  */
374 npi_status_t
375 npi_rxdma_cfg_rdc_ctl(npi_handle_t handle, uint8_t rdc, uint8_t op)
376 {
377 
378 	rxdma_cfig1_t cfg;
379 	uint32_t count = RXDMA_RESET_TRY_COUNT;
380 	uint32_t delay_time = RXDMA_RESET_DELAY;
381 	uint32_t error = NPI_RXDMA_ERROR_ENCODE(NPI_RXDMA_RESET_ERR, rdc);
382 
383 	ASSERT(RXDMA_CHANNEL_VALID(rdc));
384 	if (!RXDMA_CHANNEL_VALID(rdc)) {
385 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
386 				    "npi_rxdma_cfg_rdc_ctl"
387 				    " Illegal RDC number %d \n", rdc));
388 		return (NPI_RXDMA_RDC_INVALID);
389 	}
390 
391 
392 	switch (op) {
393 		case RXDMA_OP_ENABLE:
394 			RXDMA_REG_READ64(handle, RXDMA_CFIG1_REG, rdc,
395 						&cfg.value);
396 			cfg.bits.ldw.en = 1;
397 			RXDMA_REG_WRITE64(handle, RXDMA_CFIG1_REG,
398 					    rdc, cfg.value);
399 
400 			NXGE_DELAY(delay_time);
401 			RXDMA_REG_READ64(handle, RXDMA_CFIG1_REG, rdc,
402 						&cfg.value);
403 			while ((count--) && (cfg.bits.ldw.qst == 0)) {
404 				NXGE_DELAY(delay_time);
405 				RXDMA_REG_READ64(handle, RXDMA_CFIG1_REG, rdc,
406 						&cfg.value);
407 			}
408 
409 			if (cfg.bits.ldw.qst == 0) {
410 				NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
411 				    " npi_rxdma_cfg_rdc_ctl"
412 				    " RXDMA_OP_ENABLE Failed for RDC %d \n",
413 				    rdc));
414 				return (error);
415 			}
416 
417 			break;
418 		case RXDMA_OP_DISABLE:
419 			RXDMA_REG_READ64(handle, RXDMA_CFIG1_REG, rdc,
420 						&cfg.value);
421 			cfg.bits.ldw.en = 0;
422 			RXDMA_REG_WRITE64(handle, RXDMA_CFIG1_REG,
423 					    rdc, cfg.value);
424 
425 			NXGE_DELAY(delay_time);
426 			RXDMA_REG_READ64(handle, RXDMA_CFIG1_REG, rdc,
427 						&cfg.value);
428 			while ((count--) && (cfg.bits.ldw.qst == 0)) {
429 				NXGE_DELAY(delay_time);
430 				RXDMA_REG_READ64(handle, RXDMA_CFIG1_REG, rdc,
431 						&cfg.value);
432 			}
433 			if (cfg.bits.ldw.qst == 0) {
434 				NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
435 				    " npi_rxdma_cfg_rdc_ctl"
436 				    " RXDMA_OP_DISABLE Failed for RDC %d \n",
437 				    rdc));
438 				return (error);
439 			}
440 
441 			break;
442 		case RXDMA_OP_RESET:
443 			cfg.value = 0;
444 			cfg.bits.ldw.rst = 1;
445 			RXDMA_REG_WRITE64(handle,
446 					    RXDMA_CFIG1_REG,
447 					    rdc, cfg.value);
448 			NXGE_DELAY(delay_time);
449 			RXDMA_REG_READ64(handle, RXDMA_CFIG1_REG, rdc,
450 						&cfg.value);
451 			while ((count--) && (cfg.bits.ldw.rst)) {
452 				NXGE_DELAY(delay_time);
453 				RXDMA_REG_READ64(handle, RXDMA_CFIG1_REG, rdc,
454 						&cfg.value);
455 			}
456 			if (count == 0) {
457 				NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
458 					    " npi_rxdma_cfg_rdc_ctl"
459 					    " Reset Failed for RDC %d \n",
460 					    rdc));
461 				return (error);
462 			}
463 			break;
464 		default:
465 			return (NPI_RXDMA_SW_PARAM_ERROR);
466 	}
467 
468 	return (NPI_SUCCESS);
469 }
470 
471 npi_status_t
472 npi_rxdma_cfg_rdc_enable(npi_handle_t handle, uint8_t rdc)
473 {
474 	return (npi_rxdma_cfg_rdc_ctl(handle, rdc, RXDMA_OP_ENABLE));
475 }
476 
477 npi_status_t
478 npi_rxdma_cfg_rdc_disable(npi_handle_t handle, uint8_t rdc)
479 {
480 	return (npi_rxdma_cfg_rdc_ctl(handle, rdc, RXDMA_OP_DISABLE));
481 }
482 
483 npi_status_t
484 npi_rxdma_cfg_rdc_reset(npi_handle_t handle, uint8_t rdc)
485 {
486 	return (npi_rxdma_cfg_rdc_ctl(handle, rdc, RXDMA_OP_RESET));
487 }
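
/*
 * Usage sketch (illustrative only): the wrappers above are typically
 * used as reset -> configure -> enable.  The error handling shown is
 * hypothetical.
 *
 *	if (npi_rxdma_cfg_rdc_reset(handle, rdc) != NPI_SUCCESS)
 *		return (NPI_FAILURE);
 *	... program the ring with npi_rxdma_cfg_rdc_ring() ...
 *	if (npi_rxdma_cfg_rdc_enable(handle, rdc) != NPI_SUCCESS)
 *		return (NPI_FAILURE);
 */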
488 
489 /*
490  * npi_rxdma_cfg_default_port_rdc()
491  * Set the default rdc for the port
492  *
493  * Inputs:
494  *	handle:		register handle interpreted by the underlying OS
495  *	portnm:		Physical Port Number
496  *	rdc:	RX DMA Channel number
497  *
498  * Return:
499  * NPI_SUCCESS
500  * NPI_RXDMA_RDC_INVALID
501  * NPI_RXDMA_PORT_INVALID
502  *
503  */
504 npi_status_t npi_rxdma_cfg_default_port_rdc(npi_handle_t handle,
505 				    uint8_t portnm, uint8_t rdc)
506 {
507 
508 	uint64_t offset;
509 	def_pt_rdc_t cfg;
510 
511 	ASSERT(RXDMA_CHANNEL_VALID(rdc));
512 	if (!RXDMA_CHANNEL_VALID(rdc)) {
513 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
514 				    "rxdma_cfg_default_port_rdc"
515 				    " Illegal RDC number %d \n",
516 				    rdc));
517 		return (NPI_RXDMA_RDC_INVALID);
518 	}
519 
520 	ASSERT(RXDMA_PORT_VALID(portnm));
521 	if (!RXDMA_PORT_VALID(portnm)) {
522 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
523 				    "rxdma_cfg_default_port_rdc"
524 				    " Illegal Port number %d \n",
525 				    portnm));
526 		return (NPI_RXDMA_PORT_INVALID);
527 	}
528 
529 	offset = DEF_PT_RDC_REG(portnm);
530 	cfg.value = 0;
531 	cfg.bits.ldw.rdc = rdc;
532 	NXGE_REG_WR64(handle, offset, cfg.value);
533 	return (NPI_SUCCESS);
534 }
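
/*
 * Usage sketch (illustrative only): make RDC 4 the default channel for
 * physical port 0.  The port and channel numbers are hypothetical.
 *
 *	(void) npi_rxdma_cfg_default_port_rdc(handle, 0, 4);
 */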
535 
536 npi_status_t
537 npi_rxdma_cfg_rdc_rcr_ctl(npi_handle_t handle, uint8_t rdc,
538 			    uint8_t op, uint16_t param)
539 {
540 	rcrcfig_b_t rcr_cfgb;
541 
542 	ASSERT(RXDMA_CHANNEL_VALID(rdc));
543 	if (!RXDMA_CHANNEL_VALID(rdc)) {
544 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
545 				    "rxdma_cfg_rdc_rcr_ctl"
546 				    " Illegal RDC number %d \n",
547 				    rdc));
548 		return (NPI_RXDMA_RDC_INVALID);
549 	}
550 
551 
552 	RXDMA_REG_READ64(handle, RCRCFIG_B_REG, rdc, &rcr_cfgb.value);
553 
554 	switch (op) {
555 		case RCR_TIMEOUT_ENABLE:
556 			rcr_cfgb.bits.ldw.timeout = (uint8_t)param;
557 			rcr_cfgb.bits.ldw.entout = 1;
558 			break;
559 
560 		case RCR_THRESHOLD:
561 			rcr_cfgb.bits.ldw.pthres = param;
562 			break;
563 
564 		case RCR_TIMEOUT_DISABLE:
565 			rcr_cfgb.bits.ldw.entout = 0;
566 			break;
567 
568 		default:
569 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
570 				    "rxdma_cfg_rdc_rcr_ctl"
571 				    " Illegal opcode %x \n",
572 				    op));
573 		return (NPI_RXDMA_OPCODE_INVALID(rdc));
574 	}
575 
576 	RXDMA_REG_WRITE64(handle, RCRCFIG_B_REG, rdc, rcr_cfgb.value);
577 	return (NPI_SUCCESS);
578 }
579 
580 npi_status_t
581 npi_rxdma_cfg_rdc_rcr_timeout_disable(npi_handle_t handle, uint8_t rdc)
582 {
583 	return (npi_rxdma_cfg_rdc_rcr_ctl(handle, rdc,
584 	    RCR_TIMEOUT_DISABLE, 0));
585 }
586 
587 npi_status_t
588 npi_rxdma_cfg_rdc_rcr_threshold(npi_handle_t handle, uint8_t rdc,
589 				    uint16_t rcr_threshold)
590 {
591 	return (npi_rxdma_cfg_rdc_rcr_ctl(handle, rdc,
592 	    RCR_THRESHOLD, rcr_threshold));
593 
594 }
595 
596 npi_status_t
597 npi_rxdma_cfg_rdc_rcr_timeout(npi_handle_t handle, uint8_t rdc,
598 			    uint8_t rcr_timeout)
599 {
600 	return (npi_rxdma_cfg_rdc_rcr_ctl(handle, rdc,
601 	    RCR_TIMEOUT_ENABLE, rcr_timeout));
602 
603 }
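
/*
 * Usage sketch (illustrative only): program RCR interrupt coalescing
 * with a packet threshold and a timeout.  The values below are
 * hypothetical tuning numbers, not recommendations.
 *
 *	(void) npi_rxdma_cfg_rdc_rcr_threshold(handle, rdc, 8);
 *	(void) npi_rxdma_cfg_rdc_rcr_timeout(handle, rdc, 16);
 *
 * To fall back to threshold-only interrupts:
 *
 *	(void) npi_rxdma_cfg_rdc_rcr_timeout_disable(handle, rdc);
 */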
604 
605 /*
606  * npi_rxdma_cfg_rdc_ring()
607  * Configure The RDC channel Rcv Buffer Ring
608  */
609 npi_status_t
610 npi_rxdma_cfg_rdc_ring(npi_handle_t handle, uint8_t rdc,
611 			    rdc_desc_cfg_t *rdc_desc_cfg)
612 {
613 	rbr_cfig_a_t cfga;
614 	rbr_cfig_b_t cfgb;
615 	rxdma_cfig1_t cfg1;
616 	rxdma_cfig2_t cfg2;
617 	rcrcfig_a_t rcr_cfga;
618 	rcrcfig_b_t rcr_cfgb;
619 
620 	ASSERT(RXDMA_CHANNEL_VALID(rdc));
621 	if (!RXDMA_CHANNEL_VALID(rdc)) {
622 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
623 				    "rxdma_cfg_rdc_ring"
624 				    " Illegal RDC number %d \n",
625 				    rdc));
626 		return (NPI_RXDMA_RDC_INVALID);
627 	}
628 
629 
630 	cfga.value = 0;
631 	cfgb.value = 0;
632 	cfg1.value = 0;
633 	cfg2.value = 0;
634 
635 	if (rdc_desc_cfg->mbox_enable == 1) {
636 		cfg1.bits.ldw.mbaddr_h =
637 		    (rdc_desc_cfg->mbox_addr >> 32) & 0xfff;
638 		cfg2.bits.ldw.mbaddr =
639 		    ((rdc_desc_cfg->mbox_addr &
640 			    RXDMA_CFIG2_MBADDR_L_MASK) >>
641 			    RXDMA_CFIG2_MBADDR_L_SHIFT);
642 
643 
644 		/*
645 		 * Enable the RDC only after all the configurations
646 		 * have been set; otherwise a configuration fatal error
647 		 * will be returned (especially if the Hypervisor has
648 		 * set up the logical pages with non-zero values).
649 		 * This NPI function only sets up the configuration.
650 		 */
651 	}
652 
653 
654 	if (rdc_desc_cfg->full_hdr == 1)
655 		cfg2.bits.ldw.full_hdr = 1;
656 
657 	if (RXDMA_BUFF_OFFSET_VALID(rdc_desc_cfg->offset)) {
658 		cfg2.bits.ldw.offset = rdc_desc_cfg->offset;
659 	} else {
660 		cfg2.bits.ldw.offset = SW_OFFSET_NO_OFFSET;
661 	}
662 
663 		/* rbr config */
664 
665 	cfga.value = (rdc_desc_cfg->rbr_addr & (RBR_CFIG_A_STDADDR_MASK |
666 					    RBR_CFIG_A_STDADDR_BASE_MASK));
667 
668 	if ((rdc_desc_cfg->rbr_len < RBR_DEFAULT_MIN_LEN) ||
669 		    (rdc_desc_cfg->rbr_len > RBR_DEFAULT_MAX_LEN)) {
670 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
671 				    "npi_rxdma_cfg_rdc_ring"
672 				    " Illegal RBR Queue Length %d \n",
673 				    rdc_desc_cfg->rbr_len));
674 		return (NPI_RXDMA_ERROR_ENCODE(NPI_RXDMA_RBRSZIE_INVALID, rdc));
675 	}
676 
677 
678 	cfga.bits.hdw.len = rdc_desc_cfg->rbr_len;
679 	NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
680 		"npi_rxdma_cfg_rdc_ring"
681 		" CFGA 0x%llx hdw.len %d (RBR LEN %d)\n",
682 		cfga.value, cfga.bits.hdw.len,
683 		rdc_desc_cfg->rbr_len));
684 
685 	if (rdc_desc_cfg->page_size == SIZE_4KB)
686 		cfgb.bits.ldw.bksize = RBR_BKSIZE_4K;
687 	else if (rdc_desc_cfg->page_size == SIZE_8KB)
688 		cfgb.bits.ldw.bksize = RBR_BKSIZE_8K;
689 	else if (rdc_desc_cfg->page_size == SIZE_16KB)
690 		cfgb.bits.ldw.bksize = RBR_BKSIZE_16K;
691 	else if (rdc_desc_cfg->page_size == SIZE_32KB)
692 		cfgb.bits.ldw.bksize = RBR_BKSIZE_32K;
693 	else {
694 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
695 			    "rxdma_cfg_rdc_ring"
696 			    " blksize: Illegal buffer size %d \n",
697 			    rdc_desc_cfg->page_size));
698 		return (NPI_RXDMA_BUFSZIE_INVALID);
699 	}
700 
701 	if (rdc_desc_cfg->valid0) {
702 
703 		if (rdc_desc_cfg->size0 == SIZE_256B)
704 			cfgb.bits.ldw.bufsz0 = RBR_BUFSZ0_256B;
705 		else if (rdc_desc_cfg->size0 == SIZE_512B)
706 			cfgb.bits.ldw.bufsz0 = RBR_BUFSZ0_512B;
707 		else if (rdc_desc_cfg->size0 == SIZE_1KB)
708 			cfgb.bits.ldw.bufsz0 = RBR_BUFSZ0_1K;
709 		else if (rdc_desc_cfg->size0 == SIZE_2KB)
710 			cfgb.bits.ldw.bufsz0 = RBR_BUFSZ0_2K;
711 		else {
712 			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
713 				    " rxdma_cfg_rdc_ring"
714 				    " blksize0: Illegal buffer size %x \n",
715 				    rdc_desc_cfg->size0));
716 			return (NPI_RXDMA_BUFSZIE_INVALID);
717 		}
718 		cfgb.bits.ldw.vld0 = 1;
719 	} else {
720 		cfgb.bits.ldw.vld0 = 0;
721 	}
722 
723 
724 	if (rdc_desc_cfg->valid1) {
725 		if (rdc_desc_cfg->size1 == SIZE_1KB)
726 			cfgb.bits.ldw.bufsz1 = RBR_BUFSZ1_1K;
727 		else if (rdc_desc_cfg->size1 == SIZE_2KB)
728 			cfgb.bits.ldw.bufsz1 = RBR_BUFSZ1_2K;
729 		else if (rdc_desc_cfg->size1 == SIZE_4KB)
730 			cfgb.bits.ldw.bufsz1 = RBR_BUFSZ1_4K;
731 		else if (rdc_desc_cfg->size1 == SIZE_8KB)
732 			cfgb.bits.ldw.bufsz1 = RBR_BUFSZ1_8K;
733 		else {
734 			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
735 				    " rxdma_cfg_rdc_ring"
736 				    " blksize1: Illegal buffer size %x \n",
737 				    rdc_desc_cfg->size1));
738 			return (NPI_RXDMA_BUFSZIE_INVALID);
739 		}
740 		cfgb.bits.ldw.vld1 = 1;
741 	} else {
742 		cfgb.bits.ldw.vld1 = 0;
743 	}
744 
745 
746 	if (rdc_desc_cfg->valid2) {
747 		if (rdc_desc_cfg->size2 == SIZE_2KB)
748 			cfgb.bits.ldw.bufsz2 = RBR_BUFSZ2_2K;
749 		else if (rdc_desc_cfg->size2 == SIZE_4KB)
750 			cfgb.bits.ldw.bufsz2 = RBR_BUFSZ2_4K;
751 		else if (rdc_desc_cfg->size2 == SIZE_8KB)
752 			cfgb.bits.ldw.bufsz2 = RBR_BUFSZ2_8K;
753 		else if (rdc_desc_cfg->size2 == SIZE_16KB)
754 			cfgb.bits.ldw.bufsz2 = RBR_BUFSZ2_16K;
755 		else {
756 			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
757 				    " rxdma_cfg_rdc_ring"
758 				    " blksize2: Illegal buffer size %x \n",
759 				    rdc_desc_cfg->size2));
760 			return (NPI_RXDMA_BUFSZIE_INVALID);
761 		}
762 		cfgb.bits.ldw.vld2 = 1;
763 	} else {
764 		cfgb.bits.ldw.vld2 = 0;
765 	}
766 
767 
768 	rcr_cfga.value = (rdc_desc_cfg->rcr_addr &
769 			    (RCRCFIG_A_STADDR_MASK |
770 			    RCRCFIG_A_STADDR_BASE_MASK));
771 
772 
773 	if ((rdc_desc_cfg->rcr_len < RCR_DEFAULT_MIN_LEN) ||
774 		    (rdc_desc_cfg->rcr_len > NXGE_RCR_MAX)) {
775 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
776 			    " rxdma_cfg_rdc_ring"
777 			    " Illegal RCR Queue Length %d \n",
778 			    rdc_desc_cfg->rcr_len));
779 		return (NPI_RXDMA_ERROR_ENCODE(NPI_RXDMA_RCRSZIE_INVALID, rdc));
780 	}
781 
782 	rcr_cfga.bits.hdw.len = rdc_desc_cfg->rcr_len;
783 
784 
785 	rcr_cfgb.value = 0;
786 	if (rdc_desc_cfg->rcr_timeout_enable == 1) {
787 		/* check if the rcr timeout value is valid */
788 
789 		if (RXDMA_RCR_TO_VALID(rdc_desc_cfg->rcr_timeout)) {
790 			rcr_cfgb.bits.ldw.timeout = rdc_desc_cfg->rcr_timeout;
791 			rcr_cfgb.bits.ldw.entout = 1;
792 		} else {
793 			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
794 				    " rxdma_cfg_rdc_ring"
795 				    " Illegal RCR Timeout value %d \n",
796 				    rdc_desc_cfg->rcr_timeout));
797 			rcr_cfgb.bits.ldw.entout = 0;
798 		}
799 	} else {
800 		rcr_cfgb.bits.ldw.entout = 0;
801 	}
802 
803 		/* check if the rcr threshold value is valid */
804 	if (RXDMA_RCR_THRESH_VALID(rdc_desc_cfg->rcr_threshold)) {
805 		rcr_cfgb.bits.ldw.pthres = rdc_desc_cfg->rcr_threshold;
806 	} else {
807 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
808 			    " rxdma_cfg_rdc_ring"
809 			    " Illegal RCR Threshold value %d \n",
810 			    rdc_desc_cfg->rcr_threshold));
811 		rcr_cfgb.bits.ldw.pthres = 1;
812 	}
813 
814 		/* now do the actual HW configuration */
815 	RXDMA_REG_WRITE64(handle, RXDMA_CFIG1_REG, rdc, cfg1.value);
816 	RXDMA_REG_WRITE64(handle, RXDMA_CFIG2_REG, rdc, cfg2.value);
817 
818 
819 	RXDMA_REG_WRITE64(handle, RBR_CFIG_A_REG, rdc, cfga.value);
820 	RXDMA_REG_WRITE64(handle, RBR_CFIG_B_REG, rdc, cfgb.value);
821 
822 	RXDMA_REG_WRITE64(handle, RCRCFIG_A_REG, rdc, rcr_cfga.value);
823 	RXDMA_REG_WRITE64(handle, RCRCFIG_B_REG, rdc, rcr_cfgb.value);
824 
825 	return (NPI_SUCCESS);
826 
827 }
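
/*
 * Usage sketch (illustrative only): a minimal rdc_desc_cfg_t setup for
 * npi_rxdma_cfg_rdc_ring().  The DMA addresses, lengths and buffer
 * sizes are hypothetical; only fields referenced by the function above
 * are shown.
 *
 *	rdc_desc_cfg_t cfg;
 *
 *	bzero(&cfg, sizeof (cfg));
 *	cfg.mbox_enable = 1;
 *	cfg.mbox_addr = mbox_dma_addr;
 *	cfg.page_size = SIZE_8KB;
 *	cfg.valid0 = 1;
 *	cfg.size0 = SIZE_2KB;
 *	cfg.rbr_addr = rbr_dma_addr;
 *	cfg.rbr_len = RBR_DEFAULT_MIN_LEN;
 *	cfg.rcr_addr = rcr_dma_addr;
 *	cfg.rcr_len = RCR_DEFAULT_MIN_LEN;
 *	cfg.rcr_threshold = 1;
 *	cfg.rcr_timeout_enable = 0;
 *	(void) npi_rxdma_cfg_rdc_ring(handle, rdc, &cfg);
 */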
828 
829 /*
830  * npi_rxdma_red_discard_stat_get
831  * Gets the current discard count due to RED
832  * The counter overflow bit is cleared, if it has been set.
833  *
834  * Inputs:
835  *      handle:	opaque handle interpreted by the underlying OS
836  *	rdc:		RX DMA Channel number
837  *	cnt:	Ptr to structure to write current RDC discard stat
838  *
839  * Return:
840  * NPI_SUCCESS
841  * NPI_RXDMA_RDC_INVALID
842  *
843  */
844 npi_status_t
845 npi_rxdma_red_discard_stat_get(npi_handle_t handle, uint8_t rdc,
846 				    rx_disc_cnt_t *cnt)
847 {
848 	uint64_t offset;
849 
850 	ASSERT(RXDMA_CHANNEL_VALID(rdc));
851 	if (!RXDMA_CHANNEL_VALID(rdc)) {
852 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
853 				    " npi_rxdma_red_discard_stat_get"
854 				    " Illegal RDC Number %d \n",
855 				    rdc));
856 		return (NPI_RXDMA_RDC_INVALID);
857 	}
858 
859 	offset = RDC_RED_RDC_DISC_REG(rdc);
860 	NXGE_REG_RD64(handle, offset, &cnt->value);
861 	if (cnt->bits.ldw.oflow) {
862 		NPI_DEBUG_MSG((handle.function, NPI_ERR_CTL,
863 			    " npi_rxdma_red_discard_stat_get"
864 			    " Counter overflow for channel %d ",
865 			    " ..... clearing \n",
866 			    rdc));
867 		cnt->bits.ldw.oflow = 0;
868 		NXGE_REG_WR64(handle, offset, cnt->value);
869 		cnt->bits.ldw.oflow = 1;
870 	}
871 
872 	return (NPI_SUCCESS);
873 }
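
/*
 * Usage sketch (illustrative only): read the RED discard statistic for
 * one channel; the overflow bit is preserved in *cnt so the caller can
 * account for a wrapped counter.
 *
 *	rx_disc_cnt_t cnt;
 *
 *	if (npi_rxdma_red_discard_stat_get(handle, rdc, &cnt) ==
 *	    NPI_SUCCESS && cnt.bits.ldw.oflow) {
 *		... counter wrapped since the last read ...
 *	}
 */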
874 
875 /*
876  * npi_rxdma_red_discard_oflow_clear
877  * Clear RED discard counter overflow bit
878  *
879  * Inputs:
880  *      handle:	opaque handle interpreted by the underlying OS
881  *	rdc:		RX DMA Channel number
882  *
883  * Return:
884  * NPI_SUCCESS
885  * NPI_RXDMA_RDC_INVALID
886  *
887  */
888 npi_status_t
889 npi_rxdma_red_discard_oflow_clear(npi_handle_t handle, uint8_t rdc)
890 
891 {
892 	uint64_t offset;
893 	rx_disc_cnt_t cnt;
894 
895 	ASSERT(RXDMA_CHANNEL_VALID(rdc));
896 	if (!RXDMA_CHANNEL_VALID(rdc)) {
897 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
898 			    " npi_rxdma_red_discard_oflow_clear"
899 			    " Illegal RDC Number %d \n",
900 			    rdc));
901 		return (NPI_RXDMA_RDC_INVALID);
902 	}
903 
904 	offset = RDC_RED_RDC_DISC_REG(rdc);
905 	NXGE_REG_RD64(handle, offset, &cnt.value);
906 	if (cnt.bits.ldw.oflow) {
907 		NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
908 			    " npi_rxdma_red_discard_oflow_clear"
909 			    " Counter overflow for channel %d ",
910 			    " ..... clearing \n",
911 			    rdc));
912 		cnt.bits.ldw.oflow = 0;
913 		NXGE_REG_WR64(handle, offset, cnt.value);
914 	}
915 	return (NPI_SUCCESS);
916 }
917 
918 /*
919  * npi_rxdma_misc_discard_stat_get
920  * Gets the current discard count for the rdc due to
921  * buffer pool empty
922  * The counter overflow bit is cleared, if it has been set.
923  *
924  * Inputs:
925  *      handle:	opaque handle interpreted by the underlying OS
926  *	rdc:		RX DMA Channel number
927  *	cnt:	Ptr to structure to write current RDC discard stat
928  *
929  * Return:
930  * NPI_SUCCESS
931  * NPI_RXDMA_RDC_INVALID
932  *
933  */
934 npi_status_t
935 npi_rxdma_misc_discard_stat_get(npi_handle_t handle, uint8_t rdc,
936 				    rx_disc_cnt_t *cnt)
937 {
938 	ASSERT(RXDMA_CHANNEL_VALID(rdc));
939 	if (!RXDMA_CHANNEL_VALID(rdc)) {
940 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
941 				    " npi_rxdma_misc_discard_stat_get"
942 				    " Illegal RDC Number %d \n",
943 				    rdc));
944 		return (NPI_RXDMA_RDC_INVALID);
945 	}
946 
947 	RXDMA_REG_READ64(handle, RXMISC_DISCARD_REG, rdc, &cnt->value);
948 	if (cnt->bits.ldw.oflow) {
949 		NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
950 			    " npi_rxdma_misc_discard_stat_get"
951 			    " Counter overflow for channel %d ",
952 			    " ..... clearing \n",
953 			    rdc));
954 		cnt->bits.ldw.oflow = 0;
955 		RXDMA_REG_WRITE64(handle, RXMISC_DISCARD_REG, rdc, cnt->value);
956 		cnt->bits.ldw.oflow = 1;
957 	}
958 
959 	return (NPI_SUCCESS);
960 }
961 
962 /*
963  * npi_rxdma_misc_discard_oflow_clear
964  * Clear the overflow bit of the buffer pool empty discard counter
965  * for the rdc
967  *
968  * Inputs:
969  *      handle:	opaque handle interpreted by the underlying OS
970  *	rdc:		RX DMA Channel number
971  *
972  * Return:
973  * NPI_SUCCESS
974  * NPI_RXDMA_RDC_INVALID
975  *
976  */
977 npi_status_t
978 npi_rxdma_misc_discard_oflow_clear(npi_handle_t handle, uint8_t rdc)
979 {
980 	rx_disc_cnt_t cnt;
981 
982 	ASSERT(RXDMA_CHANNEL_VALID(rdc));
983 	if (!RXDMA_CHANNEL_VALID(rdc)) {
984 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
985 			    " npi_rxdma_misc_discard_oflow_clear"
986 			    " Illegal RDC Number %d \n",
987 			    rdc));
988 		return (NPI_RXDMA_RDC_INVALID);
989 	}
990 
991 	RXDMA_REG_READ64(handle, RXMISC_DISCARD_REG, rdc, &cnt.value);
992 	if (cnt.bits.ldw.oflow) {
993 		NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
994 			    " npi_rxdma_misc_discard_oflow_clear"
995 			    " Counter overflow for channel %d ",
996 			    " ..... clearing \n",
997 			    rdc));
998 		cnt.bits.ldw.oflow = 0;
999 		RXDMA_REG_WRITE64(handle, RXMISC_DISCARD_REG, rdc, cnt.value);
1000 	}
1001 
1002 	return (NPI_SUCCESS);
1003 }
1004 
1005 /*
1006  * npi_rxdma_ring_perr_stat_get
1007  * Gets the current RDC memory parity error status.
1008  * The error bits are cleared if they have been set.
1009  *
1010  * Inputs:
1011  * handle:	opaque handle interpreted by the underlying OS
1012  * pre_log:	Structure to write current RDC Prefetch memory
1013  *		Parity Error stat
1014  * sha_log:	Structure to write current RDC Shadow memory
1015  *		Parity Error stat
1016  *
1017  * Return:
1018  * NPI_SUCCESS
1019  *
1020  */
1021 npi_status_t
1022 npi_rxdma_ring_perr_stat_get(npi_handle_t handle,
1023 			    rdmc_par_err_log_t *pre_log,
1024 			    rdmc_par_err_log_t *sha_log)
1025 {
1026 	uint64_t pre_offset, sha_offset;
1027 	rdmc_par_err_log_t clr;
1028 	int clr_bits = 0;
1029 
1030 	pre_offset = RDMC_PRE_PAR_ERR_REG;
1031 	sha_offset = RDMC_SHA_PAR_ERR_REG;
1032 	NXGE_REG_RD64(handle, pre_offset, &pre_log->value);
1033 	NXGE_REG_RD64(handle, sha_offset, &sha_log->value);
1034 
1035 	clr.value = pre_log->value;
1036 	if (pre_log->bits.ldw.err) {
1037 		NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
1038 			    " npi_rxdma_ring_perr_stat_get"
1039 			    " PRE ERR Bit set ..... clearing \n"));
1040 		clr.bits.ldw.err = 0;
1041 		clr_bits++;
1042 	}
1043 
1044 	if (pre_log->bits.ldw.merr) {
1045 		NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
1046 			    " npi_rxdma_ring_perr_stat_get"
1047 			    " PRE MERR Bit set ..... clearing \n"));
1048 		clr.bits.ldw.merr = 0;
1049 		clr_bits++;
1050 	}
1051 
1052 	if (clr_bits) {
1053 		NXGE_REG_WR64(handle, pre_offset, clr.value);
1054 	}
1055 
1056 	clr_bits = 0;
1057 	clr.value = sha_log->value;
1058 	if (sha_log->bits.ldw.err) {
1059 		NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
1060 			    " npi_rxdma_ring_perr_stat_get"
1061 			    " SHA ERR Bit set ..... clearing \n"));
1062 		clr.bits.ldw.err = 0;
1063 		clr_bits++;
1064 	}
1065 
1066 	if (sha_log->bits.ldw.merr) {
1067 		NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
1068 			    " npi_rxdma_ring_perr_stat_get"
1069 			    " SHA MERR Bit set ..... clearing \n"));
1070 		clr.bits.ldw.merr = 0;
1071 		clr_bits++;
1072 	}
1073 
1074 	if (clr_bits) {
1075 		NXGE_REG_WR64(handle, sha_offset, clr.value);
1076 	}
1077 
1078 	return (NPI_SUCCESS);
1079 }
1080 
1081 /*
1082  * npi_rxdma_ring_perr_stat_clear
1083  * Clear the RDC memory parity error bits
1084  *
1085  * Inputs:
1086  *      handle:	opaque handle interpreted by the underlying OS
1087  * Return:
1088  * NPI_SUCCESS
1089  *
1090  */
1091 npi_status_t
1092 npi_rxdma_ring_perr_stat_clear(npi_handle_t handle)
1093 {
1094 	uint64_t pre_offset, sha_offset;
1095 	rdmc_par_err_log_t clr;
1096 	int clr_bits = 0;
1097 	pre_offset = RDMC_PRE_PAR_ERR_REG;
1098 	sha_offset = RDMC_SHA_PAR_ERR_REG;
1099 
1100 	NXGE_REG_RD64(handle, pre_offset, &clr.value);
1101 
1102 	if (clr.bits.ldw.err) {
1103 		NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
1104 			    " npi_rxdma_ring_perr_stat_get"
1105 			    " PRE ERR Bit set ..... clearing \n"));
1106 		clr.bits.ldw.err = 0;
1107 		clr_bits++;
1108 	}
1109 
1110 	if (clr.bits.ldw.merr) {
1111 		NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
1112 			    " npi_rxdma_ring_perr_stat_get"
1113 			    " PRE MERR Bit set ..... clearing \n"));
1114 		clr.bits.ldw.merr = 0;
1115 		clr_bits++;
1116 	}
1117 
1118 	if (clr_bits) {
1119 		NXGE_REG_WR64(handle, pre_offset, clr.value);
1120 	}
1121 
1122 	clr_bits = 0;
1123 	NXGE_REG_RD64(handle, sha_offset, &clr.value);
1124 	if (clr.bits.ldw.err) {
1125 		NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
1126 			    " npi_rxdma_ring_perr_stat_get"
1127 			    " SHA ERR Bit set ..... clearing \n"));
1128 		clr.bits.ldw.err = 0;
1129 		clr_bits++;
1130 	}
1131 
1132 	if (clr.bits.ldw.merr) {
1133 		NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
1134 			    " npi_rxdma_ring_perr_stat_get"
1135 			    " SHA MERR Bit set ..... clearing \n"));
1136 		clr.bits.ldw.merr = 0;
1137 		clr_bits++;
1138 	}
1139 
1140 	if (clr_bits) {
1141 		NXGE_REG_WR64(handle, sha_offset, clr.value);
1142 	}
1143 
1144 	return (NPI_SUCCESS);
1145 }
1146 
1147 /*
1148  * Access the RDMC Memory: used for debugging
1149  */
1150 npi_status_t
1151 npi_rxdma_rdmc_memory_io(npi_handle_t handle,
1152 			    rdmc_mem_access_t *data, uint8_t op)
1153 {
1154 	uint64_t d0_offset, d1_offset, d2_offset, d3_offset, d4_offset;
1155 	uint64_t addr_offset;
1156 	rdmc_mem_addr_t addr;
1157 	rdmc_mem_data_t d0, d1, d2, d3, d4;
1158 	d0.value = 0;
1159 	d1.value = 0;
1160 	d2.value = 0;
1161 	d3.value = 0;
1162 	d4.value = 0;
1163 	addr.value = 0;
1164 
1165 
1166 	if ((data->location != RDMC_MEM_ADDR_PREFETCH) &&
1167 		    (data->location != RDMC_MEM_ADDR_SHADOW)) {
1168 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
1169 			    " npi_rxdma_rdmc_memory_io"
1170 			    " Illegal memory Type %x \n",
1171 			    data->location));
1172 		return (NPI_RXDMA_OPCODE_INVALID(0));
1173 	}
1174 
1175 	addr_offset = RDMC_MEM_ADDR_REG;
1176 	addr.bits.ldw.addr = data->addr;
1177 	addr.bits.ldw.pre_shad = data->location;
1178 
1179 	d0_offset = RDMC_MEM_DATA0_REG;
1180 	d1_offset = RDMC_MEM_DATA1_REG;
1181 	d2_offset = RDMC_MEM_DATA2_REG;
1182 	d3_offset = RDMC_MEM_DATA3_REG;
1183 	d4_offset = RDMC_MEM_DATA4_REG;
1184 
1185 
1186 	if (op == RDMC_MEM_WRITE) {
1187 		d0.bits.ldw.data = data->data[0];
1188 		d1.bits.ldw.data = data->data[1];
1189 		d2.bits.ldw.data = data->data[2];
1190 		d3.bits.ldw.data = data->data[3];
1191 		d4.bits.ldw.data = data->data[4];
1192 		NXGE_REG_WR64(handle, addr_offset, addr.value);
1193 		NXGE_REG_WR64(handle, d0_offset, d0.value);
1194 		NXGE_REG_WR64(handle, d1_offset, d1.value);
1195 		NXGE_REG_WR64(handle, d2_offset, d2.value);
1196 		NXGE_REG_WR64(handle, d3_offset, d3.value);
1197 		NXGE_REG_WR64(handle, d4_offset, d4.value);
1198 	} else if (op == RDMC_MEM_READ) {
1201 		NXGE_REG_WR64(handle, addr_offset, addr.value);
1202 		NXGE_REG_RD64(handle, d4_offset, &d4.value);
1203 		NXGE_REG_RD64(handle, d3_offset, &d3.value);
1204 		NXGE_REG_RD64(handle, d2_offset, &d2.value);
1205 		NXGE_REG_RD64(handle, d1_offset, &d1.value);
1206 		NXGE_REG_RD64(handle, d0_offset, &d0.value);
1207 
1208 		data->data[0] = d0.bits.ldw.data;
1209 		data->data[1] = d1.bits.ldw.data;
1210 		data->data[2] = d2.bits.ldw.data;
1211 		data->data[3] = d3.bits.ldw.data;
1212 		data->data[4] = d4.bits.ldw.data;
1213 	} else {
1214 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
1215 			    " npi_rxdma_rdmc_memory_io"
1216 			    " Illegal opcode %x \n",
1217 			    op));
1218 		return (NPI_RXDMA_OPCODE_INVALID(0));
1219 
1220 	}
1221 
1222 	return (NPI_SUCCESS);
1223 }
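
/*
 * Usage sketch (illustrative only): read one entry of the prefetch
 * memory for debugging.  The entry address is a hypothetical
 * placeholder.
 *
 *	rdmc_mem_access_t mem;
 *
 *	mem.location = RDMC_MEM_ADDR_PREFETCH;
 *	mem.addr = 0;
 *	if (npi_rxdma_rdmc_memory_io(handle, &mem, RDMC_MEM_READ) ==
 *	    NPI_SUCCESS) {
 *		... mem.data[0] .. mem.data[4] hold the entry ...
 *	}
 */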
1224 
1225 /*
1226  * system wide conf functions
1227  */
1228 npi_status_t
1229 npi_rxdma_cfg_clock_div_set(npi_handle_t handle, uint16_t count)
1230 {
1231 	uint64_t offset;
1232 	rx_dma_ck_div_t clk_div;
1233 
1234 	offset = RX_DMA_CK_DIV_REG;
1235 
1236 	clk_div.value = 0;
1237 	clk_div.bits.ldw.cnt = count;
1238 	NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
1239 		    " npi_rxdma_cfg_clock_div_set: add 0x%llx "
1240 		    "handle 0x%llx value 0x%llx",
1241 		    handle.regp, handle.regh, clk_div.value));
1242 
1243 	NXGE_REG_WR64(handle, offset, clk_div.value);
1244 
1245 	return (NPI_SUCCESS);
1246 }
1247 
1248 npi_status_t
1249 npi_rxdma_cfg_red_rand_init(npi_handle_t handle, uint16_t init_value)
1250 {
1251 	uint64_t offset;
1252 	red_ran_init_t rand_reg;
1253 
1254 	offset = RED_RAN_INIT_REG;
1255 
1256 	rand_reg.value = 0;
1257 	rand_reg.bits.ldw.init = init_value;
1258 	rand_reg.bits.ldw.enable = 1;
1259 	NXGE_REG_WR64(handle, offset, rand_reg.value);
1260 
1261 	return (NPI_SUCCESS);
1262 
1263 }
1264 
1265 npi_status_t
1266 npi_rxdma_cfg_red_rand_disable(npi_handle_t handle)
1267 {
1268 	uint64_t offset;
1269 	red_ran_init_t rand_reg;
1270 
1271 	offset = RED_RAN_INIT_REG;
1272 
1273 	NXGE_REG_RD64(handle, offset, &rand_reg.value);
1274 	rand_reg.bits.ldw.enable = 0;
1275 	NXGE_REG_WR64(handle, offset, rand_reg.value);
1276 
1277 	return (NPI_SUCCESS);
1278 
1279 }
1280 
1281 npi_status_t
1282 npi_rxdma_cfg_32bitmode_enable(npi_handle_t handle)
1283 {
1284 	uint64_t offset;
1285 	rx_addr_md_t md_reg;
1286 	offset = RX_ADDR_MD_REG;
1287 	md_reg.value = 0;
1288 	md_reg.bits.ldw.mode32 = 1;
1289 
1290 	NXGE_REG_WR64(handle, offset, md_reg.value);
1291 	return (NPI_SUCCESS);
1292 
1293 }
1294 
1295 npi_status_t
1296 npi_rxdma_cfg_32bitmode_disable(npi_handle_t handle)
1297 {
1298 	uint64_t offset;
1299 	rx_addr_md_t md_reg;
1300 	offset = RX_ADDR_MD_REG;
1301 	md_reg.value = 0;
1302 
1303 	NXGE_REG_WR64(handle, offset, md_reg.value);
1304 	return (NPI_SUCCESS);
1305 
1306 }
1307 
1308 npi_status_t
1309 npi_rxdma_cfg_ram_access_enable(npi_handle_t handle)
1310 {
1311 	uint64_t offset;
1312 	rx_addr_md_t md_reg;
1313 	offset = RX_ADDR_MD_REG;
1314 	NXGE_REG_RD64(handle, offset, &md_reg.value);
1315 	md_reg.bits.ldw.ram_acc = 1;
1316 	NXGE_REG_WR64(handle, offset, md_reg.value);
1317 	return (NPI_SUCCESS);
1318 
1319 }
1320 
1321 npi_status_t
1322 npi_rxdma_cfg_ram_access_disable(npi_handle_t handle)
1323 {
1324 	uint64_t offset;
1325 	rx_addr_md_t md_reg;
1326 	offset = RX_ADDR_MD_REG;
1327 	NXGE_REG_RD64(handle, offset, &md_reg.value);
1328 	md_reg.bits.ldw.ram_acc = 0;
1329 	NXGE_REG_WR64(handle, offset, md_reg.value);
1330 	return (NPI_SUCCESS);
1331 
1332 }
1333 
1334 npi_status_t
1335 npi_rxdma_cfg_port_ddr_weight(npi_handle_t handle,
1336 				    uint8_t portnm, uint32_t weight)
1337 {
1338 
1339 	pt_drr_wt_t wt_reg;
1340 	uint64_t offset;
1341 
1342 	ASSERT(RXDMA_PORT_VALID(portnm));
1343 	if (!RXDMA_PORT_VALID(portnm)) {
1344 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
1345 			    " rxdma_cfg_port_ddr_weight"
1346 			    " Illegal Port Number %d \n",
1347 			    portnm));
1348 		return (NPI_RXDMA_PORT_INVALID);
1349 	}
1350 
1351 	offset = PT_DRR_WT_REG(portnm);
1352 	wt_reg.value = 0;
1353 	wt_reg.bits.ldw.wt = weight;
1354 	NXGE_REG_WR64(handle, offset, wt_reg.value);
1355 	return (NPI_SUCCESS);
1356 }
1357 
1358 npi_status_t
1359 npi_rxdma_port_usage_get(npi_handle_t handle,
1360 				    uint8_t portnm, uint32_t *blocks)
1361 {
1362 
1363 	pt_use_t use_reg;
1364 	uint64_t offset;
1365 
1366 	ASSERT(RXDMA_PORT_VALID(portnm));
1367 	if (!RXDMA_PORT_VALID(portnm)) {
1368 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
1369 			    " rxdma_port_usage_get"
1370 			    " Illegal Port Number %d \n",
1371 			    portnm));
1372 		return (NPI_RXDMA_PORT_INVALID);
1373 	}
1374 
1375 	offset = PT_USE_REG(portnm);
1376 	NXGE_REG_RD64(handle, offset, &use_reg.value);
1377 	*blocks = use_reg.bits.ldw.cnt;
1378 	return (NPI_SUCCESS);
1379 
1380 }
1381 
1382 npi_status_t
1383 npi_rxdma_cfg_wred_param(npi_handle_t handle, uint8_t rdc,
1384 				    rdc_red_para_t *wred_params)
1385 {
1386 	rdc_red_para_t wred_reg;
1387 	uint64_t offset;
1388 
1389 	ASSERT(RXDMA_CHANNEL_VALID(rdc));
1390 	if (!RXDMA_CHANNEL_VALID(rdc)) {
1391 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
1392 			    " rxdma_cfg_wred_param"
1393 			    " Illegal RDC Number %d \n",
1394 			    rdc));
1395 		return (NPI_RXDMA_RDC_INVALID);
1396 	}
1397 
1398 	/*
1399 	 * need to update RDC_RED_PARA_REG as well as bit defs in
1400 	 * the hw header file
1401 	 */
1402 	offset = RDC_RED_RDC_PARA_REG(rdc);
1403 
1404 	NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
1405 		" npi_rxdma_cfg_wred_param: "
1406 		"set RED_PARA: passed value 0x%llx "
1407 		"win 0x%x thre 0x%x sync 0x%x thre_sync 0x%x",
1408 		wred_params->value,
1409 		wred_params->bits.ldw.win,
1410 		wred_params->bits.ldw.thre,
1411 		wred_params->bits.ldw.win_syn,
1412 		wred_params->bits.ldw.thre_sync));
1413 
1414 	wred_reg.value = 0;
1415 	wred_reg.bits.ldw.win = wred_params->bits.ldw.win;
1416 	wred_reg.bits.ldw.thre = wred_params->bits.ldw.thre;
1417 	wred_reg.bits.ldw.win_syn = wred_params->bits.ldw.win_syn;
1418 	wred_reg.bits.ldw.thre_sync = wred_params->bits.ldw.thre_sync;
1419 	NXGE_REG_WR64(handle, offset, wred_reg.value);
1420 
1421 	NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
1422 		"set RED_PARA: value 0x%llx "
1423 		"win 0x%x thre 0x%x sync 0x%x thre_sync 0x%x",
1424 		wred_reg.value,
1425 		wred_reg.bits.ldw.win,
1426 		wred_reg.bits.ldw.thre,
1427 		wred_reg.bits.ldw.win_syn,
1428 		wred_reg.bits.ldw.thre_sync));
1429 
1430 	return (NPI_SUCCESS);
1431 }
1432 
1433 /*
1434  * npi_rxdma_cfg_rdc_table()
1435  * Configure/populate the RDC table
1436  *
1437  * Inputs:
1438  *	handle:		register handle interpreted by the underlying OS
1439  *	table:		RDC Group Number
1440  *	rdc[]:	 Array of RX DMA Channels
1441  *
1442  * Return:
1443  * NPI_SUCCESS
1444  * NPI_RXDMA_TABLE_INVALID
1445  *
1446  */
1447 npi_status_t
1448 npi_rxdma_cfg_rdc_table(npi_handle_t handle,
1449 			    uint8_t table, uint8_t rdc[])
1450 {
1451 	uint64_t offset;
1452 	int tbl_offset;
1453 	rdc_tbl_t tbl_reg;
1454 	tbl_reg.value = 0;
1455 
1456 	ASSERT(RXDMA_TABLE_VALID(table));
1457 	if (!RXDMA_TABLE_VALID(table)) {
1458 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
1459 			    " npi_rxdma_cfg_rdc_table"
1460 			    " Illegal RDC Rable Number %d \n",
1461 			    rdc));
1462 		return (NPI_RXDMA_TABLE_INVALID);
1463 	}
1464 
1465 	offset = REG_RDC_TABLE_OFFSET(table);
1466 	for (tbl_offset = 0; tbl_offset < NXGE_MAX_RDCS; tbl_offset++) {
1467 		tbl_reg.bits.ldw.rdc = rdc[tbl_offset];
1468 		NXGE_REG_WR64(handle, offset, tbl_reg.value);
1469 		offset += 8;
1470 	}
1471 
1472 	return (NPI_SUCCESS);
1473 
1474 }
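
/*
 * Usage sketch (illustrative only): populate an RDC group with a
 * round-robin spread of channels.  The table number and the spread of
 * four channels are hypothetical.
 *
 *	uint8_t rdc_map[NXGE_MAX_RDCS];
 *	int i;
 *
 *	for (i = 0; i < NXGE_MAX_RDCS; i++)
 *		rdc_map[i] = (uint8_t)(i % 4);
 *	(void) npi_rxdma_cfg_rdc_table(handle, table, rdc_map);
 */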
1475 
1476 npi_status_t
1477 npi_rxdma_cfg_rdc_table_default_rdc(npi_handle_t handle,
1478 			    uint8_t table, uint8_t rdc)
1479 {
1480 	uint64_t offset;
1481 	rdc_tbl_t tbl_reg;
1482 	tbl_reg.value = 0;
1483 
1484 	ASSERT(RXDMA_TABLE_VALID(table));
1485 	if (!RXDMA_TABLE_VALID(table)) {
1486 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
1487 			    " npi_rxdma_cfg_rdc_table"
1488 			    " Illegal RDC table Number %d \n",
1489 			    rdc));
1490 		return (NPI_RXDMA_TABLE_INVALID);
1491 	}
1492 
1493 	offset = REG_RDC_TABLE_OFFSET(table);
1494 	tbl_reg.bits.ldw.rdc = rdc;
1495 	NXGE_REG_WR64(handle, offset, tbl_reg.value);
1496 	return (NPI_SUCCESS);
1497 
1498 }
1499 
1500 npi_status_t
1501 npi_rxdma_dump_rdc_table(npi_handle_t handle,
1502 			    uint8_t table)
1503 {
1504 	uint64_t offset;
1505 	int tbl_offset;
1506 	uint64_t value;
1507 
1508 	ASSERT(RXDMA_TABLE_VALID(table));
1509 	if (!RXDMA_TABLE_VALID(table)) {
1510 		NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
1511 			    " npi_rxdma_dump_rdc_table"
1512 			    " Illegal RDC Rable Number %d \n",
1513 			    table));
1514 		return (NPI_RXDMA_TABLE_INVALID);
1515 	}
1516 	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
1517 			    "\n Register Dump for RDC Table %d \n",
1518 			    table));
1519 	offset = REG_RDC_TABLE_OFFSET(table);
1520 	for (tbl_offset = 0; tbl_offset < NXGE_MAX_RDCS; tbl_offset++) {
1521 		NXGE_REG_RD64(handle, offset, &value);
1522 		NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
1523 					    " 0x%08llx 0x%08llx \n",
1524 					    offset, value));
1525 		offset += 8;
1526 	}
1527 	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
1528 			    "\n Register Dump for RDC Table %d done\n",
1529 			    table));
1530 	return (NPI_SUCCESS);
1531 
1532 }
1533 
1534 npi_status_t
1535 npi_rxdma_rdc_rbr_stat_get(npi_handle_t handle, uint8_t rdc,
1536 			    rbr_stat_t *rbr_stat)
1537 {
1538 
1539 	ASSERT(RXDMA_CHANNEL_VALID(rdc));
1540 	if (!RXDMA_CHANNEL_VALID(rdc)) {
1541 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
1542 			    " rxdma_rdc_rbr_stat_get"
1543 			    " Illegal RDC Number %d \n",
1544 			    rdc));
1545 		return (NPI_RXDMA_RDC_INVALID);
1546 	}
1547 
1548 	RXDMA_REG_READ64(handle, RBR_STAT_REG, rdc, &rbr_stat->value);
1549 	return (NPI_SUCCESS);
1550 }
1551 
1552 /*
1553  * npi_rxdma_rdc_rbr_head_get
1554  * Gets the current rbr head pointer.
1555  *
1556  * Inputs:
1557  *      handle:	opaque handle interpreted by the underlying OS
1558  *	rdc:		RX DMA Channel number
1559  *	hdptr:		Pointer to write the RBR head value to
1560  *
1561  * Return:
1562  * NPI_SUCCESS
1563  * NPI_RXDMA_RDC_INVALID
1564  */
1565 npi_status_t
1566 npi_rxdma_rdc_rbr_head_get(npi_handle_t handle,
1567 			    uint8_t rdc, addr44_t *hdptr)
1568 {
1569 	rbr_hdh_t hh_ptr;
1570 	rbr_hdl_t hl_ptr;
1571 
1572 	ASSERT(RXDMA_CHANNEL_VALID(rdc));
1573 	if (!RXDMA_CHANNEL_VALID(rdc)) {
1574 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
1575 			    " rxdma_rdc_rbr_head_get"
1576 			    " Illegal RDC Number %d \n",
1577 			    rdc));
1578 		return (NPI_RXDMA_RDC_INVALID);
1579 	}
1580 	hh_ptr.value = 0;
1581 	hl_ptr.value = 0;
1582 	RXDMA_REG_READ64(handle, RBR_HDH_REG, rdc, &hh_ptr.value);
1583 	RXDMA_REG_READ64(handle, RBR_HDL_REG, rdc, &hl_ptr.value);
1584 	hdptr->bits.ldw = hl_ptr.bits.ldw.head_l << 2;
1585 	hdptr->bits.hdw = hh_ptr.bits.ldw.head_h;
1586 	return (NPI_SUCCESS);
1587 
1588 }
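
/*
 * Usage sketch (illustrative only): fetch the RBR head and rebuild the
 * 44-bit DMA address, assuming the usual addr44_t split of a 32-bit
 * low word and a 12-bit high word.
 *
 *	addr44_t head;
 *	uint64_t head_addr;
 *
 *	if (npi_rxdma_rdc_rbr_head_get(handle, rdc, &head) == NPI_SUCCESS)
 *		head_addr = ((uint64_t)head.bits.hdw << 32) | head.bits.ldw;
 */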
1589 
1590 npi_status_t
1591 npi_rxdma_rdc_rcr_qlen_get(npi_handle_t handle, uint8_t rdc,
1592 			    uint16_t *rcr_qlen)
1593 {
1594 
1595 	rcrstat_a_t stats;
1596 
1597 	ASSERT(RXDMA_CHANNEL_VALID(rdc));
1598 	if (!RXDMA_CHANNEL_VALID(rdc)) {
1599 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
1600 			    " rxdma_rdc_rcr_qlen_get"
1601 			    " Illegal RDC Number %d \n",
1602 			    rdc));
1603 		return (NPI_RXDMA_RDC_INVALID);
1604 	}
1605 
1606 	RXDMA_REG_READ64(handle, RCRSTAT_A_REG, rdc, &stats.value);
1607 	*rcr_qlen =  stats.bits.ldw.qlen;
1608 	NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
1609 		    " rxdma_rdc_rcr_qlen_get"
1610 		    " RDC %d qlen %x qlen %x\n",
1611 		    rdc, *rcr_qlen, stats.bits.ldw.qlen));
1612 	return (NPI_SUCCESS);
1613 }
1614 
1615 npi_status_t
1616 npi_rxdma_rdc_rcr_tail_get(npi_handle_t handle,
1617 			    uint8_t rdc, addr44_t *tail_addr)
1618 {
1619 
1620 	rcrstat_b_t th_ptr;
1621 	rcrstat_c_t tl_ptr;
1622 
1623 	ASSERT(RXDMA_CHANNEL_VALID(rdc));
1624 	if (!RXDMA_CHANNEL_VALID(rdc)) {
1625 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
1626 				    " rxdma_rdc_rcr_tail_get"
1627 				    " Illegal RDC Number %d \n",
1628 				    rdc));
1629 		return (NPI_RXDMA_RDC_INVALID);
1630 	}
1631 	th_ptr.value = 0;
1632 	tl_ptr.value = 0;
1633 	RXDMA_REG_READ64(handle, RCRSTAT_B_REG, rdc, &th_ptr.value);
1634 	RXDMA_REG_READ64(handle, RCRSTAT_C_REG, rdc, &tl_ptr.value);
1635 	tail_addr->bits.ldw = tl_ptr.bits.ldw.tlptr_l << 3;
1636 	tail_addr->bits.hdw = th_ptr.bits.ldw.tlptr_h;
1637 	NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
1638 			    " rxdma_rdc_rcr_tail_get"
1639 			    " RDC %d rcr_tail %llx tl %x\n",
1640 			    rdc, tl_ptr.value,
1641 			    tl_ptr.bits.ldw.tlptr_l));
1642 
1643 	return (NPI_SUCCESS);
1644 
1645 
1646 }
1647 
1648 /*
1649  * npi_rxdma_rxctl_fifo_error_intr_set
1650  * Configure The RX ctrl fifo error interrupt generation
1651  *
1652  * Inputs:
1653  *      handle:	opaque handle interpreted by the underlying OS
1654  *	mask:	rx_ctl_dat_fifo_mask_t specifying the errors
1655  * valid fields in  rx_ctl_dat_fifo_mask_t structure are:
1656  * zcp_eop_err, ipp_eop_err, id_mismatch. If a field is set
1657  * to 1, we will enable interrupt generation for the
1658  * corresponding error condition. In the hardware, the bit(s)
1659  * have to be cleared to enable interrupt.
1660  *
1661  * Return:
1662  * NPI_SUCCESS
1663  *
1664  */
1665 npi_status_t
1666 npi_rxdma_rxctl_fifo_error_intr_set(npi_handle_t handle,
1667 				    rx_ctl_dat_fifo_mask_t *mask)
1668 {
1669 	uint64_t offset;
1670 	rx_ctl_dat_fifo_mask_t intr_mask;
1671 	offset = RX_CTL_DAT_FIFO_MASK_REG;
1672 	NXGE_REG_RD64(handle, offset, &intr_mask.value);
1673 
1674 	if (mask->bits.ldw.ipp_eop_err) {
1675 		intr_mask.bits.ldw.ipp_eop_err = 0;
1676 	}
1677 
1678 	if (mask->bits.ldw.zcp_eop_err) {
1679 		intr_mask.bits.ldw.zcp_eop_err = 0;
1680 	}
1681 
1682 	if (mask->bits.ldw.id_mismatch) {
1683 		intr_mask.bits.ldw.id_mismatch = 0;
1684 	}
1685 
1686 	NXGE_REG_WR64(handle, offset, intr_mask.value);
1687 	return (NPI_SUCCESS);
1688 }
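
/*
 * Usage sketch (illustrative only): enable interrupts for all three
 * error conditions handled above.  Only the fields this function
 * inspects are set.
 *
 *	rx_ctl_dat_fifo_mask_t mask;
 *
 *	mask.value = 0;
 *	mask.bits.ldw.ipp_eop_err = 1;
 *	mask.bits.ldw.zcp_eop_err = 1;
 *	mask.bits.ldw.id_mismatch = 1;
 *	(void) npi_rxdma_rxctl_fifo_error_intr_set(handle, &mask);
 */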
1689 
1690 /*
1691  * npi_rxdma_rxctl_fifo_error_intr_get
1692  * Read the RX ctrl fifo error status
1693  *
1694  * Inputs:
1695  *      handle:	opaque handle interpreted by the underlying OS
1696  *	stat:	rx_ctl_dat_fifo_stat_t to read the errors to
1697  * valid fields in  rx_ctl_dat_fifo_stat_t structure are:
1698  * zcp_eop_err, ipp_eop_err, id_mismatch.
1699  * Return:
1700  * NPI_SUCCESS
1701  *
1702  */
1703 npi_status_t
1704 npi_rxdma_rxctl_fifo_error_intr_get(npi_handle_t handle,
1705 			    rx_ctl_dat_fifo_stat_t *stat)
1706 {
1707 	uint64_t offset = RX_CTL_DAT_FIFO_STAT_REG;
1708 	NXGE_REG_RD64(handle, offset, &stat->value);
1709 	return (NPI_SUCCESS);
1710 }
1711 
1712 npi_status_t
1713 npi_rxdma_rdc_rcr_pktread_update(npi_handle_t handle, uint8_t channel,
1714 				    uint16_t pkts_read)
1715 {
1716 
1717 	rx_dma_ctl_stat_t	cs;
1718 	uint16_t min_read = 0;
1719 
1720 	ASSERT(RXDMA_CHANNEL_VALID(channel));
1721 	if (!RXDMA_CHANNEL_VALID(channel)) {
1722 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
1723 		    " npi_rxdma_rdc_rcr_pktread_update ",
1724 		    " channel %d", channel));
1725 		return (NPI_FAILURE | NPI_RXDMA_CHANNEL_INVALID(channel));
1726 	}
1727 
1728 	if ((pkts_read < min_read) || (pkts_read > 512)) {
1729 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
1730 		    " npi_rxdma_rdc_rcr_pktread_update ",
1731 		    " pkts %d out of bound", pkts_read));
1732 		return (NPI_RXDMA_OPCODE_INVALID(pkts_read));
1733 	}
1734 
1735 	RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
1736 				&cs.value);
1737 	cs.bits.ldw.pktread = pkts_read;
1738 	RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG,
1739 				    channel, cs.value);
1740 
1741 	return (NPI_SUCCESS);
1742 }
1743 
1744 npi_status_t
1745 npi_rxdma_rdc_rcr_bufread_update(npi_handle_t handle, uint8_t channel,
1746 					    uint16_t bufs_read)
1747 {
1748 
1749 	rx_dma_ctl_stat_t	cs;
1750 	uint16_t min_read = 0;
1751 
1752 	ASSERT(RXDMA_CHANNEL_VALID(channel));
1753 	if (!RXDMA_CHANNEL_VALID(channel)) {
1754 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
1755 		    " npi_rxdma_rdc_rcr_bufread_update ",
1756 		    " channel %d", channel));
1757 		return (NPI_FAILURE | NPI_RXDMA_CHANNEL_INVALID(channel));
1758 	}
1759 
1760 	if ((bufs_read < min_read) || (bufs_read > 512)) {
1761 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
1762 		    " npi_rxdma_rdc_rcr_bufread_update ",
1763 		    " bufs read %d out of bound", bufs_read));
1764 		return (NPI_RXDMA_OPCODE_INVALID(bufs_read));
1765 	}
1766 
1767 	RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
1768 				&cs.value);
1769 	cs.bits.ldw.ptrread = bufs_read;
1770 	RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG,
1771 				    channel, cs.value);
1772 
1773 	return (NPI_SUCCESS);
1774 }
1775 
1776 npi_status_t
1777 npi_rxdma_rdc_rcr_read_update(npi_handle_t handle, uint8_t channel,
1778 				    uint16_t pkts_read, uint16_t bufs_read)
1779 {
1780 
1781 	rx_dma_ctl_stat_t	cs;
1782 
1783 	ASSERT(RXDMA_CHANNEL_VALID(channel));
1784 	if (!RXDMA_CHANNEL_VALID(channel)) {
1785 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
1786 		    " npi_rxdma_rdc_rcr_read_update ",
1787 		    " channel %d", channel));
1788 		return (NPI_FAILURE | NPI_RXDMA_CHANNEL_INVALID(channel));
1789 	}
1790 
1791 	NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
1792 	    " npi_rxdma_rdc_rcr_read_update "
1793 	    " bufs read %d pkt read %d",
1794 		bufs_read, pkts_read));
1795 
1796 	RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
1797 				&cs.value);
1798 
1799 	NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
1800 		" npi_rxdma_rdc_rcr_read_update: "
1801 		" value: 0x%llx bufs read %d pkt read %d",
1802 		cs.value,
1803 		cs.bits.ldw.ptrread, cs.bits.ldw.pktread));
1804 
1805 	cs.bits.ldw.pktread = pkts_read;
1806 	cs.bits.ldw.ptrread = bufs_read;
1807 
1808 	RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG,
1809 				    channel, cs.value);
1810 
1811 	RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
1812 				&cs.value);
1813 
1814 	NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
1815 	    " npi_rxdma_rdc_rcr_read_update: read back after update "
1816 	    " value: 0x%llx bufs read %d pkt read %d",
1817 		cs.value,
1818 		cs.bits.ldw.ptrread, cs.bits.ldw.pktread));
1819 
1820 	return (NPI_SUCCESS);
1821 }
1822 
1823 /*
1824  * npi_rxdma_channel_mex_set():
1825  *	This function is called to arm the DMA channel with
1826  *	mailbox updating capability. Software needs to rearm
1827  *	for each update by writing to the control and status register.
1828  *
1829  * Parameters:
1830  *	handle		- NPI handle (virtualization flag must be defined).
1831  *	channel		- logical RXDMA channel from 0 to 23.
1832  *			  (If virtualization flag is not set, then
1833  *			   logical channel is the same as the hardware
1834  *			   channel number).
1835  *
1836  * Return:
1837  *	NPI_SUCCESS		- If enable channel with mailbox update
1838  *				  is completed successfully.
1839  *
1840  *	Error:
1841  *	NPI error status code
1842  */
1843 npi_status_t
1844 npi_rxdma_channel_mex_set(npi_handle_t handle, uint8_t channel)
1845 {
1846 	return (npi_rxdma_channel_control(handle, RXDMA_MEX_SET, channel));
1847 }
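
/*
 * Usage sketch (illustrative only): a receive interrupt handler would
 * typically re-arm mailbox updates after servicing the channel, since
 * the MEX bit must be rewritten for every update.
 *
 *	... service RCR entries for the channel ...
 *	(void) npi_rxdma_channel_mex_set(handle, channel);
 */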
1848 
1849 /*
1850  * npi_rxdma_channel_rcrto_clear():
1851  *	This function is called to reset RCRTO bit to 0.
1852  *
1853  * Parameters:
1854  *	handle		- NPI handle (virtualization flag must be defined).
1855  *	channel		- logical RXDMA channel from 0 to 23.
1856  *			  (If virtualization flag is not set, then
1857  *			   logical channel is the same as the hardware
1858  *			   channel number).
1859  * Return:
1860  *	NPI_SUCCESS
1861  *
1862  *	Error:
1863  *	NPI error status code
1864  */
1865 npi_status_t
1866 npi_rxdma_channel_rcrto_clear(npi_handle_t handle, uint8_t channel)
1867 {
1868 	return (npi_rxdma_channel_control(handle, RXDMA_RCRTO_CLEAR, channel));
1869 }
1870 
1871 /*
1872  * npi_rxdma_channel_pt_drop_pkt_clear():
1873  *	This function is called to clear the port drop packet bit (debug).
1874  *
1875  * Parameters:
1876  *	handle		- NPI handle (virtualization flag must be defined).
1877  *	channel		- logical RXDMA channel from 0 to 23.
1878  *			  (If virtualization flag is not set, then
1879  *			   logical channel is the same as the hardware
1880  *			   channel number).
1881  * Return:
1882  *	NPI_SUCCESS
1883  *
1884  *	Error:
1885  *	NPI error status code
1886  */
1887 npi_status_t
1888 npi_rxdma_channel_pt_drop_pkt_clear(npi_handle_t handle, uint8_t channel)
1889 {
1890 	return (npi_rxdma_channel_control(handle, RXDMA_PT_DROP_PKT_CLEAR,
1891 			channel));
1892 }
1893 
1894 /*
1895  * npi_rxdma_channel_wred_dop_clear():
1896  *	This function is called to clear the wred drop bit (debug only).
1897  *
1898  * Parameters:
1899  *	handle		- NPI handle (virtualization flag must be defined).
1900  *	channel		- logical RXDMA channel from 0 to 23.
1901  *			  (If virtualization flag is not set, then
1902  *			   logical channel is the same as the hardware
1903  *			   channel number).
1904  * Return:
1905  *	NPI_SUCCESS
1906  *
1907  *	Error:
1908  *	NPI error status code
1909  */
1910 npi_status_t
1911 npi_rxdma_channel_wred_dop_clear(npi_handle_t handle, uint8_t channel)
1912 {
1913 	return (npi_rxdma_channel_control(handle, RXDMA_WRED_DROP_CLEAR,
1914 			channel));
1915 }
1916 
1917 /*
1918  * npi_rxdma_channel_rcr_shfull_clear():
1919  *	This function is called to clear the RCR shadow full bit.
1920  *
1921  * Parameters:
1922  *	handle		- NPI handle (virtualization flag must be defined).
1923  *	channel		- logical RXDMA channel from 0 to 23.
1924  *			  (If virtualization flag is not set, then
1925  *			   logical channel is the same as the hardware
1926  *			   channel number).
1927  * Return:
1928  *	NPI_SUCCESS
1929  *
1930  *	Error:
1931  *	NPI error status code
1932  */
1933 npi_status_t
1934 npi_rxdma_channel_rcr_shfull_clear(npi_handle_t handle, uint8_t channel)
1935 {
1936 	return (npi_rxdma_channel_control(handle, RXDMA_RCR_SFULL_CLEAR,
1937 			channel));
1938 }
1939 
1940 /*
1941  * npi_rxdma_channel_rcr_full_clear():
1942  *	This function is called to clear the RCR full bit.
1943  *
1944  * Parameters:
1945  *	handle		- NPI handle (virtualization flag must be defined).
1946  *	channel		- logical RXDMA channel from 0 to 23.
1947  *			  (If virtualization flag is not set, then
1948  *			   logical channel is the same as the hardware
1949  *			   channel number).
1950  * Return:
1951  *	NPI_SUCCESS
1952  *
1953  *	Error:
1954  *	NPI error status code
1955  */
1956 npi_status_t
1957 npi_rxdma_channel_rcr_full_clear(npi_handle_t handle, uint8_t channel)
1958 {
1959 	return (npi_rxdma_channel_control(handle, RXDMA_RCR_FULL_CLEAR,
1960 			channel));
1961 }
1962 
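/*
 * npi_rxdma_channel_rbr_empty_clear():
 *	This function is called to clear the RBR empty bit.
 *
 * Parameters:
 *	handle		- NPI handle (virtualization flag must be defined).
 *	channel		- logical RXDMA channel from 0 to 23.
 * Return:
 *	NPI_SUCCESS
 *
 *	Error:
 *	NPI error status code
 */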
1963 npi_status_t
1964 npi_rxdma_channel_rbr_empty_clear(npi_handle_t handle, uint8_t channel)
1965 {
1966 	return (npi_rxdma_channel_control(handle,
1967 		RXDMA_RBR_EMPTY_CLEAR, channel));
1968 }
1969 
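/*
 * npi_rxdma_channel_cs_clear_all():
 *	This function is called to clear the entire control and status
 *	register by writing zero to all of its fields.
 *
 * Parameters:
 *	handle		- NPI handle (virtualization flag must be defined).
 *	channel		- logical RXDMA channel from 0 to 23.
 * Return:
 *	NPI_SUCCESS
 *
 *	Error:
 *	NPI error status code
 */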
1970 npi_status_t
1971 npi_rxdma_channel_cs_clear_all(npi_handle_t handle, uint8_t channel)
1972 {
1973 	return (npi_rxdma_channel_control(handle, RXDMA_CS_CLEAR_ALL, channel));
1974 }
1975 
1976 /*
1977  * npi_rxdma_channel_control():
1978  *	This function is called to control a receive DMA channel:
1979  *	arm the channel for mailbox updates or clear various
1980  *	event status bits in the control and status register.
1981  *
1982  * Parameters:
1983  *	handle		- NPI handle (virtualization flag must be defined).
1984  *	control		- NPI defined control type supported:
1985  *				- RXDMA_MEX_SET
1986  * 				- RXDMA_RCRTO_CLEAR
1987  *				- RXDMA_PT_DROP_PKT_CLEAR
1988  *				- RXDMA_WRED_DROP_CLEAR
1989  *				- RXDMA_RCR_SFULL_CLEAR
1990  *				- RXDMA_RCR_FULL_CLEAR
1991  *				- RXDMA_RBR_PRE_EMPTY_CLEAR
1992  *				- RXDMA_RBR_EMPTY_CLEAR
 *				- RXDMA_CS_CLEAR_ALL
1993  *	channel		- logical RXDMA channel from 0 to 23.
1994  *			  (If virtualization flag is not set, then logical
1995  *			   channel is the same as the hardware channel number).
1996  * Return:
1997  *	NPI_SUCCESS
1998  *
1999  *	Error:
2000  *	NPI error status code
2001  */
2002 npi_status_t
2003 npi_rxdma_channel_control(npi_handle_t handle, rxdma_cs_cntl_t control,
2004 			uint8_t channel)
2005 {
2006 
2007 	rx_dma_ctl_stat_t	cs;
2008 
2009 	ASSERT(RXDMA_CHANNEL_VALID(channel));
2010 	if (!RXDMA_CHANNEL_VALID(channel)) {
2011 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
2012 		    " npi_rxdma_channel_control",
2013 		    " channel %d", channel));
2014 		return (NPI_FAILURE | NPI_RXDMA_CHANNEL_INVALID(channel));
2015 	}
2016 
2017 	switch (control) {
2018 	case RXDMA_MEX_SET:
2019 		RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
2020 				&cs.value);
2021 		cs.bits.hdw.mex = 1;
2022 		RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG,
2023 				channel, cs.value);
2024 		break;
2025 
2026 	case RXDMA_RCRTO_CLEAR:
2027 		RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
2028 				&cs.value);
2029 		cs.bits.hdw.rcrto = 0;
2030 		RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
2031 				cs.value);
2032 		break;
2033 
2034 	case RXDMA_PT_DROP_PKT_CLEAR:
2035 		RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
2036 				&cs.value);
2037 		cs.bits.hdw.port_drop_pkt = 0;
2038 		RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
2039 				cs.value);
2040 		break;
2041 
2042 	case RXDMA_WRED_DROP_CLEAR:
2043 		RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
2044 				&cs.value);
2045 		cs.bits.hdw.wred_drop = 0;
2046 		RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
2047 				cs.value);
2048 		break;
2049 
2050 	case RXDMA_RCR_SFULL_CLEAR:
2051 		RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
2052 				&cs.value);
2053 		cs.bits.hdw.rcr_shadow_full = 0;
2054 		RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
2055 				cs.value);
2056 		break;
2057 
2058 	case RXDMA_RCR_FULL_CLEAR:
2059 		RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
2060 				&cs.value);
2061 		cs.bits.hdw.rcrfull = 0;
2062 		RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
2063 				cs.value);
2064 		break;
2065 
2066 	case RXDMA_RBR_PRE_EMPTY_CLEAR:
2067 		RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
2068 				&cs.value);
2069 		cs.bits.hdw.rbr_pre_empty = 0;
2070 		RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
2071 				cs.value);
2072 		break;
2073 
2074 	case RXDMA_RBR_EMPTY_CLEAR:
2075 		RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
2076 				&cs.value);
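		/* rbr_empty appears to be write-1-to-clear, hence the 1 */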
2077 		cs.bits.hdw.rbr_empty = 1;
2078 		RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
2079 				cs.value);
2080 		break;
2081 
2082 	case RXDMA_CS_CLEAR_ALL:
2083 		cs.value = 0;
2084 		RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
2085 				cs.value);
2086 		break;
2087 
2088 	default:
2089 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
2090 		    "npi_rxdma_channel_control",
2091 		    "control %d", control));
2092 		return (NPI_FAILURE | NPI_RXDMA_OPCODE_INVALID(channel));
2093 	}
2094 
2095 	return (NPI_SUCCESS);
2096 }
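
/*
 * Usage sketch (illustrative only): the per-event wrappers above are
 * thin shims around this entry point, so, for example,
 *
 *	(void) npi_rxdma_channel_control(handle, RXDMA_RCRTO_CLEAR, channel);
 *
 * is equivalent to
 *
 *	(void) npi_rxdma_channel_rcrto_clear(handle, channel);
 */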
2097 
2098 /*
2099  * npi_rxdma_control_status():
2100  *	This function is called to operate on the control
2101  *	and status register.
2102  *
2103  * Parameters:
2104  *	handle		- NPI handle
2105  *	op_mode		- OP_GET: get hardware control and status
2106  *			  OP_SET: set hardware control and status
2107  *			  OP_UPDATE: update hardware control and status.
2108  *			  (OP_CLEAR is not supported by this function.)
2109  *	channel		- hardware RXDMA channel from 0 to 23.
2110  *	cs_p		- pointer to hardware defined control and status
2111  *			  structure.
2112  * Return:
2113  *	NPI_SUCCESS
2114  *
2115  *	Error:
2116  *	NPI error status code
2117  */
2118 npi_status_t
2119 npi_rxdma_control_status(npi_handle_t handle, io_op_t op_mode,
2120 			uint8_t channel, p_rx_dma_ctl_stat_t cs_p)
2121 {
2122 	int			status = NPI_SUCCESS;
2123 	rx_dma_ctl_stat_t	cs;
2124 
2125 	ASSERT(RXDMA_CHANNEL_VALID(channel));
2126 	if (!RXDMA_CHANNEL_VALID(channel)) {
2127 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
2128 		    "npi_rxdma_control_status",
2129 		    "channel %d", channel));
2130 		return (NPI_FAILURE | NPI_RXDMA_CHANNEL_INVALID(channel));
2131 	}
2132 
2133 	switch (op_mode) {
2134 	case OP_GET:
2135 		RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
2136 				&cs_p->value);
2137 		break;
2138 
2139 	case OP_SET:
2140 		RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
2141 			cs_p->value);
2142 		break;
2143 
2144 	case OP_UPDATE:
2145 		RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
2146 				&cs.value);
2147 		RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
2148 			cs_p->value | cs.value);
2149 		break;
2150 
2151 	default:
2152 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
2153 		    "npi_rxdma_control_status",
2154 		    "control %d", op_mode));
2155 		return (NPI_FAILURE | NPI_RXDMA_OPCODE_INVALID(channel));
2156 	}
2157 
2158 	return (status);
2159 }
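
/*
 * Usage sketch (illustrative only): fetch the control and status
 * register, inspect an event bit, and write the modified value back:
 *
 *	rx_dma_ctl_stat_t cs;
 *
 *	if (npi_rxdma_control_status(handle, OP_GET, channel, &cs) ==
 *	    NPI_SUCCESS && cs.bits.hdw.rcrto) {
 *		cs.bits.hdw.rcrto = 0;
 *		(void) npi_rxdma_control_status(handle, OP_SET, channel,
 *		    &cs);
 *	}
 */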
2160 
2161 /*
2162  * npi_rxdma_event_mask():
2163  *	This function is called to operate on the event mask
2164  *	register which is used for generating interrupts.
2165  *
2166  * Parameters:
2167  *	handle		- NPI handle
2168  *	op_mode		- OP_GET: get hardware event mask
2169  *			  OP_SET: set hardware interrupt event masks
2170  *			  OP_UPDATE: OR the given mask into the current mask.
2171  *	channel		- hardware RXDMA channel from 0 to 23.
2172  *	mask_p		- pointer to hardware defined event mask
2173  *			  structure.
2174  * Return:
2175  *	NPI_SUCCESS		- If the operation completes successfully.
2176  *
2177  *	Error:
2178  *	NPI error status code
2179  */
2180 npi_status_t
2181 npi_rxdma_event_mask(npi_handle_t handle, io_op_t op_mode,
2182 		uint8_t channel, p_rx_dma_ent_msk_t mask_p)
2183 {
2184 	int			status = NPI_SUCCESS;
2185 	rx_dma_ent_msk_t	mask;
2186 
2187 	ASSERT(RXDMA_CHANNEL_VALID(channel));
2188 	if (!RXDMA_CHANNEL_VALID(channel)) {
2189 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
2190 		    "npi_rxdma_event_mask",
2191 		    "channel %d", channel));
2192 		return (NPI_FAILURE | NPI_RXDMA_CHANNEL_INVALID(channel));
2193 	}
2194 
2195 	switch (op_mode) {
2196 	case OP_GET:
2197 		RXDMA_REG_READ64(handle, RX_DMA_ENT_MSK_REG, channel,
2198 				&mask_p->value);
2199 		break;
2200 
2201 	case OP_SET:
2202 		RXDMA_REG_WRITE64(handle, RX_DMA_ENT_MSK_REG, channel,
2203 				mask_p->value);
2204 		break;
2205 
2206 	case OP_UPDATE:
2207 		RXDMA_REG_READ64(handle, RX_DMA_ENT_MSK_REG, channel,
2208 				&mask.value);
2209 		RXDMA_REG_WRITE64(handle, RX_DMA_ENT_MSK_REG, channel,
2210 			mask_p->value | mask.value);
2211 		break;
2212 
2213 	default:
2214 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
2215 		    "npi_rxdma_event_mask",
2216 		    "eventmask %d", op_mode));
2217 		return (NPI_FAILURE | NPI_RXDMA_OPCODE_INVALID(channel));
2218 	}
2219 
2220 	return (status);
2221 }
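
/*
 * Usage sketch (illustrative only): read the current event mask,
 * adjust the bits of interest, and write the result back:
 *
 *	rx_dma_ent_msk_t mask;
 *
 *	if (npi_rxdma_event_mask(handle, OP_GET, channel, &mask) ==
 *	    NPI_SUCCESS) {
 *		... set or clear individual mask bits here ...
 *		(void) npi_rxdma_event_mask(handle, OP_SET, channel, &mask);
 *	}
 */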
2222 
2223 /*
2224  * npi_rxdma_event_mask_config():
2225  *	This function is called to operate on the event mask
2226  *	register (used for generating interrupts) via an NPI
2227  *	defined mask configuration value.
2228  *
2229  * Parameters:
2230  *	handle		- NPI handle
2231  *	op_mode		- OP_GET: get hardware event mask
2232  *			  OP_SET: set hardware interrupt event masks
2233  *			  OP_UPDATE: OR the given mask into the current mask.
 *			  OP_CLEAR: write CFG_RXDMA_MASK_ALL to the register.
2234  *	channel		- hardware RXDMA channel from 0 to 23.
2235  *	mask_cfgp		- pointer to an NPI defined event mask
2236  *			  configuration value.
2237  * Return:
2238  *	NPI_SUCCESS		- If the operation completes successfully.
2239  *
2240  *	Error:
2241  *	NPI error status code
2242  */
2243 npi_status_t
2244 npi_rxdma_event_mask_config(npi_handle_t handle, io_op_t op_mode,
2245 		uint8_t channel, rxdma_ent_msk_cfg_t *mask_cfgp)
2246 {
2247 	int		status = NPI_SUCCESS;
2248 	uint64_t	value;
2249 
2250 	ASSERT(RXDMA_CHANNEL_VALID(channel));
2251 	if (!RXDMA_CHANNEL_VALID(channel)) {
2252 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
2253 		    "npi_rxdma_event_mask_config",
2254 		    "channel %d", channel));
2255 		return (NPI_FAILURE | NPI_RXDMA_CHANNEL_INVALID(channel));
2256 	}
2257 
2258 	switch (op_mode) {
2259 	case OP_GET:
2260 		RXDMA_REG_READ64(handle, RX_DMA_ENT_MSK_REG, channel,
2261 				mask_cfgp);
2262 		break;
2263 
2264 	case OP_SET:
2265 		RXDMA_REG_WRITE64(handle, RX_DMA_ENT_MSK_REG, channel,
2266 				*mask_cfgp);
2267 		break;
2268 
2269 	case OP_UPDATE:
2270 		RXDMA_REG_READ64(handle, RX_DMA_ENT_MSK_REG, channel, &value);
2271 		RXDMA_REG_WRITE64(handle, RX_DMA_ENT_MSK_REG, channel,
2272 			*mask_cfgp | value);
2273 		break;
2274 
2275 	case OP_CLEAR:
2276 		RXDMA_REG_WRITE64(handle, RX_DMA_ENT_MSK_REG, channel,
2277 			CFG_RXDMA_MASK_ALL);
2278 		break;
2279 	default:
2280 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
2281 		    "npi_rxdma_event_mask_config",
2282 		    "eventmask %d", op_mode));
2283 		return (NPI_FAILURE | NPI_RXDMA_OPCODE_INVALID(channel));
2284 	}
2285 
2286 	return (status);
2287 }
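
/*
 * Usage sketch (illustrative only): read the raw mask value, then
 * write it back (a caller would normally modify it in between); the
 * OP_CLEAR case instead writes CFG_RXDMA_MASK_ALL to the register:
 *
 *	rxdma_ent_msk_cfg_t cfg;
 *
 *	if (npi_rxdma_event_mask_config(handle, OP_GET, channel, &cfg) ==
 *	    NPI_SUCCESS)
 *		(void) npi_rxdma_event_mask_config(handle, OP_SET, channel,
 *		    &cfg);
 */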
2288