xref: /illumos-gate/usr/src/uts/common/io/nxge/npi/npi_rxdma.c (revision 54034eb2d6e7d811adf4a1fe5105eac6fea6b0b5)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 #include <npi_rxdma.h>
29 #include <npi_rx_rd64.h>
30 #include <npi_rx_wr64.h>
31 #include <nxge_common.h>
32 
33 #define	 RXDMA_RESET_TRY_COUNT	4
34 #define	 RXDMA_RESET_DELAY	5
35 
36 #define	 RXDMA_OP_DISABLE	0
37 #define	 RXDMA_OP_ENABLE	1
38 #define	 RXDMA_OP_RESET	2
39 
40 #define	 RCR_TIMEOUT_ENABLE	1
41 #define	 RCR_TIMEOUT_DISABLE	2
42 #define	 RCR_THRESHOLD	4
43 
44 /* weight is assumed to be in units of byte frames */
45 #define	WEIGHT_FACTOR 3/2
46 
47 uint64_t rdc_dmc_offset[] = {
48 	RXDMA_CFIG1_REG, RXDMA_CFIG2_REG, RBR_CFIG_A_REG, RBR_CFIG_B_REG,
49 	RBR_KICK_REG, RBR_STAT_REG, RBR_HDH_REG, RBR_HDL_REG,
50 	RCRCFIG_A_REG, RCRCFIG_B_REG, RCRSTAT_A_REG, RCRSTAT_B_REG,
51 	RCRSTAT_C_REG, RX_DMA_ENT_MSK_REG, RX_DMA_CTL_STAT_REG, RCR_FLSH_REG,
52 	RXMISC_DISCARD_REG
53 };
54 
55 const char *rdc_dmc_name[] = {
56 	"RXDMA_CFIG1", "RXDMA_CFIG2", "RBR_CFIG_A", "RBR_CFIG_B",
57 	"RBR_KICK", "RBR_STAT", "RBR_HDH", "RBR_HDL",
58 	"RCRCFIG_A", "RCRCFIG_B", "RCRSTAT_A", "RCRSTAT_B",
59 	"RCRSTAT_C", "RX_DMA_ENT_MSK", "RX_DMA_CTL_STAT", "RCR_FLSH",
60 	"RXMISC_DISCARD"
61 };
62 
63 uint64_t rdc_fzc_offset [] = {
64 	RX_LOG_PAGE_VLD_REG, RX_LOG_PAGE_MASK1_REG, RX_LOG_PAGE_VAL1_REG,
65 	RX_LOG_PAGE_MASK2_REG, RX_LOG_PAGE_VAL2_REG, RX_LOG_PAGE_RELO1_REG,
66 	RX_LOG_PAGE_RELO2_REG, RX_LOG_PAGE_HDL_REG, RDC_RED_PARA_REG,
67 	RED_DIS_CNT_REG
68 };
69 
70 
71 const char *rdc_fzc_name [] = {
72 	"RX_LOG_PAGE_VLD", "RX_LOG_PAGE_MASK1", "RX_LOG_PAGE_VAL1",
73 	"RX_LOG_PAGE_MASK2", "RX_LOG_PAGE_VAL2", "RX_LOG_PAGE_RELO1",
74 	"RX_LOG_PAGE_RELO2", "RX_LOG_PAGE_HDL", "RDC_RED_PARA", "RED_DIS_CNT"
75 };
76 
77 
78 /*
79  * Dump the MEM_ADD register first so all the data registers
80  * will have valid data buffer pointers.
81  */
82 uint64_t rx_fzc_offset[] = {
83 	RX_DMA_CK_DIV_REG, DEF_PT0_RDC_REG, DEF_PT1_RDC_REG, DEF_PT2_RDC_REG,
84 	DEF_PT3_RDC_REG, RX_ADDR_MD_REG, PT_DRR_WT0_REG, PT_DRR_WT1_REG,
85 	PT_DRR_WT2_REG, PT_DRR_WT3_REG, PT_USE0_REG, PT_USE1_REG,
86 	PT_USE2_REG, PT_USE3_REG, RED_RAN_INIT_REG, RX_ADDR_MD_REG,
87 	RDMC_PRE_PAR_ERR_REG, RDMC_SHA_PAR_ERR_REG,
88 	RDMC_MEM_DATA4_REG, RDMC_MEM_DATA3_REG, RDMC_MEM_DATA2_REG,
89 	RDMC_MEM_DATA1_REG, RDMC_MEM_DATA0_REG,
90 	RDMC_MEM_ADDR_REG,
91 	RX_CTL_DAT_FIFO_STAT_REG, RX_CTL_DAT_FIFO_MASK_REG,
92 	RX_CTL_DAT_FIFO_STAT_DBG_REG,
93 	RDMC_TRAINING_VECTOR_REG,
94 };
95 
96 
97 const char *rx_fzc_name[] = {
98 	"RX_DMA_CK_DIV", "DEF_PT0_RDC", "DEF_PT1_RDC", "DEF_PT2_RDC",
99 	"DEF_PT3_RDC", "RX_ADDR_MD", "PT_DRR_WT0", "PT_DRR_WT1",
100 	"PT_DRR_WT2", "PT_DRR_WT3", "PT_USE0", "PT_USE1",
101 	"PT_USE2", "PT_USE3", "RED_RAN_INIT", "RX_ADDR_MD",
102 	"RDMC_PRE_PAR_ERR", "RDMC_SHA_PAR_ERR",
103 	"RDMC_MEM_DATA4", "RDMC_MEM_DATA3", "RDMC_MEM_DATA2",
104 	"RDMC_MEM_DATA1", "RDMC_MEM_DATA0",
105 	"RDMC_MEM_ADDR",
106 	"RX_CTL_DAT_FIFO_STAT", "RX_CTL_DAT_FIFO_MASK",
107 	"RDMC_TRAINING_VECTOR_REG",
108 	"RX_CTL_DAT_FIFO_STAT_DBG_REG"
109 };
110 
111 
112 npi_status_t
113 npi_rxdma_cfg_rdc_ctl(npi_handle_t handle, uint8_t rdc, uint8_t op);
114 npi_status_t
115 npi_rxdma_cfg_rdc_rcr_ctl(npi_handle_t handle, uint8_t rdc, uint8_t op,
116 				uint16_t param);
117 
118 
119 /*
120  * npi_rxdma_dump_rdc_regs
121  * Dumps the contents of the per-channel RDC (DMC) CSRs
122  *
123  * Input:
124  *      handle:	opaque handle interpreted by the underlying OS
125  *         rdc:      RX DMA number
126  *
127  * return:
128  *     NPI_SUCCESS
129  *     NPI_RXDMA_RDC_INVALID
130  *
131  */
132 npi_status_t
133 npi_rxdma_dump_rdc_regs(npi_handle_t handle, uint8_t rdc)
134 {
135 
136 	uint64_t value, offset;
137 	int num_regs, i;
138 #ifdef NPI_DEBUG
139 	extern uint64_t npi_debug_level;
140 	uint64_t old_npi_debug_level = npi_debug_level;
141 #endif
142 	ASSERT(RXDMA_CHANNEL_VALID(rdc));
143 	if (!RXDMA_CHANNEL_VALID(rdc)) {
144 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
145 		    "npi_rxdma_dump_rdc_regs"
146 		    " Illegal RDC number %d \n",
147 		    rdc));
148 		return (NPI_RXDMA_RDC_INVALID);
149 	}
150 #ifdef NPI_DEBUG
151 	npi_debug_level |= DUMP_ALWAYS;
152 #endif
153 	num_regs = sizeof (rdc_dmc_offset) / sizeof (uint64_t);
154 	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
155 	    "\nDMC Register Dump for Channel %d\n",
156 	    rdc));
157 	for (i = 0; i < num_regs; i++) {
158 		RXDMA_REG_READ64(handle, rdc_dmc_offset[i], rdc, &value);
159 		offset = NXGE_RXDMA_OFFSET(rdc_dmc_offset[i], handle.is_vraddr,
160 		    rdc);
161 		NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
162 		    "%08llx %s\t %08llx \n",
163 		    offset, rdc_dmc_name[i], value));
164 	}
165 
166 	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
167 	    "\n Register Dump for Channel %d done\n",
168 	    rdc));
169 #ifdef NPI_DEBUG
170 	npi_debug_level = old_npi_debug_level;
171 #endif
172 	return (NPI_SUCCESS);
173 }
174 
175 /*
176  * npi_rxdma_dump_fzc_regs
177  * Dumps the contents of the common FZC_DMC registers
178  *
179  * Input:
180  *      handle:	opaque handle interpreted by the underlying OS
181  *
182  * return:
183  *     NPI_SUCCESS
184  */
185 npi_status_t
186 npi_rxdma_dump_fzc_regs(npi_handle_t handle)
187 {
188 
189 	uint64_t value;
190 	int num_regs, i;
191 
192 
193 	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
194 	    "\nFZC_DMC Common Register Dump\n"));
195 	num_regs = sizeof (rx_fzc_offset) / sizeof (uint64_t);
196 
197 	for (i = 0; i < num_regs; i++) {
198 		NXGE_REG_RD64(handle, rx_fzc_offset[i], &value);
199 		NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
200 		    "0x%08llx %s\t 0x%08llx \n",
201 		    rx_fzc_offset[i],
202 		    rx_fzc_name[i], value));
203 	}
204 	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
205 	    "\n FZC_DMC Register Dump Done \n"));
206 
207 	return (NPI_SUCCESS);
208 }
209 
210 
211 
212 /*
213  * per rdc config functions
214  */
215 npi_status_t
216 npi_rxdma_cfg_logical_page_disable(npi_handle_t handle, uint8_t rdc,
217 				    uint8_t page_num)
218 {
219 	log_page_vld_t page_vld;
220 	uint64_t valid_offset;
221 
222 	ASSERT(RXDMA_CHANNEL_VALID(rdc));
223 	if (!RXDMA_CHANNEL_VALID(rdc)) {
224 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
225 		    "rxdma_cfg_logical_page_disable"
226 		    " Illegal RDC number %d \n",
227 		    rdc));
228 		return (NPI_RXDMA_RDC_INVALID);
229 	}
230 
231 	ASSERT(RXDMA_PAGE_VALID(page_num));
232 	if (!RXDMA_PAGE_VALID(page_num)) {
233 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
234 		    "rxdma_cfg_logical_page_disable"
235 		    " Illegal page number %d \n",
236 		    page_num));
237 		return (NPI_RXDMA_PAGE_INVALID);
238 	}
239 
240 	valid_offset = REG_FZC_RDC_OFFSET(RX_LOG_PAGE_VLD_REG, rdc);
241 	NXGE_REG_RD64(handle, valid_offset, &page_vld.value);
242 
243 	if (page_num == 0)
244 		page_vld.bits.ldw.page0 = 0;
245 
246 	if (page_num == 1)
247 		page_vld.bits.ldw.page1 = 0;
248 
249 	NXGE_REG_WR64(handle, valid_offset, page_vld.value);
250 	return (NPI_SUCCESS);
251 
252 }
253 
254 npi_status_t
255 npi_rxdma_cfg_logical_page(npi_handle_t handle, uint8_t rdc,
256 			    dma_log_page_t *pg_cfg)
257 {
258 	log_page_vld_t page_vld;
259 	log_page_mask_t page_mask;
260 	log_page_value_t page_value;
261 	log_page_relo_t page_reloc;
262 	uint64_t value_offset, reloc_offset, mask_offset;
263 	uint64_t valid_offset;
264 
265 	ASSERT(RXDMA_CHANNEL_VALID(rdc));
266 	if (!RXDMA_CHANNEL_VALID(rdc)) {
267 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
268 		    " rxdma_cfg_logical_page"
269 		    " Illegal RDC number %d \n",
270 		    rdc));
271 		return (NPI_RXDMA_RDC_INVALID);
272 	}
273 
274 	ASSERT(RXDMA_PAGE_VALID(pg_cfg->page_num));
275 	if (!RXDMA_PAGE_VALID(pg_cfg->page_num)) {
276 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
277 		    " rxdma_cfg_logical_page"
278 		    " Illegal page number %d \n",
279 		    pg_cfg->page_num));
280 		return (NPI_RXDMA_PAGE_INVALID);
281 	}
282 
283 	valid_offset = REG_FZC_RDC_OFFSET(RX_LOG_PAGE_VLD_REG, rdc);
284 	NXGE_REG_RD64(handle, valid_offset, &page_vld.value);
285 
286 	if (!pg_cfg->valid) {
287 		if (pg_cfg->page_num == 0)
288 			page_vld.bits.ldw.page0 = 0;
289 
290 		if (pg_cfg->page_num == 1)
291 			page_vld.bits.ldw.page1 = 0;
292 		NXGE_REG_WR64(handle, valid_offset, page_vld.value);
293 		return (NPI_SUCCESS);
294 	}
295 
296 	if (pg_cfg->page_num == 0) {
297 		mask_offset = REG_FZC_RDC_OFFSET(RX_LOG_PAGE_MASK1_REG, rdc);
298 		value_offset = REG_FZC_RDC_OFFSET(RX_LOG_PAGE_VAL1_REG, rdc);
299 		reloc_offset = REG_FZC_RDC_OFFSET(RX_LOG_PAGE_RELO1_REG, rdc);
300 		page_vld.bits.ldw.page0 = 1;
301 	}
302 
303 	if (pg_cfg->page_num == 1) {
304 		mask_offset = REG_FZC_RDC_OFFSET(RX_LOG_PAGE_MASK2_REG, rdc);
305 		value_offset = REG_FZC_RDC_OFFSET(RX_LOG_PAGE_VAL2_REG, rdc);
306 		reloc_offset = REG_FZC_RDC_OFFSET(RX_LOG_PAGE_RELO2_REG, rdc);
307 		page_vld.bits.ldw.page1 = 1;
308 	}
309 
310 
311 	page_vld.bits.ldw.func = pg_cfg->func_num;
312 
313 	page_mask.value = 0;
314 	page_value.value = 0;
315 	page_reloc.value = 0;
316 
317 
318 	page_mask.bits.ldw.mask = pg_cfg->mask >> LOG_PAGE_ADDR_SHIFT;
319 	page_value.bits.ldw.value = pg_cfg->value >> LOG_PAGE_ADDR_SHIFT;
320 	page_reloc.bits.ldw.relo = pg_cfg->reloc >> LOG_PAGE_ADDR_SHIFT;
321 
322 
323 	NXGE_REG_WR64(handle, mask_offset, page_mask.value);
324 	NXGE_REG_WR64(handle, value_offset, page_value.value);
325 	NXGE_REG_WR64(handle, reloc_offset, page_reloc.value);
326 
327 
328 /* enable the logical page */
329 	NXGE_REG_WR64(handle, valid_offset, page_vld.value);
330 	return (NPI_SUCCESS);
331 }
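
/*
 * Usage sketch (illustration only, not part of the NPI API): a hypothetical
 * caller programming logical page 0 of an RDC through the function above.
 * Only the dma_log_page_t fields consumed above are shown; the mask, value
 * and reloc values are placeholders for driver-supplied addresses.
 *
 *	dma_log_page_t pg;
 *
 *	pg.page_num = 0;
 *	pg.valid = 1;
 *	pg.func_num = 0;
 *	pg.mask = page_mask_addr;
 *	pg.value = page_value_addr;
 *	pg.reloc = page_reloc_addr;
 *	(void) npi_rxdma_cfg_logical_page(handle, rdc, &pg);
 */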
332 
333 npi_status_t
334 npi_rxdma_cfg_logical_page_handle(npi_handle_t handle, uint8_t rdc,
335 				    uint64_t page_handle)
336 {
337 	uint64_t offset;
338 	log_page_hdl_t page_hdl;
339 
340 	ASSERT(RXDMA_CHANNEL_VALID(rdc));
341 	if (!RXDMA_CHANNEL_VALID(rdc)) {
342 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
343 		    "rxdma_cfg_logical_page_handle"
344 		    " Illegal RDC number %d \n", rdc));
345 		return (NPI_RXDMA_RDC_INVALID);
346 	}
347 
348 	page_hdl.value = 0;
349 
350 	page_hdl.bits.ldw.handle = (uint32_t)page_handle;
351 	offset = REG_FZC_RDC_OFFSET(RX_LOG_PAGE_HDL_REG, rdc);
352 	NXGE_REG_WR64(handle, offset, page_hdl.value);
353 
354 	return (NPI_SUCCESS);
355 }
356 
357 /*
358  * RX DMA functions
359  */
360 npi_status_t
361 npi_rxdma_cfg_rdc_ctl(npi_handle_t handle, uint8_t rdc, uint8_t op)
362 {
363 
364 	rxdma_cfig1_t cfg;
365 	uint32_t count = RXDMA_RESET_TRY_COUNT;
366 	uint32_t delay_time = RXDMA_RESET_DELAY;
367 	uint32_t error = NPI_RXDMA_ERROR_ENCODE(NPI_RXDMA_RESET_ERR, rdc);
368 
369 	ASSERT(RXDMA_CHANNEL_VALID(rdc));
370 	if (!RXDMA_CHANNEL_VALID(rdc)) {
371 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
372 		    "npi_rxdma_cfg_rdc_ctl"
373 		    " Illegal RDC number %d \n", rdc));
374 		return (NPI_RXDMA_RDC_INVALID);
375 	}
376 
377 
378 	switch (op) {
379 		case RXDMA_OP_ENABLE:
380 			RXDMA_REG_READ64(handle, RXDMA_CFIG1_REG, rdc,
381 			    &cfg.value);
382 			cfg.bits.ldw.en = 1;
383 			RXDMA_REG_WRITE64(handle, RXDMA_CFIG1_REG,
384 			    rdc, cfg.value);
385 
386 			NXGE_DELAY(delay_time);
387 			RXDMA_REG_READ64(handle, RXDMA_CFIG1_REG, rdc,
388 			    &cfg.value);
389 			while ((count--) && (cfg.bits.ldw.qst == 0)) {
390 				NXGE_DELAY(delay_time);
391 				RXDMA_REG_READ64(handle, RXDMA_CFIG1_REG, rdc,
392 				    &cfg.value);
393 			}
394 
395 			if (cfg.bits.ldw.qst == 0) {
396 				NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
397 				    " npi_rxdma_cfg_rdc_ctl"
398 				    " RXDMA_OP_ENABLE Failed for RDC %d \n",
399 				    rdc));
400 				return (error);
401 			}
402 
403 			break;
404 		case RXDMA_OP_DISABLE:
405 			RXDMA_REG_READ64(handle, RXDMA_CFIG1_REG, rdc,
406 			    &cfg.value);
407 			cfg.bits.ldw.en = 0;
408 			RXDMA_REG_WRITE64(handle, RXDMA_CFIG1_REG,
409 			    rdc, cfg.value);
410 
411 			NXGE_DELAY(delay_time);
412 			RXDMA_REG_READ64(handle, RXDMA_CFIG1_REG, rdc,
413 			    &cfg.value);
414 			while ((count--) && (cfg.bits.ldw.qst == 0)) {
415 				NXGE_DELAY(delay_time);
416 				RXDMA_REG_READ64(handle, RXDMA_CFIG1_REG, rdc,
417 				    &cfg.value);
418 			}
419 			if (cfg.bits.ldw.qst == 0) {
420 				NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
421 				    " npi_rxdma_cfg_rdc_ctl"
422 				    " RXDMA_OP_DISABLE Failed for RDC %d \n",
423 				    rdc));
424 				return (error);
425 			}
426 
427 			break;
428 		case RXDMA_OP_RESET:
429 			cfg.value = 0;
430 			cfg.bits.ldw.rst = 1;
431 			RXDMA_REG_WRITE64(handle,
432 			    RXDMA_CFIG1_REG,
433 			    rdc, cfg.value);
434 			NXGE_DELAY(delay_time);
435 			RXDMA_REG_READ64(handle, RXDMA_CFIG1_REG, rdc,
436 			    &cfg.value);
437 			while ((count--) && (cfg.bits.ldw.rst)) {
438 				NXGE_DELAY(delay_time);
439 				RXDMA_REG_READ64(handle, RXDMA_CFIG1_REG, rdc,
440 				    &cfg.value);
441 			}
442 			if (count == 0) {
443 				NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
444 				    " npi_rxdma_cfg_rdc_ctl"
445 				    " Reset Failed for RDC %d \n",
446 				    rdc));
447 				return (error);
448 			}
449 			break;
450 		default:
451 			return (NPI_RXDMA_SW_PARAM_ERROR);
452 	}
453 
454 	return (NPI_SUCCESS);
455 }
456 
457 npi_status_t
458 npi_rxdma_cfg_rdc_enable(npi_handle_t handle, uint8_t rdc)
459 {
460 	return (npi_rxdma_cfg_rdc_ctl(handle, rdc, RXDMA_OP_ENABLE));
461 }
462 
463 npi_status_t
464 npi_rxdma_cfg_rdc_disable(npi_handle_t handle, uint8_t rdc)
465 {
466 	return (npi_rxdma_cfg_rdc_ctl(handle, rdc, RXDMA_OP_DISABLE));
467 }
468 
469 npi_status_t
470 npi_rxdma_cfg_rdc_reset(npi_handle_t handle, uint8_t rdc)
471 {
472 	return (npi_rxdma_cfg_rdc_ctl(handle, rdc, RXDMA_OP_RESET));
473 }
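
/*
 * Usage sketch (illustration only): a hypothetical caller that resets an
 * RDC and then re-enables it, propagating the NPI status codes returned by
 * the wrappers above.
 *
 *	npi_status_t rs;
 *
 *	rs = npi_rxdma_cfg_rdc_reset(handle, rdc);
 *	if (rs != NPI_SUCCESS)
 *		return (rs);
 *	return (npi_rxdma_cfg_rdc_enable(handle, rdc));
 */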
474 
475 /*
476  * npi_rxdma_cfg_default_port_rdc()
477  * Set the default rdc for the port
478  *
479  * Inputs:
480  *	handle:		register handle interpreted by the underlying OS
481  *	portnm:		Physical Port Number
482  *	rdc:	RX DMA Channel number
483  *
484  * Return:
485  * NPI_SUCCESS
486  * NPI_RXDMA_RDC_INVALID
487  * NPI_RXDMA_PORT_INVALID
488  *
489  */
490 npi_status_t npi_rxdma_cfg_default_port_rdc(npi_handle_t handle,
491 				    uint8_t portnm, uint8_t rdc)
492 {
493 
494 	uint64_t offset;
495 	def_pt_rdc_t cfg;
496 
497 	ASSERT(RXDMA_CHANNEL_VALID(rdc));
498 	if (!RXDMA_CHANNEL_VALID(rdc)) {
499 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
500 		    "rxdma_cfg_default_port_rdc"
501 		    " Illegal RDC number %d \n",
502 		    rdc));
503 		return (NPI_RXDMA_RDC_INVALID);
504 	}
505 
506 	ASSERT(RXDMA_PORT_VALID(portnm));
507 	if (!RXDMA_PORT_VALID(portnm)) {
508 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
509 		    "rxdma_cfg_default_port_rdc"
510 		    " Illegal Port number %d \n",
511 		    portnm));
512 		return (NPI_RXDMA_PORT_INVALID);
513 	}
514 
515 	offset = DEF_PT_RDC_REG(portnm);
516 	cfg.value = 0;
517 	cfg.bits.ldw.rdc = rdc;
518 	NXGE_REG_WR64(handle, offset, cfg.value);
519 	return (NPI_SUCCESS);
520 }
521 
522 npi_status_t
523 npi_rxdma_cfg_rdc_rcr_ctl(npi_handle_t handle, uint8_t rdc,
524 			    uint8_t op, uint16_t param)
525 {
526 	rcrcfig_b_t rcr_cfgb;
527 
528 	ASSERT(RXDMA_CHANNEL_VALID(rdc));
529 	if (!RXDMA_CHANNEL_VALID(rdc)) {
530 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
531 		    "rxdma_cfg_rdc_rcr_ctl"
532 		    " Illegal RDC number %d \n",
533 		    rdc));
534 		return (NPI_RXDMA_RDC_INVALID);
535 	}
536 
537 
538 	RXDMA_REG_READ64(handle, RCRCFIG_B_REG, rdc, &rcr_cfgb.value);
539 
540 	switch (op) {
541 		case RCR_TIMEOUT_ENABLE:
542 			rcr_cfgb.bits.ldw.timeout = (uint8_t)param;
543 			rcr_cfgb.bits.ldw.entout = 1;
544 			break;
545 
546 		case RCR_THRESHOLD:
547 			rcr_cfgb.bits.ldw.pthres = param;
548 			break;
549 
550 		case RCR_TIMEOUT_DISABLE:
551 			rcr_cfgb.bits.ldw.entout = 0;
552 			break;
553 
554 		default:
555 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
556 		    "rxdma_cfg_rdc_rcr_ctl"
557 		    " Illegal opcode %x \n",
558 		    op));
559 		return (NPI_RXDMA_OPCODE_INVALID(rdc));
560 	}
561 
562 	RXDMA_REG_WRITE64(handle, RCRCFIG_B_REG, rdc, rcr_cfgb.value);
563 	return (NPI_SUCCESS);
564 }
565 
566 npi_status_t
567 npi_rxdma_cfg_rdc_rcr_timeout_disable(npi_handle_t handle, uint8_t rdc)
568 {
569 	return (npi_rxdma_cfg_rdc_rcr_ctl(handle, rdc,
570 	    RCR_TIMEOUT_DISABLE, 0));
571 }
572 
573 npi_status_t
574 npi_rxdma_cfg_rdc_rcr_threshold(npi_handle_t handle, uint8_t rdc,
575 				    uint16_t rcr_threshold)
576 {
577 	return (npi_rxdma_cfg_rdc_rcr_ctl(handle, rdc,
578 	    RCR_THRESHOLD, rcr_threshold));
579 
580 }
581 
582 npi_status_t
583 npi_rxdma_cfg_rdc_rcr_timeout(npi_handle_t handle, uint8_t rdc,
584 			    uint8_t rcr_timeout)
585 {
586 	return (npi_rxdma_cfg_rdc_rcr_ctl(handle, rdc,
587 	    RCR_TIMEOUT_ENABLE, rcr_timeout));
588 
589 }
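
/*
 * Usage sketch (illustration only): a hypothetical caller programming RCR
 * interrupt coalescing through the wrappers above.  The threshold and
 * timeout values are placeholders; npi_rxdma_cfg_rdc_rcr_ctl() does not
 * range-check them.
 *
 *	(void) npi_rxdma_cfg_rdc_rcr_threshold(handle, rdc, 8);
 *	(void) npi_rxdma_cfg_rdc_rcr_timeout(handle, rdc, 16);
 */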
590 
591 /*
592  * npi_rxdma_cfg_rdc_ring()
593  * Configure The RDC channel Rcv Buffer Ring
594  */
595 npi_status_t
596 npi_rxdma_cfg_rdc_ring(npi_handle_t handle, uint8_t rdc,
597 			    rdc_desc_cfg_t *rdc_desc_cfg)
598 {
599 	rbr_cfig_a_t cfga;
600 	rbr_cfig_b_t cfgb;
601 	rxdma_cfig1_t cfg1;
602 	rxdma_cfig2_t cfg2;
603 	rcrcfig_a_t rcr_cfga;
604 	rcrcfig_b_t rcr_cfgb;
605 
606 	ASSERT(RXDMA_CHANNEL_VALID(rdc));
607 	if (!RXDMA_CHANNEL_VALID(rdc)) {
608 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
609 		    "rxdma_cfg_rdc_ring"
610 		    " Illegal RDC number %d \n",
611 		    rdc));
612 		return (NPI_RXDMA_RDC_INVALID);
613 	}
614 
615 
616 	cfga.value = 0;
617 	cfgb.value = 0;
618 	cfg1.value = 0;
619 	cfg2.value = 0;
620 
621 	if (rdc_desc_cfg->mbox_enable == 1) {
622 		cfg1.bits.ldw.mbaddr_h =
623 		    (rdc_desc_cfg->mbox_addr >> 32) & 0xfff;
624 		cfg2.bits.ldw.mbaddr =
625 		    ((rdc_desc_cfg->mbox_addr &
626 		    RXDMA_CFIG2_MBADDR_L_MASK) >>
627 		    RXDMA_CFIG2_MBADDR_L_SHIFT);
628 
629 
630 		/*
631 		 * Enable the RDC only after all of the configurations
632 		 * have been set; otherwise a configuration fatal error
633 		 * will be returned (especially if the Hypervisor has
634 		 * set up the logical pages with non-zero values).
635 		 * This NPI function only sets up the configuration.
636 		 */
637 	}
638 
639 
640 	if (rdc_desc_cfg->full_hdr == 1)
641 		cfg2.bits.ldw.full_hdr = 1;
642 
643 	if (RXDMA_BUFF_OFFSET_VALID(rdc_desc_cfg->offset)) {
644 		cfg2.bits.ldw.offset = rdc_desc_cfg->offset;
645 	} else {
646 		cfg2.bits.ldw.offset = SW_OFFSET_NO_OFFSET;
647 	}
648 
649 		/* rbr config */
650 
651 	cfga.value = (rdc_desc_cfg->rbr_addr & (RBR_CFIG_A_STDADDR_MASK |
652 	    RBR_CFIG_A_STDADDR_BASE_MASK));
653 
654 	if ((rdc_desc_cfg->rbr_len < RBR_DEFAULT_MIN_LEN) ||
655 	    (rdc_desc_cfg->rbr_len > RBR_DEFAULT_MAX_LEN)) {
656 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
657 		    "npi_rxdma_cfg_rdc_ring"
658 		    " Illegal RBR Queue Length %d \n",
659 		    rdc_desc_cfg->rbr_len));
660 		return (NPI_RXDMA_ERROR_ENCODE(NPI_RXDMA_RBRSIZE_INVALID, rdc));
661 	}
662 
663 
664 	cfga.bits.hdw.len = rdc_desc_cfg->rbr_len;
665 	NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
666 	    "npi_rxdma_cfg_rdc_ring"
667 	    " CFGA 0x%llx hdw.len %d (RBR LEN %d)\n",
668 	    cfga.value, cfga.bits.hdw.len,
669 	    rdc_desc_cfg->rbr_len));
670 
671 	if (rdc_desc_cfg->page_size == SIZE_4KB)
672 		cfgb.bits.ldw.bksize = RBR_BKSIZE_4K;
673 	else if (rdc_desc_cfg->page_size == SIZE_8KB)
674 		cfgb.bits.ldw.bksize = RBR_BKSIZE_8K;
675 	else if (rdc_desc_cfg->page_size == SIZE_16KB)
676 		cfgb.bits.ldw.bksize = RBR_BKSIZE_16K;
677 	else if (rdc_desc_cfg->page_size == SIZE_32KB)
678 		cfgb.bits.ldw.bksize = RBR_BKSIZE_32K;
679 	else {
680 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
681 		    "rxdma_cfg_rdc_ring"
682 		    " blksize: Illegal buffer size %d \n",
683 		    rdc_desc_cfg->page_size));
684 		return (NPI_RXDMA_BUFSIZE_INVALID);
685 	}
686 
687 	if (rdc_desc_cfg->valid0) {
688 
689 		if (rdc_desc_cfg->size0 == SIZE_256B)
690 			cfgb.bits.ldw.bufsz0 = RBR_BUFSZ0_256B;
691 		else if (rdc_desc_cfg->size0 == SIZE_512B)
692 			cfgb.bits.ldw.bufsz0 = RBR_BUFSZ0_512B;
693 		else if (rdc_desc_cfg->size0 == SIZE_1KB)
694 			cfgb.bits.ldw.bufsz0 = RBR_BUFSZ0_1K;
695 		else if (rdc_desc_cfg->size0 == SIZE_2KB)
696 			cfgb.bits.ldw.bufsz0 = RBR_BUFSZ0_2K;
697 		else {
698 			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
699 			    " rxdma_cfg_rdc_ring"
700 			    " blksize0: Illegal buffer size %x \n",
701 			    rdc_desc_cfg->size0));
702 			return (NPI_RXDMA_BUFSIZE_INVALID);
703 		}
704 		cfgb.bits.ldw.vld0 = 1;
705 	} else {
706 		cfgb.bits.ldw.vld0 = 0;
707 	}
708 
709 
710 	if (rdc_desc_cfg->valid1) {
711 		if (rdc_desc_cfg->size1 == SIZE_1KB)
712 			cfgb.bits.ldw.bufsz1 = RBR_BUFSZ1_1K;
713 		else if (rdc_desc_cfg->size1 == SIZE_2KB)
714 			cfgb.bits.ldw.bufsz1 = RBR_BUFSZ1_2K;
715 		else if (rdc_desc_cfg->size1 == SIZE_4KB)
716 			cfgb.bits.ldw.bufsz1 = RBR_BUFSZ1_4K;
717 		else if (rdc_desc_cfg->size1 == SIZE_8KB)
718 			cfgb.bits.ldw.bufsz1 = RBR_BUFSZ1_8K;
719 		else {
720 			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
721 			    " rxdma_cfg_rdc_ring"
722 			    " blksize1: Illegal buffer size %x \n",
723 			    rdc_desc_cfg->size1));
724 			return (NPI_RXDMA_BUFSIZE_INVALID);
725 		}
726 		cfgb.bits.ldw.vld1 = 1;
727 	} else {
728 		cfgb.bits.ldw.vld1 = 0;
729 	}
730 
731 
732 	if (rdc_desc_cfg->valid2) {
733 		if (rdc_desc_cfg->size2 == SIZE_2KB)
734 			cfgb.bits.ldw.bufsz2 = RBR_BUFSZ2_2K;
735 		else if (rdc_desc_cfg->size2 == SIZE_4KB)
736 			cfgb.bits.ldw.bufsz2 = RBR_BUFSZ2_4K;
737 		else if (rdc_desc_cfg->size2 == SIZE_8KB)
738 			cfgb.bits.ldw.bufsz2 = RBR_BUFSZ2_8K;
739 		else if (rdc_desc_cfg->size2 == SIZE_16KB)
740 			cfgb.bits.ldw.bufsz2 = RBR_BUFSZ2_16K;
741 		else {
742 			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
743 			    " rxdma_cfg_rdc_ring"
744 			    " blksize2: Illegal buffer size %x \n",
745 			    rdc_desc_cfg->size2));
746 			return (NPI_RXDMA_BUFSIZE_INVALID);
747 		}
748 		cfgb.bits.ldw.vld2 = 1;
749 	} else {
750 		cfgb.bits.ldw.vld2 = 0;
751 	}
752 
753 
754 	rcr_cfga.value = (rdc_desc_cfg->rcr_addr &
755 	    (RCRCFIG_A_STADDR_MASK |
756 	    RCRCFIG_A_STADDR_BASE_MASK));
757 
758 
759 	if ((rdc_desc_cfg->rcr_len < RCR_DEFAULT_MIN_LEN) ||
760 	    (rdc_desc_cfg->rcr_len > NXGE_RCR_MAX)) {
761 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
762 		    " rxdma_cfg_rdc_ring"
763 		    " Illegal RCR Queue Length %d \n",
764 		    rdc_desc_cfg->rcr_len));
765 		return (NPI_RXDMA_ERROR_ENCODE(NPI_RXDMA_RCRSIZE_INVALID, rdc));
766 	}
767 
768 	rcr_cfga.bits.hdw.len = rdc_desc_cfg->rcr_len;
769 
770 
771 	rcr_cfgb.value = 0;
772 	if (rdc_desc_cfg->rcr_timeout_enable == 1) {
773 		/* check if the rcr timeout value is valid */
774 
775 		if (RXDMA_RCR_TO_VALID(rdc_desc_cfg->rcr_timeout)) {
776 			rcr_cfgb.bits.ldw.timeout = rdc_desc_cfg->rcr_timeout;
777 			rcr_cfgb.bits.ldw.entout = 1;
778 		} else {
779 			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
780 			    " rxdma_cfg_rdc_ring"
781 			    " Illegal RCR Timeout value %d \n",
782 			    rdc_desc_cfg->rcr_timeout));
783 			rcr_cfgb.bits.ldw.entout = 0;
784 		}
785 	} else {
786 		rcr_cfgb.bits.ldw.entout = 0;
787 	}
788 
789 		/* check if the rcr threshold value is valid */
790 	if (RXDMA_RCR_THRESH_VALID(rdc_desc_cfg->rcr_threshold)) {
791 		rcr_cfgb.bits.ldw.pthres = rdc_desc_cfg->rcr_threshold;
792 	} else {
793 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
794 		    " rxdma_cfg_rdc_ring"
795 		    " Illegal RCR Threshold value %d \n",
796 		    rdc_desc_cfg->rcr_threshold));
797 		rcr_cfgb.bits.ldw.pthres = 1;
798 	}
799 
800 		/* now do the actual HW configuration */
801 	RXDMA_REG_WRITE64(handle, RXDMA_CFIG1_REG, rdc, cfg1.value);
802 	RXDMA_REG_WRITE64(handle, RXDMA_CFIG2_REG, rdc, cfg2.value);
803 
804 
805 	RXDMA_REG_WRITE64(handle, RBR_CFIG_A_REG, rdc, cfga.value);
806 	RXDMA_REG_WRITE64(handle, RBR_CFIG_B_REG, rdc, cfgb.value);
807 
808 	RXDMA_REG_WRITE64(handle, RCRCFIG_A_REG, rdc, rcr_cfga.value);
809 	RXDMA_REG_WRITE64(handle, RCRCFIG_B_REG, rdc, rcr_cfgb.value);
810 
811 	return (NPI_SUCCESS);
812 
813 }
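
/*
 * Usage sketch (illustration only): a hypothetical caller filling in the
 * rdc_desc_cfg_t fields consumed by npi_rxdma_cfg_rdc_ring() above.  The
 * DMA addresses and ring lengths are placeholders supplied by the real
 * driver; buffer and page sizes use the SIZE_* constants referenced above.
 *
 *	rdc_desc_cfg_t cfg;
 *
 *	bzero(&cfg, sizeof (cfg));
 *	cfg.mbox_enable = 1;
 *	cfg.mbox_addr = mbox_dma_addr;
 *	cfg.page_size = SIZE_8KB;
 *	cfg.valid0 = 1;
 *	cfg.size0 = SIZE_2KB;
 *	cfg.rbr_addr = rbr_dma_addr;
 *	cfg.rbr_len = rbr_entries;
 *	cfg.rcr_addr = rcr_dma_addr;
 *	cfg.rcr_len = rcr_entries;
 *	cfg.rcr_threshold = 8;
 *	cfg.rcr_timeout_enable = 1;
 *	cfg.rcr_timeout = 16;
 *	(void) npi_rxdma_cfg_rdc_ring(handle, rdc, &cfg);
 */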
814 
815 /*
816  * npi_rxdma_red_discard_stat_get
817  * Gets the current discard count due to RED
818  * The counter overflow bit is cleared if it has been set.
819  *
820  * Inputs:
821  *      handle:	opaque handle interpreted by the underlying OS
822  *	rdc:		RX DMA Channel number
823  *	cnt:	Ptr to structure to write current RDC discard stat
824  *
825  * Return:
826  * NPI_SUCCESS
827  * NPI_RXDMA_RDC_INVALID
828  *
829  */
830 npi_status_t
831 npi_rxdma_red_discard_stat_get(npi_handle_t handle, uint8_t rdc,
832 				    rx_disc_cnt_t *cnt)
833 {
834 	uint64_t offset;
835 
836 	ASSERT(RXDMA_CHANNEL_VALID(rdc));
837 	if (!RXDMA_CHANNEL_VALID(rdc)) {
838 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
839 		    " npi_rxdma_red_discard_stat_get"
840 		    " Illegal RDC Number %d \n",
841 		    rdc));
842 		return (NPI_RXDMA_RDC_INVALID);
843 	}
844 
845 	offset = RDC_RED_RDC_DISC_REG(rdc);
846 	NXGE_REG_RD64(handle, offset, &cnt->value);
847 	if (cnt->bits.ldw.oflow) {
848 		NPI_DEBUG_MSG((handle.function, NPI_ERR_CTL,
849 		    " npi_rxdma_red_discard_stat_get"
850 		    " Counter overflow for channel %d ",
851 		    " ..... clearing \n",
852 		    rdc));
853 		cnt->bits.ldw.oflow = 0;
854 		NXGE_REG_WR64(handle, offset, cnt->value);
855 		cnt->bits.ldw.oflow = 1;
856 	}
857 
858 	return (NPI_SUCCESS);
859 }
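
/*
 * Usage sketch (illustration only): polling the RED discard counter for a
 * channel.  On return, cnt.bits.ldw.oflow indicates that the hardware
 * counter had wrapped (the function above clears the bit in the register
 * but reports it back to the caller).
 *
 *	rx_disc_cnt_t cnt;
 *
 *	if (npi_rxdma_red_discard_stat_get(handle, rdc, &cnt) == NPI_SUCCESS &&
 *	    cnt.bits.ldw.oflow)
 *		cmn_err(CE_NOTE, "RED discard counter wrapped on RDC %d", rdc);
 */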
860 
861 /*
862  * npi_rxdma_red_discard_oflow_clear
863  * Clear RED discard counter overflow bit
864  *
865  * Inputs:
866  *      handle:	opaque handle interpreted by the underlying OS
867  *	rdc:		RX DMA Channel number
868  *
869  * Return:
870  * NPI_SUCCESS
871  * NPI_RXDMA_RDC_INVALID
872  *
873  */
874 npi_status_t
875 npi_rxdma_red_discard_oflow_clear(npi_handle_t handle, uint8_t rdc)
876 
877 {
878 	uint64_t offset;
879 	rx_disc_cnt_t cnt;
880 
881 	ASSERT(RXDMA_CHANNEL_VALID(rdc));
882 	if (!RXDMA_CHANNEL_VALID(rdc)) {
883 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
884 			    " npi_rxdma_red_discard_oflow_clear"
885 			    " Illegal RDC Number %d \n",
886 			    rdc));
887 		return (NPI_RXDMA_RDC_INVALID);
888 	}
889 
890 	offset = RDC_RED_RDC_DISC_REG(rdc);
891 	NXGE_REG_RD64(handle, offset, &cnt.value);
892 	if (cnt.bits.ldw.oflow) {
893 		NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
894 			    " npi_rxdma_red_discard_oflow_clear"
895 			    " Counter overflow for channel %d ",
896 			    " ..... clearing \n",
897 			    rdc));
898 		cnt.bits.ldw.oflow = 0;
899 		NXGE_REG_WR64(handle, offset, cnt.value);
900 	}
901 	return (NPI_SUCCESS);
902 }
903 
904 /*
905  * npi_rxdma_misc_discard_stat_get
906  * Gets the current discard count for the rdc due to
907  * buffer pool empty
908  * The counter overflow bit is cleared if it has been set.
909  *
910  * Inputs:
911  *      handle:	opaque handle interpreted by the underlying OS
912  *	rdc:		RX DMA Channel number
913  *	cnt:	Ptr to structure to write current RDC discard stat
914  *
915  * Return:
916  * NPI_SUCCESS
917  * NPI_RXDMA_RDC_INVALID
918  *
919  */
920 npi_status_t
921 npi_rxdma_misc_discard_stat_get(npi_handle_t handle, uint8_t rdc,
922 				    rx_disc_cnt_t *cnt)
923 {
924 	ASSERT(RXDMA_CHANNEL_VALID(rdc));
925 	if (!RXDMA_CHANNEL_VALID(rdc)) {
926 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
927 		    " npi_rxdma_misc_discard_stat_get"
928 		    " Illegal RDC Number %d \n",
929 		    rdc));
930 		return (NPI_RXDMA_RDC_INVALID);
931 	}
932 
933 	RXDMA_REG_READ64(handle, RXMISC_DISCARD_REG, rdc, &cnt->value);
934 	if (cnt->bits.ldw.oflow) {
935 		NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
936 		    " npi_rxdma_misc_discard_stat_get"
937 		    " Counter overflow for channel %d ",
938 		    " ..... clearing \n",
939 		    rdc));
940 		cnt->bits.ldw.oflow = 0;
941 		RXDMA_REG_WRITE64(handle, RXMISC_DISCARD_REG, rdc, cnt->value);
942 		cnt->bits.ldw.oflow = 1;
943 	}
944 
945 	return (NPI_SUCCESS);
946 }
947 
948 /*
949  * npi_rxdma_misc_discard_oflow_clear
950  * Clear the overflow bit of the buffer pool empty discard
951  * counter for the rdc
953  *
954  * Inputs:
955  *      handle:	opaque handle interpreted by the underlying OS
956  *	rdc:		RX DMA Channel number
957  *
958  * Return:
959  * NPI_SUCCESS
960  * NPI_RXDMA_RDC_INVALID
961  *
962  */
963 npi_status_t
964 npi_rxdma_misc_discard_oflow_clear(npi_handle_t handle, uint8_t rdc)
965 {
966 	rx_disc_cnt_t cnt;
967 
968 	ASSERT(RXDMA_CHANNEL_VALID(rdc));
969 	if (!RXDMA_CHANNEL_VALID(rdc)) {
970 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
971 		    " npi_rxdma_misc_discard_oflow_clear"
972 		    " Illegal RDC Number %d \n",
973 		    rdc));
974 		return (NPI_RXDMA_RDC_INVALID);
975 	}
976 
977 	RXDMA_REG_READ64(handle, RXMISC_DISCARD_REG, rdc, &cnt.value);
978 	if (cnt.bits.ldw.oflow) {
979 		NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
980 		    " npi_rxdma_misc_discard_oflow_clear"
981 		    " Counter overflow for channel %d ",
982 		    " ..... clearing \n",
983 		    rdc));
984 		cnt.bits.ldw.oflow = 0;
985 		RXDMA_REG_WRITE64(handle, RXMISC_DISCARD_REG, rdc, cnt.value);
986 	}
987 
988 	return (NPI_SUCCESS);
989 }
990 
991 /*
992  * npi_rxdma_ring_perr_stat_get
993  * Gets the current RDC memory parity error status
994  * The error bits are cleared if they have been set.
995  *
996  * Inputs:
997  * handle:	opaque handle interpreted by the underlying OS
998  * pre_log:	Structure to write current RDC Prefetch memory
999  *		Parity Error stat
1000  * sha_log:	Structure to write current RDC Shadow memory
1001  *		Parity Error stat
1002  *
1003  * Return:
1004  * NPI_SUCCESS
1005  *
1006  */
1007 npi_status_t
1008 npi_rxdma_ring_perr_stat_get(npi_handle_t handle,
1009 			    rdmc_par_err_log_t *pre_log,
1010 			    rdmc_par_err_log_t *sha_log)
1011 {
1012 	uint64_t pre_offset, sha_offset;
1013 	rdmc_par_err_log_t clr;
1014 	int clr_bits = 0;
1015 
1016 	pre_offset = RDMC_PRE_PAR_ERR_REG;
1017 	sha_offset = RDMC_SHA_PAR_ERR_REG;
1018 	NXGE_REG_RD64(handle, pre_offset, &pre_log->value);
1019 	NXGE_REG_RD64(handle, sha_offset, &sha_log->value);
1020 
1021 	clr.value = pre_log->value;
1022 	if (pre_log->bits.ldw.err) {
1023 		NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
1024 		    " npi_rxdma_ring_perr_stat_get"
1025 		    " PRE ERR Bit set ..... clearing \n"));
1026 		clr.bits.ldw.err = 0;
1027 		clr_bits++;
1028 	}
1029 
1030 	if (pre_log->bits.ldw.merr) {
1031 		NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
1032 		    " npi_rxdma_ring_perr_stat_get"
1033 		    " PRE MERR Bit set ..... clearing \n"));
1034 		clr.bits.ldw.merr = 0;
1035 		clr_bits++;
1036 	}
1037 
1038 	if (clr_bits) {
1039 		NXGE_REG_WR64(handle, pre_offset, clr.value);
1040 	}
1041 
1042 	clr_bits = 0;
1043 	clr.value = sha_log->value;
1044 	if (sha_log->bits.ldw.err) {
1045 		NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
1046 		    " npi_rxdma_ring_perr_stat_get"
1047 		    " SHA ERR Bit set ..... clearing \n"));
1048 		clr.bits.ldw.err = 0;
1049 		clr_bits++;
1050 	}
1051 
1052 	if (sha_log->bits.ldw.merr) {
1053 		NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
1054 		    " npi_rxdma_ring_perr_stat_get"
1055 		    " SHA MERR Bit set ..... clearing \n"));
1056 		clr.bits.ldw.merr = 0;
1057 		clr_bits++;
1058 	}
1059 
1060 	if (clr_bits) {
1061 		NXGE_REG_WR64(handle, sha_offset, clr.value);
1062 	}
1063 
1064 	return (NPI_SUCCESS);
1065 }
1066 
1067 /*
1068  * npi_rxdma_ring_perr_stat_clear
1069  * Clear the RDC memory parity error status bits
1070  *
1071  * Inputs:
1072  *      handle:	opaque handle interpreted by the underlying OS
1073  * Return:
1074  * NPI_SUCCESS
1075  *
1076  */
1077 npi_status_t
1078 npi_rxdma_ring_perr_stat_clear(npi_handle_t handle)
1079 {
1080 	uint64_t pre_offset, sha_offset;
1081 	rdmc_par_err_log_t clr;
1082 	int clr_bits = 0;
1083 	pre_offset = RDMC_PRE_PAR_ERR_REG;
1084 	sha_offset = RDMC_SHA_PAR_ERR_REG;
1085 
1086 	NXGE_REG_RD64(handle, pre_offset, &clr.value);
1087 
1088 	if (clr.bits.ldw.err) {
1089 		NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
1090 		    " npi_rxdma_ring_perr_stat_get"
1091 		    " PRE ERR Bit set ..... clearing \n"));
1092 		clr.bits.ldw.err = 0;
1093 		clr_bits++;
1094 	}
1095 
1096 	if (clr.bits.ldw.merr) {
1097 		NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
1098 		    " npi_rxdma_ring_perr_stat_get"
1099 		    " PRE MERR Bit set ..... clearing \n"));
1100 		clr.bits.ldw.merr = 0;
1101 		clr_bits++;
1102 	}
1103 
1104 	if (clr_bits) {
1105 		NXGE_REG_WR64(handle, pre_offset, clr.value);
1106 	}
1107 
1108 	clr_bits = 0;
1109 	NXGE_REG_RD64(handle, sha_offset, &clr.value);
1110 	if (clr.bits.ldw.err) {
1111 		NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
1112 		    " npi_rxdma_ring_perr_stat_get"
1113 		    " SHA ERR Bit set ..... clearing \n"));
1114 		clr.bits.ldw.err = 0;
1115 		clr_bits++;
1116 	}
1117 
1118 	if (clr.bits.ldw.merr) {
1119 		NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
1120 		    " npi_rxdma_ring_perr_stat_get"
1121 		    " SHA MERR Bit set ..... clearing \n"));
1122 		clr.bits.ldw.merr = 0;
1123 		clr_bits++;
1124 	}
1125 
1126 	if (clr_bits) {
1127 		NXGE_REG_WR64(handle, sha_offset, clr.value);
1128 	}
1129 
1130 	return (NPI_SUCCESS);
1131 }
1132 
1133 /*
1134  * Access the RDMC Memory: used for debugging
1135  */
1136 npi_status_t
1137 npi_rxdma_rdmc_memory_io(npi_handle_t handle,
1138 			    rdmc_mem_access_t *data, uint8_t op)
1139 {
1140 	uint64_t d0_offset, d1_offset, d2_offset, d3_offset, d4_offset;
1141 	uint64_t addr_offset;
1142 	rdmc_mem_addr_t addr;
1143 	rdmc_mem_data_t d0, d1, d2, d3, d4;
1144 	d0.value = 0;
1145 	d1.value = 0;
1146 	d2.value = 0;
1147 	d3.value = 0;
1148 	d4.value = 0;
1149 	addr.value = 0;
1150 
1151 
1152 	if ((data->location != RDMC_MEM_ADDR_PREFETCH) &&
1153 	    (data->location != RDMC_MEM_ADDR_SHADOW)) {
1154 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
1155 		    " npi_rxdma_rdmc_memory_io"
1156 		    " Illegal memory Type %x \n",
1157 		    data->location));
1158 		return (NPI_RXDMA_OPCODE_INVALID(0));
1159 	}
1160 
1161 	addr_offset = RDMC_MEM_ADDR_REG;
1162 	addr.bits.ldw.addr = data->addr;
1163 	addr.bits.ldw.pre_shad = data->location;
1164 
1165 	d0_offset = RDMC_MEM_DATA0_REG;
1166 	d1_offset = RDMC_MEM_DATA1_REG;
1167 	d2_offset = RDMC_MEM_DATA2_REG;
1168 	d3_offset = RDMC_MEM_DATA3_REG;
1169 	d4_offset = RDMC_MEM_DATA4_REG;
1170 
1171 
1172 	if (op == RDMC_MEM_WRITE) {
1173 		d0.bits.ldw.data = data->data[0];
1174 		d1.bits.ldw.data = data->data[1];
1175 		d2.bits.ldw.data = data->data[2];
1176 		d3.bits.ldw.data = data->data[3];
1177 		d4.bits.ldw.data = data->data[4];
1178 		NXGE_REG_WR64(handle, addr_offset, addr.value);
1179 		NXGE_REG_WR64(handle, d0_offset, d0.value);
1180 		NXGE_REG_WR64(handle, d1_offset, d1.value);
1181 		NXGE_REG_WR64(handle, d2_offset, d2.value);
1182 		NXGE_REG_WR64(handle, d3_offset, d3.value);
1183 		NXGE_REG_WR64(handle, d4_offset, d4.value);
1184 	} else if (op == RDMC_MEM_READ) {
1187 		NXGE_REG_WR64(handle, addr_offset, addr.value);
1188 		NXGE_REG_RD64(handle, d4_offset, &d4.value);
1189 		NXGE_REG_RD64(handle, d3_offset, &d3.value);
1190 		NXGE_REG_RD64(handle, d2_offset, &d2.value);
1191 		NXGE_REG_RD64(handle, d1_offset, &d1.value);
1192 		NXGE_REG_RD64(handle, d0_offset, &d0.value);
1193 
1194 		data->data[0] = d0.bits.ldw.data;
1195 		data->data[1] = d1.bits.ldw.data;
1196 		data->data[2] = d2.bits.ldw.data;
1197 		data->data[3] = d3.bits.ldw.data;
1198 		data->data[4] = d4.bits.ldw.data;
1199 	} else {
1200 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
1201 		    " npi_rxdma_rdmc_memory_io"
1202 		    " Illegal opcode %x \n",
1203 		    op));
1204 		return (NPI_RXDMA_OPCODE_INVALID(0));
1205 
1206 	}
1207 
1208 	return (NPI_SUCCESS);
1209 }
1210 
1211 /*
1212  * system wide conf functions
1213  */
1214 npi_status_t
1215 npi_rxdma_cfg_clock_div_set(npi_handle_t handle, uint16_t count)
1216 {
1217 	uint64_t offset;
1218 	rx_dma_ck_div_t clk_div;
1219 
1220 	offset = RX_DMA_CK_DIV_REG;
1221 
1222 	clk_div.value = 0;
1223 	clk_div.bits.ldw.cnt = count;
1224 	NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
1225 	    " npi_rxdma_cfg_clock_div_set: add 0x%llx "
1226 	    "handle 0x%llx value 0x%llx",
1227 	    handle.regp, handle.regh, clk_div.value));
1228 
1229 	NXGE_REG_WR64(handle, offset, clk_div.value);
1230 
1231 	return (NPI_SUCCESS);
1232 }
1233 
1234 npi_status_t
1235 npi_rxdma_cfg_red_rand_init(npi_handle_t handle, uint16_t init_value)
1236 {
1237 	uint64_t offset;
1238 	red_ran_init_t rand_reg;
1239 
1240 	offset = RED_RAN_INIT_REG;
1241 
1242 	rand_reg.value = 0;
1243 	rand_reg.bits.ldw.init = init_value;
1244 	rand_reg.bits.ldw.enable = 1;
1245 	NXGE_REG_WR64(handle, offset, rand_reg.value);
1246 
1247 	return (NPI_SUCCESS);
1248 
1249 }
1250 
1251 npi_status_t
1252 npi_rxdma_cfg_red_rand_disable(npi_handle_t handle)
1253 {
1254 	uint64_t offset;
1255 	red_ran_init_t rand_reg;
1256 
1257 	offset = RED_RAN_INIT_REG;
1258 
1259 	NXGE_REG_RD64(handle, offset, &rand_reg.value);
1260 	rand_reg.bits.ldw.enable = 0;
1261 	NXGE_REG_WR64(handle, offset, rand_reg.value);
1262 
1263 	return (NPI_SUCCESS);
1264 
1265 }
1266 
1267 npi_status_t
1268 npi_rxdma_cfg_32bitmode_enable(npi_handle_t handle)
1269 {
1270 	uint64_t offset;
1271 	rx_addr_md_t md_reg;
1272 	offset = RX_ADDR_MD_REG;
1273 	md_reg.value = 0;
1274 	md_reg.bits.ldw.mode32 = 1;
1275 
1276 	NXGE_REG_WR64(handle, offset, md_reg.value);
1277 	return (NPI_SUCCESS);
1278 
1279 }
1280 
1281 npi_status_t
1282 npi_rxdma_cfg_32bitmode_disable(npi_handle_t handle)
1283 {
1284 	uint64_t offset;
1285 	rx_addr_md_t md_reg;
1286 	offset = RX_ADDR_MD_REG;
1287 	md_reg.value = 0;
1288 
1289 	NXGE_REG_WR64(handle, offset, md_reg.value);
1290 	return (NPI_SUCCESS);
1291 
1292 }
1293 
1294 npi_status_t
1295 npi_rxdma_cfg_ram_access_enable(npi_handle_t handle)
1296 {
1297 	uint64_t offset;
1298 	rx_addr_md_t md_reg;
1299 	offset = RX_ADDR_MD_REG;
1300 	NXGE_REG_RD64(handle, offset, &md_reg.value);
1301 	md_reg.bits.ldw.ram_acc = 1;
1302 	NXGE_REG_WR64(handle, offset, md_reg.value);
1303 	return (NPI_SUCCESS);
1304 
1305 }
1306 
1307 npi_status_t
1308 npi_rxdma_cfg_ram_access_disable(npi_handle_t handle)
1309 {
1310 	uint64_t offset;
1311 	rx_addr_md_t md_reg;
1312 	offset = RX_ADDR_MD_REG;
1313 	NXGE_REG_RD64(handle, offset, &md_reg.value);
1314 	md_reg.bits.ldw.ram_acc = 0;
1315 	NXGE_REG_WR64(handle, offset, md_reg.value);
1316 	return (NPI_SUCCESS);
1317 
1318 }
1319 
1320 npi_status_t
1321 npi_rxdma_cfg_port_ddr_weight(npi_handle_t handle,
1322 				    uint8_t portnm, uint32_t weight)
1323 {
1324 
1325 	pt_drr_wt_t wt_reg;
1326 	uint64_t offset;
1327 
1328 	ASSERT(RXDMA_PORT_VALID(portnm));
1329 	if (!RXDMA_PORT_VALID(portnm)) {
1330 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
1331 		    " rxdma_cfg_port_ddr_weight"
1332 		    " Illegal Port Number %d \n",
1333 		    portnm));
1334 		return (NPI_RXDMA_PORT_INVALID);
1335 	}
1336 
1337 	offset = PT_DRR_WT_REG(portnm);
1338 	wt_reg.value = 0;
1339 	wt_reg.bits.ldw.wt = weight;
1340 	NXGE_REG_WR64(handle, offset, wt_reg.value);
1341 	return (NPI_SUCCESS);
1342 }
1343 
1344 npi_status_t
1345 npi_rxdma_port_usage_get(npi_handle_t handle,
1346 				    uint8_t portnm, uint32_t *blocks)
1347 {
1348 
1349 	pt_use_t use_reg;
1350 	uint64_t offset;
1351 
1352 	ASSERT(RXDMA_PORT_VALID(portnm));
1353 	if (!RXDMA_PORT_VALID(portnm)) {
1354 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
1355 		    " rxdma_port_usage_get"
1356 		    " Illegal Port Number %d \n",
1357 		    portnm));
1358 		return (NPI_RXDMA_PORT_INVALID);
1359 	}
1360 
1361 	offset = PT_USE_REG(portnm);
1362 	NXGE_REG_RD64(handle, offset, &use_reg.value);
1363 	*blocks = use_reg.bits.ldw.cnt;
1364 	return (NPI_SUCCESS);
1365 
1366 }
1367 
1368 npi_status_t
1369 npi_rxdma_cfg_wred_param(npi_handle_t handle, uint8_t rdc,
1370 				    rdc_red_para_t *wred_params)
1371 {
1372 	rdc_red_para_t wred_reg;
1373 	uint64_t offset;
1374 
1375 	ASSERT(RXDMA_CHANNEL_VALID(rdc));
1376 	if (!RXDMA_CHANNEL_VALID(rdc)) {
1377 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
1378 		    " rxdma_cfg_wred_param"
1379 		    " Illegal RDC Number %d \n",
1380 		    rdc));
1381 		return (NPI_RXDMA_RDC_INVALID);
1382 	}
1383 
1384 	/*
1385 	 * need to update RDC_RED_PARA_REG as well as bit defs in
1386 	 * the hw header file
1387 	 */
1388 	offset = RDC_RED_RDC_PARA_REG(rdc);
1389 
1390 	NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
1391 	    " npi_rxdma_cfg_wred_param: "
1392 	    "set RED_PARA: passed value 0x%llx "
1393 	    "win 0x%x thre 0x%x sync 0x%x thre_sync 0x%x",
1394 	    wred_params->value,
1395 	    wred_params->bits.ldw.win,
1396 	    wred_params->bits.ldw.thre,
1397 	    wred_params->bits.ldw.win_syn,
1398 	    wred_params->bits.ldw.thre_sync));
1399 
1400 	wred_reg.value = 0;
1401 	wred_reg.bits.ldw.win = wred_params->bits.ldw.win;
1402 	wred_reg.bits.ldw.thre = wred_params->bits.ldw.thre;
1403 	wred_reg.bits.ldw.win_syn = wred_params->bits.ldw.win_syn;
1404 	wred_reg.bits.ldw.thre_sync = wred_params->bits.ldw.thre_sync;
1405 	NXGE_REG_WR64(handle, offset, wred_reg.value);
1406 
1407 	NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
1408 	    "set RED_PARA: value 0x%llx "
1409 	    "win 0x%x thre 0x%x sync 0x%x thre_sync 0x%x",
1410 	    wred_reg.value,
1411 	    wred_reg.bits.ldw.win,
1412 	    wred_reg.bits.ldw.thre,
1413 	    wred_reg.bits.ldw.win_syn,
1414 	    wred_reg.bits.ldw.thre_sync));
1415 
1416 	return (NPI_SUCCESS);
1417 }
1418 
1419 /*
1420  * npi_rxdma_rdc_table_config()
1421  * Configure/populate the RDC table
1422  *
1423  * Inputs:
1424  *	handle:	register handle interpreted by the underlying OS
1425  *	table:	RDC Group Number
1426  *	rdc_map: A bitmap of the RDCs to populate the table with.
1427  *	count:	The number of RDCs set in <rdc_map>.
1428  *
1429  * Notes:
1430  *	This function assumes that we are not using the TCAM, but are
1431  *	hashing all fields of the incoming ethernet packet!
1432  *
1433  * Return:
1434  *	NPI_SUCCESS
1435  *	NPI_RXDMA_TABLE_INVALID
1436  *
1437  */
1438 npi_status_t
1439 npi_rxdma_rdc_table_config(
1440 	npi_handle_t handle,
1441 	uint8_t table,
1442 	dc_map_t rdc_map,
1443 	int count)
1444 {
1445 	int8_t set[NXGE_MAX_RDCS];
1446 	int i, cursor;
1447 
1448 	rdc_tbl_t rdc_tbl;
1449 	uint64_t offset;
1450 
1451 	ASSERT(RXDMA_TABLE_VALID(table));
1452 	if (!RXDMA_TABLE_VALID(table)) {
1453 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
1454 		    " npi_rxdma_cfg_rdc_table"
1455 		    " Illegal RDC Table Number %d \n",
1456 		    table));
1457 		return (NPI_RXDMA_TABLE_INVALID);
1458 	}
1459 
1460 	if (count == 0)		/* This shouldn't happen */
1461 		return (NPI_SUCCESS);
1462 
1463 	for (i = 0, cursor = 0; i < NXGE_MAX_RDCS; i++) {
1464 		if ((1 << i) & rdc_map) {
1465 			set[cursor++] = (int8_t)i;
1466 			if (cursor == count)
1467 				break;
1468 		}
1469 	}
1470 
1471 	rdc_tbl.value = 0;
1472 	offset = REG_RDC_TABLE_OFFSET(table);
1473 
1474 	/* Now write ( NXGE_MAX_RDCS / count ) sets of RDC numbers. */
1475 	for (i = 0, cursor = 0; i < NXGE_MAX_RDCS; i++) {
1476 		rdc_tbl.bits.ldw.rdc = set[cursor++];
1477 		NXGE_REG_WR64(handle, offset, rdc_tbl.value);
1478 		offset += sizeof (rdc_tbl.value);
1479 		if (cursor == count)
1480 			cursor = 0;
1481 	}
1482 
1483 	/*
1484 	 * Here is what the resulting table looks like with:
1485 	 *
1486 	 *  0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f
1487 	 * +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
1488 	 * |v |w |x |y |z |v |w |x |y |z |v |w |x |y |z |v | 5 RDCs
1489 	 * +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
1490 	 * |w |x |y |z |w |x |y |z |w |x |y |z |w |x |y |z | 4 RDCs
1491 	 * +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
1492 	 * |x |y |z |x |y |z |x |y |z |x |y |z |x |y |z |x | 3 RDCs
1493 	 * +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
1494 	 * |x |y |x |y |x |y |x |y |x |y |x |y |x |y |x |y | 2 RDCs
1495 	 * +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
1496 	 * |x |x |x |x |x |x |x |x |x |x |x |x |x |x |x |x | 1 RDC
1497 	 * +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
1498 	 */
1499 
1500 	return (NPI_SUCCESS);
1501 }
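
/*
 * Worked example (illustration only): with rdc_map = 0x13 (RDCs 0, 1 and 4)
 * and count = 3, set[] becomes {0, 1, 4} and the 16 table entries are
 * written as 0, 1, 4, 0, 1, 4, ... which matches the "3 RDCs" row of the
 * diagram above.
 *
 *	(void) npi_rxdma_rdc_table_config(handle, table, 0x13, 3);
 */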
1502 
1503 npi_status_t
1504 npi_rxdma_cfg_rdc_table_default_rdc(npi_handle_t handle,
1505 			    uint8_t table, uint8_t rdc)
1506 {
1507 	uint64_t offset;
1508 	rdc_tbl_t tbl_reg;
1509 	tbl_reg.value = 0;
1510 
1511 	ASSERT(RXDMA_TABLE_VALID(table));
1512 	if (!RXDMA_TABLE_VALID(table)) {
1513 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
1514 		    " npi_rxdma_cfg_rdc_table"
1515 		    " Illegal RDC table Number %d \n",
1516 		    rdc));
1517 		return (NPI_RXDMA_TABLE_INVALID);
1518 	}
1519 
1520 	offset = REG_RDC_TABLE_OFFSET(table);
1521 	tbl_reg.bits.ldw.rdc = rdc;
1522 	NXGE_REG_WR64(handle, offset, tbl_reg.value);
1523 	return (NPI_SUCCESS);
1524 
1525 }
1526 
1527 npi_status_t
1528 npi_rxdma_dump_rdc_table(npi_handle_t handle,
1529 			    uint8_t table)
1530 {
1531 	uint64_t offset;
1532 	int tbl_offset;
1533 	uint64_t value;
1534 
1535 	ASSERT(RXDMA_TABLE_VALID(table));
1536 	if (!RXDMA_TABLE_VALID(table)) {
1537 		NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
1538 		    " npi_rxdma_dump_rdc_table"
1539 		    " Illegal RDC Rable Number %d \n",
1540 		    table));
1541 		return (NPI_RXDMA_TABLE_INVALID);
1542 	}
1543 	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
1544 	    "\n Register Dump for RDC Table %d \n",
1545 	    table));
1546 	offset = REG_RDC_TABLE_OFFSET(table);
1547 	for (tbl_offset = 0; tbl_offset < NXGE_MAX_RDCS; tbl_offset++) {
1548 		NXGE_REG_RD64(handle, offset, &value);
1549 		NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
1550 		    " 0x%08llx 0x%08llx \n",
1551 		    offset, value));
1552 		offset += 8;
1553 	}
1554 	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
1555 	    "\n Register Dump for RDC Table %d done\n",
1556 	    table));
1557 	return (NPI_SUCCESS);
1558 
1559 }
1560 
1561 npi_status_t
1562 npi_rxdma_rdc_rbr_stat_get(npi_handle_t handle, uint8_t rdc,
1563 			    rbr_stat_t *rbr_stat)
1564 {
1565 
1566 	ASSERT(RXDMA_CHANNEL_VALID(rdc));
1567 	if (!RXDMA_CHANNEL_VALID(rdc)) {
1568 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
1569 		    " rxdma_rdc_rbr_stat_get"
1570 		    " Illegal RDC Number %d \n",
1571 		    rdc));
1572 		return (NPI_RXDMA_RDC_INVALID);
1573 	}
1574 
1575 	RXDMA_REG_READ64(handle, RBR_STAT_REG, rdc, &rbr_stat->value);
1576 	return (NPI_SUCCESS);
1577 }
1578 
1579 /*
1580  * npi_rxdma_rdc_rbr_head_get
1581  * Gets the current rbr head pointer.
1582  *
1583  * Inputs:
1584  *      handle:	opaque handle interpreted by the underlying OS
1585  *	rdc:		RX DMA Channel number
1586  *	hdptr		ptr to write the rbr head value
1587  *
1588  * Return:
1589  * NPI_SUCCESS
1590  * NPI_RXDMA_RDC_INVALID
1591  */
1592 npi_status_t
1593 npi_rxdma_rdc_rbr_head_get(npi_handle_t handle,
1594 			    uint8_t rdc, addr44_t *hdptr)
1595 {
1596 	rbr_hdh_t hh_ptr;
1597 	rbr_hdl_t hl_ptr;
1598 
1599 	ASSERT(RXDMA_CHANNEL_VALID(rdc));
1600 	if (!RXDMA_CHANNEL_VALID(rdc)) {
1601 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
1602 		    " rxdma_rdc_rbr_head_get"
1603 		    " Illegal RDC Number %d \n",
1604 		    rdc));
1605 		return (NPI_RXDMA_RDC_INVALID);
1606 	}
1607 	hh_ptr.value = 0;
1608 	hl_ptr.value = 0;
1609 	RXDMA_REG_READ64(handle, RBR_HDH_REG, rdc, &hh_ptr.value);
1610 	RXDMA_REG_READ64(handle, RBR_HDL_REG, rdc, &hl_ptr.value);
1611 	hdptr->bits.ldw = hl_ptr.bits.ldw.head_l << 2;
1612 	hdptr->bits.hdw = hh_ptr.bits.ldw.head_h;
1613 	return (NPI_SUCCESS);
1614 
1615 }
1616 
1617 npi_status_t
1618 npi_rxdma_rdc_rcr_qlen_get(npi_handle_t handle, uint8_t rdc,
1619 			    uint16_t *rcr_qlen)
1620 {
1621 
1622 	rcrstat_a_t stats;
1623 
1624 	ASSERT(RXDMA_CHANNEL_VALID(rdc));
1625 	if (!RXDMA_CHANNEL_VALID(rdc)) {
1626 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
1627 		    " rxdma_rdc_rcr_qlen_get"
1628 		    " Illegal RDC Number %d \n",
1629 		    rdc));
1630 		return (NPI_RXDMA_RDC_INVALID);
1631 	}
1632 
1633 	RXDMA_REG_READ64(handle, RCRSTAT_A_REG, rdc, &stats.value);
1634 	*rcr_qlen =  stats.bits.ldw.qlen;
1635 	NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
1636 	    " rxdma_rdc_rcr_qlen_get"
1637 	    " RDC %d qlen %x qlen %x\n",
1638 	    rdc, *rcr_qlen, stats.bits.ldw.qlen));
1639 	return (NPI_SUCCESS);
1640 }
1641 
1642 npi_status_t
1643 npi_rxdma_rdc_rcr_tail_get(npi_handle_t handle,
1644 			    uint8_t rdc, addr44_t *tail_addr)
1645 {
1646 
1647 	rcrstat_b_t th_ptr;
1648 	rcrstat_c_t tl_ptr;
1649 
1650 	ASSERT(RXDMA_CHANNEL_VALID(rdc));
1651 	if (!RXDMA_CHANNEL_VALID(rdc)) {
1652 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
1653 		    " rxdma_rdc_rcr_tail_get"
1654 		    " Illegal RDC Number %d \n",
1655 		    rdc));
1656 		return (NPI_RXDMA_RDC_INVALID);
1657 	}
1658 	th_ptr.value = 0;
1659 	tl_ptr.value = 0;
1660 	RXDMA_REG_READ64(handle, RCRSTAT_B_REG, rdc, &th_ptr.value);
1661 	RXDMA_REG_READ64(handle, RCRSTAT_C_REG, rdc, &tl_ptr.value);
1662 	tail_addr->bits.ldw = tl_ptr.bits.ldw.tlptr_l << 3;
1663 	tail_addr->bits.hdw = th_ptr.bits.ldw.tlptr_h;
1664 	NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
1665 	    " rxdma_rdc_rcr_tail_get"
1666 	    " RDC %d rcr_tail %llx tl %x\n",
1667 	    rdc, tl_ptr.value,
1668 	    tl_ptr.bits.ldw.tlptr_l));
1669 
1670 	return (NPI_SUCCESS);
1671 
1672 
1673 }
1674 
1675 /*
1676  * npi_rxdma_rxctl_fifo_error_intr_set
1677  * Configure The RX ctrl fifo error interrupt generation
1678  *
1679  * Inputs:
1680  *      handle:	opaque handle interpreted by the underlying OS
1681  *	mask:	rx_ctl_dat_fifo_mask_t specifying the errors
1682  * valid fields in  rx_ctl_dat_fifo_mask_t structure are:
1683  * zcp_eop_err, ipp_eop_err, id_mismatch. If a field is set
1684  * to 1, we will enable interrupt generation for the
1685  * corresponding error condition. In the hardware, the bit(s)
1686  * have to be cleared to enable interrupt.
1687  *
1688  * Return:
1689  * NPI_SUCCESS
1690  *
1691  */
1692 npi_status_t
1693 npi_rxdma_rxctl_fifo_error_intr_set(npi_handle_t handle,
1694 				    rx_ctl_dat_fifo_mask_t *mask)
1695 {
1696 	uint64_t offset;
1697 	rx_ctl_dat_fifo_mask_t intr_mask;
1698 	offset = RX_CTL_DAT_FIFO_MASK_REG;
1699 	NXGE_REG_RD64(handle, offset, &intr_mask.value);
1700 
1701 	if (mask->bits.ldw.ipp_eop_err) {
1702 		intr_mask.bits.ldw.ipp_eop_err = 0;
1703 	}
1704 
1705 	if (mask->bits.ldw.zcp_eop_err) {
1706 		intr_mask.bits.ldw.zcp_eop_err = 0;
1707 	}
1708 
1709 	if (mask->bits.ldw.id_mismatch) {
1710 		intr_mask.bits.ldw.id_mismatch = 0;
1711 	}
1712 
1713 	NXGE_REG_WR64(handle, offset, intr_mask.value);
1714 	return (NPI_SUCCESS);
1715 }
1716 
1717 /*
1718  * npi_rxdma_rxctl_fifo_error_stat_get
1719  * Read The RX ctrl fifo error Status
1720  *
1721  * Inputs:
1722  *      handle:	opaque handle interpreted by the underlying OS
1723  *	stat:	rx_ctl_dat_fifo_stat_t to read the errors to
1724  * valid fields in  rx_ctl_dat_fifo_stat_t structure are:
1725  * zcp_eop_err, ipp_eop_err, id_mismatch.
1726  * Return:
1727  * NPI_SUCCESS
1728  *
1729  */
1730 npi_status_t
1731 npi_rxdma_rxctl_fifo_error_intr_get(npi_handle_t handle,
1732 			    rx_ctl_dat_fifo_stat_t *stat)
1733 {
1734 	uint64_t offset = RX_CTL_DAT_FIFO_STAT_REG;
1735 	NXGE_REG_RD64(handle, offset, &stat->value);
1736 	return (NPI_SUCCESS);
1737 }
1738 
1739 npi_status_t
1740 npi_rxdma_rdc_rcr_pktread_update(npi_handle_t handle, uint8_t channel,
1741 				    uint16_t pkts_read)
1742 {
1743 
1744 	rx_dma_ctl_stat_t	cs;
1745 	uint16_t min_read = 0;
1746 
1747 	ASSERT(RXDMA_CHANNEL_VALID(channel));
1748 	if (!RXDMA_CHANNEL_VALID(channel)) {
1749 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
1750 		    " npi_rxdma_rdc_rcr_pktread_update ",
1751 		    " channel %d", channel));
1752 		return (NPI_FAILURE | NPI_RXDMA_CHANNEL_INVALID(channel));
1753 	}
1754 
1755 	if ((pkts_read < min_read) || (pkts_read > 512)) {
1756 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
1757 		    " npi_rxdma_rdc_rcr_pktread_update ",
1758 		    " pkts %d out of bound", pkts_read));
1759 		return (NPI_RXDMA_OPCODE_INVALID(pkts_read));
1760 	}
1761 
1762 	RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
1763 	    &cs.value);
1764 	cs.bits.ldw.pktread = pkts_read;
1765 	RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG,
1766 	    channel, cs.value);
1767 
1768 	return (NPI_SUCCESS);
1769 }
1770 
1771 npi_status_t
1772 npi_rxdma_rdc_rcr_bufread_update(npi_handle_t handle, uint8_t channel,
1773 					    uint16_t bufs_read)
1774 {
1775 
1776 	rx_dma_ctl_stat_t	cs;
1777 	uint16_t min_read = 0;
1778 
1779 	ASSERT(RXDMA_CHANNEL_VALID(channel));
1780 	if (!RXDMA_CHANNEL_VALID(channel)) {
1781 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
1782 		    " npi_rxdma_rdc_rcr_bufread_update ",
1783 		    " channel %d", channel));
1784 		return (NPI_FAILURE | NPI_RXDMA_CHANNEL_INVALID(channel));
1785 	}
1786 
1787 	if ((bufs_read < min_read) || (bufs_read > 512)) {
1788 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
1789 		    " npi_rxdma_rdc_rcr_bufread_update ",
1790 		    " bufs read %d out of bound", bufs_read));
1791 		return (NPI_RXDMA_OPCODE_INVALID(bufs_read));
1792 	}
1793 
1794 	RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
1795 	    &cs.value);
1796 	cs.bits.ldw.ptrread = bufs_read;
1797 	RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG,
1798 	    channel, cs.value);
1799 
1800 	return (NPI_SUCCESS);
1801 }
1802 
1803 npi_status_t
1804 npi_rxdma_rdc_rcr_read_update(npi_handle_t handle, uint8_t channel,
1805 				    uint16_t pkts_read, uint16_t bufs_read)
1806 {
1807 
1808 	rx_dma_ctl_stat_t	cs;
1809 
1810 	ASSERT(RXDMA_CHANNEL_VALID(channel));
1811 	if (!RXDMA_CHANNEL_VALID(channel)) {
1812 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
1813 		    " npi_rxdma_rdc_rcr_read_update ",
1814 		    " channel %d", channel));
1815 		return (NPI_FAILURE | NPI_RXDMA_CHANNEL_INVALID(channel));
1816 	}
1817 
1818 	NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
1819 	    " npi_rxdma_rdc_rcr_read_update "
1820 	    " bufs read %d pkt read %d",
1821 	    bufs_read, pkts_read));
1822 
1823 	RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
1824 	    &cs.value);
1825 
1826 	NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
1827 	    " npi_rxdma_rdc_rcr_read_update: "
1828 	    " value: 0x%llx bufs read %d pkt read %d",
1829 	    cs.value,
1830 	    cs.bits.ldw.ptrread, cs.bits.ldw.pktread));
1831 
1832 	cs.bits.ldw.pktread = pkts_read;
1833 	cs.bits.ldw.ptrread = bufs_read;
1834 
1835 	RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG,
1836 	    channel, cs.value);
1837 
1838 	RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
1839 	    &cs.value);
1840 
1841 	NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
1842 	    " npi_rxdma_rdc_rcr_read_update: read back after update "
1843 	    " value: 0x%llx bufs read %d pkt read %d",
1844 	    cs.value,
1845 	    cs.bits.ldw.ptrread, cs.bits.ldw.pktread));
1846 
1847 	return (NPI_SUCCESS);
1848 }
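
/*
 * Usage sketch (illustration only): after servicing completions, a
 * hypothetical caller reports how many RCR entries (packets) and RBR
 * buffers it consumed so the hardware queue-length accounting stays in
 * sync.  npkts_processed and nbufs_released are caller-maintained counts.
 *
 *	(void) npi_rxdma_rdc_rcr_read_update(handle, channel,
 *	    npkts_processed, nbufs_released);
 */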
1849 
1850 /*
1851  * npi_rxdma_channel_mex_set():
1852  *	This function is called to arm the DMA channel with
1853  *	mailbox updating capability. Software needs to rearm
1854  *	for each update by writing to the control and status register.
1855  *
1856  * Parameters:
1857  *	handle		- NPI handle (virtualization flag must be defined).
1858  *	channel		- logical RXDMA channel from 0 to 23.
1859  *			  (If virtualization flag is not set, then
1860  *			   logical channel is the same as the hardware
1861  *			   channel number).
1862  *
1863  * Return:
1864  *	NPI_SUCCESS		- If enable channel with mailbox update
1865  *				  is completed successfully.
1866  *
1867  *	Error:
1868  *	NPI error status code
1869  */
1870 npi_status_t
1871 npi_rxdma_channel_mex_set(npi_handle_t handle, uint8_t channel)
1872 {
1873 	return (npi_rxdma_channel_control(handle, RXDMA_MEX_SET, channel));
1874 }
1875 
1876 /*
1877  * npi_rxdma_channel_rcrto_clear():
1878  *	This function is called to reset RCRTO bit to 0.
1879  *
1880  * Parameters:
1881  *	handle		- NPI handle (virtualization flag must be defined).
1882  *	channel		- logical RXDMA channel from 0 to 23.
1883  *			  (If virtualization flag is not set, then
1884  *			   logical channel is the same as the hardware
1885  *			   channel number).
1886  * Return:
1887  *	NPI_SUCCESS
1888  *
1889  *	Error:
1890  *	NPI error status code
1891  */
1892 npi_status_t
1893 npi_rxdma_channel_rcrto_clear(npi_handle_t handle, uint8_t channel)
1894 {
1895 	return (npi_rxdma_channel_control(handle, RXDMA_RCRTO_CLEAR, channel));
1896 }
1897 
1898 /*
1899  * npi_rxdma_channel_pt_drop_pkt_clear():
1900  *	This function is called to clear the port drop packet bit (debug).
1901  *
1902  * Parameters:
1903  *	handle		- NPI handle (virtualization flag must be defined).
1904  *	channel		- logical RXDMA channel from 0 to 23.
1905  *			  (If virtualization flag is not set, then
1906  *			   logical channel is the same as the hardware
1907  *			   channel number).
1908  * Return:
1909  *	NPI_SUCCESS
1910  *
1911  *	Error:
1912  *	NPI error status code
1913  */
1914 npi_status_t
1915 npi_rxdma_channel_pt_drop_pkt_clear(npi_handle_t handle, uint8_t channel)
1916 {
1917 	return (npi_rxdma_channel_control(handle, RXDMA_PT_DROP_PKT_CLEAR,
1918 	    channel));
1919 }
1920 
1921 /*
1922  * npi_rxdma_channel_wred_dop_clear():
1923  *	This function is called to clear the WRED drop bit (debug only).
1924  *
1925  * Parameters:
1926  *	handle		- NPI handle (virtualization flag must be defined).
1927  *	channel		- logical RXDMA channel from 0 to 23.
1928  *			  (If virtualization flag is not set, then
1929  *			   logical channel is the same as the hardware
1930  *			   channel number).
1931  * Return:
1932  *	NPI_SUCCESS
1933  *
1934  *	Error:
1935  *	NPI error status code
1936  */
1937 npi_status_t
1938 npi_rxdma_channel_wred_dop_clear(npi_handle_t handle, uint8_t channel)
1939 {
1940 	return (npi_rxdma_channel_control(handle, RXDMA_WRED_DROP_CLEAR,
1941 	    channel));
1942 }
1943 
1944 /*
1945  * npi_rxdma_channel_rcr_shfull_clear():
1946  *	This function is called to clear the RCR shadow full bit.
1947  *
1948  * Parameters:
1949  *	handle		- NPI handle (virtualization flag must be defined).
1950  *	channel		- logical RXDMA channel from 0 to 23.
1951  *			  (If virtualization flag is not set, then
1952  *			   logical channel is the same as the hardware
1953  *			   channel number).
1954  * Return:
1955  *	NPI_SUCCESS
1956  *
1957  *	Error:
1958  *	NPI error status code
1959  */
1960 npi_status_t
1961 npi_rxdma_channel_rcr_shfull_clear(npi_handle_t handle, uint8_t channel)
1962 {
1963 	return (npi_rxdma_channel_control(handle, RXDMA_RCR_SFULL_CLEAR,
1964 	    channel));
1965 }
1966 
1967 /*
1968  * npi_rxdma_channel_rcr_full_clear():
1969  *	This function is called to clear the RCR full bit.
1970  *
1971  * Parameters:
1972  *	handle		- NPI handle (virtualization flag must be defined).
1973  *	channel		- logical RXDMA channel from 0 to 23.
1974  *			  (If virtualization flag is not set, then
1975  *			   logical channel is the same as the hardware
1976  *			   channel number).
1977  * Return:
1978  *	NPI_SUCCESS
1979  *
1980  *	Error:
1981  *	NPI error status code
1982  */
1983 npi_status_t
1984 npi_rxdma_channel_rcr_full_clear(npi_handle_t handle, uint8_t channel)
1985 {
1986 	return (npi_rxdma_channel_control(handle, RXDMA_RCR_FULL_CLEAR,
1987 	    channel));
1988 }
1989 
1990 npi_status_t
1991 npi_rxdma_channel_rbr_empty_clear(npi_handle_t handle, uint8_t channel)
1992 {
1993 	return (npi_rxdma_channel_control(handle,
1994 	    RXDMA_RBR_EMPTY_CLEAR, channel));
1995 }
1996 
1997 npi_status_t
1998 npi_rxdma_channel_cs_clear_all(npi_handle_t handle, uint8_t channel)
1999 {
2000 	return (npi_rxdma_channel_control(handle, RXDMA_CS_CLEAR_ALL, channel));
2001 }
2002 
2003 /*
2004  * npi_rxdma_channel_control():
2005  *	This function is called to control a receive DMA channel:
2006  *	arming the channel with mailbox updates and clearing various
2007  *	event status bits in the control and status register.
2008  *
2009  * Parameters:
2010  *	handle		- NPI handle (virtualization flag must be defined).
2011  *	control		- NPI defined control type supported:
2012  *				- RXDMA_MEX_SET
2013  * 				- RXDMA_RCRTO_CLEAR
2014  *				- RXDMA_PT_DROP_PKT_CLEAR
2015  *				- RXDMA_WRED_DROP_CLEAR
2016  *				- RXDMA_RCR_SFULL_CLEAR
2017  *				- RXDMA_RCR_FULL_CLEAR
2018  *				- RXDMA_RBR_PRE_EMPTY_CLEAR
2019  *				- RXDMA_RBR_EMPTY_CLEAR
 *				- RXDMA_CS_CLEAR_ALL
2020  *	channel		- logical RXDMA channel from 0 to 23.
2021  *			  (If virtualization flag is not set, then
2022  *			   logical channel is the same as the hardware
 *			   channel number).
2023  * Return:
2024  *	NPI_SUCCESS
2025  *
2026  *	Error:
2027  *	NPI error status code
2028  */
2029 npi_status_t
2030 npi_rxdma_channel_control(npi_handle_t handle, rxdma_cs_cntl_t control,
2031 			uint8_t channel)
2032 {
2033 
2034 	rx_dma_ctl_stat_t	cs;
2035 
2036 	ASSERT(RXDMA_CHANNEL_VALID(channel));
2037 	if (!RXDMA_CHANNEL_VALID(channel)) {
2038 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
2039 		    " npi_rxdma_channel_control",
2040 		    " channel %d", channel));
2041 		return (NPI_FAILURE | NPI_RXDMA_CHANNEL_INVALID(channel));
2042 	}
2043 
2044 	switch (control) {
2045 	case RXDMA_MEX_SET:
2046 		RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
2047 		    &cs.value);
2048 		cs.bits.hdw.mex = 1;
2049 		RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG,
2050 		    channel, cs.value);
2051 		break;
2052 
2053 	case RXDMA_RCRTO_CLEAR:
2054 		RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
2055 		    &cs.value);
2056 		cs.bits.hdw.rcrto = 0;
2057 		RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
2058 		    cs.value);
2059 		break;
2060 
2061 	case RXDMA_PT_DROP_PKT_CLEAR:
2062 		RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
2063 		    &cs.value);
2064 		cs.bits.hdw.port_drop_pkt = 0;
2065 		RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
2066 		    cs.value);
2067 		break;
2068 
2069 	case RXDMA_WRED_DROP_CLEAR:
2070 		RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
2071 		    &cs.value);
2072 		cs.bits.hdw.wred_drop = 0;
2073 		RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
2074 		    cs.value);
2075 		break;
2076 
2077 	case RXDMA_RCR_SFULL_CLEAR:
2078 		RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
2079 		    &cs.value);
2080 		cs.bits.hdw.rcr_shadow_full = 0;
2081 		RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
2082 		    cs.value);
2083 		break;
2084 
2085 	case RXDMA_RCR_FULL_CLEAR:
2086 		RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
2087 		    &cs.value);
2088 		cs.bits.hdw.rcrfull = 0;
2089 		RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
2090 		    cs.value);
2091 		break;
2092 
2093 	case RXDMA_RBR_PRE_EMPTY_CLEAR:
2094 		RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
2095 		    &cs.value);
2096 		cs.bits.hdw.rbr_pre_empty = 0;
2097 		RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
2098 		    cs.value);
2099 		break;
2100 
2101 	case RXDMA_RBR_EMPTY_CLEAR:
2102 		RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
2103 		    &cs.value);
2104 		cs.bits.hdw.rbr_empty = 1;
2105 		RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
2106 		    cs.value);
2107 		break;
2108 
2109 	case RXDMA_CS_CLEAR_ALL:
2110 		cs.value = 0;
2111 		RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
2112 		    cs.value);
2113 		break;
2114 
2115 	default:
2116 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
2117 		    "npi_rxdma_channel_control",
2118 		    "control %d", control));
2119 		return (NPI_FAILURE | NPI_RXDMA_OPCODE_INVALID(channel));
2120 	}
2121 
2122 	return (NPI_SUCCESS);
2123 }
2124 
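/*
 * Illustrative sketch (not part of the original source): after the driver
 * has replenished an empty receive block ring, it would clear the
 * RBR-empty condition through this control interface.  The function name
 * below is hypothetical; RXDMA_RBR_EMPTY_CLEAR is a real control code
 * handled above.
 */
static void
example_rbr_refill_ack(npi_handle_t handle, uint8_t channel)
{
	/* Clear the RBR empty indication now that buffers are posted. */
	(void) npi_rxdma_channel_control(handle, RXDMA_RBR_EMPTY_CLEAR,
	    channel);
}
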
2125 /*
2126  * npi_rxdma_control_status():
2127  *	This function is called to operate on the control
2128  *	and status register.
2129  *
2130  * Parameters:
2131  *	handle		- NPI handle
2132  *	op_mode		- OP_GET: get hardware control and status
2133  *			  OP_SET: set hardware control and status
2134  *			  OP_UPDATE: update hardware control and status.
2136  *	channel		- hardware RXDMA channel from 0 to 23.
2137  *	cs_p		- pointer to hardware defined control and status
2138  *			  structure.
2139  * Return:
2140  *	NPI_SUCCESS
2141  *
2142  *	Error:
2143  *	NPI error status code
2144  */
2145 npi_status_t
2146 npi_rxdma_control_status(npi_handle_t handle, io_op_t op_mode,
2147 			uint8_t channel, p_rx_dma_ctl_stat_t cs_p)
2148 {
2149 	int			status = NPI_SUCCESS;
2150 	rx_dma_ctl_stat_t	cs;
2151 
2152 	ASSERT(RXDMA_CHANNEL_VALID(channel));
2153 	if (!RXDMA_CHANNEL_VALID(channel)) {
2154 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
2155 		    "npi_rxdma_control_status",
2156 		    "channel %d", channel));
2157 		return (NPI_FAILURE | NPI_RXDMA_CHANNEL_INVALID(channel));
2158 	}
2159 
2160 	switch (op_mode) {
2161 	case OP_GET:
2162 		RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
2163 		    &cs_p->value);
2164 		break;
2165 
2166 	case OP_SET:
2167 		RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
2168 		    cs_p->value);
2169 		break;
2170 
2171 	case OP_UPDATE:
2172 		RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
2173 		    &cs.value);
2174 		RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
2175 		    cs_p->value | cs.value);
2176 		break;
2177 
2178 	default:
2179 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
2180 		    "npi_rxdma_control_status",
2181 		    "control %d", op_mode));
2182 		return (NPI_FAILURE | NPI_RXDMA_OPCODE_INVALID(channel));
2183 	}
2184 
2185 	return (status);
2186 }
2187 
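/*
 * Illustrative sketch (not part of the original source): OP_GET can be
 * used to snapshot the control and status register for debugging without
 * modifying it.  The function name below is hypothetical.
 */
static void
example_dump_cs(npi_handle_t handle, uint8_t channel)
{
	rx_dma_ctl_stat_t	cs;

	if (npi_rxdma_control_status(handle, OP_GET, channel, &cs) ==
	    NPI_SUCCESS) {
		NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
		    " example_dump_cs: channel %d cs 0x%llx",
		    channel, cs.value));
	}
}
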
2188 /*
2189  * npi_rxdma_event_mask():
2190  *	This function is called to operate on the event mask
2191  *	register which is used for generating interrupts.
2192  *
2193  * Parameters:
2194  *	handle		- NPI handle
2195  *	op_mode		- OP_GET: get hardware event mask
2196  *			  OP_SET: set hardware interrupt event masks
2197  *			  OP_UPDATE: OR the given mask into the hardware mask.
2198  *	channel		- hardware RXDMA channel from 0 to 23.
2199  *	mask_p		- pointer to hardware defined event mask
2200  *			  structure.
2201  * Return:
2202  *	NPI_SUCCESS		- If set is complete successfully.
2203  *	NPI_SUCCESS		- If the operation completes successfully.
2204  *	Error:
2205  *	NPI error status code
2206  */
2207 npi_status_t
2208 npi_rxdma_event_mask(npi_handle_t handle, io_op_t op_mode,
2209 		uint8_t channel, p_rx_dma_ent_msk_t mask_p)
2210 {
2211 	int			status = NPI_SUCCESS;
2212 	rx_dma_ent_msk_t	mask;
2213 
2214 	ASSERT(RXDMA_CHANNEL_VALID(channel));
2215 	if (!RXDMA_CHANNEL_VALID(channel)) {
2216 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
2217 		    "npi_rxdma_event_mask",
2218 		    "channel %d", channel));
2219 		return (NPI_FAILURE | NPI_RXDMA_CHANNEL_INVALID(channel));
2220 	}
2221 
2222 	switch (op_mode) {
2223 	case OP_GET:
2224 		RXDMA_REG_READ64(handle, RX_DMA_ENT_MSK_REG, channel,
2225 		    &mask_p->value);
2226 		break;
2227 
2228 	case OP_SET:
2229 		RXDMA_REG_WRITE64(handle, RX_DMA_ENT_MSK_REG, channel,
2230 		    mask_p->value);
2231 		break;
2232 
2233 	case OP_UPDATE:
2234 		RXDMA_REG_READ64(handle, RX_DMA_ENT_MSK_REG, channel,
2235 		    &mask.value);
2236 		RXDMA_REG_WRITE64(handle, RX_DMA_ENT_MSK_REG, channel,
2237 		    mask_p->value | mask.value);
2238 		break;
2239 
2240 	default:
2241 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
2242 		    "npi_rxdma_event_mask",
2243 		    "eventmask %d", op_mode));
2244 		return (NPI_FAILURE | NPI_RXDMA_OPCODE_INVALID(channel));
2245 	}
2246 
2247 	return (status);
2248 }
2249 
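/*
 * Illustrative sketch (not part of the original source): OP_GET and
 * OP_SET can be paired to save and later restore a channel's interrupt
 * event mask.  The function names below are hypothetical.
 */
static npi_status_t
example_mask_save(npi_handle_t handle, uint8_t channel,
    rx_dma_ent_msk_t *saved)
{
	/* Read the current event mask into the caller's buffer. */
	return (npi_rxdma_event_mask(handle, OP_GET, channel, saved));
}

static npi_status_t
example_mask_restore(npi_handle_t handle, uint8_t channel,
    rx_dma_ent_msk_t *saved)
{
	/* Write a previously saved event mask back to the hardware. */
	return (npi_rxdma_event_mask(handle, OP_SET, channel, saved));
}
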
2250 /*
2251  * npi_rxdma_event_mask_config():
2252  *	This function is called to operate on the event mask
2253  *	register (used for generating interrupts) with an NPI
2254  *	defined mask configuration value.
2255  *
2256  * Parameters:
2257  *	handle		- NPI handle
2258  *	op_mode		- OP_GET: get hardware event mask
2259  *			  OP_SET: set hardware interrupt event masks
2260  *			  OP_UPDATE: OR the given mask into the hardware mask.
 *			  OP_CLEAR: mask all events (writes CFG_RXDMA_MASK_ALL).
2261  *	channel		- hardware RXDMA channel from 0 to 23.
2262  *	mask_cfgp	- pointer to NPI defined event mask
2263  *			  enum data type.
2264  * Return:
2265  *	NPI_SUCCESS		- If set is complete successfully.
2266  *	NPI_SUCCESS		- If the operation completes successfully.
2267  *	Error:
2268  *	NPI error status code
2269  */
2270 npi_status_t
2271 npi_rxdma_event_mask_config(npi_handle_t handle, io_op_t op_mode,
2272 		uint8_t channel, rxdma_ent_msk_cfg_t *mask_cfgp)
2273 {
2274 	int		status = NPI_SUCCESS;
2275 	uint64_t	configuration = *mask_cfgp;
2276 	uint64_t	value;
2277 
2278 	ASSERT(RXDMA_CHANNEL_VALID(channel));
2279 	if (!RXDMA_CHANNEL_VALID(channel)) {
2280 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
2281 		    "npi_rxdma_event_mask_config",
2282 		    "channel %d", channel));
2283 		return (NPI_FAILURE | NPI_RXDMA_CHANNEL_INVALID(channel));
2284 	}
2285 
2286 	switch (op_mode) {
2287 	case OP_GET:
2288 		RXDMA_REG_READ64(handle, RX_DMA_ENT_MSK_REG, channel,
2289 		    (uint64_t *)mask_cfgp);
2290 		break;
2291 
2292 	case OP_SET:
2293 		RXDMA_REG_WRITE64(handle, RX_DMA_ENT_MSK_REG, channel,
2294 		    configuration);
2295 		break;
2296 
2297 	case OP_UPDATE:
2298 		RXDMA_REG_READ64(handle, RX_DMA_ENT_MSK_REG, channel, &value);
2299 		RXDMA_REG_WRITE64(handle, RX_DMA_ENT_MSK_REG, channel,
2300 		    configuration | value);
2301 		break;
2302 
2303 	case OP_CLEAR:
2304 		RXDMA_REG_WRITE64(handle, RX_DMA_ENT_MSK_REG, channel,
2305 		    CFG_RXDMA_MASK_ALL);
2306 		break;
2307 	default:
2308 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
2309 		    "npi_rxdma_event_mask_config",
2310 		    "eventmask %d", op_mode));
2311 		return (NPI_FAILURE | NPI_RXDMA_OPCODE_INVALID(channel));
2312 	}
2313 
2314 	return (status);
2315 }
2316
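/*
 * Illustrative sketch (not part of the original source): OP_CLEAR masks
 * every RXDMA event for the channel (the function writes
 * CFG_RXDMA_MASK_ALL), and OP_SET restores a saved configuration.  Note
 * that mask_cfgp is dereferenced unconditionally above, so a valid
 * pointer must be passed even for OP_CLEAR.  The function name below is
 * hypothetical.
 */
static void
example_mask_all_then_restore(npi_handle_t handle, uint8_t channel,
    rxdma_ent_msk_cfg_t *saved_cfgp)
{
	/* Mask all events; the pointed-to value is ignored for OP_CLEAR. */
	(void) npi_rxdma_event_mask_config(handle, OP_CLEAR, channel,
	    saved_cfgp);

	/* Restore the caller's saved mask configuration. */
	(void) npi_rxdma_event_mask_config(handle, OP_SET, channel,
	    saved_cfgp);
}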