xref: /titanic_50/usr/src/uts/common/io/nxge/npi/npi_rxdma.c (revision 4df55fde49134f9735f84011f23a767c75e393c7)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #include <npi_rxdma.h>
27 #include <npi_rx_rd64.h>
28 #include <npi_rx_wr64.h>
29 #include <nxge_common.h>
30 
31 #define	 RXDMA_RESET_TRY_COUNT	4
32 #define	 RXDMA_RESET_DELAY	5
33 
34 #define	 RXDMA_OP_DISABLE	0
35 #define	 RXDMA_OP_ENABLE	1
36 #define	 RXDMA_OP_RESET	2
37 
38 #define	 RCR_TIMEOUT_ENABLE	1
39 #define	 RCR_TIMEOUT_DISABLE	2
40 #define	 RCR_THRESHOLD	4
41 
42 /* assume weight is in byte frames unit */
43 #define	WEIGHT_FACTOR 3/2
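
/*
 * A note on the macro above: 3/2 is left unparenthesized, so an expression
 * such as (nbytes * WEIGHT_FACTOR) expands to nbytes * 3 / 2 and evaluates
 * left to right as (nbytes * 3) / 2.  Parenthesized as (3/2), integer
 * division would truncate the factor to 1, so the macro only behaves as
 * intended on the right-hand side of a multiplication.
 */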
44 
45 uint64_t rdc_dmc_offset[] = {
46 	RXDMA_CFIG1_REG, RXDMA_CFIG2_REG, RBR_CFIG_A_REG, RBR_CFIG_B_REG,
47 	RBR_KICK_REG, RBR_STAT_REG, RBR_HDH_REG, RBR_HDL_REG,
48 	RCRCFIG_A_REG, RCRCFIG_B_REG, RCRSTAT_A_REG, RCRSTAT_B_REG,
49 	RCRSTAT_C_REG, RX_DMA_ENT_MSK_REG, RX_DMA_CTL_STAT_REG, RCR_FLSH_REG,
50 	RXMISC_DISCARD_REG
51 };
52 
53 const char *rdc_dmc_name[] = {
54 	"RXDMA_CFIG1", "RXDMA_CFIG2", "RBR_CFIG_A", "RBR_CFIG_B",
55 	"RBR_KICK", "RBR_STAT", "RBR_HDH", "RBR_HDL",
56 	"RCRCFIG_A", "RCRCFIG_B", "RCRSTAT_A", "RCRSTAT_B",
57 	"RCRSTAT_C", "RX_DMA_ENT_MSK", "RX_DMA_CTL_STAT", "RCR_FLSH",
58 	"RXMISC_DISCARD"
59 };
60 
61 uint64_t rdc_fzc_offset [] = {
62 	RX_LOG_PAGE_VLD_REG, RX_LOG_PAGE_MASK1_REG, RX_LOG_PAGE_VAL1_REG,
63 	RX_LOG_PAGE_MASK2_REG, RX_LOG_PAGE_VAL2_REG, RX_LOG_PAGE_RELO1_REG,
64 	RX_LOG_PAGE_RELO2_REG, RX_LOG_PAGE_HDL_REG, RDC_RED_PARA_REG,
65 	RED_DIS_CNT_REG
66 };
67 
68 
69 const char *rdc_fzc_name [] = {
70 	"RX_LOG_PAGE_VLD", "RX_LOG_PAGE_MASK1", "RX_LOG_PAGE_VAL1",
71 	"RX_LOG_PAGE_MASK2", "RX_LOG_PAGE_VAL2", "RX_LOG_PAGE_RELO1",
72 	"RX_LOG_PAGE_RELO2", "RX_LOG_PAGE_HDL", "RDC_RED_PARA", "RED_DIS_CNT"
73 };
74 
75 
76 /*
77  * Dump the MEM_ADDR register first so all the data registers
78  * will have valid data buffer pointers.
79  */
80 uint64_t rx_fzc_offset[] = {
81 	RX_DMA_CK_DIV_REG, DEF_PT0_RDC_REG, DEF_PT1_RDC_REG, DEF_PT2_RDC_REG,
82 	DEF_PT3_RDC_REG, RX_ADDR_MD_REG, PT_DRR_WT0_REG, PT_DRR_WT1_REG,
83 	PT_DRR_WT2_REG, PT_DRR_WT3_REG, PT_USE0_REG, PT_USE1_REG,
84 	PT_USE2_REG, PT_USE3_REG, RED_RAN_INIT_REG, RX_ADDR_MD_REG,
85 	RDMC_PRE_PAR_ERR_REG, RDMC_SHA_PAR_ERR_REG,
86 	RDMC_MEM_DATA4_REG, RDMC_MEM_DATA3_REG, RDMC_MEM_DATA2_REG,
87 	RDMC_MEM_DATA1_REG, RDMC_MEM_DATA0_REG,
88 	RDMC_MEM_ADDR_REG,
89 	RX_CTL_DAT_FIFO_STAT_REG, RX_CTL_DAT_FIFO_MASK_REG,
90 	RX_CTL_DAT_FIFO_STAT_DBG_REG,
91 	RDMC_TRAINING_VECTOR_REG,
92 };
93 
94 
95 const char *rx_fzc_name[] = {
96 	"RX_DMA_CK_DIV", "DEF_PT0_RDC", "DEF_PT1_RDC", "DEF_PT2_RDC",
97 	"DEF_PT3_RDC", "RX_ADDR_MD", "PT_DRR_WT0", "PT_DRR_WT1",
98 	"PT_DRR_WT2", "PT_DRR_WT3", "PT_USE0", "PT_USE1",
99 	"PT_USE2", "PT_USE3", "RED_RAN_INIT", "RX_ADDR_MD",
100 	"RDMC_PRE_PAR_ERR", "RDMC_SHA_PAR_ERR",
101 	"RDMC_MEM_DATA4", "RDMC_MEM_DATA3", "RDMC_MEM_DATA2",
102 	"RDMC_MEM_DATA1", "RDMC_MEM_DATA0",
103 	"RDMC_MEM_ADDR",
104 	"RX_CTL_DAT_FIFO_STAT", "RX_CTL_DAT_FIFO_MASK",
105 	"RDMC_TRAINING_VECTOR_REG",
106 	"RX_CTL_DAT_FIFO_STAT_DBG_REG"
107 };
108 
109 
110 npi_status_t
111 npi_rxdma_cfg_rdc_ctl(npi_handle_t handle, uint8_t rdc, uint8_t op);
112 npi_status_t
113 npi_rxdma_cfg_rdc_rcr_ctl(npi_handle_t handle, uint8_t rdc, uint8_t op,
114 				uint16_t param);
115 
116 
117 /*
118  * npi_rxdma_dump_rdc_regs
119  * Dumps the contents of the per-channel RDC (DMC) registers
120  *
121  * Input:
122  *      handle:	opaque handle interpreted by the underlying OS
123  *         rdc:      RX DMA number
124  *
125  * return:
126  *     NPI_SUCCESS
127  *     NPI_RXDMA_RDC_INVALID
128  *
129  */
130 npi_status_t
131 npi_rxdma_dump_rdc_regs(npi_handle_t handle, uint8_t rdc)
132 {
133 
134 	uint64_t value, offset;
135 	int num_regs, i;
136 #ifdef NPI_DEBUG
137 	extern uint64_t npi_debug_level;
138 	uint64_t old_npi_debug_level = npi_debug_level;
139 #endif
140 	ASSERT(RXDMA_CHANNEL_VALID(rdc));
141 	if (!RXDMA_CHANNEL_VALID(rdc)) {
142 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
143 		    "npi_rxdma_dump_rdc_regs"
144 		    " Illegal RDC number %d \n",
145 		    rdc));
146 		return (NPI_RXDMA_RDC_INVALID);
147 	}
148 #ifdef NPI_DEBUG
149 	npi_debug_level |= DUMP_ALWAYS;
150 #endif
151 	num_regs = sizeof (rdc_dmc_offset) / sizeof (uint64_t);
152 	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
153 	    "\nDMC Register Dump for Channel %d\n",
154 	    rdc));
155 	for (i = 0; i < num_regs; i++) {
156 		RXDMA_REG_READ64(handle, rdc_dmc_offset[i], rdc, &value);
157 		offset = NXGE_RXDMA_OFFSET(rdc_dmc_offset[i], handle.is_vraddr,
158 		    rdc);
159 		NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
160 		    "%08llx %s\t %08llx \n",
161 		    offset, rdc_dmc_name[i], value));
162 	}
163 
164 	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
165 	    "\n Register Dump for Channel %d done\n",
166 	    rdc));
167 #ifdef NPI_DEBUG
168 	npi_debug_level = old_npi_debug_level;
169 #endif
170 	return (NPI_SUCCESS);
171 }
172 
173 /*
174  * npi_rxdma_dump_fzc_regs
175  * Dumps the contents of the common FZC_DMC registers
176  *
177  * Input:
178  *      handle:	opaque handle interpreted by the underlying OS
179  *
180  * return:
181  *     NPI_SUCCESS
182  */
183 npi_status_t
184 npi_rxdma_dump_fzc_regs(npi_handle_t handle)
185 {
186 
187 	uint64_t value;
188 	int num_regs, i;
189 
190 
191 	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
192 	    "\nFZC_DMC Common Register Dump\n"));
193 	num_regs = sizeof (rx_fzc_offset) / sizeof (uint64_t);
194 
195 	for (i = 0; i < num_regs; i++) {
196 		NXGE_REG_RD64(handle, rx_fzc_offset[i], &value);
197 		NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
198 		    "0x%08llx %s\t 0x%08llx \n",
199 		    rx_fzc_offset[i],
200 		    rx_fzc_name[i], value));
201 	}
202 	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
203 	    "\n FZC_DMC Register Dump Done \n"));
204 
205 	return (NPI_SUCCESS);
206 }
207 
208 
209 
210 /*
211  * per rdc config functions
212  */
213 npi_status_t
214 npi_rxdma_cfg_logical_page_disable(npi_handle_t handle, uint8_t rdc,
215 				    uint8_t page_num)
216 {
217 	log_page_vld_t page_vld;
218 	uint64_t valid_offset;
219 
220 	ASSERT(RXDMA_CHANNEL_VALID(rdc));
221 	if (!RXDMA_CHANNEL_VALID(rdc)) {
222 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
223 		    "rxdma_cfg_logical_page_disable"
224 		    " Illegal RDC number %d \n",
225 		    rdc));
226 		return (NPI_RXDMA_RDC_INVALID);
227 	}
228 
229 	ASSERT(RXDMA_PAGE_VALID(page_num));
230 	if (!RXDMA_PAGE_VALID(page_num)) {
231 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
232 		    "rxdma_cfg_logical_page_disable"
233 		    " Illegal page number %d \n",
234 		    page_num));
235 		return (NPI_RXDMA_PAGE_INVALID);
236 	}
237 
238 	valid_offset = REG_FZC_RDC_OFFSET(RX_LOG_PAGE_VLD_REG, rdc);
239 	NXGE_REG_RD64(handle, valid_offset, &page_vld.value);
240 
241 	if (page_num == 0)
242 		page_vld.bits.ldw.page0 = 0;
243 
244 	if (page_num == 1)
245 		page_vld.bits.ldw.page1 = 0;
246 
247 	NXGE_REG_WR64(handle, valid_offset, page_vld.value);
248 	return (NPI_SUCCESS);
249 
250 }
251 
252 npi_status_t
253 npi_rxdma_cfg_logical_page(npi_handle_t handle, uint8_t rdc,
254 			    dma_log_page_t *pg_cfg)
255 {
256 	log_page_vld_t page_vld;
257 	log_page_mask_t page_mask;
258 	log_page_value_t page_value;
259 	log_page_relo_t page_reloc;
260 	uint64_t value_offset, reloc_offset, mask_offset;
261 	uint64_t valid_offset;
262 
263 	ASSERT(RXDMA_CHANNEL_VALID(rdc));
264 	if (!RXDMA_CHANNEL_VALID(rdc)) {
265 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
266 		    " rxdma_cfg_logical_page"
267 		    " Illegal RDC number %d \n",
268 		    rdc));
269 		return (NPI_RXDMA_RDC_INVALID);
270 	}
271 
272 	ASSERT(RXDMA_PAGE_VALID(pg_cfg->page_num));
273 	if (!RXDMA_PAGE_VALID(pg_cfg->page_num)) {
274 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
275 		    " rxdma_cfg_logical_page"
276 		    " Illegal page number %d \n",
277 		    pg_cfg->page_num));
278 		return (NPI_RXDMA_PAGE_INVALID);
279 	}
280 
281 	valid_offset = REG_FZC_RDC_OFFSET(RX_LOG_PAGE_VLD_REG, rdc);
282 	NXGE_REG_RD64(handle, valid_offset, &page_vld.value);
283 
284 	if (!pg_cfg->valid) {
285 		if (pg_cfg->page_num == 0)
286 			page_vld.bits.ldw.page0 = 0;
287 
288 		if (pg_cfg->page_num == 1)
289 			page_vld.bits.ldw.page1 = 0;
290 		NXGE_REG_WR64(handle, valid_offset, page_vld.value);
291 		return (NPI_SUCCESS);
292 	}
293 
294 	if (pg_cfg->page_num == 0) {
295 		mask_offset = REG_FZC_RDC_OFFSET(RX_LOG_PAGE_MASK1_REG, rdc);
296 		value_offset = REG_FZC_RDC_OFFSET(RX_LOG_PAGE_VAL1_REG, rdc);
297 		reloc_offset = REG_FZC_RDC_OFFSET(RX_LOG_PAGE_RELO1_REG, rdc);
298 		page_vld.bits.ldw.page0 = 1;
299 	}
300 
301 	if (pg_cfg->page_num == 1) {
302 		mask_offset = REG_FZC_RDC_OFFSET(RX_LOG_PAGE_MASK2_REG, rdc);
303 		value_offset = REG_FZC_RDC_OFFSET(RX_LOG_PAGE_VAL2_REG, rdc);
304 		reloc_offset = REG_FZC_RDC_OFFSET(RX_LOG_PAGE_RELO2_REG, rdc);
305 		page_vld.bits.ldw.page1 = 1;
306 	}
307 
308 
309 	page_vld.bits.ldw.func = pg_cfg->func_num;
310 
311 	page_mask.value = 0;
312 	page_value.value = 0;
313 	page_reloc.value = 0;
314 
315 
316 	page_mask.bits.ldw.mask = pg_cfg->mask >> LOG_PAGE_ADDR_SHIFT;
317 	page_value.bits.ldw.value = pg_cfg->value >> LOG_PAGE_ADDR_SHIFT;
318 	page_reloc.bits.ldw.relo = pg_cfg->reloc >> LOG_PAGE_ADDR_SHIFT;
319 
320 
321 	NXGE_REG_WR64(handle, mask_offset, page_mask.value);
322 	NXGE_REG_WR64(handle, value_offset, page_value.value);
323 	NXGE_REG_WR64(handle, reloc_offset, page_reloc.value);
324 
325 
326 /* enable the logical page */
327 	NXGE_REG_WR64(handle, valid_offset, page_vld.value);
328 	return (NPI_SUCCESS);
329 }
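
/*
 * A minimal usage sketch for the function above, with hypothetical mask,
 * value and relocation addresses; all fields shown are consumed by
 * npi_rxdma_cfg_logical_page(), which shifts the addresses down by
 * LOG_PAGE_ADDR_SHIFT before writing the registers.
 *
 *	dma_log_page_t pg;
 *
 *	pg.page_num = 0;
 *	pg.valid = 1;
 *	pg.func_num = 0;
 *	pg.mask = 0x0000fff80000000ULL;
 *	pg.value = 0x0000000080000000ULL;
 *	pg.reloc = 0x0000000100000000ULL;
 *	(void) npi_rxdma_cfg_logical_page(handle, rdc, &pg);
 */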
330 
331 npi_status_t
332 npi_rxdma_cfg_logical_page_handle(npi_handle_t handle, uint8_t rdc,
333 				    uint64_t page_handle)
334 {
335 	uint64_t offset;
336 	log_page_hdl_t page_hdl;
337 
338 	ASSERT(RXDMA_CHANNEL_VALID(rdc));
339 	if (!RXDMA_CHANNEL_VALID(rdc)) {
340 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
341 		    "rxdma_cfg_logical_page_handle"
342 		    " Illegal RDC number %d \n", rdc));
343 		return (NPI_RXDMA_RDC_INVALID);
344 	}
345 
346 	page_hdl.value = 0;
347 
348 	page_hdl.bits.ldw.handle = (uint32_t)page_handle;
349 	offset = REG_FZC_RDC_OFFSET(RX_LOG_PAGE_HDL_REG, rdc);
350 	NXGE_REG_WR64(handle, offset, page_hdl.value);
351 
352 	return (NPI_SUCCESS);
353 }
354 
355 /*
356  * RX DMA functions
357  */
358 npi_status_t
359 npi_rxdma_cfg_rdc_ctl(npi_handle_t handle, uint8_t rdc, uint8_t op)
360 {
361 
362 	rxdma_cfig1_t cfg;
363 	uint32_t count = RXDMA_RESET_TRY_COUNT;
364 	uint32_t delay_time = RXDMA_RESET_DELAY;
365 	uint32_t error = NPI_RXDMA_ERROR_ENCODE(NPI_RXDMA_RESET_ERR, rdc);
366 
367 	ASSERT(RXDMA_CHANNEL_VALID(rdc));
368 	if (!RXDMA_CHANNEL_VALID(rdc)) {
369 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
370 		    "npi_rxdma_cfg_rdc_ctl"
371 		    " Illegal RDC number %d \n", rdc));
372 		return (NPI_RXDMA_RDC_INVALID);
373 	}
374 
375 
376 	switch (op) {
377 		case RXDMA_OP_ENABLE:
378 			RXDMA_REG_READ64(handle, RXDMA_CFIG1_REG, rdc,
379 			    &cfg.value);
380 			cfg.bits.ldw.en = 1;
381 			RXDMA_REG_WRITE64(handle, RXDMA_CFIG1_REG,
382 			    rdc, cfg.value);
383 
384 			NXGE_DELAY(delay_time);
385 			RXDMA_REG_READ64(handle, RXDMA_CFIG1_REG, rdc,
386 			    &cfg.value);
387 			while ((count--) && (cfg.bits.ldw.qst == 0)) {
388 				NXGE_DELAY(delay_time);
389 				RXDMA_REG_READ64(handle, RXDMA_CFIG1_REG, rdc,
390 				    &cfg.value);
391 			}
392 
393 			if (cfg.bits.ldw.qst == 0) {
394 				NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
395 				    " npi_rxdma_cfg_rdc_ctl"
396 				    " RXDMA_OP_ENABLE Failed for RDC %d \n",
397 				    rdc));
398 				return (error);
399 			}
400 
401 			break;
402 		case RXDMA_OP_DISABLE:
403 			RXDMA_REG_READ64(handle, RXDMA_CFIG1_REG, rdc,
404 			    &cfg.value);
405 			cfg.bits.ldw.en = 0;
406 			RXDMA_REG_WRITE64(handle, RXDMA_CFIG1_REG,
407 			    rdc, cfg.value);
408 
409 			NXGE_DELAY(delay_time);
410 			RXDMA_REG_READ64(handle, RXDMA_CFIG1_REG, rdc,
411 			    &cfg.value);
412 			while ((count--) && (cfg.bits.ldw.qst == 0)) {
413 				NXGE_DELAY(delay_time);
414 				RXDMA_REG_READ64(handle, RXDMA_CFIG1_REG, rdc,
415 				    &cfg.value);
416 			}
417 			if (cfg.bits.ldw.qst == 0) {
418 				NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
419 				    " npi_rxdma_cfg_rdc_ctl"
420 				    " RXDMA_OP_DISABLE Failed for RDC %d \n",
421 				    rdc));
422 				return (error);
423 			}
424 
425 			break;
426 		case RXDMA_OP_RESET:
427 			cfg.value = 0;
428 			cfg.bits.ldw.rst = 1;
429 			RXDMA_REG_WRITE64(handle,
430 			    RXDMA_CFIG1_REG,
431 			    rdc, cfg.value);
432 			NXGE_DELAY(delay_time);
433 			RXDMA_REG_READ64(handle, RXDMA_CFIG1_REG, rdc,
434 			    &cfg.value);
435 			while ((count--) && (cfg.bits.ldw.rst)) {
436 				NXGE_DELAY(delay_time);
437 				RXDMA_REG_READ64(handle, RXDMA_CFIG1_REG, rdc,
438 				    &cfg.value);
439 			}
440 			if (count == 0) {
441 				NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
442 				    " npi_rxdma_cfg_rdc_ctl"
443 				    " Reset Failed for RDC %d \n",
444 				    rdc));
445 				return (error);
446 			}
447 			break;
448 		default:
449 			return (NPI_RXDMA_SW_PARAM_ERROR);
450 	}
451 
452 	return (NPI_SUCCESS);
453 }
454 
455 npi_status_t
456 npi_rxdma_cfg_rdc_enable(npi_handle_t handle, uint8_t rdc)
457 {
458 	return (npi_rxdma_cfg_rdc_ctl(handle, rdc, RXDMA_OP_ENABLE));
459 }
460 
461 npi_status_t
462 npi_rxdma_cfg_rdc_disable(npi_handle_t handle, uint8_t rdc)
463 {
464 	return (npi_rxdma_cfg_rdc_ctl(handle, rdc, RXDMA_OP_DISABLE));
465 }
466 
467 npi_status_t
468 npi_rxdma_cfg_rdc_reset(npi_handle_t handle, uint8_t rdc)
469 {
470 	return (npi_rxdma_cfg_rdc_ctl(handle, rdc, RXDMA_OP_RESET));
471 }
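
/*
 * A minimal bring-up sketch using the three wrappers above (assuming the
 * caller already holds a valid handle and RDC number): reset the channel,
 * program its rings and logical pages, and only then enable it.
 *
 *	if (npi_rxdma_cfg_rdc_reset(handle, rdc) != NPI_SUCCESS)
 *		return (NPI_FAILURE);
 *	... program rings, logical pages, WRED, etc. ...
 *	if (npi_rxdma_cfg_rdc_enable(handle, rdc) != NPI_SUCCESS)
 *		return (NPI_FAILURE);
 */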
472 
473 /*
474  * npi_rxdma_cfg_default_port_rdc()
475  * Set the default rdc for the port
476  *
477  * Inputs:
478  *	handle:		register handle interpreted by the underlying OS
479  *	portnm:		Physical Port Number
480  *	rdc:	RX DMA Channel number
481  *
482  * Return:
483  * NPI_SUCCESS
484  * NPI_RXDMA_RDC_INVALID
485  * NPI_RXDMA_PORT_INVALID
486  *
487  */
488 npi_status_t npi_rxdma_cfg_default_port_rdc(npi_handle_t handle,
489 				    uint8_t portnm, uint8_t rdc)
490 {
491 
492 	uint64_t offset;
493 	def_pt_rdc_t cfg;
494 
495 	ASSERT(RXDMA_CHANNEL_VALID(rdc));
496 	if (!RXDMA_CHANNEL_VALID(rdc)) {
497 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
498 		    "rxdma_cfg_default_port_rdc"
499 		    " Illegal RDC number %d \n",
500 		    rdc));
501 		return (NPI_RXDMA_RDC_INVALID);
502 	}
503 
504 	ASSERT(RXDMA_PORT_VALID(portnm));
505 	if (!RXDMA_PORT_VALID(portnm)) {
506 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
507 		    "rxdma_cfg_default_port_rdc"
508 		    " Illegal Port number %d \n",
509 		    portnm));
510 		return (NPI_RXDMA_PORT_INVALID);
511 	}
512 
513 	offset = DEF_PT_RDC_REG(portnm);
514 	cfg.value = 0;
515 	cfg.bits.ldw.rdc = rdc;
516 	NXGE_REG_WR64(handle, offset, cfg.value);
517 	return (NPI_SUCCESS);
518 }
519 
520 npi_status_t
521 npi_rxdma_cfg_rdc_rcr_ctl(npi_handle_t handle, uint8_t rdc,
522 			    uint8_t op, uint16_t param)
523 {
524 	rcrcfig_b_t rcr_cfgb;
525 
526 	ASSERT(RXDMA_CHANNEL_VALID(rdc));
527 	if (!RXDMA_CHANNEL_VALID(rdc)) {
528 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
529 		    "rxdma_cfg_rdc_rcr_ctl"
530 		    " Illegal RDC number %d \n",
531 		    rdc));
532 		return (NPI_RXDMA_RDC_INVALID);
533 	}
534 
535 
536 	RXDMA_REG_READ64(handle, RCRCFIG_B_REG, rdc, &rcr_cfgb.value);
537 
538 	switch (op) {
539 		case RCR_TIMEOUT_ENABLE:
540 			rcr_cfgb.bits.ldw.timeout = (uint8_t)param;
541 			rcr_cfgb.bits.ldw.entout = 1;
542 			break;
543 
544 		case RCR_THRESHOLD:
545 			rcr_cfgb.bits.ldw.pthres = param;
546 			break;
547 
548 		case RCR_TIMEOUT_DISABLE:
549 			rcr_cfgb.bits.ldw.entout = 0;
550 			break;
551 
552 		default:
553 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
554 		    "rxdma_cfg_rdc_rcr_ctl"
555 		    " Illegal opcode %x \n",
556 		    op));
557 		return (NPI_RXDMA_OPCODE_INVALID(rdc));
558 	}
559 
560 	RXDMA_REG_WRITE64(handle, RCRCFIG_B_REG, rdc, rcr_cfgb.value);
561 	return (NPI_SUCCESS);
562 }
563 
564 npi_status_t
565 npi_rxdma_cfg_rdc_rcr_timeout_disable(npi_handle_t handle, uint8_t rdc)
566 {
567 	return (npi_rxdma_cfg_rdc_rcr_ctl(handle, rdc,
568 	    RCR_TIMEOUT_DISABLE, 0));
569 }
570 
571 npi_status_t
572 npi_rxdma_cfg_rdc_rcr_threshold(npi_handle_t handle, uint8_t rdc,
573 				    uint16_t rcr_threshold)
574 {
575 	return (npi_rxdma_cfg_rdc_rcr_ctl(handle, rdc,
576 	    RCR_THRESHOLD, rcr_threshold));
577 
578 }
579 
580 npi_status_t
581 npi_rxdma_cfg_rdc_rcr_timeout(npi_handle_t handle, uint8_t rdc,
582 			    uint8_t rcr_timeout)
583 {
584 	return (npi_rxdma_cfg_rdc_rcr_ctl(handle, rdc,
585 	    RCR_TIMEOUT_ENABLE, rcr_timeout));
586 
587 }
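
/*
 * A minimal interrupt-coalescing sketch using the two wrappers above, with
 * hypothetical values and assuming the usual threshold-or-timeout
 * semantics: request a completion interrupt once 8 packets have been
 * posted, or when the 16-tick timeout expires, whichever comes first.
 *
 *	(void) npi_rxdma_cfg_rdc_rcr_threshold(handle, rdc, 8);
 *	(void) npi_rxdma_cfg_rdc_rcr_timeout(handle, rdc, 16);
 */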
588 
589 /*
590  * npi_rxdma_cfg_rdc_ring()
591  * Configure The RDC channel Rcv Buffer Ring
592  */
593 npi_status_t
594 npi_rxdma_cfg_rdc_ring(npi_handle_t handle, uint8_t rdc,
595 			    rdc_desc_cfg_t *rdc_desc_cfg, boolean_t new_off)
596 {
597 	rbr_cfig_a_t cfga;
598 	rbr_cfig_b_t cfgb;
599 	rxdma_cfig1_t cfg1;
600 	rxdma_cfig2_t cfg2;
601 	rcrcfig_a_t rcr_cfga;
602 	rcrcfig_b_t rcr_cfgb;
603 
604 	ASSERT(RXDMA_CHANNEL_VALID(rdc));
605 	if (!RXDMA_CHANNEL_VALID(rdc)) {
606 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
607 		    "rxdma_cfg_rdc_ring"
608 		    " Illegal RDC number %d \n",
609 		    rdc));
610 		return (NPI_RXDMA_RDC_INVALID);
611 	}
612 
613 
614 	cfga.value = 0;
615 	cfgb.value = 0;
616 	cfg1.value = 0;
617 	cfg2.value = 0;
618 
619 	if (rdc_desc_cfg->mbox_enable == 1) {
620 		cfg1.bits.ldw.mbaddr_h =
621 		    (rdc_desc_cfg->mbox_addr >> 32) & 0xfff;
622 		cfg2.bits.ldw.mbaddr =
623 		    ((rdc_desc_cfg->mbox_addr &
624 		    RXDMA_CFIG2_MBADDR_L_MASK) >>
625 		    RXDMA_CFIG2_MBADDR_L_SHIFT);
626 
627 
628 		/*
629 		 * Enable the RDC only after all configurations are set;
630 		 * otherwise a configuration fatal error will be returned
631 		 * (especially if the Hypervisor has set up the logical
632 		 * pages with non-zero values).
633 		 * This NPI function only sets up the configuration.
634 		 */
635 	}
636 
637 
638 	if (rdc_desc_cfg->full_hdr == 1)
639 		cfg2.bits.ldw.full_hdr = 1;
640 
641 	if (new_off) {
642 		if (RXDMA_RF_BUFF_OFFSET_VALID(rdc_desc_cfg->offset)) {
643 			switch (rdc_desc_cfg->offset) {
644 			case SW_OFFSET_NO_OFFSET:
645 			case SW_OFFSET_64:
646 			case SW_OFFSET_128:
647 			case SW_OFFSET_192:
648 				cfg2.bits.ldw.offset = rdc_desc_cfg->offset;
649 				cfg2.bits.ldw.offset256 = 0;
650 				break;
651 			case SW_OFFSET_256:
652 			case SW_OFFSET_320:
653 			case SW_OFFSET_384:
654 			case SW_OFFSET_448:
655 				cfg2.bits.ldw.offset =
656 				    rdc_desc_cfg->offset & 0x3;
657 				cfg2.bits.ldw.offset256 = 1;
658 				break;
659 			default:
660 				cfg2.bits.ldw.offset = SW_OFFSET_NO_OFFSET;
661 				cfg2.bits.ldw.offset256 = 0;
662 			}
663 		} else {
664 			cfg2.bits.ldw.offset = SW_OFFSET_NO_OFFSET;
665 			cfg2.bits.ldw.offset256 = 0;
666 		}
667 	} else {
668 		if (RXDMA_BUFF_OFFSET_VALID(rdc_desc_cfg->offset)) {
669 			cfg2.bits.ldw.offset = rdc_desc_cfg->offset;
670 		} else {
671 			cfg2.bits.ldw.offset = SW_OFFSET_NO_OFFSET;
672 		}
673 	}
674 
675 		/* rbr config */
676 
677 	cfga.value = (rdc_desc_cfg->rbr_addr & (RBR_CFIG_A_STDADDR_MASK |
678 	    RBR_CFIG_A_STDADDR_BASE_MASK));
679 
680 	if ((rdc_desc_cfg->rbr_len < RBR_DEFAULT_MIN_LEN) ||
681 	    (rdc_desc_cfg->rbr_len > RBR_DEFAULT_MAX_LEN)) {
682 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
683 		    "npi_rxdma_cfg_rdc_ring"
684 		    " Illegal RBR Queue Length %d \n",
685 		    rdc_desc_cfg->rbr_len));
686 		return (NPI_RXDMA_ERROR_ENCODE(NPI_RXDMA_RBRSIZE_INVALID, rdc));
687 	}
688 
689 
690 	cfga.bits.hdw.len = rdc_desc_cfg->rbr_len;
691 	NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
692 	    "npi_rxdma_cfg_rdc_ring"
693 	    " CFGA 0x%llx hdw.len %d (RBR LEN %d)\n",
694 	    cfga.value, cfga.bits.hdw.len,
695 	    rdc_desc_cfg->rbr_len));
696 
697 	if (rdc_desc_cfg->page_size == SIZE_4KB)
698 		cfgb.bits.ldw.bksize = RBR_BKSIZE_4K;
699 	else if (rdc_desc_cfg->page_size == SIZE_8KB)
700 		cfgb.bits.ldw.bksize = RBR_BKSIZE_8K;
701 	else if (rdc_desc_cfg->page_size == SIZE_16KB)
702 		cfgb.bits.ldw.bksize = RBR_BKSIZE_16K;
703 	else if (rdc_desc_cfg->page_size == SIZE_32KB)
704 		cfgb.bits.ldw.bksize = RBR_BKSIZE_32K;
705 	else {
706 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
707 		    "rxdma_cfg_rdc_ring"
708 		    " blksize: Illegal buffer size %d \n",
709 		    rdc_desc_cfg->page_size));
710 		return (NPI_RXDMA_BUFSIZE_INVALID);
711 	}
712 
713 	if (rdc_desc_cfg->valid0) {
714 
715 		if (rdc_desc_cfg->size0 == SIZE_256B)
716 			cfgb.bits.ldw.bufsz0 = RBR_BUFSZ0_256B;
717 		else if (rdc_desc_cfg->size0 == SIZE_512B)
718 			cfgb.bits.ldw.bufsz0 = RBR_BUFSZ0_512B;
719 		else if (rdc_desc_cfg->size0 == SIZE_1KB)
720 			cfgb.bits.ldw.bufsz0 = RBR_BUFSZ0_1K;
721 		else if (rdc_desc_cfg->size0 == SIZE_2KB)
722 			cfgb.bits.ldw.bufsz0 = RBR_BUFSZ0_2K;
723 		else {
724 			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
725 			    " rxdma_cfg_rdc_ring"
726 			    " blksize0: Illegal buffer size %x \n",
727 			    rdc_desc_cfg->size0));
728 			return (NPI_RXDMA_BUFSIZE_INVALID);
729 		}
730 		cfgb.bits.ldw.vld0 = 1;
731 	} else {
732 		cfgb.bits.ldw.vld0 = 0;
733 	}
734 
735 
736 	if (rdc_desc_cfg->valid1) {
737 		if (rdc_desc_cfg->size1 == SIZE_1KB)
738 			cfgb.bits.ldw.bufsz1 = RBR_BUFSZ1_1K;
739 		else if (rdc_desc_cfg->size1 == SIZE_2KB)
740 			cfgb.bits.ldw.bufsz1 = RBR_BUFSZ1_2K;
741 		else if (rdc_desc_cfg->size1 == SIZE_4KB)
742 			cfgb.bits.ldw.bufsz1 = RBR_BUFSZ1_4K;
743 		else if (rdc_desc_cfg->size1 == SIZE_8KB)
744 			cfgb.bits.ldw.bufsz1 = RBR_BUFSZ1_8K;
745 		else {
746 			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
747 			    " rxdma_cfg_rdc_ring"
748 			    " blksize1: Illegal buffer size %x \n",
749 			    rdc_desc_cfg->size1));
750 			return (NPI_RXDMA_BUFSIZE_INVALID);
751 		}
752 		cfgb.bits.ldw.vld1 = 1;
753 	} else {
754 		cfgb.bits.ldw.vld1 = 0;
755 	}
756 
757 
758 	if (rdc_desc_cfg->valid2) {
759 		if (rdc_desc_cfg->size2 == SIZE_2KB)
760 			cfgb.bits.ldw.bufsz2 = RBR_BUFSZ2_2K;
761 		else if (rdc_desc_cfg->size2 == SIZE_4KB)
762 			cfgb.bits.ldw.bufsz2 = RBR_BUFSZ2_4K;
763 		else if (rdc_desc_cfg->size2 == SIZE_8KB)
764 			cfgb.bits.ldw.bufsz2 = RBR_BUFSZ2_8K;
765 		else if (rdc_desc_cfg->size2 == SIZE_16KB)
766 			cfgb.bits.ldw.bufsz2 = RBR_BUFSZ2_16K;
767 		else {
768 			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
769 			    " rxdma_cfg_rdc_ring"
770 			    " blksize2: Illegal buffer size %x \n",
771 			    rdc_desc_cfg->size2));
772 			return (NPI_RXDMA_BUFSIZE_INVALID);
773 		}
774 		cfgb.bits.ldw.vld2 = 1;
775 	} else {
776 		cfgb.bits.ldw.vld2 = 0;
777 	}
778 
779 
780 	rcr_cfga.value = (rdc_desc_cfg->rcr_addr &
781 	    (RCRCFIG_A_STADDR_MASK |
782 	    RCRCFIG_A_STADDR_BASE_MASK));
783 
784 
785 	if ((rdc_desc_cfg->rcr_len < RCR_DEFAULT_MIN_LEN) ||
786 	    (rdc_desc_cfg->rcr_len > NXGE_RCR_MAX)) {
787 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
788 		    " rxdma_cfg_rdc_ring"
789 		    " Illegal RCR Queue Length %d \n",
790 		    rdc_desc_cfg->rcr_len));
791 		return (NPI_RXDMA_ERROR_ENCODE(NPI_RXDMA_RCRSIZE_INVALID, rdc));
792 	}
793 
794 	rcr_cfga.bits.hdw.len = rdc_desc_cfg->rcr_len;
795 
796 
797 	rcr_cfgb.value = 0;
798 	if (rdc_desc_cfg->rcr_timeout_enable == 1) {
799 		/* check if the rcr timeout value is valid */
800 
801 		if (RXDMA_RCR_TO_VALID(rdc_desc_cfg->rcr_timeout)) {
802 			rcr_cfgb.bits.ldw.timeout = rdc_desc_cfg->rcr_timeout;
803 			rcr_cfgb.bits.ldw.entout = 1;
804 		} else {
805 			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
806 			    " rxdma_cfg_rdc_ring"
807 			    " Illegal RCR Timeout value %d \n",
808 			    rdc_desc_cfg->rcr_timeout));
809 			rcr_cfgb.bits.ldw.entout = 0;
810 		}
811 	} else {
812 		rcr_cfgb.bits.ldw.entout = 0;
813 	}
814 
815 		/* check if the rcr threshold value is valid */
816 	if (RXDMA_RCR_THRESH_VALID(rdc_desc_cfg->rcr_threshold)) {
817 		rcr_cfgb.bits.ldw.pthres = rdc_desc_cfg->rcr_threshold;
818 	} else {
819 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
820 		    " rxdma_cfg_rdc_ring"
821 		    " Illegal RCR Threshold value %d \n",
822 		    rdc_desc_cfg->rcr_threshold));
823 		rcr_cfgb.bits.ldw.pthres = 1;
824 	}
825 
826 		/* now do the actual HW configuration */
827 	RXDMA_REG_WRITE64(handle, RXDMA_CFIG1_REG, rdc, cfg1.value);
828 	RXDMA_REG_WRITE64(handle, RXDMA_CFIG2_REG, rdc, cfg2.value);
829 
830 
831 	RXDMA_REG_WRITE64(handle, RBR_CFIG_A_REG, rdc, cfga.value);
832 	RXDMA_REG_WRITE64(handle, RBR_CFIG_B_REG, rdc, cfgb.value);
833 
834 	RXDMA_REG_WRITE64(handle, RCRCFIG_A_REG, rdc, rcr_cfga.value);
835 	RXDMA_REG_WRITE64(handle, RCRCFIG_B_REG, rdc, rcr_cfgb.value);
836 
837 	return (NPI_SUCCESS);
838 
839 }
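
/*
 * A minimal configuration sketch for npi_rxdma_cfg_rdc_ring(), with
 * hypothetical DMA addresses and ring sizes; every field set here is
 * consumed by the function above.
 *
 *	rdc_desc_cfg_t cfg;
 *
 *	bzero(&cfg, sizeof (cfg));
 *	cfg.mbox_enable = 1;
 *	cfg.mbox_addr = mbox_dma_addr;
 *	cfg.offset = SW_OFFSET_64;
 *	cfg.rbr_addr = rbr_dma_addr;
 *	cfg.rbr_len = 1024;
 *	cfg.page_size = SIZE_8KB;
 *	cfg.valid0 = 1;
 *	cfg.size0 = SIZE_2KB;
 *	cfg.rcr_addr = rcr_dma_addr;
 *	cfg.rcr_len = 2048;
 *	cfg.rcr_timeout_enable = 1;
 *	cfg.rcr_timeout = 16;
 *	cfg.rcr_threshold = 8;
 *	(void) npi_rxdma_cfg_rdc_ring(handle, rdc, &cfg, B_FALSE);
 */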
840 
841 /*
842  * npi_rxdma_red_discard_stat_get
843  * Gets the current discard count due to RED.
844  * The counter overflow bit is cleared if it has been set.
845  *
846  * Inputs:
847  *      handle:	opaque handle interpreted by the underlying OS
848  *	rdc:		RX DMA Channel number
849  *	cnt:	Ptr to structure to write current RDC discard stat
850  *
851  * Return:
852  * NPI_SUCCESS
853  * NPI_RXDMA_RDC_INVALID
854  *
855  */
856 npi_status_t
857 npi_rxdma_red_discard_stat_get(npi_handle_t handle, uint8_t rdc,
858 				    rx_disc_cnt_t *cnt)
859 {
860 	uint64_t offset;
861 
862 	ASSERT(RXDMA_CHANNEL_VALID(rdc));
863 	if (!RXDMA_CHANNEL_VALID(rdc)) {
864 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
865 		    " npi_rxdma_red_discard_stat_get"
866 		    " Illegal RDC Number %d \n",
867 		    rdc));
868 		return (NPI_RXDMA_RDC_INVALID);
869 	}
870 
871 	offset = RDC_RED_RDC_DISC_REG(rdc);
872 	NXGE_REG_RD64(handle, offset, &cnt->value);
873 	if (cnt->bits.ldw.oflow) {
874 		NPI_DEBUG_MSG((handle.function, NPI_ERR_CTL,
875 		    " npi_rxdma_red_discard_stat_get"
876 		    " Counter overflow for channel %d ",
877 		    " ..... clearing \n",
878 		    rdc));
879 		cnt->bits.ldw.oflow = 0;
880 		NXGE_REG_WR64(handle, offset, cnt->value);
881 		cnt->bits.ldw.oflow = 1;
882 	}
883 
884 	return (NPI_SUCCESS);
885 }
886 
887 /*
888  * npi_rxdma_red_discard_oflow_clear
889  * Clear RED discard counter overflow bit
890  *
891  * Inputs:
892  *      handle:	opaque handle interpreted by the underlying OS
893  *	rdc:		RX DMA Channel number
894  *
895  * Return:
896  * NPI_SUCCESS
897  * NPI_RXDMA_RDC_INVALID
898  *
899  */
900 npi_status_t
901 npi_rxdma_red_discard_oflow_clear(npi_handle_t handle, uint8_t rdc)
902 
903 {
904 	uint64_t offset;
905 	rx_disc_cnt_t cnt;
906 
907 	ASSERT(RXDMA_CHANNEL_VALID(rdc));
908 	if (!RXDMA_CHANNEL_VALID(rdc)) {
909 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
910 			    " npi_rxdma_red_discard_oflow_clear"
911 			    " Illegal RDC Number %d \n",
912 			    rdc));
913 		return (NPI_RXDMA_RDC_INVALID);
914 	}
915 
916 	offset = RDC_RED_RDC_DISC_REG(rdc);
917 	NXGE_REG_RD64(handle, offset, &cnt.value);
918 	if (cnt.bits.ldw.oflow) {
919 		NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
920 			    " npi_rxdma_red_discard_oflow_clear"
921 			    " Counter overflow for channel %d ",
922 			    " ..... clearing \n",
923 			    rdc));
924 		cnt.bits.ldw.oflow = 0;
925 		NXGE_REG_WR64(handle, offset, cnt.value);
926 	}
927 	return (NPI_SUCCESS);
928 }
929 
930 /*
931  * npi_rxdma_misc_discard_stat_get
932  * Gets the current discard count for the rdc due to
933  * the buffer pool being empty.
934  * The counter overflow bit is cleared if it has been set.
935  *
936  * Inputs:
937  *      handle:	opaque handle interpreted by the underlying OS
938  *	rdc:		RX DMA Channel number
939  *	cnt:	Ptr to structure to write current RDC discard stat
940  *
941  * Return:
942  * NPI_SUCCESS
943  * NPI_RXDMA_RDC_INVALID
944  *
945  */
946 npi_status_t
947 npi_rxdma_misc_discard_stat_get(npi_handle_t handle, uint8_t rdc,
948 				    rx_disc_cnt_t *cnt)
949 {
950 	ASSERT(RXDMA_CHANNEL_VALID(rdc));
951 	if (!RXDMA_CHANNEL_VALID(rdc)) {
952 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
953 		    " npi_rxdma_misc_discard_stat_get"
954 		    " Illegal RDC Number %d \n",
955 		    rdc));
956 		return (NPI_RXDMA_RDC_INVALID);
957 	}
958 
959 	RXDMA_REG_READ64(handle, RXMISC_DISCARD_REG, rdc, &cnt->value);
960 	if (cnt->bits.ldw.oflow) {
961 		NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
962 		    " npi_rxdma_misc_discard_stat_get"
963 		    " Counter overflow for channel %d ",
964 		    " ..... clearing \n",
965 		    rdc));
966 		cnt->bits.ldw.oflow = 0;
967 		RXDMA_REG_WRITE64(handle, RXMISC_DISCARD_REG, rdc, cnt->value);
968 		cnt->bits.ldw.oflow = 1;
969 	}
970 
971 	return (NPI_SUCCESS);
972 }
973 
974 /*
975  * npi_rxdma_misc_discard_oflow_clear
976  * Clear the overflow bit of the buffer pool empty
977  * discard counter
978  * for the rdc
979  *
980  * Inputs:
981  *      handle:	opaque handle interpreted by the underlying OS
982  *	rdc:		RX DMA Channel number
983  *
984  * Return:
985  * NPI_SUCCESS
986  * NPI_RXDMA_RDC_INVALID
987  *
988  */
989 npi_status_t
990 npi_rxdma_misc_discard_oflow_clear(npi_handle_t handle, uint8_t rdc)
991 {
992 	rx_disc_cnt_t cnt;
993 
994 	ASSERT(RXDMA_CHANNEL_VALID(rdc));
995 	if (!RXDMA_CHANNEL_VALID(rdc)) {
996 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
997 		    " npi_rxdma_misc_discard_oflow_clear"
998 		    " Illegal RDC Number %d \n",
999 		    rdc));
1000 		return (NPI_RXDMA_RDC_INVALID);
1001 	}
1002 
1003 	RXDMA_REG_READ64(handle, RXMISC_DISCARD_REG, rdc, &cnt.value);
1004 	if (cnt.bits.ldw.oflow) {
1005 		NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
1006 		    " npi_rxdma_misc_discard_oflow_clear"
1007 		    " Counter overflow for channel %d ",
1008 		    " ..... clearing \n",
1009 		    rdc));
1010 		cnt.bits.ldw.oflow = 0;
1011 		RXDMA_REG_WRITE64(handle, RXMISC_DISCARD_REG, rdc, cnt.value);
1012 	}
1013 
1014 	return (NPI_SUCCESS);
1015 }
1016 
1017 /*
1018  * npi_rxdma_ring_perr_stat_get
1019  * Gets the current RDC memory parity error status.
1020  * The error bits are cleared if they have been set.
1021  *
1022  * Inputs:
1023  * handle:	opaque handle interpreted by the underlying OS
1024  * pre_log:	Structure to write current RDC Prefetch memory
1025  *		Parity Error stat
1026  * sha_log:	Structure to write current RDC Shadow memory
1027  *		Parity Error stat
1028  *
1029  * Return:
1030  * NPI_SUCCESS
1031  *
1032  */
1033 npi_status_t
1034 npi_rxdma_ring_perr_stat_get(npi_handle_t handle,
1035 			    rdmc_par_err_log_t *pre_log,
1036 			    rdmc_par_err_log_t *sha_log)
1037 {
1038 	uint64_t pre_offset, sha_offset;
1039 	rdmc_par_err_log_t clr;
1040 	int clr_bits = 0;
1041 
1042 	pre_offset = RDMC_PRE_PAR_ERR_REG;
1043 	sha_offset = RDMC_SHA_PAR_ERR_REG;
1044 	NXGE_REG_RD64(handle, pre_offset, &pre_log->value);
1045 	NXGE_REG_RD64(handle, sha_offset, &sha_log->value);
1046 
1047 	clr.value = pre_log->value;
1048 	if (pre_log->bits.ldw.err) {
1049 		NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
1050 		    " npi_rxdma_ring_perr_stat_get"
1051 		    " PRE ERR Bit set ..... clearing \n"));
1052 		clr.bits.ldw.err = 0;
1053 		clr_bits++;
1054 	}
1055 
1056 	if (pre_log->bits.ldw.merr) {
1057 		NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
1058 		    " npi_rxdma_ring_perr_stat_get"
1059 		    " PRE MERR Bit set ..... clearing \n"));
1060 		clr.bits.ldw.merr = 0;
1061 		clr_bits++;
1062 	}
1063 
1064 	if (clr_bits) {
1065 		NXGE_REG_WR64(handle, pre_offset, clr.value);
1066 	}
1067 
1068 	clr_bits = 0;
1069 	clr.value = sha_log->value;
1070 	if (sha_log->bits.ldw.err) {
1071 		NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
1072 		    " npi_rxdma_ring_perr_stat_get"
1073 		    " SHA ERR Bit set ..... clearing \n"));
1074 		clr.bits.ldw.err = 0;
1075 		clr_bits++;
1076 	}
1077 
1078 	if (sha_log->bits.ldw.merr) {
1079 		NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
1080 		    " npi_rxdma_ring_perr_stat_get"
1081 		    " SHA MERR Bit set ..... clearing \n"));
1082 		clr.bits.ldw.merr = 0;
1083 		clr_bits++;
1084 	}
1085 
1086 	if (clr_bits) {
1087 		NXGE_REG_WR64(handle, sha_offset, clr.value);
1088 	}
1089 
1090 	return (NPI_SUCCESS);
1091 }
1092 
1093 /*
1094  * npi_rxdma_ring_perr_stat_clear
1095  * Clear the RDC memory parity error status bits
1096  *
1097  * Inputs:
1098  *      handle:	opaque handle interpreted by the underlying OS
1099  * Return:
1100  * NPI_SUCCESS
1101  *
1102  */
1103 npi_status_t
1104 npi_rxdma_ring_perr_stat_clear(npi_handle_t handle)
1105 {
1106 	uint64_t pre_offset, sha_offset;
1107 	rdmc_par_err_log_t clr;
1108 	int clr_bits = 0;
1109 	pre_offset = RDMC_PRE_PAR_ERR_REG;
1110 	sha_offset = RDMC_SHA_PAR_ERR_REG;
1111 
1112 	NXGE_REG_RD64(handle, pre_offset, &clr.value);
1113 
1114 	if (clr.bits.ldw.err) {
1115 		NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
1116 		    " npi_rxdma_ring_perr_stat_get"
1117 		    " PRE ERR Bit set ..... clearing \n"));
1118 		clr.bits.ldw.err = 0;
1119 		clr_bits++;
1120 	}
1121 
1122 	if (clr.bits.ldw.merr) {
1123 		NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
1124 		    " npi_rxdma_ring_perr_stat_get"
1125 		    " PRE MERR Bit set ..... clearing \n"));
1126 		clr.bits.ldw.merr = 0;
1127 		clr_bits++;
1128 	}
1129 
1130 	if (clr_bits) {
1131 		NXGE_REG_WR64(handle, pre_offset, clr.value);
1132 	}
1133 
1134 	clr_bits = 0;
1135 	NXGE_REG_RD64(handle, sha_offset, &clr.value);
1136 	if (clr.bits.ldw.err) {
1137 		NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
1138 		    " npi_rxdma_ring_perr_stat_get"
1139 		    " SHA ERR Bit set ..... clearing \n"));
1140 		clr.bits.ldw.err = 0;
1141 		clr_bits++;
1142 	}
1143 
1144 	if (clr.bits.ldw.merr) {
1145 		NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
1146 		    " npi_rxdma_ring_perr_stat_get"
1147 		    " SHA MERR Bit set ..... clearing \n"));
1148 		clr.bits.ldw.merr = 0;
1149 		clr_bits++;
1150 	}
1151 
1152 	if (clr_bits) {
1153 		NXGE_REG_WR64(handle, sha_offset, clr.value);
1154 	}
1155 
1156 	return (NPI_SUCCESS);
1157 }
1158 
1159 /*
1160  * Access the RDMC Memory: used for debugging
1161  */
1162 npi_status_t
1163 npi_rxdma_rdmc_memory_io(npi_handle_t handle,
1164 			    rdmc_mem_access_t *data, uint8_t op)
1165 {
1166 	uint64_t d0_offset, d1_offset, d2_offset, d3_offset, d4_offset;
1167 	uint64_t addr_offset;
1168 	rdmc_mem_addr_t addr;
1169 	rdmc_mem_data_t d0, d1, d2, d3, d4;
1170 	d0.value = 0;
1171 	d1.value = 0;
1172 	d2.value = 0;
1173 	d3.value = 0;
1174 	d4.value = 0;
1175 	addr.value = 0;
1176 
1177 
1178 	if ((data->location != RDMC_MEM_ADDR_PREFETCH) &&
1179 	    (data->location != RDMC_MEM_ADDR_SHADOW)) {
1180 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
1181 		    " npi_rxdma_rdmc_memory_io"
1182 		    " Illegal memory Type %x \n",
1183 		    data->location));
1184 		return (NPI_RXDMA_OPCODE_INVALID(0));
1185 	}
1186 
1187 	addr_offset = RDMC_MEM_ADDR_REG;
1188 	addr.bits.ldw.addr = data->addr;
1189 	addr.bits.ldw.pre_shad = data->location;
1190 
1191 	d0_offset = RDMC_MEM_DATA0_REG;
1192 	d1_offset = RDMC_MEM_DATA1_REG;
1193 	d2_offset = RDMC_MEM_DATA2_REG;
1194 	d3_offset = RDMC_MEM_DATA3_REG;
1195 	d4_offset = RDMC_MEM_DATA4_REG;
1196 
1197 
1198 	if (op == RDMC_MEM_WRITE) {
1199 		d0.bits.ldw.data = data->data[0];
1200 		d1.bits.ldw.data = data->data[1];
1201 		d2.bits.ldw.data = data->data[2];
1202 		d3.bits.ldw.data = data->data[3];
1203 		d4.bits.ldw.data = data->data[4];
1204 		NXGE_REG_WR64(handle, addr_offset, addr.value);
1205 		NXGE_REG_WR64(handle, d0_offset, d0.value);
1206 		NXGE_REG_WR64(handle, d1_offset, d1.value);
1207 		NXGE_REG_WR64(handle, d2_offset, d2.value);
1208 		NXGE_REG_WR64(handle, d3_offset, d3.value);
1209 		NXGE_REG_WR64(handle, d4_offset, d4.value);
1210 	} else if (op == RDMC_MEM_READ) {
1213 		NXGE_REG_WR64(handle, addr_offset, addr.value);
1214 		NXGE_REG_RD64(handle, d4_offset, &d4.value);
1215 		NXGE_REG_RD64(handle, d3_offset, &d3.value);
1216 		NXGE_REG_RD64(handle, d2_offset, &d2.value);
1217 		NXGE_REG_RD64(handle, d1_offset, &d1.value);
1218 		NXGE_REG_RD64(handle, d0_offset, &d0.value);
1219 
1220 		data->data[0] = d0.bits.ldw.data;
1221 		data->data[1] = d1.bits.ldw.data;
1222 		data->data[2] = d2.bits.ldw.data;
1223 		data->data[3] = d3.bits.ldw.data;
1224 		data->data[4] = d4.bits.ldw.data;
1225 	} else {
1226 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
1227 		    " npi_rxdma_rdmc_memory_io"
1228 		    " Illegal opcode %x \n",
1229 		    op));
1230 		return (NPI_RXDMA_OPCODE_INVALID(0));
1231 
1232 	}
1233 
1234 	return (NPI_SUCCESS);
1235 }
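
/*
 * A minimal debug sketch for the routine above: read one entry (at a
 * hypothetical address of 0) from the shadow memory and leave the five
 * 32-bit words in data[0] through data[4].
 *
 *	rdmc_mem_access_t mem;
 *
 *	bzero(&mem, sizeof (mem));
 *	mem.location = RDMC_MEM_ADDR_SHADOW;
 *	mem.addr = 0;
 *	if (npi_rxdma_rdmc_memory_io(handle, &mem, RDMC_MEM_READ) ==
 *	    NPI_SUCCESS) {
 *		... mem.data[0] through mem.data[4] hold the entry ...
 *	}
 */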
1236 
1237 /*
1238  * system wide conf functions
1239  */
1240 npi_status_t
1241 npi_rxdma_cfg_clock_div_set(npi_handle_t handle, uint16_t count)
1242 {
1243 	uint64_t offset;
1244 	rx_dma_ck_div_t clk_div;
1245 
1246 	offset = RX_DMA_CK_DIV_REG;
1247 
1248 	clk_div.value = 0;
1249 	clk_div.bits.ldw.cnt = count;
1250 	NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
1251 	    " npi_rxdma_cfg_clock_div_set: add 0x%llx "
1252 	    "handle 0x%llx value 0x%llx",
1253 	    handle.regp, handle.regh, clk_div.value));
1254 
1255 	NXGE_REG_WR64(handle, offset, clk_div.value);
1256 
1257 	return (NPI_SUCCESS);
1258 }
1259 
1260 npi_status_t
1261 npi_rxdma_cfg_red_rand_init(npi_handle_t handle, uint16_t init_value)
1262 {
1263 	uint64_t offset;
1264 	red_ran_init_t rand_reg;
1265 
1266 	offset = RED_RAN_INIT_REG;
1267 
1268 	rand_reg.value = 0;
1269 	rand_reg.bits.ldw.init = init_value;
1270 	rand_reg.bits.ldw.enable = 1;
1271 	NXGE_REG_WR64(handle, offset, rand_reg.value);
1272 
1273 	return (NPI_SUCCESS);
1274 
1275 }
1276 
1277 npi_status_t
1278 npi_rxdma_cfg_red_rand_disable(npi_handle_t handle)
1279 {
1280 	uint64_t offset;
1281 	red_ran_init_t rand_reg;
1282 
1283 	offset = RED_RAN_INIT_REG;
1284 
1285 	NXGE_REG_RD64(handle, offset, &rand_reg.value);
1286 	rand_reg.bits.ldw.enable = 0;
1287 	NXGE_REG_WR64(handle, offset, rand_reg.value);
1288 
1289 	return (NPI_SUCCESS);
1290 
1291 }
1292 
1293 npi_status_t
1294 npi_rxdma_cfg_32bitmode_enable(npi_handle_t handle)
1295 {
1296 	uint64_t offset;
1297 	rx_addr_md_t md_reg;
1298 	offset = RX_ADDR_MD_REG;
1299 	md_reg.value = 0;
1300 	md_reg.bits.ldw.mode32 = 1;
1301 
1302 	NXGE_REG_WR64(handle, offset, md_reg.value);
1303 	return (NPI_SUCCESS);
1304 
1305 }
1306 
1307 npi_status_t
1308 npi_rxdma_cfg_32bitmode_disable(npi_handle_t handle)
1309 {
1310 	uint64_t offset;
1311 	rx_addr_md_t md_reg;
1312 	offset = RX_ADDR_MD_REG;
1313 	md_reg.value = 0;
1314 
1315 	NXGE_REG_WR64(handle, offset, md_reg.value);
1316 	return (NPI_SUCCESS);
1317 
1318 }
1319 
1320 npi_status_t
1321 npi_rxdma_cfg_ram_access_enable(npi_handle_t handle)
1322 {
1323 	uint64_t offset;
1324 	rx_addr_md_t md_reg;
1325 	offset = RX_ADDR_MD_REG;
1326 	NXGE_REG_RD64(handle, offset, &md_reg.value);
1327 	md_reg.bits.ldw.ram_acc = 1;
1328 	NXGE_REG_WR64(handle, offset, md_reg.value);
1329 	return (NPI_SUCCESS);
1330 
1331 }
1332 
1333 npi_status_t
1334 npi_rxdma_cfg_ram_access_disable(npi_handle_t handle)
1335 {
1336 	uint64_t offset;
1337 	rx_addr_md_t md_reg;
1338 	offset = RX_ADDR_MD_REG;
1339 	NXGE_REG_RD64(handle, offset, &md_reg.value);
1340 	md_reg.bits.ldw.ram_acc = 0;
1341 	NXGE_REG_WR64(handle, offset, md_reg.value);
1342 	return (NPI_SUCCESS);
1343 
1344 }
1345 
1346 npi_status_t
1347 npi_rxdma_cfg_port_ddr_weight(npi_handle_t handle,
1348 				    uint8_t portnm, uint32_t weight)
1349 {
1350 
1351 	pt_drr_wt_t wt_reg;
1352 	uint64_t offset;
1353 
1354 	ASSERT(RXDMA_PORT_VALID(portnm));
1355 	if (!RXDMA_PORT_VALID(portnm)) {
1356 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
1357 		    " rxdma_cfg_port_ddr_weight"
1358 		    " Illegal Port Number %d \n",
1359 		    portnm));
1360 		return (NPI_RXDMA_PORT_INVALID);
1361 	}
1362 
1363 	offset = PT_DRR_WT_REG(portnm);
1364 	wt_reg.value = 0;
1365 	wt_reg.bits.ldw.wt = weight;
1366 	NXGE_REG_WR64(handle, offset, wt_reg.value);
1367 	return (NPI_SUCCESS);
1368 }
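
/*
 * A minimal sketch for the weight configuration above, assuming (per the
 * WEIGHT_FACTOR comment at the top of this file) that the weight is
 * derived from a byte-frame count such as the port MTU; port_mtu_bytes is
 * a hypothetical caller-supplied value.
 *
 *	uint32_t weight = port_mtu_bytes * WEIGHT_FACTOR;
 *
 *	(void) npi_rxdma_cfg_port_ddr_weight(handle, portnm, weight);
 */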
1369 
1370 npi_status_t
1371 npi_rxdma_port_usage_get(npi_handle_t handle,
1372 				    uint8_t portnm, uint32_t *blocks)
1373 {
1374 
1375 	pt_use_t use_reg;
1376 	uint64_t offset;
1377 
1378 	ASSERT(RXDMA_PORT_VALID(portnm));
1379 	if (!RXDMA_PORT_VALID(portnm)) {
1380 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
1381 		    " rxdma_port_usage_get"
1382 		    " Illegal Port Number %d \n",
1383 		    portnm));
1384 		return (NPI_RXDMA_PORT_INVALID);
1385 	}
1386 
1387 	offset = PT_USE_REG(portnm);
1388 	NXGE_REG_RD64(handle, offset, &use_reg.value);
1389 	*blocks = use_reg.bits.ldw.cnt;
1390 	return (NPI_SUCCESS);
1391 
1392 }
1393 
1394 npi_status_t
1395 npi_rxdma_cfg_wred_param(npi_handle_t handle, uint8_t rdc,
1396 				    rdc_red_para_t *wred_params)
1397 {
1398 	rdc_red_para_t wred_reg;
1399 	uint64_t offset;
1400 
1401 	ASSERT(RXDMA_CHANNEL_VALID(rdc));
1402 	if (!RXDMA_CHANNEL_VALID(rdc)) {
1403 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
1404 		    " rxdma_cfg_wred_param"
1405 		    " Illegal RDC Number %d \n",
1406 		    rdc));
1407 		return (NPI_RXDMA_RDC_INVALID);
1408 	}
1409 
1410 	/*
1411 	 * need to update RDC_RED_PARA_REG as well as bit defs in
1412 	 * the hw header file
1413 	 */
1414 	offset = RDC_RED_RDC_PARA_REG(rdc);
1415 
1416 	NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
1417 	    " npi_rxdma_cfg_wred_param: "
1418 	    "set RED_PARA: passed value 0x%llx "
1419 	    "win 0x%x thre 0x%x sync 0x%x thre_sync 0x%x",
1420 	    wred_params->value,
1421 	    wred_params->bits.ldw.win,
1422 	    wred_params->bits.ldw.thre,
1423 	    wred_params->bits.ldw.win_syn,
1424 	    wred_params->bits.ldw.thre_sync));
1425 
1426 	wred_reg.value = 0;
1427 	wred_reg.bits.ldw.win = wred_params->bits.ldw.win;
1428 	wred_reg.bits.ldw.thre = wred_params->bits.ldw.thre;
1429 	wred_reg.bits.ldw.win_syn = wred_params->bits.ldw.win_syn;
1430 	wred_reg.bits.ldw.thre_sync = wred_params->bits.ldw.thre_sync;
1431 	NXGE_REG_WR64(handle, offset, wred_reg.value);
1432 
1433 	NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
1434 	    "set RED_PARA: value 0x%llx "
1435 	    "win 0x%x thre 0x%x sync 0x%x thre_sync 0x%x",
1436 	    wred_reg.value,
1437 	    wred_reg.bits.ldw.win,
1438 	    wred_reg.bits.ldw.thre,
1439 	    wred_reg.bits.ldw.win_syn,
1440 	    wred_reg.bits.ldw.thre_sync));
1441 
1442 	return (NPI_SUCCESS);
1443 }
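
/*
 * A minimal WRED sketch with hypothetical window and threshold values;
 * only the four fields copied by the function above are honored, and the
 * rest of the register image is cleared before the write.
 *
 *	rdc_red_para_t red;
 *
 *	red.value = 0;
 *	red.bits.ldw.win = 4;
 *	red.bits.ldw.thre = 512;
 *	red.bits.ldw.win_syn = 4;
 *	red.bits.ldw.thre_sync = 256;
 *	(void) npi_rxdma_cfg_wred_param(handle, rdc, &red);
 */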
1444 
1445 /*
1446  * npi_rxdma_rdc_table_config()
1447  * Configure/populate the RDC table
1448  *
1449  * Inputs:
1450  *	handle:	register handle interpreted by the underlying OS
1451  *	table:	RDC Group Number
1452  *	map:	A bitmap of the RDCs to populate with.
1453  *	count:	A count of the RDCs expressed in <map>.
1454  *
1455  * Notes:
1456  *	This function assumes that we are not using the TCAM, but are
1457  *	hashing all fields of the incoming ethernet packet!
1458  *
1459  * Return:
1460  *	NPI_SUCCESS
1461  *	NPI_RXDMA_TABLE_INVALID
1462  *
1463  */
1464 npi_status_t
1465 npi_rxdma_rdc_table_config(
1466 	npi_handle_t handle,
1467 	uint8_t table,
1468 	dc_map_t rdc_map,
1469 	int count)
1470 {
1471 	int8_t set[NXGE_MAX_RDCS];
1472 	int i, cursor;
1473 
1474 	rdc_tbl_t rdc_tbl;
1475 	uint64_t offset;
1476 
1477 	ASSERT(RXDMA_TABLE_VALID(table));
1478 	if (!RXDMA_TABLE_VALID(table)) {
1479 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
1480 		    " npi_rxdma_cfg_rdc_table"
1481 		    " Illegal RDC Table Number %d \n",
1482 		    table));
1483 		return (NPI_RXDMA_TABLE_INVALID);
1484 	}
1485 
1486 	if (count == 0)		/* This shouldn't happen */
1487 		return (NPI_SUCCESS);
1488 
1489 	for (i = 0, cursor = 0; i < NXGE_MAX_RDCS; i++) {
1490 		if ((1 << i) & rdc_map) {
1491 			set[cursor++] = (int8_t)i;
1492 			if (cursor == count)
1493 				break;
1494 		}
1495 	}
1496 
1497 	rdc_tbl.value = 0;
1498 	offset = REG_RDC_TABLE_OFFSET(table);
1499 
1500 	/* Now write ( NXGE_MAX_RDCS / count ) sets of RDC numbers. */
1501 	for (i = 0, cursor = 0; i < NXGE_MAX_RDCS; i++) {
1502 		rdc_tbl.bits.ldw.rdc = set[cursor++];
1503 		NXGE_REG_WR64(handle, offset, rdc_tbl.value);
1504 		offset += sizeof (rdc_tbl.value);
1505 		if (cursor == count)
1506 			cursor = 0;
1507 	}
1508 
1509 	/*
1510 	 * Here is what the resulting table looks like with:
1511 	 *
1512 	 *  0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f
1513 	 * +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
1514 	 * |v |w |x |y |z |v |w |x |y |z |v |w |x |y |z |v | 5 RDCs
1515 	 * +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
1516 	 * |w |x |y |z |w |x |y |z |w |x |y |z |w |x |y |z | 4 RDCs
1517 	 * +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
1518 	 * |x |y |z |x |y |z |x |y |z |x |y |z |x |y |z |x | 3 RDCs
1519 	 * +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
1520 	 * |x |y |x |y |x |y |x |y |x |y |x |y |x |y |x |y | 2 RDCs
1521 	 * +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
1522 	 * |x |x |x |x |x |x |x |x |x |x |x |x |x |x |x |x | 1 RDC
1523 	 * +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
1524 	 */
1525 
1526 	return (NPI_SUCCESS);
1527 }
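
/*
 * A minimal sketch for populating one RDC group with the function above,
 * using a hypothetical membership of channels 0, 1 and 4: the map is a
 * bitmask of member RDCs and count is the number of bits set in it.
 *
 *	dc_map_t map = (1 << 0) | (1 << 1) | (1 << 4);
 *
 *	(void) npi_rxdma_rdc_table_config(handle, table, map, 3);
 */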
1528 
1529 npi_status_t
1530 npi_rxdma_cfg_rdc_table_default_rdc(npi_handle_t handle,
1531 			    uint8_t table, uint8_t rdc)
1532 {
1533 	uint64_t offset;
1534 	rdc_tbl_t tbl_reg;
1535 	tbl_reg.value = 0;
1536 
1537 	ASSERT(RXDMA_TABLE_VALID(table));
1538 	if (!RXDMA_TABLE_VALID(table)) {
1539 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
1540 		    " npi_rxdma_cfg_rdc_table"
1541 		    " Illegal RDC table Number %d \n",
1542 		    rdc));
1543 		return (NPI_RXDMA_TABLE_INVALID);
1544 	}
1545 
1546 	offset = REG_RDC_TABLE_OFFSET(table);
1547 	tbl_reg.bits.ldw.rdc = rdc;
1548 	NXGE_REG_WR64(handle, offset, tbl_reg.value);
1549 	return (NPI_SUCCESS);
1550 
1551 }
1552 
1553 npi_status_t
1554 npi_rxdma_dump_rdc_table(npi_handle_t handle,
1555 			    uint8_t table)
1556 {
1557 	uint64_t offset;
1558 	int tbl_offset;
1559 	uint64_t value;
1560 
1561 	ASSERT(RXDMA_TABLE_VALID(table));
1562 	if (!RXDMA_TABLE_VALID(table)) {
1563 		NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
1564 		    " npi_rxdma_dump_rdc_table"
1565 		    " Illegal RDC Rable Number %d \n",
1566 		    table));
1567 		return (NPI_RXDMA_TABLE_INVALID);
1568 	}
1569 	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
1570 	    "\n Register Dump for RDC Table %d \n",
1571 	    table));
1572 	offset = REG_RDC_TABLE_OFFSET(table);
1573 	for (tbl_offset = 0; tbl_offset < NXGE_MAX_RDCS; tbl_offset++) {
1574 		NXGE_REG_RD64(handle, offset, &value);
1575 		NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
1576 		    " 0x%08llx 0x%08llx \n",
1577 		    offset, value));
1578 		offset += 8;
1579 	}
1580 	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
1581 	    "\n Register Dump for RDC Table %d done\n",
1582 	    table));
1583 	return (NPI_SUCCESS);
1584 
1585 }
1586 
1587 npi_status_t
1588 npi_rxdma_rdc_rbr_stat_get(npi_handle_t handle, uint8_t rdc,
1589 			    rbr_stat_t *rbr_stat)
1590 {
1591 
1592 	ASSERT(RXDMA_CHANNEL_VALID(rdc));
1593 	if (!RXDMA_CHANNEL_VALID(rdc)) {
1594 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
1595 		    " rxdma_rdc_rbr_stat_get"
1596 		    " Illegal RDC Number %d \n",
1597 		    rdc));
1598 		return (NPI_RXDMA_RDC_INVALID);
1599 	}
1600 
1601 	RXDMA_REG_READ64(handle, RBR_STAT_REG, rdc, &rbr_stat->value);
1602 	return (NPI_SUCCESS);
1603 }
1604 
1605 /*
1606  * npi_rxdma_rdc_rbr_head_get
1607  * Gets the current rbr head pointer.
1608  *
1609  * Inputs:
1610  *      handle:	opaque handle interpreted by the underlying OS
1611  *	rdc:		RX DMA Channel number
1612  *	hdptr		ptr to write the rbr head value
1613  *
1614  * Return:
1615  * NPI_SUCCESS
1616  * NPI_RXDMA_RDC_INVALID
1617  */
1618 npi_status_t
1619 npi_rxdma_rdc_rbr_head_get(npi_handle_t handle,
1620 			    uint8_t rdc, addr44_t *hdptr)
1621 {
1622 	rbr_hdh_t hh_ptr;
1623 	rbr_hdl_t hl_ptr;
1624 
1625 	ASSERT(RXDMA_CHANNEL_VALID(rdc));
1626 	if (!RXDMA_CHANNEL_VALID(rdc)) {
1627 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
1628 		    " rxdma_rdc_rbr_head_get"
1629 		    " Illegal RDC Number %d \n",
1630 		    rdc));
1631 		return (NPI_RXDMA_RDC_INVALID);
1632 	}
1633 	hh_ptr.value = 0;
1634 	hl_ptr.value = 0;
1635 	RXDMA_REG_READ64(handle, RBR_HDH_REG, rdc, &hh_ptr.value);
1636 	RXDMA_REG_READ64(handle, RBR_HDL_REG, rdc, &hl_ptr.value);
1637 	hdptr->bits.ldw = hl_ptr.bits.ldw.head_l << 2;
1638 	hdptr->bits.hdw = hh_ptr.bits.ldw.head_h;
1639 	return (NPI_SUCCESS);
1640 
1641 }
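
/*
 * A minimal sketch showing how a caller might reassemble the 44-bit RBR
 * head address returned above, assuming the addr44_t layout used in this
 * file (a 32-bit ldw word plus a high hdw word):
 *
 *	addr44_t head;
 *	uint64_t head_addr;
 *
 *	(void) npi_rxdma_rdc_rbr_head_get(handle, rdc, &head);
 *	head_addr = ((uint64_t)head.bits.hdw << 32) | head.bits.ldw;
 */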
1642 
1643 npi_status_t
1644 npi_rxdma_rdc_rcr_qlen_get(npi_handle_t handle, uint8_t rdc,
1645 			    uint16_t *rcr_qlen)
1646 {
1647 
1648 	rcrstat_a_t stats;
1649 
1650 	ASSERT(RXDMA_CHANNEL_VALID(rdc));
1651 	if (!RXDMA_CHANNEL_VALID(rdc)) {
1652 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
1653 		    " rxdma_rdc_rcr_qlen_get"
1654 		    " Illegal RDC Number %d \n",
1655 		    rdc));
1656 		return (NPI_RXDMA_RDC_INVALID);
1657 	}
1658 
1659 	RXDMA_REG_READ64(handle, RCRSTAT_A_REG, rdc, &stats.value);
1660 	*rcr_qlen =  stats.bits.ldw.qlen;
1661 	NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
1662 	    " rxdma_rdc_rcr_qlen_get"
1663 	    " RDC %d qlen %x qlen %x\n",
1664 	    rdc, *rcr_qlen, stats.bits.ldw.qlen));
1665 	return (NPI_SUCCESS);
1666 }
1667 
1668 npi_status_t
1669 npi_rxdma_rdc_rcr_tail_get(npi_handle_t handle,
1670 			    uint8_t rdc, addr44_t *tail_addr)
1671 {
1672 
1673 	rcrstat_b_t th_ptr;
1674 	rcrstat_c_t tl_ptr;
1675 
1676 	ASSERT(RXDMA_CHANNEL_VALID(rdc));
1677 	if (!RXDMA_CHANNEL_VALID(rdc)) {
1678 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
1679 		    " rxdma_rdc_rcr_tail_get"
1680 		    " Illegal RDC Number %d \n",
1681 		    rdc));
1682 		return (NPI_RXDMA_RDC_INVALID);
1683 	}
1684 	th_ptr.value = 0;
1685 	tl_ptr.value = 0;
1686 	RXDMA_REG_READ64(handle, RCRSTAT_B_REG, rdc, &th_ptr.value);
1687 	RXDMA_REG_READ64(handle, RCRSTAT_C_REG, rdc, &tl_ptr.value);
1688 	tail_addr->bits.ldw = tl_ptr.bits.ldw.tlptr_l << 3;
1689 	tail_addr->bits.hdw = th_ptr.bits.ldw.tlptr_h;
1690 	NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
1691 	    " rxdma_rdc_rcr_tail_get"
1692 	    " RDC %d rcr_tail %llx tl %x\n",
1693 	    rdc, tl_ptr.value,
1694 	    tl_ptr.bits.ldw.tlptr_l));
1695 
1696 	return (NPI_SUCCESS);
1697 
1698 
1699 }
1700 
1701 /*
1702  * npi_rxdma_rxctl_fifo_error_intr_set
1703  * Configure The RX ctrl fifo error interrupt generation
1704  *
1705  * Inputs:
1706  *      handle:	opaque handle interpreted by the underlying OS
1707  *	mask:	rx_ctl_dat_fifo_mask_t specifying the errors
1708  * valid fields in  rx_ctl_dat_fifo_mask_t structure are:
1709  * zcp_eop_err, ipp_eop_err, id_mismatch. If a field is set
1710  * to 1, we will enable interrupt generation for the
1711  * corresponding error condition. In the hardware, the bit(s)
1712  * have to be cleared to enable interrupt.
1713  *
1714  * Return:
1715  * NPI_SUCCESS
1716  *
1717  */
1718 npi_status_t
1719 npi_rxdma_rxctl_fifo_error_intr_set(npi_handle_t handle,
1720 				    rx_ctl_dat_fifo_mask_t *mask)
1721 {
1722 	uint64_t offset;
1723 	rx_ctl_dat_fifo_mask_t intr_mask;
1724 	offset = RX_CTL_DAT_FIFO_MASK_REG;
1725 	NXGE_REG_RD64(handle, offset, &intr_mask.value);
1726 
1727 	if (mask->bits.ldw.ipp_eop_err) {
1728 		intr_mask.bits.ldw.ipp_eop_err = 0;
1729 	}
1730 
1731 	if (mask->bits.ldw.zcp_eop_err) {
1732 		intr_mask.bits.ldw.zcp_eop_err = 0;
1733 	}
1734 
1735 	if (mask->bits.ldw.id_mismatch) {
1736 		intr_mask.bits.ldw.id_mismatch = 0;
1737 	}
1738 
1739 	NXGE_REG_WR64(handle, offset, intr_mask.value);
1740 	return (NPI_SUCCESS);
1741 }
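
/*
 * A minimal sketch for the routine above: request interrupts for all three
 * error conditions it understands (the routine clears the corresponding
 * hardware mask bits for each field set to 1 here).
 *
 *	rx_ctl_dat_fifo_mask_t mask;
 *
 *	mask.value = 0;
 *	mask.bits.ldw.ipp_eop_err = 1;
 *	mask.bits.ldw.zcp_eop_err = 1;
 *	mask.bits.ldw.id_mismatch = 1;
 *	(void) npi_rxdma_rxctl_fifo_error_intr_set(handle, &mask);
 */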
1742 
1743 /*
1744  * npi_rxdma_rxctl_fifo_error_stat_get
1745  * Read The RX ctrl fifo error Status
1746  *
1747  * Inputs:
1748  *      handle:	opaque handle interpreted by the underlying OS
1749  *	stat:	rx_ctl_dat_fifo_stat_t to read the errors to
1750  * valid fields in  rx_ctl_dat_fifo_stat_t structure are:
1751  * zcp_eop_err, ipp_eop_err, id_mismatch.
1752  * Return:
1753  * NPI_SUCCESS
1754  *
1755  */
1756 npi_status_t
1757 npi_rxdma_rxctl_fifo_error_intr_get(npi_handle_t handle,
1758 			    rx_ctl_dat_fifo_stat_t *stat)
1759 {
1760 	uint64_t offset = RX_CTL_DAT_FIFO_STAT_REG;
1761 	NXGE_REG_RD64(handle, offset, &stat->value);
1762 	return (NPI_SUCCESS);
1763 }
1764 
1765 npi_status_t
1766 npi_rxdma_rdc_rcr_pktread_update(npi_handle_t handle, uint8_t channel,
1767 				    uint16_t pkts_read)
1768 {
1769 
1770 	rx_dma_ctl_stat_t	cs;
1771 	uint16_t min_read = 0;
1772 
1773 	ASSERT(RXDMA_CHANNEL_VALID(channel));
1774 	if (!RXDMA_CHANNEL_VALID(channel)) {
1775 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
1776 		    " npi_rxdma_rdc_rcr_pktread_update ",
1777 		    " channel %d", channel));
1778 		return (NPI_FAILURE | NPI_RXDMA_CHANNEL_INVALID(channel));
1779 	}
1780 
1781 	if ((pkts_read < min_read) || (pkts_read > 512)) {
1782 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
1783 		    " npi_rxdma_rdc_rcr_pktread_update ",
1784 		    " pkts %d out of bound", pkts_read));
1785 		return (NPI_RXDMA_OPCODE_INVALID(pkts_read));
1786 	}
1787 
1788 	RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
1789 	    &cs.value);
1790 	cs.bits.ldw.pktread = pkts_read;
1791 	RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG,
1792 	    channel, cs.value);
1793 
1794 	return (NPI_SUCCESS);
1795 }
1796 
1797 npi_status_t
1798 npi_rxdma_rdc_rcr_bufread_update(npi_handle_t handle, uint8_t channel,
1799 					    uint16_t bufs_read)
1800 {
1801 
1802 	rx_dma_ctl_stat_t	cs;
1803 	uint16_t min_read = 0;
1804 
1805 	ASSERT(RXDMA_CHANNEL_VALID(channel));
1806 	if (!RXDMA_CHANNEL_VALID(channel)) {
1807 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
1808 		    " npi_rxdma_rdc_rcr_bufread_update ",
1809 		    " channel %d", channel));
1810 		return (NPI_FAILURE | NPI_RXDMA_CHANNEL_INVALID(channel));
1811 	}
1812 
1813 	if ((bufs_read < min_read) || (bufs_read > 512)) {
1814 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
1815 		    " npi_rxdma_rdc_rcr_bufread_update ",
1816 		    " bufs read %d out of bound", bufs_read));
1817 		return (NPI_RXDMA_OPCODE_INVALID(bufs_read));
1818 	}
1819 
1820 	RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
1821 	    &cs.value);
1822 	cs.bits.ldw.ptrread = bufs_read;
1823 	RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG,
1824 	    channel, cs.value);
1825 
1826 	return (NPI_SUCCESS);
1827 }
1828 
1829 npi_status_t
1830 npi_rxdma_rdc_rcr_read_update(npi_handle_t handle, uint8_t channel,
1831 				    uint16_t pkts_read, uint16_t bufs_read)
1832 {
1833 
1834 	rx_dma_ctl_stat_t	cs;
1835 
1836 	ASSERT(RXDMA_CHANNEL_VALID(channel));
1837 	if (!RXDMA_CHANNEL_VALID(channel)) {
1838 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
1839 		    " npi_rxdma_rdc_rcr_read_update ",
1840 		    " channel %d", channel));
1841 		return (NPI_FAILURE | NPI_RXDMA_CHANNEL_INVALID(channel));
1842 	}
1843 
1844 	NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
1845 	    " npi_rxdma_rdc_rcr_read_update "
1846 	    " bufs read %d pkt read %d",
1847 	    bufs_read, pkts_read));
1848 
1849 	RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
1850 	    &cs.value);
1851 
1852 	NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
1853 	    " npi_rxdma_rdc_rcr_read_update: "
1854 	    " value: 0x%llx bufs read %d pkt read %d",
1855 	    cs.value,
1856 	    cs.bits.ldw.ptrread, cs.bits.ldw.pktread));
1857 
1858 	cs.bits.ldw.pktread = pkts_read;
1859 	cs.bits.ldw.ptrread = bufs_read;
1860 
1861 	RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG,
1862 	    channel, cs.value);
1863 
1864 	RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
1865 	    &cs.value);
1866 
1867 	NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
1868 	    " npi_rxdma_rdc_rcr_read_update: read back after update "
1869 	    " value: 0x%llx bufs read %d pkt read %d",
1870 	    cs.value,
1871 	    cs.bits.ldw.ptrread, cs.bits.ldw.pktread));
1872 
1873 	return (NPI_SUCCESS);
1874 }
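/*
 * Usage sketch (illustrative only; "handle" is assumed to be a valid
 * NPI handle, "channel" a valid RDC, and the counts are hypothetical):
 * after servicing a batch of RCR entries, a caller reports how many
 * packets and buffer pointers it consumed so the hardware can reclaim
 * the corresponding completion ring and buffer ring space.
 *
 *	uint16_t npkts = 32;
 *	uint16_t nbufs = 48;
 *
 *	(void) npi_rxdma_rdc_rcr_read_update(handle, channel,
 *	    npkts, nbufs);
 */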
1875 
1876 /*
1877  * npi_rxdma_channel_mex_set():
1878  *	This function is called to arm the DMA channel with
1879  *	mailbox updating capability. Software needs to rearm
1880  *	for each update by writing to the control and status register.
1881  *
1882  * Parameters:
1883  *	handle		- NPI handle (virtualization flag must be defined).
1884  *	channel		- logical RXDMA channel from 0 to 23.
1885  *			  (If virtualization flag is not set, then
1886  *			   logical channel is the same as the hardware
1887  *			   channel number).
1888  *
1889  * Return:
1890  *	NPI_SUCCESS		- If enabling mailbox updates for the
1891  *				  channel completes successfully.
1892  *
1893  *	Error:
1894  *	NPI error status code
1895  */
1896 npi_status_t
1897 npi_rxdma_channel_mex_set(npi_handle_t handle, uint8_t channel)
1898 {
1899 	return (npi_rxdma_channel_control(handle, RXDMA_MEX_SET, channel));
1900 }
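/*
 * Usage sketch (illustrative only; "handle" and "channel" are assumed
 * to be supplied by the caller): since software must rearm mailbox
 * updates after each one, an interrupt handler might, for example,
 * clear the RCR timeout event it just serviced and then rearm MEX.
 *
 *	(void) npi_rxdma_channel_rcrto_clear(handle, channel);
 *	(void) npi_rxdma_channel_mex_set(handle, channel);
 */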
1901 
1902 /*
1903  * npi_rxdma_channel_rcrto_clear():
1904  *	This function is called to reset RCRTO bit to 0.
1905  *
1906  * Parameters:
1907  *	handle		- NPI handle (virtualization flag must be defined).
1908  *	channel		- logical RXDMA channel from 0 to 23.
1909  *			  (If virtualization flag is not set, then
1910  *			   logical channel is the same as the hardware
1911  *			   channel number).
1912  * Return:
1913  *	NPI_SUCCESS
1914  *
1915  *	Error:
1916  *	NPI error status code
1917  */
1918 npi_status_t
1919 npi_rxdma_channel_rcrto_clear(npi_handle_t handle, uint8_t channel)
1920 {
1921 	return (npi_rxdma_channel_control(handle, RXDMA_RCRTO_CLEAR, channel));
1922 }
1923 
1924 /*
1925  * npi_rxdma_channel_pt_drop_pkt_clear():
1926  *	This function is called to clear the port drop packet bit (debug).
1927  *
1928  * Parameters:
1929  *	handle		- NPI handle (virtualization flag must be defined).
1930  *	channel		- logical RXDMA channel from 0 to 23.
1931  *			  (If virtualization flag is not set, then
1932  *			   logical channel is the same as the hardware
1933  *			   channel number).
1934  * Return:
1935  *	NPI_SUCCESS
1936  *
1937  *	Error:
1938  *	NPI error status code
1939  */
1940 npi_status_t
1941 npi_rxdma_channel_pt_drop_pkt_clear(npi_handle_t handle, uint8_t channel)
1942 {
1943 	return (npi_rxdma_channel_control(handle, RXDMA_PT_DROP_PKT_CLEAR,
1944 	    channel));
1945 }
1946 
1947 /*
1948  * npi_rxdma_channel_wred_drop_clear():
1949  *	This function is called to clear the wred drop bit (debug only).
1950  *
1951  * Parameters:
1952  *	handle		- NPI handle (virtualization flag must be defined).
1953  *	channel		- logical RXDMA channel from 0 to 23.
1954  *			  (If virtualization flag is not set, then
1955  *			   logical channel is the same as the hardware
1956  *			   channel number).
1957  * Return:
1958  *	NPI_SUCCESS
1959  *
1960  *	Error:
1961  *	NPI error status code
1962  */
1963 npi_status_t
1964 npi_rxdma_channel_wred_dop_clear(npi_handle_t handle, uint8_t channel)
1965 {
1966 	return (npi_rxdma_channel_control(handle, RXDMA_WRED_DROP_CLEAR,
1967 	    channel));
1968 }
1969 
1970 /*
1971  * npi_rxdma_channel_rcr_shfull_clear():
1972  *	This function is called to clear RCR shadow full bit.
1973  *
1974  * Parameters:
1975  *	handle		- NPI handle (virtualization flag must be defined).
1976  *	channel		- logical RXDMA channel from 0 to 23.
1977  *			  (If virtualization flag is not set, then
1978  *			   logical channel is the same as the hardware
1979  *			   channel number).
1980  * Return:
1981  *	NPI_SUCCESS
1982  *
1983  *	Error:
1984  *	NPI error status code
1985  */
1986 npi_status_t
1987 npi_rxdma_channel_rcr_shfull_clear(npi_handle_t handle, uint8_t channel)
1988 {
1989 	return (npi_rxdma_channel_control(handle, RXDMA_RCR_SFULL_CLEAR,
1990 	    channel));
1991 }
1992 
1993 /*
1994  * npi_rxdma_channel_rcr_full_clear():
1995  *	This function is called to clear RCR full bit.
1996  *
1997  * Parameters:
1998  *	handle		- NPI handle (virtualization flag must be defined).
1999  *	channel		- logical RXDMA channel from 0 to 23.
2000  *			  (If virtualization flag is not set, then
2001  *			   logical channel is the same as the hardware
2002  *			   channel number).
2003  * Return:
2004  *	NPI_SUCCESS
2005  *
2006  *	Error:
2007  *	NPI error status code
2008  */
2009 npi_status_t
2010 npi_rxdma_channel_rcr_full_clear(npi_handle_t handle, uint8_t channel)
2011 {
2012 	return (npi_rxdma_channel_control(handle, RXDMA_RCR_FULL_CLEAR,
2013 	    channel));
2014 }
2015 
2016 npi_status_t
2017 npi_rxdma_channel_rbr_empty_clear(npi_handle_t handle, uint8_t channel)
2018 {
2019 	return (npi_rxdma_channel_control(handle,
2020 	    RXDMA_RBR_EMPTY_CLEAR, channel));
2021 }
2022 
2023 npi_status_t
2024 npi_rxdma_channel_cs_clear_all(npi_handle_t handle, uint8_t channel)
2025 {
2026 	return (npi_rxdma_channel_control(handle, RXDMA_CS_CLEAR_ALL, channel));
2027 }
2028 
2029 /*
2030  * npi_rxdma_channel_control():
2031  *	This function is called to control a receive DMA channel
2032  *	for arming the channel with mailbox updates and for clearing
2033  *	various event status bits in the control and status register.
2034  *
2035  * Parameters:
2036  *	handle		- NPI handle (virtualization flag must be defined).
2037  *	control		- NPI defined control type supported:
2038  *				- RXDMA_MEX_SET
2039  * 				- RXDMA_RCRTO_CLEAR
2040  *				- RXDMA_PT_DROP_PKT_CLEAR
2041  *				- RXDMA_WRED_DROP_CLEAR
2042  *				- RXDMA_RCR_SFULL_CLEAR
2043  *				- RXDMA_RCR_FULL_CLEAR
2044  *				- RXDMA_RBR_PRE_EMPTY_CLEAR
2045  *				- RXDMA_RBR_EMPTY_CLEAR
2046  *	channel		- logical RXDMA channel from 0 to 23.
2047  *			  (If virtualization flag is not set, then
2048  *			   logical channel is the same as the hardware channel number).
2049  * Return:
2050  *	NPI_SUCCESS
2051  *
2052  *	Error:
2053  *	NPI error status code
2054  */
2055 npi_status_t
2056 npi_rxdma_channel_control(npi_handle_t handle, rxdma_cs_cntl_t control,
2057 			uint8_t channel)
2058 {
2059 
2060 	rx_dma_ctl_stat_t	cs;
2061 
2062 	ASSERT(RXDMA_CHANNEL_VALID(channel));
2063 	if (!RXDMA_CHANNEL_VALID(channel)) {
2064 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
2065 		    " npi_rxdma_channel_control",
2066 		    " channel", channel));
2067 		return (NPI_FAILURE | NPI_RXDMA_CHANNEL_INVALID(channel));
2068 	}
2069 
2070 	switch (control) {
2071 	case RXDMA_MEX_SET:
2072 		RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
2073 		    &cs.value);
2074 		cs.bits.hdw.mex = 1;
2075 		RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG,
2076 		    channel, cs.value);
2077 		break;
2078 
2079 	case RXDMA_RCRTO_CLEAR:
2080 		RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
2081 		    &cs.value);
2082 		cs.bits.hdw.rcrto = 0;
2083 		RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
2084 		    cs.value);
2085 		break;
2086 
2087 	case RXDMA_PT_DROP_PKT_CLEAR:
2088 		RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
2089 		    &cs.value);
2090 		cs.bits.hdw.port_drop_pkt = 0;
2091 		RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
2092 		    cs.value);
2093 		break;
2094 
2095 	case RXDMA_WRED_DROP_CLEAR:
2096 		RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
2097 		    &cs.value);
2098 		cs.bits.hdw.wred_drop = 0;
2099 		RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
2100 		    cs.value);
2101 		break;
2102 
2103 	case RXDMA_RCR_SFULL_CLEAR:
2104 		RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
2105 		    &cs.value);
2106 		cs.bits.hdw.rcr_shadow_full = 0;
2107 		RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
2108 		    cs.value);
2109 		break;
2110 
2111 	case RXDMA_RCR_FULL_CLEAR:
2112 		RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
2113 		    &cs.value);
2114 		cs.bits.hdw.rcrfull = 0;
2115 		RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
2116 		    cs.value);
2117 		break;
2118 
2119 	case RXDMA_RBR_PRE_EMPTY_CLEAR:
2120 		RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
2121 		    &cs.value);
2122 		cs.bits.hdw.rbr_pre_empty = 0;
2123 		RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
2124 		    cs.value);
2125 		break;
2126 
2127 	case RXDMA_RBR_EMPTY_CLEAR:
2128 		RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
2129 		    &cs.value);
2130 		cs.bits.hdw.rbr_empty = 1;
2131 		RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
2132 		    cs.value);
2133 		break;
2134 
2135 	case RXDMA_CS_CLEAR_ALL:
2136 		cs.value = 0;
2137 		RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
2138 		    cs.value);
2139 		break;
2140 
2141 	default:
2142 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
2143 		    "npi_rxdma_channel_control",
2144 		    "control", control));
2145 		return (NPI_FAILURE | NPI_RXDMA_OPCODE_INVALID(channel));
2146 	}
2147 
2148 	return (NPI_SUCCESS);
2149 }
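/*
 * Usage sketch (illustrative only; "handle" and "channel" are assumed
 * to be supplied by the caller): the thin wrappers above all funnel
 * into this function, so clearing the RCR timeout event directly is
 * equivalent to calling npi_rxdma_channel_rcrto_clear().
 *
 *	npi_status_t rs;
 *
 *	rs = npi_rxdma_channel_control(handle, RXDMA_RCRTO_CLEAR, channel);
 *	if (rs != NPI_SUCCESS)
 *		return (rs);
 */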
2150 
2151 /*
2152  * npi_rxdma_control_status():
2153  *	This function is called to operate on the control
2154  *	and status register.
2155  *
2156  * Parameters:
2157  *	handle		- NPI handle
2158  *	op_mode		- OP_GET: get hardware control and status
2159  *			  OP_SET: set hardware control and status
2160  *			  OP_UPDATE: update hardware control and status.
2161  *			  (OP_CLEAR is not supported by this function.)
2162  *	channel		- hardware RXDMA channel from 0 to 23.
2163  *	cs_p		- pointer to hardware defined control and status
2164  *			  structure.
2165  * Return:
2166  *	NPI_SUCCESS
2167  *
2168  *	Error:
2169  *	NPI error status code
2170  */
2171 npi_status_t
2172 npi_rxdma_control_status(npi_handle_t handle, io_op_t op_mode,
2173 			uint8_t channel, p_rx_dma_ctl_stat_t cs_p)
2174 {
2175 	int			status = NPI_SUCCESS;
2176 	rx_dma_ctl_stat_t	cs;
2177 
2178 	ASSERT(RXDMA_CHANNEL_VALID(channel));
2179 	if (!RXDMA_CHANNEL_VALID(channel)) {
2180 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
2181 		    "npi_rxdma_control_status",
2182 		    "channel", channel));
2183 		return (NPI_FAILURE | NPI_RXDMA_CHANNEL_INVALID(channel));
2184 	}
2185 
2186 	switch (op_mode) {
2187 	case OP_GET:
2188 		RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
2189 		    &cs_p->value);
2190 		break;
2191 
2192 	case OP_SET:
2193 		RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
2194 		    cs_p->value);
2195 		break;
2196 
2197 	case OP_UPDATE:
2198 		RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
2199 		    &cs.value);
2200 		RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
2201 		    cs_p->value | cs.value);
2202 		break;
2203 
2204 	default:
2205 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
2206 		    "npi_rxdma_control_status",
2207 		    "control", op_mode));
2208 		return (NPI_FAILURE | NPI_RXDMA_OPCODE_INVALID(channel));
2209 	}
2210 
2211 	return (status);
2212 }
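/*
 * Usage sketch (illustrative only; "handle" and "channel" are assumed
 * to be supplied by the caller): a read-modify-write of the control
 * and status register using the OP_GET/OP_SET pair, here arming
 * mailbox updates.
 *
 *	rx_dma_ctl_stat_t cs;
 *
 *	(void) npi_rxdma_control_status(handle, OP_GET, channel, &cs);
 *	cs.bits.hdw.mex = 1;
 *	(void) npi_rxdma_control_status(handle, OP_SET, channel, &cs);
 */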
2213 
2214 /*
2215  * npi_rxdma_event_mask():
2216  *	This function is called to operate on the event mask
2217  *	register which is used for generating interrupts.
2218  *
2219  * Parameters:
2220  *	handle		- NPI handle
2221  *	op_mode		- OP_GET: get hardware event mask
2222  *			  OP_SET: set hardware interrupt event masks
2223  *			  OP_UPDATE: OR the given mask into the current mask.
2224  *	channel		- hardware RXDMA channel from 0 to 23.
2225  *	mask_p		- pointer to hardware defined event mask
2226  *			  structure.
2227  * Return:
2228  *	NPI_SUCCESS		- If the operation completes successfully.
2229  *
2230  *	Error:
2231  *	NPI error status code
2232  */
2233 npi_status_t
2234 npi_rxdma_event_mask(npi_handle_t handle, io_op_t op_mode,
2235 		uint8_t channel, p_rx_dma_ent_msk_t mask_p)
2236 {
2237 	int			status = NPI_SUCCESS;
2238 	rx_dma_ent_msk_t	mask;
2239 
2240 	ASSERT(RXDMA_CHANNEL_VALID(channel));
2241 	if (!RXDMA_CHANNEL_VALID(channel)) {
2242 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
2243 		    "npi_rxdma_event_mask",
2244 		    "channel", channel));
2245 		return (NPI_FAILURE | NPI_RXDMA_CHANNEL_INVALID(channel));
2246 	}
2247 
2248 	switch (op_mode) {
2249 	case OP_GET:
2250 		RXDMA_REG_READ64(handle, RX_DMA_ENT_MSK_REG, channel,
2251 		    &mask_p->value);
2252 		break;
2253 
2254 	case OP_SET:
2255 		RXDMA_REG_WRITE64(handle, RX_DMA_ENT_MSK_REG, channel,
2256 		    mask_p->value);
2257 		break;
2258 
2259 	case OP_UPDATE:
2260 		RXDMA_REG_READ64(handle, RX_DMA_ENT_MSK_REG, channel,
2261 		    &mask.value);
2262 		RXDMA_REG_WRITE64(handle, RX_DMA_ENT_MSK_REG, channel,
2263 		    mask_p->value | mask.value);
2264 		break;
2265 
2266 	default:
2267 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
2268 		    "npi_rxdma_event_mask",
2269 		    "eventmask", op_mode));
2270 		return (NPI_FAILURE | NPI_RXDMA_OPCODE_INVALID(channel));
2271 	}
2272 
2273 	return (status);
2274 }
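/*
 * Usage sketch (illustrative only; "handle" and "channel" are assumed
 * to be supplied by the caller): save the current event mask with
 * OP_GET before a maintenance operation and restore it afterwards
 * with OP_SET.
 *
 *	rx_dma_ent_msk_t saved;
 *
 *	(void) npi_rxdma_event_mask(handle, OP_GET, channel, &saved);
 *	... perform the operation ...
 *	(void) npi_rxdma_event_mask(handle, OP_SET, channel, &saved);
 */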
2275 
2276 /*
2277  * npi_rxdma_event_mask_config():
2278  *	This function is called to operate on the event mask
2279  *	register, which controls interrupt generation for the
2280  *	receive DMA channel.
2281  *
2282  * Parameters:
2283  *	handle		- NPI handle
2284  *	op_mode		- OP_GET: get hardware event mask
2285  *			  OP_SET: set hardware interrupt event masks
2286  *			  OP_CLEAR: mask all events (writes CFG_RXDMA_MASK_ALL).
2287  *	channel		- hardware RXDMA channel from 0 to 23.
2288  *	mask_cfgp		- pointer to NPI defined event mask
2289  *			  enum data type.
2290  * Return:
2291  *	NPI_SUCCESS		- If the operation completes successfully.
2292  *
2293  *	Error:
2294  *	NPI error status code
2295  */
2296 npi_status_t
2297 npi_rxdma_event_mask_config(npi_handle_t handle, io_op_t op_mode,
2298 		uint8_t channel, rxdma_ent_msk_cfg_t *mask_cfgp)
2299 {
2300 	int		status = NPI_SUCCESS;
2301 	uint64_t	configuration = *mask_cfgp;
2302 	uint64_t	value;
2303 
2304 	ASSERT(RXDMA_CHANNEL_VALID(channel));
2305 	if (!RXDMA_CHANNEL_VALID(channel)) {
2306 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
2307 		    "npi_rxdma_event_mask_config",
2308 		    "channel", channel));
2309 		return (NPI_FAILURE | NPI_RXDMA_CHANNEL_INVALID(channel));
2310 	}
2311 
2312 	switch (op_mode) {
2313 	case OP_GET:
2314 		RXDMA_REG_READ64(handle, RX_DMA_ENT_MSK_REG, channel,
2315 		    (uint64_t *)mask_cfgp);
2316 		break;
2317 
2318 	case OP_SET:
2319 		RXDMA_REG_WRITE64(handle, RX_DMA_ENT_MSK_REG, channel,
2320 		    configuration);
2321 		break;
2322 
2323 	case OP_UPDATE:
2324 		RXDMA_REG_READ64(handle, RX_DMA_ENT_MSK_REG, channel, &value);
2325 		RXDMA_REG_WRITE64(handle, RX_DMA_ENT_MSK_REG, channel,
2326 		    configuration | value);
2327 		break;
2328 
2329 	case OP_CLEAR:
2330 		RXDMA_REG_WRITE64(handle, RX_DMA_ENT_MSK_REG, channel,
2331 		    CFG_RXDMA_MASK_ALL);
2332 		break;
2333 	default:
2334 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
2335 		    "npi_rxdma_event_mask_config",
2336 		    "eventmask", op_mode));
2337 		return (NPI_FAILURE | NPI_RXDMA_OPCODE_INVALID(channel));
2338 	}
2339 
2340 	return (status);
2341 }
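/*
 * Usage sketch (illustrative only; "handle", "channel" and "saved_cfg"
 * are assumed to be supplied by the caller): mask all receive DMA
 * events on a channel with OP_CLEAR (which writes CFG_RXDMA_MASK_ALL),
 * then restore the previously saved configuration with OP_SET.
 *
 *	rxdma_ent_msk_cfg_t saved_cfg;
 *
 *	(void) npi_rxdma_event_mask_config(handle, OP_GET, channel,
 *	    &saved_cfg);
 *	(void) npi_rxdma_event_mask_config(handle, OP_CLEAR, channel,
 *	    &saved_cfg);
 *	... critical section with all RXDMA events masked ...
 *	(void) npi_rxdma_event_mask_config(handle, OP_SET, channel,
 *	    &saved_cfg);
 */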
2342