/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <hpi_rxdma.h>
#include <hxge_common.h>
#include <hxge_impl.h>

#define	RXDMA_RESET_TRY_COUNT	5
#define	RXDMA_RESET_DELAY	5

#define	RXDMA_OP_DISABLE	0
#define	RXDMA_OP_ENABLE		1
#define	RXDMA_OP_RESET		2

#define	RCR_TIMEOUT_ENABLE	1
#define	RCR_TIMEOUT_DISABLE	2
#define	RCR_THRESHOLD		4

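/*
 * hpi_rxdma_cfg_logical_page_handle():
 *	Set the logical page handle for RDC channel 'rdc' by writing
 *	the handle value into the channel's RDC_PAGE_HANDLE register.
 */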
hpi_status_t
hpi_rxdma_cfg_logical_page_handle(hpi_handle_t handle, uint8_t rdc,
    uint64_t page_handle)
{
	rdc_page_handle_t page_hdl;

	if (!RXDMA_CHANNEL_VALID(rdc)) {
		HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
		    "rxdma_cfg_logical_page_handle"
		    " Illegal RDC number %d \n", rdc));
		return (HPI_RXDMA_RDC_INVALID);
	}

	page_hdl.value = 0;
	page_hdl.bits.handle = (uint32_t)page_handle;

	RXDMA_REG_WRITE64(handle, RDC_PAGE_HANDLE, rdc, page_hdl.value);

	return (HPI_SUCCESS);
}

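/*
 * hpi_rxdma_cfg_rdc_wait_for_qst():
 *	Poll the channel's RDC_RX_CFG1 register until the QST (queue
 *	stopped) bit is set, retrying up to RXDMA_RESET_TRY_COUNT times
 *	with a small delay between reads.  Returns HPI_FAILURE if the
 *	bit never sets.
 */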
hpi_status_t
hpi_rxdma_cfg_rdc_wait_for_qst(hpi_handle_t handle, uint8_t rdc)
{
	rdc_rx_cfg1_t	cfg;
	uint32_t	count = RXDMA_RESET_TRY_COUNT;
	uint32_t	delay_time = RXDMA_RESET_DELAY;

	RXDMA_REG_READ64(handle, RDC_RX_CFG1, rdc, &cfg.value);

	while ((count--) && (cfg.bits.qst == 0)) {
		HXGE_DELAY(delay_time);
		RXDMA_REG_READ64(handle, RDC_RX_CFG1, rdc, &cfg.value);
	}

	if (cfg.bits.qst == 0)
		return (HPI_FAILURE);

	return (HPI_SUCCESS);
}

/* RX DMA functions */
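/*
 * hpi_rxdma_cfg_rdc_ctl():
 *	Common helper for enabling, disabling, or resetting an RDC
 *	channel through its RDC_RX_CFG1 register.  For each operation
 *	the QST bit is polled to confirm that the channel reached the
 *	expected state before returning.
 */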
static hpi_status_t
hpi_rxdma_cfg_rdc_ctl(hpi_handle_t handle, uint8_t rdc, uint8_t op)
{
	rdc_rx_cfg1_t cfg;
	uint32_t count = RXDMA_RESET_TRY_COUNT;
	uint32_t delay_time = RXDMA_RESET_DELAY;
	uint32_t error = HPI_RXDMA_ERROR_ENCODE(HPI_RXDMA_RESET_ERR, rdc);

	if (!RXDMA_CHANNEL_VALID(rdc)) {
		HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
		    "hpi_rxdma_cfg_rdc_ctl Illegal RDC number %d \n", rdc));
		return (HPI_RXDMA_RDC_INVALID);
	}

	switch (op) {
	case RXDMA_OP_ENABLE:
		RXDMA_REG_READ64(handle, RDC_RX_CFG1, rdc, &cfg.value);
		cfg.bits.enable = 1;
		RXDMA_REG_WRITE64(handle, RDC_RX_CFG1, rdc, cfg.value);

		HXGE_DELAY(delay_time);
		RXDMA_REG_READ64(handle, RDC_RX_CFG1, rdc, &cfg.value);

		while ((count--) && (cfg.bits.qst == 1)) {
			HXGE_DELAY(delay_time);
			RXDMA_REG_READ64(handle, RDC_RX_CFG1, rdc, &cfg.value);
		}
		if (cfg.bits.qst == 1) {
			return (HPI_FAILURE);
		}
		break;

	case RXDMA_OP_DISABLE:
		RXDMA_REG_READ64(handle, RDC_RX_CFG1, rdc, &cfg.value);
		cfg.bits.enable = 0;
		RXDMA_REG_WRITE64(handle, RDC_RX_CFG1, rdc, cfg.value);

		HXGE_DELAY(delay_time);
		if (hpi_rxdma_cfg_rdc_wait_for_qst(handle,
		    rdc) != HPI_SUCCESS) {
			HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
			    " hpi_rxdma_cfg_rdc_ctl"
			    " RXDMA_OP_DISABLE Failed for RDC %d \n",
			    rdc));
			return (error);
		}
		break;

	case RXDMA_OP_RESET:
		cfg.value = 0;
		cfg.bits.reset = 1;
		RXDMA_REG_WRITE64(handle, RDC_RX_CFG1, rdc, cfg.value);
		HXGE_DELAY(delay_time);
		RXDMA_REG_READ64(handle, RDC_RX_CFG1, rdc, &cfg.value);

		while ((count--) && (cfg.bits.qst == 0)) {
			HXGE_DELAY(delay_time);
			RXDMA_REG_READ64(handle, RDC_RX_CFG1, rdc, &cfg.value);
		}
		if (cfg.bits.qst == 0) {
			HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
			    " hpi_rxdma_cfg_rdc_ctl"
			    " Reset Failed for RDC %d \n", rdc));
			return (error);
		}
		break;

	default:
		return (HPI_RXDMA_SW_PARAM_ERROR);
	}

	return (HPI_SUCCESS);
}

hpi_status_t
hpi_rxdma_cfg_rdc_enable(hpi_handle_t handle, uint8_t rdc)
{
	return (hpi_rxdma_cfg_rdc_ctl(handle, rdc, RXDMA_OP_ENABLE));
}

hpi_status_t
hpi_rxdma_cfg_rdc_disable(hpi_handle_t handle, uint8_t rdc)
{
	return (hpi_rxdma_cfg_rdc_ctl(handle, rdc, RXDMA_OP_DISABLE));
}

hpi_status_t
hpi_rxdma_cfg_rdc_reset(hpi_handle_t handle, uint8_t rdc)
{
	return (hpi_rxdma_cfg_rdc_ctl(handle, rdc, RXDMA_OP_RESET));
}

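/*
 * hpi_rxdma_cfg_rdc_rcr_ctl():
 *	Helper used to update the RCR timeout and packet-threshold
 *	fields of a channel's RDC_RCR_CFG_B register.
 */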
static hpi_status_t
hpi_rxdma_cfg_rdc_rcr_ctl(hpi_handle_t handle, uint8_t rdc,
    uint8_t op, uint16_t param)
{
	rdc_rcr_cfg_b_t rcr_cfgb;

	if (!RXDMA_CHANNEL_VALID(rdc)) {
		HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
		    "rxdma_cfg_rdc_rcr_ctl Illegal RDC number %d \n", rdc));
		return (HPI_RXDMA_RDC_INVALID);
	}

	RXDMA_REG_READ64(handle, RDC_RCR_CFG_B, rdc, &rcr_cfgb.value);

	switch (op) {
	case RCR_TIMEOUT_ENABLE:
		rcr_cfgb.bits.timeout = (uint8_t)param;
		rcr_cfgb.bits.entout = 1;
		break;

	case RCR_THRESHOLD:
		rcr_cfgb.bits.pthres = param;
		break;

	case RCR_TIMEOUT_DISABLE:
		rcr_cfgb.bits.entout = 0;
		break;

	default:
		HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
		    "rxdma_cfg_rdc_rcr_ctl Illegal opcode %x \n", op));
		return (HPI_RXDMA_OPCODE_INVALID(rdc));
	}

	RXDMA_REG_WRITE64(handle, RDC_RCR_CFG_B, rdc, rcr_cfgb.value);
	return (HPI_SUCCESS);
}

hpi_status_t
hpi_rxdma_cfg_rdc_rcr_threshold(hpi_handle_t handle, uint8_t rdc,
    uint16_t rcr_threshold)
{
	return (hpi_rxdma_cfg_rdc_rcr_ctl(handle, rdc,
	    RCR_THRESHOLD, rcr_threshold));
}

hpi_status_t
hpi_rxdma_cfg_rdc_rcr_timeout(hpi_handle_t handle, uint8_t rdc,
    uint8_t rcr_timeout)
{
	return (hpi_rxdma_cfg_rdc_rcr_ctl(handle, rdc,
	    RCR_TIMEOUT_ENABLE, rcr_timeout));
}

/*
 * Configure the RDC channel's receive buffer ring (RBR), receive
 * completion ring (RCR), and mailbox from the supplied descriptor
 * configuration.
 */
hpi_status_t
hpi_rxdma_cfg_rdc_ring(hpi_handle_t handle, uint8_t rdc,
    rdc_desc_cfg_t *rdc_desc_cfg)
{
	rdc_rbr_cfg_a_t		cfga;
	rdc_rbr_cfg_b_t		cfgb;
	rdc_rx_cfg1_t		cfg1;
	rdc_rx_cfg2_t		cfg2;
	rdc_rcr_cfg_a_t		rcr_cfga;
	rdc_rcr_cfg_b_t		rcr_cfgb;
	rdc_page_handle_t	page_handle;

	if (!RXDMA_CHANNEL_VALID(rdc)) {
		HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
		    "rxdma_cfg_rdc_ring Illegal RDC number %d \n", rdc));
		return (HPI_RXDMA_RDC_INVALID);
	}

	cfga.value = 0;
	cfgb.value = 0;
	cfg1.value = 0;
	cfg2.value = 0;
	page_handle.value = 0;

	if (rdc_desc_cfg->mbox_enable == 1) {
		cfg1.bits.mbaddr_h = (rdc_desc_cfg->mbox_addr >> 32) & 0xfff;
		cfg2.bits.mbaddr_l = ((rdc_desc_cfg->mbox_addr &
		    RXDMA_CFIG2_MBADDR_L_MASK) >> RXDMA_CFIG2_MBADDR_L_SHIFT);

		/*
		 * Enable the RDC only after all of the configuration
		 * registers have been set up; otherwise a configuration
		 * fatal error will be returned (especially if the
		 * Hypervisor set up the logical pages with non-zero
		 * values).  This HPI function only sets up the
		 * configuration; call the enable function to enable
		 * the RDMC.
		 */
	}

	if (rdc_desc_cfg->full_hdr == 1)
		cfg2.bits.full_hdr = 1;

	if (RXDMA_BUFF_OFFSET_VALID(rdc_desc_cfg->offset)) {
		cfg2.bits.offset = rdc_desc_cfg->offset;
	} else {
		cfg2.bits.offset = SW_OFFSET_NO_OFFSET;
	}

	/* rbr config */
	cfga.value = (rdc_desc_cfg->rbr_addr &
	    (RBR_CFIG_A_STDADDR_MASK | RBR_CFIG_A_STDADDR_BASE_MASK));

	/* The remaining 20 bits in the DMA address form the handle */
	page_handle.bits.handle = (rdc_desc_cfg->rbr_addr >> 44) & 0xfffff;

	/*
	 * The RBR ring size must be a multiple of 64.
	 */
	if ((rdc_desc_cfg->rbr_len < RBR_DEFAULT_MIN_LEN) ||
	    (rdc_desc_cfg->rbr_len > RBR_DEFAULT_MAX_LEN) ||
	    (rdc_desc_cfg->rbr_len % 64)) {
		HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
		    "hpi_rxdma_cfg_rdc_ring Illegal RBR Queue Length %d \n",
		    rdc_desc_cfg->rbr_len));
		return (HPI_RXDMA_ERROR_ENCODE(HPI_RXDMA_RBRSZIE_INVALID, rdc));
	}

	/*
	 * The lower 6 bits are hardcoded to 0 and the higher 10 bits are
	 * stored in len.
	 */
	cfga.bits.len = rdc_desc_cfg->rbr_len >> 6;
	HPI_DEBUG_MSG((handle.function, HPI_RDC_CTL,
	    "hpi_rxdma_cfg_rdc_ring CFGA 0x%llx len %d (RBR LEN %d)\n",
	    cfga.value, cfga.bits.len, rdc_desc_cfg->rbr_len));

	/*
	 * bksize is 1 bit.
	 * Buffer Block Size. b0 - 4K; b1 - 8K.
	 */
	if (rdc_desc_cfg->page_size == SIZE_4KB)
		cfgb.bits.bksize = RBR_BKSIZE_4K;
	else if (rdc_desc_cfg->page_size == SIZE_8KB)
		cfgb.bits.bksize = RBR_BKSIZE_8K;
	else {
		HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
		    "rxdma_cfg_rdc_ring blksize: Illegal buffer size %d \n",
		    rdc_desc_cfg->page_size));
		return (HPI_RXDMA_BUFSZIE_INVALID);
	}

	/*
	 * Size 0 of packet buffer. b00 - 256; b01 - 512; b10 - 1K; b11 - resvd.
	 */
	if (rdc_desc_cfg->valid0) {
		if (rdc_desc_cfg->size0 == SIZE_256B)
			cfgb.bits.bufsz0 = RBR_BUFSZ0_256B;
		else if (rdc_desc_cfg->size0 == SIZE_512B)
			cfgb.bits.bufsz0 = RBR_BUFSZ0_512B;
		else if (rdc_desc_cfg->size0 == SIZE_1KB)
			cfgb.bits.bufsz0 = RBR_BUFSZ0_1K;
		else {
			HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
			    " rxdma_cfg_rdc_ring"
			    " blksize0: Illegal buffer size %x \n",
			    rdc_desc_cfg->size0));
			return (HPI_RXDMA_BUFSZIE_INVALID);
		}
		cfgb.bits.vld0 = 1;
	} else {
		cfgb.bits.vld0 = 0;
	}

	/*
	 * Size 1 of packet buffer. b0 - 1K; b1 - 2K.
	 */
	if (rdc_desc_cfg->valid1) {
		if (rdc_desc_cfg->size1 == SIZE_1KB)
			cfgb.bits.bufsz1 = RBR_BUFSZ1_1K;
		else if (rdc_desc_cfg->size1 == SIZE_2KB)
			cfgb.bits.bufsz1 = RBR_BUFSZ1_2K;
		else {
			HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
			    " rxdma_cfg_rdc_ring"
			    " blksize1: Illegal buffer size %x \n",
			    rdc_desc_cfg->size1));
			return (HPI_RXDMA_BUFSZIE_INVALID);
		}
		cfgb.bits.vld1 = 1;
	} else {
		cfgb.bits.vld1 = 0;
	}

	/*
	 * Size 2 of packet buffer. b0 - 2K; b1 - 4K.
	 */
	if (rdc_desc_cfg->valid2) {
		if (rdc_desc_cfg->size2 == SIZE_2KB)
			cfgb.bits.bufsz2 = RBR_BUFSZ2_2K;
		else if (rdc_desc_cfg->size2 == SIZE_4KB)
			cfgb.bits.bufsz2 = RBR_BUFSZ2_4K;
		else {
			HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
			    " rxdma_cfg_rdc_ring"
			    " blksize2: Illegal buffer size %x \n",
			    rdc_desc_cfg->size2));
			return (HPI_RXDMA_BUFSZIE_INVALID);
		}
		cfgb.bits.vld2 = 1;
	} else {
		cfgb.bits.vld2 = 0;
	}

	rcr_cfga.value = (rdc_desc_cfg->rcr_addr &
	    (RCRCFIG_A_STADDR_MASK | RCRCFIG_A_STADDR_BASE_MASK));

	/*
	 * The RCR length must be a multiple of 32.
	 */
	if ((rdc_desc_cfg->rcr_len < RCR_DEFAULT_MIN_LEN) ||
	    (rdc_desc_cfg->rcr_len > HXGE_RCR_MAX) ||
	    (rdc_desc_cfg->rcr_len % 32)) {
		HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
		    " rxdma_cfg_rdc_ring Illegal RCR Queue Length %d \n",
		    rdc_desc_cfg->rcr_len));
		return (HPI_RXDMA_ERROR_ENCODE(HPI_RXDMA_RCRSZIE_INVALID, rdc));
	}

	/*
	 * Bits 15:5 of the maximum number of 8B entries in RCR.  Bits 4:0 are
	 * hard-coded to zero.  The maximum size is 2^16 - 32.
	 */
	rcr_cfga.bits.len = rdc_desc_cfg->rcr_len >> 5;

	rcr_cfgb.value = 0;
	if (rdc_desc_cfg->rcr_timeout_enable == 1) {
		/* check if the rcr timeout value is valid */

		if (RXDMA_RCR_TO_VALID(rdc_desc_cfg->rcr_timeout)) {
			rcr_cfgb.bits.timeout = rdc_desc_cfg->rcr_timeout;
			rcr_cfgb.bits.entout = 1;
		} else {
			HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
			    " rxdma_cfg_rdc_ring"
			    " Illegal RCR Timeout value %d \n",
			    rdc_desc_cfg->rcr_timeout));
			rcr_cfgb.bits.entout = 0;
		}
	} else {
		rcr_cfgb.bits.entout = 0;
	}

	/* check if the rcr threshold value is valid */
	if (RXDMA_RCR_THRESH_VALID(rdc_desc_cfg->rcr_threshold)) {
		rcr_cfgb.bits.pthres = rdc_desc_cfg->rcr_threshold;
	} else {
		HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
		    " rxdma_cfg_rdc_ring Illegal RCR Threshold value %d \n",
		    rdc_desc_cfg->rcr_threshold));
		rcr_cfgb.bits.pthres = 1;
	}

	/* now do the actual HW configuration */
	RXDMA_REG_WRITE64(handle, RDC_RX_CFG1, rdc, cfg1.value);
	RXDMA_REG_WRITE64(handle, RDC_RX_CFG2, rdc, cfg2.value);

	RXDMA_REG_WRITE64(handle, RDC_RBR_CFG_A, rdc, cfga.value);
	RXDMA_REG_WRITE64(handle, RDC_RBR_CFG_B, rdc, cfgb.value);

	RXDMA_REG_WRITE64(handle, RDC_RCR_CFG_A, rdc, rcr_cfga.value);
	RXDMA_REG_WRITE64(handle, RDC_RCR_CFG_B, rdc, rcr_cfgb.value);

	RXDMA_REG_WRITE64(handle, RDC_PAGE_HANDLE, rdc, page_handle.value);

	return (HPI_SUCCESS);
}

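/*
 * Illustrative sketch only (not part of the original source): the ring
 * must be configured before the channel is enabled, per the comment in
 * hpi_rxdma_cfg_rdc_ring() above.  'my_handle', 'my_rdc' and
 * 'my_desc_cfg' are hypothetical caller-supplied values.
 *
 *	if (hpi_rxdma_cfg_rdc_ring(my_handle, my_rdc, &my_desc_cfg) ==
 *	    HPI_SUCCESS)
 *		(void) hpi_rxdma_cfg_rdc_enable(my_handle, my_rdc);
 */

/*
 * hpi_rxdma_ring_perr_stat_get():
 *	Read the prefetch and shadow ring parity error log registers
 *	(RDC_PREF_PAR_LOG and RDC_SHADOW_PAR_LOG), which record only the
 *	addresses at which the errors occurred.
 */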
hpi_status_t
hpi_rxdma_ring_perr_stat_get(hpi_handle_t handle,
    rdc_pref_par_log_t *pre_log, rdc_pref_par_log_t *sha_log)
{
	/*
	 * Hydra doesn't have details about these errors.
	 * It only provides the addresses of the errors.
	 */
	HXGE_REG_RD64(handle, RDC_PREF_PAR_LOG, &pre_log->value);
	HXGE_REG_RD64(handle, RDC_SHADOW_PAR_LOG, &sha_log->value);

	return (HPI_SUCCESS);
}


/* system wide conf functions */

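/*
 * hpi_rxdma_cfg_clock_div_set():
 *	Program the system-wide RDC clock divider (RDC_CLOCK_DIV
 *	register) with the given count.
 */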
hpi_status_t
hpi_rxdma_cfg_clock_div_set(hpi_handle_t handle, uint16_t count)
{
	uint64_t	offset;
	rdc_clock_div_t	clk_div;

	offset = RDC_CLOCK_DIV;

	clk_div.value = 0;
	clk_div.bits.count = count;
	HPI_DEBUG_MSG((handle.function, HPI_RDC_CTL,
	    " hpi_rxdma_cfg_clock_div_set: add 0x%llx "
	    "handle 0x%llx value 0x%llx",
	    handle.regp, handle.regh, clk_div.value));

	HXGE_REG_WR64(handle, offset, clk_div.value);

	return (HPI_SUCCESS);
}


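/*
 * hpi_rxdma_rdc_rbr_stat_get():
 *	Read the channel's RDC_RBR_QLEN register into *rbr_stat to
 *	report the current RBR queue length.
 */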
hpi_status_t
hpi_rxdma_rdc_rbr_stat_get(hpi_handle_t handle, uint8_t rdc,
    rdc_rbr_qlen_t *rbr_stat)
{
	if (!RXDMA_CHANNEL_VALID(rdc)) {
		HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
		    " rxdma_rdc_rbr_stat_get Illegal RDC Number %d \n", rdc));
		return (HPI_RXDMA_RDC_INVALID);
	}

	RXDMA_REG_READ64(handle, RDC_RBR_QLEN, rdc, &rbr_stat->value);
	return (HPI_SUCCESS);
}


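/*
 * hpi_rxdma_rdc_rcr_qlen_get():
 *	Read the channel's RDC_RCR_QLEN register and return the current
 *	RCR queue length in *rcr_qlen.
 */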
hpi_status_t
hpi_rxdma_rdc_rcr_qlen_get(hpi_handle_t handle, uint8_t rdc,
    uint16_t *rcr_qlen)
{
	rdc_rcr_qlen_t stats;

	if (!RXDMA_CHANNEL_VALID(rdc)) {
		HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
		    " rxdma_rdc_rcr_qlen_get Illegal RDC Number %d \n", rdc));
		return (HPI_RXDMA_RDC_INVALID);
	}

	RXDMA_REG_READ64(handle, RDC_RCR_QLEN, rdc, &stats.value);
	*rcr_qlen = stats.bits.qlen;
	HPI_DEBUG_MSG((handle.function, HPI_RDC_CTL,
	    " rxdma_rdc_rcr_qlen_get RDC %d qlen %x qlen %x\n",
	    rdc, *rcr_qlen, stats.bits.qlen));
	return (HPI_SUCCESS);
}

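/*
 * hpi_rxdma_channel_rbr_empty_clear():
 *	Clear the latched rbr_empty condition for a channel by writing
 *	the rbr_empty bit back to the RDC_STAT register.
 */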
hpi_status_t
hpi_rxdma_channel_rbr_empty_clear(hpi_handle_t handle, uint8_t channel)
{
	rdc_stat_t	cs;

	if (!RXDMA_CHANNEL_VALID(channel)) {
		HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
		    " hpi_rxdma_channel_rbr_empty_clear"
		    " Illegal channel %d \n", channel));
		return (HPI_FAILURE | HPI_RXDMA_CHANNEL_INVALID(channel));
	}

	RXDMA_REG_READ64(handle, RDC_STAT, channel, &cs.value);
	cs.bits.rbr_empty = 1;
	RXDMA_REG_WRITE64(handle, RDC_STAT, channel, cs.value);

	return (HPI_SUCCESS);
}

/*
 * This function is called to operate on the control and status register.
 */
hpi_status_t
hpi_rxdma_control_status(hpi_handle_t handle, io_op_t op_mode, uint8_t channel,
    rdc_stat_t *cs_p)
{
	int		status = HPI_SUCCESS;
	rdc_stat_t	cs;

	if (!RXDMA_CHANNEL_VALID(channel)) {
		HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
		    "hpi_rxdma_control_status Illegal channel %d \n",
		    channel));
		return (HPI_FAILURE | HPI_RXDMA_CHANNEL_INVALID(channel));
	}

	switch (op_mode) {
	case OP_GET:
		RXDMA_REG_READ64(handle, RDC_STAT, channel, &cs_p->value);
		break;

	case OP_SET:
		RXDMA_REG_WRITE64(handle, RDC_STAT, channel, cs_p->value);
		break;

	case OP_UPDATE:
		RXDMA_REG_READ64(handle, RDC_STAT, channel, &cs.value);
		RXDMA_REG_WRITE64(handle, RDC_STAT, channel,
		    cs_p->value | cs.value);
		break;

	default:
		HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
		    "hpi_rxdma_control_status Illegal opcode %x \n",
		    op_mode));
		return (HPI_FAILURE | HPI_RXDMA_OPCODE_INVALID(channel));
	}

	return (status);
}
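/*
 * Illustrative sketch only (not part of the original source): latched
 * RDC_STAT bits are cleared by writing them back (see
 * hpi_rxdma_channel_rbr_empty_clear() above), so a caller could
 * acknowledge the currently latched status with an OP_GET followed by
 * an OP_SET of the same value.  'my_handle' and 'my_channel' are
 * hypothetical.
 *
 *	rdc_stat_t cs;
 *
 *	(void) hpi_rxdma_control_status(my_handle, OP_GET, my_channel, &cs);
 *	(void) hpi_rxdma_control_status(my_handle, OP_SET, my_channel, &cs);
 */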

/*
 * This function is called to operate on the event mask
 * register which is used for generating interrupts.
 */
hpi_status_t
hpi_rxdma_event_mask(hpi_handle_t handle, io_op_t op_mode, uint8_t channel,
    rdc_int_mask_t *mask_p)
{
	int		status = HPI_SUCCESS;
	rdc_int_mask_t	mask;

	if (!RXDMA_CHANNEL_VALID(channel)) {
		HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
		    "hpi_rxdma_event_mask Illegal channel %d \n", channel));
		return (HPI_FAILURE | HPI_RXDMA_CHANNEL_INVALID(channel));
	}

	switch (op_mode) {
	case OP_GET:
		RXDMA_REG_READ64(handle, RDC_INT_MASK, channel, &mask_p->value);
		break;

	case OP_SET:
		RXDMA_REG_WRITE64(handle, RDC_INT_MASK, channel, mask_p->value);
		break;

	case OP_UPDATE:
		RXDMA_REG_READ64(handle, RDC_INT_MASK, channel, &mask.value);
		RXDMA_REG_WRITE64(handle, RDC_INT_MASK, channel,
		    mask_p->value | mask.value);
		break;

	default:
		HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
		    "hpi_rxdma_event_mask Illegal opcode %x \n", op_mode));
		return (HPI_FAILURE | HPI_RXDMA_OPCODE_INVALID(channel));
	}

	return (status);
}
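/*
 * Illustrative sketch only (not part of the original source), assuming
 * that a set bit in RDC_INT_MASK disables the corresponding interrupt:
 * all events for a channel could be masked by writing an all-ones mask
 * with OP_SET.  'my_handle' and 'my_channel' are hypothetical.
 *
 *	rdc_int_mask_t mask;
 *
 *	mask.value = ~0ULL;
 *	(void) hpi_rxdma_event_mask(my_handle, OP_SET, my_channel, &mask);
 */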