xref: /freebsd/sys/dev/dpaa2/dpaa2_swp.c (revision 58983e4b0253ad38a3e1ef2166fedd3133fdb552)
1ba7319e9SDmitry Salychev /*-
2ba7319e9SDmitry Salychev  * SPDX-License-Identifier: BSD-3-Clause AND BSD-2-Clause
3ba7319e9SDmitry Salychev  *
4ba7319e9SDmitry Salychev  * Copyright © 2014-2016 Freescale Semiconductor, Inc.
5ba7319e9SDmitry Salychev  * Copyright © 2016-2019 NXP
6ba7319e9SDmitry Salychev  * All rights reserved.
7ba7319e9SDmitry Salychev  *
8ba7319e9SDmitry Salychev  * Redistribution and use in source and binary forms, with or without
9ba7319e9SDmitry Salychev  * modification, are permitted provided that the following conditions are met:
10ba7319e9SDmitry Salychev  *
11ba7319e9SDmitry Salychev  * 1. Redistributions of source code must retain the above copyright notice,
12ba7319e9SDmitry Salychev  *    this list of conditions and the following disclaimer.
13ba7319e9SDmitry Salychev  *
14ba7319e9SDmitry Salychev  * 2. Redistributions in binary form must reproduce the above copyright
15ba7319e9SDmitry Salychev  *    notice, this list of conditions and the following disclaimer in the
16ba7319e9SDmitry Salychev  *    documentation and/or other materials provided with the distribution.
17ba7319e9SDmitry Salychev  *
18ba7319e9SDmitry Salychev  * 3. Neither the name of the copyright holder nor the names of its
19ba7319e9SDmitry Salychev  *    contributors may be used to endorse or promote products derived from this
20ba7319e9SDmitry Salychev  *    software without specific prior written permission.
21ba7319e9SDmitry Salychev  *
22ba7319e9SDmitry Salychev  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23ba7319e9SDmitry Salychev  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24ba7319e9SDmitry Salychev  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25ba7319e9SDmitry Salychev  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
26ba7319e9SDmitry Salychev  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27ba7319e9SDmitry Salychev  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28ba7319e9SDmitry Salychev  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29ba7319e9SDmitry Salychev  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30ba7319e9SDmitry Salychev  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31ba7319e9SDmitry Salychev  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32ba7319e9SDmitry Salychev  * POSSIBILITY OF SUCH DAMAGE.
33ba7319e9SDmitry Salychev  *
34ba7319e9SDmitry Salychev  * Original source file obtained from:
35ba7319e9SDmitry Salychev  * drivers/soc/fsl/dpio/qbman-portal.c
36ba7319e9SDmitry Salychev  *
37ba7319e9SDmitry Salychev  * Commit: 4c86114194e644b6da9107d75910635c9e87179e
38ba7319e9SDmitry Salychev  * Repository: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
39ba7319e9SDmitry Salychev  */
40ba7319e9SDmitry Salychev 
41ba7319e9SDmitry Salychev /*
42ba7319e9SDmitry Salychev  * Copyright © 2021-2022 Dmitry Salychev
43ba7319e9SDmitry Salychev  *
44ba7319e9SDmitry Salychev  * Redistribution and use in source and binary forms, with or without
45ba7319e9SDmitry Salychev  * modification, are permitted provided that the following conditions
46ba7319e9SDmitry Salychev  * are met:
47ba7319e9SDmitry Salychev  * 1. Redistributions of source code must retain the above copyright
48ba7319e9SDmitry Salychev  *    notice, this list of conditions and the following disclaimer.
49ba7319e9SDmitry Salychev  * 2. Redistributions in binary form must reproduce the above copyright
50ba7319e9SDmitry Salychev  *    notice, this list of conditions and the following disclaimer in the
51ba7319e9SDmitry Salychev  *    documentation and/or other materials provided with the distribution.
52ba7319e9SDmitry Salychev  *
53ba7319e9SDmitry Salychev  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
54ba7319e9SDmitry Salychev  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
55ba7319e9SDmitry Salychev  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
56ba7319e9SDmitry Salychev  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
57ba7319e9SDmitry Salychev  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
58ba7319e9SDmitry Salychev  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
59ba7319e9SDmitry Salychev  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
60ba7319e9SDmitry Salychev  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
61ba7319e9SDmitry Salychev  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
62ba7319e9SDmitry Salychev  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
63ba7319e9SDmitry Salychev  * SUCH DAMAGE.
64ba7319e9SDmitry Salychev  */
65ba7319e9SDmitry Salychev 
66ba7319e9SDmitry Salychev #include <sys/cdefs.h>
67ba7319e9SDmitry Salychev /*
68ba7319e9SDmitry Salychev  * DPAA2 QBMan software portal.
69ba7319e9SDmitry Salychev  */
70ba7319e9SDmitry Salychev 
71ba7319e9SDmitry Salychev #include <sys/param.h>
72ba7319e9SDmitry Salychev #include <sys/kernel.h>
73ba7319e9SDmitry Salychev #include <sys/bus.h>
74ba7319e9SDmitry Salychev #include <sys/rman.h>
75ba7319e9SDmitry Salychev #include <sys/module.h>
76ba7319e9SDmitry Salychev #include <sys/malloc.h>
77ba7319e9SDmitry Salychev #include <sys/mutex.h>
78ba7319e9SDmitry Salychev #include <sys/time.h>
79ba7319e9SDmitry Salychev #include <sys/types.h>
80ba7319e9SDmitry Salychev #include <sys/systm.h>
81ba7319e9SDmitry Salychev #include <sys/lock.h>
82ba7319e9SDmitry Salychev 
83ba7319e9SDmitry Salychev #include <machine/bus.h>
84ba7319e9SDmitry Salychev #include <machine/resource.h>
85ba7319e9SDmitry Salychev #include <machine/atomic.h>
86ba7319e9SDmitry Salychev 
87ba7319e9SDmitry Salychev #include "pcib_if.h"
88ba7319e9SDmitry Salychev #include "pci_if.h"
89ba7319e9SDmitry Salychev 
90ba7319e9SDmitry Salychev #include "dpaa2_swp.h"
91ba7319e9SDmitry Salychev #include "dpaa2_mc.h"
92ba7319e9SDmitry Salychev #include "dpaa2_bp.h"
93ba7319e9SDmitry Salychev 
94ba7319e9SDmitry Salychev #define CMD_SPIN_TIMEOUT		100u	/* us */
95ba7319e9SDmitry Salychev #define CMD_SPIN_ATTEMPTS		2000u	/* 200 ms max. */
96ba7319e9SDmitry Salychev 
97ba7319e9SDmitry Salychev #define CMD_VERB_MASK			0x7Fu
98ba7319e9SDmitry Salychev 
99ba7319e9SDmitry Salychev /* Shifts in the VERB byte of the enqueue command descriptor. */
100ba7319e9SDmitry Salychev #define ENQ_CMD_ORP_ENABLE_SHIFT	2
101ba7319e9SDmitry Salychev #define ENQ_CMD_IRQ_ON_DISPATCH_SHIFT	3
102ba7319e9SDmitry Salychev #define ENQ_CMD_TARGET_TYPE_SHIFT	4
103ba7319e9SDmitry Salychev #define ENQ_CMD_DCA_EN_SHIFT		7
104ba7319e9SDmitry Salychev /* VERB byte options of the enqueue command descriptor. */
105ba7319e9SDmitry Salychev #define ENQ_CMD_EMPTY			0u
106ba7319e9SDmitry Salychev #define ENQ_CMD_RESPONSE_ALWAYS		1u
107ba7319e9SDmitry Salychev #define ENQ_CMD_REJECTS_TO_FQ		2u
108ba7319e9SDmitry Salychev 
109ba7319e9SDmitry Salychev #define ENQ_DESC_FD_OFFSET		32u
110ba7319e9SDmitry Salychev 
111ba7319e9SDmitry Salychev #define ENQ_DCA_IDXMASK			0x0Fu
112ba7319e9SDmitry Salychev #define ENQ_FLAG_DCA			(1ull << 31)
113ba7319e9SDmitry Salychev 
114ba7319e9SDmitry Salychev /* QBMan portal command codes. */
115ba7319e9SDmitry Salychev #define CMDID_SWP_MC_ACQUIRE		0x30
116ba7319e9SDmitry Salychev #define CMDID_SWP_BP_QUERY		0x32
117ba7319e9SDmitry Salychev #define CMDID_SWP_WQCHAN_CONFIGURE	0x46
118ba7319e9SDmitry Salychev 
119ba7319e9SDmitry Salychev /* QBMan portal command result codes. */
120ba7319e9SDmitry Salychev #define QBMAN_CMD_RC_OK			0xF0
121ba7319e9SDmitry Salychev 
122ba7319e9SDmitry Salychev /* SDQCR attribute codes */
123ba7319e9SDmitry Salychev #define QB_SDQCR_FC_SHIFT 		29u
124ba7319e9SDmitry Salychev #define QB_SDQCR_FC_MASK		0x1u
125ba7319e9SDmitry Salychev #define QB_SDQCR_DCT_SHIFT		24u
126ba7319e9SDmitry Salychev #define QB_SDQCR_DCT_MASK		0x3u
127ba7319e9SDmitry Salychev #define QB_SDQCR_TOK_SHIFT		16u
128ba7319e9SDmitry Salychev #define QB_SDQCR_TOK_MASK		0xFFu
129ba7319e9SDmitry Salychev #define QB_SDQCR_SRC_SHIFT		0u
130ba7319e9SDmitry Salychev #define QB_SDQCR_SRC_MASK		0xFFFFu
131ba7319e9SDmitry Salychev 
132ba7319e9SDmitry Salychev /* Shifts in the VERB byte of the volatile dequeue command. */
133ba7319e9SDmitry Salychev #define QB_VDQCR_VERB_DCT0_SHIFT	0
134ba7319e9SDmitry Salychev #define QB_VDQCR_VERB_DCT1_SHIFT	1
135ba7319e9SDmitry Salychev #define QB_VDQCR_VERB_DT0_SHIFT		2
136ba7319e9SDmitry Salychev #define QB_VDQCR_VERB_DT1_SHIFT		3
137ba7319e9SDmitry Salychev #define QB_VDQCR_VERB_RLS_SHIFT		4
138ba7319e9SDmitry Salychev #define QB_VDQCR_VERB_WAE_SHIFT		5
139ba7319e9SDmitry Salychev #define QB_VDQCR_VERB_RAD_SHIFT		6
140ba7319e9SDmitry Salychev 
141ba7319e9SDmitry Salychev /* Maximum timeout period for the DQRR interrupt. */
142ba7319e9SDmitry Salychev #define DQRR_MAX_ITP			4096u
143ba7319e9SDmitry Salychev #define DQRR_PI_MASK			0x0Fu
144ba7319e9SDmitry Salychev 
145ba7319e9SDmitry Salychev /* Release Array Allocation register helpers. */
146ba7319e9SDmitry Salychev #define RAR_IDX(rar)			((rar) & 0x7u)
147ba7319e9SDmitry Salychev #define RAR_VB(rar)			((rar) & 0x80u)
148ba7319e9SDmitry Salychev #define RAR_SUCCESS(rar)		((rar) & 0x100u)
149ba7319e9SDmitry Salychev 
150ba7319e9SDmitry Salychev MALLOC_DEFINE(M_DPAA2_SWP, "dpaa2_swp", "DPAA2 QBMan Software Portal");
151ba7319e9SDmitry Salychev 
152ba7319e9SDmitry Salychev enum qbman_sdqcr_dct {
153ba7319e9SDmitry Salychev 	qbman_sdqcr_dct_null = 0,
154ba7319e9SDmitry Salychev 	qbman_sdqcr_dct_prio_ics,
155ba7319e9SDmitry Salychev 	qbman_sdqcr_dct_active_ics,
156ba7319e9SDmitry Salychev 	qbman_sdqcr_dct_active
157ba7319e9SDmitry Salychev };
158ba7319e9SDmitry Salychev 
159ba7319e9SDmitry Salychev enum qbman_sdqcr_fc {
160ba7319e9SDmitry Salychev 	qbman_sdqcr_fc_one = 0,
161ba7319e9SDmitry Salychev 	qbman_sdqcr_fc_up_to_3 = 1
162ba7319e9SDmitry Salychev };
163ba7319e9SDmitry Salychev 
164ba7319e9SDmitry Salychev /* Routines to execute software portal commands. */
165ba7319e9SDmitry Salychev static int dpaa2_swp_exec_mgmt_command(struct dpaa2_swp *,
166ba7319e9SDmitry Salychev     struct dpaa2_swp_cmd *, struct dpaa2_swp_rsp *, uint8_t);
167ba7319e9SDmitry Salychev static int dpaa2_swp_exec_br_command(struct dpaa2_swp *, struct dpaa2_swp_cmd *,
168ba7319e9SDmitry Salychev     uint32_t);
169ba7319e9SDmitry Salychev static int dpaa2_swp_exec_vdc_command_locked(struct dpaa2_swp *,
170ba7319e9SDmitry Salychev     struct dpaa2_swp_cmd *);
171ba7319e9SDmitry Salychev 
172ba7319e9SDmitry Salychev /* Management Commands helpers. */
173ba7319e9SDmitry Salychev static int dpaa2_swp_send_mgmt_command(struct dpaa2_swp *,
174ba7319e9SDmitry Salychev     struct dpaa2_swp_cmd *, uint8_t);
175ba7319e9SDmitry Salychev static int dpaa2_swp_wait_for_mgmt_response(struct dpaa2_swp *,
176ba7319e9SDmitry Salychev     struct dpaa2_swp_rsp *);
177ba7319e9SDmitry Salychev 
178ba7319e9SDmitry Salychev /* Helper subroutines. */
179ba7319e9SDmitry Salychev static int dpaa2_swp_cyc_diff(uint8_t, uint8_t, uint8_t);
180ba7319e9SDmitry Salychev 
181ba7319e9SDmitry Salychev int
dpaa2_swp_init_portal(struct dpaa2_swp ** swp,struct dpaa2_swp_desc * desc,uint16_t flags)182ba7319e9SDmitry Salychev dpaa2_swp_init_portal(struct dpaa2_swp **swp, struct dpaa2_swp_desc *desc,
183ba7319e9SDmitry Salychev     uint16_t flags)
184ba7319e9SDmitry Salychev {
185ba7319e9SDmitry Salychev 	struct dpaa2_swp *p;
186ba7319e9SDmitry Salychev 	uint32_t reg, mask_size, eqcr_pi; /* EQCR producer index */
187ba7319e9SDmitry Salychev 
188ba7319e9SDmitry Salychev 	if (!swp || !desc)
189ba7319e9SDmitry Salychev 		return (DPAA2_SWP_STAT_EINVAL);
190ba7319e9SDmitry Salychev 
191ba7319e9SDmitry Salychev 	p = malloc(sizeof(struct dpaa2_swp), M_DPAA2_SWP,
192ba7319e9SDmitry Salychev 	    flags & DPAA2_SWP_NOWAIT_ALLOC
193ba7319e9SDmitry Salychev 	    ? (M_NOWAIT | M_ZERO)
194ba7319e9SDmitry Salychev 	    : (M_WAITOK | M_ZERO));
195ba7319e9SDmitry Salychev 	if (!p)
196ba7319e9SDmitry Salychev 		return (DPAA2_SWP_STAT_NO_MEMORY);
197ba7319e9SDmitry Salychev 
198ba7319e9SDmitry Salychev 	mtx_init(&p->lock, "swp_sleep_lock", NULL, MTX_DEF);
199ba7319e9SDmitry Salychev 
200ba7319e9SDmitry Salychev 	p->cfg.mem_backed = false;
201ba7319e9SDmitry Salychev 	p->cfg.writes_cinh = true;
202ba7319e9SDmitry Salychev 
203ba7319e9SDmitry Salychev 	p->desc = desc;
204ba7319e9SDmitry Salychev 	p->flags = flags;
205ba7319e9SDmitry Salychev 	p->mc.valid_bit = DPAA2_SWP_VALID_BIT;
206ba7319e9SDmitry Salychev 	p->mr.valid_bit = DPAA2_SWP_VALID_BIT;
207ba7319e9SDmitry Salychev 
208ba7319e9SDmitry Salychev 	/* FIXME: Memory-backed mode doesn't work now. Why? */
209ba7319e9SDmitry Salychev 	p->cena_res = desc->cena_res;
210ba7319e9SDmitry Salychev 	p->cena_map = desc->cena_map;
211ba7319e9SDmitry Salychev 	p->cinh_res = desc->cinh_res;
212ba7319e9SDmitry Salychev 	p->cinh_map = desc->cinh_map;
213ba7319e9SDmitry Salychev 
214ba7319e9SDmitry Salychev 	/* Static Dequeue Command Register configuration. */
215ba7319e9SDmitry Salychev 	p->sdq = 0;
216ba7319e9SDmitry Salychev 	p->sdq |= qbman_sdqcr_dct_prio_ics << QB_SDQCR_DCT_SHIFT;
217ba7319e9SDmitry Salychev 	p->sdq |= qbman_sdqcr_fc_up_to_3 << QB_SDQCR_FC_SHIFT;
218ba7319e9SDmitry Salychev 	p->sdq |= DPAA2_SWP_SDQCR_TOKEN << QB_SDQCR_TOK_SHIFT;
219ba7319e9SDmitry Salychev 
220ba7319e9SDmitry Salychev 	/* Volatile Dequeue Command configuration. */
221ba7319e9SDmitry Salychev 	p->vdq.valid_bit = DPAA2_SWP_VALID_BIT;
222ba7319e9SDmitry Salychev 
223ba7319e9SDmitry Salychev 	/* Dequeue Response Ring configuration */
224ba7319e9SDmitry Salychev 	p->dqrr.next_idx = 0;
225ba7319e9SDmitry Salychev 	p->dqrr.valid_bit = DPAA2_SWP_VALID_BIT;
226ba7319e9SDmitry Salychev 	if ((desc->swp_version & DPAA2_SWP_REV_MASK) < DPAA2_SWP_REV_4100) {
227ba7319e9SDmitry Salychev 		p->dqrr.ring_size = 4;
228ba7319e9SDmitry Salychev 		p->dqrr.reset_bug = 1;
229ba7319e9SDmitry Salychev 	} else {
230ba7319e9SDmitry Salychev 		p->dqrr.ring_size = 8;
231ba7319e9SDmitry Salychev 		p->dqrr.reset_bug = 0;
232ba7319e9SDmitry Salychev 	}
233ba7319e9SDmitry Salychev 
234ba7319e9SDmitry Salychev 	if ((desc->swp_version & DPAA2_SWP_REV_MASK) < DPAA2_SWP_REV_5000) {
235ba7319e9SDmitry Salychev 		reg = dpaa2_swp_set_cfg(
236ba7319e9SDmitry Salychev 		    p->dqrr.ring_size, /* max. entries QMan writes to DQRR */
237ba7319e9SDmitry Salychev 		    1, /* writes enabled in the CINH memory only */
238ba7319e9SDmitry Salychev 		    0, /* EQCR_CI stashing threshold */
239ba7319e9SDmitry Salychev 		    3, /* RPM: RCR in array mode */
240ba7319e9SDmitry Salychev 		    2, /* DCM: Discrete consumption ack */
241ba7319e9SDmitry Salychev 		    2, /* EPM: EQCR in ring mode (FIFO) */
242ba7319e9SDmitry Salychev 		    1, /* mem stashing drop enable enable */
243ba7319e9SDmitry Salychev 		    1, /* mem stashing priority enable */
244ba7319e9SDmitry Salychev 		    1, /* mem stashing enable */
245ba7319e9SDmitry Salychev 		    1, /* dequeue stashing priority enable */
246ba7319e9SDmitry Salychev 		    0, /* dequeue stashing enable enable */
247ba7319e9SDmitry Salychev 		    0  /* EQCR_CI stashing priority enable */
248ba7319e9SDmitry Salychev 		);
249ba7319e9SDmitry Salychev 		reg &= ~(1 << DPAA2_SWP_CFG_CPBS_SHIFT); /* QMan-backed mode */
250ba7319e9SDmitry Salychev 	} else {
251ba7319e9SDmitry Salychev 		bus_set_region_4(p->cena_map, 0, 0,
252ba7319e9SDmitry Salychev 		    rman_get_size(p->cena_res) / 4);
253ba7319e9SDmitry Salychev 
254ba7319e9SDmitry Salychev 		reg = dpaa2_swp_set_cfg(
255ba7319e9SDmitry Salychev 		    p->dqrr.ring_size, /* max. entries QMan writes to DQRR */					/* DQRR_MF */
256ba7319e9SDmitry Salychev 		    1, /* writes enabled in the CINH memory only */						/* WN */
257ba7319e9SDmitry Salychev 		    0, /* EQCR_CI stashing is disabled */							/* EST */
258ba7319e9SDmitry Salychev 		    3, /* RPM: RCR in array mode */								/* RPM */
259ba7319e9SDmitry Salychev 		    2, /* DCM: Discrete consumption ack */							/* DCM */
260ba7319e9SDmitry Salychev 		    2, /* EPM: EQCR in ring mode (FIFO) */							/* EPM */
261ba7319e9SDmitry Salychev 		    1, /* Dequeued frame data, annotation, and FQ context stashing drop enable */		/* SD */
262ba7319e9SDmitry Salychev 		    1, /* Dequeued frame data, annotation, and FQ context stashing priority */			/* SP */
263ba7319e9SDmitry Salychev 		    1, /* Dequeued frame data, annotation, and FQ context stashing enable */			/* SE */
264ba7319e9SDmitry Salychev 		    1, /* Dequeue response ring (DQRR) entry stashing priority */				/* DP */
265ba7319e9SDmitry Salychev 		    0, /* Dequeue response ring (DQRR) entry, or cacheable portal area, stashing enable. */	/* DE */
266ba7319e9SDmitry Salychev 		    0  /* EQCR_CI stashing priority */								/* EP */
267ba7319e9SDmitry Salychev 		);
268ba7319e9SDmitry Salychev 		/* TODO: Switch to memory-backed mode. */
269ba7319e9SDmitry Salychev 		reg &= ~(1 << DPAA2_SWP_CFG_CPBS_SHIFT); /* QMan-backed mode */
270ba7319e9SDmitry Salychev 	}
271ba7319e9SDmitry Salychev 	dpaa2_swp_write_reg(p, DPAA2_SWP_CINH_CFG, reg);
272ba7319e9SDmitry Salychev 	reg = dpaa2_swp_read_reg(p, DPAA2_SWP_CINH_CFG);
273ba7319e9SDmitry Salychev 	if (!reg) {
274ba7319e9SDmitry Salychev 		free(p, M_DPAA2_SWP);
275ba7319e9SDmitry Salychev 		return (DPAA2_SWP_STAT_PORTAL_DISABLED);
276ba7319e9SDmitry Salychev 	}
277ba7319e9SDmitry Salychev 
278ba7319e9SDmitry Salychev 	/*
279ba7319e9SDmitry Salychev 	 * Static Dequeue Command Register needs to be initialized to 0 when no
280ba7319e9SDmitry Salychev 	 * channels are being dequeued from or else the QMan HW will indicate an
281ba7319e9SDmitry Salychev 	 * error. The values that were calculated above will be applied when
282ba7319e9SDmitry Salychev 	 * dequeues from a specific channel are enabled.
283ba7319e9SDmitry Salychev 	 */
284ba7319e9SDmitry Salychev 	dpaa2_swp_write_reg(p, DPAA2_SWP_CINH_SDQCR, 0);
285ba7319e9SDmitry Salychev 
286ba7319e9SDmitry Salychev 	p->eqcr.pi_ring_size = 8;
287ba7319e9SDmitry Salychev 	/* if ((desc->swp_version & DPAA2_SWP_REV_MASK) >= DPAA2_SWP_REV_5000) */
288ba7319e9SDmitry Salychev 	/* 	p->eqcr.pi_ring_size = 32; */
289ba7319e9SDmitry Salychev 
290ba7319e9SDmitry Salychev 	for (mask_size = p->eqcr.pi_ring_size; mask_size > 0; mask_size >>= 1)
291ba7319e9SDmitry Salychev 		p->eqcr.pi_ci_mask = (p->eqcr.pi_ci_mask << 1) + 1;
292ba7319e9SDmitry Salychev 
293ba7319e9SDmitry Salychev 	eqcr_pi = dpaa2_swp_read_reg(p, DPAA2_SWP_CINH_EQCR_PI);
294ba7319e9SDmitry Salychev 	p->eqcr.pi = eqcr_pi & p->eqcr.pi_ci_mask;
295ba7319e9SDmitry Salychev 	p->eqcr.pi_vb = eqcr_pi & DPAA2_SWP_VALID_BIT;
296ba7319e9SDmitry Salychev 	p->eqcr.ci = dpaa2_swp_read_reg(p, DPAA2_SWP_CINH_EQCR_CI)
297ba7319e9SDmitry Salychev 	    & p->eqcr.pi_ci_mask;
298ba7319e9SDmitry Salychev 	p->eqcr.available = p->eqcr.pi_ring_size;
299ba7319e9SDmitry Salychev 
300*58983e4bSDmitry Salychev 	/* TODO: sysctl(9) for the IRQ timeout? */
301*58983e4bSDmitry Salychev 	/* Initialize the portal with an IRQ threshold and timeout of 120us. */
302*58983e4bSDmitry Salychev 	dpaa2_swp_set_irq_coalescing(p, p->dqrr.ring_size - 1, 120);
303ba7319e9SDmitry Salychev 
304ba7319e9SDmitry Salychev 	*swp = p;
305ba7319e9SDmitry Salychev 
306ba7319e9SDmitry Salychev 	return (0);
307ba7319e9SDmitry Salychev }
308ba7319e9SDmitry Salychev 
void
dpaa2_swp_free_portal(struct dpaa2_swp *swp)
{
	uint16_t flags;

	KASSERT(swp != NULL, ("%s: swp is NULL", __func__));

	/* Mark the portal as destroyed under the lock so new users back off. */
	DPAA2_SWP_LOCK(swp, &flags);
	swp->flags |= DPAA2_SWP_DESTROYED;
	DPAA2_SWP_UNLOCK(swp);

	/* Let threads stop using this portal. */
	DELAY(DPAA2_SWP_TIMEOUT);

	/* Safe to tear down only after the grace period above. */
	mtx_destroy(&swp->lock);
	free(swp, M_DPAA2_SWP);
}
326ba7319e9SDmitry Salychev 
327ba7319e9SDmitry Salychev uint32_t
dpaa2_swp_set_cfg(uint8_t max_fill,uint8_t wn,uint8_t est,uint8_t rpm,uint8_t dcm,uint8_t epm,int sd,int sp,int se,int dp,int de,int ep)328ba7319e9SDmitry Salychev dpaa2_swp_set_cfg(uint8_t max_fill, uint8_t wn, uint8_t est, uint8_t rpm,
329ba7319e9SDmitry Salychev     uint8_t dcm, uint8_t epm, int sd, int sp, int se, int dp, int de, int ep)
330ba7319e9SDmitry Salychev {
331ba7319e9SDmitry Salychev 	return (
332ba7319e9SDmitry Salychev 	    max_fill	<< DPAA2_SWP_CFG_DQRR_MF_SHIFT |
333ba7319e9SDmitry Salychev 	    est		<< DPAA2_SWP_CFG_EST_SHIFT |
334ba7319e9SDmitry Salychev 	    wn		<< DPAA2_SWP_CFG_WN_SHIFT |
335ba7319e9SDmitry Salychev 	    rpm		<< DPAA2_SWP_CFG_RPM_SHIFT |
336ba7319e9SDmitry Salychev 	    dcm		<< DPAA2_SWP_CFG_DCM_SHIFT |
337ba7319e9SDmitry Salychev 	    epm		<< DPAA2_SWP_CFG_EPM_SHIFT |
338ba7319e9SDmitry Salychev 	    sd		<< DPAA2_SWP_CFG_SD_SHIFT |
339ba7319e9SDmitry Salychev 	    sp		<< DPAA2_SWP_CFG_SP_SHIFT |
340ba7319e9SDmitry Salychev 	    se		<< DPAA2_SWP_CFG_SE_SHIFT |
341ba7319e9SDmitry Salychev 	    dp		<< DPAA2_SWP_CFG_DP_SHIFT |
342ba7319e9SDmitry Salychev 	    de		<< DPAA2_SWP_CFG_DE_SHIFT |
343ba7319e9SDmitry Salychev 	    ep		<< DPAA2_SWP_CFG_EP_SHIFT
344ba7319e9SDmitry Salychev 	);
345ba7319e9SDmitry Salychev }
346ba7319e9SDmitry Salychev 
347ba7319e9SDmitry Salychev /* Read/write registers of a software portal. */
348ba7319e9SDmitry Salychev 
void
dpaa2_swp_write_reg(struct dpaa2_swp *swp, uint32_t o, uint32_t v)
{
	/* Register writes go through the cache-inhibited (CINH) mapping. */
	bus_write_4(swp->cinh_map, o, v);
}
354ba7319e9SDmitry Salychev 
uint32_t
dpaa2_swp_read_reg(struct dpaa2_swp *swp, uint32_t o)
{
	/* Register reads go through the cache-inhibited (CINH) mapping. */
	return (bus_read_4(swp->cinh_map, o));
}
360ba7319e9SDmitry Salychev 
361ba7319e9SDmitry Salychev /* Helper routines. */
362ba7319e9SDmitry Salychev 
363ba7319e9SDmitry Salychev /**
364ba7319e9SDmitry Salychev  * @brief Set enqueue descriptor without Order Point Record ID.
365ba7319e9SDmitry Salychev  *
366ba7319e9SDmitry Salychev  * ed:		Enqueue descriptor.
367ba7319e9SDmitry Salychev  * resp_always:	Enqueue with response always (1); FD from a rejected enqueue
368ba7319e9SDmitry Salychev  *		will be returned on a FQ (0).
369ba7319e9SDmitry Salychev  */
370ba7319e9SDmitry Salychev void
dpaa2_swp_set_ed_norp(struct dpaa2_eq_desc * ed,bool resp_always)371ba7319e9SDmitry Salychev dpaa2_swp_set_ed_norp(struct dpaa2_eq_desc *ed, bool resp_always)
372ba7319e9SDmitry Salychev {
373ba7319e9SDmitry Salychev 	ed->verb &= ~(1 << ENQ_CMD_ORP_ENABLE_SHIFT);
374ba7319e9SDmitry Salychev 	if (resp_always)
375ba7319e9SDmitry Salychev 		ed->verb |= ENQ_CMD_RESPONSE_ALWAYS;
376ba7319e9SDmitry Salychev 	else
377ba7319e9SDmitry Salychev 		ed->verb |= ENQ_CMD_REJECTS_TO_FQ;
378ba7319e9SDmitry Salychev }
379ba7319e9SDmitry Salychev 
380ba7319e9SDmitry Salychev /**
381ba7319e9SDmitry Salychev  * @brief Set FQ of the enqueue descriptor.
382ba7319e9SDmitry Salychev  */
383ba7319e9SDmitry Salychev void
dpaa2_swp_set_ed_fq(struct dpaa2_eq_desc * ed,uint32_t fqid)384ba7319e9SDmitry Salychev dpaa2_swp_set_ed_fq(struct dpaa2_eq_desc *ed, uint32_t fqid)
385ba7319e9SDmitry Salychev {
386ba7319e9SDmitry Salychev 	ed->verb &= ~(1 << ENQ_CMD_TARGET_TYPE_SHIFT);
387ba7319e9SDmitry Salychev 	ed->tgtid = fqid;
388ba7319e9SDmitry Salychev }
389ba7319e9SDmitry Salychev 
390ba7319e9SDmitry Salychev /**
391ba7319e9SDmitry Salychev  * @brief Enable interrupts for a software portal.
392ba7319e9SDmitry Salychev  */
393ba7319e9SDmitry Salychev void
dpaa2_swp_set_intr_trigger(struct dpaa2_swp * swp,uint32_t mask)394ba7319e9SDmitry Salychev dpaa2_swp_set_intr_trigger(struct dpaa2_swp *swp, uint32_t mask)
395ba7319e9SDmitry Salychev {
396ba7319e9SDmitry Salychev 	if (swp != NULL)
397ba7319e9SDmitry Salychev 		dpaa2_swp_write_reg(swp, DPAA2_SWP_CINH_IER, mask);
398ba7319e9SDmitry Salychev }
399ba7319e9SDmitry Salychev 
400ba7319e9SDmitry Salychev /**
401ba7319e9SDmitry Salychev  * @brief Return the value in the SWP_IER register.
402ba7319e9SDmitry Salychev  */
403ba7319e9SDmitry Salychev uint32_t
dpaa2_swp_get_intr_trigger(struct dpaa2_swp * swp)404ba7319e9SDmitry Salychev dpaa2_swp_get_intr_trigger(struct dpaa2_swp *swp)
405ba7319e9SDmitry Salychev {
406ba7319e9SDmitry Salychev 	if (swp != NULL)
407ba7319e9SDmitry Salychev 		return dpaa2_swp_read_reg(swp, DPAA2_SWP_CINH_IER);
408ba7319e9SDmitry Salychev 	return (0);
409ba7319e9SDmitry Salychev }
410ba7319e9SDmitry Salychev 
411ba7319e9SDmitry Salychev /**
412ba7319e9SDmitry Salychev  * @brief Return the value in the SWP_ISR register.
413ba7319e9SDmitry Salychev  */
414ba7319e9SDmitry Salychev uint32_t
dpaa2_swp_read_intr_status(struct dpaa2_swp * swp)415ba7319e9SDmitry Salychev dpaa2_swp_read_intr_status(struct dpaa2_swp *swp)
416ba7319e9SDmitry Salychev {
417ba7319e9SDmitry Salychev 	if (swp != NULL)
418ba7319e9SDmitry Salychev 		return dpaa2_swp_read_reg(swp, DPAA2_SWP_CINH_ISR);
419ba7319e9SDmitry Salychev 	return (0);
420ba7319e9SDmitry Salychev }
421ba7319e9SDmitry Salychev 
422ba7319e9SDmitry Salychev /**
423ba7319e9SDmitry Salychev  * @brief Clear SWP_ISR register according to the given mask.
424ba7319e9SDmitry Salychev  */
425ba7319e9SDmitry Salychev void
dpaa2_swp_clear_intr_status(struct dpaa2_swp * swp,uint32_t mask)426ba7319e9SDmitry Salychev dpaa2_swp_clear_intr_status(struct dpaa2_swp *swp, uint32_t mask)
427ba7319e9SDmitry Salychev {
428ba7319e9SDmitry Salychev 	if (swp != NULL)
429ba7319e9SDmitry Salychev 		dpaa2_swp_write_reg(swp, DPAA2_SWP_CINH_ISR, mask);
430ba7319e9SDmitry Salychev }
431ba7319e9SDmitry Salychev 
432ba7319e9SDmitry Salychev /**
433ba7319e9SDmitry Salychev  * @brief Enable or disable push dequeue.
434ba7319e9SDmitry Salychev  *
435ba7319e9SDmitry Salychev  * swp:		the software portal object
436ba7319e9SDmitry Salychev  * chan_idx:	the channel index (0 to 15)
437ba7319e9SDmitry Salychev  * en:		enable or disable push dequeue
438ba7319e9SDmitry Salychev  */
439ba7319e9SDmitry Salychev void
dpaa2_swp_set_push_dequeue(struct dpaa2_swp * swp,uint8_t chan_idx,bool en)440ba7319e9SDmitry Salychev dpaa2_swp_set_push_dequeue(struct dpaa2_swp *swp, uint8_t chan_idx, bool en)
441ba7319e9SDmitry Salychev {
442ba7319e9SDmitry Salychev 	uint16_t dqsrc;
443ba7319e9SDmitry Salychev 
444ba7319e9SDmitry Salychev 	if (swp != NULL) {
445ba7319e9SDmitry Salychev 		if (chan_idx > 15u) {
446ba7319e9SDmitry Salychev 			device_printf(swp->desc->dpio_dev, "channel index "
447ba7319e9SDmitry Salychev 			    "should be <= 15: chan_idx=%d\n", chan_idx);
448ba7319e9SDmitry Salychev 			return;
449ba7319e9SDmitry Salychev 		}
450ba7319e9SDmitry Salychev 
451ba7319e9SDmitry Salychev 		if (en)
452ba7319e9SDmitry Salychev 			swp->sdq |= 1 << chan_idx;
453ba7319e9SDmitry Salychev 		else
454ba7319e9SDmitry Salychev 			swp->sdq &= ~(1 << chan_idx);
455ba7319e9SDmitry Salychev 		/*
456ba7319e9SDmitry Salychev 		 * Read make the complete src map. If no channels are enabled
457ba7319e9SDmitry Salychev 		 * the SDQCR must be 0 or else QMan will assert errors.
458ba7319e9SDmitry Salychev 		 */
459ba7319e9SDmitry Salychev 		dqsrc = (swp->sdq >> DPAA2_SDQCR_SRC_SHIFT) &
460ba7319e9SDmitry Salychev 		    DPAA2_SDQCR_SRC_MASK;
461ba7319e9SDmitry Salychev 		dpaa2_swp_write_reg(swp, DPAA2_SWP_CINH_SDQCR, dqsrc != 0
462ba7319e9SDmitry Salychev 		    ? swp->sdq : 0);
463ba7319e9SDmitry Salychev 	}
464ba7319e9SDmitry Salychev }
465ba7319e9SDmitry Salychev 
466ba7319e9SDmitry Salychev /**
467ba7319e9SDmitry Salychev  * @brief Set new IRQ coalescing values.
468ba7319e9SDmitry Salychev  *
469ba7319e9SDmitry Salychev  * swp:		The software portal object.
470ba7319e9SDmitry Salychev  * threshold:	Threshold for DQRR interrupt generation. The DQRR interrupt
471ba7319e9SDmitry Salychev  *		asserts when the ring contains greater than "threshold" entries.
472ba7319e9SDmitry Salychev  * holdoff:	DQRR interrupt holdoff (timeout) period in us.
473ba7319e9SDmitry Salychev  */
int
dpaa2_swp_set_irq_coalescing(struct dpaa2_swp *swp, uint32_t threshold,
    uint32_t holdoff)
{
	uint32_t itp; /* Interrupt Timeout Period */

	if (swp == NULL)
		return (EINVAL);

	/*
	 * Convert "holdoff" from us into 256 QBMAN clock-cycle increments;
	 * the conversion factor depends on the QBMAN internal frequency.
	 */
	itp = (holdoff * 1000u) / swp->desc->swp_cycles_ratio;
	itp = itp > DQRR_MAX_ITP ? DQRR_MAX_ITP : itp;

	/* The threshold cannot exceed the DQRR capacity. */
	threshold = threshold >= swp->dqrr.ring_size
	    ? swp->dqrr.ring_size - 1
	    : threshold;

	swp->dqrr.irq_threshold = threshold;
	swp->dqrr.irq_itp = itp;

	dpaa2_swp_write_reg(swp, DPAA2_SWP_CINH_DQRR_ITR, threshold);
	dpaa2_swp_write_reg(swp, DPAA2_SWP_CINH_ITPR, itp);

	return (0);
}
500ba7319e9SDmitry Salychev 
501ba7319e9SDmitry Salychev /*
502ba7319e9SDmitry Salychev  * Software portal commands.
503ba7319e9SDmitry Salychev  */
504ba7319e9SDmitry Salychev 
505ba7319e9SDmitry Salychev /**
506ba7319e9SDmitry Salychev  * @brief Configure the channel data availability notification (CDAN)
507ba7319e9SDmitry Salychev  * in a particular WQ channel.
508ba7319e9SDmitry Salychev  */
int
dpaa2_swp_conf_wq_channel(struct dpaa2_swp *swp, uint16_t chan_id,
    uint8_t we_mask, bool cdan_en, uint64_t ctx)
{
	/* NOTE: 64 bytes command. Layout must match the hardware exactly. */
	struct __packed {
		uint8_t		verb;
		uint8_t		result; /* in response only! */
		uint16_t	chan_id;
		uint8_t		we;	/* write-enable mask */
		uint8_t		ctrl;	/* 1u = CDAN enabled, 0u = disabled */
		uint16_t	_reserved2;
		uint64_t	ctx;	/* context delivered with the CDAN */
		uint8_t		_reserved3[48];
	} cmd = {0};
	struct __packed {
		uint8_t		verb;
		uint8_t		result; /* QBMAN_CMD_RC_OK on success */
		uint16_t	chan_id;
		uint8_t		_reserved[60];
	} rsp;
	int error;

	if (swp == NULL)
		return (EINVAL);

	cmd.chan_id = chan_id;
	cmd.we = we_mask;
	cmd.ctrl = cdan_en ? 1u : 0u;
	cmd.ctx = ctx;

	/* Execute as a management command; it fills in the verb/response. */
	error = dpaa2_swp_exec_mgmt_command(swp, (struct dpaa2_swp_cmd *) &cmd,
	    (struct dpaa2_swp_rsp *) &rsp, CMDID_SWP_WQCHAN_CONFIGURE);
	if (error)
		return (error);

	if (rsp.result != QBMAN_CMD_RC_OK) {
		device_printf(swp->desc->dpio_dev, "WQ channel configuration "
		    "error: channel_id=%d, result=0x%02x\n", chan_id,
		    rsp.result);
		return (EIO);
	}

	return (0);
}
554ba7319e9SDmitry Salychev 
555ba7319e9SDmitry Salychev /**
556ba7319e9SDmitry Salychev  * @brief Query current configuration/state of the buffer pool.
557ba7319e9SDmitry Salychev  */
558ba7319e9SDmitry Salychev int
dpaa2_swp_query_bp(struct dpaa2_swp * swp,uint16_t bpid,struct dpaa2_bp_conf * conf)559ba7319e9SDmitry Salychev dpaa2_swp_query_bp(struct dpaa2_swp *swp, uint16_t bpid,
560ba7319e9SDmitry Salychev     struct dpaa2_bp_conf *conf)
561ba7319e9SDmitry Salychev {
562ba7319e9SDmitry Salychev 	/* NOTE: 64 bytes command. */
563ba7319e9SDmitry Salychev 	struct __packed {
564ba7319e9SDmitry Salychev 		uint8_t		verb;
565ba7319e9SDmitry Salychev 		uint8_t		_reserved1;
566ba7319e9SDmitry Salychev 		uint16_t	bpid;
567ba7319e9SDmitry Salychev 		uint8_t		_reserved2[60];
568ba7319e9SDmitry Salychev 	} cmd = {0};
569ba7319e9SDmitry Salychev 	struct __packed {
570ba7319e9SDmitry Salychev 		uint8_t		verb;
571ba7319e9SDmitry Salychev 		uint8_t		result;
572ba7319e9SDmitry Salychev 		uint32_t	_reserved1;
573ba7319e9SDmitry Salychev 		uint8_t		bdi;
574ba7319e9SDmitry Salychev 		uint8_t		state;
575ba7319e9SDmitry Salychev 		uint32_t	fill;
576ba7319e9SDmitry Salychev 		/* TODO: Support the other fields as well. */
577ba7319e9SDmitry Salychev 		uint8_t		_reserved2[52];
578ba7319e9SDmitry Salychev 	} rsp;
579ba7319e9SDmitry Salychev 	int error;
580ba7319e9SDmitry Salychev 
581ba7319e9SDmitry Salychev 	if (swp == NULL || conf == NULL)
582ba7319e9SDmitry Salychev 		return (EINVAL);
583ba7319e9SDmitry Salychev 
584ba7319e9SDmitry Salychev 	cmd.bpid = bpid;
585ba7319e9SDmitry Salychev 
586ba7319e9SDmitry Salychev 	error = dpaa2_swp_exec_mgmt_command(swp, (struct dpaa2_swp_cmd *) &cmd,
587ba7319e9SDmitry Salychev 	    (struct dpaa2_swp_rsp *) &rsp, CMDID_SWP_BP_QUERY);
588ba7319e9SDmitry Salychev 	if (error)
589ba7319e9SDmitry Salychev 		return (error);
590ba7319e9SDmitry Salychev 
591ba7319e9SDmitry Salychev 	if (rsp.result != QBMAN_CMD_RC_OK) {
592ba7319e9SDmitry Salychev 		device_printf(swp->desc->dpio_dev, "BP query error: bpid=%d, "
593ba7319e9SDmitry Salychev 		    "result=0x%02x\n", bpid, rsp.result);
594ba7319e9SDmitry Salychev 		return (EIO);
595ba7319e9SDmitry Salychev 	}
596ba7319e9SDmitry Salychev 
597ba7319e9SDmitry Salychev 	conf->bdi = rsp.bdi;
598ba7319e9SDmitry Salychev 	conf->state = rsp.state;
599ba7319e9SDmitry Salychev 	conf->free_bufn = rsp.fill;
600ba7319e9SDmitry Salychev 
601ba7319e9SDmitry Salychev 	return (0);
602ba7319e9SDmitry Salychev }
603ba7319e9SDmitry Salychev 
604ba7319e9SDmitry Salychev int
dpaa2_swp_release_bufs(struct dpaa2_swp * swp,uint16_t bpid,bus_addr_t * buf,uint32_t buf_num)605ba7319e9SDmitry Salychev dpaa2_swp_release_bufs(struct dpaa2_swp *swp, uint16_t bpid, bus_addr_t *buf,
606ba7319e9SDmitry Salychev     uint32_t buf_num)
607ba7319e9SDmitry Salychev {
608ba7319e9SDmitry Salychev 	/* NOTE: 64 bytes command. */
609ba7319e9SDmitry Salychev 	struct __packed {
610ba7319e9SDmitry Salychev 		uint8_t		verb;
611ba7319e9SDmitry Salychev 		uint8_t		_reserved1;
612ba7319e9SDmitry Salychev 		uint16_t	bpid;
613ba7319e9SDmitry Salychev 		uint32_t	_reserved2;
614ba7319e9SDmitry Salychev 		uint64_t	buf[DPAA2_SWP_BUFS_PER_CMD];
615ba7319e9SDmitry Salychev 	} cmd = {0};
616ba7319e9SDmitry Salychev 	int error;
617ba7319e9SDmitry Salychev 
618ba7319e9SDmitry Salychev 	if (swp == NULL || buf == NULL || buf_num == 0u ||
619ba7319e9SDmitry Salychev 	    buf_num > DPAA2_SWP_BUFS_PER_CMD)
620ba7319e9SDmitry Salychev 		return (EINVAL);
621ba7319e9SDmitry Salychev 
622ba7319e9SDmitry Salychev 	for (uint32_t i = 0; i < buf_num; i++)
623ba7319e9SDmitry Salychev 		cmd.buf[i] = buf[i];
624ba7319e9SDmitry Salychev 	cmd.bpid = bpid;
625ba7319e9SDmitry Salychev 	cmd.verb |= 1 << 5; /* Switch release buffer command to valid. */
626ba7319e9SDmitry Salychev 
627ba7319e9SDmitry Salychev 	error = dpaa2_swp_exec_br_command(swp, (struct dpaa2_swp_cmd *) &cmd,
628ba7319e9SDmitry Salychev 	    buf_num);
629ba7319e9SDmitry Salychev 	if (error) {
630ba7319e9SDmitry Salychev 		device_printf(swp->desc->dpio_dev, "buffers release command "
631ba7319e9SDmitry Salychev 		    "failed\n");
632ba7319e9SDmitry Salychev 		return (error);
633ba7319e9SDmitry Salychev 	}
634ba7319e9SDmitry Salychev 
635ba7319e9SDmitry Salychev 	return (0);
636ba7319e9SDmitry Salychev }
637ba7319e9SDmitry Salychev 
638ba7319e9SDmitry Salychev int
dpaa2_swp_dqrr_next_locked(struct dpaa2_swp * swp,struct dpaa2_dq * dq,uint32_t * idx)639ba7319e9SDmitry Salychev dpaa2_swp_dqrr_next_locked(struct dpaa2_swp *swp, struct dpaa2_dq *dq,
640ba7319e9SDmitry Salychev     uint32_t *idx)
641ba7319e9SDmitry Salychev {
642ba7319e9SDmitry Salychev 	struct resource_map *map = swp->cinh_map;
643ba7319e9SDmitry Salychev 	struct dpaa2_swp_rsp *rsp = (struct dpaa2_swp_rsp *) dq;
644ba7319e9SDmitry Salychev 	uint32_t verb, pi; /* producer index */
645ba7319e9SDmitry Salychev 	uint32_t offset = swp->cfg.mem_backed
646ba7319e9SDmitry Salychev 	    ? DPAA2_SWP_CENA_DQRR_MEM(swp->dqrr.next_idx)
647ba7319e9SDmitry Salychev 	    : DPAA2_SWP_CENA_DQRR(swp->dqrr.next_idx);
648ba7319e9SDmitry Salychev 
649ba7319e9SDmitry Salychev 	if (swp == NULL || dq == NULL)
650ba7319e9SDmitry Salychev 		return (EINVAL);
651ba7319e9SDmitry Salychev 
652ba7319e9SDmitry Salychev 	/*
653ba7319e9SDmitry Salychev 	 * Before using valid-bit to detect if something is there, we have to
654ba7319e9SDmitry Salychev 	 * handle the case of the DQRR reset bug...
655ba7319e9SDmitry Salychev 	 */
656ba7319e9SDmitry Salychev 	if (swp->dqrr.reset_bug) {
657ba7319e9SDmitry Salychev 		/*
658ba7319e9SDmitry Salychev 		 * We pick up new entries by cache-inhibited producer index,
659ba7319e9SDmitry Salychev 		 * which means that a non-coherent mapping would require us to
660ba7319e9SDmitry Salychev 		 * invalidate and read *only* once that PI has indicated that
661ba7319e9SDmitry Salychev 		 * there's an entry here. The first trip around the DQRR ring
662ba7319e9SDmitry Salychev 		 * will be much less efficient than all subsequent trips around
663ba7319e9SDmitry Salychev 		 * it...
664ba7319e9SDmitry Salychev 		 */
665ba7319e9SDmitry Salychev 		pi = dpaa2_swp_read_reg(swp, DPAA2_SWP_CINH_DQPI) & DQRR_PI_MASK;
666ba7319e9SDmitry Salychev 
667ba7319e9SDmitry Salychev 		/* There are new entries if pi != next_idx */
668ba7319e9SDmitry Salychev 		if (pi == swp->dqrr.next_idx)
669ba7319e9SDmitry Salychev 			return (ENOENT);
670ba7319e9SDmitry Salychev 
671ba7319e9SDmitry Salychev 		/*
672ba7319e9SDmitry Salychev 		 * If next_idx is/was the last ring index, and 'pi' is
673ba7319e9SDmitry Salychev 		 * different, we can disable the workaround as all the ring
674ba7319e9SDmitry Salychev 		 * entries have now been DMA'd to so valid-bit checking is
675ba7319e9SDmitry Salychev 		 * repaired.
676ba7319e9SDmitry Salychev 		 *
677ba7319e9SDmitry Salychev 		 * NOTE: This logic needs to be based on next_idx (which
678ba7319e9SDmitry Salychev 		 *	 increments one at a time), rather than on pi (which
679ba7319e9SDmitry Salychev 		 *	 can burst and wrap-around between our snapshots of it).
680ba7319e9SDmitry Salychev 		 */
681ba7319e9SDmitry Salychev 		if (swp->dqrr.next_idx == (swp->dqrr.ring_size - 1))
682ba7319e9SDmitry Salychev 			swp->dqrr.reset_bug = 0;
683ba7319e9SDmitry Salychev 	}
684ba7319e9SDmitry Salychev 
685ba7319e9SDmitry Salychev 	verb = bus_read_4(map, offset);
686ba7319e9SDmitry Salychev 	if ((verb & DPAA2_SWP_VALID_BIT) != swp->dqrr.valid_bit)
687ba7319e9SDmitry Salychev 		return (ENOENT);
688ba7319e9SDmitry Salychev 
689ba7319e9SDmitry Salychev 	/* Read dequeue response message. */
690ba7319e9SDmitry Salychev 	for (int i = 0; i < DPAA2_SWP_RSP_PARAMS_N; i++)
691ba7319e9SDmitry Salychev 		rsp->params[i] = bus_read_8(map, offset + i * sizeof(uint64_t));
692ba7319e9SDmitry Salychev 
693ba7319e9SDmitry Salychev 	/* Return index of the current entry (if requested). */
694ba7319e9SDmitry Salychev 	if (idx != NULL)
695ba7319e9SDmitry Salychev 		*idx = swp->dqrr.next_idx;
696ba7319e9SDmitry Salychev 
697ba7319e9SDmitry Salychev 	/*
698ba7319e9SDmitry Salychev 	 * There's something there. Move "next_idx" attention to the next ring
699ba7319e9SDmitry Salychev 	 * entry before returning what we found.
700ba7319e9SDmitry Salychev 	 */
701ba7319e9SDmitry Salychev 	swp->dqrr.next_idx++;
702ba7319e9SDmitry Salychev 	swp->dqrr.next_idx &= swp->dqrr.ring_size - 1; /* wrap around */
703ba7319e9SDmitry Salychev 	if (swp->dqrr.next_idx == 0u)
704ba7319e9SDmitry Salychev 		swp->dqrr.valid_bit ^= DPAA2_SWP_VALID_BIT;
705ba7319e9SDmitry Salychev 
706ba7319e9SDmitry Salychev 	return (0);
707ba7319e9SDmitry Salychev }
708ba7319e9SDmitry Salychev 
709ba7319e9SDmitry Salychev int
dpaa2_swp_pull(struct dpaa2_swp * swp,uint16_t chan_id,struct dpaa2_buf * buf,uint32_t frames_n)710ba7319e9SDmitry Salychev dpaa2_swp_pull(struct dpaa2_swp *swp, uint16_t chan_id, struct dpaa2_buf *buf,
711ba7319e9SDmitry Salychev     uint32_t frames_n)
712ba7319e9SDmitry Salychev {
713ba7319e9SDmitry Salychev 	/* NOTE: 64 bytes command. */
714ba7319e9SDmitry Salychev 	struct __packed {
715ba7319e9SDmitry Salychev 		uint8_t		verb;
716ba7319e9SDmitry Salychev 		uint8_t		numf;
717ba7319e9SDmitry Salychev 		uint8_t		tok;
718ba7319e9SDmitry Salychev 		uint8_t		_reserved;
719ba7319e9SDmitry Salychev 		uint32_t	dq_src;
720ba7319e9SDmitry Salychev 		uint64_t	rsp_addr;
721ba7319e9SDmitry Salychev 		uint64_t	_reserved1[6];
722ba7319e9SDmitry Salychev 	} cmd = {0};
723ba7319e9SDmitry Salychev 	struct dpaa2_dq *msg;
724ba7319e9SDmitry Salychev 	uint16_t flags;
725ba7319e9SDmitry Salychev 	int i, error;
726ba7319e9SDmitry Salychev 
727ba7319e9SDmitry Salychev 	KASSERT(frames_n != 0u, ("%s: cannot pull zero frames", __func__));
728ba7319e9SDmitry Salychev 	KASSERT(frames_n <= 16u, ("%s: too much frames to pull", __func__));
729ba7319e9SDmitry Salychev 
730ba7319e9SDmitry Salychev 	cmd.numf = frames_n - 1;
731ba7319e9SDmitry Salychev 	cmd.tok = DPAA2_SWP_VDQCR_TOKEN;
732ba7319e9SDmitry Salychev 	cmd.dq_src = chan_id;
733*58983e4bSDmitry Salychev 	cmd.rsp_addr = (uint64_t)buf->paddr;
734ba7319e9SDmitry Salychev 
735ba7319e9SDmitry Salychev 	/* Dequeue command type */
736ba7319e9SDmitry Salychev 	cmd.verb &= ~(1 << QB_VDQCR_VERB_DCT0_SHIFT);
737ba7319e9SDmitry Salychev 	cmd.verb |=  (1 << QB_VDQCR_VERB_DCT1_SHIFT);
738ba7319e9SDmitry Salychev 	/* Dequeue from a specific software portal channel (ID's in DQ_SRC). */
739ba7319e9SDmitry Salychev 	cmd.verb &= ~(1 << QB_VDQCR_VERB_DT0_SHIFT);
740ba7319e9SDmitry Salychev 	cmd.verb &= ~(1 << QB_VDQCR_VERB_DT1_SHIFT);
741ba7319e9SDmitry Salychev 	/* Write the response to this command into memory (at the RSP_ADDR). */
742ba7319e9SDmitry Salychev 	cmd.verb |=  (1 << QB_VDQCR_VERB_RLS_SHIFT);
743ba7319e9SDmitry Salychev 	/* Response writes won't attempt to allocate into a cache. */
744ba7319e9SDmitry Salychev 	cmd.verb &= ~(1 << QB_VDQCR_VERB_WAE_SHIFT);
745ba7319e9SDmitry Salychev 	/* Allow the FQ to remain active in the portal after dequeue. */
746ba7319e9SDmitry Salychev 	cmd.verb &= ~(1 << QB_VDQCR_VERB_RAD_SHIFT);
747ba7319e9SDmitry Salychev 
748ba7319e9SDmitry Salychev 	DPAA2_SWP_LOCK(swp, &flags);
749ba7319e9SDmitry Salychev 	if (flags & DPAA2_SWP_DESTROYED) {
750ba7319e9SDmitry Salychev 		/* Terminate operation if portal is destroyed. */
751ba7319e9SDmitry Salychev 		DPAA2_SWP_UNLOCK(swp);
752ba7319e9SDmitry Salychev 		return (ENOENT);
753ba7319e9SDmitry Salychev 	}
754ba7319e9SDmitry Salychev 
755ba7319e9SDmitry Salychev 	error = dpaa2_swp_exec_vdc_command_locked(swp,
756ba7319e9SDmitry Salychev 	    (struct dpaa2_swp_cmd *) &cmd);
757ba7319e9SDmitry Salychev 	if (error != 0) {
758ba7319e9SDmitry Salychev 		DPAA2_SWP_UNLOCK(swp);
759ba7319e9SDmitry Salychev 		return (error);
760ba7319e9SDmitry Salychev 	}
761ba7319e9SDmitry Salychev 
762ba7319e9SDmitry Salychev 	/* Let's sync before reading VDQ response from QBMan. */
763*58983e4bSDmitry Salychev 	bus_dmamap_sync(buf->dmat, buf->dmap, BUS_DMASYNC_POSTREAD);
764ba7319e9SDmitry Salychev 
765ba7319e9SDmitry Salychev 	/* Read VDQ response from QBMan. */
766*58983e4bSDmitry Salychev 	msg = (struct dpaa2_dq *)buf->vaddr;
767ba7319e9SDmitry Salychev 	for (i = 1; i <= CMD_SPIN_ATTEMPTS; i++) {
768ba7319e9SDmitry Salychev 		if ((msg->fdr.desc.stat & DPAA2_DQ_STAT_VOLATILE) &&
769ba7319e9SDmitry Salychev 		    (msg->fdr.desc.tok == DPAA2_SWP_VDQCR_TOKEN)) {
770ba7319e9SDmitry Salychev 			/* Reset token. */
771ba7319e9SDmitry Salychev 			msg->fdr.desc.tok = 0;
772ba7319e9SDmitry Salychev 			break;
773ba7319e9SDmitry Salychev 		}
774ba7319e9SDmitry Salychev 		DELAY(CMD_SPIN_TIMEOUT);
775ba7319e9SDmitry Salychev 	}
776ba7319e9SDmitry Salychev 	DPAA2_SWP_UNLOCK(swp);
777ba7319e9SDmitry Salychev 
778ba7319e9SDmitry Salychev 	/* Return an error on expired timeout. */
779ba7319e9SDmitry Salychev 	return (i > CMD_SPIN_ATTEMPTS ? ETIMEDOUT : 0);
780ba7319e9SDmitry Salychev }
781ba7319e9SDmitry Salychev 
782ba7319e9SDmitry Salychev /**
783ba7319e9SDmitry Salychev  * @brief Issue a command to enqueue a frame using one enqueue descriptor.
784ba7319e9SDmitry Salychev  *
785ba7319e9SDmitry Salychev  * swp:		Software portal used to send this command to.
786ba7319e9SDmitry Salychev  * ed:		Enqueue command descriptor.
787ba7319e9SDmitry Salychev  * fd:		Frame descriptor to enqueue.
788ba7319e9SDmitry Salychev  */
int
dpaa2_swp_enq(struct dpaa2_swp *swp, struct dpaa2_eq_desc *ed,
    struct dpaa2_fd *fd)
{
	uint32_t flags = 0;
	int rc = dpaa2_swp_enq_mult(swp, ed, fd, &flags, 1);

	/*
	 * dpaa2_swp_enq_mult() returns either the number of enqueued frames
	 * or a POSITIVE errno (EINVAL, ENOENT), so the previous "rc >= 0"
	 * check was always true and every failure (ring full, bad arguments,
	 * destroyed portal) was silently reported as success. Exactly one
	 * frame was requested, hence success means exactly one was enqueued.
	 */
	return (rc == 1 ? 0 : EBUSY);
}
798ba7319e9SDmitry Salychev 
799ba7319e9SDmitry Salychev /**
800ba7319e9SDmitry Salychev  * @brief Issue a command to enqueue frames using one enqueue descriptor.
801ba7319e9SDmitry Salychev  *
802ba7319e9SDmitry Salychev  * swp:		Software portal used to send this command to.
803ba7319e9SDmitry Salychev  * ed:		Enqueue command descriptor.
804ba7319e9SDmitry Salychev  * fd:		Frame descriptor to enqueue.
805ba7319e9SDmitry Salychev  * flags:	Table pointer of QBMAN_ENQUEUE_FLAG_DCA flags, not used if NULL.
806ba7319e9SDmitry Salychev  * frames_n:	Number of FDs to enqueue.
807ba7319e9SDmitry Salychev  *
808ba7319e9SDmitry Salychev  * NOTE: Enqueue command (64 bytes): 32 (eq. descriptor) + 32 (frame descriptor).
809ba7319e9SDmitry Salychev  */
int
dpaa2_swp_enq_mult(struct dpaa2_swp *swp, struct dpaa2_eq_desc *ed,
    struct dpaa2_fd *fd, uint32_t *flags, int frames_n)
{
	/* Byte/word/dword views of the same descriptors for partial writes. */
	const uint8_t  *ed_pdat8 =  (const uint8_t *) ed;
	const uint32_t *ed_pdat32 = (const uint32_t *) ed;
	const uint64_t *ed_pdat64 = (const uint64_t *) ed;
	const uint64_t *fd_pdat64 = (const uint64_t *) fd;
	struct resource_map *map;
	uint32_t eqcr_ci, eqcr_pi; /* EQCR consumer/producer index */
	uint32_t half_mask, full_mask, val, ci_offset;
	uint16_t swp_flags;
	int num_enq = 0;

	if (swp == NULL || ed == NULL || fd == NULL || flags == NULL ||
	    frames_n == 0)
		return (EINVAL);

	DPAA2_SWP_LOCK(swp, &swp_flags);
	if (swp_flags & DPAA2_SWP_DESTROYED) {
		/* Terminate operation if portal is destroyed. */
		DPAA2_SWP_UNLOCK(swp);
		return (ENOENT);
	}

	map = swp->cfg.writes_cinh ? swp->cinh_map : swp->cena_map;
	ci_offset = swp->cfg.mem_backed
	    ? DPAA2_SWP_CENA_EQCR_CI_MEMBACK
	    : DPAA2_SWP_CENA_EQCR_CI;

	/*
	 * full_mask covers the whole PI/CI counter range (two revolutions of
	 * the ring, see dpaa2_swp_cyc_diff()); half_mask selects the actual
	 * EQCR ring slot.
	 */
	half_mask = swp->eqcr.pi_ci_mask >> 1;
	full_mask = swp->eqcr.pi_ci_mask;

	/*
	 * Out of cached ring space: refresh the consumer index from hardware
	 * and recompute how many slots QBMan has drained since the last read.
	 */
	if (swp->eqcr.available == 0) {
		val = dpaa2_swp_read_reg(swp, ci_offset);
		eqcr_ci = swp->eqcr.ci;
		swp->eqcr.ci = val & full_mask;

		swp->eqcr.available = dpaa2_swp_cyc_diff(swp->eqcr.pi_ring_size,
		    eqcr_ci, swp->eqcr.ci);

		if (swp->eqcr.available == 0) {
			/* Still no room: report zero frames enqueued. */
			DPAA2_SWP_UNLOCK(swp);
			return (0);
		}
	}

	eqcr_pi = swp->eqcr.pi;
	num_enq = swp->eqcr.available < frames_n
	    ? swp->eqcr.available : frames_n;
	swp->eqcr.available -= num_enq;

	KASSERT(num_enq >= 0 && num_enq <= swp->eqcr.pi_ring_size,
	    ("%s: unexpected num_enq=%d", __func__, num_enq));
	KASSERT(swp->eqcr.available >= 0 &&
	    swp->eqcr.available <= swp->eqcr.pi_ring_size,
	    ("%s: unexpected eqcr.available=%d", __func__, swp->eqcr.available));

	/*
	 * Phase 1: fill in the EQCR ring entries with everything EXCEPT the
	 * VERB byte. The VERB (with its valid bit) is written last, in phase
	 * 2 below, so that QBMan cannot consume a half-written entry.
	 */
	for (int i = 0; i < num_enq; i++) {
		/* Write enq. desc. without the VERB, DCA, SEQNUM and OPRID. */
		for (int j = 1; j <= 3; j++)
			bus_write_8(map,
			    DPAA2_SWP_CENA_EQCR(eqcr_pi & half_mask) +
			    sizeof(uint64_t) * j, ed_pdat64[j]);
		/* Write OPRID. */
		bus_write_4(map,
		    DPAA2_SWP_CENA_EQCR(eqcr_pi & half_mask) + sizeof(uint32_t),
		    ed_pdat32[1]);
		/* Write DCA and SEQNUM without VERB byte. */
		for (int j = 1; j <= 3; j++)
			bus_write_1(map,
			    DPAA2_SWP_CENA_EQCR(eqcr_pi & half_mask) +
			    sizeof(uint8_t) * j, ed_pdat8[j]);

		/* Write frame descriptor. */
		for (int j = 0; j <= 3; j++)
			bus_write_8(map,
			    DPAA2_SWP_CENA_EQCR(eqcr_pi & half_mask) +
			    ENQ_DESC_FD_OFFSET +
			    sizeof(uint64_t) * j, fd_pdat64[j]);
		eqcr_pi++;
	}

	/* Order the payload writes before the VERB writes below. */
	wmb();

	/* Write the VERB byte of enqueue descriptor. */
	eqcr_pi = swp->eqcr.pi;
	for (int i = 0; i < num_enq; i++) {
		bus_write_1(map,
		    DPAA2_SWP_CENA_EQCR(eqcr_pi & half_mask),
		    ed_pdat8[0] | swp->eqcr.pi_vb);

		if (flags && (flags[i] & ENQ_FLAG_DCA)) {
			/* Update DCA byte. */
			bus_write_1(map,
			    DPAA2_SWP_CENA_EQCR(eqcr_pi & half_mask) + 1,
			    (1 << ENQ_CMD_DCA_EN_SHIFT) |
			    (flags[i] & ENQ_DCA_IDXMASK));
		}
		eqcr_pi++;
		/* Toggle the valid bit each time the ring wraps around. */
		if (!(eqcr_pi & half_mask))
			swp->eqcr.pi_vb ^= DPAA2_SWP_VALID_BIT;
	}
	swp->eqcr.pi = eqcr_pi & full_mask;

	DPAA2_SWP_UNLOCK(swp);

	/* Number of frames actually enqueued (may be less than frames_n). */
	return (num_enq);
}
920ba7319e9SDmitry Salychev 
/*
 * Distance from 'first' (included) to 'last' (excluded) on a cyclic counter
 * that spans two revolutions of the ring (2 * ringsize), as used by the EQCR
 * producer/consumer indexes.
 */
static int
dpaa2_swp_cyc_diff(uint8_t ringsize, uint8_t first, uint8_t last)
{
	if (first <= last)
		return (last - first);
	return (2 * ringsize - (first - last));
}
928ba7319e9SDmitry Salychev 
929ba7319e9SDmitry Salychev /**
930ba7319e9SDmitry Salychev  * @brief Execute Buffer Release Command (BRC).
931ba7319e9SDmitry Salychev  */
static int
dpaa2_swp_exec_br_command(struct dpaa2_swp *swp, struct dpaa2_swp_cmd *cmd,
    uint32_t buf_num)
{
	/* Overlay giving named access to the first (VERB) byte of "cmd". */
	struct __packed with_verb {
		uint8_t	verb;
		uint8_t	_reserved[63];
	} *c;
	const uint8_t *cmd_pdat8 = (const uint8_t *) cmd->params;
	const uint32_t *cmd_pdat32 = (const uint32_t *) cmd->params;
	struct resource_map *map;
	uint32_t offset, rar; /* Release Array Allocation register */
	uint16_t flags;

	if (!swp || !cmd)
		return (EINVAL);

	DPAA2_SWP_LOCK(swp, &flags);
	if (flags & DPAA2_SWP_DESTROYED) {
		/* Terminate operation if portal is destroyed. */
		DPAA2_SWP_UNLOCK(swp);
		return (ENOENT);
	}

	/* Allocate a slot in the release command ring (RCR). */
	rar = dpaa2_swp_read_reg(swp, DPAA2_SWP_CINH_RAR);
	if (!RAR_SUCCESS(rar)) {
		/* No RCR slot available right now; let the caller retry. */
		DPAA2_SWP_UNLOCK(swp);
		return (EBUSY);
	}

	map = swp->cfg.writes_cinh ? swp->cinh_map : swp->cena_map;
	offset = swp->cfg.mem_backed
	    ? DPAA2_SWP_CENA_RCR_MEM(RAR_IDX(rar))
	    : DPAA2_SWP_CENA_RCR(RAR_IDX(rar));
	c = (struct with_verb *) cmd;

	/* Write command bytes (without VERB byte). */
	for (uint32_t i = 1; i < DPAA2_SWP_CMD_PARAMS_N; i++)
		bus_write_8(map, offset + sizeof(uint64_t) * i, cmd->params[i]);
	bus_write_4(map, offset + 4, cmd_pdat32[1]);
	for (uint32_t i = 1; i <= 3; i++)
		bus_write_1(map, offset + i, cmd_pdat8[i]);

	/*
	 * Write VERB byte and trigger command execution. The VERB (with the
	 * RAR valid bit and buffer count) goes last so QBMan never sees a
	 * partially written command; the barrier placement differs between
	 * the memory-backed and the legacy portal flavors.
	 */
	if (swp->cfg.mem_backed) {
		bus_write_1(map, offset, c->verb | RAR_VB(rar) | buf_num);
		wmb();
		dpaa2_swp_write_reg(swp, DPAA2_SWP_CINH_RCR_AM_RT +
		    RAR_IDX(rar) * 4, DPAA2_SWP_RT_MODE);
	} else {
		wmb();
		bus_write_1(map, offset, c->verb | RAR_VB(rar) | buf_num);
	}

	DPAA2_SWP_UNLOCK(swp);

	return (0);
}
990ba7319e9SDmitry Salychev 
991ba7319e9SDmitry Salychev /**
992ba7319e9SDmitry Salychev  * @brief Execute Volatile Dequeue Command (VDC).
993ba7319e9SDmitry Salychev  *
994ba7319e9SDmitry Salychev  * This command will be executed by QBMan only once in order to deliver requested
995ba7319e9SDmitry Salychev  * number of frames (1-16 or 1-32 depending on QBMan version) to the driver via
996ba7319e9SDmitry Salychev  * DQRR or arbitrary DMA-mapped memory.
997ba7319e9SDmitry Salychev  *
998ba7319e9SDmitry Salychev  * NOTE: There is a counterpart to the volatile dequeue command called static
999ba7319e9SDmitry Salychev  *	 dequeue command (SDQC) which is executed periodically all the time the
1000ba7319e9SDmitry Salychev  *	 command is present in the SDQCR register.
1001ba7319e9SDmitry Salychev  */
static int
dpaa2_swp_exec_vdc_command_locked(struct dpaa2_swp *swp,
    struct dpaa2_swp_cmd *cmd)
{
	/* Overlay giving named access to the first (VERB) byte of "cmd". */
	struct __packed with_verb {
		uint8_t	verb;
		uint8_t	_reserved[63];
	} *c;
	const uint8_t *p8 = (const uint8_t *) cmd->params;
	const uint32_t *p32 = (const uint32_t *) cmd->params;
	struct resource_map *map;
	uint32_t offset;

	/* NOTE: caller holds the portal lock and has validated "swp"/"cmd". */
	map = swp->cfg.writes_cinh ? swp->cinh_map : swp->cena_map;
	offset = swp->cfg.mem_backed
	    ? DPAA2_SWP_CENA_VDQCR_MEM : DPAA2_SWP_CENA_VDQCR;
	c = (struct with_verb *) cmd;

	/* Write command bytes (without VERB byte). */
	for (uint32_t i = 1; i < DPAA2_SWP_CMD_PARAMS_N; i++)
		bus_write_8(map, offset + sizeof(uint64_t) * i, cmd->params[i]);
	bus_write_4(map, offset + 4, p32[1]);
	for (uint32_t i = 1; i <= 3; i++)
		bus_write_1(map, offset + i, p8[i]);

	/*
	 * Write VERB byte and trigger command execution. The VERB (carrying
	 * the VDQCR valid bit, toggled on every submission) is written last so
	 * QBMan never consumes a partially written command; barrier placement
	 * differs between memory-backed and legacy portals.
	 */
	if (swp->cfg.mem_backed) {
		bus_write_1(map, offset, c->verb | swp->vdq.valid_bit);
		swp->vdq.valid_bit ^= DPAA2_SWP_VALID_BIT;
		wmb();
		dpaa2_swp_write_reg(swp, DPAA2_SWP_CINH_VDQCR_RT,
		    DPAA2_SWP_RT_MODE);
	} else {
		wmb();
		bus_write_1(map, offset, c->verb | swp->vdq.valid_bit);
		swp->vdq.valid_bit ^= DPAA2_SWP_VALID_BIT;
	}

	return (0);
}
1042ba7319e9SDmitry Salychev 
1043ba7319e9SDmitry Salychev /**
1044ba7319e9SDmitry Salychev  * @brief Execute a QBMan management command.
1045ba7319e9SDmitry Salychev  */
1046ba7319e9SDmitry Salychev static int
dpaa2_swp_exec_mgmt_command(struct dpaa2_swp * swp,struct dpaa2_swp_cmd * cmd,struct dpaa2_swp_rsp * rsp,uint8_t cmdid)1047ba7319e9SDmitry Salychev dpaa2_swp_exec_mgmt_command(struct dpaa2_swp *swp, struct dpaa2_swp_cmd *cmd,
1048ba7319e9SDmitry Salychev     struct dpaa2_swp_rsp *rsp, uint8_t cmdid)
1049ba7319e9SDmitry Salychev {
1050ba7319e9SDmitry Salychev #if (defined(_KERNEL) && defined(INVARIANTS))
1051ba7319e9SDmitry Salychev 	struct __packed with_verb {
1052ba7319e9SDmitry Salychev 		uint8_t	verb;
1053ba7319e9SDmitry Salychev 		uint8_t	_reserved[63];
1054ba7319e9SDmitry Salychev 	} *r;
1055ba7319e9SDmitry Salychev #endif
1056ba7319e9SDmitry Salychev 	uint16_t flags;
1057ba7319e9SDmitry Salychev 	int error;
1058ba7319e9SDmitry Salychev 
1059ba7319e9SDmitry Salychev 	if (swp == NULL || cmd == NULL || rsp == NULL)
1060ba7319e9SDmitry Salychev 		return (EINVAL);
1061ba7319e9SDmitry Salychev 
1062ba7319e9SDmitry Salychev 	DPAA2_SWP_LOCK(swp, &flags);
1063ba7319e9SDmitry Salychev 	if (flags & DPAA2_SWP_DESTROYED) {
1064ba7319e9SDmitry Salychev 		/* Terminate operation if portal is destroyed. */
1065ba7319e9SDmitry Salychev 		DPAA2_SWP_UNLOCK(swp);
1066ba7319e9SDmitry Salychev 		return (ENOENT);
1067ba7319e9SDmitry Salychev 	}
1068ba7319e9SDmitry Salychev 
1069ba7319e9SDmitry Salychev 	/*
1070ba7319e9SDmitry Salychev 	 * Send a command to QBMan using Management Command register and wait
1071ba7319e9SDmitry Salychev 	 * for response from the Management Response registers.
1072ba7319e9SDmitry Salychev 	 */
1073ba7319e9SDmitry Salychev 	dpaa2_swp_send_mgmt_command(swp, cmd, cmdid);
1074ba7319e9SDmitry Salychev 	error = dpaa2_swp_wait_for_mgmt_response(swp, rsp);
1075ba7319e9SDmitry Salychev 	if (error) {
1076ba7319e9SDmitry Salychev 		DPAA2_SWP_UNLOCK(swp);
1077ba7319e9SDmitry Salychev 		return (error);
1078ba7319e9SDmitry Salychev 	}
1079ba7319e9SDmitry Salychev 	DPAA2_SWP_UNLOCK(swp);
1080ba7319e9SDmitry Salychev 
1081ba7319e9SDmitry Salychev #if (defined(_KERNEL) && defined(INVARIANTS))
1082ba7319e9SDmitry Salychev 	r = (struct with_verb *) rsp;
1083ba7319e9SDmitry Salychev 	KASSERT((r->verb & CMD_VERB_MASK) == cmdid,
1084ba7319e9SDmitry Salychev 	    ("wrong VERB byte in response: resp=0x%02x, expected=0x%02x",
1085ba7319e9SDmitry Salychev 	    r->verb, cmdid));
1086ba7319e9SDmitry Salychev #endif
1087ba7319e9SDmitry Salychev 
1088ba7319e9SDmitry Salychev 	return (0);
1089ba7319e9SDmitry Salychev }
1090ba7319e9SDmitry Salychev 
1091ba7319e9SDmitry Salychev static int
dpaa2_swp_send_mgmt_command(struct dpaa2_swp * swp,struct dpaa2_swp_cmd * cmd,uint8_t cmdid)1092ba7319e9SDmitry Salychev dpaa2_swp_send_mgmt_command(struct dpaa2_swp *swp, struct dpaa2_swp_cmd *cmd,
1093ba7319e9SDmitry Salychev     uint8_t cmdid)
1094ba7319e9SDmitry Salychev {
1095ba7319e9SDmitry Salychev 	const uint8_t *cmd_pdat8 = (const uint8_t *) cmd->params;
1096ba7319e9SDmitry Salychev 	const uint32_t *cmd_pdat32 = (const uint32_t *) cmd->params;
1097ba7319e9SDmitry Salychev 	struct resource_map *map;
1098ba7319e9SDmitry Salychev 	uint32_t offset;
1099ba7319e9SDmitry Salychev 
1100ba7319e9SDmitry Salychev 	map = swp->cfg.writes_cinh ? swp->cinh_map : swp->cena_map;
1101ba7319e9SDmitry Salychev 	offset = swp->cfg.mem_backed ? DPAA2_SWP_CENA_CR_MEM : DPAA2_SWP_CENA_CR;
1102ba7319e9SDmitry Salychev 
1103ba7319e9SDmitry Salychev 	/* Write command bytes (without VERB byte). */
1104ba7319e9SDmitry Salychev 	for (uint32_t i = 1; i < DPAA2_SWP_CMD_PARAMS_N; i++)
1105ba7319e9SDmitry Salychev 		bus_write_8(map, offset + sizeof(uint64_t) * i, cmd->params[i]);
1106ba7319e9SDmitry Salychev 	bus_write_4(map, offset + 4, cmd_pdat32[1]);
1107ba7319e9SDmitry Salychev 	for (uint32_t i = 1; i <= 3; i++)
1108ba7319e9SDmitry Salychev 		bus_write_1(map, offset + i, cmd_pdat8[i]);
1109ba7319e9SDmitry Salychev 
1110ba7319e9SDmitry Salychev 	/* Write VERB byte and trigger command execution. */
1111ba7319e9SDmitry Salychev 	if (swp->cfg.mem_backed) {
1112ba7319e9SDmitry Salychev 		bus_write_1(map, offset, cmdid | swp->mr.valid_bit);
1113ba7319e9SDmitry Salychev 		wmb();
1114ba7319e9SDmitry Salychev 		dpaa2_swp_write_reg(swp, DPAA2_SWP_CINH_CR_RT,
1115ba7319e9SDmitry Salychev 		    DPAA2_SWP_RT_MODE);
1116ba7319e9SDmitry Salychev 	} else {
1117ba7319e9SDmitry Salychev 		wmb();
1118ba7319e9SDmitry Salychev 		bus_write_1(map, offset, cmdid | swp->mc.valid_bit);
1119ba7319e9SDmitry Salychev 	}
1120ba7319e9SDmitry Salychev 
1121ba7319e9SDmitry Salychev 	return (0);
1122ba7319e9SDmitry Salychev }
1123ba7319e9SDmitry Salychev 
1124ba7319e9SDmitry Salychev static int
dpaa2_swp_wait_for_mgmt_response(struct dpaa2_swp * swp,struct dpaa2_swp_rsp * rsp)1125ba7319e9SDmitry Salychev dpaa2_swp_wait_for_mgmt_response(struct dpaa2_swp *swp, struct dpaa2_swp_rsp *rsp)
1126ba7319e9SDmitry Salychev {
1127ba7319e9SDmitry Salychev 	struct resource_map *map = swp->cfg.mem_backed
1128ba7319e9SDmitry Salychev 	    ? swp->cena_map : swp->cinh_map;
1129ba7319e9SDmitry Salychev 	/* Management command response to be read from the only RR or RR0/RR1. */
1130ba7319e9SDmitry Salychev 	const uint32_t offset = swp->cfg.mem_backed
1131ba7319e9SDmitry Salychev 	    ? DPAA2_SWP_CENA_RR_MEM
1132ba7319e9SDmitry Salychev 	    : DPAA2_SWP_CENA_RR(swp->mc.valid_bit);
1133ba7319e9SDmitry Salychev 	uint32_t i, verb, ret;
1134ba7319e9SDmitry Salychev 	int rc;
1135ba7319e9SDmitry Salychev 
1136ba7319e9SDmitry Salychev 	/* Wait for a command response from QBMan. */
1137ba7319e9SDmitry Salychev 	for (i = 1; i <= CMD_SPIN_ATTEMPTS; i++) {
1138ba7319e9SDmitry Salychev 		if (swp->cfg.mem_backed) {
1139ba7319e9SDmitry Salychev 			verb = (uint32_t) (bus_read_4(map, offset) & 0xFFu);
1140ba7319e9SDmitry Salychev 			if (swp->mr.valid_bit != (verb & DPAA2_SWP_VALID_BIT))
1141ba7319e9SDmitry Salychev 				goto wait;
1142ba7319e9SDmitry Salychev 			if (!(verb & ~DPAA2_SWP_VALID_BIT))
1143ba7319e9SDmitry Salychev 				goto wait;
1144ba7319e9SDmitry Salychev 			swp->mr.valid_bit ^= DPAA2_SWP_VALID_BIT;
1145ba7319e9SDmitry Salychev 		} else {
1146ba7319e9SDmitry Salychev 			ret = bus_read_4(map, offset);
1147ba7319e9SDmitry Salychev 			verb = ret & ~DPAA2_SWP_VALID_BIT; /* remove valid bit */
1148ba7319e9SDmitry Salychev 			if (verb == 0u)
1149ba7319e9SDmitry Salychev 				goto wait;
1150ba7319e9SDmitry Salychev 			swp->mc.valid_bit ^= DPAA2_SWP_VALID_BIT;
1151ba7319e9SDmitry Salychev 		}
1152ba7319e9SDmitry Salychev 		break;
1153ba7319e9SDmitry Salychev  wait:
1154ba7319e9SDmitry Salychev 		DELAY(CMD_SPIN_TIMEOUT);
1155ba7319e9SDmitry Salychev 	}
1156ba7319e9SDmitry Salychev 	/* Return an error on expired timeout. */
1157ba7319e9SDmitry Salychev 	rc = i > CMD_SPIN_ATTEMPTS ? ETIMEDOUT : 0;
1158ba7319e9SDmitry Salychev 
1159ba7319e9SDmitry Salychev 	/* Read command response. */
1160ba7319e9SDmitry Salychev 	for (i = 0; i < DPAA2_SWP_RSP_PARAMS_N; i++)
1161ba7319e9SDmitry Salychev 		rsp->params[i] = bus_read_8(map, offset + i * sizeof(uint64_t));
1162ba7319e9SDmitry Salychev 
1163ba7319e9SDmitry Salychev 	return (rc);
1164ba7319e9SDmitry Salychev }
1165