/*-
 * SPDX-License-Identifier: BSD-3-Clause AND BSD-2-Clause
 *
 * Copyright © 2014-2016 Freescale Semiconductor, Inc.
 * Copyright © 2016-2019 NXP
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its
 *    contributors may be used to endorse or promote products derived from this
 *    software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Original source file obtained from:
 * drivers/soc/fsl/dpio/qbman-portal.c
 *
 * Commit: 4c86114194e644b6da9107d75910635c9e87179e
 * Repository: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
 */

/*
 * Copyright © 2021-2022 Dmitry Salychev
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * DPAA2 QBMan software portal.
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/condvar.h>
#include <sys/lock.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/atomic.h>

#include "pcib_if.h"
#include "pci_if.h"

#include "dpaa2_swp.h"
#include "dpaa2_mc.h"
#include "dpaa2_bp.h"

#define CMD_SPIN_TIMEOUT		100u	/* us */
#define CMD_SPIN_ATTEMPTS		2000u	/* 200 ms max. */

#define CMD_VERB_MASK			0x7Fu

/* Shifts in the VERB byte of the enqueue command descriptor. */
#define ENQ_CMD_ORP_ENABLE_SHIFT	2
#define ENQ_CMD_IRQ_ON_DISPATCH_SHIFT	3
#define ENQ_CMD_TARGET_TYPE_SHIFT	4
#define ENQ_CMD_DCA_EN_SHIFT		7
/* VERB byte options of the enqueue command descriptor. */
#define ENQ_CMD_EMPTY			0u
#define ENQ_CMD_RESPONSE_ALWAYS		1u
#define ENQ_CMD_REJECTS_TO_FQ		2u

#define ENQ_DESC_FD_OFFSET		32u

#define ENQ_DCA_IDXMASK			0x0Fu
#define ENQ_FLAG_DCA			(1ull << 31)

/* QBMan portal command codes. */
#define CMDID_SWP_MC_ACQUIRE		0x30
#define CMDID_SWP_BP_QUERY		0x32
#define CMDID_SWP_WQCHAN_CONFIGURE	0x46

/* QBMan portal command result codes. */
#define QBMAN_CMD_RC_OK			0xF0

/* SDQCR attribute codes */
#define QB_SDQCR_FC_SHIFT		29u
#define QB_SDQCR_FC_MASK		0x1u
#define QB_SDQCR_DCT_SHIFT		24u
#define QB_SDQCR_DCT_MASK		0x3u
#define QB_SDQCR_TOK_SHIFT		16u
#define QB_SDQCR_TOK_MASK		0xFFu
#define QB_SDQCR_SRC_SHIFT		0u
#define QB_SDQCR_SRC_MASK		0xFFFFu

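/*
 * For reference, the SDQCR fields implied by the shifts and masks above:
 * FC (dequeue frame count selector) occupies bit 29, DCT (dequeue command
 * type) bits 25:24, TOK (token) bits 23:16 and SRC (the channel source map)
 * bits 15:0.
 */
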
/* Shifts in the VERB byte of the volatile dequeue command. */
#define QB_VDQCR_VERB_DCT0_SHIFT	0
#define QB_VDQCR_VERB_DCT1_SHIFT	1
#define QB_VDQCR_VERB_DT0_SHIFT		2
#define QB_VDQCR_VERB_DT1_SHIFT		3
#define QB_VDQCR_VERB_RLS_SHIFT		4
#define QB_VDQCR_VERB_WAE_SHIFT		5
#define QB_VDQCR_VERB_RAD_SHIFT		6

/* Maximum timeout period for the DQRR interrupt. */
#define DQRR_MAX_ITP			4096u
#define DQRR_PI_MASK			0x0Fu

/* Release Array Allocation register helpers. */
#define RAR_IDX(rar)			((rar) & 0x7u)
#define RAR_VB(rar)			((rar) & 0x80u)
#define RAR_SUCCESS(rar)		((rar) & 0x100u)

MALLOC_DEFINE(M_DPAA2_SWP, "dpaa2_swp", "DPAA2 QBMan Software Portal");

enum qbman_sdqcr_dct {
	qbman_sdqcr_dct_null = 0,
	qbman_sdqcr_dct_prio_ics,
	qbman_sdqcr_dct_active_ics,
	qbman_sdqcr_dct_active
};

enum qbman_sdqcr_fc {
	qbman_sdqcr_fc_one = 0,
	qbman_sdqcr_fc_up_to_3 = 1
};

/* Routines to execute software portal commands. */
static int dpaa2_swp_exec_mgmt_command(struct dpaa2_swp *,
    struct dpaa2_swp_cmd *, struct dpaa2_swp_rsp *, uint8_t);
static int dpaa2_swp_exec_br_command(struct dpaa2_swp *, struct dpaa2_swp_cmd *,
    uint32_t);
static int dpaa2_swp_exec_vdc_command_locked(struct dpaa2_swp *,
    struct dpaa2_swp_cmd *);

/* Management Commands helpers. */
static int dpaa2_swp_send_mgmt_command(struct dpaa2_swp *,
    struct dpaa2_swp_cmd *, uint8_t);
static int dpaa2_swp_wait_for_mgmt_response(struct dpaa2_swp *,
    struct dpaa2_swp_rsp *);

/* Helper subroutines. */
static int dpaa2_swp_cyc_diff(uint8_t, uint8_t, uint8_t);

int
dpaa2_swp_init_portal(struct dpaa2_swp **swp, struct dpaa2_swp_desc *desc,
    uint16_t flags)
{
	struct dpaa2_swp *p;
	uint32_t reg, mask_size, eqcr_pi; /* EQCR producer index */

	if (!swp || !desc)
		return (DPAA2_SWP_STAT_EINVAL);

	p = malloc(sizeof(struct dpaa2_swp), M_DPAA2_SWP,
	    flags & DPAA2_SWP_NOWAIT_ALLOC
	    ? (M_NOWAIT | M_ZERO)
	    : (M_WAITOK | M_ZERO));
	if (!p)
		return (DPAA2_SWP_STAT_NO_MEMORY);

	mtx_init(&p->lock, "swp_sleep_lock", NULL, MTX_DEF);

	p->cfg.mem_backed = false;
	p->cfg.writes_cinh = true;

	p->desc = desc;
	p->flags = flags;
	p->mc.valid_bit = DPAA2_SWP_VALID_BIT;
	p->mr.valid_bit = DPAA2_SWP_VALID_BIT;

	/* FIXME: Memory-backed mode doesn't work now. Why? */
	p->cena_res = desc->cena_res;
	p->cena_map = desc->cena_map;
	p->cinh_res = desc->cinh_res;
	p->cinh_map = desc->cinh_map;

	/* Static Dequeue Command Register configuration. */
	p->sdq = 0;
	p->sdq |= qbman_sdqcr_dct_prio_ics << QB_SDQCR_DCT_SHIFT;
	p->sdq |= qbman_sdqcr_fc_up_to_3 << QB_SDQCR_FC_SHIFT;
	p->sdq |= DPAA2_SWP_SDQCR_TOKEN << QB_SDQCR_TOK_SHIFT;

	/* Volatile Dequeue Command configuration. */
	p->vdq.valid_bit = DPAA2_SWP_VALID_BIT;

	/* Dequeue Response Ring configuration */
	p->dqrr.next_idx = 0;
	p->dqrr.valid_bit = DPAA2_SWP_VALID_BIT;
	if ((desc->swp_version & DPAA2_SWP_REV_MASK) < DPAA2_SWP_REV_4100) {
		p->dqrr.ring_size = 4;
		p->dqrr.reset_bug = 1;
	} else {
		p->dqrr.ring_size = 8;
		p->dqrr.reset_bug = 0;
	}

	if ((desc->swp_version & DPAA2_SWP_REV_MASK) < DPAA2_SWP_REV_5000) {
		reg = dpaa2_swp_set_cfg(
		    p->dqrr.ring_size, /* max. entries QMan writes to DQRR */
		    1, /* writes enabled in the CINH memory only */
		    0, /* EQCR_CI stashing threshold */
		    3, /* RPM: RCR in array mode */
		    2, /* DCM: Discrete consumption ack */
		    2, /* EPM: EQCR in ring mode (FIFO) */
		    1, /* mem stashing drop enable */
		    1, /* mem stashing priority enable */
		    1, /* mem stashing enable */
		    1, /* dequeue stashing priority enable */
		    0, /* dequeue stashing enable */
		    0  /* EQCR_CI stashing priority enable */
		);
		reg &= ~(1 << DPAA2_SWP_CFG_CPBS_SHIFT); /* QMan-backed mode */
	} else {
		bus_set_region_4(p->cena_map, 0, 0,
		    rman_get_size(p->cena_res) / 4);

		reg = dpaa2_swp_set_cfg(
		    p->dqrr.ring_size, /* max. entries QMan writes to DQRR */					/* DQRR_MF */
		    1, /* writes enabled in the CINH memory only */						/* WN */
		    0, /* EQCR_CI stashing is disabled */							/* EST */
		    3, /* RPM: RCR in array mode */								/* RPM */
		    2, /* DCM: Discrete consumption ack */							/* DCM */
		    2, /* EPM: EQCR in ring mode (FIFO) */							/* EPM */
		    1, /* Dequeued frame data, annotation, and FQ context stashing drop enable */		/* SD */
		    1, /* Dequeued frame data, annotation, and FQ context stashing priority */			/* SP */
		    1, /* Dequeued frame data, annotation, and FQ context stashing enable */			/* SE */
		    1, /* Dequeue response ring (DQRR) entry stashing priority */				/* DP */
		    0, /* Dequeue response ring (DQRR) entry, or cacheable portal area, stashing enable. */	/* DE */
		    0  /* EQCR_CI stashing priority */								/* EP */
		);
		/* TODO: Switch to memory-backed mode. */
		reg &= ~(1 << DPAA2_SWP_CFG_CPBS_SHIFT); /* QMan-backed mode */
	}
	dpaa2_swp_write_reg(p, DPAA2_SWP_CINH_CFG, reg);
	reg = dpaa2_swp_read_reg(p, DPAA2_SWP_CINH_CFG);
	if (!reg) {
		mtx_destroy(&p->lock);
		free(p, M_DPAA2_SWP);
		return (DPAA2_SWP_STAT_PORTAL_DISABLED);
	}

	/*
	 * Static Dequeue Command Register needs to be initialized to 0 when no
	 * channels are being dequeued from or else the QMan HW will indicate an
	 * error. The values that were calculated above will be applied when
	 * dequeues from a specific channel are enabled.
	 */
	dpaa2_swp_write_reg(p, DPAA2_SWP_CINH_SDQCR, 0);

	p->eqcr.pi_ring_size = 8;
	/* if ((desc->swp_version & DPAA2_SWP_REV_MASK) >= DPAA2_SWP_REV_5000) */
	/* 	p->eqcr.pi_ring_size = 32; */

	for (mask_size = p->eqcr.pi_ring_size; mask_size > 0; mask_size >>= 1)
		p->eqcr.pi_ci_mask = (p->eqcr.pi_ci_mask << 1) + 1;

	eqcr_pi = dpaa2_swp_read_reg(p, DPAA2_SWP_CINH_EQCR_PI);
	p->eqcr.pi = eqcr_pi & p->eqcr.pi_ci_mask;
	p->eqcr.pi_vb = eqcr_pi & DPAA2_SWP_VALID_BIT;
	p->eqcr.ci = dpaa2_swp_read_reg(p, DPAA2_SWP_CINH_EQCR_CI)
	    & p->eqcr.pi_ci_mask;
	p->eqcr.available = p->eqcr.pi_ring_size;

	/* Initialize the portal IRQ threshold and a 0 us holdoff timeout. */
	dpaa2_swp_set_irq_coalescing(p, p->dqrr.ring_size - 1, 0);

	*swp = p;

	return (0);
}

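/*
 * A minimal usage sketch (assuming a portal descriptor "desc" prepared by
 * the DPIO driver; flags of 0 request a blocking M_WAITOK allocation):
 *
 *	struct dpaa2_swp *swp;
 *	int error;
 *
 *	error = dpaa2_swp_init_portal(&swp, desc, 0);
 *	if (error)
 *		return (error);
 *	...
 *	dpaa2_swp_free_portal(swp);
 */
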
void
dpaa2_swp_free_portal(struct dpaa2_swp *swp)
{
	uint16_t flags;

	KASSERT(swp != NULL, ("%s: swp is NULL", __func__));

	DPAA2_SWP_LOCK(swp, &flags);
	swp->flags |= DPAA2_SWP_DESTROYED;
	DPAA2_SWP_UNLOCK(swp);

	/* Let threads stop using this portal. */
	DELAY(DPAA2_SWP_TIMEOUT);

	mtx_destroy(&swp->lock);
	free(swp, M_DPAA2_SWP);
}

uint32_t
dpaa2_swp_set_cfg(uint8_t max_fill, uint8_t wn, uint8_t est, uint8_t rpm,
    uint8_t dcm, uint8_t epm, int sd, int sp, int se, int dp, int de, int ep)
{
	return (
	    max_fill	<< DPAA2_SWP_CFG_DQRR_MF_SHIFT |
	    est		<< DPAA2_SWP_CFG_EST_SHIFT |
	    wn		<< DPAA2_SWP_CFG_WN_SHIFT |
	    rpm		<< DPAA2_SWP_CFG_RPM_SHIFT |
	    dcm		<< DPAA2_SWP_CFG_DCM_SHIFT |
	    epm		<< DPAA2_SWP_CFG_EPM_SHIFT |
	    sd		<< DPAA2_SWP_CFG_SD_SHIFT |
	    sp		<< DPAA2_SWP_CFG_SP_SHIFT |
	    se		<< DPAA2_SWP_CFG_SE_SHIFT |
	    dp		<< DPAA2_SWP_CFG_DP_SHIFT |
	    de		<< DPAA2_SWP_CFG_DE_SHIFT |
	    ep		<< DPAA2_SWP_CFG_EP_SHIFT
	);
}

/* Read/write registers of a software portal. */

void
dpaa2_swp_write_reg(struct dpaa2_swp *swp, uint32_t o, uint32_t v)
{
	bus_write_4(swp->cinh_map, o, v);
}

uint32_t
dpaa2_swp_read_reg(struct dpaa2_swp *swp, uint32_t o)
{
	return (bus_read_4(swp->cinh_map, o));
}

/* Helper routines. */

/**
 * @brief Set enqueue descriptor without Order Point Record ID.
 *
 * ed:		Enqueue descriptor.
 * resp_always:	Enqueue with response always (1); FD from a rejected enqueue
 *		will be returned on an FQ (0).
 */
void
dpaa2_swp_set_ed_norp(struct dpaa2_eq_desc *ed, bool resp_always)
{
	ed->verb &= ~(1 << ENQ_CMD_ORP_ENABLE_SHIFT);
	if (resp_always)
		ed->verb |= ENQ_CMD_RESPONSE_ALWAYS;
	else
		ed->verb |= ENQ_CMD_REJECTS_TO_FQ;
}

/**
 * @brief Set FQ of the enqueue descriptor.
 */
void
dpaa2_swp_set_ed_fq(struct dpaa2_eq_desc *ed, uint32_t fqid)
{
	ed->verb &= ~(1 << ENQ_CMD_TARGET_TYPE_SHIFT);
	ed->tgtid = fqid;
}

/**
 * @brief Enable interrupts for a software portal.
 */
void
dpaa2_swp_set_intr_trigger(struct dpaa2_swp *swp, uint32_t mask)
{
	if (swp != NULL)
		dpaa2_swp_write_reg(swp, DPAA2_SWP_CINH_IER, mask);
}

/**
 * @brief Return the value in the SWP_IER register.
 */
uint32_t
dpaa2_swp_get_intr_trigger(struct dpaa2_swp *swp)
{
	if (swp != NULL)
		return (dpaa2_swp_read_reg(swp, DPAA2_SWP_CINH_IER));
	return (0);
}

/**
 * @brief Return the value in the SWP_ISR register.
 */
uint32_t
dpaa2_swp_read_intr_status(struct dpaa2_swp *swp)
{
	if (swp != NULL)
		return (dpaa2_swp_read_reg(swp, DPAA2_SWP_CINH_ISR));
	return (0);
}

/**
 * @brief Clear SWP_ISR register according to the given mask.
 */
void
dpaa2_swp_clear_intr_status(struct dpaa2_swp *swp, uint32_t mask)
{
	if (swp != NULL)
		dpaa2_swp_write_reg(swp, DPAA2_SWP_CINH_ISR, mask);
}

/**
 * @brief Enable or disable push dequeue.
 *
 * swp:		the software portal object
 * chan_idx:	the channel index (0 to 15)
 * en:		enable or disable push dequeue
 */
void
dpaa2_swp_set_push_dequeue(struct dpaa2_swp *swp, uint8_t chan_idx, bool en)
{
	uint16_t dqsrc;

	if (swp != NULL) {
		if (chan_idx > 15u) {
			device_printf(swp->desc->dpio_dev, "channel index "
			    "should be <= 15: chan_idx=%d\n", chan_idx);
			return;
		}

		if (en)
			swp->sdq |= 1 << chan_idx;
		else
			swp->sdq &= ~(1 << chan_idx);
		/*
		 * Recompute the complete source map. If no channels are
		 * enabled, the SDQCR must be 0 or else QMan will assert
		 * errors.
		 */
		dqsrc = (swp->sdq >> DPAA2_SDQCR_SRC_SHIFT) &
		    DPAA2_SDQCR_SRC_MASK;
		dpaa2_swp_write_reg(swp, DPAA2_SWP_CINH_SDQCR, dqsrc != 0
		    ? swp->sdq : 0);
	}
}

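/*
 * For example, a sketch of enabling static (push) dequeues from WQ channel 0
 * through this portal, and disabling them again later:
 *
 *	dpaa2_swp_set_push_dequeue(swp, 0, true);
 *	...
 *	dpaa2_swp_set_push_dequeue(swp, 0, false);
 */
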
/**
 * @brief Set new IRQ coalescing values.
 *
 * swp:		The software portal object.
 * threshold:	Threshold for DQRR interrupt generation. The DQRR interrupt
 *		asserts when the ring contains more than "threshold" entries.
 * holdoff:	DQRR interrupt holdoff (timeout) period in us.
 */
int
dpaa2_swp_set_irq_coalescing(struct dpaa2_swp *swp, uint32_t threshold,
    uint32_t holdoff)
{
	uint32_t itp; /* Interrupt Timeout Period */

	if (swp == NULL)
		return (EINVAL);

	/*
	 * Convert the holdoff period from us to increments of 256 QBMan
	 * clock cycles; the conversion factor depends on the QBMan internal
	 * frequency.
	 */
	itp = (holdoff * 1000u) / swp->desc->swp_cycles_ratio;
	if (itp > DQRR_MAX_ITP)
		itp = DQRR_MAX_ITP;
	if (threshold >= swp->dqrr.ring_size)
		threshold = swp->dqrr.ring_size - 1;

	swp->dqrr.irq_threshold = threshold;
	swp->dqrr.irq_itp = itp;

	dpaa2_swp_write_reg(swp, DPAA2_SWP_CINH_DQRR_ITR, threshold);
	dpaa2_swp_write_reg(swp, DPAA2_SWP_CINH_ITPR, itp);

	return (0);
}

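/*
 * Worked example of the conversion above: swp_cycles_ratio is the duration
 * of 256 QBMan clock cycles in ns, so with QBMan hypothetically clocked at
 * 500 MHz (256 cycles = 512 ns) a holdoff of 100 us would give
 * itp = (100 * 1000) / 512 = 195 ticks, well under DQRR_MAX_ITP.
 */
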
/*
 * Software portal commands.
 */

/**
 * @brief Configure the channel data availability notification (CDAN)
 * in a particular WQ channel.
 */
int
dpaa2_swp_conf_wq_channel(struct dpaa2_swp *swp, uint16_t chan_id,
    uint8_t we_mask, bool cdan_en, uint64_t ctx)
{
	/* NOTE: 64-byte command. */
	struct __packed {
		uint8_t		verb;
		uint8_t		result; /* in response only! */
		uint16_t	chan_id;
		uint8_t		we;
		uint8_t		ctrl;
		uint16_t	_reserved2;
		uint64_t	ctx;
		uint8_t		_reserved3[48];
	} cmd = {0};
	struct __packed {
		uint8_t		verb;
		uint8_t		result;
		uint16_t	chan_id;
		uint8_t		_reserved[60];
	} rsp;
	int error;

	if (swp == NULL)
		return (EINVAL);

	cmd.chan_id = chan_id;
	cmd.we = we_mask;
	cmd.ctrl = cdan_en ? 1u : 0u;
	cmd.ctx = ctx;

	error = dpaa2_swp_exec_mgmt_command(swp, (struct dpaa2_swp_cmd *) &cmd,
	    (struct dpaa2_swp_rsp *) &rsp, CMDID_SWP_WQCHAN_CONFIGURE);
	if (error)
		return (error);

	if (rsp.result != QBMAN_CMD_RC_OK) {
		device_printf(swp->desc->dpio_dev, "WQ channel configuration "
		    "error: channel_id=%d, result=0x%02x\n", chan_id,
		    rsp.result);
		return (EIO);
	}

	return (0);
}

/**
 * @brief Query current configuration/state of the buffer pool.
 */
int
dpaa2_swp_query_bp(struct dpaa2_swp *swp, uint16_t bpid,
    struct dpaa2_bp_conf *conf)
{
	/* NOTE: 64-byte command. */
	struct __packed {
		uint8_t		verb;
		uint8_t		_reserved1;
		uint16_t	bpid;
		uint8_t		_reserved2[60];
	} cmd = {0};
	struct __packed {
		uint8_t		verb;
		uint8_t		result;
		uint32_t	_reserved1;
		uint8_t		bdi;
		uint8_t		state;
		uint32_t	fill;
		/* TODO: Support the other fields as well. */
		uint8_t		_reserved2[52];
	} rsp;
	int error;

	if (swp == NULL || conf == NULL)
		return (EINVAL);

	cmd.bpid = bpid;

	error = dpaa2_swp_exec_mgmt_command(swp, (struct dpaa2_swp_cmd *) &cmd,
	    (struct dpaa2_swp_rsp *) &rsp, CMDID_SWP_BP_QUERY);
	if (error)
		return (error);

	if (rsp.result != QBMAN_CMD_RC_OK) {
		device_printf(swp->desc->dpio_dev, "BP query error: bpid=%d, "
		    "result=0x%02x\n", bpid, rsp.result);
		return (EIO);
	}

	conf->bdi = rsp.bdi;
	conf->state = rsp.state;
	conf->free_bufn = rsp.fill;

	return (0);
}

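/*
 * A usage sketch ("dev" is a hypothetical device): report how many free
 * buffers are left in the pool identified by "bpid":
 *
 *	struct dpaa2_bp_conf conf;
 *
 *	if (dpaa2_swp_query_bp(swp, bpid, &conf) == 0)
 *		device_printf(dev, "bpid=%u: %u free buffers\n", bpid,
 *		    conf.free_bufn);
 */
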
int
dpaa2_swp_release_bufs(struct dpaa2_swp *swp, uint16_t bpid, bus_addr_t *buf,
    uint32_t buf_num)
{
	/* NOTE: 64-byte command. */
	struct __packed {
		uint8_t		verb;
		uint8_t		_reserved1;
		uint16_t	bpid;
		uint32_t	_reserved2;
		uint64_t	buf[DPAA2_SWP_BUFS_PER_CMD];
	} cmd = {0};
	int error;

	if (swp == NULL || buf == NULL || buf_num == 0u ||
	    buf_num > DPAA2_SWP_BUFS_PER_CMD)
		return (EINVAL);

	for (uint32_t i = 0; i < buf_num; i++)
		cmd.buf[i] = buf[i];
	cmd.bpid = bpid;
	cmd.verb |= 1 << 5; /* Switch release buffer command to valid. */

	error = dpaa2_swp_exec_br_command(swp, (struct dpaa2_swp_cmd *) &cmd,
	    buf_num);
	if (error) {
		device_printf(swp->desc->dpio_dev, "buffers release command "
		    "failed\n");
		return (error);
	}

	return (0);
}

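/*
 * A usage sketch: seed a buffer pool, at most DPAA2_SWP_BUFS_PER_CMD buffers
 * per command ("paddr" is assumed to hold DMA-mapped buffer addresses):
 *
 *	bus_addr_t paddr[DPAA2_SWP_BUFS_PER_CMD];
 *	...
 *	error = dpaa2_swp_release_bufs(swp, bpid, paddr,
 *	    DPAA2_SWP_BUFS_PER_CMD);
 */
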
int
dpaa2_swp_dqrr_next_locked(struct dpaa2_swp *swp, struct dpaa2_dq *dq,
    uint32_t *idx)
{
	struct resource_map *map;
	struct dpaa2_swp_rsp *rsp = (struct dpaa2_swp_rsp *) dq;
	uint32_t verb, pi; /* producer index */
	uint32_t offset;

	if (swp == NULL || dq == NULL)
		return (EINVAL);

	map = swp->cinh_map;
	offset = swp->cfg.mem_backed
	    ? DPAA2_SWP_CENA_DQRR_MEM(swp->dqrr.next_idx)
	    : DPAA2_SWP_CENA_DQRR(swp->dqrr.next_idx);

	/*
	 * Before using valid-bit to detect if something is there, we have to
	 * handle the case of the DQRR reset bug...
	 */
	if (swp->dqrr.reset_bug) {
		/*
		 * We pick up new entries by cache-inhibited producer index,
		 * which means that a non-coherent mapping would require us to
		 * invalidate and read *only* once that PI has indicated that
		 * there's an entry here. The first trip around the DQRR ring
		 * will be much less efficient than all subsequent trips around
		 * it...
		 */
		pi = dpaa2_swp_read_reg(swp, DPAA2_SWP_CINH_DQPI) & DQRR_PI_MASK;

		/* There are new entries if pi != next_idx */
		if (pi == swp->dqrr.next_idx)
			return (ENOENT);

		/*
		 * If next_idx is/was the last ring index, and 'pi' is
		 * different, we can disable the workaround as all the ring
		 * entries have now been DMA'd to so valid-bit checking is
		 * repaired.
		 *
		 * NOTE: This logic needs to be based on next_idx (which
		 *	 increments one at a time), rather than on pi (which
		 *	 can burst and wrap-around between our snapshots of it).
		 */
		if (swp->dqrr.next_idx == (swp->dqrr.ring_size - 1))
			swp->dqrr.reset_bug = 0;
	}

	verb = bus_read_4(map, offset);
	if ((verb & DPAA2_SWP_VALID_BIT) != swp->dqrr.valid_bit)
		return (ENOENT);

	/* Read dequeue response message. */
	for (int i = 0; i < DPAA2_SWP_RSP_PARAMS_N; i++)
		rsp->params[i] = bus_read_8(map, offset + i * sizeof(uint64_t));

	/* Return index of the current entry (if requested). */
	if (idx != NULL)
		*idx = swp->dqrr.next_idx;

	/*
	 * There's something there. Move "next_idx" attention to the next ring
	 * entry before returning what we found.
	 */
	swp->dqrr.next_idx++;
	swp->dqrr.next_idx &= swp->dqrr.ring_size - 1; /* wrap around */
	if (swp->dqrr.next_idx == 0u)
		swp->dqrr.valid_bit ^= DPAA2_SWP_VALID_BIT;

	return (0);
}

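/*
 * A consumption sketch: the caller is expected to hold the portal lock (the
 * "_locked" suffix) and to acknowledge consumed entries back to QBMan, which
 * is not shown here:
 *
 *	struct dpaa2_dq dq;
 *	uint32_t idx;
 *
 *	while (dpaa2_swp_dqrr_next_locked(swp, &dq, &idx) == 0) {
 *		... process the dequeue response in "dq" ...
 *	}
 */
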
int
dpaa2_swp_pull(struct dpaa2_swp *swp, uint16_t chan_id, struct dpaa2_buf *buf,
    uint32_t frames_n)
{
	/* NOTE: 64-byte command. */
	struct __packed {
		uint8_t		verb;
		uint8_t		numf;
		uint8_t		tok;
		uint8_t		_reserved;
		uint32_t	dq_src;
		uint64_t	rsp_addr;
		uint64_t	_reserved1[6];
	} cmd = {0};
	struct dpaa2_dq *msg;
	uint16_t flags;
	int i, error;

	KASSERT(swp != NULL, ("%s: swp is NULL", __func__));
	KASSERT(frames_n != 0u, ("%s: cannot pull zero frames", __func__));
	KASSERT(frames_n <= 16u, ("%s: too many frames to pull", __func__));
	KASSERT(buf->type == DPAA2_BUF_STORE, ("%s: not channel storage "
	    "buffer", __func__));

	cmd.numf = frames_n - 1;
	cmd.tok = DPAA2_SWP_VDQCR_TOKEN;
	cmd.dq_src = chan_id;
	cmd.rsp_addr = (uint64_t) buf->store.paddr;

	/* Dequeue command type */
	cmd.verb &= ~(1 << QB_VDQCR_VERB_DCT0_SHIFT);
	cmd.verb |=  (1 << QB_VDQCR_VERB_DCT1_SHIFT);
	/* Dequeue from a specific software portal channel (IDs in DQ_SRC). */
	cmd.verb &= ~(1 << QB_VDQCR_VERB_DT0_SHIFT);
	cmd.verb &= ~(1 << QB_VDQCR_VERB_DT1_SHIFT);
	/* Write the response to this command into memory (at the RSP_ADDR). */
	cmd.verb |=  (1 << QB_VDQCR_VERB_RLS_SHIFT);
	/* Response writes won't attempt to allocate into a cache. */
	cmd.verb &= ~(1 << QB_VDQCR_VERB_WAE_SHIFT);
	/* Allow the FQ to remain active in the portal after dequeue. */
	cmd.verb &= ~(1 << QB_VDQCR_VERB_RAD_SHIFT);

	DPAA2_SWP_LOCK(swp, &flags);
	if (flags & DPAA2_SWP_DESTROYED) {
		/* Terminate operation if portal is destroyed. */
		DPAA2_SWP_UNLOCK(swp);
		return (ENOENT);
	}

	error = dpaa2_swp_exec_vdc_command_locked(swp,
	    (struct dpaa2_swp_cmd *) &cmd);
	if (error != 0) {
		DPAA2_SWP_UNLOCK(swp);
		return (error);
	}

	/* Sync before reading the VDQ response from QBMan. */
	bus_dmamap_sync(buf->store.dmat, buf->store.dmap, BUS_DMASYNC_POSTREAD);

	/* Read VDQ response from QBMan. */
	msg = (struct dpaa2_dq *) buf->store.vaddr;
	for (i = 1; i <= CMD_SPIN_ATTEMPTS; i++) {
		if ((msg->fdr.desc.stat & DPAA2_DQ_STAT_VOLATILE) &&
		    (msg->fdr.desc.tok == DPAA2_SWP_VDQCR_TOKEN)) {
			/* Reset token. */
			msg->fdr.desc.tok = 0;
			break;
		}
		DELAY(CMD_SPIN_TIMEOUT);
	}
	DPAA2_SWP_UNLOCK(swp);

	/* Return an error on expired timeout. */
	return (i > CMD_SPIN_ATTEMPTS ? ETIMEDOUT : 0);
}

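/*
 * A usage sketch ("buf" is assumed to be a DPAA2_BUF_STORE buffer whose
 * DMA-mapped storage is large enough for the requested frames):
 *
 *	error = dpaa2_swp_pull(swp, chan_id, buf, 1);
 *	if (error == 0) {
 *		msg = (struct dpaa2_dq *) buf->store.vaddr;
 *		... parse the dequeue response in "msg" ...
 *	}
 */
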
/**
 * @brief Issue a command to enqueue a frame using one enqueue descriptor.
 *
 * swp:		Software portal to send this command through.
 * ed:		Enqueue command descriptor.
 * fd:		Frame descriptor to enqueue.
 */
int
dpaa2_swp_enq(struct dpaa2_swp *swp, struct dpaa2_eq_desc *ed,
    struct dpaa2_fd *fd)
{
	uint32_t flags = 0;
	int rc = dpaa2_swp_enq_mult(swp, ed, fd, &flags, 1);

	/* Succeed only if the frame was actually enqueued. */
	return (rc == 1 ? 0 : EBUSY);
}

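/*
 * A typical Tx sketch using the helpers above ("fqid" and the contents of
 * "fd" are assumed to come from the network driver):
 *
 *	struct dpaa2_eq_desc ed = {0};
 *	struct dpaa2_fd fd = {0};
 *
 *	dpaa2_swp_set_ed_norp(&ed, false);	(rejected FDs return to an FQ)
 *	dpaa2_swp_set_ed_fq(&ed, fqid);
 *	... fill in "fd" ...
 *	error = dpaa2_swp_enq(swp, &ed, &fd);
 */
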
/**
 * @brief Issue a command to enqueue frames using one enqueue descriptor.
 *
 * swp:		Software portal to send this command through.
 * ed:		Enqueue command descriptor.
 * fd:		Array of frame descriptors to enqueue.
 * flags:	Array of QBMAN_ENQUEUE_FLAG_DCA flags, one per frame; must not
 *		be NULL.
 * frames_n:	Number of FDs to enqueue.
 *
 * NOTE: Enqueue command (64 bytes): 32 (eq. descriptor) + 32 (frame descriptor).
 */
int
dpaa2_swp_enq_mult(struct dpaa2_swp *swp, struct dpaa2_eq_desc *ed,
    struct dpaa2_fd *fd, uint32_t *flags, int frames_n)
{
	const uint8_t  *ed_pdat8 =  (const uint8_t *) ed;
	const uint32_t *ed_pdat32 = (const uint32_t *) ed;
	const uint64_t *ed_pdat64 = (const uint64_t *) ed;
	const uint64_t *fd_pdat64 = (const uint64_t *) fd;
	struct resource_map *map;
	uint32_t eqcr_ci, eqcr_pi; /* EQCR consumer/producer index */
	uint32_t half_mask, full_mask, val, ci_offset;
	uint16_t swp_flags;
	int num_enq = 0;

	if (swp == NULL || ed == NULL || fd == NULL || flags == NULL ||
	    frames_n == 0)
		return (EINVAL);

	DPAA2_SWP_LOCK(swp, &swp_flags);
	if (swp_flags & DPAA2_SWP_DESTROYED) {
		/* Terminate operation if portal is destroyed. */
		DPAA2_SWP_UNLOCK(swp);
		return (ENOENT);
	}

	map = swp->cfg.writes_cinh ? swp->cinh_map : swp->cena_map;
	ci_offset = swp->cfg.mem_backed
	    ? DPAA2_SWP_CENA_EQCR_CI_MEMBACK
	    : DPAA2_SWP_CENA_EQCR_CI;

	half_mask = swp->eqcr.pi_ci_mask >> 1;
	full_mask = swp->eqcr.pi_ci_mask;

	if (swp->eqcr.available == 0) {
		val = dpaa2_swp_read_reg(swp, ci_offset);
		eqcr_ci = swp->eqcr.ci;
		swp->eqcr.ci = val & full_mask;

		swp->eqcr.available = dpaa2_swp_cyc_diff(swp->eqcr.pi_ring_size,
		    eqcr_ci, swp->eqcr.ci);

		if (swp->eqcr.available == 0) {
			DPAA2_SWP_UNLOCK(swp);
			return (0);
		}
	}

	eqcr_pi = swp->eqcr.pi;
	num_enq = swp->eqcr.available < frames_n
	    ? swp->eqcr.available : frames_n;
	swp->eqcr.available -= num_enq;

	KASSERT(num_enq >= 0 && num_enq <= swp->eqcr.pi_ring_size,
	    ("%s: unexpected num_enq=%d", __func__, num_enq));
	KASSERT(swp->eqcr.available >= 0 &&
	    swp->eqcr.available <= swp->eqcr.pi_ring_size,
	    ("%s: unexpected eqcr.available=%d", __func__, swp->eqcr.available));

	/* Fill in the EQCR ring. */
	for (int i = 0; i < num_enq; i++) {
		/* Write enq. desc. without the VERB, DCA, SEQNUM and OPRID. */
		for (int j = 1; j <= 3; j++)
			bus_write_8(map,
			    DPAA2_SWP_CENA_EQCR(eqcr_pi & half_mask) +
			    sizeof(uint64_t) * j, ed_pdat64[j]);
		/* Write OPRID. */
		bus_write_4(map,
		    DPAA2_SWP_CENA_EQCR(eqcr_pi & half_mask) + sizeof(uint32_t),
		    ed_pdat32[1]);
		/* Write DCA and SEQNUM without VERB byte. */
		for (int j = 1; j <= 3; j++)
			bus_write_1(map,
			    DPAA2_SWP_CENA_EQCR(eqcr_pi & half_mask) +
			    sizeof(uint8_t) * j, ed_pdat8[j]);

		/* Write frame descriptor. */
		for (int j = 0; j <= 3; j++)
			bus_write_8(map,
			    DPAA2_SWP_CENA_EQCR(eqcr_pi & half_mask) +
			    ENQ_DESC_FD_OFFSET +
			    sizeof(uint64_t) * j, fd_pdat64[j]);
		eqcr_pi++;
	}

	wmb();

	/* Write the VERB byte of enqueue descriptor. */
	eqcr_pi = swp->eqcr.pi;
	for (int i = 0; i < num_enq; i++) {
		bus_write_1(map,
		    DPAA2_SWP_CENA_EQCR(eqcr_pi & half_mask),
		    ed_pdat8[0] | swp->eqcr.pi_vb);

		if (flags && (flags[i] & ENQ_FLAG_DCA)) {
			/* Update DCA byte. */
			bus_write_1(map,
			    DPAA2_SWP_CENA_EQCR(eqcr_pi & half_mask) + 1,
			    (1 << ENQ_CMD_DCA_EN_SHIFT) |
			    (flags[i] & ENQ_DCA_IDXMASK));
		}
		eqcr_pi++;
		if (!(eqcr_pi & half_mask))
			swp->eqcr.pi_vb ^= DPAA2_SWP_VALID_BIT;
	}
	swp->eqcr.pi = eqcr_pi & full_mask;

	DPAA2_SWP_UNLOCK(swp);

	return (num_enq);
}

static int
dpaa2_swp_cyc_diff(uint8_t ringsize, uint8_t first, uint8_t last)
{
	/* 'first' is included, 'last' is excluded */
	return ((first <= last)
	    ? (last - first) : ((2 * ringsize) - (first - last)));
}

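/*
 * Note on dpaa2_swp_cyc_diff(): the EQCR producer/consumer indices live in a
 * cyclic space of 2 * ringsize values (ring index plus one wrap bit), hence
 * the "2 * ringsize" term. E.g., with ringsize = 8, first = 14 and last = 2:
 * (2 * 8) - (14 - 2) = 4 entries from 'first' up to (but excluding) 'last'.
 */
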
/**
 * @brief Execute Buffer Release Command (BRC).
 */
static int
dpaa2_swp_exec_br_command(struct dpaa2_swp *swp, struct dpaa2_swp_cmd *cmd,
    uint32_t buf_num)
{
	struct __packed with_verb {
		uint8_t	verb;
		uint8_t	_reserved[63];
	} *c;
	const uint8_t *cmd_pdat8 = (const uint8_t *) cmd->params;
	const uint32_t *cmd_pdat32 = (const uint32_t *) cmd->params;
	struct resource_map *map;
	uint32_t offset, rar; /* Release Array Allocation register */
	uint16_t flags;

	if (!swp || !cmd)
		return (EINVAL);

	DPAA2_SWP_LOCK(swp, &flags);
	if (flags & DPAA2_SWP_DESTROYED) {
		/* Terminate operation if portal is destroyed. */
		DPAA2_SWP_UNLOCK(swp);
		return (ENOENT);
	}

	rar = dpaa2_swp_read_reg(swp, DPAA2_SWP_CINH_RAR);
	if (!RAR_SUCCESS(rar)) {
		DPAA2_SWP_UNLOCK(swp);
		return (EBUSY);
	}

	map = swp->cfg.writes_cinh ? swp->cinh_map : swp->cena_map;
	offset = swp->cfg.mem_backed
	    ? DPAA2_SWP_CENA_RCR_MEM(RAR_IDX(rar))
	    : DPAA2_SWP_CENA_RCR(RAR_IDX(rar));
	c = (struct with_verb *) cmd;

	/* Write command bytes (without VERB byte). */
	for (uint32_t i = 1; i < DPAA2_SWP_CMD_PARAMS_N; i++)
		bus_write_8(map, offset + sizeof(uint64_t) * i, cmd->params[i]);
	bus_write_4(map, offset + 4, cmd_pdat32[1]);
	for (uint32_t i = 1; i <= 3; i++)
		bus_write_1(map, offset + i, cmd_pdat8[i]);

	/* Write VERB byte and trigger command execution. */
	if (swp->cfg.mem_backed) {
		bus_write_1(map, offset, c->verb | RAR_VB(rar) | buf_num);
		wmb();
		dpaa2_swp_write_reg(swp, DPAA2_SWP_CINH_RCR_AM_RT +
		    RAR_IDX(rar) * 4, DPAA2_SWP_RT_MODE);
	} else {
		wmb();
		bus_write_1(map, offset, c->verb | RAR_VB(rar) | buf_num);
	}

	DPAA2_SWP_UNLOCK(swp);

	return (0);
}

/**
 * @brief Execute Volatile Dequeue Command (VDC).
 *
 * This command is executed by QBMan only once and delivers the requested
 * number of frames (1-16 or 1-32 depending on the QBMan version) to the
 * driver via DQRR or arbitrary DMA-mapped memory.
 *
 * NOTE: There is a counterpart to the volatile dequeue command called static
 *	 dequeue command (SDQC) which is executed repeatedly as long as the
 *	 command is present in the SDQCR register.
 */
static int
dpaa2_swp_exec_vdc_command_locked(struct dpaa2_swp *swp,
    struct dpaa2_swp_cmd *cmd)
{
	struct __packed with_verb {
		uint8_t	verb;
		uint8_t	_reserved[63];
	} *c;
	const uint8_t *p8 = (const uint8_t *) cmd->params;
	const uint32_t *p32 = (const uint32_t *) cmd->params;
	struct resource_map *map;
	uint32_t offset;

	map = swp->cfg.writes_cinh ? swp->cinh_map : swp->cena_map;
	offset = swp->cfg.mem_backed
	    ? DPAA2_SWP_CENA_VDQCR_MEM : DPAA2_SWP_CENA_VDQCR;
	c = (struct with_verb *) cmd;

	/* Write command bytes (without VERB byte). */
	for (uint32_t i = 1; i < DPAA2_SWP_CMD_PARAMS_N; i++)
		bus_write_8(map, offset + sizeof(uint64_t) * i, cmd->params[i]);
	bus_write_4(map, offset + 4, p32[1]);
	for (uint32_t i = 1; i <= 3; i++)
		bus_write_1(map, offset + i, p8[i]);

	/* Write VERB byte and trigger command execution. */
	if (swp->cfg.mem_backed) {
		bus_write_1(map, offset, c->verb | swp->vdq.valid_bit);
		swp->vdq.valid_bit ^= DPAA2_SWP_VALID_BIT;
		wmb();
		dpaa2_swp_write_reg(swp, DPAA2_SWP_CINH_VDQCR_RT,
		    DPAA2_SWP_RT_MODE);
	} else {
		wmb();
		bus_write_1(map, offset, c->verb | swp->vdq.valid_bit);
		swp->vdq.valid_bit ^= DPAA2_SWP_VALID_BIT;
	}

	return (0);
}

/**
 * @brief Execute a QBMan management command.
 */
static int
dpaa2_swp_exec_mgmt_command(struct dpaa2_swp *swp, struct dpaa2_swp_cmd *cmd,
    struct dpaa2_swp_rsp *rsp, uint8_t cmdid)
{
#if (defined(_KERNEL) && defined(INVARIANTS))
	struct __packed with_verb {
		uint8_t	verb;
		uint8_t	_reserved[63];
	} *r;
#endif
	uint16_t flags;
	int error;

	if (swp == NULL || cmd == NULL || rsp == NULL)
		return (EINVAL);

	DPAA2_SWP_LOCK(swp, &flags);
	if (flags & DPAA2_SWP_DESTROYED) {
		/* Terminate operation if portal is destroyed. */
		DPAA2_SWP_UNLOCK(swp);
		return (ENOENT);
	}

	/*
	 * Send a command to QBMan using Management Command register and wait
	 * for response from the Management Response registers.
	 */
	dpaa2_swp_send_mgmt_command(swp, cmd, cmdid);
	error = dpaa2_swp_wait_for_mgmt_response(swp, rsp);
	if (error) {
		DPAA2_SWP_UNLOCK(swp);
		return (error);
	}
	DPAA2_SWP_UNLOCK(swp);

#if (defined(_KERNEL) && defined(INVARIANTS))
	r = (struct with_verb *) rsp;
	KASSERT((r->verb & CMD_VERB_MASK) == cmdid,
	    ("wrong VERB byte in response: resp=0x%02x, expected=0x%02x",
	    r->verb, cmdid));
#endif

	return (0);
}

static int
dpaa2_swp_send_mgmt_command(struct dpaa2_swp *swp, struct dpaa2_swp_cmd *cmd,
    uint8_t cmdid)
{
	const uint8_t *cmd_pdat8 = (const uint8_t *) cmd->params;
	const uint32_t *cmd_pdat32 = (const uint32_t *) cmd->params;
	struct resource_map *map;
	uint32_t offset;

	map = swp->cfg.writes_cinh ? swp->cinh_map : swp->cena_map;
	offset = swp->cfg.mem_backed ? DPAA2_SWP_CENA_CR_MEM : DPAA2_SWP_CENA_CR;

	/* Write command bytes (without VERB byte). */
	for (uint32_t i = 1; i < DPAA2_SWP_CMD_PARAMS_N; i++)
		bus_write_8(map, offset + sizeof(uint64_t) * i, cmd->params[i]);
	bus_write_4(map, offset + 4, cmd_pdat32[1]);
	for (uint32_t i = 1; i <= 3; i++)
		bus_write_1(map, offset + i, cmd_pdat8[i]);

	/* Write VERB byte and trigger command execution. */
	if (swp->cfg.mem_backed) {
		bus_write_1(map, offset, cmdid | swp->mr.valid_bit);
		wmb();
		dpaa2_swp_write_reg(swp, DPAA2_SWP_CINH_CR_RT,
		    DPAA2_SWP_RT_MODE);
	} else {
		wmb();
		bus_write_1(map, offset, cmdid | swp->mc.valid_bit);
	}

	return (0);
}

static int
dpaa2_swp_wait_for_mgmt_response(struct dpaa2_swp *swp, struct dpaa2_swp_rsp *rsp)
{
	struct resource_map *map = swp->cfg.mem_backed
	    ? swp->cena_map : swp->cinh_map;
	/* Response is read from the single RR register or from RR0/RR1. */
	const uint32_t offset = swp->cfg.mem_backed
	    ? DPAA2_SWP_CENA_RR_MEM
	    : DPAA2_SWP_CENA_RR(swp->mc.valid_bit);
	uint32_t i, verb, ret;
	int rc;

	/* Wait for a command response from QBMan. */
	for (i = 1; i <= CMD_SPIN_ATTEMPTS; i++) {
		if (swp->cfg.mem_backed) {
			verb = (uint32_t) (bus_read_4(map, offset) & 0xFFu);
			if (swp->mr.valid_bit != (verb & DPAA2_SWP_VALID_BIT))
				goto wait;
			if (!(verb & ~DPAA2_SWP_VALID_BIT))
				goto wait;
			swp->mr.valid_bit ^= DPAA2_SWP_VALID_BIT;
		} else {
			ret = bus_read_4(map, offset);
			verb = ret & ~DPAA2_SWP_VALID_BIT; /* remove valid bit */
			if (verb == 0u)
				goto wait;
			swp->mc.valid_bit ^= DPAA2_SWP_VALID_BIT;
		}
		break;
 wait:
		DELAY(CMD_SPIN_TIMEOUT);
	}
	/* Return an error on expired timeout. */
	rc = i > CMD_SPIN_ATTEMPTS ? ETIMEDOUT : 0;

	/* Read command response. */
	for (i = 0; i < DPAA2_SWP_RSP_PARAMS_N; i++)
		rsp->params[i] = bus_read_8(map, offset + i * sizeof(uint64_t));

	return (rc);
}