/*-
 * SPDX-License-Identifier: BSD-3-Clause AND BSD-2-Clause
 *
 * Copyright © 2014-2016 Freescale Semiconductor, Inc.
 * Copyright © 2016-2019 NXP
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its
 *    contributors may be used to endorse or promote products derived from this
 *    software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Original source file obtained from:
 * drivers/soc/fsl/dpio/qbman-portal.c
 *
 * Commit: 4c86114194e644b6da9107d75910635c9e87179e
 * Repository: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
 */

/*
 * Copyright © 2021-2022 Dmitry Salychev
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
/*
 * DPAA2 QBMan software portal.
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/lock.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/atomic.h>

#include "pcib_if.h"
#include "pci_if.h"

#include "dpaa2_swp.h"
#include "dpaa2_mc.h"
#include "dpaa2_bp.h"

#define CMD_SPIN_TIMEOUT	100u	/* us */
#define CMD_SPIN_ATTEMPTS	2000u	/* 200 ms max. */

#define CMD_VERB_MASK		0x7Fu

/* Shifts in the VERB byte of the enqueue command descriptor. */
#define ENQ_CMD_ORP_ENABLE_SHIFT	2
#define ENQ_CMD_IRQ_ON_DISPATCH_SHIFT	3
#define ENQ_CMD_TARGET_TYPE_SHIFT	4
#define ENQ_CMD_DCA_EN_SHIFT		7
/* VERB byte options of the enqueue command descriptor. */
#define ENQ_CMD_EMPTY			0u
#define ENQ_CMD_RESPONSE_ALWAYS		1u
#define ENQ_CMD_REJECTS_TO_FQ		2u

#define ENQ_DESC_FD_OFFSET		32u

#define ENQ_DCA_IDXMASK			0x0Fu
#define ENQ_FLAG_DCA			(1ull << 31)

/* QBMan portal command codes. */
#define CMDID_SWP_MC_ACQUIRE		0x30
#define CMDID_SWP_BP_QUERY		0x32
#define CMDID_SWP_WQCHAN_CONFIGURE	0x46

/* QBMan portal command result codes. */
#define QBMAN_CMD_RC_OK			0xF0

/* SDQCR attribute codes */
#define QB_SDQCR_FC_SHIFT		29u
#define QB_SDQCR_FC_MASK		0x1u
#define QB_SDQCR_DCT_SHIFT		24u
#define QB_SDQCR_DCT_MASK		0x3u
#define QB_SDQCR_TOK_SHIFT		16u
#define QB_SDQCR_TOK_MASK		0xFFu
#define QB_SDQCR_SRC_SHIFT		0u
#define QB_SDQCR_SRC_MASK		0xFFFFu

/* Shifts in the VERB byte of the volatile dequeue command. */
#define QB_VDQCR_VERB_DCT0_SHIFT	0
#define QB_VDQCR_VERB_DCT1_SHIFT	1
#define QB_VDQCR_VERB_DT0_SHIFT		2
#define QB_VDQCR_VERB_DT1_SHIFT		3
#define QB_VDQCR_VERB_RLS_SHIFT		4
#define QB_VDQCR_VERB_WAE_SHIFT		5
#define QB_VDQCR_VERB_RAD_SHIFT		6

/* Maximum timeout period for the DQRR interrupt. */
#define DQRR_MAX_ITP			4096u
#define DQRR_PI_MASK			0x0Fu

/* Release Array Allocation register helpers. */
#define RAR_IDX(rar)		((rar) & 0x7u)
#define RAR_VB(rar)		((rar) & 0x80u)
#define RAR_SUCCESS(rar)	((rar) & 0x100u)

MALLOC_DEFINE(M_DPAA2_SWP, "dpaa2_swp", "DPAA2 QBMan Software Portal");

enum qbman_sdqcr_dct {
	qbman_sdqcr_dct_null = 0,
	qbman_sdqcr_dct_prio_ics,
	qbman_sdqcr_dct_active_ics,
	qbman_sdqcr_dct_active
};

enum qbman_sdqcr_fc {
	qbman_sdqcr_fc_one = 0,
	qbman_sdqcr_fc_up_to_3 = 1
};

/* Routines to execute software portal commands. */
static int dpaa2_swp_exec_mgmt_command(struct dpaa2_swp *,
    struct dpaa2_swp_cmd *, struct dpaa2_swp_rsp *, uint8_t);
static int dpaa2_swp_exec_br_command(struct dpaa2_swp *, struct dpaa2_swp_cmd *,
    uint32_t);
static int dpaa2_swp_exec_vdc_command_locked(struct dpaa2_swp *,
    struct dpaa2_swp_cmd *);

/* Management Commands helpers. */
static int dpaa2_swp_send_mgmt_command(struct dpaa2_swp *,
    struct dpaa2_swp_cmd *, uint8_t);
static int dpaa2_swp_wait_for_mgmt_response(struct dpaa2_swp *,
    struct dpaa2_swp_rsp *);

/* Helper subroutines. */
static int dpaa2_swp_cyc_diff(uint8_t, uint8_t, uint8_t);

int
dpaa2_swp_init_portal(struct dpaa2_swp **swp, struct dpaa2_swp_desc *desc,
    uint16_t flags)
{
	struct dpaa2_swp *p;
	uint32_t reg, mask_size, eqcr_pi; /* EQCR producer index */

	if (!swp || !desc)
		return (DPAA2_SWP_STAT_EINVAL);

	p = malloc(sizeof(struct dpaa2_swp), M_DPAA2_SWP,
	    flags & DPAA2_SWP_NOWAIT_ALLOC
	    ? (M_NOWAIT | M_ZERO)
	    : (M_WAITOK | M_ZERO));
	if (!p)
		return (DPAA2_SWP_STAT_NO_MEMORY);

	mtx_init(&p->lock, "swp_sleep_lock", NULL, MTX_DEF);

	p->cfg.mem_backed = false;
	p->cfg.writes_cinh = true;

	p->desc = desc;
	p->flags = flags;
	p->mc.valid_bit = DPAA2_SWP_VALID_BIT;
	p->mr.valid_bit = DPAA2_SWP_VALID_BIT;

	/* FIXME: Memory-backed mode doesn't work now. Why? */
	p->cena_res = desc->cena_res;
	p->cena_map = desc->cena_map;
	p->cinh_res = desc->cinh_res;
	p->cinh_map = desc->cinh_map;

	/* Static Dequeue Command Register configuration. */
	p->sdq = 0;
	p->sdq |= qbman_sdqcr_dct_prio_ics << QB_SDQCR_DCT_SHIFT;
	p->sdq |= qbman_sdqcr_fc_up_to_3 << QB_SDQCR_FC_SHIFT;
	p->sdq |= DPAA2_SWP_SDQCR_TOKEN << QB_SDQCR_TOK_SHIFT;

	/* Volatile Dequeue Command configuration. */
	p->vdq.valid_bit = DPAA2_SWP_VALID_BIT;

	/* Dequeue Response Ring configuration */
	p->dqrr.next_idx = 0;
	p->dqrr.valid_bit = DPAA2_SWP_VALID_BIT;
	if ((desc->swp_version & DPAA2_SWP_REV_MASK) < DPAA2_SWP_REV_4100) {
		p->dqrr.ring_size = 4;
		p->dqrr.reset_bug = 1;
	} else {
		p->dqrr.ring_size = 8;
		p->dqrr.reset_bug = 0;
	}

	if ((desc->swp_version & DPAA2_SWP_REV_MASK) < DPAA2_SWP_REV_5000) {
		reg = dpaa2_swp_set_cfg(
		    p->dqrr.ring_size, /* max. entries QMan writes to DQRR */
		    1, /* writes enabled in the CINH memory only */
		    0, /* EQCR_CI stashing threshold */
		    3, /* RPM: RCR in array mode */
		    2, /* DCM: Discrete consumption ack */
		    2, /* EPM: EQCR in ring mode (FIFO) */
		    1, /* mem stashing drop enable */
		    1, /* mem stashing priority enable */
		    1, /* mem stashing enable */
		    1, /* dequeue stashing priority enable */
		    0, /* dequeue stashing enable */
		    0  /* EQCR_CI stashing priority enable */
		);
		reg &= ~(1 << DPAA2_SWP_CFG_CPBS_SHIFT); /* QMan-backed mode */
	} else {
		bus_set_region_4(p->cena_map, 0, 0,
		    rman_get_size(p->cena_res) / 4);

		reg = dpaa2_swp_set_cfg(
		    p->dqrr.ring_size, /* DQRR_MF: max. entries QMan writes to DQRR */
		    1, /* WN: writes enabled in the CINH memory only */
		    0, /* EST: EQCR_CI stashing is disabled */
		    3, /* RPM: RCR in array mode */
		    2, /* DCM: Discrete consumption ack */
		    2, /* EPM: EQCR in ring mode (FIFO) */
		    1, /* SD: dequeued frame data, annotation and FQ context stashing drop enable */
		    1, /* SP: dequeued frame data, annotation and FQ context stashing priority */
		    1, /* SE: dequeued frame data, annotation and FQ context stashing enable */
		    1, /* DP: DQRR entry stashing priority */
		    0, /* DE: DQRR entry (or cacheable portal area) stashing enable */
		    0  /* EP: EQCR_CI stashing priority */
		);
		/* TODO: Switch to memory-backed mode. */
		reg &= ~(1 << DPAA2_SWP_CFG_CPBS_SHIFT); /* QMan-backed mode */
	}
	dpaa2_swp_write_reg(p, DPAA2_SWP_CINH_CFG, reg);
	reg = dpaa2_swp_read_reg(p, DPAA2_SWP_CINH_CFG);
	if (!reg) {
		free(p, M_DPAA2_SWP);
		return (DPAA2_SWP_STAT_PORTAL_DISABLED);
	}

	/*
	 * The Static Dequeue Command Register needs to be initialized to 0
	 * when no channels are being dequeued from, or else the QMan HW will
	 * indicate an error. The values calculated above will be applied when
	 * dequeues from a specific channel are enabled.
	 */
	dpaa2_swp_write_reg(p, DPAA2_SWP_CINH_SDQCR, 0);

	p->eqcr.pi_ring_size = 8;
	/* if ((desc->swp_version & DPAA2_SWP_REV_MASK) >= DPAA2_SWP_REV_5000) */
	/*	p->eqcr.pi_ring_size = 32; */

	for (mask_size = p->eqcr.pi_ring_size; mask_size > 0; mask_size >>= 1)
		p->eqcr.pi_ci_mask = (p->eqcr.pi_ci_mask << 1) + 1;

	eqcr_pi = dpaa2_swp_read_reg(p, DPAA2_SWP_CINH_EQCR_PI);
	p->eqcr.pi = eqcr_pi & p->eqcr.pi_ci_mask;
	p->eqcr.pi_vb = eqcr_pi & DPAA2_SWP_VALID_BIT;
	p->eqcr.ci = dpaa2_swp_read_reg(p, DPAA2_SWP_CINH_EQCR_CI)
	    & p->eqcr.pi_ci_mask;
	p->eqcr.available = p->eqcr.pi_ring_size;

	/* TODO: sysctl(9) for the IRQ timeout? */
	/* Initialize the portal with an IRQ threshold and timeout of 120us. */
	dpaa2_swp_set_irq_coalescing(p, p->dqrr.ring_size - 1, 120);

	*swp = p;

	return (0);
}

void
dpaa2_swp_free_portal(struct dpaa2_swp *swp)
{
	uint16_t flags;

	KASSERT(swp != NULL, ("%s: swp is NULL", __func__));

	DPAA2_SWP_LOCK(swp, &flags);
	swp->flags |= DPAA2_SWP_DESTROYED;
	DPAA2_SWP_UNLOCK(swp);

	/* Let threads stop using this portal. */
	DELAY(DPAA2_SWP_TIMEOUT);

	mtx_destroy(&swp->lock);
	free(swp, M_DPAA2_SWP);
}

uint32_t
dpaa2_swp_set_cfg(uint8_t max_fill, uint8_t wn, uint8_t est, uint8_t rpm,
    uint8_t dcm, uint8_t epm, int sd, int sp, int se, int dp, int de, int ep)
{
	return (
	    max_fill << DPAA2_SWP_CFG_DQRR_MF_SHIFT |
	    est << DPAA2_SWP_CFG_EST_SHIFT |
	    wn << DPAA2_SWP_CFG_WN_SHIFT |
	    rpm << DPAA2_SWP_CFG_RPM_SHIFT |
	    dcm << DPAA2_SWP_CFG_DCM_SHIFT |
	    epm << DPAA2_SWP_CFG_EPM_SHIFT |
	    sd << DPAA2_SWP_CFG_SD_SHIFT |
	    sp << DPAA2_SWP_CFG_SP_SHIFT |
	    se << DPAA2_SWP_CFG_SE_SHIFT |
	    dp << DPAA2_SWP_CFG_DP_SHIFT |
	    de << DPAA2_SWP_CFG_DE_SHIFT |
	    ep << DPAA2_SWP_CFG_EP_SHIFT
	);
}
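
/*
 * Example (an informal sketch, not part of the driver API): the QMan-backed
 * configuration programmed in dpaa2_swp_init_portal() could be built by hand
 * as follows; the resulting word is what gets written to the SWP_CFG register:
 *
 *	uint32_t cfg;
 *
 *	cfg = dpaa2_swp_set_cfg(
 *	    8,			// DQRR_MF: up to 8 entries in DQRR
 *	    1,			// WN: writes to CINH memory only
 *	    0, 3, 2, 2,		// EST, RPM, DCM, EPM
 *	    1, 1, 1, 1, 0, 0);	// SD, SP, SE, DP, DE, EP
 *	cfg &= ~(1 << DPAA2_SWP_CFG_CPBS_SHIFT);
 *	dpaa2_swp_write_reg(p, DPAA2_SWP_CINH_CFG, cfg);
 */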

/* Read/write registers of a software portal. */

void
dpaa2_swp_write_reg(struct dpaa2_swp *swp, uint32_t o, uint32_t v)
{
	bus_write_4(swp->cinh_map, o, v);
}

uint32_t
dpaa2_swp_read_reg(struct dpaa2_swp *swp, uint32_t o)
{
	return (bus_read_4(swp->cinh_map, o));
}

/* Helper routines. */

/**
 * @brief Set enqueue descriptor without Order Point Record ID.
 *
 * ed:		Enqueue descriptor.
 * resp_always:	Enqueue with response always (1); FD from a rejected enqueue
 *		will be returned on a FQ (0).
 */
void
dpaa2_swp_set_ed_norp(struct dpaa2_eq_desc *ed, bool resp_always)
{
	ed->verb &= ~(1 << ENQ_CMD_ORP_ENABLE_SHIFT);
	if (resp_always)
		ed->verb |= ENQ_CMD_RESPONSE_ALWAYS;
	else
		ed->verb |= ENQ_CMD_REJECTS_TO_FQ;
}

/**
 * @brief Set FQ of the enqueue descriptor.
 */
void
dpaa2_swp_set_ed_fq(struct dpaa2_eq_desc *ed, uint32_t fqid)
{
	ed->verb &= ~(1 << ENQ_CMD_TARGET_TYPE_SHIFT);
	ed->tgtid = fqid;
}
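
/*
 * Example (an informal sketch): a typical enqueue descriptor targets a frame
 * queue and asks QBMan not to write a response unless the enqueue is rejected;
 * "fqid" here stands for a valid frame queue ID:
 *
 *	struct dpaa2_eq_desc ed = {0};
 *
 *	dpaa2_swp_set_ed_norp(&ed, false);	// rejected FDs go back to a FQ
 *	dpaa2_swp_set_ed_fq(&ed, fqid);		// enqueue to this FQ
 */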

/**
 * @brief Enable interrupts for a software portal.
 */
void
dpaa2_swp_set_intr_trigger(struct dpaa2_swp *swp, uint32_t mask)
{
	if (swp != NULL)
		dpaa2_swp_write_reg(swp, DPAA2_SWP_CINH_IER, mask);
}

/**
 * @brief Return the value in the SWP_IER register.
 */
uint32_t
dpaa2_swp_get_intr_trigger(struct dpaa2_swp *swp)
{
	if (swp != NULL)
		return (dpaa2_swp_read_reg(swp, DPAA2_SWP_CINH_IER));
	return (0);
}

/**
 * @brief Return the value in the SWP_ISR register.
 */
uint32_t
dpaa2_swp_read_intr_status(struct dpaa2_swp *swp)
{
	if (swp != NULL)
		return (dpaa2_swp_read_reg(swp, DPAA2_SWP_CINH_ISR));
	return (0);
}

/**
 * @brief Clear the SWP_ISR register according to the given mask.
 */
void
dpaa2_swp_clear_intr_status(struct dpaa2_swp *swp, uint32_t mask)
{
	if (swp != NULL)
		dpaa2_swp_write_reg(swp, DPAA2_SWP_CINH_ISR, mask);
}

/**
 * @brief Enable or disable push dequeue.
 *
 * swp:		the software portal object
 * chan_idx:	the channel index (0 to 15)
 * en:		enable or disable push dequeue
 */
void
dpaa2_swp_set_push_dequeue(struct dpaa2_swp *swp, uint8_t chan_idx, bool en)
{
	uint16_t dqsrc;

	if (swp != NULL) {
		if (chan_idx > 15u) {
			device_printf(swp->desc->dpio_dev, "channel index "
			    "should be <= 15: chan_idx=%d\n", chan_idx);
			return;
		}

		if (en)
			swp->sdq |= 1 << chan_idx;
		else
			swp->sdq &= ~(1 << chan_idx);
		/*
		 * Read back the complete channel source map. If no channels
		 * are enabled, the SDQCR must be written as 0 or else QMan
		 * will assert errors.
		 */
		dqsrc = (swp->sdq >> DPAA2_SDQCR_SRC_SHIFT) &
		    DPAA2_SDQCR_SRC_MASK;
		dpaa2_swp_write_reg(swp, DPAA2_SWP_CINH_SDQCR, dqsrc != 0
		    ? swp->sdq : 0);
	}
}
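
/*
 * Example (an informal sketch): enable push dequeue on channel 0 of an
 * initialized portal "portal", and disable it again when the channel is torn
 * down; the SDQCR source map is maintained internally:
 *
 *	dpaa2_swp_set_push_dequeue(portal, 0, true);
 *	...
 *	dpaa2_swp_set_push_dequeue(portal, 0, false);
 */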

/**
 * @brief Set new IRQ coalescing values.
 *
 * swp:		The software portal object.
 * threshold:	Threshold for DQRR interrupt generation. The DQRR interrupt
 *		asserts when the ring contains greater than "threshold" entries.
 * holdoff:	DQRR interrupt holdoff (timeout) period in us.
 */
int
dpaa2_swp_set_irq_coalescing(struct dpaa2_swp *swp, uint32_t threshold,
    uint32_t holdoff)
{
	uint32_t itp; /* Interrupt Timeout Period */

	if (swp == NULL)
		return (EINVAL);

	/*
	 * Convert the "holdoff" value from us to increments of 256 QBMan
	 * clock cycles. This depends on the QBMan internal frequency.
	 */
	itp = (holdoff * 1000u) / swp->desc->swp_cycles_ratio;
	if (itp > DQRR_MAX_ITP)
		itp = DQRR_MAX_ITP;
	if (threshold >= swp->dqrr.ring_size)
		threshold = swp->dqrr.ring_size - 1;

	swp->dqrr.irq_threshold = threshold;
	swp->dqrr.irq_itp = itp;

	dpaa2_swp_write_reg(swp, DPAA2_SWP_CINH_DQRR_ITR, threshold);
	dpaa2_swp_write_reg(swp, DPAA2_SWP_CINH_ITPR, itp);

	return (0);
}
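
/*
 * Example (an informal arithmetic sketch, under the assumption that
 * swp_cycles_ratio expresses nanoseconds per 256-cycle tick): with a
 * hypothetical ratio of 256, a 120 us holdoff translates to
 * itp = (120 * 1000) / 256 = 468 ticks, well under DQRR_MAX_ITP (4096), so
 * the interrupt fires at most ~120 us after more than "threshold" DQRR
 * entries become pending.
 */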

/*
 * Software portal commands.
 */

/**
 * @brief Configure the channel data availability notification (CDAN)
 * in a particular WQ channel.
 */
int
dpaa2_swp_conf_wq_channel(struct dpaa2_swp *swp, uint16_t chan_id,
    uint8_t we_mask, bool cdan_en, uint64_t ctx)
{
	/* NOTE: 64 bytes command. */
	struct __packed {
		uint8_t		verb;
		uint8_t		result; /* in response only! */
		uint16_t	chan_id;
		uint8_t		we;
		uint8_t		ctrl;
		uint16_t	_reserved2;
		uint64_t	ctx;
		uint8_t		_reserved3[48];
	} cmd = {0};
	struct __packed {
		uint8_t		verb;
		uint8_t		result;
		uint16_t	chan_id;
		uint8_t		_reserved[60];
	} rsp;
	int error;

	if (swp == NULL)
		return (EINVAL);

	cmd.chan_id = chan_id;
	cmd.we = we_mask;
	cmd.ctrl = cdan_en ? 1u : 0u;
	cmd.ctx = ctx;

	error = dpaa2_swp_exec_mgmt_command(swp, (struct dpaa2_swp_cmd *) &cmd,
	    (struct dpaa2_swp_rsp *) &rsp, CMDID_SWP_WQCHAN_CONFIGURE);
	if (error)
		return (error);

	if (rsp.result != QBMAN_CMD_RC_OK) {
		device_printf(swp->desc->dpio_dev, "WQ channel configuration "
		    "error: channel_id=%d, result=0x%02x\n", chan_id,
		    rsp.result);
		return (EIO);
	}

	return (0);
}
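
/*
 * Example (an informal sketch): enable CDAN on a channel with an opaque
 * context value that comes back in the notification; the write-enable mask
 * bit for the context field is shown as a hypothetical DPAA2_WQCHAN_WE_CTX
 * constant and "sc" as a driver softc:
 *
 *	error = dpaa2_swp_conf_wq_channel(portal, chan_id,
 *	    DPAA2_WQCHAN_WE_CTX, true, (uint64_t)(uintptr_t) sc);
 *	if (error != 0)
 *		device_printf(dev, "failed to enable CDAN\n");
 */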

/**
 * @brief Query current configuration/state of the buffer pool.
 */
int
dpaa2_swp_query_bp(struct dpaa2_swp *swp, uint16_t bpid,
    struct dpaa2_bp_conf *conf)
{
	/* NOTE: 64 bytes command. */
	struct __packed {
		uint8_t		verb;
		uint8_t		_reserved1;
		uint16_t	bpid;
		uint8_t		_reserved2[60];
	} cmd = {0};
	struct __packed {
		uint8_t		verb;
		uint8_t		result;
		uint32_t	_reserved1;
		uint8_t		bdi;
		uint8_t		state;
		uint32_t	fill;
		/* TODO: Support the other fields as well. */
		uint8_t		_reserved2[52];
	} rsp;
	int error;

	if (swp == NULL || conf == NULL)
		return (EINVAL);

	cmd.bpid = bpid;

	error = dpaa2_swp_exec_mgmt_command(swp, (struct dpaa2_swp_cmd *) &cmd,
	    (struct dpaa2_swp_rsp *) &rsp, CMDID_SWP_BP_QUERY);
	if (error)
		return (error);

	if (rsp.result != QBMAN_CMD_RC_OK) {
		device_printf(swp->desc->dpio_dev, "BP query error: bpid=%d, "
		    "result=0x%02x\n", bpid, rsp.result);
		return (EIO);
	}

	conf->bdi = rsp.bdi;
	conf->state = rsp.state;
	conf->free_bufn = rsp.fill;

	return (0);
}
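
/*
 * Example (an informal sketch): check how many free buffers are left in a
 * pool before deciding to seed more; the threshold of 64 is arbitrary:
 *
 *	struct dpaa2_bp_conf conf;
 *
 *	error = dpaa2_swp_query_bp(portal, bpid, &conf);
 *	if (error == 0 && conf.free_bufn < 64)
 *		... seed more buffers, e.g. via dpaa2_swp_release_bufs() ...
 */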

int
dpaa2_swp_release_bufs(struct dpaa2_swp *swp, uint16_t bpid, bus_addr_t *buf,
    uint32_t buf_num)
{
	/* NOTE: 64 bytes command. */
	struct __packed {
		uint8_t		verb;
		uint8_t		_reserved1;
		uint16_t	bpid;
		uint32_t	_reserved2;
		uint64_t	buf[DPAA2_SWP_BUFS_PER_CMD];
	} cmd = {0};
	int error;

	if (swp == NULL || buf == NULL || buf_num == 0u ||
	    buf_num > DPAA2_SWP_BUFS_PER_CMD)
		return (EINVAL);

	for (uint32_t i = 0; i < buf_num; i++)
		cmd.buf[i] = buf[i];
	cmd.bpid = bpid;
	cmd.verb |= 1 << 5; /* Switch release buffer command to valid. */

	error = dpaa2_swp_exec_br_command(swp, (struct dpaa2_swp_cmd *) &cmd,
	    buf_num);
	if (error) {
		device_printf(swp->desc->dpio_dev, "buffers release command "
		    "failed\n");
		return (error);
	}

	return (0);
}
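
/*
 * Example (an informal sketch): return a small batch of DMA-mapped buffers
 * to a pool; the bus addresses would normally come from bus_dmamap_load(9):
 *
 *	bus_addr_t paddr[2] = { buf0_paddr, buf1_paddr };
 *
 *	error = dpaa2_swp_release_bufs(portal, bpid, paddr, 2);
 */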

int
dpaa2_swp_dqrr_next_locked(struct dpaa2_swp *swp, struct dpaa2_dq *dq,
    uint32_t *idx)
{
	struct resource_map *map;
	struct dpaa2_swp_rsp *rsp;
	uint32_t verb, pi; /* producer index */
	uint32_t offset;

	if (swp == NULL || dq == NULL)
		return (EINVAL);

	map = swp->cinh_map;
	rsp = (struct dpaa2_swp_rsp *) dq;
	offset = swp->cfg.mem_backed
	    ? DPAA2_SWP_CENA_DQRR_MEM(swp->dqrr.next_idx)
	    : DPAA2_SWP_CENA_DQRR(swp->dqrr.next_idx);

	/*
	 * Before using the valid-bit to detect if something is there, we have
	 * to handle the case of the DQRR reset bug...
	 */
	if (swp->dqrr.reset_bug) {
		/*
		 * We pick up new entries by the cache-inhibited producer
		 * index, which means that a non-coherent mapping would require
		 * us to invalidate and read *only* once that PI has indicated
		 * that there's an entry here. The first trip around the DQRR
		 * ring will be much less efficient than all subsequent trips
		 * around it...
		 */
		pi = dpaa2_swp_read_reg(swp, DPAA2_SWP_CINH_DQPI) & DQRR_PI_MASK;

		/* There are new entries if pi != next_idx */
		if (pi == swp->dqrr.next_idx)
			return (ENOENT);

		/*
		 * If next_idx is/was the last ring index, and 'pi' is
		 * different, we can disable the workaround as all the ring
		 * entries have now been DMA'd to, so valid-bit checking is
		 * repaired.
		 *
		 * NOTE: This logic needs to be based on next_idx (which
		 *	 increments one at a time), rather than on pi (which
		 *	 can burst and wrap-around between our snapshots of it).
		 */
		if (swp->dqrr.next_idx == (swp->dqrr.ring_size - 1))
			swp->dqrr.reset_bug = 0;
	}

	verb = bus_read_4(map, offset);
	if ((verb & DPAA2_SWP_VALID_BIT) != swp->dqrr.valid_bit)
		return (ENOENT);

	/* Read dequeue response message. */
	for (int i = 0; i < DPAA2_SWP_RSP_PARAMS_N; i++)
		rsp->params[i] = bus_read_8(map, offset + i * sizeof(uint64_t));

	/* Return index of the current entry (if requested). */
	if (idx != NULL)
		*idx = swp->dqrr.next_idx;

	/*
	 * There's something there. Move "next_idx" attention to the next ring
	 * entry before returning what we found.
	 */
	swp->dqrr.next_idx++;
	swp->dqrr.next_idx &= swp->dqrr.ring_size - 1; /* wrap around */
	if (swp->dqrr.next_idx == 0u)
		swp->dqrr.valid_bit ^= DPAA2_SWP_VALID_BIT;

	return (0);
}
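
/*
 * Example (an informal sketch): drain all currently visible DQRR entries
 * under the portal lock; consume_frame() is a hypothetical per-entry handler:
 *
 *	struct dpaa2_dq dq;
 *	uint32_t idx;
 *	uint16_t flags;
 *
 *	DPAA2_SWP_LOCK(portal, &flags);
 *	while (dpaa2_swp_dqrr_next_locked(portal, &dq, &idx) == 0)
 *		consume_frame(&dq, idx);
 *	DPAA2_SWP_UNLOCK(portal);
 */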

int
dpaa2_swp_pull(struct dpaa2_swp *swp, uint16_t chan_id, struct dpaa2_buf *buf,
    uint32_t frames_n)
{
	/* NOTE: 64 bytes command. */
	struct __packed {
		uint8_t		verb;
		uint8_t		numf;
		uint8_t		tok;
		uint8_t		_reserved;
		uint32_t	dq_src;
		uint64_t	rsp_addr;
		uint64_t	_reserved1[6];
	} cmd = {0};
	struct dpaa2_dq *msg;
	uint16_t flags;
	int i, error;

	KASSERT(frames_n != 0u, ("%s: cannot pull zero frames", __func__));
	KASSERT(frames_n <= 16u, ("%s: too many frames to pull", __func__));

	cmd.numf = frames_n - 1;
	cmd.tok = DPAA2_SWP_VDQCR_TOKEN;
	cmd.dq_src = chan_id;
	cmd.rsp_addr = (uint64_t)buf->paddr;

	/* Dequeue command type */
	cmd.verb &= ~(1 << QB_VDQCR_VERB_DCT0_SHIFT);
	cmd.verb |= (1 << QB_VDQCR_VERB_DCT1_SHIFT);
	/* Dequeue from a specific software portal channel (ID's in DQ_SRC). */
	cmd.verb &= ~(1 << QB_VDQCR_VERB_DT0_SHIFT);
	cmd.verb &= ~(1 << QB_VDQCR_VERB_DT1_SHIFT);
	/* Write the response to this command into memory (at the RSP_ADDR). */
	cmd.verb |= (1 << QB_VDQCR_VERB_RLS_SHIFT);
	/* Response writes won't attempt to allocate into a cache. */
	cmd.verb &= ~(1 << QB_VDQCR_VERB_WAE_SHIFT);
	/* Allow the FQ to remain active in the portal after dequeue. */
	cmd.verb &= ~(1 << QB_VDQCR_VERB_RAD_SHIFT);

	DPAA2_SWP_LOCK(swp, &flags);
	if (flags & DPAA2_SWP_DESTROYED) {
		/* Terminate operation if portal is destroyed. */
		DPAA2_SWP_UNLOCK(swp);
		return (ENOENT);
	}

	error = dpaa2_swp_exec_vdc_command_locked(swp,
	    (struct dpaa2_swp_cmd *) &cmd);
	if (error != 0) {
		DPAA2_SWP_UNLOCK(swp);
		return (error);
	}

	/* Let's sync before reading the VDQ response from QBMan. */
	bus_dmamap_sync(buf->dmat, buf->dmap, BUS_DMASYNC_POSTREAD);

	/* Read the VDQ response from QBMan. */
	msg = (struct dpaa2_dq *) buf->vaddr;
	for (i = 1; i <= CMD_SPIN_ATTEMPTS; i++) {
		if ((msg->fdr.desc.stat & DPAA2_DQ_STAT_VOLATILE) &&
		    (msg->fdr.desc.tok == DPAA2_SWP_VDQCR_TOKEN)) {
			/* Reset token. */
			msg->fdr.desc.tok = 0;
			break;
		}
		DELAY(CMD_SPIN_TIMEOUT);
	}
	DPAA2_SWP_UNLOCK(swp);

	/* Return an error on expired timeout. */
	return (i > CMD_SPIN_ATTEMPTS ? ETIMEDOUT : 0);
}

/**
 * @brief Issue a command to enqueue a frame using one enqueue descriptor.
 *
 * swp:	Software portal to send this command to.
 * ed:	Enqueue command descriptor.
 * fd:	Frame descriptor to enqueue.
 */
int
dpaa2_swp_enq(struct dpaa2_swp *swp, struct dpaa2_eq_desc *ed,
    struct dpaa2_fd *fd)
{
	uint32_t flags = 0;
	int rc = dpaa2_swp_enq_mult(swp, ed, fd, &flags, 1);

	/* Unless exactly one frame was enqueued, report failure. */
	return (rc == 1 ? 0 : EBUSY);
}
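
/*
 * Example (an informal sketch): enqueue a prepared frame descriptor "fd" to
 * a frame queue and retry a few times while the EQCR is full:
 *
 *	struct dpaa2_eq_desc ed = {0};
 *
 *	dpaa2_swp_set_ed_norp(&ed, false);
 *	dpaa2_swp_set_ed_fq(&ed, fqid);
 *	for (int i = 0; i < 3; i++)
 *		if ((error = dpaa2_swp_enq(portal, &ed, &fd)) != EBUSY)
 *			break;
 */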

/**
 * @brief Issue a command to enqueue frames using one enqueue descriptor.
 *
 * swp:		Software portal to send this command to.
 * ed:		Enqueue command descriptor.
 * fd:		Array of frame descriptors to enqueue.
 * flags:	Array of at least "frames_n" per-frame ENQ_FLAG_DCA flags;
 *		must not be NULL.
 * frames_n:	Number of FDs to enqueue.
 *
 * NOTE: Enqueue command (64 bytes): 32 (eq. descriptor) + 32 (frame descriptor).
 */
int
dpaa2_swp_enq_mult(struct dpaa2_swp *swp, struct dpaa2_eq_desc *ed,
    struct dpaa2_fd *fd, uint32_t *flags, int frames_n)
{
	const uint8_t *ed_pdat8 = (const uint8_t *) ed;
	const uint32_t *ed_pdat32 = (const uint32_t *) ed;
	const uint64_t *ed_pdat64 = (const uint64_t *) ed;
	const uint64_t *fd_pdat64 = (const uint64_t *) fd;
	struct resource_map *map;
	uint32_t eqcr_ci, eqcr_pi; /* EQCR consumer/producer index */
	uint32_t half_mask, full_mask, val, ci_offset;
	uint16_t swp_flags;
	int num_enq = 0;

	if (swp == NULL || ed == NULL || fd == NULL || flags == NULL ||
	    frames_n == 0)
		return (EINVAL);

	DPAA2_SWP_LOCK(swp, &swp_flags);
	if (swp_flags & DPAA2_SWP_DESTROYED) {
		/* Terminate operation if portal is destroyed. */
		DPAA2_SWP_UNLOCK(swp);
		return (ENOENT);
	}

	map = swp->cfg.writes_cinh ? swp->cinh_map : swp->cena_map;
	ci_offset = swp->cfg.mem_backed
	    ? DPAA2_SWP_CENA_EQCR_CI_MEMBACK
	    : DPAA2_SWP_CENA_EQCR_CI;

	half_mask = swp->eqcr.pi_ci_mask >> 1;
	full_mask = swp->eqcr.pi_ci_mask;

	if (swp->eqcr.available == 0) {
		val = dpaa2_swp_read_reg(swp, ci_offset);
		eqcr_ci = swp->eqcr.ci;
		swp->eqcr.ci = val & full_mask;

		swp->eqcr.available = dpaa2_swp_cyc_diff(swp->eqcr.pi_ring_size,
		    eqcr_ci, swp->eqcr.ci);

		if (swp->eqcr.available == 0) {
			DPAA2_SWP_UNLOCK(swp);
			return (0);
		}
	}

	eqcr_pi = swp->eqcr.pi;
	num_enq = swp->eqcr.available < frames_n
	    ? swp->eqcr.available : frames_n;
	swp->eqcr.available -= num_enq;

	KASSERT(num_enq >= 0 && num_enq <= swp->eqcr.pi_ring_size,
	    ("%s: unexpected num_enq=%d", __func__, num_enq));
	KASSERT(swp->eqcr.available >= 0 &&
	    swp->eqcr.available <= swp->eqcr.pi_ring_size,
	    ("%s: unexpected eqcr.available=%d", __func__, swp->eqcr.available));

	/* Fill in the EQCR ring. */
	for (int i = 0; i < num_enq; i++) {
		/* Write enq. desc. without the VERB, DCA, SEQNUM and OPRID. */
		for (int j = 1; j <= 3; j++)
			bus_write_8(map,
			    DPAA2_SWP_CENA_EQCR(eqcr_pi & half_mask) +
			    sizeof(uint64_t) * j, ed_pdat64[j]);
		/* Write OPRID. */
		bus_write_4(map,
		    DPAA2_SWP_CENA_EQCR(eqcr_pi & half_mask) + sizeof(uint32_t),
		    ed_pdat32[1]);
		/* Write DCA and SEQNUM without the VERB byte. */
		for (int j = 1; j <= 3; j++)
			bus_write_1(map,
			    DPAA2_SWP_CENA_EQCR(eqcr_pi & half_mask) +
			    sizeof(uint8_t) * j, ed_pdat8[j]);

		/* Write the i-th (32 bytes long) frame descriptor. */
		for (int j = 0; j <= 3; j++)
			bus_write_8(map,
			    DPAA2_SWP_CENA_EQCR(eqcr_pi & half_mask) +
			    ENQ_DESC_FD_OFFSET +
			    sizeof(uint64_t) * j, fd_pdat64[4 * i + j]);
		eqcr_pi++;
	}

	wmb();

	/* Write the VERB byte of the enqueue descriptor. */
	eqcr_pi = swp->eqcr.pi;
	for (int i = 0; i < num_enq; i++) {
		bus_write_1(map,
		    DPAA2_SWP_CENA_EQCR(eqcr_pi & half_mask),
		    ed_pdat8[0] | swp->eqcr.pi_vb);

		if (flags[i] & ENQ_FLAG_DCA) {
			/* Update DCA byte. */
			bus_write_1(map,
			    DPAA2_SWP_CENA_EQCR(eqcr_pi & half_mask) + 1,
			    (1 << ENQ_CMD_DCA_EN_SHIFT) |
			    (flags[i] & ENQ_DCA_IDXMASK));
		}
		eqcr_pi++;
		if (!(eqcr_pi & half_mask))
			swp->eqcr.pi_vb ^= DPAA2_SWP_VALID_BIT;
	}
	swp->eqcr.pi = eqcr_pi & full_mask;

	DPAA2_SWP_UNLOCK(swp);

	return (num_enq);
}

static int
dpaa2_swp_cyc_diff(uint8_t ringsize, uint8_t first, uint8_t last)
{
	/* 'first' is included, 'last' is excluded */
	return ((first <= last)
	    ? (last - first) : ((2 * ringsize) - (first - last)));
}
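
/*
 * Example (an informal arithmetic sketch): the producer/consumer indices run
 * over a cycle of 2 * ringsize so that a full ring can be told apart from an
 * empty one. With ringsize = 8, first = 14 and last = 2 (the counter has
 * wrapped), the distance is (2 * 8) - (14 - 2) = 4 entries.
 */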

/**
 * @brief Execute a Buffer Release Command (BRC).
 */
static int
dpaa2_swp_exec_br_command(struct dpaa2_swp *swp, struct dpaa2_swp_cmd *cmd,
    uint32_t buf_num)
{
	struct __packed with_verb {
		uint8_t	verb;
		uint8_t	_reserved[63];
	} *c;
	const uint8_t *cmd_pdat8 = (const uint8_t *) cmd->params;
	const uint32_t *cmd_pdat32 = (const uint32_t *) cmd->params;
	struct resource_map *map;
	uint32_t offset, rar; /* Release Array Allocation register */
	uint16_t flags;

	if (!swp || !cmd)
		return (EINVAL);

	DPAA2_SWP_LOCK(swp, &flags);
	if (flags & DPAA2_SWP_DESTROYED) {
		/* Terminate operation if portal is destroyed. */
		DPAA2_SWP_UNLOCK(swp);
		return (ENOENT);
	}

	rar = dpaa2_swp_read_reg(swp, DPAA2_SWP_CINH_RAR);
	if (!RAR_SUCCESS(rar)) {
		DPAA2_SWP_UNLOCK(swp);
		return (EBUSY);
	}

	map = swp->cfg.writes_cinh ? swp->cinh_map : swp->cena_map;
	offset = swp->cfg.mem_backed
	    ? DPAA2_SWP_CENA_RCR_MEM(RAR_IDX(rar))
	    : DPAA2_SWP_CENA_RCR(RAR_IDX(rar));
	c = (struct with_verb *) cmd;

	/* Write command bytes (without the VERB byte). */
	for (uint32_t i = 1; i < DPAA2_SWP_CMD_PARAMS_N; i++)
		bus_write_8(map, offset + sizeof(uint64_t) * i, cmd->params[i]);
	bus_write_4(map, offset + 4, cmd_pdat32[1]);
	for (uint32_t i = 1; i <= 3; i++)
		bus_write_1(map, offset + i, cmd_pdat8[i]);

	/* Write the VERB byte and trigger command execution. */
	if (swp->cfg.mem_backed) {
		bus_write_1(map, offset, c->verb | RAR_VB(rar) | buf_num);
		wmb();
		dpaa2_swp_write_reg(swp, DPAA2_SWP_CINH_RCR_AM_RT +
		    RAR_IDX(rar) * 4, DPAA2_SWP_RT_MODE);
	} else {
		wmb();
		bus_write_1(map, offset, c->verb | RAR_VB(rar) | buf_num);
	}

	DPAA2_SWP_UNLOCK(swp);

	return (0);
}

/**
 * @brief Execute a Volatile Dequeue Command (VDC).
 *
 * This command is executed by QBMan only once in order to deliver the
 * requested number of frames (1-16 or 1-32, depending on the QBMan version)
 * to the driver via DQRR or arbitrary DMA-mapped memory.
 *
 * NOTE: There is a counterpart to the volatile dequeue command called the
 *	 static dequeue command (SDQC), which is executed periodically all the
 *	 time the command is present in the SDQCR register.
 */
static int
dpaa2_swp_exec_vdc_command_locked(struct dpaa2_swp *swp,
    struct dpaa2_swp_cmd *cmd)
{
	struct __packed with_verb {
		uint8_t	verb;
		uint8_t	_reserved[63];
	} *c;
	const uint8_t *p8 = (const uint8_t *) cmd->params;
	const uint32_t *p32 = (const uint32_t *) cmd->params;
	struct resource_map *map;
	uint32_t offset;

	map = swp->cfg.writes_cinh ? swp->cinh_map : swp->cena_map;
	offset = swp->cfg.mem_backed
	    ? DPAA2_SWP_CENA_VDQCR_MEM : DPAA2_SWP_CENA_VDQCR;
	c = (struct with_verb *) cmd;

	/* Write command bytes (without the VERB byte). */
	for (uint32_t i = 1; i < DPAA2_SWP_CMD_PARAMS_N; i++)
		bus_write_8(map, offset + sizeof(uint64_t) * i, cmd->params[i]);
	bus_write_4(map, offset + 4, p32[1]);
	for (uint32_t i = 1; i <= 3; i++)
		bus_write_1(map, offset + i, p8[i]);

	/* Write the VERB byte and trigger command execution. */
	if (swp->cfg.mem_backed) {
		bus_write_1(map, offset, c->verb | swp->vdq.valid_bit);
		swp->vdq.valid_bit ^= DPAA2_SWP_VALID_BIT;
		wmb();
		dpaa2_swp_write_reg(swp, DPAA2_SWP_CINH_VDQCR_RT,
		    DPAA2_SWP_RT_MODE);
	} else {
		wmb();
		bus_write_1(map, offset, c->verb | swp->vdq.valid_bit);
		swp->vdq.valid_bit ^= DPAA2_SWP_VALID_BIT;
	}

	return (0);
}

/**
 * @brief Execute a QBMan management command.
 */
static int
dpaa2_swp_exec_mgmt_command(struct dpaa2_swp *swp, struct dpaa2_swp_cmd *cmd,
    struct dpaa2_swp_rsp *rsp, uint8_t cmdid)
{
#if (defined(_KERNEL) && defined(INVARIANTS))
	struct __packed with_verb {
		uint8_t	verb;
		uint8_t	_reserved[63];
	} *r;
#endif
	uint16_t flags;
	int error;

	if (swp == NULL || cmd == NULL || rsp == NULL)
		return (EINVAL);

	DPAA2_SWP_LOCK(swp, &flags);
	if (flags & DPAA2_SWP_DESTROYED) {
		/* Terminate operation if portal is destroyed. */
		DPAA2_SWP_UNLOCK(swp);
		return (ENOENT);
	}

	/*
	 * Send a command to QBMan using the Management Command register and
	 * wait for a response from the Management Response registers.
	 */
	dpaa2_swp_send_mgmt_command(swp, cmd, cmdid);
	error = dpaa2_swp_wait_for_mgmt_response(swp, rsp);
	if (error) {
		DPAA2_SWP_UNLOCK(swp);
		return (error);
	}
	DPAA2_SWP_UNLOCK(swp);

#if (defined(_KERNEL) && defined(INVARIANTS))
	r = (struct with_verb *) rsp;
	KASSERT((r->verb & CMD_VERB_MASK) == cmdid,
	    ("wrong VERB byte in response: resp=0x%02x, expected=0x%02x",
	    r->verb, cmdid));
#endif

	return (0);
}

static int
dpaa2_swp_send_mgmt_command(struct dpaa2_swp *swp, struct dpaa2_swp_cmd *cmd,
    uint8_t cmdid)
{
	const uint8_t *cmd_pdat8 = (const uint8_t *) cmd->params;
	const uint32_t *cmd_pdat32 = (const uint32_t *) cmd->params;
	struct resource_map *map;
	uint32_t offset;

	map = swp->cfg.writes_cinh ? swp->cinh_map : swp->cena_map;
	offset = swp->cfg.mem_backed ? DPAA2_SWP_CENA_CR_MEM : DPAA2_SWP_CENA_CR;

	/* Write command bytes (without the VERB byte). */
	for (uint32_t i = 1; i < DPAA2_SWP_CMD_PARAMS_N; i++)
		bus_write_8(map, offset + sizeof(uint64_t) * i, cmd->params[i]);
	bus_write_4(map, offset + 4, cmd_pdat32[1]);
	for (uint32_t i = 1; i <= 3; i++)
		bus_write_1(map, offset + i, cmd_pdat8[i]);

	/* Write the VERB byte and trigger command execution. */
	if (swp->cfg.mem_backed) {
		bus_write_1(map, offset, cmdid | swp->mr.valid_bit);
		wmb();
		dpaa2_swp_write_reg(swp, DPAA2_SWP_CINH_CR_RT,
		    DPAA2_SWP_RT_MODE);
	} else {
		wmb();
		bus_write_1(map, offset, cmdid | swp->mc.valid_bit);
	}

	return (0);
}

static int
dpaa2_swp_wait_for_mgmt_response(struct dpaa2_swp *swp, struct dpaa2_swp_rsp *rsp)
{
	struct resource_map *map = swp->cfg.mem_backed
	    ? swp->cena_map : swp->cinh_map;
	/*
	 * The management command response is read from the single RR register
	 * (memory-backed mode) or from one of the RR0/RR1 registers, selected
	 * by the valid bit.
	 */
	const uint32_t offset = swp->cfg.mem_backed
	    ? DPAA2_SWP_CENA_RR_MEM
	    : DPAA2_SWP_CENA_RR(swp->mc.valid_bit);
	uint32_t i, verb, ret;
	int rc;

	/* Wait for a command response from QBMan. */
	for (i = 1; i <= CMD_SPIN_ATTEMPTS; i++) {
		if (swp->cfg.mem_backed) {
			verb = (uint32_t) (bus_read_4(map, offset) & 0xFFu);
			if (swp->mr.valid_bit != (verb & DPAA2_SWP_VALID_BIT))
				goto wait;
			if (!(verb & ~DPAA2_SWP_VALID_BIT))
				goto wait;
			swp->mr.valid_bit ^= DPAA2_SWP_VALID_BIT;
		} else {
			ret = bus_read_4(map, offset);
			verb = ret & ~DPAA2_SWP_VALID_BIT; /* remove valid bit */
			if (verb == 0u)
				goto wait;
			swp->mc.valid_bit ^= DPAA2_SWP_VALID_BIT;
		}
		break;
wait:
		DELAY(CMD_SPIN_TIMEOUT);
	}
	/* Return an error on expired timeout. */
	rc = i > CMD_SPIN_ATTEMPTS ? ETIMEDOUT : 0;

	/* Read the command response. */
	for (i = 0; i < DPAA2_SWP_RSP_PARAMS_N; i++)
		rsp->params[i] = bus_read_8(map, offset + i * sizeof(uint64_t));

	return (rc);
}