/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *	 notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *	 notice, this list of conditions and the following disclaimer in the
 *	 documentation and/or other materials provided with the distribution.
 *     * Neither the name of Freescale Semiconductor nor the
 *	 names of its contributors may be used to endorse or promote products
 *	 derived from this software without specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "qman_priv.h"

#define DQRR_MAXFILL	15
#define EQCR_ITHRESH	4	/* if EQCR congests, interrupt threshold */
#define IRQNAME		"QMan portal %d"
#define MAX_IRQNAME	16	/* big enough for "QMan portal %d" */
#define QMAN_POLL_LIMIT	32
#define QMAN_PIRQ_DQRR_ITHRESH	12
#define QMAN_DQRR_IT_MAX	15
#define QMAN_ITP_MAX	0xFFF
#define QMAN_PIRQ_MR_ITHRESH	4
#define QMAN_PIRQ_IPERIOD	100

/* Portal register assists */

#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
/* Cache-inhibited register offsets */
#define QM_REG_EQCR_PI_CINH	0x3000
#define QM_REG_EQCR_CI_CINH	0x3040
#define QM_REG_EQCR_ITR		0x3080
#define QM_REG_DQRR_PI_CINH	0x3100
#define QM_REG_DQRR_CI_CINH	0x3140
#define QM_REG_DQRR_ITR		0x3180
#define QM_REG_DQRR_DCAP	0x31C0
#define QM_REG_DQRR_SDQCR	0x3200
#define QM_REG_DQRR_VDQCR	0x3240
#define QM_REG_DQRR_PDQCR	0x3280
#define QM_REG_MR_PI_CINH	0x3300
#define QM_REG_MR_CI_CINH	0x3340
#define QM_REG_MR_ITR		0x3380
#define QM_REG_CFG		0x3500
#define QM_REG_ISR		0x3600
#define QM_REG_IER		0x3640
#define QM_REG_ISDR		0x3680
#define QM_REG_IIR		0x36C0
#define QM_REG_ITPR		0x3740

/* Cache-enabled register offsets */
#define QM_CL_EQCR		0x0000
#define QM_CL_DQRR		0x1000
#define QM_CL_MR		0x2000
#define QM_CL_EQCR_PI_CENA	0x3000
#define QM_CL_EQCR_CI_CENA	0x3040
#define QM_CL_DQRR_PI_CENA	0x3100
#define QM_CL_DQRR_CI_CENA	0x3140
#define QM_CL_MR_PI_CENA	0x3300
#define QM_CL_MR_CI_CENA	0x3340
#define QM_CL_CR		0x3800
#define QM_CL_RR0		0x3900
#define QM_CL_RR1		0x3940

#else
/* Cache-inhibited register offsets */
#define QM_REG_EQCR_PI_CINH	0x0000
#define QM_REG_EQCR_CI_CINH	0x0004
#define QM_REG_EQCR_ITR		0x0008
#define QM_REG_DQRR_PI_CINH	0x0040
#define QM_REG_DQRR_CI_CINH	0x0044
#define QM_REG_DQRR_ITR		0x0048
#define QM_REG_DQRR_DCAP	0x0050
#define QM_REG_DQRR_SDQCR	0x0054
#define QM_REG_DQRR_VDQCR	0x0058
#define QM_REG_DQRR_PDQCR	0x005c
#define QM_REG_MR_PI_CINH	0x0080
#define QM_REG_MR_CI_CINH	0x0084
#define QM_REG_MR_ITR		0x0088
#define QM_REG_CFG		0x0100
#define QM_REG_ISR		0x0e00
#define QM_REG_IER		0x0e04
#define QM_REG_ISDR		0x0e08
#define QM_REG_IIR		0x0e0c
#define QM_REG_ITPR		0x0e14

/* Cache-enabled register offsets */
#define QM_CL_EQCR		0x0000
#define QM_CL_DQRR		0x1000
#define QM_CL_MR		0x2000
#define QM_CL_EQCR_PI_CENA	0x3000
#define QM_CL_EQCR_CI_CENA	0x3100
#define QM_CL_DQRR_PI_CENA	0x3200
#define QM_CL_DQRR_CI_CENA	0x3300
#define QM_CL_MR_PI_CENA	0x3400
#define QM_CL_MR_CI_CENA	0x3500
#define QM_CL_CR		0x3800
#define QM_CL_RR0		0x3900
#define QM_CL_RR1		0x3940
#endif

/*
 * BTW, the drivers (and h/w programming model) already obtain the required
 * synchronisation for portal accesses and data-dependencies. Use of barrier()s
 * or other order-preserving primitives simply degrades performance. Hence the
 * use of the __raw_*() interfaces, which simply ensure that the compiler
 * treats the portal registers as volatile.
 */

/* Cache-enabled ring access */
#define qm_cl(base, idx)	((void *)base + ((idx) << 6))
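
/*
 * Illustrative note: each ring entry occupies one 64-byte cacheline, so
 * e.g. qm_cl(ring, 3) == ring + 0xc0, i.e. the fourth cacheline of the
 * ring.
 */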

/*
 * Portal modes.
 * Enum types:
 *   pmode == production mode
 *   cmode == consumption mode
 *   dmode == h/w dequeue mode.
 * Enum values use 3 letter codes. First letter matches the portal mode,
 * remaining two letters indicate:
 *   ci == cache-inhibited portal register
 *   ce == cache-enabled portal register
 *   vb == in-band valid-bit (cache-enabled)
 *   dc == DCA (Discrete Consumption Acknowledgment), DQRR-only
 * As for "enum qm_dqrr_dmode", it should be self-explanatory.
 */
enum qm_eqcr_pmode {		/* matches QCSP_CFG::EPM */
	qm_eqcr_pci = 0,	/* PI index, cache-inhibited */
	qm_eqcr_pce = 1,	/* PI index, cache-enabled */
	qm_eqcr_pvb = 2		/* valid-bit */
};
enum qm_dqrr_dmode {		/* matches QCSP_CFG::DP */
	qm_dqrr_dpush = 0,	/* SDQCR + VDQCR */
	qm_dqrr_dpull = 1	/* PDQCR */
};
enum qm_dqrr_pmode {		/* s/w-only */
	qm_dqrr_pci,		/* reads DQRR_PI_CINH */
	qm_dqrr_pce,		/* reads DQRR_PI_CENA */
	qm_dqrr_pvb		/* reads valid-bit */
};
enum qm_dqrr_cmode {		/* matches QCSP_CFG::DCM */
	qm_dqrr_cci = 0,	/* CI index, cache-inhibited */
	qm_dqrr_cce = 1,	/* CI index, cache-enabled */
	qm_dqrr_cdc = 2		/* Discrete Consumption Acknowledgment */
};
enum qm_mr_pmode {		/* s/w-only */
	qm_mr_pci,		/* reads MR_PI_CINH */
	qm_mr_pce,		/* reads MR_PI_CENA */
	qm_mr_pvb		/* reads valid-bit */
};
enum qm_mr_cmode {		/* matches QCSP_CFG::MM */
	qm_mr_cci = 0,		/* CI index, cache-inhibited */
	qm_mr_cce = 1		/* CI index, cache-enabled */
};

/* --- Portal structures --- */

#define QM_EQCR_SIZE	8
#define QM_DQRR_SIZE	16
#define QM_MR_SIZE	8

/* "Enqueue Command" */
struct qm_eqcr_entry {
	u8 _ncw_verb; /* writes to this are non-coherent */
	u8 dca;
	__be16 seqnum;
	u8 __reserved[4];
	__be32 fqid;	/* 24-bit */
	__be32 tag;
	struct qm_fd fd;
	u8 __reserved3[32];
} __packed __aligned(8);
#define QM_EQCR_VERB_VBIT		0x80
#define QM_EQCR_VERB_CMD_MASK		0x61	/* but only one value; */
#define QM_EQCR_VERB_CMD_ENQUEUE	0x01
#define QM_EQCR_SEQNUM_NESN		0x8000	/* Advance NESN */
#define QM_EQCR_SEQNUM_NLIS		0x4000	/* More fragments to come */
#define QM_EQCR_SEQNUM_SEQMASK		0x3fff	/* sequence number goes here */

struct qm_eqcr {
	struct qm_eqcr_entry *ring, *cursor;
	u8 ci, available, ithresh, vbit;
#ifdef CONFIG_FSL_DPAA_CHECKING
	u32 busy;
	enum qm_eqcr_pmode pmode;
#endif
};

struct qm_dqrr {
	const struct qm_dqrr_entry *ring, *cursor;
	u8 pi, ci, fill, ithresh, vbit;
#ifdef CONFIG_FSL_DPAA_CHECKING
	enum qm_dqrr_dmode dmode;
	enum qm_dqrr_pmode pmode;
	enum qm_dqrr_cmode cmode;
#endif
};

struct qm_mr {
	union qm_mr_entry *ring, *cursor;
	u8 pi, ci, fill, ithresh, vbit;
#ifdef CONFIG_FSL_DPAA_CHECKING
	enum qm_mr_pmode pmode;
	enum qm_mr_cmode cmode;
#endif
};

/* MC (Management Command) command */
/* "FQ" command layout */
struct qm_mcc_fq {
	u8 _ncw_verb;
	u8 __reserved1[3];
	__be32 fqid;	/* 24-bit */
	u8 __reserved2[56];
} __packed;

/* "CGR" command layout */
struct qm_mcc_cgr {
	u8 _ncw_verb;
	u8 __reserved1[30];
	u8 cgid;
	u8 __reserved2[32];
};

#define QM_MCC_VERB_VBIT		0x80
#define QM_MCC_VERB_MASK		0x7f	/* where the verb contains; */
#define QM_MCC_VERB_INITFQ_PARKED	0x40
#define QM_MCC_VERB_INITFQ_SCHED	0x41
#define QM_MCC_VERB_QUERYFQ		0x44
#define QM_MCC_VERB_QUERYFQ_NP		0x45	/* "non-programmable" fields */
#define QM_MCC_VERB_QUERYWQ		0x46
#define QM_MCC_VERB_QUERYWQ_DEDICATED	0x47
#define QM_MCC_VERB_ALTER_SCHED		0x48	/* Schedule FQ */
#define QM_MCC_VERB_ALTER_FE		0x49	/* Force Eligible FQ */
#define QM_MCC_VERB_ALTER_RETIRE	0x4a	/* Retire FQ */
#define QM_MCC_VERB_ALTER_OOS		0x4b	/* Take FQ out of service */
#define QM_MCC_VERB_ALTER_FQXON		0x4d	/* FQ XON */
#define QM_MCC_VERB_ALTER_FQXOFF	0x4e	/* FQ XOFF */
#define QM_MCC_VERB_INITCGR		0x50
#define QM_MCC_VERB_MODIFYCGR		0x51
#define QM_MCC_VERB_CGRTESTWRITE	0x52
#define QM_MCC_VERB_QUERYCGR		0x58
#define QM_MCC_VERB_QUERYCONGESTION	0x59
union qm_mc_command {
	struct {
		u8 _ncw_verb; /* writes to this are non-coherent */
		u8 __reserved[63];
	};
	struct qm_mcc_initfq initfq;
	struct qm_mcc_initcgr initcgr;
	struct qm_mcc_fq fq;
	struct qm_mcc_cgr cgr;
};

/* MC (Management Command) result */
/* "Query FQ" */
struct qm_mcr_queryfq {
	u8 verb;
	u8 result;
	u8 __reserved1[8];
	struct qm_fqd fqd;	/* the FQD fields are here */
	u8 __reserved2[30];
} __packed;

/* "Alter FQ State Commands" */
struct qm_mcr_alterfq {
	u8 verb;
	u8 result;
	u8 fqs;			/* Frame Queue Status */
	u8 __reserved1[61];
};
#define QM_MCR_VERB_RRID		0x80
#define QM_MCR_VERB_MASK		QM_MCC_VERB_MASK
#define QM_MCR_VERB_INITFQ_PARKED	QM_MCC_VERB_INITFQ_PARKED
#define QM_MCR_VERB_INITFQ_SCHED	QM_MCC_VERB_INITFQ_SCHED
#define QM_MCR_VERB_QUERYFQ		QM_MCC_VERB_QUERYFQ
#define QM_MCR_VERB_QUERYFQ_NP		QM_MCC_VERB_QUERYFQ_NP
#define QM_MCR_VERB_QUERYWQ		QM_MCC_VERB_QUERYWQ
#define QM_MCR_VERB_QUERYWQ_DEDICATED	QM_MCC_VERB_QUERYWQ_DEDICATED
#define QM_MCR_VERB_ALTER_SCHED		QM_MCC_VERB_ALTER_SCHED
#define QM_MCR_VERB_ALTER_FE		QM_MCC_VERB_ALTER_FE
#define QM_MCR_VERB_ALTER_RETIRE	QM_MCC_VERB_ALTER_RETIRE
#define QM_MCR_VERB_ALTER_OOS		QM_MCC_VERB_ALTER_OOS
#define QM_MCR_RESULT_NULL		0x00
#define QM_MCR_RESULT_OK		0xf0
#define QM_MCR_RESULT_ERR_FQID		0xf1
#define QM_MCR_RESULT_ERR_FQSTATE	0xf2
#define QM_MCR_RESULT_ERR_NOTEMPTY	0xf3	/* OOS fails if FQ is !empty */
#define QM_MCR_RESULT_ERR_BADCHANNEL	0xf4
#define QM_MCR_RESULT_PENDING		0xf8
#define QM_MCR_RESULT_ERR_BADCOMMAND	0xff
#define QM_MCR_FQS_ORLPRESENT		0x02	/* ORL fragments to come */
#define QM_MCR_FQS_NOTEMPTY		0x01	/* FQ has enqueued frames */
#define QM_MCR_TIMEOUT			10000	/* us */
union qm_mc_result {
	struct {
		u8 verb;
		u8 result;
		u8 __reserved1[62];
	};
	struct qm_mcr_queryfq queryfq;
	struct qm_mcr_alterfq alterfq;
	struct qm_mcr_querycgr querycgr;
	struct qm_mcr_querycongestion querycongestion;
	struct qm_mcr_querywq querywq;
	struct qm_mcr_queryfq_np queryfq_np;
};

struct qm_mc {
	union qm_mc_command *cr;
	union qm_mc_result *rr;
	u8 rridx, vbit;
#ifdef CONFIG_FSL_DPAA_CHECKING
	enum {
		/* Can be _mc_start()ed */
		qman_mc_idle,
		/* Can be _mc_commit()ed or _mc_abort()ed */
		qman_mc_user,
		/* Can only be _mc_retry()ed */
		qman_mc_hw
	} state;
#endif
};

struct qm_addr {
	void *ce;		/* cache-enabled */
	__be32 *ce_be;		/* same value as above but for direct access */
	void __iomem *ci;	/* cache-inhibited */
};

struct qm_portal {
	/*
	 * In the non-CONFIG_FSL_DPAA_CHECKING case, the following stuff up to
	 * and including 'mc' fits within a cacheline (yay!). The 'config' part
	 * is setup-only, so isn't a cause for concern. In other words, don't
	 * rearrange this structure on a whim, there be dragons ...
	 */
	struct qm_addr addr;
	struct qm_eqcr eqcr;
	struct qm_dqrr dqrr;
	struct qm_mr mr;
	struct qm_mc mc;
} ____cacheline_aligned;

/* Cache-inhibited register access. */
static inline u32 qm_in(struct qm_portal *p, u32 offset)
{
	return ioread32be(p->addr.ci + offset);
}

static inline void qm_out(struct qm_portal *p, u32 offset, u32 val)
{
	iowrite32be(val, p->addr.ci + offset);
}

/* Cache Enabled Portal Access */
static inline void qm_cl_invalidate(struct qm_portal *p, u32 offset)
{
	dpaa_invalidate(p->addr.ce + offset);
}

static inline void qm_cl_touch_ro(struct qm_portal *p, u32 offset)
{
	dpaa_touch_ro(p->addr.ce + offset);
}

static inline u32 qm_ce_in(struct qm_portal *p, u32 offset)
{
	return be32_to_cpu(*(p->addr.ce_be + (offset/4)));
}

/* --- EQCR API --- */

#define EQCR_SHIFT	ilog2(sizeof(struct qm_eqcr_entry))
#define EQCR_CARRY	(uintptr_t)(QM_EQCR_SIZE << EQCR_SHIFT)
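
/*
 * Illustrative note: with QM_EQCR_SIZE == 8 and 64-byte entries
 * (EQCR_SHIFT == 6), EQCR_CARRY == 0x200. Walking the cursor past
 * entry 7 sets that "carry" bit in the pointer, and masking it off in
 * eqcr_carryclear() below wraps the cursor back to entry 0.
 */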

/* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */
static struct qm_eqcr_entry *eqcr_carryclear(struct qm_eqcr_entry *p)
{
	uintptr_t addr = (uintptr_t)p;

	addr &= ~EQCR_CARRY;

	return (struct qm_eqcr_entry *)addr;
}

/* Bit-wise logic to convert a ring pointer to a ring index */
static int eqcr_ptr2idx(struct qm_eqcr_entry *e)
{
	return ((uintptr_t)e >> EQCR_SHIFT) & (QM_EQCR_SIZE - 1);
}

/* Increment the 'cursor' ring pointer, taking 'vbit' into account */
static inline void eqcr_inc(struct qm_eqcr *eqcr)
{
	/* increment to the next EQCR pointer and handle overflow and 'vbit' */
	struct qm_eqcr_entry *partial = eqcr->cursor + 1;

	eqcr->cursor = eqcr_carryclear(partial);
	if (partial != eqcr->cursor)
		eqcr->vbit ^= QM_EQCR_VERB_VBIT;
}

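/*
 * Illustrative note: qm_eqcr_init() below caps 'available' at
 * QM_EQCR_SIZE - 1. Keeping one slot permanently unused is the usual
 * way of making a full ring distinguishable from an empty one when
 * only the PI/CI indices are compared.
 */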
static inline int qm_eqcr_init(struct qm_portal *portal,
			       enum qm_eqcr_pmode pmode,
			       unsigned int eq_stash_thresh,
			       int eq_stash_prio)
{
	struct qm_eqcr *eqcr = &portal->eqcr;
	u32 cfg;
	u8 pi;

	eqcr->ring = portal->addr.ce + QM_CL_EQCR;
	eqcr->ci = qm_in(portal, QM_REG_EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
	qm_cl_invalidate(portal, QM_CL_EQCR_CI_CENA);
	pi = qm_in(portal, QM_REG_EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
	eqcr->cursor = eqcr->ring + pi;
	eqcr->vbit = (qm_in(portal, QM_REG_EQCR_PI_CINH) & QM_EQCR_SIZE) ?
		     QM_EQCR_VERB_VBIT : 0;
	eqcr->available = QM_EQCR_SIZE - 1 -
			  dpaa_cyc_diff(QM_EQCR_SIZE, eqcr->ci, pi);
	eqcr->ithresh = qm_in(portal, QM_REG_EQCR_ITR);
#ifdef CONFIG_FSL_DPAA_CHECKING
	eqcr->busy = 0;
	eqcr->pmode = pmode;
#endif
	cfg = (qm_in(portal, QM_REG_CFG) & 0x00ffffff) |
	      (eq_stash_thresh << 28) |	/* QCSP_CFG: EST */
	      (eq_stash_prio << 26) |	/* QCSP_CFG: EP */
	      ((pmode & 0x3) << 24);	/* QCSP_CFG::EPM */
	qm_out(portal, QM_REG_CFG, cfg);
	return 0;
}

static inline void qm_eqcr_finish(struct qm_portal *portal)
{
	struct qm_eqcr *eqcr = &portal->eqcr;
	u8 pi = qm_in(portal, QM_REG_EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
	u8 ci = qm_in(portal, QM_REG_EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);

	DPAA_ASSERT(!eqcr->busy);
	if (pi != eqcr_ptr2idx(eqcr->cursor))
		pr_crit("losing uncommitted EQCR entries\n");
	if (ci != eqcr->ci)
		pr_crit("missing existing EQCR completions\n");
	if (eqcr->ci != eqcr_ptr2idx(eqcr->cursor))
		pr_crit("EQCR destroyed unquiesced\n");
}

static inline struct qm_eqcr_entry *qm_eqcr_start_no_stash(struct qm_portal
								 *portal)
{
	struct qm_eqcr *eqcr = &portal->eqcr;

	DPAA_ASSERT(!eqcr->busy);
	if (!eqcr->available)
		return NULL;

#ifdef CONFIG_FSL_DPAA_CHECKING
	eqcr->busy = 1;
#endif
	dpaa_zero(eqcr->cursor);
	return eqcr->cursor;
}

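/*
 * Illustrative note: unlike the no-stash variant above, this one
 * refreshes the cached 'ci' from the cache-enabled shadow register when
 * the ring looks full, so a producer can discover fresh completions
 * without a (slower) read of the cache-inhibited CI register.
 */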
static inline struct qm_eqcr_entry *qm_eqcr_start_stash(struct qm_portal
								*portal)
{
	struct qm_eqcr *eqcr = &portal->eqcr;
	u8 diff, old_ci;

	DPAA_ASSERT(!eqcr->busy);
	if (!eqcr->available) {
		old_ci = eqcr->ci;
		eqcr->ci = qm_ce_in(portal, QM_CL_EQCR_CI_CENA) &
			   (QM_EQCR_SIZE - 1);
		diff = dpaa_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
		eqcr->available += diff;
		if (!diff)
			return NULL;
	}
#ifdef CONFIG_FSL_DPAA_CHECKING
	eqcr->busy = 1;
#endif
	dpaa_zero(eqcr->cursor);
	return eqcr->cursor;
}

static inline void eqcr_commit_checks(struct qm_eqcr *eqcr)
{
	DPAA_ASSERT(eqcr->busy);
	DPAA_ASSERT(!(be32_to_cpu(eqcr->cursor->fqid) & ~QM_FQID_MASK));
	DPAA_ASSERT(eqcr->available >= 1);
}

static inline void qm_eqcr_pvb_commit(struct qm_portal *portal, u8 myverb)
{
	struct qm_eqcr *eqcr = &portal->eqcr;
	struct qm_eqcr_entry *eqcursor;

	eqcr_commit_checks(eqcr);
	DPAA_ASSERT(eqcr->pmode == qm_eqcr_pvb);
	dma_wmb();
	eqcursor = eqcr->cursor;
	eqcursor->_ncw_verb = myverb | eqcr->vbit;
	dpaa_flush(eqcursor);
	eqcr_inc(eqcr);
	eqcr->available--;
#ifdef CONFIG_FSL_DPAA_CHECKING
	eqcr->busy = 0;
#endif
}

static inline void qm_eqcr_cce_prefetch(struct qm_portal *portal)
{
	qm_cl_touch_ro(portal, QM_CL_EQCR_CI_CENA);
}

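/*
 * Illustrative note: qm_eqcr_cce_prefetch() above pulls the CENA CI
 * cacheline in ahead of time so the read in qm_eqcr_cce_update() below
 * is likely to hit in cache; the update then invalidates the line so a
 * subsequent prefetch observes fresh hardware state.
 */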
static inline u8 qm_eqcr_cce_update(struct qm_portal *portal)
{
	struct qm_eqcr *eqcr = &portal->eqcr;
	u8 diff, old_ci = eqcr->ci;

	eqcr->ci = qm_ce_in(portal, QM_CL_EQCR_CI_CENA) & (QM_EQCR_SIZE - 1);
	qm_cl_invalidate(portal, QM_CL_EQCR_CI_CENA);
	diff = dpaa_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
	eqcr->available += diff;
	return diff;
}

static inline void qm_eqcr_set_ithresh(struct qm_portal *portal, u8 ithresh)
{
	struct qm_eqcr *eqcr = &portal->eqcr;

	eqcr->ithresh = ithresh;
	qm_out(portal, QM_REG_EQCR_ITR, ithresh);
}

static inline u8 qm_eqcr_get_avail(struct qm_portal *portal)
{
	struct qm_eqcr *eqcr = &portal->eqcr;

	return eqcr->available;
}

static inline u8 qm_eqcr_get_fill(struct qm_portal *portal)
{
	struct qm_eqcr *eqcr = &portal->eqcr;

	return QM_EQCR_SIZE - 1 - eqcr->available;
}

/* --- DQRR API --- */

#define DQRR_SHIFT	ilog2(sizeof(struct qm_dqrr_entry))
#define DQRR_CARRY	(uintptr_t)(QM_DQRR_SIZE << DQRR_SHIFT)

static const struct qm_dqrr_entry *dqrr_carryclear(
					const struct qm_dqrr_entry *p)
{
	uintptr_t addr = (uintptr_t)p;

	addr &= ~DQRR_CARRY;

	return (const struct qm_dqrr_entry *)addr;
}

static inline int dqrr_ptr2idx(const struct qm_dqrr_entry *e)
{
	return ((uintptr_t)e >> DQRR_SHIFT) & (QM_DQRR_SIZE - 1);
}

static const struct qm_dqrr_entry *dqrr_inc(const struct qm_dqrr_entry *e)
{
	return dqrr_carryclear(e + 1);
}

static inline void qm_dqrr_set_maxfill(struct qm_portal *portal, u8 mf)
{
	qm_out(portal, QM_REG_CFG, (qm_in(portal, QM_REG_CFG) & 0xff0fffff) |
				   ((mf & (QM_DQRR_SIZE - 1)) << 20));
}

static inline int qm_dqrr_init(struct qm_portal *portal,
			       const struct qm_portal_config *config,
			       enum qm_dqrr_dmode dmode,
			       enum qm_dqrr_pmode pmode,
			       enum qm_dqrr_cmode cmode, u8 max_fill)
{
	struct qm_dqrr *dqrr = &portal->dqrr;
	u32 cfg;

	/* Make sure the DQRR will be idle when we enable */
	qm_out(portal, QM_REG_DQRR_SDQCR, 0);
	qm_out(portal, QM_REG_DQRR_VDQCR, 0);
	qm_out(portal, QM_REG_DQRR_PDQCR, 0);
	dqrr->ring = portal->addr.ce + QM_CL_DQRR;
	dqrr->pi = qm_in(portal, QM_REG_DQRR_PI_CINH) & (QM_DQRR_SIZE - 1);
	dqrr->ci = qm_in(portal, QM_REG_DQRR_CI_CINH) & (QM_DQRR_SIZE - 1);
	dqrr->cursor = dqrr->ring + dqrr->ci;
	dqrr->fill = dpaa_cyc_diff(QM_DQRR_SIZE, dqrr->ci, dqrr->pi);
	dqrr->vbit = (qm_in(portal, QM_REG_DQRR_PI_CINH) & QM_DQRR_SIZE) ?
		     QM_DQRR_VERB_VBIT : 0;
	dqrr->ithresh = qm_in(portal, QM_REG_DQRR_ITR);
#ifdef CONFIG_FSL_DPAA_CHECKING
	dqrr->dmode = dmode;
	dqrr->pmode = pmode;
	dqrr->cmode = cmode;
#endif
	/* Invalidate every ring entry before beginning */
	for (cfg = 0; cfg < QM_DQRR_SIZE; cfg++)
		dpaa_invalidate(qm_cl(dqrr->ring, cfg));
	cfg = (qm_in(portal, QM_REG_CFG) & 0xff000f00) |
	      ((max_fill & (QM_DQRR_SIZE - 1)) << 20) | /* DQRR_MF */
	      ((dmode & 1) << 18) |			/* DP */
	      ((cmode & 3) << 16) |			/* DCM */
	      0xa0 |					/* RE+SE */
	      (0 ? 0x40 : 0) |				/* Ignore RP */
	      (0 ? 0x10 : 0);				/* Ignore SP */
	qm_out(portal, QM_REG_CFG, cfg);
	qm_dqrr_set_maxfill(portal, max_fill);
	return 0;
}

static inline void qm_dqrr_finish(struct qm_portal *portal)
{
#ifdef CONFIG_FSL_DPAA_CHECKING
	struct qm_dqrr *dqrr = &portal->dqrr;

	if (dqrr->cmode != qm_dqrr_cdc &&
	    dqrr->ci != dqrr_ptr2idx(dqrr->cursor))
		pr_crit("Ignoring completed DQRR entries\n");
#endif
}

static inline const struct qm_dqrr_entry *qm_dqrr_current(
						struct qm_portal *portal)
{
	struct qm_dqrr *dqrr = &portal->dqrr;

	if (!dqrr->fill)
		return NULL;
	return dqrr->cursor;
}

static inline u8 qm_dqrr_next(struct qm_portal *portal)
{
	struct qm_dqrr *dqrr = &portal->dqrr;

	DPAA_ASSERT(dqrr->fill);
	dqrr->cursor = dqrr_inc(dqrr->cursor);
	return --dqrr->fill;
}

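/*
 * Illustrative note: in pvb mode the hardware toggles each entry's
 * valid bit on every ring wrap. A newly produced entry is detected when
 * the verb's valid bit matches the expected 'vbit'; software flips that
 * expectation each time the producer index wraps back to zero.
 */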
static inline void qm_dqrr_pvb_update(struct qm_portal *portal)
{
	struct qm_dqrr *dqrr = &portal->dqrr;
	struct qm_dqrr_entry *res = qm_cl(dqrr->ring, dqrr->pi);

	DPAA_ASSERT(dqrr->pmode == qm_dqrr_pvb);
#ifndef CONFIG_FSL_PAMU
	/*
	 * If PAMU is not available we need to invalidate the cache.
	 * When PAMU is available the cache is updated by stash
	 */
	dpaa_invalidate_touch_ro(res);
#endif
	if ((res->verb & QM_DQRR_VERB_VBIT) == dqrr->vbit) {
		dqrr->pi = (dqrr->pi + 1) & (QM_DQRR_SIZE - 1);
		if (!dqrr->pi)
			dqrr->vbit ^= QM_DQRR_VERB_VBIT;
		dqrr->fill++;
	}
}

static inline void qm_dqrr_cdc_consume_1ptr(struct qm_portal *portal,
					    const struct qm_dqrr_entry *dq,
					    int park)
{
	__maybe_unused struct qm_dqrr *dqrr = &portal->dqrr;
	int idx = dqrr_ptr2idx(dq);

	DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
	DPAA_ASSERT((dqrr->ring + idx) == dq);
	DPAA_ASSERT(idx < QM_DQRR_SIZE);
	qm_out(portal, QM_REG_DQRR_DCAP, (0 << 8) |	/* DQRR_DCAP::S */
	       ((park ? 1 : 0) << 6) |			/* DQRR_DCAP::PK */
	       idx);					/* DQRR_DCAP::DCAP_CI */
}

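/*
 * Illustrative note: with DQRR_DCAP::S set (bit 8), the DCAP_CI field
 * is interpreted as a 16-bit bitmask of ring entries to consume rather
 * than a single index, hence the shift of the mask up to bit 16 below.
 */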
static inline void qm_dqrr_cdc_consume_n(struct qm_portal *portal, u32 bitmask)
{
	__maybe_unused struct qm_dqrr *dqrr = &portal->dqrr;

	DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
	qm_out(portal, QM_REG_DQRR_DCAP, (1 << 8) |	/* DQRR_DCAP::S */
	       (bitmask << 16));			/* DQRR_DCAP::DCAP_CI */
}

static inline void qm_dqrr_sdqcr_set(struct qm_portal *portal, u32 sdqcr)
{
	qm_out(portal, QM_REG_DQRR_SDQCR, sdqcr);
}

static inline void qm_dqrr_vdqcr_set(struct qm_portal *portal, u32 vdqcr)
{
	qm_out(portal, QM_REG_DQRR_VDQCR, vdqcr);
}

static inline int qm_dqrr_set_ithresh(struct qm_portal *portal, u8 ithresh)
{
	if (ithresh > QMAN_DQRR_IT_MAX)
		return -EINVAL;

	qm_out(portal, QM_REG_DQRR_ITR, ithresh);

	return 0;
}

/* --- MR API --- */

#define MR_SHIFT	ilog2(sizeof(union qm_mr_entry))
#define MR_CARRY	(uintptr_t)(QM_MR_SIZE << MR_SHIFT)

static union qm_mr_entry *mr_carryclear(union qm_mr_entry *p)
{
	uintptr_t addr = (uintptr_t)p;

	addr &= ~MR_CARRY;

	return (union qm_mr_entry *)addr;
}

static inline int mr_ptr2idx(const union qm_mr_entry *e)
{
	return ((uintptr_t)e >> MR_SHIFT) & (QM_MR_SIZE - 1);
}

static inline union qm_mr_entry *mr_inc(union qm_mr_entry *e)
{
	return mr_carryclear(e + 1);
}

static inline int qm_mr_init(struct qm_portal *portal, enum qm_mr_pmode pmode,
			     enum qm_mr_cmode cmode)
{
	struct qm_mr *mr = &portal->mr;
	u32 cfg;

	mr->ring = portal->addr.ce + QM_CL_MR;
	mr->pi = qm_in(portal, QM_REG_MR_PI_CINH) & (QM_MR_SIZE - 1);
	mr->ci = qm_in(portal, QM_REG_MR_CI_CINH) & (QM_MR_SIZE - 1);
	mr->cursor = mr->ring + mr->ci;
	mr->fill = dpaa_cyc_diff(QM_MR_SIZE, mr->ci, mr->pi);
	mr->vbit = (qm_in(portal, QM_REG_MR_PI_CINH) & QM_MR_SIZE)
		   ? QM_MR_VERB_VBIT : 0;
	mr->ithresh = qm_in(portal, QM_REG_MR_ITR);
#ifdef CONFIG_FSL_DPAA_CHECKING
	mr->pmode = pmode;
	mr->cmode = cmode;
#endif
	cfg = (qm_in(portal, QM_REG_CFG) & 0xfffff0ff) |
	      ((cmode & 1) << 8);	/* QCSP_CFG:MM */
	qm_out(portal, QM_REG_CFG, cfg);
	return 0;
}

static inline void qm_mr_finish(struct qm_portal *portal)
{
	struct qm_mr *mr = &portal->mr;

	if (mr->ci != mr_ptr2idx(mr->cursor))
		pr_crit("Ignoring completed MR entries\n");
}

static inline const union qm_mr_entry *qm_mr_current(struct qm_portal *portal)
{
	struct qm_mr *mr = &portal->mr;

	if (!mr->fill)
		return NULL;
	return mr->cursor;
}

static inline int qm_mr_next(struct qm_portal *portal)
{
	struct qm_mr *mr = &portal->mr;

	DPAA_ASSERT(mr->fill);
	mr->cursor = mr_inc(mr->cursor);
	return --mr->fill;
}

static inline void qm_mr_pvb_update(struct qm_portal *portal)
{
	struct qm_mr *mr = &portal->mr;
	union qm_mr_entry *res = qm_cl(mr->ring, mr->pi);

	DPAA_ASSERT(mr->pmode == qm_mr_pvb);

	if ((res->verb & QM_MR_VERB_VBIT) == mr->vbit) {
		mr->pi = (mr->pi + 1) & (QM_MR_SIZE - 1);
		if (!mr->pi)
			mr->vbit ^= QM_MR_VERB_VBIT;
		mr->fill++;
		res = mr_inc(res);
	}
	dpaa_invalidate_touch_ro(res);
}

static inline void qm_mr_cci_consume(struct qm_portal *portal, u8 num)
{
	struct qm_mr *mr = &portal->mr;

	DPAA_ASSERT(mr->cmode == qm_mr_cci);
	mr->ci = (mr->ci + num) & (QM_MR_SIZE - 1);
	qm_out(portal, QM_REG_MR_CI_CINH, mr->ci);
}

static inline void qm_mr_cci_consume_to_current(struct qm_portal *portal)
{
	struct qm_mr *mr = &portal->mr;

	DPAA_ASSERT(mr->cmode == qm_mr_cci);
	mr->ci = mr_ptr2idx(mr->cursor);
	qm_out(portal, QM_REG_MR_CI_CINH, mr->ci);
}

static inline void qm_mr_set_ithresh(struct qm_portal *portal, u8 ithresh)
{
	qm_out(portal, QM_REG_MR_ITR, ithresh);
}

/* --- Management command API --- */

static inline int qm_mc_init(struct qm_portal *portal)
{
	u8 rr0, rr1;
	struct qm_mc *mc = &portal->mc;

	mc->cr = portal->addr.ce + QM_CL_CR;
	mc->rr = portal->addr.ce + QM_CL_RR0;
	/*
	 * The expected valid bit polarity for the next CR command is 0
	 * if RR1 contains a valid response, and is 1 if RR0 contains a
	 * valid response. If both RR contain all 0, this indicates that
	 * no command has been executed since reset, in which case the
	 * expected valid bit polarity is 1.
	 */
	rr0 = mc->rr->verb;
	rr1 = (mc->rr+1)->verb;
	if ((rr0 == 0 && rr1 == 0) || rr0 != 0)
		mc->rridx = 1;
	else
		mc->rridx = 0;
	mc->vbit = mc->rridx ? QM_MCC_VERB_VBIT : 0;
#ifdef CONFIG_FSL_DPAA_CHECKING
	mc->state = qman_mc_idle;
#endif
	return 0;
}

static inline void qm_mc_finish(struct qm_portal *portal)
{
#ifdef CONFIG_FSL_DPAA_CHECKING
	struct qm_mc *mc = &portal->mc;

	DPAA_ASSERT(mc->state == qman_mc_idle);
	if (mc->state != qman_mc_idle)
		pr_crit("Losing incomplete MC command\n");
#endif
}

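/*
 * Sketch of the MC command flow (illustrative; it mirrors callers such
 * as qm_congestion_task() later in this file, and uses the qm_fqid_set()
 * helper that other MC callers in this driver use):
 *
 *	union qm_mc_command *mcc = qm_mc_start(&p->p);
 *	qm_fqid_set(&mcc->fq, fqid);
 *	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
 *	if (!qm_mc_result_timeout(&p->p, &mcr))
 *		return -ETIMEDOUT;
 */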
static inline union qm_mc_command *qm_mc_start(struct qm_portal *portal)
{
	struct qm_mc *mc = &portal->mc;

	DPAA_ASSERT(mc->state == qman_mc_idle);
#ifdef CONFIG_FSL_DPAA_CHECKING
	mc->state = qman_mc_user;
#endif
	dpaa_zero(mc->cr);
	return mc->cr;
}

static inline void qm_mc_commit(struct qm_portal *portal, u8 myverb)
{
	struct qm_mc *mc = &portal->mc;
	union qm_mc_result *rr = mc->rr + mc->rridx;

	DPAA_ASSERT(mc->state == qman_mc_user);
	dma_wmb();
	mc->cr->_ncw_verb = myverb | mc->vbit;
	dpaa_flush(mc->cr);
	dpaa_invalidate_touch_ro(rr);
#ifdef CONFIG_FSL_DPAA_CHECKING
	mc->state = qman_mc_hw;
#endif
}

static inline union qm_mc_result *qm_mc_result(struct qm_portal *portal)
{
	struct qm_mc *mc = &portal->mc;
	union qm_mc_result *rr = mc->rr + mc->rridx;

	DPAA_ASSERT(mc->state == qman_mc_hw);
	/*
	 * The inactive response register's verb byte always returns zero until
	 * its command is submitted and completed. This includes the valid-bit,
	 * in case you were wondering...
	 */
	if (!rr->verb) {
		dpaa_invalidate_touch_ro(rr);
		return NULL;
	}
	mc->rridx ^= 1;
	mc->vbit ^= QM_MCC_VERB_VBIT;
#ifdef CONFIG_FSL_DPAA_CHECKING
	mc->state = qman_mc_idle;
#endif
	return rr;
}

static inline int qm_mc_result_timeout(struct qm_portal *portal,
				       union qm_mc_result **mcr)
{
	int timeout = QM_MCR_TIMEOUT;

	do {
		*mcr = qm_mc_result(portal);
		if (*mcr)
			break;
		udelay(1);
	} while (--timeout);

	return timeout;
}

static inline void fq_set(struct qman_fq *fq, u32 mask)
{
	fq->flags |= mask;
}

static inline void fq_clear(struct qman_fq *fq, u32 mask)
{
	fq->flags &= ~mask;
}

static inline int fq_isset(struct qman_fq *fq, u32 mask)
{
	return fq->flags & mask;
}

static inline int fq_isclear(struct qman_fq *fq, u32 mask)
{
	return !(fq->flags & mask);
}

struct qman_portal {
	struct qm_portal p;
	/* PORTAL_BITS_*** - dynamic, strictly internal */
	unsigned long bits;
	/* interrupt sources processed by portal_isr(), configurable */
	unsigned long irq_sources;
	u32 use_eqcr_ci_stashing;
	/* only 1 volatile dequeue at a time */
	struct qman_fq *vdqcr_owned;
	u32 sdqcr;
	/* probing time config params for cpu-affine portals */
	const struct qm_portal_config *config;
	/* 2-element array. cgrs[0] is mask, cgrs[1] is snapshot. */
	struct qman_cgrs *cgrs;
	/* linked-list of CSCN handlers. */
	struct list_head cgr_cbs;
	/* list lock */
	raw_spinlock_t cgr_lock;
	struct work_struct congestion_work;
	struct work_struct mr_work;
	char irqname[MAX_IRQNAME];
};

static cpumask_t affine_mask;
static DEFINE_SPINLOCK(affine_mask_lock);
static u16 affine_channels[NR_CPUS];
static DEFINE_PER_CPU(struct qman_portal, qman_affine_portal);
struct qman_portal *affine_portals[NR_CPUS];

static inline struct qman_portal *get_affine_portal(void)
{
	return &get_cpu_var(qman_affine_portal);
}

static inline void put_affine_portal(void)
{
	put_cpu_var(qman_affine_portal);
}

static inline struct qman_portal *get_portal_for_channel(u16 channel)
{
	int i;

	for (i = 0; i < num_possible_cpus(); i++) {
		if (affine_portals[i] &&
		    affine_portals[i]->config->channel == channel)
			return affine_portals[i];
	}

	return NULL;
}

static struct workqueue_struct *qm_portal_wq;

int qman_dqrr_set_ithresh(struct qman_portal *portal, u8 ithresh)
{
	int res;

	if (!portal)
		return -EINVAL;

	res = qm_dqrr_set_ithresh(&portal->p, ithresh);
	if (res)
		return res;

	portal->p.dqrr.ithresh = ithresh;

	return 0;
}
EXPORT_SYMBOL(qman_dqrr_set_ithresh);

void qman_dqrr_get_ithresh(struct qman_portal *portal, u8 *ithresh)
{
	if (portal && ithresh)
		*ithresh = qm_in(&portal->p, QM_REG_DQRR_ITR);
}
EXPORT_SYMBOL(qman_dqrr_get_ithresh);

void qman_portal_get_iperiod(struct qman_portal *portal, u32 *iperiod)
{
	if (portal && iperiod)
		*iperiod = qm_in(&portal->p, QM_REG_ITPR);
}
EXPORT_SYMBOL(qman_portal_get_iperiod);

int qman_portal_set_iperiod(struct qman_portal *portal, u32 iperiod)
{
	if (!portal || iperiod > QMAN_ITP_MAX)
		return -EINVAL;

	qm_out(&portal->p, QM_REG_ITPR, iperiod);

	return 0;
}
EXPORT_SYMBOL(qman_portal_set_iperiod);

int qman_wq_alloc(void)
{
	qm_portal_wq = alloc_workqueue("qman_portal_wq", WQ_PERCPU, 1);
	if (!qm_portal_wq)
		return -ENOMEM;
	return 0;
}

void qman_enable_irqs(void)
{
	int i;

	for (i = 0; i < num_possible_cpus(); i++) {
		if (affine_portals[i]) {
			qm_out(&affine_portals[i]->p, QM_REG_ISR, 0xffffffff);
			qm_out(&affine_portals[i]->p, QM_REG_IIR, 0);
		}
	}
}

/*
 * This is what everything can wait on, even if it migrates to a different cpu
 * to the one whose affine portal it is waiting on.
 */
static DECLARE_WAIT_QUEUE_HEAD(affine_queue);

static struct qman_fq **fq_table;
static u32 num_fqids;

int qman_alloc_fq_table(u32 _num_fqids)
{
	num_fqids = _num_fqids;

	fq_table = vzalloc(array3_size(sizeof(struct qman_fq *),
				       num_fqids, 2));
	if (!fq_table)
		return -ENOMEM;

	pr_debug("Allocated fq lookup table at %p, entry count %u\n",
		 fq_table, num_fqids * 2);
	return 0;
}

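/*
 * Illustrative note: the lookup table holds two slots per FQID. The
 * even index (fqid * 2) is used for a full-service qman_fq object; the
 * odd index (fqid * 2 + 1) is used for enqueue-only references created
 * with QMAN_FQ_FLAG_NO_MODIFY (see qman_create_fq() below).
 */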
static struct qman_fq *idx_to_fq(u32 idx)
{
	struct qman_fq *fq;

#ifdef CONFIG_FSL_DPAA_CHECKING
	if (WARN_ON(idx >= num_fqids * 2))
		return NULL;
#endif
	fq = fq_table[idx];
	DPAA_ASSERT(!fq || idx == fq->idx);

	return fq;
}

/*
 * Only returns full-service fq objects, not enqueue-only
 * references (QMAN_FQ_FLAG_NO_MODIFY).
 */
static struct qman_fq *fqid_to_fq(u32 fqid)
{
	return idx_to_fq(fqid * 2);
}

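/*
 * Illustrative note: on 64-bit builds a kernel pointer does not fit in
 * the 32-bit context_b/tag field carried by the hardware, so the table
 * index is stored there instead and translated back here; on 32-bit
 * builds the pointer itself doubles as the tag.
 */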
static struct qman_fq *tag_to_fq(u32 tag)
{
#if BITS_PER_LONG == 64
	return idx_to_fq(tag);
#else
	return (struct qman_fq *)tag;
#endif
}

static u32 fq_to_tag(struct qman_fq *fq)
{
#if BITS_PER_LONG == 64
	return fq->idx;
#else
	return (u32)fq;
#endif
}

static u32 __poll_portal_slow(struct qman_portal *p, u32 is);
static inline unsigned int __poll_portal_fast(struct qman_portal *p,
					      unsigned int poll_limit,
					      bool sched_napi);
static void qm_congestion_task(struct work_struct *work);
static void qm_mr_process_task(struct work_struct *work);

static irqreturn_t portal_isr(int irq, void *ptr)
{
	struct qman_portal *p = ptr;
	u32 is = qm_in(&p->p, QM_REG_ISR) & p->irq_sources;
	u32 clear = 0;

	if (unlikely(!is))
		return IRQ_NONE;

	/* DQRR-handling if it's interrupt-driven */
	if (is & QM_PIRQ_DQRI) {
		__poll_portal_fast(p, QMAN_POLL_LIMIT, true);
		clear = QM_DQAVAIL_MASK | QM_PIRQ_DQRI;
	}
	/* Handling of anything else that's interrupt-driven */
	clear |= __poll_portal_slow(p, is) & QM_PIRQ_SLOW;
	qm_out(&p->p, QM_REG_ISR, clear);
	return IRQ_HANDLED;
}

static int drain_mr_fqrni(struct qm_portal *p)
{
	const union qm_mr_entry *msg;
loop:
	qm_mr_pvb_update(p);
	msg = qm_mr_current(p);
	if (!msg) {
		/*
		 * if MR was full and h/w had other FQRNI entries to produce, we
		 * need to allow it time to produce those entries once the
		 * existing entries are consumed. A worst-case situation
		 * (fully-loaded system) means h/w sequencers may have to do 3-4
		 * other things before servicing the portal's MR pump, each of
		 * which (if slow) may take ~50 qman cycles (which is ~200
		 * processor cycles). So rounding up and then multiplying this
		 * worst-case estimate by a factor of 10, just to be
		 * ultra-paranoid, goes as high as 10,000 cycles. NB, we consume
		 * one entry at a time, so h/w has an opportunity to produce new
		 * entries well before the ring has been fully consumed, so
		 * we're being *really* paranoid here.
		 */
		mdelay(1);
		qm_mr_pvb_update(p);
		msg = qm_mr_current(p);
		if (!msg)
			return 0;
	}
	if ((msg->verb & QM_MR_VERB_TYPE_MASK) != QM_MR_VERB_FQRNI) {
		/* We aren't draining anything but FQRNIs */
		pr_err("Found verb 0x%x in MR\n", msg->verb);
		return -1;
	}
	qm_mr_next(p);
	qm_mr_cci_consume(p, 1);
	goto loop;
}

static int qman_create_portal(struct qman_portal *portal,
			      const struct qm_portal_config *c,
			      const struct qman_cgrs *cgrs)
{
	struct qm_portal *p;
	int ret;
	u32 isdr;

	p = &portal->p;

#ifdef CONFIG_FSL_PAMU
	/* PAMU is required for stashing */
	portal->use_eqcr_ci_stashing = ((qman_ip_rev >= QMAN_REV30) ? 1 : 0);
#else
	portal->use_eqcr_ci_stashing = 0;
#endif
	/*
	 * prep the low-level portal struct with the mapped addresses from the
	 * config, everything that follows depends on it and "config" is more
	 * for (de)reference
	 */
	p->addr.ce = c->addr_virt_ce;
	p->addr.ce_be = c->addr_virt_ce;
	p->addr.ci = c->addr_virt_ci;
	/*
	 * If CI-stashing is used, the current defaults use a threshold of 3,
	 * and stash with higher-than-DQRR priority.
	 */
	if (qm_eqcr_init(p, qm_eqcr_pvb,
			 portal->use_eqcr_ci_stashing ? 3 : 0, 1)) {
		dev_err(c->dev, "EQCR initialisation failed\n");
		goto fail_eqcr;
	}
	if (qm_dqrr_init(p, c, qm_dqrr_dpush, qm_dqrr_pvb,
			 qm_dqrr_cdc, DQRR_MAXFILL)) {
		dev_err(c->dev, "DQRR initialisation failed\n");
		goto fail_dqrr;
	}
	if (qm_mr_init(p, qm_mr_pvb, qm_mr_cci)) {
		dev_err(c->dev, "MR initialisation failed\n");
		goto fail_mr;
	}
	if (qm_mc_init(p)) {
		dev_err(c->dev, "MC initialisation failed\n");
		goto fail_mc;
	}
	/* static interrupt-gating controls */
	qm_dqrr_set_ithresh(p, QMAN_PIRQ_DQRR_ITHRESH);
	qm_mr_set_ithresh(p, QMAN_PIRQ_MR_ITHRESH);
	qm_out(p, QM_REG_ITPR, QMAN_PIRQ_IPERIOD);
	portal->cgrs = kmalloc_objs(*portal->cgrs, 2);
	if (!portal->cgrs)
		goto fail_cgrs;
	/* initial snapshot is no-depletion */
	qman_cgrs_init(&portal->cgrs[1]);
	if (cgrs)
		portal->cgrs[0] = *cgrs;
	else
		/* if the given mask is NULL, assume all CGRs can be seen */
		qman_cgrs_fill(&portal->cgrs[0]);
	INIT_LIST_HEAD(&portal->cgr_cbs);
	raw_spin_lock_init(&portal->cgr_lock);
	INIT_WORK(&portal->congestion_work, qm_congestion_task);
	INIT_WORK(&portal->mr_work, qm_mr_process_task);
	portal->bits = 0;
	portal->sdqcr = QM_SDQCR_SOURCE_CHANNELS | QM_SDQCR_COUNT_UPTO3 |
			QM_SDQCR_DEDICATED_PRECEDENCE | QM_SDQCR_TYPE_PRIO_QOS |
			QM_SDQCR_TOKEN_SET(0xab) | QM_SDQCR_CHANNELS_DEDICATED;
	isdr = 0xffffffff;
	qm_out(p, QM_REG_ISDR, isdr);
	portal->irq_sources = 0;
	qm_out(p, QM_REG_IER, 0);
	snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu);
	qm_out(p, QM_REG_IIR, 1);
	if (request_irq(c->irq, portal_isr, 0, portal->irqname, portal)) {
		dev_err(c->dev, "request_irq() failed\n");
		goto fail_irq;
	}

	if (dpaa_set_portal_irq_affinity(c->dev, c->irq, c->cpu))
		goto fail_affinity;

	/* Need EQCR to be empty before continuing */
	isdr &= ~QM_PIRQ_EQCI;
	qm_out(p, QM_REG_ISDR, isdr);
	ret = qm_eqcr_get_fill(p);
	if (ret) {
		dev_err(c->dev, "EQCR unclean\n");
		goto fail_eqcr_empty;
	}
	isdr &= ~(QM_PIRQ_DQRI | QM_PIRQ_MRI);
	qm_out(p, QM_REG_ISDR, isdr);
	if (qm_dqrr_current(p)) {
		dev_dbg(c->dev, "DQRR unclean\n");
		qm_dqrr_cdc_consume_n(p, 0xffff);
	}
	if (qm_mr_current(p) && drain_mr_fqrni(p)) {
		/* special handling, drain just in case it's a few FQRNIs */
		const union qm_mr_entry *e = qm_mr_current(p);

		dev_err(c->dev, "MR dirty, VB 0x%x, rc 0x%x, addr 0x%llx\n",
			e->verb, e->ern.rc, qm_fd_addr_get64(&e->ern.fd));
		goto fail_dqrr_mr_empty;
	}
	/* Success */
	portal->config = c;
	qm_out(p, QM_REG_ISR, 0xffffffff);
	qm_out(p, QM_REG_ISDR, 0);
	if (!qman_requires_cleanup())
		qm_out(p, QM_REG_IIR, 0);
	/* Write a sane SDQCR */
	qm_dqrr_sdqcr_set(p, portal->sdqcr);
	return 0;

fail_dqrr_mr_empty:
fail_eqcr_empty:
fail_affinity:
	free_irq(c->irq, portal);
fail_irq:
	kfree(portal->cgrs);
fail_cgrs:
	qm_mc_finish(p);
fail_mc:
	qm_mr_finish(p);
fail_mr:
	qm_dqrr_finish(p);
fail_dqrr:
	qm_eqcr_finish(p);
fail_eqcr:
	return -EIO;
}

struct qman_portal *qman_create_affine_portal(const struct qm_portal_config *c,
					      const struct qman_cgrs *cgrs)
{
	struct qman_portal *portal;
	int err;

	portal = &per_cpu(qman_affine_portal, c->cpu);
	err = qman_create_portal(portal, c, cgrs);
	if (err)
		return NULL;

	spin_lock(&affine_mask_lock);
	cpumask_set_cpu(c->cpu, &affine_mask);
	affine_channels[c->cpu] = c->channel;
	affine_portals[c->cpu] = portal;
	spin_unlock(&affine_mask_lock);

	return portal;
}

static void qman_destroy_portal(struct qman_portal *qm)
{
	const struct qm_portal_config *pcfg;

	/* Stop dequeues on the portal */
	qm_dqrr_sdqcr_set(&qm->p, 0);

	/*
	 * NB we do this to "quiesce" EQCR. If we add enqueue-completions or
	 * something related to QM_PIRQ_EQCI, this may need fixing.
	 * Also, due to the prefetching model used for CI updates in the enqueue
	 * path, this update will only invalidate the CI cacheline *after*
	 * working on it, so we need to call this twice to ensure a full update
	 * irrespective of where the enqueue processing was at when the teardown
	 * began.
	 */
	qm_eqcr_cce_update(&qm->p);
	qm_eqcr_cce_update(&qm->p);
	pcfg = qm->config;

	free_irq(pcfg->irq, qm);

	kfree(qm->cgrs);
	qm_mc_finish(&qm->p);
	qm_mr_finish(&qm->p);
	qm_dqrr_finish(&qm->p);
	qm_eqcr_finish(&qm->p);

	qm->config = NULL;
}

const struct qm_portal_config *qman_destroy_affine_portal(void)
{
	struct qman_portal *qm = get_affine_portal();
	const struct qm_portal_config *pcfg;
	int cpu;

	pcfg = qm->config;
	cpu = pcfg->cpu;

	qman_destroy_portal(qm);

	spin_lock(&affine_mask_lock);
	cpumask_clear_cpu(cpu, &affine_mask);
	spin_unlock(&affine_mask_lock);
	put_affine_portal();
	return pcfg;
}

/* Inline helper to reduce nesting in __poll_portal_slow() */
static inline void fq_state_change(struct qman_portal *p, struct qman_fq *fq,
				   const union qm_mr_entry *msg, u8 verb)
{
	switch (verb) {
	case QM_MR_VERB_FQRL:
		DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_ORL));
		fq_clear(fq, QMAN_FQ_STATE_ORL);
		break;
	case QM_MR_VERB_FQRN:
		DPAA_ASSERT(fq->state == qman_fq_state_parked ||
			    fq->state == qman_fq_state_sched);
		DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_CHANGING));
		fq_clear(fq, QMAN_FQ_STATE_CHANGING);
		if (msg->fq.fqs & QM_MR_FQS_NOTEMPTY)
			fq_set(fq, QMAN_FQ_STATE_NE);
		if (msg->fq.fqs & QM_MR_FQS_ORLPRESENT)
			fq_set(fq, QMAN_FQ_STATE_ORL);
		fq->state = qman_fq_state_retired;
		break;
	case QM_MR_VERB_FQPN:
		DPAA_ASSERT(fq->state == qman_fq_state_sched);
		DPAA_ASSERT(fq_isclear(fq, QMAN_FQ_STATE_CHANGING));
		fq->state = qman_fq_state_parked;
	}
}

static void qm_congestion_task(struct work_struct *work)
{
	struct qman_portal *p = container_of(work, struct qman_portal,
					     congestion_work);
	struct qman_cgrs rr, c;
	union qm_mc_result *mcr;
	struct qman_cgr *cgr;

	/*
	 * FIXME: QM_MCR_TIMEOUT is 10ms, which is too long for a raw spinlock!
	 */
	raw_spin_lock_irq(&p->cgr_lock);
	qm_mc_start(&p->p);
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		raw_spin_unlock_irq(&p->cgr_lock);
		dev_crit(p->config->dev, "QUERYCONGESTION timeout\n");
		qman_p_irqsource_add(p, QM_PIRQ_CSCI);
		return;
	}
	/* mask out the ones I'm not interested in */
	qman_cgrs_and(&rr, (struct qman_cgrs *)&mcr->querycongestion.state,
		      &p->cgrs[0]);
	/* check previous snapshot for delta, enter/exit congestion */
	qman_cgrs_xor(&c, &rr, &p->cgrs[1]);
	/* update snapshot */
	qman_cgrs_cp(&p->cgrs[1], &rr);
	/* Invoke callback */
	list_for_each_entry(cgr, &p->cgr_cbs, node)
		if (cgr->cb && qman_cgrs_get(&c, cgr->cgrid))
			cgr->cb(p, cgr, qman_cgrs_get(&rr, cgr->cgrid));
	raw_spin_unlock_irq(&p->cgr_lock);
	qman_p_irqsource_add(p, QM_PIRQ_CSCI);
}

static void qm_mr_process_task(struct work_struct *work)
{
	struct qman_portal *p = container_of(work, struct qman_portal,
					     mr_work);
	const union qm_mr_entry *msg;
	struct qman_fq *fq;
	u8 verb, num = 0;

	preempt_disable();

	while (1) {
		qm_mr_pvb_update(&p->p);
		msg = qm_mr_current(&p->p);
		if (!msg)
			break;

		verb = msg->verb & QM_MR_VERB_TYPE_MASK;
		/* The message is a software ERN iff the 0x20 bit is clear */
		if (verb & 0x20) {
			switch (verb) {
			case QM_MR_VERB_FQRNI:
				/* nada, we drop FQRNIs on the floor */
				break;
			case QM_MR_VERB_FQRN:
			case QM_MR_VERB_FQRL:
				/* Lookup in the retirement table */
				fq = fqid_to_fq(qm_fqid_get(&msg->fq));
				if (WARN_ON(!fq))
					break;
				fq_state_change(p, fq, msg, verb);
				if (fq->cb.fqs)
					fq->cb.fqs(p, fq, msg);
				break;
			case QM_MR_VERB_FQPN:
				/* Parked */
				fq = tag_to_fq(be32_to_cpu(msg->fq.context_b));
				fq_state_change(p, fq, msg, verb);
				if (fq->cb.fqs)
					fq->cb.fqs(p, fq, msg);
				break;
			case QM_MR_VERB_DC_ERN:
				/* DCP ERN */
				pr_crit_once("Leaking DCP ERNs!\n");
				break;
			default:
				pr_crit("Invalid MR verb 0x%02x\n", verb);
			}
		} else {
			/* It's a software ERN */
			fq = tag_to_fq(be32_to_cpu(msg->ern.tag));
			fq->cb.ern(p, fq, msg);
		}
		num++;
		qm_mr_next(&p->p);
	}

	qm_mr_cci_consume(&p->p, num);
	qman_p_irqsource_add(p, QM_PIRQ_MRI);
	preempt_enable();
}

static u32 __poll_portal_slow(struct qman_portal *p, u32 is)
{
	if (is & QM_PIRQ_CSCI) {
		qman_p_irqsource_remove(p, QM_PIRQ_CSCI);
		queue_work_on(smp_processor_id(), qm_portal_wq,
			      &p->congestion_work);
	}

	if (is & QM_PIRQ_EQRI) {
		qm_eqcr_cce_update(&p->p);
		qm_eqcr_set_ithresh(&p->p, 0);
		wake_up(&affine_queue);
	}

	if (is & QM_PIRQ_MRI) {
		qman_p_irqsource_remove(p, QM_PIRQ_MRI);
		queue_work_on(smp_processor_id(), qm_portal_wq,
			      &p->mr_work);
	}

	return is;
}

/*
 * remove some slowish-path stuff from the "fast path" and make sure it isn't
 * inlined.
 */
static noinline void clear_vdqcr(struct qman_portal *p, struct qman_fq *fq)
{
	p->vdqcr_owned = NULL;
	fq_clear(fq, QMAN_FQ_STATE_VDQCR);
	wake_up(&affine_queue);
}

/*
 * The only states that would conflict with other things if they ran at the
 * same time on the same cpu are:
 *
 *   (i) setting/clearing vdqcr_owned, and
 *  (ii) clearing the NE (Not Empty) flag.
 *
 * Both are safe. Because:
 *
 *   (i) this clearing can only occur after qman_volatile_dequeue() has set the
 *	 vdqcr_owned field (which it does before setting VDQCR), and
 *	 qman_volatile_dequeue() blocks interrupts and preemption while this is
 *	 done so that we can't interfere.
 *  (ii) the NE flag is only cleared after qman_retire_fq() has set it, and as
 *	 with (i) that API prevents us from interfering until it's safe.
 *
 * The good thing is that qman_volatile_dequeue() and qman_retire_fq() run far
 * less frequently (ie. per-FQ) than __poll_portal_fast() does, so the net
 * advantage comes from this function not having to "lock" anything at all.
 *
 * Note also that the callbacks are invoked at points which are safe against the
 * above potential conflicts, but that this function itself is not re-entrant
 * (this is because the function tracks one end of each FIFO in the portal and
 * we do *not* want to lock that). So the consequence is that it is safe for
 * user callbacks to call into any QMan API.
 */
static inline unsigned int __poll_portal_fast(struct qman_portal *p,
					      unsigned int poll_limit,
					      bool sched_napi)
{
	const struct qm_dqrr_entry *dq;
	struct qman_fq *fq;
	enum qman_cb_dqrr_result res;
	unsigned int limit = 0;

	do {
		qm_dqrr_pvb_update(&p->p);
		dq = qm_dqrr_current(&p->p);
		if (!dq)
			break;

		if (dq->stat & QM_DQRR_STAT_UNSCHEDULED) {
			/*
			 * VDQCR: don't trust context_b as the FQ may have
			 * been configured for h/w consumption and we're
			 * draining it post-retirement.
			 */
			fq = p->vdqcr_owned;
			/*
			 * We only set QMAN_FQ_STATE_NE when retiring, so we
			 * only need to check for clearing it when doing
			 * volatile dequeues. It's one less thing to check
			 * in the critical path (SDQCR).
			 */
			if (dq->stat & QM_DQRR_STAT_FQ_EMPTY)
				fq_clear(fq, QMAN_FQ_STATE_NE);
			/*
			 * This is duplicated from the SDQCR code, but we
			 * have stuff to do before *and* after this callback,
			 * and we don't want multiple if()s in the critical
			 * path (SDQCR).
			 */
			res = fq->cb.dqrr(p, fq, dq, sched_napi);
			if (res == qman_cb_dqrr_stop)
				break;
			/* Check for VDQCR completion */
			if (dq->stat & QM_DQRR_STAT_DQCR_EXPIRED)
				clear_vdqcr(p, fq);
		} else {
			/* SDQCR: context_b points to the FQ */
			fq = tag_to_fq(be32_to_cpu(dq->context_b));
			/* Now let the callback do its stuff */
			res = fq->cb.dqrr(p, fq, dq, sched_napi);
			/*
			 * The callback can request that we exit without
			 * consuming this entry or advancing.
			 */
1657 if (res == qman_cb_dqrr_stop)
1658 break;
1659 }
1660 /* Interpret 'dq' from a driver perspective. */
1661 /*
1662 * Parking isn't possible unless HELDACTIVE was set. NB,
1663 * FORCEELIGIBLE implies HELDACTIVE, so we only need to
1664 * check for HELDACTIVE to cover both.
1665 */
1666 DPAA_ASSERT((dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) ||
1667 (res != qman_cb_dqrr_park));
1668 /* just means "skip it, I'll consume it myself later on" */
1669 if (res != qman_cb_dqrr_defer)
1670 qm_dqrr_cdc_consume_1ptr(&p->p, dq,
1671 res == qman_cb_dqrr_park);
1672 /* Move forward */
1673 qm_dqrr_next(&p->p);
1674 /*
1675 * Entry processed and consumed, increment our counter. The
1676 * callback can request that we exit after consuming the
1677 * entry, and we also exit if we reach our processing limit,
1678 * so loop back only if neither of these conditions is met.
1679 */
1680 } while (++limit < poll_limit && res != qman_cb_dqrr_consume_stop);
1681
1682 return limit;
1683 }
1684
qman_p_irqsource_add(struct qman_portal * p,u32 bits)1685 void qman_p_irqsource_add(struct qman_portal *p, u32 bits)
1686 {
1687 unsigned long irqflags;
1688
1689 local_irq_save(irqflags);
1690 p->irq_sources |= bits & QM_PIRQ_VISIBLE;
1691 qm_out(&p->p, QM_REG_IER, p->irq_sources);
1692 local_irq_restore(irqflags);
1693 }
1694 EXPORT_SYMBOL(qman_p_irqsource_add);
1695
void qman_p_irqsource_remove(struct qman_portal *p, u32 bits)
{
	unsigned long irqflags;
	u32 ier;

	/*
	 * Our interrupt handler only processes+clears status register bits
	 * that are in p->irq_sources. As we're trimming that mask, if one of
	 * them were to assert in the status register just before we remove it
	 * from the enable register, there would be an interrupt-storm when we
	 * release the IRQ lock. So we wait for the enable register update to
	 * take effect in h/w (by reading it back) and then clear all other
	 * bits in the status register. Ie. we clear them from ISR once it's
	 * certain IER won't allow them to reassert.
	 */
	local_irq_save(irqflags);
	bits &= QM_PIRQ_VISIBLE;
	p->irq_sources &= ~bits;
	qm_out(&p->p, QM_REG_IER, p->irq_sources);
	ier = qm_in(&p->p, QM_REG_IER);
	/*
	 * Using "~ier" (rather than "bits" or "~p->irq_sources") creates a
	 * data-dependency, ie. to protect against re-ordering.
	 */
	qm_out(&p->p, QM_REG_ISR, ~ier);
	local_irq_restore(irqflags);
}
EXPORT_SYMBOL(qman_p_irqsource_remove);
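
/*
 * Illustrative sketch (not part of this driver): the usual pattern is to
 * disable the dequeue interrupt while a poll loop owns the portal and to
 * re-enable it when the loop goes idle. QM_PIRQ_DQRI is the DQRR interrupt
 * source bit declared in the public qman.h.
 *
 *	(entering polled mode)
 *	qman_p_irqsource_remove(portal, QM_PIRQ_DQRI);
 *	... process DQRR entries ...
 *	(leaving polled mode)
 *	qman_p_irqsource_add(portal, QM_PIRQ_DQRI);
 */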

const cpumask_t *qman_affine_cpus(void)
{
	return &affine_mask;
}
EXPORT_SYMBOL(qman_affine_cpus);

u16 qman_affine_channel(int cpu)
{
	if (cpu < 0) {
		struct qman_portal *portal = get_affine_portal();

		cpu = portal->config->cpu;
		put_affine_portal();
	}
	WARN_ON(!cpumask_test_cpu(cpu, &affine_mask));
	return affine_channels[cpu];
}
EXPORT_SYMBOL(qman_affine_channel);

struct qman_portal *qman_get_affine_portal(int cpu)
{
	return affine_portals[cpu];
}
EXPORT_SYMBOL(qman_get_affine_portal);

int qman_start_using_portal(struct qman_portal *p, struct device *dev)
{
	return (!device_link_add(dev, p->config->dev,
				 DL_FLAG_AUTOREMOVE_CONSUMER)) ? -EINVAL : 0;
}
EXPORT_SYMBOL(qman_start_using_portal);

int qman_p_poll_dqrr(struct qman_portal *p, unsigned int limit)
{
	return __poll_portal_fast(p, limit, false);
}
EXPORT_SYMBOL(qman_p_poll_dqrr);
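
/*
 * Illustrative sketch (not part of this driver): qman_p_poll_dqrr() is what
 * a NAPI poll method typically wraps, pairing it with the irqsource helpers
 * above. The my_napi_ctx type and field names are hypothetical.
 *
 *	static int my_napi_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct my_napi_ctx *np =
 *			container_of(napi, struct my_napi_ctx, napi);
 *		int cleaned = qman_p_poll_dqrr(np->portal, budget);
 *
 *		if (cleaned < budget) {
 *			napi_complete_done(napi, cleaned);
 *			qman_p_irqsource_add(np->portal, QM_PIRQ_DQRI);
 *		}
 *		return cleaned;
 *	}
 */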

void qman_p_static_dequeue_add(struct qman_portal *p, u32 pools)
{
	unsigned long irqflags;

	local_irq_save(irqflags);
	pools &= p->config->pools;
	p->sdqcr |= pools;
	qm_dqrr_sdqcr_set(&p->p, p->sdqcr);
	local_irq_restore(irqflags);
}
EXPORT_SYMBOL(qman_p_static_dequeue_add);
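
/*
 * Illustrative sketch (not part of this driver): to have this portal service
 * a pool channel via SDQCR, convert the channel number to its pool-mask bit
 * with QM_SDQCR_CHANNELS_POOL_CONV() (declared in the public qman.h) and add
 * it. 'pool_channel' is assumed to come from qman_alloc_pool_range() or the
 * device tree.
 *
 *	qman_p_static_dequeue_add(portal,
 *				  QM_SDQCR_CHANNELS_POOL_CONV(pool_channel));
 */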

/* Frame queue API */

static const char *mcr_result_str(u8 result)
{
	switch (result) {
	case QM_MCR_RESULT_NULL:
		return "QM_MCR_RESULT_NULL";
	case QM_MCR_RESULT_OK:
		return "QM_MCR_RESULT_OK";
	case QM_MCR_RESULT_ERR_FQID:
		return "QM_MCR_RESULT_ERR_FQID";
	case QM_MCR_RESULT_ERR_FQSTATE:
		return "QM_MCR_RESULT_ERR_FQSTATE";
	case QM_MCR_RESULT_ERR_NOTEMPTY:
		return "QM_MCR_RESULT_ERR_NOTEMPTY";
	case QM_MCR_RESULT_PENDING:
		return "QM_MCR_RESULT_PENDING";
	case QM_MCR_RESULT_ERR_BADCOMMAND:
		return "QM_MCR_RESULT_ERR_BADCOMMAND";
	}
	return "<unknown MCR result>";
}

int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq)
{
	if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID) {
		int ret = qman_alloc_fqid(&fqid);

		if (ret)
			return ret;
	}
	fq->fqid = fqid;
	fq->flags = flags;
	fq->state = qman_fq_state_oos;
	fq->cgr_groupid = 0;

	/* A context_b of 0 is allegedly special, so don't use that fqid */
	if (fqid == 0 || fqid >= num_fqids) {
		WARN(1, "bad fqid %d\n", fqid);
		return -EINVAL;
	}

	fq->idx = fqid * 2;
	if (flags & QMAN_FQ_FLAG_NO_MODIFY)
		fq->idx++;

	WARN_ON(fq_table[fq->idx]);
	fq_table[fq->idx] = fq;

	return 0;
}
EXPORT_SYMBOL(qman_create_fq);
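
/*
 * Illustrative sketch (not part of this driver): a consumer usually embeds
 * struct qman_fq in its own queue object, fills in the callbacks and lets
 * QMan pick the FQID. The my_rxq type and rx_*_cb names are hypothetical;
 * the flags are from the public qman.h.
 *
 *	struct my_rxq {
 *		struct qman_fq fq;
 *		(driver state ...)
 *	};
 *
 *	static int my_rxq_create(struct my_rxq *rxq)
 *	{
 *		rxq->fq.cb.dqrr = rx_dqrr_cb;
 *		rxq->fq.cb.ern = rx_ern_cb;
 *		rxq->fq.cb.fqs = rx_fqs_cb;
 *		return qman_create_fq(0, QMAN_FQ_FLAG_NO_ENQUEUE |
 *				      QMAN_FQ_FLAG_DYNAMIC_FQID, &rxq->fq);
 *	}
 */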

void qman_destroy_fq(struct qman_fq *fq)
{
	int leaked;

	/*
	 * We don't need to lock the FQ as it is a pre-condition that the FQ be
	 * quiesced. Instead, run some checks.
	 */
	switch (fq->state) {
	case qman_fq_state_parked:
	case qman_fq_state_oos:
		/*
		 * There's a race between shutting down the FQ, clearing its
		 * fq_table entry and returning the FQID to the allocator. To
		 * prevent it, this order must be respected:
		 */
		if (fq_isset(fq, QMAN_FQ_FLAG_DYNAMIC_FQID)) {
			leaked = qman_shutdown_fq(fq->fqid);
			if (leaked)
				pr_debug("FQID %d leaked\n", fq->fqid);
		}

		DPAA_ASSERT(fq_table[fq->idx]);
		fq_table[fq->idx] = NULL;

		if (fq_isset(fq, QMAN_FQ_FLAG_DYNAMIC_FQID) && !leaked) {
			/*
			 * fq_table[fq->idx] must be set to NULL before
			 * freeing fq->fqid, otherwise the FQID could be
			 * handed out again by qman_alloc_fqid() while the
			 * table entry is still non-NULL.
			 */
			smp_wmb();
			gen_pool_free(qm_fqalloc, fq->fqid | DPAA_GENALLOC_OFF, 1);
		}
		return;
	default:
		break;
	}
	DPAA_ASSERT(NULL == "qman_free_fq() on unquiesced FQ!");
}
EXPORT_SYMBOL(qman_destroy_fq);

u32 qman_fq_fqid(struct qman_fq *fq)
{
	return fq->fqid;
}
EXPORT_SYMBOL(qman_fq_fqid);

int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts)
{
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	struct qman_portal *p;
	u8 res, myverb;
	int ret = 0;

	myverb = (flags & QMAN_INITFQ_FLAG_SCHED)
		? QM_MCC_VERB_INITFQ_SCHED : QM_MCC_VERB_INITFQ_PARKED;

	if (fq->state != qman_fq_state_oos &&
	    fq->state != qman_fq_state_parked)
		return -EINVAL;
#ifdef CONFIG_FSL_DPAA_CHECKING
	if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
		return -EINVAL;
#endif
	if (opts && (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_OAC)) {
		/* OAC can't be set at the same time as TDTHRESH */
		if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_TDTHRESH)
			return -EINVAL;
	}
	/* Issue an INITFQ_[PARKED|SCHED] management command */
	p = get_affine_portal();
	if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) ||
	    (fq->state != qman_fq_state_oos &&
	     fq->state != qman_fq_state_parked)) {
		ret = -EBUSY;
		goto out;
	}
	mcc = qm_mc_start(&p->p);
	if (opts)
		mcc->initfq = *opts;
	qm_fqid_set(&mcc->fq, fq->fqid);
	mcc->initfq.count = 0;
	/*
	 * If the FQ does *not* have the TO_DCPORTAL flag, context_b is set as
	 * a demux pointer. Otherwise, the caller-provided value is allowed to
	 * stand, don't overwrite it.
	 */
	if (fq_isclear(fq, QMAN_FQ_FLAG_TO_DCPORTAL)) {
		dma_addr_t phys_fq;

		mcc->initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CONTEXTB);
		mcc->initfq.fqd.context_b = cpu_to_be32(fq_to_tag(fq));
		/*
		 * and the physical address - NB, if the user wasn't trying to
		 * set CONTEXTA, clear the stashing settings.
		 */
		if (!(be16_to_cpu(mcc->initfq.we_mask) &
		      QM_INITFQ_WE_CONTEXTA)) {
			mcc->initfq.we_mask |=
				cpu_to_be16(QM_INITFQ_WE_CONTEXTA);
			memset(&mcc->initfq.fqd.context_a, 0,
			       sizeof(mcc->initfq.fqd.context_a));
		} else {
			struct qman_portal *p = qman_dma_portal;

			phys_fq = dma_map_single(p->config->dev, fq,
						 sizeof(*fq), DMA_TO_DEVICE);
			if (dma_mapping_error(p->config->dev, phys_fq)) {
				dev_err(p->config->dev, "dma_mapping failed\n");
				ret = -EIO;
				goto out;
			}

			qm_fqd_stashing_set64(&mcc->initfq.fqd, phys_fq);
		}
	}
	if (flags & QMAN_INITFQ_FLAG_LOCAL) {
		int wq = 0;

		if (!(be16_to_cpu(mcc->initfq.we_mask) &
		      QM_INITFQ_WE_DESTWQ)) {
			mcc->initfq.we_mask |=
				cpu_to_be16(QM_INITFQ_WE_DESTWQ);
			wq = 4;
		}
		qm_fqd_set_destwq(&mcc->initfq.fqd, p->config->channel, wq);
	}
	qm_mc_commit(&p->p, myverb);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		dev_err(p->config->dev, "MCR timeout\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
	res = mcr->result;
	if (res != QM_MCR_RESULT_OK) {
		ret = -EIO;
		goto out;
	}
	if (opts) {
		if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_FQCTRL) {
			if (be16_to_cpu(opts->fqd.fq_ctrl) & QM_FQCTRL_CGE)
				fq_set(fq, QMAN_FQ_STATE_CGR_EN);
			else
				fq_clear(fq, QMAN_FQ_STATE_CGR_EN);
		}
		if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_CGID)
			fq->cgr_groupid = opts->fqd.cgid;
	}
	fq->state = (flags & QMAN_INITFQ_FLAG_SCHED) ?
		qman_fq_state_sched : qman_fq_state_parked;

out:
	put_affine_portal();
	return ret;
}
EXPORT_SYMBOL(qman_init_fq);
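
/*
 * Illustrative sketch (not part of this driver): initialising and scheduling
 * the FQ created above, steering it to a caller-chosen channel/work-queue
 * and asking QMan to keep the FQD resident in its internal cache. All macros
 * come from the public qman.h; 'channel', 'wq' and 'err' are assumed to be
 * supplied by the caller.
 *
 *	struct qm_mcc_initfq opts;
 *
 *	memset(&opts, 0, sizeof(opts));
 *	opts.we_mask = cpu_to_be16(QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL);
 *	opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_PREFERINCACHE);
 *	qm_fqd_set_destwq(&opts.fqd, channel, wq);
 *	err = qman_init_fq(&rxq->fq, QMAN_INITFQ_FLAG_SCHED, &opts);
 */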

int qman_schedule_fq(struct qman_fq *fq)
{
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	struct qman_portal *p;
	int ret = 0;

	if (fq->state != qman_fq_state_parked)
		return -EINVAL;
#ifdef CONFIG_FSL_DPAA_CHECKING
	if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
		return -EINVAL;
#endif
	/* Issue an ALTERFQ_SCHED management command */
	p = get_affine_portal();
	if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) ||
	    fq->state != qman_fq_state_parked) {
		ret = -EBUSY;
		goto out;
	}
	mcc = qm_mc_start(&p->p);
	qm_fqid_set(&mcc->fq, fq->fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_SCHED);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		dev_err(p->config->dev, "ALTER_SCHED timeout\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_SCHED);
	if (mcr->result != QM_MCR_RESULT_OK) {
		ret = -EIO;
		goto out;
	}
	fq->state = qman_fq_state_sched;
out:
	put_affine_portal();
	return ret;
}
EXPORT_SYMBOL(qman_schedule_fq);

int qman_retire_fq(struct qman_fq *fq, u32 *flags)
{
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	struct qman_portal *p;
	int ret;
	u8 res;

	if (fq->state != qman_fq_state_parked &&
	    fq->state != qman_fq_state_sched)
		return -EINVAL;
#ifdef CONFIG_FSL_DPAA_CHECKING
	if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
		return -EINVAL;
#endif
	p = get_affine_portal();
	if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) ||
	    fq->state == qman_fq_state_retired ||
	    fq->state == qman_fq_state_oos) {
		ret = -EBUSY;
		goto out;
	}
	mcc = qm_mc_start(&p->p);
	qm_fqid_set(&mcc->fq, fq->fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		dev_crit(p->config->dev, "ALTER_RETIRE timeout\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_RETIRE);
	res = mcr->result;
	/*
	 * "Elegant" would be to treat OK/PENDING the same way; set CHANGING,
	 * and defer the flags until FQRNI or FQRN (respectively) show up. But
	 * "Friendly" is to process OK immediately, and not set CHANGING. We do
	 * friendly, otherwise the caller doesn't necessarily have a fully
	 * "retired" FQ on return even if the retirement was immediate. However
	 * this does mean some code duplication between here and
	 * fq_state_change().
	 */
	if (res == QM_MCR_RESULT_OK) {
		ret = 0;
		/* Process 'fq' right away, we'll ignore FQRNI */
		if (mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY)
			fq_set(fq, QMAN_FQ_STATE_NE);
		if (mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)
			fq_set(fq, QMAN_FQ_STATE_ORL);
		if (flags)
			*flags = fq->flags;
		fq->state = qman_fq_state_retired;
		if (fq->cb.fqs) {
			/*
			 * Another issue with supporting "immediate" retirement
			 * is that we're forced to drop FQRNIs, because by the
			 * time they're seen it may already be "too late" (the
			 * fq may have been OOS'd and free()'d already). But if
			 * the upper layer wants a callback whether it's
			 * immediate or not, we have to fake an MR entry to
			 * look like an FQRNI...
			 */
			union qm_mr_entry msg;

			msg.verb = QM_MR_VERB_FQRNI;
			msg.fq.fqs = mcr->alterfq.fqs;
			qm_fqid_set(&msg.fq, fq->fqid);
			msg.fq.context_b = cpu_to_be32(fq_to_tag(fq));
			fq->cb.fqs(p, fq, &msg);
		}
	} else if (res == QM_MCR_RESULT_PENDING) {
		ret = 1;
		fq_set(fq, QMAN_FQ_STATE_CHANGING);
	} else {
		ret = -EIO;
	}
out:
	put_affine_portal();
	return ret;
}
EXPORT_SYMBOL(qman_retire_fq);
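
/*
 * Illustrative sketch (not part of this driver): callers must handle all
 * three outcomes above. A return of 1 means retirement is pending and will
 * complete via an FQRN message; the completion below is a hypothetical
 * object signalled from the consumer's .fqs callback.
 *
 *	err = qman_retire_fq(&rxq->fq, NULL);
 *	if (err < 0)
 *		return err;		(retirement failed outright)
 *	if (err == 1) {
 *		(retirement is asynchronous: wait for the FQRN to be seen)
 *		wait_for_completion(&rxq->retire_done);
 *	}
 *	err = qman_oos_fq(&rxq->fq);
 */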

int qman_oos_fq(struct qman_fq *fq)
{
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	struct qman_portal *p;
	int ret = 0;

	if (fq->state != qman_fq_state_retired)
		return -EINVAL;
#ifdef CONFIG_FSL_DPAA_CHECKING
	if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
		return -EINVAL;
#endif
	p = get_affine_portal();
	if (fq_isset(fq, QMAN_FQ_STATE_BLOCKOOS) ||
	    fq->state != qman_fq_state_retired) {
		ret = -EBUSY;
		goto out;
	}
	mcc = qm_mc_start(&p->p);
	qm_fqid_set(&mcc->fq, fq->fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		ret = -ETIMEDOUT;
		goto out;
	}
	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_OOS);
	if (mcr->result != QM_MCR_RESULT_OK) {
		ret = -EIO;
		goto out;
	}
	fq->state = qman_fq_state_oos;
out:
	put_affine_portal();
	return ret;
}
EXPORT_SYMBOL(qman_oos_fq);

int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd)
{
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	struct qman_portal *p = get_affine_portal();
	int ret = 0;

	mcc = qm_mc_start(&p->p);
	qm_fqid_set(&mcc->fq, fq->fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		ret = -ETIMEDOUT;
		goto out;
	}

	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
	if (mcr->result == QM_MCR_RESULT_OK)
		*fqd = mcr->queryfq.fqd;
	else
		ret = -EIO;
out:
	put_affine_portal();
	return ret;
}

int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np)
{
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	struct qman_portal *p = get_affine_portal();
	int ret = 0;

	mcc = qm_mc_start(&p->p);
	qm_fqid_set(&mcc->fq, fq->fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		ret = -ETIMEDOUT;
		goto out;
	}

	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
	if (mcr->result == QM_MCR_RESULT_OK)
		*np = mcr->queryfq_np;
	else if (mcr->result == QM_MCR_RESULT_ERR_FQID)
		ret = -ERANGE;
	else
		ret = -EIO;
out:
	put_affine_portal();
	return ret;
}
EXPORT_SYMBOL(qman_query_fq_np);

static int qman_query_cgr(struct qman_cgr *cgr,
			  struct qm_mcr_querycgr *cgrd)
{
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	struct qman_portal *p = get_affine_portal();
	int ret = 0;

	mcc = qm_mc_start(&p->p);
	mcc->cgr.cgid = cgr->cgrid;
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCGR);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		ret = -ETIMEDOUT;
		goto out;
	}
	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYCGR);
	if (mcr->result == QM_MCR_RESULT_OK)
		*cgrd = mcr->querycgr;
	else {
		dev_err(p->config->dev, "QUERY_CGR failed: %s\n",
			mcr_result_str(mcr->result));
		ret = -EIO;
	}
out:
	put_affine_portal();
	return ret;
}

int qman_query_cgr_congested(struct qman_cgr *cgr, bool *result)
{
	struct qm_mcr_querycgr query_cgr;
	int err;

	err = qman_query_cgr(cgr, &query_cgr);
	if (err)
		return err;

	*result = !!query_cgr.cgr.cs;
	return 0;
}
EXPORT_SYMBOL(qman_query_cgr_congested);
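
/*
 * Illustrative sketch (not part of this driver): a transmit path can use
 * this to back off while a congestion group is above its threshold. The
 * NETDEV_TX_BUSY return assumes the caller is a hypothetical ndo_start_xmit
 * implementation.
 *
 *	bool congested;
 *
 *	if (!qman_query_cgr_congested(&priv->cgr, &congested) && congested)
 *		return NETDEV_TX_BUSY;
 */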

/* internal function used as a wait_event() expression */
static int set_p_vdqcr(struct qman_portal *p, struct qman_fq *fq, u32 vdqcr)
{
	unsigned long irqflags;
	int ret = -EBUSY;

	local_irq_save(irqflags);
	if (p->vdqcr_owned)
		goto out;
	if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
		goto out;

	fq_set(fq, QMAN_FQ_STATE_VDQCR);
	p->vdqcr_owned = fq;
	qm_dqrr_vdqcr_set(&p->p, vdqcr);
	ret = 0;
out:
	local_irq_restore(irqflags);
	return ret;
}

static int set_vdqcr(struct qman_portal **p, struct qman_fq *fq, u32 vdqcr)
{
	int ret;

	*p = get_affine_portal();
	ret = set_p_vdqcr(*p, fq, vdqcr);
	put_affine_portal();
	return ret;
}

static int wait_vdqcr_start(struct qman_portal **p, struct qman_fq *fq,
			    u32 vdqcr, u32 flags)
{
	int ret = 0;

	if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
		ret = wait_event_interruptible(affine_queue,
					       !set_vdqcr(p, fq, vdqcr));
	else
		wait_event(affine_queue, !set_vdqcr(p, fq, vdqcr));
	return ret;
}

int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr)
{
	struct qman_portal *p;
	int ret;

	if (fq->state != qman_fq_state_parked &&
	    fq->state != qman_fq_state_retired)
		return -EINVAL;
	if (vdqcr & QM_VDQCR_FQID_MASK)
		return -EINVAL;
	if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
		return -EBUSY;
	vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid;
	if (flags & QMAN_VOLATILE_FLAG_WAIT)
		ret = wait_vdqcr_start(&p, fq, vdqcr, flags);
	else
		ret = set_vdqcr(&p, fq, vdqcr);
	if (ret)
		return ret;
	/* VDQCR is set */
	if (flags & QMAN_VOLATILE_FLAG_FINISH) {
		if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
			/*
			 * NB: don't propagate any error - the caller wouldn't
			 * know whether the VDQCR was issued or not. A signal
			 * could arrive after returning anyway, so the caller
			 * can check signal_pending() if that's an issue.
			 */
			wait_event_interruptible(affine_queue,
						 !fq_isset(fq, QMAN_FQ_STATE_VDQCR));
		else
			wait_event(affine_queue,
				   !fq_isset(fq, QMAN_FQ_STATE_VDQCR));
	}
	return 0;
}
EXPORT_SYMBOL(qman_volatile_dequeue);
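
/*
 * Illustrative sketch (not part of this driver): draining a parked/retired
 * FQ synchronously with a volatile dequeue. QM_VDQCR_NUMFRAMES_TILLEMPTY and
 * the flags are from the public qman.h; the FQ's DQRR callback still sees
 * every dequeued frame.
 *
 *	err = qman_volatile_dequeue(&rxq->fq,
 *				    QMAN_VOLATILE_FLAG_WAIT |
 *				    QMAN_VOLATILE_FLAG_FINISH,
 *				    QM_VDQCR_NUMFRAMES_TILLEMPTY);
 */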

static void update_eqcr_ci(struct qman_portal *p, u8 avail)
{
	if (avail)
		qm_eqcr_cce_prefetch(&p->p);
	else
		qm_eqcr_cce_update(&p->p);
}

int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd)
{
	struct qman_portal *p;
	struct qm_eqcr_entry *eq;
	unsigned long irqflags;
	u8 avail;
	int ret = 0;

	p = get_affine_portal();
	local_irq_save(irqflags);

	if (p->use_eqcr_ci_stashing) {
		/*
		 * The stashing case is easy, only update if we need to in
		 * order to try and liberate ring entries.
		 */
		eq = qm_eqcr_start_stash(&p->p);
	} else {
		/*
		 * The non-stashing case is harder, need to prefetch ahead of
		 * time.
		 */
		avail = qm_eqcr_get_avail(&p->p);
		if (avail < 2)
			update_eqcr_ci(p, avail);
		eq = qm_eqcr_start_no_stash(&p->p);
	}

	if (unlikely(!eq)) {
		/* EQCR is full; report it rather than silently dropping */
		ret = -EBUSY;
		goto out;
	}

	qm_fqid_set(eq, fq->fqid);
	eq->tag = cpu_to_be32(fq_to_tag(fq));
	eq->fd = *fd;

	qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE);
out:
	local_irq_restore(irqflags);
	put_affine_portal();
	return ret;
}
EXPORT_SYMBOL(qman_enqueue);
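
/*
 * Illustrative sketch (not part of this driver): since the EQCR is a small
 * fixed-size ring, producers typically retry a bounded number of times on
 * -EBUSY (the DPAA Ethernet driver uses this pattern). MY_ENQ_RETRIES is a
 * hypothetical bound.
 *
 *	for (i = 0; i < MY_ENQ_RETRIES; i++) {
 *		err = qman_enqueue(&txq->fq, &fd);
 *		if (err != -EBUSY)
 *			break;
 *	}
 */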

static int qm_modify_cgr(struct qman_cgr *cgr, u32 flags,
			 struct qm_mcc_initcgr *opts)
{
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	struct qman_portal *p = get_affine_portal();
	u8 verb = QM_MCC_VERB_MODIFYCGR;
	int ret = 0;

	mcc = qm_mc_start(&p->p);
	if (opts)
		mcc->initcgr = *opts;
	mcc->initcgr.cgid = cgr->cgrid;
	if (flags & QMAN_CGR_FLAG_USE_INIT)
		verb = QM_MCC_VERB_INITCGR;
	qm_mc_commit(&p->p, verb);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		ret = -ETIMEDOUT;
		goto out;
	}

	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == verb);
	if (mcr->result != QM_MCR_RESULT_OK)
		ret = -EIO;

out:
	put_affine_portal();
	return ret;
}

#define PORTAL_IDX(n)	(n->config->channel - QM_CHANNEL_SWPORTAL0)

/* congestion state change notification target update control */
static void qm_cgr_cscn_targ_set(struct __qm_mc_cgr *cgr, int pi, u32 val)
{
	if (qman_ip_rev >= QMAN_REV30)
		cgr->cscn_targ_upd_ctrl = cpu_to_be16(pi |
					QM_CGR_TARG_UDP_CTRL_WRITE_BIT);
	else
		cgr->cscn_targ = cpu_to_be32(val | QM_CGR_TARG_PORTAL(pi));
}

static void qm_cgr_cscn_targ_clear(struct __qm_mc_cgr *cgr, int pi, u32 val)
{
	if (qman_ip_rev >= QMAN_REV30)
		cgr->cscn_targ_upd_ctrl = cpu_to_be16(pi);
	else
		cgr->cscn_targ = cpu_to_be32(val & ~QM_CGR_TARG_PORTAL(pi));
}

static u8 qman_cgr_cpus[CGR_NUM];

void qman_init_cgr_all(void)
{
	struct qman_cgr cgr;
	int err_cnt = 0;

	for (cgr.cgrid = 0; cgr.cgrid < CGR_NUM; cgr.cgrid++) {
		if (qm_modify_cgr(&cgr, QMAN_CGR_FLAG_USE_INIT, NULL))
			err_cnt++;
	}

	if (err_cnt)
		pr_err("Warning: %d error%s while initialising CGR h/w\n",
		       err_cnt, (err_cnt > 1) ? "s" : "");
}

int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
		    struct qm_mcc_initcgr *opts)
{
	struct qm_mcr_querycgr cgr_state;
	int ret;
	struct qman_portal *p;

	/*
	 * We have to check that the provided CGRID is within the limits of the
	 * data-structures, for obvious reasons. However we'll let h/w take
	 * care of determining whether it's within the limits of what exists on
	 * the SoC.
	 */
	if (cgr->cgrid >= CGR_NUM)
		return -EINVAL;

	preempt_disable();
	p = get_affine_portal();
	qman_cgr_cpus[cgr->cgrid] = smp_processor_id();
	preempt_enable();

	cgr->chan = p->config->channel;
	raw_spin_lock_irq(&p->cgr_lock);

	if (opts) {
		struct qm_mcc_initcgr local_opts = *opts;

		ret = qman_query_cgr(cgr, &cgr_state);
		if (ret)
			goto out;

		qm_cgr_cscn_targ_set(&local_opts.cgr, PORTAL_IDX(p),
				     be32_to_cpu(cgr_state.cgr.cscn_targ));
		local_opts.we_mask |= cpu_to_be16(QM_CGR_WE_CSCN_TARG);

		/* send init if flags indicate so */
		if (flags & QMAN_CGR_FLAG_USE_INIT)
			ret = qm_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT,
					    &local_opts);
		else
			ret = qm_modify_cgr(cgr, 0, &local_opts);
		if (ret)
			goto out;
	}

	list_add(&cgr->node, &p->cgr_cbs);

	/* Determine if newly added object requires its callback to be called */
	ret = qman_query_cgr(cgr, &cgr_state);
	if (ret) {
		/* we can't go back, so proceed and return success */
		dev_err(p->config->dev, "CGR HW state partially modified\n");
		ret = 0;
		goto out;
	}
	if (cgr->cb && cgr_state.cgr.cscn_en &&
	    qman_cgrs_get(&p->cgrs[1], cgr->cgrid))
		cgr->cb(p, cgr, 1);
out:
	raw_spin_unlock_irq(&p->cgr_lock);
	put_affine_portal();
	return ret;
}
EXPORT_SYMBOL(qman_create_cgr);
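
/*
 * Illustrative sketch (not part of this driver): creating a CGR with
 * congestion-state notifications enabled and a byte-count threshold, the
 * same pattern the DPAA Ethernet driver uses. priv->cgr.cgrid is assumed to
 * have been allocated with qman_alloc_cgrid_range() first; 'cs_threshold'
 * and my_cscn_cb are hypothetical.
 *
 *	struct qm_mcc_initcgr initcgr;
 *
 *	memset(&initcgr, 0, sizeof(initcgr));
 *	priv->cgr.cb = my_cscn_cb;
 *	initcgr.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES);
 *	initcgr.cgr.cscn_en = QM_CGR_EN;
 *	qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_threshold, 1);
 *	err = qman_create_cgr(&priv->cgr, QMAN_CGR_FLAG_USE_INIT, &initcgr);
 */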

static struct qman_portal *qman_cgr_get_affine_portal(struct qman_cgr *cgr)
{
	struct qman_portal *p = get_affine_portal();

	if (cgr->chan != p->config->channel) {
		/* attempt to delete from a portal other than the creator's */
		dev_err(p->config->dev, "CGR not owned by current portal");
		dev_dbg(p->config->dev, " create 0x%x, delete 0x%x\n",
			cgr->chan, p->config->channel);
		put_affine_portal();
		return NULL;
	}

	return p;
}

int qman_delete_cgr(struct qman_cgr *cgr)
{
	unsigned long irqflags;
	struct qm_mcr_querycgr cgr_state;
	struct qm_mcc_initcgr local_opts;
	int ret = 0;
	struct qman_cgr *i;
	struct qman_portal *p = qman_cgr_get_affine_portal(cgr);

	if (!p)
		return -EINVAL;

	memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
	raw_spin_lock_irqsave(&p->cgr_lock, irqflags);
	list_del(&cgr->node);
	/*
	 * If there are no other CGR objects for this CGRID in the list,
	 * update CSCN_TARG accordingly
	 */
	list_for_each_entry(i, &p->cgr_cbs, node)
		if (i->cgrid == cgr->cgrid && i->cb)
			goto release_lock;
	ret = qman_query_cgr(cgr, &cgr_state);
	if (ret) {
		/* add back to the list */
		list_add(&cgr->node, &p->cgr_cbs);
		goto release_lock;
	}

	local_opts.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_TARG);
	qm_cgr_cscn_targ_clear(&local_opts.cgr, PORTAL_IDX(p),
			       be32_to_cpu(cgr_state.cgr.cscn_targ));

	ret = qm_modify_cgr(cgr, 0, &local_opts);
	if (ret)
		/* add back to the list */
		list_add(&cgr->node, &p->cgr_cbs);
release_lock:
	raw_spin_unlock_irqrestore(&p->cgr_lock, irqflags);
	put_affine_portal();
	return ret;
}
EXPORT_SYMBOL(qman_delete_cgr);

static void qman_delete_cgr_smp_call(void *p)
{
	qman_delete_cgr((struct qman_cgr *)p);
}

void qman_delete_cgr_safe(struct qman_cgr *cgr)
{
	preempt_disable();
	if (qman_cgr_cpus[cgr->cgrid] != smp_processor_id()) {
		smp_call_function_single(qman_cgr_cpus[cgr->cgrid],
					 qman_delete_cgr_smp_call, cgr, true);
		preempt_enable();
		return;
	}

	qman_delete_cgr(cgr);
	preempt_enable();
}
EXPORT_SYMBOL(qman_delete_cgr_safe);

static int qman_update_cgr(struct qman_cgr *cgr, struct qm_mcc_initcgr *opts)
{
	int ret;
	unsigned long irqflags;
	struct qman_portal *p = qman_cgr_get_affine_portal(cgr);

	if (!p)
		return -EINVAL;

	raw_spin_lock_irqsave(&p->cgr_lock, irqflags);
	ret = qm_modify_cgr(cgr, 0, opts);
	raw_spin_unlock_irqrestore(&p->cgr_lock, irqflags);
	put_affine_portal();
	return ret;
}

struct update_cgr_params {
	struct qman_cgr *cgr;
	struct qm_mcc_initcgr *opts;
	int ret;
};

static void qman_update_cgr_smp_call(void *p)
{
	struct update_cgr_params *params = p;

	params->ret = qman_update_cgr(params->cgr, params->opts);
}

int qman_update_cgr_safe(struct qman_cgr *cgr, struct qm_mcc_initcgr *opts)
{
	struct update_cgr_params params = {
		.cgr = cgr,
		.opts = opts,
	};

	preempt_disable();
	if (qman_cgr_cpus[cgr->cgrid] != smp_processor_id())
		smp_call_function_single(qman_cgr_cpus[cgr->cgrid],
					 qman_update_cgr_smp_call, &params,
					 true);
	else
		params.ret = qman_update_cgr(cgr, opts);
	preempt_enable();
	return params.ret;
}
EXPORT_SYMBOL(qman_update_cgr_safe);
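
/*
 * Illustrative sketch (not part of this driver): adjusting the congestion
 * threshold of an existing CGR from any CPU; the cross-CPU call above makes
 * sure the MODIFYCGR lands on the portal that owns the CGR. 'new_threshold'
 * is assumed to be chosen by the caller.
 *
 *	struct qm_mcc_initcgr opts;
 *
 *	memset(&opts, 0, sizeof(opts));
 *	opts.we_mask = cpu_to_be16(QM_CGR_WE_CS_THRES);
 *	qm_cgr_cs_thres_set64(&opts.cgr.cs_thres, new_threshold, 1);
 *	err = qman_update_cgr_safe(&priv->cgr, &opts);
 */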

/* Cleanup FQs */

static int _qm_mr_consume_and_match_verb(struct qm_portal *p, int v)
{
	const union qm_mr_entry *msg;
	int found = 0;

	qm_mr_pvb_update(p);
	msg = qm_mr_current(p);
	while (msg) {
		if ((msg->verb & QM_MR_VERB_TYPE_MASK) == v)
			found = 1;
		qm_mr_next(p);
		qm_mr_cci_consume_to_current(p);
		qm_mr_pvb_update(p);
		msg = qm_mr_current(p);
	}
	return found;
}

static int _qm_dqrr_consume_and_match(struct qm_portal *p, u32 fqid, int s,
				      bool wait)
{
	const struct qm_dqrr_entry *dqrr;
	int found = 0;

	do {
		qm_dqrr_pvb_update(p);
		dqrr = qm_dqrr_current(p);
		if (!dqrr)
			cpu_relax();
	} while (wait && !dqrr);

	while (dqrr) {
		if (qm_fqid_get(dqrr) == fqid && (dqrr->stat & s))
			found = 1;
		qm_dqrr_cdc_consume_1ptr(p, dqrr, 0);
		qm_dqrr_pvb_update(p);
		qm_dqrr_next(p);
		dqrr = qm_dqrr_current(p);
	}
	return found;
}

#define qm_mr_drain(p, V) \
	_qm_mr_consume_and_match_verb(p, QM_MR_VERB_##V)

#define qm_dqrr_drain(p, f, S) \
	_qm_dqrr_consume_and_match(p, f, QM_DQRR_STAT_##S, false)

#define qm_dqrr_drain_wait(p, f, S) \
	_qm_dqrr_consume_and_match(p, f, QM_DQRR_STAT_##S, true)

#define qm_dqrr_drain_nomatch(p) \
	_qm_dqrr_consume_and_match(p, 0, 0, false)

int qman_shutdown_fq(u32 fqid)
{
	struct qman_portal *p, *channel_portal;
	struct device *dev;
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	int orl_empty, drain = 0, ret = 0;
	u32 channel, res;
	u8 state;

	p = get_affine_portal();
	dev = p->config->dev;
	/* Determine the state of the FQID */
	mcc = qm_mc_start(&p->p);
	qm_fqid_set(&mcc->fq, fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		dev_err(dev, "QUERYFQ_NP timeout\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
	state = mcr->queryfq_np.state & QM_MCR_NP_STATE_MASK;
	if (state == QM_MCR_NP_STATE_OOS)
		goto out; /* Already OOS, no need to do any more checks */

	/* Query which channel the FQ is using */
	mcc = qm_mc_start(&p->p);
	qm_fqid_set(&mcc->fq, fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		dev_err(dev, "QUERYFQ timeout\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
	/* Need to store these since the MCR gets reused */
	channel = qm_fqd_get_chan(&mcr->queryfq.fqd);
	qm_fqd_get_wq(&mcr->queryfq.fqd);

	if (channel < qm_channel_pool1) {
		channel_portal = get_portal_for_channel(channel);
		if (channel_portal == NULL) {
			dev_err(dev, "Can't find portal for dedicated channel 0x%x\n",
				channel);
			ret = -EIO;
			goto out;
		}
	} else
		channel_portal = p;

	switch (state) {
	case QM_MCR_NP_STATE_TEN_SCHED:
	case QM_MCR_NP_STATE_TRU_SCHED:
	case QM_MCR_NP_STATE_ACTIVE:
	case QM_MCR_NP_STATE_PARKED:
		orl_empty = 0;
		mcc = qm_mc_start(&channel_portal->p);
		qm_fqid_set(&mcc->fq, fqid);
		qm_mc_commit(&channel_portal->p, QM_MCC_VERB_ALTER_RETIRE);
		if (!qm_mc_result_timeout(&channel_portal->p, &mcr)) {
			dev_err(dev, "ALTER_RETIRE timeout\n");
			ret = -ETIMEDOUT;
			goto out;
		}
		DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
			    QM_MCR_VERB_ALTER_RETIRE);
		res = mcr->result; /* Make a copy as we reuse MCR below */

		if (res == QM_MCR_RESULT_OK)
			drain_mr_fqrni(&channel_portal->p);

		if (res == QM_MCR_RESULT_PENDING) {
			/*
			 * Need to wait for the FQRN in the message ring, which
			 * will only occur once the FQ has been drained. In
			 * order for the FQ to drain the portal needs to be set
			 * to dequeue from the channel the FQ is scheduled on.
			 */
			int found_fqrn = 0;

			/* Flag that we need to drain FQ */
			drain = 1;

			if (channel >= qm_channel_pool1 &&
			    channel < qm_channel_pool1 + 15) {
				/* Pool channel, enable the bit in the portal */
			} else if (channel < qm_channel_pool1) {
				/* Dedicated channel */
			} else {
				dev_err(dev, "Can't recover FQ 0x%x, ch: 0x%x\n",
					fqid, channel);
				ret = -EBUSY;
				goto out;
			}
			/* Set the sdqcr to drain this channel */
			if (channel < qm_channel_pool1)
				qm_dqrr_sdqcr_set(&channel_portal->p,
						  QM_SDQCR_TYPE_ACTIVE |
						  QM_SDQCR_CHANNELS_DEDICATED);
			else
				qm_dqrr_sdqcr_set(&channel_portal->p,
						  QM_SDQCR_TYPE_ACTIVE |
						  QM_SDQCR_CHANNELS_POOL_CONV
						  (channel));
			do {
				/* Keep draining DQRR while checking the MR */
				qm_dqrr_drain_nomatch(&channel_portal->p);
				/* Process message ring too */
				found_fqrn = qm_mr_drain(&channel_portal->p,
							 FQRN);
				cpu_relax();
			} while (!found_fqrn);
			/* Restore SDQCR */
			qm_dqrr_sdqcr_set(&channel_portal->p,
					  channel_portal->sdqcr);
		}
		if (res != QM_MCR_RESULT_OK &&
		    res != QM_MCR_RESULT_PENDING) {
			dev_err(dev, "retire_fq failed: FQ 0x%x, res=0x%x\n",
				fqid, res);
			ret = -EIO;
			goto out;
		}
		if (!(mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)) {
			/*
			 * ORL had no entries, no need to wait until the
			 * ERNs come in.
			 */
			orl_empty = 1;
		}
		/*
		 * Retirement succeeded, check to see if FQ needs
		 * to be drained.
		 */
		if (drain || mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY) {
			/* FQ is Not Empty, drain using volatile DQ commands */
			do {
				u32 vdqcr = fqid | QM_VDQCR_NUMFRAMES_SET(3);

				qm_dqrr_vdqcr_set(&p->p, vdqcr);
				/*
				 * Wait for a dequeue and process the dequeues,
				 * making sure to empty the ring completely.
				 */
			} while (!qm_dqrr_drain_wait(&p->p, fqid, FQ_EMPTY));
		}

		while (!orl_empty) {
			/* Wait for the ORL to have been completely drained */
			orl_empty = qm_mr_drain(&p->p, FQRL);
			cpu_relax();
		}
		mcc = qm_mc_start(&p->p);
		qm_fqid_set(&mcc->fq, fqid);
		qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
		if (!qm_mc_result_timeout(&p->p, &mcr)) {
			ret = -ETIMEDOUT;
			goto out;
		}

		DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
			    QM_MCR_VERB_ALTER_OOS);
		if (mcr->result != QM_MCR_RESULT_OK) {
			dev_err(dev, "OOS after drain fail: FQ 0x%x (0x%x)\n",
				fqid, mcr->result);
			ret = -EIO;
			goto out;
		}
		break;

	case QM_MCR_NP_STATE_RETIRED:
		/* Send OOS Command */
		mcc = qm_mc_start(&p->p);
		qm_fqid_set(&mcc->fq, fqid);
		qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
		if (!qm_mc_result_timeout(&p->p, &mcr)) {
			ret = -ETIMEDOUT;
			goto out;
		}

		DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
			    QM_MCR_VERB_ALTER_OOS);
		if (mcr->result != QM_MCR_RESULT_OK) {
			dev_err(dev, "OOS fail: FQ 0x%x (0x%x)\n",
				fqid, mcr->result);
			ret = -EIO;
			goto out;
		}
		break;

	case QM_MCR_NP_STATE_OOS:
		/* Done */
		break;

	default:
		ret = -EIO;
	}

out:
	put_affine_portal();
	return ret;
}

const struct qm_portal_config *qman_get_qm_portal_config(
						struct qman_portal *portal)
{
	return portal->config;
}
EXPORT_SYMBOL(qman_get_qm_portal_config);

struct gen_pool *qm_fqalloc; /* FQID allocator */
struct gen_pool *qm_qpalloc; /* pool-channel allocator */
struct gen_pool *qm_cgralloc; /* CGR ID allocator */

static int qman_alloc_range(struct gen_pool *p, u32 *result, u32 cnt)
{
	unsigned long addr;

	if (!p)
		return -ENODEV;

	addr = gen_pool_alloc(p, cnt);
	if (!addr)
		return -ENOMEM;

	*result = addr & ~DPAA_GENALLOC_OFF;

	return 0;
}

int qman_alloc_fqid_range(u32 *result, u32 count)
{
	return qman_alloc_range(qm_fqalloc, result, count);
}
EXPORT_SYMBOL(qman_alloc_fqid_range);

int qman_alloc_pool_range(u32 *result, u32 count)
{
	return qman_alloc_range(qm_qpalloc, result, count);
}
EXPORT_SYMBOL(qman_alloc_pool_range);

int qman_alloc_cgrid_range(u32 *result, u32 count)
{
	return qman_alloc_range(qm_cgralloc, result, count);
}
EXPORT_SYMBOL(qman_alloc_cgrid_range);

int qman_release_fqid(u32 fqid)
{
	int ret = qman_shutdown_fq(fqid);

	if (ret) {
		pr_debug("FQID %d leaked\n", fqid);
		return ret;
	}

	gen_pool_free(qm_fqalloc, fqid | DPAA_GENALLOC_OFF, 1);
	return 0;
}
EXPORT_SYMBOL(qman_release_fqid);

static int qpool_cleanup(u32 qp)
{
	/*
	 * We query all FQDs starting from FQID 1 until we get an "invalid
	 * FQID" error, looking for non-OOS FQDs whose destination channel
	 * is the pool-channel being released. When a non-OOS FQD is found
	 * we attempt to clean it up.
	 */
	struct qman_fq fq = {
		.fqid = QM_FQID_RANGE_START
	};
	int err;

	do {
		struct qm_mcr_queryfq_np np;

		err = qman_query_fq_np(&fq, &np);
		if (err == -ERANGE)
			/* FQID range exceeded, found no problems */
			return 0;
		else if (WARN_ON(err))
			return err;

		if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) {
			struct qm_fqd fqd;

			err = qman_query_fq(&fq, &fqd);
			if (WARN_ON(err))
				return err;
			if (qm_fqd_get_chan(&fqd) == qp) {
				/* The channel is the FQ's target, clean it */
				err = qman_shutdown_fq(fq.fqid);
				if (err)
					/*
					 * Couldn't shut down the FQ
					 * so the pool must be leaked.
					 */
					return err;
			}
		}
		/* Move to the next FQID */
		fq.fqid++;
	} while (1);
}

int qman_release_pool(u32 qp)
{
	int ret;

	ret = qpool_cleanup(qp);
	if (ret) {
		pr_debug("CHID %d leaked\n", qp);
		return ret;
	}

	gen_pool_free(qm_qpalloc, qp | DPAA_GENALLOC_OFF, 1);
	return 0;
}
EXPORT_SYMBOL(qman_release_pool);

static int cgr_cleanup(u32 cgrid)
{
	/*
	 * Query all FQDs starting from FQID 1 until we get an "invalid FQID"
	 * error, looking for non-OOS FQDs whose CGR is the CGR being released.
	 */
	struct qman_fq fq = {
		.fqid = QM_FQID_RANGE_START
	};
	int err;

	do {
		struct qm_mcr_queryfq_np np;

		err = qman_query_fq_np(&fq, &np);
		if (err == -ERANGE)
			/* FQID range exceeded, found no problems */
			return 0;
		else if (WARN_ON(err))
			return err;

		if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) {
			struct qm_fqd fqd;

			err = qman_query_fq(&fq, &fqd);
			if (WARN_ON(err))
				return err;
			if (be16_to_cpu(fqd.fq_ctrl) & QM_FQCTRL_CGE &&
			    fqd.cgid == cgrid) {
				pr_err("CGRID 0x%x is being used by FQID 0x%x, CGR will be leaked\n",
				       cgrid, fq.fqid);
				return -EIO;
			}
		}
		/* Move to the next FQID */
		fq.fqid++;
	} while (1);
}

int qman_release_cgrid(u32 cgrid)
{
	int ret;

	ret = cgr_cleanup(cgrid);
	if (ret) {
		pr_debug("CGRID %d leaked\n", cgrid);
		return ret;
	}

	gen_pool_free(qm_cgralloc, cgrid | DPAA_GENALLOC_OFF, 1);
	return 0;
}
EXPORT_SYMBOL(qman_release_cgrid);
