xref: /freebsd/sys/contrib/ncsw/Peripherals/QM/qm_portal_fqr.c (revision ec0ea6efa1ad229d75c394c1a9b9cac33af2b1d3)
1 /******************************************************************************
2 
 3  © 1995-2003, 2004, 2005-2011 Freescale Semiconductor, Inc.
4  All rights reserved.
5 
6  This is proprietary source code of Freescale Semiconductor Inc.,
7  and its use is subject to the NetComm Device Drivers EULA.
8  The copyright notice above does not evidence any actual or intended
9  publication of such source code.
10 
11  ALTERNATIVELY, redistribution and use in source and binary forms, with
12  or without modification, are permitted provided that the following
13  conditions are met:
14      * Redistributions of source code must retain the above copyright
15        notice, this list of conditions and the following disclaimer.
16      * Redistributions in binary form must reproduce the above copyright
17        notice, this list of conditions and the following disclaimer in the
18        documentation and/or other materials provided with the distribution.
19      * Neither the name of Freescale Semiconductor nor the
20        names of its contributors may be used to endorse or promote products
21        derived from this software without specific prior written permission.
22 
23  THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
24  EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
25  WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
26  DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
27  DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
28  (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29  LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
30  ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
32  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33  *
34 
35  **************************************************************************/
36 /******************************************************************************
 37  @File          qm_portal_fqr.c
38 
39  @Description   QM & Portal implementation
40 *//***************************************************************************/
41 #include <sys/cdefs.h>
42 #include <sys/types.h>
43 #include <machine/atomic.h>
44 
45 #include "error_ext.h"
46 #include "std_ext.h"
47 #include "string_ext.h"
48 #include "mm_ext.h"
49 #include "qm.h"
50 #include "qman_low.h"
51 
52 #include <machine/vmparam.h>
53 
54 /****************************************/
55 /*       static functions               */
56 /****************************************/
57 
58 #define SLOW_POLL_IDLE   1000
59 #define SLOW_POLL_BUSY   10
60 
/*
 * Context entries are 32-bit.  The qman driver uses the pointer to the queue
 * as its context.  The pointer is 8-byte aligned (the XX_MallocSmart() call
 * is said to align more strongly, but only 8-byte alignment is relied upon by
 * the KASSERT below), so the low 3 bits of the rebased pointer can be
 * dropped.  Take advantage of this fact to shove a 64-bit kernel pointer
 * into a 32-bit context integer, and back.
 *
 * XXX: This depends on the fact that VM_MAX_KERNEL_ADDRESS is less than a
 * 35-bit count from VM_MIN_KERNEL_ADDRESS (32 context bits + 3 alignment
 * bits), as enforced by the CTASSERT below.  If this ever changes, this
 * needs to be updated.
 */
CTASSERT((VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) < (1ULL << 35));
static inline uint32_t
aligned_int_from_ptr(const void *p)
{
	uintptr_t ctx;

	ctx = (uintptr_t)p;
	/* Addresses below the kernel map cannot be encoded. */
	KASSERT(ctx >= VM_MIN_KERNEL_ADDRESS, ("%p is too low!\n", p));
	/* Rebase so the remaining value fits in 35 bits. */
	ctx -= VM_MIN_KERNEL_ADDRESS;
	KASSERT((ctx & 0x07) == 0, ("Pointer %p is not 8-byte aligned!\n", p));

	/* Drop the 3 known-zero alignment bits to fit 32 bits. */
	return (ctx >> 3);
}
84 
85 static inline void *
86 ptr_from_aligned_int(uint32_t ctx)
87 {
88 	uintptr_t p;
89 
90 	p = ctx;
91 	p = VM_MIN_KERNEL_ADDRESS + (p << 3);
92 
93 	return ((void *)p);
94 }
95 
/*
 * qman_volatile_dequeue - Issue a volatile dequeue command (VDQCR) for one FQ.
 *
 * p_QmPortal: portal through which the command is issued
 * p_Fq:       FQ to pull frames from; must be parked or retired
 * vdqcr:      VDQCR option bits; the FQID field must be clear (it is filled
 *             in from p_Fq->fqid here)
 *
 * Sets QMAN_FQ_STATE_VDQCR so the dequeue path can recognize completion
 * (cleared when a DQRR entry with QM_DQRR_STAT_DQCR_EXPIRED is seen).
 * Returns E_OK; the dequeue itself completes asynchronously.
 */
static t_Error qman_volatile_dequeue(t_QmPortal     *p_QmPortal,
                                     struct qman_fq *p_Fq,
                                     uint32_t       vdqcr)
{
    ASSERT_COND((p_Fq->state == qman_fq_state_parked) ||
                (p_Fq->state == qman_fq_state_retired));
    ASSERT_COND(!(vdqcr & QM_VDQCR_FQID_MASK));
    ASSERT_COND(!(p_Fq->flags & QMAN_FQ_STATE_VDQCR));

    /* Merge the target FQID into the command word. */
    vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | p_Fq->fqid;
    /* Lock order: portal first, then FQ (unlock in reverse). */
    NCSW_PLOCK(p_QmPortal);
    FQLOCK(p_Fq);
    p_Fq->flags |= QMAN_FQ_STATE_VDQCR;
    qm_dqrr_vdqcr_set(p_QmPortal->p_LowQmPortal, vdqcr);
    FQUNLOCK(p_Fq);
    PUNLOCK(p_QmPortal);

    return E_OK;
}
115 
116 static const char *mcr_result_str(uint8_t result)
117 {
118     switch (result) {
119     case QM_MCR_RESULT_NULL:
120         return "QM_MCR_RESULT_NULL";
121     case QM_MCR_RESULT_OK:
122         return "QM_MCR_RESULT_OK";
123     case QM_MCR_RESULT_ERR_FQID:
124         return "QM_MCR_RESULT_ERR_FQID";
125     case QM_MCR_RESULT_ERR_FQSTATE:
126         return "QM_MCR_RESULT_ERR_FQSTATE";
127     case QM_MCR_RESULT_ERR_NOTEMPTY:
128         return "QM_MCR_RESULT_ERR_NOTEMPTY";
129     case QM_MCR_RESULT_PENDING:
130         return "QM_MCR_RESULT_PENDING";
131     }
132     return "<unknown MCR result>";
133 }
134 
135 static t_Error qman_create_fq(t_QmPortal        *p_QmPortal,
136                               uint32_t          fqid,
137                               uint32_t          flags,
138                               struct qman_fq    *p_Fq)
139 {
140     struct qm_fqd fqd;
141     struct qm_mcr_queryfq_np np;
142     struct qm_mc_command *p_Mcc;
143     struct qm_mc_result *p_Mcr;
144 
145     p_Fq->fqid = fqid;
146     p_Fq->flags = flags;
147     p_Fq->state = qman_fq_state_oos;
148     p_Fq->cgr_groupid = 0;
149     if (!(flags & QMAN_FQ_FLAG_RECOVER) ||
150             (flags & QMAN_FQ_FLAG_NO_MODIFY))
151         return E_OK;
152     /* Everything else is RECOVER support */
153     NCSW_PLOCK(p_QmPortal);
154     p_Mcc = qm_mc_start(p_QmPortal->p_LowQmPortal);
155     p_Mcc->queryfq.fqid = fqid;
156     qm_mc_commit(p_QmPortal->p_LowQmPortal, QM_MCC_VERB_QUERYFQ);
157     while (!(p_Mcr = qm_mc_result(p_QmPortal->p_LowQmPortal))) ;
158     ASSERT_COND((p_Mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYFQ);
159     if (p_Mcr->result != QM_MCR_RESULT_OK) {
160         PUNLOCK(p_QmPortal);
161         RETURN_ERROR(MAJOR, E_INVALID_STATE, ("QUERYFQ failed: %s", mcr_result_str(p_Mcr->result)));
162     }
163     fqd = p_Mcr->queryfq.fqd;
164     p_Mcc = qm_mc_start(p_QmPortal->p_LowQmPortal);
165     p_Mcc->queryfq_np.fqid = fqid;
166     qm_mc_commit(p_QmPortal->p_LowQmPortal, QM_MCC_VERB_QUERYFQ_NP);
167     while (!(p_Mcr = qm_mc_result(p_QmPortal->p_LowQmPortal))) ;
168     ASSERT_COND((p_Mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYFQ_NP);
169     if (p_Mcr->result != QM_MCR_RESULT_OK) {
170         PUNLOCK(p_QmPortal);
171         RETURN_ERROR(MAJOR, E_INVALID_STATE, ("UERYFQ_NP failed: %s", mcr_result_str(p_Mcr->result)));
172     }
173     np = p_Mcr->queryfq_np;
174     /* Phew, have queryfq and queryfq_np results, stitch together
175      * the FQ object from those. */
176     p_Fq->cgr_groupid = fqd.cgid;
177     switch (np.state & QM_MCR_NP_STATE_MASK) {
178     case QM_MCR_NP_STATE_OOS:
179         break;
180     case QM_MCR_NP_STATE_RETIRED:
181         p_Fq->state = qman_fq_state_retired;
182         if (np.frm_cnt)
183             p_Fq->flags |= QMAN_FQ_STATE_NE;
184         break;
185     case QM_MCR_NP_STATE_TEN_SCHED:
186     case QM_MCR_NP_STATE_TRU_SCHED:
187     case QM_MCR_NP_STATE_ACTIVE:
188         p_Fq->state = qman_fq_state_sched;
189         if (np.state & QM_MCR_NP_STATE_R)
190             p_Fq->flags |= QMAN_FQ_STATE_CHANGING;
191         break;
192     case QM_MCR_NP_STATE_PARKED:
193         p_Fq->state = qman_fq_state_parked;
194         break;
195     default:
196         ASSERT_COND(FALSE);
197     }
198     if (fqd.fq_ctrl & QM_FQCTRL_CGE)
199         p_Fq->state |= QMAN_FQ_STATE_CGR_EN;
200     PUNLOCK(p_QmPortal);
201 
202     return E_OK;
203 }
204 
205 static void qman_destroy_fq(struct qman_fq *p_Fq, uint32_t flags)
206 {
207     /* We don't need to lock the FQ as it is a pre-condition that the FQ be
208      * quiesced. Instead, run some checks. */
209     UNUSED(flags);
210     switch (p_Fq->state) {
211     case qman_fq_state_parked:
212         ASSERT_COND(flags & QMAN_FQ_DESTROY_PARKED);
213     case qman_fq_state_oos:
214         return;
215     default:
216         break;
217     }
218     ASSERT_COND(FALSE);
219 }
220 
/*
 * qman_init_fq - Issue an INITFQ_[PARKED|SCHED] management command for an FQ.
 *
 * p_QmPortal: portal through which the command is issued
 * p_Fq:       FQ to initialize; must be oos or parked, and not NO_MODIFY
 * flags:      QMAN_INITFQ_FLAG_SCHED selects INITFQ_SCHED over INITFQ_PARKED
 * p_Opts:     initfq options copied verbatim into the command
 *
 * On success the software state is updated from the command's write-enable
 * mask (CGR-enable flag, CGR group id) and the FQ moves to sched or parked.
 * Returns E_OK, E_INVALID_VALUE for NO_MODIFY FQs, E_BUSY if the FQ is
 * changing state, or E_INVALID_STATE if the command fails.
 */
static t_Error qman_init_fq(t_QmPortal          *p_QmPortal,
                            struct qman_fq      *p_Fq,
                            uint32_t            flags,
                            struct qm_mcc_initfq *p_Opts)
{
    struct qm_mc_command    *p_Mcc;
    struct qm_mc_result     *p_Mcr;
    uint8_t res, myverb = (uint8_t)((flags & QMAN_INITFQ_FLAG_SCHED) ?
        QM_MCC_VERB_INITFQ_SCHED : QM_MCC_VERB_INITFQ_PARKED);

    SANITY_CHECK_RETURN_ERROR((p_Fq->state == qman_fq_state_oos) ||
                              (p_Fq->state == qman_fq_state_parked),
                              E_INVALID_STATE);

    if (p_Fq->flags & QMAN_FQ_FLAG_NO_MODIFY)
        return ERROR_CODE(E_INVALID_VALUE);
    /* Issue an INITFQ_[PARKED|SCHED] management command */
    NCSW_PLOCK(p_QmPortal);
    FQLOCK(p_Fq);
    /* Re-check state under the locks: it may have changed since the
     * unlocked sanity check above. */
    if ((p_Fq->flags & QMAN_FQ_STATE_CHANGING) ||
            ((p_Fq->state != qman_fq_state_oos) &&
                (p_Fq->state != qman_fq_state_parked))) {
        FQUNLOCK(p_Fq);
        PUNLOCK(p_QmPortal);
        return ERROR_CODE(E_BUSY);
    }
    p_Mcc = qm_mc_start(p_QmPortal->p_LowQmPortal);
    Mem2IOCpy32((void*)&p_Mcc->initfq, p_Opts, sizeof(struct qm_mcc_initfq));
    qm_mc_commit(p_QmPortal->p_LowQmPortal, myverb);
    /* Busy-poll for the management-command response. */
    while (!(p_Mcr = qm_mc_result(p_QmPortal->p_LowQmPortal))) ;
    ASSERT_COND((p_Mcr->verb & QM_MCR_VERB_MASK) == myverb);
    res = p_Mcr->result;
    if (res != QM_MCR_RESULT_OK) {
        FQUNLOCK(p_Fq);
        PUNLOCK(p_QmPortal);
        RETURN_ERROR(MINOR, E_INVALID_STATE,("INITFQ failed: %s", mcr_result_str(res)));
    }

    /* Mirror the accepted command's settings into the software object.
     * NOTE(review): this re-reads the command memory (p_Mcc) after
     * completion — assumes the portal does not clobber it; confirm against
     * qman_low semantics. */
    if (p_Mcc->initfq.we_mask & QM_INITFQ_WE_FQCTRL) {
        if (p_Mcc->initfq.fqd.fq_ctrl & QM_FQCTRL_CGE)
            p_Fq->flags |= QMAN_FQ_STATE_CGR_EN;
        else
            p_Fq->flags &= ~QMAN_FQ_STATE_CGR_EN;
    }
    if (p_Mcc->initfq.we_mask & QM_INITFQ_WE_CGID)
        p_Fq->cgr_groupid = p_Mcc->initfq.fqd.cgid;
    p_Fq->state = (flags & QMAN_INITFQ_FLAG_SCHED) ?
            qman_fq_state_sched : qman_fq_state_parked;
    FQUNLOCK(p_Fq);
    PUNLOCK(p_QmPortal);
    return E_OK;
}
273 
274 static t_Error qman_retire_fq(t_QmPortal        *p_QmPortal,
275                               struct qman_fq    *p_Fq,
276                               uint32_t          *p_Flags,
277                               bool              drain)
278 {
279     struct qm_mc_command    *p_Mcc;
280     struct qm_mc_result     *p_Mcr;
281     t_Error                 err = E_OK;
282     uint8_t                 res;
283 
284     SANITY_CHECK_RETURN_ERROR((p_Fq->state == qman_fq_state_parked) ||
285                               (p_Fq->state == qman_fq_state_sched),
286                               E_INVALID_STATE);
287 
288     if (p_Fq->flags & QMAN_FQ_FLAG_NO_MODIFY)
289         return E_INVALID_VALUE;
290     NCSW_PLOCK(p_QmPortal);
291     FQLOCK(p_Fq);
292     if ((p_Fq->flags & QMAN_FQ_STATE_CHANGING) ||
293             (p_Fq->state == qman_fq_state_retired) ||
294                 (p_Fq->state == qman_fq_state_oos)) {
295         err = E_BUSY;
296         goto out;
297     }
298     p_Mcc = qm_mc_start(p_QmPortal->p_LowQmPortal);
299     p_Mcc->alterfq.fqid = p_Fq->fqid;
300     if (drain)
301         p_Mcc->alterfq.context_b = aligned_int_from_ptr(p_Fq);
302     qm_mc_commit(p_QmPortal->p_LowQmPortal,
303                  (uint8_t)((drain)?QM_MCC_VERB_ALTER_RETIRE_CTXB:QM_MCC_VERB_ALTER_RETIRE));
304     while (!(p_Mcr = qm_mc_result(p_QmPortal->p_LowQmPortal))) ;
305     ASSERT_COND((p_Mcr->verb & QM_MCR_VERB_MASK) ==
306                 (drain)?QM_MCR_VERB_ALTER_RETIRE_CTXB:QM_MCR_VERB_ALTER_RETIRE);
307     res = p_Mcr->result;
308     if (res == QM_MCR_RESULT_OK)
309     {
310         /* Process 'fq' right away, we'll ignore FQRNI */
311         if (p_Mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY)
312             p_Fq->flags |= QMAN_FQ_STATE_NE;
313         if (p_Mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)
314             p_Fq->flags |= QMAN_FQ_STATE_ORL;
315         p_Fq->state = qman_fq_state_retired;
316     }
317     else if (res == QM_MCR_RESULT_PENDING)
318         p_Fq->flags |= QMAN_FQ_STATE_CHANGING;
319     else {
320         XX_Print("ALTER_RETIRE failed: %s\n",
321                 mcr_result_str(res));
322         err = E_INVALID_STATE;
323     }
324     if (p_Flags)
325         *p_Flags = p_Fq->flags;
326 out:
327     FQUNLOCK(p_Fq);
328     PUNLOCK(p_QmPortal);
329     return err;
330 }
331 
/*
 * qman_oos_fq - Move a retired FQ to the out-of-service state (ALTER_OOS).
 *
 * p_QmPortal: portal through which the command is issued
 * p_Fq:       FQ to take out of service; must be retired, not NO_MODIFY,
 *             and not blocked by QMAN_FQ_STATE_BLOCKOOS (NE/ORL/VDQCR)
 *
 * Returns E_OK on success, E_INVALID_VALUE for NO_MODIFY FQs, E_BUSY if
 * OOS is currently blocked, or E_INVALID_STATE if the command fails.
 */
static t_Error qman_oos_fq(t_QmPortal *p_QmPortal, struct qman_fq *p_Fq)
{
    struct qm_mc_command    *p_Mcc;
    struct qm_mc_result     *p_Mcr;
    uint8_t                 res;

    ASSERT_COND(p_Fq->state == qman_fq_state_retired);
    if (p_Fq->flags & QMAN_FQ_FLAG_NO_MODIFY)
        return ERROR_CODE(E_INVALID_VALUE);
    NCSW_PLOCK(p_QmPortal);
    FQLOCK(p_Fq);
    /* Re-check under the locks; OOS is illegal while frames or ORL
     * entries are still outstanding. */
    if ((p_Fq->flags & QMAN_FQ_STATE_BLOCKOOS) ||
            (p_Fq->state != qman_fq_state_retired)) {
        FQUNLOCK(p_Fq);
        PUNLOCK(p_QmPortal);
        return ERROR_CODE(E_BUSY);
    }
    p_Mcc = qm_mc_start(p_QmPortal->p_LowQmPortal);
    p_Mcc->alterfq.fqid = p_Fq->fqid;
    qm_mc_commit(p_QmPortal->p_LowQmPortal, QM_MCC_VERB_ALTER_OOS);
    /* Busy-poll for the management-command response. */
    while (!(p_Mcr = qm_mc_result(p_QmPortal->p_LowQmPortal))) ;
    ASSERT_COND((p_Mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_OOS);
    res = p_Mcr->result;
    if (res != QM_MCR_RESULT_OK) {
        FQUNLOCK(p_Fq);
        PUNLOCK(p_QmPortal);
        RETURN_ERROR(MINOR, E_INVALID_STATE, ("ALTER_OOS failed: %s\n", mcr_result_str(res)));
    }
    p_Fq->state = qman_fq_state_oos;

    FQUNLOCK(p_Fq);
    PUNLOCK(p_QmPortal);
    return E_OK;
}
366 
367 static t_Error qman_schedule_fq(t_QmPortal *p_QmPortal, struct qman_fq *p_Fq)
368 {
369     struct qm_mc_command    *p_Mcc;
370     struct qm_mc_result     *p_Mcr;
371     uint8_t                 res;
372 
373     ASSERT_COND(p_Fq->state == qman_fq_state_parked);
374     if (p_Fq->flags & QMAN_FQ_FLAG_NO_MODIFY)
375         return ERROR_CODE(E_INVALID_VALUE);
376     /* Issue a ALTERFQ_SCHED management command */
377     NCSW_PLOCK(p_QmPortal);
378     FQLOCK(p_Fq);
379     if ((p_Fq->flags & QMAN_FQ_STATE_CHANGING) ||
380             (p_Fq->state != qman_fq_state_parked)) {
381         FQUNLOCK(p_Fq);
382         PUNLOCK(p_QmPortal);
383         return ERROR_CODE(E_BUSY);
384     }
385     p_Mcc = qm_mc_start(p_QmPortal->p_LowQmPortal);
386     p_Mcc->alterfq.fqid = p_Fq->fqid;
387     qm_mc_commit(p_QmPortal->p_LowQmPortal, QM_MCC_VERB_ALTER_SCHED);
388     while (!(p_Mcr = qm_mc_result(p_QmPortal->p_LowQmPortal))) ;
389     ASSERT_COND((p_Mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_SCHED);
390     res = p_Mcr->result;
391     if (res != QM_MCR_RESULT_OK) {
392         FQUNLOCK(p_Fq);
393         PUNLOCK(p_QmPortal);
394         RETURN_ERROR(MINOR, E_INVALID_STATE, ("ALTER_SCHED failed: %s\n", mcr_result_str(res)));
395     }
396     p_Fq->state = qman_fq_state_sched;
397 
398     FQUNLOCK(p_Fq);
399     PUNLOCK(p_QmPortal);
400     return E_OK;
401 }
402 
/* Inline helper to reduce nesting in LoopMessageRing() */
/*
 * Apply the software state transition implied by a message-ring entry
 * (FQRL / FQRN / FQPN) to the FQ, under the FQ lock.
 */
static __inline__ void fq_state_change(struct qman_fq *p_Fq,
                                       struct qm_mr_entry *p_Msg,
                                       uint8_t verb)
{
    FQLOCK(p_Fq);
    switch(verb) {
        case QM_MR_VERB_FQRL:
            /* ORL drained: clear the ORL-outstanding flag. */
            ASSERT_COND(p_Fq->flags & QMAN_FQ_STATE_ORL);
            p_Fq->flags &= ~QMAN_FQ_STATE_ORL;
            break;
        case QM_MR_VERB_FQRN:
            /* Retirement completed: pick up NE/ORL from the message. */
            ASSERT_COND((p_Fq->state == qman_fq_state_parked) ||
                (p_Fq->state == qman_fq_state_sched));
            ASSERT_COND(p_Fq->flags & QMAN_FQ_STATE_CHANGING);
            p_Fq->flags &= ~QMAN_FQ_STATE_CHANGING;
            if (p_Msg->fq.fqs & QM_MR_FQS_NOTEMPTY)
                p_Fq->flags |= QMAN_FQ_STATE_NE;
            if (p_Msg->fq.fqs & QM_MR_FQS_ORLPRESENT)
                p_Fq->flags |= QMAN_FQ_STATE_ORL;
            p_Fq->state = qman_fq_state_retired;
            break;
        case QM_MR_VERB_FQPN:
            /* Park completed.  Last case: no break needed.  Other verbs
             * are filtered by the caller, hence no default. */
            ASSERT_COND(p_Fq->state == qman_fq_state_sched);
            ASSERT_COND(p_Fq->flags & QMAN_FQ_STATE_CHANGING);
            p_Fq->state = qman_fq_state_parked;
    }
    FQUNLOCK(p_Fq);
}
432 
/*
 * freeDrainedFq - Record that one FQ of an FQR has fully drained; once all
 * FQs of the FQR are drained, take them out of service, destroy and free
 * them, and (if a completion callback was registered) release the FQR
 * itself.
 *
 * Returns E_OK, or E_INVALID_STATE if qman_oos_fq() fails for any FQ.
 */
static t_Error freeDrainedFq(struct qman_fq *p_Fq)
{
    t_QmFqr     *p_QmFqr;
    uint32_t    i;

    ASSERT_COND(p_Fq);
    p_QmFqr = (t_QmFqr *)p_Fq->h_QmFqr;
    ASSERT_COND(p_QmFqr);

    /* Each FQ must be reported drained exactly once. */
    ASSERT_COND(!p_QmFqr->p_DrainedFqs[p_Fq->fqidOffset]);
    p_QmFqr->p_DrainedFqs[p_Fq->fqidOffset] = TRUE;
    p_QmFqr->numOfDrainedFqids++;
    if (p_QmFqr->numOfDrainedFqids == p_QmFqr->numOfFqids)
    {
        /* This was the last FQ: tear the whole FQR down. */
        for (i=0;i<p_QmFqr->numOfFqids;i++)
        {
            /* NOTE(review): returning from inside this loop leaves the
             * remaining FQs and p_DrainedFqs unfreed — confirm whether
             * the caller can recover from this path. */
            if ((p_QmFqr->p_Fqs[i]->state == qman_fq_state_retired) &&
                    (qman_oos_fq(p_QmFqr->h_QmPortal, p_QmFqr->p_Fqs[i]) != E_OK))
                RETURN_ERROR(MAJOR, E_INVALID_STATE, ("qman_oos_fq() failed!"));
            qman_destroy_fq(p_QmFqr->p_Fqs[i], 0);
            XX_FreeSmart(p_QmFqr->p_Fqs[i]);
        }
        XX_Free(p_QmFqr->p_DrainedFqs);
        p_QmFqr->p_DrainedFqs = NULL;

        /* Without a completion callback the FQR object itself is
         * presumably released elsewhere by its owner — verify. */
        if (p_QmFqr->f_CompletionCB)
        {
            p_QmFqr->f_CompletionCB(p_QmFqr->h_App, p_QmFqr);
            XX_Free(p_QmFqr->p_Fqs);
            if (p_QmFqr->fqidBase)
                QmFqidPut(p_QmFqr->h_Qm, p_QmFqr->fqidBase);
            XX_Free(p_QmFqr);
        }
    }

    return E_OK;
}
470 
471 static t_Error drainRetiredFq(struct qman_fq *p_Fq)
472 {
473     t_QmFqr     *p_QmFqr;
474 
475     ASSERT_COND(p_Fq);
476     p_QmFqr = (t_QmFqr *)p_Fq->h_QmFqr;
477     ASSERT_COND(p_QmFqr);
478 
479     if (p_Fq->flags & QMAN_FQ_STATE_NE)
480     {
481         if (qman_volatile_dequeue(p_QmFqr->h_QmPortal, p_Fq,
482                                 (QM_VDQCR_PRECEDENCE_VDQCR | QM_VDQCR_NUMFRAMES_TILLEMPTY)) != E_OK)
483 
484             RETURN_ERROR(MAJOR, E_INVALID_STATE, ("drain with volatile failed"));
485         return E_OK;
486     }
487     else
488         return freeDrainedFq(p_Fq);
489 }
490 
491 static e_RxStoreResponse drainCB(t_Handle h_App,
492                                  t_Handle h_QmFqr,
493                                  t_Handle h_QmPortal,
494                                  uint32_t fqidOffset,
495                                  t_DpaaFD *p_Frame)
496 {
497     UNUSED(h_App);
498     UNUSED(h_QmFqr);
499     UNUSED(h_QmPortal);
500     UNUSED(fqidOffset);
501     UNUSED(p_Frame);
502 
503     DBG(TRACE,("got fd for fqid %d", ((t_QmFqr *)h_QmFqr)->fqidBase + fqidOffset));
504     return e_RX_STORE_RESPONSE_CONTINUE;
505 }
506 
507 static void cb_ern_dcErn(t_Handle                   h_App,
508                          t_Handle                   h_QmPortal,
509                          struct qman_fq             *p_Fq,
510                          const struct qm_mr_entry   *p_Msg)
511 {
512     static int cnt = 0;
513     UNUSED(p_Fq);
514     UNUSED(p_Msg);
515     UNUSED(h_App);
516     UNUSED(h_QmPortal);
517 
518     XX_Print("cb_ern_dcErn_fqs() unimplemented %d\n", ++cnt);
519 }
520 
521 static void cb_fqs(t_Handle                   h_App,
522                    t_Handle                   h_QmPortal,
523                    struct qman_fq             *p_Fq,
524                    const struct qm_mr_entry   *p_Msg)
525 {
526     UNUSED(p_Msg);
527     UNUSED(h_App);
528     UNUSED(h_QmPortal);
529 
530     if (p_Fq->state == qman_fq_state_retired &&
531         !(p_Fq->flags & QMAN_FQ_STATE_ORL))
532         drainRetiredFq(p_Fq);
533 }
534 
535 static void null_cb_mr(t_Handle                   h_App,
536                        t_Handle                   h_QmPortal,
537                        struct qman_fq             *p_Fq,
538                        const struct qm_mr_entry   *p_Msg)
539 {
540     t_QmPortal      *p_QmPortal = (t_QmPortal *)h_QmPortal;
541 
542     UNUSED(p_Fq);UNUSED(h_App);
543 
544     if ((p_Msg->verb & QM_MR_VERB_DC_ERN) == QM_MR_VERB_DC_ERN)
545         XX_Print("Ignoring unowned MR frame on cpu %d, dc-portal 0x%02x.\n",
546                  p_QmPortal->p_LowQmPortal->config.cpu,p_Msg->dcern.portal);
547     else
548         XX_Print("Ignoring unowned MR frame on cpu %d, verb 0x%02x.\n",
549                  p_QmPortal->p_LowQmPortal->config.cpu,p_Msg->verb);
550 }
551 
/*
 * LoopMessageRing - Service a portal's non-DQRR interrupt sources.
 *
 * p_QmPortal: portal whose sources are serviced
 * is:         pending interrupt-source bits (QM_PIRQ_*)
 *
 * Handles congestion state change (CSCI) by querying congestion state and
 * invoking registered CG exception callbacks; EQCR interrupts (EQRI); and
 * message-ring entries (MRI): ERNs, DC-ERNs, and FQ state-change messages.
 * Returns the subset of 'is' bits that this function services.
 */
static uint32_t LoopMessageRing(t_QmPortal *p_QmPortal, uint32_t is)
{
    struct qm_mr_entry          *p_Msg;

    if (is & QM_PIRQ_CSCI) {
        struct qm_mc_result *p_Mcr;
        struct qman_cgrs    tmp;
        uint32_t            mask;
        unsigned int        i, j;

        /* Query the hardware's current congestion-group state. */
        NCSW_PLOCK(p_QmPortal);
        qm_mc_start(p_QmPortal->p_LowQmPortal);
        qm_mc_commit(p_QmPortal->p_LowQmPortal, QM_MCC_VERB_QUERYCONGESTION);
        while (!(p_Mcr = qm_mc_result(p_QmPortal->p_LowQmPortal))) ;

        /* cgrs[0] is the portal mask for its cg's, cgrs[1] is the
           previous state of cg's */
        for (i = 0; i < QM_MAX_NUM_OF_CGS/32; i++)
        {
            /* get curent state */
            tmp.q.__state[i] = p_Mcr->querycongestion.state.__state[i];
            /* keep only cg's that are registered for this portal */
            tmp.q.__state[i] &= p_QmPortal->cgrs[0].q.__state[i];
            /* handle only cg's that changed their state from previous exception */
            tmp.q.__state[i] ^= p_QmPortal->cgrs[1].q.__state[i];
            /* update previous */
            p_QmPortal->cgrs[1].q.__state[i] = p_Mcr->querycongestion.state.__state[i];
        }
        PUNLOCK(p_QmPortal);

        /* if in interrupt */
        /* call the callback routines for any CG with a changed state */
        for (i = 0; i < QM_MAX_NUM_OF_CGS/32; i++)
            for(j=0, mask = 0x80000000; j<32 ; j++, mask>>=1)
            {
                if(tmp.q.__state[i] & mask)
                {
                    t_QmCg *p_QmCg = (t_QmCg *)(p_QmPortal->cgsHandles[i*32 + j]);
                    if(p_QmCg->f_Exception)
                        p_QmCg->f_Exception(p_QmCg->h_App, e_QM_EX_CG_STATE_CHANGE);
                }
            }

    }


    if (is & QM_PIRQ_EQRI) {
        /* Acknowledge EQCR consumption and disable its threshold IRQ. */
        NCSW_PLOCK(p_QmPortal);
        qmPortalEqcrCceUpdate(p_QmPortal->p_LowQmPortal);
        qm_eqcr_set_ithresh(p_QmPortal->p_LowQmPortal, 0);
        PUNLOCK(p_QmPortal);
    }

    if (is & QM_PIRQ_MRI) {
        /* Consume every available message-ring entry, one at a time. */
mr_loop:
        qmPortalMrPvbUpdate(p_QmPortal->p_LowQmPortal);
        p_Msg = qm_mr_current(p_QmPortal->p_LowQmPortal);
        if (p_Msg) {
            /* Recover the FQ pointers encoded into the 32-bit context
             * fields (see aligned_int_from_ptr()). */
            struct qman_fq  *p_FqFqs  = ptr_from_aligned_int(p_Msg->fq.contextB);
            struct qman_fq  *p_FqErn  = ptr_from_aligned_int(p_Msg->ern.tag);
            uint8_t         verb    =(uint8_t)(p_Msg->verb & QM_MR_VERB_TYPE_MASK);
            t_QmRejectedFrameInfo   rejectedFrameInfo;

            memset(&rejectedFrameInfo, 0, sizeof(t_QmRejectedFrameInfo));
            if (!(verb & QM_MR_VERB_DC_ERN))
            {
                /* Software-portal ERN: translate the rejection code. */
                switch(p_Msg->ern.rc)
                {
                    case(QM_MR_RC_CGR_TAILDROP):
                        rejectedFrameInfo.rejectionCode = e_QM_RC_CG_TAILDROP;
                        rejectedFrameInfo.cg.cgId = (uint8_t)p_FqErn->cgr_groupid;
                        break;
                    case(QM_MR_RC_WRED):
                        rejectedFrameInfo.rejectionCode = e_QM_RC_CG_WRED;
                        rejectedFrameInfo.cg.cgId = (uint8_t)p_FqErn->cgr_groupid;
                        break;
                    case(QM_MR_RC_FQ_TAILDROP):
                        rejectedFrameInfo.rejectionCode = e_QM_RC_FQ_TAILDROP;
                        rejectedFrameInfo.cg.cgId = (uint8_t)p_FqErn->cgr_groupid;
                        break;
                    case(QM_MR_RC_ERROR):
                        break;
                    default:
                        REPORT_ERROR(MINOR, E_NOT_SUPPORTED, ("Unknown rejection code"));
                }
                if (!p_FqErn)
                    p_QmPortal->p_NullCB->ern(p_QmPortal->h_App, NULL, p_QmPortal, 0, (t_DpaaFD*)&p_Msg->ern.fd, &rejectedFrameInfo);
                else
                    p_FqErn->cb.ern(p_FqErn->h_App, p_FqErn->h_QmFqr, p_QmPortal, p_FqErn->fqidOffset, (t_DpaaFD*)&p_Msg->ern.fd, &rejectedFrameInfo);
            } else if (verb == QM_MR_VERB_DC_ERN)
            {
                /* Dedicated-channel ERN. */
                if (!p_FqErn)
                    p_QmPortal->p_NullCB->dc_ern(NULL, p_QmPortal, NULL, p_Msg);
                else
                    p_FqErn->cb.dc_ern(p_FqErn->h_App, p_QmPortal, p_FqErn, p_Msg);
            } else
            {
                /* FQ state-change message. */
                if (verb == QM_MR_VERB_FQRNI)
                    ; /* we drop FQRNIs on the floor */
                else if (!p_FqFqs)
                            p_QmPortal->p_NullCB->fqs(NULL, p_QmPortal, NULL, p_Msg);
                else if ((verb == QM_MR_VERB_FQRN) ||
                         (verb == QM_MR_VERB_FQRL) ||
                         (verb == QM_MR_VERB_FQPN))
                {
                    fq_state_change(p_FqFqs, p_Msg, verb);
                    p_FqFqs->cb.fqs(p_FqFqs->h_App, p_QmPortal, p_FqFqs, p_Msg);
                }
            }
            qm_mr_next(p_QmPortal->p_LowQmPortal);
            qmPortalMrCciConsume(p_QmPortal->p_LowQmPortal, 1);

            goto mr_loop;
        }
    }

    return is & (QM_PIRQ_CSCI | QM_PIRQ_EQCI | QM_PIRQ_EQRI | QM_PIRQ_MRI);
}
670 
/*
 * LoopDequeueRing - Consume DQRR entries until the ring is empty or a
 * callback asks to pause.
 *
 * h_QmPortal: the portal whose DQRR is serviced
 *
 * Unscheduled (volatile) dequeues are routed to the FQ's callback and may
 * complete a VDQCR drain; scheduled dequeues go to the FQ's callback or,
 * when no FQ context is attached, to the portal's null callback.  Entries
 * are consumed via DCA or CI confirmation depending on portal options.
 */
static void LoopDequeueRing(t_Handle h_QmPortal)
{
    struct qm_dqrr_entry        *p_Dq;
    struct qman_fq              *p_Fq;
    enum qman_cb_dqrr_result    res = qman_cb_dqrr_consume;
    e_RxStoreResponse           tmpRes;
    t_QmPortal                  *p_QmPortal = (t_QmPortal *)h_QmPortal;
    int                         prefetch = !(p_QmPortal->options & QMAN_PORTAL_FLAG_RSTASH);

    while (res != qman_cb_dqrr_pause)
    {
        /* Without hardware stashing, prefetch the next entry by hand. */
        if (prefetch)
            qmPortalDqrrPvbPrefetch(p_QmPortal->p_LowQmPortal);
        qmPortalDqrrPvbUpdate(p_QmPortal->p_LowQmPortal);
        p_Dq = qm_dqrr_current(p_QmPortal->p_LowQmPortal);
        if (!p_Dq)
            break;
	/* Decode the FQ pointer stored in the 32-bit contextB field. */
	p_Fq = ptr_from_aligned_int(p_Dq->contextB);
        if (p_Dq->stat & QM_DQRR_STAT_UNSCHEDULED) {
            /* We only set QMAN_FQ_STATE_NE when retiring, so we only need
             * to check for clearing it when doing volatile dequeues. It's
             * one less thing to check in the critical path (SDQCR). */
            tmpRes = p_Fq->cb.dqrr(p_Fq->h_App, p_Fq->h_QmFqr, p_QmPortal, p_Fq->fqidOffset, (t_DpaaFD*)&p_Dq->fd);
            if (tmpRes == e_RX_STORE_RESPONSE_PAUSE)
                res = qman_cb_dqrr_pause;
            /* Check for VDQCR completion */
            if (p_Dq->stat & QM_DQRR_STAT_DQCR_EXPIRED)
                p_Fq->flags &= ~QMAN_FQ_STATE_VDQCR;
            if (p_Dq->stat & QM_DQRR_STAT_FQ_EMPTY)
            {
                /* FQ fully drained: hand it over for release. */
                p_Fq->flags &= ~QMAN_FQ_STATE_NE;
                freeDrainedFq(p_Fq);
            }
        }
        else
        {
            /* Interpret 'dq' from the owner's perspective. */
            /* use portal default handlers */
            ASSERT_COND(p_Dq->fqid);
            if (p_Fq)
            {
                tmpRes = p_Fq->cb.dqrr(p_Fq->h_App,
                                       p_Fq->h_QmFqr,
                                       p_QmPortal,
                                       p_Fq->fqidOffset,
                                       (t_DpaaFD*)&p_Dq->fd);
                if (tmpRes == e_RX_STORE_RESPONSE_PAUSE)
                    res = qman_cb_dqrr_pause;
                else if (p_Fq->state == qman_fq_state_waiting_parked)
                    res = qman_cb_dqrr_park;
            }
            else
            {
                /* No FQ context: fall back to the portal's null handler. */
                tmpRes = p_QmPortal->p_NullCB->dqrr(p_QmPortal->h_App,
                                                    NULL,
                                                    p_QmPortal,
                                                    p_Dq->fqid,
                                                    (t_DpaaFD*)&p_Dq->fd);
                if (tmpRes == e_RX_STORE_RESPONSE_PAUSE)
                    res = qman_cb_dqrr_pause;
            }
        }

        /* Parking isn't possible unless HELDACTIVE was set. NB,
         * FORCEELIGIBLE implies HELDACTIVE, so we only need to
         * check for HELDACTIVE to cover both. */
        ASSERT_COND((p_Dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) ||
                    (res != qman_cb_dqrr_park));
        if (p_QmPortal->options & QMAN_PORTAL_FLAG_DCA) {
            /* Defer just means "skip it, I'll consume it myself later on" */
            if (res != qman_cb_dqrr_defer)
                qmPortalDqrrDcaConsume1ptr(p_QmPortal->p_LowQmPortal,
                                           p_Dq,
                                           (res == qman_cb_dqrr_park));
            qm_dqrr_next(p_QmPortal->p_LowQmPortal);
        } else {
            if (res == qman_cb_dqrr_park)
                /* The only thing to do for non-DCA is the park-request */
                qm_dqrr_park_ci(p_QmPortal->p_LowQmPortal);
            qm_dqrr_next(p_QmPortal->p_LowQmPortal);
            qmPortalDqrrCciConsume(p_QmPortal->p_LowQmPortal, 1);
        }
    }
}
755 
static void LoopDequeueRingDcaOptimized(t_Handle h_QmPortal)
{
    struct qm_dqrr_entry        *p_Dq;
    struct qman_fq              *p_Fq;
    enum qman_cb_dqrr_result    res = qman_cb_dqrr_consume;
    e_RxStoreResponse           tmpRes;
    t_QmPortal                  *p_QmPortal = (t_QmPortal *)h_QmPortal;

    /* Drain the dequeue-response ring (DQRR), delivering each entry to its
     * FQ's dqrr callback.  This variant acknowledges entries via DCA
     * (discrete consumption acknowledgment) rather than CI-consume writes.
     * The loop stops when the ring is empty or a callback asks to pause. */
    while (res != qman_cb_dqrr_pause)
    {
        qmPortalDqrrPvbUpdate(p_QmPortal->p_LowQmPortal);
        p_Dq = qm_dqrr_current(p_QmPortal->p_LowQmPortal);
        if (!p_Dq)
            break;
	/* contextB carries the owning qman_fq pointer (qm_new_fq() stores it
	 * there for non-tx FQs); may be NULL for unknown FQs. */
	p_Fq = ptr_from_aligned_int(p_Dq->contextB);
        if (p_Dq->stat & QM_DQRR_STAT_UNSCHEDULED) {
            /* We only set QMAN_FQ_STATE_NE when retiring, so we only need
             * to check for clearing it when doing volatile dequeues. It's
             * one less thing to check in the critical path (SDQCR). */
            tmpRes = p_Fq->cb.dqrr(p_Fq->h_App, p_Fq->h_QmFqr, p_QmPortal, p_Fq->fqidOffset, (t_DpaaFD*)&p_Dq->fd);
            if (tmpRes == e_RX_STORE_RESPONSE_PAUSE)
                res = qman_cb_dqrr_pause;
            /* Check for VDQCR completion */
            if (p_Dq->stat & QM_DQRR_STAT_DQCR_EXPIRED)
                p_Fq->flags &= ~QMAN_FQ_STATE_VDQCR;
            if (p_Dq->stat & QM_DQRR_STAT_FQ_EMPTY)
            {
                /* Volatile dequeue fully drained the FQ: clear not-empty
                 * and release the object if a free was deferred. */
                p_Fq->flags &= ~QMAN_FQ_STATE_NE;
                freeDrainedFq(p_Fq);
            }
        }
        else
        {
            /* Interpret 'dq' from the owner's perspective. */
            /* use portal default handlers */
            ASSERT_COND(p_Dq->fqid);
            if (p_Fq)
            {
                tmpRes = p_Fq->cb.dqrr(p_Fq->h_App,
                                       p_Fq->h_QmFqr,
                                       p_QmPortal,
                                       p_Fq->fqidOffset,
                                       (t_DpaaFD*)&p_Dq->fd);
                if (tmpRes == e_RX_STORE_RESPONSE_PAUSE)
                    res = qman_cb_dqrr_pause;
                else if (p_Fq->state == qman_fq_state_waiting_parked)
                    res = qman_cb_dqrr_park;
            }
            else
            {
                /* No FQ object attached to this entry: hand the frame to
                 * the portal's "null" callback with the raw FQID. */
                tmpRes = p_QmPortal->p_NullCB->dqrr(p_QmPortal->h_App,
                                                    NULL,
                                                    p_QmPortal,
                                                    p_Dq->fqid,
                                                    (t_DpaaFD*)&p_Dq->fd);
                if (tmpRes == e_RX_STORE_RESPONSE_PAUSE)
                    res = qman_cb_dqrr_pause;
            }
        }

        /* Parking isn't possible unless HELDACTIVE was set. NB,
         * FORCEELIGIBLE implies HELDACTIVE, so we only need to
         * check for HELDACTIVE to cover both. */
        ASSERT_COND((p_Dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) ||
                (res != qman_cb_dqrr_park));
        /* Defer just means "skip it, I'll consume it myself later on" */
        if (res != qman_cb_dqrr_defer)
            qmPortalDqrrDcaConsume1ptr(p_QmPortal->p_LowQmPortal,
                                       p_Dq,
                                       (res == qman_cb_dqrr_park));
        qm_dqrr_next(p_QmPortal->p_LowQmPortal);
    }
}
829 
static void LoopDequeueRingOptimized(t_Handle h_QmPortal)
{
    struct qm_dqrr_entry        *p_Dq;
    struct qman_fq              *p_Fq;
    enum qman_cb_dqrr_result    res = qman_cb_dqrr_consume;
    e_RxStoreResponse           tmpRes;
    t_QmPortal                  *p_QmPortal = (t_QmPortal *)h_QmPortal;

    /* Drain the dequeue-response ring (DQRR), delivering each entry to its
     * FQ's dqrr callback.  This is the non-DCA variant: each entry is
     * retired with an explicit CI-consume write after qm_dqrr_next().
     * The loop stops when the ring is empty or a callback asks to pause. */
    while (res != qman_cb_dqrr_pause)
    {
        qmPortalDqrrPvbUpdate(p_QmPortal->p_LowQmPortal);
        p_Dq = qm_dqrr_current(p_QmPortal->p_LowQmPortal);
        if (!p_Dq)
            break;
	/* contextB carries the owning qman_fq pointer (qm_new_fq() stores it
	 * there for non-tx FQs); may be NULL for unknown FQs. */
	p_Fq = ptr_from_aligned_int(p_Dq->contextB);
        if (p_Dq->stat & QM_DQRR_STAT_UNSCHEDULED) {
            /* We only set QMAN_FQ_STATE_NE when retiring, so we only need
             * to check for clearing it when doing volatile dequeues. It's
             * one less thing to check in the critical path (SDQCR). */
            tmpRes = p_Fq->cb.dqrr(p_Fq->h_App, p_Fq->h_QmFqr, p_QmPortal, p_Fq->fqidOffset, (t_DpaaFD*)&p_Dq->fd);
            if (tmpRes == e_RX_STORE_RESPONSE_PAUSE)
                res = qman_cb_dqrr_pause;
            /* Check for VDQCR completion */
            if (p_Dq->stat & QM_DQRR_STAT_DQCR_EXPIRED)
                p_Fq->flags &= ~QMAN_FQ_STATE_VDQCR;
            if (p_Dq->stat & QM_DQRR_STAT_FQ_EMPTY)
            {
                /* Volatile dequeue fully drained the FQ: clear not-empty
                 * and release the object if a free was deferred. */
                p_Fq->flags &= ~QMAN_FQ_STATE_NE;
                freeDrainedFq(p_Fq);
            }
        }
        else
        {
            /* Interpret 'dq' from the owner's perspective. */
            /* use portal default handlers */
            ASSERT_COND(p_Dq->fqid);
            if (p_Fq)
            {
                tmpRes = p_Fq->cb.dqrr(p_Fq->h_App,
                                       p_Fq->h_QmFqr,
                                       p_QmPortal,
                                       p_Fq->fqidOffset,
                                       (t_DpaaFD*)&p_Dq->fd);
                if (tmpRes == e_RX_STORE_RESPONSE_PAUSE)
                    res = qman_cb_dqrr_pause;
                else if (p_Fq->state == qman_fq_state_waiting_parked)
                    res = qman_cb_dqrr_park;
            }
            else
            {
                /* No FQ object attached to this entry: hand the frame to
                 * the portal's "null" callback with the raw FQID. */
                tmpRes = p_QmPortal->p_NullCB->dqrr(p_QmPortal->h_App,
                                                    NULL,
                                                    p_QmPortal,
                                                    p_Dq->fqid,
                                                    (t_DpaaFD*)&p_Dq->fd);
                if (tmpRes == e_RX_STORE_RESPONSE_PAUSE)
                    res = qman_cb_dqrr_pause;
            }
        }

        /* Parking isn't possible unless HELDACTIVE was set. NB,
         * FORCEELIGIBLE implies HELDACTIVE, so we only need to
         * check for HELDACTIVE to cover both. */
        ASSERT_COND((p_Dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) ||
                (res != qman_cb_dqrr_park));
        if (res == qman_cb_dqrr_park)
            /* The only thing to do for non-DCA is the park-request */
            qm_dqrr_park_ci(p_QmPortal->p_LowQmPortal);
        qm_dqrr_next(p_QmPortal->p_LowQmPortal);
        qmPortalDqrrCciConsume(p_QmPortal->p_LowQmPortal, 1);
    }
}
902 
903 /* Portal interrupt handler */
904 static void portal_isr(void *ptr)
905 {
906     t_QmPortal  *p_QmPortal = ptr;
907     uint32_t    event = 0;
908     uint32_t    enableEvents = qm_isr_enable_read(p_QmPortal->p_LowQmPortal);
909 
910     DBG(TRACE, ("software-portal %d got interrupt", p_QmPortal->p_LowQmPortal->config.cpu));
911 
912     event |= (qm_isr_status_read(p_QmPortal->p_LowQmPortal) &
913             enableEvents);
914 
915     qm_isr_status_clear(p_QmPortal->p_LowQmPortal, event);
916     /* Only do fast-path handling if it's required */
917     if (/*(event & QM_PIRQ_DQRI) &&*/
918         (p_QmPortal->options & QMAN_PORTAL_FLAG_IRQ_FAST))
919         p_QmPortal->f_LoopDequeueRingCB(p_QmPortal);
920     if (p_QmPortal->options & QMAN_PORTAL_FLAG_IRQ_SLOW)
921         LoopMessageRing(p_QmPortal, event);
922 }
923 
924 
/* Issue a QUERYFQ_NP management command (query the "non-programmable"
 * fields of an FQ) and copy the response into *p_Np.
 * The portal is locked for the whole command/response exchange and the
 * response is busy-waited for.
 * Returns E_OK, or E_INVALID_STATE if QMan rejected the query. */
static t_Error qman_query_fq_np(t_QmPortal *p_QmPortal, struct qman_fq *p_Fq, struct qm_mcr_queryfq_np *p_Np)
{
    struct qm_mc_command    *p_Mcc;
    struct qm_mc_result     *p_Mcr;
    uint8_t                 res;

    NCSW_PLOCK(p_QmPortal);
    p_Mcc = qm_mc_start(p_QmPortal->p_LowQmPortal);
    p_Mcc->queryfq_np.fqid = p_Fq->fqid;
    qm_mc_commit(p_QmPortal->p_LowQmPortal, QM_MCC_VERB_QUERYFQ_NP);
    /* Spin until the management-command response arrives. */
    while (!(p_Mcr = qm_mc_result(p_QmPortal->p_LowQmPortal))) ;
    ASSERT_COND((p_Mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
    res = p_Mcr->result;
    if (res == QM_MCR_RESULT_OK)
        /* Copy out while still holding the lock: the MCR slot is reused. */
        *p_Np = p_Mcr->queryfq_np;
    PUNLOCK(p_QmPortal);
    if (res != QM_MCR_RESULT_OK)
        RETURN_ERROR(MINOR, E_INVALID_STATE, ("QUERYFQ_NP failed: %s\n", mcr_result_str(res)));
    return E_OK;
}
945 
946 static uint8_t QmCgGetCgId(t_Handle h_QmCg)
947 {
948    t_QmCg *p_QmCg = (t_QmCg *)h_QmCg;
949 
950    return p_QmCg->id;
951 
952 }
953 
954 static t_Error qm_new_fq(t_QmPortal                         *p_QmPortal,
955                          uint32_t                           fqid,
956                          uint32_t                           fqidOffset,
957                          uint32_t                           channel,
958                          uint32_t                           wqid,
959                          uint16_t                           count,
960                          uint32_t                           flags,
961                          t_QmFqrCongestionAvoidanceParams   *p_CgParams,
962                          t_QmContextA                       *p_ContextA,
963                          t_QmContextB                       *p_ContextB,
964                          bool                               initParked,
965                          t_Handle                           h_QmFqr,
966                          struct qman_fq                     **p_Fqs)
967 {
968     struct qman_fq          *p_Fq = NULL;
969     struct qm_mcc_initfq    fq_opts;
970     uint32_t                i;
971     t_Error                 err = E_OK;
972     int         gap, tmp;
973     uint32_t    tmpA, tmpN, ta=0, tn=0, initFqFlag;
974 
975     ASSERT_COND(p_QmPortal);
976     ASSERT_COND(count);
977 
978     for(i=0;i<count;i++)
979     {
980         p_Fq = (struct qman_fq *)XX_MallocSmart(sizeof(struct qman_fq), 0, 64);
981         if (!p_Fq)
982             RETURN_ERROR(MAJOR, E_NO_MEMORY, ("FQ obj!!!"));
983         memset(p_Fq, 0, sizeof(struct qman_fq));
984         p_Fq->cb.dqrr     = p_QmPortal->f_DfltFrame;
985         p_Fq->cb.ern      = p_QmPortal->f_RejectedFrame;
986         p_Fq->cb.dc_ern   = cb_ern_dcErn;
987         p_Fq->cb.fqs      = cb_fqs;
988         p_Fq->h_App       = p_QmPortal->h_App;
989         p_Fq->h_QmFqr     = h_QmFqr;
990         p_Fq->fqidOffset  = fqidOffset;
991         p_Fqs[i] = p_Fq;
992         if ((err = qman_create_fq(p_QmPortal,(uint32_t)(fqid + i), 0, p_Fqs[i])) != E_OK)
993             break;
994     }
995 
996     if (err != E_OK)
997     {
998         for(i=0;i<count;i++)
999             if (p_Fqs[i])
1000             {
1001                 XX_FreeSmart(p_Fqs[i]);
1002                 p_Fqs[i] = NULL;
1003             }
1004         RETURN_ERROR(MINOR, err, ("Failed to create Fqs"));
1005     }
1006 
1007     memset(&fq_opts,0,sizeof(fq_opts));
1008     fq_opts.fqid = fqid;
1009     fq_opts.count = (uint16_t)(count-1);
1010     fq_opts.we_mask |= QM_INITFQ_WE_DESTWQ;
1011     fq_opts.fqd.dest.channel = channel;
1012     fq_opts.fqd.dest.wq = wqid;
1013     fq_opts.we_mask |= QM_INITFQ_WE_FQCTRL;
1014     fq_opts.fqd.fq_ctrl = (uint16_t)flags;
1015 
1016     if ((flags & QM_FQCTRL_CGE) || (flags & QM_FQCTRL_TDE))
1017         ASSERT_COND(p_CgParams);
1018 
1019     if(flags & QM_FQCTRL_CGE)
1020     {
1021         ASSERT_COND(p_CgParams->h_QmCg);
1022 
1023         /* CG OAC and FQ TD may not be configured at the same time. if both are required,
1024            than we configure CG first, and the FQ TD later - see below. */
1025         fq_opts.fqd.cgid = QmCgGetCgId(p_CgParams->h_QmCg);
1026         fq_opts.we_mask |= QM_INITFQ_WE_CGID;
1027         if(p_CgParams->overheadAccountingLength)
1028         {
1029             fq_opts.we_mask |= QM_INITFQ_WE_OAC;
1030             fq_opts.we_mask &= ~QM_INITFQ_WE_TDTHRESH;
1031             fq_opts.fqd.td_thresh = (uint16_t)(QM_FQD_TD_THRESH_OAC_EN | p_CgParams->overheadAccountingLength);
1032         }
1033     }
1034     if((flags & QM_FQCTRL_TDE) && (!p_CgParams->overheadAccountingLength))
1035     {
1036         ASSERT_COND(p_CgParams->fqTailDropThreshold);
1037 
1038         fq_opts.we_mask |= QM_INITFQ_WE_TDTHRESH;
1039 
1040             /* express thresh as ta*2^tn */
1041             gap = (int)p_CgParams->fqTailDropThreshold;
1042             for (tmpA=0 ; tmpA<256; tmpA++ )
1043                 for (tmpN=0 ; tmpN<32; tmpN++ )
1044                 {
1045                     tmp = ABS((int)(p_CgParams->fqTailDropThreshold - tmpA*(1<<tmpN)));
1046                     if (tmp < gap)
1047                     {
1048                        ta = tmpA;
1049                        tn = tmpN;
1050                        gap = tmp;
1051                     }
1052                 }
1053             fq_opts.fqd.td.exp = tn;
1054             fq_opts.fqd.td.mant = ta;
1055     }
1056 
1057     if (p_ContextA)
1058     {
1059         fq_opts.we_mask |= QM_INITFQ_WE_CONTEXTA;
1060         memcpy((void*)&fq_opts.fqd.context_a, p_ContextA, sizeof(t_QmContextA));
1061     }
1062     /* If this FQ will not be used for tx, we can use contextB field */
1063     if (fq_opts.fqd.dest.channel < e_QM_FQ_CHANNEL_FMAN0_SP0)
1064     {
1065             fq_opts.we_mask |= QM_INITFQ_WE_CONTEXTB;
1066             fq_opts.fqd.context_b = aligned_int_from_ptr(p_Fqs[0]);
1067     }
1068     else if (p_ContextB) /* Tx-Queue */
1069     {
1070         fq_opts.we_mask |= QM_INITFQ_WE_CONTEXTB;
1071         memcpy((void*)&fq_opts.fqd.context_b, p_ContextB, sizeof(t_QmContextB));
1072     }
1073 
1074     if((flags & QM_FQCTRL_TDE) && (p_CgParams->overheadAccountingLength))
1075         initFqFlag = 0;
1076     else
1077         initFqFlag = (uint32_t)(initParked?0:QMAN_INITFQ_FLAG_SCHED);
1078 
1079     if ((err = qman_init_fq(p_QmPortal, p_Fqs[0], initFqFlag, &fq_opts)) != E_OK)
1080     {
1081         for(i=0;i<count;i++)
1082             if (p_Fqs[i])
1083             {
1084                 XX_FreeSmart(p_Fqs[i]);
1085                 p_Fqs[i] = NULL;
1086             }
1087         RETURN_ERROR(MINOR, err, ("Failed to init Fqs [%d-%d]", fqid, fqid+count-1));
1088     }
1089 
1090     /* if both CG OAC and FQ TD are needed, we call qman_init_fq again, this time for the FQ TD only */
1091     if((flags & QM_FQCTRL_TDE) && (p_CgParams->overheadAccountingLength))
1092     {
1093         ASSERT_COND(p_CgParams->fqTailDropThreshold);
1094 
1095         fq_opts.we_mask = QM_INITFQ_WE_TDTHRESH;
1096 
1097         /* express thresh as ta*2^tn */
1098         gap = (int)p_CgParams->fqTailDropThreshold;
1099         for (tmpA=0 ; tmpA<256; tmpA++ )
1100             for (tmpN=0 ; tmpN<32; tmpN++ )
1101             {
1102                 tmp = ABS((int)(p_CgParams->fqTailDropThreshold - tmpA*(1<<tmpN)));
1103                 if (tmp < gap)
1104                 {
1105                    ta = tmpA;
1106                    tn = tmpN;
1107                    gap = tmp;
1108                 }
1109             }
1110         fq_opts.fqd.td.exp = tn;
1111         fq_opts.fqd.td.mant = ta;
1112         if ((err = qman_init_fq(p_QmPortal, p_Fqs[0], (uint32_t)(initParked?0:QMAN_INITFQ_FLAG_SCHED), &fq_opts)) != E_OK)
1113         {
1114             for(i=0;i<count;i++)
1115                 if (p_Fqs[i])
1116                 {
1117                     XX_FreeSmart(p_Fqs[i]);
1118                     p_Fqs[i] = NULL;
1119                 }
1120             RETURN_ERROR(MINOR, err, ("Failed to init Fqs"));
1121         }
1122     }
1123 
1124 
1125     for(i=1;i<count;i++)
1126     {
1127         memcpy(p_Fqs[i], p_Fqs[0], sizeof(struct qman_fq));
1128         p_Fqs[i]->fqid += i;
1129     }
1130 
1131     return err;
1132 }
1133 
1134 
1135 static t_Error qm_free_fq(t_QmPortal *p_QmPortal, struct qman_fq *p_Fq)
1136 {
1137     uint32_t flags=0;
1138 
1139     if (qman_retire_fq(p_QmPortal, p_Fq, &flags, false) != E_OK)
1140         RETURN_ERROR(MAJOR, E_INVALID_STATE, ("qman_retire_fq() failed!"));
1141 
1142     if (flags & QMAN_FQ_STATE_CHANGING)
1143         RETURN_ERROR(MAJOR, E_INVALID_STATE, ("fq %d currently in use, will be retired", p_Fq->fqid));
1144 
1145     if (flags & QMAN_FQ_STATE_NE)
1146         RETURN_ERROR(MAJOR, E_INVALID_STATE, ("qman_retire_fq() failed;" \
1147                                           "Frame Queue Not Empty, Need to dequeue"));
1148 
1149     if (qman_oos_fq(p_QmPortal, p_Fq) != E_OK)
1150         RETURN_ERROR(MAJOR, E_INVALID_STATE, ("qman_oos_fq() failed!"));
1151 
1152     qman_destroy_fq(p_Fq,0);
1153 
1154     return E_OK;
1155 }
1156 
1157 static void qman_disable_portal(t_QmPortal *p_QmPortal)
1158 {
1159     NCSW_PLOCK(p_QmPortal);
1160     if (!(p_QmPortal->disable_count++))
1161         qm_dqrr_set_maxfill(p_QmPortal->p_LowQmPortal, 0);
1162     PUNLOCK(p_QmPortal);
1163 }
1164 
1165 
1166 /* quiesce SDQCR/VDQCR, then drain till h/w wraps up anything it
1167  * was doing (5ms is more than enough to ensure it's done). */
1168 static void clean_dqrr_mr(t_QmPortal *p_QmPortal)
1169 {
1170     struct qm_dqrr_entry    *p_Dq;
1171     struct qm_mr_entry      *p_Msg;
1172     int                     idle = 0;
1173 
1174     qm_dqrr_sdqcr_set(p_QmPortal->p_LowQmPortal, 0);
1175     qm_dqrr_vdqcr_set(p_QmPortal->p_LowQmPortal, 0);
1176 drain_loop:
1177     qmPortalDqrrPvbPrefetch(p_QmPortal->p_LowQmPortal);
1178     qmPortalDqrrPvbUpdate(p_QmPortal->p_LowQmPortal);
1179     qmPortalMrPvbUpdate(p_QmPortal->p_LowQmPortal);
1180     p_Dq = qm_dqrr_current(p_QmPortal->p_LowQmPortal);
1181     p_Msg = qm_mr_current(p_QmPortal->p_LowQmPortal);
1182     if (p_Dq) {
1183         qm_dqrr_next(p_QmPortal->p_LowQmPortal);
1184         qmPortalDqrrCciConsume(p_QmPortal->p_LowQmPortal, 1);
1185     }
1186     if (p_Msg) {
1187     qm_mr_next(p_QmPortal->p_LowQmPortal);
1188         qmPortalMrCciConsume(p_QmPortal->p_LowQmPortal, 1);
1189     }
1190     if (!p_Dq && !p_Msg) {
1191     if (++idle < 5) {
1192     XX_UDelay(1000);
1193     goto drain_loop;
1194     }
1195     } else {
1196     idle = 0;
1197     goto drain_loop;
1198     }
1199 }
1200 
/* Bring up every sub-component of a software portal (EQCR, DQRR, MR, MC,
 * ISR), configure interrupt gating, verify all rings start out clean, and
 * enable dequeues.  On any failure the already-initialized components are
 * torn down in reverse order through the fail_* labels.
 * Returns E_OK or an error wrapped in E_INVALID_STATE. */
static t_Error qman_create_portal(t_QmPortal *p_QmPortal,
                                   uint32_t flags,
                                   uint32_t sdqcrFlags,
                                   uint8_t  dqrrSize)
{
    const struct qm_portal_config   *p_Config = &(p_QmPortal->p_LowQmPortal->config);
    int                             ret = 0;
    t_Error                         err;
    uint32_t                        isdr;

    if ((err = qm_eqcr_init(p_QmPortal->p_LowQmPortal, e_QmPortalPVB, e_QmPortalEqcrCCE)) != E_OK)
        RETURN_ERROR(MINOR, err, ("Qman EQCR initialization failed\n"));

    /* DQRR mode follows the portal flags: push vs pull dequeue, DCA vs CCI
     * consumption, and optional result/data stashing. */
    if (qm_dqrr_init(p_QmPortal->p_LowQmPortal,
                     sdqcrFlags ? e_QmPortalDequeuePushMode : e_QmPortalDequeuePullMode,
                     e_QmPortalPVB,
                     (flags & QMAN_PORTAL_FLAG_DCA) ? e_QmPortalDqrrDCA : e_QmPortalDqrrCCI,
                     dqrrSize,
                     (flags & QMAN_PORTAL_FLAG_RSTASH) ? 1 : 0,
                     (flags & QMAN_PORTAL_FLAG_DSTASH) ? 1 : 0)) {
        REPORT_ERROR(MAJOR, E_INVALID_STATE, ("DQRR initialization failed"));
        goto fail_dqrr;
    }

    if (qm_mr_init(p_QmPortal->p_LowQmPortal, e_QmPortalPVB, e_QmPortalMrCCI)) {
        REPORT_ERROR(MAJOR, E_INVALID_STATE, ("MR initialization failed"));
        goto fail_mr;
    }
    if (qm_mc_init(p_QmPortal->p_LowQmPortal)) {
        REPORT_ERROR(MAJOR, E_INVALID_STATE, ("MC initialization failed"));
        goto fail_mc;
    }
    if (qm_isr_init(p_QmPortal->p_LowQmPortal)) {
        REPORT_ERROR(MAJOR, E_INVALID_STATE, ("ISR initialization failed"));
        goto fail_isr;
    }
    /* static interrupt-gating controls */
    qm_dqrr_set_ithresh(p_QmPortal->p_LowQmPortal, 12);
    qm_mr_set_ithresh(p_QmPortal->p_LowQmPortal, 4);
    qm_isr_set_iperiod(p_QmPortal->p_LowQmPortal, 100);
    p_QmPortal->options = flags;
    /* Start with every interrupt source disabled (isdr = all ones), then
     * progressively re-enable sources below as each ring is proven clean. */
    isdr = 0xffffffff;
    qm_isr_status_clear(p_QmPortal->p_LowQmPortal, 0xffffffff);
    qm_isr_enable_write(p_QmPortal->p_LowQmPortal, DEFAULT_portalExceptions);
    qm_isr_disable_write(p_QmPortal->p_LowQmPortal, isdr);
    if (flags & QMAN_PORTAL_FLAG_IRQ)
    {
        XX_SetIntr(p_Config->irq, portal_isr, p_QmPortal);
        XX_EnableIntr(p_Config->irq);
        qm_isr_uninhibit(p_QmPortal->p_LowQmPortal);
    } else
        /* without IRQ, we can't block */
        flags &= ~QMAN_PORTAL_FLAG_WAIT;
    /* Need EQCR to be empty before continuing */
    isdr ^= QM_PIRQ_EQCI;
    qm_isr_disable_write(p_QmPortal->p_LowQmPortal, isdr);
    ret = qm_eqcr_get_fill(p_QmPortal->p_LowQmPortal);
    if (ret) {
        REPORT_ERROR(MAJOR, E_INVALID_STATE, ("EQCR unclean"));
        goto fail_eqcr_empty;
    }
    /* Likewise require DQRR and MR to be empty before going live. */
    isdr ^= (QM_PIRQ_DQRI | QM_PIRQ_MRI);
    qm_isr_disable_write(p_QmPortal->p_LowQmPortal, isdr);
    if (qm_dqrr_current(p_QmPortal->p_LowQmPortal) != NULL)
    {
        REPORT_ERROR(MAJOR, E_INVALID_STATE, ("DQRR unclean"));
goto fail_dqrr_mr_empty;
    }
    if (qm_mr_current(p_QmPortal->p_LowQmPortal) != NULL)
    {
        REPORT_ERROR(MAJOR, E_INVALID_STATE, ("MR unclean"));
goto fail_dqrr_mr_empty;
    }
    /* All rings clean: enable every source and start static dequeues. */
    qm_isr_disable_write(p_QmPortal->p_LowQmPortal, 0);
    qm_dqrr_sdqcr_set(p_QmPortal->p_LowQmPortal, sdqcrFlags);
    return E_OK;
fail_dqrr_mr_empty:
fail_eqcr_empty:
    qm_isr_finish(p_QmPortal->p_LowQmPortal);
fail_isr:
    qm_mc_finish(p_QmPortal->p_LowQmPortal);
fail_mc:
    qm_mr_finish(p_QmPortal->p_LowQmPortal);
fail_mr:
    qm_dqrr_finish(p_QmPortal->p_LowQmPortal);
fail_dqrr:
    qm_eqcr_finish(p_QmPortal->p_LowQmPortal);
    return ERROR_CODE(E_INVALID_STATE);
}
1290 
1291 static void qman_destroy_portal(t_QmPortal *p_QmPortal)
1292 {
1293     /* NB we do this to "quiesce" EQCR. If we add enqueue-completions or
1294      * something related to QM_PIRQ_EQCI, this may need fixing. */
1295     qmPortalEqcrCceUpdate(p_QmPortal->p_LowQmPortal);
1296     if (p_QmPortal->options & QMAN_PORTAL_FLAG_IRQ)
1297     {
1298         XX_DisableIntr(p_QmPortal->p_LowQmPortal->config.irq);
1299         XX_FreeIntr(p_QmPortal->p_LowQmPortal->config.irq);
1300     }
1301     qm_isr_finish(p_QmPortal->p_LowQmPortal);
1302     qm_mc_finish(p_QmPortal->p_LowQmPortal);
1303     qm_mr_finish(p_QmPortal->p_LowQmPortal);
1304     qm_dqrr_finish(p_QmPortal->p_LowQmPortal);
1305     qm_eqcr_finish(p_QmPortal->p_LowQmPortal);
1306 }
1307 
1308 static inline struct qm_eqcr_entry *try_eq_start(t_QmPortal *p_QmPortal)
1309 {
1310     struct qm_eqcr_entry    *p_Eq;
1311     uint8_t                 avail;
1312 
1313     avail = qm_eqcr_get_avail(p_QmPortal->p_LowQmPortal);
1314     if (avail == EQCR_THRESH)
1315         qmPortalEqcrCcePrefetch(p_QmPortal->p_LowQmPortal);
1316     else if (avail < EQCR_THRESH)
1317             qmPortalEqcrCceUpdate(p_QmPortal->p_LowQmPortal);
1318     p_Eq = qm_eqcr_start(p_QmPortal->p_LowQmPortal);
1319 
1320     return p_Eq;
1321 }
1322 
1323 
1324 static t_Error qman_orp_update(t_QmPortal   *p_QmPortal,
1325                                uint32_t     orpId,
1326                                uint16_t     orpSeqnum,
1327                                uint32_t     flags)
1328 {
1329     struct qm_eqcr_entry *p_Eq;
1330 
1331     NCSW_PLOCK(p_QmPortal);
1332     p_Eq = try_eq_start(p_QmPortal);
1333     if (!p_Eq)
1334     {
1335         PUNLOCK(p_QmPortal);
1336         return ERROR_CODE(E_BUSY);
1337     }
1338 
1339     if (flags & QMAN_ENQUEUE_FLAG_NESN)
1340         orpSeqnum |= QM_EQCR_SEQNUM_NESN;
1341     else
1342         /* No need to check 4 QMAN_ENQUEUE_FLAG_HOLE */
1343         orpSeqnum &= ~QM_EQCR_SEQNUM_NESN;
1344     p_Eq->seqnum  = orpSeqnum;
1345     p_Eq->orp     = orpId;
1346 qmPortalEqcrPvbCommit(p_QmPortal->p_LowQmPortal, (uint8_t)QM_EQCR_VERB_ORP);
1347 
1348     PUNLOCK(p_QmPortal);
1349     return E_OK;
1350 }
1351 
1352 static __inline__ t_Error CheckStashParams(t_QmFqrParams *p_QmFqrParams)
1353 {
1354     ASSERT_COND(p_QmFqrParams);
1355 
1356     if (p_QmFqrParams->stashingParams.frameAnnotationSize > QM_CONTEXTA_MAX_STASH_SIZE)
1357         RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Frame Annotation Size Exceeded Max Stash Size(%d)", QM_CONTEXTA_MAX_STASH_SIZE));
1358     if (p_QmFqrParams->stashingParams.frameDataSize > QM_CONTEXTA_MAX_STASH_SIZE)
1359         RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Frame Data Size Exceeded Max Stash Size(%d)", QM_CONTEXTA_MAX_STASH_SIZE));
1360     if (p_QmFqrParams->stashingParams.fqContextSize > QM_CONTEXTA_MAX_STASH_SIZE)
1361         RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Frame Context Size Exceeded Max Stash Size(%d)", QM_CONTEXTA_MAX_STASH_SIZE));
1362     if (p_QmFqrParams->stashingParams.fqContextSize)
1363     {
1364         if (!p_QmFqrParams->stashingParams.fqContextAddr)
1365             RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("FQ Context Address Must be givven"));
1366         if (!IS_ALIGNED(p_QmFqrParams->stashingParams.fqContextAddr, CACHELINE_SIZE))
1367             RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("FQ Context Address Must be aligned to %d", CACHELINE_SIZE));
1368         if (p_QmFqrParams->stashingParams.fqContextAddr & 0xffffff0000000000LL)
1369             RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("FQ Context Address May be up to 40 bit"));
1370     }
1371 
1372     return E_OK;
1373 }
1374 
1375 static t_Error QmPortalRegisterCg(t_Handle h_QmPortal, t_Handle h_QmCg, uint8_t  cgId)
1376 {
1377     t_QmPortal  *p_QmPortal = (t_QmPortal *)h_QmPortal;
1378 
1379     /* cgrs[0] is the mask of registered CG's*/
1380     if(p_QmPortal->cgrs[0].q.__state[cgId/32] & (0x80000000 >> (cgId % 32)))
1381         RETURN_ERROR(MINOR, E_BUSY, ("CG already used"));
1382 
1383     p_QmPortal->cgrs[0].q.__state[cgId/32] |=  0x80000000 >> (cgId % 32);
1384     p_QmPortal->cgsHandles[cgId] = h_QmCg;
1385 
1386     return E_OK;
1387 }
1388 
1389 static t_Error QmPortalUnregisterCg(t_Handle h_QmPortal, uint8_t  cgId)
1390 {
1391     t_QmPortal  *p_QmPortal = (t_QmPortal *)h_QmPortal;
1392 
1393     /* cgrs[0] is the mask of registered CG's*/
1394     if(!(p_QmPortal->cgrs[0].q.__state[cgId/32] & (0x80000000 >> (cgId % 32))))
1395         RETURN_ERROR(MINOR, E_BUSY, ("CG is not in use"));
1396 
1397     p_QmPortal->cgrs[0].q.__state[cgId/32] &=  ~0x80000000 >> (cgId % 32);
1398     p_QmPortal->cgsHandles[cgId] = NULL;
1399 
1400     return E_OK;
1401 }
1402 
1403 static e_DpaaSwPortal QmPortalGetSwPortalId(t_Handle h_QmPortal)
1404 {
1405     t_QmPortal *p_QmPortal = (t_QmPortal *)h_QmPortal;
1406 
1407     return (e_DpaaSwPortal)p_QmPortal->p_LowQmPortal->config.cpu;
1408 }
1409 
1410 static t_Error CalcWredCurve(t_QmCgWredCurve *p_WredCurve, uint32_t  *p_CurveWord)
1411 {
1412     uint32_t    maxP, roundDown, roundUp, tmpA, tmpN;
1413     uint32_t    ma=0, mn=0, slope, sa=0, sn=0, pn;
1414     int         pres = 1000;
1415     int         gap, tmp;
1416 
1417 /*  TODO - change maxTh to uint64_t?
1418    if(p_WredCurve->maxTh > (1<<39))
1419         RETURN_ERROR(MINOR, E_INVALID_VALUE, ("maxTh is not in range"));*/
1420 
1421     /* express maxTh as ma*2^mn */
1422      gap = (int)p_WredCurve->maxTh;
1423      for (tmpA=0 ; tmpA<256; tmpA++ )
1424          for (tmpN=0 ; tmpN<32; tmpN++ )
1425          {
1426              tmp = ABS((int)(p_WredCurve->maxTh - tmpA*(1<<tmpN)));
1427              if (tmp < gap)
1428              {
1429                 ma = tmpA;
1430                 mn = tmpN;
1431                 gap = tmp;
1432              }
1433          }
1434      ASSERT_COND(ma <256);
1435      ASSERT_COND(mn <32);
1436      p_WredCurve->maxTh = ma*(1<<mn);
1437 
1438      if(p_WredCurve->maxTh <= p_WredCurve->minTh)
1439         RETURN_ERROR(MINOR, E_INVALID_VALUE, ("maxTh must be larger than minTh"));
1440      if(p_WredCurve->probabilityDenominator > 64)
1441         RETURN_ERROR(MINOR, E_INVALID_VALUE, ("probabilityDenominator mustn't be 1-64"));
1442 
1443     /* first we translate from Cisco probabilityDenominator
1444        to 256 fixed denominator, result must be divisible by 4. */
1445     /* we multiply by a fixed value to get better accuracy (without
1446        using floating point) */
1447     maxP = (uint32_t)(256*1000/p_WredCurve->probabilityDenominator);
1448     if (maxP % 4*pres)
1449     {
1450         roundDown  = maxP + (maxP % (4*pres));
1451         roundUp = roundDown + 4*pres;
1452         if((roundUp - maxP) > (maxP - roundDown))
1453             maxP = roundDown;
1454         else
1455             maxP = roundUp;
1456     }
1457     maxP = maxP/pres;
1458     ASSERT_COND(maxP <= 256);
1459     pn = (uint8_t)(maxP/4 - 1);
1460 
1461     if(maxP >= (p_WredCurve->maxTh - p_WredCurve->minTh))
1462         RETURN_ERROR(MINOR, E_INVALID_VALUE, ("Due to probabilityDenominator selected, maxTh-minTh must be larger than %d", maxP));
1463 
1464     pres = 1000000;
1465     slope = maxP*pres/(p_WredCurve->maxTh - p_WredCurve->minTh);
1466     /* express slope as sa/2^sn */
1467     gap = (int)slope;
1468     for (tmpA=(uint32_t)(64*pres) ; tmpA<128*pres; tmpA += pres )
1469         for (tmpN=7 ; tmpN<64; tmpN++ )
1470         {
1471             tmp = ABS((int)(slope - tmpA/(1UL<<(tmpN%32))));
1472             if (tmp < gap)
1473             {
1474                sa = tmpA;
1475                sn = tmpN;
1476                gap = tmp;
1477             }
1478         }
1479     sa = sa/pres;
1480     ASSERT_COND(sa<128 && sa>=64);
1481     ASSERT_COND(sn<64 && sn>=7);
1482 
1483     *p_CurveWord = ((ma << 24) |
1484                     (mn << 19) |
1485                     (sa << 12) |
1486                     (sn << 6) |
1487                     pn);
1488 
1489     return E_OK;
1490 }
1491 
/* Synchronously pull one frame via the PDQCR (pre-dequeue command register)
 * and copy its frame descriptor into p_Frame.
 * Busy-waits twice: first for QMan to accept the PDQCR command, then for the
 * resulting DQRR entry to appear.  Assumes every accepted pull command
 * eventually produces a DQRR response -- TODO confirm; otherwise the inner
 * loop spins forever.
 * Returns E_OK, or E_EMPTY when the response carries no valid FD. */
static t_Error QmPortalPullFrame(t_Handle h_QmPortal, uint32_t pdqcr, t_DpaaFD *p_Frame)
{
    t_QmPortal              *p_QmPortal = (t_QmPortal *)h_QmPortal;
    struct qm_dqrr_entry    *p_Dq;
    struct qman_fq          *p_Fq;
    int                     prefetch;
    uint32_t                *p_Dst, *p_Src;

    ASSERT_COND(p_QmPortal);
    ASSERT_COND(p_Frame);
    SANITY_CHECK_RETURN_ERROR(p_QmPortal->pullMode, E_INVALID_STATE);

    NCSW_PLOCK(p_QmPortal);

    /* Issue the pull command and wait for the hardware to consume it. */
    qm_dqrr_pdqcr_set(p_QmPortal->p_LowQmPortal, pdqcr);
    mb();
    while (qm_dqrr_pdqcr_get(p_QmPortal->p_LowQmPortal)) ;

    /* Only prefetch manually when result-stashing is off. */
    prefetch = !(p_QmPortal->options & QMAN_PORTAL_FLAG_RSTASH);
    while(TRUE)
    {
        if (prefetch)
            qmPortalDqrrPvbPrefetch(p_QmPortal->p_LowQmPortal);
        qmPortalDqrrPvbUpdate(p_QmPortal->p_LowQmPortal);
        p_Dq = qm_dqrr_current(p_QmPortal->p_LowQmPortal);
        if (!p_Dq)
            continue;
        p_Fq = ptr_from_aligned_int(p_Dq->contextB);
        ASSERT_COND(p_Dq->fqid);
        /* Copy the 16-byte FD out of the ring entry word by word. */
        p_Dst = (uint32_t *)p_Frame;
        p_Src = (uint32_t *)&p_Dq->fd;
        p_Dst[0] = p_Src[0];
        p_Dst[1] = p_Src[1];
        p_Dst[2] = p_Src[2];
        p_Dst[3] = p_Src[3];
        if (p_QmPortal->options & QMAN_PORTAL_FLAG_DCA)
        {
            qmPortalDqrrDcaConsume1ptr(p_QmPortal->p_LowQmPortal,
                                       p_Dq,
                                       false);
            qm_dqrr_next(p_QmPortal->p_LowQmPortal);
        }
        else
        {
            qm_dqrr_next(p_QmPortal->p_LowQmPortal);
            qmPortalDqrrCciConsume(p_QmPortal->p_LowQmPortal, 1);
        }
        break;
    }

    PUNLOCK(p_QmPortal);

    /* NOTE(review): p_Dq->stat is read after the entry was consumed and the
     * portal unlocked; presumably safe because nothing rewrites the slot
     * until the producer wraps -- verify. */
    if (!(p_Dq->stat & QM_DQRR_STAT_FD_VALID))
        return ERROR_CODE(E_EMPTY);

    return E_OK;
}
1549 
1550 
1551 /****************************************/
1552 /*       API Init unit functions        */
1553 /****************************************/
1554 t_Handle QM_PORTAL_Config(t_QmPortalParam *p_QmPortalParam)
1555 {
1556     t_QmPortal          *p_QmPortal;
1557     uint32_t            i;
1558 
1559     SANITY_CHECK_RETURN_VALUE(p_QmPortalParam, E_INVALID_HANDLE, NULL);
1560     SANITY_CHECK_RETURN_VALUE(p_QmPortalParam->swPortalId < DPAA_MAX_NUM_OF_SW_PORTALS, E_INVALID_VALUE, 0);
1561 
1562     p_QmPortal = (t_QmPortal *)XX_Malloc(sizeof(t_QmPortal));
1563     if (!p_QmPortal)
1564     {
1565         REPORT_ERROR(MAJOR, E_NO_MEMORY, ("Qm Portal obj!!!"));
1566         return NULL;
1567     }
1568     memset(p_QmPortal, 0, sizeof(t_QmPortal));
1569 
1570     p_QmPortal->p_LowQmPortal = (struct qm_portal *)XX_Malloc(sizeof(struct qm_portal));
1571     if (!p_QmPortal->p_LowQmPortal)
1572     {
1573         XX_Free(p_QmPortal);
1574         REPORT_ERROR(MAJOR, E_NO_MEMORY, ("Low qm p_QmPortal obj!!!"));
1575         return NULL;
1576     }
1577     memset(p_QmPortal->p_LowQmPortal, 0, sizeof(struct qm_portal));
1578 
1579     p_QmPortal->p_QmPortalDriverParams = (t_QmPortalDriverParams *)XX_Malloc(sizeof(t_QmPortalDriverParams));
1580     if (!p_QmPortal->p_QmPortalDriverParams)
1581     {
1582         XX_Free(p_QmPortal->p_LowQmPortal);
1583         XX_Free(p_QmPortal);
1584         REPORT_ERROR(MAJOR, E_NO_MEMORY, ("Qm Portal driver parameters"));
1585         return NULL;
1586     }
1587     memset(p_QmPortal->p_QmPortalDriverParams, 0, sizeof(t_QmPortalDriverParams));
1588 
1589     p_QmPortal->p_LowQmPortal->addr.addr_ce = UINT_TO_PTR(p_QmPortalParam->ceBaseAddress);
1590     p_QmPortal->p_LowQmPortal->addr.addr_ci = UINT_TO_PTR(p_QmPortalParam->ciBaseAddress);
1591     p_QmPortal->p_LowQmPortal->config.irq = p_QmPortalParam->irq;
1592     p_QmPortal->p_LowQmPortal->config.bound = 0;
1593     p_QmPortal->p_LowQmPortal->config.cpu = (int)p_QmPortalParam->swPortalId;
1594     p_QmPortal->p_LowQmPortal->config.channel = (e_QmFQChannel)(e_QM_FQ_CHANNEL_SWPORTAL0 + p_QmPortalParam->swPortalId);
1595     p_QmPortal->p_LowQmPortal->bind_lock = XX_InitSpinlock();
1596 
1597     p_QmPortal->h_Qm                = p_QmPortalParam->h_Qm;
1598     p_QmPortal->f_DfltFrame         = p_QmPortalParam->f_DfltFrame;
1599     p_QmPortal->f_RejectedFrame     = p_QmPortalParam->f_RejectedFrame;
1600     p_QmPortal->h_App               = p_QmPortalParam->h_App;
1601 
1602     p_QmPortal->p_QmPortalDriverParams->fdLiodnOffset           = p_QmPortalParam->fdLiodnOffset;
1603     p_QmPortal->p_QmPortalDriverParams->dequeueDcaMode          = DEFAULT_dequeueDcaMode;
1604     p_QmPortal->p_QmPortalDriverParams->dequeueUpToThreeFrames  = DEFAULT_dequeueUpToThreeFrames;
1605     p_QmPortal->p_QmPortalDriverParams->commandType             = DEFAULT_dequeueCommandType;
1606     p_QmPortal->p_QmPortalDriverParams->userToken               = DEFAULT_dequeueUserToken;
1607     p_QmPortal->p_QmPortalDriverParams->specifiedWq             = DEFAULT_dequeueSpecifiedWq;
1608     p_QmPortal->p_QmPortalDriverParams->dedicatedChannel        = DEFAULT_dequeueDedicatedChannel;
1609     p_QmPortal->p_QmPortalDriverParams->dedicatedChannelHasPrecedenceOverPoolChannels =
1610         DEFAULT_dequeueDedicatedChannelHasPrecedenceOverPoolChannels;
1611     p_QmPortal->p_QmPortalDriverParams->poolChannelId           = DEFAULT_dequeuePoolChannelId;
1612     p_QmPortal->p_QmPortalDriverParams->wqId                    = DEFAULT_dequeueWqId;
1613     for (i=0;i<QM_MAX_NUM_OF_POOL_CHANNELS;i++)
1614         p_QmPortal->p_QmPortalDriverParams->poolChannels[i] = FALSE;
1615     p_QmPortal->p_QmPortalDriverParams->dqrrSize                = DEFAULT_dqrrSize;
1616     p_QmPortal->p_QmPortalDriverParams->pullMode                = DEFAULT_pullMode;
1617 
1618     return p_QmPortal;
1619 }
1620 
1621 t_Error QM_PORTAL_Init(t_Handle h_QmPortal)
1622 {
1623     t_QmPortal                          *p_QmPortal = (t_QmPortal *)h_QmPortal;
1624     uint32_t                            i, flags=0, sdqcrFlags=0;
1625     t_Error                             err;
1626     t_QmInterModulePortalInitParams     qmParams;
1627 
1628     SANITY_CHECK_RETURN_ERROR(p_QmPortal, E_INVALID_HANDLE);
1629     SANITY_CHECK_RETURN_ERROR(p_QmPortal->p_QmPortalDriverParams, E_INVALID_HANDLE);
1630 
1631     memset(&qmParams, 0, sizeof(qmParams));
1632     qmParams.portalId       = (uint8_t)p_QmPortal->p_LowQmPortal->config.cpu;
1633     qmParams.liodn          = p_QmPortal->p_QmPortalDriverParams->fdLiodnOffset;
1634     qmParams.dqrrLiodn      = p_QmPortal->p_QmPortalDriverParams->dqrrLiodn;
1635     qmParams.fdFqLiodn      = p_QmPortal->p_QmPortalDriverParams->fdFqLiodn;
1636     qmParams.stashDestQueue = p_QmPortal->p_QmPortalDriverParams->stashDestQueue;
1637     if ((err = QmGetSetPortalParams(p_QmPortal->h_Qm, &qmParams)) != E_OK)
1638         RETURN_ERROR(MAJOR, err, NO_MSG);
1639 
1640     flags = (uint32_t)(((p_QmPortal->p_LowQmPortal->config.irq == NO_IRQ) ?
1641             0 :
1642             (QMAN_PORTAL_FLAG_IRQ |
1643              QMAN_PORTAL_FLAG_IRQ_FAST |
1644              QMAN_PORTAL_FLAG_IRQ_SLOW)));
1645     flags |= ((p_QmPortal->p_QmPortalDriverParams->dequeueDcaMode) ? QMAN_PORTAL_FLAG_DCA : 0);
1646     flags |= (p_QmPortal->p_QmPortalDriverParams->dqrr)?QMAN_PORTAL_FLAG_RSTASH:0;
1647     flags |= (p_QmPortal->p_QmPortalDriverParams->fdFq)?QMAN_PORTAL_FLAG_DSTASH:0;
1648 
1649     p_QmPortal->pullMode = p_QmPortal->p_QmPortalDriverParams->pullMode;
1650     if (!p_QmPortal->pullMode)
1651     {
1652         sdqcrFlags |= (p_QmPortal->p_QmPortalDriverParams->dequeueUpToThreeFrames) ? QM_SDQCR_COUNT_UPTO3 : QM_SDQCR_COUNT_EXACT1;
1653         sdqcrFlags |= QM_SDQCR_TOKEN_SET(p_QmPortal->p_QmPortalDriverParams->userToken);
1654         sdqcrFlags |= QM_SDQCR_TYPE_SET(p_QmPortal->p_QmPortalDriverParams->commandType);
1655         if (!p_QmPortal->p_QmPortalDriverParams->specifiedWq)
1656         {
1657             /* sdqcrFlags |= QM_SDQCR_SOURCE_CHANNELS;*/ /* removed as the macro is '0' */
1658             sdqcrFlags |= (p_QmPortal->p_QmPortalDriverParams->dedicatedChannelHasPrecedenceOverPoolChannels) ? QM_SDQCR_DEDICATED_PRECEDENCE : 0;
1659             sdqcrFlags |= (p_QmPortal->p_QmPortalDriverParams->dedicatedChannel) ? QM_SDQCR_CHANNELS_DEDICATED : 0;
1660             for (i=0;i<QM_MAX_NUM_OF_POOL_CHANNELS;i++)
1661                 sdqcrFlags |= ((p_QmPortal->p_QmPortalDriverParams->poolChannels[i]) ?
1662                      QM_SDQCR_CHANNELS_POOL(i+1) : 0);
1663         }
1664         else
1665         {
1666             sdqcrFlags |= QM_SDQCR_SOURCE_SPECIFICWQ;
1667             sdqcrFlags |= (p_QmPortal->p_QmPortalDriverParams->dedicatedChannel) ?
1668                             QM_SDQCR_SPECIFICWQ_DEDICATED : QM_SDQCR_SPECIFICWQ_POOL(p_QmPortal->p_QmPortalDriverParams->poolChannelId);
1669             sdqcrFlags |= QM_SDQCR_SPECIFICWQ_WQ(p_QmPortal->p_QmPortalDriverParams->wqId);
1670         }
1671     }
1672     if ((flags & QMAN_PORTAL_FLAG_RSTASH) && (flags & QMAN_PORTAL_FLAG_DCA))
1673         p_QmPortal->f_LoopDequeueRingCB = LoopDequeueRingDcaOptimized;
1674     else if ((flags & QMAN_PORTAL_FLAG_RSTASH) && !(flags & QMAN_PORTAL_FLAG_DCA))
1675         p_QmPortal->f_LoopDequeueRingCB = LoopDequeueRingOptimized;
1676     else
1677         p_QmPortal->f_LoopDequeueRingCB = LoopDequeueRing;
1678 
1679     if ((!p_QmPortal->f_RejectedFrame) || (!p_QmPortal->f_DfltFrame))
1680         RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("f_RejectedFrame or f_DfltFrame callback not provided"));
1681 
1682     p_QmPortal->p_NullCB = (struct qman_fq_cb *)XX_Malloc(sizeof(struct qman_fq_cb));
1683     if (!p_QmPortal->p_NullCB)
1684         RETURN_ERROR(MAJOR, E_NO_MEMORY, ("FQ Null CB obj!!!"));
1685     memset(p_QmPortal->p_NullCB, 0, sizeof(struct qman_fq_cb));
1686 
1687     p_QmPortal->p_NullCB->dqrr      = p_QmPortal->f_DfltFrame;
1688     p_QmPortal->p_NullCB->ern       = p_QmPortal->f_RejectedFrame;
1689     p_QmPortal->p_NullCB->dc_ern    = p_QmPortal->p_NullCB->fqs = null_cb_mr;
1690 
1691     if (qman_create_portal(p_QmPortal, flags, sdqcrFlags, p_QmPortal->p_QmPortalDriverParams->dqrrSize) != E_OK)
1692     {
1693         RETURN_ERROR(MAJOR, E_NO_MEMORY, ("create portal failed"));
1694     }
1695 
1696     QmSetPortalHandle(p_QmPortal->h_Qm, (t_Handle)p_QmPortal, (e_DpaaSwPortal)p_QmPortal->p_LowQmPortal->config.cpu);
1697     XX_Free(p_QmPortal->p_QmPortalDriverParams);
1698     p_QmPortal->p_QmPortalDriverParams = NULL;
1699 
1700     DBG(TRACE, ("Qman-Portal %d @ %p:%p",
1701                 p_QmPortal->p_LowQmPortal->config.cpu,
1702                 p_QmPortal->p_LowQmPortal->addr.addr_ce,
1703                 p_QmPortal->p_LowQmPortal->addr.addr_ci
1704                 ));
1705 
1706     DBG(TRACE, ("Qman-Portal %d phys @ 0x%016llx:0x%016llx",
1707                 p_QmPortal->p_LowQmPortal->config.cpu,
1708                 (uint64_t)XX_VirtToPhys(p_QmPortal->p_LowQmPortal->addr.addr_ce),
1709                 (uint64_t)XX_VirtToPhys(p_QmPortal->p_LowQmPortal->addr.addr_ci)
1710                 ));
1711 
1712     return E_OK;
1713 }
1714 
1715 t_Error QM_PORTAL_Free(t_Handle h_QmPortal)
1716 {
1717     t_QmPortal  *p_QmPortal = (t_QmPortal *)h_QmPortal;
1718 
1719     if (!p_QmPortal)
1720        return ERROR_CODE(E_INVALID_HANDLE);
1721 
1722     ASSERT_COND(p_QmPortal->p_LowQmPortal);
1723     QmSetPortalHandle(p_QmPortal->h_Qm, NULL, (e_DpaaSwPortal)p_QmPortal->p_LowQmPortal->config.cpu);
1724     qman_destroy_portal(p_QmPortal);
1725     if (p_QmPortal->p_NullCB)
1726         XX_Free(p_QmPortal->p_NullCB);
1727 
1728     if (p_QmPortal->p_LowQmPortal->bind_lock)
1729         XX_FreeSpinlock(p_QmPortal->p_LowQmPortal->bind_lock);
1730     if(p_QmPortal->p_QmPortalDriverParams)
1731         XX_Free(p_QmPortal->p_QmPortalDriverParams);
1732     XX_Free(p_QmPortal->p_LowQmPortal);
1733     XX_Free(p_QmPortal);
1734 
1735     return E_OK;
1736 }
1737 
1738 t_Error QM_PORTAL_ConfigDcaMode(t_Handle h_QmPortal, bool enable)
1739 {
1740     t_QmPortal  *p_QmPortal = (t_QmPortal *)h_QmPortal;
1741 
1742     SANITY_CHECK_RETURN_ERROR(p_QmPortal, E_INVALID_HANDLE);
1743     SANITY_CHECK_RETURN_ERROR(p_QmPortal->p_QmPortalDriverParams, E_INVALID_HANDLE);
1744 
1745     p_QmPortal->p_QmPortalDriverParams->dequeueDcaMode = enable;
1746 
1747     return E_OK;
1748 }
1749 
1750 t_Error QM_PORTAL_ConfigStash(t_Handle h_QmPortal, t_QmPortalStashParam *p_StashParams)
1751 {
1752     t_QmPortal  *p_QmPortal = (t_QmPortal *)h_QmPortal;
1753 
1754     SANITY_CHECK_RETURN_ERROR(p_QmPortal, E_INVALID_HANDLE);
1755     SANITY_CHECK_RETURN_ERROR(p_QmPortal->p_QmPortalDriverParams, E_NULL_POINTER);
1756     SANITY_CHECK_RETURN_ERROR(p_StashParams, E_NULL_POINTER);
1757 
1758     p_QmPortal->p_QmPortalDriverParams->stashDestQueue  = p_StashParams->stashDestQueue;
1759     p_QmPortal->p_QmPortalDriverParams->dqrrLiodn       = p_StashParams->dqrrLiodn;
1760     p_QmPortal->p_QmPortalDriverParams->fdFqLiodn       = p_StashParams->fdFqLiodn;
1761     p_QmPortal->p_QmPortalDriverParams->eqcr            = p_StashParams->eqcr;
1762     p_QmPortal->p_QmPortalDriverParams->eqcrHighPri     = p_StashParams->eqcrHighPri;
1763     p_QmPortal->p_QmPortalDriverParams->dqrr            = p_StashParams->dqrr;
1764     p_QmPortal->p_QmPortalDriverParams->dqrrHighPri     = p_StashParams->dqrrHighPri;
1765     p_QmPortal->p_QmPortalDriverParams->fdFq            = p_StashParams->fdFq;
1766     p_QmPortal->p_QmPortalDriverParams->fdFqHighPri     = p_StashParams->fdFqHighPri;
1767     p_QmPortal->p_QmPortalDriverParams->fdFqDrop        = p_StashParams->fdFqDrop;
1768 
1769     return E_OK;
1770 }
1771 
1772 
1773 t_Error QM_PORTAL_ConfigPullMode(t_Handle h_QmPortal, bool pullMode)
1774 {
1775     t_QmPortal  *p_QmPortal = (t_QmPortal *)h_QmPortal;
1776 
1777     SANITY_CHECK_RETURN_ERROR(p_QmPortal, E_INVALID_HANDLE);
1778     SANITY_CHECK_RETURN_ERROR(p_QmPortal->p_QmPortalDriverParams, E_NULL_POINTER);
1779 
1780     p_QmPortal->p_QmPortalDriverParams->pullMode  = pullMode;
1781 
1782     return E_OK;
1783 }
1784 
1785 t_Error QM_PORTAL_AddPoolChannel(t_Handle h_QmPortal, uint8_t poolChannelId)
1786 {
1787     t_QmPortal  *p_QmPortal = (t_QmPortal *)h_QmPortal;
1788     uint32_t    sdqcrFlags;
1789 
1790     SANITY_CHECK_RETURN_ERROR(p_QmPortal, E_INVALID_HANDLE);
1791     SANITY_CHECK_RETURN_ERROR((poolChannelId < QM_MAX_NUM_OF_POOL_CHANNELS), E_INVALID_VALUE);
1792 
1793     sdqcrFlags = qm_dqrr_sdqcr_get(p_QmPortal->p_LowQmPortal);
1794     sdqcrFlags |= QM_SDQCR_CHANNELS_POOL(poolChannelId+1);
1795     qm_dqrr_sdqcr_set(p_QmPortal->p_LowQmPortal, sdqcrFlags);
1796 
1797     return E_OK;
1798 }
1799 
1800 t_Error QM_PORTAL_Poll(t_Handle h_QmPortal, e_QmPortalPollSource source)
1801 {
1802     t_QmPortal  *p_QmPortal = (t_QmPortal *)h_QmPortal;
1803 
1804     SANITY_CHECK_RETURN_ERROR(p_QmPortal, E_INVALID_HANDLE);
1805 
1806     NCSW_PLOCK(p_QmPortal);
1807 
1808     if ((source == e_QM_PORTAL_POLL_SOURCE_CONTROL_FRAMES) ||
1809         (source == e_QM_PORTAL_POLL_SOURCE_BOTH))
1810     {
1811         uint32_t is = qm_isr_status_read(p_QmPortal->p_LowQmPortal);
1812         uint32_t active = LoopMessageRing(p_QmPortal, is);
1813         if (active)
1814             qm_isr_status_clear(p_QmPortal->p_LowQmPortal, active);
1815     }
1816     if ((source == e_QM_PORTAL_POLL_SOURCE_DATA_FRAMES) ||
1817         (source == e_QM_PORTAL_POLL_SOURCE_BOTH))
1818         p_QmPortal->f_LoopDequeueRingCB((t_Handle)p_QmPortal);
1819 
1820     PUNLOCK(p_QmPortal);
1821 
1822     return E_OK;
1823 }
1824 
1825 t_Error QM_PORTAL_PollFrame(t_Handle h_QmPortal, t_QmPortalFrameInfo *p_frameInfo)
1826 {
1827     t_QmPortal              *p_QmPortal     = (t_QmPortal *)h_QmPortal;
1828     struct qm_dqrr_entry    *p_Dq;
1829     struct qman_fq          *p_Fq;
1830     int                     prefetch;
1831 
1832     SANITY_CHECK_RETURN_ERROR(p_QmPortal, E_INVALID_HANDLE);
1833     SANITY_CHECK_RETURN_ERROR(p_frameInfo, E_NULL_POINTER);
1834 
1835     NCSW_PLOCK(p_QmPortal);
1836 
1837     prefetch = !(p_QmPortal->options & QMAN_PORTAL_FLAG_RSTASH);
1838     if (prefetch)
1839         qmPortalDqrrPvbPrefetch(p_QmPortal->p_LowQmPortal);
1840     qmPortalDqrrPvbUpdate(p_QmPortal->p_LowQmPortal);
1841     p_Dq = qm_dqrr_current(p_QmPortal->p_LowQmPortal);
1842     if (!p_Dq)
1843     {
1844         PUNLOCK(p_QmPortal);
1845         return ERROR_CODE(E_EMPTY);
1846     }
1847     p_Fq = ptr_from_aligned_int(p_Dq->contextB);
1848     ASSERT_COND(p_Dq->fqid);
1849     if (p_Fq)
1850     {
1851         p_frameInfo->h_App = p_Fq->h_App;
1852         p_frameInfo->h_QmFqr = p_Fq->h_QmFqr;
1853         p_frameInfo->fqidOffset = p_Fq->fqidOffset;
1854         memcpy((void*)&p_frameInfo->frame, (void*)&p_Dq->fd, sizeof(t_DpaaFD));
1855     }
1856     else
1857     {
1858         p_frameInfo->h_App = p_QmPortal->h_App;
1859         p_frameInfo->h_QmFqr = NULL;
1860         p_frameInfo->fqidOffset = p_Dq->fqid;
1861         memcpy((void*)&p_frameInfo->frame, (void*)&p_Dq->fd, sizeof(t_DpaaFD));
1862     }
1863     if (p_QmPortal->options & QMAN_PORTAL_FLAG_DCA) {
1864         qmPortalDqrrDcaConsume1ptr(p_QmPortal->p_LowQmPortal,
1865                                    p_Dq,
1866                                    false);
1867         qm_dqrr_next(p_QmPortal->p_LowQmPortal);
1868     } else {
1869         qm_dqrr_next(p_QmPortal->p_LowQmPortal);
1870         qmPortalDqrrCciConsume(p_QmPortal->p_LowQmPortal, 1);
1871     }
1872 
1873     PUNLOCK(p_QmPortal);
1874 
1875     return E_OK;
1876 }
1877 
1878 
/* Creates a Frame-Queue-Range: allocates a (possibly forced) contiguous
 * range of FQIDs and a software FQ object (struct qman_fq) per FQID,
 * initializing the hardware FQs with the requested channel/work-queue,
 * congestion-avoidance and stashing attributes.
 * In shadow mode (requires useForce and numOfFqids == 1) no hardware FQ
 * is touched - a single SW object is built that mirrors an FQ owned
 * elsewhere.
 * Returns an FQR handle, or NULL on failure. */
t_Handle QM_FQR_Create(t_QmFqrParams *p_QmFqrParams)
{
    t_QmFqr             *p_QmFqr;
    uint32_t            i, flags = 0;
    u_QmFqdContextA     cnxtA;

    SANITY_CHECK_RETURN_VALUE(p_QmFqrParams, E_INVALID_HANDLE, NULL);
    SANITY_CHECK_RETURN_VALUE(p_QmFqrParams->h_Qm, E_INVALID_HANDLE, NULL);

    /* Shadow mode mirrors exactly one externally-owned FQ, so a forced,
     * single FQID is mandatory. */
    if (p_QmFqrParams->shadowMode &&
        (!p_QmFqrParams->useForce || p_QmFqrParams->numOfFqids != 1))
    {
        REPORT_ERROR(MAJOR, E_CONFLICT, ("shadowMode must be use with useForce and numOfFqids==1!!!"));
        return NULL;
    }

    /* 64-byte aligned allocation - presumably cache-line alignment for
     * stashing; TODO confirm against XX_MallocSmart semantics. */
    p_QmFqr = (t_QmFqr *)XX_MallocSmart(sizeof(t_QmFqr), 0, 64);
    if (!p_QmFqr)
    {
        REPORT_ERROR(MAJOR, E_NO_MEMORY, ("QM FQR obj!!!"));
        return NULL;
    }
    memset(p_QmFqr, 0, sizeof(t_QmFqr));

    p_QmFqr->h_Qm       = p_QmFqrParams->h_Qm;
    p_QmFqr->h_QmPortal = p_QmFqrParams->h_QmPortal;
    p_QmFqr->shadowMode = p_QmFqrParams->shadowMode;
    /* useForce with numOfFqids==0 is treated as a single forced FQID. */
    p_QmFqr->numOfFqids = (p_QmFqrParams->useForce && !p_QmFqrParams->numOfFqids) ?
                              1 : p_QmFqrParams->numOfFqids;

    /* Fall back to the QM device's default portal when none was given. */
    if (!p_QmFqr->h_QmPortal)
    {
        p_QmFqr->h_QmPortal = QmGetPortalHandle(p_QmFqr->h_Qm);
        SANITY_CHECK_RETURN_VALUE(p_QmFqr->h_QmPortal, E_INVALID_HANDLE, NULL);
    }

    /* Array of per-FQID software FQ object pointers. */
    p_QmFqr->p_Fqs = (struct qman_fq **)XX_Malloc(sizeof(struct qman_fq *) * p_QmFqr->numOfFqids);
    if (!p_QmFqr->p_Fqs)
    {
        REPORT_ERROR(MAJOR, E_NO_MEMORY, ("QM FQs obj!!!"));
        QM_FQR_Free(p_QmFqr);
        return NULL;
    }
    memset(p_QmFqr->p_Fqs, 0, sizeof(struct qman_fq *) * p_QmFqr->numOfFqids);

    if (p_QmFqr->shadowMode)
    {
        /* Shadow mode: build one SW object over the forced FQID; the HW
         * FQ is assumed already scheduled by its real owner. */
        struct qman_fq          *p_Fq = NULL;

        p_QmFqr->fqidBase = p_QmFqrParams->qs.frcQ.fqid;
        p_Fq = (struct qman_fq *)XX_MallocSmart(sizeof(struct qman_fq), 0, 64);
        if (!p_Fq)
        {
            REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FQ obj!!!"));
            QM_FQR_Free(p_QmFqr);
            return NULL;
        }
        memset(p_Fq, 0, sizeof(struct qman_fq));
        p_Fq->cb.dqrr     = ((t_QmPortal*)p_QmFqr->h_QmPortal)->f_DfltFrame;
        p_Fq->cb.ern      = ((t_QmPortal*)p_QmFqr->h_QmPortal)->f_RejectedFrame;
        p_Fq->cb.dc_ern   = cb_ern_dcErn;
        p_Fq->cb.fqs      = cb_fqs;
        p_Fq->h_App       = ((t_QmPortal*)p_QmFqr->h_QmPortal)->h_App;
        p_Fq->h_QmFqr     = p_QmFqr;
        p_Fq->state       = qman_fq_state_sched;
        p_Fq->fqid        = p_QmFqr->fqidBase;
        p_QmFqr->p_Fqs[0] = p_Fq;
    }
    else
    {
        p_QmFqr->channel    = p_QmFqrParams->channel;
        p_QmFqr->workQueue  = p_QmFqrParams->wq;

        /* Reserve the FQID range from the QM device (forced or allocated). */
        p_QmFqr->fqidBase = QmFqidGet(p_QmFqr->h_Qm,
                                      p_QmFqr->numOfFqids,
                                      p_QmFqrParams->qs.nonFrcQs.align,
                                      p_QmFqrParams->useForce,
                                      p_QmFqrParams->qs.frcQ.fqid);
        if (p_QmFqr->fqidBase == (uint32_t)ILLEGAL_BASE)
        {
            REPORT_ERROR(CRITICAL,E_INVALID_STATE,("can't allocate a fqid"));
            QM_FQR_Free(p_QmFqr);
            return NULL;
        }

        /* Congestion avoidance needs either a CG handle or a tail-drop
         * threshold; having neither is a configuration error. */
        if(p_QmFqrParams->congestionAvoidanceEnable &&
            (p_QmFqrParams->congestionAvoidanceParams.h_QmCg == NULL) &&
            (p_QmFqrParams->congestionAvoidanceParams.fqTailDropThreshold == 0))
        {
            REPORT_ERROR(CRITICAL,E_INVALID_STATE,("NULL congestion group handle and no FQ Threshold"));
            QM_FQR_Free(p_QmFqr);
            return NULL;
        }
        if(p_QmFqrParams->congestionAvoidanceEnable)
        {
            if(p_QmFqrParams->congestionAvoidanceParams.h_QmCg)
                flags |= QM_FQCTRL_CGE;
            if(p_QmFqrParams->congestionAvoidanceParams.fqTailDropThreshold)
                flags |= QM_FQCTRL_TDE;
        }

    /*
        flags |= (p_QmFqrParams->holdActive)    ? QM_FQCTRL_ORP : 0;
        flags |= (p_QmFqrParams->holdActive)    ? QM_FQCTRL_CPCSTASH : 0;
        flags |= (p_QmFqrParams->holdActive)    ? QM_FQCTRL_FORCESFDR : 0;
        flags |= (p_QmFqrParams->holdActive)    ? QM_FQCTRL_AVOIDBLOCK : 0;
    */
        flags |= (p_QmFqrParams->holdActive)    ? QM_FQCTRL_HOLDACTIVE : 0;
        flags |= (p_QmFqrParams->preferInCache) ? QM_FQCTRL_LOCKINCACHE : 0;

        /* Optional context-A stashing: encode annotation/data/context
         * sizes (in cache lines) and the FQ context address. */
        if (p_QmFqrParams->useContextAForStash)
        {
            if (CheckStashParams(p_QmFqrParams) != E_OK)
            {
                REPORT_ERROR(CRITICAL,E_INVALID_STATE,NO_MSG);
                QM_FQR_Free(p_QmFqr);
                return NULL;
            }

            memset(&cnxtA, 0, sizeof(cnxtA));
            cnxtA.stashing.annotation_cl = DIV_CEIL(p_QmFqrParams->stashingParams.frameAnnotationSize, CACHELINE_SIZE);
            cnxtA.stashing.data_cl = DIV_CEIL(p_QmFqrParams->stashingParams.frameDataSize, CACHELINE_SIZE);
            cnxtA.stashing.context_cl = DIV_CEIL(p_QmFqrParams->stashingParams.fqContextSize, CACHELINE_SIZE);
            cnxtA.context_hi = (uint8_t)((p_QmFqrParams->stashingParams.fqContextAddr >> 32) & 0xff);
            cnxtA.context_lo = (uint32_t)(p_QmFqrParams->stashingParams.fqContextAddr);
            flags |= QM_FQCTRL_CTXASTASHING;
        }

        /* Create and initialize each FQ; on any failure release the
         * whole range (QM_FQR_Free handles the partially-built state). */
        for(i=0;i<p_QmFqr->numOfFqids;i++)
            if (qm_new_fq(p_QmFqr->h_QmPortal,
                          p_QmFqr->fqidBase+i,
                          i,
                          p_QmFqr->channel,
                          p_QmFqr->workQueue,
                          1/*p_QmFqr->numOfFqids*/,
                          flags,
                          (p_QmFqrParams->congestionAvoidanceEnable ?
                              &p_QmFqrParams->congestionAvoidanceParams : NULL),
                          p_QmFqrParams->useContextAForStash ?
                              (t_QmContextA *)&cnxtA : p_QmFqrParams->p_ContextA,
                          p_QmFqrParams->p_ContextB,
                          p_QmFqrParams->initParked,
                          p_QmFqr,
                          &p_QmFqr->p_Fqs[i]) != E_OK)
            {
                QM_FQR_Free(p_QmFqr);
                return NULL;
            }
    }
    return p_QmFqr;
}
2030 
2031 t_Error  QM_FQR_Free(t_Handle h_QmFqr)
2032 {
2033     t_QmFqr     *p_QmFqr    = (t_QmFqr *)h_QmFqr;
2034     uint32_t    i;
2035 
2036     if (!p_QmFqr)
2037         return ERROR_CODE(E_INVALID_HANDLE);
2038 
2039     if (p_QmFqr->p_Fqs)
2040     {
2041         for (i=0;i<p_QmFqr->numOfFqids;i++)
2042             if (p_QmFqr->p_Fqs[i])
2043             {
2044                 if (!p_QmFqr->shadowMode)
2045                     qm_free_fq(p_QmFqr->h_QmPortal, p_QmFqr->p_Fqs[i]);
2046                 XX_FreeSmart(p_QmFqr->p_Fqs[i]);
2047             }
2048         XX_Free(p_QmFqr->p_Fqs);
2049     }
2050 
2051     if (!p_QmFqr->shadowMode && p_QmFqr->fqidBase)
2052         QmFqidPut(p_QmFqr->h_Qm, p_QmFqr->fqidBase);
2053 
2054     XX_FreeSmart(p_QmFqr);
2055 
2056     return E_OK;
2057 }
2058 
/* Retires and drains every FQ in the range before freeing it.
 * Frames still queued are either delivered through f_CallBack
 * (deliverFrame==TRUE) or discarded via the internal drain callback.
 * If f_CompletionCB is provided the final free is asynchronous
 * (completion is signaled through the callback); otherwise this routine
 * busy-waits until the drain completes and frees everything itself.
 * Not valid for shadow FQRs - use QM_FQR_Free() for those. */
t_Error  QM_FQR_FreeWDrain(t_Handle                     h_QmFqr,
                           t_QmFqrDrainedCompletionCB   *f_CompletionCB,
                           bool                         deliverFrame,
                           t_QmReceivedFrameCallback    *f_CallBack,
                           t_Handle                     h_App)
{
    t_QmFqr     *p_QmFqr    = (t_QmFqr *)h_QmFqr;
    uint32_t    i;

    if (!p_QmFqr)
        return ERROR_CODE(E_INVALID_HANDLE);

    if (p_QmFqr->shadowMode)
        RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("QM_FQR_FreeWDrain can't be called to shadow FQR!!!. call QM_FQR_Free"));

    /* Per-FQ "drained" flags; the drain machinery is expected to free
     * and NULL this array when the last FQ completes (the busy-wait
     * below spins on it). */
    p_QmFqr->p_DrainedFqs = (bool *)XX_Malloc(sizeof(bool) * p_QmFqr->numOfFqids);
    if (!p_QmFqr->p_DrainedFqs)
        RETURN_ERROR(MAJOR, E_NO_MEMORY, ("QM Drained-FQs obj!!!. Try to Free without draining"));
    memset(p_QmFqr->p_DrainedFqs, 0, sizeof(bool) * p_QmFqr->numOfFqids);

    if (f_CompletionCB)
    {
        p_QmFqr->f_CompletionCB = f_CompletionCB;
        p_QmFqr->h_App          = h_App;
    }

    /* Route in-flight frames either to the caller's callback or to the
     * internal discard callback. */
    if (deliverFrame)
    {
        if (!f_CallBack)
        {
            REPORT_ERROR(MAJOR, E_NULL_POINTER, ("f_CallBack must be given."));
            XX_Free(p_QmFqr->p_DrainedFqs);
            return ERROR_CODE(E_NULL_POINTER);
        }
        QM_FQR_RegisterCB(p_QmFqr, f_CallBack, h_App);
    }
    else
        QM_FQR_RegisterCB(p_QmFqr, drainCB, h_App);

    /* Retire every FQ; FQs still changing state will be drained later by
     * the state-change handler, idle ones are drained immediately. */
    for (i=0;i<p_QmFqr->numOfFqids;i++)
    {
        if (qman_retire_fq(p_QmFqr->h_QmPortal, p_QmFqr->p_Fqs[i], 0, true) != E_OK)
            RETURN_ERROR(MAJOR, E_INVALID_STATE, ("qman_retire_fq() failed!"));

        if (p_QmFqr->p_Fqs[i]->flags & QMAN_FQ_STATE_CHANGING)
            DBG(INFO, ("fq %d currently in use, will be retired", p_QmFqr->p_Fqs[i]->fqid));
        else
            drainRetiredFq(p_QmFqr->p_Fqs[i]);
    }

    /* Synchronous path: spin until the drain logic NULLs p_DrainedFqs,
     * then release the FQ array, the FQID range and the FQR object. */
    if (!p_QmFqr->f_CompletionCB)
    {
        while(p_QmFqr->p_DrainedFqs) ;
        DBG(TRACE, ("QM-FQR with base %d completed", p_QmFqr->fqidBase));
        XX_FreeSmart(p_QmFqr->p_Fqs);
        if (p_QmFqr->fqidBase)
            QmFqidPut(p_QmFqr->h_Qm, p_QmFqr->fqidBase);
        XX_FreeSmart(p_QmFqr);
    }

    return E_OK;
}
2121 
2122 t_Error QM_FQR_RegisterCB(t_Handle h_QmFqr, t_QmReceivedFrameCallback *f_CallBack, t_Handle h_App)
2123 {
2124     t_QmFqr     *p_QmFqr = (t_QmFqr *)h_QmFqr;
2125     int         i;
2126 
2127     SANITY_CHECK_RETURN_ERROR(p_QmFqr, E_INVALID_HANDLE);
2128 
2129     for (i=0;i<p_QmFqr->numOfFqids;i++)
2130     {
2131         p_QmFqr->p_Fqs[i]->cb.dqrr = f_CallBack;
2132         p_QmFqr->p_Fqs[i]->h_App   = h_App;
2133     }
2134 
2135     return E_OK;
2136 }
2137 
/* Enqueues one frame descriptor onto the FQ at fqidOffset within the
 * range, through the given portal (or the QM device's default portal
 * when h_QmPortal is NULL). Returns E_BUSY when the enqueue ring is
 * full or the FQ state forbids enqueueing. */
t_Error QM_FQR_Enqueue(t_Handle h_QmFqr, t_Handle h_QmPortal, uint32_t fqidOffset, t_DpaaFD *p_Frame)
{
    t_QmFqr                 *p_QmFqr = (t_QmFqr *)h_QmFqr;
    t_QmPortal              *p_QmPortal;
    struct qm_eqcr_entry    *p_Eq;
    uint32_t                *p_Dst, *p_Src;
    const struct qman_fq    *p_Fq;

    SANITY_CHECK_RETURN_ERROR(p_QmFqr, E_INVALID_HANDLE);
    SANITY_CHECK_RETURN_ERROR((fqidOffset < p_QmFqr->numOfFqids), E_INVALID_VALUE);

    /* Fall back to the QM device's default portal when none was given. */
    if (!h_QmPortal)
    {
        SANITY_CHECK_RETURN_ERROR(p_QmFqr->h_Qm, E_INVALID_HANDLE);
        h_QmPortal = QmGetPortalHandle(p_QmFqr->h_Qm);
        SANITY_CHECK_RETURN_ERROR(h_QmPortal, E_INVALID_HANDLE);
    }
    p_QmPortal = (t_QmPortal *)h_QmPortal;

    p_Fq = p_QmFqr->p_Fqs[fqidOffset];

#ifdef QM_CHECKING
    /* Optional debug checks: FQ must allow enqueue and must not be
     * retired/out-of-service (unless marked NO_MODIFY). */
    if (p_Fq->flags & QMAN_FQ_FLAG_NO_ENQUEUE)
        RETURN_ERROR(MINOR, E_INVALID_VALUE, NO_MSG);
    if ((!(p_Fq->flags & QMAN_FQ_FLAG_NO_MODIFY)) &&
        ((p_Fq->state == qman_fq_state_retired) ||
         (p_Fq->state == qman_fq_state_oos)))
        return ERROR_CODE(E_BUSY);
#endif /* QM_CHECKING */

    NCSW_PLOCK(p_QmPortal);
    /* Claim the next free EQCR entry; NULL means the ring is full. */
    p_Eq = try_eq_start(p_QmPortal);
    if (!p_Eq)
    {
        PUNLOCK(p_QmPortal);
        return ERROR_CODE(E_BUSY);
    }

    p_Eq->fqid = p_Fq->fqid;
    /* The tag lets the ERN/completion path recover the qman_fq object. */
    p_Eq->tag = aligned_int_from_ptr(p_Fq);
    /* gcc does a dreadful job of the following;
     *  eq->fd = *fd;
     * It causes the entire function to save/restore a wider range of
     * registers, and comes up with instruction-waste galore. This will do
     * until we can rework the function for better code-generation. */
    p_Dst = (uint32_t *)&p_Eq->fd;
    p_Src = (uint32_t *)p_Frame;
    p_Dst[0] = p_Src[0];
    p_Dst[1] = p_Src[1];
    p_Dst[2] = p_Src[2];
    p_Dst[3] = p_Src[3];

    /* Commit the entry to hardware (valid-bit publish). */
    qmPortalEqcrPvbCommit(p_QmPortal->p_LowQmPortal,
                          (uint8_t)(QM_EQCR_VERB_CMD_ENQUEUE/* |
                          (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT))*/));
    PUNLOCK(p_QmPortal);

    return E_OK;
}
2197 
2198 
2199 t_Error QM_FQR_PullFrame(t_Handle h_QmFqr, t_Handle h_QmPortal, uint32_t fqidOffset, t_DpaaFD *p_Frame)
2200 {
2201     t_QmFqr                 *p_QmFqr = (t_QmFqr *)h_QmFqr;
2202     uint32_t                pdqcr = 0;
2203 
2204     SANITY_CHECK_RETURN_ERROR(p_QmFqr, E_INVALID_HANDLE);
2205     SANITY_CHECK_RETURN_ERROR((fqidOffset < p_QmFqr->numOfFqids), E_INVALID_VALUE);
2206     SANITY_CHECK_RETURN_ERROR(p_Frame, E_NULL_POINTER);
2207     SANITY_CHECK_RETURN_ERROR((p_QmFqr->p_Fqs[fqidOffset]->state == qman_fq_state_oos) ||
2208                               (p_QmFqr->p_Fqs[fqidOffset]->state == qman_fq_state_parked),
2209                               E_INVALID_STATE);
2210     if (!h_QmPortal)
2211     {
2212         SANITY_CHECK_RETURN_ERROR(p_QmFqr->h_Qm, E_INVALID_HANDLE);
2213         h_QmPortal = QmGetPortalHandle(p_QmFqr->h_Qm);
2214         SANITY_CHECK_RETURN_ERROR(h_QmPortal, E_INVALID_HANDLE);
2215     }
2216 
2217     pdqcr |= QM_PDQCR_MODE_UNSCHEDULED;
2218     pdqcr |= QM_PDQCR_FQID(p_QmFqr->p_Fqs[fqidOffset]->fqid);
2219     return QmPortalPullFrame(h_QmPortal, pdqcr, p_Frame);
2220 }
2221 
2222 t_Error QM_FQR_Resume(t_Handle h_QmFqr, t_Handle h_QmPortal, uint32_t fqidOffset)
2223 {
2224     t_QmFqr     *p_QmFqr = (t_QmFqr *)h_QmFqr;
2225 
2226     SANITY_CHECK_RETURN_ERROR(p_QmFqr, E_INVALID_HANDLE);
2227     SANITY_CHECK_RETURN_ERROR((fqidOffset < p_QmFqr->numOfFqids), E_INVALID_VALUE);
2228 
2229     if (!h_QmPortal)
2230     {
2231         SANITY_CHECK_RETURN_ERROR(p_QmFqr->h_Qm, E_INVALID_HANDLE);
2232         h_QmPortal = QmGetPortalHandle(p_QmFqr->h_Qm);
2233         SANITY_CHECK_RETURN_ERROR(h_QmPortal, E_INVALID_HANDLE);
2234     }
2235     return qman_schedule_fq(h_QmPortal, p_QmFqr->p_Fqs[fqidOffset]);
2236 }
2237 
2238 t_Error  QM_FQR_Suspend(t_Handle h_QmFqr, t_Handle h_QmPortal, uint32_t fqidOffset)
2239 {
2240     t_QmFqr     *p_QmFqr = (t_QmFqr *)h_QmFqr;
2241 
2242     SANITY_CHECK_RETURN_ERROR(p_QmFqr, E_INVALID_HANDLE);
2243     SANITY_CHECK_RETURN_ERROR((fqidOffset < p_QmFqr->numOfFqids), E_INVALID_VALUE);
2244     SANITY_CHECK_RETURN_ERROR((p_QmFqr->p_Fqs[fqidOffset]->flags & QM_FQCTRL_HOLDACTIVE), E_INVALID_STATE);
2245 
2246     UNUSED(h_QmPortal);
2247     p_QmFqr->p_Fqs[fqidOffset]->state = qman_fq_state_waiting_parked;
2248 
2249     return E_OK;
2250 }
2251 
2252 uint32_t QM_FQR_GetFqid(t_Handle h_QmFqr)
2253 {
2254     t_QmFqr *p_QmFqr = (t_QmFqr *)h_QmFqr;
2255 
2256     SANITY_CHECK_RETURN_VALUE(p_QmFqr, E_INVALID_HANDLE, 0);
2257 
2258     return p_QmFqr->fqidBase;
2259 }
2260 
2261 uint32_t QM_FQR_GetCounter(t_Handle h_QmFqr, t_Handle h_QmPortal, uint32_t fqidOffset, e_QmFqrCounters counter)
2262 {
2263     t_QmFqr *p_QmFqr = (t_QmFqr *)h_QmFqr;
2264     struct qm_mcr_queryfq_np    queryfq_np;
2265 
2266     SANITY_CHECK_RETURN_VALUE(p_QmFqr, E_INVALID_HANDLE, 0);
2267     SANITY_CHECK_RETURN_VALUE((fqidOffset < p_QmFqr->numOfFqids), E_INVALID_VALUE, 0);
2268 
2269     if (!h_QmPortal)
2270     {
2271         SANITY_CHECK_RETURN_VALUE(p_QmFqr->h_Qm, E_INVALID_HANDLE, 0);
2272         h_QmPortal = QmGetPortalHandle(p_QmFqr->h_Qm);
2273         SANITY_CHECK_RETURN_VALUE(h_QmPortal, E_INVALID_HANDLE, 0);
2274     }
2275     if (qman_query_fq_np(h_QmPortal, p_QmFqr->p_Fqs[fqidOffset], &queryfq_np) != E_OK)
2276         return 0;
2277     switch (counter)
2278     {
2279         case e_QM_FQR_COUNTERS_FRAME :
2280             return queryfq_np.frm_cnt;
2281         case e_QM_FQR_COUNTERS_BYTE :
2282             return queryfq_np.byte_cnt;
2283         default :
2284             break;
2285     }
2286     /* should never get here */
2287     ASSERT_COND(FALSE);
2288 
2289     return 0;
2290 }
2291 
2292 
2293 t_Handle QM_CG_Create(t_QmCgParams *p_CgParams)
2294 {
2295     t_QmCg                          *p_QmCg;
2296     t_QmPortal                      *p_QmPortal;
2297     t_Error                         err;
2298     uint32_t                        wredParams;
2299     uint32_t                        tmpA, tmpN, ta=0, tn=0;
2300     int                             gap, tmp;
2301     struct qm_mc_command            *p_Mcc;
2302     struct qm_mc_result             *p_Mcr;
2303 
2304     SANITY_CHECK_RETURN_VALUE(p_CgParams, E_INVALID_HANDLE, NULL);
2305     SANITY_CHECK_RETURN_VALUE(p_CgParams->h_Qm, E_INVALID_HANDLE, NULL);
2306 
2307     if(p_CgParams->notifyDcPortal &&
2308        ((p_CgParams->dcPortalId == e_DPAA_DCPORTAL2) || (p_CgParams->dcPortalId == e_DPAA_DCPORTAL3)))
2309     {
2310         REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("notifyDcPortal is invalid for this DC Portal"));
2311         return NULL;
2312     }
2313 
2314     if (!p_CgParams->h_QmPortal)
2315     {
2316         p_QmPortal = QmGetPortalHandle(p_CgParams->h_Qm);
2317         SANITY_CHECK_RETURN_VALUE(p_QmPortal, E_INVALID_STATE, NULL);
2318     }
2319     else
2320         p_QmPortal = p_CgParams->h_QmPortal;
2321 
2322     p_QmCg = (t_QmCg *)XX_Malloc(sizeof(t_QmCg));
2323     if (!p_QmCg)
2324     {
2325         REPORT_ERROR(MAJOR, E_NO_MEMORY, ("QM CG obj!!!"));
2326         return NULL;
2327     }
2328     memset(p_QmCg, 0, sizeof(t_QmCg));
2329 
2330     /* build CG struct */
2331     p_QmCg->h_Qm        = p_CgParams->h_Qm;
2332     p_QmCg->h_QmPortal  = p_QmPortal;
2333     p_QmCg->h_App       = p_CgParams->h_App;
2334     err = QmGetCgId(p_CgParams->h_Qm, &p_QmCg->id);
2335     if (err)
2336     {
2337         XX_Free(p_QmCg);
2338         REPORT_ERROR(MAJOR, E_INVALID_STATE, ("QmGetCgId failed"));
2339         return NULL;
2340     }
2341 
2342     NCSW_PLOCK(p_QmPortal);
2343     p_Mcc = qm_mc_start(p_QmPortal->p_LowQmPortal);
2344     p_Mcc->initcgr.cgid = p_QmCg->id;
2345 
2346     err = QmPortalRegisterCg(p_QmPortal, p_QmCg, p_QmCg->id);
2347     if (err)
2348     {
2349         XX_Free(p_QmCg);
2350         PUNLOCK(p_QmPortal);
2351         REPORT_ERROR(MAJOR, E_INVALID_STATE, ("QmPortalRegisterCg failed"));
2352         return NULL;
2353     }
2354 
2355     /*  Build CGR command */
2356     {
2357 #ifdef QM_CGS_NO_FRAME_MODE
2358     t_QmRevisionInfo    revInfo;
2359 
2360     QmGetRevision(p_QmCg->h_Qm, &revInfo);
2361 
2362     if (!((revInfo.majorRev == 1) && (revInfo.minorRev == 0)))
2363 #endif /* QM_CGS_NO_FRAME_MODE */
2364         if (p_CgParams->frameCount)
2365         {
2366             p_Mcc->initcgr.we_mask |= QM_CGR_WE_MODE;
2367             p_Mcc->initcgr.cgr.frame_mode = QM_CGR_EN;
2368         }
2369     }
2370 
2371     if (p_CgParams->wredEnable)
2372     {
2373         if (p_CgParams->wredParams.enableGreen)
2374         {
2375             err = CalcWredCurve(&p_CgParams->wredParams.greenCurve, &wredParams);
2376             if(err)
2377             {
2378                 XX_Free(p_QmCg);
2379                 PUNLOCK(p_QmPortal);
2380                 REPORT_ERROR(MAJOR, err, NO_MSG);
2381                 return NULL;
2382             }
2383             p_Mcc->initcgr.we_mask |= QM_CGR_WE_WR_EN_G | QM_CGR_WE_WR_PARM_G;
2384             p_Mcc->initcgr.cgr.wr_en_g = QM_CGR_EN;
2385             p_Mcc->initcgr.cgr.wr_parm_g.word = wredParams;
2386         }
2387         if (p_CgParams->wredParams.enableYellow)
2388         {
2389             err = CalcWredCurve(&p_CgParams->wredParams.yellowCurve, &wredParams);
2390             if(err)
2391             {
2392                 XX_Free(p_QmCg);
2393                 PUNLOCK(p_QmPortal);
2394                 REPORT_ERROR(MAJOR, err, NO_MSG);
2395                 return NULL;
2396             }
2397             p_Mcc->initcgr.we_mask |= QM_CGR_WE_WR_EN_Y | QM_CGR_WE_WR_PARM_Y;
2398             p_Mcc->initcgr.cgr.wr_en_y = QM_CGR_EN;
2399             p_Mcc->initcgr.cgr.wr_parm_y.word = wredParams;
2400         }
2401         if (p_CgParams->wredParams.enableRed)
2402         {
2403             err = CalcWredCurve(&p_CgParams->wredParams.redCurve, &wredParams);
2404             if(err)
2405             {
2406                 XX_Free(p_QmCg);
2407                 PUNLOCK(p_QmPortal);
2408                 REPORT_ERROR(MAJOR, err, NO_MSG);
2409                 return NULL;
2410             }
2411             p_Mcc->initcgr.we_mask |= QM_CGR_WE_WR_EN_R | QM_CGR_WE_WR_PARM_R;
2412             p_Mcc->initcgr.cgr.wr_en_r = QM_CGR_EN;
2413             p_Mcc->initcgr.cgr.wr_parm_r.word = wredParams;
2414         }
2415     }
2416 
2417     if (p_CgParams->tailDropEnable)
2418     {
2419         if (!p_CgParams->threshold)
2420         {
2421             XX_Free(p_QmCg);
2422             PUNLOCK(p_QmPortal);
2423             REPORT_ERROR(MINOR, E_INVALID_STATE, ("tailDropThreshold must be configured if tailDropEnable "));
2424             return NULL;
2425         }
2426         p_Mcc->initcgr.cgr.cstd_en = QM_CGR_EN;
2427         p_Mcc->initcgr.we_mask |= QM_CGR_WE_CSTD_EN;
2428     }
2429 
2430     if (p_CgParams->threshold)
2431     {
2432         p_Mcc->initcgr.we_mask |= QM_CGR_WE_CS_THRES;
2433         p_QmCg->f_Exception = p_CgParams->f_Exception;
2434         if (p_QmCg->f_Exception || p_CgParams->notifyDcPortal)
2435         {
2436             p_Mcc->initcgr.cgr.cscn_en = QM_CGR_EN;
2437             p_Mcc->initcgr.we_mask |= QM_CGR_WE_CSCN_EN | QM_CGR_WE_CSCN_TARG;
2438             /* if SW - set target, if HW - if FM, set HW target, otherwize, set SW target */
2439             p_Mcc->initcgr.cgr.cscn_targ = 0;
2440             if (p_QmCg->f_Exception)
2441                 p_Mcc->initcgr.cgr.cscn_targ = (uint32_t)QM_CGR_TARGET_SWP(QmPortalGetSwPortalId(p_QmCg->h_QmPortal));
2442             if (p_CgParams->notifyDcPortal)
2443                 p_Mcc->initcgr.cgr.cscn_targ |= (uint32_t)QM_CGR_TARGET_DCP(p_CgParams->dcPortalId);
2444         }
2445 
2446         /* express thresh as ta*2^tn */
2447         gap = (int)p_CgParams->threshold;
2448         for (tmpA=0 ; tmpA<256; tmpA++ )
2449             for (tmpN=0 ; tmpN<32; tmpN++ )
2450             {
2451                 tmp = ABS((int)(p_CgParams->threshold - tmpA*(1<<tmpN)));
2452                 if (tmp < gap)
2453                 {
2454                    ta = tmpA;
2455                    tn = tmpN;
2456                    gap = tmp;
2457                 }
2458             }
2459         p_Mcc->initcgr.cgr.cs_thres.TA = ta;
2460         p_Mcc->initcgr.cgr.cs_thres.Tn = tn;
2461     }
2462     else if(p_CgParams->f_Exception)
2463     {
2464         XX_Free(p_QmCg);
2465         PUNLOCK(p_QmPortal);
2466         REPORT_ERROR(MINOR, E_INVALID_STATE, ("No threshold configured, but f_Exception defined"));
2467         return NULL;
2468     }
2469 
2470     qm_mc_commit(p_QmPortal->p_LowQmPortal, QM_MCC_VERB_INITCGR);
2471     while (!(p_Mcr = qm_mc_result(p_QmPortal->p_LowQmPortal))) ;
2472     ASSERT_COND((p_Mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_INITCGR);
2473     if (p_Mcr->result != QM_MCR_RESULT_OK)
2474     {
2475         XX_Free(p_QmCg);
2476         PUNLOCK(p_QmPortal);
2477         REPORT_ERROR(MINOR, E_INVALID_STATE, ("INITCGR failed: %s", mcr_result_str(p_Mcr->result)));
2478         return NULL;
2479     }
2480     PUNLOCK(p_QmPortal);
2481 
2482     return p_QmCg;
2483 }
2484 
2485 t_Error QM_CG_Free(t_Handle h_QmCg)
2486 {
2487 
2488     t_QmCg                  *p_QmCg = (t_QmCg *)h_QmCg;
2489     t_Error                 err;
2490     struct qm_mc_command    *p_Mcc;
2491     struct qm_mc_result     *p_Mcr;
2492     t_QmPortal              *p_QmPortal;
2493 
2494     SANITY_CHECK_RETURN_ERROR(p_QmCg, E_INVALID_HANDLE);
2495 
2496     p_QmPortal = (t_QmPortal *)p_QmCg->h_QmPortal;
2497 
2498     NCSW_PLOCK(p_QmPortal);
2499     p_Mcc = qm_mc_start(p_QmPortal->p_LowQmPortal);
2500     p_Mcc->initcgr.cgid = p_QmCg->id;
2501     p_Mcc->initcgr.we_mask = QM_CGR_WE_MASK;
2502 
2503     err = QmFreeCgId(p_QmCg->h_Qm, p_QmCg->id);
2504     if(err)
2505     {
2506         XX_Free(p_QmCg);
2507         PUNLOCK(p_QmPortal);
2508         RETURN_ERROR(MAJOR, E_INVALID_STATE, ("QmFreeCgId failed"));
2509     }
2510 
2511     err = QmPortalUnregisterCg(p_QmCg->h_QmPortal, p_QmCg->id);
2512     if(err)
2513     {
2514         XX_Free(p_QmCg);
2515         PUNLOCK(p_QmPortal);
2516         RETURN_ERROR(MAJOR, E_INVALID_STATE, ("QmPortalUnregisterCg failed"));
2517     }
2518 
2519     qm_mc_commit(p_QmPortal->p_LowQmPortal, QM_MCC_VERB_MODIFYCGR);
2520     while (!(p_Mcr = qm_mc_result(p_QmPortal->p_LowQmPortal))) ;
2521     ASSERT_COND((p_Mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_MODIFYCGR);
2522     if (p_Mcr->result != QM_MCR_RESULT_OK)
2523     {
2524         PUNLOCK(p_QmPortal);
2525         RETURN_ERROR(MINOR, E_INVALID_STATE, ("INITCGR failed: %s", mcr_result_str(p_Mcr->result)));
2526     }
2527     PUNLOCK(p_QmPortal);
2528 
2529     XX_Free(p_QmCg);
2530 
2531     return E_OK;
2532 }
2533 
2534 t_Error QM_CG_SetException(t_Handle h_QmCg, e_QmExceptions exception, bool enable)
2535 {
2536     t_QmCg                  *p_QmCg = (t_QmCg *)h_QmCg;
2537     struct qm_mc_command    *p_Mcc;
2538     struct qm_mc_result     *p_Mcr;
2539     t_QmPortal              *p_QmPortal;
2540 
2541     SANITY_CHECK_RETURN_ERROR(p_QmCg, E_INVALID_HANDLE);
2542 
2543     p_QmPortal = (t_QmPortal *)p_QmCg->h_QmPortal;
2544     if (!p_QmCg->f_Exception)
2545         RETURN_ERROR(MINOR, E_INVALID_VALUE, ("Either threshold or exception callback was not configured."));
2546 
2547     NCSW_PLOCK(p_QmPortal);
2548     p_Mcc = qm_mc_start(p_QmPortal->p_LowQmPortal);
2549     p_Mcc->initcgr.cgid = p_QmCg->id;
2550     p_Mcc->initcgr.we_mask = QM_CGR_WE_CSCN_EN;
2551 
2552     if(exception == e_QM_EX_CG_STATE_CHANGE)
2553     {
2554         if(enable)
2555             p_Mcc->initcgr.cgr.cscn_en = QM_CGR_EN;
2556     }
2557     else
2558     {
2559         PUNLOCK(p_QmPortal);
2560         RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Illegal exception"));
2561     }
2562 
2563     qm_mc_commit(p_QmPortal->p_LowQmPortal, QM_MCC_VERB_MODIFYCGR);
2564     while (!(p_Mcr = qm_mc_result(p_QmPortal->p_LowQmPortal))) ;
2565     ASSERT_COND((p_Mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_MODIFYCGR);
2566     if (p_Mcr->result != QM_MCR_RESULT_OK)
2567     {
2568         PUNLOCK(p_QmPortal);
2569         RETURN_ERROR(MINOR, E_INVALID_STATE, ("INITCGR failed: %s", mcr_result_str(p_Mcr->result)));
2570     }
2571     PUNLOCK(p_QmPortal);
2572 
2573     return E_OK;
2574 }
2575 
2576 t_Error QM_CG_ModifyWredCurve(t_Handle h_QmCg, t_QmCgModifyWredParams *p_QmCgModifyParams)
2577 {
2578     t_QmCg                  *p_QmCg = (t_QmCg *)h_QmCg;
2579     uint32_t                wredParams;
2580     struct qm_mc_command    *p_Mcc;
2581     struct qm_mc_result     *p_Mcr;
2582     t_QmPortal              *p_QmPortal;
2583     t_Error                 err = E_OK;
2584 
2585     SANITY_CHECK_RETURN_ERROR(p_QmCg, E_INVALID_HANDLE);
2586 
2587     p_QmPortal = (t_QmPortal *)p_QmCg->h_QmPortal;
2588 
2589     NCSW_PLOCK(p_QmPortal);
2590     p_Mcc = qm_mc_start(p_QmPortal->p_LowQmPortal);
2591     p_Mcc->initcgr.cgid = p_QmCg->id;
2592 
2593     qm_mc_commit(p_QmPortal->p_LowQmPortal, QM_MCC_VERB_QUERYCGR);
2594     while (!(p_Mcr = qm_mc_result(p_QmPortal->p_LowQmPortal))) ;
2595     ASSERT_COND((p_Mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYCGR);
2596     if (p_Mcr->result != QM_MCR_RESULT_OK)
2597     {
2598         PUNLOCK(p_QmPortal);
2599         RETURN_ERROR(MINOR, E_INVALID_STATE, ("QM_MCC_VERB_QUERYCGR failed: %s", mcr_result_str(p_Mcr->result)));
2600     }
2601 
2602     switch(p_QmCgModifyParams->color)
2603     {
2604         case(e_QM_CG_COLOR_GREEN):
2605             if(!p_Mcr->querycgr.cgr.wr_en_g)
2606             {
2607                 PUNLOCK(p_QmPortal);
2608                 RETURN_ERROR(MINOR, E_INVALID_STATE, ("WRED is not enabled for green"));
2609             }
2610             break;
2611         case(e_QM_CG_COLOR_YELLOW):
2612             if(!p_Mcr->querycgr.cgr.wr_en_y)
2613             {
2614                 PUNLOCK(p_QmPortal);
2615                 RETURN_ERROR(MINOR, E_INVALID_STATE, ("WRED is not enabled for yellow"));
2616             }
2617             break;
2618         case(e_QM_CG_COLOR_RED):
2619             if(!p_Mcr->querycgr.cgr.wr_en_r)
2620             {
2621                 PUNLOCK(p_QmPortal);
2622                 RETURN_ERROR(MINOR, E_INVALID_STATE, ("WRED is not enabled for red"));
2623             }
2624             break;
2625     }
2626 
2627     p_Mcc = qm_mc_start(p_QmPortal->p_LowQmPortal);
2628     p_Mcc->initcgr.cgid = p_QmCg->id;
2629 
2630     switch(p_QmCgModifyParams->color)
2631     {
2632         case(e_QM_CG_COLOR_GREEN):
2633             err = CalcWredCurve(&p_QmCgModifyParams->wredParams, &wredParams);
2634             p_Mcc->initcgr.we_mask |= QM_CGR_WE_WR_EN_G | QM_CGR_WE_WR_PARM_G;
2635             p_Mcc->initcgr.cgr.wr_en_g = QM_CGR_EN;
2636             p_Mcc->initcgr.cgr.wr_parm_g.word = wredParams;
2637             break;
2638         case(e_QM_CG_COLOR_YELLOW):
2639             err = CalcWredCurve(&p_QmCgModifyParams->wredParams, &wredParams);
2640             p_Mcc->initcgr.we_mask |= QM_CGR_WE_WR_EN_Y | QM_CGR_WE_WR_PARM_Y;
2641             p_Mcc->initcgr.cgr.wr_en_y = QM_CGR_EN;
2642             p_Mcc->initcgr.cgr.wr_parm_y.word = wredParams;
2643             break;
2644         case(e_QM_CG_COLOR_RED):
2645             err = CalcWredCurve(&p_QmCgModifyParams->wredParams, &wredParams);
2646             p_Mcc->initcgr.we_mask |= QM_CGR_WE_WR_EN_R | QM_CGR_WE_WR_PARM_R;
2647             p_Mcc->initcgr.cgr.wr_en_r = QM_CGR_EN;
2648             p_Mcc->initcgr.cgr.wr_parm_r.word = wredParams;
2649             break;
2650     }
2651     if (err)
2652     {
2653         PUNLOCK(p_QmPortal);
2654         RETURN_ERROR(MINOR, err, NO_MSG);
2655     }
2656 
2657     qm_mc_commit(p_QmPortal->p_LowQmPortal, QM_MCC_VERB_MODIFYCGR);
2658     while (!(p_Mcr = qm_mc_result(p_QmPortal->p_LowQmPortal))) ;
2659     ASSERT_COND((p_Mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_MODIFYCGR);
2660     if (p_Mcr->result != QM_MCR_RESULT_OK)
2661     {
2662         PUNLOCK(p_QmPortal);
2663         RETURN_ERROR(MINOR, E_INVALID_STATE, ("INITCGR failed: %s", mcr_result_str(p_Mcr->result)));
2664     }
2665     PUNLOCK(p_QmPortal);
2666 
2667     return E_OK;
2668 }
2669 
2670 t_Error QM_CG_ModifyTailDropThreshold(t_Handle h_QmCg, uint32_t threshold)
2671 {
2672     t_QmCg                  *p_QmCg = (t_QmCg *)h_QmCg;
2673     struct qm_mc_command    *p_Mcc;
2674     struct qm_mc_result     *p_Mcr;
2675     t_QmPortal              *p_QmPortal;
2676     uint32_t                tmpA, tmpN, ta=0, tn=0;
2677     int                     gap, tmp;
2678 
2679     SANITY_CHECK_RETURN_ERROR(p_QmCg, E_INVALID_HANDLE);
2680 
2681     p_QmPortal = (t_QmPortal *)p_QmCg->h_QmPortal;
2682 
2683     NCSW_PLOCK(p_QmPortal);
2684     p_Mcc = qm_mc_start(p_QmPortal->p_LowQmPortal);
2685     p_Mcc->initcgr.cgid = p_QmCg->id;
2686 
2687     qm_mc_commit(p_QmPortal->p_LowQmPortal, QM_MCC_VERB_QUERYCGR);
2688     while (!(p_Mcr = qm_mc_result(p_QmPortal->p_LowQmPortal))) ;
2689     ASSERT_COND((p_Mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYCGR);
2690     if (p_Mcr->result != QM_MCR_RESULT_OK)
2691     {
2692         PUNLOCK(p_QmPortal);
2693         RETURN_ERROR(MINOR, E_INVALID_STATE, ("QM_MCC_VERB_QUERYCGR failed: %s", mcr_result_str(p_Mcr->result)));
2694     }
2695 
2696     if(!p_Mcr->querycgr.cgr.cstd_en)
2697     {
2698         PUNLOCK(p_QmPortal);
2699         RETURN_ERROR(MINOR, E_INVALID_STATE, ("Tail Drop is not enabled!"));
2700     }
2701 
2702     p_Mcc = qm_mc_start(p_QmPortal->p_LowQmPortal);
2703     p_Mcc->initcgr.cgid = p_QmCg->id;
2704     p_Mcc->initcgr.we_mask |= QM_CGR_WE_CS_THRES;
2705 
2706     /* express thresh as ta*2^tn */
2707     gap = (int)threshold;
2708     for (tmpA=0 ; tmpA<256; tmpA++ )
2709         for (tmpN=0 ; tmpN<32; tmpN++ )
2710         {
2711             tmp = ABS((int)(threshold - tmpA*(1<<tmpN)));
2712             if (tmp < gap)
2713             {
2714                ta = tmpA;
2715                tn = tmpN;
2716                gap = tmp;
2717             }
2718         }
2719     p_Mcc->initcgr.cgr.cs_thres.TA = ta;
2720     p_Mcc->initcgr.cgr.cs_thres.Tn = tn;
2721 
2722     qm_mc_commit(p_QmPortal->p_LowQmPortal, QM_MCC_VERB_MODIFYCGR);
2723     while (!(p_Mcr = qm_mc_result(p_QmPortal->p_LowQmPortal))) ;
2724     ASSERT_COND((p_Mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_MODIFYCGR);
2725     if (p_Mcr->result != QM_MCR_RESULT_OK)
2726     {
2727         PUNLOCK(p_QmPortal);
2728         RETURN_ERROR(MINOR, E_INVALID_STATE, ("INITCGR failed: %s", mcr_result_str(p_Mcr->result)));
2729     }
2730     PUNLOCK(p_QmPortal);
2731 
2732     return E_OK;
2733 }
2734 
2735