/* Copyright 2008 - 2016 Freescale Semiconductor, Inc. */

#define IRQNAME		"QMan portal %d"
#define MAX_IRQNAME	16	/* big enough for "QMan portal %d" */
/*
 * The two comment pairs below head the two portal register maps in the
 * source (ARM/ARM64 layout first, then PowerPC); the offset tables
 * themselves are not part of this excerpt.
 */
/* Cache-inhibited register offsets */
/* Cache-enabled register offsets */

/* Cache-inhibited register offsets */
/* Cache-enabled register offsets */
/*
 * ... synchronisation for portal accesses and data-dependencies. Use of
 * barrier()s or other order-preserving primitives simply degrades
 * performance. Hence the ...
 */
/* Cache-enabled ring access */

/*
 * Portal mode codes:
 *   ci == cache-inhibited portal register
 *   ce == cache-enabled portal register
 *   vb == in-band valid-bit (cache-enabled)
 *   dc == DCA (Discrete Consumption Acknowledgment), DQRR-only
 * As for "enum qm_dqrr_dmode", it should be self-explanatory.
 */
enum qm_eqcr_pmode {
	qm_eqcr_pci = 0,	/* PI index, cache-inhibited */
	qm_eqcr_pce = 1,	/* PI index, cache-enabled */
	qm_eqcr_pvb = 2		/* valid-bit */
};

enum qm_dqrr_pmode {		/* s/w-only */
	qm_dqrr_pci,		/* reads DQRR_PI_CINH */
	qm_dqrr_pce,		/* reads DQRR_PI_CENA */
	qm_dqrr_pvb		/* reads valid-bit */
};

enum qm_dqrr_cmode {
	qm_dqrr_cci = 0,	/* CI index, cache-inhibited */
	qm_dqrr_cce = 1,	/* CI index, cache-enabled */
	qm_dqrr_cdc = 2		/* Discrete Consumption Acknowledgment */
};

enum qm_mr_pmode {		/* s/w-only */
	qm_mr_pci,		/* reads MR_PI_CINH */
	qm_mr_pce,		/* reads MR_PI_CENA */
	qm_mr_pvb		/* reads valid-bit */
};

enum qm_mr_cmode {
	qm_mr_cci = 0,		/* CI index, cache-inhibited */
	qm_mr_cce = 1		/* CI index, cache-enabled */
};
/* --- Portal structures --- */

struct qm_eqcr_entry {
	u8 _ncw_verb;		/* writes to this are non-coherent */
	/* ... */
	__be32 fqid;		/* 24-bit */
	/* ... */
};

struct qm_mcc_fq {
	/* ... */
	__be32 fqid;		/* 24-bit */
	/* ... */
};

#define QM_MCC_VERB_QUERYFQ_NP	0x45	/* "non-programmable" fields */

union qm_mc_command {
	/* ... */
	u8 _ncw_verb;		/* writes to this are non-coherent */
};

struct qm_addr {
	void *ce;		/* cache-enabled */
	/* ... */
	void __iomem *ci;	/* cache-inhibited */
};

/*
 * In the non-CONFIG_FSL_DPAA_CHECKING case, the following stuff up to
 * ... is setup-only, so isn't a cause for concern. In other words, don't ...
 */
/* Cache-inhibited register access. */
return ioread32be(p->addr.ci + offset);			/* in qm_in() */
iowrite32be(val, p->addr.ci + offset);			/* in qm_out() */
dpaa_invalidate(p->addr.ce + offset);			/* in qm_cl_invalidate() */
dpaa_touch_ro(p->addr.ce + offset);			/* in qm_cl_touch_ro() */
return be32_to_cpu(*(p->addr.ce_be + (offset / 4)));	/* in qm_ce_in() */
/* --- EQCR API --- */

/* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */
/* Bit-wise logic to convert a ring pointer to a ring index */
return ((uintptr_t)e >> EQCR_SHIFT) & (QM_EQCR_SIZE - 1);	/* in eqcr_ptr2idx() */

/* in eqcr_inc(): */
struct qm_eqcr_entry *partial = eqcr->cursor + 1;

eqcr->cursor = eqcr_carryclear(partial);
if (partial != eqcr->cursor)
	eqcr->vbit ^= QM_EQCR_VERB_VBIT;
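/*
 * A standalone sketch of the carry-bit wrap trick used by eqcr_carryclear()
 * above -- illustrative only, with assumed SKETCH_* names.  It presumes a
 * ring of 8 entries of 64 bytes each whose base address has a zero bit just
 * above the index field (in the driver the EQCR sits at offset 0 of a
 * page-aligned cache-enabled region).  Walking a cursor off the end sets
 * exactly that bit, and masking it off wraps to the ring base with no
 * compare or modulo.
 */
#define SKETCH_EQCR_SHIFT	6	/* log2(entry size in bytes) */
#define SKETCH_EQCR_SIZE	8	/* entries per ring (power of two) */

static inline uintptr_t sketch_eqcr_carryclear(uintptr_t e)
{
	return e & ~(uintptr_t)(SKETCH_EQCR_SIZE << SKETCH_EQCR_SHIFT);
}

static inline unsigned int sketch_eqcr_ptr2idx(uintptr_t e)
{
	return (e >> SKETCH_EQCR_SHIFT) & (SKETCH_EQCR_SIZE - 1);
}

/*
 * e.g. ring base 0x3000: entry 7 sits at 0x31c0; adding one entry (0x40)
 * gives 0x3200, and clearing the carry bit (0x200) lands back on 0x3000 --
 * exactly the index-7 to index-0 wrap that eqcr_inc() relies on.
 */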
/* in qm_eqcr_init(): */
struct qm_eqcr *eqcr = &portal->eqcr;
u8 pi;

eqcr->ring = portal->addr.ce + QM_CL_EQCR;
eqcr->ci = qm_in(portal, QM_REG_EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
pi = qm_in(portal, QM_REG_EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
eqcr->cursor = eqcr->ring + pi;
eqcr->vbit = (qm_in(portal, QM_REG_EQCR_PI_CINH) & QM_EQCR_SIZE) ?
	     QM_EQCR_VERB_VBIT : 0;
eqcr->available = QM_EQCR_SIZE - 1 -
		  dpaa_cyc_diff(QM_EQCR_SIZE, eqcr->ci, pi);
eqcr->ithresh = qm_in(portal, QM_REG_EQCR_ITR);
eqcr->busy = 0;
eqcr->pmode = pmode;
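/*
 * qm_eqcr_init() above derives 'available' from the two hardware indices via
 * a cyclic difference.  A sketch of what a helper like dpaa_cyc_diff()
 * computes (the driver's helper lives in dpaa-sys.h); for a power-of-two
 * ringsize this reduces to (last - first) & (ringsize - 1).
 */
static inline u8 sketch_cyc_diff(u8 ringsize, u8 first, u8 last)
{
	/* 'first' is the older index; the result counts entries in flight */
	return (first <= last) ? last - first : ringsize + last - first;
}

/*
 * Worked example: ci = 5, pi = 2, ring of 8 => 8 + 2 - 5 = 5 entries in
 * flight, so available = 8 - 1 - 5 = 2 free EQCR slots (one slot is always
 * kept unused so a full ring stays distinguishable from an empty one).
 */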
/* in qm_eqcr_finish(): */
struct qm_eqcr *eqcr = &portal->eqcr;
u8 pi = qm_in(portal, QM_REG_EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
u8 ci = qm_in(portal, QM_REG_EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);

DPAA_ASSERT(!eqcr->busy);
if (pi != eqcr_ptr2idx(eqcr->cursor))
	pr_crit("losing uncommitted EQCR entries\n");
if (ci != eqcr->ci)
	pr_crit("missing existing EQCR completions\n");
if (eqcr->ci != eqcr_ptr2idx(eqcr->cursor))
	pr_crit("EQCR destroyed unquiesced\n");
/* in qm_eqcr_start_no_stash(): */
struct qm_eqcr *eqcr = &portal->eqcr;

DPAA_ASSERT(!eqcr->busy);
if (!eqcr->available)
	return NULL;
eqcr->busy = 1;
dpaa_zero(eqcr->cursor);
return eqcr->cursor;

/* in qm_eqcr_start_stash(): */
struct qm_eqcr *eqcr = &portal->eqcr;
u8 diff, old_ci;

DPAA_ASSERT(!eqcr->busy);
if (!eqcr->available) {
	old_ci = eqcr->ci;
	eqcr->ci = qm_ce_in(portal, QM_CL_EQCR_CI_CENA) &
		   (QM_EQCR_SIZE - 1);
	diff = dpaa_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
	eqcr->available += diff;
	if (!diff)
		return NULL;
}
eqcr->busy = 1;
dpaa_zero(eqcr->cursor);
return eqcr->cursor;
/* in eqcr_commit_checks(): */
DPAA_ASSERT(eqcr->busy);
DPAA_ASSERT(!(be32_to_cpu(eqcr->cursor->fqid) & ~QM_FQID_MASK));
DPAA_ASSERT(eqcr->available >= 1);

/* in qm_eqcr_pvb_commit(): */
struct qm_eqcr *eqcr = &portal->eqcr;
struct qm_eqcr_entry *eqcursor;

eqcr_commit_checks(eqcr);
DPAA_ASSERT(eqcr->pmode == qm_eqcr_pvb);
dma_wmb();
eqcursor = eqcr->cursor;
eqcursor->_ncw_verb = myverb | eqcr->vbit;
dpaa_flush(eqcursor);
eqcr_inc(eqcr);
eqcr->available--;
eqcr->busy = 0;
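/*
 * The ordering in qm_eqcr_pvb_commit() is the load-bearing part of the
 * valid-bit protocol: the entry body is written first, then a write barrier,
 * then the verb byte carrying this cycle's valid-bit, so QMan can never
 * observe a valid verb on a half-written entry.  A hedged sketch of the
 * publish step (pattern only, not the driver's exact code):
 */
static inline void sketch_pvb_publish(struct qm_eqcr_entry *e, u8 verb,
				      u8 vbit)
{
	/* 1: caller has already filled e->fqid, e->fd, etc. */
	dma_wmb();			/* 2: order body before verb */
	e->_ncw_verb = verb | vbit;	/* 3: verb + valid-bit written last */
	dpaa_flush(e);			/* 4: push the cache line toward h/w */
}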
/* in qm_eqcr_cce_update(): */
struct qm_eqcr *eqcr = &portal->eqcr;
u8 diff, old_ci = eqcr->ci;

eqcr->ci = qm_ce_in(portal, QM_CL_EQCR_CI_CENA) & (QM_EQCR_SIZE - 1);
diff = dpaa_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
eqcr->available += diff;
return diff;

/* in qm_eqcr_set_ithresh(): */
struct qm_eqcr *eqcr = &portal->eqcr;

eqcr->ithresh = ithresh;
qm_out(portal, QM_REG_EQCR_ITR, ithresh);

/* in qm_eqcr_get_avail(): */
struct qm_eqcr *eqcr = &portal->eqcr;

return eqcr->available;

/* in qm_eqcr_get_fill(): */
struct qm_eqcr *eqcr = &portal->eqcr;

return QM_EQCR_SIZE - 1 - eqcr->available;
/* --- DQRR API --- */

return ((uintptr_t)e >> DQRR_SHIFT) & (QM_DQRR_SIZE - 1);	/* in dqrr_ptr2idx() */

/* in qm_dqrr_set_maxfill(): writes the max-fill field into QM_REG_CFG */
	/* ... */
	((mf & (QM_DQRR_SIZE - 1)) << 20));
/* in qm_dqrr_init(): */
struct qm_dqrr *dqrr = &portal->dqrr;

dqrr->ring = portal->addr.ce + QM_CL_DQRR;
dqrr->pi = qm_in(portal, QM_REG_DQRR_PI_CINH) & (QM_DQRR_SIZE - 1);
dqrr->ci = qm_in(portal, QM_REG_DQRR_CI_CINH) & (QM_DQRR_SIZE - 1);
dqrr->cursor = dqrr->ring + dqrr->ci;
dqrr->fill = dpaa_cyc_diff(QM_DQRR_SIZE, dqrr->ci, dqrr->pi);
dqrr->vbit = (qm_in(portal, QM_REG_DQRR_PI_CINH) & QM_DQRR_SIZE) ?
	     QM_DQRR_VERB_VBIT : 0;
dqrr->ithresh = qm_in(portal, QM_REG_DQRR_ITR);
dqrr->dmode = dmode;
dqrr->pmode = pmode;
dqrr->cmode = cmode;
/* invalidate every ring entry before use */
for (cfg = 0; cfg < QM_DQRR_SIZE; cfg++)
	dpaa_invalidate(qm_cl(dqrr->ring, cfg));
cfg = (qm_in(portal, QM_REG_CFG) & /* ... */) |
      ((max_fill & (QM_DQRR_SIZE - 1)) << 20);	/* DQRR_MF */
/* in qm_dqrr_finish(): */
struct qm_dqrr *dqrr = &portal->dqrr;

if (dqrr->cmode != qm_dqrr_cdc &&
    dqrr->ci != dqrr_ptr2idx(dqrr->cursor))
	pr_crit("Ignoring completed DQRR entries\n");

/* in qm_dqrr_current(): */
struct qm_dqrr *dqrr = &portal->dqrr;

if (!dqrr->fill)
	return NULL;
return dqrr->cursor;

/* in qm_dqrr_next(): */
struct qm_dqrr *dqrr = &portal->dqrr;

DPAA_ASSERT(dqrr->fill);
dqrr->cursor = dqrr_inc(dqrr->cursor);
return --dqrr->fill;

/* in qm_dqrr_pvb_update(): */
struct qm_dqrr *dqrr = &portal->dqrr;
struct qm_dqrr_entry *res = qm_cl(dqrr->ring, dqrr->pi);

DPAA_ASSERT(dqrr->pmode == qm_dqrr_pvb);
if ((res->verb & QM_DQRR_VERB_VBIT) == dqrr->vbit) {
	dqrr->pi = (dqrr->pi + 1) & (QM_DQRR_SIZE - 1);
	if (!dqrr->pi)
		dqrr->vbit ^= QM_DQRR_VERB_VBIT;
	dqrr->fill++;
}
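/*
 * The same valid-bit production-detection pattern appears in both the DQRR
 * and MR handlers.  A self-contained sketch with assumed SKETCH_* names: the
 * consumer accepts an entry only when its valid-bit equals the bit expected
 * for the current ring cycle, and flips that expectation each time the index
 * wraps back to 0.
 */
#define SKETCH_RING_SIZE	16	/* DQRR is 16 entries deep */
#define SKETCH_RING_VBIT	0x80	/* top bit of the verb byte */

struct sketch_ring_entry {
	u8 verb;
	/* ... payload ... */
};

struct sketch_ring {
	struct sketch_ring_entry e[SKETCH_RING_SIZE];
	u8 pi, vbit, fill;
};

static bool sketch_ring_poll(struct sketch_ring *r)
{
	const struct sketch_ring_entry *res = &r->e[r->pi];

	if ((res->verb & SKETCH_RING_VBIT) != r->vbit)
		return false;	/* producer hasn't published this slot yet */
	r->pi = (r->pi + 1) & (SKETCH_RING_SIZE - 1);
	if (!r->pi)
		r->vbit ^= SKETCH_RING_VBIT;	/* new cycle, new expectation */
	r->fill++;
	return true;
}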
/* in qm_dqrr_cdc_consume_1ptr(): */
__maybe_unused struct qm_dqrr *dqrr = &portal->dqrr;
int idx = dqrr_ptr2idx(dq);

DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
DPAA_ASSERT((dqrr->ring + idx) == dq);
/* ... */

/* in qm_dqrr_cdc_consume_n(): */
__maybe_unused struct qm_dqrr *dqrr = &portal->dqrr;

DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
/* ... */

/* in qm_dqrr_set_ithresh(): */
	/* ... */
	return -EINVAL;
/* --- MR API --- */

return ((uintptr_t)e >> MR_SHIFT) & (QM_MR_SIZE - 1);	/* in mr_ptr2idx() */

/* in qm_mr_init(): */
struct qm_mr *mr = &portal->mr;

mr->ring = portal->addr.ce + QM_CL_MR;
mr->pi = qm_in(portal, QM_REG_MR_PI_CINH) & (QM_MR_SIZE - 1);
mr->ci = qm_in(portal, QM_REG_MR_CI_CINH) & (QM_MR_SIZE - 1);
mr->cursor = mr->ring + mr->ci;
mr->fill = dpaa_cyc_diff(QM_MR_SIZE, mr->ci, mr->pi);
mr->vbit = (qm_in(portal, QM_REG_MR_PI_CINH) & QM_MR_SIZE)
	   ? QM_MR_VERB_VBIT : 0;
mr->ithresh = qm_in(portal, QM_REG_MR_ITR);
mr->pmode = pmode;
mr->cmode = cmode;

/* in qm_mr_finish(): */
struct qm_mr *mr = &portal->mr;

if (mr->ci != mr_ptr2idx(mr->cursor))
	pr_crit("Ignoring completed MR entries\n");

/* in qm_mr_current(): */
struct qm_mr *mr = &portal->mr;

if (!mr->fill)
	return NULL;
return mr->cursor;

/* in qm_mr_next(): */
struct qm_mr *mr = &portal->mr;

DPAA_ASSERT(mr->fill);
mr->cursor = mr_inc(mr->cursor);
return --mr->fill;
/* in qm_mr_pvb_update(): */
struct qm_mr *mr = &portal->mr;
union qm_mr_entry *res = qm_cl(mr->ring, mr->pi);

DPAA_ASSERT(mr->pmode == qm_mr_pvb);
if ((res->verb & QM_MR_VERB_VBIT) == mr->vbit) {
	mr->pi = (mr->pi + 1) & (QM_MR_SIZE - 1);
	if (!mr->pi)
		mr->vbit ^= QM_MR_VERB_VBIT;
	mr->fill++;
}

/* in qm_mr_cci_consume(): */
struct qm_mr *mr = &portal->mr;

DPAA_ASSERT(mr->cmode == qm_mr_cci);
mr->ci = (mr->ci + num) & (QM_MR_SIZE - 1);
qm_out(portal, QM_REG_MR_CI_CINH, mr->ci);

/* in qm_mr_cci_consume_to_current(): */
struct qm_mr *mr = &portal->mr;

DPAA_ASSERT(mr->cmode == qm_mr_cci);
mr->ci = mr_ptr2idx(mr->cursor);
qm_out(portal, QM_REG_MR_CI_CINH, mr->ci);
/* --- Management command API --- */

/* in qm_mc_init(): */
struct qm_mc *mc = &portal->mc;
u8 rr0, rr1;

mc->cr = portal->addr.ce + QM_CL_CR;
mc->rr = portal->addr.ce + QM_CL_RR0;
/*
 * The expected valid-bit is determined by whichever of the two response
 * registers was most recently written to.
 */
rr0 = mc->rr->verb;
rr1 = (mc->rr + 1)->verb;
if ((rr0 == 0 && rr1 == 0) || rr0 != 0)
	mc->rridx = 1;
else
	mc->rridx = 0;
mc->vbit = mc->rridx ? QM_MCC_VERB_VBIT : 0;
mc->state = qman_mc_idle;

/* in qm_mc_finish(): */
struct qm_mc *mc = &portal->mc;

DPAA_ASSERT(mc->state == qman_mc_idle);
if (mc->state != qman_mc_idle)
	pr_crit("Losing incomplete MC command\n");

/* in qm_mc_start(): */
struct qm_mc *mc = &portal->mc;

DPAA_ASSERT(mc->state == qman_mc_idle);
mc->state = qman_mc_user;
dpaa_zero(mc->cr);
return mc->cr;

/* in qm_mc_commit(): */
struct qm_mc *mc = &portal->mc;
union qm_mc_result *rr = mc->rr + mc->rridx;

DPAA_ASSERT(mc->state == qman_mc_user);
mc->cr->_ncw_verb = myverb | mc->vbit;
dpaa_flush(mc->cr);
/* ... */
mc->state = qman_mc_hw;

/* in qm_mc_result(): */
struct qm_mc *mc = &portal->mc;
union qm_mc_result *rr = mc->rr + mc->rridx;

DPAA_ASSERT(mc->state == qman_mc_hw);
/*
 * The inactive response register's verb byte always returns zero until
 * its command is submitted and completed. This includes the valid-bit,
 * in case you were wondering...
 */
if (!rr->verb) {
	dpaa_invalidate_touch_ro(rr);
	return NULL;
}
mc->rridx ^= 1;
mc->vbit ^= QM_MCC_VERB_VBIT;
mc->state = qman_mc_idle;
return rr;

/* in qm_mc_result_timeout(): poll qm_mc_result() with a bounded udelay loop */
do {
	*mcr = qm_mc_result(portal);
	if (*mcr)
		break;
	udelay(1);
} while (--timeout);
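/*
 * Every management command in this driver follows the same five-step shape
 * built from the helpers above.  A condensed, hedged sketch using QUERYFQ_NP
 * as the example (error handling trimmed, portal access assumed serialised):
 */
static int sketch_mc_roundtrip(struct qm_portal *portal, u32 fqid)
{
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;

	mcc = qm_mc_start(portal);			/* 1: claim + zero CR */
	qm_fqid_set(&mcc->fq, fqid);			/* 2: fill the command */
	qm_mc_commit(portal, QM_MCC_VERB_QUERYFQ_NP);	/* 3: verb written last */
	if (!qm_mc_result_timeout(portal, &mcr))	/* 4: poll the RR */
		return -ETIMEDOUT;
	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
	return mcr->result == QM_MCR_RESULT_OK ? 0 : -EIO;	/* 5: check */
}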
fq->flags |= mask;		/* in fq_set() */
fq->flags &= ~mask;		/* in fq_clear() */
return fq->flags & mask;	/* in fq_isset() */
return !(fq->flags & mask);	/* in fq_isclear() */

/* in struct qman_portal: */
/* PORTAL_BITS_*** - dynamic, strictly internal */
/* probing time config params for cpu-affine portals */
/* 2-element array. cgrs[0] is mask, cgrs[1] is snapshot. */
/* linked-list of CSCN handlers. */
static inline struct qman_portal *get_portal_for_channel(u16 channel)
{
	int i;

	for (i = 0; i < num_possible_cpus(); i++) {
		if (affine_portals[i] &&
		    affine_portals[i]->config->channel == channel)
			return affine_portals[i];
	}
	return NULL;
}

/* in qman_dqrr_set_ithresh(): */
	/* ... */
	return -EINVAL;
res = qm_dqrr_set_ithresh(&portal->p, ithresh);
	/* ... */
portal->p.dqrr.ithresh = ithresh;

*ithresh = qm_in(&portal->p, QM_REG_DQRR_ITR);	/* in qman_dqrr_get_ithresh() */

*iperiod = qm_in(&portal->p, QM_REG_ITPR);	/* in qman_portal_get_iperiod() */

/* in qman_portal_set_iperiod(): */
	/* ... */
	return -EINVAL;
qm_out(&portal->p, QM_REG_ITPR, iperiod);

/* in qman_wq_alloc(): */
	/* ... */
	return -ENOMEM;

/* in qman_enable_irqs(): */
qm_out(&affine_portals[i]->p, QM_REG_ISR, 0xffffffff);
qm_out(&affine_portals[i]->p, QM_REG_IIR, 0);
/* in qman_alloc_fq_table(): */
	/* ... */
	return -ENOMEM;

DPAA_ASSERT(!fq || idx == fq->idx);	/* in idx_to_fq() */

/*
 * Only returns full-service fq objects, not enqueue-only
 * references (QMAN_FQ_FLAG_NO_MODIFY).
 */

return fq->idx;		/* in fq_to_tag() */
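/*
 * A sketch of the index/tag arithmetic used by the fq_table: every FQ object
 * claims slot fqid * 2, with the low bit set for enqueue-only
 * (QMAN_FQ_FLAG_NO_MODIFY) references, and that same number doubles as the
 * 32-bit tag planted in the FQD's context_b so the dequeue demux can go
 * straight from a DQRR/MR entry back to the driver object.
 */
static inline u32 sketch_fq_idx(u32 fqid, bool no_modify)
{
	return fqid * 2 + (no_modify ? 1 : 0);
}

static inline u32 sketch_idx_to_fqid(u32 idx)
{
	return idx / 2;		/* both kinds of reference map to the FQID */
}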
/* in portal_isr(): */
u32 is = qm_in(&p->p, QM_REG_ISR) & p->irq_sources;

/* DQRR-handling if it's interrupt-driven */
/* ... */
/* Handling of anything else that's interrupt-driven */
/* ... */
qm_out(&p->p, QM_REG_ISR, clear);

/* in drain_mr_fqrni(): */
/*
 * ... existing entries are consumed. A worst-case situation
 * (fully-loaded system) means h/w sequencers may have to do 3-4
 * ... which (if slow) may take ~50 qman cycles (which is ~200
 * ... worst-case estimate by a factor of 10, just to be
 * ultra-paranoid, goes as high as 10,000 cycles. NB, we consume ...
 */
if ((msg->verb & QM_MR_VERB_TYPE_MASK) != QM_MR_VERB_FQRNI) {
	/* We aren't draining anything but FQRNIs */
	pr_err("Found verb 0x%x in MR\n", msg->verb);
	return -1;
}
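/*
 * The overall shape of drain_mr_fqrni(), as a hedged sketch: consume FQRNI
 * messages until the ring stays empty, with the single msleep(1) retry that
 * the timing estimate above justifies.  Built from the MR helpers shown
 * earlier; not the verbatim function.
 */
static int sketch_drain_fqrni(struct qm_portal *p)
{
	const union qm_mr_entry *msg;

	for (;;) {
		qm_mr_pvb_update(p);
		msg = qm_mr_current(p);
		if (!msg) {
			msleep(1);	/* give h/w time to refill the ring */
			qm_mr_pvb_update(p);
			msg = qm_mr_current(p);
			if (!msg)
				return 0;
		}
		if ((msg->verb & QM_MR_VERB_TYPE_MASK) != QM_MR_VERB_FQRNI)
			return -1;	/* only FQRNIs are expected here */
		qm_mr_next(p);
		qm_mr_cci_consume(p, 1);
	}
}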
/* in qman_create_portal(): */
p = &portal->p;

#ifdef CONFIG_FSL_PAMU
/* PAMU is required for stashing */
portal->use_eqcr_ci_stashing = ((qman_ip_rev >= QMAN_REV30) ? 1 : 0);
#else
portal->use_eqcr_ci_stashing = 0;
#endif
/*
 * prep the low-level portal struct with the mapped addresses from the
 * config ...
 */
p->addr.ce = c->addr_virt_ce;
p->addr.ce_be = c->addr_virt_ce;
p->addr.ci = c->addr_virt_ci;
/*
 * If CI-stashing is used, the current defaults use a threshold of 3,
 * and stash with higher-than-DQRR priority.
 */
if (qm_eqcr_init(p, qm_eqcr_pvb,
		 portal->use_eqcr_ci_stashing ? 3 : 0, 1)) {
	dev_err(c->dev, "EQCR initialisation failed\n");
	goto fail;
}
if (qm_dqrr_init(p, /* ... */)) {
	dev_err(c->dev, "DQRR initialisation failed\n");
	goto fail;
}
if (qm_mr_init(p, qm_mr_pvb, qm_mr_cci)) {
	dev_err(c->dev, "MR initialisation failed\n");
	goto fail;
}
if (qm_mc_init(p)) {
	dev_err(c->dev, "MC initialisation failed\n");
	goto fail;
}
/* static interrupt-gating controls */
/* ... */
portal->cgrs = kmalloc_array(2, sizeof(*portal->cgrs), GFP_KERNEL);
if (!portal->cgrs)
	goto fail;
/* initial snapshot is no-depletion */
qman_cgrs_init(&portal->cgrs[1]);
if (cgrs)
	portal->cgrs[0] = *cgrs;
else
	/* if the given mask is NULL, assume all CGRs can be seen */
	qman_cgrs_fill(&portal->cgrs[0]);
INIT_LIST_HEAD(&portal->cgr_cbs);
raw_spin_lock_init(&portal->cgr_lock);
INIT_WORK(&portal->congestion_work, qm_congestion_task);
INIT_WORK(&portal->mr_work, qm_mr_process_task);
portal->bits = 0;
portal->sdqcr = QM_SDQCR_SOURCE_CHANNELS | QM_SDQCR_COUNT_UPTO3 |
		/* ... remaining SDQCR flags elided ... */;
portal->irq_sources = 0;
/* ... */
snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu);
if (request_irq(c->irq, portal_isr, 0, portal->irqname, portal)) {
	dev_err(c->dev, "request_irq() failed\n");
	goto fail;
}
if (dpaa_set_portal_irq_affinity(c->dev, c->irq, c->cpu))
	goto fail;
/* Need EQCR to be empty before continuing */
if (qm_eqcr_get_fill(p)) {
	dev_err(c->dev, "EQCR unclean\n");
	goto fail;
}
/* a non-empty DQRR is tolerated, but logged */
	dev_dbg(c->dev, "DQRR unclean\n");
/* a dirty MR is fatal */
if (e) {
	dev_err(c->dev, "MR dirty, VB 0x%x, rc 0x%x, addr 0x%llx\n",
		e->verb, e->ern.rc, qm_fd_addr_get64(&e->ern.fd));
	goto fail;
}
/* Success */
portal->config = c;
/* Write a sane SDQCR */
qm_dqrr_sdqcr_set(p, portal->sdqcr);
return 0;

fail:	/* unwind labels collapsed in this excerpt */
	free_irq(c->irq, portal);
	kfree(portal->cgrs);
	/* ... */
	return -EIO;
/* in qman_create_affine_portal(): */
portal = &per_cpu(qman_affine_portal, c->cpu);
/* ... */
cpumask_set_cpu(c->cpu, &affine_mask);
affine_channels[c->cpu] = c->channel;
affine_portals[c->cpu] = portal;
/* in qman_destroy_portal(): */
qm_dqrr_sdqcr_set(&qm->p, 0);

/*
 * NB we do this to "quiesce" EQCR. If we add enqueue-completions or ...
 * The EQCR CI update is prefetch-based, so it must be called twice to
 * guarantee a full update irrespective of where enqueue processing was
 * when the teardown began.
 */
qm_eqcr_cce_update(&qm->p);
qm_eqcr_cce_update(&qm->p);
pcfg = qm->config;

free_irq(pcfg->irq, qm);

kfree(qm->cgrs);
qm_mc_finish(&qm->p);
qm_mr_finish(&qm->p);
qm_dqrr_finish(&qm->p);
qm_eqcr_finish(&qm->p);

qm->config = NULL;
/* in qman_destroy_affine_portal(): */
pcfg = qm->config;
cpu = pcfg->cpu;
/* in fq_state_change(): */
/* FQRN/FQRL (retirement notifications): */
DPAA_ASSERT(fq->state == qman_fq_state_parked ||
	    fq->state == qman_fq_state_sched);
/* ... */
if (msg->fq.fqs & QM_MR_FQS_NOTEMPTY)
	fq_set(fq, QMAN_FQ_STATE_NE);
if (msg->fq.fqs & QM_MR_FQS_ORLPRESENT)
	fq_set(fq, QMAN_FQ_STATE_ORL);
fq->state = qman_fq_state_retired;

/* FQPN (park notification): */
DPAA_ASSERT(fq->state == qman_fq_state_sched);
/* ... */
fq->state = qman_fq_state_parked;
/* in qm_congestion_task(): */
raw_spin_lock_irq(&p->cgr_lock);
qm_mc_start(&p->p);
qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION);
if (!qm_mc_result_timeout(&p->p, &mcr)) {
	raw_spin_unlock_irq(&p->cgr_lock);
	dev_crit(p->config->dev, "QUERYCONGESTION timeout\n");
	qman_p_irqsource_add(p, QM_PIRQ_CSCI);
	return;
}
/* mask out the ones I'm not interested in */
qman_cgrs_and(&rr, (struct qman_cgrs *)&mcr->querycongestion.state,
	      &p->cgrs[0]);
/* check previous snapshot for delta, enter/exit congestion */
qman_cgrs_xor(&c, &rr, &p->cgrs[1]);
/* update snapshot */
qman_cgrs_cp(&p->cgrs[1], &rr);
/* invoke callbacks for every CGR whose state changed */
list_for_each_entry(cgr, &p->cgr_cbs, node)
	if (cgr->cb && qman_cgrs_get(&c, cgr->cgrid))
		cgr->cb(p, cgr, qman_cgrs_get(&rr, cgr->cgrid));
raw_spin_unlock_irq(&p->cgr_lock);
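/*
 * What qm_congestion_task() computes, as a sketch over plain 32-bit words
 * (qman_cgrs is a 256-bit set; SKETCH_* names are assumed): mask the queried
 * congestion state down to the CGRs this portal tracks, XOR against the
 * previous snapshot to find CGRs whose state changed, then store the new
 * snapshot.  Callbacks then fire for every set bit in 'changed'.
 */
#define SKETCH_CGR_WORDS	8	/* 256 CGRs / 32 bits per word */

struct sketch_cgrs {
	u32 q[SKETCH_CGR_WORDS];
};

static void sketch_cscn_delta(const struct sketch_cgrs *hw_state,
			      const struct sketch_cgrs *mask,
			      struct sketch_cgrs *snapshot,
			      struct sketch_cgrs *changed)
{
	int i;

	for (i = 0; i < SKETCH_CGR_WORDS; i++) {
		u32 rr = hw_state->q[i] & mask->q[i];	/* CGRs we track */

		changed->q[i] = rr ^ snapshot->q[i];	/* entered or exited */
		snapshot->q[i] = rr;			/* new snapshot */
	}
}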
/* in qm_mr_process_task(): */
qm_mr_pvb_update(&p->p);
msg = qm_mr_current(&p->p);
if (!msg)
	break;

verb = msg->verb & QM_MR_VERB_TYPE_MASK;
switch (verb) {
case QM_MR_VERB_FQRNI:
	/* nada, we drop FQRNIs on the floor */
	break;
case QM_MR_VERB_FQRN:
case QM_MR_VERB_FQRL:
	/* Lookup in the retirement table */
	fq = fqid_to_fq(qm_fqid_get(&msg->fq));
	if (WARN_ON(!fq))
		break;
	fq_state_change(p, fq, msg, verb);
	if (fq->cb.fqs)
		fq->cb.fqs(p, fq, msg);
	break;
case QM_MR_VERB_FQPN:
	/* Parked */
	fq = tag_to_fq(be32_to_cpu(msg->fq.context_b));
	fq_state_change(p, fq, msg, verb);
	if (fq->cb.fqs)
		fq->cb.fqs(p, fq, msg);
	break;
/* ... */
default:
	/* Software ERN */
	fq = tag_to_fq(be32_to_cpu(msg->ern.tag));
	fq->cb.ern(p, fq, msg);
}

qm_mr_next(&p->p);
/* ... */
qm_mr_cci_consume(&p->p, num);
/* in __poll_portal_slow(): */
if (is & QM_PIRQ_CSCI)
	queue_work_on(smp_processor_id(), qm_portal_wq,
		      &p->congestion_work);

if (is & QM_PIRQ_EQRI) {
	qm_eqcr_cce_update(&p->p);
	qm_eqcr_set_ithresh(&p->p, 0);
	/* ... */
}

if (is & QM_PIRQ_MRI)
	queue_work_on(smp_processor_id(), qm_portal_wq,
		      &p->mr_work);

/*
 * ... remove some slowish-path stuff from the "fast path" and make sure it
 * isn't ...
 */
p->vdqcr_owned = NULL;	/* in clear_vdqcr() */

/*
 * ... less frequently (ie. per-FQ) than __poll_portal_fast() does, so the
 * nett ... above potential conflicts, but that this function itself is not
 * re-entrant ... user callbacks to call into any QMan API.
 */
/* in __poll_portal_fast(): */
qm_dqrr_pvb_update(&p->p);
dq = qm_dqrr_current(&p->p);
if (!dq)
	break;

if (dq->stat & QM_DQRR_STAT_UNSCHEDULED) {
	/*
	 * VDQCR: this FQ owns the portal's volatile dequeue; we may be
	 * draining it post-retirement.
	 */
	fq = p->vdqcr_owned;
	/* ... */
	if (dq->stat & QM_DQRR_STAT_FQ_EMPTY)
		clear_vdqcr(p, fq);

	res = fq->cb.dqrr(p, fq, dq, sched_napi);

	if (dq->stat & QM_DQRR_STAT_DQCR_EXPIRED)
		clear_vdqcr(p, fq);
} else {
	/* SDQCR: context_b points to the FQ */
	fq = tag_to_fq(be32_to_cpu(dq->context_b));

	res = fq->cb.dqrr(p, fq, dq, sched_napi);
}

/* parking isn't possible unless HELDACTIVE was set */
DPAA_ASSERT((dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) ||
	    (res != qman_cb_dqrr_park));
/* ... */
qm_dqrr_cdc_consume_1ptr(&p->p, dq,
			 res == qman_cb_dqrr_park);
/* ... */
qm_dqrr_next(&p->p);
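/*
 * Stripped of the VDQCR special case, the fast-path poll above reduces to
 * the loop below -- a hedged sketch reusing the driver helpers, not the
 * verbatim __poll_portal_fast():
 */
static unsigned int sketch_poll_fast(struct qman_portal *p, unsigned int limit)
{
	unsigned int done = 0;

	while (done < limit) {
		const struct qm_dqrr_entry *dq;
		enum qman_cb_dqrr_result res;
		struct qman_fq *fq;

		qm_dqrr_pvb_update(&p->p);		/* spot new entries */
		dq = qm_dqrr_current(&p->p);
		if (!dq)
			break;
		fq = tag_to_fq(be32_to_cpu(dq->context_b));
		res = fq->cb.dqrr(p, fq, dq, false);	/* user callback */
		qm_dqrr_cdc_consume_1ptr(&p->p, dq,
					 res == qman_cb_dqrr_park);
		qm_dqrr_next(&p->p);
		done++;
	}
	return done;
}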
/* in qman_p_irqsource_add(): */
p->irq_sources |= bits & QM_PIRQ_VISIBLE;
qm_out(&p->p, QM_REG_IER, p->irq_sources);

/* in qman_p_irqsource_remove(): */
/*
 * Our interrupt handler only processes+clears the interrupt sources that
 * are in p->irq_sources. As we're trimming that mask, if one of them
 * were to assert in the status register just before we removed it from
 * the enable register, there would be an interrupt-storm when we
 * release the IRQ lock. So we clear ISR only after updating (and reading
 * back) IER.
 */
p->irq_sources &= ~bits;
qm_out(&p->p, QM_REG_IER, p->irq_sources);
ier = qm_in(&p->p, QM_REG_IER);
/*
 * Using "~ier" (rather than "bits" or "~p->irq_sources") creates a
 * data-dependency, ie. to protect against re-ordering.
 */
qm_out(&p->p, QM_REG_ISR, ~ier);
cpu = portal->config->cpu;	/* in qman_affine_channel() */

/* in qman_start_using_portal(): */
return (!device_link_add(dev, p->config->dev,
			 DL_FLAG_AUTOREMOVE_CONSUMER)) ? -EINVAL : 0;

/* in qman_p_static_dequeue_add(): */
pools &= p->config->pools;
p->sdqcr |= pools;
qm_dqrr_sdqcr_set(&p->p, p->sdqcr);
/* in qman_create_fq(): */
fq->fqid = fqid;
fq->flags = flags;
fq->state = qman_fq_state_oos;
fq->cgr_groupid = 0;

/* ... */
	return -EINVAL;

/* two fq_table slots per FQID: the odd one flags enqueue-only objects */
fq->idx = fqid * 2;
if (flags & QMAN_FQ_FLAG_NO_MODIFY)
	fq->idx++;

WARN_ON(fq_table[fq->idx]);
fq_table[fq->idx] = fq;

/* in qman_destroy_fq(): */
/*
 * We don't need to lock the FQ as it is a pre-condition that the FQ be
 * quiesced. Instead, run some checks.
 */
switch (fq->state) {
	/* parked or OOS: */
	/* ... */
	qman_release_fqid(fq->fqid);

	DPAA_ASSERT(fq_table[fq->idx]);
	fq_table[fq->idx] = NULL;
	/* ... */
}

return fq->fqid;	/* in qman_fq_fqid() */
/* in qman_init_fq(): */
if (fq->state != qman_fq_state_oos &&
    fq->state != qman_fq_state_parked)
	return -EINVAL;
/* ... */
	return -EINVAL;
if (opts && (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_OAC)) {
	/* OAC can't be set at the same time as TDTHRESH */
	if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_TDTHRESH)
		return -EINVAL;
}
/* ... */
if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) ||
    (fq->state != qman_fq_state_oos &&
     fq->state != qman_fq_state_parked)) {
	ret = -EBUSY;
	goto out;
}
mcc = qm_mc_start(&p->p);
if (opts)
	mcc->initfq = *opts;
qm_fqid_set(&mcc->fq, fq->fqid);
mcc->initfq.count = 0;
/*
 * If the FQ does *not* have the TO_DCPORTAL flag, context_b is set as a
 * demux pointer. Otherwise, the caller-provided value is allowed to
 * stand, don't overwrite it.
 */
if (fq_isclear(fq, QMAN_FQ_FLAG_TO_DCPORTAL)) {
	dma_addr_t phys_fq;

	mcc->initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CONTEXTB);
	mcc->initfq.fqd.context_b = cpu_to_be32(fq_to_tag(fq));
	/*
	 * and the physical address - NB, if the user wasn't trying to
	 * set CONTEXTA, clear the stashing settings.
	 */
	if (!(be16_to_cpu(mcc->initfq.we_mask) &
	      QM_INITFQ_WE_CONTEXTA)) {
		mcc->initfq.we_mask |=
			cpu_to_be16(QM_INITFQ_WE_CONTEXTA);
		memset(&mcc->initfq.fqd.context_a, 0,
		       sizeof(mcc->initfq.fqd.context_a));
	} else {
		phys_fq = dma_map_single(p->config->dev, fq,
					 sizeof(*fq), DMA_TO_DEVICE);
		if (dma_mapping_error(p->config->dev, phys_fq)) {
			dev_err(p->config->dev, "dma_mapping failed\n");
			ret = -EIO;
			goto out;
		}
		qm_fqd_stashing_set64(&mcc->initfq.fqd, phys_fq);
	}
}
if (flags & QMAN_INITFQ_FLAG_LOCAL) {
	if (!(be16_to_cpu(mcc->initfq.we_mask) &
	      QM_INITFQ_WE_DESTWQ)) {
		mcc->initfq.we_mask |=
			cpu_to_be16(QM_INITFQ_WE_DESTWQ);
		/* ... */
	}
	qm_fqd_set_destwq(&mcc->initfq.fqd, p->config->channel, wq);
}
qm_mc_commit(&p->p, myverb);
if (!qm_mc_result_timeout(&p->p, &mcr)) {
	dev_err(p->config->dev, "MCR timeout\n");
	ret = -ETIMEDOUT;
	goto out;
}

DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
res = mcr->result;
if (res != QM_MCR_RESULT_OK) {
	ret = -EIO;
	goto out;
}
if (opts) {
	if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_FQCTRL) {
		if (be16_to_cpu(opts->fqd.fq_ctrl) & QM_FQCTRL_CGE)
			fq_set(fq, QMAN_FQ_STATE_CGR_EN);
		/* ... */
	}
	if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_CGID)
		fq->cgr_groupid = opts->fqd.cgid;
}
fq->state = (flags & QMAN_INITFQ_FLAG_SCHED) ?
	    qman_fq_state_sched : qman_fq_state_parked;
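/*
 * A hedged usage sketch for the INITFQ path above: create an FQ object for
 * software dequeue, then init + schedule it in one call.  'my_dqrr_cb' and
 * the fq_ctrl choice are illustrative assumptions, not driver defaults.
 */
static enum qman_cb_dqrr_result my_dqrr_cb(struct qman_portal *p,
					   struct qman_fq *fq,
					   const struct qm_dqrr_entry *dq,
					   bool sched_napi)
{
	/* consume dq->fd here */
	return qman_cb_dqrr_consume;
}

static int sketch_setup_fq(struct qman_fq *fq, u32 fqid)
{
	struct qm_mcc_initfq opts;
	int err;

	fq->cb.dqrr = my_dqrr_cb;
	err = qman_create_fq(fqid, QMAN_FQ_FLAG_NO_ENQUEUE, fq);
	if (err)
		return err;

	memset(&opts, 0, sizeof(opts));
	opts.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL);
	opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_HOLDACTIVE);
	return qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
}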
/* in qman_schedule_fq(): */
if (fq->state != qman_fq_state_parked)
	return -EINVAL;
/* ... */
	return -EINVAL;
/* Issue an ALTERFQ_SCHED management command */
p = get_affine_portal();
if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) ||
    fq->state != qman_fq_state_parked) {
	ret = -EBUSY;
	goto out;
}
mcc = qm_mc_start(&p->p);
qm_fqid_set(&mcc->fq, fq->fqid);
qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_SCHED);
if (!qm_mc_result_timeout(&p->p, &mcr)) {
	dev_err(p->config->dev, "ALTER_SCHED timeout\n");
	ret = -ETIMEDOUT;
	goto out;
}

DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_SCHED);
if (mcr->result != QM_MCR_RESULT_OK) {
	ret = -EIO;
	goto out;
}
fq->state = qman_fq_state_sched;
/* in qman_retire_fq(): */
if (fq->state != qman_fq_state_parked &&
    fq->state != qman_fq_state_sched)
	return -EINVAL;
/* ... */
	return -EINVAL;
p = get_affine_portal();
if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) ||
    fq->state == qman_fq_state_retired ||
    fq->state == qman_fq_state_oos) {
	ret = -EBUSY;
	goto out;
}
mcc = qm_mc_start(&p->p);
qm_fqid_set(&mcc->fq, fq->fqid);
qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE);
if (!qm_mc_result_timeout(&p->p, &mcr)) {
	dev_crit(p->config->dev, "ALTER_RETIRE timeout\n");
	ret = -ETIMEDOUT;
	goto out;
}

DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_RETIRE);
res = mcr->result;
/*
 * Retire is the one command that can complete asynchronously: a PENDING
 * result means the FQ is non-empty and retirement will finish via an
 * FQRN message; OK means it completed immediately.
 */
if (res == QM_MCR_RESULT_OK) {
	if (mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY)
		fq_set(fq, QMAN_FQ_STATE_NE);
	if (mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)
		fq_set(fq, QMAN_FQ_STATE_ORL);
	if (flags)
		*flags = fq->flags;
	fq->state = qman_fq_state_retired;
	if (fq->cb.fqs) {
		/* fake an FQRNI message for the upper layer */
		union qm_mr_entry msg;

		msg.verb = QM_MR_VERB_FQRNI;
		msg.fq.fqs = mcr->alterfq.fqs;
		qm_fqid_set(&msg.fq, fq->fqid);
		/* ... */
		fq->cb.fqs(p, fq, &msg);
	}
}
/* ... PENDING and error results handled below ... */
/* in qman_oos_fq(): */
if (fq->state != qman_fq_state_retired)
	return -EINVAL;
/* ... */
	return -EINVAL;
p = get_affine_portal();
if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) ||
    fq->state != qman_fq_state_retired) {
	ret = -EBUSY;
	goto out;
}
mcc = qm_mc_start(&p->p);
qm_fqid_set(&mcc->fq, fq->fqid);
qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
if (!qm_mc_result_timeout(&p->p, &mcr)) {
	ret = -ETIMEDOUT;
	goto out;
}
DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_OOS);
if (mcr->result != QM_MCR_RESULT_OK) {
	ret = -EIO;
	goto out;
}
fq->state = qman_fq_state_oos;
/* in qman_query_fq(): */
mcc = qm_mc_start(&p->p);
qm_fqid_set(&mcc->fq, fq->fqid);
qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
if (!qm_mc_result_timeout(&p->p, &mcr)) {
	ret = -ETIMEDOUT;
	goto out;
}

DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
if (mcr->result == QM_MCR_RESULT_OK)
	*fqd = mcr->queryfq.fqd;
else
	ret = -EIO;

/* in qman_query_fq_np(): */
mcc = qm_mc_start(&p->p);
qm_fqid_set(&mcc->fq, fq->fqid);
qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
if (!qm_mc_result_timeout(&p->p, &mcr)) {
	ret = -ETIMEDOUT;
	goto out;
}

DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
if (mcr->result == QM_MCR_RESULT_OK)
	*np = mcr->queryfq_np;
else if (mcr->result == QM_MCR_RESULT_ERR_FQID)
	ret = -ERANGE;
else
	ret = -EIO;
/* in qman_query_cgr(): */
mcc = qm_mc_start(&p->p);
mcc->cgr.cgid = cgr->cgrid;
qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCGR);
if (!qm_mc_result_timeout(&p->p, &mcr)) {
	ret = -ETIMEDOUT;
	goto out;
}
DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYCGR);
if (mcr->result == QM_MCR_RESULT_OK)
	*cgrd = mcr->querycgr;
else {
	dev_err(p->config->dev, "QUERY_CGR failed: %s\n",
		mcr_result_str(mcr->result));
	ret = -EIO;
}
/* in set_p_vdqcr(): */
int ret = -EBUSY;

if (p->vdqcr_owned)
	goto out;	/* only one volatile dequeue per portal at a time */
/* ... */
p->vdqcr_owned = fq;
qm_dqrr_vdqcr_set(&p->p, vdqcr);

/* in qman_volatile_dequeue(): */
if (fq->state != qman_fq_state_parked &&
    fq->state != qman_fq_state_retired)
	return -EINVAL;
if (vdqcr & QM_VDQCR_FQID_MASK)
	return -EINVAL;
if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
	return -EBUSY;
vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid;
/* ... */
/*
 * NB: don't propagate any error - the caller wouldn't
 * know whether the VDQCR was issued or not
 */

/* in update_eqcr_ci(): */
if (avail)
	qm_eqcr_cce_prefetch(&p->p);
else
	qm_eqcr_cce_update(&p->p);
/* in qman_enqueue(): */
if (p->use_eqcr_ci_stashing) {
	/* the stashing case: h/w pushes CI updates to us */
	eq = qm_eqcr_start_stash(&p->p);
} else {
	/*
	 * The non-stashing case is harder, need to prefetch ahead of
	 * writing the EQCR entry.
	 */
	avail = qm_eqcr_get_avail(&p->p);
	if (avail < 2)
		update_eqcr_ci(p, avail);
	eq = qm_eqcr_start_no_stash(&p->p);
}
if (unlikely(!eq))
	goto out;

qm_fqid_set(eq, fq->fqid);
eq->tag = cpu_to_be32(fq_to_tag(fq));
eq->fd = *fd;

qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE);
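/*
 * qman_enqueue() returns -EBUSY when no EQCR slot is available, so callers
 * typically retry.  A hedged usage sketch with an assumed retry bound (the
 * dpaa_eth driver uses a loop of this shape):
 */
static int sketch_enqueue_retry(struct qman_fq *fq, const struct qm_fd *fd)
{
	int i, err;

	for (i = 0; i < 100000; i++) {	/* bound is an assumption */
		err = qman_enqueue(fq, fd);
		if (err != -EBUSY)
			return err;	/* 0 on success; other errors fatal */
		cpu_relax();
	}
	return -EBUSY;
}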
/* in qm_modify_cgr(): */
mcc = qm_mc_start(&p->p);
if (opts)
	mcc->initcgr = *opts;
mcc->initcgr.cgid = cgr->cgrid;
if (flags & QMAN_CGR_FLAG_USE_INIT)
	verb = QM_MCC_VERB_INITCGR;
qm_mc_commit(&p->p, verb);
if (!qm_mc_result_timeout(&p->p, &mcr)) {
	ret = -ETIMEDOUT;
	goto out;
}

DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == verb);
if (mcr->result != QM_MCR_RESULT_OK)
	ret = -EIO;
#define PORTAL_IDX(n)	(n->config->channel - QM_CHANNEL_SWPORTAL0)

/* in qm_cgr_cscn_targ_set(): */
if (qman_ip_rev >= QMAN_REV30)
	cgr->cscn_targ_upd_ctrl = cpu_to_be16(pi |
				QM_CGR_TARG_UDP_CTRL_WRITE_BIT);
else
	cgr->cscn_targ = cpu_to_be32(val | QM_CGR_TARG_PORTAL(pi));

/* in qm_cgr_cscn_targ_clear(): */
if (qman_ip_rev >= QMAN_REV30)
	cgr->cscn_targ_upd_ctrl = cpu_to_be16(pi);
else
	cgr->cscn_targ = cpu_to_be32(val & ~QM_CGR_TARG_PORTAL(pi));
/* in qman_create_cgr(): */
/*
 * We have to check that the provided CGRID is within the limits of the
 * data-structures, for obvious reasons. However we'll let h/w take
 * care of determining ...
 */
if (cgr->cgrid >= CGR_NUM)
	return -EINVAL;

/* ... */
qman_cgr_cpus[cgr->cgrid] = smp_processor_id();
/* ... */
cgr->chan = p->config->channel;
raw_spin_lock_irq(&p->cgr_lock);

/* ... register with hardware, then track the callback: */
list_add(&cgr->node, &p->cgr_cbs);

/* ... */
	dev_err(p->config->dev, "CGR HW state partially modified\n");

/* determine if the newly added object needs its callback called now */
if (cgr->cb && cgr_state.cgr.cscn_en &&
    qman_cgrs_get(&p->cgrs[1], cgr->cgrid))
	cgr->cb(p, cgr, 1);
raw_spin_unlock_irq(&p->cgr_lock);
/* in qman_cgr_get_affine_portal(): */
if (cgr->chan != p->config->channel) {
	/* attempt to delete from a portal that didn't create the CGR */
	dev_err(p->config->dev, "CGR not owned by current portal");
	dev_dbg(p->config->dev, " create 0x%x, delete 0x%x\n",
		cgr->chan, p->config->channel);
	/* ... */
}
/* in qman_delete_cgr(): */
if (!p)
	return -EINVAL;

raw_spin_lock_irqsave(&p->cgr_lock, irqflags);
list_del(&cgr->node);
/*
 * If this portal has no other CGR objects with the same CGRID, clear
 * this portal from the CGR's CSCN target mask; on any failure the node
 * is re-added before bailing out.
 */
list_for_each_entry(i, &p->cgr_cbs, node)
	if (i->cgrid == cgr->cgrid && i->cb)
		goto release_lock;
/* ... error paths: */
	list_add(&cgr->node, &p->cgr_cbs);
	/* ... */
	list_add(&cgr->node, &p->cgr_cbs);
release_lock:
raw_spin_unlock_irqrestore(&p->cgr_lock, irqflags);
/* in qman_delete_cgr_safe(): */
if (qman_cgr_cpus[cgr->cgrid] != smp_processor_id()) {
	smp_call_function_single(qman_cgr_cpus[cgr->cgrid],
				 /* ... */);
	/* ... */
}

/* in qman_update_cgr(): */
if (!p)
	return -EINVAL;

raw_spin_lock_irqsave(&p->cgr_lock, irqflags);
ret = qm_modify_cgr(cgr, 0, opts);
raw_spin_unlock_irqrestore(&p->cgr_lock, irqflags);

params->ret = qman_update_cgr(params->cgr, params->opts);	/* in qman_update_cgr_smp_call() */

/* in qman_update_cgr_safe(): */
if (qman_cgr_cpus[cgr->cgrid] != smp_processor_id())
	smp_call_function_single(qman_cgr_cpus[cgr->cgrid],
				 /* ... */);
if ((msg->verb & QM_MR_VERB_TYPE_MASK) == v)	/* in _qm_mr_consume_and_match_verb() */
	/* ... */;

if (qm_fqid_get(dqrr) == fqid && (dqrr->stat & s))	/* in _qm_dqrr_consume_and_match() */
	/* ... */;
/* in qman_shutdown_fq(): */
u32 channel, res;

/* ... */
dev = p->config->dev;
/* Determine the state of the FQID */
mcc = qm_mc_start(&p->p);
qm_fqid_set(&mcc->fq, fqid);
qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
if (!qm_mc_result_timeout(&p->p, &mcr)) {
	ret = -ETIMEDOUT;
	goto out;
}

DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
state = mcr->queryfq_np.state & QM_MCR_NP_STATE_MASK;
if (state == QM_MCR_NP_STATE_OOS)
	goto out;	/* already OOS, no more checks needed */

/* Query which channel the FQ is using */
mcc = qm_mc_start(&p->p);
qm_fqid_set(&mcc->fq, fqid);
qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
if (!qm_mc_result_timeout(&p->p, &mcr)) {
	ret = -ETIMEDOUT;
	goto out;
}

DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
/* need to store these since the MCR gets reused */
channel = qm_fqd_get_chan(&mcr->queryfq.fqd);
qm_fqd_get_wq(&mcr->queryfq.fqd);

if (channel < qm_channel_pool1) {
	channel_portal = get_portal_for_channel(channel);
	if (channel_portal == NULL) {
		dev_err(dev, "Can't find portal for dedicated channel 0x%x\n",
			channel);
		ret = -EIO;
		goto out;
	}
} else {
	channel_portal = p;
}

/* for scheduled/active/parked states, retire the FQ first: */
mcc = qm_mc_start(&channel_portal->p);
qm_fqid_set(&mcc->fq, fqid);
qm_mc_commit(&channel_portal->p, QM_MCC_VERB_ALTER_RETIRE);
if (!qm_mc_result_timeout(&channel_portal->p, &mcr)) {
	ret = -ETIMEDOUT;
	goto out;
}
DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
	    QM_MCR_VERB_ALTER_RETIRE);
res = mcr->result;	/* Make a copy as we reuse MCR below */

if (res == QM_MCR_RESULT_OK)
	drain_mr_fqrni(&channel_portal->p);

if (res == QM_MCR_RESULT_PENDING) {
	/*
	 * Need to wait for the FQRN in the message ring, which
	 * will only occur once the FQ has been drained.  In
	 * order for the FQ to drain the portal needs to be set
	 * to dequeue from the channel the FQ is scheduled on
	 */
	if (channel >= qm_channel_pool1 &&
	    channel < qm_channel_pool1 + 15) {
		/* Pool channel, enable the bit in the portal */
	} else if (channel < qm_channel_pool1) {
		/* Dedicated channel */
	} else {
		dev_err(dev, "Can't recover FQ 0x%x, ch: 0x%x",
			fqid, channel);
		ret = -EBUSY;
		goto out;
	}
	/* Set the sdqcr to drain this channel */
	if (channel < qm_channel_pool1)
		qm_dqrr_sdqcr_set(&channel_portal->p,
				  QM_SDQCR_TYPE_ACTIVE |
				  QM_SDQCR_CHANNELS_DEDICATED);
	else
		qm_dqrr_sdqcr_set(&channel_portal->p,
				  /* ... pool-channel variant ... */);
	do {
		/* keep draining the DQRR while checking the MR */
		qm_dqrr_drain_nomatch(&channel_portal->p);
		found_fqrn = qm_mr_drain(&channel_portal->p,
					 FQRN);
		cpu_relax();
	} while (!found_fqrn);
	/* restore the portal's SDQCR */
	qm_dqrr_sdqcr_set(&channel_portal->p,
			  channel_portal->sdqcr);
}
if (res != QM_MCR_RESULT_OK &&
    res != QM_MCR_RESULT_PENDING) {
	ret = -EIO;
	goto out;
}
if (!(mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)) {
	/* ORL had no entries, no need to wait for ERNs */
	orl_empty = true;
}
/* retirement succeeded; drain the FQ if needed */
if (drain || mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY) {
	/* FQ is not empty, drain it with volatile DQ commands */
	do {
		/* ... */
		qm_dqrr_vdqcr_set(&p->p, vdqcr);
		/*
		 * Wait for a dequeue and process the dequeues,
		 * making sure to empty the ring completely
		 */
	} while (!qm_dqrr_drain_wait(&p->p, fqid, FQ_EMPTY));
}
while (!orl_empty) {
	/* wait for the ORL to be completely drained */
	orl_empty = qm_mr_drain(&p->p, FQRL);
	cpu_relax();
}
/* finally, take the now-retired FQ out of service: */
mcc = qm_mc_start(&p->p);
qm_fqid_set(&mcc->fq, fqid);
qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
if (!qm_mc_result_timeout(&p->p, &mcr)) {
	ret = -ETIMEDOUT;
	goto out;
}
DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
	    QM_MCR_VERB_ALTER_OOS);
if (mcr->result != QM_MCR_RESULT_OK) {
	dev_err(dev, "OOS after drain failed: FQ 0x%x (0x%x)\n",
		fqid, mcr->result);
	ret = -EIO;
	goto out;
}

/* an FQ found already retired goes straight to the OOS command: */
mcc = qm_mc_start(&p->p);
qm_fqid_set(&mcc->fq, fqid);
qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
if (!qm_mc_result_timeout(&p->p, &mcr)) {
	ret = -ETIMEDOUT;
	goto out;
}
DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
	    QM_MCR_VERB_ALTER_OOS);
if (mcr->result != QM_MCR_RESULT_OK) {
	dev_err(dev, "OOS failed: FQ 0x%x (0x%x)\n",
		fqid, mcr->result);
	ret = -EIO;
	goto out;
}

/* any other state: */
	ret = -EIO;
return portal->config;		/* in qman_get_qm_portal_config() */

struct gen_pool *qm_qpalloc;	/* pool-channel allocator */

/* in qman_alloc_range(): */
if (!p)
	return -ENODEV;
/* ... */
	return -ENOMEM;
/* in qpool_cleanup(): */
/*
 * We query all FQDs starting from
 * FQID 1 until we get an "invalid FQID" error, looking for non-OOS FQDs
 * whose destination channel is the pool-channel being released.
 * When a non-OOS FQD is found we attempt to clean it up
 */
/* ... */
if (err == -ERANGE)
	/* FQID range exceeded, found no problems */
	return 0;
/* ... */
/* The channel is the FQ's target, clean it */
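/*
 * The shape of the cleanup scan used by qpool_cleanup() (and cgr_cleanup()
 * below): walk the FQID space from 1 with QUERYFQ_NP until the hardware
 * answers "invalid FQID" (-ERANGE).  A hedged sketch; the per-FQ inspection
 * and cleanup step is elided.
 */
static int sketch_scan_fqds(void)
{
	struct qman_fq fq = {
		.fqid = 1
	};
	int err;

	for (;;) {
		struct qm_mcr_queryfq_np np;

		err = qman_query_fq_np(&fq, &np);
		if (err == -ERANGE)
			return 0;	/* FQID range exceeded: scan done */
		else if (err)
			return err;
		/* inspect np.state / destination channel / CGID here */
		fq.fqid++;
	}
}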
/* in cgr_cleanup(): */
/*
 * query all FQDs starting from FQID 1 until we get an "invalid FQID"
 * error, looking for non-OOS FQDs whose CGR is the CGR being released
 */
/* ... */
if (err == -ERANGE)
	/* FQID range exceeded, found no problems */
	return 0;
/* ... */
	return -EIO;	/* a still-referenced CGR cannot be released */