Lines matching references to llq, the struct arm_smmu_ll_queue state embedded in each hardware queue, in the Arm SMMUv3 driver (arm-smmu-v3.c). Each hit shows the source line number, the matching line, and the enclosing function; "local" and "argument" mark the lines where llq is declared.

147 	writel_relaxed(q->llq.cons, q->cons_reg);  in queue_sync_cons_out()
158 struct arm_smmu_ll_queue *llq = &q->llq; in queue_sync_cons_ovf() local
160 if (likely(Q_OVF(llq->prod) == Q_OVF(llq->cons))) in queue_sync_cons_ovf()
163 llq->cons = Q_OVF(llq->prod) | Q_WRP(llq, llq->cons) | in queue_sync_cons_ovf()
164 Q_IDX(llq, llq->cons); in queue_sync_cons_ovf()
180 if (Q_OVF(prod) != Q_OVF(q->llq.prod)) in queue_sync_prod_in()
183 q->llq.prod = prod; in queue_sync_prod_in()
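The sync helpers above (lines 147-183) all manipulate the packed prod and cons words of struct arm_smmu_ll_queue: the low max_n_shift bits are the entry index, the next bit up is the wrap flag, and bit 31 is the overflow flag, which is what the driver's Q_IDX, Q_WRP and Q_OVF macros extract. A minimal user-space model of that layout, with the queue size and single-argument macro forms assumed for illustration:

	#include <stdint.h>
	#include <stdio.h>

	/* Assumed layout, single-argument versions of the driver's macros:
	   a 2^8-entry queue, index in the low bits, wrap flag just above,
	   overflow flag in bit 31 */
	#define MAX_N_SHIFT	8
	#define Q_IDX(p)	((p) & ((1U << MAX_N_SHIFT) - 1))
	#define Q_WRP(p)	((p) & (1U << MAX_N_SHIFT))
	#define Q_OVF(p)	((p) & (1U << 31))

	static int queue_empty(uint32_t prod, uint32_t cons)
	{
		/* same index, same wrap phase: nothing to consume */
		return Q_IDX(prod) == Q_IDX(cons) && Q_WRP(prod) == Q_WRP(cons);
	}

	static int queue_full(uint32_t prod, uint32_t cons)
	{
		/* same index, opposite wrap phase: the producer has lapped */
		return Q_IDX(prod) == Q_IDX(cons) && Q_WRP(prod) != Q_WRP(cons);
	}

	int main(void)
	{
		uint32_t prod = 1U << MAX_N_SHIFT, cons = 0;	/* one full lap ahead */

		printf("full=%d empty=%d\n", queue_full(prod, cons),
		       queue_empty(prod, cons));

		/* queue_sync_cons_ovf() (lines 163-164): take prod's overflow
		   flag, keep cons's own wrap and index bits */
		cons = Q_OVF(prod) | Q_WRP(cons) | Q_IDX(cons);
		printf("cons=0x%x\n", (unsigned)cons);
		return 0;
	}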
238 if (queue_empty(&q->llq)) in queue_remove_raw()
241 queue_read(ent, Q_ENT(q, q->llq.cons), q->ent_dwords); in queue_remove_raw()
242 queue_inc_cons(&q->llq); in queue_remove_raw()
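queue_remove_raw() (lines 238-242) is the generic consumer path: bail out if the queue is empty, copy the entry at the cons index out, then advance cons. The advance is wrap-aware: adding 1 to the combined wrap and index bits lets the carry out of the index flip the wrap flag, while the overflow flag is spliced back unchanged. A self-contained sketch of that increment, under the same assumed layout as the previous example:

	#include <stdint.h>
	#include <assert.h>

	#define MAX_N_SHIFT	8
	#define Q_IDX(p)	((p) & ((1U << MAX_N_SHIFT) - 1))
	#define Q_WRP(p)	((p) & (1U << MAX_N_SHIFT))
	#define Q_OVF(p)	((p) & (1U << 31))

	/* Sketch of queue_inc_cons(): the +1 carries out of the index bits
	   into the wrap bit at the end of a lap; the overflow flag rides
	   along untouched */
	static uint32_t queue_inc_cons(uint32_t cons)
	{
		uint32_t next = (Q_WRP(cons) | Q_IDX(cons)) + 1;

		return Q_OVF(cons) | Q_WRP(next) | Q_IDX(next);
	}

	int main(void)
	{
		uint32_t cons = (1U << MAX_N_SHIFT) - 1;  /* last index, wrap clear */

		cons = queue_inc_cons(cons);
		assert(Q_IDX(cons) == 0 && Q_WRP(cons));  /* wrapped to index 0 */
		return 0;
	}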
383 ent.sync.msiaddr = q->base_dma + Q_IDX(&q->llq, prod) * in arm_smmu_cmdq_build_sync_cmd()
559 struct arm_smmu_ll_queue llq = { in __arm_smmu_cmdq_poll_set_valid_map() local
560 .max_n_shift = cmdq->q.llq.max_n_shift, in __arm_smmu_cmdq_poll_set_valid_map()
564 ewidx = BIT_WORD(Q_IDX(&llq, eprod)); in __arm_smmu_cmdq_poll_set_valid_map()
565 ebidx = Q_IDX(&llq, eprod) % BITS_PER_LONG; in __arm_smmu_cmdq_poll_set_valid_map()
567 while (llq.prod != eprod) { in __arm_smmu_cmdq_poll_set_valid_map()
572 swidx = BIT_WORD(Q_IDX(&llq, llq.prod)); in __arm_smmu_cmdq_poll_set_valid_map()
573 sbidx = Q_IDX(&llq, llq.prod) % BITS_PER_LONG; in __arm_smmu_cmdq_poll_set_valid_map()
593 valid = (ULONG_MAX + !!Q_WRP(&llq, llq.prod)) & mask; in __arm_smmu_cmdq_poll_set_valid_map()
597 llq.prod = queue_inc_prod_n(&llq, limit - sbidx); in __arm_smmu_cmdq_poll_set_valid_map()
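The valid-map loop above (lines 559-597) paints per-entry valid bits a long-word at a time, and line 593 picks the bit polarity branchlessly: unsigned overflow is well defined, so ULONG_MAX + 1 wraps to 0, making the expression yield 0 when the wrap bit is set and mask when it is clear; the polarity therefore flips once per lap of the queue. A standalone demonstration of the idiom:

	#include <limits.h>
	#include <assert.h>

	/* Branchless select: all-zeros when wrap is nonzero, mask when it is
	   zero (ULONG_MAX + 1 == 0 under defined unsigned wraparound) */
	static unsigned long valid_bits(unsigned long wrap, unsigned long mask)
	{
		return (ULONG_MAX + !!wrap) & mask;
	}

	int main(void)
	{
		assert(valid_bits(0, 0xf0UL) == 0xf0UL);
		assert(valid_bits(1, 0xf0UL) == 0UL);
		return 0;
	}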
618 struct arm_smmu_ll_queue *llq) in arm_smmu_cmdq_poll_until_not_full() argument
629 WRITE_ONCE(cmdq->q.llq.cons, readl_relaxed(cmdq->q.cons_reg)); in arm_smmu_cmdq_poll_until_not_full()
631 llq->val = READ_ONCE(cmdq->q.llq.val); in arm_smmu_cmdq_poll_until_not_full()
637 llq->val = READ_ONCE(cmdq->q.llq.val); in arm_smmu_cmdq_poll_until_not_full()
638 if (!queue_full(llq)) in arm_smmu_cmdq_poll_until_not_full()
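arm_smmu_cmdq_poll_until_not_full() (lines 618-638) first pulls the hardware consumer index into the shared snapshot (line 629), then spins re-reading llq.val until the full condition clears. A stripped-down, runnable model of that poll, with C11 atomics standing in for READ_ONCE/WRITE_ONCE, the queue packed into a prod/cons word as in llq.val, and the kernel's timeout and wfe() backoff elided:

	#include <stdatomic.h>
	#include <stdint.h>
	#include <stdio.h>

	#define SHIFT	2			/* tiny 4-entry queue for the demo */
	#define IDX(p)	((p) & ((1U << SHIFT) - 1))
	#define WRP(p)	((p) & (1U << SHIFT))

	static _Atomic uint64_t q_val;		/* prod high 32 bits, cons low 32 */
	static uint32_t hw_cons;		/* stand-in for the CONS register */

	static int is_full(uint64_t v)
	{
		uint32_t prod = v >> 32, cons = (uint32_t)v;

		return IDX(prod) == IDX(cons) && WRP(prod) != WRP(cons);
	}

	static void poll_until_not_full(void)
	{
		/* refresh the shared cons from "hardware", as line 629 does
		   with readl_relaxed(cmdq->q.cons_reg) */
		uint64_t v = atomic_load_explicit(&q_val, memory_order_relaxed);

		atomic_store_explicit(&q_val, (v & ~0xffffffffULL) | hw_cons,
				      memory_order_relaxed);

		/* spin on fresh snapshots until a slot frees up; the kernel
		   bounds this with a timeout and wfe()-based backoff */
		do {
			v = atomic_load_explicit(&q_val, memory_order_relaxed);
		} while (is_full(v));
	}

	int main(void)
	{
		q_val = (uint64_t)(1U << SHIFT) << 32;	/* prod lapped cons: full */
		hw_cons = 1;				/* hardware consumed one entry */
		poll_until_not_full();
		printf("full=%d\n", is_full(q_val));	/* prints full=0 */
		return 0;
	}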
653 struct arm_smmu_ll_queue *llq) in __arm_smmu_cmdq_poll_until_msi() argument
657 u32 *cmd = (u32 *)(Q_ENT(&cmdq->q, llq->prod)); in __arm_smmu_cmdq_poll_until_msi()
667 llq->cons = ret ? llq->prod : queue_inc_prod_n(llq, 1); in __arm_smmu_cmdq_poll_until_msi()
677 struct arm_smmu_ll_queue *llq) in __arm_smmu_cmdq_poll_until_consumed() argument
680 u32 prod = llq->prod; in __arm_smmu_cmdq_poll_until_consumed()
684 llq->val = READ_ONCE(cmdq->q.llq.val); in __arm_smmu_cmdq_poll_until_consumed()
686 if (queue_consumed(llq, prod)) in __arm_smmu_cmdq_poll_until_consumed()
719 llq->cons = readl(cmdq->q.cons_reg); in __arm_smmu_cmdq_poll_until_consumed()
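__arm_smmu_cmdq_poll_until_consumed() (lines 677-719) spins until the consumer index moves past the prod value captured at entry, re-reading the CONS register when the snapshot goes stale (line 719). The ordering test has to be wrap-aware; a sketch of queue_consumed() under the same assumed layout as the first example:

	#include <stdint.h>
	#include <assert.h>

	#define MAX_N_SHIFT	8
	#define Q_IDX(p)	((p) & ((1U << MAX_N_SHIFT) - 1))
	#define Q_WRP(p)	((p) & (1U << MAX_N_SHIFT))

	/* Sketch of queue_consumed(): cons has passed `prod` either in the
	   same wrap phase with a strictly greater index, or in the opposite
	   phase with an index at or below it */
	static int queue_consumed(uint32_t cons, uint32_t prod)
	{
		return (Q_WRP(cons) == Q_WRP(prod) && Q_IDX(cons) > Q_IDX(prod)) ||
		       (Q_WRP(cons) != Q_WRP(prod) && Q_IDX(cons) <= Q_IDX(prod));
	}

	int main(void)
	{
		assert(!queue_consumed(5, 5));		/* not yet past slot 5 */
		assert(queue_consumed(6, 5));		/* same lap, moved past */
		assert(queue_consumed((1U << MAX_N_SHIFT) | 2, 5));  /* next lap */
		return 0;
	}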
727 struct arm_smmu_ll_queue *llq) in arm_smmu_cmdq_poll_until_sync() argument
731 return __arm_smmu_cmdq_poll_until_msi(smmu, cmdq, llq); in arm_smmu_cmdq_poll_until_sync()
733 return __arm_smmu_cmdq_poll_until_consumed(smmu, cmdq, llq); in arm_smmu_cmdq_poll_until_sync()
740 struct arm_smmu_ll_queue llq = { in arm_smmu_cmdq_write_entries() local
741 .max_n_shift = cmdq->q.llq.max_n_shift, in arm_smmu_cmdq_write_entries()
748 prod = queue_inc_prod_n(&llq, i); in arm_smmu_cmdq_write_entries()
777 struct arm_smmu_ll_queue llq, head; in arm_smmu_cmdq_issue_cmdlist() local
780 llq.max_n_shift = cmdq->q.llq.max_n_shift; in arm_smmu_cmdq_issue_cmdlist()
784 llq.val = READ_ONCE(cmdq->q.llq.val); in arm_smmu_cmdq_issue_cmdlist()
788 while (!queue_has_space(&llq, n + sync)) { in arm_smmu_cmdq_issue_cmdlist()
790 if (arm_smmu_cmdq_poll_until_not_full(smmu, cmdq, &llq)) in arm_smmu_cmdq_issue_cmdlist()
795 head.cons = llq.cons; in arm_smmu_cmdq_issue_cmdlist()
796 head.prod = queue_inc_prod_n(&llq, n + sync) | in arm_smmu_cmdq_issue_cmdlist()
799 old = cmpxchg_relaxed(&cmdq->q.llq.val, llq.val, head.val); in arm_smmu_cmdq_issue_cmdlist()
800 if (old == llq.val) in arm_smmu_cmdq_issue_cmdlist()
803 llq.val = old; in arm_smmu_cmdq_issue_cmdlist()
805 owner = !(llq.prod & CMDQ_PROD_OWNED_FLAG); in arm_smmu_cmdq_issue_cmdlist()
807 llq.prod &= ~CMDQ_PROD_OWNED_FLAG; in arm_smmu_cmdq_issue_cmdlist()
813 arm_smmu_cmdq_write_entries(cmdq, cmds, llq.prod, n); in arm_smmu_cmdq_issue_cmdlist()
815 prod = queue_inc_prod_n(&llq, n); in arm_smmu_cmdq_issue_cmdlist()
830 arm_smmu_cmdq_set_valid_map(cmdq, llq.prod, head.prod); in arm_smmu_cmdq_issue_cmdlist()
835 atomic_cond_read_relaxed(&cmdq->owner_prod, VAL == llq.prod); in arm_smmu_cmdq_issue_cmdlist()
839 &cmdq->q.llq.atomic.prod); in arm_smmu_cmdq_issue_cmdlist()
847 arm_smmu_cmdq_poll_valid_map(cmdq, llq.prod, prod); in arm_smmu_cmdq_issue_cmdlist()
865 llq.prod = queue_inc_prod_n(&llq, n); in arm_smmu_cmdq_issue_cmdlist()
866 ret = arm_smmu_cmdq_poll_until_sync(smmu, cmdq, &llq); in arm_smmu_cmdq_issue_cmdlist()
870 llq.prod, in arm_smmu_cmdq_issue_cmdlist()
880 WRITE_ONCE(cmdq->q.llq.cons, llq.cons); in arm_smmu_cmdq_issue_cmdlist()
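arm_smmu_cmdq_issue_cmdlist() (lines 777-880) is the lock-free core: each CPU snapshots llq.val, builds a head with its n + sync slots reserved plus the OWNED flag, and publishes it with cmpxchg, retrying on contention (lines 784-803); the claimant that saw the flag clear becomes the owner that later hands prod to the hardware (line 805). A user-space model of just the reservation step, shrunk to a 32-bit packed word with the space check and doorbell elided:

	#include <stdatomic.h>
	#include <stdint.h>
	#include <stdio.h>

	#define SHIFT		8
	#define WRP_IDX(p)	((p) & ((1U << (SHIFT + 1)) - 1))  /* index + wrap */
	#define OWNED		(1U << 15)  /* stand-in for CMDQ_PROD_OWNED_FLAG */

	static _Atomic uint32_t q_val;	/* prod high 16 bits, cons low 16 */

	/* Claim n slots: returns the old prod; OWNED clear means this caller
	   is the batch owner, as in owner = !(llq.prod & CMDQ_PROD_OWNED_FLAG) */
	static uint32_t reserve(unsigned int n)
	{
		uint32_t old = atomic_load_explicit(&q_val, memory_order_relaxed);

		for (;;) {
			uint32_t prod = old >> 16, cons = old & 0xffff;
			/* space check elided; the kernel spins in
			   arm_smmu_cmdq_poll_until_not_full() until
			   queue_has_space() passes for n + sync entries */
			uint32_t head = ((WRP_IDX(prod + n) | OWNED) << 16) | cons;

			if (atomic_compare_exchange_weak_explicit(&q_val, &old,
					head, memory_order_relaxed,
					memory_order_relaxed))
				return prod;	/* claimed [prod, prod + n) */
		}
	}

	int main(void)
	{
		uint32_t p0 = reserve(4), p1 = reserve(2);

		printf("cpu0 owner=%d at %u, cpu1 owner=%d at %u\n",
		       !(p0 & OWNED), (unsigned)WRP_IDX(p0),
		       !(p1 & OWNED), (unsigned)WRP_IDX(p1));
		return 0;
	}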
1827 struct arm_smmu_ll_queue *llq = &q->llq; in arm_smmu_evtq_thread() local
1854 } while (!queue_empty(llq)); in arm_smmu_evtq_thread()
1903 struct arm_smmu_ll_queue *llq = &q->llq; in arm_smmu_priq_thread() local
1912 } while (!queue_empty(llq)); in arm_smmu_priq_thread()
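The event and PRI queue IRQ threads (lines 1827-1912) share one drain shape: keep popping until the queue reads empty, re-checking in an outer loop so entries that arrive mid-drain are not missed, then resync the consumer overflow flag. A toy model of that loop structure, with a plain counter standing in for the hardware ring:

	#include <stdio.h>

	static int pending = 5;		/* toy stand-in for the hardware ring */

	static int queue_empty(void) { return pending == 0; }
	static int dequeue(void)     { return pending ? (pending--, 0) : -1; }

	int main(void)
	{
		do {
			while (dequeue() == 0)
				;	/* the per-entry event/PRI handler
					   would run here */
		} while (!queue_empty());

		/* followed by queue_sync_cons_ovf() to publish the consumer
		   index back to the hardware */
		printf("drained\n");
		return 0;
	}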
3590 qsz = ((1 << q->llq.max_n_shift) * dwords) << 3; in arm_smmu_init_one_queue()
3596 q->llq.max_n_shift--; in arm_smmu_init_one_queue()
3608 1 << q->llq.max_n_shift, name); in arm_smmu_init_one_queue()
3617 q->q_base |= FIELD_PREP(Q_BASE_LOG2SIZE, q->llq.max_n_shift); in arm_smmu_init_one_queue()
3619 q->llq.prod = q->llq.cons = 0; in arm_smmu_init_one_queue()
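arm_smmu_init_one_queue() (lines 3590-3619) sizes the ring as (1 << max_n_shift) entries of dwords 64-bit words each, the << 3 converting dwords to bytes, and on allocation failure it halves the entry count and retries before giving up. A sketch of that fallback, with malloc standing in for the driver's DMA-coherent allocator and the retry floor assumed rather than taken from the source:

	#include <stdlib.h>
	#include <stdio.h>

	static void *alloc_queue(unsigned int *max_n_shift, unsigned int dwords)
	{
		while (*max_n_shift) {
			/* entries * dwords-per-entry * 8 bytes-per-dword */
			size_t qsz = ((size_t)1 << *max_n_shift) * dwords << 3;
			void *base = malloc(qsz);	/* stand-in for
							   dmam_alloc_coherent() */
			if (base)
				return base;
			(*max_n_shift)--;	/* half the entries, retry */
		}
		return NULL;
	}

	int main(void)
	{
		unsigned int shift = 8;
		void *q = alloc_queue(&shift, 2); /* e.g. 2-dword (16-byte) entries */

		printf("got %u entries\n", q ? 1U << shift : 0);
		free(q);
		return 0;
	}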
3626 unsigned int nents = 1 << cmdq->q.llq.max_n_shift; in arm_smmu_cmdq_init()
4020 writel_relaxed(smmu->cmdq.q.llq.prod, smmu->base + ARM_SMMU_CMDQ_PROD); in arm_smmu_device_reset()
4021 writel_relaxed(smmu->cmdq.q.llq.cons, smmu->base + ARM_SMMU_CMDQ_CONS); in arm_smmu_device_reset()
4046 writel_relaxed(smmu->evtq.q.llq.prod, smmu->page1 + ARM_SMMU_EVTQ_PROD); in arm_smmu_device_reset()
4047 writel_relaxed(smmu->evtq.q.llq.cons, smmu->page1 + ARM_SMMU_EVTQ_CONS); in arm_smmu_device_reset()
4061 writel_relaxed(smmu->priq.q.llq.prod, in arm_smmu_device_reset()
4063 writel_relaxed(smmu->priq.q.llq.cons, in arm_smmu_device_reset()
4290 smmu->cmdq.q.llq.max_n_shift = min_t(u32, CMDQ_MAX_SZ_SHIFT, in arm_smmu_device_hw_probe()
4292 if (smmu->cmdq.q.llq.max_n_shift <= ilog2(CMDQ_BATCH_ENTRIES)) { in arm_smmu_device_hw_probe()
4304 smmu->evtq.q.llq.max_n_shift = min_t(u32, EVTQ_MAX_SZ_SHIFT, in arm_smmu_device_hw_probe()
4306 smmu->priq.q.llq.max_n_shift = min_t(u32, PRIQ_MAX_SZ_SHIFT, in arm_smmu_device_hw_probe()
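Finally, arm_smmu_device_hw_probe() (lines 4290-4306) clamps each queue's max_n_shift to the lesser of a driver ceiling (CMDQ_MAX_SZ_SHIFT and friends) and the size the hardware advertises in its ID registers, and rejects a command queue too small for one CMDQ_BATCH_ENTRIES batch (line 4292). The clamp pattern, with the field position and width assumed rather than taken from the real IDR1 layout:

	#include <stdint.h>
	#include <assert.h>

	/* min(hardware-advertised shift, driver cap), as in the min_t()
	   calls above; the 5-bit field at an assumed position stands in for
	   the real IDR1_CMDQS/IDR1_EVTQS/IDR1_PRIQS fields */
	static uint32_t queue_shift(uint32_t idr1, unsigned int pos, uint32_t cap)
	{
		uint32_t hw = (idr1 >> pos) & 0x1f;

		return hw < cap ? hw : cap;
	}

	int main(void)
	{
		assert(queue_shift(31U << 21, 21, 8) == 8);  /* hardware offers more: clamp */
		assert(queue_shift(4U << 21, 21, 8) == 4);   /* hardware offers less: take it */
		return 0;
	}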