Lines Matching full:cmdq
374 struct arm_smmu_cmdq *cmdq = NULL;
377 cmdq = smmu->impl_ops->get_secondary_cmdq(smmu, ent);
379 return cmdq ?: &smmu->cmdq;
383 struct arm_smmu_cmdq *cmdq)
385 if (cmdq == &smmu->cmdq)
392 struct arm_smmu_cmdq *cmdq, u32 prod)
394 struct arm_smmu_queue *q = &cmdq->q;
409 if (arm_smmu_cmdq_needs_busy_polling(smmu, cmdq))
414 struct arm_smmu_cmdq *cmdq)
422 struct arm_smmu_queue *q = &cmdq->q;
432 dev_err(smmu->dev, "CMDQ error (cons 0x%08x): %s\n", cons,
456 * not to touch any of the shadow cmdq state.
465 if (arm_smmu_cmdq_needs_busy_polling(smmu, cmdq))
473 __arm_smmu_cmdq_skip_err(smmu, &smmu->cmdq);
488 static void arm_smmu_cmdq_shared_lock(struct arm_smmu_cmdq *cmdq)
498 if (atomic_fetch_inc_relaxed(&cmdq->lock) >= 0)
502 val = atomic_cond_read_relaxed(&cmdq->lock, VAL >= 0);
503 } while (atomic_cmpxchg_relaxed(&cmdq->lock, val, val + 1) != val);
506 static void arm_smmu_cmdq_shared_unlock(struct arm_smmu_cmdq *cmdq)
508 (void)atomic_dec_return_release(&cmdq->lock);
511 static bool arm_smmu_cmdq_shared_tryunlock(struct arm_smmu_cmdq *cmdq)
513 if (atomic_read(&cmdq->lock) == 1)
516 arm_smmu_cmdq_shared_unlock(cmdq);
520 #define arm_smmu_cmdq_exclusive_trylock_irqsave(cmdq, flags) \
524 __ret = !atomic_cmpxchg_relaxed(&cmdq->lock, 0, INT_MIN); \
530 #define arm_smmu_cmdq_exclusive_unlock_irqrestore(cmdq, flags) \
532 atomic_set_release(&cmdq->lock, 0); \
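
The shared/exclusive helpers matched at file lines 488-532 implement a reader-biased lock on a single atomic counter: shared holders increment a non-negative count, while the exclusive path parks the counter at INT_MIN so that late shared lockers spin until it is released. Below is a minimal userspace sketch of the same idea, written against C11 atomics rather than the kernel's atomic_t API; every name in it is illustrative, and it omits the local-interrupt disabling that the driver's _irqsave/_irqrestore macros add around the exclusive section.

#include <limits.h>
#include <stdatomic.h>
#include <stdbool.h>

/*
 * Illustrative counter-based shared/exclusive lock, modelled on the pattern
 * visible in arm_smmu_cmdq_shared_lock()/_unlock() and the exclusive
 * trylock/unlock macros above. A userspace sketch, not the driver code.
 *
 *   lock >= 0       : number of shared holders
 *   lock == INT_MIN : held exclusively
 */
struct cmdq_lock {
	atomic_int lock;
};

static void shared_lock(struct cmdq_lock *l)
{
	int val;

	/*
	 * Fast path: if nobody holds the lock exclusively, the pre-increment
	 * value is >= 0 and we are done. An increment on a negative
	 * (exclusively held) counter is harmless: the value stays negative
	 * and is discarded when exclusive_unlock() stores 0.
	 */
	if (atomic_fetch_add_explicit(&l->lock, 1, memory_order_relaxed) >= 0)
		return;

	/* Slow path: wait for the exclusive holder to release, then bump the
	 * shared count without losing a concurrent update. */
	do {
		do {
			val = atomic_load_explicit(&l->lock, memory_order_relaxed);
		} while (val < 0);
	} while (!atomic_compare_exchange_weak_explicit(&l->lock, &val, val + 1,
							memory_order_relaxed,
							memory_order_relaxed));
}

static void shared_unlock(struct cmdq_lock *l)
{
	atomic_fetch_sub_explicit(&l->lock, 1, memory_order_release);
}

static bool shared_tryunlock(struct cmdq_lock *l)
{
	/* Mirrors arm_smmu_cmdq_shared_tryunlock(): refuse to drop the last
	 * shared reference so the caller can do extra work first. */
	if (atomic_load_explicit(&l->lock, memory_order_relaxed) == 1)
		return false;

	shared_unlock(l);
	return true;
}

static bool exclusive_trylock(struct cmdq_lock *l)
{
	int expected = 0;

	/* Succeeds only when there are no shared holders at all. */
	return atomic_compare_exchange_strong_explicit(&l->lock, &expected,
						       INT_MIN,
						       memory_order_relaxed,
						       memory_order_relaxed);
}

static void exclusive_unlock(struct cmdq_lock *l)
{
	atomic_store_explicit(&l->lock, 0, memory_order_release);
}

The point of this encoding is that the common shared acquisition is a single atomic increment; only contention with an exclusive holder falls back to the cmpxchg loop.
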
576 static void __arm_smmu_cmdq_poll_set_valid_map(struct arm_smmu_cmdq *cmdq,
581 .max_n_shift = cmdq->q.llq.max_n_shift,
596 ptr = &cmdq->valid_map[swidx];
623 static void arm_smmu_cmdq_set_valid_map(struct arm_smmu_cmdq *cmdq,
626 __arm_smmu_cmdq_poll_set_valid_map(cmdq, sprod, eprod, true);
630 static void arm_smmu_cmdq_poll_valid_map(struct arm_smmu_cmdq *cmdq,
633 __arm_smmu_cmdq_poll_set_valid_map(cmdq, sprod, eprod, false);
638 struct arm_smmu_cmdq *cmdq,
646 * Try to update our copy of cons by grabbing exclusive cmdq access. If
649 if (arm_smmu_cmdq_exclusive_trylock_irqsave(cmdq, flags)) {
650 WRITE_ONCE(cmdq->q.llq.cons, readl_relaxed(cmdq->q.cons_reg));
651 arm_smmu_cmdq_exclusive_unlock_irqrestore(cmdq, flags);
652 llq->val = READ_ONCE(cmdq->q.llq.val);
658 llq->val = READ_ONCE(cmdq->q.llq.val);
670 * Must be called with the cmdq lock held in some capacity.
673 struct arm_smmu_cmdq *cmdq,
678 u32 *cmd = (u32 *)(Q_ENT(&cmdq->q, llq->prod));
694 * Must be called with the cmdq lock held in some capacity.
697 struct arm_smmu_cmdq *cmdq,
705 llq->val = READ_ONCE(cmdq->q.llq.val);
720 * cmdq->q.llq.cons. Roughly speaking:
740 llq->cons = readl(cmdq->q.cons_reg);
747 struct arm_smmu_cmdq *cmdq,
751 !arm_smmu_cmdq_needs_busy_polling(smmu, cmdq))
752 return __arm_smmu_cmdq_poll_until_msi(smmu, cmdq, llq);
754 return __arm_smmu_cmdq_poll_until_consumed(smmu, cmdq, llq);
757 static void arm_smmu_cmdq_write_entries(struct arm_smmu_cmdq *cmdq, u64 *cmds,
762 .max_n_shift = cmdq->q.llq.max_n_shift,
770 queue_write(Q_ENT(&cmdq->q, prod), cmd, CMDQ_ENT_DWORDS);
791 struct arm_smmu_cmdq *cmdq, u64 *cmds, int n,
801 llq.max_n_shift = cmdq->q.llq.max_n_shift;
805 llq.val = READ_ONCE(cmdq->q.llq.val);
811 if (arm_smmu_cmdq_poll_until_not_full(smmu, cmdq, &llq))
812 dev_err_ratelimited(smmu->dev, "CMDQ timeout\n");
820 old = cmpxchg_relaxed(&cmdq->q.llq.val, llq.val, head.val);
834 arm_smmu_cmdq_write_entries(cmdq, cmds, llq.prod, n);
837 arm_smmu_cmdq_build_sync_cmd(cmd_sync, smmu, cmdq, prod);
838 queue_write(Q_ENT(&cmdq->q, prod), cmd_sync, CMDQ_ENT_DWORDS);
843 * We achieve that by taking the cmdq lock as shared before
846 arm_smmu_cmdq_shared_lock(cmdq);
851 arm_smmu_cmdq_set_valid_map(cmdq, llq.prod, head.prod);
856 atomic_cond_read_relaxed(&cmdq->owner_prod, VAL == llq.prod);
860 &cmdq->q.llq.atomic.prod);
868 arm_smmu_cmdq_poll_valid_map(cmdq, llq.prod, prod);
874 writel_relaxed(prod, cmdq->q.prod_reg);
881 atomic_set_release(&cmdq->owner_prod, prod);
887 ret = arm_smmu_cmdq_poll_until_sync(smmu, cmdq, &llq);
892 readl_relaxed(cmdq->q.prod_reg),
893 readl_relaxed(cmdq->q.cons_reg));
897 * Try to unlock the cmdq lock. This will fail if we're the last
898 * reader, in which case we can safely update cmdq->q.llq.cons
900 if (!arm_smmu_cmdq_shared_tryunlock(cmdq)) {
901 WRITE_ONCE(cmdq->q.llq.cons, llq.cons);
902 arm_smmu_cmdq_shared_unlock(cmdq);
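
The arm_smmu_cmdq_issue_cmdlist() fragments at file lines 791-902 show the lock-free insertion scheme: a CPU snapshots the 64-bit prod/cons word, claims a range of slots with a single cmpxchg, writes its commands (and, if requested, a CMD_SYNC) into that range, marks the range in the valid bitmap, and the CPU that ends up as owner publishes prod to the hardware before everyone polls for their sync to be consumed. The reservation step alone can be illustrated with a simplified userspace model that ignores the owner flag and the queue wrap bits; all names below are illustrative.

#include <stdatomic.h>
#include <stdint.h>

/*
 * Simplified stand-in for the queue snapshot: prod in the low 32 bits, cons
 * in the high 32 bits, packed into one 64-bit word so the whole state can be
 * claimed with a single compare-and-swap. The driver additionally keeps an
 * owner flag and wrap bits in this word, which the sketch omits.
 */
struct llq {
	_Atomic uint64_t val;
};

#define LLQ_PROD(v)		((uint32_t)((v) & 0xffffffffu))
#define LLQ_CONS(v)		((uint32_t)((v) >> 32))
#define LLQ_PACK(prod, cons)	(((uint64_t)(cons) << 32) | (uint64_t)(prod))

/*
 * Reserve 'n' consecutive slots in a queue of (1 << shift) entries and return
 * the starting index. prod/cons are treated as free-running counters here;
 * the driver instead uses wrap bits and polls the hardware cons pointer
 * (with a timeout) when the queue is full.
 */
static uint32_t reserve_slots(struct llq *q, unsigned int shift, uint32_t n)
{
	uint32_t nents = 1u << shift;
	uint64_t old = atomic_load_explicit(&q->val, memory_order_relaxed);

	for (;;) {
		uint32_t prod = LLQ_PROD(old);
		uint32_t cons = LLQ_CONS(old);
		uint64_t head;

		if (prod - cons + n > nents) {
			/* Queue full: re-read the snapshot and try again. */
			old = atomic_load_explicit(&q->val, memory_order_relaxed);
			continue;
		}

		head = LLQ_PACK(prod + n, cons);
		/* On failure 'old' is refreshed to the current value, so retry. */
		if (atomic_compare_exchange_weak_explicit(&q->val, &old, head,
							  memory_order_relaxed,
							  memory_order_relaxed))
			return prod;
	}
}

Once a range is reserved, each CPU fills its slots and flips the corresponding bits in the valid map; the owner polls the valid map for the whole combined range before writing prod to the hardware, so the SMMU never sees a prod value that covers unwritten entries.
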
917 dev_warn(smmu->dev, "ignoring unknown CMDQ opcode 0x%x\n",
943 cmds->cmdq = arm_smmu_get_cmdq(smmu, ent);
950 bool unsupported_cmd = !arm_smmu_cmdq_supports_cmd(cmds->cmdq, cmd);
956 arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmdq, cmds->cmds,
962 arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmdq, cmds->cmds,
969 dev_warn(smmu->dev, "ignoring unknown CMDQ opcode 0x%x\n",
980 return arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmdq, cmds->cmds,
2066 dev_warn(smmu->dev, "CMDQ MSI write aborted\n");
3757 struct arm_smmu_cmdq *cmdq)
3759 unsigned int nents = 1 << cmdq->q.llq.max_n_shift;
3761 atomic_set(&cmdq->owner_prod, 0);
3762 atomic_set(&cmdq->lock, 0);
3764 cmdq->valid_map = (atomic_long_t *)devm_bitmap_zalloc(smmu->dev, nents,
3766 if (!cmdq->valid_map)
3776 /* cmdq */
3777 ret = arm_smmu_init_one_queue(smmu, &smmu->cmdq.q, smmu->base,
3779 CMDQ_ENT_DWORDS, "cmdq");
3783 ret = arm_smmu_cmdq_init(smmu, &smmu->cmdq);
3977 /* Allocate MSIs for evtq, gerror and priq. Ignore cmdq */
4152 writeq_relaxed(smmu->cmdq.q.q_base, smmu->base + ARM_SMMU_CMDQ_BASE);
4153 writel_relaxed(smmu->cmdq.q.llq.prod, smmu->base + ARM_SMMU_CMDQ_PROD);
4154 writel_relaxed(smmu->cmdq.q.llq.cons, smmu->base + ARM_SMMU_CMDQ_CONS);
4423 smmu->cmdq.q.llq.max_n_shift = min_t(u32, CMDQ_MAX_SZ_SHIFT,
4425 if (smmu->cmdq.q.llq.max_n_shift <= ilog2(CMDQ_BATCH_ENTRIES)) {
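
The final matches show the queue being sized at probe time: max_n_shift is the log2 of the entry count, clamped to the driver's CMDQ_MAX_SZ_SHIFT, with a warning path when the queue cannot hold a full command batch, and arm_smmu_cmdq_init() then allocates one valid-map bit per entry. The arithmetic reduces to the following; the constants and the IDR1 value here are placeholders, not the driver's actual definitions.

#include <stdint.h>
#include <stdio.h>

/* Placeholder limits; the driver's CMDQ_MAX_SZ_SHIFT and CMDQ_BATCH_ENTRIES
 * are defined in the SMMUv3 headers and may differ from these values. */
#define EXAMPLE_MAX_SZ_SHIFT	8
#define EXAMPLE_BATCH_ENTRIES	64

int main(void)
{
	/* Hypothetical log2(queue size) reported by the hardware (IDR1.CMDQS). */
	uint32_t hw_shift = 20;
	uint32_t max_n_shift = hw_shift < EXAMPLE_MAX_SZ_SHIFT ?
			       hw_shift : EXAMPLE_MAX_SZ_SHIFT;
	uint32_t nents = 1u << max_n_shift;	/* queue entries, and valid-map bits */

	printf("cmdq entries: %u\n", nents);

	/* Equivalent of the max_n_shift <= ilog2(CMDQ_BATCH_ENTRIES) check. */
	if (nents <= EXAMPLE_BATCH_ENTRIES)
		printf("command queue too small to hold a full batch\n");

	return 0;
}
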