Lines Matching full:ctl

11  * CTL - MDP Control Pool Manager
21 * In certain use cases (high-resolution dual pipe), one single CTL can be shared across multiple CRTCs.
33 /* CTL status bitmask */
45 /* when do CTL registers need to be flushed? (mask of trigger bits) */
50 /* True if the current CTL has FLUSH bits pending for single FLUSH. */
53 struct mdp5_ctl *pair; /* Paired CTL to be flushed together */
59 /* number of CTL / Layer Mixers in this hw config: */
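The struct fields matched above (status bitmask, trigger mask, flush_pending, pair, and the CTL / Layer Mixer counts) are the per-CTL and per-manager bookkeeping. A minimal stand-alone sketch of that state, with kernel types and the hw spinlock dropped and an arbitrary pool size of 8, might look like:

#include <stdbool.h>
#include <stdint.h>

/* Simplified stand-in for struct mdp5_ctl: only the bookkeeping that the
 * matched lines mention, with kernel types and locking dropped. */
struct model_ctl {
	int id;                          /* CTL index within the pool */
	uint32_t status;                 /* CTL status bitmask (busy/booked) */
	uint32_t pending_ctl_trigger;    /* trigger bits: CTL regs need a flush */
	bool flush_pending;              /* FLUSH bits deferred for single FLUSH */
	struct model_ctl *pair;          /* paired CTL to be flushed together */
	bool encoder_enabled;
	bool cursor_on;
	uint32_t flush_mask;             /* flush bits accumulated across commits */
};

/* Simplified stand-in for the manager: a pool of CTLs plus the counts of
 * CTLs / Layer Mixers in this hw config. */
struct model_ctl_manager {
	int nctl;
	int nlm;
	struct model_ctl ctls[8];        /* 8 is an arbitrary pool size here */
};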
84 void ctl_write(struct mdp5_ctl *ctl, u32 reg, u32 data)
86 struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm);
88 (void)ctl->reg_offset; /* TODO use this instead of mdp5_write */
93 u32 ctl_read(struct mdp5_ctl *ctl, u32 reg)
95 struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm);
97 (void)ctl->reg_offset; /* TODO use this instead of mdp5_read */
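Both accessors carry the same TODO: they currently go through the global mdp5_write()/mdp5_read() helpers with absolute register offsets, while ctl->reg_offset sits unused. A purely hypothetical sketch of what offset-relative access could look like (toy_ctl_write/toy_ctl_read and the mmio field are illustrative names, not the driver's API):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical: a per-CTL MMIO window addressed relative to reg_offset.
 * This only illustrates the TODO in ctl_write()/ctl_read(); the real
 * driver still routes every access through mdp5_write()/mdp5_read()
 * with absolute register offsets. */
struct toy_ctl {
	uint32_t reg_offset;        /* base of this CTL's register block */
	volatile uint32_t *mmio;    /* start of the whole register space */
};

static void toy_ctl_write(struct toy_ctl *ctl, uint32_t reg, uint32_t data)
{
	/* 'reg' is relative to the CTL block, not the whole MDP5 space */
	ctl->mmio[(ctl->reg_offset + reg) / 4] = data;
}

static uint32_t toy_ctl_read(struct toy_ctl *ctl, uint32_t reg)
{
	return ctl->mmio[(ctl->reg_offset + reg) / 4];
}

int main(void)
{
	static uint32_t fake_regs[1024];                 /* pretend register file */
	struct toy_ctl ctl = { .reg_offset = 0x100, .mmio = fake_regs };

	toy_ctl_write(&ctl, 0x14, 0xdeadbeef);
	printf("0x%08x\n", (unsigned)toy_ctl_read(&ctl, 0x14));
	return 0;
}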
136 static void set_ctl_op(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline)
164 spin_lock_irqsave(&ctl->hw_lock, flags);
165 ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id), ctl_op);
166 spin_unlock_irqrestore(&ctl->hw_lock, flags);
169 int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline)
171 struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm);
178 set_ctl_op(ctl, pipeline);
183 static bool start_signal_needed(struct mdp5_ctl *ctl,
188 if (!ctl->encoder_enabled)
208 static void send_start_signal(struct mdp5_ctl *ctl)
212 spin_lock_irqsave(&ctl->hw_lock, flags);
213 ctl_write(ctl, REG_MDP5_CTL_START(ctl->id), 1);
214 spin_unlock_irqrestore(&ctl->hw_lock, flags);
220 * @ctl: the CTL instance
227 int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl,
233 if (WARN_ON(!ctl))
236 ctl->encoder_enabled = enabled;
239 if (start_signal_needed(ctl, pipeline)) {
240 send_start_signal(ctl);
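The encoder-state matches above show the START handshake: mdp5_ctl_set_encoder_state() records the new state and, when start_signal_needed() agrees, send_start_signal() writes 1 to the CTL's START register under hw_lock. A condensed user-space model of that gating, with the "needs start" test simplified to the encoder flag only (the real check also depends on the interface type):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct toy_ctl {
	int id;
	bool encoder_enabled;
	uint32_t start_reg;          /* stand-in for REG_MDP5_CTL_START(id) */
};

/* Simplified: the real start_signal_needed() also requires an interface
 * that needs an explicit kickoff; here the encoder flag alone decides. */
static bool toy_start_signal_needed(const struct toy_ctl *ctl)
{
	return ctl->encoder_enabled;
}

static void toy_send_start_signal(struct toy_ctl *ctl)
{
	ctl->start_reg = 1;          /* ctl_write(ctl, REG_MDP5_CTL_START(id), 1) */
}

static int toy_set_encoder_state(struct toy_ctl *ctl, bool enabled)
{
	ctl->encoder_enabled = enabled;

	if (toy_start_signal_needed(ctl))
		toy_send_start_signal(ctl);
	return 0;
}

int main(void)
{
	struct toy_ctl ctl = { .id = 0 };

	toy_set_encoder_state(&ctl, true);
	printf("START written: %u\n", (unsigned)ctl.start_reg);
	return 0;
}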
248 * CTL registers need to be flushed after calling this function
251 int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
254 struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
260 DRM_DEV_ERROR(ctl_mgr->dev->dev, "CTL %d cannot find LM",
261 ctl->id);
270 spin_lock_irqsave(&ctl->hw_lock, flags);
272 blend_cfg = ctl_read(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, mixer->lm));
279 ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, mixer->lm), blend_cfg);
280 ctl->cursor_on = enable;
282 spin_unlock_irqrestore(&ctl->hw_lock, flags);
284 ctl->pending_ctl_trigger = mdp_ctl_flush_mask_cursor(cursor_id);
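mdp5_ctl_set_cursor() is a read-modify-write of the mixer's LAYER register under hw_lock, followed by recording the cursor flush bit in pending_ctl_trigger so the change is picked up by the next commit. A self-contained model of that sequence; the bit positions used here are made up for illustration:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TOY_CURSOR_OUT (1u << 24)    /* illustrative bit, not the real layout */

struct toy_ctl {
	uint32_t layer_reg;              /* stand-in for REG_MDP5_CTL_LAYER_REG(id, lm) */
	uint32_t pending_ctl_trigger;
	bool cursor_on;
};

/* Stand-in for mdp_ctl_flush_mask_cursor(): one flush bit per cursor,
 * at positions chosen only for this model. */
static uint32_t toy_flush_mask_cursor(int cursor_id)
{
	return 1u << (22 + cursor_id);
}

static int toy_set_cursor(struct toy_ctl *ctl, int cursor_id, bool enable)
{
	uint32_t blend_cfg = ctl->layer_reg;            /* ctl_read(...) */

	if (enable)
		blend_cfg |= TOY_CURSOR_OUT;
	else
		blend_cfg &= ~TOY_CURSOR_OUT;

	ctl->layer_reg = blend_cfg;                     /* ctl_write(...) */
	ctl->cursor_on = enable;

	/* CTL registers still need a flush after this, so remember the bit. */
	ctl->pending_ctl_trigger = toy_flush_mask_cursor(cursor_id);
	return 0;
}

int main(void)
{
	struct toy_ctl ctl = { 0 };

	toy_set_cursor(&ctl, 0, true);
	printf("layer=0x%08x pending=0x%08x\n",
	       (unsigned)ctl.layer_reg, (unsigned)ctl.pending_ctl_trigger);
	return 0;
}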
332 static void mdp5_ctl_reset_blend_regs(struct mdp5_ctl *ctl)
335 struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
338 spin_lock_irqsave(&ctl->hw_lock, flags);
341 ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, i), 0x0);
342 ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, i), 0x0);
345 spin_unlock_irqrestore(&ctl->hw_lock, flags);
350 int mdp5_ctl_blend(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
362 mdp5_ctl_reset_blend_regs(ctl);
390 spin_lock_irqsave(&ctl->hw_lock, flags);
391 if (ctl->cursor_on)
394 ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, mixer->lm), blend_cfg);
395 ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, mixer->lm),
398 ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, r_mixer->lm),
400 ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, r_mixer->lm),
403 spin_unlock_irqrestore(&ctl->hw_lock, flags);
405 ctl->pending_ctl_trigger = mdp_ctl_flush_mask_lm(mixer->lm);
407 ctl->pending_ctl_trigger |= mdp_ctl_flush_mask_lm(r_mixer->lm);
473 static u32 fix_sw_flush(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
476 struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
488 static void fix_for_single_flush(struct mdp5_ctl *ctl, u32 *flush_mask,
491 struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
493 if (ctl->pair) {
494 DBG("CTL %d FLUSH pending mask %x", ctl->id, *flush_mask);
495 ctl->flush_pending = true;
499 if (ctl->pair->flush_pending) {
500 *flush_id = min_t(u32, ctl->id, ctl->pair->id);
503 ctl->flush_pending = false;
504 ctl->pair->flush_pending = false;
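fix_for_single_flush() is the "single FLUSH" path hinted at by the flush_pending and pair fields: a paired CTL defers its FLUSH bits until its partner also has bits pending, then both are flushed through one register addressed by the lower CTL id. A stand-alone model of that hand-off; the shared accumulator is a plain global here, whereas the driver keeps it in the CTL manager:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct toy_ctl {
	int id;
	bool flush_pending;
	struct toy_ctl *pair;
};

/* Shared accumulator for the deferred bits of both paired CTLs. */
static uint32_t single_flush_pending_mask;

static void toy_fix_for_single_flush(struct toy_ctl *ctl, uint32_t *flush_mask,
				     int *flush_id)
{
	if (!ctl->pair)
		return;                          /* unpaired CTLs flush normally */

	/* Defer: remember our bits, flush nothing for now. */
	ctl->flush_pending = true;
	single_flush_pending_mask |= *flush_mask;
	*flush_mask = 0;

	if (ctl->pair->flush_pending) {
		/* Both halves are ready: one FLUSH through the lower CTL id. */
		*flush_id = ctl->id < ctl->pair->id ? ctl->id : ctl->pair->id;
		*flush_mask = single_flush_pending_mask;

		ctl->flush_pending = false;
		ctl->pair->flush_pending = false;
		single_flush_pending_mask = 0;
	}
}

int main(void)
{
	struct toy_ctl c0 = { .id = 0 }, c1 = { .id = 1 };
	uint32_t mask;
	int id;

	c0.pair = &c1;
	c1.pair = &c0;

	mask = 0x3; id = c0.id;
	toy_fix_for_single_flush(&c0, &mask, &id);
	printf("after CTL0: mask=0x%x (deferred)\n", (unsigned)mask);

	mask = 0xc; id = c1.id;
	toy_fix_for_single_flush(&c1, &mask, &id);
	printf("after CTL1: mask=0x%x via CTL %d\n", (unsigned)mask, id);
	return 0;
}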
516 * @ctl: the CTL instance
530 * CTL registers need to be flushed in some circumstances; if that is the case, some trigger bits will be present in both the flush mask and
532 * ctl->pending_ctl_trigger.
536 u32 mdp5_ctl_commit(struct mdp5_ctl *ctl,
540 struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
542 u32 flush_id = ctl->id;
545 VERB("flush_mask=%x, trigger=%x", flush_mask, ctl->pending_ctl_trigger);
547 if (ctl->pending_ctl_trigger & flush_mask) {
549 ctl->pending_ctl_trigger = 0;
552 flush_mask |= fix_sw_flush(ctl, pipeline, flush_mask);
558 fix_for_single_flush(ctl, &flush_mask, &flush_id);
561 ctl->flush_mask |= flush_mask;
564 flush_mask |= ctl->flush_mask;
565 ctl->flush_mask = 0;
569 spin_lock_irqsave(&ctl->hw_lock, flags);
570 ctl_write(ctl, REG_MDP5_CTL_FLUSH(flush_id), flush_mask);
571 spin_unlock_irqrestore(&ctl->hw_lock, flags);
574 if (start_signal_needed(ctl, pipeline)) {
575 send_start_signal(ctl);
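Within mdp5_ctl_commit(), the matches show two behaviours: pending trigger bits being promoted into a flush of the CTL registers themselves, and the flush mask either being accumulated into ctl->flush_mask or folded back in and written to the FLUSH register. A condensed model of that accumulate-or-write decision; the branch is driven by a bool start flag here (an assumption about how the caller chooses between the two paths), the CTL-flush bit is illustrative, and the fix_sw_flush()/fix_for_single_flush() steps and the START handshake are left out:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TOY_FLUSH_CTL (1u << 17)     /* illustrative "flush the CTL itself" bit */

struct toy_ctl {
	uint32_t pending_ctl_trigger;    /* set by blend/cursor/pipeline changes */
	uint32_t flush_mask;             /* bits accumulated across deferred commits */
	uint32_t flush_reg;              /* stand-in for REG_MDP5_CTL_FLUSH(id) */
};

static uint32_t toy_commit(struct toy_ctl *ctl, uint32_t flush_mask, bool start)
{
	/* Flushing anything that is also a pending trigger means the CTL
	 * registers themselves must be flushed as well. */
	if (ctl->pending_ctl_trigger & flush_mask) {
		flush_mask |= TOY_FLUSH_CTL;
		ctl->pending_ctl_trigger = 0;
	}

	if (!start) {
		/* Not kicking the hardware yet: just accumulate the bits. */
		ctl->flush_mask |= flush_mask;
		return flush_mask;
	}

	/* Kicking now: fold in everything accumulated and write the register. */
	flush_mask |= ctl->flush_mask;
	ctl->flush_mask = 0;
	if (flush_mask)
		ctl->flush_reg = flush_mask;     /* ctl_write(ctl, FLUSH, mask) */

	return flush_mask;
}

int main(void)
{
	struct toy_ctl ctl = { .pending_ctl_trigger = 0x4 };
	uint32_t written;

	toy_commit(&ctl, 0x4, false);            /* deferred */
	written = toy_commit(&ctl, 0x8, true);   /* deferred bits + new bits */
	printf("written=0x%x\n", (unsigned)written);
	return 0;
}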
581 u32 mdp5_ctl_get_commit_status(struct mdp5_ctl *ctl)
583 return ctl_read(ctl, REG_MDP5_CTL_FLUSH(ctl->id));
586 int mdp5_ctl_get_ctl_id(struct mdp5_ctl *ctl)
588 return WARN_ON(!ctl) ? -EINVAL : ctl->id;
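mdp5_ctl_get_commit_status() simply reads back the FLUSH register, and callers typically wait for the bits they flushed to read back as zero. A small polling sketch of that pattern; toy_commit_status() stands in for the real accessor, and the fake "hardware" retires one bit per poll so the loop terminates:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for mdp5_ctl_get_commit_status(): pretend FLUSH register. */
static uint32_t fake_flush_reg = 0x5;

static uint32_t toy_commit_status(void)
{
	uint32_t v = fake_flush_reg;

	fake_flush_reg &= fake_flush_reg - 1;    /* "hardware" retires one bit */
	return v;
}

/* Poll until every bit we flushed has been consumed; the driver does the
 * equivalent with a vblank-driven wait plus a timeout. */
static bool toy_wait_for_flush_done(uint32_t flushed_mask, int max_polls)
{
	while (max_polls-- > 0) {
		if ((toy_commit_status() & flushed_mask) == 0)
			return true;
	}
	return false;
}

int main(void)
{
	printf("flush done: %s\n",
	       toy_wait_for_flush_done(0x5, 16) ? "yes" : "timed out");
	return 0;
}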
626 * mdp5_ctl_request() - CTL allocation
628 * Try to return a booked CTL if @intf_num is 1 or 2, an unbooked one for other INTFs.
629 * If no CTL is available in the preferred category, allocate from the other one.
631 * @return NULL if no CTL is available.
636 struct mdp5_ctl *ctl = NULL;
650 "fall back to the other CTL category for INTF %d!\n", intf_num);
657 DRM_DEV_ERROR(ctl_mgr->dev->dev, "No more CTL available!");
661 ctl = &ctl_mgr->ctls[c];
662 ctl->status |= CTL_STAT_BUSY;
663 ctl->pending_ctl_trigger = 0;
664 DBG("CTL %d allocated", ctl->id);
668 return ctl;
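The allocation matches describe a preference scheme: requests for INTF 1 or 2 look for a booked CTL first, other interfaces look for an unbooked one, and if the preferred category is exhausted the search falls back to the other before failing. A stand-alone model of that two-pass selection; the status bits and pool size are illustrative:

#include <stdint.h>
#include <stdio.h>

#define TOY_STAT_BUSY   (1u << 0)    /* illustrative status bits */
#define TOY_STAT_BOOKED (1u << 1)

struct toy_ctl {
	int id;
	uint32_t status;
};

struct toy_ctl_manager {
	int nctl;
	struct toy_ctl ctls[4];          /* pool size picked for the example */
};

static struct toy_ctl *toy_ctlm_request(struct toy_ctl_manager *m, int intf_num)
{
	const uint32_t checkm = TOY_STAT_BUSY | TOY_STAT_BOOKED;
	/* INTF 1/2 prefer booked CTLs; everything else prefers unbooked ones. */
	uint32_t match = (intf_num == 1 || intf_num == 2) ? TOY_STAT_BOOKED : 0;
	int pass, c;

	for (pass = 0; pass < 2; pass++) {
		for (c = 0; c < m->nctl; c++) {
			if ((m->ctls[c].status & checkm) == match) {
				m->ctls[c].status |= TOY_STAT_BUSY;
				return &m->ctls[c];
			}
		}
		/* Preferred category exhausted: fall back to the other one. */
		match ^= TOY_STAT_BOOKED;
	}
	return NULL;                     /* no CTL available at all */
}

int main(void)
{
	struct toy_ctl_manager m = {
		.nctl = 4,
		.ctls = {
			{ .id = 0, .status = TOY_STAT_BOOKED },
			{ .id = 1, .status = TOY_STAT_BOOKED },
			{ .id = 2 },
			{ .id = 3 },
		},
	};
	struct toy_ctl *ctl = toy_ctlm_request(&m, 1);

	printf("INTF 1 got CTL %d\n", ctl ? ctl->id : -1);
	return 0;
}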
677 struct mdp5_ctl *ctl = &ctl_mgr->ctls[c];
679 spin_lock_irqsave(&ctl->hw_lock, flags);
680 ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id), 0);
681 spin_unlock_irqrestore(&ctl->hw_lock, flags);
692 const struct mdp5_ctl_block *ctl_cfg = &hw_cfg->ctl;
698 DRM_DEV_ERROR(dev->dev, "failed to allocate CTL manager\n");
708 /* initialize the CTL manager: */
715 /* initialize each CTL of the pool: */
718 struct mdp5_ctl *ctl = &ctl_mgr->ctls[c];
726 ctl->ctlm = ctl_mgr;
727 ctl->id = c;
728 ctl->reg_offset = ctl_cfg->base[c];
729 ctl->status = 0;
730 spin_lock_init(&ctl->hw_lock);