// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Broadcom
 */

/**
 * DOC: VC4 KMS
 *
 * This is the general code for implementing KMS mode setting that
 * doesn't clearly associate with any of the other objects (plane,
 * crtc, HDMI encoder).
 */

#include <linux/clk.h>
#include <linux/sort.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>

#include "vc4_drv.h"
#include "vc4_regs.h"

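/*
 * Brief notes on the CTM private state below, as it is used in this file:
 * @ctm points at the drm_color_ctm blob data taken from the CRTC state in
 * vc4_ctm_atomic_check(), and @fifo is the 1-based HVS FIFO the matrix is
 * enabled on, with 0 meaning the CTM is disabled.
 */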
struct vc4_ctm_state {
	struct drm_private_state base;
	struct drm_color_ctm *ctm;
	int fifo;
};

static struct vc4_ctm_state *
to_vc4_ctm_state(const struct drm_private_state *priv)
{
	return container_of(priv, struct vc4_ctm_state, base);
}

struct vc4_load_tracker_state {
	struct drm_private_state base;
	u64 hvs_load;
	u64 membus_load;
};

static struct vc4_load_tracker_state *
to_vc4_load_tracker_state(const struct drm_private_state *priv)
{
	return container_of(priv, struct vc4_load_tracker_state, base);
}

static struct vc4_ctm_state *vc4_get_ctm_state(struct drm_atomic_state *state,
					       struct drm_private_obj *manager)
{
	struct drm_device *dev = state->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_private_state *priv_state;
	int ret;

	ret = drm_modeset_lock(&vc4->ctm_state_lock, state->acquire_ctx);
	if (ret)
		return ERR_PTR(ret);

	priv_state = drm_atomic_get_private_obj_state(state, manager);
	if (IS_ERR(priv_state))
		return ERR_CAST(priv_state);

	return to_vc4_ctm_state(priv_state);
}

static struct drm_private_state *
vc4_ctm_duplicate_state(struct drm_private_obj *obj)
{
	struct vc4_ctm_state *state;

	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

	return &state->base;
}

static void vc4_ctm_destroy_state(struct drm_private_obj *obj,
				  struct drm_private_state *state)
{
	struct vc4_ctm_state *ctm_state = to_vc4_ctm_state(state);

	kfree(ctm_state);
}

static const struct drm_private_state_funcs vc4_ctm_state_funcs = {
	.atomic_duplicate_state = vc4_ctm_duplicate_state,
	.atomic_destroy_state = vc4_ctm_destroy_state,
};

static void vc4_ctm_obj_fini(struct drm_device *dev, void *unused)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	drm_atomic_private_obj_fini(&vc4->ctm_manager);
}

static int vc4_ctm_obj_init(struct vc4_dev *vc4)
{
	struct vc4_ctm_state *ctm_state;

	drm_modeset_lock_init(&vc4->ctm_state_lock);

	ctm_state = kzalloc(sizeof(*ctm_state), GFP_KERNEL);
	if (!ctm_state)
		return -ENOMEM;

	drm_atomic_private_obj_init(&vc4->base, &vc4->ctm_manager, &ctm_state->base,
				    &vc4_ctm_state_funcs);

	return drmm_add_action_or_reset(&vc4->base, vc4_ctm_obj_fini, NULL);
}

/* Converts a DRM S31.32 value to the HW S0.9 format. */
static u16 vc4_ctm_s31_32_to_s0_9(u64 in)
{
	u16 r;

	/* Sign bit. */
	r = in & BIT_ULL(63) ? BIT(9) : 0;

	if ((in & GENMASK_ULL(62, 32)) > 0) {
		/* We have zero integer bits so we can only saturate here. */
		r |= GENMASK(8, 0);
	} else {
		/* Otherwise take the 9 most important fractional bits. */
		r |= (in >> 23) & GENMASK(8, 0);
	}

	return r;
}

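/*
 * A worked example of vc4_ctm_s31_32_to_s0_9() above, for reference: 0.5 in
 * S31.32 is 0x0000000080000000, the integer bits 62:32 are all zero, so the
 * result is (0x80000000 >> 23) & 0x1ff = 0x100, i.e. 256/512 = 0.5 in S0.9.
 * Any magnitude of 1.0 or more (for instance 1.5, which is 0x180000000) has
 * a non-zero integer part and saturates to 0x1ff, roughly 0.998.
 */
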
static void
vc4_ctm_commit(struct vc4_dev *vc4, struct drm_atomic_state *state)
{
	struct vc4_hvs *hvs = vc4->hvs;
	struct vc4_ctm_state *ctm_state = to_vc4_ctm_state(vc4->ctm_manager.state);
	struct drm_color_ctm *ctm = ctm_state->ctm;

	if (ctm_state->fifo) {
		HVS_WRITE(SCALER_OLEDCOEF2,
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[0]),
					SCALER_OLEDCOEF2_R_TO_R) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[3]),
					SCALER_OLEDCOEF2_R_TO_G) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[6]),
					SCALER_OLEDCOEF2_R_TO_B));
		HVS_WRITE(SCALER_OLEDCOEF1,
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[1]),
					SCALER_OLEDCOEF1_G_TO_R) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[4]),
					SCALER_OLEDCOEF1_G_TO_G) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[7]),
					SCALER_OLEDCOEF1_G_TO_B));
		HVS_WRITE(SCALER_OLEDCOEF0,
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[2]),
					SCALER_OLEDCOEF0_B_TO_R) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[5]),
					SCALER_OLEDCOEF0_B_TO_G) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[8]),
					SCALER_OLEDCOEF0_B_TO_B));
	}

	HVS_WRITE(SCALER_OLEDOFFS,
		  VC4_SET_FIELD(ctm_state->fifo, SCALER_OLEDOFFS_DISPFIFO));
}

struct vc4_hvs_state *
vc4_hvs_get_new_global_state(const struct drm_atomic_state *state)
{
	struct vc4_dev *vc4 = to_vc4_dev(state->dev);
	struct drm_private_state *priv_state;

	priv_state = drm_atomic_get_new_private_obj_state(state, &vc4->hvs_channels);
	if (!priv_state)
		return ERR_PTR(-EINVAL);

	return to_vc4_hvs_state(priv_state);
}

struct vc4_hvs_state *
vc4_hvs_get_old_global_state(const struct drm_atomic_state *state)
{
	struct vc4_dev *vc4 = to_vc4_dev(state->dev);
	struct drm_private_state *priv_state;

	priv_state = drm_atomic_get_old_private_obj_state(state, &vc4->hvs_channels);
	if (!priv_state)
		return ERR_PTR(-EINVAL);

	return to_vc4_hvs_state(priv_state);
}

struct vc4_hvs_state *
vc4_hvs_get_global_state(struct drm_atomic_state *state)
{
	struct vc4_dev *vc4 = to_vc4_dev(state->dev);
	struct drm_private_state *priv_state;

	priv_state = drm_atomic_get_private_obj_state(state, &vc4->hvs_channels);
	if (IS_ERR(priv_state))
		return ERR_CAST(priv_state);

	return to_vc4_hvs_state(priv_state);
}

static void vc4_hvs_pv_muxing_commit(struct vc4_dev *vc4,
				     struct drm_atomic_state *state)
{
	struct vc4_hvs *hvs = vc4->hvs;
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	unsigned int i;

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
		struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
		u32 dispctrl;
		u32 dsp3_mux;

		if (!crtc_state->active)
			continue;

		if (vc4_state->assigned_channel != 2)
			continue;

		/*
		 * SCALER_DISPCTRL_DSP3 = X, where X < 2 means 'connect DSP3 to
		 * FIFO X'.
		 * SCALER_DISPCTRL_DSP3 = 3 means 'disable DSP 3'.
		 *
		 * DSP3 is connected to FIFO2 unless the transposer is
		 * enabled. In this case, FIFO 2 is directly accessed by the
		 * TXP IP, and we need to disable the FIFO2 -> pixelvalve1
		 * route.
		 */
		if (vc4_crtc->feeds_txp)
			dsp3_mux = VC4_SET_FIELD(3, SCALER_DISPCTRL_DSP3_MUX);
		else
			dsp3_mux = VC4_SET_FIELD(2, SCALER_DISPCTRL_DSP3_MUX);

		dispctrl = HVS_READ(SCALER_DISPCTRL) &
			   ~SCALER_DISPCTRL_DSP3_MUX_MASK;
		HVS_WRITE(SCALER_DISPCTRL, dispctrl | dsp3_mux);
	}
}

static void vc5_hvs_pv_muxing_commit(struct vc4_dev *vc4,
				     struct drm_atomic_state *state)
{
	struct vc4_hvs *hvs = vc4->hvs;
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	unsigned char mux;
	unsigned int i;
	u32 reg;

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
		struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
		unsigned int channel = vc4_state->assigned_channel;

		if (!vc4_state->update_muxing)
			continue;

		switch (vc4_crtc->data->hvs_output) {
		case 2:
			drm_WARN_ON(&vc4->base,
				    VC4_GET_FIELD(HVS_READ(SCALER_DISPCTRL),
						  SCALER_DISPCTRL_DSP3_MUX) == channel);

			mux = (channel == 2) ? 0 : 1;
			reg = HVS_READ(SCALER_DISPECTRL);
			HVS_WRITE(SCALER_DISPECTRL,
				  (reg & ~SCALER_DISPECTRL_DSP2_MUX_MASK) |
				  VC4_SET_FIELD(mux, SCALER_DISPECTRL_DSP2_MUX));
			break;

		case 3:
			if (channel == VC4_HVS_CHANNEL_DISABLED)
				mux = 3;
			else
				mux = channel;

			reg = HVS_READ(SCALER_DISPCTRL);
			HVS_WRITE(SCALER_DISPCTRL,
				  (reg & ~SCALER_DISPCTRL_DSP3_MUX_MASK) |
				  VC4_SET_FIELD(mux, SCALER_DISPCTRL_DSP3_MUX));
			break;

		case 4:
			if (channel == VC4_HVS_CHANNEL_DISABLED)
				mux = 3;
			else
				mux = channel;

			reg = HVS_READ(SCALER_DISPEOLN);
			HVS_WRITE(SCALER_DISPEOLN,
				  (reg & ~SCALER_DISPEOLN_DSP4_MUX_MASK) |
				  VC4_SET_FIELD(mux, SCALER_DISPEOLN_DSP4_MUX));

			break;

		case 5:
			if (channel == VC4_HVS_CHANNEL_DISABLED)
				mux = 3;
			else
				mux = channel;

			reg = HVS_READ(SCALER_DISPDITHER);
			HVS_WRITE(SCALER_DISPDITHER,
				  (reg & ~SCALER_DISPDITHER_DSP5_MUX_MASK) |
				  VC4_SET_FIELD(mux, SCALER_DISPDITHER_DSP5_MUX));
			break;

		default:
			break;
		}
	}
}

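/*
 * A rough sketch of the BCM2711 core clock handling in
 * vc4_atomic_commit_tail() below, with illustrative numbers and assuming
 * hvs->max_core_rate is at least 500 MHz: if the old state needed a
 * core_clock_rate of 200 MHz and the new one needs 300 MHz, the commit first
 * requests a minimum rate of clamp(max(200 MHz, 300 MHz), 500 MHz,
 * hvs->max_core_rate) = 500 MHz so the modeset itself has headroom, and once
 * the flip is done it lowers the floor to min(hvs->max_core_rate, 300 MHz) =
 * 300 MHz. The clock framework is still free to run the clock faster than
 * the requested minimum.
 */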
static void vc4_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_hvs *hvs = vc4->hvs;
	struct drm_crtc_state *new_crtc_state;
	struct vc4_hvs_state *new_hvs_state;
	struct drm_crtc *crtc;
	struct vc4_hvs_state *old_hvs_state;
	unsigned int channel;
	int i;

	old_hvs_state = vc4_hvs_get_old_global_state(state);
	if (WARN_ON(IS_ERR(old_hvs_state)))
		return;

	new_hvs_state = vc4_hvs_get_new_global_state(state);
	if (WARN_ON(IS_ERR(new_hvs_state)))
		return;

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		struct vc4_crtc_state *vc4_crtc_state;

		if (!new_crtc_state->commit)
			continue;

		vc4_crtc_state = to_vc4_crtc_state(new_crtc_state);
		vc4_hvs_mask_underrun(hvs, vc4_crtc_state->assigned_channel);
	}

	for (channel = 0; channel < HVS_NUM_CHANNELS; channel++) {
		struct drm_crtc_commit *commit;
		int ret;

		if (!old_hvs_state->fifo_state[channel].in_use)
			continue;

		commit = old_hvs_state->fifo_state[channel].pending_commit;
		if (!commit)
			continue;

		ret = drm_crtc_commit_wait(commit);
		if (ret)
			drm_err(dev, "Timed out waiting for commit\n");

		drm_crtc_commit_put(commit);
		old_hvs_state->fifo_state[channel].pending_commit = NULL;
	}

	if (vc4->is_vc5) {
		unsigned long state_rate = max(old_hvs_state->core_clock_rate,
					       new_hvs_state->core_clock_rate);
		unsigned long core_rate = clamp_t(unsigned long, state_rate,
						  500000000, hvs->max_core_rate);

		drm_dbg(dev, "Raising the core clock at %lu Hz\n", core_rate);

		/*
		 * Do a temporary request on the core clock during the
		 * modeset.
		 */
		WARN_ON(clk_set_min_rate(hvs->core_clk, core_rate));
	}

	drm_atomic_helper_commit_modeset_disables(dev, state);

	vc4_ctm_commit(vc4, state);

	if (vc4->is_vc5)
		vc5_hvs_pv_muxing_commit(vc4, state);
	else
		vc4_hvs_pv_muxing_commit(vc4, state);

	drm_atomic_helper_commit_planes(dev, state,
					DRM_PLANE_COMMIT_ACTIVE_ONLY);

	drm_atomic_helper_commit_modeset_enables(dev, state);

	drm_atomic_helper_fake_vblank(state);

	drm_atomic_helper_commit_hw_done(state);

	drm_atomic_helper_wait_for_flip_done(dev, state);

	drm_atomic_helper_cleanup_planes(dev, state);

	if (vc4->is_vc5) {
		unsigned long core_rate = min_t(unsigned long,
						hvs->max_core_rate,
						new_hvs_state->core_clock_rate);

		drm_dbg(dev, "Running the core clock at %lu Hz\n", core_rate);

		/*
		 * Request a clock rate based on the current HVS
		 * requirements.
		 */
		WARN_ON(clk_set_min_rate(hvs->core_clk, core_rate));

		drm_dbg(dev, "Core clock actual rate: %lu Hz\n",
			clk_get_rate(hvs->core_clk));
	}
}

static int vc4_atomic_commit_setup(struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state;
	struct vc4_hvs_state *hvs_state;
	struct drm_crtc *crtc;
	unsigned int i;

	hvs_state = vc4_hvs_get_new_global_state(state);
	if (WARN_ON(IS_ERR(hvs_state)))
		return PTR_ERR(hvs_state);

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		struct vc4_crtc_state *vc4_crtc_state =
			to_vc4_crtc_state(crtc_state);
		unsigned int channel =
			vc4_crtc_state->assigned_channel;

		if (channel == VC4_HVS_CHANNEL_DISABLED)
			continue;

		if (!hvs_state->fifo_state[channel].in_use)
			continue;

		hvs_state->fifo_state[channel].pending_commit =
			drm_crtc_commit_get(crtc_state->commit);
	}

	return 0;
}

static struct drm_framebuffer *vc4_fb_create(struct drm_device *dev,
					     struct drm_file *file_priv,
					     const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_mode_fb_cmd2 mode_cmd_local;

	if (WARN_ON_ONCE(vc4->is_vc5))
		return ERR_PTR(-ENODEV);

	/* If the user didn't specify a modifier, use the
	 * vc4_set_tiling_ioctl() state for the BO.
	 */
	if (!(mode_cmd->flags & DRM_MODE_FB_MODIFIERS)) {
		struct drm_gem_object *gem_obj;
		struct vc4_bo *bo;

		gem_obj = drm_gem_object_lookup(file_priv,
						mode_cmd->handles[0]);
		if (!gem_obj) {
			DRM_DEBUG("Failed to look up GEM BO %d\n",
				  mode_cmd->handles[0]);
			return ERR_PTR(-ENOENT);
		}
		bo = to_vc4_bo(gem_obj);

		mode_cmd_local = *mode_cmd;

		if (bo->t_format) {
			mode_cmd_local.modifier[0] =
				DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED;
		} else {
			mode_cmd_local.modifier[0] = DRM_FORMAT_MOD_NONE;
		}

		drm_gem_object_put(gem_obj);

		mode_cmd = &mode_cmd_local;
	}

	return drm_gem_fb_create(dev, file_priv, mode_cmd);
}

/* Our CTM has some peculiar limitations: we can only enable it for one CRTC
 * at a time and the HW only supports S0.9 scalars. To account for the latter,
 * we don't allow userland to set a CTM that we have no hope of approximating.
 */
static int
vc4_ctm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_ctm_state *ctm_state = NULL;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_color_ctm *ctm;
	int i;

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		/* CTM is being disabled. */
		if (!new_crtc_state->ctm && old_crtc_state->ctm) {
			ctm_state = vc4_get_ctm_state(state, &vc4->ctm_manager);
			if (IS_ERR(ctm_state))
				return PTR_ERR(ctm_state);
			ctm_state->fifo = 0;
		}
	}

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		if (new_crtc_state->ctm == old_crtc_state->ctm)
			continue;

		if (!ctm_state) {
			ctm_state = vc4_get_ctm_state(state, &vc4->ctm_manager);
			if (IS_ERR(ctm_state))
				return PTR_ERR(ctm_state);
		}

		/* CTM is being enabled or the matrix changed. */
		if (new_crtc_state->ctm) {
			struct vc4_crtc_state *vc4_crtc_state =
				to_vc4_crtc_state(new_crtc_state);

			/* fifo is 1-based since 0 disables CTM. */
			int fifo = vc4_crtc_state->assigned_channel + 1;

			/* Check userland isn't trying to turn on CTM for more
			 * than one CRTC at a time.
			 */
			if (ctm_state->fifo && ctm_state->fifo != fifo) {
				DRM_DEBUG_DRIVER("Too many CTM configured\n");
				return -EINVAL;
			}

			/* Check we can approximate the specified CTM.
			 * We disallow scalars |c| > 1.0 since the HW has
			 * no integer bits.
			 */
			ctm = new_crtc_state->ctm->data;
			for (i = 0; i < ARRAY_SIZE(ctm->matrix); i++) {
				u64 val = ctm->matrix[i];

				val &= ~BIT_ULL(63);
				if (val > BIT_ULL(32))
					return -EINVAL;
			}

			ctm_state->fifo = fifo;
			ctm_state->ctm = ctm;
		}
	}

	return 0;
}

static int vc4_load_tracker_atomic_check(struct drm_atomic_state *state)
{
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct vc4_dev *vc4 = to_vc4_dev(state->dev);
	struct vc4_load_tracker_state *load_state;
	struct drm_private_state *priv_state;
	struct drm_plane *plane;
	int i;

	priv_state = drm_atomic_get_private_obj_state(state,
						      &vc4->load_tracker);
	if (IS_ERR(priv_state))
		return PTR_ERR(priv_state);

	load_state = to_vc4_load_tracker_state(priv_state);
	for_each_oldnew_plane_in_state(state, plane, old_plane_state,
				       new_plane_state, i) {
		struct vc4_plane_state *vc4_plane_state;

		if (old_plane_state->fb && old_plane_state->crtc) {
			vc4_plane_state = to_vc4_plane_state(old_plane_state);
			load_state->membus_load -= vc4_plane_state->membus_load;
			load_state->hvs_load -= vc4_plane_state->hvs_load;
		}

		if (new_plane_state->fb && new_plane_state->crtc) {
			vc4_plane_state = to_vc4_plane_state(new_plane_state);
			load_state->membus_load += vc4_plane_state->membus_load;
			load_state->hvs_load += vc4_plane_state->hvs_load;
		}
	}

	/* Don't check the load when the tracker is disabled. */
	if (!vc4->load_tracker_enabled)
		return 0;

	/* The absolute limit is 2Gbyte/sec, but let's take a margin to let
	 * the system work when other blocks are accessing the memory.
	 */
	if (load_state->membus_load > SZ_1G + SZ_512M)
		return -ENOSPC;

	/* HVS clock is supposed to run @ 250Mhz, let's take a margin and
	 * consider the maximum number of cycles is 240M.
	 */
	if (load_state->hvs_load > 240000000ULL)
		return -ENOSPC;

	return 0;
}

static struct drm_private_state *
vc4_load_tracker_duplicate_state(struct drm_private_obj *obj)
{
	struct vc4_load_tracker_state *state;

	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

	return &state->base;
}

static void vc4_load_tracker_destroy_state(struct drm_private_obj *obj,
					   struct drm_private_state *state)
{
	struct vc4_load_tracker_state *load_state;

	load_state = to_vc4_load_tracker_state(state);
	kfree(load_state);
}

static const struct drm_private_state_funcs vc4_load_tracker_state_funcs = {
	.atomic_duplicate_state = vc4_load_tracker_duplicate_state,
	.atomic_destroy_state = vc4_load_tracker_destroy_state,
};

static void vc4_load_tracker_obj_fini(struct drm_device *dev, void *unused)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	drm_atomic_private_obj_fini(&vc4->load_tracker);
}

static int vc4_load_tracker_obj_init(struct vc4_dev *vc4)
{
	struct vc4_load_tracker_state *load_state;

	load_state = kzalloc(sizeof(*load_state), GFP_KERNEL);
	if (!load_state)
		return -ENOMEM;

	drm_atomic_private_obj_init(&vc4->base, &vc4->load_tracker,
				    &load_state->base,
				    &vc4_load_tracker_state_funcs);

	return drmm_add_action_or_reset(&vc4->base, vc4_load_tracker_obj_fini, NULL);
}

static struct drm_private_state *
vc4_hvs_channels_duplicate_state(struct drm_private_obj *obj)
{
	struct vc4_hvs_state *old_state = to_vc4_hvs_state(obj->state);
	struct vc4_hvs_state *state;
	unsigned int i;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

	for (i = 0; i < HVS_NUM_CHANNELS; i++) {
		state->fifo_state[i].in_use = old_state->fifo_state[i].in_use;
		state->fifo_state[i].fifo_load = old_state->fifo_state[i].fifo_load;
	}

	state->core_clock_rate = old_state->core_clock_rate;

	return &state->base;
}

static void vc4_hvs_channels_destroy_state(struct drm_private_obj *obj,
					   struct drm_private_state *state)
{
	struct vc4_hvs_state *hvs_state = to_vc4_hvs_state(state);
	unsigned int i;

	for (i = 0; i < HVS_NUM_CHANNELS; i++) {
		if (!hvs_state->fifo_state[i].pending_commit)
			continue;

		drm_crtc_commit_put(hvs_state->fifo_state[i].pending_commit);
	}

	kfree(hvs_state);
}

static void vc4_hvs_channels_print_state(struct drm_printer *p,
					 const struct drm_private_state *state)
{
	struct vc4_hvs_state *hvs_state = to_vc4_hvs_state(state);
	unsigned int i;

	drm_printf(p, "HVS State\n");
	drm_printf(p, "\tCore Clock Rate: %lu\n", hvs_state->core_clock_rate);

	for (i = 0; i < HVS_NUM_CHANNELS; i++) {
		drm_printf(p, "\tChannel %d\n", i);
		drm_printf(p, "\t\tin use=%d\n", hvs_state->fifo_state[i].in_use);
		drm_printf(p, "\t\tload=%lu\n", hvs_state->fifo_state[i].fifo_load);
	}
}

static const struct drm_private_state_funcs vc4_hvs_state_funcs = {
	.atomic_duplicate_state = vc4_hvs_channels_duplicate_state,
	.atomic_destroy_state = vc4_hvs_channels_destroy_state,
	.atomic_print_state = vc4_hvs_channels_print_state,
};

static void vc4_hvs_channels_obj_fini(struct drm_device *dev, void *unused)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	drm_atomic_private_obj_fini(&vc4->hvs_channels);
}

static int vc4_hvs_channels_obj_init(struct vc4_dev *vc4)
{
	struct vc4_hvs_state *state;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	drm_atomic_private_obj_init(&vc4->base, &vc4->hvs_channels,
				    &state->base,
				    &vc4_hvs_state_funcs);

	return drmm_add_action_or_reset(&vc4->base, vc4_hvs_channels_obj_fini, NULL);
}

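/*
 * Comparison callback for the sort() call in vc4_pv_muxing_atomic_check()
 * below. The array being sorted holds struct drm_crtc pointers, so sort()
 * hands us pointers to those pointers, hence the double dereference.
 */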
static int cmp_vc4_crtc_hvs_output(const void *a, const void *b)
{
	const struct vc4_crtc *crtc_a =
		to_vc4_crtc(*(const struct drm_crtc **)a);
	const struct vc4_crtc_data *data_a =
		vc4_crtc_to_vc4_crtc_data(crtc_a);
	const struct vc4_crtc *crtc_b =
		to_vc4_crtc(*(const struct drm_crtc **)b);
	const struct vc4_crtc_data *data_b =
		vc4_crtc_to_vc4_crtc_data(crtc_b);

	return data_a->hvs_output - data_b->hvs_output;
}

/*
 * The BCM2711 HVS has up to 7 outputs connected to the pixelvalves and
 * the TXP (and therefore all the CRTCs found on that platform).
 *
 * The naive (and our initial) implementation would just iterate over
 * all the active CRTCs, try to find a suitable FIFO, and then remove it
 * from the pool of available FIFOs. However, there are a few corner
 * cases that need to be considered:
 *
 * - When running in a dual-display setup (so with two CRTCs involved),
 *   we can update the state of a single CRTC (for example by changing
 *   its mode using xrandr under X11) without affecting the other. In
 *   this case, the other CRTC wouldn't be in the state at all, so we
 *   need to consider all the running CRTCs in the DRM device to assign
 *   a FIFO, not just the one in the state.
 *
 * - To fix the above, we can't use drm_atomic_get_crtc_state on all
 *   enabled CRTCs to pull their CRTC state into the global state, since
 *   a page flip would start considering their vblank to complete. Since
 *   we don't have a guarantee that they are actually active, that
 *   vblank might never happen, and shouldn't even be considered if we
 *   want to do a page flip on a single CRTC. That can be tested by
 *   doing a modetest -v first on HDMI1 and then on HDMI0.
 *
 * - Since we need the pixelvalve to be disabled and enabled back when
 *   the FIFO is changed, we should keep the FIFO assigned for as long
 *   as the CRTC is enabled, only considering it free again once that
 *   CRTC has been disabled. This can be tested by booting X11 on a
 *   single display, and changing the resolution down and then back up.
 */
static int vc4_pv_muxing_atomic_check(struct drm_device *dev,
				      struct drm_atomic_state *state)
{
	struct vc4_hvs_state *hvs_new_state;
	struct drm_crtc **sorted_crtcs;
	struct drm_crtc *crtc;
	unsigned int unassigned_channels = 0;
	unsigned int i;
	int ret;

	hvs_new_state = vc4_hvs_get_global_state(state);
	if (IS_ERR(hvs_new_state))
		return PTR_ERR(hvs_new_state);

	for (i = 0; i < ARRAY_SIZE(hvs_new_state->fifo_state); i++)
		if (!hvs_new_state->fifo_state[i].in_use)
			unassigned_channels |= BIT(i);

	/*
	 * The problem we have to solve here is that we have up to 7
	 * encoders, connected to up to 6 CRTCs.
	 *
	 * Those CRTCs, depending on the instance, can be routed to 1, 2
	 * or 3 HVS FIFOs, and we need to set the muxing between FIFOs and
	 * outputs in the HVS accordingly.
	 *
	 * It would be pretty hard to come up with an algorithm that
	 * would generically solve this. However, the current routing
	 * trees we support allow us to simplify a bit the problem.
	 *
	 * Indeed, with the current supported layouts, if we try to
	 * assign in the ascending crtc index order the FIFOs, we can't
	 * fall into the situation where an earlier CRTC that had
	 * multiple routes is assigned one that was the only option for
	 * a later CRTC.
	 *
	 * If the layout changes and doesn't give us that in the future,
	 * we will need to have something smarter, but it works so far.
	 */
	sorted_crtcs = kmalloc_array(dev->num_crtcs, sizeof(*sorted_crtcs), GFP_KERNEL);
	if (!sorted_crtcs)
		return -ENOMEM;

	i = 0;
	drm_for_each_crtc(crtc, dev)
		sorted_crtcs[i++] = crtc;

	sort(sorted_crtcs, i, sizeof(*sorted_crtcs), cmp_vc4_crtc_hvs_output, NULL);

	for (i = 0; i < dev->num_crtcs; i++) {
		struct vc4_crtc_state *old_vc4_crtc_state, *new_vc4_crtc_state;
		struct drm_crtc_state *old_crtc_state, *new_crtc_state;
		struct vc4_crtc *vc4_crtc;
		unsigned int matching_channels;
		unsigned int channel;

		crtc = sorted_crtcs[i];
		if (!crtc)
			continue;
		vc4_crtc = to_vc4_crtc(crtc);

		old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc);
		if (!old_crtc_state)
			continue;
		old_vc4_crtc_state = to_vc4_crtc_state(old_crtc_state);

		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
		if (!new_crtc_state)
			continue;
		new_vc4_crtc_state = to_vc4_crtc_state(new_crtc_state);

		drm_dbg(dev, "%s: Trying to find a channel.\n", crtc->name);

		/* Nothing to do here, let's skip it */
		if (old_crtc_state->enable == new_crtc_state->enable) {
			if (new_crtc_state->enable)
				drm_dbg(dev, "%s: Already enabled, reusing channel %d.\n",
					crtc->name, new_vc4_crtc_state->assigned_channel);
			else
				drm_dbg(dev, "%s: Disabled, ignoring.\n", crtc->name);

			continue;
		}

		/* Muxing will need to be modified, mark it as such */
		new_vc4_crtc_state->update_muxing = true;

		/* If we're disabling our CRTC, we put back our channel */
		if (!new_crtc_state->enable) {
			channel = old_vc4_crtc_state->assigned_channel;

			drm_dbg(dev, "%s: Disabling, Freeing channel %d\n",
				crtc->name, channel);

			hvs_new_state->fifo_state[channel].in_use = false;
			new_vc4_crtc_state->assigned_channel = VC4_HVS_CHANNEL_DISABLED;
			continue;
		}

		matching_channels = unassigned_channels & vc4_crtc->data->hvs_available_channels;
		if (!matching_channels) {
			ret = -EINVAL;
			goto err_free_crtc_array;
		}

		channel = ffs(matching_channels) - 1;

		drm_dbg(dev, "Assigned HVS channel %d to CRTC %s\n", channel, crtc->name);
		new_vc4_crtc_state->assigned_channel = channel;
		unassigned_channels &= ~BIT(channel);
		hvs_new_state->fifo_state[channel].in_use = true;
	}

	kfree(sorted_crtcs);
	return 0;

err_free_crtc_array:
	kfree(sorted_crtcs);
	return ret;
}

static int
vc4_core_clock_atomic_check(struct drm_atomic_state *state)
{
	struct vc4_dev *vc4 = to_vc4_dev(state->dev);
	struct drm_private_state *priv_state;
	struct vc4_hvs_state *hvs_new_state;
	struct vc4_load_tracker_state *load_state;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_crtc *crtc;
	unsigned int num_outputs;
	unsigned long pixel_rate;
	unsigned long cob_rate;
	unsigned int i;

	priv_state = drm_atomic_get_private_obj_state(state,
						      &vc4->load_tracker);
	if (IS_ERR(priv_state))
		return PTR_ERR(priv_state);

	load_state = to_vc4_load_tracker_state(priv_state);

	hvs_new_state = vc4_hvs_get_global_state(state);
	if (IS_ERR(hvs_new_state))
		return PTR_ERR(hvs_new_state);

	for_each_oldnew_crtc_in_state(state, crtc,
				      old_crtc_state,
				      new_crtc_state,
				      i) {
		if (old_crtc_state->active) {
			struct vc4_crtc_state *old_vc4_state =
				to_vc4_crtc_state(old_crtc_state);
			unsigned int channel = old_vc4_state->assigned_channel;

			hvs_new_state->fifo_state[channel].fifo_load = 0;
		}

		if (new_crtc_state->active) {
			struct vc4_crtc_state *new_vc4_state =
				to_vc4_crtc_state(new_crtc_state);
			unsigned int channel = new_vc4_state->assigned_channel;

			hvs_new_state->fifo_state[channel].fifo_load =
				new_vc4_state->hvs_load;
		}
	}

	cob_rate = 0;
	num_outputs = 0;
	for (i = 0; i < HVS_NUM_CHANNELS; i++) {
		if (!hvs_new_state->fifo_state[i].in_use)
			continue;

		num_outputs++;
		cob_rate = max_t(unsigned long,
				 hvs_new_state->fifo_state[i].fifo_load,
				 cob_rate);
	}

	pixel_rate = load_state->hvs_load;
	if (num_outputs > 1) {
		pixel_rate = (pixel_rate * 40) / 100;
	} else {
		pixel_rate = (pixel_rate * 60) / 100;
	}

	hvs_new_state->core_clock_rate = max(cob_rate, pixel_rate);

	return 0;
}

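/*
 * For illustration of the calculation in vc4_core_clock_atomic_check() above
 * (made-up numbers): with two FIFOs in use whose fifo_load values are 150 MHz
 * and 200 MHz and a total load-tracker hvs_load of 600 MHz, cob_rate is
 * 200 MHz and pixel_rate is 600 MHz * 40 / 100 = 240 MHz, so core_clock_rate
 * ends up as 240 MHz. With a single output the 60% factor is used instead.
 */
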
static int
vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
{
	int ret;

	ret = vc4_pv_muxing_atomic_check(dev, state);
	if (ret)
		return ret;

	ret = vc4_ctm_atomic_check(dev, state);
	if (ret < 0)
		return ret;

	ret = drm_atomic_helper_check(dev, state);
	if (ret)
		return ret;

	ret = vc4_load_tracker_atomic_check(state);
	if (ret)
		return ret;

	return vc4_core_clock_atomic_check(state);
}

static struct drm_mode_config_helper_funcs vc4_mode_config_helpers = {
	.atomic_commit_setup = vc4_atomic_commit_setup,
	.atomic_commit_tail = vc4_atomic_commit_tail,
};

static const struct drm_mode_config_funcs vc4_mode_funcs = {
	.atomic_check = vc4_atomic_check,
	.atomic_commit = drm_atomic_helper_commit,
	.fb_create = vc4_fb_create,
};

static const struct drm_mode_config_funcs vc5_mode_funcs = {
	.atomic_check = vc4_atomic_check,
	.atomic_commit = drm_atomic_helper_commit,
	.fb_create = drm_gem_fb_create,
};

int vc4_kms_load(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int ret;

	/*
	 * The limits enforced by the load tracker aren't relevant for
	 * the BCM2711, but the load tracker computations are used for
	 * the core clock rate calculation.
	 */
	if (!vc4->is_vc5) {
		/* Start with the load tracker enabled. Can be
		 * disabled through the debugfs load_tracker file.
		 */
		vc4->load_tracker_enabled = true;
	}

	/* Set support for vblank irq fast disable, before drm_vblank_init() */
	dev->vblank_disable_immediate = true;

	ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
	if (ret < 0) {
		dev_err(dev->dev, "failed to initialize vblank\n");
		return ret;
	}

	if (vc4->is_vc5) {
		dev->mode_config.max_width = 7680;
		dev->mode_config.max_height = 7680;
	} else {
		dev->mode_config.max_width = 2048;
		dev->mode_config.max_height = 2048;
	}

	dev->mode_config.funcs = vc4->is_vc5 ? &vc5_mode_funcs : &vc4_mode_funcs;
	dev->mode_config.helper_private = &vc4_mode_config_helpers;
	dev->mode_config.preferred_depth = 24;
	dev->mode_config.async_page_flip = true;
	dev->mode_config.normalize_zpos = true;

	ret = vc4_ctm_obj_init(vc4);
	if (ret)
		return ret;

	ret = vc4_load_tracker_obj_init(vc4);
	if (ret)
		return ret;

	ret = vc4_hvs_channels_obj_init(vc4);
	if (ret)
		return ret;

	drm_mode_config_reset(dev);

	drm_kms_helper_poll_init(dev);

	return 0;
}