1df0566a6SJani Nikula /* SPDX-License-Identifier: MIT */
2df0566a6SJani Nikula /*
3df0566a6SJani Nikula * Copyright © 2019 Intel Corporation
4df0566a6SJani Nikula */
5df0566a6SJani Nikula
6707c3a7dSLucas De Marchi #include <linux/string_helpers.h>
7707c3a7dSLucas De Marchi
8df0566a6SJani Nikula #include "i915_drv.h"
9df0566a6SJani Nikula #include "i915_irq.h"
10476f62b8SJani Nikula #include "i915_reg.h"
11a9b4c16dSJani Nikula #include "intel_backlight_regs.h"
12df0566a6SJani Nikula #include "intel_cdclk.h"
135197c49dSVille Syrjälä #include "intel_clock_gating.h"
14df0566a6SJani Nikula #include "intel_combo_phy.h"
157785ae0bSVille Syrjälä #include "intel_de.h"
1680e77e30SJani Nikula #include "intel_display_power.h"
17323286c8SImre Deak #include "intel_display_power_map.h"
18ef1e1708SImre Deak #include "intel_display_power_well.h"
191d455f8dSJani Nikula #include "intel_display_types.h"
2032f9402dSAnusha Srivatsa #include "intel_dmc.h"
21e30e6c7bSMatt Roper #include "intel_mchbar_regs.h"
22ae880cd0SVille Syrjälä #include "intel_pch_refclk.h"
234dd4375bSJani Nikula #include "intel_pcode.h"
244c4cc7acSMika Kahola #include "intel_pmdemand.h"
25065695b3SJani Nikula #include "intel_pps_regs.h"
26a6a12811SMatt Roper #include "intel_snps_phy.h"
2742a0d256SVille Syrjälä #include "skl_watermark.h"
28689e61a4SJani Nikula #include "skl_watermark_regs.h"
291eecf31eSJani Nikula #include "vlv_sideband.h"
30df0566a6SJani Nikula
/*
 * Iterate over all power wells that back @__domain, i.e. those whose
 * domains bitmask has the @__domain bit set. The plain variant walks the
 * platform's power well list in order, the _reverse variant walks it
 * backwards (used when disabling, so inner wells are dropped first).
 */
#define for_each_power_domain_well(__dev_priv, __power_well, __domain) \
	for_each_power_well(__dev_priv, __power_well) \
		for_each_if(test_bit((__domain), (__power_well)->domains.bits))

#define for_each_power_domain_well_reverse(__dev_priv, __power_well, __domain) \
	for_each_power_well_reverse(__dev_priv, __power_well) \
		for_each_if(test_bit((__domain), (__power_well)->domains.bits))
38ac78f31bSImre Deak
/*
 * Return a human readable name for @domain, for debug/log output.
 * Unknown values trigger MISSING_CASE() and map to "?".
 */
static const char *
intel_display_power_domain_str(enum intel_display_power_domain domain)
{
	switch (domain) {
	case POWER_DOMAIN_DISPLAY_CORE:
		return "DISPLAY_CORE";
	case POWER_DOMAIN_PIPE_A:
		return "PIPE_A";
	case POWER_DOMAIN_PIPE_B:
		return "PIPE_B";
	case POWER_DOMAIN_PIPE_C:
		return "PIPE_C";
	case POWER_DOMAIN_PIPE_D:
		return "PIPE_D";
	case POWER_DOMAIN_PIPE_PANEL_FITTER_A:
		return "PIPE_PANEL_FITTER_A";
	case POWER_DOMAIN_PIPE_PANEL_FITTER_B:
		return "PIPE_PANEL_FITTER_B";
	case POWER_DOMAIN_PIPE_PANEL_FITTER_C:
		return "PIPE_PANEL_FITTER_C";
	case POWER_DOMAIN_PIPE_PANEL_FITTER_D:
		return "PIPE_PANEL_FITTER_D";
	case POWER_DOMAIN_TRANSCODER_A:
		return "TRANSCODER_A";
	case POWER_DOMAIN_TRANSCODER_B:
		return "TRANSCODER_B";
	case POWER_DOMAIN_TRANSCODER_C:
		return "TRANSCODER_C";
	case POWER_DOMAIN_TRANSCODER_D:
		return "TRANSCODER_D";
	case POWER_DOMAIN_TRANSCODER_EDP:
		return "TRANSCODER_EDP";
	case POWER_DOMAIN_TRANSCODER_DSI_A:
		return "TRANSCODER_DSI_A";
	case POWER_DOMAIN_TRANSCODER_DSI_C:
		return "TRANSCODER_DSI_C";
	case POWER_DOMAIN_TRANSCODER_VDSC_PW2:
		return "TRANSCODER_VDSC_PW2";
	case POWER_DOMAIN_PORT_DDI_LANES_A:
		return "PORT_DDI_LANES_A";
	case POWER_DOMAIN_PORT_DDI_LANES_B:
		return "PORT_DDI_LANES_B";
	case POWER_DOMAIN_PORT_DDI_LANES_C:
		return "PORT_DDI_LANES_C";
	case POWER_DOMAIN_PORT_DDI_LANES_D:
		return "PORT_DDI_LANES_D";
	case POWER_DOMAIN_PORT_DDI_LANES_E:
		return "PORT_DDI_LANES_E";
	case POWER_DOMAIN_PORT_DDI_LANES_F:
		return "PORT_DDI_LANES_F";
	case POWER_DOMAIN_PORT_DDI_LANES_TC1:
		return "PORT_DDI_LANES_TC1";
	case POWER_DOMAIN_PORT_DDI_LANES_TC2:
		return "PORT_DDI_LANES_TC2";
	case POWER_DOMAIN_PORT_DDI_LANES_TC3:
		return "PORT_DDI_LANES_TC3";
	case POWER_DOMAIN_PORT_DDI_LANES_TC4:
		return "PORT_DDI_LANES_TC4";
	case POWER_DOMAIN_PORT_DDI_LANES_TC5:
		return "PORT_DDI_LANES_TC5";
	case POWER_DOMAIN_PORT_DDI_LANES_TC6:
		return "PORT_DDI_LANES_TC6";
	case POWER_DOMAIN_PORT_DDI_IO_A:
		return "PORT_DDI_IO_A";
	case POWER_DOMAIN_PORT_DDI_IO_B:
		return "PORT_DDI_IO_B";
	case POWER_DOMAIN_PORT_DDI_IO_C:
		return "PORT_DDI_IO_C";
	case POWER_DOMAIN_PORT_DDI_IO_D:
		return "PORT_DDI_IO_D";
	case POWER_DOMAIN_PORT_DDI_IO_E:
		return "PORT_DDI_IO_E";
	case POWER_DOMAIN_PORT_DDI_IO_F:
		return "PORT_DDI_IO_F";
	case POWER_DOMAIN_PORT_DDI_IO_TC1:
		return "PORT_DDI_IO_TC1";
	case POWER_DOMAIN_PORT_DDI_IO_TC2:
		return "PORT_DDI_IO_TC2";
	case POWER_DOMAIN_PORT_DDI_IO_TC3:
		return "PORT_DDI_IO_TC3";
	case POWER_DOMAIN_PORT_DDI_IO_TC4:
		return "PORT_DDI_IO_TC4";
	case POWER_DOMAIN_PORT_DDI_IO_TC5:
		return "PORT_DDI_IO_TC5";
	case POWER_DOMAIN_PORT_DDI_IO_TC6:
		return "PORT_DDI_IO_TC6";
	case POWER_DOMAIN_PORT_DSI:
		return "PORT_DSI";
	case POWER_DOMAIN_PORT_CRT:
		return "PORT_CRT";
	case POWER_DOMAIN_PORT_OTHER:
		return "PORT_OTHER";
	case POWER_DOMAIN_VGA:
		return "VGA";
	case POWER_DOMAIN_AUDIO_MMIO:
		return "AUDIO_MMIO";
	case POWER_DOMAIN_AUDIO_PLAYBACK:
		return "AUDIO_PLAYBACK";
	case POWER_DOMAIN_AUX_IO_A:
		return "AUX_IO_A";
	case POWER_DOMAIN_AUX_IO_B:
		return "AUX_IO_B";
	case POWER_DOMAIN_AUX_IO_C:
		return "AUX_IO_C";
	case POWER_DOMAIN_AUX_IO_D:
		return "AUX_IO_D";
	case POWER_DOMAIN_AUX_IO_E:
		return "AUX_IO_E";
	case POWER_DOMAIN_AUX_IO_F:
		return "AUX_IO_F";
	case POWER_DOMAIN_AUX_A:
		return "AUX_A";
	case POWER_DOMAIN_AUX_B:
		return "AUX_B";
	case POWER_DOMAIN_AUX_C:
		return "AUX_C";
	case POWER_DOMAIN_AUX_D:
		return "AUX_D";
	case POWER_DOMAIN_AUX_E:
		return "AUX_E";
	case POWER_DOMAIN_AUX_F:
		return "AUX_F";
	case POWER_DOMAIN_AUX_USBC1:
		return "AUX_USBC1";
	case POWER_DOMAIN_AUX_USBC2:
		return "AUX_USBC2";
	case POWER_DOMAIN_AUX_USBC3:
		return "AUX_USBC3";
	case POWER_DOMAIN_AUX_USBC4:
		return "AUX_USBC4";
	case POWER_DOMAIN_AUX_USBC5:
		return "AUX_USBC5";
	case POWER_DOMAIN_AUX_USBC6:
		return "AUX_USBC6";
	case POWER_DOMAIN_AUX_TBT1:
		return "AUX_TBT1";
	case POWER_DOMAIN_AUX_TBT2:
		return "AUX_TBT2";
	case POWER_DOMAIN_AUX_TBT3:
		return "AUX_TBT3";
	case POWER_DOMAIN_AUX_TBT4:
		return "AUX_TBT4";
	case POWER_DOMAIN_AUX_TBT5:
		return "AUX_TBT5";
	case POWER_DOMAIN_AUX_TBT6:
		return "AUX_TBT6";
	case POWER_DOMAIN_GMBUS:
		return "GMBUS";
	case POWER_DOMAIN_INIT:
		return "INIT";
	case POWER_DOMAIN_GT_IRQ:
		return "GT_IRQ";
	case POWER_DOMAIN_DC_OFF:
		return "DC_OFF";
	case POWER_DOMAIN_TC_COLD_OFF:
		return "TC_COLD_OFF";
	default:
		/* Newly added enum values must get a string above. */
		MISSING_CASE(domain);
		return "?";
	}
}
200df0566a6SJani Nikula
__intel_display_power_is_enabled(struct drm_i915_private * dev_priv,enum intel_display_power_domain domain)201202b85daSJani Nikula static bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
202df0566a6SJani Nikula enum intel_display_power_domain domain)
203df0566a6SJani Nikula {
204df0566a6SJani Nikula struct i915_power_well *power_well;
205df0566a6SJani Nikula bool is_enabled;
206df0566a6SJani Nikula
2078874288cSJouni Högander if (pm_runtime_suspended(dev_priv->drm.dev))
208df0566a6SJani Nikula return false;
209df0566a6SJani Nikula
210df0566a6SJani Nikula is_enabled = true;
211df0566a6SJani Nikula
212888a2a63SImre Deak for_each_power_domain_well_reverse(dev_priv, power_well, domain) {
2133ab5e051SImre Deak if (intel_power_well_is_always_on(power_well))
214df0566a6SJani Nikula continue;
215df0566a6SJani Nikula
2163ab5e051SImre Deak if (!intel_power_well_is_enabled_cached(power_well)) {
217df0566a6SJani Nikula is_enabled = false;
218df0566a6SJani Nikula break;
219df0566a6SJani Nikula }
220df0566a6SJani Nikula }
221df0566a6SJani Nikula
222df0566a6SJani Nikula return is_enabled;
223df0566a6SJani Nikula }
224df0566a6SJani Nikula
/**
 * intel_display_power_is_enabled - check for a power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to check
 *
 * This function can be used to check the hw power domain state. It is mostly
 * used in hardware state readout functions. Everywhere else code should rely
 * upon explicit power domain reference counting to ensure that the hardware
 * block is powered up before accessing it.
 *
 * Callers must hold the relevant modesetting locks to ensure that concurrent
 * threads can't disable the power well while the caller tries to read a few
 * registers.
 *
 * Returns:
 * True when the power domain is enabled, false otherwise.
 */
bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
				    enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	bool ret;

	power_domains = &dev_priv->display.power.domains;

	/* Serialize against concurrent power well state changes. */
	mutex_lock(&power_domains->lock);
	ret = __intel_display_power_is_enabled(dev_priv, domain);
	mutex_unlock(&power_domains->lock);

	return ret;
}
256df0566a6SJani Nikula
/*
 * Clamp @target_dc_state to a state permitted by allowed_dc_mask: the
 * states[] array is ordered deepest-first, and whenever the requested
 * state is found but not allowed, we fall back to the next shallower
 * state.  DC_STATE_DISABLE (the last entry) is the unconditional
 * fallback.
 */
static u32
sanitize_target_dc_state(struct drm_i915_private *i915,
			 u32 target_dc_state)
{
	struct i915_power_domains *power_domains = &i915->display.power.domains;
	static const u32 states[] = {
		DC_STATE_EN_UPTO_DC6,
		DC_STATE_EN_UPTO_DC5,
		DC_STATE_EN_DC3CO,
		DC_STATE_DISABLE,
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(states) - 1; i++) {
		if (target_dc_state != states[i])
			continue;

		/* Requested state is allowed on this platform: keep it. */
		if (power_domains->allowed_dc_mask & target_dc_state)
			break;

		/* Not allowed: retry with the next shallower state. */
		target_dc_state = states[i + 1];
	}

	return target_dc_state;
}
2824645e906SAnshuman Gupta
/**
 * intel_display_power_set_target_dc_state - Set target dc state.
 * @dev_priv: i915 device
 * @state: state which needs to be set as target_dc_state.
 *
 * This function sets the "DC off" power well target_dc_state;
 * based upon this target_dc_state, the "DC off" power well will
 * enable the desired DC state.
 */
void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv,
					     u32 state)
{
	struct i915_power_well *power_well;
	bool dc_off_enabled;
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;

	mutex_lock(&power_domains->lock);
	power_well = lookup_power_well(dev_priv, SKL_DISP_DC_OFF);

	/* Platforms without a DC-off power well have nothing to update. */
	if (drm_WARN_ON(&dev_priv->drm, !power_well))
		goto unlock;

	/* Clamp to the deepest DC state allowed on this platform. */
	state = sanitize_target_dc_state(dev_priv, state);

	if (state == power_domains->target_dc_state)
		goto unlock;

	dc_off_enabled = intel_power_well_is_enabled(dev_priv, power_well);
	/*
	 * If DC off power well is disabled, need to enable and disable the
	 * DC off power well to effect target DC state.
	 */
	if (!dc_off_enabled)
		intel_power_well_enable(dev_priv, power_well);

	power_domains->target_dc_state = state;

	if (!dc_off_enabled)
		intel_power_well_disable(dev_priv, power_well);

unlock:
	mutex_unlock(&power_domains->lock);
}
3261c4d821dSAnshuman Gupta
/*
 * Store into @mask the union of both pending async-put domain sets,
 * i.e. all domains with an asynchronous power reference drop queued.
 */
static void __async_put_domains_mask(struct i915_power_domains *power_domains,
				     struct intel_power_domain_mask *mask)
{
	bitmap_or(mask->bits,
		  power_domains->async_put_domains[0].bits,
		  power_domains->async_put_domains[1].bits,
		  POWER_DOMAIN_NUM);
}
335df0566a6SJani Nikula
336df0566a6SJani Nikula #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
337df0566a6SJani Nikula
/*
 * Debug check: the two async-put domain sets must never overlap.
 * Returns true when they are disjoint (i.e. the invariant holds).
 */
static bool
assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
{
	struct drm_i915_private *i915 = container_of(power_domains,
						     struct drm_i915_private,
						     display.power.domains);

	return !drm_WARN_ON(&i915->drm,
			    bitmap_intersects(power_domains->async_put_domains[0].bits,
					      power_domains->async_put_domains[1].bits,
					      POWER_DOMAIN_NUM));
}
350df0566a6SJani Nikula
/*
 * Debug check of the async-put bookkeeping invariants:
 * - the two async-put masks are disjoint,
 * - a wakeref is held if and only if some async put is pending,
 * - each pending domain holds exactly one reference.
 * Returns true when all invariants hold.
 */
static bool
__async_put_domains_state_ok(struct i915_power_domains *power_domains)
{
	struct drm_i915_private *i915 = container_of(power_domains,
						     struct drm_i915_private,
						     display.power.domains);
	struct intel_power_domain_mask async_put_mask;
	enum intel_display_power_domain domain;
	bool err = false;

	err |= !assert_async_put_domain_masks_disjoint(power_domains);
	__async_put_domains_mask(power_domains, &async_put_mask);
	/* Wakeref held <=> at least one async put pending. */
	err |= drm_WARN_ON(&i915->drm,
			   !!power_domains->async_put_wakeref !=
			   !bitmap_empty(async_put_mask.bits, POWER_DOMAIN_NUM));

	for_each_power_domain(domain, &async_put_mask)
		err |= drm_WARN_ON(&i915->drm,
				   power_domains->domain_use_count[domain] != 1);

	return !err;
}
373df0566a6SJani Nikula
/*
 * Debug dump of all domains in @mask along with their use counts,
 * prefixed by @prefix.
 */
static void print_power_domains(struct i915_power_domains *power_domains,
				const char *prefix, struct intel_power_domain_mask *mask)
{
	struct drm_i915_private *i915 = container_of(power_domains,
						     struct drm_i915_private,
						     display.power.domains);
	enum intel_display_power_domain domain;

	drm_dbg(&i915->drm, "%s (%d):\n", prefix, bitmap_weight(mask->bits, POWER_DOMAIN_NUM));
	for_each_power_domain(domain, mask)
		drm_dbg(&i915->drm, "%s use_count %d\n",
			intel_display_power_domain_str(domain),
			power_domains->domain_use_count[domain]);
}
388df0566a6SJani Nikula
/* Debug dump of the async-put wakeref and both pending domain sets. */
static void
print_async_put_domains_state(struct i915_power_domains *power_domains)
{
	struct drm_i915_private *i915 = container_of(power_domains,
						     struct drm_i915_private,
						     display.power.domains);

	drm_dbg(&i915->drm, "async_put_wakeref: %s\n",
		str_yes_no(power_domains->async_put_wakeref));

	print_power_domains(power_domains, "async_put_domains[0]",
			    &power_domains->async_put_domains[0]);
	print_power_domains(power_domains, "async_put_domains[1]",
			    &power_domains->async_put_domains[1]);
}
404df0566a6SJani Nikula
/* Dump the async-put state whenever an invariant check fails. */
static void
verify_async_put_domains_state(struct i915_power_domains *power_domains)
{
	if (!__async_put_domains_state_ok(power_domains))
		print_async_put_domains_state(power_domains);
}
411df0566a6SJani Nikula
412df0566a6SJani Nikula #else
413df0566a6SJani Nikula
/* No-op stub when CONFIG_DRM_I915_DEBUG_RUNTIME_PM is disabled. */
static void
assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
{
}
418df0566a6SJani Nikula
/* No-op stub when CONFIG_DRM_I915_DEBUG_RUNTIME_PM is disabled. */
static void
verify_async_put_domains_state(struct i915_power_domains *power_domains)
{
}
423df0566a6SJani Nikula
424df0566a6SJani Nikula #endif /* CONFIG_DRM_I915_DEBUG_RUNTIME_PM */
425df0566a6SJani Nikula
/*
 * Invariant-checked variant of __async_put_domains_mask(): fill @mask
 * with all domains that have an async put pending.
 */
static void async_put_domains_mask(struct i915_power_domains *power_domains,
				   struct intel_power_domain_mask *mask)

{
	assert_async_put_domain_masks_disjoint(power_domains);

	__async_put_domains_mask(power_domains, mask);
}
434df0566a6SJani Nikula
/* Remove @domain from both pending async-put domain sets. */
static void
async_put_domains_clear_domain(struct i915_power_domains *power_domains,
			       enum intel_display_power_domain domain)
{
	assert_async_put_domain_masks_disjoint(power_domains);

	clear_bit(domain, power_domains->async_put_domains[0].bits);
	clear_bit(domain, power_domains->async_put_domains[1].bits);
}
444df0566a6SJani Nikula
/*
 * Cancel the queued async-put work item, waiting for a running instance
 * to finish when @sync is set, and reset the rescheduling delay.
 */
static void
cancel_async_put_work(struct i915_power_domains *power_domains, bool sync)
{
	if (sync)
		cancel_delayed_work_sync(&power_domains->async_put_work);
	else
		cancel_delayed_work(&power_domains->async_put_work);

	power_domains->async_put_next_delay = 0;
}
455caacfe31SImre Deak
/*
 * If an async put is pending for @domain, take over its still-held
 * reference instead of acquiring a new one: clear the domain from the
 * pending sets and, if no other async puts remain, cancel the work and
 * drop the raw runtime-PM wakeref backing it.
 *
 * Returns true if a pending reference was grabbed this way, false if the
 * caller must take a reference itself.
 */
static bool
intel_display_power_grab_async_put_ref(struct drm_i915_private *dev_priv,
				       enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
	struct intel_power_domain_mask async_put_mask;
	bool ret = false;

	async_put_domains_mask(power_domains, &async_put_mask);
	if (!test_bit(domain, async_put_mask.bits))
		goto out_verify;

	async_put_domains_clear_domain(power_domains, domain);

	ret = true;

	/* Other async puts still pending: keep the work and wakeref. */
	async_put_domains_mask(power_domains, &async_put_mask);
	if (!bitmap_empty(async_put_mask.bits, POWER_DOMAIN_NUM))
		goto out_verify;

	cancel_async_put_work(power_domains, false);
	intel_runtime_pm_put_raw(&dev_priv->runtime_pm,
				 fetch_and_zero(&power_domains->async_put_wakeref));
out_verify:
	verify_async_put_domains_state(power_domains);

	return ret;
}
484df0566a6SJani Nikula
/*
 * Locked helper to take a reference on @domain: either reuse a pending
 * async-put reference, or enable all backing power wells and bump the
 * domain use count. Caller must hold power_domains->lock.
 */
static void
__intel_display_power_get_domain(struct drm_i915_private *dev_priv,
				 enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
	struct i915_power_well *power_well;

	if (intel_display_power_grab_async_put_ref(dev_priv, domain))
		return;

	for_each_power_domain_well(dev_priv, power_well, domain)
		intel_power_well_get(dev_priv, power_well);

	power_domains->domain_use_count[domain]++;
}
500df0566a6SJani Nikula
/**
 * intel_display_power_get - grab a power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function grabs a power domain reference for @domain and ensures that the
 * power domain and all its parents are powered up. Therefore users should only
 * grab a reference to the innermost power domain they need.
 *
 * Any power domain reference obtained by this function must have a symmetric
 * call to intel_display_power_put() to release the reference again.
 */
intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
					enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
	/* Hold a runtime-PM wakeref for as long as the domain reference. */
	intel_wakeref_t wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	mutex_lock(&power_domains->lock);
	__intel_display_power_get_domain(dev_priv, domain);
	mutex_unlock(&power_domains->lock);

	return wakeref;
}
525df0566a6SJani Nikula
526df0566a6SJani Nikula /**
527df0566a6SJani Nikula * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
528df0566a6SJani Nikula * @dev_priv: i915 device instance
529df0566a6SJani Nikula * @domain: power domain to reference
530df0566a6SJani Nikula *
531df0566a6SJani Nikula * This function grabs a power domain reference for @domain and ensures that the
532df0566a6SJani Nikula * power domain and all its parents are powered up. Therefore users should only
533df0566a6SJani Nikula * grab a reference to the innermost power domain they need.
534df0566a6SJani Nikula *
535df0566a6SJani Nikula * Any power domain reference obtained by this function must have a symmetric
536df0566a6SJani Nikula * call to intel_display_power_put() to release the reference again.
537df0566a6SJani Nikula */
538df0566a6SJani Nikula intel_wakeref_t
intel_display_power_get_if_enabled(struct drm_i915_private * dev_priv,enum intel_display_power_domain domain)539df0566a6SJani Nikula intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
540df0566a6SJani Nikula enum intel_display_power_domain domain)
541df0566a6SJani Nikula {
542e3e8148fSJani Nikula struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
543df0566a6SJani Nikula intel_wakeref_t wakeref;
544df0566a6SJani Nikula bool is_enabled;
545df0566a6SJani Nikula
546df0566a6SJani Nikula wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm);
547df0566a6SJani Nikula if (!wakeref)
548df0566a6SJani Nikula return false;
549df0566a6SJani Nikula
550df0566a6SJani Nikula mutex_lock(&power_domains->lock);
551df0566a6SJani Nikula
552df0566a6SJani Nikula if (__intel_display_power_is_enabled(dev_priv, domain)) {
553df0566a6SJani Nikula __intel_display_power_get_domain(dev_priv, domain);
554df0566a6SJani Nikula is_enabled = true;
555df0566a6SJani Nikula } else {
556df0566a6SJani Nikula is_enabled = false;
557df0566a6SJani Nikula }
558df0566a6SJani Nikula
559df0566a6SJani Nikula mutex_unlock(&power_domains->lock);
560df0566a6SJani Nikula
561df0566a6SJani Nikula if (!is_enabled) {
562df0566a6SJani Nikula intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
563df0566a6SJani Nikula wakeref = 0;
564df0566a6SJani Nikula }
565df0566a6SJani Nikula
566df0566a6SJani Nikula return wakeref;
567df0566a6SJani Nikula }
568df0566a6SJani Nikula
/*
 * Locked helper to drop a reference on @domain: decrement the domain use
 * count and release all backing power wells in reverse order. Warns if the
 * count is already zero or an async put is still pending for the domain.
 * Caller must hold power_domains->lock.
 */
static void
__intel_display_power_put_domain(struct drm_i915_private *dev_priv,
				 enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	const char *name = intel_display_power_domain_str(domain);
	struct intel_power_domain_mask async_put_mask;

	power_domains = &dev_priv->display.power.domains;

	drm_WARN(&dev_priv->drm, !power_domains->domain_use_count[domain],
		 "Use count on domain %s is already zero\n",
		 name);
	async_put_domains_mask(power_domains, &async_put_mask);
	drm_WARN(&dev_priv->drm,
		 test_bit(domain, async_put_mask.bits),
		 "Async disabling of domain %s is pending\n",
		 name);

	power_domains->domain_use_count[domain]--;

	for_each_power_domain_well_reverse(dev_priv, power_well, domain)
		intel_power_well_put(dev_priv, power_well);
}
594df0566a6SJani Nikula
__intel_display_power_put(struct drm_i915_private * dev_priv,enum intel_display_power_domain domain)595df0566a6SJani Nikula static void __intel_display_power_put(struct drm_i915_private *dev_priv,
596df0566a6SJani Nikula enum intel_display_power_domain domain)
597df0566a6SJani Nikula {
598e3e8148fSJani Nikula struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
599df0566a6SJani Nikula
600df0566a6SJani Nikula mutex_lock(&power_domains->lock);
601df0566a6SJani Nikula __intel_display_power_put_domain(dev_priv, domain);
602df0566a6SJani Nikula mutex_unlock(&power_domains->lock);
603df0566a6SJani Nikula }
604df0566a6SJani Nikula
605df0566a6SJani Nikula static void
queue_async_put_domains_work(struct i915_power_domains * power_domains,intel_wakeref_t wakeref,int delay_ms)606df0566a6SJani Nikula queue_async_put_domains_work(struct i915_power_domains *power_domains,
607caacfe31SImre Deak intel_wakeref_t wakeref,
608caacfe31SImre Deak int delay_ms)
609df0566a6SJani Nikula {
6104c1ccdf7SPankaj Bharadiya struct drm_i915_private *i915 = container_of(power_domains,
6114c1ccdf7SPankaj Bharadiya struct drm_i915_private,
612e3e8148fSJani Nikula display.power.domains);
6134c1ccdf7SPankaj Bharadiya drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref);
614df0566a6SJani Nikula power_domains->async_put_wakeref = wakeref;
6154c1ccdf7SPankaj Bharadiya drm_WARN_ON(&i915->drm, !queue_delayed_work(system_unbound_wq,
616df0566a6SJani Nikula &power_domains->async_put_work,
617caacfe31SImre Deak msecs_to_jiffies(delay_ms)));
618df0566a6SJani Nikula }
619df0566a6SJani Nikula
620df0566a6SJani Nikula static void
release_async_put_domains(struct i915_power_domains * power_domains,struct intel_power_domain_mask * mask)621888a2a63SImre Deak release_async_put_domains(struct i915_power_domains *power_domains,
622888a2a63SImre Deak struct intel_power_domain_mask *mask)
623df0566a6SJani Nikula {
624df0566a6SJani Nikula struct drm_i915_private *dev_priv =
625df0566a6SJani Nikula container_of(power_domains, struct drm_i915_private,
626e3e8148fSJani Nikula display.power.domains);
627df0566a6SJani Nikula struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
628df0566a6SJani Nikula enum intel_display_power_domain domain;
629df0566a6SJani Nikula intel_wakeref_t wakeref;
630df0566a6SJani Nikula
63177e619a8SRodrigo Vivi wakeref = intel_runtime_pm_get_noresume(rpm);
632df0566a6SJani Nikula
633df0566a6SJani Nikula for_each_power_domain(domain, mask) {
634df0566a6SJani Nikula /* Clear before put, so put's sanity check is happy. */
635df0566a6SJani Nikula async_put_domains_clear_domain(power_domains, domain);
636df0566a6SJani Nikula __intel_display_power_put_domain(dev_priv, domain);
637df0566a6SJani Nikula }
638df0566a6SJani Nikula
639df0566a6SJani Nikula intel_runtime_pm_put(rpm, wakeref);
640df0566a6SJani Nikula }
641df0566a6SJani Nikula
642df0566a6SJani Nikula static void
intel_display_power_put_async_work(struct work_struct * work)643df0566a6SJani Nikula intel_display_power_put_async_work(struct work_struct *work)
644df0566a6SJani Nikula {
645df0566a6SJani Nikula struct drm_i915_private *dev_priv =
646df0566a6SJani Nikula container_of(work, struct drm_i915_private,
647e3e8148fSJani Nikula display.power.domains.async_put_work.work);
648e3e8148fSJani Nikula struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
649df0566a6SJani Nikula struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
650df0566a6SJani Nikula intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(rpm);
651df0566a6SJani Nikula intel_wakeref_t old_work_wakeref = 0;
652df0566a6SJani Nikula
653df0566a6SJani Nikula mutex_lock(&power_domains->lock);
654df0566a6SJani Nikula
655df0566a6SJani Nikula /*
656df0566a6SJani Nikula * Bail out if all the domain refs pending to be released were grabbed
657df0566a6SJani Nikula * by subsequent gets or a flush_work.
658df0566a6SJani Nikula */
659df0566a6SJani Nikula old_work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
660df0566a6SJani Nikula if (!old_work_wakeref)
661df0566a6SJani Nikula goto out_verify;
662df0566a6SJani Nikula
663df0566a6SJani Nikula release_async_put_domains(power_domains,
664888a2a63SImre Deak &power_domains->async_put_domains[0]);
665df0566a6SJani Nikula
66630ca6365SImre Deak /*
66730ca6365SImre Deak * Cancel the work that got queued after this one got dequeued,
66830ca6365SImre Deak * since here we released the corresponding async-put reference.
66930ca6365SImre Deak */
67030ca6365SImre Deak cancel_async_put_work(power_domains, false);
67130ca6365SImre Deak
672df0566a6SJani Nikula /* Requeue the work if more domains were async put meanwhile. */
673888a2a63SImre Deak if (!bitmap_empty(power_domains->async_put_domains[1].bits, POWER_DOMAIN_NUM)) {
674888a2a63SImre Deak bitmap_copy(power_domains->async_put_domains[0].bits,
675888a2a63SImre Deak power_domains->async_put_domains[1].bits,
676888a2a63SImre Deak POWER_DOMAIN_NUM);
677888a2a63SImre Deak bitmap_zero(power_domains->async_put_domains[1].bits,
678888a2a63SImre Deak POWER_DOMAIN_NUM);
679df0566a6SJani Nikula queue_async_put_domains_work(power_domains,
680caacfe31SImre Deak fetch_and_zero(&new_work_wakeref),
681caacfe31SImre Deak power_domains->async_put_next_delay);
682caacfe31SImre Deak power_domains->async_put_next_delay = 0;
683df0566a6SJani Nikula }
684df0566a6SJani Nikula
685df0566a6SJani Nikula out_verify:
686df0566a6SJani Nikula verify_async_put_domains_state(power_domains);
687df0566a6SJani Nikula
688df0566a6SJani Nikula mutex_unlock(&power_domains->lock);
689df0566a6SJani Nikula
690df0566a6SJani Nikula if (old_work_wakeref)
691df0566a6SJani Nikula intel_runtime_pm_put_raw(rpm, old_work_wakeref);
692df0566a6SJani Nikula if (new_work_wakeref)
693df0566a6SJani Nikula intel_runtime_pm_put_raw(rpm, new_work_wakeref);
694df0566a6SJani Nikula }
695df0566a6SJani Nikula
696df0566a6SJani Nikula /**
69763c154a0SLee Jones * __intel_display_power_put_async - release a power domain reference asynchronously
698df0566a6SJani Nikula * @i915: i915 device instance
699df0566a6SJani Nikula * @domain: power domain to reference
700df0566a6SJani Nikula * @wakeref: wakeref acquired for the reference that is being released
701caacfe31SImre Deak * @delay_ms: delay of powering down the power domain
702df0566a6SJani Nikula *
703df0566a6SJani Nikula * This function drops the power domain reference obtained by
704df0566a6SJani Nikula * intel_display_power_get*() and schedules a work to power down the
705df0566a6SJani Nikula * corresponding hardware block if this is the last reference.
706caacfe31SImre Deak * The power down is delayed by @delay_ms if this is >= 0, or by a default
707caacfe31SImre Deak * 100 ms otherwise.
708df0566a6SJani Nikula */
__intel_display_power_put_async(struct drm_i915_private * i915,enum intel_display_power_domain domain,intel_wakeref_t wakeref,int delay_ms)709df0566a6SJani Nikula void __intel_display_power_put_async(struct drm_i915_private *i915,
710df0566a6SJani Nikula enum intel_display_power_domain domain,
711caacfe31SImre Deak intel_wakeref_t wakeref,
712caacfe31SImre Deak int delay_ms)
713df0566a6SJani Nikula {
714e3e8148fSJani Nikula struct i915_power_domains *power_domains = &i915->display.power.domains;
715df0566a6SJani Nikula struct intel_runtime_pm *rpm = &i915->runtime_pm;
716df0566a6SJani Nikula intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(rpm);
717df0566a6SJani Nikula
718caacfe31SImre Deak delay_ms = delay_ms >= 0 ? delay_ms : 100;
719caacfe31SImre Deak
720df0566a6SJani Nikula mutex_lock(&power_domains->lock);
721df0566a6SJani Nikula
722df0566a6SJani Nikula if (power_domains->domain_use_count[domain] > 1) {
723df0566a6SJani Nikula __intel_display_power_put_domain(i915, domain);
724df0566a6SJani Nikula
725df0566a6SJani Nikula goto out_verify;
726df0566a6SJani Nikula }
727df0566a6SJani Nikula
728a66d7c1eSPankaj Bharadiya drm_WARN_ON(&i915->drm, power_domains->domain_use_count[domain] != 1);
729df0566a6SJani Nikula
730df0566a6SJani Nikula /* Let a pending work requeue itself or queue a new one. */
731df0566a6SJani Nikula if (power_domains->async_put_wakeref) {
732888a2a63SImre Deak set_bit(domain, power_domains->async_put_domains[1].bits);
733caacfe31SImre Deak power_domains->async_put_next_delay = max(power_domains->async_put_next_delay,
734caacfe31SImre Deak delay_ms);
735df0566a6SJani Nikula } else {
736888a2a63SImre Deak set_bit(domain, power_domains->async_put_domains[0].bits);
737df0566a6SJani Nikula queue_async_put_domains_work(power_domains,
738caacfe31SImre Deak fetch_and_zero(&work_wakeref),
739caacfe31SImre Deak delay_ms);
740df0566a6SJani Nikula }
741df0566a6SJani Nikula
742df0566a6SJani Nikula out_verify:
743df0566a6SJani Nikula verify_async_put_domains_state(power_domains);
744df0566a6SJani Nikula
745df0566a6SJani Nikula mutex_unlock(&power_domains->lock);
746df0566a6SJani Nikula
747df0566a6SJani Nikula if (work_wakeref)
748df0566a6SJani Nikula intel_runtime_pm_put_raw(rpm, work_wakeref);
749df0566a6SJani Nikula
750df0566a6SJani Nikula intel_runtime_pm_put(rpm, wakeref);
751df0566a6SJani Nikula }
752df0566a6SJani Nikula
753df0566a6SJani Nikula /**
754df0566a6SJani Nikula * intel_display_power_flush_work - flushes the async display power disabling work
755df0566a6SJani Nikula * @i915: i915 device instance
756df0566a6SJani Nikula *
757df0566a6SJani Nikula * Flushes any pending work that was scheduled by a preceding
758df0566a6SJani Nikula * intel_display_power_put_async() call, completing the disabling of the
759df0566a6SJani Nikula * corresponding power domains.
760df0566a6SJani Nikula *
761df0566a6SJani Nikula * Note that the work handler function may still be running after this
762df0566a6SJani Nikula * function returns; to ensure that the work handler isn't running use
763df0566a6SJani Nikula * intel_display_power_flush_work_sync() instead.
764df0566a6SJani Nikula */
intel_display_power_flush_work(struct drm_i915_private * i915)765df0566a6SJani Nikula void intel_display_power_flush_work(struct drm_i915_private *i915)
766df0566a6SJani Nikula {
767e3e8148fSJani Nikula struct i915_power_domains *power_domains = &i915->display.power.domains;
768888a2a63SImre Deak struct intel_power_domain_mask async_put_mask;
769df0566a6SJani Nikula intel_wakeref_t work_wakeref;
770df0566a6SJani Nikula
771df0566a6SJani Nikula mutex_lock(&power_domains->lock);
772df0566a6SJani Nikula
773df0566a6SJani Nikula work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
774df0566a6SJani Nikula if (!work_wakeref)
775df0566a6SJani Nikula goto out_verify;
776df0566a6SJani Nikula
777888a2a63SImre Deak async_put_domains_mask(power_domains, &async_put_mask);
778888a2a63SImre Deak release_async_put_domains(power_domains, &async_put_mask);
779caacfe31SImre Deak cancel_async_put_work(power_domains, false);
780df0566a6SJani Nikula
781df0566a6SJani Nikula out_verify:
782df0566a6SJani Nikula verify_async_put_domains_state(power_domains);
783df0566a6SJani Nikula
784df0566a6SJani Nikula mutex_unlock(&power_domains->lock);
785df0566a6SJani Nikula
786df0566a6SJani Nikula if (work_wakeref)
787df0566a6SJani Nikula intel_runtime_pm_put_raw(&i915->runtime_pm, work_wakeref);
788df0566a6SJani Nikula }
789df0566a6SJani Nikula
790df0566a6SJani Nikula /**
791df0566a6SJani Nikula * intel_display_power_flush_work_sync - flushes and syncs the async display power disabling work
792df0566a6SJani Nikula * @i915: i915 device instance
793df0566a6SJani Nikula *
794df0566a6SJani Nikula * Like intel_display_power_flush_work(), but also ensure that the work
795df0566a6SJani Nikula * handler function is not running any more when this function returns.
796df0566a6SJani Nikula */
797df0566a6SJani Nikula static void
intel_display_power_flush_work_sync(struct drm_i915_private * i915)798df0566a6SJani Nikula intel_display_power_flush_work_sync(struct drm_i915_private *i915)
799df0566a6SJani Nikula {
800e3e8148fSJani Nikula struct i915_power_domains *power_domains = &i915->display.power.domains;
801df0566a6SJani Nikula
802df0566a6SJani Nikula intel_display_power_flush_work(i915);
803caacfe31SImre Deak cancel_async_put_work(power_domains, true);
804df0566a6SJani Nikula
805df0566a6SJani Nikula verify_async_put_domains_state(power_domains);
806df0566a6SJani Nikula
807a66d7c1eSPankaj Bharadiya drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref);
808df0566a6SJani Nikula }
809df0566a6SJani Nikula
810df0566a6SJani Nikula #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
811df0566a6SJani Nikula /**
812df0566a6SJani Nikula * intel_display_power_put - release a power domain reference
813df0566a6SJani Nikula * @dev_priv: i915 device instance
814df0566a6SJani Nikula * @domain: power domain to reference
815df0566a6SJani Nikula * @wakeref: wakeref acquired for the reference that is being released
816df0566a6SJani Nikula *
817df0566a6SJani Nikula * This function drops the power domain reference obtained by
818df0566a6SJani Nikula * intel_display_power_get() and might power down the corresponding hardware
819df0566a6SJani Nikula * block right away if this is the last reference.
820df0566a6SJani Nikula */
intel_display_power_put(struct drm_i915_private * dev_priv,enum intel_display_power_domain domain,intel_wakeref_t wakeref)821df0566a6SJani Nikula void intel_display_power_put(struct drm_i915_private *dev_priv,
822df0566a6SJani Nikula enum intel_display_power_domain domain,
823df0566a6SJani Nikula intel_wakeref_t wakeref)
824df0566a6SJani Nikula {
825df0566a6SJani Nikula __intel_display_power_put(dev_priv, domain);
826df0566a6SJani Nikula intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
827df0566a6SJani Nikula }
828e3529346SImre Deak #else
829e3529346SImre Deak /**
830e3529346SImre Deak * intel_display_power_put_unchecked - release an unchecked power domain reference
831e3529346SImre Deak * @dev_priv: i915 device instance
832e3529346SImre Deak * @domain: power domain to reference
833e3529346SImre Deak *
834e3529346SImre Deak * This function drops the power domain reference obtained by
835e3529346SImre Deak * intel_display_power_get() and might power down the corresponding hardware
836e3529346SImre Deak * block right away if this is the last reference.
837e3529346SImre Deak *
838e3529346SImre Deak * This function is only for the power domain code's internal use to suppress wakeref
839e3529346SImre Deak * tracking when the correspondig debug kconfig option is disabled, should not
840e3529346SImre Deak * be used otherwise.
841e3529346SImre Deak */
intel_display_power_put_unchecked(struct drm_i915_private * dev_priv,enum intel_display_power_domain domain)842e3529346SImre Deak void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
843e3529346SImre Deak enum intel_display_power_domain domain)
844e3529346SImre Deak {
845e3529346SImre Deak __intel_display_power_put(dev_priv, domain);
846e3529346SImre Deak intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
847e3529346SImre Deak }
848df0566a6SJani Nikula #endif
849df0566a6SJani Nikula
8506979cb9aSImre Deak void
intel_display_power_get_in_set(struct drm_i915_private * i915,struct intel_display_power_domain_set * power_domain_set,enum intel_display_power_domain domain)8516979cb9aSImre Deak intel_display_power_get_in_set(struct drm_i915_private *i915,
8526979cb9aSImre Deak struct intel_display_power_domain_set *power_domain_set,
8536979cb9aSImre Deak enum intel_display_power_domain domain)
8546979cb9aSImre Deak {
8556979cb9aSImre Deak intel_wakeref_t __maybe_unused wf;
8566979cb9aSImre Deak
857888a2a63SImre Deak drm_WARN_ON(&i915->drm, test_bit(domain, power_domain_set->mask.bits));
8586979cb9aSImre Deak
8596979cb9aSImre Deak wf = intel_display_power_get(i915, domain);
8606979cb9aSImre Deak #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
8616979cb9aSImre Deak power_domain_set->wakerefs[domain] = wf;
8626979cb9aSImre Deak #endif
863888a2a63SImre Deak set_bit(domain, power_domain_set->mask.bits);
8646979cb9aSImre Deak }
8656979cb9aSImre Deak
8666979cb9aSImre Deak bool
intel_display_power_get_in_set_if_enabled(struct drm_i915_private * i915,struct intel_display_power_domain_set * power_domain_set,enum intel_display_power_domain domain)8676979cb9aSImre Deak intel_display_power_get_in_set_if_enabled(struct drm_i915_private *i915,
8686979cb9aSImre Deak struct intel_display_power_domain_set *power_domain_set,
8696979cb9aSImre Deak enum intel_display_power_domain domain)
8706979cb9aSImre Deak {
8716979cb9aSImre Deak intel_wakeref_t wf;
8726979cb9aSImre Deak
873888a2a63SImre Deak drm_WARN_ON(&i915->drm, test_bit(domain, power_domain_set->mask.bits));
8746979cb9aSImre Deak
8756979cb9aSImre Deak wf = intel_display_power_get_if_enabled(i915, domain);
8766979cb9aSImre Deak if (!wf)
8776979cb9aSImre Deak return false;
8786979cb9aSImre Deak
8796979cb9aSImre Deak #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
8806979cb9aSImre Deak power_domain_set->wakerefs[domain] = wf;
8816979cb9aSImre Deak #endif
882888a2a63SImre Deak set_bit(domain, power_domain_set->mask.bits);
8836979cb9aSImre Deak
8846979cb9aSImre Deak return true;
8856979cb9aSImre Deak }
8866979cb9aSImre Deak
8876979cb9aSImre Deak void
intel_display_power_put_mask_in_set(struct drm_i915_private * i915,struct intel_display_power_domain_set * power_domain_set,struct intel_power_domain_mask * mask)8886979cb9aSImre Deak intel_display_power_put_mask_in_set(struct drm_i915_private *i915,
8896979cb9aSImre Deak struct intel_display_power_domain_set *power_domain_set,
890888a2a63SImre Deak struct intel_power_domain_mask *mask)
8916979cb9aSImre Deak {
8926979cb9aSImre Deak enum intel_display_power_domain domain;
8936979cb9aSImre Deak
894888a2a63SImre Deak drm_WARN_ON(&i915->drm,
895888a2a63SImre Deak !bitmap_subset(mask->bits, power_domain_set->mask.bits, POWER_DOMAIN_NUM));
8966979cb9aSImre Deak
8976979cb9aSImre Deak for_each_power_domain(domain, mask) {
8986979cb9aSImre Deak intel_wakeref_t __maybe_unused wf = -1;
8996979cb9aSImre Deak
9006979cb9aSImre Deak #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
9016979cb9aSImre Deak wf = fetch_and_zero(&power_domain_set->wakerefs[domain]);
9026979cb9aSImre Deak #endif
9036979cb9aSImre Deak intel_display_power_put(i915, domain, wf);
904888a2a63SImre Deak clear_bit(domain, power_domain_set->mask.bits);
9056979cb9aSImre Deak }
9066979cb9aSImre Deak }
9076979cb9aSImre Deak
908df0566a6SJani Nikula static int
sanitize_disable_power_well_option(const struct drm_i915_private * dev_priv,int disable_power_well)909df0566a6SJani Nikula sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
910df0566a6SJani Nikula int disable_power_well)
911df0566a6SJani Nikula {
912df0566a6SJani Nikula if (disable_power_well >= 0)
913df0566a6SJani Nikula return !!disable_power_well;
914df0566a6SJani Nikula
915df0566a6SJani Nikula return 1;
916df0566a6SJani Nikula }
917df0566a6SJani Nikula
get_allowed_dc_mask(const struct drm_i915_private * dev_priv,int enable_dc)918df0566a6SJani Nikula static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
919df0566a6SJani Nikula int enable_dc)
920df0566a6SJani Nikula {
921df0566a6SJani Nikula u32 mask;
922df0566a6SJani Nikula int requested_dc;
923df0566a6SJani Nikula int max_dc;
924df0566a6SJani Nikula
9255df7bd13SJosé Roberto de Souza if (!HAS_DISPLAY(dev_priv))
9265df7bd13SJosé Roberto de Souza return 0;
9275df7bd13SJosé Roberto de Souza
92843e18b0aSMatt Roper if (DISPLAY_VER(dev_priv) >= 20)
92943e18b0aSMatt Roper max_dc = 2;
93043e18b0aSMatt Roper else if (IS_DG2(dev_priv))
9318eb40367SAnusha Srivatsa max_dc = 1;
932ef83e119SAnusha Srivatsa else if (IS_DG1(dev_priv))
933cbb6ea8cSAnshuman Gupta max_dc = 3;
934005e9537SMatt Roper else if (DISPLAY_VER(dev_priv) >= 12)
93519c79ff8SAnshuman Gupta max_dc = 4;
93670bfb307SMatt Roper else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
93702d794a3SJosé Roberto de Souza max_dc = 1;
93870bfb307SMatt Roper else if (DISPLAY_VER(dev_priv) >= 9)
93970bfb307SMatt Roper max_dc = 2;
94004460494SJosé Roberto de Souza else
94102d794a3SJosé Roberto de Souza max_dc = 0;
94202d794a3SJosé Roberto de Souza
943df0566a6SJani Nikula /*
944df0566a6SJani Nikula * DC9 has a separate HW flow from the rest of the DC states,
945df0566a6SJani Nikula * not depending on the DMC firmware. It's needed by system
946df0566a6SJani Nikula * suspend/resume, so allow it unconditionally.
947df0566a6SJani Nikula */
94870bfb307SMatt Roper mask = IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) ||
94970bfb307SMatt Roper DISPLAY_VER(dev_priv) >= 11 ?
95002d794a3SJosé Roberto de Souza DC_STATE_EN_DC9 : 0;
951df0566a6SJani Nikula
952bfcda58bSJouni Högander if (!dev_priv->display.params.disable_power_well)
953df0566a6SJani Nikula max_dc = 0;
954df0566a6SJani Nikula
955df0566a6SJani Nikula if (enable_dc >= 0 && enable_dc <= max_dc) {
956df0566a6SJani Nikula requested_dc = enable_dc;
957df0566a6SJani Nikula } else if (enable_dc == -1) {
958df0566a6SJani Nikula requested_dc = max_dc;
95919c79ff8SAnshuman Gupta } else if (enable_dc > max_dc && enable_dc <= 4) {
960569caa65SWambui Karuga drm_dbg_kms(&dev_priv->drm,
961569caa65SWambui Karuga "Adjusting requested max DC state (%d->%d)\n",
962df0566a6SJani Nikula enable_dc, max_dc);
963df0566a6SJani Nikula requested_dc = max_dc;
964df0566a6SJani Nikula } else {
965569caa65SWambui Karuga drm_err(&dev_priv->drm,
966569caa65SWambui Karuga "Unexpected value for enable_dc (%d)\n", enable_dc);
967df0566a6SJani Nikula requested_dc = max_dc;
968df0566a6SJani Nikula }
969df0566a6SJani Nikula
97019c79ff8SAnshuman Gupta switch (requested_dc) {
97119c79ff8SAnshuman Gupta case 4:
97219c79ff8SAnshuman Gupta mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6;
97319c79ff8SAnshuman Gupta break;
97419c79ff8SAnshuman Gupta case 3:
97519c79ff8SAnshuman Gupta mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC5;
97619c79ff8SAnshuman Gupta break;
97719c79ff8SAnshuman Gupta case 2:
978df0566a6SJani Nikula mask |= DC_STATE_EN_UPTO_DC6;
97919c79ff8SAnshuman Gupta break;
98019c79ff8SAnshuman Gupta case 1:
981df0566a6SJani Nikula mask |= DC_STATE_EN_UPTO_DC5;
98219c79ff8SAnshuman Gupta break;
98319c79ff8SAnshuman Gupta }
984df0566a6SJani Nikula
985569caa65SWambui Karuga drm_dbg_kms(&dev_priv->drm, "Allowed DC state mask %02x\n", mask);
986df0566a6SJani Nikula
987df0566a6SJani Nikula return mask;
988df0566a6SJani Nikula }
989df0566a6SJani Nikula
990df0566a6SJani Nikula /**
991df0566a6SJani Nikula * intel_power_domains_init - initializes the power domain structures
992df0566a6SJani Nikula * @dev_priv: i915 device instance
993df0566a6SJani Nikula *
994df0566a6SJani Nikula * Initializes the power domain structures for @dev_priv depending upon the
995df0566a6SJani Nikula * supported platform.
996df0566a6SJani Nikula */
intel_power_domains_init(struct drm_i915_private * dev_priv)997df0566a6SJani Nikula int intel_power_domains_init(struct drm_i915_private *dev_priv)
998df0566a6SJani Nikula {
999e3e8148fSJani Nikula struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
1000df0566a6SJani Nikula
1001bfcda58bSJouni Högander dev_priv->display.params.disable_power_well =
1002df0566a6SJani Nikula sanitize_disable_power_well_option(dev_priv,
1003bfcda58bSJouni Högander dev_priv->display.params.disable_power_well);
1004825f0de2SJani Nikula power_domains->allowed_dc_mask =
10050deee706SJouni Högander get_allowed_dc_mask(dev_priv, dev_priv->display.params.enable_dc);
1006df0566a6SJani Nikula
1007825f0de2SJani Nikula power_domains->target_dc_state =
10084645e906SAnshuman Gupta sanitize_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
10094645e906SAnshuman Gupta
1010df0566a6SJani Nikula mutex_init(&power_domains->lock);
1011df0566a6SJani Nikula
1012df0566a6SJani Nikula INIT_DELAYED_WORK(&power_domains->async_put_work,
1013df0566a6SJani Nikula intel_display_power_put_async_work);
1014df0566a6SJani Nikula
1015323286c8SImre Deak return intel_display_power_map_init(power_domains);
1016df0566a6SJani Nikula }
1017df0566a6SJani Nikula
1018df0566a6SJani Nikula /**
1019df0566a6SJani Nikula * intel_power_domains_cleanup - clean up power domains resources
1020df0566a6SJani Nikula * @dev_priv: i915 device instance
1021df0566a6SJani Nikula *
1022df0566a6SJani Nikula * Release any resources acquired by intel_power_domains_init()
1023df0566a6SJani Nikula */
intel_power_domains_cleanup(struct drm_i915_private * dev_priv)1024df0566a6SJani Nikula void intel_power_domains_cleanup(struct drm_i915_private *dev_priv)
1025df0566a6SJani Nikula {
1026e3e8148fSJani Nikula intel_display_power_map_cleanup(&dev_priv->display.power.domains);
1027df0566a6SJani Nikula }
1028df0566a6SJani Nikula
intel_power_domains_sync_hw(struct drm_i915_private * dev_priv)1029df0566a6SJani Nikula static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
1030df0566a6SJani Nikula {
1031e3e8148fSJani Nikula struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
1032df0566a6SJani Nikula struct i915_power_well *power_well;
1033df0566a6SJani Nikula
1034df0566a6SJani Nikula mutex_lock(&power_domains->lock);
103590cf356bSImre Deak for_each_power_well(dev_priv, power_well)
103690cf356bSImre Deak intel_power_well_sync_hw(dev_priv, power_well);
1037df0566a6SJani Nikula mutex_unlock(&power_domains->lock);
1038df0566a6SJani Nikula }
1039df0566a6SJani Nikula
gen9_dbuf_slice_set(struct drm_i915_private * dev_priv,enum dbuf_slice slice,bool enable)104056f48c1dSVille Syrjälä static void gen9_dbuf_slice_set(struct drm_i915_private *dev_priv,
1041b3f1ff5bSVille Syrjälä enum dbuf_slice slice, bool enable)
1042df0566a6SJani Nikula {
1043b3f1ff5bSVille Syrjälä i915_reg_t reg = DBUF_CTL_S(slice);
1044b3f1ff5bSVille Syrjälä bool state;
1045df0566a6SJani Nikula
1046d152bb1fSVille Syrjälä intel_de_rmw(dev_priv, reg, DBUF_POWER_REQUEST,
1047d152bb1fSVille Syrjälä enable ? DBUF_POWER_REQUEST : 0);
1048d6e53851SJani Nikula intel_de_posting_read(dev_priv, reg);
1049df0566a6SJani Nikula udelay(10);
1050df0566a6SJani Nikula
1051b3f1ff5bSVille Syrjälä state = intel_de_read(dev_priv, reg) & DBUF_POWER_STATE;
1052b3f1ff5bSVille Syrjälä drm_WARN(&dev_priv->drm, enable != state,
1053b3f1ff5bSVille Syrjälä "DBuf slice %d power %s timeout!\n",
1054707c3a7dSLucas De Marchi slice, str_enable_disable(enable));
1055df0566a6SJani Nikula }
1056df0566a6SJani Nikula
gen9_dbuf_slices_update(struct drm_i915_private * dev_priv,u8 req_slices)105756f48c1dSVille Syrjälä void gen9_dbuf_slices_update(struct drm_i915_private *dev_priv,
1058df0566a6SJani Nikula u8 req_slices)
1059df0566a6SJani Nikula {
1060e3e8148fSJani Nikula struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
10615af5169dSMatt Roper u8 slice_mask = DISPLAY_INFO(dev_priv)->dbuf.slice_mask;
1062b3f1ff5bSVille Syrjälä enum dbuf_slice slice;
1063df0566a6SJani Nikula
1064b88da660SVille Syrjälä drm_WARN(&dev_priv->drm, req_slices & ~slice_mask,
1065b88da660SVille Syrjälä "Invalid set of dbuf slices (0x%x) requested (total dbuf slices 0x%x)\n",
1066b88da660SVille Syrjälä req_slices, slice_mask);
10670f0f9aeeSStanislav Lisovskiy
10683c4e3870SJani Nikula drm_dbg_kms(&dev_priv->drm, "Updating dbuf slices to 0x%x\n",
10693c4e3870SJani Nikula req_slices);
10700f0f9aeeSStanislav Lisovskiy
10710f0f9aeeSStanislav Lisovskiy /*
10720f0f9aeeSStanislav Lisovskiy * Might be running this in parallel to gen9_dc_off_power_well_enable
10730f0f9aeeSStanislav Lisovskiy * being called from intel_dp_detect for instance,
10740f0f9aeeSStanislav Lisovskiy * which causes assertion triggered by race condition,
10750f0f9aeeSStanislav Lisovskiy * as gen9_assert_dbuf_enabled might preempt this when registers
10760f0f9aeeSStanislav Lisovskiy * were already updated, while dev_priv was not.
10770f0f9aeeSStanislav Lisovskiy */
10780f0f9aeeSStanislav Lisovskiy mutex_lock(&power_domains->lock);
10790f0f9aeeSStanislav Lisovskiy
1080b88da660SVille Syrjälä for_each_dbuf_slice(dev_priv, slice)
108156f48c1dSVille Syrjälä gen9_dbuf_slice_set(dev_priv, slice, req_slices & BIT(slice));
1082df0566a6SJani Nikula
1083b7d15590SJani Nikula dev_priv->display.dbuf.enabled_slices = req_slices;
1084df0566a6SJani Nikula
10850f0f9aeeSStanislav Lisovskiy mutex_unlock(&power_domains->lock);
1086df0566a6SJani Nikula }
1087df0566a6SJani Nikula
gen9_dbuf_enable(struct drm_i915_private * dev_priv)108856f48c1dSVille Syrjälä static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
1089df0566a6SJani Nikula {
10904c4cc7acSMika Kahola u8 slices_mask;
10914c4cc7acSMika Kahola
1092b7d15590SJani Nikula dev_priv->display.dbuf.enabled_slices =
109356f48c1dSVille Syrjälä intel_enabled_dbuf_slices_mask(dev_priv);
109456f48c1dSVille Syrjälä
10954c4cc7acSMika Kahola slices_mask = BIT(DBUF_S1) | dev_priv->display.dbuf.enabled_slices;
10964c4cc7acSMika Kahola
10974c4cc7acSMika Kahola if (DISPLAY_VER(dev_priv) >= 14)
10984c4cc7acSMika Kahola intel_pmdemand_program_dbuf(dev_priv, slices_mask);
10994c4cc7acSMika Kahola
1100df0566a6SJani Nikula /*
1101b18e249bSStanislav Lisovskiy * Just power up at least 1 slice, we will
11020f0f9aeeSStanislav Lisovskiy * figure out later which slices we have and what we need.
1103df0566a6SJani Nikula */
11044c4cc7acSMika Kahola gen9_dbuf_slices_update(dev_priv, slices_mask);
1105df0566a6SJani Nikula }
1106df0566a6SJani Nikula
gen9_dbuf_disable(struct drm_i915_private * dev_priv)110756f48c1dSVille Syrjälä static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
1108df0566a6SJani Nikula {
110956f48c1dSVille Syrjälä gen9_dbuf_slices_update(dev_priv, 0);
11104c4cc7acSMika Kahola
11114c4cc7acSMika Kahola if (DISPLAY_VER(dev_priv) >= 14)
11124c4cc7acSMika Kahola intel_pmdemand_program_dbuf(dev_priv, 0);
1113df0566a6SJani Nikula }
1114df0566a6SJani Nikula
gen12_dbuf_slices_config(struct drm_i915_private * dev_priv)1115359d0effSJosé Roberto de Souza static void gen12_dbuf_slices_config(struct drm_i915_private *dev_priv)
1116359d0effSJosé Roberto de Souza {
1117359d0effSJosé Roberto de Souza enum dbuf_slice slice;
1118359d0effSJosé Roberto de Souza
111914076e46SJosé Roberto de Souza if (IS_ALDERLAKE_P(dev_priv))
112014076e46SJosé Roberto de Souza return;
112114076e46SJosé Roberto de Souza
1122b88da660SVille Syrjälä for_each_dbuf_slice(dev_priv, slice)
1123359d0effSJosé Roberto de Souza intel_de_rmw(dev_priv, DBUF_CTL_S(slice),
1124359d0effSJosé Roberto de Souza DBUF_TRACKER_STATE_SERVICE_MASK,
1125359d0effSJosé Roberto de Souza DBUF_TRACKER_STATE_SERVICE(8));
1126359d0effSJosé Roberto de Souza }
1127359d0effSJosé Roberto de Souza
icl_mbus_init(struct drm_i915_private * dev_priv)1128df0566a6SJani Nikula static void icl_mbus_init(struct drm_i915_private *dev_priv)
1129df0566a6SJani Nikula {
11305af5169dSMatt Roper unsigned long abox_regs = DISPLAY_INFO(dev_priv)->abox_mask;
113162afef28SMatt Roper u32 mask, val, i;
1132df0566a6SJani Nikula
1133f02c7d5aSJosé Roberto de Souza if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
113414076e46SJosé Roberto de Souza return;
113514076e46SJosé Roberto de Souza
1136837b63e6SMatt Roper mask = MBUS_ABOX_BT_CREDIT_POOL1_MASK |
1137837b63e6SMatt Roper MBUS_ABOX_BT_CREDIT_POOL2_MASK |
1138837b63e6SMatt Roper MBUS_ABOX_B_CREDIT_MASK |
1139837b63e6SMatt Roper MBUS_ABOX_BW_CREDIT_MASK;
1140df0566a6SJani Nikula val = MBUS_ABOX_BT_CREDIT_POOL1(16) |
1141df0566a6SJani Nikula MBUS_ABOX_BT_CREDIT_POOL2(16) |
1142df0566a6SJani Nikula MBUS_ABOX_B_CREDIT(1) |
1143df0566a6SJani Nikula MBUS_ABOX_BW_CREDIT(1);
1144df0566a6SJani Nikula
114562afef28SMatt Roper /*
114662afef28SMatt Roper * gen12 platforms that use abox1 and abox2 for pixel data reads still
114762afef28SMatt Roper * expect us to program the abox_ctl0 register as well, even though
114862afef28SMatt Roper * we don't have to program other instance-0 registers like BW_BUDDY.
114962afef28SMatt Roper */
115093e7e61eSLucas De Marchi if (DISPLAY_VER(dev_priv) == 12)
115162afef28SMatt Roper abox_regs |= BIT(0);
115262afef28SMatt Roper
115362afef28SMatt Roper for_each_set_bit(i, &abox_regs, sizeof(abox_regs))
115462afef28SMatt Roper intel_de_rmw(dev_priv, MBUS_ABOX_CTL(i), mask, val);
1155df0566a6SJani Nikula }
1156df0566a6SJani Nikula
hsw_assert_cdclk(struct drm_i915_private * dev_priv)1157df0566a6SJani Nikula static void hsw_assert_cdclk(struct drm_i915_private *dev_priv)
1158df0566a6SJani Nikula {
1159d6e53851SJani Nikula u32 val = intel_de_read(dev_priv, LCPLL_CTL);
1160df0566a6SJani Nikula
1161df0566a6SJani Nikula /*
1162df0566a6SJani Nikula * The LCPLL register should be turned on by the BIOS. For now
1163df0566a6SJani Nikula * let's just check its state and print errors in case
1164df0566a6SJani Nikula * something is wrong. Don't even try to turn it on.
1165df0566a6SJani Nikula */
1166df0566a6SJani Nikula
1167df0566a6SJani Nikula if (val & LCPLL_CD_SOURCE_FCLK)
1168569caa65SWambui Karuga drm_err(&dev_priv->drm, "CDCLK source is not LCPLL\n");
1169df0566a6SJani Nikula
1170df0566a6SJani Nikula if (val & LCPLL_PLL_DISABLE)
1171569caa65SWambui Karuga drm_err(&dev_priv->drm, "LCPLL is disabled\n");
1172df0566a6SJani Nikula
1173df0566a6SJani Nikula if ((val & LCPLL_REF_MASK) != LCPLL_REF_NON_SSC)
1174569caa65SWambui Karuga drm_err(&dev_priv->drm, "LCPLL not using non-SSC reference\n");
1175df0566a6SJani Nikula }
1176df0566a6SJani Nikula
/*
 * Sanity-check that nothing still depends on LCPLL before it is turned off
 * on the way into package C8+: no active pipes, display power well off,
 * SPLL/WRPLLs off, panel power off, all backlight PWMs off, PCH GTC off
 * and interrupts disabled. Each violation triggers an I915_STATE_WARN.
 */
static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
{
	struct intel_crtc *crtc;

	/* Every pipe must already be shut down. */
	for_each_intel_crtc(&dev_priv->drm, crtc)
		I915_STATE_WARN(dev_priv, crtc->active,
				"CRTC for pipe %c enabled\n",
				pipe_name(crtc->pipe));

	I915_STATE_WARN(dev_priv, intel_de_read(dev_priv, HSW_PWR_WELL_CTL2),
			"Display power well on\n");
	I915_STATE_WARN(dev_priv,
			intel_de_read(dev_priv, SPLL_CTL) & SPLL_PLL_ENABLE,
			"SPLL enabled\n");
	I915_STATE_WARN(dev_priv,
			intel_de_read(dev_priv, WRPLL_CTL(0)) & WRPLL_PLL_ENABLE,
			"WRPLL1 enabled\n");
	I915_STATE_WARN(dev_priv,
			intel_de_read(dev_priv, WRPLL_CTL(1)) & WRPLL_PLL_ENABLE,
			"WRPLL2 enabled\n");
	I915_STATE_WARN(dev_priv,
			intel_de_read(dev_priv, PP_STATUS(dev_priv, 0)) & PP_ON,
			"Panel power on\n");
	I915_STATE_WARN(dev_priv,
			intel_de_read(dev_priv, BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
			"CPU PWM1 enabled\n");
	/* The second CPU backlight PWM exists only on HSW. */
	if (IS_HASWELL(dev_priv))
		I915_STATE_WARN(dev_priv,
				intel_de_read(dev_priv, HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
				"CPU PWM2 enabled\n");
	I915_STATE_WARN(dev_priv,
			intel_de_read(dev_priv, BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
			"PCH PWM1 enabled\n");
	I915_STATE_WARN(dev_priv,
			(intel_de_read(dev_priv, UTIL_PIN_CTL) & (UTIL_PIN_ENABLE | UTIL_PIN_MODE_MASK)) == (UTIL_PIN_ENABLE | UTIL_PIN_MODE_PWM),
			"Utility pin enabled in PWM mode\n");
	I915_STATE_WARN(dev_priv,
			intel_de_read(dev_priv, PCH_GTC_CTL) & PCH_GTC_ENABLE,
			"PCH GTC enabled\n");

	/*
	 * In theory we can still leave IRQs enabled, as long as only the HPD
	 * interrupts remain enabled. We used to check for that, but since it's
	 * gen-specific and since we only disable LCPLL after we fully disable
	 * the interrupts, the check below should be enough.
	 */
	I915_STATE_WARN(dev_priv, intel_irqs_enabled(dev_priv),
			"IRQs enabled\n");
}
1226df0566a6SJani Nikula
hsw_read_dcomp(struct drm_i915_private * dev_priv)1227df0566a6SJani Nikula static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv)
1228df0566a6SJani Nikula {
1229df0566a6SJani Nikula if (IS_HASWELL(dev_priv))
1230d6e53851SJani Nikula return intel_de_read(dev_priv, D_COMP_HSW);
1231df0566a6SJani Nikula else
1232d6e53851SJani Nikula return intel_de_read(dev_priv, D_COMP_BDW);
1233df0566a6SJani Nikula }
1234df0566a6SJani Nikula
/*
 * Write D_COMP: BDW can write the MMIO register directly (with a posting
 * read), while HSW must go through the pcode mailbox.
 */
static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val)
{
	if (!IS_HASWELL(dev_priv)) {
		intel_de_write(dev_priv, D_COMP_BDW, val);
		intel_de_posting_read(dev_priv, D_COMP_BDW);
		return;
	}

	if (snb_pcode_write(&dev_priv->uncore, GEN6_PCODE_WRITE_D_COMP, val))
		drm_dbg_kms(&dev_priv->drm,
			    "Failed to write to D_COMP\n");
}
1246df0566a6SJani Nikula
1247df0566a6SJani Nikula /*
1248df0566a6SJani Nikula * This function implements pieces of two sequences from BSpec:
1249df0566a6SJani Nikula * - Sequence for display software to disable LCPLL
1250df0566a6SJani Nikula * - Sequence for display software to allow package C8+
1251df0566a6SJani Nikula * The steps implemented here are just the steps that actually touch the LCPLL
1252df0566a6SJani Nikula * register. Callers should take care of disabling all the display engine
1253df0566a6SJani Nikula * functions, doing the mode unset, fixing interrupts, etc.
1254df0566a6SJani Nikula */
static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
			      bool switch_to_fclk, bool allow_power_down)
{
	u32 val;

	assert_can_disable_lcpll(dev_priv);

	val = intel_de_read(dev_priv, LCPLL_CTL);

	/* Optionally move CDCLK over to FCLK first so the PLL can stop. */
	if (switch_to_fclk) {
		val |= LCPLL_CD_SOURCE_FCLK;
		intel_de_write(dev_priv, LCPLL_CTL, val);

		/* HW acks the source switch via the DONE bit (~1 us). */
		if (wait_for_us(intel_de_read(dev_priv, LCPLL_CTL) &
				LCPLL_CD_SOURCE_FCLK_DONE, 1))
			drm_err(&dev_priv->drm, "Switching to FCLK failed\n");

		val = intel_de_read(dev_priv, LCPLL_CTL);
	}

	/* Disable the PLL and wait for the lock indication to drop. */
	val |= LCPLL_PLL_DISABLE;
	intel_de_write(dev_priv, LCPLL_CTL, val);
	intel_de_posting_read(dev_priv, LCPLL_CTL);

	if (intel_de_wait_for_clear(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 1))
		drm_err(&dev_priv->drm, "LCPLL still locked\n");

	/* Disable D_COMP compensation and wait for RCOMP to finish. */
	val = hsw_read_dcomp(dev_priv);
	val |= D_COMP_COMP_DISABLE;
	hsw_write_dcomp(dev_priv, val);
	ndelay(100);

	if (wait_for((hsw_read_dcomp(dev_priv) &
		      D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
		drm_err(&dev_priv->drm, "D_COMP RCOMP still in progress\n");

	/* Finally allow the hardware to remove LCPLL power for PC8+. */
	if (allow_power_down) {
		intel_de_rmw(dev_priv, LCPLL_CTL, 0, LCPLL_POWER_DOWN_ALLOW);
		intel_de_posting_read(dev_priv, LCPLL_CTL);
	}
}
1296df0566a6SJani Nikula
1297df0566a6SJani Nikula /*
1298df0566a6SJani Nikula * Fully restores LCPLL, disallowing power down and switching back to LCPLL
1299df0566a6SJani Nikula * source.
1300df0566a6SJani Nikula */
static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = intel_de_read(dev_priv, LCPLL_CTL);

	/* Nothing to do if LCPLL is already locked, enabled and sourcing CDCLK. */
	if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
		    LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
		return;

	/*
	 * Make sure we're not on PC8 state before disabling PC8, otherwise
	 * we'll hang the machine. To prevent PC8 state, just enable force_wake.
	 */
	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);

	/* Disallow power down before touching the PLL. */
	if (val & LCPLL_POWER_DOWN_ALLOW) {
		val &= ~LCPLL_POWER_DOWN_ALLOW;
		intel_de_write(dev_priv, LCPLL_CTL, val);
		intel_de_posting_read(dev_priv, LCPLL_CTL);
	}

	/* Re-enable D_COMP compensation. */
	val = hsw_read_dcomp(dev_priv);
	val |= D_COMP_COMP_FORCE;
	val &= ~D_COMP_COMP_DISABLE;
	hsw_write_dcomp(dev_priv, val);

	/* Re-enable the PLL and wait for it to lock. */
	val = intel_de_read(dev_priv, LCPLL_CTL);
	val &= ~LCPLL_PLL_DISABLE;
	intel_de_write(dev_priv, LCPLL_CTL, val);

	if (intel_de_wait_for_set(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 5))
		drm_err(&dev_priv->drm, "LCPLL not locked yet\n");

	/* Switch CDCLK back from FCLK to LCPLL and wait for the ack. */
	if (val & LCPLL_CD_SOURCE_FCLK) {
		intel_de_rmw(dev_priv, LCPLL_CTL, LCPLL_CD_SOURCE_FCLK, 0);

		if (wait_for_us((intel_de_read(dev_priv, LCPLL_CTL) &
				 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
			drm_err(&dev_priv->drm,
				"Switching back to LCPLL failed\n");
	}

	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);

	/* Refresh the driver's CDCLK bookkeeping and dump the new config. */
	intel_update_cdclk(dev_priv);
	intel_cdclk_dump_config(dev_priv, &dev_priv->display.cdclk.hw, "Current CDCLK");
}
1349df0566a6SJani Nikula
1350df0566a6SJani Nikula /*
1351df0566a6SJani Nikula * Package states C8 and deeper are really deep PC states that can only be
1352df0566a6SJani Nikula * reached when all the devices on the system allow it, so even if the graphics
1353df0566a6SJani Nikula * device allows PC8+, it doesn't mean the system will actually get to these
1354df0566a6SJani Nikula * states. Our driver only allows PC8+ when going into runtime PM.
1355df0566a6SJani Nikula *
1356df0566a6SJani Nikula * The requirements for PC8+ are that all the outputs are disabled, the power
1357df0566a6SJani Nikula * well is disabled and most interrupts are disabled, and these are also
1358df0566a6SJani Nikula * requirements for runtime PM. When these conditions are met, we manually do
1359df0566a6SJani Nikula * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
1360df0566a6SJani Nikula * to Fclk. If we're in PC8+ and we get an non-hotplug interrupt, we can hard
1361df0566a6SJani Nikula * hang the machine.
1362df0566a6SJani Nikula *
1363df0566a6SJani Nikula * When we really reach PC8 or deeper states (not just when we allow it) we lose
1364df0566a6SJani Nikula * the state of some registers, so when we come back from PC8+ we need to
1365df0566a6SJani Nikula * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
1366df0566a6SJani Nikula * need to take care of the registers kept by RC6. Notice that this happens even
1367df0566a6SJani Nikula * if we don't put the device in PCI D3 state (which is what currently happens
1368df0566a6SJani Nikula * because of the runtime PM support).
1369df0566a6SJani Nikula *
1370df0566a6SJani Nikula * For more, read "Display Sequences for Package C8" on the hardware
1371df0566a6SJani Nikula * documentation.
1372df0566a6SJani Nikula */
/* Allow package C8+: drop LPT-LP partitioning, stop DP clkout, stop LCPLL. */
static void hsw_enable_pc8(struct drm_i915_private *dev_priv)
{
	drm_dbg_kms(&dev_priv->drm, "Enabling package C8+\n");

	/* LPT-LP: clear the low power partition level disable bit first. */
	if (HAS_PCH_LPT_LP(dev_priv))
		intel_de_rmw(dev_priv, SOUTH_DSPCLK_GATE_D,
			     PCH_LP_PARTITION_LEVEL_DISABLE, 0);

	/* Stop the DP clkout, then switch CDCLK to FCLK and power down LCPLL. */
	lpt_disable_clkout_dp(dev_priv);
	hsw_disable_lcpll(dev_priv, true, true);
}
1384df0566a6SJani Nikula
/* Disallow package C8+: restore LCPLL, PCH refclk and clock gating state. */
static void hsw_disable_pc8(struct drm_i915_private *dev_priv)
{
	drm_dbg_kms(&dev_priv->drm, "Disabling package C8+\n");

	hsw_restore_lcpll(dev_priv);
	intel_init_pch_refclk(dev_priv);

	/* Many display registers don't survive PC8+ */
	intel_clock_gating_init(dev_priv);
}
1395df0566a6SJani Nikula
/*
 * Enable/disable the PCH reset handshake. IVB programs GEN7_MSG_CTL,
 * everything newer uses HSW_NDE_RSTWRN_OPT; display version 14+ also
 * carries the PICA handshake bit along.
 */
static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
				      bool enable)
{
	bool ivb = IS_IVYBRIDGE(dev_priv);
	i915_reg_t reg = ivb ? GEN7_MSG_CTL : HSW_NDE_RSTWRN_OPT;
	u32 reset_bits;

	reset_bits = ivb ? WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK :
			   RESET_PCH_HANDSHAKE_ENABLE;

	if (DISPLAY_VER(dev_priv) >= 14)
		reset_bits |= MTL_RESET_PICA_HANDSHAKE_EN;

	intel_de_rmw(dev_priv, reg, reset_bits, enable ? reset_bits : 0);
}
1415df0566a6SJani Nikula
/*
 * Bring up the SKL-class display core: PCH reset handshake, PG1 and
 * Misc I/O power wells, CDCLK, DBUF, and (on resume) the DMC program.
 */
static void skl_display_core_init(struct drm_i915_private *dev_priv,
				  bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
	struct i915_power_well *well;

	/* Clear any DC state left over from boot/suspend. */
	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* enable PCH reset handshake */
	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));

	if (!HAS_DISPLAY(dev_priv))
		return;

	/* enable PG1 and Misc I/O */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
	intel_power_well_enable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	intel_cdclk_init_hw(dev_priv);

	gen9_dbuf_enable(dev_priv);

	/* The DMC program does not survive suspend; reload it on resume. */
	if (resume)
		intel_dmc_load_program(dev_priv);
}
1448df0566a6SJani Nikula
/*
 * Tear down the SKL-class display core in reverse order of
 * skl_display_core_init(): DC states, DBUF, CDCLK, then power well 1.
 */
static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
	struct i915_power_well *well;

	if (!HAS_DISPLAY(dev_priv))
		return;

	gen9_disable_dc_states(dev_priv);
	/* TODO: disable DMC program */

	gen9_dbuf_disable(dev_priv);

	intel_cdclk_uninit_hw(dev_priv);

	/* The spec doesn't call for removing the reset handshake flag */
	/* disable PG1 and Misc I/O */

	mutex_lock(&power_domains->lock);

	/*
	 * BSpec says to keep the MISC IO power well enabled here, only
	 * remove our request for power well 1.
	 * Note that even though the driver's request is removed power well 1
	 * may stay enabled after this due to DMC's own request on it.
	 */
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	usleep_range(10, 30); /* 10 us delay per Bspec */
}
1482df0566a6SJani Nikula
/*
 * Bring up the BXT/GLK display core. Only PG1 is enabled here; the PCH
 * reset handshake must stay disabled (see comment below).
 */
static void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
	struct i915_power_well *well;

	/* Clear any DC state left over from boot/suspend. */
	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/*
	 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
	 * or else the reset will hang because there is no PCH to respond.
	 * Move the handshake programming to initialization sequence.
	 * Previously was left up to BIOS.
	 */
	intel_pch_reset_handshake(dev_priv, false);

	if (!HAS_DISPLAY(dev_priv))
		return;

	/* Enable PG1 */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	intel_cdclk_init_hw(dev_priv);

	gen9_dbuf_enable(dev_priv);

	/* The DMC program does not survive suspend; reload it on resume. */
	if (resume)
		intel_dmc_load_program(dev_priv);
}
1516df0566a6SJani Nikula
/*
 * Tear down the BXT/GLK display core in reverse order of
 * bxt_display_core_init(): DC states, DBUF, CDCLK, then power well 1.
 */
static void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
	struct i915_power_well *well;

	if (!HAS_DISPLAY(dev_priv))
		return;

	gen9_disable_dc_states(dev_priv);
	/* TODO: disable DMC program */

	gen9_dbuf_disable(dev_priv);

	intel_cdclk_uninit_hw(dev_priv);

	/* The spec doesn't call for removing the reset handshake flag */

	/*
	 * Disable PW1 (PG1).
	 * Note that even though the driver's request is removed power well 1
	 * may stay enabled after this due to DMC's own request on it.
	 */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	usleep_range(10, 30); /* 10 us delay per Bspec */
}
1548df0566a6SJani Nikula
/*
 * Maps a DRAM configuration (type + channel count) to the BW_BUDDY page
 * mask that must be programmed for it. Tables below are terminated by an
 * all-zero entry (page_mask == 0).
 */
struct buddy_page_mask {
	u32 page_mask;		/* value for the BW_BUDDY_PAGE_MASK register */
	u8 type;		/* enum intel_dram_type */
	u8 num_channels;	/* number of populated DRAM channels */
};

/* Default TGL+ page masks. */
static const struct buddy_page_mask tgl_buddy_page_masks[] = {
	{ .num_channels = 1, .type = INTEL_DRAM_DDR4,   .page_mask = 0xF },
	{ .num_channels = 1, .type = INTEL_DRAM_DDR5,   .page_mask = 0xF },
	{ .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1C },
	{ .num_channels = 2, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x1C },
	{ .num_channels = 2, .type = INTEL_DRAM_DDR4,   .page_mask = 0x1F },
	{ .num_channels = 2, .type = INTEL_DRAM_DDR5,   .page_mask = 0x1E },
	{ .num_channels = 4, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x38 },
	{ .num_channels = 4, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x38 },
	{}
};

/* Reduced page masks used for Wa_1409767108 (ADL-S, early RKL steppings). */
static const struct buddy_page_mask wa_1409767108_buddy_page_masks[] = {
	{ .num_channels = 1, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1 },
	{ .num_channels = 1, .type = INTEL_DRAM_DDR4,   .page_mask = 0x1 },
	{ .num_channels = 1, .type = INTEL_DRAM_DDR5,   .page_mask = 0x1 },
	{ .num_channels = 1, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x1 },
	{ .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x3 },
	{ .num_channels = 2, .type = INTEL_DRAM_DDR4,   .page_mask = 0x3 },
	{ .num_channels = 2, .type = INTEL_DRAM_DDR5,   .page_mask = 0x3 },
	{ .num_channels = 2, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x3 },
	{}
};
15783fa01d64SMatt Roper
tgl_bw_buddy_init(struct drm_i915_private * dev_priv)15793fa01d64SMatt Roper static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv)
15803fa01d64SMatt Roper {
15813fa01d64SMatt Roper enum intel_dram_type type = dev_priv->dram_info.type;
15823fa01d64SMatt Roper u8 num_channels = dev_priv->dram_info.num_channels;
15833fa01d64SMatt Roper const struct buddy_page_mask *table;
15845af5169dSMatt Roper unsigned long abox_mask = DISPLAY_INFO(dev_priv)->abox_mask;
158562afef28SMatt Roper int config, i;
15863fa01d64SMatt Roper
158747753748SMatt Roper /* BW_BUDDY registers are not used on dgpu's beyond DG1 */
158847753748SMatt Roper if (IS_DGFX(dev_priv) && !IS_DG1(dev_priv))
158947753748SMatt Roper return;
159047753748SMatt Roper
1591ea27113eSAditya Swarup if (IS_ALDERLAKE_S(dev_priv) ||
1592e5490979SDnyaneshwar Bhadane (IS_ROCKETLAKE(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)))
1593d1702963SMatt Roper /* Wa_1409767108 */
15943fa01d64SMatt Roper table = wa_1409767108_buddy_page_masks;
15953fa01d64SMatt Roper else
15963fa01d64SMatt Roper table = tgl_buddy_page_masks;
15973fa01d64SMatt Roper
159862afef28SMatt Roper for (config = 0; table[config].page_mask != 0; config++)
159962afef28SMatt Roper if (table[config].num_channels == num_channels &&
160062afef28SMatt Roper table[config].type == type)
16013fa01d64SMatt Roper break;
16023fa01d64SMatt Roper
160362afef28SMatt Roper if (table[config].page_mask == 0) {
1604569caa65SWambui Karuga drm_dbg(&dev_priv->drm,
1605569caa65SWambui Karuga "Unknown memory configuration; disabling address buddy logic.\n");
160662afef28SMatt Roper for_each_set_bit(i, &abox_mask, sizeof(abox_mask))
160762afef28SMatt Roper intel_de_write(dev_priv, BW_BUDDY_CTL(i),
160862afef28SMatt Roper BW_BUDDY_DISABLE);
16093fa01d64SMatt Roper } else {
161062afef28SMatt Roper for_each_set_bit(i, &abox_mask, sizeof(abox_mask)) {
161162afef28SMatt Roper intel_de_write(dev_priv, BW_BUDDY_PAGE_MASK(i),
161262afef28SMatt Roper table[config].page_mask);
161387e04f75SMatt Roper
1614c86ef50fSJosé Roberto de Souza /* Wa_22010178259:tgl,dg1,rkl,adl-s */
1615c86ef50fSJosé Roberto de Souza if (DISPLAY_VER(dev_priv) == 12)
161662afef28SMatt Roper intel_de_rmw(dev_priv, BW_BUDDY_CTL(i),
161787e04f75SMatt Roper BW_BUDDY_TLB_REQ_TIMER_MASK,
161862afef28SMatt Roper BW_BUDDY_TLB_REQ_TIMER(0x8));
161962afef28SMatt Roper }
16203fa01d64SMatt Roper }
16213fa01d64SMatt Roper }
16223fa01d64SMatt Roper
/*
 * Bring up the ICL+ display core per the Bspec init sequence: PCH
 * handshake, combo PHYs, PG1, CDCLK, DBUF, MBUS, BW_BUDDY, plus a number
 * of platform-specific workarounds. On resume the DMC program is reloaded.
 */
static void icl_display_core_init(struct drm_i915_private *dev_priv,
				  bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
	struct i915_power_well *well;

	/* Clear any DC state left over from boot/suspend. */
	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* Wa_14011294188:ehl,jsl,tgl,rkl,adl-s */
	if (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP &&
	    INTEL_PCH_TYPE(dev_priv) < PCH_DG1)
		intel_de_rmw(dev_priv, SOUTH_DSPCLK_GATE_D, 0,
			     PCH_DPMGUNIT_CLOCK_GATE_DISABLE);

	/* 1. Enable PCH reset handshake. */
	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));

	if (!HAS_DISPLAY(dev_priv))
		return;

	/* 2. Initialize all combo phys */
	intel_combo_phy_init(dev_priv);

	/*
	 * 3. Enable Power Well 1 (PG1).
	 * The AUX IO power wells will be enabled on demand.
	 */
	mutex_lock(&power_domains->lock);
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);
	mutex_unlock(&power_domains->lock);

	/* Display version 14: clear the PHY PG1 latch hold bits. */
	if (DISPLAY_VER(dev_priv) == 14)
		intel_de_rmw(dev_priv, DC_STATE_EN,
			     HOLD_PHY_PG1_LATCH | HOLD_PHY_CLKREQ_PG1_LATCH, 0);

	/* 4. Enable CDCLK. */
	intel_cdclk_init_hw(dev_priv);

	if (DISPLAY_VER(dev_priv) >= 12)
		gen12_dbuf_slices_config(dev_priv);

	/* 5. Enable DBUF. */
	gen9_dbuf_enable(dev_priv);

	/* 6. Setup MBUS. */
	icl_mbus_init(dev_priv);

	/* 7. Program arbiter BW_BUDDY registers */
	if (DISPLAY_VER(dev_priv) >= 12)
		tgl_bw_buddy_init(dev_priv);

	/* 8. Ensure PHYs have completed calibration and adaptation */
	if (IS_DG2(dev_priv))
		intel_snps_phy_wait_for_calibration(dev_priv);

	/* 9. XE2_HPD: Program CHICKEN_MISC_2 before any cursor or planes are enabled */
	if (DISPLAY_VER_FULL(dev_priv) == IP_VER(14, 1))
		/*
		 * NOTE(review): the set value here is the literal 1, not
		 * BMG_DARB_HALF_BLK_END_BURST - confirm against Bspec that
		 * bit 0 is really the intended bit to set.
		 */
		intel_de_rmw(dev_priv, CHICKEN_MISC_2, BMG_DARB_HALF_BLK_END_BURST, 1);

	/* The DMC program does not survive suspend; reload it on resume. */
	if (resume)
		intel_dmc_load_program(dev_priv);

	/* Wa_14011508470:tgl,dg1,rkl,adl-s,adl-p,dg2 */
	if (IS_DISPLAY_VER_FULL(dev_priv, IP_VER(12, 0), IP_VER(13, 0)))
		intel_de_rmw(dev_priv, GEN11_CHICKEN_DCPR_2, 0,
			     DCPR_CLEAR_MEMSTAT_DIS | DCPR_SEND_RESP_IMM |
			     DCPR_MASK_LPMODE | DCPR_MASK_MAXLATENCY_MEMUP_CLR);

	/* Wa_14011503030:xelpd */
	if (DISPLAY_VER(dev_priv) == 13)
		intel_de_write(dev_priv, XELPD_DISPLAY_ERR_FATAL_MASK, ~0);

	/* Wa_15013987218 */
	if (DISPLAY_VER(dev_priv) == 20) {
		intel_de_rmw(dev_priv, SOUTH_DSPCLK_GATE_D,
			     0, PCH_GMBUSUNIT_CLOCK_GATE_DISABLE);
		intel_de_rmw(dev_priv, SOUTH_DSPCLK_GATE_D,
			     PCH_GMBUSUNIT_CLOCK_GATE_DISABLE, 0);
	}
}
1704df0566a6SJani Nikula
/*
 * Tear down the ICL+ display core in reverse order of
 * icl_display_core_init(): DC states, DMC, DBUF, CDCLK, PG1, combo PHYs.
 */
static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
	struct i915_power_well *well;

	if (!HAS_DISPLAY(dev_priv))
		return;

	gen9_disable_dc_states(dev_priv);
	intel_dmc_disable_program(dev_priv);

	/* 1. Disable all display engine functions -> already done */

	/* 2. Disable DBUF */
	gen9_dbuf_disable(dev_priv);

	/* 3. Disable CD clock */
	intel_cdclk_uninit_hw(dev_priv);

	/* Display version 14: set the PHY PG1 latch hold bits. */
	if (DISPLAY_VER(dev_priv) == 14)
		intel_de_rmw(dev_priv, DC_STATE_EN, 0,
			     HOLD_PHY_PG1_LATCH | HOLD_PHY_CLKREQ_PG1_LATCH);

	/*
	 * 4. Disable Power Well 1 (PG1).
	 * The AUX IO power wells are toggled on demand, so they are already
	 * disabled at this point.
	 */
	mutex_lock(&power_domains->lock);
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);
	mutex_unlock(&power_domains->lock);

	/* 5. Uninitialize the combo phys. */
	intel_combo_phy_uninit(dev_priv);
}
1741df0566a6SJani Nikula
/*
 * Reconstruct the shadow copy of DISPLAY_PHY_CONTROL from the current power
 * well and lane status, since the register itself must never be read (see
 * the workaround comment below).
 */
static void chv_phy_control_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);

	/*
	 * DISPLAY_PHY_CONTROL can get corrupted if read. As a
	 * workaround never ever read DISPLAY_PHY_CONTROL, and
	 * instead maintain a shadow copy ourselves. Use the actual
	 * power well state and lane status to reconstruct the
	 * expected initial value.
	 */
	dev_priv->display.power.chv_phy_control =
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);

	/*
	 * If all lanes are disabled we leave the override disabled
	 * with all power down bits cleared to match the state we
	 * would use after disabling the port. Otherwise enable the
	 * override and set the lane powerdown bits according to the
	 * current lane status.
	 */
	if (intel_power_well_is_enabled(dev_priv, cmn_bc)) {
		u32 status = intel_de_read(dev_priv, DPLL(dev_priv, PIPE_A));
		unsigned int mask;

		/* An all-ones ready mask means all lanes are up -> no override. */
		mask = status & DPLL_PORTB_READY_MASK;
		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->display.power.chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);

		dev_priv->display.power.chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);

		/* Port C ready bits sit 4 bits above the port B ones. */
		mask = (status & DPLL_PORTC_READY_MASK) >> 4;
		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->display.power.chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);

		dev_priv->display.power.chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);

		dev_priv->display.power.chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);

		dev_priv->display.power.chv_phy_assert[DPIO_PHY0] = false;
	} else {
		/*
		 * NOTE(review): chv_phy_assert presumably arms PHY state
		 * assertions elsewhere for a powered-down PHY — verify
		 * against its users.
		 */
		dev_priv->display.power.chv_phy_assert[DPIO_PHY0] = true;
	}

	if (intel_power_well_is_enabled(dev_priv, cmn_d)) {
		u32 status = intel_de_read(dev_priv, DPIO_PHY_STATUS);
		unsigned int mask;

		mask = status & DPLL_PORTD_READY_MASK;

		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->display.power.chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);

		dev_priv->display.power.chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);

		dev_priv->display.power.chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);

		dev_priv->display.power.chv_phy_assert[DPIO_PHY1] = false;
	} else {
		dev_priv->display.power.chv_phy_assert[DPIO_PHY1] = true;
	}

	drm_dbg_kms(&dev_priv->drm, "Initial PHY_CONTROL=0x%08x\n",
		    dev_priv->display.power.chv_phy_control);

	/* Defer application of initial phy_control to enabling the powerwell */
}
1828df0566a6SJani Nikula
/*
 * VLV workaround: toggle the PHY sideband reset by power gating the common
 * lane well, so later modesets start from a properly reset PHY. Skipped when
 * the display looks already active (see the guard below).
 */
static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn =
		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
	struct i915_power_well *disp2d =
		lookup_power_well(dev_priv, VLV_DISP_PW_DISP2D);

	/* If the display might be already active skip this */
	if (intel_power_well_is_enabled(dev_priv, cmn) &&
	    intel_power_well_is_enabled(dev_priv, disp2d) &&
	    intel_de_read(dev_priv, DPIO_CTL) & DPIO_CMNRST)
		return;

	drm_dbg_kms(&dev_priv->drm, "toggling display PHY side reset\n");

	/* cmnlane needs DPLL registers */
	intel_power_well_enable(dev_priv, disp2d);

	/*
	 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
	 * Need to assert and de-assert PHY SB reset by gating the
	 * common lane power, then un-gating it.
	 * Simply ungating isn't enough to reset the PHY enough to get
	 * ports and lanes running.
	 */
	intel_power_well_disable(dev_priv, cmn);
}
1856df0566a6SJani Nikula
vlv_punit_is_power_gated(struct drm_i915_private * dev_priv,u32 reg0)1857df0566a6SJani Nikula static bool vlv_punit_is_power_gated(struct drm_i915_private *dev_priv, u32 reg0)
1858df0566a6SJani Nikula {
1859df0566a6SJani Nikula bool ret;
1860df0566a6SJani Nikula
1861df0566a6SJani Nikula vlv_punit_get(dev_priv);
1862df0566a6SJani Nikula ret = (vlv_punit_read(dev_priv, reg0) & SSPM0_SSC_MASK) == SSPM0_SSC_PWR_GATE;
1863df0566a6SJani Nikula vlv_punit_put(dev_priv);
1864df0566a6SJani Nikula
1865df0566a6SJani Nikula return ret;
1866df0566a6SJani Nikula }
1867df0566a6SJani Nikula
/* Warn if the Punit reports the VED power island was left ungated. */
static void assert_ved_power_gated(struct drm_i915_private *dev_priv)
{
	drm_WARN(&dev_priv->drm,
		 !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_VEDSSPM0),
		 "VED not power gated\n");
}
1874df0566a6SJani Nikula
/*
 * Warn if the ISP power island was left ungated. The check is skipped when
 * one of the listed Intel ISP PCI devices is present — presumably because
 * another driver may then legitimately own the island (NOTE(review): confirm).
 */
static void assert_isp_power_gated(struct drm_i915_private *dev_priv)
{
	static const struct pci_device_id isp_ids[] = {
		{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0f38)},
		{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x22b8)},
		{}
	};

	drm_WARN(&dev_priv->drm, !pci_dev_present(isp_ids) &&
		 !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_ISPSSPM0),
		 "ISP not power gated\n");
}
1887df0566a6SJani Nikula
1888df0566a6SJani Nikula static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);
1889df0566a6SJani Nikula
/**
 * intel_power_domains_init_hw - initialize hardware power domain state
 * @i915: i915 device instance
 * @resume: Called from resume code paths or not
 *
 * This function initializes the hardware power domain state and enables all
 * power wells belonging to the INIT power domain. Power wells in other
 * domains (and not in the INIT domain) are referenced or disabled by
 * intel_modeset_readout_hw_state(). After that the reference count of each
 * power well must match its HW enabled state, see
 * intel_power_domains_verify_state().
 *
 * It will return with power domains disabled (to be enabled later by
 * intel_power_domains_enable()) and must be paired with
 * intel_power_domains_driver_remove().
 */
void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
{
	struct i915_power_domains *power_domains = &i915->display.power.domains;

	power_domains->initializing = true;

	/* Platform specific display core init and workarounds. */
	if (DISPLAY_VER(i915) >= 11) {
		icl_display_core_init(i915, resume);
	} else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
		bxt_display_core_init(i915, resume);
	} else if (DISPLAY_VER(i915) == 9) {
		skl_display_core_init(i915, resume);
	} else if (IS_CHERRYVIEW(i915)) {
		mutex_lock(&power_domains->lock);
		chv_phy_control_init(i915);
		mutex_unlock(&power_domains->lock);
		assert_isp_power_gated(i915);
	} else if (IS_VALLEYVIEW(i915)) {
		mutex_lock(&power_domains->lock);
		vlv_cmnlane_wa(i915);
		mutex_unlock(&power_domains->lock);
		assert_ved_power_gated(i915);
		assert_isp_power_gated(i915);
	} else if (IS_BROADWELL(i915) || IS_HASWELL(i915)) {
		hsw_assert_cdclk(i915);
		intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
	} else if (IS_IVYBRIDGE(i915)) {
		intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
	}

	/*
	 * Keep all power wells enabled for any dependent HW access during
	 * initialization and to make sure we keep BIOS enabled display HW
	 * resources powered until display HW readout is complete. We drop
	 * this reference in intel_power_domains_enable().
	 */
	drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
	power_domains->init_wakeref =
		intel_display_power_get(i915, POWER_DOMAIN_INIT);

	/* Disable power support if the user asked so. */
	if (!i915->display.params.disable_power_well) {
		/* Extra long-term reference keeps the wells from toggling. */
		drm_WARN_ON(&i915->drm, power_domains->disable_wakeref);
		i915->display.power.domains.disable_wakeref = intel_display_power_get(i915,
										      POWER_DOMAIN_INIT);
	}
	intel_power_domains_sync_hw(i915);

	power_domains->initializing = false;
}
1956df0566a6SJani Nikula
/**
 * intel_power_domains_driver_remove - deinitialize hw power domain state
 * @i915: i915 device instance
 *
 * De-initializes the display power domain HW state. It also ensures that the
 * device stays powered up so that the driver can be reloaded.
 *
 * It must be called with power domains already disabled (after a call to
 * intel_power_domains_disable()) and must be paired with
 * intel_power_domains_init_hw().
 */
void intel_power_domains_driver_remove(struct drm_i915_private *i915)
{
	/* Take over the INIT reference acquired in intel_power_domains_init_hw(). */
	intel_wakeref_t wakeref __maybe_unused =
		fetch_and_zero(&i915->display.power.domains.init_wakeref);

	/* Remove the refcount we took to keep power well support disabled. */
	if (!i915->display.params.disable_power_well)
		intel_display_power_put(i915, POWER_DOMAIN_INIT,
					fetch_and_zero(&i915->display.power.domains.disable_wakeref));

	/* Flush pending async power-well disabling before verifying. */
	intel_display_power_flush_work_sync(i915);

	intel_power_domains_verify_state(i915);

	/* Keep the power well enabled, but cancel its rpm wakeref. */
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}
1985df0566a6SJani Nikula
/**
 * intel_power_domains_sanitize_state - sanitize power domains state
 * @i915: i915 device instance
 *
 * Sanitize the power domains state during driver loading and system resume.
 * The function will disable all display power wells that BIOS has enabled
 * without a user for it (any user for a power well has taken a reference
 * on it by the time this function is called, after the state of all the
 * pipe, encoder, etc. HW resources have been sanitized).
 */
void intel_power_domains_sanitize_state(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->display.power.domains;
	struct i915_power_well *power_well;

	mutex_lock(&power_domains->lock);

	/* Walk top-down so dependent wells go down before their parents. */
	for_each_power_well_reverse(i915, power_well) {
		/* Keep always-on wells, referenced wells and already-off wells. */
		if (power_well->desc->always_on || power_well->count ||
		    !intel_power_well_is_enabled(i915, power_well))
			continue;

		drm_dbg_kms(&i915->drm,
			    "BIOS left unused %s power well enabled, disabling it\n",
			    intel_power_well_name(power_well));
		intel_power_well_disable(i915, power_well);
	}

	mutex_unlock(&power_domains->lock);
}
2016d946bc44SImre Deak
2017d946bc44SImre Deak /**
2018df0566a6SJani Nikula * intel_power_domains_enable - enable toggling of display power wells
2019df0566a6SJani Nikula * @i915: i915 device instance
2020df0566a6SJani Nikula *
2021df0566a6SJani Nikula * Enable the ondemand enabling/disabling of the display power wells. Note that
2022df0566a6SJani Nikula * power wells not belonging to POWER_DOMAIN_INIT are allowed to be toggled
2023df0566a6SJani Nikula * only at specific points of the display modeset sequence, thus they are not
2024df0566a6SJani Nikula * affected by the intel_power_domains_enable()/disable() calls. The purpose
2025df0566a6SJani Nikula * of these function is to keep the rest of power wells enabled until the end
2026df0566a6SJani Nikula * of display HW readout (which will acquire the power references reflecting
2027df0566a6SJani Nikula * the current HW state).
2028df0566a6SJani Nikula */
intel_power_domains_enable(struct drm_i915_private * i915)2029df0566a6SJani Nikula void intel_power_domains_enable(struct drm_i915_private *i915)
2030df0566a6SJani Nikula {
2031df0566a6SJani Nikula intel_wakeref_t wakeref __maybe_unused =
2032e3e8148fSJani Nikula fetch_and_zero(&i915->display.power.domains.init_wakeref);
2033df0566a6SJani Nikula
2034df0566a6SJani Nikula intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
2035df0566a6SJani Nikula intel_power_domains_verify_state(i915);
2036df0566a6SJani Nikula }
2037df0566a6SJani Nikula
/**
 * intel_power_domains_disable - disable toggling of display power wells
 * @i915: i915 device instance
 *
 * Disable the ondemand enabling/disabling of the display power wells. See
 * intel_power_domains_enable() for which power wells this call controls.
 */
void intel_power_domains_disable(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->display.power.domains;

	/* Re-take the long-term INIT reference; it must not be held already. */
	drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
	power_domains->init_wakeref =
		intel_display_power_get(i915, POWER_DOMAIN_INIT);

	intel_power_domains_verify_state(i915);
}
2055df0566a6SJani Nikula
/**
 * intel_power_domains_suspend - suspend power domain state
 * @i915: i915 device instance
 * @s2idle: specifies whether we go to idle, or deeper sleep
 *
 * This function prepares the hardware power domain state before entering
 * system suspend.
 *
 * It must be called with power domains already disabled (after a call to
 * intel_power_domains_disable()) and paired with intel_power_domains_resume().
 */
void intel_power_domains_suspend(struct drm_i915_private *i915, bool s2idle)
{
	struct i915_power_domains *power_domains = &i915->display.power.domains;
	/* Drop the long-term INIT reference taken in intel_power_domains_disable(). */
	intel_wakeref_t wakeref __maybe_unused =
		fetch_and_zero(&power_domains->init_wakeref);

	intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);

	/*
	 * In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9
	 * support don't manually deinit the power domains. This also means the
	 * DMC firmware will stay active, it will power down any HW
	 * resources as required and also enable deeper system power states
	 * that would be blocked if the firmware was inactive.
	 */
	if (!(power_domains->allowed_dc_mask & DC_STATE_EN_DC9) && s2idle &&
	    intel_dmc_has_payload(i915)) {
		intel_display_power_flush_work(i915);
		intel_power_domains_verify_state(i915);
		return;
	}

	/*
	 * Even if power well support was disabled we still want to disable
	 * power wells if power domains must be deinitialized for suspend.
	 */
	if (!i915->display.params.disable_power_well)
		intel_display_power_put(i915, POWER_DOMAIN_INIT,
					fetch_and_zero(&i915->display.power.domains.disable_wakeref));

	intel_display_power_flush_work(i915);
	intel_power_domains_verify_state(i915);

	/* Platform specific display core teardown. */
	if (DISPLAY_VER(i915) >= 11)
		icl_display_core_uninit(i915);
	else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915))
		bxt_display_core_uninit(i915);
	else if (DISPLAY_VER(i915) == 9)
		skl_display_core_uninit(i915);

	/* Tell intel_power_domains_resume() that a full re-init is needed. */
	power_domains->display_core_suspended = true;
}
2109df0566a6SJani Nikula
/**
 * intel_power_domains_resume - resume power domain state
 * @i915: i915 device instance
 *
 * This function resume the hardware power domain state during system resume.
 *
 * It will return with power domain support disabled (to be enabled later by
 * intel_power_domains_enable()) and must be paired with
 * intel_power_domains_suspend().
 */
void intel_power_domains_resume(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->display.power.domains;

	if (power_domains->display_core_suspended) {
		/* Full re-init also re-takes the long-term INIT reference. */
		intel_power_domains_init_hw(i915, true);
		power_domains->display_core_suspended = false;
	} else {
		/* Core never went down; just re-take the INIT reference. */
		drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
		power_domains->init_wakeref =
			intel_display_power_get(i915, POWER_DOMAIN_INIT);
	}

	intel_power_domains_verify_state(i915);
}
2135df0566a6SJani Nikula
2136df0566a6SJani Nikula #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
2137df0566a6SJani Nikula
/* Dump each power well's refcount along with the use count of every domain it serves. */
static void intel_power_domains_dump_info(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->display.power.domains;
	struct i915_power_well *power_well;

	for_each_power_well(i915, power_well) {
		enum intel_display_power_domain domain;

		drm_dbg(&i915->drm, "%-25s %d\n",
			intel_power_well_name(power_well), intel_power_well_refcount(power_well));

		for_each_power_domain(domain, intel_power_well_domains(power_well))
			drm_dbg(&i915->drm, "  %-23s %d\n",
				intel_display_power_domain_str(domain),
				power_domains->domain_use_count[domain]);
	}
}
2155df0566a6SJani Nikula
/**
 * intel_power_domains_verify_state - verify the HW/SW state for all power wells
 * @i915: i915 device instance
 *
 * Verify if the reference count of each power well matches its HW enabled
 * state and the total refcount of the domains it belongs to. This must be
 * called after modeset HW state sanitization, which is responsible for
 * acquiring reference counts for any power wells in use and disabling the
 * ones left on by BIOS but not required by any active output.
 */
static void intel_power_domains_verify_state(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->display.power.domains;
	struct i915_power_well *power_well;
	bool dump_domain_info;

	mutex_lock(&power_domains->lock);

	verify_async_put_domains_state(power_domains);

	dump_domain_info = false;
	for_each_power_well(i915, power_well) {
		enum intel_display_power_domain domain;
		int domains_count;
		bool enabled;

		/*
		 * A well must be enabled exactly when it is referenced or
		 * marked always-on.
		 */
		enabled = intel_power_well_is_enabled(i915, power_well);
		if ((intel_power_well_refcount(power_well) ||
		     intel_power_well_is_always_on(power_well)) !=
		    enabled)
			drm_err(&i915->drm,
				"power well %s state mismatch (refcount %d/enabled %d)",
				intel_power_well_name(power_well),
				intel_power_well_refcount(power_well), enabled);

		/* The well refcount must equal the sum of its domains' use counts. */
		domains_count = 0;
		for_each_power_domain(domain, intel_power_well_domains(power_well))
			domains_count += power_domains->domain_use_count[domain];

		if (intel_power_well_refcount(power_well) != domains_count) {
			drm_err(&i915->drm,
				"power well %s refcount/domain refcount mismatch "
				"(refcount %d/domains refcount %d)\n",
				intel_power_well_name(power_well),
				intel_power_well_refcount(power_well),
				domains_count);
			dump_domain_info = true;
		}
	}

	/* Dump the full state at most once to avoid log spam. */
	if (dump_domain_info) {
		static bool dumped;

		if (!dumped) {
			intel_power_domains_dump_info(i915);
			dumped = true;
		}
	}

	mutex_unlock(&power_domains->lock);
}
2217df0566a6SJani Nikula
2218df0566a6SJani Nikula #else
2219df0566a6SJani Nikula
/* No-op stub: verification is only built with CONFIG_DRM_I915_DEBUG_RUNTIME_PM. */
static void intel_power_domains_verify_state(struct drm_i915_private *i915)
{
}
2223df0566a6SJani Nikula
2224df0566a6SJani Nikula #endif
2225071b68ccSRodrigo Vivi
/*
 * Late suspend step: enter the deepest platform display power state
 * (DC9 or PC8) and apply the south clock gating workaround.
 * Undone by intel_display_power_resume_early().
 */
void intel_display_power_suspend_late(struct drm_i915_private *i915)
{
	if (DISPLAY_VER(i915) >= 11 || IS_GEMINILAKE(i915) ||
	    IS_BROXTON(i915)) {
		bxt_enable_dc9(i915);
	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
		hsw_enable_pc8(i915);
	}

	/* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */
	if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1)
		intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS);
}
2239071b68ccSRodrigo Vivi
/*
 * Early resume step: leave the deep display power state entered by
 * intel_display_power_suspend_late() and clear the south clock workaround.
 */
void intel_display_power_resume_early(struct drm_i915_private *i915)
{
	if (DISPLAY_VER(i915) >= 11 || IS_GEMINILAKE(i915) ||
	    IS_BROXTON(i915)) {
		/* Sanitize any DC state left over from suspend before leaving DC9. */
		gen9_sanitize_dc_state(i915);
		bxt_disable_dc9(i915);
	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
		hsw_disable_pc8(i915);
	}

	/* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */
	if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1)
		intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0);
}
2254071b68ccSRodrigo Vivi
/*
 * Tear down the display core and enter the platform's deep display power
 * state (DC9/PC8). Paired with intel_display_power_resume().
 */
void intel_display_power_suspend(struct drm_i915_private *i915)
{
	if (DISPLAY_VER(i915) >= 11) {
		icl_display_core_uninit(i915);
		bxt_enable_dc9(i915);
	} else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
		bxt_display_core_uninit(i915);
		bxt_enable_dc9(i915);
	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
		hsw_enable_pc8(i915);
	}
}
2267071b68ccSRodrigo Vivi
intel_display_power_resume(struct drm_i915_private * i915)2268071b68ccSRodrigo Vivi void intel_display_power_resume(struct drm_i915_private *i915)
2269071b68ccSRodrigo Vivi {
2270825f0de2SJani Nikula struct i915_power_domains *power_domains = &i915->display.power.domains;
2271825f0de2SJani Nikula
2272005e9537SMatt Roper if (DISPLAY_VER(i915) >= 11) {
2273071b68ccSRodrigo Vivi bxt_disable_dc9(i915);
2274071b68ccSRodrigo Vivi icl_display_core_init(i915, true);
227503256487SAnusha Srivatsa if (intel_dmc_has_payload(i915)) {
2276825f0de2SJani Nikula if (power_domains->allowed_dc_mask & DC_STATE_EN_UPTO_DC6)
2277071b68ccSRodrigo Vivi skl_enable_dc6(i915);
2278825f0de2SJani Nikula else if (power_domains->allowed_dc_mask & DC_STATE_EN_UPTO_DC5)
2279071b68ccSRodrigo Vivi gen9_enable_dc5(i915);
2280071b68ccSRodrigo Vivi }
228170bfb307SMatt Roper } else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
2282071b68ccSRodrigo Vivi bxt_disable_dc9(i915);
2283071b68ccSRodrigo Vivi bxt_display_core_init(i915, true);
228403256487SAnusha Srivatsa if (intel_dmc_has_payload(i915) &&
2285825f0de2SJani Nikula (power_domains->allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
2286071b68ccSRodrigo Vivi gen9_enable_dc5(i915);
2287071b68ccSRodrigo Vivi } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
2288071b68ccSRodrigo Vivi hsw_disable_pc8(i915);
2289071b68ccSRodrigo Vivi }
2290071b68ccSRodrigo Vivi }
22916abf2fc0SJani Nikula
intel_display_power_debug(struct drm_i915_private * i915,struct seq_file * m)22926abf2fc0SJani Nikula void intel_display_power_debug(struct drm_i915_private *i915, struct seq_file *m)
22936abf2fc0SJani Nikula {
2294e3e8148fSJani Nikula struct i915_power_domains *power_domains = &i915->display.power.domains;
22956abf2fc0SJani Nikula int i;
22966abf2fc0SJani Nikula
22976abf2fc0SJani Nikula mutex_lock(&power_domains->lock);
22986abf2fc0SJani Nikula
22996abf2fc0SJani Nikula seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
23006abf2fc0SJani Nikula for (i = 0; i < power_domains->power_well_count; i++) {
23016abf2fc0SJani Nikula struct i915_power_well *power_well;
23026abf2fc0SJani Nikula enum intel_display_power_domain power_domain;
23036abf2fc0SJani Nikula
23046abf2fc0SJani Nikula power_well = &power_domains->power_wells[i];
23053ab5e051SImre Deak seq_printf(m, "%-25s %d\n", intel_power_well_name(power_well),
23063ab5e051SImre Deak intel_power_well_refcount(power_well));
23076abf2fc0SJani Nikula
23083ab5e051SImre Deak for_each_power_domain(power_domain, intel_power_well_domains(power_well))
23096abf2fc0SJani Nikula seq_printf(m, " %-23s %d\n",
23106abf2fc0SJani Nikula intel_display_power_domain_str(power_domain),
23116abf2fc0SJani Nikula power_domains->domain_use_count[power_domain]);
23126abf2fc0SJani Nikula }
23136abf2fc0SJani Nikula
23146abf2fc0SJani Nikula mutex_unlock(&power_domains->lock);
23156abf2fc0SJani Nikula }
2316979e1b32SImre Deak
/*
 * Describes one contiguous range of DDI ports and AUX channels on a given
 * platform, along with the power domain of the *first* port/AUX channel in
 * the range for each domain kind.  The accessor functions below derive the
 * domain for a specific port/AUX channel by adding its offset from
 * port_start/aux_ch_start to the corresponding base domain.
 */
struct intel_ddi_port_domains {
	enum port port_start;		/* first DDI port covered by this entry */
	enum port port_end;		/* last DDI port covered (inclusive) */
	enum aux_ch aux_ch_start;	/* first AUX channel covered */
	enum aux_ch aux_ch_end;		/* last AUX channel covered (inclusive) */

	/*
	 * Base domains for port_start/aux_ch_start; POWER_DOMAIN_INVALID
	 * when the domain kind doesn't exist for this range.
	 */
	enum intel_display_power_domain ddi_lanes;
	enum intel_display_power_domain ddi_io;
	enum intel_display_power_domain aux_io;
	enum intel_display_power_domain aux_legacy_usbc;
	enum intel_display_power_domain aux_tbt;
};
2329979e1b32SImre Deak
/*
 * Port/AUX power domain ranges for platforms below display version 11
 * (see intel_port_domains_for_platform()): one flat range, no TBT AUX.
 */
static const struct intel_ddi_port_domains
i9xx_port_domains[] = {
	{
		.port_start = PORT_A,
		.port_end = PORT_F,
		.aux_ch_start = AUX_CH_A,
		.aux_ch_end = AUX_CH_F,

		.ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A,
		.ddi_io = POWER_DOMAIN_PORT_DDI_IO_A,
		.aux_io = POWER_DOMAIN_AUX_IO_A,
		.aux_legacy_usbc = POWER_DOMAIN_AUX_A,
		.aux_tbt = POWER_DOMAIN_INVALID,
	},
};
2345979e1b32SImre Deak
/*
 * Port/AUX power domain ranges for display version 11: combo ports A-B,
 * then ports C-F which also have TBT AUX domains.
 */
static const struct intel_ddi_port_domains
d11_port_domains[] = {
	{
		.port_start = PORT_A,
		.port_end = PORT_B,
		.aux_ch_start = AUX_CH_A,
		.aux_ch_end = AUX_CH_B,

		.ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A,
		.ddi_io = POWER_DOMAIN_PORT_DDI_IO_A,
		.aux_io = POWER_DOMAIN_AUX_IO_A,
		.aux_legacy_usbc = POWER_DOMAIN_AUX_A,
		.aux_tbt = POWER_DOMAIN_INVALID,
	}, {
		.port_start = PORT_C,
		.port_end = PORT_F,
		.aux_ch_start = AUX_CH_C,
		.aux_ch_end = AUX_CH_F,

		.ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_C,
		.ddi_io = POWER_DOMAIN_PORT_DDI_IO_C,
		.aux_io = POWER_DOMAIN_AUX_IO_C,
		.aux_legacy_usbc = POWER_DOMAIN_AUX_C,
		.aux_tbt = POWER_DOMAIN_AUX_TBT1,
	},
};
2372979e1b32SImre Deak
/*
 * Port/AUX power domain ranges for display version 12: combo ports A-C,
 * then Type-C ports TC1-TC6 (no AUX_IO domain, TBT AUX present).
 */
static const struct intel_ddi_port_domains
d12_port_domains[] = {
	{
		.port_start = PORT_A,
		.port_end = PORT_C,
		.aux_ch_start = AUX_CH_A,
		.aux_ch_end = AUX_CH_C,

		.ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A,
		.ddi_io = POWER_DOMAIN_PORT_DDI_IO_A,
		.aux_io = POWER_DOMAIN_AUX_IO_A,
		.aux_legacy_usbc = POWER_DOMAIN_AUX_A,
		.aux_tbt = POWER_DOMAIN_INVALID,
	}, {
		.port_start = PORT_TC1,
		.port_end = PORT_TC6,
		.aux_ch_start = AUX_CH_USBC1,
		.aux_ch_end = AUX_CH_USBC6,

		.ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_TC1,
		.ddi_io = POWER_DOMAIN_PORT_DDI_IO_TC1,
		.aux_io = POWER_DOMAIN_INVALID,
		.aux_legacy_usbc = POWER_DOMAIN_AUX_USBC1,
		.aux_tbt = POWER_DOMAIN_AUX_TBT1,
	},
};
2399979e1b32SImre Deak
/*
 * Port/AUX power domain ranges for display version 13+: combo ports A-C,
 * Type-C ports TC1-TC4, and the XE_LPD-specific ports D/E.
 */
static const struct intel_ddi_port_domains
d13_port_domains[] = {
	{
		.port_start = PORT_A,
		.port_end = PORT_C,
		.aux_ch_start = AUX_CH_A,
		.aux_ch_end = AUX_CH_C,

		.ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A,
		.ddi_io = POWER_DOMAIN_PORT_DDI_IO_A,
		.aux_io = POWER_DOMAIN_AUX_IO_A,
		.aux_legacy_usbc = POWER_DOMAIN_AUX_A,
		.aux_tbt = POWER_DOMAIN_INVALID,
	}, {
		.port_start = PORT_TC1,
		.port_end = PORT_TC4,
		.aux_ch_start = AUX_CH_USBC1,
		.aux_ch_end = AUX_CH_USBC4,

		.ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_TC1,
		.ddi_io = POWER_DOMAIN_PORT_DDI_IO_TC1,
		.aux_io = POWER_DOMAIN_INVALID,
		.aux_legacy_usbc = POWER_DOMAIN_AUX_USBC1,
		.aux_tbt = POWER_DOMAIN_AUX_TBT1,
	}, {
		.port_start = PORT_D_XELPD,
		.port_end = PORT_E_XELPD,
		.aux_ch_start = AUX_CH_D_XELPD,
		.aux_ch_end = AUX_CH_E_XELPD,

		.ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_D,
		.ddi_io = POWER_DOMAIN_PORT_DDI_IO_D,
		.aux_io = POWER_DOMAIN_AUX_IO_D,
		.aux_legacy_usbc = POWER_DOMAIN_AUX_D,
		.aux_tbt = POWER_DOMAIN_INVALID,
	},
};
2437979e1b32SImre Deak
2438979e1b32SImre Deak static void
intel_port_domains_for_platform(struct drm_i915_private * i915,const struct intel_ddi_port_domains ** domains,int * domains_size)2439979e1b32SImre Deak intel_port_domains_for_platform(struct drm_i915_private *i915,
2440979e1b32SImre Deak const struct intel_ddi_port_domains **domains,
2441979e1b32SImre Deak int *domains_size)
2442979e1b32SImre Deak {
2443979e1b32SImre Deak if (DISPLAY_VER(i915) >= 13) {
2444979e1b32SImre Deak *domains = d13_port_domains;
2445979e1b32SImre Deak *domains_size = ARRAY_SIZE(d13_port_domains);
2446979e1b32SImre Deak } else if (DISPLAY_VER(i915) >= 12) {
2447979e1b32SImre Deak *domains = d12_port_domains;
2448979e1b32SImre Deak *domains_size = ARRAY_SIZE(d12_port_domains);
2449979e1b32SImre Deak } else if (DISPLAY_VER(i915) >= 11) {
2450979e1b32SImre Deak *domains = d11_port_domains;
2451979e1b32SImre Deak *domains_size = ARRAY_SIZE(d11_port_domains);
2452979e1b32SImre Deak } else {
2453979e1b32SImre Deak *domains = i9xx_port_domains;
2454979e1b32SImre Deak *domains_size = ARRAY_SIZE(i9xx_port_domains);
2455979e1b32SImre Deak }
2456979e1b32SImre Deak }
2457979e1b32SImre Deak
2458979e1b32SImre Deak static const struct intel_ddi_port_domains *
intel_port_domains_for_port(struct drm_i915_private * i915,enum port port)2459979e1b32SImre Deak intel_port_domains_for_port(struct drm_i915_private *i915, enum port port)
2460979e1b32SImre Deak {
2461979e1b32SImre Deak const struct intel_ddi_port_domains *domains;
2462979e1b32SImre Deak int domains_size;
2463979e1b32SImre Deak int i;
2464979e1b32SImre Deak
2465979e1b32SImre Deak intel_port_domains_for_platform(i915, &domains, &domains_size);
2466979e1b32SImre Deak for (i = 0; i < domains_size; i++)
2467979e1b32SImre Deak if (port >= domains[i].port_start && port <= domains[i].port_end)
2468979e1b32SImre Deak return &domains[i];
2469979e1b32SImre Deak
2470979e1b32SImre Deak return NULL;
2471979e1b32SImre Deak }
2472979e1b32SImre Deak
2473979e1b32SImre Deak enum intel_display_power_domain
intel_display_power_ddi_io_domain(struct drm_i915_private * i915,enum port port)2474979e1b32SImre Deak intel_display_power_ddi_io_domain(struct drm_i915_private *i915, enum port port)
2475979e1b32SImre Deak {
2476979e1b32SImre Deak const struct intel_ddi_port_domains *domains = intel_port_domains_for_port(i915, port);
2477979e1b32SImre Deak
247810b85f0eSImre Deak if (drm_WARN_ON(&i915->drm, !domains || domains->ddi_io == POWER_DOMAIN_INVALID))
2479979e1b32SImre Deak return POWER_DOMAIN_PORT_DDI_IO_A;
2480979e1b32SImre Deak
24817ecc3cc8SImre Deak return domains->ddi_io + (int)(port - domains->port_start);
2482979e1b32SImre Deak }
2483979e1b32SImre Deak
2484979e1b32SImre Deak enum intel_display_power_domain
intel_display_power_ddi_lanes_domain(struct drm_i915_private * i915,enum port port)2485979e1b32SImre Deak intel_display_power_ddi_lanes_domain(struct drm_i915_private *i915, enum port port)
2486979e1b32SImre Deak {
2487979e1b32SImre Deak const struct intel_ddi_port_domains *domains = intel_port_domains_for_port(i915, port);
2488979e1b32SImre Deak
248910b85f0eSImre Deak if (drm_WARN_ON(&i915->drm, !domains || domains->ddi_lanes == POWER_DOMAIN_INVALID))
2490979e1b32SImre Deak return POWER_DOMAIN_PORT_DDI_LANES_A;
2491979e1b32SImre Deak
24927ecc3cc8SImre Deak return domains->ddi_lanes + (int)(port - domains->port_start);
2493979e1b32SImre Deak }
2494979e1b32SImre Deak
2495979e1b32SImre Deak static const struct intel_ddi_port_domains *
intel_port_domains_for_aux_ch(struct drm_i915_private * i915,enum aux_ch aux_ch)2496979e1b32SImre Deak intel_port_domains_for_aux_ch(struct drm_i915_private *i915, enum aux_ch aux_ch)
2497979e1b32SImre Deak {
2498979e1b32SImre Deak const struct intel_ddi_port_domains *domains;
2499979e1b32SImre Deak int domains_size;
2500979e1b32SImre Deak int i;
2501979e1b32SImre Deak
2502979e1b32SImre Deak intel_port_domains_for_platform(i915, &domains, &domains_size);
2503979e1b32SImre Deak for (i = 0; i < domains_size; i++)
2504979e1b32SImre Deak if (aux_ch >= domains[i].aux_ch_start && aux_ch <= domains[i].aux_ch_end)
2505979e1b32SImre Deak return &domains[i];
2506979e1b32SImre Deak
2507979e1b32SImre Deak return NULL;
2508979e1b32SImre Deak }
2509979e1b32SImre Deak
2510979e1b32SImre Deak enum intel_display_power_domain
intel_display_power_aux_io_domain(struct drm_i915_private * i915,enum aux_ch aux_ch)2511f645cbdaSImre Deak intel_display_power_aux_io_domain(struct drm_i915_private *i915, enum aux_ch aux_ch)
2512f645cbdaSImre Deak {
2513f645cbdaSImre Deak const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(i915, aux_ch);
2514f645cbdaSImre Deak
2515f645cbdaSImre Deak if (drm_WARN_ON(&i915->drm, !domains || domains->aux_io == POWER_DOMAIN_INVALID))
2516f645cbdaSImre Deak return POWER_DOMAIN_AUX_IO_A;
2517f645cbdaSImre Deak
2518f645cbdaSImre Deak return domains->aux_io + (int)(aux_ch - domains->aux_ch_start);
2519f645cbdaSImre Deak }
2520f645cbdaSImre Deak
2521f645cbdaSImre Deak enum intel_display_power_domain
intel_display_power_legacy_aux_domain(struct drm_i915_private * i915,enum aux_ch aux_ch)2522979e1b32SImre Deak intel_display_power_legacy_aux_domain(struct drm_i915_private *i915, enum aux_ch aux_ch)
2523979e1b32SImre Deak {
2524979e1b32SImre Deak const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(i915, aux_ch);
2525979e1b32SImre Deak
252610b85f0eSImre Deak if (drm_WARN_ON(&i915->drm, !domains || domains->aux_legacy_usbc == POWER_DOMAIN_INVALID))
2527979e1b32SImre Deak return POWER_DOMAIN_AUX_A;
2528979e1b32SImre Deak
25297ecc3cc8SImre Deak return domains->aux_legacy_usbc + (int)(aux_ch - domains->aux_ch_start);
2530979e1b32SImre Deak }
2531979e1b32SImre Deak
2532979e1b32SImre Deak enum intel_display_power_domain
intel_display_power_tbt_aux_domain(struct drm_i915_private * i915,enum aux_ch aux_ch)2533979e1b32SImre Deak intel_display_power_tbt_aux_domain(struct drm_i915_private *i915, enum aux_ch aux_ch)
2534979e1b32SImre Deak {
2535979e1b32SImre Deak const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(i915, aux_ch);
2536979e1b32SImre Deak
253710b85f0eSImre Deak if (drm_WARN_ON(&i915->drm, !domains || domains->aux_tbt == POWER_DOMAIN_INVALID))
2538979e1b32SImre Deak return POWER_DOMAIN_AUX_TBT1;
2539979e1b32SImre Deak
25407ecc3cc8SImre Deak return domains->aux_tbt + (int)(aux_ch - domains->aux_ch_start);
2541979e1b32SImre Deak }
2542