/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */

#include "display/intel_crt.h"
#include "display/intel_dp.h"

#include "i915_drv.h"
#include "i915_irq.h"
#include "intel_cdclk.h"
#include "intel_combo_phy.h"
#include "intel_csr.h"
#include "intel_display_power.h"
#include "intel_display_types.h"
#include "intel_dpio_phy.h"
#include "intel_hotplug.h"
#include "intel_pm.h"
#include "intel_sideband.h"
#include "intel_tc.h"
#include "intel_vga.h"

bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
					 enum i915_power_well_id power_well_id);

const char *
intel_display_power_domain_str(enum intel_display_power_domain domain)
{
	switch (domain) {
	case POWER_DOMAIN_DISPLAY_CORE:
		return "DISPLAY_CORE";
	case POWER_DOMAIN_PIPE_A:
		return "PIPE_A";
	case POWER_DOMAIN_PIPE_B:
		return "PIPE_B";
	case POWER_DOMAIN_PIPE_C:
		return "PIPE_C";
	case POWER_DOMAIN_PIPE_D:
		return "PIPE_D";
	case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
		return "PIPE_A_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
		return "PIPE_B_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
		return "PIPE_C_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_D_PANEL_FITTER:
		return "PIPE_D_PANEL_FITTER";
	case POWER_DOMAIN_TRANSCODER_A:
		return "TRANSCODER_A";
	case POWER_DOMAIN_TRANSCODER_B:
		return "TRANSCODER_B";
	case POWER_DOMAIN_TRANSCODER_C:
		return "TRANSCODER_C";
	case POWER_DOMAIN_TRANSCODER_D:
		return "TRANSCODER_D";
	case POWER_DOMAIN_TRANSCODER_EDP:
		return "TRANSCODER_EDP";
	case POWER_DOMAIN_TRANSCODER_VDSC_PW2:
		return "TRANSCODER_VDSC_PW2";
	case POWER_DOMAIN_TRANSCODER_DSI_A:
		return "TRANSCODER_DSI_A";
	case POWER_DOMAIN_TRANSCODER_DSI_C:
		return "TRANSCODER_DSI_C";
	case POWER_DOMAIN_PORT_DDI_A_LANES:
		return "PORT_DDI_A_LANES";
	case POWER_DOMAIN_PORT_DDI_B_LANES:
		return "PORT_DDI_B_LANES";
	case POWER_DOMAIN_PORT_DDI_C_LANES:
		return "PORT_DDI_C_LANES";
	case POWER_DOMAIN_PORT_DDI_D_LANES:
		return "PORT_DDI_D_LANES";
	case POWER_DOMAIN_PORT_DDI_E_LANES:
		return "PORT_DDI_E_LANES";
	case POWER_DOMAIN_PORT_DDI_F_LANES:
		return "PORT_DDI_F_LANES";
	case POWER_DOMAIN_PORT_DDI_G_LANES:
		return "PORT_DDI_G_LANES";
	case POWER_DOMAIN_PORT_DDI_H_LANES:
		return "PORT_DDI_H_LANES";
	case POWER_DOMAIN_PORT_DDI_I_LANES:
		return "PORT_DDI_I_LANES";
	case POWER_DOMAIN_PORT_DDI_A_IO:
		return "PORT_DDI_A_IO";
	case POWER_DOMAIN_PORT_DDI_B_IO:
		return "PORT_DDI_B_IO";
	case POWER_DOMAIN_PORT_DDI_C_IO:
		return "PORT_DDI_C_IO";
	case POWER_DOMAIN_PORT_DDI_D_IO:
		return "PORT_DDI_D_IO";
	case POWER_DOMAIN_PORT_DDI_E_IO:
		return "PORT_DDI_E_IO";
	case POWER_DOMAIN_PORT_DDI_F_IO:
		return "PORT_DDI_F_IO";
	case POWER_DOMAIN_PORT_DDI_G_IO:
		return "PORT_DDI_G_IO";
	case POWER_DOMAIN_PORT_DDI_H_IO:
		return "PORT_DDI_H_IO";
	case POWER_DOMAIN_PORT_DDI_I_IO:
		return "PORT_DDI_I_IO";
	case POWER_DOMAIN_PORT_DSI:
		return "PORT_DSI";
	case POWER_DOMAIN_PORT_CRT:
		return "PORT_CRT";
	case POWER_DOMAIN_PORT_OTHER:
		return "PORT_OTHER";
	case POWER_DOMAIN_VGA:
		return "VGA";
	case POWER_DOMAIN_AUDIO:
		return "AUDIO";
	case POWER_DOMAIN_AUX_A:
		return "AUX_A";
	case POWER_DOMAIN_AUX_B:
		return "AUX_B";
	case POWER_DOMAIN_AUX_C:
		return "AUX_C";
	case POWER_DOMAIN_AUX_D:
		return "AUX_D";
	case POWER_DOMAIN_AUX_E:
		return "AUX_E";
	case POWER_DOMAIN_AUX_F:
		return "AUX_F";
	case POWER_DOMAIN_AUX_G:
		return "AUX_G";
	case POWER_DOMAIN_AUX_H:
		return "AUX_H";
	case POWER_DOMAIN_AUX_I:
		return "AUX_I";
	case POWER_DOMAIN_AUX_IO_A:
		return "AUX_IO_A";
	case POWER_DOMAIN_AUX_C_TBT:
		return "AUX_C_TBT";
	case POWER_DOMAIN_AUX_D_TBT:
		return "AUX_D_TBT";
	case POWER_DOMAIN_AUX_E_TBT:
		return "AUX_E_TBT";
	case POWER_DOMAIN_AUX_F_TBT:
		return "AUX_F_TBT";
	case POWER_DOMAIN_AUX_G_TBT:
		return "AUX_G_TBT";
	case POWER_DOMAIN_AUX_H_TBT:
		return "AUX_H_TBT";
	case POWER_DOMAIN_AUX_I_TBT:
		return "AUX_I_TBT";
	case POWER_DOMAIN_GMBUS:
		return "GMBUS";
	case POWER_DOMAIN_INIT:
		return "INIT";
	case POWER_DOMAIN_MODESET:
		return "MODESET";
	case POWER_DOMAIN_GT_IRQ:
		return "GT_IRQ";
	case POWER_DOMAIN_DPLL_DC_OFF:
		return "DPLL_DC_OFF";
	default:
		MISSING_CASE(domain);
		return "?";
	}
}

static void intel_power_well_enable(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well)
{
	drm_dbg_kms(&dev_priv->drm, "enabling %s\n", power_well->desc->name);
	power_well->desc->ops->enable(dev_priv, power_well);
	power_well->hw_enabled = true;
}

static void intel_power_well_disable(struct drm_i915_private *dev_priv,
				     struct i915_power_well *power_well)
{
	drm_dbg_kms(&dev_priv->drm, "disabling %s\n", power_well->desc->name);
	power_well->hw_enabled = false;
	power_well->desc->ops->disable(dev_priv, power_well);
}

static void intel_power_well_get(struct drm_i915_private *dev_priv,
				 struct i915_power_well *power_well)
{
	if (!power_well->count++)
		intel_power_well_enable(dev_priv, power_well);
}

static void intel_power_well_put(struct drm_i915_private *dev_priv,
				 struct i915_power_well *power_well)
{
	drm_WARN(&dev_priv->drm, !power_well->count,
		 "Use count on power well %s is already zero",
		 power_well->desc->name);

	if (!--power_well->count)
		intel_power_well_disable(dev_priv, power_well);
}
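
/*
 * Illustrative sketch (not part of the driver): a caller holding
 * power_domains->lock bumps each power well backing a domain with
 * intel_power_well_get() and drops it again with intel_power_well_put();
 * only the 0 <-> 1 count transitions actually touch the hardware.
 */
static void __maybe_unused
example_power_well_get_put(struct drm_i915_private *dev_priv,
			   struct i915_power_well *power_well)
{
	intel_power_well_get(dev_priv, power_well); /* enables hw on first get */
	intel_power_well_put(dev_priv, power_well); /* disables hw on last put */
}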

/**
 * __intel_display_power_is_enabled - unlocked check for a power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to check
 *
 * This is the unlocked version of intel_display_power_is_enabled() and should
 * only be used from error capture and recovery code where deadlocks are
 * possible.
 *
 * Returns:
 * True when the power domain is enabled, false otherwise.
 */
bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
				      enum intel_display_power_domain domain)
{
	struct i915_power_well *power_well;
	bool is_enabled;

	if (dev_priv->runtime_pm.suspended)
		return false;

	is_enabled = true;

	for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain)) {
		if (power_well->desc->always_on)
			continue;

		if (!power_well->hw_enabled) {
			is_enabled = false;
			break;
		}
	}

	return is_enabled;
}
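
/*
 * Hedged usage sketch (illustrative only, not a real capture path): error
 * capture code can use the unlocked check to skip registers whose power
 * well may be down, without risking a deadlock on power_domains->lock.
 * POWER_DOMAIN_PIPE_A and the helper name are example choices.
 */
static u32 __maybe_unused
example_capture_pipe_a_reg(struct drm_i915_private *dev_priv, i915_reg_t reg)
{
	if (!__intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE_A))
		return 0; /* well may be down; reading could time out */

	return intel_de_read(dev_priv, reg);
}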

/**
 * intel_display_power_is_enabled - check for a power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to check
 *
 * This function can be used to check the hw power domain state. It is mostly
 * used in hardware state readout functions. Everywhere else code should rely
 * upon explicit power domain reference counting to ensure that the hardware
 * block is powered up before accessing it.
 *
 * Callers must hold the relevant modesetting locks to ensure that concurrent
 * threads can't disable the power well while the caller tries to read a few
 * registers.
 *
 * Returns:
 * True when the power domain is enabled, false otherwise.
 */
bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
				    enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	bool ret;

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);
	ret = __intel_display_power_is_enabled(dev_priv, domain);
	mutex_unlock(&power_domains->lock);

	return ret;
}
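
/*
 * Illustrative sketch of the explicit reference counting the kernel-doc
 * above refers to (assumed pattern, not a real call site): take a domain
 * reference, access the hardware, then drop the same wakeref.
 */
static void __maybe_unused
example_power_domain_ref(struct drm_i915_private *dev_priv)
{
	intel_wakeref_t wakeref;

	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
	/* ... PIPE_A registers can be safely accessed here ... */
	intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A, wakeref);
}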

/*
 * Starting with Haswell, we have a "Power Down Well" that can be turned off
 * when not needed anymore. We have 4 registers that can request the power well
 * to be enabled, and it will only be disabled if none of the registers is
 * requesting it to be enabled.
 */
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv,
				       u8 irq_pipe_mask, bool has_vga)
{
	if (has_vga)
		intel_vga_reset_io_mem(dev_priv);

	if (irq_pipe_mask)
		gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask);
}

static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv,
				       u8 irq_pipe_mask)
{
	if (irq_pipe_mask)
		gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask);
}

static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;

	/* Timeout for PW1: 10 us, AUX: not specified, other PWs: 20 us. */
	if (intel_de_wait_for_set(dev_priv, regs->driver,
				  HSW_PWR_WELL_CTL_STATE(pw_idx), 1)) {
		drm_dbg_kms(&dev_priv->drm, "%s power well enable timeout\n",
			    power_well->desc->name);

		/* An AUX timeout is expected if the TBT DP tunnel is down. */
		drm_WARN_ON(&dev_priv->drm, !power_well->desc->hsw.is_tc_tbt);
	}
}

static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv,
				     const struct i915_power_well_regs *regs,
				     int pw_idx)
{
	u32 req_mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
	u32 ret;

	ret = intel_de_read(dev_priv, regs->bios) & req_mask ? 1 : 0;
	ret |= intel_de_read(dev_priv, regs->driver) & req_mask ? 2 : 0;
	if (regs->kvmr.reg)
		ret |= intel_de_read(dev_priv, regs->kvmr) & req_mask ? 4 : 0;
	ret |= intel_de_read(dev_priv, regs->debug) & req_mask ? 8 : 0;

	return ret;
}
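
/*
 * Hedged debugging sketch (not driver code): the return value encodes one
 * request source per bit, which a caller could decode like this.
 */
static void __maybe_unused
example_dump_requesters(struct drm_i915_private *dev_priv,
			const struct i915_power_well_regs *regs, int pw_idx)
{
	u32 reqs = hsw_power_well_requesters(dev_priv, regs, pw_idx);

	/* bit 0: BIOS, bit 1: driver, bit 2: KVMr, bit 3: debug */
	drm_dbg_kms(&dev_priv->drm, "bios:%d driver:%d kvmr:%d debug:%d\n",
		    !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8));
}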

static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	bool disabled;
	u32 reqs;

	/*
	 * Bspec doesn't require waiting for PWs to get disabled, but still do
	 * this for paranoia. The known cases where a PW will be forced on:
	 * - a KVMR request on any power well via the KVMR request register
	 * - a DMC request on PW1 and MISC_IO power wells via the BIOS and
	 *   DEBUG request registers
	 * Skip the wait in case any of the request bits are set and print a
	 * diagnostic message.
	 */
	wait_for((disabled = !(intel_de_read(dev_priv, regs->driver) &
			       HSW_PWR_WELL_CTL_STATE(pw_idx))) ||
		 (reqs = hsw_power_well_requesters(dev_priv, regs, pw_idx)), 1);
	if (disabled)
		return;

	drm_dbg_kms(&dev_priv->drm,
		    "%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",
		    power_well->desc->name,
		    !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8));
}

static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv,
					   enum skl_power_gate pg)
{
	/* Timeout 5us for PG#0, for other PGs 1us */
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_wait_for_set(dev_priv, SKL_FUSE_STATUS,
					  SKL_FUSE_PG_DIST_STATUS(pg), 1));
}

static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	bool wait_fuses = power_well->desc->hsw.has_fuses;
	enum skl_power_gate uninitialized_var(pg);
	u32 val;

	if (wait_fuses) {
		pg = INTEL_GEN(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
						 SKL_PW_CTL_IDX_TO_PG(pw_idx);
		/*
		 * For PW1 we have to wait both for the PW0/PG0 fuse state
		 * before enabling the power well and PW1/PG1's own fuse
		 * state after the enabling. For all other power wells with
		 * fuses we only have to wait for that PW/PG's fuse state
		 * after the enabling.
		 */
		if (pg == SKL_PG1)
			gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0);
	}

	val = intel_de_read(dev_priv, regs->driver);
	intel_de_write(dev_priv, regs->driver,
		       val | HSW_PWR_WELL_CTL_REQ(pw_idx));
	hsw_wait_for_power_well_enable(dev_priv, power_well);

	/* Display WA #1178: cnl */
	if (IS_CANNONLAKE(dev_priv) &&
	    pw_idx >= GLK_PW_CTL_IDX_AUX_B &&
	    pw_idx <= CNL_PW_CTL_IDX_AUX_F) {
		val = intel_de_read(dev_priv, CNL_AUX_ANAOVRD1(pw_idx));
		val |= CNL_AUX_ANAOVRD1_ENABLE | CNL_AUX_ANAOVRD1_LDO_BYPASS;
		intel_de_write(dev_priv, CNL_AUX_ANAOVRD1(pw_idx), val);
	}

	if (wait_fuses)
		gen9_wait_for_power_well_fuses(dev_priv, pg);

	hsw_power_well_post_enable(dev_priv,
				   power_well->desc->hsw.irq_pipe_mask,
				   power_well->desc->hsw.has_vga);
}

static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	u32 val;

	hsw_power_well_pre_disable(dev_priv,
				   power_well->desc->hsw.irq_pipe_mask);

	val = intel_de_read(dev_priv, regs->driver);
	intel_de_write(dev_priv, regs->driver,
		       val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
	hsw_wait_for_power_well_disable(dev_priv, power_well);
}

#define ICL_AUX_PW_TO_PHY(pw_idx)	((pw_idx) - ICL_PW_CTL_IDX_AUX_A)

static void
icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx);
	u32 val;

	drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));

	val = intel_de_read(dev_priv, regs->driver);
	intel_de_write(dev_priv, regs->driver,
		       val | HSW_PWR_WELL_CTL_REQ(pw_idx));

	if (INTEL_GEN(dev_priv) < 12) {
		val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy));
		intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy),
			       val | ICL_LANE_ENABLE_AUX);
	}

	hsw_wait_for_power_well_enable(dev_priv, power_well);

	/* Display WA #1178: icl */
	if (pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B &&
	    !intel_bios_is_port_edp(dev_priv, (enum port)phy)) {
		val = intel_de_read(dev_priv, ICL_AUX_ANAOVRD1(pw_idx));
		val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS;
		intel_de_write(dev_priv, ICL_AUX_ANAOVRD1(pw_idx), val);
	}
}

static void
icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
				     struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx);
	u32 val;

	drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));

	val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy));
	intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy),
		       val & ~ICL_LANE_ENABLE_AUX);

	val = intel_de_read(dev_priv, regs->driver);
	intel_de_write(dev_priv, regs->driver,
		       val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));

	hsw_wait_for_power_well_disable(dev_priv, power_well);
}

#define ICL_AUX_PW_TO_CH(pw_idx)	\
	((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A)

#define ICL_TBT_AUX_PW_TO_CH(pw_idx)	\
	((pw_idx) - ICL_PW_CTL_IDX_AUX_TBT1 + AUX_CH_C)

static enum aux_ch icl_tc_phy_aux_ch(struct drm_i915_private *dev_priv,
				     struct i915_power_well *power_well)
{
	int pw_idx = power_well->desc->hsw.idx;

	return power_well->desc->hsw.is_tc_tbt ? ICL_TBT_AUX_PW_TO_CH(pw_idx) :
						 ICL_AUX_PW_TO_CH(pw_idx);
}
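
/*
 * Worked example (illustrative only): with pw_idx == ICL_PW_CTL_IDX_AUX_TBT1
 * the TBT mapping above yields AUX_CH_C, i.e. the first TBT power well
 * shares AUX channel C on ICL.
 */
static enum aux_ch __maybe_unused example_tbt1_aux_ch(void)
{
	return ICL_TBT_AUX_PW_TO_CH(ICL_PW_CTL_IDX_AUX_TBT1);
}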

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)

static u64 async_put_domains_mask(struct i915_power_domains *power_domains);

static int power_well_async_ref_count(struct drm_i915_private *dev_priv,
				      struct i915_power_well *power_well)
{
	int refs = hweight64(power_well->desc->domains &
			     async_put_domains_mask(&dev_priv->power_domains));

	drm_WARN_ON(&dev_priv->drm, refs > power_well->count);

	return refs;
}

static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	enum aux_ch aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well);
	struct intel_digital_port *dig_port = NULL;
	struct intel_encoder *encoder;

	/* Bypass the check if all references are released asynchronously */
	if (power_well_async_ref_count(dev_priv, power_well) ==
	    power_well->count)
		return;

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		enum phy phy = intel_port_to_phy(dev_priv, encoder->port);

		if (!intel_phy_is_tc(dev_priv, phy))
			continue;

		/* We'll check the MST primary port */
		if (encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		dig_port = enc_to_dig_port(encoder);
		if (drm_WARN_ON(&dev_priv->drm, !dig_port))
			continue;

		if (dig_port->aux_ch != aux_ch) {
			dig_port = NULL;
			continue;
		}

		break;
	}

	if (drm_WARN_ON(&dev_priv->drm, !dig_port))
		return;

	drm_WARN_ON(&dev_priv->drm, !intel_tc_port_ref_held(dig_port));
}

#else

static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
}

#endif

#define TGL_AUX_PW_TO_TC_PORT(pw_idx)	((pw_idx) - TGL_PW_CTL_IDX_AUX_TC1)

static void
icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
				 struct i915_power_well *power_well)
{
	enum aux_ch aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well);
	u32 val;

	icl_tc_port_assert_ref_held(dev_priv, power_well);

	val = intel_de_read(dev_priv, DP_AUX_CH_CTL(aux_ch));
	val &= ~DP_AUX_CH_CTL_TBT_IO;
	if (power_well->desc->hsw.is_tc_tbt)
		val |= DP_AUX_CH_CTL_TBT_IO;
	intel_de_write(dev_priv, DP_AUX_CH_CTL(aux_ch), val);

	hsw_power_well_enable(dev_priv, power_well);

	if (INTEL_GEN(dev_priv) >= 12 && !power_well->desc->hsw.is_tc_tbt) {
		enum tc_port tc_port;

		tc_port = TGL_AUX_PW_TO_TC_PORT(power_well->desc->hsw.idx);
		intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
			       HIP_INDEX_VAL(tc_port, 0x2));

		if (intel_de_wait_for_set(dev_priv, DKL_CMN_UC_DW_27(tc_port),
					  DKL_CMN_UC_DW27_UC_HEALTH, 1))
			drm_warn(&dev_priv->drm,
				 "Timeout waiting for TC uC health\n");
	}
}

static void
icl_tc_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	icl_tc_port_assert_ref_held(dev_priv, power_well);

	hsw_power_well_disable(dev_priv, power_well);
}

/*
 * We should only use the power well if we explicitly asked the hardware to
 * enable it, so check if it's enabled and also check if we've requested it to
 * be enabled.
 */
static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	enum i915_power_well_id id = power_well->desc->id;
	int pw_idx = power_well->desc->hsw.idx;
	u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx) |
		   HSW_PWR_WELL_CTL_STATE(pw_idx);
	u32 val;

	val = intel_de_read(dev_priv, regs->driver);

	/*
	 * On GEN9 big core due to a DMC bug the driver's request bits for PW1
	 * and the MISC_IO PW will not be restored, so check instead for the
	 * BIOS's own request bits, which are forced-on for these power wells
	 * when exiting DC5/6.
	 */
	if (IS_GEN(dev_priv, 9) && !IS_GEN9_LP(dev_priv) &&
	    (id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO))
		val |= intel_de_read(dev_priv, regs->bios);

	return (val & mask) == mask;
}

static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
{
	drm_WARN_ONCE(&dev_priv->drm,
		      (intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC9),
		      "DC9 already programmed to be enabled.\n");
	drm_WARN_ONCE(&dev_priv->drm,
		      intel_de_read(dev_priv, DC_STATE_EN) &
		      DC_STATE_EN_UPTO_DC5,
		      "DC5 still not disabled to enable DC9.\n");
	drm_WARN_ONCE(&dev_priv->drm,
		      intel_de_read(dev_priv, HSW_PWR_WELL_CTL2) &
		      HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2),
		      "Power well 2 on.\n");
	drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv),
		      "Interrupts not disabled yet.\n");

	/*
	 * TODO: check for the following to verify the conditions to enter DC9
	 * state are satisfied:
	 * 1] Check relevant display engine registers to verify if mode set
	 * disable sequence was followed.
	 * 2] Check if display uninitialize sequence is initialized.
	 */
}

static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
{
	drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv),
		      "Interrupts not disabled yet.\n");
	drm_WARN_ONCE(&dev_priv->drm,
		      intel_de_read(dev_priv, DC_STATE_EN) &
		      DC_STATE_EN_UPTO_DC5,
		      "DC5 still not disabled.\n");

	/*
	 * TODO: check for the following to verify DC9 state was indeed
	 * entered before programming to disable it:
	 * 1] Check relevant display engine registers to verify if mode
	 *  set disable sequence was followed.
	 * 2] Check if display uninitialize sequence is initialized.
	 */
}

static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
				u32 state)
{
	int rewrites = 0;
	int rereads = 0;
	u32 v;

	intel_de_write(dev_priv, DC_STATE_EN, state);

	/* It has been observed that disabling the DC6 state sometimes
	 * doesn't stick and the DMC keeps returning the old value. Make sure
	 * the write really sticks enough times and also force a rewrite until
	 * we are confident that the state is exactly what we want.
	 */
	do {
		v = intel_de_read(dev_priv, DC_STATE_EN);

		if (v != state) {
			intel_de_write(dev_priv, DC_STATE_EN, state);
			rewrites++;
			rereads = 0;
		} else if (rereads++ > 5) {
			break;
		}

	} while (rewrites < 100);

	if (v != state)
		drm_err(&dev_priv->drm,
			"Writing dc state to 0x%x failed, now 0x%x\n",
			state, v);

	/* Most of the time a single retry suffices, so avoid log spam */
	if (rewrites > 1)
		drm_dbg_kms(&dev_priv->drm,
			    "Rewrote dc state to 0x%x %d times\n",
			    state, rewrites);
}

static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
{
	u32 mask;

	mask = DC_STATE_EN_UPTO_DC5;

	if (INTEL_GEN(dev_priv) >= 12)
		mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6
					  | DC_STATE_EN_DC9;
	else if (IS_GEN(dev_priv, 11))
		mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9;
	else if (IS_GEN9_LP(dev_priv))
		mask |= DC_STATE_EN_DC9;
	else
		mask |= DC_STATE_EN_UPTO_DC6;

	return mask;
}

static void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = intel_de_read(dev_priv, DC_STATE_EN) & gen9_dc_mask(dev_priv);

	drm_dbg_kms(&dev_priv->drm,
		    "Resetting DC state tracking from %02x to %02x\n",
		    dev_priv->csr.dc_state, val);
	dev_priv->csr.dc_state = val;
}

/**
 * gen9_set_dc_state - set target display C power state
 * @dev_priv: i915 device instance
 * @state: target DC power state
 * - DC_STATE_DISABLE
 * - DC_STATE_EN_UPTO_DC5
 * - DC_STATE_EN_UPTO_DC6
 * - DC_STATE_EN_DC9
 *
 * Signal to DMC firmware/HW the target DC power state passed in @state.
 * DMC/HW can turn off individual display clocks and power rails when entering
 * a deeper DC power state (higher in number) and turns them back on when
 * exiting that state to a shallower power state (lower in number). The HW
 * will decide when to actually enter a given state on an on-demand basis, for
 * instance depending on the active state of display pipes. The state of
 * display registers backed by affected power rails is saved/restored as
 * needed.
 *
 * Based on the above, enabling a deeper DC power state is asynchronous wrt.
 * the HW actually entering it. Disabling a deeper power state is synchronous:
 * for instance setting %DC_STATE_DISABLE won't complete until all HW
 * resources are turned back on and register state is restored. This is
 * guaranteed by the MMIO write to DC_STATE_EN blocking until the state is
 * restored.
 */
static void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
{
	u32 val;
	u32 mask;

	if (drm_WARN_ON_ONCE(&dev_priv->drm,
			     state & ~dev_priv->csr.allowed_dc_mask))
		state &= dev_priv->csr.allowed_dc_mask;

	val = intel_de_read(dev_priv, DC_STATE_EN);
	mask = gen9_dc_mask(dev_priv);
	drm_dbg_kms(&dev_priv->drm, "Setting DC state from %02x to %02x\n",
		    val & mask, state);

	/* Check if DMC is ignoring our DC state requests */
	if ((val & mask) != dev_priv->csr.dc_state)
		drm_err(&dev_priv->drm, "DC state mismatch (0x%x -> 0x%x)\n",
			dev_priv->csr.dc_state, val & mask);

	val &= ~mask;
	val |= state;

	gen9_write_dc_state(dev_priv, val);

	dev_priv->csr.dc_state = val & mask;
}
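
/*
 * Hedged sketch (illustrative, not a real call site): per the kernel-doc
 * above, exiting a DC state is synchronous while re-allowing one is not,
 * so a caller could bracket register access like this. DC6 is an example
 * target; real callers go through the DC-off power well instead.
 */
static void __maybe_unused
example_dc_state_toggle(struct drm_i915_private *dev_priv)
{
	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); /* synchronous exit */
	/* ... access registers backed by the DC5/DC6 power rails ... */
	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6); /* re-allow DC6 */
}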

static u32
sanitize_target_dc_state(struct drm_i915_private *dev_priv,
			 u32 target_dc_state)
{
	u32 states[] = {
		DC_STATE_EN_UPTO_DC6,
		DC_STATE_EN_UPTO_DC5,
		DC_STATE_EN_DC3CO,
		DC_STATE_DISABLE,
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(states) - 1; i++) {
		if (target_dc_state != states[i])
			continue;

		if (dev_priv->csr.allowed_dc_mask & target_dc_state)
			break;

		target_dc_state = states[i + 1];
	}

	return target_dc_state;
}
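
/*
 * Worked example (illustrative only): if DC6 is requested while only DC5 is
 * in allowed_dc_mask, the loop above steps DC6 -> DC5 and stops there; with
 * an empty mask it keeps falling back until DC_STATE_DISABLE.
 */
static u32 __maybe_unused
example_sanitize_dc6(struct drm_i915_private *dev_priv)
{
	return sanitize_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
}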

static void tgl_enable_dc3co(struct drm_i915_private *dev_priv)
{
	drm_dbg_kms(&dev_priv->drm, "Enabling DC3CO\n");
	gen9_set_dc_state(dev_priv, DC_STATE_EN_DC3CO);
}

static void tgl_disable_dc3co(struct drm_i915_private *dev_priv)
{
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "Disabling DC3CO\n");
	val = intel_de_read(dev_priv, DC_STATE_EN);
	val &= ~DC_STATE_DC3CO_STATUS;
	intel_de_write(dev_priv, DC_STATE_EN, val);
	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
	/*
	 * Delay of 200us for the DC3CO exit time, see Bspec 49196.
	 */
	usleep_range(200, 210);
}

static void bxt_enable_dc9(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc9(dev_priv);

	drm_dbg_kms(&dev_priv->drm, "Enabling DC9\n");
	/*
	 * Power sequencer reset is not needed on
	 * platforms with South Display Engine on PCH,
	 * because PPS registers are always on.
	 */
	if (!HAS_PCH_SPLIT(dev_priv))
		intel_power_sequencer_reset(dev_priv);
	gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
}

static void bxt_disable_dc9(struct drm_i915_private *dev_priv)
{
	assert_can_disable_dc9(dev_priv);

	drm_dbg_kms(&dev_priv->drm, "Disabling DC9\n");

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	intel_pps_unlock_regs_wa(dev_priv);
}

static void assert_csr_loaded(struct drm_i915_private *dev_priv)
{
	drm_WARN_ONCE(&dev_priv->drm,
		      !intel_de_read(dev_priv, CSR_PROGRAM(0)),
		      "CSR program storage start is NULL\n");
	drm_WARN_ONCE(&dev_priv->drm, !intel_de_read(dev_priv, CSR_SSP_BASE),
		      "CSR SSP Base Not fine\n");
	drm_WARN_ONCE(&dev_priv->drm, !intel_de_read(dev_priv, CSR_HTP_SKL),
		      "CSR HTP Not fine\n");
}

static struct i915_power_well *
lookup_power_well(struct drm_i915_private *dev_priv,
		  enum i915_power_well_id power_well_id)
{
	struct i915_power_well *power_well;

	for_each_power_well(dev_priv, power_well)
		if (power_well->desc->id == power_well_id)
			return power_well;

	/*
	 * It's not feasible to add error checking code to the callers since
	 * this condition really shouldn't happen and it doesn't even make sense
	 * to abort things like display initialization sequences. Just return
	 * the first power well and hope the WARN gets reported so we can fix
	 * our driver.
	 */
	drm_WARN(&dev_priv->drm, 1,
		 "Power well %d not defined for this platform\n",
		 power_well_id);
	return &dev_priv->power_domains.power_wells[0];
}

/**
 * intel_display_power_set_target_dc_state - Set target dc state.
 * @dev_priv: i915 device
 * @state: state which needs to be set as target_dc_state.
 *
 * This function sets the "DC off" power well target_dc_state;
 * based upon this target_dc_state the "DC off" power well will
 * enable the desired DC state.
 */
void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv,
					     u32 state)
{
	struct i915_power_well *power_well;
	bool dc_off_enabled;
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);
	power_well = lookup_power_well(dev_priv, SKL_DISP_DC_OFF);

	if (drm_WARN_ON(&dev_priv->drm, !power_well))
		goto unlock;

	state = sanitize_target_dc_state(dev_priv, state);

	if (state == dev_priv->csr.target_dc_state)
		goto unlock;

	dc_off_enabled = power_well->desc->ops->is_enabled(dev_priv,
							   power_well);
	/*
	 * If the DC off power well is disabled, it needs to be enabled and
	 * then disabled again for the new target DC state to take effect.
	 */
	if (!dc_off_enabled)
		power_well->desc->ops->enable(dev_priv, power_well);

	dev_priv->csr.target_dc_state = state;

	if (!dc_off_enabled)
		power_well->desc->ops->disable(dev_priv, power_well);

unlock:
	mutex_unlock(&power_domains->lock);
}
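
/*
 * Hedged usage sketch (illustrative; the actual call sites live elsewhere,
 * e.g. in PSR code): requesting DC3CO as the target DC state.
 */
static void __maybe_unused
example_request_dc3co(struct drm_i915_private *dev_priv)
{
	intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_DC3CO);
}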

static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
{
	bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
					SKL_DISP_PW_2);

	drm_WARN_ONCE(&dev_priv->drm, pg2_enabled,
		      "PG2 not disabled to enable DC5.\n");

	drm_WARN_ONCE(&dev_priv->drm,
		      (intel_de_read(dev_priv, DC_STATE_EN) &
		       DC_STATE_EN_UPTO_DC5),
		      "DC5 already programmed to be enabled.\n");
	assert_rpm_wakelock_held(&dev_priv->runtime_pm);

	assert_csr_loaded(dev_priv);
}

static void gen9_enable_dc5(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc5(dev_priv);

	drm_dbg_kms(&dev_priv->drm, "Enabling DC5\n");

	/* Wa Display #1183: skl,kbl,cfl */
	if (IS_GEN9_BC(dev_priv))
		intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1,
			       intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT);

	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
}

static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
{
	drm_WARN_ONCE(&dev_priv->drm,
		      intel_de_read(dev_priv, UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
		      "Backlight is not disabled.\n");
	drm_WARN_ONCE(&dev_priv->drm,
		      (intel_de_read(dev_priv, DC_STATE_EN) &
		       DC_STATE_EN_UPTO_DC6),
		      "DC6 already programmed to be enabled.\n");

	assert_csr_loaded(dev_priv);
}

static void skl_enable_dc6(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc6(dev_priv);

	drm_dbg_kms(&dev_priv->drm, "Enabling DC6\n");

	/* Wa Display #1183: skl,kbl,cfl */
	if (IS_GEN9_BC(dev_priv))
		intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1,
			       intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT);

	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
}

static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
	u32 bios_req = intel_de_read(dev_priv, regs->bios);

	/* Take over the request bit if set by BIOS. */
	if (bios_req & mask) {
		u32 drv_req = intel_de_read(dev_priv, regs->driver);

		if (!(drv_req & mask))
			intel_de_write(dev_priv, regs->driver, drv_req | mask);
		intel_de_write(dev_priv, regs->bios, bios_req & ~mask);
	}
}

static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	bxt_ddi_phy_init(dev_priv, power_well->desc->bxt.phy);
}

static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	bxt_ddi_phy_uninit(dev_priv, power_well->desc->bxt.phy);
}

static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	return bxt_ddi_phy_is_enabled(dev_priv, power_well->desc->bxt.phy);
}

static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *power_well;

	power_well = lookup_power_well(dev_priv, BXT_DISP_PW_DPIO_CMN_A);
	if (power_well->count > 0)
		bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);

	power_well = lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
	if (power_well->count > 0)
		bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);

	if (IS_GEMINILAKE(dev_priv)) {
		power_well = lookup_power_well(dev_priv,
					       GLK_DISP_PW_DPIO_CMN_C);
		if (power_well->count > 0)
			bxt_ddi_phy_verify_state(dev_priv,
						 power_well->desc->bxt.phy);
	}
}

static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	return ((intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC3CO) == 0 &&
		(intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0);
}

static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
{
	u8 hw_enabled_dbuf_slices = intel_enabled_dbuf_slices_mask(dev_priv);
	u8 enabled_dbuf_slices = dev_priv->enabled_dbuf_slices_mask;

	drm_WARN(&dev_priv->drm,
		 hw_enabled_dbuf_slices != enabled_dbuf_slices,
		 "Unexpected DBuf power state (0x%08x, expected 0x%08x)\n",
		 hw_enabled_dbuf_slices,
		 enabled_dbuf_slices);
}

static void gen9_disable_dc_states(struct drm_i915_private *dev_priv)
{
	struct intel_cdclk_config cdclk_config = {};

	if (dev_priv->csr.target_dc_state == DC_STATE_EN_DC3CO) {
		tgl_disable_dc3co(dev_priv);
		return;
	}

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	dev_priv->display.get_cdclk(dev_priv, &cdclk_config);
	/* Can't read out voltage_level so can't use intel_cdclk_changed() */
	drm_WARN_ON(&dev_priv->drm,
		    intel_cdclk_needs_modeset(&dev_priv->cdclk.hw,
					      &cdclk_config));

	gen9_assert_dbuf_enabled(dev_priv);

	if (IS_GEN9_LP(dev_priv))
		bxt_verify_ddi_phy_power_wells(dev_priv);

	if (INTEL_GEN(dev_priv) >= 11)
		/*
		 * DMC retains HW context only for port A, the other combo
		 * PHY's HW context for port B is lost after DC transitions,
		 * so we need to restore it manually.
		 */
		intel_combo_phy_init(dev_priv);
}

static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	gen9_disable_dc_states(dev_priv);
}

static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	if (!dev_priv->csr.dmc_payload)
		return;

	switch (dev_priv->csr.target_dc_state) {
	case DC_STATE_EN_DC3CO:
		tgl_enable_dc3co(dev_priv);
		break;
	case DC_STATE_EN_UPTO_DC6:
		skl_enable_dc6(dev_priv);
		break;
	case DC_STATE_EN_UPTO_DC5:
		gen9_enable_dc5(dev_priv);
		break;
	}
}

static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv,
					 struct i915_power_well *power_well)
{
}

static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
}

static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
					      struct i915_power_well *power_well)
{
	return true;
}

static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv,
					 struct i915_power_well *power_well)
{
	if ((intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0)
		i830_enable_pipe(dev_priv, PIPE_A);
	if ((intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0)
		i830_enable_pipe(dev_priv, PIPE_B);
}

static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	i830_disable_pipe(dev_priv, PIPE_B);
	i830_disable_pipe(dev_priv, PIPE_A);
}

static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	return intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE &&
		intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
}

static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	if (power_well->count > 0)
		i830_pipes_power_well_enable(dev_priv, power_well);
	else
		i830_pipes_power_well_disable(dev_priv, power_well);
}

static void vlv_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	int pw_idx = power_well->desc->vlv.idx;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(pw_idx);
	state = enable ? PUNIT_PWRGT_PWR_ON(pw_idx) :
			 PUNIT_PWRGT_PWR_GATE(pw_idx);

	vlv_punit_get(dev_priv);

#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)

	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
	ctrl &= ~mask;
	ctrl |= state;
	vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);

	if (wait_for(COND, 100))
		drm_err(&dev_priv->drm,
			"timeout setting power well state %08x (%08x)\n",
			state,
			vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));

#undef COND

out:
	vlv_punit_put(dev_priv);
}

static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, true);
}

static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, false);
}

static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	int pw_idx = power_well->desc->vlv.idx;
	bool enabled = false;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(pw_idx);
	ctrl = PUNIT_PWRGT_PWR_ON(pw_idx);

	vlv_punit_get(dev_priv);

	state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	drm_WARN_ON(&dev_priv->drm, state != PUNIT_PWRGT_PWR_ON(pw_idx) &&
		    state != PUNIT_PWRGT_PWR_GATE(pw_idx));
	if (state == ctrl)
		enabled = true;

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
	drm_WARN_ON(&dev_priv->drm, ctrl != state);

	vlv_punit_put(dev_priv);

	return enabled;
}

static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
{
	u32 val;

	/*
	 * On driver load, a pipe may be active and driving a DSI display.
	 * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
	 * (and never recovering) in this case. intel_dsi_post_disable() will
	 * clear it when we turn off the display.
	 */
	val = intel_de_read(dev_priv, DSPCLK_GATE_D);
	val &= DPOUNIT_CLOCK_GATE_DISABLE;
	val |= VRHUNIT_CLOCK_GATE_DISABLE;
	intel_de_write(dev_priv, DSPCLK_GATE_D, val);

	/*
	 * Disable trickle feed and enable pnd deadline calculation
	 */
	intel_de_write(dev_priv, MI_ARB_VLV,
		       MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
	intel_de_write(dev_priv, CBR1_VLV, 0);

	drm_WARN_ON(&dev_priv->drm, RUNTIME_INFO(dev_priv)->rawclk_freq == 0);
	intel_de_write(dev_priv, RAWCLK_FREQ_VLV,
		       DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq,
					 1000));
}

static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	enum pipe pipe;

	/*
	 * Enable the CRI clock source so we can get at the
	 * display and the reference clock for VGA
	 * hotplug / manual detection. Supposedly DSI also
	 * needs the ref clock up and running.
	 *
	 * CHV DPLL B/C have some issues if VGA mode is enabled.
	 */
	for_each_pipe(dev_priv, pipe) {
		u32 val = intel_de_read(dev_priv, DPLL(pipe));

		val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
		if (pipe != PIPE_A)
			val |= DPLL_INTEGRATED_CRI_CLK_VLV;

		intel_de_write(dev_priv, DPLL(pipe), val);
	}

	vlv_init_display_clock_gating(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_enable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * During driver initialization/resume we can avoid restoring the
	 * part of the HW/SW state that will be inited anyway explicitly.
	 */
	if (dev_priv->power_domains.initializing)
		return;

	intel_hpd_init(dev_priv);

	/* Re-enable the ADPA, if we have one */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		if (encoder->type == INTEL_OUTPUT_ANALOG)
			intel_crt_reset(&encoder->base);
	}

	intel_vga_redisable_power_on(dev_priv);

	intel_pps_unlock_regs_wa(dev_priv);
}

static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_disable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* make sure we're done processing display irqs */
	intel_synchronize_irq(dev_priv);

	intel_power_sequencer_reset(dev_priv);

	/* Prevent us from re-enabling polling by accident in late suspend */
	if (!dev_priv->drm.dev->power.is_suspended)
		intel_hpd_poll_init(dev_priv);
}

static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, true);

	vlv_display_power_well_init(dev_priv);
}

static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	vlv_display_power_well_deinit(dev_priv);

	vlv_set_power_well(dev_priv, power_well, false);
}

static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */

	vlv_set_power_well(dev_priv, power_well, true);

	/*
	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
	 *  6.	De-assert cmn_reset/side_reset. Same as VLV X0.
	 *   a.	GUnit 0x2110 bit[0] set to 1 (def 0)
	 *   b.	The other bits such as sfr settings / modesel may all
	 *	be set to 0.
	 *
	 * This should only be done on init and resume from S3 with
	 * both PLLs disabled, or we risk losing DPIO and PLL
	 * synchronization.
	 */
	intel_de_write(dev_priv, DPIO_CTL,
		       intel_de_read(dev_priv, DPIO_CTL) | DPIO_CMNRST);
}

static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe)
		assert_pll_disabled(dev_priv, pipe);

	/* Assert common reset */
	intel_de_write(dev_priv, DPIO_CTL,
		       intel_de_read(dev_priv, DPIO_CTL) & ~DPIO_CMNRST);

	vlv_set_power_well(dev_priv, power_well, false);
}

#define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0))

#define BITS_SET(val, bits) (((val) & (bits)) == (bits))

static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
	u32 phy_control = dev_priv->chv_phy_control;
	u32 phy_status = 0;
	u32 phy_status_mask = 0xffffffff;

	/*
	 * The BIOS can leave the PHY in some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
	if (!dev_priv->chv_phy_assert[DPIO_PHY0])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
				     PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));

	if (!dev_priv->chv_phy_assert[DPIO_PHY1])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));

	if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY0);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);

		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);

		/* CL1 is on whenever anything is on in either channel */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);

		/*
		 * The DPLLB check accounts for the pipe B + port A usage
		 * with CL2 powered up but all the lanes in the second channel
		 * powered down.
		 */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
		    (intel_de_read(dev_priv, DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
	}

	if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY1);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
	}

	phy_status &= phy_status_mask;

	/*
	 * The PHY may be busy with some initial calibration and whatnot,
	 * so the power state can take a while to actually change.
	 */
	if (intel_de_wait_for_register(dev_priv, DISPLAY_PHY_STATUS,
				       phy_status_mask, phy_status, 10))
		drm_err(&dev_priv->drm,
			"Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
			intel_de_read(dev_priv, DISPLAY_PHY_STATUS) & phy_status_mask,
			phy_status, dev_priv->chv_phy_control);
}

#undef BITS_SET

static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	enum dpio_phy phy;
	enum pipe pipe;
	u32 tmp;

	drm_WARN_ON_ONCE(&dev_priv->drm,
			 power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
			 power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);

	if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
		pipe = PIPE_A;
		phy = DPIO_PHY0;
	} else {
		pipe = PIPE_C;
		phy = DPIO_PHY1;
	}

	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
	vlv_set_power_well(dev_priv, power_well, true);

	/* Poll for phypwrgood signal */
	if (intel_de_wait_for_set(dev_priv, DISPLAY_PHY_STATUS,
				  PHY_POWERGOOD(phy), 1))
		drm_err(&dev_priv->drm, "Display PHY %d did not power up\n",
			phy);

	vlv_dpio_get(dev_priv);

	/* Enable dynamic power down */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
	tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
		DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);

	if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
		tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
		tmp |= DPIO_DYNPWRDOWNEN_CH1;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
	} else {
		/*
		 * Force the non-existing CL2 off. BXT does this
		 * too, so maybe it saves some power even though
		 * CL2 doesn't exist?
		 */
		tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
		tmp |= DPIO_CL2_LDOFUSE_PWRENB;
		vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
	}

	vlv_dpio_put(dev_priv);

	dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
	intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
		       dev_priv->chv_phy_control);

	drm_dbg_kms(&dev_priv->drm,
		    "Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		    phy, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);
}

static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum dpio_phy phy;

	drm_WARN_ON_ONCE(&dev_priv->drm,
			 power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
			 power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);

	if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
		phy = DPIO_PHY0;
		assert_pll_disabled(dev_priv, PIPE_A);
		assert_pll_disabled(dev_priv, PIPE_B);
	} else {
		phy = DPIO_PHY1;
		assert_pll_disabled(dev_priv, PIPE_C);
	}

	dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
	intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
		       dev_priv->chv_phy_control);

	vlv_set_power_well(dev_priv, power_well, false);

	drm_dbg_kms(&dev_priv->drm,
		    "Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		    phy, dev_priv->chv_phy_control);

	/* PHY is fully reset now, so we can enable the PHY state asserts */
	dev_priv->chv_phy_assert[phy] = true;

	assert_chv_phy_status(dev_priv);
}
1618 
1619 static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
1620 				     enum dpio_channel ch, bool override, unsigned int mask)
1621 {
1622 	enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
1623 	u32 reg, val, expected, actual;
1624 
1625 	/*
1626 	 * The BIOS can leave the PHY is some weird state
1627 	 * where it doesn't fully power down some parts.
1628 	 * Disable the asserts until the PHY has been fully
1629 	 * reset (ie. the power well has been disabled at
1630 	 * least once).
1631 	 */
1632 	if (!dev_priv->chv_phy_assert[phy])
1633 		return;
1634 
1635 	if (ch == DPIO_CH0)
1636 		reg = _CHV_CMN_DW0_CH0;
1637 	else
1638 		reg = _CHV_CMN_DW6_CH1;
1639 
1640 	vlv_dpio_get(dev_priv);
1641 	val = vlv_dpio_read(dev_priv, pipe, reg);
1642 	vlv_dpio_put(dev_priv);
1643 
1644 	/*
1645 	 * This assumes !override is only used when the port is disabled.
1646 	 * All lanes should power down even without the override when
1647 	 * the port is disabled.
1648 	 */
1649 	if (!override || mask == 0xf) {
1650 		expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
1651 		/*
1652 		 * If CH1 common lane is not active anymore
1653 		 * (eg. for pipe B DPLL) the entire channel will
1654 		 * shut down, which causes the common lane registers
1655 		 * to read as 0. That means we can't actually check
1656 		 * the lane power down status bits, but as the entire
1657 		 * register reads as 0 it's a good indication that the
1658 		 * channel is indeed entirely powered down.
1659 		 */
1660 		if (ch == DPIO_CH1 && val == 0)
1661 			expected = 0;
1662 	} else if (mask != 0x0) {
1663 		expected = DPIO_ANYDL_POWERDOWN;
1664 	} else {
1665 		expected = 0;
1666 	}
1667 
1668 	if (ch == DPIO_CH0)
1669 		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
1670 	else
1671 		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
1672 	actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
1673 
1674 	drm_WARN(&dev_priv->drm, actual != expected,
1675 		 "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
1676 		 !!(actual & DPIO_ALLDL_POWERDOWN),
1677 		 !!(actual & DPIO_ANYDL_POWERDOWN),
1678 		 !!(expected & DPIO_ALLDL_POWERDOWN),
1679 		 !!(expected & DPIO_ANYDL_POWERDOWN),
1680 		 reg, val);
1681 }
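
/*
 * For reference, the expected lane power down status derived from the
 * checks above (illustrative summary only, not used by the code):
 *
 *	override  mask     expected
 *	--------  -------  ---------------------------------------------
 *	false     any      DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN
 *	true      0xf      DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN
 *	true      0x1-0xe  DPIO_ANYDL_POWERDOWN
 *	true      0x0      0
 *
 * The CH1 special case (the whole register reading back as 0 once the
 * common lane shuts down) turns the first two rows into an expected
 * value of 0.
 */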
1682 
1683 bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
1684 			  enum dpio_channel ch, bool override)
1685 {
1686 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1687 	bool was_override;
1688 
1689 	mutex_lock(&power_domains->lock);
1690 
1691 	was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1692 
1693 	if (override == was_override)
1694 		goto out;
1695 
1696 	if (override)
1697 		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1698 	else
1699 		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1700 
1701 	intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
1702 		       dev_priv->chv_phy_control);
1703 
1704 	drm_dbg_kms(&dev_priv->drm,
1705 		    "Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
1706 		    phy, ch, dev_priv->chv_phy_control);
1707 
1708 	assert_chv_phy_status(dev_priv);
1709 
1710 out:
1711 	mutex_unlock(&power_domains->lock);
1712 
1713 	return was_override;
1714 }
1715 
1716 void chv_phy_powergate_lanes(struct intel_encoder *encoder,
1717 			     bool override, unsigned int mask)
1718 {
1719 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1720 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1721 	enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(encoder));
1722 	enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(encoder));
1723 
1724 	mutex_lock(&power_domains->lock);
1725 
1726 	dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
1727 	dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);
1728 
1729 	if (override)
1730 		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1731 	else
1732 		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1733 
1734 	intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
1735 		       dev_priv->chv_phy_control);
1736 
1737 	drm_dbg_kms(&dev_priv->drm,
1738 		    "Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
1739 		    phy, ch, mask, dev_priv->chv_phy_control);
1740 
1741 	assert_chv_phy_status(dev_priv);
1742 
1743 	assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);
1744 
1745 	mutex_unlock(&power_domains->lock);
1746 }
1747 
1748 static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
1749 					struct i915_power_well *power_well)
1750 {
1751 	enum pipe pipe = PIPE_A;
1752 	bool enabled;
1753 	u32 state, ctrl;
1754 
1755 	vlv_punit_get(dev_priv);
1756 
1757 	state = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe);
1758 	/*
1759 	 * We only ever set the power-on and power-gate states; anything
1760 	 * else is unexpected.
1761 	 */
1762 	drm_WARN_ON(&dev_priv->drm, state != DP_SSS_PWR_ON(pipe) &&
1763 		    state != DP_SSS_PWR_GATE(pipe));
1764 	enabled = state == DP_SSS_PWR_ON(pipe);
1765 
1766 	/*
1767 	 * A transient state at this point would mean some unexpected party
1768 	 * is poking at the power controls too.
1769 	 */
1770 	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe);
1771 	drm_WARN_ON(&dev_priv->drm, ctrl << 16 != state);
1772 
1773 	vlv_punit_put(dev_priv);
1774 
1775 	return enabled;
1776 }
1777 
1778 static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
1779 				    struct i915_power_well *power_well,
1780 				    bool enable)
1781 {
1782 	enum pipe pipe = PIPE_A;
1783 	u32 state;
1784 	u32 ctrl;
1785 
1786 	state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);
1787 
1788 	vlv_punit_get(dev_priv);
1789 
1790 #define COND \
1791 	((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe)) == state)
1792 
1793 	if (COND)
1794 		goto out;
1795 
1796 	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
1797 	ctrl &= ~DP_SSC_MASK(pipe);
1798 	ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
1799 	vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, ctrl);
1800 
1801 	if (wait_for(COND, 100))
1802 		drm_err(&dev_priv->drm,
1803 			"timeout setting power well state %08x (%08x)\n",
1804 			state,
1805 			vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM));
1806 
1807 #undef COND
1808 
1809 out:
1810 	vlv_punit_put(dev_priv);
1811 }
1812 
1813 static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
1814 					struct i915_power_well *power_well)
1815 {
1816 	intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
1817 		       dev_priv->chv_phy_control);
1818 }
1819 
1820 static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
1821 				       struct i915_power_well *power_well)
1822 {
1823 	chv_set_pipe_power_well(dev_priv, power_well, true);
1824 
1825 	vlv_display_power_well_init(dev_priv);
1826 }
1827 
1828 static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
1829 					struct i915_power_well *power_well)
1830 {
1831 	vlv_display_power_well_deinit(dev_priv);
1832 
1833 	chv_set_pipe_power_well(dev_priv, power_well, false);
1834 }
1835 
1836 static u64 __async_put_domains_mask(struct i915_power_domains *power_domains)
1837 {
1838 	return power_domains->async_put_domains[0] |
1839 	       power_domains->async_put_domains[1];
1840 }
1841 
1842 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
1843 
1844 static bool
1845 assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
1846 {
1847 	return !WARN_ON(power_domains->async_put_domains[0] &
1848 			power_domains->async_put_domains[1]);
1849 }
1850 
1851 static bool
1852 __async_put_domains_state_ok(struct i915_power_domains *power_domains)
1853 {
1854 	enum intel_display_power_domain domain;
1855 	bool err = false;
1856 
1857 	err |= !assert_async_put_domain_masks_disjoint(power_domains);
1858 	err |= WARN_ON(!!power_domains->async_put_wakeref !=
1859 		       !!__async_put_domains_mask(power_domains));
1860 
1861 	for_each_power_domain(domain, __async_put_domains_mask(power_domains))
1862 		err |= WARN_ON(power_domains->domain_use_count[domain] != 1);
1863 
1864 	return !err;
1865 }
1866 
1867 static void print_power_domains(struct i915_power_domains *power_domains,
1868 				const char *prefix, u64 mask)
1869 {
1870 	enum intel_display_power_domain domain;
1871 
1872 	DRM_DEBUG_DRIVER("%s (%lu):\n", prefix, hweight64(mask));
1873 	for_each_power_domain(domain, mask)
1874 		DRM_DEBUG_DRIVER("%s use_count %d\n",
1875 				 intel_display_power_domain_str(domain),
1876 				 power_domains->domain_use_count[domain]);
1877 }
1878 
1879 static void
1880 print_async_put_domains_state(struct i915_power_domains *power_domains)
1881 {
1882 	DRM_DEBUG_DRIVER("async_put_wakeref %u\n",
1883 			 power_domains->async_put_wakeref);
1884 
1885 	print_power_domains(power_domains, "async_put_domains[0]",
1886 			    power_domains->async_put_domains[0]);
1887 	print_power_domains(power_domains, "async_put_domains[1]",
1888 			    power_domains->async_put_domains[1]);
1889 }
1890 
1891 static void
1892 verify_async_put_domains_state(struct i915_power_domains *power_domains)
1893 {
1894 	if (!__async_put_domains_state_ok(power_domains))
1895 		print_async_put_domains_state(power_domains);
1896 }
1897 
1898 #else
1899 
1900 static void
1901 assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
1902 {
1903 }
1904 
1905 static void
1906 verify_async_put_domains_state(struct i915_power_domains *power_domains)
1907 {
1908 }
1909 
1910 #endif /* CONFIG_DRM_I915_DEBUG_RUNTIME_PM */
1911 
1912 static u64 async_put_domains_mask(struct i915_power_domains *power_domains)
1913 {
1914 	assert_async_put_domain_masks_disjoint(power_domains);
1915 
1916 	return __async_put_domains_mask(power_domains);
1917 }
1918 
1919 static void
1920 async_put_domains_clear_domain(struct i915_power_domains *power_domains,
1921 			       enum intel_display_power_domain domain)
1922 {
1923 	assert_async_put_domain_masks_disjoint(power_domains);
1924 
1925 	power_domains->async_put_domains[0] &= ~BIT_ULL(domain);
1926 	power_domains->async_put_domains[1] &= ~BIT_ULL(domain);
1927 }
1928 
1929 static bool
1930 intel_display_power_grab_async_put_ref(struct drm_i915_private *dev_priv,
1931 				       enum intel_display_power_domain domain)
1932 {
1933 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1934 	bool ret = false;
1935 
1936 	if (!(async_put_domains_mask(power_domains) & BIT_ULL(domain)))
1937 		goto out_verify;
1938 
1939 	async_put_domains_clear_domain(power_domains, domain);
1940 
1941 	ret = true;
1942 
1943 	if (async_put_domains_mask(power_domains))
1944 		goto out_verify;
1945 
1946 	cancel_delayed_work(&power_domains->async_put_work);
1947 	intel_runtime_pm_put_raw(&dev_priv->runtime_pm,
1948 				 fetch_and_zero(&power_domains->async_put_wakeref));
1949 out_verify:
1950 	verify_async_put_domains_state(power_domains);
1951 
1952 	return ret;
1953 }
1954 
1955 static void
1956 __intel_display_power_get_domain(struct drm_i915_private *dev_priv,
1957 				 enum intel_display_power_domain domain)
1958 {
1959 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1960 	struct i915_power_well *power_well;
1961 
1962 	if (intel_display_power_grab_async_put_ref(dev_priv, domain))
1963 		return;
1964 
1965 	for_each_power_domain_well(dev_priv, power_well, BIT_ULL(domain))
1966 		intel_power_well_get(dev_priv, power_well);
1967 
1968 	power_domains->domain_use_count[domain]++;
1969 }
1970 
1971 /**
1972  * intel_display_power_get - grab a power domain reference
1973  * @dev_priv: i915 device instance
1974  * @domain: power domain to reference
1975  *
1976  * This function grabs a power domain reference for @domain and ensures that the
1977  * power domain and all its parents are powered up. Therefore users should only
1978  * grab a reference to the innermost power domain they need.
1979  *
1980  * Any power domain reference obtained by this function must have a symmetric
1981  * call to intel_display_power_put() to release the reference again.
1982  */
1983 intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
1984 					enum intel_display_power_domain domain)
1985 {
1986 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1987 	intel_wakeref_t wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
1988 
1989 	mutex_lock(&power_domains->lock);
1990 	__intel_display_power_get_domain(dev_priv, domain);
1991 	mutex_unlock(&power_domains->lock);
1992 
1993 	return wakeref;
1994 }
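
/*
 * Minimal usage sketch (the POWER_DOMAIN_PIPE_A choice and the
 * surrounding code are illustrative only):
 *
 *	intel_wakeref_t wakeref;
 *
 *	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
 *
 *	... access the hardware covered by the domain ...
 *
 *	intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A, wakeref);
 */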
1995 
1996 /**
1997  * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
1998  * @dev_priv: i915 device instance
1999  * @domain: power domain to reference
2000  *
2001  * This function grabs a power domain reference for @domain if the domain is
2002  * already enabled and ensures that it stays enabled until the reference is
2003  * released. It returns a wakeref on success and 0 if the domain was disabled.
2004  *
2005  * Any power domain reference obtained by this function must have a symmetric
2006  * call to intel_display_power_put() to release the reference again.
2007  */
2008 intel_wakeref_t
2009 intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
2010 				   enum intel_display_power_domain domain)
2011 {
2012 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2013 	intel_wakeref_t wakeref;
2014 	bool is_enabled;
2015 
2016 	wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm);
2017 	if (!wakeref)
2018 		return 0;
2019 
2020 	mutex_lock(&power_domains->lock);
2021 
2022 	if (__intel_display_power_is_enabled(dev_priv, domain)) {
2023 		__intel_display_power_get_domain(dev_priv, domain);
2024 		is_enabled = true;
2025 	} else {
2026 		is_enabled = false;
2027 	}
2028 
2029 	mutex_unlock(&power_domains->lock);
2030 
2031 	if (!is_enabled) {
2032 		intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2033 		wakeref = 0;
2034 	}
2035 
2036 	return wakeref;
2037 }
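
/*
 * Minimal usage sketch: bail out early when the domain isn't already
 * enabled, e.g. in a hardware state readout path (the domain choice is
 * illustrative only):
 *
 *	intel_wakeref_t wakeref;
 *
 *	wakeref = intel_display_power_get_if_enabled(dev_priv,
 *						     POWER_DOMAIN_PIPE_A);
 *	if (!wakeref)
 *		return false;
 *
 *	... read out the hardware state ...
 *
 *	intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A, wakeref);
 */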
2038 
2039 static void
2040 __intel_display_power_put_domain(struct drm_i915_private *dev_priv,
2041 				 enum intel_display_power_domain domain)
2042 {
2043 	struct i915_power_domains *power_domains;
2044 	struct i915_power_well *power_well;
2045 	const char *name = intel_display_power_domain_str(domain);
2046 
2047 	power_domains = &dev_priv->power_domains;
2048 
2049 	drm_WARN(&dev_priv->drm, !power_domains->domain_use_count[domain],
2050 		 "Use count on domain %s is already zero\n",
2051 		 name);
2052 	drm_WARN(&dev_priv->drm,
2053 		 async_put_domains_mask(power_domains) & BIT_ULL(domain),
2054 		 "Async disabling of domain %s is pending\n",
2055 		 name);
2056 
2057 	power_domains->domain_use_count[domain]--;
2058 
2059 	for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain))
2060 		intel_power_well_put(dev_priv, power_well);
2061 }
2062 
2063 static void __intel_display_power_put(struct drm_i915_private *dev_priv,
2064 				      enum intel_display_power_domain domain)
2065 {
2066 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2067 
2068 	mutex_lock(&power_domains->lock);
2069 	__intel_display_power_put_domain(dev_priv, domain);
2070 	mutex_unlock(&power_domains->lock);
2071 }
2072 
2073 /**
2074  * intel_display_power_put_unchecked - release an unchecked power domain reference
2075  * @dev_priv: i915 device instance
2076  * @domain: power domain to reference
2077  *
2078  * This function drops the power domain reference obtained by
2079  * intel_display_power_get() and might power down the corresponding hardware
2080  * block right away if this is the last reference.
2081  *
2082  * This function exists only for historical reasons and should be avoided in
2083  * new code, as the correctness of its use cannot be checked. Always use
2084  * intel_display_power_put() instead.
2085  */
2086 void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
2087 				       enum intel_display_power_domain domain)
2088 {
2089 	__intel_display_power_put(dev_priv, domain);
2090 	intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
2091 }
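
/*
 * The legacy pattern this function supports, shown for completeness only
 * (new code should keep the wakeref and use intel_display_power_put()):
 *
 *	intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
 *	...
 *	intel_display_power_put_unchecked(dev_priv, POWER_DOMAIN_PIPE_A);
 */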
2092 
2093 static void
2094 queue_async_put_domains_work(struct i915_power_domains *power_domains,
2095 			     intel_wakeref_t wakeref)
2096 {
2097 	WARN_ON(power_domains->async_put_wakeref);
2098 	power_domains->async_put_wakeref = wakeref;
2099 	WARN_ON(!queue_delayed_work(system_unbound_wq,
2100 				    &power_domains->async_put_work,
2101 				    msecs_to_jiffies(100)));
2102 }
2103 
2104 static void
2105 release_async_put_domains(struct i915_power_domains *power_domains, u64 mask)
2106 {
2107 	struct drm_i915_private *dev_priv =
2108 		container_of(power_domains, struct drm_i915_private,
2109 			     power_domains);
2110 	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
2111 	enum intel_display_power_domain domain;
2112 	intel_wakeref_t wakeref;
2113 
2114 	/*
2115 	 * The caller must already hold a raw wakeref, upgrade that to a proper
2116 	 * wakeref to make the state checker happy about the HW access during
2117 	 * power well disabling.
2118 	 */
2119 	assert_rpm_raw_wakeref_held(rpm);
2120 	wakeref = intel_runtime_pm_get(rpm);
2121 
2122 	for_each_power_domain(domain, mask) {
2123 		/* Clear before put, so put's sanity check is happy. */
2124 		async_put_domains_clear_domain(power_domains, domain);
2125 		__intel_display_power_put_domain(dev_priv, domain);
2126 	}
2127 
2128 	intel_runtime_pm_put(rpm, wakeref);
2129 }
2130 
2131 static void
2132 intel_display_power_put_async_work(struct work_struct *work)
2133 {
2134 	struct drm_i915_private *dev_priv =
2135 		container_of(work, struct drm_i915_private,
2136 			     power_domains.async_put_work.work);
2137 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2138 	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
2139 	intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(rpm);
2140 	intel_wakeref_t old_work_wakeref = 0;
2141 
2142 	mutex_lock(&power_domains->lock);
2143 
2144 	/*
2145 	 * Bail out if all the domain refs pending to be released were grabbed
2146 	 * by subsequent gets or a flush_work.
2147 	 */
2148 	old_work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
2149 	if (!old_work_wakeref)
2150 		goto out_verify;
2151 
2152 	release_async_put_domains(power_domains,
2153 				  power_domains->async_put_domains[0]);
2154 
2155 	/* Requeue the work if more domains were async put meanwhile. */
2156 	if (power_domains->async_put_domains[1]) {
2157 		power_domains->async_put_domains[0] =
2158 			fetch_and_zero(&power_domains->async_put_domains[1]);
2159 		queue_async_put_domains_work(power_domains,
2160 					     fetch_and_zero(&new_work_wakeref));
2161 	}
2162 
2163 out_verify:
2164 	verify_async_put_domains_state(power_domains);
2165 
2166 	mutex_unlock(&power_domains->lock);
2167 
2168 	if (old_work_wakeref)
2169 		intel_runtime_pm_put_raw(rpm, old_work_wakeref);
2170 	if (new_work_wakeref)
2171 		intel_runtime_pm_put_raw(rpm, new_work_wakeref);
2172 }
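
/*
 * Illustrative timeline of the two-slot queue handled above:
 *
 *	put_async(A):	async_put_domains[0] = {A}, work queued
 *	put_async(B):	async_put_domains[1] = {B} (work still pending)
 *	work runs:	releases {A}, moves {B} to slot 0, requeues itself
 *	work runs:	releases {B}
 *
 * A get() or intel_display_power_flush_work() in between can empty both
 * slots, in which case the work finds no wakeref left and bails out.
 */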
2173 
2174 /**
2175  * __intel_display_power_put_async - release a power domain reference asynchronously
2176  * @i915: i915 device instance
2177  * @domain: power domain to reference
2178  * @wakeref: wakeref acquired for the reference that is being released
2179  *
2180  * This function drops the power domain reference obtained by
2181  * intel_display_power_get*() and schedules a work to power down the
2182  * corresponding hardware block if this is the last reference.
2183  */
2184 void __intel_display_power_put_async(struct drm_i915_private *i915,
2185 				     enum intel_display_power_domain domain,
2186 				     intel_wakeref_t wakeref)
2187 {
2188 	struct i915_power_domains *power_domains = &i915->power_domains;
2189 	struct intel_runtime_pm *rpm = &i915->runtime_pm;
2190 	intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(rpm);
2191 
2192 	mutex_lock(&power_domains->lock);
2193 
2194 	if (power_domains->domain_use_count[domain] > 1) {
2195 		__intel_display_power_put_domain(i915, domain);
2196 
2197 		goto out_verify;
2198 	}
2199 
2200 	drm_WARN_ON(&i915->drm, power_domains->domain_use_count[domain] != 1);
2201 
2202 	/* Let a pending work requeue itself or queue a new one. */
2203 	if (power_domains->async_put_wakeref) {
2204 		power_domains->async_put_domains[1] |= BIT_ULL(domain);
2205 	} else {
2206 		power_domains->async_put_domains[0] |= BIT_ULL(domain);
2207 		queue_async_put_domains_work(power_domains,
2208 					     fetch_and_zero(&work_wakeref));
2209 	}
2210 
2211 out_verify:
2212 	verify_async_put_domains_state(power_domains);
2213 
2214 	mutex_unlock(&power_domains->lock);
2215 
2216 	if (work_wakeref)
2217 		intel_runtime_pm_put_raw(rpm, work_wakeref);
2218 
2219 	intel_runtime_pm_put(rpm, wakeref);
2220 }
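
/*
 * Minimal usage sketch via the intel_display_power_put_async() wrapper
 * (the AUX domain choice is illustrative only):
 *
 *	wakeref = intel_display_power_get(i915, POWER_DOMAIN_AUX_A);
 *	...
 *	intel_display_power_put_async(i915, POWER_DOMAIN_AUX_A, wakeref);
 *
 * The power wells backing the domain stay enabled for at least another
 * 100 ms, avoiding an off/on cycle if the domain is re-acquired soon.
 */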
2221 
2222 /**
2223  * intel_display_power_flush_work - flushes the async display power disabling work
2224  * @i915: i915 device instance
2225  *
2226  * Flushes any pending work that was scheduled by a preceding
2227  * intel_display_power_put_async() call, completing the disabling of the
2228  * corresponding power domains.
2229  *
2230  * Note that the work handler function may still be running after this
2231  * function returns; to ensure that the work handler isn't running use
2232  * intel_display_power_flush_work_sync() instead.
2233  */
2234 void intel_display_power_flush_work(struct drm_i915_private *i915)
2235 {
2236 	struct i915_power_domains *power_domains = &i915->power_domains;
2237 	intel_wakeref_t work_wakeref;
2238 
2239 	mutex_lock(&power_domains->lock);
2240 
2241 	work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
2242 	if (!work_wakeref)
2243 		goto out_verify;
2244 
2245 	release_async_put_domains(power_domains,
2246 				  async_put_domains_mask(power_domains));
2247 	cancel_delayed_work(&power_domains->async_put_work);
2248 
2249 out_verify:
2250 	verify_async_put_domains_state(power_domains);
2251 
2252 	mutex_unlock(&power_domains->lock);
2253 
2254 	if (work_wakeref)
2255 		intel_runtime_pm_put_raw(&i915->runtime_pm, work_wakeref);
2256 }
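
/*
 * Minimal usage sketch: make any pending asynchronous disabling take
 * effect before proceeding, e.g. on a teardown path (illustrative only):
 *
 *	intel_display_power_put_async(i915, POWER_DOMAIN_AUX_A, wakeref);
 *	...
 *	intel_display_power_flush_work(i915);
 */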
2257 
2258 /**
2259  * intel_display_power_flush_work_sync - flushes and syncs the async display power disabling work
2260  * @i915: i915 device instance
2261  *
2262  * Like intel_display_power_flush_work(), but also ensure that the work
2263  * handler function is not running any more when this function returns.
2264  */
2265 static void
2266 intel_display_power_flush_work_sync(struct drm_i915_private *i915)
2267 {
2268 	struct i915_power_domains *power_domains = &i915->power_domains;
2269 
2270 	intel_display_power_flush_work(i915);
2271 	cancel_delayed_work_sync(&power_domains->async_put_work);
2272 
2273 	verify_async_put_domains_state(power_domains);
2274 
2275 	drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref);
2276 }
2277 
2278 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
2279 /**
2280  * intel_display_power_put - release a power domain reference
2281  * @dev_priv: i915 device instance
2282  * @domain: power domain to reference
2283  * @wakeref: wakeref acquired for the reference that is being released
2284  *
2285  * This function drops the power domain reference obtained by
2286  * intel_display_power_get() and might power down the corresponding hardware
2287  * block right away if this is the last reference.
2288  */
2289 void intel_display_power_put(struct drm_i915_private *dev_priv,
2290 			     enum intel_display_power_domain domain,
2291 			     intel_wakeref_t wakeref)
2292 {
2293 	__intel_display_power_put(dev_priv, domain);
2294 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2295 }
2296 #endif
2297 
2298 #define I830_PIPES_POWER_DOMAINS (		\
2299 	BIT_ULL(POWER_DOMAIN_PIPE_A) |		\
2300 	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
2301 	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
2302 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
2303 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
2304 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
2305 	BIT_ULL(POWER_DOMAIN_INIT))
2306 
2307 #define VLV_DISPLAY_POWER_DOMAINS (		\
2308 	BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) |	\
2309 	BIT_ULL(POWER_DOMAIN_PIPE_A) |		\
2310 	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
2311 	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
2312 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
2313 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
2314 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
2315 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
2316 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
2317 	BIT_ULL(POWER_DOMAIN_PORT_DSI) |		\
2318 	BIT_ULL(POWER_DOMAIN_PORT_CRT) |		\
2319 	BIT_ULL(POWER_DOMAIN_VGA) |			\
2320 	BIT_ULL(POWER_DOMAIN_AUDIO) |		\
2321 	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
2322 	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
2323 	BIT_ULL(POWER_DOMAIN_GMBUS) |		\
2324 	BIT_ULL(POWER_DOMAIN_INIT))
2325 
2326 #define VLV_DPIO_CMN_BC_POWER_DOMAINS (		\
2327 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
2328 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
2329 	BIT_ULL(POWER_DOMAIN_PORT_CRT) |		\
2330 	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
2331 	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
2332 	BIT_ULL(POWER_DOMAIN_INIT))
2333 
2334 #define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS (	\
2335 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
2336 	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
2337 	BIT_ULL(POWER_DOMAIN_INIT))
2338 
2339 #define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS (	\
2340 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
2341 	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
2342 	BIT_ULL(POWER_DOMAIN_INIT))
2343 
2344 #define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS (	\
2345 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
2346 	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
2347 	BIT_ULL(POWER_DOMAIN_INIT))
2348 
2349 #define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS (	\
2350 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
2351 	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
2352 	BIT_ULL(POWER_DOMAIN_INIT))
2353 
2354 #define CHV_DISPLAY_POWER_DOMAINS (		\
2355 	BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) |	\
2356 	BIT_ULL(POWER_DOMAIN_PIPE_A) |		\
2357 	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
2358 	BIT_ULL(POWER_DOMAIN_PIPE_C) |		\
2359 	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
2360 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
2361 	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
2362 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
2363 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
2364 	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
2365 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
2366 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
2367 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
2368 	BIT_ULL(POWER_DOMAIN_PORT_DSI) |		\
2369 	BIT_ULL(POWER_DOMAIN_VGA) |			\
2370 	BIT_ULL(POWER_DOMAIN_AUDIO) |		\
2371 	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
2372 	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
2373 	BIT_ULL(POWER_DOMAIN_AUX_D) |		\
2374 	BIT_ULL(POWER_DOMAIN_GMBUS) |		\
2375 	BIT_ULL(POWER_DOMAIN_INIT))
2376 
2377 #define CHV_DPIO_CMN_BC_POWER_DOMAINS (		\
2378 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
2379 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
2380 	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
2381 	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
2382 	BIT_ULL(POWER_DOMAIN_INIT))
2383 
2384 #define CHV_DPIO_CMN_D_POWER_DOMAINS (		\
2385 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
2386 	BIT_ULL(POWER_DOMAIN_AUX_D) |		\
2387 	BIT_ULL(POWER_DOMAIN_INIT))
2388 
2389 #define HSW_DISPLAY_POWER_DOMAINS (			\
2390 	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2391 	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2392 	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |		\
2393 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
2394 	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
2395 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
2396 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2397 	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2398 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2399 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2400 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
2401 	BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */	\
2402 	BIT_ULL(POWER_DOMAIN_VGA) |				\
2403 	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
2404 	BIT_ULL(POWER_DOMAIN_INIT))
2405 
2406 #define BDW_DISPLAY_POWER_DOMAINS (			\
2407 	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2408 	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2409 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
2410 	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
2411 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
2412 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2413 	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2414 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2415 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2416 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
2417 	BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */	\
2418 	BIT_ULL(POWER_DOMAIN_VGA) |				\
2419 	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
2420 	BIT_ULL(POWER_DOMAIN_INIT))
2421 
2422 #define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
2423 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
2424 	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2425 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2426 	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2427 	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2428 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
2429 	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
2430 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2431 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2432 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
2433 	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |		\
2434 	BIT_ULL(POWER_DOMAIN_AUX_B) |                       \
2435 	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2436 	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
2437 	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
2438 	BIT_ULL(POWER_DOMAIN_VGA) |				\
2439 	BIT_ULL(POWER_DOMAIN_INIT))
2440 #define SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS (		\
2441 	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) |		\
2442 	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) |		\
2443 	BIT_ULL(POWER_DOMAIN_INIT))
2444 #define SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS (		\
2445 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |		\
2446 	BIT_ULL(POWER_DOMAIN_INIT))
2447 #define SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS (		\
2448 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |		\
2449 	BIT_ULL(POWER_DOMAIN_INIT))
2450 #define SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS (		\
2451 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |		\
2452 	BIT_ULL(POWER_DOMAIN_INIT))
2453 #define SKL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
2454 	SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
2455 	BIT_ULL(POWER_DOMAIN_GT_IRQ) |			\
2456 	BIT_ULL(POWER_DOMAIN_MODESET) |			\
2457 	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2458 	BIT_ULL(POWER_DOMAIN_INIT))
2459 
2460 #define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
2461 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
2462 	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2463 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2464 	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2465 	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2466 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
2467 	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
2468 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2469 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2470 	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
2471 	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2472 	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
2473 	BIT_ULL(POWER_DOMAIN_VGA) |				\
2474 	BIT_ULL(POWER_DOMAIN_INIT))
2475 #define BXT_DISPLAY_DC_OFF_POWER_DOMAINS (		\
2476 	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
2477 	BIT_ULL(POWER_DOMAIN_GT_IRQ) |			\
2478 	BIT_ULL(POWER_DOMAIN_MODESET) |			\
2479 	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2480 	BIT_ULL(POWER_DOMAIN_GMBUS) |			\
2481 	BIT_ULL(POWER_DOMAIN_INIT))
2482 #define BXT_DPIO_CMN_A_POWER_DOMAINS (			\
2483 	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) |		\
2484 	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2485 	BIT_ULL(POWER_DOMAIN_INIT))
2486 #define BXT_DPIO_CMN_BC_POWER_DOMAINS (			\
2487 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2488 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2489 	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
2490 	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2491 	BIT_ULL(POWER_DOMAIN_INIT))
2492 
2493 #define GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
2494 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
2495 	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2496 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2497 	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2498 	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2499 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
2500 	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
2501 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2502 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2503 	BIT_ULL(POWER_DOMAIN_AUX_B) |                       \
2504 	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2505 	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
2506 	BIT_ULL(POWER_DOMAIN_VGA) |				\
2507 	BIT_ULL(POWER_DOMAIN_INIT))
2508 #define GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS (		\
2509 	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
2510 #define GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS (		\
2511 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
2512 #define GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS (		\
2513 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
2514 #define GLK_DPIO_CMN_A_POWER_DOMAINS (			\
2515 	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) |		\
2516 	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2517 	BIT_ULL(POWER_DOMAIN_INIT))
2518 #define GLK_DPIO_CMN_B_POWER_DOMAINS (			\
2519 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2520 	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
2521 	BIT_ULL(POWER_DOMAIN_INIT))
2522 #define GLK_DPIO_CMN_C_POWER_DOMAINS (			\
2523 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2524 	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2525 	BIT_ULL(POWER_DOMAIN_INIT))
2526 #define GLK_DISPLAY_AUX_A_POWER_DOMAINS (		\
2527 	BIT_ULL(POWER_DOMAIN_AUX_A) |		\
2528 	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |		\
2529 	BIT_ULL(POWER_DOMAIN_INIT))
2530 #define GLK_DISPLAY_AUX_B_POWER_DOMAINS (		\
2531 	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
2532 	BIT_ULL(POWER_DOMAIN_INIT))
2533 #define GLK_DISPLAY_AUX_C_POWER_DOMAINS (		\
2534 	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
2535 	BIT_ULL(POWER_DOMAIN_INIT))
2536 #define GLK_DISPLAY_DC_OFF_POWER_DOMAINS (		\
2537 	GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
2538 	BIT_ULL(POWER_DOMAIN_GT_IRQ) |			\
2539 	BIT_ULL(POWER_DOMAIN_MODESET) |			\
2540 	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2541 	BIT_ULL(POWER_DOMAIN_GMBUS) |			\
2542 	BIT_ULL(POWER_DOMAIN_INIT))
2543 
2544 #define CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
2545 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
2546 	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2547 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2548 	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2549 	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2550 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
2551 	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
2552 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2553 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2554 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
2555 	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) |		\
2556 	BIT_ULL(POWER_DOMAIN_AUX_B) |                       \
2557 	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2558 	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
2559 	BIT_ULL(POWER_DOMAIN_AUX_F) |			\
2560 	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
2561 	BIT_ULL(POWER_DOMAIN_VGA) |				\
2562 	BIT_ULL(POWER_DOMAIN_INIT))
2563 #define CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS (		\
2564 	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) |		\
2565 	BIT_ULL(POWER_DOMAIN_INIT))
2566 #define CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS (		\
2567 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |		\
2568 	BIT_ULL(POWER_DOMAIN_INIT))
2569 #define CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS (		\
2570 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |		\
2571 	BIT_ULL(POWER_DOMAIN_INIT))
2572 #define CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS (		\
2573 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |		\
2574 	BIT_ULL(POWER_DOMAIN_INIT))
2575 #define CNL_DISPLAY_AUX_A_POWER_DOMAINS (		\
2576 	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2577 	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |		\
2578 	BIT_ULL(POWER_DOMAIN_INIT))
2579 #define CNL_DISPLAY_AUX_B_POWER_DOMAINS (		\
2580 	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
2581 	BIT_ULL(POWER_DOMAIN_INIT))
2582 #define CNL_DISPLAY_AUX_C_POWER_DOMAINS (		\
2583 	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2584 	BIT_ULL(POWER_DOMAIN_INIT))
2585 #define CNL_DISPLAY_AUX_D_POWER_DOMAINS (		\
2586 	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
2587 	BIT_ULL(POWER_DOMAIN_INIT))
2588 #define CNL_DISPLAY_AUX_F_POWER_DOMAINS (		\
2589 	BIT_ULL(POWER_DOMAIN_AUX_F) |			\
2590 	BIT_ULL(POWER_DOMAIN_INIT))
2591 #define CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS (		\
2592 	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) |		\
2593 	BIT_ULL(POWER_DOMAIN_INIT))
2594 #define CNL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
2595 	CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
2596 	BIT_ULL(POWER_DOMAIN_GT_IRQ) |			\
2597 	BIT_ULL(POWER_DOMAIN_MODESET) |			\
2598 	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2599 	BIT_ULL(POWER_DOMAIN_INIT))
2600 
2601 /*
2602  * ICL PW_0/PG_0 domains (HW/DMC control):
2603  * - PCI
2604  * - clocks except port PLL
2605  * - central power except FBC
2606  * - shared functions except pipe interrupts, pipe MBUS, DBUF registers
2607  * ICL PW_1/PG_1 domains (HW/DMC control):
2608  * - DBUF function
2609  * - PIPE_A and its planes, except VGA
2610  * - transcoder EDP + PSR
2611  * - transcoder DSI
2612  * - DDI_A
2613  * - FBC
2614  */
2615 #define ICL_PW_4_POWER_DOMAINS (			\
2616 	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2617 	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
2618 	BIT_ULL(POWER_DOMAIN_INIT))
2619 	/* VDSC/joining */
2620 #define ICL_PW_3_POWER_DOMAINS (			\
2621 	ICL_PW_4_POWER_DOMAINS |			\
2622 	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2623 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
2624 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2625 	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2626 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
2627 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
2628 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
2629 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
2630 	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |	\
2631 	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) |	\
2632 	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
2633 	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2634 	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
2635 	BIT_ULL(POWER_DOMAIN_AUX_E) |			\
2636 	BIT_ULL(POWER_DOMAIN_AUX_F) |			\
2637 	BIT_ULL(POWER_DOMAIN_AUX_C_TBT) |		\
2638 	BIT_ULL(POWER_DOMAIN_AUX_D_TBT) |		\
2639 	BIT_ULL(POWER_DOMAIN_AUX_E_TBT) |		\
2640 	BIT_ULL(POWER_DOMAIN_AUX_F_TBT) |		\
2641 	BIT_ULL(POWER_DOMAIN_VGA) |			\
2642 	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
2643 	BIT_ULL(POWER_DOMAIN_INIT))
2644 	/*
2645 	 * - transcoder WD
2646 	 * - KVMR (HW control)
2647 	 */
2648 #define ICL_PW_2_POWER_DOMAINS (			\
2649 	ICL_PW_3_POWER_DOMAINS |			\
2650 	BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) |		\
2651 	BIT_ULL(POWER_DOMAIN_INIT))
2652 	/*
2653 	 * - KVMR (HW control)
2654 	 */
2655 #define ICL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
2656 	ICL_PW_2_POWER_DOMAINS |			\
2657 	BIT_ULL(POWER_DOMAIN_MODESET) |			\
2658 	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2659 	BIT_ULL(POWER_DOMAIN_DPLL_DC_OFF) |			\
2660 	BIT_ULL(POWER_DOMAIN_INIT))
2661 
2662 #define ICL_DDI_IO_A_POWER_DOMAINS (			\
2663 	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
2664 #define ICL_DDI_IO_B_POWER_DOMAINS (			\
2665 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
2666 #define ICL_DDI_IO_C_POWER_DOMAINS (			\
2667 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
2668 #define ICL_DDI_IO_D_POWER_DOMAINS (			\
2669 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO))
2670 #define ICL_DDI_IO_E_POWER_DOMAINS (			\
2671 	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO))
2672 #define ICL_DDI_IO_F_POWER_DOMAINS (			\
2673 	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO))
2674 
2675 #define ICL_AUX_A_IO_POWER_DOMAINS (			\
2676 	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |		\
2677 	BIT_ULL(POWER_DOMAIN_AUX_A))
2678 #define ICL_AUX_B_IO_POWER_DOMAINS (			\
2679 	BIT_ULL(POWER_DOMAIN_AUX_B))
2680 #define ICL_AUX_C_TC1_IO_POWER_DOMAINS (		\
2681 	BIT_ULL(POWER_DOMAIN_AUX_C))
2682 #define ICL_AUX_D_TC2_IO_POWER_DOMAINS (		\
2683 	BIT_ULL(POWER_DOMAIN_AUX_D))
2684 #define ICL_AUX_E_TC3_IO_POWER_DOMAINS (		\
2685 	BIT_ULL(POWER_DOMAIN_AUX_E))
2686 #define ICL_AUX_F_TC4_IO_POWER_DOMAINS (		\
2687 	BIT_ULL(POWER_DOMAIN_AUX_F))
2688 #define ICL_AUX_C_TBT1_IO_POWER_DOMAINS (		\
2689 	BIT_ULL(POWER_DOMAIN_AUX_C_TBT))
2690 #define ICL_AUX_D_TBT2_IO_POWER_DOMAINS (		\
2691 	BIT_ULL(POWER_DOMAIN_AUX_D_TBT))
2692 #define ICL_AUX_E_TBT3_IO_POWER_DOMAINS (		\
2693 	BIT_ULL(POWER_DOMAIN_AUX_E_TBT))
2694 #define ICL_AUX_F_TBT4_IO_POWER_DOMAINS (		\
2695 	BIT_ULL(POWER_DOMAIN_AUX_F_TBT))
2696 
2697 #define TGL_PW_5_POWER_DOMAINS (			\
2698 	BIT_ULL(POWER_DOMAIN_PIPE_D) |			\
2699 	BIT_ULL(POWER_DOMAIN_TRANSCODER_D) |		\
2700 	BIT_ULL(POWER_DOMAIN_PIPE_D_PANEL_FITTER) |     \
2701 	BIT_ULL(POWER_DOMAIN_INIT))
2702 
2703 #define TGL_PW_4_POWER_DOMAINS (			\
2704 	TGL_PW_5_POWER_DOMAINS |			\
2705 	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2706 	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2707 	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
2708 	BIT_ULL(POWER_DOMAIN_INIT))
2709 
2710 #define TGL_PW_3_POWER_DOMAINS (			\
2711 	TGL_PW_4_POWER_DOMAINS |			\
2712 	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2713 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2714 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
2715 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
2716 	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |	\
2717 	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) |	\
2718 	BIT_ULL(POWER_DOMAIN_PORT_DDI_G_LANES) |	\
2719 	BIT_ULL(POWER_DOMAIN_PORT_DDI_H_LANES) |	\
2720 	BIT_ULL(POWER_DOMAIN_PORT_DDI_I_LANES) |	\
2721 	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
2722 	BIT_ULL(POWER_DOMAIN_AUX_E) |			\
2723 	BIT_ULL(POWER_DOMAIN_AUX_F) |			\
2724 	BIT_ULL(POWER_DOMAIN_AUX_G) |			\
2725 	BIT_ULL(POWER_DOMAIN_AUX_H) |			\
2726 	BIT_ULL(POWER_DOMAIN_AUX_I) |			\
2727 	BIT_ULL(POWER_DOMAIN_AUX_D_TBT) |		\
2728 	BIT_ULL(POWER_DOMAIN_AUX_E_TBT) |		\
2729 	BIT_ULL(POWER_DOMAIN_AUX_F_TBT) |		\
2730 	BIT_ULL(POWER_DOMAIN_AUX_G_TBT) |		\
2731 	BIT_ULL(POWER_DOMAIN_AUX_H_TBT) |		\
2732 	BIT_ULL(POWER_DOMAIN_AUX_I_TBT) |		\
2733 	BIT_ULL(POWER_DOMAIN_VGA) |			\
2734 	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
2735 	BIT_ULL(POWER_DOMAIN_INIT))
2736 
2737 #define TGL_PW_2_POWER_DOMAINS (			\
2738 	TGL_PW_3_POWER_DOMAINS |			\
2739 	BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) |	\
2740 	BIT_ULL(POWER_DOMAIN_INIT))
2741 
2742 #define TGL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
2743 	TGL_PW_2_POWER_DOMAINS |			\
2744 	BIT_ULL(POWER_DOMAIN_MODESET) |			\
2745 	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2746 	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
2747 	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2748 	BIT_ULL(POWER_DOMAIN_INIT))
2749 
2750 #define TGL_DDI_IO_D_TC1_POWER_DOMAINS (	\
2751 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO))
2752 #define TGL_DDI_IO_E_TC2_POWER_DOMAINS (	\
2753 	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO))
2754 #define TGL_DDI_IO_F_TC3_POWER_DOMAINS (	\
2755 	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO))
2756 #define TGL_DDI_IO_G_TC4_POWER_DOMAINS (	\
2757 	BIT_ULL(POWER_DOMAIN_PORT_DDI_G_IO))
2758 #define TGL_DDI_IO_H_TC5_POWER_DOMAINS (	\
2759 	BIT_ULL(POWER_DOMAIN_PORT_DDI_H_IO))
2760 #define TGL_DDI_IO_I_TC6_POWER_DOMAINS (	\
2761 	BIT_ULL(POWER_DOMAIN_PORT_DDI_I_IO))
2762 
2763 #define TGL_AUX_A_IO_POWER_DOMAINS (		\
2764 	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |	\
2765 	BIT_ULL(POWER_DOMAIN_AUX_A))
2766 #define TGL_AUX_B_IO_POWER_DOMAINS (		\
2767 	BIT_ULL(POWER_DOMAIN_AUX_B))
2768 #define TGL_AUX_C_IO_POWER_DOMAINS (		\
2769 	BIT_ULL(POWER_DOMAIN_AUX_C))
2770 #define TGL_AUX_D_TC1_IO_POWER_DOMAINS (	\
2771 	BIT_ULL(POWER_DOMAIN_AUX_D))
2772 #define TGL_AUX_E_TC2_IO_POWER_DOMAINS (	\
2773 	BIT_ULL(POWER_DOMAIN_AUX_E))
2774 #define TGL_AUX_F_TC3_IO_POWER_DOMAINS (	\
2775 	BIT_ULL(POWER_DOMAIN_AUX_F))
2776 #define TGL_AUX_G_TC4_IO_POWER_DOMAINS (	\
2777 	BIT_ULL(POWER_DOMAIN_AUX_G))
2778 #define TGL_AUX_H_TC5_IO_POWER_DOMAINS (	\
2779 	BIT_ULL(POWER_DOMAIN_AUX_H))
2780 #define TGL_AUX_I_TC6_IO_POWER_DOMAINS (	\
2781 	BIT_ULL(POWER_DOMAIN_AUX_I))
2782 #define TGL_AUX_D_TBT1_IO_POWER_DOMAINS (	\
2783 	BIT_ULL(POWER_DOMAIN_AUX_D_TBT))
2784 #define TGL_AUX_E_TBT2_IO_POWER_DOMAINS (	\
2785 	BIT_ULL(POWER_DOMAIN_AUX_E_TBT))
2786 #define TGL_AUX_F_TBT3_IO_POWER_DOMAINS (	\
2787 	BIT_ULL(POWER_DOMAIN_AUX_F_TBT))
2788 #define TGL_AUX_G_TBT4_IO_POWER_DOMAINS (	\
2789 	BIT_ULL(POWER_DOMAIN_AUX_G_TBT))
2790 #define TGL_AUX_H_TBT5_IO_POWER_DOMAINS (	\
2791 	BIT_ULL(POWER_DOMAIN_AUX_H_TBT))
2792 #define TGL_AUX_I_TBT6_IO_POWER_DOMAINS (	\
2793 	BIT_ULL(POWER_DOMAIN_AUX_I_TBT))
2794 
2795 static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
2796 	.sync_hw = i9xx_power_well_sync_hw_noop,
2797 	.enable = i9xx_always_on_power_well_noop,
2798 	.disable = i9xx_always_on_power_well_noop,
2799 	.is_enabled = i9xx_always_on_power_well_enabled,
2800 };
2801 
2802 static const struct i915_power_well_ops chv_pipe_power_well_ops = {
2803 	.sync_hw = chv_pipe_power_well_sync_hw,
2804 	.enable = chv_pipe_power_well_enable,
2805 	.disable = chv_pipe_power_well_disable,
2806 	.is_enabled = chv_pipe_power_well_enabled,
2807 };
2808 
2809 static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
2810 	.sync_hw = i9xx_power_well_sync_hw_noop,
2811 	.enable = chv_dpio_cmn_power_well_enable,
2812 	.disable = chv_dpio_cmn_power_well_disable,
2813 	.is_enabled = vlv_power_well_enabled,
2814 };
2815 
2816 static const struct i915_power_well_desc i9xx_always_on_power_well[] = {
2817 	{
2818 		.name = "always-on",
2819 		.always_on = true,
2820 		.domains = POWER_DOMAIN_MASK,
2821 		.ops = &i9xx_always_on_power_well_ops,
2822 		.id = DISP_PW_ID_NONE,
2823 	},
2824 };
2825 
2826 static const struct i915_power_well_ops i830_pipes_power_well_ops = {
2827 	.sync_hw = i830_pipes_power_well_sync_hw,
2828 	.enable = i830_pipes_power_well_enable,
2829 	.disable = i830_pipes_power_well_disable,
2830 	.is_enabled = i830_pipes_power_well_enabled,
2831 };
2832 
2833 static const struct i915_power_well_desc i830_power_wells[] = {
2834 	{
2835 		.name = "always-on",
2836 		.always_on = true,
2837 		.domains = POWER_DOMAIN_MASK,
2838 		.ops = &i9xx_always_on_power_well_ops,
2839 		.id = DISP_PW_ID_NONE,
2840 	},
2841 	{
2842 		.name = "pipes",
2843 		.domains = I830_PIPES_POWER_DOMAINS,
2844 		.ops = &i830_pipes_power_well_ops,
2845 		.id = DISP_PW_ID_NONE,
2846 	},
2847 };
2848 
2849 static const struct i915_power_well_ops hsw_power_well_ops = {
2850 	.sync_hw = hsw_power_well_sync_hw,
2851 	.enable = hsw_power_well_enable,
2852 	.disable = hsw_power_well_disable,
2853 	.is_enabled = hsw_power_well_enabled,
2854 };
2855 
2856 static const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
2857 	.sync_hw = i9xx_power_well_sync_hw_noop,
2858 	.enable = gen9_dc_off_power_well_enable,
2859 	.disable = gen9_dc_off_power_well_disable,
2860 	.is_enabled = gen9_dc_off_power_well_enabled,
2861 };
2862 
2863 static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = {
2864 	.sync_hw = i9xx_power_well_sync_hw_noop,
2865 	.enable = bxt_dpio_cmn_power_well_enable,
2866 	.disable = bxt_dpio_cmn_power_well_disable,
2867 	.is_enabled = bxt_dpio_cmn_power_well_enabled,
2868 };
2869 
2870 static const struct i915_power_well_regs hsw_power_well_regs = {
2871 	.bios	= HSW_PWR_WELL_CTL1,
2872 	.driver	= HSW_PWR_WELL_CTL2,
2873 	.kvmr	= HSW_PWR_WELL_CTL3,
2874 	.debug	= HSW_PWR_WELL_CTL4,
2875 };
2876 
2877 static const struct i915_power_well_desc hsw_power_wells[] = {
2878 	{
2879 		.name = "always-on",
2880 		.always_on = true,
2881 		.domains = POWER_DOMAIN_MASK,
2882 		.ops = &i9xx_always_on_power_well_ops,
2883 		.id = DISP_PW_ID_NONE,
2884 	},
2885 	{
2886 		.name = "display",
2887 		.domains = HSW_DISPLAY_POWER_DOMAINS,
2888 		.ops = &hsw_power_well_ops,
2889 		.id = HSW_DISP_PW_GLOBAL,
2890 		{
2891 			.hsw.regs = &hsw_power_well_regs,
2892 			.hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
2893 			.hsw.has_vga = true,
2894 		},
2895 	},
2896 };
2897 
2898 static const struct i915_power_well_desc bdw_power_wells[] = {
2899 	{
2900 		.name = "always-on",
2901 		.always_on = true,
2902 		.domains = POWER_DOMAIN_MASK,
2903 		.ops = &i9xx_always_on_power_well_ops,
2904 		.id = DISP_PW_ID_NONE,
2905 	},
2906 	{
2907 		.name = "display",
2908 		.domains = BDW_DISPLAY_POWER_DOMAINS,
2909 		.ops = &hsw_power_well_ops,
2910 		.id = HSW_DISP_PW_GLOBAL,
2911 		{
2912 			.hsw.regs = &hsw_power_well_regs,
2913 			.hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
2914 			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
2915 			.hsw.has_vga = true,
2916 		},
2917 	},
2918 };
2919 
2920 static const struct i915_power_well_ops vlv_display_power_well_ops = {
2921 	.sync_hw = i9xx_power_well_sync_hw_noop,
2922 	.enable = vlv_display_power_well_enable,
2923 	.disable = vlv_display_power_well_disable,
2924 	.is_enabled = vlv_power_well_enabled,
2925 };
2926 
2927 static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
2928 	.sync_hw = i9xx_power_well_sync_hw_noop,
2929 	.enable = vlv_dpio_cmn_power_well_enable,
2930 	.disable = vlv_dpio_cmn_power_well_disable,
2931 	.is_enabled = vlv_power_well_enabled,
2932 };
2933 
2934 static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
2935 	.sync_hw = i9xx_power_well_sync_hw_noop,
2936 	.enable = vlv_power_well_enable,
2937 	.disable = vlv_power_well_disable,
2938 	.is_enabled = vlv_power_well_enabled,
2939 };
2940 
2941 static const struct i915_power_well_desc vlv_power_wells[] = {
2942 	{
2943 		.name = "always-on",
2944 		.always_on = true,
2945 		.domains = POWER_DOMAIN_MASK,
2946 		.ops = &i9xx_always_on_power_well_ops,
2947 		.id = DISP_PW_ID_NONE,
2948 	},
2949 	{
2950 		.name = "display",
2951 		.domains = VLV_DISPLAY_POWER_DOMAINS,
2952 		.ops = &vlv_display_power_well_ops,
2953 		.id = VLV_DISP_PW_DISP2D,
2954 		{
2955 			.vlv.idx = PUNIT_PWGT_IDX_DISP2D,
2956 		},
2957 	},
2958 	{
2959 		.name = "dpio-tx-b-01",
2960 		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
2961 			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
2962 			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
2963 			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
2964 		.ops = &vlv_dpio_power_well_ops,
2965 		.id = DISP_PW_ID_NONE,
2966 		{
2967 			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_01,
2968 		},
2969 	},
2970 	{
2971 		.name = "dpio-tx-b-23",
2972 		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
2973 			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
2974 			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
2975 			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
2976 		.ops = &vlv_dpio_power_well_ops,
2977 		.id = DISP_PW_ID_NONE,
2978 		{
2979 			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_23,
2980 		},
2981 	},
2982 	{
2983 		.name = "dpio-tx-c-01",
2984 		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
2985 			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
2986 			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
2987 			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
2988 		.ops = &vlv_dpio_power_well_ops,
2989 		.id = DISP_PW_ID_NONE,
2990 		{
2991 			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_01,
2992 		},
2993 	},
2994 	{
2995 		.name = "dpio-tx-c-23",
2996 		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
2997 			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
2998 			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
2999 			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
3000 		.ops = &vlv_dpio_power_well_ops,
3001 		.id = DISP_PW_ID_NONE,
3002 		{
3003 			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_23,
3004 		},
3005 	},
3006 	{
3007 		.name = "dpio-common",
3008 		.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
3009 		.ops = &vlv_dpio_cmn_power_well_ops,
3010 		.id = VLV_DISP_PW_DPIO_CMN_BC,
3011 		{
3012 			.vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
3013 		},
3014 	},
3015 };
3016 
3017 static const struct i915_power_well_desc chv_power_wells[] = {
3018 	{
3019 		.name = "always-on",
3020 		.always_on = true,
3021 		.domains = POWER_DOMAIN_MASK,
3022 		.ops = &i9xx_always_on_power_well_ops,
3023 		.id = DISP_PW_ID_NONE,
3024 	},
3025 	{
3026 		.name = "display",
3027 		/*
3028 		 * Pipe A power well is the new disp2d well. Pipe B and C
3029 		 * power wells don't actually exist. Pipe A power well is
3030 		 * required for any pipe to work.
3031 		 */
3032 		.domains = CHV_DISPLAY_POWER_DOMAINS,
3033 		.ops = &chv_pipe_power_well_ops,
3034 		.id = DISP_PW_ID_NONE,
3035 	},
3036 	{
3037 		.name = "dpio-common-bc",
3038 		.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
3039 		.ops = &chv_dpio_cmn_power_well_ops,
3040 		.id = VLV_DISP_PW_DPIO_CMN_BC,
3041 		{
3042 			.vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
3043 		},
3044 	},
3045 	{
3046 		.name = "dpio-common-d",
3047 		.domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
3048 		.ops = &chv_dpio_cmn_power_well_ops,
3049 		.id = CHV_DISP_PW_DPIO_CMN_D,
3050 		{
3051 			.vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_D,
3052 		},
3053 	},
3054 };
3055 
3056 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
3057 					 enum i915_power_well_id power_well_id)
3058 {
3059 	struct i915_power_well *power_well;
3060 	bool ret;
3061 
3062 	power_well = lookup_power_well(dev_priv, power_well_id);
3063 	ret = power_well->desc->ops->is_enabled(dev_priv, power_well);
3064 
3065 	return ret;
3066 }
3067 
3068 static const struct i915_power_well_desc skl_power_wells[] = {
3069 	{
3070 		.name = "always-on",
3071 		.always_on = true,
3072 		.domains = POWER_DOMAIN_MASK,
3073 		.ops = &i9xx_always_on_power_well_ops,
3074 		.id = DISP_PW_ID_NONE,
3075 	},
3076 	{
3077 		.name = "power well 1",
3078 		/* Handled by the DMC firmware */
3079 		.always_on = true,
3080 		.domains = 0,
3081 		.ops = &hsw_power_well_ops,
3082 		.id = SKL_DISP_PW_1,
3083 		{
3084 			.hsw.regs = &hsw_power_well_regs,
3085 			.hsw.idx = SKL_PW_CTL_IDX_PW_1,
3086 			.hsw.has_fuses = true,
3087 		},
3088 	},
3089 	{
3090 		.name = "MISC IO power well",
3091 		/* Handled by the DMC firmware */
3092 		.always_on = true,
3093 		.domains = 0,
3094 		.ops = &hsw_power_well_ops,
3095 		.id = SKL_DISP_PW_MISC_IO,
3096 		{
3097 			.hsw.regs = &hsw_power_well_regs,
3098 			.hsw.idx = SKL_PW_CTL_IDX_MISC_IO,
3099 		},
3100 	},
3101 	{
3102 		.name = "DC off",
3103 		.domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
3104 		.ops = &gen9_dc_off_power_well_ops,
3105 		.id = SKL_DISP_DC_OFF,
3106 	},
3107 	{
3108 		.name = "power well 2",
3109 		.domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
3110 		.ops = &hsw_power_well_ops,
3111 		.id = SKL_DISP_PW_2,
3112 		{
3113 			.hsw.regs = &hsw_power_well_regs,
3114 			.hsw.idx = SKL_PW_CTL_IDX_PW_2,
3115 			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
3116 			.hsw.has_vga = true,
3117 			.hsw.has_fuses = true,
3118 		},
3119 	},
3120 	{
3121 		.name = "DDI A/E IO power well",
3122 		.domains = SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS,
3123 		.ops = &hsw_power_well_ops,
3124 		.id = DISP_PW_ID_NONE,
3125 		{
3126 			.hsw.regs = &hsw_power_well_regs,
3127 			.hsw.idx = SKL_PW_CTL_IDX_DDI_A_E,
3128 		},
3129 	},
3130 	{
3131 		.name = "DDI B IO power well",
3132 		.domains = SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS,
3133 		.ops = &hsw_power_well_ops,
3134 		.id = DISP_PW_ID_NONE,
3135 		{
3136 			.hsw.regs = &hsw_power_well_regs,
3137 			.hsw.idx = SKL_PW_CTL_IDX_DDI_B,
3138 		},
3139 	},
3140 	{
3141 		.name = "DDI C IO power well",
3142 		.domains = SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS,
3143 		.ops = &hsw_power_well_ops,
3144 		.id = DISP_PW_ID_NONE,
3145 		{
3146 			.hsw.regs = &hsw_power_well_regs,
3147 			.hsw.idx = SKL_PW_CTL_IDX_DDI_C,
3148 		},
3149 	},
3150 	{
3151 		.name = "DDI D IO power well",
3152 		.domains = SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS,
3153 		.ops = &hsw_power_well_ops,
3154 		.id = DISP_PW_ID_NONE,
3155 		{
3156 			.hsw.regs = &hsw_power_well_regs,
3157 			.hsw.idx = SKL_PW_CTL_IDX_DDI_D,
3158 		},
3159 	},
3160 };
3161 
3162 static const struct i915_power_well_desc bxt_power_wells[] = {
3163 	{
3164 		.name = "always-on",
3165 		.always_on = true,
3166 		.domains = POWER_DOMAIN_MASK,
3167 		.ops = &i9xx_always_on_power_well_ops,
3168 		.id = DISP_PW_ID_NONE,
3169 	},
3170 	{
3171 		.name = "power well 1",
3172 		/* Handled by the DMC firmware */
3173 		.always_on = true,
3174 		.domains = 0,
3175 		.ops = &hsw_power_well_ops,
3176 		.id = SKL_DISP_PW_1,
3177 		{
3178 			.hsw.regs = &hsw_power_well_regs,
3179 			.hsw.idx = SKL_PW_CTL_IDX_PW_1,
3180 			.hsw.has_fuses = true,
3181 		},
3182 	},
3183 	{
3184 		.name = "DC off",
3185 		.domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS,
3186 		.ops = &gen9_dc_off_power_well_ops,
3187 		.id = SKL_DISP_DC_OFF,
3188 	},
3189 	{
3190 		.name = "power well 2",
3191 		.domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
3192 		.ops = &hsw_power_well_ops,
3193 		.id = SKL_DISP_PW_2,
3194 		{
3195 			.hsw.regs = &hsw_power_well_regs,
3196 			.hsw.idx = SKL_PW_CTL_IDX_PW_2,
3197 			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
3198 			.hsw.has_vga = true,
3199 			.hsw.has_fuses = true,
3200 		},
3201 	},
3202 	{
3203 		.name = "dpio-common-a",
3204 		.domains = BXT_DPIO_CMN_A_POWER_DOMAINS,
3205 		.ops = &bxt_dpio_cmn_power_well_ops,
3206 		.id = BXT_DISP_PW_DPIO_CMN_A,
3207 		{
3208 			.bxt.phy = DPIO_PHY1,
3209 		},
3210 	},
3211 	{
3212 		.name = "dpio-common-bc",
3213 		.domains = BXT_DPIO_CMN_BC_POWER_DOMAINS,
3214 		.ops = &bxt_dpio_cmn_power_well_ops,
3215 		.id = VLV_DISP_PW_DPIO_CMN_BC,
3216 		{
3217 			.bxt.phy = DPIO_PHY0,
3218 		},
3219 	},
3220 };
3221 
3222 static const struct i915_power_well_desc glk_power_wells[] = {
3223 	{
3224 		.name = "always-on",
3225 		.always_on = true,
3226 		.domains = POWER_DOMAIN_MASK,
3227 		.ops = &i9xx_always_on_power_well_ops,
3228 		.id = DISP_PW_ID_NONE,
3229 	},
3230 	{
3231 		.name = "power well 1",
3232 		/* Handled by the DMC firmware */
3233 		.always_on = true,
3234 		.domains = 0,
3235 		.ops = &hsw_power_well_ops,
3236 		.id = SKL_DISP_PW_1,
3237 		{
3238 			.hsw.regs = &hsw_power_well_regs,
3239 			.hsw.idx = SKL_PW_CTL_IDX_PW_1,
3240 			.hsw.has_fuses = true,
3241 		},
3242 	},
3243 	{
3244 		.name = "DC off",
3245 		.domains = GLK_DISPLAY_DC_OFF_POWER_DOMAINS,
3246 		.ops = &gen9_dc_off_power_well_ops,
3247 		.id = SKL_DISP_DC_OFF,
3248 	},
3249 	{
3250 		.name = "power well 2",
3251 		.domains = GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS,
3252 		.ops = &hsw_power_well_ops,
3253 		.id = SKL_DISP_PW_2,
3254 		{
3255 			.hsw.regs = &hsw_power_well_regs,
3256 			.hsw.idx = SKL_PW_CTL_IDX_PW_2,
3257 			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
3258 			.hsw.has_vga = true,
3259 			.hsw.has_fuses = true,
3260 		},
3261 	},
3262 	{
3263 		.name = "dpio-common-a",
3264 		.domains = GLK_DPIO_CMN_A_POWER_DOMAINS,
3265 		.ops = &bxt_dpio_cmn_power_well_ops,
3266 		.id = BXT_DISP_PW_DPIO_CMN_A,
3267 		{
3268 			.bxt.phy = DPIO_PHY1,
3269 		},
3270 	},
3271 	{
3272 		.name = "dpio-common-b",
3273 		.domains = GLK_DPIO_CMN_B_POWER_DOMAINS,
3274 		.ops = &bxt_dpio_cmn_power_well_ops,
3275 		.id = VLV_DISP_PW_DPIO_CMN_BC,
3276 		{
3277 			.bxt.phy = DPIO_PHY0,
3278 		},
3279 	},
3280 	{
3281 		.name = "dpio-common-c",
3282 		.domains = GLK_DPIO_CMN_C_POWER_DOMAINS,
3283 		.ops = &bxt_dpio_cmn_power_well_ops,
3284 		.id = GLK_DISP_PW_DPIO_CMN_C,
3285 		{
3286 			.bxt.phy = DPIO_PHY2,
3287 		},
3288 	},
3289 	{
3290 		.name = "AUX A",
3291 		.domains = GLK_DISPLAY_AUX_A_POWER_DOMAINS,
3292 		.ops = &hsw_power_well_ops,
3293 		.id = DISP_PW_ID_NONE,
3294 		{
3295 			.hsw.regs = &hsw_power_well_regs,
3296 			.hsw.idx = GLK_PW_CTL_IDX_AUX_A,
3297 		},
3298 	},
3299 	{
3300 		.name = "AUX B",
3301 		.domains = GLK_DISPLAY_AUX_B_POWER_DOMAINS,
3302 		.ops = &hsw_power_well_ops,
3303 		.id = DISP_PW_ID_NONE,
3304 		{
3305 			.hsw.regs = &hsw_power_well_regs,
3306 			.hsw.idx = GLK_PW_CTL_IDX_AUX_B,
3307 		},
3308 	},
3309 	{
3310 		.name = "AUX C",
3311 		.domains = GLK_DISPLAY_AUX_C_POWER_DOMAINS,
3312 		.ops = &hsw_power_well_ops,
3313 		.id = DISP_PW_ID_NONE,
3314 		{
3315 			.hsw.regs = &hsw_power_well_regs,
3316 			.hsw.idx = GLK_PW_CTL_IDX_AUX_C,
3317 		},
3318 	},
3319 	{
3320 		.name = "DDI A IO power well",
3321 		.domains = GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS,
3322 		.ops = &hsw_power_well_ops,
3323 		.id = DISP_PW_ID_NONE,
3324 		{
3325 			.hsw.regs = &hsw_power_well_regs,
3326 			.hsw.idx = GLK_PW_CTL_IDX_DDI_A,
3327 		},
3328 	},
3329 	{
3330 		.name = "DDI B IO power well",
3331 		.domains = GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS,
3332 		.ops = &hsw_power_well_ops,
3333 		.id = DISP_PW_ID_NONE,
3334 		{
3335 			.hsw.regs = &hsw_power_well_regs,
3336 			.hsw.idx = SKL_PW_CTL_IDX_DDI_B,
3337 		},
3338 	},
3339 	{
3340 		.name = "DDI C IO power well",
3341 		.domains = GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS,
3342 		.ops = &hsw_power_well_ops,
3343 		.id = DISP_PW_ID_NONE,
3344 		{
3345 			.hsw.regs = &hsw_power_well_regs,
3346 			.hsw.idx = SKL_PW_CTL_IDX_DDI_C,
3347 		},
3348 	},
3349 };
3350 
3351 static const struct i915_power_well_desc cnl_power_wells[] = {
3352 	{
3353 		.name = "always-on",
3354 		.always_on = true,
3355 		.domains = POWER_DOMAIN_MASK,
3356 		.ops = &i9xx_always_on_power_well_ops,
3357 		.id = DISP_PW_ID_NONE,
3358 	},
3359 	{
3360 		.name = "power well 1",
3361 		/* Handled by the DMC firmware */
3362 		.always_on = true,
3363 		.domains = 0,
3364 		.ops = &hsw_power_well_ops,
3365 		.id = SKL_DISP_PW_1,
3366 		{
3367 			.hsw.regs = &hsw_power_well_regs,
3368 			.hsw.idx = SKL_PW_CTL_IDX_PW_1,
3369 			.hsw.has_fuses = true,
3370 		},
3371 	},
3372 	{
3373 		.name = "AUX A",
3374 		.domains = CNL_DISPLAY_AUX_A_POWER_DOMAINS,
3375 		.ops = &hsw_power_well_ops,
3376 		.id = DISP_PW_ID_NONE,
3377 		{
3378 			.hsw.regs = &hsw_power_well_regs,
3379 			.hsw.idx = GLK_PW_CTL_IDX_AUX_A,
3380 		},
3381 	},
3382 	{
3383 		.name = "AUX B",
3384 		.domains = CNL_DISPLAY_AUX_B_POWER_DOMAINS,
3385 		.ops = &hsw_power_well_ops,
3386 		.id = DISP_PW_ID_NONE,
3387 		{
3388 			.hsw.regs = &hsw_power_well_regs,
3389 			.hsw.idx = GLK_PW_CTL_IDX_AUX_B,
3390 		},
3391 	},
3392 	{
3393 		.name = "AUX C",
3394 		.domains = CNL_DISPLAY_AUX_C_POWER_DOMAINS,
3395 		.ops = &hsw_power_well_ops,
3396 		.id = DISP_PW_ID_NONE,
3397 		{
3398 			.hsw.regs = &hsw_power_well_regs,
3399 			.hsw.idx = GLK_PW_CTL_IDX_AUX_C,
3400 		},
3401 	},
3402 	{
3403 		.name = "AUX D",
3404 		.domains = CNL_DISPLAY_AUX_D_POWER_DOMAINS,
3405 		.ops = &hsw_power_well_ops,
3406 		.id = DISP_PW_ID_NONE,
3407 		{
3408 			.hsw.regs = &hsw_power_well_regs,
3409 			.hsw.idx = CNL_PW_CTL_IDX_AUX_D,
3410 		},
3411 	},
3412 	{
3413 		.name = "DC off",
3414 		.domains = CNL_DISPLAY_DC_OFF_POWER_DOMAINS,
3415 		.ops = &gen9_dc_off_power_well_ops,
3416 		.id = SKL_DISP_DC_OFF,
3417 	},
3418 	{
3419 		.name = "power well 2",
3420 		.domains = CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
3421 		.ops = &hsw_power_well_ops,
3422 		.id = SKL_DISP_PW_2,
3423 		{
3424 			.hsw.regs = &hsw_power_well_regs,
3425 			.hsw.idx = SKL_PW_CTL_IDX_PW_2,
3426 			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
3427 			.hsw.has_vga = true,
3428 			.hsw.has_fuses = true,
3429 		},
3430 	},
3431 	{
3432 		.name = "DDI A IO power well",
3433 		.domains = CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS,
3434 		.ops = &hsw_power_well_ops,
3435 		.id = DISP_PW_ID_NONE,
3436 		{
3437 			.hsw.regs = &hsw_power_well_regs,
3438 			.hsw.idx = GLK_PW_CTL_IDX_DDI_A,
3439 		},
3440 	},
3441 	{
3442 		.name = "DDI B IO power well",
3443 		.domains = CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS,
3444 		.ops = &hsw_power_well_ops,
3445 		.id = DISP_PW_ID_NONE,
3446 		{
3447 			.hsw.regs = &hsw_power_well_regs,
3448 			.hsw.idx = SKL_PW_CTL_IDX_DDI_B,
3449 		},
3450 	},
3451 	{
3452 		.name = "DDI C IO power well",
3453 		.domains = CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS,
3454 		.ops = &hsw_power_well_ops,
3455 		.id = DISP_PW_ID_NONE,
3456 		{
3457 			.hsw.regs = &hsw_power_well_regs,
3458 			.hsw.idx = SKL_PW_CTL_IDX_DDI_C,
3459 		},
3460 	},
3461 	{
3462 		.name = "DDI D IO power well",
3463 		.domains = CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS,
3464 		.ops = &hsw_power_well_ops,
3465 		.id = DISP_PW_ID_NONE,
3466 		{
3467 			.hsw.regs = &hsw_power_well_regs,
3468 			.hsw.idx = SKL_PW_CTL_IDX_DDI_D,
3469 		},
3470 	},
3471 	{
3472 		.name = "DDI F IO power well",
3473 		.domains = CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS,
3474 		.ops = &hsw_power_well_ops,
3475 		.id = DISP_PW_ID_NONE,
3476 		{
3477 			.hsw.regs = &hsw_power_well_regs,
3478 			.hsw.idx = CNL_PW_CTL_IDX_DDI_F,
3479 		},
3480 	},
3481 	{
3482 		.name = "AUX F",
3483 		.domains = CNL_DISPLAY_AUX_F_POWER_DOMAINS,
3484 		.ops = &hsw_power_well_ops,
3485 		.id = DISP_PW_ID_NONE,
3486 		{
3487 			.hsw.regs = &hsw_power_well_regs,
3488 			.hsw.idx = CNL_PW_CTL_IDX_AUX_F,
3489 		},
3490 	},
3491 };
3492 
3493 static const struct i915_power_well_ops icl_combo_phy_aux_power_well_ops = {
3494 	.sync_hw = hsw_power_well_sync_hw,
3495 	.enable = icl_combo_phy_aux_power_well_enable,
3496 	.disable = icl_combo_phy_aux_power_well_disable,
3497 	.is_enabled = hsw_power_well_enabled,
3498 };
3499 
3500 static const struct i915_power_well_ops icl_tc_phy_aux_power_well_ops = {
3501 	.sync_hw = hsw_power_well_sync_hw,
3502 	.enable = icl_tc_phy_aux_power_well_enable,
3503 	.disable = icl_tc_phy_aux_power_well_disable,
3504 	.is_enabled = hsw_power_well_enabled,
3505 };
3506 
3507 static const struct i915_power_well_regs icl_aux_power_well_regs = {
3508 	.bios	= ICL_PWR_WELL_CTL_AUX1,
3509 	.driver	= ICL_PWR_WELL_CTL_AUX2,
3510 	.debug	= ICL_PWR_WELL_CTL_AUX4,
3511 };
3512 
3513 static const struct i915_power_well_regs icl_ddi_power_well_regs = {
3514 	.bios	= ICL_PWR_WELL_CTL_DDI1,
3515 	.driver	= ICL_PWR_WELL_CTL_DDI2,
3516 	.debug	= ICL_PWR_WELL_CTL_DDI4,
3517 };
3518 
3519 static const struct i915_power_well_desc icl_power_wells[] = {
3520 	{
3521 		.name = "always-on",
3522 		.always_on = true,
3523 		.domains = POWER_DOMAIN_MASK,
3524 		.ops = &i9xx_always_on_power_well_ops,
3525 		.id = DISP_PW_ID_NONE,
3526 	},
3527 	{
3528 		.name = "power well 1",
3529 		/* Handled by the DMC firmware */
3530 		.always_on = true,
3531 		.domains = 0,
3532 		.ops = &hsw_power_well_ops,
3533 		.id = SKL_DISP_PW_1,
3534 		{
3535 			.hsw.regs = &hsw_power_well_regs,
3536 			.hsw.idx = ICL_PW_CTL_IDX_PW_1,
3537 			.hsw.has_fuses = true,
3538 		},
3539 	},
3540 	{
3541 		.name = "DC off",
3542 		.domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS,
3543 		.ops = &gen9_dc_off_power_well_ops,
3544 		.id = SKL_DISP_DC_OFF,
3545 	},
3546 	{
3547 		.name = "power well 2",
3548 		.domains = ICL_PW_2_POWER_DOMAINS,
3549 		.ops = &hsw_power_well_ops,
3550 		.id = SKL_DISP_PW_2,
3551 		{
3552 			.hsw.regs = &hsw_power_well_regs,
3553 			.hsw.idx = ICL_PW_CTL_IDX_PW_2,
3554 			.hsw.has_fuses = true,
3555 		},
3556 	},
3557 	{
3558 		.name = "power well 3",
3559 		.domains = ICL_PW_3_POWER_DOMAINS,
3560 		.ops = &hsw_power_well_ops,
3561 		.id = DISP_PW_ID_NONE,
3562 		{
3563 			.hsw.regs = &hsw_power_well_regs,
3564 			.hsw.idx = ICL_PW_CTL_IDX_PW_3,
3565 			.hsw.irq_pipe_mask = BIT(PIPE_B),
3566 			.hsw.has_vga = true,
3567 			.hsw.has_fuses = true,
3568 		},
3569 	},
3570 	{
3571 		.name = "DDI A IO",
3572 		.domains = ICL_DDI_IO_A_POWER_DOMAINS,
3573 		.ops = &hsw_power_well_ops,
3574 		.id = DISP_PW_ID_NONE,
3575 		{
3576 			.hsw.regs = &icl_ddi_power_well_regs,
3577 			.hsw.idx = ICL_PW_CTL_IDX_DDI_A,
3578 		},
3579 	},
3580 	{
3581 		.name = "DDI B IO",
3582 		.domains = ICL_DDI_IO_B_POWER_DOMAINS,
3583 		.ops = &hsw_power_well_ops,
3584 		.id = DISP_PW_ID_NONE,
3585 		{
3586 			.hsw.regs = &icl_ddi_power_well_regs,
3587 			.hsw.idx = ICL_PW_CTL_IDX_DDI_B,
3588 		},
3589 	},
3590 	{
3591 		.name = "DDI C IO",
3592 		.domains = ICL_DDI_IO_C_POWER_DOMAINS,
3593 		.ops = &hsw_power_well_ops,
3594 		.id = DISP_PW_ID_NONE,
3595 		{
3596 			.hsw.regs = &icl_ddi_power_well_regs,
3597 			.hsw.idx = ICL_PW_CTL_IDX_DDI_C,
3598 		},
3599 	},
3600 	{
3601 		.name = "DDI D IO",
3602 		.domains = ICL_DDI_IO_D_POWER_DOMAINS,
3603 		.ops = &hsw_power_well_ops,
3604 		.id = DISP_PW_ID_NONE,
3605 		{
3606 			.hsw.regs = &icl_ddi_power_well_regs,
3607 			.hsw.idx = ICL_PW_CTL_IDX_DDI_D,
3608 		},
3609 	},
3610 	{
3611 		.name = "DDI E IO",
3612 		.domains = ICL_DDI_IO_E_POWER_DOMAINS,
3613 		.ops = &hsw_power_well_ops,
3614 		.id = DISP_PW_ID_NONE,
3615 		{
3616 			.hsw.regs = &icl_ddi_power_well_regs,
3617 			.hsw.idx = ICL_PW_CTL_IDX_DDI_E,
3618 		},
3619 	},
3620 	{
3621 		.name = "DDI F IO",
3622 		.domains = ICL_DDI_IO_F_POWER_DOMAINS,
3623 		.ops = &hsw_power_well_ops,
3624 		.id = DISP_PW_ID_NONE,
3625 		{
3626 			.hsw.regs = &icl_ddi_power_well_regs,
3627 			.hsw.idx = ICL_PW_CTL_IDX_DDI_F,
3628 		},
3629 	},
3630 	{
3631 		.name = "AUX A",
3632 		.domains = ICL_AUX_A_IO_POWER_DOMAINS,
3633 		.ops = &icl_combo_phy_aux_power_well_ops,
3634 		.id = DISP_PW_ID_NONE,
3635 		{
3636 			.hsw.regs = &icl_aux_power_well_regs,
3637 			.hsw.idx = ICL_PW_CTL_IDX_AUX_A,
3638 		},
3639 	},
3640 	{
3641 		.name = "AUX B",
3642 		.domains = ICL_AUX_B_IO_POWER_DOMAINS,
3643 		.ops = &icl_combo_phy_aux_power_well_ops,
3644 		.id = DISP_PW_ID_NONE,
3645 		{
3646 			.hsw.regs = &icl_aux_power_well_regs,
3647 			.hsw.idx = ICL_PW_CTL_IDX_AUX_B,
3648 		},
3649 	},
3650 	{
3651 		.name = "AUX C TC1",
3652 		.domains = ICL_AUX_C_TC1_IO_POWER_DOMAINS,
3653 		.ops = &icl_tc_phy_aux_power_well_ops,
3654 		.id = DISP_PW_ID_NONE,
3655 		{
3656 			.hsw.regs = &icl_aux_power_well_regs,
3657 			.hsw.idx = ICL_PW_CTL_IDX_AUX_C,
3658 			.hsw.is_tc_tbt = false,
3659 		},
3660 	},
3661 	{
3662 		.name = "AUX D TC2",
3663 		.domains = ICL_AUX_D_TC2_IO_POWER_DOMAINS,
3664 		.ops = &icl_tc_phy_aux_power_well_ops,
3665 		.id = DISP_PW_ID_NONE,
3666 		{
3667 			.hsw.regs = &icl_aux_power_well_regs,
3668 			.hsw.idx = ICL_PW_CTL_IDX_AUX_D,
3669 			.hsw.is_tc_tbt = false,
3670 		},
3671 	},
3672 	{
3673 		.name = "AUX E TC3",
3674 		.domains = ICL_AUX_E_TC3_IO_POWER_DOMAINS,
3675 		.ops = &icl_tc_phy_aux_power_well_ops,
3676 		.id = DISP_PW_ID_NONE,
3677 		{
3678 			.hsw.regs = &icl_aux_power_well_regs,
3679 			.hsw.idx = ICL_PW_CTL_IDX_AUX_E,
3680 			.hsw.is_tc_tbt = false,
3681 		},
3682 	},
3683 	{
3684 		.name = "AUX F TC4",
3685 		.domains = ICL_AUX_F_TC4_IO_POWER_DOMAINS,
3686 		.ops = &icl_tc_phy_aux_power_well_ops,
3687 		.id = DISP_PW_ID_NONE,
3688 		{
3689 			.hsw.regs = &icl_aux_power_well_regs,
3690 			.hsw.idx = ICL_PW_CTL_IDX_AUX_F,
3691 			.hsw.is_tc_tbt = false,
3692 		},
3693 	},
3694 	{
3695 		.name = "AUX C TBT1",
3696 		.domains = ICL_AUX_C_TBT1_IO_POWER_DOMAINS,
3697 		.ops = &icl_tc_phy_aux_power_well_ops,
3698 		.id = DISP_PW_ID_NONE,
3699 		{
3700 			.hsw.regs = &icl_aux_power_well_regs,
3701 			.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT1,
3702 			.hsw.is_tc_tbt = true,
3703 		},
3704 	},
3705 	{
3706 		.name = "AUX D TBT2",
3707 		.domains = ICL_AUX_D_TBT2_IO_POWER_DOMAINS,
3708 		.ops = &icl_tc_phy_aux_power_well_ops,
3709 		.id = DISP_PW_ID_NONE,
3710 		{
3711 			.hsw.regs = &icl_aux_power_well_regs,
3712 			.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT2,
3713 			.hsw.is_tc_tbt = true,
3714 		},
3715 	},
3716 	{
3717 		.name = "AUX E TBT3",
3718 		.domains = ICL_AUX_E_TBT3_IO_POWER_DOMAINS,
3719 		.ops = &icl_tc_phy_aux_power_well_ops,
3720 		.id = DISP_PW_ID_NONE,
3721 		{
3722 			.hsw.regs = &icl_aux_power_well_regs,
3723 			.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT3,
3724 			.hsw.is_tc_tbt = true,
3725 		},
3726 	},
3727 	{
3728 		.name = "AUX F TBT4",
3729 		.domains = ICL_AUX_F_TBT4_IO_POWER_DOMAINS,
3730 		.ops = &icl_tc_phy_aux_power_well_ops,
3731 		.id = DISP_PW_ID_NONE,
3732 		{
3733 			.hsw.regs = &icl_aux_power_well_regs,
3734 			.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT4,
3735 			.hsw.is_tc_tbt = true,
3736 		},
3737 	},
3738 	{
3739 		.name = "power well 4",
3740 		.domains = ICL_PW_4_POWER_DOMAINS,
3741 		.ops = &hsw_power_well_ops,
3742 		.id = DISP_PW_ID_NONE,
3743 		{
3744 			.hsw.regs = &hsw_power_well_regs,
3745 			.hsw.idx = ICL_PW_CTL_IDX_PW_4,
3746 			.hsw.has_fuses = true,
3747 			.hsw.irq_pipe_mask = BIT(PIPE_C),
3748 		},
3749 	},
3750 };
3751 
3752 static const struct i915_power_well_desc ehl_power_wells[] = {
3753 	{
3754 		.name = "always-on",
3755 		.always_on = true,
3756 		.domains = POWER_DOMAIN_MASK,
3757 		.ops = &i9xx_always_on_power_well_ops,
3758 		.id = DISP_PW_ID_NONE,
3759 	},
3760 	{
3761 		.name = "power well 1",
3762 		/* Handled by the DMC firmware */
3763 		.always_on = true,
3764 		.domains = 0,
3765 		.ops = &hsw_power_well_ops,
3766 		.id = SKL_DISP_PW_1,
3767 		{
3768 			.hsw.regs = &hsw_power_well_regs,
3769 			.hsw.idx = ICL_PW_CTL_IDX_PW_1,
3770 			.hsw.has_fuses = true,
3771 		},
3772 	},
3773 	{
3774 		.name = "DC off",
3775 		.domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS,
3776 		.ops = &gen9_dc_off_power_well_ops,
3777 		.id = SKL_DISP_DC_OFF,
3778 	},
3779 	{
3780 		.name = "power well 2",
3781 		.domains = ICL_PW_2_POWER_DOMAINS,
3782 		.ops = &hsw_power_well_ops,
3783 		.id = SKL_DISP_PW_2,
3784 		{
3785 			.hsw.regs = &hsw_power_well_regs,
3786 			.hsw.idx = ICL_PW_CTL_IDX_PW_2,
3787 			.hsw.has_fuses = true,
3788 		},
3789 	},
3790 	{
3791 		.name = "power well 3",
3792 		.domains = ICL_PW_3_POWER_DOMAINS,
3793 		.ops = &hsw_power_well_ops,
3794 		.id = DISP_PW_ID_NONE,
3795 		{
3796 			.hsw.regs = &hsw_power_well_regs,
3797 			.hsw.idx = ICL_PW_CTL_IDX_PW_3,
3798 			.hsw.irq_pipe_mask = BIT(PIPE_B),
3799 			.hsw.has_vga = true,
3800 			.hsw.has_fuses = true,
3801 		},
3802 	},
3803 	{
3804 		.name = "DDI A IO",
3805 		.domains = ICL_DDI_IO_A_POWER_DOMAINS,
3806 		.ops = &hsw_power_well_ops,
3807 		.id = DISP_PW_ID_NONE,
3808 		{
3809 			.hsw.regs = &icl_ddi_power_well_regs,
3810 			.hsw.idx = ICL_PW_CTL_IDX_DDI_A,
3811 		},
3812 	},
3813 	{
3814 		.name = "DDI B IO",
3815 		.domains = ICL_DDI_IO_B_POWER_DOMAINS,
3816 		.ops = &hsw_power_well_ops,
3817 		.id = DISP_PW_ID_NONE,
3818 		{
3819 			.hsw.regs = &icl_ddi_power_well_regs,
3820 			.hsw.idx = ICL_PW_CTL_IDX_DDI_B,
3821 		},
3822 	},
3823 	{
3824 		.name = "DDI C IO",
3825 		.domains = ICL_DDI_IO_C_POWER_DOMAINS,
3826 		.ops = &hsw_power_well_ops,
3827 		.id = DISP_PW_ID_NONE,
3828 		{
3829 			.hsw.regs = &icl_ddi_power_well_regs,
3830 			.hsw.idx = ICL_PW_CTL_IDX_DDI_C,
3831 		},
3832 	},
3833 	{
3834 		.name = "DDI D IO",
3835 		.domains = ICL_DDI_IO_D_POWER_DOMAINS,
3836 		.ops = &hsw_power_well_ops,
3837 		.id = DISP_PW_ID_NONE,
3838 		{
3839 			.hsw.regs = &icl_ddi_power_well_regs,
3840 			.hsw.idx = ICL_PW_CTL_IDX_DDI_D,
3841 		},
3842 	},
3843 	{
3844 		.name = "AUX A",
3845 		.domains = ICL_AUX_A_IO_POWER_DOMAINS,
3846 		.ops = &hsw_power_well_ops,
3847 		.id = DISP_PW_ID_NONE,
3848 		{
3849 			.hsw.regs = &icl_aux_power_well_regs,
3850 			.hsw.idx = ICL_PW_CTL_IDX_AUX_A,
3851 		},
3852 	},
3853 	{
3854 		.name = "AUX B",
3855 		.domains = ICL_AUX_B_IO_POWER_DOMAINS,
3856 		.ops = &hsw_power_well_ops,
3857 		.id = DISP_PW_ID_NONE,
3858 		{
3859 			.hsw.regs = &icl_aux_power_well_regs,
3860 			.hsw.idx = ICL_PW_CTL_IDX_AUX_B,
3861 		},
3862 	},
3863 	{
3864 		.name = "AUX C",
3865 		.domains = ICL_AUX_C_TC1_IO_POWER_DOMAINS,
3866 		.ops = &hsw_power_well_ops,
3867 		.id = DISP_PW_ID_NONE,
3868 		{
3869 			.hsw.regs = &icl_aux_power_well_regs,
3870 			.hsw.idx = ICL_PW_CTL_IDX_AUX_C,
3871 		},
3872 	},
3873 	{
3874 		.name = "AUX D",
3875 		.domains = ICL_AUX_D_TC2_IO_POWER_DOMAINS,
3876 		.ops = &hsw_power_well_ops,
3877 		.id = DISP_PW_ID_NONE,
3878 		{
3879 			.hsw.regs = &icl_aux_power_well_regs,
3880 			.hsw.idx = ICL_PW_CTL_IDX_AUX_D,
3881 		},
3882 	},
3883 	{
3884 		.name = "power well 4",
3885 		.domains = ICL_PW_4_POWER_DOMAINS,
3886 		.ops = &hsw_power_well_ops,
3887 		.id = DISP_PW_ID_NONE,
3888 		{
3889 			.hsw.regs = &hsw_power_well_regs,
3890 			.hsw.idx = ICL_PW_CTL_IDX_PW_4,
3891 			.hsw.has_fuses = true,
3892 			.hsw.irq_pipe_mask = BIT(PIPE_C),
3893 		},
3894 	},
3895 };
3896 
3897 static const struct i915_power_well_desc tgl_power_wells[] = {
3898 	{
3899 		.name = "always-on",
3900 		.always_on = true,
3901 		.domains = POWER_DOMAIN_MASK,
3902 		.ops = &i9xx_always_on_power_well_ops,
3903 		.id = DISP_PW_ID_NONE,
3904 	},
3905 	{
3906 		.name = "power well 1",
3907 		/* Handled by the DMC firmware */
3908 		.always_on = true,
3909 		.domains = 0,
3910 		.ops = &hsw_power_well_ops,
3911 		.id = SKL_DISP_PW_1,
3912 		{
3913 			.hsw.regs = &hsw_power_well_regs,
3914 			.hsw.idx = ICL_PW_CTL_IDX_PW_1,
3915 			.hsw.has_fuses = true,
3916 		},
3917 	},
3918 	{
3919 		.name = "DC off",
3920 		.domains = TGL_DISPLAY_DC_OFF_POWER_DOMAINS,
3921 		.ops = &gen9_dc_off_power_well_ops,
3922 		.id = SKL_DISP_DC_OFF,
3923 	},
3924 	{
3925 		.name = "power well 2",
3926 		.domains = TGL_PW_2_POWER_DOMAINS,
3927 		.ops = &hsw_power_well_ops,
3928 		.id = SKL_DISP_PW_2,
3929 		{
3930 			.hsw.regs = &hsw_power_well_regs,
3931 			.hsw.idx = ICL_PW_CTL_IDX_PW_2,
3932 			.hsw.has_fuses = true,
3933 		},
3934 	},
3935 	{
3936 		.name = "power well 3",
3937 		.domains = TGL_PW_3_POWER_DOMAINS,
3938 		.ops = &hsw_power_well_ops,
3939 		.id = DISP_PW_ID_NONE,
3940 		{
3941 			.hsw.regs = &hsw_power_well_regs,
3942 			.hsw.idx = ICL_PW_CTL_IDX_PW_3,
3943 			.hsw.irq_pipe_mask = BIT(PIPE_B),
3944 			.hsw.has_vga = true,
3945 			.hsw.has_fuses = true,
3946 		},
3947 	},
3948 	{
3949 		.name = "DDI A IO",
3950 		.domains = ICL_DDI_IO_A_POWER_DOMAINS,
3951 		.ops = &hsw_power_well_ops,
3952 		.id = DISP_PW_ID_NONE,
3953 		{
3954 			.hsw.regs = &icl_ddi_power_well_regs,
3955 			.hsw.idx = ICL_PW_CTL_IDX_DDI_A,
3956 		},
3957 	},
3958 	{
3959 		.name = "DDI B IO",
3960 		.domains = ICL_DDI_IO_B_POWER_DOMAINS,
3961 		.ops = &hsw_power_well_ops,
3962 		.id = DISP_PW_ID_NONE,
3963 		{
3964 			.hsw.regs = &icl_ddi_power_well_regs,
3965 			.hsw.idx = ICL_PW_CTL_IDX_DDI_B,
3966 		},
3967 	},
3968 	{
3969 		.name = "DDI C IO",
3970 		.domains = ICL_DDI_IO_C_POWER_DOMAINS,
3971 		.ops = &hsw_power_well_ops,
3972 		.id = DISP_PW_ID_NONE,
3973 		{
3974 			.hsw.regs = &icl_ddi_power_well_regs,
3975 			.hsw.idx = ICL_PW_CTL_IDX_DDI_C,
3976 		},
3977 	},
3978 	{
3979 		.name = "DDI D TC1 IO",
3980 		.domains = TGL_DDI_IO_D_TC1_POWER_DOMAINS,
3981 		.ops = &hsw_power_well_ops,
3982 		.id = DISP_PW_ID_NONE,
3983 		{
3984 			.hsw.regs = &icl_ddi_power_well_regs,
3985 			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC1,
3986 		},
3987 	},
3988 	{
3989 		.name = "DDI E TC2 IO",
3990 		.domains = TGL_DDI_IO_E_TC2_POWER_DOMAINS,
3991 		.ops = &hsw_power_well_ops,
3992 		.id = DISP_PW_ID_NONE,
3993 		{
3994 			.hsw.regs = &icl_ddi_power_well_regs,
3995 			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC2,
3996 		},
3997 	},
3998 	{
3999 		.name = "DDI F TC3 IO",
4000 		.domains = TGL_DDI_IO_F_TC3_POWER_DOMAINS,
4001 		.ops = &hsw_power_well_ops,
4002 		.id = DISP_PW_ID_NONE,
4003 		{
4004 			.hsw.regs = &icl_ddi_power_well_regs,
4005 			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC3,
4006 		},
4007 	},
4008 	{
4009 		.name = "DDI G TC4 IO",
4010 		.domains = TGL_DDI_IO_G_TC4_POWER_DOMAINS,
4011 		.ops = &hsw_power_well_ops,
4012 		.id = DISP_PW_ID_NONE,
4013 		{
4014 			.hsw.regs = &icl_ddi_power_well_regs,
4015 			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC4,
4016 		},
4017 	},
4018 	{
4019 		.name = "DDI H TC5 IO",
4020 		.domains = TGL_DDI_IO_H_TC5_POWER_DOMAINS,
4021 		.ops = &hsw_power_well_ops,
4022 		.id = DISP_PW_ID_NONE,
4023 		{
4024 			.hsw.regs = &icl_ddi_power_well_regs,
4025 			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC5,
4026 		},
4027 	},
4028 	{
4029 		.name = "DDI I TC6 IO",
4030 		.domains = TGL_DDI_IO_I_TC6_POWER_DOMAINS,
4031 		.ops = &hsw_power_well_ops,
4032 		.id = DISP_PW_ID_NONE,
4033 		{
4034 			.hsw.regs = &icl_ddi_power_well_regs,
4035 			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC6,
4036 		},
4037 	},
4038 	{
4039 		.name = "AUX A",
4040 		.domains = TGL_AUX_A_IO_POWER_DOMAINS,
4041 		.ops = &hsw_power_well_ops,
4042 		.id = DISP_PW_ID_NONE,
4043 		{
4044 			.hsw.regs = &icl_aux_power_well_regs,
4045 			.hsw.idx = ICL_PW_CTL_IDX_AUX_A,
4046 		},
4047 	},
4048 	{
4049 		.name = "AUX B",
4050 		.domains = TGL_AUX_B_IO_POWER_DOMAINS,
4051 		.ops = &hsw_power_well_ops,
4052 		.id = DISP_PW_ID_NONE,
4053 		{
4054 			.hsw.regs = &icl_aux_power_well_regs,
4055 			.hsw.idx = ICL_PW_CTL_IDX_AUX_B,
4056 		},
4057 	},
4058 	{
4059 		.name = "AUX C",
4060 		.domains = TGL_AUX_C_IO_POWER_DOMAINS,
4061 		.ops = &hsw_power_well_ops,
4062 		.id = DISP_PW_ID_NONE,
4063 		{
4064 			.hsw.regs = &icl_aux_power_well_regs,
4065 			.hsw.idx = ICL_PW_CTL_IDX_AUX_C,
4066 		},
4067 	},
4068 	{
4069 		.name = "AUX D TC1",
4070 		.domains = TGL_AUX_D_TC1_IO_POWER_DOMAINS,
4071 		.ops = &icl_tc_phy_aux_power_well_ops,
4072 		.id = DISP_PW_ID_NONE,
4073 		{
4074 			.hsw.regs = &icl_aux_power_well_regs,
4075 			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC1,
4076 			.hsw.is_tc_tbt = false,
4077 		},
4078 	},
4079 	{
4080 		.name = "AUX E TC2",
4081 		.domains = TGL_AUX_E_TC2_IO_POWER_DOMAINS,
4082 		.ops = &icl_tc_phy_aux_power_well_ops,
4083 		.id = DISP_PW_ID_NONE,
4084 		{
4085 			.hsw.regs = &icl_aux_power_well_regs,
4086 			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC2,
4087 			.hsw.is_tc_tbt = false,
4088 		},
4089 	},
4090 	{
4091 		.name = "AUX F TC3",
4092 		.domains = TGL_AUX_F_TC3_IO_POWER_DOMAINS,
4093 		.ops = &icl_tc_phy_aux_power_well_ops,
4094 		.id = DISP_PW_ID_NONE,
4095 		{
4096 			.hsw.regs = &icl_aux_power_well_regs,
4097 			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC3,
4098 			.hsw.is_tc_tbt = false,
4099 		},
4100 	},
4101 	{
4102 		.name = "AUX G TC4",
4103 		.domains = TGL_AUX_G_TC4_IO_POWER_DOMAINS,
4104 		.ops = &icl_tc_phy_aux_power_well_ops,
4105 		.id = DISP_PW_ID_NONE,
4106 		{
4107 			.hsw.regs = &icl_aux_power_well_regs,
4108 			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC4,
4109 			.hsw.is_tc_tbt = false,
4110 		},
4111 	},
4112 	{
4113 		.name = "AUX H TC5",
4114 		.domains = TGL_AUX_H_TC5_IO_POWER_DOMAINS,
4115 		.ops = &icl_tc_phy_aux_power_well_ops,
4116 		.id = DISP_PW_ID_NONE,
4117 		{
4118 			.hsw.regs = &icl_aux_power_well_regs,
4119 			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC5,
4120 			.hsw.is_tc_tbt = false,
4121 		},
4122 	},
4123 	{
4124 		.name = "AUX I TC6",
4125 		.domains = TGL_AUX_I_TC6_IO_POWER_DOMAINS,
4126 		.ops = &icl_tc_phy_aux_power_well_ops,
4127 		.id = DISP_PW_ID_NONE,
4128 		{
4129 			.hsw.regs = &icl_aux_power_well_regs,
4130 			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC6,
4131 			.hsw.is_tc_tbt = false,
4132 		},
4133 	},
4134 	{
4135 		.name = "AUX D TBT1",
4136 		.domains = TGL_AUX_D_TBT1_IO_POWER_DOMAINS,
4137 		.ops = &hsw_power_well_ops,
4138 		.id = DISP_PW_ID_NONE,
4139 		{
4140 			.hsw.regs = &icl_aux_power_well_regs,
4141 			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT1,
4142 			.hsw.is_tc_tbt = true,
4143 		},
4144 	},
4145 	{
4146 		.name = "AUX E TBT2",
4147 		.domains = TGL_AUX_E_TBT2_IO_POWER_DOMAINS,
4148 		.ops = &hsw_power_well_ops,
4149 		.id = DISP_PW_ID_NONE,
4150 		{
4151 			.hsw.regs = &icl_aux_power_well_regs,
4152 			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT2,
4153 			.hsw.is_tc_tbt = true,
4154 		},
4155 	},
4156 	{
4157 		.name = "AUX F TBT3",
4158 		.domains = TGL_AUX_F_TBT3_IO_POWER_DOMAINS,
4159 		.ops = &hsw_power_well_ops,
4160 		.id = DISP_PW_ID_NONE,
4161 		{
4162 			.hsw.regs = &icl_aux_power_well_regs,
4163 			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT3,
4164 			.hsw.is_tc_tbt = true,
4165 		},
4166 	},
4167 	{
4168 		.name = "AUX G TBT4",
4169 		.domains = TGL_AUX_G_TBT4_IO_POWER_DOMAINS,
4170 		.ops = &hsw_power_well_ops,
4171 		.id = DISP_PW_ID_NONE,
4172 		{
4173 			.hsw.regs = &icl_aux_power_well_regs,
4174 			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT4,
4175 			.hsw.is_tc_tbt = true,
4176 		},
4177 	},
4178 	{
4179 		.name = "AUX H TBT5",
4180 		.domains = TGL_AUX_H_TBT5_IO_POWER_DOMAINS,
4181 		.ops = &hsw_power_well_ops,
4182 		.id = DISP_PW_ID_NONE,
4183 		{
4184 			.hsw.regs = &icl_aux_power_well_regs,
4185 			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT5,
4186 			.hsw.is_tc_tbt = true,
4187 		},
4188 	},
4189 	{
4190 		.name = "AUX I TBT6",
4191 		.domains = TGL_AUX_I_TBT6_IO_POWER_DOMAINS,
4192 		.ops = &hsw_power_well_ops,
4193 		.id = DISP_PW_ID_NONE,
4194 		{
4195 			.hsw.regs = &icl_aux_power_well_regs,
4196 			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT6,
4197 			.hsw.is_tc_tbt = true,
4198 		},
4199 	},
4200 	{
4201 		.name = "power well 4",
4202 		.domains = TGL_PW_4_POWER_DOMAINS,
4203 		.ops = &hsw_power_well_ops,
4204 		.id = DISP_PW_ID_NONE,
4205 		{
4206 			.hsw.regs = &hsw_power_well_regs,
4207 			.hsw.idx = ICL_PW_CTL_IDX_PW_4,
4208 			.hsw.has_fuses = true,
4209 			.hsw.irq_pipe_mask = BIT(PIPE_C),
4210 		},
4211 	},
4212 	{
4213 		.name = "power well 5",
4214 		.domains = TGL_PW_5_POWER_DOMAINS,
4215 		.ops = &hsw_power_well_ops,
4216 		.id = DISP_PW_ID_NONE,
4217 		{
4218 			.hsw.regs = &hsw_power_well_regs,
4219 			.hsw.idx = TGL_PW_CTL_IDX_PW_5,
4220 			.hsw.has_fuses = true,
4221 			.hsw.irq_pipe_mask = BIT(PIPE_D),
4222 		},
4223 	},
4224 };
4225 
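/*
 * Sanitize the i915.disable_power_well modparam: non-negative values are
 * normalized to 0/1, while a negative ("auto") value defaults to 1, i.e.
 * unused power wells may be disabled.
 */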
4226 static int
4227 sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
4228 				   int disable_power_well)
4229 {
4230 	if (disable_power_well >= 0)
4231 		return !!disable_power_well;
4232 
4233 	return 1;
4234 }
4235 
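/*
 * Compute the mask of DC states the driver is allowed to enter, based on
 * the platform's capabilities and the i915.enable_dc modparam. As a worked
 * example: on a gen12 platform with enable_dc=-1 (auto) and power well
 * disabling left enabled, max_dc is 4 and the resulting mask is
 * DC_STATE_EN_DC9 | DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6.
 */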
4236 static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
4237 			       int enable_dc)
4238 {
4239 	u32 mask;
4240 	int requested_dc;
4241 	int max_dc;
4242 
4243 	if (INTEL_GEN(dev_priv) >= 12) {
4244 		max_dc = 4;
4245 		/*
4246 		 * DC9 has a separate HW flow from the rest of the DC states,
4247 		 * not depending on the DMC firmware. It's needed by system
4248 		 * suspend/resume, so allow it unconditionally.
4249 		 */
4250 		mask = DC_STATE_EN_DC9;
4251 	} else if (IS_GEN(dev_priv, 11)) {
4252 		max_dc = 2;
4253 		mask = DC_STATE_EN_DC9;
4254 	} else if (IS_GEN(dev_priv, 10) || IS_GEN9_BC(dev_priv)) {
4255 		max_dc = 2;
4256 		mask = 0;
4257 	} else if (IS_GEN9_LP(dev_priv)) {
4258 		max_dc = 1;
4259 		mask = DC_STATE_EN_DC9;
4260 	} else {
4261 		max_dc = 0;
4262 		mask = 0;
4263 	}
4264 
4265 	if (!i915_modparams.disable_power_well)
4266 		max_dc = 0;
4267 
4268 	if (enable_dc >= 0 && enable_dc <= max_dc) {
4269 		requested_dc = enable_dc;
4270 	} else if (enable_dc == -1) {
4271 		requested_dc = max_dc;
4272 	} else if (enable_dc > max_dc && enable_dc <= 4) {
4273 		drm_dbg_kms(&dev_priv->drm,
4274 			    "Adjusting requested max DC state (%d->%d)\n",
4275 			    enable_dc, max_dc);
4276 		requested_dc = max_dc;
4277 	} else {
4278 		drm_err(&dev_priv->drm,
4279 			"Unexpected value for enable_dc (%d)\n", enable_dc);
4280 		requested_dc = max_dc;
4281 	}
4282 
4283 	switch (requested_dc) {
4284 	case 4:
4285 		mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6;
4286 		break;
4287 	case 3:
4288 		mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC5;
4289 		break;
4290 	case 2:
4291 		mask |= DC_STATE_EN_UPTO_DC6;
4292 		break;
4293 	case 1:
4294 		mask |= DC_STATE_EN_UPTO_DC5;
4295 		break;
4296 	}
4297 
4298 	drm_dbg_kms(&dev_priv->drm, "Allowed DC state mask %02x\n", mask);
4299 
4300 	return mask;
4301 }
4302 
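/*
 * Allocate the runtime power well array and point each entry at its static
 * descriptor. The power_well_ids bitmask exists only to WARN if a non-NONE
 * well ID shows up more than once in a platform's descriptor list.
 */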
4303 static int
4304 __set_power_wells(struct i915_power_domains *power_domains,
4305 		  const struct i915_power_well_desc *power_well_descs,
4306 		  int power_well_count)
4307 {
4308 	u64 power_well_ids = 0;
4309 	int i;
4310 
4311 	power_domains->power_well_count = power_well_count;
4312 	power_domains->power_wells =
4313 				kcalloc(power_well_count,
4314 					sizeof(*power_domains->power_wells),
4315 					GFP_KERNEL);
4316 	if (!power_domains->power_wells)
4317 		return -ENOMEM;
4318 
4319 	for (i = 0; i < power_well_count; i++) {
4320 		enum i915_power_well_id id = power_well_descs[i].id;
4321 
4322 		power_domains->power_wells[i].desc = &power_well_descs[i];
4323 
4324 		if (id == DISP_PW_ID_NONE)
4325 			continue;
4326 
4327 		WARN_ON(id >= sizeof(power_well_ids) * 8);
4328 		WARN_ON(power_well_ids & BIT_ULL(id));
4329 		power_well_ids |= BIT_ULL(id);
4330 	}
4331 
4332 	return 0;
4333 }
4334 
4335 #define set_power_wells(power_domains, __power_well_descs) \
4336 	__set_power_wells(power_domains, __power_well_descs, \
4337 			  ARRAY_SIZE(__power_well_descs))
4338 
4339 /**
4340  * intel_power_domains_init - initializes the power domain structures
4341  * @dev_priv: i915 device instance
4342  *
4343  * Initializes the power domain structures for @dev_priv depending upon the
4344  * supported platform.
4345  */
4346 int intel_power_domains_init(struct drm_i915_private *dev_priv)
4347 {
4348 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
4349 	int err;
4350 
4351 	i915_modparams.disable_power_well =
4352 		sanitize_disable_power_well_option(dev_priv,
4353 						   i915_modparams.disable_power_well);
4354 	dev_priv->csr.allowed_dc_mask =
4355 		get_allowed_dc_mask(dev_priv, i915_modparams.enable_dc);
4356 
4357 	dev_priv->csr.target_dc_state =
4358 		sanitize_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
4359 
4360 	BUILD_BUG_ON(POWER_DOMAIN_NUM > 64);
4361 
4362 	mutex_init(&power_domains->lock);
4363 
4364 	INIT_DELAYED_WORK(&power_domains->async_put_work,
4365 			  intel_display_power_put_async_work);
4366 
4367 	/*
4368 	 * The enabling order will be from lower to higher indexed wells;
4369 	 * the disabling order is reversed.
4370 	 */
4371 	if (IS_GEN(dev_priv, 12)) {
4372 		err = set_power_wells(power_domains, tgl_power_wells);
4373 	} else if (IS_ELKHARTLAKE(dev_priv)) {
4374 		err = set_power_wells(power_domains, ehl_power_wells);
4375 	} else if (IS_GEN(dev_priv, 11)) {
4376 		err = set_power_wells(power_domains, icl_power_wells);
4377 	} else if (IS_CANNONLAKE(dev_priv)) {
4378 		err = set_power_wells(power_domains, cnl_power_wells);
4379 
4380 		/*
4381 		 * The DDI and AUX IO power wells are listed for all ports
4382 		 * regardless of their presence or use. To avoid timeouts,
4383 		 * remove the port F wells from the list on SKUs without
4384 		 * port F.
4385 		 */
4386 		if (!IS_CNL_WITH_PORT_F(dev_priv))
4387 			power_domains->power_well_count -= 2;
4388 	} else if (IS_GEMINILAKE(dev_priv)) {
4389 		err = set_power_wells(power_domains, glk_power_wells);
4390 	} else if (IS_BROXTON(dev_priv)) {
4391 		err = set_power_wells(power_domains, bxt_power_wells);
4392 	} else if (IS_GEN9_BC(dev_priv)) {
4393 		err = set_power_wells(power_domains, skl_power_wells);
4394 	} else if (IS_CHERRYVIEW(dev_priv)) {
4395 		err = set_power_wells(power_domains, chv_power_wells);
4396 	} else if (IS_BROADWELL(dev_priv)) {
4397 		err = set_power_wells(power_domains, bdw_power_wells);
4398 	} else if (IS_HASWELL(dev_priv)) {
4399 		err = set_power_wells(power_domains, hsw_power_wells);
4400 	} else if (IS_VALLEYVIEW(dev_priv)) {
4401 		err = set_power_wells(power_domains, vlv_power_wells);
4402 	} else if (IS_I830(dev_priv)) {
4403 		err = set_power_wells(power_domains, i830_power_wells);
4404 	} else {
4405 		err = set_power_wells(power_domains, i9xx_always_on_power_well);
4406 	}
4407 
4408 	return err;
4409 }
4410 
4411 /**
4412  * intel_power_domains_cleanup - clean up power domains resources
4413  * @dev_priv: i915 device instance
4414  *
4415  * Release any resources acquired by intel_power_domains_init()
4416  */
4417 void intel_power_domains_cleanup(struct drm_i915_private *dev_priv)
4418 {
4419 	kfree(dev_priv->power_domains.power_wells);
4420 }
4421 
4422 static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
4423 {
4424 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
4425 	struct i915_power_well *power_well;
4426 
4427 	mutex_lock(&power_domains->lock);
4428 	for_each_power_well(dev_priv, power_well) {
4429 		power_well->desc->ops->sync_hw(dev_priv, power_well);
4430 		power_well->hw_enabled =
4431 			power_well->desc->ops->is_enabled(dev_priv, power_well);
4432 	}
4433 	mutex_unlock(&power_domains->lock);
4434 }
4435 
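/*
 * Set or clear the power request bit of a single DBUF slice and verify that
 * the slice actually reached the requested state; the hardware is given
 * 10 us to update the status bit before a timeout is reported.
 */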
4436 static inline
4437 bool intel_dbuf_slice_set(struct drm_i915_private *dev_priv,
4438 			  i915_reg_t reg, bool enable)
4439 {
4440 	u32 val, status;
4441 
4442 	val = intel_de_read(dev_priv, reg);
4443 	val = enable ? (val | DBUF_POWER_REQUEST) : (val & ~DBUF_POWER_REQUEST);
4444 	intel_de_write(dev_priv, reg, val);
4445 	intel_de_posting_read(dev_priv, reg);
4446 	udelay(10);
4447 
4448 	status = intel_de_read(dev_priv, reg) & DBUF_POWER_STATE;
4449 	if ((enable && !status) || (!enable && status)) {
4450 		drm_err(&dev_priv->drm, "DBus power %s timeout!\n",
4451 			enable ? "enable" : "disable");
4452 		return false;
4453 	}
4454 	return true;
4455 }
4456 
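/* Gen9 exposes a single DBUF slice (S1), so only that one is toggled. */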
4457 static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
4458 {
4459 	icl_dbuf_slices_update(dev_priv, BIT(DBUF_S1));
4460 }
4461 
4462 static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
4463 {
4464 	icl_dbuf_slices_update(dev_priv, 0);
4465 }
4466 
4467 void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
4468 			    u8 req_slices)
4469 {
4470 	int i;
4471 	int max_slices = INTEL_INFO(dev_priv)->num_supported_dbuf_slices;
4472 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
4473 
4474 	drm_WARN(&dev_priv->drm, hweight8(req_slices) > max_slices,
4475 		 "Invalid number of dbuf slices requested\n");
4476 
4477 	DRM_DEBUG_KMS("Updating dbuf slices to 0x%x\n", req_slices);
4478 
4479 	/*
4480 	 * This might run in parallel with gen9_dc_off_power_well_enable()
4481 	 * being called from intel_dp_detect(), for instance, which would
4482 	 * trigger an assertion due to a race: gen9_assert_dbuf_enabled()
4483 	 * could preempt this after the registers were already updated,
4484 	 * but before dev_priv was.
4485 	 */
4486 	mutex_lock(&power_domains->lock);
4487 
4488 	for (i = 0; i < max_slices; i++) {
4489 		intel_dbuf_slice_set(dev_priv,
4490 				     DBUF_CTL_S(i),
4491 				     (req_slices & BIT(i)) != 0);
4492 	}
4493 
4494 	dev_priv->enabled_dbuf_slices_mask = req_slices;
4495 
4496 	mutex_unlock(&power_domains->lock);
4497 }
4498 
4499 static void icl_dbuf_enable(struct drm_i915_private *dev_priv)
4500 {
4501 	skl_ddb_get_hw_state(dev_priv);
4502 	/*
4503 	 * Just power up at least one slice; we will figure out later
4504 	 * which slices we actually have and which ones we need.
4505 	 */
4506 	icl_dbuf_slices_update(dev_priv, dev_priv->enabled_dbuf_slices_mask |
4507 			       BIT(DBUF_S1));
4508 }
4509 
4510 static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
4511 {
4512 	icl_dbuf_slices_update(dev_priv, 0);
4513 }
4514 
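/*
 * Program the MBUS arbiter ABOX credits (BT credit pools, B credits and BW
 * credits). Gen12 has additional ABOX instances, so the same values are
 * mirrored to MBUS_ABOX1_CTL and MBUS_ABOX2_CTL there.
 */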
4515 static void icl_mbus_init(struct drm_i915_private *dev_priv)
4516 {
4517 	u32 mask, val;
4518 
4519 	mask = MBUS_ABOX_BT_CREDIT_POOL1_MASK |
4520 		MBUS_ABOX_BT_CREDIT_POOL2_MASK |
4521 		MBUS_ABOX_B_CREDIT_MASK |
4522 		MBUS_ABOX_BW_CREDIT_MASK;
4523 	val = MBUS_ABOX_BT_CREDIT_POOL1(16) |
4524 		MBUS_ABOX_BT_CREDIT_POOL2(16) |
4525 		MBUS_ABOX_B_CREDIT(1) |
4526 		MBUS_ABOX_BW_CREDIT(1);
4527 
4528 	intel_de_rmw(dev_priv, MBUS_ABOX_CTL, mask, val);
4529 	if (INTEL_GEN(dev_priv) >= 12) {
4530 		intel_de_rmw(dev_priv, MBUS_ABOX1_CTL, mask, val);
4531 		intel_de_rmw(dev_priv, MBUS_ABOX2_CTL, mask, val);
4532 	}
4533 }
4534 
4535 static void hsw_assert_cdclk(struct drm_i915_private *dev_priv)
4536 {
4537 	u32 val = intel_de_read(dev_priv, LCPLL_CTL);
4538 
4539 	/*
4540 	 * The LCPLL register should be turned on by the BIOS. For now
4541 	 * let's just check its state and print errors in case
4542 	 * something is wrong.  Don't even try to turn it on.
4543 	 */
4544 
4545 	if (val & LCPLL_CD_SOURCE_FCLK)
4546 		drm_err(&dev_priv->drm, "CDCLK source is not LCPLL\n");
4547 
4548 	if (val & LCPLL_PLL_DISABLE)
4549 		drm_err(&dev_priv->drm, "LCPLL is disabled\n");
4550 
4551 	if ((val & LCPLL_REF_MASK) != LCPLL_REF_NON_SSC)
4552 		drm_err(&dev_priv->drm, "LCPLL not using non-SSC reference\n");
4553 }
4554 
4555 static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
4556 {
4557 	struct drm_device *dev = &dev_priv->drm;
4558 	struct intel_crtc *crtc;
4559 
4560 	for_each_intel_crtc(dev, crtc)
4561 		I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
4562 				pipe_name(crtc->pipe));
4563 
4564 	I915_STATE_WARN(intel_de_read(dev_priv, HSW_PWR_WELL_CTL2),
4565 			"Display power well on\n");
4566 	I915_STATE_WARN(intel_de_read(dev_priv, SPLL_CTL) & SPLL_PLL_ENABLE,
4567 			"SPLL enabled\n");
4568 	I915_STATE_WARN(intel_de_read(dev_priv, WRPLL_CTL(0)) & WRPLL_PLL_ENABLE,
4569 			"WRPLL1 enabled\n");
4570 	I915_STATE_WARN(intel_de_read(dev_priv, WRPLL_CTL(1)) & WRPLL_PLL_ENABLE,
4571 			"WRPLL2 enabled\n");
4572 	I915_STATE_WARN(intel_de_read(dev_priv, PP_STATUS(0)) & PP_ON,
4573 			"Panel power on\n");
4574 	I915_STATE_WARN(intel_de_read(dev_priv, BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
4575 			"CPU PWM1 enabled\n");
4576 	if (IS_HASWELL(dev_priv))
4577 		I915_STATE_WARN(intel_de_read(dev_priv, HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
4578 				"CPU PWM2 enabled\n");
4579 	I915_STATE_WARN(intel_de_read(dev_priv, BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
4580 			"PCH PWM1 enabled\n");
4581 	I915_STATE_WARN(intel_de_read(dev_priv, UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
4582 			"Utility pin enabled\n");
4583 	I915_STATE_WARN(intel_de_read(dev_priv, PCH_GTC_CTL) & PCH_GTC_ENABLE,
4584 			"PCH GTC enabled\n");
4585 
4586 	/*
4587 	 * In theory we can still leave IRQs enabled, as long as only the HPD
4588 	 * interrupts remain enabled. We used to check for that, but since it's
4589 	 * gen-specific and since we only disable LCPLL after we fully disable
4590 	 * the interrupts, the check below should be enough.
4591 	 */
4592 	I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
4593 }
4594 
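/*
 * D_COMP lives at a different location on HSW and BDW. Note that on HSW
 * writes additionally have to go through the pcode mailbox rather than
 * plain MMIO, see hsw_write_dcomp() below.
 */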
4595 static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv)
4596 {
4597 	if (IS_HASWELL(dev_priv))
4598 		return intel_de_read(dev_priv, D_COMP_HSW);
4599 	else
4600 		return intel_de_read(dev_priv, D_COMP_BDW);
4601 }
4602 
4603 static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val)
4604 {
4605 	if (IS_HASWELL(dev_priv)) {
4606 		if (sandybridge_pcode_write(dev_priv,
4607 					    GEN6_PCODE_WRITE_D_COMP, val))
4608 			drm_dbg_kms(&dev_priv->drm,
4609 				    "Failed to write to D_COMP\n");
4610 	} else {
4611 		intel_de_write(dev_priv, D_COMP_BDW, val);
4612 		intel_de_posting_read(dev_priv, D_COMP_BDW);
4613 	}
4614 }
4615 
4616 /*
4617  * This function implements pieces of two sequences from BSpec:
4618  * - Sequence for display software to disable LCPLL
4619  * - Sequence for display software to allow package C8+
4620  * The steps implemented here are just the steps that actually touch the LCPLL
4621  * register. Callers should take care of disabling all the display engine
4622  * functions, doing the mode unset, fixing interrupts, etc.
4623  */
4624 static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
4625 			      bool switch_to_fclk, bool allow_power_down)
4626 {
4627 	u32 val;
4628 
4629 	assert_can_disable_lcpll(dev_priv);
4630 
4631 	val = intel_de_read(dev_priv, LCPLL_CTL);
4632 
4633 	if (switch_to_fclk) {
4634 		val |= LCPLL_CD_SOURCE_FCLK;
4635 		intel_de_write(dev_priv, LCPLL_CTL, val);
4636 
4637 		if (wait_for_us(intel_de_read(dev_priv, LCPLL_CTL) &
4638 				LCPLL_CD_SOURCE_FCLK_DONE, 1))
4639 			drm_err(&dev_priv->drm, "Switching to FCLK failed\n");
4640 
4641 		val = intel_de_read(dev_priv, LCPLL_CTL);
4642 	}
4643 
4644 	val |= LCPLL_PLL_DISABLE;
4645 	intel_de_write(dev_priv, LCPLL_CTL, val);
4646 	intel_de_posting_read(dev_priv, LCPLL_CTL);
4647 
4648 	if (intel_de_wait_for_clear(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 1))
4649 		drm_err(&dev_priv->drm, "LCPLL still locked\n");
4650 
4651 	val = hsw_read_dcomp(dev_priv);
4652 	val |= D_COMP_COMP_DISABLE;
4653 	hsw_write_dcomp(dev_priv, val);
4654 	ndelay(100);
4655 
4656 	if (wait_for((hsw_read_dcomp(dev_priv) &
4657 		      D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
4658 		drm_err(&dev_priv->drm, "D_COMP RCOMP still in progress\n");
4659 
4660 	if (allow_power_down) {
4661 		val = intel_de_read(dev_priv, LCPLL_CTL);
4662 		val |= LCPLL_POWER_DOWN_ALLOW;
4663 		intel_de_write(dev_priv, LCPLL_CTL, val);
4664 		intel_de_posting_read(dev_priv, LCPLL_CTL);
4665 	}
4666 }
4667 
4668 /*
4669  * Fully restores LCPLL, disallowing power down and switching back to LCPLL
4670  * source.
4671  */
4672 static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
4673 {
4674 	u32 val;
4675 
4676 	val = intel_de_read(dev_priv, LCPLL_CTL);
4677 
4678 	if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
4679 		    LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
4680 		return;
4681 
4682 	/*
4683 	 * Make sure we're not in the PC8 state before disabling PC8, otherwise
4684 	 * we'll hang the machine. To keep the device out of PC8, just enable forcewake.
4685 	 */
4686 	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
4687 
4688 	if (val & LCPLL_POWER_DOWN_ALLOW) {
4689 		val &= ~LCPLL_POWER_DOWN_ALLOW;
4690 		intel_de_write(dev_priv, LCPLL_CTL, val);
4691 		intel_de_posting_read(dev_priv, LCPLL_CTL);
4692 	}
4693 
4694 	val = hsw_read_dcomp(dev_priv);
4695 	val |= D_COMP_COMP_FORCE;
4696 	val &= ~D_COMP_COMP_DISABLE;
4697 	hsw_write_dcomp(dev_priv, val);
4698 
4699 	val = intel_de_read(dev_priv, LCPLL_CTL);
4700 	val &= ~LCPLL_PLL_DISABLE;
4701 	intel_de_write(dev_priv, LCPLL_CTL, val);
4702 
4703 	if (intel_de_wait_for_set(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 5))
4704 		drm_err(&dev_priv->drm, "LCPLL not locked yet\n");
4705 
4706 	if (val & LCPLL_CD_SOURCE_FCLK) {
4707 		val = intel_de_read(dev_priv, LCPLL_CTL);
4708 		val &= ~LCPLL_CD_SOURCE_FCLK;
4709 		intel_de_write(dev_priv, LCPLL_CTL, val);
4710 
4711 		if (wait_for_us((intel_de_read(dev_priv, LCPLL_CTL) &
4712 				 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
4713 			drm_err(&dev_priv->drm,
4714 				"Switching back to LCPLL failed\n");
4715 	}
4716 
4717 	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
4718 
4719 	intel_update_cdclk(dev_priv);
4720 	intel_dump_cdclk_config(&dev_priv->cdclk.hw, "Current CDCLK");
4721 }
4722 
4723 /*
4724  * Package states C8 and deeper are really deep PC states that can only be
4725  * reached when all the devices on the system allow it, so even if the graphics
4726  * device allows PC8+, it doesn't mean the system will actually get to these
4727  * states. Our driver only allows PC8+ when going into runtime PM.
4728  *
4729  * The requirements for PC8+ are that all the outputs are disabled, the power
4730  * well is disabled and most interrupts are disabled, and these are also
4731  * requirements for runtime PM. When these conditions are met, we manually
4732  * handle the rest: disable the interrupts and clocks, and switch the LCPLL
4733  * refclk to Fclk. If we're in PC8+ and we get a non-hotplug interrupt, we
4734  * can hard hang the machine.
4735  *
4736  * When we really reach PC8 or deeper states (not just when we allow it) we lose
4737  * the state of some registers, so when we come back from PC8+ we need to
4738  * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
4739  * need to take care of the registers kept by RC6. Notice that this happens even
4740  * if we don't put the device in PCI D3 state (which is what currently happens
4741  * because of the runtime PM support).
4742  *
4743  * For more, read "Display Sequences for Package C8" on the hardware
4744  * documentation.
4745  */
4746 static void hsw_enable_pc8(struct drm_i915_private *dev_priv)
4747 {
4748 	u32 val;
4749 
4750 	drm_dbg_kms(&dev_priv->drm, "Enabling package C8+\n");
4751 
4752 	if (HAS_PCH_LPT_LP(dev_priv)) {
4753 		val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D);
4754 		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
4755 		intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val);
4756 	}
4757 
4758 	lpt_disable_clkout_dp(dev_priv);
4759 	hsw_disable_lcpll(dev_priv, true, true);
4760 }
4761 
4762 static void hsw_disable_pc8(struct drm_i915_private *dev_priv)
4763 {
4764 	u32 val;
4765 
4766 	drm_dbg_kms(&dev_priv->drm, "Disabling package C8+\n");
4767 
4768 	hsw_restore_lcpll(dev_priv);
4769 	intel_init_pch_refclk(dev_priv);
4770 
4771 	if (HAS_PCH_LPT_LP(dev_priv)) {
4772 		val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D);
4773 		val |= PCH_LP_PARTITION_LEVEL_DISABLE;
4774 		intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val);
4775 	}
4776 }
4777 
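/*
 * Enable or disable the PCH reset handshake: IVB programs it via
 * GEN7_MSG_CTL, the other platforms via HSW_NDE_RSTWRN_OPT.
 */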
4778 static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
4779 				      bool enable)
4780 {
4781 	i915_reg_t reg;
4782 	u32 reset_bits, val;
4783 
4784 	if (IS_IVYBRIDGE(dev_priv)) {
4785 		reg = GEN7_MSG_CTL;
4786 		reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK;
4787 	} else {
4788 		reg = HSW_NDE_RSTWRN_OPT;
4789 		reset_bits = RESET_PCH_HANDSHAKE_ENABLE;
4790 	}
4791 
4792 	val = intel_de_read(dev_priv, reg);
4793 
4794 	if (enable)
4795 		val |= reset_bits;
4796 	else
4797 		val &= ~reset_bits;
4798 
4799 	intel_de_write(dev_priv, reg, val);
4800 }
4801 
4802 static void skl_display_core_init(struct drm_i915_private *dev_priv,
4803 				  bool resume)
4804 {
4805 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
4806 	struct i915_power_well *well;
4807 
4808 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
4809 
4810 	/* enable PCH reset handshake */
4811 	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
4812 
4813 	/* enable PG1 and Misc I/O */
4814 	mutex_lock(&power_domains->lock);
4815 
4816 	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
4817 	intel_power_well_enable(dev_priv, well);
4818 
4819 	well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
4820 	intel_power_well_enable(dev_priv, well);
4821 
4822 	mutex_unlock(&power_domains->lock);
4823 
4824 	intel_cdclk_init_hw(dev_priv);
4825 
4826 	gen9_dbuf_enable(dev_priv);
4827 
4828 	if (resume && dev_priv->csr.dmc_payload)
4829 		intel_csr_load_program(dev_priv);
4830 }
4831 
4832 static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
4833 {
4834 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
4835 	struct i915_power_well *well;
4836 
4837 	gen9_disable_dc_states(dev_priv);
4838 
4839 	gen9_dbuf_disable(dev_priv);
4840 
4841 	intel_cdclk_uninit_hw(dev_priv);
4842 
4843 	/* The spec doesn't call for removing the reset handshake flag */
4844 	/* disable PG1 and Misc I/O */
4845 
4846 	mutex_lock(&power_domains->lock);
4847 
4848 	/*
4849 	 * BSpec says to keep the MISC IO power well enabled here, only
4850 	 * remove our request for power well 1.
4851 	 * Note that even though the driver's request is removed, power well 1
4852 	 * may stay enabled after this due to the DMC's own request on it.
4853 	 */
4854 	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
4855 	intel_power_well_disable(dev_priv, well);
4856 
4857 	mutex_unlock(&power_domains->lock);
4858 
4859 	usleep_range(10, 30);		/* 10 us delay per Bspec */
4860 }
4861 
4862 static void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume)
4863 {
4864 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
4865 	struct i915_power_well *well;
4866 
4867 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
4868 
4869 	/*
4870 	 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
4871 	 * or else the reset will hang because there is no PCH to respond.
4872 	 * Move the handshake programming to the initialization sequence;
4873 	 * previously it was left up to the BIOS.
4874 	 */
4875 	intel_pch_reset_handshake(dev_priv, false);
4876 
4877 	/* Enable PG1 */
4878 	mutex_lock(&power_domains->lock);
4879 
4880 	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
4881 	intel_power_well_enable(dev_priv, well);
4882 
4883 	mutex_unlock(&power_domains->lock);
4884 
4885 	intel_cdclk_init_hw(dev_priv);
4886 
4887 	gen9_dbuf_enable(dev_priv);
4888 
4889 	if (resume && dev_priv->csr.dmc_payload)
4890 		intel_csr_load_program(dev_priv);
4891 }
4892 
4893 static void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
4894 {
4895 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
4896 	struct i915_power_well *well;
4897 
4898 	gen9_disable_dc_states(dev_priv);
4899 
4900 	gen9_dbuf_disable(dev_priv);
4901 
4902 	intel_cdclk_uninit_hw(dev_priv);
4903 
4904 	/* The spec doesn't call for removing the reset handshake flag */
4905 
4906 	/*
4907 	 * Disable PW1 (PG1).
4908 	 * Note that even though the driver's request is removed, power well 1
4909 	 * may stay enabled after this due to the DMC's own request on it.
4910 	 */
4911 	mutex_lock(&power_domains->lock);
4912 
4913 	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
4914 	intel_power_well_disable(dev_priv, well);
4915 
4916 	mutex_unlock(&power_domains->lock);
4917 
4918 	usleep_range(10, 30);		/* 10 us delay per Bspec */
4919 }
4920 
4921 static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume)
4922 {
4923 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
4924 	struct i915_power_well *well;
4925 
4926 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
4927 
4928 	/* 1. Enable PCH Reset Handshake */
4929 	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
4930 
4931 	/* 2-3. */
4932 	intel_combo_phy_init(dev_priv);
4933 
4934 	/*
4935 	 * 4. Enable Power Well 1 (PG1).
4936 	 *    The AUX IO power wells will be enabled on demand.
4937 	 */
4938 	mutex_lock(&power_domains->lock);
4939 	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
4940 	intel_power_well_enable(dev_priv, well);
4941 	mutex_unlock(&power_domains->lock);
4942 
4943 	/* 5. Enable CD clock */
4944 	intel_cdclk_init_hw(dev_priv);
4945 
4946 	/* 6. Enable DBUF */
4947 	gen9_dbuf_enable(dev_priv);
4948 
4949 	if (resume && dev_priv->csr.dmc_payload)
4950 		intel_csr_load_program(dev_priv);
4951 }
4952 
4953 static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
4954 {
4955 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
4956 	struct i915_power_well *well;
4957 
4958 	gen9_disable_dc_states(dev_priv);
4959 
4960 	/* 1. Disable all display engine functions -> already done */
4961 
4962 	/* 2. Disable DBUF */
4963 	gen9_dbuf_disable(dev_priv);
4964 
4965 	/* 3. Disable CD clock */
4966 	intel_cdclk_uninit_hw(dev_priv);
4967 
4968 	/*
4969 	 * 4. Disable Power Well 1 (PG1).
4970 	 *    The AUX IO power wells are toggled on demand, so they are already
4971 	 *    disabled at this point.
4972 	 */
4973 	mutex_lock(&power_domains->lock);
4974 	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
4975 	intel_power_well_disable(dev_priv, well);
4976 	mutex_unlock(&power_domains->lock);
4977 
4978 	usleep_range(10, 30);		/* 10 us delay per Bspec */
4979 
4980 	/* 5. */
4981 	intel_combo_phy_uninit(dev_priv);
4982 }
4983 
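/*
 * BW_BUDDY page masks, keyed by DRAM type and channel count. The tables
 * below are terminated by an all-zero sentinel entry, which doubles as the
 * "unknown configuration" fallback in tgl_bw_buddy_init().
 */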
4984 struct buddy_page_mask {
4985 	u32 page_mask;
4986 	u8 type;
4987 	u8 num_channels;
4988 };
4989 
4990 static const struct buddy_page_mask tgl_buddy_page_masks[] = {
4991 	{ .num_channels = 1, .type = INTEL_DRAM_LPDDR4, .page_mask = 0xE },
4992 	{ .num_channels = 1, .type = INTEL_DRAM_DDR4,   .page_mask = 0xF },
4993 	{ .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1C },
4994 	{ .num_channels = 2, .type = INTEL_DRAM_DDR4,   .page_mask = 0x1F },
4995 	{}
4996 };
4997 
4998 static const struct buddy_page_mask wa_1409767108_buddy_page_masks[] = {
4999 	{ .num_channels = 1, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1 },
5000 	{ .num_channels = 1, .type = INTEL_DRAM_DDR4,   .page_mask = 0x1 },
5001 	{ .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x3 },
5002 	{ .num_channels = 2, .type = INTEL_DRAM_DDR4,   .page_mask = 0x3 },
5003 	{}
5004 };
5005 
5006 static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv)
5007 {
5008 	enum intel_dram_type type = dev_priv->dram_info.type;
5009 	u8 num_channels = dev_priv->dram_info.num_channels;
5010 	const struct buddy_page_mask *table;
5011 	int i;
5012 
5013 	if (IS_TGL_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_A0))
5014 		/* Wa_1409767108: tgl */
5015 		table = wa_1409767108_buddy_page_masks;
5016 	else
5017 		table = tgl_buddy_page_masks;
5018 
5019 	for (i = 0; table[i].page_mask != 0; i++)
5020 		if (table[i].num_channels == num_channels &&
5021 		    table[i].type == type)
5022 			break;
5023 
5024 	if (table[i].page_mask == 0) {
5025 		drm_dbg(&dev_priv->drm,
5026 			"Unknown memory configuration; disabling address buddy logic.\n");
5027 		intel_de_write(dev_priv, BW_BUDDY1_CTL, BW_BUDDY_DISABLE);
5028 		intel_de_write(dev_priv, BW_BUDDY2_CTL, BW_BUDDY_DISABLE);
5029 	} else {
5030 		intel_de_write(dev_priv, BW_BUDDY1_PAGE_MASK,
5031 			       table[i].page_mask);
5032 		intel_de_write(dev_priv, BW_BUDDY2_PAGE_MASK,
5033 			       table[i].page_mask);
5034 
5035 		/* Wa_22010178259:tgl */
5036 		intel_de_rmw(dev_priv, BW_BUDDY1_CTL,
5037 			     BW_BUDDY_TLB_REQ_TIMER_MASK,
5038 			     REG_FIELD_PREP(BW_BUDDY_TLB_REQ_TIMER_MASK, 0x8));
5039 		intel_de_rmw(dev_priv, BW_BUDDY2_CTL,
5040 			     BW_BUDDY_TLB_REQ_TIMER_MASK,
5041 			     REG_FIELD_PREP(BW_BUDDY_TLB_REQ_TIMER_MASK, 0x8));
5042 	}
5043 }
5044 
5045 static void icl_display_core_init(struct drm_i915_private *dev_priv,
5046 				  bool resume)
5047 {
5048 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
5049 	struct i915_power_well *well;
5050 
5051 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
5052 
5053 	/* 1. Enable PCH reset handshake. */
5054 	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
5055 
5056 	/* 2. Initialize all combo phys */
5057 	intel_combo_phy_init(dev_priv);
5058 
5059 	/*
5060 	 * 3. Enable Power Well 1 (PG1).
5061 	 *    The AUX IO power wells will be enabled on demand.
5062 	 */
5063 	mutex_lock(&power_domains->lock);
5064 	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
5065 	intel_power_well_enable(dev_priv, well);
5066 	mutex_unlock(&power_domains->lock);
5067 
5068 	/* 4. Enable CDCLK. */
5069 	intel_cdclk_init_hw(dev_priv);
5070 
5071 	/* 5. Enable DBUF. */
5072 	icl_dbuf_enable(dev_priv);
5073 
5074 	/* 6. Setup MBUS. */
5075 	icl_mbus_init(dev_priv);
5076 
5077 	/* 7. Program arbiter BW_BUDDY registers */
5078 	if (INTEL_GEN(dev_priv) >= 12)
5079 		tgl_bw_buddy_init(dev_priv);
5080 
5081 	if (resume && dev_priv->csr.dmc_payload)
5082 		intel_csr_load_program(dev_priv);
5083 }
5084 
5085 static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
5086 {
5087 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
5088 	struct i915_power_well *well;
5089 
5090 	gen9_disable_dc_states(dev_priv);
5091 
5092 	/* 1. Disable all display engine functions -> already done */
5093 
5094 	/* 2. Disable DBUF */
5095 	icl_dbuf_disable(dev_priv);
5096 
5097 	/* 3. Disable CD clock */
5098 	intel_cdclk_uninit_hw(dev_priv);
5099 
5100 	/*
5101 	 * 4. Disable Power Well 1 (PG1).
5102 	 *    The AUX IO power wells are toggled on demand, so they are already
5103 	 *    disabled at this point.
5104 	 */
5105 	mutex_lock(&power_domains->lock);
5106 	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
5107 	intel_power_well_disable(dev_priv, well);
5108 	mutex_unlock(&power_domains->lock);
5109 
5110 	/* 5. */
5111 	intel_combo_phy_uninit(dev_priv);
5112 }
5113 
5114 static void chv_phy_control_init(struct drm_i915_private *dev_priv)
5115 {
5116 	struct i915_power_well *cmn_bc =
5117 		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
5118 	struct i915_power_well *cmn_d =
5119 		lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
5120 
5121 	/*
5122 	 * DISPLAY_PHY_CONTROL can get corrupted if read. As a
5123 	 * workaround, never ever read DISPLAY_PHY_CONTROL, and
5124 	 * instead maintain a shadow copy ourselves. Use the actual
5125 	 * power well state and lane status to reconstruct the
5126 	 * expected initial value.
5127 	 */
5128 	dev_priv->chv_phy_control =
5129 		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
5130 		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
5131 		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
5132 		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
5133 		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);
5134 
5135 	/*
5136 	 * If all lanes are disabled we leave the override disabled
5137 	 * with all power down bits cleared to match the state we
5138 	 * would use after disabling the port. Otherwise enable the
5139 	 * override and set the lane powerdown bits according to the
5140 	 * current lane status.
5141 	 */
5142 	if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
5143 		u32 status = intel_de_read(dev_priv, DPLL(PIPE_A));
5144 		unsigned int mask;
5145 
5146 		mask = status & DPLL_PORTB_READY_MASK;
5147 		if (mask == 0xf)
5148 			mask = 0x0;
5149 		else
5150 			dev_priv->chv_phy_control |=
5151 				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);
5152 
5153 		dev_priv->chv_phy_control |=
5154 			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);
5155 
5156 		mask = (status & DPLL_PORTC_READY_MASK) >> 4;
5157 		if (mask == 0xf)
5158 			mask = 0x0;
5159 		else
5160 			dev_priv->chv_phy_control |=
5161 				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);
5162 
5163 		dev_priv->chv_phy_control |=
5164 			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);
5165 
5166 		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);
5167 
5168 		dev_priv->chv_phy_assert[DPIO_PHY0] = false;
5169 	} else {
5170 		dev_priv->chv_phy_assert[DPIO_PHY0] = true;
5171 	}
5172 
5173 	if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
5174 		u32 status = intel_de_read(dev_priv, DPIO_PHY_STATUS);
5175 		unsigned int mask;
5176 
5177 		mask = status & DPLL_PORTD_READY_MASK;
5178 
5179 		if (mask == 0xf)
5180 			mask = 0x0;
5181 		else
5182 			dev_priv->chv_phy_control |=
5183 				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);
5184 
5185 		dev_priv->chv_phy_control |=
5186 			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);
5187 
5188 		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);
5189 
5190 		dev_priv->chv_phy_assert[DPIO_PHY1] = false;
5191 	} else {
5192 		dev_priv->chv_phy_assert[DPIO_PHY1] = true;
5193 	}
5194 
5195 	drm_dbg_kms(&dev_priv->drm, "Initial PHY_CONTROL=0x%08x\n",
5196 		    dev_priv->chv_phy_control);
5197 
5198 	/* Defer application of initial phy_control to enabling the power well */
5199 }
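
/*
 * A minimal sketch of the write-only shadow register pattern used above
 * (illustrative only; FIELD_MASK and new_field are hypothetical names):
 * the hardware register is never read back, updates are applied to the
 * cached copy and the whole cached value is then written out:
 *
 *	dev_priv->chv_phy_control &= ~FIELD_MASK;
 *	dev_priv->chv_phy_control |= new_field;
 *	intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
 *		       dev_priv->chv_phy_control);
 */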
5200 
5201 static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
5202 {
5203 	struct i915_power_well *cmn =
5204 		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
5205 	struct i915_power_well *disp2d =
5206 		lookup_power_well(dev_priv, VLV_DISP_PW_DISP2D);
5207 
5208 	/* If the display might already be active, skip this */
5209 	if (cmn->desc->ops->is_enabled(dev_priv, cmn) &&
5210 	    disp2d->desc->ops->is_enabled(dev_priv, disp2d) &&
5211 	    intel_de_read(dev_priv, DPIO_CTL) & DPIO_CMNRST)
5212 		return;
5213 
5214 	drm_dbg_kms(&dev_priv->drm, "toggling display PHY side reset\n");
5215 
5216 	/* cmnlane needs DPLL registers */
5217 	disp2d->desc->ops->enable(dev_priv, disp2d);
5218 
5219 	/*
5220 	 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
5221 	 * Need to assert and de-assert PHY SB reset by gating the
5222 	 * common lane power, then un-gating it.
5223 	 * Simply ungating isn't enough to reset the PHY enough to get
5224 	 * ports and lanes running.
5225 	 */
5226 	cmn->desc->ops->disable(dev_priv, cmn);
5227 }
5228 
5229 static bool vlv_punit_is_power_gated(struct drm_i915_private *dev_priv, u32 reg0)
5230 {
5231 	bool ret;
5232 
5233 	vlv_punit_get(dev_priv);
5234 	ret = (vlv_punit_read(dev_priv, reg0) & SSPM0_SSC_MASK) == SSPM0_SSC_PWR_GATE;
5235 	vlv_punit_put(dev_priv);
5236 
5237 	return ret;
5238 }
5239 
5240 static void assert_ved_power_gated(struct drm_i915_private *dev_priv)
5241 {
5242 	drm_WARN(&dev_priv->drm,
5243 		 !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_VEDSSPM0),
5244 		 "VED not power gated\n");
5245 }
5246 
5247 static void assert_isp_power_gated(struct drm_i915_private *dev_priv)
5248 {
5249 	static const struct pci_device_id isp_ids[] = {
5250 		{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0f38)},
5251 		{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x22b8)},
5252 		{}
5253 	};
5254 
5255 	drm_WARN(&dev_priv->drm, !pci_dev_present(isp_ids) &&
5256 		 !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_ISPSSPM0),
5257 		 "ISP not power gated\n");
5258 }
5259 
5260 static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);
5261 
5262 /**
5263  * intel_power_domains_init_hw - initialize hardware power domain state
5264  * @i915: i915 device instance
5265  * @resume: true when called from a resume code path
5266  *
5267  * This function initializes the hardware power domain state and enables all
5268  * power wells belonging to the INIT power domain. Power wells in other
5269  * domains (and not in the INIT domain) are referenced or disabled by
5270  * intel_modeset_readout_hw_state(). After that the reference count of each
5271  * power well must match its HW enabled state, see
5272  * intel_power_domains_verify_state().
5273  *
5274  * It will return with power domains disabled (to be enabled later by
5275  * intel_power_domains_enable()) and must be paired with
5276  * intel_power_domains_driver_remove().
5277  */
5278 void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
5279 {
5280 	struct i915_power_domains *power_domains = &i915->power_domains;
5281 
5282 	power_domains->initializing = true;
5283 
5284 	if (INTEL_GEN(i915) >= 11) {
5285 		icl_display_core_init(i915, resume);
5286 	} else if (IS_CANNONLAKE(i915)) {
5287 		cnl_display_core_init(i915, resume);
5288 	} else if (IS_GEN9_BC(i915)) {
5289 		skl_display_core_init(i915, resume);
5290 	} else if (IS_GEN9_LP(i915)) {
5291 		bxt_display_core_init(i915, resume);
5292 	} else if (IS_CHERRYVIEW(i915)) {
5293 		mutex_lock(&power_domains->lock);
5294 		chv_phy_control_init(i915);
5295 		mutex_unlock(&power_domains->lock);
5296 		assert_isp_power_gated(i915);
5297 	} else if (IS_VALLEYVIEW(i915)) {
5298 		mutex_lock(&power_domains->lock);
5299 		vlv_cmnlane_wa(i915);
5300 		mutex_unlock(&power_domains->lock);
5301 		assert_ved_power_gated(i915);
5302 		assert_isp_power_gated(i915);
5303 	} else if (IS_BROADWELL(i915) || IS_HASWELL(i915)) {
5304 		hsw_assert_cdclk(i915);
5305 		intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
5306 	} else if (IS_IVYBRIDGE(i915)) {
5307 		intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
5308 	}
5309 
5310 	/*
5311 	 * Keep all power wells enabled for any dependent HW access during
5312 	 * initialization and to make sure we keep BIOS enabled display HW
5313 	 * resources powered until display HW readout is complete. We drop
5314 	 * this reference in intel_power_domains_enable().
5315 	 */
5316 	power_domains->wakeref =
5317 		intel_display_power_get(i915, POWER_DOMAIN_INIT);
5318 
5319 	/* If the user disabled power well support, keep all power wells enabled. */
5320 	if (!i915_modparams.disable_power_well)
5321 		intel_display_power_get(i915, POWER_DOMAIN_INIT);
5322 	intel_power_domains_sync_hw(i915);
5323 
5324 	power_domains->initializing = false;
5325 }
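
/*
 * Illustrative sketch (not driver code) of how the entry points above and
 * below are expected to pair up during driver load and unload, with the
 * surrounding flow simplified:
 *
 *	load:
 *		intel_power_domains_init_hw(i915, false);
 *		... display HW readout takes its own power references ...
 *		intel_power_domains_enable(i915);
 *
 *	unload:
 *		intel_power_domains_disable(i915);
 *		... display HW is shut down ...
 *		intel_power_domains_driver_remove(i915);
 */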
5326 
5327 /**
5328  * intel_power_domains_driver_remove - deinitialize hw power domain state
5329  * @i915: i915 device instance
5330  *
5331  * De-initializes the display power domain HW state. It also ensures that the
5332  * device stays powered up so that the driver can be reloaded.
5333  *
5334  * It must be called with power domains already disabled (after a call to
5335  * intel_power_domains_disable()) and must be paired with
5336  * intel_power_domains_init_hw().
5337  */
5338 void intel_power_domains_driver_remove(struct drm_i915_private *i915)
5339 {
5340 	intel_wakeref_t wakeref __maybe_unused =
5341 		fetch_and_zero(&i915->power_domains.wakeref);
5342 
5343 	/* Remove the refcount we took to keep power well support disabled. */
5344 	if (!i915_modparams.disable_power_well)
5345 		intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);
5346 
5347 	intel_display_power_flush_work_sync(i915);
5348 
5349 	intel_power_domains_verify_state(i915);
5350 
5351 	/* Keep the power well enabled, but cancel its rpm wakeref. */
5352 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
5353 }
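
/*
 * For reference, the fetch_and_zero() helper used above (from
 * i915_utils.h) reads a variable and resets it to zero in a single
 * expression, roughly:
 *
 *	#define fetch_and_zero(ptr) ({			\
 *		typeof(*ptr) __T = *(ptr);		\
 *		*(ptr) = (typeof(*ptr))0;		\
 *		__T;					\
 *	})
 *
 * which transfers ownership of the tracked wakeref out of
 * power_domains->wakeref before it is released.
 */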
5354 
5355 /**
5356  * intel_power_domains_enable - enable toggling of display power wells
5357  * @i915: i915 device instance
5358  *
5359  * Enable the on-demand enabling/disabling of the display power wells. Note that
5360  * power wells not belonging to POWER_DOMAIN_INIT are allowed to be toggled
5361  * only at specific points of the display modeset sequence, thus they are not
5362  * affected by the intel_power_domains_enable()/disable() calls. The purpose
5363  * of these functions is to keep the rest of the power wells enabled until the end
5364  * of display HW readout (which will acquire the power references reflecting
5365  * the current HW state).
5366  */
5367 void intel_power_domains_enable(struct drm_i915_private *i915)
5368 {
5369 	intel_wakeref_t wakeref __maybe_unused =
5370 		fetch_and_zero(&i915->power_domains.wakeref);
5371 
5372 	intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
5373 	intel_power_domains_verify_state(i915);
5374 }
5375 
5376 /**
5377  * intel_power_domains_disable - disable toggling of display power wells
5378  * @i915: i915 device instance
5379  *
5380  * Disable the on-demand enabling/disabling of the display power wells. See
5381  * intel_power_domains_enable() for which power wells this call controls.
5382  */
5383 void intel_power_domains_disable(struct drm_i915_private *i915)
5384 {
5385 	struct i915_power_domains *power_domains = &i915->power_domains;
5386 
5387 	drm_WARN_ON(&i915->drm, power_domains->wakeref);
5388 	power_domains->wakeref =
5389 		intel_display_power_get(i915, POWER_DOMAIN_INIT);
5390 
5391 	intel_power_domains_verify_state(i915);
5392 }
5393 
5394 /**
5395  * intel_power_domains_suspend - suspend power domain state
5396  * @i915: i915 device instance
5397  * @suspend_mode: specifies the target suspend state (idle, mem, hibernation)
5398  *
5399  * This function prepares the hardware power domain state before entering
5400  * system suspend.
5401  *
5402  * It must be called with power domains already disabled (after a call to
5403  * intel_power_domains_disable()) and paired with intel_power_domains_resume().
5404  */
5405 void intel_power_domains_suspend(struct drm_i915_private *i915,
5406 				 enum i915_drm_suspend_mode suspend_mode)
5407 {
5408 	struct i915_power_domains *power_domains = &i915->power_domains;
5409 	intel_wakeref_t wakeref __maybe_unused =
5410 		fetch_and_zero(&power_domains->wakeref);
5411 
5412 	intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
5413 
5414 	/*
5415 	 * In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9
5416 	 * support, don't manually deinit the power domains. This also means the
5417 	 * CSR/DMC firmware will stay active; it will power down any HW
5418 	 * resources as required and also enable deeper system power states
5419 	 * that would be blocked if the firmware was inactive.
5420 	 */
5421 	if (!(i915->csr.allowed_dc_mask & DC_STATE_EN_DC9) &&
5422 	    suspend_mode == I915_DRM_SUSPEND_IDLE &&
5423 	    i915->csr.dmc_payload) {
5424 		intel_display_power_flush_work(i915);
5425 		intel_power_domains_verify_state(i915);
5426 		return;
5427 	}
5428 
5429 	/*
5430 	 * Even if power well support was disabled we still want to disable
5431 	 * power wells if power domains must be deinitialized for suspend.
5432 	 */
5433 	if (!i915_modparams.disable_power_well)
5434 		intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);
5435 
5436 	intel_display_power_flush_work(i915);
5437 	intel_power_domains_verify_state(i915);
5438 
5439 	if (INTEL_GEN(i915) >= 11)
5440 		icl_display_core_uninit(i915);
5441 	else if (IS_CANNONLAKE(i915))
5442 		cnl_display_core_uninit(i915);
5443 	else if (IS_GEN9_BC(i915))
5444 		skl_display_core_uninit(i915);
5445 	else if (IS_GEN9_LP(i915))
5446 		bxt_display_core_uninit(i915);
5447 
5448 	power_domains->display_core_suspended = true;
5449 }
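
/*
 * Illustrative sketch (not driver code) of the system suspend/resume
 * pairing, with the rest of the PM flow simplified:
 *
 *	suspend:
 *		intel_power_domains_disable(i915);
 *		... display outputs are turned off ...
 *		intel_power_domains_suspend(i915, suspend_mode);
 *
 *	resume:
 *		intel_power_domains_resume(i915);
 *		... display HW readout retakes power references ...
 *		intel_power_domains_enable(i915);
 */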
5450 
5451 /**
5452  * intel_power_domains_resume - resume power domain state
5453  * @i915: i915 device instance
5454  *
5455  * This function resumes the hardware power domain state during system resume.
5456  *
5457  * It will return with power domain support disabled (to be enabled later by
5458  * intel_power_domains_enable()) and must be paired with
5459  * intel_power_domains_suspend().
5460  */
5461 void intel_power_domains_resume(struct drm_i915_private *i915)
5462 {
5463 	struct i915_power_domains *power_domains = &i915->power_domains;
5464 
5465 	if (power_domains->display_core_suspended) {
5466 		intel_power_domains_init_hw(i915, true);
5467 		power_domains->display_core_suspended = false;
5468 	} else {
5469 		drm_WARN_ON(&i915->drm, power_domains->wakeref);
5470 		power_domains->wakeref =
5471 			intel_display_power_get(i915, POWER_DOMAIN_INIT);
5472 	}
5473 
5474 	intel_power_domains_verify_state(i915);
5475 }
5476 
5477 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
5478 
5479 static void intel_power_domains_dump_info(struct drm_i915_private *i915)
5480 {
5481 	struct i915_power_domains *power_domains = &i915->power_domains;
5482 	struct i915_power_well *power_well;
5483 
5484 	for_each_power_well(i915, power_well) {
5485 		enum intel_display_power_domain domain;
5486 
5487 		drm_dbg(&i915->drm, "%-25s %d\n",
5488 			power_well->desc->name, power_well->count);
5489 
5490 		for_each_power_domain(domain, power_well->desc->domains)
5491 			drm_dbg(&i915->drm, "  %-23s %d\n",
5492 				intel_display_power_domain_str(domain),
5493 				power_domains->domain_use_count[domain]);
5494 	}
5495 }
5496 
5497 /**
5498  * intel_power_domains_verify_state - verify the HW/SW state for all power wells
5499  * @i915: i915 device instance
5500  *
5501  * Verify that the reference count of each power well matches its HW enabled
5502  * state and the total refcount of the domains it belongs to. This must be
5503  * called after modeset HW state sanitization, which is responsible for
5504  * acquiring reference counts for any power wells in use and disabling the
5505  * ones left on by BIOS but not required by any active output.
5506  */
5507 static void intel_power_domains_verify_state(struct drm_i915_private *i915)
5508 {
5509 	struct i915_power_domains *power_domains = &i915->power_domains;
5510 	struct i915_power_well *power_well;
5511 	bool dump_domain_info;
5512 
5513 	mutex_lock(&power_domains->lock);
5514 
5515 	verify_async_put_domains_state(power_domains);
5516 
5517 	dump_domain_info = false;
5518 	for_each_power_well(i915, power_well) {
5519 		enum intel_display_power_domain domain;
5520 		int domains_count;
5521 		bool enabled;
5522 
5523 		enabled = power_well->desc->ops->is_enabled(i915, power_well);
5524 		if ((power_well->count || power_well->desc->always_on) !=
5525 		    enabled)
5526 			drm_err(&i915->drm,
5527 				"power well %s state mismatch (refcount %d/enabled %d)",
5528 				power_well->desc->name,
5529 				power_well->count, enabled);
5530 
5531 		domains_count = 0;
5532 		for_each_power_domain(domain, power_well->desc->domains)
5533 			domains_count += power_domains->domain_use_count[domain];
5534 
5535 		if (power_well->count != domains_count) {
5536 			drm_err(&i915->drm,
5537 				"power well %s refcount/domain refcount mismatch "
5538 				"(refcount %d/domains refcount %d)\n",
5539 				power_well->desc->name, power_well->count,
5540 				domains_count);
5541 			dump_domain_info = true;
5542 		}
5543 	}
5544 
5545 	if (dump_domain_info) {
5546 		static bool dumped;
5547 
5548 		if (!dumped) {
5549 			intel_power_domains_dump_info(i915);
5550 			dumped = true;
5551 		}
5552 	}
5553 
5554 	mutex_unlock(&power_domains->lock);
5555 }
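
/*
 * The invariants checked above, written out as a sketch (illustrative
 * pseudo-asserts, not compiled code):
 *
 *	is_enabled(well) == (well->count != 0 || well->desc->always_on)
 *	well->count == sum of power_domains->domain_use_count[domain]
 *		       over all domains in well->desc->domains
 */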
5556 
5557 #else
5558 
5559 static void intel_power_domains_verify_state(struct drm_i915_private *i915)
5560 {
5561 }
5562 
5563 #endif
5564 
5565 void intel_display_power_suspend_late(struct drm_i915_private *i915)
5566 {
5567 	if (INTEL_GEN(i915) >= 11 || IS_GEN9_LP(i915))
5568 		bxt_enable_dc9(i915);
5569 	else if (IS_HASWELL(i915) || IS_BROADWELL(i915))
5570 		hsw_enable_pc8(i915);
5571 }
5572 
5573 void intel_display_power_resume_early(struct drm_i915_private *i915)
5574 {
5575 	if (INTEL_GEN(i915) >= 11 || IS_GEN9_LP(i915)) {
5576 		gen9_sanitize_dc_state(i915);
5577 		bxt_disable_dc9(i915);
5578 	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
5579 		hsw_disable_pc8(i915);
5580 	}
5581 }
5582 
5583 void intel_display_power_suspend(struct drm_i915_private *i915)
5584 {
5585 	if (INTEL_GEN(i915) >= 11) {
5586 		icl_display_core_uninit(i915);
5587 		bxt_enable_dc9(i915);
5588 	} else if (IS_GEN9_LP(i915)) {
5589 		bxt_display_core_uninit(i915);
5590 		bxt_enable_dc9(i915);
5591 	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
5592 		hsw_enable_pc8(i915);
5593 	}
5594 }
5595 
5596 void intel_display_power_resume(struct drm_i915_private *i915)
5597 {
5598 	if (INTEL_GEN(i915) >= 11) {
5599 		bxt_disable_dc9(i915);
5600 		icl_display_core_init(i915, true);
5601 		if (i915->csr.dmc_payload) {
5602 			if (i915->csr.allowed_dc_mask &
5603 			    DC_STATE_EN_UPTO_DC6)
5604 				skl_enable_dc6(i915);
5605 			else if (i915->csr.allowed_dc_mask &
5606 				 DC_STATE_EN_UPTO_DC5)
5607 				gen9_enable_dc5(i915);
5608 		}
5609 	} else if (IS_GEN9_LP(i915)) {
5610 		bxt_disable_dc9(i915);
5611 		bxt_display_core_init(i915, true);
5612 		if (i915->csr.dmc_payload &&
5613 		    (i915->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
5614 			gen9_enable_dc5(i915);
5615 	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
5616 		hsw_disable_pc8(i915);
5617 	}
5618 }
5619