xref: /linux/drivers/gpu/drm/i915/intel_runtime_pm.c (revision 2decec48b0fd28ffdbf4cc684bd04e735f0839dd)
1 /*
2  * Copyright © 2012-2014 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eugeni Dodonov <eugeni.dodonov@intel.com>
25  *    Daniel Vetter <daniel.vetter@ffwll.ch>
26  *
27  */
28 
29 #include <linux/pm_runtime.h>
30 #include <linux/vgaarb.h>
31 
32 #include <drm/drm_print.h>
33 
34 #include "i915_drv.h"
35 #include "intel_drv.h"
36 
37 /**
38  * DOC: runtime pm
39  *
40  * The i915 driver supports dynamic enabling and disabling of entire hardware
41  * blocks at runtime. This is especially important on the display side where
42  * software is supposed to control many power gates manually on recent hardware,
43  * since on the GT side a lot of the power management is done by the hardware.
44  * But even there some manual control at the device level is required.
45  *
46  * Since i915 supports a diverse set of platforms with a unified codebase and
47  * hardware engineers just love to shuffle functionality around between power
48  * domains there's a sizeable amount of indirection required. This file provides
49  * generic functions to the driver for grabbing and releasing references for
50  * abstract power domains. It then maps those to the actual power wells
51  * present for a given platform.
52  */
53 
54 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
55 
56 #include <linux/sort.h>
57 
58 #define STACKDEPTH 8
59 
60 static noinline depot_stack_handle_t __save_depot_stack(void)
61 {
62 	unsigned long entries[STACKDEPTH];
63 	unsigned int n;
64 
65 	n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
66 	return stack_depot_save(entries, n, GFP_NOWAIT | __GFP_NOWARN);
67 }
68 
69 static void __print_depot_stack(depot_stack_handle_t stack,
70 				char *buf, int sz, int indent)
71 {
72 	unsigned long *entries;
73 	unsigned int nr_entries;
74 
75 	nr_entries = stack_depot_fetch(stack, &entries);
76 	stack_trace_snprint(buf, sz, entries, nr_entries, indent);
77 }
78 
79 static void init_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
80 {
81 	struct i915_runtime_pm *rpm = &i915->runtime_pm;
82 
83 	spin_lock_init(&rpm->debug.lock);
84 }
85 
/*
 * Acquire a tracked wakeref: bump the wakeref count and record the caller's
 * call stack in the per-device owner list so leaked wakerefs can be
 * attributed later.
 *
 * Returns a stack depot handle acting as a cookie for the matching
 * cancel_intel_runtime_pm_wakeref(), or -1 when tracking is not possible
 * (platform without runtime PM, depot save failure, or no memory for the
 * owner list). The wakeref itself is still counted in those cases.
 */
static noinline depot_stack_handle_t
track_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
{
	struct i915_runtime_pm *rpm = &i915->runtime_pm;
	depot_stack_handle_t stack, *stacks;
	unsigned long flags;

	atomic_inc(&rpm->wakeref_count);
	assert_rpm_wakelock_held(i915);

	if (!HAS_RUNTIME_PM(i915))
		return -1;

	stack = __save_depot_stack();
	if (!stack)
		return -1;

	spin_lock_irqsave(&rpm->debug.lock, flags);

	/* First reference since idle: remember where it was taken. */
	if (!rpm->debug.count)
		rpm->debug.last_acquire = stack;

	/* GFP_NOWAIT: we are under a spinlock with interrupts disabled. */
	stacks = krealloc(rpm->debug.owners,
			  (rpm->debug.count + 1) * sizeof(*stacks),
			  GFP_NOWAIT | __GFP_NOWARN);
	if (stacks) {
		stacks[rpm->debug.count++] = stack;
		rpm->debug.owners = stacks;
	} else {
		/* No room to track the owner; fall back to the -1 cookie. */
		stack = -1;
	}

	spin_unlock_irqrestore(&rpm->debug.lock, flags);

	return stack;
}
122 
/*
 * Drop a tracked wakeref using the cookie returned by
 * track_intel_runtime_pm_wakeref(): remove @stack from the owner list.
 * If the cookie is not found, WARN and dump both the offending stack and
 * the stack of the last full release to help debug unbalanced wakerefs.
 * Note this only removes the tracking entry; the caller is responsible for
 * the wakeref count itself.
 */
static void cancel_intel_runtime_pm_wakeref(struct drm_i915_private *i915,
					    depot_stack_handle_t stack)
{
	struct i915_runtime_pm *rpm = &i915->runtime_pm;
	unsigned long flags, n;
	bool found = false;

	/* -1 means tracking was skipped at acquire time; nothing to undo. */
	if (unlikely(stack == -1))
		return;

	spin_lock_irqsave(&rpm->debug.lock, flags);
	for (n = rpm->debug.count; n--; ) {
		if (rpm->debug.owners[n] == stack) {
			/* Close the gap left by the removed entry. */
			memmove(rpm->debug.owners + n,
				rpm->debug.owners + n + 1,
				(--rpm->debug.count - n) * sizeof(stack));
			found = true;
			break;
		}
	}
	spin_unlock_irqrestore(&rpm->debug.lock, flags);

	if (WARN(!found,
		 "Unmatched wakeref (tracking %lu), count %u\n",
		 rpm->debug.count, atomic_read(&rpm->wakeref_count))) {
		char *buf;

		buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
		if (!buf)
			return;

		__print_depot_stack(stack, buf, PAGE_SIZE, 2);
		DRM_DEBUG_DRIVER("wakeref %x from\n%s", stack, buf);

		stack = READ_ONCE(rpm->debug.last_release);
		if (stack) {
			__print_depot_stack(stack, buf, PAGE_SIZE, 2);
			DRM_DEBUG_DRIVER("wakeref last released at\n%s", buf);
		}

		kfree(buf);
	}
}
166 
167 static int cmphandle(const void *_a, const void *_b)
168 {
169 	const depot_stack_handle_t * const a = _a, * const b = _b;
170 
171 	if (*a < *b)
172 		return -1;
173 	else if (*a > *b)
174 		return 1;
175 	else
176 		return 0;
177 }
178 
/*
 * Dump a snapshot of the wakeref debug state to @p: the last acquire and
 * release stacks, plus one entry per currently held wakeref. NOTE: sorts
 * @dbg->owners in place so that identical stacks can be coalesced into a
 * single "Wakeref xN" line.
 */
static void
__print_intel_runtime_pm_wakeref(struct drm_printer *p,
				 const struct intel_runtime_pm_debug *dbg)
{
	unsigned long i;
	char *buf;

	/* Scratch buffer for rendering one stack trace at a time. */
	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return;

	if (dbg->last_acquire) {
		__print_depot_stack(dbg->last_acquire, buf, PAGE_SIZE, 2);
		drm_printf(p, "Wakeref last acquired:\n%s", buf);
	}

	if (dbg->last_release) {
		__print_depot_stack(dbg->last_release, buf, PAGE_SIZE, 2);
		drm_printf(p, "Wakeref last released:\n%s", buf);
	}

	drm_printf(p, "Wakeref count: %lu\n", dbg->count);

	sort(dbg->owners, dbg->count, sizeof(*dbg->owners), cmphandle, NULL);

	for (i = 0; i < dbg->count; i++) {
		depot_stack_handle_t stack = dbg->owners[i];
		unsigned long rep;

		/* Count adjacent duplicates (the list is sorted). */
		rep = 1;
		while (i + 1 < dbg->count && dbg->owners[i + 1] == stack)
			rep++, i++;
		__print_depot_stack(stack, buf, PAGE_SIZE, 2);
		drm_printf(p, "Wakeref x%lu taken at:\n%s", rep, buf);
	}

	kfree(buf);
}
217 
/*
 * Drop a wakeref. On the final decrement, steal the whole owner list (under
 * the debug lock), record the release stack, and report any wakerefs that
 * were still outstanding at that point.
 */
static noinline void
untrack_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
{
	struct i915_runtime_pm *rpm = &i915->runtime_pm;
	struct intel_runtime_pm_debug dbg = {};
	struct drm_printer p;
	unsigned long flags;

	assert_rpm_wakelock_held(i915);
	/* Lock is only taken when the count actually reaches zero. */
	if (atomic_dec_and_lock_irqsave(&rpm->wakeref_count,
					&rpm->debug.lock,
					flags)) {
		/* Take ownership of the list so it can be printed unlocked. */
		dbg = rpm->debug;

		rpm->debug.owners = NULL;
		rpm->debug.count = 0;
		rpm->debug.last_release = __save_depot_stack();

		spin_unlock_irqrestore(&rpm->debug.lock, flags);
	}
	/* dbg.count stays 0 (from the initializer) unless we hit zero above. */
	if (!dbg.count)
		return;

	p = drm_debug_printer("i915");
	__print_intel_runtime_pm_wakeref(&p, &dbg);

	kfree(dbg.owners);
}
246 
/*
 * Print the currently tracked wakerefs to @p. The owner list is copied out
 * under the debug lock into a buffer that must be allocated outside it; if
 * the list grew while we were allocating, loop and retry with the larger
 * size until the snapshot fits.
 */
void print_intel_runtime_pm_wakeref(struct drm_i915_private *i915,
				    struct drm_printer *p)
{
	struct intel_runtime_pm_debug dbg = {};

	do {
		struct i915_runtime_pm *rpm = &i915->runtime_pm;
		unsigned long alloc = dbg.count;
		depot_stack_handle_t *s;

		spin_lock_irq(&rpm->debug.lock);
		dbg.count = rpm->debug.count;
		/* Only copy if the previously allocated buffer is big enough. */
		if (dbg.count <= alloc) {
			memcpy(dbg.owners,
			       rpm->debug.owners,
			       dbg.count * sizeof(*s));
		}
		dbg.last_acquire = rpm->debug.last_acquire;
		dbg.last_release = rpm->debug.last_release;
		spin_unlock_irq(&rpm->debug.lock);
		if (dbg.count <= alloc)
			break;

		/* Cannot allocate under the spinlock; grow and try again. */
		s = krealloc(dbg.owners, dbg.count * sizeof(*s), GFP_KERNEL);
		if (!s)
			goto out;

		dbg.owners = s;
	} while (1);

	__print_intel_runtime_pm_wakeref(p, &dbg);

out:
	kfree(dbg.owners);
}
282 
283 #else
284 
/* CONFIG_DRM_I915_DEBUG_RUNTIME_PM disabled: no tracking state to set up. */
static void init_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
{
}
288 
/*
 * Tracking disabled: just count the wakeref. -1 is the "untracked" cookie
 * that the cancel path accepts without complaint.
 */
static depot_stack_handle_t
track_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
{
	atomic_inc(&i915->runtime_pm.wakeref_count);
	assert_rpm_wakelock_held(i915);
	return -1;
}
296 
/* Tracking disabled: just drop the wakeref count. */
static void untrack_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
{
	assert_rpm_wakelock_held(i915);
	atomic_dec(&i915->runtime_pm.wakeref_count);
}
302 
303 #endif
304 
305 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
306 					 enum i915_power_well_id power_well_id);
307 
/**
 * intel_display_power_domain_str - map a power domain enum to its name
 * @domain: power domain to describe
 *
 * Returns a static, human readable string for @domain, intended for debug
 * output. Unknown values trigger a MISSING_CASE warning and return "?".
 */
const char *
intel_display_power_domain_str(enum intel_display_power_domain domain)
{
	switch (domain) {
	case POWER_DOMAIN_PIPE_A:
		return "PIPE_A";
	case POWER_DOMAIN_PIPE_B:
		return "PIPE_B";
	case POWER_DOMAIN_PIPE_C:
		return "PIPE_C";
	case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
		return "PIPE_A_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
		return "PIPE_B_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
		return "PIPE_C_PANEL_FITTER";
	case POWER_DOMAIN_TRANSCODER_A:
		return "TRANSCODER_A";
	case POWER_DOMAIN_TRANSCODER_B:
		return "TRANSCODER_B";
	case POWER_DOMAIN_TRANSCODER_C:
		return "TRANSCODER_C";
	case POWER_DOMAIN_TRANSCODER_EDP:
		return "TRANSCODER_EDP";
	case POWER_DOMAIN_TRANSCODER_EDP_VDSC:
		return "TRANSCODER_EDP_VDSC";
	case POWER_DOMAIN_TRANSCODER_DSI_A:
		return "TRANSCODER_DSI_A";
	case POWER_DOMAIN_TRANSCODER_DSI_C:
		return "TRANSCODER_DSI_C";
	case POWER_DOMAIN_PORT_DDI_A_LANES:
		return "PORT_DDI_A_LANES";
	case POWER_DOMAIN_PORT_DDI_B_LANES:
		return "PORT_DDI_B_LANES";
	case POWER_DOMAIN_PORT_DDI_C_LANES:
		return "PORT_DDI_C_LANES";
	case POWER_DOMAIN_PORT_DDI_D_LANES:
		return "PORT_DDI_D_LANES";
	case POWER_DOMAIN_PORT_DDI_E_LANES:
		return "PORT_DDI_E_LANES";
	case POWER_DOMAIN_PORT_DDI_F_LANES:
		return "PORT_DDI_F_LANES";
	case POWER_DOMAIN_PORT_DDI_A_IO:
		return "PORT_DDI_A_IO";
	case POWER_DOMAIN_PORT_DDI_B_IO:
		return "PORT_DDI_B_IO";
	case POWER_DOMAIN_PORT_DDI_C_IO:
		return "PORT_DDI_C_IO";
	case POWER_DOMAIN_PORT_DDI_D_IO:
		return "PORT_DDI_D_IO";
	case POWER_DOMAIN_PORT_DDI_E_IO:
		return "PORT_DDI_E_IO";
	case POWER_DOMAIN_PORT_DDI_F_IO:
		return "PORT_DDI_F_IO";
	case POWER_DOMAIN_PORT_DSI:
		return "PORT_DSI";
	case POWER_DOMAIN_PORT_CRT:
		return "PORT_CRT";
	case POWER_DOMAIN_PORT_OTHER:
		return "PORT_OTHER";
	case POWER_DOMAIN_VGA:
		return "VGA";
	case POWER_DOMAIN_AUDIO:
		return "AUDIO";
	case POWER_DOMAIN_PLLS:
		return "PLLS";
	case POWER_DOMAIN_AUX_A:
		return "AUX_A";
	case POWER_DOMAIN_AUX_B:
		return "AUX_B";
	case POWER_DOMAIN_AUX_C:
		return "AUX_C";
	case POWER_DOMAIN_AUX_D:
		return "AUX_D";
	case POWER_DOMAIN_AUX_E:
		return "AUX_E";
	case POWER_DOMAIN_AUX_F:
		return "AUX_F";
	case POWER_DOMAIN_AUX_IO_A:
		return "AUX_IO_A";
	case POWER_DOMAIN_AUX_TBT1:
		return "AUX_TBT1";
	case POWER_DOMAIN_AUX_TBT2:
		return "AUX_TBT2";
	case POWER_DOMAIN_AUX_TBT3:
		return "AUX_TBT3";
	case POWER_DOMAIN_AUX_TBT4:
		return "AUX_TBT4";
	case POWER_DOMAIN_GMBUS:
		return "GMBUS";
	case POWER_DOMAIN_INIT:
		return "INIT";
	case POWER_DOMAIN_MODESET:
		return "MODESET";
	case POWER_DOMAIN_GT_IRQ:
		return "GT_IRQ";
	default:
		MISSING_CASE(domain);
		return "?";
	}
}
409 
/*
 * Power up the well via its platform ops, then mark it enabled in SW state
 * (ordering matters for the lockless readers of hw_enabled).
 */
static void intel_power_well_enable(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well)
{
	DRM_DEBUG_KMS("enabling %s\n", power_well->desc->name);
	power_well->desc->ops->enable(dev_priv, power_well);
	power_well->hw_enabled = true;
}
417 
/*
 * Mark the well disabled in SW state before actually powering it down
 * (mirror image of intel_power_well_enable()'s ordering).
 */
static void intel_power_well_disable(struct drm_i915_private *dev_priv,
				     struct i915_power_well *power_well)
{
	DRM_DEBUG_KMS("disabling %s\n", power_well->desc->name);
	power_well->hw_enabled = false;
	power_well->desc->ops->disable(dev_priv, power_well);
}
425 
/* Take a reference on the well, powering it up on the 0 -> 1 transition. */
static void intel_power_well_get(struct drm_i915_private *dev_priv,
				 struct i915_power_well *power_well)
{
	if (!power_well->count++)
		intel_power_well_enable(dev_priv, power_well);
}
432 
/*
 * Drop a reference on the well, powering it down on the 1 -> 0 transition.
 * An underflow indicates an unbalanced get/put and is WARNed about.
 */
static void intel_power_well_put(struct drm_i915_private *dev_priv,
				 struct i915_power_well *power_well)
{
	WARN(!power_well->count, "Use count on power well %s is already zero",
	     power_well->desc->name);

	if (!--power_well->count)
		intel_power_well_disable(dev_priv, power_well);
}
442 
443 /**
444  * __intel_display_power_is_enabled - unlocked check for a power domain
445  * @dev_priv: i915 device instance
446  * @domain: power domain to check
447  *
448  * This is the unlocked version of intel_display_power_is_enabled() and should
449  * only be used from error capture and recovery code where deadlocks are
450  * possible.
451  *
452  * Returns:
453  * True when the power domain is enabled, false otherwise.
454  */
455 bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
456 				      enum intel_display_power_domain domain)
457 {
458 	struct i915_power_well *power_well;
459 	bool is_enabled;
460 
461 	if (dev_priv->runtime_pm.suspended)
462 		return false;
463 
464 	is_enabled = true;
465 
466 	for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain)) {
467 		if (power_well->desc->always_on)
468 			continue;
469 
470 		if (!power_well->hw_enabled) {
471 			is_enabled = false;
472 			break;
473 		}
474 	}
475 
476 	return is_enabled;
477 }
478 
479 /**
480  * intel_display_power_is_enabled - check for a power domain
481  * @dev_priv: i915 device instance
482  * @domain: power domain to check
483  *
484  * This function can be used to check the hw power domain state. It is mostly
485  * used in hardware state readout functions. Everywhere else code should rely
486  * upon explicit power domain reference counting to ensure that the hardware
487  * block is powered up before accessing it.
488  *
489  * Callers must hold the relevant modesetting locks to ensure that concurrent
490  * threads can't disable the power well while the caller tries to read a few
491  * registers.
492  *
493  * Returns:
494  * True when the power domain is enabled, false otherwise.
495  */
496 bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
497 				    enum intel_display_power_domain domain)
498 {
499 	struct i915_power_domains *power_domains;
500 	bool ret;
501 
502 	power_domains = &dev_priv->power_domains;
503 
504 	mutex_lock(&power_domains->lock);
505 	ret = __intel_display_power_is_enabled(dev_priv, domain);
506 	mutex_unlock(&power_domains->lock);
507 
508 	return ret;
509 }
510 
511 /*
512  * Starting with Haswell, we have a "Power Down Well" that can be turned off
513  * when not needed anymore. We have 4 registers that can request the power well
514  * to be enabled, and it will only be disabled if none of the registers is
515  * requesting it to be enabled.
516  */
/*
 * Common tail of power well enabling on HSW+: poke the VGA MSR register when
 * the well feeds the VGA plane, and re-enable the pipe interrupts that the
 * well gates.
 */
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv,
				       u8 irq_pipe_mask, bool has_vga)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

	/*
	 * After we re-enable the power well, if we touch VGA register 0x3d5
	 * we'll get unclaimed register interrupts. This stops after we write
	 * anything to the VGA MSR register. The vgacon module uses this
	 * register all the time, so if we unbind our driver and, as a
	 * consequence, bind vgacon, we'll get stuck in an infinite loop at
	 * console_unlock(). So here we make sure we touch the VGA MSR
	 * register, making sure vgacon can keep working normally without
	 * triggering interrupts and error messages.
	 */
	if (has_vga) {
		vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
		outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
		vga_put(pdev, VGA_RSRC_LEGACY_IO);
	}

	if (irq_pipe_mask)
		gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask);
}
541 
/* Quiesce the pipe interrupts gated by the well before powering it down. */
static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv,
				       u8 irq_pipe_mask)
{
	if (irq_pipe_mask)
		gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask);
}
548 
549 
/* Poll the driver control register until the well reports the enabled state. */
static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;

	/* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */
	WARN_ON(intel_wait_for_register(dev_priv,
					regs->driver,
					HSW_PWR_WELL_CTL_STATE(pw_idx),
					HSW_PWR_WELL_CTL_STATE(pw_idx),
					1));
}
563 
564 static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv,
565 				     const struct i915_power_well_regs *regs,
566 				     int pw_idx)
567 {
568 	u32 req_mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
569 	u32 ret;
570 
571 	ret = I915_READ(regs->bios) & req_mask ? 1 : 0;
572 	ret |= I915_READ(regs->driver) & req_mask ? 2 : 0;
573 	if (regs->kvmr.reg)
574 		ret |= I915_READ(regs->kvmr) & req_mask ? 4 : 0;
575 	ret |= I915_READ(regs->debug) & req_mask ? 8 : 0;
576 
577 	return ret;
578 }
579 
/*
 * Briefly wait for the well to power down, skipping the wait (with a
 * diagnostic message) if another agent is keeping it forced on.
 */
static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	bool disabled;
	u32 reqs;

	/*
	 * Bspec doesn't require waiting for PWs to get disabled, but still do
	 * this for paranoia. The known cases where a PW will be forced on:
	 * - a KVMR request on any power well via the KVMR request register
	 * - a DMC request on PW1 and MISC_IO power wells via the BIOS and
	 *   DEBUG request registers
	 * Skip the wait in case any of the request bits are set and print a
	 * diagnostic message.
	 */
	/* The condition assigns both 'disabled' and 'reqs' as side effects. */
	wait_for((disabled = !(I915_READ(regs->driver) &
			       HSW_PWR_WELL_CTL_STATE(pw_idx))) ||
		 (reqs = hsw_power_well_requesters(dev_priv, regs, pw_idx)), 1);
	if (disabled)
		return;

	DRM_DEBUG_KMS("%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",
		      power_well->desc->name,
		      !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8));
}
607 
/* Poll until the fuse distribution status for power gate @pg reports done. */
static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv,
					   enum skl_power_gate pg)
{
	/* Timeout 5us for PG#0, for other PGs 1us */
	WARN_ON(intel_wait_for_register(dev_priv, SKL_FUSE_STATUS,
					SKL_FUSE_PG_DIST_STATUS(pg),
					SKL_FUSE_PG_DIST_STATUS(pg), 1));
}
616 
/*
 * Enable an HSW+ style power well: set the driver request bit, wait for the
 * well's status, handle the platform fuse-distribution waits and workarounds,
 * then run the common post-enable steps (VGA poke, pipe IRQs).
 */
static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	bool wait_fuses = power_well->desc->hsw.has_fuses;
	enum skl_power_gate uninitialized_var(pg);
	u32 val;

	if (wait_fuses) {
		pg = INTEL_GEN(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
						 SKL_PW_CTL_IDX_TO_PG(pw_idx);
		/*
		 * For PW1 we have to wait both for the PW0/PG0 fuse state
		 * before enabling the power well and PW1/PG1's own fuse
		 * state after the enabling. For all other power wells with
		 * fuses we only have to wait for that PW/PG's fuse state
		 * after the enabling.
		 */
		if (pg == SKL_PG1)
			gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0);
	}

	val = I915_READ(regs->driver);
	I915_WRITE(regs->driver, val | HSW_PWR_WELL_CTL_REQ(pw_idx));
	hsw_wait_for_power_well_enable(dev_priv, power_well);

	/* Display WA #1178: cnl */
	if (IS_CANNONLAKE(dev_priv) &&
	    pw_idx >= GLK_PW_CTL_IDX_AUX_B &&
	    pw_idx <= CNL_PW_CTL_IDX_AUX_F) {
		val = I915_READ(CNL_AUX_ANAOVRD1(pw_idx));
		val |= CNL_AUX_ANAOVRD1_ENABLE | CNL_AUX_ANAOVRD1_LDO_BYPASS;
		I915_WRITE(CNL_AUX_ANAOVRD1(pw_idx), val);
	}

	/* The well's own fuse state is only valid after the enable above. */
	if (wait_fuses)
		gen9_wait_for_power_well_fuses(dev_priv, pg);

	hsw_power_well_post_enable(dev_priv,
				   power_well->desc->hsw.irq_pipe_mask,
				   power_well->desc->hsw.has_vga);
}
660 
/*
 * Disable an HSW+ style power well: quiesce gated interrupts, clear the
 * driver request bit and wait (best effort) for the well to power down.
 */
static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	u32 val;

	hsw_power_well_pre_disable(dev_priv,
				   power_well->desc->hsw.irq_pipe_mask);

	val = I915_READ(regs->driver);
	I915_WRITE(regs->driver, val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
	hsw_wait_for_power_well_disable(dev_priv, power_well);
}
675 
/* AUX power well index -> DDI port, for the ICL combo PHY AUX wells. */
#define ICL_AUX_PW_TO_PORT(pw_idx)	((pw_idx) - ICL_PW_CTL_IDX_AUX_A)

/*
 * Enable an ICL combo PHY AUX power well: request the well, enable the AUX
 * lanes in the PHY, wait for the well, then apply Display WA #1178 on
 * non-eDP ports A/B.
 */
static void
icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	enum port port = ICL_AUX_PW_TO_PORT(pw_idx);
	u32 val;

	val = I915_READ(regs->driver);
	I915_WRITE(regs->driver, val | HSW_PWR_WELL_CTL_REQ(pw_idx));

	val = I915_READ(ICL_PORT_CL_DW12(port));
	I915_WRITE(ICL_PORT_CL_DW12(port), val | ICL_LANE_ENABLE_AUX);

	hsw_wait_for_power_well_enable(dev_priv, power_well);

	/* Display WA #1178: icl */
	if (IS_ICELAKE(dev_priv) &&
	    pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B &&
	    !intel_bios_is_port_edp(dev_priv, port)) {
		val = I915_READ(ICL_AUX_ANAOVRD1(pw_idx));
		val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS;
		I915_WRITE(ICL_AUX_ANAOVRD1(pw_idx), val);
	}
}
704 
/*
 * Disable an ICL combo PHY AUX power well: turn off the AUX lanes in the
 * PHY first, then drop the well request and wait for power down.
 */
static void
icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
				     struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	enum port port = ICL_AUX_PW_TO_PORT(pw_idx);
	u32 val;

	val = I915_READ(ICL_PORT_CL_DW12(port));
	I915_WRITE(ICL_PORT_CL_DW12(port), val & ~ICL_LANE_ENABLE_AUX);

	val = I915_READ(regs->driver);
	I915_WRITE(regs->driver, val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));

	hsw_wait_for_power_well_disable(dev_priv, power_well);
}
722 
/* AUX power well index -> AUX channel, for the ICL Type-C AUX wells. */
#define ICL_AUX_PW_TO_CH(pw_idx)	\
	((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A)

/*
 * Enable an ICL Type-C AUX power well: select legacy vs Thunderbolt IO mode
 * on the AUX channel first, then do the normal HSW-style well enabling.
 */
static void
icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
				 struct i915_power_well *power_well)
{
	enum aux_ch aux_ch = ICL_AUX_PW_TO_CH(power_well->desc->hsw.idx);
	u32 val;

	val = I915_READ(DP_AUX_CH_CTL(aux_ch));
	val &= ~DP_AUX_CH_CTL_TBT_IO;
	if (power_well->desc->hsw.is_tc_tbt)
		val |= DP_AUX_CH_CTL_TBT_IO;
	I915_WRITE(DP_AUX_CH_CTL(aux_ch), val);

	hsw_power_well_enable(dev_priv, power_well);
}
741 
/*
 * We should only use the power well if we explicitly asked the hardware to
 * enable it, so check if it's enabled and also check if we've requested it to
 * be enabled.
 */
static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	enum i915_power_well_id id = power_well->desc->id;
	int pw_idx = power_well->desc->hsw.idx;
	/* Both the request and the state bit must be set. */
	u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx) |
		   HSW_PWR_WELL_CTL_STATE(pw_idx);
	u32 val;

	val = I915_READ(regs->driver);

	/*
	 * On GEN9 big core due to a DMC bug the driver's request bits for PW1
	 * and the MISC_IO PW will be not restored, so check instead for the
	 * BIOS's own request bits, which are forced-on for these power wells
	 * when exiting DC5/6.
	 */
	if (IS_GEN(dev_priv, 9) && !IS_GEN9_LP(dev_priv) &&
	    (id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO))
		val |= I915_READ(regs->bios);

	return (val & mask) == mask;
}
771 
/* Sanity-check (WARN only) the preconditions for entering the DC9 state. */
static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
{
	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
		  "DC9 already programmed to be enabled.\n");
	WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
		  "DC5 still not disabled to enable DC9.\n");
	WARN_ONCE(I915_READ(HSW_PWR_WELL_CTL2) &
		  HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2),
		  "Power well 2 on.\n");
	WARN_ONCE(intel_irqs_enabled(dev_priv),
		  "Interrupts not disabled yet.\n");

	 /*
	  * TODO: check for the following to verify the conditions to enter DC9
	  * state are satisfied:
	  * 1] Check relevant display engine registers to verify if mode set
	  * disable sequence was followed.
	  * 2] Check if display uninitialize sequence is initialized.
	  */
}
792 
/* Sanity-check (WARN only) the preconditions for exiting the DC9 state. */
static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(intel_irqs_enabled(dev_priv),
		  "Interrupts not disabled yet.\n");
	WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
		  "DC5 still not disabled.\n");

	 /*
	  * TODO: check for the following to verify DC9 state was indeed
	  * entered before programming to disable it:
	  * 1] Check relevant display engine registers to verify if mode
	  *  set disable sequence was followed.
	  * 2] Check if display uninitialize sequence is initialized.
	  */
}
808 
/*
 * Write @state to DC_STATE_EN and verify it sticks: the DMC firmware has
 * been observed to revert the register, so rewrite and re-read until the
 * value is stable or we give up after a bounded number of rewrites.
 */
static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
				u32 state)
{
	int rewrites = 0;
	int rereads = 0;
	u32 v;

	I915_WRITE(DC_STATE_EN, state);

	/* It has been observed that disabling the dc6 state sometimes
	 * doesn't stick and dmc keeps returning old value. Make sure
	 * the write really sticks enough times and also force rewrite until
	 * we are confident that state is exactly what we want.
	 */
	do  {
		v = I915_READ(DC_STATE_EN);

		if (v != state) {
			I915_WRITE(DC_STATE_EN, state);
			rewrites++;
			rereads = 0;
		} else if (rereads++ > 5) {
			/* Value held stable across several reads: done. */
			break;
		}

	} while (rewrites < 100);

	if (v != state)
		DRM_ERROR("Writing dc state to 0x%x failed, now 0x%x\n",
			  state, v);

	/* Most of the times we need one retry, avoid spam */
	if (rewrites > 1)
		DRM_DEBUG_KMS("Rewrote dc state to 0x%x %d times\n",
			      state, rewrites);
}
845 
846 static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
847 {
848 	u32 mask;
849 
850 	mask = DC_STATE_EN_UPTO_DC5;
851 	if (INTEL_GEN(dev_priv) >= 11)
852 		mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9;
853 	else if (IS_GEN9_LP(dev_priv))
854 		mask |= DC_STATE_EN_DC9;
855 	else
856 		mask |= DC_STATE_EN_UPTO_DC6;
857 
858 	return mask;
859 }
860 
/*
 * Re-read the hardware DC state and resynchronize the driver's cached
 * dc_state with it (e.g. after the DMC changed it behind our back).
 */
void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = I915_READ(DC_STATE_EN) & gen9_dc_mask(dev_priv);

	DRM_DEBUG_KMS("Resetting DC state tracking from %02x to %02x\n",
		      dev_priv->csr.dc_state, val);
	dev_priv->csr.dc_state = val;
}
871 
/**
 * gen9_set_dc_state - set target display C power state
 * @dev_priv: i915 device instance
 * @state: target DC power state
 * - DC_STATE_DISABLE
 * - DC_STATE_EN_UPTO_DC5
 * - DC_STATE_EN_UPTO_DC6
 * - DC_STATE_EN_DC9
 *
 * Signal to DMC firmware/HW the target DC power state passed in @state.
 * DMC/HW can turn off individual display clocks and power rails when entering
 * a deeper DC power state (higher in number) and turns these back when exiting
 * that state to a shallower power state (lower in number). The HW will decide
 * when to actually enter a given state on an on-demand basis, for instance
 * depending on the active state of display pipes. The state of display
 * registers backed by affected power rails are saved/restored as needed.
 *
 * Based on the above enabling a deeper DC power state is asynchronous wrt.
 * enabling it. Disabling a deeper power state is synchronous: for instance
 * setting %DC_STATE_DISABLE won't complete until all HW resources are turned
 * back on and register state is restored. This is guaranteed by the MMIO write
 * to DC_STATE_EN blocking until the state is restored.
 */
static void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
{
	u32 val;
	u32 mask;

	/* Clamp (with a warning) requests beyond what this platform allows. */
	if (WARN_ON_ONCE(state & ~dev_priv->csr.allowed_dc_mask))
		state &= dev_priv->csr.allowed_dc_mask;

	val = I915_READ(DC_STATE_EN);
	mask = gen9_dc_mask(dev_priv);
	DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n",
		      val & mask, state);

	/* Check if DMC is ignoring our DC state requests */
	if ((val & mask) != dev_priv->csr.dc_state)
		DRM_ERROR("DC state mismatch (0x%x -> 0x%x)\n",
			  dev_priv->csr.dc_state, val & mask);

	val &= ~mask;
	val |= state;

	gen9_write_dc_state(dev_priv, val);

	dev_priv->csr.dc_state = val & mask;
}
920 
/* Enter the DC9 state (after verifying the entry conditions). */
void bxt_enable_dc9(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc9(dev_priv);

	DRM_DEBUG_KMS("Enabling DC9\n");
	/*
	 * Power sequencer reset is not needed on
	 * platforms with South Display Engine on PCH,
	 * because PPS registers are always on.
	 */
	if (!HAS_PCH_SPLIT(dev_priv))
		intel_power_sequencer_reset(dev_priv);
	gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
}
935 
/* Exit the DC9 state and reapply the PPS register unlock workaround. */
void bxt_disable_dc9(struct drm_i915_private *dev_priv)
{
	assert_can_disable_dc9(dev_priv);

	DRM_DEBUG_KMS("Disabling DC9\n");

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	intel_pps_unlock_regs_wa(dev_priv);
}
946 
/* WARN if the CSR/DMC firmware does not appear to be loaded into hardware. */
static void assert_csr_loaded(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(!I915_READ(CSR_PROGRAM(0)),
		  "CSR program storage start is NULL\n");
	WARN_ONCE(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n");
	WARN_ONCE(!I915_READ(CSR_HTP_SKL), "CSR HTP Not fine\n");
}
954 
/*
 * Find the power well with the given platform-specific id. Falls back to the
 * first power well (with a WARN) so callers never have to handle NULL.
 */
static struct i915_power_well *
lookup_power_well(struct drm_i915_private *dev_priv,
		  enum i915_power_well_id power_well_id)
{
	struct i915_power_well *power_well;

	for_each_power_well(dev_priv, power_well)
		if (power_well->desc->id == power_well_id)
			return power_well;

	/*
	 * It's not feasible to add error checking code to the callers since
	 * this condition really shouldn't happen and it doesn't even make sense
	 * to abort things like display initialization sequences. Just return
	 * the first power well and hope the WARN gets reported so we can fix
	 * our driver.
	 */
	WARN(1, "Power well %d not defined for this platform\n", power_well_id);
	return &dev_priv->power_domains.power_wells[0];
}
975 
976 static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
977 {
978 	bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
979 					SKL_DISP_PW_2);
980 
981 	WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n");
982 
983 	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5),
984 		  "DC5 already programmed to be enabled.\n");
985 	assert_rpm_wakelock_held(dev_priv);
986 
987 	assert_csr_loaded(dev_priv);
988 }
989 
/* Request the DC5 power saving state from the DMC firmware. */
void gen9_enable_dc5(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc5(dev_priv);

	DRM_DEBUG_KMS("Enabling DC5\n");

	/* Wa Display #1183: skl,kbl,cfl */
	if (IS_GEN9_BC(dev_priv))
		I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
			   SKL_SELECT_ALTERNATE_DC_EXIT);

	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
}
1003 
/*
 * Check the preconditions for entering DC6: the utility pin (backlight)
 * must be off, DC6 must not already be requested and the CSR firmware
 * must be loaded.
 */
static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
		  "Backlight is not disabled.\n");
	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
		  "DC6 already programmed to be enabled.\n");

	assert_csr_loaded(dev_priv);
}
1013 
/* Request the DC6 power saving state from the DMC firmware. */
void skl_enable_dc6(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc6(dev_priv);

	DRM_DEBUG_KMS("Enabling DC6\n");

	/* Wa Display #1183: skl,kbl,cfl */
	if (IS_GEN9_BC(dev_priv))
		I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
			   SKL_SELECT_ALTERNATE_DC_EXIT);

	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
}
1027 
/*
 * Transfer ownership of a power well request from the BIOS to the driver.
 * The driver request bit is set before the BIOS request bit is cleared so
 * the well never drops in between.
 */
static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
	u32 bios_req = I915_READ(regs->bios);

	/* Take over the request bit if set by BIOS. */
	if (bios_req & mask) {
		u32 drv_req = I915_READ(regs->driver);

		if (!(drv_req & mask))
			I915_WRITE(regs->driver, drv_req | mask);
		I915_WRITE(regs->bios, bios_req & ~mask);
	}
}
1045 
/* Power well enable hook for BXT DPIO common lane wells: init the DDI PHY. */
static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	bxt_ddi_phy_init(dev_priv, power_well->desc->bxt.phy);
}
1051 
/* Power well disable hook for BXT DPIO common lane wells: uninit the PHY. */
static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	bxt_ddi_phy_uninit(dev_priv, power_well->desc->bxt.phy);
}
1057 
/* Power well status hook: report whether the associated DDI PHY is up. */
static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	return bxt_ddi_phy_is_enabled(dev_priv, power_well->desc->bxt.phy);
}
1063 
1064 static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
1065 {
1066 	struct i915_power_well *power_well;
1067 
1068 	power_well = lookup_power_well(dev_priv, BXT_DISP_PW_DPIO_CMN_A);
1069 	if (power_well->count > 0)
1070 		bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);
1071 
1072 	power_well = lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
1073 	if (power_well->count > 0)
1074 		bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);
1075 
1076 	if (IS_GEMINILAKE(dev_priv)) {
1077 		power_well = lookup_power_well(dev_priv,
1078 					       GLK_DISP_PW_DPIO_CMN_C);
1079 		if (power_well->count > 0)
1080 			bxt_ddi_phy_verify_state(dev_priv,
1081 						 power_well->desc->bxt.phy);
1082 	}
1083 }
1084 
1085 static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
1086 					   struct i915_power_well *power_well)
1087 {
1088 	return (I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0;
1089 }
1090 
1091 static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
1092 {
1093 	u32 tmp = I915_READ(DBUF_CTL);
1094 
1095 	WARN((tmp & (DBUF_POWER_STATE | DBUF_POWER_REQUEST)) !=
1096 	     (DBUF_POWER_STATE | DBUF_POWER_REQUEST),
1097 	     "Unexpected DBuf power power state (0x%08x)\n", tmp);
1098 }
1099 
/*
 * "Enable" the DC off well, i.e. disallow DC states. After exiting DC
 * state, verify that the hw state the DMC is supposed to retain (cdclk,
 * DBuf, PHYs) still matches our bookkeeping, and restore what the DMC is
 * known not to retain.
 */
static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	struct intel_cdclk_state cdclk_state = {};

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	dev_priv->display.get_cdclk(dev_priv, &cdclk_state);
	/* Can't read out voltage_level so can't use intel_cdclk_changed() */
	WARN_ON(intel_cdclk_needs_modeset(&dev_priv->cdclk.hw, &cdclk_state));

	gen9_assert_dbuf_enabled(dev_priv);

	if (IS_GEN9_LP(dev_priv))
		bxt_verify_ddi_phy_power_wells(dev_priv);

	if (INTEL_GEN(dev_priv) >= 11)
		/*
		 * DMC retains HW context only for port A, the other combo
		 * PHY's HW context for port B is lost after DC transitions,
		 * so we need to restore it manually.
		 */
		icl_combo_phys_init(dev_priv);
}
1124 
/*
 * "Disable" the DC off well, i.e. allow the deepest permitted DC state.
 * DC6 is preferred over DC5 when both are allowed. Without DMC firmware
 * loaded no DC state can be entered, so bail out.
 */
static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	if (!dev_priv->csr.dmc_payload)
		return;

	if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC6)
		skl_enable_dc6(dev_priv);
	else if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5)
		gen9_enable_dc5(dev_priv);
}
1136 
/* No-op sync_hw hook for wells without any hw state to reconcile. */
static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv,
					 struct i915_power_well *power_well)
{
}
1141 
/* No-op enable/disable hook for always-on power wells. */
static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
}
1146 
/* Always-on power wells are, by definition, always enabled. */
static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
					     struct i915_power_well *power_well)
{
	return true;
}
1152 
1153 static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv,
1154 					 struct i915_power_well *power_well)
1155 {
1156 	if ((I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0)
1157 		i830_enable_pipe(dev_priv, PIPE_A);
1158 	if ((I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0)
1159 		i830_enable_pipe(dev_priv, PIPE_B);
1160 }
1161 
/* Shut down both pipes; B first, the reverse of the enable order. */
static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	i830_disable_pipe(dev_priv, PIPE_B);
	i830_disable_pipe(dev_priv, PIPE_A);
}
1168 
1169 static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv,
1170 					  struct i915_power_well *power_well)
1171 {
1172 	return I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE &&
1173 		I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
1174 }
1175 
/* Bring the pipes in line with the current software reference count. */
static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	if (power_well->count > 0)
		i830_pipes_power_well_enable(dev_priv, power_well);
	else
		i830_pipes_power_well_disable(dev_priv, power_well);
}
1184 
/*
 * Set a VLV/CHV power well on or off through the Punit power gating
 * control register, then poll the status register until it reflects the
 * requested state (or warn after a 100ms timeout).
 */
static void vlv_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	int pw_idx = power_well->desc->vlv.idx;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(pw_idx);
	state = enable ? PUNIT_PWRGT_PWR_ON(pw_idx) :
			 PUNIT_PWRGT_PWR_GATE(pw_idx);

	mutex_lock(&dev_priv->pcu_lock);

#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)

	/* Nothing to do if the well is already in the requested state. */
	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
	ctrl &= ~mask;
	ctrl |= state;
	vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);

	if (wait_for(COND, 100))
		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
			  state,
			  vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));

#undef COND

out:
	mutex_unlock(&dev_priv->pcu_lock);
}
1220 
/* Power well enable hook for plain VLV wells. */
static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, true);
}
1226 
/* Power well disable hook for plain VLV wells. */
static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, false);
}
1232 
/*
 * Read back the current state of a VLV power well from the Punit,
 * cross-checking that status and control registers agree and that the
 * well is in one of the two states we ever program.
 */
static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	int pw_idx = power_well->desc->vlv.idx;
	bool enabled = false;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(pw_idx);
	ctrl = PUNIT_PWRGT_PWR_ON(pw_idx);

	mutex_lock(&dev_priv->pcu_lock);

	state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != PUNIT_PWRGT_PWR_ON(pw_idx) &&
		state != PUNIT_PWRGT_PWR_GATE(pw_idx));
	if (state == ctrl)
		enabled = true;

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
	WARN_ON(ctrl != state);

	mutex_unlock(&dev_priv->pcu_lock);

	return enabled;
}
1268 
/*
 * Program the VLV display clock gating, arbiter and rawclk registers to
 * their required initial values after the display power well comes up.
 */
static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
{
	u32 val;

	/*
	 * On driver load, a pipe may be active and driving a DSI display.
	 * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
	 * (and never recovering) in this case. intel_dsi_post_disable() will
	 * clear it when we turn off the display.
	 */
	val = I915_READ(DSPCLK_GATE_D);
	val &= DPOUNIT_CLOCK_GATE_DISABLE;
	val |= VRHUNIT_CLOCK_GATE_DISABLE;
	I915_WRITE(DSPCLK_GATE_D, val);

	/*
	 * Disable trickle feed and enable pnd deadline calculation
	 */
	I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
	I915_WRITE(CBR1_VLV, 0);

	/* rawclk must be known by now; it feeds the PPS/backlight timing. */
	WARN_ON(dev_priv->rawclk_freq == 0);

	I915_WRITE(RAWCLK_FREQ_VLV,
		   DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 1000));
}
1295 
/*
 * Re-initialize everything that lives in the VLV/CHV display power well
 * after it has been powered on: reference clocks, clock gating, display
 * irqs, and - outside of driver init/resume - hotplug, CRT and VGA state.
 */
static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	enum pipe pipe;

	/*
	 * Enable the CRI clock source so we can get at the
	 * display and the reference clock for VGA
	 * hotplug / manual detection. Supposedly DSI also
	 * needs the ref clock up and running.
	 *
	 * CHV DPLL B/C have some issues if VGA mode is enabled.
	 */
	for_each_pipe(dev_priv, pipe) {
		u32 val = I915_READ(DPLL(pipe));

		val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
		if (pipe != PIPE_A)
			val |= DPLL_INTEGRATED_CRI_CLK_VLV;

		I915_WRITE(DPLL(pipe), val);
	}

	vlv_init_display_clock_gating(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_enable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * During driver initialization/resume we can avoid restoring the
	 * part of the HW/SW state that will be inited anyway explicitly.
	 */
	if (dev_priv->power_domains.initializing)
		return;

	intel_hpd_init(dev_priv);

	/* Re-enable the ADPA, if we have one */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		if (encoder->type == INTEL_OUTPUT_ANALOG)
			intel_crt_reset(&encoder->base);
	}

	i915_redisable_vga_power_on(dev_priv);

	intel_pps_unlock_regs_wa(dev_priv);
}
1344 
/*
 * Tear down software state tied to the VLV/CHV display power well before
 * it is powered off: display irqs, the power sequencer bookkeeping and,
 * unless we're in late suspend, switch hotplug detection to polling.
 */
static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_disable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* make sure we're done processing display irqs */
	synchronize_irq(dev_priv->drm.irq);

	intel_power_sequencer_reset(dev_priv);

	/* Prevent us from re-enabling polling on accident in late suspend */
	if (!dev_priv->drm.dev->power.is_suspended)
		intel_hpd_poll_init(dev_priv);
}
1360 
/* Power the display well on first, then re-init what lives inside it. */
static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, true);

	vlv_display_power_well_init(dev_priv);
}
1368 
/* Deinit the software state first, then power the display well off. */
static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	vlv_display_power_well_deinit(dev_priv);

	vlv_set_power_well(dev_priv, power_well, false);
}
1376 
/*
 * Enable the VLV DPIO common lane power well and de-assert the PHY common
 * lane reset afterwards, per the sequence mandated by the VLV DPIO notes.
 */
static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */

	vlv_set_power_well(dev_priv, power_well, true);

	/*
	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
	 *  6.	De-assert cmn_reset/side_reset. Same as VLV X0.
	 *   a.	GUnit 0x2110 bit[0] set to 1 (def 0)
	 *   b.	The other bits such as sfr settings / modesel may all
	 *	be set to 0.
	 *
	 * This should only be done on init and resume from S3 with
	 * both PLLs disabled, or we risk losing DPIO and PLL
	 * synchronization.
	 */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
}
1398 
/*
 * Disable the VLV DPIO common lane power well. All PLLs must already be
 * off; the common lane reset is asserted before cutting power.
 */
static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe)
		assert_pll_disabled(dev_priv, pipe);

	/* Assert common reset */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);

	vlv_set_power_well(dev_priv, power_well, false);
}
1412 
/* Mask covering every defined power domain bit. */
#define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0))

/* True if all bits in @bits are set in @val. */
#define BITS_SET(val, bits) (((val) & (bits)) == (bits))
1416 
/*
 * Cross-check the CHV PHY status register against the state we expect
 * from our chv_phy_control bookkeeping: compute the expected
 * DISPLAY_PHY_STATUS value from the power well states and the per-lane
 * power down overrides, then poll the hw until it matches (or complain).
 */
static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
	u32 phy_control = dev_priv->chv_phy_control;
	u32 phy_status = 0;
	u32 phy_status_mask = 0xffffffff;

	/*
	 * The BIOS can leave the PHY is some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
	if (!dev_priv->chv_phy_assert[DPIO_PHY0])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
				     PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));

	if (!dev_priv->chv_phy_assert[DPIO_PHY1])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));

	if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY0);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);

		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);

		/* CL1 is on whenever anything is on in either channel */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);

		/*
		 * The DPLLB check accounts for the pipe B + port A usage
		 * with CL2 powered up but all the lanes in the second channel
		 * powered down.
		 */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
		    (I915_READ(DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);

		/* Spline LDOs: 0x3 covers lanes 0/1, 0xc covers lanes 2/3. */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
	}

	if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY1);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
	}

	phy_status &= phy_status_mask;

	/*
	 * The PHY may be busy with some initial calibration and whatnot,
	 * so the power state can take a while to actually change.
	 */
	if (intel_wait_for_register(dev_priv,
				    DISPLAY_PHY_STATUS,
				    phy_status_mask,
				    phy_status,
				    10))
		DRM_ERROR("Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
			  I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask,
			   phy_status, dev_priv->chv_phy_control);
}
1522 
1523 #undef BITS_SET
1524 
/*
 * Enable a CHV DPIO common lane power well (PHY0 via the BC well, PHY1
 * via the D well): power the well, wait for phypwrgood, program dynamic
 * power down via sideband, then de-assert the common lane reset.
 */
static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	enum dpio_phy phy;
	enum pipe pipe;
	u32 tmp;

	WARN_ON_ONCE(power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
		     power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);

	/* The sideband accesses for each PHY go through a fixed pipe. */
	if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
		pipe = PIPE_A;
		phy = DPIO_PHY0;
	} else {
		pipe = PIPE_C;
		phy = DPIO_PHY1;
	}

	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
	vlv_set_power_well(dev_priv, power_well, true);

	/* Poll for phypwrgood signal */
	if (intel_wait_for_register(dev_priv,
				    DISPLAY_PHY_STATUS,
				    PHY_POWERGOOD(phy),
				    PHY_POWERGOOD(phy),
				    1))
		DRM_ERROR("Display PHY %d is not power up\n", phy);

	mutex_lock(&dev_priv->sb_lock);

	/* Enable dynamic power down */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
	tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
		DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);

	if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
		tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
		tmp |= DPIO_DYNPWRDOWNEN_CH1;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
	} else {
		/*
		 * Force the non-existing CL2 off. BXT does this
		 * too, so maybe it saves some power even though
		 * CL2 doesn't exist?
		 */
		tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
		tmp |= DPIO_CL2_LDOFUSE_PWRENB;
		vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
	}

	mutex_unlock(&dev_priv->sb_lock);

	dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		      phy, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);
}
1588 
/*
 * Disable a CHV DPIO common lane power well: assert the common lane
 * reset (with all PLLs on the PHY verified off), then power the well
 * down. After this first full reset the PHY state asserts are armed.
 */
static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum dpio_phy phy;

	WARN_ON_ONCE(power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
		     power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);

	if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
		phy = DPIO_PHY0;
		assert_pll_disabled(dev_priv, PIPE_A);
		assert_pll_disabled(dev_priv, PIPE_B);
	} else {
		phy = DPIO_PHY1;
		assert_pll_disabled(dev_priv, PIPE_C);
	}

	dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	vlv_set_power_well(dev_priv, power_well, false);

	DRM_DEBUG_KMS("Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		      phy, dev_priv->chv_phy_control);

	/* PHY is fully reset now, so we can enable the PHY state asserts */
	dev_priv->chv_phy_assert[phy] = true;

	assert_chv_phy_status(dev_priv);
}
1619 
/*
 * Verify that the lane power down status read back from a CHV PHY channel
 * matches what the current override settings and lane mask imply.
 */
static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
				     enum dpio_channel ch, bool override, unsigned int mask)
{
	enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
	u32 reg, val, expected, actual;

	/*
	 * The BIOS can leave the PHY is some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
	if (!dev_priv->chv_phy_assert[phy])
		return;

	if (ch == DPIO_CH0)
		reg = _CHV_CMN_DW0_CH0;
	else
		reg = _CHV_CMN_DW6_CH1;

	mutex_lock(&dev_priv->sb_lock);
	val = vlv_dpio_read(dev_priv, pipe, reg);
	mutex_unlock(&dev_priv->sb_lock);

	/*
	 * This assumes !override is only used when the port is disabled.
	 * All lanes should power down even without the override when
	 * the port is disabled.
	 */
	if (!override || mask == 0xf) {
		expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
		/*
		 * If CH1 common lane is not active anymore
		 * (eg. for pipe B DPLL) the entire channel will
		 * shut down, which causes the common lane registers
		 * to read as 0. That means we can't actually check
		 * the lane power down status bits, but as the entire
		 * register reads as 0 it's a good indication that the
		 * channel is indeed entirely powered down.
		 */
		if (ch == DPIO_CH1 && val == 0)
			expected = 0;
	} else if (mask != 0x0) {
		expected = DPIO_ANYDL_POWERDOWN;
	} else {
		expected = 0;
	}

	/* Extract the all/any power down bits for this channel. */
	if (ch == DPIO_CH0)
		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
	else
		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
	actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;

	WARN(actual != expected,
	     "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
	     !!(actual & DPIO_ALLDL_POWERDOWN), !!(actual & DPIO_ANYDL_POWERDOWN),
	     !!(expected & DPIO_ALLDL_POWERDOWN), !!(expected & DPIO_ANYDL_POWERDOWN),
	     reg, val);
}
1681 
/*
 * Set or clear the power down override enable for a whole PHY channel.
 * Returns the previous override state so callers can restore it later.
 */
bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
			  enum dpio_channel ch, bool override)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	bool was_override;

	mutex_lock(&power_domains->lock);

	was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	/* Avoid a redundant register write if nothing changes. */
	if (override == was_override)
		goto out;

	if (override)
		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
	else
		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
		      phy, ch, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);

out:
	mutex_unlock(&power_domains->lock);

	return was_override;
}
1712 
/*
 * Program the per-lane power down override mask for the PHY channel used
 * by @encoder, and enable or disable the override mechanism itself.
 */
void chv_phy_powergate_lanes(struct intel_encoder *encoder,
			     bool override, unsigned int mask)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(&encoder->base));
	enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));

	mutex_lock(&power_domains->lock);

	/* Replace the old lane mask with the new one. */
	dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
	dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);

	if (override)
		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
	else
		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
		      phy, ch, mask, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);

	assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);

	mutex_unlock(&power_domains->lock);
}
1742 
/*
 * Read back the CHV pipe-A power well state from the Punit DSPFREQ
 * register, cross-checking that control and status agree.
 */
static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	enum pipe pipe = PIPE_A;
	bool enabled;
	u32 state, ctrl;

	mutex_lock(&dev_priv->pcu_lock);

	state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe);
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
	enabled = state == DP_SSS_PWR_ON(pipe);

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe);
	/* The SSC (control) field sits 16 bits below the SSS (status) field. */
	WARN_ON(ctrl << 16 != state);

	mutex_unlock(&dev_priv->pcu_lock);

	return enabled;
}
1771 
/*
 * Set the CHV pipe-A power well state via the Punit DSPFREQ register and
 * poll the status field until it reflects the request (100ms timeout).
 */
static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well,
				    bool enable)
{
	enum pipe pipe = PIPE_A;
	u32 state;
	u32 ctrl;

	state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);

	mutex_lock(&dev_priv->pcu_lock);

#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state)

	/* Nothing to do if the well is already in the requested state. */
	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	ctrl &= ~DP_SSC_MASK(pipe);
	ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl);

	if (wait_for(COND, 100))
		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
			  state,
			  vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ));

#undef COND

out:
	mutex_unlock(&dev_priv->pcu_lock);
}
1805 
/* Power the pipe well on first, then re-init the display state in it. */
static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
				       struct i915_power_well *power_well)
{
	chv_set_pipe_power_well(dev_priv, power_well, true);

	vlv_display_power_well_init(dev_priv);
}
1813 
/* Deinit the display software state first, then power the well off. */
static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	vlv_display_power_well_deinit(dev_priv);

	chv_set_pipe_power_well(dev_priv, power_well, false);
}
1821 
/*
 * Take a reference on every power well that powers @domain and bump the
 * domain use count. Caller must hold power_domains->lock.
 */
static void
__intel_display_power_get_domain(struct drm_i915_private *dev_priv,
				 enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;

	for_each_power_domain_well(dev_priv, power_well, BIT_ULL(domain))
		intel_power_well_get(dev_priv, power_well);

	power_domains->domain_use_count[domain]++;
}
1834 
1835 /**
1836  * intel_display_power_get - grab a power domain reference
1837  * @dev_priv: i915 device instance
1838  * @domain: power domain to reference
1839  *
1840  * This function grabs a power domain reference for @domain and ensures that the
1841  * power domain and all its parents are powered up. Therefore users should only
1842  * grab a reference to the innermost power domain they need.
1843  *
1844  * Any power domain reference obtained by this function must have a symmetric
1845  * call to intel_display_power_put() to release the reference again.
1846  */
1847 intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
1848 					enum intel_display_power_domain domain)
1849 {
1850 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1851 	intel_wakeref_t wakeref = intel_runtime_pm_get(dev_priv);
1852 
1853 	mutex_lock(&power_domains->lock);
1854 
1855 	__intel_display_power_get_domain(dev_priv, domain);
1856 
1857 	mutex_unlock(&power_domains->lock);
1858 
1859 	return wakeref;
1860 }
1861 
1862 /**
1863  * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
1864  * @dev_priv: i915 device instance
1865  * @domain: power domain to reference
1866  *
1867  * This function grabs a power domain reference for @domain and ensures that the
1868  * power domain and all its parents are powered up. Therefore users should only
1869  * grab a reference to the innermost power domain they need.
1870  *
1871  * Any power domain reference obtained by this function must have a symmetric
1872  * call to intel_display_power_put() to release the reference again.
1873  */
1874 intel_wakeref_t
1875 intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
1876 				   enum intel_display_power_domain domain)
1877 {
1878 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1879 	intel_wakeref_t wakeref;
1880 	bool is_enabled;
1881 
1882 	wakeref = intel_runtime_pm_get_if_in_use(dev_priv);
1883 	if (!wakeref)
1884 		return false;
1885 
1886 	mutex_lock(&power_domains->lock);
1887 
1888 	if (__intel_display_power_is_enabled(dev_priv, domain)) {
1889 		__intel_display_power_get_domain(dev_priv, domain);
1890 		is_enabled = true;
1891 	} else {
1892 		is_enabled = false;
1893 	}
1894 
1895 	mutex_unlock(&power_domains->lock);
1896 
1897 	if (!is_enabled) {
1898 		intel_runtime_pm_put(dev_priv, wakeref);
1899 		wakeref = 0;
1900 	}
1901 
1902 	return wakeref;
1903 }
1904 
1905 static void __intel_display_power_put(struct drm_i915_private *dev_priv,
1906 				      enum intel_display_power_domain domain)
1907 {
1908 	struct i915_power_domains *power_domains;
1909 	struct i915_power_well *power_well;
1910 
1911 	power_domains = &dev_priv->power_domains;
1912 
1913 	mutex_lock(&power_domains->lock);
1914 
1915 	WARN(!power_domains->domain_use_count[domain],
1916 	     "Use count on domain %s is already zero\n",
1917 	     intel_display_power_domain_str(domain));
1918 	power_domains->domain_use_count[domain]--;
1919 
1920 	for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain))
1921 		intel_power_well_put(dev_priv, power_well);
1922 
1923 	mutex_unlock(&power_domains->lock);
1924 }
1925 
1926 /**
1927  * intel_display_power_put - release a power domain reference
1928  * @dev_priv: i915 device instance
1929  * @domain: power domain to reference
1930  *
1931  * This function drops the power domain reference obtained by
1932  * intel_display_power_get() and might power down the corresponding hardware
1933  * block right away if this is the last reference.
1934  */
1935 void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
1936 				       enum intel_display_power_domain domain)
1937 {
1938 	__intel_display_power_put(dev_priv, domain);
1939 	intel_runtime_pm_put_unchecked(dev_priv);
1940 }
1941 
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
/**
 * intel_display_power_put - release a power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 * @wakeref: wakeref obtained from the matching intel_display_power_get()
 *
 * Drops the power domain reference obtained by intel_display_power_get() and
 * releases @wakeref through the tracked runtime PM interface. Only built with
 * CONFIG_DRM_I915_DEBUG_RUNTIME_PM.
 */
void intel_display_power_put(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain,
			     intel_wakeref_t wakeref)
{
	__intel_display_power_put(dev_priv, domain);
	intel_runtime_pm_put(dev_priv, wakeref);
}
#endif
1951 
/* Domains backed by the single i830 "pipes" power well. */
#define I830_PIPES_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PIPE_A) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
1960 
/*
 * VLV power domain groups: one for the display (disp2d) well, one for the
 * DPIO common lane well, and one per DPIO TX lane-pair well.
 */
#define VLV_DISPLAY_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PIPE_A) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DSI) |		\
	BIT_ULL(POWER_DOMAIN_PORT_CRT) |		\
	BIT_ULL(POWER_DOMAIN_VGA) |			\
	BIT_ULL(POWER_DOMAIN_AUDIO) |		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_GMBUS) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_CMN_BC_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_CRT) |		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
2006 
/* CHV power domain groups: display well plus the two DPIO common wells. */
#define CHV_DISPLAY_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PIPE_A) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DSI) |		\
	BIT_ULL(POWER_DOMAIN_VGA) |			\
	BIT_ULL(POWER_DOMAIN_AUDIO) |		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_AUX_D) |		\
	BIT_ULL(POWER_DOMAIN_GMBUS) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

#define CHV_DPIO_CMN_BC_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

#define CHV_DPIO_CMN_D_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_D) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
2040 
/* HSW/BDW: a single global display power well covers everything below. */
#define HSW_DISPLAY_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */	\
	BIT_ULL(POWER_DOMAIN_VGA) |				\
	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
	BIT_ULL(POWER_DOMAIN_INIT))

/* Same as HSW but without pipe A's panel fitter. */
#define BDW_DISPLAY_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */	\
	BIT_ULL(POWER_DOMAIN_VGA) |				\
	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
2073 
/* SKL power domain groups: PW2, per-port DDI IO wells and the DC-off well. */
#define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |                       \
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
	BIT_ULL(POWER_DOMAIN_VGA) |				\
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
	SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT_ULL(POWER_DOMAIN_GT_IRQ) |			\
	BIT_ULL(POWER_DOMAIN_MODESET) |			\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
2111 
/* BXT power domain groups: PW2, DC-off and the two DPIO common wells. */
#define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
	BIT_ULL(POWER_DOMAIN_VGA) |				\
	BIT_ULL(POWER_DOMAIN_INIT))
#define BXT_DISPLAY_DC_OFF_POWER_DOMAINS (		\
	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT_ULL(POWER_DOMAIN_GT_IRQ) |			\
	BIT_ULL(POWER_DOMAIN_MODESET) |			\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_GMBUS) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define BXT_DPIO_CMN_A_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) |		\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define BXT_DPIO_CMN_BC_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
2144 
/* GLK power domain groups: PW2, per-port DDI IO/DPIO/AUX wells and DC-off. */
#define GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |                       \
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
	BIT_ULL(POWER_DOMAIN_VGA) |				\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
#define GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
#define GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
#define GLK_DPIO_CMN_A_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) |		\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DPIO_CMN_B_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DPIO_CMN_C_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_AUX_A_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_A) |		\
	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_AUX_B_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_AUX_C_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_DC_OFF_POWER_DOMAINS (		\
	GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT_ULL(POWER_DOMAIN_GT_IRQ) |			\
	BIT_ULL(POWER_DOMAIN_MODESET) |			\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_GMBUS) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
2195 
/* CNL power domain groups: PW2, per-port DDI IO/AUX wells and DC-off. */
#define CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) |		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |                       \
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
	BIT_ULL(POWER_DOMAIN_AUX_F) |			\
	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
	BIT_ULL(POWER_DOMAIN_VGA) |				\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_A_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_B_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_C_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_D_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_F_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_F) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
	CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT_ULL(POWER_DOMAIN_GT_IRQ) |			\
	BIT_ULL(POWER_DOMAIN_MODESET) |			\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
2252 
2253 /*
2254  * ICL PW_0/PG_0 domains (HW/DMC control):
2255  * - PCI
2256  * - clocks except port PLL
2257  * - central power except FBC
2258  * - shared functions except pipe interrupts, pipe MBUS, DBUF registers
2259  * ICL PW_1/PG_1 domains (HW/DMC control):
2260  * - DBUF function
2261  * - PIPE_A and its planes, except VGA
2262  * - transcoder EDP + PSR
2263  * - transcoder DSI
2264  * - DDI_A
2265  * - FBC
2266  */
2267 #define ICL_PW_4_POWER_DOMAINS (			\
2268 	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2269 	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
2270 	BIT_ULL(POWER_DOMAIN_INIT))
2271 	/* VDSC/joining */
2272 #define ICL_PW_3_POWER_DOMAINS (			\
2273 	ICL_PW_4_POWER_DOMAINS |			\
2274 	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2275 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
2276 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2277 	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2278 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
2279 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
2280 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |		\
2281 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
2282 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |		\
2283 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
2284 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |		\
2285 	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |	\
2286 	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) |		\
2287 	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) |	\
2288 	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) |		\
2289 	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
2290 	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2291 	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
2292 	BIT_ULL(POWER_DOMAIN_AUX_E) |			\
2293 	BIT_ULL(POWER_DOMAIN_AUX_F) |			\
2294 	BIT_ULL(POWER_DOMAIN_AUX_TBT1) |		\
2295 	BIT_ULL(POWER_DOMAIN_AUX_TBT2) |		\
2296 	BIT_ULL(POWER_DOMAIN_AUX_TBT3) |		\
2297 	BIT_ULL(POWER_DOMAIN_AUX_TBT4) |		\
2298 	BIT_ULL(POWER_DOMAIN_VGA) |			\
2299 	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
2300 	BIT_ULL(POWER_DOMAIN_INIT))
2301 	/*
2302 	 * - transcoder WD
2303 	 * - KVMR (HW control)
2304 	 */
2305 #define ICL_PW_2_POWER_DOMAINS (			\
2306 	ICL_PW_3_POWER_DOMAINS |			\
2307 	BIT_ULL(POWER_DOMAIN_TRANSCODER_EDP_VDSC) |		\
2308 	BIT_ULL(POWER_DOMAIN_INIT))
2309 	/*
2310 	 * - KVMR (HW control)
2311 	 */
2312 #define ICL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
2313 	ICL_PW_2_POWER_DOMAINS |			\
2314 	BIT_ULL(POWER_DOMAIN_MODESET) |			\
2315 	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2316 	BIT_ULL(POWER_DOMAIN_INIT))
2317 
2318 #define ICL_DDI_IO_A_POWER_DOMAINS (			\
2319 	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
2320 #define ICL_DDI_IO_B_POWER_DOMAINS (			\
2321 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
2322 #define ICL_DDI_IO_C_POWER_DOMAINS (			\
2323 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
2324 #define ICL_DDI_IO_D_POWER_DOMAINS (			\
2325 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO))
2326 #define ICL_DDI_IO_E_POWER_DOMAINS (			\
2327 	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO))
2328 #define ICL_DDI_IO_F_POWER_DOMAINS (			\
2329 	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO))
2330 
2331 #define ICL_AUX_A_IO_POWER_DOMAINS (			\
2332 	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |		\
2333 	BIT_ULL(POWER_DOMAIN_AUX_A))
2334 #define ICL_AUX_B_IO_POWER_DOMAINS (			\
2335 	BIT_ULL(POWER_DOMAIN_AUX_B))
2336 #define ICL_AUX_C_IO_POWER_DOMAINS (			\
2337 	BIT_ULL(POWER_DOMAIN_AUX_C))
2338 #define ICL_AUX_D_IO_POWER_DOMAINS (			\
2339 	BIT_ULL(POWER_DOMAIN_AUX_D))
2340 #define ICL_AUX_E_IO_POWER_DOMAINS (			\
2341 	BIT_ULL(POWER_DOMAIN_AUX_E))
2342 #define ICL_AUX_F_IO_POWER_DOMAINS (			\
2343 	BIT_ULL(POWER_DOMAIN_AUX_F))
2344 #define ICL_AUX_TBT1_IO_POWER_DOMAINS (			\
2345 	BIT_ULL(POWER_DOMAIN_AUX_TBT1))
2346 #define ICL_AUX_TBT2_IO_POWER_DOMAINS (			\
2347 	BIT_ULL(POWER_DOMAIN_AUX_TBT2))
2348 #define ICL_AUX_TBT3_IO_POWER_DOMAINS (			\
2349 	BIT_ULL(POWER_DOMAIN_AUX_TBT3))
2350 #define ICL_AUX_TBT4_IO_POWER_DOMAINS (			\
2351 	BIT_ULL(POWER_DOMAIN_AUX_TBT4))
2352 
/* Ops for always-on wells: sync/enable/disable are no-ops. */
static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = i9xx_always_on_power_well_noop,
	.disable = i9xx_always_on_power_well_noop,
	.is_enabled = i9xx_always_on_power_well_enabled,
};

/* Ops for the CHV pipe power well. */
static const struct i915_power_well_ops chv_pipe_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = chv_pipe_power_well_enable,
	.disable = chv_pipe_power_well_disable,
	.is_enabled = chv_pipe_power_well_enabled,
};

/* Ops for the CHV DPIO common lane wells (status read via the VLV helper). */
static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = chv_dpio_cmn_power_well_enable,
	.disable = chv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};
2373 
/* Single virtual "always-on" well covering every power domain. */
static const struct i915_power_well_desc i9xx_always_on_power_well[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
};
2383 
/* Ops for the i830 "pipes" power well. */
static const struct i915_power_well_ops i830_pipes_power_well_ops = {
	.sync_hw = i830_pipes_power_well_sync_hw,
	.enable = i830_pipes_power_well_enable,
	.disable = i830_pipes_power_well_disable,
	.is_enabled = i830_pipes_power_well_enabled,
};

/* i830: an always-on well plus a single "pipes" well. */
static const struct i915_power_well_desc i830_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "pipes",
		.domains = I830_PIPES_POWER_DOMAINS,
		.ops = &i830_pipes_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
};
2406 
/* Ops for HSW-style request/status register controlled wells. */
static const struct i915_power_well_ops hsw_power_well_ops = {
	.sync_hw = hsw_power_well_sync_hw,
	.enable = hsw_power_well_enable,
	.disable = hsw_power_well_disable,
	.is_enabled = hsw_power_well_enabled,
};

/* Ops for the gen9+ virtual "DC off" well. */
static const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = gen9_dc_off_power_well_enable,
	.disable = gen9_dc_off_power_well_disable,
	.is_enabled = gen9_dc_off_power_well_enabled,
};

/* Ops for BXT/GLK DPIO common (PHY) wells. */
static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = bxt_dpio_cmn_power_well_enable,
	.disable = bxt_dpio_cmn_power_well_disable,
	.is_enabled = bxt_dpio_cmn_power_well_enabled,
};
2427 
/* HSW+ power well control registers, one per requester (BIOS/driver/KVMR/debug). */
static const struct i915_power_well_regs hsw_power_well_regs = {
	.bios	= HSW_PWR_WELL_CTL1,
	.driver	= HSW_PWR_WELL_CTL2,
	.kvmr	= HSW_PWR_WELL_CTL3,
	.debug	= HSW_PWR_WELL_CTL4,
};
2434 
/* HSW: always-on well plus a single global display well. */
static const struct i915_power_well_desc hsw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "display",
		.domains = HSW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = HSW_DISP_PW_GLOBAL,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
			.hsw.has_vga = true,
		},
	},
};

/* BDW: like HSW, but the global well also gates pipe B/C interrupts. */
static const struct i915_power_well_desc bdw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "display",
		.domains = BDW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = HSW_DISP_PW_GLOBAL,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
			.hsw.has_vga = true,
		},
	},
};
2477 
/* Ops for the VLV display (disp2d) well. */
static const struct i915_power_well_ops vlv_display_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = vlv_display_power_well_enable,
	.disable = vlv_display_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

/* Ops for the VLV DPIO common lane well. */
static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = vlv_dpio_cmn_power_well_enable,
	.disable = vlv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

/* Generic ops for Punit-controlled VLV wells (DPIO TX lanes). */
static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = vlv_power_well_enable,
	.disable = vlv_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};
2498 
/*
 * VLV power well list. NOTE(review): each dpio-tx well lists all four TX
 * lane-pair domain groups — presumably intentional so any lane use keeps
 * every TX well powered; confirm against the Punit documentation.
 */
static const struct i915_power_well_desc vlv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "display",
		.domains = VLV_DISPLAY_POWER_DOMAINS,
		.ops = &vlv_display_power_well_ops,
		.id = VLV_DISP_PW_DISP2D,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DISP2D,
		},
	},
	{
		.name = "dpio-tx-b-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_01,
		},
	},
	{
		.name = "dpio-tx-b-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_23,
		},
	},
	{
		.name = "dpio-tx-c-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_01,
		},
	},
	{
		.name = "dpio-tx-c-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_23,
		},
	},
	{
		.name = "dpio-common",
		.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
		.ops = &vlv_dpio_cmn_power_well_ops,
		.id = VLV_DISP_PW_DPIO_CMN_BC,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
		},
	},
};
2574 
/* CHV power well list: display well plus two DPIO common (PHY) wells. */
static const struct i915_power_well_desc chv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "display",
		/*
		 * Pipe A power well is the new disp2d well. Pipe B and C
		 * power wells don't actually exist. Pipe A power well is
		 * required for any pipe to work.
		 */
		.domains = CHV_DISPLAY_POWER_DOMAINS,
		.ops = &chv_pipe_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "dpio-common-bc",
		.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
		.ops = &chv_dpio_cmn_power_well_ops,
		.id = VLV_DISP_PW_DPIO_CMN_BC,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
		},
	},
	{
		.name = "dpio-common-d",
		.domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
		.ops = &chv_dpio_cmn_power_well_ops,
		.id = CHV_DISP_PW_DPIO_CMN_D,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_D,
		},
	},
};
2613 
2614 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
2615 					 enum i915_power_well_id power_well_id)
2616 {
2617 	struct i915_power_well *power_well;
2618 	bool ret;
2619 
2620 	power_well = lookup_power_well(dev_priv, power_well_id);
2621 	ret = power_well->desc->ops->is_enabled(dev_priv, power_well);
2622 
2623 	return ret;
2624 }
2625 
/*
 * SKL power well list. Ordering matters: wells are enabled in list order
 * and disabled in reverse by the generic code above.
 */
static const struct i915_power_well_desc skl_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.always_on = true,
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_1,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_PW_1,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "MISC IO power well",
		/* Handled by the DMC firmware */
		.always_on = true,
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_MISC_IO,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_MISC_IO,
		},
	},
	{
		.name = "DC off",
		.domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 2",
		.domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_2,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_PW_2,
			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
			.hsw.has_vga = true,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DDI A/E IO power well",
		.domains = SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_A_E,
		},
	},
	{
		.name = "DDI B IO power well",
		.domains = SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_B,
		},
	},
	{
		.name = "DDI C IO power well",
		.domains = SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_C,
		},
	},
	{
		.name = "DDI D IO power well",
		.domains = SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_D,
		},
	},
};
2719 
/* BXT power well list (enable order; disabled in reverse). */
static const struct i915_power_well_desc bxt_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.always_on = true,
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_1,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_PW_1,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DC off",
		.domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 2",
		.domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_2,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_PW_2,
			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
			.hsw.has_vga = true,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "dpio-common-a",
		.domains = BXT_DPIO_CMN_A_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = BXT_DISP_PW_DPIO_CMN_A,
		{
			.bxt.phy = DPIO_PHY1,
		},
	},
	{
		.name = "dpio-common-bc",
		.domains = BXT_DPIO_CMN_BC_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = VLV_DISP_PW_DPIO_CMN_BC,
		{
			.bxt.phy = DPIO_PHY0,
		},
	},
};
2779 
/*
 * Display power wells for Geminilake. Entries are listed in enabling
 * order; the disabling order is the reverse (see intel_power_domains_init()).
 */
static const struct i915_power_well_desc glk_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.always_on = true,
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_1,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_PW_1,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DC off",
		.domains = GLK_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 2",
		.domains = GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_2,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_PW_2,
			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
			.hsw.has_vga = true,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "dpio-common-a",
		.domains = GLK_DPIO_CMN_A_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = BXT_DISP_PW_DPIO_CMN_A,
		{
			.bxt.phy = DPIO_PHY1,
		},
	},
	{
		.name = "dpio-common-b",
		.domains = GLK_DPIO_CMN_B_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = VLV_DISP_PW_DPIO_CMN_BC,
		{
			.bxt.phy = DPIO_PHY0,
		},
	},
	{
		.name = "dpio-common-c",
		.domains = GLK_DPIO_CMN_C_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = GLK_DISP_PW_DPIO_CMN_C,
		{
			.bxt.phy = DPIO_PHY2,
		},
	},
	{
		.name = "AUX A",
		.domains = GLK_DISPLAY_AUX_A_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = GLK_PW_CTL_IDX_AUX_A,
		},
	},
	{
		.name = "AUX B",
		.domains = GLK_DISPLAY_AUX_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = GLK_PW_CTL_IDX_AUX_B,
		},
	},
	{
		.name = "AUX C",
		.domains = GLK_DISPLAY_AUX_C_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = GLK_PW_CTL_IDX_AUX_C,
		},
	},
	{
		.name = "DDI A IO power well",
		.domains = GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = GLK_PW_CTL_IDX_DDI_A,
		},
	},
	{
		.name = "DDI B IO power well",
		.domains = GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_B,
		},
	},
	{
		.name = "DDI C IO power well",
		.domains = GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_C,
		},
	},
};
2908 
/*
 * Display power wells for Cannonlake, in enabling order. The "DDI F IO"
 * and "AUX F" entries must stay last: intel_power_domains_init() drops
 * the final two entries on SKUs without port F.
 */
static const struct i915_power_well_desc cnl_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.always_on = true,
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_1,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_PW_1,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "AUX A",
		.domains = CNL_DISPLAY_AUX_A_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = GLK_PW_CTL_IDX_AUX_A,
		},
	},
	{
		.name = "AUX B",
		.domains = CNL_DISPLAY_AUX_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = GLK_PW_CTL_IDX_AUX_B,
		},
	},
	{
		.name = "AUX C",
		.domains = CNL_DISPLAY_AUX_C_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = GLK_PW_CTL_IDX_AUX_C,
		},
	},
	{
		.name = "AUX D",
		.domains = CNL_DISPLAY_AUX_D_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = CNL_PW_CTL_IDX_AUX_D,
		},
	},
	{
		.name = "DC off",
		.domains = CNL_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 2",
		.domains = CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_2,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_PW_2,
			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
			.hsw.has_vga = true,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DDI A IO power well",
		.domains = CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = GLK_PW_CTL_IDX_DDI_A,
		},
	},
	{
		.name = "DDI B IO power well",
		.domains = CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_B,
		},
	},
	{
		.name = "DDI C IO power well",
		.domains = CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_C,
		},
	},
	{
		.name = "DDI D IO power well",
		.domains = CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_D,
		},
	},
	/* Keep port F entries last: removed on SKUs without port F. */
	{
		.name = "DDI F IO power well",
		.domains = CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = CNL_PW_CTL_IDX_DDI_F,
		},
	},
	{
		.name = "AUX F",
		.domains = CNL_DISPLAY_AUX_F_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = CNL_PW_CTL_IDX_AUX_F,
		},
	},
};
3050 
/*
 * Ops for the ICL combo PHY AUX power wells: enable/disable have
 * PHY-specific flows, sync/is_enabled reuse the standard HSW handlers.
 */
static const struct i915_power_well_ops icl_combo_phy_aux_power_well_ops = {
	.sync_hw = hsw_power_well_sync_hw,
	.enable = icl_combo_phy_aux_power_well_enable,
	.disable = icl_combo_phy_aux_power_well_disable,
	.is_enabled = hsw_power_well_enabled,
};
3057 
/*
 * Ops for the ICL Type-C PHY AUX power wells: only the enable flow is
 * TC-specific, everything else uses the standard HSW handlers.
 */
static const struct i915_power_well_ops icl_tc_phy_aux_power_well_ops = {
	.sync_hw = hsw_power_well_sync_hw,
	.enable = icl_tc_phy_aux_power_well_enable,
	.disable = hsw_power_well_disable,
	.is_enabled = hsw_power_well_enabled,
};
3064 
/* Control register set used by the ICL AUX power wells. */
static const struct i915_power_well_regs icl_aux_power_well_regs = {
	.bios	= ICL_PWR_WELL_CTL_AUX1,
	.driver	= ICL_PWR_WELL_CTL_AUX2,
	.debug	= ICL_PWR_WELL_CTL_AUX4,
};
3070 
/* Control register set used by the ICL DDI IO power wells. */
static const struct i915_power_well_regs icl_ddi_power_well_regs = {
	.bios	= ICL_PWR_WELL_CTL_DDI1,
	.driver	= ICL_PWR_WELL_CTL_DDI2,
	.debug	= ICL_PWR_WELL_CTL_DDI4,
};
3076 
/*
 * Display power wells for Icelake, in enabling order (disabling is the
 * reverse, see intel_power_domains_init()). Type-C/Thunderbolt AUX
 * wells use the TC PHY ops and mark TBT usage via hsw.is_tc_tbt.
 */
static const struct i915_power_well_desc icl_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.always_on = true,
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_1,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_PW_1,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DC off",
		.domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 2",
		.domains = ICL_PW_2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_2,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_PW_2,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "power well 3",
		.domains = ICL_PW_3_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_PW_3,
			.hsw.irq_pipe_mask = BIT(PIPE_B),
			.hsw.has_vga = true,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DDI A IO",
		.domains = ICL_DDI_IO_A_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_A,
		},
	},
	{
		.name = "DDI B IO",
		.domains = ICL_DDI_IO_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_B,
		},
	},
	{
		.name = "DDI C IO",
		.domains = ICL_DDI_IO_C_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_C,
		},
	},
	{
		.name = "DDI D IO",
		.domains = ICL_DDI_IO_D_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_D,
		},
	},
	{
		.name = "DDI E IO",
		.domains = ICL_DDI_IO_E_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_E,
		},
	},
	{
		.name = "DDI F IO",
		.domains = ICL_DDI_IO_F_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_F,
		},
	},
	{
		.name = "AUX A",
		.domains = ICL_AUX_A_IO_POWER_DOMAINS,
		.ops = &icl_combo_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_A,
		},
	},
	{
		.name = "AUX B",
		.domains = ICL_AUX_B_IO_POWER_DOMAINS,
		.ops = &icl_combo_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_B,
		},
	},
	{
		.name = "AUX C",
		.domains = ICL_AUX_C_IO_POWER_DOMAINS,
		.ops = &icl_tc_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_C,
			.hsw.is_tc_tbt = false,
		},
	},
	{
		.name = "AUX D",
		.domains = ICL_AUX_D_IO_POWER_DOMAINS,
		.ops = &icl_tc_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_D,
			.hsw.is_tc_tbt = false,
		},
	},
	{
		.name = "AUX E",
		.domains = ICL_AUX_E_IO_POWER_DOMAINS,
		.ops = &icl_tc_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_E,
			.hsw.is_tc_tbt = false,
		},
	},
	{
		.name = "AUX F",
		.domains = ICL_AUX_F_IO_POWER_DOMAINS,
		.ops = &icl_tc_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_F,
			.hsw.is_tc_tbt = false,
		},
	},
	{
		.name = "AUX TBT1",
		.domains = ICL_AUX_TBT1_IO_POWER_DOMAINS,
		.ops = &icl_tc_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT1,
			.hsw.is_tc_tbt = true,
		},
	},
	{
		.name = "AUX TBT2",
		.domains = ICL_AUX_TBT2_IO_POWER_DOMAINS,
		.ops = &icl_tc_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT2,
			.hsw.is_tc_tbt = true,
		},
	},
	{
		.name = "AUX TBT3",
		.domains = ICL_AUX_TBT3_IO_POWER_DOMAINS,
		.ops = &icl_tc_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT3,
			.hsw.is_tc_tbt = true,
		},
	},
	{
		.name = "AUX TBT4",
		.domains = ICL_AUX_TBT4_IO_POWER_DOMAINS,
		.ops = &icl_tc_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT4,
			.hsw.is_tc_tbt = true,
		},
	},
	{
		.name = "power well 4",
		.domains = ICL_PW_4_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_PW_4,
			.hsw.has_fuses = true,
			.hsw.irq_pipe_mask = BIT(PIPE_C),
		},
	},
};
3309 
/*
 * Normalize the disable_power_well module option: any negative value
 * selects the default (1, i.e. power wells may be disabled), everything
 * else is clamped to a 0/1 boolean.
 */
static int
sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
				   int disable_power_well)
{
	return disable_power_well < 0 ? 1 : !!disable_power_well;
}
3319 
3320 static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
3321 			       int enable_dc)
3322 {
3323 	u32 mask;
3324 	int requested_dc;
3325 	int max_dc;
3326 
3327 	if (INTEL_GEN(dev_priv) >= 11) {
3328 		max_dc = 2;
3329 		/*
3330 		 * DC9 has a separate HW flow from the rest of the DC states,
3331 		 * not depending on the DMC firmware. It's needed by system
3332 		 * suspend/resume, so allow it unconditionally.
3333 		 */
3334 		mask = DC_STATE_EN_DC9;
3335 	} else if (IS_GEN(dev_priv, 10) || IS_GEN9_BC(dev_priv)) {
3336 		max_dc = 2;
3337 		mask = 0;
3338 	} else if (IS_GEN9_LP(dev_priv)) {
3339 		max_dc = 1;
3340 		mask = DC_STATE_EN_DC9;
3341 	} else {
3342 		max_dc = 0;
3343 		mask = 0;
3344 	}
3345 
3346 	if (!i915_modparams.disable_power_well)
3347 		max_dc = 0;
3348 
3349 	if (enable_dc >= 0 && enable_dc <= max_dc) {
3350 		requested_dc = enable_dc;
3351 	} else if (enable_dc == -1) {
3352 		requested_dc = max_dc;
3353 	} else if (enable_dc > max_dc && enable_dc <= 2) {
3354 		DRM_DEBUG_KMS("Adjusting requested max DC state (%d->%d)\n",
3355 			      enable_dc, max_dc);
3356 		requested_dc = max_dc;
3357 	} else {
3358 		DRM_ERROR("Unexpected value for enable_dc (%d)\n", enable_dc);
3359 		requested_dc = max_dc;
3360 	}
3361 
3362 	if (requested_dc > 1)
3363 		mask |= DC_STATE_EN_UPTO_DC6;
3364 	if (requested_dc > 0)
3365 		mask |= DC_STATE_EN_UPTO_DC5;
3366 
3367 	DRM_DEBUG_KMS("Allowed DC state mask %02x\n", mask);
3368 
3369 	return mask;
3370 }
3371 
3372 static int
3373 __set_power_wells(struct i915_power_domains *power_domains,
3374 		  const struct i915_power_well_desc *power_well_descs,
3375 		  int power_well_count)
3376 {
3377 	u64 power_well_ids = 0;
3378 	int i;
3379 
3380 	power_domains->power_well_count = power_well_count;
3381 	power_domains->power_wells =
3382 				kcalloc(power_well_count,
3383 					sizeof(*power_domains->power_wells),
3384 					GFP_KERNEL);
3385 	if (!power_domains->power_wells)
3386 		return -ENOMEM;
3387 
3388 	for (i = 0; i < power_well_count; i++) {
3389 		enum i915_power_well_id id = power_well_descs[i].id;
3390 
3391 		power_domains->power_wells[i].desc = &power_well_descs[i];
3392 
3393 		if (id == DISP_PW_ID_NONE)
3394 			continue;
3395 
3396 		WARN_ON(id >= sizeof(power_well_ids) * 8);
3397 		WARN_ON(power_well_ids & BIT_ULL(id));
3398 		power_well_ids |= BIT_ULL(id);
3399 	}
3400 
3401 	return 0;
3402 }
3403 
/* Convenience wrapper that passes the descriptor array with its size. */
#define set_power_wells(power_domains, __power_well_descs) \
	__set_power_wells(power_domains, __power_well_descs, \
			  ARRAY_SIZE(__power_well_descs))
3407 
3408 /**
3409  * intel_power_domains_init - initializes the power domain structures
3410  * @dev_priv: i915 device instance
3411  *
3412  * Initializes the power domain structures for @dev_priv depending upon the
3413  * supported platform.
3414  */
3415 int intel_power_domains_init(struct drm_i915_private *dev_priv)
3416 {
3417 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
3418 	int err;
3419 
3420 	i915_modparams.disable_power_well =
3421 		sanitize_disable_power_well_option(dev_priv,
3422 						   i915_modparams.disable_power_well);
3423 	dev_priv->csr.allowed_dc_mask =
3424 		get_allowed_dc_mask(dev_priv, i915_modparams.enable_dc);
3425 
3426 	BUILD_BUG_ON(POWER_DOMAIN_NUM > 64);
3427 
3428 	mutex_init(&power_domains->lock);
3429 
3430 	/*
3431 	 * The enabling order will be from lower to higher indexed wells,
3432 	 * the disabling order is reversed.
3433 	 */
3434 	if (IS_ICELAKE(dev_priv)) {
3435 		err = set_power_wells(power_domains, icl_power_wells);
3436 	} else if (IS_CANNONLAKE(dev_priv)) {
3437 		err = set_power_wells(power_domains, cnl_power_wells);
3438 
3439 		/*
3440 		 * DDI and Aux IO are getting enabled for all ports
3441 		 * regardless the presence or use. So, in order to avoid
3442 		 * timeouts, lets remove them from the list
3443 		 * for the SKUs without port F.
3444 		 */
3445 		if (!IS_CNL_WITH_PORT_F(dev_priv))
3446 			power_domains->power_well_count -= 2;
3447 	} else if (IS_GEMINILAKE(dev_priv)) {
3448 		err = set_power_wells(power_domains, glk_power_wells);
3449 	} else if (IS_BROXTON(dev_priv)) {
3450 		err = set_power_wells(power_domains, bxt_power_wells);
3451 	} else if (IS_GEN9_BC(dev_priv)) {
3452 		err = set_power_wells(power_domains, skl_power_wells);
3453 	} else if (IS_CHERRYVIEW(dev_priv)) {
3454 		err = set_power_wells(power_domains, chv_power_wells);
3455 	} else if (IS_BROADWELL(dev_priv)) {
3456 		err = set_power_wells(power_domains, bdw_power_wells);
3457 	} else if (IS_HASWELL(dev_priv)) {
3458 		err = set_power_wells(power_domains, hsw_power_wells);
3459 	} else if (IS_VALLEYVIEW(dev_priv)) {
3460 		err = set_power_wells(power_domains, vlv_power_wells);
3461 	} else if (IS_I830(dev_priv)) {
3462 		err = set_power_wells(power_domains, i830_power_wells);
3463 	} else {
3464 		err = set_power_wells(power_domains, i9xx_always_on_power_well);
3465 	}
3466 
3467 	return err;
3468 }
3469 
3470 /**
3471  * intel_power_domains_cleanup - clean up power domains resources
3472  * @dev_priv: i915 device instance
3473  *
3474  * Release any resources acquired by intel_power_domains_init()
3475  */
3476 void intel_power_domains_cleanup(struct drm_i915_private *dev_priv)
3477 {
3478 	kfree(dev_priv->power_domains.power_wells);
3479 }
3480 
3481 static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
3482 {
3483 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
3484 	struct i915_power_well *power_well;
3485 
3486 	mutex_lock(&power_domains->lock);
3487 	for_each_power_well(dev_priv, power_well) {
3488 		power_well->desc->ops->sync_hw(dev_priv, power_well);
3489 		power_well->hw_enabled =
3490 			power_well->desc->ops->is_enabled(dev_priv, power_well);
3491 	}
3492 	mutex_unlock(&power_domains->lock);
3493 }
3494 
3495 static inline
3496 bool intel_dbuf_slice_set(struct drm_i915_private *dev_priv,
3497 			  i915_reg_t reg, bool enable)
3498 {
3499 	u32 val, status;
3500 
3501 	val = I915_READ(reg);
3502 	val = enable ? (val | DBUF_POWER_REQUEST) : (val & ~DBUF_POWER_REQUEST);
3503 	I915_WRITE(reg, val);
3504 	POSTING_READ(reg);
3505 	udelay(10);
3506 
3507 	status = I915_READ(reg) & DBUF_POWER_STATE;
3508 	if ((enable && !status) || (!enable && status)) {
3509 		DRM_ERROR("DBus power %s timeout!\n",
3510 			  enable ? "enable" : "disable");
3511 		return false;
3512 	}
3513 	return true;
3514 }
3515 
/* Request power-up of the (single) gen9 DBuf slice. */
static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
{
	intel_dbuf_slice_set(dev_priv, DBUF_CTL, true);
}
3520 
/* Request power-down of the (single) gen9 DBuf slice. */
static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
{
	intel_dbuf_slice_set(dev_priv, DBUF_CTL, false);
}
3525 
3526 static u8 intel_dbuf_max_slices(struct drm_i915_private *dev_priv)
3527 {
3528 	if (INTEL_GEN(dev_priv) < 11)
3529 		return 1;
3530 	return 2;
3531 }
3532 
3533 void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
3534 			    u8 req_slices)
3535 {
3536 	const u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
3537 	bool ret;
3538 
3539 	if (req_slices > intel_dbuf_max_slices(dev_priv)) {
3540 		DRM_ERROR("Invalid number of dbuf slices requested\n");
3541 		return;
3542 	}
3543 
3544 	if (req_slices == hw_enabled_slices || req_slices == 0)
3545 		return;
3546 
3547 	if (req_slices > hw_enabled_slices)
3548 		ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, true);
3549 	else
3550 		ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, false);
3551 
3552 	if (ret)
3553 		dev_priv->wm.skl_hw.ddb.enabled_slices = req_slices;
3554 }
3555 
3556 static void icl_dbuf_enable(struct drm_i915_private *dev_priv)
3557 {
3558 	I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) | DBUF_POWER_REQUEST);
3559 	I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) | DBUF_POWER_REQUEST);
3560 	POSTING_READ(DBUF_CTL_S2);
3561 
3562 	udelay(10);
3563 
3564 	if (!(I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
3565 	    !(I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
3566 		DRM_ERROR("DBuf power enable timeout\n");
3567 	else
3568 		dev_priv->wm.skl_hw.ddb.enabled_slices = 2;
3569 }
3570 
3571 static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
3572 {
3573 	I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) & ~DBUF_POWER_REQUEST);
3574 	I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) & ~DBUF_POWER_REQUEST);
3575 	POSTING_READ(DBUF_CTL_S2);
3576 
3577 	udelay(10);
3578 
3579 	if ((I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
3580 	    (I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
3581 		DRM_ERROR("DBuf power disable timeout!\n");
3582 	else
3583 		dev_priv->wm.skl_hw.ddb.enabled_slices = 0;
3584 }
3585 
3586 static void icl_mbus_init(struct drm_i915_private *dev_priv)
3587 {
3588 	u32 val;
3589 
3590 	val = MBUS_ABOX_BT_CREDIT_POOL1(16) |
3591 	      MBUS_ABOX_BT_CREDIT_POOL2(16) |
3592 	      MBUS_ABOX_B_CREDIT(1) |
3593 	      MBUS_ABOX_BW_CREDIT(1);
3594 
3595 	I915_WRITE(MBUS_ABOX_CTL, val);
3596 }
3597 
3598 static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
3599 				      bool enable)
3600 {
3601 	i915_reg_t reg;
3602 	u32 reset_bits, val;
3603 
3604 	if (IS_IVYBRIDGE(dev_priv)) {
3605 		reg = GEN7_MSG_CTL;
3606 		reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK;
3607 	} else {
3608 		reg = HSW_NDE_RSTWRN_OPT;
3609 		reset_bits = RESET_PCH_HANDSHAKE_ENABLE;
3610 	}
3611 
3612 	val = I915_READ(reg);
3613 
3614 	if (enable)
3615 		val |= reset_bits;
3616 	else
3617 		val &= ~reset_bits;
3618 
3619 	I915_WRITE(reg, val);
3620 }
3621 
/*
 * Bring up the SKL display core: disable DC states, enable the PCH
 * reset handshake, power up PG1 and Misc I/O, start CDCLK and DBuf, and
 * reload the DMC firmware program on resume. Steps must run in this order.
 */
static void skl_display_core_init(struct drm_i915_private *dev_priv,
				   bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* enable PCH reset handshake */
	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));

	/* enable PG1 and Misc I/O */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
	intel_power_well_enable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	skl_init_cdclk(dev_priv);

	gen9_dbuf_enable(dev_priv);

	/* Re-program the DMC firmware after it was lost across suspend. */
	if (resume && dev_priv->csr.dmc_payload)
		intel_csr_load_program(dev_priv);
}
3651 
/*
 * Tear down the SKL display core: the reverse of skl_display_core_init()
 * minus the reset handshake and Misc I/O well, which stay as-is per spec.
 */
static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	gen9_dbuf_disable(dev_priv);

	skl_uninit_cdclk(dev_priv);

	/* The spec doesn't call for removing the reset handshake flag */
	/* disable PG1 and Misc I/O */

	mutex_lock(&power_domains->lock);

	/*
	 * BSpec says to keep the MISC IO power well enabled here, only
	 * remove our request for power well 1.
	 * Note that even though the driver's request is removed power well 1
	 * may stay enabled after this due to DMC's own request on it.
	 */
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	usleep_range(10, 30);		/* 10 us delay per Bspec */
}
3681 
/*
 * Bring up the BXT display core: disable DC states, force the PCH reset
 * handshake off (no PCH present), power up PG1, start CDCLK and DBuf,
 * and reload the DMC firmware program on resume.
 */
void bxt_display_core_init(struct drm_i915_private *dev_priv,
			   bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/*
	 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
	 * or else the reset will hang because there is no PCH to respond.
	 * Move the handshake programming to initialization sequence.
	 * Previously was left up to BIOS.
	 */
	intel_pch_reset_handshake(dev_priv, false);

	/* Enable PG1 */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	bxt_init_cdclk(dev_priv);

	gen9_dbuf_enable(dev_priv);

	/* Re-program the DMC firmware after it was lost across suspend. */
	if (resume && dev_priv->csr.dmc_payload)
		intel_csr_load_program(dev_priv);
}
3713 
/*
 * Tear down the BXT display core: the reverse of bxt_display_core_init(),
 * leaving the reset handshake programming untouched per spec.
 */
void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	gen9_dbuf_disable(dev_priv);

	bxt_uninit_cdclk(dev_priv);

	/* The spec doesn't call for removing the reset handshake flag */

	/*
	 * Disable PW1 (PG1).
	 * Note that even though the driver's request is removed power well 1
	 * may stay enabled after this due to DMC's own request on it.
	 */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	usleep_range(10, 30);		/* 10 us delay per Bspec */
}
3741 
/*
 * Bring up the CNL display core, following the numbered hardware
 * sequence: reset handshake, combo PHYs, PG1, CDCLK, DBuf, then DMC
 * firmware reload on resume.
 */
static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* 1. Enable PCH Reset Handshake */
	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));

	/* 2-3. */
	cnl_combo_phys_init(dev_priv);

	/*
	 * 4. Enable Power Well 1 (PG1).
	 *    The AUX IO power wells will be enabled on demand.
	 */
	mutex_lock(&power_domains->lock);
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);
	mutex_unlock(&power_domains->lock);

	/* 5. Enable CD clock */
	cnl_init_cdclk(dev_priv);

	/* 6. Enable DBUF */
	gen9_dbuf_enable(dev_priv);

	/* Re-program the DMC firmware after it was lost across suspend. */
	if (resume && dev_priv->csr.dmc_payload)
		intel_csr_load_program(dev_priv);
}
3773 
/*
 * Tear down the CNL display core in the reverse order of
 * cnl_display_core_init().
 */
static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* 1. Disable all display engine functions -> already done */

	/* 2. Disable DBUF */
	gen9_dbuf_disable(dev_priv);

	/* 3. Disable CD clock */
	cnl_uninit_cdclk(dev_priv);

	/*
	 * 4. Disable Power Well 1 (PG1).
	 *    The AUX IO power wells are toggled on demand, so they are already
	 *    disabled at this point.
	 */
	mutex_lock(&power_domains->lock);
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);
	mutex_unlock(&power_domains->lock);

	usleep_range(10, 30);		/* 10 us delay per Bspec */

	/* 5. */
	cnl_combo_phys_uninit(dev_priv);
}
3804 
/*
 * Bring up the ICL display core, following the numbered hardware
 * sequence: reset handshake, combo PHYs, PG1, CDCLK, DBuf, MBus, then
 * DMC firmware reload on resume.
 */
void icl_display_core_init(struct drm_i915_private *dev_priv,
			   bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* 1. Enable PCH reset handshake. */
	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));

	/* 2-3. */
	icl_combo_phys_init(dev_priv);

	/*
	 * 4. Enable Power Well 1 (PG1).
	 *    The AUX IO power wells will be enabled on demand.
	 */
	mutex_lock(&power_domains->lock);
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);
	mutex_unlock(&power_domains->lock);

	/* 5. Enable CDCLK. */
	icl_init_cdclk(dev_priv);

	/* 6. Enable DBUF. */
	icl_dbuf_enable(dev_priv);

	/* 7. Setup MBUS. */
	icl_mbus_init(dev_priv);

	/* Re-program the DMC firmware after it was lost across suspend. */
	if (resume && dev_priv->csr.dmc_payload)
		intel_csr_load_program(dev_priv);
}
3840 
/*
 * Tear down the ICL display core in the reverse order of
 * icl_display_core_init().
 */
void icl_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* 1. Disable all display engine functions -> already done */

	/* 2. Disable DBUF */
	icl_dbuf_disable(dev_priv);

	/* 3. Disable CD clock */
	icl_uninit_cdclk(dev_priv);

	/*
	 * 4. Disable Power Well 1 (PG1).
	 *    The AUX IO power wells are toggled on demand, so they are already
	 *    disabled at this point.
	 */
	mutex_lock(&power_domains->lock);
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);
	mutex_unlock(&power_domains->lock);

	/* 5. */
	icl_combo_phys_uninit(dev_priv);
}
3869 
/*
 * Reconstruct the initial value of the DISPLAY_PHY_CONTROL shadow copy on
 * CHV from the current power well and lane-ready status, then program it.
 * The register itself must never be read (see workaround note below).
 */
static void chv_phy_control_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);

	/*
	 * DISPLAY_PHY_CONTROL can get corrupted if read. As a
	 * workaround never ever read DISPLAY_PHY_CONTROL, and
	 * instead maintain a shadow copy ourselves. Use the actual
	 * power well state and lane status to reconstruct the
	 * expected initial value.
	 */
	dev_priv->chv_phy_control =
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);

	/*
	 * If all lanes are disabled we leave the override disabled
	 * with all power down bits cleared to match the state we
	 * would use after disabling the port. Otherwise enable the
	 * override and set the lane powerdown bits according to the
	 * current lane status.
	 */
	if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
		/* Port B/C lane-ready bits live in the pipe A DPLL register. */
		u32 status = I915_READ(DPLL(PIPE_A));
		unsigned int mask;

		mask = status & DPLL_PORTB_READY_MASK;
		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);

		mask = (status & DPLL_PORTC_READY_MASK) >> 4;
		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);

		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);

		/* PHY0 is powered, so its state can be asserted against. */
		dev_priv->chv_phy_assert[DPIO_PHY0] = false;
	} else {
		dev_priv->chv_phy_assert[DPIO_PHY0] = true;
	}

	if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
		u32 status = I915_READ(DPIO_PHY_STATUS);
		unsigned int mask;

		mask = status & DPLL_PORTD_READY_MASK;

		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);

		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);

		dev_priv->chv_phy_assert[DPIO_PHY1] = false;
	} else {
		dev_priv->chv_phy_assert[DPIO_PHY1] = true;
	}

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Initial PHY_CONTROL=0x%08x\n",
		      dev_priv->chv_phy_control);
}
3956 
/*
 * VLV common lane workaround: force a display PHY side reset cycle at init
 * time, unless the display is potentially already active. The PHY SB reset
 * is asserted here by gating the common lane power; the de-assert half
 * happens when the common lane well is next enabled on demand.
 */
static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn =
		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
	struct i915_power_well *disp2d =
		lookup_power_well(dev_priv, VLV_DISP_PW_DISP2D);

	/* If the display might be already active skip this */
	if (cmn->desc->ops->is_enabled(dev_priv, cmn) &&
	    disp2d->desc->ops->is_enabled(dev_priv, disp2d) &&
	    I915_READ(DPIO_CTL) & DPIO_CMNRST)
		return;

	DRM_DEBUG_KMS("toggling display PHY side reset\n");

	/* cmnlane needs DPLL registers */
	disp2d->desc->ops->enable(dev_priv, disp2d);

	/*
	 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
	 * Need to assert and de-assert PHY SB reset by gating the
	 * common lane power, then un-gating it.
	 * Simply ungating isn't enough to reset the PHY enough to get
	 * ports and lanes running.
	 */
	cmn->desc->ops->disable(dev_priv, cmn);
}
3984 
3985 static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);
3986 
3987 /**
3988  * intel_power_domains_init_hw - initialize hardware power domain state
3989  * @i915: i915 device instance
3990  * @resume: Called from resume code paths or not
3991  *
3992  * This function initializes the hardware power domain state and enables all
3993  * power wells belonging to the INIT power domain. Power wells in other
3994  * domains (and not in the INIT domain) are referenced or disabled by
3995  * intel_modeset_readout_hw_state(). After that the reference count of each
3996  * power well must match its HW enabled state, see
3997  * intel_power_domains_verify_state().
3998  *
3999  * It will return with power domains disabled (to be enabled later by
4000  * intel_power_domains_enable()) and must be paired with
4001  * intel_power_domains_fini_hw().
4002  */
4003 void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
4004 {
4005 	struct i915_power_domains *power_domains = &i915->power_domains;
4006 
4007 	power_domains->initializing = true;
4008 
4009 	if (IS_ICELAKE(i915)) {
4010 		icl_display_core_init(i915, resume);
4011 	} else if (IS_CANNONLAKE(i915)) {
4012 		cnl_display_core_init(i915, resume);
4013 	} else if (IS_GEN9_BC(i915)) {
4014 		skl_display_core_init(i915, resume);
4015 	} else if (IS_GEN9_LP(i915)) {
4016 		bxt_display_core_init(i915, resume);
4017 	} else if (IS_CHERRYVIEW(i915)) {
4018 		mutex_lock(&power_domains->lock);
4019 		chv_phy_control_init(i915);
4020 		mutex_unlock(&power_domains->lock);
4021 	} else if (IS_VALLEYVIEW(i915)) {
4022 		mutex_lock(&power_domains->lock);
4023 		vlv_cmnlane_wa(i915);
4024 		mutex_unlock(&power_domains->lock);
4025 	} else if (IS_IVYBRIDGE(i915) || INTEL_GEN(i915) >= 7) {
4026 		intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
4027 	}
4028 
4029 	/*
4030 	 * Keep all power wells enabled for any dependent HW access during
4031 	 * initialization and to make sure we keep BIOS enabled display HW
4032 	 * resources powered until display HW readout is complete. We drop
4033 	 * this reference in intel_power_domains_enable().
4034 	 */
4035 	power_domains->wakeref =
4036 		intel_display_power_get(i915, POWER_DOMAIN_INIT);
4037 
4038 	/* Disable power support if the user asked so. */
4039 	if (!i915_modparams.disable_power_well)
4040 		intel_display_power_get(i915, POWER_DOMAIN_INIT);
4041 	intel_power_domains_sync_hw(i915);
4042 
4043 	power_domains->initializing = false;
4044 }
4045 
4046 /**
4047  * intel_power_domains_fini_hw - deinitialize hw power domain state
4048  * @i915: i915 device instance
4049  *
4050  * De-initializes the display power domain HW state. It also ensures that the
4051  * device stays powered up so that the driver can be reloaded.
4052  *
4053  * It must be called with power domains already disabled (after a call to
4054  * intel_power_domains_disable()) and must be paired with
4055  * intel_power_domains_init_hw().
4056  */
4057 void intel_power_domains_fini_hw(struct drm_i915_private *i915)
4058 {
4059 	intel_wakeref_t wakeref __maybe_unused =
4060 		fetch_and_zero(&i915->power_domains.wakeref);
4061 
4062 	/* Remove the refcount we took to keep power well support disabled. */
4063 	if (!i915_modparams.disable_power_well)
4064 		intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);
4065 
4066 	intel_power_domains_verify_state(i915);
4067 
4068 	/* Keep the power well enabled, but cancel its rpm wakeref. */
4069 	intel_runtime_pm_put(i915, wakeref);
4070 }
4071 
4072 /**
4073  * intel_power_domains_enable - enable toggling of display power wells
4074  * @i915: i915 device instance
4075  *
4076  * Enable the ondemand enabling/disabling of the display power wells. Note that
4077  * power wells not belonging to POWER_DOMAIN_INIT are allowed to be toggled
4078  * only at specific points of the display modeset sequence, thus they are not
4079  * affected by the intel_power_domains_enable()/disable() calls. The purpose
4080  * of these function is to keep the rest of power wells enabled until the end
4081  * of display HW readout (which will acquire the power references reflecting
4082  * the current HW state).
4083  */
4084 void intel_power_domains_enable(struct drm_i915_private *i915)
4085 {
4086 	intel_wakeref_t wakeref __maybe_unused =
4087 		fetch_and_zero(&i915->power_domains.wakeref);
4088 
4089 	intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
4090 	intel_power_domains_verify_state(i915);
4091 }
4092 
4093 /**
4094  * intel_power_domains_disable - disable toggling of display power wells
4095  * @i915: i915 device instance
4096  *
4097  * Disable the ondemand enabling/disabling of the display power wells. See
4098  * intel_power_domains_enable() for which power wells this call controls.
4099  */
4100 void intel_power_domains_disable(struct drm_i915_private *i915)
4101 {
4102 	struct i915_power_domains *power_domains = &i915->power_domains;
4103 
4104 	WARN_ON(power_domains->wakeref);
4105 	power_domains->wakeref =
4106 		intel_display_power_get(i915, POWER_DOMAIN_INIT);
4107 
4108 	intel_power_domains_verify_state(i915);
4109 }
4110 
4111 /**
4112  * intel_power_domains_suspend - suspend power domain state
4113  * @i915: i915 device instance
4114  * @suspend_mode: specifies the target suspend state (idle, mem, hibernation)
4115  *
4116  * This function prepares the hardware power domain state before entering
4117  * system suspend.
4118  *
4119  * It must be called with power domains already disabled (after a call to
4120  * intel_power_domains_disable()) and paired with intel_power_domains_resume().
4121  */
4122 void intel_power_domains_suspend(struct drm_i915_private *i915,
4123 				 enum i915_drm_suspend_mode suspend_mode)
4124 {
4125 	struct i915_power_domains *power_domains = &i915->power_domains;
4126 	intel_wakeref_t wakeref __maybe_unused =
4127 		fetch_and_zero(&power_domains->wakeref);
4128 
4129 	intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
4130 
4131 	/*
4132 	 * In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9
4133 	 * support don't manually deinit the power domains. This also means the
4134 	 * CSR/DMC firmware will stay active, it will power down any HW
4135 	 * resources as required and also enable deeper system power states
4136 	 * that would be blocked if the firmware was inactive.
4137 	 */
4138 	if (!(i915->csr.allowed_dc_mask & DC_STATE_EN_DC9) &&
4139 	    suspend_mode == I915_DRM_SUSPEND_IDLE &&
4140 	    i915->csr.dmc_payload) {
4141 		intel_power_domains_verify_state(i915);
4142 		return;
4143 	}
4144 
4145 	/*
4146 	 * Even if power well support was disabled we still want to disable
4147 	 * power wells if power domains must be deinitialized for suspend.
4148 	 */
4149 	if (!i915_modparams.disable_power_well) {
4150 		intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);
4151 		intel_power_domains_verify_state(i915);
4152 	}
4153 
4154 	if (IS_ICELAKE(i915))
4155 		icl_display_core_uninit(i915);
4156 	else if (IS_CANNONLAKE(i915))
4157 		cnl_display_core_uninit(i915);
4158 	else if (IS_GEN9_BC(i915))
4159 		skl_display_core_uninit(i915);
4160 	else if (IS_GEN9_LP(i915))
4161 		bxt_display_core_uninit(i915);
4162 
4163 	power_domains->display_core_suspended = true;
4164 }
4165 
4166 /**
4167  * intel_power_domains_resume - resume power domain state
4168  * @i915: i915 device instance
4169  *
4170  * This function resume the hardware power domain state during system resume.
4171  *
4172  * It will return with power domain support disabled (to be enabled later by
4173  * intel_power_domains_enable()) and must be paired with
4174  * intel_power_domains_suspend().
4175  */
4176 void intel_power_domains_resume(struct drm_i915_private *i915)
4177 {
4178 	struct i915_power_domains *power_domains = &i915->power_domains;
4179 
4180 	if (power_domains->display_core_suspended) {
4181 		intel_power_domains_init_hw(i915, true);
4182 		power_domains->display_core_suspended = false;
4183 	} else {
4184 		WARN_ON(power_domains->wakeref);
4185 		power_domains->wakeref =
4186 			intel_display_power_get(i915, POWER_DOMAIN_INIT);
4187 	}
4188 
4189 	intel_power_domains_verify_state(i915);
4190 }
4191 
4192 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
4193 
4194 static void intel_power_domains_dump_info(struct drm_i915_private *i915)
4195 {
4196 	struct i915_power_domains *power_domains = &i915->power_domains;
4197 	struct i915_power_well *power_well;
4198 
4199 	for_each_power_well(i915, power_well) {
4200 		enum intel_display_power_domain domain;
4201 
4202 		DRM_DEBUG_DRIVER("%-25s %d\n",
4203 				 power_well->desc->name, power_well->count);
4204 
4205 		for_each_power_domain(domain, power_well->desc->domains)
4206 			DRM_DEBUG_DRIVER("  %-23s %d\n",
4207 					 intel_display_power_domain_str(domain),
4208 					 power_domains->domain_use_count[domain]);
4209 	}
4210 }
4211 
4212 /**
4213  * intel_power_domains_verify_state - verify the HW/SW state for all power wells
4214  * @i915: i915 device instance
4215  *
4216  * Verify if the reference count of each power well matches its HW enabled
4217  * state and the total refcount of the domains it belongs to. This must be
4218  * called after modeset HW state sanitization, which is responsible for
4219  * acquiring reference counts for any power wells in use and disabling the
4220  * ones left on by BIOS but not required by any active output.
4221  */
4222 static void intel_power_domains_verify_state(struct drm_i915_private *i915)
4223 {
4224 	struct i915_power_domains *power_domains = &i915->power_domains;
4225 	struct i915_power_well *power_well;
4226 	bool dump_domain_info;
4227 
4228 	mutex_lock(&power_domains->lock);
4229 
4230 	dump_domain_info = false;
4231 	for_each_power_well(i915, power_well) {
4232 		enum intel_display_power_domain domain;
4233 		int domains_count;
4234 		bool enabled;
4235 
4236 		enabled = power_well->desc->ops->is_enabled(i915, power_well);
4237 		if ((power_well->count || power_well->desc->always_on) !=
4238 		    enabled)
4239 			DRM_ERROR("power well %s state mismatch (refcount %d/enabled %d)",
4240 				  power_well->desc->name,
4241 				  power_well->count, enabled);
4242 
4243 		domains_count = 0;
4244 		for_each_power_domain(domain, power_well->desc->domains)
4245 			domains_count += power_domains->domain_use_count[domain];
4246 
4247 		if (power_well->count != domains_count) {
4248 			DRM_ERROR("power well %s refcount/domain refcount mismatch "
4249 				  "(refcount %d/domains refcount %d)\n",
4250 				  power_well->desc->name, power_well->count,
4251 				  domains_count);
4252 			dump_domain_info = true;
4253 		}
4254 	}
4255 
4256 	if (dump_domain_info) {
4257 		static bool dumped;
4258 
4259 		if (!dumped) {
4260 			intel_power_domains_dump_info(i915);
4261 			dumped = true;
4262 		}
4263 	}
4264 
4265 	mutex_unlock(&power_domains->lock);
4266 }
4267 
4268 #else
4269 
/* No-op stub: verification is only built with CONFIG_DRM_I915_DEBUG_RUNTIME_PM. */
static void intel_power_domains_verify_state(struct drm_i915_private *i915)
{
}
4273 
4274 #endif
4275 
4276 /**
4277  * intel_runtime_pm_get - grab a runtime pm reference
4278  * @i915: i915 device instance
4279  *
4280  * This function grabs a device-level runtime pm reference (mostly used for GEM
4281  * code to ensure the GTT or GT is on) and ensures that it is powered up.
4282  *
4283  * Any runtime pm reference obtained by this function must have a symmetric
4284  * call to intel_runtime_pm_put() to release the reference again.
4285  *
4286  * Returns: the wakeref cookie to pass to intel_runtime_pm_put()
4287  */
4288 intel_wakeref_t intel_runtime_pm_get(struct drm_i915_private *i915)
4289 {
4290 	struct pci_dev *pdev = i915->drm.pdev;
4291 	struct device *kdev = &pdev->dev;
4292 	int ret;
4293 
4294 	ret = pm_runtime_get_sync(kdev);
4295 	WARN_ONCE(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);
4296 
4297 	return track_intel_runtime_pm_wakeref(i915);
4298 }
4299 
4300 /**
4301  * intel_runtime_pm_get_if_in_use - grab a runtime pm reference if device in use
4302  * @i915: i915 device instance
4303  *
4304  * This function grabs a device-level runtime pm reference if the device is
4305  * already in use and ensures that it is powered up. It is illegal to try
4306  * and access the HW should intel_runtime_pm_get_if_in_use() report failure.
4307  *
4308  * Any runtime pm reference obtained by this function must have a symmetric
4309  * call to intel_runtime_pm_put() to release the reference again.
4310  *
4311  * Returns: the wakeref cookie to pass to intel_runtime_pm_put(), evaluates
4312  * as True if the wakeref was acquired, or False otherwise.
4313  */
4314 intel_wakeref_t intel_runtime_pm_get_if_in_use(struct drm_i915_private *i915)
4315 {
4316 	if (IS_ENABLED(CONFIG_PM)) {
4317 		struct pci_dev *pdev = i915->drm.pdev;
4318 		struct device *kdev = &pdev->dev;
4319 
4320 		/*
4321 		 * In cases runtime PM is disabled by the RPM core and we get
4322 		 * an -EINVAL return value we are not supposed to call this
4323 		 * function, since the power state is undefined. This applies
4324 		 * atm to the late/early system suspend/resume handlers.
4325 		 */
4326 		if (pm_runtime_get_if_in_use(kdev) <= 0)
4327 			return 0;
4328 	}
4329 
4330 	return track_intel_runtime_pm_wakeref(i915);
4331 }
4332 
4333 /**
4334  * intel_runtime_pm_get_noresume - grab a runtime pm reference
4335  * @i915: i915 device instance
4336  *
4337  * This function grabs a device-level runtime pm reference (mostly used for GEM
4338  * code to ensure the GTT or GT is on).
4339  *
4340  * It will _not_ power up the device but instead only check that it's powered
4341  * on.  Therefore it is only valid to call this functions from contexts where
4342  * the device is known to be powered up and where trying to power it up would
4343  * result in hilarity and deadlocks. That pretty much means only the system
4344  * suspend/resume code where this is used to grab runtime pm references for
4345  * delayed setup down in work items.
4346  *
4347  * Any runtime pm reference obtained by this function must have a symmetric
4348  * call to intel_runtime_pm_put() to release the reference again.
4349  *
4350  * Returns: the wakeref cookie to pass to intel_runtime_pm_put()
4351  */
4352 intel_wakeref_t intel_runtime_pm_get_noresume(struct drm_i915_private *i915)
4353 {
4354 	struct pci_dev *pdev = i915->drm.pdev;
4355 	struct device *kdev = &pdev->dev;
4356 
4357 	assert_rpm_wakelock_held(i915);
4358 	pm_runtime_get_noresume(kdev);
4359 
4360 	return track_intel_runtime_pm_wakeref(i915);
4361 }
4362 
4363 /**
4364  * intel_runtime_pm_put - release a runtime pm reference
4365  * @i915: i915 device instance
4366  *
4367  * This function drops the device-level runtime pm reference obtained by
4368  * intel_runtime_pm_get() and might power down the corresponding
4369  * hardware block right away if this is the last reference.
4370  */
4371 void intel_runtime_pm_put_unchecked(struct drm_i915_private *i915)
4372 {
4373 	struct pci_dev *pdev = i915->drm.pdev;
4374 	struct device *kdev = &pdev->dev;
4375 
4376 	untrack_intel_runtime_pm_wakeref(i915);
4377 
4378 	pm_runtime_mark_last_busy(kdev);
4379 	pm_runtime_put_autosuspend(kdev);
4380 }
4381 
4382 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
/*
 * Debug variant of intel_runtime_pm_put(): cancel @wref in the wakeref
 * tracking before dropping the underlying runtime pm reference.
 */
void intel_runtime_pm_put(struct drm_i915_private *i915, intel_wakeref_t wref)
{
	cancel_intel_runtime_pm_wakeref(i915, wref);
	intel_runtime_pm_put_unchecked(i915);
}
4388 #endif
4389 
4390 /**
4391  * intel_runtime_pm_enable - enable runtime pm
4392  * @i915: i915 device instance
4393  *
4394  * This function enables runtime pm at the end of the driver load sequence.
4395  *
4396  * Note that this function does currently not enable runtime pm for the
4397  * subordinate display power domains. That is done by
4398  * intel_power_domains_enable().
4399  */
4400 void intel_runtime_pm_enable(struct drm_i915_private *i915)
4401 {
4402 	struct pci_dev *pdev = i915->drm.pdev;
4403 	struct device *kdev = &pdev->dev;
4404 
4405 	/*
4406 	 * Disable the system suspend direct complete optimization, which can
4407 	 * leave the device suspended skipping the driver's suspend handlers
4408 	 * if the device was already runtime suspended. This is needed due to
4409 	 * the difference in our runtime and system suspend sequence and
4410 	 * becaue the HDA driver may require us to enable the audio power
4411 	 * domain during system suspend.
4412 	 */
4413 	dev_pm_set_driver_flags(kdev, DPM_FLAG_NEVER_SKIP);
4414 
4415 	pm_runtime_set_autosuspend_delay(kdev, 10000); /* 10s */
4416 	pm_runtime_mark_last_busy(kdev);
4417 
4418 	/*
4419 	 * Take a permanent reference to disable the RPM functionality and drop
4420 	 * it only when unloading the driver. Use the low level get/put helpers,
4421 	 * so the driver's own RPM reference tracking asserts also work on
4422 	 * platforms without RPM support.
4423 	 */
4424 	if (!HAS_RUNTIME_PM(i915)) {
4425 		int ret;
4426 
4427 		pm_runtime_dont_use_autosuspend(kdev);
4428 		ret = pm_runtime_get_sync(kdev);
4429 		WARN(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);
4430 	} else {
4431 		pm_runtime_use_autosuspend(kdev);
4432 	}
4433 
4434 	/*
4435 	 * The core calls the driver load handler with an RPM reference held.
4436 	 * We drop that here and will reacquire it during unloading in
4437 	 * intel_power_domains_fini().
4438 	 */
4439 	pm_runtime_put_autosuspend(kdev);
4440 }
4441 
4442 void intel_runtime_pm_disable(struct drm_i915_private *i915)
4443 {
4444 	struct pci_dev *pdev = i915->drm.pdev;
4445 	struct device *kdev = &pdev->dev;
4446 
4447 	/* Transfer rpm ownership back to core */
4448 	WARN(pm_runtime_get_sync(kdev) < 0,
4449 	     "Failed to pass rpm ownership back to core\n");
4450 
4451 	pm_runtime_dont_use_autosuspend(kdev);
4452 
4453 	if (!HAS_RUNTIME_PM(i915))
4454 		pm_runtime_put(kdev);
4455 }
4456 
/*
 * Final runtime pm teardown: warn about any wakerefs still held (they are
 * leaks at this point) and drop the debug tracking state.
 */
void intel_runtime_pm_cleanup(struct drm_i915_private *i915)
{
	struct i915_runtime_pm *rpm = &i915->runtime_pm;
	int count;

	/*
	 * Pre-increment the count so the untrack call below brings it back
	 * to zero; a non-zero value here means leaked wakerefs.
	 */
	count = atomic_fetch_inc(&rpm->wakeref_count); /* balance untrack */
	WARN(count,
	     "i915->runtime_pm.wakeref_count=%d on cleanup\n",
	     count);

	untrack_intel_runtime_pm_wakeref(i915);
}
4469 
/* Early one-time setup of the runtime pm wakeref tracking state. */
void intel_runtime_pm_init_early(struct drm_i915_private *i915)
{
	init_intel_runtime_pm_wakeref(i915);
}
4474