/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */

#include <linux/string_helpers.h>

#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_reg.h"
#include "intel_backlight_regs.h"
#include "intel_cdclk.h"
#include "intel_clock_gating.h"
#include "intel_combo_phy.h"
#include "intel_de.h"
#include "intel_display_power.h"
#include "intel_display_power_map.h"
#include "intel_display_power_well.h"
#include "intel_display_types.h"
#include "intel_dmc.h"
#include "intel_mchbar_regs.h"
#include "intel_pch_refclk.h"
#include "intel_pcode.h"
#include "intel_pmdemand.h"
#include "intel_pps_regs.h"
#include "intel_snps_phy.h"
#include "skl_watermark.h"
#include "skl_watermark_regs.h"
#include "vlv_sideband.h"

#define for_each_power_domain_well(__display, __power_well, __domain) \
	for_each_power_well((__display), __power_well) \
		for_each_if(test_bit((__domain), (__power_well)->domains.bits))

#define for_each_power_domain_well_reverse(__display, __power_well, __domain) \
	for_each_power_well_reverse((__display), __power_well) \
		for_each_if(test_bit((__domain), (__power_well)->domains.bits))

static const char *
intel_display_power_domain_str(enum intel_display_power_domain domain)
{
	switch (domain) {
	case POWER_DOMAIN_DISPLAY_CORE:
		return "DISPLAY_CORE";
	case POWER_DOMAIN_PIPE_A:
		return "PIPE_A";
	case POWER_DOMAIN_PIPE_B:
		return "PIPE_B";
	case POWER_DOMAIN_PIPE_C:
		return "PIPE_C";
	case POWER_DOMAIN_PIPE_D:
		return "PIPE_D";
	case POWER_DOMAIN_PIPE_PANEL_FITTER_A:
		return "PIPE_PANEL_FITTER_A";
	case POWER_DOMAIN_PIPE_PANEL_FITTER_B:
		return "PIPE_PANEL_FITTER_B";
	case POWER_DOMAIN_PIPE_PANEL_FITTER_C:
		return "PIPE_PANEL_FITTER_C";
	case POWER_DOMAIN_PIPE_PANEL_FITTER_D:
		return "PIPE_PANEL_FITTER_D";
	case POWER_DOMAIN_TRANSCODER_A:
		return "TRANSCODER_A";
	case POWER_DOMAIN_TRANSCODER_B:
		return "TRANSCODER_B";
	case POWER_DOMAIN_TRANSCODER_C:
		return "TRANSCODER_C";
	case POWER_DOMAIN_TRANSCODER_D:
		return "TRANSCODER_D";
	case POWER_DOMAIN_TRANSCODER_EDP:
		return "TRANSCODER_EDP";
	case POWER_DOMAIN_TRANSCODER_DSI_A:
		return "TRANSCODER_DSI_A";
	case POWER_DOMAIN_TRANSCODER_DSI_C:
		return "TRANSCODER_DSI_C";
	case POWER_DOMAIN_TRANSCODER_VDSC_PW2:
		return "TRANSCODER_VDSC_PW2";
	case POWER_DOMAIN_PORT_DDI_LANES_A:
		return "PORT_DDI_LANES_A";
	case POWER_DOMAIN_PORT_DDI_LANES_B:
		return "PORT_DDI_LANES_B";
	case POWER_DOMAIN_PORT_DDI_LANES_C:
		return "PORT_DDI_LANES_C";
	case POWER_DOMAIN_PORT_DDI_LANES_D:
		return "PORT_DDI_LANES_D";
	case POWER_DOMAIN_PORT_DDI_LANES_E:
		return "PORT_DDI_LANES_E";
	case POWER_DOMAIN_PORT_DDI_LANES_F:
		return "PORT_DDI_LANES_F";
	case POWER_DOMAIN_PORT_DDI_LANES_TC1:
		return "PORT_DDI_LANES_TC1";
	case POWER_DOMAIN_PORT_DDI_LANES_TC2:
		return "PORT_DDI_LANES_TC2";
	case POWER_DOMAIN_PORT_DDI_LANES_TC3:
		return "PORT_DDI_LANES_TC3";
	case POWER_DOMAIN_PORT_DDI_LANES_TC4:
		return "PORT_DDI_LANES_TC4";
	case POWER_DOMAIN_PORT_DDI_LANES_TC5:
		return "PORT_DDI_LANES_TC5";
	case POWER_DOMAIN_PORT_DDI_LANES_TC6:
		return "PORT_DDI_LANES_TC6";
	case POWER_DOMAIN_PORT_DDI_IO_A:
		return "PORT_DDI_IO_A";
	case POWER_DOMAIN_PORT_DDI_IO_B:
		return "PORT_DDI_IO_B";
	case POWER_DOMAIN_PORT_DDI_IO_C:
		return "PORT_DDI_IO_C";
	case POWER_DOMAIN_PORT_DDI_IO_D:
		return "PORT_DDI_IO_D";
	case POWER_DOMAIN_PORT_DDI_IO_E:
		return "PORT_DDI_IO_E";
	case POWER_DOMAIN_PORT_DDI_IO_F:
		return "PORT_DDI_IO_F";
	case POWER_DOMAIN_PORT_DDI_IO_TC1:
		return "PORT_DDI_IO_TC1";
	case POWER_DOMAIN_PORT_DDI_IO_TC2:
		return "PORT_DDI_IO_TC2";
	case POWER_DOMAIN_PORT_DDI_IO_TC3:
		return "PORT_DDI_IO_TC3";
	case POWER_DOMAIN_PORT_DDI_IO_TC4:
		return "PORT_DDI_IO_TC4";
	case POWER_DOMAIN_PORT_DDI_IO_TC5:
		return "PORT_DDI_IO_TC5";
	case POWER_DOMAIN_PORT_DDI_IO_TC6:
		return "PORT_DDI_IO_TC6";
	case POWER_DOMAIN_PORT_DSI:
		return "PORT_DSI";
	case POWER_DOMAIN_PORT_CRT:
		return "PORT_CRT";
	case POWER_DOMAIN_PORT_OTHER:
		return "PORT_OTHER";
	case POWER_DOMAIN_VGA:
		return "VGA";
	case POWER_DOMAIN_AUDIO_MMIO:
		return "AUDIO_MMIO";
	case POWER_DOMAIN_AUDIO_PLAYBACK:
		return "AUDIO_PLAYBACK";
	case POWER_DOMAIN_AUX_IO_A:
		return "AUX_IO_A";
	case POWER_DOMAIN_AUX_IO_B:
		return "AUX_IO_B";
	case POWER_DOMAIN_AUX_IO_C:
		return "AUX_IO_C";
	case POWER_DOMAIN_AUX_IO_D:
		return "AUX_IO_D";
	case POWER_DOMAIN_AUX_IO_E:
		return "AUX_IO_E";
	case POWER_DOMAIN_AUX_IO_F:
		return "AUX_IO_F";
	case POWER_DOMAIN_AUX_A:
		return "AUX_A";
	case POWER_DOMAIN_AUX_B:
		return "AUX_B";
	case POWER_DOMAIN_AUX_C:
		return "AUX_C";
	case POWER_DOMAIN_AUX_D:
		return "AUX_D";
	case POWER_DOMAIN_AUX_E:
		return "AUX_E";
	case POWER_DOMAIN_AUX_F:
		return "AUX_F";
	case POWER_DOMAIN_AUX_USBC1:
		return "AUX_USBC1";
	case POWER_DOMAIN_AUX_USBC2:
		return "AUX_USBC2";
	case POWER_DOMAIN_AUX_USBC3:
		return "AUX_USBC3";
	case POWER_DOMAIN_AUX_USBC4:
		return "AUX_USBC4";
	case POWER_DOMAIN_AUX_USBC5:
		return "AUX_USBC5";
	case POWER_DOMAIN_AUX_USBC6:
		return "AUX_USBC6";
	case POWER_DOMAIN_AUX_TBT1:
		return "AUX_TBT1";
	case POWER_DOMAIN_AUX_TBT2:
		return "AUX_TBT2";
	case POWER_DOMAIN_AUX_TBT3:
		return "AUX_TBT3";
	case POWER_DOMAIN_AUX_TBT4:
		return "AUX_TBT4";
	case POWER_DOMAIN_AUX_TBT5:
		return "AUX_TBT5";
	case POWER_DOMAIN_AUX_TBT6:
		return "AUX_TBT6";
	case POWER_DOMAIN_GMBUS:
		return "GMBUS";
	case POWER_DOMAIN_INIT:
		return "INIT";
	case POWER_DOMAIN_GT_IRQ:
		return "GT_IRQ";
	case POWER_DOMAIN_DC_OFF:
		return "DC_OFF";
	case POWER_DOMAIN_TC_COLD_OFF:
		return "TC_COLD_OFF";
	default:
		MISSING_CASE(domain);
		return "?";
	}
}

static bool __intel_display_power_is_enabled(struct intel_display *display,
					     enum intel_display_power_domain domain)
{
	struct i915_power_well *power_well;
	bool is_enabled;

	if (pm_runtime_suspended(display->drm->dev))
		return false;

	is_enabled = true;

	for_each_power_domain_well_reverse(display, power_well, domain) {
		if (intel_power_well_is_always_on(power_well))
			continue;

		if (!intel_power_well_is_enabled_cached(power_well)) {
			is_enabled = false;
			break;
		}
	}

	return is_enabled;
}

/**
 * intel_display_power_is_enabled - check for a power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to check
 *
 * This function can be used to check the hw power domain state. It is mostly
 * used in hardware state readout functions. Everywhere else code should rely
 * upon explicit power domain reference counting to ensure that the hardware
 * block is powered up before accessing it.
 *
 * Callers must hold the relevant modesetting locks to ensure that concurrent
 * threads can't disable the power well while the caller tries to read a few
 * registers.
 *
 * Returns:
 * True when the power domain is enabled, false otherwise.
 */
bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
				    enum intel_display_power_domain domain)
{
	struct intel_display *display = &dev_priv->display;
	struct i915_power_domains *power_domains = &display->power.domains;
	bool ret;

	mutex_lock(&power_domains->lock);
	ret = __intel_display_power_is_enabled(display, domain);
	mutex_unlock(&power_domains->lock);

	return ret;
}

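/*
 * Walk states[] from deepest to shallowest: if the requested state is not
 * in the allowed_dc_mask, fall back to the next shallower state and
 * re-check, ending at DC_STATE_DISABLE.
 */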
static u32
sanitize_target_dc_state(struct intel_display *display,
			 u32 target_dc_state)
{
	struct i915_power_domains *power_domains = &display->power.domains;
	static const u32 states[] = {
		DC_STATE_EN_UPTO_DC6,
		DC_STATE_EN_UPTO_DC5,
		DC_STATE_EN_DC3CO,
		DC_STATE_DISABLE,
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(states) - 1; i++) {
		if (target_dc_state != states[i])
			continue;

		if (power_domains->allowed_dc_mask & target_dc_state)
			break;

		target_dc_state = states[i + 1];
	}

	return target_dc_state;
}

/**
 * intel_display_power_set_target_dc_state - Set target dc state.
 * @display: display device
 * @state: state which needs to be set as target_dc_state.
 *
 * This function sets the "DC off" power well target_dc_state;
 * based upon this target_dc_state, the "DC off" power well will
 * enable the desired DC state.
 */
void intel_display_power_set_target_dc_state(struct intel_display *display,
					     u32 state)
{
	struct i915_power_well *power_well;
	bool dc_off_enabled;
	struct i915_power_domains *power_domains = &display->power.domains;

	mutex_lock(&power_domains->lock);
	power_well = lookup_power_well(display, SKL_DISP_DC_OFF);

	if (drm_WARN_ON(display->drm, !power_well))
		goto unlock;

	state = sanitize_target_dc_state(display, state);

	if (state == power_domains->target_dc_state)
		goto unlock;

	dc_off_enabled = intel_power_well_is_enabled(display, power_well);
	/*
	 * If the DC off power well is disabled, we need to enable and disable
	 * it to put the target DC state into effect.
	 */
	if (!dc_off_enabled)
		intel_power_well_enable(display, power_well);

	power_domains->target_dc_state = state;

	if (!dc_off_enabled)
		intel_power_well_disable(display, power_well);

unlock:
	mutex_unlock(&power_domains->lock);
}

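/*
 * Return the union of the two async-put buckets: bucket [0] holds the
 * domains covered by the currently queued work, bucket [1] the domains
 * whose put was requested while that work was still pending.
 */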
static void __async_put_domains_mask(struct i915_power_domains *power_domains,
				     struct intel_power_domain_mask *mask)
{
	bitmap_or(mask->bits,
		  power_domains->async_put_domains[0].bits,
		  power_domains->async_put_domains[1].bits,
		  POWER_DOMAIN_NUM);
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)

static bool
assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
{
	struct intel_display *display = container_of(power_domains,
						     struct intel_display,
						     power.domains);

	return !drm_WARN_ON(display->drm,
			    bitmap_intersects(power_domains->async_put_domains[0].bits,
					      power_domains->async_put_domains[1].bits,
					      POWER_DOMAIN_NUM));
}

static bool
__async_put_domains_state_ok(struct i915_power_domains *power_domains)
{
	struct intel_display *display = container_of(power_domains,
						     struct intel_display,
						     power.domains);
	struct intel_power_domain_mask async_put_mask;
	enum intel_display_power_domain domain;
	bool err = false;

	err |= !assert_async_put_domain_masks_disjoint(power_domains);
	__async_put_domains_mask(power_domains, &async_put_mask);
	err |= drm_WARN_ON(display->drm,
			   !!power_domains->async_put_wakeref !=
			   !bitmap_empty(async_put_mask.bits, POWER_DOMAIN_NUM));

	for_each_power_domain(domain, &async_put_mask)
		err |= drm_WARN_ON(display->drm,
				   power_domains->domain_use_count[domain] != 1);

	return !err;
}

static void print_power_domains(struct i915_power_domains *power_domains,
				const char *prefix, struct intel_power_domain_mask *mask)
{
	struct intel_display *display = container_of(power_domains,
						     struct intel_display,
						     power.domains);
	enum intel_display_power_domain domain;

	drm_dbg_kms(display->drm, "%s (%d):\n", prefix, bitmap_weight(mask->bits, POWER_DOMAIN_NUM));
	for_each_power_domain(domain, mask)
		drm_dbg_kms(display->drm, "%s use_count %d\n",
			    intel_display_power_domain_str(domain),
			    power_domains->domain_use_count[domain]);
}

static void
print_async_put_domains_state(struct i915_power_domains *power_domains)
{
	struct intel_display *display = container_of(power_domains,
						     struct intel_display,
						     power.domains);

	drm_dbg_kms(display->drm, "async_put_wakeref: %s\n",
		    str_yes_no(power_domains->async_put_wakeref));

	print_power_domains(power_domains, "async_put_domains[0]",
			    &power_domains->async_put_domains[0]);
	print_power_domains(power_domains, "async_put_domains[1]",
			    &power_domains->async_put_domains[1]);
}

static void
verify_async_put_domains_state(struct i915_power_domains *power_domains)
{
	if (!__async_put_domains_state_ok(power_domains))
		print_async_put_domains_state(power_domains);
}

#else

static void
assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
{
}

static void
verify_async_put_domains_state(struct i915_power_domains *power_domains)
{
}

#endif /* CONFIG_DRM_I915_DEBUG_RUNTIME_PM */

static void async_put_domains_mask(struct i915_power_domains *power_domains,
				   struct intel_power_domain_mask *mask)
{
	assert_async_put_domain_masks_disjoint(power_domains);

	__async_put_domains_mask(power_domains, mask);
}

static void
async_put_domains_clear_domain(struct i915_power_domains *power_domains,
			       enum intel_display_power_domain domain)
{
	assert_async_put_domain_masks_disjoint(power_domains);

	clear_bit(domain, power_domains->async_put_domains[0].bits);
	clear_bit(domain, power_domains->async_put_domains[1].bits);
}

static void
cancel_async_put_work(struct i915_power_domains *power_domains, bool sync)
{
	if (sync)
		cancel_delayed_work_sync(&power_domains->async_put_work);
	else
		cancel_delayed_work(&power_domains->async_put_work);

	power_domains->async_put_next_delay = 0;
}

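/*
 * Reuse a pending async-put reference on @domain instead of taking a new
 * one: the domain is removed from the async-put masks (its use count is
 * still held by the not-yet-executed put), and the async-put wakeref is
 * dropped once no pending domains remain. Returns true if such a
 * reference was grabbed.
 */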
static bool
intel_display_power_grab_async_put_ref(struct intel_display *display,
				       enum intel_display_power_domain domain)
{
	struct drm_i915_private *dev_priv = to_i915(display->drm);
	struct i915_power_domains *power_domains = &display->power.domains;
	struct intel_power_domain_mask async_put_mask;
	bool ret = false;

	async_put_domains_mask(power_domains, &async_put_mask);
	if (!test_bit(domain, async_put_mask.bits))
		goto out_verify;

	async_put_domains_clear_domain(power_domains, domain);

	ret = true;

	async_put_domains_mask(power_domains, &async_put_mask);
	if (!bitmap_empty(async_put_mask.bits, POWER_DOMAIN_NUM))
		goto out_verify;

	cancel_async_put_work(power_domains, false);
	intel_runtime_pm_put_raw(&dev_priv->runtime_pm,
				 fetch_and_zero(&power_domains->async_put_wakeref));
out_verify:
	verify_async_put_domains_state(power_domains);

	return ret;
}

static void
__intel_display_power_get_domain(struct intel_display *display,
				 enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &display->power.domains;
	struct i915_power_well *power_well;

	if (intel_display_power_grab_async_put_ref(display, domain))
		return;

	for_each_power_domain_well(display, power_well, domain)
		intel_power_well_get(display, power_well);

	power_domains->domain_use_count[domain]++;
}

/**
 * intel_display_power_get - grab a power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function grabs a power domain reference for @domain and ensures that the
 * power domain and all its parents are powered up. Therefore users should only
 * grab a reference to the innermost power domain they need.
 *
 * Any power domain reference obtained by this function must have a symmetric
 * call to intel_display_power_put() to release the reference again.
 */
intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
					enum intel_display_power_domain domain)
{
	struct intel_display *display = &dev_priv->display;
	struct i915_power_domains *power_domains = &display->power.domains;
	intel_wakeref_t wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	mutex_lock(&power_domains->lock);
	__intel_display_power_get_domain(display, domain);
	mutex_unlock(&power_domains->lock);

	return wakeref;
}

/**
 * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function grabs a power domain reference for @domain only if the domain
 * is already enabled, ensuring that the power domain and all its parents stay
 * powered up. Therefore users should only grab a reference to the innermost
 * power domain they need.
 *
 * Any power domain reference obtained by this function must have a symmetric
 * call to intel_display_power_put() to release the reference again.
 *
 * Returns:
 * The wakeref on success, NULL if the domain was not enabled.
 */
intel_wakeref_t
intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
				   enum intel_display_power_domain domain)
{
	struct intel_display *display = &dev_priv->display;
	struct i915_power_domains *power_domains = &display->power.domains;
	intel_wakeref_t wakeref;
	bool is_enabled;

	wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm);
	if (!wakeref)
		return NULL;

	mutex_lock(&power_domains->lock);

	if (__intel_display_power_is_enabled(display, domain)) {
		__intel_display_power_get_domain(display, domain);
		is_enabled = true;
	} else {
		is_enabled = false;
	}

	mutex_unlock(&power_domains->lock);

	if (!is_enabled) {
		intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
		wakeref = NULL;
	}

	return wakeref;
}

static void
__intel_display_power_put_domain(struct intel_display *display,
				 enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &display->power.domains;
	struct i915_power_well *power_well;
	const char *name = intel_display_power_domain_str(domain);
	struct intel_power_domain_mask async_put_mask;

	drm_WARN(display->drm, !power_domains->domain_use_count[domain],
		 "Use count on domain %s is already zero\n",
		 name);
	async_put_domains_mask(power_domains, &async_put_mask);
	drm_WARN(display->drm,
		 test_bit(domain, async_put_mask.bits),
		 "Async disabling of domain %s is pending\n",
		 name);

	power_domains->domain_use_count[domain]--;

	for_each_power_domain_well_reverse(display, power_well, domain)
		intel_power_well_put(display, power_well);
}

static void __intel_display_power_put(struct intel_display *display,
				      enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &display->power.domains;

	mutex_lock(&power_domains->lock);
	__intel_display_power_put_domain(display, domain);
	mutex_unlock(&power_domains->lock);
}

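/*
 * Queue the deferred power-off work after @delay_ms, transferring
 * ownership of @wakeref to the work; a wakeref must not already be
 * pending at this point.
 */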
static void
queue_async_put_domains_work(struct i915_power_domains *power_domains,
			     intel_wakeref_t wakeref,
			     int delay_ms)
{
	struct intel_display *display = container_of(power_domains,
						     struct intel_display,
						     power.domains);

	drm_WARN_ON(display->drm, power_domains->async_put_wakeref);
	power_domains->async_put_wakeref = wakeref;
	drm_WARN_ON(display->drm, !queue_delayed_work(system_unbound_wq,
						      &power_domains->async_put_work,
						      msecs_to_jiffies(delay_ms)));
}

static void
release_async_put_domains(struct i915_power_domains *power_domains,
			  struct intel_power_domain_mask *mask)
{
	struct intel_display *display = container_of(power_domains,
						     struct intel_display,
						     power.domains);
	struct drm_i915_private *dev_priv = to_i915(display->drm);
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
	enum intel_display_power_domain domain;
	intel_wakeref_t wakeref;

	wakeref = intel_runtime_pm_get_noresume(rpm);

	for_each_power_domain(domain, mask) {
		/* Clear before put, so put's sanity check is happy. */
		async_put_domains_clear_domain(power_domains, domain);
		__intel_display_power_put_domain(display, domain);
	}

	intel_runtime_pm_put(rpm, wakeref);
}

static void
intel_display_power_put_async_work(struct work_struct *work)
{
	struct intel_display *display = container_of(work, struct intel_display,
						     power.domains.async_put_work.work);
	struct drm_i915_private *dev_priv = to_i915(display->drm);
	struct i915_power_domains *power_domains = &display->power.domains;
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
	intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(rpm);
	intel_wakeref_t old_work_wakeref = NULL;

	mutex_lock(&power_domains->lock);

	/*
	 * Bail out if all the domain refs pending to be released were grabbed
	 * by subsequent gets or a flush_work.
	 */
	old_work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
	if (!old_work_wakeref)
		goto out_verify;

	release_async_put_domains(power_domains,
				  &power_domains->async_put_domains[0]);

	/*
	 * Cancel the work that got queued after this one got dequeued,
	 * since here we released the corresponding async-put reference.
	 */
	cancel_async_put_work(power_domains, false);

	/* Requeue the work if more domains were async put meanwhile. */
	if (!bitmap_empty(power_domains->async_put_domains[1].bits, POWER_DOMAIN_NUM)) {
		bitmap_copy(power_domains->async_put_domains[0].bits,
			    power_domains->async_put_domains[1].bits,
			    POWER_DOMAIN_NUM);
		bitmap_zero(power_domains->async_put_domains[1].bits,
			    POWER_DOMAIN_NUM);
		queue_async_put_domains_work(power_domains,
					     fetch_and_zero(&new_work_wakeref),
					     power_domains->async_put_next_delay);
		power_domains->async_put_next_delay = 0;
	}

out_verify:
	verify_async_put_domains_state(power_domains);

	mutex_unlock(&power_domains->lock);

	if (old_work_wakeref)
		intel_runtime_pm_put_raw(rpm, old_work_wakeref);
	if (new_work_wakeref)
		intel_runtime_pm_put_raw(rpm, new_work_wakeref);
}

/**
 * __intel_display_power_put_async - release a power domain reference asynchronously
 * @i915: i915 device instance
 * @domain: power domain to reference
 * @wakeref: wakeref acquired for the reference that is being released
 * @delay_ms: delay of powering down the power domain
 *
 * This function drops the power domain reference obtained by
 * intel_display_power_get*() and schedules a work to power down the
 * corresponding hardware block if this is the last reference.
 * The power down is delayed by @delay_ms if this is >= 0, or by a default
 * 100 ms otherwise.
 */
void __intel_display_power_put_async(struct drm_i915_private *i915,
				     enum intel_display_power_domain domain,
				     intel_wakeref_t wakeref,
				     int delay_ms)
{
	struct intel_display *display = &i915->display;
	struct i915_power_domains *power_domains = &display->power.domains;
	struct intel_runtime_pm *rpm = &i915->runtime_pm;
	intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(rpm);

	delay_ms = delay_ms >= 0 ? delay_ms : 100;

	mutex_lock(&power_domains->lock);

	if (power_domains->domain_use_count[domain] > 1) {
		__intel_display_power_put_domain(display, domain);

		goto out_verify;
	}

	drm_WARN_ON(display->drm, power_domains->domain_use_count[domain] != 1);

	/* Let a pending work requeue itself or queue a new one. */
	if (power_domains->async_put_wakeref) {
		set_bit(domain, power_domains->async_put_domains[1].bits);
		power_domains->async_put_next_delay = max(power_domains->async_put_next_delay,
							  delay_ms);
	} else {
		set_bit(domain, power_domains->async_put_domains[0].bits);
		queue_async_put_domains_work(power_domains,
					     fetch_and_zero(&work_wakeref),
					     delay_ms);
	}

out_verify:
	verify_async_put_domains_state(power_domains);

	mutex_unlock(&power_domains->lock);

	if (work_wakeref)
		intel_runtime_pm_put_raw(rpm, work_wakeref);

	intel_runtime_pm_put(rpm, wakeref);
}

/**
 * intel_display_power_flush_work - flushes the async display power disabling work
 * @i915: i915 device instance
 *
 * Flushes any pending work that was scheduled by a preceding
 * intel_display_power_put_async() call, completing the disabling of the
 * corresponding power domains.
 *
 * Note that the work handler function may still be running after this
 * function returns; to ensure that the work handler isn't running use
 * intel_display_power_flush_work_sync() instead.
 */
void intel_display_power_flush_work(struct drm_i915_private *i915)
{
	struct intel_display *display = &i915->display;
	struct i915_power_domains *power_domains = &display->power.domains;
	struct intel_power_domain_mask async_put_mask;
	intel_wakeref_t work_wakeref;

	mutex_lock(&power_domains->lock);

	work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
	if (!work_wakeref)
		goto out_verify;

	async_put_domains_mask(power_domains, &async_put_mask);
	release_async_put_domains(power_domains, &async_put_mask);
	cancel_async_put_work(power_domains, false);

out_verify:
	verify_async_put_domains_state(power_domains);

	mutex_unlock(&power_domains->lock);

	if (work_wakeref)
		intel_runtime_pm_put_raw(&i915->runtime_pm, work_wakeref);
}

/**
 * intel_display_power_flush_work_sync - flushes and syncs the async display power disabling work
 * @display: display device instance
 *
 * Like intel_display_power_flush_work(), but also ensure that the work
 * handler function is not running any more when this function returns.
 */
static void
intel_display_power_flush_work_sync(struct intel_display *display)
{
	struct drm_i915_private *i915 = to_i915(display->drm);
	struct i915_power_domains *power_domains = &display->power.domains;

	intel_display_power_flush_work(i915);
	cancel_async_put_work(power_domains, true);

	verify_async_put_domains_state(power_domains);

	drm_WARN_ON(display->drm, power_domains->async_put_wakeref);
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
/**
 * intel_display_power_put - release a power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 * @wakeref: wakeref acquired for the reference that is being released
 *
 * This function drops the power domain reference obtained by
 * intel_display_power_get() and might power down the corresponding hardware
 * block right away if this is the last reference.
 */
void intel_display_power_put(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain,
			     intel_wakeref_t wakeref)
{
	struct intel_display *display = &dev_priv->display;

	__intel_display_power_put(display, domain);
	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
}
#else
/**
 * intel_display_power_put_unchecked - release an unchecked power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function drops the power domain reference obtained by
 * intel_display_power_get() and might power down the corresponding hardware
 * block right away if this is the last reference.
 *
 * This function is only for the power domain code's internal use to suppress
 * wakeref tracking when the corresponding debug kconfig option is disabled,
 * and should not be used otherwise.
 */
void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
				       enum intel_display_power_domain domain)
{
	struct intel_display *display = &dev_priv->display;

	__intel_display_power_put(display, domain);
	intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
}
#endif

void
intel_display_power_get_in_set(struct drm_i915_private *i915,
			       struct intel_display_power_domain_set *power_domain_set,
			       enum intel_display_power_domain domain)
{
	struct intel_display *display = &i915->display;
	intel_wakeref_t __maybe_unused wf;

	drm_WARN_ON(display->drm, test_bit(domain, power_domain_set->mask.bits));

	wf = intel_display_power_get(i915, domain);
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
	power_domain_set->wakerefs[domain] = wf;
#endif
	set_bit(domain, power_domain_set->mask.bits);
}

bool
intel_display_power_get_in_set_if_enabled(struct drm_i915_private *i915,
					  struct intel_display_power_domain_set *power_domain_set,
					  enum intel_display_power_domain domain)
{
	struct intel_display *display = &i915->display;
	intel_wakeref_t wf;

	drm_WARN_ON(display->drm, test_bit(domain, power_domain_set->mask.bits));

	wf = intel_display_power_get_if_enabled(i915, domain);
	if (!wf)
		return false;

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
	power_domain_set->wakerefs[domain] = wf;
#endif
	set_bit(domain, power_domain_set->mask.bits);

	return true;
}

void
intel_display_power_put_mask_in_set(struct drm_i915_private *i915,
				    struct intel_display_power_domain_set *power_domain_set,
				    struct intel_power_domain_mask *mask)
{
	struct intel_display *display = &i915->display;
	enum intel_display_power_domain domain;

	drm_WARN_ON(display->drm,
		    !bitmap_subset(mask->bits, power_domain_set->mask.bits, POWER_DOMAIN_NUM));

	for_each_power_domain(domain, mask) {
		intel_wakeref_t __maybe_unused wf = INTEL_WAKEREF_DEF;

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
		wf = fetch_and_zero(&power_domain_set->wakerefs[domain]);
#endif
		intel_display_power_put(i915, domain, wf);
		clear_bit(domain, power_domain_set->mask.bits);
	}
}

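/*
 * Normalize the disable_power_well modparam: negative values mean "auto",
 * which defaults to 1, i.e. power wells may be disabled.
 */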
static int
sanitize_disable_power_well_option(int disable_power_well)
{
	if (disable_power_well >= 0)
		return !!disable_power_well;

	return 1;
}

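/*
 * Compute the mask of DC states the driver is allowed to enter, from the
 * platform's deepest supported state (max_dc) and the enable_dc modparam.
 * DC9 is added separately since it doesn't depend on the DMC firmware.
 */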
static u32 get_allowed_dc_mask(struct intel_display *display, int enable_dc)
{
	u32 mask;
	int requested_dc;
	int max_dc;

	if (!HAS_DISPLAY(display))
		return 0;

	if (DISPLAY_VER(display) >= 20)
		max_dc = 2;
	else if (display->platform.dg2)
		max_dc = 1;
	else if (display->platform.dg1)
		max_dc = 3;
	else if (DISPLAY_VER(display) >= 12)
		max_dc = 4;
	else if (display->platform.geminilake || display->platform.broxton)
		max_dc = 1;
	else if (DISPLAY_VER(display) >= 9)
		max_dc = 2;
	else
		max_dc = 0;

	/*
	 * DC9 has a separate HW flow from the rest of the DC states,
	 * not depending on the DMC firmware. It's needed by system
	 * suspend/resume, so allow it unconditionally.
	 */
	mask = display->platform.geminilake || display->platform.broxton ||
		DISPLAY_VER(display) >= 11 ? DC_STATE_EN_DC9 : 0;

	if (!display->params.disable_power_well)
		max_dc = 0;

	if (enable_dc >= 0 && enable_dc <= max_dc) {
		requested_dc = enable_dc;
	} else if (enable_dc == -1) {
		requested_dc = max_dc;
	} else if (enable_dc > max_dc && enable_dc <= 4) {
		drm_dbg_kms(display->drm,
			    "Adjusting requested max DC state (%d->%d)\n",
			    enable_dc, max_dc);
		requested_dc = max_dc;
	} else {
		drm_err(display->drm,
			"Unexpected value for enable_dc (%d)\n", enable_dc);
		requested_dc = max_dc;
	}

	switch (requested_dc) {
	case 4:
		mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6;
		break;
	case 3:
		mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC5;
		break;
	case 2:
		mask |= DC_STATE_EN_UPTO_DC6;
		break;
	case 1:
		mask |= DC_STATE_EN_UPTO_DC5;
		break;
	}

	drm_dbg_kms(display->drm, "Allowed DC state mask %02x\n", mask);

	return mask;
}

/**
 * intel_power_domains_init - initializes the power domain structures
 * @display: display device instance
 *
 * Initializes the power domain structures for @display depending upon the
 * supported platform.
 */
int intel_power_domains_init(struct intel_display *display)
{
	struct i915_power_domains *power_domains = &display->power.domains;

	display->params.disable_power_well =
		sanitize_disable_power_well_option(display->params.disable_power_well);
	power_domains->allowed_dc_mask =
		get_allowed_dc_mask(display, display->params.enable_dc);

	power_domains->target_dc_state =
		sanitize_target_dc_state(display, DC_STATE_EN_UPTO_DC6);

	mutex_init(&power_domains->lock);

	INIT_DELAYED_WORK(&power_domains->async_put_work,
			  intel_display_power_put_async_work);

	return intel_display_power_map_init(power_domains);
}

/**
 * intel_power_domains_cleanup - clean up power domains resources
 * @display: display device instance
 *
 * Release any resources acquired by intel_power_domains_init()
 */
void intel_power_domains_cleanup(struct intel_display *display)
{
	intel_display_power_map_cleanup(&display->power.domains);
}

static void intel_power_domains_sync_hw(struct intel_display *display)
{
	struct i915_power_domains *power_domains = &display->power.domains;
	struct i915_power_well *power_well;

	mutex_lock(&power_domains->lock);
	for_each_power_well(display, power_well)
		intel_power_well_sync_hw(display, power_well);
	mutex_unlock(&power_domains->lock);
}

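/*
 * Request the given DBuf slice power state and verify that the hardware
 * reached it; the 10 us delay before the readback is assumed to be enough
 * for the power state to settle.
 */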
static void gen9_dbuf_slice_set(struct intel_display *display,
				enum dbuf_slice slice, bool enable)
{
	i915_reg_t reg = DBUF_CTL_S(slice);
	bool state;

	intel_de_rmw(display, reg, DBUF_POWER_REQUEST,
		     enable ? DBUF_POWER_REQUEST : 0);
	intel_de_posting_read(display, reg);
	udelay(10);

	state = intel_de_read(display, reg) & DBUF_POWER_STATE;
	drm_WARN(display->drm, enable != state,
		 "DBuf slice %d power %s timeout!\n",
		 slice, str_enable_disable(enable));
}

void gen9_dbuf_slices_update(struct drm_i915_private *dev_priv,
			     u8 req_slices)
{
	struct intel_display *display = &dev_priv->display;
	struct i915_power_domains *power_domains = &display->power.domains;
	u8 slice_mask = DISPLAY_INFO(display)->dbuf.slice_mask;
	enum dbuf_slice slice;

	drm_WARN(display->drm, req_slices & ~slice_mask,
		 "Invalid set of dbuf slices (0x%x) requested (total dbuf slices 0x%x)\n",
		 req_slices, slice_mask);

	drm_dbg_kms(display->drm, "Updating dbuf slices to 0x%x\n",
		    req_slices);

	/*
	 * This might run in parallel with gen9_dc_off_power_well_enable()
	 * being called from intel_dp_detect() for instance, which can
	 * trigger an assertion via a race condition: gen9_assert_dbuf_enabled()
	 * might preempt this when the registers were already updated, while
	 * dev_priv was not.
	 */
	mutex_lock(&power_domains->lock);

	for_each_dbuf_slice(display, slice)
		gen9_dbuf_slice_set(display, slice, req_slices & BIT(slice));

	display->dbuf.enabled_slices = req_slices;

	mutex_unlock(&power_domains->lock);
}

static void gen9_dbuf_enable(struct intel_display *display)
{
	struct drm_i915_private *dev_priv = to_i915(display->drm);
	u8 slices_mask;

	display->dbuf.enabled_slices = intel_enabled_dbuf_slices_mask(dev_priv);

	slices_mask = BIT(DBUF_S1) | display->dbuf.enabled_slices;

	if (DISPLAY_VER(display) >= 14)
		intel_pmdemand_program_dbuf(display, slices_mask);

	/*
	 * Just power up at least 1 slice, we will
	 * figure out later which slices we have and what we need.
	 */
	gen9_dbuf_slices_update(dev_priv, slices_mask);
}

static void gen9_dbuf_disable(struct intel_display *display)
{
	struct drm_i915_private *dev_priv = to_i915(display->drm);

	gen9_dbuf_slices_update(dev_priv, 0);

	if (DISPLAY_VER(display) >= 14)
		intel_pmdemand_program_dbuf(display, 0);
}

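/*
 * Program the tracker state service value for each DBuf slice; not
 * applicable to ADL-P, which is skipped.
 */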
static void gen12_dbuf_slices_config(struct intel_display *display)
{
	enum dbuf_slice slice;

	if (display->platform.alderlake_p)
		return;

	for_each_dbuf_slice(display, slice)
		intel_de_rmw(display, DBUF_CTL_S(slice),
			     DBUF_TRACKER_STATE_SERVICE_MASK,
			     DBUF_TRACKER_STATE_SERVICE(8));
}

static void icl_mbus_init(struct intel_display *display)
{
	unsigned long abox_regs = DISPLAY_INFO(display)->abox_mask;
	u32 mask, val, i;

	if (display->platform.alderlake_p || DISPLAY_VER(display) >= 14)
		return;

	mask = MBUS_ABOX_BT_CREDIT_POOL1_MASK |
		MBUS_ABOX_BT_CREDIT_POOL2_MASK |
		MBUS_ABOX_B_CREDIT_MASK |
		MBUS_ABOX_BW_CREDIT_MASK;
	val = MBUS_ABOX_BT_CREDIT_POOL1(16) |
		MBUS_ABOX_BT_CREDIT_POOL2(16) |
		MBUS_ABOX_B_CREDIT(1) |
		MBUS_ABOX_BW_CREDIT(1);

	/*
	 * gen12 platforms that use abox1 and abox2 for pixel data reads still
	 * expect us to program the abox_ctl0 register as well, even though
	 * we don't have to program other instance-0 registers like BW_BUDDY.
	 */
	if (DISPLAY_VER(display) == 12)
		abox_regs |= BIT(0);

	for_each_set_bit(i, &abox_regs, sizeof(abox_regs))
		intel_de_rmw(display, MBUS_ABOX_CTL(i), mask, val);
}

static void hsw_assert_cdclk(struct intel_display *display)
{
	u32 val = intel_de_read(display, LCPLL_CTL);

	/*
	 * The LCPLL register should be turned on by the BIOS. For now
	 * let's just check its state and print errors in case
	 * something is wrong. Don't even try to turn it on.
	 */

	if (val & LCPLL_CD_SOURCE_FCLK)
		drm_err(display->drm, "CDCLK source is not LCPLL\n");

	if (val & LCPLL_PLL_DISABLE)
		drm_err(display->drm, "LCPLL is disabled\n");

	if ((val & LCPLL_REF_MASK) != LCPLL_REF_NON_SSC)
		drm_err(display->drm, "LCPLL not using non-SSC reference\n");
}

static void assert_can_disable_lcpll(struct intel_display *display)
{
	struct drm_i915_private *dev_priv = to_i915(display->drm);
	struct intel_crtc *crtc;

	for_each_intel_crtc(display->drm, crtc)
		INTEL_DISPLAY_STATE_WARN(display, crtc->active,
					 "CRTC for pipe %c enabled\n",
					 pipe_name(crtc->pipe));

	INTEL_DISPLAY_STATE_WARN(display, intel_de_read(display, HSW_PWR_WELL_CTL2),
				 "Display power well on\n");
	INTEL_DISPLAY_STATE_WARN(display,
				 intel_de_read(display, SPLL_CTL) & SPLL_PLL_ENABLE,
				 "SPLL enabled\n");
	INTEL_DISPLAY_STATE_WARN(display,
				 intel_de_read(display, WRPLL_CTL(0)) & WRPLL_PLL_ENABLE,
				 "WRPLL1 enabled\n");
	INTEL_DISPLAY_STATE_WARN(display,
				 intel_de_read(display, WRPLL_CTL(1)) & WRPLL_PLL_ENABLE,
				 "WRPLL2 enabled\n");
	INTEL_DISPLAY_STATE_WARN(display,
				 intel_de_read(display, PP_STATUS(display, 0)) & PP_ON,
				 "Panel power on\n");
	INTEL_DISPLAY_STATE_WARN(display,
				 intel_de_read(display, BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
				 "CPU PWM1 enabled\n");
	if (display->platform.haswell)
		INTEL_DISPLAY_STATE_WARN(display,
					 intel_de_read(display, HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
					 "CPU PWM2 enabled\n");
	INTEL_DISPLAY_STATE_WARN(display,
				 intel_de_read(display, BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
				 "PCH PWM1 enabled\n");
	INTEL_DISPLAY_STATE_WARN(display,
				 (intel_de_read(display, UTIL_PIN_CTL) & (UTIL_PIN_ENABLE | UTIL_PIN_MODE_MASK)) == (UTIL_PIN_ENABLE | UTIL_PIN_MODE_PWM),
				 "Utility pin enabled in PWM mode\n");
	INTEL_DISPLAY_STATE_WARN(display,
				 intel_de_read(display, PCH_GTC_CTL) & PCH_GTC_ENABLE,
				 "PCH GTC enabled\n");

	/*
	 * In theory we can still leave IRQs enabled, as long as only the HPD
	 * interrupts remain enabled. We used to check for that, but since it's
	 * gen-specific and since we only disable LCPLL after we fully disable
	 * the interrupts, the check below should be enough.
	 */
	INTEL_DISPLAY_STATE_WARN(display, intel_irqs_enabled(dev_priv),
				 "IRQs enabled\n");
}

static u32 hsw_read_dcomp(struct intel_display *display)
{
	if (display->platform.haswell)
		return intel_de_read(display, D_COMP_HSW);
	else
		return intel_de_read(display, D_COMP_BDW);
}

static void hsw_write_dcomp(struct intel_display *display, u32 val)
{
	struct drm_i915_private *dev_priv = to_i915(display->drm);

	if (display->platform.haswell) {
		if (snb_pcode_write(&dev_priv->uncore, GEN6_PCODE_WRITE_D_COMP, val))
			drm_dbg_kms(display->drm, "Failed to write to D_COMP\n");
	} else {
		intel_de_write(display, D_COMP_BDW, val);
		intel_de_posting_read(display, D_COMP_BDW);
	}
}

/*
 * This function implements pieces of two sequences from BSpec:
 * - Sequence for display software to disable LCPLL
 * - Sequence for display software to allow package C8+
 * The steps implemented here are just the steps that actually touch the LCPLL
 * register. Callers should take care of disabling all the display engine
 * functions, doing the mode unset, fixing interrupts, etc.
 */
static void hsw_disable_lcpll(struct intel_display *display,
			      bool switch_to_fclk, bool allow_power_down)
{
	u32 val;

	assert_can_disable_lcpll(display);

	val = intel_de_read(display, LCPLL_CTL);

	if (switch_to_fclk) {
		val |= LCPLL_CD_SOURCE_FCLK;
		intel_de_write(display, LCPLL_CTL, val);

		if (wait_for_us(intel_de_read(display, LCPLL_CTL) &
				LCPLL_CD_SOURCE_FCLK_DONE, 1))
			drm_err(display->drm, "Switching to FCLK failed\n");

		val = intel_de_read(display, LCPLL_CTL);
	}

	val |= LCPLL_PLL_DISABLE;
	intel_de_write(display, LCPLL_CTL, val);
	intel_de_posting_read(display, LCPLL_CTL);

	if (intel_de_wait_for_clear(display, LCPLL_CTL, LCPLL_PLL_LOCK, 1))
		drm_err(display->drm, "LCPLL still locked\n");

	val = hsw_read_dcomp(display);
	val |= D_COMP_COMP_DISABLE;
	hsw_write_dcomp(display, val);
	ndelay(100);

	if (wait_for((hsw_read_dcomp(display) &
		      D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
		drm_err(display->drm, "D_COMP RCOMP still in progress\n");

	if (allow_power_down) {
		intel_de_rmw(display, LCPLL_CTL, 0, LCPLL_POWER_DOWN_ALLOW);
		intel_de_posting_read(display, LCPLL_CTL);
	}
}

/*
 * Fully restores LCPLL, disallowing power down and switching back to LCPLL
 * source.
 */
static void hsw_restore_lcpll(struct intel_display *display)
{
	struct drm_i915_private __maybe_unused *dev_priv = to_i915(display->drm);
	u32 val;

	val = intel_de_read(display, LCPLL_CTL);

	if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
		    LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
		return;

	/*
	 * Make sure we're not on PC8 state before disabling PC8, otherwise
	 * we'll hang the machine. To prevent PC8 state, just enable force_wake.
	 */
	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);

	if (val & LCPLL_POWER_DOWN_ALLOW) {
		val &= ~LCPLL_POWER_DOWN_ALLOW;
		intel_de_write(display, LCPLL_CTL, val);
		intel_de_posting_read(display, LCPLL_CTL);
	}

	val = hsw_read_dcomp(display);
	val |= D_COMP_COMP_FORCE;
	val &= ~D_COMP_COMP_DISABLE;
	hsw_write_dcomp(display, val);

	val = intel_de_read(display, LCPLL_CTL);
	val &= ~LCPLL_PLL_DISABLE;
	intel_de_write(display, LCPLL_CTL, val);

	if (intel_de_wait_for_set(display, LCPLL_CTL, LCPLL_PLL_LOCK, 5))
		drm_err(display->drm, "LCPLL not locked yet\n");

	if (val & LCPLL_CD_SOURCE_FCLK) {
		intel_de_rmw(display, LCPLL_CTL, LCPLL_CD_SOURCE_FCLK, 0);

		if (wait_for_us((intel_de_read(display, LCPLL_CTL) &
				 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
			drm_err(display->drm,
				"Switching back to LCPLL failed\n");
	}

	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);

	intel_update_cdclk(display);
	intel_cdclk_dump_config(display, &display->cdclk.hw, "Current CDCLK");
}

/*
 * Package states C8 and deeper are really deep PC states that can only be
 * reached when all the devices on the system allow it, so even if the graphics
 * device allows PC8+, it doesn't mean the system will actually get to these
 * states. Our driver only allows PC8+ when going into runtime PM.
 *
 * The requirements for PC8+ are that all the outputs are disabled, the power
 * well is disabled and most interrupts are disabled, and these are also
 * requirements for runtime PM. When these conditions are met, we manually do
 * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
 * to Fclk. If we're in PC8+ and we get a non-hotplug interrupt, we can hard
 * hang the machine.
 *
 * When we really reach PC8 or deeper states (not just when we allow it) we lose
 * the state of some registers, so when we come back from PC8+ we need to
 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
 * need to take care of the registers kept by RC6. Notice that this happens even
 * if we don't put the device in PCI D3 state (which is what currently happens
 * because of the runtime PM support).
 *
 * For more, read "Display Sequences for Package C8" on the hardware
 * documentation.
 */
static void hsw_enable_pc8(struct intel_display *display)
{
	struct drm_i915_private *dev_priv = to_i915(display->drm);

	drm_dbg_kms(display->drm, "Enabling package C8+\n");

	if (HAS_PCH_LPT_LP(dev_priv))
		intel_de_rmw(display, SOUTH_DSPCLK_GATE_D,
			     PCH_LP_PARTITION_LEVEL_DISABLE, 0);

	lpt_disable_clkout_dp(dev_priv);
	hsw_disable_lcpll(display, true, true);
}

static void hsw_disable_pc8(struct intel_display *display)
{
	struct drm_i915_private *dev_priv = to_i915(display->drm);

	drm_dbg_kms(display->drm, "Disabling package C8+\n");

	hsw_restore_lcpll(display);
	intel_init_pch_refclk(dev_priv);

	/* Many display registers don't survive PC8+ */
#ifdef I915 /* FIXME */
	intel_clock_gating_init(dev_priv);
#endif
}

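/*
 * Enable/disable the PCH reset handshake (plus, on display version 14+,
 * the PICA handshake): IVB uses GEN7_MSG_CTL, later platforms
 * HSW_NDE_RSTWRN_OPT.
 */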
static void intel_pch_reset_handshake(struct intel_display *display,
				      bool enable)
{
	i915_reg_t reg;
	u32 reset_bits;

	if (display->platform.ivybridge) {
		reg = GEN7_MSG_CTL;
		reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK;
	} else {
		reg = HSW_NDE_RSTWRN_OPT;
		reset_bits = RESET_PCH_HANDSHAKE_ENABLE;
	}

	if (DISPLAY_VER(display) >= 14)
		reset_bits |= MTL_RESET_PICA_HANDSHAKE_EN;

	intel_de_rmw(display, reg, reset_bits, enable ? reset_bits : 0);
}

static void skl_display_core_init(struct intel_display *display,
				  bool resume)
{
	struct drm_i915_private *dev_priv = to_i915(display->drm);
	struct i915_power_domains *power_domains = &display->power.domains;
	struct i915_power_well *well;

	gen9_set_dc_state(display, DC_STATE_DISABLE);

	/* enable PCH reset handshake */
	intel_pch_reset_handshake(display, !HAS_PCH_NOP(dev_priv));

	if (!HAS_DISPLAY(display))
		return;

	/* enable PG1 and Misc I/O */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(display, SKL_DISP_PW_1);
	intel_power_well_enable(display, well);

	well = lookup_power_well(display, SKL_DISP_PW_MISC_IO);
	intel_power_well_enable(display, well);

	mutex_unlock(&power_domains->lock);

	intel_cdclk_init_hw(display);

	gen9_dbuf_enable(display);

	if (resume)
		intel_dmc_load_program(display);
}

static void skl_display_core_uninit(struct intel_display *display)
{
	struct i915_power_domains *power_domains = &display->power.domains;
	struct i915_power_well *well;

	if (!HAS_DISPLAY(display))
		return;

	gen9_disable_dc_states(display);
	/* TODO: disable DMC program */

	gen9_dbuf_disable(display);

	intel_cdclk_uninit_hw(display);

	/* The spec doesn't call for removing the reset handshake flag */
	/* disable PG1 and Misc I/O */

	mutex_lock(&power_domains->lock);

	/*
	 * BSpec says to keep the MISC IO power well enabled here, only
	 * remove our request for power well 1.
	 * Note that even though the driver's request is removed power well 1
	 * may stay enabled after this due to DMC's own request on it.
	 */
	well = lookup_power_well(display, SKL_DISP_PW_1);
	intel_power_well_disable(display, well);

	mutex_unlock(&power_domains->lock);

	usleep_range(10, 30); /* 10 us delay per Bspec */
}

static void bxt_display_core_init(struct intel_display *display, bool resume)
{
	struct i915_power_domains *power_domains = &display->power.domains;
	struct i915_power_well *well;

	gen9_set_dc_state(display, DC_STATE_DISABLE);

	/*
	 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
	 * or else the reset will hang because there is no PCH to respond.
	 * Move the handshake programming to the initialization sequence;
	 * previously it was left up to BIOS.
	 */
	intel_pch_reset_handshake(display, false);

	if (!HAS_DISPLAY(display))
		return;

	/* Enable PG1 */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(display, SKL_DISP_PW_1);
	intel_power_well_enable(display, well);

	mutex_unlock(&power_domains->lock);

	intel_cdclk_init_hw(display);

	gen9_dbuf_enable(display);

	if (resume)
		intel_dmc_load_program(display);
}

static void bxt_display_core_uninit(struct intel_display *display)
{
	struct i915_power_domains *power_domains = &display->power.domains;
	struct i915_power_well *well;

	if (!HAS_DISPLAY(display))
		return;

	gen9_disable_dc_states(display);
	/* TODO: disable DMC program */

	gen9_dbuf_disable(display);

	intel_cdclk_uninit_hw(display);

	/* The spec doesn't call for removing the reset handshake flag */

	/*
	 * Disable PW1 (PG1).
	 * Note that even though the driver's request is removed power well 1
	 * may stay enabled after this due to DMC's own request on it.
	 */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(display, SKL_DISP_PW_1);
	intel_power_well_disable(display, well);

	mutex_unlock(&power_domains->lock);

	usleep_range(10, 30); /* 10 us delay per Bspec */
}

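/*
 * BW_BUDDY page mask tables, keyed by DRAM type and channel count; a zero
 * page_mask entry terminates the table.
 */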
1569 struct buddy_page_mask {
1570 u32 page_mask;
1571 u8 type;
1572 u8 num_channels;
1573 };
1574
1575 static const struct buddy_page_mask tgl_buddy_page_masks[] = {
1576 { .num_channels = 1, .type = INTEL_DRAM_DDR4, .page_mask = 0xF },
1577 { .num_channels = 1, .type = INTEL_DRAM_DDR5, .page_mask = 0xF },
1578 { .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1C },
1579 { .num_channels = 2, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x1C },
1580 { .num_channels = 2, .type = INTEL_DRAM_DDR4, .page_mask = 0x1F },
1581 { .num_channels = 2, .type = INTEL_DRAM_DDR5, .page_mask = 0x1E },
1582 { .num_channels = 4, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x38 },
1583 { .num_channels = 4, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x38 },
1584 {}
1585 };
1586
1587 static const struct buddy_page_mask wa_1409767108_buddy_page_masks[] = {
1588 { .num_channels = 1, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1 },
1589 { .num_channels = 1, .type = INTEL_DRAM_DDR4, .page_mask = 0x1 },
1590 { .num_channels = 1, .type = INTEL_DRAM_DDR5, .page_mask = 0x1 },
1591 { .num_channels = 1, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x1 },
1592 { .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x3 },
1593 { .num_channels = 2, .type = INTEL_DRAM_DDR4, .page_mask = 0x3 },
1594 { .num_channels = 2, .type = INTEL_DRAM_DDR5, .page_mask = 0x3 },
1595 { .num_channels = 2, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x3 },
1596 {}
1597 };
1598
tgl_bw_buddy_init(struct intel_display * display)1599 static void tgl_bw_buddy_init(struct intel_display *display)
1600 {
1601 struct drm_i915_private *dev_priv = to_i915(display->drm);
1602 enum intel_dram_type type = dev_priv->dram_info.type;
1603 u8 num_channels = dev_priv->dram_info.num_channels;
1604 const struct buddy_page_mask *table;
1605 unsigned long abox_mask = DISPLAY_INFO(display)->abox_mask;
1606 int config, i;
1607
	/* BW_BUDDY registers are not used on dGPUs beyond DG1 */
	if (display->platform.dgfx && !display->platform.dg1)
		return;

	if (display->platform.alderlake_s ||
	    (display->platform.rocketlake && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0)))
		/* Wa_1409767108 */
		table = wa_1409767108_buddy_page_masks;
	else
		table = tgl_buddy_page_masks;

	for (config = 0; table[config].page_mask != 0; config++)
		if (table[config].num_channels == num_channels &&
		    table[config].type == type)
			break;

	if (table[config].page_mask == 0) {
		drm_dbg_kms(display->drm,
			    "Unknown memory configuration; disabling address buddy logic.\n");
		for_each_set_bit(i, &abox_mask, sizeof(abox_mask))
			intel_de_write(display, BW_BUDDY_CTL(i),
				       BW_BUDDY_DISABLE);
	} else {
		for_each_set_bit(i, &abox_mask, sizeof(abox_mask)) {
			intel_de_write(display, BW_BUDDY_PAGE_MASK(i),
				       table[config].page_mask);

			/* Wa_22010178259:tgl,dg1,rkl,adl-s */
			if (DISPLAY_VER(display) == 12)
				intel_de_rmw(display, BW_BUDDY_CTL(i),
					     BW_BUDDY_TLB_REQ_TIMER_MASK,
					     BW_BUDDY_TLB_REQ_TIMER(0x8));
		}
	}
}
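
/*
 * The page mask tables above are sentinel-terminated: the empty initializer
 * leaves page_mask == 0, so lookups need no separate length. A minimal
 * sketch of the same lookup pattern (illustrative only, not driver code):
 *
 *	static const struct buddy_page_mask *
 *	buddy_lookup(const struct buddy_page_mask *table, u8 type, u8 num_channels)
 *	{
 *		for (; table->page_mask != 0; table++)
 *			if (table->num_channels == num_channels &&
 *			    table->type == type)
 *				return table;
 *
 *		return NULL;
 *	}
 *
 * tgl_bw_buddy_init() open-codes this loop and detects a miss by checking
 * table[config].page_mask == 0 afterwards, in which case it programs
 * BW_BUDDY_DISABLE instead of a page mask.
 */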

static void icl_display_core_init(struct intel_display *display,
				  bool resume)
{
	struct drm_i915_private *dev_priv = to_i915(display->drm);
	struct i915_power_domains *power_domains = &display->power.domains;
	struct i915_power_well *well;

	gen9_set_dc_state(display, DC_STATE_DISABLE);

	/* Wa_14011294188:ehl,jsl,tgl,rkl,adl-s */
	if (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP &&
	    INTEL_PCH_TYPE(dev_priv) < PCH_DG1)
		intel_de_rmw(display, SOUTH_DSPCLK_GATE_D, 0,
			     PCH_DPMGUNIT_CLOCK_GATE_DISABLE);

	/* 1. Enable PCH reset handshake. */
	intel_pch_reset_handshake(display, !HAS_PCH_NOP(dev_priv));

	if (!HAS_DISPLAY(display))
		return;

	/* 2. Initialize all combo phys */
	intel_combo_phy_init(dev_priv);

	/*
	 * 3. Enable Power Well 1 (PG1).
	 * The AUX IO power wells will be enabled on demand.
	 */
	mutex_lock(&power_domains->lock);
	well = lookup_power_well(display, SKL_DISP_PW_1);
	intel_power_well_enable(display, well);
	mutex_unlock(&power_domains->lock);

	if (DISPLAY_VER(display) == 14)
		intel_de_rmw(display, DC_STATE_EN,
			     HOLD_PHY_PG1_LATCH | HOLD_PHY_CLKREQ_PG1_LATCH, 0);

	/* 4. Enable CDCLK. */
	intel_cdclk_init_hw(display);

	if (DISPLAY_VER(display) >= 12)
		gen12_dbuf_slices_config(display);

	/* 5. Enable DBUF. */
	gen9_dbuf_enable(display);

	/* 6. Setup MBUS. */
	icl_mbus_init(display);

	/* 7. Program arbiter BW_BUDDY registers */
	if (DISPLAY_VER(display) >= 12)
		tgl_bw_buddy_init(display);

	/* 8. Ensure PHYs have completed calibration and adaptation */
	if (display->platform.dg2)
		intel_snps_phy_wait_for_calibration(dev_priv);

	/* 9. XE2_HPD: Program CHICKEN_MISC_2 before any cursor or planes are enabled */
	if (DISPLAY_VERx100(display) == 1401)
		intel_de_rmw(display, CHICKEN_MISC_2, BMG_DARB_HALF_BLK_END_BURST, 1);

	if (resume)
		intel_dmc_load_program(display);

	/* Wa_14011508470:tgl,dg1,rkl,adl-s,adl-p,dg2 */
	if (IS_DISPLAY_VERx100(display, 1200, 1300))
		intel_de_rmw(display, GEN11_CHICKEN_DCPR_2, 0,
			     DCPR_CLEAR_MEMSTAT_DIS | DCPR_SEND_RESP_IMM |
			     DCPR_MASK_LPMODE | DCPR_MASK_MAXLATENCY_MEMUP_CLR);

	/* Wa_14011503030:xelpd */
	if (DISPLAY_VER(display) == 13)
		intel_de_write(display, XELPD_DISPLAY_ERR_FATAL_MASK, ~0);

	/* Wa_15013987218 */
	if (DISPLAY_VER(display) == 20) {
		intel_de_rmw(display, SOUTH_DSPCLK_GATE_D,
			     0, PCH_GMBUSUNIT_CLOCK_GATE_DISABLE);
		intel_de_rmw(display, SOUTH_DSPCLK_GATE_D,
			     PCH_GMBUSUNIT_CLOCK_GATE_DISABLE, 0);
	}
}

static void icl_display_core_uninit(struct intel_display *display)
{
	struct drm_i915_private *dev_priv = to_i915(display->drm);
	struct i915_power_domains *power_domains = &display->power.domains;
	struct i915_power_well *well;

	if (!HAS_DISPLAY(display))
		return;

	gen9_disable_dc_states(display);
	intel_dmc_disable_program(display);

	/* 1. Disable all display engine functions -> already done */

	/* 2. Disable DBUF */
	gen9_dbuf_disable(display);

	/* 3. Disable CD clock */
	intel_cdclk_uninit_hw(display);

	if (DISPLAY_VER(display) == 14)
		intel_de_rmw(display, DC_STATE_EN, 0,
			     HOLD_PHY_PG1_LATCH | HOLD_PHY_CLKREQ_PG1_LATCH);

	/*
	 * 4. Disable Power Well 1 (PG1).
	 * The AUX IO power wells are toggled on demand, so they are already
	 * disabled at this point.
	 */
	mutex_lock(&power_domains->lock);
	well = lookup_power_well(display, SKL_DISP_PW_1);
	intel_power_well_disable(display, well);
	mutex_unlock(&power_domains->lock);

	/* 5. Disable Combo PHYs */
	intel_combo_phy_uninit(dev_priv);
}

static void chv_phy_control_init(struct intel_display *display)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(display, VLV_DISP_PW_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(display, CHV_DISP_PW_DPIO_CMN_D);

	/*
	 * DISPLAY_PHY_CONTROL can get corrupted if read. As a
	 * workaround never ever read DISPLAY_PHY_CONTROL, and
	 * instead maintain a shadow copy ourselves. Use the actual
	 * power well state and lane status to reconstruct the
	 * expected initial value.
	 */
	display->power.chv_phy_control =
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);

	/*
	 * If all lanes are disabled we leave the override disabled
	 * with all power down bits cleared to match the state we
	 * would use after disabling the port. Otherwise enable the
	 * override and set the lane powerdown bits according to the
	 * current lane status.
	 */
	if (intel_power_well_is_enabled(display, cmn_bc)) {
		u32 status = intel_de_read(display, DPLL(display, PIPE_A));
		unsigned int mask;

		mask = status & DPLL_PORTB_READY_MASK;
		if (mask == 0xf)
			mask = 0x0;
		else
			display->power.chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);

		display->power.chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);

		mask = (status & DPLL_PORTC_READY_MASK) >> 4;
		if (mask == 0xf)
			mask = 0x0;
		else
			display->power.chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);

		display->power.chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);

		display->power.chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);

		display->power.chv_phy_assert[DPIO_PHY0] = false;
	} else {
		display->power.chv_phy_assert[DPIO_PHY0] = true;
	}

	if (intel_power_well_is_enabled(display, cmn_d)) {
		u32 status = intel_de_read(display, DPIO_PHY_STATUS);
		unsigned int mask;

		mask = status & DPLL_PORTD_READY_MASK;

		if (mask == 0xf)
			mask = 0x0;
		else
			display->power.chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);

		display->power.chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);

		display->power.chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);

		display->power.chv_phy_assert[DPIO_PHY1] = false;
	} else {
		display->power.chv_phy_assert[DPIO_PHY1] = true;
	}

	drm_dbg_kms(display->drm, "Initial PHY_CONTROL=0x%08x\n",
		    display->power.chv_phy_control);

	/* Defer application of initial phy_control to enabling the powerwell */
}
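
/*
 * A note on the pattern above: for a write-only register like
 * DISPLAY_PHY_CONTROL the driver keeps a software shadow and funnels every
 * update through it, flushing the whole value on each change. A minimal
 * sketch of the idea (illustrative, not the actual helpers; field_mask and
 * field_val stand in for one of the PHY_* fields):
 *
 *	display->power.chv_phy_control &= ~field_mask;
 *	display->power.chv_phy_control |= field_val;
 *	intel_de_write(display, DISPLAY_PHY_CONTROL,
 *		       display->power.chv_phy_control);
 *
 * Since the register must never be read, the initial shadow value is
 * reconstructed above from the power well and lane status instead of a
 * readback.
 */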

static void vlv_cmnlane_wa(struct intel_display *display)
{
	struct i915_power_well *cmn =
		lookup_power_well(display, VLV_DISP_PW_DPIO_CMN_BC);
	struct i915_power_well *disp2d =
		lookup_power_well(display, VLV_DISP_PW_DISP2D);

	/* If the display might already be active, skip this */
	if (intel_power_well_is_enabled(display, cmn) &&
	    intel_power_well_is_enabled(display, disp2d) &&
	    intel_de_read(display, DPIO_CTL) & DPIO_CMNRST)
		return;

	drm_dbg_kms(display->drm, "toggling display PHY side reset\n");

	/* cmnlane needs DPLL registers */
	intel_power_well_enable(display, disp2d);

	/*
	 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
	 * Need to assert and de-assert PHY SB reset by gating the
	 * common lane power, then un-gating it.
	 * Simply ungating isn't sufficient to reset the PHY far enough to get
	 * ports and lanes running.
	 */
	intel_power_well_disable(display, cmn);
}

static bool vlv_punit_is_power_gated(struct intel_display *display, u32 reg0)
{
	struct drm_i915_private *dev_priv = to_i915(display->drm);
	bool ret;

	vlv_punit_get(dev_priv);
	ret = (vlv_punit_read(dev_priv, reg0) & SSPM0_SSC_MASK) == SSPM0_SSC_PWR_GATE;
	vlv_punit_put(dev_priv);

	return ret;
}

static void assert_ved_power_gated(struct intel_display *display)
{
	drm_WARN(display->drm,
		 !vlv_punit_is_power_gated(display, PUNIT_REG_VEDSSPM0),
		 "VED not power gated\n");
}

static void assert_isp_power_gated(struct intel_display *display)
{
	static const struct pci_device_id isp_ids[] = {
		{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0f38)},
		{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x22b8)},
		{}
	};

	drm_WARN(display->drm, !pci_dev_present(isp_ids) &&
		 !vlv_punit_is_power_gated(display, PUNIT_REG_ISPSSPM0),
		 "ISP not power gated\n");
}

static void intel_power_domains_verify_state(struct intel_display *display);

/**
 * intel_power_domains_init_hw - initialize hardware power domain state
 * @display: display device instance
 * @resume: Called from resume code paths or not
 *
 * This function initializes the hardware power domain state and enables all
 * power wells belonging to the INIT power domain. Power wells in other
 * domains are referenced or disabled by
 * intel_modeset_readout_hw_state(). After that the reference count of each
 * power well must match its HW enabled state, see
 * intel_power_domains_verify_state().
 *
 * It will return with power domains disabled (to be enabled later by
 * intel_power_domains_enable()) and must be paired with
 * intel_power_domains_driver_remove().
 */
void intel_power_domains_init_hw(struct intel_display *display, bool resume)
{
	struct drm_i915_private *i915 = to_i915(display->drm);
	struct i915_power_domains *power_domains = &display->power.domains;

	power_domains->initializing = true;

	if (DISPLAY_VER(display) >= 11) {
		icl_display_core_init(display, resume);
	} else if (display->platform.geminilake || display->platform.broxton) {
		bxt_display_core_init(display, resume);
	} else if (DISPLAY_VER(display) == 9) {
		skl_display_core_init(display, resume);
	} else if (display->platform.cherryview) {
		mutex_lock(&power_domains->lock);
		chv_phy_control_init(display);
		mutex_unlock(&power_domains->lock);
		assert_isp_power_gated(display);
	} else if (display->platform.valleyview) {
		mutex_lock(&power_domains->lock);
		vlv_cmnlane_wa(display);
		mutex_unlock(&power_domains->lock);
		assert_ved_power_gated(display);
		assert_isp_power_gated(display);
	} else if (display->platform.broadwell || display->platform.haswell) {
		hsw_assert_cdclk(display);
		intel_pch_reset_handshake(display, !HAS_PCH_NOP(i915));
	} else if (display->platform.ivybridge) {
		intel_pch_reset_handshake(display, !HAS_PCH_NOP(i915));
	}

	/*
	 * Keep all power wells enabled for any dependent HW access during
	 * initialization and to make sure we keep BIOS enabled display HW
	 * resources powered until display HW readout is complete. We drop
	 * this reference in intel_power_domains_enable().
	 */
	drm_WARN_ON(display->drm, power_domains->init_wakeref);
	power_domains->init_wakeref =
		intel_display_power_get(i915, POWER_DOMAIN_INIT);

	/* Disable power well support if the user asked for it. */
	if (!display->params.disable_power_well) {
		drm_WARN_ON(display->drm, power_domains->disable_wakeref);
		display->power.domains.disable_wakeref = intel_display_power_get(i915,
										 POWER_DOMAIN_INIT);
	}
	intel_power_domains_sync_hw(display);

	power_domains->initializing = false;
}
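
/*
 * For reference, a simplified sketch of how the entry points in this file
 * pair up over the driver's life cycle (the actual probe/remove paths have
 * more steps in between):
 *
 *	intel_power_domains_init_hw(display, false);
 *	... display HW state readout ...
 *	intel_power_domains_enable(display);
 *
 *	... normal operation ...
 *
 *	intel_power_domains_disable(display);
 *	intel_power_domains_driver_remove(display);
 */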

/**
 * intel_power_domains_driver_remove - deinitialize hw power domain state
 * @display: display device instance
 *
 * De-initializes the display power domain HW state. It also ensures that the
 * device stays powered up so that the driver can be reloaded.
 *
 * It must be called with power domains already disabled (after a call to
 * intel_power_domains_disable()) and must be paired with
 * intel_power_domains_init_hw().
 */
void intel_power_domains_driver_remove(struct intel_display *display)
{
	struct drm_i915_private *i915 = to_i915(display->drm);
	intel_wakeref_t wakeref __maybe_unused =
		fetch_and_zero(&display->power.domains.init_wakeref);

	/* Remove the refcount we took to keep power well support disabled. */
	if (!display->params.disable_power_well)
		intel_display_power_put(i915, POWER_DOMAIN_INIT,
					fetch_and_zero(&display->power.domains.disable_wakeref));

	intel_display_power_flush_work_sync(display);

	intel_power_domains_verify_state(display);

	/* Keep the power well enabled, but cancel its rpm wakeref. */
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}

/**
 * intel_power_domains_sanitize_state - sanitize power domains state
 * @display: display device instance
 *
 * Sanitize the power domains state during driver loading and system resume.
 * The function will disable all display power wells that BIOS has enabled
 * without a user for them (any user of a power well has taken a reference
 * on it by the time this function is called, after the state of all the
 * pipe, encoder, etc. HW resources have been sanitized).
 */
void intel_power_domains_sanitize_state(struct intel_display *display)
{
	struct i915_power_domains *power_domains = &display->power.domains;
	struct i915_power_well *power_well;

	mutex_lock(&power_domains->lock);

	for_each_power_well_reverse(display, power_well) {
		if (power_well->desc->always_on || power_well->count ||
		    !intel_power_well_is_enabled(display, power_well))
			continue;

		drm_dbg_kms(display->drm,
			    "BIOS left unused %s power well enabled, disabling it\n",
			    intel_power_well_name(power_well));
		intel_power_well_disable(display, power_well);
	}

	mutex_unlock(&power_domains->lock);
}

/**
 * intel_power_domains_enable - enable toggling of display power wells
 * @display: display device instance
 *
 * Enable the on-demand enabling/disabling of the display power wells. Note that
 * power wells not belonging to POWER_DOMAIN_INIT are allowed to be toggled
 * only at specific points of the display modeset sequence, thus they are not
 * affected by the intel_power_domains_enable()/disable() calls. The purpose
 * of these functions is to keep the rest of the power wells enabled until the end
 * of display HW readout (which will acquire the power references reflecting
 * the current HW state).
 */
void intel_power_domains_enable(struct intel_display *display)
{
	struct drm_i915_private *i915 = to_i915(display->drm);
	intel_wakeref_t wakeref __maybe_unused =
		fetch_and_zero(&display->power.domains.init_wakeref);

	intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
	intel_power_domains_verify_state(display);
}

/**
 * intel_power_domains_disable - disable toggling of display power wells
 * @display: display device instance
 *
 * Disable the on-demand enabling/disabling of the display power wells. See
 * intel_power_domains_enable() for which power wells this call controls.
 */
void intel_power_domains_disable(struct intel_display *display)
{
	struct drm_i915_private *i915 = to_i915(display->drm);
	struct i915_power_domains *power_domains = &display->power.domains;

	drm_WARN_ON(display->drm, power_domains->init_wakeref);
	power_domains->init_wakeref =
		intel_display_power_get(i915, POWER_DOMAIN_INIT);

	intel_power_domains_verify_state(display);
}

/**
 * intel_power_domains_suspend - suspend power domain state
 * @display: display device instance
 * @s2idle: specifies whether we go to idle, or deeper sleep
 *
 * This function prepares the hardware power domain state before entering
 * system suspend.
 *
 * It must be called with power domains already disabled (after a call to
 * intel_power_domains_disable()) and paired with intel_power_domains_resume().
 */
void intel_power_domains_suspend(struct intel_display *display, bool s2idle)
{
	struct drm_i915_private *i915 = to_i915(display->drm);
	struct i915_power_domains *power_domains = &display->power.domains;
	intel_wakeref_t wakeref __maybe_unused =
		fetch_and_zero(&power_domains->init_wakeref);

	intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);

	/*
	 * In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9
	 * support, don't manually deinit the power domains. This also means the
	 * DMC firmware will stay active; it will power down any HW
	 * resources as required and also enable deeper system power states
	 * that would be blocked if the firmware was inactive.
	 */
	if (!(power_domains->allowed_dc_mask & DC_STATE_EN_DC9) && s2idle &&
	    intel_dmc_has_payload(display)) {
		intel_display_power_flush_work(i915);
		intel_power_domains_verify_state(display);
		return;
	}

	/*
	 * Even if power well support was disabled we still want to disable
	 * power wells if power domains must be deinitialized for suspend.
	 */
	if (!display->params.disable_power_well)
		intel_display_power_put(i915, POWER_DOMAIN_INIT,
					fetch_and_zero(&display->power.domains.disable_wakeref));

	intel_display_power_flush_work(i915);
	intel_power_domains_verify_state(display);

	if (DISPLAY_VER(display) >= 11)
		icl_display_core_uninit(display);
	else if (display->platform.geminilake || display->platform.broxton)
		bxt_display_core_uninit(display);
	else if (DISPLAY_VER(display) == 9)
		skl_display_core_uninit(display);

	power_domains->display_core_suspended = true;
}
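
/*
 * A simplified sketch of the system suspend/resume pairing (the actual
 * callers wrap more steps around these):
 *
 *	intel_power_domains_disable(display);
 *	intel_power_domains_suspend(display, s2idle);
 *	... system sleep ...
 *	intel_power_domains_resume(display);
 *	intel_power_domains_enable(display);
 *
 * On the s2idle early return above the display core is left initialized, so
 * display_core_suspended stays false and intel_power_domains_resume() only
 * re-takes the INIT wakeref instead of re-running the full init sequence.
 */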

/**
 * intel_power_domains_resume - resume power domain state
 * @display: display device instance
 *
 * This function resumes the hardware power domain state during system resume.
 *
 * It will return with power domain support disabled (to be enabled later by
 * intel_power_domains_enable()) and must be paired with
 * intel_power_domains_suspend().
 */
void intel_power_domains_resume(struct intel_display *display)
{
	struct drm_i915_private *i915 = to_i915(display->drm);
	struct i915_power_domains *power_domains = &display->power.domains;

	if (power_domains->display_core_suspended) {
		intel_power_domains_init_hw(display, true);
		power_domains->display_core_suspended = false;
	} else {
		drm_WARN_ON(display->drm, power_domains->init_wakeref);
		power_domains->init_wakeref =
			intel_display_power_get(i915, POWER_DOMAIN_INIT);
	}

	intel_power_domains_verify_state(display);
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)

static void intel_power_domains_dump_info(struct intel_display *display)
{
	struct i915_power_domains *power_domains = &display->power.domains;
	struct i915_power_well *power_well;

	for_each_power_well(display, power_well) {
		enum intel_display_power_domain domain;

		drm_dbg_kms(display->drm, "%-25s %d\n",
			    intel_power_well_name(power_well), intel_power_well_refcount(power_well));

		for_each_power_domain(domain, intel_power_well_domains(power_well))
			drm_dbg_kms(display->drm, " %-23s %d\n",
				    intel_display_power_domain_str(domain),
				    power_domains->domain_use_count[domain]);
	}
}

/**
 * intel_power_domains_verify_state - verify the HW/SW state for all power wells
 * @display: display device instance
 *
 * Verify that the reference count of each power well matches its HW enabled
 * state and the total refcount of the domains it belongs to. This must be
 * called after modeset HW state sanitization, which is responsible for
 * acquiring reference counts for any power wells in use and disabling the
 * ones left on by BIOS but not required by any active output.
 */
static void intel_power_domains_verify_state(struct intel_display *display)
{
	struct i915_power_domains *power_domains = &display->power.domains;
	struct i915_power_well *power_well;
	bool dump_domain_info;

	mutex_lock(&power_domains->lock);

	verify_async_put_domains_state(power_domains);

	dump_domain_info = false;
	for_each_power_well(display, power_well) {
		enum intel_display_power_domain domain;
		int domains_count;
		bool enabled;

		enabled = intel_power_well_is_enabled(display, power_well);
		if ((intel_power_well_refcount(power_well) ||
		     intel_power_well_is_always_on(power_well)) !=
		    enabled)
			drm_err(display->drm,
				"power well %s state mismatch (refcount %d/enabled %d)",
				intel_power_well_name(power_well),
				intel_power_well_refcount(power_well), enabled);

		domains_count = 0;
		for_each_power_domain(domain, intel_power_well_domains(power_well))
			domains_count += power_domains->domain_use_count[domain];

		if (intel_power_well_refcount(power_well) != domains_count) {
			drm_err(display->drm,
				"power well %s refcount/domain refcount mismatch "
				"(refcount %d/domains refcount %d)\n",
				intel_power_well_name(power_well),
				intel_power_well_refcount(power_well),
				domains_count);
			dump_domain_info = true;
		}
	}

	if (dump_domain_info) {
		static bool dumped;

		if (!dumped) {
			intel_power_domains_dump_info(display);
			dumped = true;
		}
	}

	mutex_unlock(&power_domains->lock);
}
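
/*
 * To illustrate the two invariants checked above with made-up numbers: a
 * power well with refcount 2 must read back as enabled, and if it serves
 * only domains A and B then domain_use_count[A] + domain_use_count[B] must
 * also equal 2. An always-on well must read back as enabled regardless of
 * its refcount.
 */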

#else

static void intel_power_domains_verify_state(struct intel_display *display)
{
}

#endif

void intel_display_power_suspend_late(struct intel_display *display, bool s2idle)
{
	struct drm_i915_private *i915 = to_i915(display->drm);

	intel_power_domains_suspend(display, s2idle);

	if (DISPLAY_VER(display) >= 11 || display->platform.geminilake ||
	    display->platform.broxton) {
		bxt_enable_dc9(display);
	} else if (display->platform.haswell || display->platform.broadwell) {
		hsw_enable_pc8(display);
	}

	/* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */
	if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1)
		intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS);
}

void intel_display_power_resume_early(struct intel_display *display)
{
	struct drm_i915_private *i915 = to_i915(display->drm);

	if (DISPLAY_VER(display) >= 11 || display->platform.geminilake ||
	    display->platform.broxton) {
		gen9_sanitize_dc_state(display);
		bxt_disable_dc9(display);
	} else if (display->platform.haswell || display->platform.broadwell) {
		hsw_disable_pc8(display);
	}

	/* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */
	if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1)
		intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0);

	intel_power_domains_resume(display);
}

void intel_display_power_suspend(struct intel_display *display)
{
	if (DISPLAY_VER(display) >= 11) {
		icl_display_core_uninit(display);
		bxt_enable_dc9(display);
	} else if (display->platform.geminilake || display->platform.broxton) {
		bxt_display_core_uninit(display);
		bxt_enable_dc9(display);
	} else if (display->platform.haswell || display->platform.broadwell) {
		hsw_enable_pc8(display);
	}
}

void intel_display_power_resume(struct intel_display *display)
{
	struct i915_power_domains *power_domains = &display->power.domains;

	if (DISPLAY_VER(display) >= 11) {
		bxt_disable_dc9(display);
		icl_display_core_init(display, true);
		if (intel_dmc_has_payload(display)) {
			if (power_domains->allowed_dc_mask & DC_STATE_EN_UPTO_DC6)
				skl_enable_dc6(display);
			else if (power_domains->allowed_dc_mask & DC_STATE_EN_UPTO_DC5)
				gen9_enable_dc5(display);
		}
	} else if (display->platform.geminilake || display->platform.broxton) {
		bxt_disable_dc9(display);
		bxt_display_core_init(display, true);
		if (intel_dmc_has_payload(display) &&
		    (power_domains->allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
			gen9_enable_dc5(display);
	} else if (display->platform.haswell || display->platform.broadwell) {
		hsw_disable_pc8(display);
	}
}

void intel_display_power_debug(struct drm_i915_private *i915, struct seq_file *m)
{
	struct intel_display *display = &i915->display;
	struct i915_power_domains *power_domains = &display->power.domains;
	int i;

	mutex_lock(&power_domains->lock);

	seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
	for (i = 0; i < power_domains->power_well_count; i++) {
		struct i915_power_well *power_well;
		enum intel_display_power_domain power_domain;

		power_well = &power_domains->power_wells[i];
		seq_printf(m, "%-25s %d\n", intel_power_well_name(power_well),
			   intel_power_well_refcount(power_well));

		for_each_power_domain(power_domain, intel_power_well_domains(power_well))
			seq_printf(m, " %-23s %d\n",
				   intel_display_power_domain_str(power_domain),
				   power_domains->domain_use_count[power_domain]);
	}

	mutex_unlock(&power_domains->lock);
}
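
/*
 * Example debugfs output produced by the loop above (the well names and
 * counts are hypothetical):
 *
 *	Power well/domain         Use count
 *	PW_1                      2
 *	 PIPE_A                   1
 *	 TRANSCODER_A             1
 */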

struct intel_ddi_port_domains {
	enum port port_start;
	enum port port_end;
	enum aux_ch aux_ch_start;
	enum aux_ch aux_ch_end;

	enum intel_display_power_domain ddi_lanes;
	enum intel_display_power_domain ddi_io;
	enum intel_display_power_domain aux_io;
	enum intel_display_power_domain aux_legacy_usbc;
	enum intel_display_power_domain aux_tbt;
};

static const struct intel_ddi_port_domains
i9xx_port_domains[] = {
	{
		.port_start = PORT_A,
		.port_end = PORT_F,
		.aux_ch_start = AUX_CH_A,
		.aux_ch_end = AUX_CH_F,

		.ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A,
		.ddi_io = POWER_DOMAIN_PORT_DDI_IO_A,
		.aux_io = POWER_DOMAIN_AUX_IO_A,
		.aux_legacy_usbc = POWER_DOMAIN_AUX_A,
		.aux_tbt = POWER_DOMAIN_INVALID,
	},
};

static const struct intel_ddi_port_domains
d11_port_domains[] = {
	{
		.port_start = PORT_A,
		.port_end = PORT_B,
		.aux_ch_start = AUX_CH_A,
		.aux_ch_end = AUX_CH_B,

		.ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A,
		.ddi_io = POWER_DOMAIN_PORT_DDI_IO_A,
		.aux_io = POWER_DOMAIN_AUX_IO_A,
		.aux_legacy_usbc = POWER_DOMAIN_AUX_A,
		.aux_tbt = POWER_DOMAIN_INVALID,
	}, {
		.port_start = PORT_C,
		.port_end = PORT_F,
		.aux_ch_start = AUX_CH_C,
		.aux_ch_end = AUX_CH_F,

		.ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_C,
		.ddi_io = POWER_DOMAIN_PORT_DDI_IO_C,
		.aux_io = POWER_DOMAIN_AUX_IO_C,
		.aux_legacy_usbc = POWER_DOMAIN_AUX_C,
		.aux_tbt = POWER_DOMAIN_AUX_TBT1,
	},
};

static const struct intel_ddi_port_domains
d12_port_domains[] = {
	{
		.port_start = PORT_A,
		.port_end = PORT_C,
		.aux_ch_start = AUX_CH_A,
		.aux_ch_end = AUX_CH_C,

		.ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A,
		.ddi_io = POWER_DOMAIN_PORT_DDI_IO_A,
		.aux_io = POWER_DOMAIN_AUX_IO_A,
		.aux_legacy_usbc = POWER_DOMAIN_AUX_A,
		.aux_tbt = POWER_DOMAIN_INVALID,
	}, {
		.port_start = PORT_TC1,
		.port_end = PORT_TC6,
		.aux_ch_start = AUX_CH_USBC1,
		.aux_ch_end = AUX_CH_USBC6,

		.ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_TC1,
		.ddi_io = POWER_DOMAIN_PORT_DDI_IO_TC1,
		.aux_io = POWER_DOMAIN_INVALID,
		.aux_legacy_usbc = POWER_DOMAIN_AUX_USBC1,
		.aux_tbt = POWER_DOMAIN_AUX_TBT1,
	},
};

static const struct intel_ddi_port_domains
d13_port_domains[] = {
	{
		.port_start = PORT_A,
		.port_end = PORT_C,
		.aux_ch_start = AUX_CH_A,
		.aux_ch_end = AUX_CH_C,

		.ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A,
		.ddi_io = POWER_DOMAIN_PORT_DDI_IO_A,
		.aux_io = POWER_DOMAIN_AUX_IO_A,
		.aux_legacy_usbc = POWER_DOMAIN_AUX_A,
		.aux_tbt = POWER_DOMAIN_INVALID,
	}, {
		.port_start = PORT_TC1,
		.port_end = PORT_TC4,
		.aux_ch_start = AUX_CH_USBC1,
		.aux_ch_end = AUX_CH_USBC4,

		.ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_TC1,
		.ddi_io = POWER_DOMAIN_PORT_DDI_IO_TC1,
		.aux_io = POWER_DOMAIN_INVALID,
		.aux_legacy_usbc = POWER_DOMAIN_AUX_USBC1,
		.aux_tbt = POWER_DOMAIN_AUX_TBT1,
	}, {
		.port_start = PORT_D_XELPD,
		.port_end = PORT_E_XELPD,
		.aux_ch_start = AUX_CH_D_XELPD,
		.aux_ch_end = AUX_CH_E_XELPD,

		.ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_D,
		.ddi_io = POWER_DOMAIN_PORT_DDI_IO_D,
		.aux_io = POWER_DOMAIN_AUX_IO_D,
		.aux_legacy_usbc = POWER_DOMAIN_AUX_D,
		.aux_tbt = POWER_DOMAIN_INVALID,
	},
};

static void
intel_port_domains_for_platform(struct intel_display *display,
				const struct intel_ddi_port_domains **domains,
				int *domains_size)
{
	if (DISPLAY_VER(display) >= 13) {
		*domains = d13_port_domains;
		*domains_size = ARRAY_SIZE(d13_port_domains);
	} else if (DISPLAY_VER(display) >= 12) {
		*domains = d12_port_domains;
		*domains_size = ARRAY_SIZE(d12_port_domains);
	} else if (DISPLAY_VER(display) >= 11) {
		*domains = d11_port_domains;
		*domains_size = ARRAY_SIZE(d11_port_domains);
	} else {
		*domains = i9xx_port_domains;
		*domains_size = ARRAY_SIZE(i9xx_port_domains);
	}
}

static const struct intel_ddi_port_domains *
intel_port_domains_for_port(struct intel_display *display, enum port port)
{
	const struct intel_ddi_port_domains *domains;
	int domains_size;
	int i;

	intel_port_domains_for_platform(display, &domains, &domains_size);
	for (i = 0; i < domains_size; i++)
		if (port >= domains[i].port_start && port <= domains[i].port_end)
			return &domains[i];

	return NULL;
}

enum intel_display_power_domain
intel_display_power_ddi_io_domain(struct drm_i915_private *i915, enum port port)
{
	struct intel_display *display = &i915->display;
	const struct intel_ddi_port_domains *domains = intel_port_domains_for_port(display, port);

	if (drm_WARN_ON(display->drm, !domains || domains->ddi_io == POWER_DOMAIN_INVALID))
		return POWER_DOMAIN_PORT_DDI_IO_A;

	return domains->ddi_io + (int)(port - domains->port_start);
}
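
/*
 * The arithmetic above assumes that the port and power domain enum values
 * are contiguous within each table range. Worked example against
 * d13_port_domains: with port_start == PORT_TC1 and ddi_io ==
 * POWER_DOMAIN_PORT_DDI_IO_TC1, PORT_TC3 gives an offset of 2 and thus
 * POWER_DOMAIN_PORT_DDI_IO_TC3. The remaining intel_display_power_*_domain()
 * helpers below use the same scheme.
 */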

enum intel_display_power_domain
intel_display_power_ddi_lanes_domain(struct drm_i915_private *i915, enum port port)
{
	struct intel_display *display = &i915->display;
	const struct intel_ddi_port_domains *domains = intel_port_domains_for_port(display, port);

	if (drm_WARN_ON(display->drm, !domains || domains->ddi_lanes == POWER_DOMAIN_INVALID))
		return POWER_DOMAIN_PORT_DDI_LANES_A;

	return domains->ddi_lanes + (int)(port - domains->port_start);
}

static const struct intel_ddi_port_domains *
intel_port_domains_for_aux_ch(struct intel_display *display, enum aux_ch aux_ch)
{
	const struct intel_ddi_port_domains *domains;
	int domains_size;
	int i;

	intel_port_domains_for_platform(display, &domains, &domains_size);
	for (i = 0; i < domains_size; i++)
		if (aux_ch >= domains[i].aux_ch_start && aux_ch <= domains[i].aux_ch_end)
			return &domains[i];

	return NULL;
}

enum intel_display_power_domain
intel_display_power_aux_io_domain(struct drm_i915_private *i915, enum aux_ch aux_ch)
{
	struct intel_display *display = &i915->display;
	const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(display, aux_ch);

	if (drm_WARN_ON(display->drm, !domains || domains->aux_io == POWER_DOMAIN_INVALID))
		return POWER_DOMAIN_AUX_IO_A;

	return domains->aux_io + (int)(aux_ch - domains->aux_ch_start);
}

enum intel_display_power_domain
intel_display_power_legacy_aux_domain(struct drm_i915_private *i915, enum aux_ch aux_ch)
{
	struct intel_display *display = &i915->display;
	const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(display, aux_ch);

	if (drm_WARN_ON(display->drm, !domains || domains->aux_legacy_usbc == POWER_DOMAIN_INVALID))
		return POWER_DOMAIN_AUX_A;

	return domains->aux_legacy_usbc + (int)(aux_ch - domains->aux_ch_start);
}

enum intel_display_power_domain
intel_display_power_tbt_aux_domain(struct drm_i915_private *i915, enum aux_ch aux_ch)
{
	struct intel_display *display = &i915->display;
	const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(display, aux_ch);

	if (drm_WARN_ON(display->drm, !domains || domains->aux_tbt == POWER_DOMAIN_INVALID))
		return POWER_DOMAIN_AUX_TBT1;

	return domains->aux_tbt + (int)(aux_ch - domains->aux_ch_start);
}