1 /* SPDX-License-Identifier: MIT */
2 /*
3 * Copyright © 2019 Intel Corporation
4 */
5
6 #include <linux/string_helpers.h>
7
8 #include "soc/intel_dram.h"
9
10 #include "i915_drv.h"
11 #include "i915_irq.h"
12 #include "i915_reg.h"
13 #include "intel_backlight_regs.h"
14 #include "intel_cdclk.h"
15 #include "intel_clock_gating.h"
16 #include "intel_combo_phy.h"
17 #include "intel_de.h"
18 #include "intel_display_power.h"
19 #include "intel_display_power_map.h"
20 #include "intel_display_power_well.h"
21 #include "intel_display_regs.h"
22 #include "intel_display_rpm.h"
23 #include "intel_display_types.h"
24 #include "intel_dmc.h"
25 #include "intel_mchbar_regs.h"
26 #include "intel_pch_refclk.h"
27 #include "intel_pcode.h"
28 #include "intel_pmdemand.h"
29 #include "intel_pps_regs.h"
30 #include "intel_snps_phy.h"
31 #include "skl_watermark.h"
32 #include "skl_watermark_regs.h"
33 #include "vlv_sideband.h"
34
35 #define for_each_power_domain_well(__display, __power_well, __domain) \
36 for_each_power_well((__display), __power_well) \
37 for_each_if(test_bit((__domain), (__power_well)->domains.bits))
38
39 #define for_each_power_domain_well_reverse(__display, __power_well, __domain) \
40 for_each_power_well_reverse((__display), __power_well) \
41 for_each_if(test_bit((__domain), (__power_well)->domains.bits))
42
43 static const char *
44 intel_display_power_domain_str(enum intel_display_power_domain domain)
45 {
46 switch (domain) {
47 case POWER_DOMAIN_DISPLAY_CORE:
48 return "DISPLAY_CORE";
49 case POWER_DOMAIN_PIPE_A:
50 return "PIPE_A";
51 case POWER_DOMAIN_PIPE_B:
52 return "PIPE_B";
53 case POWER_DOMAIN_PIPE_C:
54 return "PIPE_C";
55 case POWER_DOMAIN_PIPE_D:
56 return "PIPE_D";
57 case POWER_DOMAIN_PIPE_PANEL_FITTER_A:
58 return "PIPE_PANEL_FITTER_A";
59 case POWER_DOMAIN_PIPE_PANEL_FITTER_B:
60 return "PIPE_PANEL_FITTER_B";
61 case POWER_DOMAIN_PIPE_PANEL_FITTER_C:
62 return "PIPE_PANEL_FITTER_C";
63 case POWER_DOMAIN_PIPE_PANEL_FITTER_D:
64 return "PIPE_PANEL_FITTER_D";
65 case POWER_DOMAIN_TRANSCODER_A:
66 return "TRANSCODER_A";
67 case POWER_DOMAIN_TRANSCODER_B:
68 return "TRANSCODER_B";
69 case POWER_DOMAIN_TRANSCODER_C:
70 return "TRANSCODER_C";
71 case POWER_DOMAIN_TRANSCODER_D:
72 return "TRANSCODER_D";
73 case POWER_DOMAIN_TRANSCODER_EDP:
74 return "TRANSCODER_EDP";
75 case POWER_DOMAIN_TRANSCODER_DSI_A:
76 return "TRANSCODER_DSI_A";
77 case POWER_DOMAIN_TRANSCODER_DSI_C:
78 return "TRANSCODER_DSI_C";
79 case POWER_DOMAIN_TRANSCODER_VDSC_PW2:
80 return "TRANSCODER_VDSC_PW2";
81 case POWER_DOMAIN_PORT_DDI_LANES_A:
82 return "PORT_DDI_LANES_A";
83 case POWER_DOMAIN_PORT_DDI_LANES_B:
84 return "PORT_DDI_LANES_B";
85 case POWER_DOMAIN_PORT_DDI_LANES_C:
86 return "PORT_DDI_LANES_C";
87 case POWER_DOMAIN_PORT_DDI_LANES_D:
88 return "PORT_DDI_LANES_D";
89 case POWER_DOMAIN_PORT_DDI_LANES_E:
90 return "PORT_DDI_LANES_E";
91 case POWER_DOMAIN_PORT_DDI_LANES_F:
92 return "PORT_DDI_LANES_F";
93 case POWER_DOMAIN_PORT_DDI_LANES_TC1:
94 return "PORT_DDI_LANES_TC1";
95 case POWER_DOMAIN_PORT_DDI_LANES_TC2:
96 return "PORT_DDI_LANES_TC2";
97 case POWER_DOMAIN_PORT_DDI_LANES_TC3:
98 return "PORT_DDI_LANES_TC3";
99 case POWER_DOMAIN_PORT_DDI_LANES_TC4:
100 return "PORT_DDI_LANES_TC4";
101 case POWER_DOMAIN_PORT_DDI_LANES_TC5:
102 return "PORT_DDI_LANES_TC5";
103 case POWER_DOMAIN_PORT_DDI_LANES_TC6:
104 return "PORT_DDI_LANES_TC6";
105 case POWER_DOMAIN_PORT_DDI_IO_A:
106 return "PORT_DDI_IO_A";
107 case POWER_DOMAIN_PORT_DDI_IO_B:
108 return "PORT_DDI_IO_B";
109 case POWER_DOMAIN_PORT_DDI_IO_C:
110 return "PORT_DDI_IO_C";
111 case POWER_DOMAIN_PORT_DDI_IO_D:
112 return "PORT_DDI_IO_D";
113 case POWER_DOMAIN_PORT_DDI_IO_E:
114 return "PORT_DDI_IO_E";
115 case POWER_DOMAIN_PORT_DDI_IO_F:
116 return "PORT_DDI_IO_F";
117 case POWER_DOMAIN_PORT_DDI_IO_TC1:
118 return "PORT_DDI_IO_TC1";
119 case POWER_DOMAIN_PORT_DDI_IO_TC2:
120 return "PORT_DDI_IO_TC2";
121 case POWER_DOMAIN_PORT_DDI_IO_TC3:
122 return "PORT_DDI_IO_TC3";
123 case POWER_DOMAIN_PORT_DDI_IO_TC4:
124 return "PORT_DDI_IO_TC4";
125 case POWER_DOMAIN_PORT_DDI_IO_TC5:
126 return "PORT_DDI_IO_TC5";
127 case POWER_DOMAIN_PORT_DDI_IO_TC6:
128 return "PORT_DDI_IO_TC6";
129 case POWER_DOMAIN_PORT_DSI:
130 return "PORT_DSI";
131 case POWER_DOMAIN_PORT_CRT:
132 return "PORT_CRT";
133 case POWER_DOMAIN_PORT_OTHER:
134 return "PORT_OTHER";
135 case POWER_DOMAIN_VGA:
136 return "VGA";
137 case POWER_DOMAIN_AUDIO_MMIO:
138 return "AUDIO_MMIO";
139 case POWER_DOMAIN_AUDIO_PLAYBACK:
140 return "AUDIO_PLAYBACK";
141 case POWER_DOMAIN_AUX_IO_A:
142 return "AUX_IO_A";
143 case POWER_DOMAIN_AUX_IO_B:
144 return "AUX_IO_B";
145 case POWER_DOMAIN_AUX_IO_C:
146 return "AUX_IO_C";
147 case POWER_DOMAIN_AUX_IO_D:
148 return "AUX_IO_D";
149 case POWER_DOMAIN_AUX_IO_E:
150 return "AUX_IO_E";
151 case POWER_DOMAIN_AUX_IO_F:
152 return "AUX_IO_F";
153 case POWER_DOMAIN_AUX_A:
154 return "AUX_A";
155 case POWER_DOMAIN_AUX_B:
156 return "AUX_B";
157 case POWER_DOMAIN_AUX_C:
158 return "AUX_C";
159 case POWER_DOMAIN_AUX_D:
160 return "AUX_D";
161 case POWER_DOMAIN_AUX_E:
162 return "AUX_E";
163 case POWER_DOMAIN_AUX_F:
164 return "AUX_F";
165 case POWER_DOMAIN_AUX_USBC1:
166 return "AUX_USBC1";
167 case POWER_DOMAIN_AUX_USBC2:
168 return "AUX_USBC2";
169 case POWER_DOMAIN_AUX_USBC3:
170 return "AUX_USBC3";
171 case POWER_DOMAIN_AUX_USBC4:
172 return "AUX_USBC4";
173 case POWER_DOMAIN_AUX_USBC5:
174 return "AUX_USBC5";
175 case POWER_DOMAIN_AUX_USBC6:
176 return "AUX_USBC6";
177 case POWER_DOMAIN_AUX_TBT1:
178 return "AUX_TBT1";
179 case POWER_DOMAIN_AUX_TBT2:
180 return "AUX_TBT2";
181 case POWER_DOMAIN_AUX_TBT3:
182 return "AUX_TBT3";
183 case POWER_DOMAIN_AUX_TBT4:
184 return "AUX_TBT4";
185 case POWER_DOMAIN_AUX_TBT5:
186 return "AUX_TBT5";
187 case POWER_DOMAIN_AUX_TBT6:
188 return "AUX_TBT6";
189 case POWER_DOMAIN_GMBUS:
190 return "GMBUS";
191 case POWER_DOMAIN_INIT:
192 return "INIT";
193 case POWER_DOMAIN_GT_IRQ:
194 return "GT_IRQ";
195 case POWER_DOMAIN_DC_OFF:
196 return "DC_OFF";
197 case POWER_DOMAIN_TC_COLD_OFF:
198 return "TC_COLD_OFF";
199 default:
200 MISSING_CASE(domain);
201 return "?";
202 }
203 }
204
205 static bool __intel_display_power_is_enabled(struct intel_display *display,
206 enum intel_display_power_domain domain)
207 {
208 struct i915_power_well *power_well;
209 bool is_enabled;
210
211 if (intel_display_rpm_suspended(display))
212 return false;
213
214 is_enabled = true;
215
216 for_each_power_domain_well_reverse(display, power_well, domain) {
217 if (intel_power_well_is_always_on(power_well))
218 continue;
219
220 if (!intel_power_well_is_enabled_cached(power_well)) {
221 is_enabled = false;
222 break;
223 }
224 }
225
226 return is_enabled;
227 }
228
229 /**
230 * intel_display_power_is_enabled - check for a power domain
231 * @display: display device instance
232 * @domain: power domain to check
233 *
234 * This function can be used to check the hw power domain state. It is mostly
235 * used in hardware state readout functions. Everywhere else code should rely
236 * upon explicit power domain reference counting to ensure that the hardware
237 * block is powered up before accessing it.
238 *
239 * Callers must hold the relevant modesetting locks to ensure that concurrent
240 * threads can't disable the power well while the caller tries to read a few
241 * registers.
242 *
243 * Returns:
244 * True when the power domain is enabled, false otherwise.
245 */
246 bool intel_display_power_is_enabled(struct intel_display *display,
247 enum intel_display_power_domain domain)
248 {
249 struct i915_power_domains *power_domains = &display->power.domains;
250 bool ret;
251
252 mutex_lock(&power_domains->lock);
253 ret = __intel_display_power_is_enabled(display, domain);
254 mutex_unlock(&power_domains->lock);
255
256 return ret;
257 }
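/*
 * Illustrative sketch (not part of the driver): a typical hardware state
 * readout style check built on top of intel_display_power_is_enabled(). The
 * helper name and the chosen domain are hypothetical placeholders.
 */
static bool __maybe_unused example_pipe_a_readout_possible(struct intel_display *display)
{
	/* Callers are assumed to hold the relevant modeset locks (see above). */
	return intel_display_power_is_enabled(display, POWER_DOMAIN_PIPE_A);
}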
258
259 static u32
260 sanitize_target_dc_state(struct intel_display *display,
261 u32 target_dc_state)
262 {
263 struct i915_power_domains *power_domains = &display->power.domains;
264 static const u32 states[] = {
265 DC_STATE_EN_UPTO_DC6,
266 DC_STATE_EN_UPTO_DC5,
267 DC_STATE_EN_DC3CO,
268 DC_STATE_DISABLE,
269 };
270 int i;
271
272 for (i = 0; i < ARRAY_SIZE(states) - 1; i++) {
273 if (target_dc_state != states[i])
274 continue;
275
276 if (power_domains->allowed_dc_mask & target_dc_state)
277 break;
278
279 target_dc_state = states[i + 1];
280 }
281
282 return target_dc_state;
283 }
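/*
 * Worked example (illustrative): if the caller asks for DC_STATE_EN_UPTO_DC6
 * but only DC5 is in allowed_dc_mask, the loop above falls back to
 * DC_STATE_EN_UPTO_DC5; if none of DC6, DC5 or DC3CO is allowed, the result
 * is DC_STATE_DISABLE.
 */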
284
285 /**
286 * intel_display_power_set_target_dc_state - Set target dc state.
287 * @display: display device
288 * @state: state which needs to be set as target_dc_state.
289 *
290 * This function sets the "DC off" power well target_dc_state;
291 * based upon this target_dc_state, the "DC off" power well will
292 * enable the desired DC state.
293 */
294 void intel_display_power_set_target_dc_state(struct intel_display *display,
295 u32 state)
296 {
297 struct i915_power_well *power_well;
298 bool dc_off_enabled;
299 struct i915_power_domains *power_domains = &display->power.domains;
300
301 mutex_lock(&power_domains->lock);
302 power_well = lookup_power_well(display, SKL_DISP_DC_OFF);
303
304 if (drm_WARN_ON(display->drm, !power_well))
305 goto unlock;
306
307 state = sanitize_target_dc_state(display, state);
308
309 if (state == power_domains->target_dc_state)
310 goto unlock;
311
312 dc_off_enabled = intel_power_well_is_enabled(display, power_well);
313 /*
314 * If DC off power well is disabled, need to enable and disable the
315 * DC off power well to effect target DC state.
316 */
317 if (!dc_off_enabled)
318 intel_power_well_enable(display, power_well);
319
320 power_domains->target_dc_state = state;
321
322 if (!dc_off_enabled)
323 intel_power_well_disable(display, power_well);
324
325 unlock:
326 mutex_unlock(&power_domains->lock);
327 }
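/*
 * Illustrative sketch (not part of the driver): a feature such as DC3CO
 * support might switch the target DC state and later restore the default.
 * The helper name is a hypothetical placeholder; only the call into this
 * file is real.
 */
static void __maybe_unused example_toggle_dc3co(struct intel_display *display, bool enable)
{
	intel_display_power_set_target_dc_state(display,
						enable ? DC_STATE_EN_DC3CO :
						DC_STATE_EN_UPTO_DC6);
}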
328
329 /**
330 * intel_display_power_get_current_dc_state - Get the current DC state
331 * @display: display device
332 *
333 * This function returns the current DC state of the "DC off" power well:
334 * DC_STATE_DISABLE while that power well is enabled, and the configured
335 * target DC state otherwise.
336 */
337 u32 intel_display_power_get_current_dc_state(struct intel_display *display)
338 {
339 struct i915_power_well *power_well;
340 struct i915_power_domains *power_domains = &display->power.domains;
341 u32 current_dc_state = DC_STATE_DISABLE;
342
343 mutex_lock(&power_domains->lock);
344 power_well = lookup_power_well(display, SKL_DISP_DC_OFF);
345
346 if (drm_WARN_ON(display->drm, !power_well))
347 goto unlock;
348
349 current_dc_state = intel_power_well_is_enabled(display, power_well) ?
350 DC_STATE_DISABLE : power_domains->target_dc_state;
351
352 unlock:
353 mutex_unlock(&power_domains->lock);
354
355 return current_dc_state;
356 }
357
358 static void __async_put_domains_mask(struct i915_power_domains *power_domains,
359 struct intel_power_domain_mask *mask)
360 {
361 bitmap_or(mask->bits,
362 power_domains->async_put_domains[0].bits,
363 power_domains->async_put_domains[1].bits,
364 POWER_DOMAIN_NUM);
365 }
366
367 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
368
369 static bool
370 assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
371 {
372 struct intel_display *display = container_of(power_domains,
373 struct intel_display,
374 power.domains);
375
376 return !drm_WARN_ON(display->drm,
377 bitmap_intersects(power_domains->async_put_domains[0].bits,
378 power_domains->async_put_domains[1].bits,
379 POWER_DOMAIN_NUM));
380 }
381
382 static bool
383 __async_put_domains_state_ok(struct i915_power_domains *power_domains)
384 {
385 struct intel_display *display = container_of(power_domains,
386 struct intel_display,
387 power.domains);
388 struct intel_power_domain_mask async_put_mask;
389 enum intel_display_power_domain domain;
390 bool err = false;
391
392 err |= !assert_async_put_domain_masks_disjoint(power_domains);
393 __async_put_domains_mask(power_domains, &async_put_mask);
394 err |= drm_WARN_ON(display->drm,
395 !!power_domains->async_put_wakeref !=
396 !bitmap_empty(async_put_mask.bits, POWER_DOMAIN_NUM));
397
398 for_each_power_domain(domain, &async_put_mask)
399 err |= drm_WARN_ON(display->drm,
400 power_domains->domain_use_count[domain] != 1);
401
402 return !err;
403 }
404
405 static void print_power_domains(struct i915_power_domains *power_domains,
406 const char *prefix, struct intel_power_domain_mask *mask)
407 {
408 struct intel_display *display = container_of(power_domains,
409 struct intel_display,
410 power.domains);
411 enum intel_display_power_domain domain;
412
413 drm_dbg_kms(display->drm, "%s (%d):\n", prefix, bitmap_weight(mask->bits, POWER_DOMAIN_NUM));
414 for_each_power_domain(domain, mask)
415 drm_dbg_kms(display->drm, "%s use_count %d\n",
416 intel_display_power_domain_str(domain),
417 power_domains->domain_use_count[domain]);
418 }
419
420 static void
421 print_async_put_domains_state(struct i915_power_domains *power_domains)
422 {
423 struct intel_display *display = container_of(power_domains,
424 struct intel_display,
425 power.domains);
426
427 drm_dbg_kms(display->drm, "async_put_wakeref: %s\n",
428 str_yes_no(power_domains->async_put_wakeref));
429
430 print_power_domains(power_domains, "async_put_domains[0]",
431 &power_domains->async_put_domains[0]);
432 print_power_domains(power_domains, "async_put_domains[1]",
433 &power_domains->async_put_domains[1]);
434 }
435
436 static void
437 verify_async_put_domains_state(struct i915_power_domains *power_domains)
438 {
439 if (!__async_put_domains_state_ok(power_domains))
440 print_async_put_domains_state(power_domains);
441 }
442
443 #else
444
445 static void
446 assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
447 {
448 }
449
450 static void
451 verify_async_put_domains_state(struct i915_power_domains *power_domains)
452 {
453 }
454
455 #endif /* CONFIG_DRM_I915_DEBUG_RUNTIME_PM */
456
457 static void async_put_domains_mask(struct i915_power_domains *power_domains,
458 struct intel_power_domain_mask *mask)
459
460 {
461 assert_async_put_domain_masks_disjoint(power_domains);
462
463 __async_put_domains_mask(power_domains, mask);
464 }
465
466 static void
467 async_put_domains_clear_domain(struct i915_power_domains *power_domains,
468 enum intel_display_power_domain domain)
469 {
470 assert_async_put_domain_masks_disjoint(power_domains);
471
472 clear_bit(domain, power_domains->async_put_domains[0].bits);
473 clear_bit(domain, power_domains->async_put_domains[1].bits);
474 }
475
476 static void
477 cancel_async_put_work(struct i915_power_domains *power_domains, bool sync)
478 {
479 if (sync)
480 cancel_delayed_work_sync(&power_domains->async_put_work);
481 else
482 cancel_delayed_work(&power_domains->async_put_work);
483
484 power_domains->async_put_next_delay = 0;
485 }
486
487 static bool
488 intel_display_power_grab_async_put_ref(struct intel_display *display,
489 enum intel_display_power_domain domain)
490 {
491 struct i915_power_domains *power_domains = &display->power.domains;
492 struct intel_power_domain_mask async_put_mask;
493 bool ret = false;
494
495 async_put_domains_mask(power_domains, &async_put_mask);
496 if (!test_bit(domain, async_put_mask.bits))
497 goto out_verify;
498
499 async_put_domains_clear_domain(power_domains, domain);
500
501 ret = true;
502
503 async_put_domains_mask(power_domains, &async_put_mask);
504 if (!bitmap_empty(async_put_mask.bits, POWER_DOMAIN_NUM))
505 goto out_verify;
506
507 cancel_async_put_work(power_domains, false);
508 intel_display_rpm_put_raw(display,
509 fetch_and_zero(&power_domains->async_put_wakeref));
510 out_verify:
511 verify_async_put_domains_state(power_domains);
512
513 return ret;
514 }
515
516 static void
517 __intel_display_power_get_domain(struct intel_display *display,
518 enum intel_display_power_domain domain)
519 {
520 struct i915_power_domains *power_domains = &display->power.domains;
521 struct i915_power_well *power_well;
522
523 if (intel_display_power_grab_async_put_ref(display, domain))
524 return;
525
526 for_each_power_domain_well(display, power_well, domain)
527 intel_power_well_get(display, power_well);
528
529 power_domains->domain_use_count[domain]++;
530 }
531
532 /**
533 * intel_display_power_get - grab a power domain reference
534 * @display: display device instance
535 * @domain: power domain to reference
536 *
537 * This function grabs a power domain reference for @domain and ensures that the
538 * power domain and all its parents are powered up. Therefore users should only
539 * grab a reference to the innermost power domain they need.
540 *
541 * Any power domain reference obtained by this function must have a symmetric
542 * call to intel_display_power_put() to release the reference again.
543 */
544 intel_wakeref_t intel_display_power_get(struct intel_display *display,
545 enum intel_display_power_domain domain)
546 {
547 struct i915_power_domains *power_domains = &display->power.domains;
548 struct ref_tracker *wakeref;
549
550 wakeref = intel_display_rpm_get(display);
551
552 mutex_lock(&power_domains->lock);
553 __intel_display_power_get_domain(display, domain);
554 mutex_unlock(&power_domains->lock);
555
556 return wakeref;
557 }
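/*
 * Illustrative sketch (not part of the driver): the canonical get/put pattern
 * around a hardware access. The helper name, the chosen domain and the
 * elided register access are hypothetical placeholders.
 */
static void __maybe_unused example_access_aux_a(struct intel_display *display)
{
	intel_wakeref_t wakeref;

	wakeref = intel_display_power_get(display, POWER_DOMAIN_AUX_A);

	/* ... access hardware that requires the AUX_A power domain ... */

	intel_display_power_put(display, POWER_DOMAIN_AUX_A, wakeref);
}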
558
559 /**
560 * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
561 * @display: display device instance
562 * @domain: power domain to reference
563 *
564 * This function grabs a power domain reference for @domain only if the power
565 * domain is currently enabled, and returns NULL otherwise. Users should only
566 * grab a reference to the innermost power domain they need.
567 *
568 * Any power domain reference obtained by this function must have a symmetric
569 * call to intel_display_power_put() to release the reference again.
570 */
571 intel_wakeref_t
572 intel_display_power_get_if_enabled(struct intel_display *display,
573 enum intel_display_power_domain domain)
574 {
575 struct i915_power_domains *power_domains = &display->power.domains;
576 struct ref_tracker *wakeref;
577 bool is_enabled;
578
579 wakeref = intel_display_rpm_get_if_in_use(display);
580 if (!wakeref)
581 return NULL;
582
583 mutex_lock(&power_domains->lock);
584
585 if (__intel_display_power_is_enabled(display, domain)) {
586 __intel_display_power_get_domain(display, domain);
587 is_enabled = true;
588 } else {
589 is_enabled = false;
590 }
591
592 mutex_unlock(&power_domains->lock);
593
594 if (!is_enabled) {
595 intel_display_rpm_put(display, wakeref);
596 wakeref = NULL;
597 }
598
599 return wakeref;
600 }
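/*
 * Illustrative sketch (not part of the driver): the conditional-get pattern
 * used by state readout code, which must not power anything up. The helper
 * name, domain and elided register reads are hypothetical placeholders.
 */
static bool __maybe_unused example_readout_transcoder_a(struct intel_display *display)
{
	intel_wakeref_t wakeref;
	bool active;

	wakeref = intel_display_power_get_if_enabled(display, POWER_DOMAIN_TRANSCODER_A);
	if (!wakeref)
		return false;

	/* ... read registers that require the TRANSCODER_A power domain ... */
	/* 'active' would normally come from the register reads elided above. */
	active = true;

	intel_display_power_put(display, POWER_DOMAIN_TRANSCODER_A, wakeref);

	return active;
}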
601
602 static void
603 __intel_display_power_put_domain(struct intel_display *display,
604 enum intel_display_power_domain domain)
605 {
606 struct i915_power_domains *power_domains = &display->power.domains;
607 struct i915_power_well *power_well;
608 const char *name = intel_display_power_domain_str(domain);
609 struct intel_power_domain_mask async_put_mask;
610
611 drm_WARN(display->drm, !power_domains->domain_use_count[domain],
612 "Use count on domain %s is already zero\n",
613 name);
614 async_put_domains_mask(power_domains, &async_put_mask);
615 drm_WARN(display->drm,
616 test_bit(domain, async_put_mask.bits),
617 "Async disabling of domain %s is pending\n",
618 name);
619
620 power_domains->domain_use_count[domain]--;
621
622 for_each_power_domain_well_reverse(display, power_well, domain)
623 intel_power_well_put(display, power_well);
624 }
625
626 static void __intel_display_power_put(struct intel_display *display,
627 enum intel_display_power_domain domain)
628 {
629 struct i915_power_domains *power_domains = &display->power.domains;
630
631 mutex_lock(&power_domains->lock);
632 __intel_display_power_put_domain(display, domain);
633 mutex_unlock(&power_domains->lock);
634 }
635
636 static void
637 queue_async_put_domains_work(struct i915_power_domains *power_domains,
638 intel_wakeref_t wakeref,
639 int delay_ms)
640 {
641 struct intel_display *display = container_of(power_domains,
642 struct intel_display,
643 power.domains);
644 drm_WARN_ON(display->drm, power_domains->async_put_wakeref);
645 power_domains->async_put_wakeref = wakeref;
646 drm_WARN_ON(display->drm, !queue_delayed_work(system_unbound_wq,
647 &power_domains->async_put_work,
648 msecs_to_jiffies(delay_ms)));
649 }
650
651 static void
652 release_async_put_domains(struct i915_power_domains *power_domains,
653 struct intel_power_domain_mask *mask)
654 {
655 struct intel_display *display = container_of(power_domains,
656 struct intel_display,
657 power.domains);
658 enum intel_display_power_domain domain;
659 struct ref_tracker *wakeref;
660
661 wakeref = intel_display_rpm_get_noresume(display);
662
663 for_each_power_domain(domain, mask) {
664 /* Clear before put, so put's sanity check is happy. */
665 async_put_domains_clear_domain(power_domains, domain);
666 __intel_display_power_put_domain(display, domain);
667 }
668
669 intel_display_rpm_put(display, wakeref);
670 }
671
672 static void
673 intel_display_power_put_async_work(struct work_struct *work)
674 {
675 struct intel_display *display = container_of(work, struct intel_display,
676 power.domains.async_put_work.work);
677 struct i915_power_domains *power_domains = &display->power.domains;
678 struct ref_tracker *new_work_wakeref, *old_work_wakeref = NULL;
679
680 new_work_wakeref = intel_display_rpm_get_raw(display);
681
682 mutex_lock(&power_domains->lock);
683
684 /*
685 * Bail out if all the domain refs pending to be released were grabbed
686 * by subsequent gets or a flush_work.
687 */
688 old_work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
689 if (!old_work_wakeref)
690 goto out_verify;
691
692 release_async_put_domains(power_domains,
693 &power_domains->async_put_domains[0]);
694
695 /*
696 * Cancel the work that got queued after this one got dequeued,
697 * since here we released the corresponding async-put reference.
698 */
699 cancel_async_put_work(power_domains, false);
700
701 /* Requeue the work if more domains were async put meanwhile. */
702 if (!bitmap_empty(power_domains->async_put_domains[1].bits, POWER_DOMAIN_NUM)) {
703 bitmap_copy(power_domains->async_put_domains[0].bits,
704 power_domains->async_put_domains[1].bits,
705 POWER_DOMAIN_NUM);
706 bitmap_zero(power_domains->async_put_domains[1].bits,
707 POWER_DOMAIN_NUM);
708 queue_async_put_domains_work(power_domains,
709 fetch_and_zero(&new_work_wakeref),
710 power_domains->async_put_next_delay);
711 power_domains->async_put_next_delay = 0;
712 }
713
714 out_verify:
715 verify_async_put_domains_state(power_domains);
716
717 mutex_unlock(&power_domains->lock);
718
719 if (old_work_wakeref)
720 intel_display_rpm_put_raw(display, old_work_wakeref);
721 if (new_work_wakeref)
722 intel_display_rpm_put_raw(display, new_work_wakeref);
723 }
724
725 /**
726 * __intel_display_power_put_async - release a power domain reference asynchronously
727 * @display: display device instance
728 * @domain: power domain to reference
729 * @wakeref: wakeref acquired for the reference that is being released
730 * @delay_ms: delay of powering down the power domain
731 *
732 * This function drops the power domain reference obtained by
733 * intel_display_power_get*() and schedules a work to power down the
734 * corresponding hardware block if this is the last reference.
735 * The power down is delayed by @delay_ms if this is >= 0, or by a default
736 * 100 ms otherwise.
737 */
738 void __intel_display_power_put_async(struct intel_display *display,
739 enum intel_display_power_domain domain,
740 intel_wakeref_t wakeref,
741 int delay_ms)
742 {
743 struct i915_power_domains *power_domains = &display->power.domains;
744 struct ref_tracker *work_wakeref;
745
746 work_wakeref = intel_display_rpm_get_raw(display);
747
748 delay_ms = delay_ms >= 0 ? delay_ms : 100;
749
750 mutex_lock(&power_domains->lock);
751
752 if (power_domains->domain_use_count[domain] > 1) {
753 __intel_display_power_put_domain(display, domain);
754
755 goto out_verify;
756 }
757
758 drm_WARN_ON(display->drm, power_domains->domain_use_count[domain] != 1);
759
760 /* Let a pending work requeue itself or queue a new one. */
761 if (power_domains->async_put_wakeref) {
762 set_bit(domain, power_domains->async_put_domains[1].bits);
763 power_domains->async_put_next_delay = max(power_domains->async_put_next_delay,
764 delay_ms);
765 } else {
766 set_bit(domain, power_domains->async_put_domains[0].bits);
767 queue_async_put_domains_work(power_domains,
768 fetch_and_zero(&work_wakeref),
769 delay_ms);
770 }
771
772 out_verify:
773 verify_async_put_domains_state(power_domains);
774
775 mutex_unlock(&power_domains->lock);
776
777 if (work_wakeref)
778 intel_display_rpm_put_raw(display, work_wakeref);
779
780 intel_display_rpm_put(display, wakeref);
781 }
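/*
 * Illustrative sketch (not part of the driver): dropping a reference with a
 * delayed power down, e.g. to avoid toggling a power well between
 * back-to-back accesses. Callers normally use a wrapper from the header; the
 * direct call below only illustrates the delay argument. The helper name and
 * domain are hypothetical placeholders.
 */
static void __maybe_unused example_put_aux_a_async(struct intel_display *display,
						   intel_wakeref_t wakeref)
{
	/* Power down AUX_A no earlier than ~20 ms from now, unless re-acquired. */
	__intel_display_power_put_async(display, POWER_DOMAIN_AUX_A, wakeref, 20);
}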
782
783 /**
784 * intel_display_power_flush_work - flushes the async display power disabling work
785 * @display: display device instance
786 *
787 * Flushes any pending work that was scheduled by a preceding
788 * intel_display_power_put_async() call, completing the disabling of the
789 * corresponding power domains.
790 *
791 * Note that the work handler function may still be running after this
792 * function returns; to ensure that the work handler isn't running use
793 * intel_display_power_flush_work_sync() instead.
794 */
795 void intel_display_power_flush_work(struct intel_display *display)
796 {
797 struct i915_power_domains *power_domains = &display->power.domains;
798 struct intel_power_domain_mask async_put_mask;
799 intel_wakeref_t work_wakeref;
800
801 mutex_lock(&power_domains->lock);
802
803 work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
804 if (!work_wakeref)
805 goto out_verify;
806
807 async_put_domains_mask(power_domains, &async_put_mask);
808 release_async_put_domains(power_domains, &async_put_mask);
809 cancel_async_put_work(power_domains, false);
810
811 out_verify:
812 verify_async_put_domains_state(power_domains);
813
814 mutex_unlock(&power_domains->lock);
815
816 if (work_wakeref)
817 intel_display_rpm_put_raw(display, work_wakeref);
818 }
819
820 /**
821 * intel_display_power_flush_work_sync - flushes and syncs the async display power disabling work
822 * @display: display device instance
823 *
824 * Like intel_display_power_flush_work(), but also ensure that the work
825 * handler function is not running any more when this function returns.
826 */
827 static void
828 intel_display_power_flush_work_sync(struct intel_display *display)
829 {
830 struct i915_power_domains *power_domains = &display->power.domains;
831
832 intel_display_power_flush_work(display);
833 cancel_async_put_work(power_domains, true);
834
835 verify_async_put_domains_state(power_domains);
836
837 drm_WARN_ON(display->drm, power_domains->async_put_wakeref);
838 }
839
840 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
841 /**
842 * intel_display_power_put - release a power domain reference
843 * @display: display device instance
844 * @domain: power domain to reference
845 * @wakeref: wakeref acquired for the reference that is being released
846 *
847 * This function drops the power domain reference obtained by
848 * intel_display_power_get() and might power down the corresponding hardware
849 * block right away if this is the last reference.
850 */
851 void intel_display_power_put(struct intel_display *display,
852 enum intel_display_power_domain domain,
853 intel_wakeref_t wakeref)
854 {
855 __intel_display_power_put(display, domain);
856 intel_display_rpm_put(display, wakeref);
857 }
858 #else
859 /**
860 * intel_display_power_put_unchecked - release an unchecked power domain reference
861 * @display: display device instance
862 * @domain: power domain to reference
863 *
864 * This function drops the power domain reference obtained by
865 * intel_display_power_get() and might power down the corresponding hardware
866 * block right away if this is the last reference.
867 *
868 * This function is only for the power domain code's internal use to suppress wakeref
869 * tracking when the corresponding debug kconfig option is disabled, and should not
870 * be used otherwise.
871 */
872 void intel_display_power_put_unchecked(struct intel_display *display,
873 enum intel_display_power_domain domain)
874 {
875 __intel_display_power_put(display, domain);
876 intel_display_rpm_put_unchecked(display);
877 }
878 #endif
879
880 void
881 intel_display_power_get_in_set(struct intel_display *display,
882 struct intel_display_power_domain_set *power_domain_set,
883 enum intel_display_power_domain domain)
884 {
885 intel_wakeref_t __maybe_unused wf;
886
887 drm_WARN_ON(display->drm, test_bit(domain, power_domain_set->mask.bits));
888
889 wf = intel_display_power_get(display, domain);
890 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
891 power_domain_set->wakerefs[domain] = wf;
892 #endif
893 set_bit(domain, power_domain_set->mask.bits);
894 }
895
896 bool
897 intel_display_power_get_in_set_if_enabled(struct intel_display *display,
898 struct intel_display_power_domain_set *power_domain_set,
899 enum intel_display_power_domain domain)
900 {
901 intel_wakeref_t wf;
902
903 drm_WARN_ON(display->drm, test_bit(domain, power_domain_set->mask.bits));
904
905 wf = intel_display_power_get_if_enabled(display, domain);
906 if (!wf)
907 return false;
908
909 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
910 power_domain_set->wakerefs[domain] = wf;
911 #endif
912 set_bit(domain, power_domain_set->mask.bits);
913
914 return true;
915 }
916
917 void
918 intel_display_power_put_mask_in_set(struct intel_display *display,
919 struct intel_display_power_domain_set *power_domain_set,
920 struct intel_power_domain_mask *mask)
921 {
922 enum intel_display_power_domain domain;
923
924 drm_WARN_ON(display->drm,
925 !bitmap_subset(mask->bits, power_domain_set->mask.bits, POWER_DOMAIN_NUM));
926
927 for_each_power_domain(domain, mask) {
928 intel_wakeref_t __maybe_unused wf = INTEL_WAKEREF_DEF;
929
930 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
931 wf = fetch_and_zero(&power_domain_set->wakerefs[domain]);
932 #endif
933 intel_display_power_put(display, domain, wf);
934 clear_bit(domain, power_domain_set->mask.bits);
935 }
936 }
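/*
 * Illustrative sketch (not part of the driver): grabbing several domains into
 * a power domain set and releasing them all via the set's own mask. The
 * helper name, the on-stack set and the chosen domains are hypothetical
 * placeholders.
 */
static void __maybe_unused example_domain_set_usage(struct intel_display *display)
{
	struct intel_display_power_domain_set set = {};

	intel_display_power_get_in_set(display, &set, POWER_DOMAIN_PIPE_A);
	intel_display_power_get_in_set(display, &set, POWER_DOMAIN_TRANSCODER_A);

	/* ... access hardware covered by the domains in the set ... */

	/* Passing the set's own mask drops every reference taken above. */
	intel_display_power_put_mask_in_set(display, &set, &set.mask);
}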
937
938 static int
939 sanitize_disable_power_well_option(int disable_power_well)
940 {
941 if (disable_power_well >= 0)
942 return !!disable_power_well;
943
944 return 1;
945 }
946
947 static u32 get_allowed_dc_mask(struct intel_display *display, int enable_dc)
948 {
949 u32 mask;
950 int requested_dc;
951 int max_dc;
952
953 if (!HAS_DISPLAY(display))
954 return 0;
955
956 if (DISPLAY_VER(display) >= 20)
957 max_dc = 2;
958 else if (display->platform.dg2)
959 max_dc = 1;
960 else if (display->platform.dg1)
961 max_dc = 3;
962 else if (DISPLAY_VER(display) >= 12)
963 max_dc = 4;
964 else if (display->platform.geminilake || display->platform.broxton)
965 max_dc = 1;
966 else if (DISPLAY_VER(display) >= 9)
967 max_dc = 2;
968 else
969 max_dc = 0;
970
971 /*
972 * DC9 has a separate HW flow from the rest of the DC states,
973 * not depending on the DMC firmware. It's needed by system
974 * suspend/resume, so allow it unconditionally.
975 */
976 mask = display->platform.geminilake || display->platform.broxton ||
977 DISPLAY_VER(display) >= 11 ? DC_STATE_EN_DC9 : 0;
978
979 if (!display->params.disable_power_well)
980 max_dc = 0;
981
982 if (enable_dc >= 0 && enable_dc <= max_dc) {
983 requested_dc = enable_dc;
984 } else if (enable_dc == -1) {
985 requested_dc = max_dc;
986 } else if (enable_dc > max_dc && enable_dc <= 4) {
987 drm_dbg_kms(display->drm,
988 "Adjusting requested max DC state (%d->%d)\n",
989 enable_dc, max_dc);
990 requested_dc = max_dc;
991 } else {
992 drm_err(display->drm,
993 "Unexpected value for enable_dc (%d)\n", enable_dc);
994 requested_dc = max_dc;
995 }
996
997 switch (requested_dc) {
998 case 4:
999 mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6;
1000 break;
1001 case 3:
1002 mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC5;
1003 break;
1004 case 2:
1005 mask |= DC_STATE_EN_UPTO_DC6;
1006 break;
1007 case 1:
1008 mask |= DC_STATE_EN_UPTO_DC5;
1009 break;
1010 }
1011
1012 drm_dbg_kms(display->drm, "Allowed DC state mask %02x\n", mask);
1013
1014 return mask;
1015 }
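/*
 * Worked example (illustrative): on a DISPLAY_VER() 12 platform (other than
 * DG1/DG2) with enable_dc=-1 and disable_power_well left at its default,
 * max_dc is 4, so the returned mask is DC_STATE_EN_DC9 | DC_STATE_EN_DC3CO |
 * DC_STATE_EN_UPTO_DC6; with enable_dc=0 the mask is DC_STATE_EN_DC9 only.
 */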
1016
1017 /**
1018 * intel_power_domains_init - initializes the power domain structures
1019 * @display: display device instance
1020 *
1021 * Initializes the power domain structures for @display depending upon the
1022 * supported platform.
1023 */
1024 int intel_power_domains_init(struct intel_display *display)
1025 {
1026 struct i915_power_domains *power_domains = &display->power.domains;
1027
1028 display->params.disable_power_well =
1029 sanitize_disable_power_well_option(display->params.disable_power_well);
1030 power_domains->allowed_dc_mask =
1031 get_allowed_dc_mask(display, display->params.enable_dc);
1032
1033 power_domains->target_dc_state =
1034 sanitize_target_dc_state(display, DC_STATE_EN_UPTO_DC6);
1035
1036 mutex_init(&power_domains->lock);
1037
1038 INIT_DELAYED_WORK(&power_domains->async_put_work,
1039 intel_display_power_put_async_work);
1040
1041 return intel_display_power_map_init(power_domains);
1042 }
1043
1044 /**
1045 * intel_power_domains_cleanup - clean up power domains resources
1046 * @display: display device instance
1047 *
1048 * Release any resources acquired by intel_power_domains_init()
1049 */
1050 void intel_power_domains_cleanup(struct intel_display *display)
1051 {
1052 intel_display_power_map_cleanup(&display->power.domains);
1053 }
1054
1055 static void intel_power_domains_sync_hw(struct intel_display *display)
1056 {
1057 struct i915_power_domains *power_domains = &display->power.domains;
1058 struct i915_power_well *power_well;
1059
1060 mutex_lock(&power_domains->lock);
1061 for_each_power_well(display, power_well)
1062 intel_power_well_sync_hw(display, power_well);
1063 mutex_unlock(&power_domains->lock);
1064 }
1065
1066 static void gen9_dbuf_slice_set(struct intel_display *display,
1067 enum dbuf_slice slice, bool enable)
1068 {
1069 i915_reg_t reg = DBUF_CTL_S(slice);
1070 bool state;
1071
1072 intel_de_rmw(display, reg, DBUF_POWER_REQUEST,
1073 enable ? DBUF_POWER_REQUEST : 0);
1074 intel_de_posting_read(display, reg);
1075 udelay(10);
1076
1077 state = intel_de_read(display, reg) & DBUF_POWER_STATE;
1078 drm_WARN(display->drm, enable != state,
1079 "DBuf slice %d power %s timeout!\n",
1080 slice, str_enable_disable(enable));
1081 }
1082
1083 void gen9_dbuf_slices_update(struct intel_display *display,
1084 u8 req_slices)
1085 {
1086 struct i915_power_domains *power_domains = &display->power.domains;
1087 u8 slice_mask = DISPLAY_INFO(display)->dbuf.slice_mask;
1088 enum dbuf_slice slice;
1089
1090 drm_WARN(display->drm, req_slices & ~slice_mask,
1091 "Invalid set of dbuf slices (0x%x) requested (total dbuf slices 0x%x)\n",
1092 req_slices, slice_mask);
1093
1094 drm_dbg_kms(display->drm, "Updating dbuf slices to 0x%x\n",
1095 req_slices);
1096
1097 /*
1098 * This might run in parallel with gen9_dc_off_power_well_enable()
1099 * being called from intel_dp_detect() for instance, which would
1100 * trigger an assertion due to a race: gen9_assert_dbuf_enabled()
1101 * might preempt this after the registers were already updated,
1102 * but before the software dbuf state was.
1103 */
1104 mutex_lock(&power_domains->lock);
1105
1106 for_each_dbuf_slice(display, slice)
1107 gen9_dbuf_slice_set(display, slice, req_slices & BIT(slice));
1108
1109 display->dbuf.enabled_slices = req_slices;
1110
1111 mutex_unlock(&power_domains->lock);
1112 }
1113
1114 static void gen9_dbuf_enable(struct intel_display *display)
1115 {
1116 u8 slices_mask;
1117
1118 display->dbuf.enabled_slices = intel_enabled_dbuf_slices_mask(display);
1119
1120 slices_mask = BIT(DBUF_S1) | display->dbuf.enabled_slices;
1121
1122 if (DISPLAY_VER(display) >= 14)
1123 intel_pmdemand_program_dbuf(display, slices_mask);
1124
1125 /*
1126 * Just power up at least 1 slice, we will
1127 * figure out later which slices we have and what we need.
1128 */
1129 gen9_dbuf_slices_update(display, slices_mask);
1130 }
1131
1132 static void gen9_dbuf_disable(struct intel_display *display)
1133 {
1134 gen9_dbuf_slices_update(display, 0);
1135
1136 if (DISPLAY_VER(display) >= 14)
1137 intel_pmdemand_program_dbuf(display, 0);
1138 }
1139
1140 static void gen12_dbuf_slices_config(struct intel_display *display)
1141 {
1142 enum dbuf_slice slice;
1143
1144 for_each_dbuf_slice(display, slice)
1145 intel_de_rmw(display, DBUF_CTL_S(slice),
1146 DBUF_TRACKER_STATE_SERVICE_MASK,
1147 DBUF_TRACKER_STATE_SERVICE(8));
1148 }
1149
1150 static void icl_mbus_init(struct intel_display *display)
1151 {
1152 unsigned long abox_regs = DISPLAY_INFO(display)->abox_mask;
1153 u32 mask, val, i;
1154
1155 if (display->platform.alderlake_p || DISPLAY_VER(display) >= 14)
1156 return;
1157
1158 mask = MBUS_ABOX_BT_CREDIT_POOL1_MASK |
1159 MBUS_ABOX_BT_CREDIT_POOL2_MASK |
1160 MBUS_ABOX_B_CREDIT_MASK |
1161 MBUS_ABOX_BW_CREDIT_MASK;
1162 val = MBUS_ABOX_BT_CREDIT_POOL1(16) |
1163 MBUS_ABOX_BT_CREDIT_POOL2(16) |
1164 MBUS_ABOX_B_CREDIT(1) |
1165 MBUS_ABOX_BW_CREDIT(1);
1166
1167 /*
1168 * gen12 platforms that use abox1 and abox2 for pixel data reads still
1169 * expect us to program the abox_ctl0 register as well, even though
1170 * we don't have to program other instance-0 registers like BW_BUDDY.
1171 */
1172 if (DISPLAY_VER(display) == 12)
1173 abox_regs |= BIT(0);
1174
1175 for_each_set_bit(i, &abox_regs, BITS_PER_TYPE(abox_regs))
1176 intel_de_rmw(display, MBUS_ABOX_CTL(i), mask, val);
1177 }
1178
1179 static void hsw_assert_cdclk(struct intel_display *display)
1180 {
1181 u32 val = intel_de_read(display, LCPLL_CTL);
1182
1183 /*
1184 * The LCPLL register should be turned on by the BIOS. For now
1185 * let's just check its state and print errors in case
1186 * something is wrong. Don't even try to turn it on.
1187 */
1188
1189 if (val & LCPLL_CD_SOURCE_FCLK)
1190 drm_err(display->drm, "CDCLK source is not LCPLL\n");
1191
1192 if (val & LCPLL_PLL_DISABLE)
1193 drm_err(display->drm, "LCPLL is disabled\n");
1194
1195 if ((val & LCPLL_REF_MASK) != LCPLL_REF_NON_SSC)
1196 drm_err(display->drm, "LCPLL not using non-SSC reference\n");
1197 }
1198
1199 static void assert_can_disable_lcpll(struct intel_display *display)
1200 {
1201 struct drm_i915_private *dev_priv = to_i915(display->drm);
1202 struct intel_crtc *crtc;
1203
1204 for_each_intel_crtc(display->drm, crtc)
1205 INTEL_DISPLAY_STATE_WARN(display, crtc->active,
1206 "CRTC for pipe %c enabled\n",
1207 pipe_name(crtc->pipe));
1208
1209 INTEL_DISPLAY_STATE_WARN(display, intel_de_read(display, HSW_PWR_WELL_CTL2),
1210 "Display power well on\n");
1211 INTEL_DISPLAY_STATE_WARN(display,
1212 intel_de_read(display, SPLL_CTL) & SPLL_PLL_ENABLE,
1213 "SPLL enabled\n");
1214 INTEL_DISPLAY_STATE_WARN(display,
1215 intel_de_read(display, WRPLL_CTL(0)) & WRPLL_PLL_ENABLE,
1216 "WRPLL1 enabled\n");
1217 INTEL_DISPLAY_STATE_WARN(display,
1218 intel_de_read(display, WRPLL_CTL(1)) & WRPLL_PLL_ENABLE,
1219 "WRPLL2 enabled\n");
1220 INTEL_DISPLAY_STATE_WARN(display,
1221 intel_de_read(display, PP_STATUS(display, 0)) & PP_ON,
1222 "Panel power on\n");
1223 INTEL_DISPLAY_STATE_WARN(display,
1224 intel_de_read(display, BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
1225 "CPU PWM1 enabled\n");
1226 if (display->platform.haswell)
1227 INTEL_DISPLAY_STATE_WARN(display,
1228 intel_de_read(display, HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
1229 "CPU PWM2 enabled\n");
1230 INTEL_DISPLAY_STATE_WARN(display,
1231 intel_de_read(display, BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
1232 "PCH PWM1 enabled\n");
1233 INTEL_DISPLAY_STATE_WARN(display,
1234 (intel_de_read(display, UTIL_PIN_CTL) & (UTIL_PIN_ENABLE | UTIL_PIN_MODE_MASK)) == (UTIL_PIN_ENABLE | UTIL_PIN_MODE_PWM),
1235 "Utility pin enabled in PWM mode\n");
1236 INTEL_DISPLAY_STATE_WARN(display,
1237 intel_de_read(display, PCH_GTC_CTL) & PCH_GTC_ENABLE,
1238 "PCH GTC enabled\n");
1239
1240 /*
1241 * In theory we can still leave IRQs enabled, as long as only the HPD
1242 * interrupts remain enabled. We used to check for that, but since it's
1243 * gen-specific and since we only disable LCPLL after we fully disable
1244 * the interrupts, the check below should be enough.
1245 */
1246 INTEL_DISPLAY_STATE_WARN(display, intel_irqs_enabled(dev_priv),
1247 "IRQs enabled\n");
1248 }
1249
1250 static u32 hsw_read_dcomp(struct intel_display *display)
1251 {
1252 if (display->platform.haswell)
1253 return intel_de_read(display, D_COMP_HSW);
1254 else
1255 return intel_de_read(display, D_COMP_BDW);
1256 }
1257
1258 static void hsw_write_dcomp(struct intel_display *display, u32 val)
1259 {
1260 if (display->platform.haswell) {
1261 if (intel_pcode_write(display->drm, GEN6_PCODE_WRITE_D_COMP, val))
1262 drm_dbg_kms(display->drm, "Failed to write to D_COMP\n");
1263 } else {
1264 intel_de_write(display, D_COMP_BDW, val);
1265 intel_de_posting_read(display, D_COMP_BDW);
1266 }
1267 }
1268
1269 /*
1270 * This function implements pieces of two sequences from BSpec:
1271 * - Sequence for display software to disable LCPLL
1272 * - Sequence for display software to allow package C8+
1273 * The steps implemented here are just the steps that actually touch the LCPLL
1274 * register. Callers should take care of disabling all the display engine
1275 * functions, doing the mode unset, fixing interrupts, etc.
1276 */
1277 static void hsw_disable_lcpll(struct intel_display *display,
1278 bool switch_to_fclk, bool allow_power_down)
1279 {
1280 u32 val;
1281
1282 assert_can_disable_lcpll(display);
1283
1284 val = intel_de_read(display, LCPLL_CTL);
1285
1286 if (switch_to_fclk) {
1287 val |= LCPLL_CD_SOURCE_FCLK;
1288 intel_de_write(display, LCPLL_CTL, val);
1289
1290 if (wait_for_us(intel_de_read(display, LCPLL_CTL) &
1291 LCPLL_CD_SOURCE_FCLK_DONE, 1))
1292 drm_err(display->drm, "Switching to FCLK failed\n");
1293
1294 val = intel_de_read(display, LCPLL_CTL);
1295 }
1296
1297 val |= LCPLL_PLL_DISABLE;
1298 intel_de_write(display, LCPLL_CTL, val);
1299 intel_de_posting_read(display, LCPLL_CTL);
1300
1301 if (intel_de_wait_for_clear(display, LCPLL_CTL, LCPLL_PLL_LOCK, 1))
1302 drm_err(display->drm, "LCPLL still locked\n");
1303
1304 val = hsw_read_dcomp(display);
1305 val |= D_COMP_COMP_DISABLE;
1306 hsw_write_dcomp(display, val);
1307 ndelay(100);
1308
1309 if (wait_for((hsw_read_dcomp(display) &
1310 D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
1311 drm_err(display->drm, "D_COMP RCOMP still in progress\n");
1312
1313 if (allow_power_down) {
1314 intel_de_rmw(display, LCPLL_CTL, 0, LCPLL_POWER_DOWN_ALLOW);
1315 intel_de_posting_read(display, LCPLL_CTL);
1316 }
1317 }
1318
1319 /*
1320 * Fully restores LCPLL, disallowing power down and switching back to LCPLL
1321 * source.
1322 */
1323 static void hsw_restore_lcpll(struct intel_display *display)
1324 {
1325 struct drm_i915_private __maybe_unused *dev_priv = to_i915(display->drm);
1326 u32 val;
1327
1328 val = intel_de_read(display, LCPLL_CTL);
1329
1330 if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
1331 LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
1332 return;
1333
1334 /*
1335 * Make sure we're not on PC8 state before disabling PC8, otherwise
1336 * we'll hang the machine. To prevent PC8 state, just enable force_wake.
1337 */
1338 intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
1339
1340 if (val & LCPLL_POWER_DOWN_ALLOW) {
1341 val &= ~LCPLL_POWER_DOWN_ALLOW;
1342 intel_de_write(display, LCPLL_CTL, val);
1343 intel_de_posting_read(display, LCPLL_CTL);
1344 }
1345
1346 val = hsw_read_dcomp(display);
1347 val |= D_COMP_COMP_FORCE;
1348 val &= ~D_COMP_COMP_DISABLE;
1349 hsw_write_dcomp(display, val);
1350
1351 val = intel_de_read(display, LCPLL_CTL);
1352 val &= ~LCPLL_PLL_DISABLE;
1353 intel_de_write(display, LCPLL_CTL, val);
1354
1355 if (intel_de_wait_for_set(display, LCPLL_CTL, LCPLL_PLL_LOCK, 5))
1356 drm_err(display->drm, "LCPLL not locked yet\n");
1357
1358 if (val & LCPLL_CD_SOURCE_FCLK) {
1359 intel_de_rmw(display, LCPLL_CTL, LCPLL_CD_SOURCE_FCLK, 0);
1360
1361 if (wait_for_us((intel_de_read(display, LCPLL_CTL) &
1362 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
1363 drm_err(display->drm,
1364 "Switching back to LCPLL failed\n");
1365 }
1366
1367 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
1368
1369 intel_update_cdclk(display);
1370 intel_cdclk_dump_config(display, &display->cdclk.hw, "Current CDCLK");
1371 }
1372
1373 /*
1374 * Package states C8 and deeper are really deep PC states that can only be
1375 * reached when all the devices on the system allow it, so even if the graphics
1376 * device allows PC8+, it doesn't mean the system will actually get to these
1377 * states. Our driver only allows PC8+ when going into runtime PM.
1378 *
1379 * The requirements for PC8+ are that all the outputs are disabled, the power
1380 * well is disabled and most interrupts are disabled, and these are also
1381 * requirements for runtime PM. When these conditions are met, we manually do
1382 * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
1383 * to Fclk. If we're in PC8+ and we get an non-hotplug interrupt, we can hard
1384 * hang the machine.
1385 *
1386 * When we really reach PC8 or deeper states (not just when we allow it) we lose
1387 * the state of some registers, so when we come back from PC8+ we need to
1388 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
1389 * need to take care of the registers kept by RC6. Notice that this happens even
1390 * if we don't put the device in PCI D3 state (which is what currently happens
1391 * because of the runtime PM support).
1392 *
1393 * For more, read "Display Sequences for Package C8" on the hardware
1394 * documentation.
1395 */
1396 static void hsw_enable_pc8(struct intel_display *display)
1397 {
1398 drm_dbg_kms(display->drm, "Enabling package C8+\n");
1399
1400 if (HAS_PCH_LPT_LP(display))
1401 intel_de_rmw(display, SOUTH_DSPCLK_GATE_D,
1402 PCH_LP_PARTITION_LEVEL_DISABLE, 0);
1403
1404 lpt_disable_clkout_dp(display);
1405 hsw_disable_lcpll(display, true, true);
1406 }
1407
1408 static void hsw_disable_pc8(struct intel_display *display)
1409 {
1410 struct drm_i915_private __maybe_unused *dev_priv = to_i915(display->drm);
1411
1412 drm_dbg_kms(display->drm, "Disabling package C8+\n");
1413
1414 hsw_restore_lcpll(display);
1415 intel_init_pch_refclk(display);
1416
1417 /* Many display registers don't survive PC8+ */
1418 #ifdef I915 /* FIXME */
1419 intel_clock_gating_init(dev_priv);
1420 #endif
1421 }
1422
1423 static void intel_pch_reset_handshake(struct intel_display *display,
1424 bool enable)
1425 {
1426 i915_reg_t reg;
1427 u32 reset_bits;
1428
1429 if (display->platform.ivybridge) {
1430 reg = GEN7_MSG_CTL;
1431 reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK;
1432 } else {
1433 reg = HSW_NDE_RSTWRN_OPT;
1434 reset_bits = RESET_PCH_HANDSHAKE_ENABLE;
1435 }
1436
1437 if (DISPLAY_VER(display) >= 14)
1438 reset_bits |= MTL_RESET_PICA_HANDSHAKE_EN;
1439
1440 intel_de_rmw(display, reg, reset_bits, enable ? reset_bits : 0);
1441 }
1442
1443 static void skl_display_core_init(struct intel_display *display,
1444 bool resume)
1445 {
1446 struct i915_power_domains *power_domains = &display->power.domains;
1447 struct i915_power_well *well;
1448
1449 gen9_set_dc_state(display, DC_STATE_DISABLE);
1450
1451 /* enable PCH reset handshake */
1452 intel_pch_reset_handshake(display, !HAS_PCH_NOP(display));
1453
1454 if (!HAS_DISPLAY(display))
1455 return;
1456
1457 /* enable PG1 and Misc I/O */
1458 mutex_lock(&power_domains->lock);
1459
1460 well = lookup_power_well(display, SKL_DISP_PW_1);
1461 intel_power_well_enable(display, well);
1462
1463 well = lookup_power_well(display, SKL_DISP_PW_MISC_IO);
1464 intel_power_well_enable(display, well);
1465
1466 mutex_unlock(&power_domains->lock);
1467
1468 intel_cdclk_init_hw(display);
1469
1470 gen9_dbuf_enable(display);
1471
1472 if (resume)
1473 intel_dmc_load_program(display);
1474 }
1475
1476 static void skl_display_core_uninit(struct intel_display *display)
1477 {
1478 struct i915_power_domains *power_domains = &display->power.domains;
1479 struct i915_power_well *well;
1480
1481 if (!HAS_DISPLAY(display))
1482 return;
1483
1484 gen9_disable_dc_states(display);
1485 /* TODO: disable DMC program */
1486
1487 gen9_dbuf_disable(display);
1488
1489 intel_cdclk_uninit_hw(display);
1490
1491 /* The spec doesn't call for removing the reset handshake flag */
1492 /* disable PG1 and Misc I/O */
1493
1494 mutex_lock(&power_domains->lock);
1495
1496 /*
1497 * BSpec says to keep the MISC IO power well enabled here, only
1498 * remove our request for power well 1.
1499 * Note that even though the driver's request is removed power well 1
1500 * may stay enabled after this due to DMC's own request on it.
1501 */
1502 well = lookup_power_well(display, SKL_DISP_PW_1);
1503 intel_power_well_disable(display, well);
1504
1505 mutex_unlock(&power_domains->lock);
1506
1507 usleep_range(10, 30); /* 10 us delay per Bspec */
1508 }
1509
1510 static void bxt_display_core_init(struct intel_display *display, bool resume)
1511 {
1512 struct i915_power_domains *power_domains = &display->power.domains;
1513 struct i915_power_well *well;
1514
1515 gen9_set_dc_state(display, DC_STATE_DISABLE);
1516
1517 /*
1518 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
1519 * or else the reset will hang because there is no PCH to respond.
1520 * Move the handshake programming to initialization sequence.
1521 * Previously was left up to BIOS.
1522 */
1523 intel_pch_reset_handshake(display, false);
1524
1525 if (!HAS_DISPLAY(display))
1526 return;
1527
1528 /* Enable PG1 */
1529 mutex_lock(&power_domains->lock);
1530
1531 well = lookup_power_well(display, SKL_DISP_PW_1);
1532 intel_power_well_enable(display, well);
1533
1534 mutex_unlock(&power_domains->lock);
1535
1536 intel_cdclk_init_hw(display);
1537
1538 gen9_dbuf_enable(display);
1539
1540 if (resume)
1541 intel_dmc_load_program(display);
1542 }
1543
1544 static void bxt_display_core_uninit(struct intel_display *display)
1545 {
1546 struct i915_power_domains *power_domains = &display->power.domains;
1547 struct i915_power_well *well;
1548
1549 if (!HAS_DISPLAY(display))
1550 return;
1551
1552 gen9_disable_dc_states(display);
1553 /* TODO: disable DMC program */
1554
1555 gen9_dbuf_disable(display);
1556
1557 intel_cdclk_uninit_hw(display);
1558
1559 /* The spec doesn't call for removing the reset handshake flag */
1560
1561 /*
1562 * Disable PW1 (PG1).
1563 * Note that even though the driver's request is removed power well 1
1564 * may stay enabled after this due to DMC's own request on it.
1565 */
1566 mutex_lock(&power_domains->lock);
1567
1568 well = lookup_power_well(display, SKL_DISP_PW_1);
1569 intel_power_well_disable(display, well);
1570
1571 mutex_unlock(&power_domains->lock);
1572
1573 usleep_range(10, 30); /* 10 us delay per Bspec */
1574 }
1575
1576 struct buddy_page_mask {
1577 u32 page_mask;
1578 u8 type;
1579 u8 num_channels;
1580 };
1581
1582 static const struct buddy_page_mask tgl_buddy_page_masks[] = {
1583 { .num_channels = 1, .type = INTEL_DRAM_DDR4, .page_mask = 0xF },
1584 { .num_channels = 1, .type = INTEL_DRAM_DDR5, .page_mask = 0xF },
1585 { .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1C },
1586 { .num_channels = 2, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x1C },
1587 { .num_channels = 2, .type = INTEL_DRAM_DDR4, .page_mask = 0x1F },
1588 { .num_channels = 2, .type = INTEL_DRAM_DDR5, .page_mask = 0x1E },
1589 { .num_channels = 4, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x38 },
1590 { .num_channels = 4, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x38 },
1591 {}
1592 };
1593
1594 static const struct buddy_page_mask wa_1409767108_buddy_page_masks[] = {
1595 { .num_channels = 1, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1 },
1596 { .num_channels = 1, .type = INTEL_DRAM_DDR4, .page_mask = 0x1 },
1597 { .num_channels = 1, .type = INTEL_DRAM_DDR5, .page_mask = 0x1 },
1598 { .num_channels = 1, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x1 },
1599 { .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x3 },
1600 { .num_channels = 2, .type = INTEL_DRAM_DDR4, .page_mask = 0x3 },
1601 { .num_channels = 2, .type = INTEL_DRAM_DDR5, .page_mask = 0x3 },
1602 { .num_channels = 2, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x3 },
1603 {}
1604 };
1605
1606 static void tgl_bw_buddy_init(struct intel_display *display)
1607 {
1608 const struct dram_info *dram_info = intel_dram_info(display->drm);
1609 const struct buddy_page_mask *table;
1610 unsigned long abox_mask = DISPLAY_INFO(display)->abox_mask;
1611 int config, i;
1612
1613	/* BW_BUDDY registers are not used on dGPUs beyond DG1 */
1614 if (display->platform.dgfx && !display->platform.dg1)
1615 return;
1616
1617 if (display->platform.alderlake_s ||
1618 (display->platform.rocketlake && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0)))
1619 /* Wa_1409767108 */
1620 table = wa_1409767108_buddy_page_masks;
1621 else
1622 table = tgl_buddy_page_masks;
1623
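	/*
	 * Look up the entry matching the detected DRAM type and channel
	 * count; the tables are terminated by a zero page_mask sentinel.
	 */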
1624 for (config = 0; table[config].page_mask != 0; config++)
1625 if (table[config].num_channels == dram_info->num_channels &&
1626 table[config].type == dram_info->type)
1627 break;
1628
1629 if (table[config].page_mask == 0) {
1630 drm_dbg_kms(display->drm,
1631 "Unknown memory configuration; disabling address buddy logic.\n");
1632 for_each_set_bit(i, &abox_mask, BITS_PER_TYPE(abox_mask))
1633 intel_de_write(display, BW_BUDDY_CTL(i),
1634 BW_BUDDY_DISABLE);
1635 } else {
1636 for_each_set_bit(i, &abox_mask, BITS_PER_TYPE(abox_mask)) {
1637 intel_de_write(display, BW_BUDDY_PAGE_MASK(i),
1638 table[config].page_mask);
1639
1640 /* Wa_22010178259:tgl,dg1,rkl,adl-s */
1641 if (DISPLAY_VER(display) == 12)
1642 intel_de_rmw(display, BW_BUDDY_CTL(i),
1643 BW_BUDDY_TLB_REQ_TIMER_MASK,
1644 BW_BUDDY_TLB_REQ_TIMER(0x8));
1645 }
1646 }
1647 }
1648
1649 static void icl_display_core_init(struct intel_display *display,
1650 bool resume)
1651 {
1652 struct i915_power_domains *power_domains = &display->power.domains;
1653 struct i915_power_well *well;
1654
1655 gen9_set_dc_state(display, DC_STATE_DISABLE);
1656
1657 /* Wa_14011294188:ehl,jsl,tgl,rkl,adl-s */
1658 if (INTEL_PCH_TYPE(display) >= PCH_TGP &&
1659 INTEL_PCH_TYPE(display) < PCH_DG1)
1660 intel_de_rmw(display, SOUTH_DSPCLK_GATE_D, 0,
1661 PCH_DPMGUNIT_CLOCK_GATE_DISABLE);
1662
1663 /* 1. Enable PCH reset handshake. */
1664 intel_pch_reset_handshake(display, !HAS_PCH_NOP(display));
1665
1666 if (!HAS_DISPLAY(display))
1667 return;
1668
1669 /* 2. Initialize all combo phys */
1670 intel_combo_phy_init(display);
1671
1672 /*
1673 * 3. Enable Power Well 1 (PG1).
1674 * The AUX IO power wells will be enabled on demand.
1675 */
1676 mutex_lock(&power_domains->lock);
1677 well = lookup_power_well(display, SKL_DISP_PW_1);
1678 intel_power_well_enable(display, well);
1679 mutex_unlock(&power_domains->lock);
1680
1681 if (DISPLAY_VER(display) == 14)
1682 intel_de_rmw(display, DC_STATE_EN,
1683 HOLD_PHY_PG1_LATCH | HOLD_PHY_CLKREQ_PG1_LATCH, 0);
1684
1685 /* 4. Enable CDCLK. */
1686 intel_cdclk_init_hw(display);
1687
1688 if (DISPLAY_VER(display) == 12 || display->platform.dg2)
1689 gen12_dbuf_slices_config(display);
1690
1691 /* 5. Enable DBUF. */
1692 gen9_dbuf_enable(display);
1693
1694 /* 6. Setup MBUS. */
1695 icl_mbus_init(display);
1696
1697 /* 7. Program arbiter BW_BUDDY registers */
1698 if (DISPLAY_VER(display) >= 12)
1699 tgl_bw_buddy_init(display);
1700
1701 /* 8. Ensure PHYs have completed calibration and adaptation */
1702 if (display->platform.dg2)
1703 intel_snps_phy_wait_for_calibration(display);
1704
1705 /* 9. XE2_HPD: Program CHICKEN_MISC_2 before any cursor or planes are enabled */
1706 if (DISPLAY_VERx100(display) == 1401)
1707 intel_de_rmw(display, CHICKEN_MISC_2, BMG_DARB_HALF_BLK_END_BURST, 1);
1708
1709 if (resume)
1710 intel_dmc_load_program(display);
1711
1712 /* Wa_14011508470:tgl,dg1,rkl,adl-s,adl-p,dg2 */
1713 if (IS_DISPLAY_VERx100(display, 1200, 1300))
1714 intel_de_rmw(display, GEN11_CHICKEN_DCPR_2, 0,
1715 DCPR_CLEAR_MEMSTAT_DIS | DCPR_SEND_RESP_IMM |
1716 DCPR_MASK_LPMODE | DCPR_MASK_MAXLATENCY_MEMUP_CLR);
1717
1718 /* Wa_14011503030:xelpd */
1719 if (DISPLAY_VER(display) == 13)
1720 intel_de_write(display, XELPD_DISPLAY_ERR_FATAL_MASK, ~0);
1721
1722 /* Wa_15013987218 */
1723 if (DISPLAY_VER(display) == 20) {
1724 intel_de_rmw(display, SOUTH_DSPCLK_GATE_D,
1725 0, PCH_GMBUSUNIT_CLOCK_GATE_DISABLE);
1726 intel_de_rmw(display, SOUTH_DSPCLK_GATE_D,
1727 PCH_GMBUSUNIT_CLOCK_GATE_DISABLE, 0);
1728 }
1729 }
1730
1731 static void icl_display_core_uninit(struct intel_display *display)
1732 {
1733 struct i915_power_domains *power_domains = &display->power.domains;
1734 struct i915_power_well *well;
1735
1736 if (!HAS_DISPLAY(display))
1737 return;
1738
1739 gen9_disable_dc_states(display);
1740 intel_dmc_disable_program(display);
1741
1742 /* 1. Disable all display engine functions -> already done */
1743
1744 /* 2. Disable DBUF */
1745 gen9_dbuf_disable(display);
1746
1747 /* 3. Disable CD clock */
1748 intel_cdclk_uninit_hw(display);
1749
1750 if (DISPLAY_VER(display) == 14)
1751 intel_de_rmw(display, DC_STATE_EN, 0,
1752 HOLD_PHY_PG1_LATCH | HOLD_PHY_CLKREQ_PG1_LATCH);
1753
1754 /*
1755 * 4. Disable Power Well 1 (PG1).
1756 * The AUX IO power wells are toggled on demand, so they are already
1757 * disabled at this point.
1758 */
1759 mutex_lock(&power_domains->lock);
1760 well = lookup_power_well(display, SKL_DISP_PW_1);
1761 intel_power_well_disable(display, well);
1762 mutex_unlock(&power_domains->lock);
1763
1764	/* 5. Disable all combo phys */
1765 intel_combo_phy_uninit(display);
1766 }
1767
1768 static void chv_phy_control_init(struct intel_display *display)
1769 {
1770 struct i915_power_well *cmn_bc =
1771 lookup_power_well(display, VLV_DISP_PW_DPIO_CMN_BC);
1772 struct i915_power_well *cmn_d =
1773 lookup_power_well(display, CHV_DISP_PW_DPIO_CMN_D);
1774
1775 /*
1776 * DISPLAY_PHY_CONTROL can get corrupted if read. As a
1777 * workaround never ever read DISPLAY_PHY_CONTROL, and
1778 * instead maintain a shadow copy ourselves. Use the actual
1779 * power well state and lane status to reconstruct the
1780 * expected initial value.
1781 */
1782 display->power.chv_phy_control =
1783 PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
1784 PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
1785 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
1786 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
1787 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);
1788
1789 /*
1790 * If all lanes are disabled we leave the override disabled
1791 * with all power down bits cleared to match the state we
1792 * would use after disabling the port. Otherwise enable the
1793	 * override and set the lane powerdown bits according to the
1794 * current lane status.
1795 */
1796 if (intel_power_well_is_enabled(display, cmn_bc)) {
1797 u32 status = intel_de_read(display, DPLL(display, PIPE_A));
1798 unsigned int mask;
1799
1800 mask = status & DPLL_PORTB_READY_MASK;
1801 if (mask == 0xf)
1802 mask = 0x0;
1803 else
1804 display->power.chv_phy_control |=
1805 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);
1806
1807 display->power.chv_phy_control |=
1808 PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);
1809
1810 mask = (status & DPLL_PORTC_READY_MASK) >> 4;
1811 if (mask == 0xf)
1812 mask = 0x0;
1813 else
1814 display->power.chv_phy_control |=
1815 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);
1816
1817 display->power.chv_phy_control |=
1818 PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);
1819
1820 display->power.chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);
1821
1822 display->power.chv_phy_assert[DPIO_PHY0] = false;
1823 } else {
1824 display->power.chv_phy_assert[DPIO_PHY0] = true;
1825 }
1826
1827 if (intel_power_well_is_enabled(display, cmn_d)) {
1828 u32 status = intel_de_read(display, DPIO_PHY_STATUS);
1829 unsigned int mask;
1830
1831 mask = status & DPLL_PORTD_READY_MASK;
1832
1833 if (mask == 0xf)
1834 mask = 0x0;
1835 else
1836 display->power.chv_phy_control |=
1837 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);
1838
1839 display->power.chv_phy_control |=
1840 PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);
1841
1842 display->power.chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);
1843
1844 display->power.chv_phy_assert[DPIO_PHY1] = false;
1845 } else {
1846 display->power.chv_phy_assert[DPIO_PHY1] = true;
1847 }
1848
1849 drm_dbg_kms(display->drm, "Initial PHY_CONTROL=0x%08x\n",
1850 display->power.chv_phy_control);
1851
1852 /* Defer application of initial phy_control to enabling the powerwell */
1853 }
1854
1855 static void vlv_cmnlane_wa(struct intel_display *display)
1856 {
1857 struct i915_power_well *cmn =
1858 lookup_power_well(display, VLV_DISP_PW_DPIO_CMN_BC);
1859 struct i915_power_well *disp2d =
1860 lookup_power_well(display, VLV_DISP_PW_DISP2D);
1861
1862	/* If the display might already be active, skip this */
1863 if (intel_power_well_is_enabled(display, cmn) &&
1864 intel_power_well_is_enabled(display, disp2d) &&
1865 intel_de_read(display, DPIO_CTL) & DPIO_CMNRST)
1866 return;
1867
1868 drm_dbg_kms(display->drm, "toggling display PHY side reset\n");
1869
1870 /* cmnlane needs DPLL registers */
1871 intel_power_well_enable(display, disp2d);
1872
1873 /*
1874 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
1875 * Need to assert and de-assert PHY SB reset by gating the
1876 * common lane power, then un-gating it.
1877	 * Simply ungating isn't enough to reset the PHY sufficiently to get
1878 * ports and lanes running.
1879 */
1880 intel_power_well_disable(display, cmn);
1881 }
1882
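/* Check with the Punit whether the power island behind @reg0 is power gated. */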
1883 static bool vlv_punit_is_power_gated(struct intel_display *display, u32 reg0)
1884 {
1885 bool ret;
1886
1887 vlv_punit_get(display->drm);
1888 ret = (vlv_punit_read(display->drm, reg0) & SSPM0_SSC_MASK) == SSPM0_SSC_PWR_GATE;
1889 vlv_punit_put(display->drm);
1890
1891 return ret;
1892 }
1893
1894 static void assert_ved_power_gated(struct intel_display *display)
1895 {
1896 drm_WARN(display->drm,
1897 !vlv_punit_is_power_gated(display, PUNIT_REG_VEDSSPM0),
1898 "VED not power gated\n");
1899 }
1900
1901 static void assert_isp_power_gated(struct intel_display *display)
1902 {
1903 static const struct pci_device_id isp_ids[] = {
1904 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0f38)},
1905 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x22b8)},
1906 {}
1907 };
1908
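	/*
	 * If no ISP PCI device is present nothing else can be using the
	 * ISP, so it is expected to be power gated.
	 */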
1909 drm_WARN(display->drm, !pci_dev_present(isp_ids) &&
1910 !vlv_punit_is_power_gated(display, PUNIT_REG_ISPSSPM0),
1911 "ISP not power gated\n");
1912 }
1913
1914 static void intel_power_domains_verify_state(struct intel_display *display);
1915
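/*
 * Typical call order, as documented in the kernel-doc of the functions below:
 *
 *   driver load:    intel_power_domains_init_hw()
 *                   ... display HW state readout ...
 *                   intel_power_domains_enable()
 *
 *   system suspend: intel_power_domains_disable()
 *                   intel_power_domains_suspend()
 *
 *   system resume:  intel_power_domains_resume()
 *                   intel_power_domains_enable()
 *
 *   driver remove:  intel_power_domains_disable()
 *                   intel_power_domains_driver_remove()
 */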
1916 /**
1917 * intel_power_domains_init_hw - initialize hardware power domain state
1918 * @display: display device instance
1919 * @resume: Called from resume code paths or not
1920 *
1921 * This function initializes the hardware power domain state and enables all
1922 * power wells belonging to the INIT power domain. Power wells in other
1923 * domains (and not in the INIT domain) are referenced or disabled by
1924 * intel_modeset_readout_hw_state(). After that the reference count of each
1925 * power well must match its HW enabled state, see
1926 * intel_power_domains_verify_state().
1927 *
1928 * It will return with power domains disabled (to be enabled later by
1929 * intel_power_domains_enable()) and must be paired with
1930 * intel_power_domains_driver_remove().
1931 */
1932 void intel_power_domains_init_hw(struct intel_display *display, bool resume)
1933 {
1934 struct i915_power_domains *power_domains = &display->power.domains;
1935
1936 power_domains->initializing = true;
1937
1938 if (DISPLAY_VER(display) >= 11) {
1939 icl_display_core_init(display, resume);
1940 } else if (display->platform.geminilake || display->platform.broxton) {
1941 bxt_display_core_init(display, resume);
1942 } else if (DISPLAY_VER(display) == 9) {
1943 skl_display_core_init(display, resume);
1944 } else if (display->platform.cherryview) {
1945 mutex_lock(&power_domains->lock);
1946 chv_phy_control_init(display);
1947 mutex_unlock(&power_domains->lock);
1948 assert_isp_power_gated(display);
1949 } else if (display->platform.valleyview) {
1950 mutex_lock(&power_domains->lock);
1951 vlv_cmnlane_wa(display);
1952 mutex_unlock(&power_domains->lock);
1953 assert_ved_power_gated(display);
1954 assert_isp_power_gated(display);
1955 } else if (display->platform.broadwell || display->platform.haswell) {
1956 hsw_assert_cdclk(display);
1957 intel_pch_reset_handshake(display, !HAS_PCH_NOP(display));
1958 } else if (display->platform.ivybridge) {
1959 intel_pch_reset_handshake(display, !HAS_PCH_NOP(display));
1960 }
1961
1962 /*
1963 * Keep all power wells enabled for any dependent HW access during
1964 * initialization and to make sure we keep BIOS enabled display HW
1965 * resources powered until display HW readout is complete. We drop
1966 * this reference in intel_power_domains_enable().
1967 */
1968 drm_WARN_ON(display->drm, power_domains->init_wakeref);
1969 power_domains->init_wakeref =
1970 intel_display_power_get(display, POWER_DOMAIN_INIT);
1971
1972	/* Disable power well support (keep the power wells enabled) if the user asked for it. */
1973 if (!display->params.disable_power_well) {
1974 drm_WARN_ON(display->drm, power_domains->disable_wakeref);
1975 display->power.domains.disable_wakeref = intel_display_power_get(display,
1976 POWER_DOMAIN_INIT);
1977 }
1978 intel_power_domains_sync_hw(display);
1979
1980 power_domains->initializing = false;
1981 }
1982
1983 /**
1984 * intel_power_domains_driver_remove - deinitialize hw power domain state
1985 * @display: display device instance
1986 *
1987 * De-initializes the display power domain HW state. It also ensures that the
1988 * device stays powered up so that the driver can be reloaded.
1989 *
1990 * It must be called with power domains already disabled (after a call to
1991 * intel_power_domains_disable()) and must be paired with
1992 * intel_power_domains_init_hw().
1993 */
1994 void intel_power_domains_driver_remove(struct intel_display *display)
1995 {
1996 intel_wakeref_t wakeref __maybe_unused =
1997 fetch_and_zero(&display->power.domains.init_wakeref);
1998
1999 /* Remove the refcount we took to keep power well support disabled. */
2000 if (!display->params.disable_power_well)
2001 intel_display_power_put(display, POWER_DOMAIN_INIT,
2002 fetch_and_zero(&display->power.domains.disable_wakeref));
2003
2004 intel_display_power_flush_work_sync(display);
2005
2006 intel_power_domains_verify_state(display);
2007
2008 /* Keep the power well enabled, but cancel its rpm wakeref. */
2009 intel_display_rpm_put(display, wakeref);
2010 }
2011
2012 /**
2013 * intel_power_domains_sanitize_state - sanitize power domains state
2014 * @display: display device instance
2015 *
2016 * Sanitize the power domains state during driver loading and system resume.
2017 * The function will disable all display power wells that BIOS has enabled
2018 * without a user for them (any user for a power well has taken a reference
2019 * on it by the time this function is called, after the state of all the
2020 * pipe, encoder, etc. HW resources have been sanitized).
2021 */
2022 void intel_power_domains_sanitize_state(struct intel_display *display)
2023 {
2024 struct i915_power_domains *power_domains = &display->power.domains;
2025 struct i915_power_well *power_well;
2026
2027 mutex_lock(&power_domains->lock);
2028
2029 for_each_power_well_reverse(display, power_well) {
2030 if (power_well->desc->always_on || power_well->count ||
2031 !intel_power_well_is_enabled(display, power_well))
2032 continue;
2033
2034 drm_dbg_kms(display->drm,
2035 "BIOS left unused %s power well enabled, disabling it\n",
2036 intel_power_well_name(power_well));
2037 intel_power_well_disable(display, power_well);
2038 }
2039
2040 mutex_unlock(&power_domains->lock);
2041 }
2042
2043 /**
2044 * intel_power_domains_enable - enable toggling of display power wells
2045 * @display: display device instance
2046 *
2047 * Enable the on-demand enabling/disabling of the display power wells. Note that
2048 * power wells not belonging to POWER_DOMAIN_INIT are allowed to be toggled
2049 * only at specific points of the display modeset sequence, thus they are not
2050 * affected by the intel_power_domains_enable()/disable() calls. The purpose
2051 * of these functions is to keep the rest of the power wells enabled until the end
2052 * of display HW readout (which will acquire the power references reflecting
2053 * the current HW state).
2054 */
2055 void intel_power_domains_enable(struct intel_display *display)
2056 {
2057 intel_wakeref_t wakeref __maybe_unused =
2058 fetch_and_zero(&display->power.domains.init_wakeref);
2059
2060 intel_display_power_put(display, POWER_DOMAIN_INIT, wakeref);
2061 intel_power_domains_verify_state(display);
2062 }
2063
2064 /**
2065 * intel_power_domains_disable - disable toggling of display power wells
2066 * @display: display device instance
2067 *
2068 * Disable the on-demand enabling/disabling of the display power wells. See
2069 * intel_power_domains_enable() for which power wells this call controls.
2070 */
2071 void intel_power_domains_disable(struct intel_display *display)
2072 {
2073 struct i915_power_domains *power_domains = &display->power.domains;
2074
2075 drm_WARN_ON(display->drm, power_domains->init_wakeref);
2076 power_domains->init_wakeref =
2077 intel_display_power_get(display, POWER_DOMAIN_INIT);
2078
2079 intel_power_domains_verify_state(display);
2080 }
2081
2082 /**
2083 * intel_power_domains_suspend - suspend power domain state
2084 * @display: display device instance
2085 * @s2idle: specifies whether we go to idle, or deeper sleep
2086 *
2087 * This function prepares the hardware power domain state before entering
2088 * system suspend.
2089 *
2090 * It must be called with power domains already disabled (after a call to
2091 * intel_power_domains_disable()) and paired with intel_power_domains_resume().
2092 */
2093 void intel_power_domains_suspend(struct intel_display *display, bool s2idle)
2094 {
2095 struct i915_power_domains *power_domains = &display->power.domains;
2096 intel_wakeref_t wakeref __maybe_unused =
2097 fetch_and_zero(&power_domains->init_wakeref);
2098
2099 intel_display_power_put(display, POWER_DOMAIN_INIT, wakeref);
2100
2101 /*
2102 * In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9
2103	 * support, don't manually deinit the power domains. This also means the
2104	 * DMC firmware will stay active; it will power down any HW
2105 * resources as required and also enable deeper system power states
2106 * that would be blocked if the firmware was inactive.
2107 */
2108 if (!(power_domains->allowed_dc_mask & DC_STATE_EN_DC9) && s2idle &&
2109 intel_dmc_has_payload(display)) {
2110 intel_display_power_flush_work(display);
2111 intel_power_domains_verify_state(display);
2112 return;
2113 }
2114
2115 /*
2116 * Even if power well support was disabled we still want to disable
2117 * power wells if power domains must be deinitialized for suspend.
2118 */
2119 if (!display->params.disable_power_well)
2120 intel_display_power_put(display, POWER_DOMAIN_INIT,
2121 fetch_and_zero(&display->power.domains.disable_wakeref));
2122
2123 intel_display_power_flush_work(display);
2124 intel_power_domains_verify_state(display);
2125
2126 if (DISPLAY_VER(display) >= 11)
2127 icl_display_core_uninit(display);
2128 else if (display->platform.geminilake || display->platform.broxton)
2129 bxt_display_core_uninit(display);
2130 else if (DISPLAY_VER(display) == 9)
2131 skl_display_core_uninit(display);
2132
2133 power_domains->display_core_suspended = true;
2134 }
2135
2136 /**
2137 * intel_power_domains_resume - resume power domain state
2138 * @display: display device instance
2139 *
2140 * This function restores the hardware power domain state during system resume.
2141 *
2142 * It will return with power domain support disabled (to be enabled later by
2143 * intel_power_domains_enable()) and must be paired with
2144 * intel_power_domains_suspend().
2145 */
2146 void intel_power_domains_resume(struct intel_display *display)
2147 {
2148 struct i915_power_domains *power_domains = &display->power.domains;
2149
2150 if (power_domains->display_core_suspended) {
2151 intel_power_domains_init_hw(display, true);
2152 power_domains->display_core_suspended = false;
2153 } else {
2154 drm_WARN_ON(display->drm, power_domains->init_wakeref);
2155 power_domains->init_wakeref =
2156 intel_display_power_get(display, POWER_DOMAIN_INIT);
2157 }
2158
2159 intel_power_domains_verify_state(display);
2160 }
2161
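/*
 * State verification is only built with CONFIG_DRM_I915_DEBUG_RUNTIME_PM;
 * otherwise intel_power_domains_verify_state() is an empty stub.
 */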
2162 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
2163
2164 static void intel_power_domains_dump_info(struct intel_display *display)
2165 {
2166 struct i915_power_domains *power_domains = &display->power.domains;
2167 struct i915_power_well *power_well;
2168
2169 for_each_power_well(display, power_well) {
2170 enum intel_display_power_domain domain;
2171
2172 drm_dbg_kms(display->drm, "%-25s %d\n",
2173 intel_power_well_name(power_well), intel_power_well_refcount(power_well));
2174
2175 for_each_power_domain(domain, intel_power_well_domains(power_well))
2176 drm_dbg_kms(display->drm, " %-23s %d\n",
2177 intel_display_power_domain_str(domain),
2178 power_domains->domain_use_count[domain]);
2179 }
2180 }
2181
2182 /**
2183 * intel_power_domains_verify_state - verify the HW/SW state for all power wells
2184 * @display: display device instance
2185 *
2186 * Verify if the reference count of each power well matches its HW enabled
2187 * state and the total refcount of the domains it belongs to. This must be
2188 * called after modeset HW state sanitization, which is responsible for
2189 * acquiring reference counts for any power wells in use and disabling the
2190 * ones left on by BIOS but not required by any active output.
2191 */
2192 static void intel_power_domains_verify_state(struct intel_display *display)
2193 {
2194 struct i915_power_domains *power_domains = &display->power.domains;
2195 struct i915_power_well *power_well;
2196 bool dump_domain_info;
2197
2198 mutex_lock(&power_domains->lock);
2199
2200 verify_async_put_domains_state(power_domains);
2201
2202 dump_domain_info = false;
2203 for_each_power_well(display, power_well) {
2204 enum intel_display_power_domain domain;
2205 int domains_count;
2206 bool enabled;
2207
2208 enabled = intel_power_well_is_enabled(display, power_well);
2209 if ((intel_power_well_refcount(power_well) ||
2210 intel_power_well_is_always_on(power_well)) !=
2211 enabled)
2212 drm_err(display->drm,
2213 "power well %s state mismatch (refcount %d/enabled %d)",
2214 intel_power_well_name(power_well),
2215 intel_power_well_refcount(power_well), enabled);
2216
2217 domains_count = 0;
2218 for_each_power_domain(domain, intel_power_well_domains(power_well))
2219 domains_count += power_domains->domain_use_count[domain];
2220
2221 if (intel_power_well_refcount(power_well) != domains_count) {
2222 drm_err(display->drm,
2223 "power well %s refcount/domain refcount mismatch "
2224 "(refcount %d/domains refcount %d)\n",
2225 intel_power_well_name(power_well),
2226 intel_power_well_refcount(power_well),
2227 domains_count);
2228 dump_domain_info = true;
2229 }
2230 }
2231
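	/* Dump the power well/domain usage only once to avoid flooding the log. */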
2232 if (dump_domain_info) {
2233 static bool dumped;
2234
2235 if (!dumped) {
2236 intel_power_domains_dump_info(display);
2237 dumped = true;
2238 }
2239 }
2240
2241 mutex_unlock(&power_domains->lock);
2242 }
2243
2244 #else
2245
2246 static void intel_power_domains_verify_state(struct intel_display *display)
2247 {
2248 }
2249
2250 #endif
2251
2252 void intel_display_power_suspend_late(struct intel_display *display, bool s2idle)
2253 {
2254 intel_power_domains_suspend(display, s2idle);
2255
2256 if (DISPLAY_VER(display) >= 11 || display->platform.geminilake ||
2257 display->platform.broxton) {
2258 bxt_enable_dc9(display);
2259 } else if (display->platform.haswell || display->platform.broadwell) {
2260 hsw_enable_pc8(display);
2261 }
2262
2263 /* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */
2264 if (INTEL_PCH_TYPE(display) >= PCH_CNP && INTEL_PCH_TYPE(display) < PCH_DG1)
2265 intel_de_rmw(display, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS);
2266 }
2267
2268 void intel_display_power_resume_early(struct intel_display *display)
2269 {
2270 if (DISPLAY_VER(display) >= 11 || display->platform.geminilake ||
2271 display->platform.broxton) {
2272 gen9_sanitize_dc_state(display);
2273 bxt_disable_dc9(display);
2274 } else if (display->platform.haswell || display->platform.broadwell) {
2275 hsw_disable_pc8(display);
2276 }
2277
2278 /* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */
2279 if (INTEL_PCH_TYPE(display) >= PCH_CNP && INTEL_PCH_TYPE(display) < PCH_DG1)
2280 intel_de_rmw(display, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0);
2281
2282 intel_power_domains_resume(display);
2283 }
2284
2285 void intel_display_power_suspend(struct intel_display *display)
2286 {
2287 if (DISPLAY_VER(display) >= 11) {
2288 icl_display_core_uninit(display);
2289 bxt_enable_dc9(display);
2290 } else if (display->platform.geminilake || display->platform.broxton) {
2291 bxt_display_core_uninit(display);
2292 bxt_enable_dc9(display);
2293 } else if (display->platform.haswell || display->platform.broadwell) {
2294 hsw_enable_pc8(display);
2295 }
2296 }
2297
2298 void intel_display_power_resume(struct intel_display *display)
2299 {
2300 struct i915_power_domains *power_domains = &display->power.domains;
2301
2302 if (DISPLAY_VER(display) >= 11) {
2303 bxt_disable_dc9(display);
2304 icl_display_core_init(display, true);
2305 if (intel_dmc_has_payload(display)) {
2306 if (power_domains->allowed_dc_mask & DC_STATE_EN_UPTO_DC6)
2307 skl_enable_dc6(display);
2308 else if (power_domains->allowed_dc_mask & DC_STATE_EN_UPTO_DC5)
2309 gen9_enable_dc5(display);
2310 }
2311 } else if (display->platform.geminilake || display->platform.broxton) {
2312 bxt_disable_dc9(display);
2313 bxt_display_core_init(display, true);
2314 if (intel_dmc_has_payload(display) &&
2315 (power_domains->allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
2316 gen9_enable_dc5(display);
2317 } else if (display->platform.haswell || display->platform.broadwell) {
2318 hsw_disable_pc8(display);
2319 }
2320 }
2321
2322 void intel_display_power_debug(struct intel_display *display, struct seq_file *m)
2323 {
2324 struct i915_power_domains *power_domains = &display->power.domains;
2325 int i;
2326
2327 mutex_lock(&power_domains->lock);
2328
2329 seq_printf(m, "Runtime power status: %s\n",
2330 str_enabled_disabled(!power_domains->init_wakeref));
2331
2332 seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
2333 for (i = 0; i < power_domains->power_well_count; i++) {
2334 struct i915_power_well *power_well;
2335 enum intel_display_power_domain power_domain;
2336
2337 power_well = &power_domains->power_wells[i];
2338 seq_printf(m, "%-25s %d\n", intel_power_well_name(power_well),
2339 intel_power_well_refcount(power_well));
2340
2341 for_each_power_domain(power_domain, intel_power_well_domains(power_well))
2342 seq_printf(m, " %-23s %d\n",
2343 intel_display_power_domain_str(power_domain),
2344 power_domains->domain_use_count[power_domain]);
2345 }
2346
2347 mutex_unlock(&power_domains->lock);
2348 }
2349
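/*
 * Describes a contiguous range of DDI ports / AUX channels and the first
 * power domain of each kind used by that range.
 */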
2350 struct intel_ddi_port_domains {
2351 enum port port_start;
2352 enum port port_end;
2353 enum aux_ch aux_ch_start;
2354 enum aux_ch aux_ch_end;
2355
2356 enum intel_display_power_domain ddi_lanes;
2357 enum intel_display_power_domain ddi_io;
2358 enum intel_display_power_domain aux_io;
2359 enum intel_display_power_domain aux_legacy_usbc;
2360 enum intel_display_power_domain aux_tbt;
2361 };
2362
2363 static const struct intel_ddi_port_domains
2364 i9xx_port_domains[] = {
2365 {
2366 .port_start = PORT_A,
2367 .port_end = PORT_F,
2368 .aux_ch_start = AUX_CH_A,
2369 .aux_ch_end = AUX_CH_F,
2370
2371 .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A,
2372 .ddi_io = POWER_DOMAIN_PORT_DDI_IO_A,
2373 .aux_io = POWER_DOMAIN_AUX_IO_A,
2374 .aux_legacy_usbc = POWER_DOMAIN_AUX_A,
2375 .aux_tbt = POWER_DOMAIN_INVALID,
2376 },
2377 };
2378
2379 static const struct intel_ddi_port_domains
2380 d11_port_domains[] = {
2381 {
2382 .port_start = PORT_A,
2383 .port_end = PORT_B,
2384 .aux_ch_start = AUX_CH_A,
2385 .aux_ch_end = AUX_CH_B,
2386
2387 .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A,
2388 .ddi_io = POWER_DOMAIN_PORT_DDI_IO_A,
2389 .aux_io = POWER_DOMAIN_AUX_IO_A,
2390 .aux_legacy_usbc = POWER_DOMAIN_AUX_A,
2391 .aux_tbt = POWER_DOMAIN_INVALID,
2392 }, {
2393 .port_start = PORT_C,
2394 .port_end = PORT_F,
2395 .aux_ch_start = AUX_CH_C,
2396 .aux_ch_end = AUX_CH_F,
2397
2398 .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_C,
2399 .ddi_io = POWER_DOMAIN_PORT_DDI_IO_C,
2400 .aux_io = POWER_DOMAIN_AUX_IO_C,
2401 .aux_legacy_usbc = POWER_DOMAIN_AUX_C,
2402 .aux_tbt = POWER_DOMAIN_AUX_TBT1,
2403 },
2404 };
2405
2406 static const struct intel_ddi_port_domains
2407 d12_port_domains[] = {
2408 {
2409 .port_start = PORT_A,
2410 .port_end = PORT_C,
2411 .aux_ch_start = AUX_CH_A,
2412 .aux_ch_end = AUX_CH_C,
2413
2414 .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A,
2415 .ddi_io = POWER_DOMAIN_PORT_DDI_IO_A,
2416 .aux_io = POWER_DOMAIN_AUX_IO_A,
2417 .aux_legacy_usbc = POWER_DOMAIN_AUX_A,
2418 .aux_tbt = POWER_DOMAIN_INVALID,
2419 }, {
2420 .port_start = PORT_TC1,
2421 .port_end = PORT_TC6,
2422 .aux_ch_start = AUX_CH_USBC1,
2423 .aux_ch_end = AUX_CH_USBC6,
2424
2425 .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_TC1,
2426 .ddi_io = POWER_DOMAIN_PORT_DDI_IO_TC1,
2427 .aux_io = POWER_DOMAIN_INVALID,
2428 .aux_legacy_usbc = POWER_DOMAIN_AUX_USBC1,
2429 .aux_tbt = POWER_DOMAIN_AUX_TBT1,
2430 },
2431 };
2432
2433 static const struct intel_ddi_port_domains
2434 d13_port_domains[] = {
2435 {
2436 .port_start = PORT_A,
2437 .port_end = PORT_C,
2438 .aux_ch_start = AUX_CH_A,
2439 .aux_ch_end = AUX_CH_C,
2440
2441 .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A,
2442 .ddi_io = POWER_DOMAIN_PORT_DDI_IO_A,
2443 .aux_io = POWER_DOMAIN_AUX_IO_A,
2444 .aux_legacy_usbc = POWER_DOMAIN_AUX_A,
2445 .aux_tbt = POWER_DOMAIN_INVALID,
2446 }, {
2447 .port_start = PORT_TC1,
2448 .port_end = PORT_TC4,
2449 .aux_ch_start = AUX_CH_USBC1,
2450 .aux_ch_end = AUX_CH_USBC4,
2451
2452 .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_TC1,
2453 .ddi_io = POWER_DOMAIN_PORT_DDI_IO_TC1,
2454 .aux_io = POWER_DOMAIN_INVALID,
2455 .aux_legacy_usbc = POWER_DOMAIN_AUX_USBC1,
2456 .aux_tbt = POWER_DOMAIN_AUX_TBT1,
2457 }, {
2458 .port_start = PORT_D_XELPD,
2459 .port_end = PORT_E_XELPD,
2460 .aux_ch_start = AUX_CH_D_XELPD,
2461 .aux_ch_end = AUX_CH_E_XELPD,
2462
2463 .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_D,
2464 .ddi_io = POWER_DOMAIN_PORT_DDI_IO_D,
2465 .aux_io = POWER_DOMAIN_AUX_IO_D,
2466 .aux_legacy_usbc = POWER_DOMAIN_AUX_D,
2467 .aux_tbt = POWER_DOMAIN_INVALID,
2468 },
2469 };
2470
2471 static void
2472 intel_port_domains_for_platform(struct intel_display *display,
2473 const struct intel_ddi_port_domains **domains,
2474 int *domains_size)
2475 {
2476 if (DISPLAY_VER(display) >= 13) {
2477 *domains = d13_port_domains;
2478 *domains_size = ARRAY_SIZE(d13_port_domains);
2479 } else if (DISPLAY_VER(display) >= 12) {
2480 *domains = d12_port_domains;
2481 *domains_size = ARRAY_SIZE(d12_port_domains);
2482 } else if (DISPLAY_VER(display) >= 11) {
2483 *domains = d11_port_domains;
2484 *domains_size = ARRAY_SIZE(d11_port_domains);
2485 } else {
2486 *domains = i9xx_port_domains;
2487 *domains_size = ARRAY_SIZE(i9xx_port_domains);
2488 }
2489 }
2490
2491 static const struct intel_ddi_port_domains *
2492 intel_port_domains_for_port(struct intel_display *display, enum port port)
2493 {
2494 const struct intel_ddi_port_domains *domains;
2495 int domains_size;
2496 int i;
2497
2498 intel_port_domains_for_platform(display, &domains, &domains_size);
2499 for (i = 0; i < domains_size; i++)
2500 if (port >= domains[i].port_start && port <= domains[i].port_end)
2501 return &domains[i];
2502
2503 return NULL;
2504 }
2505
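/*
 * The helpers below rely on the per-port/per-AUX-CH power domain enum values
 * being contiguous, so the domain for a given port or AUX CH is derived by
 * offsetting from the first domain of its range.
 */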
2506 enum intel_display_power_domain
2507 intel_display_power_ddi_io_domain(struct intel_display *display, enum port port)
2508 {
2509 const struct intel_ddi_port_domains *domains = intel_port_domains_for_port(display, port);
2510
2511 if (drm_WARN_ON(display->drm, !domains || domains->ddi_io == POWER_DOMAIN_INVALID))
2512 return POWER_DOMAIN_PORT_DDI_IO_A;
2513
2514 return domains->ddi_io + (int)(port - domains->port_start);
2515 }
2516
2517 enum intel_display_power_domain
2518 intel_display_power_ddi_lanes_domain(struct intel_display *display, enum port port)
2519 {
2520 const struct intel_ddi_port_domains *domains = intel_port_domains_for_port(display, port);
2521
2522 if (drm_WARN_ON(display->drm, !domains || domains->ddi_lanes == POWER_DOMAIN_INVALID))
2523 return POWER_DOMAIN_PORT_DDI_LANES_A;
2524
2525 return domains->ddi_lanes + (int)(port - domains->port_start);
2526 }
2527
2528 static const struct intel_ddi_port_domains *
2529 intel_port_domains_for_aux_ch(struct intel_display *display, enum aux_ch aux_ch)
2530 {
2531 const struct intel_ddi_port_domains *domains;
2532 int domains_size;
2533 int i;
2534
2535 intel_port_domains_for_platform(display, &domains, &domains_size);
2536 for (i = 0; i < domains_size; i++)
2537 if (aux_ch >= domains[i].aux_ch_start && aux_ch <= domains[i].aux_ch_end)
2538 return &domains[i];
2539
2540 return NULL;
2541 }
2542
2543 enum intel_display_power_domain
2544 intel_display_power_aux_io_domain(struct intel_display *display, enum aux_ch aux_ch)
2545 {
2546 const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(display, aux_ch);
2547
2548 if (drm_WARN_ON(display->drm, !domains || domains->aux_io == POWER_DOMAIN_INVALID))
2549 return POWER_DOMAIN_AUX_IO_A;
2550
2551 return domains->aux_io + (int)(aux_ch - domains->aux_ch_start);
2552 }
2553
2554 enum intel_display_power_domain
2555 intel_display_power_legacy_aux_domain(struct intel_display *display, enum aux_ch aux_ch)
2556 {
2557 const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(display, aux_ch);
2558
2559 if (drm_WARN_ON(display->drm, !domains || domains->aux_legacy_usbc == POWER_DOMAIN_INVALID))
2560 return POWER_DOMAIN_AUX_A;
2561
2562 return domains->aux_legacy_usbc + (int)(aux_ch - domains->aux_ch_start);
2563 }
2564
2565 enum intel_display_power_domain
2566 intel_display_power_tbt_aux_domain(struct intel_display *display, enum aux_ch aux_ch)
2567 {
2568 const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(display, aux_ch);
2569
2570 if (drm_WARN_ON(display->drm, !domains || domains->aux_tbt == POWER_DOMAIN_INVALID))
2571 return POWER_DOMAIN_AUX_TBT1;
2572
2573 return domains->aux_tbt + (int)(aux_ch - domains->aux_ch_start);
2574 }
2575