/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */

#include <linux/iopoll.h>
#include <linux/string_helpers.h>

#include "soc/intel_dram.h"

#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_reg.h"
#include "i915_utils.h"
#include "intel_backlight_regs.h"
#include "intel_cdclk.h"
#include "intel_clock_gating.h"
#include "intel_combo_phy.h"
#include "intel_de.h"
#include "intel_display_power.h"
#include "intel_display_power_map.h"
#include "intel_display_power_well.h"
#include "intel_display_regs.h"
#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_dmc.h"
#include "intel_mchbar_regs.h"
#include "intel_pch_refclk.h"
#include "intel_pcode.h"
#include "intel_pmdemand.h"
#include "intel_pps_regs.h"
#include "intel_snps_phy.h"
#include "skl_watermark.h"
#include "skl_watermark_regs.h"
#include "vlv_sideband.h"

#define for_each_power_domain_well(__display, __power_well, __domain) \
        for_each_power_well((__display), __power_well) \
                for_each_if(test_bit((__domain), (__power_well)->domains.bits))

#define for_each_power_domain_well_reverse(__display, __power_well, __domain) \
        for_each_power_well_reverse((__display), __power_well) \
                for_each_if(test_bit((__domain), (__power_well)->domains.bits))

static const char *
intel_display_power_domain_str(enum intel_display_power_domain domain)
{
        switch (domain) {
        case POWER_DOMAIN_DISPLAY_CORE:
                return "DISPLAY_CORE";
        case POWER_DOMAIN_PIPE_A:
                return "PIPE_A";
        case POWER_DOMAIN_PIPE_B:
                return "PIPE_B";
        case POWER_DOMAIN_PIPE_C:
                return "PIPE_C";
        case POWER_DOMAIN_PIPE_D:
                return "PIPE_D";
        case POWER_DOMAIN_PIPE_PANEL_FITTER_A:
                return "PIPE_PANEL_FITTER_A";
        case POWER_DOMAIN_PIPE_PANEL_FITTER_B:
                return "PIPE_PANEL_FITTER_B";
        case POWER_DOMAIN_PIPE_PANEL_FITTER_C:
                return "PIPE_PANEL_FITTER_C";
        case POWER_DOMAIN_PIPE_PANEL_FITTER_D:
                return "PIPE_PANEL_FITTER_D";
        case POWER_DOMAIN_TRANSCODER_A:
                return "TRANSCODER_A";
        case POWER_DOMAIN_TRANSCODER_B:
                return "TRANSCODER_B";
        case POWER_DOMAIN_TRANSCODER_C:
                return "TRANSCODER_C";
        case POWER_DOMAIN_TRANSCODER_D:
                return "TRANSCODER_D";
        case POWER_DOMAIN_TRANSCODER_EDP:
                return "TRANSCODER_EDP";
        case POWER_DOMAIN_TRANSCODER_DSI_A:
                return "TRANSCODER_DSI_A";
        case POWER_DOMAIN_TRANSCODER_DSI_C:
                return "TRANSCODER_DSI_C";
        case POWER_DOMAIN_TRANSCODER_VDSC_PW2:
                return "TRANSCODER_VDSC_PW2";
        case POWER_DOMAIN_PORT_DDI_LANES_A:
                return "PORT_DDI_LANES_A";
        case POWER_DOMAIN_PORT_DDI_LANES_B:
                return "PORT_DDI_LANES_B";
        case POWER_DOMAIN_PORT_DDI_LANES_C:
                return "PORT_DDI_LANES_C";
        case POWER_DOMAIN_PORT_DDI_LANES_D:
                return "PORT_DDI_LANES_D";
        case POWER_DOMAIN_PORT_DDI_LANES_E:
                return "PORT_DDI_LANES_E";
        case POWER_DOMAIN_PORT_DDI_LANES_F:
                return "PORT_DDI_LANES_F";
        case POWER_DOMAIN_PORT_DDI_LANES_TC1:
                return "PORT_DDI_LANES_TC1";
        case POWER_DOMAIN_PORT_DDI_LANES_TC2:
                return "PORT_DDI_LANES_TC2";
        case POWER_DOMAIN_PORT_DDI_LANES_TC3:
                return "PORT_DDI_LANES_TC3";
        case POWER_DOMAIN_PORT_DDI_LANES_TC4:
                return "PORT_DDI_LANES_TC4";
        case POWER_DOMAIN_PORT_DDI_LANES_TC5:
                return "PORT_DDI_LANES_TC5";
        case POWER_DOMAIN_PORT_DDI_LANES_TC6:
                return "PORT_DDI_LANES_TC6";
        case POWER_DOMAIN_PORT_DDI_IO_A:
                return "PORT_DDI_IO_A";
        case POWER_DOMAIN_PORT_DDI_IO_B:
                return "PORT_DDI_IO_B";
        case POWER_DOMAIN_PORT_DDI_IO_C:
                return "PORT_DDI_IO_C";
        case POWER_DOMAIN_PORT_DDI_IO_D:
                return "PORT_DDI_IO_D";
        case POWER_DOMAIN_PORT_DDI_IO_E:
                return "PORT_DDI_IO_E";
        case POWER_DOMAIN_PORT_DDI_IO_F:
                return "PORT_DDI_IO_F";
        case POWER_DOMAIN_PORT_DDI_IO_TC1:
                return "PORT_DDI_IO_TC1";
        case POWER_DOMAIN_PORT_DDI_IO_TC2:
                return "PORT_DDI_IO_TC2";
        case POWER_DOMAIN_PORT_DDI_IO_TC3:
                return "PORT_DDI_IO_TC3";
        case POWER_DOMAIN_PORT_DDI_IO_TC4:
                return "PORT_DDI_IO_TC4";
        case POWER_DOMAIN_PORT_DDI_IO_TC5:
                return "PORT_DDI_IO_TC5";
        case POWER_DOMAIN_PORT_DDI_IO_TC6:
                return "PORT_DDI_IO_TC6";
        case POWER_DOMAIN_PORT_DSI:
                return "PORT_DSI";
        case POWER_DOMAIN_PORT_CRT:
                return "PORT_CRT";
        case POWER_DOMAIN_PORT_OTHER:
                return "PORT_OTHER";
        case POWER_DOMAIN_VGA:
                return "VGA";
        case POWER_DOMAIN_AUDIO_MMIO:
                return "AUDIO_MMIO";
        case POWER_DOMAIN_AUDIO_PLAYBACK:
                return "AUDIO_PLAYBACK";
        case POWER_DOMAIN_AUX_IO_A:
                return "AUX_IO_A";
        case POWER_DOMAIN_AUX_IO_B:
                return "AUX_IO_B";
        case POWER_DOMAIN_AUX_IO_C:
                return "AUX_IO_C";
        case POWER_DOMAIN_AUX_IO_D:
                return "AUX_IO_D";
        case POWER_DOMAIN_AUX_IO_E:
                return "AUX_IO_E";
        case POWER_DOMAIN_AUX_IO_F:
                return "AUX_IO_F";
        case POWER_DOMAIN_AUX_A:
                return "AUX_A";
        case POWER_DOMAIN_AUX_B:
                return "AUX_B";
        case POWER_DOMAIN_AUX_C:
                return "AUX_C";
        case POWER_DOMAIN_AUX_D:
                return "AUX_D";
        case POWER_DOMAIN_AUX_E:
                return "AUX_E";
        case POWER_DOMAIN_AUX_F:
                return "AUX_F";
        case POWER_DOMAIN_AUX_USBC1:
                return "AUX_USBC1";
        case POWER_DOMAIN_AUX_USBC2:
                return "AUX_USBC2";
        case POWER_DOMAIN_AUX_USBC3:
                return "AUX_USBC3";
        case POWER_DOMAIN_AUX_USBC4:
                return "AUX_USBC4";
        case POWER_DOMAIN_AUX_USBC5:
                return "AUX_USBC5";
        case POWER_DOMAIN_AUX_USBC6:
                return "AUX_USBC6";
        case POWER_DOMAIN_AUX_TBT1:
                return "AUX_TBT1";
        case POWER_DOMAIN_AUX_TBT2:
                return "AUX_TBT2";
        case POWER_DOMAIN_AUX_TBT3:
                return "AUX_TBT3";
        case POWER_DOMAIN_AUX_TBT4:
                return "AUX_TBT4";
        case POWER_DOMAIN_AUX_TBT5:
                return "AUX_TBT5";
        case POWER_DOMAIN_AUX_TBT6:
                return "AUX_TBT6";
        case POWER_DOMAIN_GMBUS:
                return "GMBUS";
        case POWER_DOMAIN_INIT:
                return "INIT";
        case POWER_DOMAIN_GT_IRQ:
                return "GT_IRQ";
        case POWER_DOMAIN_DC_OFF:
                return "DC_OFF";
        case POWER_DOMAIN_TC_COLD_OFF:
                return "TC_COLD_OFF";
        default:
                MISSING_CASE(domain);
                return "?";
        }
}

static bool __intel_display_power_is_enabled(struct intel_display *display,
                                             enum intel_display_power_domain domain)
{
        struct i915_power_well *power_well;
        bool is_enabled;

        if (intel_display_rpm_suspended(display))
                return false;

        is_enabled = true;

        for_each_power_domain_well_reverse(display, power_well, domain) {
                if (intel_power_well_is_always_on(power_well))
                        continue;

                if (!intel_power_well_is_enabled_cached(power_well)) {
                        is_enabled = false;
                        break;
                }
        }

        return is_enabled;
}

/**
 * intel_display_power_is_enabled - check for a power domain
 * @display: display device instance
 * @domain: power domain to check
 *
 * This function can be used to check the hw power domain state. It is mostly
 * used in hardware state readout functions. Everywhere else code should rely
 * upon explicit power domain reference counting to ensure that the hardware
 * block is powered up before accessing it.
 *
 * Callers must hold the relevant modesetting locks to ensure that concurrent
 * threads can't disable the power well while the caller tries to read a few
 * registers.
 *
 * Returns:
 * True when the power domain is enabled, false otherwise.
 */
bool intel_display_power_is_enabled(struct intel_display *display,
                                    enum intel_display_power_domain domain)
{
        struct i915_power_domains *power_domains = &display->power.domains;
        bool ret;

        mutex_lock(&power_domains->lock);
        ret = __intel_display_power_is_enabled(display, domain);
        mutex_unlock(&power_domains->lock);

        return ret;
}
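
/*
 * Example (illustrative sketch only, not part of the driver): hardware state
 * readout is the intended user of intel_display_power_is_enabled(). With the
 * relevant modeset locks held, a readout helper would typically do:
 *
 *	if (intel_display_power_is_enabled(display, POWER_DOMAIN_PIPE_A)) {
 *		... read PIPE_A registers and fill in the state ...
 *	}
 *
 * Any other code must instead hold an explicit reference obtained via
 * intel_display_power_get() before touching the hardware.
 */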

static u32
sanitize_target_dc_state(struct intel_display *display,
                         u32 target_dc_state)
{
        struct i915_power_domains *power_domains = &display->power.domains;
        static const u32 states[] = {
                DC_STATE_EN_UPTO_DC6,
                DC_STATE_EN_UPTO_DC5,
                DC_STATE_EN_DC3CO,
                DC_STATE_DISABLE,
        };
        int i;

        for (i = 0; i < ARRAY_SIZE(states) - 1; i++) {
                if (target_dc_state != states[i])
                        continue;

                if (power_domains->allowed_dc_mask & target_dc_state)
                        break;

                target_dc_state = states[i + 1];
        }

        return target_dc_state;
}
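
/*
 * Worked example for sanitize_target_dc_state(): if the caller requests
 * DC_STATE_EN_UPTO_DC6 but allowed_dc_mask only contains DC_STATE_EN_UPTO_DC5,
 * the loop matches states[0], finds it disallowed and degrades the target to
 * states[1] (DC_STATE_EN_UPTO_DC5), which is allowed and therefore returned.
 * A target that is never allowed degrades step by step to DC_STATE_DISABLE.
 */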

/**
 * intel_display_power_set_target_dc_state - Set target dc state.
 * @display: display device
 * @state: state which needs to be set as target_dc_state.
 *
 * This function sets the "DC off" power well target_dc_state; based upon
 * this target_dc_state, the "DC off" power well will enable the desired
 * DC state.
 */
void intel_display_power_set_target_dc_state(struct intel_display *display,
                                             u32 state)
{
        struct i915_power_well *power_well;
        bool dc_off_enabled;
        struct i915_power_domains *power_domains = &display->power.domains;

        mutex_lock(&power_domains->lock);
        power_well = lookup_power_well(display, SKL_DISP_DC_OFF);

        if (drm_WARN_ON(display->drm, !power_well))
                goto unlock;

        state = sanitize_target_dc_state(display, state);

        if (state == power_domains->target_dc_state)
                goto unlock;

        dc_off_enabled = intel_power_well_is_enabled(display, power_well);
        /*
         * If the DC off power well is currently disabled, enable and then
         * disable it to make the new target DC state take effect.
         */
        if (!dc_off_enabled)
                intel_power_well_enable(display, power_well);

        power_domains->target_dc_state = state;

        if (!dc_off_enabled)
                intel_power_well_disable(display, power_well);

unlock:
        mutex_unlock(&power_domains->lock);
}

/**
 * intel_display_power_get_current_dc_state - Get the current DC state.
 * @display: display device
 *
 * This function returns the currently effective DC state: DC_STATE_DISABLE
 * while the "DC off" power well is enabled (i.e. DC states are blocked),
 * otherwise the target_dc_state that the "DC off" power well will allow.
 */
u32 intel_display_power_get_current_dc_state(struct intel_display *display)
{
        struct i915_power_well *power_well;
        struct i915_power_domains *power_domains = &display->power.domains;
        u32 current_dc_state = DC_STATE_DISABLE;

        mutex_lock(&power_domains->lock);
        power_well = lookup_power_well(display, SKL_DISP_DC_OFF);

        if (drm_WARN_ON(display->drm, !power_well))
                goto unlock;

        current_dc_state = intel_power_well_is_enabled(display, power_well) ?
                DC_STATE_DISABLE : power_domains->target_dc_state;

unlock:
        mutex_unlock(&power_domains->lock);

        return current_dc_state;
}

static void __async_put_domains_mask(struct i915_power_domains *power_domains,
                                     struct intel_power_domain_mask *mask)
{
        bitmap_or(mask->bits,
                  power_domains->async_put_domains[0].bits,
                  power_domains->async_put_domains[1].bits,
                  POWER_DOMAIN_NUM);
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)

static bool
assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
{
        struct intel_display *display = container_of(power_domains,
                                                     struct intel_display,
                                                     power.domains);

        return !drm_WARN_ON(display->drm,
                            bitmap_intersects(power_domains->async_put_domains[0].bits,
                                              power_domains->async_put_domains[1].bits,
                                              POWER_DOMAIN_NUM));
}

static bool
__async_put_domains_state_ok(struct i915_power_domains *power_domains)
{
        struct intel_display *display = container_of(power_domains,
                                                     struct intel_display,
                                                     power.domains);
        struct intel_power_domain_mask async_put_mask;
        enum intel_display_power_domain domain;
        bool err = false;

        err |= !assert_async_put_domain_masks_disjoint(power_domains);
        __async_put_domains_mask(power_domains, &async_put_mask);
        err |= drm_WARN_ON(display->drm,
                           !!power_domains->async_put_wakeref !=
                           !bitmap_empty(async_put_mask.bits, POWER_DOMAIN_NUM));

        for_each_power_domain(domain, &async_put_mask)
                err |= drm_WARN_ON(display->drm,
                                   power_domains->domain_use_count[domain] != 1);

        return !err;
}

static void print_power_domains(struct i915_power_domains *power_domains,
                                const char *prefix, struct intel_power_domain_mask *mask)
{
        struct intel_display *display = container_of(power_domains,
                                                     struct intel_display,
                                                     power.domains);
        enum intel_display_power_domain domain;

        drm_dbg_kms(display->drm, "%s (%d):\n", prefix, bitmap_weight(mask->bits, POWER_DOMAIN_NUM));
        for_each_power_domain(domain, mask)
                drm_dbg_kms(display->drm, "%s use_count %d\n",
                            intel_display_power_domain_str(domain),
                            power_domains->domain_use_count[domain]);
}

static void
print_async_put_domains_state(struct i915_power_domains *power_domains)
{
        struct intel_display *display = container_of(power_domains,
                                                     struct intel_display,
                                                     power.domains);

        drm_dbg_kms(display->drm, "async_put_wakeref: %s\n",
                    str_yes_no(power_domains->async_put_wakeref));

        print_power_domains(power_domains, "async_put_domains[0]",
                            &power_domains->async_put_domains[0]);
        print_power_domains(power_domains, "async_put_domains[1]",
                            &power_domains->async_put_domains[1]);
}

static void
verify_async_put_domains_state(struct i915_power_domains *power_domains)
{
        if (!__async_put_domains_state_ok(power_domains))
                print_async_put_domains_state(power_domains);
}

#else

static void
assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
{
}

static void
verify_async_put_domains_state(struct i915_power_domains *power_domains)
{
}

#endif /* CONFIG_DRM_I915_DEBUG_RUNTIME_PM */

static void async_put_domains_mask(struct i915_power_domains *power_domains,
                                   struct intel_power_domain_mask *mask)
{
        assert_async_put_domain_masks_disjoint(power_domains);

        __async_put_domains_mask(power_domains, mask);
}

static void
async_put_domains_clear_domain(struct i915_power_domains *power_domains,
                               enum intel_display_power_domain domain)
{
        assert_async_put_domain_masks_disjoint(power_domains);

        clear_bit(domain, power_domains->async_put_domains[0].bits);
        clear_bit(domain, power_domains->async_put_domains[1].bits);
}

static void
cancel_async_put_work(struct i915_power_domains *power_domains, bool sync)
{
        if (sync)
                cancel_delayed_work_sync(&power_domains->async_put_work);
        else
                cancel_delayed_work(&power_domains->async_put_work);

        power_domains->async_put_next_delay = 0;
}

static bool
intel_display_power_grab_async_put_ref(struct intel_display *display,
                                       enum intel_display_power_domain domain)
{
        struct i915_power_domains *power_domains = &display->power.domains;
        struct intel_power_domain_mask async_put_mask;
        bool ret = false;

        async_put_domains_mask(power_domains, &async_put_mask);
        if (!test_bit(domain, async_put_mask.bits))
                goto out_verify;

        async_put_domains_clear_domain(power_domains, domain);

        ret = true;

        async_put_domains_mask(power_domains, &async_put_mask);
        if (!bitmap_empty(async_put_mask.bits, POWER_DOMAIN_NUM))
                goto out_verify;

        cancel_async_put_work(power_domains, false);
        intel_display_rpm_put_raw(display,
                                  fetch_and_zero(&power_domains->async_put_wakeref));
out_verify:
        verify_async_put_domains_state(power_domains);

        return ret;
}

static void
__intel_display_power_get_domain(struct intel_display *display,
                                 enum intel_display_power_domain domain)
{
        struct i915_power_domains *power_domains = &display->power.domains;
        struct i915_power_well *power_well;

        if (intel_display_power_grab_async_put_ref(display, domain))
                return;

        for_each_power_domain_well(display, power_well, domain)
                intel_power_well_get(display, power_well);

        power_domains->domain_use_count[domain]++;
}

/**
 * intel_display_power_get - grab a power domain reference
 * @display: display device instance
 * @domain: power domain to reference
 *
 * This function grabs a power domain reference for @domain and ensures that the
 * power domain and all its parents are powered up. Therefore users should only
 * grab a reference to the innermost power domain they need.
 *
 * Any power domain reference obtained by this function must have a symmetric
 * call to intel_display_power_put() to release the reference again.
 */
intel_wakeref_t intel_display_power_get(struct intel_display *display,
                                        enum intel_display_power_domain domain)
{
        struct i915_power_domains *power_domains = &display->power.domains;
        struct ref_tracker *wakeref;

        wakeref = intel_display_rpm_get(display);

        mutex_lock(&power_domains->lock);
        __intel_display_power_get_domain(display, domain);
        mutex_unlock(&power_domains->lock);

        return wakeref;
}
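
/*
 * Example (illustrative sketch only, not part of the driver): the canonical
 * get/put pairing around a hardware access. The returned wakeref must be
 * handed back to intel_display_power_put():
 *
 *	intel_wakeref_t wakeref;
 *
 *	wakeref = intel_display_power_get(display, POWER_DOMAIN_AUDIO_MMIO);
 *	... access the audio registers ...
 *	intel_display_power_put(display, POWER_DOMAIN_AUDIO_MMIO, wakeref);
 */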

/**
 * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
 * @display: display device instance
 * @domain: power domain to reference
 *
 * This function grabs a power domain reference for @domain only if the domain
 * is already enabled, and ensures that it stays enabled for as long as the
 * reference is held; it does not power up the domain itself. Users should
 * only grab a reference to the innermost power domain they need.
 *
 * Any power domain reference obtained by this function must have a symmetric
 * call to intel_display_power_put() to release the reference again.
 *
 * Returns:
 * A wakeref on success, NULL if the power domain (or runtime PM) was not
 * enabled.
 */
intel_wakeref_t
intel_display_power_get_if_enabled(struct intel_display *display,
                                   enum intel_display_power_domain domain)
{
        struct i915_power_domains *power_domains = &display->power.domains;
        struct ref_tracker *wakeref;
        bool is_enabled;

        wakeref = intel_display_rpm_get_if_in_use(display);
        if (!wakeref)
                return NULL;

        mutex_lock(&power_domains->lock);

        if (__intel_display_power_is_enabled(display, domain)) {
                __intel_display_power_get_domain(display, domain);
                is_enabled = true;
        } else {
                is_enabled = false;
        }

        mutex_unlock(&power_domains->lock);

        if (!is_enabled) {
                intel_display_rpm_put(display, wakeref);
                wakeref = NULL;
        }

        return wakeref;
}
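
/*
 * Example (illustrative sketch only, not part of the driver): an
 * opportunistic access that must not power anything up, e.g. a status query:
 *
 *	intel_wakeref_t wakeref;
 *
 *	wakeref = intel_display_power_get_if_enabled(display, POWER_DOMAIN_AUX_A);
 *	if (!wakeref)
 *		return false;
 *	... read the AUX registers ...
 *	intel_display_power_put(display, POWER_DOMAIN_AUX_A, wakeref);
 *
 * A NULL return means the domain was off and the access must be skipped.
 */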

static void
__intel_display_power_put_domain(struct intel_display *display,
                                 enum intel_display_power_domain domain)
{
        struct i915_power_domains *power_domains = &display->power.domains;
        struct i915_power_well *power_well;
        const char *name = intel_display_power_domain_str(domain);
        struct intel_power_domain_mask async_put_mask;

        drm_WARN(display->drm, !power_domains->domain_use_count[domain],
                 "Use count on domain %s is already zero\n",
                 name);
        async_put_domains_mask(power_domains, &async_put_mask);
        drm_WARN(display->drm,
                 test_bit(domain, async_put_mask.bits),
                 "Async disabling of domain %s is pending\n",
                 name);

        power_domains->domain_use_count[domain]--;

        for_each_power_domain_well_reverse(display, power_well, domain)
                intel_power_well_put(display, power_well);
}

static void __intel_display_power_put(struct intel_display *display,
                                      enum intel_display_power_domain domain)
{
        struct i915_power_domains *power_domains = &display->power.domains;

        mutex_lock(&power_domains->lock);
        __intel_display_power_put_domain(display, domain);
        mutex_unlock(&power_domains->lock);
}

static void
queue_async_put_domains_work(struct i915_power_domains *power_domains,
                             intel_wakeref_t wakeref,
                             int delay_ms)
{
        struct intel_display *display = container_of(power_domains,
                                                     struct intel_display,
                                                     power.domains);
        drm_WARN_ON(display->drm, power_domains->async_put_wakeref);
        power_domains->async_put_wakeref = wakeref;
        drm_WARN_ON(display->drm, !queue_delayed_work(system_unbound_wq,
                                                      &power_domains->async_put_work,
                                                      msecs_to_jiffies(delay_ms)));
}

static void
release_async_put_domains(struct i915_power_domains *power_domains,
                          struct intel_power_domain_mask *mask)
{
        struct intel_display *display = container_of(power_domains,
                                                     struct intel_display,
                                                     power.domains);
        enum intel_display_power_domain domain;
        struct ref_tracker *wakeref;

        wakeref = intel_display_rpm_get_noresume(display);

        for_each_power_domain(domain, mask) {
                /* Clear before put, so put's sanity check is happy. */
                async_put_domains_clear_domain(power_domains, domain);
                __intel_display_power_put_domain(display, domain);
        }

        intel_display_rpm_put(display, wakeref);
}

static void
intel_display_power_put_async_work(struct work_struct *work)
{
        struct intel_display *display = container_of(work, struct intel_display,
                                                     power.domains.async_put_work.work);
        struct i915_power_domains *power_domains = &display->power.domains;
        struct ref_tracker *new_work_wakeref, *old_work_wakeref = NULL;

        new_work_wakeref = intel_display_rpm_get_raw(display);

        mutex_lock(&power_domains->lock);

        /*
         * Bail out if all the domain refs pending to be released were grabbed
         * by subsequent gets or a flush_work.
         */
        old_work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
        if (!old_work_wakeref)
                goto out_verify;

        release_async_put_domains(power_domains,
                                  &power_domains->async_put_domains[0]);

        /*
         * Cancel the work that got queued after this one got dequeued,
         * since here we released the corresponding async-put reference.
         */
        cancel_async_put_work(power_domains, false);

        /* Requeue the work if more domains were async put meanwhile. */
        if (!bitmap_empty(power_domains->async_put_domains[1].bits, POWER_DOMAIN_NUM)) {
                bitmap_copy(power_domains->async_put_domains[0].bits,
                            power_domains->async_put_domains[1].bits,
                            POWER_DOMAIN_NUM);
                bitmap_zero(power_domains->async_put_domains[1].bits,
                            POWER_DOMAIN_NUM);
                queue_async_put_domains_work(power_domains,
                                             fetch_and_zero(&new_work_wakeref),
                                             power_domains->async_put_next_delay);
                power_domains->async_put_next_delay = 0;
        }

out_verify:
        verify_async_put_domains_state(power_domains);

        mutex_unlock(&power_domains->lock);

        if (old_work_wakeref)
                intel_display_rpm_put_raw(display, old_work_wakeref);
        if (new_work_wakeref)
                intel_display_rpm_put_raw(display, new_work_wakeref);
}

/**
 * __intel_display_power_put_async - release a power domain reference asynchronously
 * @display: display device instance
 * @domain: power domain to reference
 * @wakeref: wakeref acquired for the reference that is being released
 * @delay_ms: delay of powering down the power domain
 *
 * This function drops the power domain reference obtained by
 * intel_display_power_get*() and schedules a work to power down the
 * corresponding hardware block if this is the last reference.
 * The power down is delayed by @delay_ms if this is >= 0, or by a default
 * 100 ms otherwise.
 */
void __intel_display_power_put_async(struct intel_display *display,
                                     enum intel_display_power_domain domain,
                                     intel_wakeref_t wakeref,
                                     int delay_ms)
{
        struct i915_power_domains *power_domains = &display->power.domains;
        struct ref_tracker *work_wakeref;

        work_wakeref = intel_display_rpm_get_raw(display);

        delay_ms = delay_ms >= 0 ? delay_ms : 100;

        mutex_lock(&power_domains->lock);

        if (power_domains->domain_use_count[domain] > 1) {
                __intel_display_power_put_domain(display, domain);

                goto out_verify;
        }

        drm_WARN_ON(display->drm, power_domains->domain_use_count[domain] != 1);

        /* Let a pending work requeue itself or queue a new one. */
        if (power_domains->async_put_wakeref) {
                set_bit(domain, power_domains->async_put_domains[1].bits);
                power_domains->async_put_next_delay = max(power_domains->async_put_next_delay,
                                                          delay_ms);
        } else {
                set_bit(domain, power_domains->async_put_domains[0].bits);
                queue_async_put_domains_work(power_domains,
                                             fetch_and_zero(&work_wakeref),
                                             delay_ms);
        }

out_verify:
        verify_async_put_domains_state(power_domains);

        mutex_unlock(&power_domains->lock);

        if (work_wakeref)
                intel_display_rpm_put_raw(display, work_wakeref);

        intel_display_rpm_put(display, wakeref);
}
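
/*
 * Example (illustrative sketch only): dropping what is likely the last
 * reference from a hot path. The domain is kept enabled for the grace period
 * (100 ms by default), so an immediately following intel_display_power_get()
 * avoids a full power down/up cycle:
 *
 *	intel_display_power_put_async(display, POWER_DOMAIN_AUX_A, wakeref);
 *
 * intel_display_power_put_async() is the wrapper around this function
 * declared in intel_display_power.h.
 */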

/**
 * intel_display_power_flush_work - flushes the async display power disabling work
 * @display: display device instance
 *
 * Flushes any pending work that was scheduled by a preceding
 * intel_display_power_put_async() call, completing the disabling of the
 * corresponding power domains.
 *
 * Note that the work handler function may still be running after this
 * function returns; to ensure that the work handler isn't running use
 * intel_display_power_flush_work_sync() instead.
 */
void intel_display_power_flush_work(struct intel_display *display)
{
        struct i915_power_domains *power_domains = &display->power.domains;
        struct intel_power_domain_mask async_put_mask;
        intel_wakeref_t work_wakeref;

        mutex_lock(&power_domains->lock);

        work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
        if (!work_wakeref)
                goto out_verify;

        async_put_domains_mask(power_domains, &async_put_mask);
        release_async_put_domains(power_domains, &async_put_mask);
        cancel_async_put_work(power_domains, false);

out_verify:
        verify_async_put_domains_state(power_domains);

        mutex_unlock(&power_domains->lock);

        if (work_wakeref)
                intel_display_rpm_put_raw(display, work_wakeref);
}

/**
 * intel_display_power_flush_work_sync - flushes and syncs the async display power disabling work
 * @display: display device instance
 *
 * Like intel_display_power_flush_work(), but also ensure that the work
 * handler function is not running any more when this function returns.
 */
static void
intel_display_power_flush_work_sync(struct intel_display *display)
{
        struct i915_power_domains *power_domains = &display->power.domains;

        intel_display_power_flush_work(display);
        cancel_async_put_work(power_domains, true);

        verify_async_put_domains_state(power_domains);

        drm_WARN_ON(display->drm, power_domains->async_put_wakeref);
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
/**
 * intel_display_power_put - release a power domain reference
 * @display: display device instance
 * @domain: power domain to reference
 * @wakeref: wakeref acquired for the reference that is being released
 *
 * This function drops the power domain reference obtained by
 * intel_display_power_get() and might power down the corresponding hardware
 * block right away if this is the last reference.
 */
void intel_display_power_put(struct intel_display *display,
                             enum intel_display_power_domain domain,
                             intel_wakeref_t wakeref)
{
        __intel_display_power_put(display, domain);
        intel_display_rpm_put(display, wakeref);
}
#else
/**
 * intel_display_power_put_unchecked - release an unchecked power domain reference
 * @display: display device instance
 * @domain: power domain to reference
 *
 * This function drops the power domain reference obtained by
 * intel_display_power_get() and might power down the corresponding hardware
 * block right away if this is the last reference.
 *
 * This function is only for the power domain code's internal use to suppress
 * wakeref tracking when the corresponding debug kconfig option is disabled;
 * it should not be used otherwise.
 */
void intel_display_power_put_unchecked(struct intel_display *display,
                                       enum intel_display_power_domain domain)
{
        __intel_display_power_put(display, domain);
        intel_display_rpm_put_unchecked(display);
}
#endif

void
intel_display_power_get_in_set(struct intel_display *display,
                               struct intel_display_power_domain_set *power_domain_set,
                               enum intel_display_power_domain domain)
{
        intel_wakeref_t __maybe_unused wf;

        drm_WARN_ON(display->drm, test_bit(domain, power_domain_set->mask.bits));

        wf = intel_display_power_get(display, domain);
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
        power_domain_set->wakerefs[domain] = wf;
#endif
        set_bit(domain, power_domain_set->mask.bits);
}

bool
intel_display_power_get_in_set_if_enabled(struct intel_display *display,
                                          struct intel_display_power_domain_set *power_domain_set,
                                          enum intel_display_power_domain domain)
{
        intel_wakeref_t wf;

        drm_WARN_ON(display->drm, test_bit(domain, power_domain_set->mask.bits));

        wf = intel_display_power_get_if_enabled(display, domain);
        if (!wf)
                return false;

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
        power_domain_set->wakerefs[domain] = wf;
#endif
        set_bit(domain, power_domain_set->mask.bits);

        return true;
}

void
intel_display_power_put_mask_in_set(struct intel_display *display,
                                    struct intel_display_power_domain_set *power_domain_set,
                                    struct intel_power_domain_mask *mask)
{
        enum intel_display_power_domain domain;

        drm_WARN_ON(display->drm,
                    !bitmap_subset(mask->bits, power_domain_set->mask.bits, POWER_DOMAIN_NUM));

        for_each_power_domain(domain, mask) {
                intel_wakeref_t __maybe_unused wf = INTEL_WAKEREF_DEF;

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
                wf = fetch_and_zero(&power_domain_set->wakerefs[domain]);
#endif
                intel_display_power_put(display, domain, wf);
                clear_bit(domain, power_domain_set->mask.bits);
        }
}
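
/*
 * Example (illustrative sketch only): power domain sets let readout code
 * collect an arbitrary group of domains and release them in one go, with the
 * per-domain wakerefs tracked inside the set:
 *
 *	struct intel_display_power_domain_set set = {};
 *
 *	if (intel_display_power_get_in_set_if_enabled(display, &set,
 *						      POWER_DOMAIN_PIPE_A))
 *		... read out pipe A state ...
 *
 *	intel_display_power_put_all_in_set(display, &set);
 *
 * intel_display_power_put_all_in_set() is the helper declared in
 * intel_display_power.h that puts every domain in the set's mask via
 * intel_display_power_put_mask_in_set().
 */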

static int
sanitize_disable_power_well_option(int disable_power_well)
{
        if (disable_power_well >= 0)
                return !!disable_power_well;

        return 1;
}

static u32 get_allowed_dc_mask(struct intel_display *display, int enable_dc)
{
        u32 mask;
        int requested_dc;
        int max_dc;

        if (!HAS_DISPLAY(display))
                return 0;

        if (DISPLAY_VER(display) >= 20)
                max_dc = 2;
        else if (display->platform.dg2)
                max_dc = 1;
        else if (display->platform.dg1)
                max_dc = 3;
        else if (DISPLAY_VER(display) >= 12)
                max_dc = 4;
        else if (display->platform.geminilake || display->platform.broxton)
                max_dc = 1;
        else if (DISPLAY_VER(display) >= 9)
                max_dc = 2;
        else
                max_dc = 0;

        /*
         * DC9 has a separate HW flow from the rest of the DC states,
         * not depending on the DMC firmware. It's needed by system
         * suspend/resume, so allow it unconditionally.
         */
        mask = display->platform.geminilake || display->platform.broxton ||
                DISPLAY_VER(display) >= 11 ? DC_STATE_EN_DC9 : 0;

        if (!display->params.disable_power_well)
                max_dc = 0;

        if (enable_dc >= 0 && enable_dc <= max_dc) {
                requested_dc = enable_dc;
        } else if (enable_dc == -1) {
                requested_dc = max_dc;
        } else if (enable_dc > max_dc && enable_dc <= 4) {
                drm_dbg_kms(display->drm,
                            "Adjusting requested max DC state (%d->%d)\n",
                            enable_dc, max_dc);
                requested_dc = max_dc;
        } else {
                drm_err(display->drm,
                        "Unexpected value for enable_dc (%d)\n", enable_dc);
                requested_dc = max_dc;
        }

        switch (requested_dc) {
        case 4:
                mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6;
                break;
        case 3:
                mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC5;
                break;
        case 2:
                mask |= DC_STATE_EN_UPTO_DC6;
                break;
        case 1:
                mask |= DC_STATE_EN_UPTO_DC5;
                break;
        }

        drm_dbg_kms(display->drm, "Allowed DC state mask %02x\n", mask);

        return mask;
}
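
/*
 * Worked example for get_allowed_dc_mask(): on a DISPLAY_VER 12 platform
 * (max_dc = 4) with the default enable_dc=-1 module parameter, requested_dc
 * becomes 4 and the function returns
 * DC_STATE_EN_DC9 | DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6. With the
 * disable_power_well parameter set to 0, max_dc drops to 0 and only DC9
 * (which has its own HW flow) remains in the mask.
 */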

/**
 * intel_power_domains_init - initializes the power domain structures
 * @display: display device instance
 *
 * Initializes the power domain structures for @display depending upon the
 * supported platform.
 */
int intel_power_domains_init(struct intel_display *display)
{
        struct i915_power_domains *power_domains = &display->power.domains;

        display->params.disable_power_well =
                sanitize_disable_power_well_option(display->params.disable_power_well);
        power_domains->allowed_dc_mask =
                get_allowed_dc_mask(display, display->params.enable_dc);

        power_domains->target_dc_state =
                sanitize_target_dc_state(display, DC_STATE_EN_UPTO_DC6);

        mutex_init(&power_domains->lock);

        INIT_DELAYED_WORK(&power_domains->async_put_work,
                          intel_display_power_put_async_work);

        return intel_display_power_map_init(power_domains);
}

/**
 * intel_power_domains_cleanup - clean up power domains resources
 * @display: display device instance
 *
 * Release any resources acquired by intel_power_domains_init()
 */
void intel_power_domains_cleanup(struct intel_display *display)
{
        intel_display_power_map_cleanup(&display->power.domains);
}

static void intel_power_domains_sync_hw(struct intel_display *display)
{
        struct i915_power_domains *power_domains = &display->power.domains;
        struct i915_power_well *power_well;

        mutex_lock(&power_domains->lock);
        for_each_power_well(display, power_well)
                intel_power_well_sync_hw(display, power_well);
        mutex_unlock(&power_domains->lock);
}

static void gen9_dbuf_slice_set(struct intel_display *display,
                                enum dbuf_slice slice, bool enable)
{
        i915_reg_t reg = DBUF_CTL_S(slice);
        bool state;

        intel_de_rmw(display, reg, DBUF_POWER_REQUEST,
                     enable ? DBUF_POWER_REQUEST : 0);
        intel_de_posting_read(display, reg);
        udelay(10);

        state = intel_de_read(display, reg) & DBUF_POWER_STATE;
        drm_WARN(display->drm, enable != state,
                 "DBuf slice %d power %s timeout!\n",
                 slice, str_enable_disable(enable));
}

void gen9_dbuf_slices_update(struct intel_display *display,
                             u8 req_slices)
{
        struct i915_power_domains *power_domains = &display->power.domains;
        u8 slice_mask = DISPLAY_INFO(display)->dbuf.slice_mask;
        enum dbuf_slice slice;

        drm_WARN(display->drm, req_slices & ~slice_mask,
                 "Invalid set of dbuf slices (0x%x) requested (total dbuf slices 0x%x)\n",
                 req_slices, slice_mask);

        drm_dbg_kms(display->drm, "Updating dbuf slices to 0x%x\n",
                    req_slices);

        /*
         * This can run in parallel with, for example,
         * gen9_dc_off_power_well_enable() called from intel_dp_detect().
         * Without the lock, gen9_assert_dbuf_enabled() could observe the
         * registers already updated while the software state is not yet,
         * triggering a spurious assertion, hence the locking.
         */
        mutex_lock(&power_domains->lock);

        for_each_dbuf_slice(display, slice)
                gen9_dbuf_slice_set(display, slice, req_slices & BIT(slice));

        display->dbuf.enabled_slices = req_slices;

        mutex_unlock(&power_domains->lock);
}

static void gen9_dbuf_enable(struct intel_display *display)
{
        u8 slices_mask;

        display->dbuf.enabled_slices = intel_enabled_dbuf_slices_mask(display);

        slices_mask = BIT(DBUF_S1) | display->dbuf.enabled_slices;

        if (DISPLAY_VER(display) >= 14)
                intel_pmdemand_program_dbuf(display, slices_mask);

        /*
         * Just power up at least 1 slice; we will figure out later which
         * slices we have and what we need.
         */
        gen9_dbuf_slices_update(display, slices_mask);
}

static void gen9_dbuf_disable(struct intel_display *display)
{
        gen9_dbuf_slices_update(display, 0);

        if (DISPLAY_VER(display) >= 14)
                intel_pmdemand_program_dbuf(display, 0);
}

static void gen12_dbuf_slices_config(struct intel_display *display)
{
        enum dbuf_slice slice;

        for_each_dbuf_slice(display, slice)
                intel_de_rmw(display, DBUF_CTL_S(slice),
                             DBUF_TRACKER_STATE_SERVICE_MASK,
                             DBUF_TRACKER_STATE_SERVICE(8));
}

static void icl_mbus_init(struct intel_display *display)
{
        unsigned long abox_regs = DISPLAY_INFO(display)->abox_mask;
        u32 mask, val, i;

        if (display->platform.alderlake_p || DISPLAY_VER(display) >= 14)
                return;

        mask = MBUS_ABOX_BT_CREDIT_POOL1_MASK |
                MBUS_ABOX_BT_CREDIT_POOL2_MASK |
                MBUS_ABOX_B_CREDIT_MASK |
                MBUS_ABOX_BW_CREDIT_MASK;
        val = MBUS_ABOX_BT_CREDIT_POOL1(16) |
                MBUS_ABOX_BT_CREDIT_POOL2(16) |
                MBUS_ABOX_B_CREDIT(1) |
                MBUS_ABOX_BW_CREDIT(1);

        /*
         * gen12 platforms that use abox1 and abox2 for pixel data reads still
         * expect us to program the abox_ctl0 register as well, even though
         * we don't have to program other instance-0 registers like BW_BUDDY.
         */
        if (DISPLAY_VER(display) == 12)
                abox_regs |= BIT(0);

        for_each_set_bit(i, &abox_regs, BITS_PER_TYPE(abox_regs))
                intel_de_rmw(display, MBUS_ABOX_CTL(i), mask, val);
}

static void hsw_assert_cdclk(struct intel_display *display)
{
        u32 val = intel_de_read(display, LCPLL_CTL);

        /*
         * The LCPLL register should be turned on by the BIOS. For now
         * let's just check its state and print errors in case
         * something is wrong. Don't even try to turn it on.
         */

        if (val & LCPLL_CD_SOURCE_FCLK)
                drm_err(display->drm, "CDCLK source is not LCPLL\n");

        if (val & LCPLL_PLL_DISABLE)
                drm_err(display->drm, "LCPLL is disabled\n");

        if ((val & LCPLL_REF_MASK) != LCPLL_REF_NON_SSC)
                drm_err(display->drm, "LCPLL not using non-SSC reference\n");
}

static void assert_can_disable_lcpll(struct intel_display *display)
{
        struct drm_i915_private *dev_priv = to_i915(display->drm);
        struct intel_crtc *crtc;

        for_each_intel_crtc(display->drm, crtc)
                INTEL_DISPLAY_STATE_WARN(display, crtc->active,
                                         "CRTC for pipe %c enabled\n",
                                         pipe_name(crtc->pipe));

        INTEL_DISPLAY_STATE_WARN(display, intel_de_read(display, HSW_PWR_WELL_CTL2),
                                 "Display power well on\n");
        INTEL_DISPLAY_STATE_WARN(display,
                                 intel_de_read(display, SPLL_CTL) & SPLL_PLL_ENABLE,
                                 "SPLL enabled\n");
        INTEL_DISPLAY_STATE_WARN(display,
                                 intel_de_read(display, WRPLL_CTL(0)) & WRPLL_PLL_ENABLE,
                                 "WRPLL1 enabled\n");
        INTEL_DISPLAY_STATE_WARN(display,
                                 intel_de_read(display, WRPLL_CTL(1)) & WRPLL_PLL_ENABLE,
                                 "WRPLL2 enabled\n");
        INTEL_DISPLAY_STATE_WARN(display,
                                 intel_de_read(display, PP_STATUS(display, 0)) & PP_ON,
                                 "Panel power on\n");
        INTEL_DISPLAY_STATE_WARN(display,
                                 intel_de_read(display, BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
                                 "CPU PWM1 enabled\n");
        if (display->platform.haswell)
                INTEL_DISPLAY_STATE_WARN(display,
                                         intel_de_read(display, HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
                                         "CPU PWM2 enabled\n");
        INTEL_DISPLAY_STATE_WARN(display,
                                 intel_de_read(display, BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
                                 "PCH PWM1 enabled\n");
        INTEL_DISPLAY_STATE_WARN(display,
                                 (intel_de_read(display, UTIL_PIN_CTL) & (UTIL_PIN_ENABLE | UTIL_PIN_MODE_MASK)) == (UTIL_PIN_ENABLE | UTIL_PIN_MODE_PWM),
                                 "Utility pin enabled in PWM mode\n");
        INTEL_DISPLAY_STATE_WARN(display,
                                 intel_de_read(display, PCH_GTC_CTL) & PCH_GTC_ENABLE,
                                 "PCH GTC enabled\n");

        /*
         * In theory we can still leave IRQs enabled, as long as only the HPD
         * interrupts remain enabled. We used to check for that, but since it's
         * gen-specific and since we only disable LCPLL after we fully disable
         * the interrupts, the check below should be enough.
         */
        INTEL_DISPLAY_STATE_WARN(display, intel_irqs_enabled(dev_priv),
                                 "IRQs enabled\n");
}

static u32 hsw_read_dcomp(struct intel_display *display)
{
        if (display->platform.haswell)
                return intel_de_read(display, D_COMP_HSW);
        else
                return intel_de_read(display, D_COMP_BDW);
}

static void hsw_write_dcomp(struct intel_display *display, u32 val)
{
        if (display->platform.haswell) {
                if (intel_pcode_write(display->drm, GEN6_PCODE_WRITE_D_COMP, val))
                        drm_dbg_kms(display->drm, "Failed to write to D_COMP\n");
        } else {
                intel_de_write(display, D_COMP_BDW, val);
                intel_de_posting_read(display, D_COMP_BDW);
        }
}

/*
 * This function implements pieces of two sequences from BSpec:
 * - Sequence for display software to disable LCPLL
 * - Sequence for display software to allow package C8+
 * The steps implemented here are just the steps that actually touch the LCPLL
 * register. Callers should take care of disabling all the display engine
 * functions, doing the mode unset, fixing interrupts, etc.
 */
static void hsw_disable_lcpll(struct intel_display *display,
                              bool switch_to_fclk, bool allow_power_down)
{
        u32 val;
        int ret;

        assert_can_disable_lcpll(display);

        val = intel_de_read(display, LCPLL_CTL);

        if (switch_to_fclk) {
                val |= LCPLL_CD_SOURCE_FCLK;
                intel_de_write(display, LCPLL_CTL, val);

                ret = intel_de_wait_custom(display, LCPLL_CTL,
                                           LCPLL_CD_SOURCE_FCLK_DONE, LCPLL_CD_SOURCE_FCLK_DONE,
                                           1, 0, NULL);
                if (ret)
                        drm_err(display->drm, "Switching to FCLK failed\n");

                val = intel_de_read(display, LCPLL_CTL);
        }

        val |= LCPLL_PLL_DISABLE;
        intel_de_write(display, LCPLL_CTL, val);
        intel_de_posting_read(display, LCPLL_CTL);

        if (intel_de_wait_for_clear(display, LCPLL_CTL, LCPLL_PLL_LOCK, 1))
                drm_err(display->drm, "LCPLL still locked\n");

        val = hsw_read_dcomp(display);
        val |= D_COMP_COMP_DISABLE;
        hsw_write_dcomp(display, val);
        ndelay(100);

        ret = poll_timeout_us(val = hsw_read_dcomp(display),
                              (val & D_COMP_RCOMP_IN_PROGRESS) == 0,
                              100, 1000, false);
        if (ret)
                drm_err(display->drm, "D_COMP RCOMP still in progress\n");

        if (allow_power_down) {
                intel_de_rmw(display, LCPLL_CTL, 0, LCPLL_POWER_DOWN_ALLOW);
                intel_de_posting_read(display, LCPLL_CTL);
        }
}

/*
 * Fully restores LCPLL, disallowing power down and switching back to LCPLL
 * source.
 */
static void hsw_restore_lcpll(struct intel_display *display)
{
        struct drm_i915_private __maybe_unused *dev_priv = to_i915(display->drm);
        u32 val;
        int ret;

        val = intel_de_read(display, LCPLL_CTL);

        if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
                    LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
                return;

        /*
         * Make sure we're not on PC8 state before disabling PC8, otherwise
         * we'll hang the machine. To prevent PC8 state, just enable force_wake.
         */
        intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);

        if (val & LCPLL_POWER_DOWN_ALLOW) {
                val &= ~LCPLL_POWER_DOWN_ALLOW;
                intel_de_write(display, LCPLL_CTL, val);
                intel_de_posting_read(display, LCPLL_CTL);
        }

        val = hsw_read_dcomp(display);
        val |= D_COMP_COMP_FORCE;
        val &= ~D_COMP_COMP_DISABLE;
        hsw_write_dcomp(display, val);

        val = intel_de_read(display, LCPLL_CTL);
        val &= ~LCPLL_PLL_DISABLE;
        intel_de_write(display, LCPLL_CTL, val);

        if (intel_de_wait_for_set(display, LCPLL_CTL, LCPLL_PLL_LOCK, 5))
                drm_err(display->drm, "LCPLL not locked yet\n");

        if (val & LCPLL_CD_SOURCE_FCLK) {
                intel_de_rmw(display, LCPLL_CTL, LCPLL_CD_SOURCE_FCLK, 0);

                ret = intel_de_wait_custom(display, LCPLL_CTL,
                                           LCPLL_CD_SOURCE_FCLK_DONE, 0,
                                           1, 0, NULL);
                if (ret)
                        drm_err(display->drm,
                                "Switching back to LCPLL failed\n");
        }

        intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);

        intel_update_cdclk(display);
        intel_cdclk_dump_config(display, &display->cdclk.hw, "Current CDCLK");
}

/*
 * Package states C8 and deeper are really deep PC states that can only be
 * reached when all the devices on the system allow it, so even if the graphics
 * device allows PC8+, it doesn't mean the system will actually get to these
 * states. Our driver only allows PC8+ when going into runtime PM.
 *
 * The requirements for PC8+ are that all the outputs are disabled, the power
 * well is disabled and most interrupts are disabled, and these are also
 * requirements for runtime PM. When these conditions are met, we manually do
 * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
 * to Fclk. If we're in PC8+ and we get a non-hotplug interrupt, we can hard
 * hang the machine.
 *
 * When we really reach PC8 or deeper states (not just when we allow it) we lose
 * the state of some registers, so when we come back from PC8+ we need to
 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
 * need to take care of the registers kept by RC6. Notice that this happens even
 * if we don't put the device in PCI D3 state (which is what currently happens
 * because of the runtime PM support).
 *
 * For more, read "Display Sequences for Package C8" on the hardware
 * documentation.
 */
static void hsw_enable_pc8(struct intel_display *display)
{
        drm_dbg_kms(display->drm, "Enabling package C8+\n");

        if (HAS_PCH_LPT_LP(display))
                intel_de_rmw(display, SOUTH_DSPCLK_GATE_D,
                             PCH_LP_PARTITION_LEVEL_DISABLE, 0);

        lpt_disable_clkout_dp(display);
        hsw_disable_lcpll(display, true, true);
}

static void hsw_disable_pc8(struct intel_display *display)
{
        struct drm_i915_private __maybe_unused *dev_priv = to_i915(display->drm);

        drm_dbg_kms(display->drm, "Disabling package C8+\n");

        hsw_restore_lcpll(display);
        intel_init_pch_refclk(display);

        /* Many display registers don't survive PC8+ */
#ifdef I915 /* FIXME */
        intel_clock_gating_init(dev_priv);
#endif
}

static void intel_pch_reset_handshake(struct intel_display *display,
                                      bool enable)
{
        i915_reg_t reg;
        u32 reset_bits;

        if (display->platform.ivybridge) {
                reg = GEN7_MSG_CTL;
                reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK;
        } else {
                reg = HSW_NDE_RSTWRN_OPT;
                reset_bits = RESET_PCH_HANDSHAKE_ENABLE;
        }

        if (DISPLAY_VER(display) >= 14)
                reset_bits |= MTL_RESET_PICA_HANDSHAKE_EN;

        intel_de_rmw(display, reg, reset_bits, enable ? reset_bits : 0);
}

static void skl_display_core_init(struct intel_display *display,
                                  bool resume)
{
        struct i915_power_domains *power_domains = &display->power.domains;
        struct i915_power_well *well;

        gen9_set_dc_state(display, DC_STATE_DISABLE);

        /* enable PCH reset handshake */
        intel_pch_reset_handshake(display, !HAS_PCH_NOP(display));

        if (!HAS_DISPLAY(display))
                return;

        /* enable PG1 and Misc I/O */
        mutex_lock(&power_domains->lock);

        well = lookup_power_well(display, SKL_DISP_PW_1);
        intel_power_well_enable(display, well);

        well = lookup_power_well(display, SKL_DISP_PW_MISC_IO);
        intel_power_well_enable(display, well);

        mutex_unlock(&power_domains->lock);

        intel_cdclk_init_hw(display);

        gen9_dbuf_enable(display);

        if (resume)
                intel_dmc_load_program(display);
}

static void skl_display_core_uninit(struct intel_display *display)
{
        struct i915_power_domains *power_domains = &display->power.domains;
        struct i915_power_well *well;

        if (!HAS_DISPLAY(display))
                return;

        gen9_disable_dc_states(display);
        /* TODO: disable DMC program */

        gen9_dbuf_disable(display);

        intel_cdclk_uninit_hw(display);

        /* The spec doesn't call for removing the reset handshake flag */
        /* disable PG1 and Misc I/O */

        mutex_lock(&power_domains->lock);

        /*
         * BSpec says to keep the MISC IO power well enabled here, only
         * remove our request for power well 1.
         * Note that even though the driver's request is removed power well 1
         * may stay enabled after this due to DMC's own request on it.
         */
        well = lookup_power_well(display, SKL_DISP_PW_1);
        intel_power_well_disable(display, well);

        mutex_unlock(&power_domains->lock);

        usleep_range(10, 30); /* 10 us delay per Bspec */
}

static void bxt_display_core_init(struct intel_display *display, bool resume)
{
        struct i915_power_domains *power_domains = &display->power.domains;
        struct i915_power_well *well;

        gen9_set_dc_state(display, DC_STATE_DISABLE);

        /*
         * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
         * or else the reset will hang because there is no PCH to respond.
         * Move the handshake programming to initialization sequence.
         * Previously was left up to BIOS.
         */
        intel_pch_reset_handshake(display, false);

        if (!HAS_DISPLAY(display))
                return;

        /* Enable PG1 */
        mutex_lock(&power_domains->lock);

        well = lookup_power_well(display, SKL_DISP_PW_1);
        intel_power_well_enable(display, well);

        mutex_unlock(&power_domains->lock);

        intel_cdclk_init_hw(display);

        gen9_dbuf_enable(display);

        if (resume)
                intel_dmc_load_program(display);
}

static void bxt_display_core_uninit(struct intel_display *display)
{
        struct i915_power_domains *power_domains = &display->power.domains;
        struct i915_power_well *well;

        if (!HAS_DISPLAY(display))
                return;

        gen9_disable_dc_states(display);
        /* TODO: disable DMC program */

        gen9_dbuf_disable(display);

        intel_cdclk_uninit_hw(display);

        /* The spec doesn't call for removing the reset handshake flag */

        /*
         * Disable PW1 (PG1).
         * Note that even though the driver's request is removed power well 1
         * may stay enabled after this due to DMC's own request on it.
         */
        mutex_lock(&power_domains->lock);

        well = lookup_power_well(display, SKL_DISP_PW_1);
        intel_power_well_disable(display, well);

        mutex_unlock(&power_domains->lock);

        usleep_range(10, 30); /* 10 us delay per Bspec */
}
1585
1586 struct buddy_page_mask {
1587 u32 page_mask;
1588 u8 type;
1589 u8 num_channels;
1590 };
1591
1592 static const struct buddy_page_mask tgl_buddy_page_masks[] = {
1593 { .num_channels = 1, .type = INTEL_DRAM_DDR4, .page_mask = 0xF },
1594 { .num_channels = 1, .type = INTEL_DRAM_DDR5, .page_mask = 0xF },
1595 { .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1C },
1596 { .num_channels = 2, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x1C },
1597 { .num_channels = 2, .type = INTEL_DRAM_DDR4, .page_mask = 0x1F },
1598 { .num_channels = 2, .type = INTEL_DRAM_DDR5, .page_mask = 0x1E },
1599 { .num_channels = 4, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x38 },
1600 { .num_channels = 4, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x38 },
1601 {}
1602 };
1603
1604 static const struct buddy_page_mask wa_1409767108_buddy_page_masks[] = {
1605 { .num_channels = 1, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1 },
1606 { .num_channels = 1, .type = INTEL_DRAM_DDR4, .page_mask = 0x1 },
1607 { .num_channels = 1, .type = INTEL_DRAM_DDR5, .page_mask = 0x1 },
1608 { .num_channels = 1, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x1 },
1609 { .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x3 },
1610 { .num_channels = 2, .type = INTEL_DRAM_DDR4, .page_mask = 0x3 },
1611 { .num_channels = 2, .type = INTEL_DRAM_DDR5, .page_mask = 0x3 },
1612 { .num_channels = 2, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x3 },
1613 {}
1614 };
1615
tgl_bw_buddy_init(struct intel_display * display)1616 static void tgl_bw_buddy_init(struct intel_display *display)
1617 {
1618 const struct dram_info *dram_info = intel_dram_info(display->drm);
1619 const struct buddy_page_mask *table;
1620 unsigned long abox_mask = DISPLAY_INFO(display)->abox_mask;
1621 int config, i;
1622
1623 /* BW_BUDDY registers are not used on dgpu's beyond DG1 */
1624 if (display->platform.dgfx && !display->platform.dg1)
1625 return;
1626
1627 if (display->platform.alderlake_s ||
1628 (display->platform.rocketlake && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0)))
1629 /* Wa_1409767108 */
1630 table = wa_1409767108_buddy_page_masks;
1631 else
1632 table = tgl_buddy_page_masks;
1633
1634 for (config = 0; table[config].page_mask != 0; config++)
1635 if (table[config].num_channels == dram_info->num_channels &&
1636 table[config].type == dram_info->type)
1637 break;
1638
1639 if (table[config].page_mask == 0) {
1640 drm_dbg_kms(display->drm,
1641 "Unknown memory configuration; disabling address buddy logic.\n");
1642 for_each_set_bit(i, &abox_mask, BITS_PER_TYPE(abox_mask))
1643 intel_de_write(display, BW_BUDDY_CTL(i),
1644 BW_BUDDY_DISABLE);
1645 } else {
1646 for_each_set_bit(i, &abox_mask, BITS_PER_TYPE(abox_mask)) {
1647 intel_de_write(display, BW_BUDDY_PAGE_MASK(i),
1648 table[config].page_mask);
1649
1650 /* Wa_22010178259:tgl,dg1,rkl,adl-s */
1651 if (DISPLAY_VER(display) == 12)
1652 intel_de_rmw(display, BW_BUDDY_CTL(i),
1653 BW_BUDDY_TLB_REQ_TIMER_MASK,
1654 BW_BUDDY_TLB_REQ_TIMER(0x8));
1655 }
1656 }
1657 }
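
/*
 * Illustration only, not driver code: a minimal sketch of the sentinel-
 * terminated table walk above, assuming a hypothetical 2-channel DDR4
 * configuration. The loop stops at the matching { 2, INTEL_DRAM_DDR4, 0x1F }
 * entry, so each enabled ABOX instance gets BW_BUDDY_PAGE_MASK programmed to
 * 0x1F; a configuration with no match walks all the way to the {} terminator
 * (page_mask == 0) and the buddy logic is disabled instead:
 *
 *	const struct buddy_page_mask *entry;
 *
 *	for (entry = tgl_buddy_page_masks; entry->page_mask; entry++)
 *		if (entry->num_channels == 2 && entry->type == INTEL_DRAM_DDR4)
 *			break;
 *	// here entry->page_mask == 0x1F
 */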

static void icl_display_core_init(struct intel_display *display,
				  bool resume)
{
	struct i915_power_domains *power_domains = &display->power.domains;
	struct i915_power_well *well;

	gen9_set_dc_state(display, DC_STATE_DISABLE);

	/* Wa_14011294188:ehl,jsl,tgl,rkl,adl-s */
	if (INTEL_PCH_TYPE(display) >= PCH_TGP &&
	    INTEL_PCH_TYPE(display) < PCH_DG1)
		intel_de_rmw(display, SOUTH_DSPCLK_GATE_D, 0,
			     PCH_DPMGUNIT_CLOCK_GATE_DISABLE);

	/* 1. Enable PCH reset handshake. */
	intel_pch_reset_handshake(display, !HAS_PCH_NOP(display));

	if (!HAS_DISPLAY(display))
		return;

	/* 2. Initialize all combo phys */
	intel_combo_phy_init(display);

	/*
	 * 3. Enable Power Well 1 (PG1).
	 * The AUX IO power wells will be enabled on demand.
	 */
	mutex_lock(&power_domains->lock);
	well = lookup_power_well(display, SKL_DISP_PW_1);
	intel_power_well_enable(display, well);
	mutex_unlock(&power_domains->lock);

	if (DISPLAY_VER(display) == 14)
		intel_de_rmw(display, DC_STATE_EN,
			     HOLD_PHY_PG1_LATCH | HOLD_PHY_CLKREQ_PG1_LATCH, 0);

	/* 4. Enable CDCLK. */
	intel_cdclk_init_hw(display);

	if (DISPLAY_VER(display) == 12 || display->platform.dg2)
		gen12_dbuf_slices_config(display);

	/* 5. Enable DBUF. */
	gen9_dbuf_enable(display);

	/* 6. Setup MBUS. */
	icl_mbus_init(display);

	/* 7. Program arbiter BW_BUDDY registers */
	if (DISPLAY_VER(display) >= 12)
		tgl_bw_buddy_init(display);

	/* 8. Ensure PHYs have completed calibration and adaptation */
	if (display->platform.dg2)
		intel_snps_phy_wait_for_calibration(display);

	/* 9. XE2_HPD: Program CHICKEN_MISC_2 before any cursor or planes are enabled */
	if (DISPLAY_VERx100(display) == 1401)
		intel_de_rmw(display, CHICKEN_MISC_2, BMG_DARB_HALF_BLK_END_BURST, 1);

	if (resume)
		intel_dmc_load_program(display);

	/* Wa_14011508470:tgl,dg1,rkl,adl-s,adl-p,dg2 */
	if (IS_DISPLAY_VERx100(display, 1200, 1300))
		intel_de_rmw(display, GEN11_CHICKEN_DCPR_2, 0,
			     DCPR_CLEAR_MEMSTAT_DIS | DCPR_SEND_RESP_IMM |
			     DCPR_MASK_LPMODE | DCPR_MASK_MAXLATENCY_MEMUP_CLR);

	/* Wa_14011503030:xelpd */
	if (DISPLAY_VER(display) == 13)
		intel_de_write(display, XELPD_DISPLAY_ERR_FATAL_MASK, ~0);

	/* Wa_15013987218 */
	if (DISPLAY_VER(display) == 20) {
		intel_de_rmw(display, SOUTH_DSPCLK_GATE_D,
			     0, PCH_GMBUSUNIT_CLOCK_GATE_DISABLE);
		intel_de_rmw(display, SOUTH_DSPCLK_GATE_D,
			     PCH_GMBUSUNIT_CLOCK_GATE_DISABLE, 0);
	}
}

static void icl_display_core_uninit(struct intel_display *display)
{
	struct i915_power_domains *power_domains = &display->power.domains;
	struct i915_power_well *well;

	if (!HAS_DISPLAY(display))
		return;

	gen9_disable_dc_states(display);
	intel_dmc_disable_program(display);

	/* 1. Disable all display engine functions -> already done */

	/* 2. Disable DBUF */
	gen9_dbuf_disable(display);

	/* 3. Disable CD clock */
	intel_cdclk_uninit_hw(display);

	if (DISPLAY_VER(display) == 14)
		intel_de_rmw(display, DC_STATE_EN, 0,
			     HOLD_PHY_PG1_LATCH | HOLD_PHY_CLKREQ_PG1_LATCH);

	/*
	 * 4. Disable Power Well 1 (PG1).
	 * The AUX IO power wells are toggled on demand, so they are already
	 * disabled at this point.
	 */
	mutex_lock(&power_domains->lock);
	well = lookup_power_well(display, SKL_DISP_PW_1);
	intel_power_well_disable(display, well);
	mutex_unlock(&power_domains->lock);

	/* 5. Uninitialize the combo phys */
	intel_combo_phy_uninit(display);
}

static void chv_phy_control_init(struct intel_display *display)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(display, VLV_DISP_PW_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(display, CHV_DISP_PW_DPIO_CMN_D);

	/*
	 * DISPLAY_PHY_CONTROL can get corrupted if read. As a
	 * workaround never ever read DISPLAY_PHY_CONTROL, and
	 * instead maintain a shadow copy ourselves. Use the actual
	 * power well state and lane status to reconstruct the
	 * expected initial value.
	 */
	display->power.chv_phy_control =
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);

	/*
	 * If all lanes are disabled we leave the override disabled
	 * with all power down bits cleared to match the state we
	 * would use after disabling the port. Otherwise enable the
	 * override and set the lane powerdown bits according to the
	 * current lane status.
	 */
	if (intel_power_well_is_enabled(display, cmn_bc)) {
		u32 status = intel_de_read(display, DPLL(display, PIPE_A));
		unsigned int mask;

		mask = status & DPLL_PORTB_READY_MASK;
		if (mask == 0xf)
			mask = 0x0;
		else
			display->power.chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);

		display->power.chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);

		mask = (status & DPLL_PORTC_READY_MASK) >> 4;
		if (mask == 0xf)
			mask = 0x0;
		else
			display->power.chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);

		display->power.chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);

		display->power.chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);

		display->power.chv_phy_assert[DPIO_PHY0] = false;
	} else {
		display->power.chv_phy_assert[DPIO_PHY0] = true;
	}

	if (intel_power_well_is_enabled(display, cmn_d)) {
		u32 status = intel_de_read(display, DPIO_PHY_STATUS);
		unsigned int mask;

		mask = status & DPLL_PORTD_READY_MASK;

		if (mask == 0xf)
			mask = 0x0;
		else
			display->power.chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);

		display->power.chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);

		display->power.chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);

		display->power.chv_phy_assert[DPIO_PHY1] = false;
	} else {
		display->power.chv_phy_assert[DPIO_PHY1] = true;
	}

	drm_dbg_kms(display->drm, "Initial PHY_CONTROL=0x%08x\n",
		    display->power.chv_phy_control);

	/* Defer application of initial phy_control to enabling the powerwell */
}
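
/*
 * Illustration only, not driver code: the shadow-register pattern used above
 * for registers that are corrupted by reads. Every update goes through the
 * cached chv_phy_control value, so DISPLAY_PHY_CONTROL is only ever written,
 * never read back. chv_flush_phy_control() is a hypothetical helper named
 * just for this sketch:
 *
 *	static void chv_flush_phy_control(struct intel_display *display, u32 set)
 *	{
 *		display->power.chv_phy_control |= set;	// update the shadow copy
 *		intel_de_write(display, DISPLAY_PHY_CONTROL,
 *			       display->power.chv_phy_control);	// write-only register
 *	}
 */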

static void vlv_cmnlane_wa(struct intel_display *display)
{
	struct i915_power_well *cmn =
		lookup_power_well(display, VLV_DISP_PW_DPIO_CMN_BC);
	struct i915_power_well *disp2d =
		lookup_power_well(display, VLV_DISP_PW_DISP2D);

	/* If the display might already be active, skip this */
	if (intel_power_well_is_enabled(display, cmn) &&
	    intel_power_well_is_enabled(display, disp2d) &&
	    intel_de_read(display, DPIO_CTL) & DPIO_CMNRST)
		return;

	drm_dbg_kms(display->drm, "toggling display PHY side reset\n");

	/* cmnlane needs DPLL registers */
	intel_power_well_enable(display, disp2d);

	/*
	 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
	 * Need to assert and de-assert PHY SB reset by gating the
	 * common lane power, then un-gating it.
	 * Simply un-gating isn't enough to reset the PHY sufficiently
	 * to get ports and lanes running.
	 */
	intel_power_well_disable(display, cmn);
}

static bool vlv_punit_is_power_gated(struct intel_display *display, u32 reg0)
{
	bool ret;

	vlv_punit_get(display->drm);
	ret = (vlv_punit_read(display->drm, reg0) & SSPM0_SSC_MASK) == SSPM0_SSC_PWR_GATE;
	vlv_punit_put(display->drm);

	return ret;
}

static void assert_ved_power_gated(struct intel_display *display)
{
	drm_WARN(display->drm,
		 !vlv_punit_is_power_gated(display, PUNIT_REG_VEDSSPM0),
		 "VED not power gated\n");
}

static void assert_isp_power_gated(struct intel_display *display)
{
	static const struct pci_device_id isp_ids[] = {
		{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0f38)},
		{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x22b8)},
		{}
	};

	drm_WARN(display->drm, !pci_dev_present(isp_ids) &&
		 !vlv_punit_is_power_gated(display, PUNIT_REG_ISPSSPM0),
		 "ISP not power gated\n");
}

static void intel_power_domains_verify_state(struct intel_display *display);

/**
 * intel_power_domains_init_hw - initialize hardware power domain state
 * @display: display device instance
 * @resume: Called from resume code paths or not
 *
 * This function initializes the hardware power domain state and enables all
 * power wells belonging to the INIT power domain. Power wells in other
 * domains (and not in the INIT domain) are referenced or disabled by
 * intel_modeset_readout_hw_state(). After that the reference count of each
 * power well must match its HW enabled state, see
 * intel_power_domains_verify_state().
 *
 * It will return with power domains disabled (to be enabled later by
 * intel_power_domains_enable()) and must be paired with
 * intel_power_domains_driver_remove().
 */
void intel_power_domains_init_hw(struct intel_display *display, bool resume)
{
	struct i915_power_domains *power_domains = &display->power.domains;

	power_domains->initializing = true;

	if (DISPLAY_VER(display) >= 11) {
		icl_display_core_init(display, resume);
	} else if (display->platform.geminilake || display->platform.broxton) {
		bxt_display_core_init(display, resume);
	} else if (DISPLAY_VER(display) == 9) {
		skl_display_core_init(display, resume);
	} else if (display->platform.cherryview) {
		mutex_lock(&power_domains->lock);
		chv_phy_control_init(display);
		mutex_unlock(&power_domains->lock);
		assert_isp_power_gated(display);
	} else if (display->platform.valleyview) {
		mutex_lock(&power_domains->lock);
		vlv_cmnlane_wa(display);
		mutex_unlock(&power_domains->lock);
		assert_ved_power_gated(display);
		assert_isp_power_gated(display);
	} else if (display->platform.broadwell || display->platform.haswell) {
		hsw_assert_cdclk(display);
		intel_pch_reset_handshake(display, !HAS_PCH_NOP(display));
	} else if (display->platform.ivybridge) {
		intel_pch_reset_handshake(display, !HAS_PCH_NOP(display));
	}

	/*
	 * Keep all power wells enabled for any dependent HW access during
	 * initialization and to make sure we keep BIOS-enabled display HW
	 * resources powered until display HW readout is complete. We drop
	 * this reference in intel_power_domains_enable().
	 */
	drm_WARN_ON(display->drm, power_domains->init_wakeref);
	power_domains->init_wakeref =
		intel_display_power_get(display, POWER_DOMAIN_INIT);

	/* Disable power well support if the user asked for it. */
	if (!display->params.disable_power_well) {
		drm_WARN_ON(display->drm, power_domains->disable_wakeref);
		display->power.domains.disable_wakeref = intel_display_power_get(display,
										 POWER_DOMAIN_INIT);
	}
	intel_power_domains_sync_hw(display);

	power_domains->initializing = false;
}
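
/*
 * Illustration only, not driver code: a minimal sketch of how the
 * intel_power_domains_*() entry points pair up over the driver's lifetime,
 * assuming hypothetical load/unload wrappers:
 *
 *	static void example_driver_load(struct intel_display *display)
 *	{
 *		intel_power_domains_init_hw(display, false);
 *		// ... display HW state readout takes its own power references ...
 *		intel_power_domains_enable(display);
 *	}
 *
 *	static void example_driver_unload(struct intel_display *display)
 *	{
 *		intel_power_domains_disable(display);
 *		// ... modeset cleanup drops its power references ...
 *		intel_power_domains_driver_remove(display);
 *	}
 */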

/**
 * intel_power_domains_driver_remove - deinitialize hw power domain state
 * @display: display device instance
 *
 * De-initializes the display power domain HW state. It also ensures that the
 * device stays powered up so that the driver can be reloaded.
 *
 * It must be called with power domains already disabled (after a call to
 * intel_power_domains_disable()) and must be paired with
 * intel_power_domains_init_hw().
 */
void intel_power_domains_driver_remove(struct intel_display *display)
{
	intel_wakeref_t wakeref __maybe_unused =
		fetch_and_zero(&display->power.domains.init_wakeref);

	/* Remove the refcount we took to keep power well support disabled. */
	if (!display->params.disable_power_well)
		intel_display_power_put(display, POWER_DOMAIN_INIT,
					fetch_and_zero(&display->power.domains.disable_wakeref));

	intel_display_power_flush_work_sync(display);

	intel_power_domains_verify_state(display);

	/* Keep the power well enabled, but cancel its rpm wakeref. */
	intel_display_rpm_put(display, wakeref);
}

/**
 * intel_power_domains_sanitize_state - sanitize power domains state
 * @display: display device instance
 *
 * Sanitize the power domains state during driver loading and system resume.
 * The function will disable all display power wells that BIOS has enabled
 * without a user for them (any user of a power well has taken a reference
 * on it by the time this function is called, after the state of all the
 * pipe, encoder, etc. HW resources have been sanitized).
 */
void intel_power_domains_sanitize_state(struct intel_display *display)
{
	struct i915_power_domains *power_domains = &display->power.domains;
	struct i915_power_well *power_well;

	mutex_lock(&power_domains->lock);

	for_each_power_well_reverse(display, power_well) {
		if (power_well->desc->always_on || power_well->count ||
		    !intel_power_well_is_enabled(display, power_well))
			continue;

		drm_dbg_kms(display->drm,
			    "BIOS left unused %s power well enabled, disabling it\n",
			    intel_power_well_name(power_well));
		intel_power_well_disable(display, power_well);
	}

	mutex_unlock(&power_domains->lock);
}

/**
 * intel_power_domains_enable - enable toggling of display power wells
 * @display: display device instance
 *
 * Enable the on-demand enabling/disabling of the display power wells. Note
 * that power wells not belonging to POWER_DOMAIN_INIT are allowed to be
 * toggled only at specific points of the display modeset sequence, thus they
 * are not affected by the intel_power_domains_enable()/disable() calls. The
 * purpose of these functions is to keep the rest of the power wells enabled
 * until the end of display HW readout (which will acquire the power
 * references reflecting the current HW state).
 */
void intel_power_domains_enable(struct intel_display *display)
{
	intel_wakeref_t wakeref __maybe_unused =
		fetch_and_zero(&display->power.domains.init_wakeref);

	intel_display_power_put(display, POWER_DOMAIN_INIT, wakeref);
	intel_power_domains_verify_state(display);
}

/**
 * intel_power_domains_disable - disable toggling of display power wells
 * @display: display device instance
 *
 * Disable the on-demand enabling/disabling of the display power wells. See
 * intel_power_domains_enable() for which power wells this call controls.
 */
void intel_power_domains_disable(struct intel_display *display)
{
	struct i915_power_domains *power_domains = &display->power.domains;

	drm_WARN_ON(display->drm, power_domains->init_wakeref);
	power_domains->init_wakeref =
		intel_display_power_get(display, POWER_DOMAIN_INIT);

	intel_power_domains_verify_state(display);
}

/**
 * intel_power_domains_suspend - suspend power domain state
 * @display: display device instance
 * @s2idle: specifies whether we go to idle, or deeper sleep
 *
 * This function prepares the hardware power domain state before entering
 * system suspend.
 *
 * It must be called with power domains already disabled (after a call to
 * intel_power_domains_disable()) and paired with intel_power_domains_resume().
 */
void intel_power_domains_suspend(struct intel_display *display, bool s2idle)
{
	struct i915_power_domains *power_domains = &display->power.domains;
	intel_wakeref_t wakeref __maybe_unused =
		fetch_and_zero(&power_domains->init_wakeref);

	intel_display_power_put(display, POWER_DOMAIN_INIT, wakeref);

	/*
	 * In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9
	 * support don't manually deinit the power domains. This also means
	 * the DMC firmware will stay active; it will power down any HW
	 * resources as required and also enable deeper system power states
	 * that would be blocked if the firmware was inactive.
	 */
	if (!(power_domains->allowed_dc_mask & DC_STATE_EN_DC9) && s2idle &&
	    intel_dmc_has_payload(display)) {
		intel_display_power_flush_work(display);
		intel_power_domains_verify_state(display);
		return;
	}

	/*
	 * Even if power well support was disabled we still want to disable
	 * power wells if power domains must be deinitialized for suspend.
	 */
	if (!display->params.disable_power_well)
		intel_display_power_put(display, POWER_DOMAIN_INIT,
					fetch_and_zero(&display->power.domains.disable_wakeref));

	intel_display_power_flush_work(display);
	intel_power_domains_verify_state(display);

	if (DISPLAY_VER(display) >= 11)
		icl_display_core_uninit(display);
	else if (display->platform.geminilake || display->platform.broxton)
		bxt_display_core_uninit(display);
	else if (DISPLAY_VER(display) == 9)
		skl_display_core_uninit(display);

	power_domains->display_core_suspended = true;
}

/**
 * intel_power_domains_resume - resume power domain state
 * @display: display device instance
 *
 * This function resumes the hardware power domain state during system resume.
 *
 * It will return with power domain support disabled (to be enabled later by
 * intel_power_domains_enable()) and must be paired with
 * intel_power_domains_suspend().
 */
void intel_power_domains_resume(struct intel_display *display)
{
	struct i915_power_domains *power_domains = &display->power.domains;

	if (power_domains->display_core_suspended) {
		intel_power_domains_init_hw(display, true);
		power_domains->display_core_suspended = false;
	} else {
		drm_WARN_ON(display->drm, power_domains->init_wakeref);
		power_domains->init_wakeref =
			intel_display_power_get(display, POWER_DOMAIN_INIT);
	}
}
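
/*
 * Illustration only, not driver code: the corresponding system suspend/resume
 * pairing for intel_power_domains_suspend()/intel_power_domains_resume(),
 * assuming hypothetical PM callbacks:
 *
 *	static void example_pm_suspend(struct intel_display *display, bool s2idle)
 *	{
 *		intel_power_domains_disable(display);
 *		// ... display outputs are shut down here ...
 *		intel_power_domains_suspend(display, s2idle);
 *	}
 *
 *	static void example_pm_resume(struct intel_display *display)
 *	{
 *		intel_power_domains_resume(display);
 *		// ... display HW state is read out / restored here ...
 *		intel_power_domains_enable(display);
 *	}
 */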

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)

static void intel_power_domains_dump_info(struct intel_display *display)
{
	struct i915_power_domains *power_domains = &display->power.domains;
	struct i915_power_well *power_well;

	for_each_power_well(display, power_well) {
		enum intel_display_power_domain domain;

		drm_dbg_kms(display->drm, "%-25s %d\n",
			    intel_power_well_name(power_well), intel_power_well_refcount(power_well));

		for_each_power_domain(domain, intel_power_well_domains(power_well))
			drm_dbg_kms(display->drm, " %-23s %d\n",
				    intel_display_power_domain_str(domain),
				    power_domains->domain_use_count[domain]);
	}
}

/**
 * intel_power_domains_verify_state - verify the HW/SW state for all power wells
 * @display: display device instance
 *
 * Verify if the reference count of each power well matches its HW enabled
 * state and the total refcount of the domains it belongs to. This must be
 * called after modeset HW state sanitization, which is responsible for
 * acquiring reference counts for any power wells in use and disabling the
 * ones left on by BIOS but not required by any active output.
 */
static void intel_power_domains_verify_state(struct intel_display *display)
{
	struct i915_power_domains *power_domains = &display->power.domains;
	struct i915_power_well *power_well;
	bool dump_domain_info;

	mutex_lock(&power_domains->lock);

	verify_async_put_domains_state(power_domains);

	dump_domain_info = false;
	for_each_power_well(display, power_well) {
		enum intel_display_power_domain domain;
		int domains_count;
		bool enabled;

		enabled = intel_power_well_is_enabled(display, power_well);
		if ((intel_power_well_refcount(power_well) ||
		     intel_power_well_is_always_on(power_well)) !=
		    enabled)
			drm_err(display->drm,
				"power well %s state mismatch (refcount %d/enabled %d)",
				intel_power_well_name(power_well),
				intel_power_well_refcount(power_well), enabled);

		domains_count = 0;
		for_each_power_domain(domain, intel_power_well_domains(power_well))
			domains_count += power_domains->domain_use_count[domain];

		if (intel_power_well_refcount(power_well) != domains_count) {
			drm_err(display->drm,
				"power well %s refcount/domain refcount mismatch "
				"(refcount %d/domains refcount %d)\n",
				intel_power_well_name(power_well),
				intel_power_well_refcount(power_well),
				domains_count);
			dump_domain_info = true;
		}
	}

	if (dump_domain_info) {
		static bool dumped;

		if (!dumped) {
			intel_power_domains_dump_info(display);
			dumped = true;
		}
	}

	mutex_unlock(&power_domains->lock);
}
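
/*
 * Illustration only, not driver code: a worked instance of the invariant
 * checked above. For a hypothetical power well belonging to the PIPE_B and
 * TRANSCODER_B domains, with domain_use_count[POWER_DOMAIN_PIPE_B] == 1 and
 * domain_use_count[POWER_DOMAIN_TRANSCODER_B] == 0, the well's refcount must
 * be 1 + 0 == 1, and since that refcount is non-zero the well must also be
 * enabled in hardware; any other combination is flagged with drm_err().
 */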

#else

static void intel_power_domains_verify_state(struct intel_display *display)
{
}

#endif

void intel_display_power_suspend_late(struct intel_display *display, bool s2idle)
{
	intel_power_domains_suspend(display, s2idle);

	if (DISPLAY_VER(display) >= 11 || display->platform.geminilake ||
	    display->platform.broxton) {
		bxt_enable_dc9(display);
	} else if (display->platform.haswell || display->platform.broadwell) {
		hsw_enable_pc8(display);
	}

	/* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */
	if (INTEL_PCH_TYPE(display) >= PCH_CNP && INTEL_PCH_TYPE(display) < PCH_DG1)
		intel_de_rmw(display, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS);
}

void intel_display_power_resume_early(struct intel_display *display)
{
	if (DISPLAY_VER(display) >= 11 || display->platform.geminilake ||
	    display->platform.broxton) {
		gen9_sanitize_dc_state(display);
		bxt_disable_dc9(display);
	} else if (display->platform.haswell || display->platform.broadwell) {
		hsw_disable_pc8(display);
	}

	/* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */
	if (INTEL_PCH_TYPE(display) >= PCH_CNP && INTEL_PCH_TYPE(display) < PCH_DG1)
		intel_de_rmw(display, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0);

	intel_power_domains_resume(display);
}

void intel_display_power_suspend(struct intel_display *display)
{
	if (DISPLAY_VER(display) >= 11) {
		icl_display_core_uninit(display);
		bxt_enable_dc9(display);
	} else if (display->platform.geminilake || display->platform.broxton) {
		bxt_display_core_uninit(display);
		bxt_enable_dc9(display);
	} else if (display->platform.haswell || display->platform.broadwell) {
		hsw_enable_pc8(display);
	}
}

void intel_display_power_resume(struct intel_display *display)
{
	struct i915_power_domains *power_domains = &display->power.domains;

	if (DISPLAY_VER(display) >= 11) {
		bxt_disable_dc9(display);
		icl_display_core_init(display, true);
		if (intel_dmc_has_payload(display)) {
			if (power_domains->allowed_dc_mask & DC_STATE_EN_UPTO_DC6)
				skl_enable_dc6(display);
			else if (power_domains->allowed_dc_mask & DC_STATE_EN_UPTO_DC5)
				gen9_enable_dc5(display);
		}
	} else if (display->platform.geminilake || display->platform.broxton) {
		bxt_disable_dc9(display);
		bxt_display_core_init(display, true);
		if (intel_dmc_has_payload(display) &&
		    (power_domains->allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
			gen9_enable_dc5(display);
	} else if (display->platform.haswell || display->platform.broadwell) {
		hsw_disable_pc8(display);
	}
}

void intel_display_power_debug(struct intel_display *display, struct seq_file *m)
{
	struct i915_power_domains *power_domains = &display->power.domains;
	int i;

	mutex_lock(&power_domains->lock);

	seq_printf(m, "Runtime power status: %s\n",
		   str_enabled_disabled(!power_domains->init_wakeref));

	seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
	for (i = 0; i < power_domains->power_well_count; i++) {
		struct i915_power_well *power_well;
		enum intel_display_power_domain power_domain;

		power_well = &power_domains->power_wells[i];
		seq_printf(m, "%-25s %d\n", intel_power_well_name(power_well),
			   intel_power_well_refcount(power_well));

		for_each_power_domain(power_domain, intel_power_well_domains(power_well))
			seq_printf(m, " %-23s %d\n",
				   intel_display_power_domain_str(power_domain),
				   power_domains->domain_use_count[power_domain]);
	}

	mutex_unlock(&power_domains->lock);
}

struct intel_ddi_port_domains {
	enum port port_start;
	enum port port_end;
	enum aux_ch aux_ch_start;
	enum aux_ch aux_ch_end;

	enum intel_display_power_domain ddi_lanes;
	enum intel_display_power_domain ddi_io;
	enum intel_display_power_domain aux_io;
	enum intel_display_power_domain aux_legacy_usbc;
	enum intel_display_power_domain aux_tbt;
};

static const struct intel_ddi_port_domains
i9xx_port_domains[] = {
	{
		.port_start = PORT_A,
		.port_end = PORT_F,
		.aux_ch_start = AUX_CH_A,
		.aux_ch_end = AUX_CH_F,

		.ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A,
		.ddi_io = POWER_DOMAIN_PORT_DDI_IO_A,
		.aux_io = POWER_DOMAIN_AUX_IO_A,
		.aux_legacy_usbc = POWER_DOMAIN_AUX_A,
		.aux_tbt = POWER_DOMAIN_INVALID,
	},
};

static const struct intel_ddi_port_domains
d11_port_domains[] = {
	{
		.port_start = PORT_A,
		.port_end = PORT_B,
		.aux_ch_start = AUX_CH_A,
		.aux_ch_end = AUX_CH_B,

		.ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A,
		.ddi_io = POWER_DOMAIN_PORT_DDI_IO_A,
		.aux_io = POWER_DOMAIN_AUX_IO_A,
		.aux_legacy_usbc = POWER_DOMAIN_AUX_A,
		.aux_tbt = POWER_DOMAIN_INVALID,
	}, {
		.port_start = PORT_C,
		.port_end = PORT_F,
		.aux_ch_start = AUX_CH_C,
		.aux_ch_end = AUX_CH_F,

		.ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_C,
		.ddi_io = POWER_DOMAIN_PORT_DDI_IO_C,
		.aux_io = POWER_DOMAIN_AUX_IO_C,
		.aux_legacy_usbc = POWER_DOMAIN_AUX_C,
		.aux_tbt = POWER_DOMAIN_AUX_TBT1,
	},
};

static const struct intel_ddi_port_domains
d12_port_domains[] = {
	{
		.port_start = PORT_A,
		.port_end = PORT_C,
		.aux_ch_start = AUX_CH_A,
		.aux_ch_end = AUX_CH_C,

		.ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A,
		.ddi_io = POWER_DOMAIN_PORT_DDI_IO_A,
		.aux_io = POWER_DOMAIN_AUX_IO_A,
		.aux_legacy_usbc = POWER_DOMAIN_AUX_A,
		.aux_tbt = POWER_DOMAIN_INVALID,
	}, {
		.port_start = PORT_TC1,
		.port_end = PORT_TC6,
		.aux_ch_start = AUX_CH_USBC1,
		.aux_ch_end = AUX_CH_USBC6,

		.ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_TC1,
		.ddi_io = POWER_DOMAIN_PORT_DDI_IO_TC1,
		.aux_io = POWER_DOMAIN_INVALID,
		.aux_legacy_usbc = POWER_DOMAIN_AUX_USBC1,
		.aux_tbt = POWER_DOMAIN_AUX_TBT1,
	},
};

static const struct intel_ddi_port_domains
d13_port_domains[] = {
	{
		.port_start = PORT_A,
		.port_end = PORT_C,
		.aux_ch_start = AUX_CH_A,
		.aux_ch_end = AUX_CH_C,

		.ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A,
		.ddi_io = POWER_DOMAIN_PORT_DDI_IO_A,
		.aux_io = POWER_DOMAIN_AUX_IO_A,
		.aux_legacy_usbc = POWER_DOMAIN_AUX_A,
		.aux_tbt = POWER_DOMAIN_INVALID,
	}, {
		.port_start = PORT_TC1,
		.port_end = PORT_TC4,
		.aux_ch_start = AUX_CH_USBC1,
		.aux_ch_end = AUX_CH_USBC4,

		.ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_TC1,
		.ddi_io = POWER_DOMAIN_PORT_DDI_IO_TC1,
		.aux_io = POWER_DOMAIN_INVALID,
		.aux_legacy_usbc = POWER_DOMAIN_AUX_USBC1,
		.aux_tbt = POWER_DOMAIN_AUX_TBT1,
	}, {
		.port_start = PORT_D_XELPD,
		.port_end = PORT_E_XELPD,
		.aux_ch_start = AUX_CH_D_XELPD,
		.aux_ch_end = AUX_CH_E_XELPD,

		.ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_D,
		.ddi_io = POWER_DOMAIN_PORT_DDI_IO_D,
		.aux_io = POWER_DOMAIN_AUX_IO_D,
		.aux_legacy_usbc = POWER_DOMAIN_AUX_D,
		.aux_tbt = POWER_DOMAIN_INVALID,
	},
};

static void
intel_port_domains_for_platform(struct intel_display *display,
				const struct intel_ddi_port_domains **domains,
				int *domains_size)
{
	if (DISPLAY_VER(display) >= 13) {
		*domains = d13_port_domains;
		*domains_size = ARRAY_SIZE(d13_port_domains);
	} else if (DISPLAY_VER(display) >= 12) {
		*domains = d12_port_domains;
		*domains_size = ARRAY_SIZE(d12_port_domains);
	} else if (DISPLAY_VER(display) >= 11) {
		*domains = d11_port_domains;
		*domains_size = ARRAY_SIZE(d11_port_domains);
	} else {
		*domains = i9xx_port_domains;
		*domains_size = ARRAY_SIZE(i9xx_port_domains);
	}
}

static const struct intel_ddi_port_domains *
intel_port_domains_for_port(struct intel_display *display, enum port port)
{
	const struct intel_ddi_port_domains *domains;
	int domains_size;
	int i;

	intel_port_domains_for_platform(display, &domains, &domains_size);
	for (i = 0; i < domains_size; i++)
		if (port >= domains[i].port_start && port <= domains[i].port_end)
			return &domains[i];

	return NULL;
}

enum intel_display_power_domain
intel_display_power_ddi_io_domain(struct intel_display *display, enum port port)
{
	const struct intel_ddi_port_domains *domains = intel_port_domains_for_port(display, port);

	if (drm_WARN_ON(display->drm, !domains || domains->ddi_io == POWER_DOMAIN_INVALID))
		return POWER_DOMAIN_PORT_DDI_IO_A;

	return domains->ddi_io + (int)(port - domains->port_start);
}
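
/*
 * Illustration only, not driver code: the per-port lookups here rely on each
 * power domain range being laid out contiguously in
 * enum intel_display_power_domain, so a domain is recovered by offsetting the
 * range's first domain by the port's position within its port range. E.g. on
 * a display version 12 platform PORT_TC2 matches the d12 TC entry, so:
 *
 *	intel_display_power_ddi_io_domain(display, PORT_TC2)
 *		== POWER_DOMAIN_PORT_DDI_IO_TC1 + (PORT_TC2 - PORT_TC1)
 *		== POWER_DOMAIN_PORT_DDI_IO_TC2
 */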

enum intel_display_power_domain
intel_display_power_ddi_lanes_domain(struct intel_display *display, enum port port)
{
	const struct intel_ddi_port_domains *domains = intel_port_domains_for_port(display, port);

	if (drm_WARN_ON(display->drm, !domains || domains->ddi_lanes == POWER_DOMAIN_INVALID))
		return POWER_DOMAIN_PORT_DDI_LANES_A;

	return domains->ddi_lanes + (int)(port - domains->port_start);
}

static const struct intel_ddi_port_domains *
intel_port_domains_for_aux_ch(struct intel_display *display, enum aux_ch aux_ch)
{
	const struct intel_ddi_port_domains *domains;
	int domains_size;
	int i;

	intel_port_domains_for_platform(display, &domains, &domains_size);
	for (i = 0; i < domains_size; i++)
		if (aux_ch >= domains[i].aux_ch_start && aux_ch <= domains[i].aux_ch_end)
			return &domains[i];

	return NULL;
}

enum intel_display_power_domain
intel_display_power_aux_io_domain(struct intel_display *display, enum aux_ch aux_ch)
{
	const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(display, aux_ch);

	if (drm_WARN_ON(display->drm, !domains || domains->aux_io == POWER_DOMAIN_INVALID))
		return POWER_DOMAIN_AUX_IO_A;

	return domains->aux_io + (int)(aux_ch - domains->aux_ch_start);
}

enum intel_display_power_domain
intel_display_power_legacy_aux_domain(struct intel_display *display, enum aux_ch aux_ch)
{
	const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(display, aux_ch);

	if (drm_WARN_ON(display->drm, !domains || domains->aux_legacy_usbc == POWER_DOMAIN_INVALID))
		return POWER_DOMAIN_AUX_A;

	return domains->aux_legacy_usbc + (int)(aux_ch - domains->aux_ch_start);
}

enum intel_display_power_domain
intel_display_power_tbt_aux_domain(struct intel_display *display, enum aux_ch aux_ch)
{
	const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(display, aux_ch);

	if (drm_WARN_ON(display->drm, !domains || domains->aux_tbt == POWER_DOMAIN_INVALID))
		return POWER_DOMAIN_AUX_TBT1;

	return domains->aux_tbt + (int)(aux_ch - domains->aux_ch_start);
}