1 // SPDX-License-Identifier: MIT
2 /*
3 * Copyright © 2022 Intel Corporation
4 */
5
6 #include <linux/iopoll.h>
7
8 #include <drm/drm_print.h>
9 #include <drm/intel/intel_pcode_regs.h>
10
11 #include "intel_backlight_regs.h"
12 #include "intel_combo_phy.h"
13 #include "intel_combo_phy_regs.h"
14 #include "intel_crt.h"
15 #include "intel_de.h"
16 #include "intel_display_irq.h"
17 #include "intel_display_power_well.h"
18 #include "intel_display_regs.h"
19 #include "intel_display_rpm.h"
20 #include "intel_display_types.h"
21 #include "intel_display_wa.h"
22 #include "intel_dkl_phy.h"
23 #include "intel_dkl_phy_regs.h"
24 #include "intel_dmc.h"
25 #include "intel_dmc_wl.h"
26 #include "intel_dp_aux_regs.h"
27 #include "intel_dpio_phy.h"
28 #include "intel_dpll.h"
29 #include "intel_hotplug.h"
30 #include "intel_parent.h"
31 #include "intel_pps.h"
32 #include "intel_psr.h"
33 #include "intel_tc.h"
34 #include "intel_vga.h"
35 #include "skl_watermark.h"
36 #include "vlv_dpio_phy_regs.h"
37 #include "vlv_iosf_sb_reg.h"
38 #include "vlv_sideband.h"
39
40 /*
41 * PG0 is HW controlled, so doesn't have a corresponding power well control knob
42 *
43 * {ICL,SKL}_DISP_PW1_IDX..{ICL,SKL}_DISP_PW4_IDX -> PG1..PG4
44 */
pw_idx_to_pg(struct intel_display * display,int pw_idx)45 static enum skl_power_gate pw_idx_to_pg(struct intel_display *display, int pw_idx)
46 {
47 int pw1_idx = DISPLAY_VER(display) >= 11 ? ICL_PW_CTL_IDX_PW_1 : SKL_PW_CTL_IDX_PW_1;
48
49 return pw_idx - pw1_idx + SKL_PG1;
50 }
51
/*
 * The four request/status registers through which a HSW-style power well can
 * be enabled; the well stays on while any of them requests it.
 */
struct i915_power_well_regs {
	i915_reg_t bios;	/* BIOS request register */
	i915_reg_t driver;	/* driver request register */
	i915_reg_t kvmr;	/* KVMR request register (may be unset) */
	i915_reg_t debug;	/* debug request register */
};
58
/* Per-power-well-type operations vtable. */
struct i915_power_well_ops {
	/* Register quadruple used by the HSW-style ops; NULL otherwise. */
	const struct i915_power_well_regs *regs;
	/*
	 * Synchronize the well's hw state to match the current sw state, for
	 * example enable/disable it based on the current refcount. Called
	 * during driver init and resume time, possibly after first calling
	 * the enable/disable handlers.
	 */
	void (*sync_hw)(struct intel_display *display,
			struct i915_power_well *power_well);
	/*
	 * Enable the well and resources that depend on it (for example
	 * interrupts located on the well). Called after the 0->1 refcount
	 * transition.
	 */
	void (*enable)(struct intel_display *display,
		       struct i915_power_well *power_well);
	/*
	 * Disable the well and resources that depend on it. Called after
	 * the 1->0 refcount transition.
	 */
	void (*disable)(struct intel_display *display,
			struct i915_power_well *power_well);
	/* Returns the hw enabled state. */
	bool (*is_enabled)(struct intel_display *display,
			   struct i915_power_well *power_well);
};
86
/* Return the platform-specific instance data selected for this power well. */
static const struct i915_power_well_instance *
i915_power_well_instance(const struct i915_power_well *power_well)
{
	return &power_well->desc->instances->list[power_well->instance_idx];
}
92
/*
 * Look up a power well by its ID. Never returns NULL: if the ID is not
 * defined for this platform a WARN is emitted and the first power well is
 * returned as a fallback (see the comment below for the rationale).
 */
struct i915_power_well *
lookup_power_well(struct intel_display *display,
		  enum i915_power_well_id power_well_id)
{
	struct i915_power_well *power_well;

	for_each_power_well(display, power_well)
		if (i915_power_well_instance(power_well)->id == power_well_id)
			return power_well;

	/*
	 * It's not feasible to add error checking code to the callers since
	 * this condition really shouldn't happen and it doesn't even make sense
	 * to abort things like display initialization sequences. Just return
	 * the first power well and hope the WARN gets reported so we can fix
	 * our driver.
	 */
	drm_WARN(display->drm, 1,
		 "Power well %d not defined for this platform\n",
		 power_well_id);
	return &display->power.domains.power_wells[0];
}
115
intel_power_well_enable(struct intel_display * display,struct i915_power_well * power_well)116 void intel_power_well_enable(struct intel_display *display,
117 struct i915_power_well *power_well)
118 {
119 drm_dbg_kms(display->drm, "enabling %s\n", intel_power_well_name(power_well));
120 power_well->desc->ops->enable(display, power_well);
121 power_well->hw_enabled = true;
122 }
123
intel_power_well_disable(struct intel_display * display,struct i915_power_well * power_well)124 void intel_power_well_disable(struct intel_display *display,
125 struct i915_power_well *power_well)
126 {
127 drm_dbg_kms(display->drm, "disabling %s\n", intel_power_well_name(power_well));
128 power_well->hw_enabled = false;
129 power_well->desc->ops->disable(display, power_well);
130 }
131
intel_power_well_sync_hw(struct intel_display * display,struct i915_power_well * power_well)132 void intel_power_well_sync_hw(struct intel_display *display,
133 struct i915_power_well *power_well)
134 {
135 power_well->desc->ops->sync_hw(display, power_well);
136 power_well->hw_enabled = power_well->desc->ops->is_enabled(display, power_well);
137 }
138
intel_power_well_get(struct intel_display * display,struct i915_power_well * power_well)139 void intel_power_well_get(struct intel_display *display,
140 struct i915_power_well *power_well)
141 {
142 if (!power_well->count++)
143 intel_power_well_enable(display, power_well);
144 }
145
intel_power_well_put(struct intel_display * display,struct i915_power_well * power_well)146 void intel_power_well_put(struct intel_display *display,
147 struct i915_power_well *power_well)
148 {
149 drm_WARN(display->drm, !power_well->count,
150 "Use count on power well %s is already zero",
151 i915_power_well_instance(power_well)->name);
152
153 if (!--power_well->count)
154 intel_power_well_disable(display, power_well);
155 }
156
intel_power_well_is_enabled(struct intel_display * display,struct i915_power_well * power_well)157 bool intel_power_well_is_enabled(struct intel_display *display,
158 struct i915_power_well *power_well)
159 {
160 return power_well->desc->ops->is_enabled(display, power_well);
161 }
162
intel_power_well_is_enabled_cached(struct i915_power_well * power_well)163 bool intel_power_well_is_enabled_cached(struct i915_power_well *power_well)
164 {
165 return power_well->hw_enabled;
166 }
167
intel_display_power_well_is_enabled(struct intel_display * display,enum i915_power_well_id power_well_id)168 bool intel_display_power_well_is_enabled(struct intel_display *display,
169 enum i915_power_well_id power_well_id)
170 {
171 struct i915_power_well *power_well;
172
173 power_well = lookup_power_well(display, power_well_id);
174
175 return intel_power_well_is_enabled(display, power_well);
176 }
177
intel_power_well_is_always_on(struct i915_power_well * power_well)178 bool intel_power_well_is_always_on(struct i915_power_well *power_well)
179 {
180 return power_well->desc->always_on;
181 }
182
intel_power_well_name(struct i915_power_well * power_well)183 const char *intel_power_well_name(struct i915_power_well *power_well)
184 {
185 return i915_power_well_instance(power_well)->name;
186 }
187
intel_power_well_domains(struct i915_power_well * power_well)188 struct intel_power_domain_mask *intel_power_well_domains(struct i915_power_well *power_well)
189 {
190 return &power_well->domains;
191 }
192
intel_power_well_refcount(struct i915_power_well * power_well)193 int intel_power_well_refcount(struct i915_power_well *power_well)
194 {
195 return power_well->count;
196 }
197
dss_pipe_gating_bits(u8 irq_pipe_mask)198 static u32 dss_pipe_gating_bits(u8 irq_pipe_mask)
199 {
200 u32 bits = 0;
201
202 if (irq_pipe_mask & BIT(PIPE_A))
203 bits |= DSS_PIPE_A_GATING_DISABLED;
204 if (irq_pipe_mask & BIT(PIPE_B))
205 bits |= DSS_PIPE_B_GATING_DISABLED;
206 if (irq_pipe_mask & BIT(PIPE_C))
207 bits |= DSS_PIPE_C_GATING_DISABLED;
208 if (irq_pipe_mask & BIT(PIPE_D))
209 bits |= DSS_PIPE_D_GATING_DISABLED;
210
211 return bits;
212 }
213
/*
 * Disable (or re-enable) DSS clock gating for the pipes in @irq_pipe_mask.
 * @disable == true disables gating; false restores it. No-op if the mask
 * maps to no gating bits.
 */
static void dss_pipe_gating_enable_disable(struct intel_display *display,
					   u8 irq_pipe_mask,
					   bool disable)
{
	u32 bits = dss_pipe_gating_bits(irq_pipe_mask);
	u32 clear, set;

	if (!bits)
		return;

	/*
	 * Single intel_de_rmw() for both enable/disable:
	 * - disable == true, set bits (disable clock gating)
	 * - disable == false, clear bits (re-enable clock gating)
	 */
	set = disable ? bits : 0;
	clear = disable ? 0 : bits;

	intel_de_rmw(display, CLKGATE_DIS_DSSDSC, clear, set);

	drm_dbg_kms(display->drm,
		    "DSS clock gating %sd for pipe_mask=0x%x (CLKGATE_DIS_DSSDSC=0x%08x)\n",
		    str_enable_disable(!disable), irq_pipe_mask,
		    intel_de_read(display, CLKGATE_DIS_DSSDSC));
}
239
240 /*
241 * Starting with Haswell, we have a "Power Down Well" that can be turned off
242 * when not needed anymore. We have 4 registers that can request the power well
243 * to be enabled, and it will only be disabled if none of the registers is
244 * requesting it to be enabled.
245 */
/*
 * Post-enable hook for HSW-style power wells: re-enable interrupts on pipes
 * hosted by the well and, where Wa_22021048059 applies, restore DSS clock
 * gating that was disabled before the well went down.
 */
static void hsw_power_well_post_enable(struct intel_display *display,
				       u8 irq_pipe_mask)
{
	if (irq_pipe_mask) {
		gen8_irq_power_well_post_enable(display, irq_pipe_mask);

		if (intel_display_wa(display, INTEL_DISPLAY_WA_22021048059))
			dss_pipe_gating_enable_disable(display, irq_pipe_mask, false);
	}
}
256
/*
 * Pre-disable hook for HSW-style power wells: mirror of
 * hsw_power_well_post_enable() — disable DSS clock gating first (if the WA
 * applies), then mask interrupts on the pipes hosted by the well.
 */
static void hsw_power_well_pre_disable(struct intel_display *display,
				       u8 irq_pipe_mask)
{
	if (irq_pipe_mask) {
		if (intel_display_wa(display, INTEL_DISPLAY_WA_22021048059))
			dss_pipe_gating_enable_disable(display, irq_pipe_mask, true);

		gen8_irq_power_well_pre_disable(display, irq_pipe_mask);
	}
}
267
/* Map an ICL AUX power well index to its combo PHY. */
#define ICL_AUX_PW_TO_PHY(pw_idx)	\
	((pw_idx) - ICL_PW_CTL_IDX_AUX_A + PHY_A)

/* Map an ICL AUX power well index to its AUX channel. */
#define ICL_AUX_PW_TO_CH(pw_idx)	\
	((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A)

/* Map an ICL TBT AUX power well index to its AUX channel. */
#define ICL_TBT_AUX_PW_TO_CH(pw_idx)	\
	((pw_idx) - ICL_PW_CTL_IDX_AUX_TBT1 + AUX_CH_C)
276
icl_aux_pw_to_ch(const struct i915_power_well * power_well)277 static enum aux_ch icl_aux_pw_to_ch(const struct i915_power_well *power_well)
278 {
279 int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
280
281 return power_well->desc->is_tc_tbt ? ICL_TBT_AUX_PW_TO_CH(pw_idx) :
282 ICL_AUX_PW_TO_CH(pw_idx);
283 }
284
/*
 * Find the digital port using @aux_ch, or NULL if no encoder has that AUX
 * channel assigned. MST encoders are skipped; the MST primary port's own
 * encoder is the one checked instead.
 */
static struct intel_digital_port *
aux_ch_to_digital_port(struct intel_display *display,
		       enum aux_ch aux_ch)
{
	struct intel_encoder *encoder;

	for_each_intel_encoder(display->drm, encoder) {
		struct intel_digital_port *dig_port;

		/* We'll check the MST primary port */
		if (encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		dig_port = enc_to_dig_port(encoder);

		if (dig_port && dig_port->aux_ch == aux_ch)
			return dig_port;
	}

	return NULL;
}
306
/*
 * Resolve the encoder associated with an AUX power well via the well's AUX
 * channel, or NULL if no encoder claims that channel (see FIXME below).
 */
static struct intel_encoder *
icl_aux_pw_to_encoder(struct intel_display *display,
		      const struct i915_power_well *power_well)
{
	enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well);
	struct intel_digital_port *dig_port = aux_ch_to_digital_port(display, aux_ch);

	/*
	 * FIXME should we care about the (VBT defined) dig_port->aux_ch
	 * relationship or should this be purely defined by the hardware layout?
	 * Currently if the port doesn't appear in the VBT, or if it's declared
	 * as HDMI-only and routed to a combo PHY, the encoder either won't be
	 * present at all or it will not have an aux_ch assigned.
	 */
	return dig_port ? &dig_port->base : NULL;
}
323
icl_aux_pw_to_phy(struct intel_display * display,const struct i915_power_well * power_well)324 static enum phy icl_aux_pw_to_phy(struct intel_display *display,
325 const struct i915_power_well *power_well)
326 {
327 struct intel_encoder *encoder = icl_aux_pw_to_encoder(display, power_well);
328
329 return encoder ? intel_encoder_to_phy(encoder) : PHY_NONE;
330 }
331
icl_aux_pw_is_tc_phy(struct intel_display * display,const struct i915_power_well * power_well)332 static bool icl_aux_pw_is_tc_phy(struct intel_display *display,
333 const struct i915_power_well *power_well)
334 {
335 struct intel_encoder *encoder = icl_aux_pw_to_encoder(display, power_well);
336
337 return encoder && intel_encoder_is_tc(encoder);
338 }
339
/*
 * Wait for the power well's status bit to signal it is enabled. On DG2
 * wells flagged fixed_enable_delay there is no usable ack bit, so a fixed
 * delay is used instead. A timeout only triggers a WARN when the caller
 * did not declare it expected (@timeout_expected).
 */
static void hsw_wait_for_power_well_enable(struct intel_display *display,
					   struct i915_power_well *power_well,
					   bool timeout_expected)
{
	const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
	int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
	/* Per-well override, defaulting to 1 ms. */
	int timeout = power_well->desc->enable_timeout ? : 1;

	/*
	 * For some power wells we're not supposed to watch the status bit for
	 * an ack, but rather just wait a fixed amount of time and then
	 * proceed. This is only used on DG2.
	 */
	if (display->platform.dg2 && power_well->desc->fixed_enable_delay) {
		usleep_range(600, 1200);
		return;
	}

	/* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */
	if (intel_de_wait_for_set_ms(display, regs->driver,
				     HSW_PWR_WELL_CTL_STATE(pw_idx), timeout)) {
		drm_dbg_kms(display->drm, "%s power well enable timeout\n",
			    intel_power_well_name(power_well));

		drm_WARN_ON(display->drm, !timeout_expected);

	}
}
368
/*
 * Return a bitmask of the agents currently requesting the power well to be
 * on: bit0=BIOS, bit1=driver, bit2=KVMR (if the register exists), bit3=debug.
 */
static u32 hsw_power_well_requesters(struct intel_display *display,
				     const struct i915_power_well_regs *regs,
				     int pw_idx)
{
	u32 req_mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
	u32 ret;

	ret = intel_de_read(display, regs->bios) & req_mask ? 1 : 0;
	ret |= intel_de_read(display, regs->driver) & req_mask ? 2 : 0;
	if (regs->kvmr.reg)
		ret |= intel_de_read(display, regs->kvmr) & req_mask ? 4 : 0;
	ret |= intel_de_read(display, regs->debug) & req_mask ? 8 : 0;

	return ret;
}
384
/*
 * Wait for the power well's status bit to clear after the driver dropped
 * its request. If some other agent still requests the well, skip the wait
 * (timeout 0) and just report who is keeping it on.
 */
static void hsw_wait_for_power_well_disable(struct intel_display *display,
					    struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
	int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
	u32 reqs;
	int ret;

	/*
	 * Bspec doesn't require waiting for PWs to get disabled, but still do
	 * this for paranoia. The known cases where a PW will be forced on:
	 * - a KVMR request on any power well via the KVMR request register
	 * - a DMC request on PW1 and MISC_IO power wells via the BIOS and
	 *   DEBUG request registers
	 * Skip the wait in case any of the request bits are set and print a
	 * diagnostic message.
	 */
	reqs = hsw_power_well_requesters(display, regs, pw_idx);

	ret = intel_de_wait_for_clear_ms(display, regs->driver,
					 HSW_PWR_WELL_CTL_STATE(pw_idx),
					 reqs ? 0 : 1);
	if (!ret)
		return;

	/* Refresh requesters in case they popped up during the wait. */
	if (!reqs)
		reqs = hsw_power_well_requesters(display, regs, pw_idx);

	drm_dbg_kms(display->drm,
		    "%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",
		    intel_power_well_name(power_well),
		    !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8));
}
419
/*
 * Wait for the fuse distribution status of power gate @pg, WARNing on
 * timeout (1 ms polling window, well above the HW spec'd latencies).
 */
static void gen9_wait_for_power_well_fuses(struct intel_display *display,
					   enum skl_power_gate pg)
{
	/* Timeout 5us for PG#0, for other PGs 1us */
	drm_WARN_ON(display->drm,
		    intel_de_wait_for_set_ms(display, SKL_FUSE_STATUS,
					     SKL_FUSE_PG_DIST_STATUS(pg), 1));
}
428
/*
 * Enable a HSW-style power well: handle fuse pre-wait (and the ADL-P PG1
 * workaround), set the driver request bit, wait for the HW ack and the
 * well's own fuse state, then run the post-enable hook (IRQs/gating).
 */
static void hsw_power_well_enable(struct intel_display *display,
				  struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
	int pw_idx = i915_power_well_instance(power_well)->hsw.idx;

	if (power_well->desc->has_fuses) {
		enum skl_power_gate pg;

		pg = pw_idx_to_pg(display, pw_idx);

		/* Wa_16013190616:adlp */
		if (display->platform.alderlake_p && pg == SKL_PG1)
			intel_de_rmw(display, GEN8_CHICKEN_DCPR_1, 0, DISABLE_FLR_SRC);

		/*
		 * For PW1 we have to wait both for the PW0/PG0 fuse state
		 * before enabling the power well and PW1/PG1's own fuse
		 * state after the enabling. For all other power wells with
		 * fuses we only have to wait for that PW/PG's fuse state
		 * after the enabling.
		 */
		if (pg == SKL_PG1)
			gen9_wait_for_power_well_fuses(display, SKL_PG0);
	}

	intel_de_rmw(display, regs->driver, 0, HSW_PWR_WELL_CTL_REQ(pw_idx));

	hsw_wait_for_power_well_enable(display, power_well, false);

	if (power_well->desc->has_fuses) {
		enum skl_power_gate pg;

		pg = pw_idx_to_pg(display, pw_idx);

		gen9_wait_for_power_well_fuses(display, pg);
	}

	hsw_power_well_post_enable(display,
				   power_well->desc->irq_pipe_mask);
}
470
/*
 * Disable a HSW-style power well: run the pre-disable hook (IRQs/gating)
 * first, clear the driver request bit, then wait for the HW to power down.
 */
static void hsw_power_well_disable(struct intel_display *display,
				   struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
	int pw_idx = i915_power_well_instance(power_well)->hsw.idx;

	hsw_power_well_pre_disable(display,
				   power_well->desc->irq_pipe_mask);

	intel_de_rmw(display, regs->driver, HSW_PWR_WELL_CTL_REQ(pw_idx), 0);
	hsw_wait_for_power_well_disable(display, power_well);
}
483
intel_aux_ch_is_edp(struct intel_display * display,enum aux_ch aux_ch)484 static bool intel_aux_ch_is_edp(struct intel_display *display, enum aux_ch aux_ch)
485 {
486 struct intel_digital_port *dig_port = aux_ch_to_digital_port(display, aux_ch);
487
488 return dig_port && dig_port->base.type == INTEL_OUTPUT_EDP;
489 }
490
/*
 * ICL-only: enable a combo PHY AUX power well. Sets the driver request bit,
 * enables the AUX lane in the PHY, waits for the HW ack and applies
 * Display WA #1178 for non-eDP AUX A/B channels.
 */
static void
icl_combo_phy_aux_power_well_enable(struct intel_display *display,
				    struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
	int pw_idx = i915_power_well_instance(power_well)->hsw.idx;

	drm_WARN_ON(display->drm, !display->platform.icelake);

	intel_de_rmw(display, regs->driver, 0, HSW_PWR_WELL_CTL_REQ(pw_idx));

	/*
	 * FIXME not sure if we should derive the PHY from the pw_idx, or
	 * from the VBT defined AUX_CH->DDI->PHY mapping.
	 */
	intel_de_rmw(display, ICL_PORT_CL_DW12(ICL_AUX_PW_TO_PHY(pw_idx)),
		     0, ICL_LANE_ENABLE_AUX);

	hsw_wait_for_power_well_enable(display, power_well, false);

	/* Display WA #1178: icl */
	if (pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B &&
	    !intel_aux_ch_is_edp(display, ICL_AUX_PW_TO_CH(pw_idx)))
		intel_de_rmw(display, ICL_PORT_TX_DW6_AUX(ICL_AUX_PW_TO_PHY(pw_idx)),
			     0, O_FUNC_OVRD_EN | O_LDO_BYPASS_CRI);
}
517
/*
 * ICL-only: disable a combo PHY AUX power well. Mirror of the enable path:
 * disable the AUX lane first, clear the driver request, wait for power down.
 */
static void
icl_combo_phy_aux_power_well_disable(struct intel_display *display,
				     struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
	int pw_idx = i915_power_well_instance(power_well)->hsw.idx;

	drm_WARN_ON(display->drm, !display->platform.icelake);

	/*
	 * FIXME not sure if we should derive the PHY from the pw_idx, or
	 * from the VBT defined AUX_CH->DDI->PHY mapping.
	 */
	intel_de_rmw(display, ICL_PORT_CL_DW12(ICL_AUX_PW_TO_PHY(pw_idx)),
		     ICL_LANE_ENABLE_AUX, 0);

	intel_de_rmw(display, regs->driver, HSW_PWR_WELL_CTL_REQ(pw_idx), 0);

	hsw_wait_for_power_well_disable(display, power_well);
}
538
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)

/*
 * Debug-only: WARN if the TC port reference is not held while touching the
 * AUX power well. On ICL (display ver 11), legacy TC ports needing AUX power
 * for TC-cold exit are exempt from the check.
 */
static void icl_tc_port_assert_ref_held(struct intel_display *display,
					struct i915_power_well *power_well,
					struct intel_digital_port *dig_port)
{
	if (drm_WARN_ON(display->drm, !dig_port))
		return;

	if (DISPLAY_VER(display) == 11 && intel_tc_cold_requires_aux_pw(dig_port))
		return;

	drm_WARN_ON(display->drm, !intel_tc_port_ref_held(dig_port));
}

#else

/* No-op stub when runtime PM debugging is compiled out. */
static void icl_tc_port_assert_ref_held(struct intel_display *display,
					struct i915_power_well *power_well,
					struct intel_digital_port *dig_port)
{
}

#endif
563
/* Map a TGL TC AUX power well index to its Type-C port number. */
#define TGL_AUX_PW_TO_TC_PORT(pw_idx)	((pw_idx) - TGL_PW_CTL_IDX_AUX_TC1)
565
/*
 * Ask pcode to block TC-cold (exit the TC-cold power state), retrying up to
 * three times on -EAGAIN with a 1 ms backoff. Failure is only logged for now
 * (see TODO below).
 */
static void icl_tc_cold_exit(struct intel_display *display)
{
	int ret, tries = 0;

	while (1) {
		ret = intel_parent_pcode_write(display, ICL_PCODE_EXIT_TCCOLD, 0);
		if (ret != -EAGAIN || ++tries == 3)
			break;
		msleep(1);
	}

	/* Spec states that TC cold exit can take up to 1ms to complete */
	if (!ret)
		msleep(1);

	/* TODO: turn failure into a error as soon i915 CI updates ICL IFWI */
	drm_dbg_kms(display->drm, "TC cold block %s\n", ret ? "failed" :
		    "succeeded");
}
585
/*
 * Enable a Type-C PHY AUX power well: select TBT/non-TBT AUX IO mode, set
 * the driver request, handle the ICL TC-cold exit sequence and wait for the
 * HW ack (a timeout may be expected, see below). On TGL+ non-TBT wells also
 * poll the DKL PHY microcontroller health bit.
 */
static void
icl_tc_phy_aux_power_well_enable(struct intel_display *display,
				 struct i915_power_well *power_well)
{
	enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well);
	struct intel_digital_port *dig_port = aux_ch_to_digital_port(display, aux_ch);
	const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
	bool is_tbt = power_well->desc->is_tc_tbt;
	bool timeout_expected;
	u32 val;
	int ret;

	icl_tc_port_assert_ref_held(display, power_well, dig_port);

	/* Route the AUX channel through TBT IO for TBT wells. */
	intel_de_rmw(display, DP_AUX_CH_CTL(aux_ch),
		     DP_AUX_CH_CTL_TBT_IO, is_tbt ? DP_AUX_CH_CTL_TBT_IO : 0);

	intel_de_rmw(display, regs->driver,
		     0,
		     HSW_PWR_WELL_CTL_REQ(i915_power_well_instance(power_well)->hsw.idx));

	/*
	 * An AUX timeout is expected if the TBT DP tunnel is down,
	 * or need to enable AUX on a legacy TypeC port as part of the TC-cold
	 * exit sequence.
	 */
	timeout_expected = is_tbt || intel_tc_cold_requires_aux_pw(dig_port);
	if (DISPLAY_VER(display) == 11 && intel_tc_cold_requires_aux_pw(dig_port))
		icl_tc_cold_exit(display);

	hsw_wait_for_power_well_enable(display, power_well, timeout_expected);

	if (DISPLAY_VER(display) >= 12 && !is_tbt) {
		enum tc_port tc_port;

		tc_port = TGL_AUX_PW_TO_TC_PORT(i915_power_well_instance(power_well)->hsw.idx);

		/* Poll the DKL PHY uC health bit for up to 1 ms. */
		ret = poll_timeout_us(val = intel_dkl_phy_read(display, DKL_CMN_UC_DW_27(tc_port)),
				      val & DKL_CMN_UC_DW27_UC_HEALTH,
				      100, 1000, false);
		if (ret)
			drm_warn(display->drm, "Timeout waiting TC uC health\n");
	}
}
630
631 static void
icl_aux_power_well_enable(struct intel_display * display,struct i915_power_well * power_well)632 icl_aux_power_well_enable(struct intel_display *display,
633 struct i915_power_well *power_well)
634 {
635 if (icl_aux_pw_is_tc_phy(display, power_well))
636 return icl_tc_phy_aux_power_well_enable(display, power_well);
637 else if (display->platform.icelake)
638 return icl_combo_phy_aux_power_well_enable(display,
639 power_well);
640 else
641 return hsw_power_well_enable(display, power_well);
642 }
643
644 static void
icl_aux_power_well_disable(struct intel_display * display,struct i915_power_well * power_well)645 icl_aux_power_well_disable(struct intel_display *display,
646 struct i915_power_well *power_well)
647 {
648 if (icl_aux_pw_is_tc_phy(display, power_well))
649 return hsw_power_well_disable(display, power_well);
650 else if (display->platform.icelake)
651 return icl_combo_phy_aux_power_well_disable(display,
652 power_well);
653 else
654 return hsw_power_well_disable(display, power_well);
655 }
656
657 /*
658 * We should only use the power well if we explicitly asked the hardware to
659 * enable it, so check if it's enabled and also check if we've requested it to
660 * be enabled.
661 */
hsw_power_well_enabled(struct intel_display * display,struct i915_power_well * power_well)662 static bool hsw_power_well_enabled(struct intel_display *display,
663 struct i915_power_well *power_well)
664 {
665 const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
666 enum i915_power_well_id id = i915_power_well_instance(power_well)->id;
667 int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
668 u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx) |
669 HSW_PWR_WELL_CTL_STATE(pw_idx);
670 u32 val;
671
672 val = intel_de_read(display, regs->driver);
673
674 /*
675 * On GEN9 big core due to a DMC bug the driver's request bits for PW1
676 * and the MISC_IO PW will be not restored, so check instead for the
677 * BIOS's own request bits, which are forced-on for these power wells
678 * when exiting DC5/6.
679 */
680 if (DISPLAY_VER(display) == 9 && !display->platform.broxton &&
681 (id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO))
682 val |= intel_de_read(display, regs->bios);
683
684 return (val & mask) == mask;
685 }
686
/*
 * Sanity-check the preconditions for entering DC9: DC9 not already enabled,
 * DC5 disabled, power well 2 off and display interrupts disabled.
 */
static void assert_can_enable_dc9(struct intel_display *display)
{
	drm_WARN_ONCE(display->drm,
		      (intel_de_read(display, DC_STATE_EN) & DC_STATE_EN_DC9),
		      "DC9 already programmed to be enabled.\n");
	drm_WARN_ONCE(display->drm,
		      intel_de_read(display, DC_STATE_EN) &
		      DC_STATE_EN_UPTO_DC5,
		      "DC5 still not disabled to enable DC9.\n");
	drm_WARN_ONCE(display->drm,
		      intel_de_read(display, HSW_PWR_WELL_CTL2) &
		      HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2),
		      "Power well 2 on.\n");
	drm_WARN_ONCE(display->drm, intel_parent_irq_enabled(display),
		      "Interrupts not disabled yet.\n");

	/*
	 * TODO: check for the following to verify the conditions to enter DC9
	 * state are satisfied:
	 * 1] Check relevant display engine registers to verify if mode set
	 * disable sequence was followed.
	 * 2] Check if display uninitialize sequence is initialized.
	 */
}
711
/*
 * Sanity-check the preconditions for exiting DC9: interrupts must still be
 * disabled and DC5 must not be enabled.
 */
static void assert_can_disable_dc9(struct intel_display *display)
{
	drm_WARN_ONCE(display->drm, intel_parent_irq_enabled(display),
		      "Interrupts not disabled yet.\n");
	drm_WARN_ONCE(display->drm,
		      intel_de_read(display, DC_STATE_EN) &
		      DC_STATE_EN_UPTO_DC5,
		      "DC5 still not disabled.\n");

	/*
	 * TODO: check for the following to verify DC9 state was indeed
	 * entered before programming to disable it:
	 * 1] Check relevant display engine registers to verify if mode
	 * set disable sequence was followed.
	 * 2] Check if display uninitialize sequence is initialized.
	 */
}
729
/*
 * Write DC_STATE_EN and verify the value sticks, rewriting as needed: the
 * DMC firmware is known to occasionally revert the write (see comment
 * below). Gives up after 100 rewrites and logs an error.
 */
static void gen9_write_dc_state(struct intel_display *display,
				u32 state)
{
	int rewrites = 0;
	int rereads = 0;
	u32 v;

	intel_de_write(display, DC_STATE_EN, state);

	/* It has been observed that disabling the dc6 state sometimes
	 * doesn't stick and dmc keeps returning old value. Make sure
	 * the write really sticks enough times and also force rewrite until
	 * we are confident that state is exactly what we want.
	 */
	do {
		v = intel_de_read(display, DC_STATE_EN);

		if (v != state) {
			intel_de_write(display, DC_STATE_EN, state);
			rewrites++;
			rereads = 0;
		} else if (rereads++ > 5) {
			/* Value held stable across several rereads - done. */
			break;
		}

	} while (rewrites < 100);

	if (v != state)
		drm_err(display->drm,
			"Writing dc state to 0x%x failed, now 0x%x\n",
			state, v);

	/* Most of the times we need one retry, avoid spam */
	if (rewrites > 1)
		drm_dbg_kms(display->drm,
			    "Rewrote dc state to 0x%x %d times\n",
			    state, rewrites);
}
768
gen9_dc_mask(struct intel_display * display)769 static u32 gen9_dc_mask(struct intel_display *display)
770 {
771 u32 mask;
772
773 mask = DC_STATE_EN_UPTO_DC5;
774
775 if (DISPLAY_VER(display) >= 12)
776 mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6
777 | DC_STATE_EN_DC9;
778 else if (DISPLAY_VER(display) == 11)
779 mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9;
780 else if (display->platform.geminilake || display->platform.broxton)
781 mask |= DC_STATE_EN_DC9;
782 else
783 mask |= DC_STATE_EN_UPTO_DC6;
784
785 return mask;
786 }
787
gen9_sanitize_dc_state(struct intel_display * display)788 void gen9_sanitize_dc_state(struct intel_display *display)
789 {
790 struct i915_power_domains *power_domains = &display->power.domains;
791 u32 val;
792
793 if (!HAS_DISPLAY(display))
794 return;
795
796 val = intel_de_read(display, DC_STATE_EN) & gen9_dc_mask(display);
797
798 drm_dbg_kms(display->drm,
799 "Resetting DC state tracking from %02x to %02x\n",
800 power_domains->dc_state, val);
801 power_domains->dc_state = val;
802 }
803
804 /**
805 * gen9_set_dc_state - set target display C power state
806 * @display: display instance
807 * @state: target DC power state
808 * - DC_STATE_DISABLE
809 * - DC_STATE_EN_UPTO_DC5
810 * - DC_STATE_EN_UPTO_DC6
811 * - DC_STATE_EN_DC9
812 *
813 * Signal to DMC firmware/HW the target DC power state passed in @state.
814 * DMC/HW can turn off individual display clocks and power rails when entering
815 * a deeper DC power state (higher in number) and turns these back when exiting
816 * that state to a shallower power state (lower in number). The HW will decide
817 * when to actually enter a given state on an on-demand basis, for instance
818 * depending on the active state of display pipes. The state of display
819 * registers backed by affected power rails are saved/restored as needed.
820 *
821 * Based on the above enabling a deeper DC power state is asynchronous wrt.
822 * enabling it. Disabling a deeper power state is synchronous: for instance
823 * setting %DC_STATE_DISABLE won't complete until all HW resources are turned
824 * back on and register state is restored. This is guaranteed by the MMIO write
825 * to DC_STATE_EN blocking until the state is restored.
826 */
gen9_set_dc_state(struct intel_display * display,u32 state)827 void gen9_set_dc_state(struct intel_display *display, u32 state)
828 {
829 struct i915_power_domains *power_domains = &display->power.domains;
830 bool dc6_was_enabled, enable_dc6;
831 u32 mask;
832 u32 val;
833
834 if (!HAS_DISPLAY(display))
835 return;
836
837 if (drm_WARN_ON_ONCE(display->drm,
838 state & ~power_domains->allowed_dc_mask))
839 state &= power_domains->allowed_dc_mask;
840
841 if (!power_domains->initializing)
842 intel_psr_notify_dc5_dc6(display);
843
844 val = intel_de_read(display, DC_STATE_EN);
845 mask = gen9_dc_mask(display);
846 drm_dbg_kms(display->drm, "Setting DC state from %02x to %02x\n",
847 val & mask, state);
848
849 /* Check if DMC is ignoring our DC state requests */
850 if ((val & mask) != power_domains->dc_state)
851 drm_err(display->drm, "DC state mismatch (0x%x -> 0x%x)\n",
852 power_domains->dc_state, val & mask);
853
854 enable_dc6 = state & DC_STATE_EN_UPTO_DC6;
855 dc6_was_enabled = power_domains->dc_state & DC_STATE_EN_UPTO_DC6;
856 if (!dc6_was_enabled && enable_dc6)
857 intel_dmc_update_dc6_allowed_count(display, true);
858
859 val &= ~mask;
860 val |= state;
861
862 gen9_write_dc_state(display, val);
863
864 if (!enable_dc6 && dc6_was_enabled)
865 intel_dmc_update_dc6_allowed_count(display, false);
866
867 power_domains->dc_state = val & mask;
868 }
869
/* Allow the HW to enter the DC3CO state. */
static void tgl_enable_dc3co(struct intel_display *display)
{
	drm_dbg_kms(display->drm, "Enabling DC3CO\n");
	gen9_set_dc_state(display, DC_STATE_EN_DC3CO);
}
875
/* Disallow DC3CO and wait out the mandated exit time. */
static void tgl_disable_dc3co(struct intel_display *display)
{
	drm_dbg_kms(display->drm, "Disabling DC3CO\n");
	/* Clear the DC3CO status bit before disabling the state itself. */
	intel_de_rmw(display, DC_STATE_EN, DC_STATE_DC3CO_STATUS, 0);
	gen9_set_dc_state(display, DC_STATE_DISABLE);
	/*
	 * Delay of 200us DC3CO Exit time B.Spec 49196
	 */
	usleep_range(200, 210);
}
886
/* Sanity check the preconditions for entering DC5. */
static void assert_can_enable_dc5(struct intel_display *display)
{
	enum i915_power_well_id high_pg;

	/* Power wells at this level and above must be disabled for DC5 entry */
	if (DISPLAY_VER(display) == 12)
		high_pg = ICL_DISP_PW_3;
	else
		high_pg = SKL_DISP_PW_2;

	drm_WARN_ONCE(display->drm,
		      intel_display_power_well_is_enabled(display, high_pg),
		      "Power wells above platform's DC5 limit still enabled.\n");

	/* DC5 must not already be requested. */
	drm_WARN_ONCE(display->drm,
		      (intel_de_read(display, DC_STATE_EN) &
		       DC_STATE_EN_UPTO_DC5),
		      "DC5 already programmed to be enabled.\n");

	/* The device must be awake and the main DMC firmware loaded. */
	assert_display_rpm_held(display);

	assert_main_dmc_loaded(display);
}
910
/* Allow the HW to enter DC5, after validating the preconditions. */
void gen9_enable_dc5(struct intel_display *display)
{
	assert_can_enable_dc5(display);

	drm_dbg_kms(display->drm, "Enabling DC5\n");

	/* Wa Display #1183: skl,kbl,cfl */
	if (DISPLAY_VER(display) == 9 && !display->platform.broxton)
		intel_de_rmw(display, GEN8_CHICKEN_DCPR_1,
			     0, SKL_SELECT_ALTERNATE_DC_EXIT);

	/* Arm the DMC wakelock mechanism for DC5 before enabling the state. */
	intel_dmc_wl_enable(display, DC_STATE_EN_UPTO_DC5);

	gen9_set_dc_state(display, DC_STATE_EN_UPTO_DC5);
}
926
/* Sanity check the preconditions for entering DC6. */
static void assert_can_enable_dc6(struct intel_display *display)
{
	/* The utility pin must not be enabled in PWM mode while in DC6. */
	drm_WARN_ONCE(display->drm,
		      (intel_de_read(display, UTIL_PIN_CTL) &
		       (UTIL_PIN_ENABLE | UTIL_PIN_MODE_MASK)) ==
		      (UTIL_PIN_ENABLE | UTIL_PIN_MODE_PWM),
		      "Utility pin enabled in PWM mode\n");
	/* DC6 must not already be requested. */
	drm_WARN_ONCE(display->drm,
		      (intel_de_read(display, DC_STATE_EN) &
		       DC_STATE_EN_UPTO_DC6),
		      "DC6 already programmed to be enabled.\n");

	/* DC6 depends on the main DMC firmware being loaded. */
	assert_main_dmc_loaded(display);
}
941
/* Allow the HW to enter DC6, after validating the preconditions. */
void skl_enable_dc6(struct intel_display *display)
{
	assert_can_enable_dc6(display);

	drm_dbg_kms(display->drm, "Enabling DC6\n");

	/* Wa Display #1183: skl,kbl,cfl */
	if (DISPLAY_VER(display) == 9 && !display->platform.broxton)
		intel_de_rmw(display, GEN8_CHICKEN_DCPR_1,
			     0, SKL_SELECT_ALTERNATE_DC_EXIT);

	/* Arm the DMC wakelock mechanism for DC6 before enabling the state. */
	intel_dmc_wl_enable(display, DC_STATE_EN_UPTO_DC6);

	gen9_set_dc_state(display, DC_STATE_EN_UPTO_DC6);
}
957
/* Enter DC9 (deepest display-off state), used around runtime/system suspend. */
void bxt_enable_dc9(struct intel_display *display)
{
	assert_can_enable_dc9(display);

	drm_dbg_kms(display->drm, "Enabling DC9\n");
	/*
	 * Power sequencer reset is needed on BXT/GLK, because the PPS registers
	 * aren't always on, unlike with South Display Engine on PCH.
	 */
	if (display->platform.broxton || display->platform.geminilake)
		bxt_pps_reset_all(display);
	gen9_set_dc_state(display, DC_STATE_EN_DC9);
}
971
/* Exit DC9 and re-apply state lost while the display was powered down. */
void bxt_disable_dc9(struct intel_display *display)
{
	assert_can_disable_dc9(display);

	drm_dbg_kms(display->drm, "Disabling DC9\n");

	gen9_set_dc_state(display, DC_STATE_DISABLE);

	/* Re-apply the PPS register unlock workaround. */
	intel_pps_unlock_regs_wa(display);
}
982
hsw_power_well_sync_hw(struct intel_display * display,struct i915_power_well * power_well)983 static void hsw_power_well_sync_hw(struct intel_display *display,
984 struct i915_power_well *power_well)
985 {
986 const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
987 int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
988 u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
989 u32 bios_req = intel_de_read(display, regs->bios);
990
991 /* Take over the request bit if set by BIOS. */
992 if (bios_req & mask) {
993 u32 drv_req = intel_de_read(display, regs->driver);
994
995 if (!(drv_req & mask))
996 intel_de_write(display, regs->driver, drv_req | mask);
997 intel_de_write(display, regs->bios, bios_req & ~mask);
998 }
999 }
1000
/* Power up (init) the DPIO PHY backing this BXT common lane power well. */
static void bxt_dpio_cmn_power_well_enable(struct intel_display *display,
					   struct i915_power_well *power_well)
{
	bxt_dpio_phy_init(display, i915_power_well_instance(power_well)->bxt.phy);
}
1006
/* Power down (uninit) the DPIO PHY backing this BXT common lane power well. */
static void bxt_dpio_cmn_power_well_disable(struct intel_display *display,
					    struct i915_power_well *power_well)
{
	bxt_dpio_phy_uninit(display, i915_power_well_instance(power_well)->bxt.phy);
}
1012
/* Report whether the DPIO PHY backing this power well is enabled. */
static bool bxt_dpio_cmn_power_well_enabled(struct intel_display *display,
					    struct i915_power_well *power_well)
{
	return bxt_dpio_phy_is_enabled(display, i915_power_well_instance(power_well)->bxt.phy);
}
1018
bxt_verify_dpio_phy_power_wells(struct intel_display * display)1019 static void bxt_verify_dpio_phy_power_wells(struct intel_display *display)
1020 {
1021 struct i915_power_well *power_well;
1022
1023 power_well = lookup_power_well(display, BXT_DISP_PW_DPIO_CMN_A);
1024 if (intel_power_well_refcount(power_well) > 0)
1025 bxt_dpio_phy_verify_state(display, i915_power_well_instance(power_well)->bxt.phy);
1026
1027 power_well = lookup_power_well(display, VLV_DISP_PW_DPIO_CMN_BC);
1028 if (intel_power_well_refcount(power_well) > 0)
1029 bxt_dpio_phy_verify_state(display, i915_power_well_instance(power_well)->bxt.phy);
1030
1031 if (display->platform.geminilake) {
1032 power_well = lookup_power_well(display,
1033 GLK_DISP_PW_DPIO_CMN_C);
1034 if (intel_power_well_refcount(power_well) > 0)
1035 bxt_dpio_phy_verify_state(display,
1036 i915_power_well_instance(power_well)->bxt.phy);
1037 }
1038 }
1039
gen9_dc_off_power_well_enabled(struct intel_display * display,struct i915_power_well * power_well)1040 static bool gen9_dc_off_power_well_enabled(struct intel_display *display,
1041 struct i915_power_well *power_well)
1042 {
1043 return ((intel_de_read(display, DC_STATE_EN) & DC_STATE_EN_DC3CO) == 0 &&
1044 (intel_de_read(display, DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0);
1045 }
1046
gen9_assert_dbuf_enabled(struct intel_display * display)1047 static void gen9_assert_dbuf_enabled(struct intel_display *display)
1048 {
1049 u8 hw_enabled_dbuf_slices = intel_enabled_dbuf_slices_mask(display);
1050 u8 enabled_dbuf_slices = display->dbuf.enabled_slices;
1051
1052 drm_WARN(display->drm,
1053 hw_enabled_dbuf_slices != enabled_dbuf_slices,
1054 "Unexpected DBuf power power state (0x%08x, expected 0x%08x)\n",
1055 hw_enabled_dbuf_slices,
1056 enabled_dbuf_slices);
1057 }
1058
/* Disable all DC states and verify the HW state that should have survived. */
void gen9_disable_dc_states(struct intel_display *display)
{
	struct i915_power_domains *power_domains = &display->power.domains;
	struct intel_cdclk_config cdclk_config = {};
	u32 old_state = power_domains->dc_state;

	/* DC3CO has its own exit sequence (including the post-exit delay). */
	if (power_domains->target_dc_state == DC_STATE_EN_DC3CO) {
		tgl_disable_dc3co(display);
		return;
	}

	/* Hold the DMC wakelock around the register write; skip it w/o display. */
	if (HAS_DISPLAY(display)) {
		intel_dmc_wl_get_noreg(display);
		gen9_set_dc_state(display, DC_STATE_DISABLE);
		intel_dmc_wl_put_noreg(display);
	} else {
		gen9_set_dc_state(display, DC_STATE_DISABLE);
		return;
	}

	/* Tear down the DC5/DC6 wakelock arming done at enable time. */
	if (old_state == DC_STATE_EN_UPTO_DC5 ||
	    old_state == DC_STATE_EN_UPTO_DC6)
		intel_dmc_wl_disable(display);

	/* Verify cdclk survived the DC transition unchanged. */
	intel_cdclk_get_cdclk(display, &cdclk_config);
	/* Can't read out voltage_level so can't use intel_cdclk_changed() */
	drm_WARN_ON(display->drm,
		    intel_cdclk_clock_changed(&display->cdclk.hw,
					      &cdclk_config));

	gen9_assert_dbuf_enabled(display);

	if (display->platform.geminilake || display->platform.broxton)
		bxt_verify_dpio_phy_power_wells(display);

	if (DISPLAY_VER(display) >= 11)
		/*
		 * DMC retains HW context only for port A, the other combo
		 * PHY's HW context for port B is lost after DC transitions,
		 * so we need to restore it manually.
		 */
		intel_combo_phy_init(display);
}
1102
/* Enabling the DC_off well means making sure all DC states are disabled. */
static void gen9_dc_off_power_well_enable(struct intel_display *display,
					  struct i915_power_well *power_well)
{
	gen9_disable_dc_states(display);
}
1108
gen9_dc_off_power_well_disable(struct intel_display * display,struct i915_power_well * power_well)1109 static void gen9_dc_off_power_well_disable(struct intel_display *display,
1110 struct i915_power_well *power_well)
1111 {
1112 struct i915_power_domains *power_domains = &display->power.domains;
1113
1114 if (!intel_dmc_has_payload(display))
1115 return;
1116
1117 switch (power_domains->target_dc_state) {
1118 case DC_STATE_EN_DC3CO:
1119 tgl_enable_dc3co(display);
1120 break;
1121 case DC_STATE_EN_UPTO_DC6:
1122 skl_enable_dc6(display);
1123 break;
1124 case DC_STATE_EN_UPTO_DC5:
1125 gen9_enable_dc5(display);
1126 break;
1127 }
1128 }
1129
/* No HW state to synchronize for wells without a control knob. */
static void i9xx_power_well_sync_hw_noop(struct intel_display *display,
					 struct i915_power_well *power_well)
{
}
1134
/* Always-on wells can't be enabled/disabled; nothing to do. */
static void i9xx_always_on_power_well_noop(struct intel_display *display,
					   struct i915_power_well *power_well)
{
}
1139
/* Always-on wells are, by definition, always enabled. */
static bool i9xx_always_on_power_well_enabled(struct intel_display *display,
					      struct i915_power_well *power_well)
{
	return true;
}
1145
i830_pipes_power_well_enable(struct intel_display * display,struct i915_power_well * power_well)1146 static void i830_pipes_power_well_enable(struct intel_display *display,
1147 struct i915_power_well *power_well)
1148 {
1149 if ((intel_de_read(display, TRANSCONF(display, PIPE_A)) & TRANSCONF_ENABLE) == 0)
1150 i830_enable_pipe(display, PIPE_A);
1151 if ((intel_de_read(display, TRANSCONF(display, PIPE_B)) & TRANSCONF_ENABLE) == 0)
1152 i830_enable_pipe(display, PIPE_B);
1153 }
1154
/* Turn off both pipes; note the reverse (B then A) order wrt. enable. */
static void i830_pipes_power_well_disable(struct intel_display *display,
					  struct i915_power_well *power_well)
{
	i830_disable_pipe(display, PIPE_B);
	i830_disable_pipe(display, PIPE_A);
}
1161
i830_pipes_power_well_enabled(struct intel_display * display,struct i915_power_well * power_well)1162 static bool i830_pipes_power_well_enabled(struct intel_display *display,
1163 struct i915_power_well *power_well)
1164 {
1165 return intel_de_read(display, TRANSCONF(display, PIPE_A)) & TRANSCONF_ENABLE &&
1166 intel_de_read(display, TRANSCONF(display, PIPE_B)) & TRANSCONF_ENABLE;
1167 }
1168
i830_pipes_power_well_sync_hw(struct intel_display * display,struct i915_power_well * power_well)1169 static void i830_pipes_power_well_sync_hw(struct intel_display *display,
1170 struct i915_power_well *power_well)
1171 {
1172 if (intel_power_well_refcount(power_well) > 0)
1173 i830_pipes_power_well_enable(display, power_well);
1174 else
1175 i830_pipes_power_well_disable(display, power_well);
1176 }
1177
/*
 * Set a VLV/CHV power well to the requested on/off state via the Punit and
 * wait for the status register to reflect the change.
 */
static void vlv_set_power_well(struct intel_display *display,
			       struct i915_power_well *power_well, bool enable)
{
	int pw_idx = i915_power_well_instance(power_well)->vlv.idx;
	u32 mask;
	u32 state;
	u32 ctrl;
	u32 val;
	int ret;

	mask = PUNIT_PWRGT_MASK(pw_idx);
	state = enable ? PUNIT_PWRGT_PWR_ON(pw_idx) :
		PUNIT_PWRGT_PWR_GATE(pw_idx);

	vlv_punit_get(display->drm);

	/* Nothing to do if the well is already in the requested state. */
	val = vlv_punit_read(display->drm, PUNIT_REG_PWRGT_STATUS);
	if ((val & mask) == state)
		goto out;

	/* Update only this well's bits in the control register. */
	ctrl = vlv_punit_read(display->drm, PUNIT_REG_PWRGT_CTRL);
	ctrl &= ~mask;
	ctrl |= state;
	vlv_punit_write(display->drm, PUNIT_REG_PWRGT_CTRL, ctrl);

	/* Poll every 500us, for up to 100ms, for the status to match. */
	ret = poll_timeout_us(val = vlv_punit_read(display->drm, PUNIT_REG_PWRGT_STATUS),
			      (val & mask) == state,
			      500, 100 * 1000, false);
	if (ret)
		drm_err(display->drm,
			"timeout setting power well state %08x (%08x)\n",
			state,
			vlv_punit_read(display->drm, PUNIT_REG_PWRGT_CTRL));

out:
	vlv_punit_put(display->drm);
}
1215
/* Enable a plain VLV power well (no extra init needed). */
static void vlv_power_well_enable(struct intel_display *display,
				  struct i915_power_well *power_well)
{
	vlv_set_power_well(display, power_well, true);
}
1221
/* Disable a plain VLV power well (no extra teardown needed). */
static void vlv_power_well_disable(struct intel_display *display,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(display, power_well, false);
}
1227
/* Query the Punit for the current on/off state of a VLV power well. */
static bool vlv_power_well_enabled(struct intel_display *display,
				   struct i915_power_well *power_well)
{
	int pw_idx = i915_power_well_instance(power_well)->vlv.idx;
	bool enabled = false;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(pw_idx);
	ctrl = PUNIT_PWRGT_PWR_ON(pw_idx);

	vlv_punit_get(display->drm);

	state = vlv_punit_read(display->drm, PUNIT_REG_PWRGT_STATUS) & mask;
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	drm_WARN_ON(display->drm, state != PUNIT_PWRGT_PWR_ON(pw_idx) &&
		    state != PUNIT_PWRGT_PWR_GATE(pw_idx));
	if (state == ctrl)
		enabled = true;

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(display->drm, PUNIT_REG_PWRGT_CTRL) & mask;
	drm_WARN_ON(display->drm, ctrl != state);

	vlv_punit_put(display->drm);

	return enabled;
}
1263
/* Program the VLV display clock gating and arbitration defaults. */
static void vlv_init_display_clock_gating(struct intel_display *display)
{
	/*
	 * On driver load, a pipe may be active and driving a DSI display.
	 * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
	 * (and never recovering) in this case. intel_dsi_post_disable() will
	 * clear it when we turn off the display.
	 *
	 * Note the rmw clears every bit except DPOUNIT_CLOCK_GATE_DISABLE,
	 * then sets VRHUNIT_CLOCK_GATE_DISABLE.
	 */
	intel_de_rmw(display, VLV_DSPCLK_GATE_D,
		     ~DPOUNIT_CLOCK_GATE_DISABLE, VRHUNIT_CLOCK_GATE_DISABLE);

	/*
	 * Disable trickle feed and enable pnd deadline calculation
	 */
	intel_de_write(display, MI_ARB_VLV,
		       MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE_VLV);
	intel_de_write(display, CBR1_VLV, 0);

	/* Program the raw clock frequency (in kHz -> MHz). */
	drm_WARN_ON(display->drm, DISPLAY_RUNTIME_INFO(display)->rawclk_freq == 0);
	intel_de_write(display, RAWCLK_FREQ_VLV,
		       DIV_ROUND_CLOSEST(DISPLAY_RUNTIME_INFO(display)->rawclk_freq,
					 1000));
}
1287
/* Re-initialize display state after powering up the VLV display well. */
static void vlv_display_power_well_init(struct intel_display *display)
{
	struct intel_encoder *encoder;
	enum pipe pipe;

	/*
	 * Enable the CRI clock source so we can get at the
	 * display and the reference clock for VGA
	 * hotplug / manual detection. Supposedly DSI also
	 * needs the ref clock up and running.
	 *
	 * CHV DPLL B/C have some issues if VGA mode is enabled.
	 */
	for_each_pipe(display, pipe) {
		u32 val = intel_de_read(display, DPLL(display, pipe));

		val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
		if (pipe != PIPE_A)
			val |= DPLL_INTEGRATED_CRI_CLK_VLV;

		intel_de_write(display, DPLL(display, pipe), val);
	}

	vlv_init_display_clock_gating(display);

	valleyview_enable_display_irqs(display);

	/*
	 * During driver initialization/resume we can avoid restoring the
	 * part of the HW/SW state that will be inited anyway explicitly.
	 */
	if (display->power.domains.initializing)
		return;

	intel_hpd_init(display);
	intel_hpd_poll_disable(display);

	/* Re-enable the ADPA, if we have one */
	for_each_intel_encoder(display->drm, encoder) {
		if (encoder->type == INTEL_OUTPUT_ANALOG)
			intel_crt_reset(&encoder->base);
	}

	intel_vga_disable(display);

	intel_pps_unlock_regs_wa(display);
}
1335
/* Tear down display state before powering down the VLV display well. */
static void vlv_display_power_well_deinit(struct intel_display *display)
{
	valleyview_disable_display_irqs(display);

	/* make sure we're done processing display irqs */
	intel_parent_irq_synchronize(display);

	vlv_pps_reset_all(display);

	/* Prevent us from re-enabling polling on accident in late suspend */
	if (!display->drm->dev->power.is_suspended)
		intel_hpd_poll_enable(display);
}
1349
/* Power the well up first, then re-init the display state it backs. */
static void vlv_display_power_well_enable(struct intel_display *display,
					  struct i915_power_well *power_well)
{
	vlv_set_power_well(display, power_well, true);

	vlv_display_power_well_init(display);
}
1357
/* Tear down dependent display state before powering the well down. */
static void vlv_display_power_well_disable(struct intel_display *display,
					   struct i915_power_well *power_well)
{
	vlv_display_power_well_deinit(display);

	vlv_set_power_well(display, power_well, false);
}
1365
/* Power up the DPIO common lane well and de-assert the common lane reset. */
static void vlv_dpio_cmn_power_well_enable(struct intel_display *display,
					   struct i915_power_well *power_well)
{
	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */

	vlv_set_power_well(display, power_well, true);

	/*
	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
	 * 6. De-assert cmn_reset/side_reset. Same as VLV X0.
	 *    a. GUnit 0x2110 bit[0] set to 1 (def 0)
	 *    b. The other bits such as sfr settings / modesel may all
	 *       be set to 0.
	 *
	 * This should only be done on init and resume from S3 with
	 * both PLLs disabled, or we risk losing DPIO and PLL
	 * synchronization.
	 */
	intel_de_rmw(display, DPIO_CTL, 0, DPIO_CMNRST);
}
1387
/* Assert the common lane reset and power down the DPIO common lane well. */
static void vlv_dpio_cmn_power_well_disable(struct intel_display *display,
					    struct i915_power_well *power_well)
{
	enum pipe pipe;

	/* All PLLs must be disabled before touching the common lane reset. */
	for_each_pipe(display, pipe)
		assert_pll_disabled(display, pipe);

	/* Assert common reset */
	intel_de_rmw(display, DPIO_CTL, DPIO_CMNRST, 0);

	vlv_set_power_well(display, power_well, false);
}
1401
1402 #define BITS_SET(val, bits) (((val) & (bits)) == (bits))
1403
/*
 * Cross-check DISPLAY_PHY_STATUS against the expected state derived from
 * the cached PHY_CONTROL value and the common lane power well states.
 */
static void assert_chv_phy_status(struct intel_display *display)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(display, VLV_DISP_PW_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(display, CHV_DISP_PW_DPIO_CMN_D);
	u32 phy_control = display->power.chv_phy_control;
	u32 phy_status = 0;
	u32 phy_status_mask = 0xffffffff;
	u32 val;

	/*
	 * The BIOS can leave the PHY in some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
	if (!display->power.chv_phy_assert[DPIO_PHY0])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
				     PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));

	if (!display->power.chv_phy_assert[DPIO_PHY1])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));

	if (intel_power_well_is_enabled(display, cmn_bc)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY0);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);

		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);

		/* CL1 is on whenever anything is on in either channel */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);

		/*
		 * The DPLLB check accounts for the pipe B + port A usage
		 * with CL2 powered up but all the lanes in the second channel
		 * powered down.
		 */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
		    (intel_de_read(display, DPLL(display, PIPE_B)) & DPLL_VCO_ENABLE) == 0)
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);

		/* Spline LDOs: low lane pair (0x3) -> spline 0, high pair (0xc) -> spline 1. */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
	}

	if (intel_power_well_is_enabled(display, cmn_d)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY1);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
	}

	phy_status &= phy_status_mask;

	/*
	 * The PHY may be busy with some initial calibration and whatnot,
	 * so the power state can take a while to actually change.
	 */
	if (intel_de_wait_ms(display, DISPLAY_PHY_STATUS,
			     phy_status_mask, phy_status, 10, &val))
		drm_err(display->drm,
			"Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
			val & phy_status_mask, phy_status, display->power.chv_phy_control);
}
1507
1508 #undef BITS_SET
1509
/* Power up a CHV DPIO common lane well and program its PHY defaults. */
static void chv_dpio_cmn_power_well_enable(struct intel_display *display,
					   struct i915_power_well *power_well)
{
	enum i915_power_well_id id = i915_power_well_instance(power_well)->id;
	enum dpio_phy phy;
	u32 tmp;

	drm_WARN_ON_ONCE(display->drm,
			 id != VLV_DISP_PW_DPIO_CMN_BC &&
			 id != CHV_DISP_PW_DPIO_CMN_D);

	/* BC well -> PHY0, D well -> PHY1. */
	if (id == VLV_DISP_PW_DPIO_CMN_BC)
		phy = DPIO_PHY0;
	else
		phy = DPIO_PHY1;

	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
	vlv_set_power_well(display, power_well, true);

	/* Poll for phypwrgood signal */
	if (intel_de_wait_for_set_ms(display, DISPLAY_PHY_STATUS,
				     PHY_POWERGOOD(phy), 1))
		drm_err(display->drm, "Display PHY %d is not power up\n",
			phy);

	vlv_dpio_get(display->drm);

	/* Enable dynamic power down */
	tmp = vlv_dpio_read(display->drm, phy, CHV_CMN_DW28);
	tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
		DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
	vlv_dpio_write(display->drm, phy, CHV_CMN_DW28, tmp);

	if (id == VLV_DISP_PW_DPIO_CMN_BC) {
		tmp = vlv_dpio_read(display->drm, phy, CHV_CMN_DW6_CH1);
		tmp |= DPIO_DYNPWRDOWNEN_CH1;
		vlv_dpio_write(display->drm, phy, CHV_CMN_DW6_CH1, tmp);
	} else {
		/*
		 * Force the non-existing CL2 off. BXT does this
		 * too, so maybe it saves some power even though
		 * CL2 doesn't exist?
		 */
		tmp = vlv_dpio_read(display->drm, phy, CHV_CMN_DW30);
		tmp |= DPIO_CL2_LDOFUSE_PWRENB;
		vlv_dpio_write(display->drm, phy, CHV_CMN_DW30, tmp);
	}

	vlv_dpio_put(display->drm);

	/* De-assert the common lane reset in the cached/programmed control. */
	display->power.chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
	intel_de_write(display, DISPLAY_PHY_CONTROL,
		       display->power.chv_phy_control);

	drm_dbg_kms(display->drm,
		    "Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		    phy, display->power.chv_phy_control);

	assert_chv_phy_status(display);
}
1571
/* Assert the common lane reset and power down a CHV DPIO common lane well. */
static void chv_dpio_cmn_power_well_disable(struct intel_display *display,
					    struct i915_power_well *power_well)
{
	enum i915_power_well_id id = i915_power_well_instance(power_well)->id;
	enum dpio_phy phy;

	drm_WARN_ON_ONCE(display->drm,
			 id != VLV_DISP_PW_DPIO_CMN_BC &&
			 id != CHV_DISP_PW_DPIO_CMN_D);

	/* The PLLs fed by this PHY's pipes must already be off. */
	if (id == VLV_DISP_PW_DPIO_CMN_BC) {
		phy = DPIO_PHY0;
		assert_pll_disabled(display, PIPE_A);
		assert_pll_disabled(display, PIPE_B);
	} else {
		phy = DPIO_PHY1;
		assert_pll_disabled(display, PIPE_C);
	}

	display->power.chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
	intel_de_write(display, DISPLAY_PHY_CONTROL,
		       display->power.chv_phy_control);

	vlv_set_power_well(display, power_well, false);

	drm_dbg_kms(display->drm,
		    "Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		    phy, display->power.chv_phy_control);

	/* PHY is fully reset now, so we can enable the PHY state asserts */
	display->power.chv_phy_assert[phy] = true;

	assert_chv_phy_status(display);
}
1606
/*
 * Verify the per-lane power down status bits of one PHY channel match what
 * the current override setting and lane mask imply.
 */
static void assert_chv_phy_powergate(struct intel_display *display, enum dpio_phy phy,
				     enum dpio_channel ch, bool override, unsigned int mask)
{
	u32 reg, val, expected, actual;

	/*
	 * The BIOS can leave the PHY in some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
	if (!display->power.chv_phy_assert[phy])
		return;

	if (ch == DPIO_CH0)
		reg = CHV_CMN_DW0_CH0;
	else
		reg = CHV_CMN_DW6_CH1;

	vlv_dpio_get(display->drm);
	val = vlv_dpio_read(display->drm, phy, reg);
	vlv_dpio_put(display->drm);

	/*
	 * This assumes !override is only used when the port is disabled.
	 * All lanes should power down even without the override when
	 * the port is disabled.
	 */
	if (!override || mask == 0xf) {
		expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
		/*
		 * If CH1 common lane is not active anymore
		 * (eg. for pipe B DPLL) the entire channel will
		 * shut down, which causes the common lane registers
		 * to read as 0. That means we can't actually check
		 * the lane power down status bits, but as the entire
		 * register reads as 0 it's a good indication that the
		 * channel is indeed entirely powered down.
		 */
		if (ch == DPIO_CH1 && val == 0)
			expected = 0;
	} else if (mask != 0x0) {
		/* Some (but not all) lanes enabled: only "any" powered down. */
		expected = DPIO_ANYDL_POWERDOWN;
	} else {
		expected = 0;
	}

	if (ch == DPIO_CH0)
		actual = REG_FIELD_GET(DPIO_ANYDL_POWERDOWN_CH0 |
				       DPIO_ALLDL_POWERDOWN_CH0, val);
	else
		actual = REG_FIELD_GET(DPIO_ANYDL_POWERDOWN_CH1 |
				       DPIO_ALLDL_POWERDOWN_CH1, val);

	drm_WARN(display->drm, actual != expected,
		 "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
		 !!(actual & DPIO_ALLDL_POWERDOWN),
		 !!(actual & DPIO_ANYDL_POWERDOWN),
		 !!(expected & DPIO_ALLDL_POWERDOWN),
		 !!(expected & DPIO_ANYDL_POWERDOWN),
		 reg, val);
}
1670
/*
 * Set or clear the lane power down override enable for one PHY channel.
 * Returns the previous override state so the caller can restore it.
 */
bool chv_phy_powergate_ch(struct intel_display *display, enum dpio_phy phy,
			  enum dpio_channel ch, bool override)
{
	struct i915_power_domains *power_domains = &display->power.domains;
	bool was_override;

	mutex_lock(&power_domains->lock);

	was_override = display->power.chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	/* Skip the register write if the override state isn't changing. */
	if (override == was_override)
		goto out;

	if (override)
		display->power.chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
	else
		display->power.chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	intel_de_write(display, DISPLAY_PHY_CONTROL,
		       display->power.chv_phy_control);

	drm_dbg_kms(display->drm,
		    "Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
		    phy, ch, display->power.chv_phy_control);

	assert_chv_phy_status(display);

out:
	mutex_unlock(&power_domains->lock);

	return was_override;
}
1703
/*
 * Program the per-lane power down override mask for the encoder's PHY
 * channel and verify the resulting PHY state.
 */
void chv_phy_powergate_lanes(struct intel_encoder *encoder,
			     bool override, unsigned int mask)
{
	struct intel_display *display = to_intel_display(encoder);
	struct i915_power_domains *power_domains = &display->power.domains;
	enum dpio_phy phy = vlv_dig_port_to_phy(enc_to_dig_port(encoder));
	enum dpio_channel ch = vlv_dig_port_to_channel(enc_to_dig_port(encoder));

	mutex_lock(&power_domains->lock);

	/* Replace the channel's old lane mask with the new one. */
	display->power.chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
	display->power.chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);

	if (override)
		display->power.chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
	else
		display->power.chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	intel_de_write(display, DISPLAY_PHY_CONTROL,
		       display->power.chv_phy_control);

	drm_dbg_kms(display->drm,
		    "Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
		    phy, ch, mask, display->power.chv_phy_control);

	assert_chv_phy_status(display);

	assert_chv_phy_powergate(display, phy, ch, override, mask);

	mutex_unlock(&power_domains->lock);
}
1735
chv_pipe_power_well_enabled(struct intel_display * display,struct i915_power_well * power_well)1736 static bool chv_pipe_power_well_enabled(struct intel_display *display,
1737 struct i915_power_well *power_well)
1738 {
1739 enum pipe pipe = PIPE_A;
1740 bool enabled;
1741 u32 state, ctrl;
1742
1743 vlv_punit_get(display->drm);
1744
1745 state = vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe);
1746 /*
1747 * We only ever set the power-on and power-gate states, anything
1748 * else is unexpected.
1749 */
1750 drm_WARN_ON(display->drm, state != DP_SSS_PWR_ON(pipe) &&
1751 state != DP_SSS_PWR_GATE(pipe));
1752 enabled = state == DP_SSS_PWR_ON(pipe);
1753
1754 /*
1755 * A transient state at this point would mean some unexpected party
1756 * is poking at the power controls too.
1757 */
1758 ctrl = vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe);
1759 drm_WARN_ON(display->drm, ctrl << 16 != state);
1760
1761 vlv_punit_put(display->drm);
1762
1763 return enabled;
1764 }
1765
/*
 * Power the CHV pipe A power well on or off through the Punit and wait
 * (up to 100 ms) for the status bits to reflect the requested state.
 */
static void chv_set_pipe_power_well(struct intel_display *display,
				    struct i915_power_well *power_well,
				    bool enable)
{
	enum pipe pipe = PIPE_A;
	u32 state;
	u32 ctrl;
	int ret;

	state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);

	vlv_punit_get(display->drm);

	/* Nothing to do if the hardware already reports the requested state. */
	ctrl = vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM);
	if ((ctrl & DP_SSS_MASK(pipe)) == state)
		goto out;

	ctrl &= ~DP_SSC_MASK(pipe);
	ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
	vlv_punit_write(display->drm, PUNIT_REG_DSPSSPM, ctrl);

	/* Poll every 500 us until the status bits match the request. */
	ret = poll_timeout_us(ctrl = vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM),
			      (ctrl & DP_SSS_MASK(pipe)) == state,
			      500, 100 * 1000, false);
	if (ret)
		drm_err(display->drm,
			"timeout setting power well state %08x (%08x)\n",
			state,
			vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM));

out:
	vlv_punit_put(display->drm);
}
1801
/*
 * Re-write DISPLAY_PHY_CONTROL from the cached software value so the
 * hardware matches the driver's tracked PHY control state.
 */
static void chv_pipe_power_well_sync_hw(struct intel_display *display,
					struct i915_power_well *power_well)
{
	intel_de_write(display, DISPLAY_PHY_CONTROL,
		       display->power.chv_phy_control);
}
1808
/* Power the pipe well on first, then (re)init the display power state. */
static void chv_pipe_power_well_enable(struct intel_display *display,
				       struct i915_power_well *power_well)
{
	chv_set_pipe_power_well(display, power_well, true);

	vlv_display_power_well_init(display);
}
1816
/* Mirror of the enable path: deinit display state before powering off. */
static void chv_pipe_power_well_disable(struct intel_display *display,
					struct i915_power_well *power_well)
{
	vlv_display_power_well_deinit(display);

	chv_set_pipe_power_well(display, power_well, false);
}
1824
1825 static void
tgl_tc_cold_request(struct intel_display * display,bool block)1826 tgl_tc_cold_request(struct intel_display *display, bool block)
1827 {
1828 u8 tries = 0;
1829 int ret;
1830
1831 while (1) {
1832 u32 low_val;
1833 u32 high_val = 0;
1834
1835 if (block)
1836 low_val = TGL_PCODE_EXIT_TCCOLD_DATA_L_BLOCK_REQ;
1837 else
1838 low_val = TGL_PCODE_EXIT_TCCOLD_DATA_L_UNBLOCK_REQ;
1839
1840 /*
1841 * Spec states that we should timeout the request after 200us
1842 * but the function below will timeout after 500us
1843 */
1844 ret = intel_parent_pcode_read(display, TGL_PCODE_TCCOLD, &low_val, &high_val);
1845 if (ret == 0) {
1846 if (block &&
1847 (low_val & TGL_PCODE_EXIT_TCCOLD_DATA_L_EXIT_FAILED))
1848 ret = -EIO;
1849 else
1850 break;
1851 }
1852
1853 if (++tries == 3)
1854 break;
1855
1856 msleep(1);
1857 }
1858
1859 if (ret)
1860 drm_err(display->drm, "TC cold %sblock failed\n", block ? "" : "un");
1861 else
1862 drm_dbg_kms(display->drm, "TC cold %sblock succeeded\n",
1863 block ? "" : "un");
1864 }
1865
/* "Enabling" this well means blocking TC cold entry via pcode. */
static void
tgl_tc_cold_off_power_well_enable(struct intel_display *display,
				  struct i915_power_well *power_well)
{
	tgl_tc_cold_request(display, true);
}
1872
/* "Disabling" this well means unblocking TC cold entry via pcode. */
static void
tgl_tc_cold_off_power_well_disable(struct intel_display *display,
				   struct i915_power_well *power_well)
{
	tgl_tc_cold_request(display, false);
}
1879
1880 static void
tgl_tc_cold_off_power_well_sync_hw(struct intel_display * display,struct i915_power_well * power_well)1881 tgl_tc_cold_off_power_well_sync_hw(struct intel_display *display,
1882 struct i915_power_well *power_well)
1883 {
1884 if (intel_power_well_refcount(power_well) > 0)
1885 tgl_tc_cold_off_power_well_enable(display, power_well);
1886 else
1887 tgl_tc_cold_off_power_well_disable(display, power_well);
1888 }
1889
static bool
tgl_tc_cold_off_power_well_is_enabled(struct intel_display *display,
				      struct i915_power_well *power_well)
{
	/*
	 * Not the correct implementation, but there is no way to just read
	 * the state back from pcode, so return the refcount to avoid state
	 * mismatch errors.
	 */
	return intel_power_well_refcount(power_well);
}
1900
xelpdp_aux_power_well_enable(struct intel_display * display,struct i915_power_well * power_well)1901 static void xelpdp_aux_power_well_enable(struct intel_display *display,
1902 struct i915_power_well *power_well)
1903 {
1904 enum aux_ch aux_ch = i915_power_well_instance(power_well)->xelpdp.aux_ch;
1905 enum phy phy = icl_aux_pw_to_phy(display, power_well);
1906
1907 if (icl_aux_pw_is_tc_phy(display, power_well))
1908 icl_tc_port_assert_ref_held(display, power_well,
1909 aux_ch_to_digital_port(display, aux_ch));
1910
1911 intel_de_rmw(display, XELPDP_DP_AUX_CH_CTL(display, aux_ch),
1912 XELPDP_DP_AUX_CH_CTL_POWER_REQUEST,
1913 XELPDP_DP_AUX_CH_CTL_POWER_REQUEST);
1914
1915 if (HAS_LT_PHY(display)) {
1916 if (intel_de_wait_for_set_ms(display, XELPDP_DP_AUX_CH_CTL(display, aux_ch),
1917 XELPDP_DP_AUX_CH_CTL_POWER_STATUS, 2))
1918 drm_warn(display->drm,
1919 "Timeout waiting for PHY %c AUX channel power to be up\n",
1920 phy_name(phy));
1921 } else {
1922 /*
1923 * The power status flag cannot be used to determine whether aux
1924 * power wells have finished powering up. Instead we're
1925 * expected to just wait a fixed 600us after raising the request
1926 * bit.
1927 */
1928 usleep_range(600, 1200);
1929 }
1930 }
1931
xelpdp_aux_power_well_disable(struct intel_display * display,struct i915_power_well * power_well)1932 static void xelpdp_aux_power_well_disable(struct intel_display *display,
1933 struct i915_power_well *power_well)
1934 {
1935 enum aux_ch aux_ch = i915_power_well_instance(power_well)->xelpdp.aux_ch;
1936 enum phy phy = icl_aux_pw_to_phy(display, power_well);
1937
1938 intel_de_rmw(display, XELPDP_DP_AUX_CH_CTL(display, aux_ch),
1939 XELPDP_DP_AUX_CH_CTL_POWER_REQUEST,
1940 0);
1941
1942 if (HAS_LT_PHY(display)) {
1943 if (intel_de_wait_for_clear_ms(display, XELPDP_DP_AUX_CH_CTL(display, aux_ch),
1944 XELPDP_DP_AUX_CH_CTL_POWER_STATUS, 1))
1945 drm_warn(display->drm,
1946 "Timeout waiting for PHY %c AUX channel to powerdown\n",
1947 phy_name(phy));
1948 } else {
1949 usleep_range(10, 30);
1950 }
1951 }
1952
/* Report the AUX well state from the hardware power status bit. */
static bool xelpdp_aux_power_well_enabled(struct intel_display *display,
					  struct i915_power_well *power_well)
{
	enum aux_ch aux_ch = i915_power_well_instance(power_well)->xelpdp.aux_ch;

	return intel_de_read(display, XELPDP_DP_AUX_CH_CTL(display, aux_ch)) &
		XELPDP_DP_AUX_CH_CTL_POWER_STATUS;
}
1961
/*
 * Request the PICA power well on and wait (1 ms) for the status bit;
 * a timeout is logged and also triggers a WARN.
 */
static void xe2lpd_pica_power_well_enable(struct intel_display *display,
					  struct i915_power_well *power_well)
{
	intel_de_write(display, XE2LPD_PICA_PW_CTL,
		       XE2LPD_PICA_CTL_POWER_REQUEST);

	if (intel_de_wait_for_set_ms(display, XE2LPD_PICA_PW_CTL,
				     XE2LPD_PICA_CTL_POWER_STATUS, 1)) {
		drm_dbg_kms(display->drm, "pica power well enable timeout\n");

		drm_WARN(display->drm, 1, "Power well PICA timeout when enabled");
	}
}
1975
/*
 * Clear the PICA power request and wait (1 ms) for the status bit to
 * drop; a timeout is logged and also triggers a WARN.
 */
static void xe2lpd_pica_power_well_disable(struct intel_display *display,
					   struct i915_power_well *power_well)
{
	intel_de_write(display, XE2LPD_PICA_PW_CTL, 0);

	if (intel_de_wait_for_clear_ms(display, XE2LPD_PICA_PW_CTL,
				       XE2LPD_PICA_CTL_POWER_STATUS, 1)) {
		drm_dbg_kms(display->drm, "pica power well disable timeout\n");

		drm_WARN(display->drm, 1, "Power well PICA timeout when disabled");
	}
}
1988
/* Report the PICA well state from the hardware power status bit. */
static bool xe2lpd_pica_power_well_enabled(struct intel_display *display,
					   struct i915_power_well *power_well)
{
	return intel_de_read(display, XE2LPD_PICA_PW_CTL) &
		XE2LPD_PICA_CTL_POWER_STATUS;
}
1995
/* Always-on well: enable/disable are no-ops, is_enabled always true. */
const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = i9xx_always_on_power_well_noop,
	.disable = i9xx_always_on_power_well_noop,
	.is_enabled = i9xx_always_on_power_well_enabled,
};

/* CHV pipe A well, driven through the Punit. */
const struct i915_power_well_ops chv_pipe_power_well_ops = {
	.sync_hw = chv_pipe_power_well_sync_hw,
	.enable = chv_pipe_power_well_enable,
	.disable = chv_pipe_power_well_disable,
	.is_enabled = chv_pipe_power_well_enabled,
};

/* CHV DPIO common lane well; state readback shares the VLV helper. */
const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = chv_dpio_cmn_power_well_enable,
	.disable = chv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

/* i830 pipes power well. */
const struct i915_power_well_ops i830_pipes_power_well_ops = {
	.sync_hw = i830_pipes_power_well_sync_hw,
	.enable = i830_pipes_power_well_enable,
	.disable = i830_pipes_power_well_disable,
	.is_enabled = i830_pipes_power_well_enabled,
};
2023
/* HSW/BDW+ power well control register set (BIOS/driver/KVMR/debug). */
static const struct i915_power_well_regs hsw_power_well_regs = {
	.bios = HSW_PWR_WELL_CTL1,
	.driver = HSW_PWR_WELL_CTL2,
	.kvmr = HSW_PWR_WELL_CTL3,
	.debug = HSW_PWR_WELL_CTL4,
};

const struct i915_power_well_ops hsw_power_well_ops = {
	.regs = &hsw_power_well_regs,
	.sync_hw = hsw_power_well_sync_hw,
	.enable = hsw_power_well_enable,
	.disable = hsw_power_well_disable,
	.is_enabled = hsw_power_well_enabled,
};

/* GEN9+ DC-off well: enabling it exits (disallows) DC states. */
const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = gen9_dc_off_power_well_enable,
	.disable = gen9_dc_off_power_well_disable,
	.is_enabled = gen9_dc_off_power_well_enabled,
};

/* BXT DPIO common lane wells. */
const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = bxt_dpio_cmn_power_well_enable,
	.disable = bxt_dpio_cmn_power_well_disable,
	.is_enabled = bxt_dpio_cmn_power_well_enabled,
};

/* VLV display power well. */
const struct i915_power_well_ops vlv_display_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = vlv_display_power_well_enable,
	.disable = vlv_display_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

/* VLV DPIO common lane well. */
const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = vlv_dpio_cmn_power_well_enable,
	.disable = vlv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

/* Generic VLV power well. */
const struct i915_power_well_ops vlv_dpio_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = vlv_power_well_enable,
	.disable = vlv_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};
2073
/* ICL AUX well control registers; note no .kvmr, unlike hsw_power_well_regs. */
static const struct i915_power_well_regs icl_aux_power_well_regs = {
	.bios = ICL_PWR_WELL_CTL_AUX1,
	.driver = ICL_PWR_WELL_CTL_AUX2,
	.debug = ICL_PWR_WELL_CTL_AUX4,
};

const struct i915_power_well_ops icl_aux_power_well_ops = {
	.regs = &icl_aux_power_well_regs,
	.sync_hw = hsw_power_well_sync_hw,
	.enable = icl_aux_power_well_enable,
	.disable = icl_aux_power_well_disable,
	.is_enabled = hsw_power_well_enabled,
};

/* ICL DDI well control registers; note no .kvmr, unlike hsw_power_well_regs. */
static const struct i915_power_well_regs icl_ddi_power_well_regs = {
	.bios = ICL_PWR_WELL_CTL_DDI1,
	.driver = ICL_PWR_WELL_CTL_DDI2,
	.debug = ICL_PWR_WELL_CTL_DDI4,
};

const struct i915_power_well_ops icl_ddi_power_well_ops = {
	.regs = &icl_ddi_power_well_regs,
	.sync_hw = hsw_power_well_sync_hw,
	.enable = hsw_power_well_enable,
	.disable = hsw_power_well_disable,
	.is_enabled = hsw_power_well_enabled,
};

/* TGL TC cold "well": enable/disable issue pcode block/unblock requests. */
const struct i915_power_well_ops tgl_tc_cold_off_ops = {
	.sync_hw = tgl_tc_cold_off_power_well_sync_hw,
	.enable = tgl_tc_cold_off_power_well_enable,
	.disable = tgl_tc_cold_off_power_well_disable,
	.is_enabled = tgl_tc_cold_off_power_well_is_enabled,
};

/* XELPDP AUX wells, controlled via the per-channel AUX CH CTL register. */
const struct i915_power_well_ops xelpdp_aux_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = xelpdp_aux_power_well_enable,
	.disable = xelpdp_aux_power_well_disable,
	.is_enabled = xelpdp_aux_power_well_enabled,
};

/* Xe2_LPD PICA well, controlled via XE2LPD_PICA_PW_CTL. */
const struct i915_power_well_ops xe2lpd_pica_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = xe2lpd_pica_power_well_enable,
	.disable = xe2lpd_pica_power_well_disable,
	.is_enabled = xe2lpd_pica_power_well_enabled,
};
2122