1 // SPDX-License-Identifier: MIT
2 /*
3 * Copyright © 2022 Intel Corporation
4 */
5
6 #include "i915_drv.h"
7 #include "i915_irq.h"
8 #include "i915_reg.h"
9 #include "intel_backlight_regs.h"
10 #include "intel_combo_phy.h"
11 #include "intel_combo_phy_regs.h"
12 #include "intel_crt.h"
13 #include "intel_de.h"
14 #include "intel_display_irq.h"
15 #include "intel_display_power_well.h"
16 #include "intel_display_types.h"
17 #include "intel_dkl_phy.h"
18 #include "intel_dkl_phy_regs.h"
19 #include "intel_dmc.h"
20 #include "intel_dmc_wl.h"
21 #include "intel_dp_aux_regs.h"
22 #include "intel_dpio_phy.h"
23 #include "intel_dpll.h"
24 #include "intel_hotplug.h"
25 #include "intel_pcode.h"
26 #include "intel_pps.h"
27 #include "intel_tc.h"
28 #include "intel_vga.h"
29 #include "skl_watermark.h"
30 #include "vlv_dpio_phy_regs.h"
31 #include "vlv_sideband.h"
32 #include "vlv_sideband_reg.h"
33
34 struct i915_power_well_regs {
35 i915_reg_t bios;
36 i915_reg_t driver;
37 i915_reg_t kvmr;
38 i915_reg_t debug;
39 };
40
41 struct i915_power_well_ops {
42 const struct i915_power_well_regs *regs;
43 /*
44 * Synchronize the well's hw state to match the current sw state, for
45 * example enable/disable it based on the current refcount. Called
46 * during driver init and resume time, possibly after first calling
47 * the enable/disable handlers.
48 */
49 void (*sync_hw)(struct drm_i915_private *i915,
50 struct i915_power_well *power_well);
51 /*
52 * Enable the well and resources that depend on it (for example
53 * interrupts located on the well). Called after the 0->1 refcount
54 * transition.
55 */
56 void (*enable)(struct drm_i915_private *i915,
57 struct i915_power_well *power_well);
58 /*
59 * Disable the well and resources that depend on it. Called after
60 * the 1->0 refcount transition.
61 */
62 void (*disable)(struct drm_i915_private *i915,
63 struct i915_power_well *power_well);
64 /* Returns the hw enabled state. */
65 bool (*is_enabled)(struct drm_i915_private *i915,
66 struct i915_power_well *power_well);
67 };
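/*
 * Illustrative sketch only: a platform ops instance would wire these hooks
 * to the HSW-style helpers defined further below, along the lines of
 * (hsw_power_well_regs is assumed here for the example):
 *
 *	static const struct i915_power_well_ops example_hsw_power_well_ops = {
 *		.regs = &hsw_power_well_regs,
 *		.sync_hw = hsw_power_well_sync_hw,
 *		.enable = hsw_power_well_enable,
 *		.disable = hsw_power_well_disable,
 *		.is_enabled = hsw_power_well_enabled,
 *	};
 */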
68
69 static const struct i915_power_well_instance *
70 i915_power_well_instance(const struct i915_power_well *power_well)
71 {
72 return &power_well->desc->instances->list[power_well->instance_idx];
73 }
74
75 struct i915_power_well *
76 lookup_power_well(struct drm_i915_private *i915,
77 enum i915_power_well_id power_well_id)
78 {
79 struct i915_power_well *power_well;
80
81 for_each_power_well(i915, power_well)
82 if (i915_power_well_instance(power_well)->id == power_well_id)
83 return power_well;
84
85 /*
86 * It's not feasible to add error checking code to the callers since
87 * this condition really shouldn't happen and it doesn't even make sense
88 * to abort things like display initialization sequences. Just return
89 * the first power well and hope the WARN gets reported so we can fix
90 * our driver.
91 */
92 drm_WARN(&i915->drm, 1,
93 "Power well %d not defined for this platform\n",
94 power_well_id);
95 return &i915->display.power.domains.power_wells[0];
96 }
97
98 void intel_power_well_enable(struct drm_i915_private *i915,
99 struct i915_power_well *power_well)
100 {
101 drm_dbg_kms(&i915->drm, "enabling %s\n", intel_power_well_name(power_well));
102 power_well->desc->ops->enable(i915, power_well);
103 power_well->hw_enabled = true;
104 }
105
106 void intel_power_well_disable(struct drm_i915_private *i915,
107 struct i915_power_well *power_well)
108 {
109 drm_dbg_kms(&i915->drm, "disabling %s\n", intel_power_well_name(power_well));
110 power_well->hw_enabled = false;
111 power_well->desc->ops->disable(i915, power_well);
112 }
113
114 void intel_power_well_sync_hw(struct drm_i915_private *i915,
115 struct i915_power_well *power_well)
116 {
117 power_well->desc->ops->sync_hw(i915, power_well);
118 power_well->hw_enabled =
119 power_well->desc->ops->is_enabled(i915, power_well);
120 }
121
122 void intel_power_well_get(struct drm_i915_private *i915,
123 struct i915_power_well *power_well)
124 {
125 if (!power_well->count++)
126 intel_power_well_enable(i915, power_well);
127 }
128
129 void intel_power_well_put(struct drm_i915_private *i915,
130 struct i915_power_well *power_well)
131 {
132 drm_WARN(&i915->drm, !power_well->count,
133 "Use count on power well %s is already zero",
134 i915_power_well_instance(power_well)->name);
135
136 if (!--power_well->count)
137 intel_power_well_disable(i915, power_well);
138 }
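/*
 * Usage sketch (illustrative only): callers must keep get/put balanced so
 * that the enable hook runs exactly on the 0->1 transition and the disable
 * hook on the 1->0 transition:
 *
 *	intel_power_well_get(i915, power_well);   // 0->1: ops->enable()
 *	... touch hardware behind the well ...
 *	intel_power_well_put(i915, power_well);   // 1->0: ops->disable()
 */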
139
140 bool intel_power_well_is_enabled(struct drm_i915_private *i915,
141 struct i915_power_well *power_well)
142 {
143 return power_well->desc->ops->is_enabled(i915, power_well);
144 }
145
146 bool intel_power_well_is_enabled_cached(struct i915_power_well *power_well)
147 {
148 return power_well->hw_enabled;
149 }
150
151 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
152 enum i915_power_well_id power_well_id)
153 {
154 struct i915_power_well *power_well;
155
156 power_well = lookup_power_well(dev_priv, power_well_id);
157
158 return intel_power_well_is_enabled(dev_priv, power_well);
159 }
160
161 bool intel_power_well_is_always_on(struct i915_power_well *power_well)
162 {
163 return power_well->desc->always_on;
164 }
165
166 const char *intel_power_well_name(struct i915_power_well *power_well)
167 {
168 return i915_power_well_instance(power_well)->name;
169 }
170
171 struct intel_power_domain_mask *intel_power_well_domains(struct i915_power_well *power_well)
172 {
173 return &power_well->domains;
174 }
175
176 int intel_power_well_refcount(struct i915_power_well *power_well)
177 {
178 return power_well->count;
179 }
180
181 /*
182 * Starting with Haswell, we have a "Power Down Well" that can be turned off
183 * when not needed anymore. We have 4 registers that can request the power well
184 * to be enabled, and it will only be disabled if none of the registers is
185 * requesting it to be enabled.
186 */
187 static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv,
188 u8 irq_pipe_mask, bool has_vga)
189 {
190 if (has_vga)
191 intel_vga_reset_io_mem(dev_priv);
192
193 if (irq_pipe_mask)
194 gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask);
195 }
196
197 static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv,
198 u8 irq_pipe_mask)
199 {
200 if (irq_pipe_mask)
201 gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask);
202 }
203
204 #define ICL_AUX_PW_TO_PHY(pw_idx) \
205 ((pw_idx) - ICL_PW_CTL_IDX_AUX_A + PHY_A)
206
207 #define ICL_AUX_PW_TO_CH(pw_idx) \
208 ((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A)
209
210 #define ICL_TBT_AUX_PW_TO_CH(pw_idx) \
211 ((pw_idx) - ICL_PW_CTL_IDX_AUX_TBT1 + AUX_CH_C)
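/*
 * Worked example of the mappings above, assuming the usual contiguous enum
 * layout: ICL_AUX_PW_TO_CH(ICL_PW_CTL_IDX_AUX_B) yields AUX_CH_B, while
 * ICL_TBT_AUX_PW_TO_CH(ICL_PW_CTL_IDX_AUX_TBT2) yields AUX_CH_D, since the
 * TBT AUX channels start at AUX_CH_C.
 */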
212
213 static enum aux_ch icl_aux_pw_to_ch(const struct i915_power_well *power_well)
214 {
215 int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
216
217 return power_well->desc->is_tc_tbt ? ICL_TBT_AUX_PW_TO_CH(pw_idx) :
218 ICL_AUX_PW_TO_CH(pw_idx);
219 }
220
221 static struct intel_digital_port *
222 aux_ch_to_digital_port(struct drm_i915_private *dev_priv,
223 enum aux_ch aux_ch)
224 {
225 struct intel_encoder *encoder;
226
227 for_each_intel_encoder(&dev_priv->drm, encoder) {
228 struct intel_digital_port *dig_port;
229
230 /* We'll check the MST primary port */
231 if (encoder->type == INTEL_OUTPUT_DP_MST)
232 continue;
233
234 dig_port = enc_to_dig_port(encoder);
235
236 if (dig_port && dig_port->aux_ch == aux_ch)
237 return dig_port;
238 }
239
240 return NULL;
241 }
242
243 static enum phy icl_aux_pw_to_phy(struct drm_i915_private *i915,
244 const struct i915_power_well *power_well)
245 {
246 enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well);
247 struct intel_digital_port *dig_port = aux_ch_to_digital_port(i915, aux_ch);
248
249 /*
250 * FIXME should we care about the (VBT defined) dig_port->aux_ch
251 * relationship or should this be purely defined by the hardware layout?
252 * Currently if the port doesn't appear in the VBT, or if it's declared
253 * as HDMI-only and routed to a combo PHY, the encoder either won't be
254 * present at all or it will not have an aux_ch assigned.
255 */
256 return dig_port ? intel_encoder_to_phy(&dig_port->base) : PHY_NONE;
257 }
258
259 static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
260 struct i915_power_well *power_well,
261 bool timeout_expected)
262 {
263 const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
264 int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
265 int timeout = power_well->desc->enable_timeout ? : 1;
266
267 /*
268 * For some power wells we're not supposed to watch the status bit for
269 * an ack, but rather just wait a fixed amount of time and then
270 * proceed. This is only used on DG2.
271 */
272 if (IS_DG2(dev_priv) && power_well->desc->fixed_enable_delay) {
273 usleep_range(600, 1200);
274 return;
275 }
276
277 /* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */
278 if (intel_de_wait_for_set(dev_priv, regs->driver,
279 HSW_PWR_WELL_CTL_STATE(pw_idx), timeout)) {
280 drm_dbg_kms(&dev_priv->drm, "%s power well enable timeout\n",
281 intel_power_well_name(power_well));
282
283 drm_WARN_ON(&dev_priv->drm, !timeout_expected);
284
285 }
286 }
287
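/*
 * Returns a bitmask of which request registers still have this power well's
 * request bit set: bit 0 = BIOS, bit 1 = driver, bit 2 = KVMR (where
 * present), bit 3 = DEBUG. Only used for the diagnostics printed by
 * hsw_wait_for_power_well_disable().
 */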
288 static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv,
289 const struct i915_power_well_regs *regs,
290 int pw_idx)
291 {
292 u32 req_mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
293 u32 ret;
294
295 ret = intel_de_read(dev_priv, regs->bios) & req_mask ? 1 : 0;
296 ret |= intel_de_read(dev_priv, regs->driver) & req_mask ? 2 : 0;
297 if (regs->kvmr.reg)
298 ret |= intel_de_read(dev_priv, regs->kvmr) & req_mask ? 4 : 0;
299 ret |= intel_de_read(dev_priv, regs->debug) & req_mask ? 8 : 0;
300
301 return ret;
302 }
303
304 static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv,
305 struct i915_power_well *power_well)
306 {
307 const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
308 int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
309 bool disabled;
310 u32 reqs;
311
312 /*
313 * Bspec doesn't require waiting for PWs to get disabled, but still do
314 * this for paranoia. The known cases where a PW will be forced on:
315 * - a KVMR request on any power well via the KVMR request register
316 * - a DMC request on PW1 and MISC_IO power wells via the BIOS and
317 * DEBUG request registers
318 * Skip the wait in case any of the request bits are set and print a
319 * diagnostic message.
320 */
321 wait_for((disabled = !(intel_de_read(dev_priv, regs->driver) &
322 HSW_PWR_WELL_CTL_STATE(pw_idx))) ||
323 (reqs = hsw_power_well_requesters(dev_priv, regs, pw_idx)), 1);
324 if (disabled)
325 return;
326
327 drm_dbg_kms(&dev_priv->drm,
328 "%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",
329 intel_power_well_name(power_well),
330 !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8));
331 }
332
333 static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv,
334 enum skl_power_gate pg)
335 {
336 /* Timeout 5us for PG#0, for other PGs 1us */
337 drm_WARN_ON(&dev_priv->drm,
338 intel_de_wait_for_set(dev_priv, SKL_FUSE_STATUS,
339 SKL_FUSE_PG_DIST_STATUS(pg), 1));
340 }
341
342 static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
343 struct i915_power_well *power_well)
344 {
345 const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
346 int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
347
348 if (power_well->desc->has_fuses) {
349 enum skl_power_gate pg;
350
351 pg = DISPLAY_VER(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
352 SKL_PW_CTL_IDX_TO_PG(pw_idx);
353
354 /* Wa_16013190616:adlp */
355 if (IS_ALDERLAKE_P(dev_priv) && pg == SKL_PG1)
356 intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1, 0, DISABLE_FLR_SRC);
357
358 /*
359 * For PW1 we have to wait both for the PW0/PG0 fuse state
360 * before enabling the power well and PW1/PG1's own fuse
361 * state after the enabling. For all other power wells with
362 * fuses we only have to wait for that PW/PG's fuse state
363 * after the enabling.
364 */
365 if (pg == SKL_PG1)
366 gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0);
367 }
368
369 intel_de_rmw(dev_priv, regs->driver, 0, HSW_PWR_WELL_CTL_REQ(pw_idx));
370
371 hsw_wait_for_power_well_enable(dev_priv, power_well, false);
372
373 if (power_well->desc->has_fuses) {
374 enum skl_power_gate pg;
375
376 pg = DISPLAY_VER(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
377 SKL_PW_CTL_IDX_TO_PG(pw_idx);
378 gen9_wait_for_power_well_fuses(dev_priv, pg);
379 }
380
381 hsw_power_well_post_enable(dev_priv,
382 power_well->desc->irq_pipe_mask,
383 power_well->desc->has_vga);
384 }
385
386 static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
387 struct i915_power_well *power_well)
388 {
389 const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
390 int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
391
392 hsw_power_well_pre_disable(dev_priv,
393 power_well->desc->irq_pipe_mask);
394
395 intel_de_rmw(dev_priv, regs->driver, HSW_PWR_WELL_CTL_REQ(pw_idx), 0);
396 hsw_wait_for_power_well_disable(dev_priv, power_well);
397 }
398
399 static bool intel_aux_ch_is_edp(struct drm_i915_private *i915, enum aux_ch aux_ch)
400 {
401 struct intel_digital_port *dig_port = aux_ch_to_digital_port(i915, aux_ch);
402
403 return dig_port && dig_port->base.type == INTEL_OUTPUT_EDP;
404 }
405
406 static void
407 icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
408 struct i915_power_well *power_well)
409 {
410 const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
411 int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
412
413 drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));
414
415 intel_de_rmw(dev_priv, regs->driver, 0, HSW_PWR_WELL_CTL_REQ(pw_idx));
416
417 /*
418 * FIXME not sure if we should derive the PHY from the pw_idx, or
419 * from the VBT defined AUX_CH->DDI->PHY mapping.
420 */
421 intel_de_rmw(dev_priv, ICL_PORT_CL_DW12(ICL_AUX_PW_TO_PHY(pw_idx)),
422 0, ICL_LANE_ENABLE_AUX);
423
424 hsw_wait_for_power_well_enable(dev_priv, power_well, false);
425
426 /* Display WA #1178: icl */
427 if (pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B &&
428 !intel_aux_ch_is_edp(dev_priv, ICL_AUX_PW_TO_CH(pw_idx)))
429 intel_de_rmw(dev_priv, ICL_PORT_TX_DW6_AUX(ICL_AUX_PW_TO_PHY(pw_idx)),
430 0, O_FUNC_OVRD_EN | O_LDO_BYPASS_CRI);
431 }
432
433 static void
434 icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
435 struct i915_power_well *power_well)
436 {
437 const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
438 int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
439
440 drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));
441
442 /*
443 * FIXME not sure if we should derive the PHY from the pw_idx, or
444 * from the VBT defined AUX_CH->DDI->PHY mapping.
445 */
446 intel_de_rmw(dev_priv, ICL_PORT_CL_DW12(ICL_AUX_PW_TO_PHY(pw_idx)),
447 ICL_LANE_ENABLE_AUX, 0);
448
449 intel_de_rmw(dev_priv, regs->driver, HSW_PWR_WELL_CTL_REQ(pw_idx), 0);
450
451 hsw_wait_for_power_well_disable(dev_priv, power_well);
452 }
453
454 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
455
456 static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
457 struct i915_power_well *power_well,
458 struct intel_digital_port *dig_port)
459 {
460 if (drm_WARN_ON(&dev_priv->drm, !dig_port))
461 return;
462
463 if (DISPLAY_VER(dev_priv) == 11 && intel_tc_cold_requires_aux_pw(dig_port))
464 return;
465
466 drm_WARN_ON(&dev_priv->drm, !intel_tc_port_ref_held(dig_port));
467 }
468
469 #else
470
471 static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
472 struct i915_power_well *power_well,
473 struct intel_digital_port *dig_port)
474 {
475 }
476
477 #endif
478
479 #define TGL_AUX_PW_TO_TC_PORT(pw_idx) ((pw_idx) - TGL_PW_CTL_IDX_AUX_TC1)
480
481 static void icl_tc_cold_exit(struct drm_i915_private *i915)
482 {
483 int ret, tries = 0;
484
485 while (1) {
486 ret = snb_pcode_write_timeout(&i915->uncore, ICL_PCODE_EXIT_TCCOLD, 0,
487 250, 1);
488 if (ret != -EAGAIN || ++tries == 3)
489 break;
490 msleep(1);
491 }
492
493 /* Spec states that TC cold exit can take up to 1ms to complete */
494 if (!ret)
495 msleep(1);
496
497 /* TODO: turn failure into an error as soon as i915 CI updates ICL IFWI */
498 drm_dbg_kms(&i915->drm, "TC cold block %s\n", ret ? "failed" :
499 "succeeded");
500 }
501
502 static void
503 icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
504 struct i915_power_well *power_well)
505 {
506 enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well);
507 struct intel_digital_port *dig_port = aux_ch_to_digital_port(dev_priv, aux_ch);
508 const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
509 bool is_tbt = power_well->desc->is_tc_tbt;
510 bool timeout_expected;
511
512 icl_tc_port_assert_ref_held(dev_priv, power_well, dig_port);
513
514 intel_de_rmw(dev_priv, DP_AUX_CH_CTL(aux_ch),
515 DP_AUX_CH_CTL_TBT_IO, is_tbt ? DP_AUX_CH_CTL_TBT_IO : 0);
516
517 intel_de_rmw(dev_priv, regs->driver,
518 0,
519 HSW_PWR_WELL_CTL_REQ(i915_power_well_instance(power_well)->hsw.idx));
520
521 /*
522 * An AUX timeout is expected if the TBT DP tunnel is down,
523 * or when we need to enable AUX on a legacy TypeC port as part of the TC-cold
524 * exit sequence.
525 */
526 timeout_expected = is_tbt || intel_tc_cold_requires_aux_pw(dig_port);
527 if (DISPLAY_VER(dev_priv) == 11 && intel_tc_cold_requires_aux_pw(dig_port))
528 icl_tc_cold_exit(dev_priv);
529
530 hsw_wait_for_power_well_enable(dev_priv, power_well, timeout_expected);
531
532 if (DISPLAY_VER(dev_priv) >= 12 && !is_tbt) {
533 enum tc_port tc_port;
534
535 tc_port = TGL_AUX_PW_TO_TC_PORT(i915_power_well_instance(power_well)->hsw.idx);
536
537 if (wait_for(intel_dkl_phy_read(dev_priv, DKL_CMN_UC_DW_27(tc_port)) &
538 DKL_CMN_UC_DW27_UC_HEALTH, 1))
539 drm_warn(&dev_priv->drm,
540 "Timeout waiting for TC uC health\n");
541 }
542 }
543
544 static void
545 icl_aux_power_well_enable(struct drm_i915_private *dev_priv,
546 struct i915_power_well *power_well)
547 {
548 enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);
549
550 if (intel_phy_is_tc(dev_priv, phy))
551 return icl_tc_phy_aux_power_well_enable(dev_priv, power_well);
552 else if (IS_ICELAKE(dev_priv))
553 return icl_combo_phy_aux_power_well_enable(dev_priv,
554 power_well);
555 else
556 return hsw_power_well_enable(dev_priv, power_well);
557 }
558
559 static void
560 icl_aux_power_well_disable(struct drm_i915_private *dev_priv,
561 struct i915_power_well *power_well)
562 {
563 enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);
564
565 if (intel_phy_is_tc(dev_priv, phy))
566 return hsw_power_well_disable(dev_priv, power_well);
567 else if (IS_ICELAKE(dev_priv))
568 return icl_combo_phy_aux_power_well_disable(dev_priv,
569 power_well);
570 else
571 return hsw_power_well_disable(dev_priv, power_well);
572 }
573
574 /*
575 * We should only use the power well if we explicitly asked the hardware to
576 * enable it, so check if it's enabled and also check if we've requested it to
577 * be enabled.
578 */
579 static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
580 struct i915_power_well *power_well)
581 {
582 const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
583 enum i915_power_well_id id = i915_power_well_instance(power_well)->id;
584 int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
585 u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx) |
586 HSW_PWR_WELL_CTL_STATE(pw_idx);
587 u32 val;
588
589 val = intel_de_read(dev_priv, regs->driver);
590
591 /*
592 * On GEN9 big core due to a DMC bug the driver's request bits for PW1
593 * and the MISC_IO PW will not be restored, so check instead for the
594 * BIOS's own request bits, which are forced-on for these power wells
595 * when exiting DC5/6.
596 */
597 if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv) &&
598 (id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO))
599 val |= intel_de_read(dev_priv, regs->bios);
600
601 return (val & mask) == mask;
602 }
603
604 static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
605 {
606 drm_WARN_ONCE(&dev_priv->drm,
607 (intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC9),
608 "DC9 already programmed to be enabled.\n");
609 drm_WARN_ONCE(&dev_priv->drm,
610 intel_de_read(dev_priv, DC_STATE_EN) &
611 DC_STATE_EN_UPTO_DC5,
612 "DC5 still not disabled to enable DC9.\n");
613 drm_WARN_ONCE(&dev_priv->drm,
614 intel_de_read(dev_priv, HSW_PWR_WELL_CTL2) &
615 HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2),
616 "Power well 2 on.\n");
617 drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv),
618 "Interrupts not disabled yet.\n");
619
620 /*
621 * TODO: check for the following to verify the conditions to enter DC9
622 * state are satisfied:
623 * 1] Check relevant display engine registers to verify if mode set
624 * disable sequence was followed.
625 * 2] Check if display uninitialize sequence is initialized.
626 */
627 }
628
629 static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
630 {
631 drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv),
632 "Interrupts not disabled yet.\n");
633 drm_WARN_ONCE(&dev_priv->drm,
634 intel_de_read(dev_priv, DC_STATE_EN) &
635 DC_STATE_EN_UPTO_DC5,
636 "DC5 still not disabled.\n");
637
638 /*
639 * TODO: check for the following to verify DC9 state was indeed
640 * entered before programming to disable it:
641 * 1] Check relevant display engine registers to verify if mode
642 * set disable sequence was followed.
643 * 2] Check if display uninitialize sequence is initialized.
644 */
645 }
646
647 static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
648 u32 state)
649 {
650 int rewrites = 0;
651 int rereads = 0;
652 u32 v;
653
654 intel_de_write(dev_priv, DC_STATE_EN, state);
655
656 /* It has been observed that disabling the dc6 state sometimes
657 * doesn't stick and dmc keeps returning old value. Make sure
658 * the write really sticks enough times and also force rewrite until
659 * we are confident that state is exactly what we want.
660 */
661 do {
662 v = intel_de_read(dev_priv, DC_STATE_EN);
663
664 if (v != state) {
665 intel_de_write(dev_priv, DC_STATE_EN, state);
666 rewrites++;
667 rereads = 0;
668 } else if (rereads++ > 5) {
669 break;
670 }
671
672 } while (rewrites < 100);
673
674 if (v != state)
675 drm_err(&dev_priv->drm,
676 "Writing dc state to 0x%x failed, now 0x%x\n",
677 state, v);
678
679 /* Most of the time a single rewrite is enough, avoid spamming the log */
680 if (rewrites > 1)
681 drm_dbg_kms(&dev_priv->drm,
682 "Rewrote dc state to 0x%x %d times\n",
683 state, rewrites);
684 }
685
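/*
 * Mask of all DC state enable bits valid for the platform. For example, on
 * DISPLAY_VER >= 12 this covers DC_STATE_EN_UPTO_DC5 | DC_STATE_EN_DC3CO |
 * DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9, while pre-gen11 parts other than
 * BXT/GLK only expose DC_STATE_EN_UPTO_DC5 | DC_STATE_EN_UPTO_DC6.
 */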
686 static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
687 {
688 u32 mask;
689
690 mask = DC_STATE_EN_UPTO_DC5;
691
692 if (DISPLAY_VER(dev_priv) >= 12)
693 mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6
694 | DC_STATE_EN_DC9;
695 else if (DISPLAY_VER(dev_priv) == 11)
696 mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9;
697 else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
698 mask |= DC_STATE_EN_DC9;
699 else
700 mask |= DC_STATE_EN_UPTO_DC6;
701
702 return mask;
703 }
704
705 void gen9_sanitize_dc_state(struct drm_i915_private *i915)
706 {
707 struct i915_power_domains *power_domains = &i915->display.power.domains;
708 u32 val;
709
710 if (!HAS_DISPLAY(i915))
711 return;
712
713 val = intel_de_read(i915, DC_STATE_EN) & gen9_dc_mask(i915);
714
715 drm_dbg_kms(&i915->drm,
716 "Resetting DC state tracking from %02x to %02x\n",
717 power_domains->dc_state, val);
718 power_domains->dc_state = val;
719 }
720
721 /**
722 * gen9_set_dc_state - set target display C power state
723 * @dev_priv: i915 device instance
724 * @state: target DC power state
725 * - DC_STATE_DISABLE
726 * - DC_STATE_EN_UPTO_DC5
727 * - DC_STATE_EN_UPTO_DC6
728 * - DC_STATE_EN_DC9
729 *
730 * Signal to DMC firmware/HW the target DC power state passed in @state.
731 * DMC/HW can turn off individual display clocks and power rails when entering
732 * a deeper DC power state (higher in number) and turns these back on when exiting
733 * that state to a shallower power state (lower in number). The HW will decide
734 * when to actually enter a given state on an on-demand basis, for instance
735 * depending on the active state of display pipes. The state of display
736 * registers backed by affected power rails is saved/restored as needed.
737 *
738 * Based on the above, enabling a deeper DC power state is asynchronous wrt. the
739 * HW actually entering it. Disabling a deeper power state is synchronous: for instance
740 * setting %DC_STATE_DISABLE won't complete until all HW resources are turned
741 * back on and register state is restored. This is guaranteed by the MMIO write
742 * to DC_STATE_EN blocking until the state is restored.
743 */
744 void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
745 {
746 struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
747 u32 val;
748 u32 mask;
749
750 if (!HAS_DISPLAY(dev_priv))
751 return;
752
753 if (drm_WARN_ON_ONCE(&dev_priv->drm,
754 state & ~power_domains->allowed_dc_mask))
755 state &= power_domains->allowed_dc_mask;
756
757 val = intel_de_read(dev_priv, DC_STATE_EN);
758 mask = gen9_dc_mask(dev_priv);
759 drm_dbg_kms(&dev_priv->drm, "Setting DC state from %02x to %02x\n",
760 val & mask, state);
761
762 /* Check if DMC is ignoring our DC state requests */
763 if ((val & mask) != power_domains->dc_state)
764 drm_err(&dev_priv->drm, "DC state mismatch (0x%x -> 0x%x)\n",
765 power_domains->dc_state, val & mask);
766
767 val &= ~mask;
768 val |= state;
769
770 gen9_write_dc_state(dev_priv, val);
771
772 power_domains->dc_state = val & mask;
773 }
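/*
 * Usage sketch: the DC state helpers below all funnel through this
 * function, e.g.
 *
 *	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);	// allow DC5/DC6
 *	...
 *	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);		// block DC states
 */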
774
775 static void tgl_enable_dc3co(struct drm_i915_private *dev_priv)
776 {
777 drm_dbg_kms(&dev_priv->drm, "Enabling DC3CO\n");
778 gen9_set_dc_state(dev_priv, DC_STATE_EN_DC3CO);
779 }
780
781 static void tgl_disable_dc3co(struct drm_i915_private *dev_priv)
782 {
783 drm_dbg_kms(&dev_priv->drm, "Disabling DC3CO\n");
784 intel_de_rmw(dev_priv, DC_STATE_EN, DC_STATE_DC3CO_STATUS, 0);
785 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
786 /*
787 * Delay of 200us DC3CO Exit time B.Spec 49196
788 */
789 usleep_range(200, 210);
790 }
791
792 static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
793 {
794 enum i915_power_well_id high_pg;
795
796 /* Power wells at this level and above must be disabled for DC5 entry */
797 if (DISPLAY_VER(dev_priv) == 12)
798 high_pg = ICL_DISP_PW_3;
799 else
800 high_pg = SKL_DISP_PW_2;
801
802 drm_WARN_ONCE(&dev_priv->drm,
803 intel_display_power_well_is_enabled(dev_priv, high_pg),
804 "Power wells above platform's DC5 limit still enabled.\n");
805
806 drm_WARN_ONCE(&dev_priv->drm,
807 (intel_de_read(dev_priv, DC_STATE_EN) &
808 DC_STATE_EN_UPTO_DC5),
809 "DC5 already programmed to be enabled.\n");
810 assert_rpm_wakelock_held(&dev_priv->runtime_pm);
811
812 assert_dmc_loaded(dev_priv);
813 }
814
815 void gen9_enable_dc5(struct drm_i915_private *dev_priv)
816 {
817 assert_can_enable_dc5(dev_priv);
818
819 drm_dbg_kms(&dev_priv->drm, "Enabling DC5\n");
820
821 /* Wa Display #1183: skl,kbl,cfl */
822 if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv))
823 intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
824 0, SKL_SELECT_ALTERNATE_DC_EXIT);
825
826 intel_dmc_wl_enable(&dev_priv->display);
827
828 gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
829 }
830
831 static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
832 {
833 drm_WARN_ONCE(&dev_priv->drm,
834 (intel_de_read(dev_priv, UTIL_PIN_CTL) &
835 (UTIL_PIN_ENABLE | UTIL_PIN_MODE_MASK)) ==
836 (UTIL_PIN_ENABLE | UTIL_PIN_MODE_PWM),
837 "Utility pin enabled in PWM mode\n");
838 drm_WARN_ONCE(&dev_priv->drm,
839 (intel_de_read(dev_priv, DC_STATE_EN) &
840 DC_STATE_EN_UPTO_DC6),
841 "DC6 already programmed to be enabled.\n");
842
843 assert_dmc_loaded(dev_priv);
844 }
845
846 void skl_enable_dc6(struct drm_i915_private *dev_priv)
847 {
848 assert_can_enable_dc6(dev_priv);
849
850 drm_dbg_kms(&dev_priv->drm, "Enabling DC6\n");
851
852 /* Wa Display #1183: skl,kbl,cfl */
853 if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv))
854 intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
855 0, SKL_SELECT_ALTERNATE_DC_EXIT);
856
857 intel_dmc_wl_enable(&dev_priv->display);
858
859 gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
860 }
861
862 void bxt_enable_dc9(struct drm_i915_private *dev_priv)
863 {
864 struct intel_display *display = &dev_priv->display;
865
866 assert_can_enable_dc9(dev_priv);
867
868 drm_dbg_kms(&dev_priv->drm, "Enabling DC9\n");
869 /*
870 * Power sequencer reset is not needed on
871 * platforms with South Display Engine on PCH,
872 * because PPS registers are always on.
873 */
874 if (!HAS_PCH_SPLIT(dev_priv))
875 intel_pps_reset_all(display);
876 gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
877 }
878
879 void bxt_disable_dc9(struct drm_i915_private *dev_priv)
880 {
881 struct intel_display *display = &dev_priv->display;
882
883 assert_can_disable_dc9(dev_priv);
884
885 drm_dbg_kms(&dev_priv->drm, "Disabling DC9\n");
886
887 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
888
889 intel_pps_unlock_regs_wa(display);
890 }
891
892 static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
893 struct i915_power_well *power_well)
894 {
895 const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
896 int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
897 u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
898 u32 bios_req = intel_de_read(dev_priv, regs->bios);
899
900 /* Take over the request bit if set by BIOS. */
901 if (bios_req & mask) {
902 u32 drv_req = intel_de_read(dev_priv, regs->driver);
903
904 if (!(drv_req & mask))
905 intel_de_write(dev_priv, regs->driver, drv_req | mask);
906 intel_de_write(dev_priv, regs->bios, bios_req & ~mask);
907 }
908 }
909
910 static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
911 struct i915_power_well *power_well)
912 {
913 bxt_dpio_phy_init(dev_priv, i915_power_well_instance(power_well)->bxt.phy);
914 }
915
916 static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
917 struct i915_power_well *power_well)
918 {
919 bxt_dpio_phy_uninit(dev_priv, i915_power_well_instance(power_well)->bxt.phy);
920 }
921
922 static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
923 struct i915_power_well *power_well)
924 {
925 return bxt_dpio_phy_is_enabled(dev_priv, i915_power_well_instance(power_well)->bxt.phy);
926 }
927
928 static void bxt_verify_dpio_phy_power_wells(struct drm_i915_private *dev_priv)
929 {
930 struct i915_power_well *power_well;
931
932 power_well = lookup_power_well(dev_priv, BXT_DISP_PW_DPIO_CMN_A);
933 if (intel_power_well_refcount(power_well) > 0)
934 bxt_dpio_phy_verify_state(dev_priv, i915_power_well_instance(power_well)->bxt.phy);
935
936 power_well = lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
937 if (intel_power_well_refcount(power_well) > 0)
938 bxt_dpio_phy_verify_state(dev_priv, i915_power_well_instance(power_well)->bxt.phy);
939
940 if (IS_GEMINILAKE(dev_priv)) {
941 power_well = lookup_power_well(dev_priv,
942 GLK_DISP_PW_DPIO_CMN_C);
943 if (intel_power_well_refcount(power_well) > 0)
944 bxt_dpio_phy_verify_state(dev_priv,
945 i915_power_well_instance(power_well)->bxt.phy);
946 }
947 }
948
949 static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
950 struct i915_power_well *power_well)
951 {
952 return ((intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC3CO) == 0 &&
953 (intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0);
954 }
955
956 static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
957 {
958 u8 hw_enabled_dbuf_slices = intel_enabled_dbuf_slices_mask(dev_priv);
959 u8 enabled_dbuf_slices = dev_priv->display.dbuf.enabled_slices;
960
961 drm_WARN(&dev_priv->drm,
962 hw_enabled_dbuf_slices != enabled_dbuf_slices,
963 "Unexpected DBuf power state (0x%08x, expected 0x%08x)\n",
964 hw_enabled_dbuf_slices,
965 enabled_dbuf_slices);
966 }
967
968 void gen9_disable_dc_states(struct drm_i915_private *dev_priv)
969 {
970 struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
971 struct intel_cdclk_config cdclk_config = {};
972
973 if (power_domains->target_dc_state == DC_STATE_EN_DC3CO) {
974 tgl_disable_dc3co(dev_priv);
975 return;
976 }
977
978 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
979
980 if (!HAS_DISPLAY(dev_priv))
981 return;
982
983 intel_dmc_wl_disable(&dev_priv->display);
984
985 intel_cdclk_get_cdclk(dev_priv, &cdclk_config);
986 /* Can't read out voltage_level so can't use intel_cdclk_changed() */
987 drm_WARN_ON(&dev_priv->drm,
988 intel_cdclk_clock_changed(&dev_priv->display.cdclk.hw,
989 &cdclk_config));
990
991 gen9_assert_dbuf_enabled(dev_priv);
992
993 if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
994 bxt_verify_dpio_phy_power_wells(dev_priv);
995
996 if (DISPLAY_VER(dev_priv) >= 11)
997 /*
998 * DMC retains HW context only for port A, the other combo
999 * PHY's HW context for port B is lost after DC transitions,
1000 * so we need to restore it manually.
1001 */
1002 intel_combo_phy_init(dev_priv);
1003 }
1004
1005 static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
1006 struct i915_power_well *power_well)
1007 {
1008 gen9_disable_dc_states(dev_priv);
1009 }
1010
1011 static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
1012 struct i915_power_well *power_well)
1013 {
1014 struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
1015
1016 if (!intel_dmc_has_payload(dev_priv))
1017 return;
1018
1019 switch (power_domains->target_dc_state) {
1020 case DC_STATE_EN_DC3CO:
1021 tgl_enable_dc3co(dev_priv);
1022 break;
1023 case DC_STATE_EN_UPTO_DC6:
1024 skl_enable_dc6(dev_priv);
1025 break;
1026 case DC_STATE_EN_UPTO_DC5:
1027 gen9_enable_dc5(dev_priv);
1028 break;
1029 }
1030 }
1031
1032 static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv,
1033 struct i915_power_well *power_well)
1034 {
1035 }
1036
1037 static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
1038 struct i915_power_well *power_well)
1039 {
1040 }
1041
1042 static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
1043 struct i915_power_well *power_well)
1044 {
1045 return true;
1046 }
1047
1048 static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv,
1049 struct i915_power_well *power_well)
1050 {
1051 if ((intel_de_read(dev_priv, TRANSCONF(dev_priv, PIPE_A)) & TRANSCONF_ENABLE) == 0)
1052 i830_enable_pipe(dev_priv, PIPE_A);
1053 if ((intel_de_read(dev_priv, TRANSCONF(dev_priv, PIPE_B)) & TRANSCONF_ENABLE) == 0)
1054 i830_enable_pipe(dev_priv, PIPE_B);
1055 }
1056
1057 static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv,
1058 struct i915_power_well *power_well)
1059 {
1060 i830_disable_pipe(dev_priv, PIPE_B);
1061 i830_disable_pipe(dev_priv, PIPE_A);
1062 }
1063
1064 static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv,
1065 struct i915_power_well *power_well)
1066 {
1067 return intel_de_read(dev_priv, TRANSCONF(dev_priv, PIPE_A)) & TRANSCONF_ENABLE &&
1068 intel_de_read(dev_priv, TRANSCONF(dev_priv, PIPE_B)) & TRANSCONF_ENABLE;
1069 }
1070
1071 static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv,
1072 struct i915_power_well *power_well)
1073 {
1074 if (intel_power_well_refcount(power_well) > 0)
1075 i830_pipes_power_well_enable(dev_priv, power_well);
1076 else
1077 i830_pipes_power_well_disable(dev_priv, power_well);
1078 }
1079
1080 static void vlv_set_power_well(struct drm_i915_private *dev_priv,
1081 struct i915_power_well *power_well, bool enable)
1082 {
1083 int pw_idx = i915_power_well_instance(power_well)->vlv.idx;
1084 u32 mask;
1085 u32 state;
1086 u32 ctrl;
1087
1088 mask = PUNIT_PWRGT_MASK(pw_idx);
1089 state = enable ? PUNIT_PWRGT_PWR_ON(pw_idx) :
1090 PUNIT_PWRGT_PWR_GATE(pw_idx);
1091
1092 vlv_punit_get(dev_priv);
1093
1094 #define COND \
1095 ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)
1096
1097 if (COND)
1098 goto out;
1099
1100 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
1101 ctrl &= ~mask;
1102 ctrl |= state;
1103 vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
1104
1105 if (wait_for(COND, 100))
1106 drm_err(&dev_priv->drm,
1107 "timeout setting power well state %08x (%08x)\n",
1108 state,
1109 vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
1110
1111 #undef COND
1112
1113 out:
1114 vlv_punit_put(dev_priv);
1115 }
1116
1117 static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
1118 struct i915_power_well *power_well)
1119 {
1120 vlv_set_power_well(dev_priv, power_well, true);
1121 }
1122
1123 static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
1124 struct i915_power_well *power_well)
1125 {
1126 vlv_set_power_well(dev_priv, power_well, false);
1127 }
1128
1129 static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
1130 struct i915_power_well *power_well)
1131 {
1132 int pw_idx = i915_power_well_instance(power_well)->vlv.idx;
1133 bool enabled = false;
1134 u32 mask;
1135 u32 state;
1136 u32 ctrl;
1137
1138 mask = PUNIT_PWRGT_MASK(pw_idx);
1139 ctrl = PUNIT_PWRGT_PWR_ON(pw_idx);
1140
1141 vlv_punit_get(dev_priv);
1142
1143 state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
1144 /*
1145 * We only ever set the power-on and power-gate states, anything
1146 * else is unexpected.
1147 */
1148 drm_WARN_ON(&dev_priv->drm, state != PUNIT_PWRGT_PWR_ON(pw_idx) &&
1149 state != PUNIT_PWRGT_PWR_GATE(pw_idx));
1150 if (state == ctrl)
1151 enabled = true;
1152
1153 /*
1154 * A transient state at this point would mean some unexpected party
1155 * is poking at the power controls too.
1156 */
1157 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
1158 drm_WARN_ON(&dev_priv->drm, ctrl != state);
1159
1160 vlv_punit_put(dev_priv);
1161
1162 return enabled;
1163 }
1164
1165 static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
1166 {
1167 /*
1168 * On driver load, a pipe may be active and driving a DSI display.
1169 * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
1170 * (and never recovering) in this case. intel_dsi_post_disable() will
1171 * clear it when we turn off the display.
1172 */
1173 intel_de_rmw(dev_priv, DSPCLK_GATE_D(dev_priv),
1174 ~DPOUNIT_CLOCK_GATE_DISABLE, VRHUNIT_CLOCK_GATE_DISABLE);
1175
1176 /*
1177 * Disable trickle feed and enable pnd deadline calculation
1178 */
1179 intel_de_write(dev_priv, MI_ARB_VLV,
1180 MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
1181 intel_de_write(dev_priv, CBR1_VLV, 0);
1182
1183 drm_WARN_ON(&dev_priv->drm, DISPLAY_RUNTIME_INFO(dev_priv)->rawclk_freq == 0);
1184 intel_de_write(dev_priv, RAWCLK_FREQ_VLV,
1185 DIV_ROUND_CLOSEST(DISPLAY_RUNTIME_INFO(dev_priv)->rawclk_freq,
1186 1000));
1187 }
1188
1189 static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
1190 {
1191 struct intel_display *display = &dev_priv->display;
1192 struct intel_encoder *encoder;
1193 enum pipe pipe;
1194
1195 /*
1196 * Enable the CRI clock source so we can get at the
1197 * display and the reference clock for VGA
1198 * hotplug / manual detection. Supposedly DSI also
1199 * needs the ref clock up and running.
1200 *
1201 * CHV DPLL B/C have some issues if VGA mode is enabled.
1202 */
1203 for_each_pipe(dev_priv, pipe) {
1204 u32 val = intel_de_read(dev_priv, DPLL(dev_priv, pipe));
1205
1206 val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
1207 if (pipe != PIPE_A)
1208 val |= DPLL_INTEGRATED_CRI_CLK_VLV;
1209
1210 intel_de_write(dev_priv, DPLL(dev_priv, pipe), val);
1211 }
1212
1213 vlv_init_display_clock_gating(dev_priv);
1214
1215 spin_lock_irq(&dev_priv->irq_lock);
1216 valleyview_enable_display_irqs(dev_priv);
1217 spin_unlock_irq(&dev_priv->irq_lock);
1218
1219 /*
1220 * During driver initialization/resume we can avoid restoring the
1221 * part of the HW/SW state that will be inited anyway explicitly.
1222 */
1223 if (dev_priv->display.power.domains.initializing)
1224 return;
1225
1226 intel_hpd_init(dev_priv);
1227 intel_hpd_poll_disable(dev_priv);
1228
1229 /* Re-enable the ADPA, if we have one */
1230 for_each_intel_encoder(&dev_priv->drm, encoder) {
1231 if (encoder->type == INTEL_OUTPUT_ANALOG)
1232 intel_crt_reset(&encoder->base);
1233 }
1234
1235 intel_vga_redisable_power_on(dev_priv);
1236
1237 intel_pps_unlock_regs_wa(display);
1238 }
1239
1240 static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
1241 {
1242 struct intel_display *display = &dev_priv->display;
1243
1244 spin_lock_irq(&dev_priv->irq_lock);
1245 valleyview_disable_display_irqs(dev_priv);
1246 spin_unlock_irq(&dev_priv->irq_lock);
1247
1248 /* make sure we're done processing display irqs */
1249 intel_synchronize_irq(dev_priv);
1250
1251 intel_pps_reset_all(display);
1252
1253 /* Prevent us from re-enabling polling by accident in late suspend */
1254 if (!dev_priv->drm.dev->power.is_suspended)
1255 intel_hpd_poll_enable(dev_priv);
1256 }
1257
1258 static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
1259 struct i915_power_well *power_well)
1260 {
1261 vlv_set_power_well(dev_priv, power_well, true);
1262
1263 vlv_display_power_well_init(dev_priv);
1264 }
1265
1266 static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
1267 struct i915_power_well *power_well)
1268 {
1269 vlv_display_power_well_deinit(dev_priv);
1270
1271 vlv_set_power_well(dev_priv, power_well, false);
1272 }
1273
1274 static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
1275 struct i915_power_well *power_well)
1276 {
1277 /* since ref/cri clock was enabled */
1278 udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
1279
1280 vlv_set_power_well(dev_priv, power_well, true);
1281
1282 /*
1283 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
1284 * 6. De-assert cmn_reset/side_reset. Same as VLV X0.
1285 * a. GUnit 0x2110 bit[0] set to 1 (def 0)
1286 * b. The other bits such as sfr settings / modesel may all
1287 * be set to 0.
1288 *
1289 * This should only be done on init and resume from S3 with
1290 * both PLLs disabled, or we risk losing DPIO and PLL
1291 * synchronization.
1292 */
1293 intel_de_rmw(dev_priv, DPIO_CTL, 0, DPIO_CMNRST);
1294 }
1295
1296 static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
1297 struct i915_power_well *power_well)
1298 {
1299 enum pipe pipe;
1300
1301 for_each_pipe(dev_priv, pipe)
1302 assert_pll_disabled(dev_priv, pipe);
1303
1304 /* Assert common reset */
1305 intel_de_rmw(dev_priv, DPIO_CTL, DPIO_CMNRST, 0);
1306
1307 vlv_set_power_well(dev_priv, power_well, false);
1308 }
1309
1310 #define BITS_SET(val, bits) (((val) & (bits)) == (bits))
1311
1312 static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
1313 {
1314 struct i915_power_well *cmn_bc =
1315 lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
1316 struct i915_power_well *cmn_d =
1317 lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
1318 u32 phy_control = dev_priv->display.power.chv_phy_control;
1319 u32 phy_status = 0;
1320 u32 phy_status_mask = 0xffffffff;
1321
1322 /*
1323 * The BIOS can leave the PHY in some weird state
1324 * where it doesn't fully power down some parts.
1325 * Disable the asserts until the PHY has been fully
1326 * reset (ie. the power well has been disabled at
1327 * least once).
1328 */
1329 if (!dev_priv->display.power.chv_phy_assert[DPIO_PHY0])
1330 phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
1331 PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
1332 PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
1333 PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
1334 PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
1335 PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));
1336
1337 if (!dev_priv->display.power.chv_phy_assert[DPIO_PHY1])
1338 phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
1339 PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
1340 PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));
1341
1342 if (intel_power_well_is_enabled(dev_priv, cmn_bc)) {
1343 phy_status |= PHY_POWERGOOD(DPIO_PHY0);
1344
1345 /* this assumes override is only used to enable lanes */
1346 if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
1347 phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);
1348
1349 if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
1350 phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);
1351
1352 /* CL1 is on whenever anything is on in either channel */
1353 if (BITS_SET(phy_control,
1354 PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
1355 PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
1356 phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);
1357
1358 /*
1359 * The DPLLB check accounts for the pipe B + port A usage
1360 * with CL2 powered up but all the lanes in the second channel
1361 * powered down.
1362 */
1363 if (BITS_SET(phy_control,
1364 PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
1365 (intel_de_read(dev_priv, DPLL(dev_priv, PIPE_B)) & DPLL_VCO_ENABLE) == 0)
1366 phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);
1367
1368 if (BITS_SET(phy_control,
1369 PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
1370 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
1371 if (BITS_SET(phy_control,
1372 PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
1373 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);
1374
1375 if (BITS_SET(phy_control,
1376 PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
1377 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
1378 if (BITS_SET(phy_control,
1379 PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
1380 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
1381 }
1382
1383 if (intel_power_well_is_enabled(dev_priv, cmn_d)) {
1384 phy_status |= PHY_POWERGOOD(DPIO_PHY1);
1385
1386 /* this assumes override is only used to enable lanes */
1387 if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
1388 phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);
1389
1390 if (BITS_SET(phy_control,
1391 PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
1392 phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);
1393
1394 if (BITS_SET(phy_control,
1395 PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
1396 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
1397 if (BITS_SET(phy_control,
1398 PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
1399 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
1400 }
1401
1402 phy_status &= phy_status_mask;
1403
1404 /*
1405 * The PHY may be busy with some initial calibration and whatnot,
1406 * so the power state can take a while to actually change.
1407 */
1408 if (intel_de_wait(dev_priv, DISPLAY_PHY_STATUS,
1409 phy_status_mask, phy_status, 10))
1410 drm_err(&dev_priv->drm,
1411 "Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
1412 intel_de_read(dev_priv, DISPLAY_PHY_STATUS) & phy_status_mask,
1413 phy_status, dev_priv->display.power.chv_phy_control);
1414 }
1415
1416 #undef BITS_SET
1417
1418 static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
1419 struct i915_power_well *power_well)
1420 {
1421 enum i915_power_well_id id = i915_power_well_instance(power_well)->id;
1422 enum dpio_phy phy;
1423 u32 tmp;
1424
1425 drm_WARN_ON_ONCE(&dev_priv->drm,
1426 id != VLV_DISP_PW_DPIO_CMN_BC &&
1427 id != CHV_DISP_PW_DPIO_CMN_D);
1428
1429 if (id == VLV_DISP_PW_DPIO_CMN_BC)
1430 phy = DPIO_PHY0;
1431 else
1432 phy = DPIO_PHY1;
1433
1434 /* since ref/cri clock was enabled */
1435 udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
1436 vlv_set_power_well(dev_priv, power_well, true);
1437
1438 /* Poll for phypwrgood signal */
1439 if (intel_de_wait_for_set(dev_priv, DISPLAY_PHY_STATUS,
1440 PHY_POWERGOOD(phy), 1))
1441 drm_err(&dev_priv->drm, "Display PHY %d is not powered up\n",
1442 phy);
1443
1444 vlv_dpio_get(dev_priv);
1445
1446 /* Enable dynamic power down */
1447 tmp = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW28);
1448 tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
1449 DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
1450 vlv_dpio_write(dev_priv, phy, CHV_CMN_DW28, tmp);
1451
1452 if (id == VLV_DISP_PW_DPIO_CMN_BC) {
1453 tmp = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW6_CH1);
1454 tmp |= DPIO_DYNPWRDOWNEN_CH1;
1455 vlv_dpio_write(dev_priv, phy, CHV_CMN_DW6_CH1, tmp);
1456 } else {
1457 /*
1458 * Force the non-existing CL2 off. BXT does this
1459 * too, so maybe it saves some power even though
1460 * CL2 doesn't exist?
1461 */
1462 tmp = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW30);
1463 tmp |= DPIO_CL2_LDOFUSE_PWRENB;
1464 vlv_dpio_write(dev_priv, phy, CHV_CMN_DW30, tmp);
1465 }
1466
1467 vlv_dpio_put(dev_priv);
1468
1469 dev_priv->display.power.chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
1470 intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
1471 dev_priv->display.power.chv_phy_control);
1472
1473 drm_dbg_kms(&dev_priv->drm,
1474 "Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
1475 phy, dev_priv->display.power.chv_phy_control);
1476
1477 assert_chv_phy_status(dev_priv);
1478 }
1479
1480 static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
1481 struct i915_power_well *power_well)
1482 {
1483 enum i915_power_well_id id = i915_power_well_instance(power_well)->id;
1484 enum dpio_phy phy;
1485
1486 drm_WARN_ON_ONCE(&dev_priv->drm,
1487 id != VLV_DISP_PW_DPIO_CMN_BC &&
1488 id != CHV_DISP_PW_DPIO_CMN_D);
1489
1490 if (id == VLV_DISP_PW_DPIO_CMN_BC) {
1491 phy = DPIO_PHY0;
1492 assert_pll_disabled(dev_priv, PIPE_A);
1493 assert_pll_disabled(dev_priv, PIPE_B);
1494 } else {
1495 phy = DPIO_PHY1;
1496 assert_pll_disabled(dev_priv, PIPE_C);
1497 }
1498
1499 dev_priv->display.power.chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
1500 intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
1501 dev_priv->display.power.chv_phy_control);
1502
1503 vlv_set_power_well(dev_priv, power_well, false);
1504
1505 drm_dbg_kms(&dev_priv->drm,
1506 "Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
1507 phy, dev_priv->display.power.chv_phy_control);
1508
1509 /* PHY is fully reset now, so we can enable the PHY state asserts */
1510 dev_priv->display.power.chv_phy_assert[phy] = true;
1511
1512 assert_chv_phy_status(dev_priv);
1513 }
1514
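/*
 * Sanity check that the lane power down status reported by the common lane
 * registers matches what we expect for the given override state and lane
 * mask.
 */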
1515 static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
1516 enum dpio_channel ch, bool override, unsigned int mask)
1517 {
1518 u32 reg, val, expected, actual;
1519
1520 /*
1521 * The BIOS can leave the PHY in some weird state
1522 * where it doesn't fully power down some parts.
1523 * Disable the asserts until the PHY has been fully
1524 * reset (ie. the power well has been disabled at
1525 * least once).
1526 */
1527 if (!dev_priv->display.power.chv_phy_assert[phy])
1528 return;
1529
1530 if (ch == DPIO_CH0)
1531 reg = CHV_CMN_DW0_CH0;
1532 else
1533 reg = CHV_CMN_DW6_CH1;
1534
1535 vlv_dpio_get(dev_priv);
1536 val = vlv_dpio_read(dev_priv, phy, reg);
1537 vlv_dpio_put(dev_priv);
1538
1539 /*
1540 * This assumes !override is only used when the port is disabled.
1541 * All lanes should power down even without the override when
1542 * the port is disabled.
1543 */
1544 if (!override || mask == 0xf) {
1545 expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
1546 /*
1547 * If CH1 common lane is not active anymore
1548 * (eg. for pipe B DPLL) the entire channel will
1549 * shut down, which causes the common lane registers
1550 * to read as 0. That means we can't actually check
1551 * the lane power down status bits, but as the entire
1552 * register reads as 0 it's a good indication that the
1553 * channel is indeed entirely powered down.
1554 */
1555 if (ch == DPIO_CH1 && val == 0)
1556 expected = 0;
1557 } else if (mask != 0x0) {
1558 expected = DPIO_ANYDL_POWERDOWN;
1559 } else {
1560 expected = 0;
1561 }
1562
1563 if (ch == DPIO_CH0)
1564 actual = REG_FIELD_GET(DPIO_ANYDL_POWERDOWN_CH0 |
1565 DPIO_ALLDL_POWERDOWN_CH0, val);
1566 else
1567 actual = REG_FIELD_GET(DPIO_ANYDL_POWERDOWN_CH1 |
1568 DPIO_ALLDL_POWERDOWN_CH1, val);
1569
1570 drm_WARN(&dev_priv->drm, actual != expected,
1571 "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
1572 !!(actual & DPIO_ALLDL_POWERDOWN),
1573 !!(actual & DPIO_ANYDL_POWERDOWN),
1574 !!(expected & DPIO_ALLDL_POWERDOWN),
1575 !!(expected & DPIO_ANYDL_POWERDOWN),
1576 reg, val);
1577 }
1578
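/*
 * Enable/disable the power down override for a whole DPIO channel and
 * return the previous override state so the caller can restore it later.
 * Illustrative usage (hypothetical caller, not taken from this file):
 *
 *	was_override = chv_phy_powergate_ch(i915, DPIO_PHY0, DPIO_CH1, true);
 *	... reprogram the channel ...
 *	chv_phy_powergate_ch(i915, DPIO_PHY0, DPIO_CH1, was_override);
 */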
1579 bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
1580 enum dpio_channel ch, bool override)
1581 {
1582 struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
1583 bool was_override;
1584
1585 mutex_lock(&power_domains->lock);
1586
1587 was_override = dev_priv->display.power.chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1588
1589 if (override == was_override)
1590 goto out;
1591
1592 if (override)
1593 dev_priv->display.power.chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1594 else
1595 dev_priv->display.power.chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1596
1597 intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
1598 dev_priv->display.power.chv_phy_control);
1599
1600 drm_dbg_kms(&dev_priv->drm,
1601 "Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
1602 phy, ch, dev_priv->display.power.chv_phy_control);
1603
1604 assert_chv_phy_status(dev_priv);
1605
1606 out:
1607 mutex_unlock(&power_domains->lock);
1608
1609 return was_override;
1610 }
1611
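/*
 * Update the per-lane power down override mask for the channel driving
 * @encoder, enable or disable the override itself, and then cross-check the
 * resulting PHY status against the new mask.
 */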
1612 void chv_phy_powergate_lanes(struct intel_encoder *encoder,
1613 bool override, unsigned int mask)
1614 {
1615 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1616 struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
1617 enum dpio_phy phy = vlv_dig_port_to_phy(enc_to_dig_port(encoder));
1618 enum dpio_channel ch = vlv_dig_port_to_channel(enc_to_dig_port(encoder));
1619
1620 mutex_lock(&power_domains->lock);
1621
1622 dev_priv->display.power.chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
1623 dev_priv->display.power.chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);
1624
1625 if (override)
1626 dev_priv->display.power.chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1627 else
1628 dev_priv->display.power.chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1629
1630 intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
1631 dev_priv->display.power.chv_phy_control);
1632
1633 drm_dbg_kms(&dev_priv->drm,
1634 "Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
1635 phy, ch, mask, dev_priv->display.power.chv_phy_control);
1636
1637 assert_chv_phy_status(dev_priv);
1638
1639 assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);
1640
1641 mutex_unlock(&power_domains->lock);
1642 }
1643
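/* Report whether the CHV pipe A power well is powered on in the Punit. */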
1644 static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
1645 struct i915_power_well *power_well)
1646 {
1647 enum pipe pipe = PIPE_A;
1648 bool enabled;
1649 u32 state, ctrl;
1650
1651 vlv_punit_get(dev_priv);
1652
1653 state = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe);
1654 /*
1655 * We only ever set the power-on and power-gate states, anything
1656 * else is unexpected.
1657 */
1658 drm_WARN_ON(&dev_priv->drm, state != DP_SSS_PWR_ON(pipe) &&
1659 state != DP_SSS_PWR_GATE(pipe));
1660 enabled = state == DP_SSS_PWR_ON(pipe);
1661
1662 /*
1663 * A transient state at this point would mean some unexpected party
1664 * is poking at the power controls too.
1665 */
1666 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe);
1667 drm_WARN_ON(&dev_priv->drm, ctrl << 16 != state);
1668
1669 vlv_punit_put(dev_priv);
1670
1671 return enabled;
1672 }
1673
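/*
 * Request the desired pipe A power well state from the Punit and wait for
 * the status field to reflect it, complaining on timeout.
 */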
1674 static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
1675 struct i915_power_well *power_well,
1676 bool enable)
1677 {
1678 enum pipe pipe = PIPE_A;
1679 u32 state;
1680 u32 ctrl;
1681
1682 state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);
1683
1684 vlv_punit_get(dev_priv);
1685
1686 #define COND \
1687 ((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe)) == state)
1688
1689 if (COND)
1690 goto out;
1691
1692 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
1693 ctrl &= ~DP_SSC_MASK(pipe);
1694 ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
1695 vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, ctrl);
1696
1697 if (wait_for(COND, 100))
1698 drm_err(&dev_priv->drm,
1699 "timeout setting power well state %08x (%08x)\n",
1700 state,
1701 vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM));
1702
1703 #undef COND
1704
1705 out:
1706 vlv_punit_put(dev_priv);
1707 }
1708
1709 static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
1710 struct i915_power_well *power_well)
1711 {
1712 intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
1713 dev_priv->display.power.chv_phy_control);
1714 }
1715
1716 static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
1717 struct i915_power_well *power_well)
1718 {
1719 chv_set_pipe_power_well(dev_priv, power_well, true);
1720
1721 vlv_display_power_well_init(dev_priv);
1722 }
1723
1724 static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
1725 struct i915_power_well *power_well)
1726 {
1727 vlv_display_power_well_deinit(dev_priv);
1728
1729 chv_set_pipe_power_well(dev_priv, power_well, false);
1730 }
1731
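/*
 * Ask PCODE to block (or unblock) TC cold entry. A failed request is
 * retried up to three times before giving up.
 */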
1732 static void
1733 tgl_tc_cold_request(struct drm_i915_private *i915, bool block)
1734 {
1735 u8 tries = 0;
1736 int ret;
1737
1738 while (1) {
1739 u32 low_val;
1740 u32 high_val = 0;
1741
1742 if (block)
1743 low_val = TGL_PCODE_EXIT_TCCOLD_DATA_L_BLOCK_REQ;
1744 else
1745 low_val = TGL_PCODE_EXIT_TCCOLD_DATA_L_UNBLOCK_REQ;
1746
1747 /*
1748 * Spec states that we should time out the request after 200us,
1749 * but the function below will time out after 500us.
1750 */
1751 ret = snb_pcode_read(&i915->uncore, TGL_PCODE_TCCOLD, &low_val, &high_val);
1752 if (ret == 0) {
1753 if (block &&
1754 (low_val & TGL_PCODE_EXIT_TCCOLD_DATA_L_EXIT_FAILED))
1755 ret = -EIO;
1756 else
1757 break;
1758 }
1759
1760 if (++tries == 3)
1761 break;
1762
1763 msleep(1);
1764 }
1765
1766 if (ret)
1767 drm_err(&i915->drm, "TC cold %sblock failed\n",
1768 block ? "" : "un");
1769 else
1770 drm_dbg_kms(&i915->drm, "TC cold %sblock succeeded\n",
1771 block ? "" : "un");
1772 }
1773
1774 static void
1775 tgl_tc_cold_off_power_well_enable(struct drm_i915_private *i915,
1776 struct i915_power_well *power_well)
1777 {
1778 tgl_tc_cold_request(i915, true);
1779 }
1780
1781 static void
1782 tgl_tc_cold_off_power_well_disable(struct drm_i915_private *i915,
1783 struct i915_power_well *power_well)
1784 {
1785 tgl_tc_cold_request(i915, false);
1786 }
1787
1788 static void
1789 tgl_tc_cold_off_power_well_sync_hw(struct drm_i915_private *i915,
1790 struct i915_power_well *power_well)
1791 {
1792 if (intel_power_well_refcount(power_well) > 0)
1793 tgl_tc_cold_off_power_well_enable(i915, power_well);
1794 else
1795 tgl_tc_cold_off_power_well_disable(i915, power_well);
1796 }
1797
1798 static bool
1799 tgl_tc_cold_off_power_well_is_enabled(struct drm_i915_private *dev_priv,
1800 struct i915_power_well *power_well)
1801 {
1802 /*
1803 * Not the correct implementation, but there is no way to just read it
1804 * back from PCODE, so return the refcount to avoid state mismatch errors
1805 */
1806 return intel_power_well_refcount(power_well);
1807 }
1808
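/*
 * XELPDP AUX power wells are controlled through a power request bit in the
 * AUX channel control register itself rather than through the PWR_WELL_CTL
 * registers used by earlier platforms.
 */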
1809 static void xelpdp_aux_power_well_enable(struct drm_i915_private *dev_priv,
1810 struct i915_power_well *power_well)
1811 {
1812 enum aux_ch aux_ch = i915_power_well_instance(power_well)->xelpdp.aux_ch;
1813 enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);
1814
1815 if (intel_phy_is_tc(dev_priv, phy))
1816 icl_tc_port_assert_ref_held(dev_priv, power_well,
1817 aux_ch_to_digital_port(dev_priv, aux_ch));
1818
1819 intel_de_rmw(dev_priv, XELPDP_DP_AUX_CH_CTL(dev_priv, aux_ch),
1820 XELPDP_DP_AUX_CH_CTL_POWER_REQUEST,
1821 XELPDP_DP_AUX_CH_CTL_POWER_REQUEST);
1822
1823 /*
1824 * The power status flag cannot be used to determine whether aux
1825 * power wells have finished powering up. Instead we're
1826 * expected to just wait a fixed 600us after raising the request
1827 * bit.
1828 */
1829 usleep_range(600, 1200);
1830 }
1831
1832 static void xelpdp_aux_power_well_disable(struct drm_i915_private *dev_priv,
1833 struct i915_power_well *power_well)
1834 {
1835 enum aux_ch aux_ch = i915_power_well_instance(power_well)->xelpdp.aux_ch;
1836
1837 intel_de_rmw(dev_priv, XELPDP_DP_AUX_CH_CTL(dev_priv, aux_ch),
1838 XELPDP_DP_AUX_CH_CTL_POWER_REQUEST,
1839 0);
1840 usleep_range(10, 30);
1841 }
1842
1843 static bool xelpdp_aux_power_well_enabled(struct drm_i915_private *dev_priv,
1844 struct i915_power_well *power_well)
1845 {
1846 enum aux_ch aux_ch = i915_power_well_instance(power_well)->xelpdp.aux_ch;
1847
1848 return intel_de_read(dev_priv, XELPDP_DP_AUX_CH_CTL(dev_priv, aux_ch)) &
1849 XELPDP_DP_AUX_CH_CTL_POWER_STATUS;
1850 }
1851
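/*
 * The Xe2_LPD PICA power well is controlled through a request/status pair
 * in XE2LPD_PICA_PW_CTL; a stuck status bit is reported but otherwise
 * ignored.
 */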
1852 static void xe2lpd_pica_power_well_enable(struct drm_i915_private *dev_priv,
1853 struct i915_power_well *power_well)
1854 {
1855 intel_de_write(dev_priv, XE2LPD_PICA_PW_CTL,
1856 XE2LPD_PICA_CTL_POWER_REQUEST);
1857
1858 if (intel_de_wait_for_set(dev_priv, XE2LPD_PICA_PW_CTL,
1859 XE2LPD_PICA_CTL_POWER_STATUS, 1)) {
1860 drm_dbg_kms(&dev_priv->drm, "pica power well enable timeout\n");
1861
1862 drm_WARN(&dev_priv->drm, 1, "Power well PICA timeout when enabled");
1863 }
1864 }
1865
1866 static void xe2lpd_pica_power_well_disable(struct drm_i915_private *dev_priv,
1867 struct i915_power_well *power_well)
1868 {
1869 intel_de_write(dev_priv, XE2LPD_PICA_PW_CTL, 0);
1870
1871 if (intel_de_wait_for_clear(dev_priv, XE2LPD_PICA_PW_CTL,
1872 XE2LPD_PICA_CTL_POWER_STATUS, 1)) {
1873 drm_dbg_kms(&dev_priv->drm, "pica power well disable timeout\n");
1874
1875 drm_WARN(&dev_priv->drm, 1, "Power well PICA timeout when disabled");
1876 }
1877 }
1878
1879 static bool xe2lpd_pica_power_well_enabled(struct drm_i915_private *dev_priv,
1880 struct i915_power_well *power_well)
1881 {
1882 return intel_de_read(dev_priv, XE2LPD_PICA_PW_CTL) &
1883 XE2LPD_PICA_CTL_POWER_STATUS;
1884 }
1885
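/*
 * Power well ops vtables: each power well descriptor points at one of these
 * to select the enable/disable/status implementation appropriate for its
 * platform and well type.
 */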
1886 const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
1887 .sync_hw = i9xx_power_well_sync_hw_noop,
1888 .enable = i9xx_always_on_power_well_noop,
1889 .disable = i9xx_always_on_power_well_noop,
1890 .is_enabled = i9xx_always_on_power_well_enabled,
1891 };
1892
1893 const struct i915_power_well_ops chv_pipe_power_well_ops = {
1894 .sync_hw = chv_pipe_power_well_sync_hw,
1895 .enable = chv_pipe_power_well_enable,
1896 .disable = chv_pipe_power_well_disable,
1897 .is_enabled = chv_pipe_power_well_enabled,
1898 };
1899
1900 const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
1901 .sync_hw = i9xx_power_well_sync_hw_noop,
1902 .enable = chv_dpio_cmn_power_well_enable,
1903 .disable = chv_dpio_cmn_power_well_disable,
1904 .is_enabled = vlv_power_well_enabled,
1905 };
1906
1907 const struct i915_power_well_ops i830_pipes_power_well_ops = {
1908 .sync_hw = i830_pipes_power_well_sync_hw,
1909 .enable = i830_pipes_power_well_enable,
1910 .disable = i830_pipes_power_well_disable,
1911 .is_enabled = i830_pipes_power_well_enabled,
1912 };
1913
1914 static const struct i915_power_well_regs hsw_power_well_regs = {
1915 .bios = HSW_PWR_WELL_CTL1,
1916 .driver = HSW_PWR_WELL_CTL2,
1917 .kvmr = HSW_PWR_WELL_CTL3,
1918 .debug = HSW_PWR_WELL_CTL4,
1919 };
1920
1921 const struct i915_power_well_ops hsw_power_well_ops = {
1922 .regs = &hsw_power_well_regs,
1923 .sync_hw = hsw_power_well_sync_hw,
1924 .enable = hsw_power_well_enable,
1925 .disable = hsw_power_well_disable,
1926 .is_enabled = hsw_power_well_enabled,
1927 };
1928
1929 const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
1930 .sync_hw = i9xx_power_well_sync_hw_noop,
1931 .enable = gen9_dc_off_power_well_enable,
1932 .disable = gen9_dc_off_power_well_disable,
1933 .is_enabled = gen9_dc_off_power_well_enabled,
1934 };
1935
1936 const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = {
1937 .sync_hw = i9xx_power_well_sync_hw_noop,
1938 .enable = bxt_dpio_cmn_power_well_enable,
1939 .disable = bxt_dpio_cmn_power_well_disable,
1940 .is_enabled = bxt_dpio_cmn_power_well_enabled,
1941 };
1942
1943 const struct i915_power_well_ops vlv_display_power_well_ops = {
1944 .sync_hw = i9xx_power_well_sync_hw_noop,
1945 .enable = vlv_display_power_well_enable,
1946 .disable = vlv_display_power_well_disable,
1947 .is_enabled = vlv_power_well_enabled,
1948 };
1949
1950 const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
1951 .sync_hw = i9xx_power_well_sync_hw_noop,
1952 .enable = vlv_dpio_cmn_power_well_enable,
1953 .disable = vlv_dpio_cmn_power_well_disable,
1954 .is_enabled = vlv_power_well_enabled,
1955 };
1956
1957 const struct i915_power_well_ops vlv_dpio_power_well_ops = {
1958 .sync_hw = i9xx_power_well_sync_hw_noop,
1959 .enable = vlv_power_well_enable,
1960 .disable = vlv_power_well_disable,
1961 .is_enabled = vlv_power_well_enabled,
1962 };
1963
1964 static const struct i915_power_well_regs icl_aux_power_well_regs = {
1965 .bios = ICL_PWR_WELL_CTL_AUX1,
1966 .driver = ICL_PWR_WELL_CTL_AUX2,
1967 .debug = ICL_PWR_WELL_CTL_AUX4,
1968 };
1969
1970 const struct i915_power_well_ops icl_aux_power_well_ops = {
1971 .regs = &icl_aux_power_well_regs,
1972 .sync_hw = hsw_power_well_sync_hw,
1973 .enable = icl_aux_power_well_enable,
1974 .disable = icl_aux_power_well_disable,
1975 .is_enabled = hsw_power_well_enabled,
1976 };
1977
1978 static const struct i915_power_well_regs icl_ddi_power_well_regs = {
1979 .bios = ICL_PWR_WELL_CTL_DDI1,
1980 .driver = ICL_PWR_WELL_CTL_DDI2,
1981 .debug = ICL_PWR_WELL_CTL_DDI4,
1982 };
1983
1984 const struct i915_power_well_ops icl_ddi_power_well_ops = {
1985 .regs = &icl_ddi_power_well_regs,
1986 .sync_hw = hsw_power_well_sync_hw,
1987 .enable = hsw_power_well_enable,
1988 .disable = hsw_power_well_disable,
1989 .is_enabled = hsw_power_well_enabled,
1990 };
1991
1992 const struct i915_power_well_ops tgl_tc_cold_off_ops = {
1993 .sync_hw = tgl_tc_cold_off_power_well_sync_hw,
1994 .enable = tgl_tc_cold_off_power_well_enable,
1995 .disable = tgl_tc_cold_off_power_well_disable,
1996 .is_enabled = tgl_tc_cold_off_power_well_is_enabled,
1997 };
1998
1999 const struct i915_power_well_ops xelpdp_aux_power_well_ops = {
2000 .sync_hw = i9xx_power_well_sync_hw_noop,
2001 .enable = xelpdp_aux_power_well_enable,
2002 .disable = xelpdp_aux_power_well_disable,
2003 .is_enabled = xelpdp_aux_power_well_enabled,
2004 };
2005
2006 const struct i915_power_well_ops xe2lpd_pica_power_well_ops = {
2007 .sync_hw = i9xx_power_well_sync_hw_noop,
2008 .enable = xe2lpd_pica_power_well_enable,
2009 .disable = xe2lpd_pica_power_well_disable,
2010 .is_enabled = xe2lpd_pica_power_well_enabled,
2011 };
2012