1 // SPDX-License-Identifier: MIT
2 /*
3 * Copyright © 2020 Intel Corporation
4 */
5
6 #include "g4x_dp.h"
7 #include "i915_drv.h"
8 #include "i915_reg.h"
9 #include "intel_de.h"
10 #include "intel_display_power_well.h"
11 #include "intel_display_types.h"
12 #include "intel_dp.h"
13 #include "intel_dpio_phy.h"
14 #include "intel_dpll.h"
15 #include "intel_lvds.h"
16 #include "intel_lvds_regs.h"
17 #include "intel_pps.h"
18 #include "intel_pps_regs.h"
19 #include "intel_quirks.h"
20
21 static void vlv_steal_power_sequencer(struct intel_display *display,
22 enum pipe pipe);
23
24 static void pps_init_delays(struct intel_dp *intel_dp);
25 static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd);
26
27 static const char *pps_name(struct intel_dp *intel_dp)
28 {
29 struct intel_display *display = to_intel_display(intel_dp);
30 struct drm_i915_private *i915 = to_i915(display->drm);
31 struct intel_pps *pps = &intel_dp->pps;
32
33 if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
34 switch (pps->pps_pipe) {
35 case INVALID_PIPE:
36 /*
37 * FIXME it would be nice if we could guarantee
38 * always having a valid PPS when calling this.
39 */
40 return "PPS <none>";
41 case PIPE_A:
42 return "PPS A";
43 case PIPE_B:
44 return "PPS B";
45 default:
46 MISSING_CASE(pps->pps_pipe);
47 break;
48 }
49 } else {
50 switch (pps->pps_idx) {
51 case 0:
52 return "PPS 0";
53 case 1:
54 return "PPS 1";
55 default:
56 MISSING_CASE(pps->pps_idx);
57 break;
58 }
59 }
60
61 return "PPS <invalid>";
62 }
63
64 intel_wakeref_t intel_pps_lock(struct intel_dp *intel_dp)
65 {
66 struct intel_display *display = to_intel_display(intel_dp);
67 struct drm_i915_private *dev_priv = to_i915(display->drm);
68 intel_wakeref_t wakeref;
69
70 /*
71 * See intel_pps_reset_all() for why we need a power domain reference here.
72 */
73 wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DISPLAY_CORE);
74 mutex_lock(&display->pps.mutex);
75
76 return wakeref;
77 }
78
79 intel_wakeref_t intel_pps_unlock(struct intel_dp *intel_dp,
80 intel_wakeref_t wakeref)
81 {
82 struct intel_display *display = to_intel_display(intel_dp);
83 struct drm_i915_private *dev_priv = to_i915(display->drm);
84
85 mutex_unlock(&display->pps.mutex);
86 intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
87
88 return 0;
89 }
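/*
 * Illustrative caller sketch (not part of the driver source): the wakeref
 * returned by intel_pps_lock() must be handed back to intel_pps_unlock(),
 * so callers either keep the two calls strictly paired or use the
 * with_intel_pps_lock() helper used throughout this file:
 *
 *	intel_wakeref_t wakeref;
 *
 *	wakeref = intel_pps_lock(intel_dp);
 *	... touch PPS registers / state under display->pps.mutex ...
 *	wakeref = intel_pps_unlock(intel_dp, wakeref);
 *
 *	with_intel_pps_lock(intel_dp, wakeref)
 *		wait_panel_power_cycle(intel_dp);
 */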
90
91 static void
92 vlv_power_sequencer_kick(struct intel_dp *intel_dp)
93 {
94 struct intel_display *display = to_intel_display(intel_dp);
95 struct drm_i915_private *dev_priv = to_i915(display->drm);
96 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
97 enum pipe pipe = intel_dp->pps.pps_pipe;
98 bool pll_enabled, release_cl_override = false;
99 enum dpio_phy phy = vlv_pipe_to_phy(pipe);
100 enum dpio_channel ch = vlv_pipe_to_channel(pipe);
101 u32 DP;
102
103 if (drm_WARN(display->drm,
104 intel_de_read(display, intel_dp->output_reg) & DP_PORT_EN,
105 "skipping %s kick due to [ENCODER:%d:%s] being active\n",
106 pps_name(intel_dp),
107 dig_port->base.base.base.id, dig_port->base.base.name))
108 return;
109
110 drm_dbg_kms(display->drm,
111 "kicking %s for [ENCODER:%d:%s]\n",
112 pps_name(intel_dp),
113 dig_port->base.base.base.id, dig_port->base.base.name);
114
115 /* Preserve the BIOS-computed detected bit. This is
116 * supposed to be read-only.
117 */
118 DP = intel_de_read(display, intel_dp->output_reg) & DP_DETECTED;
119 DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
120 DP |= DP_PORT_WIDTH(1);
121 DP |= DP_LINK_TRAIN_PAT_1;
122
123 if (IS_CHERRYVIEW(dev_priv))
124 DP |= DP_PIPE_SEL_CHV(pipe);
125 else
126 DP |= DP_PIPE_SEL(pipe);
127
128 pll_enabled = intel_de_read(display, DPLL(display, pipe)) & DPLL_VCO_ENABLE;
129
130 /*
131 * The DPLL for the pipe must be enabled for this to work.
132 * So enable it temporarily if it's not already enabled.
133 */
134 if (!pll_enabled) {
135 release_cl_override = IS_CHERRYVIEW(dev_priv) &&
136 !chv_phy_powergate_ch(dev_priv, phy, ch, true);
137
138 if (vlv_force_pll_on(dev_priv, pipe, vlv_get_dpll(dev_priv))) {
139 drm_err(display->drm,
140 "Failed to force on PLL for pipe %c!\n",
141 pipe_name(pipe));
142 return;
143 }
144 }
145
146 /*
147 * Similar magic as in intel_dp_enable_port().
148 * We _must_ do this port enable + disable trick
149 * to make this power sequencer lock onto the port.
150 * Otherwise even VDD force bit won't work.
151 */
152 intel_de_write(display, intel_dp->output_reg, DP);
153 intel_de_posting_read(display, intel_dp->output_reg);
154
155 intel_de_write(display, intel_dp->output_reg, DP | DP_PORT_EN);
156 intel_de_posting_read(display, intel_dp->output_reg);
157
158 intel_de_write(display, intel_dp->output_reg, DP & ~DP_PORT_EN);
159 intel_de_posting_read(display, intel_dp->output_reg);
160
161 if (!pll_enabled) {
162 vlv_force_pll_off(dev_priv, pipe);
163
164 if (release_cl_override)
165 chv_phy_powergate_ch(dev_priv, phy, ch, false);
166 }
167 }
168
169 static enum pipe vlv_find_free_pps(struct intel_display *display)
170 {
171 struct intel_encoder *encoder;
172 unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
173
174 /*
175 * We don't have a power sequencer currently.
176 * Pick one that's not used by other ports.
177 */
178 for_each_intel_dp(display->drm, encoder) {
179 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
180
181 if (encoder->type == INTEL_OUTPUT_EDP) {
182 drm_WARN_ON(display->drm,
183 intel_dp->pps.active_pipe != INVALID_PIPE &&
184 intel_dp->pps.active_pipe !=
185 intel_dp->pps.pps_pipe);
186
187 if (intel_dp->pps.pps_pipe != INVALID_PIPE)
188 pipes &= ~(1 << intel_dp->pps.pps_pipe);
189 } else {
190 drm_WARN_ON(display->drm,
191 intel_dp->pps.pps_pipe != INVALID_PIPE);
192
193 if (intel_dp->pps.active_pipe != INVALID_PIPE)
194 pipes &= ~(1 << intel_dp->pps.active_pipe);
195 }
196 }
197
198 if (pipes == 0)
199 return INVALID_PIPE;
200
201 return ffs(pipes) - 1;
202 }
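/*
 * Worked example (illustrative, assuming PIPE_A == 0 and PIPE_B == 1 as
 * elsewhere in i915): pipes starts as (1 << PIPE_A) | (1 << PIPE_B) == 0x3.
 * If another eDP port already owns PPS A, bit PIPE_A gets cleared, leaving
 * 0x2, and ffs(0x2) - 1 == 1 == PIPE_B is returned. If both bits end up
 * cleared, pipes == 0 and INVALID_PIPE is returned instead.
 */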
203
204 static enum pipe
205 vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
206 {
207 struct intel_display *display = to_intel_display(intel_dp);
208 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
209 enum pipe pipe;
210
211 lockdep_assert_held(&display->pps.mutex);
212
213 /* We should never land here with regular DP ports */
214 drm_WARN_ON(display->drm, !intel_dp_is_edp(intel_dp));
215
216 drm_WARN_ON(display->drm, intel_dp->pps.active_pipe != INVALID_PIPE &&
217 intel_dp->pps.active_pipe != intel_dp->pps.pps_pipe);
218
219 if (intel_dp->pps.pps_pipe != INVALID_PIPE)
220 return intel_dp->pps.pps_pipe;
221
222 pipe = vlv_find_free_pps(display);
223
224 /*
225 * Didn't find one. This should not happen since there
226 * are two power sequencers and up to two eDP ports.
227 */
228 if (drm_WARN_ON(display->drm, pipe == INVALID_PIPE))
229 pipe = PIPE_A;
230
231 vlv_steal_power_sequencer(display, pipe);
232 intel_dp->pps.pps_pipe = pipe;
233
234 drm_dbg_kms(display->drm,
235 "picked %s for [ENCODER:%d:%s]\n",
236 pps_name(intel_dp),
237 dig_port->base.base.base.id, dig_port->base.base.name);
238
239 /* init power sequencer on this pipe and port */
240 pps_init_delays(intel_dp);
241 pps_init_registers(intel_dp, true);
242
243 /*
244 * Even vdd force doesn't work until we've made
245 * the power sequencer lock onto the port.
246 */
247 vlv_power_sequencer_kick(intel_dp);
248
249 return intel_dp->pps.pps_pipe;
250 }
251
252 static int
253 bxt_power_sequencer_idx(struct intel_dp *intel_dp)
254 {
255 struct intel_display *display = to_intel_display(intel_dp);
256 int pps_idx = intel_dp->pps.pps_idx;
257
258 lockdep_assert_held(&display->pps.mutex);
259
260 /* We should never land here with regular DP ports */
261 drm_WARN_ON(display->drm, !intel_dp_is_edp(intel_dp));
262
263 if (!intel_dp->pps.pps_reset)
264 return pps_idx;
265
266 intel_dp->pps.pps_reset = false;
267
268 /*
269 * Only the HW needs to be reprogrammed, the SW state is fixed and
270 * has been set up during connector init.
271 */
272 pps_init_registers(intel_dp, false);
273
274 return pps_idx;
275 }
276
277 typedef bool (*pps_check)(struct intel_display *display, int pps_idx);
278
279 static bool pps_has_pp_on(struct intel_display *display, int pps_idx)
280 {
281 return intel_de_read(display, PP_STATUS(display, pps_idx)) & PP_ON;
282 }
283
284 static bool pps_has_vdd_on(struct intel_display *display, int pps_idx)
285 {
286 return intel_de_read(display, PP_CONTROL(display, pps_idx)) & EDP_FORCE_VDD;
287 }
288
289 static bool pps_any(struct intel_display *display, int pps_idx)
290 {
291 return true;
292 }
293
294 static enum pipe
295 vlv_initial_pps_pipe(struct intel_display *display,
296 enum port port, pps_check check)
297 {
298 enum pipe pipe;
299
300 for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
301 u32 port_sel = intel_de_read(display,
302 PP_ON_DELAYS(display, pipe)) &
303 PANEL_PORT_SELECT_MASK;
304
305 if (port_sel != PANEL_PORT_SELECT_VLV(port))
306 continue;
307
308 if (!check(display, pipe))
309 continue;
310
311 return pipe;
312 }
313
314 return INVALID_PIPE;
315 }
316
317 static void
318 vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
319 {
320 struct intel_display *display = to_intel_display(intel_dp);
321 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
322 enum port port = dig_port->base.port;
323
324 lockdep_assert_held(&display->pps.mutex);
325
326 /* try to find a pipe with this port selected */
327 /* first pick one where the panel is on */
328 intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(display, port,
329 pps_has_pp_on);
330 /* didn't find one? pick one where vdd is on */
331 if (intel_dp->pps.pps_pipe == INVALID_PIPE)
332 intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(display, port,
333 pps_has_vdd_on);
334 /* didn't find one? pick one with just the correct port */
335 if (intel_dp->pps.pps_pipe == INVALID_PIPE)
336 intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(display, port,
337 pps_any);
338
339 /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
340 if (intel_dp->pps.pps_pipe == INVALID_PIPE) {
341 drm_dbg_kms(display->drm,
342 "[ENCODER:%d:%s] no initial power sequencer\n",
343 dig_port->base.base.base.id, dig_port->base.base.name);
344 return;
345 }
346
347 drm_dbg_kms(display->drm,
348 "[ENCODER:%d:%s] initial power sequencer: %s\n",
349 dig_port->base.base.base.id, dig_port->base.base.name,
350 pps_name(intel_dp));
351 }
352
353 static int intel_num_pps(struct intel_display *display)
354 {
355 struct drm_i915_private *i915 = to_i915(display->drm);
356
357 if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
358 return 2;
359
360 if (IS_GEMINILAKE(i915) || IS_BROXTON(i915))
361 return 2;
362
363 if (INTEL_PCH_TYPE(i915) >= PCH_MTL)
364 return 2;
365
366 if (INTEL_PCH_TYPE(i915) >= PCH_DG1)
367 return 1;
368
369 if (INTEL_PCH_TYPE(i915) >= PCH_ICP)
370 return 2;
371
372 return 1;
373 }
374
375 static bool intel_pps_is_valid(struct intel_dp *intel_dp)
376 {
377 struct intel_display *display = to_intel_display(intel_dp);
378 struct drm_i915_private *i915 = to_i915(display->drm);
379
380 if (intel_dp->pps.pps_idx == 1 &&
381 INTEL_PCH_TYPE(i915) >= PCH_ICP &&
382 INTEL_PCH_TYPE(i915) <= PCH_ADP)
383 return intel_de_read(display, SOUTH_CHICKEN1) & ICP_SECOND_PPS_IO_SELECT;
384
385 return true;
386 }
387
388 static int
389 bxt_initial_pps_idx(struct intel_display *display, pps_check check)
390 {
391 int pps_idx, pps_num = intel_num_pps(display);
392
393 for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
394 if (check(display, pps_idx))
395 return pps_idx;
396 }
397
398 return -1;
399 }
400
401 static bool
402 pps_initial_setup(struct intel_dp *intel_dp)
403 {
404 struct intel_display *display = to_intel_display(intel_dp);
405 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
406 struct intel_connector *connector = intel_dp->attached_connector;
407 struct drm_i915_private *i915 = to_i915(encoder->base.dev);
408
409 lockdep_assert_held(&display->pps.mutex);
410
411 if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
412 vlv_initial_power_sequencer_setup(intel_dp);
413 return true;
414 }
415
416 /* first ask the VBT */
417 if (intel_num_pps(display) > 1)
418 intel_dp->pps.pps_idx = connector->panel.vbt.backlight.controller;
419 else
420 intel_dp->pps.pps_idx = 0;
421
422 if (drm_WARN_ON(display->drm, intel_dp->pps.pps_idx >= intel_num_pps(display)))
423 intel_dp->pps.pps_idx = -1;
424
425 /* VBT wasn't parsed yet? pick one where the panel is on */
426 if (intel_dp->pps.pps_idx < 0)
427 intel_dp->pps.pps_idx = bxt_initial_pps_idx(display, pps_has_pp_on);
428 /* didn't find one? pick one where vdd is on */
429 if (intel_dp->pps.pps_idx < 0)
430 intel_dp->pps.pps_idx = bxt_initial_pps_idx(display, pps_has_vdd_on);
431 /* didn't find one? pick any */
432 if (intel_dp->pps.pps_idx < 0) {
433 intel_dp->pps.pps_idx = bxt_initial_pps_idx(display, pps_any);
434
435 drm_dbg_kms(display->drm,
436 "[ENCODER:%d:%s] no initial power sequencer, assuming %s\n",
437 encoder->base.base.id, encoder->base.name,
438 pps_name(intel_dp));
439 } else {
440 drm_dbg_kms(display->drm,
441 "[ENCODER:%d:%s] initial power sequencer: %s\n",
442 encoder->base.base.id, encoder->base.name,
443 pps_name(intel_dp));
444 }
445
446 return intel_pps_is_valid(intel_dp);
447 }
448
449 void intel_pps_reset_all(struct intel_display *display)
450 {
451 struct drm_i915_private *dev_priv = to_i915(display->drm);
452 struct intel_encoder *encoder;
453
454 if (drm_WARN_ON(display->drm, !IS_LP(dev_priv)))
455 return;
456
457 if (!HAS_DISPLAY(display))
458 return;
459
460 /*
461 * We can't grab pps_mutex here due to deadlock with power_domain
462 * mutex when power_domain functions are called while holding pps_mutex.
463 * That also means that in order to use pps_pipe the code needs to
464 * hold both a power domain reference and pps_mutex, and the power domain
465 * reference get/put must be done while _not_ holding pps_mutex.
466 * pps_{lock,unlock}() do these steps in the correct order, so they
467 * should always be used.
468 */
469
470 for_each_intel_dp(display->drm, encoder) {
471 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
472
473 drm_WARN_ON(display->drm,
474 intel_dp->pps.active_pipe != INVALID_PIPE);
475
476 if (encoder->type != INTEL_OUTPUT_EDP)
477 continue;
478
479 if (DISPLAY_VER(display) >= 9)
480 intel_dp->pps.pps_reset = true;
481 else
482 intel_dp->pps.pps_pipe = INVALID_PIPE;
483 }
484 }
485
486 struct pps_registers {
487 i915_reg_t pp_ctrl;
488 i915_reg_t pp_stat;
489 i915_reg_t pp_on;
490 i915_reg_t pp_off;
491 i915_reg_t pp_div;
492 };
493
494 static void intel_pps_get_registers(struct intel_dp *intel_dp,
495 struct pps_registers *regs)
496 {
497 struct intel_display *display = to_intel_display(intel_dp);
498 struct drm_i915_private *dev_priv = to_i915(display->drm);
499 int pps_idx;
500
501 memset(regs, 0, sizeof(*regs));
502
503 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
504 pps_idx = vlv_power_sequencer_pipe(intel_dp);
505 else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
506 pps_idx = bxt_power_sequencer_idx(intel_dp);
507 else
508 pps_idx = intel_dp->pps.pps_idx;
509
510 regs->pp_ctrl = PP_CONTROL(display, pps_idx);
511 regs->pp_stat = PP_STATUS(display, pps_idx);
512 regs->pp_on = PP_ON_DELAYS(display, pps_idx);
513 regs->pp_off = PP_OFF_DELAYS(display, pps_idx);
514
515 /* Cycle delay moved from PP_DIVISOR to PP_CONTROL */
516 if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) ||
517 INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
518 regs->pp_div = INVALID_MMIO_REG;
519 else
520 regs->pp_div = PP_DIVISOR(display, pps_idx);
521 }
522
523 static i915_reg_t
524 _pp_ctrl_reg(struct intel_dp *intel_dp)
525 {
526 struct pps_registers regs;
527
528 intel_pps_get_registers(intel_dp, &regs);
529
530 return regs.pp_ctrl;
531 }
532
533 static i915_reg_t
534 _pp_stat_reg(struct intel_dp *intel_dp)
535 {
536 struct pps_registers regs;
537
538 intel_pps_get_registers(intel_dp, &regs);
539
540 return regs.pp_stat;
541 }
542
543 static bool edp_have_panel_power(struct intel_dp *intel_dp)
544 {
545 struct intel_display *display = to_intel_display(intel_dp);
546 struct drm_i915_private *dev_priv = to_i915(display->drm);
547
548 lockdep_assert_held(&display->pps.mutex);
549
550 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
551 intel_dp->pps.pps_pipe == INVALID_PIPE)
552 return false;
553
554 return (intel_de_read(display, _pp_stat_reg(intel_dp)) & PP_ON) != 0;
555 }
556
557 static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
558 {
559 struct intel_display *display = to_intel_display(intel_dp);
560 struct drm_i915_private *dev_priv = to_i915(display->drm);
561
562 lockdep_assert_held(&display->pps.mutex);
563
564 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
565 intel_dp->pps.pps_pipe == INVALID_PIPE)
566 return false;
567
568 return intel_de_read(display, _pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
569 }
570
571 void intel_pps_check_power_unlocked(struct intel_dp *intel_dp)
572 {
573 struct intel_display *display = to_intel_display(intel_dp);
574 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
575
576 if (!intel_dp_is_edp(intel_dp))
577 return;
578
579 if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
580 drm_WARN(display->drm, 1,
581 "[ENCODER:%d:%s] %s powered off while attempting AUX CH communication.\n",
582 dig_port->base.base.base.id, dig_port->base.base.name,
583 pps_name(intel_dp));
584 drm_dbg_kms(display->drm,
585 "[ENCODER:%d:%s] %s PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
586 dig_port->base.base.base.id, dig_port->base.base.name,
587 pps_name(intel_dp),
588 intel_de_read(display, _pp_stat_reg(intel_dp)),
589 intel_de_read(display, _pp_ctrl_reg(intel_dp)));
590 }
591 }
592
593 #define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
594 #define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
595
596 #define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
597 #define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
598
599 #define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
600 #define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
601
602 static void intel_pps_verify_state(struct intel_dp *intel_dp);
603
604 static void wait_panel_status(struct intel_dp *intel_dp,
605 u32 mask, u32 value)
606 {
607 struct intel_display *display = to_intel_display(intel_dp);
608 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
609 i915_reg_t pp_stat_reg, pp_ctrl_reg;
610
611 lockdep_assert_held(&display->pps.mutex);
612
613 intel_pps_verify_state(intel_dp);
614
615 pp_stat_reg = _pp_stat_reg(intel_dp);
616 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
617
618 drm_dbg_kms(display->drm,
619 "[ENCODER:%d:%s] %s mask: 0x%08x value: 0x%08x PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
620 dig_port->base.base.base.id, dig_port->base.base.name,
621 pps_name(intel_dp),
622 mask, value,
623 intel_de_read(display, pp_stat_reg),
624 intel_de_read(display, pp_ctrl_reg));
625
626 if (intel_de_wait(display, pp_stat_reg, mask, value, 5000))
627 drm_err(display->drm,
628 "[ENCODER:%d:%s] %s panel status timeout: PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
629 dig_port->base.base.base.id, dig_port->base.base.name,
630 pps_name(intel_dp),
631 intel_de_read(display, pp_stat_reg),
632 intel_de_read(display, pp_ctrl_reg));
633
634 drm_dbg_kms(display->drm, "Wait complete\n");
635 }
636
637 static void wait_panel_on(struct intel_dp *intel_dp)
638 {
639 struct intel_display *display = to_intel_display(intel_dp);
640 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
641
642 drm_dbg_kms(display->drm,
643 "[ENCODER:%d:%s] %s wait for panel power on\n",
644 dig_port->base.base.base.id, dig_port->base.base.name,
645 pps_name(intel_dp));
646 wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
647 }
648
649 static void wait_panel_off(struct intel_dp *intel_dp)
650 {
651 struct intel_display *display = to_intel_display(intel_dp);
652 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
653
654 drm_dbg_kms(display->drm,
655 "[ENCODER:%d:%s] %s wait for panel power off time\n",
656 dig_port->base.base.base.id, dig_port->base.base.name,
657 pps_name(intel_dp));
658 wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
659 }
660
661 static void wait_panel_power_cycle(struct intel_dp *intel_dp)
662 {
663 struct intel_display *display = to_intel_display(intel_dp);
664 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
665 ktime_t panel_power_on_time;
666 s64 panel_power_off_duration;
667
668 drm_dbg_kms(display->drm,
669 "[ENCODER:%d:%s] %s wait for panel power cycle\n",
670 dig_port->base.base.base.id, dig_port->base.base.name,
671 pps_name(intel_dp));
672
673 /* Take the difference between the current time and the panel power off
674 * time, and then make the panel wait for t11_t12 if needed. */
675 panel_power_on_time = ktime_get_boottime();
676 panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->pps.panel_power_off_time);
677
678 /* When the VDD override bit is the last thing we disabled, we have to
679 * do the wait manually. */
680 if (panel_power_off_duration < (s64)intel_dp->pps.panel_power_cycle_delay)
681 wait_remaining_ms_from_jiffies(jiffies,
682 intel_dp->pps.panel_power_cycle_delay - panel_power_off_duration);
683
684 wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
685 }
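/*
 * Worked example (illustrative): with panel_power_cycle_delay == 500 ms,
 * if the panel was powered off 320 ms ago then panel_power_off_duration
 * == 320 and wait_remaining_ms_from_jiffies() sleeps for the remaining
 * 180 ms before polling PP_STATUS for the off/idle state; once more than
 * 500 ms have already elapsed, no extra sleep is needed.
 */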
686
687 void intel_pps_wait_power_cycle(struct intel_dp *intel_dp)
688 {
689 intel_wakeref_t wakeref;
690
691 if (!intel_dp_is_edp(intel_dp))
692 return;
693
694 with_intel_pps_lock(intel_dp, wakeref)
695 wait_panel_power_cycle(intel_dp);
696 }
697
698 static void wait_backlight_on(struct intel_dp *intel_dp)
699 {
700 wait_remaining_ms_from_jiffies(intel_dp->pps.last_power_on,
701 intel_dp->pps.backlight_on_delay);
702 }
703
704 static void edp_wait_backlight_off(struct intel_dp *intel_dp)
705 {
706 wait_remaining_ms_from_jiffies(intel_dp->pps.last_backlight_off,
707 intel_dp->pps.backlight_off_delay);
708 }
709
710 /* Read the current pp_control value, unlocking the register if it
711 * is locked
712 */
713
714 static u32 ilk_get_pp_control(struct intel_dp *intel_dp)
715 {
716 struct intel_display *display = to_intel_display(intel_dp);
717 u32 control;
718
719 lockdep_assert_held(&display->pps.mutex);
720
721 control = intel_de_read(display, _pp_ctrl_reg(intel_dp));
722 if (drm_WARN_ON(display->drm, !HAS_DDI(display) &&
723 (control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) {
724 control &= ~PANEL_UNLOCK_MASK;
725 control |= PANEL_UNLOCK_REGS;
726 }
727 return control;
728 }
729
730 /*
731 * Must be paired with intel_pps_vdd_off_unlocked().
732 * Must hold pps_mutex around the whole on/off sequence.
733 * Can be nested with intel_pps_vdd_{on,off}() calls.
734 */
735 bool intel_pps_vdd_on_unlocked(struct intel_dp *intel_dp)
736 {
737 struct intel_display *display = to_intel_display(intel_dp);
738 struct drm_i915_private *dev_priv = to_i915(display->drm);
739 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
740 u32 pp;
741 i915_reg_t pp_stat_reg, pp_ctrl_reg;
742 bool need_to_disable = !intel_dp->pps.want_panel_vdd;
743
744 lockdep_assert_held(&display->pps.mutex);
745
746 if (!intel_dp_is_edp(intel_dp))
747 return false;
748
749 cancel_delayed_work(&intel_dp->pps.panel_vdd_work);
750 intel_dp->pps.want_panel_vdd = true;
751
752 if (edp_have_panel_vdd(intel_dp))
753 return need_to_disable;
754
755 drm_WARN_ON(display->drm, intel_dp->pps.vdd_wakeref);
756 intel_dp->pps.vdd_wakeref = intel_display_power_get(dev_priv,
757 intel_aux_power_domain(dig_port));
758
759 pp_stat_reg = _pp_stat_reg(intel_dp);
760 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
761
762 drm_dbg_kms(display->drm, "[ENCODER:%d:%s] %s turning VDD on\n",
763 dig_port->base.base.base.id, dig_port->base.base.name,
764 pps_name(intel_dp));
765
766 if (!edp_have_panel_power(intel_dp))
767 wait_panel_power_cycle(intel_dp);
768
769 pp = ilk_get_pp_control(intel_dp);
770 pp |= EDP_FORCE_VDD;
771
772 intel_de_write(display, pp_ctrl_reg, pp);
773 intel_de_posting_read(display, pp_ctrl_reg);
774 drm_dbg_kms(display->drm,
775 "[ENCODER:%d:%s] %s PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
776 dig_port->base.base.base.id, dig_port->base.base.name,
777 pps_name(intel_dp),
778 intel_de_read(display, pp_stat_reg),
779 intel_de_read(display, pp_ctrl_reg));
780 /*
781 * If the panel wasn't on, delay before accessing aux channel
782 */
783 if (!edp_have_panel_power(intel_dp)) {
784 drm_dbg_kms(display->drm,
785 "[ENCODER:%d:%s] %s panel power wasn't enabled\n",
786 dig_port->base.base.base.id, dig_port->base.base.name,
787 pps_name(intel_dp));
788 msleep(intel_dp->pps.panel_power_up_delay);
789 }
790
791 return need_to_disable;
792 }
793
794 /*
795 * Must be paired with intel_pps_off().
796 * Nested calls to these functions are not allowed since
797 * we drop the lock. Caller must use some higher level
798 * locking to prevent nested calls from other threads.
799 */
800 void intel_pps_vdd_on(struct intel_dp *intel_dp)
801 {
802 struct intel_display *display = to_intel_display(intel_dp);
803 struct drm_i915_private *i915 = to_i915(display->drm);
804 intel_wakeref_t wakeref;
805 bool vdd;
806
807 if (!intel_dp_is_edp(intel_dp))
808 return;
809
810 vdd = false;
811 with_intel_pps_lock(intel_dp, wakeref)
812 vdd = intel_pps_vdd_on_unlocked(intel_dp);
813 I915_STATE_WARN(i915, !vdd, "[ENCODER:%d:%s] %s VDD already requested on\n",
814 dp_to_dig_port(intel_dp)->base.base.base.id,
815 dp_to_dig_port(intel_dp)->base.base.name,
816 pps_name(intel_dp));
817 }
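/*
 * Illustrative pairing sketch (not taken from a real caller): the _unlocked
 * variants nest and must stay under pps_mutex for the whole sequence,
 * e.g. around a burst of AUX transfers:
 *
 *	bool vdd;
 *
 *	with_intel_pps_lock(intel_dp, wakeref) {
 *		vdd = intel_pps_vdd_on_unlocked(intel_dp);
 *		... AUX channel accesses ...
 *		if (vdd)
 *			intel_pps_vdd_off_unlocked(intel_dp, false);
 *	}
 *
 * whereas intel_pps_vdd_on() takes the lock itself and, per the comment
 * above, must not be nested.
 */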
818
819 static void intel_pps_vdd_off_sync_unlocked(struct intel_dp *intel_dp)
820 {
821 struct intel_display *display = to_intel_display(intel_dp);
822 struct drm_i915_private *dev_priv = to_i915(display->drm);
823 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
824 u32 pp;
825 i915_reg_t pp_stat_reg, pp_ctrl_reg;
826
827 lockdep_assert_held(&display->pps.mutex);
828
829 drm_WARN_ON(display->drm, intel_dp->pps.want_panel_vdd);
830
831 if (!edp_have_panel_vdd(intel_dp))
832 return;
833
834 drm_dbg_kms(display->drm, "[ENCODER:%d:%s] %s turning VDD off\n",
835 dig_port->base.base.base.id, dig_port->base.base.name,
836 pps_name(intel_dp));
837
838 pp = ilk_get_pp_control(intel_dp);
839 pp &= ~EDP_FORCE_VDD;
840
841 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
842 pp_stat_reg = _pp_stat_reg(intel_dp);
843
844 intel_de_write(display, pp_ctrl_reg, pp);
845 intel_de_posting_read(display, pp_ctrl_reg);
846
847 /* Make sure sequencer is idle before allowing subsequent activity */
848 drm_dbg_kms(display->drm,
849 "[ENCODER:%d:%s] %s PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
850 dig_port->base.base.base.id, dig_port->base.base.name,
851 pps_name(intel_dp),
852 intel_de_read(display, pp_stat_reg),
853 intel_de_read(display, pp_ctrl_reg));
854
855 if ((pp & PANEL_POWER_ON) == 0)
856 intel_dp->pps.panel_power_off_time = ktime_get_boottime();
857
858 intel_display_power_put(dev_priv,
859 intel_aux_power_domain(dig_port),
860 fetch_and_zero(&intel_dp->pps.vdd_wakeref));
861 }
862
863 void intel_pps_vdd_off_sync(struct intel_dp *intel_dp)
864 {
865 intel_wakeref_t wakeref;
866
867 if (!intel_dp_is_edp(intel_dp))
868 return;
869
870 cancel_delayed_work_sync(&intel_dp->pps.panel_vdd_work);
871 /*
872 * vdd might still be enabled due to the delayed vdd off.
873 * Make sure vdd is actually turned off here.
874 */
875 with_intel_pps_lock(intel_dp, wakeref)
876 intel_pps_vdd_off_sync_unlocked(intel_dp);
877 }
878
879 static void edp_panel_vdd_work(struct work_struct *__work)
880 {
881 struct intel_pps *pps = container_of(to_delayed_work(__work),
882 struct intel_pps, panel_vdd_work);
883 struct intel_dp *intel_dp = container_of(pps, struct intel_dp, pps);
884 intel_wakeref_t wakeref;
885
886 with_intel_pps_lock(intel_dp, wakeref) {
887 if (!intel_dp->pps.want_panel_vdd)
888 intel_pps_vdd_off_sync_unlocked(intel_dp);
889 }
890 }
891
892 static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
893 {
894 struct intel_display *display = to_intel_display(intel_dp);
895 struct drm_i915_private *i915 = to_i915(display->drm);
896 unsigned long delay;
897
898 /*
899 * We may not yet know the real power sequencing delays,
900 * so keep VDD enabled until we're done with init.
901 */
902 if (intel_dp->pps.initializing)
903 return;
904
905 /*
906 * Queue the timer to fire a long time from now (relative to the power
907 * down delay) to keep the panel power up across a sequence of
908 * operations.
909 */
910 delay = msecs_to_jiffies(intel_dp->pps.panel_power_cycle_delay * 5);
911 queue_delayed_work(i915->unordered_wq,
912 &intel_dp->pps.panel_vdd_work, delay);
913 }
914
915 /*
916 * Must be paired with edp_panel_vdd_on().
917 * Must hold pps_mutex around the whole on/off sequence.
918 * Can be nested with intel_pps_vdd_{on,off}() calls.
919 */
920 void intel_pps_vdd_off_unlocked(struct intel_dp *intel_dp, bool sync)
921 {
922 struct intel_display *display = to_intel_display(intel_dp);
923 struct drm_i915_private *dev_priv = to_i915(display->drm);
924
925 lockdep_assert_held(&display->pps.mutex);
926
927 if (!intel_dp_is_edp(intel_dp))
928 return;
929
930 I915_STATE_WARN(dev_priv, !intel_dp->pps.want_panel_vdd,
931 "[ENCODER:%d:%s] %s VDD not forced on",
932 dp_to_dig_port(intel_dp)->base.base.base.id,
933 dp_to_dig_port(intel_dp)->base.base.name,
934 pps_name(intel_dp));
935
936 intel_dp->pps.want_panel_vdd = false;
937
938 if (sync)
939 intel_pps_vdd_off_sync_unlocked(intel_dp);
940 else
941 edp_panel_vdd_schedule_off(intel_dp);
942 }
943
944 void intel_pps_on_unlocked(struct intel_dp *intel_dp)
945 {
946 struct intel_display *display = to_intel_display(intel_dp);
947 struct drm_i915_private *dev_priv = to_i915(display->drm);
948 u32 pp;
949 i915_reg_t pp_ctrl_reg;
950
951 lockdep_assert_held(&display->pps.mutex);
952
953 if (!intel_dp_is_edp(intel_dp))
954 return;
955
956 drm_dbg_kms(display->drm, "[ENCODER:%d:%s] %s turn panel power on\n",
957 dp_to_dig_port(intel_dp)->base.base.base.id,
958 dp_to_dig_port(intel_dp)->base.base.name,
959 pps_name(intel_dp));
960
961 if (drm_WARN(display->drm, edp_have_panel_power(intel_dp),
962 "[ENCODER:%d:%s] %s panel power already on\n",
963 dp_to_dig_port(intel_dp)->base.base.base.id,
964 dp_to_dig_port(intel_dp)->base.base.name,
965 pps_name(intel_dp)))
966 return;
967
968 wait_panel_power_cycle(intel_dp);
969
970 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
971 pp = ilk_get_pp_control(intel_dp);
972 if (IS_IRONLAKE(dev_priv)) {
973 /* ILK workaround: disable reset around power sequence */
974 pp &= ~PANEL_POWER_RESET;
975 intel_de_write(display, pp_ctrl_reg, pp);
976 intel_de_posting_read(display, pp_ctrl_reg);
977 }
978
979 /*
980 * WA: 22019252566
981 * Disable DPLS gating around power sequence.
982 */
983 if (IS_DISPLAY_VER(display, 13, 14))
984 intel_de_rmw(display, SOUTH_DSPCLK_GATE_D,
985 0, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
986
987 pp |= PANEL_POWER_ON;
988 if (!IS_IRONLAKE(dev_priv))
989 pp |= PANEL_POWER_RESET;
990
991 intel_de_write(display, pp_ctrl_reg, pp);
992 intel_de_posting_read(display, pp_ctrl_reg);
993
994 wait_panel_on(intel_dp);
995 intel_dp->pps.last_power_on = jiffies;
996
997 if (IS_DISPLAY_VER(display, 13, 14))
998 intel_de_rmw(display, SOUTH_DSPCLK_GATE_D,
999 PCH_DPLSUNIT_CLOCK_GATE_DISABLE, 0);
1000
1001 if (IS_IRONLAKE(dev_priv)) {
1002 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
1003 intel_de_write(display, pp_ctrl_reg, pp);
1004 intel_de_posting_read(display, pp_ctrl_reg);
1005 }
1006 }
1007
1008 void intel_pps_on(struct intel_dp *intel_dp)
1009 {
1010 intel_wakeref_t wakeref;
1011
1012 if (!intel_dp_is_edp(intel_dp))
1013 return;
1014
1015 with_intel_pps_lock(intel_dp, wakeref)
1016 intel_pps_on_unlocked(intel_dp);
1017 }
1018
1019 void intel_pps_off_unlocked(struct intel_dp *intel_dp)
1020 {
1021 struct intel_display *display = to_intel_display(intel_dp);
1022 struct drm_i915_private *dev_priv = to_i915(display->drm);
1023 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1024 u32 pp;
1025 i915_reg_t pp_ctrl_reg;
1026
1027 lockdep_assert_held(&display->pps.mutex);
1028
1029 if (!intel_dp_is_edp(intel_dp))
1030 return;
1031
1032 drm_dbg_kms(display->drm, "[ENCODER:%d:%s] %s turn panel power off\n",
1033 dig_port->base.base.base.id, dig_port->base.base.name,
1034 pps_name(intel_dp));
1035
1036 drm_WARN(display->drm, !intel_dp->pps.want_panel_vdd,
1037 "[ENCODER:%d:%s] %s need VDD to turn off panel\n",
1038 dig_port->base.base.base.id, dig_port->base.base.name,
1039 pps_name(intel_dp));
1040
1041 pp = ilk_get_pp_control(intel_dp);
1042 /* We need to switch off panel power _and_ force vdd, for otherwise some
1043 * panels get very unhappy and cease to work. */
1044 pp &= ~(PANEL_POWER_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
1045 EDP_BLC_ENABLE);
1046
1047 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1048
1049 intel_dp->pps.want_panel_vdd = false;
1050
1051 intel_de_write(display, pp_ctrl_reg, pp);
1052 intel_de_posting_read(display, pp_ctrl_reg);
1053
1054 wait_panel_off(intel_dp);
1055 intel_dp->pps.panel_power_off_time = ktime_get_boottime();
1056
1057 /* We got a reference when we enabled the VDD. */
1058 intel_display_power_put(dev_priv,
1059 intel_aux_power_domain(dig_port),
1060 fetch_and_zero(&intel_dp->pps.vdd_wakeref));
1061 }
1062
1063 void intel_pps_off(struct intel_dp *intel_dp)
1064 {
1065 intel_wakeref_t wakeref;
1066
1067 if (!intel_dp_is_edp(intel_dp))
1068 return;
1069
1070 with_intel_pps_lock(intel_dp, wakeref)
1071 intel_pps_off_unlocked(intel_dp);
1072 }
1073
1074 /* Enable backlight in the panel power control. */
1075 void intel_pps_backlight_on(struct intel_dp *intel_dp)
1076 {
1077 struct intel_display *display = to_intel_display(intel_dp);
1078 intel_wakeref_t wakeref;
1079
1080 /*
1081 * If we enable the backlight right away following a panel power
1082 * on, we may see slight flicker as the panel syncs with the eDP
1083 * link. So delay a bit to make sure the image is solid before
1084 * allowing it to appear.
1085 */
1086 wait_backlight_on(intel_dp);
1087
1088 with_intel_pps_lock(intel_dp, wakeref) {
1089 i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1090 u32 pp;
1091
1092 pp = ilk_get_pp_control(intel_dp);
1093 pp |= EDP_BLC_ENABLE;
1094
1095 intel_de_write(display, pp_ctrl_reg, pp);
1096 intel_de_posting_read(display, pp_ctrl_reg);
1097 }
1098 }
1099
1100 /* Disable backlight in the panel power control. */
1101 void intel_pps_backlight_off(struct intel_dp *intel_dp)
1102 {
1103 struct intel_display *display = to_intel_display(intel_dp);
1104 intel_wakeref_t wakeref;
1105
1106 if (!intel_dp_is_edp(intel_dp))
1107 return;
1108
1109 with_intel_pps_lock(intel_dp, wakeref) {
1110 i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1111 u32 pp;
1112
1113 pp = ilk_get_pp_control(intel_dp);
1114 pp &= ~EDP_BLC_ENABLE;
1115
1116 intel_de_write(display, pp_ctrl_reg, pp);
1117 intel_de_posting_read(display, pp_ctrl_reg);
1118 }
1119
1120 intel_dp->pps.last_backlight_off = jiffies;
1121 edp_wait_backlight_off(intel_dp);
1122 }
1123
1124 /*
1125 * Hook for controlling the panel power control backlight through the bl_power
1126 * sysfs attribute. Take care to handle multiple calls.
1127 */
1128 void intel_pps_backlight_power(struct intel_connector *connector, bool enable)
1129 {
1130 struct intel_display *display = to_intel_display(connector);
1131 struct intel_dp *intel_dp = intel_attached_dp(connector);
1132 intel_wakeref_t wakeref;
1133 bool is_enabled;
1134
1135 is_enabled = false;
1136 with_intel_pps_lock(intel_dp, wakeref)
1137 is_enabled = ilk_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
1138 if (is_enabled == enable)
1139 return;
1140
1141 drm_dbg_kms(display->drm, "panel power control backlight %s\n",
1142 enable ? "enable" : "disable");
1143
1144 if (enable)
1145 intel_pps_backlight_on(intel_dp);
1146 else
1147 intel_pps_backlight_off(intel_dp);
1148 }
1149
1150 static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
1151 {
1152 struct intel_display *display = to_intel_display(intel_dp);
1153 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1154 enum pipe pipe = intel_dp->pps.pps_pipe;
1155 i915_reg_t pp_on_reg = PP_ON_DELAYS(display, pipe);
1156
1157 drm_WARN_ON(display->drm, intel_dp->pps.active_pipe != INVALID_PIPE);
1158
1159 if (drm_WARN_ON(display->drm, pipe != PIPE_A && pipe != PIPE_B))
1160 return;
1161
1162 intel_pps_vdd_off_sync_unlocked(intel_dp);
1163
1164 /*
1165 * VLV seems to get confused when multiple power sequencers
1166 * have the same port selected (even if only one has power/vdd
1167 * enabled). The failure manifests as vlv_wait_port_ready() failing.
1168 * CHV, on the other hand, doesn't seem to mind having the same port
1169 * selected in multiple power sequencers, but let's always clear the
1170 * port select when logically disconnecting a power sequencer
1171 * from a port.
1172 */
1173 drm_dbg_kms(display->drm,
1174 "detaching %s from [ENCODER:%d:%s]\n",
1175 pps_name(intel_dp),
1176 dig_port->base.base.base.id, dig_port->base.base.name);
1177 intel_de_write(display, pp_on_reg, 0);
1178 intel_de_posting_read(display, pp_on_reg);
1179
1180 intel_dp->pps.pps_pipe = INVALID_PIPE;
1181 }
1182
1183 static void vlv_steal_power_sequencer(struct intel_display *display,
1184 enum pipe pipe)
1185 {
1186 struct intel_encoder *encoder;
1187
1188 lockdep_assert_held(&display->pps.mutex);
1189
1190 for_each_intel_dp(display->drm, encoder) {
1191 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1192
1193 drm_WARN(display->drm, intel_dp->pps.active_pipe == pipe,
1194 "stealing PPS %c from active [ENCODER:%d:%s]\n",
1195 pipe_name(pipe), encoder->base.base.id,
1196 encoder->base.name);
1197
1198 if (intel_dp->pps.pps_pipe != pipe)
1199 continue;
1200
1201 drm_dbg_kms(display->drm,
1202 "stealing PPS %c from [ENCODER:%d:%s]\n",
1203 pipe_name(pipe), encoder->base.base.id,
1204 encoder->base.name);
1205
1206 /* make sure vdd is off before we steal it */
1207 vlv_detach_power_sequencer(intel_dp);
1208 }
1209 }
1210
1211 void vlv_pps_init(struct intel_encoder *encoder,
1212 const struct intel_crtc_state *crtc_state)
1213 {
1214 struct intel_display *display = to_intel_display(encoder);
1215 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1216 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1217
1218 lockdep_assert_held(&display->pps.mutex);
1219
1220 drm_WARN_ON(display->drm, intel_dp->pps.active_pipe != INVALID_PIPE);
1221
1222 if (intel_dp->pps.pps_pipe != INVALID_PIPE &&
1223 intel_dp->pps.pps_pipe != crtc->pipe) {
1224 /*
1225 * If another power sequencer was being used on this
1226 * port previously, make sure to turn off vdd there while
1227 * we still have control of it.
1228 */
1229 vlv_detach_power_sequencer(intel_dp);
1230 }
1231
1232 /*
1233 * We may be stealing the power
1234 * sequencer from another port.
1235 */
1236 vlv_steal_power_sequencer(display, crtc->pipe);
1237
1238 intel_dp->pps.active_pipe = crtc->pipe;
1239
1240 if (!intel_dp_is_edp(intel_dp))
1241 return;
1242
1243 /* now it's all ours */
1244 intel_dp->pps.pps_pipe = crtc->pipe;
1245
1246 drm_dbg_kms(display->drm,
1247 "initializing %s for [ENCODER:%d:%s]\n",
1248 pps_name(intel_dp),
1249 encoder->base.base.id, encoder->base.name);
1250
1251 /* init power sequencer on this pipe and port */
1252 pps_init_delays(intel_dp);
1253 pps_init_registers(intel_dp, true);
1254 }
1255
1256 static void pps_vdd_init(struct intel_dp *intel_dp)
1257 {
1258 struct intel_display *display = to_intel_display(intel_dp);
1259 struct drm_i915_private *dev_priv = to_i915(display->drm);
1260 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1261
1262 lockdep_assert_held(&display->pps.mutex);
1263
1264 if (!edp_have_panel_vdd(intel_dp))
1265 return;
1266
1267 /*
1268 * The VDD bit needs a power domain reference, so if the bit is
1269 * already enabled when we boot or resume, grab this reference and
1270 * schedule a vdd off, so we don't hold on to the reference
1271 * indefinitely.
1272 */
1273 drm_dbg_kms(display->drm,
1274 "[ENCODER:%d:%s] %s VDD left on by BIOS, adjusting state tracking\n",
1275 dig_port->base.base.base.id, dig_port->base.base.name,
1276 pps_name(intel_dp));
1277 drm_WARN_ON(display->drm, intel_dp->pps.vdd_wakeref);
1278 intel_dp->pps.vdd_wakeref = intel_display_power_get(dev_priv,
1279 intel_aux_power_domain(dig_port));
1280 }
1281
1282 bool intel_pps_have_panel_power_or_vdd(struct intel_dp *intel_dp)
1283 {
1284 intel_wakeref_t wakeref;
1285 bool have_power = false;
1286
1287 with_intel_pps_lock(intel_dp, wakeref) {
1288 have_power = edp_have_panel_power(intel_dp) ||
1289 edp_have_panel_vdd(intel_dp);
1290 }
1291
1292 return have_power;
1293 }
1294
1295 static void pps_init_timestamps(struct intel_dp *intel_dp)
1296 {
1297 /*
1298 * Initialize panel power off time to 0, assuming panel power could have
1299 * been toggled between kernel boot and now only by a previously loaded
1300 * and removed i915, which has already ensured sufficient power off
1301 * delay at module remove.
1302 */
1303 intel_dp->pps.panel_power_off_time = 0;
1304 intel_dp->pps.last_power_on = jiffies;
1305 intel_dp->pps.last_backlight_off = jiffies;
1306 }
1307
1308 static void
1309 intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
1310 {
1311 struct intel_display *display = to_intel_display(intel_dp);
1312 u32 pp_on, pp_off, pp_ctl;
1313 struct pps_registers regs;
1314
1315 intel_pps_get_registers(intel_dp, &regs);
1316
1317 pp_ctl = ilk_get_pp_control(intel_dp);
1318
1319 /* Ensure PPS is unlocked */
1320 if (!HAS_DDI(display))
1321 intel_de_write(display, regs.pp_ctrl, pp_ctl);
1322
1323 pp_on = intel_de_read(display, regs.pp_on);
1324 pp_off = intel_de_read(display, regs.pp_off);
1325
1326 /* Pull timing values out of registers */
1327 seq->t1_t3 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, pp_on);
1328 seq->t8 = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, pp_on);
1329 seq->t9 = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, pp_off);
1330 seq->t10 = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, pp_off);
1331
1332 if (i915_mmio_reg_valid(regs.pp_div)) {
1333 u32 pp_div;
1334
1335 pp_div = intel_de_read(display, regs.pp_div);
1336
1337 seq->t11_t12 = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, pp_div) * 1000;
1338 } else {
1339 seq->t11_t12 = REG_FIELD_GET(BXT_POWER_CYCLE_DELAY_MASK, pp_ctl) * 1000;
1340 }
1341 }
1342
1343 static void
1344 intel_pps_dump_state(struct intel_dp *intel_dp, const char *state_name,
1345 const struct edp_power_seq *seq)
1346 {
1347 struct intel_display *display = to_intel_display(intel_dp);
1348
1349 drm_dbg_kms(display->drm,
1350 "%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
1351 state_name,
1352 seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12);
1353 }
1354
1355 static void
1356 intel_pps_verify_state(struct intel_dp *intel_dp)
1357 {
1358 struct intel_display *display = to_intel_display(intel_dp);
1359 struct edp_power_seq hw;
1360 struct edp_power_seq *sw = &intel_dp->pps.pps_delays;
1361
1362 intel_pps_readout_hw_state(intel_dp, &hw);
1363
1364 if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 ||
1365 hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) {
1366 drm_err(display->drm, "PPS state mismatch\n");
1367 intel_pps_dump_state(intel_dp, "sw", sw);
1368 intel_pps_dump_state(intel_dp, "hw", &hw);
1369 }
1370 }
1371
1372 static bool pps_delays_valid(struct edp_power_seq *delays)
1373 {
1374 return delays->t1_t3 || delays->t8 || delays->t9 ||
1375 delays->t10 || delays->t11_t12;
1376 }
1377
1378 static void pps_init_delays_bios(struct intel_dp *intel_dp,
1379 struct edp_power_seq *bios)
1380 {
1381 struct intel_display *display = to_intel_display(intel_dp);
1382
1383 lockdep_assert_held(&display->pps.mutex);
1384
1385 if (!pps_delays_valid(&intel_dp->pps.bios_pps_delays))
1386 intel_pps_readout_hw_state(intel_dp, &intel_dp->pps.bios_pps_delays);
1387
1388 *bios = intel_dp->pps.bios_pps_delays;
1389
1390 intel_pps_dump_state(intel_dp, "bios", bios);
1391 }
1392
1393 static void pps_init_delays_vbt(struct intel_dp *intel_dp,
1394 struct edp_power_seq *vbt)
1395 {
1396 struct intel_display *display = to_intel_display(intel_dp);
1397 struct intel_connector *connector = intel_dp->attached_connector;
1398
1399 *vbt = connector->panel.vbt.edp.pps;
1400
1401 if (!pps_delays_valid(vbt))
1402 return;
1403
1404 /* On the Toshiba Satellite P50-C-18C system the VBT T12 delay
1405 * of 500ms appears to be too short. Occasionally the panel
1406 * just fails to power back on. Increasing the delay to 800ms
1407 * seems sufficient to avoid this problem.
1408 */
1409 if (intel_has_quirk(display, QUIRK_INCREASE_T12_DELAY)) {
1410 vbt->t11_t12 = max_t(u16, vbt->t11_t12, 1300 * 10);
1411 drm_dbg_kms(display->drm,
1412 "Increasing T12 panel delay as per the quirk to %d\n",
1413 vbt->t11_t12);
1414 }
1415
1416 /* The T11_T12 delay is special and actually in units of 100ms, but zero
1417 * based in the hw (so we need to add 100 ms). The sw vbt
1418 * table additionally multiplies it by 1000 to convert it to the hw's
1419 * 100usec units. */
1420 vbt->t11_t12 += 100 * 10;
1421
1422 intel_pps_dump_state(intel_dp, "vbt", vbt);
1423 }
1424
1425 static void pps_init_delays_spec(struct intel_dp *intel_dp,
1426 struct edp_power_seq *spec)
1427 {
1428 struct intel_display *display = to_intel_display(intel_dp);
1429
1430 lockdep_assert_held(&display->pps.mutex);
1431
1432 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
1433 * our hw here, which are all in 100usec. */
1434 spec->t1_t3 = 210 * 10;
1435 spec->t8 = 50 * 10; /* no limit for t8, use t7 instead */
1436 spec->t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
1437 spec->t10 = 500 * 10;
1438 /* This one is special and actually in units of 100ms, but zero
1439 * based in the hw (so we need to add 100 ms). The sw vbt
1440 * table additionally multiplies it by 1000 to convert it to the hw's
1441 * 100usec units. */
1442 spec->t11_t12 = (510 + 100) * 10;
1443
1444 intel_pps_dump_state(intel_dp, "spec", spec);
1445 }
1446
1447 static void pps_init_delays(struct intel_dp *intel_dp)
1448 {
1449 struct intel_display *display = to_intel_display(intel_dp);
1450 struct edp_power_seq cur, vbt, spec,
1451 *final = &intel_dp->pps.pps_delays;
1452
1453 lockdep_assert_held(&display->pps.mutex);
1454
1455 /* already initialized? */
1456 if (pps_delays_valid(final))
1457 return;
1458
1459 pps_init_delays_bios(intel_dp, &cur);
1460 pps_init_delays_vbt(intel_dp, &vbt);
1461 pps_init_delays_spec(intel_dp, &spec);
1462
1463 /* Use the max of the register settings and vbt. If both are
1464 * unset, fall back to the spec limits. */
1465 #define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
1466 spec.field : \
1467 max(cur.field, vbt.field))
1468 assign_final(t1_t3);
1469 assign_final(t8);
1470 assign_final(t9);
1471 assign_final(t10);
1472 assign_final(t11_t12);
1473 #undef assign_final
1474
1475 #define get_delay(field) (DIV_ROUND_UP(final->field, 10))
1476 intel_dp->pps.panel_power_up_delay = get_delay(t1_t3);
1477 intel_dp->pps.backlight_on_delay = get_delay(t8);
1478 intel_dp->pps.backlight_off_delay = get_delay(t9);
1479 intel_dp->pps.panel_power_down_delay = get_delay(t10);
1480 intel_dp->pps.panel_power_cycle_delay = get_delay(t11_t12);
1481 #undef get_delay
1482
1483 drm_dbg_kms(display->drm,
1484 "panel power up delay %d, power down delay %d, power cycle delay %d\n",
1485 intel_dp->pps.panel_power_up_delay,
1486 intel_dp->pps.panel_power_down_delay,
1487 intel_dp->pps.panel_power_cycle_delay);
1488
1489 drm_dbg_kms(display->drm, "backlight on delay %d, off delay %d\n",
1490 intel_dp->pps.backlight_on_delay,
1491 intel_dp->pps.backlight_off_delay);
1492
1493 /*
1494 * We override the HW backlight delays to 1 because we do manual waits
1495 * on them. For T8, even BSpec recommends doing it. For T9, if we
1496 * don't do this, we'll end up waiting for the backlight off delay
1497 * twice: once when we do the manual sleep, and once when we disable
1498 * the panel and wait for the PP_STATUS bit to become zero.
1499 */
1500 final->t8 = 1;
1501 final->t9 = 1;
1502
1503 /*
1504 * HW has only a 100msec granularity for t11_t12 so round it up
1505 * accordingly.
1506 */
1507 final->t11_t12 = roundup(final->t11_t12, 100 * 10);
1508 }
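/*
 * Worked example (illustrative): the delays are kept in the hw's 100 usec
 * units, so a chosen t1_t3 of 2100 becomes DIV_ROUND_UP(2100, 10) == 210 ms
 * of panel_power_up_delay. A t11_t12 of 5100 (510 ms) gives a 510 ms
 * panel_power_cycle_delay and is then rounded up to the hw's 100 ms
 * granularity, roundup(5100, 1000) == 6000 i.e. 600 ms, which is what
 * pps_init_registers() will program.
 */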
1509
1510 static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd)
1511 {
1512 struct intel_display *display = to_intel_display(intel_dp);
1513 struct drm_i915_private *dev_priv = to_i915(display->drm);
1514 u32 pp_on, pp_off, port_sel = 0;
1515 int div = DISPLAY_RUNTIME_INFO(display)->rawclk_freq / 1000;
1516 struct pps_registers regs;
1517 enum port port = dp_to_dig_port(intel_dp)->base.port;
1518 const struct edp_power_seq *seq = &intel_dp->pps.pps_delays;
1519
1520 lockdep_assert_held(&display->pps.mutex);
1521
1522 intel_pps_get_registers(intel_dp, &regs);
1523
1524 /*
1525 * On some VLV machines the BIOS can leave the VDD
1526 * enabled even on power sequencers which aren't
1527 * hooked up to any port. This would mess up the
1528 * power domain tracking the first time we pick
1529 * one of these power sequencers for use since
1530 * intel_pps_vdd_on_unlocked() would notice that the VDD was
1531 * already on and therefore wouldn't grab the power
1532 * domain reference. Disable VDD first to avoid this.
1533 * This also avoids spuriously turning the VDD on as
1534 * soon as the new power sequencer gets initialized.
1535 */
1536 if (force_disable_vdd) {
1537 u32 pp = ilk_get_pp_control(intel_dp);
1538
1539 drm_WARN(display->drm, pp & PANEL_POWER_ON,
1540 "Panel power already on\n");
1541
1542 if (pp & EDP_FORCE_VDD)
1543 drm_dbg_kms(display->drm,
1544 "VDD already on, disabling first\n");
1545
1546 pp &= ~EDP_FORCE_VDD;
1547
1548 intel_de_write(display, regs.pp_ctrl, pp);
1549 }
1550
1551 pp_on = REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, seq->t1_t3) |
1552 REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, seq->t8);
1553 pp_off = REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, seq->t9) |
1554 REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, seq->t10);
1555
1556 /* Haswell doesn't have any port selection bits for the panel
1557 * power sequencer any more. */
1558 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
1559 port_sel = PANEL_PORT_SELECT_VLV(port);
1560 } else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
1561 switch (port) {
1562 case PORT_A:
1563 port_sel = PANEL_PORT_SELECT_DPA;
1564 break;
1565 case PORT_C:
1566 port_sel = PANEL_PORT_SELECT_DPC;
1567 break;
1568 case PORT_D:
1569 port_sel = PANEL_PORT_SELECT_DPD;
1570 break;
1571 default:
1572 MISSING_CASE(port);
1573 break;
1574 }
1575 }
1576
1577 pp_on |= port_sel;
1578
1579 intel_de_write(display, regs.pp_on, pp_on);
1580 intel_de_write(display, regs.pp_off, pp_off);
1581
1582 /*
1583 * Compute the divisor for the pp clock, simply matching the Bspec formula.
1584 */
1585 if (i915_mmio_reg_valid(regs.pp_div))
1586 intel_de_write(display, regs.pp_div,
1587 REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, (100 * div) / 2 - 1) | REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000)));
1588 else
1589 intel_de_rmw(display, regs.pp_ctrl, BXT_POWER_CYCLE_DELAY_MASK,
1590 REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK,
1591 DIV_ROUND_UP(seq->t11_t12, 1000)));
1592
1593 drm_dbg_kms(display->drm,
1594 "panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
1595 intel_de_read(display, regs.pp_on),
1596 intel_de_read(display, regs.pp_off),
1597 i915_mmio_reg_valid(regs.pp_div) ?
1598 intel_de_read(display, regs.pp_div) :
1599 (intel_de_read(display, regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK));
1600 }
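/*
 * Worked example (illustrative, assuming rawclk_freq is reported in kHz):
 * with a 24 MHz raw clock, div == 24000 / 1000 == 24 and the reference
 * divider field is programmed as (100 * 24) / 2 - 1 == 1199, while a
 * t11_t12 of 6000 (600 ms in 100 usec units) becomes
 * DIV_ROUND_UP(6000, 1000) == 6 in the 100 ms based power cycle delay field.
 */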
1601
1602 void intel_pps_encoder_reset(struct intel_dp *intel_dp)
1603 {
1604 struct intel_display *display = to_intel_display(intel_dp);
1605 struct drm_i915_private *i915 = to_i915(display->drm);
1606 intel_wakeref_t wakeref;
1607
1608 if (!intel_dp_is_edp(intel_dp))
1609 return;
1610
1611 with_intel_pps_lock(intel_dp, wakeref) {
1612 /*
1613 * Reinit the power sequencer on the resume path as well, in case
1614 * the BIOS did something nasty with it.
1615 */
1616 if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
1617 vlv_initial_power_sequencer_setup(intel_dp);
1618
1619 pps_init_delays(intel_dp);
1620 pps_init_registers(intel_dp, false);
1621 pps_vdd_init(intel_dp);
1622
1623 if (edp_have_panel_vdd(intel_dp))
1624 edp_panel_vdd_schedule_off(intel_dp);
1625 }
1626 }
1627
1628 bool intel_pps_init(struct intel_dp *intel_dp)
1629 {
1630 intel_wakeref_t wakeref;
1631 bool ret;
1632
1633 intel_dp->pps.initializing = true;
1634 INIT_DELAYED_WORK(&intel_dp->pps.panel_vdd_work, edp_panel_vdd_work);
1635
1636 pps_init_timestamps(intel_dp);
1637
1638 with_intel_pps_lock(intel_dp, wakeref) {
1639 ret = pps_initial_setup(intel_dp);
1640
1641 pps_init_delays(intel_dp);
1642 pps_init_registers(intel_dp, false);
1643 pps_vdd_init(intel_dp);
1644 }
1645
1646 return ret;
1647 }
1648
1649 static void pps_init_late(struct intel_dp *intel_dp)
1650 {
1651 struct intel_display *display = to_intel_display(intel_dp);
1652 struct drm_i915_private *i915 = to_i915(display->drm);
1653 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
1654 struct intel_connector *connector = intel_dp->attached_connector;
1655
1656 if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
1657 return;
1658
1659 if (intel_num_pps(display) < 2)
1660 return;
1661
1662 drm_WARN(display->drm,
1663 connector->panel.vbt.backlight.controller >= 0 &&
1664 intel_dp->pps.pps_idx != connector->panel.vbt.backlight.controller,
1665 "[ENCODER:%d:%s] power sequencer mismatch: %d (initial) vs. %d (VBT)\n",
1666 encoder->base.base.id, encoder->base.name,
1667 intel_dp->pps.pps_idx, connector->panel.vbt.backlight.controller);
1668
1669 if (connector->panel.vbt.backlight.controller >= 0)
1670 intel_dp->pps.pps_idx = connector->panel.vbt.backlight.controller;
1671 }
1672
1673 void intel_pps_init_late(struct intel_dp *intel_dp)
1674 {
1675 intel_wakeref_t wakeref;
1676
1677 with_intel_pps_lock(intel_dp, wakeref) {
1678 /* Reinit delays after per-panel info has been parsed from VBT */
1679 pps_init_late(intel_dp);
1680
1681 memset(&intel_dp->pps.pps_delays, 0, sizeof(intel_dp->pps.pps_delays));
1682 pps_init_delays(intel_dp);
1683 pps_init_registers(intel_dp, false);
1684
1685 intel_dp->pps.initializing = false;
1686
1687 if (edp_have_panel_vdd(intel_dp))
1688 edp_panel_vdd_schedule_off(intel_dp);
1689 }
1690 }
1691
1692 void intel_pps_unlock_regs_wa(struct intel_display *display)
1693 {
1694 int pps_num;
1695 int pps_idx;
1696
1697 if (!HAS_DISPLAY(display) || HAS_DDI(display))
1698 return;
1699 /*
1700 * This w/a is needed at least on CPT/PPT, but to be sure apply it
1701 * everywhere registers can be write protected.
1702 */
1703 pps_num = intel_num_pps(display);
1704
1705 for (pps_idx = 0; pps_idx < pps_num; pps_idx++)
1706 intel_de_rmw(display, PP_CONTROL(display, pps_idx),
1707 PANEL_UNLOCK_MASK, PANEL_UNLOCK_REGS);
1708 }
1709
1710 void intel_pps_setup(struct intel_display *display)
1711 {
1712 struct drm_i915_private *i915 = to_i915(display->drm);
1713
1714 if (HAS_PCH_SPLIT(i915) || IS_GEMINILAKE(i915) || IS_BROXTON(i915))
1715 display->pps.mmio_base = PCH_PPS_BASE;
1716 else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
1717 display->pps.mmio_base = VLV_PPS_BASE;
1718 else
1719 display->pps.mmio_base = PPS_BASE;
1720 }
1721
1722 static int intel_pps_show(struct seq_file *m, void *data)
1723 {
1724 struct intel_connector *connector = m->private;
1725 struct intel_dp *intel_dp = intel_attached_dp(connector);
1726
1727 if (connector->base.status != connector_status_connected)
1728 return -ENODEV;
1729
1730 seq_printf(m, "Panel power up delay: %d\n",
1731 intel_dp->pps.panel_power_up_delay);
1732 seq_printf(m, "Panel power down delay: %d\n",
1733 intel_dp->pps.panel_power_down_delay);
1734 seq_printf(m, "Backlight on delay: %d\n",
1735 intel_dp->pps.backlight_on_delay);
1736 seq_printf(m, "Backlight off delay: %d\n",
1737 intel_dp->pps.backlight_off_delay);
1738
1739 return 0;
1740 }
1741 DEFINE_SHOW_ATTRIBUTE(intel_pps);
1742
1743 void intel_pps_connector_debugfs_add(struct intel_connector *connector)
1744 {
1745 struct dentry *root = connector->base.debugfs_entry;
1746 int connector_type = connector->base.connector_type;
1747
1748 if (connector_type == DRM_MODE_CONNECTOR_eDP)
1749 debugfs_create_file("i915_panel_timings", 0444, root,
1750 connector, &intel_pps_fops);
1751 }
1752
1753 void assert_pps_unlocked(struct intel_display *display, enum pipe pipe)
1754 {
1755 struct drm_i915_private *dev_priv = to_i915(display->drm);
1756 i915_reg_t pp_reg;
1757 u32 val;
1758 enum pipe panel_pipe = INVALID_PIPE;
1759 bool locked = true;
1760
1761 if (drm_WARN_ON(display->drm, HAS_DDI(display)))
1762 return;
1763
1764 if (HAS_PCH_SPLIT(dev_priv)) {
1765 u32 port_sel;
1766
1767 pp_reg = PP_CONTROL(display, 0);
1768 port_sel = intel_de_read(display, PP_ON_DELAYS(display, 0)) &
1769 PANEL_PORT_SELECT_MASK;
1770
1771 switch (port_sel) {
1772 case PANEL_PORT_SELECT_LVDS:
1773 intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
1774 break;
1775 case PANEL_PORT_SELECT_DPA:
1776 g4x_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
1777 break;
1778 case PANEL_PORT_SELECT_DPC:
1779 g4x_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
1780 break;
1781 case PANEL_PORT_SELECT_DPD:
1782 g4x_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
1783 break;
1784 default:
1785 MISSING_CASE(port_sel);
1786 break;
1787 }
1788 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
1789 /* presumably write lock depends on pipe, not port select */
1790 pp_reg = PP_CONTROL(display, pipe);
1791 panel_pipe = pipe;
1792 } else {
1793 u32 port_sel;
1794
1795 pp_reg = PP_CONTROL(display, 0);
1796 port_sel = intel_de_read(display, PP_ON_DELAYS(display, 0)) &
1797 PANEL_PORT_SELECT_MASK;
1798
1799 drm_WARN_ON(display->drm,
1800 port_sel != PANEL_PORT_SELECT_LVDS);
1801 intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
1802 }
1803
1804 val = intel_de_read(display, pp_reg);
1805 if (!(val & PANEL_POWER_ON) ||
1806 ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
1807 locked = false;
1808
1809 I915_STATE_WARN(dev_priv, panel_pipe == pipe && locked,
1810 "panel assertion failure, pipe %c regs locked\n",
1811 pipe_name(pipe));
1812 }
1813