/*
 * Copyright © 2006-2007 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Eric Anholt <eric@anholt.net>
 */

#include <linux/dma-resv.h>
#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string_helpers.h>

#include <drm/display/drm_dp_helper.h>
#include <drm/display/drm_dp_tunnel.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_fixed.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>

#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_object.h"

#include "g4x_dp.h"
#include "g4x_hdmi.h"
#include "hsw_ips.h"
#include "i915_config.h"
#include "i915_drv.h"
#include "i915_reg.h"
#include "i915_utils.h"
#include "i9xx_plane.h"
#include "i9xx_plane_regs.h"
#include "i9xx_wm.h"
#include "intel_atomic.h"
#include "intel_atomic_plane.h"
#include "intel_audio.h"
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_clock_gating.h"
#include "intel_color.h"
#include "intel_crt.h"
#include "intel_crtc.h"
#include "intel_crtc_state_dump.h"
#include "intel_cursor_regs.h"
#include "intel_cx0_phy.h"
#include "intel_cursor.h"
#include "intel_ddi.h"
#include "intel_de.h"
#include "intel_display_driver.h"
#include "intel_display_power.h"
#include "intel_display_types.h"
#include "intel_dmc.h"
#include "intel_dp.h"
#include "intel_dp_link_training.h"
#include "intel_dp_mst.h"
#include "intel_dp_tunnel.h"
#include "intel_dpll.h"
#include "intel_dpll_mgr.h"
#include "intel_dpt.h"
#include "intel_dpt_common.h"
#include "intel_drrs.h"
#include "intel_dsb.h"
#include "intel_dsi.h"
#include "intel_dvo.h"
#include "intel_fb.h"
#include "intel_fbc.h"
#include "intel_fdi.h"
#include "intel_fifo_underrun.h"
#include "intel_frontbuffer.h"
#include "intel_hdmi.h"
#include "intel_hotplug.h"
#include "intel_link_bw.h"
#include "intel_lvds.h"
#include "intel_lvds_regs.h"
#include "intel_modeset_setup.h"
#include "intel_modeset_verify.h"
#include "intel_overlay.h"
#include "intel_panel.h"
#include "intel_pch_display.h"
#include "intel_pch_refclk.h"
#include "intel_pcode.h"
#include "intel_pipe_crc.h"
#include "intel_plane_initial.h"
#include "intel_pmdemand.h"
#include "intel_pps.h"
#include "intel_psr.h"
#include "intel_psr_regs.h"
#include "intel_sdvo.h"
#include "intel_snps_phy.h"
#include "intel_tc.h"
#include "intel_tdf.h"
#include "intel_tv.h"
#include "intel_vblank.h"
#include "intel_vdsc.h"
#include "intel_vdsc_regs.h"
#include "intel_vga.h"
#include "intel_vrr.h"
#include "intel_wm.h"
#include "skl_scaler.h"
#include "skl_universal_plane.h"
#include "skl_universal_plane_regs.h"
#include "skl_watermark.h"
#include "vlv_dpio_phy_regs.h"
#include "vlv_dsi.h"
#include "vlv_dsi_pll.h"
#include "vlv_dsi_regs.h"
#include "vlv_sideband.h"

static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state);
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
static void hsw_set_transconf(const struct intel_crtc_state *crtc_state);
static void bdw_set_pipe_misc(const struct intel_crtc_state *crtc_state);

/* returns HPLL frequency in kHz */
int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
{
	int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };

	/* Obtain SKU information */
	hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
		CCK_FUSE_HPLL_FREQ_MASK;

	return vco_freq[hpll_freq] * 1000;
}

int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
		      const char *name, u32 reg, int ref_freq)
{
	u32 val;
	int divider;

	val = vlv_cck_read(dev_priv, reg);
	divider = val & CCK_FREQUENCY_VALUES;

	drm_WARN(&dev_priv->drm, (val & CCK_FREQUENCY_STATUS) !=
		 (divider << CCK_FREQUENCY_STATUS_SHIFT),
		 "%s change in progress\n", name);

	return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
}
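
/*
 * Illustrative example: with ref_freq = 1600000 kHz (the HPLL VCO) and a
 * CCK divider field value of 3, the result is
 * DIV_ROUND_CLOSEST(1600000 << 1, 3 + 1) = 800000 kHz.
 */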

int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
			   const char *name, u32 reg)
{
	int hpll;

	vlv_cck_get(dev_priv);

	if (dev_priv->hpll_freq == 0)
		dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);

	hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq);

	vlv_cck_put(dev_priv);

	return hpll;
}

void intel_update_czclk(struct drm_i915_private *dev_priv)
{
	if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
		return;

	dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
						      CCK_CZ_CLOCK_CONTROL);

	drm_dbg(&dev_priv->drm, "CZ clock rate: %d kHz\n",
		dev_priv->czclk_freq);
}

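/*
 * A pipe is considered to be in "HDR mode" when the only planes active on
 * it are HDR-capable planes (plus, possibly, the cursor).
 */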
static bool is_hdr_mode(const struct intel_crtc_state *crtc_state)
{
	return (crtc_state->active_planes &
		~(icl_hdr_plane_mask() | BIT(PLANE_CURSOR))) == 0;
}

/* WA Display #0827: Gen9:all */
static void
skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
{
	intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe),
		     DUPS1_GATING_DIS | DUPS2_GATING_DIS,
		     enable ? DUPS1_GATING_DIS | DUPS2_GATING_DIS : 0);
}

/* Wa_2006604312:icl,ehl */
static void
icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
		       bool enable)
{
	intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe),
		     DPFR_GATING_DIS,
		     enable ? DPFR_GATING_DIS : 0);
}

/* Wa_1604331009:icl,jsl,ehl */
static void
icl_wa_cursorclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
		       bool enable)
{
	intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe),
		     CURSOR_GATING_DIS,
		     enable ? CURSOR_GATING_DIS : 0);
}

static bool
is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->master_transcoder != INVALID_TRANSCODER;
}

bool
is_trans_port_sync_master(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->sync_mode_slaves_mask != 0;
}

bool
is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
{
	return is_trans_port_sync_master(crtc_state) ||
		is_trans_port_sync_slave(crtc_state);
}

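/*
 * joiner_pipes is a bitmask of all pipes ganged together by the pipe joiner.
 * The lowest set bit identifies the primary pipe, the remaining bits the
 * secondary pipes. E.g. joiner_pipes = BIT(PIPE_A) | BIT(PIPE_B) means pipe A
 * is the primary and pipe B its secondary.
 */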
static enum pipe joiner_primary_pipe(const struct intel_crtc_state *crtc_state)
{
	return ffs(crtc_state->joiner_pipes) - 1;
}

u8 intel_crtc_joiner_secondary_pipes(const struct intel_crtc_state *crtc_state)
{
	if (crtc_state->joiner_pipes)
		return crtc_state->joiner_pipes & ~BIT(joiner_primary_pipe(crtc_state));
	else
		return 0;
}

bool intel_crtc_is_joiner_secondary(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	return crtc_state->joiner_pipes &&
		crtc->pipe != joiner_primary_pipe(crtc_state);
}

bool intel_crtc_is_joiner_primary(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	return crtc_state->joiner_pipes &&
		crtc->pipe == joiner_primary_pipe(crtc_state);
}

static int intel_joiner_num_pipes(const struct intel_crtc_state *crtc_state)
{
	return hweight8(crtc_state->joiner_pipes);
}

u8 intel_crtc_joined_pipe_mask(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	return BIT(crtc->pipe) | crtc_state->joiner_pipes;
}

struct intel_crtc *intel_primary_crtc(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);

	if (intel_crtc_is_joiner_secondary(crtc_state))
		return intel_crtc_for_pipe(i915, joiner_primary_pipe(crtc_state));
	else
		return to_intel_crtc(crtc_state->uapi.crtc);
}

static void
intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (DISPLAY_VER(dev_priv) >= 4) {
		enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;

		/* Wait for the Pipe State to go off */
		if (intel_de_wait_for_clear(dev_priv, TRANSCONF(dev_priv, cpu_transcoder),
					    TRANSCONF_STATE_ENABLE, 100))
			drm_WARN(&dev_priv->drm, 1, "pipe_off wait timed out\n");
	} else {
		intel_wait_for_pipe_scanline_stopped(crtc);
	}
}

void assert_transcoder(struct drm_i915_private *dev_priv,
		       enum transcoder cpu_transcoder, bool state)
{
	bool cur_state;
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		state = true;

	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (wakeref) {
		u32 val = intel_de_read(dev_priv,
					TRANSCONF(dev_priv, cpu_transcoder));
		cur_state = !!(val & TRANSCONF_ENABLE);

		intel_display_power_put(dev_priv, power_domain, wakeref);
	} else {
		cur_state = false;
	}

	I915_STATE_WARN(dev_priv, cur_state != state,
			"transcoder %s assertion failure (expected %s, current %s)\n",
			transcoder_name(cpu_transcoder), str_on_off(state),
			str_on_off(cur_state));
}

static void assert_plane(struct intel_plane *plane, bool state)
{
	struct drm_i915_private *i915 = to_i915(plane->base.dev);
	enum pipe pipe;
	bool cur_state;

	cur_state = plane->get_hw_state(plane, &pipe);

	I915_STATE_WARN(i915, cur_state != state,
			"%s assertion failure (expected %s, current %s)\n",
			plane->base.name, str_on_off(state),
			str_on_off(cur_state));
}

#define assert_plane_enabled(p) assert_plane(p, true)
#define assert_plane_disabled(p) assert_plane(p, false)

static void assert_planes_disabled(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_plane *plane;

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
		assert_plane_disabled(plane);
}

void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
			 struct intel_digital_port *dig_port,
			 unsigned int expected_mask)
{
	u32 port_mask;
	i915_reg_t dpll_reg;

	switch (dig_port->base.port) {
	default:
		MISSING_CASE(dig_port->base.port);
		fallthrough;
	case PORT_B:
		port_mask = DPLL_PORTB_READY_MASK;
		dpll_reg = DPLL(dev_priv, 0);
		break;
	case PORT_C:
		port_mask = DPLL_PORTC_READY_MASK;
		dpll_reg = DPLL(dev_priv, 0);
		expected_mask <<= 4;
		break;
	case PORT_D:
		port_mask = DPLL_PORTD_READY_MASK;
		dpll_reg = DPIO_PHY_STATUS;
		break;
	}

	if (intel_de_wait(dev_priv, dpll_reg, port_mask, expected_mask, 1000))
		drm_WARN(&dev_priv->drm, 1,
			 "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
			 dig_port->base.base.base.id, dig_port->base.base.name,
			 intel_de_read(dev_priv, dpll_reg) & port_mask,
			 expected_mask);
}

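/*
 * Enable the transcoder/pipe described by @new_crtc_state. All planes must
 * already be disabled, and the relevant clock source (DSI PLL, pipe PLL or
 * FDI PLLs) is expected to be running before the pipe can pump pixels.
 */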
void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "enabling pipe %c\n", pipe_name(pipe));

	assert_planes_disabled(crtc);

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane. On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (HAS_GMCH(dev_priv)) {
		if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	} else {
		if (new_crtc_state->has_pch_encoder) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv,
						  intel_crtc_pch_transcoder(crtc));
			assert_fdi_tx_pll_enabled(dev_priv,
						  (enum pipe) cpu_transcoder);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	/* Wa_22012358565:adl-p */
	if (DISPLAY_VER(dev_priv) == 13)
		intel_de_rmw(dev_priv, PIPE_ARB_CTL(dev_priv, pipe),
			     0, PIPE_ARB_USE_PROG_SLOTS);

	if (DISPLAY_VER(dev_priv) >= 14) {
		u32 clear = DP_DSC_INSERT_SF_AT_EOL_WA;
		u32 set = 0;

		if (DISPLAY_VER(dev_priv) == 14)
			set |= DP_FEC_BS_JITTER_WA;

		intel_de_rmw(dev_priv,
			     hsw_chicken_trans_reg(dev_priv, cpu_transcoder),
			     clear, set);
	}

	val = intel_de_read(dev_priv, TRANSCONF(dev_priv, cpu_transcoder));
	if (val & TRANSCONF_ENABLE) {
		/* we keep both pipes enabled on 830 */
		drm_WARN_ON(&dev_priv->drm, !IS_I830(dev_priv));
		return;
	}

	/* Wa_1409098942:adlp+ */
	if (DISPLAY_VER(dev_priv) >= 13 &&
	    new_crtc_state->dsc.compression_enable) {
		val &= ~TRANSCONF_PIXEL_COUNT_SCALING_MASK;
		val |= REG_FIELD_PREP(TRANSCONF_PIXEL_COUNT_SCALING_MASK,
				      TRANSCONF_PIXEL_COUNT_SCALING_X4);
	}

	intel_de_write(dev_priv, TRANSCONF(dev_priv, cpu_transcoder),
		       val | TRANSCONF_ENABLE);
	intel_de_posting_read(dev_priv, TRANSCONF(dev_priv, cpu_transcoder));

	/*
	 * Until the pipe starts PIPEDSL reads will return a stale value,
	 * which causes an apparent vblank timestamp jump when PIPEDSL
	 * resets to its proper value. That also messes up the frame count
	 * when it's derived from the timestamps. So let's wait for the
	 * pipe to start properly before we call drm_crtc_vblank_on()
	 */
	if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
		intel_wait_for_pipe_scanline_moving(crtc);
}

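/*
 * Disable the transcoder/pipe described by @old_crtc_state and, unless the
 * pipe must be kept running (830), wait for it to actually turn off.
 */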
void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "disabling pipe %c\n", pipe_name(pipe));

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(crtc);

	val = intel_de_read(dev_priv, TRANSCONF(dev_priv, cpu_transcoder));
	if ((val & TRANSCONF_ENABLE) == 0)
		return;

	/*
	 * Double wide has implications for planes
	 * so best keep it disabled when not needed.
	 */
	if (old_crtc_state->double_wide)
		val &= ~TRANSCONF_DOUBLE_WIDE;

	/* Don't disable pipe or pipe PLLs if needed */
	if (!IS_I830(dev_priv))
		val &= ~TRANSCONF_ENABLE;

	/* Wa_1409098942:adlp+ */
	if (DISPLAY_VER(dev_priv) >= 13 &&
	    old_crtc_state->dsc.compression_enable)
		val &= ~TRANSCONF_PIXEL_COUNT_SCALING_MASK;

	intel_de_write(dev_priv, TRANSCONF(dev_priv, cpu_transcoder), val);

	if (DISPLAY_VER(dev_priv) >= 12)
		intel_de_rmw(dev_priv, hsw_chicken_trans_reg(dev_priv, cpu_transcoder),
			     FECSTALL_DIS_DPTSTREAM_DPTTG, 0);

	if ((val & TRANSCONF_ENABLE) == 0)
		intel_wait_for_pipe_off(old_crtc_state);
}

unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
{
	unsigned int size = 0;
	int i;

	for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
		size += rot_info->plane[i].dst_stride * rot_info->plane[i].width;

	return size;
}

unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info)
{
	unsigned int size = 0;
	int i;

	for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) {
		unsigned int plane_size;

		if (rem_info->plane[i].linear)
			plane_size = rem_info->plane[i].size;
		else
			plane_size = rem_info->plane[i].dst_stride * rem_info->plane[i].height;

		if (plane_size == 0)
			continue;

		if (rem_info->plane_alignment)
			size = ALIGN(size, rem_info->plane_alignment);

		size += plane_size;
	}

	return size;
}
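
/*
 * Illustrative example: with plane_alignment = 64 and two planes of
 * size 100 and 50, the loop computes ALIGN(0, 64) + 100 = 100, then
 * ALIGN(100, 64) + 50 = 178.
 */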

bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);

	return DISPLAY_VER(dev_priv) < 4 ||
		(plane->fbc && !plane_state->no_fbc_reason &&
		 plane_state->view.gtt.type == I915_GTT_VIEW_NORMAL);
}

/*
 * Convert the x/y offsets into a linear offset.
 * Only valid with 0/180 degree rotation, which is fine since linear
 * offset is only used with linear buffers on pre-hsw and tiled buffers
 * with gen2/3, and 90/270 degree rotations aren't supported on any of them.
 */
u32 intel_fb_xy_to_linear(int x, int y,
			  const struct intel_plane_state *state,
			  int color_plane)
{
	const struct drm_framebuffer *fb = state->hw.fb;
	unsigned int cpp = fb->format->cpp[color_plane];
	unsigned int pitch = state->view.color_plane[color_plane].mapping_stride;

	return y * pitch + x * cpp;
}
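
/*
 * Illustrative example: for a 4 bytes/pixel format (cpp = 4) with a
 * mapping stride of 4096 bytes, (x, y) = (16, 2) maps to the linear
 * offset 2 * 4096 + 16 * 4 = 8256 bytes.
 */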

/*
 * Add the x/y offsets derived from fb->offsets[] to the user
 * specified plane src x/y offsets. The resulting x/y offsets
 * specify the start of scanout from the beginning of the gtt mapping.
 */
void intel_add_fb_offsets(int *x, int *y,
			  const struct intel_plane_state *state,
			  int color_plane)
{
	*x += state->view.color_plane[color_plane].x;
	*y += state->view.color_plane[color_plane].y;
}

u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
			      u32 pixel_format, u64 modifier)
{
	struct intel_crtc *crtc;
	struct intel_plane *plane;

	if (!HAS_DISPLAY(dev_priv))
		return 0;

	/*
	 * We assume the primary plane for pipe A has
	 * the highest stride limits of them all;
	 * if pipe A happens to be disabled, use the
	 * first pipe from pipe_mask instead.
	 */
	crtc = intel_first_crtc(dev_priv);
	if (!crtc)
		return 0;

	plane = to_intel_plane(crtc->base.primary);

	return plane->max_stride(plane, pixel_format, modifier,
				 DRM_MODE_ROTATE_0);
}

void intel_set_plane_visible(struct intel_crtc_state *crtc_state,
			     struct intel_plane_state *plane_state,
			     bool visible)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);

	plane_state->uapi.visible = visible;

	if (visible)
		crtc_state->uapi.plane_mask |= drm_plane_mask(&plane->base);
	else
		crtc_state->uapi.plane_mask &= ~drm_plane_mask(&plane->base);
}

void intel_plane_fixup_bitmasks(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	struct drm_plane *plane;

	/*
	 * Active_planes aliases if multiple "primary" or cursor planes
	 * have been used on the same (or wrong) pipe. plane_mask uses
	 * unique ids, hence we can use that to reconstruct active_planes.
	 */
	crtc_state->enabled_planes = 0;
	crtc_state->active_planes = 0;

	drm_for_each_plane_mask(plane, &dev_priv->drm,
				crtc_state->uapi.plane_mask) {
		crtc_state->enabled_planes |= BIT(to_intel_plane(plane)->id);
		crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
	}
}

void intel_plane_disable_noatomic(struct intel_crtc *crtc,
				  struct intel_plane *plane)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct intel_plane_state *plane_state =
		to_intel_plane_state(plane->base.state);

	drm_dbg_kms(&dev_priv->drm,
		    "Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
		    plane->base.base.id, plane->base.name,
		    crtc->base.base.id, crtc->base.name);

	intel_set_plane_visible(crtc_state, plane_state, false);
	intel_plane_fixup_bitmasks(crtc_state);
	crtc_state->data_rate[plane->id] = 0;
	crtc_state->data_rate_y[plane->id] = 0;
	crtc_state->rel_data_rate[plane->id] = 0;
	crtc_state->rel_data_rate_y[plane->id] = 0;
	crtc_state->min_cdclk[plane->id] = 0;

	if ((crtc_state->active_planes & ~BIT(PLANE_CURSOR)) == 0 &&
	    hsw_ips_disable(crtc_state)) {
		crtc_state->ips_enabled = false;
		intel_crtc_wait_for_next_vblank(crtc);
	}

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH(dev_priv) &&
	    intel_set_memory_cxsr(dev_priv, false))
		intel_crtc_wait_for_next_vblank(crtc);

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 */
	if (DISPLAY_VER(dev_priv) == 2 && !crtc_state->active_planes)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);

	intel_plane_disable_arm(plane, crtc_state);
	intel_crtc_wait_for_next_vblank(crtc);
}

unsigned int
intel_plane_fence_y_offset(const struct intel_plane_state *plane_state)
{
	int x = 0, y = 0;

	intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
					  plane_state->view.color_plane[0].offset, 0);

	return y;
}

static void icl_set_pipe_chicken(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	u32 tmp;

	tmp = intel_de_read(dev_priv, PIPE_CHICKEN(pipe));

	/*
	 * Display WA #1153: icl
	 * enable hardware to bypass the alpha math
	 * and rounding for per-pixel values 00 and 0xff
	 */
	tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
	/*
	 * Display WA #1605353570: icl
	 * Set the pixel rounding bit to 1 for allowing
	 * passthrough of Frame buffer pixels unmodified
	 * across pipe
	 */
	tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;

	/*
	 * Underrun recovery must always be disabled on display 13+.
	 * DG2 chicken bit meaning is inverted compared to other platforms.
	 */
	if (IS_DG2(dev_priv))
		tmp &= ~UNDERRUN_RECOVERY_ENABLE_DG2;
	else if (DISPLAY_VER(dev_priv) >= 13)
		tmp |= UNDERRUN_RECOVERY_DISABLE_ADLP;

	/* Wa_14010547955:dg2 */
	if (IS_DG2(dev_priv))
		tmp |= DG2_RENDER_CCSTAG_4_3_EN;

	intel_de_write(dev_priv, PIPE_CHICKEN(pipe), tmp);
}

bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
{
	struct drm_crtc *crtc;
	bool cleanup_done;

	drm_for_each_crtc(crtc, &dev_priv->drm) {
		struct drm_crtc_commit *commit;

		spin_lock(&crtc->commit_lock);
		commit = list_first_entry_or_null(&crtc->commit_list,
						  struct drm_crtc_commit, commit_entry);
		cleanup_done = commit ?
			try_wait_for_completion(&commit->cleanup_done) : true;
		spin_unlock(&crtc->commit_lock);

		if (cleanup_done)
			continue;

		intel_crtc_wait_for_next_vblank(to_intel_crtc(crtc));

		return true;
	}

	return false;
}

/*
 * Finds the encoder associated with the given CRTC. This can only be
 * used when we know that the CRTC isn't feeding multiple encoders!
 */
struct intel_encoder *
intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
			   const struct intel_crtc_state *crtc_state)
{
	const struct drm_connector_state *connector_state;
	const struct drm_connector *connector;
	struct intel_encoder *encoder = NULL;
	struct intel_crtc *primary_crtc;
	int num_encoders = 0;
	int i;

	primary_crtc = intel_primary_crtc(crtc_state);

	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
		if (connector_state->crtc != &primary_crtc->base)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);
		num_encoders++;
	}

	drm_WARN(state->base.dev, num_encoders != 1,
		 "%d encoders for pipe %c\n",
		 num_encoders, pipe_name(primary_crtc->pipe));

	return encoder;
}

static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
	enum pipe pipe = crtc->pipe;
	int width = drm_rect_width(dst);
	int height = drm_rect_height(dst);
	int x = dst->x1;
	int y = dst->y1;

	if (!crtc_state->pch_pfit.enabled)
		return;

	/* Force use of hard-coded filter coefficients
	 * as some pre-programmed values are broken,
	 * e.g. x201.
	 */
	if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
		intel_de_write_fw(dev_priv, PF_CTL(pipe), PF_ENABLE |
				  PF_FILTER_MED_3x3 | PF_PIPE_SEL_IVB(pipe));
	else
		intel_de_write_fw(dev_priv, PF_CTL(pipe), PF_ENABLE |
				  PF_FILTER_MED_3x3);
	intel_de_write_fw(dev_priv, PF_WIN_POS(pipe),
			  PF_WIN_XPOS(x) | PF_WIN_YPOS(y));
	intel_de_write_fw(dev_priv, PF_WIN_SZ(pipe),
			  PF_WIN_XSIZE(width) | PF_WIN_YSIZE(height));
}

static void intel_crtc_dpms_overlay_disable(struct intel_crtc *crtc)
{
	if (crtc->overlay)
		(void) intel_overlay_switch_off(crtc->overlay);

	/* Let userspace switch the overlay on again. In most cases userspace
	 * has to recompute where to put it anyway.
	 */
}

static bool needs_nv12_wa(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);

	if (!crtc_state->nv12_planes)
		return false;

	/* WA Display #0827: Gen9:all */
	if (DISPLAY_VER(dev_priv) == 9)
		return true;

	return false;
}

static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);

	/* Wa_2006604312:icl,ehl */
	if (crtc_state->scaler_state.scaler_users > 0 && DISPLAY_VER(dev_priv) == 11)
		return true;

	return false;
}

static bool needs_cursorclk_wa(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);

	/* Wa_1604331009:icl,jsl,ehl */
	if (is_hdr_mode(crtc_state) &&
	    crtc_state->active_planes & BIT(PLANE_CURSOR) &&
	    DISPLAY_VER(dev_priv) == 11)
		return true;

	return false;
}

static void intel_async_flip_vtd_wa(struct drm_i915_private *i915,
				    enum pipe pipe, bool enable)
{
	if (DISPLAY_VER(i915) == 9) {
		/*
		 * "Plane N stretch max must be programmed to 11b (x1)
		 *  when Async flips are enabled on that plane."
		 */
		intel_de_rmw(i915, CHICKEN_PIPESL_1(pipe),
			     SKL_PLANE1_STRETCH_MAX_MASK,
			     enable ? SKL_PLANE1_STRETCH_MAX_X1 : SKL_PLANE1_STRETCH_MAX_X8);
	} else {
		/* Also needed on HSW/BDW albeit undocumented */
		intel_de_rmw(i915, CHICKEN_PIPESL_1(pipe),
			     HSW_PRI_STRETCH_MAX_MASK,
			     enable ? HSW_PRI_STRETCH_MAX_X1 : HSW_PRI_STRETCH_MAX_X8);
	}
}

static bool needs_async_flip_vtd_wa(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);

	return crtc_state->uapi.async_flip && i915_vtd_active(i915) &&
		(DISPLAY_VER(i915) == 9 || IS_BROADWELL(i915) || IS_HASWELL(i915));
}

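/*
 * The intel_encoders_*() helpers below all follow the same pattern: walk
 * the connectors in the atomic state, skip those not driven by @crtc, and
 * invoke the corresponding optional encoder hook for the ones that remain.
 */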
static void intel_encoders_audio_enable(struct intel_atomic_state *state,
					struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	int i;

	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(conn_state->best_encoder);

		if (conn_state->crtc != &crtc->base)
			continue;

		if (encoder->audio_enable)
			encoder->audio_enable(encoder, crtc_state, conn_state);
	}
}

static void intel_encoders_audio_disable(struct intel_atomic_state *state,
					 struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct drm_connector_state *old_conn_state;
	struct drm_connector *conn;
	int i;

	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(old_conn_state->best_encoder);

		if (old_conn_state->crtc != &crtc->base)
			continue;

		if (encoder->audio_disable)
			encoder->audio_disable(encoder, old_crtc_state, old_conn_state);
	}
}

#define is_enabling(feature, old_crtc_state, new_crtc_state) \
	((!(old_crtc_state)->feature || intel_crtc_needs_modeset(new_crtc_state)) && \
	 (new_crtc_state)->feature)
#define is_disabling(feature, old_crtc_state, new_crtc_state) \
	((old_crtc_state)->feature && \
	 (!(new_crtc_state)->feature || intel_crtc_needs_modeset(new_crtc_state)))

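/*
 * is_enabling()/is_disabling() (above) treat a full modeset as a transition
 * through the "off" state: e.g. is_enabling(active_planes, ...) is true both
 * when planes go from none to some on a fastset, and whenever the new state
 * has active planes across a modeset.
 */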
static bool planes_enabling(const struct intel_crtc_state *old_crtc_state,
			    const struct intel_crtc_state *new_crtc_state)
{
	if (!new_crtc_state->hw.active)
		return false;

	return is_enabling(active_planes, old_crtc_state, new_crtc_state);
}

static bool planes_disabling(const struct intel_crtc_state *old_crtc_state,
			     const struct intel_crtc_state *new_crtc_state)
{
	if (!old_crtc_state->hw.active)
		return false;

	return is_disabling(active_planes, old_crtc_state, new_crtc_state);
}

static bool vrr_params_changed(const struct intel_crtc_state *old_crtc_state,
			       const struct intel_crtc_state *new_crtc_state)
{
	return old_crtc_state->vrr.flipline != new_crtc_state->vrr.flipline ||
		old_crtc_state->vrr.vmin != new_crtc_state->vrr.vmin ||
		old_crtc_state->vrr.vmax != new_crtc_state->vrr.vmax ||
		old_crtc_state->vrr.guardband != new_crtc_state->vrr.guardband ||
		old_crtc_state->vrr.pipeline_full != new_crtc_state->vrr.pipeline_full;
}

static bool cmrr_params_changed(const struct intel_crtc_state *old_crtc_state,
				const struct intel_crtc_state *new_crtc_state)
{
	return old_crtc_state->cmrr.cmrr_m != new_crtc_state->cmrr.cmrr_m ||
		old_crtc_state->cmrr.cmrr_n != new_crtc_state->cmrr.cmrr_n;
}

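/*
 * VRR counts as (re)enabling not only when it goes from off to on, but also
 * when it stays enabled while the M/N or LRR values, or any of the VRR
 * timing parameters, change and thus require reprogramming.
 */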
static bool intel_crtc_vrr_enabling(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	if (!new_crtc_state->hw.active)
		return false;

	return is_enabling(vrr.enable, old_crtc_state, new_crtc_state) ||
		(new_crtc_state->vrr.enable &&
		 (new_crtc_state->update_m_n || new_crtc_state->update_lrr ||
		  vrr_params_changed(old_crtc_state, new_crtc_state)));
}

bool intel_crtc_vrr_disabling(struct intel_atomic_state *state,
			      struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	if (!old_crtc_state->hw.active)
		return false;

	return is_disabling(vrr.enable, old_crtc_state, new_crtc_state) ||
		(old_crtc_state->vrr.enable &&
		 (new_crtc_state->update_m_n || new_crtc_state->update_lrr ||
		  vrr_params_changed(old_crtc_state, new_crtc_state)));
}

static bool audio_enabling(const struct intel_crtc_state *old_crtc_state,
			   const struct intel_crtc_state *new_crtc_state)
{
	if (!new_crtc_state->hw.active)
		return false;

	return is_enabling(has_audio, old_crtc_state, new_crtc_state) ||
		(new_crtc_state->has_audio &&
		 memcmp(old_crtc_state->eld, new_crtc_state->eld, MAX_ELD_BYTES) != 0);
}

static bool audio_disabling(const struct intel_crtc_state *old_crtc_state,
			    const struct intel_crtc_state *new_crtc_state)
{
	if (!old_crtc_state->hw.active)
		return false;

	return is_disabling(has_audio, old_crtc_state, new_crtc_state) ||
		(old_crtc_state->has_audio &&
		 memcmp(old_crtc_state->eld, new_crtc_state->eld, MAX_ELD_BYTES) != 0);
}

#undef is_disabling
#undef is_enabling

static void intel_post_plane_update(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	enum pipe pipe = crtc->pipe;

	intel_psr_post_plane_update(state, crtc);

	intel_frontbuffer_flip(dev_priv, new_crtc_state->fb_bits);

	if (new_crtc_state->update_wm_post && new_crtc_state->hw.active)
		intel_update_watermarks(dev_priv);

	intel_fbc_post_update(state, crtc);

	if (needs_async_flip_vtd_wa(old_crtc_state) &&
	    !needs_async_flip_vtd_wa(new_crtc_state))
		intel_async_flip_vtd_wa(dev_priv, pipe, false);

	if (needs_nv12_wa(old_crtc_state) &&
	    !needs_nv12_wa(new_crtc_state))
		skl_wa_827(dev_priv, pipe, false);

	if (needs_scalerclk_wa(old_crtc_state) &&
	    !needs_scalerclk_wa(new_crtc_state))
		icl_wa_scalerclkgating(dev_priv, pipe, false);

	if (needs_cursorclk_wa(old_crtc_state) &&
	    !needs_cursorclk_wa(new_crtc_state))
		icl_wa_cursorclkgating(dev_priv, pipe, false);

	if (intel_crtc_needs_color_update(new_crtc_state))
		intel_color_post_update(new_crtc_state);

	if (audio_enabling(old_crtc_state, new_crtc_state))
		intel_encoders_audio_enable(state, crtc);
}

static void intel_crtc_enable_flip_done(struct intel_atomic_state *state,
					struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	u8 update_planes = crtc_state->update_planes;
	const struct intel_plane_state __maybe_unused *plane_state;
	struct intel_plane *plane;
	int i;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe == crtc->pipe &&
		    update_planes & BIT(plane->id))
			plane->enable_flip_done(plane);
	}
}

static void intel_crtc_disable_flip_done(struct intel_atomic_state *state,
					 struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	u8 update_planes = crtc_state->update_planes;
	const struct intel_plane_state __maybe_unused *plane_state;
	struct intel_plane *plane;
	int i;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe == crtc->pipe &&
		    update_planes & BIT(plane->id))
			plane->disable_flip_done(plane);
	}
}

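/*
 * WA for platforms where the async address update enable bit is double
 * buffered and only latched at the start of vblank: when async flip gets
 * disabled on a plane, re-arm it one last time with the async flip bit
 * cleared and wait for a vblank so the change actually lands.
 */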
static void intel_crtc_async_flip_disable_wa(struct intel_atomic_state *state,
					     struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	u8 disable_async_flip_planes = old_crtc_state->async_flip_planes &
				       ~new_crtc_state->async_flip_planes;
	const struct intel_plane_state *old_plane_state;
	struct intel_plane *plane;
	bool need_vbl_wait = false;
	int i;

	for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
		if (plane->need_async_flip_toggle_wa &&
		    plane->pipe == crtc->pipe &&
		    disable_async_flip_planes & BIT(plane->id)) {
			/*
			 * Apart from the async flip bit we want to
			 * preserve the old state for the plane.
			 */
			intel_plane_async_flip(plane, old_crtc_state,
					       old_plane_state, false);
			need_vbl_wait = true;
		}
	}

	if (need_vbl_wait)
		intel_crtc_wait_for_next_vblank(crtc);
}

static void intel_pre_plane_update(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	enum pipe pipe = crtc->pipe;

	if (intel_crtc_vrr_disabling(state, crtc)) {
		intel_vrr_disable(old_crtc_state);
		intel_crtc_update_active_timings(old_crtc_state, false);
	}

	if (audio_disabling(old_crtc_state, new_crtc_state))
		intel_encoders_audio_disable(state, crtc);

	intel_drrs_deactivate(old_crtc_state);

	intel_psr_pre_plane_update(state, crtc);

	if (hsw_ips_pre_update(state, crtc))
		intel_crtc_wait_for_next_vblank(crtc);

	if (intel_fbc_pre_update(state, crtc))
		intel_crtc_wait_for_next_vblank(crtc);

	if (!needs_async_flip_vtd_wa(old_crtc_state) &&
	    needs_async_flip_vtd_wa(new_crtc_state))
		intel_async_flip_vtd_wa(dev_priv, pipe, true);

	/* Display WA 827 */
	if (!needs_nv12_wa(old_crtc_state) &&
	    needs_nv12_wa(new_crtc_state))
		skl_wa_827(dev_priv, pipe, true);

	/* Wa_2006604312:icl,ehl */
	if (!needs_scalerclk_wa(old_crtc_state) &&
	    needs_scalerclk_wa(new_crtc_state))
		icl_wa_scalerclkgating(dev_priv, pipe, true);

	/* Wa_1604331009:icl,jsl,ehl */
	if (!needs_cursorclk_wa(old_crtc_state) &&
	    needs_cursorclk_wa(new_crtc_state))
		icl_wa_cursorclkgating(dev_priv, pipe, true);

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH(dev_priv) && old_crtc_state->hw.active &&
	    new_crtc_state->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
		intel_crtc_wait_for_next_vblank(crtc);

	/*
	 * IVB workaround: must disable low power watermarks for at least
	 * one frame before enabling scaling. LP watermarks can be re-enabled
	 * when scaling is disabled.
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 */
	if (old_crtc_state->hw.active &&
	    new_crtc_state->disable_lp_wm && ilk_disable_lp_wm(dev_priv))
		intel_crtc_wait_for_next_vblank(crtc);

	/*
	 * If we're doing a modeset we don't need to do any
	 * pre-vblank watermark programming here.
	 */
	if (!intel_crtc_needs_modeset(new_crtc_state)) {
		/*
		 * For platforms that support atomic watermarks, program the
		 * 'intermediate' watermarks immediately. On pre-gen9 platforms, these
		 * will be the intermediate values that are safe for both pre- and
		 * post- vblank; when vblank happens, the 'active' values will be set
		 * to the final 'target' values and we'll do this again to get the
		 * optimal watermarks. For gen9+ platforms, the values we program here
		 * will be the final target values which will get automatically latched
		 * at vblank time; no further programming will be necessary.
		 *
		 * If a platform hasn't been transitioned to atomic watermarks yet,
		 * we'll continue to update watermarks the old way, if flags tell
		 * us to.
		 */
		if (!intel_initial_watermarks(state, crtc))
			if (new_crtc_state->update_wm_pre)
				intel_update_watermarks(dev_priv);
	}

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 *
	 * We do this after .initial_watermarks() so that we have a
	 * chance of catching underruns with the intermediate watermarks
	 * vs. the old plane configuration.
	 */
	if (DISPLAY_VER(dev_priv) == 2 && planes_disabling(old_crtc_state, new_crtc_state))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	/*
	 * WA for platforms where async address update enable bit
	 * is double buffered and only latched at start of vblank.
	 */
	if (old_crtc_state->async_flip_planes & ~new_crtc_state->async_flip_planes)
		intel_crtc_async_flip_disable_wa(state, crtc);
}

static void intel_crtc_disable_planes(struct intel_atomic_state *state,
				      struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	unsigned int update_mask = new_crtc_state->update_planes;
	const struct intel_plane_state *old_plane_state;
	struct intel_plane *plane;
	unsigned fb_bits = 0;
	int i;

	intel_crtc_dpms_overlay_disable(crtc);

	for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
		if (crtc->pipe != plane->pipe ||
		    !(update_mask & BIT(plane->id)))
			continue;

		intel_plane_disable_arm(plane, new_crtc_state);

		if (old_plane_state->uapi.visible)
			fb_bits |= plane->frontbuffer_bit;
	}

	intel_frontbuffer_flip(dev_priv, fb_bits);
}

static void intel_encoders_update_prepare(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	struct intel_crtc *crtc;
	int i;

	/*
	 * Make sure the DPLL state is up-to-date for fastset TypeC ports after non-blocking commits.
	 * TODO: Update the DPLL state for all cases in the encoder->update_prepare() hook.
	 */
	if (i915->display.dpll.mgr) {
		for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
			if (intel_crtc_needs_modeset(new_crtc_state))
				continue;

			new_crtc_state->shared_dpll = old_crtc_state->shared_dpll;
			new_crtc_state->dpll_hw_state = old_crtc_state->dpll_hw_state;
		}
	}
}

static void intel_encoders_pre_pll_enable(struct intel_atomic_state *state,
					  struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	int i;

	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(conn_state->best_encoder);

		if (conn_state->crtc != &crtc->base)
			continue;

		if (encoder->pre_pll_enable)
			encoder->pre_pll_enable(state, encoder,
						crtc_state, conn_state);
	}
}

static void intel_encoders_pre_enable(struct intel_atomic_state *state,
				      struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	int i;

	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(conn_state->best_encoder);

		if (conn_state->crtc != &crtc->base)
			continue;

		if (encoder->pre_enable)
			encoder->pre_enable(state, encoder,
					    crtc_state, conn_state);
	}
}

static void intel_encoders_enable(struct intel_atomic_state *state,
				  struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	int i;

	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(conn_state->best_encoder);

		if (conn_state->crtc != &crtc->base)
			continue;

		if (encoder->enable)
			encoder->enable(state, encoder,
					crtc_state, conn_state);
		intel_opregion_notify_encoder(encoder, true);
	}
}

static void intel_encoders_disable(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct drm_connector_state *old_conn_state;
	struct drm_connector *conn;
	int i;

	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(old_conn_state->best_encoder);

		if (old_conn_state->crtc != &crtc->base)
			continue;

		intel_opregion_notify_encoder(encoder, false);
		if (encoder->disable)
			encoder->disable(state, encoder,
					 old_crtc_state, old_conn_state);
	}
}

static void intel_encoders_post_disable(struct intel_atomic_state *state,
					struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct drm_connector_state *old_conn_state;
	struct drm_connector *conn;
	int i;

	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(old_conn_state->best_encoder);

		if (old_conn_state->crtc != &crtc->base)
			continue;

		if (encoder->post_disable)
			encoder->post_disable(state, encoder,
					      old_crtc_state, old_conn_state);
	}
}

static void intel_encoders_post_pll_disable(struct intel_atomic_state *state,
					    struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct drm_connector_state *old_conn_state;
	struct drm_connector *conn;
	int i;

	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(old_conn_state->best_encoder);

		if (old_conn_state->crtc != &crtc->base)
			continue;

		if (encoder->post_pll_disable)
			encoder->post_pll_disable(state, encoder,
						  old_crtc_state, old_conn_state);
	}
}

static void intel_encoders_update_pipe(struct intel_atomic_state *state,
				       struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	int i;

	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(conn_state->best_encoder);

		if (conn_state->crtc != &crtc->base)
			continue;

		if (encoder->update_pipe)
			encoder->update_pipe(state, encoder,
					     crtc_state, conn_state);
	}
}

static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct intel_plane *plane = to_intel_plane(crtc->base.primary);

	plane->disable_arm(plane, crtc_state);
}

static void ilk_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	if (crtc_state->has_pch_encoder) {
		intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
					       &crtc_state->fdi_m_n);
	} else if (intel_crtc_has_dp_encoder(crtc_state)) {
		intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
					       &crtc_state->dp_m_n);
		intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder,
					       &crtc_state->dp_m2_n2);
	}

	intel_set_transcoder_timings(crtc_state);

	ilk_set_pipeconf(crtc_state);
}

static void ilk_crtc_enable(struct intel_atomic_state *state,
			    struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	/*
	 * Sometimes spurious CPU pipe underruns happen during FDI
	 * training, at least with VGA+HDMI cloning. Suppress them.
	 *
	 * On ILK we get occasional spurious CPU pipe underruns
	 * between eDP port A enable and vdd enable. Also PCH port
	 * enable seems to result in the occasional CPU pipe underrun.
	 *
	 * Spurious PCH underruns also occur during PCH enabling.
	 */
1554 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
1555 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
1556
1557 ilk_configure_cpu_transcoder(new_crtc_state);
1558
1559 intel_set_pipe_src_size(new_crtc_state);
1560
1561 crtc->active = true;
1562
1563 intel_encoders_pre_enable(state, crtc);
1564
1565 if (new_crtc_state->has_pch_encoder) {
1566 ilk_pch_pre_enable(state, crtc);
1567 } else {
1568 assert_fdi_tx_disabled(dev_priv, pipe);
1569 assert_fdi_rx_disabled(dev_priv, pipe);
1570 }
1571
1572 ilk_pfit_enable(new_crtc_state);
1573
1574 /*
1575 * On ILK+ the LUT must be loaded before the pipe is running, but
1576 * with clocks enabled
1577 */
1578 intel_color_load_luts(new_crtc_state);
1579 intel_color_commit_noarm(new_crtc_state);
1580 intel_color_commit_arm(new_crtc_state);
1581 /* update DSPCNTR to configure gamma for pipe bottom color */
1582 intel_disable_primary_plane(new_crtc_state);
1583
1584 intel_initial_watermarks(state, crtc);
1585 intel_enable_transcoder(new_crtc_state);
1586
1587 if (new_crtc_state->has_pch_encoder)
1588 ilk_pch_enable(state, crtc);
1589
1590 intel_crtc_vblank_on(new_crtc_state);
1591
1592 intel_encoders_enable(state, crtc);
1593
1594 if (HAS_PCH_CPT(dev_priv))
1595 intel_wait_for_pipe_scanline_moving(crtc);
1596
1597 /*
1598 * Must wait for vblank to avoid spurious PCH FIFO underruns.
1599 * And a second vblank wait is needed at least on ILK with
1600 * some interlaced HDMI modes. Let's do the double wait always
1601 * in case there are more corner cases we don't know about.
1602 */
1603 if (new_crtc_state->has_pch_encoder) {
1604 intel_crtc_wait_for_next_vblank(crtc);
1605 intel_crtc_wait_for_next_vblank(crtc);
1606 }
1607 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
1608 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
1609 }
1610
1611 /* Display WA #1180: WaDisableScalarClockGating: glk */
1612 static bool glk_need_scaler_clock_gating_wa(const struct intel_crtc_state *crtc_state)
1613 {
1614 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1615
1616 return DISPLAY_VER(i915) == 10 && crtc_state->pch_pfit.enabled;
1617 }
1618
1619 static void glk_pipe_scaler_clock_gating_wa(struct intel_crtc *crtc, bool enable)
1620 {
1621 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
1622 u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
1623
1624 intel_de_rmw(i915, CLKGATE_DIS_PSL(crtc->pipe),
1625 mask, enable ? mask : 0);
1626 }
1627
1628 static void hsw_set_linetime_wm(const struct intel_crtc_state *crtc_state)
1629 {
1630 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1631 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1632
1633 intel_de_write(dev_priv, WM_LINETIME(crtc->pipe),
1634 HSW_LINETIME(crtc_state->linetime) |
1635 HSW_IPS_LINETIME(crtc_state->ips_linetime));
1636 }
1637
1638 static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
1639 {
1640 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1641 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
1642
1643 intel_de_rmw(i915, hsw_chicken_trans_reg(i915, crtc_state->cpu_transcoder),
1644 HSW_FRAME_START_DELAY_MASK,
1645 HSW_FRAME_START_DELAY(crtc_state->framestart_delay - 1));
1646 }
1647
1648 static void hsw_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state)
1649 {
1650 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1651 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1652 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
1653
1654 if (crtc_state->has_pch_encoder) {
1655 intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
1656 &crtc_state->fdi_m_n);
1657 } else if (intel_crtc_has_dp_encoder(crtc_state)) {
1658 intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
1659 &crtc_state->dp_m_n);
1660 intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder,
1661 &crtc_state->dp_m2_n2);
1662 }
1663
1664 intel_set_transcoder_timings(crtc_state);
1665 if (HAS_VRR(dev_priv))
1666 intel_vrr_set_transcoder_timings(crtc_state);
1667
1668 if (cpu_transcoder != TRANSCODER_EDP)
1669 intel_de_write(dev_priv, TRANS_MULT(dev_priv, cpu_transcoder),
1670 crtc_state->pixel_multiplier - 1);
1671
1672 hsw_set_frame_start_delay(crtc_state);
1673
1674 hsw_set_transconf(crtc_state);
1675 }
1676
1677 static void hsw_crtc_enable(struct intel_atomic_state *state,
1678 struct intel_crtc *crtc)
1679 {
1680 const struct intel_crtc_state *new_crtc_state =
1681 intel_atomic_get_new_crtc_state(state, crtc);
1682 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1683 enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
1684 struct intel_crtc *pipe_crtc;
1685
1686 if (drm_WARN_ON(&dev_priv->drm, crtc->active))
1687 return;
1688
1689 for_each_intel_crtc_in_pipe_mask_reverse(&dev_priv->drm, pipe_crtc,
1690 intel_crtc_joined_pipe_mask(new_crtc_state))
1691 intel_dmc_enable_pipe(dev_priv, pipe_crtc->pipe);
1692
1693 intel_encoders_pre_pll_enable(state, crtc);
1694
1695 for_each_intel_crtc_in_pipe_mask_reverse(&dev_priv->drm, pipe_crtc,
1696 intel_crtc_joined_pipe_mask(new_crtc_state)) {
1697 const struct intel_crtc_state *pipe_crtc_state =
1698 intel_atomic_get_new_crtc_state(state, pipe_crtc);
1699
1700 if (pipe_crtc_state->shared_dpll)
1701 intel_enable_shared_dpll(pipe_crtc_state);
1702 }
1703
1704 intel_encoders_pre_enable(state, crtc);
1705
1706 for_each_intel_crtc_in_pipe_mask_reverse(&dev_priv->drm, pipe_crtc,
1707 intel_crtc_joined_pipe_mask(new_crtc_state)) {
1708 const struct intel_crtc_state *pipe_crtc_state =
1709 intel_atomic_get_new_crtc_state(state, pipe_crtc);
1710
1711 intel_dsc_enable(pipe_crtc_state);
1712
1713 if (DISPLAY_VER(dev_priv) >= 13)
1714 intel_uncompressed_joiner_enable(pipe_crtc_state);
1715
1716 intel_set_pipe_src_size(pipe_crtc_state);
1717
1718 if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
1719 bdw_set_pipe_misc(pipe_crtc_state);
1720 }
1721
1722 if (!transcoder_is_dsi(cpu_transcoder))
1723 hsw_configure_cpu_transcoder(new_crtc_state);
1724
1725 for_each_intel_crtc_in_pipe_mask_reverse(&dev_priv->drm, pipe_crtc,
1726 intel_crtc_joined_pipe_mask(new_crtc_state)) {
1727 const struct intel_crtc_state *pipe_crtc_state =
1728 intel_atomic_get_new_crtc_state(state, pipe_crtc);
1729
1730 pipe_crtc->active = true;
1731
1732 if (glk_need_scaler_clock_gating_wa(pipe_crtc_state))
1733 glk_pipe_scaler_clock_gating_wa(pipe_crtc, true);
1734
1735 if (DISPLAY_VER(dev_priv) >= 9)
1736 skl_pfit_enable(pipe_crtc_state);
1737 else
1738 ilk_pfit_enable(pipe_crtc_state);
1739
1740 /*
1741 * On ILK+ the LUT must be loaded before the pipe is running, but
1742 * with clocks enabled
1743 */
1744 intel_color_load_luts(pipe_crtc_state);
1745 intel_color_commit_noarm(pipe_crtc_state);
1746 intel_color_commit_arm(pipe_crtc_state);
1747 /* update DSPCNTR to configure gamma/csc for pipe bottom color */
1748 if (DISPLAY_VER(dev_priv) < 9)
1749 intel_disable_primary_plane(pipe_crtc_state);
1750
1751 hsw_set_linetime_wm(pipe_crtc_state);
1752
1753 if (DISPLAY_VER(dev_priv) >= 11)
1754 icl_set_pipe_chicken(pipe_crtc_state);
1755
1756 intel_initial_watermarks(state, pipe_crtc);
1757 }
1758
1759 intel_encoders_enable(state, crtc);
1760
1761 for_each_intel_crtc_in_pipe_mask_reverse(&dev_priv->drm, pipe_crtc,
1762 intel_crtc_joined_pipe_mask(new_crtc_state)) {
1763 const struct intel_crtc_state *pipe_crtc_state =
1764 intel_atomic_get_new_crtc_state(state, pipe_crtc);
1765 enum pipe hsw_workaround_pipe;
1766
1767 if (glk_need_scaler_clock_gating_wa(pipe_crtc_state)) {
1768 intel_crtc_wait_for_next_vblank(pipe_crtc);
1769 glk_pipe_scaler_clock_gating_wa(pipe_crtc, false);
1770 }
1771
1772 /*
1773 * If we change the relative order between pipe/planes
1774 * enabling, we need to change the workaround.
1775 */
1776 hsw_workaround_pipe = pipe_crtc_state->hsw_workaround_pipe;
1777 if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
1778 struct intel_crtc *wa_crtc =
1779 intel_crtc_for_pipe(dev_priv, hsw_workaround_pipe);
1780
1781 intel_crtc_wait_for_next_vblank(wa_crtc);
1782 intel_crtc_wait_for_next_vblank(wa_crtc);
1783 }
1784 }
1785 }
1786
1787 void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state)
1788 {
1789 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
1790 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1791 enum pipe pipe = crtc->pipe;
1792
1793 /* To avoid upsetting the power well on Haswell, only disable the pfit if
1794 * it's in use. The hw state code will make sure we get this right. */
1795 if (!old_crtc_state->pch_pfit.enabled)
1796 return;
1797
1798 intel_de_write_fw(dev_priv, PF_CTL(pipe), 0);
1799 intel_de_write_fw(dev_priv, PF_WIN_POS(pipe), 0);
1800 intel_de_write_fw(dev_priv, PF_WIN_SZ(pipe), 0);
1801 }
1802
1803 static void ilk_crtc_disable(struct intel_atomic_state *state,
1804 struct intel_crtc *crtc)
1805 {
1806 const struct intel_crtc_state *old_crtc_state =
1807 intel_atomic_get_old_crtc_state(state, crtc);
1808 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1809 enum pipe pipe = crtc->pipe;
1810
1811 /*
1812 * Sometimes spurious CPU pipe underruns happen when the
1813 * pipe is already disabled, but FDI RX/TX is still enabled.
1814 * Happens at least with VGA+HDMI cloning. Suppress them.
1815 */
1816 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
1817 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
1818
1819 intel_encoders_disable(state, crtc);
1820
1821 intel_crtc_vblank_off(old_crtc_state);
1822
1823 intel_disable_transcoder(old_crtc_state);
1824
1825 ilk_pfit_disable(old_crtc_state);
1826
1827 if (old_crtc_state->has_pch_encoder)
1828 ilk_pch_disable(state, crtc);
1829
1830 intel_encoders_post_disable(state, crtc);
1831
1832 if (old_crtc_state->has_pch_encoder)
1833 ilk_pch_post_disable(state, crtc);
1834
1835 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
1836 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
1837
1838 intel_disable_shared_dpll(old_crtc_state);
1839 }
1840
1841 static void hsw_crtc_disable(struct intel_atomic_state *state,
1842 struct intel_crtc *crtc)
1843 {
1844 const struct intel_crtc_state *old_crtc_state =
1845 intel_atomic_get_old_crtc_state(state, crtc);
1846 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
1847 struct intel_crtc *pipe_crtc;
1848
1849 /*
1850 * FIXME collapse everything to one hook.
1851 * Need care with mst->ddi interactions.
1852 */
1853 intel_encoders_disable(state, crtc);
1854 intel_encoders_post_disable(state, crtc);
1855
1856 for_each_intel_crtc_in_pipe_mask(&i915->drm, pipe_crtc,
1857 intel_crtc_joined_pipe_mask(old_crtc_state)) {
1858 const struct intel_crtc_state *old_pipe_crtc_state =
1859 intel_atomic_get_old_crtc_state(state, pipe_crtc);
1860
1861 intel_disable_shared_dpll(old_pipe_crtc_state);
1862 }
1863
1864 intel_encoders_post_pll_disable(state, crtc);
1865
1866 for_each_intel_crtc_in_pipe_mask(&i915->drm, pipe_crtc,
1867 intel_crtc_joined_pipe_mask(old_crtc_state))
1868 intel_dmc_disable_pipe(i915, pipe_crtc->pipe);
1869 }
1870
1871 static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
1872 {
1873 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1874 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1875
1876 if (!crtc_state->gmch_pfit.control)
1877 return;
1878
1879 /*
1880 * The panel fitter should only be adjusted whilst the pipe is disabled,
1881 * according to register description and PRM.
1882 */
1883 drm_WARN_ON(&dev_priv->drm,
1884 intel_de_read(dev_priv, PFIT_CONTROL(dev_priv)) & PFIT_ENABLE);
1885 assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder);
1886
1887 intel_de_write(dev_priv, PFIT_PGM_RATIOS(dev_priv),
1888 crtc_state->gmch_pfit.pgm_ratios);
1889 intel_de_write(dev_priv, PFIT_CONTROL(dev_priv),
1890 crtc_state->gmch_pfit.control);
1891
1892 /* Border color in case we don't scale up to the full screen. Black by
1893 * default, change to something else for debugging. */
1894 intel_de_write(dev_priv, BCLRPAT(dev_priv, crtc->pipe), 0);
1895 }
1896
1897 /* Prefer intel_encoder_is_combo() */
1898 bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
1899 {
1900 if (phy == PHY_NONE)
1901 return false;
1902 else if (IS_ALDERLAKE_S(dev_priv))
1903 return phy <= PHY_E;
1904 else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
1905 return phy <= PHY_D;
1906 else if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv))
1907 return phy <= PHY_C;
1908 else if (IS_ALDERLAKE_P(dev_priv) || IS_DISPLAY_VER(dev_priv, 11, 12))
1909 return phy <= PHY_B;
1910 else
1911 /*
1912 * DG2 outputs labelled as "combo PHY" in the bspec use
1913 * SNPS PHYs with completely different programming,
1914 * hence we always return false here.
1915 */
1916 return false;
1917 }
1918
1919 /* Prefer intel_encoder_is_tc() */
1920 bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
1921 {
1922 /*
1923 * Discrete GPU PHYs are not attached to a FIA, so they support
1924 * neither legacy nor non-legacy TC and only support native DP/HDMI.
1925 */
1926 if (IS_DGFX(dev_priv))
1927 return false;
1928
1929 if (DISPLAY_VER(dev_priv) >= 13)
1930 return phy >= PHY_F && phy <= PHY_I;
1931 else if (IS_TIGERLAKE(dev_priv))
1932 return phy >= PHY_D && phy <= PHY_I;
1933 else if (IS_ICELAKE(dev_priv))
1934 return phy >= PHY_C && phy <= PHY_F;
1935
1936 return false;
1937 }
1938
1939 /* Prefer intel_encoder_is_snps() */
1940 bool intel_phy_is_snps(struct drm_i915_private *dev_priv, enum phy phy)
1941 {
1942 /*
1943 * For DG2, and for DG2 only, all four "combo" ports and the TC1 port
1944 * (PHY E) use Synopsys PHYs. See intel_phy_is_tc().
1945 */
1946 return IS_DG2(dev_priv) && phy > PHY_NONE && phy <= PHY_E;
1947 }
1948
1949 /* Prefer intel_encoder_to_phy() */
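/*
 * Illustrative mappings implied by the table below: on DG1/RKL
 * PORT_TC1 maps to PHY_C, while on ADL-S PORT_TC1 maps to PHY_B.
 */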
1950 enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
1951 {
1952 if (DISPLAY_VER(i915) >= 13 && port >= PORT_D_XELPD)
1953 return PHY_D + port - PORT_D_XELPD;
1954 else if (DISPLAY_VER(i915) >= 13 && port >= PORT_TC1)
1955 return PHY_F + port - PORT_TC1;
1956 else if (IS_ALDERLAKE_S(i915) && port >= PORT_TC1)
1957 return PHY_B + port - PORT_TC1;
1958 else if ((IS_DG1(i915) || IS_ROCKETLAKE(i915)) && port >= PORT_TC1)
1959 return PHY_C + port - PORT_TC1;
1960 else if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
1961 port == PORT_D)
1962 return PHY_A;
1963
1964 return PHY_A + port - PORT_A;
1965 }
1966
1967 /* Prefer intel_encoder_to_tc() */
1968 enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
1969 {
1970 if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port)))
1971 return TC_PORT_NONE;
1972
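/*
 * Illustrative examples from the mapping below: on TGL (display
 * ver 12) PORT_TC1 maps to TC_PORT_1 directly, while on ICL the
 * TC ports start at PORT_C, so there PORT_C maps to TC_PORT_1.
 */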
1973 if (DISPLAY_VER(dev_priv) >= 12)
1974 return TC_PORT_1 + port - PORT_TC1;
1975 else
1976 return TC_PORT_1 + port - PORT_C;
1977 }
1978
1979 enum phy intel_encoder_to_phy(struct intel_encoder *encoder)
1980 {
1981 struct drm_i915_private *i915 = to_i915(encoder->base.dev);
1982
1983 return intel_port_to_phy(i915, encoder->port);
1984 }
1985
1986 bool intel_encoder_is_combo(struct intel_encoder *encoder)
1987 {
1988 struct drm_i915_private *i915 = to_i915(encoder->base.dev);
1989
1990 return intel_phy_is_combo(i915, intel_encoder_to_phy(encoder));
1991 }
1992
1993 bool intel_encoder_is_snps(struct intel_encoder *encoder)
1994 {
1995 struct drm_i915_private *i915 = to_i915(encoder->base.dev);
1996
1997 return intel_phy_is_snps(i915, intel_encoder_to_phy(encoder));
1998 }
1999
2000 bool intel_encoder_is_tc(struct intel_encoder *encoder)
2001 {
2002 struct drm_i915_private *i915 = to_i915(encoder->base.dev);
2003
2004 return intel_phy_is_tc(i915, intel_encoder_to_phy(encoder));
2005 }
2006
2007 enum tc_port intel_encoder_to_tc(struct intel_encoder *encoder)
2008 {
2009 struct drm_i915_private *i915 = to_i915(encoder->base.dev);
2010
2011 return intel_port_to_tc(i915, encoder->port);
2012 }
2013
2014 enum intel_display_power_domain
2015 intel_aux_power_domain(struct intel_digital_port *dig_port)
2016 {
2017 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
2018
2019 if (intel_tc_port_in_tbt_alt_mode(dig_port))
2020 return intel_display_power_tbt_aux_domain(i915, dig_port->aux_ch);
2021
2022 return intel_display_power_legacy_aux_domain(i915, dig_port->aux_ch);
2023 }
2024
2025 static void get_crtc_power_domains(struct intel_crtc_state *crtc_state,
2026 struct intel_power_domain_mask *mask)
2027 {
2028 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2029 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2030 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
2031 struct drm_encoder *encoder;
2032 enum pipe pipe = crtc->pipe;
2033
2034 bitmap_zero(mask->bits, POWER_DOMAIN_NUM);
2035
2036 if (!crtc_state->hw.active)
2037 return;
2038
2039 set_bit(POWER_DOMAIN_PIPE(pipe), mask->bits);
2040 set_bit(POWER_DOMAIN_TRANSCODER(cpu_transcoder), mask->bits);
2041 if (crtc_state->pch_pfit.enabled ||
2042 crtc_state->pch_pfit.force_thru)
2043 set_bit(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe), mask->bits);
2044
2045 drm_for_each_encoder_mask(encoder, &dev_priv->drm,
2046 crtc_state->uapi.encoder_mask) {
2047 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
2048
2049 set_bit(intel_encoder->power_domain, mask->bits);
2050 }
2051
2052 if (HAS_DDI(dev_priv) && crtc_state->has_audio)
2053 set_bit(POWER_DOMAIN_AUDIO_MMIO, mask->bits);
2054
2055 if (crtc_state->shared_dpll)
2056 set_bit(POWER_DOMAIN_DISPLAY_CORE, mask->bits);
2057
2058 if (crtc_state->dsc.compression_enable)
2059 set_bit(intel_dsc_power_domain(crtc, cpu_transcoder), mask->bits);
2060 }
2061
2062 void intel_modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state,
2063 struct intel_power_domain_mask *old_domains)
2064 {
2065 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2066 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2067 enum intel_display_power_domain domain;
2068 struct intel_power_domain_mask domains, new_domains;
2069
2070 get_crtc_power_domains(crtc_state, &domains);
2071
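/*
 * new_domains: domains the new state needs but we don't hold yet.
 * old_domains: domains we hold but the new state no longer needs;
 * the caller releases them via intel_modeset_put_crtc_power_domains().
 */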
2072 bitmap_andnot(new_domains.bits,
2073 domains.bits,
2074 crtc->enabled_power_domains.mask.bits,
2075 POWER_DOMAIN_NUM);
2076 bitmap_andnot(old_domains->bits,
2077 crtc->enabled_power_domains.mask.bits,
2078 domains.bits,
2079 POWER_DOMAIN_NUM);
2080
2081 for_each_power_domain(domain, &new_domains)
2082 intel_display_power_get_in_set(dev_priv,
2083 &crtc->enabled_power_domains,
2084 domain);
2085 }
2086
2087 void intel_modeset_put_crtc_power_domains(struct intel_crtc *crtc,
2088 struct intel_power_domain_mask *domains)
2089 {
2090 intel_display_power_put_mask_in_set(to_i915(crtc->base.dev),
2091 &crtc->enabled_power_domains,
2092 domains);
2093 }
2094
2095 static void i9xx_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state)
2096 {
2097 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2098 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
2099
2100 if (intel_crtc_has_dp_encoder(crtc_state)) {
2101 intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
2102 &crtc_state->dp_m_n);
2103 intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder,
2104 &crtc_state->dp_m2_n2);
2105 }
2106
2107 intel_set_transcoder_timings(crtc_state);
2108
2109 i9xx_set_pipeconf(crtc_state);
2110 }
2111
2112 static void valleyview_crtc_enable(struct intel_atomic_state *state,
2113 struct intel_crtc *crtc)
2114 {
2115 const struct intel_crtc_state *new_crtc_state =
2116 intel_atomic_get_new_crtc_state(state, crtc);
2117 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2118 enum pipe pipe = crtc->pipe;
2119
2120 if (drm_WARN_ON(&dev_priv->drm, crtc->active))
2121 return;
2122
2123 i9xx_configure_cpu_transcoder(new_crtc_state);
2124
2125 intel_set_pipe_src_size(new_crtc_state);
2126
2127 intel_de_write(dev_priv, VLV_PIPE_MSA_MISC(pipe), 0);
2128
2129 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
2130 intel_de_write(dev_priv, CHV_BLEND(dev_priv, pipe),
2131 CHV_BLEND_LEGACY);
2132 intel_de_write(dev_priv, CHV_CANVAS(dev_priv, pipe), 0);
2133 }
2134
2135 crtc->active = true;
2136
2137 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
2138
2139 intel_encoders_pre_pll_enable(state, crtc);
2140
2141 if (IS_CHERRYVIEW(dev_priv))
2142 chv_enable_pll(new_crtc_state);
2143 else
2144 vlv_enable_pll(new_crtc_state);
2145
2146 intel_encoders_pre_enable(state, crtc);
2147
2148 i9xx_pfit_enable(new_crtc_state);
2149
2150 intel_color_load_luts(new_crtc_state);
2151 intel_color_commit_noarm(new_crtc_state);
2152 intel_color_commit_arm(new_crtc_state);
2153 /* update DSPCNTR to configure gamma for pipe bottom color */
2154 intel_disable_primary_plane(new_crtc_state);
2155
2156 intel_initial_watermarks(state, crtc);
2157 intel_enable_transcoder(new_crtc_state);
2158
2159 intel_crtc_vblank_on(new_crtc_state);
2160
2161 intel_encoders_enable(state, crtc);
2162 }
2163
2164 static void i9xx_crtc_enable(struct intel_atomic_state *state,
2165 struct intel_crtc *crtc)
2166 {
2167 const struct intel_crtc_state *new_crtc_state =
2168 intel_atomic_get_new_crtc_state(state, crtc);
2169 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2170 enum pipe pipe = crtc->pipe;
2171
2172 if (drm_WARN_ON(&dev_priv->drm, crtc->active))
2173 return;
2174
2175 i9xx_configure_cpu_transcoder(new_crtc_state);
2176
2177 intel_set_pipe_src_size(new_crtc_state);
2178
2179 crtc->active = true;
2180
2181 if (DISPLAY_VER(dev_priv) != 2)
2182 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
2183
2184 intel_encoders_pre_enable(state, crtc);
2185
2186 i9xx_enable_pll(new_crtc_state);
2187
2188 i9xx_pfit_enable(new_crtc_state);
2189
2190 intel_color_load_luts(new_crtc_state);
2191 intel_color_commit_noarm(new_crtc_state);
2192 intel_color_commit_arm(new_crtc_state);
2193 /* update DSPCNTR to configure gamma for pipe bottom color */
2194 intel_disable_primary_plane(new_crtc_state);
2195
2196 if (!intel_initial_watermarks(state, crtc))
2197 intel_update_watermarks(dev_priv);
2198 intel_enable_transcoder(new_crtc_state);
2199
2200 intel_crtc_vblank_on(new_crtc_state);
2201
2202 intel_encoders_enable(state, crtc);
2203
2204 /* prevents spurious underruns */
2205 if (DISPLAY_VER(dev_priv) == 2)
2206 intel_crtc_wait_for_next_vblank(crtc);
2207 }
2208
2209 static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
2210 {
2211 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
2212 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2213
2214 if (!old_crtc_state->gmch_pfit.control)
2215 return;
2216
2217 assert_transcoder_disabled(dev_priv, old_crtc_state->cpu_transcoder);
2218
2219 drm_dbg_kms(&dev_priv->drm, "disabling pfit, current: 0x%08x\n",
2220 intel_de_read(dev_priv, PFIT_CONTROL(dev_priv)));
2221 intel_de_write(dev_priv, PFIT_CONTROL(dev_priv), 0);
2222 }
2223
2224 static void i9xx_crtc_disable(struct intel_atomic_state *state,
2225 struct intel_crtc *crtc)
2226 {
2227 struct intel_crtc_state *old_crtc_state =
2228 intel_atomic_get_old_crtc_state(state, crtc);
2229 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2230 enum pipe pipe = crtc->pipe;
2231
2232 /*
2233 * On gen2 planes are double buffered but the pipe isn't, so we must
2234 * wait for planes to fully turn off before disabling the pipe.
2235 */
2236 if (DISPLAY_VER(dev_priv) == 2)
2237 intel_crtc_wait_for_next_vblank(crtc);
2238
2239 intel_encoders_disable(state, crtc);
2240
2241 intel_crtc_vblank_off(old_crtc_state);
2242
2243 intel_disable_transcoder(old_crtc_state);
2244
2245 i9xx_pfit_disable(old_crtc_state);
2246
2247 intel_encoders_post_disable(state, crtc);
2248
2249 if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
2250 if (IS_CHERRYVIEW(dev_priv))
2251 chv_disable_pll(dev_priv, pipe);
2252 else if (IS_VALLEYVIEW(dev_priv))
2253 vlv_disable_pll(dev_priv, pipe);
2254 else
2255 i9xx_disable_pll(old_crtc_state);
2256 }
2257
2258 intel_encoders_post_pll_disable(state, crtc);
2259
2260 if (DISPLAY_VER(dev_priv) != 2)
2261 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
2262
2263 if (!dev_priv->display.funcs.wm->initial_watermarks)
2264 intel_update_watermarks(dev_priv);
2265
2266 /* clock the pipe down to 640x480@60 to potentially save power */
2267 if (IS_I830(dev_priv))
2268 i830_enable_pipe(dev_priv, pipe);
2269 }
2270
2271 void intel_encoder_destroy(struct drm_encoder *encoder)
2272 {
2273 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
2274
2275 drm_encoder_cleanup(encoder);
2276 kfree(intel_encoder);
2277 }
2278
2279 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
2280 {
2281 const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2282
2283 /* GDG double wide on either pipe, otherwise pipe A only */
2284 return DISPLAY_VER(dev_priv) < 4 &&
2285 (crtc->pipe == PIPE_A || IS_I915G(dev_priv));
2286 }
2287
2288 static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *crtc_state)
2289 {
2290 u32 pixel_rate = crtc_state->hw.pipe_mode.crtc_clock;
2291 struct drm_rect src;
2292
2293 /*
2294 * We only use IF-ID interlacing. If we ever use
2295 * PF-ID we'll need to adjust the pixel_rate here.
2296 */
2297
2298 if (!crtc_state->pch_pfit.enabled)
2299 return pixel_rate;
2300
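/*
 * When the pch pfit downscales, the pipe has to fetch source
 * pixels faster than the nominal dotclock, hence the adjusted
 * (src/dst scaled) rate below.
 */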
2301 drm_rect_init(&src, 0, 0,
2302 drm_rect_width(&crtc_state->pipe_src) << 16,
2303 drm_rect_height(&crtc_state->pipe_src) << 16);
2304
2305 return intel_adjusted_rate(&src, &crtc_state->pch_pfit.dst,
2306 pixel_rate);
2307 }
2308
2309 static void intel_mode_from_crtc_timings(struct drm_display_mode *mode,
2310 const struct drm_display_mode *timings)
2311 {
2312 mode->hdisplay = timings->crtc_hdisplay;
2313 mode->htotal = timings->crtc_htotal;
2314 mode->hsync_start = timings->crtc_hsync_start;
2315 mode->hsync_end = timings->crtc_hsync_end;
2316
2317 mode->vdisplay = timings->crtc_vdisplay;
2318 mode->vtotal = timings->crtc_vtotal;
2319 mode->vsync_start = timings->crtc_vsync_start;
2320 mode->vsync_end = timings->crtc_vsync_end;
2321
2322 mode->flags = timings->flags;
2323 mode->type = DRM_MODE_TYPE_DRIVER;
2324
2325 mode->clock = timings->crtc_clock;
2326
2327 drm_mode_set_name(mode);
2328 }
2329
2330 static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
2331 {
2332 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2333
2334 if (HAS_GMCH(dev_priv))
2335 /* FIXME calculate proper pipe pixel rate for GMCH pfit */
2336 crtc_state->pixel_rate =
2337 crtc_state->hw.pipe_mode.crtc_clock;
2338 else
2339 crtc_state->pixel_rate =
2340 ilk_pipe_pixel_rate(crtc_state);
2341 }
2342
2343 static void intel_joiner_adjust_timings(const struct intel_crtc_state *crtc_state,
2344 struct drm_display_mode *mode)
2345 {
2346 int num_pipes = intel_joiner_num_pipes(crtc_state);
2347
2348 if (num_pipes < 2)
2349 return;
2350
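/*
 * Illustrative example: two joined pipes driving a 7680 pixel
 * wide mode leave each pipe with 3840 pixel wide horizontal
 * timings and half the dotclock.
 */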
2351 mode->crtc_clock /= num_pipes;
2352 mode->crtc_hdisplay /= num_pipes;
2353 mode->crtc_hblank_start /= num_pipes;
2354 mode->crtc_hblank_end /= num_pipes;
2355 mode->crtc_hsync_start /= num_pipes;
2356 mode->crtc_hsync_end /= num_pipes;
2357 mode->crtc_htotal /= num_pipes;
2358 }
2359
2360 static void intel_splitter_adjust_timings(const struct intel_crtc_state *crtc_state,
2361 struct drm_display_mode *mode)
2362 {
2363 int overlap = crtc_state->splitter.pixel_overlap;
2364 int n = crtc_state->splitter.link_count;
2365
2366 if (!crtc_state->splitter.enable)
2367 return;
2368
2369 /*
2370 * eDP MSO uses segment timings from EDID for transcoder
2371 * timings, but full mode for everything else.
2372 *
2373 * h_full = (h_segment - pixel_overlap) * link_count
2374 */
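/*
 * Illustrative numbers: a dual-link MSO panel with 1920 pixel
 * wide segments and an 8 pixel overlap expands to
 * h_full = (1920 - 8) * 2 = 3824 pixels.
 */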
2375 mode->crtc_hdisplay = (mode->crtc_hdisplay - overlap) * n;
2376 mode->crtc_hblank_start = (mode->crtc_hblank_start - overlap) * n;
2377 mode->crtc_hblank_end = (mode->crtc_hblank_end - overlap) * n;
2378 mode->crtc_hsync_start = (mode->crtc_hsync_start - overlap) * n;
2379 mode->crtc_hsync_end = (mode->crtc_hsync_end - overlap) * n;
2380 mode->crtc_htotal = (mode->crtc_htotal - overlap) * n;
2381 mode->crtc_clock *= n;
2382 }
2383
2384 static void intel_crtc_readout_derived_state(struct intel_crtc_state *crtc_state)
2385 {
2386 struct drm_display_mode *mode = &crtc_state->hw.mode;
2387 struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
2388 struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
2389
2390 /*
2391 * Start with the adjusted_mode crtc timings, which
2392 * have been filled with the transcoder timings.
2393 */
2394 drm_mode_copy(pipe_mode, adjusted_mode);
2395
2396 /* Expand MSO per-segment transcoder timings to full */
2397 intel_splitter_adjust_timings(crtc_state, pipe_mode);
2398
2399 /*
2400 * We want the full numbers in adjusted_mode normal timings,
2401 * adjusted_mode crtc timings are left with the raw transcoder
2402 * timings.
2403 */
2404 intel_mode_from_crtc_timings(adjusted_mode, pipe_mode);
2405
2406 /* Populate the "user" mode with full numbers */
2407 drm_mode_copy(mode, pipe_mode);
2408 intel_mode_from_crtc_timings(mode, mode);
2409 mode->hdisplay = drm_rect_width(&crtc_state->pipe_src) *
2410 (intel_joiner_num_pipes(crtc_state) ?: 1);
2411 mode->vdisplay = drm_rect_height(&crtc_state->pipe_src);
2412
2413 /* Derive per-pipe timings in case joiner is used */
2414 intel_joiner_adjust_timings(crtc_state, pipe_mode);
2415 intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
2416
2417 intel_crtc_compute_pixel_rate(crtc_state);
2418 }
2419
2420 void intel_encoder_get_config(struct intel_encoder *encoder,
2421 struct intel_crtc_state *crtc_state)
2422 {
2423 encoder->get_config(encoder, crtc_state);
2424
2425 intel_crtc_readout_derived_state(crtc_state);
2426 }
2427
2428 static void intel_joiner_compute_pipe_src(struct intel_crtc_state *crtc_state)
2429 {
2430 int num_pipes = intel_joiner_num_pipes(crtc_state);
2431 int width, height;
2432
2433 if (num_pipes < 2)
2434 return;
2435
2436 width = drm_rect_width(&crtc_state->pipe_src);
2437 height = drm_rect_height(&crtc_state->pipe_src);
2438
2439 drm_rect_init(&crtc_state->pipe_src, 0, 0,
2440 width / num_pipes, height);
2441 }
2442
2443 static int intel_crtc_compute_pipe_src(struct intel_crtc_state *crtc_state)
2444 {
2445 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2446 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
2447
2448 intel_joiner_compute_pipe_src(crtc_state);
2449
2450 /*
2451 * Pipe horizontal size must be even in:
2452 * - DVO ganged mode
2453 * - LVDS dual channel mode
2454 * - Double wide pipe
2455 */
2456 if (drm_rect_width(&crtc_state->pipe_src) & 1) {
2457 if (crtc_state->double_wide) {
2458 drm_dbg_kms(&i915->drm,
2459 "[CRTC:%d:%s] Odd pipe source width not supported with double wide pipe\n",
2460 crtc->base.base.id, crtc->base.name);
2461 return -EINVAL;
2462 }
2463
2464 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
2465 intel_is_dual_link_lvds(i915)) {
2466 drm_dbg_kms(&i915->drm,
2467 "[CRTC:%d:%s] Odd pipe source width not supported with dual link LVDS\n",
2468 crtc->base.base.id, crtc->base.name);
2469 return -EINVAL;
2470 }
2471 }
2472
2473 return 0;
2474 }
2475
2476 static int intel_crtc_compute_pipe_mode(struct intel_crtc_state *crtc_state)
2477 {
2478 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2479 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
2480 struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
2481 struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
2482 int clock_limit = i915->display.cdclk.max_dotclk_freq;
2483
2484 /*
2485 * Start with the adjusted_mode crtc timings, which
2486 * have been filled with the transcoder timings.
2487 */
2488 drm_mode_copy(pipe_mode, adjusted_mode);
2489
2490 /* Expand MSO per-segment transcoder timings to full */
2491 intel_splitter_adjust_timings(crtc_state, pipe_mode);
2492
2493 /* Derive per-pipe timings in case joiner is used */
2494 intel_joiner_adjust_timings(crtc_state, pipe_mode);
2495 intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
2496
2497 if (DISPLAY_VER(i915) < 4) {
2498 clock_limit = i915->display.cdclk.max_cdclk_freq * 9 / 10;
2499
2500 /*
2501 * Enable double wide mode when the dot clock
2502 * is > 90% of the (display) core speed.
2503 */
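/*
 * Illustrative example: with a 320 MHz max cdclk the single
 * wide limit is 288 MHz, so e.g. a 296 MHz dot clock would
 * force double wide mode.
 */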
2504 if (intel_crtc_supports_double_wide(crtc) &&
2505 pipe_mode->crtc_clock > clock_limit) {
2506 clock_limit = i915->display.cdclk.max_dotclk_freq;
2507 crtc_state->double_wide = true;
2508 }
2509 }
2510
2511 if (pipe_mode->crtc_clock > clock_limit) {
2512 drm_dbg_kms(&i915->drm,
2513 "[CRTC:%d:%s] requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
2514 crtc->base.base.id, crtc->base.name,
2515 pipe_mode->crtc_clock, clock_limit,
2516 str_yes_no(crtc_state->double_wide));
2517 return -EINVAL;
2518 }
2519
2520 return 0;
2521 }
2522
2523 static int intel_crtc_compute_config(struct intel_atomic_state *state,
2524 struct intel_crtc *crtc)
2525 {
2526 struct intel_crtc_state *crtc_state =
2527 intel_atomic_get_new_crtc_state(state, crtc);
2528 int ret;
2529
2530 ret = intel_dpll_crtc_compute_clock(state, crtc);
2531 if (ret)
2532 return ret;
2533
2534 ret = intel_crtc_compute_pipe_src(crtc_state);
2535 if (ret)
2536 return ret;
2537
2538 ret = intel_crtc_compute_pipe_mode(crtc_state);
2539 if (ret)
2540 return ret;
2541
2542 intel_crtc_compute_pixel_rate(crtc_state);
2543
2544 if (crtc_state->has_pch_encoder)
2545 return ilk_fdi_compute_config(crtc, crtc_state);
2546
2547 return 0;
2548 }
2549
2550 static void
2551 intel_reduce_m_n_ratio(u32 *num, u32 *den)
2552 {
2553 while (*num > DATA_LINK_M_N_MASK ||
2554 *den > DATA_LINK_M_N_MASK) {
2555 *num >>= 1;
2556 *den >>= 1;
2557 }
2558 }
2559
2560 static void compute_m_n(u32 *ret_m, u32 *ret_n,
2561 u32 m, u32 n, u32 constant_n)
2562 {
2563 if (constant_n)
2564 *ret_n = constant_n;
2565 else
2566 *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
2567
2568 *ret_m = div_u64(mul_u32_u32(m, *ret_n), n);
2569 intel_reduce_m_n_ratio(ret_m, ret_n);
2570 }
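
/*
 * Illustrative example (hypothetical values): compute_m_n(&m, &n,
 * 100, 400, 0) picks n = roundup_pow_of_two(400) = 512 and
 * m = 100 * 512 / 400 = 128, preserving the exact 100:400 ratio
 * while keeping both values within the register limits.
 */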
2571
2572 void
2573 intel_link_compute_m_n(u16 bits_per_pixel_x16, int nlanes,
2574 int pixel_clock, int link_clock,
2575 int bw_overhead,
2576 struct intel_link_m_n *m_n)
2577 {
2578 u32 link_symbol_clock = intel_dp_link_symbol_clock(link_clock);
2579 u32 data_m = intel_dp_effective_data_rate(pixel_clock, bits_per_pixel_x16,
2580 bw_overhead);
2581 u32 data_n = drm_dp_max_dprx_data_rate(link_clock, nlanes);
2582
2583 /*
2584 * Windows/BIOS uses fixed M/N values always. Follow suit.
2585 *
2586 * Several DP dongles in particular seem to be fussy
2587 * about too large link M/N values. Presumably the 20bit
2588 * value used by Windows/BIOS is acceptable to everyone.
2589 */
2590 m_n->tu = 64;
2591 compute_m_n(&m_n->data_m, &m_n->data_n,
2592 data_m, data_n,
2593 0x8000000);
2594
2595 compute_m_n(&m_n->link_m, &m_n->link_n,
2596 pixel_clock, link_symbol_clock,
2597 0x80000);
2598 }
2599
2600 void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
2601 {
2602 /*
2603 * There may be no VBT; if the BIOS enabled SSC we can just keep
2604 * using it to avoid unnecessary flicker. Whereas if the BIOS isn't
2605 * using it, don't assume it will work even if the VBT indicates
2606 * as much.
2607 */
2608 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
2609 bool bios_lvds_use_ssc = intel_de_read(dev_priv,
2610 PCH_DREF_CONTROL) &
2611 DREF_SSC1_ENABLE;
2612
2613 if (dev_priv->display.vbt.lvds_use_ssc != bios_lvds_use_ssc) {
2614 drm_dbg_kms(&dev_priv->drm,
2615 "SSC %s by BIOS, overriding VBT which says %s\n",
2616 str_enabled_disabled(bios_lvds_use_ssc),
2617 str_enabled_disabled(dev_priv->display.vbt.lvds_use_ssc));
2618 dev_priv->display.vbt.lvds_use_ssc = bios_lvds_use_ssc;
2619 }
2620 }
2621 }
2622
2623 void intel_zero_m_n(struct intel_link_m_n *m_n)
2624 {
2625 /* corresponds to a 0 register value, since TU_SIZE() encodes tu - 1 */
2626 memset(m_n, 0, sizeof(*m_n));
2627 m_n->tu = 1;
2628 }
2629
2630 void intel_set_m_n(struct drm_i915_private *i915,
2631 const struct intel_link_m_n *m_n,
2632 i915_reg_t data_m_reg, i915_reg_t data_n_reg,
2633 i915_reg_t link_m_reg, i915_reg_t link_n_reg)
2634 {
2635 intel_de_write(i915, data_m_reg, TU_SIZE(m_n->tu) | m_n->data_m);
2636 intel_de_write(i915, data_n_reg, m_n->data_n);
2637 intel_de_write(i915, link_m_reg, m_n->link_m);
2638 /*
2639 * On BDW+ writing LINK_N arms the double buffered update
2640 * of all the M/N registers, so it must be written last.
2641 */
2642 intel_de_write(i915, link_n_reg, m_n->link_n);
2643 }
2644
2645 bool intel_cpu_transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
2646 enum transcoder transcoder)
2647 {
2648 if (IS_HASWELL(dev_priv))
2649 return transcoder == TRANSCODER_EDP;
2650
2651 return IS_DISPLAY_VER(dev_priv, 5, 7) || IS_CHERRYVIEW(dev_priv);
2652 }
2653
2654 void intel_cpu_transcoder_set_m1_n1(struct intel_crtc *crtc,
2655 enum transcoder transcoder,
2656 const struct intel_link_m_n *m_n)
2657 {
2658 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2659 enum pipe pipe = crtc->pipe;
2660
2661 if (DISPLAY_VER(dev_priv) >= 5)
2662 intel_set_m_n(dev_priv, m_n,
2663 PIPE_DATA_M1(dev_priv, transcoder),
2664 PIPE_DATA_N1(dev_priv, transcoder),
2665 PIPE_LINK_M1(dev_priv, transcoder),
2666 PIPE_LINK_N1(dev_priv, transcoder));
2667 else
2668 intel_set_m_n(dev_priv, m_n,
2669 PIPE_DATA_M_G4X(pipe), PIPE_DATA_N_G4X(pipe),
2670 PIPE_LINK_M_G4X(pipe), PIPE_LINK_N_G4X(pipe));
2671 }
2672
2673 void intel_cpu_transcoder_set_m2_n2(struct intel_crtc *crtc,
2674 enum transcoder transcoder,
2675 const struct intel_link_m_n *m_n)
2676 {
2677 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2678
2679 if (!intel_cpu_transcoder_has_m2_n2(dev_priv, transcoder))
2680 return;
2681
2682 intel_set_m_n(dev_priv, m_n,
2683 PIPE_DATA_M2(dev_priv, transcoder),
2684 PIPE_DATA_N2(dev_priv, transcoder),
2685 PIPE_LINK_M2(dev_priv, transcoder),
2686 PIPE_LINK_N2(dev_priv, transcoder));
2687 }
2688
2689 static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
2690 {
2691 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2692 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2693 enum pipe pipe = crtc->pipe;
2694 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
2695 const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
2696 u32 crtc_vdisplay, crtc_vtotal, crtc_vblank_start, crtc_vblank_end;
2697 int vsyncshift = 0;
2698
2699 /* We need to be careful not to change the adjusted mode, for otherwise
2700 * the hw state checker will get angry at the mismatch. */
2701 crtc_vdisplay = adjusted_mode->crtc_vdisplay;
2702 crtc_vtotal = adjusted_mode->crtc_vtotal;
2703 crtc_vblank_start = adjusted_mode->crtc_vblank_start;
2704 crtc_vblank_end = adjusted_mode->crtc_vblank_end;
2705
2706 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
2707 /* the chip adds 2 halflines automatically */
2708 crtc_vtotal -= 1;
2709 crtc_vblank_end -= 1;
2710
2711 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
2712 vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
2713 else
2714 vsyncshift = adjusted_mode->crtc_hsync_start -
2715 adjusted_mode->crtc_htotal / 2;
2716 if (vsyncshift < 0)
2717 vsyncshift += adjusted_mode->crtc_htotal;
2718 }
2719
2720 /*
2721 * VBLANK_START no longer works on ADL+, instead we must use
2722 * TRANS_SET_CONTEXT_LATENCY to configure the pipe vblank start.
2723 */
2724 if (DISPLAY_VER(dev_priv) >= 13) {
2725 intel_de_write(dev_priv,
2726 TRANS_SET_CONTEXT_LATENCY(dev_priv, cpu_transcoder),
2727 crtc_vblank_start - crtc_vdisplay);
2728
2729 /*
2730 * VBLANK_START not used by hw, just clear it
2731 * to make it stand out in register dumps.
2732 */
2733 crtc_vblank_start = 1;
2734 }
2735
2736 if (DISPLAY_VER(dev_priv) >= 4)
2737 intel_de_write(dev_priv,
2738 TRANS_VSYNCSHIFT(dev_priv, cpu_transcoder),
2739 vsyncshift);
2740
2741 intel_de_write(dev_priv, TRANS_HTOTAL(dev_priv, cpu_transcoder),
2742 HACTIVE(adjusted_mode->crtc_hdisplay - 1) |
2743 HTOTAL(adjusted_mode->crtc_htotal - 1));
2744 intel_de_write(dev_priv, TRANS_HBLANK(dev_priv, cpu_transcoder),
2745 HBLANK_START(adjusted_mode->crtc_hblank_start - 1) |
2746 HBLANK_END(adjusted_mode->crtc_hblank_end - 1));
2747 intel_de_write(dev_priv, TRANS_HSYNC(dev_priv, cpu_transcoder),
2748 HSYNC_START(adjusted_mode->crtc_hsync_start - 1) |
2749 HSYNC_END(adjusted_mode->crtc_hsync_end - 1));
2750
2751 intel_de_write(dev_priv, TRANS_VTOTAL(dev_priv, cpu_transcoder),
2752 VACTIVE(crtc_vdisplay - 1) |
2753 VTOTAL(crtc_vtotal - 1));
2754 intel_de_write(dev_priv, TRANS_VBLANK(dev_priv, cpu_transcoder),
2755 VBLANK_START(crtc_vblank_start - 1) |
2756 VBLANK_END(crtc_vblank_end - 1));
2757 intel_de_write(dev_priv, TRANS_VSYNC(dev_priv, cpu_transcoder),
2758 VSYNC_START(adjusted_mode->crtc_vsync_start - 1) |
2759 VSYNC_END(adjusted_mode->crtc_vsync_end - 1));
2760
2761 /* Workaround: when the EDP input selection is B, the VTOTAL_B must be
2762 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
2763 * documented on the DDI_FUNC_CTL register description, EDP Input Select
2764 * bits. */
2765 if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
2766 (pipe == PIPE_B || pipe == PIPE_C))
2767 intel_de_write(dev_priv, TRANS_VTOTAL(dev_priv, pipe),
2768 VACTIVE(crtc_vdisplay - 1) |
2769 VTOTAL(crtc_vtotal - 1));
2770 }
2771
2772 static void intel_set_transcoder_timings_lrr(const struct intel_crtc_state *crtc_state)
2773 {
2774 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2775 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2776 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
2777 const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
2778 u32 crtc_vdisplay, crtc_vtotal, crtc_vblank_start, crtc_vblank_end;
2779
2780 crtc_vdisplay = adjusted_mode->crtc_vdisplay;
2781 crtc_vtotal = adjusted_mode->crtc_vtotal;
2782 crtc_vblank_start = adjusted_mode->crtc_vblank_start;
2783 crtc_vblank_end = adjusted_mode->crtc_vblank_end;
2784
2785 drm_WARN_ON(&dev_priv->drm, adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE);
2786
2787 /*
2788 * The hardware actually ignores TRANS_VBLANK.VBLANK_END in DP mode.
2789 * But let's write it anyway to keep the state checker happy.
2790 */
2791 intel_de_write(dev_priv, TRANS_VBLANK(dev_priv, cpu_transcoder),
2792 VBLANK_START(crtc_vblank_start - 1) |
2793 VBLANK_END(crtc_vblank_end - 1));
2794 /*
2795 * The double buffer latch point for TRANS_VTOTAL
2796 * is the transcoder's undelayed vblank.
2797 */
2798 intel_de_write(dev_priv, TRANS_VTOTAL(dev_priv, cpu_transcoder),
2799 VACTIVE(crtc_vdisplay - 1) |
2800 VTOTAL(crtc_vtotal - 1));
2801 }
2802
2803 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
2804 {
2805 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2806 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2807 int width = drm_rect_width(&crtc_state->pipe_src);
2808 int height = drm_rect_height(&crtc_state->pipe_src);
2809 enum pipe pipe = crtc->pipe;
2810
2811 /* pipesrc controls the size that is scaled from, which should
2812 * always be the user's requested size.
2813 */
2814 intel_de_write(dev_priv, PIPESRC(dev_priv, pipe),
2815 PIPESRC_WIDTH(width - 1) | PIPESRC_HEIGHT(height - 1));
2816 }
2817
2818 static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
2819 {
2820 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2821 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
2822
2823 if (DISPLAY_VER(dev_priv) == 2)
2824 return false;
2825
2826 if (DISPLAY_VER(dev_priv) >= 9 ||
2827 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
2828 return intel_de_read(dev_priv,
2829 TRANSCONF(dev_priv, cpu_transcoder)) & TRANSCONF_INTERLACE_MASK_HSW;
2830 else
2831 return intel_de_read(dev_priv,
2832 TRANSCONF(dev_priv, cpu_transcoder)) & TRANSCONF_INTERLACE_MASK;
2833 }
2834
2835 static void intel_get_transcoder_timings(struct intel_crtc *crtc,
2836 struct intel_crtc_state *pipe_config)
2837 {
2838 struct drm_device *dev = crtc->base.dev;
2839 struct drm_i915_private *dev_priv = to_i915(dev);
2840 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
2841 struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
2842 u32 tmp;
2843
2844 tmp = intel_de_read(dev_priv, TRANS_HTOTAL(dev_priv, cpu_transcoder));
2845 adjusted_mode->crtc_hdisplay = REG_FIELD_GET(HACTIVE_MASK, tmp) + 1;
2846 adjusted_mode->crtc_htotal = REG_FIELD_GET(HTOTAL_MASK, tmp) + 1;
2847
2848 if (!transcoder_is_dsi(cpu_transcoder)) {
2849 tmp = intel_de_read(dev_priv,
2850 TRANS_HBLANK(dev_priv, cpu_transcoder));
2851 adjusted_mode->crtc_hblank_start = REG_FIELD_GET(HBLANK_START_MASK, tmp) + 1;
2852 adjusted_mode->crtc_hblank_end = REG_FIELD_GET(HBLANK_END_MASK, tmp) + 1;
2853 }
2854
2855 tmp = intel_de_read(dev_priv, TRANS_HSYNC(dev_priv, cpu_transcoder));
2856 adjusted_mode->crtc_hsync_start = REG_FIELD_GET(HSYNC_START_MASK, tmp) + 1;
2857 adjusted_mode->crtc_hsync_end = REG_FIELD_GET(HSYNC_END_MASK, tmp) + 1;
2858
2859 tmp = intel_de_read(dev_priv, TRANS_VTOTAL(dev_priv, cpu_transcoder));
2860 adjusted_mode->crtc_vdisplay = REG_FIELD_GET(VACTIVE_MASK, tmp) + 1;
2861 adjusted_mode->crtc_vtotal = REG_FIELD_GET(VTOTAL_MASK, tmp) + 1;
2862
2863 /* FIXME TGL+ DSI transcoders have this! */
2864 if (!transcoder_is_dsi(cpu_transcoder)) {
2865 tmp = intel_de_read(dev_priv,
2866 TRANS_VBLANK(dev_priv, cpu_transcoder));
2867 adjusted_mode->crtc_vblank_start = REG_FIELD_GET(VBLANK_START_MASK, tmp) + 1;
2868 adjusted_mode->crtc_vblank_end = REG_FIELD_GET(VBLANK_END_MASK, tmp) + 1;
2869 }
2870 tmp = intel_de_read(dev_priv, TRANS_VSYNC(dev_priv, cpu_transcoder));
2871 adjusted_mode->crtc_vsync_start = REG_FIELD_GET(VSYNC_START_MASK, tmp) + 1;
2872 adjusted_mode->crtc_vsync_end = REG_FIELD_GET(VSYNC_END_MASK, tmp) + 1;
2873
2874 if (intel_pipe_is_interlaced(pipe_config)) {
2875 adjusted_mode->flags |= DRM_MODE_FLAG_INTERLACE;
2876 adjusted_mode->crtc_vtotal += 1;
2877 adjusted_mode->crtc_vblank_end += 1;
2878 }
2879
2880 if (DISPLAY_VER(dev_priv) >= 13 && !transcoder_is_dsi(cpu_transcoder))
2881 adjusted_mode->crtc_vblank_start =
2882 adjusted_mode->crtc_vdisplay +
2883 intel_de_read(dev_priv,
2884 TRANS_SET_CONTEXT_LATENCY(dev_priv, cpu_transcoder));
2885 }
2886
2887 static void intel_joiner_adjust_pipe_src(struct intel_crtc_state *crtc_state)
2888 {
2889 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2890 int num_pipes = intel_joiner_num_pipes(crtc_state);
2891 enum pipe primary_pipe, pipe = crtc->pipe;
2892 int width;
2893
2894 if (num_pipes < 2)
2895 return;
2896
2897 primary_pipe = joiner_primary_pipe(crtc_state);
2898 width = drm_rect_width(&crtc_state->pipe_src);
2899
2900 drm_rect_translate_to(&crtc_state->pipe_src,
2901 (pipe - primary_pipe) * width, 0);
2902 }
2903
2904 static void intel_get_pipe_src_size(struct intel_crtc *crtc,
2905 struct intel_crtc_state *pipe_config)
2906 {
2907 struct drm_device *dev = crtc->base.dev;
2908 struct drm_i915_private *dev_priv = to_i915(dev);
2909 u32 tmp;
2910
2911 tmp = intel_de_read(dev_priv, PIPESRC(dev_priv, crtc->pipe));
2912
2913 drm_rect_init(&pipe_config->pipe_src, 0, 0,
2914 REG_FIELD_GET(PIPESRC_WIDTH_MASK, tmp) + 1,
2915 REG_FIELD_GET(PIPESRC_HEIGHT_MASK, tmp) + 1);
2916
2917 intel_joiner_adjust_pipe_src(pipe_config);
2918 }
2919
2920 void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
2921 {
2922 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2923 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2924 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
2925 u32 val = 0;
2926
2927 /*
2928 * - We keep both pipes enabled on 830
2929 * - During modeset the pipe is still disabled and must remain so
2930 * - During fastset the pipe is already enabled and must remain so
2931 */
2932 if (IS_I830(dev_priv) || !intel_crtc_needs_modeset(crtc_state))
2933 val |= TRANSCONF_ENABLE;
2934
2935 if (crtc_state->double_wide)
2936 val |= TRANSCONF_DOUBLE_WIDE;
2937
2938 /* only g4x and later have fancy bpc/dither controls */
2939 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
2940 IS_CHERRYVIEW(dev_priv)) {
2941 /* Bspec claims that we can't use dithering for 30bpp pipes. */
2942 if (crtc_state->dither && crtc_state->pipe_bpp != 30)
2943 val |= TRANSCONF_DITHER_EN |
2944 TRANSCONF_DITHER_TYPE_SP;
2945
2946 switch (crtc_state->pipe_bpp) {
2947 default:
2948 /* Case prevented by intel_choose_pipe_bpp_dither. */
2949 MISSING_CASE(crtc_state->pipe_bpp);
2950 fallthrough;
2951 case 18:
2952 val |= TRANSCONF_BPC_6;
2953 break;
2954 case 24:
2955 val |= TRANSCONF_BPC_8;
2956 break;
2957 case 30:
2958 val |= TRANSCONF_BPC_10;
2959 break;
2960 }
2961 }
2962
2963 if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
2964 if (DISPLAY_VER(dev_priv) < 4 ||
2965 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
2966 val |= TRANSCONF_INTERLACE_W_FIELD_INDICATION;
2967 else
2968 val |= TRANSCONF_INTERLACE_W_SYNC_SHIFT;
2969 } else {
2970 val |= TRANSCONF_INTERLACE_PROGRESSIVE;
2971 }
2972
2973 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
2974 crtc_state->limited_color_range)
2975 val |= TRANSCONF_COLOR_RANGE_SELECT;
2976
2977 val |= TRANSCONF_GAMMA_MODE(crtc_state->gamma_mode);
2978
2979 if (crtc_state->wgc_enable)
2980 val |= TRANSCONF_WGC_ENABLE;
2981
2982 val |= TRANSCONF_FRAME_START_DELAY(crtc_state->framestart_delay - 1);
2983
2984 intel_de_write(dev_priv, TRANSCONF(dev_priv, cpu_transcoder), val);
2985 intel_de_posting_read(dev_priv, TRANSCONF(dev_priv, cpu_transcoder));
2986 }
2987
2988 static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
2989 {
2990 if (IS_I830(dev_priv))
2991 return false;
2992
2993 return DISPLAY_VER(dev_priv) >= 4 ||
2994 IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
2995 }
2996
2997 static void i9xx_get_pfit_config(struct intel_crtc_state *crtc_state)
2998 {
2999 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3000 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3001 enum pipe pipe;
3002 u32 tmp;
3003
3004 if (!i9xx_has_pfit(dev_priv))
3005 return;
3006
3007 tmp = intel_de_read(dev_priv, PFIT_CONTROL(dev_priv));
3008 if (!(tmp & PFIT_ENABLE))
3009 return;
3010
3011 /* Check whether the pfit is attached to our pipe; before gen4 the panel fitter is hardwired to pipe B. */
3012 if (DISPLAY_VER(dev_priv) >= 4)
3013 pipe = REG_FIELD_GET(PFIT_PIPE_MASK, tmp);
3014 else
3015 pipe = PIPE_B;
3016
3017 if (pipe != crtc->pipe)
3018 return;
3019
3020 crtc_state->gmch_pfit.control = tmp;
3021 crtc_state->gmch_pfit.pgm_ratios =
3022 intel_de_read(dev_priv, PFIT_PGM_RATIOS(dev_priv));
3023 }
3024
3025 static enum intel_output_format
3026 bdw_get_pipe_misc_output_format(struct intel_crtc *crtc)
3027 {
3028 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3029 u32 tmp;
3030
3031 tmp = intel_de_read(dev_priv, PIPE_MISC(crtc->pipe));
3032
3033 if (tmp & PIPE_MISC_YUV420_ENABLE) {
3034 /* We support 4:2:0 in full blend mode only */
3035 drm_WARN_ON(&dev_priv->drm,
3036 (tmp & PIPE_MISC_YUV420_MODE_FULL_BLEND) == 0);
3037
3038 return INTEL_OUTPUT_FORMAT_YCBCR420;
3039 } else if (tmp & PIPE_MISC_OUTPUT_COLORSPACE_YUV) {
3040 return INTEL_OUTPUT_FORMAT_YCBCR444;
3041 } else {
3042 return INTEL_OUTPUT_FORMAT_RGB;
3043 }
3044 }
3045
3046 static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
3047 struct intel_crtc_state *pipe_config)
3048 {
3049 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3050 enum intel_display_power_domain power_domain;
3051 intel_wakeref_t wakeref;
3052 u32 tmp;
3053 bool ret;
3054
3055 power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
3056 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
3057 if (!wakeref)
3058 return false;
3059
3060 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
3061 pipe_config->sink_format = pipe_config->output_format;
3062 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
3063 pipe_config->shared_dpll = NULL;
3064
3065 ret = false;
3066
3067 tmp = intel_de_read(dev_priv,
3068 TRANSCONF(dev_priv, pipe_config->cpu_transcoder));
3069 if (!(tmp & TRANSCONF_ENABLE))
3070 goto out;
3071
3072 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
3073 IS_CHERRYVIEW(dev_priv)) {
3074 switch (tmp & TRANSCONF_BPC_MASK) {
3075 case TRANSCONF_BPC_6:
3076 pipe_config->pipe_bpp = 18;
3077 break;
3078 case TRANSCONF_BPC_8:
3079 pipe_config->pipe_bpp = 24;
3080 break;
3081 case TRANSCONF_BPC_10:
3082 pipe_config->pipe_bpp = 30;
3083 break;
3084 default:
3085 MISSING_CASE(tmp);
3086 break;
3087 }
3088 }
3089
3090 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
3091 (tmp & TRANSCONF_COLOR_RANGE_SELECT))
3092 pipe_config->limited_color_range = true;
3093
3094 pipe_config->gamma_mode = REG_FIELD_GET(TRANSCONF_GAMMA_MODE_MASK_I9XX, tmp);
3095
3096 pipe_config->framestart_delay = REG_FIELD_GET(TRANSCONF_FRAME_START_DELAY_MASK, tmp) + 1;
3097
3098 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
3099 (tmp & TRANSCONF_WGC_ENABLE))
3100 pipe_config->wgc_enable = true;
3101
3102 intel_color_get_config(pipe_config);
3103
3104 if (DISPLAY_VER(dev_priv) < 4)
3105 pipe_config->double_wide = tmp & TRANSCONF_DOUBLE_WIDE;
3106
3107 intel_get_transcoder_timings(crtc, pipe_config);
3108 intel_get_pipe_src_size(crtc, pipe_config);
3109
3110 i9xx_get_pfit_config(pipe_config);
3111
3112 i9xx_dpll_get_hw_state(crtc, &pipe_config->dpll_hw_state);
3113
3114 if (DISPLAY_VER(dev_priv) >= 4) {
3115 tmp = pipe_config->dpll_hw_state.i9xx.dpll_md;
3116 pipe_config->pixel_multiplier =
3117 ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
3118 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
3119 } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
3120 IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
3121 tmp = pipe_config->dpll_hw_state.i9xx.dpll;
3122 pipe_config->pixel_multiplier =
3123 ((tmp & SDVO_MULTIPLIER_MASK)
3124 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
3125 } else {
3126 /* Note that on i915G/GM the pixel multiplier is in the sdvo
3127 * port and will be fixed up in the encoder->get_config
3128 * function. */
3129 pipe_config->pixel_multiplier = 1;
3130 }
3131
3132 if (IS_CHERRYVIEW(dev_priv))
3133 chv_crtc_clock_get(pipe_config);
3134 else if (IS_VALLEYVIEW(dev_priv))
3135 vlv_crtc_clock_get(pipe_config);
3136 else
3137 i9xx_crtc_clock_get(pipe_config);
3138
3139 /*
3140 * Normally the dotclock is filled in by the encoder .get_config()
3141 * but in case the pipe is enabled w/o any ports we need a sane
3142 * default.
3143 */
3144 pipe_config->hw.adjusted_mode.crtc_clock =
3145 pipe_config->port_clock / pipe_config->pixel_multiplier;
3146
3147 ret = true;
3148
3149 out:
3150 intel_display_power_put(dev_priv, power_domain, wakeref);
3151
3152 return ret;
3153 }
3154
3155 void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
3156 {
3157 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3158 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3159 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
3160 u32 val = 0;
3161
3162 /*
3163 * - During modeset the pipe is still disabled and must remain so
3164 * - During fastset the pipe is already enabled and must remain so
3165 */
3166 if (!intel_crtc_needs_modeset(crtc_state))
3167 val |= TRANSCONF_ENABLE;
3168
3169 switch (crtc_state->pipe_bpp) {
3170 default:
3171 /* Case prevented by intel_choose_pipe_bpp_dither. */
3172 MISSING_CASE(crtc_state->pipe_bpp);
3173 fallthrough;
3174 case 18:
3175 val |= TRANSCONF_BPC_6;
3176 break;
3177 case 24:
3178 val |= TRANSCONF_BPC_8;
3179 break;
3180 case 30:
3181 val |= TRANSCONF_BPC_10;
3182 break;
3183 case 36:
3184 val |= TRANSCONF_BPC_12;
3185 break;
3186 }
3187
3188 if (crtc_state->dither)
3189 val |= TRANSCONF_DITHER_EN | TRANSCONF_DITHER_TYPE_SP;
3190
3191 if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
3192 val |= TRANSCONF_INTERLACE_IF_ID_ILK;
3193 else
3194 val |= TRANSCONF_INTERLACE_PF_PD_ILK;
3195
3196 /*
3197 * This would end up with an odd purple hue over
3198 * the entire display. Make sure we don't do it.
3199 */
3200 drm_WARN_ON(&dev_priv->drm, crtc_state->limited_color_range &&
3201 crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
3202
3203 if (crtc_state->limited_color_range &&
3204 !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
3205 val |= TRANSCONF_COLOR_RANGE_SELECT;
3206
3207 if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
3208 val |= TRANSCONF_OUTPUT_COLORSPACE_YUV709;
3209
3210 val |= TRANSCONF_GAMMA_MODE(crtc_state->gamma_mode);
3211
3212 val |= TRANSCONF_FRAME_START_DELAY(crtc_state->framestart_delay - 1);
3213 val |= TRANSCONF_MSA_TIMING_DELAY(crtc_state->msa_timing_delay);
3214
3215 intel_de_write(dev_priv, TRANSCONF(dev_priv, cpu_transcoder), val);
3216 intel_de_posting_read(dev_priv, TRANSCONF(dev_priv, cpu_transcoder));
3217 }
3218
3219 static void hsw_set_transconf(const struct intel_crtc_state *crtc_state)
3220 {
3221 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3222 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3223 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
3224 u32 val = 0;
3225
3226 /*
3227 * - During modeset the pipe is still disabled and must remain so
3228 * - During fastset the pipe is already enabled and must remain so
3229 */
3230 if (!intel_crtc_needs_modeset(crtc_state))
3231 val |= TRANSCONF_ENABLE;
3232
3233 if (IS_HASWELL(dev_priv) && crtc_state->dither)
3234 val |= TRANSCONF_DITHER_EN | TRANSCONF_DITHER_TYPE_SP;
3235
3236 if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
3237 val |= TRANSCONF_INTERLACE_IF_ID_ILK;
3238 else
3239 val |= TRANSCONF_INTERLACE_PF_PD_ILK;
3240
3241 if (IS_HASWELL(dev_priv) &&
3242 crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
3243 val |= TRANSCONF_OUTPUT_COLORSPACE_YUV_HSW;
3244
3245 intel_de_write(dev_priv, TRANSCONF(dev_priv, cpu_transcoder), val);
3246 intel_de_posting_read(dev_priv, TRANSCONF(dev_priv, cpu_transcoder));
3247 }
3248
3249 static void bdw_set_pipe_misc(const struct intel_crtc_state *crtc_state)
3250 {
3251 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3252 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3253 u32 val = 0;
3254
3255 switch (crtc_state->pipe_bpp) {
3256 case 18:
3257 val |= PIPE_MISC_BPC_6;
3258 break;
3259 case 24:
3260 val |= PIPE_MISC_BPC_8;
3261 break;
3262 case 30:
3263 val |= PIPE_MISC_BPC_10;
3264 break;
3265 case 36:
3266 /* Port output 12BPC defined for ADLP+ */
3267 if (DISPLAY_VER(dev_priv) >= 13)
3268 val |= PIPE_MISC_BPC_12_ADLP;
3269 break;
3270 default:
3271 MISSING_CASE(crtc_state->pipe_bpp);
3272 break;
3273 }
3274
3275 if (crtc_state->dither)
3276 val |= PIPE_MISC_DITHER_ENABLE | PIPE_MISC_DITHER_TYPE_SP;
3277
3278 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
3279 crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
3280 val |= PIPE_MISC_OUTPUT_COLORSPACE_YUV;
3281
3282 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
3283 val |= PIPE_MISC_YUV420_ENABLE |
3284 PIPE_MISC_YUV420_MODE_FULL_BLEND;
3285
3286 if (DISPLAY_VER(dev_priv) >= 11 && is_hdr_mode(crtc_state))
3287 val |= PIPE_MISC_HDR_MODE_PRECISION;
3288
3289 if (DISPLAY_VER(dev_priv) >= 12)
3290 val |= PIPE_MISC_PIXEL_ROUNDING_TRUNC;
3291
3292 /* allow PSR with sprite enabled */
3293 if (IS_BROADWELL(dev_priv))
3294 val |= PIPE_MISC_PSR_MASK_SPRITE_ENABLE;
3295
3296 intel_de_write(dev_priv, PIPE_MISC(crtc->pipe), val);
3297 }
3298
3299 int bdw_get_pipe_misc_bpp(struct intel_crtc *crtc)
3300 {
3301 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3302 u32 tmp;
3303
3304 tmp = intel_de_read(dev_priv, PIPE_MISC(crtc->pipe));
3305
3306 switch (tmp & PIPE_MISC_BPC_MASK) {
3307 case PIPE_MISC_BPC_6:
3308 return 18;
3309 case PIPE_MISC_BPC_8:
3310 return 24;
3311 case PIPE_MISC_BPC_10:
3312 return 30;
3313 /*
3314 * PORT OUTPUT 12 BPC defined for ADLP+.
3315 *
3316 * TODO:
3317 * For previous platforms with DSI interface, bits 5:7
3318 * are used for storing pipe_bpp irrespective of dithering.
3319 * Since the value of 12 BPC is not defined for these bits
3320 * on older platforms, we need to find a workaround for 12 BPC
3321 * MIPI DSI HW readout.
3322 */
3323 case PIPE_MISC_BPC_12_ADLP:
3324 if (DISPLAY_VER(dev_priv) >= 13)
3325 return 36;
3326 fallthrough;
3327 default:
3328 MISSING_CASE(tmp);
3329 return 0;
3330 }
3331 }
3332
3333 int ilk_get_lanes_required(int target_clock, int link_bw, int bpp)
3334 {
3335 /*
3336 * Account for spread spectrum to avoid
3337 * oversubscribing the link. Max center spread
3338 * is 2.5%; use 5% for safety's sake.
3339 */
3340 u32 bps = target_clock * bpp * 21 / 20;
3341 return DIV_ROUND_UP(bps, link_bw * 8);
3342 }
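
/*
 * Worked example (hypothetical numbers, for illustration only): a
 * 1080p60 stream with target_clock = 148500 kHz, bpp = 24 and a
 * 2.7 GHz FDI link (link_bw = 270000) gives
 *
 *   bps   = 148500 * 24 * 21 / 20 = 3742200
 *   lanes = DIV_ROUND_UP(3742200, 270000 * 8) = 2
 *
 * i.e. the 5% spread spectrum headroom is folded into bps before
 * dividing by the per-lane bandwidth.
 */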
3343
3344 void intel_get_m_n(struct drm_i915_private *i915,
3345 struct intel_link_m_n *m_n,
3346 i915_reg_t data_m_reg, i915_reg_t data_n_reg,
3347 i915_reg_t link_m_reg, i915_reg_t link_n_reg)
3348 {
3349 m_n->link_m = intel_de_read(i915, link_m_reg) & DATA_LINK_M_N_MASK;
3350 m_n->link_n = intel_de_read(i915, link_n_reg) & DATA_LINK_M_N_MASK;
3351 m_n->data_m = intel_de_read(i915, data_m_reg) & DATA_LINK_M_N_MASK;
3352 m_n->data_n = intel_de_read(i915, data_n_reg) & DATA_LINK_M_N_MASK;
3353 m_n->tu = REG_FIELD_GET(TU_SIZE_MASK, intel_de_read(i915, data_m_reg)) + 1;
3354 }
3355
3356 void intel_cpu_transcoder_get_m1_n1(struct intel_crtc *crtc,
3357 enum transcoder transcoder,
3358 struct intel_link_m_n *m_n)
3359 {
3360 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3361 enum pipe pipe = crtc->pipe;
3362
3363 if (DISPLAY_VER(dev_priv) >= 5)
3364 intel_get_m_n(dev_priv, m_n,
3365 PIPE_DATA_M1(dev_priv, transcoder),
3366 PIPE_DATA_N1(dev_priv, transcoder),
3367 PIPE_LINK_M1(dev_priv, transcoder),
3368 PIPE_LINK_N1(dev_priv, transcoder));
3369 else
3370 intel_get_m_n(dev_priv, m_n,
3371 PIPE_DATA_M_G4X(pipe), PIPE_DATA_N_G4X(pipe),
3372 PIPE_LINK_M_G4X(pipe), PIPE_LINK_N_G4X(pipe));
3373 }
3374
3375 void intel_cpu_transcoder_get_m2_n2(struct intel_crtc *crtc,
3376 enum transcoder transcoder,
3377 struct intel_link_m_n *m_n)
3378 {
3379 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3380
3381 if (!intel_cpu_transcoder_has_m2_n2(dev_priv, transcoder))
3382 return;
3383
3384 intel_get_m_n(dev_priv, m_n,
3385 PIPE_DATA_M2(dev_priv, transcoder),
3386 PIPE_DATA_N2(dev_priv, transcoder),
3387 PIPE_LINK_M2(dev_priv, transcoder),
3388 PIPE_LINK_N2(dev_priv, transcoder));
3389 }
3390
3391 static void ilk_get_pfit_config(struct intel_crtc_state *crtc_state)
3392 {
3393 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3394 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3395 u32 ctl, pos, size;
3396 enum pipe pipe;
3397
3398 ctl = intel_de_read(dev_priv, PF_CTL(crtc->pipe));
3399 if ((ctl & PF_ENABLE) == 0)
3400 return;
3401
3402 if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
3403 pipe = REG_FIELD_GET(PF_PIPE_SEL_MASK_IVB, ctl);
3404 else
3405 pipe = crtc->pipe;
3406
3407 crtc_state->pch_pfit.enabled = true;
3408
3409 pos = intel_de_read(dev_priv, PF_WIN_POS(crtc->pipe));
3410 size = intel_de_read(dev_priv, PF_WIN_SZ(crtc->pipe));
3411
3412 drm_rect_init(&crtc_state->pch_pfit.dst,
3413 REG_FIELD_GET(PF_WIN_XPOS_MASK, pos),
3414 REG_FIELD_GET(PF_WIN_YPOS_MASK, pos),
3415 REG_FIELD_GET(PF_WIN_XSIZE_MASK, size),
3416 REG_FIELD_GET(PF_WIN_YSIZE_MASK, size));
3417
3418 /*
3419 * We currently do not free assignments of panel fitters on
3420 * ivb/hsw (since we don't use the higher upscaling modes which
3421 * differentiate them) so just WARN about this case for now.
3422 */
3423 drm_WARN_ON(&dev_priv->drm, pipe != crtc->pipe);
3424 }
3425
3426 static bool ilk_get_pipe_config(struct intel_crtc *crtc,
3427 struct intel_crtc_state *pipe_config)
3428 {
3429 struct drm_device *dev = crtc->base.dev;
3430 struct drm_i915_private *dev_priv = to_i915(dev);
3431 enum intel_display_power_domain power_domain;
3432 intel_wakeref_t wakeref;
3433 u32 tmp;
3434 bool ret;
3435
3436 power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
3437 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
3438 if (!wakeref)
3439 return false;
3440
3441 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
3442 pipe_config->shared_dpll = NULL;
3443
3444 ret = false;
3445 tmp = intel_de_read(dev_priv,
3446 TRANSCONF(dev_priv, pipe_config->cpu_transcoder));
3447 if (!(tmp & TRANSCONF_ENABLE))
3448 goto out;
3449
3450 switch (tmp & TRANSCONF_BPC_MASK) {
3451 case TRANSCONF_BPC_6:
3452 pipe_config->pipe_bpp = 18;
3453 break;
3454 case TRANSCONF_BPC_8:
3455 pipe_config->pipe_bpp = 24;
3456 break;
3457 case TRANSCONF_BPC_10:
3458 pipe_config->pipe_bpp = 30;
3459 break;
3460 case TRANSCONF_BPC_12:
3461 pipe_config->pipe_bpp = 36;
3462 break;
3463 default:
3464 break;
3465 }
3466
3467 if (tmp & TRANSCONF_COLOR_RANGE_SELECT)
3468 pipe_config->limited_color_range = true;
3469
3470 switch (tmp & TRANSCONF_OUTPUT_COLORSPACE_MASK) {
3471 case TRANSCONF_OUTPUT_COLORSPACE_YUV601:
3472 case TRANSCONF_OUTPUT_COLORSPACE_YUV709:
3473 pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
3474 break;
3475 default:
3476 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
3477 break;
3478 }
3479
3480 pipe_config->sink_format = pipe_config->output_format;
3481
3482 pipe_config->gamma_mode = REG_FIELD_GET(TRANSCONF_GAMMA_MODE_MASK_ILK, tmp);
3483
3484 pipe_config->framestart_delay = REG_FIELD_GET(TRANSCONF_FRAME_START_DELAY_MASK, tmp) + 1;
3485
3486 pipe_config->msa_timing_delay = REG_FIELD_GET(TRANSCONF_MSA_TIMING_DELAY_MASK, tmp);
3487
3488 intel_color_get_config(pipe_config);
3489
3490 pipe_config->pixel_multiplier = 1;
3491
3492 ilk_pch_get_config(pipe_config);
3493
3494 intel_get_transcoder_timings(crtc, pipe_config);
3495 intel_get_pipe_src_size(crtc, pipe_config);
3496
3497 ilk_get_pfit_config(pipe_config);
3498
3499 ret = true;
3500
3501 out:
3502 intel_display_power_put(dev_priv, power_domain, wakeref);
3503
3504 return ret;
3505 }
3506
3507 static u8 joiner_pipes(struct drm_i915_private *i915)
3508 {
3509 u8 pipes;
3510
3511 if (DISPLAY_VER(i915) >= 12)
3512 pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D);
3513 else if (DISPLAY_VER(i915) >= 11)
3514 pipes = BIT(PIPE_B) | BIT(PIPE_C);
3515 else
3516 pipes = 0;
3517
3518 return pipes & DISPLAY_RUNTIME_INFO(i915)->pipe_mask;
3519 }
3520
3521 static bool transcoder_ddi_func_is_enabled(struct drm_i915_private *dev_priv,
3522 enum transcoder cpu_transcoder)
3523 {
3524 enum intel_display_power_domain power_domain;
3525 intel_wakeref_t wakeref;
3526 u32 tmp = 0;
3527
3528 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
3529
3530 with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref)
3531 tmp = intel_de_read(dev_priv,
3532 TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder));
3533
3534 return tmp & TRANS_DDI_FUNC_ENABLE;
3535 }
3536
3537 static void enabled_joiner_pipes(struct drm_i915_private *dev_priv,
3538 u8 *primary_pipes, u8 *secondary_pipes)
3539 {
3540 struct intel_crtc *crtc;
3541
3542 *primary_pipes = 0;
3543 *secondary_pipes = 0;
3544
3545 for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, crtc,
3546 joiner_pipes(dev_priv)) {
3547 enum intel_display_power_domain power_domain;
3548 enum pipe pipe = crtc->pipe;
3549 intel_wakeref_t wakeref;
3550
3551 power_domain = intel_dsc_power_domain(crtc, (enum transcoder) pipe);
3552 with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref) {
3553 u32 tmp = intel_de_read(dev_priv, ICL_PIPE_DSS_CTL1(pipe));
3554
3555 if (!(tmp & BIG_JOINER_ENABLE))
3556 continue;
3557
3558 if (tmp & PRIMARY_BIG_JOINER_ENABLE)
3559 *primary_pipes |= BIT(pipe);
3560 else
3561 *secondary_pipes |= BIT(pipe);
3562 }
3563
3564 if (DISPLAY_VER(dev_priv) < 13)
3565 continue;
3566
3567 power_domain = POWER_DOMAIN_PIPE(pipe);
3568 with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref) {
3569 u32 tmp = intel_de_read(dev_priv, ICL_PIPE_DSS_CTL1(pipe));
3570
3571 if (tmp & UNCOMPRESSED_JOINER_PRIMARY)
3572 *primary_pipes |= BIT(pipe);
3573 if (tmp & UNCOMPRESSED_JOINER_SECONDARY)
3574 *secondary_pipes |= BIT(pipe);
3575 }
3576 }
3577
3578 /* Joiner pipes should always be consecutive primary and secondary */
3579 drm_WARN(&dev_priv->drm, *secondary_pipes != *primary_pipes << 1,
3580 "Joiner misconfigured (primary pipes 0x%x, secondary pipes 0x%x)\n",
3581 *primary_pipes, *secondary_pipes);
3582 }
3583
3584 static enum pipe get_joiner_primary_pipe(enum pipe pipe, u8 primary_pipes, u8 secondary_pipes)
3585 {
3586 if ((secondary_pipes & BIT(pipe)) == 0)
3587 return pipe;
3588
3589 /* ignore everything above our pipe */
3590 primary_pipes &= ~GENMASK(7, pipe);
3591
3592 /* highest remaining bit should be our primary pipe */
3593 return fls(primary_pipes) - 1;
3594 }
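
/*
 * Example (hypothetical readout, for illustration): with joiners
 * active on pipes A+B and C+D, primary_pipes = 0b0101 and
 * secondary_pipes = 0b1010. For secondary pipe D (bit 3), masking
 * off bits 3..7 leaves 0b0101 & 0b0111 = 0b0101, and
 * fls(0b0101) - 1 = 2, i.e. PIPE_C is D's primary.
 */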
3595
3596 static u8 get_joiner_secondary_pipes(enum pipe pipe, u8 primary_pipes, u8 secondary_pipes)
3597 {
3598 enum pipe primary_pipe, next_primary_pipe;
3599
3600 primary_pipe = get_joiner_primary_pipe(pipe, primary_pipes, secondary_pipes);
3601
3602 if ((primary_pipes & BIT(primary_pipe)) == 0)
3603 return 0;
3604
3605 /* ignore our primary pipe and everything below it */
3606 primary_pipes &= ~GENMASK(primary_pipe, 0);
3607 /* make sure a high bit is set for the ffs() */
3608 primary_pipes |= BIT(7);
3609 /* lowest remaining bit should be the next primary pipe */
3610 next_primary_pipe = ffs(primary_pipes) - 1;
3611
3612 return secondary_pipes & GENMASK(next_primary_pipe - 1, primary_pipe);
3613 }
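
/*
 * Continuing the example above (primary_pipes = 0b0101,
 * secondary_pipes = 0b1010): for pipe B the primary is A (bit 0),
 * so primary_pipes becomes 0b0100 | BIT(7), ffs() finds PIPE_C as
 * the next primary, and the returned mask is
 * 0b1010 & GENMASK(1, 0) = BIT(PIPE_B).
 */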
3614
3615 static u8 hsw_panel_transcoders(struct drm_i915_private *i915)
3616 {
3617 u8 panel_transcoder_mask = BIT(TRANSCODER_EDP);
3618
3619 if (DISPLAY_VER(i915) >= 11)
3620 panel_transcoder_mask |= BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);
3621
3622 return panel_transcoder_mask;
3623 }
3624
3625 static u8 hsw_enabled_transcoders(struct intel_crtc *crtc)
3626 {
3627 struct drm_device *dev = crtc->base.dev;
3628 struct drm_i915_private *dev_priv = to_i915(dev);
3629 u8 panel_transcoder_mask = hsw_panel_transcoders(dev_priv);
3630 enum transcoder cpu_transcoder;
3631 u8 primary_pipes, secondary_pipes;
3632 u8 enabled_transcoders = 0;
3633
3634 /*
3635 * XXX: Do intel_display_power_get_if_enabled before reading this (for
3636 * consistency and less surprising code; it's in an always-on power well).
3637 */
3638 for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder,
3639 panel_transcoder_mask) {
3640 enum intel_display_power_domain power_domain;
3641 intel_wakeref_t wakeref;
3642 enum pipe trans_pipe;
3643 u32 tmp = 0;
3644
3645 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
3646 with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref)
3647 tmp = intel_de_read(dev_priv,
3648 TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder));
3649
3650 if (!(tmp & TRANS_DDI_FUNC_ENABLE))
3651 continue;
3652
3653 switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
3654 default:
3655 drm_WARN(dev, 1,
3656 "unknown pipe linked to transcoder %s\n",
3657 transcoder_name(cpu_transcoder));
3658 fallthrough;
3659 case TRANS_DDI_EDP_INPUT_A_ONOFF:
3660 case TRANS_DDI_EDP_INPUT_A_ON:
3661 trans_pipe = PIPE_A;
3662 break;
3663 case TRANS_DDI_EDP_INPUT_B_ONOFF:
3664 trans_pipe = PIPE_B;
3665 break;
3666 case TRANS_DDI_EDP_INPUT_C_ONOFF:
3667 trans_pipe = PIPE_C;
3668 break;
3669 case TRANS_DDI_EDP_INPUT_D_ONOFF:
3670 trans_pipe = PIPE_D;
3671 break;
3672 }
3673
3674 if (trans_pipe == crtc->pipe)
3675 enabled_transcoders |= BIT(cpu_transcoder);
3676 }
3677
3678 /* single pipe or joiner primary */
3679 cpu_transcoder = (enum transcoder) crtc->pipe;
3680 if (transcoder_ddi_func_is_enabled(dev_priv, cpu_transcoder))
3681 enabled_transcoders |= BIT(cpu_transcoder);
3682
3683 /* joiner secondary -> consider the primary pipe's transcoder as well */
3684 enabled_joiner_pipes(dev_priv, &primary_pipes, &secondary_pipes);
3685 if (secondary_pipes & BIT(crtc->pipe)) {
3686 cpu_transcoder = (enum transcoder)
3687 get_joiner_primary_pipe(crtc->pipe, primary_pipes, secondary_pipes);
3688 if (transcoder_ddi_func_is_enabled(dev_priv, cpu_transcoder))
3689 enabled_transcoders |= BIT(cpu_transcoder);
3690 }
3691
3692 return enabled_transcoders;
3693 }
3694
3695 static bool has_edp_transcoders(u8 enabled_transcoders)
3696 {
3697 return enabled_transcoders & BIT(TRANSCODER_EDP);
3698 }
3699
3700 static bool has_dsi_transcoders(u8 enabled_transcoders)
3701 {
3702 return enabled_transcoders & (BIT(TRANSCODER_DSI_0) |
3703 BIT(TRANSCODER_DSI_1));
3704 }
3705
3706 static bool has_pipe_transcoders(u8 enabled_transcoders)
3707 {
3708 return enabled_transcoders & ~(BIT(TRANSCODER_EDP) |
3709 BIT(TRANSCODER_DSI_0) |
3710 BIT(TRANSCODER_DSI_1));
3711 }
3712
3713 static void assert_enabled_transcoders(struct drm_i915_private *i915,
3714 u8 enabled_transcoders)
3715 {
3716 /* Only one type of transcoder please */
3717 drm_WARN_ON(&i915->drm,
3718 has_edp_transcoders(enabled_transcoders) +
3719 has_dsi_transcoders(enabled_transcoders) +
3720 has_pipe_transcoders(enabled_transcoders) > 1);
3721
3722 /* Only DSI transcoders can be ganged */
3723 drm_WARN_ON(&i915->drm,
3724 !has_dsi_transcoders(enabled_transcoders) &&
3725 !is_power_of_2(enabled_transcoders));
3726 }
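
/*
 * For illustration: BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1) is
 * an acceptable (ganged DSI) readout despite having two bits set,
 * while e.g. BIT(TRANSCODER_A) | BIT(TRANSCODER_B) would trip the
 * is_power_of_2() check above.
 */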
3727
3728 static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
3729 struct intel_crtc_state *pipe_config,
3730 struct intel_display_power_domain_set *power_domain_set)
3731 {
3732 struct drm_device *dev = crtc->base.dev;
3733 struct drm_i915_private *dev_priv = to_i915(dev);
3734 unsigned long enabled_transcoders;
3735 u32 tmp;
3736
3737 enabled_transcoders = hsw_enabled_transcoders(crtc);
3738 if (!enabled_transcoders)
3739 return false;
3740
3741 assert_enabled_transcoders(dev_priv, enabled_transcoders);
3742
3743 /*
3744 * With the exception of DSI we should only ever have
3745 * a single enabled transcoder. With DSI let's just
3746 * pick the first one.
3747 */
3748 pipe_config->cpu_transcoder = ffs(enabled_transcoders) - 1;
3749
3750 if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
3751 POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
3752 return false;
3753
3754 if (hsw_panel_transcoders(dev_priv) & BIT(pipe_config->cpu_transcoder)) {
3755 tmp = intel_de_read(dev_priv,
3756 TRANS_DDI_FUNC_CTL(dev_priv, pipe_config->cpu_transcoder));
3757
3758 if ((tmp & TRANS_DDI_EDP_INPUT_MASK) == TRANS_DDI_EDP_INPUT_A_ONOFF)
3759 pipe_config->pch_pfit.force_thru = true;
3760 }
3761
3762 tmp = intel_de_read(dev_priv,
3763 TRANSCONF(dev_priv, pipe_config->cpu_transcoder));
3764
3765 return tmp & TRANSCONF_ENABLE;
3766 }
3767
3768 static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
3769 struct intel_crtc_state *pipe_config,
3770 struct intel_display_power_domain_set *power_domain_set)
3771 {
3772 struct intel_display *display = to_intel_display(crtc);
3773 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3774 enum transcoder cpu_transcoder;
3775 enum port port;
3776 u32 tmp;
3777
3778 for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
3779 if (port == PORT_A)
3780 cpu_transcoder = TRANSCODER_DSI_A;
3781 else
3782 cpu_transcoder = TRANSCODER_DSI_C;
3783
3784 if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
3785 POWER_DOMAIN_TRANSCODER(cpu_transcoder)))
3786 continue;
3787
3788 /*
3789 * The PLL needs to be enabled with a valid divider
3790 * configuration, otherwise accessing DSI registers will hang
3791 * the machine. See BSpec North Display Engine
3792 * registers/MIPI[BXT]. We can break out here early, since we
3793 * need the same DSI PLL to be enabled for both DSI ports.
3794 */
3795 if (!bxt_dsi_pll_is_enabled(dev_priv))
3796 break;
3797
3798 /* XXX: this works for video mode only */
3799 tmp = intel_de_read(display, BXT_MIPI_PORT_CTRL(port));
3800 if (!(tmp & DPI_ENABLE))
3801 continue;
3802
3803 tmp = intel_de_read(display, MIPI_CTRL(display, port));
3804 if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
3805 continue;
3806
3807 pipe_config->cpu_transcoder = cpu_transcoder;
3808 break;
3809 }
3810
3811 return transcoder_is_dsi(pipe_config->cpu_transcoder);
3812 }
3813
3814 static void intel_joiner_get_config(struct intel_crtc_state *crtc_state)
3815 {
3816 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3817 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
3818 u8 primary_pipes, secondary_pipes;
3819 enum pipe pipe = crtc->pipe;
3820
3821 enabled_joiner_pipes(i915, &primary_pipes, &secondary_pipes);
3822
3823 if (((primary_pipes | secondary_pipes) & BIT(pipe)) == 0)
3824 return;
3825
3826 crtc_state->joiner_pipes =
3827 BIT(get_joiner_primary_pipe(pipe, primary_pipes, secondary_pipes)) |
3828 get_joiner_secondary_pipes(pipe, primary_pipes, secondary_pipes);
3829 }
3830
3831 static bool hsw_get_pipe_config(struct intel_crtc *crtc,
3832 struct intel_crtc_state *pipe_config)
3833 {
3834 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3835 bool active;
3836 u32 tmp;
3837
3838 if (!intel_display_power_get_in_set_if_enabled(dev_priv, &crtc->hw_readout_power_domains,
3839 POWER_DOMAIN_PIPE(crtc->pipe)))
3840 return false;
3841
3842 pipe_config->shared_dpll = NULL;
3843
3844 active = hsw_get_transcoder_state(crtc, pipe_config, &crtc->hw_readout_power_domains);
3845
3846 if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
3847 bxt_get_dsi_transcoder_state(crtc, pipe_config, &crtc->hw_readout_power_domains)) {
3848 drm_WARN_ON(&dev_priv->drm, active);
3849 active = true;
3850 }
3851
3852 if (!active)
3853 goto out;
3854
3855 intel_joiner_get_config(pipe_config);
3856 intel_dsc_get_config(pipe_config);
3857
3858 if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
3859 DISPLAY_VER(dev_priv) >= 11)
3860 intel_get_transcoder_timings(crtc, pipe_config);
3861
3862 if (HAS_VRR(dev_priv) && !transcoder_is_dsi(pipe_config->cpu_transcoder))
3863 intel_vrr_get_config(pipe_config);
3864
3865 intel_get_pipe_src_size(crtc, pipe_config);
3866
3867 if (IS_HASWELL(dev_priv)) {
3868 u32 tmp = intel_de_read(dev_priv,
3869 TRANSCONF(dev_priv, pipe_config->cpu_transcoder));
3870
3871 if (tmp & TRANSCONF_OUTPUT_COLORSPACE_YUV_HSW)
3872 pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
3873 else
3874 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
3875 } else {
3876 pipe_config->output_format =
3877 bdw_get_pipe_misc_output_format(crtc);
3878 }
3879
3880 pipe_config->sink_format = pipe_config->output_format;
3881
3882 intel_color_get_config(pipe_config);
3883
3884 tmp = intel_de_read(dev_priv, WM_LINETIME(crtc->pipe));
3885 pipe_config->linetime = REG_FIELD_GET(HSW_LINETIME_MASK, tmp);
3886 if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
3887 pipe_config->ips_linetime =
3888 REG_FIELD_GET(HSW_IPS_LINETIME_MASK, tmp);
3889
3890 if (intel_display_power_get_in_set_if_enabled(dev_priv, &crtc->hw_readout_power_domains,
3891 POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe))) {
3892 if (DISPLAY_VER(dev_priv) >= 9)
3893 skl_scaler_get_config(pipe_config);
3894 else
3895 ilk_get_pfit_config(pipe_config);
3896 }
3897
3898 hsw_ips_get_config(pipe_config);
3899
3900 if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
3901 !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
3902 pipe_config->pixel_multiplier =
3903 intel_de_read(dev_priv,
3904 TRANS_MULT(dev_priv, pipe_config->cpu_transcoder)) + 1;
3905 } else {
3906 pipe_config->pixel_multiplier = 1;
3907 }
3908
3909 if (!transcoder_is_dsi(pipe_config->cpu_transcoder)) {
3910 tmp = intel_de_read(dev_priv, hsw_chicken_trans_reg(dev_priv, pipe_config->cpu_transcoder));
3911
3912 pipe_config->framestart_delay = REG_FIELD_GET(HSW_FRAME_START_DELAY_MASK, tmp) + 1;
3913 } else {
3914 /* no idea if this is correct */
3915 pipe_config->framestart_delay = 1;
3916 }
3917
3918 out:
3919 intel_display_power_put_all_in_set(dev_priv, &crtc->hw_readout_power_domains);
3920
3921 return active;
3922 }
3923
3924 bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state)
3925 {
3926 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3927 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
3928
3929 if (!i915->display.funcs.display->get_pipe_config(crtc, crtc_state))
3930 return false;
3931
3932 crtc_state->hw.active = true;
3933
3934 intel_crtc_readout_derived_state(crtc_state);
3935
3936 return true;
3937 }
3938
3939 int intel_dotclock_calculate(int link_freq,
3940 const struct intel_link_m_n *m_n)
3941 {
3942 /*
3943 * The calculation for the data clock -> pixel clock is:
3944 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
3945 * But we want to avoid losing precision if possible, so:
3946 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
3947 *
3948 * and for link freq (10 kbit/s units) -> pixel clock it is:
3949 * link_symbol_clock = link_freq * 10 / link_symbol_size
3950 * pixel_clock = (m * link_symbol_clock) / n
3951 * or for more precision:
3952 * pixel_clock = (m * link_freq * 10) / (n * link_symbol_size)
3953 */
3954
3955 if (!m_n->link_n)
3956 return 0;
3957
3958 return DIV_ROUND_UP_ULL(mul_u32_u32(m_n->link_m, link_freq * 10),
3959 m_n->link_n * intel_dp_link_symbol_size(link_freq));
3960 }
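
/*
 * Illustration (hypothetical values): for an 8b/10b DP link at HBR
 * (link_freq = 270000, where intel_dp_link_symbol_size() returns the
 * 8b/10b symbol size of 10) programmed with link_m:link_n =
 * 148500:270000, this yields
 * (148500 * 270000 * 10) / (270000 * 10) = 148500 kHz.
 */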
3961
3962 int intel_crtc_dotclock(const struct intel_crtc_state *pipe_config)
3963 {
3964 int dotclock;
3965
3966 if (intel_crtc_has_dp_encoder(pipe_config))
3967 dotclock = intel_dotclock_calculate(pipe_config->port_clock,
3968 &pipe_config->dp_m_n);
3969 else if (pipe_config->has_hdmi_sink && pipe_config->pipe_bpp > 24)
3970 dotclock = DIV_ROUND_CLOSEST(pipe_config->port_clock * 24,
3971 pipe_config->pipe_bpp);
3972 else
3973 dotclock = pipe_config->port_clock;
3974
3975 if (pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 &&
3976 !intel_crtc_has_dp_encoder(pipe_config))
3977 dotclock *= 2;
3978
3979 if (pipe_config->pixel_multiplier)
3980 dotclock /= pipe_config->pixel_multiplier;
3981
3982 return dotclock;
3983 }
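
/*
 * Example (hypothetical): a 12bpc deep color HDMI sink clocks the
 * port at 1.5x the pixel rate, so port_clock = 222750 with
 * pipe_bpp = 36 recovers 222750 * 24 / 36 = 148500 kHz.
 */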
3984
3985 /* Returns the currently programmed mode of the given encoder. */
3986 struct drm_display_mode *
3987 intel_encoder_current_mode(struct intel_encoder *encoder)
3988 {
3989 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3990 struct intel_crtc_state *crtc_state;
3991 struct drm_display_mode *mode;
3992 struct intel_crtc *crtc;
3993 enum pipe pipe;
3994
3995 if (!encoder->get_hw_state(encoder, &pipe))
3996 return NULL;
3997
3998 crtc = intel_crtc_for_pipe(dev_priv, pipe);
3999
4000 mode = kzalloc(sizeof(*mode), GFP_KERNEL);
4001 if (!mode)
4002 return NULL;
4003
4004 crtc_state = intel_crtc_state_alloc(crtc);
4005 if (!crtc_state) {
4006 kfree(mode);
4007 return NULL;
4008 }
4009
4010 if (!intel_crtc_get_pipe_config(crtc_state)) {
4011 intel_crtc_destroy_state(&crtc->base, &crtc_state->uapi);
4012 kfree(mode);
4013 return NULL;
4014 }
4015
4016 intel_encoder_get_config(encoder, crtc_state);
4017
4018 intel_mode_from_crtc_timings(mode, &crtc_state->hw.adjusted_mode);
4019
4020 intel_crtc_destroy_state(&crtc->base, &crtc_state->uapi);
4021
4022 return mode;
4023 }
4024
4025 static bool encoders_cloneable(const struct intel_encoder *a,
4026 const struct intel_encoder *b)
4027 {
4028 /* masks could be asymmetric, so check both ways */
4029 return a == b || (a->cloneable & BIT(b->type) &&
4030 b->cloneable & BIT(a->type));
4031 }
4032
4033 static bool check_single_encoder_cloning(struct intel_atomic_state *state,
4034 struct intel_crtc *crtc,
4035 struct intel_encoder *encoder)
4036 {
4037 struct intel_encoder *source_encoder;
4038 struct drm_connector *connector;
4039 struct drm_connector_state *connector_state;
4040 int i;
4041
4042 for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
4043 if (connector_state->crtc != &crtc->base)
4044 continue;
4045
4046 source_encoder =
4047 to_intel_encoder(connector_state->best_encoder);
4048 if (!encoders_cloneable(encoder, source_encoder))
4049 return false;
4050 }
4051
4052 return true;
4053 }
4054
4055 static int icl_add_linked_planes(struct intel_atomic_state *state)
4056 {
4057 struct intel_plane *plane, *linked;
4058 struct intel_plane_state *plane_state, *linked_plane_state;
4059 int i;
4060
4061 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
4062 linked = plane_state->planar_linked_plane;
4063
4064 if (!linked)
4065 continue;
4066
4067 linked_plane_state = intel_atomic_get_plane_state(state, linked);
4068 if (IS_ERR(linked_plane_state))
4069 return PTR_ERR(linked_plane_state);
4070
4071 drm_WARN_ON(state->base.dev,
4072 linked_plane_state->planar_linked_plane != plane);
4073 drm_WARN_ON(state->base.dev,
4074 linked_plane_state->planar_slave == plane_state->planar_slave);
4075 }
4076
4077 return 0;
4078 }
4079
4080 static int icl_check_nv12_planes(struct intel_atomic_state *state,
4081 struct intel_crtc *crtc)
4082 {
4083 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
4084 struct intel_crtc_state *crtc_state =
4085 intel_atomic_get_new_crtc_state(state, crtc);
4086 struct intel_plane *plane, *linked;
4087 struct intel_plane_state *plane_state;
4088 int i;
4089
4090 if (DISPLAY_VER(dev_priv) < 11)
4091 return 0;
4092
4093 /*
4094 * Destroy all old plane links and make the slave plane invisible
4095 * in the crtc_state->active_planes mask.
4096 */
4097 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
4098 if (plane->pipe != crtc->pipe || !plane_state->planar_linked_plane)
4099 continue;
4100
4101 plane_state->planar_linked_plane = NULL;
4102 if (plane_state->planar_slave && !plane_state->uapi.visible) {
4103 crtc_state->enabled_planes &= ~BIT(plane->id);
4104 crtc_state->active_planes &= ~BIT(plane->id);
4105 crtc_state->update_planes |= BIT(plane->id);
4106 crtc_state->data_rate[plane->id] = 0;
4107 crtc_state->rel_data_rate[plane->id] = 0;
4108 }
4109
4110 plane_state->planar_slave = false;
4111 }
4112
4113 if (!crtc_state->nv12_planes)
4114 return 0;
4115
4116 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
4117 struct intel_plane_state *linked_state = NULL;
4118
4119 if (plane->pipe != crtc->pipe ||
4120 !(crtc_state->nv12_planes & BIT(plane->id)))
4121 continue;
4122
4123 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
4124 if (!icl_is_nv12_y_plane(dev_priv, linked->id))
4125 continue;
4126
4127 if (crtc_state->active_planes & BIT(linked->id))
4128 continue;
4129
4130 linked_state = intel_atomic_get_plane_state(state, linked);
4131 if (IS_ERR(linked_state))
4132 return PTR_ERR(linked_state);
4133
4134 break;
4135 }
4136
4137 if (!linked_state) {
4138 drm_dbg_kms(&dev_priv->drm,
4139 "Need %d free Y planes for planar YUV\n",
4140 hweight8(crtc_state->nv12_planes));
4141
4142 return -EINVAL;
4143 }
4144
4145 plane_state->planar_linked_plane = linked;
4146
4147 linked_state->planar_slave = true;
4148 linked_state->planar_linked_plane = plane;
4149 crtc_state->enabled_planes |= BIT(linked->id);
4150 crtc_state->active_planes |= BIT(linked->id);
4151 crtc_state->update_planes |= BIT(linked->id);
4152 crtc_state->data_rate[linked->id] =
4153 crtc_state->data_rate_y[plane->id];
4154 crtc_state->rel_data_rate[linked->id] =
4155 crtc_state->rel_data_rate_y[plane->id];
4156 drm_dbg_kms(&dev_priv->drm, "Using %s as Y plane for %s\n",
4157 linked->base.name, plane->base.name);
4158
4159 /* Copy parameters to slave plane */
4160 linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE;
4161 linked_state->color_ctl = plane_state->color_ctl;
4162 linked_state->view = plane_state->view;
4163 linked_state->decrypt = plane_state->decrypt;
4164
4165 intel_plane_copy_hw_state(linked_state, plane_state);
4166 linked_state->uapi.src = plane_state->uapi.src;
4167 linked_state->uapi.dst = plane_state->uapi.dst;
4168
4169 if (icl_is_hdr_plane(dev_priv, plane->id)) {
4170 if (linked->id == PLANE_7)
4171 plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_7_ICL;
4172 else if (linked->id == PLANE_6)
4173 plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_6_ICL;
4174 else if (linked->id == PLANE_5)
4175 plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_5_RKL;
4176 else if (linked->id == PLANE_4)
4177 plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_4_RKL;
4178 else
4179 MISSING_CASE(linked->id);
4180 }
4181 }
4182
4183 return 0;
4184 }
4185
4186 static u16 hsw_linetime_wm(const struct intel_crtc_state *crtc_state)
4187 {
4188 const struct drm_display_mode *pipe_mode =
4189 &crtc_state->hw.pipe_mode;
4190 int linetime_wm;
4191
4192 if (!crtc_state->hw.enable)
4193 return 0;
4194
4195 linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
4196 pipe_mode->crtc_clock);
4197
4198 return min(linetime_wm, 0x1ff);
4199 }
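
/*
 * Example (hypothetical mode): crtc_htotal = 2200 and crtc_clock =
 * 148500 kHz give DIV_ROUND_CLOSEST(2200 * 1000 * 8, 148500) = 119,
 * i.e. the line time expressed in 1/8 us units, clamped to the
 * 9-bit register field (0x1ff).
 */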
4200
4201 static u16 hsw_ips_linetime_wm(const struct intel_crtc_state *crtc_state,
4202 const struct intel_cdclk_state *cdclk_state)
4203 {
4204 const struct drm_display_mode *pipe_mode =
4205 &crtc_state->hw.pipe_mode;
4206 int linetime_wm;
4207
4208 if (!crtc_state->hw.enable)
4209 return 0;
4210
4211 linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
4212 cdclk_state->logical.cdclk);
4213
4214 return min(linetime_wm, 0x1ff);
4215 }
4216
4217 static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state)
4218 {
4219 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4220 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4221 const struct drm_display_mode *pipe_mode =
4222 &crtc_state->hw.pipe_mode;
4223 int linetime_wm;
4224
4225 if (!crtc_state->hw.enable)
4226 return 0;
4227
4228 linetime_wm = DIV_ROUND_UP(pipe_mode->crtc_htotal * 1000 * 8,
4229 crtc_state->pixel_rate);
4230
4231 /* Display WA #1135: BXT:ALL GLK:ALL */
4232 if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
4233 skl_watermark_ipc_enabled(dev_priv))
4234 linetime_wm /= 2;
4235
4236 return min(linetime_wm, 0x1ff);
4237 }
4238
4239 static int hsw_compute_linetime_wm(struct intel_atomic_state *state,
4240 struct intel_crtc *crtc)
4241 {
4242 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4243 struct intel_crtc_state *crtc_state =
4244 intel_atomic_get_new_crtc_state(state, crtc);
4245 const struct intel_cdclk_state *cdclk_state;
4246
4247 if (DISPLAY_VER(dev_priv) >= 9)
4248 crtc_state->linetime = skl_linetime_wm(crtc_state);
4249 else
4250 crtc_state->linetime = hsw_linetime_wm(crtc_state);
4251
4252 if (!hsw_crtc_supports_ips(crtc))
4253 return 0;
4254
4255 cdclk_state = intel_atomic_get_cdclk_state(state);
4256 if (IS_ERR(cdclk_state))
4257 return PTR_ERR(cdclk_state);
4258
4259 crtc_state->ips_linetime = hsw_ips_linetime_wm(crtc_state,
4260 cdclk_state);
4261
4262 return 0;
4263 }
4264
4265 static int intel_crtc_atomic_check(struct intel_atomic_state *state,
4266 struct intel_crtc *crtc)
4267 {
4268 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4269 struct intel_crtc_state *crtc_state =
4270 intel_atomic_get_new_crtc_state(state, crtc);
4271 int ret;
4272
4273 if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv) &&
4274 intel_crtc_needs_modeset(crtc_state) &&
4275 !crtc_state->hw.active)
4276 crtc_state->update_wm_post = true;
4277
4278 if (intel_crtc_needs_modeset(crtc_state)) {
4279 ret = intel_dpll_crtc_get_shared_dpll(state, crtc);
4280 if (ret)
4281 return ret;
4282 }
4283
4284 ret = intel_color_check(state, crtc);
4285 if (ret)
4286 return ret;
4287
4288 ret = intel_compute_pipe_wm(state, crtc);
4289 if (ret) {
4290 drm_dbg_kms(&dev_priv->drm,
4291 "Target pipe watermarks are invalid\n");
4292 return ret;
4293 }
4294
4295 /*
4296 * Calculate 'intermediate' watermarks that satisfy both the
4297 * old state and the new state. We can program these
4298 * immediately.
4299 */
4300 ret = intel_compute_intermediate_wm(state, crtc);
4301 if (ret) {
4302 drm_dbg_kms(&dev_priv->drm,
4303 "No valid intermediate pipe watermarks are possible\n");
4304 return ret;
4305 }
4306
4307 if (DISPLAY_VER(dev_priv) >= 9) {
4308 if (intel_crtc_needs_modeset(crtc_state) ||
4309 intel_crtc_needs_fastset(crtc_state)) {
4310 ret = skl_update_scaler_crtc(crtc_state);
4311 if (ret)
4312 return ret;
4313 }
4314
4315 ret = intel_atomic_setup_scalers(dev_priv, crtc, crtc_state);
4316 if (ret)
4317 return ret;
4318 }
4319
4320 if (HAS_IPS(dev_priv)) {
4321 ret = hsw_ips_compute_config(state, crtc);
4322 if (ret)
4323 return ret;
4324 }
4325
4326 if (DISPLAY_VER(dev_priv) >= 9 ||
4327 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
4328 ret = hsw_compute_linetime_wm(state, crtc);
4329 if (ret)
4330 return ret;
4331
4332 }
4333
4334 ret = intel_psr2_sel_fetch_update(state, crtc);
4335 if (ret)
4336 return ret;
4337
4338 return 0;
4339 }
4340
4341 static int
4342 compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
4343 struct intel_crtc_state *crtc_state)
4344 {
4345 struct drm_connector *connector = conn_state->connector;
4346 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
4347 const struct drm_display_info *info = &connector->display_info;
4348 int bpp;
4349
4350 switch (conn_state->max_bpc) {
4351 case 6 ... 7:
4352 bpp = 6 * 3;
4353 break;
4354 case 8 ... 9:
4355 bpp = 8 * 3;
4356 break;
4357 case 10 ... 11:
4358 bpp = 10 * 3;
4359 break;
4360 case 12 ... 16:
4361 bpp = 12 * 3;
4362 break;
4363 default:
4364 MISSING_CASE(conn_state->max_bpc);
4365 return -EINVAL;
4366 }
4367
4368 if (bpp < crtc_state->pipe_bpp) {
4369 drm_dbg_kms(&i915->drm,
4370 "[CONNECTOR:%d:%s] Limiting display bpp to %d "
4371 "(EDID bpp %d, max requested bpp %d, max platform bpp %d)\n",
4372 connector->base.id, connector->name,
4373 bpp, 3 * info->bpc,
4374 3 * conn_state->max_requested_bpc,
4375 crtc_state->pipe_bpp);
4376
4377 crtc_state->pipe_bpp = bpp;
4378 }
4379
4380 return 0;
4381 }
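
/*
 * E.g. (hypothetical) a connector with max_bpc = 10 maps to bpp = 30
 * above; if the platform baseline pipe_bpp was 36 (12bpc), it is
 * clamped down to 30 here.
 */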
4382
4383 static int
4384 compute_baseline_pipe_bpp(struct intel_atomic_state *state,
4385 struct intel_crtc *crtc)
4386 {
4387 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4388 struct intel_crtc_state *crtc_state =
4389 intel_atomic_get_new_crtc_state(state, crtc);
4390 struct drm_connector *connector;
4391 struct drm_connector_state *connector_state;
4392 int bpp, i;
4393
4394 if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
4395 IS_CHERRYVIEW(dev_priv)))
4396 bpp = 10*3;
4397 else if (DISPLAY_VER(dev_priv) >= 5)
4398 bpp = 12*3;
4399 else
4400 bpp = 8*3;
4401
4402 crtc_state->pipe_bpp = bpp;
4403
4404 /* Clamp display bpp to connector max bpp */
4405 for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
4406 int ret;
4407
4408 if (connector_state->crtc != &crtc->base)
4409 continue;
4410
4411 ret = compute_sink_pipe_bpp(connector_state, crtc_state);
4412 if (ret)
4413 return ret;
4414 }
4415
4416 return 0;
4417 }
4418
4419 static bool check_digital_port_conflicts(struct intel_atomic_state *state)
4420 {
4421 struct drm_device *dev = state->base.dev;
4422 struct drm_connector *connector;
4423 struct drm_connector_list_iter conn_iter;
4424 unsigned int used_ports = 0;
4425 unsigned int used_mst_ports = 0;
4426 bool ret = true;
4427
4428 /*
4429 * We're going to peek into connector->state,
4430 * hence connection_mutex must be held.
4431 */
4432 drm_modeset_lock_assert_held(&dev->mode_config.connection_mutex);
4433
4434 /*
4435 * Walk the connector list instead of the encoder
4436 * list to detect the problem on ddi platforms
4437 * where there's just one encoder per digital port.
4438 */
4439 drm_connector_list_iter_begin(dev, &conn_iter);
4440 drm_for_each_connector_iter(connector, &conn_iter) {
4441 struct drm_connector_state *connector_state;
4442 struct intel_encoder *encoder;
4443
4444 connector_state =
4445 drm_atomic_get_new_connector_state(&state->base,
4446 connector);
4447 if (!connector_state)
4448 connector_state = connector->state;
4449
4450 if (!connector_state->best_encoder)
4451 continue;
4452
4453 encoder = to_intel_encoder(connector_state->best_encoder);
4454
4455 drm_WARN_ON(dev, !connector_state->crtc);
4456
4457 switch (encoder->type) {
4458 case INTEL_OUTPUT_DDI:
4459 if (drm_WARN_ON(dev, !HAS_DDI(to_i915(dev))))
4460 break;
4461 fallthrough;
4462 case INTEL_OUTPUT_DP:
4463 case INTEL_OUTPUT_HDMI:
4464 case INTEL_OUTPUT_EDP:
4465 /* the same port mustn't appear more than once */
4466 if (used_ports & BIT(encoder->port))
4467 ret = false;
4468
4469 used_ports |= BIT(encoder->port);
4470 break;
4471 case INTEL_OUTPUT_DP_MST:
4472 used_mst_ports |=
4473 1 << encoder->port;
4474 break;
4475 default:
4476 break;
4477 }
4478 }
4479 drm_connector_list_iter_end(&conn_iter);
4480
4481 /* can't mix MST and SST/HDMI on the same port */
4482 if (used_ports & used_mst_ports)
4483 return false;
4484
4485 return ret;
4486 }
4487
4488 static void
4489 intel_crtc_copy_uapi_to_hw_state_nomodeset(struct intel_atomic_state *state,
4490 struct intel_crtc *crtc)
4491 {
4492 struct intel_crtc_state *crtc_state =
4493 intel_atomic_get_new_crtc_state(state, crtc);
4494
4495 WARN_ON(intel_crtc_is_joiner_secondary(crtc_state));
4496
4497 drm_property_replace_blob(&crtc_state->hw.degamma_lut,
4498 crtc_state->uapi.degamma_lut);
4499 drm_property_replace_blob(&crtc_state->hw.gamma_lut,
4500 crtc_state->uapi.gamma_lut);
4501 drm_property_replace_blob(&crtc_state->hw.ctm,
4502 crtc_state->uapi.ctm);
4503 }
4504
4505 static void
4506 intel_crtc_copy_uapi_to_hw_state_modeset(struct intel_atomic_state *state,
4507 struct intel_crtc *crtc)
4508 {
4509 struct intel_crtc_state *crtc_state =
4510 intel_atomic_get_new_crtc_state(state, crtc);
4511
4512 WARN_ON(intel_crtc_is_joiner_secondary(crtc_state));
4513
4514 crtc_state->hw.enable = crtc_state->uapi.enable;
4515 crtc_state->hw.active = crtc_state->uapi.active;
4516 drm_mode_copy(&crtc_state->hw.mode,
4517 &crtc_state->uapi.mode);
4518 drm_mode_copy(&crtc_state->hw.adjusted_mode,
4519 &crtc_state->uapi.adjusted_mode);
4520 crtc_state->hw.scaling_filter = crtc_state->uapi.scaling_filter;
4521
4522 intel_crtc_copy_uapi_to_hw_state_nomodeset(state, crtc);
4523 }
4524
4525 static void
4526 copy_joiner_crtc_state_nomodeset(struct intel_atomic_state *state,
4527 struct intel_crtc *secondary_crtc)
4528 {
4529 struct intel_crtc_state *secondary_crtc_state =
4530 intel_atomic_get_new_crtc_state(state, secondary_crtc);
4531 struct intel_crtc *primary_crtc = intel_primary_crtc(secondary_crtc_state);
4532 const struct intel_crtc_state *primary_crtc_state =
4533 intel_atomic_get_new_crtc_state(state, primary_crtc);
4534
4535 drm_property_replace_blob(&secondary_crtc_state->hw.degamma_lut,
4536 primary_crtc_state->hw.degamma_lut);
4537 drm_property_replace_blob(&secondary_crtc_state->hw.gamma_lut,
4538 primary_crtc_state->hw.gamma_lut);
4539 drm_property_replace_blob(&secondary_crtc_state->hw.ctm,
4540 primary_crtc_state->hw.ctm);
4541
4542 secondary_crtc_state->uapi.color_mgmt_changed = primary_crtc_state->uapi.color_mgmt_changed;
4543 }
4544
4545 static int
4546 copy_joiner_crtc_state_modeset(struct intel_atomic_state *state,
4547 struct intel_crtc *secondary_crtc)
4548 {
4549 struct intel_crtc_state *secondary_crtc_state =
4550 intel_atomic_get_new_crtc_state(state, secondary_crtc);
4551 struct intel_crtc *primary_crtc = intel_primary_crtc(secondary_crtc_state);
4552 const struct intel_crtc_state *primary_crtc_state =
4553 intel_atomic_get_new_crtc_state(state, primary_crtc);
4554 struct intel_crtc_state *saved_state;
4555
4556 WARN_ON(primary_crtc_state->joiner_pipes !=
4557 secondary_crtc_state->joiner_pipes);
4558
4559 saved_state = kmemdup(primary_crtc_state, sizeof(*saved_state), GFP_KERNEL);
4560 if (!saved_state)
4561 return -ENOMEM;
4562
4563 /* preserve some things from the secondary crtc's original state */
4564 saved_state->uapi = secondary_crtc_state->uapi;
4565 saved_state->scaler_state = secondary_crtc_state->scaler_state;
4566 saved_state->shared_dpll = secondary_crtc_state->shared_dpll;
4567 saved_state->crc_enabled = secondary_crtc_state->crc_enabled;
4568
4569 intel_crtc_free_hw_state(secondary_crtc_state);
4570 if (secondary_crtc_state->dp_tunnel_ref.tunnel)
4571 drm_dp_tunnel_ref_put(&secondary_crtc_state->dp_tunnel_ref);
4572 memcpy(secondary_crtc_state, saved_state, sizeof(*secondary_crtc_state));
4573 kfree(saved_state);
4574
4575 /* Re-init hw state */
4576 memset(&secondary_crtc_state->hw, 0, sizeof(secondary_crtc_state->hw));
4577 secondary_crtc_state->hw.enable = primary_crtc_state->hw.enable;
4578 secondary_crtc_state->hw.active = primary_crtc_state->hw.active;
4579 drm_mode_copy(&secondary_crtc_state->hw.mode,
4580 &primary_crtc_state->hw.mode);
4581 drm_mode_copy(&secondary_crtc_state->hw.pipe_mode,
4582 &primary_crtc_state->hw.pipe_mode);
4583 drm_mode_copy(&secondary_crtc_state->hw.adjusted_mode,
4584 &primary_crtc_state->hw.adjusted_mode);
4585 secondary_crtc_state->hw.scaling_filter = primary_crtc_state->hw.scaling_filter;
4586
4587 if (primary_crtc_state->dp_tunnel_ref.tunnel)
4588 drm_dp_tunnel_ref_get(primary_crtc_state->dp_tunnel_ref.tunnel,
4589 &secondary_crtc_state->dp_tunnel_ref);
4590
4591 copy_joiner_crtc_state_nomodeset(state, secondary_crtc);
4592
4593 secondary_crtc_state->uapi.mode_changed = primary_crtc_state->uapi.mode_changed;
4594 secondary_crtc_state->uapi.connectors_changed = primary_crtc_state->uapi.connectors_changed;
4595 secondary_crtc_state->uapi.active_changed = primary_crtc_state->uapi.active_changed;
4596
4597 WARN_ON(primary_crtc_state->joiner_pipes !=
4598 secondary_crtc_state->joiner_pipes);
4599
4600 return 0;
4601 }
4602
4603 static int
4604 intel_crtc_prepare_cleared_state(struct intel_atomic_state *state,
4605 struct intel_crtc *crtc)
4606 {
4607 struct intel_crtc_state *crtc_state =
4608 intel_atomic_get_new_crtc_state(state, crtc);
4609 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4610 struct intel_crtc_state *saved_state;
4611
4612 saved_state = intel_crtc_state_alloc(crtc);
4613 if (!saved_state)
4614 return -ENOMEM;
4615
4616 /* free the old crtc_state->hw members */
4617 intel_crtc_free_hw_state(crtc_state);
4618
4619 intel_dp_tunnel_atomic_clear_stream_bw(state, crtc_state);
4620
4621 /* FIXME: before the switch to atomic started, a new pipe_config was
4622 * kzalloc'd. Code that depends on any field being zero should be
4623 * fixed, so that the crtc_state can be safely duplicated. For now,
4624 * only fields that are known not to cause problems are preserved. */
4625
4626 saved_state->uapi = crtc_state->uapi;
4627 saved_state->inherited = crtc_state->inherited;
4628 saved_state->scaler_state = crtc_state->scaler_state;
4629 saved_state->shared_dpll = crtc_state->shared_dpll;
4630 saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
4631 memcpy(saved_state->icl_port_dplls, crtc_state->icl_port_dplls,
4632 sizeof(saved_state->icl_port_dplls));
4633 saved_state->crc_enabled = crtc_state->crc_enabled;
4634 if (IS_G4X(dev_priv) ||
4635 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
4636 saved_state->wm = crtc_state->wm;
4637
4638 memcpy(crtc_state, saved_state, sizeof(*crtc_state));
4639 kfree(saved_state);
4640
4641 intel_crtc_copy_uapi_to_hw_state_modeset(state, crtc);
4642
4643 return 0;
4644 }
4645
4646 static int
4647 intel_modeset_pipe_config(struct intel_atomic_state *state,
4648 struct intel_crtc *crtc,
4649 const struct intel_link_bw_limits *limits)
4650 {
4651 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
4652 struct intel_crtc_state *crtc_state =
4653 intel_atomic_get_new_crtc_state(state, crtc);
4654 struct drm_connector *connector;
4655 struct drm_connector_state *connector_state;
4656 int pipe_src_w, pipe_src_h;
4657 int base_bpp, ret, i;
4658
4659 crtc_state->cpu_transcoder = (enum transcoder) crtc->pipe;
4660
4661 crtc_state->framestart_delay = 1;
4662
4663 /*
4664 * Sanitize sync polarity flags based on requested ones. If neither
4665 * positive nor negative polarity is requested, treat this as meaning
4666 * negative polarity.
4667 */
4668 if (!(crtc_state->hw.adjusted_mode.flags &
4669 (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
4670 crtc_state->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
4671
4672 if (!(crtc_state->hw.adjusted_mode.flags &
4673 (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
4674 crtc_state->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
4675
4676 ret = compute_baseline_pipe_bpp(state, crtc);
4677 if (ret)
4678 return ret;
4679
4680 crtc_state->fec_enable = limits->force_fec_pipes & BIT(crtc->pipe);
4681 crtc_state->max_link_bpp_x16 = limits->max_bpp_x16[crtc->pipe];
4682
4683 if (crtc_state->pipe_bpp > fxp_q4_to_int(crtc_state->max_link_bpp_x16)) {
4684 drm_dbg_kms(&i915->drm,
4685 "[CRTC:%d:%s] Link bpp limited to " FXP_Q4_FMT "\n",
4686 crtc->base.base.id, crtc->base.name,
4687 FXP_Q4_ARGS(crtc_state->max_link_bpp_x16));
4688 crtc_state->bw_constrained = true;
4689 }
4690
4691 base_bpp = crtc_state->pipe_bpp;
4692
4693 /*
4694 * Determine the real pipe dimensions. Note that stereo modes can
4695 * increase the actual pipe size due to the frame doubling and
4696 * insertion of additional space for blanks between the frames. This
4697 * is stored in the crtc timings. We use the requested mode to do this
4698 * computation to clearly distinguish it from the adjusted mode, which
4699 * can be changed by the connectors in the below retry loop.
4700 */
4701 drm_mode_get_hv_timing(&crtc_state->hw.mode,
4702 &pipe_src_w, &pipe_src_h);
4703 drm_rect_init(&crtc_state->pipe_src, 0, 0,
4704 pipe_src_w, pipe_src_h);
4705
4706 for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
4707 struct intel_encoder *encoder =
4708 to_intel_encoder(connector_state->best_encoder);
4709
4710 if (connector_state->crtc != &crtc->base)
4711 continue;
4712
4713 if (!check_single_encoder_cloning(state, crtc, encoder)) {
4714 drm_dbg_kms(&i915->drm,
4715 "[ENCODER:%d:%s] rejecting invalid cloning configuration\n",
4716 encoder->base.base.id, encoder->base.name);
4717 return -EINVAL;
4718 }
4719
4720 /*
4721 * Determine output_types before calling the .compute_config()
4722 * hooks so that the hooks can use this information safely.
4723 */
4724 if (encoder->compute_output_type)
4725 crtc_state->output_types |=
4726 BIT(encoder->compute_output_type(encoder, crtc_state,
4727 connector_state));
4728 else
4729 crtc_state->output_types |= BIT(encoder->type);
4730 }
4731
4732 /* Ensure the port clock defaults are reset when retrying. */
4733 crtc_state->port_clock = 0;
4734 crtc_state->pixel_multiplier = 1;
4735
4736 /* Fill in default crtc timings, allow encoders to overwrite them. */
4737 drm_mode_set_crtcinfo(&crtc_state->hw.adjusted_mode,
4738 CRTC_STEREO_DOUBLE);
4739
4740 /* Pass our mode to the connectors and the CRTC to give them a chance to
4741 * adjust it according to limitations or connector properties, and also
4742 * a chance to reject the mode entirely.
4743 */
4744 for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
4745 struct intel_encoder *encoder =
4746 to_intel_encoder(connector_state->best_encoder);
4747
4748 if (connector_state->crtc != &crtc->base)
4749 continue;
4750
4751 ret = encoder->compute_config(encoder, crtc_state,
4752 connector_state);
4753 if (ret == -EDEADLK)
4754 return ret;
4755 if (ret < 0) {
4756 drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] config failure: %d\n",
4757 encoder->base.base.id, encoder->base.name, ret);
4758 return ret;
4759 }
4760 }
4761
4762 /* Set default port clock if not overwritten by the encoder. Needs to be
4763 * done afterwards in case the encoder adjusts the mode. */
4764 if (!crtc_state->port_clock)
4765 crtc_state->port_clock = crtc_state->hw.adjusted_mode.crtc_clock
4766 * crtc_state->pixel_multiplier;
4767
4768 ret = intel_crtc_compute_config(state, crtc);
4769 if (ret == -EDEADLK)
4770 return ret;
4771 if (ret < 0) {
4772 drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] config failure: %d\n",
4773 crtc->base.base.id, crtc->base.name, ret);
4774 return ret;
4775 }
4776
4777 /* Dithering seems not to pass through bits correctly when it should, so
4778 * only enable it on 6bpc panels and when it's not a compliance
4779 * test requesting 6bpc video pattern.
4780 */
4781 crtc_state->dither = (crtc_state->pipe_bpp == 6*3) &&
4782 !crtc_state->dither_force_disable;
4783 drm_dbg_kms(&i915->drm,
4784 "[CRTC:%d:%s] hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
4785 crtc->base.base.id, crtc->base.name,
4786 base_bpp, crtc_state->pipe_bpp, crtc_state->dither);
4787
4788 return 0;
4789 }
4790
4791 static int
4792 intel_modeset_pipe_config_late(struct intel_atomic_state *state,
4793 struct intel_crtc *crtc)
4794 {
4795 struct intel_crtc_state *crtc_state =
4796 intel_atomic_get_new_crtc_state(state, crtc);
4797 struct drm_connector_state *conn_state;
4798 struct drm_connector *connector;
4799 int i;
4800
4801 for_each_new_connector_in_state(&state->base, connector,
4802 conn_state, i) {
4803 struct intel_encoder *encoder =
4804 to_intel_encoder(conn_state->best_encoder);
4805 int ret;
4806
4807 if (conn_state->crtc != &crtc->base ||
4808 !encoder->compute_config_late)
4809 continue;
4810
4811 ret = encoder->compute_config_late(encoder, crtc_state,
4812 conn_state);
4813 if (ret)
4814 return ret;
4815 }
4816
4817 return 0;
4818 }
4819
4820 bool intel_fuzzy_clock_check(int clock1, int clock2)
4821 {
4822 int diff;
4823
4824 if (clock1 == clock2)
4825 return true;
4826
4827 if (!clock1 || !clock2)
4828 return false;
4829
4830 diff = abs(clock1 - clock2);
4831
4832 if ((diff + clock1 + clock2) * 100 / (clock1 + clock2) < 105)
4833 return true;
4834
4835 return false;
4836 }
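/*
 * Illustrative worked examples (not from the original source), assuming
 * the integer math above: the pair is accepted while the difference stays
 * below ~5% of clock1 + clock2, i.e. roughly a 10% relative tolerance
 * between the two clocks.
 *
 *	intel_fuzzy_clock_check(100000, 96000);
 *		// (4000 + 196000) * 100 / 196000 = 102 < 105 -> true
 *	intel_fuzzy_clock_check(100000, 90000);
 *		// (10000 + 190000) * 100 / 190000 = 105, not < 105 -> false
 */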
4837
4838 static bool
4839 intel_compare_link_m_n(const struct intel_link_m_n *m_n,
4840 const struct intel_link_m_n *m2_n2)
4841 {
4842 return m_n->tu == m2_n2->tu &&
4843 m_n->data_m == m2_n2->data_m &&
4844 m_n->data_n == m2_n2->data_n &&
4845 m_n->link_m == m2_n2->link_m &&
4846 m_n->link_n == m2_n2->link_n;
4847 }
4848
4849 static bool
4850 intel_compare_infoframe(const union hdmi_infoframe *a,
4851 const union hdmi_infoframe *b)
4852 {
4853 return memcmp(a, b, sizeof(*a)) == 0;
4854 }
4855
4856 static bool
4857 intel_compare_dp_vsc_sdp(const struct drm_dp_vsc_sdp *a,
4858 const struct drm_dp_vsc_sdp *b)
4859 {
4860 return a->pixelformat == b->pixelformat &&
4861 a->colorimetry == b->colorimetry &&
4862 a->bpc == b->bpc &&
4863 a->dynamic_range == b->dynamic_range &&
4864 a->content_type == b->content_type;
4865 }
4866
4867 static bool
4868 intel_compare_dp_as_sdp(const struct drm_dp_as_sdp *a,
4869 const struct drm_dp_as_sdp *b)
4870 {
4871 return a->vtotal == b->vtotal &&
4872 a->target_rr == b->target_rr &&
4873 a->duration_incr_ms == b->duration_incr_ms &&
4874 a->duration_decr_ms == b->duration_decr_ms &&
4875 a->mode == b->mode;
4876 }
4877
4878 static bool
4879 intel_compare_buffer(const u8 *a, const u8 *b, size_t len)
4880 {
4881 return memcmp(a, b, len) == 0;
4882 }
4883
4884 static void __printf(5, 6)
4885 pipe_config_mismatch(struct drm_printer *p, bool fastset,
4886 const struct intel_crtc *crtc,
4887 const char *name, const char *format, ...)
4888 {
4889 struct va_format vaf;
4890 va_list args;
4891
4892 va_start(args, format);
4893 vaf.fmt = format;
4894 vaf.va = &args;
4895
4896 if (fastset)
4897 drm_printf(p, "[CRTC:%d:%s] fastset requirement not met in %s %pV\n",
4898 crtc->base.base.id, crtc->base.name, name, &vaf);
4899 else
4900 drm_printf(p, "[CRTC:%d:%s] mismatch in %s %pV\n",
4901 crtc->base.base.id, crtc->base.name, name, &vaf);
4902
4903 va_end(args);
4904 }
4905
4906 static void
4907 pipe_config_infoframe_mismatch(struct drm_printer *p, bool fastset,
4908 const struct intel_crtc *crtc,
4909 const char *name,
4910 const union hdmi_infoframe *a,
4911 const union hdmi_infoframe *b)
4912 {
4913 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
4914 const char *loglevel;
4915
4916 if (fastset) {
4917 if (!drm_debug_enabled(DRM_UT_KMS))
4918 return;
4919
4920 loglevel = KERN_DEBUG;
4921 } else {
4922 loglevel = KERN_ERR;
4923 }
4924
4925 pipe_config_mismatch(p, fastset, crtc, name, "infoframe");
4926
4927 drm_printf(p, "expected:\n");
4928 hdmi_infoframe_log(loglevel, i915->drm.dev, a);
4929 drm_printf(p, "found:\n");
4930 hdmi_infoframe_log(loglevel, i915->drm.dev, b);
4931 }
4932
4933 static void
4934 pipe_config_dp_vsc_sdp_mismatch(struct drm_printer *p, bool fastset,
4935 const struct intel_crtc *crtc,
4936 const char *name,
4937 const struct drm_dp_vsc_sdp *a,
4938 const struct drm_dp_vsc_sdp *b)
4939 {
4940 pipe_config_mismatch(p, fastset, crtc, name, "dp sdp");
4941
4942 drm_printf(p, "expected:\n");
4943 drm_dp_vsc_sdp_log(p, a);
4944 drm_printf(p, "found:\n");
4945 drm_dp_vsc_sdp_log(p, b);
4946 }
4947
4948 static void
4949 pipe_config_dp_as_sdp_mismatch(struct drm_i915_private *i915,
4950 bool fastset, const char *name,
4951 const struct drm_dp_as_sdp *a,
4952 const struct drm_dp_as_sdp *b)
4953 {
4954 struct drm_printer p;
4955
4956 if (fastset) {
4957 p = drm_dbg_printer(&i915->drm, DRM_UT_KMS, NULL);
4958
4959 drm_printf(&p, "fastset requirement not met in %s dp sdp\n", name);
4960 } else {
4961 p = drm_err_printer(&i915->drm, NULL);
4962
4963 drm_printf(&p, "mismatch in %s dp sdp\n", name);
4964 }
4965
4966 drm_printf(&p, "expected:\n");
4967 drm_dp_as_sdp_log(&p, a);
4968 drm_printf(&p, "found:\n");
4969 drm_dp_as_sdp_log(&p, b);
4970 }
4971
4972 /* Returns the length up to and including the last differing byte */
4973 static size_t
4974 memcmp_diff_len(const u8 *a, const u8 *b, size_t len)
4975 {
4976 int i;
4977
4978 for (i = len - 1; i >= 0; i--) {
4979 if (a[i] != b[i])
4980 return i + 1;
4981 }
4982
4983 return 0;
4984 }
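/*
 * Illustrative example, assuming the helper above: for
 * a = {0x01, 0x02, 0x03, 0x04} and b = {0x01, 0xff, 0x03, 0x04},
 * the last differing byte is at index 1, so memcmp_diff_len(a, b, 4)
 * returns 2 and the hex dumps below print only the first two bytes.
 */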
4985
4986 static void
4987 pipe_config_buffer_mismatch(struct drm_printer *p, bool fastset,
4988 const struct intel_crtc *crtc,
4989 const char *name,
4990 const u8 *a, const u8 *b, size_t len)
4991 {
4992 const char *loglevel;
4993
4994 if (fastset) {
4995 if (!drm_debug_enabled(DRM_UT_KMS))
4996 return;
4997
4998 loglevel = KERN_DEBUG;
4999 } else {
5000 loglevel = KERN_ERR;
5001 }
5002
5003 pipe_config_mismatch(p, fastset, crtc, name, "buffer");
5004
5005 /* only dump up to the last difference */
5006 len = memcmp_diff_len(a, b, len);
5007
5008 print_hex_dump(loglevel, "expected: ", DUMP_PREFIX_NONE,
5009 16, 0, a, len, false);
5010 print_hex_dump(loglevel, "found: ", DUMP_PREFIX_NONE,
5011 16, 0, b, len, false);
5012 }
5013
5014 static void
5015 pipe_config_pll_mismatch(struct drm_printer *p, bool fastset,
5016 const struct intel_crtc *crtc,
5017 const char *name,
5018 const struct intel_dpll_hw_state *a,
5019 const struct intel_dpll_hw_state *b)
5020 {
5021 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
5022
5023 pipe_config_mismatch(p, fastset, crtc, name, " "); /* stupid -Werror=format-zero-length */
5024
5025 drm_printf(p, "expected:\n");
5026 intel_dpll_dump_hw_state(i915, p, a);
5027 drm_printf(p, "found:\n");
5028 intel_dpll_dump_hw_state(i915, p, b);
5029 }
5030
5031 static void
5032 pipe_config_cx0pll_mismatch(struct drm_printer *p, bool fastset,
5033 const struct intel_crtc *crtc,
5034 const char *name,
5035 const struct intel_cx0pll_state *a,
5036 const struct intel_cx0pll_state *b)
5037 {
5038 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
5039 const char *chipname = a->use_c10 ? "C10" : "C20";
5040
5041 pipe_config_mismatch(p, fastset, crtc, name, chipname);
5042
5043 drm_printf(p, "expected:\n");
5044 intel_cx0pll_dump_hw_state(i915, a);
5045 drm_printf(p, "found:\n");
5046 intel_cx0pll_dump_hw_state(i915, b);
5047 }
5048
5049 bool
5050 intel_pipe_config_compare(const struct intel_crtc_state *current_config,
5051 const struct intel_crtc_state *pipe_config,
5052 bool fastset)
5053 {
5054 struct drm_i915_private *dev_priv = to_i915(current_config->uapi.crtc->dev);
5055 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
5056 struct drm_printer p;
5057 bool ret = true;
5058
5059 if (fastset)
5060 p = drm_dbg_printer(&dev_priv->drm, DRM_UT_KMS, NULL);
5061 else
5062 p = drm_err_printer(&dev_priv->drm, NULL);
5063
5064 #define PIPE_CONF_CHECK_X(name) do { \
5065 if (current_config->name != pipe_config->name) { \
5066 BUILD_BUG_ON_MSG(__same_type(current_config->name, bool), \
5067 __stringify(name) " is bool"); \
5068 pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \
5069 "(expected 0x%08x, found 0x%08x)", \
5070 current_config->name, \
5071 pipe_config->name); \
5072 ret = false; \
5073 } \
5074 } while (0)
5075
5076 #define PIPE_CONF_CHECK_X_WITH_MASK(name, mask) do { \
5077 if ((current_config->name & (mask)) != (pipe_config->name & (mask))) { \
5078 BUILD_BUG_ON_MSG(__same_type(current_config->name, bool), \
5079 __stringify(name) " is bool"); \
5080 pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \
5081 "(expected 0x%08x, found 0x%08x)", \
5082 current_config->name & (mask), \
5083 pipe_config->name & (mask)); \
5084 ret = false; \
5085 } \
5086 } while (0)
5087
5088 #define PIPE_CONF_CHECK_I(name) do { \
5089 if (current_config->name != pipe_config->name) { \
5090 BUILD_BUG_ON_MSG(__same_type(current_config->name, bool), \
5091 __stringify(name) " is bool"); \
5092 pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \
5093 "(expected %i, found %i)", \
5094 current_config->name, \
5095 pipe_config->name); \
5096 ret = false; \
5097 } \
5098 } while (0)
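/*
 * Illustrative expansion (a sketch, not generated output): a later use
 * such as PIPE_CONF_CHECK_I(pixel_multiplier) becomes roughly:
 *
 *	if (current_config->pixel_multiplier != pipe_config->pixel_multiplier) {
 *		pipe_config_mismatch(&p, fastset, crtc, "pixel_multiplier",
 *				     "(expected %i, found %i)",
 *				     current_config->pixel_multiplier,
 *				     pipe_config->pixel_multiplier);
 *		ret = false;
 *	}
 */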
5099
5100 #define PIPE_CONF_CHECK_LLI(name) do { \
5101 if (current_config->name != pipe_config->name) { \
5102 pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \
5103 "(expected %lli, found %lli)", \
5104 current_config->name, \
5105 pipe_config->name); \
5106 ret = false; \
5107 } \
5108 } while (0)
5109
5110 #define PIPE_CONF_CHECK_BOOL(name) do { \
5111 if (current_config->name != pipe_config->name) { \
5112 BUILD_BUG_ON_MSG(!__same_type(current_config->name, bool), \
5113 __stringify(name) " is not bool"); \
5114 pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \
5115 "(expected %s, found %s)", \
5116 str_yes_no(current_config->name), \
5117 str_yes_no(pipe_config->name)); \
5118 ret = false; \
5119 } \
5120 } while (0)
5121
5122 #define PIPE_CONF_CHECK_P(name) do { \
5123 if (current_config->name != pipe_config->name) { \
5124 pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \
5125 "(expected %p, found %p)", \
5126 current_config->name, \
5127 pipe_config->name); \
5128 ret = false; \
5129 } \
5130 } while (0)
5131
5132 #define PIPE_CONF_CHECK_M_N(name) do { \
5133 if (!intel_compare_link_m_n(&current_config->name, \
5134 &pipe_config->name)) { \
5135 pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \
5136 "(expected tu %i data %i/%i link %i/%i, " \
5137 "found tu %i, data %i/%i link %i/%i)", \
5138 current_config->name.tu, \
5139 current_config->name.data_m, \
5140 current_config->name.data_n, \
5141 current_config->name.link_m, \
5142 current_config->name.link_n, \
5143 pipe_config->name.tu, \
5144 pipe_config->name.data_m, \
5145 pipe_config->name.data_n, \
5146 pipe_config->name.link_m, \
5147 pipe_config->name.link_n); \
5148 ret = false; \
5149 } \
5150 } while (0)
5151
5152 #define PIPE_CONF_CHECK_PLL(name) do { \
5153 if (!intel_dpll_compare_hw_state(dev_priv, &current_config->name, \
5154 &pipe_config->name)) { \
5155 pipe_config_pll_mismatch(&p, fastset, crtc, __stringify(name), \
5156 &current_config->name, \
5157 &pipe_config->name); \
5158 ret = false; \
5159 } \
5160 } while (0)
5161
5162 #define PIPE_CONF_CHECK_PLL_CX0(name) do { \
5163 if (!intel_cx0pll_compare_hw_state(&current_config->name, \
5164 &pipe_config->name)) { \
5165 pipe_config_cx0pll_mismatch(&p, fastset, crtc, __stringify(name), \
5166 &current_config->name, \
5167 &pipe_config->name); \
5168 ret = false; \
5169 } \
5170 } while (0)
5171
5172 #define PIPE_CONF_CHECK_TIMINGS(name) do { \
5173 PIPE_CONF_CHECK_I(name.crtc_hdisplay); \
5174 PIPE_CONF_CHECK_I(name.crtc_htotal); \
5175 PIPE_CONF_CHECK_I(name.crtc_hblank_start); \
5176 PIPE_CONF_CHECK_I(name.crtc_hblank_end); \
5177 PIPE_CONF_CHECK_I(name.crtc_hsync_start); \
5178 PIPE_CONF_CHECK_I(name.crtc_hsync_end); \
5179 PIPE_CONF_CHECK_I(name.crtc_vdisplay); \
5180 PIPE_CONF_CHECK_I(name.crtc_vblank_start); \
5181 PIPE_CONF_CHECK_I(name.crtc_vsync_start); \
5182 PIPE_CONF_CHECK_I(name.crtc_vsync_end); \
5183 if (!fastset || !pipe_config->update_lrr) { \
5184 PIPE_CONF_CHECK_I(name.crtc_vtotal); \
5185 PIPE_CONF_CHECK_I(name.crtc_vblank_end); \
5186 } \
5187 } while (0)
5188
5189 #define PIPE_CONF_CHECK_RECT(name) do { \
5190 PIPE_CONF_CHECK_I(name.x1); \
5191 PIPE_CONF_CHECK_I(name.x2); \
5192 PIPE_CONF_CHECK_I(name.y1); \
5193 PIPE_CONF_CHECK_I(name.y2); \
5194 } while (0)
5195
5196 #define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
5197 if ((current_config->name ^ pipe_config->name) & (mask)) { \
5198 pipe_config_mismatch(&p, fastset, crtc, __stringify(name), \
5199 "(%x) (expected %i, found %i)", \
5200 (mask), \
5201 current_config->name & (mask), \
5202 pipe_config->name & (mask)); \
5203 ret = false; \
5204 } \
5205 } while (0)
5206
5207 #define PIPE_CONF_CHECK_INFOFRAME(name) do { \
5208 if (!intel_compare_infoframe(&current_config->infoframes.name, \
5209 &pipe_config->infoframes.name)) { \
5210 pipe_config_infoframe_mismatch(&p, fastset, crtc, __stringify(name), \
5211 &current_config->infoframes.name, \
5212 &pipe_config->infoframes.name); \
5213 ret = false; \
5214 } \
5215 } while (0)
5216
5217 #define PIPE_CONF_CHECK_DP_VSC_SDP(name) do { \
5218 if (!intel_compare_dp_vsc_sdp(&current_config->infoframes.name, \
5219 &pipe_config->infoframes.name)) { \
5220 pipe_config_dp_vsc_sdp_mismatch(&p, fastset, crtc, __stringify(name), \
5221 &current_config->infoframes.name, \
5222 &pipe_config->infoframes.name); \
5223 ret = false; \
5224 } \
5225 } while (0)
5226
5227 #define PIPE_CONF_CHECK_DP_AS_SDP(name) do { \
5228 if (!intel_compare_dp_as_sdp(&current_config->infoframes.name, \
5229 &pipe_config->infoframes.name)) { \
5230 pipe_config_dp_as_sdp_mismatch(dev_priv, fastset, __stringify(name), \
5231 &current_config->infoframes.name, \
5232 &pipe_config->infoframes.name); \
5233 ret = false; \
5234 } \
5235 } while (0)
5236
5237 #define PIPE_CONF_CHECK_BUFFER(name, len) do { \
5238 BUILD_BUG_ON(sizeof(current_config->name) != (len)); \
5239 BUILD_BUG_ON(sizeof(pipe_config->name) != (len)); \
5240 if (!intel_compare_buffer(current_config->name, pipe_config->name, (len))) { \
5241 pipe_config_buffer_mismatch(&p, fastset, crtc, __stringify(name), \
5242 current_config->name, \
5243 pipe_config->name, \
5244 (len)); \
5245 ret = false; \
5246 } \
5247 } while (0)
5248
5249 #define PIPE_CONF_CHECK_COLOR_LUT(lut, is_pre_csc_lut) do { \
5250 if (current_config->gamma_mode == pipe_config->gamma_mode && \
5251 !intel_color_lut_equal(current_config, \
5252 current_config->lut, pipe_config->lut, \
5253 is_pre_csc_lut)) { \
5254 pipe_config_mismatch(&p, fastset, crtc, __stringify(lut), \
5255 "hw_state doesn't match sw_state"); \
5256 ret = false; \
5257 } \
5258 } while (0)
5259
5260 #define PIPE_CONF_CHECK_CSC(name) do { \
5261 PIPE_CONF_CHECK_X(name.preoff[0]); \
5262 PIPE_CONF_CHECK_X(name.preoff[1]); \
5263 PIPE_CONF_CHECK_X(name.preoff[2]); \
5264 PIPE_CONF_CHECK_X(name.coeff[0]); \
5265 PIPE_CONF_CHECK_X(name.coeff[1]); \
5266 PIPE_CONF_CHECK_X(name.coeff[2]); \
5267 PIPE_CONF_CHECK_X(name.coeff[3]); \
5268 PIPE_CONF_CHECK_X(name.coeff[4]); \
5269 PIPE_CONF_CHECK_X(name.coeff[5]); \
5270 PIPE_CONF_CHECK_X(name.coeff[6]); \
5271 PIPE_CONF_CHECK_X(name.coeff[7]); \
5272 PIPE_CONF_CHECK_X(name.coeff[8]); \
5273 PIPE_CONF_CHECK_X(name.postoff[0]); \
5274 PIPE_CONF_CHECK_X(name.postoff[1]); \
5275 PIPE_CONF_CHECK_X(name.postoff[2]); \
5276 } while (0)
5277
5278 #define PIPE_CONF_QUIRK(quirk) \
5279 ((current_config->quirks | pipe_config->quirks) & (quirk))
5280
5281 PIPE_CONF_CHECK_BOOL(hw.enable);
5282 PIPE_CONF_CHECK_BOOL(hw.active);
5283
5284 PIPE_CONF_CHECK_I(cpu_transcoder);
5285 PIPE_CONF_CHECK_I(mst_master_transcoder);
5286
5287 PIPE_CONF_CHECK_BOOL(has_pch_encoder);
5288 PIPE_CONF_CHECK_I(fdi_lanes);
5289 PIPE_CONF_CHECK_M_N(fdi_m_n);
5290
5291 PIPE_CONF_CHECK_I(lane_count);
5292 PIPE_CONF_CHECK_X(lane_lat_optim_mask);
5293
5294 if (HAS_DOUBLE_BUFFERED_M_N(dev_priv)) {
5295 if (!fastset || !pipe_config->update_m_n)
5296 PIPE_CONF_CHECK_M_N(dp_m_n);
5297 } else {
5298 PIPE_CONF_CHECK_M_N(dp_m_n);
5299 PIPE_CONF_CHECK_M_N(dp_m2_n2);
5300 }
5301
5302 PIPE_CONF_CHECK_X(output_types);
5303
5304 PIPE_CONF_CHECK_I(framestart_delay);
5305 PIPE_CONF_CHECK_I(msa_timing_delay);
5306
5307 PIPE_CONF_CHECK_TIMINGS(hw.pipe_mode);
5308 PIPE_CONF_CHECK_TIMINGS(hw.adjusted_mode);
5309
5310 PIPE_CONF_CHECK_I(pixel_multiplier);
5311
5312 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
5313 DRM_MODE_FLAG_INTERLACE);
5314
5315 if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
5316 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
5317 DRM_MODE_FLAG_PHSYNC);
5318 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
5319 DRM_MODE_FLAG_NHSYNC);
5320 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
5321 DRM_MODE_FLAG_PVSYNC);
5322 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
5323 DRM_MODE_FLAG_NVSYNC);
5324 }
5325
5326 PIPE_CONF_CHECK_I(output_format);
5327 PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
5328 if ((DISPLAY_VER(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
5329 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
5330 PIPE_CONF_CHECK_BOOL(limited_color_range);
5331
5332 PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
5333 PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
5334 PIPE_CONF_CHECK_BOOL(has_infoframe);
5335 PIPE_CONF_CHECK_BOOL(enhanced_framing);
5336 PIPE_CONF_CHECK_BOOL(fec_enable);
5337
5338 if (!fastset) {
5339 PIPE_CONF_CHECK_BOOL(has_audio);
5340 PIPE_CONF_CHECK_BUFFER(eld, MAX_ELD_BYTES);
5341 }
5342
5343 PIPE_CONF_CHECK_X(gmch_pfit.control);
5344 /* pfit ratios are autocomputed by the hw on gen4+ */
5345 if (DISPLAY_VER(dev_priv) < 4)
5346 PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
5347 PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
5348
5349 /*
5350 * Changing the EDP transcoder input mux
5351 * (A_ONOFF vs. A_ON) requires a full modeset.
5352 */
5353 PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru);
5354
5355 if (!fastset) {
5356 PIPE_CONF_CHECK_RECT(pipe_src);
5357
5358 PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
5359 PIPE_CONF_CHECK_RECT(pch_pfit.dst);
5360
5361 PIPE_CONF_CHECK_I(scaler_state.scaler_id);
5362 PIPE_CONF_CHECK_I(pixel_rate);
5363
5364 PIPE_CONF_CHECK_X(gamma_mode);
5365 if (IS_CHERRYVIEW(dev_priv))
5366 PIPE_CONF_CHECK_X(cgm_mode);
5367 else
5368 PIPE_CONF_CHECK_X(csc_mode);
5369 PIPE_CONF_CHECK_BOOL(gamma_enable);
5370 PIPE_CONF_CHECK_BOOL(csc_enable);
5371 PIPE_CONF_CHECK_BOOL(wgc_enable);
5372
5373 PIPE_CONF_CHECK_I(linetime);
5374 PIPE_CONF_CHECK_I(ips_linetime);
5375
5376 PIPE_CONF_CHECK_COLOR_LUT(pre_csc_lut, true);
5377 PIPE_CONF_CHECK_COLOR_LUT(post_csc_lut, false);
5378
5379 PIPE_CONF_CHECK_CSC(csc);
5380 PIPE_CONF_CHECK_CSC(output_csc);
5381 }
5382
5383 /*
5384 * Panel replay has to be enabled before link training. PSR doesn't have
5385 * this requirement -> check these only if using panel replay
5386 */
5387 if (current_config->active_planes &&
5388 (current_config->has_panel_replay ||
5389 pipe_config->has_panel_replay)) {
5390 PIPE_CONF_CHECK_BOOL(has_psr);
5391 PIPE_CONF_CHECK_BOOL(has_sel_update);
5392 PIPE_CONF_CHECK_BOOL(enable_psr2_sel_fetch);
5393 PIPE_CONF_CHECK_BOOL(enable_psr2_su_region_et);
5394 PIPE_CONF_CHECK_BOOL(has_panel_replay);
5395 }
5396
5397 PIPE_CONF_CHECK_BOOL(double_wide);
5398
5399 if (dev_priv->display.dpll.mgr)
5400 PIPE_CONF_CHECK_P(shared_dpll);
5401
5402 /* FIXME convert everything over to the dpll_mgr */
5403 if (dev_priv->display.dpll.mgr || HAS_GMCH(dev_priv))
5404 PIPE_CONF_CHECK_PLL(dpll_hw_state);
5405
5406 /* FIXME convert MTL+ platforms over to dpll_mgr */
5407 if (DISPLAY_VER(dev_priv) >= 14)
5408 PIPE_CONF_CHECK_PLL_CX0(dpll_hw_state.cx0pll);
5409
5410 PIPE_CONF_CHECK_X(dsi_pll.ctrl);
5411 PIPE_CONF_CHECK_X(dsi_pll.div);
5412
5413 if (IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) >= 5)
5414 PIPE_CONF_CHECK_I(pipe_bpp);
5415
5416 if (!fastset || !pipe_config->update_m_n) {
5417 PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_clock);
5418 PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_clock);
5419 }
5420 PIPE_CONF_CHECK_I(port_clock);
5421
5422 PIPE_CONF_CHECK_I(min_voltage_level);
5423
5424 if (current_config->has_psr || pipe_config->has_psr)
5425 PIPE_CONF_CHECK_X_WITH_MASK(infoframes.enable,
5426 ~intel_hdmi_infoframe_enable(DP_SDP_VSC));
5427 else
5428 PIPE_CONF_CHECK_X(infoframes.enable);
5429
5430 PIPE_CONF_CHECK_X(infoframes.gcp);
5431 PIPE_CONF_CHECK_INFOFRAME(avi);
5432 PIPE_CONF_CHECK_INFOFRAME(spd);
5433 PIPE_CONF_CHECK_INFOFRAME(hdmi);
5434 PIPE_CONF_CHECK_INFOFRAME(drm);
5435 PIPE_CONF_CHECK_DP_VSC_SDP(vsc);
5436 PIPE_CONF_CHECK_DP_AS_SDP(as_sdp);
5437
5438 PIPE_CONF_CHECK_X(sync_mode_slaves_mask);
5439 PIPE_CONF_CHECK_I(master_transcoder);
5440 PIPE_CONF_CHECK_X(joiner_pipes);
5441
5442 PIPE_CONF_CHECK_BOOL(dsc.config.block_pred_enable);
5443 PIPE_CONF_CHECK_BOOL(dsc.config.convert_rgb);
5444 PIPE_CONF_CHECK_BOOL(dsc.config.simple_422);
5445 PIPE_CONF_CHECK_BOOL(dsc.config.native_422);
5446 PIPE_CONF_CHECK_BOOL(dsc.config.native_420);
5447 PIPE_CONF_CHECK_BOOL(dsc.config.vbr_enable);
5448 PIPE_CONF_CHECK_I(dsc.config.line_buf_depth);
5449 PIPE_CONF_CHECK_I(dsc.config.bits_per_component);
5450 PIPE_CONF_CHECK_I(dsc.config.pic_width);
5451 PIPE_CONF_CHECK_I(dsc.config.pic_height);
5452 PIPE_CONF_CHECK_I(dsc.config.slice_width);
5453 PIPE_CONF_CHECK_I(dsc.config.slice_height);
5454 PIPE_CONF_CHECK_I(dsc.config.initial_dec_delay);
5455 PIPE_CONF_CHECK_I(dsc.config.initial_xmit_delay);
5456 PIPE_CONF_CHECK_I(dsc.config.scale_decrement_interval);
5457 PIPE_CONF_CHECK_I(dsc.config.scale_increment_interval);
5458 PIPE_CONF_CHECK_I(dsc.config.initial_scale_value);
5459 PIPE_CONF_CHECK_I(dsc.config.first_line_bpg_offset);
5460 PIPE_CONF_CHECK_I(dsc.config.flatness_min_qp);
5461 PIPE_CONF_CHECK_I(dsc.config.flatness_max_qp);
5462 PIPE_CONF_CHECK_I(dsc.config.slice_bpg_offset);
5463 PIPE_CONF_CHECK_I(dsc.config.nfl_bpg_offset);
5464 PIPE_CONF_CHECK_I(dsc.config.initial_offset);
5465 PIPE_CONF_CHECK_I(dsc.config.final_offset);
5466 PIPE_CONF_CHECK_I(dsc.config.rc_model_size);
5467 PIPE_CONF_CHECK_I(dsc.config.rc_quant_incr_limit0);
5468 PIPE_CONF_CHECK_I(dsc.config.rc_quant_incr_limit1);
5469 PIPE_CONF_CHECK_I(dsc.config.slice_chunk_size);
5470 PIPE_CONF_CHECK_I(dsc.config.second_line_bpg_offset);
5471 PIPE_CONF_CHECK_I(dsc.config.nsl_bpg_offset);
5472
5473 PIPE_CONF_CHECK_BOOL(dsc.compression_enable);
5474 PIPE_CONF_CHECK_BOOL(dsc.dsc_split);
5475 PIPE_CONF_CHECK_I(dsc.compressed_bpp_x16);
5476
5477 PIPE_CONF_CHECK_BOOL(splitter.enable);
5478 PIPE_CONF_CHECK_I(splitter.link_count);
5479 PIPE_CONF_CHECK_I(splitter.pixel_overlap);
5480
5481 if (!fastset) {
5482 PIPE_CONF_CHECK_BOOL(vrr.enable);
5483 PIPE_CONF_CHECK_I(vrr.vmin);
5484 PIPE_CONF_CHECK_I(vrr.vmax);
5485 PIPE_CONF_CHECK_I(vrr.flipline);
5486 PIPE_CONF_CHECK_I(vrr.pipeline_full);
5487 PIPE_CONF_CHECK_I(vrr.guardband);
5488 PIPE_CONF_CHECK_I(vrr.vsync_start);
5489 PIPE_CONF_CHECK_I(vrr.vsync_end);
5490 PIPE_CONF_CHECK_LLI(cmrr.cmrr_m);
5491 PIPE_CONF_CHECK_LLI(cmrr.cmrr_n);
5492 PIPE_CONF_CHECK_BOOL(cmrr.enable);
5493 }
5494
5495 #undef PIPE_CONF_CHECK_X
5496 #undef PIPE_CONF_CHECK_I
5497 #undef PIPE_CONF_CHECK_LLI
5498 #undef PIPE_CONF_CHECK_BOOL
5499 #undef PIPE_CONF_CHECK_P
5500 #undef PIPE_CONF_CHECK_FLAGS
5501 #undef PIPE_CONF_CHECK_COLOR_LUT
5502 #undef PIPE_CONF_CHECK_TIMINGS
5503 #undef PIPE_CONF_CHECK_RECT
5504 #undef PIPE_CONF_QUIRK
5505
5506 return ret;
5507 }
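/*
 * Illustrative caller sketch (hypothetical state pointers; mirrors the
 * fastset check performed by intel_crtc_check_fastset() below):
 *
 *	if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true))
 *		; // fastset requirements not met -> force a full modeset
 */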
5508
5509 static void
5510 intel_verify_planes(struct intel_atomic_state *state)
5511 {
5512 struct intel_plane *plane;
5513 const struct intel_plane_state *plane_state;
5514 int i;
5515
5516 for_each_new_intel_plane_in_state(state, plane,
5517 plane_state, i)
5518 assert_plane(plane, plane_state->planar_slave ||
5519 plane_state->uapi.visible);
5520 }
5521
5522 static int intel_modeset_pipe(struct intel_atomic_state *state,
5523 struct intel_crtc_state *crtc_state,
5524 const char *reason)
5525 {
5526 struct drm_i915_private *i915 = to_i915(state->base.dev);
5527 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5528 int ret;
5529
5530 drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] Full modeset due to %s\n",
5531 crtc->base.base.id, crtc->base.name, reason);
5532
5533 ret = drm_atomic_add_affected_connectors(&state->base,
5534 &crtc->base);
5535 if (ret)
5536 return ret;
5537
5538 ret = intel_dp_tunnel_atomic_add_state_for_crtc(state, crtc);
5539 if (ret)
5540 return ret;
5541
5542 ret = intel_dp_mst_add_topology_state_for_crtc(state, crtc);
5543 if (ret)
5544 return ret;
5545
5546 ret = intel_atomic_add_affected_planes(state, crtc);
5547 if (ret)
5548 return ret;
5549
5550 crtc_state->uapi.mode_changed = true;
5551
5552 return 0;
5553 }
5554
5555 /**
5556 * intel_modeset_pipes_in_mask_early - force a full modeset on a set of pipes
5557 * @state: intel atomic state
5558 * @reason: the reason for the full modeset
5559 * @mask: mask of pipes to modeset
5560 *
5561 * Add pipes in @mask to @state and force a full modeset on the enabled ones
5562 * due to the description in @reason.
5563 * This function can be called only before new plane states are computed.
5564 *
5565 * Returns 0 in case of success, negative error code otherwise.
5566 */
5567 int intel_modeset_pipes_in_mask_early(struct intel_atomic_state *state,
5568 const char *reason, u8 mask)
5569 {
5570 struct drm_i915_private *i915 = to_i915(state->base.dev);
5571 struct intel_crtc *crtc;
5572
5573 for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, mask) {
5574 struct intel_crtc_state *crtc_state;
5575 int ret;
5576
5577 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
5578 if (IS_ERR(crtc_state))
5579 return PTR_ERR(crtc_state);
5580
5581 if (!crtc_state->hw.enable ||
5582 intel_crtc_needs_modeset(crtc_state))
5583 continue;
5584
5585 ret = intel_modeset_pipe(state, crtc_state, reason);
5586 if (ret)
5587 return ret;
5588 }
5589
5590 return 0;
5591 }
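/*
 * Illustrative caller sketch (the reason string and pipe mask are
 * hypothetical, not from the original source):
 *
 *	ret = intel_modeset_pipes_in_mask_early(state, "CDCLK change",
 *						BIT(PIPE_A) | BIT(PIPE_B));
 *	if (ret)
 *		return ret;
 */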
5592
5593 static void
5594 intel_crtc_flag_modeset(struct intel_crtc_state *crtc_state)
5595 {
5596 crtc_state->uapi.mode_changed = true;
5597
5598 crtc_state->update_pipe = false;
5599 crtc_state->update_m_n = false;
5600 crtc_state->update_lrr = false;
5601 }
5602
5603 /**
5604 * intel_modeset_all_pipes_late - force a full modeset on all pipes
5605 * @state: intel atomic state
5606 * @reason: the reason for the full modeset
5607 *
5608 * Add all pipes to @state and force a full modeset on the active ones due to
5609 * the description in @reason.
5610 * This function can be called only after new plane states are computed already.
5611 *
5612 * Returns 0 in case of success, negative error code otherwise.
5613 */
5614 int intel_modeset_all_pipes_late(struct intel_atomic_state *state,
5615 const char *reason)
5616 {
5617 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
5618 struct intel_crtc *crtc;
5619
5620 for_each_intel_crtc(&dev_priv->drm, crtc) {
5621 struct intel_crtc_state *crtc_state;
5622 int ret;
5623
5624 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
5625 if (IS_ERR(crtc_state))
5626 return PTR_ERR(crtc_state);
5627
5628 if (!crtc_state->hw.active ||
5629 intel_crtc_needs_modeset(crtc_state))
5630 continue;
5631
5632 ret = intel_modeset_pipe(state, crtc_state, reason);
5633 if (ret)
5634 return ret;
5635
5636 intel_crtc_flag_modeset(crtc_state);
5637
5638 crtc_state->update_planes |= crtc_state->active_planes;
5639 crtc_state->async_flip_planes = 0;
5640 crtc_state->do_async_flip = false;
5641 }
5642
5643 return 0;
5644 }
5645
5646 int intel_modeset_commit_pipes(struct drm_i915_private *i915,
5647 u8 pipe_mask,
5648 struct drm_modeset_acquire_ctx *ctx)
5649 {
5650 struct drm_atomic_state *state;
5651 struct intel_crtc *crtc;
5652 int ret;
5653
5654 state = drm_atomic_state_alloc(&i915->drm);
5655 if (!state)
5656 return -ENOMEM;
5657
5658 state->acquire_ctx = ctx;
5659 to_intel_atomic_state(state)->internal = true;
5660
5661 for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, pipe_mask) {
5662 struct intel_crtc_state *crtc_state =
5663 intel_atomic_get_crtc_state(state, crtc);
5664
5665 if (IS_ERR(crtc_state)) {
5666 ret = PTR_ERR(crtc_state);
5667 goto out;
5668 }
5669
5670 crtc_state->uapi.connectors_changed = true;
5671 }
5672
5673 ret = drm_atomic_commit(state);
5674 out:
5675 drm_atomic_state_put(state);
5676
5677 return ret;
5678 }
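/*
 * Illustrative caller sketch, assuming the usual acquire-context dance
 * from <drm/drm_modeset_lock.h> (the pipe mask is hypothetical):
 *
 *	struct drm_modeset_acquire_ctx ctx;
 *	int ret;
 *
 *	DRM_MODESET_LOCK_ALL_BEGIN(&i915->drm, ctx, 0, ret);
 *	ret = intel_modeset_commit_pipes(i915, BIT(PIPE_A), &ctx);
 *	DRM_MODESET_LOCK_ALL_END(&i915->drm, ctx, ret);
 */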
5679
5680 /*
5681 * This implements the workaround described in the "notes" section of the mode
5682 * set sequence documentation. When going from no pipes or single pipe to
5683 * multiple pipes, and planes are enabled after the pipe, we need to wait at
5684 * least 2 vblanks on the first pipe before enabling planes on the second pipe.
5685 */
5686 static int hsw_mode_set_planes_workaround(struct intel_atomic_state *state)
5687 {
5688 struct intel_crtc_state *crtc_state;
5689 struct intel_crtc *crtc;
5690 struct intel_crtc_state *first_crtc_state = NULL;
5691 struct intel_crtc_state *other_crtc_state = NULL;
5692 enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
5693 int i;
5694
5695 /* look at all crtc's that are going to be enabled during the modeset */
5696 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
5697 if (!crtc_state->hw.active ||
5698 !intel_crtc_needs_modeset(crtc_state))
5699 continue;
5700
5701 if (first_crtc_state) {
5702 other_crtc_state = crtc_state;
5703 break;
5704 } else {
5705 first_crtc_state = crtc_state;
5706 first_pipe = crtc->pipe;
5707 }
5708 }
5709
5710 /* No workaround needed? */
5711 if (!first_crtc_state)
5712 return 0;
5713
5714 /* w/a possibly needed, check how many crtc's are already enabled. */
5715 for_each_intel_crtc(state->base.dev, crtc) {
5716 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
5717 if (IS_ERR(crtc_state))
5718 return PTR_ERR(crtc_state);
5719
5720 crtc_state->hsw_workaround_pipe = INVALID_PIPE;
5721
5722 if (!crtc_state->hw.active ||
5723 intel_crtc_needs_modeset(crtc_state))
5724 continue;
5725
5726 /* 2 or more enabled crtcs means no need for w/a */
5727 if (enabled_pipe != INVALID_PIPE)
5728 return 0;
5729
5730 enabled_pipe = crtc->pipe;
5731 }
5732
5733 if (enabled_pipe != INVALID_PIPE)
5734 first_crtc_state->hsw_workaround_pipe = enabled_pipe;
5735 else if (other_crtc_state)
5736 other_crtc_state->hsw_workaround_pipe = first_pipe;
5737
5738 return 0;
5739 }
5740
5741 u8 intel_calc_active_pipes(struct intel_atomic_state *state,
5742 u8 active_pipes)
5743 {
5744 const struct intel_crtc_state *crtc_state;
5745 struct intel_crtc *crtc;
5746 int i;
5747
5748 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
5749 if (crtc_state->hw.active)
5750 active_pipes |= BIT(crtc->pipe);
5751 else
5752 active_pipes &= ~BIT(crtc->pipe);
5753 }
5754
5755 return active_pipes;
5756 }
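/*
 * Illustrative usage sketch (hypothetical caller): starting from the
 * currently active pipe mask, fold in the per-crtc changes carried by
 * this atomic state:
 *
 *	u8 new_active_pipes =
 *		intel_calc_active_pipes(state, old_active_pipes);
 */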
5757
5758 static int intel_modeset_checks(struct intel_atomic_state *state)
5759 {
5760 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
5761
5762 state->modeset = true;
5763
5764 if (IS_HASWELL(dev_priv))
5765 return hsw_mode_set_planes_workaround(state);
5766
5767 return 0;
5768 }
5769
5770 static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state,
5771 struct intel_crtc_state *new_crtc_state)
5772 {
5773 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
5774 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
5775
5776 /* only allow LRR when the timings stay within the VRR range */
5777 if (old_crtc_state->vrr.in_range != new_crtc_state->vrr.in_range)
5778 new_crtc_state->update_lrr = false;
5779
5780 if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true))
5781 drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] fastset requirement not met, forcing full modeset\n",
5782 crtc->base.base.id, crtc->base.name);
5783 else
5784 new_crtc_state->uapi.mode_changed = false;
5785
5786 if (intel_compare_link_m_n(&old_crtc_state->dp_m_n,
5787 &new_crtc_state->dp_m_n))
5788 new_crtc_state->update_m_n = false;
5789
5790 if (old_crtc_state->hw.adjusted_mode.crtc_vtotal == new_crtc_state->hw.adjusted_mode.crtc_vtotal &&
5791 old_crtc_state->hw.adjusted_mode.crtc_vblank_end == new_crtc_state->hw.adjusted_mode.crtc_vblank_end)
5792 new_crtc_state->update_lrr = false;
5793
5794 if (intel_crtc_needs_modeset(new_crtc_state))
5795 intel_crtc_flag_modeset(new_crtc_state);
5796 else
5797 new_crtc_state->update_pipe = true;
5798 }
5799
5800 static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state,
5801 struct intel_crtc *crtc,
5802 u8 plane_ids_mask)
5803 {
5804 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
5805 struct intel_plane *plane;
5806
5807 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
5808 struct intel_plane_state *plane_state;
5809
5810 if ((plane_ids_mask & BIT(plane->id)) == 0)
5811 continue;
5812
5813 plane_state = intel_atomic_get_plane_state(state, plane);
5814 if (IS_ERR(plane_state))
5815 return PTR_ERR(plane_state);
5816 }
5817
5818 return 0;
5819 }
5820
5821 int intel_atomic_add_affected_planes(struct intel_atomic_state *state,
5822 struct intel_crtc *crtc)
5823 {
5824 const struct intel_crtc_state *old_crtc_state =
5825 intel_atomic_get_old_crtc_state(state, crtc);
5826 const struct intel_crtc_state *new_crtc_state =
5827 intel_atomic_get_new_crtc_state(state, crtc);
5828
5829 return intel_crtc_add_planes_to_state(state, crtc,
5830 old_crtc_state->enabled_planes |
5831 new_crtc_state->enabled_planes);
5832 }
5833
5834 static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv)
5835 {
5836 /* See {hsw,vlv,ivb}_plane_ratio() */
5837 return IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv) ||
5838 IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
5839 IS_IVYBRIDGE(dev_priv);
5840 }
5841
5842 static int intel_crtc_add_joiner_planes(struct intel_atomic_state *state,
5843 struct intel_crtc *crtc,
5844 struct intel_crtc *other)
5845 {
5846 const struct intel_plane_state __maybe_unused *plane_state;
5847 struct intel_plane *plane;
5848 u8 plane_ids = 0;
5849 int i;
5850
5851 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
5852 if (plane->pipe == crtc->pipe)
5853 plane_ids |= BIT(plane->id);
5854 }
5855
5856 return intel_crtc_add_planes_to_state(state, other, plane_ids);
5857 }
5858
5859 static int intel_joiner_add_affected_planes(struct intel_atomic_state *state)
5860 {
5861 struct drm_i915_private *i915 = to_i915(state->base.dev);
5862 const struct intel_crtc_state *crtc_state;
5863 struct intel_crtc *crtc;
5864 int i;
5865
5866 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
5867 struct intel_crtc *other;
5868
5869 for_each_intel_crtc_in_pipe_mask(&i915->drm, other,
5870 crtc_state->joiner_pipes) {
5871 int ret;
5872
5873 if (crtc == other)
5874 continue;
5875
5876 ret = intel_crtc_add_joiner_planes(state, crtc, other);
5877 if (ret)
5878 return ret;
5879 }
5880 }
5881
5882 return 0;
5883 }
5884
5885 static int intel_atomic_check_planes(struct intel_atomic_state *state)
5886 {
5887 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
5888 struct intel_crtc_state *old_crtc_state, *new_crtc_state;
5889 struct intel_plane_state __maybe_unused *plane_state;
5890 struct intel_plane *plane;
5891 struct intel_crtc *crtc;
5892 int i, ret;
5893
5894 ret = icl_add_linked_planes(state);
5895 if (ret)
5896 return ret;
5897
5898 ret = intel_joiner_add_affected_planes(state);
5899 if (ret)
5900 return ret;
5901
5902 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
5903 ret = intel_plane_atomic_check(state, plane);
5904 if (ret) {
5905 drm_dbg_atomic(&dev_priv->drm,
5906 "[PLANE:%d:%s] atomic driver check failed\n",
5907 plane->base.base.id, plane->base.name);
5908 return ret;
5909 }
5910 }
5911
5912 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
5913 new_crtc_state, i) {
5914 u8 old_active_planes, new_active_planes;
5915
5916 ret = icl_check_nv12_planes(state, crtc);
5917 if (ret)
5918 return ret;
5919
5920 /*
5921 * On some platforms the number of active planes affects
5922 * the planes' minimum cdclk calculation. Add such planes
5923 * to the state before we compute the minimum cdclk.
5924 */
5925 if (!active_planes_affects_min_cdclk(dev_priv))
5926 continue;
5927
5928 old_active_planes = old_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
5929 new_active_planes = new_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
5930
5931 if (hweight8(old_active_planes) == hweight8(new_active_planes))
5932 continue;
5933
5934 ret = intel_crtc_add_planes_to_state(state, crtc, new_active_planes);
5935 if (ret)
5936 return ret;
5937 }
5938
5939 return 0;
5940 }
5941
5942 static int intel_atomic_check_crtcs(struct intel_atomic_state *state)
5943 {
5944 struct intel_crtc_state __maybe_unused *crtc_state;
5945 struct intel_crtc *crtc;
5946 int i;
5947
5948 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
5949 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
5950 int ret;
5951
5952 ret = intel_crtc_atomic_check(state, crtc);
5953 if (ret) {
5954 drm_dbg_atomic(&i915->drm,
5955 "[CRTC:%d:%s] atomic driver check failed\n",
5956 crtc->base.base.id, crtc->base.name);
5957 return ret;
5958 }
5959 }
5960
5961 return 0;
5962 }
5963
5964 static bool intel_cpu_transcoders_need_modeset(struct intel_atomic_state *state,
5965 u8 transcoders)
5966 {
5967 const struct intel_crtc_state *new_crtc_state;
5968 struct intel_crtc *crtc;
5969 int i;
5970
5971 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
5972 if (new_crtc_state->hw.enable &&
5973 transcoders & BIT(new_crtc_state->cpu_transcoder) &&
5974 intel_crtc_needs_modeset(new_crtc_state))
5975 return true;
5976 }
5977
5978 return false;
5979 }
5980
5981 static bool intel_pipes_need_modeset(struct intel_atomic_state *state,
5982 u8 pipes)
5983 {
5984 const struct intel_crtc_state *new_crtc_state;
5985 struct intel_crtc *crtc;
5986 int i;
5987
5988 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
5989 if (new_crtc_state->hw.enable &&
5990 pipes & BIT(crtc->pipe) &&
5991 intel_crtc_needs_modeset(new_crtc_state))
5992 return true;
5993 }
5994
5995 return false;
5996 }
5997
5998 static int intel_atomic_check_joiner(struct intel_atomic_state *state,
5999 struct intel_crtc *primary_crtc)
6000 {
6001 struct drm_i915_private *i915 = to_i915(state->base.dev);
6002 struct intel_crtc_state *primary_crtc_state =
6003 intel_atomic_get_new_crtc_state(state, primary_crtc);
6004 struct intel_crtc *secondary_crtc;
6005
6006 if (!primary_crtc_state->joiner_pipes)
6007 return 0;
6008
6009 /* sanity check */
6010 if (drm_WARN_ON(&i915->drm,
6011 primary_crtc->pipe != joiner_primary_pipe(primary_crtc_state)))
6012 return -EINVAL;
6013
6014 if (primary_crtc_state->joiner_pipes & ~joiner_pipes(i915)) {
6015 drm_dbg_kms(&i915->drm,
6016 "[CRTC:%d:%s] Cannot act as joiner primary "
6017 "(need 0x%x as pipes, only 0x%x possible)\n",
6018 primary_crtc->base.base.id, primary_crtc->base.name,
6019 primary_crtc_state->joiner_pipes, joiner_pipes(i915));
6020 return -EINVAL;
6021 }
6022
6023 for_each_intel_crtc_in_pipe_mask(&i915->drm, secondary_crtc,
6024 intel_crtc_joiner_secondary_pipes(primary_crtc_state)) {
6025 struct intel_crtc_state *secondary_crtc_state;
6026 int ret;
6027
6028 secondary_crtc_state = intel_atomic_get_crtc_state(&state->base, secondary_crtc);
6029 if (IS_ERR(secondary_crtc_state))
6030 return PTR_ERR(secondary_crtc_state);
6031
6032 /* primary being enabled, secondary was already configured? */
6033 if (secondary_crtc_state->uapi.enable) {
6034 drm_dbg_kms(&i915->drm,
6035 "[CRTC:%d:%s] secondary is enabled as normal CRTC, but "
6036 "[CRTC:%d:%s] claiming this CRTC for joiner.\n",
6037 secondary_crtc->base.base.id, secondary_crtc->base.name,
6038 primary_crtc->base.base.id, primary_crtc->base.name);
6039 return -EINVAL;
6040 }
6041
6042 /*
6043 * The state copy logic assumes the primary crtc gets processed
6044 * before the secondary crtc during the main compute_config loop.
6045 * This works because the crtcs are created in pipe order,
6046 * and the hardware requires primary pipe < secondary pipe as well.
6047 * Should that change we need to rethink the logic.
6048 */
6049 if (WARN_ON(drm_crtc_index(&primary_crtc->base) >
6050 drm_crtc_index(&secondary_crtc->base)))
6051 return -EINVAL;
6052
6053 drm_dbg_kms(&i915->drm,
6054 "[CRTC:%d:%s] Used as secondary for joiner primary [CRTC:%d:%s]\n",
6055 secondary_crtc->base.base.id, secondary_crtc->base.name,
6056 primary_crtc->base.base.id, primary_crtc->base.name);
6057
6058 secondary_crtc_state->joiner_pipes =
6059 primary_crtc_state->joiner_pipes;
6060
6061 ret = copy_joiner_crtc_state_modeset(state, secondary_crtc);
6062 if (ret)
6063 return ret;
6064 }
6065
6066 return 0;
6067 }
6068
6069 static void kill_joiner_secondaries(struct intel_atomic_state *state,
6070 struct intel_crtc *primary_crtc)
6071 {
6072 struct drm_i915_private *i915 = to_i915(state->base.dev);
6073 struct intel_crtc_state *primary_crtc_state =
6074 intel_atomic_get_new_crtc_state(state, primary_crtc);
6075 struct intel_crtc *secondary_crtc;
6076
6077 for_each_intel_crtc_in_pipe_mask(&i915->drm, secondary_crtc,
6078 intel_crtc_joiner_secondary_pipes(primary_crtc_state)) {
6079 struct intel_crtc_state *secondary_crtc_state =
6080 intel_atomic_get_new_crtc_state(state, secondary_crtc);
6081
6082 secondary_crtc_state->joiner_pipes = 0;
6083
6084 intel_crtc_copy_uapi_to_hw_state_modeset(state, secondary_crtc);
6085 }
6086
6087 primary_crtc_state->joiner_pipes = 0;
6088 }
6089
6090 /**
6091 * DOC: asynchronous flip implementation
6092 *
6093 * Asynchronous page flip is the implementation for the DRM_MODE_PAGE_FLIP_ASYNC
6094 * flag. Currently async flip is only supported via the drmModePageFlip IOCTL.
6095 * Correspondingly, support is currently added for primary plane only.
6096 *
6097 * Async flip can only change the plane surface address, so anything else
6098 * changing is rejected from the intel_async_flip_check_hw() function.
6099 * Once this check is cleared, flip done interrupt is enabled using
6100 * the intel_crtc_enable_flip_done() function.
6101 *
6102 * As soon as the surface address register is written, flip done interrupt is
6103 * generated and the requested events are sent to the userspace in the interrupt
6104 * handler itself. The timestamp and sequence sent during the flip done event
6105 * correspond to the last vblank and have no relation to the actual time when
6106 * the flip done event was sent.
6107 */
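/*
 * Illustrative userspace sketch of the path described above (assumes
 * libdrm's drmModePageFlip(); fd, crtc_id, fb_id and user_data are
 * hypothetical):
 *
 *	#include <xf86drmMode.h>
 *
 *	ret = drmModePageFlip(fd, crtc_id, fb_id,
 *			      DRM_MODE_PAGE_FLIP_EVENT |
 *			      DRM_MODE_PAGE_FLIP_ASYNC,
 *			      user_data);
 */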
6108 static int intel_async_flip_check_uapi(struct intel_atomic_state *state,
6109 struct intel_crtc *crtc)
6110 {
6111 struct drm_i915_private *i915 = to_i915(state->base.dev);
6112 const struct intel_crtc_state *new_crtc_state =
6113 intel_atomic_get_new_crtc_state(state, crtc);
6114 const struct intel_plane_state *old_plane_state;
6115 struct intel_plane_state *new_plane_state;
6116 struct intel_plane *plane;
6117 int i;
6118
6119 if (!new_crtc_state->uapi.async_flip)
6120 return 0;
6121
6122 if (!new_crtc_state->uapi.active) {
6123 drm_dbg_kms(&i915->drm,
6124 "[CRTC:%d:%s] not active\n",
6125 crtc->base.base.id, crtc->base.name);
6126 return -EINVAL;
6127 }
6128
6129 if (intel_crtc_needs_modeset(new_crtc_state)) {
6130 drm_dbg_kms(&i915->drm,
6131 "[CRTC:%d:%s] modeset required\n",
6132 crtc->base.base.id, crtc->base.name);
6133 return -EINVAL;
6134 }
6135
6136 /*
6137 * FIXME: joiner+async flip is busted currently.
6138 * Remove this check once the issues are fixed.
6139 */
6140 if (new_crtc_state->joiner_pipes) {
6141 drm_dbg_kms(&i915->drm,
6142 "[CRTC:%d:%s] async flip disallowed with joiner\n",
6143 crtc->base.base.id, crtc->base.name);
6144 return -EINVAL;
6145 }
6146
6147 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
6148 new_plane_state, i) {
6149 if (plane->pipe != crtc->pipe)
6150 continue;
6151
6152 /*
6153 * TODO: Async flip is only supported through the page flip IOCTL
6154 * as of now. So support is currently added for primary plane only.
6155 * Support for other planes, on platforms that support
6156 * this (vlv/chv and icl+), should be added when async flip is
6157 * enabled in the atomic IOCTL path.
6158 */
6159 if (!plane->async_flip) {
6160 drm_dbg_kms(&i915->drm,
6161 "[PLANE:%d:%s] async flip not supported\n",
6162 plane->base.base.id, plane->base.name);
6163 return -EINVAL;
6164 }
6165
6166 if (!old_plane_state->uapi.fb || !new_plane_state->uapi.fb) {
6167 drm_dbg_kms(&i915->drm,
6168 "[PLANE:%d:%s] no old or new framebuffer\n",
6169 plane->base.base.id, plane->base.name);
6170 return -EINVAL;
6171 }
6172 }
6173
6174 return 0;
6175 }
6176
6177 static int intel_async_flip_check_hw(struct intel_atomic_state *state, struct intel_crtc *crtc)
6178 {
6179 struct drm_i915_private *i915 = to_i915(state->base.dev);
6180 const struct intel_crtc_state *old_crtc_state, *new_crtc_state;
6181 const struct intel_plane_state *new_plane_state, *old_plane_state;
6182 struct intel_plane *plane;
6183 int i;
6184
6185 old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);
6186 new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
6187
6188 if (!new_crtc_state->uapi.async_flip)
6189 return 0;
6190
6191 if (!new_crtc_state->hw.active) {
6192 drm_dbg_kms(&i915->drm,
6193 "[CRTC:%d:%s] not active\n",
6194 crtc->base.base.id, crtc->base.name);
6195 return -EINVAL;
6196 }
6197
6198 if (intel_crtc_needs_modeset(new_crtc_state)) {
6199 drm_dbg_kms(&i915->drm,
6200 "[CRTC:%d:%s] modeset required\n",
6201 crtc->base.base.id, crtc->base.name);
6202 return -EINVAL;
6203 }
6204
6205 if (old_crtc_state->active_planes != new_crtc_state->active_planes) {
6206 drm_dbg_kms(&i915->drm,
6207 "[CRTC:%d:%s] Active planes cannot be in async flip\n",
6208 crtc->base.base.id, crtc->base.name);
6209 return -EINVAL;
6210 }
6211
6212 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
6213 new_plane_state, i) {
6214 if (plane->pipe != crtc->pipe)
6215 continue;
6216
6217 /*
6218 * Only async flip capable planes should be in the state
6219 * if we're really about to ask the hardware to perform
6220 * an async flip. We should never get this far otherwise.
6221 */
6222 if (drm_WARN_ON(&i915->drm,
6223 new_crtc_state->do_async_flip && !plane->async_flip))
6224 return -EINVAL;
6225
6226 /*
6227 * Only check async flip capable planes; other planes
6228 * may be involved in the initial commit due to
6229 * the wm0/ddb optimization.
6230 *
6231 * TODO maybe should track which planes actually
6232 * were requested to do the async flip...
6233 */
6234 if (!plane->async_flip)
6235 continue;
6236
6237 /*
6238 * FIXME: This check is kept generic for all platforms.
6239 * Need to verify this for all gen9 platforms to enable
6240 * this selectively if required.
6241 */
6242 switch (new_plane_state->hw.fb->modifier) {
6243 case DRM_FORMAT_MOD_LINEAR:
6244 /*
6245 * FIXME: Async flip on linear buffers is supported on ICL,
6246 * but additional alignment and fbc restrictions
6247 * need to be taken care of. These aren't applicable for
6248 * gen12+.
6249 */
6250 if (DISPLAY_VER(i915) < 12) {
6251 drm_dbg_kms(&i915->drm,
6252 "[PLANE:%d:%s] Modifier 0x%llx does not support async flip on display ver %d\n",
6253 plane->base.base.id, plane->base.name,
6254 new_plane_state->hw.fb->modifier, DISPLAY_VER(i915));
6255 return -EINVAL;
6256 }
6257 break;
6258
6259 case I915_FORMAT_MOD_X_TILED:
6260 case I915_FORMAT_MOD_Y_TILED:
6261 case I915_FORMAT_MOD_Yf_TILED:
6262 case I915_FORMAT_MOD_4_TILED:
6263 case I915_FORMAT_MOD_4_TILED_BMG_CCS:
6264 case I915_FORMAT_MOD_4_TILED_LNL_CCS:
6265 break;
6266 default:
6267 drm_dbg_kms(&i915->drm,
6268 "[PLANE:%d:%s] Modifier 0x%llx does not support async flip\n",
6269 plane->base.base.id, plane->base.name,
6270 new_plane_state->hw.fb->modifier);
6271 return -EINVAL;
6272 }
6273
6274 if (new_plane_state->hw.fb->format->num_planes > 1) {
6275 drm_dbg_kms(&i915->drm,
6276 "[PLANE:%d:%s] Planar formats do not support async flips\n",
6277 plane->base.base.id, plane->base.name);
6278 return -EINVAL;
6279 }
6280
6281 /*
6282 * We turn the first async flip request into a sync flip
6283 * so that we can reconfigure the plane (eg. change modifier).
6284 */
6285 if (!new_crtc_state->do_async_flip)
6286 continue;
6287
6288 if (old_plane_state->view.color_plane[0].mapping_stride !=
6289 new_plane_state->view.color_plane[0].mapping_stride) {
6290 drm_dbg_kms(&i915->drm,
6291 "[PLANE:%d:%s] Stride cannot be changed in async flip\n",
6292 plane->base.base.id, plane->base.name);
6293 return -EINVAL;
6294 }
6295
6296 if (old_plane_state->hw.fb->modifier !=
6297 new_plane_state->hw.fb->modifier) {
6298 drm_dbg_kms(&i915->drm,
6299 "[PLANE:%d:%s] Modifier cannot be changed in async flip\n",
6300 plane->base.base.id, plane->base.name);
6301 return -EINVAL;
6302 }
6303
6304 if (old_plane_state->hw.fb->format !=
6305 new_plane_state->hw.fb->format) {
6306 drm_dbg_kms(&i915->drm,
6307 "[PLANE:%d:%s] Pixel format cannot be changed in async flip\n",
6308 plane->base.base.id, plane->base.name);
6309 return -EINVAL;
6310 }
6311
6312 if (old_plane_state->hw.rotation !=
6313 new_plane_state->hw.rotation) {
6314 drm_dbg_kms(&i915->drm,
6315 "[PLANE:%d:%s] Rotation cannot be changed in async flip\n",
6316 plane->base.base.id, plane->base.name);
6317 return -EINVAL;
6318 }
6319
6320 if (!drm_rect_equals(&old_plane_state->uapi.src, &new_plane_state->uapi.src) ||
6321 !drm_rect_equals(&old_plane_state->uapi.dst, &new_plane_state->uapi.dst)) {
6322 drm_dbg_kms(&i915->drm,
6323 "[PLANE:%d:%s] Size/co-ordinates cannot be changed in async flip\n",
6324 plane->base.base.id, plane->base.name);
6325 return -EINVAL;
6326 }
6327
6328 if (old_plane_state->hw.alpha != new_plane_state->hw.alpha) {
6329 drm_dbg_kms(&i915->drm,
6330 "[PLANES:%d:%s] Alpha value cannot be changed in async flip\n",
6331 plane->base.base.id, plane->base.name);
6332 return -EINVAL;
6333 }
6334
6335 if (old_plane_state->hw.pixel_blend_mode !=
6336 new_plane_state->hw.pixel_blend_mode) {
6337 drm_dbg_kms(&i915->drm,
6338 "[PLANE:%d:%s] Pixel blend mode cannot be changed in async flip\n",
6339 plane->base.base.id, plane->base.name);
6340 return -EINVAL;
6341 }
6342
6343 if (old_plane_state->hw.color_encoding != new_plane_state->hw.color_encoding) {
6344 drm_dbg_kms(&i915->drm,
6345 "[PLANE:%d:%s] Color encoding cannot be changed in async flip\n",
6346 plane->base.base.id, plane->base.name);
6347 return -EINVAL;
6348 }
6349
6350 if (old_plane_state->hw.color_range != new_plane_state->hw.color_range) {
6351 drm_dbg_kms(&i915->drm,
6352 "[PLANE:%d:%s] Color range cannot be changed in async flip\n",
6353 plane->base.base.id, plane->base.name);
6354 return -EINVAL;
6355 }
6356
6357		/* plane decryption is allowed to change only in synchronous flips */
6358 if (old_plane_state->decrypt != new_plane_state->decrypt) {
6359 drm_dbg_kms(&i915->drm,
6360 "[PLANE:%d:%s] Decryption cannot be changed in async flip\n",
6361 plane->base.base.id, plane->base.name);
6362 return -EINVAL;
6363 }
6364 }
6365
6366 return 0;
6367 }
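/*
 * Editor's sketch (compiled out, not driver code): the checks above all
 * encode one rule -- an async flip may only change the scanout buffer
 * address, never the layout the hardware was programmed with. A condensed
 * form of that invariant, reusing the plane state fields checked above:
 */
#if 0
static bool async_flip_compatible(const struct intel_plane_state *old,
				  const struct intel_plane_state *new)
{
	/* Anything affecting watermarks, FBC or scanout layout must match. */
	return old->hw.fb->modifier == new->hw.fb->modifier &&
	       old->hw.fb->format == new->hw.fb->format &&
	       old->hw.rotation == new->hw.rotation &&
	       drm_rect_equals(&old->uapi.src, &new->uapi.src) &&
	       drm_rect_equals(&old->uapi.dst, &new->uapi.dst);
}
#endif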
6368
6369 static int intel_joiner_add_affected_crtcs(struct intel_atomic_state *state)
6370 {
6371 struct drm_i915_private *i915 = to_i915(state->base.dev);
6372 struct intel_crtc_state *crtc_state;
6373 struct intel_crtc *crtc;
6374 u8 affected_pipes = 0;
6375 u8 modeset_pipes = 0;
6376 int i;
6377
6378 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
6379 affected_pipes |= crtc_state->joiner_pipes;
6380 if (intel_crtc_needs_modeset(crtc_state))
6381 modeset_pipes |= crtc_state->joiner_pipes;
6382 }
6383
6384 for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, affected_pipes) {
6385 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
6386 if (IS_ERR(crtc_state))
6387 return PTR_ERR(crtc_state);
6388 }
6389
6390 for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, modeset_pipes) {
6391 int ret;
6392
6393 crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
6394
6395 crtc_state->uapi.mode_changed = true;
6396
6397 ret = drm_atomic_add_affected_connectors(&state->base, &crtc->base);
6398 if (ret)
6399 return ret;
6400
6401 ret = intel_atomic_add_affected_planes(state, crtc);
6402 if (ret)
6403 return ret;
6404 }
6405
6406 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
6407 /* Kill old joiner link, we may re-establish afterwards */
6408 if (intel_crtc_needs_modeset(crtc_state) &&
6409 intel_crtc_is_joiner_primary(crtc_state))
6410 kill_joiner_secondaries(state, crtc);
6411 }
6412
6413 return 0;
6414 }
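/*
 * Editor's note (compiled-out sketch): joiner_pipes is a plain bitmask of
 * pipes including the primary, so deriving the secondaries is a single
 * mask operation. Hypothetical helper illustrating the mask handling the
 * loops above rely on:
 */
#if 0
static u8 joiner_secondary_pipes(u8 joiner_pipes, enum pipe primary_pipe)
{
	/* The primary is part of joiner_pipes; masking it out leaves the secondaries. */
	return joiner_pipes & ~BIT(primary_pipe);
}
#endif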
6415
6416 static int intel_atomic_check_config(struct intel_atomic_state *state,
6417 struct intel_link_bw_limits *limits,
6418 enum pipe *failed_pipe)
6419 {
6420 struct drm_i915_private *i915 = to_i915(state->base.dev);
6421 struct intel_crtc_state *new_crtc_state;
6422 struct intel_crtc *crtc;
6423 int ret;
6424 int i;
6425
6426 *failed_pipe = INVALID_PIPE;
6427
6428 ret = intel_joiner_add_affected_crtcs(state);
6429 if (ret)
6430 return ret;
6431
6432 ret = intel_fdi_add_affected_crtcs(state);
6433 if (ret)
6434 return ret;
6435
6436 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
6437 if (!intel_crtc_needs_modeset(new_crtc_state)) {
6438 if (intel_crtc_is_joiner_secondary(new_crtc_state))
6439 copy_joiner_crtc_state_nomodeset(state, crtc);
6440 else
6441 intel_crtc_copy_uapi_to_hw_state_nomodeset(state, crtc);
6442 continue;
6443 }
6444
6445 if (drm_WARN_ON(&i915->drm, intel_crtc_is_joiner_secondary(new_crtc_state)))
6446 continue;
6447
6448 ret = intel_crtc_prepare_cleared_state(state, crtc);
6449 if (ret)
6450 goto fail;
6451
6452 if (!new_crtc_state->hw.enable)
6453 continue;
6454
6455 ret = intel_modeset_pipe_config(state, crtc, limits);
6456 if (ret)
6457 goto fail;
6458 }
6459
6460 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
6461 if (!intel_crtc_needs_modeset(new_crtc_state))
6462 continue;
6463
6464 if (drm_WARN_ON(&i915->drm, intel_crtc_is_joiner_secondary(new_crtc_state)))
6465 continue;
6466
6467 if (!new_crtc_state->hw.enable)
6468 continue;
6469
6470 ret = intel_modeset_pipe_config_late(state, crtc);
6471 if (ret)
6472 goto fail;
6473 }
6474
6475 fail:
6476 if (ret)
6477 *failed_pipe = crtc->pipe;
6478
6479 return ret;
6480 }
6481
6482 static int intel_atomic_check_config_and_link(struct intel_atomic_state *state)
6483 {
6484 struct intel_link_bw_limits new_limits;
6485 struct intel_link_bw_limits old_limits;
6486 int ret;
6487
6488 intel_link_bw_init_limits(state, &new_limits);
6489 old_limits = new_limits;
6490
6491 while (true) {
6492 enum pipe failed_pipe;
6493
6494 ret = intel_atomic_check_config(state, &new_limits,
6495 &failed_pipe);
6496 if (ret) {
6497 /*
6498			 * The bpp limit for a pipe is below the minimum it supports; set the
6499			 * limit to the minimum and recalculate the config.
6500 */
6501 if (ret == -EINVAL &&
6502 intel_link_bw_set_bpp_limit_for_pipe(state,
6503 &old_limits,
6504 &new_limits,
6505 failed_pipe))
6506 continue;
6507
6508 break;
6509 }
6510
6511 old_limits = new_limits;
6512
6513 ret = intel_link_bw_atomic_check(state, &new_limits);
6514 if (ret != -EAGAIN)
6515 break;
6516 }
6517
6518 return ret;
6519 }
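/*
 * Editor's note (compiled-out sketch): the loop above is a fixed-point
 * iteration -- compute the config under the current link bw limits, let a
 * -EINVAL from a pipe tighten that pipe's bpp limit and retry, and let
 * intel_link_bw_atomic_check() request another pass with -EAGAIN. The
 * generic shape, with hypothetical compute()/tighten_limits() helpers:
 */
#if 0
static int converge(struct ctx *ctx)
{
	int ret;

	do {
		ret = compute(ctx);
		if (ret == -EINVAL && tighten_limits(ctx))
			ret = -EAGAIN;	/* retry with stricter limits */
	} while (ret == -EAGAIN);

	return ret;
}
#endif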
6520 /**
6521 * intel_atomic_check - validate state object
6522 * @dev: drm device
6523 * @_state: state to validate
6524 */
6525 int intel_atomic_check(struct drm_device *dev,
6526 struct drm_atomic_state *_state)
6527 {
6528 struct drm_i915_private *dev_priv = to_i915(dev);
6529 struct intel_atomic_state *state = to_intel_atomic_state(_state);
6530 struct intel_crtc_state *old_crtc_state, *new_crtc_state;
6531 struct intel_crtc *crtc;
6532 int ret, i;
6533 bool any_ms = false;
6534
6535 if (!intel_display_driver_check_access(dev_priv))
6536 return -ENODEV;
6537
6538 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
6539 new_crtc_state, i) {
6540 /*
6541		 * The crtc's state is no longer considered to be inherited
6542		 * after the first userspace/client initiated commit.
6543 */
6544 if (!state->internal)
6545 new_crtc_state->inherited = false;
6546
6547 if (new_crtc_state->inherited != old_crtc_state->inherited)
6548 new_crtc_state->uapi.mode_changed = true;
6549
6550 if (new_crtc_state->uapi.scaling_filter !=
6551 old_crtc_state->uapi.scaling_filter)
6552 new_crtc_state->uapi.mode_changed = true;
6553 }
6554
6555 intel_vrr_check_modeset(state);
6556
6557 ret = drm_atomic_helper_check_modeset(dev, &state->base);
6558 if (ret)
6559 goto fail;
6560
6561 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
6562 ret = intel_async_flip_check_uapi(state, crtc);
6563 if (ret)
6564 return ret;
6565 }
6566
6567 ret = intel_atomic_check_config_and_link(state);
6568 if (ret)
6569 goto fail;
6570
6571 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
6572 if (!intel_crtc_needs_modeset(new_crtc_state))
6573 continue;
6574
6575 if (intel_crtc_is_joiner_secondary(new_crtc_state)) {
6576 drm_WARN_ON(&dev_priv->drm, new_crtc_state->uapi.enable);
6577 continue;
6578 }
6579
6580 ret = intel_atomic_check_joiner(state, crtc);
6581 if (ret)
6582 goto fail;
6583 }
6584
6585 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
6586 new_crtc_state, i) {
6587 if (!intel_crtc_needs_modeset(new_crtc_state))
6588 continue;
6589
6590 intel_joiner_adjust_pipe_src(new_crtc_state);
6591
6592 intel_crtc_check_fastset(old_crtc_state, new_crtc_state);
6593 }
6594
6595	/*
6596	 * Check if fastset is allowed by external dependencies like other
6597	 * pipes and transcoders.
6598	 *
6599	 * Right now it only forces a full modeset when the MST master
6600	 * transcoder did not change but the pipe of the master transcoder
6601	 * needs a full modeset, so all slaves also need a full modeset.
6602	 * Likewise for port synced crtcs: if one of the synced crtcs
6603	 * needs a full modeset, all other synced crtcs are forced to
6604	 * do a full modeset as well.
6605	 */
6606 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
6607 if (!new_crtc_state->hw.enable || intel_crtc_needs_modeset(new_crtc_state))
6608 continue;
6609
6610 if (intel_dp_mst_crtc_needs_modeset(state, crtc))
6611 intel_crtc_flag_modeset(new_crtc_state);
6612
6613 if (intel_dp_mst_is_slave_trans(new_crtc_state)) {
6614 enum transcoder master = new_crtc_state->mst_master_transcoder;
6615
6616 if (intel_cpu_transcoders_need_modeset(state, BIT(master)))
6617 intel_crtc_flag_modeset(new_crtc_state);
6618 }
6619
6620 if (is_trans_port_sync_mode(new_crtc_state)) {
6621 u8 trans = new_crtc_state->sync_mode_slaves_mask;
6622
6623 if (new_crtc_state->master_transcoder != INVALID_TRANSCODER)
6624 trans |= BIT(new_crtc_state->master_transcoder);
6625
6626 if (intel_cpu_transcoders_need_modeset(state, trans))
6627 intel_crtc_flag_modeset(new_crtc_state);
6628 }
6629
6630 if (new_crtc_state->joiner_pipes) {
6631 if (intel_pipes_need_modeset(state, new_crtc_state->joiner_pipes))
6632 intel_crtc_flag_modeset(new_crtc_state);
6633 }
6634 }
6635
6636 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
6637 new_crtc_state, i) {
6638 if (!intel_crtc_needs_modeset(new_crtc_state))
6639 continue;
6640
6641 any_ms = true;
6642
6643 intel_release_shared_dplls(state, crtc);
6644 }
6645
6646 if (any_ms && !check_digital_port_conflicts(state)) {
6647 drm_dbg_kms(&dev_priv->drm,
6648 "rejecting conflicting digital port configuration\n");
6649 ret = -EINVAL;
6650 goto fail;
6651 }
6652
6653 ret = intel_atomic_check_planes(state);
6654 if (ret)
6655 goto fail;
6656
6657 ret = intel_compute_global_watermarks(state);
6658 if (ret)
6659 goto fail;
6660
6661 ret = intel_bw_atomic_check(state);
6662 if (ret)
6663 goto fail;
6664
6665 ret = intel_cdclk_atomic_check(state, &any_ms);
6666 if (ret)
6667 goto fail;
6668
6669 if (intel_any_crtc_needs_modeset(state))
6670 any_ms = true;
6671
6672 if (any_ms) {
6673 ret = intel_modeset_checks(state);
6674 if (ret)
6675 goto fail;
6676
6677 ret = intel_modeset_calc_cdclk(state);
6678 if (ret)
6679 return ret;
6680 }
6681
6682 ret = intel_pmdemand_atomic_check(state);
6683 if (ret)
6684 goto fail;
6685
6686 ret = intel_atomic_check_crtcs(state);
6687 if (ret)
6688 goto fail;
6689
6690 ret = intel_fbc_atomic_check(state);
6691 if (ret)
6692 goto fail;
6693
6694 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
6695 new_crtc_state, i) {
6696 intel_color_assert_luts(new_crtc_state);
6697
6698 ret = intel_async_flip_check_hw(state, crtc);
6699 if (ret)
6700 goto fail;
6701
6702 /* Either full modeset or fastset (or neither), never both */
6703 drm_WARN_ON(&dev_priv->drm,
6704 intel_crtc_needs_modeset(new_crtc_state) &&
6705 intel_crtc_needs_fastset(new_crtc_state));
6706
6707 if (!intel_crtc_needs_modeset(new_crtc_state) &&
6708 !intel_crtc_needs_fastset(new_crtc_state))
6709 continue;
6710
6711 intel_crtc_state_dump(new_crtc_state, state,
6712 intel_crtc_needs_modeset(new_crtc_state) ?
6713 "modeset" : "fastset");
6714 }
6715
6716 return 0;
6717
6718 fail:
6719 if (ret == -EDEADLK)
6720 return ret;
6721
6722 /*
6723 * FIXME would probably be nice to know which crtc specifically
6724 * caused the failure, in cases where we can pinpoint it.
6725 */
6726 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
6727 new_crtc_state, i)
6728 intel_crtc_state_dump(new_crtc_state, state, "failed");
6729
6730 return ret;
6731 }
6732
6733 static int intel_atomic_prepare_commit(struct intel_atomic_state *state)
6734 {
6735 struct intel_crtc_state __maybe_unused *crtc_state;
6736 struct intel_crtc *crtc;
6737 int i, ret;
6738
6739 ret = drm_atomic_helper_prepare_planes(state->base.dev, &state->base);
6740 if (ret < 0)
6741 return ret;
6742
6743 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i)
6744 intel_color_prepare_commit(state, crtc);
6745
6746 return 0;
6747 }
6748
6749 void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
6750 struct intel_crtc_state *crtc_state)
6751 {
6752 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6753
6754 if (DISPLAY_VER(dev_priv) != 2 || crtc_state->active_planes)
6755 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
6756
6757 if (crtc_state->has_pch_encoder) {
6758 enum pipe pch_transcoder =
6759 intel_crtc_pch_transcoder(crtc);
6760
6761 intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
6762 }
6763 }
6764
6765 static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
6766 const struct intel_crtc_state *new_crtc_state)
6767 {
6768 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
6769 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6770
6771 /*
6772 * Update pipe size and adjust fitter if needed: the reason for this is
6773 * that in compute_mode_changes we check the native mode (not the pfit
6774 * mode) to see if we can flip rather than do a full mode set. In the
6775 * fastboot case, we'll flip, but if we don't update the pipesrc and
6776 * pfit state, we'll end up with a big fb scanned out into the wrong
6777 * sized surface.
6778 */
6779 intel_set_pipe_src_size(new_crtc_state);
6780
6781 /* on skylake this is done by detaching scalers */
6782 if (DISPLAY_VER(dev_priv) >= 9) {
6783 if (new_crtc_state->pch_pfit.enabled)
6784 skl_pfit_enable(new_crtc_state);
6785 } else if (HAS_PCH_SPLIT(dev_priv)) {
6786 if (new_crtc_state->pch_pfit.enabled)
6787 ilk_pfit_enable(new_crtc_state);
6788 else if (old_crtc_state->pch_pfit.enabled)
6789 ilk_pfit_disable(old_crtc_state);
6790 }
6791
6792 /*
6793 * The register is supposedly single buffered so perhaps
6794 * not 100% correct to do this here. But SKL+ calculate
6795 * this based on the adjust pixel rate so pfit changes do
6796 * affect it and so it must be updated for fastsets.
6797 * HSW/BDW only really need this here for fastboot, after
6798 * that the value should not change without a full modeset.
6799 */
6800 if (DISPLAY_VER(dev_priv) >= 9 ||
6801 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
6802 hsw_set_linetime_wm(new_crtc_state);
6803
6804 if (new_crtc_state->update_m_n)
6805 intel_cpu_transcoder_set_m1_n1(crtc, new_crtc_state->cpu_transcoder,
6806 &new_crtc_state->dp_m_n);
6807
6808 if (new_crtc_state->update_lrr)
6809 intel_set_transcoder_timings_lrr(new_crtc_state);
6810 }
6811
6812 static void commit_pipe_pre_planes(struct intel_atomic_state *state,
6813 struct intel_crtc *crtc)
6814 {
6815 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
6816 const struct intel_crtc_state *old_crtc_state =
6817 intel_atomic_get_old_crtc_state(state, crtc);
6818 const struct intel_crtc_state *new_crtc_state =
6819 intel_atomic_get_new_crtc_state(state, crtc);
6820 bool modeset = intel_crtc_needs_modeset(new_crtc_state);
6821
6822 /*
6823	 * During modesets the pipe configuration was programmed as the
6824	 * CRTC was enabled.
6825 */
6826 if (!modeset) {
6827 if (intel_crtc_needs_color_update(new_crtc_state))
6828 intel_color_commit_arm(new_crtc_state);
6829
6830 if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
6831 bdw_set_pipe_misc(new_crtc_state);
6832
6833 if (intel_crtc_needs_fastset(new_crtc_state))
6834 intel_pipe_fastset(old_crtc_state, new_crtc_state);
6835 }
6836
6837 intel_psr2_program_trans_man_trk_ctl(new_crtc_state);
6838
6839 intel_atomic_update_watermarks(state, crtc);
6840 }
6841
6842 static void commit_pipe_post_planes(struct intel_atomic_state *state,
6843 struct intel_crtc *crtc)
6844 {
6845 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
6846 const struct intel_crtc_state *new_crtc_state =
6847 intel_atomic_get_new_crtc_state(state, crtc);
6848
6849 /*
6850 * Disable the scaler(s) after the plane(s) so that we don't
6851 * get a catastrophic underrun even if the two operations
6852 * end up happening in two different frames.
6853 */
6854 if (DISPLAY_VER(dev_priv) >= 9 &&
6855 !intel_crtc_needs_modeset(new_crtc_state))
6856 skl_detach_scalers(new_crtc_state);
6857
6858 if (intel_crtc_vrr_enabling(state, crtc))
6859 intel_vrr_enable(new_crtc_state);
6860 }
6861
6862 static void intel_enable_crtc(struct intel_atomic_state *state,
6863 struct intel_crtc *crtc)
6864 {
6865 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
6866 const struct intel_crtc_state *new_crtc_state =
6867 intel_atomic_get_new_crtc_state(state, crtc);
6868 struct intel_crtc *pipe_crtc;
6869
6870 if (!intel_crtc_needs_modeset(new_crtc_state))
6871 return;
6872
6873 for_each_intel_crtc_in_pipe_mask_reverse(&dev_priv->drm, pipe_crtc,
6874 intel_crtc_joined_pipe_mask(new_crtc_state)) {
6875 const struct intel_crtc_state *pipe_crtc_state =
6876 intel_atomic_get_new_crtc_state(state, pipe_crtc);
6877
6878		/* VRR will be enabled later, if required */
6879 intel_crtc_update_active_timings(pipe_crtc_state, false);
6880 }
6881
6882 dev_priv->display.funcs.display->crtc_enable(state, crtc);
6883
6884 /* vblanks work again, re-enable pipe CRC. */
6885 intel_crtc_enable_pipe_crc(crtc);
6886 }
6887
6888 static void intel_pre_update_crtc(struct intel_atomic_state *state,
6889 struct intel_crtc *crtc)
6890 {
6891 struct drm_i915_private *i915 = to_i915(state->base.dev);
6892 const struct intel_crtc_state *old_crtc_state =
6893 intel_atomic_get_old_crtc_state(state, crtc);
6894 struct intel_crtc_state *new_crtc_state =
6895 intel_atomic_get_new_crtc_state(state, crtc);
6896 bool modeset = intel_crtc_needs_modeset(new_crtc_state);
6897
6898 if (old_crtc_state->inherited ||
6899 intel_crtc_needs_modeset(new_crtc_state)) {
6900 if (HAS_DPT(i915))
6901 intel_dpt_configure(crtc);
6902 }
6903
6904 if (!modeset) {
6905 if (new_crtc_state->preload_luts &&
6906 intel_crtc_needs_color_update(new_crtc_state))
6907 intel_color_load_luts(new_crtc_state);
6908
6909 intel_pre_plane_update(state, crtc);
6910
6911 if (intel_crtc_needs_fastset(new_crtc_state))
6912 intel_encoders_update_pipe(state, crtc);
6913
6914 if (DISPLAY_VER(i915) >= 11 &&
6915 intel_crtc_needs_fastset(new_crtc_state))
6916 icl_set_pipe_chicken(new_crtc_state);
6917
6918 if (vrr_params_changed(old_crtc_state, new_crtc_state) ||
6919 cmrr_params_changed(old_crtc_state, new_crtc_state))
6920 intel_vrr_set_transcoder_timings(new_crtc_state);
6921 }
6922
6923 intel_fbc_update(state, crtc);
6924
6925 drm_WARN_ON(&i915->drm, !intel_display_power_is_enabled(i915, POWER_DOMAIN_DC_OFF));
6926
6927 if (!modeset &&
6928 intel_crtc_needs_color_update(new_crtc_state))
6929 intel_color_commit_noarm(new_crtc_state);
6930
6931 intel_crtc_planes_update_noarm(state, crtc);
6932 }
6933
6934 static void intel_update_crtc(struct intel_atomic_state *state,
6935 struct intel_crtc *crtc)
6936 {
6937 const struct intel_crtc_state *old_crtc_state =
6938 intel_atomic_get_old_crtc_state(state, crtc);
6939 struct intel_crtc_state *new_crtc_state =
6940 intel_atomic_get_new_crtc_state(state, crtc);
6941
6942 /* Perform vblank evasion around commit operation */
6943 intel_pipe_update_start(state, crtc);
6944
6945 commit_pipe_pre_planes(state, crtc);
6946
6947 intel_crtc_planes_update_arm(state, crtc);
6948
6949 commit_pipe_post_planes(state, crtc);
6950
6951 intel_pipe_update_end(state, crtc);
6952
6953 /*
6954 * VRR/Seamless M/N update may need to update frame timings.
6955 *
6956 * FIXME Should be synchronized with the start of vblank somehow...
6957 */
6958 if (intel_crtc_vrr_enabling(state, crtc) ||
6959 new_crtc_state->update_m_n || new_crtc_state->update_lrr)
6960 intel_crtc_update_active_timings(new_crtc_state,
6961 new_crtc_state->vrr.enable);
6962
6963 /*
6964 * We usually enable FIFO underrun interrupts as part of the
6965 * CRTC enable sequence during modesets. But when we inherit a
6966 * valid pipe configuration from the BIOS we need to take care
6967 * of enabling them on the CRTC's first fastset.
6968 */
6969 if (intel_crtc_needs_fastset(new_crtc_state) &&
6970 old_crtc_state->inherited)
6971 intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
6972 }
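/*
 * Editor's sketch (compiled out): intel_update_crtc() is the classic
 * vblank-evasion bracket -- all double-buffered (_arm) register writes
 * happen between intel_pipe_update_start() and intel_pipe_update_end() so
 * they latch in the same frame:
 */
#if 0
intel_pipe_update_start(state, crtc);	/* wait for the safe window */
/* ... armed register writes, must complete before vblank ... */
intel_pipe_update_end(state, crtc);	/* warns if the deadline was missed */
#endif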
6973
6974 static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
6975 struct intel_crtc *crtc)
6976 {
6977 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
6978 const struct intel_crtc_state *old_crtc_state =
6979 intel_atomic_get_old_crtc_state(state, crtc);
6980 struct intel_crtc *pipe_crtc;
6981
6982 /*
6983 * We need to disable pipe CRC before disabling the pipe,
6984 * or we race against vblank off.
6985 */
6986 for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, pipe_crtc,
6987 intel_crtc_joined_pipe_mask(old_crtc_state))
6988 intel_crtc_disable_pipe_crc(pipe_crtc);
6989
6990 dev_priv->display.funcs.display->crtc_disable(state, crtc);
6991
6992 for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, pipe_crtc,
6993 intel_crtc_joined_pipe_mask(old_crtc_state)) {
6994 const struct intel_crtc_state *new_pipe_crtc_state =
6995 intel_atomic_get_new_crtc_state(state, pipe_crtc);
6996
6997 pipe_crtc->active = false;
6998 intel_fbc_disable(pipe_crtc);
6999
7000 if (!new_pipe_crtc_state->hw.active)
7001 intel_initial_watermarks(state, pipe_crtc);
7002 }
7003 }
7004
7005 static void intel_commit_modeset_disables(struct intel_atomic_state *state)
7006 {
7007 struct drm_i915_private *i915 = to_i915(state->base.dev);
7008 const struct intel_crtc_state *new_crtc_state, *old_crtc_state;
7009 struct intel_crtc *crtc;
7010 u8 disable_pipes = 0;
7011 int i;
7012
7013 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
7014 new_crtc_state, i) {
7015 if (!intel_crtc_needs_modeset(new_crtc_state))
7016 continue;
7017
7018 /*
7019 * Needs to be done even for pipes
7020 * that weren't enabled previously.
7021 */
7022 intel_pre_plane_update(state, crtc);
7023
7024 if (!old_crtc_state->hw.active)
7025 continue;
7026
7027 disable_pipes |= BIT(crtc->pipe);
7028 }
7029
7030 for_each_old_intel_crtc_in_state(state, crtc, old_crtc_state, i) {
7031 if ((disable_pipes & BIT(crtc->pipe)) == 0)
7032 continue;
7033
7034 intel_crtc_disable_planes(state, crtc);
7035
7036 drm_vblank_work_flush_all(&crtc->base);
7037 }
7038
7039 /* Only disable port sync and MST slaves */
7040 for_each_old_intel_crtc_in_state(state, crtc, old_crtc_state, i) {
7041 if ((disable_pipes & BIT(crtc->pipe)) == 0)
7042 continue;
7043
7044 if (intel_crtc_is_joiner_secondary(old_crtc_state))
7045 continue;
7046
7047		/* In case of transcoder port sync, master/slave CRTCs can be
7048		 * assigned in any order, so we need to make sure that
7049		 * slave CRTCs are disabled first and the master CRTC last, since
7050		 * slave vblanks are masked until the master's vblank.
7051		 */
7052 if (!is_trans_port_sync_slave(old_crtc_state) &&
7053 !intel_dp_mst_is_slave_trans(old_crtc_state))
7054 continue;
7055
7056 intel_old_crtc_state_disables(state, crtc);
7057
7058 disable_pipes &= ~intel_crtc_joined_pipe_mask(old_crtc_state);
7059 }
7060
7061 /* Disable everything else left on */
7062 for_each_old_intel_crtc_in_state(state, crtc, old_crtc_state, i) {
7063 if ((disable_pipes & BIT(crtc->pipe)) == 0)
7064 continue;
7065
7066 if (intel_crtc_is_joiner_secondary(old_crtc_state))
7067 continue;
7068
7069 intel_old_crtc_state_disables(state, crtc);
7070
7071 disable_pipes &= ~intel_crtc_joined_pipe_mask(old_crtc_state);
7072 }
7073
7074 drm_WARN_ON(&i915->drm, disable_pipes);
7075 }
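/*
 * Editor's note (compiled-out sketch): the loops above are a two-pass
 * ordered teardown over a shrinking bitmask -- dependent pipes (port sync
 * and MST slaves) go first, everything left goes second, and the mask must
 * be empty at the end. The same shape in miniature, with hypothetical
 * must_go_first()/disable_one() helpers:
 */
#if 0
static void ordered_disable(u8 pipes)
{
	int pipe, pass;

	for (pass = 0; pass < 2; pass++) {
		for (pipe = 0; pipe < I915_MAX_PIPES; pipe++) {
			if (!(pipes & BIT(pipe)))
				continue;
			if (pass == 0 && !must_go_first(pipe))
				continue;
			disable_one(pipe);
			pipes &= ~BIT(pipe);
		}
	}

	WARN_ON(pipes);	/* every pipe must have been handled */
}
#endif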
7076
7077 static void intel_commit_modeset_enables(struct intel_atomic_state *state)
7078 {
7079 struct intel_crtc_state *new_crtc_state;
7080 struct intel_crtc *crtc;
7081 int i;
7082
7083 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
7084 if (!new_crtc_state->hw.active)
7085 continue;
7086
7087 intel_enable_crtc(state, crtc);
7088 intel_pre_update_crtc(state, crtc);
7089 }
7090
7091 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
7092 if (!new_crtc_state->hw.active)
7093 continue;
7094
7095 intel_update_crtc(state, crtc);
7096 }
7097 }
7098
7099 static void skl_commit_modeset_enables(struct intel_atomic_state *state)
7100 {
7101 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
7102 struct intel_crtc *crtc;
7103 struct intel_crtc_state *old_crtc_state, *new_crtc_state;
7104 struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
7105 u8 update_pipes = 0, modeset_pipes = 0;
7106 int i;
7107
7108 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7109 enum pipe pipe = crtc->pipe;
7110
7111 if (!new_crtc_state->hw.active)
7112 continue;
7113
7114		/* Ignore allocations for crtcs that have been turned off. */
7115 if (!intel_crtc_needs_modeset(new_crtc_state)) {
7116 entries[pipe] = old_crtc_state->wm.skl.ddb;
7117 update_pipes |= BIT(pipe);
7118 } else {
7119 modeset_pipes |= BIT(pipe);
7120 }
7121 }
7122
7123 /*
7124 * Whenever the number of active pipes changes, we need to make sure we
7125 * update the pipes in the right order so that their ddb allocations
7126 * never overlap with each other between CRTC updates. Otherwise we'll
7127 * cause pipe underruns and other bad stuff.
7128 *
7129	 * So first let's enable all pipes that do not need a full modeset, as
7130	 * those don't have any external dependency.
7131 */
7132 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
7133 enum pipe pipe = crtc->pipe;
7134
7135 if ((update_pipes & BIT(pipe)) == 0)
7136 continue;
7137
7138 intel_pre_update_crtc(state, crtc);
7139 }
7140
7141 intel_dbuf_mbus_pre_ddb_update(state);
7142
7143 while (update_pipes) {
7144 /*
7145 * Commit in reverse order to make joiner primary
7146 * send the uapi events after secondaries are done.
7147 */
7148 for_each_oldnew_intel_crtc_in_state_reverse(state, crtc, old_crtc_state,
7149 new_crtc_state, i) {
7150 enum pipe pipe = crtc->pipe;
7151
7152 if ((update_pipes & BIT(pipe)) == 0)
7153 continue;
7154
7155 if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
7156 entries, I915_MAX_PIPES, pipe))
7157 continue;
7158
7159 entries[pipe] = new_crtc_state->wm.skl.ddb;
7160 update_pipes &= ~BIT(pipe);
7161
7162 intel_update_crtc(state, crtc);
7163
7164 /*
7165			 * If this is an already active pipe, its DDB changed,
7166			 * and this isn't the last pipe that needs updating,
7167			 * then we need to wait for a vblank to pass for the
7168			 * new ddb allocation to take effect.
7169 */
7170 if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb,
7171 &old_crtc_state->wm.skl.ddb) &&
7172 (update_pipes | modeset_pipes))
7173 intel_crtc_wait_for_next_vblank(crtc);
7174 }
7175 }
7176
7177 intel_dbuf_mbus_post_ddb_update(state);
7178
7179 update_pipes = modeset_pipes;
7180
7181 /*
7182	 * Enable all pipes that need a modeset and do not depend on other
7183	 * pipes.
7184 */
7185 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
7186 enum pipe pipe = crtc->pipe;
7187
7188 if ((modeset_pipes & BIT(pipe)) == 0)
7189 continue;
7190
7191 if (intel_crtc_is_joiner_secondary(new_crtc_state))
7192 continue;
7193
7194 if (intel_dp_mst_is_slave_trans(new_crtc_state) ||
7195 is_trans_port_sync_master(new_crtc_state))
7196 continue;
7197
7198 modeset_pipes &= ~intel_crtc_joined_pipe_mask(new_crtc_state);
7199
7200 intel_enable_crtc(state, crtc);
7201 }
7202
7203 /*
7204 * Then we enable all remaining pipes that depend on other
7205 * pipes: MST slaves and port sync masters
7206 */
7207 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
7208 enum pipe pipe = crtc->pipe;
7209
7210 if ((modeset_pipes & BIT(pipe)) == 0)
7211 continue;
7212
7213 if (intel_crtc_is_joiner_secondary(new_crtc_state))
7214 continue;
7215
7216 modeset_pipes &= ~intel_crtc_joined_pipe_mask(new_crtc_state);
7217
7218 intel_enable_crtc(state, crtc);
7219 }
7220
7221 /*
7222 * Finally we do the plane updates/etc. for all pipes that got enabled.
7223 */
7224 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
7225 enum pipe pipe = crtc->pipe;
7226
7227 if ((update_pipes & BIT(pipe)) == 0)
7228 continue;
7229
7230 intel_pre_update_crtc(state, crtc);
7231 }
7232
7233 /*
7234 * Commit in reverse order to make joiner primary
7235 * send the uapi events after secondaries are done.
7236 */
7237 for_each_new_intel_crtc_in_state_reverse(state, crtc, new_crtc_state, i) {
7238 enum pipe pipe = crtc->pipe;
7239
7240 if ((update_pipes & BIT(pipe)) == 0)
7241 continue;
7242
7243 drm_WARN_ON(&dev_priv->drm, skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
7244 entries, I915_MAX_PIPES, pipe));
7245
7246 entries[pipe] = new_crtc_state->wm.skl.ddb;
7247 update_pipes &= ~BIT(pipe);
7248
7249 intel_update_crtc(state, crtc);
7250 }
7251
7252 drm_WARN_ON(&dev_priv->drm, modeset_pipes);
7253 drm_WARN_ON(&dev_priv->drm, update_pipes);
7254 }
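/*
 * Editor's note (compiled-out sketch): the vblank waits above exist
 * because each pipe's DDB allocation is a [start, end) range that must
 * never overlap another active pipe's range in the same frame. The
 * overlap test is the usual interval check (this mirrors what
 * skl_ddb_allocation_overlaps() is checking, assuming the start/end
 * layout of struct skl_ddb_entry):
 */
#if 0
static bool ddb_entries_overlap(const struct skl_ddb_entry *a,
				const struct skl_ddb_entry *b)
{
	return a->start < b->end && b->start < a->end;
}
#endif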
7255
7256 static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
7257 {
7258 struct drm_i915_private *i915 = to_i915(intel_state->base.dev);
7259 struct drm_plane *plane;
7260 struct drm_plane_state *new_plane_state;
7261 int ret, i;
7262
7263 for_each_new_plane_in_state(&intel_state->base, plane, new_plane_state, i) {
7264 if (new_plane_state->fence) {
7265 ret = dma_fence_wait_timeout(new_plane_state->fence, false,
7266 i915_fence_timeout(i915));
7267 if (ret <= 0)
7268 break;
7269
7270 dma_fence_put(new_plane_state->fence);
7271 new_plane_state->fence = NULL;
7272 }
7273 }
7274 }
7275
7276 static void intel_atomic_cleanup_work(struct work_struct *work)
7277 {
7278 struct intel_atomic_state *state =
7279 container_of(work, struct intel_atomic_state, base.commit_work);
7280 struct drm_i915_private *i915 = to_i915(state->base.dev);
7281 struct intel_crtc_state *old_crtc_state;
7282 struct intel_crtc *crtc;
7283 int i;
7284
7285 for_each_old_intel_crtc_in_state(state, crtc, old_crtc_state, i)
7286 intel_color_cleanup_commit(old_crtc_state);
7287
7288 drm_atomic_helper_cleanup_planes(&i915->drm, &state->base);
7289 drm_atomic_helper_commit_cleanup_done(&state->base);
7290 drm_atomic_state_put(&state->base);
7291 }
7292
7293 static void intel_atomic_prepare_plane_clear_colors(struct intel_atomic_state *state)
7294 {
7295 struct drm_i915_private *i915 = to_i915(state->base.dev);
7296 struct intel_plane *plane;
7297 struct intel_plane_state *plane_state;
7298 int i;
7299
7300 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
7301 struct drm_framebuffer *fb = plane_state->hw.fb;
7302 int cc_plane;
7303 int ret;
7304
7305 if (!fb)
7306 continue;
7307
7308 cc_plane = intel_fb_rc_ccs_cc_plane(fb);
7309 if (cc_plane < 0)
7310 continue;
7311
7312 /*
7313 * The layout of the fast clear color value expected by HW
7314		 * (the DRM ABI requires this value to be located in the fb at
7315		 * offset 0 of the cc plane: plane #2 on previous generations, or
7316		 * plane #1 for flat ccs):
7317 * - 4 x 4 bytes per-channel value
7318 * (in surface type specific float/int format provided by the fb user)
7319 * - 8 bytes native color value used by the display
7320 * (converted/written by GPU during a fast clear operation using the
7321 * above per-channel values)
7322 *
7323 * The commit's FB prepare hook already ensured that FB obj is pinned and the
7324 * caller made sure that the object is synced wrt. the related color clear value
7325 * GPU write on it.
7326 */
7327 ret = i915_gem_object_read_from_page(intel_fb_obj(fb),
7328 fb->offsets[cc_plane] + 16,
7329 &plane_state->ccval,
7330 sizeof(plane_state->ccval));
7331 /* The above could only fail if the FB obj has an unexpected backing store type. */
7332 drm_WARN_ON(&i915->drm, ret);
7333 }
7334 }
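/*
 * Editor's note (compiled-out sketch): per the layout described above, the
 * cc plane starts with four 4-byte channel values (16 bytes) followed by
 * the 8-byte native clear color, which is why the read above starts at
 * fb->offsets[cc_plane] + 16:
 */
#if 0
struct cc_plane_layout {
	u32 channel[4];		/* per-channel values provided by the fb user */
	u64 native_ccval;	/* HW-native value, read into plane_state->ccval */
};
static_assert(offsetof(struct cc_plane_layout, native_ccval) == 16);
#endif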
7335
7336 static void intel_atomic_commit_tail(struct intel_atomic_state *state)
7337 {
7338 struct drm_device *dev = state->base.dev;
7339 struct drm_i915_private *dev_priv = to_i915(dev);
7340 struct intel_crtc_state *new_crtc_state, *old_crtc_state;
7341 struct intel_crtc *crtc;
7342 struct intel_power_domain_mask put_domains[I915_MAX_PIPES] = {};
7343 intel_wakeref_t wakeref = 0;
7344 int i;
7345
7346 intel_atomic_commit_fence_wait(state);
7347
7348 intel_td_flush(dev_priv);
7349
7350 drm_atomic_helper_wait_for_dependencies(&state->base);
7351 drm_dp_mst_atomic_wait_for_dependencies(&state->base);
7352 intel_atomic_global_state_wait_for_dependencies(state);
7353
7354 /*
7355 * During full modesets we write a lot of registers, wait
7356 * for PLLs, etc. Doing that while DC states are enabled
7357 * is not a good idea.
7358 *
7359 * During fastsets and other updates we also need to
7360 * disable DC states due to the following scenario:
7361 * 1. DC5 exit and PSR exit happen
7362 * 2. Some or all _noarm() registers are written
7363 * 3. Due to some long delay PSR is re-entered
7364 * 4. DC5 entry -> DMC saves the already written new
7365 * _noarm() registers and the old not yet written
7366 * _arm() registers
7367 * 5. DC5 exit -> DMC restores a mixture of old and
7368 * new register values and arms the update
7369 * 6. PSR exit -> hardware latches a mixture of old and
7370 * new register values -> corrupted frame, or worse
7371 * 7. New _arm() registers are finally written
7372 * 8. Hardware finally latches a complete set of new
7373 * register values, and subsequent frames will be OK again
7374 *
7375 * Also note that due to the pipe CSC hardware issues on
7376 * SKL/GLK DC states must remain off until the pipe CSC
7377 * state readout has happened. Otherwise we risk corrupting
7378 * the CSC latched register values with the readout (see
7379 * skl_read_csc() and skl_color_commit_noarm()).
7380 */
7381 wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DC_OFF);
7382
7383 intel_atomic_prepare_plane_clear_colors(state);
7384
7385 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
7386 new_crtc_state, i) {
7387 if (intel_crtc_needs_modeset(new_crtc_state) ||
7388 intel_crtc_needs_fastset(new_crtc_state))
7389 intel_modeset_get_crtc_power_domains(new_crtc_state, &put_domains[crtc->pipe]);
7390 }
7391
7392 intel_commit_modeset_disables(state);
7393
7394 intel_dp_tunnel_atomic_alloc_bw(state);
7395
7396 /* FIXME: Eventually get rid of our crtc->config pointer */
7397 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
7398 crtc->config = new_crtc_state;
7399
7400 /*
7401 * In XE_LPD+ Pmdemand combines many parameters such as voltage index,
7402	 * plls, cdclk frequency, QGV point selection parameters, etc. Voltage
7403 * index, cdclk/ddiclk frequencies are supposed to be configured before
7404 * the cdclk config is set.
7405 */
7406 intel_pmdemand_pre_plane_update(state);
7407
7408 if (state->modeset) {
7409 drm_atomic_helper_update_legacy_modeset_state(dev, &state->base);
7410
7411 intel_set_cdclk_pre_plane_update(state);
7412
7413 intel_modeset_verify_disabled(state);
7414 }
7415
7416 intel_sagv_pre_plane_update(state);
7417
7418 /* Complete the events for pipes that have now been disabled */
7419 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
7420 bool modeset = intel_crtc_needs_modeset(new_crtc_state);
7421
7422		/* Complete events for now disabled pipes here. */
7423 if (modeset && !new_crtc_state->hw.active && new_crtc_state->uapi.event) {
7424 spin_lock_irq(&dev->event_lock);
7425 drm_crtc_send_vblank_event(&crtc->base,
7426 new_crtc_state->uapi.event);
7427 spin_unlock_irq(&dev->event_lock);
7428
7429 new_crtc_state->uapi.event = NULL;
7430 }
7431 }
7432
7433 intel_encoders_update_prepare(state);
7434
7435 intel_dbuf_pre_plane_update(state);
7436
7437 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
7438 if (new_crtc_state->do_async_flip)
7439 intel_crtc_enable_flip_done(state, crtc);
7440 }
7441
7442 /* Now enable the clocks, plane, pipe, and connectors that we set up. */
7443 dev_priv->display.funcs.display->commit_modeset_enables(state);
7444
7445 if (state->modeset)
7446 intel_set_cdclk_post_plane_update(state);
7447
7448 intel_wait_for_vblank_workers(state);
7449
7450 /* FIXME: We should call drm_atomic_helper_commit_hw_done() here
7451 * already, but still need the state for the delayed optimization. To
7452 * fix this:
7453 * - wrap the optimization/post_plane_update stuff into a per-crtc work.
7454 * - schedule that vblank worker _before_ calling hw_done
7455	 * - at the start of commit_tail, cancel it synchronously
7456	 * - switch over to the vblank wait helper in the core after that since
7457	 *   we don't need our special handling any more.
7458 */
7459 drm_atomic_helper_wait_for_flip_done(dev, &state->base);
7460
7461 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
7462 if (new_crtc_state->do_async_flip)
7463 intel_crtc_disable_flip_done(state, crtc);
7464
7465 intel_color_wait_commit(new_crtc_state);
7466 }
7467
7468 /*
7469 * Now that the vblank has passed, we can go ahead and program the
7470 * optimal watermarks on platforms that need two-step watermark
7471 * programming.
7472 *
7473 * TODO: Move this (and other cleanup) to an async worker eventually.
7474 */
7475 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
7476 new_crtc_state, i) {
7477 /*
7478 * Gen2 reports pipe underruns whenever all planes are disabled.
7479 * So re-enable underrun reporting after some planes get enabled.
7480 *
7481 * We do this before .optimize_watermarks() so that we have a
7482 * chance of catching underruns with the intermediate watermarks
7483 * vs. the new plane configuration.
7484 */
7485 if (DISPLAY_VER(dev_priv) == 2 && planes_enabling(old_crtc_state, new_crtc_state))
7486 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
7487
7488 intel_optimize_watermarks(state, crtc);
7489 }
7490
7491 intel_dbuf_post_plane_update(state);
7492
7493 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7494 intel_post_plane_update(state, crtc);
7495
7496 intel_modeset_put_crtc_power_domains(crtc, &put_domains[crtc->pipe]);
7497
7498 intel_modeset_verify_crtc(state, crtc);
7499
7500 /* Must be done after gamma readout due to HSW split gamma vs. IPS w/a */
7501 hsw_ips_post_update(state, crtc);
7502
7503 /*
7504 * Activate DRRS after state readout to avoid
7505 * dp_m_n vs. dp_m2_n2 confusion on BDW+.
7506 */
7507 intel_drrs_activate(new_crtc_state);
7508
7509 /*
7510		 * DSB cleanup is done in cleanup_work, in line with framebuffer
7511		 * cleanup. So copy and reset the dsb structure to sync with
7512 * commit_done and later do dsb cleanup in cleanup_work.
7513 *
7514 * FIXME get rid of this funny new->old swapping
7515 */
7516 old_crtc_state->dsb_color_vblank = fetch_and_zero(&new_crtc_state->dsb_color_vblank);
7517 old_crtc_state->dsb_color_commit = fetch_and_zero(&new_crtc_state->dsb_color_commit);
7518 }
7519
7520 /* Underruns don't always raise interrupts, so check manually */
7521 intel_check_cpu_fifo_underruns(dev_priv);
7522 intel_check_pch_fifo_underruns(dev_priv);
7523
7524 if (state->modeset)
7525 intel_verify_planes(state);
7526
7527 intel_sagv_post_plane_update(state);
7528 intel_pmdemand_post_plane_update(state);
7529
7530 drm_atomic_helper_commit_hw_done(&state->base);
7531 intel_atomic_global_state_commit_done(state);
7532
7533 if (state->modeset) {
7534 /* As one of the primary mmio accessors, KMS has a high
7535 * likelihood of triggering bugs in unclaimed access. After we
7536 * finish modesetting, see if an error has been flagged, and if
7537 * so enable debugging for the next modeset - and hope we catch
7538 * the culprit.
7539 */
7540 intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
7541 }
7542 /*
7543 * Delay re-enabling DC states by 17 ms to avoid the off->on->off
7544 * toggling overhead at and above 60 FPS.
7545 */
7546 intel_display_power_put_async_delay(dev_priv, POWER_DOMAIN_DC_OFF, wakeref, 17);
7547 intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
7548
7549 /*
7550	 * Defer the cleanup of the old state to a separate worker so as not
7551	 * to impede the current task (userspace for blocking modesets), in
7552	 * which it would otherwise run inline. For out-of-line asynchronous
7553	 * modesets/flips, deferring to a new worker seems overkill, but we
7554	 * would place a schedule point (cond_resched()) here anyway to keep
7555	 * latencies down.
7556 */
7557 INIT_WORK(&state->base.commit_work, intel_atomic_cleanup_work);
7558 queue_work(system_highpri_wq, &state->base.commit_work);
7559 }
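/*
 * Editor's sketch (compiled out): commit_tail brackets all register
 * programming in a POWER_DOMAIN_DC_OFF wakeref -- taken before the first
 * _noarm() write, released with a 17 ms delay once the commit is done --
 * so that DMC can never save/restore a half-written register set:
 */
#if 0
{
	intel_wakeref_t wakeref;

	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DC_OFF);
	/* ... write the _noarm() registers, then the _arm() registers ... */
	intel_display_power_put_async_delay(dev_priv, POWER_DOMAIN_DC_OFF,
					    wakeref, 17);
}
#endif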
7560
7561 static void intel_atomic_commit_work(struct work_struct *work)
7562 {
7563 struct intel_atomic_state *state =
7564 container_of(work, struct intel_atomic_state, base.commit_work);
7565
7566 intel_atomic_commit_tail(state);
7567 }
7568
7569 static void intel_atomic_track_fbs(struct intel_atomic_state *state)
7570 {
7571 struct intel_plane_state *old_plane_state, *new_plane_state;
7572 struct intel_plane *plane;
7573 int i;
7574
7575 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
7576 new_plane_state, i)
7577 intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
7578 to_intel_frontbuffer(new_plane_state->hw.fb),
7579 plane->frontbuffer_bit);
7580 }
7581
7582 static int intel_atomic_setup_commit(struct intel_atomic_state *state, bool nonblock)
7583 {
7584 int ret;
7585
7586 ret = drm_atomic_helper_setup_commit(&state->base, nonblock);
7587 if (ret)
7588 return ret;
7589
7590 ret = intel_atomic_global_state_setup_commit(state);
7591 if (ret)
7592 return ret;
7593
7594 return 0;
7595 }
7596
7597 static int intel_atomic_swap_state(struct intel_atomic_state *state)
7598 {
7599 int ret;
7600
7601 ret = drm_atomic_helper_swap_state(&state->base, true);
7602 if (ret)
7603 return ret;
7604
7605 intel_atomic_swap_global_state(state);
7606
7607 intel_shared_dpll_swap_state(state);
7608
7609 intel_atomic_track_fbs(state);
7610
7611 return 0;
7612 }
7613
7614 int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state,
7615 bool nonblock)
7616 {
7617 struct intel_atomic_state *state = to_intel_atomic_state(_state);
7618 struct drm_i915_private *dev_priv = to_i915(dev);
7619 int ret = 0;
7620
7621 state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
7622
7623 /*
7624 * The intel_legacy_cursor_update() fast path takes care
7625 * of avoiding the vblank waits for simple cursor
7626 * movement and flips. For cursor on/off and size changes,
7627 * we want to perform the vblank waits so that watermark
7628 * updates happen during the correct frames. Gen9+ have
7629 * double buffered watermarks and so shouldn't need this.
7630 *
7631 * Unset state->legacy_cursor_update before the call to
7632 * drm_atomic_helper_setup_commit() because otherwise
7633 * drm_atomic_helper_wait_for_flip_done() is a noop and
7634 * we get FIFO underruns because we didn't wait
7635 * for vblank.
7636 *
7637 * FIXME doing watermarks and fb cleanup from a vblank worker
7638 * (assuming we had any) would solve these problems.
7639 */
7640 if (DISPLAY_VER(dev_priv) < 9 && state->base.legacy_cursor_update) {
7641 struct intel_crtc_state *new_crtc_state;
7642 struct intel_crtc *crtc;
7643 int i;
7644
7645 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
7646 if (new_crtc_state->wm.need_postvbl_update ||
7647 new_crtc_state->update_wm_post)
7648 state->base.legacy_cursor_update = false;
7649 }
7650
7651 ret = intel_atomic_prepare_commit(state);
7652 if (ret) {
7653 drm_dbg_atomic(&dev_priv->drm,
7654 "Preparing state failed with %i\n", ret);
7655 intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
7656 return ret;
7657 }
7658
7659 ret = intel_atomic_setup_commit(state, nonblock);
7660 if (!ret)
7661 ret = intel_atomic_swap_state(state);
7662
7663 if (ret) {
7664 struct intel_crtc_state *new_crtc_state;
7665 struct intel_crtc *crtc;
7666 int i;
7667
7668 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
7669 intel_color_cleanup_commit(new_crtc_state);
7670
7671 drm_atomic_helper_unprepare_planes(dev, &state->base);
7672 intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
7673 return ret;
7674 }
7675
7676 drm_atomic_state_get(&state->base);
7677 INIT_WORK(&state->base.commit_work, intel_atomic_commit_work);
7678
7679 if (nonblock && state->modeset) {
7680 queue_work(dev_priv->display.wq.modeset, &state->base.commit_work);
7681 } else if (nonblock) {
7682 queue_work(dev_priv->display.wq.flip, &state->base.commit_work);
7683 } else {
7684 if (state->modeset)
7685 flush_workqueue(dev_priv->display.wq.modeset);
7686 intel_atomic_commit_tail(state);
7687 }
7688
7689 return 0;
7690 }
7691
7692 /**
7693 * intel_plane_destroy - destroy a plane
7694 * @plane: plane to destroy
7695 *
7696 * Common destruction function for all types of planes (primary, cursor,
7697 * sprite).
7698 */
7699 void intel_plane_destroy(struct drm_plane *plane)
7700 {
7701 drm_plane_cleanup(plane);
7702 kfree(to_intel_plane(plane));
7703 }
7704
7705 int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
7706 struct drm_file *file)
7707 {
7708 struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
7709 struct drm_crtc *drmmode_crtc;
7710 struct intel_crtc *crtc;
7711
7712 drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
7713 if (!drmmode_crtc)
7714 return -ENOENT;
7715
7716 crtc = to_intel_crtc(drmmode_crtc);
7717 pipe_from_crtc_id->pipe = crtc->pipe;
7718
7719 return 0;
7720 }
7721
7722 static u32 intel_encoder_possible_clones(struct intel_encoder *encoder)
7723 {
7724 struct drm_device *dev = encoder->base.dev;
7725 struct intel_encoder *source_encoder;
7726 u32 possible_clones = 0;
7727
7728 for_each_intel_encoder(dev, source_encoder) {
7729 if (encoders_cloneable(encoder, source_encoder))
7730 possible_clones |= drm_encoder_mask(&source_encoder->base);
7731 }
7732
7733 return possible_clones;
7734 }
7735
7736 static u32 intel_encoder_possible_crtcs(struct intel_encoder *encoder)
7737 {
7738 struct drm_device *dev = encoder->base.dev;
7739 struct intel_crtc *crtc;
7740 u32 possible_crtcs = 0;
7741
7742 for_each_intel_crtc_in_pipe_mask(dev, crtc, encoder->pipe_mask)
7743 possible_crtcs |= drm_crtc_mask(&crtc->base);
7744
7745 return possible_crtcs;
7746 }
7747
7748 static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
7749 {
7750 if (!IS_MOBILE(dev_priv))
7751 return false;
7752
7753 if ((intel_de_read(dev_priv, DP_A) & DP_DETECTED) == 0)
7754 return false;
7755
7756 if (IS_IRONLAKE(dev_priv) && (intel_de_read(dev_priv, FUSE_STRAP) & ILK_eDP_A_DISABLE))
7757 return false;
7758
7759 return true;
7760 }
7761
7762 static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
7763 {
7764 if (DISPLAY_VER(dev_priv) >= 9)
7765 return false;
7766
7767 if (IS_HASWELL_ULT(dev_priv) || IS_BROADWELL_ULT(dev_priv))
7768 return false;
7769
7770 if (HAS_PCH_LPT_H(dev_priv) &&
7771 intel_de_read(dev_priv, SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
7772 return false;
7773
7774 /* DDI E can't be used if DDI A requires 4 lanes */
7775 if (intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
7776 return false;
7777
7778 if (!dev_priv->display.vbt.int_crt_support)
7779 return false;
7780
7781 return true;
7782 }
7783
7784 bool assert_port_valid(struct drm_i915_private *i915, enum port port)
7785 {
7786 return !drm_WARN(&i915->drm, !(DISPLAY_RUNTIME_INFO(i915)->port_mask & BIT(port)),
7787 "Platform does not support port %c\n", port_name(port));
7788 }
7789
7790 void intel_setup_outputs(struct drm_i915_private *dev_priv)
7791 {
7792 struct intel_display *display = &dev_priv->display;
7793 struct intel_encoder *encoder;
7794 bool dpd_is_edp = false;
7795
7796 intel_pps_unlock_regs_wa(display);
7797
7798 if (!HAS_DISPLAY(dev_priv))
7799 return;
7800
7801 if (HAS_DDI(dev_priv)) {
7802 if (intel_ddi_crt_present(dev_priv))
7803 intel_crt_init(dev_priv);
7804
7805 intel_bios_for_each_encoder(display, intel_ddi_init);
7806
7807 if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
7808 vlv_dsi_init(dev_priv);
7809 } else if (HAS_PCH_SPLIT(dev_priv)) {
7810 int found;
7811
7812 /*
7813 * intel_edp_init_connector() depends on this completing first,
7814 * to prevent the registration of both eDP and LVDS and the
7815 * incorrect sharing of the PPS.
7816 */
7817 intel_lvds_init(dev_priv);
7818 intel_crt_init(dev_priv);
7819
7820 dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);
7821
7822 if (ilk_has_edp_a(dev_priv))
7823 g4x_dp_init(dev_priv, DP_A, PORT_A);
7824
7825 if (intel_de_read(dev_priv, PCH_HDMIB) & SDVO_DETECTED) {
7826 /* PCH SDVOB multiplex with HDMIB */
7827 found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
7828 if (!found)
7829 g4x_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
7830 if (!found && (intel_de_read(dev_priv, PCH_DP_B) & DP_DETECTED))
7831 g4x_dp_init(dev_priv, PCH_DP_B, PORT_B);
7832 }
7833
7834 if (intel_de_read(dev_priv, PCH_HDMIC) & SDVO_DETECTED)
7835 g4x_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);
7836
7837 if (!dpd_is_edp && intel_de_read(dev_priv, PCH_HDMID) & SDVO_DETECTED)
7838 g4x_hdmi_init(dev_priv, PCH_HDMID, PORT_D);
7839
7840 if (intel_de_read(dev_priv, PCH_DP_C) & DP_DETECTED)
7841 g4x_dp_init(dev_priv, PCH_DP_C, PORT_C);
7842
7843 if (intel_de_read(dev_priv, PCH_DP_D) & DP_DETECTED)
7844 g4x_dp_init(dev_priv, PCH_DP_D, PORT_D);
7845 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
7846 bool has_edp, has_port;
7847
7848 if (IS_VALLEYVIEW(dev_priv) && dev_priv->display.vbt.int_crt_support)
7849 intel_crt_init(dev_priv);
7850
7851 /*
7852 * The DP_DETECTED bit is the latched state of the DDC
7853 * SDA pin at boot. However since eDP doesn't require DDC
7854 * (no way to plug in a DP->HDMI dongle) the DDC pins for
7855 * eDP ports may have been muxed to an alternate function.
7856 * Thus we can't rely on the DP_DETECTED bit alone to detect
7857 * eDP ports. Consult the VBT as well as DP_DETECTED to
7858 * detect eDP ports.
7859 *
7860 * Sadly the straps seem to be missing sometimes even for HDMI
7861 * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
7862 * and VBT for the presence of the port. Additionally we can't
7863 * trust the port type the VBT declares as we've seen at least
7864		 * HDMI ports that the VBT claims are DP or eDP.
7865 */
7866 has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
7867 has_port = intel_bios_is_port_present(display, PORT_B);
7868 if (intel_de_read(dev_priv, VLV_DP_B) & DP_DETECTED || has_port)
7869 has_edp &= g4x_dp_init(dev_priv, VLV_DP_B, PORT_B);
7870 if ((intel_de_read(dev_priv, VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
7871 g4x_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);
7872
7873 has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
7874 has_port = intel_bios_is_port_present(display, PORT_C);
7875 if (intel_de_read(dev_priv, VLV_DP_C) & DP_DETECTED || has_port)
7876 has_edp &= g4x_dp_init(dev_priv, VLV_DP_C, PORT_C);
7877 if ((intel_de_read(dev_priv, VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
7878 g4x_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);
7879
7880 if (IS_CHERRYVIEW(dev_priv)) {
7881 /*
7882 * eDP not supported on port D,
7883 * so no need to worry about it
7884 */
7885 has_port = intel_bios_is_port_present(display, PORT_D);
7886 if (intel_de_read(dev_priv, CHV_DP_D) & DP_DETECTED || has_port)
7887 g4x_dp_init(dev_priv, CHV_DP_D, PORT_D);
7888 if (intel_de_read(dev_priv, CHV_HDMID) & SDVO_DETECTED || has_port)
7889 g4x_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
7890 }
7891
7892 vlv_dsi_init(dev_priv);
7893 } else if (IS_PINEVIEW(dev_priv)) {
7894 intel_lvds_init(dev_priv);
7895 intel_crt_init(dev_priv);
7896 } else if (IS_DISPLAY_VER(dev_priv, 3, 4)) {
7897 bool found = false;
7898
7899 if (IS_MOBILE(dev_priv))
7900 intel_lvds_init(dev_priv);
7901
7902 intel_crt_init(dev_priv);
7903
7904 if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
7905 drm_dbg_kms(&dev_priv->drm, "probing SDVOB\n");
7906 found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
7907 if (!found && IS_G4X(dev_priv)) {
7908 drm_dbg_kms(&dev_priv->drm,
7909 "probing HDMI on SDVOB\n");
7910 g4x_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
7911 }
7912
7913 if (!found && IS_G4X(dev_priv))
7914 g4x_dp_init(dev_priv, DP_B, PORT_B);
7915 }
7916
7917		/* Before G4X, SDVOC doesn't have its own detect register */
7918
7919 if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
7920 drm_dbg_kms(&dev_priv->drm, "probing SDVOC\n");
7921 found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
7922 }
7923
7924 if (!found && (intel_de_read(dev_priv, GEN3_SDVOC) & SDVO_DETECTED)) {
7925
7926 if (IS_G4X(dev_priv)) {
7927 drm_dbg_kms(&dev_priv->drm,
7928 "probing HDMI on SDVOC\n");
7929 g4x_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
7930 }
7931 if (IS_G4X(dev_priv))
7932 g4x_dp_init(dev_priv, DP_C, PORT_C);
7933 }
7934
7935 if (IS_G4X(dev_priv) && (intel_de_read(dev_priv, DP_D) & DP_DETECTED))
7936 g4x_dp_init(dev_priv, DP_D, PORT_D);
7937
7938 if (SUPPORTS_TV(dev_priv))
7939 intel_tv_init(display);
7940 } else if (DISPLAY_VER(dev_priv) == 2) {
7941 if (IS_I85X(dev_priv))
7942 intel_lvds_init(dev_priv);
7943
7944 intel_crt_init(dev_priv);
7945 intel_dvo_init(dev_priv);
7946 }
7947
7948 for_each_intel_encoder(&dev_priv->drm, encoder) {
7949 encoder->base.possible_crtcs =
7950 intel_encoder_possible_crtcs(encoder);
7951 encoder->base.possible_clones =
7952 intel_encoder_possible_clones(encoder);
7953 }
7954
7955 intel_init_pch_refclk(dev_priv);
7956
7957 drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
7958 }
7959
7960 static int max_dotclock(struct drm_i915_private *i915)
7961 {
7962 int max_dotclock = i915->display.cdclk.max_dotclk_freq;
7963
7964 /* icl+ might use joiner */
7965 if (DISPLAY_VER(i915) >= 11)
7966 max_dotclock *= 2;
7967
7968 return max_dotclock;
7969 }
7970
7971 enum drm_mode_status intel_mode_valid(struct drm_device *dev,
7972 const struct drm_display_mode *mode)
7973 {
7974 struct drm_i915_private *dev_priv = to_i915(dev);
7975 int hdisplay_max, htotal_max;
7976 int vdisplay_max, vtotal_max;
7977
7978 /*
7979 * Can't reject DBLSCAN here because Xorg ddxen can add piles
7980 * of DBLSCAN modes to the output's mode list when they detect
7981 * the scaling mode property on the connector. And they don't
7982 * ask the kernel to validate those modes in any way until
7983 * modeset time at which point the client gets a protocol error.
7984 * So in order to not upset those clients we silently ignore the
7985 * DBLSCAN flag on such connectors. For other connectors we will
7986 * reject modes with the DBLSCAN flag in encoder->compute_config().
7987 * And we always reject DBLSCAN modes in connector->mode_valid()
7988 * as we never want such modes on the connector's mode list.
7989 */
7990
7991 if (mode->vscan > 1)
7992 return MODE_NO_VSCAN;
7993
7994 if (mode->flags & DRM_MODE_FLAG_HSKEW)
7995 return MODE_H_ILLEGAL;
7996
7997 if (mode->flags & (DRM_MODE_FLAG_CSYNC |
7998 DRM_MODE_FLAG_NCSYNC |
7999 DRM_MODE_FLAG_PCSYNC))
8000 return MODE_HSYNC;
8001
8002 if (mode->flags & (DRM_MODE_FLAG_BCAST |
8003 DRM_MODE_FLAG_PIXMUX |
8004 DRM_MODE_FLAG_CLKDIV2))
8005 return MODE_BAD;
8006
8007 /*
8008 * Reject clearly excessive dotclocks early to
8009 * avoid having to worry about huge integers later.
8010 */
8011 if (mode->clock > max_dotclock(dev_priv))
8012 return MODE_CLOCK_HIGH;
8013
8014 /* Transcoder timing limits */
8015 if (DISPLAY_VER(dev_priv) >= 11) {
8016 hdisplay_max = 16384;
8017 vdisplay_max = 8192;
8018 htotal_max = 16384;
8019 vtotal_max = 8192;
8020 } else if (DISPLAY_VER(dev_priv) >= 9 ||
8021 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
8022 hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
8023 vdisplay_max = 4096;
8024 htotal_max = 8192;
8025 vtotal_max = 8192;
8026 } else if (DISPLAY_VER(dev_priv) >= 3) {
8027 hdisplay_max = 4096;
8028 vdisplay_max = 4096;
8029 htotal_max = 8192;
8030 vtotal_max = 8192;
8031 } else {
8032 hdisplay_max = 2048;
8033 vdisplay_max = 2048;
8034 htotal_max = 4096;
8035 vtotal_max = 4096;
8036 }
8037
8038 if (mode->hdisplay > hdisplay_max ||
8039 mode->hsync_start > htotal_max ||
8040 mode->hsync_end > htotal_max ||
8041 mode->htotal > htotal_max)
8042 return MODE_H_ILLEGAL;
8043
8044 if (mode->vdisplay > vdisplay_max ||
8045 mode->vsync_start > vtotal_max ||
8046 mode->vsync_end > vtotal_max ||
8047 mode->vtotal > vtotal_max)
8048 return MODE_V_ILLEGAL;
8049
8050 return MODE_OK;
8051 }
8052
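/*
 * Enforce minimum blanking: each mode needs enough horizontal
 * blanking (htotal - hdisplay) and vertical blanking (vtotal -
 * vdisplay) for the CPU transcoder to cope, plus a non-zero hsync
 * front porch on CTG+.
 */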
enum drm_mode_status intel_cpu_transcoder_mode_valid(struct drm_i915_private *dev_priv,
						     const struct drm_display_mode *mode)
{
	/*
	 * Additional transcoder timing limits,
	 * excluding BXT/GLK DSI transcoders.
	 */
	if (DISPLAY_VER(dev_priv) >= 5) {
		if (mode->hdisplay < 64 ||
		    mode->htotal - mode->hdisplay < 32)
			return MODE_H_ILLEGAL;

		if (mode->vtotal - mode->vdisplay < 5)
			return MODE_V_ILLEGAL;
	} else {
		if (mode->htotal - mode->hdisplay < 32)
			return MODE_H_ILLEGAL;

		if (mode->vtotal - mode->vdisplay < 3)
			return MODE_V_ILLEGAL;
	}

	/*
	 * Cantiga+ cannot handle modes with a hsync front porch of 0.
	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
	 */
	if ((DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv)) &&
	    mode->hsync_start == mode->hdisplay)
		return MODE_H_ILLEGAL;

	return MODE_OK;
}

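/*
 * Reject modes wider/taller than the largest plane we can scan out,
 * so userspace isn't offered modes a fullscreen plane can't cover.
 */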
enum drm_mode_status
intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
				const struct drm_display_mode *mode,
				bool joiner)
{
	int plane_width_max, plane_height_max;

	/*
	 * intel_mode_valid() should be
	 * sufficient on older platforms.
	 */
	if (DISPLAY_VER(dev_priv) < 9)
		return MODE_OK;

	/*
	 * Most people will probably want a fullscreen
	 * plane so let's not advertise modes that are
	 * too big for that.
	 */
	if (DISPLAY_VER(dev_priv) >= 11) {
		plane_width_max = 5120 << joiner;
		plane_height_max = 4320;
	} else {
		plane_width_max = 5120;
		plane_height_max = 4096;
	}

	if (mode->hdisplay > plane_width_max)
		return MODE_H_ILLEGAL;

	if (mode->vdisplay > plane_height_max)
		return MODE_V_ILLEGAL;

	return MODE_OK;
}

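/*
 * Per-platform display function vtables;
 * intel_init_display_hooks() picks the right one below.
 */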
static const struct intel_display_funcs skl_display_funcs = {
	.get_pipe_config = hsw_get_pipe_config,
	.crtc_enable = hsw_crtc_enable,
	.crtc_disable = hsw_crtc_disable,
	.commit_modeset_enables = skl_commit_modeset_enables,
	.get_initial_plane_config = skl_get_initial_plane_config,
	.fixup_initial_plane_config = skl_fixup_initial_plane_config,
};

static const struct intel_display_funcs ddi_display_funcs = {
	.get_pipe_config = hsw_get_pipe_config,
	.crtc_enable = hsw_crtc_enable,
	.crtc_disable = hsw_crtc_disable,
	.commit_modeset_enables = intel_commit_modeset_enables,
	.get_initial_plane_config = i9xx_get_initial_plane_config,
	.fixup_initial_plane_config = i9xx_fixup_initial_plane_config,
};

static const struct intel_display_funcs pch_split_display_funcs = {
	.get_pipe_config = ilk_get_pipe_config,
	.crtc_enable = ilk_crtc_enable,
	.crtc_disable = ilk_crtc_disable,
	.commit_modeset_enables = intel_commit_modeset_enables,
	.get_initial_plane_config = i9xx_get_initial_plane_config,
	.fixup_initial_plane_config = i9xx_fixup_initial_plane_config,
};

static const struct intel_display_funcs vlv_display_funcs = {
	.get_pipe_config = i9xx_get_pipe_config,
	.crtc_enable = valleyview_crtc_enable,
	.crtc_disable = i9xx_crtc_disable,
	.commit_modeset_enables = intel_commit_modeset_enables,
	.get_initial_plane_config = i9xx_get_initial_plane_config,
	.fixup_initial_plane_config = i9xx_fixup_initial_plane_config,
};

static const struct intel_display_funcs i9xx_display_funcs = {
	.get_pipe_config = i9xx_get_pipe_config,
	.crtc_enable = i9xx_crtc_enable,
	.crtc_disable = i9xx_crtc_disable,
	.commit_modeset_enables = intel_commit_modeset_enables,
	.get_initial_plane_config = i9xx_get_initial_plane_config,
	.fixup_initial_plane_config = i9xx_fixup_initial_plane_config,
};

/**
 * intel_init_display_hooks - initialize the display modesetting hooks
 * @dev_priv: device private
 */
void intel_init_display_hooks(struct drm_i915_private *dev_priv)
{
	if (DISPLAY_VER(dev_priv) >= 9) {
		dev_priv->display.funcs.display = &skl_display_funcs;
	} else if (HAS_DDI(dev_priv)) {
		dev_priv->display.funcs.display = &ddi_display_funcs;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		dev_priv->display.funcs.display = &pch_split_display_funcs;
	} else if (IS_CHERRYVIEW(dev_priv) ||
		   IS_VALLEYVIEW(dev_priv)) {
		dev_priv->display.funcs.display = &vlv_display_funcs;
	} else {
		dev_priv->display.funcs.display = &i9xx_display_funcs;
	}
}

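/*
 * Commit the state inherited at boot: pull every CRTC into the
 * atomic state, force a LUT load on active pipes (we have no
 * readout for pipe gamma enable yet), and let encoders demand a
 * full modeset via their ->initial_fastset_check() hook.
 */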
int intel_initial_commit(struct drm_device *dev)
{
	struct drm_atomic_state *state = NULL;
	struct drm_modeset_acquire_ctx ctx;
	struct intel_crtc *crtc;
	int ret = 0;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	drm_modeset_acquire_init(&ctx, 0);

	state->acquire_ctx = &ctx;
	to_intel_atomic_state(state)->internal = true;

retry:
	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			intel_atomic_get_crtc_state(state, crtc);

		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			goto out;
		}

		if (crtc_state->hw.active) {
			struct intel_encoder *encoder;

			ret = drm_atomic_add_affected_planes(state, &crtc->base);
			if (ret)
				goto out;

			/*
			 * FIXME hack to force a LUT update to avoid the
			 * plane update forcing the pipe gamma on without
			 * having a proper LUT loaded. Remove once we
			 * have readout for pipe gamma enable.
			 */
			crtc_state->uapi.color_mgmt_changed = true;

			for_each_intel_encoder_mask(dev, encoder,
						    crtc_state->uapi.encoder_mask) {
				if (encoder->initial_fastset_check &&
				    !encoder->initial_fastset_check(encoder, crtc_state)) {
					ret = drm_atomic_add_affected_connectors(state,
										 &crtc->base);
					if (ret)
						goto out;
				}
			}
		}
	}

	ret = drm_atomic_commit(state);

out:
	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	drm_atomic_state_put(state);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}

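/*
 * Force the pipe on with fixed 640x480@60 VGA timings for the i830
 * force quirk, which apparently needs the pipes kept running even
 * with no real output attached.
 */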
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
	enum transcoder cpu_transcoder = (enum transcoder)pipe;
	/* 640x480@60Hz, ~25175 kHz */
	struct dpll clock = {
		.m1 = 18,
		.m2 = 7,
		.p1 = 13,
		.p2 = 4,
		.n = 2,
	};
	u32 dpll, fp;
	int i;

	drm_WARN_ON(&dev_priv->drm,
		    i9xx_calc_dpll_params(48000, &clock) != 25154);

	drm_dbg_kms(&dev_priv->drm,
		    "enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
		    pipe_name(pipe), clock.vco, clock.dot);

	fp = i9xx_dpll_compute_fp(&clock);
	dpll = DPLL_DVO_2X_MODE |
		DPLL_VGA_MODE_DIS |
		((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
		PLL_P2_DIVIDE_BY_4 |
		PLL_REF_INPUT_DREFCLK |
		DPLL_VCO_ENABLE;

	intel_de_write(dev_priv, TRANS_HTOTAL(dev_priv, cpu_transcoder),
		       HACTIVE(640 - 1) | HTOTAL(800 - 1));
	intel_de_write(dev_priv, TRANS_HBLANK(dev_priv, cpu_transcoder),
		       HBLANK_START(640 - 1) | HBLANK_END(800 - 1));
	intel_de_write(dev_priv, TRANS_HSYNC(dev_priv, cpu_transcoder),
		       HSYNC_START(656 - 1) | HSYNC_END(752 - 1));
	intel_de_write(dev_priv, TRANS_VTOTAL(dev_priv, cpu_transcoder),
		       VACTIVE(480 - 1) | VTOTAL(525 - 1));
	intel_de_write(dev_priv, TRANS_VBLANK(dev_priv, cpu_transcoder),
		       VBLANK_START(480 - 1) | VBLANK_END(525 - 1));
	intel_de_write(dev_priv, TRANS_VSYNC(dev_priv, cpu_transcoder),
		       VSYNC_START(490 - 1) | VSYNC_END(492 - 1));
	intel_de_write(dev_priv, PIPESRC(dev_priv, pipe),
		       PIPESRC_WIDTH(640 - 1) | PIPESRC_HEIGHT(480 - 1));

	intel_de_write(dev_priv, FP0(pipe), fp);
	intel_de_write(dev_priv, FP1(pipe), fp);

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	intel_de_write(dev_priv, DPLL(dev_priv, pipe),
		       dpll & ~DPLL_VGA_MODE_DIS);
	intel_de_write(dev_priv, DPLL(dev_priv, pipe), dpll);

	/* Wait for the clocks to stabilize. */
	intel_de_posting_read(dev_priv, DPLL(dev_priv, pipe));
	udelay(150);

	/*
	 * The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	intel_de_write(dev_priv, DPLL(dev_priv, pipe), dpll);

	/* We do this three times for luck */
	for (i = 0; i < 3; i++) {
		intel_de_write(dev_priv, DPLL(dev_priv, pipe), dpll);
		intel_de_posting_read(dev_priv, DPLL(dev_priv, pipe));
		udelay(150); /* wait for warmup */
	}

	intel_de_write(dev_priv, TRANSCONF(dev_priv, pipe), TRANSCONF_ENABLE);
	intel_de_posting_read(dev_priv, TRANSCONF(dev_priv, pipe));

	intel_wait_for_pipe_scanline_moving(crtc);
}

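/*
 * Counterpart of i830_enable_pipe(): turn the force-enabled pipe
 * back off, after checking that all planes and cursors on it were
 * already disabled.
 */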
void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);

	drm_dbg_kms(&dev_priv->drm, "disabling pipe %c due to force quirk\n",
		    pipe_name(pipe));

	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, DSPCNTR(dev_priv, PLANE_A)) & DISP_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, DSPCNTR(dev_priv, PLANE_B)) & DISP_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, DSPCNTR(dev_priv, PLANE_C)) & DISP_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, CURCNTR(dev_priv, PIPE_A)) & MCURSOR_MODE_MASK);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, CURCNTR(dev_priv, PIPE_B)) & MCURSOR_MODE_MASK);

	intel_de_write(dev_priv, TRANSCONF(dev_priv, pipe), 0);
	intel_de_posting_read(dev_priv, TRANSCONF(dev_priv, pipe));

	intel_wait_for_pipe_scanline_stopped(crtc);

	intel_de_write(dev_priv, DPLL(dev_priv, pipe), DPLL_VGA_MODE_DIS);
	intel_de_posting_read(dev_priv, DPLL(dev_priv, pipe));
}

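/*
 * Flush pending hotplug work before teardown: cancel any queued
 * modeset retry work (dropping its connector reference) and any
 * outstanding HDCP work.
 */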
void intel_hpd_poll_fini(struct drm_i915_private *i915)
{
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;

	/* Kill all the work that may have been queued by hpd. */
	drm_connector_list_iter_begin(&i915->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		if (connector->modeset_retry_work.func &&
		    cancel_work_sync(&connector->modeset_retry_work))
			drm_connector_put(&connector->base);
		if (connector->hdcp.shim) {
			cancel_delayed_work_sync(&connector->hdcp.check_work);
			cancel_work_sync(&connector->hdcp.prop_work);
		}
	}
	drm_connector_list_iter_end(&conn_iter);
}

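/*
 * Scanout on gen6+ needs a workaround when VT-d DMA remapping is
 * active; callers use this to decide whether to apply it.
 */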
bool intel_scanout_needs_vtd_wa(struct drm_i915_private *i915)
{
	return DISPLAY_VER(i915) >= 6 && i915_vtd_active(i915);
}
