/*
 * Copyright © 2006-2007 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Eric Anholt <eric@anholt.net>
 */

#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/intel-iommu.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-resv.h>
#include <linux/slab.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>

#include "display/intel_crt.h"
#include "display/intel_ddi.h"
#include "display/intel_dp.h"
#include "display/intel_dp_mst.h"
#include "display/intel_dpll_mgr.h"
#include "display/intel_dsi.h"
#include "display/intel_dvo.h"
#include "display/intel_gmbus.h"
#include "display/intel_hdmi.h"
#include "display/intel_lvds.h"
#include "display/intel_sdvo.h"
#include "display/intel_tv.h"
#include "display/intel_vdsc.h"

#include "gt/intel_rps.h"

#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_acpi.h"
#include "intel_atomic.h"
#include "intel_atomic_plane.h"
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_color.h"
#include "intel_csr.h"
#include "intel_display_types.h"
#include "intel_dp_link_training.h"
#include "intel_fbc.h"
#include "intel_fbdev.h"
#include "intel_fifo_underrun.h"
#include "intel_frontbuffer.h"
#include "intel_hdcp.h"
#include "intel_hotplug.h"
#include "intel_overlay.h"
#include "intel_pipe_crc.h"
#include "intel_pm.h"
#include "intel_psr.h"
#include "intel_quirks.h"
#include "intel_sideband.h"
#include "intel_sprite.h"
#include "intel_tc.h"
#include "intel_vga.h"

/* Primary plane formats for gen <= 3 */
static const u32 i8xx_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_XRGB1555,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
};

/* Primary plane formats for ivb (no fp16 due to hw issue) */
static const u32 ivb_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
};

/* Primary plane formats for gen >= 4, except ivb */
static const u32 i965_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_XBGR16161616F,
};

/* Primary plane formats for vlv/chv */
static const u32 vlv_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_ARGB2101010,
	DRM_FORMAT_ABGR2101010,
	DRM_FORMAT_XBGR16161616F,
};

static const u64 i9xx_format_modifiers[] = {
	I915_FORMAT_MOD_X_TILED,
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};

/* Cursor formats */
static const u32 intel_cursor_formats[] = {
	DRM_FORMAT_ARGB8888,
};

static const u64 cursor_format_modifiers[] = {
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};

static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config);
static void ilk_pch_clock_get(struct intel_crtc *crtc,
			      struct intel_crtc_state *pipe_config);

static int intel_framebuffer_init(struct intel_framebuffer *ifb,
				  struct drm_i915_gem_object *obj,
				  struct drm_mode_fb_cmd2 *mode_cmd);
static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state);
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
					 const struct intel_link_m_n *m_n,
					 const struct intel_link_m_n *m2_n2);
static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state);
static void vlv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void skl_pfit_enable(const struct intel_crtc_state *crtc_state);
static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state);
static void intel_modeset_setup_hw_state(struct drm_device *dev,
					 struct drm_modeset_acquire_ctx *ctx);
static struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc);

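/*
 * DPLL divisor limits for a platform/output combination. The dot and vco
 * ranges are in kHz; the remaining entries bound the individual dividers.
 * The p2 divider is picked by comparing the target dot clock against
 * .dot_limit: below it .p2_slow is used, otherwise .p2_fast; for LVDS the
 * choice is based on single vs. dual channel instead (see
 * i9xx_select_p2_div() below).
 */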
struct intel_limit {
	struct {
		int min, max;
	} dot, vco, n, m, m1, m2, p, p1;

	struct {
		int dot_limit;
		int p2_slow, p2_fast;
	} p2;
};

/* returns HPLL frequency in kHz */
int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
{
	int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };

	/* Obtain SKU information */
	hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
		CCK_FUSE_HPLL_FREQ_MASK;

	return vco_freq[hpll_freq] * 1000;
}

int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
		      const char *name, u32 reg, int ref_freq)
{
	u32 val;
	int divider;

	val = vlv_cck_read(dev_priv, reg);
	divider = val & CCK_FREQUENCY_VALUES;

	drm_WARN(&dev_priv->drm, (val & CCK_FREQUENCY_STATUS) !=
		 (divider << CCK_FREQUENCY_STATUS_SHIFT),
		 "%s change in progress\n", name);

	return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
}
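/*
 * Illustrative example (values assumed purely for the arithmetic, not taken
 * from any particular SKU): with ref_freq = 800000 kHz and a divider field
 * of 7, the result is DIV_ROUND_CLOSEST(800000 << 1, 7 + 1) = 200000 kHz,
 * i.e. the derived clock is 2 * ref / (divider + 1).
 */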

int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
			   const char *name, u32 reg)
{
	int hpll;

	vlv_cck_get(dev_priv);

	if (dev_priv->hpll_freq == 0)
		dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);

	hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq);

	vlv_cck_put(dev_priv);

	return hpll;
}

static void intel_update_czclk(struct drm_i915_private *dev_priv)
{
	if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
		return;

	dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
						      CCK_CZ_CLOCK_CONTROL);

	drm_dbg(&dev_priv->drm, "CZ clock rate: %d kHz\n",
		dev_priv->czclk_freq);
}

/* units of 100MHz */
static u32 intel_fdi_link_freq(struct drm_i915_private *dev_priv,
			       const struct intel_crtc_state *pipe_config)
{
	if (HAS_DDI(dev_priv))
		return pipe_config->port_clock; /* SPLL */
	else
		return dev_priv->fdi_pll_freq;
}

static const struct intel_limit intel_limits_i8xx_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
};

static const struct intel_limit intel_limits_i8xx_dvo = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 4 },
};

static const struct intel_limit intel_limits_i8xx_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
};

static const struct intel_limit intel_limits_i9xx_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_i9xx_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 7, .max = 98 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
};


static const struct intel_limit intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3},
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10,
		.p2_fast = 10
	},
};

static const struct intel_limit intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8},
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
};

static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
};

static const struct intel_limit pnv_limits_sdvo = {
	.dot = { .min = 20000, .max = 400000},
	.vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit pnv_limits_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 7, .max = 112 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
};

/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
static const struct intel_limit ilk_limits_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit ilk_limits_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const struct intel_limit ilk_limits_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

/* LVDS 100MHz refclk limits. */
static const struct intel_limit ilk_limits_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const struct intel_limit ilk_limits_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

static const struct intel_limit intel_limits_vlv = {
	 /*
	  * These are the data rate limits (measured in fast clocks)
	  * since those are the strictest limits we have. The fast
	  * clock and actual rate limits are more relaxed, so checking
	  * them would make no difference.
	  */
	.dot = { .min = 25000 * 5, .max = 270000 * 5 },
	.vco = { .min = 4000000, .max = 6000000 },
	.n = { .min = 1, .max = 7 },
	.m1 = { .min = 2, .max = 3 },
	.m2 = { .min = 11, .max = 156 },
	.p1 = { .min = 2, .max = 3 },
	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
};

static const struct intel_limit intel_limits_chv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have.  The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 540000 * 5},
	.vco = { .min = 4800000, .max = 6480000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	.m2 = { .min = 24 << 22, .max = 175 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = {	.p2_slow = 1, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_bxt = {
	/* FIXME: find real dot limits */
	.dot = { .min = 0, .max = INT_MAX },
	.vco = { .min = 4800000, .max = 6700000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* FIXME: find real m2 limits */
	.m2 = { .min = 2 << 22, .max = 255 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 20 },
};

/* WA Display #0827: Gen9:all */
static void
skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
{
	if (enable)
		intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
		               intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DUPS1_GATING_DIS | DUPS2_GATING_DIS);
	else
		intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
		               intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
}

/* Wa_2006604312:icl,ehl */
static void
icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
		       bool enable)
{
	if (enable)
		intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
		               intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS);
	else
		intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
		               intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS);
}

static bool
needs_modeset(const struct intel_crtc_state *state)
{
	return drm_atomic_crtc_needs_modeset(&state->uapi);
}

static bool
is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->master_transcoder != INVALID_TRANSCODER;
}

static bool
is_trans_port_sync_master(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->sync_mode_slaves_mask != 0;
}

bool
is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
{
	return is_trans_port_sync_master(crtc_state) ||
		is_trans_port_sync_slave(crtc_state);
}

/*
 * Platform specific helpers to calculate the port PLL loopback (clock.m)
 * and post-divider (clock.p) values, and the pre- (clock.vco) and
 * post-divided fast (clock.dot) clock rates. This fast dot clock is fed to
 * the port's IO logic. The helpers' return value is the rate of the clock
 * that is fed to the display engine's pipe, which can be the above fast dot
 * clock rate or a divided-down version of it.
 */
/* m1 is reserved as 0 in Pineview, n is a ring counter */
static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m2 + 2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot;
}

static u32 i9xx_dpll_compute_m(struct dpll *dpll)
{
	return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
}

static int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = i9xx_dpll_compute_m(clock);
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot;
}
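/*
 * A worked example with illustrative divisor values (chosen only to satisfy
 * the i9xx SDVO limits above, not taken from real hardware state): with
 * refclk = 96000 kHz, m1 = 12 and m2 = 5 give m = 5 * (12 + 2) + (5 + 2) = 77;
 * n = 2 gives vco = 96000 * 77 / (2 + 2) = 1848000 kHz; p1 = 2 and p2 = 10
 * give p = 20, so dot = 1848000 / 20 = 92400 kHz.
 */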

static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot / 5;
}
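/*
 * Note that clock->dot above is the 5x fast clock that the vlv/chv limit
 * tables are expressed in; the value returned to the caller is the pipe
 * pixel clock, i.e. dot / 5 (cf. "target *= 5" in the vlv/chv
 * find_best_dpll helpers below).
 */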

int chv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, clock->m),
					   clock->n << 22);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot / 5;
}
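/*
 * On CHV, m2 is a fixed-point value with 22 fractional bits (note the m2
 * limits above are shifted left by 22), so dividing by n << 22 cancels the
 * scale factor. Illustrative example (assumed values, not real hardware
 * state): refclk = 100000 kHz, n = 1, m1 = 2 and an integer m2 of 27
 * (i.e. 27 << 22) give vco = 100000 * 2 * 27 = 5400000 kHz.
 */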

/*
 * Returns whether the given set of divisors are valid for a given refclk with
 * the given connectors.
 */
static bool intel_pll_is_valid(struct drm_i915_private *dev_priv,
			       const struct intel_limit *limit,
			       const struct dpll *clock)
{
	if (clock->n < limit->n.min || limit->n.max < clock->n)
		return false;
	if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
		return false;
	if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
		return false;
	if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
		return false;

	if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
	    !IS_CHERRYVIEW(dev_priv) && !IS_GEN9_LP(dev_priv))
		if (clock->m1 <= clock->m2)
			return false;

	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
	    !IS_GEN9_LP(dev_priv)) {
		if (clock->p < limit->p.min || limit->p.max < clock->p)
			return false;
		if (clock->m < limit->m.min || limit->m.max < clock->m)
			return false;
	}

	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		return false;
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
	 * connector, etc., rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		return false;

	return true;
}

static int
i9xx_select_p2_div(const struct intel_limit *limit,
		   const struct intel_crtc_state *crtc_state,
		   int target)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		/*
		 * For LVDS just rely on its current settings for dual-channel.
		 * We haven't figured out how to reliably set up different
		 * single/dual channel state, if we even can.
		 */
		if (intel_is_dual_link_lvds(dev_priv))
			return limit->p2.p2_fast;
		else
			return limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			return limit->p2.p2_slow;
		else
			return limit->p2.p2_fast;
	}
}
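/*
 * For example, using the intel_limits_i9xx_sdvo table above (dot_limit =
 * 200000): a 100000 kHz target selects p2_slow = 10, while a 250000 kHz
 * target selects p2_fast = 5. For LVDS the choice depends on single vs.
 * dual channel instead.
 */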

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or false.  The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then the best_clock P divider must match the
 * P divider from @match_clock used for LVDS downclocking.
 */
static bool
i9xx_find_best_dpll(const struct intel_limit *limit,
		    struct intel_crtc_state *crtc_state,
		    int target, int refclk, struct dpll *match_clock,
		    struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->uapi.crtc->dev;
	struct dpll clock;
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			if (clock.m2 >= clock.m1)
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_pll_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or false.  The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then the best_clock P divider must match the
 * P divider from @match_clock used for LVDS downclocking.
 */
static bool
pnv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->uapi.crtc->dev;
	struct dpll clock;
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					pnv_calc_dpll_params(refclk, &clock);
					if (!intel_pll_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or false.  The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then the best_clock P divider must match the
 * P divider from @match_clock used for LVDS downclocking.
 */
static bool
g4x_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->uapi.crtc->dev;
	struct dpll clock;
	int max_n;
	bool found = false;
	/* approximately equals target * 0.00585 */
	int err_most = (target >> 8) + (target >> 9);
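	/* e.g. for target = 100000: (100000 >> 8) + (100000 >> 9) = 390 + 195 = 585, i.e. ~0.585% */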

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	max_n = limit->n.max;
	/* based on hardware requirement, prefer smaller n for better precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		/* based on hardware requirement, prefer larger m1,m2 */
		for (clock.m1 = limit->m1.max;
		     clock.m1 >= limit->m1.min; clock.m1--) {
			for (clock.m2 = limit->m2.max;
			     clock.m2 >= limit->m2.min; clock.m2--) {
				for (clock.p1 = limit->p1.max;
				     clock.p1 >= limit->p1.min; clock.p1--) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_pll_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err_most) {
						*best_clock = clock;
						err_most = this_err;
						max_n = clock.n;
						found = true;
					}
				}
			}
		}
	}
	return found;
}

/*
 * Check whether the calculated PLL configuration is more optimal than the
 * best configuration and error found so far. Returns true if so; the
 * calculated error is returned via @error_ppm.
 */
static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
			       const struct dpll *calculated_clock,
			       const struct dpll *best_clock,
			       unsigned int best_error_ppm,
			       unsigned int *error_ppm)
{
	/*
	 * For CHV ignore the error and consider only the P value.
	 * Prefer a bigger P value based on HW requirements.
	 */
	if (IS_CHERRYVIEW(to_i915(dev))) {
		*error_ppm = 0;

		return calculated_clock->p > best_clock->p;
	}

	if (drm_WARN_ON_ONCE(dev, !target_freq))
		return false;

	*error_ppm = div_u64(1000000ULL *
				abs(target_freq - calculated_clock->dot),
			     target_freq);
	/*
	 * Prefer a better P value over a better (smaller) error if the error
	 * is small. Ensure this preference for future configurations too by
	 * setting the error to 0.
	 */
	if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
		*error_ppm = 0;

		return true;
	}

	return *error_ppm + 10 < best_error_ppm;
}
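/*
 * For illustration (assumed numbers): with target_freq = 100000 kHz and a
 * calculated dot clock of 99990 kHz, the error is
 * 1000000 * 10 / 100000 = 100 ppm.
 */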

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or false.  The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 */
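/*
 * Note: unlike the i9xx/g4x searches above, m2 is not iterated over; it is
 * solved directly from the clock equation for each candidate
 * (n, p1, p2, m1) combination and then validated.
 */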
static bool
vlv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct dpll clock;
	unsigned int bestppm = 1000000;
	/* min update 19.2 MHz */
	int max_n = min(limit->n.max, refclk / 19200);
	bool found = false;

	target *= 5; /* fast clock */

	memset(best_clock, 0, sizeof(*best_clock));

	/* based on hardware requirement, prefer smaller n for better precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
			for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
			     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
				clock.p = clock.p1 * clock.p2;
				/* based on hardware requirement, prefer bigger m1,m2 values */
				for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
					unsigned int ppm;

					clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
								     refclk * clock.m1);

					vlv_calc_dpll_params(refclk, &clock);

					if (!intel_pll_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;

					if (!vlv_PLL_is_optimal(dev, target,
								&clock,
								best_clock,
								bestppm, &ppm))
						continue;

					*best_clock = clock;
					bestppm = ppm;
					found = true;
				}
			}
		}
	}

	return found;
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or false.  The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 */
static bool
chv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_device *dev = crtc->base.dev;
	unsigned int best_error_ppm;
	struct dpll clock;
	u64 m2;
	bool found = false;

	memset(best_clock, 0, sizeof(*best_clock));
	best_error_ppm = 1000000;

	/*
	 * Per the hardware documentation, n is always set to 1 and m1 is
	 * always set to 2. If we ever need to support a 200 MHz refclk we
	 * need to revisit this because n may no longer be 1.
	 */
	clock.n = 1, clock.m1 = 2;
	target *= 5;	/* fast clock */

	for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
		for (clock.p2 = limit->p2.p2_fast;
				clock.p2 >= limit->p2.p2_slow;
				clock.p2 -= clock.p2 > 10 ? 2 : 1) {
			unsigned int error_ppm;

			clock.p = clock.p1 * clock.p2;

			m2 = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(target, clock.p * clock.n) << 22,
						   refclk * clock.m1);

			if (m2 > INT_MAX / clock.m1)
				continue;

			clock.m2 = m2;

			chv_calc_dpll_params(refclk, &clock);

			if (!intel_pll_is_valid(to_i915(dev), limit, &clock))
				continue;

			if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
						best_error_ppm, &error_ppm))
				continue;

			*best_clock = clock;
			best_error_ppm = error_ppm;
			found = true;
		}
	}

	return found;
}

bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
			struct dpll *best_clock)
{
	int refclk = 100000;
	const struct intel_limit *limit = &intel_limits_bxt;

	return chv_find_best_dpll(limit, crtc_state,
				  crtc_state->port_clock, refclk,
				  NULL, best_clock);
}

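/*
 * Sample the pipe's current display scanline (PIPEDSL) twice, 5 ms apart;
 * if the value changed, the pipe is actively scanning out.
 */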
static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	i915_reg_t reg = PIPEDSL(pipe);
	u32 line1, line2;
	u32 line_mask;

	if (IS_GEN(dev_priv, 2))
		line_mask = DSL_LINEMASK_GEN2;
	else
		line_mask = DSL_LINEMASK_GEN3;

	line1 = intel_de_read(dev_priv, reg) & line_mask;
	msleep(5);
	line2 = intel_de_read(dev_priv, reg) & line_mask;

	return line1 != line2;
}

static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Wait for the display line to settle/start moving */
	if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
		drm_err(&dev_priv->drm,
			"pipe %c scanline %s wait timed out\n",
			pipe_name(pipe), onoff(state));
}

static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, false);
}

static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, true);
}

static void
intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (INTEL_GEN(dev_priv) >= 4) {
		enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
		i915_reg_t reg = PIPECONF(cpu_transcoder);

		/* Wait for the Pipe State to go off */
		if (intel_de_wait_for_clear(dev_priv, reg,
					    I965_PIPECONF_ACTIVE, 100))
			drm_WARN(&dev_priv->drm, 1,
				 "pipe_off wait timed out\n");
	} else {
		intel_wait_for_pipe_scanline_stopped(crtc);
	}
}

/* Only for pre-ILK configs */
void assert_pll(struct drm_i915_private *dev_priv,
		enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = intel_de_read(dev_priv, DPLL(pipe));
	cur_state = !!(val & DPLL_VCO_ENABLE);
	I915_STATE_WARN(cur_state != state,
	     "PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}

/* XXX: the dsi pll is shared between MIPI DSI ports */
void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
{
	u32 val;
	bool cur_state;

	vlv_cck_get(dev_priv);
	val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
	vlv_cck_put(dev_priv);

	cur_state = val & DSI_PLL_VCO_EN;
	I915_STATE_WARN(cur_state != state,
	     "DSI PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}

static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	bool cur_state;

	if (HAS_DDI(dev_priv)) {
		/*
		 * DDI does not have a specific FDI_TX register.
		 *
		 * FDI is never fed from EDP transcoder
		 * so pipe->transcoder cast is fine here.
		 */
		enum transcoder cpu_transcoder = (enum transcoder)pipe;
		u32 val = intel_de_read(dev_priv,
					TRANS_DDI_FUNC_CTL(cpu_transcoder));
		cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
	} else {
		u32 val = intel_de_read(dev_priv, FDI_TX_CTL(pipe));
		cur_state = !!(val & FDI_TX_ENABLE);
	}
	I915_STATE_WARN(cur_state != state,
	     "FDI TX state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)

static void assert_fdi_rx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = intel_de_read(dev_priv, FDI_RX_CTL(pipe));
	cur_state = !!(val & FDI_RX_ENABLE);
	I915_STATE_WARN(cur_state != state,
	     "FDI RX state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)

static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	u32 val;

	/* ILK FDI PLL is always enabled */
	if (IS_GEN(dev_priv, 5))
		return;

	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
	if (HAS_DDI(dev_priv))
		return;

	val = intel_de_read(dev_priv, FDI_TX_CTL(pipe));
	I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
}

void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
		       enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = intel_de_read(dev_priv, FDI_RX_CTL(pipe));
	cur_state = !!(val & FDI_RX_PLL_ENABLE);
	I915_STATE_WARN(cur_state != state,
	     "FDI RX PLL assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}

void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	i915_reg_t pp_reg;
	u32 val;
	enum pipe panel_pipe = INVALID_PIPE;
	bool locked = true;

	if (drm_WARN_ON(&dev_priv->drm, HAS_DDI(dev_priv)))
		return;

	if (HAS_PCH_SPLIT(dev_priv)) {
		u32 port_sel;

		pp_reg = PP_CONTROL(0);
		port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		switch (port_sel) {
		case PANEL_PORT_SELECT_LVDS:
			intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPA:
			intel_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPC:
			intel_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPD:
			intel_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
			break;
		default:
			MISSING_CASE(port_sel);
			break;
		}
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		/* presumably write lock depends on pipe, not port select */
		pp_reg = PP_CONTROL(pipe);
		panel_pipe = pipe;
	} else {
		u32 port_sel;

		pp_reg = PP_CONTROL(0);
		port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		drm_WARN_ON(&dev_priv->drm,
			    port_sel != PANEL_PORT_SELECT_LVDS);
		intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
	}

	val = intel_de_read(dev_priv, pp_reg);
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
		locked = false;

	I915_STATE_WARN(panel_pipe == pipe && locked,
	     "panel assertion failure, pipe %c regs locked\n",
	     pipe_name(pipe));
}

void assert_pipe(struct drm_i915_private *dev_priv,
		 enum transcoder cpu_transcoder, bool state)
{
	bool cur_state;
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		state = true;

	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (wakeref) {
		u32 val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));
		cur_state = !!(val & PIPECONF_ENABLE);

		intel_display_power_put(dev_priv, power_domain, wakeref);
	} else {
		cur_state = false;
	}

	I915_STATE_WARN(cur_state != state,
			"transcoder %s assertion failure (expected %s, current %s)\n",
			transcoder_name(cpu_transcoder),
			onoff(state), onoff(cur_state));
}

static void assert_plane(struct intel_plane *plane, bool state)
{
	enum pipe pipe;
	bool cur_state;

	cur_state = plane->get_hw_state(plane, &pipe);

	I915_STATE_WARN(cur_state != state,
			"%s assertion failure (expected %s, current %s)\n",
			plane->base.name, onoff(state), onoff(cur_state));
}

#define assert_plane_enabled(p) assert_plane(p, true)
#define assert_plane_disabled(p) assert_plane(p, false)

static void assert_planes_disabled(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_plane *plane;

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
		assert_plane_disabled(plane);
}

static void assert_vblank_disabled(struct drm_crtc *crtc)
{
	if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
		drm_crtc_vblank_put(crtc);
}

void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	u32 val;
	bool enabled;

	val = intel_de_read(dev_priv, PCH_TRANSCONF(pipe));
	enabled = !!(val & TRANS_ENABLE);
	I915_STATE_WARN(enabled,
	     "transcoder assertion failed, should be off on pipe %c but is still active\n",
	     pipe_name(pipe));
}

static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe, enum port port,
				   i915_reg_t dp_reg)
{
	enum pipe port_pipe;
	bool state;

	state = intel_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe);

	I915_STATE_WARN(state && port_pipe == pipe,
			"PCH DP %c enabled on transcoder %c, should be disabled\n",
			port_name(port), pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
			"IBX PCH DP %c still using transcoder B\n",
			port_name(port));
}

static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
				     enum pipe pipe, enum port port,
				     i915_reg_t hdmi_reg)
{
	enum pipe port_pipe;
	bool state;

	state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe);

	I915_STATE_WARN(state && port_pipe == pipe,
			"PCH HDMI %c enabled on transcoder %c, should be disabled\n",
			port_name(port), pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
			"IBX PCH HDMI %c still using transcoder B\n",
			port_name(port));
}

static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	enum pipe port_pipe;

	assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);

	I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) &&
			port_pipe == pipe,
			"PCH VGA enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) &&
			port_pipe == pipe,
			"PCH LVDS enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	/* PCH SDVOB multiplex with HDMIB */
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
}

static void _vlv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	intel_de_write(dev_priv, DPLL(pipe), pipe_config->dpll_hw_state.dpll);
	intel_de_posting_read(dev_priv, DPLL(pipe));
	udelay(150);

	if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
		drm_err(&dev_priv->drm, "DPLL %d failed to lock\n", pipe);
}

static void vlv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_vlv_enable_pll(crtc, pipe_config);

	intel_de_write(dev_priv, DPLL_MD(pipe),
		       pipe_config->dpll_hw_state.dpll_md);
	intel_de_posting_read(dev_priv, DPLL_MD(pipe));
}


static void _chv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 tmp;

	vlv_dpio_get(dev_priv);

	/* Re-enable the 10bit clock to the display controller */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	tmp |= DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);

	vlv_dpio_put(dev_priv);

	/*
	 * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
	 */
	udelay(1);

	/* Enable PLL */
	intel_de_write(dev_priv, DPLL(pipe), pipe_config->dpll_hw_state.dpll);

	/* Check PLL is locked */
	if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
		drm_err(&dev_priv->drm, "PLL %d failed to lock\n", pipe);
}

static void chv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_chv_enable_pll(crtc, pipe_config);

	if (pipe != PIPE_A) {
		/*
		 * WaPixelRepeatModeFixForC0:chv
		 *
		 * DPLLCMD is AWOL. Use chicken bits to propagate
		 * the value from DPLLBMD to either pipe B or C.
		 */
		intel_de_write(dev_priv, CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
		intel_de_write(dev_priv, DPLL_MD(PIPE_B),
			       pipe_config->dpll_hw_state.dpll_md);
		intel_de_write(dev_priv, CBR4_VLV, 0);
		dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;

		/*
		 * DPLLB VGA mode also seems to cause problems.
		 * We should always have it disabled.
		 */
		drm_WARN_ON(&dev_priv->drm,
			    (intel_de_read(dev_priv, DPLL(PIPE_B)) &
			     DPLL_VGA_MODE_DIS) == 0);
	} else {
		intel_de_write(dev_priv, DPLL_MD(pipe),
			       pipe_config->dpll_hw_state.dpll_md);
		intel_de_posting_read(dev_priv, DPLL_MD(pipe));
	}
}

static bool i9xx_has_pps(struct drm_i915_private *dev_priv)
{
	if (IS_I830(dev_priv))
		return false;

	return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
}

static void i9xx_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	i915_reg_t reg = DPLL(crtc->pipe);
	u32 dpll = crtc_state->dpll_hw_state.dpll;
	int i;

	assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);

	/* PLL is protected by panel, make sure we can write it */
	if (i9xx_has_pps(dev_priv))
		assert_panel_unlocked(dev_priv, crtc->pipe);

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	intel_de_write(dev_priv, reg, dpll & ~DPLL_VGA_MODE_DIS);
	intel_de_write(dev_priv, reg, dpll);

	/* Wait for the clocks to stabilize. */
	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	if (INTEL_GEN(dev_priv) >= 4) {
		intel_de_write(dev_priv, DPLL_MD(crtc->pipe),
			       crtc_state->dpll_hw_state.dpll_md);
	} else {
		/* The pixel multiplier can only be updated once the
		 * DPLL is enabled and the clocks are stable.
		 *
		 * So write it again.
		 */
		intel_de_write(dev_priv, reg, dpll);
	}

	/* We do this three times for luck */
	for (i = 0; i < 3; i++) {
		intel_de_write(dev_priv, reg, dpll);
		intel_de_posting_read(dev_priv, reg);
		udelay(150); /* wait for warmup */
	}
}

static void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* We keep both pipes (and their PLLs) enabled on 830, so don't disable them */
	if (IS_I830(dev_priv))
		return;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);

	intel_de_write(dev_priv, DPLL(pipe), DPLL_VGA_MODE_DIS);
	intel_de_posting_read(dev_priv, DPLL(pipe));
}

static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, (enum transcoder)pipe);

	val = DPLL_INTEGRATED_REF_CLK_VLV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	intel_de_write(dev_priv, DPLL(pipe), val);
	intel_de_posting_read(dev_priv, DPLL(pipe));
}

static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, (enum transcoder)pipe);

	val = DPLL_SSC_REF_CLK_CHV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	intel_de_write(dev_priv, DPLL(pipe), val);
	intel_de_posting_read(dev_priv, DPLL(pipe));

	vlv_dpio_get(dev_priv);

	/* Disable 10bit clock to display controller */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	val &= ~DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);

	vlv_dpio_put(dev_priv);
}

void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
			 struct intel_digital_port *dig_port,
			 unsigned int expected_mask)
{
	u32 port_mask;
	i915_reg_t dpll_reg;

	switch (dig_port->base.port) {
	case PORT_B:
		port_mask = DPLL_PORTB_READY_MASK;
		dpll_reg = DPLL(0);
		break;
	case PORT_C:
		port_mask = DPLL_PORTC_READY_MASK;
		dpll_reg = DPLL(0);
		expected_mask <<= 4;
		break;
	case PORT_D:
		port_mask = DPLL_PORTD_READY_MASK;
		dpll_reg = DPIO_PHY_STATUS;
		break;
	default:
		BUG();
	}

	if (intel_de_wait_for_register(dev_priv, dpll_reg,
				       port_mask, expected_mask, 1000))
		drm_WARN(&dev_priv->drm, 1,
			 "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
			 dig_port->base.base.base.id, dig_port->base.base.name,
			 intel_de_read(dev_priv, dpll_reg) & port_mask,
			 expected_mask);
}

static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val, pipeconf_val;

	/* Make sure PCH DPLL is enabled */
	assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll);

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, pipe);
	assert_fdi_rx_enabled(dev_priv, pipe);

	if (HAS_PCH_CPT(dev_priv)) {
		reg = TRANS_CHICKEN2(pipe);
		val = intel_de_read(dev_priv, reg);
		/*
		 * Workaround: Set the timing override bit
		 * before enabling the pch transcoder.
		 */
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
		/* Configure frame start delay to match the CPU */
		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
		val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
		intel_de_write(dev_priv, reg, val);
	}

	reg = PCH_TRANSCONF(pipe);
	val = intel_de_read(dev_priv, reg);
	pipeconf_val = intel_de_read(dev_priv, PIPECONF(pipe));

	if (HAS_PCH_IBX(dev_priv)) {
		/* Configure frame start delay to match the CPU */
		val &= ~TRANS_FRAME_START_DELAY_MASK;
		val |= TRANS_FRAME_START_DELAY(0);

		/*
		 * Make the BPC of the transcoder consistent with that of
		 * the pipeconf reg. For HDMI we must use 8bpc here for
		 * both 8bpc and 12bpc.
		 */
		val &= ~PIPECONF_BPC_MASK;
		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
			val |= PIPECONF_8BPC;
		else
			val |= pipeconf_val & PIPECONF_BPC_MASK;
	}

	val &= ~TRANS_INTERLACE_MASK;
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
		if (HAS_PCH_IBX(dev_priv) &&
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			val |= TRANS_LEGACY_INTERLACED_ILK;
		else
			val |= TRANS_INTERLACED;
	} else {
		val |= TRANS_PROGRESSIVE;
	}

	intel_de_write(dev_priv, reg, val | TRANS_ENABLE);
	if (intel_de_wait_for_set(dev_priv, reg, TRANS_STATE_ENABLE, 100))
		drm_err(&dev_priv->drm, "failed to enable transcoder %c\n",
			pipe_name(pipe));
}

static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
				      enum transcoder cpu_transcoder)
{
	u32 val, pipeconf_val;

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
	assert_fdi_rx_enabled(dev_priv, PIPE_A);

	val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A));
	/* Workaround: set timing override bit. */
	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
	/* Configure frame start delay to match the CPU */
	val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
	val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
	intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);

	val = TRANS_ENABLE;
	pipeconf_val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));

	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
	    PIPECONF_INTERLACED_ILK)
		val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	intel_de_write(dev_priv, LPT_TRANSCONF, val);
	if (intel_de_wait_for_set(dev_priv, LPT_TRANSCONF,
				  TRANS_STATE_ENABLE, 100))
		drm_err(&dev_priv->drm, "Failed to enable PCH transcoder\n");
}

static void ilk_disable_pch_transcoder(struct drm_i915_private *dev_priv,
				       enum pipe pipe)
{
	i915_reg_t reg;
	u32 val;

	/* FDI relies on the transcoder */
	assert_fdi_tx_disabled(dev_priv, pipe);
	assert_fdi_rx_disabled(dev_priv, pipe);

	/* Ports must be off as well */
	assert_pch_ports_disabled(dev_priv, pipe);

	reg = PCH_TRANSCONF(pipe);
	val = intel_de_read(dev_priv, reg);
	val &= ~TRANS_ENABLE;
	intel_de_write(dev_priv, reg, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_de_wait_for_clear(dev_priv, reg, TRANS_STATE_ENABLE, 50))
		drm_err(&dev_priv->drm, "failed to disable transcoder %c\n",
			pipe_name(pipe));

	if (HAS_PCH_CPT(dev_priv)) {
		/* Workaround: Clear the timing override chicken bit again. */
		reg = TRANS_CHICKEN2(pipe);
		val = intel_de_read(dev_priv, reg);
		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
		intel_de_write(dev_priv, reg, val);
	}
}

void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = intel_de_read(dev_priv, LPT_TRANSCONF);
	val &= ~TRANS_ENABLE;
	intel_de_write(dev_priv, LPT_TRANSCONF, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_de_wait_for_clear(dev_priv, LPT_TRANSCONF,
				    TRANS_STATE_ENABLE, 50))
		drm_err(&dev_priv->drm, "Failed to disable PCH transcoder\n");

	/* Workaround: clear timing override bit. */
	val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A));
1794 	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
1795 	intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);
1796 }
1797 
1798 enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
1799 {
1800 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1801 
1802 	if (HAS_PCH_LPT(dev_priv))
1803 		return PIPE_A;
1804 	else
1805 		return crtc->pipe;
1806 }
1807 
1808 static u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state)
1809 {
1810 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1811 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1812 	u32 mode_flags = crtc->mode_flags;
1813 
1814 	/*
1815 	 * From Gen 11, in case of DSI cmd mode the frame counter won't
1816 	 * have been updated at the beginning of TE; if we were to use
1817 	 * the hw counter, we would only find it updated on the
1818 	 * next TE, hence switch to the sw counter.
1819 	 */
1820 	if (mode_flags & (I915_MODE_FLAG_DSI_USE_TE0 | I915_MODE_FLAG_DSI_USE_TE1))
1821 		return 0;
1822 
1823 	/*
1824 	 * On i965gm the hardware frame counter reads
1825 	 * zero when the TV encoder is enabled :(
1826 	 */
1827 	if (IS_I965GM(dev_priv) &&
1828 	    (crtc_state->output_types & BIT(INTEL_OUTPUT_TVOUT)))
1829 		return 0;
1830 
1831 	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
1832 		return 0xffffffff; /* full 32 bit counter */
1833 	else if (INTEL_GEN(dev_priv) >= 3)
1834 		return 0xffffff; /* only 24 bits of frame count */
1835 	else
1836 		return 0; /* Gen2 doesn't have a hardware frame counter */
1837 }
1838 
1839 void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state)
1840 {
1841 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1842 
1843 	assert_vblank_disabled(&crtc->base);
1844 	drm_crtc_set_max_vblank_count(&crtc->base,
1845 				      intel_crtc_max_vblank_count(crtc_state));
1846 	drm_crtc_vblank_on(&crtc->base);
1847 }
1848 
1849 void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state)
1850 {
1851 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1852 
1853 	drm_crtc_vblank_off(&crtc->base);
1854 	assert_vblank_disabled(&crtc->base);
1855 }
1856 
1857 void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
1858 {
1859 	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
1860 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1861 	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
1862 	enum pipe pipe = crtc->pipe;
1863 	i915_reg_t reg;
1864 	u32 val;
1865 
1866 	drm_dbg_kms(&dev_priv->drm, "enabling pipe %c\n", pipe_name(pipe));
1867 
1868 	assert_planes_disabled(crtc);
1869 
1870 	/*
1871 	 * A pipe without a PLL won't actually be able to drive bits from
1872 	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
1873 	 * need the check.
1874 	 */
1875 	if (HAS_GMCH(dev_priv)) {
1876 		if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
1877 			assert_dsi_pll_enabled(dev_priv);
1878 		else
1879 			assert_pll_enabled(dev_priv, pipe);
1880 	} else {
1881 		if (new_crtc_state->has_pch_encoder) {
1882 			/* if driving the PCH, we need FDI enabled */
1883 			assert_fdi_rx_pll_enabled(dev_priv,
1884 						  intel_crtc_pch_transcoder(crtc));
1885 			assert_fdi_tx_pll_enabled(dev_priv,
1886 						  (enum pipe) cpu_transcoder);
1887 		}
1888 		/* FIXME: assert CPU port conditions for SNB+ */
1889 	}
1890 
1891 	trace_intel_pipe_enable(crtc);
1892 
1893 	reg = PIPECONF(cpu_transcoder);
1894 	val = intel_de_read(dev_priv, reg);
1895 	if (val & PIPECONF_ENABLE) {
1896 		/* we keep both pipes enabled on 830 */
1897 		drm_WARN_ON(&dev_priv->drm, !IS_I830(dev_priv));
1898 		return;
1899 	}
1900 
1901 	intel_de_write(dev_priv, reg, val | PIPECONF_ENABLE);
1902 	intel_de_posting_read(dev_priv, reg);
1903 
1904 	/*
1905 	 * Until the pipe starts PIPEDSL reads will return a stale value,
1906 	 * which causes an apparent vblank timestamp jump when PIPEDSL
1907 	 * resets to its proper value. That also messes up the frame count
1908 	 * when it's derived from the timestamps. So let's wait for the
1909 	 * pipe to start properly before we call drm_crtc_vblank_on()
1910 	 */
1911 	if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
1912 		intel_wait_for_pipe_scanline_moving(crtc);
1913 }
1914 
1915 void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
1916 {
1917 	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
1918 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1919 	enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
1920 	enum pipe pipe = crtc->pipe;
1921 	i915_reg_t reg;
1922 	u32 val;
1923 
1924 	drm_dbg_kms(&dev_priv->drm, "disabling pipe %c\n", pipe_name(pipe));
1925 
1926 	/*
1927 	 * Make sure planes won't keep trying to pump pixels to us,
1928 	 * or we might hang the display.
1929 	 */
1930 	assert_planes_disabled(crtc);
1931 
1932 	trace_intel_pipe_disable(crtc);
1933 
1934 	reg = PIPECONF(cpu_transcoder);
1935 	val = intel_de_read(dev_priv, reg);
1936 	if ((val & PIPECONF_ENABLE) == 0)
1937 		return;
1938 
1939 	/*
1940 	 * Double wide has implications for planes
1941 	 * so best keep it disabled when not needed.
1942 	 */
1943 	if (old_crtc_state->double_wide)
1944 		val &= ~PIPECONF_DOUBLE_WIDE;
1945 
1946 	/* Don't disable the pipe or pipe PLLs if they are still needed (i830) */
1947 	if (!IS_I830(dev_priv))
1948 		val &= ~PIPECONF_ENABLE;
1949 
1950 	intel_de_write(dev_priv, reg, val);
1951 	if ((val & PIPECONF_ENABLE) == 0)
1952 		intel_wait_for_pipe_off(old_crtc_state);
1953 }
1954 
1955 static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
1956 {
1957 	return IS_GEN(dev_priv, 2) ? 2048 : 4096;
1958 }
1959 
1960 static bool is_ccs_plane(const struct drm_framebuffer *fb, int plane)
1961 {
1962 	if (!is_ccs_modifier(fb->modifier))
1963 		return false;
1964 
1965 	return plane >= fb->format->num_planes / 2;
1966 }
1967 
1968 static bool is_gen12_ccs_modifier(u64 modifier)
1969 {
1970 	return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
1971 	       modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS;
1973 }
1974 
1975 static bool is_gen12_ccs_plane(const struct drm_framebuffer *fb, int plane)
1976 {
1977 	return is_gen12_ccs_modifier(fb->modifier) && is_ccs_plane(fb, plane);
1978 }
1979 
1980 static bool is_aux_plane(const struct drm_framebuffer *fb, int plane)
1981 {
1982 	if (is_ccs_modifier(fb->modifier))
1983 		return is_ccs_plane(fb, plane);
1984 
1985 	return plane == 1;
1986 }
1987 
1988 static int main_to_ccs_plane(const struct drm_framebuffer *fb, int main_plane)
1989 {
1990 	drm_WARN_ON(fb->dev, !is_ccs_modifier(fb->modifier) ||
1991 		    (main_plane && main_plane >= fb->format->num_planes / 2));
1992 
1993 	return fb->format->num_planes / 2 + main_plane;
1994 }
1995 
1996 static int ccs_to_main_plane(const struct drm_framebuffer *fb, int ccs_plane)
1997 {
1998 	drm_WARN_ON(fb->dev, !is_ccs_modifier(fb->modifier) ||
1999 		    ccs_plane < fb->format->num_planes / 2);
2000 
2001 	return ccs_plane - fb->format->num_planes / 2;
2002 }
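
/*
 * With the CCS modifiers the AUX planes occupy the second half of
 * fb->format->num_planes, so the two helpers above simply mirror the
 * plane index around num_planes / 2. For example (using the format
 * layouts defined further below): an RGB CCS fb has num_planes == 2,
 * pairing main plane 0 with CCS plane 1, while a gen12 MC CCS NV12 fb
 * has num_planes == 4, pairing main planes 0/1 with CCS planes 2/3.
 */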
2003 
2004 int intel_main_to_aux_plane(const struct drm_framebuffer *fb, int main_plane)
2005 {
2006 	struct drm_i915_private *i915 = to_i915(fb->dev);
2007 
2008 	if (is_ccs_modifier(fb->modifier))
2009 		return main_to_ccs_plane(fb, main_plane);
2010 	else if (INTEL_GEN(i915) < 11 &&
2011 		 intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier))
2012 		return 1;
2013 	else
2014 		return 0;
2015 }
2016 
2017 bool
2018 intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info,
2019 				    uint64_t modifier)
2020 {
2021 	return info->is_yuv &&
2022 	       info->num_planes == (is_ccs_modifier(modifier) ? 4 : 2);
2023 }
2024 
2025 static bool is_semiplanar_uv_plane(const struct drm_framebuffer *fb,
2026 				   int color_plane)
2027 {
2028 	return intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier) &&
2029 	       color_plane == 1;
2030 }
2031 
2032 static unsigned int
2033 intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
2034 {
2035 	struct drm_i915_private *dev_priv = to_i915(fb->dev);
2036 	unsigned int cpp = fb->format->cpp[color_plane];
2037 
2038 	switch (fb->modifier) {
2039 	case DRM_FORMAT_MOD_LINEAR:
2040 		return intel_tile_size(dev_priv);
2041 	case I915_FORMAT_MOD_X_TILED:
2042 		if (IS_GEN(dev_priv, 2))
2043 			return 128;
2044 		else
2045 			return 512;
2046 	case I915_FORMAT_MOD_Y_TILED_CCS:
2047 		if (is_ccs_plane(fb, color_plane))
2048 			return 128;
2049 		fallthrough;
2050 	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
2051 	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
2052 		if (is_ccs_plane(fb, color_plane))
2053 			return 64;
2054 		fallthrough;
2055 	case I915_FORMAT_MOD_Y_TILED:
2056 		if (IS_GEN(dev_priv, 2) || HAS_128_BYTE_Y_TILING(dev_priv))
2057 			return 128;
2058 		else
2059 			return 512;
2060 	case I915_FORMAT_MOD_Yf_TILED_CCS:
2061 		if (is_ccs_plane(fb, color_plane))
2062 			return 128;
2063 		fallthrough;
2064 	case I915_FORMAT_MOD_Yf_TILED:
2065 		switch (cpp) {
2066 		case 1:
2067 			return 64;
2068 		case 2:
2069 		case 4:
2070 			return 128;
2071 		case 8:
2072 		case 16:
2073 			return 256;
2074 		default:
2075 			MISSING_CASE(cpp);
2076 			return cpp;
2077 		}
2078 		break;
2079 	default:
2080 		MISSING_CASE(fb->modifier);
2081 		return cpp;
2082 	}
2083 }
2084 
2085 static unsigned int
2086 intel_tile_height(const struct drm_framebuffer *fb, int color_plane)
2087 {
2088 	if (is_gen12_ccs_plane(fb, color_plane))
2089 		return 1;
2090 
2091 	return intel_tile_size(to_i915(fb->dev)) /
2092 		intel_tile_width_bytes(fb, color_plane);
2093 }
2094 
2095 /* Return the tile dimensions in pixel units */
2096 static void intel_tile_dims(const struct drm_framebuffer *fb, int color_plane,
2097 			    unsigned int *tile_width,
2098 			    unsigned int *tile_height)
2099 {
2100 	unsigned int tile_width_bytes = intel_tile_width_bytes(fb, color_plane);
2101 	unsigned int cpp = fb->format->cpp[color_plane];
2102 
2103 	*tile_width = tile_width_bytes / cpp;
2104 	*tile_height = intel_tile_height(fb, color_plane);
2105 }
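
/*
 * Worked example (fb assumed for illustration): for a gen4+ X-tiled
 * XRGB8888 fb the tile row is 512 bytes and the tile size 4096 bytes,
 * so intel_tile_dims() reports a 512 / 4 = 128 pixel wide by
 * 4096 / 512 = 8 row tall tile.
 */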
2106 
2107 static unsigned int intel_tile_row_size(const struct drm_framebuffer *fb,
2108 					int color_plane)
2109 {
2110 	unsigned int tile_width, tile_height;
2111 
2112 	intel_tile_dims(fb, color_plane, &tile_width, &tile_height);
2113 
2114 	return fb->pitches[color_plane] * tile_height;
2115 }
2116 
2117 unsigned int
2118 intel_fb_align_height(const struct drm_framebuffer *fb,
2119 		      int color_plane, unsigned int height)
2120 {
2121 	unsigned int tile_height = intel_tile_height(fb, color_plane);
2122 
2123 	return ALIGN(height, tile_height);
2124 }
2125 
2126 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
2127 {
2128 	unsigned int size = 0;
2129 	int i;
2130 
2131 	for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
2132 		size += rot_info->plane[i].width * rot_info->plane[i].height;
2133 
2134 	return size;
2135 }
2136 
2137 unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info)
2138 {
2139 	unsigned int size = 0;
2140 	int i;
2141 
2142 	for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++)
2143 		size += rem_info->plane[i].width * rem_info->plane[i].height;
2144 
2145 	return size;
2146 }
2147 
2148 static void
2149 intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
2150 			const struct drm_framebuffer *fb,
2151 			unsigned int rotation)
2152 {
2153 	view->type = I915_GGTT_VIEW_NORMAL;
2154 	if (drm_rotation_90_or_270(rotation)) {
2155 		view->type = I915_GGTT_VIEW_ROTATED;
2156 		view->rotated = to_intel_framebuffer(fb)->rot_info;
2157 	}
2158 }
2159 
2160 static unsigned int intel_cursor_alignment(const struct drm_i915_private *dev_priv)
2161 {
2162 	if (IS_I830(dev_priv))
2163 		return 16 * 1024;
2164 	else if (IS_I85X(dev_priv))
2165 		return 256;
2166 	else if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
2167 		return 32;
2168 	else
2169 		return 4 * 1024;
2170 }
2171 
2172 static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
2173 {
2174 	if (INTEL_GEN(dev_priv) >= 9)
2175 		return 256 * 1024;
2176 	else if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) ||
2177 		 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
2178 		return 128 * 1024;
2179 	else if (INTEL_GEN(dev_priv) >= 4)
2180 		return 4 * 1024;
2181 	else
2182 		return 0;
2183 }
2184 
2185 static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
2186 					 int color_plane)
2187 {
2188 	struct drm_i915_private *dev_priv = to_i915(fb->dev);
2189 
2190 	/* AUX_DIST needs only 4K alignment */
2191 	if ((INTEL_GEN(dev_priv) < 12 && is_aux_plane(fb, color_plane)) ||
2192 	    is_ccs_plane(fb, color_plane))
2193 		return 4096;
2194 
2195 	switch (fb->modifier) {
2196 	case DRM_FORMAT_MOD_LINEAR:
2197 		return intel_linear_alignment(dev_priv);
2198 	case I915_FORMAT_MOD_X_TILED:
2199 		if (INTEL_GEN(dev_priv) >= 9)
2200 			return 256 * 1024;
2201 		return 0;
2202 	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
2203 		if (is_semiplanar_uv_plane(fb, color_plane))
2204 			return intel_tile_row_size(fb, color_plane);
2205 		fallthrough;
2206 	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
2207 		return 16 * 1024;
2208 	case I915_FORMAT_MOD_Y_TILED_CCS:
2209 	case I915_FORMAT_MOD_Yf_TILED_CCS:
2210 	case I915_FORMAT_MOD_Y_TILED:
2211 		if (INTEL_GEN(dev_priv) >= 12 &&
2212 		    is_semiplanar_uv_plane(fb, color_plane))
2213 			return intel_tile_row_size(fb, color_plane);
2214 		fallthrough;
2215 	case I915_FORMAT_MOD_Yf_TILED:
2216 		return 1 * 1024 * 1024;
2217 	default:
2218 		MISSING_CASE(fb->modifier);
2219 		return 0;
2220 	}
2221 }
2222 
2223 static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
2224 {
2225 	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
2226 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
2227 
2228 	return INTEL_GEN(dev_priv) < 4 ||
2229 		(plane->has_fbc &&
2230 		 plane_state->view.type == I915_GGTT_VIEW_NORMAL);
2231 }
2232 
2233 struct i915_vma *
2234 intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
2235 			   const struct i915_ggtt_view *view,
2236 			   bool uses_fence,
2237 			   unsigned long *out_flags)
2238 {
2239 	struct drm_device *dev = fb->dev;
2240 	struct drm_i915_private *dev_priv = to_i915(dev);
2241 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
2242 	intel_wakeref_t wakeref;
2243 	struct i915_vma *vma;
2244 	unsigned int pinctl;
2245 	u32 alignment;
2246 
2247 	if (drm_WARN_ON(dev, !i915_gem_object_is_framebuffer(obj)))
2248 		return ERR_PTR(-EINVAL);
2249 
2250 	alignment = intel_surf_alignment(fb, 0);
2251 	if (drm_WARN_ON(dev, alignment && !is_power_of_2(alignment)))
2252 		return ERR_PTR(-EINVAL);
2253 
2254 	/* Note that the w/a also requires 64 PTE of padding following the
2255 	 * bo. We currently fill all unused PTE with the shadow page and so
2256 	 * we should always have valid PTE following the scanout preventing
2257 	 * the VT-d warning.
2258 	 */
2259 	if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
2260 		alignment = 256 * 1024;
2261 
2262 	/*
2263 	 * Global gtt pte registers are special registers which actually forward
2264 	 * writes to a chunk of system memory. Which means that there is no risk
2265 	 * that the register values disappear as soon as we call
2266 	 * intel_runtime_pm_put(), so it is correct to wrap only the
2267 	 * pin/unpin/fence and not more.
2268 	 */
2269 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
2270 
2271 	atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
2272 
2273 	/*
2274 	 * Valleyview is definitely limited to scanning out the first
2275 	 * 512MiB. Let's presume this behaviour was inherited from the
2276 	 * g4x display engine and that all earlier gen are similarly
2277 	 * limited. Testing suggests that it is a little more
2278 	 * complicated than this. For example, Cherryview appears quite
2279 	 * happy to scan out from anywhere within its global aperture.
2280 	 */
2281 	pinctl = 0;
2282 	if (HAS_GMCH(dev_priv))
2283 		pinctl |= PIN_MAPPABLE;
2284 
2285 	vma = i915_gem_object_pin_to_display_plane(obj,
2286 						   alignment, view, pinctl);
2287 	if (IS_ERR(vma))
2288 		goto err;
2289 
2290 	if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
2291 		int ret;
2292 
2293 		/*
2294 		 * Install a fence for tiled scan-out. Pre-i965 always needs a
2295 		 * fence, whereas 965+ only requires a fence if using
2296 		 * framebuffer compression.  For simplicity, we always, when
2297 		 * possible, install a fence as the cost is not that onerous.
2298 		 *
2299 		 * If we fail to fence the tiled scanout, then either the
2300 		 * modeset will reject the change (which is highly unlikely as
2301 		 * the affected systems, all but one, do not have unmappable
2302 		 * space) or we will not be able to enable full powersaving
2303 		 * techniques (also likely not to apply due to various limits
2304 		 * FBC and the like impose on the size of the buffer, which
2305 		 * presumably we violated anyway with this unmappable buffer).
2306 		 * Anyway, it is presumably better to stumble onwards with
2307 		 * something and try to run the system in a "less than optimal"
2308 		 * mode that matches the user configuration.
2309 		 */
2310 		ret = i915_vma_pin_fence(vma);
2311 		if (ret != 0 && INTEL_GEN(dev_priv) < 4) {
2312 			i915_gem_object_unpin_from_display_plane(vma);
2313 			vma = ERR_PTR(ret);
2314 			goto err;
2315 		}
2316 
2317 		if (ret == 0 && vma->fence)
2318 			*out_flags |= PLANE_HAS_FENCE;
2319 	}
2320 
2321 	i915_vma_get(vma);
2322 err:
2323 	atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
2324 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2325 	return vma;
2326 }
2327 
2328 void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
2329 {
2330 	i915_gem_object_lock(vma->obj, NULL);
2331 	if (flags & PLANE_HAS_FENCE)
2332 		i915_vma_unpin_fence(vma);
2333 	i915_gem_object_unpin_from_display_plane(vma);
2334 	i915_gem_object_unlock(vma->obj);
2335 
2336 	i915_vma_put(vma);
2337 }
2338 
2339 static int intel_fb_pitch(const struct drm_framebuffer *fb, int color_plane,
2340 			  unsigned int rotation)
2341 {
2342 	if (drm_rotation_90_or_270(rotation))
2343 		return to_intel_framebuffer(fb)->rotated[color_plane].pitch;
2344 	else
2345 		return fb->pitches[color_plane];
2346 }
2347 
2348 /*
2349  * Convert the x/y offsets into a linear offset.
2350  * Only valid with 0/180 degree rotation, which is fine since linear
2351  * offset is only used with linear buffers on pre-hsw and tiled buffers
2352  * with gen2/3, and 90/270 degree rotations aren't supported on any of them.
2353  */
2354 u32 intel_fb_xy_to_linear(int x, int y,
2355 			  const struct intel_plane_state *state,
2356 			  int color_plane)
2357 {
2358 	const struct drm_framebuffer *fb = state->hw.fb;
2359 	unsigned int cpp = fb->format->cpp[color_plane];
2360 	unsigned int pitch = state->color_plane[color_plane].stride;
2361 
2362 	return y * pitch + x * cpp;
2363 }
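
/*
 * For instance (values assumed for illustration), with a linear
 * XRGB8888 plane whose stride is 5120 bytes:
 *
 *	u32 offset = intel_fb_xy_to_linear(100, 50, state, 0);
 *
 * yields 50 * 5120 + 100 * 4 = 256400 bytes from the plane start.
 */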
2364 
2365 /*
2366  * Add the x/y offsets derived from fb->offsets[] to the user
2367  * specified plane src x/y offsets. The resulting x/y offsets
2368  * specify the start of scanout from the beginning of the gtt mapping.
2369  */
2370 void intel_add_fb_offsets(int *x, int *y,
2371 			  const struct intel_plane_state *state,
2372 			  int color_plane)
2374 {
2375 	*x += state->color_plane[color_plane].x;
2376 	*y += state->color_plane[color_plane].y;
2377 }
2378 
2379 static u32 intel_adjust_tile_offset(int *x, int *y,
2380 				    unsigned int tile_width,
2381 				    unsigned int tile_height,
2382 				    unsigned int tile_size,
2383 				    unsigned int pitch_tiles,
2384 				    u32 old_offset,
2385 				    u32 new_offset)
2386 {
2387 	unsigned int pitch_pixels = pitch_tiles * tile_width;
2388 	unsigned int tiles;
2389 
2390 	WARN_ON(old_offset & (tile_size - 1));
2391 	WARN_ON(new_offset & (tile_size - 1));
2392 	WARN_ON(new_offset > old_offset);
2393 
2394 	tiles = (old_offset - new_offset) / tile_size;
2395 
2396 	*y += tiles / pitch_tiles * tile_height;
2397 	*x += tiles % pitch_tiles * tile_width;
2398 
2399 	/* minimize x in case it got needlessly big */
2400 	*y += *x / pitch_pixels * tile_height;
2401 	*x %= pitch_pixels;
2402 
2403 	return new_offset;
2404 }
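
/*
 * A worked example of the adjustment above (numbers assumed for
 * illustration): with 4096 byte tiles, 128x8 pixel tile dimensions and
 * pitch_tiles = 10, moving from old_offset = 53 * 4096 down to
 * new_offset = 0 frees 53 tiles, which the function converts to
 * y += 53 / 10 * 8 = 40 and x += 53 % 10 * 128 = 384.
 */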
2405 
2406 static bool is_surface_linear(const struct drm_framebuffer *fb, int color_plane)
2407 {
2408 	return fb->modifier == DRM_FORMAT_MOD_LINEAR ||
2409 	       is_gen12_ccs_plane(fb, color_plane);
2410 }
2411 
2412 static u32 intel_adjust_aligned_offset(int *x, int *y,
2413 				       const struct drm_framebuffer *fb,
2414 				       int color_plane,
2415 				       unsigned int rotation,
2416 				       unsigned int pitch,
2417 				       u32 old_offset, u32 new_offset)
2418 {
2419 	struct drm_i915_private *dev_priv = to_i915(fb->dev);
2420 	unsigned int cpp = fb->format->cpp[color_plane];
2421 
2422 	drm_WARN_ON(&dev_priv->drm, new_offset > old_offset);
2423 
2424 	if (!is_surface_linear(fb, color_plane)) {
2425 		unsigned int tile_size, tile_width, tile_height;
2426 		unsigned int pitch_tiles;
2427 
2428 		tile_size = intel_tile_size(dev_priv);
2429 		intel_tile_dims(fb, color_plane, &tile_width, &tile_height);
2430 
2431 		if (drm_rotation_90_or_270(rotation)) {
2432 			pitch_tiles = pitch / tile_height;
2433 			swap(tile_width, tile_height);
2434 		} else {
2435 			pitch_tiles = pitch / (tile_width * cpp);
2436 		}
2437 
2438 		intel_adjust_tile_offset(x, y, tile_width, tile_height,
2439 					 tile_size, pitch_tiles,
2440 					 old_offset, new_offset);
2441 	} else {
2442 		old_offset += *y * pitch + *x * cpp;
2443 
2444 		*y = (old_offset - new_offset) / pitch;
2445 		*x = ((old_offset - new_offset) - *y * pitch) / cpp;
2446 	}
2447 
2448 	return new_offset;
2449 }
2450 
2451 /*
2452  * Adjust the tile offset by moving the difference into
2453  * the x/y offsets.
2454  */
2455 static u32 intel_plane_adjust_aligned_offset(int *x, int *y,
2456 					     const struct intel_plane_state *state,
2457 					     int color_plane,
2458 					     u32 old_offset, u32 new_offset)
2459 {
2460 	return intel_adjust_aligned_offset(x, y, state->hw.fb, color_plane,
2461 					   state->hw.rotation,
2462 					   state->color_plane[color_plane].stride,
2463 					   old_offset, new_offset);
2464 }
2465 
2466 /*
2467  * Computes the aligned offset to the base tile and adjusts
2468  * x, y. Bytes per pixel is assumed to be a power-of-two.
2469  *
2470  * In the 90/270 rotated case, x and y are assumed
2471  * to be already rotated to match the rotated GTT view, and
2472  * pitch is the tile_height aligned framebuffer height.
2473  *
2474  * This function is used when computing the derived information
2475  * under intel_framebuffer, so using any of that information
2476  * here is not allowed. Anything under drm_framebuffer can be
2477  * used. This is why the user has to pass in the pitch since it
2478  * is specified in the rotated orientation.
2479  */
2480 static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv,
2481 					int *x, int *y,
2482 					const struct drm_framebuffer *fb,
2483 					int color_plane,
2484 					unsigned int pitch,
2485 					unsigned int rotation,
2486 					u32 alignment)
2487 {
2488 	unsigned int cpp = fb->format->cpp[color_plane];
2489 	u32 offset, offset_aligned;
2490 
2491 	if (!is_surface_linear(fb, color_plane)) {
2492 		unsigned int tile_size, tile_width, tile_height;
2493 		unsigned int tile_rows, tiles, pitch_tiles;
2494 
2495 		tile_size = intel_tile_size(dev_priv);
2496 		intel_tile_dims(fb, color_plane, &tile_width, &tile_height);
2497 
2498 		if (drm_rotation_90_or_270(rotation)) {
2499 			pitch_tiles = pitch / tile_height;
2500 			swap(tile_width, tile_height);
2501 		} else {
2502 			pitch_tiles = pitch / (tile_width * cpp);
2503 		}
2504 
2505 		tile_rows = *y / tile_height;
2506 		*y %= tile_height;
2507 
2508 		tiles = *x / tile_width;
2509 		*x %= tile_width;
2510 
2511 		offset = (tile_rows * pitch_tiles + tiles) * tile_size;
2512 
2513 		offset_aligned = offset;
2514 		if (alignment)
2515 			offset_aligned = rounddown(offset_aligned, alignment);
2516 
2517 		intel_adjust_tile_offset(x, y, tile_width, tile_height,
2518 					 tile_size, pitch_tiles,
2519 					 offset, offset_aligned);
2520 	} else {
2521 		offset = *y * pitch + *x * cpp;
2522 		offset_aligned = offset;
2523 		if (alignment) {
2524 			offset_aligned = rounddown(offset_aligned, alignment);
2525 			*y = (offset % alignment) / pitch;
2526 			*x = ((offset % alignment) - *y * pitch) / cpp;
2527 		} else {
2528 			*y = *x = 0;
2529 		}
2530 	}
2531 
2532 	return offset_aligned;
2533 }
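
/*
 * Worked example for the linear branch above (values assumed for
 * illustration): with pitch = 5120, cpp = 4, (x, y) = (100, 50) and
 * alignment = 4096, the raw offset is 50 * 5120 + 100 * 4 = 256400,
 * which rounds down to offset_aligned = 62 * 4096 = 253952. The 2448
 * byte remainder is pushed back into the coordinates as
 * y = 2448 / 5120 = 0 and x = 2448 / 4 = 612.
 */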
2534 
2535 static u32 intel_plane_compute_aligned_offset(int *x, int *y,
2536 					      const struct intel_plane_state *state,
2537 					      int color_plane)
2538 {
2539 	struct intel_plane *intel_plane = to_intel_plane(state->uapi.plane);
2540 	struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
2541 	const struct drm_framebuffer *fb = state->hw.fb;
2542 	unsigned int rotation = state->hw.rotation;
2543 	int pitch = state->color_plane[color_plane].stride;
2544 	u32 alignment;
2545 
2546 	if (intel_plane->id == PLANE_CURSOR)
2547 		alignment = intel_cursor_alignment(dev_priv);
2548 	else
2549 		alignment = intel_surf_alignment(fb, color_plane);
2550 
2551 	return intel_compute_aligned_offset(dev_priv, x, y, fb, color_plane,
2552 					    pitch, rotation, alignment);
2553 }
2554 
2555 /* Convert the fb->offsets[] into x/y offsets */
2556 static int intel_fb_offset_to_xy(int *x, int *y,
2557 				 const struct drm_framebuffer *fb,
2558 				 int color_plane)
2559 {
2560 	struct drm_i915_private *dev_priv = to_i915(fb->dev);
2561 	unsigned int height;
2562 	u32 alignment;
2563 
2564 	if (INTEL_GEN(dev_priv) >= 12 &&
2565 	    is_semiplanar_uv_plane(fb, color_plane))
2566 		alignment = intel_tile_row_size(fb, color_plane);
2567 	else if (fb->modifier != DRM_FORMAT_MOD_LINEAR)
2568 		alignment = intel_tile_size(dev_priv);
2569 	else
2570 		alignment = 0;
2571 
2572 	if (alignment != 0 && fb->offsets[color_plane] % alignment) {
2573 		drm_dbg_kms(&dev_priv->drm,
2574 			    "Misaligned offset 0x%08x for color plane %d\n",
2575 			    fb->offsets[color_plane], color_plane);
2576 		return -EINVAL;
2577 	}
2578 
2579 	height = drm_framebuffer_plane_height(fb->height, fb, color_plane);
2580 	height = ALIGN(height, intel_tile_height(fb, color_plane));
2581 
2582 	/* Catch potential overflows early */
2583 	if (add_overflows_t(u32, mul_u32_u32(height, fb->pitches[color_plane]),
2584 			    fb->offsets[color_plane])) {
2585 		drm_dbg_kms(&dev_priv->drm,
2586 			    "Bad offset 0x%08x or pitch %d for color plane %d\n",
2587 			    fb->offsets[color_plane], fb->pitches[color_plane],
2588 			    color_plane);
2589 		return -ERANGE;
2590 	}
2591 
2592 	*x = 0;
2593 	*y = 0;
2594 
2595 	intel_adjust_aligned_offset(x, y,
2596 				    fb, color_plane, DRM_MODE_ROTATE_0,
2597 				    fb->pitches[color_plane],
2598 				    fb->offsets[color_plane], 0);
2599 
2600 	return 0;
2601 }
2602 
2603 static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
2604 {
2605 	switch (fb_modifier) {
2606 	case I915_FORMAT_MOD_X_TILED:
2607 		return I915_TILING_X;
2608 	case I915_FORMAT_MOD_Y_TILED:
2609 	case I915_FORMAT_MOD_Y_TILED_CCS:
2610 	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
2611 	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
2612 		return I915_TILING_Y;
2613 	default:
2614 		return I915_TILING_NONE;
2615 	}
2616 }
2617 
2618 /*
2619  * From the Sky Lake PRM:
2620  * "The Color Control Surface (CCS) contains the compression status of
2621  *  the cache-line pairs. The compression state of the cache-line pair
2622  *  is specified by 2 bits in the CCS. Each CCS cache-line represents
2623  *  an area on the main surface of 16 x16 sets of 128 byte Y-tiled
2624  *  cache-line-pairs. CCS is always Y tiled."
2625  *
2626  * Since cache line pairs refers to horizontally adjacent cache lines,
2627  * each cache line in the CCS corresponds to an area of 32x16 cache
2628  * lines on the main surface. Since each pixel is 4 bytes, this gives
2629  * us a ratio of one byte in the CCS for each 8x16 pixels in the
2630  * main surface.
2631  */
2632 static const struct drm_format_info skl_ccs_formats[] = {
2633 	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
2634 	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
2635 	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
2636 	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
2637 	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
2638 	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
2639 	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
2640 	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
2641 };
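
/*
 * Sanity check of the ratio above: with .cpp = { 4, 1 }, .hsub = 8 and
 * .vsub = 16, an 8x16 pixel block of the main surface is
 * 8 * 16 * 4 = 512 bytes, covered by exactly one CCS byte.
 */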
2642 
2643 /*
2644  * Gen-12 compression uses 4 bits of CCS data for each cache line pair in the
2645  * main surface. And each 64B CCS cache line represents an area of 4x1 Y-tiles
2646  * in the main surface. With 4 byte pixels and each Y-tile having dimensions of
2647  * 32x32 pixels, the ratio turns out to 1B in the CCS for every 2x32 pixels in
2648  * the main surface.
2649  */
2650 static const struct drm_format_info gen12_ccs_formats[] = {
2651 	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
2652 	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
2653 	  .hsub = 1, .vsub = 1, },
2654 	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
2655 	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
2656 	  .hsub = 1, .vsub = 1, },
2657 	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
2658 	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
2659 	  .hsub = 1, .vsub = 1, .has_alpha = true },
2660 	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
2661 	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
2662 	  .hsub = 1, .vsub = 1, .has_alpha = true },
2663 	{ .format = DRM_FORMAT_YUYV, .num_planes = 2,
2664 	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
2665 	  .hsub = 2, .vsub = 1, .is_yuv = true },
2666 	{ .format = DRM_FORMAT_YVYU, .num_planes = 2,
2667 	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
2668 	  .hsub = 2, .vsub = 1, .is_yuv = true },
2669 	{ .format = DRM_FORMAT_UYVY, .num_planes = 2,
2670 	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
2671 	  .hsub = 2, .vsub = 1, .is_yuv = true },
2672 	{ .format = DRM_FORMAT_VYUY, .num_planes = 2,
2673 	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
2674 	  .hsub = 2, .vsub = 1, .is_yuv = true },
2675 	{ .format = DRM_FORMAT_NV12, .num_planes = 4,
2676 	  .char_per_block = { 1, 2, 1, 1 }, .block_w = { 1, 1, 4, 4 }, .block_h = { 1, 1, 1, 1 },
2677 	  .hsub = 2, .vsub = 2, .is_yuv = true },
2678 	{ .format = DRM_FORMAT_P010, .num_planes = 4,
2679 	  .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
2680 	  .hsub = 2, .vsub = 2, .is_yuv = true },
2681 	{ .format = DRM_FORMAT_P012, .num_planes = 4,
2682 	  .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
2683 	  .hsub = 2, .vsub = 2, .is_yuv = true },
2684 	{ .format = DRM_FORMAT_P016, .num_planes = 4,
2685 	  .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
2686 	  .hsub = 2, .vsub = 2, .is_yuv = true },
2687 };
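
/*
 * Note that the entries above only encode the horizontal half of the
 * 2x32 ratio (e.g. .char_per_block = { 4, 1 } with .block_w = { 1, 2 }
 * for the RGB formats, i.e. 1 CCS byte per 2 pixels); the vertical
 * 1:32 part is applied separately via the fixed *vsub = 32 in
 * intel_fb_plane_get_subsampling().
 */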
2688 
2689 static const struct drm_format_info *
2690 lookup_format_info(const struct drm_format_info formats[],
2691 		   int num_formats, u32 format)
2692 {
2693 	int i;
2694 
2695 	for (i = 0; i < num_formats; i++) {
2696 		if (formats[i].format == format)
2697 			return &formats[i];
2698 	}
2699 
2700 	return NULL;
2701 }
2702 
2703 static const struct drm_format_info *
2704 intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
2705 {
2706 	switch (cmd->modifier[0]) {
2707 	case I915_FORMAT_MOD_Y_TILED_CCS:
2708 	case I915_FORMAT_MOD_Yf_TILED_CCS:
2709 		return lookup_format_info(skl_ccs_formats,
2710 					  ARRAY_SIZE(skl_ccs_formats),
2711 					  cmd->pixel_format);
2712 	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
2713 	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
2714 		return lookup_format_info(gen12_ccs_formats,
2715 					  ARRAY_SIZE(gen12_ccs_formats),
2716 					  cmd->pixel_format);
2717 	default:
2718 		return NULL;
2719 	}
2720 }
2721 
2722 bool is_ccs_modifier(u64 modifier)
2723 {
2724 	return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
2725 	       modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS ||
2726 	       modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
2727 	       modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
2728 }
2729 
2730 static int gen12_ccs_aux_stride(struct drm_framebuffer *fb, int ccs_plane)
2731 {
2732 	return DIV_ROUND_UP(fb->pitches[ccs_to_main_plane(fb, ccs_plane)],
2733 			    512) * 64;
2734 }
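
/*
 * For example (pitch assumed for illustration): a main surface pitch
 * of 5120 bytes gives an AUX stride of DIV_ROUND_UP(5120, 512) * 64 =
 * 640 bytes, i.e. the 8:1 ratio of four 128 byte wide Y-tiles (512
 * bytes of main surface pitch) per 64 byte CCS cache line.
 */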
2735 
2736 u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
2737 			      u32 pixel_format, u64 modifier)
2738 {
2739 	struct intel_crtc *crtc;
2740 	struct intel_plane *plane;
2741 
2742 	/*
2743 	 * We assume the primary plane for pipe A has
2744 	 * the highest stride limits of them all;
2745 	 * in case pipe A is disabled, use the first pipe from pipe_mask.
2746 	 */
2747 	crtc = intel_get_first_crtc(dev_priv);
2748 	if (!crtc)
2749 		return 0;
2750 
2751 	plane = to_intel_plane(crtc->base.primary);
2752 
2753 	return plane->max_stride(plane, pixel_format, modifier,
2754 				 DRM_MODE_ROTATE_0);
2755 }
2756 
2757 static
2758 u32 intel_fb_max_stride(struct drm_i915_private *dev_priv,
2759 			u32 pixel_format, u64 modifier)
2760 {
2761 	/*
2762 	 * Arbitrary limit for gen4+ chosen to match the
2763 	 * render engine max stride.
2764 	 *
2765 	 * The new CCS hash mode makes remapping impossible
2766 	 * The new CCS hash mode makes remapping impossible.
2767 	if (!is_ccs_modifier(modifier)) {
2768 		if (INTEL_GEN(dev_priv) >= 7)
2769 			return 256*1024;
2770 		else if (INTEL_GEN(dev_priv) >= 4)
2771 			return 128*1024;
2772 	}
2773 
2774 	return intel_plane_fb_max_stride(dev_priv, pixel_format, modifier);
2775 }
2776 
2777 static u32
2778 intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
2779 {
2780 	struct drm_i915_private *dev_priv = to_i915(fb->dev);
2781 	u32 tile_width;
2782 
2783 	if (is_surface_linear(fb, color_plane)) {
2784 		u32 max_stride = intel_plane_fb_max_stride(dev_priv,
2785 							   fb->format->format,
2786 							   fb->modifier);
2787 
2788 		/*
2789 		 * To make remapping with linear generally feasible
2790 		 * we need the stride to be page aligned.
2791 		 */
2792 		if (fb->pitches[color_plane] > max_stride &&
2793 		    !is_ccs_modifier(fb->modifier))
2794 			return intel_tile_size(dev_priv);
2795 		else
2796 			return 64;
2797 	}
2798 
2799 	tile_width = intel_tile_width_bytes(fb, color_plane);
2800 	if (is_ccs_modifier(fb->modifier)) {
2801 		/*
2802 		 * Display WA #0531: skl,bxt,kbl,glk
2803 		 *
2804 		 * Render decompression and plane width > 3840
2805 		 * combined with horizontal panning requires the
2806 		 * plane stride to be a multiple of 4. We'll just
2807 		 * require the entire fb to accommodate that to avoid
2808 		 * potential runtime errors at plane configuration time.
2809 		 */
2810 		if (IS_GEN(dev_priv, 9) && color_plane == 0 && fb->width > 3840)
2811 			tile_width *= 4;
2812 		/*
2813 		 * The main surface pitch must be padded to a multiple of four
2814 		 * tile widths.
2815 		 */
2816 		else if (INTEL_GEN(dev_priv) >= 12)
2817 			tile_width *= 4;
2818 	}
2819 	return tile_width;
2820 }
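
/*
 * E.g. for the main plane of a gen12 RC CCS fb (assuming the usual
 * 128 byte Y-tile row width on those platforms), the INTEL_GEN >= 12
 * branch above pads the returned stride alignment to 128 * 4 = 512
 * bytes.
 */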
2821 
2822 bool intel_plane_can_remap(const struct intel_plane_state *plane_state)
2823 {
2824 	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
2825 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
2826 	const struct drm_framebuffer *fb = plane_state->hw.fb;
2827 	int i;
2828 
2829 	/* We don't want to deal with remapping with cursors */
2830 	if (plane->id == PLANE_CURSOR)
2831 		return false;
2832 
2833 	/*
2834 	 * The display engine limits already match/exceed the
2835 	 * render engine limits, so not much point in remapping.
2836 	 * Would also need to deal with the fence POT alignment
2837 	 * and gen2 2KiB GTT tile size.
2838 	 */
2839 	if (INTEL_GEN(dev_priv) < 4)
2840 		return false;
2841 
2842 	/*
2843 	 * The new CCS hash mode isn't compatible with remapping as
2844 	 * the virtual address of the pages affects the compressed data.
2845 	 */
2846 	if (is_ccs_modifier(fb->modifier))
2847 		return false;
2848 
2849 	/* Linear needs a page aligned stride for remapping */
2850 	if (fb->modifier == DRM_FORMAT_MOD_LINEAR) {
2851 		unsigned int alignment = intel_tile_size(dev_priv) - 1;
2852 
2853 		for (i = 0; i < fb->format->num_planes; i++) {
2854 			if (fb->pitches[i] & alignment)
2855 				return false;
2856 		}
2857 	}
2858 
2859 	return true;
2860 }
2861 
2862 static bool intel_plane_needs_remap(const struct intel_plane_state *plane_state)
2863 {
2864 	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
2865 	const struct drm_framebuffer *fb = plane_state->hw.fb;
2866 	unsigned int rotation = plane_state->hw.rotation;
2867 	u32 stride, max_stride;
2868 
2869 	/*
2870 	 * No remapping for invisible planes since we don't have
2871 	 * an actual source viewport to remap.
2872 	 */
2873 	if (!plane_state->uapi.visible)
2874 		return false;
2875 
2876 	if (!intel_plane_can_remap(plane_state))
2877 		return false;
2878 
2879 	/*
2880 	 * FIXME: aux plane limits on gen9+ are
2881 	 * unclear in Bspec, for now no checking.
2882 	 */
2883 	stride = intel_fb_pitch(fb, 0, rotation);
2884 	max_stride = plane->max_stride(plane, fb->format->format,
2885 				       fb->modifier, rotation);
2886 
2887 	return stride > max_stride;
2888 }
2889 
2890 static void
2891 intel_fb_plane_get_subsampling(int *hsub, int *vsub,
2892 			       const struct drm_framebuffer *fb,
2893 			       int color_plane)
2894 {
2895 	int main_plane;
2896 
2897 	if (color_plane == 0) {
2898 		*hsub = 1;
2899 		*vsub = 1;
2900 
2901 		return;
2902 	}
2903 
2904 	/*
2905 	 * TODO: Deduce the subsampling from the char block for all CCS
2906 	 * formats and planes.
2907 	 */
2908 	if (!is_gen12_ccs_plane(fb, color_plane)) {
2909 		*hsub = fb->format->hsub;
2910 		*vsub = fb->format->vsub;
2911 
2912 		return;
2913 	}
2914 
2915 	main_plane = ccs_to_main_plane(fb, color_plane);
2916 	*hsub = drm_format_info_block_width(fb->format, color_plane) /
2917 		drm_format_info_block_width(fb->format, main_plane);
2918 
2919 	/*
2920 	 * The min stride check in the core framebuffer_check() function
2921 	 * assumes that format->hsub applies to every plane except for the
2922 	 * first plane. That's incorrect for the CCS AUX plane of the first
2923 	 * plane, but for the above check to pass we must define the block
2924 	 * width with that subsampling applied to it. Adjust the width here
2925 	 * accordingly, so we can calculate the actual subsampling factor.
2926 	 */
2927 	if (main_plane == 0)
2928 		*hsub *= fb->format->hsub;
2929 
2930 	*vsub = 32;
2931 }

2932 static int
2933 intel_fb_check_ccs_xy(struct drm_framebuffer *fb, int ccs_plane, int x, int y)
2934 {
2935 	struct drm_i915_private *i915 = to_i915(fb->dev);
2936 	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
2937 	int main_plane;
2938 	int hsub, vsub;
2939 	int tile_width, tile_height;
2940 	int ccs_x, ccs_y;
2941 	int main_x, main_y;
2942 
2943 	if (!is_ccs_plane(fb, ccs_plane))
2944 		return 0;
2945 
2946 	intel_tile_dims(fb, ccs_plane, &tile_width, &tile_height);
2947 	intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane);
2948 
2949 	tile_width *= hsub;
2950 	tile_height *= vsub;
2951 
2952 	ccs_x = (x * hsub) % tile_width;
2953 	ccs_y = (y * vsub) % tile_height;
2954 
2955 	main_plane = ccs_to_main_plane(fb, ccs_plane);
2956 	main_x = intel_fb->normal[main_plane].x % tile_width;
2957 	main_y = intel_fb->normal[main_plane].y % tile_height;
2958 
2959 	/*
2960 	 * CCS doesn't have its own x/y offset register, so the intra CCS tile
2961 	 * x/y offsets must match between CCS and the main surface.
2962 	 */
2963 	if (main_x != ccs_x || main_y != ccs_y) {
2964 		drm_dbg_kms(&i915->drm,
2965 			    "Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n",
2966 			    main_x, main_y,
2967 			    ccs_x, ccs_y,
2968 			    intel_fb->normal[main_plane].x,
2969 			    intel_fb->normal[main_plane].y,
2970 			    x, y);
2971 		return -EINVAL;
2972 	}
2973 
2974 	return 0;
2975 }
2976 
2977 static void
2978 intel_fb_plane_dims(int *w, int *h, struct drm_framebuffer *fb, int color_plane)
2979 {
2980 	int main_plane = is_ccs_plane(fb, color_plane) ?
2981 			 ccs_to_main_plane(fb, color_plane) : 0;
2982 	int main_hsub, main_vsub;
2983 	int hsub, vsub;
2984 
2985 	intel_fb_plane_get_subsampling(&main_hsub, &main_vsub, fb, main_plane);
2986 	intel_fb_plane_get_subsampling(&hsub, &vsub, fb, color_plane);
2987 	*w = fb->width / main_hsub / hsub;
2988 	*h = fb->height / main_vsub / vsub;
2989 }
2990 
2991 /*
2992  * Setup the rotated view for an FB plane and return the size the GTT mapping
2993  * requires for this view.
2994  */
2995 static u32
2996 setup_fb_rotation(int plane, const struct intel_remapped_plane_info *plane_info,
2997 		  u32 gtt_offset_rotated, int x, int y,
2998 		  unsigned int width, unsigned int height,
2999 		  unsigned int tile_size,
3000 		  unsigned int tile_width, unsigned int tile_height,
3001 		  struct drm_framebuffer *fb)
3002 {
3003 	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
3004 	struct intel_rotation_info *rot_info = &intel_fb->rot_info;
3005 	unsigned int pitch_tiles;
3006 	struct drm_rect r;
3007 
3008 	/* Y or Yf modifiers required for 90/270 rotation */
3009 	if (fb->modifier != I915_FORMAT_MOD_Y_TILED &&
3010 	    fb->modifier != I915_FORMAT_MOD_Yf_TILED)
3011 		return 0;
3012 
3013 	if (drm_WARN_ON(fb->dev, plane >= ARRAY_SIZE(rot_info->plane)))
3014 		return 0;
3015 
3016 	rot_info->plane[plane] = *plane_info;
3017 
3018 	intel_fb->rotated[plane].pitch = plane_info->height * tile_height;
3019 
3020 	/* rotate the x/y offsets to match the GTT view */
3021 	drm_rect_init(&r, x, y, width, height);
3022 	drm_rect_rotate(&r,
3023 			plane_info->width * tile_width,
3024 			plane_info->height * tile_height,
3025 			DRM_MODE_ROTATE_270);
3026 	x = r.x1;
3027 	y = r.y1;
3028 
3029 	/* rotate the tile dimensions to match the GTT view */
3030 	pitch_tiles = intel_fb->rotated[plane].pitch / tile_height;
3031 	swap(tile_width, tile_height);
3032 
3033 	/*
3034 	 * We only keep the x/y offsets, so push all of the
3035 	 * gtt offset into the x/y offsets.
3036 	 */
3037 	intel_adjust_tile_offset(&x, &y,
3038 				 tile_width, tile_height,
3039 				 tile_size, pitch_tiles,
3040 				 gtt_offset_rotated * tile_size, 0);
3041 
3042 	/*
3043 	 * First pixel of the framebuffer from
3044 	 * the start of the rotated gtt mapping.
3045 	 */
3046 	intel_fb->rotated[plane].x = x;
3047 	intel_fb->rotated[plane].y = y;
3048 
3049 	return plane_info->width * plane_info->height;
3050 }
3051 
3052 static int
3053 intel_fill_fb_info(struct drm_i915_private *dev_priv,
3054 		   struct drm_framebuffer *fb)
3055 {
3056 	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
3057 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
3058 	u32 gtt_offset_rotated = 0;
3059 	unsigned int max_size = 0;
3060 	int i, num_planes = fb->format->num_planes;
3061 	unsigned int tile_size = intel_tile_size(dev_priv);
3062 
3063 	for (i = 0; i < num_planes; i++) {
3064 		unsigned int width, height;
3065 		unsigned int cpp, size;
3066 		u32 offset;
3067 		int x, y;
3068 		int ret;
3069 
3070 		cpp = fb->format->cpp[i];
3071 		intel_fb_plane_dims(&width, &height, fb, i);
3072 
3073 		ret = intel_fb_offset_to_xy(&x, &y, fb, i);
3074 		if (ret) {
3075 			drm_dbg_kms(&dev_priv->drm,
3076 				    "bad fb plane %d offset: 0x%x\n",
3077 				    i, fb->offsets[i]);
3078 			return ret;
3079 		}
3080 
3081 		ret = intel_fb_check_ccs_xy(fb, i, x, y);
3082 		if (ret)
3083 			return ret;
3084 
3085 		/*
3086 		 * The fence (if used) is aligned to the start of the object
3087 		 * so having the framebuffer wrap around across the edge of the
3088 		 * fenced region doesn't really work. We have no API to configure
3089 		 * the fence start offset within the object (nor could we probably
3090 		 * on gen2/3). So it's easier if we just require that the
3091 		 * fb layout agrees with the fence layout. We already check that the
3092 		 * fb stride matches the fence stride elsewhere.
3093 		 */
3094 		if (i == 0 && i915_gem_object_is_tiled(obj) &&
3095 		    (x + width) * cpp > fb->pitches[i]) {
3096 			drm_dbg_kms(&dev_priv->drm,
3097 				    "bad fb plane %d offset: 0x%x\n",
3098 				    i, fb->offsets[i]);
3099 			return -EINVAL;
3100 		}
3101 
3102 		/*
3103 		 * First pixel of the framebuffer from
3104 		 * the start of the normal gtt mapping.
3105 		 */
3106 		intel_fb->normal[i].x = x;
3107 		intel_fb->normal[i].y = y;
3108 
3109 		offset = intel_compute_aligned_offset(dev_priv, &x, &y, fb, i,
3110 						      fb->pitches[i],
3111 						      DRM_MODE_ROTATE_0,
3112 						      tile_size);
3113 		offset /= tile_size;
3114 
3115 		if (!is_surface_linear(fb, i)) {
3116 			struct intel_remapped_plane_info plane_info;
3117 			unsigned int tile_width, tile_height;
3118 
3119 			intel_tile_dims(fb, i, &tile_width, &tile_height);
3120 
3121 			plane_info.offset = offset;
3122 			plane_info.stride = DIV_ROUND_UP(fb->pitches[i],
3123 							 tile_width * cpp);
3124 			plane_info.width = DIV_ROUND_UP(x + width, tile_width);
3125 			plane_info.height = DIV_ROUND_UP(y + height,
3126 							 tile_height);
3127 
3128 			/* how many tiles does this plane need */
3129 			size = plane_info.stride * plane_info.height;
3130 			/*
3131 			 * If the plane isn't horizontally tile aligned,
3132 			 * we need one more tile.
3133 			 */
3134 			if (x != 0)
3135 				size++;
3136 
3137 			gtt_offset_rotated +=
3138 				setup_fb_rotation(i, &plane_info,
3139 						  gtt_offset_rotated,
3140 						  x, y, width, height,
3141 						  tile_size,
3142 						  tile_width, tile_height,
3143 						  fb);
3144 		} else {
3145 			size = DIV_ROUND_UP((y + height) * fb->pitches[i] +
3146 					    x * cpp, tile_size);
3147 		}
3148 
3149 		/* how many tiles are needed in total in the bo */
3150 		max_size = max(max_size, offset + size);
3151 	}
3152 
3153 	if (mul_u32_u32(max_size, tile_size) > obj->base.size) {
3154 		drm_dbg_kms(&dev_priv->drm,
3155 			    "fb too big for bo (need %llu bytes, have %zu bytes)\n",
3156 			    mul_u32_u32(max_size, tile_size), obj->base.size);
3157 		return -EINVAL;
3158 	}
3159 
3160 	return 0;
3161 }
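
/*
 * A concrete example of the size accounting above (fb geometry assumed
 * for illustration): a linear 1920x1080 XRGB8888 fb with a 7680 byte
 * pitch and x = y = 0 needs DIV_ROUND_UP(1080 * 7680, 4096) = 2025
 * page sized tiles, so the backing object must span at least
 * 2025 * 4096 bytes.
 */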
3162 
3163 static void
3164 intel_plane_remap_gtt(struct intel_plane_state *plane_state)
3165 {
3166 	struct drm_i915_private *dev_priv =
3167 		to_i915(plane_state->uapi.plane->dev);
3168 	struct drm_framebuffer *fb = plane_state->hw.fb;
3169 	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
3170 	struct intel_rotation_info *info = &plane_state->view.rotated;
3171 	unsigned int rotation = plane_state->hw.rotation;
3172 	int i, num_planes = fb->format->num_planes;
3173 	unsigned int tile_size = intel_tile_size(dev_priv);
3174 	unsigned int src_x, src_y;
3175 	unsigned int src_w, src_h;
3176 	u32 gtt_offset = 0;
3177 
3178 	memset(&plane_state->view, 0, sizeof(plane_state->view));
3179 	plane_state->view.type = drm_rotation_90_or_270(rotation) ?
3180 		I915_GGTT_VIEW_ROTATED : I915_GGTT_VIEW_REMAPPED;
3181 
3182 	src_x = plane_state->uapi.src.x1 >> 16;
3183 	src_y = plane_state->uapi.src.y1 >> 16;
3184 	src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
3185 	src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
3186 
3187 	drm_WARN_ON(&dev_priv->drm, is_ccs_modifier(fb->modifier));
3188 
3189 	/* Make src coordinates relative to the viewport */
3190 	drm_rect_translate(&plane_state->uapi.src,
3191 			   -(src_x << 16), -(src_y << 16));
3192 
3193 	/* Rotate src coordinates to match rotated GTT view */
3194 	if (drm_rotation_90_or_270(rotation))
3195 		drm_rect_rotate(&plane_state->uapi.src,
3196 				src_w << 16, src_h << 16,
3197 				DRM_MODE_ROTATE_270);
3198 
3199 	for (i = 0; i < num_planes; i++) {
3200 		unsigned int hsub = i ? fb->format->hsub : 1;
3201 		unsigned int vsub = i ? fb->format->vsub : 1;
3202 		unsigned int cpp = fb->format->cpp[i];
3203 		unsigned int tile_width, tile_height;
3204 		unsigned int width, height;
3205 		unsigned int pitch_tiles;
3206 		unsigned int x, y;
3207 		u32 offset;
3208 
3209 		intel_tile_dims(fb, i, &tile_width, &tile_height);
3210 
3211 		x = src_x / hsub;
3212 		y = src_y / vsub;
3213 		width = src_w / hsub;
3214 		height = src_h / vsub;
3215 
3216 		/*
3217 		 * First pixel of the src viewport from the
3218 		 * start of the normal gtt mapping.
3219 		 */
3220 		x += intel_fb->normal[i].x;
3221 		y += intel_fb->normal[i].y;
3222 
3223 		offset = intel_compute_aligned_offset(dev_priv, &x, &y,
3224 						      fb, i, fb->pitches[i],
3225 						      DRM_MODE_ROTATE_0, tile_size);
3226 		offset /= tile_size;
3227 
3228 		drm_WARN_ON(&dev_priv->drm, i >= ARRAY_SIZE(info->plane));
3229 		info->plane[i].offset = offset;
3230 		info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i],
3231 						     tile_width * cpp);
3232 		info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
3233 		info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);
3234 
3235 		if (drm_rotation_90_or_270(rotation)) {
3236 			struct drm_rect r;
3237 
3238 			/* rotate the x/y offsets to match the GTT view */
3239 			drm_rect_init(&r, x, y, width, height);
3240 			drm_rect_rotate(&r,
3241 					info->plane[i].width * tile_width,
3242 					info->plane[i].height * tile_height,
3243 					DRM_MODE_ROTATE_270);
3244 			x = r.x1;
3245 			y = r.y1;
3246 
3247 			pitch_tiles = info->plane[i].height;
3248 			plane_state->color_plane[i].stride = pitch_tiles * tile_height;
3249 
3250 			/* rotate the tile dimensions to match the GTT view */
3251 			swap(tile_width, tile_height);
3252 		} else {
3253 			pitch_tiles = info->plane[i].width;
3254 			plane_state->color_plane[i].stride = pitch_tiles * tile_width * cpp;
3255 		}
3256 
3257 		/*
3258 		 * We only keep the x/y offsets, so push all of the
3259 		 * gtt offset into the x/y offsets.
3260 		 */
3261 		intel_adjust_tile_offset(&x, &y,
3262 					 tile_width, tile_height,
3263 					 tile_size, pitch_tiles,
3264 					 gtt_offset * tile_size, 0);
3265 
3266 		gtt_offset += info->plane[i].width * info->plane[i].height;
3267 
3268 		plane_state->color_plane[i].offset = 0;
3269 		plane_state->color_plane[i].x = x;
3270 		plane_state->color_plane[i].y = y;
3271 	}
3272 }
3273 
3274 static int
3275 intel_plane_compute_gtt(struct intel_plane_state *plane_state)
3276 {
3277 	const struct intel_framebuffer *fb =
3278 		to_intel_framebuffer(plane_state->hw.fb);
3279 	unsigned int rotation = plane_state->hw.rotation;
3280 	int i, num_planes;
3281 
3282 	if (!fb)
3283 		return 0;
3284 
3285 	num_planes = fb->base.format->num_planes;
3286 
3287 	if (intel_plane_needs_remap(plane_state)) {
3288 		intel_plane_remap_gtt(plane_state);
3289 
3290 		/*
3291 		 * Sometimes even remapping can't overcome
3292 		 * the stride limitations :( Can happen with
3293 		 * big plane sizes and suitably misaligned
3294 		 * offsets.
3295 		 */
3296 		return intel_plane_check_stride(plane_state);
3297 	}
3298 
3299 	intel_fill_fb_ggtt_view(&plane_state->view, &fb->base, rotation);
3300 
3301 	for (i = 0; i < num_planes; i++) {
3302 		plane_state->color_plane[i].stride = intel_fb_pitch(&fb->base, i, rotation);
3303 		plane_state->color_plane[i].offset = 0;
3304 
3305 		if (drm_rotation_90_or_270(rotation)) {
3306 			plane_state->color_plane[i].x = fb->rotated[i].x;
3307 			plane_state->color_plane[i].y = fb->rotated[i].y;
3308 		} else {
3309 			plane_state->color_plane[i].x = fb->normal[i].x;
3310 			plane_state->color_plane[i].y = fb->normal[i].y;
3311 		}
3312 	}
3313 
3314 	/* Rotate src coordinates to match rotated GTT view */
3315 	if (drm_rotation_90_or_270(rotation))
3316 		drm_rect_rotate(&plane_state->uapi.src,
3317 				fb->base.width << 16, fb->base.height << 16,
3318 				DRM_MODE_ROTATE_270);
3319 
3320 	return intel_plane_check_stride(plane_state);
3321 }
3322 
3323 static int i9xx_format_to_fourcc(int format)
3324 {
3325 	switch (format) {
3326 	case DISPPLANE_8BPP:
3327 		return DRM_FORMAT_C8;
3328 	case DISPPLANE_BGRA555:
3329 		return DRM_FORMAT_ARGB1555;
3330 	case DISPPLANE_BGRX555:
3331 		return DRM_FORMAT_XRGB1555;
3332 	case DISPPLANE_BGRX565:
3333 		return DRM_FORMAT_RGB565;
3334 	default:
3335 	case DISPPLANE_BGRX888:
3336 		return DRM_FORMAT_XRGB8888;
3337 	case DISPPLANE_RGBX888:
3338 		return DRM_FORMAT_XBGR8888;
3339 	case DISPPLANE_BGRA888:
3340 		return DRM_FORMAT_ARGB8888;
3341 	case DISPPLANE_RGBA888:
3342 		return DRM_FORMAT_ABGR8888;
3343 	case DISPPLANE_BGRX101010:
3344 		return DRM_FORMAT_XRGB2101010;
3345 	case DISPPLANE_RGBX101010:
3346 		return DRM_FORMAT_XBGR2101010;
3347 	case DISPPLANE_BGRA101010:
3348 		return DRM_FORMAT_ARGB2101010;
3349 	case DISPPLANE_RGBA101010:
3350 		return DRM_FORMAT_ABGR2101010;
3351 	case DISPPLANE_RGBX161616:
3352 		return DRM_FORMAT_XBGR16161616F;
3353 	}
3354 }
3355 
3356 int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
3357 {
3358 	switch (format) {
3359 	case PLANE_CTL_FORMAT_RGB_565:
3360 		return DRM_FORMAT_RGB565;
3361 	case PLANE_CTL_FORMAT_NV12:
3362 		return DRM_FORMAT_NV12;
3363 	case PLANE_CTL_FORMAT_XYUV:
3364 		return DRM_FORMAT_XYUV8888;
3365 	case PLANE_CTL_FORMAT_P010:
3366 		return DRM_FORMAT_P010;
3367 	case PLANE_CTL_FORMAT_P012:
3368 		return DRM_FORMAT_P012;
3369 	case PLANE_CTL_FORMAT_P016:
3370 		return DRM_FORMAT_P016;
3371 	case PLANE_CTL_FORMAT_Y210:
3372 		return DRM_FORMAT_Y210;
3373 	case PLANE_CTL_FORMAT_Y212:
3374 		return DRM_FORMAT_Y212;
3375 	case PLANE_CTL_FORMAT_Y216:
3376 		return DRM_FORMAT_Y216;
3377 	case PLANE_CTL_FORMAT_Y410:
3378 		return DRM_FORMAT_XVYU2101010;
3379 	case PLANE_CTL_FORMAT_Y412:
3380 		return DRM_FORMAT_XVYU12_16161616;
3381 	case PLANE_CTL_FORMAT_Y416:
3382 		return DRM_FORMAT_XVYU16161616;
3383 	default:
3384 	case PLANE_CTL_FORMAT_XRGB_8888:
3385 		if (rgb_order) {
3386 			if (alpha)
3387 				return DRM_FORMAT_ABGR8888;
3388 			else
3389 				return DRM_FORMAT_XBGR8888;
3390 		} else {
3391 			if (alpha)
3392 				return DRM_FORMAT_ARGB8888;
3393 			else
3394 				return DRM_FORMAT_XRGB8888;
3395 		}
3396 	case PLANE_CTL_FORMAT_XRGB_2101010:
3397 		if (rgb_order) {
3398 			if (alpha)
3399 				return DRM_FORMAT_ABGR2101010;
3400 			else
3401 				return DRM_FORMAT_XBGR2101010;
3402 		} else {
3403 			if (alpha)
3404 				return DRM_FORMAT_ARGB2101010;
3405 			else
3406 				return DRM_FORMAT_XRGB2101010;
3407 		}
3408 	case PLANE_CTL_FORMAT_XRGB_16161616F:
3409 		if (rgb_order) {
3410 			if (alpha)
3411 				return DRM_FORMAT_ABGR16161616F;
3412 			else
3413 				return DRM_FORMAT_XBGR16161616F;
3414 		} else {
3415 			if (alpha)
3416 				return DRM_FORMAT_ARGB16161616F;
3417 			else
3418 				return DRM_FORMAT_XRGB16161616F;
3419 		}
3420 	}
3421 }
3422 
3423 static struct i915_vma *
3424 initial_plane_vma(struct drm_i915_private *i915,
3425 		  struct intel_initial_plane_config *plane_config)
3426 {
3427 	struct drm_i915_gem_object *obj;
3428 	struct i915_vma *vma;
3429 	u32 base, size;
3430 
3431 	if (plane_config->size == 0)
3432 		return NULL;
3433 
3434 	base = round_down(plane_config->base,
3435 			  I915_GTT_MIN_ALIGNMENT);
3436 	size = round_up(plane_config->base + plane_config->size,
3437 			I915_GTT_MIN_ALIGNMENT);
3438 	size -= base;
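	/*
	 * The BIOS fb may not be suitably aligned, so round the base down
	 * and the end up so that the object covers the whole fb.
	 */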
3439 
3440 	/*
3441 	 * If the FB is too big, just don't use it since fbdev is not very
3442 	 * important and we should probably use that space with FBC or other
3443 	 * features.
3444 	 */
3445 	if (size * 2 > i915->stolen_usable_size)
3446 		return NULL;
3447 
3448 	obj = i915_gem_object_create_stolen_for_preallocated(i915, base, size);
3449 	if (IS_ERR(obj))
3450 		return NULL;
3451 
3452 	/*
3453 	 * Mark it WT ahead of time to avoid changing the
3454 	 * cache_level during fbdev initialization. The
3455 	 * unbind there would get stuck waiting for rcu.
3456 	 */
3457 	i915_gem_object_set_cache_coherency(obj, HAS_WT(i915) ?
3458 					    I915_CACHE_WT : I915_CACHE_NONE);
3459 
3460 	switch (plane_config->tiling) {
3461 	case I915_TILING_NONE:
3462 		break;
3463 	case I915_TILING_X:
3464 	case I915_TILING_Y:
3465 		obj->tiling_and_stride =
3466 			plane_config->fb->base.pitches[0] |
3467 			plane_config->tiling;
3468 		break;
3469 	default:
3470 		MISSING_CASE(plane_config->tiling);
3471 		goto err_obj;
3472 	}
3473 
3474 	vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
3475 	if (IS_ERR(vma))
3476 		goto err_obj;
3477 
3478 	if (i915_ggtt_pin(vma, NULL, 0, PIN_MAPPABLE | PIN_OFFSET_FIXED | base))
3479 		goto err_obj;
3480 
3481 	if (i915_gem_object_is_tiled(obj) &&
3482 	    !i915_vma_is_map_and_fenceable(vma))
3483 		goto err_obj;
3484 
3485 	return vma;
3486 
3487 err_obj:
3488 	i915_gem_object_put(obj);
3489 	return NULL;
3490 }
3491 
3492 static bool
3493 intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
3494 			      struct intel_initial_plane_config *plane_config)
3495 {
3496 	struct drm_device *dev = crtc->base.dev;
3497 	struct drm_i915_private *dev_priv = to_i915(dev);
3498 	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
3499 	struct drm_framebuffer *fb = &plane_config->fb->base;
3500 	struct i915_vma *vma;
3501 
3502 	switch (fb->modifier) {
3503 	case DRM_FORMAT_MOD_LINEAR:
3504 	case I915_FORMAT_MOD_X_TILED:
3505 	case I915_FORMAT_MOD_Y_TILED:
3506 		break;
3507 	default:
3508 		drm_dbg(&dev_priv->drm,
3509 			"Unsupported modifier for initial FB: 0x%llx\n",
3510 			fb->modifier);
3511 		return false;
3512 	}
3513 
3514 	vma = initial_plane_vma(dev_priv, plane_config);
3515 	if (!vma)
3516 		return false;
3517 
3518 	mode_cmd.pixel_format = fb->format->format;
3519 	mode_cmd.width = fb->width;
3520 	mode_cmd.height = fb->height;
3521 	mode_cmd.pitches[0] = fb->pitches[0];
3522 	mode_cmd.modifier[0] = fb->modifier;
3523 	mode_cmd.flags = DRM_MODE_FB_MODIFIERS;
3524 
3525 	if (intel_framebuffer_init(to_intel_framebuffer(fb),
3526 				   vma->obj, &mode_cmd)) {
3527 		drm_dbg_kms(&dev_priv->drm, "intel fb init failed\n");
3528 		goto err_vma;
3529 	}
3530 
3531 	plane_config->vma = vma;
3532 	return true;
3533 
3534 err_vma:
3535 	i915_vma_put(vma);
3536 	return false;
3537 }
3538 
3539 static void
3540 intel_set_plane_visible(struct intel_crtc_state *crtc_state,
3541 			struct intel_plane_state *plane_state,
3542 			bool visible)
3543 {
3544 	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
3545 
3546 	plane_state->uapi.visible = visible;
3547 
3548 	if (visible)
3549 		crtc_state->uapi.plane_mask |= drm_plane_mask(&plane->base);
3550 	else
3551 		crtc_state->uapi.plane_mask &= ~drm_plane_mask(&plane->base);
3552 }
3553 
3554 static void fixup_active_planes(struct intel_crtc_state *crtc_state)
3555 {
3556 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
3557 	struct drm_plane *plane;
3558 
3559 	/*
3560 	 * active_planes would alias if multiple "primary" or cursor planes
3561 	 * have been used on the same (or wrong) pipe. plane_mask uses
3562 	 * unique ids, hence we can use that to reconstruct active_planes.
3563 	 */
3564 	crtc_state->active_planes = 0;
3565 
3566 	drm_for_each_plane_mask(plane, &dev_priv->drm,
3567 				crtc_state->uapi.plane_mask)
3568 		crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
3569 }
3570 
3571 static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
3572 					 struct intel_plane *plane)
3573 {
3574 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3575 	struct intel_crtc_state *crtc_state =
3576 		to_intel_crtc_state(crtc->base.state);
3577 	struct intel_plane_state *plane_state =
3578 		to_intel_plane_state(plane->base.state);
3579 
3580 	drm_dbg_kms(&dev_priv->drm,
3581 		    "Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
3582 		    plane->base.base.id, plane->base.name,
3583 		    crtc->base.base.id, crtc->base.name);
3584 
3585 	intel_set_plane_visible(crtc_state, plane_state, false);
3586 	fixup_active_planes(crtc_state);
3587 	crtc_state->data_rate[plane->id] = 0;
3588 	crtc_state->min_cdclk[plane->id] = 0;
3589 
3590 	if (plane->id == PLANE_PRIMARY)
3591 		hsw_disable_ips(crtc_state);
3592 
3593 	/*
3594 	 * Vblank time updates from the shadow to live plane control register
3595 	 * are blocked if the memory self-refresh mode is active at that
3596 	 * moment. So to make sure the plane gets truly disabled, first
3597 	 * disable the self-refresh mode. The self-refresh enable bit in turn
3598 	 * will be checked/applied by the HW only at the next frame start
3599 	 * event which is after the vblank start event, so we need to have a
3600 	 * wait-for-vblank between disabling the plane and the pipe.
3601 	 */
3602 	if (HAS_GMCH(dev_priv) &&
3603 	    intel_set_memory_cxsr(dev_priv, false))
3604 		intel_wait_for_vblank(dev_priv, crtc->pipe);
3605 
3606 	/*
3607 	 * Gen2 reports pipe underruns whenever all planes are disabled.
3608 	 * So disable underrun reporting before all the planes get disabled.
3609 	 */
3610 	if (IS_GEN(dev_priv, 2) && !crtc_state->active_planes)
3611 		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
3612 
3613 	intel_disable_plane(plane, crtc_state);
3614 }
3615 
3616 static struct intel_frontbuffer *
3617 to_intel_frontbuffer(struct drm_framebuffer *fb)
3618 {
3619 	return fb ? to_intel_framebuffer(fb)->frontbuffer : NULL;
3620 }
3621 
3622 static void
3623 intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
3624 			     struct intel_initial_plane_config *plane_config)
3625 {
3626 	struct drm_device *dev = intel_crtc->base.dev;
3627 	struct drm_i915_private *dev_priv = to_i915(dev);
3628 	struct drm_crtc *c;
3629 	struct drm_plane *primary = intel_crtc->base.primary;
3630 	struct drm_plane_state *plane_state = primary->state;
3631 	struct intel_plane *intel_plane = to_intel_plane(primary);
3632 	struct intel_plane_state *intel_state =
3633 		to_intel_plane_state(plane_state);
3634 	struct intel_crtc_state *crtc_state =
3635 		to_intel_crtc_state(intel_crtc->base.state);
3636 	struct drm_framebuffer *fb;
3637 	struct i915_vma *vma;
3638 
3639 	if (!plane_config->fb)
3640 		return;
3641 
3642 	if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
3643 		fb = &plane_config->fb->base;
3644 		vma = plane_config->vma;
3645 		goto valid_fb;
3646 	}
3647 
3648 	/*
3649 	 * Failed to alloc the obj; check whether we should share
3650 	 * an fb with another CRTC instead.
3651 	 */
3652 	for_each_crtc(dev, c) {
3653 		struct intel_plane_state *state;
3654 
3655 		if (c == &intel_crtc->base)
3656 			continue;
3657 
3658 		if (!to_intel_crtc_state(c->state)->uapi.active)
3659 			continue;
3660 
3661 		state = to_intel_plane_state(c->primary->state);
3662 		if (!state->vma)
3663 			continue;
3664 
3665 		if (intel_plane_ggtt_offset(state) == plane_config->base) {
3666 			fb = state->hw.fb;
3667 			vma = state->vma;
3668 			goto valid_fb;
3669 		}
3670 	}
3671 
3672 	/*
3673 	 * We've failed to reconstruct the BIOS FB.  Current display state
3674 	 * indicates that the primary plane is visible, but has a NULL FB,
3675 	 * which will lead to problems later if we don't fix it up.  The
3676 	 * simplest solution is to just disable the primary plane now and
3677 	 * pretend the BIOS never had it enabled.
3678 	 */
3679 	intel_plane_disable_noatomic(intel_crtc, intel_plane);
3680 	if (crtc_state->bigjoiner) {
3681 		struct intel_crtc *slave =
3682 			crtc_state->bigjoiner_linked_crtc;
3683 		intel_plane_disable_noatomic(slave, to_intel_plane(slave->base.primary));
3684 	}
3685 
3686 	return;
3687 
3688 valid_fb:
3689 	intel_state->hw.rotation = plane_config->rotation;
3690 	intel_fill_fb_ggtt_view(&intel_state->view, fb,
3691 				intel_state->hw.rotation);
3692 	intel_state->color_plane[0].stride =
3693 		intel_fb_pitch(fb, 0, intel_state->hw.rotation);
3694 
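	/*
	 * Take an extra pin + reference for the plane state so the BIOS
	 * fb stays alive until the first real modeset takes over.
	 */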
3695 	__i915_vma_pin(vma);
3696 	intel_state->vma = i915_vma_get(vma);
3697 	if (intel_plane_uses_fence(intel_state) && i915_vma_pin_fence(vma) == 0)
3698 		if (vma->fence)
3699 			intel_state->flags |= PLANE_HAS_FENCE;
3700 
3701 	plane_state->src_x = 0;
3702 	plane_state->src_y = 0;
3703 	plane_state->src_w = fb->width << 16;
3704 	plane_state->src_h = fb->height << 16;
3705 
3706 	plane_state->crtc_x = 0;
3707 	plane_state->crtc_y = 0;
3708 	plane_state->crtc_w = fb->width;
3709 	plane_state->crtc_h = fb->height;
3710 
3711 	intel_state->uapi.src = drm_plane_state_src(plane_state);
3712 	intel_state->uapi.dst = drm_plane_state_dest(plane_state);
3713 
3714 	if (plane_config->tiling)
3715 		dev_priv->preserve_bios_swizzle = true;
3716 
3717 	plane_state->fb = fb;
3718 	drm_framebuffer_get(fb);
3719 
3720 	plane_state->crtc = &intel_crtc->base;
3721 	intel_plane_copy_uapi_to_hw_state(intel_state, intel_state,
3722 					  intel_crtc);
3723 
3724 	intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);
3725 
3726 	atomic_or(to_intel_plane(primary)->frontbuffer_bit,
3727 		  &to_intel_frontbuffer(fb)->bits);
3728 }
3729 
3730 
3731 static bool
3732 skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state,
3733 			       int main_x, int main_y, u32 main_offset,
3734 			       int ccs_plane)
3735 {
3736 	const struct drm_framebuffer *fb = plane_state->hw.fb;
3737 	int aux_x = plane_state->color_plane[ccs_plane].x;
3738 	int aux_y = plane_state->color_plane[ccs_plane].y;
3739 	u32 aux_offset = plane_state->color_plane[ccs_plane].offset;
3740 	u32 alignment = intel_surf_alignment(fb, ccs_plane);
3741 	int hsub;
3742 	int vsub;
3743 
3744 	intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane);
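	/*
	 * Walk the AUX offset backwards, one alignment step at a time,
	 * until its x/y coordinates line up with the main surface's
	 * (or we run out of offset to give back).
	 */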
3745 	while (aux_offset >= main_offset && aux_y <= main_y) {
3746 		int x, y;
3747 
3748 		if (aux_x == main_x && aux_y == main_y)
3749 			break;
3750 
3751 		if (aux_offset == 0)
3752 			break;
3753 
3754 		x = aux_x / hsub;
3755 		y = aux_y / vsub;
3756 		aux_offset = intel_plane_adjust_aligned_offset(&x, &y,
3757 							       plane_state,
3758 							       ccs_plane,
3759 							       aux_offset,
3760 							       aux_offset -
3761 								alignment);
3762 		aux_x = x * hsub + aux_x % hsub;
3763 		aux_y = y * vsub + aux_y % vsub;
3764 	}
3765 
3766 	if (aux_x != main_x || aux_y != main_y)
3767 		return false;
3768 
3769 	plane_state->color_plane[ccs_plane].offset = aux_offset;
3770 	plane_state->color_plane[ccs_plane].x = aux_x;
3771 	plane_state->color_plane[ccs_plane].y = aux_y;
3772 
3773 	return true;
3774 }
3775 
3776 unsigned int
3777 intel_plane_fence_y_offset(const struct intel_plane_state *plane_state)
3778 {
3779 	int x = 0, y = 0;
3780 
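	/*
	 * Express the surface offset as an (x, y) position;
	 * only the y component matters for the fence.
	 */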
3781 	intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
3782 					  plane_state->color_plane[0].offset, 0);
3783 
3784 	return y;
3785 }
3786 
3787 static int intel_plane_min_width(struct intel_plane *plane,
3788 				 const struct drm_framebuffer *fb,
3789 				 int color_plane,
3790 				 unsigned int rotation)
3791 {
3792 	if (plane->min_width)
3793 		return plane->min_width(fb, color_plane, rotation);
3794 	else
3795 		return 1;
3796 }
3797 
3798 static int intel_plane_max_width(struct intel_plane *plane,
3799 				 const struct drm_framebuffer *fb,
3800 				 int color_plane,
3801 				 unsigned int rotation)
3802 {
3803 	if (plane->max_width)
3804 		return plane->max_width(fb, color_plane, rotation);
3805 	else
3806 		return INT_MAX;
3807 }
3808 
3809 static int intel_plane_max_height(struct intel_plane *plane,
3810 				  const struct drm_framebuffer *fb,
3811 				  int color_plane,
3812 				  unsigned int rotation)
3813 {
3814 	if (plane->max_height)
3815 		return plane->max_height(fb, color_plane, rotation);
3816 	else
3817 		return INT_MAX;
3818 }
3819 
3820 static int skl_check_main_surface(struct intel_plane_state *plane_state)
3821 {
3822 	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
3823 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
3824 	const struct drm_framebuffer *fb = plane_state->hw.fb;
3825 	unsigned int rotation = plane_state->hw.rotation;
3826 	int x = plane_state->uapi.src.x1 >> 16;
3827 	int y = plane_state->uapi.src.y1 >> 16;
3828 	int w = drm_rect_width(&plane_state->uapi.src) >> 16;
3829 	int h = drm_rect_height(&plane_state->uapi.src) >> 16;
3830 	int min_width = intel_plane_min_width(plane, fb, 0, rotation);
3831 	int max_width = intel_plane_max_width(plane, fb, 0, rotation);
3832 	int max_height = intel_plane_max_height(plane, fb, 0, rotation);
3833 	int aux_plane = intel_main_to_aux_plane(fb, 0);
3834 	u32 aux_offset = plane_state->color_plane[aux_plane].offset;
3835 	u32 alignment, offset;
3836 
3837 	if (w > max_width || w < min_width || h > max_height) {
3838 		drm_dbg_kms(&dev_priv->drm,
3839 			    "requested Y/RGB source size %dx%d outside limits (min: %dx1 max: %dx%d)\n",
3840 			    w, h, min_width, max_width, max_height);
3841 		return -EINVAL;
3842 	}
3843 
3844 	intel_add_fb_offsets(&x, &y, plane_state, 0);
3845 	offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 0);
3846 	alignment = intel_surf_alignment(fb, 0);
3847 	if (drm_WARN_ON(&dev_priv->drm, alignment && !is_power_of_2(alignment)))
3848 		return -EINVAL;
3849 
3850 	/*
3851 	 * AUX surface offset is specified as the distance from the
3852 	 * main surface offset, and it must be non-negative. Make
3853 	 * sure that is what we will get.
3854 	 */
3855 	if (aux_plane && offset > aux_offset)
3856 		offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
3857 							   offset, aux_offset & ~(alignment - 1));
3858 
3859 	/*
3860 	 * When using an X-tiled surface, the plane blows up
3861 	 * if the x offset + width exceeds the stride.
3862 	 *
3863 	 * TODO: linear and Y-tiled seem fine, Yf untested.
3864 	 */
3865 	if (fb->modifier == I915_FORMAT_MOD_X_TILED) {
3866 		int cpp = fb->format->cpp[0];
3867 
3868 		while ((x + w) * cpp > plane_state->color_plane[0].stride) {
3869 			if (offset == 0) {
3870 				drm_dbg_kms(&dev_priv->drm,
3871 					    "Unable to find suitable display surface offset due to X-tiling\n");
3872 				return -EINVAL;
3873 			}
3874 
3875 			offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
3876 								   offset, offset - alignment);
3877 		}
3878 	}
3879 
3880 	/*
3881 	 * The CCS AUX surface doesn't have its own x/y offsets, so we must
3882 	 * make sure they match the main surface x/y offsets.
3883 	 */
3884 	if (is_ccs_modifier(fb->modifier)) {
3885 		while (!skl_check_main_ccs_coordinates(plane_state, x, y,
3886 						       offset, aux_plane)) {
3887 			if (offset == 0)
3888 				break;
3889 
3890 			offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
3891 								   offset, offset - alignment);
3892 		}
3893 
3894 		if (x != plane_state->color_plane[aux_plane].x ||
3895 		    y != plane_state->color_plane[aux_plane].y) {
3896 			drm_dbg_kms(&dev_priv->drm,
3897 				    "Unable to find suitable display surface offset due to CCS\n");
3898 			return -EINVAL;
3899 		}
3900 	}
3901 
3902 	plane_state->color_plane[0].offset = offset;
3903 	plane_state->color_plane[0].x = x;
3904 	plane_state->color_plane[0].y = y;
3905 
3906 	/*
3907 	 * Put the final coordinates back so that the src
3908 	 * coordinate checks will see the right values.
3909 	 */
3910 	drm_rect_translate_to(&plane_state->uapi.src,
3911 			      x << 16, y << 16);
3912 
3913 	return 0;
3914 }
3915 
3916 static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
3917 {
3918 	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
3919 	struct drm_i915_private *i915 = to_i915(plane->base.dev);
3920 	const struct drm_framebuffer *fb = plane_state->hw.fb;
3921 	unsigned int rotation = plane_state->hw.rotation;
3922 	int uv_plane = 1;
3923 	int max_width = intel_plane_max_width(plane, fb, uv_plane, rotation);
3924 	int max_height = intel_plane_max_height(plane, fb, uv_plane, rotation);
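	/*
	 * The src rect is in 16.16 fixed point; shifting by 17 instead of
	 * 16 also halves the coordinates for the 4:2:0 chroma subsampling.
	 */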
3925 	int x = plane_state->uapi.src.x1 >> 17;
3926 	int y = plane_state->uapi.src.y1 >> 17;
3927 	int w = drm_rect_width(&plane_state->uapi.src) >> 17;
3928 	int h = drm_rect_height(&plane_state->uapi.src) >> 17;
3929 	u32 offset;
3930 
3931 	/* FIXME not quite sure how/if these apply to the chroma plane */
3932 	if (w > max_width || h > max_height) {
3933 		drm_dbg_kms(&i915->drm,
3934 			    "CbCr source size %dx%d too big (limit %dx%d)\n",
3935 			    w, h, max_width, max_height);
3936 		return -EINVAL;
3937 	}
3938 
3939 	intel_add_fb_offsets(&x, &y, plane_state, uv_plane);
3940 	offset = intel_plane_compute_aligned_offset(&x, &y,
3941 						    plane_state, uv_plane);
3942 
3943 	if (is_ccs_modifier(fb->modifier)) {
3944 		int ccs_plane = main_to_ccs_plane(fb, uv_plane);
3945 		u32 aux_offset = plane_state->color_plane[ccs_plane].offset;
3946 		u32 alignment = intel_surf_alignment(fb, uv_plane);
3947 
3948 		if (offset > aux_offset)
3949 			offset = intel_plane_adjust_aligned_offset(&x, &y,
3950 								   plane_state,
3951 								   uv_plane,
3952 								   offset,
3953 								   aux_offset & ~(alignment - 1));
3954 
3955 		while (!skl_check_main_ccs_coordinates(plane_state, x, y,
3956 						       offset, ccs_plane)) {
3957 			if (offset == 0)
3958 				break;
3959 
3960 			offset = intel_plane_adjust_aligned_offset(&x, &y,
3961 								   plane_state,
3962 								   uv_plane,
3963 								   offset, offset - alignment);
3964 		}
3965 
3966 		if (x != plane_state->color_plane[ccs_plane].x ||
3967 		    y != plane_state->color_plane[ccs_plane].y) {
3968 			drm_dbg_kms(&i915->drm,
3969 				    "Unable to find suitable display surface offset due to CCS\n");
3970 			return -EINVAL;
3971 		}
3972 	}
3973 
3974 	plane_state->color_plane[uv_plane].offset = offset;
3975 	plane_state->color_plane[uv_plane].x = x;
3976 	plane_state->color_plane[uv_plane].y = y;
3977 
3978 	return 0;
3979 }
3980 
3981 static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state)
3982 {
3983 	const struct drm_framebuffer *fb = plane_state->hw.fb;
3984 	int src_x = plane_state->uapi.src.x1 >> 16;
3985 	int src_y = plane_state->uapi.src.y1 >> 16;
3986 	u32 offset;
3987 	int ccs_plane;
3988 
3989 	for (ccs_plane = 0; ccs_plane < fb->format->num_planes; ccs_plane++) {
3990 		int main_hsub, main_vsub;
3991 		int hsub, vsub;
3992 		int x, y;
3993 
3994 		if (!is_ccs_plane(fb, ccs_plane))
3995 			continue;
3996 
3997 		intel_fb_plane_get_subsampling(&main_hsub, &main_vsub, fb,
3998 					       ccs_to_main_plane(fb, ccs_plane));
3999 		intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane);
4000 
4001 		hsub *= main_hsub;
4002 		vsub *= main_vsub;
4003 		x = src_x / hsub;
4004 		y = src_y / vsub;
4005 
4006 		intel_add_fb_offsets(&x, &y, plane_state, ccs_plane);
4007 
4008 		offset = intel_plane_compute_aligned_offset(&x, &y,
4009 							    plane_state,
4010 							    ccs_plane);
4011 
4012 		plane_state->color_plane[ccs_plane].offset = offset;
4013 		plane_state->color_plane[ccs_plane].x = (x * hsub +
4014 							 src_x % hsub) /
4015 							main_hsub;
4016 		plane_state->color_plane[ccs_plane].y = (y * vsub +
4017 							 src_y % vsub) /
4018 							main_vsub;
4019 	}
4020 
4021 	return 0;
4022 }
4023 
4024 int skl_check_plane_surface(struct intel_plane_state *plane_state)
4025 {
4026 	const struct drm_framebuffer *fb = plane_state->hw.fb;
4027 	int ret, i;
4028 
4029 	ret = intel_plane_compute_gtt(plane_state);
4030 	if (ret)
4031 		return ret;
4032 
4033 	if (!plane_state->uapi.visible)
4034 		return 0;
4035 
4036 	/*
4037 	 * Handle the AUX surface first since the main surface setup depends on
4038 	 * it.
4039 	 */
4040 	if (is_ccs_modifier(fb->modifier)) {
4041 		ret = skl_check_ccs_aux_surface(plane_state);
4042 		if (ret)
4043 			return ret;
4044 	}
4045 
4046 	if (intel_format_info_is_yuv_semiplanar(fb->format,
4047 						fb->modifier)) {
4048 		ret = skl_check_nv12_aux_surface(plane_state);
4049 		if (ret)
4050 			return ret;
4051 	}
4052 
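	/* Zero out the remaining, unused color plane slots. */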
4053 	for (i = fb->format->num_planes; i < ARRAY_SIZE(plane_state->color_plane); i++) {
4054 		plane_state->color_plane[i].offset = 0;
4055 		plane_state->color_plane[i].x = 0;
4056 		plane_state->color_plane[i].y = 0;
4057 	}
4058 
4059 	ret = skl_check_main_surface(plane_state);
4060 	if (ret)
4061 		return ret;
4062 
4063 	return 0;
4064 }
4065 
4066 static void i9xx_plane_ratio(const struct intel_crtc_state *crtc_state,
4067 			     const struct intel_plane_state *plane_state,
4068 			     unsigned int *num, unsigned int *den)
4069 {
4070 	const struct drm_framebuffer *fb = plane_state->hw.fb;
4071 	unsigned int cpp = fb->format->cpp[0];
4072 
4073 	/*
4074 	 * g4x bspec says 64bpp pixel rate can't exceed 80%
4075 	 * of cdclk when the sprite plane is enabled on the
4076 	 * same pipe. ilk/snb bspec says 64bpp pixel rate is
4077 	 * never allowed to exceed 80% of cdclk. Let's just go
4078 	 * with the ilk/snb limit always.
4079 	 */
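	/* 10/8 = 1.25: the min cdclk works out to pixel_rate / 0.8, i.e. the 80% limit. */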
4080 	if (cpp == 8) {
4081 		*num = 10;
4082 		*den = 8;
4083 	} else {
4084 		*num = 1;
4085 		*den = 1;
4086 	}
4087 }
4088 
4089 static int i9xx_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
4090 				const struct intel_plane_state *plane_state)
4091 {
4092 	unsigned int pixel_rate;
4093 	unsigned int num, den;
4094 
4095 	/*
4096 	 * Note that crtc_state->pixel_rate accounts for both
4097 	 * horizontal and vertical panel fitter downscaling factors.
4098 	 * Pre-HSW bspec tells us to only consider the horizontal
4099 	 * downscaling factor here. We ignore that and just consider
4100 	 * both for simplicity.
4101 	 */
4102 	pixel_rate = crtc_state->pixel_rate;
4103 
4104 	i9xx_plane_ratio(crtc_state, plane_state, &num, &den);
4105 
4106 	/* two pixels per clock with double wide pipe */
4107 	if (crtc_state->double_wide)
4108 		den *= 2;
4109 
4110 	return DIV_ROUND_UP(pixel_rate * num, den);
4111 }
4112 
4113 unsigned int
4114 i9xx_plane_max_stride(struct intel_plane *plane,
4115 		      u32 pixel_format, u64 modifier,
4116 		      unsigned int rotation)
4117 {
4118 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
4119 
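	/* The max stride limits below are all in bytes. */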
4120 	if (!HAS_GMCH(dev_priv)) {
4121 		return 32*1024;
4122 	} else if (INTEL_GEN(dev_priv) >= 4) {
4123 		if (modifier == I915_FORMAT_MOD_X_TILED)
4124 			return 16*1024;
4125 		else
4126 			return 32*1024;
4127 	} else if (INTEL_GEN(dev_priv) >= 3) {
4128 		if (modifier == I915_FORMAT_MOD_X_TILED)
4129 			return 8*1024;
4130 		else
4131 			return 16*1024;
4132 	} else {
4133 		if (plane->i9xx_plane == PLANE_C)
4134 			return 4*1024;
4135 		else
4136 			return 8*1024;
4137 	}
4138 }
4139 
4140 static u32 i9xx_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
4141 {
4142 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4143 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4144 	u32 dspcntr = 0;
4145 
4146 	if (crtc_state->gamma_enable)
4147 		dspcntr |= DISPPLANE_GAMMA_ENABLE;
4148 
4149 	if (crtc_state->csc_enable)
4150 		dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
4151 
4152 	if (INTEL_GEN(dev_priv) < 5)
4153 		dspcntr |= DISPPLANE_SEL_PIPE(crtc->pipe);
4154 
4155 	return dspcntr;
4156 }
4157 
4158 static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
4159 			  const struct intel_plane_state *plane_state)
4160 {
4161 	struct drm_i915_private *dev_priv =
4162 		to_i915(plane_state->uapi.plane->dev);
4163 	const struct drm_framebuffer *fb = plane_state->hw.fb;
4164 	unsigned int rotation = plane_state->hw.rotation;
4165 	u32 dspcntr;
4166 
4167 	dspcntr = DISPLAY_PLANE_ENABLE;
4168 
4169 	if (IS_G4X(dev_priv) || IS_GEN(dev_priv, 5) ||
4170 	    IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
4171 		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
4172 
4173 	switch (fb->format->format) {
4174 	case DRM_FORMAT_C8:
4175 		dspcntr |= DISPPLANE_8BPP;
4176 		break;
4177 	case DRM_FORMAT_XRGB1555:
4178 		dspcntr |= DISPPLANE_BGRX555;
4179 		break;
4180 	case DRM_FORMAT_ARGB1555:
4181 		dspcntr |= DISPPLANE_BGRA555;
4182 		break;
4183 	case DRM_FORMAT_RGB565:
4184 		dspcntr |= DISPPLANE_BGRX565;
4185 		break;
4186 	case DRM_FORMAT_XRGB8888:
4187 		dspcntr |= DISPPLANE_BGRX888;
4188 		break;
4189 	case DRM_FORMAT_XBGR8888:
4190 		dspcntr |= DISPPLANE_RGBX888;
4191 		break;
4192 	case DRM_FORMAT_ARGB8888:
4193 		dspcntr |= DISPPLANE_BGRA888;
4194 		break;
4195 	case DRM_FORMAT_ABGR8888:
4196 		dspcntr |= DISPPLANE_RGBA888;
4197 		break;
4198 	case DRM_FORMAT_XRGB2101010:
4199 		dspcntr |= DISPPLANE_BGRX101010;
4200 		break;
4201 	case DRM_FORMAT_XBGR2101010:
4202 		dspcntr |= DISPPLANE_RGBX101010;
4203 		break;
4204 	case DRM_FORMAT_ARGB2101010:
4205 		dspcntr |= DISPPLANE_BGRA101010;
4206 		break;
4207 	case DRM_FORMAT_ABGR2101010:
4208 		dspcntr |= DISPPLANE_RGBA101010;
4209 		break;
4210 	case DRM_FORMAT_XBGR16161616F:
4211 		dspcntr |= DISPPLANE_RGBX161616;
4212 		break;
4213 	default:
4214 		MISSING_CASE(fb->format->format);
4215 		return 0;
4216 	}
4217 
4218 	if (INTEL_GEN(dev_priv) >= 4 &&
4219 	    fb->modifier == I915_FORMAT_MOD_X_TILED)
4220 		dspcntr |= DISPPLANE_TILED;
4221 
4222 	if (rotation & DRM_MODE_ROTATE_180)
4223 		dspcntr |= DISPPLANE_ROTATE_180;
4224 
4225 	if (rotation & DRM_MODE_REFLECT_X)
4226 		dspcntr |= DISPPLANE_MIRROR;
4227 
4228 	return dspcntr;
4229 }
4230 
4231 int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
4232 {
4233 	struct drm_i915_private *dev_priv =
4234 		to_i915(plane_state->uapi.plane->dev);
4235 	const struct drm_framebuffer *fb = plane_state->hw.fb;
4236 	int src_x, src_y, src_w;
4237 	u32 offset;
4238 	int ret;
4239 
4240 	ret = intel_plane_compute_gtt(plane_state);
4241 	if (ret)
4242 		return ret;
4243 
4244 	if (!plane_state->uapi.visible)
4245 		return 0;
4246 
4247 	src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
4248 	src_x = plane_state->uapi.src.x1 >> 16;
4249 	src_y = plane_state->uapi.src.y1 >> 16;
4250 
4251 	/* Undocumented hardware limit on i965/g4x/vlv/chv */
4252 	if (HAS_GMCH(dev_priv) && fb->format->cpp[0] == 8 && src_w > 2048)
4253 		return -EINVAL;
4254 
4255 	intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
4256 
4257 	if (INTEL_GEN(dev_priv) >= 4)
4258 		offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
4259 							    plane_state, 0);
4260 	else
4261 		offset = 0;
4262 
4263 	/*
4264 	 * Put the final coordinates back so that the src
4265 	 * coordinate checks will see the right values.
4266 	 */
4267 	drm_rect_translate_to(&plane_state->uapi.src,
4268 			      src_x << 16, src_y << 16);
4269 
4270 	/* HSW/BDW do this automagically in hardware */
4271 	if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) {
4272 		unsigned int rotation = plane_state->hw.rotation;
4273 		int src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
4274 		int src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
4275 
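		/*
		 * With 180° rotation (or X mirroring) scanout starts from
		 * the opposite edge, so point the plane at the last pixel
		 * of the source rather than the first.
		 */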
4276 		if (rotation & DRM_MODE_ROTATE_180) {
4277 			src_x += src_w - 1;
4278 			src_y += src_h - 1;
4279 		} else if (rotation & DRM_MODE_REFLECT_X) {
4280 			src_x += src_w - 1;
4281 		}
4282 	}
4283 
4284 	plane_state->color_plane[0].offset = offset;
4285 	plane_state->color_plane[0].x = src_x;
4286 	plane_state->color_plane[0].y = src_y;
4287 
4288 	return 0;
4289 }
4290 
4291 static bool i9xx_plane_has_windowing(struct intel_plane *plane)
4292 {
4293 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
4294 	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
4295 
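	/*
	 * "Windowing" here means the plane can be positioned/sized
	 * independently of the pipe, i.e. it has a window generator.
	 */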
4296 	if (IS_CHERRYVIEW(dev_priv))
4297 		return i9xx_plane == PLANE_B;
4298 	else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
4299 		return false;
4300 	else if (IS_GEN(dev_priv, 4))
4301 		return i9xx_plane == PLANE_C;
4302 	else
4303 		return i9xx_plane == PLANE_B ||
4304 			i9xx_plane == PLANE_C;
4305 }
4306 
4307 static int
4308 i9xx_plane_check(struct intel_crtc_state *crtc_state,
4309 		 struct intel_plane_state *plane_state)
4310 {
4311 	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
4312 	int ret;
4313 
4314 	ret = chv_plane_check_rotation(plane_state);
4315 	if (ret)
4316 		return ret;
4317 
4318 	ret = intel_atomic_plane_check_clipping(plane_state, crtc_state,
4319 						DRM_PLANE_HELPER_NO_SCALING,
4320 						DRM_PLANE_HELPER_NO_SCALING,
4321 						i9xx_plane_has_windowing(plane));
4322 	if (ret)
4323 		return ret;
4324 
4325 	ret = i9xx_check_plane_surface(plane_state);
4326 	if (ret)
4327 		return ret;
4328 
4329 	if (!plane_state->uapi.visible)
4330 		return 0;
4331 
4332 	ret = intel_plane_check_src_coordinates(plane_state);
4333 	if (ret)
4334 		return ret;
4335 
4336 	plane_state->ctl = i9xx_plane_ctl(crtc_state, plane_state);
4337 
4338 	return 0;
4339 }
4340 
4341 static void i9xx_update_plane(struct intel_plane *plane,
4342 			      const struct intel_crtc_state *crtc_state,
4343 			      const struct intel_plane_state *plane_state)
4344 {
4345 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
4346 	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
4347 	u32 linear_offset;
4348 	int x = plane_state->color_plane[0].x;
4349 	int y = plane_state->color_plane[0].y;
4350 	int crtc_x = plane_state->uapi.dst.x1;
4351 	int crtc_y = plane_state->uapi.dst.y1;
4352 	int crtc_w = drm_rect_width(&plane_state->uapi.dst);
4353 	int crtc_h = drm_rect_height(&plane_state->uapi.dst);
4354 	unsigned long irqflags;
4355 	u32 dspaddr_offset;
4356 	u32 dspcntr;
4357 
4358 	dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state);
4359 
4360 	linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
4361 
4362 	if (INTEL_GEN(dev_priv) >= 4)
4363 		dspaddr_offset = plane_state->color_plane[0].offset;
4364 	else
4365 		dspaddr_offset = linear_offset;
4366 
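	/* uncore.lock serializes the raw _fw register writes below. */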
4367 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
4368 
4369 	intel_de_write_fw(dev_priv, DSPSTRIDE(i9xx_plane),
4370 			  plane_state->color_plane[0].stride);
4371 
4372 	if (INTEL_GEN(dev_priv) < 4) {
4373 		/*
4374 		 * PLANE_A doesn't actually have a full window
4375 		 * generator but let's assume we still need to
4376 		 * program whatever is there.
4377 		 */
4378 		intel_de_write_fw(dev_priv, DSPPOS(i9xx_plane),
4379 				  (crtc_y << 16) | crtc_x);
4380 		intel_de_write_fw(dev_priv, DSPSIZE(i9xx_plane),
4381 				  ((crtc_h - 1) << 16) | (crtc_w - 1));
4382 	} else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) {
4383 		intel_de_write_fw(dev_priv, PRIMPOS(i9xx_plane),
4384 				  (crtc_y << 16) | crtc_x);
4385 		intel_de_write_fw(dev_priv, PRIMSIZE(i9xx_plane),
4386 				  ((crtc_h - 1) << 16) | (crtc_w - 1));
4387 		intel_de_write_fw(dev_priv, PRIMCNSTALPHA(i9xx_plane), 0);
4388 	}
4389 
4390 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
4391 		intel_de_write_fw(dev_priv, DSPOFFSET(i9xx_plane),
4392 				  (y << 16) | x);
4393 	} else if (INTEL_GEN(dev_priv) >= 4) {
4394 		intel_de_write_fw(dev_priv, DSPLINOFF(i9xx_plane),
4395 				  linear_offset);
4396 		intel_de_write_fw(dev_priv, DSPTILEOFF(i9xx_plane),
4397 				  (y << 16) | x);
4398 	}
4399 
4400 	/*
4401 	 * The control register self-arms if the plane was previously
4402 	 * disabled. Try to make the plane enable atomic by writing
4403 	 * the control register just before the surface register.
4404 	 */
4405 	intel_de_write_fw(dev_priv, DSPCNTR(i9xx_plane), dspcntr);
4406 	if (INTEL_GEN(dev_priv) >= 4)
4407 		intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane),
4408 				  intel_plane_ggtt_offset(plane_state) + dspaddr_offset);
4409 	else
4410 		intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane),
4411 				  intel_plane_ggtt_offset(plane_state) + dspaddr_offset);
4412 
4413 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
4414 }
4415 
4416 static void i9xx_disable_plane(struct intel_plane *plane,
4417 			       const struct intel_crtc_state *crtc_state)
4418 {
4419 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
4420 	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
4421 	unsigned long irqflags;
4422 	u32 dspcntr;
4423 
4424 	/*
4425 	 * DSPCNTR pipe gamma enable on g4x+ and pipe csc
4426 	 * enable on ilk+ affect the pipe bottom color as
4427 	 * well, so we must configure them even if the plane
4428 	 * is disabled.
4429 	 *
4430 	 * On pre-g4x there is no way to gamma correct the
4431 	 * pipe bottom color but we'll keep on doing this
4432 	 * anyway so that the crtc state readout works correctly.
4433 	 */
4434 	dspcntr = i9xx_plane_ctl_crtc(crtc_state);
4435 
4436 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
4437 
4438 	intel_de_write_fw(dev_priv, DSPCNTR(i9xx_plane), dspcntr);
4439 	if (INTEL_GEN(dev_priv) >= 4)
4440 		intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane), 0);
4441 	else
4442 		intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane), 0);
4443 
4444 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
4445 }
4446 
4447 static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
4448 				    enum pipe *pipe)
4449 {
4450 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
4451 	enum intel_display_power_domain power_domain;
4452 	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
4453 	intel_wakeref_t wakeref;
4454 	bool ret;
4455 	u32 val;
4456 
4457 	/*
4458 	 * Not 100% correct for planes that can move between pipes,
4459 	 * but that's only the case for gen2-4, which don't have any
4460 	 * display power wells.
4461 	 */
4462 	power_domain = POWER_DOMAIN_PIPE(plane->pipe);
4463 	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
4464 	if (!wakeref)
4465 		return false;
4466 
4467 	val = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));
4468 
4469 	ret = val & DISPLAY_PLANE_ENABLE;
4470 
4471 	if (INTEL_GEN(dev_priv) >= 5)
4472 		*pipe = plane->pipe;
4473 	else
4474 		*pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
4475 			DISPPLANE_SEL_PIPE_SHIFT;
4476 
4477 	intel_display_power_put(dev_priv, power_domain, wakeref);
4478 
4479 	return ret;
4480 }
4481 
4482 static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
4483 {
4484 	struct drm_device *dev = intel_crtc->base.dev;
4485 	struct drm_i915_private *dev_priv = to_i915(dev);
4486 	unsigned long irqflags;
4487 
4488 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
4489 
4490 	intel_de_write_fw(dev_priv, SKL_PS_CTRL(intel_crtc->pipe, id), 0);
4491 	intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
4492 	intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
4493 
4494 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
4495 }
4496 
4497 /*
4498  * This function detaches (i.e. unbinds) unused scalers in hardware.
4499  */
4500 static void skl_detach_scalers(const struct intel_crtc_state *crtc_state)
4501 {
4502 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
4503 	const struct intel_crtc_scaler_state *scaler_state =
4504 		&crtc_state->scaler_state;
4505 	int i;
4506 
4507 	/* loop through and disable scalers that aren't in use */
4508 	for (i = 0; i < intel_crtc->num_scalers; i++) {
4509 		if (!scaler_state->scalers[i].in_use)
4510 			skl_detach_scaler(intel_crtc, i);
4511 	}
4512 }
4513 
4514 static unsigned int skl_plane_stride_mult(const struct drm_framebuffer *fb,
4515 					  int color_plane, unsigned int rotation)
4516 {
4517 	/*
4518 	 * The stride is either expressed as a multiple of 64 byte chunks for
4519 	 * linear buffers or in number of tiles for tiled buffers.
4520 	 */
4521 	if (is_surface_linear(fb, color_plane))
4522 		return 64;
4523 	else if (drm_rotation_90_or_270(rotation))
4524 		return intel_tile_height(fb, color_plane);
4525 	else
4526 		return intel_tile_width_bytes(fb, color_plane);
4527 }
4528 
4529 u32 skl_plane_stride(const struct intel_plane_state *plane_state,
4530 		     int color_plane)
4531 {
4532 	const struct drm_framebuffer *fb = plane_state->hw.fb;
4533 	unsigned int rotation = plane_state->hw.rotation;
4534 	u32 stride = plane_state->color_plane[color_plane].stride;
4535 
4536 	if (color_plane >= fb->format->num_planes)
4537 		return 0;
4538 
4539 	return stride / skl_plane_stride_mult(fb, color_plane, rotation);
4540 }
4541 
4542 static u32 skl_plane_ctl_format(u32 pixel_format)
4543 {
4544 	switch (pixel_format) {
4545 	case DRM_FORMAT_C8:
4546 		return PLANE_CTL_FORMAT_INDEXED;
4547 	case DRM_FORMAT_RGB565:
4548 		return PLANE_CTL_FORMAT_RGB_565;
4549 	case DRM_FORMAT_XBGR8888:
4550 	case DRM_FORMAT_ABGR8888:
4551 		return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
4552 	case DRM_FORMAT_XRGB8888:
4553 	case DRM_FORMAT_ARGB8888:
4554 		return PLANE_CTL_FORMAT_XRGB_8888;
4555 	case DRM_FORMAT_XBGR2101010:
4556 	case DRM_FORMAT_ABGR2101010:
4557 		return PLANE_CTL_FORMAT_XRGB_2101010 | PLANE_CTL_ORDER_RGBX;
4558 	case DRM_FORMAT_XRGB2101010:
4559 	case DRM_FORMAT_ARGB2101010:
4560 		return PLANE_CTL_FORMAT_XRGB_2101010;
4561 	case DRM_FORMAT_XBGR16161616F:
4562 	case DRM_FORMAT_ABGR16161616F:
4563 		return PLANE_CTL_FORMAT_XRGB_16161616F | PLANE_CTL_ORDER_RGBX;
4564 	case DRM_FORMAT_XRGB16161616F:
4565 	case DRM_FORMAT_ARGB16161616F:
4566 		return PLANE_CTL_FORMAT_XRGB_16161616F;
4567 	case DRM_FORMAT_XYUV8888:
4568 		return PLANE_CTL_FORMAT_XYUV;
4569 	case DRM_FORMAT_YUYV:
4570 		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
4571 	case DRM_FORMAT_YVYU:
4572 		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
4573 	case DRM_FORMAT_UYVY:
4574 		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
4575 	case DRM_FORMAT_VYUY:
4576 		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
4577 	case DRM_FORMAT_NV12:
4578 		return PLANE_CTL_FORMAT_NV12;
4579 	case DRM_FORMAT_P010:
4580 		return PLANE_CTL_FORMAT_P010;
4581 	case DRM_FORMAT_P012:
4582 		return PLANE_CTL_FORMAT_P012;
4583 	case DRM_FORMAT_P016:
4584 		return PLANE_CTL_FORMAT_P016;
4585 	case DRM_FORMAT_Y210:
4586 		return PLANE_CTL_FORMAT_Y210;
4587 	case DRM_FORMAT_Y212:
4588 		return PLANE_CTL_FORMAT_Y212;
4589 	case DRM_FORMAT_Y216:
4590 		return PLANE_CTL_FORMAT_Y216;
4591 	case DRM_FORMAT_XVYU2101010:
4592 		return PLANE_CTL_FORMAT_Y410;
4593 	case DRM_FORMAT_XVYU12_16161616:
4594 		return PLANE_CTL_FORMAT_Y412;
4595 	case DRM_FORMAT_XVYU16161616:
4596 		return PLANE_CTL_FORMAT_Y416;
4597 	default:
4598 		MISSING_CASE(pixel_format);
4599 	}
4600 
4601 	return 0;
4602 }
4603 
4604 static u32 skl_plane_ctl_alpha(const struct intel_plane_state *plane_state)
4605 {
4606 	if (!plane_state->hw.fb->format->has_alpha)
4607 		return PLANE_CTL_ALPHA_DISABLE;
4608 
4609 	switch (plane_state->hw.pixel_blend_mode) {
4610 	case DRM_MODE_BLEND_PIXEL_NONE:
4611 		return PLANE_CTL_ALPHA_DISABLE;
4612 	case DRM_MODE_BLEND_PREMULTI:
4613 		return PLANE_CTL_ALPHA_SW_PREMULTIPLY;
4614 	case DRM_MODE_BLEND_COVERAGE:
4615 		return PLANE_CTL_ALPHA_HW_PREMULTIPLY;
4616 	default:
4617 		MISSING_CASE(plane_state->hw.pixel_blend_mode);
4618 		return PLANE_CTL_ALPHA_DISABLE;
4619 	}
4620 }
4621 
4622 static u32 glk_plane_color_ctl_alpha(const struct intel_plane_state *plane_state)
4623 {
4624 	if (!plane_state->hw.fb->format->has_alpha)
4625 		return PLANE_COLOR_ALPHA_DISABLE;
4626 
4627 	switch (plane_state->hw.pixel_blend_mode) {
4628 	case DRM_MODE_BLEND_PIXEL_NONE:
4629 		return PLANE_COLOR_ALPHA_DISABLE;
4630 	case DRM_MODE_BLEND_PREMULTI:
4631 		return PLANE_COLOR_ALPHA_SW_PREMULTIPLY;
4632 	case DRM_MODE_BLEND_COVERAGE:
4633 		return PLANE_COLOR_ALPHA_HW_PREMULTIPLY;
4634 	default:
4635 		MISSING_CASE(plane_state->hw.pixel_blend_mode);
4636 		return PLANE_COLOR_ALPHA_DISABLE;
4637 	}
4638 }
4639 
4640 static u32 skl_plane_ctl_tiling(u64 fb_modifier)
4641 {
4642 	switch (fb_modifier) {
4643 	case DRM_FORMAT_MOD_LINEAR:
4644 		break;
4645 	case I915_FORMAT_MOD_X_TILED:
4646 		return PLANE_CTL_TILED_X;
4647 	case I915_FORMAT_MOD_Y_TILED:
4648 		return PLANE_CTL_TILED_Y;
4649 	case I915_FORMAT_MOD_Y_TILED_CCS:
4650 		return PLANE_CTL_TILED_Y | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
4651 	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
4652 		return PLANE_CTL_TILED_Y |
4653 		       PLANE_CTL_RENDER_DECOMPRESSION_ENABLE |
4654 		       PLANE_CTL_CLEAR_COLOR_DISABLE;
4655 	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
4656 		return PLANE_CTL_TILED_Y | PLANE_CTL_MEDIA_DECOMPRESSION_ENABLE;
4657 	case I915_FORMAT_MOD_Yf_TILED:
4658 		return PLANE_CTL_TILED_YF;
4659 	case I915_FORMAT_MOD_Yf_TILED_CCS:
4660 		return PLANE_CTL_TILED_YF | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
4661 	default:
4662 		MISSING_CASE(fb_modifier);
4663 	}
4664 
4665 	return 0;
4666 }
4667 
4668 static u32 skl_plane_ctl_rotate(unsigned int rotate)
4669 {
4670 	switch (rotate) {
4671 	case DRM_MODE_ROTATE_0:
4672 		break;
4673 	/*
4674 	 * DRM_MODE_ROTATE_ is counterclockwise, to stay compatible with Xrandr,
4675 	 * while i915 HW rotation is clockwise; that's why the values are swapped.
4676 	 */
4677 	case DRM_MODE_ROTATE_90:
4678 		return PLANE_CTL_ROTATE_270;
4679 	case DRM_MODE_ROTATE_180:
4680 		return PLANE_CTL_ROTATE_180;
4681 	case DRM_MODE_ROTATE_270:
4682 		return PLANE_CTL_ROTATE_90;
4683 	default:
4684 		MISSING_CASE(rotate);
4685 	}
4686 
4687 	return 0;
4688 }
4689 
4690 static u32 cnl_plane_ctl_flip(unsigned int reflect)
4691 {
4692 	switch (reflect) {
4693 	case 0:
4694 		break;
4695 	case DRM_MODE_REFLECT_X:
4696 		return PLANE_CTL_FLIP_HORIZONTAL;
4697 	case DRM_MODE_REFLECT_Y:
4698 	default:
4699 		MISSING_CASE(reflect);
4700 	}
4701 
4702 	return 0;
4703 }
4704 
4705 u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
4706 {
4707 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
4708 	u32 plane_ctl = 0;
4709 
4710 	if (crtc_state->uapi.async_flip)
4711 		plane_ctl |= PLANE_CTL_ASYNC_FLIP;
4712 
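	/* On GLK+ the pipe gamma/CSC enables live in PLANE_COLOR_CTL instead. */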
4713 	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
4714 		return plane_ctl;
4715 
4716 	if (crtc_state->gamma_enable)
4717 		plane_ctl |= PLANE_CTL_PIPE_GAMMA_ENABLE;
4718 
4719 	if (crtc_state->csc_enable)
4720 		plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE;
4721 
4722 	return plane_ctl;
4723 }
4724 
4725 u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
4726 		  const struct intel_plane_state *plane_state)
4727 {
4728 	struct drm_i915_private *dev_priv =
4729 		to_i915(plane_state->uapi.plane->dev);
4730 	const struct drm_framebuffer *fb = plane_state->hw.fb;
4731 	unsigned int rotation = plane_state->hw.rotation;
4732 	const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
4733 	u32 plane_ctl;
4734 
4735 	plane_ctl = PLANE_CTL_ENABLE;
4736 
4737 	if (INTEL_GEN(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) {
4738 		plane_ctl |= skl_plane_ctl_alpha(plane_state);
4739 		plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
4740 
4741 		if (plane_state->hw.color_encoding == DRM_COLOR_YCBCR_BT709)
4742 			plane_ctl |= PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709;
4743 
4744 		if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
4745 			plane_ctl |= PLANE_CTL_YUV_RANGE_CORRECTION_DISABLE;
4746 	}
4747 
4748 	plane_ctl |= skl_plane_ctl_format(fb->format->format);
4749 	plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
4750 	plane_ctl |= skl_plane_ctl_rotate(rotation & DRM_MODE_ROTATE_MASK);
4751 
4752 	if (INTEL_GEN(dev_priv) >= 10)
4753 		plane_ctl |= cnl_plane_ctl_flip(rotation &
4754 						DRM_MODE_REFLECT_MASK);
4755 
4756 	if (key->flags & I915_SET_COLORKEY_DESTINATION)
4757 		plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
4758 	else if (key->flags & I915_SET_COLORKEY_SOURCE)
4759 		plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;
4760 
4761 	return plane_ctl;
4762 }
4763 
4764 u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state)
4765 {
4766 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
4767 	u32 plane_color_ctl = 0;
4768 
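	/* On ICL+ these pipe gamma/CSC enables are gone from PLANE_COLOR_CTL too. */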
4769 	if (INTEL_GEN(dev_priv) >= 11)
4770 		return plane_color_ctl;
4771 
4772 	if (crtc_state->gamma_enable)
4773 		plane_color_ctl |= PLANE_COLOR_PIPE_GAMMA_ENABLE;
4774 
4775 	if (crtc_state->csc_enable)
4776 		plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE;
4777 
4778 	return plane_color_ctl;
4779 }
4780 
4781 u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
4782 			const struct intel_plane_state *plane_state)
4783 {
4784 	struct drm_i915_private *dev_priv =
4785 		to_i915(plane_state->uapi.plane->dev);
4786 	const struct drm_framebuffer *fb = plane_state->hw.fb;
4787 	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
4788 	u32 plane_color_ctl = 0;
4789 
4790 	plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE;
4791 	plane_color_ctl |= glk_plane_color_ctl_alpha(plane_state);
4792 
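	/*
	 * Non-HDR planes use one of the fixed YUV->RGB CSC modes; ICL HDR
	 * planes instead enable their separately programmed input CSC.
	 */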
4793 	if (fb->format->is_yuv && !icl_is_hdr_plane(dev_priv, plane->id)) {
4794 		switch (plane_state->hw.color_encoding) {
4795 		case DRM_COLOR_YCBCR_BT709:
4796 			plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709;
4797 			break;
4798 		case DRM_COLOR_YCBCR_BT2020:
4799 			plane_color_ctl |=
4800 				PLANE_COLOR_CSC_MODE_YUV2020_TO_RGB2020;
4801 			break;
4802 		default:
4803 			plane_color_ctl |=
4804 				PLANE_COLOR_CSC_MODE_YUV601_TO_RGB601;
4805 		}
4806 		if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
4807 			plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
4808 	} else if (fb->format->is_yuv) {
4809 		plane_color_ctl |= PLANE_COLOR_INPUT_CSC_ENABLE;
4810 	}
4811 
4812 	return plane_color_ctl;
4813 }
4814 
4815 static int
4816 __intel_display_resume(struct drm_device *dev,
4817 		       struct drm_atomic_state *state,
4818 		       struct drm_modeset_acquire_ctx *ctx)
4819 {
4820 	struct drm_crtc_state *crtc_state;
4821 	struct drm_crtc *crtc;
4822 	int i, ret;
4823 
4824 	intel_modeset_setup_hw_state(dev, ctx);
4825 	intel_vga_redisable(to_i915(dev));
4826 
4827 	if (!state)
4828 		return 0;
4829 
4830 	/*
4831 	 * We've duplicated the state, so pointers to the old state are invalid.
4832 	 *
4833 	 * Don't attempt to use the old state until we commit the duplicated state.
4834 	 */
4835 	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
4836 		/*
4837 		 * Force recalculation even if we restore
4838 		 * current state. With fast modeset this may not result
4839 		 * in a modeset when the state is compatible.
4840 		 */
4841 		crtc_state->mode_changed = true;
4842 	}
4843 
4844 	/* ignore any reset values/BIOS leftovers in the WM registers */
4845 	if (!HAS_GMCH(to_i915(dev)))
4846 		to_intel_atomic_state(state)->skip_intermediate_wm = true;
4847 
4848 	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
4849 
4850 	drm_WARN_ON(dev, ret == -EDEADLK);
4851 	return ret;
4852 }
4853 
4854 static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
4855 {
4856 	return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
4857 		intel_has_gpu_reset(&dev_priv->gt));
4858 }
4859 
4860 void intel_display_prepare_reset(struct drm_i915_private *dev_priv)
4861 {
4862 	struct drm_device *dev = &dev_priv->drm;
4863 	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
4864 	struct drm_atomic_state *state;
4865 	int ret;
4866 
4867 	if (!HAS_DISPLAY(dev_priv))
4868 		return;
4869 
4870 	/* reset doesn't touch the display */
4871 	if (!dev_priv->params.force_reset_modeset_test &&
4872 	    !gpu_reset_clobbers_display(dev_priv))
4873 		return;
4874 
4875 	/* We have a modeset vs reset deadlock, defensively unbreak it. */
4876 	set_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
4877 	smp_mb__after_atomic();
4878 	wake_up_bit(&dev_priv->gt.reset.flags, I915_RESET_MODESET);
4879 
4880 	if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
4881 		drm_dbg_kms(&dev_priv->drm,
4882 			    "Modeset potentially stuck, unbreaking through wedging\n");
4883 		intel_gt_set_wedged(&dev_priv->gt);
4884 	}
4885 
4886 	/*
4887 	 * Need mode_config.mutex so that we don't
4888 	 * trample ongoing ->detect() and whatnot.
4889 	 */
4890 	mutex_lock(&dev->mode_config.mutex);
4891 	drm_modeset_acquire_init(ctx, 0);
4892 	while (1) {
4893 		ret = drm_modeset_lock_all_ctx(dev, ctx);
4894 		if (ret != -EDEADLK)
4895 			break;
4896 
4897 		drm_modeset_backoff(ctx);
4898 	}
4899 	/*
4900 	 * Disabling the crtcs gracefully seems nicer. Also the
4901 	 * g33 docs say we should at least disable all the planes.
4902 	 */
4903 	state = drm_atomic_helper_duplicate_state(dev, ctx);
4904 	if (IS_ERR(state)) {
4905 		ret = PTR_ERR(state);
4906 		drm_err(&dev_priv->drm, "Duplicating state failed with %i\n",
4907 			ret);
4908 		return;
4909 	}
4910 
4911 	ret = drm_atomic_helper_disable_all(dev, ctx);
4912 	if (ret) {
4913 		drm_err(&dev_priv->drm, "Suspending crtcs failed with %i\n",
4914 			ret);
4915 		drm_atomic_state_put(state);
4916 		return;
4917 	}
4918 
4919 	dev_priv->modeset_restore_state = state;
4920 	state->acquire_ctx = ctx;
4921 }
4922 
4923 void intel_display_finish_reset(struct drm_i915_private *dev_priv)
4924 {
4925 	struct drm_device *dev = &dev_priv->drm;
4926 	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
4927 	struct drm_atomic_state *state;
4928 	int ret;
4929 
4930 	if (!HAS_DISPLAY(dev_priv))
4931 		return;
4932 
4933 	/* reset doesn't touch the display */
4934 	if (!test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
4935 		return;
4936 
4937 	state = fetch_and_zero(&dev_priv->modeset_restore_state);
4938 	if (!state)
4939 		goto unlock;
4940 
4941 	/* reset doesn't touch the display */
4942 	if (!gpu_reset_clobbers_display(dev_priv)) {
4943 		/* for testing only restore the display */
4944 		ret = __intel_display_resume(dev, state, ctx);
4945 		if (ret)
4946 			drm_err(&dev_priv->drm,
4947 				"Restoring old state failed with %i\n", ret);
4948 	} else {
4949 		/*
4950 		 * The display has been reset as well,
4951 		 * so we need a full re-initialization.
4952 		 */
4953 		intel_pps_unlock_regs_wa(dev_priv);
4954 		intel_modeset_init_hw(dev_priv);
4955 		intel_init_clock_gating(dev_priv);
4956 		intel_hpd_init(dev_priv);
4957 
4958 		ret = __intel_display_resume(dev, state, ctx);
4959 		if (ret)
4960 			drm_err(&dev_priv->drm,
4961 				"Restoring old state failed with %i\n", ret);
4962 
4963 		intel_hpd_poll_disable(dev_priv);
4964 	}
4965 
4966 	drm_atomic_state_put(state);
4967 unlock:
4968 	drm_modeset_drop_locks(ctx);
4969 	drm_modeset_acquire_fini(ctx);
4970 	mutex_unlock(&dev->mode_config.mutex);
4971 
4972 	clear_bit_unlock(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
4973 }
4974 
4975 static void icl_set_pipe_chicken(struct intel_crtc *crtc)
4976 {
4977 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4978 	enum pipe pipe = crtc->pipe;
4979 	u32 tmp;
4980 
4981 	tmp = intel_de_read(dev_priv, PIPE_CHICKEN(pipe));
4982 
4983 	/*
4984 	 * Display WA #1153: icl
4985 	 * enable hardware to bypass the alpha math
4986 	 * and rounding for per-pixel values 00 and 0xff
4987 	 */
4988 	tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
4989 	/*
4990 	 * Display WA #1605353570: icl
4991 	 * Set the pixel rounding bit to 1 for allowing
4992 	 * passthrough of Frame buffer pixels unmodified
4993 	 * across pipe
4994 	 */
4995 	tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;
4996 	intel_de_write(dev_priv, PIPE_CHICKEN(pipe), tmp);
4997 }
4998 
4999 static void intel_fdi_normal_train(struct intel_crtc *crtc)
5000 {
5001 	struct drm_device *dev = crtc->base.dev;
5002 	struct drm_i915_private *dev_priv = to_i915(dev);
5003 	enum pipe pipe = crtc->pipe;
5004 	i915_reg_t reg;
5005 	u32 temp;
5006 
5007 	/* enable normal train */
5008 	reg = FDI_TX_CTL(pipe);
5009 	temp = intel_de_read(dev_priv, reg);
5010 	if (IS_IVYBRIDGE(dev_priv)) {
5011 		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
5012 		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
5013 	} else {
5014 		temp &= ~FDI_LINK_TRAIN_NONE;
5015 		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
5016 	}
5017 	intel_de_write(dev_priv, reg, temp);
5018 
5019 	reg = FDI_RX_CTL(pipe);
5020 	temp = intel_de_read(dev_priv, reg);
5021 	if (HAS_PCH_CPT(dev_priv)) {
5022 		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
5023 		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
5024 	} else {
5025 		temp &= ~FDI_LINK_TRAIN_NONE;
5026 		temp |= FDI_LINK_TRAIN_NONE;
5027 	}
5028 	intel_de_write(dev_priv, reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
5029 
5030 	/* wait one idle pattern time */
5031 	intel_de_posting_read(dev_priv, reg);
5032 	udelay(1000);
5033 
5034 	/* IVB wants error correction enabled */
5035 	if (IS_IVYBRIDGE(dev_priv))
5036 		intel_de_write(dev_priv, reg,
5037 		               intel_de_read(dev_priv, reg) | FDI_FS_ERRC_ENABLE | FDI_FE_ERRC_ENABLE);
5038 }
5039 
5040 /* The FDI link training functions for ILK/Ibexpeak. */
5041 static void ilk_fdi_link_train(struct intel_crtc *crtc,
5042 			       const struct intel_crtc_state *crtc_state)
5043 {
5044 	struct drm_device *dev = crtc->base.dev;
5045 	struct drm_i915_private *dev_priv = to_i915(dev);
5046 	enum pipe pipe = crtc->pipe;
5047 	i915_reg_t reg;
5048 	u32 temp, tries;
5049 
5050 	/* FDI needs bits from pipe first */
5051 	assert_pipe_enabled(dev_priv, crtc_state->cpu_transcoder);
5052 
5053 	/* Train 1: unmask the FDI RX interrupt symbol_lock and bit_lock bits
5054 	   to report the train result */
5055 	reg = FDI_RX_IMR(pipe);
5056 	temp = intel_de_read(dev_priv, reg);
5057 	temp &= ~FDI_RX_SYMBOL_LOCK;
5058 	temp &= ~FDI_RX_BIT_LOCK;
5059 	intel_de_write(dev_priv, reg, temp);
5060 	intel_de_read(dev_priv, reg);
5061 	udelay(150);
5062 
5063 	/* enable CPU FDI TX and PCH FDI RX */
5064 	reg = FDI_TX_CTL(pipe);
5065 	temp = intel_de_read(dev_priv, reg);
5066 	temp &= ~FDI_DP_PORT_WIDTH_MASK;
5067 	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
5068 	temp &= ~FDI_LINK_TRAIN_NONE;
5069 	temp |= FDI_LINK_TRAIN_PATTERN_1;
5070 	intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);
5071 
5072 	reg = FDI_RX_CTL(pipe);
5073 	temp = intel_de_read(dev_priv, reg);
5074 	temp &= ~FDI_LINK_TRAIN_NONE;
5075 	temp |= FDI_LINK_TRAIN_PATTERN_1;
5076 	intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);
5077 
5078 	intel_de_posting_read(dev_priv, reg);
5079 	udelay(150);
5080 
5081 	/* Ironlake workaround, enable clock pointer after FDI enable */
5082 	intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
5083 		       FDI_RX_PHASE_SYNC_POINTER_OVR);
5084 	intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
5085 		       FDI_RX_PHASE_SYNC_POINTER_OVR | FDI_RX_PHASE_SYNC_POINTER_EN);
5086 
5087 	reg = FDI_RX_IIR(pipe);
5088 	for (tries = 0; tries < 5; tries++) {
5089 		temp = intel_de_read(dev_priv, reg);
5090 		drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
5091 
5092 		if ((temp & FDI_RX_BIT_LOCK)) {
5093 			drm_dbg_kms(&dev_priv->drm, "FDI train 1 done.\n");
5094 			intel_de_write(dev_priv, reg, temp | FDI_RX_BIT_LOCK);
5095 			break;
5096 		}
5097 	}
5098 	if (tries == 5)
5099 		drm_err(&dev_priv->drm, "FDI train 1 fail!\n");
5100 
5101 	/* Train 2 */
5102 	reg = FDI_TX_CTL(pipe);
5103 	temp = intel_de_read(dev_priv, reg);
5104 	temp &= ~FDI_LINK_TRAIN_NONE;
5105 	temp |= FDI_LINK_TRAIN_PATTERN_2;
5106 	intel_de_write(dev_priv, reg, temp);
5107 
5108 	reg = FDI_RX_CTL(pipe);
5109 	temp = intel_de_read(dev_priv, reg);
5110 	temp &= ~FDI_LINK_TRAIN_NONE;
5111 	temp |= FDI_LINK_TRAIN_PATTERN_2;
5112 	intel_de_write(dev_priv, reg, temp);
5113 
5114 	intel_de_posting_read(dev_priv, reg);
5115 	udelay(150);
5116 
5117 	reg = FDI_RX_IIR(pipe);
5118 	for (tries = 0; tries < 5; tries++) {
5119 		temp = intel_de_read(dev_priv, reg);
5120 		drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
5121 
5122 		if (temp & FDI_RX_SYMBOL_LOCK) {
5123 			intel_de_write(dev_priv, reg,
5124 				       temp | FDI_RX_SYMBOL_LOCK);
5125 			drm_dbg_kms(&dev_priv->drm, "FDI train 2 done.\n");
5126 			break;
5127 		}
5128 	}
5129 	if (tries == 5)
5130 		drm_err(&dev_priv->drm, "FDI train 2 fail!\n");
5131 
5132 	drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
5133 
5134 }
5135 
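/*
 * Voltage swing / pre-emphasis levels to try, in order, during FDI link
 * training on SNB-B; also reused by the IVB manual training fallback below.
 */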
5136 static const int snb_b_fdi_train_param[] = {
5137 	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
5138 	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
5139 	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
5140 	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
5141 };
5142 
5143 /* The FDI link training functions for SNB/Cougarpoint. */
5144 static void gen6_fdi_link_train(struct intel_crtc *crtc,
5145 				const struct intel_crtc_state *crtc_state)
5146 {
5147 	struct drm_device *dev = crtc->base.dev;
5148 	struct drm_i915_private *dev_priv = to_i915(dev);
5149 	enum pipe pipe = crtc->pipe;
5150 	i915_reg_t reg;
5151 	u32 temp, i, retry;
5152 
5153 	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
5154 	   for train result */
5155 	reg = FDI_RX_IMR(pipe);
5156 	temp = intel_de_read(dev_priv, reg);
5157 	temp &= ~FDI_RX_SYMBOL_LOCK;
5158 	temp &= ~FDI_RX_BIT_LOCK;
5159 	intel_de_write(dev_priv, reg, temp);
5160 
5161 	intel_de_posting_read(dev_priv, reg);
5162 	udelay(150);
5163 
5164 	/* enable CPU FDI TX and PCH FDI RX */
5165 	reg = FDI_TX_CTL(pipe);
5166 	temp = intel_de_read(dev_priv, reg);
5167 	temp &= ~FDI_DP_PORT_WIDTH_MASK;
5168 	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
5169 	temp &= ~FDI_LINK_TRAIN_NONE;
5170 	temp |= FDI_LINK_TRAIN_PATTERN_1;
5171 	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
5172 	/* SNB-B */
5173 	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
5174 	intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);
5175 
5176 	intel_de_write(dev_priv, FDI_RX_MISC(pipe),
5177 		       FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
5178 
5179 	reg = FDI_RX_CTL(pipe);
5180 	temp = intel_de_read(dev_priv, reg);
5181 	if (HAS_PCH_CPT(dev_priv)) {
5182 		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
5183 		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
5184 	} else {
5185 		temp &= ~FDI_LINK_TRAIN_NONE;
5186 		temp |= FDI_LINK_TRAIN_PATTERN_1;
5187 	}
5188 	intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);
5189 
5190 	intel_de_posting_read(dev_priv, reg);
5191 	udelay(150);
5192 
5193 	for (i = 0; i < 4; i++) {
5194 		reg = FDI_TX_CTL(pipe);
5195 		temp = intel_de_read(dev_priv, reg);
5196 		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
5197 		temp |= snb_b_fdi_train_param[i];
5198 		intel_de_write(dev_priv, reg, temp);
5199 
5200 		intel_de_posting_read(dev_priv, reg);
5201 		udelay(500);
5202 
5203 		for (retry = 0; retry < 5; retry++) {
5204 			reg = FDI_RX_IIR(pipe);
5205 			temp = intel_de_read(dev_priv, reg);
5206 			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
5207 			if (temp & FDI_RX_BIT_LOCK) {
5208 				intel_de_write(dev_priv, reg,
5209 					       temp | FDI_RX_BIT_LOCK);
5210 				drm_dbg_kms(&dev_priv->drm,
5211 					    "FDI train 1 done.\n");
5212 				break;
5213 			}
5214 			udelay(50);
5215 		}
5216 		if (retry < 5)
5217 			break;
5218 	}
5219 	if (i == 4)
5220 		drm_err(&dev_priv->drm, "FDI train 1 fail!\n");
5221 
5222 	/* Train 2 */
5223 	reg = FDI_TX_CTL(pipe);
5224 	temp = intel_de_read(dev_priv, reg);
5225 	temp &= ~FDI_LINK_TRAIN_NONE;
5226 	temp |= FDI_LINK_TRAIN_PATTERN_2;
5227 	if (IS_GEN(dev_priv, 6)) {
5228 		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
5229 		/* SNB-B */
5230 		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
5231 	}
5232 	intel_de_write(dev_priv, reg, temp);
5233 
5234 	reg = FDI_RX_CTL(pipe);
5235 	temp = intel_de_read(dev_priv, reg);
5236 	if (HAS_PCH_CPT(dev_priv)) {
5237 		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
5238 		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
5239 	} else {
5240 		temp &= ~FDI_LINK_TRAIN_NONE;
5241 		temp |= FDI_LINK_TRAIN_PATTERN_2;
5242 	}
5243 	intel_de_write(dev_priv, reg, temp);
5244 
5245 	intel_de_posting_read(dev_priv, reg);
5246 	udelay(150);
5247 
5248 	for (i = 0; i < 4; i++) {
5249 		reg = FDI_TX_CTL(pipe);
5250 		temp = intel_de_read(dev_priv, reg);
5251 		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
5252 		temp |= snb_b_fdi_train_param[i];
5253 		intel_de_write(dev_priv, reg, temp);
5254 
5255 		intel_de_posting_read(dev_priv, reg);
5256 		udelay(500);
5257 
5258 		for (retry = 0; retry < 5; retry++) {
5259 			reg = FDI_RX_IIR(pipe);
5260 			temp = intel_de_read(dev_priv, reg);
5261 			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
5262 			if (temp & FDI_RX_SYMBOL_LOCK) {
5263 				intel_de_write(dev_priv, reg,
5264 					       temp | FDI_RX_SYMBOL_LOCK);
5265 				drm_dbg_kms(&dev_priv->drm,
5266 					    "FDI train 2 done.\n");
5267 				break;
5268 			}
5269 			udelay(50);
5270 		}
5271 		if (retry < 5)
5272 			break;
5273 	}
5274 	if (i == 4)
5275 		drm_err(&dev_priv->drm, "FDI train 2 fail!\n");
5276 
5277 	drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
5278 }
5279 
5280 /* Manual link training for Ivy Bridge A0 parts */
5281 static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
5282 				      const struct intel_crtc_state *crtc_state)
5283 {
5284 	struct drm_device *dev = crtc->base.dev;
5285 	struct drm_i915_private *dev_priv = to_i915(dev);
5286 	enum pipe pipe = crtc->pipe;
5287 	i915_reg_t reg;
5288 	u32 temp, i, j;
5289 
5290 	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
5291 	   for train result */
5292 	reg = FDI_RX_IMR(pipe);
5293 	temp = intel_de_read(dev_priv, reg);
5294 	temp &= ~FDI_RX_SYMBOL_LOCK;
5295 	temp &= ~FDI_RX_BIT_LOCK;
5296 	intel_de_write(dev_priv, reg, temp);
5297 
5298 	intel_de_posting_read(dev_priv, reg);
5299 	udelay(150);
5300 
5301 	drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR before link train 0x%x\n",
5302 		    intel_de_read(dev_priv, FDI_RX_IIR(pipe)));
5303 
5304 	/* Try each vswing and preemphasis setting twice before moving on */
5305 	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
5306 		/* disable first in case we need to retry */
5307 		reg = FDI_TX_CTL(pipe);
5308 		temp = intel_de_read(dev_priv, reg);
5309 		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
5310 		temp &= ~FDI_TX_ENABLE;
5311 		intel_de_write(dev_priv, reg, temp);
5312 
5313 		reg = FDI_RX_CTL(pipe);
5314 		temp = intel_de_read(dev_priv, reg);
5315 		temp &= ~FDI_LINK_TRAIN_AUTO;
5316 		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
5317 		temp &= ~FDI_RX_ENABLE;
5318 		intel_de_write(dev_priv, reg, temp);
5319 
5320 		/* enable CPU FDI TX and PCH FDI RX */
5321 		reg = FDI_TX_CTL(pipe);
5322 		temp = intel_de_read(dev_priv, reg);
5323 		temp &= ~FDI_DP_PORT_WIDTH_MASK;
5324 		temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
5325 		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
5326 		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
5327 		temp |= snb_b_fdi_train_param[j/2];
5328 		temp |= FDI_COMPOSITE_SYNC;
5329 		intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);
5330 
5331 		intel_de_write(dev_priv, FDI_RX_MISC(pipe),
5332 			       FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
5333 
5334 		reg = FDI_RX_CTL(pipe);
5335 		temp = intel_de_read(dev_priv, reg);
5336 		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
5337 		temp |= FDI_COMPOSITE_SYNC;
5338 		intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);
5339 
5340 		intel_de_posting_read(dev_priv, reg);
5341 		udelay(1); /* should be 0.5us */
5342 
5343 		for (i = 0; i < 4; i++) {
5344 			reg = FDI_RX_IIR(pipe);
5345 			temp = intel_de_read(dev_priv, reg);
5346 			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
5347 
5348 			if (temp & FDI_RX_BIT_LOCK ||
5349 			    (intel_de_read(dev_priv, reg) & FDI_RX_BIT_LOCK)) {
5350 				intel_de_write(dev_priv, reg,
5351 					       temp | FDI_RX_BIT_LOCK);
5352 				drm_dbg_kms(&dev_priv->drm,
5353 					    "FDI train 1 done, level %i.\n",
5354 					    i);
5355 				break;
5356 			}
5357 			udelay(1); /* should be 0.5us */
5358 		}
5359 		if (i == 4) {
5360 			drm_dbg_kms(&dev_priv->drm,
5361 				    "FDI train 1 fail on vswing %d\n", j / 2);
5362 			continue;
5363 		}
5364 
5365 		/* Train 2 */
5366 		reg = FDI_TX_CTL(pipe);
5367 		temp = intel_de_read(dev_priv, reg);
5368 		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
5369 		temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
5370 		intel_de_write(dev_priv, reg, temp);
5371 
5372 		reg = FDI_RX_CTL(pipe);
5373 		temp = intel_de_read(dev_priv, reg);
5374 		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
5375 		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
5376 		intel_de_write(dev_priv, reg, temp);
5377 
5378 		intel_de_posting_read(dev_priv, reg);
5379 		udelay(2); /* should be 1.5us */
5380 
5381 		for (i = 0; i < 4; i++) {
5382 			reg = FDI_RX_IIR(pipe);
5383 			temp = intel_de_read(dev_priv, reg);
5384 			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
5385 
5386 			if (temp & FDI_RX_SYMBOL_LOCK ||
5387 			    (intel_de_read(dev_priv, reg) & FDI_RX_SYMBOL_LOCK)) {
5388 				intel_de_write(dev_priv, reg,
5389 					       temp | FDI_RX_SYMBOL_LOCK);
5390 				drm_dbg_kms(&dev_priv->drm,
5391 					    "FDI train 2 done, level %i.\n",
5392 					    i);
5393 				goto train_done;
5394 			}
5395 			udelay(2); /* should be 1.5us */
5396 		}
5397 		if (i == 4)
5398 			drm_dbg_kms(&dev_priv->drm,
5399 				    "FDI train 2 fail on vswing %d\n", j / 2);
5400 	}
5401 
5402 train_done:
5403 	drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
5404 }
5405 
5406 static void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
5407 {
5408 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
5409 	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
5410 	enum pipe pipe = intel_crtc->pipe;
5411 	i915_reg_t reg;
5412 	u32 temp;
5413 
5414 	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
5415 	reg = FDI_RX_CTL(pipe);
5416 	temp = intel_de_read(dev_priv, reg);
5417 	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
5418 	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
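	/*
	 * The FDI RX bpc field (bits 18:16) mirrors the PIPECONF bpc field
	 * (bits 7:5), hence the << 11 below; the 0x7 << 16 above clears it.
	 */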
5419 	temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
5420 	intel_de_write(dev_priv, reg, temp | FDI_RX_PLL_ENABLE);
5421 
5422 	intel_de_posting_read(dev_priv, reg);
5423 	udelay(200);
5424 
5425 	/* Switch from Rawclk to PCDclk */
5426 	temp = intel_de_read(dev_priv, reg);
5427 	intel_de_write(dev_priv, reg, temp | FDI_PCDCLK);
5428 
5429 	intel_de_posting_read(dev_priv, reg);
5430 	udelay(200);
5431 
5432 	/* Enable CPU FDI TX PLL, always on for Ironlake */
5433 	reg = FDI_TX_CTL(pipe);
5434 	temp = intel_de_read(dev_priv, reg);
5435 	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
5436 		intel_de_write(dev_priv, reg, temp | FDI_TX_PLL_ENABLE);
5437 
5438 		intel_de_posting_read(dev_priv, reg);
5439 		udelay(100);
5440 	}
5441 }
5442 
5443 static void ilk_fdi_pll_disable(struct intel_crtc *intel_crtc)
5444 {
5445 	struct drm_device *dev = intel_crtc->base.dev;
5446 	struct drm_i915_private *dev_priv = to_i915(dev);
5447 	enum pipe pipe = intel_crtc->pipe;
5448 	i915_reg_t reg;
5449 	u32 temp;
5450 
5451 	/* Switch from PCDclk to Rawclk */
5452 	reg = FDI_RX_CTL(pipe);
5453 	temp = intel_de_read(dev_priv, reg);
5454 	intel_de_write(dev_priv, reg, temp & ~FDI_PCDCLK);
5455 
5456 	/* Disable CPU FDI TX PLL */
5457 	reg = FDI_TX_CTL(pipe);
5458 	temp = intel_de_read(dev_priv, reg);
5459 	intel_de_write(dev_priv, reg, temp & ~FDI_TX_PLL_ENABLE);
5460 
5461 	intel_de_posting_read(dev_priv, reg);
5462 	udelay(100);
5463 
5464 	reg = FDI_RX_CTL(pipe);
5465 	temp = intel_de_read(dev_priv, reg);
5466 	intel_de_write(dev_priv, reg, temp & ~FDI_RX_PLL_ENABLE);
5467 
5468 	/* Wait for the clocks to turn off. */
5469 	intel_de_posting_read(dev_priv, reg);
5470 	udelay(100);
5471 }
5472 
5473 static void ilk_fdi_disable(struct intel_crtc *crtc)
5474 {
5475 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5476 	enum pipe pipe = crtc->pipe;
5477 	i915_reg_t reg;
5478 	u32 temp;
5479 
5480 	/* disable CPU FDI tx and PCH FDI rx */
5481 	reg = FDI_TX_CTL(pipe);
5482 	temp = intel_de_read(dev_priv, reg);
5483 	intel_de_write(dev_priv, reg, temp & ~FDI_TX_ENABLE);
5484 	intel_de_posting_read(dev_priv, reg);
5485 
5486 	reg = FDI_RX_CTL(pipe);
5487 	temp = intel_de_read(dev_priv, reg);
5488 	temp &= ~(0x7 << 16);
5489 	temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
5490 	intel_de_write(dev_priv, reg, temp & ~FDI_RX_ENABLE);
5491 
5492 	intel_de_posting_read(dev_priv, reg);
5493 	udelay(100);
5494 
5495 	/* Ironlake workaround, disable clock pointer after downing FDI */
5496 	if (HAS_PCH_IBX(dev_priv))
5497 		intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
5498 			       FDI_RX_PHASE_SYNC_POINTER_OVR);
5499 
5500 	/* still set train pattern 1 */
5501 	reg = FDI_TX_CTL(pipe);
5502 	temp = intel_de_read(dev_priv, reg);
5503 	temp &= ~FDI_LINK_TRAIN_NONE;
5504 	temp |= FDI_LINK_TRAIN_PATTERN_1;
5505 	intel_de_write(dev_priv, reg, temp);
5506 
5507 	reg = FDI_RX_CTL(pipe);
5508 	temp = intel_de_read(dev_priv, reg);
5509 	if (HAS_PCH_CPT(dev_priv)) {
5510 		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
5511 		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
5512 	} else {
5513 		temp &= ~FDI_LINK_TRAIN_NONE;
5514 		temp |= FDI_LINK_TRAIN_PATTERN_1;
5515 	}
5516 	/* BPC in FDI rx is consistent with that in PIPECONF */
5517 	temp &= ~(0x07 << 16);
5518 	temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
5519 	intel_de_write(dev_priv, reg, temp);
5520 
5521 	intel_de_posting_read(dev_priv, reg);
5522 	udelay(100);
5523 }
5524 
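/*
 * Check whether any CRTC still has an atomic commit whose cleanup work
 * hasn't completed, i.e. a framebuffer may still be pinned for scanout.
 * If one is found, wait one vblank on that CRTC to let the cleanup make
 * progress and return true.
 */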
5525 bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
5526 {
5527 	struct drm_crtc *crtc;
5528 	bool cleanup_done;
5529 
5530 	drm_for_each_crtc(crtc, &dev_priv->drm) {
5531 		struct drm_crtc_commit *commit;
5532 		spin_lock(&crtc->commit_lock);
5533 		commit = list_first_entry_or_null(&crtc->commit_list,
5534 						  struct drm_crtc_commit, commit_entry);
5535 		cleanup_done = commit ?
5536 			try_wait_for_completion(&commit->cleanup_done) : true;
5537 		spin_unlock(&crtc->commit_lock);
5538 
5539 		if (cleanup_done)
5540 			continue;
5541 
5542 		drm_crtc_wait_one_vblank(crtc);
5543 
5544 		return true;
5545 	}
5546 
5547 	return false;
5548 }
5549 
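/* Gate the PCH pixel clock, then stop the iCLKIP SSC modulator via sideband. */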
5550 void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
5551 {
5552 	u32 temp;
5553 
5554 	intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_GATE);
5555 
5556 	mutex_lock(&dev_priv->sb_lock);
5557 
5558 	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
5559 	temp |= SBI_SSCCTL_DISABLE;
5560 	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
5561 
5562 	mutex_unlock(&dev_priv->sb_lock);
5563 }
5564 
5565 /* Program iCLKIP clock to the desired frequency */
5566 static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
5567 {
5568 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5569 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5570 	int clock = crtc_state->hw.adjusted_mode.crtc_clock;
5571 	u32 divsel, phaseinc, auxdiv, phasedir = 0;
5572 	u32 temp;
5573 
5574 	lpt_disable_iclkip(dev_priv);
5575 
5576 	/* The iCLK virtual clock root frequency is in MHz,
5577 	 * but the adjusted_mode->crtc_clock is in KHz. To get the
5578 	 * divisors, it is necessary to divide one by another, so we
5579 	 * convert the virtual clock precision to KHz here for higher
5580 	 * precision.
5581 	 */
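	/*
	 * Worked example of the arithmetic below (not from Bspec): for a
	 * 108000 kHz pixel clock with auxdiv = 0, desired_divisor =
	 * DIV_ROUND_CLOSEST(172800000, 108000) = 1600, giving divsel =
	 * 1600 / 64 - 2 = 23 and phaseinc = 1600 % 64 = 0. Sanity check:
	 * 172800000 / ((23 + 2) * 64 + 0) = 108000 kHz, as desired.
	 */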
5582 	for (auxdiv = 0; auxdiv < 2; auxdiv++) {
5583 		u32 iclk_virtual_root_freq = 172800 * 1000;
5584 		u32 iclk_pi_range = 64;
5585 		u32 desired_divisor;
5586 
5587 		desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
5588 						    clock << auxdiv);
5589 		divsel = (desired_divisor / iclk_pi_range) - 2;
5590 		phaseinc = desired_divisor % iclk_pi_range;
5591 
5592 		/*
5593 		 * Near 20MHz is a corner case which is
5594 		 * out of range for the 7-bit divisor
5595 		 */
5596 		if (divsel <= 0x7f)
5597 			break;
5598 	}
5599 
5600 	/* This should not happen with any sane values */
5601 	drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
5602 		    ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
5603 	drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIR(phasedir) &
5604 		    ~SBI_SSCDIVINTPHASE_INCVAL_MASK);
5605 
5606 	drm_dbg_kms(&dev_priv->drm,
5607 		    "iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
5608 		    clock, auxdiv, divsel, phasedir, phaseinc);
5609 
5610 	mutex_lock(&dev_priv->sb_lock);
5611 
5612 	/* Program SSCDIVINTPHASE6 */
5613 	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
5614 	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
5615 	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
5616 	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
5617 	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
5618 	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
5619 	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
5620 	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
5621 
5622 	/* Program SSCAUXDIV */
5623 	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
5624 	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
5625 	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
5626 	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
5627 
5628 	/* Enable modulator and associated divider */
5629 	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
5630 	temp &= ~SBI_SSCCTL_DISABLE;
5631 	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
5632 
5633 	mutex_unlock(&dev_priv->sb_lock);
5634 
5635 	/* Wait for initialization time */
5636 	udelay(24);
5637 
5638 	intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_UNGATE);
5639 }
5640 
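/*
 * Read back the currently programmed iCLKIP frequency in kHz. This is the
 * inverse of lpt_program_iclkip(): reconstruct the divisor as
 * (divsel + 2) * iclk_pi_range + phaseinc and divide it, shifted by auxdiv,
 * into the 172.8 GHz virtual root clock. Returns 0 if iCLKIP is gated or
 * the SSC modulator is disabled.
 */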
5641 int lpt_get_iclkip(struct drm_i915_private *dev_priv)
5642 {
5643 	u32 divsel, phaseinc, auxdiv;
5644 	u32 iclk_virtual_root_freq = 172800 * 1000;
5645 	u32 iclk_pi_range = 64;
5646 	u32 desired_divisor;
5647 	u32 temp;
5648 
5649 	if ((intel_de_read(dev_priv, PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
5650 		return 0;
5651 
5652 	mutex_lock(&dev_priv->sb_lock);
5653 
5654 	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
5655 	if (temp & SBI_SSCCTL_DISABLE) {
5656 		mutex_unlock(&dev_priv->sb_lock);
5657 		return 0;
5658 	}
5659 
5660 	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
5661 	divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
5662 		SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
5663 	phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
5664 		SBI_SSCDIVINTPHASE_INCVAL_SHIFT;
5665 
5666 	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
5667 	auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
5668 		SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;
5669 
5670 	mutex_unlock(&dev_priv->sb_lock);
5671 
5672 	desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;
5673 
5674 	return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
5675 				 desired_divisor << auxdiv);
5676 }
5677 
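/*
 * Copy the CPU transcoder's H/V timing registers verbatim to the PCH
 * transcoder, so both ends of the FDI link run with identical timings.
 */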
5678 static void ilk_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state,
5679 					   enum pipe pch_transcoder)
5680 {
5681 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5682 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5683 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
5684 
5685 	intel_de_write(dev_priv, PCH_TRANS_HTOTAL(pch_transcoder),
5686 		       intel_de_read(dev_priv, HTOTAL(cpu_transcoder)));
5687 	intel_de_write(dev_priv, PCH_TRANS_HBLANK(pch_transcoder),
5688 		       intel_de_read(dev_priv, HBLANK(cpu_transcoder)));
5689 	intel_de_write(dev_priv, PCH_TRANS_HSYNC(pch_transcoder),
5690 		       intel_de_read(dev_priv, HSYNC(cpu_transcoder)));
5691 
5692 	intel_de_write(dev_priv, PCH_TRANS_VTOTAL(pch_transcoder),
5693 		       intel_de_read(dev_priv, VTOTAL(cpu_transcoder)));
5694 	intel_de_write(dev_priv, PCH_TRANS_VBLANK(pch_transcoder),
5695 		       intel_de_read(dev_priv, VBLANK(cpu_transcoder)));
5696 	intel_de_write(dev_priv, PCH_TRANS_VSYNC(pch_transcoder),
5697 		       intel_de_read(dev_priv, VSYNC(cpu_transcoder)));
5698 	intel_de_write(dev_priv, PCH_TRANS_VSYNCSHIFT(pch_transcoder),
5699 		       intel_de_read(dev_priv, VSYNCSHIFT(cpu_transcoder)));
5700 }
5701 
5702 static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
5703 {
5704 	u32 temp;
5705 
5706 	temp = intel_de_read(dev_priv, SOUTH_CHICKEN1);
5707 	if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
5708 		return;
5709 
5710 	drm_WARN_ON(&dev_priv->drm,
5711 		    intel_de_read(dev_priv, FDI_RX_CTL(PIPE_B)) &
5712 		    FDI_RX_ENABLE);
5713 	drm_WARN_ON(&dev_priv->drm,
5714 		    intel_de_read(dev_priv, FDI_RX_CTL(PIPE_C)) &
5715 		    FDI_RX_ENABLE);
5716 
5717 	temp &= ~FDI_BC_BIFURCATION_SELECT;
5718 	if (enable)
5719 		temp |= FDI_BC_BIFURCATION_SELECT;
5720 
5721 	drm_dbg_kms(&dev_priv->drm, "%sabling fdi C rx\n",
5722 		    enable ? "en" : "dis");
5723 	intel_de_write(dev_priv, SOUTH_CHICKEN1, temp);
5724 	intel_de_posting_read(dev_priv, SOUTH_CHICKEN1);
5725 }
5726 
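/*
 * On IVB/CPT the FDI B and C links share lanes: with bifurcation enabled
 * the lanes are split between FDI B and FDI C (2 each), and with it
 * disabled pipe B may use more than 2 lanes but FDI C rx is unusable.
 * Hence pipe B only runs with bifurcation disabled when it needs more than
 * 2 lanes, while pipe C always requires it enabled (and neither RX may be
 * enabled while the bit is flipped, see the drm_WARN_ONs above).
 */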
5727 static void ivb_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
5728 {
5729 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5730 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5731 
5732 	switch (crtc->pipe) {
5733 	case PIPE_A:
5734 		break;
5735 	case PIPE_B:
5736 		if (crtc_state->fdi_lanes > 2)
5737 			cpt_set_fdi_bc_bifurcation(dev_priv, false);
5738 		else
5739 			cpt_set_fdi_bc_bifurcation(dev_priv, true);
5740 
5741 		break;
5742 	case PIPE_C:
5743 		cpt_set_fdi_bc_bifurcation(dev_priv, true);
5744 
5745 		break;
5746 	default:
5747 		BUG();
5748 	}
5749 }
5750 
5751 /*
5752  * Finds the encoder associated with the given CRTC. This can only be
5753  * used when we know that the CRTC isn't feeding multiple encoders!
5754  */
5755 static struct intel_encoder *
5756 intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
5757 			   const struct intel_crtc_state *crtc_state)
5758 {
5759 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5760 	const struct drm_connector_state *connector_state;
5761 	const struct drm_connector *connector;
5762 	struct intel_encoder *encoder = NULL;
5763 	int num_encoders = 0;
5764 	int i;
5765 
5766 	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
5767 		if (connector_state->crtc != &crtc->base)
5768 			continue;
5769 
5770 		encoder = to_intel_encoder(connector_state->best_encoder);
5771 		num_encoders++;
5772 	}
5773 
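	/*
	 * encoder is NULL here if no connector in the state targeted this
	 * crtc, hence the WARN below uses the atomic state's device rather
	 * than encoder->base.dev.
	 */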
5774 	drm_WARN(state->base.dev, num_encoders != 1,
5775 		 "%d encoders for pipe %c\n",
5776 		 num_encoders, pipe_name(crtc->pipe));
5777 
5778 	return encoder;
5779 }
5780 
5781 /*
5782  * Enable PCH resources required for PCH ports:
5783  *   - PCH PLLs
5784  *   - FDI training & RX/TX
5785  *   - update transcoder timings
5786  *   - DP transcoding bits
5787  *   - transcoder
5788  */
5789 static void ilk_pch_enable(const struct intel_atomic_state *state,
5790 			   const struct intel_crtc_state *crtc_state)
5791 {
5792 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5793 	struct drm_device *dev = crtc->base.dev;
5794 	struct drm_i915_private *dev_priv = to_i915(dev);
5795 	enum pipe pipe = crtc->pipe;
5796 	u32 temp;
5797 
5798 	assert_pch_transcoder_disabled(dev_priv, pipe);
5799 
5800 	if (IS_IVYBRIDGE(dev_priv))
5801 		ivb_update_fdi_bc_bifurcation(crtc_state);
5802 
5803 	/* Write the TU size bits before fdi link training, so that error
5804 	 * detection works. */
5805 	intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
5806 		       intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
5807 
5808 	/* For PCH output, training FDI link */
5809 	dev_priv->display.fdi_link_train(crtc, crtc_state);
5810 
5811 	/* We need to program the right clock selection before writing the pixel
5812 	 * multiplier into the DPLL. */
5813 	if (HAS_PCH_CPT(dev_priv)) {
5814 		u32 sel;
5815 
5816 		temp = intel_de_read(dev_priv, PCH_DPLL_SEL);
5817 		temp |= TRANS_DPLL_ENABLE(pipe);
5818 		sel = TRANS_DPLLB_SEL(pipe);
5819 		if (crtc_state->shared_dpll ==
5820 		    intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
5821 			temp |= sel;
5822 		else
5823 			temp &= ~sel;
5824 		intel_de_write(dev_priv, PCH_DPLL_SEL, temp);
5825 	}
5826 
5827 	/* XXX: pch pll's can be enabled any time before we enable the PCH
5828 	 * transcoder, and we actually should do this to not upset any PCH
5829 	 * transcoder that already uses the clock when we share it.
5830 	 *
5831 	 * Note that enable_shared_dpll tries to do the right thing, but
5832 	 * get_shared_dpll unconditionally resets the pll - we need that to have
5833 	 * the right LVDS enable sequence. */
5834 	intel_enable_shared_dpll(crtc_state);
5835 
5836 	/* set transcoder timing, panel must allow it */
5837 	assert_panel_unlocked(dev_priv, pipe);
5838 	ilk_pch_transcoder_set_timings(crtc_state, pipe);
5839 
5840 	intel_fdi_normal_train(crtc);
5841 
5842 	/* For PCH DP, enable TRANS_DP_CTL */
5843 	if (HAS_PCH_CPT(dev_priv) &&
5844 	    intel_crtc_has_dp_encoder(crtc_state)) {
5845 		const struct drm_display_mode *adjusted_mode =
5846 			&crtc_state->hw.adjusted_mode;
5847 		u32 bpc = (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
5848 		i915_reg_t reg = TRANS_DP_CTL(pipe);
5849 		enum port port;
5850 
5851 		temp = intel_de_read(dev_priv, reg);
5852 		temp &= ~(TRANS_DP_PORT_SEL_MASK |
5853 			  TRANS_DP_SYNC_MASK |
5854 			  TRANS_DP_BPC_MASK);
5855 		temp |= TRANS_DP_OUTPUT_ENABLE;
5856 		temp |= bpc << 9; /* same format but at 11:9 */
5857 
5858 		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
5859 			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
5860 		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
5861 			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
5862 
5863 		port = intel_get_crtc_new_encoder(state, crtc_state)->port;
5864 		drm_WARN_ON(dev, port < PORT_B || port > PORT_D);
5865 		temp |= TRANS_DP_PORT_SEL(port);
5866 
5867 		intel_de_write(dev_priv, reg, temp);
5868 	}
5869 
5870 	ilk_enable_pch_transcoder(crtc_state);
5871 }
5872 
5873 void lpt_pch_enable(const struct intel_crtc_state *crtc_state)
5874 {
5875 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5876 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5877 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
5878 
5879 	assert_pch_transcoder_disabled(dev_priv, PIPE_A);
5880 
5881 	lpt_program_iclkip(crtc_state);
5882 
5883 	/* Set transcoder timing. */
5884 	ilk_pch_transcoder_set_timings(crtc_state, PIPE_A);
5885 
5886 	lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
5887 }
5888 
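/*
 * Sanity check that the pipe is actually scanning out after a mode set:
 * sample the pipe scanline counter (PIPEDSL) and wait up to 5 ms (with one
 * retry) for it to move, logging an error if it stays stuck.
 */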
5889 static void cpt_verify_modeset(struct drm_i915_private *dev_priv,
5890 			       enum pipe pipe)
5891 {
5892 	i915_reg_t dslreg = PIPEDSL(pipe);
5893 	u32 temp;
5894 
5895 	temp = intel_de_read(dev_priv, dslreg);
5896 	udelay(500);
5897 	if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5)) {
5898 		if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5))
5899 			drm_err(&dev_priv->drm,
5900 				"mode set failed: pipe %c stuck\n",
5901 				pipe_name(pipe));
5902 	}
5903 }
5904 
5905 /*
5906  * The hardware phase 0.0 refers to the center of the pixel.
5907  * We want to start from the top/left edge which is phase
5908  * -0.5. That matches how the hardware calculates the scaling
5909  * factors (from top-left of the first pixel to bottom-right
5910  * of the last pixel, as opposed to the pixel centers).
5911  *
5912  * For 4:2:0 subsampled chroma planes we obviously have to
5913  * adjust that so that the chroma sample position lands in
5914  * the right spot.
5915  *
5916  * Note that for packed YCbCr 4:2:2 formats there is no way to
5917  * control chroma siting. The hardware simply replicates the
5918  * chroma samples for both of the luma samples, and thus we don't
5919  * actually get the expected MPEG2 chroma siting convention :(
5920  * The same behaviour is observed on pre-SKL platforms as well.
5921  *
5922  * Theory behind the formula (note that we ignore sub-pixel
5923  * source coordinates):
5924  * s = source sample position
5925  * d = destination sample position
5926  *
5927  * Downscaling 4:1:
5928  * -0.5
5929  * | 0.0
5930  * | |     1.5 (initial phase)
5931  * | |     |
5932  * v v     v
5933  * | s | s | s | s |
5934  * |       d       |
5935  *
5936  * Upscaling 1:4:
5937  * -0.5
5938  * | -0.375 (initial phase)
5939  * | |     0.0
5940  * | |     |
5941  * v v     v
5942  * |       s       |
5943  * | d | d | d | d |
5944  */
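/*
 * Worked example of the formula below (not from Bspec): a non-cosited luma
 * plane (sub == 1) at a 1:1 scale (scale == 0x10000 in .16 fixed point)
 * yields phase = -0x8000 + 0x10000 / 2 = 0, i.e. an initial phase of 0.0
 * with the PS_PHASE_TRIP bit set.
 */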
5945 u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_cosited)
5946 {
5947 	int phase = -0x8000;
5948 	u16 trip = 0;
5949 
5950 	if (chroma_cosited)
5951 		phase += (sub - 1) * 0x8000 / sub;
5952 
5953 	phase += scale / (2 * sub);
5954 
5955 	/*
5956 	 * Hardware initial phase limited to [-0.5:1.5].
5957 	 * Since the max hardware scale factor is 3.0, we
5958 	 * should never actually exceed 1.0 here.
5959 	 */
5960 	WARN_ON(phase < -0x8000 || phase > 0x18000);
5961 
5962 	if (phase < 0)
5963 		phase = 0x10000 + phase;
5964 	else
5965 		trip = PS_PHASE_TRIP;
5966 
5967 	return ((phase >> 2) & PS_PHASE_MASK) | trip;
5968 }
5969 
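/* Pipe scaler source/destination size limits, in pixels. */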
5970 #define SKL_MIN_SRC_W 8
5971 #define SKL_MAX_SRC_W 4096
5972 #define SKL_MIN_SRC_H 8
5973 #define SKL_MAX_SRC_H 4096
5974 #define SKL_MIN_DST_W 8
5975 #define SKL_MAX_DST_W 4096
5976 #define SKL_MIN_DST_H 8
5977 #define SKL_MAX_DST_H 4096
5978 #define ICL_MAX_SRC_W 5120
5979 #define ICL_MAX_SRC_H 4096
5980 #define ICL_MAX_DST_W 5120
5981 #define ICL_MAX_DST_H 4096
5982 #define SKL_MIN_YUV_420_SRC_W 16
5983 #define SKL_MIN_YUV_420_SRC_H 16
5984 
5985 static int
5986 skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
5987 		  unsigned int scaler_user, int *scaler_id,
5988 		  int src_w, int src_h, int dst_w, int dst_h,
5989 		  const struct drm_format_info *format,
5990 		  u64 modifier, bool need_scaler)
5991 {
5992 	struct intel_crtc_scaler_state *scaler_state =
5993 		&crtc_state->scaler_state;
5994 	struct intel_crtc *intel_crtc =
5995 		to_intel_crtc(crtc_state->uapi.crtc);
5996 	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
5997 	const struct drm_display_mode *adjusted_mode =
5998 		&crtc_state->hw.adjusted_mode;
5999 
6000 	/*
6001 	 * Src coordinates are already rotated by 270 degrees for
6002 	 * the 90/270 degree plane rotation cases (to match the
6003 	 * GTT mapping), hence no need to account for rotation here.
6004 	 */
6005 	if (src_w != dst_w || src_h != dst_h)
6006 		need_scaler = true;
6007 
6008 	/*
6009 	 * Scaling/fitting not supported in IF-ID mode in GEN9+
6010 	 * TODO: Interlace fetch mode doesn't support YUV420 planar formats.
6011 	 * Once NV12 is enabled, handle it here while allocating scaler
6012 	 * for NV12.
6013 	 */
6014 	if (INTEL_GEN(dev_priv) >= 9 && crtc_state->hw.enable &&
6015 	    need_scaler && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
6016 		drm_dbg_kms(&dev_priv->drm,
6017 			    "Pipe/Plane scaling not supported with IF-ID mode\n");
6018 		return -EINVAL;
6019 	}
6020 
6021 	/*
6022 	 * If the plane is being disabled, the scaler is no longer required, or
6023 	 * a force detach was requested:
6024 	 *  - free the scaler bound to this plane/crtc
6025 	 *  - to do this, update crtc_state->scaler_state.scaler_users
6026 	 *
6027 	 * The scaler state in crtc_state is marked free here so the scaler can
6028 	 * be assigned to another user; the actual register update to free it is
6029 	 * done in plane/panel-fit programming, hence scaler_id isn't reset here.
6030 	 */
6031 	if (force_detach || !need_scaler) {
6032 		if (*scaler_id >= 0) {
6033 			scaler_state->scaler_users &= ~(1 << scaler_user);
6034 			scaler_state->scalers[*scaler_id].in_use = 0;
6035 
6036 			drm_dbg_kms(&dev_priv->drm,
6037 				    "scaler_user index %u.%u: "
6038 				    "Staged freeing scaler id %d scaler_users = 0x%x\n",
6039 				    intel_crtc->pipe, scaler_user, *scaler_id,
6040 				    scaler_state->scaler_users);
6041 			*scaler_id = -1;
6042 		}
6043 		return 0;
6044 	}
6045 
6046 	if (format && intel_format_info_is_yuv_semiplanar(format, modifier) &&
6047 	    (src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) {
6048 		drm_dbg_kms(&dev_priv->drm,
6049 			    "Planar YUV: src dimensions not met\n");
6050 		return -EINVAL;
6051 	}
6052 
6053 	/* range checks */
6054 	if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
6055 	    dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
6056 	    (INTEL_GEN(dev_priv) >= 11 &&
6057 	     (src_w > ICL_MAX_SRC_W || src_h > ICL_MAX_SRC_H ||
6058 	      dst_w > ICL_MAX_DST_W || dst_h > ICL_MAX_DST_H)) ||
6059 	    (INTEL_GEN(dev_priv) < 11 &&
6060 	     (src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
6061 	      dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H)))	{
6062 		drm_dbg_kms(&dev_priv->drm,
6063 			    "scaler_user index %u.%u: src %ux%u dst %ux%u "
6064 			    "size is out of scaler range\n",
6065 			    intel_crtc->pipe, scaler_user, src_w, src_h,
6066 			    dst_w, dst_h);
6067 		return -EINVAL;
6068 	}
6069 
6070 	/* mark this plane as a scaler user in crtc_state */
6071 	scaler_state->scaler_users |= (1 << scaler_user);
6072 	drm_dbg_kms(&dev_priv->drm, "scaler_user index %u.%u: "
6073 		    "staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
6074 		    intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
6075 		    scaler_state->scaler_users);
6076 
6077 	return 0;
6078 }
6079 
6080 static int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state)
6081 {
6082 	const struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
6083 	int width, height;
6084 
6085 	if (crtc_state->pch_pfit.enabled) {
6086 		width = drm_rect_width(&crtc_state->pch_pfit.dst);
6087 		height = drm_rect_height(&crtc_state->pch_pfit.dst);
6088 	} else {
6089 		width = pipe_mode->crtc_hdisplay;
6090 		height = pipe_mode->crtc_vdisplay;
6091 	}
6092 	return skl_update_scaler(crtc_state, !crtc_state->hw.active,
6093 				 SKL_CRTC_INDEX,
6094 				 &crtc_state->scaler_state.scaler_id,
6095 				 crtc_state->pipe_src_w, crtc_state->pipe_src_h,
6096 				 width, height, NULL, 0,
6097 				 crtc_state->pch_pfit.enabled);
6098 }
6099 
6100 /**
6101  * skl_update_scaler_plane - Stages update to scaler state for a given plane.
6102  * @crtc_state: crtc state containing the scaler state to update
6103  * @plane_state: atomic plane state to update
6104  *
6105  * Return:
6106  *     0 - scaler_usage updated successfully
6107  *     error - requested scaling cannot be supported or other error condition
6108  */
6109 static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
6110 				   struct intel_plane_state *plane_state)
6111 {
6112 	struct intel_plane *intel_plane =
6113 		to_intel_plane(plane_state->uapi.plane);
6114 	struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
6115 	struct drm_framebuffer *fb = plane_state->hw.fb;
6116 	int ret;
6117 	bool force_detach = !fb || !plane_state->uapi.visible;
6118 	bool need_scaler = false;
6119 
6120 	/* Pre-gen11 and SDR planes always need a scaler for planar formats. */
6121 	if (!icl_is_hdr_plane(dev_priv, intel_plane->id) &&
6122 	    fb && intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier))
6123 		need_scaler = true;
6124 
6125 	ret = skl_update_scaler(crtc_state, force_detach,
6126 				drm_plane_index(&intel_plane->base),
6127 				&plane_state->scaler_id,
6128 				drm_rect_width(&plane_state->uapi.src) >> 16,
6129 				drm_rect_height(&plane_state->uapi.src) >> 16,
6130 				drm_rect_width(&plane_state->uapi.dst),
6131 				drm_rect_height(&plane_state->uapi.dst),
6132 				fb ? fb->format : NULL,
6133 				fb ? fb->modifier : 0,
6134 				need_scaler);
6135 
6136 	if (ret || plane_state->scaler_id < 0)
6137 		return ret;
6138 
6139 	/* check colorkey */
6140 	if (plane_state->ckey.flags) {
6141 		drm_dbg_kms(&dev_priv->drm,
6142 			    "[PLANE:%d:%s] scaling with color key not allowed",
6143 			    intel_plane->base.base.id,
6144 			    intel_plane->base.name);
6145 		return -EINVAL;
6146 	}
6147 
6148 	/* Check src format */
6149 	switch (fb->format->format) {
6150 	case DRM_FORMAT_RGB565:
6151 	case DRM_FORMAT_XBGR8888:
6152 	case DRM_FORMAT_XRGB8888:
6153 	case DRM_FORMAT_ABGR8888:
6154 	case DRM_FORMAT_ARGB8888:
6155 	case DRM_FORMAT_XRGB2101010:
6156 	case DRM_FORMAT_XBGR2101010:
6157 	case DRM_FORMAT_ARGB2101010:
6158 	case DRM_FORMAT_ABGR2101010:
6159 	case DRM_FORMAT_YUYV:
6160 	case DRM_FORMAT_YVYU:
6161 	case DRM_FORMAT_UYVY:
6162 	case DRM_FORMAT_VYUY:
6163 	case DRM_FORMAT_NV12:
6164 	case DRM_FORMAT_XYUV8888:
6165 	case DRM_FORMAT_P010:
6166 	case DRM_FORMAT_P012:
6167 	case DRM_FORMAT_P016:
6168 	case DRM_FORMAT_Y210:
6169 	case DRM_FORMAT_Y212:
6170 	case DRM_FORMAT_Y216:
6171 	case DRM_FORMAT_XVYU2101010:
6172 	case DRM_FORMAT_XVYU12_16161616:
6173 	case DRM_FORMAT_XVYU16161616:
6174 		break;
6175 	case DRM_FORMAT_XBGR16161616F:
6176 	case DRM_FORMAT_ABGR16161616F:
6177 	case DRM_FORMAT_XRGB16161616F:
6178 	case DRM_FORMAT_ARGB16161616F:
6179 		if (INTEL_GEN(dev_priv) >= 11)
6180 			break;
6181 		fallthrough;
6182 	default:
6183 		drm_dbg_kms(&dev_priv->drm,
6184 			    "[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
6185 			    intel_plane->base.base.id, intel_plane->base.name,
6186 			    fb->base.id, fb->format->format);
6187 		return -EINVAL;
6188 	}
6189 
6190 	return 0;
6191 }
6192 
6193 void skl_scaler_disable(const struct intel_crtc_state *old_crtc_state)
6194 {
6195 	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
6196 	int i;
6197 
6198 	for (i = 0; i < crtc->num_scalers; i++)
6199 		skl_detach_scaler(crtc, i);
6200 }
6201 
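/* Map a linear coefficient index onto its tap within the 7-tap filter. */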
6202 static int cnl_coef_tap(int i)
6203 {
6204 	return i % 7;
6205 }
6206 
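/*
 * Per the SCALER_COEFFICIENT_FORMAT note below, 0x0800 programs a tap to
 * 1.0 and 0x3000 programs it to 0.0, so only the center tap (t == 3)
 * contributes, which is what makes the filter nearest-neighbor.
 */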
6207 static u16 cnl_nearest_filter_coef(int t)
6208 {
6209 	return t == 3 ? 0x0800 : 0x3000;
6210 }
6211 
6212 /*
6213  *  Theory behind setting nearest-neighbor integer scaling:
6214  *
6215  *  17 phases of 7 taps require 119 coefficients, packed into 60 dwords per set.
6216  *  The letter represents the filter tap (D is the center tap) and the number
6217  *  represents the coefficient set for a phase (0-16).
6218  *
6219  *         +------------+------------------------+------------------------+
6220  *         |Index value |Data value coefficient 1|Data value coefficient 2|
6221  *         +------------+------------------------+------------------------+
6222  *         |   00h      |          B0            |          A0            |
6223  *         +------------+------------------------+------------------------+
6224  *         |   01h      |          D0            |          C0            |
6225  *         +------------+------------------------+------------------------+
6226  *         |   02h      |          F0            |          E0            |
6227  *         +------------+------------------------+------------------------+
6228  *         |   03h      |          A1            |          G0            |
6229  *         +------------+------------------------+------------------------+
6230  *         |   04h      |          C1            |          B1            |
6231  *         +------------+------------------------+------------------------+
6232  *         |   ...      |          ...           |          ...           |
6233  *         +------------+------------------------+------------------------+
6234  *         |   38h      |          B16           |          A16           |
6235  *         +------------+------------------------+------------------------+
6236  *         |   39h      |          D16           |          C16           |
6237  *         +------------+------------------------+------------------------+
6238  *         |   3Ah      |          F16           |          E16           |
6239  *         +------------+------------------------+------------------------+
6240  *         |   3Bh      |        Reserved        |          G16           |
6241  *         +------------+------------------------+------------------------+
6242  *
6243  *  To enable nearest-neighbor scaling: program the scaler coefficients with
6244  *  the center tap (Dxx) values set to 1 and all other values set to 0 as per
6245  *  SCALER_COEFFICIENT_FORMAT
6246  *
6247  */
6248 
6249 static void cnl_program_nearest_filter_coefs(struct drm_i915_private *dev_priv,
6250 					     enum pipe pipe, int id, int set)
6251 {
6252 	int i;
6253 
6254 	intel_de_write_fw(dev_priv, CNL_PS_COEF_INDEX_SET(pipe, id, set),
6255 			  PS_COEE_INDEX_AUTO_INC);
6256 
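	/*
	 * 17 * 7 = 119 coefficients, packed two per dword: the even-indexed
	 * tap in the low 16 bits, the odd-indexed one in the high 16 bits.
	 * With auto-increment enabled this is 60 data writes; the final high
	 * half lands in the 3Bh slot marked reserved in the table above.
	 */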
6257 	for (i = 0; i < 17 * 7; i += 2) {
6258 		u32 tmp;
6259 		int t;
6260 
6261 		t = cnl_coef_tap(i);
6262 		tmp = cnl_nearest_filter_coef(t);
6263 
6264 		t = cnl_coef_tap(i + 1);
6265 		tmp |= cnl_nearest_filter_coef(t) << 16;
6266 
6267 		intel_de_write_fw(dev_priv, CNL_PS_COEF_DATA_SET(pipe, id, set),
6268 				  tmp);
6269 	}
6270 
6271 	intel_de_write_fw(dev_priv, CNL_PS_COEF_INDEX_SET(pipe, id, set), 0);
6272 }
6273 
6274 inline u32 skl_scaler_get_filter_select(enum drm_scaling_filter filter, int set)
6275 {
6276 	if (filter == DRM_SCALING_FILTER_NEAREST_NEIGHBOR) {
6277 		return (PS_FILTER_PROGRAMMED |
6278 			PS_Y_VERT_FILTER_SELECT(set) |
6279 			PS_Y_HORZ_FILTER_SELECT(set) |
6280 			PS_UV_VERT_FILTER_SELECT(set) |
6281 			PS_UV_HORZ_FILTER_SELECT(set));
6282 	}
6283 
6284 	return PS_FILTER_MEDIUM;
6285 }
6286 
6287 void skl_scaler_setup_filter(struct drm_i915_private *dev_priv, enum pipe pipe,
6288 			     int id, int set, enum drm_scaling_filter filter)
6289 {
6290 	switch (filter) {
6291 	case DRM_SCALING_FILTER_DEFAULT:
6292 		break;
6293 	case DRM_SCALING_FILTER_NEAREST_NEIGHBOR:
6294 		cnl_program_nearest_filter_coefs(dev_priv, pipe, id, set);
6295 		break;
6296 	default:
6297 		MISSING_CASE(filter);
6298 	}
6299 }
6300 
6301 static void skl_pfit_enable(const struct intel_crtc_state *crtc_state)
6302 {
6303 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6304 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6305 	const struct intel_crtc_scaler_state *scaler_state =
6306 		&crtc_state->scaler_state;
6307 	struct drm_rect src = {
6308 		.x2 = crtc_state->pipe_src_w << 16,
6309 		.y2 = crtc_state->pipe_src_h << 16,
6310 	};
6311 	const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
6312 	u16 uv_rgb_hphase, uv_rgb_vphase;
6313 	enum pipe pipe = crtc->pipe;
6314 	int width = drm_rect_width(dst);
6315 	int height = drm_rect_height(dst);
6316 	int x = dst->x1;
6317 	int y = dst->y1;
6318 	int hscale, vscale;
6319 	unsigned long irqflags;
6320 	int id;
6321 	u32 ps_ctrl;
6322 
6323 	if (!crtc_state->pch_pfit.enabled)
6324 		return;
6325 
6326 	if (drm_WARN_ON(&dev_priv->drm,
6327 			crtc_state->scaler_state.scaler_id < 0))
6328 		return;
6329 
6330 	hscale = drm_rect_calc_hscale(&src, dst, 0, INT_MAX);
6331 	vscale = drm_rect_calc_vscale(&src, dst, 0, INT_MAX);
6332 
6333 	uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
6334 	uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);
6335 
6336 	id = scaler_state->scaler_id;
6337 
6338 	ps_ctrl = skl_scaler_get_filter_select(crtc_state->hw.scaling_filter, 0);
6339 	ps_ctrl |=  PS_SCALER_EN | scaler_state->scalers[id].mode;
6340 
6341 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
6342 
6343 	skl_scaler_setup_filter(dev_priv, pipe, id, 0,
6344 				crtc_state->hw.scaling_filter);
6345 
6346 	intel_de_write_fw(dev_priv, SKL_PS_CTRL(pipe, id), ps_ctrl);
6347 
6348 	intel_de_write_fw(dev_priv, SKL_PS_VPHASE(pipe, id),
6349 			  PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
6350 	intel_de_write_fw(dev_priv, SKL_PS_HPHASE(pipe, id),
6351 			  PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
6352 	intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(pipe, id),
6353 			  x << 16 | y);
6354 	intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(pipe, id),
6355 			  width << 16 | height);
6356 
6357 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
6358 }
6359 
6360 static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state)
6361 {
6362 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6363 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6364 	const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
6365 	enum pipe pipe = crtc->pipe;
6366 	int width = drm_rect_width(dst);
6367 	int height = drm_rect_height(dst);
6368 	int x = dst->x1;
6369 	int y = dst->y1;
6370 
6371 	if (!crtc_state->pch_pfit.enabled)
6372 		return;
6373 
6374 	/* Force use of hard-coded filter coefficients
6375 	 * as some pre-programmed values are broken,
6376 	 * e.g. x201.
6377 	 */
6378 	if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
6379 		intel_de_write(dev_priv, PF_CTL(pipe), PF_ENABLE |
6380 			       PF_FILTER_MED_3x3 | PF_PIPE_SEL_IVB(pipe));
6381 	else
6382 		intel_de_write(dev_priv, PF_CTL(pipe), PF_ENABLE |
6383 			       PF_FILTER_MED_3x3);
6384 	intel_de_write(dev_priv, PF_WIN_POS(pipe), x << 16 | y);
6385 	intel_de_write(dev_priv, PF_WIN_SZ(pipe), width << 16 | height);
6386 }
6387 
6388 void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
6389 {
6390 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6391 	struct drm_device *dev = crtc->base.dev;
6392 	struct drm_i915_private *dev_priv = to_i915(dev);
6393 
6394 	if (!crtc_state->ips_enabled)
6395 		return;
6396 
6397 	/*
6398 	 * We can only enable IPS after we enable a plane and wait for a vblank.
6399 	 * This function is called from post_plane_update, which is run after
6400 	 * a vblank wait.
6401 	 */
6402 	drm_WARN_ON(dev, !(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));
6403 
6404 	if (IS_BROADWELL(dev_priv)) {
6405 		drm_WARN_ON(dev, sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
6406 							 IPS_ENABLE | IPS_PCODE_CONTROL));
6407 		/* Quoting Art Runyan: "its not safe to expect any particular
6408 		 * value in IPS_CTL bit 31 after enabling IPS through the
6409 		 * mailbox." Moreover, the mailbox may return a bogus state,
6410 		 * so we need to just enable it and continue on.
6411 		 */
6412 	} else {
6413 		intel_de_write(dev_priv, IPS_CTL, IPS_ENABLE);
6414 		/* The bit only becomes 1 in the next vblank, so this wait here
6415 		 * is essentially intel_wait_for_vblank. If we don't have this
6416 		 * and don't wait for vblanks until the end of crtc_enable, then
6417 		 * the HW state readout code will complain that the expected
6418 		 * IPS_CTL value is not the one we read. */
6419 		if (intel_de_wait_for_set(dev_priv, IPS_CTL, IPS_ENABLE, 50))
6420 			drm_err(&dev_priv->drm,
6421 				"Timed out waiting for IPS enable\n");
6422 	}
6423 }
6424 
6425 void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
6426 {
6427 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6428 	struct drm_device *dev = crtc->base.dev;
6429 	struct drm_i915_private *dev_priv = to_i915(dev);
6430 
6431 	if (!crtc_state->ips_enabled)
6432 		return;
6433 
6434 	if (IS_BROADWELL(dev_priv)) {
6435 		drm_WARN_ON(dev,
6436 			    sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
6437 		/*
6438 		 * Wait for PCODE to finish disabling IPS. The BSpec specified
6439 		 * 42ms timeout value leads to occasional timeouts so use 100ms
6440 		 * instead.
6441 		 */
6442 		if (intel_de_wait_for_clear(dev_priv, IPS_CTL, IPS_ENABLE, 100))
6443 			drm_err(&dev_priv->drm,
6444 				"Timed out waiting for IPS disable\n");
6445 	} else {
6446 		intel_de_write(dev_priv, IPS_CTL, 0);
6447 		intel_de_posting_read(dev_priv, IPS_CTL);
6448 	}
6449 
6450 	/* We need to wait for a vblank before we can disable the plane. */
6451 	intel_wait_for_vblank(dev_priv, crtc->pipe);
6452 }
6453 
6454 static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
6455 {
6456 	if (intel_crtc->overlay)
6457 		(void) intel_overlay_switch_off(intel_crtc->overlay);
6458 
6459 	/* Let userspace switch the overlay on again. In most cases userspace
6460 	 * has to recompute where to put it anyway.
6461 	 */
6462 }
6463 
6464 static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state,
6465 				       const struct intel_crtc_state *new_crtc_state)
6466 {
6467 	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
6468 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6469 
6470 	if (!old_crtc_state->ips_enabled)
6471 		return false;
6472 
6473 	if (needs_modeset(new_crtc_state))
6474 		return true;
6475 
6476 	/*
6477 	 * Workaround: Do not read or write the pipe palette/gamma data while
6478 	 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
6479 	 *
6480 	 * Disable IPS before we program the LUT.
6481 	 */
6482 	if (IS_HASWELL(dev_priv) &&
6483 	    (new_crtc_state->uapi.color_mgmt_changed ||
6484 	     new_crtc_state->update_pipe) &&
6485 	    new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
6486 		return true;
6487 
6488 	return !new_crtc_state->ips_enabled;
6489 }
6490 
6491 static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state,
6492 				       const struct intel_crtc_state *new_crtc_state)
6493 {
6494 	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
6495 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6496 
6497 	if (!new_crtc_state->ips_enabled)
6498 		return false;
6499 
6500 	if (needs_modeset(new_crtc_state))
6501 		return true;
6502 
6503 	/*
6504 	 * Workaround: Do not read or write the pipe palette/gamma data while
6505 	 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
6506 	 *
6507 	 * Re-enable IPS after the LUT has been programmed.
6508 	 */
6509 	if (IS_HASWELL(dev_priv) &&
6510 	    (new_crtc_state->uapi.color_mgmt_changed ||
6511 	     new_crtc_state->update_pipe) &&
6512 	    new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
6513 		return true;
6514 
6515 	/*
6516 	 * We can't read out IPS on broadwell, assume the worst and
6517 	 * forcibly enable IPS on the first fastset.
6518 	 */
6519 	if (new_crtc_state->update_pipe && old_crtc_state->inherited)
6520 		return true;
6521 
6522 	return !old_crtc_state->ips_enabled;
6523 }
6524 
6525 static bool needs_nv12_wa(const struct intel_crtc_state *crtc_state)
6526 {
6527 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
6528 
6529 	if (!crtc_state->nv12_planes)
6530 		return false;
6531 
6532 	/* WA Display #0827: Gen9:all */
6533 	if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv))
6534 		return true;
6535 
6536 	return false;
6537 }
6538 
6539 static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state)
6540 {
6541 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
6542 
6543 	/* Wa_2006604312:icl,ehl */
6544 	if (crtc_state->scaler_state.scaler_users > 0 && IS_GEN(dev_priv, 11))
6545 		return true;
6546 
6547 	return false;
6548 }
6549 
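/*
 * A full modeset counts as both a planes-disabling and a planes-enabling
 * transition, since all planes are torn down and brought back up along
 * with the pipe.
 */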
6550 static bool planes_enabling(const struct intel_crtc_state *old_crtc_state,
6551 			    const struct intel_crtc_state *new_crtc_state)
6552 {
6553 	return (!old_crtc_state->active_planes || needs_modeset(new_crtc_state)) &&
6554 		new_crtc_state->active_planes;
6555 }
6556 
6557 static bool planes_disabling(const struct intel_crtc_state *old_crtc_state,
6558 			     const struct intel_crtc_state *new_crtc_state)
6559 {
6560 	return old_crtc_state->active_planes &&
6561 		(!new_crtc_state->active_planes || needs_modeset(new_crtc_state));
6562 }
6563 
6564 static void intel_post_plane_update(struct intel_atomic_state *state,
6565 				    struct intel_crtc *crtc)
6566 {
6567 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
6568 	const struct intel_crtc_state *old_crtc_state =
6569 		intel_atomic_get_old_crtc_state(state, crtc);
6570 	const struct intel_crtc_state *new_crtc_state =
6571 		intel_atomic_get_new_crtc_state(state, crtc);
6572 	enum pipe pipe = crtc->pipe;
6573 
6574 	intel_frontbuffer_flip(dev_priv, new_crtc_state->fb_bits);
6575 
6576 	if (new_crtc_state->update_wm_post && new_crtc_state->hw.active)
6577 		intel_update_watermarks(crtc);
6578 
6579 	if (hsw_post_update_enable_ips(old_crtc_state, new_crtc_state))
6580 		hsw_enable_ips(new_crtc_state);
6581 
6582 	intel_fbc_post_update(state, crtc);
6583 
6584 	if (needs_nv12_wa(old_crtc_state) &&
6585 	    !needs_nv12_wa(new_crtc_state))
6586 		skl_wa_827(dev_priv, pipe, false);
6587 
6588 	if (needs_scalerclk_wa(old_crtc_state) &&
6589 	    !needs_scalerclk_wa(new_crtc_state))
6590 		icl_wa_scalerclkgating(dev_priv, pipe, false);
6591 }
6592 
6593 static void skl_disable_async_flip_wa(struct intel_atomic_state *state,
6594 				      struct intel_crtc *crtc,
6595 				      const struct intel_crtc_state *new_crtc_state)
6596 {
6597 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
6598 	struct intel_plane *plane;
6599 	struct intel_plane_state *new_plane_state;
6600 	int i;
6601 
6602 	for_each_new_intel_plane_in_state(state, plane, new_plane_state, i) {
6603 		u32 update_mask = new_crtc_state->update_planes;
6604 		u32 plane_ctl, surf_addr;
6605 		enum plane_id plane_id;
6606 		unsigned long irqflags;
6607 		enum pipe pipe;
6608 
6609 		if (crtc->pipe != plane->pipe ||
6610 		    !(update_mask & BIT(plane->id)))
6611 			continue;
6612 
6613 		plane_id = plane->id;
6614 		pipe = plane->pipe;
6615 
6616 		spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
6617 		plane_ctl = intel_de_read_fw(dev_priv, PLANE_CTL(pipe, plane_id));
6618 		surf_addr = intel_de_read_fw(dev_priv, PLANE_SURF(pipe, plane_id));
6619 
6620 		plane_ctl &= ~PLANE_CTL_ASYNC_FLIP;
6621 
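		/*
		 * Write back PLANE_CTL with the async bit cleared, then
		 * re-write PLANE_SURF with the unchanged surface address:
		 * the PLANE_SURF write is what arms the double-buffered
		 * plane update, so the new PLANE_CTL value latches at the
		 * next start of vblank.
		 */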
6622 		intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), plane_ctl);
6623 		intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), surf_addr);
6624 		spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
6625 	}
6626 
6627 	intel_wait_for_vblank(dev_priv, crtc->pipe);
6628 }
6629 
6630 static void intel_pre_plane_update(struct intel_atomic_state *state,
6631 				   struct intel_crtc *crtc)
6632 {
6633 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
6634 	const struct intel_crtc_state *old_crtc_state =
6635 		intel_atomic_get_old_crtc_state(state, crtc);
6636 	const struct intel_crtc_state *new_crtc_state =
6637 		intel_atomic_get_new_crtc_state(state, crtc);
6638 	enum pipe pipe = crtc->pipe;
6639 
6640 	if (hsw_pre_update_disable_ips(old_crtc_state, new_crtc_state))
6641 		hsw_disable_ips(old_crtc_state);
6642 
6643 	if (intel_fbc_pre_update(state, crtc))
6644 		intel_wait_for_vblank(dev_priv, pipe);
6645 
6646 	/* Display WA 827 */
6647 	if (!needs_nv12_wa(old_crtc_state) &&
6648 	    needs_nv12_wa(new_crtc_state))
6649 		skl_wa_827(dev_priv, pipe, true);
6650 
6651 	/* Wa_2006604312:icl,ehl */
6652 	if (!needs_scalerclk_wa(old_crtc_state) &&
6653 	    needs_scalerclk_wa(new_crtc_state))
6654 		icl_wa_scalerclkgating(dev_priv, pipe, true);
6655 
6656 	/*
6657 	 * Vblank time updates from the shadow to live plane control register
6658 	 * are blocked if the memory self-refresh mode is active at that
6659 	 * moment. So to make sure the plane gets truly disabled, disable
6660 	 * first the self-refresh mode. The self-refresh enable bit in turn
6661 	 * will be checked/applied by the HW only at the next frame start
6662 	 * event which is after the vblank start event, so we need to have a
6663 	 * wait-for-vblank between disabling the plane and the pipe.
6664 	 */
6665 	if (HAS_GMCH(dev_priv) && old_crtc_state->hw.active &&
6666 	    new_crtc_state->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
6667 		intel_wait_for_vblank(dev_priv, pipe);
6668 
6669 	/*
6670 	 * IVB workaround: must disable low power watermarks for at least
6671 	 * one frame before enabling scaling.  LP watermarks can be re-enabled
6672 	 * when scaling is disabled.
6673 	 *
6674 	 * WaCxSRDisabledForSpriteScaling:ivb
6675 	 */
6676 	if (old_crtc_state->hw.active &&
6677 	    new_crtc_state->disable_lp_wm && ilk_disable_lp_wm(dev_priv))
6678 		intel_wait_for_vblank(dev_priv, pipe);
6679 
6680 	/*
6681 	 * If we're doing a modeset we don't need to do any
6682 	 * pre-vblank watermark programming here.
6683 	 */
6684 	if (!needs_modeset(new_crtc_state)) {
6685 		/*
6686 		 * For platforms that support atomic watermarks, program the
6687 		 * 'intermediate' watermarks immediately.  On pre-gen9 platforms, these
6688 		 * will be the intermediate values that are safe for both pre- and
6689 		 * post-vblank; when vblank happens, the 'active' values will be set
6690 		 * to the final 'target' values and we'll do this again to get the
6691 		 * optimal watermarks.  For gen9+ platforms, the values we program here
6692 		 * will be the final target values which will get automatically latched
6693 		 * at vblank time; no further programming will be necessary.
6694 		 *
6695 		 * If a platform hasn't been transitioned to atomic watermarks yet,
6696 		 * we'll continue to update watermarks the old way, if flags tell
6697 		 * us to.
6698 		 */
6699 		if (dev_priv->display.initial_watermarks)
6700 			dev_priv->display.initial_watermarks(state, crtc);
6701 		else if (new_crtc_state->update_wm_pre)
6702 			intel_update_watermarks(crtc);
6703 	}
6704 
6705 	/*
6706 	 * Gen2 reports pipe underruns whenever all planes are disabled.
6707 	 * So disable underrun reporting before all the planes get disabled.
6708 	 *
6709 	 * We do this after .initial_watermarks() so that we have a
6710 	 * chance of catching underruns with the intermediate watermarks
6711 	 * vs. the old plane configuration.
6712 	 */
6713 	if (IS_GEN(dev_priv, 2) && planes_disabling(old_crtc_state, new_crtc_state))
6714 		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
6715 
6716 	/*
6717 	 * WA for platforms where the async address update enable bit
6718 	 * is double buffered and only latched at the start of vblank.
6719 	 */
6720 	if (old_crtc_state->uapi.async_flip &&
6721 	    !new_crtc_state->uapi.async_flip &&
6722 	    IS_GEN_RANGE(dev_priv, 9, 10))
6723 		skl_disable_async_flip_wa(state, crtc, new_crtc_state);
6724 }
6725 
6726 static void intel_crtc_disable_planes(struct intel_atomic_state *state,
6727 				      struct intel_crtc *crtc)
6728 {
6729 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6730 	const struct intel_crtc_state *new_crtc_state =
6731 		intel_atomic_get_new_crtc_state(state, crtc);
6732 	unsigned int update_mask = new_crtc_state->update_planes;
6733 	const struct intel_plane_state *old_plane_state;
6734 	struct intel_plane *plane;
6735 	unsigned fb_bits = 0;
6736 	int i;
6737 
6738 	intel_crtc_dpms_overlay_disable(crtc);
6739 
6740 	for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
6741 		if (crtc->pipe != plane->pipe ||
6742 		    !(update_mask & BIT(plane->id)))
6743 			continue;
6744 
6745 		intel_disable_plane(plane, new_crtc_state);
6746 
6747 		if (old_plane_state->uapi.visible)
6748 			fb_bits |= plane->frontbuffer_bit;
6749 	}
6750 
6751 	intel_frontbuffer_flip(dev_priv, fb_bits);
6752 }
6753 
6754 /**
6755  * intel_connector_primary_encoder - get the primary encoder for a connector
6756  * @connector: connector for which to return the encoder
6757  *
6758  * Returns the primary encoder for a connector. There is a 1:1 mapping from
6759  * all connectors to their encoder, except for DP-MST connectors which have
6760  * both a virtual and a primary encoder. These DP-MST primary encoders can be
6761  * pointed to by as many DP-MST connectors as there are pipes.
6762  */
6763 static struct intel_encoder *
6764 intel_connector_primary_encoder(struct intel_connector *connector)
6765 {
6766 	struct intel_encoder *encoder;
6767 
6768 	if (connector->mst_port)
6769 		return &dp_to_dig_port(connector->mst_port)->base;
6770 
6771 	encoder = intel_attached_encoder(connector);
6772 	drm_WARN_ON(connector->base.dev, !encoder);
6773 
6774 	return encoder;
6775 }
6776 
6777 static void intel_encoders_update_prepare(struct intel_atomic_state *state)
6778 {
6779 	struct drm_connector_state *new_conn_state;
6780 	struct drm_connector *connector;
6781 	int i;
6782 
6783 	for_each_new_connector_in_state(&state->base, connector, new_conn_state,
6784 					i) {
6785 		struct intel_connector *intel_connector;
6786 		struct intel_encoder *encoder;
6787 		struct intel_crtc *crtc;
6788 
6789 		if (!intel_connector_needs_modeset(state, connector))
6790 			continue;
6791 
6792 		intel_connector = to_intel_connector(connector);
6793 		encoder = intel_connector_primary_encoder(intel_connector);
6794 		if (!encoder->update_prepare)
6795 			continue;
6796 
6797 		crtc = new_conn_state->crtc ?
6798 			to_intel_crtc(new_conn_state->crtc) : NULL;
6799 		encoder->update_prepare(state, encoder, crtc);
6800 	}
6801 }
6802 
6803 static void intel_encoders_update_complete(struct intel_atomic_state *state)
6804 {
6805 	struct drm_connector_state *new_conn_state;
6806 	struct drm_connector *connector;
6807 	int i;
6808 
6809 	for_each_new_connector_in_state(&state->base, connector, new_conn_state,
6810 					i) {
6811 		struct intel_connector *intel_connector;
6812 		struct intel_encoder *encoder;
6813 		struct intel_crtc *crtc;
6814 
6815 		if (!intel_connector_needs_modeset(state, connector))
6816 			continue;
6817 
6818 		intel_connector = to_intel_connector(connector);
6819 		encoder = intel_connector_primary_encoder(intel_connector);
6820 		if (!encoder->update_complete)
6821 			continue;
6822 
6823 		crtc = new_conn_state->crtc ?
6824 			to_intel_crtc(new_conn_state->crtc) : NULL;
6825 		encoder->update_complete(state, encoder, crtc);
6826 	}
6827 }
6828 
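/*
 * The intel_encoders_*() helpers below all follow the same pattern: walk the
 * connectors in the atomic state, skip those not driven by the given CRTC,
 * and invoke the corresponding optional encoder hook with the matching old
 * or new state.
 */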
6829 static void intel_encoders_pre_pll_enable(struct intel_atomic_state *state,
6830 					  struct intel_crtc *crtc)
6831 {
6832 	const struct intel_crtc_state *crtc_state =
6833 		intel_atomic_get_new_crtc_state(state, crtc);
6834 	const struct drm_connector_state *conn_state;
6835 	struct drm_connector *conn;
6836 	int i;
6837 
6838 	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
6839 		struct intel_encoder *encoder =
6840 			to_intel_encoder(conn_state->best_encoder);
6841 
6842 		if (conn_state->crtc != &crtc->base)
6843 			continue;
6844 
6845 		if (encoder->pre_pll_enable)
6846 			encoder->pre_pll_enable(state, encoder,
6847 						crtc_state, conn_state);
6848 	}
6849 }
6850 
6851 static void intel_encoders_pre_enable(struct intel_atomic_state *state,
6852 				      struct intel_crtc *crtc)
6853 {
6854 	const struct intel_crtc_state *crtc_state =
6855 		intel_atomic_get_new_crtc_state(state, crtc);
6856 	const struct drm_connector_state *conn_state;
6857 	struct drm_connector *conn;
6858 	int i;
6859 
6860 	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
6861 		struct intel_encoder *encoder =
6862 			to_intel_encoder(conn_state->best_encoder);
6863 
6864 		if (conn_state->crtc != &crtc->base)
6865 			continue;
6866 
6867 		if (encoder->pre_enable)
6868 			encoder->pre_enable(state, encoder,
6869 					    crtc_state, conn_state);
6870 	}
6871 }
6872 
6873 static void intel_encoders_enable(struct intel_atomic_state *state,
6874 				  struct intel_crtc *crtc)
6875 {
6876 	const struct intel_crtc_state *crtc_state =
6877 		intel_atomic_get_new_crtc_state(state, crtc);
6878 	const struct drm_connector_state *conn_state;
6879 	struct drm_connector *conn;
6880 	int i;
6881 
6882 	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
6883 		struct intel_encoder *encoder =
6884 			to_intel_encoder(conn_state->best_encoder);
6885 
6886 		if (conn_state->crtc != &crtc->base)
6887 			continue;
6888 
6889 		if (encoder->enable)
6890 			encoder->enable(state, encoder,
6891 					crtc_state, conn_state);
6892 		intel_opregion_notify_encoder(encoder, true);
6893 	}
6894 }
6895 
6896 static void intel_encoders_disable(struct intel_atomic_state *state,
6897 				   struct intel_crtc *crtc)
6898 {
6899 	const struct intel_crtc_state *old_crtc_state =
6900 		intel_atomic_get_old_crtc_state(state, crtc);
6901 	const struct drm_connector_state *old_conn_state;
6902 	struct drm_connector *conn;
6903 	int i;
6904 
6905 	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
6906 		struct intel_encoder *encoder =
6907 			to_intel_encoder(old_conn_state->best_encoder);
6908 
6909 		if (old_conn_state->crtc != &crtc->base)
6910 			continue;
6911 
6912 		intel_opregion_notify_encoder(encoder, false);
6913 		if (encoder->disable)
6914 			encoder->disable(state, encoder,
6915 					 old_crtc_state, old_conn_state);
6916 	}
6917 }
6918 
6919 static void intel_encoders_post_disable(struct intel_atomic_state *state,
6920 					struct intel_crtc *crtc)
6921 {
6922 	const struct intel_crtc_state *old_crtc_state =
6923 		intel_atomic_get_old_crtc_state(state, crtc);
6924 	const struct drm_connector_state *old_conn_state;
6925 	struct drm_connector *conn;
6926 	int i;
6927 
6928 	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
6929 		struct intel_encoder *encoder =
6930 			to_intel_encoder(old_conn_state->best_encoder);
6931 
6932 		if (old_conn_state->crtc != &crtc->base)
6933 			continue;
6934 
6935 		if (encoder->post_disable)
6936 			encoder->post_disable(state, encoder,
6937 					      old_crtc_state, old_conn_state);
6938 	}
6939 }
6940 
6941 static void intel_encoders_post_pll_disable(struct intel_atomic_state *state,
6942 					    struct intel_crtc *crtc)
6943 {
6944 	const struct intel_crtc_state *old_crtc_state =
6945 		intel_atomic_get_old_crtc_state(state, crtc);
6946 	const struct drm_connector_state *old_conn_state;
6947 	struct drm_connector *conn;
6948 	int i;
6949 
6950 	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
6951 		struct intel_encoder *encoder =
6952 			to_intel_encoder(old_conn_state->best_encoder);
6953 
6954 		if (old_conn_state->crtc != &crtc->base)
6955 			continue;
6956 
6957 		if (encoder->post_pll_disable)
6958 			encoder->post_pll_disable(state, encoder,
6959 						  old_crtc_state, old_conn_state);
6960 	}
6961 }
6962 
6963 static void intel_encoders_update_pipe(struct intel_atomic_state *state,
6964 				       struct intel_crtc *crtc)
6965 {
6966 	const struct intel_crtc_state *crtc_state =
6967 		intel_atomic_get_new_crtc_state(state, crtc);
6968 	const struct drm_connector_state *conn_state;
6969 	struct drm_connector *conn;
6970 	int i;
6971 
6972 	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
6973 		struct intel_encoder *encoder =
6974 			to_intel_encoder(conn_state->best_encoder);
6975 
6976 		if (conn_state->crtc != &crtc->base)
6977 			continue;
6978 
6979 		if (encoder->update_pipe)
6980 			encoder->update_pipe(state, encoder,
6981 					     crtc_state, conn_state);
6982 	}
6983 }
6984 
6985 static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state)
6986 {
6987 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6988 	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
6989 
6990 	plane->disable_plane(plane, crtc_state);
6991 }
6992 
6993 static void ilk_crtc_enable(struct intel_atomic_state *state,
6994 			    struct intel_crtc *crtc)
6995 {
6996 	const struct intel_crtc_state *new_crtc_state =
6997 		intel_atomic_get_new_crtc_state(state, crtc);
6998 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6999 	enum pipe pipe = crtc->pipe;
7000 
7001 	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
7002 		return;
7003 
7004 	/*
7005 	 * Sometimes spurious CPU pipe underruns happen during FDI
7006 	 * training, at least with VGA+HDMI cloning. Suppress them.
7007 	 *
7008 	 * On ILK we get occasional spurious CPU pipe underruns
7009 	 * between eDP port A enable and vdd enable. Also PCH port
7010 	 * enable seems to result in the occasional CPU pipe underrun.
7011 	 *
7012 	 * Spurious PCH underruns also occur during PCH enabling.
7013 	 */
7014 	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
7015 	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
7016 
7017 	if (new_crtc_state->has_pch_encoder)
7018 		intel_prepare_shared_dpll(new_crtc_state);
7019 
7020 	if (intel_crtc_has_dp_encoder(new_crtc_state))
7021 		intel_dp_set_m_n(new_crtc_state, M1_N1);
7022 
7023 	intel_set_transcoder_timings(new_crtc_state);
7024 	intel_set_pipe_src_size(new_crtc_state);
7025 
7026 	if (new_crtc_state->has_pch_encoder)
7027 		intel_cpu_transcoder_set_m_n(new_crtc_state,
7028 					     &new_crtc_state->fdi_m_n, NULL);
7029 
7030 	ilk_set_pipeconf(new_crtc_state);
7031 
7032 	crtc->active = true;
7033 
7034 	intel_encoders_pre_enable(state, crtc);
7035 
7036 	if (new_crtc_state->has_pch_encoder) {
7037 		/* Note: FDI PLL enabling _must_ be done before we enable the
7038 		 * cpu pipes, hence this is separate from all the other fdi/pch
7039 		 * enabling. */
7040 		ilk_fdi_pll_enable(new_crtc_state);
7041 	} else {
7042 		assert_fdi_tx_disabled(dev_priv, pipe);
7043 		assert_fdi_rx_disabled(dev_priv, pipe);
7044 	}
7045 
7046 	ilk_pfit_enable(new_crtc_state);
7047 
7048 	/*
7049 	 * On ILK+ the LUT must be loaded before the pipe is running, but
7050 	 * with clocks enabled
7051 	 */
7052 	intel_color_load_luts(new_crtc_state);
7053 	intel_color_commit(new_crtc_state);
7054 	/* update DSPCNTR to configure gamma for pipe bottom color */
7055 	intel_disable_primary_plane(new_crtc_state);
7056 
7057 	if (dev_priv->display.initial_watermarks)
7058 		dev_priv->display.initial_watermarks(state, crtc);
7059 	intel_enable_pipe(new_crtc_state);
7060 
7061 	if (new_crtc_state->has_pch_encoder)
7062 		ilk_pch_enable(state, new_crtc_state);
7063 
7064 	intel_crtc_vblank_on(new_crtc_state);
7065 
7066 	intel_encoders_enable(state, crtc);
7067 
7068 	if (HAS_PCH_CPT(dev_priv))
7069 		cpt_verify_modeset(dev_priv, pipe);
7070 
7071 	/*
7072 	 * Must wait for vblank to avoid spurious PCH FIFO underruns.
7073 	 * And a second vblank wait is needed at least on ILK with
7074 	 * some interlaced HDMI modes. Let's do the double wait always
7075 	 * in case there are more corner cases we don't know about.
7076 	 */
7077 	if (new_crtc_state->has_pch_encoder) {
7078 		intel_wait_for_vblank(dev_priv, pipe);
7079 		intel_wait_for_vblank(dev_priv, pipe);
7080 	}
7081 	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
7082 	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
7083 }
7084 
7085 /* IPS only exists on ULT machines and is tied to pipe A. */
7086 static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
7087 {
7088 	return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
7089 }
7090 
7091 static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
7092 					    enum pipe pipe, bool apply)
7093 {
7094 	u32 val = intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe));
7095 	u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
7096 
7097 	if (apply)
7098 		val |= mask;
7099 	else
7100 		val &= ~mask;
7101 
7102 	intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe), val);
7103 }
7104 
7105 static void icl_pipe_mbus_enable(struct intel_crtc *crtc)
7106 {
7107 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7108 	enum pipe pipe = crtc->pipe;
7109 	u32 val;
7110 
7111 	val = MBUS_DBOX_A_CREDIT(2);
7112 
7113 	if (INTEL_GEN(dev_priv) >= 12) {
7114 		val |= MBUS_DBOX_BW_CREDIT(2);
7115 		val |= MBUS_DBOX_B_CREDIT(12);
7116 	} else {
7117 		val |= MBUS_DBOX_BW_CREDIT(1);
7118 		val |= MBUS_DBOX_B_CREDIT(8);
7119 	}
7120 
7121 	intel_de_write(dev_priv, PIPE_MBUS_DBOX_CTL(pipe), val);
7122 }
7123 
7124 static void hsw_set_linetime_wm(const struct intel_crtc_state *crtc_state)
7125 {
7126 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7127 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7128 
7129 	intel_de_write(dev_priv, WM_LINETIME(crtc->pipe),
7130 		       HSW_LINETIME(crtc_state->linetime) |
7131 		       HSW_IPS_LINETIME(crtc_state->ips_linetime));
7132 }
7133 
7134 static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
7135 {
7136 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7137 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7138 	i915_reg_t reg = CHICKEN_TRANS(crtc_state->cpu_transcoder);
7139 	u32 val;
7140 
7141 	val = intel_de_read(dev_priv, reg);
7142 	val &= ~HSW_FRAME_START_DELAY_MASK;
7143 	val |= HSW_FRAME_START_DELAY(0);
7144 	intel_de_write(dev_priv, reg, val);
7145 }
7146 
7147 static void icl_ddi_bigjoiner_pre_enable(struct intel_atomic_state *state,
7148 					 const struct intel_crtc_state *crtc_state)
7149 {
7150 	struct intel_crtc *master = to_intel_crtc(crtc_state->uapi.crtc);
7151 	struct intel_crtc_state *master_crtc_state;
7152 	struct drm_connector_state *conn_state;
7153 	struct drm_connector *conn;
7154 	struct intel_encoder *encoder = NULL;
7155 	int i;
7156 
7157 	if (crtc_state->bigjoiner_slave)
7158 		master = crtc_state->bigjoiner_linked_crtc;
7159 
7160 	master_crtc_state = intel_atomic_get_new_crtc_state(state, master);
7161 
7162 	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
7163 		if (conn_state->crtc != &master->base)
7164 			continue;
7165 
7166 		encoder = to_intel_encoder(conn_state->best_encoder);
7167 		break;
7168 	}
7169 
7170 	if (!crtc_state->bigjoiner_slave) {
7171 		/* need to enable VDSC, which we skipped in pre-enable */
7172 		intel_dsc_enable(encoder, crtc_state);
7173 	} else {
7174 		/*
7175 		 * Enable sequence steps 1-7 on bigjoiner master
7176 		 */
7177 		intel_encoders_pre_pll_enable(state, master);
7178 		intel_enable_shared_dpll(master_crtc_state);
7179 		intel_encoders_pre_enable(state, master);
7180 
7181 		/* and DSC on slave */
7182 		intel_dsc_enable(NULL, crtc_state);
7183 	}
7184 }
7185 
7186 static void hsw_crtc_enable(struct intel_atomic_state *state,
7187 			    struct intel_crtc *crtc)
7188 {
7189 	const struct intel_crtc_state *new_crtc_state =
7190 		intel_atomic_get_new_crtc_state(state, crtc);
7191 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7192 	enum pipe pipe = crtc->pipe, hsw_workaround_pipe;
7193 	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
7194 	bool psl_clkgate_wa;
7195 
7196 	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
7197 		return;
7198 
7199 	if (!new_crtc_state->bigjoiner) {
7200 		intel_encoders_pre_pll_enable(state, crtc);
7201 
7202 		if (new_crtc_state->shared_dpll)
7203 			intel_enable_shared_dpll(new_crtc_state);
7204 
7205 		intel_encoders_pre_enable(state, crtc);
7206 	} else {
7207 		icl_ddi_bigjoiner_pre_enable(state, new_crtc_state);
7208 	}
7209 
7210 	intel_set_pipe_src_size(new_crtc_state);
7211 	if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
7212 		bdw_set_pipemisc(new_crtc_state);
7213 
7214 	if (!new_crtc_state->bigjoiner_slave && !transcoder_is_dsi(cpu_transcoder)) {
7215 		intel_set_transcoder_timings(new_crtc_state);
7216 
7217 		if (cpu_transcoder != TRANSCODER_EDP)
7218 			intel_de_write(dev_priv, PIPE_MULT(cpu_transcoder),
7219 				       new_crtc_state->pixel_multiplier - 1);
7220 
7221 		if (new_crtc_state->has_pch_encoder)
7222 			intel_cpu_transcoder_set_m_n(new_crtc_state,
7223 						     &new_crtc_state->fdi_m_n, NULL);
7224 
7225 		hsw_set_frame_start_delay(new_crtc_state);
7226 	}
7227 
7228 	if (!transcoder_is_dsi(cpu_transcoder))
7229 		hsw_set_pipeconf(new_crtc_state);
7230 
7231 	crtc->active = true;
7232 
7233 	/* Display WA #1180: WaDisableScalarClockGating: glk, cnl */
7234 	psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
7235 		new_crtc_state->pch_pfit.enabled;
7236 	if (psl_clkgate_wa)
7237 		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);
7238 
7239 	if (INTEL_GEN(dev_priv) >= 9)
7240 		skl_pfit_enable(new_crtc_state);
7241 	else
7242 		ilk_pfit_enable(new_crtc_state);
7243 
7244 	/*
7245 	 * On ILK+ the LUT must be loaded before the pipe is running, but
7246 	 * with clocks enabled
7247 	 */
7248 	intel_color_load_luts(new_crtc_state);
7249 	intel_color_commit(new_crtc_state);
7250 	/* update DSPCNTR to configure gamma/csc for pipe bottom color */
7251 	if (INTEL_GEN(dev_priv) < 9)
7252 		intel_disable_primary_plane(new_crtc_state);
7253 
7254 	hsw_set_linetime_wm(new_crtc_state);
7255 
7256 	if (INTEL_GEN(dev_priv) >= 11)
7257 		icl_set_pipe_chicken(crtc);
7258 
7259 	if (dev_priv->display.initial_watermarks)
7260 		dev_priv->display.initial_watermarks(state, crtc);
7261 
7262 	if (INTEL_GEN(dev_priv) >= 11)
7263 		icl_pipe_mbus_enable(crtc);
7264 
7265 	if (new_crtc_state->bigjoiner_slave) {
7266 		trace_intel_pipe_enable(crtc);
7267 		intel_crtc_vblank_on(new_crtc_state);
7268 	}
7269 
7270 	intel_encoders_enable(state, crtc);
7271 
7272 	if (psl_clkgate_wa) {
7273 		intel_wait_for_vblank(dev_priv, pipe);
7274 		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
7275 	}
7276 
7277 	/* If we change the relative order between pipe/planes enabling, we need
7278 	 * to change the workaround. */
7279 	hsw_workaround_pipe = new_crtc_state->hsw_workaround_pipe;
7280 	if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
7281 		intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
7282 		intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
7283 	}
7284 }
7285 
7286 void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state)
7287 {
7288 	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
7289 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7290 	enum pipe pipe = crtc->pipe;
7291 
7292 	/* To avoid upsetting the power well on Haswell, only disable the pfit if
7293 	 * it's in use. The hw state code will make sure we get this right. */
7294 	if (!old_crtc_state->pch_pfit.enabled)
7295 		return;
7296 
7297 	intel_de_write(dev_priv, PF_CTL(pipe), 0);
7298 	intel_de_write(dev_priv, PF_WIN_POS(pipe), 0);
7299 	intel_de_write(dev_priv, PF_WIN_SZ(pipe), 0);
7300 }
7301 
7302 static void ilk_crtc_disable(struct intel_atomic_state *state,
7303 			     struct intel_crtc *crtc)
7304 {
7305 	const struct intel_crtc_state *old_crtc_state =
7306 		intel_atomic_get_old_crtc_state(state, crtc);
7307 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7308 	enum pipe pipe = crtc->pipe;
7309 
7310 	/*
7311 	 * Sometimes spurious CPU pipe underruns happen when the
7312 	 * pipe is already disabled, but FDI RX/TX is still enabled.
7313 	 * Happens at least with VGA+HDMI cloning. Suppress them.
7314 	 */
7315 	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
7316 	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
7317 
7318 	intel_encoders_disable(state, crtc);
7319 
7320 	intel_crtc_vblank_off(old_crtc_state);
7321 
7322 	intel_disable_pipe(old_crtc_state);
7323 
7324 	ilk_pfit_disable(old_crtc_state);
7325 
7326 	if (old_crtc_state->has_pch_encoder)
7327 		ilk_fdi_disable(crtc);
7328 
7329 	intel_encoders_post_disable(state, crtc);
7330 
7331 	if (old_crtc_state->has_pch_encoder) {
7332 		ilk_disable_pch_transcoder(dev_priv, pipe);
7333 
7334 		if (HAS_PCH_CPT(dev_priv)) {
7335 			i915_reg_t reg;
7336 			u32 temp;
7337 
7338 			/* disable TRANS_DP_CTL */
7339 			reg = TRANS_DP_CTL(pipe);
7340 			temp = intel_de_read(dev_priv, reg);
7341 			temp &= ~(TRANS_DP_OUTPUT_ENABLE |
7342 				  TRANS_DP_PORT_SEL_MASK);
7343 			temp |= TRANS_DP_PORT_SEL_NONE;
7344 			intel_de_write(dev_priv, reg, temp);
7345 
7346 			/* disable DPLL_SEL */
7347 			temp = intel_de_read(dev_priv, PCH_DPLL_SEL);
7348 			temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
7349 			intel_de_write(dev_priv, PCH_DPLL_SEL, temp);
7350 		}
7351 
7352 		ilk_fdi_pll_disable(crtc);
7353 	}
7354 
7355 	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
7356 	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
7357 }
7358 
7359 static void hsw_crtc_disable(struct intel_atomic_state *state,
7360 			     struct intel_crtc *crtc)
7361 {
7362 	/*
7363 	 * FIXME collapse everything to one hook.
7364 	 * Need care with mst->ddi interactions.
7365 	 */
7366 	intel_encoders_disable(state, crtc);
7367 	intel_encoders_post_disable(state, crtc);
7368 }
7369 
7370 static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
7371 {
7372 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7373 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7374 
7375 	if (!crtc_state->gmch_pfit.control)
7376 		return;
7377 
7378 	/*
7379 	 * The panel fitter should only be adjusted whilst the pipe is disabled,
7380 	 * according to the register description and the PRM.
7381 	 */
7382 	drm_WARN_ON(&dev_priv->drm,
7383 		    intel_de_read(dev_priv, PFIT_CONTROL) & PFIT_ENABLE);
7384 	assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);
7385 
7386 	intel_de_write(dev_priv, PFIT_PGM_RATIOS,
7387 		       crtc_state->gmch_pfit.pgm_ratios);
7388 	intel_de_write(dev_priv, PFIT_CONTROL, crtc_state->gmch_pfit.control);
7389 
7390 	/* Border color in case we don't scale up to the full screen. Black by
7391 	 * default; change it to something else for debugging. */
7392 	intel_de_write(dev_priv, BCLRPAT(crtc->pipe), 0);
7393 }
7394 
7395 bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
7396 {
7397 	if (phy == PHY_NONE)
7398 		return false;
7399 	else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
7400 		return phy <= PHY_D;
7401 	else if (IS_JSL_EHL(dev_priv))
7402 		return phy <= PHY_C;
7403 	else if (INTEL_GEN(dev_priv) >= 11)
7404 		return phy <= PHY_B;
7405 	else
7406 		return false;
7407 }
7408 
7409 bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
7410 {
7411 	if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
7412 		return false;
7413 	else if (INTEL_GEN(dev_priv) >= 12)
7414 		return phy >= PHY_D && phy <= PHY_I;
7415 	else if (INTEL_GEN(dev_priv) >= 11 && !IS_JSL_EHL(dev_priv))
7416 		return phy >= PHY_C && phy <= PHY_F;
7417 	else
7418 		return false;
7419 }
7420 
7421 enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
7422 {
7423 	if ((IS_DG1(i915) || IS_ROCKETLAKE(i915)) && port >= PORT_TC1)
7424 		return PHY_C + port - PORT_TC1;
7425 	else if (IS_JSL_EHL(i915) && port == PORT_D)
7426 		return PHY_A;
7427 
7428 	return PHY_A + port - PORT_A;
7429 }
7430 
7431 enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
7432 {
7433 	if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port)))
7434 		return TC_PORT_NONE;
7435 
7436 	if (INTEL_GEN(dev_priv) >= 12)
7437 		return TC_PORT_1 + port - PORT_TC1;
7438 	else
7439 		return TC_PORT_1 + port - PORT_C;
7440 }
7441 
7442 enum intel_display_power_domain intel_port_to_power_domain(enum port port)
7443 {
7444 	switch (port) {
7445 	case PORT_A:
7446 		return POWER_DOMAIN_PORT_DDI_A_LANES;
7447 	case PORT_B:
7448 		return POWER_DOMAIN_PORT_DDI_B_LANES;
7449 	case PORT_C:
7450 		return POWER_DOMAIN_PORT_DDI_C_LANES;
7451 	case PORT_D:
7452 		return POWER_DOMAIN_PORT_DDI_D_LANES;
7453 	case PORT_E:
7454 		return POWER_DOMAIN_PORT_DDI_E_LANES;
7455 	case PORT_F:
7456 		return POWER_DOMAIN_PORT_DDI_F_LANES;
7457 	case PORT_G:
7458 		return POWER_DOMAIN_PORT_DDI_G_LANES;
7459 	case PORT_H:
7460 		return POWER_DOMAIN_PORT_DDI_H_LANES;
7461 	case PORT_I:
7462 		return POWER_DOMAIN_PORT_DDI_I_LANES;
7463 	default:
7464 		MISSING_CASE(port);
7465 		return POWER_DOMAIN_PORT_OTHER;
7466 	}
7467 }
7468 
7469 enum intel_display_power_domain
7470 intel_aux_power_domain(struct intel_digital_port *dig_port)
7471 {
7472 	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
7473 	enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
7474 
7475 	if (intel_phy_is_tc(dev_priv, phy) &&
7476 	    dig_port->tc_mode == TC_PORT_TBT_ALT) {
7477 		switch (dig_port->aux_ch) {
7478 		case AUX_CH_C:
7479 			return POWER_DOMAIN_AUX_C_TBT;
7480 		case AUX_CH_D:
7481 			return POWER_DOMAIN_AUX_D_TBT;
7482 		case AUX_CH_E:
7483 			return POWER_DOMAIN_AUX_E_TBT;
7484 		case AUX_CH_F:
7485 			return POWER_DOMAIN_AUX_F_TBT;
7486 		case AUX_CH_G:
7487 			return POWER_DOMAIN_AUX_G_TBT;
7488 		case AUX_CH_H:
7489 			return POWER_DOMAIN_AUX_H_TBT;
7490 		case AUX_CH_I:
7491 			return POWER_DOMAIN_AUX_I_TBT;
7492 		default:
7493 			MISSING_CASE(dig_port->aux_ch);
7494 			return POWER_DOMAIN_AUX_C_TBT;
7495 		}
7496 	}
7497 
7498 	return intel_legacy_aux_to_power_domain(dig_port->aux_ch);
7499 }
7500 
7501 /*
7502  * Converts aux_ch to power_domain without caring about TBT ports; for those,
7503  * use intel_aux_power_domain() instead.
7504  */
7505 enum intel_display_power_domain
7506 intel_legacy_aux_to_power_domain(enum aux_ch aux_ch)
7507 {
7508 	switch (aux_ch) {
7509 	case AUX_CH_A:
7510 		return POWER_DOMAIN_AUX_A;
7511 	case AUX_CH_B:
7512 		return POWER_DOMAIN_AUX_B;
7513 	case AUX_CH_C:
7514 		return POWER_DOMAIN_AUX_C;
7515 	case AUX_CH_D:
7516 		return POWER_DOMAIN_AUX_D;
7517 	case AUX_CH_E:
7518 		return POWER_DOMAIN_AUX_E;
7519 	case AUX_CH_F:
7520 		return POWER_DOMAIN_AUX_F;
7521 	case AUX_CH_G:
7522 		return POWER_DOMAIN_AUX_G;
7523 	case AUX_CH_H:
7524 		return POWER_DOMAIN_AUX_H;
7525 	case AUX_CH_I:
7526 		return POWER_DOMAIN_AUX_I;
7527 	default:
7528 		MISSING_CASE(aux_ch);
7529 		return POWER_DOMAIN_AUX_A;
7530 	}
7531 }
7532 
7533 static u64 get_crtc_power_domains(struct intel_crtc_state *crtc_state)
7534 {
7535 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7536 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7537 	struct drm_encoder *encoder;
7538 	enum pipe pipe = crtc->pipe;
7539 	u64 mask;
7540 	enum transcoder transcoder = crtc_state->cpu_transcoder;
7541 
7542 	if (!crtc_state->hw.active)
7543 		return 0;
7544 
7545 	mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
7546 	mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(transcoder));
7547 	if (crtc_state->pch_pfit.enabled ||
7548 	    crtc_state->pch_pfit.force_thru)
7549 		mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
7550 
7551 	drm_for_each_encoder_mask(encoder, &dev_priv->drm,
7552 				  crtc_state->uapi.encoder_mask) {
7553 		struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
7554 
7555 		mask |= BIT_ULL(intel_encoder->power_domain);
7556 	}
7557 
7558 	if (HAS_DDI(dev_priv) && crtc_state->has_audio)
7559 		mask |= BIT_ULL(POWER_DOMAIN_AUDIO);
7560 
7561 	if (crtc_state->shared_dpll)
7562 		mask |= BIT_ULL(POWER_DOMAIN_DISPLAY_CORE);
7563 
7564 	if (crtc_state->dsc.compression_enable)
7565 		mask |= BIT_ULL(intel_dsc_power_domain(crtc_state));
7566 
7567 	return mask;
7568 }
7569 
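/*
 * Grab references on the power domains needed by the CRTC's new state and
 * return the mask of domains that are no longer needed; the caller is
 * expected to release those via modeset_put_power_domains() once the new
 * state has taken effect.
 */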
7570 static u64
7571 modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state)
7572 {
7573 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7574 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7575 	enum intel_display_power_domain domain;
7576 	u64 domains, new_domains, old_domains;
7577 
7578 	old_domains = crtc->enabled_power_domains;
7579 	crtc->enabled_power_domains = new_domains =
7580 		get_crtc_power_domains(crtc_state);
7581 
7582 	domains = new_domains & ~old_domains;
7583 
7584 	for_each_power_domain(domain, domains)
7585 		intel_display_power_get(dev_priv, domain);
7586 
7587 	return old_domains & ~new_domains;
7588 }
7589 
7590 static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
7591 				      u64 domains)
7592 {
7593 	enum intel_display_power_domain domain;
7594 
7595 	for_each_power_domain(domain, domains)
7596 		intel_display_power_put_unchecked(dev_priv, domain);
7597 }
7598 
7599 static void valleyview_crtc_enable(struct intel_atomic_state *state,
7600 				   struct intel_crtc *crtc)
7601 {
7602 	const struct intel_crtc_state *new_crtc_state =
7603 		intel_atomic_get_new_crtc_state(state, crtc);
7604 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7605 	enum pipe pipe = crtc->pipe;
7606 
7607 	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
7608 		return;
7609 
7610 	if (intel_crtc_has_dp_encoder(new_crtc_state))
7611 		intel_dp_set_m_n(new_crtc_state, M1_N1);
7612 
7613 	intel_set_transcoder_timings(new_crtc_state);
7614 	intel_set_pipe_src_size(new_crtc_state);
7615 
7616 	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
7617 		intel_de_write(dev_priv, CHV_BLEND(pipe), CHV_BLEND_LEGACY);
7618 		intel_de_write(dev_priv, CHV_CANVAS(pipe), 0);
7619 	}
7620 
7621 	i9xx_set_pipeconf(new_crtc_state);
7622 
7623 	crtc->active = true;
7624 
7625 	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
7626 
7627 	intel_encoders_pre_pll_enable(state, crtc);
7628 
7629 	if (IS_CHERRYVIEW(dev_priv)) {
7630 		chv_prepare_pll(crtc, new_crtc_state);
7631 		chv_enable_pll(crtc, new_crtc_state);
7632 	} else {
7633 		vlv_prepare_pll(crtc, new_crtc_state);
7634 		vlv_enable_pll(crtc, new_crtc_state);
7635 	}
7636 
7637 	intel_encoders_pre_enable(state, crtc);
7638 
7639 	i9xx_pfit_enable(new_crtc_state);
7640 
7641 	intel_color_load_luts(new_crtc_state);
7642 	intel_color_commit(new_crtc_state);
7643 	/* update DSPCNTR to configure gamma for pipe bottom color */
7644 	intel_disable_primary_plane(new_crtc_state);
7645 
7646 	dev_priv->display.initial_watermarks(state, crtc);
7647 	intel_enable_pipe(new_crtc_state);
7648 
7649 	intel_crtc_vblank_on(new_crtc_state);
7650 
7651 	intel_encoders_enable(state, crtc);
7652 }
7653 
7654 static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state)
7655 {
7656 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7657 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7658 
7659 	intel_de_write(dev_priv, FP0(crtc->pipe),
7660 		       crtc_state->dpll_hw_state.fp0);
7661 	intel_de_write(dev_priv, FP1(crtc->pipe),
7662 		       crtc_state->dpll_hw_state.fp1);
7663 }
7664 
7665 static void i9xx_crtc_enable(struct intel_atomic_state *state,
7666 			     struct intel_crtc *crtc)
7667 {
7668 	const struct intel_crtc_state *new_crtc_state =
7669 		intel_atomic_get_new_crtc_state(state, crtc);
7670 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7671 	enum pipe pipe = crtc->pipe;
7672 
7673 	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
7674 		return;
7675 
7676 	i9xx_set_pll_dividers(new_crtc_state);
7677 
7678 	if (intel_crtc_has_dp_encoder(new_crtc_state))
7679 		intel_dp_set_m_n(new_crtc_state, M1_N1);
7680 
7681 	intel_set_transcoder_timings(new_crtc_state);
7682 	intel_set_pipe_src_size(new_crtc_state);
7683 
7684 	i9xx_set_pipeconf(new_crtc_state);
7685 
7686 	crtc->active = true;
7687 
7688 	if (!IS_GEN(dev_priv, 2))
7689 		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
7690 
7691 	intel_encoders_pre_enable(state, crtc);
7692 
7693 	i9xx_enable_pll(crtc, new_crtc_state);
7694 
7695 	i9xx_pfit_enable(new_crtc_state);
7696 
7697 	intel_color_load_luts(new_crtc_state);
7698 	intel_color_commit(new_crtc_state);
7699 	/* update DSPCNTR to configure gamma for pipe bottom color */
7700 	intel_disable_primary_plane(new_crtc_state);
7701 
7702 	if (dev_priv->display.initial_watermarks)
7703 		dev_priv->display.initial_watermarks(state, crtc);
7704 	else
7705 		intel_update_watermarks(crtc);
7706 	intel_enable_pipe(new_crtc_state);
7707 
7708 	intel_crtc_vblank_on(new_crtc_state);
7709 
7710 	intel_encoders_enable(state, crtc);
7711 
7712 	/* prevents spurious underruns */
7713 	if (IS_GEN(dev_priv, 2))
7714 		intel_wait_for_vblank(dev_priv, pipe);
7715 }
7716 
7717 static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
7718 {
7719 	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
7720 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7721 
7722 	if (!old_crtc_state->gmch_pfit.control)
7723 		return;
7724 
7725 	assert_pipe_disabled(dev_priv, old_crtc_state->cpu_transcoder);
7726 
7727 	drm_dbg_kms(&dev_priv->drm, "disabling pfit, current: 0x%08x\n",
7728 		    intel_de_read(dev_priv, PFIT_CONTROL));
7729 	intel_de_write(dev_priv, PFIT_CONTROL, 0);
7730 }
7731 
7732 static void i9xx_crtc_disable(struct intel_atomic_state *state,
7733 			      struct intel_crtc *crtc)
7734 {
7735 	struct intel_crtc_state *old_crtc_state =
7736 		intel_atomic_get_old_crtc_state(state, crtc);
7737 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7738 	enum pipe pipe = crtc->pipe;
7739 
7740 	/*
7741 	 * On gen2 planes are double buffered but the pipe isn't, so we must
7742 	 * wait for planes to fully turn off before disabling the pipe.
7743 	 */
7744 	if (IS_GEN(dev_priv, 2))
7745 		intel_wait_for_vblank(dev_priv, pipe);
7746 
7747 	intel_encoders_disable(state, crtc);
7748 
7749 	intel_crtc_vblank_off(old_crtc_state);
7750 
7751 	intel_disable_pipe(old_crtc_state);
7752 
7753 	i9xx_pfit_disable(old_crtc_state);
7754 
7755 	intel_encoders_post_disable(state, crtc);
7756 
7757 	if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
7758 		if (IS_CHERRYVIEW(dev_priv))
7759 			chv_disable_pll(dev_priv, pipe);
7760 		else if (IS_VALLEYVIEW(dev_priv))
7761 			vlv_disable_pll(dev_priv, pipe);
7762 		else
7763 			i9xx_disable_pll(old_crtc_state);
7764 	}
7765 
7766 	intel_encoders_post_pll_disable(state, crtc);
7767 
7768 	if (!IS_GEN(dev_priv, 2))
7769 		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
7770 
7771 	if (!dev_priv->display.initial_watermarks)
7772 		intel_update_watermarks(crtc);
7773 
7774 	/* clock the pipe down to 640x480@60 to potentially save power */
7775 	if (IS_I830(dev_priv))
7776 		i830_enable_pipe(dev_priv, pipe);
7777 }
7778 
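/*
 * Force a CRTC off outside of a normal atomic commit (e.g. when sanitizing
 * hardware state taken over from the BIOS) and scrub the corresponding
 * software bookkeeping: active pipe masks, cdclk, dbuf and bandwidth state.
 */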
7779 static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
7780 					struct drm_modeset_acquire_ctx *ctx)
7781 {
7782 	struct intel_encoder *encoder;
7783 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7784 	struct intel_bw_state *bw_state =
7785 		to_intel_bw_state(dev_priv->bw_obj.state);
7786 	struct intel_cdclk_state *cdclk_state =
7787 		to_intel_cdclk_state(dev_priv->cdclk.obj.state);
7788 	struct intel_dbuf_state *dbuf_state =
7789 		to_intel_dbuf_state(dev_priv->dbuf.obj.state);
7790 	struct intel_crtc_state *crtc_state =
7791 		to_intel_crtc_state(crtc->base.state);
7792 	enum intel_display_power_domain domain;
7793 	struct intel_plane *plane;
7794 	struct drm_atomic_state *state;
7795 	struct intel_crtc_state *temp_crtc_state;
7796 	enum pipe pipe = crtc->pipe;
7797 	u64 domains;
7798 	int ret;
7799 
7800 	if (!crtc_state->hw.active)
7801 		return;
7802 
7803 	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
7804 		const struct intel_plane_state *plane_state =
7805 			to_intel_plane_state(plane->base.state);
7806 
7807 		if (plane_state->uapi.visible)
7808 			intel_plane_disable_noatomic(crtc, plane);
7809 	}
7810 
7811 	state = drm_atomic_state_alloc(&dev_priv->drm);
7812 	if (!state) {
7813 		drm_dbg_kms(&dev_priv->drm,
7814 			    "failed to disable [CRTC:%d:%s], out of memory\n",
7815 			    crtc->base.base.id, crtc->base.name);
7816 		return;
7817 	}
7818 
7819 	state->acquire_ctx = ctx;
7820 
7821 	/* Everything's already locked, -EDEADLK can't happen. */
7822 	temp_crtc_state = intel_atomic_get_crtc_state(state, crtc);
7823 	ret = drm_atomic_add_affected_connectors(state, &crtc->base);
7824 
7825 	drm_WARN_ON(&dev_priv->drm, IS_ERR(temp_crtc_state) || ret);
7826 
7827 	dev_priv->display.crtc_disable(to_intel_atomic_state(state), crtc);
7828 
7829 	drm_atomic_state_put(state);
7830 
7831 	drm_dbg_kms(&dev_priv->drm,
7832 		    "[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
7833 		    crtc->base.base.id, crtc->base.name);
7834 
7835 	crtc->active = false;
7836 	crtc->base.enabled = false;
7837 
7838 	drm_WARN_ON(&dev_priv->drm,
7839 		    drm_atomic_set_mode_for_crtc(&crtc_state->uapi, NULL) < 0);
7840 	crtc_state->uapi.active = false;
7841 	crtc_state->uapi.connector_mask = 0;
7842 	crtc_state->uapi.encoder_mask = 0;
7843 	intel_crtc_free_hw_state(crtc_state);
7844 	memset(&crtc_state->hw, 0, sizeof(crtc_state->hw));
7845 
7846 	for_each_encoder_on_crtc(&dev_priv->drm, &crtc->base, encoder)
7847 		encoder->base.crtc = NULL;
7848 
7849 	intel_fbc_disable(crtc);
7850 	intel_update_watermarks(crtc);
7851 	intel_disable_shared_dpll(crtc_state);
7852 
7853 	domains = crtc->enabled_power_domains;
7854 	for_each_power_domain(domain, domains)
7855 		intel_display_power_put_unchecked(dev_priv, domain);
7856 	crtc->enabled_power_domains = 0;
7857 
7858 	dev_priv->active_pipes &= ~BIT(pipe);
7859 	cdclk_state->min_cdclk[pipe] = 0;
7860 	cdclk_state->min_voltage_level[pipe] = 0;
7861 	cdclk_state->active_pipes &= ~BIT(pipe);
7862 
7863 	dbuf_state->active_pipes &= ~BIT(pipe);
7864 
7865 	bw_state->data_rate[pipe] = 0;
7866 	bw_state->num_active_planes[pipe] = 0;
7867 }
7868 
7869 /*
7870  * Turn all CRTCs off, but do not adjust state.
7871  * This has to be paired with a call to intel_modeset_setup_hw_state.
7872  */
7873 int intel_display_suspend(struct drm_device *dev)
7874 {
7875 	struct drm_i915_private *dev_priv = to_i915(dev);
7876 	struct drm_atomic_state *state;
7877 	int ret;
7878 
7879 	state = drm_atomic_helper_suspend(dev);
7880 	ret = PTR_ERR_OR_ZERO(state);
7881 	if (ret)
7882 		drm_err(&dev_priv->drm, "Suspending CRTCs failed with %i\n",
7883 			ret);
7884 	else
7885 		dev_priv->modeset_restore_state = state;
7886 	return ret;
7887 }
7888 
7889 void intel_encoder_destroy(struct drm_encoder *encoder)
7890 {
7891 	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
7892 
7893 	drm_encoder_cleanup(encoder);
7894 	kfree(intel_encoder);
7895 }
7896 
7897 /* Cross check the actual hw state with our own modeset state tracking (and its
7898  * internal consistency). */
7899 static void intel_connector_verify_state(struct intel_crtc_state *crtc_state,
7900 					 struct drm_connector_state *conn_state)
7901 {
7902 	struct intel_connector *connector = to_intel_connector(conn_state->connector);
7903 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
7904 
7905 	drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n",
7906 		    connector->base.base.id, connector->base.name);
7907 
7908 	if (connector->get_hw_state(connector)) {
7909 		struct intel_encoder *encoder = intel_attached_encoder(connector);
7910 
7911 		I915_STATE_WARN(!crtc_state,
7912 			 "connector enabled without attached crtc\n");
7913 
7914 		if (!crtc_state)
7915 			return;
7916 
7917 		I915_STATE_WARN(!crtc_state->hw.active,
7918 				"connector is active, but attached crtc isn't\n");
7919 
7920 		if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
7921 			return;
7922 
7923 		I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
7924 			"atomic encoder doesn't match attached encoder\n");
7925 
7926 		I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
7927 			"attached encoder crtc differs from connector crtc\n");
7928 	} else {
7929 		I915_STATE_WARN(crtc_state && crtc_state->hw.active,
7930 				"attached crtc is active, but connector isn't\n");
7931 		I915_STATE_WARN(!crtc_state && conn_state->best_encoder,
7932 			"best encoder set without crtc!\n");
7933 	}
7934 }
7935 
7936 static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
7937 {
7938 	if (crtc_state->hw.enable && crtc_state->has_pch_encoder)
7939 		return crtc_state->fdi_lanes;
7940 
7941 	return 0;
7942 }
7943 
7944 static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
7945 			       struct intel_crtc_state *pipe_config)
7946 {
7947 	struct drm_i915_private *dev_priv = to_i915(dev);
7948 	struct drm_atomic_state *state = pipe_config->uapi.state;
7949 	struct intel_crtc *other_crtc;
7950 	struct intel_crtc_state *other_crtc_state;
7951 
7952 	drm_dbg_kms(&dev_priv->drm,
7953 		    "checking fdi config on pipe %c, lanes %i\n",
7954 		    pipe_name(pipe), pipe_config->fdi_lanes);
7955 	if (pipe_config->fdi_lanes > 4) {
7956 		drm_dbg_kms(&dev_priv->drm,
7957 			    "invalid fdi lane config on pipe %c: %i lanes\n",
7958 			    pipe_name(pipe), pipe_config->fdi_lanes);
7959 		return -EINVAL;
7960 	}
7961 
7962 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
7963 		if (pipe_config->fdi_lanes > 2) {
7964 			drm_dbg_kms(&dev_priv->drm,
7965 				    "only 2 lanes on haswell, required: %i lanes\n",
7966 				    pipe_config->fdi_lanes);
7967 			return -EINVAL;
7968 		} else {
7969 			return 0;
7970 		}
7971 	}
7972 
7973 	if (INTEL_NUM_PIPES(dev_priv) == 2)
7974 		return 0;
7975 
7976 	/* Ivybridge 3 pipe is really complicated */
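	/*
	 * Pipes B and C share FDI lanes: pipe B may use more than 2 lanes
	 * only while pipe C is unused, and pipe C is limited to 2 lanes and
	 * additionally requires pipe B to use at most 2 lanes itself.
	 */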
7977 	switch (pipe) {
7978 	case PIPE_A:
7979 		return 0;
7980 	case PIPE_B:
7981 		if (pipe_config->fdi_lanes <= 2)
7982 			return 0;
7983 
7984 		other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_C);
7985 		other_crtc_state =
7986 			intel_atomic_get_crtc_state(state, other_crtc);
7987 		if (IS_ERR(other_crtc_state))
7988 			return PTR_ERR(other_crtc_state);
7989 
7990 		if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
7991 			drm_dbg_kms(&dev_priv->drm,
7992 				    "invalid shared fdi lane config on pipe %c: %i lanes\n",
7993 				    pipe_name(pipe), pipe_config->fdi_lanes);
7994 			return -EINVAL;
7995 		}
7996 		return 0;
7997 	case PIPE_C:
7998 		if (pipe_config->fdi_lanes > 2) {
7999 			drm_dbg_kms(&dev_priv->drm,
8000 				    "only 2 lanes on pipe %c: required %i lanes\n",
8001 				    pipe_name(pipe), pipe_config->fdi_lanes);
8002 			return -EINVAL;
8003 		}
8004 
8005 		other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_B);
8006 		other_crtc_state =
8007 			intel_atomic_get_crtc_state(state, other_crtc);
8008 		if (IS_ERR(other_crtc_state))
8009 			return PTR_ERR(other_crtc_state);
8010 
8011 		if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
8012 			drm_dbg_kms(&dev_priv->drm,
8013 				    "fdi link B uses too many lanes to enable link C\n");
8014 			return -EINVAL;
8015 		}
8016 		return 0;
8017 	default:
8018 		BUG();
8019 	}
8020 }
8021 
8022 #define RETRY 1
8023 static int ilk_fdi_compute_config(struct intel_crtc *intel_crtc,
8024 				  struct intel_crtc_state *pipe_config)
8025 {
8026 	struct drm_device *dev = intel_crtc->base.dev;
8027 	struct drm_i915_private *i915 = to_i915(dev);
8028 	const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
8029 	int lane, link_bw, fdi_dotclock, ret;
8030 	bool needs_recompute = false;
8031 
8032 retry:
8033 	/* FDI is a binary signal running at ~2.7GHz, encoding
8034 	 * each output octet as 10 bits. The actual frequency
8035 	 * is stored as a divider into a 100MHz clock, and the
8036 	 * mode pixel clock is stored in units of 1 kHz.
8037 	 * Hence the bandwidth of each lane in terms of the mode signal
8038 	 * is:
8039 	 */
8040 	link_bw = intel_fdi_link_freq(i915, pipe_config);
8041 
8042 	fdi_dotclock = adjusted_mode->crtc_clock;
8043 
8044 	lane = ilk_get_lanes_required(fdi_dotclock, link_bw,
8045 				      pipe_config->pipe_bpp);
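	/*
	 * Illustrative numbers (assumed, not derived from this state): a
	 * 148500 kHz mode at 24 bpp needs 148500 * 24 = 3564000 kbit/s,
	 * while one lane at link_bw 270000 carries 270000 * 8 = 2160000
	 * kbit/s, so two lanes are required (the lane helper may reserve
	 * additional headroom on top of this).
	 */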
8046 
8047 	pipe_config->fdi_lanes = lane;
8048 
8049 	intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
8050 			       link_bw, &pipe_config->fdi_m_n, false, false);
8051 
8052 	ret = ilk_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
8053 	if (ret == -EDEADLK)
8054 		return ret;
8055 
8056 	if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
8057 		pipe_config->pipe_bpp -= 2*3;
8058 		drm_dbg_kms(&i915->drm,
8059 			    "fdi link bw constraint, reducing pipe bpp to %i\n",
8060 			    pipe_config->pipe_bpp);
8061 		needs_recompute = true;
8062 		pipe_config->bw_constrained = true;
8063 
8064 		goto retry;
8065 	}
8066 
8067 	if (needs_recompute)
8068 		return RETRY;
8069 
8070 	return ret;
8071 }
8072 
8073 bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
8074 {
8075 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
8076 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8077 
8078 	/* IPS only exists on ULT machines and is tied to pipe A. */
8079 	if (!hsw_crtc_supports_ips(crtc))
8080 		return false;
8081 
8082 	if (!dev_priv->params.enable_ips)
8083 		return false;
8084 
8085 	if (crtc_state->pipe_bpp > 24)
8086 		return false;
8087 
8088 	/*
8089 	 * We compare against the max, which means we must take
8090 	 * the increased cdclk requirement into account when
8091 	 * calculating the new cdclk.
8092 	 *
8093 	 * Should measure whether using a lower cdclk w/o IPS would be preferable.
8094 	 */
8095 	if (IS_BROADWELL(dev_priv) &&
8096 	    crtc_state->pixel_rate > dev_priv->max_cdclk_freq * 95 / 100)
8097 		return false;
8098 
8099 	return true;
8100 }
8101 
8102 static int hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
8103 {
8104 	struct drm_i915_private *dev_priv =
8105 		to_i915(crtc_state->uapi.crtc->dev);
8106 	struct intel_atomic_state *state =
8107 		to_intel_atomic_state(crtc_state->uapi.state);
8108 
8109 	crtc_state->ips_enabled = false;
8110 
8111 	if (!hsw_crtc_state_ips_capable(crtc_state))
8112 		return 0;
8113 
8114 	/*
8115 	 * When IPS gets enabled, the pipe CRC changes. Since IPS gets
8116 	 * enabled and disabled dynamically based on package C states,
8117 	 * user space can't make reliable use of the CRCs, so just keep
8118 	 * IPS disabled while CRC capture is in use.
8119 	 */
8120 	if (crtc_state->crc_enabled)
8121 		return 0;
8122 
8123 	/* IPS should be fine as long as at least one plane is enabled. */
8124 	if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)))
8125 		return 0;
8126 
8127 	if (IS_BROADWELL(dev_priv)) {
8128 		const struct intel_cdclk_state *cdclk_state;
8129 
8130 		cdclk_state = intel_atomic_get_cdclk_state(state);
8131 		if (IS_ERR(cdclk_state))
8132 			return PTR_ERR(cdclk_state);
8133 
8134 		/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
8135 		if (crtc_state->pixel_rate > cdclk_state->logical.cdclk * 95 / 100)
8136 			return 0;
8137 	}
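	/*
	 * e.g. with an assumed 540000 kHz logical cdclk, IPS is allowed up
	 * to a 540000 * 95 / 100 = 513000 kHz pixel rate.
	 */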
8138 
8139 	crtc_state->ips_enabled = true;
8140 
8141 	return 0;
8142 }
8143 
8144 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
8145 {
8146 	const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8147 
8148 	/* GDG double wide on either pipe, otherwise pipe A only */
8149 	return INTEL_GEN(dev_priv) < 4 &&
8150 		(crtc->pipe == PIPE_A || IS_I915G(dev_priv));
8151 }
8152 
8153 static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *crtc_state)
8154 {
8155 	u32 pixel_rate = crtc_state->hw.pipe_mode.crtc_clock;
8156 	unsigned int pipe_w, pipe_h, pfit_w, pfit_h;
8157 
8158 	/*
8159 	 * We only use IF-ID interlacing. If we ever use
8160 	 * PF-ID we'll need to adjust the pixel_rate here.
8161 	 */
8162 
8163 	if (!crtc_state->pch_pfit.enabled)
8164 		return pixel_rate;
8165 
8166 	pipe_w = crtc_state->pipe_src_w;
8167 	pipe_h = crtc_state->pipe_src_h;
8168 
8169 	pfit_w = drm_rect_width(&crtc_state->pch_pfit.dst);
8170 	pfit_h = drm_rect_height(&crtc_state->pch_pfit.dst);
8171 
8172 	if (pipe_w < pfit_w)
8173 		pipe_w = pfit_w;
8174 	if (pipe_h < pfit_h)
8175 		pipe_h = pfit_h;
8176 
8177 	if (drm_WARN_ON(crtc_state->uapi.crtc->dev,
8178 			!pfit_w || !pfit_h))
8179 		return pixel_rate;
8180 
8181 	return div_u64(mul_u32_u32(pixel_rate, pipe_w * pipe_h),
8182 		       pfit_w * pfit_h);
8183 }
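/*
 * Example with assumed numbers: downscaling a 3840x2160 pipe source into a
 * 1920x1080 panel fitter window scales the effective pixel rate by
 * (3840 * 2160) / (1920 * 1080) = 4, since the pipe must fetch four source
 * pixels for every pixel it sends out.
 */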
8184 
8185 static void intel_mode_from_crtc_timings(struct drm_display_mode *mode,
8186 					 const struct drm_display_mode *timings)
8187 {
8188 	mode->hdisplay = timings->crtc_hdisplay;
8189 	mode->htotal = timings->crtc_htotal;
8190 	mode->hsync_start = timings->crtc_hsync_start;
8191 	mode->hsync_end = timings->crtc_hsync_end;
8192 
8193 	mode->vdisplay = timings->crtc_vdisplay;
8194 	mode->vtotal = timings->crtc_vtotal;
8195 	mode->vsync_start = timings->crtc_vsync_start;
8196 	mode->vsync_end = timings->crtc_vsync_end;
8197 
8198 	mode->flags = timings->flags;
8199 	mode->type = DRM_MODE_TYPE_DRIVER;
8200 
8201 	mode->clock = timings->crtc_clock;
8202 
8203 	drm_mode_set_name(mode);
8204 }
8205 
8206 static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
8207 {
8208 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
8209 
8210 	if (HAS_GMCH(dev_priv))
8211 		/* FIXME calculate proper pipe pixel rate for GMCH pfit */
8212 		crtc_state->pixel_rate =
8213 			crtc_state->hw.pipe_mode.crtc_clock;
8214 	else
8215 		crtc_state->pixel_rate =
8216 			ilk_pipe_pixel_rate(crtc_state);
8217 }
8218 
8219 static void intel_crtc_readout_derived_state(struct intel_crtc_state *crtc_state)
8220 {
8221 	struct drm_display_mode *mode = &crtc_state->hw.mode;
8222 	struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
8223 	struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
8224 
8225 	drm_mode_copy(pipe_mode, adjusted_mode);
8226 
8227 	if (crtc_state->bigjoiner) {
8228 		/*
8229 		 * The transcoder is programmed with the full mode, but the
8230 		 * pipe timings are half of the transcoder mode.
8231 		 */
8232 		pipe_mode->crtc_hdisplay /= 2;
8233 		pipe_mode->crtc_hblank_start /= 2;
8234 		pipe_mode->crtc_hblank_end /= 2;
8235 		pipe_mode->crtc_hsync_start /= 2;
8236 		pipe_mode->crtc_hsync_end /= 2;
8237 		pipe_mode->crtc_htotal /= 2;
8238 		pipe_mode->crtc_clock /= 2;
8239 	}
8240 
8241 	intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
8242 	intel_mode_from_crtc_timings(adjusted_mode, adjusted_mode);
8243 
8244 	intel_crtc_compute_pixel_rate(crtc_state);
8245 
8246 	drm_mode_copy(mode, adjusted_mode);
8247 	mode->hdisplay = crtc_state->pipe_src_w << crtc_state->bigjoiner;
8248 	mode->vdisplay = crtc_state->pipe_src_h;
8249 }
8250 
8251 static void intel_encoder_get_config(struct intel_encoder *encoder,
8252 				     struct intel_crtc_state *crtc_state)
8253 {
8254 	encoder->get_config(encoder, crtc_state);
8255 
8256 	intel_crtc_readout_derived_state(crtc_state);
8257 }
8258 
8259 static int intel_crtc_compute_config(struct intel_crtc *crtc,
8260 				     struct intel_crtc_state *pipe_config)
8261 {
8262 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8263 	struct drm_display_mode *pipe_mode = &pipe_config->hw.pipe_mode;
8264 	int clock_limit = dev_priv->max_dotclk_freq;
8265 
8266 	drm_mode_copy(pipe_mode, &pipe_config->hw.adjusted_mode);
8267 
8268 	/* Adjust pipe_mode for bigjoiner, with half the horizontal mode */
8269 	if (pipe_config->bigjoiner) {
8270 		pipe_mode->crtc_clock /= 2;
8271 		pipe_mode->crtc_hdisplay /= 2;
8272 		pipe_mode->crtc_hblank_start /= 2;
8273 		pipe_mode->crtc_hblank_end /= 2;
8274 		pipe_mode->crtc_hsync_start /= 2;
8275 		pipe_mode->crtc_hsync_end /= 2;
8276 		pipe_mode->crtc_htotal /= 2;
8277 		pipe_config->pipe_src_w /= 2;
8278 	}
8279 
8280 	intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
8281 
8282 	if (INTEL_GEN(dev_priv) < 4) {
8283 		clock_limit = dev_priv->max_cdclk_freq * 9 / 10;
8284 
8285 		/*
8286 		 * Enable double wide mode when the dot clock
8287 		 * is > 90% of the (display) core speed.
8288 		 */
8289 		if (intel_crtc_supports_double_wide(crtc) &&
8290 		    pipe_mode->crtc_clock > clock_limit) {
8291 			clock_limit = dev_priv->max_dotclk_freq;
8292 			pipe_config->double_wide = true;
8293 		}
8294 	}
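	/*
	 * Example with assumed numbers: a 320000 kHz max cdclk gives a
	 * 320000 * 9 / 10 = 288000 kHz single wide limit, so e.g. a
	 * 297000 kHz mode is only accepted with double wide enabled.
	 */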
8295 
8296 	if (pipe_mode->crtc_clock > clock_limit) {
8297 		drm_dbg_kms(&dev_priv->drm,
8298 			    "requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
8299 			    pipe_mode->crtc_clock, clock_limit,
8300 			    yesno(pipe_config->double_wide));
8301 		return -EINVAL;
8302 	}
8303 
8304 	if ((pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
8305 	     pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) &&
8306 	     pipe_config->hw.ctm) {
8307 		/*
8308 		 * There is only one pipe CSC unit per pipe, and we need that
8309 		 * for output conversion from RGB->YCBCR. So if CTM is already
8310 		 * applied we can't support YCBCR420 output.
8311 		 */
8312 		drm_dbg_kms(&dev_priv->drm,
8313 			    "YCBCR420 and CTM together are not possible\n");
8314 		return -EINVAL;
8315 	}
8316 
8317 	/*
8318 	 * Pipe horizontal size must be even in:
8319 	 * - DVO ganged mode
8320 	 * - LVDS dual channel mode
8321 	 * - Double wide pipe
8322 	 */
8323 	if (pipe_config->pipe_src_w & 1) {
8324 		if (pipe_config->double_wide) {
8325 			drm_dbg_kms(&dev_priv->drm,
8326 				    "Odd pipe source width not supported with double wide pipe\n");
8327 			return -EINVAL;
8328 		}
8329 
8330 		if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
8331 		    intel_is_dual_link_lvds(dev_priv)) {
8332 			drm_dbg_kms(&dev_priv->drm,
8333 				    "Odd pipe source width not supported with dual link LVDS\n");
8334 			return -EINVAL;
8335 		}
8336 	}
8337 
8338 	/* Cantiga+ cannot handle modes with an hsync front porch of 0.
8339 	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
8340 	 */
8341 	if ((INTEL_GEN(dev_priv) > 4 || IS_G4X(dev_priv)) &&
8342 	    pipe_mode->crtc_hsync_start == pipe_mode->crtc_hdisplay)
8343 		return -EINVAL;
8344 
8345 	intel_crtc_compute_pixel_rate(pipe_config);
8346 
8347 	if (pipe_config->has_pch_encoder)
8348 		return ilk_fdi_compute_config(crtc, pipe_config);
8349 
8350 	return 0;
8351 }
8352 
8353 static void
8354 intel_reduce_m_n_ratio(u32 *num, u32 *den)
8355 {
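	/* Halve both values until they fit within DATA_LINK_M_N_MASK */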
8356 	while (*num > DATA_LINK_M_N_MASK ||
8357 	       *den > DATA_LINK_M_N_MASK) {
8358 		*num >>= 1;
8359 		*den >>= 1;
8360 	}
8361 }
8362 
8363 static void compute_m_n(unsigned int m, unsigned int n,
8364 			u32 *ret_m, u32 *ret_n,
8365 			bool constant_n)
8366 {
8367 	/*
8368 	 * Several DP dongles in particular seem to be fussy about
8369 	 * too large link M/N values. Therefore give N the fixed value
8370 	 * 0x8000, which such devices accept. 0x8000 is the N value
8371 	 * specified for asynchronous clock mode, and the devices
8372 	 * expect it in synchronous clock mode as well.
8373 	 */
8374 	if (constant_n)
8375 		*ret_n = DP_LINK_CONSTANT_N_VALUE;
8376 	else
8377 		*ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
8378 
8379 	*ret_m = div_u64(mul_u32_u32(m, *ret_n), n);
8380 	intel_reduce_m_n_ratio(ret_m, ret_n);
8381 }
8382 
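/*
 * Worked example (illustrative numbers only): a 1920x1080@60 stream
 * at 24bpp (pixel_clock = 148500 kHz) over 4 lanes of HBR2
 * (link_clock = 540000 kHz) gives data M/N = 24 * 148500 :
 * 540000 * 4 * 8 and link M/N = 148500 : 540000, both reduced by
 * compute_m_n() to fit the register fields.
 */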
8383 void
8384 intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
8385 		       int pixel_clock, int link_clock,
8386 		       struct intel_link_m_n *m_n,
8387 		       bool constant_n, bool fec_enable)
8388 {
8389 	u32 data_clock = bits_per_pixel * pixel_clock;
8390 
8391 	if (fec_enable)
8392 		data_clock = intel_dp_mode_to_fec_clock(data_clock);
8393 
8394 	m_n->tu = 64;
8395 	compute_m_n(data_clock,
8396 		    link_clock * nlanes * 8,
8397 		    &m_n->gmch_m, &m_n->gmch_n,
8398 		    constant_n);
8399 
8400 	compute_m_n(pixel_clock, link_clock,
8401 		    &m_n->link_m, &m_n->link_n,
8402 		    constant_n);
8403 }
8404 
8405 static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
8406 {
8407 	/*
8408 	 * There may be no VBT; if the BIOS enabled SSC we can just keep
8409 	 * using it to avoid unnecessary flicker. Whereas if the
8410 	 * BIOS isn't using it, don't assume it will work even if the VBT
8411 	 * indicates as much.
8412 	 */
8413 	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
8414 		bool bios_lvds_use_ssc = intel_de_read(dev_priv,
8415 						       PCH_DREF_CONTROL) &
8416 			DREF_SSC1_ENABLE;
8417 
8418 		if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
8419 			drm_dbg_kms(&dev_priv->drm,
8420 				    "SSC %s by BIOS, overriding VBT which says %s\n",
8421 				    enableddisabled(bios_lvds_use_ssc),
8422 				    enableddisabled(dev_priv->vbt.lvds_use_ssc));
8423 			dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
8424 		}
8425 	}
8426 }
8427 
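/*
 * The i915.panel_use_ssc module parameter (when >= 0) overrides the
 * VBT; otherwise follow the VBT, unless a quirk disables SSC.
 */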
8428 static bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
8429 {
8430 	if (dev_priv->params.panel_use_ssc >= 0)
8431 		return dev_priv->params.panel_use_ssc != 0;
8432 	return dev_priv->vbt.lvds_use_ssc
8433 		&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
8434 }
8435 
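/*
 * FP0/FP1 are the DPLL divisor registers. Pineview encodes them
 * differently: N is stored one-hot as (1 << n) in the high word and
 * there is no M1 field, whereas i9xx packs n/m1/m2 into separate
 * byte fields.
 */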
8436 static u32 pnv_dpll_compute_fp(struct dpll *dpll)
8437 {
8438 	return (1 << dpll->n) << 16 | dpll->m2;
8439 }
8440 
8441 static u32 i9xx_dpll_compute_fp(struct dpll *dpll)
8442 {
8443 	return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
8444 }
8445 
8446 static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
8447 				     struct intel_crtc_state *crtc_state,
8448 				     struct dpll *reduced_clock)
8449 {
8450 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8451 	u32 fp, fp2 = 0;
8452 
8453 	if (IS_PINEVIEW(dev_priv)) {
8454 		fp = pnv_dpll_compute_fp(&crtc_state->dpll);
8455 		if (reduced_clock)
8456 			fp2 = pnv_dpll_compute_fp(reduced_clock);
8457 	} else {
8458 		fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
8459 		if (reduced_clock)
8460 			fp2 = i9xx_dpll_compute_fp(reduced_clock);
8461 	}
8462 
8463 	crtc_state->dpll_hw_state.fp0 = fp;
8464 
8465 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
8466 	    reduced_clock) {
8467 		crtc_state->dpll_hw_state.fp1 = fp2;
8468 	} else {
8469 		crtc_state->dpll_hw_state.fp1 = fp;
8470 	}
8471 }
8472 
8473 static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
8474 		pipe)
8475 {
8476 	u32 reg_val;
8477 
8478 	/*
8479 	 * PLLB opamp always calibrates to max value of 0x3f, force enable it
8480 	 * and set it to a reasonable value instead.
8481 	 */
8482 	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
8483 	reg_val &= 0xffffff00;
8484 	reg_val |= 0x00000030;
8485 	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
8486 
8487 	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
8488 	reg_val &= 0x00ffffff;
8489 	reg_val |= 0x8c000000;
8490 	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
8491 
8492 	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
8493 	reg_val &= 0xffffff00;
8494 	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
8495 
8496 	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
8497 	reg_val &= 0x00ffffff;
8498 	reg_val |= 0xb0000000;
8499 	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
8500 }
8501 
8502 static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
8503 					 const struct intel_link_m_n *m_n)
8504 {
8505 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
8506 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8507 	enum pipe pipe = crtc->pipe;
8508 
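	/*
	 * TU_SIZE() encodes the transfer unit size (stored minus one)
	 * alongside the data M value.
	 */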
8509 	intel_de_write(dev_priv, PCH_TRANS_DATA_M1(pipe),
8510 		       TU_SIZE(m_n->tu) | m_n->gmch_m);
8511 	intel_de_write(dev_priv, PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
8512 	intel_de_write(dev_priv, PCH_TRANS_LINK_M1(pipe), m_n->link_m);
8513 	intel_de_write(dev_priv, PCH_TRANS_LINK_N1(pipe), m_n->link_n);
8514 }
8515 
8516 static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
8517 				 enum transcoder transcoder)
8518 {
8519 	if (IS_HASWELL(dev_priv))
8520 		return transcoder == TRANSCODER_EDP;
8521 
8522 	/*
8523 	 * Strictly speaking, some of these registers are available before
8524 	 * gen7, but we only support DRRS on gen7+.
8525 	 */
8526 	return IS_GEN(dev_priv, 7) || IS_CHERRYVIEW(dev_priv);
8527 }
8528 
8529 static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
8530 					 const struct intel_link_m_n *m_n,
8531 					 const struct intel_link_m_n *m2_n2)
8532 {
8533 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
8534 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8535 	enum pipe pipe = crtc->pipe;
8536 	enum transcoder transcoder = crtc_state->cpu_transcoder;
8537 
8538 	if (INTEL_GEN(dev_priv) >= 5) {
8539 		intel_de_write(dev_priv, PIPE_DATA_M1(transcoder),
8540 			       TU_SIZE(m_n->tu) | m_n->gmch_m);
8541 		intel_de_write(dev_priv, PIPE_DATA_N1(transcoder),
8542 			       m_n->gmch_n);
8543 		intel_de_write(dev_priv, PIPE_LINK_M1(transcoder),
8544 			       m_n->link_m);
8545 		intel_de_write(dev_priv, PIPE_LINK_N1(transcoder),
8546 			       m_n->link_n);
8547 		/*
8548 		 * M2_N2 registers are set only if DRRS is supported
8549 		 * (to avoid accessing the registers unnecessarily).
8550 		 */
8551 		if (m2_n2 && crtc_state->has_drrs &&
8552 		    transcoder_has_m2_n2(dev_priv, transcoder)) {
8553 			intel_de_write(dev_priv, PIPE_DATA_M2(transcoder),
8554 				       TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
8555 			intel_de_write(dev_priv, PIPE_DATA_N2(transcoder),
8556 				       m2_n2->gmch_n);
8557 			intel_de_write(dev_priv, PIPE_LINK_M2(transcoder),
8558 				       m2_n2->link_m);
8559 			intel_de_write(dev_priv, PIPE_LINK_N2(transcoder),
8560 				       m2_n2->link_n);
8561 		}
8562 	} else {
8563 		intel_de_write(dev_priv, PIPE_DATA_M_G4X(pipe),
8564 			       TU_SIZE(m_n->tu) | m_n->gmch_m);
8565 		intel_de_write(dev_priv, PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
8566 		intel_de_write(dev_priv, PIPE_LINK_M_G4X(pipe), m_n->link_m);
8567 		intel_de_write(dev_priv, PIPE_LINK_N_G4X(pipe), m_n->link_n);
8568 	}
8569 }
8570 
8571 void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n)
8572 {
8573 	const struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
8574 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
8575 
8576 	if (m_n == M1_N1) {
8577 		dp_m_n = &crtc_state->dp_m_n;
8578 		dp_m2_n2 = &crtc_state->dp_m2_n2;
8579 	} else if (m_n == M2_N2) {
8581 		/*
8582 		 * M2_N2 registers are not supported. Hence m2_n2 divider value
8583 		 * needs to be programmed into M1_N1.
8584 		 */
8585 		dp_m_n = &crtc_state->dp_m2_n2;
8586 	} else {
8587 		drm_err(&i915->drm, "Unsupported divider value\n");
8588 		return;
8589 	}
8590 
8591 	if (crtc_state->has_pch_encoder)
8592 		intel_pch_transcoder_set_m_n(crtc_state, &crtc_state->dp_m_n);
8593 	else
8594 		intel_cpu_transcoder_set_m_n(crtc_state, dp_m_n, dp_m2_n2);
8595 }
8596 
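/*
 * Note: DPLL_MD stores the pixel multiplier minus one, matching the
 * +1 applied on readout in i9xx_get_pipe_config().
 */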
8597 static void vlv_compute_dpll(struct intel_crtc *crtc,
8598 			     struct intel_crtc_state *pipe_config)
8599 {
8600 	pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
8601 		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
8602 	if (crtc->pipe != PIPE_A)
8603 		pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
8604 
8605 	/* DPLL not used with DSI, but still need the rest set up */
8606 	if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
8607 		pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
8608 			DPLL_EXT_BUFFER_ENABLE_VLV;
8609 
8610 	pipe_config->dpll_hw_state.dpll_md =
8611 		(pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
8612 }
8613 
8614 static void chv_compute_dpll(struct intel_crtc *crtc,
8615 			     struct intel_crtc_state *pipe_config)
8616 {
8617 	pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
8618 		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
8619 	if (crtc->pipe != PIPE_A)
8620 		pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
8621 
8622 	/* DPLL not used with DSI, but still need the rest set up */
8623 	if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
8624 		pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;
8625 
8626 	pipe_config->dpll_hw_state.dpll_md =
8627 		(pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
8628 }
8629 
8630 static void vlv_prepare_pll(struct intel_crtc *crtc,
8631 			    const struct intel_crtc_state *pipe_config)
8632 {
8633 	struct drm_device *dev = crtc->base.dev;
8634 	struct drm_i915_private *dev_priv = to_i915(dev);
8635 	enum pipe pipe = crtc->pipe;
8636 	u32 mdiv;
8637 	u32 bestn, bestm1, bestm2, bestp1, bestp2;
8638 	u32 coreclk, reg_val;
8639 
8640 	/* Enable Refclk */
8641 	intel_de_write(dev_priv, DPLL(pipe),
8642 		       pipe_config->dpll_hw_state.dpll & ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));
8643 
8644 	/* No need to actually set up the DPLL with DSI */
8645 	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
8646 		return;
8647 
8648 	vlv_dpio_get(dev_priv);
8649 
8650 	bestn = pipe_config->dpll.n;
8651 	bestm1 = pipe_config->dpll.m1;
8652 	bestm2 = pipe_config->dpll.m2;
8653 	bestp1 = pipe_config->dpll.p1;
8654 	bestp2 = pipe_config->dpll.p2;
8655 
8656 	/* See eDP HDMI DPIO driver vbios notes doc */
8657 
8658 	/* PLL B needs special handling */
8659 	if (pipe == PIPE_B)
8660 		vlv_pllb_recal_opamp(dev_priv, pipe);
8661 
8662 	/* Set up Tx target for periodic Rcomp update */
8663 	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);
8664 
8665 	/* Disable target IRef on PLL */
8666 	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
8667 	reg_val &= 0x00ffffff;
8668 	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);
8669 
8670 	/* Disable fast lock */
8671 	vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);
8672 
8673 	/* Set idtafcrecal before PLL is enabled */
8674 	mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
8675 	mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
8676 	mdiv |= ((bestn << DPIO_N_SHIFT));
8677 	mdiv |= (1 << DPIO_K_SHIFT);
8678 
8679 	/*
8680 	 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
8681 	 * but we don't support that).
8682 	 * Note: don't use the DAC post divider as it seems unstable.
8683 	 */
8684 	mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
8685 	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
8686 
8687 	mdiv |= DPIO_ENABLE_CALIBRATION;
8688 	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
8689 
8690 	/* Set HBR and RBR LPF coefficients */
8691 	if (pipe_config->port_clock == 162000 ||
8692 	    intel_crtc_has_type(pipe_config, INTEL_OUTPUT_ANALOG) ||
8693 	    intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI))
8694 		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
8695 				 0x009f0003);
8696 	else
8697 		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
8698 				 0x00d0000f);
8699 
8700 	if (intel_crtc_has_dp_encoder(pipe_config)) {
8701 		/* Use SSC source */
8702 		if (pipe == PIPE_A)
8703 			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
8704 					 0x0df40000);
8705 		else
8706 			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
8707 					 0x0df70000);
8708 	} else { /* HDMI or VGA */
8709 		/* Use bend source */
8710 		if (pipe == PIPE_A)
8711 			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
8712 					 0x0df70000);
8713 		else
8714 			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
8715 					 0x0df40000);
8716 	}
8717 
8718 	coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
8719 	coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
8720 	if (intel_crtc_has_dp_encoder(pipe_config))
8721 		coreclk |= 0x01000000;
8722 	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
8723 
8724 	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
8725 
8726 	vlv_dpio_put(dev_priv);
8727 }
8728 
8729 static void chv_prepare_pll(struct intel_crtc *crtc,
8730 			    const struct intel_crtc_state *pipe_config)
8731 {
8732 	struct drm_device *dev = crtc->base.dev;
8733 	struct drm_i915_private *dev_priv = to_i915(dev);
8734 	enum pipe pipe = crtc->pipe;
8735 	enum dpio_channel port = vlv_pipe_to_channel(pipe);
8736 	u32 loopfilter, tribuf_calcntr;
8737 	u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
8738 	u32 dpio_val;
8739 	int vco;
8740 
8741 	/* Enable Refclk and SSC */
8742 	intel_de_write(dev_priv, DPLL(pipe),
8743 		       pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
8744 
8745 	/* No need to actually set up the DPLL with DSI */
8746 	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
8747 		return;
8748 
8749 	bestn = pipe_config->dpll.n;
8750 	bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
8751 	bestm1 = pipe_config->dpll.m1;
8752 	bestm2 = pipe_config->dpll.m2 >> 22;
8753 	bestp1 = pipe_config->dpll.p1;
8754 	bestp2 = pipe_config->dpll.p2;
8755 	vco = pipe_config->dpll.vco;
8756 	dpio_val = 0;
8757 	loopfilter = 0;
8758 
8759 	vlv_dpio_get(dev_priv);
8760 
8761 	/* p1 and p2 divider */
8762 	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
8763 			5 << DPIO_CHV_S1_DIV_SHIFT |
8764 			bestp1 << DPIO_CHV_P1_DIV_SHIFT |
8765 			bestp2 << DPIO_CHV_P2_DIV_SHIFT |
8766 			1 << DPIO_CHV_K_DIV_SHIFT);
8767 
8768 	/* Feedback post-divider - m2 */
8769 	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);
8770 
8771 	/* Feedback refclk divider - n and m1 */
8772 	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
8773 			DPIO_CHV_M1_DIV_BY_2 |
8774 			1 << DPIO_CHV_N_DIV_SHIFT);
8775 
8776 	/* M2 fraction division */
8777 	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);
8778 
8779 	/* M2 fraction division enable */
8780 	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
8781 	dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
8782 	dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
8783 	if (bestm2_frac)
8784 		dpio_val |= DPIO_CHV_FRAC_DIV_EN;
8785 	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);
8786 
8787 	/* Program digital lock detect threshold */
8788 	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
8789 	dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
8790 					DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
8791 	dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
8792 	if (!bestm2_frac)
8793 		dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
8794 	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);
8795 
8796 	/* Loop filter */
8797 	if (vco == 5400000) {
8798 		loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
8799 		loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
8800 		loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
8801 		tribuf_calcntr = 0x9;
8802 	} else if (vco <= 6200000) {
8803 		loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
8804 		loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
8805 		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
8806 		tribuf_calcntr = 0x9;
8807 	} else if (vco <= 6480000) {
8808 		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
8809 		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
8810 		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
8811 		tribuf_calcntr = 0x8;
8812 	} else {
8813 		/* Not supported. Apply the same limits as in the max case */
8814 		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
8815 		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
8816 		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
8817 		tribuf_calcntr = 0;
8818 	}
8819 	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);
8820 
8821 	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
8822 	dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
8823 	dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
8824 	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);
8825 
8826 	/* AFC Recal */
8827 	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
8828 			vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
8829 			DPIO_AFC_RECAL);
8830 
8831 	vlv_dpio_put(dev_priv);
8832 }
8833 
8834 /**
8835  * vlv_force_pll_on - forcibly enable just the PLL
8836  * @dev_priv: i915 private structure
8837  * @pipe: pipe PLL to enable
8838  * @dpll: PLL configuration
8839  *
8840  * Enable the PLL for @pipe using the supplied @dpll config. To be used
8841  * in cases where we need the PLL enabled even when @pipe is not going to
8842  * be enabled.
 *
 * Returns: 0 on success, or a negative error code on failure.
8843  */
8844 int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
8845 		     const struct dpll *dpll)
8846 {
8847 	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
8848 	struct intel_crtc_state *pipe_config;
8849 
8850 	pipe_config = intel_crtc_state_alloc(crtc);
8851 	if (!pipe_config)
8852 		return -ENOMEM;
8853 
8854 	pipe_config->cpu_transcoder = (enum transcoder)pipe;
8855 	pipe_config->pixel_multiplier = 1;
8856 	pipe_config->dpll = *dpll;
8857 
8858 	if (IS_CHERRYVIEW(dev_priv)) {
8859 		chv_compute_dpll(crtc, pipe_config);
8860 		chv_prepare_pll(crtc, pipe_config);
8861 		chv_enable_pll(crtc, pipe_config);
8862 	} else {
8863 		vlv_compute_dpll(crtc, pipe_config);
8864 		vlv_prepare_pll(crtc, pipe_config);
8865 		vlv_enable_pll(crtc, pipe_config);
8866 	}
8867 
8868 	kfree(pipe_config);
8869 
8870 	return 0;
8871 }
8872 
8873 /**
8874  * vlv_force_pll_off - forcibly disable just the PLL
8875  * @dev_priv: i915 private structure
8876  * @pipe: pipe PLL to disable
8877  *
8878  * Disable the PLL for @pipe. To be used in cases where the PLL was
8879  * force-enabled via vlv_force_pll_on() while @pipe was not enabled.
8880  */
8881 void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe)
8882 {
8883 	if (IS_CHERRYVIEW(dev_priv))
8884 		chv_disable_pll(dev_priv, pipe);
8885 	else
8886 		vlv_disable_pll(dev_priv, pipe);
8887 }
8888 
8889 static void i9xx_compute_dpll(struct intel_crtc *crtc,
8890 			      struct intel_crtc_state *crtc_state,
8891 			      struct dpll *reduced_clock)
8892 {
8893 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8894 	u32 dpll;
8895 	struct dpll *clock = &crtc_state->dpll;
8896 
8897 	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
8898 
8899 	dpll = DPLL_VGA_MODE_DIS;
8900 
8901 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
8902 		dpll |= DPLLB_MODE_LVDS;
8903 	else
8904 		dpll |= DPLLB_MODE_DAC_SERIAL;
8905 
8906 	if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
8907 	    IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
8908 		dpll |= (crtc_state->pixel_multiplier - 1)
8909 			<< SDVO_MULTIPLIER_SHIFT_HIRES;
8910 	}
8911 
8912 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
8913 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
8914 		dpll |= DPLL_SDVO_HIGH_SPEED;
8915 
8916 	if (intel_crtc_has_dp_encoder(crtc_state))
8917 		dpll |= DPLL_SDVO_HIGH_SPEED;
8918 
8919 	/* compute bitmask from p1 value: hw wants a one-hot (1 << (p1 - 1)) encoding */
8920 	if (IS_PINEVIEW(dev_priv))
8921 		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
8922 	else {
8923 		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
8924 		if (IS_G4X(dev_priv) && reduced_clock)
8925 			dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
8926 	}
8927 	switch (clock->p2) {
8928 	case 5:
8929 		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
8930 		break;
8931 	case 7:
8932 		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
8933 		break;
8934 	case 10:
8935 		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
8936 		break;
8937 	case 14:
8938 		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
8939 		break;
8940 	}
8941 	if (INTEL_GEN(dev_priv) >= 4)
8942 		dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
8943 
8944 	if (crtc_state->sdvo_tv_clock)
8945 		dpll |= PLL_REF_INPUT_TVCLKINBC;
8946 	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
8947 		 intel_panel_use_ssc(dev_priv))
8948 		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
8949 	else
8950 		dpll |= PLL_REF_INPUT_DREFCLK;
8951 
8952 	dpll |= DPLL_VCO_ENABLE;
8953 	crtc_state->dpll_hw_state.dpll = dpll;
8954 
8955 	if (INTEL_GEN(dev_priv) >= 4) {
8956 		u32 dpll_md = (crtc_state->pixel_multiplier - 1)
8957 			<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
8958 		crtc_state->dpll_hw_state.dpll_md = dpll_md;
8959 	}
8960 }
8961 
8962 static void i8xx_compute_dpll(struct intel_crtc *crtc,
8963 			      struct intel_crtc_state *crtc_state,
8964 			      struct dpll *reduced_clock)
8965 {
8966 	struct drm_device *dev = crtc->base.dev;
8967 	struct drm_i915_private *dev_priv = to_i915(dev);
8968 	u32 dpll;
8969 	struct dpll *clock = &crtc_state->dpll;
8970 
8971 	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
8972 
8973 	dpll = DPLL_VGA_MODE_DIS;
8974 
8975 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8976 		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
8977 	} else {
8978 		if (clock->p1 == 2)
8979 			dpll |= PLL_P1_DIVIDE_BY_TWO;
8980 		else
8981 			dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
8982 		if (clock->p2 == 4)
8983 			dpll |= PLL_P2_DIVIDE_BY_4;
8984 	}
8985 
8986 	/*
8987 	 * Bspec:
8988 	 * "[Almador Errata]: For the correct operation of the muxed DVO pins
8989 	 *  (GDEVSELB/I2Cdata, GIRDBY/I2CClk) and (GFRAMEB/DVI_Data,
8990 	 *  GTRDYB/DVI_Clk): Bit 31 (DPLL VCO Enable) and Bit 30 (2X Clock
8991 	 *  Enable) must be set to “1” in both the DPLL A Control Register
8992 	 *  (06014h-06017h) and DPLL B Control Register (06018h-0601Bh)."
8993 	 *
8994 	 * For simplicity we keep both bits always enabled in
8995 	 * both DPLLs. The spec says we should disable the DVO 2X clock
8996 	 * when not needed, but this seems to work fine in practice.
8997 	 */
8998 	if (IS_I830(dev_priv) ||
8999 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
9000 		dpll |= DPLL_DVO_2X_MODE;
9001 
9002 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
9003 	    intel_panel_use_ssc(dev_priv))
9004 		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
9005 	else
9006 		dpll |= PLL_REF_INPUT_DREFCLK;
9007 
9008 	dpll |= DPLL_VCO_ENABLE;
9009 	crtc_state->dpll_hw_state.dpll = dpll;
9010 }
9011 
9012 static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
9013 {
9014 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
9015 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9016 	enum pipe pipe = crtc->pipe;
9017 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
9018 	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
9019 	u32 crtc_vtotal, crtc_vblank_end;
9020 	int vsyncshift = 0;
9021 
9022 	/* We need to be careful not to change the adjusted mode, for otherwise
9023 	 * the hw state checker will get angry at the mismatch. */
9024 	crtc_vtotal = adjusted_mode->crtc_vtotal;
9025 	crtc_vblank_end = adjusted_mode->crtc_vblank_end;
9026 
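	/*
	 * vsyncshift delays the second field's vsync by half a
	 * scanline: SDVO outputs use (htotal - 1) / 2, everything
	 * else is measured from hsync_start.
	 */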
9027 	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
9028 		/* the chip adds 2 halflines automatically */
9029 		crtc_vtotal -= 1;
9030 		crtc_vblank_end -= 1;
9031 
9032 		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
9033 			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
9034 		else
9035 			vsyncshift = adjusted_mode->crtc_hsync_start -
9036 				adjusted_mode->crtc_htotal / 2;
9037 		if (vsyncshift < 0)
9038 			vsyncshift += adjusted_mode->crtc_htotal;
9039 	}
9040 
9041 	if (INTEL_GEN(dev_priv) > 3)
9042 		intel_de_write(dev_priv, VSYNCSHIFT(cpu_transcoder),
9043 		               vsyncshift);
9044 
9045 	intel_de_write(dev_priv, HTOTAL(cpu_transcoder),
9046 		       (adjusted_mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16));
9047 	intel_de_write(dev_priv, HBLANK(cpu_transcoder),
9048 		       (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16));
9049 	intel_de_write(dev_priv, HSYNC(cpu_transcoder),
9050 		       (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16));
9051 
9052 	intel_de_write(dev_priv, VTOTAL(cpu_transcoder),
9053 		       (adjusted_mode->crtc_vdisplay - 1) | ((crtc_vtotal - 1) << 16));
9054 	intel_de_write(dev_priv, VBLANK(cpu_transcoder),
9055 		       (adjusted_mode->crtc_vblank_start - 1) | ((crtc_vblank_end - 1) << 16));
9056 	intel_de_write(dev_priv, VSYNC(cpu_transcoder),
9057 		       (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16));
9058 
9059 	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
9060 	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
9061 	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
9062 	 * bits. */
9063 	if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
9064 	    (pipe == PIPE_B || pipe == PIPE_C))
9065 		intel_de_write(dev_priv, VTOTAL(pipe),
9066 		               intel_de_read(dev_priv, VTOTAL(cpu_transcoder)));
9067 
9068 }
9069 
9070 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
9071 {
9072 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
9073 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9074 	enum pipe pipe = crtc->pipe;
9075 
9076 	/* pipesrc controls the size that is scaled from, which should
9077 	 * always be the user's requested size.
9078 	 */
9079 	intel_de_write(dev_priv, PIPESRC(pipe),
9080 		       ((crtc_state->pipe_src_w - 1) << 16) | (crtc_state->pipe_src_h - 1));
9081 }
9082 
9083 static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
9084 {
9085 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
9086 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
9087 
9088 	if (IS_GEN(dev_priv, 2))
9089 		return false;
9090 
9091 	if (INTEL_GEN(dev_priv) >= 9 ||
9092 	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
9093 		return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK_HSW;
9094 	else
9095 		return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK;
9096 }
9097 
9098 static void intel_get_transcoder_timings(struct intel_crtc *crtc,
9099 					 struct intel_crtc_state *pipe_config)
9100 {
9101 	struct drm_device *dev = crtc->base.dev;
9102 	struct drm_i915_private *dev_priv = to_i915(dev);
9103 	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
9104 	u32 tmp;
9105 
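	/*
	 * The timing registers pack two values, each stored minus one,
	 * with the second value in the high 16 bits; hence the +1
	 * adjustments on readout below.
	 */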
9106 	tmp = intel_de_read(dev_priv, HTOTAL(cpu_transcoder));
9107 	pipe_config->hw.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
9108 	pipe_config->hw.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
9109 
9110 	if (!transcoder_is_dsi(cpu_transcoder)) {
9111 		tmp = intel_de_read(dev_priv, HBLANK(cpu_transcoder));
9112 		pipe_config->hw.adjusted_mode.crtc_hblank_start =
9113 							(tmp & 0xffff) + 1;
9114 		pipe_config->hw.adjusted_mode.crtc_hblank_end =
9115 						((tmp >> 16) & 0xffff) + 1;
9116 	}
9117 	tmp = intel_de_read(dev_priv, HSYNC(cpu_transcoder));
9118 	pipe_config->hw.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
9119 	pipe_config->hw.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
9120 
9121 	tmp = intel_de_read(dev_priv, VTOTAL(cpu_transcoder));
9122 	pipe_config->hw.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
9123 	pipe_config->hw.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
9124 
9125 	if (!transcoder_is_dsi(cpu_transcoder)) {
9126 		tmp = intel_de_read(dev_priv, VBLANK(cpu_transcoder));
9127 		pipe_config->hw.adjusted_mode.crtc_vblank_start =
9128 							(tmp & 0xffff) + 1;
9129 		pipe_config->hw.adjusted_mode.crtc_vblank_end =
9130 						((tmp >> 16) & 0xffff) + 1;
9131 	}
9132 	tmp = intel_de_read(dev_priv, VSYNC(cpu_transcoder));
9133 	pipe_config->hw.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
9134 	pipe_config->hw.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
9135 
9136 	if (intel_pipe_is_interlaced(pipe_config)) {
9137 		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
9138 		pipe_config->hw.adjusted_mode.crtc_vtotal += 1;
9139 		pipe_config->hw.adjusted_mode.crtc_vblank_end += 1;
9140 	}
9141 }
9142 
9143 static void intel_get_pipe_src_size(struct intel_crtc *crtc,
9144 				    struct intel_crtc_state *pipe_config)
9145 {
9146 	struct drm_device *dev = crtc->base.dev;
9147 	struct drm_i915_private *dev_priv = to_i915(dev);
9148 	u32 tmp;
9149 
9150 	tmp = intel_de_read(dev_priv, PIPESRC(crtc->pipe));
9151 	pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
9152 	pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
9153 }
9154 
9155 static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
9156 {
9157 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
9158 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9159 	u32 pipeconf;
9160 
9161 	pipeconf = 0;
9162 
9163 	/* we keep both pipes enabled on 830 */
9164 	if (IS_I830(dev_priv))
9165 		pipeconf |= intel_de_read(dev_priv, PIPECONF(crtc->pipe)) & PIPECONF_ENABLE;
9166 
9167 	if (crtc_state->double_wide)
9168 		pipeconf |= PIPECONF_DOUBLE_WIDE;
9169 
9170 	/* only g4x and later have fancy bpc/dither controls */
9171 	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
9172 	    IS_CHERRYVIEW(dev_priv)) {
9173 		/* Bspec claims that we can't use dithering for 30bpp pipes. */
9174 		if (crtc_state->dither && crtc_state->pipe_bpp != 30)
9175 			pipeconf |= PIPECONF_DITHER_EN |
9176 				    PIPECONF_DITHER_TYPE_SP;
9177 
9178 		switch (crtc_state->pipe_bpp) {
9179 		case 18:
9180 			pipeconf |= PIPECONF_6BPC;
9181 			break;
9182 		case 24:
9183 			pipeconf |= PIPECONF_8BPC;
9184 			break;
9185 		case 30:
9186 			pipeconf |= PIPECONF_10BPC;
9187 			break;
9188 		default:
9189 			/* Case prevented by intel_choose_pipe_bpp_dither. */
9190 			BUG();
9191 		}
9192 	}
9193 
9194 	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
9195 		if (INTEL_GEN(dev_priv) < 4 ||
9196 		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
9197 			pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
9198 		else
9199 			pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
9200 	} else {
9201 		pipeconf |= PIPECONF_PROGRESSIVE;
9202 	}
9203 
9204 	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
9205 	     crtc_state->limited_color_range)
9206 		pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
9207 
9208 	pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
9209 
9210 	pipeconf |= PIPECONF_FRAME_START_DELAY(0);
9211 
9212 	intel_de_write(dev_priv, PIPECONF(crtc->pipe), pipeconf);
9213 	intel_de_posting_read(dev_priv, PIPECONF(crtc->pipe));
9214 }
9215 
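/*
 * The *_crtc_compute_clock() variants below differ mainly in their
 * reference clock (48 MHz on i8xx, 96 MHz on g4x/pnv/i9xx, 100 MHz on
 * vlv/chv, or the VBT SSC frequency for LVDS panels using SSC) and in
 * the platform-specific divider limits they search.
 */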
9216 static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
9217 				   struct intel_crtc_state *crtc_state)
9218 {
9219 	struct drm_device *dev = crtc->base.dev;
9220 	struct drm_i915_private *dev_priv = to_i915(dev);
9221 	const struct intel_limit *limit;
9222 	int refclk = 48000;
9223 
9224 	memset(&crtc_state->dpll_hw_state, 0,
9225 	       sizeof(crtc_state->dpll_hw_state));
9226 
9227 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
9228 		if (intel_panel_use_ssc(dev_priv)) {
9229 			refclk = dev_priv->vbt.lvds_ssc_freq;
9230 			drm_dbg_kms(&dev_priv->drm,
9231 				    "using SSC reference clock of %d kHz\n",
9232 				    refclk);
9233 		}
9234 
9235 		limit = &intel_limits_i8xx_lvds;
9236 	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) {
9237 		limit = &intel_limits_i8xx_dvo;
9238 	} else {
9239 		limit = &intel_limits_i8xx_dac;
9240 	}
9241 
9242 	if (!crtc_state->clock_set &&
9243 	    !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
9244 				 refclk, NULL, &crtc_state->dpll)) {
9245 		drm_err(&dev_priv->drm,
9246 			"Couldn't find PLL settings for mode!\n");
9247 		return -EINVAL;
9248 	}
9249 
9250 	i8xx_compute_dpll(crtc, crtc_state, NULL);
9251 
9252 	return 0;
9253 }
9254 
9255 static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
9256 				  struct intel_crtc_state *crtc_state)
9257 {
9258 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9259 	const struct intel_limit *limit;
9260 	int refclk = 96000;
9261 
9262 	memset(&crtc_state->dpll_hw_state, 0,
9263 	       sizeof(crtc_state->dpll_hw_state));
9264 
9265 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
9266 		if (intel_panel_use_ssc(dev_priv)) {
9267 			refclk = dev_priv->vbt.lvds_ssc_freq;
9268 			drm_dbg_kms(&dev_priv->drm,
9269 				    "using SSC reference clock of %d kHz\n",
9270 				    refclk);
9271 		}
9272 
9273 		if (intel_is_dual_link_lvds(dev_priv))
9274 			limit = &intel_limits_g4x_dual_channel_lvds;
9275 		else
9276 			limit = &intel_limits_g4x_single_channel_lvds;
9277 	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
9278 		   intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
9279 		limit = &intel_limits_g4x_hdmi;
9280 	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) {
9281 		limit = &intel_limits_g4x_sdvo;
9282 	} else {
9283 		/* Fall back to limits suitable for the remaining output types */
9284 		limit = &intel_limits_i9xx_sdvo;
9285 	}
9286 
9287 	if (!crtc_state->clock_set &&
9288 	    !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
9289 				refclk, NULL, &crtc_state->dpll)) {
9290 		drm_err(&dev_priv->drm,
9291 			"Couldn't find PLL settings for mode!\n");
9292 		return -EINVAL;
9293 	}
9294 
9295 	i9xx_compute_dpll(crtc, crtc_state, NULL);
9296 
9297 	return 0;
9298 }
9299 
9300 static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
9301 				  struct intel_crtc_state *crtc_state)
9302 {
9303 	struct drm_device *dev = crtc->base.dev;
9304 	struct drm_i915_private *dev_priv = to_i915(dev);
9305 	const struct intel_limit *limit;
9306 	int refclk = 96000;
9307 
9308 	memset(&crtc_state->dpll_hw_state, 0,
9309 	       sizeof(crtc_state->dpll_hw_state));
9310 
9311 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
9312 		if (intel_panel_use_ssc(dev_priv)) {
9313 			refclk = dev_priv->vbt.lvds_ssc_freq;
9314 			drm_dbg_kms(&dev_priv->drm,
9315 				    "using SSC reference clock of %d kHz\n",
9316 				    refclk);
9317 		}
9318 
9319 		limit = &pnv_limits_lvds;
9320 	} else {
9321 		limit = &pnv_limits_sdvo;
9322 	}
9323 
9324 	if (!crtc_state->clock_set &&
9325 	    !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
9326 				refclk, NULL, &crtc_state->dpll)) {
9327 		drm_err(&dev_priv->drm,
9328 			"Couldn't find PLL settings for mode!\n");
9329 		return -EINVAL;
9330 	}
9331 
9332 	i9xx_compute_dpll(crtc, crtc_state, NULL);
9333 
9334 	return 0;
9335 }
9336 
9337 static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
9338 				   struct intel_crtc_state *crtc_state)
9339 {
9340 	struct drm_device *dev = crtc->base.dev;
9341 	struct drm_i915_private *dev_priv = to_i915(dev);
9342 	const struct intel_limit *limit;
9343 	int refclk = 96000;
9344 
9345 	memset(&crtc_state->dpll_hw_state, 0,
9346 	       sizeof(crtc_state->dpll_hw_state));
9347 
9348 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
9349 		if (intel_panel_use_ssc(dev_priv)) {
9350 			refclk = dev_priv->vbt.lvds_ssc_freq;
9351 			drm_dbg_kms(&dev_priv->drm,
9352 				    "using SSC reference clock of %d kHz\n",
9353 				    refclk);
9354 		}
9355 
9356 		limit = &intel_limits_i9xx_lvds;
9357 	} else {
9358 		limit = &intel_limits_i9xx_sdvo;
9359 	}
9360 
9361 	if (!crtc_state->clock_set &&
9362 	    !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
9363 				 refclk, NULL, &crtc_state->dpll)) {
9364 		drm_err(&dev_priv->drm,
9365 			"Couldn't find PLL settings for mode!\n");
9366 		return -EINVAL;
9367 	}
9368 
9369 	i9xx_compute_dpll(crtc, crtc_state, NULL);
9370 
9371 	return 0;
9372 }
9373 
9374 static int chv_crtc_compute_clock(struct intel_crtc *crtc,
9375 				  struct intel_crtc_state *crtc_state)
9376 {
9377 	int refclk = 100000;
9378 	const struct intel_limit *limit = &intel_limits_chv;
9379 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
9380 
9381 	memset(&crtc_state->dpll_hw_state, 0,
9382 	       sizeof(crtc_state->dpll_hw_state));
9383 
9384 	if (!crtc_state->clock_set &&
9385 	    !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
9386 				refclk, NULL, &crtc_state->dpll)) {
9387 		drm_err(&i915->drm, "Couldn't find PLL settings for mode!\n");
9388 		return -EINVAL;
9389 	}
9390 
9391 	chv_compute_dpll(crtc, crtc_state);
9392 
9393 	return 0;
9394 }
9395 
9396 static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
9397 				  struct intel_crtc_state *crtc_state)
9398 {
9399 	int refclk = 100000;
9400 	const struct intel_limit *limit = &intel_limits_vlv;
9401 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
9402 
9403 	memset(&crtc_state->dpll_hw_state, 0,
9404 	       sizeof(crtc_state->dpll_hw_state));
9405 
9406 	if (!crtc_state->clock_set &&
9407 	    !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
9408 				refclk, NULL, &crtc_state->dpll)) {
9409 		drm_err(&i915->drm,  "Couldn't find PLL settings for mode!\n");
9410 		return -EINVAL;
9411 	}
9412 
9413 	vlv_compute_dpll(crtc, crtc_state);
9414 
9415 	return 0;
9416 }
9417 
9418 static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
9419 {
9420 	if (IS_I830(dev_priv))
9421 		return false;
9422 
9423 	return INTEL_GEN(dev_priv) >= 4 ||
9424 		IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
9425 }
9426 
9427 static void i9xx_get_pfit_config(struct intel_crtc_state *crtc_state)
9428 {
9429 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
9430 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9431 	u32 tmp;
9432 
9433 	if (!i9xx_has_pfit(dev_priv))
9434 		return;
9435 
9436 	tmp = intel_de_read(dev_priv, PFIT_CONTROL);
9437 	if (!(tmp & PFIT_ENABLE))
9438 		return;
9439 
9440 	/* Check whether the pfit is attached to our pipe. */
9441 	if (INTEL_GEN(dev_priv) < 4) {
9442 		if (crtc->pipe != PIPE_B)
9443 			return;
9444 	} else {
9445 		if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
9446 			return;
9447 	}
9448 
9449 	crtc_state->gmch_pfit.control = tmp;
9450 	crtc_state->gmch_pfit.pgm_ratios =
9451 		intel_de_read(dev_priv, PFIT_PGM_RATIOS);
9452 }
9453 
9454 static void vlv_crtc_clock_get(struct intel_crtc *crtc,
9455 			       struct intel_crtc_state *pipe_config)
9456 {
9457 	struct drm_device *dev = crtc->base.dev;
9458 	struct drm_i915_private *dev_priv = to_i915(dev);
9459 	enum pipe pipe = crtc->pipe;
9460 	struct dpll clock;
9461 	u32 mdiv;
9462 	int refclk = 100000;
9463 
9464 	/* In case of DSI, DPLL will not be used */
9465 	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
9466 		return;
9467 
9468 	vlv_dpio_get(dev_priv);
9469 	mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
9470 	vlv_dpio_put(dev_priv);
9471 
9472 	clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
9473 	clock.m2 = mdiv & DPIO_M2DIV_MASK;
9474 	clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
9475 	clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
9476 	clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
9477 
9478 	pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
9479 }
9480 
9481 static void
9482 i9xx_get_initial_plane_config(struct intel_crtc *crtc,
9483 			      struct intel_initial_plane_config *plane_config)
9484 {
9485 	struct drm_device *dev = crtc->base.dev;
9486 	struct drm_i915_private *dev_priv = to_i915(dev);
9487 	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
9488 	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
9489 	enum pipe pipe;
9490 	u32 val, base, offset;
9491 	int fourcc, pixel_format;
9492 	unsigned int aligned_height;
9493 	struct drm_framebuffer *fb;
9494 	struct intel_framebuffer *intel_fb;
9495 
9496 	if (!plane->get_hw_state(plane, &pipe))
9497 		return;
9498 
9499 	drm_WARN_ON(dev, pipe != crtc->pipe);
9500 
9501 	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
9502 	if (!intel_fb) {
9503 		drm_dbg_kms(&dev_priv->drm, "failed to alloc fb\n");
9504 		return;
9505 	}
9506 
9507 	fb = &intel_fb->base;
9508 
9509 	fb->dev = dev;
9510 
9511 	val = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));
9512 
9513 	if (INTEL_GEN(dev_priv) >= 4) {
9514 		if (val & DISPPLANE_TILED) {
9515 			plane_config->tiling = I915_TILING_X;
9516 			fb->modifier = I915_FORMAT_MOD_X_TILED;
9517 		}
9518 
9519 		if (val & DISPPLANE_ROTATE_180)
9520 			plane_config->rotation = DRM_MODE_ROTATE_180;
9521 	}
9522 
9523 	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B &&
9524 	    val & DISPPLANE_MIRROR)
9525 		plane_config->rotation |= DRM_MODE_REFLECT_X;
9526 
9527 	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
9528 	fourcc = i9xx_format_to_fourcc(pixel_format);
9529 	fb->format = drm_format_info(fourcc);
9530 
9531 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
9532 		offset = intel_de_read(dev_priv, DSPOFFSET(i9xx_plane));
9533 		base = intel_de_read(dev_priv, DSPSURF(i9xx_plane)) & 0xfffff000;
9534 	} else if (INTEL_GEN(dev_priv) >= 4) {
9535 		if (plane_config->tiling)
9536 			offset = intel_de_read(dev_priv,
9537 					       DSPTILEOFF(i9xx_plane));
9538 		else
9539 			offset = intel_de_read(dev_priv,
9540 					       DSPLINOFF(i9xx_plane));
9541 		base = intel_de_read(dev_priv, DSPSURF(i9xx_plane)) & 0xfffff000;
9542 	} else {
9543 		base = intel_de_read(dev_priv, DSPADDR(i9xx_plane));
9544 	}
9545 	plane_config->base = base;
9546 
9547 	val = intel_de_read(dev_priv, PIPESRC(pipe));
9548 	fb->width = ((val >> 16) & 0xfff) + 1;
9549 	fb->height = ((val >> 0) & 0xfff) + 1;
9550 
9551 	val = intel_de_read(dev_priv, DSPSTRIDE(i9xx_plane));
9552 	fb->pitches[0] = val & 0xffffffc0;
9553 
9554 	aligned_height = intel_fb_align_height(fb, 0, fb->height);
9555 
9556 	plane_config->size = fb->pitches[0] * aligned_height;
9557 
9558 	drm_dbg_kms(&dev_priv->drm,
9559 		    "%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
9560 		    crtc->base.name, plane->base.name, fb->width, fb->height,
9561 		    fb->format->cpp[0] * 8, base, fb->pitches[0],
9562 		    plane_config->size);
9563 
9564 	plane_config->fb = intel_fb;
9565 }
9566 
9567 static void chv_crtc_clock_get(struct intel_crtc *crtc,
9568 			       struct intel_crtc_state *pipe_config)
9569 {
9570 	struct drm_device *dev = crtc->base.dev;
9571 	struct drm_i915_private *dev_priv = to_i915(dev);
9572 	enum pipe pipe = crtc->pipe;
9573 	enum dpio_channel port = vlv_pipe_to_channel(pipe);
9574 	struct dpll clock;
9575 	u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
9576 	int refclk = 100000;
9577 
9578 	/* In case of DSI, DPLL will not be used */
9579 	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
9580 		return;
9581 
9582 	vlv_dpio_get(dev_priv);
9583 	cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
9584 	pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
9585 	pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
9586 	pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
9587 	pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
9588 	vlv_dpio_put(dev_priv);
9589 
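	/*
	 * m2 is an 8.22 fixed point value: the integer part lives in
	 * PLL_DW0 and, when fractional division is enabled, the 22-bit
	 * fraction in PLL_DW2.
	 */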
9590 	clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
9591 	clock.m2 = (pll_dw0 & 0xff) << 22;
9592 	if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
9593 		clock.m2 |= pll_dw2 & 0x3fffff;
9594 	clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
9595 	clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
9596 	clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
9597 
9598 	pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
9599 }
9600 
9601 static enum intel_output_format
9602 bdw_get_pipemisc_output_format(struct intel_crtc *crtc)
9603 {
9604 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9605 	u32 tmp;
9606 
9607 	tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
9608 
9609 	if (tmp & PIPEMISC_YUV420_ENABLE) {
9610 		/* We support 4:2:0 in full blend mode only */
9611 		drm_WARN_ON(&dev_priv->drm,
9612 			    (tmp & PIPEMISC_YUV420_MODE_FULL_BLEND) == 0);
9613 
9614 		return INTEL_OUTPUT_FORMAT_YCBCR420;
9615 	} else if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
9616 		return INTEL_OUTPUT_FORMAT_YCBCR444;
9617 	} else {
9618 		return INTEL_OUTPUT_FORMAT_RGB;
9619 	}
9620 }
9621 
9622 static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state)
9623 {
9624 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
9625 	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
9626 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9627 	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
9628 	u32 tmp;
9629 
9630 	tmp = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));
9631 
9632 	if (tmp & DISPPLANE_GAMMA_ENABLE)
9633 		crtc_state->gamma_enable = true;
9634 
9635 	if (!HAS_GMCH(dev_priv) &&
9636 	    tmp & DISPPLANE_PIPE_CSC_ENABLE)
9637 		crtc_state->csc_enable = true;
9638 }
9639 
9640 static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
9641 				 struct intel_crtc_state *pipe_config)
9642 {
9643 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9644 	enum intel_display_power_domain power_domain;
9645 	intel_wakeref_t wakeref;
9646 	u32 tmp;
9647 	bool ret;
9648 
9649 	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
9650 	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
9651 	if (!wakeref)
9652 		return false;
9653 
9654 	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
9655 	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
9656 	pipe_config->shared_dpll = NULL;
9657 
9658 	ret = false;
9659 
9660 	tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
9661 	if (!(tmp & PIPECONF_ENABLE))
9662 		goto out;
9663 
9664 	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
9665 	    IS_CHERRYVIEW(dev_priv)) {
9666 		switch (tmp & PIPECONF_BPC_MASK) {
9667 		case PIPECONF_6BPC:
9668 			pipe_config->pipe_bpp = 18;
9669 			break;
9670 		case PIPECONF_8BPC:
9671 			pipe_config->pipe_bpp = 24;
9672 			break;
9673 		case PIPECONF_10BPC:
9674 			pipe_config->pipe_bpp = 30;
9675 			break;
9676 		default:
9677 			break;
9678 		}
9679 	}
9680 
9681 	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
9682 	    (tmp & PIPECONF_COLOR_RANGE_SELECT))
9683 		pipe_config->limited_color_range = true;
9684 
9685 	pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_I9XX) >>
9686 		PIPECONF_GAMMA_MODE_SHIFT;
9687 
9688 	if (IS_CHERRYVIEW(dev_priv))
9689 		pipe_config->cgm_mode = intel_de_read(dev_priv,
9690 						      CGM_PIPE_MODE(crtc->pipe));
9691 
9692 	i9xx_get_pipe_color_config(pipe_config);
9693 	intel_color_get_config(pipe_config);
9694 
9695 	if (INTEL_GEN(dev_priv) < 4)
9696 		pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
9697 
9698 	intel_get_transcoder_timings(crtc, pipe_config);
9699 	intel_get_pipe_src_size(crtc, pipe_config);
9700 
9701 	i9xx_get_pfit_config(pipe_config);
9702 
9703 	if (INTEL_GEN(dev_priv) >= 4) {
9704 		/* No way to read it out on pipes B and C */
9705 		if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
9706 			tmp = dev_priv->chv_dpll_md[crtc->pipe];
9707 		else
9708 			tmp = intel_de_read(dev_priv, DPLL_MD(crtc->pipe));
9709 		pipe_config->pixel_multiplier =
9710 			((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
9711 			 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
9712 		pipe_config->dpll_hw_state.dpll_md = tmp;
9713 	} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
9714 		   IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
9715 		tmp = intel_de_read(dev_priv, DPLL(crtc->pipe));
9716 		pipe_config->pixel_multiplier =
9717 			((tmp & SDVO_MULTIPLIER_MASK)
9718 			 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
9719 	} else {
9720 		/* Note that on i915G/GM the pixel multiplier is in the sdvo
9721 		 * port and will be fixed up in the encoder->get_config
9722 		 * function. */
9723 		pipe_config->pixel_multiplier = 1;
9724 	}
9725 	pipe_config->dpll_hw_state.dpll = intel_de_read(dev_priv,
9726 							DPLL(crtc->pipe));
9727 	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
9728 		pipe_config->dpll_hw_state.fp0 = intel_de_read(dev_priv,
9729 							       FP0(crtc->pipe));
9730 		pipe_config->dpll_hw_state.fp1 = intel_de_read(dev_priv,
9731 							       FP1(crtc->pipe));
9732 	} else {
9733 		/* Mask out read-only status bits. */
9734 		pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
9735 						     DPLL_PORTC_READY_MASK |
9736 						     DPLL_PORTB_READY_MASK);
9737 	}
9738 
9739 	if (IS_CHERRYVIEW(dev_priv))
9740 		chv_crtc_clock_get(crtc, pipe_config);
9741 	else if (IS_VALLEYVIEW(dev_priv))
9742 		vlv_crtc_clock_get(crtc, pipe_config);
9743 	else
9744 		i9xx_crtc_clock_get(crtc, pipe_config);
9745 
9746 	/*
9747 	 * Normally the dotclock is filled in by the encoder .get_config()
9748 	 * but in case the pipe is enabled w/o any ports we need a sane
9749 	 * default.
9750 	 */
9751 	pipe_config->hw.adjusted_mode.crtc_clock =
9752 		pipe_config->port_clock / pipe_config->pixel_multiplier;
9753 
9754 	ret = true;
9755 
9756 out:
9757 	intel_display_power_put(dev_priv, power_domain, wakeref);
9758 
9759 	return ret;
9760 }
9761 
9762 static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
9763 {
9764 	struct intel_encoder *encoder;
9765 	int i;
9766 	u32 val, final;
9767 	bool has_lvds = false;
9768 	bool has_cpu_edp = false;
9769 	bool has_panel = false;
9770 	bool has_ck505 = false;
9771 	bool can_ssc = false;
9772 	bool using_ssc_source = false;
9773 
9774 	/* We need to take the global config into account */
9775 	for_each_intel_encoder(&dev_priv->drm, encoder) {
9776 		switch (encoder->type) {
9777 		case INTEL_OUTPUT_LVDS:
9778 			has_panel = true;
9779 			has_lvds = true;
9780 			break;
9781 		case INTEL_OUTPUT_EDP:
9782 			has_panel = true;
9783 			if (encoder->port == PORT_A)
9784 				has_cpu_edp = true;
9785 			break;
9786 		default:
9787 			break;
9788 		}
9789 	}
9790 
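	/*
	 * Only IBX may have an external CK505 clock chip (advertised
	 * via the VBT); there SSC is usable only through the CK505,
	 * while later PCHs can always generate SSC internally.
	 */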
9791 	if (HAS_PCH_IBX(dev_priv)) {
9792 		has_ck505 = dev_priv->vbt.display_clock_mode;
9793 		can_ssc = has_ck505;
9794 	} else {
9795 		has_ck505 = false;
9796 		can_ssc = true;
9797 	}
9798 
9799 	/* Check if any DPLLs are using the SSC source */
9800 	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
9801 		u32 temp = intel_de_read(dev_priv, PCH_DPLL(i));
9802 
9803 		if (!(temp & DPLL_VCO_ENABLE))
9804 			continue;
9805 
9806 		if ((temp & PLL_REF_INPUT_MASK) ==
9807 		    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
9808 			using_ssc_source = true;
9809 			break;
9810 		}
9811 	}
9812 
9813 	drm_dbg_kms(&dev_priv->drm,
9814 		    "has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
9815 		    has_panel, has_lvds, has_ck505, using_ssc_source);
9816 
9817 	/* Ironlake: try to set up the display reference clock before
9818 	 * enabling the DPLL. This is only under the driver's control
9819 	 * after PCH B stepping; previous chipset steppings ignore
9820 	 * this setting.
9821 	 */
9822 	val = intel_de_read(dev_priv, PCH_DREF_CONTROL);
9823 
9824 	/* As we must carefully and slowly disable/enable each source in turn,
9825 	 * compute the final state we want first and check if we need to
9826 	 * make any changes at all.
9827 	 */
9828 	final = val;
9829 	final &= ~DREF_NONSPREAD_SOURCE_MASK;
9830 	if (has_ck505)
9831 		final |= DREF_NONSPREAD_CK505_ENABLE;
9832 	else
9833 		final |= DREF_NONSPREAD_SOURCE_ENABLE;
9834 
9835 	final &= ~DREF_SSC_SOURCE_MASK;
9836 	final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
9837 	final &= ~DREF_SSC1_ENABLE;
9838 
9839 	if (has_panel) {
9840 		final |= DREF_SSC_SOURCE_ENABLE;
9841 
9842 		if (intel_panel_use_ssc(dev_priv) && can_ssc)
9843 			final |= DREF_SSC1_ENABLE;
9844 
9845 		if (has_cpu_edp) {
9846 			if (intel_panel_use_ssc(dev_priv) && can_ssc)
9847 				final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
9848 			else
9849 				final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
9850 		} else
9851 			final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
9852 	} else if (using_ssc_source) {
9853 		final |= DREF_SSC_SOURCE_ENABLE;
9854 		final |= DREF_SSC1_ENABLE;
9855 	}
9856 
9857 	if (final == val)
9858 		return;
9859 
9860 	/* Always enable nonspread source */
9861 	val &= ~DREF_NONSPREAD_SOURCE_MASK;
9862 
9863 	if (has_ck505)
9864 		val |= DREF_NONSPREAD_CK505_ENABLE;
9865 	else
9866 		val |= DREF_NONSPREAD_SOURCE_ENABLE;
9867 
9868 	if (has_panel) {
9869 		val &= ~DREF_SSC_SOURCE_MASK;
9870 		val |= DREF_SSC_SOURCE_ENABLE;
9871 
9872 		/* SSC must be turned on before enabling the CPU output */
9873 		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
9874 			drm_dbg_kms(&dev_priv->drm, "Using SSC on panel\n");
9875 			val |= DREF_SSC1_ENABLE;
9876 		} else
9877 			val &= ~DREF_SSC1_ENABLE;
9878 
9879 		/* Get SSC going before enabling the outputs */
9880 		intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
9881 		intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
9882 		udelay(200);
9883 
9884 		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
9885 
9886 		/* Enable CPU source on CPU attached eDP */
9887 		if (has_cpu_edp) {
9888 			if (intel_panel_use_ssc(dev_priv) && can_ssc) {
9889 				drm_dbg_kms(&dev_priv->drm,
9890 					    "Using SSC on eDP\n");
9891 				val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
9892 			} else
9893 				val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
9894 		} else
9895 			val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
9896 
9897 		intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
9898 		intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
9899 		udelay(200);
9900 	} else {
9901 		drm_dbg_kms(&dev_priv->drm, "Disabling CPU source output\n");
9902 
9903 		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
9904 
9905 		/* Turn off CPU output */
9906 		val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
9907 
9908 		intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
9909 		intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
9910 		udelay(200);
9911 
9912 		if (!using_ssc_source) {
9913 			drm_dbg_kms(&dev_priv->drm, "Disabling SSC source\n");
9914 
9915 			/* Turn off the SSC source */
9916 			val &= ~DREF_SSC_SOURCE_MASK;
9917 			val |= DREF_SSC_SOURCE_DISABLE;
9918 
9919 			/* Turn off SSC1 */
9920 			val &= ~DREF_SSC1_ENABLE;
9921 
9922 			intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
9923 			intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
9924 			udelay(200);
9925 		}
9926 	}
9927 
9928 	BUG_ON(val != final);
9929 }
9930 
9931 static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
9932 {
9933 	u32 tmp;
9934 
9935 	tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
9936 	tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
9937 	intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);
9938 
9939 	if (wait_for_us(intel_de_read(dev_priv, SOUTH_CHICKEN2) &
9940 			FDI_MPHY_IOSFSB_RESET_STATUS, 100))
9941 		drm_err(&dev_priv->drm, "FDI mPHY reset assert timeout\n");
9942 
9943 	tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
9944 	tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
9945 	intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);
9946 
9947 	if (wait_for_us((intel_de_read(dev_priv, SOUTH_CHICKEN2) &
9948 			 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
9949 		drm_err(&dev_priv->drm, "FDI mPHY reset de-assert timeout\n");
9950 }
9951 
9952 /* WaMPhyProgramming:hsw */
9953 static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
9954 {
9955 	u32 tmp;
9956 
9957 	tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
9958 	tmp &= ~(0xFF << 24);
9959 	tmp |= (0x12 << 24);
9960 	intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
9961 
9962 	tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
9963 	tmp |= (1 << 11);
9964 	intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
9965 
9966 	tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
9967 	tmp |= (1 << 11);
9968 	intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
9969 
9970 	tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
9971 	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
9972 	intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
9973 
9974 	tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
9975 	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
9976 	intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
9977 
9978 	tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
9979 	tmp &= ~(7 << 13);
9980 	tmp |= (5 << 13);
9981 	intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
9982 
9983 	tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
9984 	tmp &= ~(7 << 13);
9985 	tmp |= (5 << 13);
9986 	intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
9987 
9988 	tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
9989 	tmp &= ~0xFF;
9990 	tmp |= 0x1C;
9991 	intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
9992 
9993 	tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
9994 	tmp &= ~0xFF;
9995 	tmp |= 0x1C;
9996 	intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
9997 
9998 	tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
9999 	tmp &= ~(0xFF << 16);
10000 	tmp |= (0x1C << 16);
10001 	intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
10002 
10003 	tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
10004 	tmp &= ~(0xFF << 16);
10005 	tmp |= (0x1C << 16);
10006 	intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
10007 
10008 	tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
10009 	tmp |= (1 << 27);
10010 	intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
10011 
10012 	tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
10013 	tmp |= (1 << 27);
10014 	intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
10015 
10016 	tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
10017 	tmp &= ~(0xF << 28);
10018 	tmp |= (4 << 28);
10019 	intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
10020 
10021 	tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
10022 	tmp &= ~(0xF << 28);
10023 	tmp |= (4 << 28);
10024 	intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
10025 }
10026 
10027 /* Implements 3 different sequences from BSpec chapter "Display iCLK
10028  * Programming" based on the parameters passed:
10029  * - Sequence to enable CLKOUT_DP
10030  * - Sequence to enable CLKOUT_DP without spread
10031  * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
10032  */
10033 static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
10034 				 bool with_spread, bool with_fdi)
10035 {
10036 	u32 reg, tmp;
10037 
10038 	if (drm_WARN(&dev_priv->drm, with_fdi && !with_spread,
10039 		     "FDI requires downspread\n"))
10040 		with_spread = true;
10041 	if (drm_WARN(&dev_priv->drm, HAS_PCH_LPT_LP(dev_priv) &&
10042 		     with_fdi, "LP PCH doesn't have FDI\n"))
10043 		with_fdi = false;
10044 
10045 	mutex_lock(&dev_priv->sb_lock);
10046 
10047 	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
10048 	tmp &= ~SBI_SSCCTL_DISABLE;
10049 	tmp |= SBI_SSCCTL_PATHALT;
10050 	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
10051 
10052 	udelay(24);
10053 
10054 	if (with_spread) {
10055 		tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
10056 		tmp &= ~SBI_SSCCTL_PATHALT;
10057 		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
10058 
10059 		if (with_fdi) {
10060 			lpt_reset_fdi_mphy(dev_priv);
10061 			lpt_program_fdi_mphy(dev_priv);
10062 		}
10063 	}
10064 
10065 	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
10066 	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
10067 	tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
10068 	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
10069 
10070 	mutex_unlock(&dev_priv->sb_lock);
10071 }
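/*
 * Illustrative call patterns only (a sketch, not part of the original source):
 * the three BSpec sequences map onto the two bool parameters. The FDI case
 * matches the real call in lpt_init_pch_refclk() further below; the other two
 * combinations are shown for contrast.
 *
 *	lpt_enable_clkout_dp(dev_priv, true, true);	// CLKOUT_DP for FDI + PCH FDI I/O
 *	lpt_enable_clkout_dp(dev_priv, true, false);	// CLKOUT_DP with spread
 *	lpt_enable_clkout_dp(dev_priv, false, false);	// CLKOUT_DP without spread
 */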
10072 
10073 /* Sequence to disable CLKOUT_DP */
10074 void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
10075 {
10076 	u32 reg, tmp;
10077 
10078 	mutex_lock(&dev_priv->sb_lock);
10079 
10080 	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
10081 	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
10082 	tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
10083 	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
10084 
10085 	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
10086 	if (!(tmp & SBI_SSCCTL_DISABLE)) {
10087 		if (!(tmp & SBI_SSCCTL_PATHALT)) {
10088 			tmp |= SBI_SSCCTL_PATHALT;
10089 			intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
10090 			udelay(32);
10091 		}
10092 		tmp |= SBI_SSCCTL_DISABLE;
10093 		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
10094 	}
10095 
10096 	mutex_unlock(&dev_priv->sb_lock);
10097 }
10098 
10099 #define BEND_IDX(steps) ((50 + (steps)) / 5)
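/*
 * Worked example (annotation, not in the original source): BEND_IDX() maps
 * the supported bend range of -50..+50 steps onto array indices 0..20:
 * BEND_IDX(-50) == 0, BEND_IDX(0) == 10, BEND_IDX(50) == 20, which is
 * exactly the 21 entries of sscdivintphase[] below.
 */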
10100 
10101 static const u16 sscdivintphase[] = {
10102 	[BEND_IDX( 50)] = 0x3B23,
10103 	[BEND_IDX( 45)] = 0x3B23,
10104 	[BEND_IDX( 40)] = 0x3C23,
10105 	[BEND_IDX( 35)] = 0x3C23,
10106 	[BEND_IDX( 30)] = 0x3D23,
10107 	[BEND_IDX( 25)] = 0x3D23,
10108 	[BEND_IDX( 20)] = 0x3E23,
10109 	[BEND_IDX( 15)] = 0x3E23,
10110 	[BEND_IDX( 10)] = 0x3F23,
10111 	[BEND_IDX(  5)] = 0x3F23,
10112 	[BEND_IDX(  0)] = 0x0025,
10113 	[BEND_IDX( -5)] = 0x0025,
10114 	[BEND_IDX(-10)] = 0x0125,
10115 	[BEND_IDX(-15)] = 0x0125,
10116 	[BEND_IDX(-20)] = 0x0225,
10117 	[BEND_IDX(-25)] = 0x0225,
10118 	[BEND_IDX(-30)] = 0x0325,
10119 	[BEND_IDX(-35)] = 0x0325,
10120 	[BEND_IDX(-40)] = 0x0425,
10121 	[BEND_IDX(-45)] = 0x0425,
10122 	[BEND_IDX(-50)] = 0x0525,
10123 };
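/*
 * Note (annotation): each register value appears twice, presumably because
 * the divider itself only resolves bend in 10-step increments; the
 * intermediate +/-5 steps are approximated via the SSCDITHPHASE dither
 * pattern (0xAAAAAAAB) that lpt_bend_clkout_dp() programs whenever
 * steps % 10 != 0.
 */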
10124 
10125 /*
10126  * Bend CLKOUT_DP
10127  * steps -50 to 50 inclusive, in steps of 5
10128  * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
10129  * change in clock period = -(steps / 10) * 5.787 ps
10130  */
10131 static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
10132 {
10133 	u32 tmp;
10134 	int idx = BEND_IDX(steps);
10135 
10136 	if (drm_WARN_ON(&dev_priv->drm, steps % 5 != 0))
10137 		return;
10138 
10139 	if (drm_WARN_ON(&dev_priv->drm, idx >= ARRAY_SIZE(sscdivintphase)))
10140 		return;
10141 
10142 	mutex_lock(&dev_priv->sb_lock);
10143 
10144 	if (steps % 10 != 0)
10145 		tmp = 0xAAAAAAAB;
10146 	else
10147 		tmp = 0x00000000;
10148 	intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);
10149 
10150 	tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
10151 	tmp &= 0xffff0000;
10152 	tmp |= sscdivintphase[idx];
10153 	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);
10154 
10155 	mutex_unlock(&dev_priv->sb_lock);
10156 }
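/*
 * Worked example (annotation): lpt_bend_clkout_dp(dev_priv, -20) gives
 * idx = BEND_IDX(-20) = 6, so sscdivintphase[6] = 0x0225 is written into the
 * low word of SBI_SSCDIVINTPHASE, with no dithering since -20 % 10 == 0.
 * Per the formula above, the clock period changes by -(-20 / 10) * 5.787 ps
 * = +11.574 ps, i.e. a slightly slower clock, as expected for steps < 0.
 */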
10157 
10158 #undef BEND_IDX
10159 
10160 static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv)
10161 {
10162 	u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
10163 	u32 ctl = intel_de_read(dev_priv, SPLL_CTL);
10164 
10165 	if ((ctl & SPLL_PLL_ENABLE) == 0)
10166 		return false;
10167 
10168 	if ((ctl & SPLL_REF_MASK) == SPLL_REF_MUXED_SSC &&
10169 	    (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
10170 		return true;
10171 
10172 	if (IS_BROADWELL(dev_priv) &&
10173 	    (ctl & SPLL_REF_MASK) == SPLL_REF_PCH_SSC_BDW)
10174 		return true;
10175 
10176 	return false;
10177 }
10178 
10179 static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv,
10180 			       enum intel_dpll_id id)
10181 {
10182 	u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
10183 	u32 ctl = intel_de_read(dev_priv, WRPLL_CTL(id));
10184 
10185 	if ((ctl & WRPLL_PLL_ENABLE) == 0)
10186 		return false;
10187 
10188 	if ((ctl & WRPLL_REF_MASK) == WRPLL_REF_PCH_SSC)
10189 		return true;
10190 
10191 	if ((IS_BROADWELL(dev_priv) || IS_HSW_ULT(dev_priv)) &&
10192 	    (ctl & WRPLL_REF_MASK) == WRPLL_REF_MUXED_SSC_BDW &&
10193 	    (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
10194 		return true;
10195 
10196 	return false;
10197 }
10198 
10199 static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
10200 {
10201 	struct intel_encoder *encoder;
10202 	bool has_fdi = false;
10203 
10204 	for_each_intel_encoder(&dev_priv->drm, encoder) {
10205 		switch (encoder->type) {
10206 		case INTEL_OUTPUT_ANALOG:
10207 			has_fdi = true;
10208 			break;
10209 		default:
10210 			break;
10211 		}
10212 	}
10213 
10214 	/*
10215 	 * The BIOS may have decided to use the PCH SSC
10216 	 * reference so we must not disable it until the
10217 	 * relevant PLLs have stopped relying on it. We'll
10218 	 * just leave the PCH SSC reference enabled in case
10219 	 * any active PLL is using it. It will get disabled
10220 	 * after runtime suspend if we don't have FDI.
10221 	 *
10222 	 * TODO: Move the whole reference clock handling
10223 	 * to the modeset sequence proper so that we can
10224 	 * actually enable/disable/reconfigure these things
10225 	 * safely. To do that we need to introduce a real
10226 	 * clock hierarchy. That would also allow us to do
10227 	 * clock bending finally.
10228 	 */
10229 	dev_priv->pch_ssc_use = 0;
10230 
10231 	if (spll_uses_pch_ssc(dev_priv)) {
10232 		drm_dbg_kms(&dev_priv->drm, "SPLL using PCH SSC\n");
10233 		dev_priv->pch_ssc_use |= BIT(DPLL_ID_SPLL);
10234 	}
10235 
10236 	if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) {
10237 		drm_dbg_kms(&dev_priv->drm, "WRPLL1 using PCH SSC\n");
10238 		dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL1);
10239 	}
10240 
10241 	if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) {
10242 		drm_dbg_kms(&dev_priv->drm, "WRPLL2 using PCH SSC\n");
10243 		dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL2);
10244 	}
10245 
10246 	if (dev_priv->pch_ssc_use)
10247 		return;
10248 
10249 	if (has_fdi) {
10250 		lpt_bend_clkout_dp(dev_priv, 0);
10251 		lpt_enable_clkout_dp(dev_priv, true, true);
10252 	} else {
10253 		lpt_disable_clkout_dp(dev_priv);
10254 	}
10255 }
10256 
10257 /*
10258  * Initialize reference clocks when the driver loads
10259  */
10260 void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
10261 {
10262 	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
10263 		ilk_init_pch_refclk(dev_priv);
10264 	else if (HAS_PCH_LPT(dev_priv))
10265 		lpt_init_pch_refclk(dev_priv);
10266 }
10267 
10268 static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
10269 {
10270 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
10271 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10272 	enum pipe pipe = crtc->pipe;
10273 	u32 val;
10274 
10275 	val = 0;
10276 
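	/* pipe_bpp counts all three channels, so 18/24/30/36 bpp == 6/8/10/12 bpc */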
10277 	switch (crtc_state->pipe_bpp) {
10278 	case 18:
10279 		val |= PIPECONF_6BPC;
10280 		break;
10281 	case 24:
10282 		val |= PIPECONF_8BPC;
10283 		break;
10284 	case 30:
10285 		val |= PIPECONF_10BPC;
10286 		break;
10287 	case 36:
10288 		val |= PIPECONF_12BPC;
10289 		break;
10290 	default:
10291 		/* Case prevented by intel_choose_pipe_bpp_dither. */
10292 		BUG();
10293 	}
10294 
10295 	if (crtc_state->dither)
10296 		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
10297 
10298 	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
10299 		val |= PIPECONF_INTERLACED_ILK;
10300 	else
10301 		val |= PIPECONF_PROGRESSIVE;
10302 
10303 	/*
10304 	 * This would end up with an odd purple hue over
10305 	 * the entire display. Make sure we don't do it.
10306 	 */
10307 	drm_WARN_ON(&dev_priv->drm, crtc_state->limited_color_range &&
10308 		    crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
10309 
10310 	if (crtc_state->limited_color_range &&
10311 	    !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
10312 		val |= PIPECONF_COLOR_RANGE_SELECT;
10313 
10314 	if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
10315 		val |= PIPECONF_OUTPUT_COLORSPACE_YUV709;
10316 
10317 	val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
10318 
10319 	val |= PIPECONF_FRAME_START_DELAY(0);
10320 
10321 	intel_de_write(dev_priv, PIPECONF(pipe), val);
10322 	intel_de_posting_read(dev_priv, PIPECONF(pipe));
10323 }
10324 
10325 static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state)
10326 {
10327 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
10328 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10329 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
10330 	u32 val = 0;
10331 
10332 	if (IS_HASWELL(dev_priv) && crtc_state->dither)
10333 		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
10334 
10335 	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
10336 		val |= PIPECONF_INTERLACED_ILK;
10337 	else
10338 		val |= PIPECONF_PROGRESSIVE;
10339 
10340 	if (IS_HASWELL(dev_priv) &&
10341 	    crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
10342 		val |= PIPECONF_OUTPUT_COLORSPACE_YUV_HSW;
10343 
10344 	intel_de_write(dev_priv, PIPECONF(cpu_transcoder), val);
10345 	intel_de_posting_read(dev_priv, PIPECONF(cpu_transcoder));
10346 }
10347 
10348 static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
10349 {
10350 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
10351 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10352 	u32 val = 0;
10353 
10354 	switch (crtc_state->pipe_bpp) {
10355 	case 18:
10356 		val |= PIPEMISC_DITHER_6_BPC;
10357 		break;
10358 	case 24:
10359 		val |= PIPEMISC_DITHER_8_BPC;
10360 		break;
10361 	case 30:
10362 		val |= PIPEMISC_DITHER_10_BPC;
10363 		break;
10364 	case 36:
10365 		val |= PIPEMISC_DITHER_12_BPC;
10366 		break;
10367 	default:
10368 		MISSING_CASE(crtc_state->pipe_bpp);
10369 		break;
10370 	}
10371 
10372 	if (crtc_state->dither)
10373 		val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
10374 
10375 	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
10376 	    crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
10377 		val |= PIPEMISC_OUTPUT_COLORSPACE_YUV;
10378 
10379 	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
10380 		val |= PIPEMISC_YUV420_ENABLE |
10381 			PIPEMISC_YUV420_MODE_FULL_BLEND;
10382 
10383 	if (INTEL_GEN(dev_priv) >= 11 &&
10384 	    (crtc_state->active_planes & ~(icl_hdr_plane_mask() |
10385 					   BIT(PLANE_CURSOR))) == 0)
10386 		val |= PIPEMISC_HDR_MODE_PRECISION;
10387 
10388 	if (INTEL_GEN(dev_priv) >= 12)
10389 		val |= PIPEMISC_PIXEL_ROUNDING_TRUNC;
10390 
10391 	intel_de_write(dev_priv, PIPEMISC(crtc->pipe), val);
10392 }
10393 
10394 int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
10395 {
10396 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10397 	u32 tmp;
10398 
10399 	tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
10400 
10401 	switch (tmp & PIPEMISC_DITHER_BPC_MASK) {
10402 	case PIPEMISC_DITHER_6_BPC:
10403 		return 18;
10404 	case PIPEMISC_DITHER_8_BPC:
10405 		return 24;
10406 	case PIPEMISC_DITHER_10_BPC:
10407 		return 30;
10408 	case PIPEMISC_DITHER_12_BPC:
10409 		return 36;
10410 	default:
10411 		MISSING_CASE(tmp);
10412 		return 0;
10413 	}
10414 }
10415 
10416 int ilk_get_lanes_required(int target_clock, int link_bw, int bpp)
10417 {
10418 	/*
10419 	 * Account for spread spectrum to avoid
10420 	 * oversubscribing the link. Max center spread
10421 	 * is 2.5%; use 5% for safety's sake.
10422 	 */
10423 	u32 bps = target_clock * bpp * 21 / 20;
10424 	return DIV_ROUND_UP(bps, link_bw * 8);
10425 }
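/*
 * Worked example (annotation): a 148500 kHz pixel clock at 24 bpp over a
 * 270000 kHz link gives bps = 148500 * 24 * 21 / 20 = 3742200, and
 * DIV_ROUND_UP(3742200, 270000 * 8) = DIV_ROUND_UP(3742200, 2160000) = 2,
 * i.e. two lanes are required once the 5% SSC margin is included.
 */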
10426 
10427 static bool ilk_needs_fb_cb_tune(struct dpll *dpll, int factor)
10428 {
10429 	return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
10430 }
10431 
10432 static void ilk_compute_dpll(struct intel_crtc *crtc,
10433 			     struct intel_crtc_state *crtc_state,
10434 			     struct dpll *reduced_clock)
10435 {
10436 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10437 	u32 dpll, fp, fp2;
10438 	int factor;
10439 
10440 	/* Enable autotuning of the PLL clock (if permissible) */
10441 	factor = 21;
10442 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
10443 		if ((intel_panel_use_ssc(dev_priv) &&
10444 		     dev_priv->vbt.lvds_ssc_freq == 100000) ||
10445 		    (HAS_PCH_IBX(dev_priv) &&
10446 		     intel_is_dual_link_lvds(dev_priv)))
10447 			factor = 25;
10448 	} else if (crtc_state->sdvo_tv_clock) {
10449 		factor = 20;
10450 	}
10451 
10452 	fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
10453 
10454 	if (ilk_needs_fb_cb_tune(&crtc_state->dpll, factor))
10455 		fp |= FP_CB_TUNE;
10456 
10457 	if (reduced_clock) {
10458 		fp2 = i9xx_dpll_compute_fp(reduced_clock);
10459 
10460 		if (reduced_clock->m < factor * reduced_clock->n)
10461 			fp2 |= FP_CB_TUNE;
10462 	} else {
10463 		fp2 = fp;
10464 	}
10465 
10466 	dpll = 0;
10467 
10468 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
10469 		dpll |= DPLLB_MODE_LVDS;
10470 	else
10471 		dpll |= DPLLB_MODE_DAC_SERIAL;
10472 
10473 	dpll |= (crtc_state->pixel_multiplier - 1)
10474 		<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
10475 
10476 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
10477 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
10478 		dpll |= DPLL_SDVO_HIGH_SPEED;
10479 
10480 	if (intel_crtc_has_dp_encoder(crtc_state))
10481 		dpll |= DPLL_SDVO_HIGH_SPEED;
10482 
10483 	/*
10484 	 * The high speed IO clock is only really required for
10485 	 * SDVO/HDMI/DP, but we also enable it for CRT to make it
10486 	 * possible to share the DPLL between CRT and HDMI. Enabling
10487 	 * the clock needlessly does no real harm, except use up a
10488 	 * bit of power potentially.
10489 	 *
10490 	 * We'll limit this to IVB with 3 pipes, since it has only two
10491 	 * DPLLs and so DPLL sharing is the only way to get three pipes
10492 	 * driving PCH ports at the same time. On SNB we could do this,
10493 	 * and potentially avoid enabling the second DPLL, but it's not
10494 	 * clear if it's a win or a loss power-wise. No point in doing
10495 	 * this on ILK at all since it has a fixed DPLL<->pipe mapping.
10496 	 */
10497 	if (INTEL_NUM_PIPES(dev_priv) == 3 &&
10498 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
10499 		dpll |= DPLL_SDVO_HIGH_SPEED;
10500 
10501 	/* compute bitmask from p1 value */
10502 	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
10503 	/* also FPA1 */
10504 	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
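	/*
	 * Annotation: the P1 divider is one-hot encoded, e.g. p1 == 2 yields
	 * (1 << 1) == 0b10 in both the FPA0 and FPA1 P1 post-divider fields.
	 */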
10505 
10506 	switch (crtc_state->dpll.p2) {
10507 	case 5:
10508 		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
10509 		break;
10510 	case 7:
10511 		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
10512 		break;
10513 	case 10:
10514 		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
10515 		break;
10516 	case 14:
10517 		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
10518 		break;
10519 	}
10520 
10521 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
10522 	    intel_panel_use_ssc(dev_priv))
10523 		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
10524 	else
10525 		dpll |= PLL_REF_INPUT_DREFCLK;
10526 
10527 	dpll |= DPLL_VCO_ENABLE;
10528 
10529 	crtc_state->dpll_hw_state.dpll = dpll;
10530 	crtc_state->dpll_hw_state.fp0 = fp;
10531 	crtc_state->dpll_hw_state.fp1 = fp2;
10532 }
10533 
10534 static int ilk_crtc_compute_clock(struct intel_crtc *crtc,
10535 				  struct intel_crtc_state *crtc_state)
10536 {
10537 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10538 	struct intel_atomic_state *state =
10539 		to_intel_atomic_state(crtc_state->uapi.state);
10540 	const struct intel_limit *limit;
10541 	int refclk = 120000;
10542 
10543 	memset(&crtc_state->dpll_hw_state, 0,
10544 	       sizeof(crtc_state->dpll_hw_state));
10545 
10546 	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
10547 	if (!crtc_state->has_pch_encoder)
10548 		return 0;
10549 
10550 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
10551 		if (intel_panel_use_ssc(dev_priv)) {
10552 			drm_dbg_kms(&dev_priv->drm,
10553 				    "using SSC reference clock of %d kHz\n",
10554 				    dev_priv->vbt.lvds_ssc_freq);
10555 			refclk = dev_priv->vbt.lvds_ssc_freq;
10556 		}
10557 
10558 		if (intel_is_dual_link_lvds(dev_priv)) {
10559 			if (refclk == 100000)
10560 				limit = &ilk_limits_dual_lvds_100m;
10561 			else
10562 				limit = &ilk_limits_dual_lvds;
10563 		} else {
10564 			if (refclk == 100000)
10565 				limit = &ilk_limits_single_lvds_100m;
10566 			else
10567 				limit = &ilk_limits_single_lvds;
10568 		}
10569 	} else {
10570 		limit = &ilk_limits_dac;
10571 	}
10572 
10573 	if (!crtc_state->clock_set &&
10574 	    !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
10575 				refclk, NULL, &crtc_state->dpll)) {
10576 		drm_err(&dev_priv->drm,
10577 			"Couldn't find PLL settings for mode!\n");
10578 		return -EINVAL;
10579 	}
10580 
10581 	ilk_compute_dpll(crtc, crtc_state, NULL);
10582 
10583 	if (!intel_reserve_shared_dplls(state, crtc, NULL)) {
10584 		drm_dbg_kms(&dev_priv->drm,
10585 			    "failed to find PLL for pipe %c\n",
10586 			    pipe_name(crtc->pipe));
10587 		return -EINVAL;
10588 	}
10589 
10590 	return 0;
10591 }
10592 
10593 static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
10594 					 struct intel_link_m_n *m_n)
10595 {
10596 	struct drm_device *dev = crtc->base.dev;
10597 	struct drm_i915_private *dev_priv = to_i915(dev);
10598 	enum pipe pipe = crtc->pipe;
10599 
10600 	m_n->link_m = intel_de_read(dev_priv, PCH_TRANS_LINK_M1(pipe));
10601 	m_n->link_n = intel_de_read(dev_priv, PCH_TRANS_LINK_N1(pipe));
10602 	m_n->gmch_m = intel_de_read(dev_priv, PCH_TRANS_DATA_M1(pipe))
10603 		& ~TU_SIZE_MASK;
10604 	m_n->gmch_n = intel_de_read(dev_priv, PCH_TRANS_DATA_N1(pipe));
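	/* the TU size field in PCH_TRANS_DATA_M1 is stored as size - 1, hence the + 1 */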
10605 	m_n->tu = ((intel_de_read(dev_priv, PCH_TRANS_DATA_M1(pipe))
10606 		    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
10607 }
10608 
10609 static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
10610 					 enum transcoder transcoder,
10611 					 struct intel_link_m_n *m_n,
10612 					 struct intel_link_m_n *m2_n2)
10613 {
10614 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10615 	enum pipe pipe = crtc->pipe;
10616 
10617 	if (INTEL_GEN(dev_priv) >= 5) {
10618 		m_n->link_m = intel_de_read(dev_priv,
10619 					    PIPE_LINK_M1(transcoder));
10620 		m_n->link_n = intel_de_read(dev_priv,
10621 					    PIPE_LINK_N1(transcoder));
10622 		m_n->gmch_m = intel_de_read(dev_priv,
10623 					    PIPE_DATA_M1(transcoder))
10624 			& ~TU_SIZE_MASK;
10625 		m_n->gmch_n = intel_de_read(dev_priv,
10626 					    PIPE_DATA_N1(transcoder));
10627 		m_n->tu = ((intel_de_read(dev_priv, PIPE_DATA_M1(transcoder))
10628 			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
10629 
10630 		if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) {
10631 			m2_n2->link_m = intel_de_read(dev_priv,
10632 						      PIPE_LINK_M2(transcoder));
10633 			m2_n2->link_n = intel_de_read(dev_priv,
10634 						      PIPE_LINK_N2(transcoder));
10635 			m2_n2->gmch_m = intel_de_read(dev_priv,
10636 						      PIPE_DATA_M2(transcoder))
10637 				& ~TU_SIZE_MASK;
10638 			m2_n2->gmch_n = intel_de_read(dev_priv,
10639 						      PIPE_DATA_N2(transcoder));
10640 			m2_n2->tu = ((intel_de_read(dev_priv, PIPE_DATA_M2(transcoder))
10641 					& TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
10642 		}
10643 	} else {
10644 		m_n->link_m = intel_de_read(dev_priv, PIPE_LINK_M_G4X(pipe));
10645 		m_n->link_n = intel_de_read(dev_priv, PIPE_LINK_N_G4X(pipe));
10646 		m_n->gmch_m = intel_de_read(dev_priv, PIPE_DATA_M_G4X(pipe))
10647 			& ~TU_SIZE_MASK;
10648 		m_n->gmch_n = intel_de_read(dev_priv, PIPE_DATA_N_G4X(pipe));
10649 		m_n->tu = ((intel_de_read(dev_priv, PIPE_DATA_M_G4X(pipe))
10650 			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
10651 	}
10652 }
10653 
10654 void intel_dp_get_m_n(struct intel_crtc *crtc,
10655 		      struct intel_crtc_state *pipe_config)
10656 {
10657 	if (pipe_config->has_pch_encoder)
10658 		intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
10659 	else
10660 		intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
10661 					     &pipe_config->dp_m_n,
10662 					     &pipe_config->dp_m2_n2);
10663 }
10664 
10665 static void ilk_get_fdi_m_n_config(struct intel_crtc *crtc,
10666 				   struct intel_crtc_state *pipe_config)
10667 {
10668 	intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
10669 				     &pipe_config->fdi_m_n, NULL);
10670 }
10671 
10672 static void ilk_get_pfit_pos_size(struct intel_crtc_state *crtc_state,
10673 				  u32 pos, u32 size)
10674 {
10675 	drm_rect_init(&crtc_state->pch_pfit.dst,
10676 		      pos >> 16, pos & 0xffff,
10677 		      size >> 16, size & 0xffff);
10678 }
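/*
 * Worked example (annotation): both registers pack x/width in the high 16
 * bits and y/height in the low 16 bits, so pos = 0x00400020 decodes to
 * (x, y) = (64, 32) and size = 0x07800438 to a 1920x1080 window.
 */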
10679 
10680 static void skl_get_pfit_config(struct intel_crtc_state *crtc_state)
10681 {
10682 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
10683 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10684 	struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
10685 	int id = -1;
10686 	int i;
10687 
10688 	/* find scaler attached to this pipe */
10689 	for (i = 0; i < crtc->num_scalers; i++) {
10690 		u32 ctl, pos, size;
10691 
10692 		ctl = intel_de_read(dev_priv, SKL_PS_CTRL(crtc->pipe, i));
10693 		if ((ctl & (PS_SCALER_EN | PS_PLANE_SEL_MASK)) != PS_SCALER_EN)
10694 			continue;
10695 
10696 		id = i;
10697 		crtc_state->pch_pfit.enabled = true;
10698 
10699 		pos = intel_de_read(dev_priv, SKL_PS_WIN_POS(crtc->pipe, i));
10700 		size = intel_de_read(dev_priv, SKL_PS_WIN_SZ(crtc->pipe, i));
10701 
10702 		ilk_get_pfit_pos_size(crtc_state, pos, size);
10703 
10704 		scaler_state->scalers[i].in_use = true;
10705 		break;
10706 	}
10707 
10708 	scaler_state->scaler_id = id;
10709 	if (id >= 0)
10710 		scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
10711 	else
10712 		scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
10713 }
10714 
10715 static void
10716 skl_get_initial_plane_config(struct intel_crtc *crtc,
10717 			     struct intel_initial_plane_config *plane_config)
10718 {
10719 	struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
10720 	struct drm_device *dev = crtc->base.dev;
10721 	struct drm_i915_private *dev_priv = to_i915(dev);
10722 	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
10723 	enum plane_id plane_id = plane->id;
10724 	enum pipe pipe;
10725 	u32 val, base, offset, stride_mult, tiling, alpha;
10726 	int fourcc, pixel_format;
10727 	unsigned int aligned_height;
10728 	struct drm_framebuffer *fb;
10729 	struct intel_framebuffer *intel_fb;
10730 
10731 	if (!plane->get_hw_state(plane, &pipe))
10732 		return;
10733 
10734 	drm_WARN_ON(dev, pipe != crtc->pipe);
10735 
10736 	if (crtc_state->bigjoiner) {
10737 		drm_dbg_kms(&dev_priv->drm,
10738 			    "Unsupported bigjoiner configuration for initial FB\n");
10739 		return;
10740 	}
10741 
10742 	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
10743 	if (!intel_fb) {
10744 		drm_dbg_kms(&dev_priv->drm, "failed to alloc fb\n");
10745 		return;
10746 	}
10747 
10748 	fb = &intel_fb->base;
10749 
10750 	fb->dev = dev;
10751 
10752 	val = intel_de_read(dev_priv, PLANE_CTL(pipe, plane_id));
10753 
10754 	if (INTEL_GEN(dev_priv) >= 11)
10755 		pixel_format = val & ICL_PLANE_CTL_FORMAT_MASK;
10756 	else
10757 		pixel_format = val & PLANE_CTL_FORMAT_MASK;
10758 
10759 	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
10760 		alpha = intel_de_read(dev_priv,
10761 				      PLANE_COLOR_CTL(pipe, plane_id));
10762 		alpha &= PLANE_COLOR_ALPHA_MASK;
10763 	} else {
10764 		alpha = val & PLANE_CTL_ALPHA_MASK;
10765 	}
10766 
10767 	fourcc = skl_format_to_fourcc(pixel_format,
10768 				      val & PLANE_CTL_ORDER_RGBX, alpha);
10769 	fb->format = drm_format_info(fourcc);
10770 
10771 	tiling = val & PLANE_CTL_TILED_MASK;
10772 	switch (tiling) {
10773 	case PLANE_CTL_TILED_LINEAR:
10774 		fb->modifier = DRM_FORMAT_MOD_LINEAR;
10775 		break;
10776 	case PLANE_CTL_TILED_X:
10777 		plane_config->tiling = I915_TILING_X;
10778 		fb->modifier = I915_FORMAT_MOD_X_TILED;
10779 		break;
10780 	case PLANE_CTL_TILED_Y:
10781 		plane_config->tiling = I915_TILING_Y;
10782 		if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
10783 			fb->modifier = INTEL_GEN(dev_priv) >= 12 ?
10784 				I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS :
10785 				I915_FORMAT_MOD_Y_TILED_CCS;
10786 		else if (val & PLANE_CTL_MEDIA_DECOMPRESSION_ENABLE)
10787 			fb->modifier = I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS;
10788 		else
10789 			fb->modifier = I915_FORMAT_MOD_Y_TILED;
10790 		break;
10791 	case PLANE_CTL_TILED_YF:
10792 		if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
10793 			fb->modifier = I915_FORMAT_MOD_Yf_TILED_CCS;
10794 		else
10795 			fb->modifier = I915_FORMAT_MOD_Yf_TILED;
10796 		break;
10797 	default:
10798 		MISSING_CASE(tiling);
10799 		goto error;
10800 	}
10801 
10802 	/*
10803 	 * DRM_MODE_ROTATE_ is counter-clockwise to stay compatible with Xrandr,
10804 	 * while i915 HW rotation is clockwise; that's why we swap them here.
10805 	 */
10806 	switch (val & PLANE_CTL_ROTATE_MASK) {
10807 	case PLANE_CTL_ROTATE_0:
10808 		plane_config->rotation = DRM_MODE_ROTATE_0;
10809 		break;
10810 	case PLANE_CTL_ROTATE_90:
10811 		plane_config->rotation = DRM_MODE_ROTATE_270;
10812 		break;
10813 	case PLANE_CTL_ROTATE_180:
10814 		plane_config->rotation = DRM_MODE_ROTATE_180;
10815 		break;
10816 	case PLANE_CTL_ROTATE_270:
10817 		plane_config->rotation = DRM_MODE_ROTATE_90;
10818 		break;
10819 	}
10820 
10821 	if (INTEL_GEN(dev_priv) >= 10 &&
10822 	    val & PLANE_CTL_FLIP_HORIZONTAL)
10823 		plane_config->rotation |= DRM_MODE_REFLECT_X;
10824 
10825 	/* 90/270 degree rotation would require extra work */
10826 	if (drm_rotation_90_or_270(plane_config->rotation))
10827 		goto error;
10828 
10829 	base = intel_de_read(dev_priv, PLANE_SURF(pipe, plane_id)) & 0xfffff000;
10830 	plane_config->base = base;
10831 
10832 	offset = intel_de_read(dev_priv, PLANE_OFFSET(pipe, plane_id));
10833 
10834 	val = intel_de_read(dev_priv, PLANE_SIZE(pipe, plane_id));
10835 	fb->height = ((val >> 16) & 0xffff) + 1;
10836 	fb->width = ((val >> 0) & 0xffff) + 1;
10837 
10838 	val = intel_de_read(dev_priv, PLANE_STRIDE(pipe, plane_id));
10839 	stride_mult = skl_plane_stride_mult(fb, 0, DRM_MODE_ROTATE_0);
10840 	fb->pitches[0] = (val & 0x3ff) * stride_mult;
10841 
10842 	aligned_height = intel_fb_align_height(fb, 0, fb->height);
10843 
10844 	plane_config->size = fb->pitches[0] * aligned_height;
10845 
10846 	drm_dbg_kms(&dev_priv->drm,
10847 		    "%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
10848 		    crtc->base.name, plane->base.name, fb->width, fb->height,
10849 		    fb->format->cpp[0] * 8, base, fb->pitches[0],
10850 		    plane_config->size);
10851 
10852 	plane_config->fb = intel_fb;
10853 	return;
10854 
10855 error:
10856 	kfree(intel_fb);
10857 }
10858 
10859 static void ilk_get_pfit_config(struct intel_crtc_state *crtc_state)
10860 {
10861 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
10862 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10863 	u32 ctl, pos, size;
10864 
10865 	ctl = intel_de_read(dev_priv, PF_CTL(crtc->pipe));
10866 	if ((ctl & PF_ENABLE) == 0)
10867 		return;
10868 
10869 	crtc_state->pch_pfit.enabled = true;
10870 
10871 	pos = intel_de_read(dev_priv, PF_WIN_POS(crtc->pipe));
10872 	size = intel_de_read(dev_priv, PF_WIN_SZ(crtc->pipe));
10873 
10874 	ilk_get_pfit_pos_size(crtc_state, pos, size);
10875 
10876 	/*
10877 	 * We currently do not free assignments of panel fitters on
10878 	 * ivb/hsw (since we don't use the higher upscaling modes which
10879 	 * differentiate them) so just WARN about this case for now.
10880 	 */
10881 	drm_WARN_ON(&dev_priv->drm, IS_GEN(dev_priv, 7) &&
10882 		    (ctl & PF_PIPE_SEL_MASK_IVB) != PF_PIPE_SEL_IVB(crtc->pipe));
10883 }
10884 
10885 static bool ilk_get_pipe_config(struct intel_crtc *crtc,
10886 				struct intel_crtc_state *pipe_config)
10887 {
10888 	struct drm_device *dev = crtc->base.dev;
10889 	struct drm_i915_private *dev_priv = to_i915(dev);
10890 	enum intel_display_power_domain power_domain;
10891 	intel_wakeref_t wakeref;
10892 	u32 tmp;
10893 	bool ret;
10894 
10895 	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
10896 	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
10897 	if (!wakeref)
10898 		return false;
10899 
10900 	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
10901 	pipe_config->shared_dpll = NULL;
10902 
10903 	ret = false;
10904 	tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
10905 	if (!(tmp & PIPECONF_ENABLE))
10906 		goto out;
10907 
10908 	switch (tmp & PIPECONF_BPC_MASK) {
10909 	case PIPECONF_6BPC:
10910 		pipe_config->pipe_bpp = 18;
10911 		break;
10912 	case PIPECONF_8BPC:
10913 		pipe_config->pipe_bpp = 24;
10914 		break;
10915 	case PIPECONF_10BPC:
10916 		pipe_config->pipe_bpp = 30;
10917 		break;
10918 	case PIPECONF_12BPC:
10919 		pipe_config->pipe_bpp = 36;
10920 		break;
10921 	default:
10922 		break;
10923 	}
10924 
10925 	if (tmp & PIPECONF_COLOR_RANGE_SELECT)
10926 		pipe_config->limited_color_range = true;
10927 
10928 	switch (tmp & PIPECONF_OUTPUT_COLORSPACE_MASK) {
10929 	case PIPECONF_OUTPUT_COLORSPACE_YUV601:
10930 	case PIPECONF_OUTPUT_COLORSPACE_YUV709:
10931 		pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
10932 		break;
10933 	default:
10934 		pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
10935 		break;
10936 	}
10937 
10938 	pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_ILK) >>
10939 		PIPECONF_GAMMA_MODE_SHIFT;
10940 
10941 	pipe_config->csc_mode = intel_de_read(dev_priv,
10942 					      PIPE_CSC_MODE(crtc->pipe));
10943 
10944 	i9xx_get_pipe_color_config(pipe_config);
10945 	intel_color_get_config(pipe_config);
10946 
10947 	if (intel_de_read(dev_priv, PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
10948 		struct intel_shared_dpll *pll;
10949 		enum intel_dpll_id pll_id;
10950 		bool pll_active;
10951 
10952 		pipe_config->has_pch_encoder = true;
10953 
10954 		tmp = intel_de_read(dev_priv, FDI_RX_CTL(crtc->pipe));
10955 		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
10956 					  FDI_DP_PORT_WIDTH_SHIFT) + 1;
10957 
10958 		ilk_get_fdi_m_n_config(crtc, pipe_config);
10959 
10960 		if (HAS_PCH_IBX(dev_priv)) {
10961 			/*
10962 			 * The pipe->pch transcoder and pch transcoder->pll
10963 			 * mapping is fixed.
10964 			 */
10965 			pll_id = (enum intel_dpll_id) crtc->pipe;
10966 		} else {
10967 			tmp = intel_de_read(dev_priv, PCH_DPLL_SEL);
10968 			if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
10969 				pll_id = DPLL_ID_PCH_PLL_B;
10970 			else
10971 				pll_id= DPLL_ID_PCH_PLL_A;
10972 				pll_id = DPLL_ID_PCH_PLL_A;
10973 
10974 		pipe_config->shared_dpll =
10975 			intel_get_shared_dpll_by_id(dev_priv, pll_id);
10976 		pll = pipe_config->shared_dpll;
10977 
10978 		pll_active = intel_dpll_get_hw_state(dev_priv, pll,
10979 						     &pipe_config->dpll_hw_state);
10980 		drm_WARN_ON(dev, !pll_active);
10981 
10982 		tmp = pipe_config->dpll_hw_state.dpll;
10983 		pipe_config->pixel_multiplier =
10984 			((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
10985 			 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
10986 
10987 		ilk_pch_clock_get(crtc, pipe_config);
10988 	} else {
10989 		pipe_config->pixel_multiplier = 1;
10990 	}
10991 
10992 	intel_get_transcoder_timings(crtc, pipe_config);
10993 	intel_get_pipe_src_size(crtc, pipe_config);
10994 
10995 	ilk_get_pfit_config(pipe_config);
10996 
10997 	ret = true;
10998 
10999 out:
11000 	intel_display_power_put(dev_priv, power_domain, wakeref);
11001 
11002 	return ret;
11003 }
11004 
11005 static int hsw_crtc_compute_clock(struct intel_crtc *crtc,
11006 				  struct intel_crtc_state *crtc_state)
11007 {
11008 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
11009 	struct intel_atomic_state *state =
11010 		to_intel_atomic_state(crtc_state->uapi.state);
11011 
11012 	if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) ||
11013 	    INTEL_GEN(dev_priv) >= 11) {
11014 		struct intel_encoder *encoder =
11015 			intel_get_crtc_new_encoder(state, crtc_state);
11016 
11017 		if (!intel_reserve_shared_dplls(state, crtc, encoder)) {
11018 			drm_dbg_kms(&dev_priv->drm,
11019 				    "failed to find PLL for pipe %c\n",
11020 				    pipe_name(crtc->pipe));
11021 			return -EINVAL;
11022 		}
11023 	}
11024 
11025 	return 0;
11026 }
11027 
11028 static void dg1_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
11029 			    struct intel_crtc_state *pipe_config)
11030 {
11031 	enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT;
11032 	enum phy phy = intel_port_to_phy(dev_priv, port);
11033 	struct icl_port_dpll *port_dpll;
11034 	struct intel_shared_dpll *pll;
11035 	enum intel_dpll_id id;
11036 	bool pll_active;
11037 	u32 clk_sel;
11038 
11039 	clk_sel = intel_de_read(dev_priv, DG1_DPCLKA_CFGCR0(phy)) & DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
11040 	id = DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_DPLL_MAP(clk_sel, phy);
11041 
11042 	if (WARN_ON(id > DPLL_ID_DG1_DPLL3))
11043 		return;
11044 
11045 	pll = intel_get_shared_dpll_by_id(dev_priv, id);
11046 	port_dpll = &pipe_config->icl_port_dplls[port_dpll_id];
11047 
11048 	port_dpll->pll = pll;
11049 	pll_active = intel_dpll_get_hw_state(dev_priv, pll,
11050 					     &port_dpll->hw_state);
11051 	drm_WARN_ON(&dev_priv->drm, !pll_active);
11052 
11053 	icl_set_active_port_dpll(pipe_config, port_dpll_id);
11054 }
11055 
11056 static void icl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
11057 			    struct intel_crtc_state *pipe_config)
11058 {
11059 	enum phy phy = intel_port_to_phy(dev_priv, port);
11060 	enum icl_port_dpll_id port_dpll_id;
11061 	struct icl_port_dpll *port_dpll;
11062 	struct intel_shared_dpll *pll;
11063 	enum intel_dpll_id id;
11064 	bool pll_active;
11065 	u32 temp;
11066 
11067 	if (intel_phy_is_combo(dev_priv, phy)) {
11068 		u32 mask, shift;
11069 
11070 		if (IS_ROCKETLAKE(dev_priv)) {
11071 			mask = RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
11072 			shift = RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy);
11073 		} else {
11074 			mask = ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
11075 			shift = ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy);
11076 		}
11077 
11078 		temp = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0) & mask;
11079 		id = temp >> shift;
11080 		port_dpll_id = ICL_PORT_DPLL_DEFAULT;
11081 	} else if (intel_phy_is_tc(dev_priv, phy)) {
11082 		u32 clk_sel = intel_de_read(dev_priv, DDI_CLK_SEL(port)) & DDI_CLK_SEL_MASK;
11083 
11084 		if (clk_sel == DDI_CLK_SEL_MG) {
11085 			id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
11086 								    port));
11087 			port_dpll_id = ICL_PORT_DPLL_MG_PHY;
11088 		} else {
11089 			drm_WARN_ON(&dev_priv->drm,
11090 				    clk_sel < DDI_CLK_SEL_TBT_162);
11091 			id = DPLL_ID_ICL_TBTPLL;
11092 			port_dpll_id = ICL_PORT_DPLL_DEFAULT;
11093 		}
11094 	} else {
11095 		drm_WARN(&dev_priv->drm, 1, "Invalid port %x\n", port);
11096 		return;
11097 	}
11098 
11099 	pll = intel_get_shared_dpll_by_id(dev_priv, id);
11100 	port_dpll = &pipe_config->icl_port_dplls[port_dpll_id];
11101 
11102 	port_dpll->pll = pll;
11103 	pll_active = intel_dpll_get_hw_state(dev_priv, pll,
11104 					     &port_dpll->hw_state);
11105 	drm_WARN_ON(&dev_priv->drm, !pll_active);
11106 
11107 	icl_set_active_port_dpll(pipe_config, port_dpll_id);
11108 }
11109 
11110 static void cnl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
11111 			    struct intel_crtc_state *pipe_config)
11112 {
11113 	struct intel_shared_dpll *pll;
11114 	enum intel_dpll_id id;
11115 	bool pll_active;
11116 	u32 temp;
11117 
11118 	temp = intel_de_read(dev_priv, DPCLKA_CFGCR0) & DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
11119 	id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);
11120 
11121 	if (drm_WARN_ON(&dev_priv->drm, id < SKL_DPLL0 || id > SKL_DPLL2))
11122 		return;
11123 
11124 	pll = intel_get_shared_dpll_by_id(dev_priv, id);
11125 
11126 	pipe_config->shared_dpll = pll;
11127 	pll_active = intel_dpll_get_hw_state(dev_priv, pll,
11128 					     &pipe_config->dpll_hw_state);
11129 	drm_WARN_ON(&dev_priv->drm, !pll_active);
11130 }
11131 
11132 static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
11133 				enum port port,
11134 				struct intel_crtc_state *pipe_config)
11135 {
11136 	struct intel_shared_dpll *pll;
11137 	enum intel_dpll_id id;
11138 	bool pll_active;
11139 
11140 	switch (port) {
11141 	case PORT_A:
11142 		id = DPLL_ID_SKL_DPLL0;
11143 		break;
11144 	case PORT_B:
11145 		id = DPLL_ID_SKL_DPLL1;
11146 		break;
11147 	case PORT_C:
11148 		id = DPLL_ID_SKL_DPLL2;
11149 		break;
11150 	default:
11151 		drm_err(&dev_priv->drm, "Incorrect port type\n");
11152 		return;
11153 	}
11154 
11155 	pll = intel_get_shared_dpll_by_id(dev_priv, id);
11156 
11157 	pipe_config->shared_dpll = pll;
11158 	pll_active = intel_dpll_get_hw_state(dev_priv, pll,
11159 					     &pipe_config->dpll_hw_state);
11160 	drm_WARN_ON(&dev_priv->drm, !pll_active);
11161 }
11162 
11163 static void skl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
11164 			    struct intel_crtc_state *pipe_config)
11165 {
11166 	struct intel_shared_dpll *pll;
11167 	enum intel_dpll_id id;
11168 	bool pll_active;
11169 	u32 temp;
11170 
11171 	temp = intel_de_read(dev_priv, DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
11172 	id = temp >> (port * 3 + 1);
11173 
11174 	if (drm_WARN_ON(&dev_priv->drm, id < SKL_DPLL0 || id > SKL_DPLL3))
11175 		return;
11176 
11177 	pll = intel_get_shared_dpll_by_id(dev_priv, id);
11178 
11179 	pipe_config->shared_dpll = pll;
11180 	pll_active = intel_dpll_get_hw_state(dev_priv, pll,
11181 					     &pipe_config->dpll_hw_state);
11182 	drm_WARN_ON(&dev_priv->drm, !pll_active);
11183 }
11184 
11185 static void hsw_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
11186 			    struct intel_crtc_state *pipe_config)
11187 {
11188 	struct intel_shared_dpll *pll;
11189 	enum intel_dpll_id id;
11190 	u32 ddi_pll_sel = intel_de_read(dev_priv, PORT_CLK_SEL(port));
11191 	bool pll_active;
11192 
11193 	switch (ddi_pll_sel) {
11194 	case PORT_CLK_SEL_WRPLL1:
11195 		id = DPLL_ID_WRPLL1;
11196 		break;
11197 	case PORT_CLK_SEL_WRPLL2:
11198 		id = DPLL_ID_WRPLL2;
11199 		break;
11200 	case PORT_CLK_SEL_SPLL:
11201 		id = DPLL_ID_SPLL;
11202 		break;
11203 	case PORT_CLK_SEL_LCPLL_810:
11204 		id = DPLL_ID_LCPLL_810;
11205 		break;
11206 	case PORT_CLK_SEL_LCPLL_1350:
11207 		id = DPLL_ID_LCPLL_1350;
11208 		break;
11209 	case PORT_CLK_SEL_LCPLL_2700:
11210 		id = DPLL_ID_LCPLL_2700;
11211 		break;
11212 	default:
11213 		MISSING_CASE(ddi_pll_sel);
11214 		fallthrough;
11215 	case PORT_CLK_SEL_NONE:
11216 		return;
11217 	}
11218 
11219 	pll = intel_get_shared_dpll_by_id(dev_priv, id);
11220 
11221 	pipe_config->shared_dpll = pll;
11222 	pll_active = intel_dpll_get_hw_state(dev_priv, pll,
11223 					     &pipe_config->dpll_hw_state);
11224 	drm_WARN_ON(&dev_priv->drm, !pll_active);
11225 }
11226 
11227 static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
11228 				     struct intel_crtc_state *pipe_config,
11229 				     u64 *power_domain_mask,
11230 				     intel_wakeref_t *wakerefs)
11231 {
11232 	struct drm_device *dev = crtc->base.dev;
11233 	struct drm_i915_private *dev_priv = to_i915(dev);
11234 	enum intel_display_power_domain power_domain;
11235 	unsigned long panel_transcoder_mask = BIT(TRANSCODER_EDP);
11236 	unsigned long enabled_panel_transcoders = 0;
11237 	enum transcoder panel_transcoder;
11238 	intel_wakeref_t wf;
11239 	u32 tmp;
11240 
11241 	if (INTEL_GEN(dev_priv) >= 11)
11242 		panel_transcoder_mask |=
11243 			BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);
11244 
11245 	/*
11246 	 * The pipe->transcoder mapping is fixed with the exception of the eDP
11247 	 * and DSI transcoders handled below.
11248 	 */
11249 	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
11250 
11251 	/*
11252 	 * XXX: Do intel_display_power_get_if_enabled before reading this (for
11253 	 * consistency and less surprising code; it's in an always-on power well).
11254 	 */
11255 	for_each_cpu_transcoder_masked(dev_priv, panel_transcoder,
11256 				       panel_transcoder_mask) {
11257 		bool force_thru = false;
11258 		enum pipe trans_pipe;
11259 
11260 		tmp = intel_de_read(dev_priv,
11261 				    TRANS_DDI_FUNC_CTL(panel_transcoder));
11262 		if (!(tmp & TRANS_DDI_FUNC_ENABLE))
11263 			continue;
11264 
11265 		/*
11266 		 * Log all enabled ones, only use the first one.
11267 		 *
11268 		 * FIXME: This won't work for two separate DSI displays.
11269 		 */
11270 		enabled_panel_transcoders |= BIT(panel_transcoder);
11271 		if (enabled_panel_transcoders != BIT(panel_transcoder))
11272 			continue;
11273 
11274 		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
11275 		default:
11276 			drm_WARN(dev, 1,
11277 				 "unknown pipe linked to transcoder %s\n",
11278 				 transcoder_name(panel_transcoder));
11279 			fallthrough;
11280 		case TRANS_DDI_EDP_INPUT_A_ONOFF:
11281 			force_thru = true;
11282 			fallthrough;
11283 		case TRANS_DDI_EDP_INPUT_A_ON:
11284 			trans_pipe = PIPE_A;
11285 			break;
11286 		case TRANS_DDI_EDP_INPUT_B_ONOFF:
11287 			trans_pipe = PIPE_B;
11288 			break;
11289 		case TRANS_DDI_EDP_INPUT_C_ONOFF:
11290 			trans_pipe = PIPE_C;
11291 			break;
11292 		case TRANS_DDI_EDP_INPUT_D_ONOFF:
11293 			trans_pipe = PIPE_D;
11294 			break;
11295 		}
11296 
11297 		if (trans_pipe == crtc->pipe) {
11298 			pipe_config->cpu_transcoder = panel_transcoder;
11299 			pipe_config->pch_pfit.force_thru = force_thru;
11300 		}
11301 	}
11302 
11303 	/*
11304 	 * Valid combos: none, eDP, DSI0, DSI1, DSI0+DSI1
11305 	 */
11306 	drm_WARN_ON(dev, (enabled_panel_transcoders & BIT(TRANSCODER_EDP)) &&
11307 		    enabled_panel_transcoders != BIT(TRANSCODER_EDP));
11308 
11309 	power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
11310 	drm_WARN_ON(dev, *power_domain_mask & BIT_ULL(power_domain));
11311 
11312 	wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
11313 	if (!wf)
11314 		return false;
11315 
11316 	wakerefs[power_domain] = wf;
11317 	*power_domain_mask |= BIT_ULL(power_domain);
11318 
11319 	tmp = intel_de_read(dev_priv, PIPECONF(pipe_config->cpu_transcoder));
11320 
11321 	return tmp & PIPECONF_ENABLE;
11322 }
11323 
11324 static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
11325 					 struct intel_crtc_state *pipe_config,
11326 					 u64 *power_domain_mask,
11327 					 intel_wakeref_t *wakerefs)
11328 {
11329 	struct drm_device *dev = crtc->base.dev;
11330 	struct drm_i915_private *dev_priv = to_i915(dev);
11331 	enum intel_display_power_domain power_domain;
11332 	enum transcoder cpu_transcoder;
11333 	intel_wakeref_t wf;
11334 	enum port port;
11335 	u32 tmp;
11336 
11337 	for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
11338 		if (port == PORT_A)
11339 			cpu_transcoder = TRANSCODER_DSI_A;
11340 		else
11341 			cpu_transcoder = TRANSCODER_DSI_C;
11342 
11343 		power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
11344 		drm_WARN_ON(dev, *power_domain_mask & BIT_ULL(power_domain));
11345 
11346 		wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
11347 		if (!wf)
11348 			continue;
11349 
11350 		wakerefs[power_domain] = wf;
11351 		*power_domain_mask |= BIT_ULL(power_domain);
11352 
11353 		/*
11354 		 * The PLL needs to be enabled with a valid divider
11355 		 * configuration, otherwise accessing DSI registers will hang
11356 		 * the machine. See BSpec North Display Engine
11357 		 * registers/MIPI[BXT]. We can break out here early, since we
11358 		 * need the same DSI PLL to be enabled for both DSI ports.
11359 		 */
11360 		if (!bxt_dsi_pll_is_enabled(dev_priv))
11361 			break;
11362 
11363 		/* XXX: this works for video mode only */
11364 		tmp = intel_de_read(dev_priv, BXT_MIPI_PORT_CTRL(port));
11365 		if (!(tmp & DPI_ENABLE))
11366 			continue;
11367 
11368 		tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
11369 		if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
11370 			continue;
11371 
11372 		pipe_config->cpu_transcoder = cpu_transcoder;
11373 		break;
11374 	}
11375 
11376 	return transcoder_is_dsi(pipe_config->cpu_transcoder);
11377 }
11378 
11379 static void hsw_get_ddi_port_state(struct intel_crtc *crtc,
11380 				   struct intel_crtc_state *pipe_config)
11381 {
11382 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
11383 	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
11384 	enum port port;
11385 	u32 tmp;
11386 
11387 	if (transcoder_is_dsi(cpu_transcoder)) {
11388 		port = (cpu_transcoder == TRANSCODER_DSI_A) ?
11389 						PORT_A : PORT_B;
11390 	} else {
11391 		tmp = intel_de_read(dev_priv,
11392 				    TRANS_DDI_FUNC_CTL(cpu_transcoder));
11393 		if (!(tmp & TRANS_DDI_FUNC_ENABLE))
11394 			return;
11395 		if (INTEL_GEN(dev_priv) >= 12)
11396 			port = TGL_TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
11397 		else
11398 			port = TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
11399 	}
11400 
11401 	if (IS_DG1(dev_priv))
11402 		dg1_get_ddi_pll(dev_priv, port, pipe_config);
11403 	else if (INTEL_GEN(dev_priv) >= 11)
11404 		icl_get_ddi_pll(dev_priv, port, pipe_config);
11405 	else if (IS_CANNONLAKE(dev_priv))
11406 		cnl_get_ddi_pll(dev_priv, port, pipe_config);
11407 	else if (IS_GEN9_LP(dev_priv))
11408 		bxt_get_ddi_pll(dev_priv, port, pipe_config);
11409 	else if (IS_GEN9_BC(dev_priv))
11410 		skl_get_ddi_pll(dev_priv, port, pipe_config);
11411 	else
11412 		hsw_get_ddi_pll(dev_priv, port, pipe_config);
11413 
11414 	/*
11415 	 * Haswell has only one FDI/PCH transcoder A; it is connected to DDI E.
11416 	 * So just check whether this pipe is wired to DDI E and whether
11417 	 * the PCH transcoder is on.
11418 	 */
11419 	if (INTEL_GEN(dev_priv) < 9 &&
11420 	    (port == PORT_E) && intel_de_read(dev_priv, LPT_TRANSCONF) & TRANS_ENABLE) {
11421 		pipe_config->has_pch_encoder = true;
11422 
11423 		tmp = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A));
11424 		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
11425 					  FDI_DP_PORT_WIDTH_SHIFT) + 1;
11426 
11427 		ilk_get_fdi_m_n_config(crtc, pipe_config);
11428 	}
11429 }
11430 
11431 static bool hsw_get_pipe_config(struct intel_crtc *crtc,
11432 				struct intel_crtc_state *pipe_config)
11433 {
11434 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
11435 	intel_wakeref_t wakerefs[POWER_DOMAIN_NUM], wf;
11436 	enum intel_display_power_domain power_domain;
11437 	u64 power_domain_mask;
11438 	bool active;
11439 	u32 tmp;
11440 
11441 	pipe_config->master_transcoder = INVALID_TRANSCODER;
11442 
11443 	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
11444 	wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
11445 	if (!wf)
11446 		return false;
11447 
11448 	wakerefs[power_domain] = wf;
11449 	power_domain_mask = BIT_ULL(power_domain);
11450 
11451 	pipe_config->shared_dpll = NULL;
11452 
11453 	active = hsw_get_transcoder_state(crtc, pipe_config,
11454 					  &power_domain_mask, wakerefs);
11455 
11456 	if (IS_GEN9_LP(dev_priv) &&
11457 	    bxt_get_dsi_transcoder_state(crtc, pipe_config,
11458 					 &power_domain_mask, wakerefs)) {
11459 		drm_WARN_ON(&dev_priv->drm, active);
11460 		active = true;
11461 	}
11462 
11463 	intel_dsc_get_config(pipe_config);
11464 
11465 	if (!active) {
11466 		/* bigjoiner slave doesn't enable transcoder */
11467 		if (!pipe_config->bigjoiner_slave)
11468 			goto out;
11469 
11470 		active = true;
11471 		pipe_config->pixel_multiplier = 1;
11472 
11473 		/* we cannot read out most state, so don't bother. */
11474 		pipe_config->quirks |= PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE;
11475 	} else if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
11476 	    INTEL_GEN(dev_priv) >= 11) {
11477 		hsw_get_ddi_port_state(crtc, pipe_config);
11478 		intel_get_transcoder_timings(crtc, pipe_config);
11479 	}
11480 
11481 	intel_get_pipe_src_size(crtc, pipe_config);
11482 
11483 	if (IS_HASWELL(dev_priv)) {
11484 		u32 tmp = intel_de_read(dev_priv,
11485 					PIPECONF(pipe_config->cpu_transcoder));
11486 
11487 		if (tmp & PIPECONF_OUTPUT_COLORSPACE_YUV_HSW)
11488 			pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
11489 		else
11490 			pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
11491 	} else {
11492 		pipe_config->output_format =
11493 			bdw_get_pipemisc_output_format(crtc);
11494 	}
11495 
11496 	pipe_config->gamma_mode = intel_de_read(dev_priv,
11497 						GAMMA_MODE(crtc->pipe));
11498 
11499 	pipe_config->csc_mode = intel_de_read(dev_priv,
11500 					      PIPE_CSC_MODE(crtc->pipe));
11501 
11502 	if (INTEL_GEN(dev_priv) >= 9) {
11503 		tmp = intel_de_read(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe));
11504 
11505 		if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE)
11506 			pipe_config->gamma_enable = true;
11507 
11508 		if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE)
11509 			pipe_config->csc_enable = true;
11510 	} else {
11511 		i9xx_get_pipe_color_config(pipe_config);
11512 	}
11513 
11514 	intel_color_get_config(pipe_config);
11515 
11516 	tmp = intel_de_read(dev_priv, WM_LINETIME(crtc->pipe));
11517 	pipe_config->linetime = REG_FIELD_GET(HSW_LINETIME_MASK, tmp);
11518 	if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
11519 		pipe_config->ips_linetime =
11520 			REG_FIELD_GET(HSW_IPS_LINETIME_MASK, tmp);
11521 
11522 	power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
11523 	drm_WARN_ON(&dev_priv->drm, power_domain_mask & BIT_ULL(power_domain));
11524 
11525 	wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
11526 	if (wf) {
11527 		wakerefs[power_domain] = wf;
11528 		power_domain_mask |= BIT_ULL(power_domain);
11529 
11530 		if (INTEL_GEN(dev_priv) >= 9)
11531 			skl_get_pfit_config(pipe_config);
11532 		else
11533 			ilk_get_pfit_config(pipe_config);
11534 	}
11535 
11536 	if (hsw_crtc_supports_ips(crtc)) {
11537 		if (IS_HASWELL(dev_priv))
11538 			pipe_config->ips_enabled = intel_de_read(dev_priv,
11539 								 IPS_CTL) & IPS_ENABLE;
11540 		else {
11541 			/*
11542 			 * We cannot read out the IPS state on Broadwell; set
11543 			 * it to true so we can force it to a defined state on
11544 			 * the first commit.
11545 			 */
11546 			pipe_config->ips_enabled = true;
11547 		}
11548 	}
11549 
11550 	if (pipe_config->bigjoiner_slave) {
11551 		/* Cannot be read out as a slave, set to 0. */
11552 		pipe_config->pixel_multiplier = 0;
11553 	} else if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
11554 	    !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
11555 		pipe_config->pixel_multiplier =
11556 			intel_de_read(dev_priv,
11557 				      PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
11558 	} else {
11559 		pipe_config->pixel_multiplier = 1;
11560 	}
11561 
11562 out:
11563 	for_each_power_domain(power_domain, power_domain_mask)
11564 		intel_display_power_put(dev_priv,
11565 					power_domain, wakerefs[power_domain]);
11566 
11567 	return active;
11568 }
11569 
11570 static bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state)
11571 {
11572 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
11573 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
11574 
11575 	if (!i915->display.get_pipe_config(crtc, crtc_state))
11576 		return false;
11577 
11578 	crtc_state->hw.active = true;
11579 
11580 	intel_crtc_readout_derived_state(crtc_state);
11581 
11582 	return true;
11583 }
11584 
11585 static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
11586 {
11587 	struct drm_i915_private *dev_priv =
11588 		to_i915(plane_state->uapi.plane->dev);
11589 	const struct drm_framebuffer *fb = plane_state->hw.fb;
11590 	const struct drm_i915_gem_object *obj = intel_fb_obj(fb);
11591 	u32 base;
11592 
11593 	if (INTEL_INFO(dev_priv)->display.cursor_needs_physical)
11594 		base = sg_dma_address(obj->mm.pages->sgl);
11595 	else
11596 		base = intel_plane_ggtt_offset(plane_state);
11597 
11598 	return base + plane_state->color_plane[0].offset;
11599 }
11600 
11601 static u32 intel_cursor_position(const struct intel_plane_state *plane_state)
11602 {
11603 	int x = plane_state->uapi.dst.x1;
11604 	int y = plane_state->uapi.dst.y1;
11605 	u32 pos = 0;
11606 
11607 	if (x < 0) {
11608 		pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
11609 		x = -x;
11610 	}
11611 	pos |= x << CURSOR_X_SHIFT;
11612 
11613 	if (y < 0) {
11614 		pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
11615 		y = -y;
11616 	}
11617 	pos |= y << CURSOR_Y_SHIFT;
11618 
11619 	return pos;
11620 }
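/*
 * Worked example (annotation): the hardware uses sign-magnitude encoding,
 * so x = -10 sets CURSOR_POS_SIGN in the X field and stores the magnitude
 * 10, rather than a two's-complement -10.
 */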
11621 
11622 static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state)
11623 {
11624 	const struct drm_mode_config *config =
11625 		&plane_state->uapi.plane->dev->mode_config;
11626 	int width = drm_rect_width(&plane_state->uapi.dst);
11627 	int height = drm_rect_height(&plane_state->uapi.dst);
11628 
11629 	return width > 0 && width <= config->cursor_width &&
11630 		height > 0 && height <= config->cursor_height;
11631 }
11632 
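/*
 * Compute the surface offset for the cursor. Cursors cannot pan within
 * the fb, so the aligned offset must land exactly on the first pixel.
 * GMCH platforms handle 180 degree rotation by pointing the base at
 * the last pixel of the fb; ILK+ do the rotation in hardware.
 */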
11633 static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
11634 {
11635 	struct drm_i915_private *dev_priv =
11636 		to_i915(plane_state->uapi.plane->dev);
11637 	unsigned int rotation = plane_state->hw.rotation;
11638 	int src_x, src_y;
11639 	u32 offset;
11640 	int ret;
11641 
11642 	ret = intel_plane_compute_gtt(plane_state);
11643 	if (ret)
11644 		return ret;
11645 
11646 	if (!plane_state->uapi.visible)
11647 		return 0;
11648 
11649 	src_x = plane_state->uapi.src.x1 >> 16;
11650 	src_y = plane_state->uapi.src.y1 >> 16;
11651 
11652 	intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
11653 	offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
11654 						    plane_state, 0);
11655 
11656 	if (src_x != 0 || src_y != 0) {
11657 		drm_dbg_kms(&dev_priv->drm,
11658 			    "Arbitrary cursor panning not supported\n");
11659 		return -EINVAL;
11660 	}
11661 
11662 	/*
11663 	 * Put the final coordinates back so that the src
11664 	 * coordinate checks will see the right values.
11665 	 */
11666 	drm_rect_translate_to(&plane_state->uapi.src,
11667 			      src_x << 16, src_y << 16);
11668 
11669 	/* ILK+ do this automagically in hardware */
11670 	if (HAS_GMCH(dev_priv) && rotation & DRM_MODE_ROTATE_180) {
11671 		const struct drm_framebuffer *fb = plane_state->hw.fb;
11672 		int src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
11673 		int src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
11674 
11675 		offset += (src_h * src_w - 1) * fb->format->cpp[0];
11676 	}
11677 
11678 	plane_state->color_plane[0].offset = offset;
11679 	plane_state->color_plane[0].x = src_x;
11680 	plane_state->color_plane[0].y = src_y;
11681 
11682 	return 0;
11683 }
11684 
11685 static int intel_check_cursor(struct intel_crtc_state *crtc_state,
11686 			      struct intel_plane_state *plane_state)
11687 {
11688 	const struct drm_framebuffer *fb = plane_state->hw.fb;
11689 	struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
11690 	const struct drm_rect src = plane_state->uapi.src;
11691 	const struct drm_rect dst = plane_state->uapi.dst;
11692 	int ret;
11693 
11694 	if (fb && fb->modifier != DRM_FORMAT_MOD_LINEAR) {
11695 		drm_dbg_kms(&i915->drm, "cursor cannot be tiled\n");
11696 		return -EINVAL;
11697 	}
11698 
11699 	ret = intel_atomic_plane_check_clipping(plane_state, crtc_state,
11700 						DRM_PLANE_HELPER_NO_SCALING,
11701 						DRM_PLANE_HELPER_NO_SCALING,
11702 						true);
11703 	if (ret)
11704 		return ret;
11705 
11706 	/* Use the unclipped src/dst rectangles, which we program to hw */
11707 	plane_state->uapi.src = src;
11708 	plane_state->uapi.dst = dst;
11709 
11710 	ret = intel_cursor_check_surface(plane_state);
11711 	if (ret)
11712 		return ret;
11713 
11714 	if (!plane_state->uapi.visible)
11715 		return 0;
11716 
11717 	ret = intel_plane_check_src_coordinates(plane_state);
11718 	if (ret)
11719 		return ret;
11720 
11721 	return 0;
11722 }
11723 
11724 static unsigned int
11725 i845_cursor_max_stride(struct intel_plane *plane,
11726 		       u32 pixel_format, u64 modifier,
11727 		       unsigned int rotation)
11728 {
11729 	return 2048;
11730 }
11731 
11732 static u32 i845_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
11733 {
11734 	u32 cntl = 0;
11735 
11736 	if (crtc_state->gamma_enable)
11737 		cntl |= CURSOR_GAMMA_ENABLE;
11738 
11739 	return cntl;
11740 }
11741 
11742 static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state,
11743 			   const struct intel_plane_state *plane_state)
11744 {
11745 	return CURSOR_ENABLE |
11746 		CURSOR_FORMAT_ARGB |
11747 		CURSOR_STRIDE(plane_state->color_plane[0].stride);
11748 }
11749 
11750 static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state)
11751 {
11752 	int width = drm_rect_width(&plane_state->uapi.dst);
11753 
11754 	/*
11755 	 * 845g/865g are only limited by the width of their cursors;
11756 	 * the height is arbitrary up to the precision of the register.
11757 	 */
11758 	return intel_cursor_size_ok(plane_state) && IS_ALIGNED(width, 64);
11759 }
11760 
11761 static int i845_check_cursor(struct intel_crtc_state *crtc_state,
11762 			     struct intel_plane_state *plane_state)
11763 {
11764 	const struct drm_framebuffer *fb = plane_state->hw.fb;
11765 	struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
11766 	int ret;
11767 
11768 	ret = intel_check_cursor(crtc_state, plane_state);
11769 	if (ret)
11770 		return ret;
11771 
11772 	/* if we want to turn off the cursor ignore width and height */
11773 	if (!fb)
11774 		return 0;
11775 
11776 	/* Check for which cursor types we support */
11777 	if (!i845_cursor_size_ok(plane_state)) {
11778 		drm_dbg_kms(&i915->drm,
11779 			    "Cursor dimension %dx%d not supported\n",
11780 			    drm_rect_width(&plane_state->uapi.dst),
11781 			    drm_rect_height(&plane_state->uapi.dst));
11782 		return -EINVAL;
11783 	}
11784 
11785 	drm_WARN_ON(&i915->drm, plane_state->uapi.visible &&
11786 		    plane_state->color_plane[0].stride != fb->pitches[0]);
11787 
11788 	switch (fb->pitches[0]) {
11789 	case 256:
11790 	case 512:
11791 	case 1024:
11792 	case 2048:
11793 		break;
11794 	default:
11795 		drm_dbg_kms(&i915->drm, "Invalid cursor stride (%u)\n",
11796 			    fb->pitches[0]);
11797 		return -EINVAL;
11798 	}
11799 
11800 	plane_state->ctl = i845_cursor_ctl(crtc_state, plane_state);
11801 
11802 	return 0;
11803 }
11804 
11805 static void i845_update_cursor(struct intel_plane *plane,
11806 			       const struct intel_crtc_state *crtc_state,
11807 			       const struct intel_plane_state *plane_state)
11808 {
11809 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
11810 	u32 cntl = 0, base = 0, pos = 0, size = 0;
11811 	unsigned long irqflags;
11812 
11813 	if (plane_state && plane_state->uapi.visible) {
11814 		unsigned int width = drm_rect_width(&plane_state->uapi.dst);
11815 		unsigned int height = drm_rect_height(&plane_state->uapi.dst);
11816 
11817 		cntl = plane_state->ctl |
11818 			i845_cursor_ctl_crtc(crtc_state);
11819 
11820 		size = (height << 12) | width;
11821 
11822 		base = intel_cursor_base(plane_state);
11823 		pos = intel_cursor_position(plane_state);
11824 	}
11825 
11826 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
11827 
11828 	/* On these chipsets we can only modify the base/size/stride
11829 	 * whilst the cursor is disabled.
11830 	 */
11831 	if (plane->cursor.base != base ||
11832 	    plane->cursor.size != size ||
11833 	    plane->cursor.cntl != cntl) {
11834 		intel_de_write_fw(dev_priv, CURCNTR(PIPE_A), 0);
11835 		intel_de_write_fw(dev_priv, CURBASE(PIPE_A), base);
11836 		intel_de_write_fw(dev_priv, CURSIZE, size);
11837 		intel_de_write_fw(dev_priv, CURPOS(PIPE_A), pos);
11838 		intel_de_write_fw(dev_priv, CURCNTR(PIPE_A), cntl);
11839 
11840 		plane->cursor.base = base;
11841 		plane->cursor.size = size;
11842 		plane->cursor.cntl = cntl;
11843 	} else {
11844 		intel_de_write_fw(dev_priv, CURPOS(PIPE_A), pos);
11845 	}
11846 
11847 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
11848 }
11849 
11850 static void i845_disable_cursor(struct intel_plane *plane,
11851 				const struct intel_crtc_state *crtc_state)
11852 {
11853 	i845_update_cursor(plane, crtc_state, NULL);
11854 }
11855 
11856 static bool i845_cursor_get_hw_state(struct intel_plane *plane,
11857 				     enum pipe *pipe)
11858 {
11859 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
11860 	enum intel_display_power_domain power_domain;
11861 	intel_wakeref_t wakeref;
11862 	bool ret;
11863 
11864 	power_domain = POWER_DOMAIN_PIPE(PIPE_A);
11865 	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
11866 	if (!wakeref)
11867 		return false;
11868 
11869 	ret = intel_de_read(dev_priv, CURCNTR(PIPE_A)) & CURSOR_ENABLE;
11870 
11871 	*pipe = PIPE_A;
11872 
11873 	intel_display_power_put(dev_priv, power_domain, wakeref);
11874 
11875 	return ret;
11876 }
11877 
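/* cursor formats are all 32bpp ARGB, hence 4 bytes per pixel */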
11878 static unsigned int
11879 i9xx_cursor_max_stride(struct intel_plane *plane,
11880 		       u32 pixel_format, u64 modifier,
11881 		       unsigned int rotation)
11882 {
11883 	return plane->base.dev->mode_config.cursor_width * 4;
11884 }
11885 
11886 static u32 i9xx_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
11887 {
11888 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
11889 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
11890 	u32 cntl = 0;
11891 
11892 	if (INTEL_GEN(dev_priv) >= 11)
11893 		return cntl;
11894 
11895 	if (crtc_state->gamma_enable)
11896 		cntl = MCURSOR_GAMMA_ENABLE;
11897 
11898 	if (crtc_state->csc_enable)
11899 		cntl |= MCURSOR_PIPE_CSC_ENABLE;
11900 
11901 	if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
11902 		cntl |= MCURSOR_PIPE_SELECT(crtc->pipe);
11903 
11904 	return cntl;
11905 }
11906 
11907 static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
11908 			   const struct intel_plane_state *plane_state)
11909 {
11910 	struct drm_i915_private *dev_priv =
11911 		to_i915(plane_state->uapi.plane->dev);
11912 	u32 cntl = 0;
11913 
11914 	if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
11915 		cntl |= MCURSOR_TRICKLE_FEED_DISABLE;
11916 
11917 	switch (drm_rect_width(&plane_state->uapi.dst)) {
11918 	case 64:
11919 		cntl |= MCURSOR_MODE_64_ARGB_AX;
11920 		break;
11921 	case 128:
11922 		cntl |= MCURSOR_MODE_128_ARGB_AX;
11923 		break;
11924 	case 256:
11925 		cntl |= MCURSOR_MODE_256_ARGB_AX;
11926 		break;
11927 	default:
11928 		MISSING_CASE(drm_rect_width(&plane_state->uapi.dst));
11929 		return 0;
11930 	}
11931 
11932 	if (plane_state->hw.rotation & DRM_MODE_ROTATE_180)
11933 		cntl |= MCURSOR_ROTATE_180;
11934 
11935 	return cntl;
11936 }
11937 
11938 static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state)
11939 {
11940 	struct drm_i915_private *dev_priv =
11941 		to_i915(plane_state->uapi.plane->dev);
11942 	int width = drm_rect_width(&plane_state->uapi.dst);
11943 	int height = drm_rect_height(&plane_state->uapi.dst);
11944 
11945 	if (!intel_cursor_size_ok(plane_state))
11946 		return false;
11947 
11948 	/* Cursor width is limited to a few power-of-two sizes */
11949 	switch (width) {
11950 	case 256:
11951 	case 128:
11952 	case 64:
11953 		break;
11954 	default:
11955 		return false;
11956 	}
11957 
11958 	/*
11959 	 * IVB+ have CUR_FBC_CTL which allows an arbitrary cursor
11960 	 * height from 8 lines up to the cursor width, when the
11961 	 * cursor is not rotated. Everything else requires square
11962 	 * cursors.
11963 	 */
11964 	if (HAS_CUR_FBC(dev_priv) &&
11965 	    plane_state->hw.rotation & DRM_MODE_ROTATE_0) {
11966 		if (height < 8 || height > width)
11967 			return false;
11968 	} else {
11969 		if (height != width)
11970 			return false;
11971 	}
11972 
11973 	return true;
11974 }
11975 
11976 static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
11977 			     struct intel_plane_state *plane_state)
11978 {
11979 	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
11980 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
11981 	const struct drm_framebuffer *fb = plane_state->hw.fb;
11982 	enum pipe pipe = plane->pipe;
11983 	int ret;
11984 
11985 	ret = intel_check_cursor(crtc_state, plane_state);
11986 	if (ret)
11987 		return ret;
11988 
11989 	/* if we want to turn off the cursor ignore width and height */
11990 	if (!fb)
11991 		return 0;
11992 
11993 	/* Check for which cursor types we support */
11994 	if (!i9xx_cursor_size_ok(plane_state)) {
11995 		drm_dbg(&dev_priv->drm,
11996 			"Cursor dimension %dx%d not supported\n",
11997 			drm_rect_width(&plane_state->uapi.dst),
11998 			drm_rect_height(&plane_state->uapi.dst));
11999 		return -EINVAL;
12000 	}
12001 
12002 	drm_WARN_ON(&dev_priv->drm, plane_state->uapi.visible &&
12003 		    plane_state->color_plane[0].stride != fb->pitches[0]);
12004 
12005 	if (fb->pitches[0] !=
12006 	    drm_rect_width(&plane_state->uapi.dst) * fb->format->cpp[0]) {
12007 		drm_dbg_kms(&dev_priv->drm,
12008 			    "Invalid cursor stride (%u) (cursor width %d)\n",
12009 			    fb->pitches[0],
12010 			    drm_rect_width(&plane_state->uapi.dst));
12011 		return -EINVAL;
12012 	}
12013 
12014 	/*
12015 	 * There's something wrong with the cursor on CHV pipe C.
12016 	 * If it straddles the left edge of the screen then
12017 	 * moving it away from the edge or disabling it often
12018 	 * results in a pipe underrun, and often that can lead to
12019 	 * a dead pipe (constant underrun reported, and it scans
12020 	 * out just a solid color). To recover from that, the
12021 	 * display power well must be turned off and on again.
12022 	 * Refuse to put the cursor into that compromised position.
12023 	 */
12024 	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C &&
12025 	    plane_state->uapi.visible && plane_state->uapi.dst.x1 < 0) {
12026 		drm_dbg_kms(&dev_priv->drm,
12027 			    "CHV cursor C not allowed to straddle the left screen edge\n");
12028 		return -EINVAL;
12029 	}
12030 
12031 	plane_state->ctl = i9xx_cursor_ctl(crtc_state, plane_state);
12032 
12033 	return 0;
12034 }
12035 
12036 static void i9xx_update_cursor(struct intel_plane *plane,
12037 			       const struct intel_crtc_state *crtc_state,
12038 			       const struct intel_plane_state *plane_state)
12039 {
12040 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
12041 	enum pipe pipe = plane->pipe;
12042 	u32 cntl = 0, base = 0, pos = 0, fbc_ctl = 0;
12043 	unsigned long irqflags;
12044 
12045 	if (plane_state && plane_state->uapi.visible) {
12046 		unsigned int width = drm_rect_width(&plane_state->uapi.dst);
12047 		unsigned int height = drm_rect_height(&plane_state->uapi.dst);
12048 
12049 		cntl = plane_state->ctl |
12050 			i9xx_cursor_ctl_crtc(crtc_state);
12051 
12052 		if (width != height)
12053 			fbc_ctl = CUR_FBC_CTL_EN | (height - 1);
12054 
12055 		base = intel_cursor_base(plane_state);
12056 		pos = intel_cursor_position(plane_state);
12057 	}
12058 
12059 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
12060 
12061 	/*
12062 	 * On some platforms writing CURCNTR first will also
12063 	 * cause CURPOS to be armed by the CURBASE write.
12064 	 * Without the CURCNTR write the CURPOS write would
12065 	 * arm itself. Thus we always update CURCNTR before
12066 	 * CURPOS.
12067 	 *
12068 	 * On other platforms CURPOS always requires the
12069 	 * CURBASE write to arm the update. Additionally,
12070 	 * a write to any of the cursor registers will cancel
12071 	 * an already armed cursor update. Thus leaving out
12072 	 * the CURBASE write after CURPOS could lead to a
12073 	 * cursor that doesn't appear to move, or even change
12074 	 * shape. Thus we always write CURBASE.
12075 	 *
12076 	 * The other registers are armed by the CURBASE write
12077 	 * except when the plane is getting enabled at which time
12078 	 * the CURCNTR write arms the update.
12079 	 */
12080 
12081 	if (INTEL_GEN(dev_priv) >= 9)
12082 		skl_write_cursor_wm(plane, crtc_state);
12083 
12084 	if (!needs_modeset(crtc_state))
12085 		intel_psr2_program_plane_sel_fetch(plane, crtc_state, plane_state, 0);
12086 
12087 	if (plane->cursor.base != base ||
12088 	    plane->cursor.size != fbc_ctl ||
12089 	    plane->cursor.cntl != cntl) {
12090 		if (HAS_CUR_FBC(dev_priv))
12091 			intel_de_write_fw(dev_priv, CUR_FBC_CTL(pipe),
12092 					  fbc_ctl);
12093 		intel_de_write_fw(dev_priv, CURCNTR(pipe), cntl);
12094 		intel_de_write_fw(dev_priv, CURPOS(pipe), pos);
12095 		intel_de_write_fw(dev_priv, CURBASE(pipe), base);
12096 
12097 		plane->cursor.base = base;
12098 		plane->cursor.size = fbc_ctl;
12099 		plane->cursor.cntl = cntl;
12100 	} else {
12101 		intel_de_write_fw(dev_priv, CURPOS(pipe), pos);
12102 		intel_de_write_fw(dev_priv, CURBASE(pipe), base);
12103 	}
12104 
12105 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
12106 }
12107 
12108 static void i9xx_disable_cursor(struct intel_plane *plane,
12109 				const struct intel_crtc_state *crtc_state)
12110 {
12111 	i9xx_update_cursor(plane, crtc_state, NULL);
12112 }
12113 
12114 static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
12115 				     enum pipe *pipe)
12116 {
12117 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
12118 	enum intel_display_power_domain power_domain;
12119 	intel_wakeref_t wakeref;
12120 	bool ret;
12121 	u32 val;
12122 
12123 	/*
12124 	 * Not 100% correct for planes that can move between pipes,
12125 	 * but that's only the case for gen2-3 which don't have any
12126 	 * display power wells.
12127 	 */
12128 	power_domain = POWER_DOMAIN_PIPE(plane->pipe);
12129 	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
12130 	if (!wakeref)
12131 		return false;
12132 
12133 	val = intel_de_read(dev_priv, CURCNTR(plane->pipe));
12134 
12135 	ret = val & MCURSOR_MODE;
12136 
12137 	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
12138 		*pipe = plane->pipe;
12139 	else
12140 		*pipe = (val & MCURSOR_PIPE_SELECT_MASK) >>
12141 			MCURSOR_PIPE_SELECT_SHIFT;
12142 
12143 	intel_display_power_put(dev_priv, power_domain, wakeref);
12144 
12145 	return ret;
12146 }
12147 
12148 /* VESA 640x480x72Hz mode to set on the pipe */
12149 static const struct drm_display_mode load_detect_mode = {
12150 	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
12151 		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
12152 };
12153 
12154 struct drm_framebuffer *
12155 intel_framebuffer_create(struct drm_i915_gem_object *obj,
12156 			 struct drm_mode_fb_cmd2 *mode_cmd)
12157 {
12158 	struct intel_framebuffer *intel_fb;
12159 	int ret;
12160 
12161 	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
12162 	if (!intel_fb)
12163 		return ERR_PTR(-ENOMEM);
12164 
12165 	ret = intel_framebuffer_init(intel_fb, obj, mode_cmd);
12166 	if (ret)
12167 		goto err;
12168 
12169 	return &intel_fb->base;
12170 
12171 err:
12172 	kfree(intel_fb);
12173 	return ERR_PTR(ret);
12174 }
12175 
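/*
 * Detach and hide every plane currently using @crtc in @state, so that
 * the load detect modeset scans out a blank screen.
 */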
12176 static int intel_modeset_disable_planes(struct drm_atomic_state *state,
12177 					struct drm_crtc *crtc)
12178 {
12179 	struct drm_plane *plane;
12180 	struct drm_plane_state *plane_state;
12181 	int ret, i;
12182 
12183 	ret = drm_atomic_add_affected_planes(state, crtc);
12184 	if (ret)
12185 		return ret;
12186 
12187 	for_each_new_plane_in_state(state, plane, plane_state, i) {
12188 		if (plane_state->crtc != crtc)
12189 			continue;
12190 
12191 		ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
12192 		if (ret)
12193 			return ret;
12194 
12195 		drm_atomic_set_fb_for_plane(plane_state, NULL);
12196 	}
12197 
12198 	return 0;
12199 }
12200 
12201 int intel_get_load_detect_pipe(struct drm_connector *connector,
12202 			       struct intel_load_detect_pipe *old,
12203 			       struct drm_modeset_acquire_ctx *ctx)
12204 {
12205 	struct intel_crtc *intel_crtc;
12206 	struct intel_encoder *intel_encoder =
12207 		intel_attached_encoder(to_intel_connector(connector));
12208 	struct drm_crtc *possible_crtc;
12209 	struct drm_encoder *encoder = &intel_encoder->base;
12210 	struct drm_crtc *crtc = NULL;
12211 	struct drm_device *dev = encoder->dev;
12212 	struct drm_i915_private *dev_priv = to_i915(dev);
12213 	struct drm_mode_config *config = &dev->mode_config;
12214 	struct drm_atomic_state *state = NULL, *restore_state = NULL;
12215 	struct drm_connector_state *connector_state;
12216 	struct intel_crtc_state *crtc_state;
12217 	int ret, i = -1;
12218 
12219 	drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
12220 		    connector->base.id, connector->name,
12221 		    encoder->base.id, encoder->name);
12222 
12223 	old->restore_state = NULL;
12224 
12225 	drm_WARN_ON(dev, !drm_modeset_is_locked(&config->connection_mutex));
12226 
12227 	/*
12228 	 * Algorithm gets a little messy:
12229 	 *
12230 	 *   - if the connector already has an assigned crtc, use it (but make
12231 	 *     sure it's on first)
12232 	 *
12233 	 *   - try to find the first unused crtc that can drive this connector,
12234 	 *     and use that if we find one
12235 	 */
12236 
12237 	/* See if we already have a CRTC for this connector */
12238 	if (connector->state->crtc) {
12239 		crtc = connector->state->crtc;
12240 
12241 		ret = drm_modeset_lock(&crtc->mutex, ctx);
12242 		if (ret)
12243 			goto fail;
12244 
12245 		/* Make sure the crtc and connector are running */
12246 		goto found;
12247 	}
12248 
12249 	/* Find an unused one (if possible) */
12250 	for_each_crtc(dev, possible_crtc) {
12251 		i++;
12252 		if (!(encoder->possible_crtcs & (1 << i)))
12253 			continue;
12254 
12255 		ret = drm_modeset_lock(&possible_crtc->mutex, ctx);
12256 		if (ret)
12257 			goto fail;
12258 
12259 		if (possible_crtc->state->enable) {
12260 			drm_modeset_unlock(&possible_crtc->mutex);
12261 			continue;
12262 		}
12263 
12264 		crtc = possible_crtc;
12265 		break;
12266 	}
12267 
12268 	/*
12269 	 * If we didn't find an unused CRTC, don't use any.
12270 	 */
12271 	if (!crtc) {
12272 		drm_dbg_kms(&dev_priv->drm,
12273 			    "no pipe available for load-detect\n");
12274 		ret = -ENODEV;
12275 		goto fail;
12276 	}
12277 
12278 found:
12279 	intel_crtc = to_intel_crtc(crtc);
12280 
12281 	state = drm_atomic_state_alloc(dev);
12282 	restore_state = drm_atomic_state_alloc(dev);
12283 	if (!state || !restore_state) {
12284 		ret = -ENOMEM;
12285 		goto fail;
12286 	}
12287 
12288 	state->acquire_ctx = ctx;
12289 	restore_state->acquire_ctx = ctx;
12290 
12291 	connector_state = drm_atomic_get_connector_state(state, connector);
12292 	if (IS_ERR(connector_state)) {
12293 		ret = PTR_ERR(connector_state);
12294 		goto fail;
12295 	}
12296 
12297 	ret = drm_atomic_set_crtc_for_connector(connector_state, crtc);
12298 	if (ret)
12299 		goto fail;
12300 
12301 	crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
12302 	if (IS_ERR(crtc_state)) {
12303 		ret = PTR_ERR(crtc_state);
12304 		goto fail;
12305 	}
12306 
12307 	crtc_state->uapi.active = true;
12308 
12309 	ret = drm_atomic_set_mode_for_crtc(&crtc_state->uapi,
12310 					   &load_detect_mode);
12311 	if (ret)
12312 		goto fail;
12313 
12314 	ret = intel_modeset_disable_planes(state, crtc);
12315 	if (ret)
12316 		goto fail;
12317 
12318 	ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
12319 	if (!ret)
12320 		ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
12321 	if (!ret)
12322 		ret = drm_atomic_add_affected_planes(restore_state, crtc);
12323 	if (ret) {
12324 		drm_dbg_kms(&dev_priv->drm,
12325 			    "Failed to create a copy of old state to restore: %i\n",
12326 			    ret);
12327 		goto fail;
12328 	}
12329 
12330 	ret = drm_atomic_commit(state);
12331 	if (ret) {
12332 		drm_dbg_kms(&dev_priv->drm,
12333 			    "failed to set mode on load-detect pipe\n");
12334 		goto fail;
12335 	}
12336 
12337 	old->restore_state = restore_state;
12338 	drm_atomic_state_put(state);
12339 
12340 	/* let the connector get through one full cycle before testing */
12341 	intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
12342 	return true;
12343 
12344 fail:
12345 	if (state) {
12346 		drm_atomic_state_put(state);
12347 		state = NULL;
12348 	}
12349 	if (restore_state) {
12350 		drm_atomic_state_put(restore_state);
12351 		restore_state = NULL;
12352 	}
12353 
12354 	if (ret == -EDEADLK)
12355 		return ret;
12356 
12357 	return false;
12358 }
12359 
12360 void intel_release_load_detect_pipe(struct drm_connector *connector,
12361 				    struct intel_load_detect_pipe *old,
12362 				    struct drm_modeset_acquire_ctx *ctx)
12363 {
12364 	struct intel_encoder *intel_encoder =
12365 		intel_attached_encoder(to_intel_connector(connector));
12366 	struct drm_i915_private *i915 = to_i915(intel_encoder->base.dev);
12367 	struct drm_encoder *encoder = &intel_encoder->base;
12368 	struct drm_atomic_state *state = old->restore_state;
12369 	int ret;
12370 
12371 	drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
12372 		    connector->base.id, connector->name,
12373 		    encoder->base.id, encoder->name);
12374 
12375 	if (!state)
12376 		return;
12377 
12378 	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
12379 	if (ret)
12380 		drm_dbg_kms(&i915->drm,
12381 			    "Couldn't release load detect pipe: %i\n", ret);
12382 	drm_atomic_state_put(state);
12383 }
12384 
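/*
 * Determine the DPLL reference clock: the VBT provided SSC frequency
 * when the PLL uses the spread spectrum input, 120 MHz on PCH split
 * platforms, 96 MHz on gen3+, and 48 MHz on gen2.
 */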
12385 static int i9xx_pll_refclk(struct drm_device *dev,
12386 			   const struct intel_crtc_state *pipe_config)
12387 {
12388 	struct drm_i915_private *dev_priv = to_i915(dev);
12389 	u32 dpll = pipe_config->dpll_hw_state.dpll;
12390 
12391 	if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
12392 		return dev_priv->vbt.lvds_ssc_freq;
12393 	else if (HAS_PCH_SPLIT(dev_priv))
12394 		return 120000;
12395 	else if (!IS_GEN(dev_priv, 2))
12396 		return 96000;
12397 	else
12398 		return 48000;
12399 }
12400 
12401 /* Returns the clock of the currently programmed mode of the given pipe. */
12402 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
12403 				struct intel_crtc_state *pipe_config)
12404 {
12405 	struct drm_device *dev = crtc->base.dev;
12406 	struct drm_i915_private *dev_priv = to_i915(dev);
12407 	enum pipe pipe = crtc->pipe;
12408 	u32 dpll = pipe_config->dpll_hw_state.dpll;
12409 	u32 fp;
12410 	struct dpll clock;
12411 	int port_clock;
12412 	int refclk = i9xx_pll_refclk(dev, pipe_config);
12413 
12414 	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
12415 		fp = pipe_config->dpll_hw_state.fp0;
12416 	else
12417 		fp = pipe_config->dpll_hw_state.fp1;
12418 
12419 	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
12420 	if (IS_PINEVIEW(dev_priv)) {
12421 		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
12422 		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
12423 	} else {
12424 		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
12425 		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
12426 	}
12427 
12428 	if (!IS_GEN(dev_priv, 2)) {
12429 		if (IS_PINEVIEW(dev_priv))
12430 			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
12431 				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
12432 		else
12433 			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
12434 			       DPLL_FPA01_P1_POST_DIV_SHIFT);
12435 
12436 		switch (dpll & DPLL_MODE_MASK) {
12437 		case DPLLB_MODE_DAC_SERIAL:
12438 			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
12439 				5 : 10;
12440 			break;
12441 		case DPLLB_MODE_LVDS:
12442 			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
12443 				7 : 14;
12444 			break;
12445 		default:
12446 			drm_dbg_kms(&dev_priv->drm,
12447 				    "Unknown DPLL mode %08x in programmed mode\n",
12448 				    (int)(dpll & DPLL_MODE_MASK));
12449 			return;
12450 		}
12451 
12452 		if (IS_PINEVIEW(dev_priv))
12453 			port_clock = pnv_calc_dpll_params(refclk, &clock);
12454 		else
12455 			port_clock = i9xx_calc_dpll_params(refclk, &clock);
12456 	} else {
12457 		u32 lvds = IS_I830(dev_priv) ? 0 : intel_de_read(dev_priv,
12458 								 LVDS);
12459 		bool is_lvds = (pipe == PIPE_B) && (lvds & LVDS_PORT_EN);
12460 
12461 		if (is_lvds) {
12462 			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
12463 				       DPLL_FPA01_P1_POST_DIV_SHIFT);
12464 
12465 			if (lvds & LVDS_CLKB_POWER_UP)
12466 				clock.p2 = 7;
12467 			else
12468 				clock.p2 = 14;
12469 		} else {
12470 			if (dpll & PLL_P1_DIVIDE_BY_TWO)
12471 				clock.p1 = 2;
12472 			else {
12473 				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
12474 					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
12475 			}
12476 			if (dpll & PLL_P2_DIVIDE_BY_4)
12477 				clock.p2 = 4;
12478 			else
12479 				clock.p2 = 2;
12480 		}
12481 
12482 		port_clock = i9xx_calc_dpll_params(refclk, &clock);
12483 	}
12484 
12485 	/*
12486 	 * This value includes pixel_multiplier. We will use
12487 	 * port_clock to compute adjusted_mode.crtc_clock in the
12488 	 * encoder's get_config() function.
12489 	 */
12490 	pipe_config->port_clock = port_clock;
12491 }
12492 
12493 int intel_dotclock_calculate(int link_freq,
12494 			     const struct intel_link_m_n *m_n)
12495 {
12496 	/*
12497 	 * The calculation for the data clock is:
12498 	 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
12499 	 * But we want to avoid losing precision if possible, so:
12500 	 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
12501 	 *
12502 	 * and the dot clock from the link M/N values is simpler:
12503 	 * dot_clock = (link_m * link_freq) / link_n
12504 	 */
12505 
12506 	if (!m_n->link_n)
12507 		return 0;
12508 
12509 	return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n);
12510 }
12511 
12512 static void ilk_pch_clock_get(struct intel_crtc *crtc,
12513 			      struct intel_crtc_state *pipe_config)
12514 {
12515 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
12516 
12517 	/* read out port_clock from the DPLL */
12518 	i9xx_crtc_clock_get(crtc, pipe_config);
12519 
12520 	/*
12521 	 * In case there is an active pipe without active ports,
12522 	 * we may need some idea for the dotclock anyway.
12523 	 * Calculate one based on the FDI configuration.
12524 	 */
12525 	pipe_config->hw.adjusted_mode.crtc_clock =
12526 		intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
12527 					 &pipe_config->fdi_m_n);
12528 }
12529 
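/*
 * Reset a crtc state to a defined baseline: transcoder/pipe/scaler
 * fields start out as "invalid" sentinels so that readout or compute
 * must fill them in explicitly.
 */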
12530 static void intel_crtc_state_reset(struct intel_crtc_state *crtc_state,
12531 				   struct intel_crtc *crtc)
12532 {
12533 	memset(crtc_state, 0, sizeof(*crtc_state));
12534 
12535 	__drm_atomic_helper_crtc_state_reset(&crtc_state->uapi, &crtc->base);
12536 
12537 	crtc_state->cpu_transcoder = INVALID_TRANSCODER;
12538 	crtc_state->master_transcoder = INVALID_TRANSCODER;
12539 	crtc_state->hsw_workaround_pipe = INVALID_PIPE;
12540 	crtc_state->output_format = INTEL_OUTPUT_FORMAT_INVALID;
12541 	crtc_state->scaler_state.scaler_id = -1;
12542 	crtc_state->mst_master_transcoder = INVALID_TRANSCODER;
12543 }
12544 
12545 static struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc)
12546 {
12547 	struct intel_crtc_state *crtc_state;
12548 
12549 	crtc_state = kmalloc(sizeof(*crtc_state), GFP_KERNEL);
12550 
12551 	if (crtc_state)
12552 		intel_crtc_state_reset(crtc_state, crtc);
12553 
12554 	return crtc_state;
12555 }
12556 
12557 /* Returns the currently programmed mode of the given encoder. */
12558 struct drm_display_mode *
12559 intel_encoder_current_mode(struct intel_encoder *encoder)
12560 {
12561 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
12562 	struct intel_crtc_state *crtc_state;
12563 	struct drm_display_mode *mode;
12564 	struct intel_crtc *crtc;
12565 	enum pipe pipe;
12566 
12567 	if (!encoder->get_hw_state(encoder, &pipe))
12568 		return NULL;
12569 
12570 	crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
12571 
12572 	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
12573 	if (!mode)
12574 		return NULL;
12575 
12576 	crtc_state = intel_crtc_state_alloc(crtc);
12577 	if (!crtc_state) {
12578 		kfree(mode);
12579 		return NULL;
12580 	}
12581 
12582 	if (!intel_crtc_get_pipe_config(crtc_state)) {
12583 		kfree(crtc_state);
12584 		kfree(mode);
12585 		return NULL;
12586 	}
12587 
12588 	intel_encoder_get_config(encoder, crtc_state);
12589 
12590 	intel_mode_from_crtc_timings(mode, &crtc_state->hw.adjusted_mode);
12591 
12592 	kfree(crtc_state);
12593 
12594 	return mode;
12595 }
12596 
12597 static void intel_crtc_destroy(struct drm_crtc *crtc)
12598 {
12599 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
12600 
12601 	drm_crtc_cleanup(crtc);
12602 	kfree(intel_crtc);
12603 }
12604 
12605 /**
12606  * intel_wm_need_update - Check whether watermarks need updating
12607  * @cur: current plane state
12608  * @new: new plane state
12609  *
12610  * Check current plane state versus the new one to determine whether
12611  * watermarks need to be recalculated.
12612  *
12613  * Returns true if the watermarks need to be recalculated, false otherwise.
12614  */
12615 static bool intel_wm_need_update(const struct intel_plane_state *cur,
12616 				 struct intel_plane_state *new)
12617 {
12618 	/* Update watermarks on tiling or size changes. */
12619 	if (new->uapi.visible != cur->uapi.visible)
12620 		return true;
12621 
12622 	if (!cur->hw.fb || !new->hw.fb)
12623 		return false;
12624 
12625 	if (cur->hw.fb->modifier != new->hw.fb->modifier ||
12626 	    cur->hw.rotation != new->hw.rotation ||
12627 	    drm_rect_width(&new->uapi.src) != drm_rect_width(&cur->uapi.src) ||
12628 	    drm_rect_height(&new->uapi.src) != drm_rect_height(&cur->uapi.src) ||
12629 	    drm_rect_width(&new->uapi.dst) != drm_rect_width(&cur->uapi.dst) ||
12630 	    drm_rect_height(&new->uapi.dst) != drm_rect_height(&cur->uapi.dst))
12631 		return true;
12632 
12633 	return false;
12634 }
12635 
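/* src coordinates are in 16.16 fixed point, dst in whole pixels */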
12636 static bool needs_scaling(const struct intel_plane_state *state)
12637 {
12638 	int src_w = drm_rect_width(&state->uapi.src) >> 16;
12639 	int src_h = drm_rect_height(&state->uapi.src) >> 16;
12640 	int dst_w = drm_rect_width(&state->uapi.dst);
12641 	int dst_h = drm_rect_height(&state->uapi.dst);
12642 
12643 	return (src_w != dst_w || src_h != dst_h);
12644 }
12645 
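/*
 * Compute the derived plane state for a crtc update: figure out
 * whether the plane is turning on/off or merely changing, set the
 * watermark/cxsr/LP watermark flags the rest of the modeset needs,
 * and, on gen9+, update the plane scaler state.
 */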
12646 int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
12647 				    struct intel_crtc_state *crtc_state,
12648 				    const struct intel_plane_state *old_plane_state,
12649 				    struct intel_plane_state *plane_state)
12650 {
12651 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
12652 	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
12653 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
12654 	bool mode_changed = needs_modeset(crtc_state);
12655 	bool was_crtc_enabled = old_crtc_state->hw.active;
12656 	bool is_crtc_enabled = crtc_state->hw.active;
12657 	bool turn_off, turn_on, visible, was_visible;
12658 	int ret;
12659 
12660 	if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
12661 		ret = skl_update_scaler_plane(crtc_state, plane_state);
12662 		if (ret)
12663 			return ret;
12664 	}
12665 
12666 	was_visible = old_plane_state->uapi.visible;
12667 	visible = plane_state->uapi.visible;
12668 
12669 	if (!was_crtc_enabled && drm_WARN_ON(&dev_priv->drm, was_visible))
12670 		was_visible = false;
12671 
12672 	/*
12673 	 * Visibility is calculated as if the crtc was on, but
12674 	 * after scaler setup everything depends on it being off
12675 	 * when the crtc isn't active.
12676 	 *
12677 	 * FIXME this is wrong for watermarks. Watermarks should also
12678 	 * be computed as if the pipe would be active. Perhaps move
12679 	 * per-plane wm computation to the .check_plane() hook, and
12680 	 * only combine the results from all planes in the current place?
12681 	 */
12682 	if (!is_crtc_enabled) {
12683 		intel_plane_set_invisible(crtc_state, plane_state);
12684 		visible = false;
12685 	}
12686 
12687 	if (!was_visible && !visible)
12688 		return 0;
12689 
12690 	turn_off = was_visible && (!visible || mode_changed);
12691 	turn_on = visible && (!was_visible || mode_changed);
12692 
12693 	drm_dbg_atomic(&dev_priv->drm,
12694 		       "[CRTC:%d:%s] with [PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
12695 		       crtc->base.base.id, crtc->base.name,
12696 		       plane->base.base.id, plane->base.name,
12697 		       was_visible, visible,
12698 		       turn_off, turn_on, mode_changed);
12699 
12700 	if (turn_on) {
12701 		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
12702 			crtc_state->update_wm_pre = true;
12703 
12704 		/* must disable cxsr around plane enable/disable */
12705 		if (plane->id != PLANE_CURSOR)
12706 			crtc_state->disable_cxsr = true;
12707 	} else if (turn_off) {
12708 		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
12709 			crtc_state->update_wm_post = true;
12710 
12711 		/* must disable cxsr around plane enable/disable */
12712 		if (plane->id != PLANE_CURSOR)
12713 			crtc_state->disable_cxsr = true;
12714 	} else if (intel_wm_need_update(old_plane_state, plane_state)) {
12715 		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) {
12716 			/* FIXME bollocks */
12717 			crtc_state->update_wm_pre = true;
12718 			crtc_state->update_wm_post = true;
12719 		}
12720 	}
12721 
12722 	if (visible || was_visible)
12723 		crtc_state->fb_bits |= plane->frontbuffer_bit;
12724 
12725 	/*
12726 	 * ILK/SNB DVSACNTR/Sprite Enable
12727 	 * IVB SPR_CTL/Sprite Enable
12728 	 * "When in Self Refresh Big FIFO mode, a write to enable the
12729 	 *  plane will be internally buffered and delayed while Big FIFO
12730 	 *  mode is exiting."
12731 	 *
12732 	 * Which means that enabling the sprite can take an extra frame
12733 	 * when we start in big FIFO mode (LP1+). Thus we need to drop
12734 	 * down to LP0 and wait for vblank in order to make sure the
12735 	 * sprite gets enabled on the next vblank after the register write.
12736 	 * Doing otherwise would risk enabling the sprite one frame after
12737 	 * we've already signalled flip completion. We can resume LP1+
12738 	 * once the sprite has been enabled.
12739 	 *
12740 	 *
12741 	 * WaCxSRDisabledForSpriteScaling:ivb
12742 	 * IVB SPR_SCALE/Scaling Enable
12743 	 * "Low Power watermarks must be disabled for at least one
12744 	 *  frame before enabling sprite scaling, and kept disabled
12745 	 *  until sprite scaling is disabled."
12746 	 *
12747 	 * ILK/SNB DVSASCALE/Scaling Enable
12748 	 * "When in Self Refresh Big FIFO mode, scaling enable will be
12749 	 *  masked off while Big FIFO mode is exiting."
12750 	 *
12751 	 * Despite the w/a only being listed for IVB we assume that
12752 	 * the ILK/SNB note has similar ramifications, hence we apply
12753 	 * the w/a on all three platforms.
12754 	 *
12755 	 * Experimental results suggest this is needed for the primary
12756 	 * plane as well, not only the sprite plane.
12757 	 */
12758 	if (plane->id != PLANE_CURSOR &&
12759 	    (IS_GEN_RANGE(dev_priv, 5, 6) ||
12760 	     IS_IVYBRIDGE(dev_priv)) &&
12761 	    (turn_on || (!needs_scaling(old_plane_state) &&
12762 			 needs_scaling(plane_state))))
12763 		crtc_state->disable_lp_wm = true;
12764 
12765 	return 0;
12766 }
12767 
12768 static bool encoders_cloneable(const struct intel_encoder *a,
12769 			       const struct intel_encoder *b)
12770 {
12771 	/* masks could be asymmetric, so check both ways */
12772 	return a == b || (a->cloneable & (1 << b->type) &&
12773 			  b->cloneable & (1 << a->type));
12774 }
12775 
12776 static bool check_single_encoder_cloning(struct intel_atomic_state *state,
12777 					 struct intel_crtc *crtc,
12778 					 struct intel_encoder *encoder)
12779 {
12780 	struct intel_encoder *source_encoder;
12781 	struct drm_connector *connector;
12782 	struct drm_connector_state *connector_state;
12783 	int i;
12784 
12785 	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
12786 		if (connector_state->crtc != &crtc->base)
12787 			continue;
12788 
12789 		source_encoder =
12790 			to_intel_encoder(connector_state->best_encoder);
12791 		if (!encoders_cloneable(encoder, source_encoder))
12792 			return false;
12793 	}
12794 
12795 	return true;
12796 }
12797 
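/*
 * Add the Y plane linked to each planar (NV12) UV plane to the atomic
 * state so the pair is always updated together, and sanity check that
 * the link is symmetric.
 */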
12798 static int icl_add_linked_planes(struct intel_atomic_state *state)
12799 {
12800 	struct intel_plane *plane, *linked;
12801 	struct intel_plane_state *plane_state, *linked_plane_state;
12802 	int i;
12803 
12804 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
12805 		linked = plane_state->planar_linked_plane;
12806 
12807 		if (!linked)
12808 			continue;
12809 
12810 		linked_plane_state = intel_atomic_get_plane_state(state, linked);
12811 		if (IS_ERR(linked_plane_state))
12812 			return PTR_ERR(linked_plane_state);
12813 
12814 		drm_WARN_ON(state->base.dev,
12815 			    linked_plane_state->planar_linked_plane != plane);
12816 		drm_WARN_ON(state->base.dev,
12817 			    linked_plane_state->planar_slave == plane_state->planar_slave);
12818 	}
12819 
12820 	return 0;
12821 }
12822 
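/*
 * On gen11+ a planar (NV12) plane needs a second plane to scan out the
 * Y component. Tear down all stale links, then pick a free Y-capable
 * plane to act as the slave for each NV12 plane and copy the relevant
 * parameters over to it.
 */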
12823 static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
12824 {
12825 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
12826 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
12827 	struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
12828 	struct intel_plane *plane, *linked;
12829 	struct intel_plane_state *plane_state;
12830 	int i;
12831 
12832 	if (INTEL_GEN(dev_priv) < 11)
12833 		return 0;
12834 
12835 	/*
12836 	 * Destroy all old plane links and make the slave plane invisible
12837 	 * in the crtc_state->active_planes mask.
12838 	 */
12839 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
12840 		if (plane->pipe != crtc->pipe || !plane_state->planar_linked_plane)
12841 			continue;
12842 
12843 		plane_state->planar_linked_plane = NULL;
12844 		if (plane_state->planar_slave && !plane_state->uapi.visible) {
12845 			crtc_state->active_planes &= ~BIT(plane->id);
12846 			crtc_state->update_planes |= BIT(plane->id);
12847 		}
12848 
12849 		plane_state->planar_slave = false;
12850 	}
12851 
12852 	if (!crtc_state->nv12_planes)
12853 		return 0;
12854 
12855 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
12856 		struct intel_plane_state *linked_state = NULL;
12857 
12858 		if (plane->pipe != crtc->pipe ||
12859 		    !(crtc_state->nv12_planes & BIT(plane->id)))
12860 			continue;
12861 
12862 		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
12863 			if (!icl_is_nv12_y_plane(dev_priv, linked->id))
12864 				continue;
12865 
12866 			if (crtc_state->active_planes & BIT(linked->id))
12867 				continue;
12868 
12869 			linked_state = intel_atomic_get_plane_state(state, linked);
12870 			if (IS_ERR(linked_state))
12871 				return PTR_ERR(linked_state);
12872 
12873 			break;
12874 		}
12875 
12876 		if (!linked_state) {
12877 			drm_dbg_kms(&dev_priv->drm,
12878 				    "Need %d free Y planes for planar YUV\n",
12879 				    hweight8(crtc_state->nv12_planes));
12880 
12881 			return -EINVAL;
12882 		}
12883 
12884 		plane_state->planar_linked_plane = linked;
12885 
12886 		linked_state->planar_slave = true;
12887 		linked_state->planar_linked_plane = plane;
12888 		crtc_state->active_planes |= BIT(linked->id);
12889 		crtc_state->update_planes |= BIT(linked->id);
12890 		drm_dbg_kms(&dev_priv->drm, "Using %s as Y plane for %s\n",
12891 			    linked->base.name, plane->base.name);
12892 
12893 		/* Copy parameters to slave plane */
12894 		linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE;
12895 		linked_state->color_ctl = plane_state->color_ctl;
12896 		linked_state->view = plane_state->view;
12897 		memcpy(linked_state->color_plane, plane_state->color_plane,
12898 		       sizeof(linked_state->color_plane));
12899 
12900 		intel_plane_copy_hw_state(linked_state, plane_state);
12901 		linked_state->uapi.src = plane_state->uapi.src;
12902 		linked_state->uapi.dst = plane_state->uapi.dst;
12903 
12904 		if (icl_is_hdr_plane(dev_priv, plane->id)) {
12905 			if (linked->id == PLANE_SPRITE5)
12906 				plane_state->cus_ctl |= PLANE_CUS_PLANE_7;
12907 			else if (linked->id == PLANE_SPRITE4)
12908 				plane_state->cus_ctl |= PLANE_CUS_PLANE_6;
12909 			else if (linked->id == PLANE_SPRITE3)
12910 				plane_state->cus_ctl |= PLANE_CUS_PLANE_5_RKL;
12911 			else if (linked->id == PLANE_SPRITE2)
12912 				plane_state->cus_ctl |= PLANE_CUS_PLANE_4_RKL;
12913 			else
12914 				MISSING_CASE(linked->id);
12915 		}
12916 	}
12917 
12918 	return 0;
12919 }
12920 
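/*
 * Detect transitions between zero and non-zero C8 plane usage, as the
 * pipe gamma enable bits may need updating in that case.
 */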
12921 static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state)
12922 {
12923 	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
12924 	struct intel_atomic_state *state =
12925 		to_intel_atomic_state(new_crtc_state->uapi.state);
12926 	const struct intel_crtc_state *old_crtc_state =
12927 		intel_atomic_get_old_crtc_state(state, crtc);
12928 
12929 	return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes;
12930 }
12931 
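/*
 * Line time in 1/8 us units: crtc_clock is in kHz, so
 * htotal * 1000 * 8 / crtc_clock yields microseconds times eight,
 * clamped to fit the 9 bit (0x1ff) register field.
 */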
12932 static u16 hsw_linetime_wm(const struct intel_crtc_state *crtc_state)
12933 {
12934 	const struct drm_display_mode *pipe_mode =
12935 		&crtc_state->hw.pipe_mode;
12936 	int linetime_wm;
12937 
12938 	if (!crtc_state->hw.enable)
12939 		return 0;
12940 
12941 	linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
12942 					pipe_mode->crtc_clock);
12943 
12944 	return min(linetime_wm, 0x1ff);
12945 }
12946 
12947 static u16 hsw_ips_linetime_wm(const struct intel_crtc_state *crtc_state,
12948 			       const struct intel_cdclk_state *cdclk_state)
12949 {
12950 	const struct drm_display_mode *pipe_mode =
12951 		&crtc_state->hw.pipe_mode;
12952 	int linetime_wm;
12953 
12954 	if (!crtc_state->hw.enable)
12955 		return 0;
12956 
12957 	linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
12958 					cdclk_state->logical.cdclk);
12959 
12960 	return min(linetime_wm, 0x1ff);
12961 }
12962 
12963 static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state)
12964 {
12965 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
12966 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
12967 	const struct drm_display_mode *pipe_mode =
12968 		&crtc_state->hw.pipe_mode;
12969 	int linetime_wm;
12970 
12971 	if (!crtc_state->hw.enable)
12972 		return 0;
12973 
12974 	linetime_wm = DIV_ROUND_UP(pipe_mode->crtc_htotal * 1000 * 8,
12975 				   crtc_state->pixel_rate);
12976 
12977 	/* Display WA #1135: BXT:ALL GLK:ALL */
12978 	if (IS_GEN9_LP(dev_priv) && dev_priv->ipc_enabled)
12979 		linetime_wm /= 2;
12980 
12981 	return min(linetime_wm, 0x1ff);
12982 }
12983 
12984 static int hsw_compute_linetime_wm(struct intel_atomic_state *state,
12985 				   struct intel_crtc *crtc)
12986 {
12987 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
12988 	struct intel_crtc_state *crtc_state =
12989 		intel_atomic_get_new_crtc_state(state, crtc);
12990 	const struct intel_cdclk_state *cdclk_state;
12991 
12992 	if (INTEL_GEN(dev_priv) >= 9)
12993 		crtc_state->linetime = skl_linetime_wm(crtc_state);
12994 	else
12995 		crtc_state->linetime = hsw_linetime_wm(crtc_state);
12996 
12997 	if (!hsw_crtc_supports_ips(crtc))
12998 		return 0;
12999 
13000 	cdclk_state = intel_atomic_get_cdclk_state(state);
13001 	if (IS_ERR(cdclk_state))
13002 		return PTR_ERR(cdclk_state);
13003 
13004 	crtc_state->ips_linetime = hsw_ips_linetime_wm(crtc_state,
13005 						       cdclk_state);
13006 
13007 	return 0;
13008 }
13009 
13010 static int intel_crtc_atomic_check(struct intel_atomic_state *state,
13011 				   struct intel_crtc *crtc)
13012 {
13013 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
13014 	struct intel_crtc_state *crtc_state =
13015 		intel_atomic_get_new_crtc_state(state, crtc);
13016 	bool mode_changed = needs_modeset(crtc_state);
13017 	int ret;
13018 
13019 	if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv) &&
13020 	    mode_changed && !crtc_state->hw.active)
13021 		crtc_state->update_wm_post = true;
13022 
13023 	if (mode_changed && crtc_state->hw.enable &&
13024 	    dev_priv->display.crtc_compute_clock &&
13025 	    !crtc_state->bigjoiner_slave &&
13026 	    !drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll)) {
13027 		ret = dev_priv->display.crtc_compute_clock(crtc, crtc_state);
13028 		if (ret)
13029 			return ret;
13030 	}
13031 
13032 	/*
13033 	 * May need to update pipe gamma enable bits
13034 	 * when C8 planes are getting enabled/disabled.
13035 	 */
13036 	if (c8_planes_changed(crtc_state))
13037 		crtc_state->uapi.color_mgmt_changed = true;
13038 
13039 	if (mode_changed || crtc_state->update_pipe ||
13040 	    crtc_state->uapi.color_mgmt_changed) {
13041 		ret = intel_color_check(crtc_state);
13042 		if (ret)
13043 			return ret;
13044 	}
13045 
13046 	if (dev_priv->display.compute_pipe_wm) {
13047 		ret = dev_priv->display.compute_pipe_wm(crtc_state);
13048 		if (ret) {
13049 			drm_dbg_kms(&dev_priv->drm,
13050 				    "Target pipe watermarks are invalid\n");
13051 			return ret;
13052 		}
13053 	}
13054 
13055 	if (dev_priv->display.compute_intermediate_wm) {
13056 		if (drm_WARN_ON(&dev_priv->drm,
13057 				!dev_priv->display.compute_pipe_wm))
13058 			return 0;
13059 
13060 		/*
13061 		 * Calculate 'intermediate' watermarks that satisfy both the
13062 		 * old state and the new state.  We can program these
13063 		 * immediately.
13064 		 */
13065 		ret = dev_priv->display.compute_intermediate_wm(crtc_state);
13066 		if (ret) {
13067 			drm_dbg_kms(&dev_priv->drm,
13068 				    "No valid intermediate pipe watermarks are possible\n");
13069 			return ret;
13070 		}
13071 	}
13072 
13073 	if (INTEL_GEN(dev_priv) >= 9) {
13074 		if (mode_changed || crtc_state->update_pipe) {
13075 			ret = skl_update_scaler_crtc(crtc_state);
13076 			if (ret)
13077 				return ret;
13078 		}
13079 
13080 		ret = intel_atomic_setup_scalers(dev_priv, crtc, crtc_state);
13081 		if (ret)
13082 			return ret;
13083 	}
13084 
13085 	if (HAS_IPS(dev_priv)) {
13086 		ret = hsw_compute_ips_config(crtc_state);
13087 		if (ret)
13088 			return ret;
13089 	}
13090 
13091 	if (INTEL_GEN(dev_priv) >= 9 ||
13092 	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
13093 		ret = hsw_compute_linetime_wm(state, crtc);
13094 		if (ret)
13095 			return ret;
13096 
13097 	}
13098 
13099 	if (!mode_changed) {
13100 		ret = intel_psr2_sel_fetch_update(state, crtc);
13101 		if (ret)
13102 			return ret;
13103 	}
13104 
13105 	return 0;
13106 }
13107 
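/*
 * Sync the connector atomic state (crtc, best_encoder and the
 * corresponding connector references) with the legacy encoder->crtc
 * pointers after hardware state readout.
 */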
13108 static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
13109 {
13110 	struct intel_connector *connector;
13111 	struct drm_connector_list_iter conn_iter;
13112 
13113 	drm_connector_list_iter_begin(dev, &conn_iter);
13114 	for_each_intel_connector_iter(connector, &conn_iter) {
13115 		if (connector->base.state->crtc)
13116 			drm_connector_put(&connector->base);
13117 
13118 		if (connector->base.encoder) {
13119 			connector->base.state->best_encoder =
13120 				connector->base.encoder;
13121 			connector->base.state->crtc =
13122 				connector->base.encoder->crtc;
13123 
13124 			drm_connector_get(&connector->base);
13125 		} else {
13126 			connector->base.state->best_encoder = NULL;
13127 			connector->base.state->crtc = NULL;
13128 		}
13129 	}
13130 	drm_connector_list_iter_end(&conn_iter);
13131 }
13132 
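/*
 * Clamp the pipe bpp to what the sink can accept, based on the
 * connector's max_bpc property rounded down to a supported depth.
 */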
13133 static int
13134 compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
13135 		      struct intel_crtc_state *pipe_config)
13136 {
13137 	struct drm_connector *connector = conn_state->connector;
13138 	struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
13139 	const struct drm_display_info *info = &connector->display_info;
13140 	int bpp;
13141 
13142 	switch (conn_state->max_bpc) {
13143 	case 6 ... 7:
13144 		bpp = 6 * 3;
13145 		break;
13146 	case 8 ... 9:
13147 		bpp = 8 * 3;
13148 		break;
13149 	case 10 ... 11:
13150 		bpp = 10 * 3;
13151 		break;
13152 	case 12 ... 16:
13153 		bpp = 12 * 3;
13154 		break;
13155 	default:
13156 		MISSING_CASE(conn_state->max_bpc);
13157 		return -EINVAL;
13158 	}
13159 
13160 	if (bpp < pipe_config->pipe_bpp) {
13161 		drm_dbg_kms(&i915->drm,
13162 			    "[CONNECTOR:%d:%s] Limiting display bpp to %d instead of "
13163 			    "EDID bpp %d, requested bpp %d, max platform bpp %d\n",
13164 			    connector->base.id, connector->name,
13165 			    bpp, 3 * info->bpc,
13166 			    3 * conn_state->max_requested_bpc,
13167 			    pipe_config->pipe_bpp);
13168 
13169 		pipe_config->pipe_bpp = bpp;
13170 	}
13171 
13172 	return 0;
13173 }
13174 
13175 static int
13176 compute_baseline_pipe_bpp(struct intel_crtc *crtc,
13177 			  struct intel_crtc_state *pipe_config)
13178 {
13179 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
13180 	struct drm_atomic_state *state = pipe_config->uapi.state;
13181 	struct drm_connector *connector;
13182 	struct drm_connector_state *connector_state;
13183 	int bpp, i;
13184 
13185 	if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
13186 	    IS_CHERRYVIEW(dev_priv)))
13187 		bpp = 10*3;
13188 	else if (INTEL_GEN(dev_priv) >= 5)
13189 		bpp = 12*3;
13190 	else
13191 		bpp = 8*3;
13192 
13193 	pipe_config->pipe_bpp = bpp;
13194 
13195 	/* Clamp display bpp to connector max bpp */
13196 	for_each_new_connector_in_state(state, connector, connector_state, i) {
13197 		int ret;
13198 
13199 		if (connector_state->crtc != &crtc->base)
13200 			continue;
13201 
13202 		ret = compute_sink_pipe_bpp(connector_state, pipe_config);
13203 		if (ret)
13204 			return ret;
13205 	}
13206 
13207 	return 0;
13208 }
13209 
13210 static void intel_dump_crtc_timings(struct drm_i915_private *i915,
13211 				    const struct drm_display_mode *mode)
13212 {
13213 	drm_dbg_kms(&i915->drm, "crtc timings: %d %d %d %d %d %d %d %d %d, "
13214 		    "type: 0x%x flags: 0x%x\n",
13215 		    mode->crtc_clock,
13216 		    mode->crtc_hdisplay, mode->crtc_hsync_start,
13217 		    mode->crtc_hsync_end, mode->crtc_htotal,
13218 		    mode->crtc_vdisplay, mode->crtc_vsync_start,
13219 		    mode->crtc_vsync_end, mode->crtc_vtotal,
13220 		    mode->type, mode->flags);
13221 }
13222 
13223 static void
13224 intel_dump_m_n_config(const struct intel_crtc_state *pipe_config,
13225 		      const char *id, unsigned int lane_count,
13226 		      const struct intel_link_m_n *m_n)
13227 {
13228 	struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
13229 
13230 	drm_dbg_kms(&i915->drm,
13231 		    "%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
13232 		    id, lane_count,
13233 		    m_n->gmch_m, m_n->gmch_n,
13234 		    m_n->link_m, m_n->link_n, m_n->tu);
13235 }
13236 
13237 static void
13238 intel_dump_infoframe(struct drm_i915_private *dev_priv,
13239 		     const union hdmi_infoframe *frame)
13240 {
13241 	if (!drm_debug_enabled(DRM_UT_KMS))
13242 		return;
13243 
13244 	hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, frame);
13245 }
13246 
13247 static void
13248 intel_dump_dp_vsc_sdp(struct drm_i915_private *dev_priv,
13249 		      const struct drm_dp_vsc_sdp *vsc)
13250 {
13251 	if (!drm_debug_enabled(DRM_UT_KMS))
13252 		return;
13253 
13254 	drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, vsc);
13255 }
13256 
13257 #define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x
13258 
13259 static const char * const output_type_str[] = {
13260 	OUTPUT_TYPE(UNUSED),
13261 	OUTPUT_TYPE(ANALOG),
13262 	OUTPUT_TYPE(DVO),
13263 	OUTPUT_TYPE(SDVO),
13264 	OUTPUT_TYPE(LVDS),
13265 	OUTPUT_TYPE(TVOUT),
13266 	OUTPUT_TYPE(HDMI),
13267 	OUTPUT_TYPE(DP),
13268 	OUTPUT_TYPE(EDP),
13269 	OUTPUT_TYPE(DSI),
13270 	OUTPUT_TYPE(DDI),
13271 	OUTPUT_TYPE(DP_MST),
13272 };
13273 
13274 #undef OUTPUT_TYPE
13275 
13276 static void snprintf_output_types(char *buf, size_t len,
13277 				  unsigned int output_types)
13278 {
13279 	char *str = buf;
13280 	int i;
13281 
13282 	str[0] = '\0';
13283 
13284 	for (i = 0; i < ARRAY_SIZE(output_type_str); i++) {
13285 		int r;
13286 
13287 		if ((output_types & BIT(i)) == 0)
13288 			continue;
13289 
13290 		r = snprintf(str, len, "%s%s",
13291 			     str != buf ? "," : "", output_type_str[i]);
13292 		if (r >= len)
13293 			break;
13294 		str += r;
13295 		len -= r;
13296 
13297 		output_types &= ~BIT(i);
13298 	}
13299 
13300 	WARN_ON_ONCE(output_types != 0);
13301 }
13302 
13303 static const char * const output_format_str[] = {
13304 	[INTEL_OUTPUT_FORMAT_INVALID] = "Invalid",
13305 	[INTEL_OUTPUT_FORMAT_RGB] = "RGB",
13306 	[INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0",
13307 	[INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4",
13308 };
13309 
13310 static const char *output_formats(enum intel_output_format format)
13311 {
13312 	if (format >= ARRAY_SIZE(output_format_str))
13313 		format = INTEL_OUTPUT_FORMAT_INVALID;
13314 	return output_format_str[format];
13315 }
13316 
13317 static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
13318 {
13319 	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
13320 	struct drm_i915_private *i915 = to_i915(plane->base.dev);
13321 	const struct drm_framebuffer *fb = plane_state->hw.fb;
13322 	struct drm_format_name_buf format_name;
13323 
13324 	if (!fb) {
13325 		drm_dbg_kms(&i915->drm,
13326 			    "[PLANE:%d:%s] fb: [NOFB], visible: %s\n",
13327 			    plane->base.base.id, plane->base.name,
13328 			    yesno(plane_state->uapi.visible));
13329 		return;
13330 	}
13331 
13332 	drm_dbg_kms(&i915->drm,
13333 		    "[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %s modifier = 0x%llx, visible: %s\n",
13334 		    plane->base.base.id, plane->base.name,
13335 		    fb->base.id, fb->width, fb->height,
13336 		    drm_get_format_name(fb->format->format, &format_name),
13337 		    fb->modifier, yesno(plane_state->uapi.visible));
13338 	drm_dbg_kms(&i915->drm, "\trotation: 0x%x, scaler: %d\n",
13339 		    plane_state->hw.rotation, plane_state->scaler_id);
13340 	if (plane_state->uapi.visible)
13341 		drm_dbg_kms(&i915->drm,
13342 			    "\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n",
13343 			    DRM_RECT_FP_ARG(&plane_state->uapi.src),
13344 			    DRM_RECT_ARG(&plane_state->uapi.dst));
13345 }
13346 
13347 static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
13348 				   struct intel_atomic_state *state,
13349 				   const char *context)
13350 {
13351 	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
13352 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
13353 	const struct intel_plane_state *plane_state;
13354 	struct intel_plane *plane;
13355 	char buf[64];
13356 	int i;
13357 
13358 	drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] enable: %s %s\n",
13359 		    crtc->base.base.id, crtc->base.name,
13360 		    yesno(pipe_config->hw.enable), context);
13361 
13362 	if (!pipe_config->hw.enable)
13363 		goto dump_planes;
13364 
13365 	snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
13366 	drm_dbg_kms(&dev_priv->drm,
13367 		    "active: %s, output_types: %s (0x%x), output format: %s\n",
13368 		    yesno(pipe_config->hw.active),
13369 		    buf, pipe_config->output_types,
13370 		    output_formats(pipe_config->output_format));
13371 
13372 	drm_dbg_kms(&dev_priv->drm,
13373 		    "cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
13374 		    transcoder_name(pipe_config->cpu_transcoder),
13375 		    pipe_config->pipe_bpp, pipe_config->dither);
13376 
13377 	drm_dbg_kms(&dev_priv->drm, "MST master transcoder: %s\n",
13378 		    transcoder_name(pipe_config->mst_master_transcoder));
13379 
13380 	drm_dbg_kms(&dev_priv->drm,
13381 		    "port sync: master transcoder: %s, slave transcoder bitmask = 0x%x\n",
13382 		    transcoder_name(pipe_config->master_transcoder),
13383 		    pipe_config->sync_mode_slaves_mask);
13384 
13385 	drm_dbg_kms(&dev_priv->drm, "bigjoiner: %s\n",
13386 		    pipe_config->bigjoiner_slave ? "slave" :
13387 		    pipe_config->bigjoiner ? "master" : "no");
13388 
13389 	if (pipe_config->has_pch_encoder)
13390 		intel_dump_m_n_config(pipe_config, "fdi",
13391 				      pipe_config->fdi_lanes,
13392 				      &pipe_config->fdi_m_n);
13393 
13394 	if (intel_crtc_has_dp_encoder(pipe_config)) {
13395 		intel_dump_m_n_config(pipe_config, "dp m_n",
13396 				      pipe_config->lane_count, &pipe_config->dp_m_n);
13397 		if (pipe_config->has_drrs)
13398 			intel_dump_m_n_config(pipe_config, "dp m2_n2",
13399 					      pipe_config->lane_count,
13400 					      &pipe_config->dp_m2_n2);
13401 	}
13402 
13403 	drm_dbg_kms(&dev_priv->drm,
13404 		    "audio: %i, infoframes: %i, infoframes enabled: 0x%x\n",
13405 		    pipe_config->has_audio, pipe_config->has_infoframe,
13406 		    pipe_config->infoframes.enable);
13407 
13408 	if (pipe_config->infoframes.enable &
13409 	    intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL))
13410 		drm_dbg_kms(&dev_priv->drm, "GCP: 0x%x\n",
13411 			    pipe_config->infoframes.gcp);
13412 	if (pipe_config->infoframes.enable &
13413 	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI))
13414 		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.avi);
13415 	if (pipe_config->infoframes.enable &
13416 	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD))
13417 		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.spd);
13418 	if (pipe_config->infoframes.enable &
13419 	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR))
13420 		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi);
13421 	if (pipe_config->infoframes.enable &
13422 	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_DRM))
13423 		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.drm);
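	/*
	 * Note: infoframes.drm is dumped again below, since platforms that
	 * carry the DRM infoframe in the GMP DIP report it through the
	 * HDMI_PACKET_TYPE_GAMUT_METADATA enable bit instead.
	 */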
13424 	if (pipe_config->infoframes.enable &
13425 	    intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA))
13426 		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.drm);
13427 	if (pipe_config->infoframes.enable &
13428 	    intel_hdmi_infoframe_enable(DP_SDP_VSC))
13429 		intel_dump_dp_vsc_sdp(dev_priv, &pipe_config->infoframes.vsc);
13430 
13431 	drm_dbg_kms(&dev_priv->drm, "requested mode:\n");
13432 	drm_mode_debug_printmodeline(&pipe_config->hw.mode);
13433 	drm_dbg_kms(&dev_priv->drm, "adjusted mode:\n");
13434 	drm_mode_debug_printmodeline(&pipe_config->hw.adjusted_mode);
13435 	intel_dump_crtc_timings(dev_priv, &pipe_config->hw.adjusted_mode);
13436 	drm_dbg_kms(&dev_priv->drm, "pipe mode:\n");
13437 	drm_mode_debug_printmodeline(&pipe_config->hw.pipe_mode);
13438 	intel_dump_crtc_timings(dev_priv, &pipe_config->hw.pipe_mode);
13439 	drm_dbg_kms(&dev_priv->drm,
13440 		    "port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
13441 		    pipe_config->port_clock,
13442 		    pipe_config->pipe_src_w, pipe_config->pipe_src_h,
13443 		    pipe_config->pixel_rate);
13444 
13445 	drm_dbg_kms(&dev_priv->drm, "linetime: %d, ips linetime: %d\n",
13446 		    pipe_config->linetime, pipe_config->ips_linetime);
13447 
13448 	if (INTEL_GEN(dev_priv) >= 9)
13449 		drm_dbg_kms(&dev_priv->drm,
13450 			    "num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
13451 			    crtc->num_scalers,
13452 			    pipe_config->scaler_state.scaler_users,
13453 			    pipe_config->scaler_state.scaler_id);
13454 
13455 	if (HAS_GMCH(dev_priv))
13456 		drm_dbg_kms(&dev_priv->drm,
13457 			    "gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
13458 			    pipe_config->gmch_pfit.control,
13459 			    pipe_config->gmch_pfit.pgm_ratios,
13460 			    pipe_config->gmch_pfit.lvds_border_bits);
13461 	else
13462 		drm_dbg_kms(&dev_priv->drm,
13463 			    "pch pfit: " DRM_RECT_FMT ", %s, force thru: %s\n",
13464 			    DRM_RECT_ARG(&pipe_config->pch_pfit.dst),
13465 			    enableddisabled(pipe_config->pch_pfit.enabled),
13466 			    yesno(pipe_config->pch_pfit.force_thru));
13467 
13468 	drm_dbg_kms(&dev_priv->drm, "ips: %i, double wide: %i\n",
13469 		    pipe_config->ips_enabled, pipe_config->double_wide);
13470 
13471 	intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);
13472 
13473 	if (IS_CHERRYVIEW(dev_priv))
13474 		drm_dbg_kms(&dev_priv->drm,
13475 			    "cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
13476 			    pipe_config->cgm_mode, pipe_config->gamma_mode,
13477 			    pipe_config->gamma_enable, pipe_config->csc_enable);
13478 	else
13479 		drm_dbg_kms(&dev_priv->drm,
13480 			    "csc_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
13481 			    pipe_config->csc_mode, pipe_config->gamma_mode,
13482 			    pipe_config->gamma_enable, pipe_config->csc_enable);
13483 
13484 	drm_dbg_kms(&dev_priv->drm, "degamma lut: %d entries, gamma lut: %d entries\n",
13485 		    pipe_config->hw.degamma_lut ?
13486 		    drm_color_lut_size(pipe_config->hw.degamma_lut) : 0,
13487 		    pipe_config->hw.gamma_lut ?
13488 		    drm_color_lut_size(pipe_config->hw.gamma_lut) : 0);
13489 
13490 dump_planes:
13491 	if (!state)
13492 		return;
13493 
13494 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
13495 		if (plane->pipe == crtc->pipe)
13496 			intel_dump_plane_state(plane_state);
13497 	}
13498 }
13499 
13500 static bool check_digital_port_conflicts(struct intel_atomic_state *state)
13501 {
13502 	struct drm_device *dev = state->base.dev;
13503 	struct drm_connector *connector;
13504 	struct drm_connector_list_iter conn_iter;
13505 	unsigned int used_ports = 0;
13506 	unsigned int used_mst_ports = 0;
13507 	bool ret = true;
13508 
13509 	/*
13510 	 * We're going to peek into connector->state,
13511 	 * hence connection_mutex must be held.
13512 	 */
13513 	drm_modeset_lock_assert_held(&dev->mode_config.connection_mutex);
13514 
13515 	/*
13516 	 * Walk the connector list instead of the encoder
13517 	 * list to detect the problem on ddi platforms
13518 	 * where there's just one encoder per digital port.
13519 	 */
13520 	drm_connector_list_iter_begin(dev, &conn_iter);
13521 	drm_for_each_connector_iter(connector, &conn_iter) {
13522 		struct drm_connector_state *connector_state;
13523 		struct intel_encoder *encoder;
13524 
13525 		connector_state =
13526 			drm_atomic_get_new_connector_state(&state->base,
13527 							   connector);
13528 		if (!connector_state)
13529 			connector_state = connector->state;
13530 
13531 		if (!connector_state->best_encoder)
13532 			continue;
13533 
13534 		encoder = to_intel_encoder(connector_state->best_encoder);
13535 
13536 		drm_WARN_ON(dev, !connector_state->crtc);
13537 
13538 		switch (encoder->type) {
13539 		case INTEL_OUTPUT_DDI:
13540 			if (drm_WARN_ON(dev, !HAS_DDI(to_i915(dev))))
13541 				break;
13542 			fallthrough;
13543 		case INTEL_OUTPUT_DP:
13544 		case INTEL_OUTPUT_HDMI:
13545 		case INTEL_OUTPUT_EDP:
13546 			/* the same port mustn't appear more than once */
13547 			if (used_ports & BIT(encoder->port))
13548 				ret = false;
13549 
13550 			used_ports |= BIT(encoder->port);
13551 			break;
13552 		case INTEL_OUTPUT_DP_MST:
13553 			used_mst_ports |= BIT(encoder->port);
13555 			break;
13556 		default:
13557 			break;
13558 		}
13559 	}
13560 	drm_connector_list_iter_end(&conn_iter);
13561 
13562 	/* can't mix MST and SST/HDMI on the same port */
13563 	if (used_ports & used_mst_ports)
13564 		return false;
13565 
13566 	return ret;
13567 }
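
/*
 * Worked example for the conflict check above (hypothetical state): an SST
 * DP connector and an MST stream both routed to port B would leave
 *
 *	used_ports     == BIT(PORT_B)
 *	used_mst_ports == BIT(PORT_B)
 *
 * so used_ports & used_mst_ports is non-zero and the state is rejected;
 * MST and SST/HDMI cannot share a digital port.
 */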
13568 
13569 static void
13570 intel_crtc_copy_uapi_to_hw_state_nomodeset(struct intel_atomic_state *state,
13571 					   struct intel_crtc_state *crtc_state)
13572 {
13573 	const struct intel_crtc_state *from_crtc_state = crtc_state;
13574 
13575 	if (crtc_state->bigjoiner_slave) {
13576 		from_crtc_state = intel_atomic_get_new_crtc_state(state,
13577 								  crtc_state->bigjoiner_linked_crtc);
13578 
13579 		/* No need to copy state if the master state is unchanged */
13580 		if (!from_crtc_state)
13581 			return;
13582 	}
13583 
13584 	intel_crtc_copy_color_blobs(crtc_state, from_crtc_state);
13585 }
13586 
13587 static void
13588 intel_crtc_copy_uapi_to_hw_state(struct intel_atomic_state *state,
13589 				 struct intel_crtc_state *crtc_state)
13590 {
13591 	crtc_state->hw.enable = crtc_state->uapi.enable;
13592 	crtc_state->hw.active = crtc_state->uapi.active;
13593 	crtc_state->hw.mode = crtc_state->uapi.mode;
13594 	crtc_state->hw.adjusted_mode = crtc_state->uapi.adjusted_mode;
13595 	crtc_state->hw.scaling_filter = crtc_state->uapi.scaling_filter;
13596 
13597 	intel_crtc_copy_uapi_to_hw_state_nomodeset(state, crtc_state);
13598 }
13599 
13600 static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state)
13601 {
13602 	if (crtc_state->bigjoiner_slave)
13603 		return;
13604 
13605 	crtc_state->uapi.enable = crtc_state->hw.enable;
13606 	crtc_state->uapi.active = crtc_state->hw.active;
13607 	drm_WARN_ON(crtc_state->uapi.crtc->dev,
13608 		    drm_atomic_set_mode_for_crtc(&crtc_state->uapi, &crtc_state->hw.mode) < 0);
13609 
13610 	crtc_state->uapi.adjusted_mode = crtc_state->hw.adjusted_mode;
13611 	crtc_state->uapi.scaling_filter = crtc_state->hw.scaling_filter;
13612 
13613 	/* copy color blobs to uapi */
13614 	drm_property_replace_blob(&crtc_state->uapi.degamma_lut,
13615 				  crtc_state->hw.degamma_lut);
13616 	drm_property_replace_blob(&crtc_state->uapi.gamma_lut,
13617 				  crtc_state->hw.gamma_lut);
13618 	drm_property_replace_blob(&crtc_state->uapi.ctm,
13619 				  crtc_state->hw.ctm);
13620 }
13621 
13622 static int
13623 copy_bigjoiner_crtc_state(struct intel_crtc_state *crtc_state,
13624 			  const struct intel_crtc_state *from_crtc_state)
13625 {
13626 	struct intel_crtc_state *saved_state;
13627 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
13628 
13629 	saved_state = kmemdup(from_crtc_state, sizeof(*saved_state), GFP_KERNEL);
13630 	if (!saved_state)
13631 		return -ENOMEM;
13632 
13633 	saved_state->uapi = crtc_state->uapi;
13634 	saved_state->scaler_state = crtc_state->scaler_state;
13635 	saved_state->shared_dpll = crtc_state->shared_dpll;
13636 	saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
13637 	saved_state->crc_enabled = crtc_state->crc_enabled;
13638 
13639 	intel_crtc_free_hw_state(crtc_state);
13640 	memcpy(crtc_state, saved_state, sizeof(*crtc_state));
13641 	kfree(saved_state);
13642 
13643 	/* Re-init hw state */
13644 	memset(&crtc_state->hw, 0, sizeof(crtc_state->hw));
13645 	crtc_state->hw.enable = from_crtc_state->hw.enable;
13646 	crtc_state->hw.active = from_crtc_state->hw.active;
13647 	crtc_state->hw.pipe_mode = from_crtc_state->hw.pipe_mode;
13648 	crtc_state->hw.adjusted_mode = from_crtc_state->hw.adjusted_mode;
13649 
13650 	/* Some fixups */
13651 	crtc_state->uapi.mode_changed = from_crtc_state->uapi.mode_changed;
13652 	crtc_state->uapi.connectors_changed = from_crtc_state->uapi.connectors_changed;
13653 	crtc_state->uapi.active_changed = from_crtc_state->uapi.active_changed;
13654 	crtc_state->nv12_planes = crtc_state->c8_planes = crtc_state->update_planes = 0;
13655 	crtc_state->bigjoiner_linked_crtc = to_intel_crtc(from_crtc_state->uapi.crtc);
13656 	crtc_state->bigjoiner_slave = true;
13657 	crtc_state->cpu_transcoder = (enum transcoder)crtc->pipe;
13658 	crtc_state->has_audio = false;
13659 
13660 	return 0;
13661 }
13662 
13663 static int
13664 intel_crtc_prepare_cleared_state(struct intel_atomic_state *state,
13665 				 struct intel_crtc_state *crtc_state)
13666 {
13667 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
13668 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
13669 	struct intel_crtc_state *saved_state;
13670 
13671 	saved_state = intel_crtc_state_alloc(crtc);
13672 	if (!saved_state)
13673 		return -ENOMEM;
13674 
13675 	/* free the old crtc_state->hw members */
13676 	intel_crtc_free_hw_state(crtc_state);
13677 
13678 	/* FIXME: before the switch to atomic started, a new pipe_config was
13679 	 * kzalloc'd. Code that depends on any field being zero should be
13680 	 * fixed, so that the crtc_state can be safely duplicated. For now,
13681 	 * only fields that are known not to cause problems are preserved. */
13682 
13683 	saved_state->uapi = crtc_state->uapi;
13684 	saved_state->scaler_state = crtc_state->scaler_state;
13685 	saved_state->shared_dpll = crtc_state->shared_dpll;
13686 	saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
13687 	memcpy(saved_state->icl_port_dplls, crtc_state->icl_port_dplls,
13688 	       sizeof(saved_state->icl_port_dplls));
13689 	saved_state->crc_enabled = crtc_state->crc_enabled;
13690 	if (IS_G4X(dev_priv) ||
13691 	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
13692 		saved_state->wm = crtc_state->wm;
13693 
13694 	memcpy(crtc_state, saved_state, sizeof(*crtc_state));
13695 	kfree(saved_state);
13696 
13697 	intel_crtc_copy_uapi_to_hw_state(state, crtc_state);
13698 
13699 	return 0;
13700 }
13701 
13702 static int
13703 intel_modeset_pipe_config(struct intel_atomic_state *state,
13704 			  struct intel_crtc_state *pipe_config)
13705 {
13706 	struct drm_crtc *crtc = pipe_config->uapi.crtc;
13707 	struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
13708 	struct drm_connector *connector;
13709 	struct drm_connector_state *connector_state;
13710 	int base_bpp, ret, i;
13711 	bool retry = true;
13712 
13713 	pipe_config->cpu_transcoder =
13714 		(enum transcoder) to_intel_crtc(crtc)->pipe;
13715 
13716 	/*
13717 	 * Sanitize sync polarity flags based on requested ones. If neither
13718 	 * positive nor negative polarity is requested, treat this as meaning
13719 	 * negative polarity.
13720 	 */
13721 	if (!(pipe_config->hw.adjusted_mode.flags &
13722 	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
13723 		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
13724 
13725 	if (!(pipe_config->hw.adjusted_mode.flags &
13726 	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
13727 		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
13728 
13729 	ret = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
13730 					pipe_config);
13731 	if (ret)
13732 		return ret;
13733 
13734 	base_bpp = pipe_config->pipe_bpp;
13735 
13736 	/*
13737 	 * Determine the real pipe dimensions. Note that stereo modes can
13738 	 * increase the actual pipe size due to the frame doubling and
13739 	 * insertion of additional space for blanks between the frames. This
13740 	 * is stored in the crtc timings. We use the requested mode to do this
13741 	 * computation to clearly distinguish it from the adjusted mode, which
13742 	 * can be changed by the connectors in the below retry loop.
13743 	 */
13744 	drm_mode_get_hv_timing(&pipe_config->hw.mode,
13745 			       &pipe_config->pipe_src_w,
13746 			       &pipe_config->pipe_src_h);
13747 
13748 	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
13749 		struct intel_encoder *encoder =
13750 			to_intel_encoder(connector_state->best_encoder);
13751 
13752 		if (connector_state->crtc != crtc)
13753 			continue;
13754 
13755 		if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
13756 			drm_dbg_kms(&i915->drm,
13757 				    "rejecting invalid cloning configuration\n");
13758 			return -EINVAL;
13759 		}
13760 
13761 		/*
13762 		 * Determine output_types before calling the .compute_config()
13763 		 * hooks so that the hooks can use this information safely.
13764 		 */
13765 		if (encoder->compute_output_type)
13766 			pipe_config->output_types |=
13767 				BIT(encoder->compute_output_type(encoder, pipe_config,
13768 								 connector_state));
13769 		else
13770 			pipe_config->output_types |= BIT(encoder->type);
13771 	}
13772 
13773 encoder_retry:
13774 	/* Ensure the port clock defaults are reset when retrying. */
13775 	pipe_config->port_clock = 0;
13776 	pipe_config->pixel_multiplier = 1;
13777 
13778 	/* Fill in default crtc timings, allow encoders to overwrite them. */
13779 	drm_mode_set_crtcinfo(&pipe_config->hw.adjusted_mode,
13780 			      CRTC_STEREO_DOUBLE);
13781 
13782 	/* Pass our mode to the connectors and the CRTC to give them a chance to
13783 	 * adjust it according to limitations or connector properties, and also
13784 	 * a chance to reject the mode entirely.
13785 	 */
13786 	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
13787 		struct intel_encoder *encoder =
13788 			to_intel_encoder(connector_state->best_encoder);
13789 
13790 		if (connector_state->crtc != crtc)
13791 			continue;
13792 
13793 		ret = encoder->compute_config(encoder, pipe_config,
13794 					      connector_state);
13795 		if (ret < 0) {
13796 			if (ret != -EDEADLK)
13797 				drm_dbg_kms(&i915->drm,
13798 					    "Encoder config failure: %d\n",
13799 					    ret);
13800 			return ret;
13801 		}
13802 	}
13803 
13804 	/* Set default port clock if not overwritten by the encoder. Needs to be
13805 	 * done afterwards in case the encoder adjusts the mode. */
13806 	if (!pipe_config->port_clock)
13807 		pipe_config->port_clock = pipe_config->hw.adjusted_mode.crtc_clock
13808 			* pipe_config->pixel_multiplier;
13809 
13810 	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
13811 	if (ret == -EDEADLK)
13812 		return ret;
13813 	if (ret < 0) {
13814 		drm_dbg_kms(&i915->drm, "CRTC fixup failed\n");
13815 		return ret;
13816 	}
13817 
13818 	if (ret == RETRY) {
13819 		if (drm_WARN(&i915->drm, !retry,
13820 			     "loop in pipe configuration computation\n"))
13821 			return -EINVAL;
13822 
13823 		drm_dbg_kms(&i915->drm, "CRTC bw constrained, retrying\n");
13824 		retry = false;
13825 		goto encoder_retry;
13826 	}
13827 
13828 	/* Dithering seems not to pass through bits correctly when it should, so
13829 	 * only enable it on 6bpc panels and when it's not a compliance
13830 	 * test requesting 6bpc video pattern.
13831 	 */
13832 	pipe_config->dither = (pipe_config->pipe_bpp == 6*3) &&
13833 		!pipe_config->dither_force_disable;
13834 	drm_dbg_kms(&i915->drm,
13835 		    "hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
13836 		    base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
13837 
13838 	return 0;
13839 }
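
/*
 * Illustrative flow of the encoder_retry loop above: on FDI-constrained
 * platforms intel_crtc_compute_config() may lower pipe_bpp and return
 * RETRY, in which case the encoders' .compute_config() hooks are re-run
 * once with the reduced bpp ("CRTC bw constrained, retrying"); a second
 * RETRY trips the "loop in pipe configuration computation" warning
 * instead of looping forever.
 */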
13840 
13841 static int
13842 intel_modeset_pipe_config_late(struct intel_crtc_state *crtc_state)
13843 {
13844 	struct intel_atomic_state *state =
13845 		to_intel_atomic_state(crtc_state->uapi.state);
13846 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
13847 	struct drm_connector_state *conn_state;
13848 	struct drm_connector *connector;
13849 	int i;
13850 
13851 	for_each_new_connector_in_state(&state->base, connector,
13852 					conn_state, i) {
13853 		struct intel_encoder *encoder =
13854 			to_intel_encoder(conn_state->best_encoder);
13855 		int ret;
13856 
13857 		if (conn_state->crtc != &crtc->base ||
13858 		    !encoder->compute_config_late)
13859 			continue;
13860 
13861 		ret = encoder->compute_config_late(encoder, crtc_state,
13862 						   conn_state);
13863 		if (ret)
13864 			return ret;
13865 	}
13866 
13867 	return 0;
13868 }
13869 
13870 bool intel_fuzzy_clock_check(int clock1, int clock2)
13871 {
13872 	int diff;
13873 
13874 	if (clock1 == clock2)
13875 		return true;
13876 
13877 	if (!clock1 || !clock2)
13878 		return false;
13879 
13880 	diff = abs(clock1 - clock2);
13881 
13882 	if ((diff + clock1 + clock2) * 100 / (clock1 + clock2) < 105)
13883 		return true;
13884 
13885 	return false;
13886 }
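
/*
 * Worked example (hypothetical values): the check passes while the delta
 * stays below 5% of the sum of the clocks, i.e. roughly a 10% relative
 * tolerance:
 *
 *	intel_fuzzy_clock_check(100000, 104000);
 *	// (4000 + 204000) * 100 / 204000 == 101 < 105 -> true
 *	intel_fuzzy_clock_check(100000, 120000);
 *	// (20000 + 220000) * 100 / 220000 == 109 -> false
 */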
13887 
13888 static bool
13889 intel_compare_m_n(unsigned int m, unsigned int n,
13890 		  unsigned int m2, unsigned int n2,
13891 		  bool exact)
13892 {
13893 	if (m == m2 && n == n2)
13894 		return true;
13895 
13896 	if (exact || !m || !n || !m2 || !n2)
13897 		return false;
13898 
13899 	BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
13900 
13901 	if (n > n2) {
13902 		while (n > n2) {
13903 			m2 <<= 1;
13904 			n2 <<= 1;
13905 		}
13906 	} else if (n < n2) {
13907 		while (n < n2) {
13908 			m <<= 1;
13909 			n <<= 1;
13910 		}
13911 	}
13912 
13913 	if (n != n2)
13914 		return false;
13915 
13916 	return intel_fuzzy_clock_check(m, m2);
13917 }
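
/*
 * Illustrative example (hypothetical values): in the non-exact case the
 * ratio with the smaller N is doubled until the Ns line up, so 5/8 and
 * 10/16 compare equal:
 *
 *	intel_compare_m_n(10, 16, 5, 8, false);
 *	// m2/n2 doubles once to 10/16, then
 *	// intel_fuzzy_clock_check(10, 10) -> true
 *
 * Ns that are not a power-of-two multiple apart (e.g. 16 vs. 24) can
 * never be equalized, and the helper returns false.
 */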
13918 
13919 static bool
13920 intel_compare_link_m_n(const struct intel_link_m_n *m_n,
13921 		       const struct intel_link_m_n *m2_n2,
13922 		       bool exact)
13923 {
13924 	return m_n->tu == m2_n2->tu &&
13925 		intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
13926 				  m2_n2->gmch_m, m2_n2->gmch_n, exact) &&
13927 		intel_compare_m_n(m_n->link_m, m_n->link_n,
13928 				  m2_n2->link_m, m2_n2->link_n, exact);
13929 }
13930 
13931 static bool
13932 intel_compare_infoframe(const union hdmi_infoframe *a,
13933 			const union hdmi_infoframe *b)
13934 {
13935 	return memcmp(a, b, sizeof(*a)) == 0;
13936 }
13937 
13938 static bool
13939 intel_compare_dp_vsc_sdp(const struct drm_dp_vsc_sdp *a,
13940 			 const struct drm_dp_vsc_sdp *b)
13941 {
13942 	return memcmp(a, b, sizeof(*a)) == 0;
13943 }
13944 
13945 static void
13946 pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv,
13947 			       bool fastset, const char *name,
13948 			       const union hdmi_infoframe *a,
13949 			       const union hdmi_infoframe *b)
13950 {
13951 	if (fastset) {
13952 		if (!drm_debug_enabled(DRM_UT_KMS))
13953 			return;
13954 
13955 		drm_dbg_kms(&dev_priv->drm,
13956 			    "fastset mismatch in %s infoframe\n", name);
13957 		drm_dbg_kms(&dev_priv->drm, "expected:\n");
13958 		hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a);
13959 		drm_dbg_kms(&dev_priv->drm, "found:\n");
13960 		hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b);
13961 	} else {
13962 		drm_err(&dev_priv->drm, "mismatch in %s infoframe\n", name);
13963 		drm_err(&dev_priv->drm, "expected:\n");
13964 		hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a);
13965 		drm_err(&dev_priv->drm, "found:\n");
13966 		hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b);
13967 	}
13968 }
13969 
13970 static void
13971 pipe_config_dp_vsc_sdp_mismatch(struct drm_i915_private *dev_priv,
13972 				bool fastset, const char *name,
13973 				const struct drm_dp_vsc_sdp *a,
13974 				const struct drm_dp_vsc_sdp *b)
13975 {
13976 	if (fastset) {
13977 		if (!drm_debug_enabled(DRM_UT_KMS))
13978 			return;
13979 
13980 		drm_dbg_kms(&dev_priv->drm,
13981 			    "fastset mismatch in %s dp sdp\n", name);
13982 		drm_dbg_kms(&dev_priv->drm, "expected:\n");
13983 		drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, a);
13984 		drm_dbg_kms(&dev_priv->drm, "found:\n");
13985 		drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, b);
13986 	} else {
13987 		drm_err(&dev_priv->drm, "mismatch in %s dp sdp\n", name);
13988 		drm_err(&dev_priv->drm, "expected:\n");
13989 		drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, a);
13990 		drm_err(&dev_priv->drm, "found:\n");
13991 		drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, b);
13992 	}
13993 }
13994 
13995 static void __printf(4, 5)
13996 pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc,
13997 		     const char *name, const char *format, ...)
13998 {
13999 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
14000 	struct va_format vaf;
14001 	va_list args;
14002 
14003 	va_start(args, format);
14004 	vaf.fmt = format;
14005 	vaf.va = &args;
14006 
14007 	if (fastset)
14008 		drm_dbg_kms(&i915->drm,
14009 			    "[CRTC:%d:%s] fastset mismatch in %s %pV\n",
14010 			    crtc->base.base.id, crtc->base.name, name, &vaf);
14011 	else
14012 		drm_err(&i915->drm, "[CRTC:%d:%s] mismatch in %s %pV\n",
14013 			crtc->base.base.id, crtc->base.name, name, &vaf);
14014 
14015 	va_end(args);
14016 }
14017 
14018 static bool fastboot_enabled(struct drm_i915_private *dev_priv)
14019 {
14020 	if (dev_priv->params.fastboot != -1)
14021 		return dev_priv->params.fastboot;
14022 
14023 	/* Enable fastboot by default on Skylake and newer */
14024 	if (INTEL_GEN(dev_priv) >= 9)
14025 		return true;
14026 
14027 	/* Enable fastboot by default on VLV and CHV */
14028 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
14029 		return true;
14030 
14031 	/* Disabled by default on all others */
14032 	return false;
14033 }
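
/*
 * Illustrative behaviour: with the default i915.fastboot=-1 ("auto") this
 * resolves to true on gen9+ and on VLV/CHV and false elsewhere, while an
 * explicit i915.fastboot=0 or =1 overrides the platform default.
 */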
14034 
14035 static bool
14036 intel_pipe_config_compare(const struct intel_crtc_state *current_config,
14037 			  const struct intel_crtc_state *pipe_config,
14038 			  bool fastset)
14039 {
14040 	struct drm_i915_private *dev_priv = to_i915(current_config->uapi.crtc->dev);
14041 	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
14042 	bool ret = true;
14043 	u32 bp_gamma = 0;
14044 	bool fixup_inherited = fastset &&
14045 		current_config->inherited && !pipe_config->inherited;
14046 
14047 	if (fixup_inherited && !fastboot_enabled(dev_priv)) {
14048 		drm_dbg_kms(&dev_priv->drm,
14049 			    "initial modeset and fastboot not set\n");
14050 		ret = false;
14051 	}
14052 
14053 #define PIPE_CONF_CHECK_X(name) do { \
14054 	if (current_config->name != pipe_config->name) { \
14055 		pipe_config_mismatch(fastset, crtc, __stringify(name), \
14056 				     "(expected 0x%08x, found 0x%08x)", \
14057 				     current_config->name, \
14058 				     pipe_config->name); \
14059 		ret = false; \
14060 	} \
14061 } while (0)
14062 
14063 #define PIPE_CONF_CHECK_I(name) do { \
14064 	if (current_config->name != pipe_config->name) { \
14065 		pipe_config_mismatch(fastset, crtc, __stringify(name), \
14066 				     "(expected %i, found %i)", \
14067 				     current_config->name, \
14068 				     pipe_config->name); \
14069 		ret = false; \
14070 	} \
14071 } while (0)
14072 
14073 #define PIPE_CONF_CHECK_BOOL(name) do { \
14074 	if (current_config->name != pipe_config->name) { \
14075 		pipe_config_mismatch(fastset, crtc,  __stringify(name), \
14076 				     "(expected %s, found %s)", \
14077 				     yesno(current_config->name), \
14078 				     yesno(pipe_config->name)); \
14079 		ret = false; \
14080 	} \
14081 } while (0)
14082 
14083 /*
14084  * Checks state where we only read out the enabling, but not the entire
14085  * state itself (like the full infoframe contents or the audio ELD). Such
14086  * state requires a full modeset on bootup to fix up.
14087  */
14088 #define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \
14089 	if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
14090 		PIPE_CONF_CHECK_BOOL(name); \
14091 	} else { \
14092 		pipe_config_mismatch(fastset, crtc, __stringify(name), \
14093 				     "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)", \
14094 				     yesno(current_config->name), \
14095 				     yesno(pipe_config->name)); \
14096 		ret = false; \
14097 	} \
14098 } while (0)
14099 
14100 #define PIPE_CONF_CHECK_P(name) do { \
14101 	if (current_config->name != pipe_config->name) { \
14102 		pipe_config_mismatch(fastset, crtc, __stringify(name), \
14103 				     "(expected %p, found %p)", \
14104 				     current_config->name, \
14105 				     pipe_config->name); \
14106 		ret = false; \
14107 	} \
14108 } while (0)
14109 
14110 #define PIPE_CONF_CHECK_M_N(name) do { \
14111 	if (!intel_compare_link_m_n(&current_config->name, \
14112 				    &pipe_config->name,\
14113 				    !fastset)) { \
14114 		pipe_config_mismatch(fastset, crtc, __stringify(name), \
14115 				     "(expected tu %i gmch %i/%i link %i/%i, " \
14116 				     "found tu %i gmch %i/%i link %i/%i)", \
14117 				     current_config->name.tu, \
14118 				     current_config->name.gmch_m, \
14119 				     current_config->name.gmch_n, \
14120 				     current_config->name.link_m, \
14121 				     current_config->name.link_n, \
14122 				     pipe_config->name.tu, \
14123 				     pipe_config->name.gmch_m, \
14124 				     pipe_config->name.gmch_n, \
14125 				     pipe_config->name.link_m, \
14126 				     pipe_config->name.link_n); \
14127 		ret = false; \
14128 	} \
14129 } while (0)
14130 
14131 /* This is required for BDW+ where there is only one set of registers for
14132  * switching between high and low refresh rates (RR).
14133  * This macro can be used whenever a comparison has to be made between one
14134  * hw state and multiple sw state variables.
14135  */
14136 #define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \
14137 	if (!intel_compare_link_m_n(&current_config->name, \
14138 				    &pipe_config->name, !fastset) && \
14139 	    !intel_compare_link_m_n(&current_config->alt_name, \
14140 				    &pipe_config->name, !fastset)) { \
14141 		pipe_config_mismatch(fastset, crtc, __stringify(name), \
14142 				     "(expected tu %i gmch %i/%i link %i/%i, " \
14143 				     "or tu %i gmch %i/%i link %i/%i, " \
14144 				     "found tu %i gmch %i/%i link %i/%i)", \
14145 				     current_config->name.tu, \
14146 				     current_config->name.gmch_m, \
14147 				     current_config->name.gmch_n, \
14148 				     current_config->name.link_m, \
14149 				     current_config->name.link_n, \
14150 				     current_config->alt_name.tu, \
14151 				     current_config->alt_name.gmch_m, \
14152 				     current_config->alt_name.gmch_n, \
14153 				     current_config->alt_name.link_m, \
14154 				     current_config->alt_name.link_n, \
14155 				     pipe_config->name.tu, \
14156 				     pipe_config->name.gmch_m, \
14157 				     pipe_config->name.gmch_n, \
14158 				     pipe_config->name.link_m, \
14159 				     pipe_config->name.link_n); \
14160 		ret = false; \
14161 	} \
14162 } while (0)
14163 
14164 #define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
14165 	if ((current_config->name ^ pipe_config->name) & (mask)) { \
14166 		pipe_config_mismatch(fastset, crtc, __stringify(name), \
14167 				     "(%x) (expected %i, found %i)", \
14168 				     (mask), \
14169 				     current_config->name & (mask), \
14170 				     pipe_config->name & (mask)); \
14171 		ret = false; \
14172 	} \
14173 } while (0)
14174 
14175 #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \
14176 	if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
14177 		pipe_config_mismatch(fastset, crtc, __stringify(name), \
14178 				     "(expected %i, found %i)", \
14179 				     current_config->name, \
14180 				     pipe_config->name); \
14181 		ret = false; \
14182 	} \
14183 } while (0)
14184 
14185 #define PIPE_CONF_CHECK_INFOFRAME(name) do { \
14186 	if (!intel_compare_infoframe(&current_config->infoframes.name, \
14187 				     &pipe_config->infoframes.name)) { \
14188 		pipe_config_infoframe_mismatch(dev_priv, fastset, __stringify(name), \
14189 					       &current_config->infoframes.name, \
14190 					       &pipe_config->infoframes.name); \
14191 		ret = false; \
14192 	} \
14193 } while (0)
14194 
14195 #define PIPE_CONF_CHECK_DP_VSC_SDP(name) do { \
14196 	if (!current_config->has_psr && !pipe_config->has_psr && \
14197 	    !intel_compare_dp_vsc_sdp(&current_config->infoframes.name, \
14198 				      &pipe_config->infoframes.name)) { \
14199 		pipe_config_dp_vsc_sdp_mismatch(dev_priv, fastset, __stringify(name), \
14200 						&current_config->infoframes.name, \
14201 						&pipe_config->infoframes.name); \
14202 		ret = false; \
14203 	} \
14204 } while (0)
14205 
14206 #define PIPE_CONF_CHECK_COLOR_LUT(name1, name2, bit_precision) do { \
14207 	if (current_config->name1 != pipe_config->name1) { \
14208 		pipe_config_mismatch(fastset, crtc, __stringify(name1), \
14209 				"(expected %i, found %i, won't compare lut values)", \
14210 				current_config->name1, \
14211 				pipe_config->name1); \
14212 		ret = false;\
14213 	} else { \
14214 		if (!intel_color_lut_equal(current_config->name2, \
14215 					pipe_config->name2, pipe_config->name1, \
14216 					bit_precision)) { \
14217 			pipe_config_mismatch(fastset, crtc, __stringify(name2), \
14218 					"hw_state doesn't match sw_state"); \
14219 			ret = false; \
14220 		} \
14221 	} \
14222 } while (0)
14223 
14224 #define PIPE_CONF_QUIRK(quirk) \
14225 	((current_config->quirks | pipe_config->quirks) & (quirk))
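
/*
 * Illustrative expansion (not literal preprocessor output): a check such
 * as PIPE_CONF_CHECK_I(lane_count) below behaves like
 *
 *	if (current_config->lane_count != pipe_config->lane_count) {
 *		pipe_config_mismatch(fastset, crtc, "lane_count",
 *				     "(expected %i, found %i)",
 *				     current_config->lane_count,
 *				     pipe_config->lane_count);
 *		ret = false;
 *	}
 *
 * i.e. a mismatch is logged but the comparison keeps going, so one call
 * reports every differing field instead of stopping at the first.
 */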
14226 
14227 	PIPE_CONF_CHECK_I(cpu_transcoder);
14228 
14229 	PIPE_CONF_CHECK_BOOL(has_pch_encoder);
14230 	PIPE_CONF_CHECK_I(fdi_lanes);
14231 	PIPE_CONF_CHECK_M_N(fdi_m_n);
14232 
14233 	PIPE_CONF_CHECK_I(lane_count);
14234 	PIPE_CONF_CHECK_X(lane_lat_optim_mask);
14235 
14236 	if (INTEL_GEN(dev_priv) < 8) {
14237 		PIPE_CONF_CHECK_M_N(dp_m_n);
14238 
14239 		if (current_config->has_drrs)
14240 			PIPE_CONF_CHECK_M_N(dp_m2_n2);
14241 	} else {
14242 		PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
	}
14243 
14244 	PIPE_CONF_CHECK_X(output_types);
14245 
14246 	/* FIXME do the readout properly and get rid of this quirk */
14247 	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE)) {
14248 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hdisplay);
14249 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_htotal);
14250 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_start);
14251 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_end);
14252 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hsync_start);
14253 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hsync_end);
14254 
14255 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vdisplay);
14256 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vtotal);
14257 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vblank_start);
14258 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vblank_end);
14259 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vsync_start);
14260 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vsync_end);
14261 
14262 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hdisplay);
14263 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_htotal);
14264 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_start);
14265 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_end);
14266 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_start);
14267 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_end);
14268 
14269 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vdisplay);
14270 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vtotal);
14271 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_start);
14272 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_end);
14273 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_start);
14274 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_end);
14275 
14276 		PIPE_CONF_CHECK_I(pixel_multiplier);
14277 
14278 		PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
14279 				      DRM_MODE_FLAG_INTERLACE);
14280 
14281 		if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
14282 			PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
14283 					      DRM_MODE_FLAG_PHSYNC);
14284 			PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
14285 					      DRM_MODE_FLAG_NHSYNC);
14286 			PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
14287 					      DRM_MODE_FLAG_PVSYNC);
14288 			PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
14289 					      DRM_MODE_FLAG_NVSYNC);
14290 		}
14291 	}
14292 
14293 	PIPE_CONF_CHECK_I(output_format);
14294 	PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
14295 	if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
14296 	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
14297 		PIPE_CONF_CHECK_BOOL(limited_color_range);
14298 
14299 	PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
14300 	PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
14301 	PIPE_CONF_CHECK_BOOL(has_infoframe);
14302 	/* FIXME do the readout properly and get rid of this quirk */
14303 	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE))
14304 		PIPE_CONF_CHECK_BOOL(fec_enable);
14305 
14306 	PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
14307 
14308 	PIPE_CONF_CHECK_X(gmch_pfit.control);
14309 	/* pfit ratios are autocomputed by the hw on gen4+ */
14310 	if (INTEL_GEN(dev_priv) < 4)
14311 		PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
14312 	PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
14313 
14314 	/*
14315 	 * Changing the EDP transcoder input mux
14316 	 * (A_ONOFF vs. A_ON) requires a full modeset.
14317 	 */
14318 	PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru);
14319 
14320 	if (!fastset) {
14321 		PIPE_CONF_CHECK_I(pipe_src_w);
14322 		PIPE_CONF_CHECK_I(pipe_src_h);
14323 
14324 		PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
14325 		if (current_config->pch_pfit.enabled) {
14326 			PIPE_CONF_CHECK_I(pch_pfit.dst.x1);
14327 			PIPE_CONF_CHECK_I(pch_pfit.dst.y1);
14328 			PIPE_CONF_CHECK_I(pch_pfit.dst.x2);
14329 			PIPE_CONF_CHECK_I(pch_pfit.dst.y2);
14330 		}
14331 
14332 		PIPE_CONF_CHECK_I(scaler_state.scaler_id);
14333 		/* FIXME do the readout properly and get rid of this quirk */
14334 		if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE))
14335 			PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);
14336 
14337 		PIPE_CONF_CHECK_X(gamma_mode);
14338 		if (IS_CHERRYVIEW(dev_priv))
14339 			PIPE_CONF_CHECK_X(cgm_mode);
14340 		else
14341 			PIPE_CONF_CHECK_X(csc_mode);
14342 		PIPE_CONF_CHECK_BOOL(gamma_enable);
14343 		PIPE_CONF_CHECK_BOOL(csc_enable);
14344 
14345 		PIPE_CONF_CHECK_I(linetime);
14346 		PIPE_CONF_CHECK_I(ips_linetime);
14347 
14348 		bp_gamma = intel_color_get_gamma_bit_precision(pipe_config);
14349 		if (bp_gamma)
14350 			PIPE_CONF_CHECK_COLOR_LUT(gamma_mode, hw.gamma_lut, bp_gamma);
14351 	}
14352 
14353 	PIPE_CONF_CHECK_BOOL(double_wide);
14354 
14355 	PIPE_CONF_CHECK_P(shared_dpll);
14356 
14357 	/* FIXME do the readout properly and get rid of this quirk */
14358 	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE)) {
14359 		PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
14360 		PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
14361 		PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
14362 		PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
14363 		PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
14364 		PIPE_CONF_CHECK_X(dpll_hw_state.spll);
14365 		PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
14366 		PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
14367 		PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
14368 		PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0);
14369 		PIPE_CONF_CHECK_X(dpll_hw_state.ebb0);
14370 		PIPE_CONF_CHECK_X(dpll_hw_state.ebb4);
14371 		PIPE_CONF_CHECK_X(dpll_hw_state.pll0);
14372 		PIPE_CONF_CHECK_X(dpll_hw_state.pll1);
14373 		PIPE_CONF_CHECK_X(dpll_hw_state.pll2);
14374 		PIPE_CONF_CHECK_X(dpll_hw_state.pll3);
14375 		PIPE_CONF_CHECK_X(dpll_hw_state.pll6);
14376 		PIPE_CONF_CHECK_X(dpll_hw_state.pll8);
14377 		PIPE_CONF_CHECK_X(dpll_hw_state.pll9);
14378 		PIPE_CONF_CHECK_X(dpll_hw_state.pll10);
14379 		PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12);
14380 		PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl);
14381 		PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1);
14382 		PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl);
14383 		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0);
14384 		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1);
14385 		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf);
14386 		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock);
14387 		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc);
14388 		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias);
14389 		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias);
14390 
14391 		PIPE_CONF_CHECK_X(dsi_pll.ctrl);
14392 		PIPE_CONF_CHECK_X(dsi_pll.div);
14393 
14394 		if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5)
14395 			PIPE_CONF_CHECK_I(pipe_bpp);
14396 
14397 		PIPE_CONF_CHECK_CLOCK_FUZZY(hw.pipe_mode.crtc_clock);
14398 		PIPE_CONF_CHECK_CLOCK_FUZZY(hw.adjusted_mode.crtc_clock);
14399 		PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
14400 
14401 		PIPE_CONF_CHECK_I(min_voltage_level);
14402 	}
14403 
14404 	PIPE_CONF_CHECK_X(infoframes.enable);
14405 	PIPE_CONF_CHECK_X(infoframes.gcp);
14406 	PIPE_CONF_CHECK_INFOFRAME(avi);
14407 	PIPE_CONF_CHECK_INFOFRAME(spd);
14408 	PIPE_CONF_CHECK_INFOFRAME(hdmi);
14409 	PIPE_CONF_CHECK_INFOFRAME(drm);
14410 	PIPE_CONF_CHECK_DP_VSC_SDP(vsc);
14411 
14412 	PIPE_CONF_CHECK_X(sync_mode_slaves_mask);
14413 	PIPE_CONF_CHECK_I(master_transcoder);
14414 	PIPE_CONF_CHECK_BOOL(bigjoiner);
14415 	PIPE_CONF_CHECK_BOOL(bigjoiner_slave);
14416 	PIPE_CONF_CHECK_P(bigjoiner_linked_crtc);
14417 
14418 	PIPE_CONF_CHECK_I(dsc.compression_enable);
14419 	PIPE_CONF_CHECK_I(dsc.dsc_split);
14420 	PIPE_CONF_CHECK_I(dsc.compressed_bpp);
14421 
14422 	PIPE_CONF_CHECK_I(mst_master_transcoder);
14423 
14424 #undef PIPE_CONF_CHECK_X
14425 #undef PIPE_CONF_CHECK_I
14426 #undef PIPE_CONF_CHECK_BOOL
14427 #undef PIPE_CONF_CHECK_BOOL_INCOMPLETE
14428 #undef PIPE_CONF_CHECK_P
14429 #undef PIPE_CONF_CHECK_FLAGS
14430 #undef PIPE_CONF_CHECK_CLOCK_FUZZY
14431 #undef PIPE_CONF_CHECK_COLOR_LUT
14432 #undef PIPE_CONF_QUIRK
14433 
14434 	return ret;
14435 }
14436 
14437 static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
14438 					   const struct intel_crtc_state *pipe_config)
14439 {
14440 	if (pipe_config->has_pch_encoder) {
14441 		int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
14442 							    &pipe_config->fdi_m_n);
14443 		int dotclock = pipe_config->hw.adjusted_mode.crtc_clock;
14444 
14445 		/*
14446 		 * FDI already provided one idea for the dotclock.
14447 		 * Yell if the encoder disagrees.
14448 		 */
14449 		drm_WARN(&dev_priv->drm,
14450 			 !intel_fuzzy_clock_check(fdi_dotclock, dotclock),
14451 			 "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
14452 			 fdi_dotclock, dotclock);
14453 	}
14454 }
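
/*
 * Worked example (hypothetical numbers): for a 148500 kHz adjusted-mode
 * crtc_clock, an fdi_m_n that works back to, say, 149000 kHz through
 * intel_dotclock_calculate() still passes the fuzzy check above; only a
 * disagreement beyond the fuzzy tolerance triggers the drm_WARN().
 */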
14455 
14456 static void verify_wm_state(struct intel_crtc *crtc,
14457 			    struct intel_crtc_state *new_crtc_state)
14458 {
14459 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
14460 	struct skl_hw_state {
14461 		struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
14462 		struct skl_ddb_entry ddb_uv[I915_MAX_PLANES];
14463 		struct skl_pipe_wm wm;
14464 	} *hw;
14465 	struct skl_pipe_wm *sw_wm;
14466 	struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
14467 	u8 hw_enabled_slices;
14468 	const enum pipe pipe = crtc->pipe;
14469 	int plane, level, max_level = ilk_wm_max_level(dev_priv);
14470 
14471 	if (INTEL_GEN(dev_priv) < 9 || !new_crtc_state->hw.active)
14472 		return;
14473 
14474 	hw = kzalloc(sizeof(*hw), GFP_KERNEL);
14475 	if (!hw)
14476 		return;
14477 
14478 	skl_pipe_wm_get_hw_state(crtc, &hw->wm);
14479 	sw_wm = &new_crtc_state->wm.skl.optimal;
14480 
14481 	skl_pipe_ddb_get_hw_state(crtc, hw->ddb_y, hw->ddb_uv);
14482 
14483 	hw_enabled_slices = intel_enabled_dbuf_slices_mask(dev_priv);
14484 
14485 	if (INTEL_GEN(dev_priv) >= 11 &&
14486 	    hw_enabled_slices != dev_priv->dbuf.enabled_slices)
14487 		drm_err(&dev_priv->drm,
14488 			"mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n",
14489 			dev_priv->dbuf.enabled_slices,
14490 			hw_enabled_slices);
14491 
14492 	/* planes */
14493 	for_each_universal_plane(dev_priv, pipe, plane) {
14494 		struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
14495 
14496 		hw_plane_wm = &hw->wm.planes[plane];
14497 		sw_plane_wm = &sw_wm->planes[plane];
14498 
14499 		/* Watermarks */
14500 		for (level = 0; level <= max_level; level++) {
14501 			if (skl_wm_level_equals(&hw_plane_wm->wm[level],
14502 						&sw_plane_wm->wm[level]) ||
14503 			    (level == 0 && skl_wm_level_equals(&hw_plane_wm->wm[level],
14504 							       &sw_plane_wm->sagv_wm0)))
14505 				continue;
14506 
14507 			drm_err(&dev_priv->drm,
14508 				"mismatch in WM pipe %c plane %d level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
14509 				pipe_name(pipe), plane + 1, level,
14510 				sw_plane_wm->wm[level].plane_en,
14511 				sw_plane_wm->wm[level].plane_res_b,
14512 				sw_plane_wm->wm[level].plane_res_l,
14513 				hw_plane_wm->wm[level].plane_en,
14514 				hw_plane_wm->wm[level].plane_res_b,
14515 				hw_plane_wm->wm[level].plane_res_l);
14516 		}
14517 
14518 		if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
14519 					 &sw_plane_wm->trans_wm)) {
14520 			drm_err(&dev_priv->drm,
14521 				"mismatch in trans WM pipe %c plane %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
14522 				pipe_name(pipe), plane + 1,
14523 				sw_plane_wm->trans_wm.plane_en,
14524 				sw_plane_wm->trans_wm.plane_res_b,
14525 				sw_plane_wm->trans_wm.plane_res_l,
14526 				hw_plane_wm->trans_wm.plane_en,
14527 				hw_plane_wm->trans_wm.plane_res_b,
14528 				hw_plane_wm->trans_wm.plane_res_l);
14529 		}
14530 
14531 		/* DDB */
14532 		hw_ddb_entry = &hw->ddb_y[plane];
14533 		sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[plane];
14534 
14535 		if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
14536 			drm_err(&dev_priv->drm,
14537 				"mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n",
14538 				pipe_name(pipe), plane + 1,
14539 				sw_ddb_entry->start, sw_ddb_entry->end,
14540 				hw_ddb_entry->start, hw_ddb_entry->end);
14541 		}
14542 	}
14543 
14544 	/*
14545 	 * cursor
14546 	 * If the cursor plane isn't active, we may not have updated its ddb
14547 	 * allocation. In that case, since the ddb allocation will be updated
14548 	 * once the plane becomes visible, we can skip this check.
14549 	 */
14550 	{
14551 		struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
14552 
14553 		hw_plane_wm = &hw->wm.planes[PLANE_CURSOR];
14554 		sw_plane_wm = &sw_wm->planes[PLANE_CURSOR];
14555 
14556 		/* Watermarks */
14557 		for (level = 0; level <= max_level; level++) {
14558 			if (skl_wm_level_equals(&hw_plane_wm->wm[level],
14559 						&sw_plane_wm->wm[level]) ||
14560 			    (level == 0 && skl_wm_level_equals(&hw_plane_wm->wm[level],
14561 							       &sw_plane_wm->sagv_wm0)))
14562 				continue;
14563 
14564 			drm_err(&dev_priv->drm,
14565 				"mismatch in WM pipe %c cursor level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
14566 				pipe_name(pipe), level,
14567 				sw_plane_wm->wm[level].plane_en,
14568 				sw_plane_wm->wm[level].plane_res_b,
14569 				sw_plane_wm->wm[level].plane_res_l,
14570 				hw_plane_wm->wm[level].plane_en,
14571 				hw_plane_wm->wm[level].plane_res_b,
14572 				hw_plane_wm->wm[level].plane_res_l);
14573 		}
14574 
14575 		if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
14576 					 &sw_plane_wm->trans_wm)) {
14577 			drm_err(&dev_priv->drm,
14578 				"mismatch in trans WM pipe %c cursor (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
14579 				pipe_name(pipe),
14580 				sw_plane_wm->trans_wm.plane_en,
14581 				sw_plane_wm->trans_wm.plane_res_b,
14582 				sw_plane_wm->trans_wm.plane_res_l,
14583 				hw_plane_wm->trans_wm.plane_en,
14584 				hw_plane_wm->trans_wm.plane_res_b,
14585 				hw_plane_wm->trans_wm.plane_res_l);
14586 		}
14587 
14588 		/* DDB */
14589 		hw_ddb_entry = &hw->ddb_y[PLANE_CURSOR];
14590 		sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
14591 
14592 		if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
14593 			drm_err(&dev_priv->drm,
14594 				"mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n",
14595 				pipe_name(pipe),
14596 				sw_ddb_entry->start, sw_ddb_entry->end,
14597 				hw_ddb_entry->start, hw_ddb_entry->end);
14598 		}
14599 	}
14600 
14601 	kfree(hw);
14602 }
14603 
14604 static void
14605 verify_connector_state(struct intel_atomic_state *state,
14606 		       struct intel_crtc *crtc)
14607 {
14608 	struct drm_connector *connector;
14609 	struct drm_connector_state *new_conn_state;
14610 	int i;
14611 
14612 	for_each_new_connector_in_state(&state->base, connector, new_conn_state, i) {
14613 		struct drm_encoder *encoder = connector->encoder;
14614 		struct intel_crtc_state *crtc_state = NULL;
14615 
14616 		if (new_conn_state->crtc != &crtc->base)
14617 			continue;
14618 
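		/* crtc may be NULL here when verifying the disabled state */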
14619 		if (crtc)
14620 			crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
14621 
14622 		intel_connector_verify_state(crtc_state, new_conn_state);
14623 
14624 		I915_STATE_WARN(new_conn_state->best_encoder != encoder,
14625 		     "connector's atomic encoder doesn't match legacy encoder\n");
14626 	}
14627 }
14628 
14629 static void
14630 verify_encoder_state(struct drm_i915_private *dev_priv, struct intel_atomic_state *state)
14631 {
14632 	struct intel_encoder *encoder;
14633 	struct drm_connector *connector;
14634 	struct drm_connector_state *old_conn_state, *new_conn_state;
14635 	int i;
14636 
14637 	for_each_intel_encoder(&dev_priv->drm, encoder) {
14638 		bool enabled = false, found = false;
14639 		enum pipe pipe;
14640 
14641 		drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s]\n",
14642 			    encoder->base.base.id,
14643 			    encoder->base.name);
14644 
14645 		for_each_oldnew_connector_in_state(&state->base, connector, old_conn_state,
14646 						   new_conn_state, i) {
14647 			if (old_conn_state->best_encoder == &encoder->base)
14648 				found = true;
14649 
14650 			if (new_conn_state->best_encoder != &encoder->base)
14651 				continue;
14652 			found = enabled = true;
14653 
14654 			I915_STATE_WARN(new_conn_state->crtc !=
14655 					encoder->base.crtc,
14656 			     "connector's crtc doesn't match encoder crtc\n");
14657 		}
14658 
14659 		if (!found)
14660 			continue;
14661 
14662 		I915_STATE_WARN(!!encoder->base.crtc != enabled,
14663 		     "encoder's enabled state mismatch "
14664 		     "(expected %i, found %i)\n",
14665 		     !!encoder->base.crtc, enabled);
14666 
14667 		if (!encoder->base.crtc) {
14668 			bool active;
14669 
14670 			active = encoder->get_hw_state(encoder, &pipe);
14671 			I915_STATE_WARN(active,
14672 			     "encoder detached but still enabled on pipe %c.\n",
14673 			     pipe_name(pipe));
14674 		}
14675 	}
14676 }
14677 
14678 static void
14679 verify_crtc_state(struct intel_crtc *crtc,
14680 		  struct intel_crtc_state *old_crtc_state,
14681 		  struct intel_crtc_state *new_crtc_state)
14682 {
14683 	struct drm_device *dev = crtc->base.dev;
14684 	struct drm_i915_private *dev_priv = to_i915(dev);
14685 	struct intel_encoder *encoder;
14686 	struct intel_crtc_state *pipe_config = old_crtc_state;
14687 	struct drm_atomic_state *state = old_crtc_state->uapi.state;
14688 	struct intel_crtc *master = crtc;
14689 
14690 	__drm_atomic_helper_crtc_destroy_state(&old_crtc_state->uapi);
14691 	intel_crtc_free_hw_state(old_crtc_state);
14692 	intel_crtc_state_reset(old_crtc_state, crtc);
14693 	old_crtc_state->uapi.state = state;
14694 
14695 	drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s]\n", crtc->base.base.id,
14696 		    crtc->base.name);
14697 
14698 	pipe_config->hw.enable = new_crtc_state->hw.enable;
14699 
14700 	intel_crtc_get_pipe_config(pipe_config);
14701 
14702 	/* we keep both pipes enabled on 830 */
14703 	if (IS_I830(dev_priv) && pipe_config->hw.active)
14704 		pipe_config->hw.active = new_crtc_state->hw.active;
14705 
14706 	I915_STATE_WARN(new_crtc_state->hw.active != pipe_config->hw.active,
14707 			"crtc active state doesn't match with hw state "
14708 			"(expected %i, found %i)\n",
14709 			new_crtc_state->hw.active, pipe_config->hw.active);
14710 
14711 	I915_STATE_WARN(crtc->active != new_crtc_state->hw.active,
14712 			"transitional active state does not match atomic hw state "
14713 			"(expected %i, found %i)\n",
14714 			new_crtc_state->hw.active, crtc->active);
14715 
14716 	if (new_crtc_state->bigjoiner_slave)
14717 		master = new_crtc_state->bigjoiner_linked_crtc;
14718 
14719 	for_each_encoder_on_crtc(dev, &master->base, encoder) {
14720 		enum pipe pipe;
14721 		bool active;
14722 
14723 		active = encoder->get_hw_state(encoder, &pipe);
14724 		I915_STATE_WARN(active != new_crtc_state->hw.active,
14725 				"[ENCODER:%i] active %i with crtc active %i\n",
14726 				encoder->base.base.id, active,
14727 				new_crtc_state->hw.active);
14728 
14729 		I915_STATE_WARN(active && master->pipe != pipe,
14730 				"Encoder connected to wrong pipe %c\n",
14731 				pipe_name(pipe));
14732 
14733 		if (active)
14734 			intel_encoder_get_config(encoder, pipe_config);
14735 	}
14736 
14737 	if (!new_crtc_state->hw.active)
14738 		return;
14739 
14740 	intel_pipe_config_sanity_check(dev_priv, pipe_config);
14741 
14742 	if (!intel_pipe_config_compare(new_crtc_state,
14743 				       pipe_config, false)) {
14744 		I915_STATE_WARN(1, "pipe state doesn't match!\n");
14745 		intel_dump_pipe_config(pipe_config, NULL, "[hw state]");
14746 		intel_dump_pipe_config(new_crtc_state, NULL, "[sw state]");
14747 	}
14748 }
14749 
14750 static void
14751 intel_verify_planes(struct intel_atomic_state *state)
14752 {
14753 	struct intel_plane *plane;
14754 	const struct intel_plane_state *plane_state;
14755 	int i;
14756 
14757 	for_each_new_intel_plane_in_state(state, plane,
14758 					  plane_state, i)
14759 		assert_plane(plane, plane_state->planar_slave ||
14760 			     plane_state->uapi.visible);
14761 }
14762 
14763 static void
14764 verify_single_dpll_state(struct drm_i915_private *dev_priv,
14765 			 struct intel_shared_dpll *pll,
14766 			 struct intel_crtc *crtc,
14767 			 struct intel_crtc_state *new_crtc_state)
14768 {
14769 	struct intel_dpll_hw_state dpll_hw_state;
14770 	unsigned int crtc_mask;
14771 	bool active;
14772 
14773 	memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
14774 
14775 	drm_dbg_kms(&dev_priv->drm, "%s\n", pll->info->name);
14776 
14777 	active = intel_dpll_get_hw_state(dev_priv, pll, &dpll_hw_state);
14778 
14779 	if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
14780 		I915_STATE_WARN(!pll->on && pll->active_mask,
14781 		     "pll in active use but not on in sw tracking\n");
14782 		I915_STATE_WARN(pll->on && !pll->active_mask,
14783 		     "pll is on but not used by any active crtc\n");
14784 		I915_STATE_WARN(pll->on != active,
14785 		     "pll on state mismatch (expected %i, found %i)\n",
14786 		     pll->on, active);
14787 	}
14788 
14789 	if (!crtc) {
14790 		I915_STATE_WARN(pll->active_mask & ~pll->state.crtc_mask,
14791 				"more active pll users than references: %x vs %x\n",
14792 				pll->active_mask, pll->state.crtc_mask);
14793 
14794 		return;
14795 	}
14796 
14797 	crtc_mask = drm_crtc_mask(&crtc->base);
14798 
14799 	if (new_crtc_state->hw.active)
14800 		I915_STATE_WARN(!(pll->active_mask & crtc_mask),
14801 				"pll active mismatch (expected pipe %c in active mask 0x%02x)\n",
14802 				pipe_name(crtc->pipe), pll->active_mask);
14803 	else
14804 		I915_STATE_WARN(pll->active_mask & crtc_mask,
14805 				"pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n",
14806 				pipe_name(crtc->pipe), pll->active_mask);
14807 
14808 	I915_STATE_WARN(!(pll->state.crtc_mask & crtc_mask),
14809 			"pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n",
14810 			crtc_mask, pll->state.crtc_mask);
14811 
14812 	I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
14813 					  &dpll_hw_state,
14814 					  sizeof(dpll_hw_state)),
14815 			"pll hw state mismatch\n");
14816 }
14817 
14818 static void
14819 verify_shared_dpll_state(struct intel_crtc *crtc,
14820 			 struct intel_crtc_state *old_crtc_state,
14821 			 struct intel_crtc_state *new_crtc_state)
14822 {
14823 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
14824 
14825 	if (new_crtc_state->shared_dpll)
14826 		verify_single_dpll_state(dev_priv, new_crtc_state->shared_dpll, crtc, new_crtc_state);
14827 
14828 	if (old_crtc_state->shared_dpll &&
14829 	    old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
14830 		unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
14831 		struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;
14832 
14833 		I915_STATE_WARN(pll->active_mask & crtc_mask,
14834 				"pll active mismatch (didn't expect pipe %c in active mask)\n",
14835 				pipe_name(crtc->pipe));
14836 		I915_STATE_WARN(pll->state.crtc_mask & crtc_mask,
14837 				"pll enabled crtcs mismatch (found pipe %c in enabled mask)\n",
14838 				pipe_name(crtc->pipe));
14839 	}
14840 }
14841 
14842 static void
14843 intel_modeset_verify_crtc(struct intel_crtc *crtc,
14844 			  struct intel_atomic_state *state,
14845 			  struct intel_crtc_state *old_crtc_state,
14846 			  struct intel_crtc_state *new_crtc_state)
14847 {
14848 	if (!needs_modeset(new_crtc_state) && !new_crtc_state->update_pipe)
14849 		return;
14850 
14851 	verify_wm_state(crtc, new_crtc_state);
14852 	verify_connector_state(state, crtc);
14853 	verify_crtc_state(crtc, old_crtc_state, new_crtc_state);
14854 	verify_shared_dpll_state(crtc, old_crtc_state, new_crtc_state);
14855 }
14856 
14857 static void
14858 verify_disabled_dpll_state(struct drm_i915_private *dev_priv)
14859 {
14860 	int i;
14861 
14862 	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++)
14863 		verify_single_dpll_state(dev_priv,
14864 					 &dev_priv->dpll.shared_dplls[i],
14865 					 NULL, NULL);
14866 }
14867 
14868 static void
14869 intel_modeset_verify_disabled(struct drm_i915_private *dev_priv,
14870 			      struct intel_atomic_state *state)
14871 {
14872 	verify_encoder_state(dev_priv, state);
14873 	verify_connector_state(state, NULL);
14874 	verify_disabled_dpll_state(dev_priv);
14875 }
14876 
14877 static void
14878 intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
14879 {
14880 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
14881 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
14882 	const struct drm_display_mode *adjusted_mode =
14883 		&crtc_state->hw.adjusted_mode;
14884 
14885 	drm_calc_timestamping_constants(&crtc->base, adjusted_mode);
14886 
14887 	crtc->mode_flags = crtc_state->mode_flags;
14888 
14889 	/*
14890 	 * The scanline counter increments at the leading edge of hsync.
14891 	 *
14892 	 * On most platforms it starts counting from vtotal-1 on the
14893 	 * first active line. That means the scanline counter value is
14894 	 * always one less than what we would expect. Ie. just after
14895 	 * always one less than what we would expect. I.e. just after
14896 	 * last active line), the scanline counter will read vblank_start-1.
14897 	 *
14898 	 * On gen2 the scanline counter starts counting from 1 instead
14899 	 * of vtotal-1, so we have to subtract one (or rather add vtotal-1
14900 	 * to keep the value positive), instead of adding one.
14901 	 *
14902 	 * On HSW+ the behaviour of the scanline counter depends on the output
14903 	 * type. For DP ports it behaves like most other platforms, but on HDMI
14904 	 * there's an extra 1 line difference. So we need to add two instead of
14905 	 * one to the value.
14906 	 *
14907 	 * On VLV/CHV DSI the scanline counter would appear to increment
14908 	 * approx. 1/3 of a scanline before start of vblank. Unfortunately
14909 	 * that means we can't tell whether we're in vblank or not while
14910 	 * we're on that particular line. We must still set scanline_offset
14911 	 * to 1 so that the vblank timestamps come out correct when we query
14912 	 * the scanline counter from within the vblank interrupt handler.
14913 	 * However if queried just before the start of vblank we'll get an
14914 	 * answer that's slightly in the future.
14915 	 */
14916 	if (IS_GEN(dev_priv, 2)) {
14917 		int vtotal;
14918 
14919 		vtotal = adjusted_mode->crtc_vtotal;
14920 		if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
14921 			vtotal /= 2;
14922 
14923 		crtc->scanline_offset = vtotal - 1;
14924 	} else if (HAS_DDI(dev_priv) &&
14925 		   intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
14926 		crtc->scanline_offset = 2;
14927 	} else {
14928 		crtc->scanline_offset = 1;
14929 	}
14930 }
14931 
14932 static void intel_modeset_clear_plls(struct intel_atomic_state *state)
14933 {
14934 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
14935 	struct intel_crtc_state *new_crtc_state;
14936 	struct intel_crtc *crtc;
14937 	int i;
14938 
14939 	if (!dev_priv->display.crtc_compute_clock)
14940 		return;
14941 
14942 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
14943 		if (!needs_modeset(new_crtc_state))
14944 			continue;
14945 
14946 		intel_release_shared_dplls(state, crtc);
14947 	}
14948 }
14949 
14950 /*
14951  * This implements the workaround described in the "notes" section of the mode
14952  * set sequence documentation. When going from no pipes or single pipe to
14953  * multiple pipes, and planes are enabled after the pipe, we need to wait at
14954  * least 2 vblanks on the first pipe before enabling planes on the second pipe.
14955  */
14956 static int hsw_mode_set_planes_workaround(struct intel_atomic_state *state)
14957 {
14958 	struct intel_crtc_state *crtc_state;
14959 	struct intel_crtc *crtc;
14960 	struct intel_crtc_state *first_crtc_state = NULL;
14961 	struct intel_crtc_state *other_crtc_state = NULL;
14962 	enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
14963 	int i;
14964 
14965 	/* look at all crtcs that are going to be enabled during the modeset */
14966 	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
14967 		if (!crtc_state->hw.active ||
14968 		    !needs_modeset(crtc_state))
14969 			continue;
14970 
14971 		if (first_crtc_state) {
14972 			other_crtc_state = crtc_state;
14973 			break;
14974 		} else {
14975 			first_crtc_state = crtc_state;
14976 			first_pipe = crtc->pipe;
14977 		}
14978 	}
14979 
14980 	/* No workaround needed? */
14981 	if (!first_crtc_state)
14982 		return 0;
14983 
14984 	/* w/a possibly needed, check how many crtc's are already enabled. */
14985 	for_each_intel_crtc(state->base.dev, crtc) {
14986 		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
14987 		if (IS_ERR(crtc_state))
14988 			return PTR_ERR(crtc_state);
14989 
14990 		crtc_state->hsw_workaround_pipe = INVALID_PIPE;
14991 
14992 		if (!crtc_state->hw.active ||
14993 		    needs_modeset(crtc_state))
14994 			continue;
14995 
14996 		/* 2 or more enabled crtcs means no need for w/a */
14997 		if (enabled_pipe != INVALID_PIPE)
14998 			return 0;
14999 
15000 		enabled_pipe = crtc->pipe;
15001 	}
15002 
15003 	if (enabled_pipe != INVALID_PIPE)
15004 		first_crtc_state->hsw_workaround_pipe = enabled_pipe;
15005 	else if (other_crtc_state)
15006 		other_crtc_state->hsw_workaround_pipe = first_pipe;
15007 
15008 	return 0;
15009 }
15010 
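/*
 * Compute the updated mask of active pipes by applying the crtcs in
 * @state on top of @active_pipes: each crtc's bit is set or cleared
 * according to its new hw.active value.
 */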
15011 u8 intel_calc_active_pipes(struct intel_atomic_state *state,
15012 			   u8 active_pipes)
15013 {
15014 	const struct intel_crtc_state *crtc_state;
15015 	struct intel_crtc *crtc;
15016 	int i;
15017 
15018 	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
15019 		if (crtc_state->hw.active)
15020 			active_pipes |= BIT(crtc->pipe);
15021 		else
15022 			active_pipes &= ~BIT(crtc->pipe);
15023 	}
15024 
15025 	return active_pipes;
15026 }
15027 
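/*
 * Modeset-wide checks: mark the state as a full modeset, and on Haswell
 * apply the multi-pipe plane enable workaround.
 */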
15028 static int intel_modeset_checks(struct intel_atomic_state *state)
15029 {
15030 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
15031 
15032 	state->modeset = true;
15033 
15034 	if (IS_HASWELL(dev_priv))
15035 		return hsw_mode_set_planes_workaround(state);
15036 
15037 	return 0;
15038 }
15039 
15040 /*
15041  * Handle calculation of various watermark data at the end of the atomic check
15042  * phase.  The code here should be run after the per-crtc and per-plane 'check'
15043  * handlers to ensure that all derived state has been updated.
15044  */
15045 static int calc_watermark_data(struct intel_atomic_state *state)
15046 {
15047 	struct drm_device *dev = state->base.dev;
15048 	struct drm_i915_private *dev_priv = to_i915(dev);
15049 
15050 	/* Is there platform-specific watermark information to calculate? */
15051 	if (dev_priv->display.compute_global_watermarks)
15052 		return dev_priv->display.compute_global_watermarks(state);
15053 
15054 	return 0;
15055 }
15056 
15057 static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state,
15058 				     struct intel_crtc_state *new_crtc_state)
15059 {
15060 	if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true))
15061 		return;
15062 
15063 	new_crtc_state->uapi.mode_changed = false;
15064 	new_crtc_state->update_pipe = true;
15065 }
15066 
15067 static void intel_crtc_copy_fastset(const struct intel_crtc_state *old_crtc_state,
15068 				    struct intel_crtc_state *new_crtc_state)
15069 {
15070 	/*
15071 	 * If we're not doing the full modeset we want to
15072 	 * keep the current M/N values as they may be
15073 	 * sufficiently different to the computed values
15074 	 * to cause problems.
15075 	 *
15076 	 * FIXME: should really copy more fuzzy state here
15077 	 */
15078 	new_crtc_state->fdi_m_n = old_crtc_state->fdi_m_n;
15079 	new_crtc_state->dp_m_n = old_crtc_state->dp_m_n;
15080 	new_crtc_state->dp_m2_n2 = old_crtc_state->dp_m2_n2;
15081 	new_crtc_state->has_drrs = old_crtc_state->has_drrs;
15082 }
15083 
15084 static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state,
15085 					  struct intel_crtc *crtc,
15086 					  u8 plane_ids_mask)
15087 {
15088 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
15089 	struct intel_plane *plane;
15090 
15091 	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
15092 		struct intel_plane_state *plane_state;
15093 
15094 		if ((plane_ids_mask & BIT(plane->id)) == 0)
15095 			continue;
15096 
15097 		plane_state = intel_atomic_get_plane_state(state, plane);
15098 		if (IS_ERR(plane_state))
15099 			return PTR_ERR(plane_state);
15100 	}
15101 
15102 	return 0;
15103 }
15104 
15105 static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv)
15106 {
15107 	/* See {hsw,vlv,ivb}_plane_ratio() */
15108 	return IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv) ||
15109 		IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
15110 		IS_IVYBRIDGE(dev_priv) || (INTEL_GEN(dev_priv) >= 11);
15111 }
15112 
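/*
 * Mirror the planes already in @state for @crtc onto @other, so that
 * both pipes of a bigjoiner pair always carry matching plane updates.
 */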
15113 static int intel_crtc_add_bigjoiner_planes(struct intel_atomic_state *state,
15114 					   struct intel_crtc *crtc,
15115 					   struct intel_crtc *other)
15116 {
15117 	const struct intel_plane_state *plane_state;
15118 	struct intel_plane *plane;
15119 	u8 plane_ids = 0;
15120 	int i;
15121 
15122 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
15123 		if (plane->pipe == crtc->pipe)
15124 			plane_ids |= BIT(plane->id);
15125 	}
15126 
15127 	return intel_crtc_add_planes_to_state(state, other, plane_ids);
15128 }
15129 
15130 static int intel_bigjoiner_add_affected_planes(struct intel_atomic_state *state)
15131 {
15132 	const struct intel_crtc_state *crtc_state;
15133 	struct intel_crtc *crtc;
15134 	int i;
15135 
15136 	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
15137 		int ret;
15138 
15139 		if (!crtc_state->bigjoiner)
15140 			continue;
15141 
15142 		ret = intel_crtc_add_bigjoiner_planes(state, crtc,
15143 						      crtc_state->bigjoiner_linked_crtc);
15144 		if (ret)
15145 			return ret;
15146 	}
15147 
15148 	return 0;
15149 }
15150 
15151 static int intel_atomic_check_planes(struct intel_atomic_state *state)
15152 {
15153 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
15154 	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
15155 	struct intel_plane_state *plane_state;
15156 	struct intel_plane *plane;
15157 	struct intel_crtc *crtc;
15158 	int i, ret;
15159 
15160 	ret = icl_add_linked_planes(state);
15161 	if (ret)
15162 		return ret;
15163 
15164 	ret = intel_bigjoiner_add_affected_planes(state);
15165 	if (ret)
15166 		return ret;
15167 
15168 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
15169 		ret = intel_plane_atomic_check(state, plane);
15170 		if (ret) {
15171 			drm_dbg_atomic(&dev_priv->drm,
15172 				       "[PLANE:%d:%s] atomic driver check failed\n",
15173 				       plane->base.base.id, plane->base.name);
15174 			return ret;
15175 		}
15176 	}
15177 
15178 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
15179 					    new_crtc_state, i) {
15180 		u8 old_active_planes, new_active_planes;
15181 
15182 		ret = icl_check_nv12_planes(new_crtc_state);
15183 		if (ret)
15184 			return ret;
15185 
15186 		/*
15187 		 * On some platforms the number of active planes affects
15188 		 * the planes' minimum cdclk calculation. Add such planes
15189 		 * to the state before we compute the minimum cdclk.
15190 		 */
15191 		if (!active_planes_affects_min_cdclk(dev_priv))
15192 			continue;
15193 
15194 		old_active_planes = old_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
15195 		new_active_planes = new_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
15196 
15197 		/*
15198 		 * Not only the number of planes, but a changed plane configuration
15199 		 * may already mean we need to recompute the min CDCLK, because
15200 		 * different planes may consume different amounts of DBuf bandwidth
15201 		 * according to the formula: Bw per plane = Pixel rate * bpp * pipe/plane scale factor
15202 		 */
15203 		if (old_active_planes == new_active_planes)
15204 			continue;
15205 
15206 		ret = intel_crtc_add_planes_to_state(state, crtc, new_active_planes);
15207 		if (ret)
15208 			return ret;
15209 	}
15210 
15211 	return 0;
15212 }
15213 
15214 static int intel_atomic_check_cdclk(struct intel_atomic_state *state,
15215 				    bool *need_cdclk_calc)
15216 {
15217 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
15218 	const struct intel_cdclk_state *old_cdclk_state;
15219 	const struct intel_cdclk_state *new_cdclk_state;
15220 	struct intel_plane_state *plane_state;
15221 	struct intel_bw_state *new_bw_state;
15222 	struct intel_plane *plane;
15223 	int min_cdclk = 0;
15224 	enum pipe pipe;
15225 	int ret;
15226 	int i;
15227 	/*
15228 	 * active_planes bitmask has been updated, and potentially
15229 	 * affected planes are part of the state. We can now
15230 	 * compute the minimum cdclk for each plane.
15231 	 */
15232 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
15233 		ret = intel_plane_calc_min_cdclk(state, plane, need_cdclk_calc);
15234 		if (ret)
15235 			return ret;
15236 	}
15237 
15238 	old_cdclk_state = intel_atomic_get_old_cdclk_state(state);
15239 	new_cdclk_state = intel_atomic_get_new_cdclk_state(state);
15240 
15241 	if (new_cdclk_state &&
15242 	    old_cdclk_state->force_min_cdclk != new_cdclk_state->force_min_cdclk)
15243 		*need_cdclk_calc = true;
15244 
15245 	ret = dev_priv->display.bw_calc_min_cdclk(state);
15246 	if (ret)
15247 		return ret;
15248 
15249 	new_bw_state = intel_atomic_get_new_bw_state(state);
15250 
15251 	if (!new_cdclk_state || !new_bw_state)
15252 		return 0;
15253 
15254 	for_each_pipe(dev_priv, pipe) {
15255 		min_cdclk = max(new_cdclk_state->min_cdclk[pipe], min_cdclk);
15256 
15257 		/*
15258 		 * Currently we only do this change if we need to increase the cdclk.
15259 		 */
15260 		if (new_bw_state->min_cdclk > min_cdclk)
15261 			*need_cdclk_calc = true;
15262 	}
15263 
15264 	return 0;
15265 }
15266 
15267 static int intel_atomic_check_crtcs(struct intel_atomic_state *state)
15268 {
15269 	struct intel_crtc_state *crtc_state;
15270 	struct intel_crtc *crtc;
15271 	int i;
15272 
15273 	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
15274 		struct drm_i915_private *i915 = to_i915(crtc->base.dev);
15275 		int ret;
15276 
15277 		ret = intel_crtc_atomic_check(state, crtc);
15278 		if (ret) {
15279 			drm_dbg_atomic(&i915->drm,
15280 				       "[CRTC:%d:%s] atomic driver check failed\n",
15281 				       crtc->base.base.id, crtc->base.name);
15282 			return ret;
15283 		}
15284 	}
15285 
15286 	return 0;
15287 }
15288 
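/*
 * Return true if any enabled crtc in @state uses one of the given CPU
 * @transcoders and needs a full modeset.
 */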
15289 static bool intel_cpu_transcoders_need_modeset(struct intel_atomic_state *state,
15290 					       u8 transcoders)
15291 {
15292 	const struct intel_crtc_state *new_crtc_state;
15293 	struct intel_crtc *crtc;
15294 	int i;
15295 
15296 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
15297 		if (new_crtc_state->hw.enable &&
15298 		    transcoders & BIT(new_crtc_state->cpu_transcoder) &&
15299 		    needs_modeset(new_crtc_state))
15300 			return true;
15301 	}
15302 
15303 	return false;
15304 }
15305 
15306 static int intel_atomic_check_bigjoiner(struct intel_atomic_state *state,
15307 					struct intel_crtc *crtc,
15308 					struct intel_crtc_state *old_crtc_state,
15309 					struct intel_crtc_state *new_crtc_state)
15310 {
15311 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
15312 	struct intel_crtc_state *slave_crtc_state, *master_crtc_state;
15313 	struct intel_crtc *slave, *master;
15314 
15315 	/* slave being enabled, is master is still claiming this crtc? */
15316 	/* slave being enabled, is the master still claiming this crtc? */
15317 		slave = crtc;
15318 		master = old_crtc_state->bigjoiner_linked_crtc;
15319 		master_crtc_state = intel_atomic_get_new_crtc_state(state, master);
15320 		if (!master_crtc_state || !needs_modeset(master_crtc_state))
15321 			goto claimed;
15322 	}
15323 
15324 	if (!new_crtc_state->bigjoiner)
15325 		return 0;
15326 
15327 	if (1 + crtc->pipe >= INTEL_NUM_PIPES(dev_priv)) {
15328 		DRM_DEBUG_KMS("[CRTC:%d:%s] Big joiner configuration requires "
15329 			      "CRTC + 1 to be used, doesn't exist\n",
15330 			      crtc->base.base.id, crtc->base.name);
15331 		return -EINVAL;
15332 	}
15333 
15334 	slave = new_crtc_state->bigjoiner_linked_crtc =
15335 		intel_get_crtc_for_pipe(dev_priv, crtc->pipe + 1);
15336 	slave_crtc_state = intel_atomic_get_crtc_state(&state->base, slave);
15337 	master = crtc;
15338 	if (IS_ERR(slave_crtc_state))
15339 		return PTR_ERR(slave_crtc_state);
15340 
15341 	/* master being enabled, slave was already configured? */
15342 	if (slave_crtc_state->uapi.enable)
15343 		goto claimed;
15344 
15345 	DRM_DEBUG_KMS("[CRTC:%d:%s] Used as slave for big joiner\n",
15346 		      slave->base.base.id, slave->base.name);
15347 
15348 	return copy_bigjoiner_crtc_state(slave_crtc_state, new_crtc_state);
15349 
15350 claimed:
15351 	DRM_DEBUG_KMS("[CRTC:%d:%s] Slave is enabled as normal CRTC, but "
15352 		      "[CRTC:%d:%s] claiming this CRTC for bigjoiner.\n",
15353 		      slave->base.base.id, slave->base.name,
15354 		      master->base.base.id, master->base.name);
15355 	return -EINVAL;
15356 }
15357 
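/*
 * Break the bigjoiner link on both the master and slave crtc states and
 * turn the slave back into a normal crtc by resyncing its hw state from
 * the uapi state.
 */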
15358 static int kill_bigjoiner_slave(struct intel_atomic_state *state,
15359 				struct intel_crtc_state *master_crtc_state)
15360 {
15361 	struct intel_crtc_state *slave_crtc_state =
15362 		intel_atomic_get_crtc_state(&state->base,
15363 					    master_crtc_state->bigjoiner_linked_crtc);
15364 
15365 	if (IS_ERR(slave_crtc_state))
15366 		return PTR_ERR(slave_crtc_state);
15367 
15368 	slave_crtc_state->bigjoiner = master_crtc_state->bigjoiner = false;
15369 	slave_crtc_state->bigjoiner_slave = master_crtc_state->bigjoiner_slave = false;
15370 	slave_crtc_state->bigjoiner_linked_crtc = master_crtc_state->bigjoiner_linked_crtc = NULL;
15371 	intel_crtc_copy_uapi_to_hw_state(state, slave_crtc_state);
15372 	return 0;
15373 }
15374 
15375 /**
15376  * DOC: asynchronous flip implementation
15377  *
15378  * Asynchronous page flip is the implementation for the DRM_MODE_PAGE_FLIP_ASYNC
15379  * flag. Currently async flip is only supported via the drmModePageFlip IOCTL.
15380  * Correspondingly, support is currently added for the primary plane only.
15381  *
15382  * Async flip can only change the plane surface address, so anything else
15383  * changing is rejected from the intel_atomic_check_async() function.
15384  * Once this check is cleared, flip done interrupt is enabled using
15385  * the skl_enable_flip_done() function.
15386  *
15387  * As soon as the surface address register is written, flip done interrupt is
15388  * generated and the requested events are sent to userspace in the interrupt
15389  * handler itself. The timestamp and sequence sent during the flip done event
15390  * correspond to the last vblank and have no relation to the actual time when
15391  * the flip done event was sent.
15392  */
15393 static int intel_atomic_check_async(struct intel_atomic_state *state)
15394 {
15395 	struct drm_i915_private *i915 = to_i915(state->base.dev);
15396 	const struct intel_crtc_state *old_crtc_state, *new_crtc_state;
15397 	const struct intel_plane_state *new_plane_state, *old_plane_state;
15398 	struct intel_crtc *crtc;
15399 	struct intel_plane *plane;
15400 	int i;
15401 
15402 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
15403 					    new_crtc_state, i) {
15404 		if (needs_modeset(new_crtc_state)) {
15405 			drm_dbg_kms(&i915->drm, "Modeset Required. Async flip not supported\n");
15406 			return -EINVAL;
15407 		}
15408 
15409 		if (!new_crtc_state->hw.active) {
15410 			drm_dbg_kms(&i915->drm, "CRTC inactive\n");
15411 			return -EINVAL;
15412 		}
15413 		if (old_crtc_state->active_planes != new_crtc_state->active_planes) {
15414 			drm_dbg_kms(&i915->drm,
15415 				    "Active planes cannot be changed during async flip\n");
15416 			return -EINVAL;
15417 		}
15418 	}
15419 
15420 	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
15421 					     new_plane_state, i) {
15422 		/*
15423 		 * TODO: Async flip is only supported through the page flip IOCTL
15424 		 * as of now. So support is currently added for the primary plane only.
15425 		 * Support for other planes, on the platforms that support
15426 		 * this (vlv/chv and icl+), should be added when async flip is
15427 		 * enabled in the atomic IOCTL path.
15428 		 */
15429 		if (plane->id != PLANE_PRIMARY)
15430 			return -EINVAL;
15431 
15432 		/*
15433 		 * FIXME: This check is kept generic for all platforms.
15434 		 * Need to verify this for all gen9 and gen10 platforms to enable
15435 		 * this selectively if required.
15436 		 */
15437 		switch (new_plane_state->hw.fb->modifier) {
15438 		case I915_FORMAT_MOD_X_TILED:
15439 		case I915_FORMAT_MOD_Y_TILED:
15440 		case I915_FORMAT_MOD_Yf_TILED:
15441 			break;
15442 		default:
15443 			drm_dbg_kms(&i915->drm,
15444 				    "Linear memory/CCS does not support async flips\n");
15445 			return -EINVAL;
15446 		}
15447 
15448 		if (old_plane_state->color_plane[0].stride !=
15449 		    new_plane_state->color_plane[0].stride) {
15450 			drm_dbg_kms(&i915->drm, "Stride cannot be changed in async flip\n");
15451 			return -EINVAL;
15452 		}
15453 
15454 		if (old_plane_state->hw.fb->modifier !=
15455 		    new_plane_state->hw.fb->modifier) {
15456 			drm_dbg_kms(&i915->drm,
15457 				    "Framebuffer modifiers cannot be changed in async flip\n");
15458 			return -EINVAL;
15459 		}
15460 
15461 		if (old_plane_state->hw.fb->format !=
15462 		    new_plane_state->hw.fb->format) {
15463 			drm_dbg_kms(&i915->drm,
15464 				    "Framebuffer format cannot be changed in async flip\n");
15465 			return -EINVAL;
15466 		}
15467 
15468 		if (old_plane_state->hw.rotation !=
15469 		    new_plane_state->hw.rotation) {
15470 			drm_dbg_kms(&i915->drm, "Rotation cannot be changed in async flip\n");
15471 			return -EINVAL;
15472 		}
15473 
15474 		if (!drm_rect_equals(&old_plane_state->uapi.src, &new_plane_state->uapi.src) ||
15475 		    !drm_rect_equals(&old_plane_state->uapi.dst, &new_plane_state->uapi.dst)) {
15476 			drm_dbg_kms(&i915->drm,
15477 				    "Plane size/co-ordinates cannot be changed in async flip\n");
15478 			return -EINVAL;
15479 		}
15480 
15481 		if (old_plane_state->hw.alpha != new_plane_state->hw.alpha) {
15482 			drm_dbg_kms(&i915->drm, "Alpha value cannot be changed in async flip\n");
15483 			return -EINVAL;
15484 		}
15485 
15486 		if (old_plane_state->hw.pixel_blend_mode !=
15487 		    new_plane_state->hw.pixel_blend_mode) {
15488 			drm_dbg_kms(&i915->drm,
15489 				    "Pixel blend mode cannot be changed in async flip\n");
15490 			return -EINVAL;
15491 		}
15492 
15493 		if (old_plane_state->hw.color_encoding != new_plane_state->hw.color_encoding) {
15494 			drm_dbg_kms(&i915->drm,
15495 				    "Color encoding cannot be changed in async flip\n");
15496 			return -EINVAL;
15497 		}
15498 
15499 		if (old_plane_state->hw.color_range != new_plane_state->hw.color_range) {
15500 			drm_dbg_kms(&i915->drm, "Color range cannot be changed in async flip\n");
15501 			return -EINVAL;
15502 		}
15503 	}
15504 
15505 	return 0;
15506 }
15507 
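/*
 * Make sure the bigjoiner-linked crtc is pulled into @state whenever its
 * partner is, so that both halves of the pair get checked together.
 */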
15508 static int intel_bigjoiner_add_affected_crtcs(struct intel_atomic_state *state)
15509 {
15510 	const struct intel_crtc_state *crtc_state;
15511 	struct intel_crtc *crtc;
15512 	int i;
15513 
15514 	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
15515 		struct intel_crtc_state *linked_crtc_state;
15516 
15517 		if (!crtc_state->bigjoiner)
15518 			continue;
15519 
15520 		linked_crtc_state = intel_atomic_get_crtc_state(&state->base,
15521 								crtc_state->bigjoiner_linked_crtc);
15522 		if (IS_ERR(linked_crtc_state))
15523 			return PTR_ERR(linked_crtc_state);
15524 	}
15525 
15526 	return 0;
15527 }
15528 
15529 /**
15530  * intel_atomic_check - validate state object
15531  * @dev: drm device
15532  * @_state: state to validate
15533  */
15534 static int intel_atomic_check(struct drm_device *dev,
15535 			      struct drm_atomic_state *_state)
15536 {
15537 	struct drm_i915_private *dev_priv = to_i915(dev);
15538 	struct intel_atomic_state *state = to_intel_atomic_state(_state);
15539 	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
15540 	struct intel_crtc *crtc;
15541 	int ret, i;
15542 	bool any_ms = false;
15543 
15544 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
15545 					    new_crtc_state, i) {
15546 		if (new_crtc_state->inherited != old_crtc_state->inherited)
15547 			new_crtc_state->uapi.mode_changed = true;
15548 	}
15549 
15550 	ret = drm_atomic_helper_check_modeset(dev, &state->base);
15551 	if (ret)
15552 		goto fail;
15553 
15554 	ret = intel_bigjoiner_add_affected_crtcs(state);
15555 	if (ret)
15556 		goto fail;
15557 
15558 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
15559 					    new_crtc_state, i) {
15560 		if (!needs_modeset(new_crtc_state)) {
15561 			/* Light copy */
15562 			intel_crtc_copy_uapi_to_hw_state_nomodeset(state, new_crtc_state);
15563 
15564 			continue;
15565 		}
15566 
15567 		/* Kill old bigjoiner link, we may re-establish afterwards */
15568 		if (old_crtc_state->bigjoiner && !old_crtc_state->bigjoiner_slave) {
15569 			ret = kill_bigjoiner_slave(state, new_crtc_state);
15570 			if (ret)
15571 				goto fail;
15572 		}
15573 
15574 		if (!new_crtc_state->uapi.enable) {
15575 			if (!new_crtc_state->bigjoiner_slave) {
15576 				intel_crtc_copy_uapi_to_hw_state(state, new_crtc_state);
15577 				any_ms = true;
15578 			}
15579 			continue;
15580 		}
15581 
15582 		ret = intel_crtc_prepare_cleared_state(state, new_crtc_state);
15583 		if (ret)
15584 			goto fail;
15585 
15586 		ret = intel_modeset_pipe_config(state, new_crtc_state);
15587 		if (ret)
15588 			goto fail;
15589 
15590 		ret = intel_atomic_check_bigjoiner(state, crtc, old_crtc_state,
15591 						   new_crtc_state);
15592 		if (ret)
15593 			goto fail;
15594 	}
15595 
15596 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
15597 					    new_crtc_state, i) {
15598 		if (!needs_modeset(new_crtc_state))
15599 			continue;
15600 
15601 		ret = intel_modeset_pipe_config_late(new_crtc_state);
15602 		if (ret)
15603 			goto fail;
15604 
15605 		intel_crtc_check_fastset(old_crtc_state, new_crtc_state);
15606 	}
15607 
15608 	/*
15609 	 * Check if fastset is allowed by external dependencies like other
15610 	 * pipes and transcoders.
15611 	 *
15612 	 * Right now it only forces a fullmodeset when the MST master
15613 	 * transcoder did not change but the pipe of the master transcoder
15614 	 * needs a fullmodeset, in which case all slaves also need a
15615 	 * fullmodeset; and in case of port synced crtcs, if one of the
15616 	 * synced crtcs needs a full modeset, all other synced crtcs are
15617 	 * forced to do a full modeset as well.
15618 	 */
15619 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
15620 		if (!new_crtc_state->hw.enable || needs_modeset(new_crtc_state))
15621 			continue;
15622 
15623 		if (intel_dp_mst_is_slave_trans(new_crtc_state)) {
15624 			enum transcoder master = new_crtc_state->mst_master_transcoder;
15625 
15626 			if (intel_cpu_transcoders_need_modeset(state, BIT(master))) {
15627 				new_crtc_state->uapi.mode_changed = true;
15628 				new_crtc_state->update_pipe = false;
15629 			}
15630 		}
15631 
15632 		if (is_trans_port_sync_mode(new_crtc_state)) {
15633 			u8 trans = new_crtc_state->sync_mode_slaves_mask;
15634 
15635 			if (new_crtc_state->master_transcoder != INVALID_TRANSCODER)
15636 				trans |= BIT(new_crtc_state->master_transcoder);
15637 
15638 			if (intel_cpu_transcoders_need_modeset(state, trans)) {
15639 				new_crtc_state->uapi.mode_changed = true;
15640 				new_crtc_state->update_pipe = false;
15641 			}
15642 		}
15643 	}
15644 
15645 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
15646 					    new_crtc_state, i) {
15647 		if (needs_modeset(new_crtc_state)) {
15648 			any_ms = true;
15649 			continue;
15650 		}
15651 
15652 		if (!new_crtc_state->update_pipe)
15653 			continue;
15654 
15655 		intel_crtc_copy_fastset(old_crtc_state, new_crtc_state);
15656 	}
15657 
15658 	if (any_ms && !check_digital_port_conflicts(state)) {
15659 		drm_dbg_kms(&dev_priv->drm,
15660 			    "rejecting conflicting digital port configuration\n");
15661 		ret = -EINVAL;
15662 		goto fail;
15663 	}
15664 
15665 	ret = drm_dp_mst_atomic_check(&state->base);
15666 	if (ret)
15667 		goto fail;
15668 
15669 	ret = intel_atomic_check_planes(state);
15670 	if (ret)
15671 		goto fail;
15672 
15673 	/*
15674 	 * distrust_bios_wm will force a full dbuf recomputation
15675 	 * but the hardware state will only get updated accordingly
15676 	 * if state->modeset==true. Hence distrust_bios_wm==true &&
15677 	 * state->modeset==false is an invalid combination which
15678 	 * would cause the hardware and software dbuf state to get
15679 	 * out of sync. We must prevent that.
15680 	 *
15681 	 * FIXME clean up this mess and introduce better
15682 	 * state tracking for dbuf.
15683 	 */
15684 	if (dev_priv->wm.distrust_bios_wm)
15685 		any_ms = true;
15686 
15687 	intel_fbc_choose_crtc(dev_priv, state);
15688 	ret = calc_watermark_data(state);
15689 	if (ret)
15690 		goto fail;
15691 
15692 	ret = intel_bw_atomic_check(state);
15693 	if (ret)
15694 		goto fail;
15695 
15696 	ret = intel_atomic_check_cdclk(state, &any_ms);
15697 	if (ret)
15698 		goto fail;
15699 
15700 	if (any_ms) {
15701 		ret = intel_modeset_checks(state);
15702 		if (ret)
15703 			goto fail;
15704 
15705 		ret = intel_modeset_calc_cdclk(state);
15706 		if (ret)
15707 			return ret;
15708 
15709 		intel_modeset_clear_plls(state);
15710 	}
15711 
15712 	ret = intel_atomic_check_crtcs(state);
15713 	if (ret)
15714 		goto fail;
15715 
15716 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
15717 					    new_crtc_state, i) {
15718 		if (new_crtc_state->uapi.async_flip) {
15719 			ret = intel_atomic_check_async(state);
15720 			if (ret)
15721 				goto fail;
15722 		}
15723 
15724 		if (!needs_modeset(new_crtc_state) &&
15725 		    !new_crtc_state->update_pipe)
15726 			continue;
15727 
15728 		intel_dump_pipe_config(new_crtc_state, state,
15729 				       needs_modeset(new_crtc_state) ?
15730 				       "[modeset]" : "[fastset]");
15731 	}
15732 
15733 	return 0;
15734 
15735  fail:
15736 	if (ret == -EDEADLK)
15737 		return ret;
15738 
15739 	/*
15740 	 * FIXME would probably be nice to know which crtc specifically
15741 	 * caused the failure, in cases where we can pinpoint it.
15742 	 */
15743 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
15744 					    new_crtc_state, i)
15745 		intel_dump_pipe_config(new_crtc_state, state, "[failed]");
15746 
15747 	return ret;
15748 }
15749 
15750 static int intel_atomic_prepare_commit(struct intel_atomic_state *state)
15751 {
15752 	struct intel_crtc_state *crtc_state;
15753 	struct intel_crtc *crtc;
15754 	int i, ret;
15755 
15756 	ret = drm_atomic_helper_prepare_planes(state->base.dev, &state->base);
15757 	if (ret < 0)
15758 		return ret;
15759 
15760 	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
15761 		bool mode_changed = needs_modeset(crtc_state);
15762 
15763 		if (mode_changed || crtc_state->update_pipe ||
15764 		    crtc_state->uapi.color_mgmt_changed) {
15765 			intel_dsb_prepare(crtc_state);
15766 		}
15767 	}
15768 
15769 	return 0;
15770 }
15771 
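/*
 * Read the current vblank counter for @crtc: use the hw frame counter
 * where one exists, otherwise fall back to the accurate sw vblank count.
 */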
15772 u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
15773 {
15774 	struct drm_device *dev = crtc->base.dev;
15775 	struct drm_vblank_crtc *vblank = &dev->vblank[drm_crtc_index(&crtc->base)];
15776 
15777 	if (!vblank->max_vblank_count)
15778 		return (u32)drm_crtc_accurate_vblank_count(&crtc->base);
15779 
15780 	return crtc->base.funcs->get_vblank_counter(&crtc->base);
15781 }
15782 
15783 void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
15784 				  struct intel_crtc_state *crtc_state)
15785 {
15786 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
15787 
15788 	if (!IS_GEN(dev_priv, 2) || crtc_state->active_planes)
15789 		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
15790 
15791 	if (crtc_state->has_pch_encoder) {
15792 		enum pipe pch_transcoder =
15793 			intel_crtc_pch_transcoder(crtc);
15794 
15795 		intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
15796 	}
15797 }
15798 
15799 static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
15800 			       const struct intel_crtc_state *new_crtc_state)
15801 {
15802 	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
15803 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
15804 
15805 	/*
15806 	 * Update pipe size and adjust fitter if needed: the reason for this is
15807 	 * that in compute_mode_changes we check the native mode (not the pfit
15808 	 * mode) to see if we can flip rather than do a full mode set. In the
15809 	 * fastboot case, we'll flip, but if we don't update the pipesrc and
15810 	 * pfit state, we'll end up with a big fb scanned out into the wrong
15811 	 * sized surface.
15812 	 */
15813 	intel_set_pipe_src_size(new_crtc_state);
15814 
15815 	/* on skylake this is done by detaching scalers */
15816 	if (INTEL_GEN(dev_priv) >= 9) {
15817 		skl_detach_scalers(new_crtc_state);
15818 
15819 		if (new_crtc_state->pch_pfit.enabled)
15820 			skl_pfit_enable(new_crtc_state);
15821 	} else if (HAS_PCH_SPLIT(dev_priv)) {
15822 		if (new_crtc_state->pch_pfit.enabled)
15823 			ilk_pfit_enable(new_crtc_state);
15824 		else if (old_crtc_state->pch_pfit.enabled)
15825 			ilk_pfit_disable(old_crtc_state);
15826 	}
15827 
15828 	/*
15829 	 * The register is supposedly single buffered so perhaps
15830 	 * not 100% correct to do this here. But SKL+ calculate
15831 	 * this based on the adjust pixel rate so pfit changes do
15832 	 * affect it and so it must be updated for fastsets.
15833 	 * HSW/BDW only really need this here for fastboot, after
15834 	 * that the value should not change without a full modeset.
15835 	 */
15836 	if (INTEL_GEN(dev_priv) >= 9 ||
15837 	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
15838 		hsw_set_linetime_wm(new_crtc_state);
15839 
15840 	if (INTEL_GEN(dev_priv) >= 11)
15841 		icl_set_pipe_chicken(crtc);
15842 }
15843 
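/*
 * Program the per-pipe state during a commit. For fastsets this covers
 * color management, scalers, pipemisc and the panel fitter; for full
 * modesets all of this was already programmed while enabling the crtc.
 * The platform watermark hook is invoked in both cases.
 */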
15844 static void commit_pipe_config(struct intel_atomic_state *state,
15845 			       struct intel_crtc *crtc)
15846 {
15847 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
15848 	const struct intel_crtc_state *old_crtc_state =
15849 		intel_atomic_get_old_crtc_state(state, crtc);
15850 	const struct intel_crtc_state *new_crtc_state =
15851 		intel_atomic_get_new_crtc_state(state, crtc);
15852 	bool modeset = needs_modeset(new_crtc_state);
15853 
15854 	/*
15855 	 * During modesets the pipe configuration was programmed as the
15856 	 * CRTC was enabled.
15857 	 */
15858 	if (!modeset) {
15859 		if (new_crtc_state->uapi.color_mgmt_changed ||
15860 		    new_crtc_state->update_pipe)
15861 			intel_color_commit(new_crtc_state);
15862 
15863 		if (INTEL_GEN(dev_priv) >= 9)
15864 			skl_detach_scalers(new_crtc_state);
15865 
15866 		if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
15867 			bdw_set_pipemisc(new_crtc_state);
15868 
15869 		if (new_crtc_state->update_pipe)
15870 			intel_pipe_fastset(old_crtc_state, new_crtc_state);
15871 
15872 		intel_psr2_program_trans_man_trk_ctl(new_crtc_state);
15873 	}
15874 
15875 	if (dev_priv->display.atomic_update_watermarks)
15876 		dev_priv->display.atomic_update_watermarks(state, crtc);
15877 }
15878 
15879 static void intel_enable_crtc(struct intel_atomic_state *state,
15880 			      struct intel_crtc *crtc)
15881 {
15882 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
15883 	const struct intel_crtc_state *new_crtc_state =
15884 		intel_atomic_get_new_crtc_state(state, crtc);
15885 
15886 	if (!needs_modeset(new_crtc_state))
15887 		return;
15888 
15889 	intel_crtc_update_active_timings(new_crtc_state);
15890 
15891 	dev_priv->display.crtc_enable(state, crtc);
15892 
15893 	if (new_crtc_state->bigjoiner_slave)
15894 		return;
15895 
15896 	/* vblanks work again, re-enable pipe CRC. */
15897 	intel_crtc_enable_pipe_crc(crtc);
15898 }
15899 
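/*
 * Update a crtc that remains active (or was just enabled): reload LUTs
 * and encoder state for fastsets, toggle FBC as needed, and commit the
 * pipe and plane updates under vblank evasion.
 */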
15900 static void intel_update_crtc(struct intel_atomic_state *state,
15901 			      struct intel_crtc *crtc)
15902 {
15903 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
15904 	const struct intel_crtc_state *old_crtc_state =
15905 		intel_atomic_get_old_crtc_state(state, crtc);
15906 	struct intel_crtc_state *new_crtc_state =
15907 		intel_atomic_get_new_crtc_state(state, crtc);
15908 	bool modeset = needs_modeset(new_crtc_state);
15909 
15910 	if (!modeset) {
15911 		if (new_crtc_state->preload_luts &&
15912 		    (new_crtc_state->uapi.color_mgmt_changed ||
15913 		     new_crtc_state->update_pipe))
15914 			intel_color_load_luts(new_crtc_state);
15915 
15916 		intel_pre_plane_update(state, crtc);
15917 
15918 		if (new_crtc_state->update_pipe)
15919 			intel_encoders_update_pipe(state, crtc);
15920 	}
15921 
15922 	if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc)
15923 		intel_fbc_disable(crtc);
15924 	else
15925 		intel_fbc_enable(state, crtc);
15926 
15927 	/* Perform vblank evasion around commit operation */
15928 	intel_pipe_update_start(new_crtc_state);
15929 
15930 	commit_pipe_config(state, crtc);
15931 
15932 	if (INTEL_GEN(dev_priv) >= 9)
15933 		skl_update_planes_on_crtc(state, crtc);
15934 	else
15935 		i9xx_update_planes_on_crtc(state, crtc);
15936 
15937 	intel_pipe_update_end(new_crtc_state);
15938 
15939 	/*
15940 	 * We usually enable FIFO underrun interrupts as part of the
15941 	 * CRTC enable sequence during modesets.  But when we inherit a
15942 	 * valid pipe configuration from the BIOS we need to take care
15943 	 * of enabling them on the CRTC's first fastset.
15944 	 */
15945 	if (new_crtc_state->update_pipe && !modeset &&
15946 	    old_crtc_state->inherited)
15947 		intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
15948 }
15949 
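/*
 * Disable a crtc that is going away: planes first (including those of a
 * bigjoiner slave), then pipe CRC, the crtc itself, FBC and the shared
 * DPLL.
 */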
15950 static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
15951 					  struct intel_crtc_state *old_crtc_state,
15952 					  struct intel_crtc_state *new_crtc_state,
15953 					  struct intel_crtc *crtc)
15954 {
15955 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
15956 
15957 	drm_WARN_ON(&dev_priv->drm, old_crtc_state->bigjoiner_slave);
15958 
15959 	intel_crtc_disable_planes(state, crtc);
15960 
15961 	/*
15962 	 * We still need special handling for disabling bigjoiner master
15963 	 * and slaves since the slave has no encoder or PLLs of its own,
15964 	 * so we don't need to disable those.
15965 	 */
15966 	if (old_crtc_state->bigjoiner) {
15967 		intel_crtc_disable_planes(state,
15968 					  old_crtc_state->bigjoiner_linked_crtc);
15969 		old_crtc_state->bigjoiner_linked_crtc->active = false;
15970 	}
15971 
15972 	/*
15973 	 * We need to disable pipe CRC before disabling the pipe,
15974 	 * or we race against vblank off.
15975 	 */
15976 	intel_crtc_disable_pipe_crc(crtc);
15977 
15978 	dev_priv->display.crtc_disable(state, crtc);
15979 	crtc->active = false;
15980 	intel_fbc_disable(crtc);
15981 	intel_disable_shared_dpll(old_crtc_state);
15982 
15983 	/* FIXME unify this for all platforms */
15984 	if (!new_crtc_state->hw.active &&
15985 	    !HAS_GMCH(dev_priv) &&
15986 	    dev_priv->display.initial_watermarks)
15987 		dev_priv->display.initial_watermarks(state, crtc);
15988 }
15989 
15990 static void intel_commit_modeset_disables(struct intel_atomic_state *state)
15991 {
15992 	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
15993 	struct intel_crtc *crtc;
15994 	u32 handled = 0;
15995 	int i;
15996 
15997 	/* Only disable port sync and MST slaves */
15998 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
15999 					    new_crtc_state, i) {
16000 		if (!needs_modeset(new_crtc_state) || old_crtc_state->bigjoiner)
16001 			continue;
16002 
16003 		if (!old_crtc_state->hw.active)
16004 			continue;
16005 
16006 		/* In case of Transcoder Port Sync, master and slave CRTCs can
16007 		 * be assigned in any order, and we need to make sure that the
16008 		 * slave CRTCs are disabled first and the master CRTC after,
16009 		 * since slave vblanks are masked until the master's vblank.
16010 		 */
16011 		if (!is_trans_port_sync_slave(old_crtc_state) &&
16012 		    !intel_dp_mst_is_slave_trans(old_crtc_state))
16013 			continue;
16014 
16015 		intel_pre_plane_update(state, crtc);
16016 		intel_old_crtc_state_disables(state, old_crtc_state,
16017 					      new_crtc_state, crtc);
16018 		handled |= BIT(crtc->pipe);
16019 	}
16020 
16021 	/* Disable everything else left on */
16022 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
16023 					    new_crtc_state, i) {
16024 		if (!needs_modeset(new_crtc_state) ||
16025 		    (handled & BIT(crtc->pipe)) ||
16026 		    old_crtc_state->bigjoiner_slave)
16027 			continue;
16028 
16029 		intel_pre_plane_update(state, crtc);
16030 		if (old_crtc_state->bigjoiner) {
16031 			struct intel_crtc *slave =
16032 				old_crtc_state->bigjoiner_linked_crtc;
16033 
16034 			intel_pre_plane_update(state, slave);
16035 		}
16036 
16037 		if (old_crtc_state->hw.active)
16038 			intel_old_crtc_state_disables(state, old_crtc_state,
16039 						      new_crtc_state, crtc);
16040 	}
16041 }
16042 
16043 static void intel_commit_modeset_enables(struct intel_atomic_state *state)
16044 {
16045 	struct intel_crtc_state *new_crtc_state;
16046 	struct intel_crtc *crtc;
16047 	int i;
16048 
16049 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
16050 		if (!new_crtc_state->hw.active)
16051 			continue;
16052 
16053 		intel_enable_crtc(state, crtc);
16054 		intel_update_crtc(state, crtc);
16055 	}
16056 }
16057 
16058 static void skl_commit_modeset_enables(struct intel_atomic_state *state)
16059 {
16060 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
16061 	struct intel_crtc *crtc;
16062 	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
16063 	struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
16064 	u8 update_pipes = 0, modeset_pipes = 0;
16065 	int i;
16066 
16067 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
16068 		enum pipe pipe = crtc->pipe;
16069 
16070 		if (!new_crtc_state->hw.active)
16071 			continue;
16072 
16073 		/* ignore allocations for crtc's that have been turned off. */
16074 		if (!needs_modeset(new_crtc_state)) {
16075 			entries[pipe] = old_crtc_state->wm.skl.ddb;
16076 			update_pipes |= BIT(pipe);
16077 		} else {
16078 			modeset_pipes |= BIT(pipe);
16079 		}
16080 	}
16081 
16082 	/*
16083 	 * Whenever the number of active pipes changes, we need to make sure we
16084 	 * update the pipes in the right order so that their ddb allocations
16085 	 * never overlap with each other between CRTC updates. Otherwise we'll
16086 	 * cause pipe underruns and other bad stuff.
16087 	 *
16088 	 * So first let's enable all pipes that do not need a fullmodeset as
16089 	 * those don't have any external dependency.
16090 	 */
16091 	while (update_pipes) {
16092 		for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
16093 						    new_crtc_state, i) {
16094 			enum pipe pipe = crtc->pipe;
16095 
16096 			if ((update_pipes & BIT(pipe)) == 0)
16097 				continue;
16098 
16099 			if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
16100 							entries, I915_MAX_PIPES, pipe))
16101 				continue;
16102 
16103 			entries[pipe] = new_crtc_state->wm.skl.ddb;
16104 			update_pipes &= ~BIT(pipe);
16105 
16106 			intel_update_crtc(state, crtc);
16107 
16108 			/*
16109 			 * If this is an already active pipe, its DDB changed,
16110 			 * and this isn't the last pipe that needs updating
16111 			 * then we need to wait for a vblank to pass for the
16112 			 * new ddb allocation to take effect.
16113 			 */
16114 			if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb,
16115 						 &old_crtc_state->wm.skl.ddb) &&
16116 			    (update_pipes | modeset_pipes))
16117 				intel_wait_for_vblank(dev_priv, pipe);
16118 		}
16119 	}
16120 
16121 	update_pipes = modeset_pipes;
16122 
16123 	/*
16124 	 * Enable all pipes that need a modeset and do not depend on other
16125 	 * pipes.
16126 	 */
16127 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
16128 		enum pipe pipe = crtc->pipe;
16129 
16130 		if ((modeset_pipes & BIT(pipe)) == 0)
16131 			continue;
16132 
16133 		if (intel_dp_mst_is_slave_trans(new_crtc_state) ||
16134 		    is_trans_port_sync_master(new_crtc_state) ||
16135 		    (new_crtc_state->bigjoiner && !new_crtc_state->bigjoiner_slave))
16136 			continue;
16137 
16138 		modeset_pipes &= ~BIT(pipe);
16139 
16140 		intel_enable_crtc(state, crtc);
16141 	}
16142 
16143 	/*
16144 	 * Then we enable all remaining pipes that depend on other
16145 	 * pipes: MST slaves, port sync masters and the big joiner master.
16146 	 */
16147 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
16148 		enum pipe pipe = crtc->pipe;
16149 
16150 		if ((modeset_pipes & BIT(pipe)) == 0)
16151 			continue;
16152 
16153 		modeset_pipes &= ~BIT(pipe);
16154 
16155 		intel_enable_crtc(state, crtc);
16156 	}
16157 
16158 	/*
16159 	 * Finally we do the plane updates/etc. for all pipes that got enabled.
16160 	 */
16161 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
16162 		enum pipe pipe = crtc->pipe;
16163 
16164 		if ((update_pipes & BIT(pipe)) == 0)
16165 			continue;
16166 
16167 		drm_WARN_ON(&dev_priv->drm, skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
16168 									entries, I915_MAX_PIPES, pipe));
16169 
16170 		entries[pipe] = new_crtc_state->wm.skl.ddb;
16171 		update_pipes &= ~BIT(pipe);
16172 
16173 		intel_update_crtc(state, crtc);
16174 	}
16175 
16176 	drm_WARN_ON(&dev_priv->drm, modeset_pipes);
16177 	drm_WARN_ON(&dev_priv->drm, update_pipes);
16178 }
16179 
16180 static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
16181 {
16182 	struct intel_atomic_state *state, *next;
16183 	struct llist_node *freed;
16184 
16185 	freed = llist_del_all(&dev_priv->atomic_helper.free_list);
16186 	llist_for_each_entry_safe(state, next, freed, freed)
16187 		drm_atomic_state_put(&state->base);
16188 }
16189 
16190 static void intel_atomic_helper_free_state_worker(struct work_struct *work)
16191 {
16192 	struct drm_i915_private *dev_priv =
16193 		container_of(work, typeof(*dev_priv), atomic_helper.free_work);
16194 
16195 	intel_atomic_helper_free_state(dev_priv);
16196 }
16197 
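/*
 * Wait for the commit_ready fence to signal, but also wake up and bail
 * out if a modeset-affecting GPU reset starts, so that the commit cannot
 * deadlock against the reset path.
 */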
16198 static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
16199 {
16200 	struct wait_queue_entry wait_fence, wait_reset;
16201 	struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);
16202 
16203 	init_wait_entry(&wait_fence, 0);
16204 	init_wait_entry(&wait_reset, 0);
16205 	for (;;) {
16206 		prepare_to_wait(&intel_state->commit_ready.wait,
16207 				&wait_fence, TASK_UNINTERRUPTIBLE);
16208 		prepare_to_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
16209 					      I915_RESET_MODESET),
16210 				&wait_reset, TASK_UNINTERRUPTIBLE);
16211 
16212 
16213 		if (i915_sw_fence_done(&intel_state->commit_ready) ||
16214 		    test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
16215 			break;
16216 
16217 		schedule();
16218 	}
16219 	finish_wait(&intel_state->commit_ready.wait, &wait_fence);
16220 	finish_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
16221 				  I915_RESET_MODESET),
16222 		    &wait_reset);
16223 }
16224 
16225 static void intel_cleanup_dsbs(struct intel_atomic_state *state)
16226 {
16227 	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
16228 	struct intel_crtc *crtc;
16229 	int i;
16230 
16231 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
16232 					    new_crtc_state, i)
16233 		intel_dsb_cleanup(old_crtc_state);
16234 }
16235 
16236 static void intel_atomic_cleanup_work(struct work_struct *work)
16237 {
16238 	struct intel_atomic_state *state =
16239 		container_of(work, struct intel_atomic_state, base.commit_work);
16240 	struct drm_i915_private *i915 = to_i915(state->base.dev);
16241 
16242 	intel_cleanup_dsbs(state);
16243 	drm_atomic_helper_cleanup_planes(&i915->drm, &state->base);
16244 	drm_atomic_helper_commit_cleanup_done(&state->base);
16245 	drm_atomic_state_put(&state->base);
16246 
16247 	intel_atomic_helper_free_state(i915);
16248 }
16249 
16250 static void intel_atomic_commit_tail(struct intel_atomic_state *state)
16251 {
16252 	struct drm_device *dev = state->base.dev;
16253 	struct drm_i915_private *dev_priv = to_i915(dev);
16254 	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
16255 	struct intel_crtc *crtc;
16256 	u64 put_domains[I915_MAX_PIPES] = {};
16257 	intel_wakeref_t wakeref = 0;
16258 	int i;
16259 
16260 	intel_atomic_commit_fence_wait(state);
16261 
16262 	drm_atomic_helper_wait_for_dependencies(&state->base);
16263 
16264 	if (state->modeset)
16265 		wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
16266 
16267 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
16268 					    new_crtc_state, i) {
16269 		if (needs_modeset(new_crtc_state) ||
16270 		    new_crtc_state->update_pipe) {
16271 
16272 			put_domains[crtc->pipe] =
16273 				modeset_get_crtc_power_domains(new_crtc_state);
16274 		}
16275 	}
16276 
16277 	intel_commit_modeset_disables(state);
16278 
16279 	/* FIXME: Eventually get rid of our crtc->config pointer */
16280 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
16281 		crtc->config = new_crtc_state;
16282 
16283 	if (state->modeset) {
16284 		drm_atomic_helper_update_legacy_modeset_state(dev, &state->base);
16285 
16286 		intel_set_cdclk_pre_plane_update(state);
16287 
16288 		intel_modeset_verify_disabled(dev_priv, state);
16289 	}
16290 
16291 	intel_sagv_pre_plane_update(state);
16292 
16293 	/* Complete the events for pipes that have now been disabled */
16294 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
16295 		bool modeset = needs_modeset(new_crtc_state);
16296 
16297 		/* Complete events for now disabled pipes here. */
16298 		if (modeset && !new_crtc_state->hw.active && new_crtc_state->uapi.event) {
16299 			spin_lock_irq(&dev->event_lock);
16300 			drm_crtc_send_vblank_event(&crtc->base,
16301 						   new_crtc_state->uapi.event);
16302 			spin_unlock_irq(&dev->event_lock);
16303 
16304 			new_crtc_state->uapi.event = NULL;
16305 		}
16306 	}
16307 
16308 	if (state->modeset)
16309 		intel_encoders_update_prepare(state);
16310 
16311 	intel_dbuf_pre_plane_update(state);
16312 
16313 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
16314 		if (new_crtc_state->uapi.async_flip)
16315 			skl_enable_flip_done(crtc);
16316 	}
16317 
16318 	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
16319 	dev_priv->display.commit_modeset_enables(state);
16320 
16321 	if (state->modeset) {
16322 		intel_encoders_update_complete(state);
16323 
16324 		intel_set_cdclk_post_plane_update(state);
16325 	}
16326 
16327 	/* FIXME: We should call drm_atomic_helper_commit_hw_done() here
16328 	 * already, but still need the state for the delayed optimization. To
16329 	 * fix this:
16330 	 * - wrap the optimization/post_plane_update stuff into a per-crtc work.
16331 	 * - schedule that vblank worker _before_ calling hw_done
16332 	 * - at the start of commit_tail, cancel it _synchronously_
16333 	 * - switch over to the vblank wait helper in the core after that since
16334 	 *   we don't need our special handling any more.
16335 	 */
16336 	drm_atomic_helper_wait_for_flip_done(dev, &state->base);
16337 
16338 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
16339 		if (new_crtc_state->uapi.async_flip)
16340 			skl_disable_flip_done(crtc);
16341 
16342 		if (new_crtc_state->hw.active &&
16343 		    !needs_modeset(new_crtc_state) &&
16344 		    !new_crtc_state->preload_luts &&
16345 		    (new_crtc_state->uapi.color_mgmt_changed ||
16346 		     new_crtc_state->update_pipe))
16347 			intel_color_load_luts(new_crtc_state);
16348 	}
16349 
16350 	/*
16351 	 * Now that the vblank has passed, we can go ahead and program the
16352 	 * optimal watermarks on platforms that need two-step watermark
16353 	 * programming.
16354 	 *
16355 	 * TODO: Move this (and other cleanup) to an async worker eventually.
16356 	 */
16357 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
16358 					    new_crtc_state, i) {
16359 		/*
16360 		 * Gen2 reports pipe underruns whenever all planes are disabled.
16361 		 * So re-enable underrun reporting after some planes get enabled.
16362 		 *
16363 		 * We do this before .optimize_watermarks() so that we have a
16364 		 * chance of catching underruns with the intermediate watermarks
16365 		 * vs. the new plane configuration.
16366 		 */
16367 		if (IS_GEN(dev_priv, 2) && planes_enabling(old_crtc_state, new_crtc_state))
16368 			intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
16369 
16370 		if (dev_priv->display.optimize_watermarks)
16371 			dev_priv->display.optimize_watermarks(state, crtc);
16372 	}
16373 
16374 	intel_dbuf_post_plane_update(state);
16375 
16376 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
16377 		intel_post_plane_update(state, crtc);
16378 
16379 		if (put_domains[i])
16380 			modeset_put_power_domains(dev_priv, put_domains[i]);
16381 
16382 		intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
16383 
16384 		/*
16385 		 * DSB cleanup is done in cleanup_work, aligned with framebuffer
16386 		 * cleanup. So copy and reset the dsb structure to sync with
16387 		 * commit_done, and do the dsb cleanup later in cleanup_work.
16388 		 */
16389 		old_crtc_state->dsb = fetch_and_zero(&new_crtc_state->dsb);
16390 	}
16391 
16392 	/* Underruns don't always raise interrupts, so check manually */
16393 	intel_check_cpu_fifo_underruns(dev_priv);
16394 	intel_check_pch_fifo_underruns(dev_priv);
16395 
16396 	if (state->modeset)
16397 		intel_verify_planes(state);
16398 
16399 	intel_sagv_post_plane_update(state);
16400 
16401 	drm_atomic_helper_commit_hw_done(&state->base);
16402 
16403 	if (state->modeset) {
16404 		/* As one of the primary mmio accessors, KMS has a high
16405 		 * likelihood of triggering bugs in unclaimed access. After we
16406 		 * finish modesetting, see if an error has been flagged, and if
16407 		 * so enable debugging for the next modeset - and hope we catch
16408 		 * the culprit.
16409 		 */
16410 		intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
16411 		intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
16412 	}
16413 	intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
16414 
16415 	/*
16416 	 * Defer the cleanup of the old state to a separate worker to not
16417 	 * impede the current task (userspace for blocking modesets) that
16418 	 * is executed inline. For out-of-line asynchronous modesets/flips,
16419 	 * deferring to a new worker seems overkill, but we would place a
16420 	 * schedule point (cond_resched()) here anyway to keep latencies
16421 	 * down.
16422 	 */
16423 	INIT_WORK(&state->base.commit_work, intel_atomic_cleanup_work);
16424 	queue_work(system_highpri_wq, &state->base.commit_work);
16425 }
16426 
16427 static void intel_atomic_commit_work(struct work_struct *work)
16428 {
16429 	struct intel_atomic_state *state =
16430 		container_of(work, struct intel_atomic_state, base.commit_work);
16431 
16432 	intel_atomic_commit_tail(state);
16433 }
16434 
16435 static int __i915_sw_fence_call
16436 intel_atomic_commit_ready(struct i915_sw_fence *fence,
16437 			  enum i915_sw_fence_notify notify)
16438 {
16439 	struct intel_atomic_state *state =
16440 		container_of(fence, struct intel_atomic_state, commit_ready);
16441 
16442 	switch (notify) {
16443 	case FENCE_COMPLETE:
16444 		/* we do blocking waits in the worker, nothing to do here */
16445 		break;
16446 	case FENCE_FREE:
16447 		{
16448 			struct intel_atomic_helper *helper =
16449 				&to_i915(state->base.dev)->atomic_helper;
16450 
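			/*
			 * llist_add() returns true only for the first entry
			 * on an empty list, so the worker is kicked just once.
			 */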
16451 			if (llist_add(&state->freed, &helper->free_list))
16452 				schedule_work(&helper->free_work);
16453 			break;
16454 		}
16455 	}
16456 
16457 	return NOTIFY_DONE;
16458 }
16459 
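/*
 * Move the frontbuffer tracking bits from each plane's old framebuffer to
 * its new one, so that frontbuffer tracking consumers (FBC, PSR, etc.)
 * retain an accurate view of which fbs are being scanned out.
 */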
16460 static void intel_atomic_track_fbs(struct intel_atomic_state *state)
16461 {
16462 	struct intel_plane_state *old_plane_state, *new_plane_state;
16463 	struct intel_plane *plane;
16464 	int i;
16465 
16466 	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
16467 					     new_plane_state, i)
16468 		intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
16469 					to_intel_frontbuffer(new_plane_state->hw.fb),
16470 					plane->frontbuffer_bit);
16471 }
16472 
16473 static int intel_atomic_commit(struct drm_device *dev,
16474 			       struct drm_atomic_state *_state,
16475 			       bool nonblock)
16476 {
16477 	struct intel_atomic_state *state = to_intel_atomic_state(_state);
16478 	struct drm_i915_private *dev_priv = to_i915(dev);
16479 	int ret = 0;
16480 
16481 	state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
16482 
16483 	drm_atomic_state_get(&state->base);
16484 	i915_sw_fence_init(&state->commit_ready,
16485 			   intel_atomic_commit_ready);
16486 
16487 	/*
16488 	 * The intel_legacy_cursor_update() fast path takes care
16489 	 * of avoiding the vblank waits for simple cursor
16490 	 * movement and flips. For cursor on/off and size changes,
16491 	 * we want to perform the vblank waits so that watermark
16492 	 * updates happen during the correct frames. Gen9+ have
16493 	 * double buffered watermarks and so shouldn't need this.
16494 	 *
16495 	 * Unset state->legacy_cursor_update before the call to
16496 	 * drm_atomic_helper_setup_commit() because otherwise
16497 	 * drm_atomic_helper_wait_for_flip_done() is a noop and
16498 	 * we get FIFO underruns because we didn't wait
16499 	 * for vblank.
16500 	 *
16501 	 * FIXME doing watermarks and fb cleanup from a vblank worker
16502 	 * (assuming we had any) would solve these problems.
16503 	 */
16504 	if (INTEL_GEN(dev_priv) < 9 && state->base.legacy_cursor_update) {
16505 		struct intel_crtc_state *new_crtc_state;
16506 		struct intel_crtc *crtc;
16507 		int i;
16508 
16509 		for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
16510 			if (new_crtc_state->wm.need_postvbl_update ||
16511 			    new_crtc_state->update_wm_post)
16512 				state->base.legacy_cursor_update = false;
16513 	}
16514 
16515 	ret = intel_atomic_prepare_commit(state);
16516 	if (ret) {
16517 		drm_dbg_atomic(&dev_priv->drm,
16518 			       "Preparing state failed with %i\n", ret);
16519 		i915_sw_fence_commit(&state->commit_ready);
16520 		intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
16521 		return ret;
16522 	}
16523 
16524 	ret = drm_atomic_helper_setup_commit(&state->base, nonblock);
16525 	if (!ret)
16526 		ret = drm_atomic_helper_swap_state(&state->base, true);
16527 	if (!ret)
16528 		intel_atomic_swap_global_state(state);
16529 
16530 	if (ret) {
16531 		struct intel_crtc_state *new_crtc_state;
16532 		struct intel_crtc *crtc;
16533 		int i;
16534 
16535 		i915_sw_fence_commit(&state->commit_ready);
16536 
16537 		for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
16538 			intel_dsb_cleanup(new_crtc_state);
16539 
16540 		drm_atomic_helper_cleanup_planes(dev, &state->base);
16541 		intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
16542 		return ret;
16543 	}
16544 	dev_priv->wm.distrust_bios_wm = false;
16545 	intel_shared_dpll_swap_state(state);
16546 	intel_atomic_track_fbs(state);
16547 
16548 	drm_atomic_state_get(&state->base);
16549 	INIT_WORK(&state->base.commit_work, intel_atomic_commit_work);
16550 
16551 	i915_sw_fence_commit(&state->commit_ready);
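
	/*
	 * Nonblocking modesets and plain flips are run from their own
	 * workqueues; blocking commits run the tail inline, after flushing
	 * any nonblocking modesets still in flight.
	 */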
16552 	if (nonblock && state->modeset) {
16553 		queue_work(dev_priv->modeset_wq, &state->base.commit_work);
16554 	} else if (nonblock) {
16555 		queue_work(dev_priv->flip_wq, &state->base.commit_work);
16556 	} else {
16557 		if (state->modeset)
16558 			flush_workqueue(dev_priv->modeset_wq);
16559 		intel_atomic_commit_tail(state);
16560 	}
16561 
16562 	return 0;
16563 }
16564 
16565 struct wait_rps_boost {
16566 	struct wait_queue_entry wait;
16567 
16568 	struct drm_crtc *crtc;
16569 	struct i915_request *request;
16570 };
16571 
16572 static int do_rps_boost(struct wait_queue_entry *_wait,
16573 			unsigned mode, int sync, void *key)
16574 {
16575 	struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
16576 	struct i915_request *rq = wait->request;
16577 
16578 	/*
16579 	 * If we missed the vblank but the request is already running, it
16580 	 * is reasonable to assume that it will complete before the next
16581 	 * vblank without our intervention, so leave RPS alone.
16582 	 */
16583 	if (!i915_request_started(rq))
16584 		intel_rps_boost(rq);
16585 	i915_request_put(rq);
16586 
16587 	drm_crtc_vblank_put(wait->crtc);
16588 
16589 	list_del(&wait->wait.entry);
16590 	kfree(wait);
16591 	return 1;
16592 }
16593 
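/*
 * Queue a wait entry on the crtc's vblank waitqueue so that, if the fence
 * blocking this flip has not started executing by the time the vblank
 * fires, do_rps_boost() will bump the GPU clocks to help it make the
 * next frame.
 */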
16594 static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
16595 				       struct dma_fence *fence)
16596 {
16597 	struct wait_rps_boost *wait;
16598 
16599 	if (!dma_fence_is_i915(fence))
16600 		return;
16601 
16602 	if (INTEL_GEN(to_i915(crtc->dev)) < 6)
16603 		return;
16604 
16605 	if (drm_crtc_vblank_get(crtc))
16606 		return;
16607 
16608 	wait = kmalloc(sizeof(*wait), GFP_KERNEL);
16609 	if (!wait) {
16610 		drm_crtc_vblank_put(crtc);
16611 		return;
16612 	}
16613 
16614 	wait->request = to_request(dma_fence_get(fence));
16615 	wait->crtc = crtc;
16616 
16617 	wait->wait.func = do_rps_boost;
16618 	wait->wait.flags = 0;
16619 
16620 	add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
16621 }
16622 
16623 static int intel_plane_pin_fb(struct intel_plane_state *plane_state)
16624 {
16625 	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
16626 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
16627 	struct drm_framebuffer *fb = plane_state->hw.fb;
16628 	struct i915_vma *vma;
16629 
16630 	if (plane->id == PLANE_CURSOR &&
16631 	    INTEL_INFO(dev_priv)->display.cursor_needs_physical) {
16632 		struct drm_i915_gem_object *obj = intel_fb_obj(fb);
16633 		const int align = intel_cursor_alignment(dev_priv);
16634 		int err;
16635 
16636 		err = i915_gem_object_attach_phys(obj, align);
16637 		if (err)
16638 			return err;
16639 	}
16640 
16641 	vma = intel_pin_and_fence_fb_obj(fb,
16642 					 &plane_state->view,
16643 					 intel_plane_uses_fence(plane_state),
16644 					 &plane_state->flags);
16645 	if (IS_ERR(vma))
16646 		return PTR_ERR(vma);
16647 
16648 	plane_state->vma = vma;
16649 
16650 	return 0;
16651 }
16652 
16653 static void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
16654 {
16655 	struct i915_vma *vma;
16656 
16657 	vma = fetch_and_zero(&old_plane_state->vma);
16658 	if (vma)
16659 		intel_unpin_fb_vma(vma, old_plane_state->flags);
16660 }
16661 
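/*
 * Bump any render work on the fb's object to display priority so that a
 * flip does not sit waiting behind lower priority work.
 */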
16662 static void fb_obj_bump_render_priority(struct drm_i915_gem_object *obj)
16663 {
16664 	struct i915_sched_attr attr = {
16665 		.priority = I915_USER_PRIORITY(I915_PRIORITY_DISPLAY),
16666 	};
16667 
16668 	i915_gem_object_wait_priority(obj, 0, &attr);
16669 }
16670 
16671 /**
16672  * intel_prepare_plane_fb - Prepare fb for usage on plane
16673  * @_plane: drm plane to prepare for
16674  * @_new_plane_state: the plane state being prepared
16675  *
16676  * Prepares a framebuffer for usage on a display plane.  Generally this
16677  * involves pinning the underlying object and updating the frontbuffer tracking
16678  * bits.  Some older platforms need special physical address handling for
16679  * cursor planes.
16680  *
16681  * Returns 0 on success, negative error code on failure.
16682  */
16683 int
16684 intel_prepare_plane_fb(struct drm_plane *_plane,
16685 		       struct drm_plane_state *_new_plane_state)
16686 {
16687 	struct intel_plane *plane = to_intel_plane(_plane);
16688 	struct intel_plane_state *new_plane_state =
16689 		to_intel_plane_state(_new_plane_state);
16690 	struct intel_atomic_state *state =
16691 		to_intel_atomic_state(new_plane_state->uapi.state);
16692 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
16693 	const struct intel_plane_state *old_plane_state =
16694 		intel_atomic_get_old_plane_state(state, plane);
16695 	struct drm_i915_gem_object *obj = intel_fb_obj(new_plane_state->hw.fb);
16696 	struct drm_i915_gem_object *old_obj = intel_fb_obj(old_plane_state->hw.fb);
16697 	int ret;
16698 
16699 	if (old_obj) {
16700 		const struct intel_crtc_state *crtc_state =
16701 			intel_atomic_get_new_crtc_state(state,
16702 							to_intel_crtc(old_plane_state->hw.crtc));
16703 
16704 		/* Big Hammer, we also need to ensure that any pending
16705 		 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
16706 		 * current scanout is retired before unpinning the old
16707 		 * framebuffer. Note that we rely on userspace rendering
16708 		 * into the buffer attached to the pipe they are waiting
16709 		 * on. If not, userspace generates a GPU hang with IPEHR
16710 		 * pointing to the MI_WAIT_FOR_EVENT.
16711 		 *
16712 		 * This should only fail upon a hung GPU, in which case we
16713 		 * can safely continue.
16714 		 */
16715 		if (needs_modeset(crtc_state)) {
16716 			ret = i915_sw_fence_await_reservation(&state->commit_ready,
16717 							      old_obj->base.resv, NULL,
16718 							      false, 0,
16719 							      GFP_KERNEL);
16720 			if (ret < 0)
16721 				return ret;
16722 		}
16723 	}
16724 
16725 	if (new_plane_state->uapi.fence) { /* explicit fencing */
16726 		ret = i915_sw_fence_await_dma_fence(&state->commit_ready,
16727 						    new_plane_state->uapi.fence,
16728 						    i915_fence_timeout(dev_priv),
16729 						    GFP_KERNEL);
16730 		if (ret < 0)
16731 			return ret;
16732 	}
16733 
16734 	if (!obj)
16735 		return 0;
16736 
16737 	ret = i915_gem_object_pin_pages(obj);
16738 	if (ret)
16739 		return ret;
16740 
16741 	ret = intel_plane_pin_fb(new_plane_state);
16742 
16743 	i915_gem_object_unpin_pages(obj);
16744 	if (ret)
16745 		return ret;
16746 
16747 	fb_obj_bump_render_priority(obj);
16748 	i915_gem_object_flush_frontbuffer(obj, ORIGIN_DIRTYFB);
16749 
16750 	if (!new_plane_state->uapi.fence) { /* implicit fencing */
16751 		struct dma_fence *fence;
16752 
16753 		ret = i915_sw_fence_await_reservation(&state->commit_ready,
16754 						      obj->base.resv, NULL,
16755 						      false,
16756 						      i915_fence_timeout(dev_priv),
16757 						      GFP_KERNEL);
16758 		if (ret < 0)
16759 			goto unpin_fb;
16760 
16761 		fence = dma_resv_get_excl_rcu(obj->base.resv);
16762 		if (fence) {
16763 			add_rps_boost_after_vblank(new_plane_state->hw.crtc,
16764 						   fence);
16765 			dma_fence_put(fence);
16766 		}
16767 	} else {
16768 		add_rps_boost_after_vblank(new_plane_state->hw.crtc,
16769 					   new_plane_state->uapi.fence);
16770 	}
16771 
16772 	/*
16773 	 * We declare pageflips to be interactive and so merit a small bias
16774 	 * towards upclocking to deliver the frame on time. By only changing
16775 	 * the RPS thresholds to sample more regularly and aim for higher
16776 	 * clocks we can hopefully deliver low power workloads (like kodi)
16777 	 * that are not quite steady state without resorting to forcing
16778 	 * maximum clocks following a vblank miss (see do_rps_boost()).
16779 	 */
16780 	if (!state->rps_interactive) {
16781 		intel_rps_mark_interactive(&dev_priv->gt.rps, true);
16782 		state->rps_interactive = true;
16783 	}
16784 
16785 	return 0;
16786 
16787 unpin_fb:
16788 	intel_plane_unpin_fb(new_plane_state);
16789 
16790 	return ret;
16791 }
16792 
16793 /**
16794  * intel_cleanup_plane_fb - Cleans up an fb after plane use
16795  * @plane: drm plane to clean up for
16796  * @_old_plane_state: the state from the previous modeset
16797  *
16798  * Cleans up a framebuffer that has just been removed from a plane.
16799  */
16800 void
16801 intel_cleanup_plane_fb(struct drm_plane *plane,
16802 		       struct drm_plane_state *_old_plane_state)
16803 {
16804 	struct intel_plane_state *old_plane_state =
16805 		to_intel_plane_state(_old_plane_state);
16806 	struct intel_atomic_state *state =
16807 		to_intel_atomic_state(old_plane_state->uapi.state);
16808 	struct drm_i915_private *dev_priv = to_i915(plane->dev);
16809 	struct drm_i915_gem_object *obj = intel_fb_obj(old_plane_state->hw.fb);
16810 
16811 	if (!obj)
16812 		return;
16813 
16814 	if (state->rps_interactive) {
16815 		intel_rps_mark_interactive(&dev_priv->gt.rps, false);
16816 		state->rps_interactive = false;
16817 	}
16818 
16819 	/* Should only be called after a successful intel_prepare_plane_fb()! */
16820 	intel_plane_unpin_fb(old_plane_state);
16821 }
16822 
16823 /**
16824  * intel_plane_destroy - destroy a plane
16825  * @plane: plane to destroy
16826  *
16827  * Common destruction function for all types of planes (primary, cursor,
16828  * sprite).
16829  */
16830 void intel_plane_destroy(struct drm_plane *plane)
16831 {
16832 	drm_plane_cleanup(plane);
16833 	kfree(to_intel_plane(plane));
16834 }
16835 
16836 static bool i8xx_plane_format_mod_supported(struct drm_plane *_plane,
16837 					    u32 format, u64 modifier)
16838 {
16839 	switch (modifier) {
16840 	case DRM_FORMAT_MOD_LINEAR:
16841 	case I915_FORMAT_MOD_X_TILED:
16842 		break;
16843 	default:
16844 		return false;
16845 	}
16846 
16847 	switch (format) {
16848 	case DRM_FORMAT_C8:
16849 	case DRM_FORMAT_RGB565:
16850 	case DRM_FORMAT_XRGB1555:
16851 	case DRM_FORMAT_XRGB8888:
16852 		return modifier == DRM_FORMAT_MOD_LINEAR ||
16853 			modifier == I915_FORMAT_MOD_X_TILED;
16854 	default:
16855 		return false;
16856 	}
16857 }
16858 
16859 static bool i965_plane_format_mod_supported(struct drm_plane *_plane,
16860 					    u32 format, u64 modifier)
16861 {
16862 	switch (modifier) {
16863 	case DRM_FORMAT_MOD_LINEAR:
16864 	case I915_FORMAT_MOD_X_TILED:
16865 		break;
16866 	default:
16867 		return false;
16868 	}
16869 
16870 	switch (format) {
16871 	case DRM_FORMAT_C8:
16872 	case DRM_FORMAT_RGB565:
16873 	case DRM_FORMAT_XRGB8888:
16874 	case DRM_FORMAT_XBGR8888:
16875 	case DRM_FORMAT_ARGB8888:
16876 	case DRM_FORMAT_ABGR8888:
16877 	case DRM_FORMAT_XRGB2101010:
16878 	case DRM_FORMAT_XBGR2101010:
16879 	case DRM_FORMAT_ARGB2101010:
16880 	case DRM_FORMAT_ABGR2101010:
16881 	case DRM_FORMAT_XBGR16161616F:
16882 		return modifier == DRM_FORMAT_MOD_LINEAR ||
16883 			modifier == I915_FORMAT_MOD_X_TILED;
16884 	default:
16885 		return false;
16886 	}
16887 }
16888 
16889 static bool intel_cursor_format_mod_supported(struct drm_plane *_plane,
16890 					      u32 format, u64 modifier)
16891 {
16892 	return modifier == DRM_FORMAT_MOD_LINEAR &&
16893 		format == DRM_FORMAT_ARGB8888;
16894 }
16895 
16896 static const struct drm_plane_funcs i965_plane_funcs = {
16897 	.update_plane = drm_atomic_helper_update_plane,
16898 	.disable_plane = drm_atomic_helper_disable_plane,
16899 	.destroy = intel_plane_destroy,
16900 	.atomic_duplicate_state = intel_plane_duplicate_state,
16901 	.atomic_destroy_state = intel_plane_destroy_state,
16902 	.format_mod_supported = i965_plane_format_mod_supported,
16903 };
16904 
16905 static const struct drm_plane_funcs i8xx_plane_funcs = {
16906 	.update_plane = drm_atomic_helper_update_plane,
16907 	.disable_plane = drm_atomic_helper_disable_plane,
16908 	.destroy = intel_plane_destroy,
16909 	.atomic_duplicate_state = intel_plane_duplicate_state,
16910 	.atomic_destroy_state = intel_plane_destroy_state,
16911 	.format_mod_supported = i8xx_plane_format_mod_supported,
16912 };
16913 
16914 static int
16915 intel_legacy_cursor_update(struct drm_plane *_plane,
16916 			   struct drm_crtc *_crtc,
16917 			   struct drm_framebuffer *fb,
16918 			   int crtc_x, int crtc_y,
16919 			   unsigned int crtc_w, unsigned int crtc_h,
16920 			   u32 src_x, u32 src_y,
16921 			   u32 src_w, u32 src_h,
16922 			   struct drm_modeset_acquire_ctx *ctx)
16923 {
16924 	struct intel_plane *plane = to_intel_plane(_plane);
16925 	struct intel_crtc *crtc = to_intel_crtc(_crtc);
16926 	struct intel_plane_state *old_plane_state =
16927 		to_intel_plane_state(plane->base.state);
16928 	struct intel_plane_state *new_plane_state;
16929 	struct intel_crtc_state *crtc_state =
16930 		to_intel_crtc_state(crtc->base.state);
16931 	struct intel_crtc_state *new_crtc_state;
16932 	int ret;
16933 
16934 	/*
16935 	 * When the crtc is inactive or there is a modeset pending,
16936 	 * wait for it to complete in the slowpath.
16937 	 *
16938 	 * FIXME bigjoiner fastpath would be good
16939 	 */
16940 	if (!crtc_state->hw.active || needs_modeset(crtc_state) ||
16941 	    crtc_state->update_pipe || crtc_state->bigjoiner)
16942 		goto slow;
16943 
16944 	/*
16945 	 * Don't do an async update if there is an outstanding commit modifying
16946 	 * the plane.  This prevents our async update's changes from getting
16947 	 * overridden by a previous synchronous update's state.
16948 	 */
16949 	if (old_plane_state->uapi.commit &&
16950 	    !try_wait_for_completion(&old_plane_state->uapi.commit->hw_done))
16951 		goto slow;
16952 
16953 	/*
16954 	 * If any parameters change that may affect watermarks,
16955 	 * take the slowpath. Only changing fb or position should be
16956 	 * in the fastpath.
16957 	 */
16958 	if (old_plane_state->uapi.crtc != &crtc->base ||
16959 	    old_plane_state->uapi.src_w != src_w ||
16960 	    old_plane_state->uapi.src_h != src_h ||
16961 	    old_plane_state->uapi.crtc_w != crtc_w ||
16962 	    old_plane_state->uapi.crtc_h != crtc_h ||
16963 	    !old_plane_state->uapi.fb != !fb)
16964 		goto slow;
16965 
16966 	new_plane_state = to_intel_plane_state(intel_plane_duplicate_state(&plane->base));
16967 	if (!new_plane_state)
16968 		return -ENOMEM;
16969 
16970 	new_crtc_state = to_intel_crtc_state(intel_crtc_duplicate_state(&crtc->base));
16971 	if (!new_crtc_state) {
16972 		ret = -ENOMEM;
16973 		goto out_free;
16974 	}
16975 
16976 	drm_atomic_set_fb_for_plane(&new_plane_state->uapi, fb);
16977 
16978 	new_plane_state->uapi.src_x = src_x;
16979 	new_plane_state->uapi.src_y = src_y;
16980 	new_plane_state->uapi.src_w = src_w;
16981 	new_plane_state->uapi.src_h = src_h;
16982 	new_plane_state->uapi.crtc_x = crtc_x;
16983 	new_plane_state->uapi.crtc_y = crtc_y;
16984 	new_plane_state->uapi.crtc_w = crtc_w;
16985 	new_plane_state->uapi.crtc_h = crtc_h;
16986 
16987 	intel_plane_copy_uapi_to_hw_state(new_plane_state, new_plane_state, crtc);
16988 
16989 	ret = intel_plane_atomic_check_with_state(crtc_state, new_crtc_state,
16990 						  old_plane_state, new_plane_state);
16991 	if (ret)
16992 		goto out_free;
16993 
16994 	ret = intel_plane_pin_fb(new_plane_state);
16995 	if (ret)
16996 		goto out_free;
16997 
16998 	intel_frontbuffer_flush(to_intel_frontbuffer(new_plane_state->hw.fb),
16999 				ORIGIN_FLIP);
17000 	intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
17001 				to_intel_frontbuffer(new_plane_state->hw.fb),
17002 				plane->frontbuffer_bit);
17003 
17004 	/* Swap plane state */
17005 	plane->base.state = &new_plane_state->uapi;
17006 
17007 	/*
17008 	 * We cannot swap crtc_state as it may be in use by an atomic commit or
17009 	 * page flip that's running simultaneously. If we swap crtc_state and
17010 	 * destroy the old state, we will cause a use-after-free there.
17011 	 *
17012 	 * Only update active_planes, which is needed for our internal
17013 	 * bookkeeping. Either value will do the right thing when updating
17014 	 * planes atomically. If the cursor was part of the atomic update then
17015 	 * we would have taken the slowpath.
17016 	 */
17017 	crtc_state->active_planes = new_crtc_state->active_planes;
17018 
17019 	if (new_plane_state->uapi.visible)
17020 		intel_update_plane(plane, crtc_state, new_plane_state);
17021 	else
17022 		intel_disable_plane(plane, crtc_state);
17023 
17024 	intel_plane_unpin_fb(old_plane_state);
17025 
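	/*
	 * On success the old plane state is released and the new one lives on
	 * in plane->base.state; on failure the roles are reversed. The
	 * duplicated crtc state was only needed for the atomic check.
	 */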
17026 out_free:
17027 	if (new_crtc_state)
17028 		intel_crtc_destroy_state(&crtc->base, &new_crtc_state->uapi);
17029 	if (ret)
17030 		intel_plane_destroy_state(&plane->base, &new_plane_state->uapi);
17031 	else
17032 		intel_plane_destroy_state(&plane->base, &old_plane_state->uapi);
17033 	return ret;
17034 
17035 slow:
17036 	return drm_atomic_helper_update_plane(&plane->base, &crtc->base, fb,
17037 					      crtc_x, crtc_y, crtc_w, crtc_h,
17038 					      src_x, src_y, src_w, src_h, ctx);
17039 }
17040 
17041 static const struct drm_plane_funcs intel_cursor_plane_funcs = {
17042 	.update_plane = intel_legacy_cursor_update,
17043 	.disable_plane = drm_atomic_helper_disable_plane,
17044 	.destroy = intel_plane_destroy,
17045 	.atomic_duplicate_state = intel_plane_duplicate_state,
17046 	.atomic_destroy_state = intel_plane_destroy_state,
17047 	.format_mod_supported = intel_cursor_format_mod_supported,
17048 };
17049 
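/* Only a subset of the primary planes can do FBC; which ones depends on the platform. */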
17050 static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv,
17051 			       enum i9xx_plane_id i9xx_plane)
17052 {
17053 	if (!HAS_FBC(dev_priv))
17054 		return false;
17055 
17056 	if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
17057 		return i9xx_plane == PLANE_A; /* tied to pipe A */
17058 	else if (IS_IVYBRIDGE(dev_priv))
17059 		return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B ||
17060 			i9xx_plane == PLANE_C;
17061 	else if (INTEL_GEN(dev_priv) >= 4)
17062 		return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B;
17063 	else
17064 		return i9xx_plane == PLANE_A;
17065 }
17066 
17067 static struct intel_plane *
17068 intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
17069 {
17070 	struct intel_plane *plane;
17071 	const struct drm_plane_funcs *plane_funcs;
17072 	unsigned int supported_rotations;
17073 	const u32 *formats;
17074 	int num_formats;
17075 	int ret, zpos;
17076 
17077 	if (INTEL_GEN(dev_priv) >= 9)
17078 		return skl_universal_plane_create(dev_priv, pipe,
17079 						  PLANE_PRIMARY);
17080 
17081 	plane = intel_plane_alloc();
17082 	if (IS_ERR(plane))
17083 		return plane;
17084 
17085 	plane->pipe = pipe;
17086 	/*
17087 	 * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS
17088 	 * port are hooked to pipe B. Hence we want plane A feeding pipe B.
17089 	 */
17090 	if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4 &&
17091 	    INTEL_NUM_PIPES(dev_priv) == 2)
17092 		plane->i9xx_plane = (enum i9xx_plane_id) !pipe;
17093 	else
17094 		plane->i9xx_plane = (enum i9xx_plane_id) pipe;
17095 	plane->id = PLANE_PRIMARY;
17096 	plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id);
17097 
17098 	plane->has_fbc = i9xx_plane_has_fbc(dev_priv, plane->i9xx_plane);
17099 	if (plane->has_fbc) {
17100 		struct intel_fbc *fbc = &dev_priv->fbc;
17101 
17102 		fbc->possible_framebuffer_bits |= plane->frontbuffer_bit;
17103 	}
17104 
17105 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
17106 		formats = vlv_primary_formats;
17107 		num_formats = ARRAY_SIZE(vlv_primary_formats);
17108 	} else if (INTEL_GEN(dev_priv) >= 4) {
17109 		/*
17110 		 * WaFP16GammaEnabling:ivb
17111 		 * "Workaround : When using the 64-bit format, the plane
17112 		 *  output on each color channel has one quarter amplitude.
17113 		 *  It can be brought up to full amplitude by using pipe
17114 		 *  gamma correction or pipe color space conversion to
17115 		 *  multiply the plane output by four."
17116 		 *
17117 		 * There is no dedicated plane gamma for the primary plane,
17118 		 * and using the pipe gamma/csc could conflict with other
17119 		 * planes, so we choose not to expose fp16 on IVB primary
17120 		 * planes. HSW primary planes no longer have this problem.
17121 		 */
17122 		if (IS_IVYBRIDGE(dev_priv)) {
17123 			formats = ivb_primary_formats;
17124 			num_formats = ARRAY_SIZE(ivb_primary_formats);
17125 		} else {
17126 			formats = i965_primary_formats;
17127 			num_formats = ARRAY_SIZE(i965_primary_formats);
17128 		}
17129 	} else {
17130 		formats = i8xx_primary_formats;
17131 		num_formats = ARRAY_SIZE(i8xx_primary_formats);
17132 	}
17133 
17134 	if (INTEL_GEN(dev_priv) >= 4)
17135 		plane_funcs = &i965_plane_funcs;
17136 	else
17137 		plane_funcs = &i8xx_plane_funcs;
17138 
17139 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
17140 		plane->min_cdclk = vlv_plane_min_cdclk;
17141 	else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
17142 		plane->min_cdclk = hsw_plane_min_cdclk;
17143 	else if (IS_IVYBRIDGE(dev_priv))
17144 		plane->min_cdclk = ivb_plane_min_cdclk;
17145 	else
17146 		plane->min_cdclk = i9xx_plane_min_cdclk;
17147 
17148 	plane->max_stride = i9xx_plane_max_stride;
17149 	plane->update_plane = i9xx_update_plane;
17150 	plane->disable_plane = i9xx_disable_plane;
17151 	plane->get_hw_state = i9xx_plane_get_hw_state;
17152 	plane->check_plane = i9xx_plane_check;
17153 
17154 	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
17155 		ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
17156 					       0, plane_funcs,
17157 					       formats, num_formats,
17158 					       i9xx_format_modifiers,
17159 					       DRM_PLANE_TYPE_PRIMARY,
17160 					       "primary %c", pipe_name(pipe));
17161 	else
17162 		ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
17163 					       0, plane_funcs,
17164 					       formats, num_formats,
17165 					       i9xx_format_modifiers,
17166 					       DRM_PLANE_TYPE_PRIMARY,
17167 					       "plane %c",
17168 					       plane_name(plane->i9xx_plane));
17169 	if (ret)
17170 		goto fail;
17171 
17172 	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
17173 		supported_rotations =
17174 			DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
17175 			DRM_MODE_REFLECT_X;
17176 	} else if (INTEL_GEN(dev_priv) >= 4) {
17177 		supported_rotations =
17178 			DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180;
17179 	} else {
17180 		supported_rotations = DRM_MODE_ROTATE_0;
17181 	}
17182 
17183 	if (INTEL_GEN(dev_priv) >= 4)
17184 		drm_plane_create_rotation_property(&plane->base,
17185 						   DRM_MODE_ROTATE_0,
17186 						   supported_rotations);
17187 
17188 	zpos = 0;
17189 	drm_plane_create_zpos_immutable_property(&plane->base, zpos);
17190 
17191 	drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
17192 
17193 	return plane;
17194 
17195 fail:
17196 	intel_plane_free(plane);
17197 
17198 	return ERR_PTR(ret);
17199 }
17200 
17201 static struct intel_plane *
17202 intel_cursor_plane_create(struct drm_i915_private *dev_priv,
17203 			  enum pipe pipe)
17204 {
17205 	struct intel_plane *cursor;
17206 	int ret, zpos;
17207 
17208 	cursor = intel_plane_alloc();
17209 	if (IS_ERR(cursor))
17210 		return cursor;
17211 
17212 	cursor->pipe = pipe;
17213 	cursor->i9xx_plane = (enum i9xx_plane_id) pipe;
17214 	cursor->id = PLANE_CURSOR;
17215 	cursor->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, cursor->id);
17216 
17217 	if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
17218 		cursor->max_stride = i845_cursor_max_stride;
17219 		cursor->update_plane = i845_update_cursor;
17220 		cursor->disable_plane = i845_disable_cursor;
17221 		cursor->get_hw_state = i845_cursor_get_hw_state;
17222 		cursor->check_plane = i845_check_cursor;
17223 	} else {
17224 		cursor->max_stride = i9xx_cursor_max_stride;
17225 		cursor->update_plane = i9xx_update_cursor;
17226 		cursor->disable_plane = i9xx_disable_cursor;
17227 		cursor->get_hw_state = i9xx_cursor_get_hw_state;
17228 		cursor->check_plane = i9xx_check_cursor;
17229 	}
17230 
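	/* Invalidate the cached cursor state so the first update rewrites it all */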
17231 	cursor->cursor.base = ~0;
17232 	cursor->cursor.cntl = ~0;
17233 
17234 	if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv))
17235 		cursor->cursor.size = ~0;
17236 
17237 	ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
17238 				       0, &intel_cursor_plane_funcs,
17239 				       intel_cursor_formats,
17240 				       ARRAY_SIZE(intel_cursor_formats),
17241 				       cursor_format_modifiers,
17242 				       DRM_PLANE_TYPE_CURSOR,
17243 				       "cursor %c", pipe_name(pipe));
17244 	if (ret)
17245 		goto fail;
17246 
17247 	if (INTEL_GEN(dev_priv) >= 4)
17248 		drm_plane_create_rotation_property(&cursor->base,
17249 						   DRM_MODE_ROTATE_0,
17250 						   DRM_MODE_ROTATE_0 |
17251 						   DRM_MODE_ROTATE_180);
17252 
17253 	zpos = RUNTIME_INFO(dev_priv)->num_sprites[pipe] + 1;
17254 	drm_plane_create_zpos_immutable_property(&cursor->base, zpos);
17255 
17256 	if (INTEL_GEN(dev_priv) >= 12)
17257 		drm_plane_enable_fb_damage_clips(&cursor->base);
17258 
17259 	drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);
17260 
17261 	return cursor;
17262 
17263 fail:
17264 	intel_plane_free(cursor);
17265 
17266 	return ERR_PTR(ret);
17267 }
17268 
17269 #define INTEL_CRTC_FUNCS \
17270 	.gamma_set = drm_atomic_helper_legacy_gamma_set, \
17271 	.set_config = drm_atomic_helper_set_config, \
17272 	.destroy = intel_crtc_destroy, \
17273 	.page_flip = drm_atomic_helper_page_flip, \
17274 	.atomic_duplicate_state = intel_crtc_duplicate_state, \
17275 	.atomic_destroy_state = intel_crtc_destroy_state, \
17276 	.set_crc_source = intel_crtc_set_crc_source, \
17277 	.verify_crc_source = intel_crtc_verify_crc_source, \
17278 	.get_crc_sources = intel_crtc_get_crc_sources
17279 
17280 static const struct drm_crtc_funcs bdw_crtc_funcs = {
17281 	INTEL_CRTC_FUNCS,
17282 
17283 	.get_vblank_counter = g4x_get_vblank_counter,
17284 	.enable_vblank = bdw_enable_vblank,
17285 	.disable_vblank = bdw_disable_vblank,
17286 	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
17287 };
17288 
17289 static const struct drm_crtc_funcs ilk_crtc_funcs = {
17290 	INTEL_CRTC_FUNCS,
17291 
17292 	.get_vblank_counter = g4x_get_vblank_counter,
17293 	.enable_vblank = ilk_enable_vblank,
17294 	.disable_vblank = ilk_disable_vblank,
17295 	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
17296 };
17297 
17298 static const struct drm_crtc_funcs g4x_crtc_funcs = {
17299 	INTEL_CRTC_FUNCS,
17300 
17301 	.get_vblank_counter = g4x_get_vblank_counter,
17302 	.enable_vblank = i965_enable_vblank,
17303 	.disable_vblank = i965_disable_vblank,
17304 	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
17305 };
17306 
17307 static const struct drm_crtc_funcs i965_crtc_funcs = {
17308 	INTEL_CRTC_FUNCS,
17309 
17310 	.get_vblank_counter = i915_get_vblank_counter,
17311 	.enable_vblank = i965_enable_vblank,
17312 	.disable_vblank = i965_disable_vblank,
17313 	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
17314 };
17315 
17316 static const struct drm_crtc_funcs i915gm_crtc_funcs = {
17317 	INTEL_CRTC_FUNCS,
17318 
17319 	.get_vblank_counter = i915_get_vblank_counter,
17320 	.enable_vblank = i915gm_enable_vblank,
17321 	.disable_vblank = i915gm_disable_vblank,
17322 	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
17323 };
17324 
17325 static const struct drm_crtc_funcs i915_crtc_funcs = {
17326 	INTEL_CRTC_FUNCS,
17327 
17328 	.get_vblank_counter = i915_get_vblank_counter,
17329 	.enable_vblank = i8xx_enable_vblank,
17330 	.disable_vblank = i8xx_disable_vblank,
17331 	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
17332 };
17333 
17334 static const struct drm_crtc_funcs i8xx_crtc_funcs = {
17335 	INTEL_CRTC_FUNCS,
17336 
17337 	/* no hw vblank counter */
17338 	.enable_vblank = i8xx_enable_vblank,
17339 	.disable_vblank = i8xx_disable_vblank,
17340 	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
17341 };
17342 
17343 static struct intel_crtc *intel_crtc_alloc(void)
17344 {
17345 	struct intel_crtc_state *crtc_state;
17346 	struct intel_crtc *crtc;
17347 
17348 	crtc = kzalloc(sizeof(*crtc), GFP_KERNEL);
17349 	if (!crtc)
17350 		return ERR_PTR(-ENOMEM);
17351 
17352 	crtc_state = intel_crtc_state_alloc(crtc);
17353 	if (!crtc_state) {
17354 		kfree(crtc);
17355 		return ERR_PTR(-ENOMEM);
17356 	}
17357 
17358 	crtc->base.state = &crtc_state->uapi;
17359 	crtc->config = crtc_state;
17360 
17361 	return crtc;
17362 }
17363 
17364 static void intel_crtc_free(struct intel_crtc *crtc)
17365 {
17366 	intel_crtc_destroy_state(&crtc->base, crtc->base.state);
17367 	kfree(crtc);
17368 }
17369 
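/*
 * i915 planes are fixed to a single pipe, so each plane can only ever be
 * bound to the crtc of its own pipe.
 */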
17370 static void intel_plane_possible_crtcs_init(struct drm_i915_private *dev_priv)
17371 {
17372 	struct intel_plane *plane;
17373 
17374 	for_each_intel_plane(&dev_priv->drm, plane) {
17375 		struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv,
17376 								  plane->pipe);
17377 
17378 		plane->base.possible_crtcs = drm_crtc_mask(&crtc->base);
17379 	}
17380 }
17381 
17382 static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
17383 {
17384 	struct intel_plane *primary, *cursor;
17385 	const struct drm_crtc_funcs *funcs;
17386 	struct intel_crtc *crtc;
17387 	int sprite, ret;
17388 
17389 	crtc = intel_crtc_alloc();
17390 	if (IS_ERR(crtc))
17391 		return PTR_ERR(crtc);
17392 
17393 	crtc->pipe = pipe;
17394 	crtc->num_scalers = RUNTIME_INFO(dev_priv)->num_scalers[pipe];
17395 
17396 	primary = intel_primary_plane_create(dev_priv, pipe);
17397 	if (IS_ERR(primary)) {
17398 		ret = PTR_ERR(primary);
17399 		goto fail;
17400 	}
17401 	crtc->plane_ids_mask |= BIT(primary->id);
17402 
17403 	for_each_sprite(dev_priv, pipe, sprite) {
17404 		struct intel_plane *plane;
17405 
17406 		plane = intel_sprite_plane_create(dev_priv, pipe, sprite);
17407 		if (IS_ERR(plane)) {
17408 			ret = PTR_ERR(plane);
17409 			goto fail;
17410 		}
17411 		crtc->plane_ids_mask |= BIT(plane->id);
17412 	}
17413 
17414 	cursor = intel_cursor_plane_create(dev_priv, pipe);
17415 	if (IS_ERR(cursor)) {
17416 		ret = PTR_ERR(cursor);
17417 		goto fail;
17418 	}
17419 	crtc->plane_ids_mask |= BIT(cursor->id);
17420 
17421 	if (HAS_GMCH(dev_priv)) {
17422 		if (IS_CHERRYVIEW(dev_priv) ||
17423 		    IS_VALLEYVIEW(dev_priv) || IS_G4X(dev_priv))
17424 			funcs = &g4x_crtc_funcs;
17425 		else if (IS_GEN(dev_priv, 4))
17426 			funcs = &i965_crtc_funcs;
17427 		else if (IS_I945GM(dev_priv) || IS_I915GM(dev_priv))
17428 			funcs = &i915gm_crtc_funcs;
17429 		else if (IS_GEN(dev_priv, 3))
17430 			funcs = &i915_crtc_funcs;
17431 		else
17432 			funcs = &i8xx_crtc_funcs;
17433 	} else {
17434 		if (INTEL_GEN(dev_priv) >= 8)
17435 			funcs = &bdw_crtc_funcs;
17436 		else
17437 			funcs = &ilk_crtc_funcs;
17438 	}
17439 
17440 	ret = drm_crtc_init_with_planes(&dev_priv->drm, &crtc->base,
17441 					&primary->base, &cursor->base,
17442 					funcs, "pipe %c", pipe_name(pipe));
17443 	if (ret)
17444 		goto fail;
17445 
17446 	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->pipe_to_crtc_mapping) ||
17447 	       dev_priv->pipe_to_crtc_mapping[pipe] != NULL);
17448 	dev_priv->pipe_to_crtc_mapping[pipe] = crtc;
17449 
17450 	if (INTEL_GEN(dev_priv) < 9) {
17451 		enum i9xx_plane_id i9xx_plane = primary->i9xx_plane;
17452 
17453 		BUG_ON(i9xx_plane >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
17454 		       dev_priv->plane_to_crtc_mapping[i9xx_plane] != NULL);
17455 		dev_priv->plane_to_crtc_mapping[i9xx_plane] = crtc;
17456 	}
17457 
17458 	if (INTEL_GEN(dev_priv) >= 10)
17459 		drm_crtc_create_scaling_filter_property(&crtc->base,
17460 						BIT(DRM_SCALING_FILTER_DEFAULT) |
17461 						BIT(DRM_SCALING_FILTER_NEAREST_NEIGHBOR));
17462 
17463 	intel_color_init(crtc);
17464 
17465 	intel_crtc_crc_init(crtc);
17466 
17467 	drm_WARN_ON(&dev_priv->drm, drm_crtc_index(&crtc->base) != crtc->pipe);
17468 
17469 	return 0;
17470 
17471 fail:
17472 	intel_crtc_free(crtc);
17473 
17474 	return ret;
17475 }
17476 
17477 int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
17478 				      struct drm_file *file)
17479 {
17480 	struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
17481 	struct drm_crtc *drmmode_crtc;
17482 	struct intel_crtc *crtc;
17483 
17484 	drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
17485 	if (!drmmode_crtc)
17486 		return -ENOENT;
17487 
17488 	crtc = to_intel_crtc(drmmode_crtc);
17489 	pipe_from_crtc_id->pipe = crtc->pipe;
17490 
17491 	return 0;
17492 }
17493 
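/* Mask of encoders that could share a crtc with @encoder at the same time. */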
17494 static u32 intel_encoder_possible_clones(struct intel_encoder *encoder)
17495 {
17496 	struct drm_device *dev = encoder->base.dev;
17497 	struct intel_encoder *source_encoder;
17498 	u32 possible_clones = 0;
17499 
17500 	for_each_intel_encoder(dev, source_encoder) {
17501 		if (encoders_cloneable(encoder, source_encoder))
17502 			possible_clones |= drm_encoder_mask(&source_encoder->base);
17503 	}
17504 
17505 	return possible_clones;
17506 }
17507 
17508 static u32 intel_encoder_possible_crtcs(struct intel_encoder *encoder)
17509 {
17510 	struct drm_device *dev = encoder->base.dev;
17511 	struct intel_crtc *crtc;
17512 	u32 possible_crtcs = 0;
17513 
17514 	for_each_intel_crtc(dev, crtc) {
17515 		if (encoder->pipe_mask & BIT(crtc->pipe))
17516 			possible_crtcs |= drm_crtc_mask(&crtc->base);
17517 	}
17518 
17519 	return possible_crtcs;
17520 }
17521 
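/* eDP port A requires a mobile part, the detection strap, and on ILK it may be fused off. */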
17522 static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
17523 {
17524 	if (!IS_MOBILE(dev_priv))
17525 		return false;
17526 
17527 	if ((intel_de_read(dev_priv, DP_A) & DP_DETECTED) == 0)
17528 		return false;
17529 
17530 	if (IS_GEN(dev_priv, 5) && (intel_de_read(dev_priv, FUSE_STRAP) & ILK_eDP_A_DISABLE))
17531 		return false;
17532 
17533 	return true;
17534 }
17535 
17536 static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
17537 {
17538 	if (INTEL_GEN(dev_priv) >= 9)
17539 		return false;
17540 
17541 	if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
17542 		return false;
17543 
17544 	if (HAS_PCH_LPT_H(dev_priv) &&
17545 	    intel_de_read(dev_priv, SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
17546 		return false;
17547 
17548 	/* DDI E can't be used if DDI A requires 4 lanes */
17549 	if (intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
17550 		return false;
17551 
17552 	if (!dev_priv->vbt.int_crt_support)
17553 		return false;
17554 
17555 	return true;
17556 }
17557 
17558 void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
17559 {
17560 	int pps_num;
17561 	int pps_idx;
17562 
17563 	if (HAS_DDI(dev_priv))
17564 		return;
17565 	/*
17566 	 * This w/a is needed at least on CPT/PPT, but to be sure apply it
17567 	 * everywhere registers can be write protected.
17568 	 */
17569 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
17570 		pps_num = 2;
17571 	else
17572 		pps_num = 1;
17573 
17574 	for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
17575 		u32 val = intel_de_read(dev_priv, PP_CONTROL(pps_idx));
17576 
17577 		val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS;
17578 		intel_de_write(dev_priv, PP_CONTROL(pps_idx), val);
17579 	}
17580 }
17581 
17582 static void intel_pps_init(struct drm_i915_private *dev_priv)
17583 {
17584 	if (HAS_PCH_SPLIT(dev_priv) || IS_GEN9_LP(dev_priv))
17585 		dev_priv->pps_mmio_base = PCH_PPS_BASE;
17586 	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
17587 		dev_priv->pps_mmio_base = VLV_PPS_BASE;
17588 	else
17589 		dev_priv->pps_mmio_base = PPS_BASE;
17590 
17591 	intel_pps_unlock_regs_wa(dev_priv);
17592 }
17593 
17594 static void intel_setup_outputs(struct drm_i915_private *dev_priv)
17595 {
17596 	struct intel_encoder *encoder;
17597 	bool dpd_is_edp = false;
17598 
17599 	intel_pps_init(dev_priv);
17600 
17601 	if (!HAS_DISPLAY(dev_priv))
17602 		return;
17603 
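	/*
	 * Register the outputs, newest platforms first. Each branch uses the
	 * strap registers and/or the VBT to work out which ports are
	 * actually present on this particular machine.
	 */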
17604 	if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) {
17605 		intel_ddi_init(dev_priv, PORT_A);
17606 		intel_ddi_init(dev_priv, PORT_B);
17607 		intel_ddi_init(dev_priv, PORT_TC1);
17608 		intel_ddi_init(dev_priv, PORT_TC2);
17609 	} else if (INTEL_GEN(dev_priv) >= 12) {
17610 		intel_ddi_init(dev_priv, PORT_A);
17611 		intel_ddi_init(dev_priv, PORT_B);
17612 		intel_ddi_init(dev_priv, PORT_TC1);
17613 		intel_ddi_init(dev_priv, PORT_TC2);
17614 		intel_ddi_init(dev_priv, PORT_TC3);
17615 		intel_ddi_init(dev_priv, PORT_TC4);
17616 		intel_ddi_init(dev_priv, PORT_TC5);
17617 		intel_ddi_init(dev_priv, PORT_TC6);
17618 		icl_dsi_init(dev_priv);
17619 	} else if (IS_JSL_EHL(dev_priv)) {
17620 		intel_ddi_init(dev_priv, PORT_A);
17621 		intel_ddi_init(dev_priv, PORT_B);
17622 		intel_ddi_init(dev_priv, PORT_C);
17623 		intel_ddi_init(dev_priv, PORT_D);
17624 		icl_dsi_init(dev_priv);
17625 	} else if (IS_GEN(dev_priv, 11)) {
17626 		intel_ddi_init(dev_priv, PORT_A);
17627 		intel_ddi_init(dev_priv, PORT_B);
17628 		intel_ddi_init(dev_priv, PORT_C);
17629 		intel_ddi_init(dev_priv, PORT_D);
17630 		intel_ddi_init(dev_priv, PORT_E);
17631 		/*
17632 		 * On some ICL SKUs port F is not present. No strap bits for
17633 		 * this, so rely on VBT.
17634 		 * Work around broken VBTs on SKUs known to have no port F.
17635 		 */
17636 		if (IS_ICL_WITH_PORT_F(dev_priv) &&
17637 		    intel_bios_is_port_present(dev_priv, PORT_F))
17638 			intel_ddi_init(dev_priv, PORT_F);
17639 
17640 		icl_dsi_init(dev_priv);
17641 	} else if (IS_GEN9_LP(dev_priv)) {
17642 		/*
17643 		 * FIXME: Broxton doesn't support port detection via the
17644 		 * DDI_BUF_CTL_A or SFUSE_STRAP registers; find another way to
17645 		 * detect the ports.
17646 		 */
17647 		intel_ddi_init(dev_priv, PORT_A);
17648 		intel_ddi_init(dev_priv, PORT_B);
17649 		intel_ddi_init(dev_priv, PORT_C);
17650 
17651 		vlv_dsi_init(dev_priv);
17652 	} else if (HAS_DDI(dev_priv)) {
17653 		int found;
17654 
17655 		if (intel_ddi_crt_present(dev_priv))
17656 			intel_crt_init(dev_priv);
17657 
17658 		/*
17659 		 * Haswell uses DDI functions to detect digital outputs.
17660 		 * On SKL pre-D0 the strap isn't connected, so we assume
17661 		 * it's there.
17662 		 */
17663 		found = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
17664 		/* WaIgnoreDDIAStrap: skl */
17665 		if (found || IS_GEN9_BC(dev_priv))
17666 			intel_ddi_init(dev_priv, PORT_A);
17667 
17668 		/* DDI B, C, D, and F detection is indicated by the SFUSE_STRAP
17669 		 * register */
17670 		found = intel_de_read(dev_priv, SFUSE_STRAP);
17671 
17672 		if (found & SFUSE_STRAP_DDIB_DETECTED)
17673 			intel_ddi_init(dev_priv, PORT_B);
17674 		if (found & SFUSE_STRAP_DDIC_DETECTED)
17675 			intel_ddi_init(dev_priv, PORT_C);
17676 		if (found & SFUSE_STRAP_DDID_DETECTED)
17677 			intel_ddi_init(dev_priv, PORT_D);
17678 		if (found & SFUSE_STRAP_DDIF_DETECTED)
17679 			intel_ddi_init(dev_priv, PORT_F);
17680 		/*
17681 		 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
17682 		 */
17683 		if (IS_GEN9_BC(dev_priv) &&
17684 		    intel_bios_is_port_present(dev_priv, PORT_E))
17685 			intel_ddi_init(dev_priv, PORT_E);
17686 
17687 	} else if (HAS_PCH_SPLIT(dev_priv)) {
17688 		int found;
17689 
17690 		/*
17691 		 * intel_edp_init_connector() depends on this completing first,
17692 		 * to prevent the registration of both eDP and LVDS and the
17693 		 * incorrect sharing of the PPS.
17694 		 */
17695 		intel_lvds_init(dev_priv);
17696 		intel_crt_init(dev_priv);
17697 
17698 		dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);
17699 
17700 		if (ilk_has_edp_a(dev_priv))
17701 			intel_dp_init(dev_priv, DP_A, PORT_A);
17702 
17703 		if (intel_de_read(dev_priv, PCH_HDMIB) & SDVO_DETECTED) {
17704 			/* PCH SDVOB multiplex with HDMIB */
17705 			found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
17706 			if (!found)
17707 				intel_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
17708 			if (!found && (intel_de_read(dev_priv, PCH_DP_B) & DP_DETECTED))
17709 				intel_dp_init(dev_priv, PCH_DP_B, PORT_B);
17710 		}
17711 
17712 		if (intel_de_read(dev_priv, PCH_HDMIC) & SDVO_DETECTED)
17713 			intel_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);
17714 
17715 		if (!dpd_is_edp && intel_de_read(dev_priv, PCH_HDMID) & SDVO_DETECTED)
17716 			intel_hdmi_init(dev_priv, PCH_HDMID, PORT_D);
17717 
17718 		if (intel_de_read(dev_priv, PCH_DP_C) & DP_DETECTED)
17719 			intel_dp_init(dev_priv, PCH_DP_C, PORT_C);
17720 
17721 		if (intel_de_read(dev_priv, PCH_DP_D) & DP_DETECTED)
17722 			intel_dp_init(dev_priv, PCH_DP_D, PORT_D);
17723 	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
17724 		bool has_edp, has_port;
17725 
17726 		if (IS_VALLEYVIEW(dev_priv) && dev_priv->vbt.int_crt_support)
17727 			intel_crt_init(dev_priv);
17728 
17729 		/*
17730 		 * The DP_DETECTED bit is the latched state of the DDC
17731 		 * SDA pin at boot. However, since eDP doesn't require DDC
17732 		 * (no way to plug in a DP->HDMI dongle) the DDC pins for
17733 		 * eDP ports may have been muxed to an alternate function.
17734 		 * Thus we can't rely on the DP_DETECTED bit alone to detect
17735 		 * eDP ports. Consult the VBT as well as DP_DETECTED to
17736 		 * detect eDP ports.
17737 		 *
17738 		 * Sadly the straps seem to be missing sometimes even for HDMI
17739 		 * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
17740 		 * and VBT for the presence of the port. Additionally we can't
17741 		 * trust the port type the VBT declares as we've seen at least
17742 		 * HDMI ports that the VBT claims are DP or eDP.
17743 		 */
17744 		has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
17745 		has_port = intel_bios_is_port_present(dev_priv, PORT_B);
17746 		if (intel_de_read(dev_priv, VLV_DP_B) & DP_DETECTED || has_port)
17747 			has_edp &= intel_dp_init(dev_priv, VLV_DP_B, PORT_B);
17748 		if ((intel_de_read(dev_priv, VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
17749 			intel_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);
17750 
17751 		has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
17752 		has_port = intel_bios_is_port_present(dev_priv, PORT_C);
17753 		if (intel_de_read(dev_priv, VLV_DP_C) & DP_DETECTED || has_port)
17754 			has_edp &= intel_dp_init(dev_priv, VLV_DP_C, PORT_C);
17755 		if ((intel_de_read(dev_priv, VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
17756 			intel_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);
17757 
17758 		if (IS_CHERRYVIEW(dev_priv)) {
17759 			/*
17760 			 * eDP not supported on port D,
17761 			 * so no need to worry about it
17762 			 */
17763 			has_port = intel_bios_is_port_present(dev_priv, PORT_D);
17764 			if (intel_de_read(dev_priv, CHV_DP_D) & DP_DETECTED || has_port)
17765 				intel_dp_init(dev_priv, CHV_DP_D, PORT_D);
17766 			if (intel_de_read(dev_priv, CHV_HDMID) & SDVO_DETECTED || has_port)
17767 				intel_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
17768 		}
17769 
17770 		vlv_dsi_init(dev_priv);
17771 	} else if (IS_PINEVIEW(dev_priv)) {
17772 		intel_lvds_init(dev_priv);
17773 		intel_crt_init(dev_priv);
17774 	} else if (IS_GEN_RANGE(dev_priv, 3, 4)) {
17775 		bool found = false;
17776 
17777 		if (IS_MOBILE(dev_priv))
17778 			intel_lvds_init(dev_priv);
17779 
17780 		intel_crt_init(dev_priv);
17781 
17782 		if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
17783 			drm_dbg_kms(&dev_priv->drm, "probing SDVOB\n");
17784 			found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
17785 			if (!found && IS_G4X(dev_priv)) {
17786 				drm_dbg_kms(&dev_priv->drm,
17787 					    "probing HDMI on SDVOB\n");
17788 				intel_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
17789 			}
17790 
17791 			if (!found && IS_G4X(dev_priv))
17792 				intel_dp_init(dev_priv, DP_B, PORT_B);
17793 		}
17794 
17795 		/* Before G4X, SDVOC doesn't have its own detect register */
17796 
17797 		if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
17798 			drm_dbg_kms(&dev_priv->drm, "probing SDVOC\n");
17799 			found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
17800 		}
17801 
17802 		if (!found && (intel_de_read(dev_priv, GEN3_SDVOC) & SDVO_DETECTED)) {
17803 
17804 			if (IS_G4X(dev_priv)) {
17805 				drm_dbg_kms(&dev_priv->drm,
17806 					    "probing HDMI on SDVOC\n");
17807 				intel_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
17808 			}
17809 			if (IS_G4X(dev_priv))
17810 				intel_dp_init(dev_priv, DP_C, PORT_C);
17811 		}
17812 
17813 		if (IS_G4X(dev_priv) && (intel_de_read(dev_priv, DP_D) & DP_DETECTED))
17814 			intel_dp_init(dev_priv, DP_D, PORT_D);
17815 
17816 		if (SUPPORTS_TV(dev_priv))
17817 			intel_tv_init(dev_priv);
17818 	} else if (IS_GEN(dev_priv, 2)) {
17819 		if (IS_I85X(dev_priv))
17820 			intel_lvds_init(dev_priv);
17821 
17822 		intel_crt_init(dev_priv);
17823 		intel_dvo_init(dev_priv);
17824 	}
17825 
17826 	intel_psr_init(dev_priv);
17827 
17828 	for_each_intel_encoder(&dev_priv->drm, encoder) {
17829 		encoder->base.possible_crtcs =
17830 			intel_encoder_possible_crtcs(encoder);
17831 		encoder->base.possible_clones =
17832 			intel_encoder_possible_clones(encoder);
17833 	}
17834 
17835 	intel_init_pch_refclk(dev_priv);
17836 
17837 	drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
17838 }
17839 
17840 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
17841 {
17842 	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
17843 
17844 	drm_framebuffer_cleanup(fb);
17845 	intel_frontbuffer_put(intel_fb->frontbuffer);
17846 
17847 	kfree(intel_fb);
17848 }
17849 
17850 static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
17851 						struct drm_file *file,
17852 						unsigned int *handle)
17853 {
17854 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
17855 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
17856 
17857 	if (obj->userptr.mm) {
17858 		drm_dbg(&i915->drm,
17859 			"attempting to use a userptr for a framebuffer, denied\n");
17860 		return -EINVAL;
17861 	}
17862 
17863 	return drm_gem_handle_create(file, &obj->base, handle);
17864 }
17865 
17866 static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
17867 					struct drm_file *file,
17868 					unsigned flags, unsigned color,
17869 					struct drm_clip_rect *clips,
17870 					unsigned num_clips)
17871 {
17872 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
17873 
17874 	i915_gem_object_flush_if_display(obj);
17875 	intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);
17876 
17877 	return 0;
17878 }
17879 
17880 static const struct drm_framebuffer_funcs intel_fb_funcs = {
17881 	.destroy = intel_user_framebuffer_destroy,
17882 	.create_handle = intel_user_framebuffer_create_handle,
17883 	.dirty = intel_user_framebuffer_dirty,
17884 };
17885 
17886 static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
17887 				  struct drm_i915_gem_object *obj,
17888 				  struct drm_mode_fb_cmd2 *mode_cmd)
17889 {
17890 	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
17891 	struct drm_framebuffer *fb = &intel_fb->base;
17892 	u32 max_stride;
17893 	unsigned int tiling, stride;
17894 	int ret = -EINVAL;
17895 	int i;
17896 
17897 	intel_fb->frontbuffer = intel_frontbuffer_get(obj);
17898 	if (!intel_fb->frontbuffer)
17899 		return -ENOMEM;
17900 
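	/* Snapshot the object's tiling parameters under the object lock */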
17901 	i915_gem_object_lock(obj, NULL);
17902 	tiling = i915_gem_object_get_tiling(obj);
17903 	stride = i915_gem_object_get_stride(obj);
17904 	i915_gem_object_unlock(obj);
17905 
17906 	if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
17907 		/*
17908 		 * If there's a fence, enforce that
17909 		 * the fb modifier and tiling mode match.
17910 		 */
17911 		if (tiling != I915_TILING_NONE &&
17912 		    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
17913 			drm_dbg_kms(&dev_priv->drm,
17914 				    "tiling_mode doesn't match fb modifier\n");
17915 			goto err;
17916 		}
17917 	} else {
17918 		if (tiling == I915_TILING_X) {
17919 			mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
17920 		} else if (tiling == I915_TILING_Y) {
17921 			drm_dbg_kms(&dev_priv->drm,
17922 				    "No Y tiling for legacy addfb\n");
17923 			goto err;
17924 		}
17925 	}
17926 
17927 	if (!drm_any_plane_has_format(&dev_priv->drm,
17928 				      mode_cmd->pixel_format,
17929 				      mode_cmd->modifier[0])) {
17930 		struct drm_format_name_buf format_name;
17931 
17932 		drm_dbg_kms(&dev_priv->drm,
17933 			    "unsupported pixel format %s / modifier 0x%llx\n",
17934 			    drm_get_format_name(mode_cmd->pixel_format,
17935 						&format_name),
17936 			    mode_cmd->modifier[0]);
17937 		goto err;
17938 	}
17939 
17940 	/*
17941 	 * gen2/3 display engine uses the fence if present,
17942 	 * so the tiling mode must match the fb modifier exactly.
17943 	 */
17944 	if (INTEL_GEN(dev_priv) < 4 &&
17945 	    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
17946 		drm_dbg_kms(&dev_priv->drm,
17947 			    "tiling_mode must match fb modifier exactly on gen2/3\n");
17948 		goto err;
17949 	}
17950 
17951 	max_stride = intel_fb_max_stride(dev_priv, mode_cmd->pixel_format,
17952 					 mode_cmd->modifier[0]);
17953 	if (mode_cmd->pitches[0] > max_stride) {
17954 		drm_dbg_kms(&dev_priv->drm,
17955 			    "%s pitch (%u) must be at most %d\n",
17956 			    mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
17957 			    "tiled" : "linear",
17958 			    mode_cmd->pitches[0], max_stride);
17959 		goto err;
17960 	}
17961 
17962 	/*
17963 	 * If there's a fence, enforce that
17964 	 * the fb pitch and fence stride match.
17965 	 */
17966 	if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
17967 		drm_dbg_kms(&dev_priv->drm,
17968 			    "pitch (%d) must match tiling stride (%d)\n",
17969 			    mode_cmd->pitches[0], stride);
17970 		goto err;
17971 	}
17972 
17973 	/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
17974 	if (mode_cmd->offsets[0] != 0) {
17975 		drm_dbg_kms(&dev_priv->drm,
17976 			    "plane 0 offset (0x%08x) must be 0\n",
17977 			    mode_cmd->offsets[0]);
17978 		goto err;
17979 	}
17980 
17981 	drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd);
17982 
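	/*
	 * Validate each plane of the fb: all planes must come from the same
	 * GEM object, and each must satisfy its stride alignment (and, for
	 * gen12 CCS aux planes, the exact aux stride).
	 */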
17983 	for (i = 0; i < fb->format->num_planes; i++) {
17984 		u32 stride_alignment;
17985 
17986 		if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
17987 			drm_dbg_kms(&dev_priv->drm, "bad plane %d handle\n",
17988 				    i);
17989 			goto err;
17990 		}
17991 
17992 		stride_alignment = intel_fb_stride_alignment(fb, i);
17993 		if (fb->pitches[i] & (stride_alignment - 1)) {
17994 			drm_dbg_kms(&dev_priv->drm,
17995 				    "plane %d pitch (%d) must be at least %u byte aligned\n",
17996 				    i, fb->pitches[i], stride_alignment);
17997 			goto err;
17998 		}
17999 
18000 		if (is_gen12_ccs_plane(fb, i)) {
18001 			int ccs_aux_stride = gen12_ccs_aux_stride(fb, i);
18002 
18003 			if (fb->pitches[i] != ccs_aux_stride) {
18004 				drm_dbg_kms(&dev_priv->drm,
18005 					    "ccs aux plane %d pitch (%d) must be %d\n",
18006 					    i,
18007 					    fb->pitches[i], ccs_aux_stride);
18008 				goto err;
18009 			}
18010 		}
18011 
18012 		fb->obj[i] = &obj->base;
18013 	}
18014 
18015 	ret = intel_fill_fb_info(dev_priv, fb);
18016 	if (ret)
18017 		goto err;
18018 
18019 	ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs);
18020 	if (ret) {
18021 		drm_err(&dev_priv->drm, "framebuffer init failed %d\n", ret);
18022 		goto err;
18023 	}
18024 
18025 	return 0;
18026 
18027 err:
18028 	intel_frontbuffer_put(intel_fb->frontbuffer);
18029 	return ret;
18030 }
18031 
18032 static struct drm_framebuffer *
18033 intel_user_framebuffer_create(struct drm_device *dev,
18034 			      struct drm_file *filp,
18035 			      const struct drm_mode_fb_cmd2 *user_mode_cmd)
18036 {
18037 	struct drm_framebuffer *fb;
18038 	struct drm_i915_gem_object *obj;
18039 	struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
18040 
18041 	obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]);
18042 	if (!obj)
18043 		return ERR_PTR(-ENOENT);
18044 
18045 	fb = intel_framebuffer_create(obj, &mode_cmd);
18046 	i915_gem_object_put(obj);
18047 
18048 	return fb;
18049 }
18050 
18051 static enum drm_mode_status
18052 intel_mode_valid(struct drm_device *dev,
18053 		 const struct drm_display_mode *mode)
18054 {
18055 	struct drm_i915_private *dev_priv = to_i915(dev);
18056 	int hdisplay_max, htotal_max;
18057 	int vdisplay_max, vtotal_max;
18058 
18059 	/*
18060 	 * Can't reject DBLSCAN here because Xorg ddxen can add piles
18061 	 * of DBLSCAN modes to the output's mode list when they detect
18062 	 * the scaling mode property on the connector. And they don't
18063 	 * ask the kernel to validate those modes in any way until
18064 	 * modeset time at which point the client gets a protocol error.
18065 	 * So in order to not upset those clients we silently ignore the
18066 	 * DBLSCAN flag on such connectors. For other connectors we will
18067 	 * reject modes with the DBLSCAN flag in encoder->compute_config().
18068 	 * And we always reject DBLSCAN modes in connector->mode_valid()
18069 	 * as we never want such modes on the connector's mode list.
18070 	 */
18071 
18072 	if (mode->vscan > 1)
18073 		return MODE_NO_VSCAN;
18074 
18075 	if (mode->flags & DRM_MODE_FLAG_HSKEW)
18076 		return MODE_H_ILLEGAL;
18077 
18078 	if (mode->flags & (DRM_MODE_FLAG_CSYNC |
18079 			   DRM_MODE_FLAG_NCSYNC |
18080 			   DRM_MODE_FLAG_PCSYNC))
18081 		return MODE_HSYNC;
18082 
18083 	if (mode->flags & (DRM_MODE_FLAG_BCAST |
18084 			   DRM_MODE_FLAG_PIXMUX |
18085 			   DRM_MODE_FLAG_CLKDIV2))
18086 		return MODE_BAD;
18087 
18088 	/* Transcoder timing limits */
18089 	if (INTEL_GEN(dev_priv) >= 11) {
18090 		hdisplay_max = 16384;
18091 		vdisplay_max = 8192;
18092 		htotal_max = 16384;
18093 		vtotal_max = 8192;
18094 	} else if (INTEL_GEN(dev_priv) >= 9 ||
18095 		   IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
18096 		hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
18097 		vdisplay_max = 4096;
18098 		htotal_max = 8192;
18099 		vtotal_max = 8192;
18100 	} else if (INTEL_GEN(dev_priv) >= 3) {
18101 		hdisplay_max = 4096;
18102 		vdisplay_max = 4096;
18103 		htotal_max = 8192;
18104 		vtotal_max = 8192;
18105 	} else {
18106 		hdisplay_max = 2048;
18107 		vdisplay_max = 2048;
18108 		htotal_max = 4096;
18109 		vtotal_max = 4096;
18110 	}
18111 
18112 	if (mode->hdisplay > hdisplay_max ||
18113 	    mode->hsync_start > htotal_max ||
18114 	    mode->hsync_end > htotal_max ||
18115 	    mode->htotal > htotal_max)
18116 		return MODE_H_ILLEGAL;
18117 
18118 	if (mode->vdisplay > vdisplay_max ||
18119 	    mode->vsync_start > vtotal_max ||
18120 	    mode->vsync_end > vtotal_max ||
18121 	    mode->vtotal > vtotal_max)
18122 		return MODE_V_ILLEGAL;
18123 
18124 	if (INTEL_GEN(dev_priv) >= 5) {
18125 		if (mode->hdisplay < 64 ||
18126 		    mode->htotal - mode->hdisplay < 32)
18127 			return MODE_H_ILLEGAL;
18128 
18129 		if (mode->vtotal - mode->vdisplay < 5)
18130 			return MODE_V_ILLEGAL;
18131 	} else {
18132 		if (mode->htotal - mode->hdisplay < 32)
18133 			return MODE_H_ILLEGAL;
18134 
18135 		if (mode->vtotal - mode->vdisplay < 3)
18136 			return MODE_V_ILLEGAL;
18137 	}
18138 
18139 	return MODE_OK;
18140 }
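
/*
 * A minimal sketch (not part of the driver) of the per-connector half
 * of the DBLSCAN policy described above: connector->mode_valid() hooks
 * reject DBLSCAN outright so such modes never land on the connector's
 * mode list.
 */
static enum drm_mode_status
example_connector_mode_valid(struct drm_connector *connector,
			     struct drm_display_mode *mode)
{
	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
		return MODE_NO_DBLESCAN;

	return MODE_OK;
}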
18141 
18142 enum drm_mode_status
18143 intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
18144 				const struct drm_display_mode *mode,
18145 				bool bigjoiner)
18146 {
18147 	int plane_width_max, plane_height_max;
18148 
18149 	/*
18150 	 * intel_mode_valid() should be
18151 	 * sufficient on older platforms.
18152 	 */
18153 	if (INTEL_GEN(dev_priv) < 9)
18154 		return MODE_OK;
18155 
18156 	/*
18157 	 * Most people will probably want a fullscreen
18158 	 * plane so let's not advertise modes that are
18159 	 * too big for that.
18160 	 */
18161 	if (INTEL_GEN(dev_priv) >= 11) {
18162 		plane_width_max = 5120 << bigjoiner;
18163 		plane_height_max = 4320;
18164 	} else {
18165 		plane_width_max = 5120;
18166 		plane_height_max = 4096;
18167 	}
18168 
18169 	if (mode->hdisplay > plane_width_max)
18170 		return MODE_H_ILLEGAL;
18171 
18172 	if (mode->vdisplay > plane_height_max)
18173 		return MODE_V_ILLEGAL;
18174 
18175 	return MODE_OK;
18176 }
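
/*
 * Worked numbers for the gen11+ branch above (illustrative): without
 * bigjoiner the plane limit is 5120x4320, so a 7680x4320 (8K) mode
 * returns MODE_H_ILLEGAL; with bigjoiner the width limit doubles to
 * 5120 << 1 = 10240 and the same mode passes.
 */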
18177 
18178 static const struct drm_mode_config_funcs intel_mode_funcs = {
18179 	.fb_create = intel_user_framebuffer_create,
18180 	.get_format_info = intel_get_format_info,
18181 	.output_poll_changed = intel_fbdev_output_poll_changed,
18182 	.mode_valid = intel_mode_valid,
18183 	.atomic_check = intel_atomic_check,
18184 	.atomic_commit = intel_atomic_commit,
18185 	.atomic_state_alloc = intel_atomic_state_alloc,
18186 	.atomic_state_clear = intel_atomic_state_clear,
18187 	.atomic_state_free = intel_atomic_state_free,
18188 };
18189 
18190 /**
18191  * intel_init_display_hooks - initialize the display modesetting hooks
18192  * @dev_priv: device private
18193  */
18194 void intel_init_display_hooks(struct drm_i915_private *dev_priv)
18195 {
18196 	intel_init_cdclk_hooks(dev_priv);
18197 
18198 	if (INTEL_GEN(dev_priv) >= 9) {
18199 		dev_priv->display.get_pipe_config = hsw_get_pipe_config;
18200 		dev_priv->display.get_initial_plane_config =
18201 			skl_get_initial_plane_config;
18202 		dev_priv->display.crtc_compute_clock = hsw_crtc_compute_clock;
18203 		dev_priv->display.crtc_enable = hsw_crtc_enable;
18204 		dev_priv->display.crtc_disable = hsw_crtc_disable;
18205 	} else if (HAS_DDI(dev_priv)) {
18206 		dev_priv->display.get_pipe_config = hsw_get_pipe_config;
18207 		dev_priv->display.get_initial_plane_config =
18208 			i9xx_get_initial_plane_config;
18209 		dev_priv->display.crtc_compute_clock =
18210 			hsw_crtc_compute_clock;
18211 		dev_priv->display.crtc_enable = hsw_crtc_enable;
18212 		dev_priv->display.crtc_disable = hsw_crtc_disable;
18213 	} else if (HAS_PCH_SPLIT(dev_priv)) {
18214 		dev_priv->display.get_pipe_config = ilk_get_pipe_config;
18215 		dev_priv->display.get_initial_plane_config =
18216 			i9xx_get_initial_plane_config;
18217 		dev_priv->display.crtc_compute_clock =
18218 			ilk_crtc_compute_clock;
18219 		dev_priv->display.crtc_enable = ilk_crtc_enable;
18220 		dev_priv->display.crtc_disable = ilk_crtc_disable;
18221 	} else if (IS_CHERRYVIEW(dev_priv)) {
18222 		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
18223 		dev_priv->display.get_initial_plane_config =
18224 			i9xx_get_initial_plane_config;
18225 		dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock;
18226 		dev_priv->display.crtc_enable = valleyview_crtc_enable;
18227 		dev_priv->display.crtc_disable = i9xx_crtc_disable;
18228 	} else if (IS_VALLEYVIEW(dev_priv)) {
18229 		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
18230 		dev_priv->display.get_initial_plane_config =
18231 			i9xx_get_initial_plane_config;
18232 		dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock;
18233 		dev_priv->display.crtc_enable = valleyview_crtc_enable;
18234 		dev_priv->display.crtc_disable = i9xx_crtc_disable;
18235 	} else if (IS_G4X(dev_priv)) {
18236 		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
18237 		dev_priv->display.get_initial_plane_config =
18238 			i9xx_get_initial_plane_config;
18239 		dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock;
18240 		dev_priv->display.crtc_enable = i9xx_crtc_enable;
18241 		dev_priv->display.crtc_disable = i9xx_crtc_disable;
18242 	} else if (IS_PINEVIEW(dev_priv)) {
18243 		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
18244 		dev_priv->display.get_initial_plane_config =
18245 			i9xx_get_initial_plane_config;
18246 		dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock;
18247 		dev_priv->display.crtc_enable = i9xx_crtc_enable;
18248 		dev_priv->display.crtc_disable = i9xx_crtc_disable;
18249 	} else if (!IS_GEN(dev_priv, 2)) {
18250 		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
18251 		dev_priv->display.get_initial_plane_config =
18252 			i9xx_get_initial_plane_config;
18253 		dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
18254 		dev_priv->display.crtc_enable = i9xx_crtc_enable;
18255 		dev_priv->display.crtc_disable = i9xx_crtc_disable;
18256 	} else {
18257 		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
18258 		dev_priv->display.get_initial_plane_config =
18259 			i9xx_get_initial_plane_config;
18260 		dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock;
18261 		dev_priv->display.crtc_enable = i9xx_crtc_enable;
18262 		dev_priv->display.crtc_disable = i9xx_crtc_disable;
18263 	}
18264 
18265 	if (IS_GEN(dev_priv, 5)) {
18266 		dev_priv->display.fdi_link_train = ilk_fdi_link_train;
18267 	} else if (IS_GEN(dev_priv, 6)) {
18268 		dev_priv->display.fdi_link_train = gen6_fdi_link_train;
18269 	} else if (IS_IVYBRIDGE(dev_priv)) {
18270 		/* FIXME: detect B0+ stepping and use auto training */
18271 		dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
18272 	}
18273 
18274 	if (INTEL_GEN(dev_priv) >= 9)
18275 		dev_priv->display.commit_modeset_enables = skl_commit_modeset_enables;
18276 	else
18277 		dev_priv->display.commit_modeset_enables = intel_commit_modeset_enables;
18278 
18279 }
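
/*
 * A minimal sketch of how the hooks installed above are consumed (the
 * real call sites live in the atomic commit code; the "example_" name
 * is hypothetical): callers dispatch through the vtable and never
 * check the platform again.
 */
static void example_enable_crtc(struct intel_atomic_state *state,
				struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	dev_priv->display.crtc_enable(state, crtc);
}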
18280 
18281 void intel_modeset_init_hw(struct drm_i915_private *i915)
18282 {
18283 	struct intel_cdclk_state *cdclk_state =
18284 		to_intel_cdclk_state(i915->cdclk.obj.state);
18285 	struct intel_dbuf_state *dbuf_state =
18286 		to_intel_dbuf_state(i915->dbuf.obj.state);
18287 
18288 	intel_update_cdclk(i915);
18289 	intel_dump_cdclk_config(&i915->cdclk.hw, "Current CDCLK");
18290 	cdclk_state->logical = cdclk_state->actual = i915->cdclk.hw;
18291 
18292 	dbuf_state->enabled_slices = i915->dbuf.enabled_slices;
18293 }
18294 
18295 static int sanitize_watermarks_add_affected(struct drm_atomic_state *state)
18296 {
18297 	struct drm_plane *plane;
18298 	struct intel_crtc *crtc;
18299 
18300 	for_each_intel_crtc(state->dev, crtc) {
18301 		struct intel_crtc_state *crtc_state;
18302 
18303 		crtc_state = intel_atomic_get_crtc_state(state, crtc);
18304 		if (IS_ERR(crtc_state))
18305 			return PTR_ERR(crtc_state);
18306 
18307 		if (crtc_state->hw.active) {
18308 			/*
18309 			 * Preserve the inherited flag to avoid
18310 			 * taking the full modeset path.
18311 			 */
18312 			crtc_state->inherited = true;
18313 		}
18314 	}
18315 
18316 	drm_for_each_plane(plane, state->dev) {
18317 		struct drm_plane_state *plane_state;
18318 
18319 		plane_state = drm_atomic_get_plane_state(state, plane);
18320 		if (IS_ERR(plane_state))
18321 			return PTR_ERR(plane_state);
18322 	}
18323 
18324 	return 0;
18325 }
18326 
18327 /*
18328  * Calculate what we think the watermarks should be for the state we've read
18329  * out of the hardware and then immediately program those watermarks so that
18330  * we ensure the hardware settings match our internal state.
18331  *
18332  * We can calculate what we think WM's should be by creating a duplicate of the
18333  * current state (which was constructed during hardware readout) and running it
18334  * through the atomic check code to calculate new watermark values in the
18335  * state object.
18336  */
18337 static void sanitize_watermarks(struct drm_i915_private *dev_priv)
18338 {
18339 	struct drm_atomic_state *state;
18340 	struct intel_atomic_state *intel_state;
18341 	struct intel_crtc *crtc;
18342 	struct intel_crtc_state *crtc_state;
18343 	struct drm_modeset_acquire_ctx ctx;
18344 	int ret;
18345 	int i;
18346 
18347 	/* Only supported on platforms that use atomic watermark design */
18348 	if (!dev_priv->display.optimize_watermarks)
18349 		return;
18350 
18351 	state = drm_atomic_state_alloc(&dev_priv->drm);
18352 	if (drm_WARN_ON(&dev_priv->drm, !state))
18353 		return;
18354 
18355 	intel_state = to_intel_atomic_state(state);
18356 
18357 	drm_modeset_acquire_init(&ctx, 0);
18358 
18359 retry:
18360 	state->acquire_ctx = &ctx;
18361 
18362 	/*
18363 	 * Hardware readout is the only time we don't want to calculate
18364 	 * intermediate watermarks (since we don't trust the current
18365 	 * watermarks).
18366 	 */
18367 	if (!HAS_GMCH(dev_priv))
18368 		intel_state->skip_intermediate_wm = true;
18369 
18370 	ret = sanitize_watermarks_add_affected(state);
18371 	if (ret)
18372 		goto fail;
18373 
18374 	ret = intel_atomic_check(&dev_priv->drm, state);
18375 	if (ret)
18376 		goto fail;
18377 
18378 	/* Write calculated watermark values back */
18379 	for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
18380 		crtc_state->wm.need_postvbl_update = true;
18381 		dev_priv->display.optimize_watermarks(intel_state, crtc);
18382 
18383 		to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm;
18384 	}
18385 
18386 fail:
18387 	if (ret == -EDEADLK) {
18388 		drm_atomic_state_clear(state);
18389 		drm_modeset_backoff(&ctx);
18390 		goto retry;
18391 	}
18392 
18393 	/*
18394 	 * If we fail here, it means that the hardware appears to be
18395 	 * programmed in a way that shouldn't be possible, given our
18396 	 * understanding of watermark requirements.  This might mean a
18397 	 * mistake in the hardware readout code or a mistake in the
18398 	 * watermark calculations for a given platform.  Raise a WARN
18399 	 * so that this is noticeable.
18400 	 *
18401 	 * If this actually happens, we'll have to just leave the
18402 	 * BIOS-programmed watermarks untouched and hope for the best.
18403 	 */
18404 	drm_WARN(&dev_priv->drm, ret,
18405 		 "Could not determine valid watermarks for inherited state\n");
18406 
18407 	drm_atomic_state_put(state);
18408 
18409 	drm_modeset_drop_locks(&ctx);
18410 	drm_modeset_acquire_fini(&ctx);
18411 }
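
/*
 * sanitize_watermarks() above and intel_initial_commit() below both
 * follow the standard drm_modeset_acquire_ctx backoff idiom. A minimal
 * sketch, with a hypothetical example_check() standing in for whatever
 * work needs the locks:
 */
static int example_locked_check(struct drm_device *dev,
				int (*example_check)(struct drm_atomic_state *state))
{
	struct drm_modeset_acquire_ctx ctx;
	struct drm_atomic_state *state;
	int ret;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	drm_modeset_acquire_init(&ctx, 0);
retry:
	state->acquire_ctx = &ctx;

	ret = example_check(state);
	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	drm_atomic_state_put(state);
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}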
18412 
18413 static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv)
18414 {
18415 	if (IS_GEN(dev_priv, 5)) {
18416 		u32 fdi_pll_clk =
18417 			intel_de_read(dev_priv, FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
18418 
18419 		dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
18420 	} else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) {
18421 		dev_priv->fdi_pll_freq = 270000;
18422 	} else {
18423 		return;
18424 	}
18425 
18426 	drm_dbg(&dev_priv->drm, "FDI PLL freq=%d\n", dev_priv->fdi_pll_freq);
18427 }
18428 
18429 static int intel_initial_commit(struct drm_device *dev)
18430 {
18431 	struct drm_atomic_state *state = NULL;
18432 	struct drm_modeset_acquire_ctx ctx;
18433 	struct intel_crtc *crtc;
18434 	int ret = 0;
18435 
18436 	state = drm_atomic_state_alloc(dev);
18437 	if (!state)
18438 		return -ENOMEM;
18439 
18440 	drm_modeset_acquire_init(&ctx, 0);
18441 
18442 retry:
18443 	state->acquire_ctx = &ctx;
18444 
18445 	for_each_intel_crtc(dev, crtc) {
18446 		struct intel_crtc_state *crtc_state =
18447 			intel_atomic_get_crtc_state(state, crtc);
18448 
18449 		if (IS_ERR(crtc_state)) {
18450 			ret = PTR_ERR(crtc_state);
18451 			goto out;
18452 		}
18453 
18454 		if (crtc_state->hw.active) {
18455 			struct intel_encoder *encoder;
18456 
18457 			/*
18458 			 * We've not yet detected sink capabilities
18459 			 * (audio, infoframes, etc.) and thus we don't want to
18460 			 * force a full state recomputation yet. We want that to
18461 			 * happen only for the first real commit from userspace.
18462 			 * So preserve the inherited flag for the time being.
18463 			 */
18464 			crtc_state->inherited = true;
18465 
18466 			ret = drm_atomic_add_affected_planes(state, &crtc->base);
18467 			if (ret)
18468 				goto out;
18469 
18470 			/*
18471 			 * FIXME hack to force a LUT update to avoid the
18472 			 * plane update forcing the pipe gamma on without
18473 			 * having a proper LUT loaded. Remove once we
18474 			 * have readout for pipe gamma enable.
18475 			 */
18476 			crtc_state->uapi.color_mgmt_changed = true;
18477 
18478 			for_each_intel_encoder_mask(dev, encoder,
18479 						    crtc_state->uapi.encoder_mask) {
18480 				if (encoder->initial_fastset_check &&
18481 				    !encoder->initial_fastset_check(encoder, crtc_state)) {
18482 					ret = drm_atomic_add_affected_connectors(state,
18483 										 &crtc->base);
18484 					if (ret)
18485 						goto out;
18486 				}
18487 			}
18488 		}
18489 	}
18490 
18491 	ret = drm_atomic_commit(state);
18492 
18493 out:
18494 	if (ret == -EDEADLK) {
18495 		drm_atomic_state_clear(state);
18496 		drm_modeset_backoff(&ctx);
18497 		goto retry;
18498 	}
18499 
18500 	drm_atomic_state_put(state);
18501 
18502 	drm_modeset_drop_locks(&ctx);
18503 	drm_modeset_acquire_fini(&ctx);
18504 
18505 	return ret;
18506 }
18507 
18508 static void intel_mode_config_init(struct drm_i915_private *i915)
18509 {
18510 	struct drm_mode_config *mode_config = &i915->drm.mode_config;
18511 
18512 	drm_mode_config_init(&i915->drm);
18513 	INIT_LIST_HEAD(&i915->global_obj_list);
18514 
18515 	mode_config->min_width = 0;
18516 	mode_config->min_height = 0;
18517 
18518 	mode_config->preferred_depth = 24;
18519 	mode_config->prefer_shadow = 1;
18520 
18521 	mode_config->allow_fb_modifiers = true;
18522 
18523 	mode_config->funcs = &intel_mode_funcs;
18524 
18525 	if (INTEL_GEN(i915) >= 9)
18526 		mode_config->async_page_flip = true;
18527 
18528 	/*
18529 	 * Maximum framebuffer dimensions, chosen to match
18530 	 * the maximum render engine surface size on gen4+.
18531 	 */
18532 	if (INTEL_GEN(i915) >= 7) {
18533 		mode_config->max_width = 16384;
18534 		mode_config->max_height = 16384;
18535 	} else if (INTEL_GEN(i915) >= 4) {
18536 		mode_config->max_width = 8192;
18537 		mode_config->max_height = 8192;
18538 	} else if (IS_GEN(i915, 3)) {
18539 		mode_config->max_width = 4096;
18540 		mode_config->max_height = 4096;
18541 	} else {
18542 		mode_config->max_width = 2048;
18543 		mode_config->max_height = 2048;
18544 	}
18545 
18546 	if (IS_I845G(i915) || IS_I865G(i915)) {
18547 		mode_config->cursor_width = IS_I845G(i915) ? 64 : 512;
18548 		mode_config->cursor_height = 1023;
18549 	} else if (IS_I830(i915) || IS_I85X(i915) ||
18550 		   IS_I915G(i915) || IS_I915GM(i915)) {
18551 		mode_config->cursor_width = 64;
18552 		mode_config->cursor_height = 64;
18553 	} else {
18554 		mode_config->cursor_width = 256;
18555 		mode_config->cursor_height = 256;
18556 	}
18557 }
18558 
18559 static void intel_mode_config_cleanup(struct drm_i915_private *i915)
18560 {
18561 	intel_atomic_global_obj_cleanup(i915);
18562 	drm_mode_config_cleanup(&i915->drm);
18563 }
18564 
18565 static void plane_config_fini(struct intel_initial_plane_config *plane_config)
18566 {
18567 	if (plane_config->fb) {
18568 		struct drm_framebuffer *fb = &plane_config->fb->base;
18569 
18570 		/* We may only have the stub and not a full framebuffer */
18571 		if (drm_framebuffer_read_refcount(fb))
18572 			drm_framebuffer_put(fb);
18573 		else
18574 			kfree(fb);
18575 	}
18576 
18577 	if (plane_config->vma)
18578 		i915_vma_put(plane_config->vma);
18579 }
18580 
18581 /* part #1: call before irq install */
18582 int intel_modeset_init_noirq(struct drm_i915_private *i915)
18583 {
18584 	int ret;
18585 
18586 	if (i915_inject_probe_failure(i915))
18587 		return -ENODEV;
18588 
18589 	if (HAS_DISPLAY(i915)) {
18590 		ret = drm_vblank_init(&i915->drm,
18591 				      INTEL_NUM_PIPES(i915));
18592 		if (ret)
18593 			return ret;
18594 	}
18595 
18596 	intel_bios_init(i915);
18597 
18598 	ret = intel_vga_register(i915);
18599 	if (ret)
18600 		goto cleanup_bios;
18601 
18602 	/* FIXME: completely on the wrong abstraction layer */
18603 	intel_power_domains_init_hw(i915, false);
18604 
18605 	intel_csr_ucode_init(i915);
18606 
18607 	i915->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
18608 	i915->flip_wq = alloc_workqueue("i915_flip", WQ_HIGHPRI |
18609 					WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);
18610 
18611 	intel_mode_config_init(i915);
18612 
18613 	ret = intel_cdclk_init(i915);
18614 	if (ret)
18615 		goto cleanup_vga_client_pw_domain_csr;
18616 
18617 	ret = intel_dbuf_init(i915);
18618 	if (ret)
18619 		goto cleanup_vga_client_pw_domain_csr;
18620 
18621 	ret = intel_bw_init(i915);
18622 	if (ret)
18623 		goto cleanup_vga_client_pw_domain_csr;
18624 
18625 	init_llist_head(&i915->atomic_helper.free_list);
18626 	INIT_WORK(&i915->atomic_helper.free_work,
18627 		  intel_atomic_helper_free_state_worker);
18628 
18629 	intel_init_quirks(i915);
18630 
18631 	intel_fbc_init(i915);
18632 
18633 	return 0;
18634 
18635 cleanup_vga_client_pw_domain_csr:
18636 	intel_csr_ucode_fini(i915);
18637 	intel_power_domains_driver_remove(i915);
18638 	intel_vga_unregister(i915);
18639 cleanup_bios:
18640 	intel_bios_driver_remove(i915);
18641 
18642 	return ret;
18643 }
18644 
18645 /* part #2: call after irq install, but before gem init */
18646 int intel_modeset_init_nogem(struct drm_i915_private *i915)
18647 {
18648 	struct drm_device *dev = &i915->drm;
18649 	enum pipe pipe;
18650 	struct intel_crtc *crtc;
18651 	int ret;
18652 
18653 	intel_init_pm(i915);
18654 
18655 	intel_panel_sanitize_ssc(i915);
18656 
18657 	intel_gmbus_setup(i915);
18658 
18659 	drm_dbg_kms(&i915->drm, "%d display pipe%s available.\n",
18660 		    INTEL_NUM_PIPES(i915),
18661 		    INTEL_NUM_PIPES(i915) > 1 ? "s" : "");
18662 
18663 	if (HAS_DISPLAY(i915)) {
18664 		for_each_pipe(i915, pipe) {
18665 			ret = intel_crtc_init(i915, pipe);
18666 			if (ret) {
18667 				intel_mode_config_cleanup(i915);
18668 				return ret;
18669 			}
18670 		}
18671 	}
18672 
18673 	intel_plane_possible_crtcs_init(i915);
18674 	intel_shared_dpll_init(dev);
18675 	intel_update_fdi_pll_freq(i915);
18676 
18677 	intel_update_czclk(i915);
18678 	intel_modeset_init_hw(i915);
18679 
18680 	intel_hdcp_component_init(i915);
18681 
18682 	if (i915->max_cdclk_freq == 0)
18683 		intel_update_max_cdclk(i915);
18684 
18685 	/*
18686 	 * If the platform has HTI, we need to find out whether it has reserved
18687 	 * any display resources before we create our display outputs.
18688 	 */
18689 	if (INTEL_INFO(i915)->display.has_hti)
18690 		i915->hti_state = intel_de_read(i915, HDPORT_STATE);
18691 
18692 	/* Just disable it once at startup */
18693 	intel_vga_disable(i915);
18694 	intel_setup_outputs(i915);
18695 
18696 	drm_modeset_lock_all(dev);
18697 	intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
18698 	drm_modeset_unlock_all(dev);
18699 
18700 	for_each_intel_crtc(dev, crtc) {
18701 		struct intel_initial_plane_config plane_config = {};
18702 
18703 		if (!to_intel_crtc_state(crtc->base.state)->uapi.active)
18704 			continue;
18705 
18706 		/*
18707 		 * Note that reserving the BIOS fb up front prevents us
18708 		 * from stuffing other stolen allocations like the ring
18709 		 * on top.  This avoids some ugliness at boot time, and
18710 		 * can even allow for smooth boot transitions if the BIOS
18711 		 * fb is large enough for the active pipe configuration.
18712 		 */
18713 		i915->display.get_initial_plane_config(crtc, &plane_config);
18714 
18715 		/*
18716 		 * If the fb is shared between multiple heads, we'll
18717 		 * just get the first one.
18718 		 */
18719 		intel_find_initial_plane_obj(crtc, &plane_config);
18720 
18721 		plane_config_fini(&plane_config);
18722 	}
18723 
18724 	/*
18725 	 * Make sure hardware watermarks really match the state we read out.
18726 	 * Note that we need to do this after reconstructing the BIOS fb's
18727 	 * since the watermark calculation done here will use pstate->fb.
18728 	 */
18729 	if (!HAS_GMCH(i915))
18730 		sanitize_watermarks(i915);
18731 
18732 	return 0;
18733 }
18734 
18735 /* part #3: call after gem init */
18736 int intel_modeset_init(struct drm_i915_private *i915)
18737 {
18738 	int ret;
18739 
18740 	if (!HAS_DISPLAY(i915))
18741 		return 0;
18742 
18743 	/*
18744 	 * Force all active planes to recompute their states. So that on
18745 	 * mode_setcrtc after probe, all the intel_plane_state variables
18746 	 * are already calculated and there is no assert_plane warnings
18747 	 * during bootup.
18748 	 */
18749 	ret = intel_initial_commit(&i915->drm);
18750 	if (ret)
18751 		drm_dbg_kms(&i915->drm, "Initial modeset failed, %d\n", ret);
18752 
18753 	intel_overlay_setup(i915);
18754 
18755 	ret = intel_fbdev_init(&i915->drm);
18756 	if (ret)
18757 		return ret;
18758 
18759 	/* Only enable hotplug handling once the fbdev is fully set up. */
18760 	intel_hpd_init(i915);
18761 	intel_hpd_poll_disable(i915);
18762 
18763 	intel_init_ipc(i915);
18764 
18765 	return 0;
18766 }
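
/*
 * For orientation, the three "part #N" functions above run in order
 * from the probe path, roughly as follows (a sketch; the authoritative
 * ordering lives in i915_driver_probe()):
 *
 *	intel_modeset_init_noirq(i915);
 *	... irq install ...
 *	intel_modeset_init_nogem(i915);
 *	... gem init ...
 *	intel_modeset_init(i915);
 */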
18767 
18768 void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
18769 {
18770 	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
18771 	/* 640x480@60Hz, ~25175 kHz */
18772 	struct dpll clock = {
18773 		.m1 = 18,
18774 		.m2 = 7,
18775 		.p1 = 13,
18776 		.p2 = 4,
18777 		.n = 2,
18778 	};
18779 	u32 dpll, fp;
18780 	int i;
18781 
18782 	drm_WARN_ON(&dev_priv->drm,
18783 		    i9xx_calc_dpll_params(48000, &clock) != 25154);
18784 
18785 	drm_dbg_kms(&dev_priv->drm,
18786 		    "enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
18787 		    pipe_name(pipe), clock.vco, clock.dot);
18788 
18789 	fp = i9xx_dpll_compute_fp(&clock);
18790 	dpll = DPLL_DVO_2X_MODE |
18791 		DPLL_VGA_MODE_DIS |
18792 		((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
18793 		PLL_P2_DIVIDE_BY_4 |
18794 		PLL_REF_INPUT_DREFCLK |
18795 		DPLL_VCO_ENABLE;
18796 
18797 	intel_de_write(dev_priv, FP0(pipe), fp);
18798 	intel_de_write(dev_priv, FP1(pipe), fp);
18799 
18800 	intel_de_write(dev_priv, HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
18801 	intel_de_write(dev_priv, HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
18802 	intel_de_write(dev_priv, HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
18803 	intel_de_write(dev_priv, VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
18804 	intel_de_write(dev_priv, VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
18805 	intel_de_write(dev_priv, VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
18806 	intel_de_write(dev_priv, PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));
18807 
18808 	/*
18809 	 * Apparently we need to have VGA mode enabled prior to changing
18810 	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
18811 	 * dividers, even though the register value does change.
18812 	 */
18813 	intel_de_write(dev_priv, DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
18814 	intel_de_write(dev_priv, DPLL(pipe), dpll);
18815 
18816 	/* Wait for the clocks to stabilize. */
18817 	intel_de_posting_read(dev_priv, DPLL(pipe));
18818 	udelay(150);
18819 
18820 	/* The pixel multiplier can only be updated once the
18821 	 * DPLL is enabled and the clocks are stable.
18822 	 *
18823 	 * So write it again.
18824 	 */
18825 	intel_de_write(dev_priv, DPLL(pipe), dpll);
18826 
18827 	/* We do this three times for luck */
18828 	for (i = 0; i < 3 ; i++) {
18829 		intel_de_write(dev_priv, DPLL(pipe), dpll);
18830 		intel_de_posting_read(dev_priv, DPLL(pipe));
18831 		udelay(150); /* wait for warmup */
18832 	}
18833 
18834 	intel_de_write(dev_priv, PIPECONF(pipe),
18835 		       PIPECONF_ENABLE | PIPECONF_PROGRESSIVE);
18836 	intel_de_posting_read(dev_priv, PIPECONF(pipe));
18837 
18838 	intel_wait_for_pipe_scanline_moving(crtc);
18839 }
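
/*
 * Worked numbers for the fixed 640x480 DPLL above, using the classic
 * i9xx divider formulas (illustrative; the real math lives in
 * i9xx_calc_dpll_params()):
 *
 *	m   = 5 * (m1 + 2) + (m2 + 2) = 5 * 20 + 9     = 109
 *	vco = ref * m / (n + 2)       = 48000 * 109 / 4 = 1308000 kHz
 *	dot = vco / (p1 * p2)         = 1308000 / 52    = 25154 kHz
 *
 * which is exactly what the drm_WARN_ON() at the top of the function
 * asserts.
 */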
18840 
18841 void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
18842 {
18843 	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
18844 
18845 	drm_dbg_kms(&dev_priv->drm, "disabling pipe %c due to force quirk\n",
18846 		    pipe_name(pipe));
18847 
18848 	drm_WARN_ON(&dev_priv->drm,
18849 		    intel_de_read(dev_priv, DSPCNTR(PLANE_A)) &
18850 		    DISPLAY_PLANE_ENABLE);
18851 	drm_WARN_ON(&dev_priv->drm,
18852 		    intel_de_read(dev_priv, DSPCNTR(PLANE_B)) &
18853 		    DISPLAY_PLANE_ENABLE);
18854 	drm_WARN_ON(&dev_priv->drm,
18855 		    intel_de_read(dev_priv, DSPCNTR(PLANE_C)) &
18856 		    DISPLAY_PLANE_ENABLE);
18857 	drm_WARN_ON(&dev_priv->drm,
18858 		    intel_de_read(dev_priv, CURCNTR(PIPE_A)) & MCURSOR_MODE);
18859 	drm_WARN_ON(&dev_priv->drm,
18860 		    intel_de_read(dev_priv, CURCNTR(PIPE_B)) & MCURSOR_MODE);
18861 
18862 	intel_de_write(dev_priv, PIPECONF(pipe), 0);
18863 	intel_de_posting_read(dev_priv, PIPECONF(pipe));
18864 
18865 	intel_wait_for_pipe_scanline_stopped(crtc);
18866 
18867 	intel_de_write(dev_priv, DPLL(pipe), DPLL_VGA_MODE_DIS);
18868 	intel_de_posting_read(dev_priv, DPLL(pipe));
18869 }
18870 
18871 static void
18872 intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
18873 {
18874 	struct intel_crtc *crtc;
18875 
18876 	if (INTEL_GEN(dev_priv) >= 4)
18877 		return;
18878 
18879 	for_each_intel_crtc(&dev_priv->drm, crtc) {
18880 		struct intel_plane *plane =
18881 			to_intel_plane(crtc->base.primary);
18882 		struct intel_crtc *plane_crtc;
18883 		enum pipe pipe;
18884 
18885 		if (!plane->get_hw_state(plane, &pipe))
18886 			continue;
18887 
18888 		if (pipe == crtc->pipe)
18889 			continue;
18890 
18891 		drm_dbg_kms(&dev_priv->drm,
18892 			    "[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
18893 			    plane->base.base.id, plane->base.name);
18894 
18895 		plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
18896 		intel_plane_disable_noatomic(plane_crtc, plane);
18897 	}
18898 }
18899 
18900 static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
18901 {
18902 	struct drm_device *dev = crtc->base.dev;
18903 	struct intel_encoder *encoder;
18904 
18905 	for_each_encoder_on_crtc(dev, &crtc->base, encoder)
18906 		return true;
18907 
18908 	return false;
18909 }
18910 
18911 static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
18912 {
18913 	struct drm_device *dev = encoder->base.dev;
18914 	struct intel_connector *connector;
18915 
18916 	for_each_connector_on_encoder(dev, &encoder->base, connector)
18917 		return connector;
18918 
18919 	return NULL;
18920 }
18921 
18922 static bool has_pch_trancoder(struct drm_i915_private *dev_priv,
18923 			      enum pipe pch_transcoder)
18924 {
18925 	return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
18926 		(HAS_PCH_LPT_H(dev_priv) && pch_transcoder == PIPE_A);
18927 }
18928 
18929 static void intel_sanitize_frame_start_delay(const struct intel_crtc_state *crtc_state)
18930 {
18931 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
18932 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
18933 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
18934 
18935 	if (INTEL_GEN(dev_priv) >= 9 ||
18936 	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
18937 		i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder);
18938 		u32 val;
18939 
18940 		if (transcoder_is_dsi(cpu_transcoder))
18941 			return;
18942 
18943 		val = intel_de_read(dev_priv, reg);
18944 		val &= ~HSW_FRAME_START_DELAY_MASK;
18945 		val |= HSW_FRAME_START_DELAY(0);
18946 		intel_de_write(dev_priv, reg, val);
18947 	} else {
18948 		i915_reg_t reg = PIPECONF(cpu_transcoder);
18949 		u32 val;
18950 
18951 		val = intel_de_read(dev_priv, reg);
18952 		val &= ~PIPECONF_FRAME_START_DELAY_MASK;
18953 		val |= PIPECONF_FRAME_START_DELAY(0);
18954 		intel_de_write(dev_priv, reg, val);
18955 	}
18956 
18957 	if (!crtc_state->has_pch_encoder)
18958 		return;
18959 
18960 	if (HAS_PCH_IBX(dev_priv)) {
18961 		i915_reg_t reg = PCH_TRANSCONF(crtc->pipe);
18962 		u32 val;
18963 
18964 		val = intel_de_read(dev_priv, reg);
18965 		val &= ~TRANS_FRAME_START_DELAY_MASK;
18966 		val |= TRANS_FRAME_START_DELAY(0);
18967 		intel_de_write(dev_priv, reg, val);
18968 	} else {
18969 		enum pipe pch_transcoder = intel_crtc_pch_transcoder(crtc);
18970 		i915_reg_t reg = TRANS_CHICKEN2(pch_transcoder);
18971 		u32 val;
18972 
18973 		val = intel_de_read(dev_priv, reg);
18974 		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
18975 		val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
18976 		intel_de_write(dev_priv, reg, val);
18977 	}
18978 }
18979 
18980 static void intel_sanitize_crtc(struct intel_crtc *crtc,
18981 				struct drm_modeset_acquire_ctx *ctx)
18982 {
18983 	struct drm_device *dev = crtc->base.dev;
18984 	struct drm_i915_private *dev_priv = to_i915(dev);
18985 	struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
18986 
18987 	if (crtc_state->hw.active) {
18988 		struct intel_plane *plane;
18989 
18990 		/* Clear any frame start delays used for debugging left by the BIOS */
18991 		intel_sanitize_frame_start_delay(crtc_state);
18992 
18993 		/* Disable everything but the primary plane */
18994 		for_each_intel_plane_on_crtc(dev, crtc, plane) {
18995 			const struct intel_plane_state *plane_state =
18996 				to_intel_plane_state(plane->base.state);
18997 
18998 			if (plane_state->uapi.visible &&
18999 			    plane->base.type != DRM_PLANE_TYPE_PRIMARY)
19000 				intel_plane_disable_noatomic(crtc, plane);
19001 		}
19002 
19003 		/*
19004 		 * Disable any background color set by the BIOS, but enable the
19005 		 * gamma and CSC to match how we program our planes.
19006 		 */
19007 		if (INTEL_GEN(dev_priv) >= 9)
19008 			intel_de_write(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe),
19009 				       SKL_BOTTOM_COLOR_GAMMA_ENABLE | SKL_BOTTOM_COLOR_CSC_ENABLE);
19010 	}
19011 
19012 	/* Adjust the state of the output pipe according to whether we
19013 	 * have active connectors/encoders. */
19014 	if (crtc_state->hw.active && !intel_crtc_has_encoders(crtc) &&
19015 	    !crtc_state->bigjoiner_slave)
19016 		intel_crtc_disable_noatomic(crtc, ctx);
19017 
19018 	if (crtc_state->hw.active || HAS_GMCH(dev_priv)) {
19019 		/*
19020 		 * We start out with underrun reporting disabled to avoid races.
19021 		 * For correct bookkeeping mark this on active crtcs.
19022 		 *
19023 		 * Also on gmch platforms we don't have any hardware bits to
19024 		 * disable the underrun reporting. Which means we need to start
19025 		 * out with underrun reporting disabled also on inactive pipes,
19026 		 * since otherwise we'll complain about the garbage we read when
19027 		 * e.g. coming up after runtime pm.
19028 		 *
19029 		 * No protection against concurrent access is required - at
19030 		 * worst a fifo underrun happens which also sets this to false.
19031 		 */
19032 		crtc->cpu_fifo_underrun_disabled = true;
19033 		/*
19034 		 * We track the PCH transcoder underrun reporting state
19035 		 * within the crtc. With crtc for pipe A housing the underrun
19036 		 * reporting state for PCH transcoder A, crtc for pipe B housing
19037 		 * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
19038 		 * and marking underrun reporting as disabled for the non-existing
19039 		 * PCH transcoders B and C would prevent enabling the south
19040 		 * error interrupt (see cpt_can_enable_serr_int()).
19041 		 */
19042 		if (has_pch_trancoder(dev_priv, crtc->pipe))
19043 			crtc->pch_fifo_underrun_disabled = true;
19044 	}
19045 }
19046 
19047 static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
19048 {
19049 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
19050 
19051 	/*
19052 	 * Some SNB BIOSen (eg. ASUS K53SV) are known to misprogram
19053 	 * the hardware when a high res display is plugged in. DPLL P
19054 	 * divider is zero, and the pipe timings are bonkers. We'll
19055 	 * try to disable everything in that case.
19056 	 *
19057 	 * FIXME would be nice to be able to sanitize this state
19058 	 * without several WARNs, but for now let's take the easy
19059 	 * road.
19060 	 */
19061 	return IS_GEN(dev_priv, 6) &&
19062 		crtc_state->hw.active &&
19063 		crtc_state->shared_dpll &&
19064 		crtc_state->port_clock == 0;
19065 }
19066 
19067 static void intel_sanitize_encoder(struct intel_encoder *encoder)
19068 {
19069 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
19070 	struct intel_connector *connector;
19071 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
19072 	struct intel_crtc_state *crtc_state = crtc ?
19073 		to_intel_crtc_state(crtc->base.state) : NULL;
19074 
19075 	/* We need to check both for a crtc link (meaning that the
19076 	 * encoder is active and trying to read from a pipe) and the
19077 	 * pipe itself being active. */
19078 	bool has_active_crtc = crtc_state &&
19079 		crtc_state->hw.active;
19080 
19081 	if (crtc_state && has_bogus_dpll_config(crtc_state)) {
19082 		drm_dbg_kms(&dev_priv->drm,
19083 			    "BIOS has misprogrammed the hardware. Disabling pipe %c\n",
19084 			    pipe_name(crtc->pipe));
19085 		has_active_crtc = false;
19086 	}
19087 
19088 	connector = intel_encoder_find_connector(encoder);
19089 	if (connector && !has_active_crtc) {
19090 		drm_dbg_kms(&dev_priv->drm,
19091 			    "[ENCODER:%d:%s] has active connectors but no active pipe!\n",
19092 			    encoder->base.base.id,
19093 			    encoder->base.name);
19094 
19095 		/* Connector is active, but has no active pipe. This is
19096 		 * fallout from our resume register restoring. Disable
19097 		 * the encoder manually again. */
19098 		if (crtc_state) {
19099 			struct drm_encoder *best_encoder;
19100 
19101 			drm_dbg_kms(&dev_priv->drm,
19102 				    "[ENCODER:%d:%s] manually disabled\n",
19103 				    encoder->base.base.id,
19104 				    encoder->base.name);
19105 
19106 			/* avoid oopsing in case the hooks consult best_encoder */
19107 			best_encoder = connector->base.state->best_encoder;
19108 			connector->base.state->best_encoder = &encoder->base;
19109 
19110 			/* FIXME NULL atomic state passed! */
19111 			if (encoder->disable)
19112 				encoder->disable(NULL, encoder, crtc_state,
19113 						 connector->base.state);
19114 			if (encoder->post_disable)
19115 				encoder->post_disable(NULL, encoder, crtc_state,
19116 						      connector->base.state);
19117 
19118 			connector->base.state->best_encoder = best_encoder;
19119 		}
19120 		encoder->base.crtc = NULL;
19121 
19122 		/* Inconsistent output/port/pipe state happens presumably due to
19123 		 * a bug in one of the get_hw_state functions. Or someplace else
19124 		 * in our code, like the register restore mess on resume. Clamp
19125 		 * things to off as a safer default. */
19126 
19127 		connector->base.dpms = DRM_MODE_DPMS_OFF;
19128 		connector->base.encoder = NULL;
19129 	}
19130 
19131 	/* notify opregion of the sanitized encoder state */
19132 	intel_opregion_notify_encoder(encoder, connector && has_active_crtc);
19133 
19134 	if (INTEL_GEN(dev_priv) >= 11)
19135 		icl_sanitize_encoder_pll_mapping(encoder);
19136 }
19137 
19138 /* FIXME read out full plane state for all planes */
19139 static void readout_plane_state(struct drm_i915_private *dev_priv)
19140 {
19141 	struct intel_plane *plane;
19142 	struct intel_crtc *crtc;
19143 
19144 	for_each_intel_plane(&dev_priv->drm, plane) {
19145 		struct intel_plane_state *plane_state =
19146 			to_intel_plane_state(plane->base.state);
19147 		struct intel_crtc_state *crtc_state;
19148 		enum pipe pipe = PIPE_A;
19149 		bool visible;
19150 
19151 		visible = plane->get_hw_state(plane, &pipe);
19152 
19153 		crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
19154 		crtc_state = to_intel_crtc_state(crtc->base.state);
19155 
19156 		intel_set_plane_visible(crtc_state, plane_state, visible);
19157 
19158 		drm_dbg_kms(&dev_priv->drm,
19159 			    "[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
19160 			    plane->base.base.id, plane->base.name,
19161 			    enableddisabled(visible), pipe_name(pipe));
19162 	}
19163 
19164 	for_each_intel_crtc(&dev_priv->drm, crtc) {
19165 		struct intel_crtc_state *crtc_state =
19166 			to_intel_crtc_state(crtc->base.state);
19167 
19168 		fixup_active_planes(crtc_state);
19169 	}
19170 }
19171 
19172 static void intel_modeset_readout_hw_state(struct drm_device *dev)
19173 {
19174 	struct drm_i915_private *dev_priv = to_i915(dev);
19175 	struct intel_cdclk_state *cdclk_state =
19176 		to_intel_cdclk_state(dev_priv->cdclk.obj.state);
19177 	struct intel_dbuf_state *dbuf_state =
19178 		to_intel_dbuf_state(dev_priv->dbuf.obj.state);
19179 	enum pipe pipe;
19180 	struct intel_crtc *crtc;
19181 	struct intel_encoder *encoder;
19182 	struct intel_connector *connector;
19183 	struct drm_connector_list_iter conn_iter;
19184 	u8 active_pipes = 0;
19185 
19186 	for_each_intel_crtc(dev, crtc) {
19187 		struct intel_crtc_state *crtc_state =
19188 			to_intel_crtc_state(crtc->base.state);
19189 
19190 		__drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi);
19191 		intel_crtc_free_hw_state(crtc_state);
19192 		intel_crtc_state_reset(crtc_state, crtc);
19193 
19194 		intel_crtc_get_pipe_config(crtc_state);
19195 
19196 		crtc_state->hw.enable = crtc_state->hw.active;
19197 
19198 		crtc->base.enabled = crtc_state->hw.enable;
19199 		crtc->active = crtc_state->hw.active;
19200 
19201 		if (crtc_state->hw.active)
19202 			active_pipes |= BIT(crtc->pipe);
19203 
19204 		drm_dbg_kms(&dev_priv->drm,
19205 			    "[CRTC:%d:%s] hw state readout: %s\n",
19206 			    crtc->base.base.id, crtc->base.name,
19207 			    enableddisabled(crtc_state->hw.active));
19208 	}
19209 
19210 	dev_priv->active_pipes = cdclk_state->active_pipes =
19211 		dbuf_state->active_pipes = active_pipes;
19212 
19213 	readout_plane_state(dev_priv);
19214 
19215 	intel_dpll_readout_hw_state(dev_priv);
19216 
19217 	for_each_intel_encoder(dev, encoder) {
19218 		pipe = 0;
19219 
19220 		if (encoder->get_hw_state(encoder, &pipe)) {
19221 			struct intel_crtc_state *crtc_state;
19222 
19223 			crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
19224 			crtc_state = to_intel_crtc_state(crtc->base.state);
19225 
19226 			encoder->base.crtc = &crtc->base;
19227 			intel_encoder_get_config(encoder, crtc_state);
19228 			if (encoder->sync_state)
19229 				encoder->sync_state(encoder, crtc_state);
19230 
19231 			/* read out to slave crtc as well for bigjoiner */
19232 			if (crtc_state->bigjoiner) {
19233 				/* encoder should be linked to the bigjoiner master */
19234 				WARN_ON(crtc_state->bigjoiner_slave);
19235 
19236 				crtc = crtc_state->bigjoiner_linked_crtc;
19237 				crtc_state = to_intel_crtc_state(crtc->base.state);
19238 				intel_encoder_get_config(encoder, crtc_state);
19239 			}
19240 		} else {
19241 			encoder->base.crtc = NULL;
19242 		}
19243 
19244 		drm_dbg_kms(&dev_priv->drm,
19245 			    "[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
19246 			    encoder->base.base.id, encoder->base.name,
19247 			    enableddisabled(encoder->base.crtc),
19248 			    pipe_name(pipe));
19249 	}
19250 
19251 	drm_connector_list_iter_begin(dev, &conn_iter);
19252 	for_each_intel_connector_iter(connector, &conn_iter) {
19253 		if (connector->get_hw_state(connector)) {
19254 			struct intel_crtc_state *crtc_state;
19255 			struct intel_crtc *crtc;
19256 
19257 			connector->base.dpms = DRM_MODE_DPMS_ON;
19258 
19259 			encoder = intel_attached_encoder(connector);
19260 			connector->base.encoder = &encoder->base;
19261 
19262 			crtc = to_intel_crtc(encoder->base.crtc);
19263 			crtc_state = crtc ? to_intel_crtc_state(crtc->base.state) : NULL;
19264 
19265 			if (crtc_state && crtc_state->hw.active) {
19266 				/*
19267 				 * This has to be done during hardware readout
19268 				 * because anything calling .crtc_disable may
19269 				 * rely on the connector_mask being accurate.
19270 				 */
19271 				crtc_state->uapi.connector_mask |=
19272 					drm_connector_mask(&connector->base);
19273 				crtc_state->uapi.encoder_mask |=
19274 					drm_encoder_mask(&encoder->base);
19275 			}
19276 		} else {
19277 			connector->base.dpms = DRM_MODE_DPMS_OFF;
19278 			connector->base.encoder = NULL;
19279 		}
19280 		drm_dbg_kms(&dev_priv->drm,
19281 			    "[CONNECTOR:%d:%s] hw state readout: %s\n",
19282 			    connector->base.base.id, connector->base.name,
19283 			    enableddisabled(connector->base.encoder));
19284 	}
19285 	drm_connector_list_iter_end(&conn_iter);
19286 
19287 	for_each_intel_crtc(dev, crtc) {
19288 		struct intel_bw_state *bw_state =
19289 			to_intel_bw_state(dev_priv->bw_obj.state);
19290 		struct intel_crtc_state *crtc_state =
19291 			to_intel_crtc_state(crtc->base.state);
19292 		struct intel_plane *plane;
19293 		int min_cdclk = 0;
19294 
19295 		if (crtc_state->bigjoiner_slave)
19296 			continue;
19297 
19298 		if (crtc_state->hw.active) {
19299 			/*
19300 			 * The initial mode needs to be set in order to keep
19301 			 * the atomic core happy. It wants a valid mode if the
19302 			 * crtc's enabled, so we do the above call.
19303 			 *
19304 			 * But we don't set all the derived state fully, hence
19305 			 * set a flag to indicate that a full recalculation is
19306 			 * needed on the next commit.
19307 			 */
19308 			crtc_state->inherited = true;
19309 
19310 			intel_crtc_update_active_timings(crtc_state);
19311 
19312 			intel_crtc_copy_hw_to_uapi_state(crtc_state);
19313 		}
19314 
19315 		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
19316 			const struct intel_plane_state *plane_state =
19317 				to_intel_plane_state(plane->base.state);
19318 
19319 			/*
19320 			 * FIXME don't have the fb yet, so can't
19321 			 * use intel_plane_data_rate() :(
19322 			 */
19323 			if (plane_state->uapi.visible)
19324 				crtc_state->data_rate[plane->id] =
19325 					4 * crtc_state->pixel_rate;
19326 			/*
19327 			 * FIXME don't have the fb yet, so can't
19328 			 * use plane->min_cdclk() :(
19329 			 */
19330 			if (plane_state->uapi.visible && plane->min_cdclk) {
19331 				if (crtc_state->double_wide ||
19332 				    INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
19333 					crtc_state->min_cdclk[plane->id] =
19334 						DIV_ROUND_UP(crtc_state->pixel_rate, 2);
19335 				else
19336 					crtc_state->min_cdclk[plane->id] =
19337 						crtc_state->pixel_rate;
19338 			}
19339 			drm_dbg_kms(&dev_priv->drm,
19340 				    "[PLANE:%d:%s] min_cdclk %d kHz\n",
19341 				    plane->base.base.id, plane->base.name,
19342 				    crtc_state->min_cdclk[plane->id]);
19343 		}
19344 
19345 		if (crtc_state->hw.active) {
19346 			min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
19347 			if (drm_WARN_ON(dev, min_cdclk < 0))
19348 				min_cdclk = 0;
19349 		}
19350 
19351 		cdclk_state->min_cdclk[crtc->pipe] = min_cdclk;
19352 		cdclk_state->min_voltage_level[crtc->pipe] =
19353 			crtc_state->min_voltage_level;
19354 
19355 		intel_bw_crtc_update(bw_state, crtc_state);
19356 
19357 		intel_pipe_config_sanity_check(dev_priv, crtc_state);
19358 
19359 		/* discard our incomplete slave state, copy it from master */
19360 		if (crtc_state->bigjoiner && crtc_state->hw.active) {
19361 			struct intel_crtc *slave = crtc_state->bigjoiner_linked_crtc;
19362 			struct intel_crtc_state *slave_crtc_state =
19363 				to_intel_crtc_state(slave->base.state);
19364 
19365 			copy_bigjoiner_crtc_state(slave_crtc_state, crtc_state);
19366 			slave->base.mode = crtc->base.mode;
19367 
19368 			cdclk_state->min_cdclk[slave->pipe] = min_cdclk;
19369 			cdclk_state->min_voltage_level[slave->pipe] =
19370 				crtc_state->min_voltage_level;
19371 
19372 			for_each_intel_plane_on_crtc(&dev_priv->drm, slave, plane) {
19373 				const struct intel_plane_state *plane_state =
19374 					to_intel_plane_state(plane->base.state);
19375 
19376 				/*
19377 				 * FIXME don't have the fb yet, so can't
19378 				 * use intel_plane_data_rate() :(
19379 				 */
19380 				if (plane_state->uapi.visible)
19381 					crtc_state->data_rate[plane->id] =
19382 						4 * crtc_state->pixel_rate;
19383 				else
19384 					crtc_state->data_rate[plane->id] = 0;
19385 			}
19386 
19387 			intel_bw_crtc_update(bw_state, slave_crtc_state);
19388 			drm_calc_timestamping_constants(&slave->base,
19389 							&slave_crtc_state->hw.adjusted_mode);
19390 		}
19391 	}
19392 }
19393 
19394 static void
19395 get_encoder_power_domains(struct drm_i915_private *dev_priv)
19396 {
19397 	struct intel_encoder *encoder;
19398 
19399 	for_each_intel_encoder(&dev_priv->drm, encoder) {
19400 		struct intel_crtc_state *crtc_state;
19401 
19402 		if (!encoder->get_power_domains)
19403 			continue;
19404 
19405 		/*
19406 		 * MST-primary and inactive encoders don't have a crtc state
19407 		 * and neither of these require any power domain references.
19408 		 */
19409 		if (!encoder->base.crtc)
19410 			continue;
19411 
19412 		crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
19413 		encoder->get_power_domains(encoder, crtc_state);
19414 	}
19415 }
19416 
19417 static void intel_early_display_was(struct drm_i915_private *dev_priv)
19418 {
19419 	/*
19420 	 * Display WA #1185 WaDisableDARBFClkGating:cnl,glk,icl,ehl,tgl
19421 	 * Also known as Wa_14010480278.
19422 	 */
19423 	if (IS_GEN_RANGE(dev_priv, 10, 12) || IS_GEMINILAKE(dev_priv))
19424 		intel_de_write(dev_priv, GEN9_CLKGATE_DIS_0,
19425 			       intel_de_read(dev_priv, GEN9_CLKGATE_DIS_0) | DARBF_GATING_DIS);
19426 
19427 	if (IS_HASWELL(dev_priv)) {
19428 		/*
19429 		 * WaRsPkgCStateDisplayPMReq:hsw
19430 		 * System hang if this isn't done before disabling all planes!
19431 		 */
19432 		intel_de_write(dev_priv, CHICKEN_PAR1_1,
19433 			       intel_de_read(dev_priv, CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
19434 	}
19435 
19436 	if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv) || IS_COMETLAKE(dev_priv)) {
19437 		/* Display WA #1142:kbl,cfl,cml */
19438 		intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
19439 			     KBL_ARB_FILL_SPARE_22, KBL_ARB_FILL_SPARE_22);
19440 		intel_de_rmw(dev_priv, CHICKEN_MISC_2,
19441 			     KBL_ARB_FILL_SPARE_13 | KBL_ARB_FILL_SPARE_14,
19442 			     KBL_ARB_FILL_SPARE_14);
19443 	}
19444 }
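
/*
 * Note the two styles above: the open-coded read|write pairs are
 * equivalent to intel_de_rmw() with an empty clear mask, e.g. the
 * DARBF write could also be spelled (illustrative):
 *
 *	intel_de_rmw(dev_priv, GEN9_CLKGATE_DIS_0, 0, DARBF_GATING_DIS);
 */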
19445 
19446 static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
19447 				       enum port port, i915_reg_t hdmi_reg)
19448 {
19449 	u32 val = intel_de_read(dev_priv, hdmi_reg);
19450 
19451 	if (val & SDVO_ENABLE ||
19452 	    (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A))
19453 		return;
19454 
19455 	drm_dbg_kms(&dev_priv->drm,
19456 		    "Sanitizing transcoder select for HDMI %c\n",
19457 		    port_name(port));
19458 
19459 	val &= ~SDVO_PIPE_SEL_MASK;
19460 	val |= SDVO_PIPE_SEL(PIPE_A);
19461 
19462 	intel_de_write(dev_priv, hdmi_reg, val);
19463 }
19464 
19465 static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
19466 				     enum port port, i915_reg_t dp_reg)
19467 {
19468 	u32 val = intel_de_read(dev_priv, dp_reg);
19469 
19470 	if (val & DP_PORT_EN ||
19471 	    (val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A))
19472 		return;
19473 
19474 	drm_dbg_kms(&dev_priv->drm,
19475 		    "Sanitizing transcoder select for DP %c\n",
19476 		    port_name(port));
19477 
19478 	val &= ~DP_PIPE_SEL_MASK;
19479 	val |= DP_PIPE_SEL(PIPE_A);
19480 
19481 	intel_de_write(dev_priv, dp_reg, val);
19482 }
19483 
19484 static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
19485 {
19486 	/*
19487 	 * The BIOS may select transcoder B on some of the PCH
19488 	 * ports even if it doesn't enable the port. This would trip
19489 	 * assert_pch_dp_disabled() and assert_pch_hdmi_disabled().
19490 	 * Sanitize the transcoder select bits to prevent that. We
19491 	 * assume that the BIOS never actually enabled the port,
19492 	 * because if it did we'd actually have to toggle the port
19493 	 * on and back off to make the transcoder A select stick
19494 	 * (see intel_dp_link_down(), intel_disable_hdmi(),
19495 	 * intel_disable_sdvo()).
19496 	 */
19497 	ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B);
19498 	ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C);
19499 	ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D);
19500 
19501 	/* PCH SDVOB multiplex with HDMIB */
19502 	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB);
19503 	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC);
19504 	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID);
19505 }
19506 
19507 /* Scan out the current hw modeset state,
19508  * and sanitize it into a consistent state
19509  */
19510 static void
19511 intel_modeset_setup_hw_state(struct drm_device *dev,
19512 			     struct drm_modeset_acquire_ctx *ctx)
19513 {
19514 	struct drm_i915_private *dev_priv = to_i915(dev);
19515 	struct intel_encoder *encoder;
19516 	struct intel_crtc *crtc;
19517 	intel_wakeref_t wakeref;
19518 
19519 	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
19520 
19521 	intel_early_display_was(dev_priv);
19522 	intel_modeset_readout_hw_state(dev);
19523 
19524 	/* HW state is read out, now we need to sanitize this mess. */
19525 
19526 	/* Sanitize the TypeC port mode upfront, encoders depend on this */
19527 	for_each_intel_encoder(dev, encoder) {
19528 		enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
19529 
19530 		/* We need to sanitize only the MST primary port. */
19531 		if (encoder->type != INTEL_OUTPUT_DP_MST &&
19532 		    intel_phy_is_tc(dev_priv, phy))
19533 			intel_tc_port_sanitize(enc_to_dig_port(encoder));
19534 	}
19535 
19536 	get_encoder_power_domains(dev_priv);
19537 
19538 	if (HAS_PCH_IBX(dev_priv))
19539 		ibx_sanitize_pch_ports(dev_priv);
19540 
19541 	/*
19542 	 * intel_sanitize_plane_mapping() may need to do vblank
19543 	 * waits, so we need vblank interrupts restored beforehand.
19544 	 */
19545 	for_each_intel_crtc(&dev_priv->drm, crtc) {
19546 		struct intel_crtc_state *crtc_state =
19547 			to_intel_crtc_state(crtc->base.state);
19548 
19549 		drm_crtc_vblank_reset(&crtc->base);
19550 
19551 		if (crtc_state->hw.active)
19552 			intel_crtc_vblank_on(crtc_state);
19553 	}
19554 
19555 	intel_sanitize_plane_mapping(dev_priv);
19556 
19557 	for_each_intel_encoder(dev, encoder)
19558 		intel_sanitize_encoder(encoder);
19559 
19560 	for_each_intel_crtc(&dev_priv->drm, crtc) {
19561 		struct intel_crtc_state *crtc_state =
19562 			to_intel_crtc_state(crtc->base.state);
19563 
19564 		intel_sanitize_crtc(crtc, ctx);
19565 		intel_dump_pipe_config(crtc_state, NULL, "[setup_hw_state]");
19566 	}
19567 
19568 	intel_modeset_update_connector_atomic_state(dev);
19569 
19570 	intel_dpll_sanitize_state(dev_priv);
19571 
19572 	if (IS_G4X(dev_priv)) {
19573 		g4x_wm_get_hw_state(dev_priv);
19574 		g4x_wm_sanitize(dev_priv);
19575 	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
19576 		vlv_wm_get_hw_state(dev_priv);
19577 		vlv_wm_sanitize(dev_priv);
19578 	} else if (INTEL_GEN(dev_priv) >= 9) {
19579 		skl_wm_get_hw_state(dev_priv);
19580 	} else if (HAS_PCH_SPLIT(dev_priv)) {
19581 		ilk_wm_get_hw_state(dev_priv);
19582 	}
19583 
19584 	for_each_intel_crtc(dev, crtc) {
19585 		struct intel_crtc_state *crtc_state =
19586 			to_intel_crtc_state(crtc->base.state);
19587 		u64 put_domains;
19588 
19589 		put_domains = modeset_get_crtc_power_domains(crtc_state);
19590 		if (drm_WARN_ON(dev, put_domains))
19591 			modeset_put_power_domains(dev_priv, put_domains);
19592 	}
19593 
19594 	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
19595 }
19596 
19597 void intel_display_resume(struct drm_device *dev)
19598 {
19599 	struct drm_i915_private *dev_priv = to_i915(dev);
19600 	struct drm_atomic_state *state = dev_priv->modeset_restore_state;
19601 	struct drm_modeset_acquire_ctx ctx;
19602 	int ret;
19603 
19604 	dev_priv->modeset_restore_state = NULL;
19605 	if (state)
19606 		state->acquire_ctx = &ctx;
19607 
19608 	drm_modeset_acquire_init(&ctx, 0);
19609 
19610 	while (1) {
19611 		ret = drm_modeset_lock_all_ctx(dev, &ctx);
19612 		if (ret != -EDEADLK)
19613 			break;
19614 
19615 		drm_modeset_backoff(&ctx);
19616 	}
19617 
19618 	if (!ret)
19619 		ret = __intel_display_resume(dev, state, &ctx);
19620 
19621 	intel_enable_ipc(dev_priv);
19622 	drm_modeset_drop_locks(&ctx);
19623 	drm_modeset_acquire_fini(&ctx);
19624 
19625 	if (ret)
19626 		drm_err(&dev_priv->drm,
19627 			"Restoring old state failed with %i\n", ret);
19628 	if (state)
19629 		drm_atomic_state_put(state);
19630 }
19631 
19632 static void intel_hpd_poll_fini(struct drm_i915_private *i915)
19633 {
19634 	struct intel_connector *connector;
19635 	struct drm_connector_list_iter conn_iter;
19636 
19637 	/* Kill all the work that may have been queued by hpd. */
19638 	drm_connector_list_iter_begin(&i915->drm, &conn_iter);
19639 	for_each_intel_connector_iter(connector, &conn_iter) {
19640 		if (connector->modeset_retry_work.func)
19641 			cancel_work_sync(&connector->modeset_retry_work);
19642 		if (connector->hdcp.shim) {
19643 			cancel_delayed_work_sync(&connector->hdcp.check_work);
19644 			cancel_work_sync(&connector->hdcp.prop_work);
19645 		}
19646 	}
19647 	drm_connector_list_iter_end(&conn_iter);
19648 }
19649 
19650 /* part #1: call before irq uninstall */
19651 void intel_modeset_driver_remove(struct drm_i915_private *i915)
19652 {
19653 	flush_workqueue(i915->flip_wq);
19654 	flush_workqueue(i915->modeset_wq);
19655 
19656 	flush_work(&i915->atomic_helper.free_work);
19657 	drm_WARN_ON(&i915->drm, !llist_empty(&i915->atomic_helper.free_list));
19658 }
19659 
19660 /* part #2: call after irq uninstall */
19661 void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915)
19662 {
19663 	/*
19664 	 * Due to the hpd irq storm handling the hotplug work can re-arm the
19665 	 * poll handlers. Hence disable polling after hpd handling is shut down.
19666 	 */
19667 	intel_hpd_poll_fini(i915);
19668 
19669 	/*
19670 	 * MST topology needs to be suspended so we don't have any calls to
19671 	 * fbdev after it's finalized. MST will be destroyed later as part of
19672 	 * drm_mode_config_cleanup()
19673 	 */
19674 	intel_dp_mst_suspend(i915);
19675 
19676 	/* poll work can call into fbdev, hence clean that up afterwards */
19677 	intel_fbdev_fini(i915);
19678 
19679 	intel_unregister_dsm_handler();
19680 
19681 	intel_fbc_global_disable(i915);
19682 
19683 	/* flush any delayed tasks or pending work */
19684 	flush_scheduled_work();
19685 
19686 	intel_hdcp_component_fini(i915);
19687 
19688 	intel_mode_config_cleanup(i915);
19689 
19690 	intel_overlay_cleanup(i915);
19691 
19692 	intel_gmbus_teardown(i915);
19693 
19694 	destroy_workqueue(i915->flip_wq);
19695 	destroy_workqueue(i915->modeset_wq);
19696 
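	/* Release the compressed framebuffer (carved out of stolen memory) last. */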
19697 	intel_fbc_cleanup_cfb(i915);
19698 }
19699 
19700 /* part #3: call after gem init */
19701 void intel_modeset_driver_remove_nogem(struct drm_i915_private *i915)
19702 {
19703 	intel_csr_ucode_fini(i915);
19704 
19705 	intel_power_domains_driver_remove(i915);
19706 
19707 	intel_vga_unregister(i915);
19708 
19709 	intel_bios_driver_remove(i915);
19710 }
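
/*
 * Illustrative ordering of the three removal parts above (a sketch only;
 * the authoritative sequence lives in the top-level driver remove path,
 * and the irq/gem steps are paraphrased rather than exact call names):
 *
 *	intel_modeset_driver_remove(i915);		- part #1
 *	...irq uninstall...
 *	intel_modeset_driver_remove_noirq(i915);	- part #2
 *	...gem cleanup...
 *	intel_modeset_driver_remove_nogem(i915);	- part #3
 */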
19711 
19712 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
19713 
19714 struct intel_display_error_state {
19715 
19716 	u32 power_well_driver;
19717 
19718 	struct intel_cursor_error_state {
19719 		u32 control;
19720 		u32 position;
19721 		u32 base;
19722 		u32 size;
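		/* XXX: size is neither captured nor printed below */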
19723 	} cursor[I915_MAX_PIPES];
19724 
19725 	struct intel_pipe_error_state {
19726 		bool power_domain_on;
19727 		u32 source;
19728 		u32 stat;
19729 	} pipe[I915_MAX_PIPES];
19730 
19731 	struct intel_plane_error_state {
19732 		u32 control;
19733 		u32 stride;
19734 		u32 size;
19735 		u32 pos;
19736 		u32 addr;
19737 		u32 surface;
19738 		u32 tile_offset;
19739 	} plane[I915_MAX_PIPES];
19740 
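	/*
	 * Must stay in sync with the transcoders[] list in
	 * intel_display_capture_error_state(); the BUILD_BUG_ON there
	 * enforces the array size.
	 */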
19741 	struct intel_transcoder_error_state {
19742 		bool available;
19743 		bool power_domain_on;
19744 		enum transcoder cpu_transcoder;
19745 
19746 		u32 conf;
19747 
19748 		u32 htotal;
19749 		u32 hblank;
19750 		u32 hsync;
19751 		u32 vtotal;
19752 		u32 vblank;
19753 		u32 vsync;
19754 	} transcoder[5];
19755 };
19756 
19757 struct intel_display_error_state *
19758 intel_display_capture_error_state(struct drm_i915_private *dev_priv)
19759 {
19760 	struct intel_display_error_state *error;
19761 	int transcoders[] = {
19762 		TRANSCODER_A,
19763 		TRANSCODER_B,
19764 		TRANSCODER_C,
19765 		TRANSCODER_D,
19766 		TRANSCODER_EDP,
19767 	};
19768 	int i;
19769 
19770 	BUILD_BUG_ON(ARRAY_SIZE(transcoders) != ARRAY_SIZE(error->transcoder));
19771 
19772 	if (!HAS_DISPLAY(dev_priv))
19773 		return NULL;
19774 
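	/*
	 * GFP_ATOMIC keeps this allocation from sleeping; error capture
	 * can be invoked from atomic (non-sleeping) context.
	 */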
19775 	error = kzalloc(sizeof(*error), GFP_ATOMIC);
19776 	if (!error)
19777 		return NULL;
19778 
19779 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
19780 		error->power_well_driver = intel_de_read(dev_priv,
19781 							 HSW_PWR_WELL_CTL2);
19782 
19783 	for_each_pipe(dev_priv, i) {
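		/*
		 * Skip pipes whose power well is down: reading their
		 * registers would return garbage and trip unclaimed
		 * register warnings. The lower-level
		 * __intel_display_power_is_enabled() check avoids the
		 * locking done by the plain variant, since we may not be
		 * able to sleep here.
		 */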
19784 		error->pipe[i].power_domain_on =
19785 			__intel_display_power_is_enabled(dev_priv,
19786 							 POWER_DOMAIN_PIPE(i));
19787 		if (!error->pipe[i].power_domain_on)
19788 			continue;
19789 
19790 		error->cursor[i].control = intel_de_read(dev_priv, CURCNTR(i));
19791 		error->cursor[i].position = intel_de_read(dev_priv, CURPOS(i));
19792 		error->cursor[i].base = intel_de_read(dev_priv, CURBASE(i));
19793 
19794 		error->plane[i].control = intel_de_read(dev_priv, DSPCNTR(i));
19795 		error->plane[i].stride = intel_de_read(dev_priv, DSPSTRIDE(i));
19796 		if (INTEL_GEN(dev_priv) <= 3) {
19797 			error->plane[i].size = intel_de_read(dev_priv,
19798 							     DSPSIZE(i));
19799 			error->plane[i].pos = intel_de_read(dev_priv,
19800 							    DSPPOS(i));
19801 		}
19802 		if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
19803 			error->plane[i].addr = intel_de_read(dev_priv,
19804 							     DSPADDR(i));
19805 		if (INTEL_GEN(dev_priv) >= 4) {
19806 			error->plane[i].surface = intel_de_read(dev_priv,
19807 								DSPSURF(i));
19808 			error->plane[i].tile_offset = intel_de_read(dev_priv,
19809 								    DSPTILEOFF(i));
19810 		}
19811 
19812 		error->pipe[i].source = intel_de_read(dev_priv, PIPESRC(i));
19813 
19814 		if (HAS_GMCH(dev_priv))
19815 			error->pipe[i].stat = intel_de_read(dev_priv,
19816 							    PIPESTAT(i));
19817 	}
19818 
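	/*
	 * Record timings only for transcoders that both exist on this
	 * platform and are powered; the available flag tells the print
	 * side below which entries are valid.
	 */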
19819 	for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
19820 		enum transcoder cpu_transcoder = transcoders[i];
19821 
19822 		if (!HAS_TRANSCODER(dev_priv, cpu_transcoder))
19823 			continue;
19824 
19825 		error->transcoder[i].available = true;
19826 		error->transcoder[i].power_domain_on =
19827 			__intel_display_power_is_enabled(dev_priv,
19828 				POWER_DOMAIN_TRANSCODER(cpu_transcoder));
19829 		if (!error->transcoder[i].power_domain_on)
19830 			continue;
19831 
19832 		error->transcoder[i].cpu_transcoder = cpu_transcoder;
19833 
19834 		error->transcoder[i].conf = intel_de_read(dev_priv,
19835 							  PIPECONF(cpu_transcoder));
19836 		error->transcoder[i].htotal = intel_de_read(dev_priv,
19837 							    HTOTAL(cpu_transcoder));
19838 		error->transcoder[i].hblank = intel_de_read(dev_priv,
19839 							    HBLANK(cpu_transcoder));
19840 		error->transcoder[i].hsync = intel_de_read(dev_priv,
19841 							   HSYNC(cpu_transcoder));
19842 		error->transcoder[i].vtotal = intel_de_read(dev_priv,
19843 							    VTOTAL(cpu_transcoder));
19844 		error->transcoder[i].vblank = intel_de_read(dev_priv,
19845 							    VBLANK(cpu_transcoder));
19846 		error->transcoder[i].vsync = intel_de_read(dev_priv,
19847 							   VSYNC(cpu_transcoder));
19848 	}
19849 
19850 	return error;
19851 }
19852 
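/* Convenience wrapper for writing lines into the error state buffer. */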
19853 #define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
19854 
19855 void
19856 intel_display_print_error_state(struct drm_i915_error_state_buf *m,
19857 				struct intel_display_error_state *error)
19858 {
19859 	struct drm_i915_private *dev_priv = m->i915;
19860 	int i;
19861 
19862 	if (!error)
19863 		return;
19864 
19865 	err_printf(m, "Num Pipes: %d\n", INTEL_NUM_PIPES(dev_priv));
19866 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
19867 		err_printf(m, "PWR_WELL_CTL2: %08x\n",
19868 			   error->power_well_driver);
19869 	for_each_pipe(dev_priv, i) {
19870 		err_printf(m, "Pipe [%d]:\n", i);
19871 		err_printf(m, "  Power: %s\n",
19872 			   onoff(error->pipe[i].power_domain_on));
19873 		err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
19874 		err_printf(m, "  STAT: %08x\n", error->pipe[i].stat);
19875 
19876 		err_printf(m, "Plane [%d]:\n", i);
19877 		err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
19878 		err_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
19879 		if (INTEL_GEN(dev_priv) <= 3) {
19880 			err_printf(m, "  SIZE: %08x\n", error->plane[i].size);
19881 			err_printf(m, "  POS: %08x\n", error->plane[i].pos);
19882 		}
19883 		if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
19884 			err_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
19885 		if (INTEL_GEN(dev_priv) >= 4) {
19886 			err_printf(m, "  SURF: %08x\n", error->plane[i].surface);
19887 			err_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
19888 		}
19889 
19890 		err_printf(m, "Cursor [%d]:\n", i);
19891 		err_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
19892 		err_printf(m, "  POS: %08x\n", error->cursor[i].position);
19893 		err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
19894 	}
19895 
19896 	for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
19897 		if (!error->transcoder[i].available)
19898 			continue;
19899 
19900 		err_printf(m, "CPU transcoder: %s\n",
19901 			   transcoder_name(error->transcoder[i].cpu_transcoder));
19902 		err_printf(m, "  Power: %s\n",
19903 			   onoff(error->transcoder[i].power_domain_on));
19904 		err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
19905 		err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
19906 		err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
19907 		err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
19908 		err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
19909 		err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
19910 		err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
19911 	}
19912 }
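
/*
 * Minimal usage sketch (hypothetical call site; the real consumer is the
 * generic i915 error state code, and the kfree is assumed to match the
 * kzalloc in intel_display_capture_error_state()):
 *
 *	struct intel_display_error_state *e;
 *
 *	e = intel_display_capture_error_state(i915);
 *	if (e) {
 *		intel_display_print_error_state(m, e);
 *		kfree(e);
 *	}
 */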
19913 
19914 #endif
19915