// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <linux/string_helpers.h>

#include "i915_reg.h"
#include "intel_atomic.h"
#include "intel_crtc.h"
#include "intel_ddi.h"
#include "intel_de.h"
#include "intel_dp.h"
#include "intel_display_types.h"
#include "intel_fdi.h"
#include "intel_fdi_regs.h"
#include "intel_link_bw.h"

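/* Platform-specific hook for FDI link training, installed by intel_fdi_init_hook(). */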
struct intel_fdi_funcs {
	void (*fdi_link_train)(struct intel_crtc *crtc,
			       const struct intel_crtc_state *crtc_state);
};

static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	bool cur_state;

	if (HAS_DDI(dev_priv)) {
		/*
		 * DDI does not have a specific FDI_TX register.
		 *
		 * FDI is never fed from EDP transcoder
		 * so pipe->transcoder cast is fine here.
		 */
		enum transcoder cpu_transcoder = (enum transcoder)pipe;
		cur_state = intel_de_read(dev_priv,
					  TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder)) & TRANS_DDI_FUNC_ENABLE;
	} else {
		cur_state = intel_de_read(dev_priv, FDI_TX_CTL(pipe)) & FDI_TX_ENABLE;
	}
	I915_STATE_WARN(dev_priv, cur_state != state,
			"FDI TX state assertion failure (expected %s, current %s)\n",
			str_on_off(state), str_on_off(cur_state));
}

void assert_fdi_tx_enabled(struct drm_i915_private *i915, enum pipe pipe)
{
	assert_fdi_tx(i915, pipe, true);
}

void assert_fdi_tx_disabled(struct drm_i915_private *i915, enum pipe pipe)
{
	assert_fdi_tx(i915, pipe, false);
}

static void assert_fdi_rx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	bool cur_state;

	cur_state = intel_de_read(dev_priv, FDI_RX_CTL(pipe)) & FDI_RX_ENABLE;
	I915_STATE_WARN(dev_priv, cur_state != state,
			"FDI RX state assertion failure (expected %s, current %s)\n",
			str_on_off(state), str_on_off(cur_state));
}

void assert_fdi_rx_enabled(struct drm_i915_private *i915, enum pipe pipe)
{
	assert_fdi_rx(i915, pipe, true);
}

void assert_fdi_rx_disabled(struct drm_i915_private *i915, enum pipe pipe)
{
	assert_fdi_rx(i915, pipe, false);
}

void assert_fdi_tx_pll_enabled(struct drm_i915_private *i915,
			       enum pipe pipe)
{
	bool cur_state;

	/* ILK FDI PLL is always enabled */
	if (IS_IRONLAKE(i915))
		return;

	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
	if (HAS_DDI(i915))
		return;

	cur_state = intel_de_read(i915, FDI_TX_CTL(pipe)) & FDI_TX_PLL_ENABLE;
	I915_STATE_WARN(i915, !cur_state,
			"FDI TX PLL assertion failure, should be active but is disabled\n");
}

static void assert_fdi_rx_pll(struct drm_i915_private *i915,
			      enum pipe pipe, bool state)
{
	bool cur_state;

	cur_state = intel_de_read(i915, FDI_RX_CTL(pipe)) & FDI_RX_PLL_ENABLE;
	I915_STATE_WARN(i915, cur_state != state,
			"FDI RX PLL assertion failure (expected %s, current %s)\n",
			str_on_off(state), str_on_off(cur_state));
}

void assert_fdi_rx_pll_enabled(struct drm_i915_private *i915, enum pipe pipe)
{
	assert_fdi_rx_pll(i915, pipe, true);
}

void assert_fdi_rx_pll_disabled(struct drm_i915_private *i915, enum pipe pipe)
{
	assert_fdi_rx_pll(i915, pipe, false);
}

void intel_fdi_link_train(struct intel_crtc *crtc,
			  const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	dev_priv->display.funcs.fdi->fdi_link_train(crtc, crtc_state);
}

/**
 * intel_fdi_add_affected_crtcs - add CRTCs on FDI affected by other modeset CRTCs
 * @state: intel atomic state
 *
 * Add a CRTC using FDI to @state if changing another CRTC's FDI BW usage is
 * known to affect the available FDI BW for the former CRTC. In practice this
 * means adding CRTC B on IVYBRIDGE if its use of FDI lanes is limited (by
 * CRTC C) and CRTC C is getting disabled.
 *
 * Returns 0 in case of success, or a negative error code otherwise.
 */
int intel_fdi_add_affected_crtcs(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state;
	const struct intel_crtc_state *new_crtc_state;
	struct intel_crtc *crtc;

	if (!IS_IVYBRIDGE(i915) || INTEL_NUM_PIPES(i915) != 3)
		return 0;

	crtc = intel_crtc_for_pipe(i915, PIPE_C);
	new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
	if (!new_crtc_state)
		return 0;

	if (!intel_crtc_needs_modeset(new_crtc_state))
		return 0;

	old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);
	if (!old_crtc_state->fdi_lanes)
		return 0;

	crtc = intel_crtc_for_pipe(i915, PIPE_B);
	new_crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
	if (IS_ERR(new_crtc_state))
		return PTR_ERR(new_crtc_state);

	old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);
	if (!old_crtc_state->fdi_lanes)
		return 0;

	return intel_modeset_pipes_in_mask_early(state,
						 "FDI link BW decrease on pipe C",
						 BIT(PIPE_B));
}

/* units of 100MHz */
static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
{
	if (crtc_state->hw.enable && crtc_state->has_pch_encoder)
		return crtc_state->fdi_lanes;

	return 0;
}

static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
			       struct intel_crtc_state *pipe_config,
			       enum pipe *pipe_to_reduce)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state = pipe_config->uapi.state;
	struct intel_crtc *other_crtc;
	struct intel_crtc_state *other_crtc_state;

	*pipe_to_reduce = pipe;

	drm_dbg_kms(&dev_priv->drm,
		    "checking fdi config on pipe %c, lanes %i\n",
		    pipe_name(pipe), pipe_config->fdi_lanes);
	if (pipe_config->fdi_lanes > 4) {
		drm_dbg_kms(&dev_priv->drm,
			    "invalid fdi lane config on pipe %c: %i lanes\n",
			    pipe_name(pipe), pipe_config->fdi_lanes);
		return -EINVAL;
	}

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		if (pipe_config->fdi_lanes > 2) {
			drm_dbg_kms(&dev_priv->drm,
				    "only 2 lanes on haswell, required: %i lanes\n",
				    pipe_config->fdi_lanes);
			return -EINVAL;
		} else {
			return 0;
		}
	}

	if (INTEL_NUM_PIPES(dev_priv) == 2)
		return 0;

	/* Ivybridge 3 pipe is really complicated */
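	/*
	 * Pipes B and C share the FDI lanes: B may use more than 2 lanes only
	 * while C is not using FDI, and C is always limited to 2 lanes and
	 * additionally requires B to use at most 2 lanes.
	 */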
	switch (pipe) {
	case PIPE_A:
		return 0;
	case PIPE_B:
		if (pipe_config->fdi_lanes <= 2)
			return 0;

		other_crtc = intel_crtc_for_pipe(dev_priv, PIPE_C);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
			drm_dbg_kms(&dev_priv->drm,
				    "invalid shared fdi lane config on pipe %c: %i lanes\n",
				    pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}
		return 0;
	case PIPE_C:
		if (pipe_config->fdi_lanes > 2) {
			drm_dbg_kms(&dev_priv->drm,
				    "only 2 lanes on pipe %c: required %i lanes\n",
				    pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}

		other_crtc = intel_crtc_for_pipe(dev_priv, PIPE_B);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
			drm_dbg_kms(&dev_priv->drm,
				    "fdi link B uses too many lanes to enable link C\n");

			*pipe_to_reduce = PIPE_B;

			return -EINVAL;
		}
		return 0;
	default:
		MISSING_CASE(pipe);
		return 0;
	}
}

void intel_fdi_pll_freq_update(struct drm_i915_private *i915)
{
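	/*
	 * Cache the FDI link frequency in kHz: ILK reads back the
	 * BIOS-programmed FDI PLL divider, SNB/IVB always run the link
	 * at 270 MHz.
	 */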
	if (IS_IRONLAKE(i915)) {
		u32 fdi_pll_clk =
			intel_de_read(i915, FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;

		i915->display.fdi.pll_freq = (fdi_pll_clk + 2) * 10000;
	} else if (IS_SANDYBRIDGE(i915) || IS_IVYBRIDGE(i915)) {
		i915->display.fdi.pll_freq = 270000;
	} else {
		return;
	}

	drm_dbg(&i915->drm, "FDI PLL freq=%d\n", i915->display.fdi.pll_freq);
}

int intel_fdi_link_freq(struct drm_i915_private *i915,
			const struct intel_crtc_state *pipe_config)
{
	if (HAS_DDI(i915))
		return pipe_config->port_clock; /* SPLL */
	else
		return i915->display.fdi.pll_freq;
}

/**
 * intel_fdi_compute_pipe_bpp - compute pipe bpp limited by max link bpp
 * @crtc_state: the crtc state
 *
 * Compute the pipe bpp limited by the CRTC's maximum link bpp. Encoders can
 * call this function during state computation in the simple case where the
 * link bpp will always match the pipe bpp. This is the case for all non-DP
 * encoders, while DP encoders will use a link bpp lower than pipe bpp in case
 * of DSC compression.
 *
 * Returns %true in case of success, %false if pipe bpp would need to be
 * reduced below its valid range.
 */
bool intel_fdi_compute_pipe_bpp(struct intel_crtc_state *crtc_state)
{
	int pipe_bpp = min(crtc_state->pipe_bpp,
			   to_bpp_int(crtc_state->max_link_bpp_x16));

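	/* 3 components with an even number of bits each, i.e. a multiple of 6 */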
	pipe_bpp = rounddown(pipe_bpp, 2 * 3);

	if (pipe_bpp < 6 * 3)
		return false;

	crtc_state->pipe_bpp = pipe_bpp;

	return true;
}

int ilk_fdi_compute_config(struct intel_crtc *crtc,
			   struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *i915 = to_i915(dev);
	const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	int lane, link_bw, fdi_dotclock;

	/* FDI is a binary signal running at ~2.7GHz, encoding
	 * each output octet as 10 bits. The actual frequency
	 * is stored as a divider into a 100MHz clock, and the
	 * mode pixel clock is stored in units of 1KHz.
	 * Hence the bw of each lane in terms of the mode signal
	 * is:
	 */
	link_bw = intel_fdi_link_freq(i915, pipe_config);

	fdi_dotclock = adjusted_mode->crtc_clock;

	lane = ilk_get_lanes_required(fdi_dotclock, link_bw,
				      pipe_config->pipe_bpp);

	pipe_config->fdi_lanes = lane;

	intel_link_compute_m_n(to_bpp_x16(pipe_config->pipe_bpp),
			       lane, fdi_dotclock,
			       link_bw,
			       intel_dp_bw_fec_overhead(false),
			       &pipe_config->fdi_m_n);

	return 0;
}

static int intel_fdi_atomic_check_bw(struct intel_atomic_state *state,
				     struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config,
				     struct intel_link_bw_limits *limits)
{
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	enum pipe pipe_to_reduce;
	int ret;

	ret = ilk_check_fdi_lanes(&i915->drm, crtc->pipe, pipe_config,
				  &pipe_to_reduce);
	if (ret != -EINVAL)
		return ret;

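	/*
	 * The lane configuration is invalid: reduce the link bpp on the
	 * offending pipe and ask for a recompute by returning -EAGAIN.
	 */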
	ret = intel_link_bw_reduce_bpp(state, limits,
				       BIT(pipe_to_reduce),
				       "FDI link BW");

	return ret ? : -EAGAIN;
}

/**
 * intel_fdi_atomic_check_link - check all modeset FDI link configuration
 * @state: intel atomic state
 * @limits: link BW limits
 *
 * Check the link configuration for all modeset FDI outputs. If the
 * configuration is invalid @limits will be updated if possible to
 * reduce the total BW, after which the configuration for all CRTCs in
 * @state must be recomputed with the updated @limits.
 *
 * Returns:
 *   - 0 if the configuration is valid
 *   - %-EAGAIN, if the configuration is invalid and @limits got updated
 *     with fallback values with which the configuration of all CRTCs
 *     in @state must be recomputed
 *   - Other negative error, if the configuration is invalid without a
 *     fallback possibility, or the check failed for another reason
 */
int intel_fdi_atomic_check_link(struct intel_atomic_state *state,
				struct intel_link_bw_limits *limits)
{
	struct intel_crtc *crtc;
	struct intel_crtc_state *crtc_state;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		int ret;

		if (!crtc_state->has_pch_encoder ||
		    !intel_crtc_needs_modeset(crtc_state) ||
		    !crtc_state->hw.enable)
			continue;

		ret = intel_fdi_atomic_check_bw(state, crtc, crtc_state, limits);
		if (ret)
			return ret;
	}

	return 0;
}

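/*
 * FDI B/C lane bifurcation: enabling it makes the FDI C receiver usable at the
 * cost of limiting FDI B to 2 lanes, see ilk_check_fdi_lanes().
 */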
static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
{
	u32 temp;

	temp = intel_de_read(dev_priv, SOUTH_CHICKEN1);
	if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
		return;

	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, FDI_RX_CTL(PIPE_B)) &
		    FDI_RX_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, FDI_RX_CTL(PIPE_C)) &
		    FDI_RX_ENABLE);

	temp &= ~FDI_BC_BIFURCATION_SELECT;
	if (enable)
		temp |= FDI_BC_BIFURCATION_SELECT;

	drm_dbg_kms(&dev_priv->drm, "%sabling fdi C rx\n",
		    enable ? "en" : "dis");
	intel_de_write(dev_priv, SOUTH_CHICKEN1, temp);
	intel_de_posting_read(dev_priv, SOUTH_CHICKEN1);
}

static void ivb_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	switch (crtc->pipe) {
	case PIPE_A:
		break;
	case PIPE_B:
		if (crtc_state->fdi_lanes > 2)
			cpt_set_fdi_bc_bifurcation(dev_priv, false);
		else
			cpt_set_fdi_bc_bifurcation(dev_priv, true);

		break;
	case PIPE_C:
		cpt_set_fdi_bc_bifurcation(dev_priv, true);

		break;
	default:
		MISSING_CASE(crtc->pipe);
	}
}

void intel_fdi_normal_train(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (IS_IVYBRIDGE(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	}
	intel_de_write(dev_priv, reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	}
	intel_de_write(dev_priv, reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* wait one idle pattern time */
	intel_de_posting_read(dev_priv, reg);
	udelay(1000);

	/* IVB wants error correction enabled */
	if (IS_IVYBRIDGE(dev_priv))
		intel_de_rmw(dev_priv, reg, 0, FDI_FS_ERRC_ENABLE | FDI_FE_ERRC_ENABLE);
}

/* The FDI link training functions for ILK/Ibexpeak. */
static void ilk_fdi_link_train(struct intel_crtc *crtc,
			       const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, tries;

	/*
	 * Write the TU size bits before fdi link training, so that error
	 * detection works.
	 */
	intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
		       intel_de_read(dev_priv, PIPE_DATA_M1(dev_priv, pipe)) & TU_SIZE_MASK);

	/* FDI needs bits from pipe first */
	assert_transcoder_enabled(dev_priv, crtc_state->cpu_transcoder);

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	intel_de_write(dev_priv, reg, temp);
	intel_de_read(dev_priv, reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	/* Ironlake workaround, enable clock pointer after FDI enable */
	intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
		       FDI_RX_PHASE_SYNC_POINTER_OVR);
	intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
		       FDI_RX_PHASE_SYNC_POINTER_OVR | FDI_RX_PHASE_SYNC_POINTER_EN);

	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = intel_de_read(dev_priv, reg);
		drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_BIT_LOCK) {
			drm_dbg_kms(&dev_priv->drm, "FDI train 1 done.\n");
			intel_de_write(dev_priv, reg, temp | FDI_RX_BIT_LOCK);
			break;
		}
	}
	if (tries == 5)
		drm_err(&dev_priv->drm, "FDI train 1 fail!\n");

	/* Train 2 */
	intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
		     FDI_LINK_TRAIN_NONE, FDI_LINK_TRAIN_PATTERN_2);
	intel_de_rmw(dev_priv, FDI_RX_CTL(pipe),
		     FDI_LINK_TRAIN_NONE, FDI_LINK_TRAIN_PATTERN_2);
	intel_de_posting_read(dev_priv, FDI_RX_CTL(pipe));
	udelay(150);

	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = intel_de_read(dev_priv, reg);
		drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			intel_de_write(dev_priv, reg,
				       temp | FDI_RX_SYMBOL_LOCK);
			drm_dbg_kms(&dev_priv->drm, "FDI train 2 done.\n");
			break;
		}
	}
	if (tries == 5)
		drm_err(&dev_priv->drm, "FDI train 2 fail!\n");

	drm_dbg_kms(&dev_priv->drm, "FDI train done\n");
}

static const int snb_b_fdi_train_param[] = {
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};

/* The FDI link training functions for SNB/Cougarpoint. */
static void gen6_fdi_link_train(struct intel_crtc *crtc,
				const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, retry;

	/*
	 * Write the TU size bits before fdi link training, so that error
	 * detection works.
	 */
	intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
		       intel_de_read(dev_priv, PIPE_DATA_M1(dev_priv, pipe)) & TU_SIZE_MASK);

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	/* SNB-B */
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);

	intel_de_write(dev_priv, FDI_RX_MISC(pipe),
		       FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

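	/* Step through the four vswing/pre-emphasis levels until bit lock is reported */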
	for (i = 0; i < 4; i++) {
		intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
			     FDI_LINK_TRAIN_VOL_EMP_MASK, snb_b_fdi_train_param[i]);
		intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe));
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_BIT_LOCK) {
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_BIT_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 1 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		drm_err(&dev_priv->drm, "FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	if (IS_SANDYBRIDGE(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* SNB-B */
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	}
	intel_de_write(dev_priv, reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_2;
	}
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

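	/* Same vswing/pre-emphasis sweep for pattern 2, this time waiting for symbol lock */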
	for (i = 0; i < 4; i++) {
		intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
			     FDI_LINK_TRAIN_VOL_EMP_MASK, snb_b_fdi_train_param[i]);
		intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe));
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_SYMBOL_LOCK) {
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_SYMBOL_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 2 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		drm_err(&dev_priv->drm, "FDI train 2 fail!\n");

	drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
}

/* Manual link training for Ivy Bridge A0 parts */
static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
				      const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, j;

	ivb_update_fdi_bc_bifurcation(crtc_state);

	/*
	 * Write the TU size bits before fdi link training, so that error
	 * detection works.
	 */
	intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
		       intel_de_read(dev_priv, PIPE_DATA_M1(dev_priv, pipe)) & TU_SIZE_MASK);

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR before link train 0x%x\n",
		    intel_de_read(dev_priv, FDI_RX_IIR(pipe)));

	/* Try each vswing and preemphasis setting twice before moving on */
	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
		/* disable first in case we need to retry */
		reg = FDI_TX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
		temp &= ~FDI_TX_ENABLE;
		intel_de_write(dev_priv, reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_LINK_TRAIN_AUTO;
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp &= ~FDI_RX_ENABLE;
		intel_de_write(dev_priv, reg, temp);

		/* enable CPU FDI TX and PCH FDI RX */
		reg = FDI_TX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_DP_PORT_WIDTH_MASK;
		temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[j/2];
		temp |= FDI_COMPOSITE_SYNC;
		intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);

		intel_de_write(dev_priv, FDI_RX_MISC(pipe),
			       FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

		reg = FDI_RX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
		temp |= FDI_COMPOSITE_SYNC;
		intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);

		intel_de_posting_read(dev_priv, reg);
		udelay(1); /* should be 0.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_BIT_LOCK ||
			    (intel_de_read(dev_priv, reg) & FDI_RX_BIT_LOCK)) {
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_BIT_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 1 done, level %i.\n",
					    i);
				break;
			}
			udelay(1); /* should be 0.5us */
		}
		if (i == 4) {
			drm_dbg_kms(&dev_priv->drm,
				    "FDI train 1 fail on vswing %d\n", j / 2);
			continue;
		}

		/* Train 2 */
		intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
			     FDI_LINK_TRAIN_NONE_IVB,
			     FDI_LINK_TRAIN_PATTERN_2_IVB);
		intel_de_rmw(dev_priv, FDI_RX_CTL(pipe),
			     FDI_LINK_TRAIN_PATTERN_MASK_CPT,
			     FDI_LINK_TRAIN_PATTERN_2_CPT);
		intel_de_posting_read(dev_priv, FDI_RX_CTL(pipe));
		udelay(2); /* should be 1.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_SYMBOL_LOCK ||
			    (intel_de_read(dev_priv, reg) & FDI_RX_SYMBOL_LOCK)) {
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_SYMBOL_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 2 done, level %i.\n",
					    i);
				goto train_done;
			}
			udelay(2); /* should be 1.5us */
		}
		if (i == 4)
			drm_dbg_kms(&dev_priv->drm,
				    "FDI train 2 fail on vswing %d\n", j / 2);
	}

train_done:
	drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
}

/* Starting with Haswell, different DDI ports can work in FDI mode for
 * connection to the PCH-located connectors. For this, it is necessary to train
 * both the DDI port and PCH receiver for the desired DDI buffer settings.
 *
 * The recommended port to work in FDI mode is DDI E, which we use here. Also,
 * please note that when FDI mode is active on DDI E, it shares 2 lines with
 * DDI A (which is used for eDP)
 */
void hsw_fdi_link_train(struct intel_encoder *encoder,
			const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 temp, i, rx_ctl_val;
	int n_entries;

	encoder->get_buf_trans(encoder, crtc_state, &n_entries);

	hsw_prepare_dp_ddi_buffers(encoder, crtc_state);

	/* Set the FDI_RX_MISC pwrdn lanes and the 2 workarounds listed in the
	 * mode set "sequence for CRT port" document:
	 * - TP1 to TP2 time with the default value
	 * - FDI delay to 90h
	 *
	 * WaFDIAutoLinkSetTimingOverrride:hsw
	 */
	intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A),
		       FDI_RX_PWRDN_LANE1_VAL(2) |
		       FDI_RX_PWRDN_LANE0_VAL(2) |
		       FDI_RX_TP1_TO_TP2_48 |
		       FDI_RX_FDI_DELAY_90);

	/* Enable the PCH Receiver FDI PLL */
	rx_ctl_val = dev_priv->display.fdi.rx_config | FDI_RX_ENHANCE_FRAME_ENABLE |
		     FDI_RX_PLL_ENABLE |
		     FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
	intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));
	udelay(220);

	/* Switch from Rawclk to PCDclk */
	rx_ctl_val |= FDI_PCDCLK;
	intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);

	/* Configure Port Clock Select */
	drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll->info->id != DPLL_ID_SPLL);
	intel_ddi_enable_clock(encoder, crtc_state);

	/* Start the training iterating through available voltages and emphasis,
	 * testing each value twice. */
	for (i = 0; i < n_entries * 2; i++) {
		/* Configure DP_TP_CTL with auto-training */
		intel_de_write(dev_priv, DP_TP_CTL(PORT_E),
			       DP_TP_CTL_FDI_AUTOTRAIN |
			       DP_TP_CTL_ENHANCED_FRAME_ENABLE |
			       DP_TP_CTL_LINK_TRAIN_PAT1 |
			       DP_TP_CTL_ENABLE);

		/* Configure and enable DDI_BUF_CTL for DDI E with next voltage.
		 * DDI E does not support port reversal, the functionality is
		 * achieved on the PCH side in FDI_RX_CTL, so no need to set the
		 * port reversal bit */
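		/* The lane count field takes the number of lanes minus one. */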
		intel_de_write(dev_priv, DDI_BUF_CTL(PORT_E),
			       DDI_BUF_CTL_ENABLE |
			       ((crtc_state->fdi_lanes - 1) << 1) |
			       DDI_BUF_TRANS_SELECT(i / 2));
		intel_de_posting_read(dev_priv, DDI_BUF_CTL(PORT_E));

		udelay(600);

		/* Program PCH FDI Receiver TU */
		intel_de_write(dev_priv, FDI_RX_TUSIZE1(PIPE_A), TU_SIZE(64));

		/* Enable PCH FDI Receiver with auto-training */
		rx_ctl_val |= FDI_RX_ENABLE | FDI_LINK_TRAIN_AUTO;
		intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
		intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));

		/* Wait for FDI receiver lane calibration */
		udelay(30);

		/* Unset FDI_RX_MISC pwrdn lanes */
		intel_de_rmw(dev_priv, FDI_RX_MISC(PIPE_A),
			     FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK, 0);
		intel_de_posting_read(dev_priv, FDI_RX_MISC(PIPE_A));

		/* Wait for FDI auto training time */
		udelay(5);

		temp = intel_de_read(dev_priv, DP_TP_STATUS(PORT_E));
		if (temp & DP_TP_STATUS_AUTOTRAIN_DONE) {
			drm_dbg_kms(&dev_priv->drm,
				    "FDI link training done on step %d\n", i);
			break;
		}

		/*
		 * Leave things enabled even if we failed to train FDI.
		 * Results in less fireworks from the state checker.
		 */
		if (i == n_entries * 2 - 1) {
			drm_err(&dev_priv->drm, "FDI link training failed!\n");
			break;
		}

		rx_ctl_val &= ~FDI_RX_ENABLE;
		intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
		intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));

		intel_de_rmw(dev_priv, DDI_BUF_CTL(PORT_E), DDI_BUF_CTL_ENABLE, 0);
		intel_de_posting_read(dev_priv, DDI_BUF_CTL(PORT_E));

		/* Disable DP_TP_CTL and FDI_RX_CTL and retry */
		intel_de_rmw(dev_priv, DP_TP_CTL(PORT_E), DP_TP_CTL_ENABLE, 0);
		intel_de_posting_read(dev_priv, DP_TP_CTL(PORT_E));

		intel_wait_ddi_buf_idle(dev_priv, PORT_E);

		/* Reset FDI_RX_MISC pwrdn lanes */
		intel_de_rmw(dev_priv, FDI_RX_MISC(PIPE_A),
			     FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK,
			     FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2));
		intel_de_posting_read(dev_priv, FDI_RX_MISC(PIPE_A));
	}

	/* Enable normal pixel sending for FDI */
	intel_de_write(dev_priv, DP_TP_CTL(PORT_E),
		       DP_TP_CTL_FDI_AUTOTRAIN |
		       DP_TP_CTL_LINK_TRAIN_NORMAL |
		       DP_TP_CTL_ENHANCED_FRAME_ENABLE |
		       DP_TP_CTL_ENABLE);
}

void hsw_fdi_disable(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);

	/*
	 * Bspec lists this as both step 13 (before DDI_BUF_CTL disable)
	 * and step 18 (after clearing PORT_CLK_SEL). Based on a BUN,
	 * step 13 is the correct place for it. Step 18 is where it was
	 * originally before the BUN.
	 */
	intel_de_rmw(dev_priv, FDI_RX_CTL(PIPE_A), FDI_RX_ENABLE, 0);
	intel_de_rmw(dev_priv, DDI_BUF_CTL(PORT_E), DDI_BUF_CTL_ENABLE, 0);
	intel_wait_ddi_buf_idle(dev_priv, PORT_E);
	intel_ddi_disable_clock(encoder);
	intel_de_rmw(dev_priv, FDI_RX_MISC(PIPE_A),
		     FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK,
		     FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2));
	intel_de_rmw(dev_priv, FDI_RX_CTL(PIPE_A), FDI_PCDCLK, 0);
	intel_de_rmw(dev_priv, FDI_RX_CTL(PIPE_A), FDI_RX_PLL_ENABLE, 0);
}

void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
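	/* BPC in FDI rx is consistent with that in TRANSCONF */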
	temp |= (intel_de_read(dev_priv, TRANSCONF(dev_priv, pipe)) & TRANSCONF_BPC_MASK) << 11;
	intel_de_write(dev_priv, reg, temp | FDI_RX_PLL_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(200);

	/* Switch from Rawclk to PCDclk */
	intel_de_rmw(dev_priv, reg, 0, FDI_PCDCLK);
	intel_de_posting_read(dev_priv, reg);
	udelay(200);

	/* Enable CPU FDI TX PLL, always on for Ironlake */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
		intel_de_write(dev_priv, reg, temp | FDI_TX_PLL_ENABLE);

		intel_de_posting_read(dev_priv, reg);
		udelay(100);
	}
}

void ilk_fdi_pll_disable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;

	/* Switch from PCDclk to Rawclk */
	intel_de_rmw(dev_priv, FDI_RX_CTL(pipe), FDI_PCDCLK, 0);

	/* Disable CPU FDI TX PLL */
	intel_de_rmw(dev_priv, FDI_TX_CTL(pipe), FDI_TX_PLL_ENABLE, 0);
	intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe));
	udelay(100);

	/* Wait for the clocks to turn off. */
	intel_de_rmw(dev_priv, FDI_RX_CTL(pipe), FDI_RX_PLL_ENABLE, 0);
	intel_de_posting_read(dev_priv, FDI_RX_CTL(pipe));
	udelay(100);
}

void ilk_fdi_disable(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* disable CPU FDI tx and PCH FDI rx */
	intel_de_rmw(dev_priv, FDI_TX_CTL(pipe), FDI_TX_ENABLE, 0);
	intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe));

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~(0x7 << 16);
	temp |= (intel_de_read(dev_priv, TRANSCONF(dev_priv, pipe)) & TRANSCONF_BPC_MASK) << 11;
	intel_de_write(dev_priv, reg, temp & ~FDI_RX_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(100);

	/* Ironlake workaround, disable clock pointer after downing FDI */
	if (HAS_PCH_IBX(dev_priv))
		intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
			       FDI_RX_PHASE_SYNC_POINTER_OVR);

	/* still set train pattern 1 */
	intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
		     FDI_LINK_TRAIN_NONE, FDI_LINK_TRAIN_PATTERN_1);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	/* BPC in FDI rx is consistent with that in TRANSCONF */
	temp &= ~(0x07 << 16);
	temp |= (intel_de_read(dev_priv, TRANSCONF(dev_priv, pipe)) & TRANSCONF_BPC_MASK) << 11;
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(100);
}

static const struct intel_fdi_funcs ilk_funcs = {
	.fdi_link_train = ilk_fdi_link_train,
};

static const struct intel_fdi_funcs gen6_funcs = {
	.fdi_link_train = gen6_fdi_link_train,
};

static const struct intel_fdi_funcs ivb_funcs = {
	.fdi_link_train = ivb_manual_fdi_link_train,
};

void
intel_fdi_init_hook(struct drm_i915_private *dev_priv)
{
	if (IS_IRONLAKE(dev_priv)) {
		dev_priv->display.funcs.fdi = &ilk_funcs;
	} else if (IS_SANDYBRIDGE(dev_priv)) {
		dev_priv->display.funcs.fdi = &gen6_funcs;
	} else if (IS_IVYBRIDGE(dev_priv)) {
		/* FIXME: detect B0+ stepping and use auto training */
		dev_priv->display.funcs.fdi = &ivb_funcs;
	}
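	/* HSW and later train FDI via hsw_fdi_link_train(), called from the DDI code. */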
}