// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <linux/string_helpers.h>

#include "i915_reg.h"
#include "intel_atomic.h"
#include "intel_crtc.h"
#include "intel_ddi.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_fdi.h"
#include "intel_fdi_regs.h"
#include "intel_link_bw.h"

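/*
 * Platform specific hooks for the FDI link training sequence, selected in
 * intel_fdi_init_hook().
 */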
struct intel_fdi_funcs {
	void (*fdi_link_train)(struct intel_crtc *crtc,
			       const struct intel_crtc_state *crtc_state);
};

static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	bool cur_state;

	if (HAS_DDI(dev_priv)) {
		/*
		 * DDI does not have a specific FDI_TX register.
		 *
		 * FDI is never fed from EDP transcoder
		 * so pipe->transcoder cast is fine here.
		 */
		enum transcoder cpu_transcoder = (enum transcoder)pipe;
		cur_state = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder)) & TRANS_DDI_FUNC_ENABLE;
	} else {
		cur_state = intel_de_read(dev_priv, FDI_TX_CTL(pipe)) & FDI_TX_ENABLE;
	}
	I915_STATE_WARN(dev_priv, cur_state != state,
			"FDI TX state assertion failure (expected %s, current %s)\n",
			str_on_off(state), str_on_off(cur_state));
}

void assert_fdi_tx_enabled(struct drm_i915_private *i915, enum pipe pipe)
{
	assert_fdi_tx(i915, pipe, true);
}

void assert_fdi_tx_disabled(struct drm_i915_private *i915, enum pipe pipe)
{
	assert_fdi_tx(i915, pipe, false);
}

static void assert_fdi_rx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	bool cur_state;

	cur_state = intel_de_read(dev_priv, FDI_RX_CTL(pipe)) & FDI_RX_ENABLE;
	I915_STATE_WARN(dev_priv, cur_state != state,
			"FDI RX state assertion failure (expected %s, current %s)\n",
			str_on_off(state), str_on_off(cur_state));
}

void assert_fdi_rx_enabled(struct drm_i915_private *i915, enum pipe pipe)
{
	assert_fdi_rx(i915, pipe, true);
}

void assert_fdi_rx_disabled(struct drm_i915_private *i915, enum pipe pipe)
{
	assert_fdi_rx(i915, pipe, false);
}

void assert_fdi_tx_pll_enabled(struct drm_i915_private *i915,
			       enum pipe pipe)
{
	bool cur_state;

	/* ILK FDI PLL is always enabled */
	if (IS_IRONLAKE(i915))
		return;

	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
	if (HAS_DDI(i915))
		return;

	cur_state = intel_de_read(i915, FDI_TX_CTL(pipe)) & FDI_TX_PLL_ENABLE;
	I915_STATE_WARN(i915, !cur_state,
			"FDI TX PLL assertion failure, should be active but is disabled\n");
}

static void assert_fdi_rx_pll(struct drm_i915_private *i915,
			      enum pipe pipe, bool state)
{
	bool cur_state;

	cur_state = intel_de_read(i915, FDI_RX_CTL(pipe)) & FDI_RX_PLL_ENABLE;
	I915_STATE_WARN(i915, cur_state != state,
			"FDI RX PLL assertion failure (expected %s, current %s)\n",
			str_on_off(state), str_on_off(cur_state));
}

void assert_fdi_rx_pll_enabled(struct drm_i915_private *i915, enum pipe pipe)
{
	assert_fdi_rx_pll(i915, pipe, true);
}

void assert_fdi_rx_pll_disabled(struct drm_i915_private *i915, enum pipe pipe)
{
	assert_fdi_rx_pll(i915, pipe, false);
}

void intel_fdi_link_train(struct intel_crtc *crtc,
			  const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	dev_priv->display.funcs.fdi->fdi_link_train(crtc, crtc_state);
}

/**
 * intel_fdi_add_affected_crtcs - add CRTCs on FDI affected by other modeset CRTCs
 * @state: intel atomic state
 *
 * Add a CRTC using FDI to @state if changing another CRTC's FDI BW usage is
 * known to affect the available FDI BW for the former CRTC. In practice this
 * means adding CRTC B on IVYBRIDGE if its use of FDI lanes is limited (by
 * CRTC C) and CRTC C is getting disabled.
 *
 * Returns 0 in case of success, or a negative error code otherwise.
 */
int intel_fdi_add_affected_crtcs(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state;
	const struct intel_crtc_state *new_crtc_state;
	struct intel_crtc *crtc;

	if (!IS_IVYBRIDGE(i915) || INTEL_NUM_PIPES(i915) != 3)
		return 0;

	crtc = intel_crtc_for_pipe(i915, PIPE_C);
	new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
	if (!new_crtc_state)
		return 0;

	if (!intel_crtc_needs_modeset(new_crtc_state))
		return 0;

	old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);
	if (!old_crtc_state->fdi_lanes)
		return 0;

	crtc = intel_crtc_for_pipe(i915, PIPE_B);
	new_crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
	if (IS_ERR(new_crtc_state))
		return PTR_ERR(new_crtc_state);

	old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);
	if (!old_crtc_state->fdi_lanes)
		return 0;

	return intel_modeset_pipes_in_mask_early(state,
						 "FDI link BW decrease on pipe C",
						 BIT(PIPE_B));
}

/* units of 100MHz */
static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
{
	if (crtc_state->hw.enable && crtc_state->has_pch_encoder)
		return crtc_state->fdi_lanes;

	return 0;
}

static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
			       struct intel_crtc_state *pipe_config,
			       enum pipe *pipe_to_reduce)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state = pipe_config->uapi.state;
	struct intel_crtc *other_crtc;
	struct intel_crtc_state *other_crtc_state;

	*pipe_to_reduce = pipe;

	drm_dbg_kms(&dev_priv->drm,
		    "checking fdi config on pipe %c, lanes %i\n",
		    pipe_name(pipe), pipe_config->fdi_lanes);
	if (pipe_config->fdi_lanes > 4) {
		drm_dbg_kms(&dev_priv->drm,
			    "invalid fdi lane config on pipe %c: %i lanes\n",
			    pipe_name(pipe), pipe_config->fdi_lanes);
		return -EINVAL;
	}

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		if (pipe_config->fdi_lanes > 2) {
			drm_dbg_kms(&dev_priv->drm,
				    "only 2 lanes on haswell, required: %i lanes\n",
				    pipe_config->fdi_lanes);
			return -EINVAL;
		} else {
			return 0;
		}
	}

	if (INTEL_NUM_PIPES(dev_priv) == 2)
		return 0;

	/* Ivybridge 3 pipe is really complicated */
	switch (pipe) {
	case PIPE_A:
		return 0;
	case PIPE_B:
		if (pipe_config->fdi_lanes <= 2)
			return 0;

		other_crtc = intel_crtc_for_pipe(dev_priv, PIPE_C);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
			drm_dbg_kms(&dev_priv->drm,
				    "invalid shared fdi lane config on pipe %c: %i lanes\n",
				    pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}
		return 0;
	case PIPE_C:
		if (pipe_config->fdi_lanes > 2) {
			drm_dbg_kms(&dev_priv->drm,
				    "only 2 lanes on pipe %c: required %i lanes\n",
				    pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}

		other_crtc = intel_crtc_for_pipe(dev_priv, PIPE_B);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
			drm_dbg_kms(&dev_priv->drm,
				    "fdi link B uses too many lanes to enable link C\n");

			*pipe_to_reduce = PIPE_B;

			return -EINVAL;
		}
		return 0;
	default:
		MISSING_CASE(pipe);
		return 0;
	}
}

void intel_fdi_pll_freq_update(struct drm_i915_private *i915)
{
	if (IS_IRONLAKE(i915)) {
		u32 fdi_pll_clk =
			intel_de_read(i915, FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;

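		/* FDI_PLL_BIOS_0 encodes the PLL feedback clock; pll_freq ends up in kHz */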
		i915->display.fdi.pll_freq = (fdi_pll_clk + 2) * 10000;
	} else if (IS_SANDYBRIDGE(i915) || IS_IVYBRIDGE(i915)) {
		i915->display.fdi.pll_freq = 270000;
	} else {
		return;
	}

	drm_dbg(&i915->drm, "FDI PLL freq=%d\n", i915->display.fdi.pll_freq);
}

int intel_fdi_link_freq(struct drm_i915_private *i915,
			const struct intel_crtc_state *pipe_config)
{
	if (HAS_DDI(i915))
		return pipe_config->port_clock; /* SPLL */
	else
		return i915->display.fdi.pll_freq;
}

/**
 * intel_fdi_compute_pipe_bpp - compute pipe bpp limited by max link bpp
 * @crtc_state: the crtc state
 *
 * Compute the pipe bpp limited by the CRTC's maximum link bpp. Encoders can
 * call this function during state computation in the simple case where the
 * link bpp will always match the pipe bpp. This is the case for all non-DP
 * encoders, while DP encoders will use a link bpp lower than pipe bpp in case
 * of DSC compression.
 *
 * Returns %true in case of success, %false if pipe bpp would need to be
 * reduced below its valid range.
 */
bool intel_fdi_compute_pipe_bpp(struct intel_crtc_state *crtc_state)
{
	int pipe_bpp = min(crtc_state->pipe_bpp,
			   to_bpp_int(crtc_state->max_link_bpp_x16));

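	/* pipe bpp spans 3 color components with 2 bit granularity each, so keep it a multiple of 6 */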
	pipe_bpp = rounddown(pipe_bpp, 2 * 3);

	if (pipe_bpp < 6 * 3)
		return false;

	crtc_state->pipe_bpp = pipe_bpp;

	return true;
}

int ilk_fdi_compute_config(struct intel_crtc *crtc,
			   struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *i915 = to_i915(dev);
	const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	int lane, link_bw, fdi_dotclock;

	/* FDI is a binary signal running at ~2.7GHz, encoding
	 * each output octet as 10 bits. The actual frequency
	 * is stored as a divider into a 100MHz clock, and the
	 * mode pixel clock is stored in units of 1KHz.
	 * Hence the bw of each lane in terms of the mode signal
	 * is:
	 */
	link_bw = intel_fdi_link_freq(i915, pipe_config);

	fdi_dotclock = adjusted_mode->crtc_clock;

	lane = ilk_get_lanes_required(fdi_dotclock, link_bw,
				      pipe_config->pipe_bpp);

	pipe_config->fdi_lanes = lane;

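	/* Compute the FDI data M/N and link M/N values for this mode and lane count. */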
	intel_link_compute_m_n(to_bpp_x16(pipe_config->pipe_bpp),
			       lane, fdi_dotclock,
			       link_bw,
			       intel_dp_bw_fec_overhead(false),
			       &pipe_config->fdi_m_n);

	return 0;
}

static int intel_fdi_atomic_check_bw(struct intel_atomic_state *state,
				     struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config,
				     struct intel_link_bw_limits *limits)
{
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	enum pipe pipe_to_reduce;
	int ret;

	ret = ilk_check_fdi_lanes(&i915->drm, crtc->pipe, pipe_config,
				  &pipe_to_reduce);
	if (ret != -EINVAL)
		return ret;

	ret = intel_link_bw_reduce_bpp(state, limits,
				       BIT(pipe_to_reduce),
				       "FDI link BW");

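	/* -EAGAIN asks the caller to recompute all CRTC states with the reduced link bpp limits. */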
	return ret ? : -EAGAIN;
}

/**
 * intel_fdi_atomic_check_link - check all modeset FDI link configuration
 * @state: intel atomic state
 * @limits: link BW limits
 *
 * Check the link configuration for all modeset FDI outputs. If the
 * configuration is invalid @limits will be updated if possible to
 * reduce the total BW, after which the configuration for all CRTCs in
 * @state must be recomputed with the updated @limits.
 *
 * Returns:
 *   - 0 if the configuration is valid
 *   - %-EAGAIN, if the configuration is invalid and @limits got updated
 *     with fallback values with which the configuration of all CRTCs
 *     in @state must be recomputed
 *   - Other negative error, if the configuration is invalid without a
 *     fallback possibility, or the check failed for another reason
 */
int intel_fdi_atomic_check_link(struct intel_atomic_state *state,
				struct intel_link_bw_limits *limits)
{
	struct intel_crtc *crtc;
	struct intel_crtc_state *crtc_state;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		int ret;

		if (!crtc_state->has_pch_encoder ||
		    !intel_crtc_needs_modeset(crtc_state) ||
		    !crtc_state->hw.enable)
			continue;

		ret = intel_fdi_atomic_check_bw(state, crtc, crtc_state, limits);
		if (ret)
			return ret;
	}

	return 0;
}

static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
{
	u32 temp;

	temp = intel_de_read(dev_priv, SOUTH_CHICKEN1);
	if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
		return;

	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, FDI_RX_CTL(PIPE_B)) &
		    FDI_RX_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, FDI_RX_CTL(PIPE_C)) &
		    FDI_RX_ENABLE);

	temp &= ~FDI_BC_BIFURCATION_SELECT;
	if (enable)
		temp |= FDI_BC_BIFURCATION_SELECT;

	drm_dbg_kms(&dev_priv->drm, "%sabling fdi C rx\n",
		    enable ? "en" : "dis");
	intel_de_write(dev_priv, SOUTH_CHICKEN1, temp);
	intel_de_posting_read(dev_priv, SOUTH_CHICKEN1);
}

static void ivb_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

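	/*
	 * Pipe B keeps all FDI lanes to itself when it needs more than 2,
	 * otherwise enable the B/C bifurcation so pipe C can use FDI as well.
	 */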
	switch (crtc->pipe) {
	case PIPE_A:
		break;
	case PIPE_B:
		if (crtc_state->fdi_lanes > 2)
			cpt_set_fdi_bc_bifurcation(dev_priv, false);
		else
			cpt_set_fdi_bc_bifurcation(dev_priv, true);

		break;
	case PIPE_C:
		cpt_set_fdi_bc_bifurcation(dev_priv, true);

		break;
	default:
		MISSING_CASE(crtc->pipe);
	}
}

void intel_fdi_normal_train(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (IS_IVYBRIDGE(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	}
	intel_de_write(dev_priv, reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	}
	intel_de_write(dev_priv, reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* wait one idle pattern time */
	intel_de_posting_read(dev_priv, reg);
	udelay(1000);

	/* IVB wants error correction enabled */
	if (IS_IVYBRIDGE(dev_priv))
		intel_de_rmw(dev_priv, reg, 0, FDI_FS_ERRC_ENABLE | FDI_FE_ERRC_ENABLE);
}

/* The FDI link training functions for ILK/Ibexpeak. */
static void ilk_fdi_link_train(struct intel_crtc *crtc,
			       const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, tries;

	/*
	 * Write the TU size bits before fdi link training, so that error
	 * detection works.
	 */
	intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
		       intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

	/* FDI needs bits from pipe first */
	assert_transcoder_enabled(dev_priv, crtc_state->cpu_transcoder);

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	intel_de_write(dev_priv, reg, temp);
	intel_de_read(dev_priv, reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	/* Ironlake workaround, enable clock pointer after FDI enable */
	intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
		       FDI_RX_PHASE_SYNC_POINTER_OVR);
	intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
		       FDI_RX_PHASE_SYNC_POINTER_OVR | FDI_RX_PHASE_SYNC_POINTER_EN);

	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = intel_de_read(dev_priv, reg);
		drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

		if ((temp & FDI_RX_BIT_LOCK)) {
			drm_dbg_kms(&dev_priv->drm, "FDI train 1 done.\n");
			intel_de_write(dev_priv, reg, temp | FDI_RX_BIT_LOCK);
			break;
		}
	}
	if (tries == 5)
		drm_err(&dev_priv->drm, "FDI train 1 fail!\n");

	/* Train 2 */
	intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
		     FDI_LINK_TRAIN_NONE, FDI_LINK_TRAIN_PATTERN_2);
	intel_de_rmw(dev_priv, FDI_RX_CTL(pipe),
		     FDI_LINK_TRAIN_NONE, FDI_LINK_TRAIN_PATTERN_2);
	intel_de_posting_read(dev_priv, FDI_RX_CTL(pipe));
	udelay(150);

	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = intel_de_read(dev_priv, reg);
		drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			intel_de_write(dev_priv, reg,
				       temp | FDI_RX_SYMBOL_LOCK);
			drm_dbg_kms(&dev_priv->drm, "FDI train 2 done.\n");
			break;
		}
	}
	if (tries == 5)
		drm_err(&dev_priv->drm, "FDI train 2 fail!\n");

	drm_dbg_kms(&dev_priv->drm, "FDI train done\n");
}

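/* Voltage swing / pre-emphasis combinations tried in order during SNB/IVB FDI link training. */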
static const int snb_b_fdi_train_param[] = {
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};

/* The FDI link training functions for SNB/Cougarpoint. */
static void gen6_fdi_link_train(struct intel_crtc *crtc,
				const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, retry;

	/*
	 * Write the TU size bits before fdi link training, so that error
	 * detection works.
	 */
	intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
		       intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	/* SNB-B */
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);

	intel_de_write(dev_priv, FDI_RX_MISC(pipe),
		       FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	for (i = 0; i < 4; i++) {
		intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
			     FDI_LINK_TRAIN_VOL_EMP_MASK, snb_b_fdi_train_param[i]);
		intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe));
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_BIT_LOCK) {
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_BIT_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 1 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		drm_err(&dev_priv->drm, "FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	if (IS_SANDYBRIDGE(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* SNB-B */
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	}
	intel_de_write(dev_priv, reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_2;
	}
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	for (i = 0; i < 4; i++) {
		intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
			     FDI_LINK_TRAIN_VOL_EMP_MASK, snb_b_fdi_train_param[i]);
		intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe));
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_SYMBOL_LOCK) {
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_SYMBOL_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 2 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		drm_err(&dev_priv->drm, "FDI train 2 fail!\n");

	drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
}

/* Manual link training for Ivy Bridge A0 parts */
static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
				      const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, j;

	ivb_update_fdi_bc_bifurcation(crtc_state);

	/*
	 * Write the TU size bits before fdi link training, so that error
	 * detection works.
	 */
	intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
		       intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR before link train 0x%x\n",
		    intel_de_read(dev_priv, FDI_RX_IIR(pipe)));

	/* Try each vswing and preemphasis setting twice before moving on */
	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
		/* disable first in case we need to retry */
		reg = FDI_TX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
		temp &= ~FDI_TX_ENABLE;
		intel_de_write(dev_priv, reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_LINK_TRAIN_AUTO;
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp &= ~FDI_RX_ENABLE;
		intel_de_write(dev_priv, reg, temp);

		/* enable CPU FDI TX and PCH FDI RX */
		reg = FDI_TX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_DP_PORT_WIDTH_MASK;
		temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[j/2];
		temp |= FDI_COMPOSITE_SYNC;
		intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);

		intel_de_write(dev_priv, FDI_RX_MISC(pipe),
			       FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

		reg = FDI_RX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
		temp |= FDI_COMPOSITE_SYNC;
		intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);

		intel_de_posting_read(dev_priv, reg);
		udelay(1); /* should be 0.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_BIT_LOCK ||
			    (intel_de_read(dev_priv, reg) & FDI_RX_BIT_LOCK)) {
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_BIT_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 1 done, level %i.\n",
					    i);
				break;
			}
			udelay(1); /* should be 0.5us */
		}
		if (i == 4) {
			drm_dbg_kms(&dev_priv->drm,
				    "FDI train 1 fail on vswing %d\n", j / 2);
			continue;
		}

		/* Train 2 */
		intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
			     FDI_LINK_TRAIN_NONE_IVB,
			     FDI_LINK_TRAIN_PATTERN_2_IVB);
		intel_de_rmw(dev_priv, FDI_RX_CTL(pipe),
			     FDI_LINK_TRAIN_PATTERN_MASK_CPT,
			     FDI_LINK_TRAIN_PATTERN_2_CPT);
		intel_de_posting_read(dev_priv, FDI_RX_CTL(pipe));
		udelay(2); /* should be 1.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_SYMBOL_LOCK ||
			    (intel_de_read(dev_priv, reg) & FDI_RX_SYMBOL_LOCK)) {
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_SYMBOL_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 2 done, level %i.\n",
					    i);
				goto train_done;
			}
			udelay(2); /* should be 1.5us */
		}
		if (i == 4)
			drm_dbg_kms(&dev_priv->drm,
				    "FDI train 2 fail on vswing %d\n", j / 2);
	}

train_done:
	drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
}

/* Starting with Haswell, different DDI ports can work in FDI mode for
 * connection to the PCH-located connectors. For this, it is necessary to train
 * both the DDI port and PCH receiver for the desired DDI buffer settings.
 *
 * The recommended port to work in FDI mode is DDI E, which we use here. Also,
 * please note that when FDI mode is active on DDI E, it shares 2 lines with
 * DDI A (which is used for eDP)
 */
void hsw_fdi_link_train(struct intel_encoder *encoder,
			const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 temp, i, rx_ctl_val;
	int n_entries;

	encoder->get_buf_trans(encoder, crtc_state, &n_entries);

	hsw_prepare_dp_ddi_buffers(encoder, crtc_state);

	/* Set the FDI_RX_MISC pwrdn lanes and the 2 workarounds listed at the
	 * mode set "sequence for CRT port" document:
	 * - TP1 to TP2 time with the default value
	 * - FDI delay to 90h
	 *
	 * WaFDIAutoLinkSetTimingOverrride:hsw
	 */
	intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A),
		       FDI_RX_PWRDN_LANE1_VAL(2) |
		       FDI_RX_PWRDN_LANE0_VAL(2) |
		       FDI_RX_TP1_TO_TP2_48 |
		       FDI_RX_FDI_DELAY_90);

	/* Enable the PCH Receiver FDI PLL */
	rx_ctl_val = dev_priv->display.fdi.rx_config | FDI_RX_ENHANCE_FRAME_ENABLE |
		     FDI_RX_PLL_ENABLE |
		     FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
	intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));
	udelay(220);

	/* Switch from Rawclk to PCDclk */
	rx_ctl_val |= FDI_PCDCLK;
	intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);

	/* Configure Port Clock Select */
	drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll->info->id != DPLL_ID_SPLL);
	intel_ddi_enable_clock(encoder, crtc_state);

	/* Start the training iterating through available voltages and emphasis,
	 * testing each value twice. */
	for (i = 0; i < n_entries * 2; i++) {
		/* Configure DP_TP_CTL with auto-training */
		intel_de_write(dev_priv, DP_TP_CTL(PORT_E),
			       DP_TP_CTL_FDI_AUTOTRAIN |
			       DP_TP_CTL_ENHANCED_FRAME_ENABLE |
			       DP_TP_CTL_LINK_TRAIN_PAT1 |
			       DP_TP_CTL_ENABLE);

		/* Configure and enable DDI_BUF_CTL for DDI E with next voltage.
		 * DDI E does not support port reversal, the functionality is
		 * achieved on the PCH side in FDI_RX_CTL, so no need to set the
		 * port reversal bit */
		intel_de_write(dev_priv, DDI_BUF_CTL(PORT_E),
			       DDI_BUF_CTL_ENABLE |
			       ((crtc_state->fdi_lanes - 1) << 1) |
			       DDI_BUF_TRANS_SELECT(i / 2));
		intel_de_posting_read(dev_priv, DDI_BUF_CTL(PORT_E));

		udelay(600);

		/* Program PCH FDI Receiver TU */
		intel_de_write(dev_priv, FDI_RX_TUSIZE1(PIPE_A), TU_SIZE(64));

		/* Enable PCH FDI Receiver with auto-training */
		rx_ctl_val |= FDI_RX_ENABLE | FDI_LINK_TRAIN_AUTO;
		intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
		intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));

		/* Wait for FDI receiver lane calibration */
		udelay(30);

		/* Unset FDI_RX_MISC pwrdn lanes */
		intel_de_rmw(dev_priv, FDI_RX_MISC(PIPE_A),
			     FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK, 0);
		intel_de_posting_read(dev_priv, FDI_RX_MISC(PIPE_A));

		/* Wait for FDI auto training time */
		udelay(5);

		temp = intel_de_read(dev_priv, DP_TP_STATUS(PORT_E));
		if (temp & DP_TP_STATUS_AUTOTRAIN_DONE) {
			drm_dbg_kms(&dev_priv->drm,
				    "FDI link training done on step %d\n", i);
			break;
		}

		/*
		 * Leave things enabled even if we failed to train FDI.
		 * Results in less fireworks from the state checker.
		 */
		if (i == n_entries * 2 - 1) {
			drm_err(&dev_priv->drm, "FDI link training failed!\n");
			break;
		}

		rx_ctl_val &= ~FDI_RX_ENABLE;
		intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
		intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));

		intel_de_rmw(dev_priv, DDI_BUF_CTL(PORT_E), DDI_BUF_CTL_ENABLE, 0);
		intel_de_posting_read(dev_priv, DDI_BUF_CTL(PORT_E));

		/* Disable DP_TP_CTL and FDI_RX_CTL and retry */
		intel_de_rmw(dev_priv, DP_TP_CTL(PORT_E), DP_TP_CTL_ENABLE, 0);
		intel_de_posting_read(dev_priv, DP_TP_CTL(PORT_E));

		intel_wait_ddi_buf_idle(dev_priv, PORT_E);

		/* Reset FDI_RX_MISC pwrdn lanes */
		intel_de_rmw(dev_priv, FDI_RX_MISC(PIPE_A),
			     FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK,
			     FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2));
		intel_de_posting_read(dev_priv, FDI_RX_MISC(PIPE_A));
	}

	/* Enable normal pixel sending for FDI */
	intel_de_write(dev_priv, DP_TP_CTL(PORT_E),
		       DP_TP_CTL_FDI_AUTOTRAIN |
		       DP_TP_CTL_LINK_TRAIN_NORMAL |
		       DP_TP_CTL_ENHANCED_FRAME_ENABLE |
		       DP_TP_CTL_ENABLE);
}

void hsw_fdi_disable(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);

	/*
	 * Bspec lists this as both step 13 (before DDI_BUF_CTL disable)
	 * and step 18 (after clearing PORT_CLK_SEL). Based on a BUN,
	 * step 13 is the correct place for it. Step 18 is where it was
	 * originally before the BUN.
	 */
	intel_de_rmw(dev_priv, FDI_RX_CTL(PIPE_A), FDI_RX_ENABLE, 0);
	intel_de_rmw(dev_priv, DDI_BUF_CTL(PORT_E), DDI_BUF_CTL_ENABLE, 0);
	intel_wait_ddi_buf_idle(dev_priv, PORT_E);
	intel_ddi_disable_clock(encoder);
	intel_de_rmw(dev_priv, FDI_RX_MISC(PIPE_A),
		     FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK,
		     FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2));
	intel_de_rmw(dev_priv, FDI_RX_CTL(PIPE_A), FDI_PCDCLK, 0);
	intel_de_rmw(dev_priv, FDI_RX_CTL(PIPE_A), FDI_RX_PLL_ENABLE, 0);
}

void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
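	/* BPC in FDI rx is consistent with that in TRANSCONF */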
	temp |= (intel_de_read(dev_priv, TRANSCONF(pipe)) & TRANSCONF_BPC_MASK) << 11;
	intel_de_write(dev_priv, reg, temp | FDI_RX_PLL_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(200);

	/* Switch from Rawclk to PCDclk */
	intel_de_rmw(dev_priv, reg, 0, FDI_PCDCLK);
	intel_de_posting_read(dev_priv, reg);
	udelay(200);

	/* Enable CPU FDI TX PLL, always on for Ironlake */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
		intel_de_write(dev_priv, reg, temp | FDI_TX_PLL_ENABLE);

		intel_de_posting_read(dev_priv, reg);
		udelay(100);
	}
}

void ilk_fdi_pll_disable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;

	/* Switch from PCDclk to Rawclk */
	intel_de_rmw(dev_priv, FDI_RX_CTL(pipe), FDI_PCDCLK, 0);

	/* Disable CPU FDI TX PLL */
	intel_de_rmw(dev_priv, FDI_TX_CTL(pipe), FDI_TX_PLL_ENABLE, 0);
	intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe));
	udelay(100);

	/* Wait for the clocks to turn off. */
	intel_de_rmw(dev_priv, FDI_RX_CTL(pipe), FDI_RX_PLL_ENABLE, 0);
	intel_de_posting_read(dev_priv, FDI_RX_CTL(pipe));
	udelay(100);
}

void ilk_fdi_disable(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* disable CPU FDI tx and PCH FDI rx */
	intel_de_rmw(dev_priv, FDI_TX_CTL(pipe), FDI_TX_ENABLE, 0);
	intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe));

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~(0x7 << 16);
	temp |= (intel_de_read(dev_priv, TRANSCONF(pipe)) & TRANSCONF_BPC_MASK) << 11;
	intel_de_write(dev_priv, reg, temp & ~FDI_RX_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(100);

	/* Ironlake workaround, disable clock pointer after downing FDI */
	if (HAS_PCH_IBX(dev_priv))
		intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
			       FDI_RX_PHASE_SYNC_POINTER_OVR);

	/* still set train pattern 1 */
	intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
		     FDI_LINK_TRAIN_NONE, FDI_LINK_TRAIN_PATTERN_1);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	/* BPC in FDI rx is consistent with that in TRANSCONF */
	temp &= ~(0x07 << 16);
	temp |= (intel_de_read(dev_priv, TRANSCONF(pipe)) & TRANSCONF_BPC_MASK) << 11;
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(100);
}

static const struct intel_fdi_funcs ilk_funcs = {
	.fdi_link_train = ilk_fdi_link_train,
};

static const struct intel_fdi_funcs gen6_funcs = {
	.fdi_link_train = gen6_fdi_link_train,
};

static const struct intel_fdi_funcs ivb_funcs = {
	.fdi_link_train = ivb_manual_fdi_link_train,
};

void
intel_fdi_init_hook(struct drm_i915_private *dev_priv)
{
	if (IS_IRONLAKE(dev_priv)) {
		dev_priv->display.funcs.fdi = &ilk_funcs;
	} else if (IS_SANDYBRIDGE(dev_priv)) {
		dev_priv->display.funcs.fdi = &gen6_funcs;
	} else if (IS_IVYBRIDGE(dev_priv)) {
		/* FIXME: detect B0+ stepping and use auto training */
		dev_priv->display.funcs.fdi = &ivb_funcs;
	}
}