// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <linux/string_helpers.h>

#include <drm/drm_fixed.h>

#include "i915_reg.h"
#include "intel_atomic.h"
#include "intel_crtc.h"
#include "intel_ddi.h"
#include "intel_de.h"
#include "intel_dp.h"
#include "intel_display_types.h"
#include "intel_fdi.h"
#include "intel_fdi_regs.h"
#include "intel_link_bw.h"

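/*
 * Per-platform hook for FDI link training; the implementation is
 * selected in intel_fdi_init_hook() at the bottom of this file.
 */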
struct intel_fdi_funcs {
	void (*fdi_link_train)(struct intel_crtc *crtc,
			       const struct intel_crtc_state *crtc_state);
};

static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	bool cur_state;

	if (HAS_DDI(dev_priv)) {
		/*
		 * DDI does not have a specific FDI_TX register.
		 *
		 * FDI is never fed from EDP transcoder
		 * so pipe->transcoder cast is fine here.
		 */
		enum transcoder cpu_transcoder = (enum transcoder)pipe;
		cur_state = intel_de_read(dev_priv,
					  TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder)) & TRANS_DDI_FUNC_ENABLE;
	} else {
		cur_state = intel_de_read(dev_priv, FDI_TX_CTL(pipe)) & FDI_TX_ENABLE;
	}
	I915_STATE_WARN(dev_priv, cur_state != state,
			"FDI TX state assertion failure (expected %s, current %s)\n",
			str_on_off(state), str_on_off(cur_state));
}

void assert_fdi_tx_enabled(struct drm_i915_private *i915, enum pipe pipe)
{
	assert_fdi_tx(i915, pipe, true);
}

void assert_fdi_tx_disabled(struct drm_i915_private *i915, enum pipe pipe)
{
	assert_fdi_tx(i915, pipe, false);
}

static void assert_fdi_rx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	bool cur_state;

	cur_state = intel_de_read(dev_priv, FDI_RX_CTL(pipe)) & FDI_RX_ENABLE;
	I915_STATE_WARN(dev_priv, cur_state != state,
			"FDI RX state assertion failure (expected %s, current %s)\n",
			str_on_off(state), str_on_off(cur_state));
}

void assert_fdi_rx_enabled(struct drm_i915_private *i915, enum pipe pipe)
{
	assert_fdi_rx(i915, pipe, true);
}

void assert_fdi_rx_disabled(struct drm_i915_private *i915, enum pipe pipe)
{
	assert_fdi_rx(i915, pipe, false);
}

void assert_fdi_tx_pll_enabled(struct drm_i915_private *i915,
			       enum pipe pipe)
{
	bool cur_state;

	/* ILK FDI PLL is always enabled */
	if (IS_IRONLAKE(i915))
		return;

	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
	if (HAS_DDI(i915))
		return;

	cur_state = intel_de_read(i915, FDI_TX_CTL(pipe)) & FDI_TX_PLL_ENABLE;
	I915_STATE_WARN(i915, !cur_state,
			"FDI TX PLL assertion failure, should be active but is disabled\n");
}

static void assert_fdi_rx_pll(struct drm_i915_private *i915,
			      enum pipe pipe, bool state)
{
	bool cur_state;

	cur_state = intel_de_read(i915, FDI_RX_CTL(pipe)) & FDI_RX_PLL_ENABLE;
	I915_STATE_WARN(i915, cur_state != state,
			"FDI RX PLL assertion failure (expected %s, current %s)\n",
			str_on_off(state), str_on_off(cur_state));
}

void assert_fdi_rx_pll_enabled(struct drm_i915_private *i915, enum pipe pipe)
{
	assert_fdi_rx_pll(i915, pipe, true);
}

void assert_fdi_rx_pll_disabled(struct drm_i915_private *i915, enum pipe pipe)
{
	assert_fdi_rx_pll(i915, pipe, false);
}

void intel_fdi_link_train(struct intel_crtc *crtc,
			  const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	dev_priv->display.funcs.fdi->fdi_link_train(crtc, crtc_state);
}

/**
 * intel_fdi_add_affected_crtcs - add CRTCs on FDI affected by other modeset CRTCs
 * @state: intel atomic state
 *
 * Add a CRTC using FDI to @state if changing another CRTC's FDI BW usage is
 * known to affect the available FDI BW for the former CRTC. In practice this
 * means adding CRTC B on IVYBRIDGE if its use of FDI lanes is limited (by
 * CRTC C) and CRTC C is getting disabled.
 *
 * Returns 0 in case of success, or a negative error code otherwise.
 */
int intel_fdi_add_affected_crtcs(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state;
	const struct intel_crtc_state *new_crtc_state;
	struct intel_crtc *crtc;

	if (!IS_IVYBRIDGE(i915) || INTEL_NUM_PIPES(i915) != 3)
		return 0;

	crtc = intel_crtc_for_pipe(display, PIPE_C);
	new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
	if (!new_crtc_state)
		return 0;

	if (!intel_crtc_needs_modeset(new_crtc_state))
		return 0;

	old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);
	if (!old_crtc_state->fdi_lanes)
		return 0;

	crtc = intel_crtc_for_pipe(display, PIPE_B);
	new_crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
	if (IS_ERR(new_crtc_state))
		return PTR_ERR(new_crtc_state);

	old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);
	if (!old_crtc_state->fdi_lanes)
		return 0;

	return intel_modeset_pipes_in_mask_early(state,
						 "FDI link BW decrease on pipe C",
						 BIT(PIPE_B));
}

/* units of 100MHz */
static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
{
	if (crtc_state->hw.enable && crtc_state->has_pch_encoder)
		return crtc_state->fdi_lanes;

	return 0;
}

static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
			       struct intel_crtc_state *pipe_config,
			       enum pipe *pipe_to_reduce)
{
	struct intel_display *display = to_intel_display(dev);
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state = pipe_config->uapi.state;
	struct intel_crtc *other_crtc;
	struct intel_crtc_state *other_crtc_state;

	*pipe_to_reduce = pipe;

	drm_dbg_kms(&dev_priv->drm,
		    "checking fdi config on pipe %c, lanes %i\n",
		    pipe_name(pipe), pipe_config->fdi_lanes);
	if (pipe_config->fdi_lanes > 4) {
		drm_dbg_kms(&dev_priv->drm,
			    "invalid fdi lane config on pipe %c: %i lanes\n",
			    pipe_name(pipe), pipe_config->fdi_lanes);
		return -EINVAL;
	}

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		if (pipe_config->fdi_lanes > 2) {
			drm_dbg_kms(&dev_priv->drm,
				    "only 2 lanes on haswell, required: %i lanes\n",
				    pipe_config->fdi_lanes);
			return -EINVAL;
		} else {
			return 0;
		}
	}

	if (INTEL_NUM_PIPES(dev_priv) == 2)
		return 0;

	/* Ivybridge 3 pipe is really complicated */
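	/*
	 * Pipes B and C share the FDI B/C lanes: pipe B may use up to 4
	 * lanes only while pipe C's FDI is unused, and pipe C is limited
	 * to 2 lanes and additionally requires pipe B to use at most 2.
	 */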
	switch (pipe) {
	case PIPE_A:
		return 0;
	case PIPE_B:
		if (pipe_config->fdi_lanes <= 2)
			return 0;

		other_crtc = intel_crtc_for_pipe(display, PIPE_C);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
			drm_dbg_kms(&dev_priv->drm,
				    "invalid shared fdi lane config on pipe %c: %i lanes\n",
				    pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}
		return 0;
	case PIPE_C:
		if (pipe_config->fdi_lanes > 2) {
			drm_dbg_kms(&dev_priv->drm,
				    "only 2 lanes on pipe %c: required %i lanes\n",
				    pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}

		other_crtc = intel_crtc_for_pipe(display, PIPE_B);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
			drm_dbg_kms(&dev_priv->drm,
				    "fdi link B uses too many lanes to enable link C\n");

			*pipe_to_reduce = PIPE_B;

			return -EINVAL;
		}
		return 0;
	default:
		MISSING_CASE(pipe);
		return 0;
	}
}

void intel_fdi_pll_freq_update(struct drm_i915_private *i915)
{
	if (IS_IRONLAKE(i915)) {
		u32 fdi_pll_clk =
			intel_de_read(i915, FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;

		i915->display.fdi.pll_freq = (fdi_pll_clk + 2) * 10000;
	} else if (IS_SANDYBRIDGE(i915) || IS_IVYBRIDGE(i915)) {
		i915->display.fdi.pll_freq = 270000;
	} else {
		return;
	}

	drm_dbg(&i915->drm, "FDI PLL freq=%d\n", i915->display.fdi.pll_freq);
}

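/*
 * FDI link frequency used for the M/N computation: the SPLL-driven port
 * clock on DDI (HSW/BDW) platforms, otherwise the FDI PLL frequency
 * cached by intel_fdi_pll_freq_update().
 */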
int intel_fdi_link_freq(struct drm_i915_private *i915,
			const struct intel_crtc_state *pipe_config)
{
	if (HAS_DDI(i915))
		return pipe_config->port_clock; /* SPLL */
	else
		return i915->display.fdi.pll_freq;
}

/**
 * intel_fdi_compute_pipe_bpp - compute pipe bpp limited by max link bpp
 * @crtc_state: the crtc state
 *
 * Compute the pipe bpp limited by the CRTC's maximum link bpp. Encoders can
 * call this function during state computation in the simple case where the
 * link bpp will always match the pipe bpp. This is the case for all non-DP
 * encoders, while DP encoders will use a link bpp lower than pipe bpp in case
 * of DSC compression.
 *
 * Returns %true in case of success, %false if pipe bpp would need to be
 * reduced below its valid range.
 */
bool intel_fdi_compute_pipe_bpp(struct intel_crtc_state *crtc_state)
{
	int pipe_bpp = min(crtc_state->pipe_bpp,
			   fxp_q4_to_int(crtc_state->max_link_bpp_x16));

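	/*
	 * pipe_bpp is the total over the 3 color components, so valid
	 * values come in steps of 6 (2 bits per component), with 18
	 * (6 bpc) as the minimum.
	 */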
	pipe_bpp = rounddown(pipe_bpp, 2 * 3);

	if (pipe_bpp < 6 * 3)
		return false;

	crtc_state->pipe_bpp = pipe_bpp;

	return true;
}

int ilk_fdi_compute_config(struct intel_crtc *crtc,
			   struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *i915 = to_i915(dev);
	const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	int lane, link_bw, fdi_dotclock;

	/*
	 * FDI is a binary signal running at ~2.7 GHz, encoding each output
	 * octet as 10 bits (8b/10b). The link frequency is stored as a
	 * divider of a 100 MHz clock, and the mode pixel clock is stored in
	 * units of 1 kHz. The per-lane bandwidth in terms of the mode signal
	 * follows from these units:
	 */
	link_bw = intel_fdi_link_freq(i915, pipe_config);

	fdi_dotclock = adjusted_mode->crtc_clock;

	lane = ilk_get_lanes_required(fdi_dotclock, link_bw,
				      pipe_config->pipe_bpp);

	pipe_config->fdi_lanes = lane;

	intel_link_compute_m_n(fxp_q4_from_int(pipe_config->pipe_bpp),
			       lane, fdi_dotclock,
			       link_bw,
			       intel_dp_bw_fec_overhead(false),
			       &pipe_config->fdi_m_n);

	return 0;
}

static int intel_fdi_atomic_check_bw(struct intel_atomic_state *state,
				     struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config,
				     struct intel_link_bw_limits *limits)
{
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	enum pipe pipe_to_reduce;
	int ret;

	ret = ilk_check_fdi_lanes(&i915->drm, crtc->pipe, pipe_config,
				  &pipe_to_reduce);
	if (ret != -EINVAL)
		return ret;

	ret = intel_link_bw_reduce_bpp(state, limits,
				       BIT(pipe_to_reduce),
				       "FDI link BW");

	return ret ? : -EAGAIN;
}

/**
 * intel_fdi_atomic_check_link - check all modeset FDI link configuration
 * @state: intel atomic state
 * @limits: link BW limits
 *
 * Check the link configuration for all modeset FDI outputs. If the
 * configuration is invalid @limits will be updated if possible to
 * reduce the total BW, after which the configuration for all CRTCs in
 * @state must be recomputed with the updated @limits.
 *
 * Returns:
 *   - 0 if the configuration is valid
 *   - %-EAGAIN, if the configuration is invalid and @limits got updated
 *     with fallback values with which the configuration of all CRTCs
 *     in @state must be recomputed
 *   - Other negative error, if the configuration is invalid without a
 *     fallback possibility, or the check failed for another reason
 */
int intel_fdi_atomic_check_link(struct intel_atomic_state *state,
				struct intel_link_bw_limits *limits)
{
	struct intel_crtc *crtc;
	struct intel_crtc_state *crtc_state;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		int ret;

		if (!crtc_state->has_pch_encoder ||
		    !intel_crtc_needs_modeset(crtc_state) ||
		    !crtc_state->hw.enable)
			continue;

		ret = intel_fdi_atomic_check_bw(state, crtc, crtc_state, limits);
		if (ret)
			return ret;
	}

	return 0;
}

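/*
 * FDI B and FDI C share one set of 4 lanes. The bifurcation bit in
 * SOUTH_CHICKEN1 splits them between the two links so that pipe C can
 * use FDI at all; with bifurcation disabled, FDI B can use all 4 lanes.
 */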
static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
{
	u32 temp;

	temp = intel_de_read(dev_priv, SOUTH_CHICKEN1);
	if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
		return;

	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, FDI_RX_CTL(PIPE_B)) &
		    FDI_RX_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, FDI_RX_CTL(PIPE_C)) &
		    FDI_RX_ENABLE);

	temp &= ~FDI_BC_BIFURCATION_SELECT;
	if (enable)
		temp |= FDI_BC_BIFURCATION_SELECT;

	drm_dbg_kms(&dev_priv->drm, "%sabling fdi C rx\n",
		    enable ? "en" : "dis");
	intel_de_write(dev_priv, SOUTH_CHICKEN1, temp);
	intel_de_posting_read(dev_priv, SOUTH_CHICKEN1);
}

static void ivb_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	switch (crtc->pipe) {
	case PIPE_A:
		break;
	case PIPE_B:
		if (crtc_state->fdi_lanes > 2)
			cpt_set_fdi_bc_bifurcation(dev_priv, false);
		else
			cpt_set_fdi_bc_bifurcation(dev_priv, true);

		break;
	case PIPE_C:
		cpt_set_fdi_bc_bifurcation(dev_priv, true);

		break;
	default:
		MISSING_CASE(crtc->pipe);
	}
}

void intel_fdi_normal_train(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (IS_IVYBRIDGE(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	}
	intel_de_write(dev_priv, reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	}
	intel_de_write(dev_priv, reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* wait one idle pattern time */
	intel_de_posting_read(dev_priv, reg);
	udelay(1000);

	/* IVB wants error correction enabled */
	if (IS_IVYBRIDGE(dev_priv))
		intel_de_rmw(dev_priv, reg, 0, FDI_FS_ERRC_ENABLE | FDI_FE_ERRC_ENABLE);
}

/* The FDI link training functions for ILK/Ibexpeak. */
static void ilk_fdi_link_train(struct intel_crtc *crtc,
			       const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, tries;

	/*
	 * Write the TU size bits before fdi link training, so that error
	 * detection works.
	 */
	intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
		       intel_de_read(dev_priv, PIPE_DATA_M1(dev_priv, pipe)) & TU_SIZE_MASK);

	/* FDI needs bits from pipe first */
	assert_transcoder_enabled(dev_priv, crtc_state->cpu_transcoder);

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	intel_de_write(dev_priv, reg, temp);
	intel_de_read(dev_priv, reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	/* Ironlake workaround, enable clock pointer after FDI enable */
	intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
		       FDI_RX_PHASE_SYNC_POINTER_OVR);
	intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
		       FDI_RX_PHASE_SYNC_POINTER_OVR | FDI_RX_PHASE_SYNC_POINTER_EN);

	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = intel_de_read(dev_priv, reg);
		drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

		if ((temp & FDI_RX_BIT_LOCK)) {
			drm_dbg_kms(&dev_priv->drm, "FDI train 1 done.\n");
			intel_de_write(dev_priv, reg, temp | FDI_RX_BIT_LOCK);
			break;
		}
	}
	if (tries == 5)
		drm_err(&dev_priv->drm, "FDI train 1 fail!\n");

	/* Train 2 */
	intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
		     FDI_LINK_TRAIN_NONE, FDI_LINK_TRAIN_PATTERN_2);
	intel_de_rmw(dev_priv, FDI_RX_CTL(pipe),
		     FDI_LINK_TRAIN_NONE, FDI_LINK_TRAIN_PATTERN_2);
	intel_de_posting_read(dev_priv, FDI_RX_CTL(pipe));
	udelay(150);

	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = intel_de_read(dev_priv, reg);
		drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			intel_de_write(dev_priv, reg,
				       temp | FDI_RX_SYMBOL_LOCK);
			drm_dbg_kms(&dev_priv->drm, "FDI train 2 done.\n");
			break;
		}
	}
	if (tries == 5)
		drm_err(&dev_priv->drm, "FDI train 2 fail!\n");

	drm_dbg_kms(&dev_priv->drm, "FDI train done\n");
}

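/*
 * Voltage swing / pre-emphasis combinations stepped through during
 * SNB/IVB FDI link training until bit and symbol lock are achieved.
 */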
static const int snb_b_fdi_train_param[] = {
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};

/* The FDI link training functions for SNB/Cougarpoint. */
static void gen6_fdi_link_train(struct intel_crtc *crtc,
				const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, retry;

	/*
	 * Write the TU size bits before fdi link training, so that error
	 * detection works.
	 */
	intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
		       intel_de_read(dev_priv, PIPE_DATA_M1(dev_priv, pipe)) & TU_SIZE_MASK);

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	/* SNB-B */
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);

	intel_de_write(dev_priv, FDI_RX_MISC(pipe),
		       FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	for (i = 0; i < 4; i++) {
		intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
			     FDI_LINK_TRAIN_VOL_EMP_MASK, snb_b_fdi_train_param[i]);
		intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe));
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_BIT_LOCK) {
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_BIT_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 1 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		drm_err(&dev_priv->drm, "FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	if (IS_SANDYBRIDGE(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* SNB-B */
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	}
	intel_de_write(dev_priv, reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_2;
	}
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	for (i = 0; i < 4; i++) {
		intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
			     FDI_LINK_TRAIN_VOL_EMP_MASK, snb_b_fdi_train_param[i]);
		intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe));
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_SYMBOL_LOCK) {
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_SYMBOL_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 2 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		drm_err(&dev_priv->drm, "FDI train 2 fail!\n");

	drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
}

/* Manual link training for Ivy Bridge A0 parts */
static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
				      const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, j;

	ivb_update_fdi_bc_bifurcation(crtc_state);

	/*
	 * Write the TU size bits before fdi link training, so that error
	 * detection works.
	 */
	intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
		       intel_de_read(dev_priv, PIPE_DATA_M1(dev_priv, pipe)) & TU_SIZE_MASK);

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR before link train 0x%x\n",
		    intel_de_read(dev_priv, FDI_RX_IIR(pipe)));

	/* Try each vswing and preemphasis setting twice before moving on */
	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
		/* disable first in case we need to retry */
		reg = FDI_TX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
		temp &= ~FDI_TX_ENABLE;
		intel_de_write(dev_priv, reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_LINK_TRAIN_AUTO;
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp &= ~FDI_RX_ENABLE;
		intel_de_write(dev_priv, reg, temp);

		/* enable CPU FDI TX and PCH FDI RX */
		reg = FDI_TX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_DP_PORT_WIDTH_MASK;
		temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[j/2];
		temp |= FDI_COMPOSITE_SYNC;
		intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);

		intel_de_write(dev_priv, FDI_RX_MISC(pipe),
			       FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

		reg = FDI_RX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
		temp |= FDI_COMPOSITE_SYNC;
		intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);

		intel_de_posting_read(dev_priv, reg);
		udelay(1); /* should be 0.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_BIT_LOCK ||
			    (intel_de_read(dev_priv, reg) & FDI_RX_BIT_LOCK)) {
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_BIT_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 1 done, level %i.\n",
					    i);
				break;
			}
			udelay(1); /* should be 0.5us */
		}
		if (i == 4) {
			drm_dbg_kms(&dev_priv->drm,
				    "FDI train 1 fail on vswing %d\n", j / 2);
			continue;
		}

		/* Train 2 */
		intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
			     FDI_LINK_TRAIN_NONE_IVB,
			     FDI_LINK_TRAIN_PATTERN_2_IVB);
		intel_de_rmw(dev_priv, FDI_RX_CTL(pipe),
			     FDI_LINK_TRAIN_PATTERN_MASK_CPT,
			     FDI_LINK_TRAIN_PATTERN_2_CPT);
		intel_de_posting_read(dev_priv, FDI_RX_CTL(pipe));
		udelay(2); /* should be 1.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_SYMBOL_LOCK ||
			    (intel_de_read(dev_priv, reg) & FDI_RX_SYMBOL_LOCK)) {
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_SYMBOL_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 2 done, level %i.\n",
					    i);
				goto train_done;
			}
			udelay(2); /* should be 1.5us */
		}
		if (i == 4)
			drm_dbg_kms(&dev_priv->drm,
				    "FDI train 2 fail on vswing %d\n", j / 2);
	}

train_done:
	drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
}

/*
 * Starting with Haswell, different DDI ports can work in FDI mode for
 * connection to the PCH-located connectors. For this, it is necessary to train
 * both the DDI port and the PCH receiver for the desired DDI buffer settings.
 *
 * The recommended port to work in FDI mode is DDI E, which we use here. Also,
 * please note that when FDI mode is active on DDI E, it shares 2 lanes with
 * DDI A (which is used for eDP).
 */
void hsw_fdi_link_train(struct intel_encoder *encoder,
			const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 temp, i, rx_ctl_val;
	int n_entries;

	encoder->get_buf_trans(encoder, crtc_state, &n_entries);

	hsw_prepare_dp_ddi_buffers(encoder, crtc_state);

	/* Set the FDI_RX_MISC pwrdn lanes and the 2 workarounds listed at the
	 * mode set "sequence for CRT port" document:
	 * - TP1 to TP2 time with the default value
	 * - FDI delay to 90h
	 *
	 * WaFDIAutoLinkSetTimingOverrride:hsw
	 */
	intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A),
		       FDI_RX_PWRDN_LANE1_VAL(2) |
		       FDI_RX_PWRDN_LANE0_VAL(2) |
		       FDI_RX_TP1_TO_TP2_48 |
		       FDI_RX_FDI_DELAY_90);

	/* Enable the PCH Receiver FDI PLL */
	rx_ctl_val = dev_priv->display.fdi.rx_config | FDI_RX_ENHANCE_FRAME_ENABLE |
		     FDI_RX_PLL_ENABLE |
		     FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
	intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));
	udelay(220);

	/* Switch from Rawclk to PCDclk */
	rx_ctl_val |= FDI_PCDCLK;
	intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);

	/* Configure Port Clock Select */
	drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll->info->id != DPLL_ID_SPLL);
	intel_ddi_enable_clock(encoder, crtc_state);

	/* Start the training iterating through available voltages and emphasis,
	 * testing each value twice. */
	for (i = 0; i < n_entries * 2; i++) {
		/* Configure DP_TP_CTL with auto-training */
		intel_de_write(dev_priv, DP_TP_CTL(PORT_E),
			       DP_TP_CTL_FDI_AUTOTRAIN |
			       DP_TP_CTL_ENHANCED_FRAME_ENABLE |
			       DP_TP_CTL_LINK_TRAIN_PAT1 |
			       DP_TP_CTL_ENABLE);

		/* Configure and enable DDI_BUF_CTL for DDI E with next voltage.
		 * DDI E does not support port reversal, the functionality is
		 * achieved on the PCH side in FDI_RX_CTL, so no need to set the
		 * port reversal bit */
		intel_de_write(dev_priv, DDI_BUF_CTL(PORT_E),
			       DDI_BUF_CTL_ENABLE |
			       ((crtc_state->fdi_lanes - 1) << 1) |
			       DDI_BUF_TRANS_SELECT(i / 2));
		intel_de_posting_read(dev_priv, DDI_BUF_CTL(PORT_E));

		udelay(600);

		/* Program PCH FDI Receiver TU */
		intel_de_write(dev_priv, FDI_RX_TUSIZE1(PIPE_A), TU_SIZE(64));

		/* Enable PCH FDI Receiver with auto-training */
		rx_ctl_val |= FDI_RX_ENABLE | FDI_LINK_TRAIN_AUTO;
		intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
		intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));

		/* Wait for FDI receiver lane calibration */
		udelay(30);

		/* Unset FDI_RX_MISC pwrdn lanes */
		intel_de_rmw(dev_priv, FDI_RX_MISC(PIPE_A),
			     FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK, 0);
		intel_de_posting_read(dev_priv, FDI_RX_MISC(PIPE_A));

		/* Wait for FDI auto training time */
		udelay(5);

		temp = intel_de_read(dev_priv, DP_TP_STATUS(PORT_E));
		if (temp & DP_TP_STATUS_AUTOTRAIN_DONE) {
			drm_dbg_kms(&dev_priv->drm,
				    "FDI link training done on step %d\n", i);
			break;
		}

		/*
		 * Leave things enabled even if we failed to train FDI.
		 * Results in less fireworks from the state checker.
		 */
		if (i == n_entries * 2 - 1) {
			drm_err(&dev_priv->drm, "FDI link training failed!\n");
			break;
		}

		rx_ctl_val &= ~FDI_RX_ENABLE;
		intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
		intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));

		intel_de_rmw(dev_priv, DDI_BUF_CTL(PORT_E), DDI_BUF_CTL_ENABLE, 0);
		intel_de_posting_read(dev_priv, DDI_BUF_CTL(PORT_E));

		/* Disable DP_TP_CTL and FDI_RX_CTL and retry */
		intel_de_rmw(dev_priv, DP_TP_CTL(PORT_E), DP_TP_CTL_ENABLE, 0);
		intel_de_posting_read(dev_priv, DP_TP_CTL(PORT_E));

		intel_wait_ddi_buf_idle(dev_priv, PORT_E);

		/* Reset FDI_RX_MISC pwrdn lanes */
		intel_de_rmw(dev_priv, FDI_RX_MISC(PIPE_A),
			     FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK,
			     FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2));
		intel_de_posting_read(dev_priv, FDI_RX_MISC(PIPE_A));
	}

	/* Enable normal pixel sending for FDI */
	intel_de_write(dev_priv, DP_TP_CTL(PORT_E),
		       DP_TP_CTL_FDI_AUTOTRAIN |
		       DP_TP_CTL_LINK_TRAIN_NORMAL |
		       DP_TP_CTL_ENHANCED_FRAME_ENABLE |
		       DP_TP_CTL_ENABLE);
}

void hsw_fdi_disable(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);

	/*
	 * Bspec lists this as both step 13 (before DDI_BUF_CTL disable)
	 * and step 18 (after clearing PORT_CLK_SEL). Based on a BUN,
	 * step 13 is the correct place for it. Step 18 is where it was
	 * originally before the BUN.
	 */
	intel_de_rmw(dev_priv, FDI_RX_CTL(PIPE_A), FDI_RX_ENABLE, 0);
	intel_de_rmw(dev_priv, DDI_BUF_CTL(PORT_E), DDI_BUF_CTL_ENABLE, 0);
	intel_wait_ddi_buf_idle(dev_priv, PORT_E);
	intel_ddi_disable_clock(encoder);
	intel_de_rmw(dev_priv, FDI_RX_MISC(PIPE_A),
		     FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK,
		     FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2));
	intel_de_rmw(dev_priv, FDI_RX_CTL(PIPE_A), FDI_PCDCLK, 0);
	intel_de_rmw(dev_priv, FDI_RX_CTL(PIPE_A), FDI_RX_PLL_ENABLE, 0);
}

void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
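	/* BPC in FDI rx must match the transcoder: copy it from TRANSCONF */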
	temp |= (intel_de_read(dev_priv, TRANSCONF(dev_priv, pipe)) & TRANSCONF_BPC_MASK) << 11;
	intel_de_write(dev_priv, reg, temp | FDI_RX_PLL_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(200);

	/* Switch from Rawclk to PCDclk */
	intel_de_rmw(dev_priv, reg, 0, FDI_PCDCLK);
	intel_de_posting_read(dev_priv, reg);
	udelay(200);

	/* Enable CPU FDI TX PLL, always on for Ironlake */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
		intel_de_write(dev_priv, reg, temp | FDI_TX_PLL_ENABLE);

		intel_de_posting_read(dev_priv, reg);
		udelay(100);
	}
}

void ilk_fdi_pll_disable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;

	/* Switch from PCDclk to Rawclk */
	intel_de_rmw(dev_priv, FDI_RX_CTL(pipe), FDI_PCDCLK, 0);

	/* Disable CPU FDI TX PLL */
	intel_de_rmw(dev_priv, FDI_TX_CTL(pipe), FDI_TX_PLL_ENABLE, 0);
	intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe));
	udelay(100);

	/* Wait for the clocks to turn off. */
	intel_de_rmw(dev_priv, FDI_RX_CTL(pipe), FDI_RX_PLL_ENABLE, 0);
	intel_de_posting_read(dev_priv, FDI_RX_CTL(pipe));
	udelay(100);
}

void ilk_fdi_disable(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* disable CPU FDI tx and PCH FDI rx */
	intel_de_rmw(dev_priv, FDI_TX_CTL(pipe), FDI_TX_ENABLE, 0);
	intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe));

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~(0x7 << 16);
	temp |= (intel_de_read(dev_priv, TRANSCONF(dev_priv, pipe)) & TRANSCONF_BPC_MASK) << 11;
	intel_de_write(dev_priv, reg, temp & ~FDI_RX_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(100);

	/* Ironlake workaround, disable clock pointer after downing FDI */
	if (HAS_PCH_IBX(dev_priv))
		intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
			       FDI_RX_PHASE_SYNC_POINTER_OVR);

	/* still set train pattern 1 */
	intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
		     FDI_LINK_TRAIN_NONE, FDI_LINK_TRAIN_PATTERN_1);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	/* BPC in FDI rx is consistent with that in TRANSCONF */
	temp &= ~(0x07 << 16);
	temp |= (intel_de_read(dev_priv, TRANSCONF(dev_priv, pipe)) & TRANSCONF_BPC_MASK) << 11;
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(100);
}

static const struct intel_fdi_funcs ilk_funcs = {
	.fdi_link_train = ilk_fdi_link_train,
};

static const struct intel_fdi_funcs gen6_funcs = {
	.fdi_link_train = gen6_fdi_link_train,
};

static const struct intel_fdi_funcs ivb_funcs = {
	.fdi_link_train = ivb_manual_fdi_link_train,
};

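/*
 * Select the FDI link training implementation for the platform. HSW+
 * (DDI) platforms train FDI from the DDI code via hsw_fdi_link_train()
 * instead, so no hook is installed for them.
 */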
void
intel_fdi_init_hook(struct drm_i915_private *dev_priv)
{
	if (IS_IRONLAKE(dev_priv)) {
		dev_priv->display.funcs.fdi = &ilk_funcs;
	} else if (IS_SANDYBRIDGE(dev_priv)) {
		dev_priv->display.funcs.fdi = &gen6_funcs;
	} else if (IS_IVYBRIDGE(dev_priv)) {
		/* FIXME: detect B0+ stepping and use auto training */
		dev_priv->display.funcs.fdi = &ivb_funcs;
	}
}
1155