xref: /linux/drivers/gpu/drm/i915/display/intel_fdi.c (revision 53597deca0e38c30e6cd4ba2114fa42d2bcd85bb)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2020 Intel Corporation
4  */
5 
6 #include <linux/string_helpers.h>
7 
8 #include <drm/drm_fixed.h>
9 #include <drm/drm_print.h>
10 
11 #include "intel_atomic.h"
12 #include "intel_crtc.h"
13 #include "intel_ddi.h"
14 #include "intel_de.h"
15 #include "intel_display_regs.h"
16 #include "intel_display_types.h"
17 #include "intel_display_utils.h"
18 #include "intel_dp.h"
19 #include "intel_fdi.h"
20 #include "intel_fdi_regs.h"
21 #include "intel_link_bw.h"
22 
/*
 * Vfunc table for FDI link training; the implementation used depends on
 * the platform (see the *_fdi_link_train() functions below). Invoked via
 * display->funcs.fdi in intel_fdi_link_train().
 */
struct intel_fdi_funcs {
	void (*fdi_link_train)(struct intel_crtc *crtc,
			       const struct intel_crtc_state *crtc_state);
};
27 
28 static void assert_fdi_tx(struct intel_display *display,
29 			  enum pipe pipe, bool state)
30 {
31 	bool cur_state;
32 
33 	if (HAS_DDI(display)) {
34 		/*
35 		 * DDI does not have a specific FDI_TX register.
36 		 *
37 		 * FDI is never fed from EDP transcoder
38 		 * so pipe->transcoder cast is fine here.
39 		 */
40 		enum transcoder cpu_transcoder = (enum transcoder)pipe;
41 		cur_state = intel_de_read(display,
42 					  TRANS_DDI_FUNC_CTL(display, cpu_transcoder)) & TRANS_DDI_FUNC_ENABLE;
43 	} else {
44 		cur_state = intel_de_read(display, FDI_TX_CTL(pipe)) & FDI_TX_ENABLE;
45 	}
46 	INTEL_DISPLAY_STATE_WARN(display, cur_state != state,
47 				 "FDI TX state assertion failure (expected %s, current %s)\n",
48 				 str_on_off(state), str_on_off(cur_state));
49 }
50 
/* Assert that the FDI transmitter for @pipe is enabled */
void assert_fdi_tx_enabled(struct intel_display *display, enum pipe pipe)
{
	assert_fdi_tx(display, pipe, true);
}
55 
/* Assert that the FDI transmitter for @pipe is disabled */
void assert_fdi_tx_disabled(struct intel_display *display, enum pipe pipe)
{
	assert_fdi_tx(display, pipe, false);
}
60 
61 static void assert_fdi_rx(struct intel_display *display,
62 			  enum pipe pipe, bool state)
63 {
64 	bool cur_state;
65 
66 	cur_state = intel_de_read(display, FDI_RX_CTL(pipe)) & FDI_RX_ENABLE;
67 	INTEL_DISPLAY_STATE_WARN(display, cur_state != state,
68 				 "FDI RX state assertion failure (expected %s, current %s)\n",
69 				 str_on_off(state), str_on_off(cur_state));
70 }
71 
/* Assert that the PCH FDI receiver for @pipe is enabled */
void assert_fdi_rx_enabled(struct intel_display *display, enum pipe pipe)
{
	assert_fdi_rx(display, pipe, true);
}
76 
/* Assert that the PCH FDI receiver for @pipe is disabled */
void assert_fdi_rx_disabled(struct intel_display *display, enum pipe pipe)
{
	assert_fdi_rx(display, pipe, false);
}
81 
82 void assert_fdi_tx_pll_enabled(struct intel_display *display, enum pipe pipe)
83 {
84 	bool cur_state;
85 
86 	/* ILK FDI PLL is always enabled */
87 	if (display->platform.ironlake)
88 		return;
89 
90 	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
91 	if (HAS_DDI(display))
92 		return;
93 
94 	cur_state = intel_de_read(display, FDI_TX_CTL(pipe)) & FDI_TX_PLL_ENABLE;
95 	INTEL_DISPLAY_STATE_WARN(display, !cur_state,
96 				 "FDI TX PLL assertion failure, should be active but is disabled\n");
97 }
98 
99 static void assert_fdi_rx_pll(struct intel_display *display,
100 			      enum pipe pipe, bool state)
101 {
102 	bool cur_state;
103 
104 	cur_state = intel_de_read(display, FDI_RX_CTL(pipe)) & FDI_RX_PLL_ENABLE;
105 	INTEL_DISPLAY_STATE_WARN(display, cur_state != state,
106 				 "FDI RX PLL assertion failure (expected %s, current %s)\n",
107 				 str_on_off(state), str_on_off(cur_state));
108 }
109 
/* Assert that the PCH FDI RX PLL for @pipe is enabled */
void assert_fdi_rx_pll_enabled(struct intel_display *display, enum pipe pipe)
{
	assert_fdi_rx_pll(display, pipe, true);
}
114 
/* Assert that the PCH FDI RX PLL for @pipe is disabled */
void assert_fdi_rx_pll_disabled(struct intel_display *display, enum pipe pipe)
{
	assert_fdi_rx_pll(display, pipe, false);
}
119 
/* Train the FDI link for @crtc via the platform-specific vfunc */
void intel_fdi_link_train(struct intel_crtc *crtc,
			  const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc);

	display->funcs.fdi->fdi_link_train(crtc, crtc_state);
}
127 
/**
 * intel_fdi_add_affected_crtcs - add CRTCs on FDI affected by other modeset CRTCs
 * @state: intel atomic state
 *
 * Add a CRTC using FDI to @state if changing another CRTC's FDI BW usage is
 * known to affect the available FDI BW for the former CRTC. In practice this
 * means adding CRTC B on IVYBRIDGE if its use of FDI lanes is limited (by
 * CRTC C) and CRTC C is getting disabled.
 *
 * Returns 0 in case of success, or a negative error code otherwise.
 */
int intel_fdi_add_affected_crtcs(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	const struct intel_crtc_state *old_crtc_state;
	const struct intel_crtc_state *new_crtc_state;
	struct intel_crtc *crtc;

	/* Only 3-pipe IVB shares FDI lanes between pipes B and C */
	if (!display->platform.ivybridge || INTEL_NUM_PIPES(display) != 3)
		return 0;

	/* Is pipe C part of this commit at all? */
	crtc = intel_crtc_for_pipe(display, PIPE_C);
	new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
	if (!new_crtc_state)
		return 0;

	if (!intel_crtc_needs_modeset(new_crtc_state))
		return 0;

	/* Pipe C wasn't using FDI, so its modeset can't free up lanes */
	old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);
	if (!old_crtc_state->fdi_lanes)
		return 0;

	/* Pull pipe B into the state so its FDI config gets recomputed */
	crtc = intel_crtc_for_pipe(display, PIPE_B);
	new_crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
	if (IS_ERR(new_crtc_state))
		return PTR_ERR(new_crtc_state);

	/* Pipe B isn't using FDI either, nothing to recompute */
	old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);
	if (!old_crtc_state->fdi_lanes)
		return 0;

	return intel_modeset_pipes_in_mask_early(state,
						 "FDI link BW decrease on pipe C",
						 BIT(PIPE_B));
}
174 
175 /* units of 100MHz */
176 static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
177 {
178 	if (crtc_state->hw.enable && crtc_state->has_pch_encoder)
179 		return crtc_state->fdi_lanes;
180 
181 	return 0;
182 }
183 
/*
 * Validate the FDI lane count in @pipe_config against the per-platform
 * limits and the IVB 3-pipe lane sharing constraints.
 *
 * Returns 0 if the config is valid, -EINVAL if it isn't (in which case
 * *@pipe_to_reduce names the pipe whose BW should be reduced), or
 * another negative error code if getting the sibling CRTC state fails.
 */
static int ilk_check_fdi_lanes(struct intel_display *display, enum pipe pipe,
			       struct intel_crtc_state *pipe_config,
			       enum pipe *pipe_to_reduce)
{
	struct drm_atomic_state *state = pipe_config->uapi.state;
	struct intel_crtc *other_crtc;
	struct intel_crtc_state *other_crtc_state;

	/* By default reduce BW on the pipe being checked itself */
	*pipe_to_reduce = pipe;

	drm_dbg_kms(display->drm,
		    "checking fdi config on pipe %c, lanes %i\n",
		    pipe_name(pipe), pipe_config->fdi_lanes);
	/* 4 lanes is the overall maximum */
	if (pipe_config->fdi_lanes > 4) {
		drm_dbg_kms(display->drm,
			    "invalid fdi lane config on pipe %c: %i lanes\n",
			    pipe_name(pipe), pipe_config->fdi_lanes);
		return -EINVAL;
	}

	/* HSW/BDW FDI is limited to 2 lanes */
	if (display->platform.haswell || display->platform.broadwell) {
		if (pipe_config->fdi_lanes > 2) {
			drm_dbg_kms(display->drm,
				    "only 2 lanes on haswell, required: %i lanes\n",
				    pipe_config->fdi_lanes);
			return -EINVAL;
		} else {
			return 0;
		}
	}

	/* With only two pipes there is no lane sharing to worry about */
	if (INTEL_NUM_PIPES(display) == 2)
		return 0;

	/* Ivybridge 3 pipe is really complicated */
	switch (pipe) {
	case PIPE_A:
		/* Pipe A doesn't share lanes with B/C */
		return 0;
	case PIPE_B:
		/* Up to 2 lanes never conflict with pipe C */
		if (pipe_config->fdi_lanes <= 2)
			return 0;

		other_crtc = intel_crtc_for_pipe(display, PIPE_C);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		/* More than 2 lanes on B requires pipe C to not use FDI */
		if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
			drm_dbg_kms(display->drm,
				    "invalid shared fdi lane config on pipe %c: %i lanes\n",
				    pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}
		return 0;
	case PIPE_C:
		/* Pipe C gets at most half of the shared lanes */
		if (pipe_config->fdi_lanes > 2) {
			drm_dbg_kms(display->drm,
				    "only 2 lanes on pipe %c: required %i lanes\n",
				    pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}

		other_crtc = intel_crtc_for_pipe(display, PIPE_B);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		/* Pipe C can only be enabled if B leaves it 2 lanes */
		if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
			drm_dbg_kms(display->drm,
				    "fdi link B uses too many lanes to enable link C\n");

			/* Here it's pipe B that has to give up BW */
			*pipe_to_reduce = PIPE_B;

			return -EINVAL;
		}
		return 0;
	default:
		MISSING_CASE(pipe);
		return 0;
	}
}
267 
268 void intel_fdi_pll_freq_update(struct intel_display *display)
269 {
270 	if (display->platform.ironlake) {
271 		u32 fdi_pll_clk;
272 
273 		fdi_pll_clk = intel_de_read(display, FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
274 
275 		display->fdi.pll_freq = (fdi_pll_clk + 2) * 10000;
276 	} else if (display->platform.sandybridge || display->platform.ivybridge) {
277 		display->fdi.pll_freq = 270000;
278 	} else {
279 		return;
280 	}
281 
282 	drm_dbg(display->drm, "FDI PLL freq=%d\n", display->fdi.pll_freq);
283 }
284 
285 int intel_fdi_link_freq(struct intel_display *display,
286 			const struct intel_crtc_state *pipe_config)
287 {
288 	if (HAS_DDI(display))
289 		return pipe_config->port_clock; /* SPLL */
290 	else
291 		return display->fdi.pll_freq;
292 }
293 
294 int ilk_fdi_compute_config(struct intel_crtc *crtc,
295 			   struct intel_crtc_state *pipe_config)
296 {
297 	struct intel_display *display = to_intel_display(crtc);
298 	const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
299 	int lane, link_bw, fdi_dotclock;
300 
301 	/* FDI is a binary signal running at ~2.7GHz, encoding
302 	 * each output octet as 10 bits. The actual frequency
303 	 * is stored as a divider into a 100MHz clock, and the
304 	 * mode pixel clock is stored in units of 1KHz.
305 	 * Hence the bw of each lane in terms of the mode signal
306 	 * is:
307 	 */
308 	link_bw = intel_fdi_link_freq(display, pipe_config);
309 
310 	fdi_dotclock = adjusted_mode->crtc_clock;
311 
312 	lane = ilk_get_lanes_required(fdi_dotclock, link_bw,
313 				      pipe_config->pipe_bpp);
314 
315 	pipe_config->fdi_lanes = lane;
316 
317 	intel_link_compute_m_n(fxp_q4_from_int(pipe_config->pipe_bpp),
318 			       lane, fdi_dotclock,
319 			       link_bw,
320 			       intel_dp_bw_fec_overhead(false),
321 			       &pipe_config->fdi_m_n);
322 
323 	return 0;
324 }
325 
326 static int intel_fdi_atomic_check_bw(struct intel_atomic_state *state,
327 				     struct intel_crtc *crtc,
328 				     struct intel_crtc_state *pipe_config,
329 				     struct intel_link_bw_limits *limits)
330 {
331 	struct intel_display *display = to_intel_display(crtc);
332 	enum pipe pipe_to_reduce;
333 	int ret;
334 
335 	ret = ilk_check_fdi_lanes(display, crtc->pipe, pipe_config,
336 				  &pipe_to_reduce);
337 	if (ret != -EINVAL)
338 		return ret;
339 
340 	ret = intel_link_bw_reduce_bpp(state, limits,
341 				       BIT(pipe_to_reduce),
342 				       "FDI link BW");
343 
344 	return ret ? : -EAGAIN;
345 }
346 
347 /**
348  * intel_fdi_atomic_check_link - check all modeset FDI link configuration
349  * @state: intel atomic state
350  * @limits: link BW limits
351  *
352  * Check the link configuration for all modeset FDI outputs. If the
353  * configuration is invalid @limits will be updated if possible to
354  * reduce the total BW, after which the configuration for all CRTCs in
355  * @state must be recomputed with the updated @limits.
356  *
357  * Returns:
358  *   - 0 if the configuration is valid
359  *   - %-EAGAIN, if the configuration is invalid and @limits got updated
360  *     with fallback values with which the configuration of all CRTCs
361  *     in @state must be recomputed
362  *   - Other negative error, if the configuration is invalid without a
363  *     fallback possibility, or the check failed for another reason
364  */
365 int intel_fdi_atomic_check_link(struct intel_atomic_state *state,
366 				struct intel_link_bw_limits *limits)
367 {
368 	struct intel_crtc *crtc;
369 	struct intel_crtc_state *crtc_state;
370 	int i;
371 
372 	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
373 		int ret;
374 
375 		if (!crtc_state->has_pch_encoder ||
376 		    !intel_crtc_needs_modeset(crtc_state) ||
377 		    !crtc_state->hw.enable)
378 			continue;
379 
380 		ret = intel_fdi_atomic_check_bw(state, crtc, crtc_state, limits);
381 		if (ret)
382 			return ret;
383 	}
384 
385 	return 0;
386 }
387 
/*
 * Set or clear the FDI B/C lane bifurcation bit in SOUTH_CHICKEN1.
 * The bit must only be flipped while both FDI RX B and C are disabled
 * (enforced by the WARNs below).
 */
static void cpt_set_fdi_bc_bifurcation(struct intel_display *display, bool enable)
{
	u32 temp;

	temp = intel_de_read(display, SOUTH_CHICKEN1);
	/* nothing to do if the bit already matches the requested state */
	if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
		return;

	/* bifurcation must not change while either receiver is enabled */
	drm_WARN_ON(display->drm,
		    intel_de_read(display, FDI_RX_CTL(PIPE_B)) &
		    FDI_RX_ENABLE);
	drm_WARN_ON(display->drm,
		    intel_de_read(display, FDI_RX_CTL(PIPE_C)) &
		    FDI_RX_ENABLE);

	temp &= ~FDI_BC_BIFURCATION_SELECT;
	if (enable)
		temp |= FDI_BC_BIFURCATION_SELECT;

	drm_dbg_kms(display->drm, "%sabling fdi C rx\n",
		    enable ? "en" : "dis");
	intel_de_write(display, SOUTH_CHICKEN1, temp);
	/* posting read to flush the write before callers proceed */
	intel_de_posting_read(display, SOUTH_CHICKEN1);
}
412 
413 static void ivb_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
414 {
415 	struct intel_display *display = to_intel_display(crtc_state);
416 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
417 
418 	switch (crtc->pipe) {
419 	case PIPE_A:
420 		break;
421 	case PIPE_B:
422 		if (crtc_state->fdi_lanes > 2)
423 			cpt_set_fdi_bc_bifurcation(display, false);
424 		else
425 			cpt_set_fdi_bc_bifurcation(display, true);
426 
427 		break;
428 	case PIPE_C:
429 		cpt_set_fdi_bc_bifurcation(display, true);
430 
431 		break;
432 	default:
433 		MISSING_CASE(crtc->pipe);
434 	}
435 }
436 
/*
 * Switch the FDI TX/RX for @crtc from the training patterns to the
 * normal link pattern once link training has completed.
 */
void intel_fdi_normal_train(struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(crtc);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(display, reg);
	if (display->platform.ivybridge) {
		/* IVB has its own "train none" field layout */
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	}
	intel_de_write(display, reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(display, reg);
	if (HAS_PCH_CPT(display)) {
		/* CPT PCH uses a dedicated pattern field on the RX side */
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	}
	intel_de_write(display, reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* wait one idle pattern time */
	intel_de_posting_read(display, reg);
	udelay(1000);

	/* IVB wants error correction enabled */
	if (display->platform.ivybridge)
		intel_de_rmw(display, reg, 0, FDI_FS_ERRC_ENABLE | FDI_FE_ERRC_ENABLE);
}
475 
/* The FDI link training functions for ILK/Ibexpeak. */
static void ilk_fdi_link_train(struct intel_crtc *crtc,
			       const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, tries;

	/*
	 * Write the TU size bits before fdi link training, so that error
	 * detection works.
	 */
	intel_de_write(display, FDI_RX_TUSIZE1(pipe),
		       intel_de_read(display, PIPE_DATA_M1(display, pipe)) & TU_SIZE_MASK);

	/* FDI needs bits from pipe first */
	assert_transcoder_enabled(display, crtc_state->cpu_transcoder);

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = intel_de_read(display, reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	intel_de_write(display, reg, temp);
	intel_de_read(display, reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(display, reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	intel_de_write(display, reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(display, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	intel_de_write(display, reg, temp | FDI_RX_ENABLE);

	intel_de_posting_read(display, reg);
	udelay(150);

	/* Ironlake workaround, enable clock pointer after FDI enable*/
	intel_de_write(display, FDI_RX_CHICKEN(pipe),
		       FDI_RX_PHASE_SYNC_POINTER_OVR);
	intel_de_write(display, FDI_RX_CHICKEN(pipe),
		       FDI_RX_PHASE_SYNC_POINTER_OVR | FDI_RX_PHASE_SYNC_POINTER_EN);

	/* poll up to 5 times for bit lock (training pattern 1 result) */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = intel_de_read(display, reg);
		drm_dbg_kms(display->drm, "FDI_RX_IIR 0x%x\n", temp);

		if ((temp & FDI_RX_BIT_LOCK)) {
			drm_dbg_kms(display->drm, "FDI train 1 done.\n");
			/* writing the bit back presumably acks the status */
			intel_de_write(display, reg, temp | FDI_RX_BIT_LOCK);
			break;
		}
	}
	if (tries == 5)
		drm_err(display->drm, "FDI train 1 fail!\n");

	/* Train 2 */
	intel_de_rmw(display, FDI_TX_CTL(pipe),
		     FDI_LINK_TRAIN_NONE, FDI_LINK_TRAIN_PATTERN_2);
	intel_de_rmw(display, FDI_RX_CTL(pipe),
		     FDI_LINK_TRAIN_NONE, FDI_LINK_TRAIN_PATTERN_2);
	intel_de_posting_read(display, FDI_RX_CTL(pipe));
	udelay(150);

	/* poll up to 5 times for symbol lock (training pattern 2 result) */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = intel_de_read(display, reg);
		drm_dbg_kms(display->drm, "FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			intel_de_write(display, reg,
				       temp | FDI_RX_SYMBOL_LOCK);
			drm_dbg_kms(display->drm, "FDI train 2 done.\n");
			break;
		}
	}
	if (tries == 5)
		drm_err(display->drm, "FDI train 2 fail!\n");

	drm_dbg_kms(display->drm, "FDI train done\n");

}
569 
/*
 * Voltage swing / pre-emphasis values tried in order during SNB/IVB
 * FDI link training.
 */
static const int snb_b_fdi_train_param[] = {
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};
576 
/* The FDI link training functions for SNB/Cougarpoint. */
static void gen6_fdi_link_train(struct intel_crtc *crtc,
				const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, retry;

	/*
	 * Write the TU size bits before fdi link training, so that error
	 * detection works.
	 */
	intel_de_write(display, FDI_RX_TUSIZE1(pipe),
		       intel_de_read(display, PIPE_DATA_M1(display, pipe)) & TU_SIZE_MASK);

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = intel_de_read(display, reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	intel_de_write(display, reg, temp);

	intel_de_posting_read(display, reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(display, reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	/* SNB-B: start from the lowest voltage swing / emphasis */
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	intel_de_write(display, reg, temp | FDI_TX_ENABLE);

	intel_de_write(display, FDI_RX_MISC(pipe),
		       FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(display, reg);
	if (HAS_PCH_CPT(display)) {
		/* CPT PCH uses a dedicated RX pattern field */
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	intel_de_write(display, reg, temp | FDI_RX_ENABLE);

	intel_de_posting_read(display, reg);
	udelay(150);

	/* try each vswing/emphasis value, polling for bit lock */
	for (i = 0; i < 4; i++) {
		intel_de_rmw(display, FDI_TX_CTL(pipe),
			     FDI_LINK_TRAIN_VOL_EMP_MASK, snb_b_fdi_train_param[i]);
		intel_de_posting_read(display, FDI_TX_CTL(pipe));
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(display, reg);
			drm_dbg_kms(display->drm, "FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_BIT_LOCK) {
				intel_de_write(display, reg,
					       temp | FDI_RX_BIT_LOCK);
				drm_dbg_kms(display->drm,
					    "FDI train 1 done.\n");
				break;
			}
			udelay(50);
		}
		/* retry < 5 means bit lock was achieved with this setting */
		if (retry < 5)
			break;
	}
	if (i == 4)
		drm_err(display->drm, "FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(display, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	if (display->platform.sandybridge) {
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* SNB-B: restart from the lowest voltage swing / emphasis */
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	}
	intel_de_write(display, reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(display, reg);
	if (HAS_PCH_CPT(display)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_2;
	}
	intel_de_write(display, reg, temp);

	intel_de_posting_read(display, reg);
	udelay(150);

	/* same vswing/emphasis walk as train 1, now polling symbol lock */
	for (i = 0; i < 4; i++) {
		intel_de_rmw(display, FDI_TX_CTL(pipe),
			     FDI_LINK_TRAIN_VOL_EMP_MASK, snb_b_fdi_train_param[i]);
		intel_de_posting_read(display, FDI_TX_CTL(pipe));
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(display, reg);
			drm_dbg_kms(display->drm, "FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_SYMBOL_LOCK) {
				intel_de_write(display, reg,
					       temp | FDI_RX_SYMBOL_LOCK);
				drm_dbg_kms(display->drm,
					    "FDI train 2 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		drm_err(display->drm, "FDI train 2 fail!\n");

	drm_dbg_kms(display->drm, "FDI train done.\n");
}
711 
/* Manual link training for Ivy Bridge A0 parts */
static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
				      const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, j;

	/* set up B/C lane sharing before touching the link */
	ivb_update_fdi_bc_bifurcation(crtc_state);

	/*
	 * Write the TU size bits before fdi link training, so that error
	 * detection works.
	 */
	intel_de_write(display, FDI_RX_TUSIZE1(pipe),
		       intel_de_read(display, PIPE_DATA_M1(display, pipe)) & TU_SIZE_MASK);

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = intel_de_read(display, reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	intel_de_write(display, reg, temp);

	intel_de_posting_read(display, reg);
	udelay(150);

	drm_dbg_kms(display->drm, "FDI_RX_IIR before link train 0x%x\n",
		    intel_de_read(display, FDI_RX_IIR(pipe)));

	/* Try each vswing and preemphasis setting twice before moving on */
	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
		/* disable first in case we need to retry */
		reg = FDI_TX_CTL(pipe);
		temp = intel_de_read(display, reg);
		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
		temp &= ~FDI_TX_ENABLE;
		intel_de_write(display, reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = intel_de_read(display, reg);
		temp &= ~FDI_LINK_TRAIN_AUTO;
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp &= ~FDI_RX_ENABLE;
		intel_de_write(display, reg, temp);

		/* enable CPU FDI TX and PCH FDI RX */
		reg = FDI_TX_CTL(pipe);
		temp = intel_de_read(display, reg);
		temp &= ~FDI_DP_PORT_WIDTH_MASK;
		temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* j/2: each vswing/emphasis value gets two attempts */
		temp |= snb_b_fdi_train_param[j/2];
		temp |= FDI_COMPOSITE_SYNC;
		intel_de_write(display, reg, temp | FDI_TX_ENABLE);

		intel_de_write(display, FDI_RX_MISC(pipe),
			       FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

		reg = FDI_RX_CTL(pipe);
		temp = intel_de_read(display, reg);
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
		temp |= FDI_COMPOSITE_SYNC;
		intel_de_write(display, reg, temp | FDI_RX_ENABLE);

		intel_de_posting_read(display, reg);
		udelay(1); /* should be 0.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(display, reg);
			drm_dbg_kms(display->drm, "FDI_RX_IIR 0x%x\n", temp);

			/* second read presumably catches a lock that
			 * latched just after the first read */
			if (temp & FDI_RX_BIT_LOCK ||
			    (intel_de_read(display, reg) & FDI_RX_BIT_LOCK)) {
				intel_de_write(display, reg,
					       temp | FDI_RX_BIT_LOCK);
				drm_dbg_kms(display->drm,
					    "FDI train 1 done, level %i.\n",
					    i);
				break;
			}
			udelay(1); /* should be 0.5us */
		}
		if (i == 4) {
			drm_dbg_kms(display->drm,
				    "FDI train 1 fail on vswing %d\n", j / 2);
			continue;
		}

		/* Train 2 */
		intel_de_rmw(display, FDI_TX_CTL(pipe),
			     FDI_LINK_TRAIN_NONE_IVB,
			     FDI_LINK_TRAIN_PATTERN_2_IVB);
		intel_de_rmw(display, FDI_RX_CTL(pipe),
			     FDI_LINK_TRAIN_PATTERN_MASK_CPT,
			     FDI_LINK_TRAIN_PATTERN_2_CPT);
		intel_de_posting_read(display, FDI_RX_CTL(pipe));
		udelay(2); /* should be 1.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(display, reg);
			drm_dbg_kms(display->drm, "FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_SYMBOL_LOCK ||
			    (intel_de_read(display, reg) & FDI_RX_SYMBOL_LOCK)) {
				intel_de_write(display, reg,
					       temp | FDI_RX_SYMBOL_LOCK);
				drm_dbg_kms(display->drm,
					    "FDI train 2 done, level %i.\n",
					    i);
				/* fully trained, skip remaining retries */
				goto train_done;
			}
			udelay(2); /* should be 1.5us */
		}
		if (i == 4)
			drm_dbg_kms(display->drm,
				    "FDI train 2 fail on vswing %d\n", j / 2);
	}

train_done:
	drm_dbg_kms(display->drm, "FDI train done.\n");
}
839 
/* Starting with Haswell, different DDI ports can work in FDI mode for
 * connection to the PCH-located connectors. For this, it is necessary to train
 * both the DDI port and PCH receiver for the desired DDI buffer settings.
 *
 * The recommended port to work in FDI mode is DDI E, which we use here. Also,
 * please note that when FDI mode is active on DDI E, it shares 2 lines with
 * DDI A (which is used for eDP)
 */
void hsw_fdi_link_train(struct intel_encoder *encoder,
			const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	u32 temp, i, rx_ctl_val;
	int n_entries;

	/* number of buffer translation entries bounds the training loop */
	encoder->get_buf_trans(encoder, crtc_state, &n_entries);

	hsw_prepare_dp_ddi_buffers(encoder, crtc_state);

	/* Set the FDI_RX_MISC pwrdn lanes and the 2 workarounds listed at the
	 * mode set "sequence for CRT port" document:
	 * - TP1 to TP2 time with the default value
	 * - FDI delay to 90h
	 *
	 * WaFDIAutoLinkSetTimingOverrride:hsw
	 */
	intel_de_write(display, FDI_RX_MISC(PIPE_A),
		       FDI_RX_PWRDN_LANE1_VAL(2) |
		       FDI_RX_PWRDN_LANE0_VAL(2) |
		       FDI_RX_TP1_TO_TP2_48 |
		       FDI_RX_FDI_DELAY_90);

	/* Enable the PCH Receiver FDI PLL */
	rx_ctl_val = display->fdi.rx_config | FDI_RX_ENHANCE_FRAME_ENABLE |
		     FDI_RX_PLL_ENABLE |
		     FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	intel_de_write(display, FDI_RX_CTL(PIPE_A), rx_ctl_val);
	intel_de_posting_read(display, FDI_RX_CTL(PIPE_A));
	udelay(220);

	/* Switch from Rawclk to PCDclk */
	rx_ctl_val |= FDI_PCDCLK;
	intel_de_write(display, FDI_RX_CTL(PIPE_A), rx_ctl_val);

	/* Configure Port Clock Select */
	drm_WARN_ON(display->drm, crtc_state->intel_dpll->info->id != DPLL_ID_SPLL);
	intel_ddi_enable_clock(encoder, crtc_state);

	/* Start the training iterating through available voltages and emphasis,
	 * testing each value twice. */
	for (i = 0; i < n_entries * 2; i++) {
		/* Configure DP_TP_CTL with auto-training */
		intel_de_write(display, DP_TP_CTL(PORT_E),
			       DP_TP_CTL_FDI_AUTOTRAIN |
			       DP_TP_CTL_ENHANCED_FRAME_ENABLE |
			       DP_TP_CTL_LINK_TRAIN_PAT1 |
			       DP_TP_CTL_ENABLE);

		/* Configure and enable DDI_BUF_CTL for DDI E with next voltage.
		 * DDI E does not support port reversal, the functionality is
		 * achieved on the PCH side in FDI_RX_CTL, so no need to set the
		 * port reversal bit */
		intel_de_write(display, DDI_BUF_CTL(PORT_E),
			       DDI_BUF_CTL_ENABLE |
			       ((crtc_state->fdi_lanes - 1) << 1) |
			       DDI_BUF_TRANS_SELECT(i / 2));
		intel_de_posting_read(display, DDI_BUF_CTL(PORT_E));

		udelay(600);

		/* Program PCH FDI Receiver TU */
		intel_de_write(display, FDI_RX_TUSIZE1(PIPE_A), TU_SIZE(64));

		/* Enable PCH FDI Receiver with auto-training */
		rx_ctl_val |= FDI_RX_ENABLE | FDI_LINK_TRAIN_AUTO;
		intel_de_write(display, FDI_RX_CTL(PIPE_A), rx_ctl_val);
		intel_de_posting_read(display, FDI_RX_CTL(PIPE_A));

		/* Wait for FDI receiver lane calibration */
		udelay(30);

		/* Unset FDI_RX_MISC pwrdn lanes */
		intel_de_rmw(display, FDI_RX_MISC(PIPE_A),
			     FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK, 0);
		intel_de_posting_read(display, FDI_RX_MISC(PIPE_A));

		/* Wait for FDI auto training time */
		udelay(5);

		temp = intel_de_read(display, DP_TP_STATUS(PORT_E));
		if (temp & DP_TP_STATUS_AUTOTRAIN_DONE) {
			drm_dbg_kms(display->drm,
				    "FDI link training done on step %d\n", i);
			break;
		}

		/*
		 * Leave things enabled even if we failed to train FDI.
		 * Results in less fireworks from the state checker.
		 */
		if (i == n_entries * 2 - 1) {
			drm_err(display->drm, "FDI link training failed!\n");
			break;
		}

		/* training failed at this setting: tear down and retry */
		rx_ctl_val &= ~FDI_RX_ENABLE;
		intel_de_write(display, FDI_RX_CTL(PIPE_A), rx_ctl_val);
		intel_de_posting_read(display, FDI_RX_CTL(PIPE_A));

		intel_de_rmw(display, DDI_BUF_CTL(PORT_E), DDI_BUF_CTL_ENABLE, 0);
		intel_de_posting_read(display, DDI_BUF_CTL(PORT_E));

		/* Disable DP_TP_CTL and FDI_RX_CTL and retry */
		intel_de_rmw(display, DP_TP_CTL(PORT_E), DP_TP_CTL_ENABLE, 0);
		intel_de_posting_read(display, DP_TP_CTL(PORT_E));

		intel_wait_ddi_buf_idle(display, PORT_E);

		/* Reset FDI_RX_MISC pwrdn lanes */
		intel_de_rmw(display, FDI_RX_MISC(PIPE_A),
			     FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK,
			     FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2));
		intel_de_posting_read(display, FDI_RX_MISC(PIPE_A));
	}

	/* Enable normal pixel sending for FDI */
	intel_de_write(display, DP_TP_CTL(PORT_E),
		       DP_TP_CTL_FDI_AUTOTRAIN |
		       DP_TP_CTL_LINK_TRAIN_NORMAL |
		       DP_TP_CTL_ENHANCED_FRAME_ENABLE |
		       DP_TP_CTL_ENABLE);
}
972 
/*
 * Disable the HSW FDI link on PIPE_A/PORT_E: stop the FDI receiver,
 * disable and drain the DDI buffer, turn off the port clock, power
 * down the RX lanes and finally stop PCDclk and the RX PLL.
 * The register write order follows the Bspec disable sequence.
 */
void hsw_fdi_disable(struct intel_encoder *encoder)
{
	struct intel_display *display = to_intel_display(encoder);

	/*
	 * Bspec lists this as both step 13 (before DDI_BUF_CTL disable)
	 * and step 18 (after clearing PORT_CLK_SEL). Based on a BUN,
	 * step 13 is the correct place for it. Step 18 is where it was
	 * originally before the BUN.
	 */
	intel_de_rmw(display, FDI_RX_CTL(PIPE_A), FDI_RX_ENABLE, 0);
	/* Disable the PORT_E DDI buffer and wait for it to go idle */
	intel_de_rmw(display, DDI_BUF_CTL(PORT_E), DDI_BUF_CTL_ENABLE, 0);
	intel_wait_ddi_buf_idle(display, PORT_E);
	/* Turn off the DDI port clock (clears PORT_CLK_SEL, see above) */
	intel_ddi_disable_clock(encoder);
	/* Power down both FDI RX lanes */
	intel_de_rmw(display, FDI_RX_MISC(PIPE_A),
		     FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK,
		     FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2));
	/* Switch FDI RX back off PCDclk, then shut down its PLL */
	intel_de_rmw(display, FDI_RX_CTL(PIPE_A), FDI_PCDCLK, 0);
	intel_de_rmw(display, FDI_RX_CTL(PIPE_A), FDI_RX_PLL_ENABLE, 0);
}
993 
/*
 * Enable the FDI PLLs for an ILK-style (PCH-based) FDI link: bring up
 * the PCH FDI RX PLL, switch the RX side from the raw clock to PCDclk,
 * then make sure the CPU FDI TX PLL is on. Posting reads plus udelay()s
 * provide the required warmup times between steps.
 */
void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(display, reg);
	/* Clear the port width field and the BPC field (bits 18:16) */
	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	/* Copy the BPC setting from TRANSCONF into the FDI RX BPC field */
	temp |= (intel_de_read(display, TRANSCONF(display, pipe)) & TRANSCONF_BPC_MASK) << 11;
	intel_de_write(display, reg, temp | FDI_RX_PLL_ENABLE);

	intel_de_posting_read(display, reg);
	udelay(200);

	/* Switch from Rawclk to PCDclk */
	intel_de_rmw(display, reg, 0, FDI_PCDCLK);
	intel_de_posting_read(display, reg);
	udelay(200);

	/* Enable CPU FDI TX PLL, always on for Ironlake */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(display, reg);
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
		intel_de_write(display, reg, temp | FDI_TX_PLL_ENABLE);

		/* TX PLL warmup: flush the write, then wait 100 us */
		intel_de_posting_read(display, reg);
		udelay(100);
	}
}
1028 
/*
 * Undo ilk_fdi_pll_enable(): switch FDI RX back to the raw clock, then
 * shut down the CPU FDI TX PLL and the PCH FDI RX PLL, waiting 100 us
 * after each PLL disable for the clocks to stop.
 */
void ilk_fdi_pll_disable(struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(crtc);
	enum pipe pipe = crtc->pipe;

	/* Switch from PCDclk to Rawclk */
	intel_de_rmw(display, FDI_RX_CTL(pipe), FDI_PCDCLK, 0);

	/* Disable CPU FDI TX PLL */
	intel_de_rmw(display, FDI_TX_CTL(pipe), FDI_TX_PLL_ENABLE, 0);
	intel_de_posting_read(display, FDI_TX_CTL(pipe));
	udelay(100);

	/* Wait for the clocks to turn off. */
	intel_de_rmw(display, FDI_RX_CTL(pipe), FDI_RX_PLL_ENABLE, 0);
	intel_de_posting_read(display, FDI_RX_CTL(pipe));
	udelay(100);
}
1047 
/*
 * Disable the CPU FDI transmitter and PCH FDI receiver for @crtc,
 * leaving both sides parked in training pattern 1 so a later re-enable
 * can start link training from a known state. The PLLs stay on; they
 * are shut down separately by ilk_fdi_pll_disable().
 */
void ilk_fdi_disable(struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(crtc);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* disable CPU FDI tx and PCH FDI rx */
	intel_de_rmw(display, FDI_TX_CTL(pipe), FDI_TX_ENABLE, 0);
	intel_de_posting_read(display, FDI_TX_CTL(pipe));

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(display, reg);
	/* Refresh the RX BPC field (bits 18:16) from TRANSCONF while disabling */
	temp &= ~(0x7 << 16);
	temp |= (intel_de_read(display, TRANSCONF(display, pipe)) & TRANSCONF_BPC_MASK) << 11;
	intel_de_write(display, reg, temp & ~FDI_RX_ENABLE);

	intel_de_posting_read(display, reg);
	udelay(100);

	/* Ironlake workaround, disable clock pointer after downing FDI */
	if (HAS_PCH_IBX(display))
		intel_de_write(display, FDI_RX_CHICKEN(pipe),
			       FDI_RX_PHASE_SYNC_POINTER_OVR);

	/* still set train pattern 1 */
	intel_de_rmw(display, FDI_TX_CTL(pipe),
		     FDI_LINK_TRAIN_NONE, FDI_LINK_TRAIN_PATTERN_1);

	/* CPT and non-CPT PCHs encode the training pattern differently */
	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(display, reg);
	if (HAS_PCH_CPT(display)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	/* BPC in FDI rx is consistent with that in TRANSCONF */
	temp &= ~(0x07 << 16);
	temp |= (intel_de_read(display, TRANSCONF(display, pipe)) & TRANSCONF_BPC_MASK) << 11;
	intel_de_write(display, reg, temp);

	intel_de_posting_read(display, reg);
	udelay(100);
}
1094 
/* Ironlake FDI link training vtable */
static const struct intel_fdi_funcs ilk_funcs = {
	.fdi_link_train = ilk_fdi_link_train,
};
1098 
/* Sandybridge (gen6) FDI link training vtable */
static const struct intel_fdi_funcs gen6_funcs = {
	.fdi_link_train = gen6_fdi_link_train,
};
1102 
/* Ivybridge FDI link training vtable (manual training variant) */
static const struct intel_fdi_funcs ivb_funcs = {
	.fdi_link_train = ivb_manual_fdi_link_train,
};
1106 
1107 void
1108 intel_fdi_init_hook(struct intel_display *display)
1109 {
1110 	if (display->platform.ironlake) {
1111 		display->funcs.fdi = &ilk_funcs;
1112 	} else if (display->platform.sandybridge) {
1113 		display->funcs.fdi = &gen6_funcs;
1114 	} else if (display->platform.ivybridge) {
1115 		/* FIXME: detect B0+ stepping and use auto training */
1116 		display->funcs.fdi = &ivb_funcs;
1117 	}
1118 }
1119