xref: /linux/drivers/gpu/drm/i915/display/intel_dpio_phy.c (revision fcab107abe1ab5be9dbe874baa722372da8f4f73)
1 /*
2  * Copyright © 2014-2016 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include "bxt_dpio_phy_regs.h"
25 #include "i915_drv.h"
26 #include "i915_reg.h"
27 #include "intel_ddi.h"
28 #include "intel_ddi_buf_trans.h"
29 #include "intel_de.h"
30 #include "intel_display_power_well.h"
31 #include "intel_display_types.h"
32 #include "intel_dp.h"
33 #include "intel_dpio_phy.h"
34 #include "vlv_dpio_phy_regs.h"
35 #include "vlv_sideband.h"
36 
37 /**
38  * DOC: DPIO
39  *
40  * VLV, CHV and BXT have slightly peculiar display PHYs for driving DP/HDMI
41  * ports. DPIO is the name given to such a display PHY. These PHYs
42  * don't follow the standard programming model using direct MMIO
43  * registers, and instead their registers must be accessed through IOSF
44  * sideband. VLV has one such PHY for driving ports B and C, and CHV
45  * adds another PHY for driving port D. Each PHY responds to specific
46  * IOSF-SB port.
47  *
48  * Each display PHY is made up of one or two channels. Each channel
49  * houses a common lane part which contains the PLL and other common
50  * logic. CH0 common lane also contains the IOSF-SB logic for the
51  * Common Register Interface (CRI) ie. the DPIO registers. CRI clock
52  * must be running when any DPIO registers are accessed.
53  *
54  * In addition to having their own registers, the PHYs are also
55  * controlled through some dedicated signals from the display
56  * controller. These include PLL reference clock enable, PLL enable,
57  * and CRI clock selection, for example.
58  *
 * Each channel also has two splines (also called data lanes), and
60  * each spline is made up of one Physical Access Coding Sub-Layer
61  * (PCS) block and two TX lanes. So each channel has two PCS blocks
62  * and four TX lanes. The TX lanes are used as DP lanes or TMDS
63  * data/clock pairs depending on the output type.
64  *
65  * Additionally the PHY also contains an AUX lane with AUX blocks
66  * for each channel. This is used for DP AUX communication, but
67  * this fact isn't really relevant for the driver since AUX is
68  * controlled from the display controller side. No DPIO registers
 * need to be accessed during AUX communication.
70  *
71  * Generally on VLV/CHV the common lane corresponds to the pipe and
72  * the spline (PCS/TX) corresponds to the port.
73  *
74  * For dual channel PHY (VLV/CHV):
75  *
76  *  pipe A == CMN/PLL/REF CH0
77  *
78  *  pipe B == CMN/PLL/REF CH1
79  *
80  *  port B == PCS/TX CH0
81  *
82  *  port C == PCS/TX CH1
83  *
84  * This is especially important when we cross the streams
85  * ie. drive port B with pipe B, or port C with pipe A.
86  *
87  * For single channel PHY (CHV):
88  *
89  *  pipe C == CMN/PLL/REF CH0
90  *
91  *  port D == PCS/TX CH0
92  *
93  * On BXT the entire PHY channel corresponds to the port. That means
94  * the PLL is also now associated with the port rather than the pipe,
95  * and so the clock needs to be routed to the appropriate transcoder.
96  * Port A PLL is directly connected to transcoder EDP and port B/C
97  * PLLs can be routed to any transcoder A/B/C.
98  *
 * Note: DDI0 is digital port B, DDI1 is digital port C, and DDI2 is
100  * digital port D (CHV) or port A (BXT). ::
101  *
102  *
103  *     Dual channel PHY (VLV/CHV/BXT)
104  *     ---------------------------------
105  *     |      CH0      |      CH1      |
106  *     |  CMN/PLL/REF  |  CMN/PLL/REF  |
107  *     |---------------|---------------| Display PHY
108  *     | PCS01 | PCS23 | PCS01 | PCS23 |
109  *     |-------|-------|-------|-------|
110  *     |TX0|TX1|TX2|TX3|TX0|TX1|TX2|TX3|
111  *     ---------------------------------
112  *     |     DDI0      |     DDI1      | DP/HDMI ports
113  *     ---------------------------------
114  *
115  *     Single channel PHY (CHV/BXT)
116  *     -----------------
117  *     |      CH0      |
118  *     |  CMN/PLL/REF  |
119  *     |---------------| Display PHY
120  *     | PCS01 | PCS23 |
121  *     |-------|-------|
122  *     |TX0|TX1|TX2|TX3|
123  *     -----------------
124  *     |     DDI2      | DP/HDMI port
125  *     -----------------
126  */
127 
/**
 * struct bxt_dpio_phy_info - Hold info for a broxton DDI phy
 *
 * Static, per-platform description of one DPIO PHY: whether it has a
 * second channel, where its GRC calibration value comes from, and how
 * the punit powers it. Instances live in the bxt/glk tables below.
 */
struct bxt_dpio_phy_info {
	/**
	 * @dual_channel: true if this phy has a second channel.
	 */
	bool dual_channel;

	/**
	 * @rcomp_phy: If -1, indicates this phy has its own rcomp resistor.
	 * Otherwise the GRC value will be copied from the phy indicated by
	 * this field.
	 */
	enum dpio_phy rcomp_phy;

	/**
	 * @reset_delay: delay in us to wait before setting the common reset
	 * bit in BXT_PHY_CTL_FAMILY, which effectively enables the phy.
	 */
	int reset_delay;

	/**
	 * @pwron_mask: Mask with the appropriate bit set that would cause the
	 * punit to power this phy if written to BXT_P_CR_GT_DISP_PWRON.
	 */
	u32 pwron_mask;

	/**
	 * @channel: struct containing per channel information.
	 */
	struct {
		/**
		 * @channel.port: which port maps to this channel.
		 */
		enum port port;
	} channel[2];
};
166 
/*
 * BXT: PHY0 is dual channel and drives ports B/C; PHY1 is single channel
 * and drives port A. PHY0 has no rcomp resistor of its own, so its GRC
 * code is copied from PHY1 (see _bxt_dpio_phy_init()).
 */
static const struct bxt_dpio_phy_info bxt_dpio_phy_info[] = {
	[DPIO_PHY0] = {
		.dual_channel = true,
		.rcomp_phy = DPIO_PHY1,
		.pwron_mask = BIT(0),

		.channel = {
			[DPIO_CH0] = { .port = PORT_B },
			[DPIO_CH1] = { .port = PORT_C },
		}
	},
	[DPIO_PHY1] = {
		.dual_channel = false,
		.rcomp_phy = -1,
		.pwron_mask = BIT(1),

		.channel = {
			[DPIO_CH0] = { .port = PORT_A },
		}
	},
};
188 
/*
 * GLK: three single-channel PHYs, one per port (B/A/C). PHY1 holds the
 * rcomp resistor and donates its GRC code to PHY0 and PHY2. All need a
 * 20 us delay before releasing common reset.
 */
static const struct bxt_dpio_phy_info glk_dpio_phy_info[] = {
	[DPIO_PHY0] = {
		.dual_channel = false,
		.rcomp_phy = DPIO_PHY1,
		.pwron_mask = BIT(0),
		.reset_delay = 20,

		.channel = {
			[DPIO_CH0] = { .port = PORT_B },
		}
	},
	[DPIO_PHY1] = {
		.dual_channel = false,
		.rcomp_phy = -1,
		.pwron_mask = BIT(3),
		.reset_delay = 20,

		.channel = {
			[DPIO_CH0] = { .port = PORT_A },
		}
	},
	[DPIO_PHY2] = {
		.dual_channel = false,
		.rcomp_phy = DPIO_PHY1,
		.pwron_mask = BIT(1),
		.reset_delay = 20,

		.channel = {
			[DPIO_CH0] = { .port = PORT_C },
		}
	},
};
221 
222 static const struct bxt_dpio_phy_info *
223 bxt_get_phy_list(struct intel_display *display, int *count)
224 {
225 	if (display->platform.geminilake) {
226 		*count =  ARRAY_SIZE(glk_dpio_phy_info);
227 		return glk_dpio_phy_info;
228 	} else {
229 		*count =  ARRAY_SIZE(bxt_dpio_phy_info);
230 		return bxt_dpio_phy_info;
231 	}
232 }
233 
234 static const struct bxt_dpio_phy_info *
235 bxt_get_phy_info(struct intel_display *display, enum dpio_phy phy)
236 {
237 	int count;
238 	const struct bxt_dpio_phy_info *phy_list =
239 		bxt_get_phy_list(display, &count);
240 
241 	return &phy_list[phy];
242 }
243 
244 void bxt_port_to_phy_channel(struct intel_display *display, enum port port,
245 			     enum dpio_phy *phy, enum dpio_channel *ch)
246 {
247 	const struct bxt_dpio_phy_info *phy_info, *phys;
248 	int i, count;
249 
250 	phys = bxt_get_phy_list(display, &count);
251 
252 	for (i = 0; i < count; i++) {
253 		phy_info = &phys[i];
254 
255 		if (port == phy_info->channel[DPIO_CH0].port) {
256 			*phy = i;
257 			*ch = DPIO_CH0;
258 			return;
259 		}
260 
261 		if (phy_info->dual_channel &&
262 		    port == phy_info->channel[DPIO_CH1].port) {
263 			*phy = i;
264 			*ch = DPIO_CH1;
265 			return;
266 		}
267 	}
268 
269 	drm_WARN(display->drm, 1, "PHY not found for PORT %c",
270 		 port_name(port));
271 	*phy = DPIO_PHY0;
272 	*ch = DPIO_CH0;
273 }
274 
275 /*
276  * Like intel_de_rmw() but reads from a single per-lane register and
277  * writes to the group register to write the same value to all the lanes.
278  */
279 static u32 bxt_dpio_phy_rmw_grp(struct intel_display *display,
280 				i915_reg_t reg_single,
281 				i915_reg_t reg_group,
282 				u32 clear, u32 set)
283 {
284 	u32 old, val;
285 
286 	old = intel_de_read(display, reg_single);
287 	val = (old & ~clear) | set;
288 	intel_de_write(display, reg_group, val);
289 
290 	return old;
291 }
292 
293 void bxt_dpio_phy_set_signal_levels(struct intel_encoder *encoder,
294 				    const struct intel_crtc_state *crtc_state)
295 {
296 	struct intel_display *display = to_intel_display(encoder);
297 	const struct intel_ddi_buf_trans *trans;
298 	enum dpio_channel ch;
299 	enum dpio_phy phy;
300 	int lane, n_entries;
301 
302 	trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
303 	if (drm_WARN_ON_ONCE(display->drm, !trans))
304 		return;
305 
306 	bxt_port_to_phy_channel(display, encoder->port, &phy, &ch);
307 
308 	/*
309 	 * While we write to the group register to program all lanes at once we
310 	 * can read only lane registers and we pick lanes 0/1 for that.
311 	 */
312 	bxt_dpio_phy_rmw_grp(display, BXT_PORT_PCS_DW10_LN01(phy, ch),
313 			     BXT_PORT_PCS_DW10_GRP(phy, ch),
314 			     TX2_SWING_CALC_INIT | TX1_SWING_CALC_INIT, 0);
315 
316 	for (lane = 0; lane < crtc_state->lane_count; lane++) {
317 		int level = intel_ddi_level(encoder, crtc_state, lane);
318 
319 		intel_de_rmw(display, BXT_PORT_TX_DW2_LN(phy, ch, lane),
320 			     MARGIN_000_MASK | UNIQ_TRANS_SCALE_MASK,
321 			     MARGIN_000(trans->entries[level].bxt.margin) |
322 			     UNIQ_TRANS_SCALE(trans->entries[level].bxt.scale));
323 	}
324 
325 	for (lane = 0; lane < crtc_state->lane_count; lane++) {
326 		int level = intel_ddi_level(encoder, crtc_state, lane);
327 		u32 val;
328 
329 		intel_de_rmw(display, BXT_PORT_TX_DW3_LN(phy, ch, lane),
330 			     SCALE_DCOMP_METHOD,
331 			     trans->entries[level].bxt.enable ?
332 			     SCALE_DCOMP_METHOD : 0);
333 
334 		val = intel_de_read(display, BXT_PORT_TX_DW3_LN(phy, ch, lane));
335 		if ((val & UNIQUE_TRANGE_EN_METHOD) && !(val & SCALE_DCOMP_METHOD))
336 			drm_err(display->drm,
337 				"Disabled scaling while ouniqetrangenmethod was set");
338 	}
339 
340 	for (lane = 0; lane < crtc_state->lane_count; lane++) {
341 		int level = intel_ddi_level(encoder, crtc_state, lane);
342 
343 		intel_de_rmw(display, BXT_PORT_TX_DW4_LN(phy, ch, lane),
344 			     DE_EMPHASIS_MASK,
345 			     DE_EMPHASIS(trans->entries[level].bxt.deemphasis));
346 	}
347 
348 	bxt_dpio_phy_rmw_grp(display, BXT_PORT_PCS_DW10_LN01(phy, ch),
349 			     BXT_PORT_PCS_DW10_GRP(phy, ch),
350 			     0, TX2_SWING_CALC_INIT | TX1_SWING_CALC_INIT);
351 }
352 
353 bool bxt_dpio_phy_is_enabled(struct intel_display *display,
354 			     enum dpio_phy phy)
355 {
356 	const struct bxt_dpio_phy_info *phy_info;
357 
358 	phy_info = bxt_get_phy_info(display, phy);
359 
360 	if (!(intel_de_read(display, BXT_P_CR_GT_DISP_PWRON) & phy_info->pwron_mask))
361 		return false;
362 
363 	if ((intel_de_read(display, BXT_PORT_CL1CM_DW0(phy)) &
364 	     (PHY_POWER_GOOD | PHY_RESERVED)) != PHY_POWER_GOOD) {
365 		drm_dbg(display->drm,
366 			"DDI PHY %d powered, but power hasn't settled\n", phy);
367 
368 		return false;
369 	}
370 
371 	if (!(intel_de_read(display, BXT_PHY_CTL_FAMILY(phy)) & COMMON_RESET_DIS)) {
372 		drm_dbg(display->drm,
373 			"DDI PHY %d powered, but still in reset\n", phy);
374 
375 		return false;
376 	}
377 
378 	return true;
379 }
380 
381 static u32 bxt_get_grc(struct intel_display *display, enum dpio_phy phy)
382 {
383 	u32 val = intel_de_read(display, BXT_PORT_REF_DW6(phy));
384 
385 	return REG_FIELD_GET(GRC_CODE_MASK, val);
386 }
387 
388 static void bxt_phy_wait_grc_done(struct intel_display *display,
389 				  enum dpio_phy phy)
390 {
391 	if (intel_de_wait_for_set(display, BXT_PORT_REF_DW3(phy), GRC_DONE, 10))
392 		drm_err(display->drm, "timeout waiting for PHY%d GRC\n", phy);
393 }
394 
/*
 * Power on and program a single PHY. If the PHY is already enabled with a
 * valid register state this is a no-op; otherwise the full power-on and
 * programming sequence is (re)run. For PHYs without their own rcomp
 * resistor, the GRC code is copied from the donor PHY, which the caller
 * must have powered up already (see bxt_dpio_phy_init()).
 */
static void _bxt_dpio_phy_init(struct intel_display *display, enum dpio_phy phy)
{
	const struct bxt_dpio_phy_info *phy_info;
	u32 val;

	phy_info = bxt_get_phy_info(display, phy);

	if (bxt_dpio_phy_is_enabled(display, phy)) {
		/* Still read out the GRC value for state verification */
		if (phy_info->rcomp_phy != -1)
			display->state.bxt_phy_grc = bxt_get_grc(display, phy);

		if (bxt_dpio_phy_verify_state(display, phy)) {
			drm_dbg(display->drm, "DDI PHY %d already enabled, "
				"won't reprogram it\n", phy);
			return;
		}

		drm_dbg(display->drm,
			"DDI PHY %d enabled with invalid state, "
			"force reprogramming it\n", phy);
	}

	/* Ask the punit to power up the PHY */
	intel_de_rmw(display, BXT_P_CR_GT_DISP_PWRON, 0, phy_info->pwron_mask);

	/*
	 * The PHY registers start out inaccessible and respond to reads with
	 * all 1s.  Eventually they become accessible as they power up, then
	 * the reserved bit will give the default 0.  Poll on the reserved bit
	 * becoming 0 to find when the PHY is accessible.
	 * The flag should get set in 100us according to the HW team, but
	 * use 1ms due to occasional timeouts observed with that.
	 */
	if (intel_de_wait_fw(display, BXT_PORT_CL1CM_DW0(phy),
			     PHY_RESERVED | PHY_POWER_GOOD, PHY_POWER_GOOD, 1))
		drm_err(display->drm, "timeout during PHY%d power on\n",
			phy);

	/* Program PLL Rcomp code offset */
	intel_de_rmw(display, BXT_PORT_CL1CM_DW9(phy),
		     IREF0RC_OFFSET_MASK, IREF0RC_OFFSET(0xE4));

	intel_de_rmw(display, BXT_PORT_CL1CM_DW10(phy),
		     IREF1RC_OFFSET_MASK, IREF1RC_OFFSET(0xE4));

	/* Program power gating */
	intel_de_rmw(display, BXT_PORT_CL1CM_DW28(phy), 0,
		     OCL1_POWER_DOWN_EN | DW28_OLDO_DYN_PWR_DOWN_EN | SUS_CLK_CONFIG);

	if (phy_info->dual_channel)
		intel_de_rmw(display, BXT_PORT_CL2CM_DW6(phy), 0,
			     DW6_OLDO_DYN_PWR_DOWN_EN);

	if (phy_info->rcomp_phy != -1) {
		u32 grc_code;

		bxt_phy_wait_grc_done(display, phy_info->rcomp_phy);

		/*
		 * PHY0 isn't connected to an RCOMP resistor so copy over
		 * the corresponding calibrated value from PHY1, and disable
		 * the automatic calibration on PHY0.
		 */
		val = bxt_get_grc(display, phy_info->rcomp_phy);
		display->state.bxt_phy_grc = val;

		grc_code = GRC_CODE_FAST(val) |
			GRC_CODE_SLOW(val) |
			GRC_CODE_NOM(val);
		intel_de_write(display, BXT_PORT_REF_DW6(phy), grc_code);
		intel_de_rmw(display, BXT_PORT_REF_DW8(phy),
			     0, GRC_DIS | GRC_RDY_OVRD);
	}

	/* GLK needs a settle delay before releasing common reset */
	if (phy_info->reset_delay)
		udelay(phy_info->reset_delay);

	/* Releasing common reset effectively enables the PHY */
	intel_de_rmw(display, BXT_PHY_CTL_FAMILY(phy), 0, COMMON_RESET_DIS);
}
474 
475 void bxt_dpio_phy_uninit(struct intel_display *display, enum dpio_phy phy)
476 {
477 	const struct bxt_dpio_phy_info *phy_info;
478 
479 	phy_info = bxt_get_phy_info(display, phy);
480 
481 	intel_de_rmw(display, BXT_PHY_CTL_FAMILY(phy), COMMON_RESET_DIS, 0);
482 
483 	intel_de_rmw(display, BXT_P_CR_GT_DISP_PWRON, phy_info->pwron_mask, 0);
484 }
485 
486 void bxt_dpio_phy_init(struct intel_display *display, enum dpio_phy phy)
487 {
488 	const struct bxt_dpio_phy_info *phy_info = bxt_get_phy_info(display, phy);
489 	enum dpio_phy rcomp_phy = phy_info->rcomp_phy;
490 	bool was_enabled;
491 
492 	lockdep_assert_held(&display->power.domains.lock);
493 
494 	was_enabled = true;
495 	if (rcomp_phy != -1)
496 		was_enabled = bxt_dpio_phy_is_enabled(display, rcomp_phy);
497 
498 	/*
499 	 * We need to copy the GRC calibration value from rcomp_phy,
500 	 * so make sure it's powered up.
501 	 */
502 	if (!was_enabled)
503 		_bxt_dpio_phy_init(display, rcomp_phy);
504 
505 	_bxt_dpio_phy_init(display, phy);
506 
507 	if (!was_enabled)
508 		bxt_dpio_phy_uninit(display, rcomp_phy);
509 }
510 
/*
 * Verify that (reg & mask) == expected, returning true on match. On
 * mismatch, log the register (named via the printf-style @reg_fmt and
 * varargs, forwarded through %pV), the current value and the value that
 * was expected, then return false.
 */
static bool __printf(6, 7)
__phy_reg_verify_state(struct intel_display *display, enum dpio_phy phy,
		       i915_reg_t reg, u32 mask, u32 expected,
		       const char *reg_fmt, ...)
{
	struct va_format vaf;
	va_list args;
	u32 val;

	val = intel_de_read(display, reg);
	if ((val & mask) == expected)
		return true;

	/* forward the register-name format string + args via %pV */
	va_start(args, reg_fmt);
	vaf.fmt = reg_fmt;
	vaf.va = &args;

	drm_dbg(display->drm, "DDI PHY %d reg %pV [%08x] state mismatch: "
			 "current %08x, expected %08x (mask %08x)\n",
			 phy, &vaf, reg.reg, val, (val & ~mask) | expected,
			 mask);

	va_end(args);

	return false;
}
537 
/*
 * Verify that the PHY's register state matches what _bxt_dpio_phy_init()
 * would have programmed: enabled, Rcomp code offsets, power gating bits,
 * and (for PHYs without their own rcomp resistor) the copied GRC code
 * with automatic calibration overridden. Returns true if everything
 * checks out.
 */
bool bxt_dpio_phy_verify_state(struct intel_display *display,
			       enum dpio_phy phy)
{
	const struct bxt_dpio_phy_info *phy_info;
	u32 mask;
	bool ok;

	phy_info = bxt_get_phy_info(display, phy);

/* check one register field, logging and clearing 'ok' on mismatch */
#define _CHK(reg, mask, exp, fmt, ...)					\
	__phy_reg_verify_state(display, phy, reg, mask, exp, fmt,	\
			       ## __VA_ARGS__)

	if (!bxt_dpio_phy_is_enabled(display, phy))
		return false;

	ok = true;

	/* PLL Rcomp code offset */
	ok &= _CHK(BXT_PORT_CL1CM_DW9(phy),
		   IREF0RC_OFFSET_MASK, IREF0RC_OFFSET(0xe4),
		   "BXT_PORT_CL1CM_DW9(%d)", phy);
	ok &= _CHK(BXT_PORT_CL1CM_DW10(phy),
		   IREF1RC_OFFSET_MASK, IREF1RC_OFFSET(0xe4),
		   "BXT_PORT_CL1CM_DW10(%d)", phy);

	/* Power gating */
	mask = OCL1_POWER_DOWN_EN | DW28_OLDO_DYN_PWR_DOWN_EN | SUS_CLK_CONFIG;
	ok &= _CHK(BXT_PORT_CL1CM_DW28(phy), mask, mask,
		   "BXT_PORT_CL1CM_DW28(%d)", phy);

	if (phy_info->dual_channel)
		ok &= _CHK(BXT_PORT_CL2CM_DW6(phy),
			   DW6_OLDO_DYN_PWR_DOWN_EN, DW6_OLDO_DYN_PWR_DOWN_EN,
			   "BXT_PORT_CL2CM_DW6(%d)", phy);

	if (phy_info->rcomp_phy != -1) {
		/* GRC code copied from the donor PHY at init time */
		u32 grc_code = display->state.bxt_phy_grc;

		grc_code = GRC_CODE_FAST(grc_code) |
			GRC_CODE_SLOW(grc_code) |
			GRC_CODE_NOM(grc_code);
		mask = GRC_CODE_FAST_MASK | GRC_CODE_SLOW_MASK |
		       GRC_CODE_NOM_MASK;
		ok &= _CHK(BXT_PORT_REF_DW6(phy), mask, grc_code,
			   "BXT_PORT_REF_DW6(%d)", phy);

		mask = GRC_DIS | GRC_RDY_OVRD;
		ok &= _CHK(BXT_PORT_REF_DW8(phy), mask, mask,
			   "BXT_PORT_REF_DW8(%d)", phy);
	}

	return ok;
#undef _CHK
}
593 
594 u8
595 bxt_dpio_phy_calc_lane_lat_optim_mask(u8 lane_count)
596 {
597 	switch (lane_count) {
598 	case 1:
599 		return 0;
600 	case 2:
601 		return BIT(2) | BIT(0);
602 	case 4:
603 		return BIT(3) | BIT(2) | BIT(0);
604 	default:
605 		MISSING_CASE(lane_count);
606 
607 		return 0;
608 	}
609 }
610 
611 void bxt_dpio_phy_set_lane_optim_mask(struct intel_encoder *encoder,
612 				      u8 lane_lat_optim_mask)
613 {
614 	struct intel_display *display = to_intel_display(encoder);
615 	enum port port = encoder->port;
616 	enum dpio_phy phy;
617 	enum dpio_channel ch;
618 	int lane;
619 
620 	bxt_port_to_phy_channel(display, port, &phy, &ch);
621 
622 	for (lane = 0; lane < 4; lane++) {
623 		/*
624 		 * Note that on CHV this flag is called UPAR, but has
625 		 * the same function.
626 		 */
627 		intel_de_rmw(display, BXT_PORT_TX_DW14_LN(phy, ch, lane),
628 			     LATENCY_OPTIM,
629 			     lane_lat_optim_mask & BIT(lane) ? LATENCY_OPTIM : 0);
630 	}
631 }
632 
633 u8
634 bxt_dpio_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder)
635 {
636 	struct intel_display *display = to_intel_display(encoder);
637 	enum port port = encoder->port;
638 	enum dpio_phy phy;
639 	enum dpio_channel ch;
640 	int lane;
641 	u8 mask;
642 
643 	bxt_port_to_phy_channel(display, port, &phy, &ch);
644 
645 	mask = 0;
646 	for (lane = 0; lane < 4; lane++) {
647 		u32 val = intel_de_read(display,
648 					BXT_PORT_TX_DW14_LN(phy, ch, lane));
649 
650 		if (val & LATENCY_OPTIM)
651 			mask |= BIT(lane);
652 	}
653 
654 	return mask;
655 }
656 
657 enum dpio_channel vlv_dig_port_to_channel(struct intel_digital_port *dig_port)
658 {
659 	switch (dig_port->base.port) {
660 	default:
661 		MISSING_CASE(dig_port->base.port);
662 		fallthrough;
663 	case PORT_B:
664 	case PORT_D:
665 		return DPIO_CH0;
666 	case PORT_C:
667 		return DPIO_CH1;
668 	}
669 }
670 
671 enum dpio_phy vlv_dig_port_to_phy(struct intel_digital_port *dig_port)
672 {
673 	switch (dig_port->base.port) {
674 	default:
675 		MISSING_CASE(dig_port->base.port);
676 		fallthrough;
677 	case PORT_B:
678 	case PORT_C:
679 		return DPIO_PHY0;
680 	case PORT_D:
681 		return DPIO_PHY1;
682 	}
683 }
684 
685 enum dpio_phy vlv_pipe_to_phy(enum pipe pipe)
686 {
687 	switch (pipe) {
688 	default:
689 		MISSING_CASE(pipe);
690 		fallthrough;
691 	case PIPE_A:
692 	case PIPE_B:
693 		return DPIO_PHY0;
694 	case PIPE_C:
695 		return DPIO_PHY1;
696 	}
697 }
698 
699 enum dpio_channel vlv_pipe_to_channel(enum pipe pipe)
700 {
701 	switch (pipe) {
702 	default:
703 		MISSING_CASE(pipe);
704 		fallthrough;
705 	case PIPE_A:
706 	case PIPE_C:
707 		return DPIO_CH0;
708 	case PIPE_B:
709 		return DPIO_CH1;
710 	}
711 }
712 
/*
 * chv_set_phy_signal_level - program CHV PHY swing/de-emphasis
 * @encoder: encoder whose port/channel to program
 * @crtc_state: provides the lane count
 * @deemph_reg_value: de-emphasis register value (caller-computed)
 * @margin_reg_value: swing margin register value (caller-computed)
 * @uniq_trans_scale: enable the unique transition scale
 *
 * Programs swing calculation registers, per-lane de-emphasis, margin and
 * transition scale via DPIO sideband, following the sequence: clear calc
 * init, program values, restart swing calculation. PCS23 registers are
 * only touched when more than 2 lanes are in use.
 */
void chv_set_phy_signal_level(struct intel_encoder *encoder,
			      const struct intel_crtc_state *crtc_state,
			      u32 deemph_reg_value, u32 margin_reg_value,
			      bool uniq_trans_scale)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
	enum dpio_phy phy = vlv_dig_port_to_phy(dig_port);
	u32 val;
	int i;

	vlv_dpio_get(dev_priv);

	/* Clear calc init */
	val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW10(ch));
	val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
	val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
	val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
	vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW10(ch), val);

	if (crtc_state->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW10(ch));
		val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
		val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
		val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
		vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW10(ch), val);
	}

	/* Select the 000 margin setting in both PCS blocks */
	val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW9(ch));
	val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
	val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
	vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW9(ch), val);

	if (crtc_state->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW9(ch));
		val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
		val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
		vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW9(ch), val);
	}

	/* Program swing deemph */
	for (i = 0; i < crtc_state->lane_count; i++) {
		val = vlv_dpio_read(dev_priv, phy, CHV_TX_DW4(ch, i));
		val &= ~DPIO_SWING_DEEMPH9P5_MASK;
		val |= DPIO_SWING_DEEMPH9P5(deemph_reg_value);
		vlv_dpio_write(dev_priv, phy, CHV_TX_DW4(ch, i), val);
	}

	/* Program swing margin */
	for (i = 0; i < crtc_state->lane_count; i++) {
		val = vlv_dpio_read(dev_priv, phy, CHV_TX_DW2(ch, i));

		val &= ~DPIO_SWING_MARGIN000_MASK;
		val |= DPIO_SWING_MARGIN000(margin_reg_value);

		/*
		 * Supposedly this value shouldn't matter when unique transition
		 * scale is disabled, but in fact it does matter. Let's just
		 * always program the same value and hope it's OK.
		 */
		val &= ~DPIO_UNIQ_TRANS_SCALE_MASK;
		val |= DPIO_UNIQ_TRANS_SCALE(0x9a);

		vlv_dpio_write(dev_priv, phy, CHV_TX_DW2(ch, i), val);
	}

	/*
	 * The document said it needs to set bit 27 for ch0 and bit 26
	 * for ch1. Might be a typo in the doc.
	 * For now, for this unique transition scale selection, set bit
	 * 27 for ch0 and ch1.
	 */
	for (i = 0; i < crtc_state->lane_count; i++) {
		val = vlv_dpio_read(dev_priv, phy, CHV_TX_DW3(ch, i));
		if (uniq_trans_scale)
			val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
		else
			val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
		vlv_dpio_write(dev_priv, phy, CHV_TX_DW3(ch, i), val);
	}

	/* Start swing calculation */
	val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW10(ch));
	val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
	vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW10(ch), val);

	if (crtc_state->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW10(ch));
		val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
		vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW10(ch), val);
	}

	vlv_dpio_put(dev_priv);
}
808 
/*
 * Assert (@reset == true) or deassert the data lane soft reset for the
 * encoder's channel: lane resets in PCS_DW0 plus the clock soft reset in
 * PCS_DW1, with the soft reset request override enabled. PCS23 is only
 * touched when more than 2 lanes are in use. Caller must hold the DPIO
 * sideband (see chv_data_lane_soft_reset() for the locked wrapper).
 */
static void __chv_data_lane_soft_reset(struct intel_encoder *encoder,
				       const struct intel_crtc_state *crtc_state,
				       bool reset)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
	enum dpio_phy phy = vlv_dig_port_to_phy(dig_port);
	u32 val;

	/* lane resets are active low: clear the bits to assert reset */
	val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW0(ch));
	if (reset)
		val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
	else
		val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
	vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW0(ch), val);

	if (crtc_state->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW0(ch));
		if (reset)
			val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
		else
			val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
		vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW0(ch), val);
	}

	val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW1(ch));
	val |= CHV_PCS_REQ_SOFTRESET_EN;
	if (reset)
		val &= ~DPIO_PCS_CLK_SOFT_RESET;
	else
		val |= DPIO_PCS_CLK_SOFT_RESET;
	vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW1(ch), val);

	if (crtc_state->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW1(ch));
		val |= CHV_PCS_REQ_SOFTRESET_EN;
		if (reset)
			val &= ~DPIO_PCS_CLK_SOFT_RESET;
		else
			val |= DPIO_PCS_CLK_SOFT_RESET;
		vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW1(ch), val);
	}
}
853 
854 void chv_data_lane_soft_reset(struct intel_encoder *encoder,
855 			      const struct intel_crtc_state *crtc_state,
856 			      bool reset)
857 {
858 	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
859 
860 	vlv_dpio_get(i915);
861 	__chv_data_lane_soft_reset(encoder, crtc_state, reset);
862 	vlv_dpio_put(i915);
863 }
864 
/*
 * chv_phy_pre_pll_enable - CHV PHY setup that must precede PLL enabling
 * @encoder: encoder being enabled
 * @crtc_state: provides the pipe and lane count
 *
 * Powers up the needed lanes (and, when driving port B from pipe B, the
 * second common lane), asserts data lane reset, and programs clock
 * distribution and clock channel usage for the pipe/port combination.
 */
void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
			    const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(encoder);
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
	enum dpio_phy phy = vlv_dig_port_to_phy(dig_port);
	enum pipe pipe = crtc->pipe;
	unsigned int lane_mask =
		intel_dp_unused_lane_mask(crtc_state->lane_count);
	u32 val;

	/*
	 * Must trick the second common lane into life.
	 * Otherwise we can't even access the PLL.
	 * Remember whether we forced it on, so we can undo it later
	 * (see chv_phy_release_cl2_override()).
	 */
	if (ch == DPIO_CH0 && pipe == PIPE_B)
		dig_port->release_cl2_override =
			!chv_phy_powergate_ch(display, DPIO_PHY0, DPIO_CH1, true);

	chv_phy_powergate_lanes(encoder, true, lane_mask);

	vlv_dpio_get(dev_priv);

	/* Assert data lane reset */
	__chv_data_lane_soft_reset(encoder, crtc_state, true);

	/* program left/right clock distribution */
	if (pipe != PIPE_B) {
		val = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW5_CH0);
		val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
		if (ch == DPIO_CH0)
			val |= CHV_BUFLEFTENA1_FORCE;
		if (ch == DPIO_CH1)
			val |= CHV_BUFRIGHTENA1_FORCE;
		vlv_dpio_write(dev_priv, phy, CHV_CMN_DW5_CH0, val);
	} else {
		val = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW1_CH1);
		val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
		if (ch == DPIO_CH0)
			val |= CHV_BUFLEFTENA2_FORCE;
		if (ch == DPIO_CH1)
			val |= CHV_BUFRIGHTENA2_FORCE;
		vlv_dpio_write(dev_priv, phy, CHV_CMN_DW1_CH1, val);
	}

	/* program clock channel usage */
	val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW8(ch));
	val |= DPIO_PCS_USEDCLKCHANNEL_OVRRIDE;
	if (pipe == PIPE_B)
		val |= DPIO_PCS_USEDCLKCHANNEL;
	else
		val &= ~DPIO_PCS_USEDCLKCHANNEL;
	vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW8(ch), val);

	if (crtc_state->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW8(ch));
		val |= DPIO_PCS_USEDCLKCHANNEL_OVRRIDE;
		if (pipe == PIPE_B)
			val |= DPIO_PCS_USEDCLKCHANNEL;
		else
			val &= ~DPIO_PCS_USEDCLKCHANNEL;
		vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW8(ch), val);
	}

	/*
	 * This is a bit weird since generally CL
	 * matches the pipe, but here we need to
	 * pick the CL based on the port.
	 */
	val = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW19(ch));
	if (pipe == PIPE_B)
		val |= CHV_CMN_USEDCLKCHANNEL;
	else
		val &= ~CHV_CMN_USEDCLKCHANNEL;
	vlv_dpio_write(dev_priv, phy, CHV_CMN_DW19(ch), val);

	vlv_dpio_put(dev_priv);
}
946 
/*
 * chv_phy_pre_encoder_enable - CHV PHY setup that must precede encoder enable
 * @encoder: encoder being enabled
 * @crtc_state: provides the lane count and port clock
 *
 * Hands TX FIFO reset management back to hardware, programs per-lane
 * latency optimization (UPAR) and data lane staggering based on the port
 * clock, then deasserts the data lane reset.
 */
void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
				const struct intel_crtc_state *crtc_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
	enum dpio_phy phy = vlv_dig_port_to_phy(dig_port);
	int data, i, stagger;
	u32 val;

	vlv_dpio_get(dev_priv);

	/* allow hardware to manage TX FIFO reset source */
	val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW11(ch));
	val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
	vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW11(ch), val);

	if (crtc_state->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW11(ch));
		val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
		vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW11(ch), val);
	}

	/* Program Tx lane latency optimal setting */
	for (i = 0; i < crtc_state->lane_count; i++) {
		/* Set the upar bit */
		if (crtc_state->lane_count == 1)
			data = 0;
		else
			data = (i == 1) ? 0 : DPIO_UPAR;
		vlv_dpio_write(dev_priv, phy, CHV_TX_DW14(ch, i), data);
	}

	/*
	 * Data lane stagger programming; thresholds scale with the port
	 * clock (higher clock -> larger stagger value).
	 */
	if (crtc_state->port_clock > 270000)
		stagger = 0x18;
	else if (crtc_state->port_clock > 135000)
		stagger = 0xd;
	else if (crtc_state->port_clock > 67500)
		stagger = 0x7;
	else if (crtc_state->port_clock > 33750)
		stagger = 0x4;
	else
		stagger = 0x2;

	val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW11(ch));
	val |= DPIO_TX2_STAGGER_MASK(0x1f);
	vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW11(ch), val);

	if (crtc_state->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW11(ch));
		val |= DPIO_TX2_STAGGER_MASK(0x1f);
		vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW11(ch), val);
	}

	vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW12(ch),
		       DPIO_LANESTAGGER_STRAP(stagger) |
		       DPIO_LANESTAGGER_STRAP_OVRD |
		       DPIO_TX1_STAGGER_MASK(0x1f) |
		       DPIO_TX1_STAGGER_MULT(6) |
		       DPIO_TX2_STAGGER_MULT(0));

	if (crtc_state->lane_count > 2) {
		vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW12(ch),
			       DPIO_LANESTAGGER_STRAP(stagger) |
			       DPIO_LANESTAGGER_STRAP_OVRD |
			       DPIO_TX1_STAGGER_MASK(0x1f) |
			       DPIO_TX1_STAGGER_MULT(7) |
			       DPIO_TX2_STAGGER_MULT(5));
	}

	/* Deassert data lane reset */
	__chv_data_lane_soft_reset(encoder, crtc_state, false);

	vlv_dpio_put(dev_priv);
}
1024 
1025 void chv_phy_release_cl2_override(struct intel_encoder *encoder)
1026 {
1027 	struct intel_display *display = to_intel_display(encoder);
1028 	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
1029 
1030 	if (dig_port->release_cl2_override) {
1031 		chv_phy_powergate_ch(display, DPIO_PHY0, DPIO_CH1, false);
1032 		dig_port->release_cl2_override = false;
1033 	}
1034 }
1035 
/*
 * chv_phy_post_pll_disable - CHV PHY cleanup after the PLL has been
 * disabled: turn off the left/right clock distribution for the pipe
 * that was driving the port, then release the lane powergating
 * override.
 */
void chv_phy_post_pll_disable(struct intel_encoder *encoder,
			      const struct intel_crtc_state *old_crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	enum dpio_phy phy = vlv_dig_port_to_phy(enc_to_dig_port(encoder));
	enum pipe pipe = to_intel_crtc(old_crtc_state->uapi.crtc)->pipe;
	u32 val;

	vlv_dpio_get(dev_priv);

	/*
	 * disable left/right clock distribution; pipe A/C use the CH0
	 * common lane register, pipe B uses the CH1 one.
	 */
	if (pipe != PIPE_B) {
		val = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW5_CH0);
		val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
		vlv_dpio_write(dev_priv, phy, CHV_CMN_DW5_CH0, val);
	} else {
		val = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW1_CH1);
		val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
		vlv_dpio_write(dev_priv, phy, CHV_CMN_DW1_CH1, val);
	}

	vlv_dpio_put(dev_priv);

	/*
	 * Leave the power down bit cleared for at least one
	 * lane so that chv_powergate_phy_ch() will power
	 * on something when the channel is otherwise unused.
	 * When the port is off and the override is removed
	 * the lanes power down anyway, so otherwise it doesn't
	 * really matter what the state of power down bits is
	 * after this.
	 */
	chv_phy_powergate_lanes(encoder, false, 0x0);
}
1070 
1071 void vlv_set_phy_signal_level(struct intel_encoder *encoder,
1072 			      const struct intel_crtc_state *crtc_state,
1073 			      u32 demph_reg_value, u32 preemph_reg_value,
1074 			      u32 uniqtranscale_reg_value, u32 tx3_demph)
1075 {
1076 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1077 	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
1078 	enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
1079 	enum dpio_phy phy = vlv_dig_port_to_phy(dig_port);
1080 
1081 	vlv_dpio_get(dev_priv);
1082 
1083 	vlv_dpio_write(dev_priv, phy, VLV_TX_DW5_GRP(ch), 0x00000000);
1084 	vlv_dpio_write(dev_priv, phy, VLV_TX_DW4_GRP(ch), demph_reg_value);
1085 	vlv_dpio_write(dev_priv, phy, VLV_TX_DW2_GRP(ch),
1086 			 uniqtranscale_reg_value);
1087 	vlv_dpio_write(dev_priv, phy, VLV_TX_DW3_GRP(ch), 0x0C782040);
1088 
1089 	if (tx3_demph)
1090 		vlv_dpio_write(dev_priv, phy, VLV_TX_DW4(ch, 3), tx3_demph);
1091 
1092 	vlv_dpio_write(dev_priv, phy, VLV_PCS_DW11_GRP(ch), 0x00030000);
1093 	vlv_dpio_write(dev_priv, phy, VLV_PCS_DW9_GRP(ch), preemph_reg_value);
1094 	vlv_dpio_write(dev_priv, phy, VLV_TX_DW5_GRP(ch), DPIO_TX_OCALINIT_EN);
1095 
1096 	vlv_dpio_put(dev_priv);
1097 }
1098 
/*
 * vlv_phy_pre_pll_enable - VLV PHY programming done before enabling the
 * PLL: put the Tx lane resets into their default state and apply the
 * inter-pair skew workaround, all via IOSF sideband group writes for
 * this port's DPIO channel.
 */
void vlv_phy_pre_pll_enable(struct intel_encoder *encoder,
			    const struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
	enum dpio_phy phy = vlv_dig_port_to_phy(dig_port);

	/* Program Tx lane resets to default */
	vlv_dpio_get(dev_priv);

	vlv_dpio_write(dev_priv, phy, VLV_PCS_DW0_GRP(ch),
		       DPIO_PCS_TX_LANE2_RESET |
		       DPIO_PCS_TX_LANE1_RESET);
	vlv_dpio_write(dev_priv, phy, VLV_PCS_DW1_GRP(ch),
		       DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
		       DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
		       DPIO_PCS_CLK_DATAWIDTH_8_10 |
		       DPIO_PCS_CLK_SOFT_RESET);

	/*
	 * Fix up inter-pair skew failure; magic values presumably from
	 * the hardware team's recommended settings.
	 */
	vlv_dpio_write(dev_priv, phy, VLV_PCS_DW12_GRP(ch), 0x00750f00);
	vlv_dpio_write(dev_priv, phy, VLV_TX_DW11_GRP(ch), 0x00001500);
	vlv_dpio_write(dev_priv, phy, VLV_TX_DW14_GRP(ch), 0x40400000);

	vlv_dpio_put(dev_priv);
}
1126 
/*
 * vlv_phy_pre_encoder_enable - VLV PHY programming done just before the
 * encoder is enabled: select the clock channel for the driving pipe and
 * program the lane clock registers.
 */
void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder,
				const struct intel_crtc_state *crtc_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
	enum dpio_phy phy = vlv_dig_port_to_phy(dig_port);
	enum pipe pipe = crtc->pipe;
	u32 val;

	vlv_dpio_get(dev_priv);

	/*
	 * Enable clock channels for this port; the USEDCLKCHANNEL bit
	 * selects pipe B's clock when set. 0xc4 is a magic enable value
	 * (meaning not documented here — presumably from BSpec).
	 */
	val = DPIO_PCS_USEDCLKCHANNEL_OVRRIDE;
	if (pipe == PIPE_B)
		val |= DPIO_PCS_USEDCLKCHANNEL;
	val |= 0xc4;
	vlv_dpio_write(dev_priv, phy, VLV_PCS_DW8_GRP(ch), val);

	/* Program lane clock */
	vlv_dpio_write(dev_priv, phy, VLV_PCS_DW14_GRP(ch), 0x00760018);
	vlv_dpio_write(dev_priv, phy, VLV_PCS_DW23_GRP(ch), 0x00400888);

	vlv_dpio_put(dev_priv);
}
1154 
/*
 * vlv_phy_reset_lanes - reset the Tx lanes for this port's DPIO channel
 * when tearing the port down. Clears the lane reset bits (PCS DW0) and
 * programs PCS DW1 with 0x00e00060 (magic teardown value — presumably
 * from BSpec).
 */
void vlv_phy_reset_lanes(struct intel_encoder *encoder,
			 const struct intel_crtc_state *old_crtc_state)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
	enum dpio_phy phy = vlv_dig_port_to_phy(dig_port);

	vlv_dpio_get(dev_priv);
	vlv_dpio_write(dev_priv, phy, VLV_PCS_DW0_GRP(ch), 0x00000000);
	vlv_dpio_write(dev_priv, phy, VLV_PCS_DW1_GRP(ch), 0x00e00060);
	vlv_dpio_put(dev_priv);
}
1168 
/*
 * vlv_wait_port_ready - wait (up to 1 ms) for the PHY to report the port
 * ready status expected by the caller, warning on timeout.
 *
 * Ports B and C report through DPLL(0); port D reports through
 * DPIO_PHY_STATUS. For port C the caller's expected_mask is shifted by 4,
 * matching port C's status bits sitting 4 bits above port B's in the same
 * register (per the PORTB/PORTC ready mask definitions).
 */
void vlv_wait_port_ready(struct intel_encoder *encoder,
			 unsigned int expected_mask)
{
	struct intel_display *display = to_intel_display(encoder);
	u32 port_mask;
	i915_reg_t dpll_reg;

	switch (encoder->port) {
	default:
		MISSING_CASE(encoder->port);
		fallthrough;
	case PORT_B:
		port_mask = DPLL_PORTB_READY_MASK;
		dpll_reg = DPLL(display, 0);
		break;
	case PORT_C:
		port_mask = DPLL_PORTC_READY_MASK;
		dpll_reg = DPLL(display, 0);
		expected_mask <<= 4;
		break;
	case PORT_D:
		port_mask = DPLL_PORTD_READY_MASK;
		dpll_reg = DPIO_PHY_STATUS;
		break;
	}

	if (intel_de_wait(display, dpll_reg, port_mask, expected_mask, 1000))
		drm_WARN(display->drm, 1,
			 "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
			 encoder->base.base.id, encoder->base.name,
			 intel_de_read(display, dpll_reg) & port_mask,
			 expected_mask);
}
1202