xref: /linux/drivers/gpu/drm/i915/display/intel_cx0_phy.c (revision 7bdbfb4e36e34eb788e44f27666bf0a2b3b90803)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2023 Intel Corporation
4  */
5 
6 #include <linux/log2.h>
7 #include <linux/math64.h>
8 #include "i915_reg.h"
9 #include "intel_cx0_phy.h"
10 #include "intel_cx0_phy_regs.h"
11 #include "intel_ddi.h"
12 #include "intel_ddi_buf_trans.h"
13 #include "intel_de.h"
14 #include "intel_display_types.h"
15 #include "intel_dp.h"
16 #include "intel_hdmi.h"
17 #include "intel_panel.h"
18 #include "intel_psr.h"
19 #include "intel_tc.h"
20 
21 #define MB_WRITE_COMMITTED      true
22 #define MB_WRITE_UNCOMMITTED    false
23 
24 #define for_each_cx0_lane_in_mask(__lane_mask, __lane) \
25 	for ((__lane) = 0; (__lane) < 2; (__lane)++) \
26 		for_each_if((__lane_mask) & BIT(__lane))
27 
28 #define INTEL_CX0_LANE0		BIT(0)
29 #define INTEL_CX0_LANE1		BIT(1)
30 #define INTEL_CX0_BOTH_LANES	(INTEL_CX0_LANE1 | INTEL_CX0_LANE0)
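/*
 * Each CX0 PHY exposes two message bus lanes. Lane masks carry one bit per
 * lane, and for_each_cx0_lane_in_mask() iterates over the lanes selected by
 * a mask, e.g. lane 0 and then lane 1 for INTEL_CX0_BOTH_LANES.
 */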
31 
32 bool intel_is_c10phy(struct drm_i915_private *i915, enum phy phy)
33 {
34 	if ((IS_LUNARLAKE(i915) || IS_METEORLAKE(i915)) && phy < PHY_C)
35 		return true;
36 
37 	return false;
38 }
39 
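/*
 * Convert a lane mask that selects exactly one lane (INTEL_CX0_LANE0 or
 * INTEL_CX0_LANE1) into a lane index, warning and falling back to lane 0
 * for any other mask.
 */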
40 static int lane_mask_to_lane(u8 lane_mask)
41 {
42 	if (WARN_ON((lane_mask & ~INTEL_CX0_BOTH_LANES) ||
43 		    hweight8(lane_mask) != 1))
44 		return 0;
45 
46 	return ilog2(lane_mask);
47 }
48 
49 static u8 intel_cx0_get_owned_lane_mask(struct drm_i915_private *i915,
50 					struct intel_encoder *encoder)
51 {
52 	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
53 
54 	if (!intel_tc_port_in_dp_alt_mode(dig_port))
55 		return INTEL_CX0_BOTH_LANES;
56 
57 	/*
58 	 * In DP-alt with pin assignment D, only PHY lane 0 is owned
59 	 * by display and lane 1 is owned by USB.
60 	 */
61 	return intel_tc_port_max_lane_count(dig_port) > 2
62 		? INTEL_CX0_BOTH_LANES : INTEL_CX0_LANE0;
63 }
64 
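/*
 * Message bus accesses are only valid while DC states are disabled, so warn
 * if the DC_OFF power domain is not enabled.
 */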
65 static void
66 assert_dc_off(struct drm_i915_private *i915)
67 {
68 	bool enabled;
69 
70 	enabled = intel_display_power_is_enabled(i915, POWER_DOMAIN_DC_OFF);
71 	drm_WARN_ON(&i915->drm, !enabled);
72 }
73 
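/* Program the message bus timeout value for both lanes of this port. */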
74 static void intel_cx0_program_msgbus_timer(struct intel_encoder *encoder)
75 {
76 	int lane;
77 	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
78 
79 	for_each_cx0_lane_in_mask(INTEL_CX0_BOTH_LANES, lane)
80 		intel_de_rmw(i915,
81 			     XELPDP_PORT_MSGBUS_TIMER(encoder->port, lane),
82 			     XELPDP_PORT_MSGBUS_TIMER_VAL_MASK,
83 			     XELPDP_PORT_MSGBUS_TIMER_VAL);
84 }
85 
86 /*
87  * Prepare HW for CX0 phy transactions.
88  *
89  * It is required that PSR and DC5/6 are disabled before any CX0 message
90  * bus transaction is executed.
91  *
92  * We also program the msgbus timer here to ensure that the timer
93  * is already set up before any access to the msgbus.
94  */
95 static intel_wakeref_t intel_cx0_phy_transaction_begin(struct intel_encoder *encoder)
96 {
97 	intel_wakeref_t wakeref;
98 	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
99 	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
100 
101 	intel_psr_pause(intel_dp);
102 	wakeref = intel_display_power_get(i915, POWER_DOMAIN_DC_OFF);
103 	intel_cx0_program_msgbus_timer(encoder);
104 
105 	return wakeref;
106 }
107 
108 static void intel_cx0_phy_transaction_end(struct intel_encoder *encoder, intel_wakeref_t wakeref)
109 {
110 	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
111 	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
112 
113 	intel_psr_resume(intel_dp);
114 	intel_display_power_put(i915, POWER_DOMAIN_DC_OFF, wakeref);
115 }
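/*
 * Typical usage, as in intel_cx0_phy_set_signal_levels() below:
 *
 *	wakeref = intel_cx0_phy_transaction_begin(encoder);
 *	... message bus reads/writes ...
 *	intel_cx0_phy_transaction_end(encoder, wakeref);
 */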
116 
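/*
 * Acknowledge a completed transaction by writing back the response-ready and
 * error status bits (these status bits appear to be write-1-to-clear).
 */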
117 static void intel_clear_response_ready_flag(struct drm_i915_private *i915,
118 					    enum port port, int lane)
119 {
120 	intel_de_rmw(i915, XELPDP_PORT_P2M_MSGBUS_STATUS(port, lane),
121 		     0, XELPDP_PORT_P2M_RESPONSE_READY | XELPDP_PORT_P2M_ERROR_SET);
122 }
123 
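/*
 * Recover the message bus for one lane: request a transaction reset, wait
 * for the hardware to complete it and then clear any stale status flags.
 */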
124 static void intel_cx0_bus_reset(struct drm_i915_private *i915, enum port port, int lane)
125 {
126 	enum phy phy = intel_port_to_phy(i915, port);
127 
128 	intel_de_write(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane),
129 		       XELPDP_PORT_M2P_TRANSACTION_RESET);
130 
131 	if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane),
132 				    XELPDP_PORT_M2P_TRANSACTION_RESET,
133 				    XELPDP_MSGBUS_TIMEOUT_SLOW)) {
134 		drm_err_once(&i915->drm, "Failed to bring PHY %c to idle.\n", phy_name(phy));
135 		return;
136 	}
137 
138 	intel_clear_response_ready_flag(i915, port, lane);
139 }
140 
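/*
 * Wait for the PHY to post a response to the last command and sanity check
 * it: a timeout, a set error bit or a response type other than the expected
 * read/write ack all reset the bus and fail the transaction.
 */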
141 static int intel_cx0_wait_for_ack(struct drm_i915_private *i915, enum port port,
142 				  int command, int lane, u32 *val)
143 {
144 	enum phy phy = intel_port_to_phy(i915, port);
145 
146 	if (__intel_de_wait_for_register(i915,
147 					 XELPDP_PORT_P2M_MSGBUS_STATUS(port, lane),
148 					 XELPDP_PORT_P2M_RESPONSE_READY,
149 					 XELPDP_PORT_P2M_RESPONSE_READY,
150 					 XELPDP_MSGBUS_TIMEOUT_FAST_US,
151 					 XELPDP_MSGBUS_TIMEOUT_SLOW, val)) {
152 		drm_dbg_kms(&i915->drm, "PHY %c Timeout waiting for message ACK. Status: 0x%x\n",
153 			    phy_name(phy), *val);
154 
155 		if (!(intel_de_read(i915, XELPDP_PORT_MSGBUS_TIMER(port, lane)) &
156 		      XELPDP_PORT_MSGBUS_TIMER_TIMED_OUT))
157 			drm_dbg_kms(&i915->drm,
158 				    "PHY %c Hardware did not detect a timeout\n",
159 				    phy_name(phy));
160 
161 		intel_cx0_bus_reset(i915, port, lane);
162 		return -ETIMEDOUT;
163 	}
164 
165 	if (*val & XELPDP_PORT_P2M_ERROR_SET) {
166 		drm_dbg_kms(&i915->drm, "PHY %c Error occurred during %s command. Status: 0x%x\n", phy_name(phy),
167 			    command == XELPDP_PORT_P2M_COMMAND_READ_ACK ? "read" : "write", *val);
168 		intel_cx0_bus_reset(i915, port, lane);
169 		return -EINVAL;
170 	}
171 
172 	if (REG_FIELD_GET(XELPDP_PORT_P2M_COMMAND_TYPE_MASK, *val) != command) {
173 		drm_dbg_kms(&i915->drm, "PHY %c Not a %s response. MSGBUS Status: 0x%x.\n", phy_name(phy),
174 			    command == XELPDP_PORT_P2M_COMMAND_READ_ACK ? "read" : "write", *val);
175 		intel_cx0_bus_reset(i915, port, lane);
176 		return -EINVAL;
177 	}
178 
179 	return 0;
180 }
181 
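/*
 * Single read attempt: wait for the bus to go idle, issue the read command
 * with the register address, wait for the read ack and return the data
 * field of the response (or a negative error code).
 */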
182 static int __intel_cx0_read_once(struct drm_i915_private *i915, enum port port,
183 				 int lane, u16 addr)
184 {
185 	enum phy phy = intel_port_to_phy(i915, port);
186 	int ack;
187 	u32 val;
188 
189 	if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane),
190 				    XELPDP_PORT_M2P_TRANSACTION_PENDING,
191 				    XELPDP_MSGBUS_TIMEOUT_SLOW)) {
192 		drm_dbg_kms(&i915->drm,
193 			    "PHY %c Timeout waiting for previous transaction to complete. Reset the bus and retry.\n", phy_name(phy));
194 		intel_cx0_bus_reset(i915, port, lane);
195 		return -ETIMEDOUT;
196 	}
197 
198 	intel_de_write(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane),
199 		       XELPDP_PORT_M2P_TRANSACTION_PENDING |
200 		       XELPDP_PORT_M2P_COMMAND_READ |
201 		       XELPDP_PORT_M2P_ADDRESS(addr));
202 
203 	ack = intel_cx0_wait_for_ack(i915, port, XELPDP_PORT_P2M_COMMAND_READ_ACK, lane, &val);
204 	if (ack < 0)
205 		return ack;
206 
207 	intel_clear_response_ready_flag(i915, port, lane);
208 
209 	/*
210 	 * FIXME: Workaround to let the HW settle
211 	 * down and let the message bus end up
212 	 * in a known state
213 	 */
214 	intel_cx0_bus_reset(i915, port, lane);
215 
216 	return REG_FIELD_GET(XELPDP_PORT_P2M_DATA_MASK, val);
217 }
218 
219 static u8 __intel_cx0_read(struct drm_i915_private *i915, enum port port,
220 			   int lane, u16 addr)
221 {
222 	enum phy phy = intel_port_to_phy(i915, port);
223 	int i, status;
224 
225 	assert_dc_off(i915);
226 
227 	/* Three tries are assumed to be enough to read successfully */
228 	for (i = 0; i < 3; i++) {
229 		status = __intel_cx0_read_once(i915, port, lane, addr);
230 
231 		if (status >= 0)
232 			return status;
233 	}
234 
235 	drm_err_once(&i915->drm, "PHY %c Read %04x failed after %d retries.\n",
236 		     phy_name(phy), addr, i);
237 
238 	return 0;
239 }
240 
241 static u8 intel_cx0_read(struct drm_i915_private *i915, enum port port,
242 			 u8 lane_mask, u16 addr)
243 {
244 	int lane = lane_mask_to_lane(lane_mask);
245 
246 	return __intel_cx0_read(i915, port, lane, addr);
247 }
248 
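/*
 * Single write attempt: wait for the bus to go idle, issue a committed or
 * uncommitted write, then wait for the transaction to complete. Committed
 * writes are additionally confirmed by a write ack, uncommitted ones are
 * only checked for the error flag.
 */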
249 static int __intel_cx0_write_once(struct drm_i915_private *i915, enum port port,
250 				  int lane, u16 addr, u8 data, bool committed)
251 {
252 	enum phy phy = intel_port_to_phy(i915, port);
253 	int ack;
254 	u32 val;
255 
256 	if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane),
257 				    XELPDP_PORT_M2P_TRANSACTION_PENDING,
258 				    XELPDP_MSGBUS_TIMEOUT_SLOW)) {
259 		drm_dbg_kms(&i915->drm,
260 			    "PHY %c Timeout waiting for previous transaction to complete. Resetting the bus.\n", phy_name(phy));
261 		intel_cx0_bus_reset(i915, port, lane);
262 		return -ETIMEDOUT;
263 	}
264 
265 	intel_de_write(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane),
266 		       XELPDP_PORT_M2P_TRANSACTION_PENDING |
267 		       (committed ? XELPDP_PORT_M2P_COMMAND_WRITE_COMMITTED :
268 				    XELPDP_PORT_M2P_COMMAND_WRITE_UNCOMMITTED) |
269 		       XELPDP_PORT_M2P_DATA(data) |
270 		       XELPDP_PORT_M2P_ADDRESS(addr));
271 
272 	if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane),
273 				    XELPDP_PORT_M2P_TRANSACTION_PENDING,
274 				    XELPDP_MSGBUS_TIMEOUT_SLOW)) {
275 		drm_dbg_kms(&i915->drm,
276 			    "PHY %c Timeout waiting for write to complete. Resetting the bus.\n", phy_name(phy));
277 		intel_cx0_bus_reset(i915, port, lane);
278 		return -ETIMEDOUT;
279 	}
280 
281 	if (committed) {
282 		ack = intel_cx0_wait_for_ack(i915, port, XELPDP_PORT_P2M_COMMAND_WRITE_ACK, lane, &val);
283 		if (ack < 0)
284 			return ack;
285 	} else if ((intel_de_read(i915, XELPDP_PORT_P2M_MSGBUS_STATUS(port, lane)) &
286 		    XELPDP_PORT_P2M_ERROR_SET)) {
287 		drm_dbg_kms(&i915->drm,
288 			    "PHY %c Error occurred during write command.\n", phy_name(phy));
289 		intel_cx0_bus_reset(i915, port, lane);
290 		return -EINVAL;
291 	}
292 
293 	intel_clear_response_ready_flag(i915, port, lane);
294 
295 	/*
296 	 * FIXME: Workaround to let the HW settle
297 	 * down and let the message bus end up
298 	 * in a known state
299 	 */
300 	intel_cx0_bus_reset(i915, port, lane);
301 
302 	return 0;
303 }
304 
305 static void __intel_cx0_write(struct drm_i915_private *i915, enum port port,
306 			      int lane, u16 addr, u8 data, bool committed)
307 {
308 	enum phy phy = intel_port_to_phy(i915, port);
309 	int i, status;
310 
311 	assert_dc_off(i915);
312 
313 	/* Three tries are assumed to be enough to write successfully */
314 	for (i = 0; i < 3; i++) {
315 		status = __intel_cx0_write_once(i915, port, lane, addr, data, committed);
316 
317 		if (status == 0)
318 			return;
319 	}
320 
321 	drm_err_once(&i915->drm,
322 		     "PHY %c Write %04x failed after %d retries.\n", phy_name(phy), addr, i);
323 }
324 
325 static void intel_cx0_write(struct drm_i915_private *i915, enum port port,
326 			    u8 lane_mask, u16 addr, u8 data, bool committed)
327 {
328 	int lane;
329 
330 	for_each_cx0_lane_in_mask(lane_mask, lane)
331 		__intel_cx0_write(i915, port, lane, addr, data, committed);
332 }
333 
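/*
 * C20 SRAM registers are reached indirectly through the message bus: the
 * 16-bit SRAM address is written high byte first, followed by the 16-bit
 * data, with the final write committed, presumably to trigger the actual
 * SRAM access.
 */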
334 static void intel_c20_sram_write(struct drm_i915_private *i915, enum port port,
335 				 int lane, u16 addr, u16 data)
336 {
337 	assert_dc_off(i915);
338 
339 	intel_cx0_write(i915, port, lane, PHY_C20_WR_ADDRESS_H, addr >> 8, MB_WRITE_UNCOMMITTED);
340 	intel_cx0_write(i915, port, lane, PHY_C20_WR_ADDRESS_L, addr & 0xff, MB_WRITE_UNCOMMITTED);
341 
342 	intel_cx0_write(i915, port, lane, PHY_C20_WR_DATA_H, data >> 8, MB_WRITE_UNCOMMITTED);
343 	intel_cx0_write(i915, port, lane, PHY_C20_WR_DATA_L, data & 0xff, MB_WRITE_COMMITTED);
344 }
345 
346 static u16 intel_c20_sram_read(struct drm_i915_private *i915, enum port port,
347 			       int lane, u16 addr)
348 {
349 	u16 val;
350 
351 	assert_dc_off(i915);
352 
353 	intel_cx0_write(i915, port, lane, PHY_C20_RD_ADDRESS_H, addr >> 8, MB_WRITE_UNCOMMITTED);
354 	intel_cx0_write(i915, port, lane, PHY_C20_RD_ADDRESS_L, addr & 0xff, MB_WRITE_COMMITTED);
355 
356 	val = intel_cx0_read(i915, port, lane, PHY_C20_RD_DATA_H);
357 	val <<= 8;
358 	val |= intel_cx0_read(i915, port, lane, PHY_C20_RD_DATA_L);
359 
360 	return val;
361 }
362 
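/* Read-modify-write of a PHY register, skipping the write if nothing changes. */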
363 static void __intel_cx0_rmw(struct drm_i915_private *i915, enum port port,
364 			    int lane, u16 addr, u8 clear, u8 set, bool committed)
365 {
366 	u8 old, val;
367 
368 	old = __intel_cx0_read(i915, port, lane, addr);
369 	val = (old & ~clear) | set;
370 
371 	if (val != old)
372 		__intel_cx0_write(i915, port, lane, addr, val, committed);
373 }
374 
375 static void intel_cx0_rmw(struct drm_i915_private *i915, enum port port,
376 			  u8 lane_mask, u16 addr, u8 clear, u8 set, bool committed)
377 {
378 	u8 lane;
379 
380 	for_each_cx0_lane_in_mask(lane_mask, lane)
381 		__intel_cx0_rmw(i915, port, lane, addr, clear, set, committed);
382 }
383 
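/*
 * TX voltage boost and termination control values for the C10 PHY, selected
 * by output type and link rate: DP at HBR2/HBR3 rates (other than eDP) gets
 * different values than lower DP rates or non-DP outputs.
 */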
384 static u8 intel_c10_get_tx_vboost_lvl(const struct intel_crtc_state *crtc_state)
385 {
386 	if (intel_crtc_has_dp_encoder(crtc_state)) {
387 		if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) &&
388 		    (crtc_state->port_clock == 540000 ||
389 		     crtc_state->port_clock == 810000))
390 			return 5;
391 		else
392 			return 4;
393 	} else {
394 		return 5;
395 	}
396 }
397 
398 static u8 intel_c10_get_tx_term_ctl(const struct intel_crtc_state *crtc_state)
399 {
400 	if (intel_crtc_has_dp_encoder(crtc_state)) {
401 		if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) &&
402 		    (crtc_state->port_clock == 540000 ||
403 		     crtc_state->port_clock == 810000))
404 			return 5;
405 		else
406 			return 2;
407 	} else {
408 		return 6;
409 	}
410 }
411 
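/*
 * Program vswing/pre-emphasis overrides for every TX of the owned PHY lanes
 * from the encoder's buf_trans table. Nothing to do in TBT-alt mode.
 */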
412 void intel_cx0_phy_set_signal_levels(struct intel_encoder *encoder,
413 				     const struct intel_crtc_state *crtc_state)
414 {
415 	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
416 	const struct intel_ddi_buf_trans *trans;
417 	enum phy phy = intel_port_to_phy(i915, encoder->port);
418 	u8 owned_lane_mask;
419 	intel_wakeref_t wakeref;
420 	int n_entries, ln;
421 	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
422 
423 	if (intel_tc_port_in_tbt_alt_mode(dig_port))
424 		return;
425 
426 	owned_lane_mask = intel_cx0_get_owned_lane_mask(i915, encoder);
427 
428 	wakeref = intel_cx0_phy_transaction_begin(encoder);
429 
430 	trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
431 	if (drm_WARN_ON_ONCE(&i915->drm, !trans)) {
432 		intel_cx0_phy_transaction_end(encoder, wakeref);
433 		return;
434 	}
435 
436 	if (intel_is_c10phy(i915, phy)) {
437 		intel_cx0_rmw(i915, encoder->port, owned_lane_mask, PHY_C10_VDR_CONTROL(1),
438 			      0, C10_VDR_CTRL_MSGBUS_ACCESS, MB_WRITE_COMMITTED);
439 		intel_cx0_rmw(i915, encoder->port, owned_lane_mask, PHY_C10_VDR_CMN(3),
440 			      C10_CMN3_TXVBOOST_MASK,
441 			      C10_CMN3_TXVBOOST(intel_c10_get_tx_vboost_lvl(crtc_state)),
442 			      MB_WRITE_UNCOMMITTED);
443 		intel_cx0_rmw(i915, encoder->port, owned_lane_mask, PHY_C10_VDR_TX(1),
444 			      C10_TX1_TERMCTL_MASK,
445 			      C10_TX1_TERMCTL(intel_c10_get_tx_term_ctl(crtc_state)),
446 			      MB_WRITE_COMMITTED);
447 	}
448 
449 	for (ln = 0; ln < crtc_state->lane_count; ln++) {
450 		int level = intel_ddi_level(encoder, crtc_state, ln);
451 		int lane = ln / 2;
452 		int tx = ln % 2;
453 		u8 lane_mask = lane == 0 ? INTEL_CX0_LANE0 : INTEL_CX0_LANE1;
454 
455 		if (!(lane_mask & owned_lane_mask))
456 			continue;
457 
458 		intel_cx0_rmw(i915, encoder->port, lane_mask, PHY_CX0_VDROVRD_CTL(lane, tx, 0),
459 			      C10_PHY_OVRD_LEVEL_MASK,
460 			      C10_PHY_OVRD_LEVEL(trans->entries[level].snps.pre_cursor),
461 			      MB_WRITE_COMMITTED);
462 		intel_cx0_rmw(i915, encoder->port, lane_mask, PHY_CX0_VDROVRD_CTL(lane, tx, 1),
463 			      C10_PHY_OVRD_LEVEL_MASK,
464 			      C10_PHY_OVRD_LEVEL(trans->entries[level].snps.vswing),
465 			      MB_WRITE_COMMITTED);
466 		intel_cx0_rmw(i915, encoder->port, lane_mask, PHY_CX0_VDROVRD_CTL(lane, tx, 2),
467 			      C10_PHY_OVRD_LEVEL_MASK,
468 			      C10_PHY_OVRD_LEVEL(trans->entries[level].snps.post_cursor),
469 			      MB_WRITE_COMMITTED);
470 	}
471 
472 	/* Write Override enables in 0xD71 */
473 	intel_cx0_rmw(i915, encoder->port, owned_lane_mask, PHY_C10_VDR_OVRD,
474 		      0, PHY_C10_VDR_OVRD_TX1 | PHY_C10_VDR_OVRD_TX2,
475 		      MB_WRITE_COMMITTED);
476 
477 	if (intel_is_c10phy(i915, phy))
478 		intel_cx0_rmw(i915, encoder->port, owned_lane_mask, PHY_C10_VDR_CONTROL(1),
479 			      0, C10_VDR_CTRL_UPDATE_CFG, MB_WRITE_COMMITTED);
480 
481 	intel_cx0_phy_transaction_end(encoder, wakeref);
482 }
483 
484 /*
485  * Basic DP link rates with 38.4 MHz reference clock.
486  * Note: The tables below assume SSC. In the non-SSC case,
487  * registers 0xC04 to 0xC08 (pll[4] to pll[8]) are
488  * programmed to 0.
489  */
490 
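/*
 * In the tables below, .clock is the link rate in kHz (the port clock) and
 * pll[0..19] are the raw C10 PLL register values programmed through the
 * message bus.
 */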
491 static const struct intel_c10pll_state mtl_c10_dp_rbr = {
492 	.clock = 162000,
493 	.tx = 0x10,
494 	.cmn = 0x21,
495 	.pll[0] = 0xB4,
496 	.pll[1] = 0,
497 	.pll[2] = 0x30,
498 	.pll[3] = 0x1,
499 	.pll[4] = 0x26,
500 	.pll[5] = 0x0C,
501 	.pll[6] = 0x98,
502 	.pll[7] = 0x46,
503 	.pll[8] = 0x1,
504 	.pll[9] = 0x1,
505 	.pll[10] = 0,
506 	.pll[11] = 0,
507 	.pll[12] = 0xC0,
508 	.pll[13] = 0,
509 	.pll[14] = 0,
510 	.pll[15] = 0x2,
511 	.pll[16] = 0x84,
512 	.pll[17] = 0x4F,
513 	.pll[18] = 0xE5,
514 	.pll[19] = 0x23,
515 };
516 
517 static const struct intel_c10pll_state mtl_c10_edp_r216 = {
518 	.clock = 216000,
519 	.tx = 0x10,
520 	.cmn = 0x21,
521 	.pll[0] = 0x4,
522 	.pll[1] = 0,
523 	.pll[2] = 0xA2,
524 	.pll[3] = 0x1,
525 	.pll[4] = 0x33,
526 	.pll[5] = 0x10,
527 	.pll[6] = 0x75,
528 	.pll[7] = 0xB3,
529 	.pll[8] = 0x1,
530 	.pll[9] = 0x1,
531 	.pll[10] = 0,
532 	.pll[11] = 0,
533 	.pll[12] = 0,
534 	.pll[13] = 0,
535 	.pll[14] = 0,
536 	.pll[15] = 0x2,
537 	.pll[16] = 0x85,
538 	.pll[17] = 0x0F,
539 	.pll[18] = 0xE6,
540 	.pll[19] = 0x23,
541 };
542 
543 static const struct intel_c10pll_state mtl_c10_edp_r243 = {
544 	.clock = 243000,
545 	.tx = 0x10,
546 	.cmn = 0x21,
547 	.pll[0] = 0x34,
548 	.pll[1] = 0,
549 	.pll[2] = 0xDA,
550 	.pll[3] = 0x1,
551 	.pll[4] = 0x39,
552 	.pll[5] = 0x12,
553 	.pll[6] = 0xE3,
554 	.pll[7] = 0xE9,
555 	.pll[8] = 0x1,
556 	.pll[9] = 0x1,
557 	.pll[10] = 0,
558 	.pll[11] = 0,
559 	.pll[12] = 0x20,
560 	.pll[13] = 0,
561 	.pll[14] = 0,
562 	.pll[15] = 0x2,
563 	.pll[16] = 0x85,
564 	.pll[17] = 0x8F,
565 	.pll[18] = 0xE6,
566 	.pll[19] = 0x23,
567 };
568 
569 static const struct intel_c10pll_state mtl_c10_dp_hbr1 = {
570 	.clock = 270000,
571 	.tx = 0x10,
572 	.cmn = 0x21,
573 	.pll[0] = 0xF4,
574 	.pll[1] = 0,
575 	.pll[2] = 0xF8,
576 	.pll[3] = 0x0,
577 	.pll[4] = 0x20,
578 	.pll[5] = 0x0A,
579 	.pll[6] = 0x29,
580 	.pll[7] = 0x10,
581 	.pll[8] = 0x1,   /* Verify */
582 	.pll[9] = 0x1,
583 	.pll[10] = 0,
584 	.pll[11] = 0,
585 	.pll[12] = 0xA0,
586 	.pll[13] = 0,
587 	.pll[14] = 0,
588 	.pll[15] = 0x1,
589 	.pll[16] = 0x84,
590 	.pll[17] = 0x4F,
591 	.pll[18] = 0xE5,
592 	.pll[19] = 0x23,
593 };
594 
595 static const struct intel_c10pll_state mtl_c10_edp_r324 = {
596 	.clock = 324000,
597 	.tx = 0x10,
598 	.cmn = 0x21,
599 	.pll[0] = 0xB4,
600 	.pll[1] = 0,
601 	.pll[2] = 0x30,
602 	.pll[3] = 0x1,
603 	.pll[4] = 0x26,
604 	.pll[5] = 0x0C,
605 	.pll[6] = 0x98,
606 	.pll[7] = 0x46,
607 	.pll[8] = 0x1,
608 	.pll[9] = 0x1,
609 	.pll[10] = 0,
610 	.pll[11] = 0,
611 	.pll[12] = 0xC0,
612 	.pll[13] = 0,
613 	.pll[14] = 0,
614 	.pll[15] = 0x1,
615 	.pll[16] = 0x85,
616 	.pll[17] = 0x4F,
617 	.pll[18] = 0xE6,
618 	.pll[19] = 0x23,
619 };
620 
621 static const struct intel_c10pll_state mtl_c10_edp_r432 = {
622 	.clock = 432000,
623 	.tx = 0x10,
624 	.cmn = 0x21,
625 	.pll[0] = 0x4,
626 	.pll[1] = 0,
627 	.pll[2] = 0xA2,
628 	.pll[3] = 0x1,
629 	.pll[4] = 0x33,
630 	.pll[5] = 0x10,
631 	.pll[6] = 0x75,
632 	.pll[7] = 0xB3,
633 	.pll[8] = 0x1,
634 	.pll[9] = 0x1,
635 	.pll[10] = 0,
636 	.pll[11] = 0,
637 	.pll[12] = 0,
638 	.pll[13] = 0,
639 	.pll[14] = 0,
640 	.pll[15] = 0x1,
641 	.pll[16] = 0x85,
642 	.pll[17] = 0x0F,
643 	.pll[18] = 0xE6,
644 	.pll[19] = 0x23,
645 };
646 
647 static const struct intel_c10pll_state mtl_c10_dp_hbr2 = {
648 	.clock = 540000,
649 	.tx = 0x10,
650 	.cmn = 0x21,
651 	.pll[0] = 0xF4,
652 	.pll[1] = 0,
653 	.pll[2] = 0xF8,
654 	.pll[3] = 0,
655 	.pll[4] = 0x20,
656 	.pll[5] = 0x0A,
657 	.pll[6] = 0x29,
658 	.pll[7] = 0x10,
659 	.pll[8] = 0x1,
660 	.pll[9] = 0x1,
661 	.pll[10] = 0,
662 	.pll[11] = 0,
663 	.pll[12] = 0xA0,
664 	.pll[13] = 0,
665 	.pll[14] = 0,
666 	.pll[15] = 0,
667 	.pll[16] = 0x84,
668 	.pll[17] = 0x4F,
669 	.pll[18] = 0xE5,
670 	.pll[19] = 0x23,
671 };
672 
673 static const struct intel_c10pll_state mtl_c10_edp_r675 = {
674 	.clock = 675000,
675 	.tx = 0x10,
676 	.cmn = 0x21,
677 	.pll[0] = 0xB4,
678 	.pll[1] = 0,
679 	.pll[2] = 0x3E,
680 	.pll[3] = 0x1,
681 	.pll[4] = 0xA8,
682 	.pll[5] = 0x0C,
683 	.pll[6] = 0x33,
684 	.pll[7] = 0x54,
685 	.pll[8] = 0x1,
686 	.pll[9] = 0x1,
687 	.pll[10] = 0,
688 	.pll[11] = 0,
689 	.pll[12] = 0xC8,
690 	.pll[13] = 0,
691 	.pll[14] = 0,
692 	.pll[15] = 0,
693 	.pll[16] = 0x85,
694 	.pll[17] = 0x8F,
695 	.pll[18] = 0xE6,
696 	.pll[19] = 0x23,
697 };
698 
699 static const struct intel_c10pll_state mtl_c10_dp_hbr3 = {
700 	.clock = 810000,
701 	.tx = 0x10,
702 	.cmn = 0x21,
703 	.pll[0] = 0x34,
704 	.pll[1] = 0,
705 	.pll[2] = 0x84,
706 	.pll[3] = 0x1,
707 	.pll[4] = 0x30,
708 	.pll[5] = 0x0F,
709 	.pll[6] = 0x3D,
710 	.pll[7] = 0x98,
711 	.pll[8] = 0x1,
712 	.pll[9] = 0x1,
713 	.pll[10] = 0,
714 	.pll[11] = 0,
715 	.pll[12] = 0xF0,
716 	.pll[13] = 0,
717 	.pll[14] = 0,
718 	.pll[15] = 0,
719 	.pll[16] = 0x84,
720 	.pll[17] = 0x0F,
721 	.pll[18] = 0xE5,
722 	.pll[19] = 0x23,
723 };
724 
725 static const struct intel_c10pll_state * const mtl_c10_dp_tables[] = {
726 	&mtl_c10_dp_rbr,
727 	&mtl_c10_dp_hbr1,
728 	&mtl_c10_dp_hbr2,
729 	&mtl_c10_dp_hbr3,
730 	NULL,
731 };
732 
733 static const struct intel_c10pll_state * const mtl_c10_edp_tables[] = {
734 	&mtl_c10_dp_rbr,
735 	&mtl_c10_edp_r216,
736 	&mtl_c10_edp_r243,
737 	&mtl_c10_dp_hbr1,
738 	&mtl_c10_edp_r324,
739 	&mtl_c10_edp_r432,
740 	&mtl_c10_dp_hbr2,
741 	&mtl_c10_edp_r675,
742 	&mtl_c10_dp_hbr3,
743 	NULL,
744 };
745 
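/*
 * Each C20 state below carries the TX and CMN config words plus the config
 * words for whichever PLL the rate uses (MPLLA or MPLLB, i.e. whichever
 * array the table populates).
 */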
746 /* C20 basic DP 1.4 tables */
747 static const struct intel_c20pll_state mtl_c20_dp_rbr = {
748 	.clock = 162000,
749 	.tx = {	0xbe88, /* tx cfg0 */
750 		0x5800, /* tx cfg1 */
751 		0x0000, /* tx cfg2 */
752 		},
753 	.cmn = {0x0500, /* cmn cfg0*/
754 		0x0005, /* cmn cfg1 */
755 		0x0000, /* cmn cfg2 */
756 		0x0000, /* cmn cfg3 */
757 		},
758 	.mpllb = { 0x50a8,	/* mpllb cfg0 */
759 		0x2120,		/* mpllb cfg1 */
760 		0xcd9a,		/* mpllb cfg2 */
761 		0xbfc1,		/* mpllb cfg3 */
762 		0x5ab8,         /* mpllb cfg4 */
763 		0x4c34,         /* mpllb cfg5 */
764 		0x2000,		/* mpllb cfg6 */
765 		0x0001,		/* mpllb cfg7 */
766 		0x6000,		/* mpllb cfg8 */
767 		0x0000,		/* mpllb cfg9 */
768 		0x0000,		/* mpllb cfg10 */
769 		},
770 };
771 
772 static const struct intel_c20pll_state mtl_c20_dp_hbr1 = {
773 	.clock = 270000,
774 	.tx = {	0xbe88, /* tx cfg0 */
775 		0x4800, /* tx cfg1 */
776 		0x0000, /* tx cfg2 */
777 		},
778 	.cmn = {0x0500, /* cmn cfg0*/
779 		0x0005, /* cmn cfg1 */
780 		0x0000, /* cmn cfg2 */
781 		0x0000, /* cmn cfg3 */
782 		},
783 	.mpllb = { 0x308c,	/* mpllb cfg0 */
784 		0x2110,		/* mpllb cfg1 */
785 		0xcc9c,		/* mpllb cfg2 */
786 		0xbfc1,		/* mpllb cfg3 */
787 		0x4b9a,         /* mpllb cfg4 */
788 		0x3f81,         /* mpllb cfg5 */
789 		0x2000,		/* mpllb cfg6 */
790 		0x0001,		/* mpllb cfg7 */
791 		0x5000,		/* mpllb cfg8 */
792 		0x0000,		/* mpllb cfg9 */
793 		0x0000,		/* mpllb cfg10 */
794 		},
795 };
796 
797 static const struct intel_c20pll_state mtl_c20_dp_hbr2 = {
798 	.clock = 540000,
799 	.tx = {	0xbe88, /* tx cfg0 */
800 		0x4800, /* tx cfg1 */
801 		0x0000, /* tx cfg2 */
802 		},
803 	.cmn = {0x0500, /* cmn cfg0*/
804 		0x0005, /* cmn cfg1 */
805 		0x0000, /* cmn cfg2 */
806 		0x0000, /* cmn cfg3 */
807 		},
808 	.mpllb = { 0x108c,	/* mpllb cfg0 */
809 		0x2108,		/* mpllb cfg1 */
810 		0xcc9c,		/* mpllb cfg2 */
811 		0xbfc1,		/* mpllb cfg3 */
812 		0x4b9a,         /* mpllb cfg4 */
813 		0x3f81,         /* mpllb cfg5 */
814 		0x2000,		/* mpllb cfg6 */
815 		0x0001,		/* mpllb cfg7 */
816 		0x5000,		/* mpllb cfg8 */
817 		0x0000,		/* mpllb cfg9 */
818 		0x0000,		/* mpllb cfg10 */
819 		},
820 };
821 
822 static const struct intel_c20pll_state mtl_c20_dp_hbr3 = {
823 	.clock = 810000,
824 	.tx = {	0xbe88, /* tx cfg0 */
825 		0x4800, /* tx cfg1 */
826 		0x0000, /* tx cfg2 */
827 		},
828 	.cmn = {0x0500, /* cmn cfg0*/
829 		0x0005, /* cmn cfg1 */
830 		0x0000, /* cmn cfg2 */
831 		0x0000, /* cmn cfg3 */
832 		},
833 	.mpllb = { 0x10d2,	/* mpllb cfg0 */
834 		0x2108,		/* mpllb cfg1 */
835 		0x8d98,		/* mpllb cfg2 */
836 		0xbfc1,		/* mpllb cfg3 */
837 		0x7166,         /* mpllb cfg4 */
838 		0x5f42,         /* mpllb cfg5 */
839 		0x2000,		/* mpllb cfg6 */
840 		0x0001,		/* mpllb cfg7 */
841 		0x7800,		/* mpllb cfg8 */
842 		0x0000,		/* mpllb cfg9 */
843 		0x0000,		/* mpllb cfg10 */
844 		},
845 };
846 
847 /* C20 basic DP 2.0 tables */
848 static const struct intel_c20pll_state mtl_c20_dp_uhbr10 = {
849 	.clock = 1000000, /* 10 Gbps */
850 	.tx = {	0xbe21, /* tx cfg0 */
851 		0x4800, /* tx cfg1 */
852 		0x0000, /* tx cfg2 */
853 		},
854 	.cmn = {0x0500, /* cmn cfg0*/
855 		0x0005, /* cmn cfg1 */
856 		0x0000, /* cmn cfg2 */
857 		0x0000, /* cmn cfg3 */
858 		},
859 	.mplla = { 0x3104,	/* mplla cfg0 */
860 		0xd105,		/* mplla cfg1 */
861 		0xc025,		/* mplla cfg2 */
862 		0xc025,		/* mplla cfg3 */
863 		0x8c00,		/* mplla cfg4 */
864 		0x759a,		/* mplla cfg5 */
865 		0x4000,		/* mplla cfg6 */
866 		0x0003,		/* mplla cfg7 */
867 		0x3555,		/* mplla cfg8 */
868 		0x0001,		/* mplla cfg9 */
869 		},
870 };
871 
872 static const struct intel_c20pll_state mtl_c20_dp_uhbr13_5 = {
873 	.clock = 1350000, /* 13.5 Gbps */
874 	.tx = {	0xbea0, /* tx cfg0 */
875 		0x4800, /* tx cfg1 */
876 		0x0000, /* tx cfg2 */
877 		},
878 	.cmn = {0x0500, /* cmn cfg0*/
879 		0x0005, /* cmn cfg1 */
880 		0x0000, /* cmn cfg2 */
881 		0x0000, /* cmn cfg3 */
882 		},
883 	.mpllb = { 0x015f,	/* mpllb cfg0 */
884 		0x2205,		/* mpllb cfg1 */
885 		0x1b17,		/* mpllb cfg2 */
886 		0xffc1,		/* mpllb cfg3 */
887 		0xe100,		/* mpllb cfg4 */
888 		0xbd00,		/* mpllb cfg5 */
889 		0x2000,		/* mpllb cfg6 */
890 		0x0001,		/* mpllb cfg7 */
891 		0x4800,		/* mpllb cfg8 */
892 		0x0000,		/* mpllb cfg9 */
893 		0x0000,		/* mpllb cfg10 */
894 		},
895 };
896 
897 static const struct intel_c20pll_state mtl_c20_dp_uhbr20 = {
898 	.clock = 2000000, /* 20 Gbps */
899 	.tx = {	0xbe20, /* tx cfg0 */
900 		0x4800, /* tx cfg1 */
901 		0x0000, /* tx cfg2 */
902 		},
903 	.cmn = {0x0500, /* cmn cfg0*/
904 		0x0005, /* cmn cfg1 */
905 		0x0000, /* cmn cfg2 */
906 		0x0000, /* cmn cfg3 */
907 		},
908 	.mplla = { 0x3104,	/* mplla cfg0 */
909 		0xd105,		/* mplla cfg1 */
910 		0xc025,		/* mplla cfg2 */
911 		0xc025,		/* mplla cfg3 */
912 		0xa6ab,		/* mplla cfg4 */
913 		0x8c00,		/* mplla cfg5 */
914 		0x4000,		/* mplla cfg6 */
915 		0x0003,		/* mplla cfg7 */
916 		0x3555,		/* mplla cfg8 */
917 		0x0001,		/* mplla cfg9 */
918 		},
919 };
920 
921 static const struct intel_c20pll_state * const mtl_c20_dp_tables[] = {
922 	&mtl_c20_dp_rbr,
923 	&mtl_c20_dp_hbr1,
924 	&mtl_c20_dp_hbr2,
925 	&mtl_c20_dp_hbr3,
926 	&mtl_c20_dp_uhbr10,
927 	&mtl_c20_dp_uhbr13_5,
928 	&mtl_c20_dp_uhbr20,
929 	NULL,
930 };
931 
932 /*
933  * HDMI link rates with 38.4 MHz reference clock.
934  */
935 
936 static const struct intel_c10pll_state mtl_c10_hdmi_25_2 = {
937 	.clock = 25200,
938 	.tx = 0x10,
939 	.cmn = 0x1,
940 	.pll[0] = 0x4,
941 	.pll[1] = 0,
942 	.pll[2] = 0xB2,
943 	.pll[3] = 0,
944 	.pll[4] = 0,
945 	.pll[5] = 0,
946 	.pll[6] = 0,
947 	.pll[7] = 0,
948 	.pll[8] = 0x20,
949 	.pll[9] = 0x1,
950 	.pll[10] = 0,
951 	.pll[11] = 0,
952 	.pll[12] = 0,
953 	.pll[13] = 0,
954 	.pll[14] = 0,
955 	.pll[15] = 0xD,
956 	.pll[16] = 0x6,
957 	.pll[17] = 0x8F,
958 	.pll[18] = 0x84,
959 	.pll[19] = 0x23,
960 };
961 
962 static const struct intel_c10pll_state mtl_c10_hdmi_27_0 = {
963 	.clock = 27000,
964 	.tx = 0x10,
965 	.cmn = 0x1,
966 	.pll[0] = 0x34,
967 	.pll[1] = 0,
968 	.pll[2] = 0xC0,
969 	.pll[3] = 0,
970 	.pll[4] = 0,
971 	.pll[5] = 0,
972 	.pll[6] = 0,
973 	.pll[7] = 0,
974 	.pll[8] = 0x20,
975 	.pll[9] = 0x1,
976 	.pll[10] = 0,
977 	.pll[11] = 0,
978 	.pll[12] = 0x80,
979 	.pll[13] = 0,
980 	.pll[14] = 0,
981 	.pll[15] = 0xD,
982 	.pll[16] = 0x6,
983 	.pll[17] = 0xCF,
984 	.pll[18] = 0x84,
985 	.pll[19] = 0x23,
986 };
987 
988 static const struct intel_c10pll_state mtl_c10_hdmi_74_25 = {
989 	.clock = 74250,
990 	.tx = 0x10,
991 	.cmn = 0x1,
992 	.pll[0] = 0xF4,
993 	.pll[1] = 0,
994 	.pll[2] = 0x7A,
995 	.pll[3] = 0,
996 	.pll[4] = 0,
997 	.pll[5] = 0,
998 	.pll[6] = 0,
999 	.pll[7] = 0,
1000 	.pll[8] = 0x20,
1001 	.pll[9] = 0x1,
1002 	.pll[10] = 0,
1003 	.pll[11] = 0,
1004 	.pll[12] = 0x58,
1005 	.pll[13] = 0,
1006 	.pll[14] = 0,
1007 	.pll[15] = 0xB,
1008 	.pll[16] = 0x6,
1009 	.pll[17] = 0xF,
1010 	.pll[18] = 0x85,
1011 	.pll[19] = 0x23,
1012 };
1013 
1014 static const struct intel_c10pll_state mtl_c10_hdmi_148_5 = {
1015 	.clock = 148500,
1016 	.tx = 0x10,
1017 	.cmn = 0x1,
1018 	.pll[0] = 0xF4,
1019 	.pll[1] = 0,
1020 	.pll[2] = 0x7A,
1021 	.pll[3] = 0,
1022 	.pll[4] = 0,
1023 	.pll[5] = 0,
1024 	.pll[6] = 0,
1025 	.pll[7] = 0,
1026 	.pll[8] = 0x20,
1027 	.pll[9] = 0x1,
1028 	.pll[10] = 0,
1029 	.pll[11] = 0,
1030 	.pll[12] = 0x58,
1031 	.pll[13] = 0,
1032 	.pll[14] = 0,
1033 	.pll[15] = 0xA,
1034 	.pll[16] = 0x6,
1035 	.pll[17] = 0xF,
1036 	.pll[18] = 0x85,
1037 	.pll[19] = 0x23,
1038 };
1039 
1040 static const struct intel_c10pll_state mtl_c10_hdmi_594 = {
1041 	.clock = 594000,
1042 	.tx = 0x10,
1043 	.cmn = 0x1,
1044 	.pll[0] = 0xF4,
1045 	.pll[1] = 0,
1046 	.pll[2] = 0x7A,
1047 	.pll[3] = 0,
1048 	.pll[4] = 0,
1049 	.pll[5] = 0,
1050 	.pll[6] = 0,
1051 	.pll[7] = 0,
1052 	.pll[8] = 0x20,
1053 	.pll[9] = 0x1,
1054 	.pll[10] = 0,
1055 	.pll[11] = 0,
1056 	.pll[12] = 0x58,
1057 	.pll[13] = 0,
1058 	.pll[14] = 0,
1059 	.pll[15] = 0x8,
1060 	.pll[16] = 0x6,
1061 	.pll[17] = 0xF,
1062 	.pll[18] = 0x85,
1063 	.pll[19] = 0x23,
1064 };
1065 
1066 /* Precomputed C10 HDMI PLL tables */
1067 static const struct intel_c10pll_state mtl_c10_hdmi_27027 = {
1068 	.clock = 27027,
1069 	.tx = 0x10,
1070 	.cmn = 0x1,
1071 	.pll[0] = 0x34, .pll[1] = 0x00, .pll[2] = 0xC0, .pll[3] = 0x00, .pll[4] = 0x00,
1072 	.pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF,
1073 	.pll[10] = 0xFF, .pll[11] = 0xCC, .pll[12] = 0x9C, .pll[13] = 0xCB, .pll[14] = 0xCC,
1074 	.pll[15] = 0x0D, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23,
1075 };
1076 
1077 static const struct intel_c10pll_state mtl_c10_hdmi_28320 = {
1078 	.clock = 28320,
1079 	.tx = 0x10,
1080 	.cmn = 0x1,
1081 	.pll[0] = 0x04, .pll[1] = 0x00, .pll[2] = 0xCC, .pll[3] = 0x00, .pll[4] = 0x00,
1082 	.pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF,
1083 	.pll[10] = 0xFF, .pll[11] = 0x00, .pll[12] = 0x00, .pll[13] = 0x00, .pll[14] = 0x00,
1084 	.pll[15] = 0x0D, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23,
1085 };
1086 
1087 static const struct intel_c10pll_state mtl_c10_hdmi_30240 = {
1088 	.clock = 30240,
1089 	.tx = 0x10,
1090 	.cmn = 0x1,
1091 	.pll[0] = 0x04, .pll[1] = 0x00, .pll[2] = 0xDC, .pll[3] = 0x00, .pll[4] = 0x00,
1092 	.pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF,
1093 	.pll[10] = 0xFF, .pll[11] = 0x00, .pll[12] = 0x00, .pll[13] = 0x00, .pll[14] = 0x00,
1094 	.pll[15] = 0x0D, .pll[16] = 0x08, .pll[17] = 0xCF, .pll[18] = 0x84, .pll[19] = 0x23,
1095 };
1096 
1097 static const struct intel_c10pll_state mtl_c10_hdmi_31500 = {
1098 	.clock = 31500,
1099 	.tx = 0x10,
1100 	.cmn = 0x1,
1101 	.pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x62, .pll[3] = 0x00, .pll[4] = 0x00,
1102 	.pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF,
1103 	.pll[10] = 0xFF, .pll[11] = 0x00, .pll[12] = 0xA0, .pll[13] = 0x00, .pll[14] = 0x00,
1104 	.pll[15] = 0x0C, .pll[16] = 0x09, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23,
1105 };
1106 
1107 static const struct intel_c10pll_state mtl_c10_hdmi_36000 = {
1108 	.clock = 36000,
1109 	.tx = 0x10,
1110 	.cmn = 0x1,
1111 	.pll[0] = 0xC4, .pll[1] = 0x00, .pll[2] = 0x76, .pll[3] = 0x00, .pll[4] = 0x00,
1112 	.pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF,
1113 	.pll[10] = 0xFF, .pll[11] = 0x00, .pll[12] = 0x00, .pll[13] = 0x00, .pll[14] = 0x00,
1114 	.pll[15] = 0x0C, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23,
1115 };
1116 
1117 static const struct intel_c10pll_state mtl_c10_hdmi_40000 = {
1118 	.clock = 40000,
1119 	.tx = 0x10,
1120 	.cmn = 0x1,
1121 	.pll[0] = 0xB4, .pll[1] = 0x00, .pll[2] = 0x86, .pll[3] = 0x00, .pll[4] = 0x00,
1122 	.pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF,
1123 	.pll[10] = 0xFF, .pll[11] = 0x55, .pll[12] = 0x55, .pll[13] = 0x55, .pll[14] = 0x55,
1124 	.pll[15] = 0x0C, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23,
1125 };
1126 
1127 static const struct intel_c10pll_state mtl_c10_hdmi_49500 = {
1128 	.clock = 49500,
1129 	.tx = 0x10,
1130 	.cmn = 0x1,
1131 	.pll[0] = 0x74, .pll[1] = 0x00, .pll[2] = 0xAE, .pll[3] = 0x00, .pll[4] = 0x00,
1132 	.pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF,
1133 	.pll[10] = 0xFF, .pll[11] = 0x00, .pll[12] = 0x20, .pll[13] = 0x00, .pll[14] = 0x00,
1134 	.pll[15] = 0x0C, .pll[16] = 0x08, .pll[17] = 0xCF, .pll[18] = 0x84, .pll[19] = 0x23,
1135 };
1136 
1137 static const struct intel_c10pll_state mtl_c10_hdmi_50000 = {
1138 	.clock = 50000,
1139 	.tx = 0x10,
1140 	.cmn = 0x1,
1141 	.pll[0] = 0x74, .pll[1] = 0x00, .pll[2] = 0xB0, .pll[3] = 0x00, .pll[4] = 0x00,
1142 	.pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF,
1143 	.pll[10] = 0xFF, .pll[11] = 0xAA, .pll[12] = 0x2A, .pll[13] = 0xA9, .pll[14] = 0xAA,
1144 	.pll[15] = 0x0C, .pll[16] = 0x08, .pll[17] = 0xCF, .pll[18] = 0x84, .pll[19] = 0x23,
1145 };
1146 
1147 static const struct intel_c10pll_state mtl_c10_hdmi_57284 = {
1148 	.clock = 57284,
1149 	.tx = 0x10,
1150 	.cmn = 0x1,
1151 	.pll[0] = 0x34, .pll[1] = 0x00, .pll[2] = 0xCE, .pll[3] = 0x00, .pll[4] = 0x00,
1152 	.pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF,
1153 	.pll[10] = 0xFF, .pll[11] = 0x77, .pll[12] = 0x57, .pll[13] = 0x77, .pll[14] = 0x77,
1154 	.pll[15] = 0x0C, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23,
1155 };
1156 
1157 static const struct intel_c10pll_state mtl_c10_hdmi_58000 = {
1158 	.clock = 58000,
1159 	.tx = 0x10,
1160 	.cmn = 0x1,
1161 	.pll[0] = 0x34, .pll[1] = 0x00, .pll[2] = 0xD0, .pll[3] = 0x00, .pll[4] = 0x00,
1162 	.pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF,
1163 	.pll[10] = 0xFF, .pll[11] = 0x55, .pll[12] = 0xD5, .pll[13] = 0x55, .pll[14] = 0x55,
1164 	.pll[15] = 0x0C, .pll[16] = 0x08, .pll[17] = 0xCF, .pll[18] = 0x84, .pll[19] = 0x23,
1165 };
1166 
1167 static const struct intel_c10pll_state mtl_c10_hdmi_65000 = {
1168 	.clock = 65000,
1169 	.tx = 0x10,
1170 	.cmn = 0x1,
1171 	.pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x66, .pll[3] = 0x00, .pll[4] = 0x00,
1172 	.pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF,
1173 	.pll[10] = 0xFF, .pll[11] = 0x55, .pll[12] = 0xB5, .pll[13] = 0x55, .pll[14] = 0x55,
1174 	.pll[15] = 0x0B, .pll[16] = 0x09, .pll[17] = 0xCF, .pll[18] = 0x84, .pll[19] = 0x23,
1175 };
1176 
1177 static const struct intel_c10pll_state mtl_c10_hdmi_71000 = {
1178 	.clock = 71000,
1179 	.tx = 0x10,
1180 	.cmn = 0x1,
1181 	.pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x72, .pll[3] = 0x00, .pll[4] = 0x00,
1182 	.pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF,
1183 	.pll[10] = 0xFF, .pll[11] = 0x55, .pll[12] = 0xF5, .pll[13] = 0x55, .pll[14] = 0x55,
1184 	.pll[15] = 0x0B, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23,
1185 };
1186 
1187 static const struct intel_c10pll_state mtl_c10_hdmi_74176 = {
1188 	.clock = 74176,
1189 	.tx = 0x10,
1190 	.cmn = 0x1,
1191 	.pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x7A, .pll[3] = 0x00, .pll[4] = 0x00,
1192 	.pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF,
1193 	.pll[10] = 0xFF, .pll[11] = 0x44, .pll[12] = 0x44, .pll[13] = 0x44, .pll[14] = 0x44,
1194 	.pll[15] = 0x0B, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23,
1195 };
1196 
1197 static const struct intel_c10pll_state mtl_c10_hdmi_75000 = {
1198 	.clock = 75000,
1199 	.tx = 0x10,
1200 	.cmn = 0x1,
1201 	.pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x7C, .pll[3] = 0x00, .pll[4] = 0x00,
1202 	.pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF,
1203 	.pll[10] = 0xFF, .pll[11] = 0x00, .pll[12] = 0x20, .pll[13] = 0x00, .pll[14] = 0x00,
1204 	.pll[15] = 0x0B, .pll[16] = 0x08, .pll[17] = 0xCF, .pll[18] = 0x84, .pll[19] = 0x23,
1205 };
1206 
1207 static const struct intel_c10pll_state mtl_c10_hdmi_78750 = {
1208 	.clock = 78750,
1209 	.tx = 0x10,
1210 	.cmn = 0x1,
1211 	.pll[0] = 0xB4, .pll[1] = 0x00, .pll[2] = 0x84, .pll[3] = 0x00, .pll[4] = 0x00,
1212 	.pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF,
1213 	.pll[10] = 0xFF, .pll[11] = 0x00, .pll[12] = 0x08, .pll[13] = 0x00, .pll[14] = 0x00,
1214 	.pll[15] = 0x0B, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23,
1215 };
1216 
1217 static const struct intel_c10pll_state mtl_c10_hdmi_85500 = {
1218 	.clock = 85500,
1219 	.tx = 0x10,
1220 	.cmn = 0x1,
1221 	.pll[0] = 0xB4, .pll[1] = 0x00, .pll[2] = 0x92, .pll[3] = 0x00, .pll[4] = 0x00,
1222 	.pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF,
1223 	.pll[10] = 0xFF, .pll[11] = 0x00, .pll[12] = 0x10, .pll[13] = 0x00, .pll[14] = 0x00,
1224 	.pll[15] = 0x0B, .pll[16] = 0x08, .pll[17] = 0xCF, .pll[18] = 0x84, .pll[19] = 0x23,
1225 };
1226 
1227 static const struct intel_c10pll_state mtl_c10_hdmi_88750 = {
1228 	.clock = 88750,
1229 	.tx = 0x10,
1230 	.cmn = 0x1,
1231 	.pll[0] = 0x74, .pll[1] = 0x00, .pll[2] = 0x98, .pll[3] = 0x00, .pll[4] = 0x00,
1232 	.pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF,
1233 	.pll[10] = 0xFF, .pll[11] = 0xAA, .pll[12] = 0x72, .pll[13] = 0xA9, .pll[14] = 0xAA,
1234 	.pll[15] = 0x0B, .pll[16] = 0x09, .pll[17] = 0xCF, .pll[18] = 0x84, .pll[19] = 0x23,
1235 };
1236 
1237 static const struct intel_c10pll_state mtl_c10_hdmi_106500 = {
1238 	.clock = 106500,
1239 	.tx = 0x10,
1240 	.cmn = 0x1,
1241 	.pll[0] = 0x34, .pll[1] = 0x00, .pll[2] = 0xBC, .pll[3] = 0x00, .pll[4] = 0x00,
1242 	.pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF,
1243 	.pll[10] = 0xFF, .pll[11] = 0x00, .pll[12] = 0xF0, .pll[13] = 0x00, .pll[14] = 0x00,
1244 	.pll[15] = 0x0B, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23,
1245 };
1246 
1247 static const struct intel_c10pll_state mtl_c10_hdmi_108000 = {
1248 	.clock = 108000,
1249 	.tx = 0x10,
1250 	.cmn = 0x1,
1251 	.pll[0] = 0x34, .pll[1] = 0x00, .pll[2] = 0xC0, .pll[3] = 0x00, .pll[4] = 0x00,
1252 	.pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF,
1253 	.pll[10] = 0xFF, .pll[11] = 0x00, .pll[12] = 0x80, .pll[13] = 0x00, .pll[14] = 0x00,
1254 	.pll[15] = 0x0B, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23,
1255 };
1256 
1257 static const struct intel_c10pll_state mtl_c10_hdmi_115500 = {
1258 	.clock = 115500,
1259 	.tx = 0x10,
1260 	.cmn = 0x1,
1261 	.pll[0] = 0x34, .pll[1] = 0x00, .pll[2] = 0xD0, .pll[3] = 0x00, .pll[4] = 0x00,
1262 	.pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF,
1263 	.pll[10] = 0xFF, .pll[11] = 0x00, .pll[12] = 0x50, .pll[13] = 0x00, .pll[14] = 0x00,
1264 	.pll[15] = 0x0B, .pll[16] = 0x08, .pll[17] = 0xCF, .pll[18] = 0x84, .pll[19] = 0x23,
1265 };
1266 
1267 static const struct intel_c10pll_state mtl_c10_hdmi_119000 = {
1268 	.clock = 119000,
1269 	.tx = 0x10,
1270 	.cmn = 0x1,
1271 	.pll[0] = 0x34, .pll[1] = 0x00, .pll[2] = 0xD6, .pll[3] = 0x00, .pll[4] = 0x00,
1272 	.pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF,
1273 	.pll[10] = 0xFF, .pll[11] = 0x55, .pll[12] = 0xF5, .pll[13] = 0x55, .pll[14] = 0x55,
1274 	.pll[15] = 0x0B, .pll[16] = 0x08, .pll[17] = 0xCF, .pll[18] = 0x84, .pll[19] = 0x23,
1275 };
1276 
1277 static const struct intel_c10pll_state mtl_c10_hdmi_135000 = {
1278 	.clock = 135000,
1279 	.tx = 0x10,
1280 	.cmn = 0x1,
1281 	.pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x6C, .pll[3] = 0x00, .pll[4] = 0x00,
1282 	.pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF,
1283 	.pll[10] = 0xFF, .pll[11] = 0x00, .pll[12] = 0x50, .pll[13] = 0x00, .pll[14] = 0x00,
1284 	.pll[15] = 0x0A, .pll[16] = 0x09, .pll[17] = 0xCF, .pll[18] = 0x84, .pll[19] = 0x23,
1285 };
1286 
1287 static const struct intel_c10pll_state mtl_c10_hdmi_138500 = {
1288 	.clock = 138500,
1289 	.tx = 0x10,
1290 	.cmn = 0x1,
1291 	.pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x70, .pll[3] = 0x00, .pll[4] = 0x00,
1292 	.pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF,
1293 	.pll[10] = 0xFF, .pll[11] = 0xAA, .pll[12] = 0x22, .pll[13] = 0xA9, .pll[14] = 0xAA,
1294 	.pll[15] = 0x0A, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23,
1295 };
1296 
1297 static const struct intel_c10pll_state mtl_c10_hdmi_147160 = {
1298 	.clock = 147160,
1299 	.tx = 0x10,
1300 	.cmn = 0x1,
1301 	.pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x78, .pll[3] = 0x00, .pll[4] = 0x00,
1302 	.pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF,
1303 	.pll[10] = 0xFF, .pll[11] = 0x55, .pll[12] = 0xA5, .pll[13] = 0x55, .pll[14] = 0x55,
1304 	.pll[15] = 0x0A, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23,
1305 };
1306 
1307 static const struct intel_c10pll_state mtl_c10_hdmi_148352 = {
1308 	.clock = 148352,
1309 	.tx = 0x10,
1310 	.cmn = 0x1,
1311 	.pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x7A, .pll[3] = 0x00, .pll[4] = 0x00,
1312 	.pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF,
1313 	.pll[10] = 0xFF, .pll[11] = 0x44, .pll[12] = 0x44, .pll[13] = 0x44, .pll[14] = 0x44,
1314 	.pll[15] = 0x0A, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23,
1315 };
1316 
1317 static const struct intel_c10pll_state mtl_c10_hdmi_154000 = {
1318 	.clock = 154000,
1319 	.tx = 0x10,
1320 	.cmn = 0x1,
1321 	.pll[0] = 0xB4, .pll[1] = 0x00, .pll[2] = 0x80, .pll[3] = 0x00, .pll[4] = 0x00,
1322 	.pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF,
1323 	.pll[10] = 0xFF, .pll[11] = 0x55, .pll[12] = 0x35, .pll[13] = 0x55, .pll[14] = 0x55,
1324 	.pll[15] = 0x0A, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23,
1325 };
1326 
1327 static const struct intel_c10pll_state mtl_c10_hdmi_162000 = {
1328 	.clock = 162000,
1329 	.tx = 0x10,
1330 	.cmn = 0x1,
1331 	.pll[0] = 0xB4, .pll[1] = 0x00, .pll[2] = 0x88, .pll[3] = 0x00, .pll[4] = 0x00,
1332 	.pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF,
1333 	.pll[10] = 0xFF, .pll[11] = 0x00, .pll[12] = 0x60, .pll[13] = 0x00, .pll[14] = 0x00,
1334 	.pll[15] = 0x0A, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23,
1335 };
1336 
1337 static const struct intel_c10pll_state mtl_c10_hdmi_167000 = {
1338 	.clock = 167000,
1339 	.tx = 0x10,
1340 	.cmn = 0x1,
1341 	.pll[0] = 0xB4, .pll[1] = 0x00, .pll[2] = 0x8C, .pll[3] = 0x00, .pll[4] = 0x00,
1342 	.pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF,
1343 	.pll[10] = 0xFF, .pll[11] = 0xAA, .pll[12] = 0xFA, .pll[13] = 0xA9, .pll[14] = 0xAA,
1344 	.pll[15] = 0x0A, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23,
1345 };
1346 
1347 static const struct intel_c10pll_state mtl_c10_hdmi_197802 = {
1348 	.clock = 197802,
1349 	.tx = 0x10,
1350 	.cmn = 0x1,
1351 	.pll[0] = 0x74, .pll[1] = 0x00, .pll[2] = 0xAE, .pll[3] = 0x00, .pll[4] = 0x00,
1352 	.pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF,
1353 	.pll[10] = 0xFF, .pll[11] = 0x99, .pll[12] = 0x05, .pll[13] = 0x98, .pll[14] = 0x99,
1354 	.pll[15] = 0x0A, .pll[16] = 0x08, .pll[17] = 0xCF, .pll[18] = 0x84, .pll[19] = 0x23,
1355 };
1356 
1357 static const struct intel_c10pll_state mtl_c10_hdmi_198000 = {
1358 	.clock = 198000,
1359 	.tx = 0x10,
1360 	.cmn = 0x1,
1361 	.pll[0] = 0x74, .pll[1] = 0x00, .pll[2] = 0xAE, .pll[3] = 0x00, .pll[4] = 0x00,
1362 	.pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF,
1363 	.pll[10] = 0xFF, .pll[11] = 0x00, .pll[12] = 0x20, .pll[13] = 0x00, .pll[14] = 0x00,
1364 	.pll[15] = 0x0A, .pll[16] = 0x08, .pll[17] = 0xCF, .pll[18] = 0x84, .pll[19] = 0x23,
1365 };
1366 
1367 static const struct intel_c10pll_state mtl_c10_hdmi_209800 = {
1368 	.clock = 209800,
1369 	.tx = 0x10,
1370 	.cmn = 0x1,
1371 	.pll[0] = 0x34, .pll[1] = 0x00, .pll[2] = 0xBA, .pll[3] = 0x00, .pll[4] = 0x00,
1372 	.pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF,
1373 	.pll[10] = 0xFF, .pll[11] = 0x55, .pll[12] = 0x45, .pll[13] = 0x55, .pll[14] = 0x55,
1374 	.pll[15] = 0x0A, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23,
1375 };
1376 
1377 static const struct intel_c10pll_state mtl_c10_hdmi_241500 = {
1378 	.clock = 241500,
1379 	.tx = 0x10,
1380 	.cmn = 0x1,
1381 	.pll[0] = 0x34, .pll[1] = 0x00, .pll[2] = 0xDA, .pll[3] = 0x00, .pll[4] = 0x00,
1382 	.pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF,
1383 	.pll[10] = 0xFF, .pll[11] = 0x00, .pll[12] = 0xC8, .pll[13] = 0x00, .pll[14] = 0x00,
1384 	.pll[15] = 0x0A, .pll[16] = 0x08, .pll[17] = 0xCF, .pll[18] = 0x84, .pll[19] = 0x23,
1385 };
1386 
1387 static const struct intel_c10pll_state mtl_c10_hdmi_262750 = {
1388 	.clock = 262750,
1389 	.tx = 0x10,
1390 	.cmn = 0x1,
1391 	.pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x68, .pll[3] = 0x00, .pll[4] = 0x00,
1392 	.pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF,
1393 	.pll[10] = 0xFF, .pll[11] = 0xAA, .pll[12] = 0x6C, .pll[13] = 0xA9, .pll[14] = 0xAA,
1394 	.pll[15] = 0x09, .pll[16] = 0x09, .pll[17] = 0xCF, .pll[18] = 0x84, .pll[19] = 0x23,
1395 };
1396 
1397 static const struct intel_c10pll_state mtl_c10_hdmi_268500 = {
1398 	.clock = 268500,
1399 	.tx = 0x10,
1400 	.cmn = 0x1,
1401 	.pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x6A, .pll[3] = 0x00, .pll[4] = 0x00,
1402 	.pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF,
1403 	.pll[10] = 0xFF, .pll[11] = 0x00, .pll[12] = 0xEC, .pll[13] = 0x00, .pll[14] = 0x00,
1404 	.pll[15] = 0x09, .pll[16] = 0x09, .pll[17] = 0xCF, .pll[18] = 0x84, .pll[19] = 0x23,
1405 };
1406 
1407 static const struct intel_c10pll_state mtl_c10_hdmi_296703 = {
1408 	.clock = 296703,
1409 	.tx = 0x10,
1410 	.cmn = 0x1,
1411 	.pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x7A, .pll[3] = 0x00, .pll[4] = 0x00,
1412 	.pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF,
1413 	.pll[10] = 0xFF, .pll[11] = 0x33, .pll[12] = 0x44, .pll[13] = 0x33, .pll[14] = 0x33,
1414 	.pll[15] = 0x09, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23,
1415 };
1416 
1417 static const struct intel_c10pll_state mtl_c10_hdmi_297000 = {
1418 	.clock = 297000,
1419 	.tx = 0x10,
1420 	.cmn = 0x1,
1421 	.pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x7A, .pll[3] = 0x00, .pll[4] = 0x00,
1422 	.pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF,
1423 	.pll[10] = 0xFF, .pll[11] = 0x00, .pll[12] = 0x58, .pll[13] = 0x00, .pll[14] = 0x00,
1424 	.pll[15] = 0x09, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23,
1425 };
1426 
1427 static const struct intel_c10pll_state mtl_c10_hdmi_319750 = {
1428 	.clock = 319750,
1429 	.tx = 0x10,
1430 	.cmn = 0x1,
1431 	.pll[0] = 0xB4, .pll[1] = 0x00, .pll[2] = 0x86, .pll[3] = 0x00, .pll[4] = 0x00,
1432 	.pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF,
1433 	.pll[10] = 0xFF, .pll[11] = 0xAA, .pll[12] = 0x44, .pll[13] = 0xA9, .pll[14] = 0xAA,
1434 	.pll[15] = 0x09, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23,
1435 };
1436 
1437 static const struct intel_c10pll_state mtl_c10_hdmi_497750 = {
1438 	.clock = 497750,
1439 	.tx = 0x10,
1440 	.cmn = 0x1,
1441 	.pll[0] = 0x34, .pll[1] = 0x00, .pll[2] = 0xE2, .pll[3] = 0x00, .pll[4] = 0x00,
1442 	.pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF,
1443 	.pll[10] = 0xFF, .pll[11] = 0x55, .pll[12] = 0x9F, .pll[13] = 0x55, .pll[14] = 0x55,
1444 	.pll[15] = 0x09, .pll[16] = 0x08, .pll[17] = 0xCF, .pll[18] = 0x84, .pll[19] = 0x23,
1445 };
1446 
1447 static const struct intel_c10pll_state mtl_c10_hdmi_592000 = {
1448 	.clock = 592000,
1449 	.tx = 0x10,
1450 	.cmn = 0x1,
1451 	.pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x7A, .pll[3] = 0x00, .pll[4] = 0x00,
1452 	.pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF,
1453 	.pll[10] = 0xFF, .pll[11] = 0x55, .pll[12] = 0x15, .pll[13] = 0x55, .pll[14] = 0x55,
1454 	.pll[15] = 0x08, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23,
1455 };
1456 
1457 static const struct intel_c10pll_state mtl_c10_hdmi_593407 = {
1458 	.clock = 593407,
1459 	.tx = 0x10,
1460 	.cmn = 0x1,
1461 	.pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x7A, .pll[3] = 0x00, .pll[4] = 0x00,
1462 	.pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF,
1463 	.pll[10] = 0xFF, .pll[11] = 0x3B, .pll[12] = 0x44, .pll[13] = 0xBA, .pll[14] = 0xBB,
1464 	.pll[15] = 0x08, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23,
1465 };
1466 
1467 static const struct intel_c10pll_state * const mtl_c10_hdmi_tables[] = {
1468 	&mtl_c10_hdmi_25_2, /* Consolidated Table */
1469 	&mtl_c10_hdmi_27_0, /* Consolidated Table */
1470 	&mtl_c10_hdmi_27027,
1471 	&mtl_c10_hdmi_28320,
1472 	&mtl_c10_hdmi_30240,
1473 	&mtl_c10_hdmi_31500,
1474 	&mtl_c10_hdmi_36000,
1475 	&mtl_c10_hdmi_40000,
1476 	&mtl_c10_hdmi_49500,
1477 	&mtl_c10_hdmi_50000,
1478 	&mtl_c10_hdmi_57284,
1479 	&mtl_c10_hdmi_58000,
1480 	&mtl_c10_hdmi_65000,
1481 	&mtl_c10_hdmi_71000,
1482 	&mtl_c10_hdmi_74176,
1483 	&mtl_c10_hdmi_74_25, /* Consolidated Table */
1484 	&mtl_c10_hdmi_75000,
1485 	&mtl_c10_hdmi_78750,
1486 	&mtl_c10_hdmi_85500,
1487 	&mtl_c10_hdmi_88750,
1488 	&mtl_c10_hdmi_106500,
1489 	&mtl_c10_hdmi_108000,
1490 	&mtl_c10_hdmi_115500,
1491 	&mtl_c10_hdmi_119000,
1492 	&mtl_c10_hdmi_135000,
1493 	&mtl_c10_hdmi_138500,
1494 	&mtl_c10_hdmi_147160,
1495 	&mtl_c10_hdmi_148352,
1496 	&mtl_c10_hdmi_148_5, /* Consolidated Table */
1497 	&mtl_c10_hdmi_154000,
1498 	&mtl_c10_hdmi_162000,
1499 	&mtl_c10_hdmi_167000,
1500 	&mtl_c10_hdmi_197802,
1501 	&mtl_c10_hdmi_198000,
1502 	&mtl_c10_hdmi_209800,
1503 	&mtl_c10_hdmi_241500,
1504 	&mtl_c10_hdmi_262750,
1505 	&mtl_c10_hdmi_268500,
1506 	&mtl_c10_hdmi_296703,
1507 	&mtl_c10_hdmi_297000,
1508 	&mtl_c10_hdmi_319750,
1509 	&mtl_c10_hdmi_497750,
1510 	&mtl_c10_hdmi_592000,
1511 	&mtl_c10_hdmi_593407,
1512 	&mtl_c10_hdmi_594, /* Consolidated Table */
1513 	NULL,
1514 };
1515 
1516 static const struct intel_c20pll_state mtl_c20_hdmi_25_175 = {
1517 	.clock = 25175,
1518 	.tx = {  0xbe88, /* tx cfg0 */
1519 		  0x9800, /* tx cfg1 */
1520 		  0x0000, /* tx cfg2 */
1521 		},
1522 	.cmn = { 0x0500, /* cmn cfg0*/
1523 		  0x0005, /* cmn cfg1 */
1524 		  0x0000, /* cmn cfg2 */
1525 		  0x0000, /* cmn cfg3 */
1526 		},
1527 	.mpllb = { 0xa0d2,	/* mpllb cfg0 */
1528 		   0x7d80,	/* mpllb cfg1 */
1529 		   0x0906,	/* mpllb cfg2 */
1530 		   0xbe40,	/* mpllb cfg3 */
1531 		   0x0000,	/* mpllb cfg4 */
1532 		   0x0000,	/* mpllb cfg5 */
1533 		   0x0200,	/* mpllb cfg6 */
1534 		   0x0001,	/* mpllb cfg7 */
1535 		   0x0000,	/* mpllb cfg8 */
1536 		   0x0000,	/* mpllb cfg9 */
1537 		   0x0001,	/* mpllb cfg10 */
1538 		},
1539 };
1540 
1541 static const struct intel_c20pll_state mtl_c20_hdmi_27_0 = {
1542 	.clock = 27000,
1543 	.tx = {  0xbe88, /* tx cfg0 */
1544 		  0x9800, /* tx cfg1 */
1545 		  0x0000, /* tx cfg2 */
1546 		},
1547 	.cmn = { 0x0500, /* cmn cfg0*/
1548 		  0x0005, /* cmn cfg1 */
1549 		  0x0000, /* cmn cfg2 */
1550 		  0x0000, /* cmn cfg3 */
1551 		},
1552 	.mpllb = { 0xa0e0,	/* mpllb cfg0 */
1553 		   0x7d80,	/* mpllb cfg1 */
1554 		   0x0906,	/* mpllb cfg2 */
1555 		   0xbe40,	/* mpllb cfg3 */
1556 		   0x0000,	/* mpllb cfg4 */
1557 		   0x0000,	/* mpllb cfg5 */
1558 		   0x2200,	/* mpllb cfg6 */
1559 		   0x0001,	/* mpllb cfg7 */
1560 		   0x8000,	/* mpllb cfg8 */
1561 		   0x0000,	/* mpllb cfg9 */
1562 		   0x0001,	/* mpllb cfg10 */
1563 		},
1564 };
1565 
1566 static const struct intel_c20pll_state mtl_c20_hdmi_74_25 = {
1567 	.clock = 74250,
1568 	.tx = {  0xbe88, /* tx cfg0 */
1569 		  0x9800, /* tx cfg1 */
1570 		  0x0000, /* tx cfg2 */
1571 		},
1572 	.cmn = { 0x0500, /* cmn cfg0*/
1573 		  0x0005, /* cmn cfg1 */
1574 		  0x0000, /* cmn cfg2 */
1575 		  0x0000, /* cmn cfg3 */
1576 		},
1577 	.mpllb = { 0x609a,	/* mpllb cfg0 */
1578 		   0x7d40,	/* mpllb cfg1 */
1579 		   0xca06,	/* mpllb cfg2 */
1580 		   0xbe40,	/* mpllb cfg3 */
1581 		   0x0000,	/* mpllb cfg4 */
1582 		   0x0000,	/* mpllb cfg5 */
1583 		   0x2200,	/* mpllb cfg6 */
1584 		   0x0001,	/* mpllb cfg7 */
1585 		   0x5800,	/* mpllb cfg8 */
1586 		   0x0000,	/* mpllb cfg9 */
1587 		   0x0001,	/* mpllb cfg10 */
1588 		},
1589 };
1590 
1591 static const struct intel_c20pll_state mtl_c20_hdmi_148_5 = {
1592 	.clock = 148500,
1593 	.tx = {  0xbe88, /* tx cfg0 */
1594 		  0x9800, /* tx cfg1 */
1595 		  0x0000, /* tx cfg2 */
1596 		},
1597 	.cmn = { 0x0500, /* cmn cfg0*/
1598 		  0x0005, /* cmn cfg1 */
1599 		  0x0000, /* cmn cfg2 */
1600 		  0x0000, /* cmn cfg3 */
1601 		},
1602 	.mpllb = { 0x409a,	/* mpllb cfg0 */
1603 		   0x7d20,	/* mpllb cfg1 */
1604 		   0xca06,	/* mpllb cfg2 */
1605 		   0xbe40,	/* mpllb cfg3 */
1606 		   0x0000,	/* mpllb cfg4 */
1607 		   0x0000,	/* mpllb cfg5 */
1608 		   0x2200,	/* mpllb cfg6 */
1609 		   0x0001,	/* mpllb cfg7 */
1610 		   0x5800,	/* mpllb cfg8 */
1611 		   0x0000,	/* mpllb cfg9 */
1612 		   0x0001,	/* mpllb cfg10 */
1613 		},
1614 };
1615 
1616 static const struct intel_c20pll_state mtl_c20_hdmi_594 = {
1617 	.clock = 594000,
1618 	.tx = {  0xbe88, /* tx cfg0 */
1619 		  0x9800, /* tx cfg1 */
1620 		  0x0000, /* tx cfg2 */
1621 		},
1622 	.cmn = { 0x0500, /* cmn cfg0*/
1623 		  0x0005, /* cmn cfg1 */
1624 		  0x0000, /* cmn cfg2 */
1625 		  0x0000, /* cmn cfg3 */
1626 		},
1627 	.mpllb = { 0x009a,	/* mpllb cfg0 */
1628 		   0x7d08,	/* mpllb cfg1 */
1629 		   0xca06,	/* mpllb cfg2 */
1630 		   0xbe40,	/* mpllb cfg3 */
1631 		   0x0000,	/* mpllb cfg4 */
1632 		   0x0000,	/* mpllb cfg5 */
1633 		   0x2200,	/* mpllb cfg6 */
1634 		   0x0001,	/* mpllb cfg7 */
1635 		   0x5800,	/* mpllb cfg8 */
1636 		   0x0000,	/* mpllb cfg9 */
1637 		   0x0001,	/* mpllb cfg10 */
1638 		},
1639 };
1640 
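/*
 * The remaining entries presumably correspond to the fixed HDMI 2.1 FRL
 * link rates (3, 6, 8, 10 and 12 Gbps).
 */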
1641 static const struct intel_c20pll_state mtl_c20_hdmi_300 = {
1642 	.clock = 3000000,
1643 	.tx = {  0xbe98, /* tx cfg0 */
1644 		  0x9800, /* tx cfg1 */
1645 		  0x0000, /* tx cfg2 */
1646 		},
1647 	.cmn = { 0x0500, /* cmn cfg0*/
1648 		  0x0005, /* cmn cfg1 */
1649 		  0x0000, /* cmn cfg2 */
1650 		  0x0000, /* cmn cfg3 */
1651 		},
1652 	.mpllb = { 0x209c,	/* mpllb cfg0 */
1653 		   0x7d10,	/* mpllb cfg1 */
1654 		   0xca06,	/* mpllb cfg2 */
1655 		   0xbe40,	/* mpllb cfg3 */
1656 		   0x0000,	/* mpllb cfg4 */
1657 		   0x0000,	/* mpllb cfg5 */
1658 		   0x2200,	/* mpllb cfg6 */
1659 		   0x0001,	/* mpllb cfg7 */
1660 		   0x2000,	/* mpllb cfg8 */
1661 		   0x0000,	/* mpllb cfg9 */
1662 		   0x0004,	/* mpllb cfg10 */
1663 		},
1664 };
1665 
1666 static const struct intel_c20pll_state mtl_c20_hdmi_600 = {
1667 	.clock = 6000000,
1668 	.tx = {  0xbe98, /* tx cfg0 */
1669 		  0x9800, /* tx cfg1 */
1670 		  0x0000, /* tx cfg2 */
1671 		},
1672 	.cmn = { 0x0500, /* cmn cfg0*/
1673 		  0x0005, /* cmn cfg1 */
1674 		  0x0000, /* cmn cfg2 */
1675 		  0x0000, /* cmn cfg3 */
1676 		},
1677 	.mpllb = { 0x009c,	/* mpllb cfg0 */
1678 		   0x7d08,	/* mpllb cfg1 */
1679 		   0xca06,	/* mpllb cfg2 */
1680 		   0xbe40,	/* mpllb cfg3 */
1681 		   0x0000,	/* mpllb cfg4 */
1682 		   0x0000,	/* mpllb cfg5 */
1683 		   0x2200,	/* mpllb cfg6 */
1684 		   0x0001,	/* mpllb cfg7 */
1685 		   0x2000,	/* mpllb cfg8 */
1686 		   0x0000,	/* mpllb cfg9 */
1687 		   0x0004,	/* mpllb cfg10 */
1688 		},
1689 };
1690 
1691 static const struct intel_c20pll_state mtl_c20_hdmi_800 = {
1692 	.clock = 8000000,
1693 	.tx = {  0xbe98, /* tx cfg0 */
1694 		  0x9800, /* tx cfg1 */
1695 		  0x0000, /* tx cfg2 */
1696 		},
1697 	.cmn = { 0x0500, /* cmn cfg0 */
1698 		  0x0005, /* cmn cfg1 */
1699 		  0x0000, /* cmn cfg2 */
1700 		  0x0000, /* cmn cfg3 */
1701 		},
1702 	.mpllb = { 0x00d0,	/* mpllb cfg0 */
1703 		   0x7d08,	/* mpllb cfg1 */
1704 		   0x4a06,	/* mpllb cfg2 */
1705 		   0xbe40,	/* mpllb cfg3 */
1706 		   0x0000,	/* mpllb cfg4 */
1707 		   0x0000,	/* mpllb cfg5 */
1708 		   0x2200,	/* mpllb cfg6 */
1709 		   0x0003,	/* mpllb cfg7 */
1710 		   0x2aaa,	/* mpllb cfg8 */
1711 		   0x0002,	/* mpllb cfg9 */
1712 		   0x0004,	/* mpllb cfg10 */
1713 		},
1714 };
1715 
1716 static const struct intel_c20pll_state mtl_c20_hdmi_1000 = {
1717 	.clock = 10000000,
1718 	.tx = {  0xbe98, /* tx cfg0 */
1719 		  0x9800, /* tx cfg1 */
1720 		  0x0000, /* tx cfg2 */
1721 		},
1722 	.cmn = { 0x0500, /* cmn cfg0 */
1723 		  0x0005, /* cmn cfg1 */
1724 		  0x0000, /* cmn cfg2 */
1725 		  0x0000, /* cmn cfg3 */
1726 		},
1727 	.mpllb = { 0x1104,	/* mpllb cfg0 */
1728 		   0x7d08,	/* mpllb cfg1 */
1729 		   0x0a06,	/* mpllb cfg2 */
1730 		   0xbe40,	/* mpllb cfg3 */
1731 		   0x0000,	/* mpllb cfg4 */
1732 		   0x0000,	/* mpllb cfg5 */
1733 		   0x2200,	/* mpllb cfg6 */
1734 		   0x0003,	/* mpllb cfg7 */
1735 		   0x3555,	/* mpllb cfg8 */
1736 		   0x0001,	/* mpllb cfg9 */
1737 		   0x0004,	/* mpllb cfg10 */
1738 		},
1739 };
1740 
1741 static const struct intel_c20pll_state mtl_c20_hdmi_1200 = {
1742 	.clock = 12000000,
1743 	.tx = {  0xbe98, /* tx cfg0 */
1744 		  0x9800, /* tx cfg1 */
1745 		  0x0000, /* tx cfg2 */
1746 		},
1747 	.cmn = { 0x0500, /* cmn cfg0 */
1748 		  0x0005, /* cmn cfg1 */
1749 		  0x0000, /* cmn cfg2 */
1750 		  0x0000, /* cmn cfg3 */
1751 		},
1752 	.mpllb = { 0x0138,	/* mpllb cfg0 */
1753 		   0x7d08,	/* mpllb cfg1 */
1754 		   0x5486,	/* mpllb cfg2 */
1755 		   0xfe40,	/* mpllb cfg3 */
1756 		   0x0000,	/* mpllb cfg4 */
1757 		   0x0000,	/* mpllb cfg5 */
1758 		   0x2200,	/* mpllb cfg6 */
1759 		   0x0001,	/* mpllb cfg7 */
1760 		   0x4000,	/* mpllb cfg8 */
1761 		   0x0000,	/* mpllb cfg9 */
1762 		   0x0004,	/* mpllb cfg10 */
1763 		},
1764 };
1765 
1766 static const struct intel_c20pll_state * const mtl_c20_hdmi_tables[] = {
1767 	&mtl_c20_hdmi_25_175,
1768 	&mtl_c20_hdmi_27_0,
1769 	&mtl_c20_hdmi_74_25,
1770 	&mtl_c20_hdmi_148_5,
1771 	&mtl_c20_hdmi_594,
1772 	&mtl_c20_hdmi_300,
1773 	&mtl_c20_hdmi_600,
1774 	&mtl_c20_hdmi_800,
1775 	&mtl_c20_hdmi_1000,
1776 	&mtl_c20_hdmi_1200,
1777 	NULL,
1778 };
1779 
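/*
 * The C10 PHY has no on-the-fly HDMI PLL computation, so only the TMDS
 * clocks listed in mtl_c10_hdmi_tables can be driven.  Contrast this with
 * intel_c20_phy_check_hdmi_link_rate(), which also accepts any TMDS clock
 * in the 25.175-594 MHz range, since the C20 settings can be computed by
 * intel_c20_compute_hdmi_tmds_pll().
 */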
1780 static int intel_c10_phy_check_hdmi_link_rate(int clock)
1781 {
1782 	const struct intel_c10pll_state * const *tables = mtl_c10_hdmi_tables;
1783 	int i;
1784 
1785 	for (i = 0; tables[i]; i++) {
1786 		if (clock == tables[i]->clock)
1787 			return MODE_OK;
1788 	}
1789 
1790 	return MODE_CLOCK_RANGE;
1791 }
1792 
1793 static const struct intel_c10pll_state * const *
1794 intel_c10pll_tables_get(struct intel_crtc_state *crtc_state,
1795 			struct intel_encoder *encoder)
1796 {
1797 	if (intel_crtc_has_dp_encoder(crtc_state)) {
1798 		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
1799 			return mtl_c10_edp_tables;
1800 		else
1801 			return mtl_c10_dp_tables;
1802 	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
1803 		return mtl_c10_hdmi_tables;
1804 	}
1805 
1806 	MISSING_CASE(encoder->type);
1807 	return NULL;
1808 }
1809 
1810 static void intel_c10pll_update_pll(struct intel_crtc_state *crtc_state,
1811 				    struct intel_encoder *encoder)
1812 {
1813 	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
1814 	struct intel_cx0pll_state *pll_state = &crtc_state->cx0pll_state;
1815 	int i;
1816 
1817 	if (intel_crtc_has_dp_encoder(crtc_state)) {
1818 		if (intel_panel_use_ssc(i915)) {
1819 			struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1820 
1821 			pll_state->ssc_enabled =
1822 				(intel_dp->dpcd[DP_MAX_DOWNSPREAD] & DP_MAX_DOWNSPREAD_0_5);
1823 		}
1824 	}
1825 
1826 	if (pll_state->ssc_enabled)
1827 		return;
1828 
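	/*
	 * Without SSC, clear PLL words 4..8, which appear to carry the SSC
	 * configuration in the reference tables.
	 */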
1829 	drm_WARN_ON(&i915->drm, ARRAY_SIZE(pll_state->c10.pll) < 9);
1830 	for (i = 4; i < 9; i++)
1831 		pll_state->c10.pll[i] = 0;
1832 }
1833 
1834 static int intel_c10pll_calc_state(struct intel_crtc_state *crtc_state,
1835 				   struct intel_encoder *encoder)
1836 {
1837 	const struct intel_c10pll_state * const *tables;
1838 	int i;
1839 
1840 	tables = intel_c10pll_tables_get(crtc_state, encoder);
1841 	if (!tables)
1842 		return -EINVAL;
1843 
1844 	for (i = 0; tables[i]; i++) {
1845 		if (crtc_state->port_clock == tables[i]->clock) {
1846 			crtc_state->cx0pll_state.c10 = *tables[i];
1847 			intel_c10pll_update_pll(crtc_state, encoder);
1848 
1849 			return 0;
1850 		}
1851 	}
1852 
1853 	return -EINVAL;
1854 }
1855 
1856 static void intel_c10pll_readout_hw_state(struct intel_encoder *encoder,
1857 					  struct intel_c10pll_state *pll_state)
1858 {
1859 	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
1860 	u8 lane = INTEL_CX0_LANE0;
1861 	intel_wakeref_t wakeref;
1862 	int i;
1863 
1864 	wakeref = intel_cx0_phy_transaction_begin(encoder);
1865 
1866 	/*
1867 	 * According to the C10 VDR register programming sequence, message bus
1868 	 * access must be enabled before the PHY internal registers can be read.
1869 	 */
1870 	intel_cx0_rmw(i915, encoder->port, lane, PHY_C10_VDR_CONTROL(1),
1871 		      0, C10_VDR_CTRL_MSGBUS_ACCESS,
1872 		      MB_WRITE_COMMITTED);
1873 
1874 	for (i = 0; i < ARRAY_SIZE(pll_state->pll); i++)
1875 		pll_state->pll[i] = intel_cx0_read(i915, encoder->port, lane,
1876 						   PHY_C10_VDR_PLL(i));
1877 
1878 	pll_state->cmn = intel_cx0_read(i915, encoder->port, lane, PHY_C10_VDR_CMN(0));
1879 	pll_state->tx = intel_cx0_read(i915, encoder->port, lane, PHY_C10_VDR_TX(0));
1880 
1881 	intel_cx0_phy_transaction_end(encoder, wakeref);
1882 }
1883 
1884 static void intel_c10_pll_program(struct drm_i915_private *i915,
1885 				  const struct intel_crtc_state *crtc_state,
1886 				  struct intel_encoder *encoder)
1887 {
1888 	const struct intel_c10pll_state *pll_state = &crtc_state->cx0pll_state.c10;
1889 	int i;
1890 
1891 	intel_cx0_rmw(i915, encoder->port, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CONTROL(1),
1892 		      0, C10_VDR_CTRL_MSGBUS_ACCESS,
1893 		      MB_WRITE_COMMITTED);
1894 
1895 	/* Custom width needs to be programmed to 0 for both the phy lanes */
1896 	intel_cx0_rmw(i915, encoder->port, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CUSTOM_WIDTH,
1897 		      C10_VDR_CUSTOM_WIDTH_MASK, C10_VDR_CUSTOM_WIDTH_8_10,
1898 		      MB_WRITE_COMMITTED);
1899 	intel_cx0_rmw(i915, encoder->port, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CONTROL(1),
1900 		      0, C10_VDR_CTRL_UPDATE_CFG,
1901 		      MB_WRITE_COMMITTED);
1902 
1903 	/* Program the pll values only for the master lane */
1904 	for (i = 0; i < ARRAY_SIZE(pll_state->pll); i++)
1905 		intel_cx0_write(i915, encoder->port, INTEL_CX0_LANE0, PHY_C10_VDR_PLL(i),
1906 				pll_state->pll[i],
1907 				(i % 4) ? MB_WRITE_UNCOMMITTED : MB_WRITE_COMMITTED);
1908 
1909 	intel_cx0_write(i915, encoder->port, INTEL_CX0_LANE0, PHY_C10_VDR_CMN(0), pll_state->cmn, MB_WRITE_COMMITTED);
1910 	intel_cx0_write(i915, encoder->port, INTEL_CX0_LANE0, PHY_C10_VDR_TX(0), pll_state->tx, MB_WRITE_COMMITTED);
1911 
1912 	intel_cx0_rmw(i915, encoder->port, INTEL_CX0_LANE0, PHY_C10_VDR_CONTROL(1),
1913 		      0, C10_VDR_CTRL_MASTER_LANE | C10_VDR_CTRL_UPDATE_CFG,
1914 		      MB_WRITE_COMMITTED);
1915 }
1916 
1917 void intel_c10pll_dump_hw_state(struct drm_i915_private *i915,
1918 				const struct intel_c10pll_state *hw_state)
1919 {
1920 	bool fracen;
1921 	int i;
1922 	unsigned int frac_quot = 0, frac_rem = 0, frac_den = 1;
1923 	unsigned int multiplier, tx_clk_div;
1924 
1925 	fracen = hw_state->pll[0] & C10_PLL0_FRACEN;
1926 	drm_dbg_kms(&i915->drm, "c10pll_hw_state: fracen: %s, ",
1927 		    str_yes_no(fracen));
1928 
1929 	if (fracen) {
1930 		frac_quot = hw_state->pll[12] << 8 | hw_state->pll[11];
1931 		frac_rem =  hw_state->pll[14] << 8 | hw_state->pll[13];
1932 		frac_den =  hw_state->pll[10] << 8 | hw_state->pll[9];
1933 		drm_dbg_kms(&i915->drm, "quot: %u, rem: %u, den: %u,\n",
1934 			    frac_quot, frac_rem, frac_den);
1935 	}
1936 
1937 	multiplier = (REG_FIELD_GET8(C10_PLL3_MULTIPLIERH_MASK, hw_state->pll[3]) << 8 |
1938 		      hw_state->pll[2]) / 2 + 16;
1939 	tx_clk_div = REG_FIELD_GET8(C10_PLL15_TXCLKDIV_MASK, hw_state->pll[15]);
1940 	drm_dbg_kms(&i915->drm,
1941 		    "multiplier: %u, tx_clk_div: %u.\n", multiplier, tx_clk_div);
1942 
1943 	drm_dbg_kms(&i915->drm, "c10pll_rawhw_state:");
1944 	drm_dbg_kms(&i915->drm, "tx: 0x%x, cmn: 0x%x\n", hw_state->tx, hw_state->cmn);
1945 
1946 	BUILD_BUG_ON(ARRAY_SIZE(hw_state->pll) % 4);
1947 	for (i = 0; i < ARRAY_SIZE(hw_state->pll); i = i + 4)
1948 		drm_dbg_kms(&i915->drm, "pll[%d] = 0x%x, pll[%d] = 0x%x, pll[%d] = 0x%x, pll[%d] = 0x%x\n",
1949 			    i, hw_state->pll[i], i + 1, hw_state->pll[i + 1],
1950 			    i + 2, hw_state->pll[i + 2], i + 3, hw_state->pll[i + 3]);
1951 }
1952 
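/*
 * Compute C20 MPLLB settings for an arbitrary HDMI TMDS clock (in kHz).
 * The 10x TMDS bit rate is scaled by a power of two up to (but not beyond)
 * the CLOCK_4999MHZ bound to form the VCO frequency, and the feedback
 * multiplier is derived from the ratio of that VCO frequency to the
 * 38.4 MHz reference, split into an integer part and a 16-bit fractional
 * quotient/remainder.
 */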
1953 static int intel_c20_compute_hdmi_tmds_pll(u64 pixel_clock, struct intel_c20pll_state *pll_state)
1954 {
1955 	u64 datarate;
1956 	u64 mpll_tx_clk_div;
1957 	u64 vco_freq_shift;
1958 	u64 vco_freq;
1959 	u64 multiplier;
1960 	u64 mpll_multiplier;
1961 	u64 mpll_fracn_quot;
1962 	u64 mpll_fracn_rem;
1963 	u8  mpllb_ana_freq_vco;
1964 	u8  mpll_div_multiplier;
1965 
1966 	if (pixel_clock < 25175 || pixel_clock > 600000)
1967 		return -EINVAL;
1968 
1969 	datarate = ((u64)pixel_clock * 1000) * 10;
1970 	mpll_tx_clk_div = ilog2(div64_u64((u64)CLOCK_9999MHZ, (u64)datarate));
1971 	vco_freq_shift = ilog2(div64_u64((u64)CLOCK_4999MHZ * (u64)256, (u64)datarate));
1972 	vco_freq = (datarate << vco_freq_shift) >> 8;
1973 	multiplier = div64_u64((vco_freq << 28), (REFCLK_38_4_MHZ >> 4));
1974 	mpll_multiplier = 2 * (multiplier >> 32);
1975 
1976 	mpll_fracn_quot = (multiplier >> 16) & 0xFFFF;
1977 	mpll_fracn_rem  = multiplier & 0xFFFF;
1978 
1979 	mpll_div_multiplier = min_t(u8, div64_u64((vco_freq * 16 + (datarate >> 1)),
1980 						  datarate), 255);
1981 
1982 	if (vco_freq <= DATARATE_3000000000)
1983 		mpllb_ana_freq_vco = MPLLB_ANA_FREQ_VCO_3;
1984 	else if (vco_freq <= DATARATE_3500000000)
1985 		mpllb_ana_freq_vco = MPLLB_ANA_FREQ_VCO_2;
1986 	else if (vco_freq <= DATARATE_4000000000)
1987 		mpllb_ana_freq_vco = MPLLB_ANA_FREQ_VCO_1;
1988 	else
1989 		mpllb_ana_freq_vco = MPLLB_ANA_FREQ_VCO_0;
1990 
1991 	pll_state->clock	= pixel_clock;
1992 	pll_state->tx[0]	= 0xbe88;
1993 	pll_state->tx[1]	= 0x9800;
1994 	pll_state->tx[2]	= 0x0000;
1995 	pll_state->cmn[0]	= 0x0500;
1996 	pll_state->cmn[1]	= 0x0005;
1997 	pll_state->cmn[2]	= 0x0000;
1998 	pll_state->cmn[3]	= 0x0000;
1999 	pll_state->mpllb[0]	= (MPLL_TX_CLK_DIV(mpll_tx_clk_div) |
2000 				   MPLL_MULTIPLIER(mpll_multiplier));
2001 	pll_state->mpllb[1]	= (CAL_DAC_CODE(CAL_DAC_CODE_31) |
2002 				   WORD_CLK_DIV |
2003 				   MPLL_DIV_MULTIPLIER(mpll_div_multiplier));
2004 	pll_state->mpllb[2]	= (MPLLB_ANA_FREQ_VCO(mpllb_ana_freq_vco) |
2005 				   CP_PROP(CP_PROP_20) |
2006 				   CP_INT(CP_INT_6));
2007 	pll_state->mpllb[3]	= (V2I(V2I_2) |
2008 				   CP_PROP_GS(CP_PROP_GS_30) |
2009 				   CP_INT_GS(CP_INT_GS_28));
2010 	pll_state->mpllb[4]	= 0x0000;
2011 	pll_state->mpllb[5]	= 0x0000;
2012 	pll_state->mpllb[6]	= (C20_MPLLB_FRACEN | SSC_UP_SPREAD);
2013 	pll_state->mpllb[7]	= MPLL_FRACN_DEN;
2014 	pll_state->mpllb[8]	= mpll_fracn_quot;
2015 	pll_state->mpllb[9]	= mpll_fracn_rem;
2016 	pll_state->mpllb[10]	= HDMI_DIV(HDMI_DIV_1);
2017 
2018 	return 0;
2019 }
2020 
2021 static int intel_c20_phy_check_hdmi_link_rate(int clock)
2022 {
2023 	const struct intel_c20pll_state * const *tables = mtl_c20_hdmi_tables;
2024 	int i;
2025 
2026 	for (i = 0; tables[i]; i++) {
2027 		if (clock == tables[i]->clock)
2028 			return MODE_OK;
2029 	}
2030 
2031 	if (clock >= 25175 && clock <= 594000)
2032 		return MODE_OK;
2033 
2034 	return MODE_CLOCK_RANGE;
2035 }
2036 
2037 int intel_cx0_phy_check_hdmi_link_rate(struct intel_hdmi *hdmi, int clock)
2038 {
2039 	struct intel_digital_port *dig_port = hdmi_to_dig_port(hdmi);
2040 	struct drm_i915_private *i915 = intel_hdmi_to_i915(hdmi);
2041 	enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
2042 
2043 	if (intel_is_c10phy(i915, phy))
2044 		return intel_c10_phy_check_hdmi_link_rate(clock);
2045 	return intel_c20_phy_check_hdmi_link_rate(clock);
2046 }
2047 
2048 static const struct intel_c20pll_state * const *
2049 intel_c20_pll_tables_get(struct intel_crtc_state *crtc_state,
2050 			 struct intel_encoder *encoder)
2051 {
2052 	if (intel_crtc_has_dp_encoder(crtc_state))
2053 		return mtl_c20_dp_tables;
2054 	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
2055 		return mtl_c20_hdmi_tables;
2056 
2057 	MISSING_CASE(encoder->type);
2058 	return NULL;
2059 }
2060 
2061 static int intel_c20pll_calc_state(struct intel_crtc_state *crtc_state,
2062 				   struct intel_encoder *encoder)
2063 {
2064 	const struct intel_c20pll_state * const *tables;
2065 	int i;
2066 
2067 	/* try computing the C20 HDMI PLL state before falling back to the consolidated tables */
2068 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
2069 		if (intel_c20_compute_hdmi_tmds_pll(crtc_state->port_clock,
2070 						    &crtc_state->cx0pll_state.c20) == 0)
2071 			return 0;
2072 	}
2073 
2074 	tables = intel_c20_pll_tables_get(crtc_state, encoder);
2075 	if (!tables)
2076 		return -EINVAL;
2077 
2078 	for (i = 0; tables[i]; i++) {
2079 		if (crtc_state->port_clock == tables[i]->clock) {
2080 			crtc_state->cx0pll_state.c20 = *tables[i];
2081 			return 0;
2082 		}
2083 	}
2084 
2085 	return -EINVAL;
2086 }
2087 
2088 int intel_cx0pll_calc_state(struct intel_crtc_state *crtc_state,
2089 			    struct intel_encoder *encoder)
2090 {
2091 	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
2092 	enum phy phy = intel_port_to_phy(i915, encoder->port);
2093 
2094 	if (intel_is_c10phy(i915, phy))
2095 		return intel_c10pll_calc_state(crtc_state, encoder);
2096 	return intel_c20pll_calc_state(crtc_state, encoder);
2097 }
2098 
2099 static bool intel_c20_use_mplla(u32 clock)
2100 {
2101 	/* 10G and 20G rates use MPLLA */
2102 	if (clock == 1000000 || clock == 2000000)
2103 		return true;
2104 
2105 	return false;
2106 }
2107 
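/*
 * The C20 PHY keeps two complete configuration contexts (A and B) in SRAM.
 * Read back whichever context is currently selected; intel_c20_pll_program()
 * writes the inactive context and then toggles to it.
 */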
2108 static void intel_c20pll_readout_hw_state(struct intel_encoder *encoder,
2109 					  struct intel_c20pll_state *pll_state)
2110 {
2111 	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
2112 	bool cntx;
2113 	intel_wakeref_t wakeref;
2114 	int i;
2115 
2116 	wakeref = intel_cx0_phy_transaction_begin(encoder);
2117 
2118 	/* 1. Read current context selection */
2119 	cntx = intel_cx0_read(i915, encoder->port, INTEL_CX0_LANE0, PHY_C20_VDR_CUSTOM_SERDES_RATE) & PHY_C20_CONTEXT_TOGGLE;
2120 
2121 	/* Read Tx configuration */
2122 	for (i = 0; i < ARRAY_SIZE(pll_state->tx); i++) {
2123 		if (cntx)
2124 			pll_state->tx[i] = intel_c20_sram_read(i915, encoder->port, INTEL_CX0_LANE0,
2125 							       PHY_C20_B_TX_CNTX_CFG(i));
2126 		else
2127 			pll_state->tx[i] = intel_c20_sram_read(i915, encoder->port, INTEL_CX0_LANE0,
2128 							       PHY_C20_A_TX_CNTX_CFG(i));
2129 	}
2130 
2131 	/* Read common configuration */
2132 	for (i = 0; i < ARRAY_SIZE(pll_state->cmn); i++) {
2133 		if (cntx)
2134 			pll_state->cmn[i] = intel_c20_sram_read(i915, encoder->port, INTEL_CX0_LANE0,
2135 								PHY_C20_B_CMN_CNTX_CFG(i));
2136 		else
2137 			pll_state->cmn[i] = intel_c20_sram_read(i915, encoder->port, INTEL_CX0_LANE0,
2138 								PHY_C20_A_CMN_CNTX_CFG(i));
2139 	}
2140 
2141 	if (pll_state->tx[0] & C20_PHY_USE_MPLLB) {
2142 		/* MPLLB configuration */
2143 		for (i = 0; i < ARRAY_SIZE(pll_state->mpllb); i++) {
2144 			if (cntx)
2145 				pll_state->mpllb[i] = intel_c20_sram_read(i915, encoder->port, INTEL_CX0_LANE0,
2146 									  PHY_C20_B_MPLLB_CNTX_CFG(i));
2147 			else
2148 				pll_state->mpllb[i] = intel_c20_sram_read(i915, encoder->port, INTEL_CX0_LANE0,
2149 									  PHY_C20_A_MPLLB_CNTX_CFG(i));
2150 		}
2151 	} else {
2152 		/* MPLLA configuration */
2153 		for (i = 0; i < ARRAY_SIZE(pll_state->mplla); i++) {
2154 			if (cntx)
2155 				pll_state->mplla[i] = intel_c20_sram_read(i915, encoder->port, INTEL_CX0_LANE0,
2156 									  PHY_C20_B_MPLLA_CNTX_CFG(i));
2157 			else
2158 				pll_state->mplla[i] = intel_c20_sram_read(i915, encoder->port, INTEL_CX0_LANE0,
2159 									  PHY_C20_A_MPLLA_CNTX_CFG(i));
2160 		}
2161 	}
2162 
2163 	intel_cx0_phy_transaction_end(encoder, wakeref);
2164 }
2165 
2166 void intel_c20pll_dump_hw_state(struct drm_i915_private *i915,
2167 				const struct intel_c20pll_state *hw_state)
2168 {
2169 	int i;
2170 
2171 	drm_dbg_kms(&i915->drm, "c20pll_hw_state:\n");
2172 	drm_dbg_kms(&i915->drm, "tx[0] = 0x%.4x, tx[1] = 0x%.4x, tx[2] = 0x%.4x\n",
2173 		    hw_state->tx[0], hw_state->tx[1], hw_state->tx[2]);
2174 	drm_dbg_kms(&i915->drm, "cmn[0] = 0x%.4x, cmn[1] = 0x%.4x, cmn[2] = 0x%.4x, cmn[3] = 0x%.4x\n",
2175 		    hw_state->cmn[0], hw_state->cmn[1], hw_state->cmn[2], hw_state->cmn[3]);
2176 
2177 	if (intel_c20_use_mplla(hw_state->clock)) {
2178 		for (i = 0; i < ARRAY_SIZE(hw_state->mplla); i++)
2179 			drm_dbg_kms(&i915->drm, "mplla[%d] = 0x%.4x\n", i, hw_state->mplla[i]);
2180 	} else {
2181 		for (i = 0; i < ARRAY_SIZE(hw_state->mpllb); i++)
2182 			drm_dbg_kms(&i915->drm, "mpllb[%d] = 0x%.4x\n", i, hw_state->mpllb[i]);
2183 	}
2184 }
2185 
2186 static u8 intel_c20_get_dp_rate(u32 clock)
2187 {
2188 	switch (clock) {
2189 	case 162000: /* 1.62 Gbps DP1.4 */
2190 		return 0;
2191 	case 270000: /* 2.7 Gbps DP1.4 */
2192 		return 1;
2193 	case 540000: /* 5.4 Gbps DP 1.4 */
2194 		return 2;
2195 	case 810000: /* 8.1 Gbps DP1.4 */
2196 		return 3;
2197 	case 216000: /* 2.16 Gbps eDP */
2198 		return 4;
2199 	case 243000: /* 2.43 Gbps eDP */
2200 		return 5;
2201 	case 324000: /* 3.24 Gbps eDP */
2202 		return 6;
2203 	case 432000: /* 4.32 Gbps eDP */
2204 		return 7;
2205 	case 1000000: /* 10 Gbps DP2.0 */
2206 		return 8;
2207 	case 1350000: /* 13.5 Gbps DP2.0 */
2208 		return 9;
2209 	case 2000000: /* 20 Gbps DP2.0 */
2210 		return 10;
2211 	case 648000: /* 6.48 Gbps eDP */
2212 		return 11;
2213 	case 675000: /* 6.75 Gbps eDP */
2214 		return 12;
2215 	default:
2216 		MISSING_CASE(clock);
2217 		return 0;
2218 	}
2219 }
2220 
2221 static u8 intel_c20_get_hdmi_rate(u32 clock)
2222 {
2223 	if (clock >= 25175 && clock <= 600000)
2224 		return 0;
2225 
2226 	switch (clock) {
2227 	case 300000: /* 3 Gbps */
2228 	case 600000: /* 6 Gbps */
2229 	case 1200000: /* 12 Gbps */
2230 		return 1;
2231 	case 800000: /* 8 Gbps */
2232 		return 2;
2233 	case 1000000: /* 10 Gbps */
2234 		return 3;
2235 	default:
2236 		MISSING_CASE(clock);
2237 		return 0;
2238 	}
2239 }
2240 
2241 static bool is_dp2(u32 clock)
2242 {
2243 	/* DP2.0 clock rates */
2244 	if (clock == 1000000 || clock == 1350000 || clock == 2000000)
2245 		return true;
2246 
2247 	return false;
2248 }
2249 
2250 static bool is_hdmi_frl(u32 clock)
2251 {
2252 	switch (clock) {
2253 	case 300000: /* 3 Gbps */
2254 	case 600000: /* 6 Gbps */
2255 	case 800000: /* 8 Gbps */
2256 	case 1000000: /* 10 Gbps */
2257 	case 1200000: /* 12 Gbps */
2258 		return true;
2259 	default:
2260 		return false;
2261 	}
2262 }
2263 
2264 static bool intel_c20_protocol_switch_valid(struct intel_encoder *encoder)
2265 {
2266 	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
2267 
2268 	/* banks should not be cleared for DPALT/USB4/TBT modes */
2269 	/* TODO: optimize re-calibration in legacy mode */
2270 	return intel_tc_port_in_legacy_mode(intel_dig_port);
2271 }
2272 
2273 static int intel_get_c20_custom_width(u32 clock, bool dp)
2274 {
2275 	if (dp && is_dp2(clock))
2276 		return 2;
2277 	else if (is_hdmi_frl(clock))
2278 		return 1;
2279 	else
2280 		return 0;
2281 }
2282 
2283 static void intel_c20_pll_program(struct drm_i915_private *i915,
2284 				  const struct intel_crtc_state *crtc_state,
2285 				  struct intel_encoder *encoder)
2286 {
2287 	const struct intel_c20pll_state *pll_state = &crtc_state->cx0pll_state.c20;
2288 	bool dp = false;
2289 	int lane = crtc_state->lane_count > 2 ? INTEL_CX0_BOTH_LANES : INTEL_CX0_LANE0;
2290 	u32 clock = crtc_state->port_clock;
2291 	bool cntx;
2292 	int i;
2293 
2294 	if (intel_crtc_has_dp_encoder(crtc_state))
2295 		dp = true;
2296 
2297 	/* 1. Read current context selection */
2298 	cntx = intel_cx0_read(i915, encoder->port, INTEL_CX0_LANE0, PHY_C20_VDR_CUSTOM_SERDES_RATE) & BIT(0);
2299 
2300 	/*
2301 	 * 2. If there is a protocol switch from HDMI to DP or vice versa, clear
2302 	 * the lane #0 MPLLB CAL_DONE_BANK.  DP2.0 10G and 20G rates enable MPLLA.
2303 	 * The protocol switch is only applicable for MPLLA.
2304 	 */
2305 	if (intel_c20_protocol_switch_valid(encoder)) {
2306 		for (i = 0; i < 4; i++)
2307 			intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0, RAWLANEAONX_DIG_TX_MPLLB_CAL_DONE_BANK(i), 0);
2308 		usleep_range(4000, 4100);
2309 	}
2310 
2311 	/* 3. Write SRAM configuration context. If A in use, write configuration to B context */
2312 	/* 3.1 Tx configuration */
2313 	for (i = 0; i < ARRAY_SIZE(pll_state->tx); i++) {
2314 		if (cntx)
2315 			intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0, PHY_C20_A_TX_CNTX_CFG(i), pll_state->tx[i]);
2316 		else
2317 			intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0, PHY_C20_B_TX_CNTX_CFG(i), pll_state->tx[i]);
2318 	}
2319 
2320 	/* 3.2 common configuration */
2321 	for (i = 0; i < ARRAY_SIZE(pll_state->cmn); i++) {
2322 		if (cntx)
2323 			intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0, PHY_C20_A_CMN_CNTX_CFG(i), pll_state->cmn[i]);
2324 		else
2325 			intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0, PHY_C20_B_CMN_CNTX_CFG(i), pll_state->cmn[i]);
2326 	}
2327 
2328 	/* 3.3 mpllb or mplla configuration */
2329 	if (intel_c20_use_mplla(clock)) {
2330 		for (i = 0; i < ARRAY_SIZE(pll_state->mplla); i++) {
2331 			if (cntx)
2332 				intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0,
2333 						     PHY_C20_A_MPLLA_CNTX_CFG(i),
2334 						     pll_state->mplla[i]);
2335 			else
2336 				intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0,
2337 						     PHY_C20_B_MPLLA_CNTX_CFG(i),
2338 						     pll_state->mplla[i]);
2339 		}
2340 	} else {
2341 		for (i = 0; i < ARRAY_SIZE(pll_state->mpllb); i++) {
2342 			if (cntx)
2343 				intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0,
2344 						     PHY_C20_A_MPLLB_CNTX_CFG(i),
2345 						     pll_state->mpllb[i]);
2346 			else
2347 				intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0,
2348 						     PHY_C20_B_MPLLB_CNTX_CFG(i),
2349 						     pll_state->mpllb[i]);
2350 		}
2351 	}
2352 
2353 	/* 4. Program custom width to match the link protocol */
2354 	intel_cx0_rmw(i915, encoder->port, lane, PHY_C20_VDR_CUSTOM_WIDTH,
2355 		      PHY_C20_CUSTOM_WIDTH_MASK,
2356 		      PHY_C20_CUSTOM_WIDTH(intel_get_c20_custom_width(clock, dp)),
2357 		      MB_WRITE_COMMITTED);
2358 
2359 	/* 5. For DP or 6. For HDMI */
2360 	if (dp) {
2361 		intel_cx0_rmw(i915, encoder->port, lane, PHY_C20_VDR_CUSTOM_SERDES_RATE,
2362 			      BIT(6) | PHY_C20_CUSTOM_SERDES_MASK,
2363 			      BIT(6) | PHY_C20_CUSTOM_SERDES(intel_c20_get_dp_rate(clock)),
2364 			      MB_WRITE_COMMITTED);
2365 	} else {
2366 		intel_cx0_rmw(i915, encoder->port, lane, PHY_C20_VDR_CUSTOM_SERDES_RATE,
2367 			      BIT(7) | PHY_C20_CUSTOM_SERDES_MASK,
2368 			      is_hdmi_frl(clock) ? BIT(7) : 0,
2369 			      MB_WRITE_COMMITTED);
2370 
2371 		intel_cx0_write(i915, encoder->port, INTEL_CX0_BOTH_LANES, PHY_C20_VDR_HDMI_RATE,
2372 				intel_c20_get_hdmi_rate(clock),
2373 				MB_WRITE_COMMITTED);
2374 	}
2375 
2376 	/*
2377 	 * 7. Toggle the context bit in the vendor specific registers so that
2378 	 * the hardware loads the updated programming.
2379 	 */
2380 	intel_cx0_rmw(i915, encoder->port, lane, PHY_C20_VDR_CUSTOM_SERDES_RATE,
2381 		      BIT(0), cntx ? 0 : 1, MB_WRITE_COMMITTED);
2382 }
2383 
2384 static int intel_c10pll_calc_port_clock(struct intel_encoder *encoder,
2385 					const struct intel_c10pll_state *pll_state)
2386 {
2387 	unsigned int frac_quot = 0, frac_rem = 0, frac_den = 1;
2388 	unsigned int multiplier, tx_clk_div, hdmi_div, refclk = 38400;
2389 	int tmpclk = 0;
2390 
2391 	if (pll_state->pll[0] & C10_PLL0_FRACEN) {
2392 		frac_quot = pll_state->pll[12] << 8 | pll_state->pll[11];
2393 		frac_rem =  pll_state->pll[14] << 8 | pll_state->pll[13];
2394 		frac_den =  pll_state->pll[10] << 8 | pll_state->pll[9];
2395 	}
2396 
2397 	multiplier = (REG_FIELD_GET8(C10_PLL3_MULTIPLIERH_MASK, pll_state->pll[3]) << 8 |
2398 		      pll_state->pll[2]) / 2 + 16;
2399 
2400 	tx_clk_div = REG_FIELD_GET8(C10_PLL15_TXCLKDIV_MASK, pll_state->pll[15]);
2401 	hdmi_div = REG_FIELD_GET8(C10_PLL15_HDMIDIV_MASK, pll_state->pll[15]);
2402 
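	/*
	 * Symbol clock = refclk * (multiplier + frac) / (10 << tx_clk_div),
	 * where frac = (frac_quot + frac_rem / frac_den) / 2^16.  The result
	 * is doubled when the HDMI divider is set.
	 */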
2403 	tmpclk = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, (multiplier << 16) + frac_quot) +
2404 				     DIV_ROUND_CLOSEST(refclk * frac_rem, frac_den),
2405 				     10 << (tx_clk_div + 16));
2406 	tmpclk *= (hdmi_div ? 2 : 1);
2407 
2408 	return tmpclk;
2409 }
2410 
2411 static int intel_c20pll_calc_port_clock(struct intel_encoder *encoder,
2412 					const struct intel_c20pll_state *pll_state)
2413 {
2414 	unsigned int frac, frac_en, frac_quot, frac_rem, frac_den;
2415 	unsigned int multiplier, refclk = 38400;
2416 	unsigned int tx_clk_div;
2417 	unsigned int ref_clk_mpllb_div;
2418 	unsigned int fb_clk_div4_en;
2419 	unsigned int ref, vco;
2420 	unsigned int tx_rate_mult;
2421 	unsigned int tx_rate = REG_FIELD_GET(C20_PHY_TX_RATE, pll_state->tx[0]);
2422 
2423 	if (pll_state->tx[0] & C20_PHY_USE_MPLLB) {
2424 		tx_rate_mult = 1;
2425 		frac_en = REG_FIELD_GET(C20_MPLLB_FRACEN, pll_state->mpllb[6]);
2426 		frac_quot = pll_state->mpllb[8];
2427 		frac_rem =  pll_state->mpllb[9];
2428 		frac_den =  pll_state->mpllb[7];
2429 		multiplier = REG_FIELD_GET(C20_MULTIPLIER_MASK, pll_state->mpllb[0]);
2430 		tx_clk_div = REG_FIELD_GET(C20_MPLLB_TX_CLK_DIV_MASK, pll_state->mpllb[0]);
2431 		ref_clk_mpllb_div = REG_FIELD_GET(C20_REF_CLK_MPLLB_DIV_MASK, pll_state->mpllb[6]);
2432 		fb_clk_div4_en = 0;
2433 	} else {
2434 		tx_rate_mult = 2;
2435 		frac_en = REG_FIELD_GET(C20_MPLLA_FRACEN, pll_state->mplla[6]);
2436 		frac_quot = pll_state->mplla[8];
2437 		frac_rem =  pll_state->mplla[9];
2438 		frac_den =  pll_state->mplla[7];
2439 		multiplier = REG_FIELD_GET(C20_MULTIPLIER_MASK, pll_state->mplla[0]);
2440 		tx_clk_div = REG_FIELD_GET(C20_MPLLA_TX_CLK_DIV_MASK, pll_state->mplla[1]);
2441 		ref_clk_mpllb_div = REG_FIELD_GET(C20_REF_CLK_MPLLB_DIV_MASK, pll_state->mplla[6]);
2442 		fb_clk_div4_en = REG_FIELD_GET(C20_FB_CLK_DIV4_EN, pll_state->mplla[0]);
2443 	}
2444 
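	/*
	 * vco = ref * (multiplier / 4 + frac / 2^17) / 10, with ref derived
	 * from the 38.4 MHz refclk and the MPLL reference dividers.  The port
	 * clock is then the VCO scaled up by 2^tx_rate_mult and divided down
	 * by 2^tx_clk_div and the per-lane 2^tx_rate.
	 */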
2445 	if (frac_en)
2446 		frac = frac_quot + DIV_ROUND_CLOSEST(frac_rem, frac_den);
2447 	else
2448 		frac = 0;
2449 
2450 	ref = DIV_ROUND_CLOSEST(refclk * (1 << (1 + fb_clk_div4_en)), 1 << ref_clk_mpllb_div);
2451 	vco = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(ref, (multiplier << (17 - 2)) + frac) >> 17, 10);
2452 
2453 	return vco << tx_rate_mult >> tx_clk_div >> tx_rate;
2454 }
2455 
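/*
 * Program XELPDP_PORT_CLOCK_CTL: route the PHY clock from lane 1 when the
 * lanes are reversed, ungate the forward clock, select the div18 clock for
 * HDMI FRL rates (maxpclk otherwise), and enable SSC on MPLLA for the DP2.0
 * 10G/20G rates or on MPLLB for everything else.
 */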
2456 static void intel_program_port_clock_ctl(struct intel_encoder *encoder,
2457 					 const struct intel_crtc_state *crtc_state,
2458 					 bool lane_reversal)
2459 {
2460 	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
2461 	u32 val = 0;
2462 
2463 	intel_de_rmw(i915, XELPDP_PORT_BUF_CTL1(encoder->port), XELPDP_PORT_REVERSAL,
2464 		     lane_reversal ? XELPDP_PORT_REVERSAL : 0);
2465 
2466 	if (lane_reversal)
2467 		val |= XELPDP_LANE1_PHY_CLOCK_SELECT;
2468 
2469 	val |= XELPDP_FORWARD_CLOCK_UNGATE;
2470 
2471 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) &&
2472 	    is_hdmi_frl(crtc_state->port_clock))
2473 		val |= XELPDP_DDI_CLOCK_SELECT(XELPDP_DDI_CLOCK_SELECT_DIV18CLK);
2474 	else
2475 		val |= XELPDP_DDI_CLOCK_SELECT(XELPDP_DDI_CLOCK_SELECT_MAXPCLK);
2476 
2477 	/* TODO: HDMI FRL */
2478 	/* DP2.0 10G and 20G rates enable MPLLA */
2479 	if (crtc_state->port_clock == 1000000 || crtc_state->port_clock == 2000000)
2480 		val |= crtc_state->cx0pll_state.ssc_enabled ? XELPDP_SSC_ENABLE_PLLA : 0;
2481 	else
2482 		val |= crtc_state->cx0pll_state.ssc_enabled ? XELPDP_SSC_ENABLE_PLLB : 0;
2483 
2484 	intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(encoder->port),
2485 		     XELPDP_LANE1_PHY_CLOCK_SELECT | XELPDP_FORWARD_CLOCK_UNGATE |
2486 		     XELPDP_DDI_CLOCK_SELECT_MASK | XELPDP_SSC_ENABLE_PLLA |
2487 		     XELPDP_SSC_ENABLE_PLLB, val);
2488 }
2489 
2490 static u32 intel_cx0_get_powerdown_update(u8 lane_mask)
2491 {
2492 	u32 val = 0;
2493 	int lane = 0;
2494 
2495 	for_each_cx0_lane_in_mask(lane_mask, lane)
2496 		val |= XELPDP_LANE_POWERDOWN_UPDATE(lane);
2497 
2498 	return val;
2499 }
2500 
2501 static u32 intel_cx0_get_powerdown_state(u8 lane_mask, u8 state)
2502 {
2503 	u32 val = 0;
2504 	int lane = 0;
2505 
2506 	for_each_cx0_lane_in_mask(lane_mask, lane)
2507 		val |= XELPDP_LANE_POWERDOWN_NEW_STATE(lane, state);
2508 
2509 	return val;
2510 }
2511 
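/*
 * Change the power state of the selected PHY lanes: program the new state,
 * make sure no message bus transaction is still pending, then trigger the
 * powerdown update and wait for the hardware to acknowledge it.
 */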
2512 static void intel_cx0_powerdown_change_sequence(struct drm_i915_private *i915,
2513 						enum port port,
2514 						u8 lane_mask, u8 state)
2515 {
2516 	enum phy phy = intel_port_to_phy(i915, port);
2517 	int lane;
2518 
2519 	intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(port),
2520 		     intel_cx0_get_powerdown_state(INTEL_CX0_BOTH_LANES, XELPDP_LANE_POWERDOWN_NEW_STATE_MASK),
2521 		     intel_cx0_get_powerdown_state(lane_mask, state));
2522 
2523 	/* Wait for pending transactions. */
2524 	for_each_cx0_lane_in_mask(lane_mask, lane)
2525 		if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane),
2526 					    XELPDP_PORT_M2P_TRANSACTION_PENDING,
2527 					    XELPDP_MSGBUS_TIMEOUT_SLOW)) {
2528 			drm_dbg_kms(&i915->drm,
2529 				    "PHY %c Timeout waiting for previous transaction to complete. Reset the bus.\n",
2530 				    phy_name(phy));
2531 			intel_cx0_bus_reset(i915, port, lane);
2532 		}
2533 
2534 	intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(port),
2535 		     intel_cx0_get_powerdown_update(INTEL_CX0_BOTH_LANES),
2536 		     intel_cx0_get_powerdown_update(lane_mask));
2537 
2538 	/* Wait for the powerdown update to be acknowledged. */
2539 	if (__intel_de_wait_for_register(i915, XELPDP_PORT_BUF_CTL2(port),
2540 					 intel_cx0_get_powerdown_update(lane_mask), 0,
2541 					 XELPDP_PORT_POWERDOWN_UPDATE_TIMEOUT_US, 0, NULL))
2542 		drm_warn(&i915->drm, "PHY %c failed to complete the powerdown update after %dus.\n",
2543 			 phy_name(phy), XELPDP_PORT_POWERDOWN_UPDATE_TIMEOUT_US);
2544 }
2545 
2546 static void intel_cx0_setup_powerdown(struct drm_i915_private *i915, enum port port)
2547 {
2548 	intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(port),
2549 		     XELPDP_POWER_STATE_READY_MASK,
2550 		     XELPDP_POWER_STATE_READY(CX0_P2_STATE_READY));
2551 	intel_de_rmw(i915, XELPDP_PORT_BUF_CTL3(port),
2552 		     XELPDP_POWER_STATE_ACTIVE_MASK |
2553 		     XELPDP_PLL_LANE_STAGGERING_DELAY_MASK,
2554 		     XELPDP_POWER_STATE_ACTIVE(CX0_P0_STATE_ACTIVE) |
2555 		     XELPDP_PLL_LANE_STAGGERING_DELAY(0));
2556 }
2557 
2558 static u32 intel_cx0_get_pclk_refclk_request(u8 lane_mask)
2559 {
2560 	u32 val = 0;
2561 	int lane = 0;
2562 
2563 	for_each_cx0_lane_in_mask(lane_mask, lane)
2564 		val |= XELPDP_LANE_PCLK_REFCLK_REQUEST(lane);
2565 
2566 	return val;
2567 }
2568 
2569 static u32 intel_cx0_get_pclk_refclk_ack(u8 lane_mask)
2570 {
2571 	u32 val = 0;
2572 	int lane = 0;
2573 
2574 	for_each_cx0_lane_in_mask(lane_mask, lane)
2575 		val |= XELPDP_LANE_PCLK_REFCLK_ACK(lane);
2576 
2577 	return val;
2578 }
2579 
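/*
 * Bring the owned PHY lanes out of reset: wait for the SOC to report the
 * PHY as ready, assert the lane pipe reset, request the refclk for the
 * maxPCLK lane, move both lanes to the P2 reset power state, program the
 * powerdown defaults and finally deassert the pipe reset.
 */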
2580 static void intel_cx0_phy_lane_reset(struct drm_i915_private *i915,
2581 				     struct intel_encoder *encoder,
2582 				     bool lane_reversal)
2583 {
2584 	enum port port = encoder->port;
2585 	enum phy phy = intel_port_to_phy(i915, port);
2586 	u8 owned_lane_mask = intel_cx0_get_owned_lane_mask(i915, encoder);
2587 	u8 lane_mask = lane_reversal ? INTEL_CX0_LANE1 : INTEL_CX0_LANE0;
2588 	u32 lane_pipe_reset = owned_lane_mask == INTEL_CX0_BOTH_LANES
2589 				? XELPDP_LANE_PIPE_RESET(0) | XELPDP_LANE_PIPE_RESET(1)
2590 				: XELPDP_LANE_PIPE_RESET(0);
2591 	u32 lane_phy_current_status = owned_lane_mask == INTEL_CX0_BOTH_LANES
2592 					? (XELPDP_LANE_PHY_CURRENT_STATUS(0) |
2593 					   XELPDP_LANE_PHY_CURRENT_STATUS(1))
2594 					: XELPDP_LANE_PHY_CURRENT_STATUS(0);
2595 
2596 	if (__intel_de_wait_for_register(i915, XELPDP_PORT_BUF_CTL1(port),
2597 					 XELPDP_PORT_BUF_SOC_PHY_READY,
2598 					 XELPDP_PORT_BUF_SOC_PHY_READY,
2599 					 XELPDP_PORT_BUF_SOC_READY_TIMEOUT_US, 0, NULL))
2600 		drm_warn(&i915->drm, "PHY %c failed to bring out of SOC reset after %dus.\n",
2601 			 phy_name(phy), XELPDP_PORT_BUF_SOC_READY_TIMEOUT_US);
2602 
2603 	intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(port), lane_pipe_reset,
2604 		     lane_pipe_reset);
2605 
2606 	if (__intel_de_wait_for_register(i915, XELPDP_PORT_BUF_CTL2(port),
2607 					 lane_phy_current_status, lane_phy_current_status,
2608 					 XELPDP_PORT_RESET_START_TIMEOUT_US, 0, NULL))
2609 		drm_warn(&i915->drm, "PHY %c failed to bring out of Lane reset after %dus.\n",
2610 			 phy_name(phy), XELPDP_PORT_RESET_START_TIMEOUT_US);
2611 
2612 	intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(port),
2613 		     intel_cx0_get_pclk_refclk_request(owned_lane_mask),
2614 		     intel_cx0_get_pclk_refclk_request(lane_mask));
2615 
2616 	if (__intel_de_wait_for_register(i915, XELPDP_PORT_CLOCK_CTL(port),
2617 					 intel_cx0_get_pclk_refclk_ack(owned_lane_mask),
2618 					 intel_cx0_get_pclk_refclk_ack(lane_mask),
2619 					 XELPDP_REFCLK_ENABLE_TIMEOUT_US, 0, NULL))
2620 		drm_warn(&i915->drm, "PHY %c failed to request refclk after %dus.\n",
2621 			 phy_name(phy), XELPDP_REFCLK_ENABLE_TIMEOUT_US);
2622 
2623 	intel_cx0_powerdown_change_sequence(i915, port, INTEL_CX0_BOTH_LANES,
2624 					    CX0_P2_STATE_RESET);
2625 	intel_cx0_setup_powerdown(i915, port);
2626 
2627 	intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(port), lane_pipe_reset, 0);
2628 
2629 	if (intel_de_wait_for_clear(i915, XELPDP_PORT_BUF_CTL2(port), lane_phy_current_status,
2630 				    XELPDP_PORT_RESET_END_TIMEOUT))
2631 		drm_warn(&i915->drm, "PHY %c failed to bring out of Lane reset after %dms.\n",
2632 			 phy_name(phy), XELPDP_PORT_RESET_END_TIMEOUT);
2633 }
2634 
2635 static void intel_cx0_program_phy_lane(struct drm_i915_private *i915,
2636 				       struct intel_encoder *encoder, int lane_count,
2637 				       bool lane_reversal)
2638 {
2639 	int i;
2640 	u8 disables;
2641 	bool dp_alt_mode = intel_tc_port_in_dp_alt_mode(enc_to_dig_port(encoder));
2642 	u8 owned_lane_mask = intel_cx0_get_owned_lane_mask(i915, encoder);
2643 	enum port port = encoder->port;
2644 
2645 	if (intel_is_c10phy(i915, intel_port_to_phy(i915, port)))
2646 		intel_cx0_rmw(i915, port, owned_lane_mask,
2647 			      PHY_C10_VDR_CONTROL(1), 0,
2648 			      C10_VDR_CTRL_MSGBUS_ACCESS,
2649 			      MB_WRITE_COMMITTED);
2650 
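	/*
	 * Each PHY lane has two transmitters.  Build a 4-bit mask of the TX
	 * units to disable: the lane_count transmitters in use are kept at
	 * the bottom of the mask (at the top when the lanes are reversed)
	 * and the rest are disabled.  DP-alt mode with a single lane is
	 * special-cased so that PHY lane 0 keeps its second transmitter
	 * active.
	 */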
2651 	if (lane_reversal)
2652 		disables = REG_GENMASK8(3, 0) >> lane_count;
2653 	else
2654 		disables = REG_GENMASK8(3, 0) << lane_count;
2655 
2656 	if (dp_alt_mode && lane_count == 1) {
2657 		disables &= ~REG_GENMASK8(1, 0);
2658 		disables |= REG_FIELD_PREP8(REG_GENMASK8(1, 0), 0x1);
2659 	}
2660 
2661 	for (i = 0; i < 4; i++) {
2662 		int tx = i % 2 + 1;
2663 		u8 lane_mask = i < 2 ? INTEL_CX0_LANE0 : INTEL_CX0_LANE1;
2664 
2665 		if (!(owned_lane_mask & lane_mask))
2666 			continue;
2667 
2668 		intel_cx0_rmw(i915, port, lane_mask, PHY_CX0_TX_CONTROL(tx, 2),
2669 			      CONTROL2_DISABLE_SINGLE_TX,
2670 			      disables & BIT(i) ? CONTROL2_DISABLE_SINGLE_TX : 0,
2671 			      MB_WRITE_COMMITTED);
2672 	}
2673 
2674 	if (intel_is_c10phy(i915, intel_port_to_phy(i915, port)))
2675 		intel_cx0_rmw(i915, port, owned_lane_mask,
2676 			      PHY_C10_VDR_CONTROL(1), 0,
2677 			      C10_VDR_CTRL_UPDATE_CFG,
2678 			      MB_WRITE_COMMITTED);
2679 }
2680 
2681 static u32 intel_cx0_get_pclk_pll_request(u8 lane_mask)
2682 {
2683 	u32 val = 0;
2684 	int lane = 0;
2685 
2686 	for_each_cx0_lane_in_mask(lane_mask, lane)
2687 		val |= XELPDP_LANE_PCLK_PLL_REQUEST(lane);
2688 
2689 	return val;
2690 }
2691 
2692 static u32 intel_cx0_get_pclk_pll_ack(u8 lane_mask)
2693 {
2694 	u32 val = 0;
2695 	int lane = 0;
2696 
2697 	for_each_cx0_lane_in_mask(lane_mask, lane)
2698 		val |= XELPDP_LANE_PCLK_PLL_ACK(lane);
2699 
2700 	return val;
2701 }
2702 
2703 static void intel_cx0pll_enable(struct intel_encoder *encoder,
2704 				const struct intel_crtc_state *crtc_state)
2705 {
2706 	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
2707 	enum phy phy = intel_port_to_phy(i915, encoder->port);
2708 	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
2709 	bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
2710 	u8 maxpclk_lane = lane_reversal ? INTEL_CX0_LANE1 :
2711 					  INTEL_CX0_LANE0;
2712 	intel_wakeref_t wakeref = intel_cx0_phy_transaction_begin(encoder);
2713 
2714 	/*
2715 	 * 1. Program PORT_CLOCK_CTL REGISTER to configure
2716 	 * clock muxes, gating and SSC
2717 	 */
2718 	intel_program_port_clock_ctl(encoder, crtc_state, lane_reversal);
2719 
2720 	/* 2. Bring PHY out of reset. */
2721 	intel_cx0_phy_lane_reset(i915, encoder, lane_reversal);
2722 
2723 	/*
2724 	 * 3. Change Phy power state to Ready.
2725 	 * TODO: For DP alt mode use only one lane.
2726 	 */
2727 	intel_cx0_powerdown_change_sequence(i915, encoder->port, INTEL_CX0_BOTH_LANES,
2728 					    CX0_P2_STATE_READY);
2729 
2730 	/*
2731 	 * 4. Program PORT_MSGBUS_TIMER register's Message Bus Timer field to 0xA000.
2732 	 *    (This is done inside intel_cx0_phy_transaction_begin(), since we would need
2733 	 *    the right timer thresholds for readouts too.)
2734 	 */
2735 
2736 	/* 5. Program PHY internal PLL internal registers. */
2737 	if (intel_is_c10phy(i915, phy))
2738 		intel_c10_pll_program(i915, crtc_state, encoder);
2739 	else
2740 		intel_c20_pll_program(i915, crtc_state, encoder);
2741 
2742 	/*
2743 	 * 6. Program the enabled and disabled owned PHY lane
2744 	 * transmitters over message bus
2745 	 */
2746 	intel_cx0_program_phy_lane(i915, encoder, crtc_state->lane_count, lane_reversal);
2747 
2748 	/*
2749 	 * 7. Follow the Display Voltage Frequency Switching - Sequence
2750 	 * Before Frequency Change. We handle this step in bxt_set_cdclk().
2751 	 */
2752 
2753 	/*
2754 	 * 8. Program DDI_CLK_VALFREQ to match intended DDI
2755 	 * clock frequency.
2756 	 */
2757 	intel_de_write(i915, DDI_CLK_VALFREQ(encoder->port),
2758 		       crtc_state->port_clock);
2759 
2760 	/*
2761 	 * 9. Set PORT_CLOCK_CTL register PCLK PLL Request
2762 	 * LN<Lane for maxPCLK> to "1" to enable PLL.
2763 	 */
2764 	intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(encoder->port),
2765 		     intel_cx0_get_pclk_pll_request(INTEL_CX0_BOTH_LANES),
2766 		     intel_cx0_get_pclk_pll_request(maxpclk_lane));
2767 
2768 	/* 10. Poll on PORT_CLOCK_CTL PCLK PLL Ack LN<Lane for maxPCLK> == "1". */
2769 	if (__intel_de_wait_for_register(i915, XELPDP_PORT_CLOCK_CTL(encoder->port),
2770 					 intel_cx0_get_pclk_pll_ack(INTEL_CX0_BOTH_LANES),
2771 					 intel_cx0_get_pclk_pll_ack(maxpclk_lane),
2772 					 XELPDP_PCLK_PLL_ENABLE_TIMEOUT_US, 0, NULL))
2773 		drm_warn(&i915->drm, "Port %c PLL not locked after %dus.\n",
2774 			 phy_name(phy), XELPDP_PCLK_PLL_ENABLE_TIMEOUT_US);
2775 
2776 	/*
2777 	 * 11. Follow the Display Voltage Frequency Switching Sequence After
2778 	 * Frequency Change. We handle this step in bxt_set_cdclk().
2779 	 */
2780 
2781 	/* TODO: enable TBT-ALT mode */
2782 	intel_cx0_phy_transaction_end(encoder, wakeref);
2783 }
2784 
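/*
 * Derive the port clock from the TBT DDI clock select field.  The warnings
 * sanity check that the forward clock is ungated and that the TBT clock
 * request/ack handshake has completed, as expected when TBT-ALT is in use.
 */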
2785 int intel_mtl_tbt_calc_port_clock(struct intel_encoder *encoder)
2786 {
2787 	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
2788 	u32 clock;
2789 	u32 val = intel_de_read(i915, XELPDP_PORT_CLOCK_CTL(encoder->port));
2790 
2791 	clock = REG_FIELD_GET(XELPDP_DDI_CLOCK_SELECT_MASK, val);
2792 
2793 	drm_WARN_ON(&i915->drm, !(val & XELPDP_FORWARD_CLOCK_UNGATE));
2794 	drm_WARN_ON(&i915->drm, !(val & XELPDP_TBT_CLOCK_REQUEST));
2795 	drm_WARN_ON(&i915->drm, !(val & XELPDP_TBT_CLOCK_ACK));
2796 
2797 	switch (clock) {
2798 	case XELPDP_DDI_CLOCK_SELECT_TBT_162:
2799 		return 162000;
2800 	case XELPDP_DDI_CLOCK_SELECT_TBT_270:
2801 		return 270000;
2802 	case XELPDP_DDI_CLOCK_SELECT_TBT_540:
2803 		return 540000;
2804 	case XELPDP_DDI_CLOCK_SELECT_TBT_810:
2805 		return 810000;
2806 	default:
2807 		MISSING_CASE(clock);
2808 		return 162000;
2809 	}
2810 }
2811 
2812 static int intel_mtl_tbt_clock_select(struct drm_i915_private *i915, int clock)
2813 {
2814 	switch (clock) {
2815 	case 162000:
2816 		return XELPDP_DDI_CLOCK_SELECT_TBT_162;
2817 	case 270000:
2818 		return XELPDP_DDI_CLOCK_SELECT_TBT_270;
2819 	case 540000:
2820 		return XELPDP_DDI_CLOCK_SELECT_TBT_540;
2821 	case 810000:
2822 		return XELPDP_DDI_CLOCK_SELECT_TBT_810;
2823 	default:
2824 		MISSING_CASE(clock);
2825 		return XELPDP_DDI_CLOCK_SELECT_TBT_162;
2826 	}
2827 }
2828 
2829 static void intel_mtl_tbt_pll_enable(struct intel_encoder *encoder,
2830 				     const struct intel_crtc_state *crtc_state)
2831 {
2832 	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
2833 	enum phy phy = intel_port_to_phy(i915, encoder->port);
2834 	u32 val = 0;
2835 
2836 	/*
2837 	 * 1. Program PORT_CLOCK_CTL REGISTER to configure
2838 	 * clock muxes, gating and SSC
2839 	 */
2840 	val |= XELPDP_DDI_CLOCK_SELECT(intel_mtl_tbt_clock_select(i915, crtc_state->port_clock));
2841 	val |= XELPDP_FORWARD_CLOCK_UNGATE;
2842 	intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(encoder->port),
2843 		     XELPDP_DDI_CLOCK_SELECT_MASK | XELPDP_FORWARD_CLOCK_UNGATE, val);
2844 
2845 	/* 2. Read back PORT_CLOCK_CTL REGISTER */
2846 	val = intel_de_read(i915, XELPDP_PORT_CLOCK_CTL(encoder->port));
2847 
2848 	/*
2849 	 * 3. Follow the Display Voltage Frequency Switching - Sequence
2850 	 * Before Frequency Change. We handle this step in bxt_set_cdclk().
2851 	 */
2852 
2853 	/*
2854 	 * 4. Set PORT_CLOCK_CTL register TBT CLOCK Request to "1" to enable PLL.
2855 	 */
2856 	val |= XELPDP_TBT_CLOCK_REQUEST;
2857 	intel_de_write(i915, XELPDP_PORT_CLOCK_CTL(encoder->port), val);
2858 
2859 	/* 5. Poll on PORT_CLOCK_CTL TBT CLOCK Ack == "1". */
2860 	if (__intel_de_wait_for_register(i915, XELPDP_PORT_CLOCK_CTL(encoder->port),
2861 					 XELPDP_TBT_CLOCK_ACK,
2862 					 XELPDP_TBT_CLOCK_ACK,
2863 					 100, 0, NULL))
2864 		drm_warn(&i915->drm, "[ENCODER:%d:%s][%c] PHY PLL not locked after 100us.\n",
2865 			 encoder->base.base.id, encoder->base.name, phy_name(phy));
2866 
2867 	/*
2868 	 * 6. Follow the Display Voltage Frequency Switching Sequence After
2869 	 * Frequency Change. We handle this step in bxt_set_cdclk().
2870 	 */
2871 
2872 	/*
2873 	 * 7. Program DDI_CLK_VALFREQ to match intended DDI
2874 	 * clock frequency.
2875 	 */
2876 	intel_de_write(i915, DDI_CLK_VALFREQ(encoder->port),
2877 		       crtc_state->port_clock);
2878 }
2879 
2880 void intel_mtl_pll_enable(struct intel_encoder *encoder,
2881 			  const struct intel_crtc_state *crtc_state)
2882 {
2883 	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
2884 
2885 	if (intel_tc_port_in_tbt_alt_mode(dig_port))
2886 		intel_mtl_tbt_pll_enable(encoder, crtc_state);
2887 	else
2888 		intel_cx0pll_enable(encoder, crtc_state);
2889 }
2890 
2891 static void intel_cx0pll_disable(struct intel_encoder *encoder)
2892 {
2893 	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
2894 	enum phy phy = intel_port_to_phy(i915, encoder->port);
2895 	bool is_c10 = intel_is_c10phy(i915, phy);
2896 	intel_wakeref_t wakeref = intel_cx0_phy_transaction_begin(encoder);
2897 
2898 	/* 1. Change owned PHY lane power to Disable state. */
2899 	intel_cx0_powerdown_change_sequence(i915, encoder->port, INTEL_CX0_BOTH_LANES,
2900 					    is_c10 ? CX0_P2PG_STATE_DISABLE :
2901 					    CX0_P4PG_STATE_DISABLE);
2902 
2903 	/*
2904 	 * 2. Follow the Display Voltage Frequency Switching Sequence Before
2905 	 * Frequency Change. We handle this step in bxt_set_cdclk().
2906 	 */
2907 
2908 	/*
2909 	 * 3. Set PORT_CLOCK_CTL register PCLK PLL Request LN<Lane for maxPCLK>
2910 	 * to "0" to disable PLL.
2911 	 */
2912 	intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(encoder->port),
2913 		     intel_cx0_get_pclk_pll_request(INTEL_CX0_BOTH_LANES) |
2914 		     intel_cx0_get_pclk_refclk_request(INTEL_CX0_BOTH_LANES), 0);
2915 
2916 	/* 4. Program DDI_CLK_VALFREQ to 0. */
2917 	intel_de_write(i915, DDI_CLK_VALFREQ(encoder->port), 0);
2918 
2919 	/*
2920 	 * 5. Poll on PORT_CLOCK_CTL PCLK PLL Ack LN<Lane for maxPCLK> == "0".
2921 	 */
2922 	if (__intel_de_wait_for_register(i915, XELPDP_PORT_CLOCK_CTL(encoder->port),
2923 					 intel_cx0_get_pclk_pll_ack(INTEL_CX0_BOTH_LANES) |
2924 					 intel_cx0_get_pclk_refclk_ack(INTEL_CX0_BOTH_LANES), 0,
2925 					 XELPDP_PCLK_PLL_DISABLE_TIMEOUT_US, 0, NULL))
2926 		drm_warn(&i915->drm, "Port %c PLL not unlocked after %dus.\n",
2927 			 phy_name(phy), XELPDP_PCLK_PLL_DISABLE_TIMEOUT_US);
2928 
2929 	/*
2930 	 * 6. Follow the Display Voltage Frequency Switching Sequence After
2931 	 * Frequency Change. We handle this step in bxt_set_cdclk().
2932 	 */
2933 
2934 	/* 7. Program PORT_CLOCK_CTL register to disable and gate clocks. */
2935 	intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(encoder->port),
2936 		     XELPDP_DDI_CLOCK_SELECT_MASK, 0);
2937 	intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(encoder->port),
2938 		     XELPDP_FORWARD_CLOCK_UNGATE, 0);
2939 
2940 	intel_cx0_phy_transaction_end(encoder, wakeref);
2941 }
2942 
2943 static void intel_mtl_tbt_pll_disable(struct intel_encoder *encoder)
2944 {
2945 	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
2946 	enum phy phy = intel_port_to_phy(i915, encoder->port);
2947 
2948 	/*
2949 	 * 1. Follow the Display Voltage Frequency Switching Sequence Before
2950 	 * Frequency Change. We handle this step in bxt_set_cdclk().
2951 	 */
2952 
2953 	/*
2954 	 * 2. Set PORT_CLOCK_CTL register TBT CLOCK Request to "0" to disable PLL.
2955 	 */
2956 	intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(encoder->port),
2957 		     XELPDP_TBT_CLOCK_REQUEST, 0);
2958 
2959 	/* 3. Poll on PORT_CLOCK_CTL TBT CLOCK Ack == "0". */
2960 	if (__intel_de_wait_for_register(i915, XELPDP_PORT_CLOCK_CTL(encoder->port),
2961 					 XELPDP_TBT_CLOCK_ACK, 0, 10, 0, NULL))
2962 		drm_warn(&i915->drm, "[ENCODER:%d:%s][%c] PHY PLL not unlocked after 10us.\n",
2963 			 encoder->base.base.id, encoder->base.name, phy_name(phy));
2964 
2965 	/*
2966 	 * 4. Follow the Display Voltage Frequency Switching Sequence After
2967 	 * Frequency Change. We handle this step in bxt_set_cdclk().
2968 	 */
2969 
2970 	/*
2971 	 * 5. Program PORT CLOCK CTRL register to disable and gate clocks
2972 	 */
2973 	intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(encoder->port),
2974 		     XELPDP_DDI_CLOCK_SELECT_MASK |
2975 		     XELPDP_FORWARD_CLOCK_UNGATE, 0);
2976 
2977 	/* 6. Program DDI_CLK_VALFREQ to 0. */
2978 	intel_de_write(i915, DDI_CLK_VALFREQ(encoder->port), 0);
2979 }
2980 
2981 void intel_mtl_pll_disable(struct intel_encoder *encoder)
2982 {
2983 	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
2984 
2985 	if (intel_tc_port_in_tbt_alt_mode(dig_port))
2986 		intel_mtl_tbt_pll_disable(encoder);
2987 	else
2988 		intel_cx0pll_disable(encoder);
2989 }
2990 
2991 enum icl_port_dpll_id
2992 intel_mtl_port_pll_type(struct intel_encoder *encoder,
2993 			const struct intel_crtc_state *crtc_state)
2994 {
2995 	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
2996 	/*
2997 	 * TODO: Determine the PLL type from the SW state, once MTL PLL
2998 	 * handling is done via the standard shared DPLL framework.
2999 	 */
3000 	u32 val = intel_de_read(i915, XELPDP_PORT_CLOCK_CTL(encoder->port));
3001 	u32 clock = REG_FIELD_GET(XELPDP_DDI_CLOCK_SELECT_MASK, val);
3002 
3003 	if (clock == XELPDP_DDI_CLOCK_SELECT_MAXPCLK ||
3004 	    clock == XELPDP_DDI_CLOCK_SELECT_DIV18CLK)
3005 		return ICL_PORT_DPLL_MG_PHY;
3006 	else
3007 		return ICL_PORT_DPLL_DEFAULT;
3008 }
3009 
3010 static void intel_c10pll_state_verify(const struct intel_crtc_state *state,
3011 				      struct intel_crtc *crtc,
3012 				      struct intel_encoder *encoder,
3013 				      struct intel_c10pll_state *mpllb_hw_state)
3014 {
3015 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
3016 	const struct intel_c10pll_state *mpllb_sw_state = &state->cx0pll_state.c10;
3017 	int i;
3018 
3019 	for (i = 0; i < ARRAY_SIZE(mpllb_sw_state->pll); i++) {
3020 		u8 expected = mpllb_sw_state->pll[i];
3021 
3022 		I915_STATE_WARN(i915, mpllb_hw_state->pll[i] != expected,
3023 				"[CRTC:%d:%s] mismatch in C10MPLLB: Register[%d] (expected 0x%02x, found 0x%02x)",
3024 				crtc->base.base.id, crtc->base.name, i,
3025 				expected, mpllb_hw_state->pll[i]);
3026 	}
3027 
3028 	I915_STATE_WARN(i915, mpllb_hw_state->tx != mpllb_sw_state->tx,
3029 			"[CRTC:%d:%s] mismatch in C10MPLLB: Register TX0 (expected 0x%02x, found 0x%02x)",
3030 			crtc->base.base.id, crtc->base.name,
3031 			mpllb_sw_state->tx, mpllb_hw_state->tx);
3032 
3033 	I915_STATE_WARN(i915, mpllb_hw_state->cmn != mpllb_sw_state->cmn,
3034 			"[CRTC:%d:%s] mismatch in C10MPLLB: Register CMN0 (expected 0x%02x, found 0x%02x)",
3035 			crtc->base.base.id, crtc->base.name,
3036 			mpllb_sw_state->cmn, mpllb_hw_state->cmn);
3037 }
3038 
3039 void intel_cx0pll_readout_hw_state(struct intel_encoder *encoder,
3040 				   struct intel_cx0pll_state *pll_state)
3041 {
3042 	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
3043 	enum phy phy = intel_port_to_phy(i915, encoder->port);
3044 
3045 	if (intel_is_c10phy(i915, phy))
3046 		intel_c10pll_readout_hw_state(encoder, &pll_state->c10);
3047 	else
3048 		intel_c20pll_readout_hw_state(encoder, &pll_state->c20);
3049 }
3050 
3051 int intel_cx0pll_calc_port_clock(struct intel_encoder *encoder,
3052 				 const struct intel_cx0pll_state *pll_state)
3053 {
3054 	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
3055 	enum phy phy = intel_port_to_phy(i915, encoder->port);
3056 
3057 	if (intel_is_c10phy(i915, phy))
3058 		return intel_c10pll_calc_port_clock(encoder, &pll_state->c10);
3059 
3060 	return intel_c20pll_calc_port_clock(encoder, &pll_state->c20);
3061 }
3062 
3063 static void intel_c20pll_state_verify(const struct intel_crtc_state *state,
3064 				      struct intel_crtc *crtc,
3065 				      struct intel_encoder *encoder,
3066 				      struct intel_c20pll_state *mpll_hw_state)
3067 {
3068 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
3069 	const struct intel_c20pll_state *mpll_sw_state = &state->cx0pll_state.c20;
3070 	bool use_mplla;
3071 	int i;
3072 
3073 	use_mplla = intel_c20_use_mplla(mpll_hw_state->clock);
3074 	if (use_mplla) {
3075 		for (i = 0; i < ARRAY_SIZE(mpll_sw_state->mplla); i++) {
3076 			I915_STATE_WARN(i915, mpll_hw_state->mplla[i] != mpll_sw_state->mplla[i],
3077 					"[CRTC:%d:%s] mismatch in C20MPLLA: Register[%d] (expected 0x%04x, found 0x%04x)",
3078 					crtc->base.base.id, crtc->base.name, i,
3079 					mpll_sw_state->mplla[i], mpll_hw_state->mplla[i]);
3080 		}
3081 	} else {
3082 		for (i = 0; i < ARRAY_SIZE(mpll_sw_state->mpllb); i++) {
3083 			I915_STATE_WARN(i915, mpll_hw_state->mpllb[i] != mpll_sw_state->mpllb[i],
3084 					"[CRTC:%d:%s] mismatch in C20MPLLB: Register[%d] (expected 0x%04x, found 0x%04x)",
3085 					crtc->base.base.id, crtc->base.name, i,
3086 					mpll_sw_state->mpllb[i], mpll_hw_state->mpllb[i]);
3087 		}
3088 	}
3089 
3090 	for (i = 0; i < ARRAY_SIZE(mpll_sw_state->tx); i++) {
3091 		I915_STATE_WARN(i915, mpll_hw_state->tx[i] != mpll_sw_state->tx[i],
3092 				"[CRTC:%d:%s] mismatch in C20: Register TX[%i] (expected 0x%04x, found 0x%04x)",
3093 				crtc->base.base.id, crtc->base.name, i,
3094 				mpll_sw_state->tx[i], mpll_hw_state->tx[i]);
3095 	}
3096 
3097 	for (i = 0; i < ARRAY_SIZE(mpll_sw_state->cmn); i++) {
3098 		I915_STATE_WARN(i915, mpll_hw_state->cmn[i] != mpll_sw_state->cmn[i],
3099 				"[CRTC:%d:%s] mismatch in C20: Register CMN[%i] (expected 0x%04x, found 0x%04x)",
3100 				crtc->base.base.id, crtc->base.name, i,
3101 				mpll_sw_state->cmn[i], mpll_hw_state->cmn[i]);
3102 	}
3103 }
3104 
3105 void intel_cx0pll_state_verify(struct intel_atomic_state *state,
3106 			       struct intel_crtc *crtc)
3107 {
3108 	struct drm_i915_private *i915 = to_i915(state->base.dev);
3109 	const struct intel_crtc_state *new_crtc_state =
3110 		intel_atomic_get_new_crtc_state(state, crtc);
3111 	struct intel_encoder *encoder;
3112 	struct intel_cx0pll_state mpll_hw_state = {};
3113 	enum phy phy;
3114 
3115 	if (DISPLAY_VER(i915) < 14)
3116 		return;
3117 
3118 	if (!new_crtc_state->hw.active)
3119 		return;
3120 
3121 	/* intel_get_crtc_new_encoder() only works for modeset/fastset commits */
3122 	if (!intel_crtc_needs_modeset(new_crtc_state) &&
3123 	    !intel_crtc_needs_fastset(new_crtc_state))
3124 		return;
3125 
3126 	encoder = intel_get_crtc_new_encoder(state, new_crtc_state);
3127 	phy = intel_port_to_phy(i915, encoder->port);
3128 
3129 	if (intel_tc_port_in_tbt_alt_mode(enc_to_dig_port(encoder)))
3130 		return;
3131 
3132 	intel_cx0pll_readout_hw_state(encoder, &mpll_hw_state);
3133 
3134 	if (intel_is_c10phy(i915, phy))
3135 		intel_c10pll_state_verify(new_crtc_state, crtc, encoder, &mpll_hw_state.c10);
3136 	else
3137 		intel_c20pll_state_verify(new_crtc_state, crtc, encoder, &mpll_hw_state.c20);
3138 }
3139