xref: /linux/drivers/gpu/drm/i915/display/intel_lt_phy.c (revision 815e260a18a3af4dab59025ee99a7156c0e8b5e0)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2025 Intel Corporation
4  */
5 
6 #include <drm/drm_print.h>
7 
8 #include "i915_reg.h"
9 #include "i915_utils.h"
10 #include "intel_cx0_phy.h"
11 #include "intel_cx0_phy_regs.h"
12 #include "intel_ddi.h"
13 #include "intel_ddi_buf_trans.h"
14 #include "intel_de.h"
15 #include "intel_display.h"
16 #include "intel_display_types.h"
17 #include "intel_dpll_mgr.h"
18 #include "intel_hdmi.h"
19 #include "intel_lt_phy.h"
20 #include "intel_lt_phy_regs.h"
21 #include "intel_panel.h"
22 #include "intel_psr.h"
23 #include "intel_tc.h"
24 
25 #define for_each_lt_phy_lane_in_mask(__lane_mask, __lane) \
26 	for ((__lane) = 0; (__lane) < 2; (__lane)++) \
27 		for_each_if((__lane_mask) & BIT(__lane))
28 
29 #define INTEL_LT_PHY_LANE0		BIT(0)
30 #define INTEL_LT_PHY_LANE1		BIT(1)
31 #define INTEL_LT_PHY_BOTH_LANES		(INTEL_LT_PHY_LANE1 |\
32 					 INTEL_LT_PHY_LANE0)
33 #define MODE_DP				3
34 
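/*
 * Each PLL state table below carries three VDR config bytes plus 13 writes to
 * PHY-internal registers: addr_msb[i]/addr_lsb[i] presumably form the high and
 * low bytes of the internal register address, and data[i][0..3] hold the
 * corresponding 32-bit value MSB-first (see intel_lt_phy_program_pll() and the
 * REGVAL() helper in intel_lt_phy_calc_hdmi_port_clock()).
 */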
35 static const struct intel_lt_phy_pll_state xe3plpd_lt_dp_rbr = {
36 	.clock = 162000,
37 	.config = {
38 		0x83,
39 		0x2d,
40 		0x0,
41 	},
42 	.addr_msb = {
43 		0x87,
44 		0x87,
45 		0x87,
46 		0x87,
47 		0x88,
48 		0x88,
49 		0x88,
50 		0x88,
51 		0x88,
52 		0x88,
53 		0x88,
54 		0x88,
55 		0x88,
56 	},
57 	.addr_lsb = {
58 		0x10,
59 		0x0c,
60 		0x14,
61 		0xe4,
62 		0x0c,
63 		0x10,
64 		0x14,
65 		0x18,
66 		0x48,
67 		0x40,
68 		0x4c,
69 		0x24,
70 		0x44,
71 	},
72 	.data = {
73 		{ 0x0,  0x4c, 0x2,  0x0  },
74 		{ 0x5,  0xa,  0x2a, 0x20 },
75 		{ 0x80, 0x0,  0x0,  0x0  },
76 		{ 0x4,  0x4,  0x82, 0x28 },
77 		{ 0xfa, 0x16, 0x83, 0x11 },
78 		{ 0x80, 0x0f, 0xf9, 0x53 },
79 		{ 0x84, 0x26, 0x5,  0x4  },
80 		{ 0x0,  0xe0, 0x1,  0x0  },
81 		{ 0x4b, 0x48, 0x0,  0x0  },
82 		{ 0x27, 0x8,  0x0,  0x0  },
83 		{ 0x5a, 0x13, 0x29, 0x13 },
84 		{ 0x0,  0x5b, 0xe0, 0x0a },
85 		{ 0x0,  0x0,  0x0,  0x0  },
86 	},
87 };
88 
89 static const struct intel_lt_phy_pll_state xe3plpd_lt_dp_hbr1 = {
90 	.clock = 270000,
91 	.config = {
92 		0x8b,
93 		0x2d,
94 		0x0,
95 	},
96 	.addr_msb = {
97 		0x87,
98 		0x87,
99 		0x87,
100 		0x87,
101 		0x88,
102 		0x88,
103 		0x88,
104 		0x88,
105 		0x88,
106 		0x88,
107 		0x88,
108 		0x88,
109 		0x88,
110 	},
111 	.addr_lsb = {
112 		0x10,
113 		0x0c,
114 		0x14,
115 		0xe4,
116 		0x0c,
117 		0x10,
118 		0x14,
119 		0x18,
120 		0x48,
121 		0x40,
122 		0x4c,
123 		0x24,
124 		0x44,
125 	},
126 	.data = {
127 		{ 0x0,  0x4c, 0x2,  0x0  },
128 		{ 0x3,  0xca, 0x34, 0xa0 },
129 		{ 0xe0, 0x0,  0x0,  0x0  },
130 		{ 0x5,  0x4,  0x81, 0xad },
131 		{ 0xfa, 0x11, 0x83, 0x11 },
132 		{ 0x80, 0x0f, 0xf9, 0x53 },
133 		{ 0x84, 0x26, 0x7,  0x4  },
134 		{ 0x0,  0xe0, 0x1,  0x0  },
135 		{ 0x43, 0x48, 0x0,  0x0  },
136 		{ 0x27, 0x8,  0x0,  0x0  },
137 		{ 0x5a, 0x13, 0x29, 0x13 },
138 		{ 0x0,  0x5b, 0xe0, 0x0d },
139 		{ 0x0,  0x0,  0x0,  0x0  },
140 	},
141 };
142 
143 static const struct intel_lt_phy_pll_state xe3plpd_lt_dp_hbr2 = {
144 	.clock = 540000,
145 	.config = {
146 		0x93,
147 		0x2d,
148 		0x0,
149 	},
150 	.addr_msb = {
151 		0x87,
152 		0x87,
153 		0x87,
154 		0x87,
155 		0x88,
156 		0x88,
157 		0x88,
158 		0x88,
159 		0x88,
160 		0x88,
161 		0x88,
162 		0x88,
163 		0x88,
164 	},
165 	.addr_lsb = {
166 		0x10,
167 		0x0c,
168 		0x14,
169 		0xe4,
170 		0x0c,
171 		0x10,
172 		0x14,
173 		0x18,
174 		0x48,
175 		0x40,
176 		0x4c,
177 		0x24,
178 		0x44,
179 	},
180 	.data = {
181 		{ 0x0,  0x4c, 0x2,  0x0  },
182 		{ 0x1,  0x4d, 0x34, 0xa0 },
183 		{ 0xe0, 0x0,  0x0,  0x0  },
184 		{ 0xa,  0x4,  0x81, 0xda },
185 		{ 0xfa, 0x11, 0x83, 0x11 },
186 		{ 0x80, 0x0f, 0xf9, 0x53 },
187 		{ 0x84, 0x26, 0x7,  0x4  },
188 		{ 0x0,  0xe0, 0x1,  0x0  },
189 		{ 0x43, 0x48, 0x0,  0x0  },
190 		{ 0x27, 0x8,  0x0,  0x0  },
191 		{ 0x5a, 0x13, 0x29, 0x13 },
192 		{ 0x0,  0x5b, 0xe0, 0x0d },
193 		{ 0x0,  0x0,  0x0,  0x0  },
194 	},
195 };
196 
197 static const struct intel_lt_phy_pll_state xe3plpd_lt_dp_hbr3 = {
198 	.clock = 810000,
199 	.config = {
200 		0x9b,
201 		0x2d,
202 		0x0,
203 	},
204 	.addr_msb = {
205 		0x87,
206 		0x87,
207 		0x87,
208 		0x87,
209 		0x88,
210 		0x88,
211 		0x88,
212 		0x88,
213 		0x88,
214 		0x88,
215 		0x88,
216 		0x88,
217 		0x88,
218 	},
219 	.addr_lsb = {
220 		0x10,
221 		0x0c,
222 		0x14,
223 		0xe4,
224 		0x0c,
225 		0x10,
226 		0x14,
227 		0x18,
228 		0x48,
229 		0x40,
230 		0x4c,
231 		0x24,
232 		0x44,
233 	},
234 	.data = {
235 		{ 0x0,  0x4c, 0x2,  0x0  },
236 		{ 0x1,  0x4a, 0x34, 0xa0 },
237 		{ 0xe0, 0x0,  0x0,  0x0  },
238 		{ 0x5,  0x4,  0x80, 0xa8 },
239 		{ 0xfa, 0x11, 0x83, 0x11 },
240 		{ 0x80, 0x0f, 0xf9, 0x53 },
241 		{ 0x84, 0x26, 0x7,  0x4  },
242 		{ 0x0,  0xe0, 0x1,  0x0  },
243 		{ 0x43, 0x48, 0x0,  0x0  },
244 		{ 0x27, 0x8,  0x0,  0x0  },
245 		{ 0x5a, 0x13, 0x29, 0x13 },
246 		{ 0x0,  0x5b, 0xe0, 0x0d },
247 		{ 0x0,  0x0,  0x0,  0x0  },
248 	},
249 };
250 
251 static const struct intel_lt_phy_pll_state xe3plpd_lt_dp_uhbr10 = {
252 	.clock = 1000000,
253 	.config = {
254 		0x43,
255 		0x2d,
256 		0x0,
257 	},
258 	.addr_msb = {
259 		0x85,
260 		0x85,
261 		0x85,
262 		0x85,
263 		0x86,
264 		0x86,
265 		0x86,
266 		0x86,
267 		0x86,
268 		0x86,
269 		0x86,
270 		0x86,
271 		0x86,
272 	},
273 	.addr_lsb = {
274 		0x10,
275 		0x0c,
276 		0x14,
277 		0xe4,
278 		0x0c,
279 		0x10,
280 		0x14,
281 		0x18,
282 		0x48,
283 		0x40,
284 		0x4c,
285 		0x24,
286 		0x44,
287 	},
288 	.data = {
289 		{ 0x0,  0x4c, 0x2,  0x0  },
290 		{ 0x1,  0xa,  0x20, 0x80 },
291 		{ 0x6a, 0xaa, 0xaa, 0xab },
292 		{ 0x0,  0x3,  0x4,  0x94 },
293 		{ 0xfa, 0x1c, 0x83, 0x11 },
294 		{ 0x80, 0x0f, 0xf9, 0x53 },
295 		{ 0x84, 0x26, 0x4,  0x4  },
296 		{ 0x0,  0xe0, 0x1,  0x0  },
297 		{ 0x45, 0x48, 0x0,  0x0  },
298 		{ 0x27, 0x8,  0x0,  0x0  },
299 		{ 0x5a, 0x14, 0x2a, 0x14 },
300 		{ 0x0,  0x5b, 0xe0, 0x8  },
301 		{ 0x0,  0x0,  0x0,  0x0  },
302 	},
303 };
304 
305 static const struct intel_lt_phy_pll_state xe3plpd_lt_dp_uhbr13_5 = {
306 	.clock = 1350000,
307 	.config = {
308 		0xcb,
309 		0x2d,
310 		0x0,
311 	},
312 	.addr_msb = {
313 		0x87,
314 		0x87,
315 		0x87,
316 		0x87,
317 		0x88,
318 		0x88,
319 		0x88,
320 		0x88,
321 		0x88,
322 		0x88,
323 		0x88,
324 		0x88,
325 		0x88,
326 	},
327 	.addr_lsb = {
328 		0x10,
329 		0x0c,
330 		0x14,
331 		0xe4,
332 		0x0c,
333 		0x10,
334 		0x14,
335 		0x18,
336 		0x48,
337 		0x40,
338 		0x4c,
339 		0x24,
340 		0x44,
341 	},
342 	.data = {
343 		{ 0x0,  0x4c, 0x2,  0x0  },
344 		{ 0x2,  0x9,  0x2b, 0xe0 },
345 		{ 0x90, 0x0,  0x0,  0x0  },
346 		{ 0x8,  0x4,  0x80, 0xe0 },
347 		{ 0xfa, 0x15, 0x83, 0x11 },
348 		{ 0x80, 0x0f, 0xf9, 0x53 },
349 		{ 0x84, 0x26, 0x6,  0x4  },
350 		{ 0x0,  0xe0, 0x1,  0x0  },
351 		{ 0x49, 0x48, 0x0,  0x0  },
352 		{ 0x27, 0x8,  0x0,  0x0  },
353 		{ 0x5a, 0x13, 0x29, 0x13 },
354 		{ 0x0,  0x57, 0xe0, 0x0c },
355 		{ 0x0,  0x0,  0x0,  0x0  },
356 	},
357 };
358 
359 static const struct intel_lt_phy_pll_state xe3plpd_lt_dp_uhbr20 = {
360 	.clock = 2000000,
361 	.config = {
362 		0x53,
363 		0x2d,
364 		0x0,
365 	},
366 	.addr_msb = {
367 		0x85,
368 		0x85,
369 		0x85,
370 		0x85,
371 		0x86,
372 		0x86,
373 		0x86,
374 		0x86,
375 		0x86,
376 		0x86,
377 		0x86,
378 		0x86,
379 		0x86,
380 	},
381 	.addr_lsb = {
382 		0x10,
383 		0x0c,
384 		0x14,
385 		0xe4,
386 		0x0c,
387 		0x10,
388 		0x14,
389 		0x18,
390 		0x48,
391 		0x40,
392 		0x4c,
393 		0x24,
394 		0x44,
395 	},
396 	.data = {
397 		{ 0x0,  0x4c, 0x2,  0x0  },
398 		{ 0x1,  0xa,  0x20, 0x80 },
399 		{ 0x6a, 0xaa, 0xaa, 0xab },
400 		{ 0x0,  0x3,  0x4,  0x94 },
401 		{ 0xfa, 0x1c, 0x83, 0x11 },
402 		{ 0x80, 0x0f, 0xf9, 0x53 },
403 		{ 0x84, 0x26, 0x4,  0x4  },
404 		{ 0x0,  0xe0, 0x1,  0x0  },
405 		{ 0x45, 0x48, 0x0,  0x0  },
406 		{ 0x27, 0x8,  0x0,  0x0  },
407 		{ 0x5a, 0x14, 0x2a, 0x14 },
408 		{ 0x0,  0x5b, 0xe0, 0x8  },
409 		{ 0x0,  0x0,  0x0,  0x0  },
410 	},
411 };
412 
413 static const struct intel_lt_phy_pll_state * const xe3plpd_lt_dp_tables[] = {
414 	&xe3plpd_lt_dp_rbr,
415 	&xe3plpd_lt_dp_hbr1,
416 	&xe3plpd_lt_dp_hbr2,
417 	&xe3plpd_lt_dp_hbr3,
418 	&xe3plpd_lt_dp_uhbr10,
419 	&xe3plpd_lt_dp_uhbr13_5,
420 	&xe3plpd_lt_dp_uhbr20,
421 	NULL,
422 };
423 
424 static const struct intel_lt_phy_pll_state xe3plpd_lt_edp_2_16 = {
425 	.clock = 216000,
426 	.config = {
427 		0xa3,
428 		0x2d,
429 		0x1,
430 	},
431 	.addr_msb = {
432 		0x87,
433 		0x87,
434 		0x87,
435 		0x87,
436 		0x88,
437 		0x88,
438 		0x88,
439 		0x88,
440 		0x88,
441 		0x88,
442 		0x88,
443 		0x88,
444 		0x88,
445 	},
446 	.addr_lsb = {
447 		0x10,
448 		0x0c,
449 		0x14,
450 		0xe4,
451 		0x0c,
452 		0x10,
453 		0x14,
454 		0x18,
455 		0x48,
456 		0x40,
457 		0x4c,
458 		0x24,
459 		0x44,
460 	},
461 	.data = {
462 		{ 0x0,  0x4c, 0x2,  0x0  },
463 		{ 0x3,  0xca, 0x2a, 0x20 },
464 		{ 0x80, 0x0,  0x0,  0x0  },
465 		{ 0x6,  0x4,  0x81, 0xbc },
466 		{ 0xfa, 0x16, 0x83, 0x11 },
467 		{ 0x80, 0x0f, 0xf9, 0x53 },
468 		{ 0x84, 0x26, 0x5,  0x4  },
469 		{ 0x0,  0xe0, 0x1,  0x0  },
470 		{ 0x4b, 0x48, 0x0,  0x0  },
471 		{ 0x27, 0x8,  0x0,  0x0  },
472 		{ 0x5a, 0x13, 0x29, 0x13 },
473 		{ 0x0,  0x5b, 0xe0, 0x0a },
474 		{ 0x0,  0x0,  0x0,  0x0  },
475 	},
476 };
477 
478 static const struct intel_lt_phy_pll_state xe3plpd_lt_edp_2_43 = {
479 	.clock = 243000,
480 	.config = {
481 		0xab,
482 		0x2d,
483 		0x1,
484 	},
485 	.addr_msb = {
486 		0x87,
487 		0x87,
488 		0x87,
489 		0x87,
490 		0x88,
491 		0x88,
492 		0x88,
493 		0x88,
494 		0x88,
495 		0x88,
496 		0x88,
497 		0x88,
498 		0x88,
499 	},
500 	.addr_lsb = {
501 		0x10,
502 		0x0c,
503 		0x14,
504 		0xe4,
505 		0x0c,
506 		0x10,
507 		0x14,
508 		0x18,
509 		0x48,
510 		0x40,
511 		0x4c,
512 		0x24,
513 		0x44,
514 	},
515 	.data = {
516 		{ 0x0,  0x4c, 0x2,  0x0  },
517 		{ 0x3,  0xca, 0x2f, 0x60 },
518 		{ 0xb0, 0x0,  0x0,  0x0  },
519 		{ 0x6,  0x4,  0x81, 0xbc },
520 		{ 0xfa, 0x13, 0x83, 0x11 },
521 		{ 0x80, 0x0f, 0xf9, 0x53 },
522 		{ 0x84, 0x26, 0x6,  0x4  },
523 		{ 0x0,  0xe0, 0x1,  0x0  },
524 		{ 0x47, 0x48, 0x0,  0x0  },
525 		{ 0x0,  0x0,  0x0,  0x0  },
526 		{ 0x5a, 0x13, 0x29, 0x13 },
527 		{ 0x0,  0x5b, 0xe0, 0x0c },
528 		{ 0x0,  0x0,  0x0,  0x0  },
529 	},
530 };
531 
532 static const struct intel_lt_phy_pll_state xe3plpd_lt_edp_3_24 = {
533 	.clock = 324000,
534 	.config = {
535 		0xb3,
536 		0x2d,
537 		0x1,
538 	},
539 	.addr_msb = {
540 		0x87,
541 		0x87,
542 		0x87,
543 		0x87,
544 		0x88,
545 		0x88,
546 		0x88,
547 		0x88,
548 		0x88,
549 		0x88,
550 		0x88,
551 		0x88,
552 		0x88,
553 	},
554 	.addr_lsb = {
555 		0x10,
556 		0x0c,
557 		0x14,
558 		0xe4,
559 		0x0c,
560 		0x10,
561 		0x14,
562 		0x18,
563 		0x48,
564 		0x40,
565 		0x4c,
566 		0x24,
567 		0x44,
568 	},
569 	.data = {
570 		{ 0x0,  0x4c, 0x2,  0x0  },
571 		{ 0x2,  0x8a, 0x2a, 0x20 },
572 		{ 0x80, 0x0,  0x0,  0x0  },
573 		{ 0x6,  0x4,  0x81, 0x28 },
574 		{ 0xfa, 0x16, 0x83, 0x11 },
575 		{ 0x80, 0x0f, 0xf9, 0x53 },
576 		{ 0x84, 0x26, 0x5,  0x4  },
577 		{ 0x0,  0xe0, 0x1,  0x0  },
578 		{ 0x4b, 0x48, 0x0,  0x0  },
579 		{ 0x27, 0x8,  0x0,  0x0  },
580 		{ 0x5a, 0x13, 0x29, 0x13 },
581 		{ 0x0,  0x5b, 0xe0, 0x0a },
582 		{ 0x0,  0x0,  0x0,  0x0  },
583 	},
584 };
585 
586 static const struct intel_lt_phy_pll_state xe3plpd_lt_edp_4_32 = {
587 	.clock = 432000,
588 	.config = {
589 		0xbb,
590 		0x2d,
591 		0x1,
592 	},
593 	.addr_msb = {
594 		0x87,
595 		0x87,
596 		0x87,
597 		0x87,
598 		0x88,
599 		0x88,
600 		0x88,
601 		0x88,
602 		0x88,
603 		0x88,
604 		0x88,
605 		0x88,
606 		0x88,
607 	},
608 	.addr_lsb = {
609 		0x10,
610 		0x0c,
611 		0x14,
612 		0xe4,
613 		0x0c,
614 		0x10,
615 		0x14,
616 		0x18,
617 		0x48,
618 		0x40,
619 		0x4c,
620 		0x24,
621 		0x44,
622 	},
623 	.data = {
624 		{ 0x0,  0x4c, 0x2,  0x0  },
625 		{ 0x1,  0x4d, 0x2a, 0x20 },
626 		{ 0x80, 0x0,  0x0,  0x0  },
627 		{ 0xc,  0x4,  0x81, 0xbc },
628 		{ 0xfa, 0x16, 0x83, 0x11 },
629 		{ 0x80, 0x0f, 0xf9, 0x53 },
630 		{ 0x84, 0x26, 0x5,  0x4  },
631 		{ 0x0,  0xe0, 0x1,  0x0  },
632 		{ 0x4b, 0x48, 0x0,  0x0  },
633 		{ 0x27, 0x8,  0x0,  0x0  },
634 		{ 0x5a, 0x13, 0x29, 0x13 },
635 		{ 0x0,  0x5b, 0xe0, 0x0a },
636 		{ 0x0,  0x0,  0x0,  0x0  },
637 	},
638 };
639 
640 static const struct intel_lt_phy_pll_state xe3plpd_lt_edp_6_75 = {
641 	.clock = 675000,
642 	.config = {
643 		0xdb,
644 		0x2d,
645 		0x1,
646 	},
647 	.addr_msb = {
648 		0x87,
649 		0x87,
650 		0x87,
651 		0x87,
652 		0x88,
653 		0x88,
654 		0x88,
655 		0x88,
656 		0x88,
657 		0x88,
658 		0x88,
659 		0x88,
660 		0x88,
661 	},
662 	.addr_lsb = {
663 		0x10,
664 		0x0c,
665 		0x14,
666 		0xe4,
667 		0x0c,
668 		0x10,
669 		0x14,
670 		0x18,
671 		0x48,
672 		0x40,
673 		0x4c,
674 		0x24,
675 		0x44,
676 	},
677 	.data = {
678 		{ 0x0,  0x4c, 0x2,  0x0  },
679 		{ 0x1,  0x4a, 0x2b, 0xe0 },
680 		{ 0x90, 0x0,  0x0,  0x0  },
681 		{ 0x6,  0x4,  0x80, 0xa8 },
682 		{ 0xfa, 0x15, 0x83, 0x11 },
683 		{ 0x80, 0x0f, 0xf9, 0x53 },
684 		{ 0x84, 0x26, 0x6,  0x4  },
685 		{ 0x0,  0xe0, 0x1,  0x0  },
686 		{ 0x49, 0x48, 0x0,  0x0  },
687 		{ 0x27, 0x8,  0x0,  0x0  },
688 		{ 0x5a, 0x13, 0x29, 0x13 },
689 		{ 0x0,  0x57, 0xe0, 0x0c },
690 		{ 0x0,  0x0,  0x0,  0x0  },
691 	},
692 };
693 
694 static const struct intel_lt_phy_pll_state * const xe3plpd_lt_edp_tables[] = {
695 	&xe3plpd_lt_dp_rbr,
696 	&xe3plpd_lt_edp_2_16,
697 	&xe3plpd_lt_edp_2_43,
698 	&xe3plpd_lt_dp_hbr1,
699 	&xe3plpd_lt_edp_3_24,
700 	&xe3plpd_lt_edp_4_32,
701 	&xe3plpd_lt_dp_hbr2,
702 	&xe3plpd_lt_edp_6_75,
703 	&xe3plpd_lt_dp_hbr3,
704 	NULL,
705 };
706 
707 static const struct intel_lt_phy_pll_state xe3plpd_lt_hdmi_252 = {
708 	.clock = 25200,
709 	.config = {
710 		0x84,
711 		0x2d,
712 		0x0,
713 	},
714 	.addr_msb = {
715 		0x87,
716 		0x87,
717 		0x87,
718 		0x87,
719 		0x88,
720 		0x88,
721 		0x88,
722 		0x88,
723 		0x88,
724 		0x88,
725 		0x88,
726 		0x88,
727 		0x88,
728 	},
729 	.addr_lsb = {
730 		0x10,
731 		0x0c,
732 		0x14,
733 		0xe4,
734 		0x0c,
735 		0x10,
736 		0x14,
737 		0x18,
738 		0x48,
739 		0x40,
740 		0x4c,
741 		0x24,
742 		0x44,
743 	},
744 	.data = {
745 		{ 0x0,  0x4c, 0x2,  0x0  },
746 		{ 0x0c, 0x15, 0x27, 0x60 },
747 		{ 0x0,  0x0,  0x0,  0x0  },
748 		{ 0x8,  0x4,  0x98, 0x28 },
749 		{ 0x42, 0x0,  0x84, 0x10 },
750 		{ 0x80, 0x0f, 0xd9, 0xb5 },
751 		{ 0x86, 0x0,  0x0,  0x0  },
752 		{ 0x1,  0xa0, 0x1,  0x0  },
753 		{ 0x4b, 0x0,  0x0,  0x0  },
754 		{ 0x28, 0x0,  0x0,  0x0  },
755 		{ 0x0,  0x14, 0x2a, 0x14 },
756 		{ 0x0,  0x0,  0x0,  0x0  },
757 		{ 0x0,  0x0,  0x0,  0x0  },
758 	},
759 };
760 
761 static const struct intel_lt_phy_pll_state xe3plpd_lt_hdmi_272 = {
762 	.clock = 27200,
763 	.config = {
764 		0x84,
765 		0x2d,
766 		0x0,
767 	},
768 	.addr_msb = {
769 		0x87,
770 		0x87,
771 		0x87,
772 		0x87,
773 		0x88,
774 		0x88,
775 		0x88,
776 		0x88,
777 		0x88,
778 		0x88,
779 		0x88,
780 		0x88,
781 		0x88,
782 	},
783 	.addr_lsb = {
784 		0x10,
785 		0x0c,
786 		0x14,
787 		0xe4,
788 		0x0c,
789 		0x10,
790 		0x14,
791 		0x18,
792 		0x48,
793 		0x40,
794 		0x4c,
795 		0x24,
796 		0x44,
797 	},
798 	.data = {
799 		{ 0x0,  0x4c, 0x2,  0x0  },
800 		{ 0x0b, 0x15, 0x26, 0xa0 },
801 		{ 0x60, 0x0,  0x0,  0x0  },
802 		{ 0x8,  0x4,  0x96, 0x28 },
803 		{ 0xfa, 0x0c, 0x84, 0x11 },
804 		{ 0x80, 0x0f, 0xd9, 0x53 },
805 		{ 0x86, 0x0,  0x0,  0x0  },
806 		{ 0x1,  0xa0, 0x1,  0x0  },
807 		{ 0x4b, 0x0,  0x0,  0x0  },
808 		{ 0x28, 0x0,  0x0,  0x0  },
809 		{ 0x0,  0x14, 0x2a, 0x14 },
810 		{ 0x0,  0x0,  0x0,  0x0  },
811 		{ 0x0,  0x0,  0x0,  0x0  },
812 	},
813 };
814 
815 static const struct intel_lt_phy_pll_state xe3plpd_lt_hdmi_742p5 = {
816 	.clock = 74250,
817 	.config = {
818 		0x84,
819 		0x2d,
820 		0x0,
821 	},
822 	.addr_msb = {
823 		0x87,
824 		0x87,
825 		0x87,
826 		0x87,
827 		0x88,
828 		0x88,
829 		0x88,
830 		0x88,
831 		0x88,
832 		0x88,
833 		0x88,
834 		0x88,
835 		0x88,
836 	},
837 	.addr_lsb = {
838 		0x10,
839 		0x0c,
840 		0x14,
841 		0xe4,
842 		0x0c,
843 		0x10,
844 		0x14,
845 		0x18,
846 		0x48,
847 		0x40,
848 		0x4c,
849 		0x24,
850 		0x44,
851 	},
852 	.data = {
853 		{ 0x0,  0x4c, 0x2,  0x0  },
854 		{ 0x4,  0x15, 0x26, 0xa0 },
855 		{ 0x60, 0x0,  0x0,  0x0  },
856 		{ 0x8,  0x4,  0x88, 0x28 },
857 		{ 0xfa, 0x0c, 0x84, 0x11 },
858 		{ 0x80, 0x0f, 0xd9, 0x53 },
859 		{ 0x86, 0x0,  0x0,  0x0  },
860 		{ 0x1,  0xa0, 0x1,  0x0  },
861 		{ 0x4b, 0x0,  0x0,  0x0  },
862 		{ 0x28, 0x0,  0x0,  0x0  },
863 		{ 0x0,  0x14, 0x2a, 0x14 },
864 		{ 0x0,  0x0,  0x0,  0x0  },
865 		{ 0x0,  0x0,  0x0,  0x0  },
866 	},
867 };
868 
869 static const struct intel_lt_phy_pll_state xe3plpd_lt_hdmi_1p485 = {
870 	.clock = 148500,
871 	.config = {
872 		0x84,
873 		0x2d,
874 		0x0,
875 	},
876 	.addr_msb = {
877 		0x87,
878 		0x87,
879 		0x87,
880 		0x87,
881 		0x88,
882 		0x88,
883 		0x88,
884 		0x88,
885 		0x88,
886 		0x88,
887 		0x88,
888 		0x88,
889 		0x88,
890 	},
891 	.addr_lsb = {
892 		0x10,
893 		0x0c,
894 		0x14,
895 		0xe4,
896 		0x0c,
897 		0x10,
898 		0x14,
899 		0x18,
900 		0x48,
901 		0x40,
902 		0x4c,
903 		0x24,
904 		0x44,
905 	},
906 	.data = {
907 		{ 0x0,  0x4c, 0x2,  0x0  },
908 		{ 0x2,  0x15, 0x26, 0xa0 },
909 		{ 0x60, 0x0,  0x0,  0x0  },
910 		{ 0x8,  0x4,  0x84, 0x28 },
911 		{ 0xfa, 0x0c, 0x84, 0x11 },
912 		{ 0x80, 0x0f, 0xd9, 0x53 },
913 		{ 0x86, 0x0,  0x0,  0x0  },
914 		{ 0x1,  0xa0, 0x1,  0x0  },
915 		{ 0x4b, 0x0,  0x0,  0x0  },
916 		{ 0x28, 0x0,  0x0,  0x0  },
917 		{ 0x0,  0x14, 0x2a, 0x14 },
918 		{ 0x0,  0x0,  0x0,  0x0  },
919 		{ 0x0,  0x0,  0x0,  0x0  },
920 	},
921 };
922 
923 static const struct intel_lt_phy_pll_state xe3plpd_lt_hdmi_5p94 = {
924 	.clock = 594000,
925 	.config = {
926 		0x84,
927 		0x2d,
928 		0x0,
929 	},
930 	.addr_msb = {
931 		0x87,
932 		0x87,
933 		0x87,
934 		0x87,
935 		0x88,
936 		0x88,
937 		0x88,
938 		0x88,
939 		0x88,
940 		0x88,
941 		0x88,
942 		0x88,
943 		0x88,
944 	},
945 	.addr_lsb = {
946 		0x10,
947 		0x0c,
948 		0x14,
949 		0xe4,
950 		0x0c,
951 		0x10,
952 		0x14,
953 		0x18,
954 		0x48,
955 		0x40,
956 		0x4c,
957 		0x24,
958 		0x44,
959 	},
960 	.data = {
961 		{ 0x0,  0x4c, 0x2,  0x0  },
962 		{ 0x0,  0x95, 0x26, 0xa0 },
963 		{ 0x60, 0x0,  0x0,  0x0  },
964 		{ 0x8,  0x4,  0x81, 0x28 },
965 		{ 0xfa, 0x0c, 0x84, 0x11 },
966 		{ 0x80, 0x0f, 0xd9, 0x53 },
967 		{ 0x86, 0x0,  0x0,  0x0  },
968 		{ 0x1,  0xa0, 0x1,  0x0  },
969 		{ 0x4b, 0x0,  0x0,  0x0  },
970 		{ 0x28, 0x0,  0x0,  0x0  },
971 		{ 0x0,  0x14, 0x2a, 0x14 },
972 		{ 0x0,  0x0,  0x0,  0x0  },
973 		{ 0x0,  0x0,  0x0,  0x0  },
974 	},
975 };
976 
977 static const struct intel_lt_phy_pll_state * const xe3plpd_lt_hdmi_tables[] = {
978 	&xe3plpd_lt_hdmi_252,
979 	&xe3plpd_lt_hdmi_272,
980 	&xe3plpd_lt_hdmi_742p5,
981 	&xe3plpd_lt_hdmi_1p485,
982 	&xe3plpd_lt_hdmi_5p94,
983 	NULL,
984 };
985 
986 static u8 intel_lt_phy_get_owned_lane_mask(struct intel_encoder *encoder)
987 {
988 	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
989 
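	/*
	 * Outside DP-alt mode the port owns both PHY lanes; in DP-alt mode
	 * lane 1 is only owned when the pin assignment provides more than
	 * two lanes.
	 */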
990 	if (!intel_tc_port_in_dp_alt_mode(dig_port))
991 		return INTEL_LT_PHY_BOTH_LANES;
992 
993 	return intel_tc_port_max_lane_count(dig_port) > 2
994 		? INTEL_LT_PHY_BOTH_LANES : INTEL_LT_PHY_LANE0;
995 }
996 
997 static u8 intel_lt_phy_read(struct intel_encoder *encoder, u8 lane_mask, u16 addr)
998 {
999 	return intel_cx0_read(encoder, lane_mask, addr);
1000 }
1001 
1002 static void intel_lt_phy_write(struct intel_encoder *encoder,
1003 			       u8 lane_mask, u16 addr, u8 data, bool committed)
1004 {
1005 	intel_cx0_write(encoder, lane_mask, addr, data, committed);
1006 }
1007 
1008 static void intel_lt_phy_rmw(struct intel_encoder *encoder,
1009 			     u8 lane_mask, u16 addr, u8 clear, u8 set, bool committed)
1010 {
1011 	intel_cx0_rmw(encoder, lane_mask, addr, clear, set, committed);
1012 }
1013 
1014 static void intel_lt_phy_clear_status_p2p(struct intel_encoder *encoder,
1015 					  int lane)
1016 {
1017 	struct intel_display *display = to_intel_display(encoder);
1018 
1019 	intel_de_rmw(display,
1020 		     XE3PLPD_PORT_P2M_MSGBUS_STATUS_P2P(encoder->port, lane),
1021 		     XELPDP_PORT_P2M_RESPONSE_READY, 0);
1022 }
1023 
1024 static void
1025 assert_dc_off(struct intel_display *display)
1026 {
1027 	bool enabled;
1028 
1029 	enabled = intel_display_power_is_enabled(display, POWER_DOMAIN_DC_OFF);
1030 	drm_WARN_ON(display->drm, !enabled);
1031 }
1032 
1033 static int __intel_lt_phy_p2p_write_once(struct intel_encoder *encoder,
1034 					 int lane, u16 addr, u8 data,
1035 					 i915_reg_t mac_reg_addr,
1036 					 u8 expected_mac_val)
1037 {
1038 	struct intel_display *display = to_intel_display(encoder);
1039 	enum port port = encoder->port;
1040 	enum phy phy = intel_encoder_to_phy(encoder);
1041 	int ack;
1042 	u32 val;
1043 
1044 	if (intel_de_wait_for_clear(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane),
1045 				    XELPDP_PORT_P2P_TRANSACTION_PENDING,
1046 				    XELPDP_MSGBUS_TIMEOUT_SLOW)) {
1047 		drm_dbg_kms(display->drm,
1048 			    "PHY %c Timeout waiting for previous transaction to complete. Resetting bus.\n",
1049 			    phy_name(phy));
1050 		intel_cx0_bus_reset(encoder, lane);
1051 		return -ETIMEDOUT;
1052 	}
1053 
1054 	intel_de_rmw(display, XELPDP_PORT_P2M_MSGBUS_STATUS(display, port, lane), 0, 0);
1055 
1056 	intel_de_write(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane),
1057 		       XELPDP_PORT_P2P_TRANSACTION_PENDING |
1058 		       XELPDP_PORT_M2P_COMMAND_WRITE_COMMITTED |
1059 		       XELPDP_PORT_M2P_DATA(data) |
1060 		       XELPDP_PORT_M2P_ADDRESS(addr));
1061 
1062 	ack = intel_cx0_wait_for_ack(encoder, XELPDP_PORT_P2M_COMMAND_WRITE_ACK, lane, &val);
1063 	if (ack < 0)
1064 		return ack;
1065 
1066 	if (val & XELPDP_PORT_P2M_ERROR_SET) {
1067 		drm_dbg_kms(display->drm,
1068 			    "PHY %c Error occurred during P2P write command. Status: 0x%x\n",
1069 			    phy_name(phy), val);
1070 		intel_lt_phy_clear_status_p2p(encoder, lane);
1071 		intel_cx0_bus_reset(encoder, lane);
1072 		return -EINVAL;
1073 	}
1074 
1075 	/*
1076 	 * RE-VISIT:
1077 	 * This delay gives the PHY time to set everything up; it was found to be
1078 	 * a requirement to get the display up and running. It covers the time the
1079 	 * PHY takes to settle down after being programmed.
1080 	 */
1081 	udelay(150);
1082 	intel_clear_response_ready_flag(encoder, lane);
1083 	intel_lt_phy_clear_status_p2p(encoder, lane);
1084 
1085 	return 0;
1086 }
1087 
1088 static void __intel_lt_phy_p2p_write(struct intel_encoder *encoder,
1089 				     int lane, u16 addr, u8 data,
1090 				     i915_reg_t mac_reg_addr,
1091 				     u8 expected_mac_val)
1092 {
1093 	struct intel_display *display = to_intel_display(encoder);
1094 	enum phy phy = intel_encoder_to_phy(encoder);
1095 	int i, status;
1096 
1097 	assert_dc_off(display);
1098 
1099 	/* 3 tries is assumed to be enough to write successfully */
1100 	for (i = 0; i < 3; i++) {
1101 		status = __intel_lt_phy_p2p_write_once(encoder, lane, addr, data, mac_reg_addr,
1102 						       expected_mac_val);
1103 
1104 		if (status == 0)
1105 			return;
1106 	}
1107 
1108 	drm_err_once(display->drm,
1109 		     "PHY %c P2P Write %04x failed after %d retries.\n", phy_name(phy), addr, i);
1110 }
1111 
1112 static void intel_lt_phy_p2p_write(struct intel_encoder *encoder,
1113 				   u8 lane_mask, u16 addr, u8 data,
1114 				   i915_reg_t mac_reg_addr,
1115 				   u8 expected_mac_val)
1116 {
1117 	int lane;
1118 
1119 	for_each_lt_phy_lane_in_mask(lane_mask, lane)
1120 		__intel_lt_phy_p2p_write(encoder, lane, addr, data, mac_reg_addr, expected_mac_val);
1121 }
1122 
1123 static void
1124 intel_lt_phy_setup_powerdown(struct intel_encoder *encoder, u8 lane_count)
1125 {
1126 	/*
1127 	 * The new PORT_BUF_CTL6 handling for DC5 entry and exit is expected to be
1128 	 * done by the DMC firmware; it is not explicitly mentioned in Bspec. That
1129 	 * leaves this function as a plain wrapper, kept in anticipation of changes.
1130 	 */
1131 	intel_cx0_setup_powerdown(encoder);
1132 }
1133 
1134 static void
1135 intel_lt_phy_powerdown_change_sequence(struct intel_encoder *encoder,
1136 				       u8 lane_mask, u8 state)
1137 {
1138 	intel_cx0_powerdown_change_sequence(encoder, lane_mask, state);
1139 }
1140 
1141 static void
1142 intel_lt_phy_lane_reset(struct intel_encoder *encoder,
1143 			u8 lane_count)
1144 {
1145 	struct intel_display *display = to_intel_display(encoder);
1146 	enum port port = encoder->port;
1147 	enum phy phy = intel_encoder_to_phy(encoder);
1148 	u8 owned_lane_mask = intel_lt_phy_get_owned_lane_mask(encoder);
1149 	u32 lane_pipe_reset = owned_lane_mask == INTEL_LT_PHY_BOTH_LANES
1150 				? XELPDP_LANE_PIPE_RESET(0) | XELPDP_LANE_PIPE_RESET(1)
1151 				: XELPDP_LANE_PIPE_RESET(0);
1152 	u32 lane_phy_current_status = owned_lane_mask == INTEL_LT_PHY_BOTH_LANES
1153 					? (XELPDP_LANE_PHY_CURRENT_STATUS(0) |
1154 					   XELPDP_LANE_PHY_CURRENT_STATUS(1))
1155 					: XELPDP_LANE_PHY_CURRENT_STATUS(0);
1156 	u32 lane_phy_pulse_status = owned_lane_mask == INTEL_LT_PHY_BOTH_LANES
1157 					? (XE3PLPDP_LANE_PHY_PULSE_STATUS(0) |
1158 					   XE3PLPDP_LANE_PHY_PULSE_STATUS(1))
1159 					: XE3PLPDP_LANE_PHY_PULSE_STATUS(0);
1160 
1161 	intel_de_rmw(display, XE3PLPD_PORT_BUF_CTL5(port),
1162 		     XE3PLPD_MACCLK_RATE_MASK, XE3PLPD_MACCLK_RATE_DEF);
1163 
1164 	intel_de_rmw(display, XELPDP_PORT_BUF_CTL1(display, port),
1165 		     XE3PLPDP_PHY_MODE_MASK, XE3PLPDP_PHY_MODE_DP);
1166 
1167 	intel_lt_phy_setup_powerdown(encoder, lane_count);
1168 	intel_lt_phy_powerdown_change_sequence(encoder, owned_lane_mask,
1169 					       XELPDP_P2_STATE_RESET);
1170 
1171 	intel_de_rmw(display, XE3PLPD_PORT_BUF_CTL5(port),
1172 		     XE3PLPD_MACCLK_RESET_0, 0);
1173 
1174 	intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, port),
1175 		     XELPDP_LANE_PCLK_PLL_REQUEST(0),
1176 		     XELPDP_LANE_PCLK_PLL_REQUEST(0));
1177 
1178 	if (intel_de_wait_custom(display, XELPDP_PORT_CLOCK_CTL(display, port),
1179 				 XELPDP_LANE_PCLK_PLL_ACK(0),
1180 				 XELPDP_LANE_PCLK_PLL_ACK(0),
1181 				 XE3PLPD_MACCLK_TURNON_LATENCY_US,
1182 				 XE3PLPD_MACCLK_TURNON_LATENCY_MS, NULL))
1183 		drm_warn(display->drm, "PHY %c PLL MacCLK assertion Ack not done after %dus.\n",
1184 			 phy_name(phy), XE3PLPD_MACCLK_TURNON_LATENCY_MS * 1000);
1185 
1186 	intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, port),
1187 		     XELPDP_FORWARD_CLOCK_UNGATE,
1188 		     XELPDP_FORWARD_CLOCK_UNGATE);
1189 
1190 	intel_de_rmw(display, XELPDP_PORT_BUF_CTL2(display, port),
1191 		     lane_pipe_reset | lane_phy_pulse_status, 0);
1192 
1193 	if (intel_de_wait_custom(display, XELPDP_PORT_BUF_CTL2(display, port),
1194 				 lane_phy_current_status, 0,
1195 				 XE3PLPD_RESET_END_LATENCY_US, 2, NULL))
1196 		drm_warn(display->drm,
1197 			 "PHY %c failed to bring out of Lane reset after %dus.\n",
1198 			 phy_name(phy), XE3PLPD_RESET_END_LATENCY_US);
1199 
1200 	if (intel_de_wait_custom(display, XELPDP_PORT_BUF_CTL2(display, port),
1201 				 lane_phy_pulse_status, lane_phy_pulse_status,
1202 				 XE3PLPD_RATE_CALIB_DONE_LATENCY_US, 0, NULL))
1203 		drm_warn(display->drm, "PHY %c PLL rate not changed after %dus.\n",
1204 			 phy_name(phy), XE3PLPD_RATE_CALIB_DONE_LATENCY_US);
1205 
1206 	intel_de_rmw(display, XELPDP_PORT_BUF_CTL2(display, port), lane_phy_pulse_status, 0);
1207 }
1208 
1209 static void
1210 intel_lt_phy_program_port_clock_ctl(struct intel_encoder *encoder,
1211 				    const struct intel_crtc_state *crtc_state,
1212 				    bool lane_reversal)
1213 {
1214 	struct intel_display *display = to_intel_display(encoder);
1215 	u32 val = 0;
1216 
1217 	intel_de_rmw(display, XELPDP_PORT_BUF_CTL1(display, encoder->port),
1218 		     XELPDP_PORT_REVERSAL,
1219 		     lane_reversal ? XELPDP_PORT_REVERSAL : 0);
1220 
1221 	val |= XELPDP_FORWARD_CLOCK_UNGATE;
1222 
1223 	/*
1224 	 * With the LT PHY this clock really means MACCLK rather than MAXPCLK,
1225 	 * but since the register bits remain the same we reuse the same
1226 	 * definition.
1227 	 */
1228 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) &&
1229 	    intel_hdmi_is_frl(crtc_state->port_clock))
1230 		val |= XELPDP_DDI_CLOCK_SELECT_PREP(display, XELPDP_DDI_CLOCK_SELECT_DIV18CLK);
1231 	else
1232 		val |= XELPDP_DDI_CLOCK_SELECT_PREP(display, XELPDP_DDI_CLOCK_SELECT_MAXPCLK);
1233 
1234 	/* DP 2.0 10G and 20G rates enable MPLLA */
1235 	if (crtc_state->port_clock == 1000000 || crtc_state->port_clock == 2000000)
1236 		val |= XELPDP_SSC_ENABLE_PLLA;
1237 	else
1238 		val |= crtc_state->dpll_hw_state.ltpll.ssc_enabled ? XELPDP_SSC_ENABLE_PLLB : 0;
1239 
1240 	intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port),
1241 		     XELPDP_LANE1_PHY_CLOCK_SELECT | XELPDP_FORWARD_CLOCK_UNGATE |
1242 		     XELPDP_DDI_CLOCK_SELECT_MASK(display) | XELPDP_SSC_ENABLE_PLLA |
1243 		     XELPDP_SSC_ENABLE_PLLB, val);
1244 }
1245 
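/*
 * Map the PHY rate encoding back to a link clock in kHz: 0-3 are DP
 * RBR/HBR1/HBR2/HBR3, 8-10 are UHBR10/13.5/20, and 4-7 plus 11 are the
 * intermediate eDP link rates (2.16, 2.43, 3.24, 4.32 and 6.75 Gbps).
 */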
1246 static u32 intel_lt_phy_get_dp_clock(u8 rate)
1247 {
1248 	switch (rate) {
1249 	case 0:
1250 		return 162000;
1251 	case 1:
1252 		return 270000;
1253 	case 2:
1254 		return 540000;
1255 	case 3:
1256 		return 810000;
1257 	case 4:
1258 		return 216000;
1259 	case 5:
1260 		return 243000;
1261 	case 6:
1262 		return 324000;
1263 	case 7:
1264 		return 432000;
1265 	case 8:
1266 		return 1000000;
1267 	case 9:
1268 		return 1350000;
1269 	case 10:
1270 		return 2000000;
1271 	case 11:
1272 		return 675000;
1273 	default:
1274 		MISSING_CASE(rate);
1275 		return 0;
1276 	}
1277 }
1278 
1279 static bool
1280 intel_lt_phy_config_changed(struct intel_encoder *encoder,
1281 			    const struct intel_crtc_state *crtc_state)
1282 {
1283 	u8 val, rate;
1284 	u32 clock;
1285 
1286 	val = intel_lt_phy_read(encoder, INTEL_LT_PHY_LANE0,
1287 				LT_PHY_VDR_0_CONFIG);
1288 	rate = REG_FIELD_GET8(LT_PHY_VDR_RATE_ENCODING_MASK, val);
1289 
1290 	/*
1291 	 * The only time we do not reconfigure the PLL is when we are
1292 	 * using the 1.62 Gbps (RBR) clock, since the PHY PLL defaults to
1293 	 * that rate; otherwise we always need to reconfigure it.
1294 	 */
1295 	if (intel_crtc_has_dp_encoder(crtc_state)) {
1296 		clock = intel_lt_phy_get_dp_clock(rate);
1297 		if (crtc_state->port_clock == 162000 && crtc_state->port_clock == clock)
1298 			return false;
1299 	}
1300 
1301 	return true;
1302 }
1303 
1304 static intel_wakeref_t intel_lt_phy_transaction_begin(struct intel_encoder *encoder)
1305 {
1306 	struct intel_display *display = to_intel_display(encoder);
1307 	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1308 	intel_wakeref_t wakeref;
1309 
1310 	intel_psr_pause(intel_dp);
1311 	wakeref = intel_display_power_get(display, POWER_DOMAIN_DC_OFF);
1312 
1313 	return wakeref;
1314 }
1315 
1316 static void intel_lt_phy_transaction_end(struct intel_encoder *encoder, intel_wakeref_t wakeref)
1317 {
1318 	struct intel_display *display = to_intel_display(encoder);
1319 	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1320 
1321 	intel_psr_resume(intel_dp);
1322 	intel_display_power_put(display, POWER_DOMAIN_DC_OFF, wakeref);
1323 }
1324 
1325 static const struct intel_lt_phy_pll_state * const *
1326 intel_lt_phy_pll_tables_get(struct intel_crtc_state *crtc_state,
1327 			    struct intel_encoder *encoder)
1328 {
1329 	if (intel_crtc_has_dp_encoder(crtc_state)) {
1330 		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
1331 			return xe3plpd_lt_edp_tables;
1332 
1333 		return xe3plpd_lt_dp_tables;
1334 	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
1335 		return xe3plpd_lt_hdmi_tables;
1336 	}
1337 
1338 	MISSING_CASE(encoder->type);
1339 	return NULL;
1340 }
1341 
1342 static bool
1343 intel_lt_phy_pll_is_ssc_enabled(struct intel_crtc_state *crtc_state,
1344 				struct intel_encoder *encoder)
1345 {
1346 	struct intel_display *display = to_intel_display(encoder);
1347 
1348 	if (intel_crtc_has_dp_encoder(crtc_state)) {
1349 		if (intel_panel_use_ssc(display)) {
1350 			struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1351 
1352 			return (intel_dp->dpcd[DP_MAX_DOWNSPREAD] & DP_MAX_DOWNSPREAD_0_5);
1353 		}
1354 	}
1355 
1356 	return false;
1357 }
1358 
1359 static int
1360 intel_lt_phy_calc_hdmi_port_clock(const struct intel_lt_phy_pll_state *lt_state)
1361 {
1362 #define REF_CLK_KHZ 38400
1363 #define REGVAL(i) (				\
1364 	(lt_state->data[i][3])		|	\
1365 	(lt_state->data[i][2] << 8)	|	\
1366 	(lt_state->data[i][1] << 16)	|	\
1367 	(lt_state->data[i][0] << 24)		\
1368 )
1369 
1370 	int clk = 0;
1371 	u32 d8, pll_reg_5, pll_reg_3, pll_reg_57, m2div_frac, m2div_int;
1372 	u64 temp0, temp1;
1373 	/*
1374 	 * The algorithm uses '+' to combine bitfields when
1375 	 * constructing PLL_reg3 and PLL_reg57:
1376 	 * PLL_reg57 = (D7 << 24) + (postdiv << 15) + (D8 << 7) + D6_new;
1377 	 * PLL_reg3 = (D4 << 21) + (D3 << 18) + (D1 << 15) + (m2div_int << 5);
1378 	 *
1379 	 * However, this is likely intended to be a bitwise OR operation,
1380 	 * as each field occupies distinct, non-overlapping bits in the register.
1381 	 *
1382 	 * PLL_reg57 is composed of the following fields packed into a 32-bit value:
1383 	 * - D7: max value 10 -> fits in 4 bits -> placed at bits 24-27
1384 	 * - postdiv: max value 9 -> fits in 4 bits -> placed at bits 15-18
1385 	 * - D8: derived from loop_cnt / 2, max 127 -> fits in 7 bits
1386 	 *	(though 8 bits are given to it) -> placed at bits 7-14
1387 	 * - D6_new: fits in lower 7 bits -> placed at bits 0-6
1388 	 * PLL_reg57 = (D7 << 24) | (postdiv << 15) | (D8 << 7) | D6_new;
1389 	 *
1390 	 * Similarly, PLL_reg3 is packed as:
1391 	 * - D4: max value 256 -> fits in 9 bits -> placed at bits 21-29
1392 	 * - D3: max value 9 -> fits in 4 bits -> placed at bits 18-21
1393 	 * - D1: max value 2 -> fits in 2 bits -> placed at bits 15-16
1394 	 * - m2div_int: max value 511 -> fits in 9 bits (10 bits allocated)
1395 	 *   -> placed at bits 5-14
1396 	 * PLL_reg3 = (D4 << 21) | (D3 << 18) | (D1 << 15) | (m2div_int << 5);
1397 	 */
1398 	pll_reg_5 = REGVAL(2);
1399 	pll_reg_3 = REGVAL(1);
1400 	pll_reg_57 = REGVAL(3);
1401 	m2div_frac = pll_reg_5;
1402 
1403 	/*
1404 	 * From the forward algorithm we know:
1405 	 * m2div = 2 * m2
1406 	 * val = y * frequency * 5
1407 	 * So now,
1408 	 * frequency = (m2 * 2 * refclk_khz / (d8 * 10))
1409 	 * frequency = (m2div * refclk_khz / (d8 * 10))
1410 	 */
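	/*
	 * As a sanity check of the formula above, plugging in the values from
	 * the xe3plpd_lt_hdmi_5p94 table entry (594000 kHz):
	 *   pll_reg_3  = 0x009526a0 -> m2div_int  = (pll_reg_3 >> 5) & 0x3ff = 309
	 *   pll_reg_57 = 0x08048128 -> d8         = (pll_reg_57 >> 7) & 0xff = 2
	 *   pll_reg_5  = 0x60000000 -> m2div_frac = 0.375 as a 0.32 fixed-point fraction
	 *   clk = (309 + 0.375) * 38400 / (2 * 10) = 594000 kHz
	 */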
1411 	d8 = (pll_reg_57 & REG_GENMASK(14, 7)) >> 7;
1412 	m2div_int = (pll_reg_3  & REG_GENMASK(14, 5)) >> 5;
1413 	temp0 = ((u64)m2div_frac * REF_CLK_KHZ) >> 32;
1414 	temp1 = (u64)m2div_int * REF_CLK_KHZ;
1415 	if (d8 == 0)
1416 		return 0;
1417 
1418 	clk = div_u64((temp1 + temp0), d8 * 10);
1419 
1420 	return clk;
1421 }
1422 
1423 int
1424 intel_lt_phy_calc_port_clock(struct intel_encoder *encoder,
1425 			     const struct intel_crtc_state *crtc_state)
1426 {
1427 	int clk;
1428 	const struct intel_lt_phy_pll_state *lt_state =
1429 		&crtc_state->dpll_hw_state.ltpll;
1430 	u8 mode, rate;
1431 
1432 	mode = REG_FIELD_GET8(LT_PHY_VDR_MODE_ENCODING_MASK,
1433 			      lt_state->config[0]);
1434 	/*
1435 	 * For eDP/DP, read the clock value from the tables via the rate
1436 	 * encoding and return it directly, as the algorithm used for
1437 	 * calculating the HDMI port clock does not exactly match the
1438 	 * eDP/DP link clocks.
1439 	 */
1440 	if (mode == MODE_DP) {
1441 		rate = REG_FIELD_GET8(LT_PHY_VDR_RATE_ENCODING_MASK,
1442 				      lt_state->config[0]);
1443 		clk = intel_lt_phy_get_dp_clock(rate);
1444 	} else {
1445 		clk = intel_lt_phy_calc_hdmi_port_clock(lt_state);
1446 	}
1447 
1448 	return clk;
1449 }
1450 
1451 int
1452 intel_lt_phy_pll_calc_state(struct intel_crtc_state *crtc_state,
1453 			    struct intel_encoder *encoder)
1454 {
1455 	const struct intel_lt_phy_pll_state * const *tables;
1456 	int i;
1457 
1458 	tables = intel_lt_phy_pll_tables_get(crtc_state, encoder);
1459 	if (!tables)
1460 		return -EINVAL;
1461 
1462 	for (i = 0; tables[i]; i++) {
1463 		if (crtc_state->port_clock == tables[i]->clock) {
1464 			crtc_state->dpll_hw_state.ltpll = *tables[i];
1465 			if (intel_crtc_has_dp_encoder(crtc_state)) {
1466 				if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
1467 					crtc_state->dpll_hw_state.ltpll.config[2] = 1;
1468 			}
1469 			crtc_state->dpll_hw_state.ltpll.ssc_enabled =
1470 				intel_lt_phy_pll_is_ssc_enabled(crtc_state, encoder);
1471 			return 0;
1472 		}
1473 	}
1474 
1475 	/* TODO: Add a function to compute the data for HDMI TMDS */
1476 
1477 	return -EINVAL;
1478 }
1479 
1480 static void
1481 intel_lt_phy_program_pll(struct intel_encoder *encoder,
1482 			 const struct intel_crtc_state *crtc_state)
1483 {
1484 	u8 owned_lane_mask = intel_lt_phy_get_owned_lane_mask(encoder);
1485 	int i, j, k;
1486 
1487 	intel_lt_phy_write(encoder, owned_lane_mask, LT_PHY_VDR_0_CONFIG,
1488 			   crtc_state->dpll_hw_state.ltpll.config[0], MB_WRITE_COMMITTED);
1489 	intel_lt_phy_write(encoder, INTEL_LT_PHY_LANE0, LT_PHY_VDR_1_CONFIG,
1490 			   crtc_state->dpll_hw_state.ltpll.config[1], MB_WRITE_COMMITTED);
1491 	intel_lt_phy_write(encoder, owned_lane_mask, LT_PHY_VDR_2_CONFIG,
1492 			   crtc_state->dpll_hw_state.ltpll.config[2], MB_WRITE_COMMITTED);
1493 
1494 	for (i = 0; i <= 12; i++) {
1495 		intel_lt_phy_write(encoder, INTEL_LT_PHY_LANE0, LT_PHY_VDR_X_ADDR_MSB(i),
1496 				   crtc_state->dpll_hw_state.ltpll.addr_msb[i],
1497 				   MB_WRITE_COMMITTED);
1498 		intel_lt_phy_write(encoder, INTEL_LT_PHY_LANE0, LT_PHY_VDR_X_ADDR_LSB(i),
1499 				   crtc_state->dpll_hw_state.ltpll.addr_lsb[i],
1500 				   MB_WRITE_COMMITTED);
1501 
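		/*
		 * The tables store each 32-bit value MSB-first (data[i][0] is
		 * the most significant byte) while the DATAY registers are
		 * written in reverse: DATAY(i, 3) receives data[i][0] and
		 * DATAY(i, 0) receives data[i][3].
		 */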
1502 		for (j = 3, k = 0; j >= 0; j--, k++)
1503 			intel_lt_phy_write(encoder, INTEL_LT_PHY_LANE0,
1504 					   LT_PHY_VDR_X_DATAY(i, j),
1505 					   crtc_state->dpll_hw_state.ltpll.data[i][k],
1506 					   MB_WRITE_COMMITTED);
1507 	}
1508 }
1509 
1510 static void
1511 intel_lt_phy_enable_disable_tx(struct intel_encoder *encoder,
1512 			       const struct intel_crtc_state *crtc_state)
1513 {
1514 	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
1515 	bool lane_reversal = dig_port->lane_reversal;
1516 	u8 lane_count = crtc_state->lane_count;
1517 	bool is_dp_alt =
1518 		intel_tc_port_in_dp_alt_mode(dig_port);
1519 	enum intel_tc_pin_assignment tc_pin =
1520 		intel_tc_port_get_pin_assignment(dig_port);
1521 	u8 transmitter_mask = 0;
1522 
1523 	/*
1524 	 * There are two transmitters per PHY lane and two PHY lanes, so four
1525 	 * transmitters in total. We build a mask of the transmitters that need
1526 	 * to be activated for each lane: TX 0,1 correspond to LANE0 and
1527 	 * TX 2,3 correspond to LANE1.
1528 	 */
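	/*
	 * For example, a 2-lane, non-DP-alt configuration with lane reversal
	 * yields transmitter_mask = REG_GENMASK8(3, 2), i.e. only TX 2,3 on
	 * PHY LANE1 get enabled below.
	 */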
1529 
1530 	switch (lane_count) {
1531 	case 1:
1532 		transmitter_mask = lane_reversal ? REG_BIT8(3) : REG_BIT8(0);
1533 		if (is_dp_alt) {
1534 			if (tc_pin == INTEL_TC_PIN_ASSIGNMENT_D)
1535 				transmitter_mask = REG_BIT8(0);
1536 			else
1537 				transmitter_mask = REG_BIT8(1);
1538 		}
1539 		break;
1540 	case 2:
1541 		transmitter_mask = lane_reversal ? REG_GENMASK8(3, 2) : REG_GENMASK8(1, 0);
1542 		if (is_dp_alt)
1543 			transmitter_mask = REG_GENMASK8(1, 0);
1544 		break;
1545 	case 3:
1546 		transmitter_mask = lane_reversal ? REG_GENMASK8(3, 1) : REG_GENMASK8(2, 0);
1547 		if (is_dp_alt)
1548 			transmitter_mask = REG_GENMASK8(2, 0);
1549 		break;
1550 	case 4:
1551 		transmitter_mask = REG_GENMASK8(3, 0);
1552 		break;
1553 	default:
1554 		MISSING_CASE(lane_count);
1555 		transmitter_mask = REG_GENMASK8(3, 0);
1556 		break;
1557 	}
1558 
1559 	if (transmitter_mask & BIT(0)) {
1560 		intel_lt_phy_p2p_write(encoder, INTEL_LT_PHY_LANE0, LT_PHY_TXY_CTL10(0),
1561 				       LT_PHY_TX_LANE_ENABLE, LT_PHY_TXY_CTL10_MAC(0),
1562 				       LT_PHY_TX_LANE_ENABLE);
1563 	} else {
1564 		intel_lt_phy_p2p_write(encoder, INTEL_LT_PHY_LANE0, LT_PHY_TXY_CTL10(0),
1565 				       0, LT_PHY_TXY_CTL10_MAC(0), 0);
1566 	}
1567 
1568 	if (transmitter_mask & BIT(1)) {
1569 		intel_lt_phy_p2p_write(encoder, INTEL_LT_PHY_LANE0, LT_PHY_TXY_CTL10(1),
1570 				       LT_PHY_TX_LANE_ENABLE, LT_PHY_TXY_CTL10_MAC(1),
1571 				       LT_PHY_TX_LANE_ENABLE);
1572 	} else {
1573 		intel_lt_phy_p2p_write(encoder, INTEL_LT_PHY_LANE0, LT_PHY_TXY_CTL10(1),
1574 				       0, LT_PHY_TXY_CTL10_MAC(1), 0);
1575 	}
1576 
1577 	if (transmitter_mask & BIT(2)) {
1578 		intel_lt_phy_p2p_write(encoder, INTEL_LT_PHY_LANE1, LT_PHY_TXY_CTL10(0),
1579 				       LT_PHY_TX_LANE_ENABLE, LT_PHY_TXY_CTL10_MAC(0),
1580 				       LT_PHY_TX_LANE_ENABLE);
1581 	} else {
1582 		intel_lt_phy_p2p_write(encoder, INTEL_LT_PHY_LANE1, LT_PHY_TXY_CTL10(0),
1583 				       0, LT_PHY_TXY_CTL10_MAC(0), 0);
1584 	}
1585 
1586 	if (transmitter_mask & BIT(3)) {
1587 		intel_lt_phy_p2p_write(encoder, INTEL_LT_PHY_LANE1, LT_PHY_TXY_CTL10(1),
1588 				       LT_PHY_TX_LANE_ENABLE, LT_PHY_TXY_CTL10_MAC(1),
1589 				       LT_PHY_TX_LANE_ENABLE);
1590 	} else {
1591 		intel_lt_phy_p2p_write(encoder, INTEL_LT_PHY_LANE1, LT_PHY_TXY_CTL10(1),
1592 				       0, LT_PHY_TXY_CTL10_MAC(1), 0);
1593 	}
1594 }
1595 
1596 void intel_lt_phy_pll_enable(struct intel_encoder *encoder,
1597 			     const struct intel_crtc_state *crtc_state)
1598 {
1599 	struct intel_display *display = to_intel_display(encoder);
1600 	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
1601 	bool lane_reversal = dig_port->lane_reversal;
1602 	u8 owned_lane_mask = intel_lt_phy_get_owned_lane_mask(encoder);
1603 	enum phy phy = intel_encoder_to_phy(encoder);
1604 	enum port port = encoder->port;
1605 	intel_wakeref_t wakeref = 0;
1606 	u32 lane_phy_pulse_status = owned_lane_mask == INTEL_LT_PHY_BOTH_LANES
1607 					? (XE3PLPDP_LANE_PHY_PULSE_STATUS(0) |
1608 					   XE3PLPDP_LANE_PHY_PULSE_STATUS(1))
1609 					: XE3PLPDP_LANE_PHY_PULSE_STATUS(0);
1610 	u8 rate_update;
1611 
1612 	wakeref = intel_lt_phy_transaction_begin(encoder);
1613 
1614 	/* 1. Enable MacCLK at default 162 MHz frequency. */
1615 	intel_lt_phy_lane_reset(encoder, crtc_state->lane_count);
1616 
1617 	/* 2. Program PORT_CLOCK_CTL register to configure clock muxes, gating, and SSC. */
1618 	intel_lt_phy_program_port_clock_ctl(encoder, crtc_state, lane_reversal);
1619 
1620 	/* 3. Change owned PHY lanes power to Ready state. */
1621 	intel_lt_phy_powerdown_change_sequence(encoder, owned_lane_mask,
1622 					       XELPDP_P2_STATE_READY);
1623 
1624 	/*
1625 	 * 4. Read the PHY message bus VDR register PHY_VDR_0_Config check enabled PLL type,
1626 	 * encoded rate and encoded mode.
1627 	 */
1628 	if (intel_lt_phy_config_changed(encoder, crtc_state)) {
1629 		/*
1630 		 * 5. Program the PHY internal PLL registers over PHY message bus for the desired
1631 		 * frequency and protocol type
1632 		 */
1633 		intel_lt_phy_program_pll(encoder, crtc_state);
1634 
1635 		/* 6. Use the P2P transaction flow */
1636 		/*
1637 		 * 6.1. Set the PHY VDR register 0xCC4[Rate Control VDR Update] = 1 over PHY message
1638 		 * bus for Owned PHY Lanes.
1639 		 */
1640 		/*
1641 		 * 6.2. Poll for P2P Transaction Ready = "1" and read the MAC message bus VDR
1642 		 * register at offset 0xC00 for Owned PHY Lanes*.
1643 		 */
1644 		/* 6.3. Clear P2P transaction Ready bit. */
1645 		intel_lt_phy_p2p_write(encoder, owned_lane_mask, LT_PHY_RATE_UPDATE,
1646 				       LT_PHY_RATE_CONTROL_VDR_UPDATE, LT_PHY_MAC_VDR,
1647 				       LT_PHY_PCLKIN_GATE);
1648 
1649 		/* 7. Program PORT_CLOCK_CTL[PCLK PLL Request LN0] = 0. */
1650 		intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, port),
1651 			     XELPDP_LANE_PCLK_PLL_REQUEST(0), 0);
1652 
1653 		/* 8. Poll for PORT_CLOCK_CTL[PCLK PLL Ack LN0]= 0. */
1654 		if (intel_de_wait_custom(display, XELPDP_PORT_CLOCK_CTL(display, port),
1655 					 XELPDP_LANE_PCLK_PLL_ACK(0), 0,
1656 					 XE3PLPD_MACCLK_TURNOFF_LATENCY_US, 0, NULL))
1657 			drm_warn(display->drm, "PHY %c PLL MacCLK Ack deassertion Timeout after %dus.\n",
1658 				 phy_name(phy), XE3PLPD_MACCLK_TURNOFF_LATENCY_US);
1659 
1660 		/*
1661 		 * 9. Follow the Display Voltage Frequency Switching - Sequence Before Frequency
1662 		 * Change. We handle this step in bxt_set_cdclk().
1663 		 */
1664 		/* 10. Program DDI_CLK_VALFREQ to match intended DDI clock frequency. */
1665 		intel_de_write(display, DDI_CLK_VALFREQ(encoder->port),
1666 			       crtc_state->port_clock);
1667 
1668 		/* 11. Program PORT_CLOCK_CTL[PCLK PLL Request LN0] = 1. */
1669 		intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, port),
1670 			     XELPDP_LANE_PCLK_PLL_REQUEST(0),
1671 			     XELPDP_LANE_PCLK_PLL_REQUEST(0));
1672 
1673 		/* 12. Poll for PORT_CLOCK_CTL[PCLK PLL Ack LN0]= 1. */
1674 		if (intel_de_wait_custom(display, XELPDP_PORT_CLOCK_CTL(display, port),
1675 					 XELPDP_LANE_PCLK_PLL_ACK(0),
1676 					 XELPDP_LANE_PCLK_PLL_ACK(0),
1677 					 XE3PLPD_MACCLK_TURNON_LATENCY_US, 2, NULL))
1678 			drm_warn(display->drm, "PHY %c PLL MacCLK Ack assertion Timeout after %dus.\n",
1679 				 phy_name(phy), XE3PLPD_MACCLK_TURNON_LATENCY_US);
1680 
1681 		/*
1682 		 * 13. Ungate the forward clock by setting
1683 		 * PORT_CLOCK_CTL[Forward Clock Ungate] = 1.
1684 		 */
1685 		intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, port),
1686 			     XELPDP_FORWARD_CLOCK_UNGATE,
1687 			     XELPDP_FORWARD_CLOCK_UNGATE);
1688 
1689 		/* 14. SW clears PORT_BUF_CTL2 [PHY Pulse Status]. */
1690 		intel_de_rmw(display, XELPDP_PORT_BUF_CTL2(display, port),
1691 			     lane_phy_pulse_status,
1692 			     lane_phy_pulse_status);
1693 		/*
1694 		 * 15. Clear the PHY VDR register 0xCC4[Rate Control VDR Update] over
1695 		 * PHY message bus for Owned PHY Lanes.
1696 		 */
1697 		rate_update = intel_lt_phy_read(encoder, INTEL_LT_PHY_LANE0, LT_PHY_RATE_UPDATE);
1698 		rate_update &= ~LT_PHY_RATE_CONTROL_VDR_UPDATE;
1699 		intel_lt_phy_write(encoder, owned_lane_mask, LT_PHY_RATE_UPDATE,
1700 				   rate_update, MB_WRITE_COMMITTED);
1701 
1702 		/* 16. Poll for PORT_BUF_CTL2 register PHY Pulse Status = 1 for Owned PHY Lanes. */
1703 		if (intel_de_wait_custom(display, XELPDP_PORT_BUF_CTL2(display, port),
1704 					 lane_phy_pulse_status, lane_phy_pulse_status,
1705 					 XE3PLPD_RATE_CALIB_DONE_LATENCY_US, 2, NULL))
1706 			drm_warn(display->drm, "PHY %c PLL rate not changed after %dus.\n",
1707 				 phy_name(phy), XE3PLPD_RATE_CALIB_DONE_LATENCY_US);
1708 
1709 		/* 17. SW clears PORT_BUF_CTL2 [PHY Pulse Status]. */
1710 		intel_de_rmw(display, XELPDP_PORT_BUF_CTL2(display, port),
1711 			     lane_phy_pulse_status,
1712 			     lane_phy_pulse_status);
1713 	} else {
1714 		intel_de_write(display, DDI_CLK_VALFREQ(encoder->port), crtc_state->port_clock);
1715 	}
1716 
1717 	/*
1718 	 * 18. Follow the Display Voltage Frequency Switching - Sequence After Frequency Change.
1719 	 * We handle this step in bxt_set_cdclk()
1720 	 */
1721 	/* 19. Move the PHY powerdown state to Active and program to enable/disable transmitters */
1722 	intel_lt_phy_powerdown_change_sequence(encoder, owned_lane_mask,
1723 					       XELPDP_P0_STATE_ACTIVE);
1724 
1725 	intel_lt_phy_enable_disable_tx(encoder, crtc_state);
1726 	intel_lt_phy_transaction_end(encoder, wakeref);
1727 }
1728 
1729 void intel_lt_phy_pll_disable(struct intel_encoder *encoder)
1730 {
1731 	struct intel_display *display = to_intel_display(encoder);
1732 	enum phy phy = intel_encoder_to_phy(encoder);
1733 	enum port port = encoder->port;
1734 	intel_wakeref_t wakeref;
1735 	u8 owned_lane_mask = intel_lt_phy_get_owned_lane_mask(encoder);
1736 	u32 lane_pipe_reset = owned_lane_mask == INTEL_LT_PHY_BOTH_LANES
1737 				? (XELPDP_LANE_PIPE_RESET(0) |
1738 				   XELPDP_LANE_PIPE_RESET(1))
1739 				: XELPDP_LANE_PIPE_RESET(0);
1740 	u32 lane_phy_current_status = owned_lane_mask == INTEL_LT_PHY_BOTH_LANES
1741 					? (XELPDP_LANE_PHY_CURRENT_STATUS(0) |
1742 					   XELPDP_LANE_PHY_CURRENT_STATUS(1))
1743 					: XELPDP_LANE_PHY_CURRENT_STATUS(0);
1744 	u32 lane_phy_pulse_status = owned_lane_mask == INTEL_LT_PHY_BOTH_LANES
1745 					? (XE3PLPDP_LANE_PHY_PULSE_STATUS(0) |
1746 					   XE3PLPDP_LANE_PHY_PULSE_STATUS(1))
1747 					: XE3PLPDP_LANE_PHY_PULSE_STATUS(0);
1748 
1749 	wakeref = intel_lt_phy_transaction_begin(encoder);
1750 
1751 	/* 1. Clear PORT_BUF_CTL2 [PHY Pulse Status]. */
1752 	intel_de_rmw(display, XELPDP_PORT_BUF_CTL2(display, port),
1753 		     lane_phy_pulse_status,
1754 		     lane_phy_pulse_status);
1755 
1756 	/* 2. Set PORT_BUF_CTL2<port> Lane<PHY Lanes Owned> Pipe Reset to 1. */
1757 	intel_de_rmw(display, XELPDP_PORT_BUF_CTL2(display, port), lane_pipe_reset,
1758 		     lane_pipe_reset);
1759 
1760 	/* 3. Poll for PORT_BUF_CTL2<port> Lane<PHY Lanes Owned> PHY Current Status == 1. */
1761 	if (intel_de_wait_custom(display, XELPDP_PORT_BUF_CTL2(display, port),
1762 				 lane_phy_current_status,
1763 				 lane_phy_current_status,
1764 				 XE3PLPD_RESET_START_LATENCY_US, 0, NULL))
1765 		drm_warn(display->drm,
1766 			 "PHY %c failed to reset Lane after %dus.\n",
1767 			 phy_name(phy), XE3PLPD_RESET_START_LATENCY_US);
1768 
1769 	/* 4. Clear for PHY pulse status on owned PHY lanes. */
1770 	intel_de_rmw(display, XELPDP_PORT_BUF_CTL2(display, port),
1771 		     lane_phy_pulse_status,
1772 		     lane_phy_pulse_status);
1773 
1774 	/*
1775 	 * 5. Follow the Display Voltage Frequency Switching -
1776 	 * Sequence Before Frequency Change. We handle this step in bxt_set_cdclk().
1777 	 */
1778 	/* 6. Program PORT_CLOCK_CTL[PCLK PLL Request LN0] = 0. */
1779 	intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, port),
1780 		     XELPDP_LANE_PCLK_PLL_REQUEST(0), 0);
1781 
1782 	/* 7. Program DDI_CLK_VALFREQ to 0. */
1783 	intel_de_write(display, DDI_CLK_VALFREQ(encoder->port), 0);
1784 
1785 	/* 8. Poll for PORT_CLOCK_CTL[PCLK PLL Ack LN0]= 0. */
1786 	if (intel_de_wait_custom(display, XELPDP_PORT_CLOCK_CTL(display, port),
1787 				 XELPDP_LANE_PCLK_PLL_ACK(0), 0,
1788 				 XE3PLPD_MACCLK_TURNOFF_LATENCY_US, 0, NULL))
1789 		drm_warn(display->drm, "PHY %c PLL MacCLK Ack deassertion Timeout after %dus.\n",
1790 			 phy_name(phy), XE3PLPD_MACCLK_TURNOFF_LATENCY_US);
1791 
1792 	/*
1793 	 *  9. Follow the Display Voltage Frequency Switching -
1794 	 *  Sequence After Frequency Change. We handle this step in bxt_set_cdclk().
1795 	 */
1796 	/* 10. Program PORT_CLOCK_CTL register to disable and gate clocks. */
1797 	intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, port),
1798 		     XELPDP_DDI_CLOCK_SELECT_MASK(display) | XELPDP_FORWARD_CLOCK_UNGATE, 0);
1799 
1800 	/* 11. Program PORT_BUF_CTL5[MacCLK Reset_0] = 1 to assert MacCLK reset. */
1801 	intel_de_rmw(display, XE3PLPD_PORT_BUF_CTL5(port),
1802 		     XE3PLPD_MACCLK_RESET_0, XE3PLPD_MACCLK_RESET_0);
1803 
1804 	intel_lt_phy_transaction_end(encoder, wakeref);
1805 }
1806 
1807 void intel_lt_phy_set_signal_levels(struct intel_encoder *encoder,
1808 				    const struct intel_crtc_state *crtc_state)
1809 {
1810 	struct intel_display *display = to_intel_display(encoder);
1811 	const struct intel_ddi_buf_trans *trans;
1812 	u8 owned_lane_mask;
1813 	intel_wakeref_t wakeref;
1814 	int n_entries, ln;
1815 	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
1816 
1817 	if (intel_tc_port_in_tbt_alt_mode(dig_port))
1818 		return;
1819 
1820 	owned_lane_mask = intel_lt_phy_get_owned_lane_mask(encoder);
1821 
1822 	wakeref = intel_lt_phy_transaction_begin(encoder);
1823 
1824 	trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
1825 	if (drm_WARN_ON_ONCE(display->drm, !trans)) {
1826 		intel_lt_phy_transaction_end(encoder, wakeref);
1827 		return;
1828 	}
1829 
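	/*
	 * Each PHY lane has two transmitters: logical lane ln maps to PHY lane
	 * ln / 2 and transmitter ln % 2 within it.
	 */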
1830 	for (ln = 0; ln < crtc_state->lane_count; ln++) {
1831 		int level = intel_ddi_level(encoder, crtc_state, ln);
1832 		int lane = ln / 2;
1833 		int tx = ln % 2;
1834 		u8 lane_mask = lane == 0 ? INTEL_LT_PHY_LANE0 : INTEL_LT_PHY_LANE1;
1835 
1836 		if (!(lane_mask & owned_lane_mask))
1837 			continue;
1838 
1839 		intel_lt_phy_rmw(encoder, lane_mask, LT_PHY_TXY_CTL8(tx),
1840 				 LT_PHY_TX_SWING_LEVEL_MASK | LT_PHY_TX_SWING_MASK,
1841 				 LT_PHY_TX_SWING_LEVEL(trans->entries[level].lt.txswing_level) |
1842 				 LT_PHY_TX_SWING(trans->entries[level].lt.txswing),
1843 				 MB_WRITE_COMMITTED);
1844 
1845 		intel_lt_phy_rmw(encoder, lane_mask, LT_PHY_TXY_CTL2(tx),
1846 				 LT_PHY_TX_CURSOR_MASK,
1847 				 LT_PHY_TX_CURSOR(trans->entries[level].lt.pre_cursor),
1848 				 MB_WRITE_COMMITTED);
1849 		intel_lt_phy_rmw(encoder, lane_mask, LT_PHY_TXY_CTL3(tx),
1850 				 LT_PHY_TX_CURSOR_MASK,
1851 				 LT_PHY_TX_CURSOR(trans->entries[level].lt.main_cursor),
1852 				 MB_WRITE_COMMITTED);
1853 		intel_lt_phy_rmw(encoder, lane_mask, LT_PHY_TXY_CTL4(tx),
1854 				 LT_PHY_TX_CURSOR_MASK,
1855 				 LT_PHY_TX_CURSOR(trans->entries[level].lt.post_cursor),
1856 				 MB_WRITE_COMMITTED);
1857 	}
1858 
1859 	intel_lt_phy_transaction_end(encoder, wakeref);
1860 }
1861 
1862 void intel_lt_phy_dump_hw_state(struct intel_display *display,
1863 				const struct intel_lt_phy_pll_state *hw_state)
1864 {
1865 	int i, j;
1866 
1867 	drm_dbg_kms(display->drm, "lt_phy_pll_hw_state:\n");
1868 	for (i = 0; i < 3; i++) {
1869 		drm_dbg_kms(display->drm, "config[%d] = 0x%.4x,\n",
1870 			    i, hw_state->config[i]);
1871 	}
1872 
1873 	for (i = 0; i <= 12; i++)
1874 		for (j = 3; j >= 0; j--)
1875 			drm_dbg_kms(display->drm, "vdr_data[%d][%d] = 0x%.4x,\n",
1876 				    i, j, hw_state->data[i][j]);
1877 }
1878 
1879 bool
1880 intel_lt_phy_pll_compare_hw_state(const struct intel_lt_phy_pll_state *a,
1881 				  const struct intel_lt_phy_pll_state *b)
1882 {
1883 	if (memcmp(&a->config, &b->config, sizeof(a->config)) != 0)
1884 		return false;
1885 
1886 	if (memcmp(&a->data, &b->data, sizeof(a->data)) != 0)
1887 		return false;
1888 
1889 	return true;
1890 }
1891 
1892 void intel_lt_phy_pll_readout_hw_state(struct intel_encoder *encoder,
1893 				       const struct intel_crtc_state *crtc_state,
1894 				       struct intel_lt_phy_pll_state *pll_state)
1895 {
1896 	u8 owned_lane_mask;
1897 	u8 lane;
1898 	intel_wakeref_t wakeref;
1899 	int i, j, k;
1900 
1901 	pll_state->tbt_mode = intel_tc_port_in_tbt_alt_mode(enc_to_dig_port(encoder));
1902 	if (pll_state->tbt_mode)
1903 		return;
1904 
1905 	owned_lane_mask = intel_lt_phy_get_owned_lane_mask(encoder);
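	/* Read back via PHY lane 0 if we own it, otherwise via lane 1. */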
1906 	lane = owned_lane_mask & INTEL_LT_PHY_LANE0 ? : INTEL_LT_PHY_LANE1;
1907 	wakeref = intel_lt_phy_transaction_begin(encoder);
1908 
1909 	pll_state->config[0] = intel_lt_phy_read(encoder, lane, LT_PHY_VDR_0_CONFIG);
1910 	pll_state->config[1] = intel_lt_phy_read(encoder, INTEL_LT_PHY_LANE0, LT_PHY_VDR_1_CONFIG);
1911 	pll_state->config[2] = intel_lt_phy_read(encoder, lane, LT_PHY_VDR_2_CONFIG);
1912 
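	/* Read back in the same reversed byte order used by intel_lt_phy_program_pll(). */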
1913 	for (i = 0; i <= 12; i++) {
1914 		for (j = 3, k = 0; j >= 0; j--, k++)
1915 			pll_state->data[i][k] =
1916 				intel_lt_phy_read(encoder, INTEL_LT_PHY_LANE0,
1917 						  LT_PHY_VDR_X_DATAY(i, j));
1918 	}
1919 
1920 	pll_state->clock =
1921 		intel_lt_phy_calc_port_clock(encoder, crtc_state);
1922 	intel_lt_phy_transaction_end(encoder, wakeref);
1923 }
1924 
1925 void intel_lt_phy_pll_state_verify(struct intel_atomic_state *state,
1926 				   struct intel_crtc *crtc)
1927 {
1928 	struct intel_display *display = to_intel_display(state);
1929 	struct intel_digital_port *dig_port;
1930 	const struct intel_crtc_state *new_crtc_state =
1931 		intel_atomic_get_new_crtc_state(state, crtc);
1932 	struct intel_encoder *encoder;
1933 	struct intel_lt_phy_pll_state pll_hw_state = {};
1934 	const struct intel_lt_phy_pll_state *pll_sw_state = &new_crtc_state->dpll_hw_state.ltpll;
1935 	int clock;
1936 	int i, j;
1937 
1938 	if (DISPLAY_VER(display) < 35)
1939 		return;
1940 
1941 	if (!new_crtc_state->hw.active)
1942 		return;
1943 
1944 	/* intel_get_crtc_new_encoder() only works for modeset/fastset commits */
1945 	if (!intel_crtc_needs_modeset(new_crtc_state) &&
1946 	    !intel_crtc_needs_fastset(new_crtc_state))
1947 		return;
1948 
1949 	encoder = intel_get_crtc_new_encoder(state, new_crtc_state);
1950 	intel_lt_phy_pll_readout_hw_state(encoder, new_crtc_state, &pll_hw_state);
1951 	clock = intel_lt_phy_calc_port_clock(encoder, new_crtc_state);
1952 
1953 	dig_port = enc_to_dig_port(encoder);
1954 	if (intel_tc_port_in_tbt_alt_mode(dig_port))
1955 		return;
1956 
1957 	INTEL_DISPLAY_STATE_WARN(display, pll_hw_state.clock != clock,
1958 				 "[CRTC:%d:%s] mismatch in LT PHY: Register CLOCK (expected %d, found %d)",
1959 				 crtc->base.base.id, crtc->base.name,
1960 				 pll_sw_state->clock, pll_hw_state.clock);
1961 
1962 	for (i = 0; i < 3; i++) {
1963 		INTEL_DISPLAY_STATE_WARN(display, pll_hw_state.config[i] != pll_sw_state->config[i],
1964 					 "[CRTC:%d:%s] mismatch in LT PHY PLL CONFIG%d: (expected 0x%04x, found 0x%04x)",
1965 					 crtc->base.base.id, crtc->base.name, i,
1966 					 pll_sw_state->config[i], pll_hw_state.config[i]);
1967 	}
1968 
1969 	for (i = 0; i <= 12; i++) {
1970 		for (j = 3; j >= 0; j--)
1971 			INTEL_DISPLAY_STATE_WARN(display,
1972 						 pll_hw_state.data[i][j] !=
1973 						 pll_sw_state->data[i][j],
1974 						 "[CRTC:%d:%s] mismatch in LT PHY PLL DATA[%d][%d]: (expected 0x%04x, found 0x%04x)",
1975 						 crtc->base.base.id, crtc->base.name, i, j,
1976 						 pll_sw_state->data[i][j], pll_hw_state.data[i][j]);
1977 	}
1978 }
1979 
1980 void intel_xe3plpd_pll_enable(struct intel_encoder *encoder,
1981 			      const struct intel_crtc_state *crtc_state)
1982 {
1983 	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
1984 
1985 	if (intel_tc_port_in_tbt_alt_mode(dig_port))
1986 		intel_mtl_tbt_pll_enable(encoder, crtc_state);
1987 	else
1988 		intel_lt_phy_pll_enable(encoder, crtc_state);
1989 }
1990 
1991 void intel_xe3plpd_pll_disable(struct intel_encoder *encoder)
1992 {
1993 	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
1994 
1995 	if (intel_tc_port_in_tbt_alt_mode(dig_port))
1996 		intel_mtl_tbt_pll_disable(encoder);
1997 	else
1998 		intel_lt_phy_pll_disable(encoder);
2000 }
2001