// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015, The Linux Foundation. All rights reserved.
 */

#include <linux/clk-provider.h>
#include <linux/platform_device.h>
#include <linux/pm_clock.h>
#include <linux/pm_runtime.h>
#include <dt-bindings/phy/phy.h>

#include "dsi_phy.h"

#define S_DIV_ROUND_UP(n, d)	\
	(((n) >= 0) ? (((n) + (d) - 1) / (d)) : (((n) - (d) + 1) / (d)))
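
/*
 * Unlike DIV_ROUND_UP(), this signed variant rounds away from zero, so
 * negative intermediate timing values are not truncated toward zero by
 * C integer division. For example, S_DIV_ROUND_UP(7, 2) == 4, while
 * S_DIV_ROUND_UP(-7, 2) == (-7 - 2 + 1) / 2 == -4.
 */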

static inline s32 linear_inter(s32 tmax, s32 tmin, s32 percent,
				s32 min_result, bool even)
{
	s32 v;

	v = (tmax - tmin) * percent;
	v = S_DIV_ROUND_UP(v, 100) + tmin;
	if (even && (v & 0x1))
		return max_t(s32, min_result, v - 1);
	else
		return max_t(s32, min_result, v);
}
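
/*
 * Worked example of the interpolation above: with tmax = 100, tmin = 20
 * and percent = 10, v = (100 - 20) * 10 = 800; rounded-up division by
 * 100 gives 8, plus tmin yields 28. 28 is already even, so 28 is
 * returned (clamped to min_result if that is larger).
 */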

static void dsi_dphy_timing_calc_clk_zero(struct msm_dsi_dphy_timing *timing,
					  s32 ui, s32 coeff, s32 pcnt)
{
	s32 tmax, tmin, clk_z;
	s32 temp;

	/* reset */
	temp = 300 * coeff - ((timing->clk_prepare >> 1) + 1) * 2 * ui;
	tmin = S_DIV_ROUND_UP(temp, ui) - 2;
	if (tmin > 255) {
		tmax = 511;
		clk_z = linear_inter(2 * tmin, tmin, pcnt, 0, true);
	} else {
		tmax = 255;
		clk_z = linear_inter(tmax, tmin, pcnt, 0, true);
	}

	/*
	 * Adjust: pad clk_zero so that hs_rqst + clk_prepare + clk_zero
	 * is rounded up to a multiple of 8 (e.g. a sum of 27 gives
	 * temp = 3, so clk_zero grows by 8 - 3 = 5 and the sum becomes 32).
	 */
	temp = (timing->hs_rqst + timing->clk_prepare + clk_z) & 0x7;
	timing->clk_zero = clk_z + 8 - temp;
}

int msm_dsi_dphy_timing_calc(struct msm_dsi_dphy_timing *timing,
			     struct msm_dsi_phy_clk_request *clk_req)
{
	const unsigned long bit_rate = clk_req->bitclk_rate;
	const unsigned long esc_rate = clk_req->escclk_rate;
	s32 ui, lpx;
	s32 tmax, tmin;
	s32 pcnt0 = 10;
	s32 pcnt1 = (bit_rate > 1200000000) ? 15 : 10;
	s32 pcnt2 = 10;
	s32 pcnt3 = (bit_rate > 180000000) ? 10 : 40;
	s32 coeff = 1000; /* Precision, should avoid overflow */
	s32 temp;

	if (!bit_rate || !esc_rate)
		return -EINVAL;

	ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000);
	lpx = mult_frac(NSEC_PER_MSEC, coeff, esc_rate / 1000);
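	/*
	 * ui and lpx are the bit-clock unit interval and the escape-clock
	 * period, in nanoseconds scaled by coeff. For instance, at a 1 GHz
	 * bit clock: 10^6 ns * 1000 / (10^9 Hz / 1000) = 1000, i.e. a 1 ns
	 * UI represented as 1000.
	 */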

	tmax = S_DIV_ROUND_UP(95 * coeff, ui) - 2;
	tmin = S_DIV_ROUND_UP(38 * coeff, ui) - 2;
	timing->clk_prepare = linear_inter(tmax, tmin, pcnt0, 0, true);

	temp = lpx / ui;
	if (temp & 0x1)
		timing->hs_rqst = temp;
	else
		timing->hs_rqst = max_t(s32, 0, temp - 2);

	/* Calculate clk_zero after clk_prepare and hs_rqst */
	dsi_dphy_timing_calc_clk_zero(timing, ui, coeff, pcnt2);

	temp = 105 * coeff + 12 * ui - 20 * coeff;
	tmax = S_DIV_ROUND_UP(temp, ui) - 2;
	tmin = S_DIV_ROUND_UP(60 * coeff, ui) - 2;
	timing->clk_trail = linear_inter(tmax, tmin, pcnt3, 0, true);

	temp = 85 * coeff + 6 * ui;
	tmax = S_DIV_ROUND_UP(temp, ui) - 2;
	temp = 40 * coeff + 4 * ui;
	tmin = S_DIV_ROUND_UP(temp, ui) - 2;
	timing->hs_prepare = linear_inter(tmax, tmin, pcnt1, 0, true);

	tmax = 255;
	temp = ((timing->hs_prepare >> 1) + 1) * 2 * ui + 2 * ui;
	temp = 145 * coeff + 10 * ui - temp;
	tmin = S_DIV_ROUND_UP(temp, ui) - 2;
	timing->hs_zero = linear_inter(tmax, tmin, pcnt2, 24, true);

	temp = 105 * coeff + 12 * ui - 20 * coeff;
	tmax = S_DIV_ROUND_UP(temp, ui) - 2;
	temp = 60 * coeff + 4 * ui;
	tmin = S_DIV_ROUND_UP(temp, ui) - 2;
	timing->hs_trail = linear_inter(tmax, tmin, pcnt3, 0, true);

	tmax = 255;
	tmin = S_DIV_ROUND_UP(100 * coeff, ui) - 2;
	timing->hs_exit = linear_inter(tmax, tmin, pcnt2, 0, true);

	tmax = 63;
	temp = ((timing->hs_exit >> 1) + 1) * 2 * ui;
	temp = 60 * coeff + 52 * ui - 24 * ui - temp;
	tmin = S_DIV_ROUND_UP(temp, 8 * ui) - 1;
	timing->shared_timings.clk_post = linear_inter(tmax, tmin, pcnt2, 0,
						       false);
	tmax = 63;
	temp = ((timing->clk_prepare >> 1) + 1) * 2 * ui;
	temp += ((timing->clk_zero >> 1) + 1) * 2 * ui;
	temp += 8 * ui + lpx;
	tmin = S_DIV_ROUND_UP(temp, 8 * ui) - 1;
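	/*
	 * clk_pre is a 6-bit field; if even the required minimum does not
	 * fit, program half the count and set clk_pre_inc_by_2 so the
	 * value is applied in steps of two.
	 */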
	if (tmin > tmax) {
		temp = linear_inter(2 * tmax, tmin, pcnt2, 0, false);
		timing->shared_timings.clk_pre = temp >> 1;
		timing->shared_timings.clk_pre_inc_by_2 = true;
	} else {
		timing->shared_timings.clk_pre =
			linear_inter(tmax, tmin, pcnt2, 0, false);
		timing->shared_timings.clk_pre_inc_by_2 = false;
	}

	timing->ta_go = 3;
	timing->ta_sure = 0;
	timing->ta_get = 4;

	DBG("PHY timings: %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d",
	    timing->shared_timings.clk_pre, timing->shared_timings.clk_post,
	    timing->shared_timings.clk_pre_inc_by_2, timing->clk_zero,
	    timing->clk_trail, timing->clk_prepare, timing->hs_exit,
	    timing->hs_zero, timing->hs_prepare, timing->hs_trail,
	    timing->hs_rqst);

	return 0;
}

int msm_dsi_dphy_timing_calc_v2(struct msm_dsi_dphy_timing *timing,
				struct msm_dsi_phy_clk_request *clk_req)
{
	const unsigned long bit_rate = clk_req->bitclk_rate;
	const unsigned long esc_rate = clk_req->escclk_rate;
	s32 ui, ui_x8;
	s32 tmax, tmin;
	s32 pcnt0 = 50;
	s32 pcnt1 = 50;
	s32 pcnt2 = 10;
	s32 pcnt3 = 30;
	s32 pcnt4 = 10;
	s32 pcnt5 = 2;
	s32 coeff = 1000; /* Precision, should avoid overflow */
	s32 hb_en, hb_en_ckln, pd_ckln, pd;
	s32 val, val_ckln;
	s32 temp;

	if (!bit_rate || !esc_rate)
		return -EINVAL;

	timing->hs_halfbyte_en = 0;
	hb_en = 0;
	timing->hs_halfbyte_en_ckln = 0;
	hb_en_ckln = 0;
	timing->hs_prep_dly_ckln = (bit_rate > 100000000) ? 0 : 3;
	pd_ckln = timing->hs_prep_dly_ckln;
	timing->hs_prep_dly = (bit_rate > 120000000) ? 0 : 1;
	pd = timing->hs_prep_dly;

	val = (hb_en << 2) + (pd << 1);
	val_ckln = (hb_en_ckln << 2) + (pd_ckln << 1);

	ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000);
	ui_x8 = ui << 3;
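	/*
	 * ui_x8 is one byte-clock period (8 UI) in the same coeff-scaled
	 * nanosecond units; the counters computed below are in byte-clock
	 * cycles rather than single UIs.
	 */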

	temp = S_DIV_ROUND_UP(38 * coeff - val_ckln * ui, ui_x8);
	tmin = max_t(s32, temp, 0);
	temp = (95 * coeff - val_ckln * ui) / ui_x8;
	tmax = max_t(s32, temp, 0);
	timing->clk_prepare = linear_inter(tmax, tmin, pcnt0, 0, false);

	temp = 300 * coeff - ((timing->clk_prepare << 3) + val_ckln) * ui;
	tmin = S_DIV_ROUND_UP(temp - 11 * ui, ui_x8) - 3;
	tmax = (tmin > 255) ? 511 : 255;
	timing->clk_zero = linear_inter(tmax, tmin, pcnt5, 0, false);

	tmin = DIV_ROUND_UP(60 * coeff + 3 * ui, ui_x8);
	temp = 105 * coeff + 12 * ui - 20 * coeff;
	tmax = (temp + 3 * ui) / ui_x8;
	timing->clk_trail = linear_inter(tmax, tmin, pcnt3, 0, false);

	temp = S_DIV_ROUND_UP(40 * coeff + 4 * ui - val * ui, ui_x8);
	tmin = max_t(s32, temp, 0);
	temp = (85 * coeff + 6 * ui - val * ui) / ui_x8;
	tmax = max_t(s32, temp, 0);
	timing->hs_prepare = linear_inter(tmax, tmin, pcnt1, 0, false);

	temp = 145 * coeff + 10 * ui - ((timing->hs_prepare << 3) + val) * ui;
	tmin = S_DIV_ROUND_UP(temp - 11 * ui, ui_x8) - 3;
	tmax = 255;
	timing->hs_zero = linear_inter(tmax, tmin, pcnt4, 0, false);

	tmin = DIV_ROUND_UP(60 * coeff + 4 * ui + 3 * ui, ui_x8);
	temp = 105 * coeff + 12 * ui - 20 * coeff;
	tmax = (temp + 3 * ui) / ui_x8;
	timing->hs_trail = linear_inter(tmax, tmin, pcnt3, 0, false);

	temp = 50 * coeff + ((hb_en << 2) - 8) * ui;
	timing->hs_rqst = S_DIV_ROUND_UP(temp, ui_x8);

	tmin = DIV_ROUND_UP(100 * coeff, ui_x8) - 1;
	tmax = 255;
	timing->hs_exit = linear_inter(tmax, tmin, pcnt2, 0, false);

	temp = 50 * coeff + ((hb_en_ckln << 2) - 8) * ui;
	timing->hs_rqst_ckln = S_DIV_ROUND_UP(temp, ui_x8);

	temp = 60 * coeff + 52 * ui - 43 * ui;
	tmin = DIV_ROUND_UP(temp, ui_x8) - 1;
	tmax = 63;
	timing->shared_timings.clk_post =
		linear_inter(tmax, tmin, pcnt2, 0, false);

	temp = 8 * ui + ((timing->clk_prepare << 3) + val_ckln) * ui;
	temp += (((timing->clk_zero + 3) << 3) + 11 - (pd_ckln << 1)) * ui;
	temp += hb_en_ckln ? (((timing->hs_rqst_ckln << 3) + 4) * ui) :
				(((timing->hs_rqst_ckln << 3) + 8) * ui);
	tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1;
	tmax = 63;
	if (tmin > tmax) {
		temp = linear_inter(tmax << 1, tmin, pcnt2, 0, false);
		timing->shared_timings.clk_pre = temp >> 1;
		timing->shared_timings.clk_pre_inc_by_2 = 1;
	} else {
		timing->shared_timings.clk_pre =
			linear_inter(tmax, tmin, pcnt2, 0, false);
		timing->shared_timings.clk_pre_inc_by_2 = 0;
	}

	timing->ta_go = 3;
	timing->ta_sure = 0;
	timing->ta_get = 4;

	DBG("%d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d",
	    timing->shared_timings.clk_pre, timing->shared_timings.clk_post,
	    timing->shared_timings.clk_pre_inc_by_2, timing->clk_zero,
	    timing->clk_trail, timing->clk_prepare, timing->hs_exit,
	    timing->hs_zero, timing->hs_prepare, timing->hs_trail,
	    timing->hs_rqst, timing->hs_rqst_ckln, timing->hs_halfbyte_en,
	    timing->hs_halfbyte_en_ckln, timing->hs_prep_dly,
	    timing->hs_prep_dly_ckln);

	return 0;
}

int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing,
				struct msm_dsi_phy_clk_request *clk_req)
{
	const unsigned long bit_rate = clk_req->bitclk_rate;
	const unsigned long esc_rate = clk_req->escclk_rate;
	s32 ui, ui_x8;
	s32 tmax, tmin;
	s32 pcnt0 = 50;
	s32 pcnt1 = 50;
	s32 pcnt2 = 10;
	s32 pcnt3 = 30;
	s32 pcnt4 = 10;
	s32 pcnt5 = 2;
	s32 coeff = 1000; /* Precision, should avoid overflow */
	s32 hb_en, hb_en_ckln;
	s32 temp;

	if (!bit_rate || !esc_rate)
		return -EINVAL;

	timing->hs_halfbyte_en = 0;
	hb_en = 0;
	timing->hs_halfbyte_en_ckln = 0;
	hb_en_ckln = 0;

	ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000);
	ui_x8 = ui << 3;

	temp = S_DIV_ROUND_UP(38 * coeff, ui_x8);
	tmin = max_t(s32, temp, 0);
	temp = (95 * coeff) / ui_x8;
	tmax = max_t(s32, temp, 0);
	timing->clk_prepare = linear_inter(tmax, tmin, pcnt0, 0, false);

	temp = 300 * coeff - (timing->clk_prepare << 3) * ui;
	tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1;
	tmax = (tmin > 255) ? 511 : 255;
	timing->clk_zero = linear_inter(tmax, tmin, pcnt5, 0, false);

	tmin = DIV_ROUND_UP(60 * coeff + 3 * ui, ui_x8);
	temp = 105 * coeff + 12 * ui - 20 * coeff;
	tmax = (temp + 3 * ui) / ui_x8;
	timing->clk_trail = linear_inter(tmax, tmin, pcnt3, 0, false);

	temp = S_DIV_ROUND_UP(40 * coeff + 4 * ui, ui_x8);
	tmin = max_t(s32, temp, 0);
	temp = (85 * coeff + 6 * ui) / ui_x8;
	tmax = max_t(s32, temp, 0);
	timing->hs_prepare = linear_inter(tmax, tmin, pcnt1, 0, false);

	temp = 145 * coeff + 10 * ui - (timing->hs_prepare << 3) * ui;
	tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1;
	tmax = 255;
	timing->hs_zero = linear_inter(tmax, tmin, pcnt4, 0, false);

	tmin = DIV_ROUND_UP(60 * coeff + 4 * ui, ui_x8) - 1;
	temp = 105 * coeff + 12 * ui - 20 * coeff;
	tmax = (temp / ui_x8) - 1;
	timing->hs_trail = linear_inter(tmax, tmin, pcnt3, 0, false);

	temp = 50 * coeff + ((hb_en << 2) - 8) * ui;
	timing->hs_rqst = S_DIV_ROUND_UP(temp, ui_x8);

	tmin = DIV_ROUND_UP(100 * coeff, ui_x8) - 1;
	tmax = 255;
	timing->hs_exit = linear_inter(tmax, tmin, pcnt2, 0, false);

	temp = 50 * coeff + ((hb_en_ckln << 2) - 8) * ui;
	timing->hs_rqst_ckln = S_DIV_ROUND_UP(temp, ui_x8);

	temp = 60 * coeff + 52 * ui - 43 * ui;
	tmin = DIV_ROUND_UP(temp, ui_x8) - 1;
	tmax = 63;
	timing->shared_timings.clk_post =
		linear_inter(tmax, tmin, pcnt2, 0, false);

	temp = 8 * ui + (timing->clk_prepare << 3) * ui;
	temp += (((timing->clk_zero + 3) << 3) + 11) * ui;
	temp += hb_en_ckln ? (((timing->hs_rqst_ckln << 3) + 4) * ui) :
				(((timing->hs_rqst_ckln << 3) + 8) * ui);
	tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1;
	tmax = 63;
	if (tmin > tmax) {
		temp = linear_inter(tmax << 1, tmin, pcnt2, 0, false);
		timing->shared_timings.clk_pre = temp >> 1;
		timing->shared_timings.clk_pre_inc_by_2 = 1;
	} else {
		timing->shared_timings.clk_pre =
			linear_inter(tmax, tmin, pcnt2, 0, false);
		timing->shared_timings.clk_pre_inc_by_2 = 0;
	}

	timing->shared_timings.byte_intf_clk_div_2 = true;

	timing->ta_go = 3;
	timing->ta_sure = 0;
	timing->ta_get = 4;

	DBG("%d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d",
	    timing->shared_timings.clk_pre, timing->shared_timings.clk_post,
	    timing->shared_timings.clk_pre_inc_by_2, timing->clk_zero,
	    timing->clk_trail, timing->clk_prepare, timing->hs_exit,
	    timing->hs_zero, timing->hs_prepare, timing->hs_trail,
	    timing->hs_rqst, timing->hs_rqst_ckln, timing->hs_halfbyte_en,
	    timing->hs_halfbyte_en_ckln, timing->hs_prep_dly,
	    timing->hs_prep_dly_ckln);

	return 0;
}

int msm_dsi_dphy_timing_calc_v4(struct msm_dsi_dphy_timing *timing,
				struct msm_dsi_phy_clk_request *clk_req)
{
	const unsigned long bit_rate = clk_req->bitclk_rate;
	const unsigned long esc_rate = clk_req->escclk_rate;
	s32 ui, ui_x8;
	s32 tmax, tmin;
	s32 pcnt_clk_prep = 50;
	s32 pcnt_clk_zero = 2;
	s32 pcnt_clk_trail = 30;
	s32 pcnt_hs_prep = 50;
	s32 pcnt_hs_zero = 10;
	s32 pcnt_hs_trail = 30;
	s32 pcnt_hs_exit = 10;
	s32 coeff = 1000; /* Precision, should avoid overflow */
	s32 hb_en;
	s32 temp;

	if (!bit_rate || !esc_rate)
		return -EINVAL;

	hb_en = 0;

	ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000);
	ui_x8 = ui << 3;
	/*
	 * TODO: verify these calculations against the latest downstream
	 * driver. Everything except clk_post/clk_pre reuses the v3
	 * calculations, since the downstream driver uses the same math
	 * for v3 and v4.
	 */

	temp = S_DIV_ROUND_UP(38 * coeff, ui_x8);
	tmin = max_t(s32, temp, 0);
	temp = (95 * coeff) / ui_x8;
	tmax = max_t(s32, temp, 0);
	timing->clk_prepare = linear_inter(tmax, tmin, pcnt_clk_prep, 0, false);

	temp = 300 * coeff - (timing->clk_prepare << 3) * ui;
	tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1;
	tmax = (tmin > 255) ? 511 : 255;
	timing->clk_zero = linear_inter(tmax, tmin, pcnt_clk_zero, 0, false);

	tmin = DIV_ROUND_UP(60 * coeff + 3 * ui, ui_x8);
	temp = 105 * coeff + 12 * ui - 20 * coeff;
	tmax = (temp + 3 * ui) / ui_x8;
	timing->clk_trail = linear_inter(tmax, tmin, pcnt_clk_trail, 0, false);

	temp = S_DIV_ROUND_UP(40 * coeff + 4 * ui, ui_x8);
	tmin = max_t(s32, temp, 0);
	temp = (85 * coeff + 6 * ui) / ui_x8;
	tmax = max_t(s32, temp, 0);
	timing->hs_prepare = linear_inter(tmax, tmin, pcnt_hs_prep, 0, false);

	temp = 145 * coeff + 10 * ui - (timing->hs_prepare << 3) * ui;
	tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1;
	tmax = 255;
	timing->hs_zero = linear_inter(tmax, tmin, pcnt_hs_zero, 0, false);

	tmin = DIV_ROUND_UP(60 * coeff + 4 * ui, ui_x8) - 1;
	temp = 105 * coeff + 12 * ui - 20 * coeff;
	tmax = (temp / ui_x8) - 1;
	timing->hs_trail = linear_inter(tmax, tmin, pcnt_hs_trail, 0, false);

	temp = 50 * coeff + ((hb_en << 2) - 8) * ui;
	timing->hs_rqst = S_DIV_ROUND_UP(temp, ui_x8);

	tmin = DIV_ROUND_UP(100 * coeff, ui_x8) - 1;
	tmax = 255;
	timing->hs_exit = linear_inter(tmax, tmin, pcnt_hs_exit, 0, false);

	/* recommended min
	 * = roundup((mipi_min_ns + t_hs_trail_ns)/(16*bit_clk_ns), 0) - 1
	 */
	temp = 60 * coeff + 52 * ui + (timing->hs_trail + 1) * ui_x8;
	tmin = DIV_ROUND_UP(temp, 16 * ui) - 1;
	tmax = 255;
	timing->shared_timings.clk_post = linear_inter(tmax, tmin, 5, 0, false);

	/* recommended min
	 * val1 = (tlpx_ns + clk_prepare_ns + clk_zero_ns + hs_rqst_ns)
	 * val2 = (16 * bit_clk_ns)
	 * final = roundup(val1/val2, 0) - 1
	 */
	temp = 52 * coeff + (timing->clk_prepare + timing->clk_zero + 1) * ui_x8 + 54 * coeff;
	tmin = DIV_ROUND_UP(temp, 16 * ui) - 1;
	tmax = 255;
	timing->shared_timings.clk_pre = DIV_ROUND_UP((tmax - tmin) * 125, 10000) + tmin;
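	/*
	 * The line above interpolates 1.25% of the way from tmin to tmax
	 * ((tmax - tmin) * 125 / 10000); it is done inline because
	 * linear_inter() only accepts whole-number percentages.
	 */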

	timing->shared_timings.byte_intf_clk_div_2 = true;

	DBG("%d, %d, %d, %d, %d, %d, %d, %d, %d, %d",
	    timing->shared_timings.clk_pre, timing->shared_timings.clk_post,
	    timing->clk_zero, timing->clk_trail, timing->clk_prepare, timing->hs_exit,
	    timing->hs_zero, timing->hs_prepare, timing->hs_trail, timing->hs_rqst);

	return 0;
}

int msm_dsi_cphy_timing_calc_v4(struct msm_dsi_dphy_timing *timing,
				struct msm_dsi_phy_clk_request *clk_req)
{
	const unsigned long bit_rate = clk_req->bitclk_rate;
	const unsigned long esc_rate = clk_req->escclk_rate;
	s32 ui, ui_x7;
	s32 tmax, tmin;
	s32 coeff = 1000; /* Precision, should avoid overflow */
	s32 temp;

	if (!bit_rate || !esc_rate)
		return -EINVAL;

	ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000);
	ui_x7 = ui * 7;
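	/*
	 * ui_x7 is seven UI in coeff-scaled nanoseconds; the C-PHY timing
	 * counts below are expressed in this 7-UI granularity (a C-PHY
	 * word is seven symbols).
	 */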

	temp = S_DIV_ROUND_UP(38 * coeff, ui_x7);
	tmin = max_t(s32, temp, 0);
	temp = (95 * coeff) / ui_x7;
	tmax = max_t(s32, temp, 0);
	timing->clk_prepare = linear_inter(tmax, tmin, 50, 0, false);

	tmin = DIV_ROUND_UP(50 * coeff, ui_x7);
	tmax = 255;
	timing->hs_rqst = linear_inter(tmax, tmin, 1, 0, false);

	tmin = DIV_ROUND_UP(100 * coeff, ui_x7) - 1;
	tmax = 255;
	timing->hs_exit = linear_inter(tmax, tmin, 10, 0, false);

	tmin = 1;
	tmax = 32;
	timing->shared_timings.clk_post = linear_inter(tmax, tmin, 80, 0, false);

	tmin = min_t(s32, 64, S_DIV_ROUND_UP(262 * coeff, ui_x7) - 1);
	tmax = 64;
	timing->shared_timings.clk_pre = linear_inter(tmax, tmin, 20, 0, false);

	DBG("%d, %d, %d, %d, %d",
	    timing->shared_timings.clk_pre, timing->shared_timings.clk_post,
	    timing->clk_prepare, timing->hs_exit, timing->hs_rqst);

	return 0;
}

static const struct of_device_id dsi_phy_dt_match[] = {
#ifdef CONFIG_DRM_MSM_DSI_28NM_PHY
	{ .compatible = "qcom,dsi-phy-28nm-hpm",
	  .data = &dsi_phy_28nm_hpm_cfgs },
	{ .compatible = "qcom,dsi-phy-28nm-hpm-fam-b",
	  .data = &dsi_phy_28nm_hpm_famb_cfgs },
	{ .compatible = "qcom,dsi-phy-28nm-lp",
	  .data = &dsi_phy_28nm_lp_cfgs },
	{ .compatible = "qcom,dsi-phy-28nm-8226",
	  .data = &dsi_phy_28nm_8226_cfgs },
	{ .compatible = "qcom,dsi-phy-28nm-8937",
	  .data = &dsi_phy_28nm_8937_cfgs },
#endif
#ifdef CONFIG_DRM_MSM_DSI_20NM_PHY
	{ .compatible = "qcom,dsi-phy-20nm",
	  .data = &dsi_phy_20nm_cfgs },
#endif
#ifdef CONFIG_DRM_MSM_DSI_28NM_8960_PHY
	{ .compatible = "qcom,dsi-phy-28nm-8960",
	  .data = &dsi_phy_28nm_8960_cfgs },
#endif
#ifdef CONFIG_DRM_MSM_DSI_14NM_PHY
	{ .compatible = "qcom,dsi-phy-14nm",
	  .data = &dsi_phy_14nm_cfgs },
	{ .compatible = "qcom,dsi-phy-14nm-2290",
	  .data = &dsi_phy_14nm_2290_cfgs },
	{ .compatible = "qcom,dsi-phy-14nm-660",
	  .data = &dsi_phy_14nm_660_cfgs },
	{ .compatible = "qcom,dsi-phy-14nm-8953",
	  .data = &dsi_phy_14nm_8953_cfgs },
	{ .compatible = "qcom,sm6125-dsi-phy-14nm",
	  .data = &dsi_phy_14nm_2290_cfgs },
	{ .compatible = "qcom,sm6150-dsi-phy-14nm",
	  .data = &dsi_phy_14nm_6150_cfgs },
#endif
#ifdef CONFIG_DRM_MSM_DSI_10NM_PHY
	{ .compatible = "qcom,dsi-phy-10nm",
	  .data = &dsi_phy_10nm_cfgs },
	{ .compatible = "qcom,dsi-phy-10nm-8998",
	  .data = &dsi_phy_10nm_8998_cfgs },
#endif
#ifdef CONFIG_DRM_MSM_DSI_7NM_PHY
	{ .compatible = "qcom,dsi-phy-7nm",
	  .data = &dsi_phy_7nm_cfgs },
	{ .compatible = "qcom,dsi-phy-7nm-8150",
	  .data = &dsi_phy_7nm_8150_cfgs },
	{ .compatible = "qcom,sa8775p-dsi-phy-5nm",
	  .data = &dsi_phy_5nm_8775p_cfgs },
	{ .compatible = "qcom,sar2130p-dsi-phy-5nm",
	  .data = &dsi_phy_5nm_sar2130p_cfgs },
	{ .compatible = "qcom,sc7280-dsi-phy-7nm",
	  .data = &dsi_phy_7nm_7280_cfgs },
	{ .compatible = "qcom,sm6375-dsi-phy-7nm",
	  .data = &dsi_phy_7nm_6375_cfgs },
	{ .compatible = "qcom,sm8350-dsi-phy-5nm",
	  .data = &dsi_phy_5nm_8350_cfgs },
	{ .compatible = "qcom,sm8450-dsi-phy-5nm",
	  .data = &dsi_phy_5nm_8450_cfgs },
	{ .compatible = "qcom,sm8550-dsi-phy-4nm",
	  .data = &dsi_phy_4nm_8550_cfgs },
	{ .compatible = "qcom,sm8650-dsi-phy-4nm",
	  .data = &dsi_phy_4nm_8650_cfgs },
	{ .compatible = "qcom,sm8750-dsi-phy-3nm",
	  .data = &dsi_phy_3nm_8750_cfgs },
#endif
	{}
};
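
/*
 * Rough sketch of a devicetree node matching one of the entries above.
 * Addresses, sizes and clock phandles are placeholders, not taken from
 * any real SoC; the reg-names and "iface" clock mirror what this file
 * requests, but consult the dt-bindings for each compatible's actual
 * layout:
 *
 *	dsi_phy0: phy@994400 {
 *		compatible = "qcom,dsi-phy-14nm";
 *		reg = <0x994400 0x100>,
 *		      <0x994500 0x300>,
 *		      <0x994800 0x188>;
 *		reg-names = "dsi_phy", "dsi_phy_lane", "dsi_pll";
 *		#clock-cells = <1>;
 *		#phy-cells = <0>;
 *		clocks = <&mmcc MDSS_AHB_CLK>, <&xo_board>;
 *		clock-names = "iface", "ref";
 *	};
 */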

/*
 * Currently, we only support one SoC for each PHY type. When we have multiple
 * SoCs for the same PHY, we can try to make the index searching a bit more
 * clever.
 */
static int dsi_phy_get_id(struct msm_dsi_phy *phy)
{
	struct platform_device *pdev = phy->pdev;
	const struct msm_dsi_phy_cfg *cfg = phy->cfg;
	struct resource *res;
	int i;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dsi_phy");
	if (!res)
		return -EINVAL;

	for (i = 0; i < cfg->num_dsi_phy; i++) {
		if (cfg->io_start[i] == res->start)
			return i;
	}

	return -EINVAL;
}
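
/*
 * Worked example for the lookup above: with cfg->io_start = { 0x994400,
 * 0x996400 } (illustrative addresses only) and a "dsi_phy" resource
 * starting at 0x996400, dsi_phy_get_id() returns index 1.
 */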

static int dsi_phy_driver_probe(struct platform_device *pdev)
{
	struct msm_dsi_phy *phy;
	struct device *dev = &pdev->dev;
	u32 phy_type;
	int ret;

	phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
	if (!phy)
		return -ENOMEM;

	phy->provided_clocks = devm_kzalloc(dev,
			struct_size(phy->provided_clocks, hws, NUM_PROVIDED_CLKS),
			GFP_KERNEL);
	if (!phy->provided_clocks)
		return -ENOMEM;

	phy->provided_clocks->num = NUM_PROVIDED_CLKS;

	phy->cfg = of_device_get_match_data(&pdev->dev);
	if (!phy->cfg)
		return -ENODEV;

	phy->pdev = pdev;

	phy->id = dsi_phy_get_id(phy);
	if (phy->id < 0)
		return dev_err_probe(dev, phy->id,
				     "Couldn't identify PHY index\n");

	phy->regulator_ldo_mode = of_property_read_bool(dev->of_node,
				"qcom,dsi-phy-regulator-ldo-mode");
	if (!of_property_read_u32(dev->of_node, "phy-type", &phy_type))
		phy->cphy_mode = (phy_type == PHY_TYPE_CPHY);

	phy->base = msm_ioremap_size(pdev, "dsi_phy", &phy->base_size);
	if (IS_ERR(phy->base))
		return dev_err_probe(dev, PTR_ERR(phy->base),
				     "Failed to map phy base\n");

	phy->pll_base = msm_ioremap_size(pdev, "dsi_pll", &phy->pll_size);
	if (IS_ERR(phy->pll_base))
		return dev_err_probe(dev, PTR_ERR(phy->pll_base),
				     "Failed to map pll base\n");

	if (phy->cfg->has_phy_lane) {
		phy->lane_base = msm_ioremap_size(pdev, "dsi_phy_lane", &phy->lane_size);
		if (IS_ERR(phy->lane_base))
			return dev_err_probe(dev, PTR_ERR(phy->lane_base),
					     "Failed to map phy lane base\n");
	}

	if (phy->cfg->has_phy_regulator) {
		phy->reg_base = msm_ioremap_size(pdev, "dsi_phy_regulator", &phy->reg_size);
		if (IS_ERR(phy->reg_base))
			return dev_err_probe(dev, PTR_ERR(phy->reg_base),
					     "Failed to map phy regulator base\n");
	}

	if (phy->cfg->ops.parse_dt_properties) {
		ret = phy->cfg->ops.parse_dt_properties(phy);
		if (ret)
			return ret;
	}

	ret = devm_regulator_bulk_get_const(dev, phy->cfg->num_regulators,
					    phy->cfg->regulator_data,
					    &phy->supplies);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, phy);

	ret = devm_pm_runtime_enable(dev);
	if (ret)
		return ret;

	ret = devm_pm_clk_create(dev);
	if (ret)
		return ret;

	ret = pm_clk_add(dev, "iface");
	if (ret < 0)
		return dev_err_probe(dev, ret, "Unable to get iface clk\n");

	if (phy->cfg->ops.pll_init) {
		ret = phy->cfg->ops.pll_init(phy);
		if (ret)
			return dev_err_probe(dev, ret,
					     "PLL init failed; need separate clk driver\n");
	}

	ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
					  phy->provided_clocks);
	if (ret)
		return dev_err_probe(dev, ret,
				     "Failed to register clk provider\n");

	return 0;
}

static const struct dev_pm_ops dsi_phy_pm_ops = {
	SET_RUNTIME_PM_OPS(pm_clk_suspend, pm_clk_resume, NULL)
};

static struct platform_driver dsi_phy_platform_driver = {
	.probe = dsi_phy_driver_probe,
	.driver = {
		.name = "msm_dsi_phy",
		.of_match_table = dsi_phy_dt_match,
		.pm = &dsi_phy_pm_ops,
	},
};

void __init msm_dsi_phy_driver_register(void)
{
	platform_driver_register(&dsi_phy_platform_driver);
}

void __exit msm_dsi_phy_driver_unregister(void)
{
	platform_driver_unregister(&dsi_phy_platform_driver);
}

int msm_dsi_phy_enable(struct msm_dsi_phy *phy,
		       struct msm_dsi_phy_clk_request *clk_req,
		       struct msm_dsi_phy_shared_timings *shared_timings)
{
	struct device *dev;
	int ret;

	if (!phy || !phy->cfg->ops.enable)
		return -EINVAL;

	dev = &phy->pdev->dev;

	ret = pm_runtime_resume_and_get(dev);
	if (ret) {
		DRM_DEV_ERROR(dev, "%s: resume failed, %d\n",
			      __func__, ret);
		goto res_en_fail;
	}

	ret = regulator_bulk_enable(phy->cfg->num_regulators, phy->supplies);
	if (ret) {
		DRM_DEV_ERROR(dev, "%s: regulator enable failed, %d\n",
			      __func__, ret);
		goto reg_en_fail;
	}

	ret = phy->cfg->ops.enable(phy, clk_req);
	if (ret) {
		DRM_DEV_ERROR(dev, "%s: phy enable failed, %d\n", __func__, ret);
		goto phy_en_fail;
	}

	memcpy(shared_timings, &phy->timing.shared_timings,
	       sizeof(*shared_timings));

	/*
	 * Resetting the DSI PHY silently changes its PLL registers to their
	 * reset values, which would confuse the clock driver and yield a
	 * wrong output rate on the link clocks. Restore the PLL state if
	 * this PHY's PLL is being used as a clock source.
	 */
	if (phy->usecase != MSM_DSI_PHY_SLAVE) {
		ret = msm_dsi_phy_pll_restore_state(phy);
		if (ret) {
			DRM_DEV_ERROR(dev, "%s: failed to restore phy state, %d\n",
				      __func__, ret);
			goto pll_restor_fail;
		}
	}

	return 0;

pll_restor_fail:
	if (phy->cfg->ops.disable)
		phy->cfg->ops.disable(phy);
phy_en_fail:
	regulator_bulk_disable(phy->cfg->num_regulators, phy->supplies);
reg_en_fail:
	pm_runtime_put(dev);
res_en_fail:
	return ret;
}

void msm_dsi_phy_disable(struct msm_dsi_phy *phy)
{
	if (!phy || !phy->cfg->ops.disable)
		return;

	phy->cfg->ops.disable(phy);

	regulator_bulk_disable(phy->cfg->num_regulators, phy->supplies);
	pm_runtime_put(&phy->pdev->dev);
}

void msm_dsi_phy_set_usecase(struct msm_dsi_phy *phy,
			     enum msm_dsi_phy_usecase uc)
{
	if (phy)
		phy->usecase = uc;
}

/* Returns true if we have to clear DSI_LANE_CTRL.HS_REQ_SEL_PHY */
bool msm_dsi_phy_set_continuous_clock(struct msm_dsi_phy *phy, bool enable)
{
	if (!phy || !phy->cfg->ops.set_continuous_clock)
		return false;

	return phy->cfg->ops.set_continuous_clock(phy, enable);
}

void msm_dsi_phy_pll_save_state(struct msm_dsi_phy *phy)
{
	if (phy->cfg->ops.save_pll_state) {
		phy->cfg->ops.save_pll_state(phy);
		phy->state_saved = true;
	}
}

int msm_dsi_phy_pll_restore_state(struct msm_dsi_phy *phy)
{
	int ret;

	if (phy->cfg->ops.restore_pll_state && phy->state_saved) {
		ret = phy->cfg->ops.restore_pll_state(phy);
		if (ret)
			return ret;

		phy->state_saved = false;
	}

	return 0;
}

void msm_dsi_phy_snapshot(struct msm_disp_state *disp_state, struct msm_dsi_phy *phy)
{
	msm_disp_snapshot_add_block(disp_state,
				    phy->base_size, phy->base,
				    "dsi%d_phy", phy->id);

	/* Do not try accessing PLL registers if it is switched off */
	if (phy->pll_on)
		msm_disp_snapshot_add_block(disp_state,
					    phy->pll_size, phy->pll_base,
					    "dsi%d_pll", phy->id);

	if (phy->lane_base)
		msm_disp_snapshot_add_block(disp_state,
					    phy->lane_size, phy->lane_base,
					    "dsi%d_lane", phy->id);

	if (phy->reg_base)
		msm_disp_snapshot_add_block(disp_state,
					    phy->reg_size, phy->reg_base,
					    "dsi%d_reg", phy->id);
}