/*
 * Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Shamelessly ripped off from ChromeOS's gk20a/clk_pllg.c
 *
 */
#include "priv.h"
#include "gk20a_devfreq.h"
#include "gk20a.h"

#include <core/tegra.h>
#include <subdev/timer.h>

static const u8 _pl_to_div[] = {
/* PL:   0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 */
/* p: */ 1, 2, 3, 4, 5, 6, 8, 10, 12, 16, 12, 16, 20, 24, 32,
};

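/*
 * Map a PL (post-divider selector) value to its effective divider. An
 * out-of-range selector falls back to a divider of 1.
 */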
static u32 pl_to_div(u32 pl)
{
        if (pl >= ARRAY_SIZE(_pl_to_div))
                return 1;

        return _pl_to_div[pl];
}

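/*
 * Inverse of pl_to_div(): return the first (smallest) PL whose divider is
 * at least "div", i.e. round the requested divider up to the next value
 * the hardware supports.
 */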
static u32 div_to_pl(u32 div)
{
        u32 pl;

        for (pl = 0; pl < ARRAY_SIZE(_pl_to_div) - 1; pl++) {
                if (_pl_to_div[pl] >= div)
                        return pl;
        }

        return ARRAY_SIZE(_pl_to_div) - 1;
}

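/* PLL parameters; all frequencies are in KHz (e.g. max_vco is 2.064 GHz) */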
static const struct gk20a_clk_pllg_params gk20a_pllg_params = {
        .min_vco = 1000000, .max_vco = 2064000,
        .min_u = 12000, .max_u = 38000,
        .min_m = 1, .max_m = 255,
        .min_n = 8, .max_n = 255,
        .min_pl = 1, .max_pl = 32,
};

void
gk20a_pllg_read_mnp(struct gk20a_clk *clk, struct gk20a_pll *pll)
{
        struct nvkm_device *device = clk->base.subdev.device;
        u32 val;

        val = nvkm_rd32(device, GPCPLL_COEFF);
        pll->m = (val >> GPCPLL_COEFF_M_SHIFT) & MASK(GPCPLL_COEFF_M_WIDTH);
        pll->n = (val >> GPCPLL_COEFF_N_SHIFT) & MASK(GPCPLL_COEFF_N_WIDTH);
        pll->pl = (val >> GPCPLL_COEFF_P_SHIFT) & MASK(GPCPLL_COEFF_P_WIDTH);
}

void
gk20a_pllg_write_mnp(struct gk20a_clk *clk, const struct gk20a_pll *pll)
{
        struct nvkm_device *device = clk->base.subdev.device;
        u32 val;

        val = (pll->m & MASK(GPCPLL_COEFF_M_WIDTH)) << GPCPLL_COEFF_M_SHIFT;
        val |= (pll->n & MASK(GPCPLL_COEFF_N_WIDTH)) << GPCPLL_COEFF_N_SHIFT;
        val |= (pll->pl & MASK(GPCPLL_COEFF_P_WIDTH)) << GPCPLL_COEFF_P_SHIFT;
        nvkm_wr32(device, GPCPLL_COEFF, val);
}

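/*
 * Rate produced by a set of coefficients: the PLL outputs
 * gpc2clk = parent * N / (M * PL divider), and gpc2clk runs at twice the
 * GPC core clock, hence the final division by two.
 */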
u32
gk20a_pllg_calc_rate(struct gk20a_clk *clk, struct gk20a_pll *pll)
{
        u32 rate;
        u32 divider;

        rate = clk->parent_rate * pll->n;
        divider = pll->m * clk->pl_to_div(pll->pl);

        return rate / divider / 2;
}

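/*
 * Pick (M, N, PL) coefficients for the requested rate by brute force: for
 * each candidate post divider and reference divider, bracket the feedback
 * divider around its ideal value and keep the combination whose VCO is in
 * range with the smallest error against the target.
 */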
int
gk20a_pllg_calc_mnp(struct gk20a_clk *clk, unsigned long rate,
                    struct gk20a_pll *pll)
{
        struct nvkm_subdev *subdev = &clk->base.subdev;
        u32 target_clk_f, ref_clk_f, target_freq;
        u32 min_vco_f, max_vco_f;
        u32 low_pl, high_pl, best_pl;
        u32 target_vco_f;
        u32 best_m, best_n;
        u32 best_delta = ~0;
        u32 pl;

        target_clk_f = rate * 2 / KHZ;
        ref_clk_f = clk->parent_rate / KHZ;

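        /* the PL search range below is sized against the target plus 2% */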
        target_vco_f = target_clk_f + target_clk_f / 50;
        max_vco_f = max(clk->params->max_vco, target_vco_f);
        min_vco_f = clk->params->min_vco;
        best_m = clk->params->max_m;
        best_n = clk->params->min_n;
        best_pl = clk->params->min_pl;

        /* min_pl <= high_pl <= max_pl */
        high_pl = (max_vco_f + target_vco_f - 1) / target_vco_f;
        high_pl = min(high_pl, clk->params->max_pl);
        high_pl = max(high_pl, clk->params->min_pl);
        high_pl = clk->div_to_pl(high_pl);

        /* min_pl <= low_pl <= max_pl */
        low_pl = min_vco_f / target_vco_f;
        low_pl = min(low_pl, clk->params->max_pl);
        low_pl = max(low_pl, clk->params->min_pl);
        low_pl = clk->div_to_pl(low_pl);

        nvkm_debug(subdev, "low_PL %d(div%d), high_PL %d(div%d)", low_pl,
                   clk->pl_to_div(low_pl), high_pl, clk->pl_to_div(high_pl));

        /* Select lowest possible VCO */
        for (pl = low_pl; pl <= high_pl; pl++) {
                u32 m, n, n2;

                target_vco_f = target_clk_f * clk->pl_to_div(pl);

                for (m = clk->params->min_m; m <= clk->params->max_m; m++) {
                        u32 u_f = ref_clk_f / m;

                        if (u_f < clk->params->min_u)
                                break;
                        if (u_f > clk->params->max_u)
                                continue;

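                        /*
                         * n and n2 bracket the ideal feedback divider for
                         * this (PL, M) pair: the floor and ceiling of
                         * target_vco_f * m / ref_clk_f.
                         */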
                        n = (target_vco_f * m) / ref_clk_f;
                        n2 = ((target_vco_f * m) + (ref_clk_f - 1)) / ref_clk_f;

                        if (n > clk->params->max_n)
                                break;

                        for (; n <= n2; n++) {
                                u32 vco_f;

                                if (n < clk->params->min_n)
                                        continue;
                                if (n > clk->params->max_n)
                                        break;

                                vco_f = ref_clk_f * n / m;

                                if (vco_f >= min_vco_f && vco_f <= max_vco_f) {
                                        u32 delta, lwv;

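                                        /*
                                         * lwv: output clock this choice
                                         * would produce, with the post
                                         * divider applied round-to-nearest
                                         */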
                                        lwv = (vco_f + (clk->pl_to_div(pl) / 2))
                                              / clk->pl_to_div(pl);
                                        delta = abs(lwv - target_clk_f);

                                        if (delta < best_delta) {
                                                best_delta = delta;
                                                best_m = m;
                                                best_n = n;
                                                best_pl = pl;

                                                if (best_delta == 0)
                                                        goto found_match;
                                        }
                                }
                        }
                }
        }

found_match:
        WARN_ON(best_delta == ~0);

        if (best_delta != 0)
                nvkm_debug(subdev,
                           "no best match for target @ %dMHz on gpc_pll",
                           target_clk_f / KHZ);

        pll->m = best_m;
        pll->n = best_n;
        pll->pl = best_pl;

        target_freq = gk20a_pllg_calc_rate(clk, pll);

        nvkm_debug(subdev,
                   "actual target freq %d KHz, M %d, N %d, PL %d(div%d)\n",
                   target_freq / KHZ, pll->m, pll->n, pll->pl,
                   clk->pl_to_div(pll->pl));
        return 0;
}

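/*
 * Change the feedback divider (NDIV) of a running PLL. The PLL is put into
 * slowdown mode and its dynamic ramp circuit glides to the new NDIV, which
 * avoids losing lock the way a full reprogram would.
 */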
static int
gk20a_pllg_slide(struct gk20a_clk *clk, u32 n)
{
        struct nvkm_subdev *subdev = &clk->base.subdev;
        struct nvkm_device *device = subdev->device;
        struct gk20a_pll pll;
        int ret = 0;

        /* get old coefficients */
        gk20a_pllg_read_mnp(clk, &pll);
        /* do nothing if NDIV is the same */
        if (n == pll.n)
                return 0;

        /* pll slowdown mode */
        nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
                  BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT),
                  BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT));

        /* new ndiv ready for ramp */
        pll.n = n;
        udelay(1);
        gk20a_pllg_write_mnp(clk, &pll);

        /* dynamic ramp to new ndiv */
        udelay(1);
        nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
                  BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT),
                  BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT));

        /* wait for ramping to complete */
        if (nvkm_wait_usec(device, 500, GPC_BCAST_NDIV_SLOWDOWN_DEBUG,
                GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK,
                GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK) < 0)
                ret = -ETIMEDOUT;

        /* exit slowdown mode */
        nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
                  BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT) |
                  BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT), 0);
        nvkm_rd32(device, GPCPLL_NDIV_SLOWDOWN);

        return ret;
}

static int
gk20a_pllg_enable(struct gk20a_clk *clk)
{
        struct nvkm_device *device = clk->base.subdev.device;
        u32 val;

        nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, GPCPLL_CFG_ENABLE);
        nvkm_rd32(device, GPCPLL_CFG);

        /* enable lock detection */
        val = nvkm_rd32(device, GPCPLL_CFG);
        if (val & GPCPLL_CFG_LOCK_DET_OFF) {
                val &= ~GPCPLL_CFG_LOCK_DET_OFF;
                nvkm_wr32(device, GPCPLL_CFG, val);
        }

        /* wait for lock */
        if (nvkm_wait_usec(device, 300, GPCPLL_CFG, GPCPLL_CFG_LOCK,
                           GPCPLL_CFG_LOCK) < 0)
                return -ETIMEDOUT;

        /* switch to VCO mode */
        nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT),
                  BIT(SEL_VCO_GPC2CLK_OUT_SHIFT));

        return 0;
}

static void
gk20a_pllg_disable(struct gk20a_clk *clk)
{
        struct nvkm_device *device = clk->base.subdev.device;

        /* put PLL in bypass before disabling it */
        nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT), 0);

        nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, 0);
        nvkm_rd32(device, GPCPLL_CFG);
}

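/*
 * Fully reprogram the PLL. While the PLL is disabled the output runs from
 * the bypass path, so a 1:2 output divider is applied around the switch to
 * halve the resulting frequency jump.
 */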
static int
gk20a_pllg_program_mnp(struct gk20a_clk *clk, const struct gk20a_pll *pll)
{
        struct nvkm_subdev *subdev = &clk->base.subdev;
        struct nvkm_device *device = subdev->device;
        struct gk20a_pll cur_pll;
        int ret;

        gk20a_pllg_read_mnp(clk, &cur_pll);

        /* split VCO-to-bypass jump in half by setting out divider 1:2 */
        nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
                  GPC2CLK_OUT_VCODIV2 << GPC2CLK_OUT_VCODIV_SHIFT);
        /* intentional 2nd write to ensure linear divider operation */
        nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
                  GPC2CLK_OUT_VCODIV2 << GPC2CLK_OUT_VCODIV_SHIFT);
        nvkm_rd32(device, GPC2CLK_OUT);
        udelay(2);

        gk20a_pllg_disable(clk);

        gk20a_pllg_write_mnp(clk, pll);

        ret = gk20a_pllg_enable(clk);
        if (ret)
                return ret;

        /* restore out divider 1:1 */
        udelay(2);
        nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
                  GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT);
        /* intentional 2nd write to ensure linear divider operation */
        nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
                  GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT);
        nvkm_rd32(device, GPC2CLK_OUT);

        return 0;
}

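/*
 * Preferred reprogramming path: slide the running PLL down to NDIV_LO,
 * reprogram M/PL at the low VCO frequency, then slide up to the new NDIV.
 * gk20a_clk_prog() falls back to a full reprogram if this fails.
 */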
static int
gk20a_pllg_program_mnp_slide(struct gk20a_clk *clk, const struct gk20a_pll *pll)
{
        struct gk20a_pll cur_pll;
        int ret;

        if (gk20a_pllg_is_enabled(clk)) {
                gk20a_pllg_read_mnp(clk, &cur_pll);

                /* just do NDIV slide if there is no change to M and PL */
                if (pll->m == cur_pll.m && pll->pl == cur_pll.pl)
                        return gk20a_pllg_slide(clk, pll->n);

                /* slide down to current NDIV_LO */
                cur_pll.n = gk20a_pllg_n_lo(clk, &cur_pll);
                ret = gk20a_pllg_slide(clk, cur_pll.n);
                if (ret)
                        return ret;
        }

        /* program MNP with the new clock parameters and new NDIV_LO */
        cur_pll = *pll;
        cur_pll.n = gk20a_pllg_n_lo(clk, &cur_pll);
        ret = gk20a_pllg_program_mnp(clk, &cur_pll);
        if (ret)
                return ret;

        /* slide up to new NDIV */
        return gk20a_pllg_slide(clk, pll->n);
}

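/*
 * Software-defined pstates, from 72 MHz to 852 MHz. The voltage field is an
 * abstract level id rather than a value in volts; the voltage subdev maps
 * it to an actual regulator voltage.
 */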
365
366 static struct nvkm_pstate
367 gk20a_pstates[] = {
368 {
369 .base = {
370 .domain[nv_clk_src_gpc] = 72000,
371 .voltage = 0,
372 },
373 },
374 {
375 .base = {
376 .domain[nv_clk_src_gpc] = 108000,
377 .voltage = 1,
378 },
379 },
380 {
381 .base = {
382 .domain[nv_clk_src_gpc] = 180000,
383 .voltage = 2,
384 },
385 },
386 {
387 .base = {
388 .domain[nv_clk_src_gpc] = 252000,
389 .voltage = 3,
390 },
391 },
392 {
393 .base = {
394 .domain[nv_clk_src_gpc] = 324000,
395 .voltage = 4,
396 },
397 },
398 {
399 .base = {
400 .domain[nv_clk_src_gpc] = 396000,
401 .voltage = 5,
402 },
403 },
404 {
405 .base = {
406 .domain[nv_clk_src_gpc] = 468000,
407 .voltage = 6,
408 },
409 },
410 {
411 .base = {
412 .domain[nv_clk_src_gpc] = 540000,
413 .voltage = 7,
414 },
415 },
416 {
417 .base = {
418 .domain[nv_clk_src_gpc] = 612000,
419 .voltage = 8,
420 },
421 },
422 {
423 .base = {
424 .domain[nv_clk_src_gpc] = 648000,
425 .voltage = 9,
426 },
427 },
428 {
429 .base = {
430 .domain[nv_clk_src_gpc] = 684000,
431 .voltage = 10,
432 },
433 },
434 {
435 .base = {
436 .domain[nv_clk_src_gpc] = 708000,
437 .voltage = 11,
438 },
439 },
440 {
441 .base = {
442 .domain[nv_clk_src_gpc] = 756000,
443 .voltage = 12,
444 },
445 },
446 {
447 .base = {
448 .domain[nv_clk_src_gpc] = 804000,
449 .voltage = 13,
450 },
451 },
452 {
453 .base = {
454 .domain[nv_clk_src_gpc] = 852000,
455 .voltage = 14,
456 },
457 },
458 };
459
int
gk20a_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
{
        struct gk20a_clk *clk = gk20a_clk(base);
        struct nvkm_subdev *subdev = &clk->base.subdev;
        struct nvkm_device *device = subdev->device;
        struct gk20a_pll pll;

        switch (src) {
        case nv_clk_src_crystal:
                return device->crystal;
        case nv_clk_src_gpc:
                gk20a_pllg_read_mnp(clk, &pll);
                return gk20a_pllg_calc_rate(clk, &pll) / GK20A_CLK_GPC_MDIV;
        default:
                nvkm_error(subdev, "invalid clock source %d\n", src);
                return -EINVAL;
        }
}

int
gk20a_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
{
        struct gk20a_clk *clk = gk20a_clk(base);

        return gk20a_pllg_calc_mnp(clk, cstate->domain[nv_clk_src_gpc] *
                                   GK20A_CLK_GPC_MDIV, &clk->pll);
}

int
gk20a_clk_prog(struct nvkm_clk *base)
{
        struct gk20a_clk *clk = gk20a_clk(base);
        int ret;

        ret = gk20a_pllg_program_mnp_slide(clk, &clk->pll);
        if (ret)
                ret = gk20a_pllg_program_mnp(clk, &clk->pll);

        return ret;
}

void
gk20a_clk_tidy(struct nvkm_clk *base)
{
}

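/*
 * Program the dynamic ramp coefficients (step A/B) used for NDIV sliding;
 * the right values depend on the parent (reference) clock rate.
 */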
int
gk20a_clk_setup_slide(struct gk20a_clk *clk)
{
        struct nvkm_subdev *subdev = &clk->base.subdev;
        struct nvkm_device *device = subdev->device;
        u32 step_a, step_b;

        switch (clk->parent_rate) {
        case 12000000:
        case 12800000:
        case 13000000:
                step_a = 0x2b;
                step_b = 0x0b;
                break;
        case 19200000:
                step_a = 0x12;
                step_b = 0x08;
                break;
        case 38400000:
                step_a = 0x04;
                step_b = 0x05;
                break;
        default:
                nvkm_error(subdev, "invalid parent clock rate %u KHz",
                           clk->parent_rate / KHZ);
                return -EINVAL;
        }

        nvkm_mask(device, GPCPLL_CFG2, 0xff << GPCPLL_CFG2_PLL_STEPA_SHIFT,
                  step_a << GPCPLL_CFG2_PLL_STEPA_SHIFT);
        nvkm_mask(device, GPCPLL_CFG3, 0xff << GPCPLL_CFG3_PLL_STEPB_SHIFT,
                  step_b << GPCPLL_CFG3_PLL_STEPB_SHIFT);

        return 0;
}

void
gk20a_clk_fini(struct nvkm_clk *base)
{
        struct nvkm_device *device = base->subdev.device;
        struct gk20a_clk *clk = gk20a_clk(base);

        /* slide to VCO min */
        if (gk20a_pllg_is_enabled(clk)) {
                struct gk20a_pll pll;
                u32 n_lo;

                gk20a_pllg_read_mnp(clk, &pll);
                n_lo = gk20a_pllg_n_lo(clk, &pll);
                gk20a_pllg_slide(clk, n_lo);
        }

        gk20a_pllg_disable(clk);

        /* set IDDQ */
        nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_IDDQ, 1);
}

static int
gk20a_clk_init(struct nvkm_clk *base)
{
        struct gk20a_clk *clk = gk20a_clk(base);
        struct nvkm_subdev *subdev = &clk->base.subdev;
        struct nvkm_device *device = subdev->device;
        int ret;

        /* get out of IDDQ */
        nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_IDDQ, 0);
        nvkm_rd32(device, GPCPLL_CFG);
        udelay(5);

        nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_INIT_MASK,
                  GPC2CLK_OUT_INIT_VAL);

        ret = gk20a_clk_setup_slide(clk);
        if (ret)
                return ret;

        /* Start with the lowest frequency */
        base->func->calc(base, &base->func->pstates[0].base);
        ret = base->func->prog(&clk->base);
        if (ret) {
                nvkm_error(subdev, "cannot initialize clock\n");
                return ret;
        }

        ret = gk20a_devfreq_init(base, &clk->devfreq);
        if (ret)
                return ret;

        return 0;
}

static const struct nvkm_clk_func
gk20a_clk = {
        .init = gk20a_clk_init,
        .fini = gk20a_clk_fini,
        .read = gk20a_clk_read,
        .calc = gk20a_clk_calc,
        .prog = gk20a_clk_prog,
        .tidy = gk20a_clk_tidy,
        .pstates = gk20a_pstates,
        .nr_pstates = ARRAY_SIZE(gk20a_pstates),
        .domains = {
                { nv_clk_src_crystal, 0xff },
                { nv_clk_src_gpc, 0xff, 0, "core", GK20A_CLK_GPC_MDIV },
                { nv_clk_src_max }
        }
};

int
gk20a_clk_ctor(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
               const struct nvkm_clk_func *func,
               const struct gk20a_clk_pllg_params *params,
               struct gk20a_clk *clk)
{
        struct nvkm_device_tegra *tdev = device->func->tegra(device);
        int ret;
        int i;

        /* Finish initializing the pstates */
        for (i = 0; i < func->nr_pstates; i++) {
                INIT_LIST_HEAD(&func->pstates[i].list);
                func->pstates[i].pstate = i + 1;
        }

        clk->params = params;
        clk->parent_rate = clk_get_rate(tdev->clk);

        ret = nvkm_clk_ctor(func, device, type, inst, true, &clk->base);
        if (ret)
                return ret;

        nvkm_debug(&clk->base.subdev, "parent clock rate: %d KHz\n",
                   clk->parent_rate / KHZ);

        return 0;
}

int
gk20a_clk_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
              struct nvkm_clk **pclk)
{
        struct gk20a_clk *clk;
        int ret;

        clk = kzalloc_obj(*clk);
        if (!clk)
                return -ENOMEM;
        *pclk = &clk->base;

        ret = gk20a_clk_ctor(device, type, inst, &gk20a_clk,
                             &gk20a_pllg_params, clk);

        clk->pl_to_div = pl_to_div;
        clk->div_to_pl = div_to_pl;
        return ret;
}