xref: /linux/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c (revision 95298d63c67673c654c08952672d016212b26054)
/*
 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <subdev/clk.h>
#include <subdev/volt.h>
#include <subdev/timer.h>
#include <core/device.h>
#include <core/tegra.h>

#include "priv.h"
#include "gk20a.h"

#define GPCPLL_CFG_SYNC_MODE	BIT(2)

#define BYPASSCTRL_SYS	(SYS_GPCPLL_CFG_BASE + 0x340)
#define BYPASSCTRL_SYS_GPCPLL_SHIFT	0
#define BYPASSCTRL_SYS_GPCPLL_WIDTH	1

#define GPCPLL_CFG2_SDM_DIN_SHIFT	0
#define GPCPLL_CFG2_SDM_DIN_WIDTH	8
#define GPCPLL_CFG2_SDM_DIN_MASK	\
	(MASK(GPCPLL_CFG2_SDM_DIN_WIDTH) << GPCPLL_CFG2_SDM_DIN_SHIFT)
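
/*
 * SDM_DIN_NEW is a staging copy of the sigma-delta input: during a dynamic
 * ramp the new value is written here first, then committed to SDM_DIN once
 * the ramp has completed (see gm20b_pllg_slide()).
 */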
#define GPCPLL_CFG2_SDM_DIN_NEW_SHIFT	8
#define GPCPLL_CFG2_SDM_DIN_NEW_WIDTH	15
#define GPCPLL_CFG2_SDM_DIN_NEW_MASK	\
	(MASK(GPCPLL_CFG2_SDM_DIN_NEW_WIDTH) << GPCPLL_CFG2_SDM_DIN_NEW_SHIFT)
#define GPCPLL_CFG2_SETUP2_SHIFT	16
#define GPCPLL_CFG2_PLL_STEPA_SHIFT	24

#define GPCPLL_DVFS0	(SYS_GPCPLL_CFG_BASE + 0x10)
#define GPCPLL_DVFS0_DFS_COEFF_SHIFT	0
#define GPCPLL_DVFS0_DFS_COEFF_WIDTH	7
#define GPCPLL_DVFS0_DFS_COEFF_MASK	\
	(MASK(GPCPLL_DVFS0_DFS_COEFF_WIDTH) << GPCPLL_DVFS0_DFS_COEFF_SHIFT)
#define GPCPLL_DVFS0_DFS_DET_MAX_SHIFT	8
#define GPCPLL_DVFS0_DFS_DET_MAX_WIDTH	7
#define GPCPLL_DVFS0_DFS_DET_MAX_MASK	\
	(MASK(GPCPLL_DVFS0_DFS_DET_MAX_WIDTH) << GPCPLL_DVFS0_DFS_DET_MAX_SHIFT)

#define GPCPLL_DVFS1		(SYS_GPCPLL_CFG_BASE + 0x14)
#define GPCPLL_DVFS1_DFS_EXT_DET_SHIFT		0
#define GPCPLL_DVFS1_DFS_EXT_DET_WIDTH		7
#define GPCPLL_DVFS1_DFS_EXT_STRB_SHIFT		7
#define GPCPLL_DVFS1_DFS_EXT_STRB_WIDTH		1
#define GPCPLL_DVFS1_DFS_EXT_CAL_SHIFT		8
#define GPCPLL_DVFS1_DFS_EXT_CAL_WIDTH		7
#define GPCPLL_DVFS1_DFS_EXT_SEL_SHIFT		15
#define GPCPLL_DVFS1_DFS_EXT_SEL_WIDTH		1
#define GPCPLL_DVFS1_DFS_CTRL_SHIFT		16
#define GPCPLL_DVFS1_DFS_CTRL_WIDTH		12
#define GPCPLL_DVFS1_EN_SDM_SHIFT		28
#define GPCPLL_DVFS1_EN_SDM_WIDTH		1
#define GPCPLL_DVFS1_EN_SDM_BIT			BIT(28)
#define GPCPLL_DVFS1_EN_DFS_SHIFT		29
#define GPCPLL_DVFS1_EN_DFS_WIDTH		1
#define GPCPLL_DVFS1_EN_DFS_BIT			BIT(29)
#define GPCPLL_DVFS1_EN_DFS_CAL_SHIFT		30
#define GPCPLL_DVFS1_EN_DFS_CAL_WIDTH		1
#define GPCPLL_DVFS1_EN_DFS_CAL_BIT		BIT(30)
#define GPCPLL_DVFS1_DFS_CAL_DONE_SHIFT		31
#define GPCPLL_DVFS1_DFS_CAL_DONE_WIDTH		1
#define GPCPLL_DVFS1_DFS_CAL_DONE_BIT		BIT(31)

#define GPC_BCAST_GPCPLL_DVFS2	(GPC_BCAST_GPCPLL_CFG_BASE + 0x20)
#define GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT	BIT(16)

#define GPCPLL_CFG3_PLL_DFS_TESTOUT_SHIFT	24
#define GPCPLL_CFG3_PLL_DFS_TESTOUT_WIDTH	7

#define DFS_DET_RANGE	6	/* -2^6 ... 2^6-1 */
#define SDM_DIN_RANGE	12	/* -2^12 ... 2^12-1 */

struct gm20b_clk_dvfs_params {
	s32 coeff_slope;
	s32 coeff_offs;
	u32 vco_ctrl;
};

static const struct gm20b_clk_dvfs_params gm20b_dvfs_params = {
	.coeff_slope = -165230,
	.coeff_offs = 214007,
	.vco_ctrl = 0x7 << 3,
};
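
/*
 * Illustrative example (input voltage assumed): at 900 mV,
 * gm20b_dvfs_calc_det_coeff() below computes
 * coeff = (900 * -165230) / 1000 + 214007 = 65300, which is then scaled
 * down to 65300 / 1000 ~= 65 - comfortably within the 7-bit DFS_COEFF
 * field (maximum 127).
 */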

/*
 * base.n is now the *integer* part of the N factor.
 * sdm_din contains n's fractional part.
 */
struct gm20b_pll {
	struct gk20a_pll base;
	u32 sdm_din;
};

struct gm20b_clk_dvfs {
	u32 dfs_coeff;
	s32 dfs_det_max;
	s32 dfs_ext_cal;
};

struct gm20b_clk {
	/* currently applied parameters */
	struct gk20a_clk base;
	struct gm20b_clk_dvfs dvfs;
	u32 uv;

	/* new parameters to apply */
	struct gk20a_pll new_pll;
	struct gm20b_clk_dvfs new_dvfs;
	u32 new_uv;

	const struct gm20b_clk_dvfs_params *dvfs_params;

	/* fused parameters */
	s32 uvdet_slope;
	s32 uvdet_offs;

	/* safe frequency we can use at minimum voltage */
	u32 safe_fmax_vmin;
};
#define gm20b_clk(p) container_of((gk20a_clk(p)), struct gm20b_clk, base)

static u32 pl_to_div(u32 pl)
{
	return pl;
}

static u32 div_to_pl(u32 div)
{
	return div;
}
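
/*
 * On gm20b the GPC PLL post-divider is linear, hence the 1:1 mappings
 * above (on gk20a, by contrast, PL is an index into a divider table).
 */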

static const struct gk20a_clk_pllg_params gm20b_pllg_params = {
	.min_vco = 1300000, .max_vco = 2600000,
	.min_u = 12000, .max_u = 38400,
	.min_m = 1, .max_m = 255,
	.min_n = 8, .max_n = 255,
	.min_pl = 1, .max_pl = 31,
};
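
/*
 * All frequencies in the params above are in kHz (they are compared
 * against parent_rate / KHZ throughout): a 1.3-2.6 GHz VCO and a
 * 12-38.4 MHz reference input.
 */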

static void
gm20b_pllg_read_mnp(struct gm20b_clk *clk, struct gm20b_pll *pll)
{
	struct nvkm_subdev *subdev = &clk->base.base.subdev;
	struct nvkm_device *device = subdev->device;
	u32 val;

	gk20a_pllg_read_mnp(&clk->base, &pll->base);
	val = nvkm_rd32(device, GPCPLL_CFG2);
	pll->sdm_din = (val >> GPCPLL_CFG2_SDM_DIN_SHIFT) &
		       MASK(GPCPLL_CFG2_SDM_DIN_WIDTH);
}

static void
gm20b_pllg_write_mnp(struct gm20b_clk *clk, const struct gm20b_pll *pll)
{
	struct nvkm_device *device = clk->base.base.subdev.device;

	nvkm_mask(device, GPCPLL_CFG2, GPCPLL_CFG2_SDM_DIN_MASK,
		  pll->sdm_din << GPCPLL_CFG2_SDM_DIN_SHIFT);
	gk20a_pllg_write_mnp(&clk->base, &pll->base);
}

/*
 * Determine DFS_COEFF for the requested voltage. Always select the external
 * calibration override equal to the voltage, and set the maximum detection
 * limit to "0" (to make sure that the PLL output remains under the F/V
 * curve when voltage increases).
 */
static void
gm20b_dvfs_calc_det_coeff(struct gm20b_clk *clk, s32 uv,
			  struct gm20b_clk_dvfs *dvfs)
{
	struct nvkm_subdev *subdev = &clk->base.base.subdev;
	const struct gm20b_clk_dvfs_params *p = clk->dvfs_params;
	u32 coeff;
	/* Work with mv as uv would likely trigger an overflow */
	s32 mv = DIV_ROUND_CLOSEST(uv, 1000);

	/* coeff = slope * voltage + offset */
	coeff = DIV_ROUND_CLOSEST(mv * p->coeff_slope, 1000) + p->coeff_offs;
	coeff = DIV_ROUND_CLOSEST(coeff, 1000);
	dvfs->dfs_coeff = min_t(u32, coeff, MASK(GPCPLL_DVFS0_DFS_COEFF_WIDTH));

	dvfs->dfs_ext_cal = DIV_ROUND_CLOSEST(uv - clk->uvdet_offs,
					     clk->uvdet_slope);
	/* should never happen */
	if (abs(dvfs->dfs_ext_cal) >= BIT(DFS_DET_RANGE))
		nvkm_error(subdev, "dfs_ext_cal overflow!\n");

	dvfs->dfs_det_max = 0;

	nvkm_debug(subdev, "%s uv: %d coeff: %x, ext_cal: %d, det_max: %d\n",
		   __func__, uv, dvfs->dfs_coeff, dvfs->dfs_ext_cal,
		   dvfs->dfs_det_max);
}

/*
 * Solve equation for integer and fractional part of the effective NDIV:
 *
 * n_eff = n_int + 1/2 + (SDM_DIN / 2^(SDM_DIN_RANGE + 1)) +
 *         (DVFS_COEFF * DVFS_DET_DELTA) / 2^DFS_DET_RANGE
 *
 * The SDM_DIN LSB is finally shifted out, since it is not accessible by sw.
 */
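/*
 * Worked example (values assumed): for n_eff = 52 and det_delta = 0,
 * n = 52 << 6 = 3328, so n_int = 52 and rem = 0. The remainder then
 * becomes (0 << 7) - 2^12 = -4096 = 0x...f000; dropping the 8 LSBs leaves
 * sdm_din = 0xf0, i.e. -0.5 in the SDM's signed fixed-point format, which
 * exactly cancels the +1/2 term of the equation above.
 */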
static void
gm20b_dvfs_calc_ndiv(struct gm20b_clk *clk, u32 n_eff, u32 *n_int, u32 *sdm_din)
{
	struct nvkm_subdev *subdev = &clk->base.base.subdev;
	const struct gk20a_clk_pllg_params *p = clk->base.params;
	s32 n;	/* signed: det_delta may exceed the scaled n_eff */
	s32 det_delta;
	u32 rem, rem_range;

	/* calculate current ext_cal and subtract previous one */
	det_delta = DIV_ROUND_CLOSEST(((s32)clk->uv) - clk->uvdet_offs,
				      clk->uvdet_slope);
	det_delta -= clk->dvfs.dfs_ext_cal;
	det_delta = min(det_delta, clk->dvfs.dfs_det_max);
	det_delta *= clk->dvfs.dfs_coeff;

	/* integer part of n */
	n = (n_eff << DFS_DET_RANGE) - det_delta;
	/* should never happen! */
	if (n <= 0) {
		nvkm_error(subdev, "ndiv <= 0 - setting to 1...\n");
		n = 1 << DFS_DET_RANGE;
	}
	if (n >> DFS_DET_RANGE > p->max_n) {
		nvkm_error(subdev, "ndiv > max_n - setting to max_n...\n");
		n = p->max_n << DFS_DET_RANGE;
	}
	*n_int = n >> DFS_DET_RANGE;

	/* fractional part of n */
	rem = ((u32)n) & MASK(DFS_DET_RANGE);
	rem_range = SDM_DIN_RANGE + 1 - DFS_DET_RANGE;
	/* subtract 2^SDM_DIN_RANGE to account for the 1/2 of the equation */
	rem = (rem << rem_range) - BIT(SDM_DIN_RANGE);
	/* lose 8 LSB and clip - sdm_din only keeps the most significant byte */
	*sdm_din = (rem >> BITS_PER_BYTE) & MASK(GPCPLL_CFG2_SDM_DIN_WIDTH);

	nvkm_debug(subdev, "%s n_eff: %d, n_int: %d, sdm_din: %d\n", __func__,
		   n_eff, *n_int, *sdm_din);
}

static int
gm20b_pllg_slide(struct gm20b_clk *clk, u32 n)
{
	struct nvkm_subdev *subdev = &clk->base.base.subdev;
	struct nvkm_device *device = subdev->device;
	struct gm20b_pll pll;
	u32 n_int, sdm_din;
	int ret = 0;

	/* calculate the new n_int/sdm_din for this n/uv */
	gm20b_dvfs_calc_ndiv(clk, n, &n_int, &sdm_din);

	/* get old coefficients */
	gm20b_pllg_read_mnp(clk, &pll);
	/* do nothing if NDIV is the same */
	if (n_int == pll.base.n && sdm_din == pll.sdm_din)
		return 0;

	/* pll slowdown mode */
	nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
		BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT),
		BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT));

	/* new ndiv ready for ramp */
	/* in DVFS mode SDM is updated via the "new" field */
	nvkm_mask(device, GPCPLL_CFG2, GPCPLL_CFG2_SDM_DIN_NEW_MASK,
		  sdm_din << GPCPLL_CFG2_SDM_DIN_NEW_SHIFT);
	pll.base.n = n_int;
	udelay(1);
	gk20a_pllg_write_mnp(&clk->base, &pll.base);

	/* dynamic ramp to new ndiv */
	udelay(1);
	nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
		  BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT),
		  BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT));

	/* wait for ramping to complete */
	if (nvkm_wait_usec(device, 500, GPC_BCAST_NDIV_SLOWDOWN_DEBUG,
		GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK,
		GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK) < 0)
		ret = -ETIMEDOUT;

	/* in DVFS mode complete the SDM update */
	nvkm_mask(device, GPCPLL_CFG2, GPCPLL_CFG2_SDM_DIN_MASK,
		  sdm_din << GPCPLL_CFG2_SDM_DIN_SHIFT);

	/* exit slowdown mode */
	nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
		BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT) |
		BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT), 0);
	nvkm_rd32(device, GPCPLL_NDIV_SLOWDOWN);

	return ret;
}

static int
gm20b_pllg_enable(struct gm20b_clk *clk)
{
	struct nvkm_device *device = clk->base.base.subdev.device;

	nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, GPCPLL_CFG_ENABLE);
	nvkm_rd32(device, GPCPLL_CFG);

	/* In DVFS mode lock cannot be used - so just delay */
	udelay(40);

	/* set SYNC_MODE for glitchless switch out of bypass */
	nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_SYNC_MODE,
		       GPCPLL_CFG_SYNC_MODE);
	nvkm_rd32(device, GPCPLL_CFG);

	/* switch to VCO mode */
	nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT),
		  BIT(SEL_VCO_GPC2CLK_OUT_SHIFT));

	return 0;
}

static void
gm20b_pllg_disable(struct gm20b_clk *clk)
{
	struct nvkm_device *device = clk->base.base.subdev.device;

	/* put PLL in bypass before disabling it */
	nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT), 0);

	/* clear SYNC_MODE before disabling PLL */
	nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_SYNC_MODE, 0);

	nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, 0);
	nvkm_rd32(device, GPCPLL_CFG);
}

static int
gm20b_pllg_program_mnp(struct gm20b_clk *clk, const struct gk20a_pll *pll)
{
	struct nvkm_subdev *subdev = &clk->base.base.subdev;
	struct nvkm_device *device = subdev->device;
	struct gm20b_pll cur_pll;
	u32 n_int, sdm_din;
	/* if we only change pdiv, we can do a glitchless transition */
	bool pdiv_only;
	int ret;

	gm20b_dvfs_calc_ndiv(clk, pll->n, &n_int, &sdm_din);
	gm20b_pllg_read_mnp(clk, &cur_pll);
	pdiv_only = cur_pll.base.n == n_int && cur_pll.sdm_din == sdm_din &&
		    cur_pll.base.m == pll->m;

	/* need full sequence if clock not enabled yet */
	if (!gk20a_pllg_is_enabled(&clk->base))
		pdiv_only = false;

	/* split VCO-to-bypass jump in half by setting out divider 1:2 */
	nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
		  GPC2CLK_OUT_VCODIV2 << GPC2CLK_OUT_VCODIV_SHIFT);
	/* Intentional 2nd write to assure linear divider operation */
	nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
		  GPC2CLK_OUT_VCODIV2 << GPC2CLK_OUT_VCODIV_SHIFT);
	nvkm_rd32(device, GPC2CLK_OUT);
	udelay(2);

	if (pdiv_only) {
		u32 old = cur_pll.base.pl;
		u32 new = pll->pl;

		/*
		 * we can do a glitchless transition only if the old and new PL
		 * parameters share at least one bit set to 1. If this is not
		 * the case, calculate and program an interim PL that will allow
		 * us to respect that rule.
		 */
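		/*
		 * Illustrative example (values assumed): going from PL = 2
		 * (0b010) to PL = 5 (0b101), old & new == 0, so the interim
		 * value is min(2 | BIT(0), 5 | BIT(1)) = min(3, 7) = 3,
		 * which shares a set bit with both the old and new PL.
		 */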
		if ((old & new) == 0) {
			cur_pll.base.pl = min(old | BIT(ffs(new) - 1),
					      new | BIT(ffs(old) - 1));
			gk20a_pllg_write_mnp(&clk->base, &cur_pll.base);
		}

		cur_pll.base.pl = new;
		gk20a_pllg_write_mnp(&clk->base, &cur_pll.base);
	} else {
		/* disable before programming if more than pdiv changes */
		gm20b_pllg_disable(clk);

		cur_pll.base = *pll;
		cur_pll.base.n = n_int;
		cur_pll.sdm_din = sdm_din;
		gm20b_pllg_write_mnp(clk, &cur_pll);

		ret = gm20b_pllg_enable(clk);
		if (ret)
			return ret;
	}

	/* restore out divider 1:1 */
	udelay(2);
	nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
		  GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT);
	/* Intentional 2nd write to assure linear divider operation */
	nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
		  GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT);
	nvkm_rd32(device, GPC2CLK_OUT);

	return 0;
}

static int
gm20b_pllg_program_mnp_slide(struct gm20b_clk *clk, const struct gk20a_pll *pll)
{
	struct gk20a_pll cur_pll;
	int ret;

	if (gk20a_pllg_is_enabled(&clk->base)) {
		gk20a_pllg_read_mnp(&clk->base, &cur_pll);

		/* just do NDIV slide if there is no change to M and PL */
		if (pll->m == cur_pll.m && pll->pl == cur_pll.pl)
			return gm20b_pllg_slide(clk, pll->n);

		/* slide down to current NDIV_LO */
		cur_pll.n = gk20a_pllg_n_lo(&clk->base, &cur_pll);
		ret = gm20b_pllg_slide(clk, cur_pll.n);
		if (ret)
			return ret;
	}

	/* program MNP with the new clock parameters and new NDIV_LO */
	cur_pll = *pll;
	cur_pll.n = gk20a_pllg_n_lo(&clk->base, &cur_pll);
	ret = gm20b_pllg_program_mnp(clk, &cur_pll);
	if (ret)
		return ret;

	/* slide up to new NDIV */
	return gm20b_pllg_slide(clk, pll->n);
}

static int
gm20b_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
{
	struct gm20b_clk *clk = gm20b_clk(base);
	struct nvkm_subdev *subdev = &base->subdev;
	struct nvkm_volt *volt = base->subdev.device->volt;
	int ret;

	ret = gk20a_pllg_calc_mnp(&clk->base, cstate->domain[nv_clk_src_gpc] *
					     GK20A_CLK_GPC_MDIV, &clk->new_pll);
	if (ret)
		return ret;

	clk->new_uv = volt->vid[cstate->voltage].uv;
	gm20b_dvfs_calc_det_coeff(clk, clk->new_uv, &clk->new_dvfs);

	nvkm_debug(subdev, "%s uv: %d uv\n", __func__, clk->new_uv);

	return 0;
}

/*
 * Compute PLL parameters that are always safe for the current voltage
 */
static void
gm20b_dvfs_calc_safe_pll(struct gm20b_clk *clk, struct gk20a_pll *pll)
{
	u32 rate = gk20a_pllg_calc_rate(&clk->base, pll) / KHZ;
	u32 parent_rate = clk->base.parent_rate / KHZ;
	u32 nmin, nsafe;

	/* take off a 10% safety margin */
	if (rate > clk->safe_fmax_vmin)
		rate = rate * (100 - 10) / 100;

	/* gpc2clk */
	rate *= 2;

	nmin = DIV_ROUND_UP(pll->m * clk->base.params->min_vco, parent_rate);
	/* both rate and parent_rate are in kHz here */
	nsafe = pll->m * rate / parent_rate;

	if (nsafe < nmin) {
		pll->pl = DIV_ROUND_UP(nmin * parent_rate, pll->m * rate);
		nsafe = nmin;
	}

	pll->n = nsafe;
}

static void
gm20b_dvfs_program_coeff(struct gm20b_clk *clk, u32 coeff)
{
	struct nvkm_device *device = clk->base.base.subdev.device;

	/* strobe to read external DFS coefficient */
	nvkm_mask(device, GPC_BCAST_GPCPLL_DVFS2,
		  GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT,
		  GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT);

	nvkm_mask(device, GPCPLL_DVFS0, GPCPLL_DVFS0_DFS_COEFF_MASK,
		  coeff << GPCPLL_DVFS0_DFS_COEFF_SHIFT);

	udelay(1);
	nvkm_mask(device, GPC_BCAST_GPCPLL_DVFS2,
		  GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT, 0);
}

static void
gm20b_dvfs_program_ext_cal(struct gm20b_clk *clk, u32 dfs_det_cal)
{
	struct nvkm_device *device = clk->base.base.subdev.device;
	u32 val;

	nvkm_mask(device, GPC_BCAST_GPCPLL_DVFS2, MASK(DFS_DET_RANGE + 1),
		  dfs_det_cal);
	udelay(1);

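	/*
	 * The two magic bits below fall inside the DFS_CTRL field
	 * (bits 27:16) of GPCPLL_DVFS1; they are assumed to enable the
	 * external calibration override described in the comment below.
	 */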
	val = nvkm_rd32(device, GPCPLL_DVFS1);
	if (!(val & BIT(25))) {
		/* Use external value to overwrite calibration value */
		val |= BIT(25) | BIT(16);
		nvkm_wr32(device, GPCPLL_DVFS1, val);
	}
}

static void
gm20b_dvfs_program_dfs_detection(struct gm20b_clk *clk,
				 struct gm20b_clk_dvfs *dvfs)
{
	struct nvkm_device *device = clk->base.base.subdev.device;

	/* strobe to read external DFS coefficient */
	nvkm_mask(device, GPC_BCAST_GPCPLL_DVFS2,
		  GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT,
		  GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT);

	nvkm_mask(device, GPCPLL_DVFS0,
		  GPCPLL_DVFS0_DFS_COEFF_MASK | GPCPLL_DVFS0_DFS_DET_MAX_MASK,
		  dvfs->dfs_coeff << GPCPLL_DVFS0_DFS_COEFF_SHIFT |
		  dvfs->dfs_det_max << GPCPLL_DVFS0_DFS_DET_MAX_SHIFT);

	udelay(1);
	nvkm_mask(device, GPC_BCAST_GPCPLL_DVFS2,
		  GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT, 0);

	gm20b_dvfs_program_ext_cal(clk, dvfs->dfs_ext_cal);
}

static int
gm20b_clk_prog(struct nvkm_clk *base)
{
	struct gm20b_clk *clk = gm20b_clk(base);
	u32 cur_freq;
	int ret;

	/* No change in DVFS settings? */
	if (clk->uv == clk->new_uv)
		goto prog;

	/*
	 * Interim step for changing DVFS detection settings: a frequency
	 * low enough to be safe at DVFS coeff = 0.
	 *
	 * 1. If voltage is increasing:
	 * - safe frequency target matches the lowest - old - frequency
	 * - DVFS settings are still old
	 * - Voltage already increased to new level by volt, but the maximum
	 *   detection limit ensures PLL output remains under the F/V curve
	 *
	 * 2. If voltage is decreasing:
	 * - safe frequency target matches the lowest - new - frequency
	 * - DVFS settings are still old
	 * - Voltage is also old, it will be lowered by volt afterwards
	 *
	 * The interim step can be skipped if the old frequency is below the
	 * safe minimum, i.e., it is low enough to be safe at any voltage in
	 * the operating range with zero DVFS coefficient.
	 */
	cur_freq = nvkm_clk_read(&clk->base.base, nv_clk_src_gpc);
	if (cur_freq > clk->safe_fmax_vmin) {
		struct gk20a_pll pll_safe;

		if (clk->uv < clk->new_uv)
			/* voltage will rise: safe frequency is the current one */
			pll_safe = clk->base.pll;
		else
			/* voltage will drop: safe frequency is the new one */
			pll_safe = clk->new_pll;

		gm20b_dvfs_calc_safe_pll(clk, &pll_safe);
		ret = gm20b_pllg_program_mnp_slide(clk, &pll_safe);
		if (ret)
			return ret;
	}

	/*
	 * DVFS detection settings transition:
	 * - Set DVFS coefficient zero
	 * - Set calibration level to new voltage
	 * - Set DVFS coefficient to match new voltage
	 */
	gm20b_dvfs_program_coeff(clk, 0);
	gm20b_dvfs_program_ext_cal(clk, clk->new_dvfs.dfs_ext_cal);
	gm20b_dvfs_program_coeff(clk, clk->new_dvfs.dfs_coeff);
	gm20b_dvfs_program_dfs_detection(clk, &clk->new_dvfs);

prog:
	clk->uv = clk->new_uv;
	clk->dvfs = clk->new_dvfs;
	clk->base.pll = clk->new_pll;

	return gm20b_pllg_program_mnp_slide(clk, &clk->base.pll);
}

static struct nvkm_pstate
gm20b_pstates[] = {
	{
		.base = {
			.domain[nv_clk_src_gpc] = 76800,
			.voltage = 0,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 153600,
			.voltage = 1,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 230400,
			.voltage = 2,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 307200,
			.voltage = 3,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 384000,
			.voltage = 4,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 460800,
			.voltage = 5,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 537600,
			.voltage = 6,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 614400,
			.voltage = 7,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 691200,
			.voltage = 8,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 768000,
			.voltage = 9,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 844800,
			.voltage = 10,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 921600,
			.voltage = 11,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 998400,
			.voltage = 12,
		},
	},
};

static void
gm20b_clk_fini(struct nvkm_clk *base)
{
	struct nvkm_device *device = base->subdev.device;
	struct gm20b_clk *clk = gm20b_clk(base);

	/* slide to VCO min */
	if (gk20a_pllg_is_enabled(&clk->base)) {
		struct gk20a_pll pll;
		u32 n_lo;

		gk20a_pllg_read_mnp(&clk->base, &pll);
		n_lo = gk20a_pllg_n_lo(&clk->base, &pll);
		gm20b_pllg_slide(clk, n_lo);
	}

	gm20b_pllg_disable(clk);

	/* set IDDQ */
	nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_IDDQ, 1);
}

static int
gm20b_clk_init_dvfs(struct gm20b_clk *clk)
{
	struct nvkm_subdev *subdev = &clk->base.base.subdev;
	struct nvkm_device *device = subdev->device;
	bool fused = clk->uvdet_offs && clk->uvdet_slope;
	static const s32 ADC_SLOPE_UV = 10000; /* default ADC detection slope */
	u32 data;
	int ret;

	/* Enable NA DVFS */
	nvkm_mask(device, GPCPLL_DVFS1, GPCPLL_DVFS1_EN_DFS_BIT,
		  GPCPLL_DVFS1_EN_DFS_BIT);

	/* Set VCO_CTRL */
	if (clk->dvfs_params->vco_ctrl)
		nvkm_mask(device, GPCPLL_CFG3, GPCPLL_CFG3_VCO_CTRL_MASK,
		      clk->dvfs_params->vco_ctrl << GPCPLL_CFG3_VCO_CTRL_SHIFT);

	if (fused) {
		/* Start internal calibration, but ignore results */
		nvkm_mask(device, GPCPLL_DVFS1, GPCPLL_DVFS1_EN_DFS_CAL_BIT,
			  GPCPLL_DVFS1_EN_DFS_CAL_BIT);

		/* got uvdet parameters from fuse, skip calibration */
		goto calibrated;
	}

	/*
	 * If calibration parameters are not fused, start internal calibration,
	 * wait for completion, and use results along with default slope to
	 * calculate ADC offset during boot.
	 */
	nvkm_mask(device, GPCPLL_DVFS1, GPCPLL_DVFS1_EN_DFS_CAL_BIT,
			  GPCPLL_DVFS1_EN_DFS_CAL_BIT);

	/* Wait for internal calibration done (spec < 2us). */
	ret = nvkm_wait_usec(device, 10, GPCPLL_DVFS1,
			     GPCPLL_DVFS1_DFS_CAL_DONE_BIT,
			     GPCPLL_DVFS1_DFS_CAL_DONE_BIT);
	if (ret < 0) {
		nvkm_error(subdev, "GPCPLL calibration timeout\n");
		return -ETIMEDOUT;
	}

	data = nvkm_rd32(device, GPCPLL_CFG3) >>
			 GPCPLL_CFG3_PLL_DFS_TESTOUT_SHIFT;
	data &= MASK(GPCPLL_CFG3_PLL_DFS_TESTOUT_WIDTH);

	clk->uvdet_slope = ADC_SLOPE_UV;
	clk->uvdet_offs = ((s32)clk->uv) - data * ADC_SLOPE_UV;
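	/*
	 * Illustrative example (values assumed): with the current voltage
	 * at 900000 uV and an ADC readback of 40, the offset works out to
	 * 900000 - 40 * 10000 = 500000 uV.
	 */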

	nvkm_debug(subdev, "calibrated DVFS parameters: offs %d, slope %d\n",
		   clk->uvdet_offs, clk->uvdet_slope);

calibrated:
	/* Compute and apply initial DVFS parameters */
	gm20b_dvfs_calc_det_coeff(clk, clk->uv, &clk->dvfs);
	gm20b_dvfs_program_coeff(clk, 0);
	gm20b_dvfs_program_ext_cal(clk, clk->dvfs.dfs_ext_cal);
	gm20b_dvfs_program_coeff(clk, clk->dvfs.dfs_coeff);
	/* apply the freshly computed detection settings */
	gm20b_dvfs_program_dfs_detection(clk, &clk->dvfs);

	return 0;
}

/* Forward declaration to detect speedo >= 1 in gm20b_clk_init() */
static const struct nvkm_clk_func gm20b_clk;

static int
gm20b_clk_init(struct nvkm_clk *base)
{
	struct gk20a_clk *clk = gk20a_clk(base);
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvkm_device *device = subdev->device;
	int ret;
	u32 data;

	/* get out from IDDQ */
	nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_IDDQ, 0);
	nvkm_rd32(device, GPCPLL_CFG);
	udelay(5);

	nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_INIT_MASK,
		  GPC2CLK_OUT_INIT_VAL);

	/* Set the global bypass control to VCO */
	nvkm_mask(device, BYPASSCTRL_SYS,
	       MASK(BYPASSCTRL_SYS_GPCPLL_WIDTH) << BYPASSCTRL_SYS_GPCPLL_SHIFT,
	       0);

	ret = gk20a_clk_setup_slide(clk);
	if (ret)
		return ret;

	/* If not fused, set RAM SVOP PDP data 0x2, and enable fuse override */
	data = nvkm_rd32(device, 0x021944);
	if (!(data & 0x3)) {
		data |= 0x2;
		nvkm_wr32(device, 0x021944, data);

		data = nvkm_rd32(device, 0x021948);
		data |= 0x1;
		nvkm_wr32(device, 0x021948, data);
	}

	/* Disable idle slowdown */
	nvkm_mask(device, 0x20160, 0x003f0000, 0x0);

	/* speedo >= 1? */
	if (clk->base.func == &gm20b_clk) {
		struct gm20b_clk *_clk = gm20b_clk(base);
		struct nvkm_volt *volt = device->volt;

		/* Get current voltage */
		_clk->uv = nvkm_volt_get(volt);

		/* Initialize DVFS */
		ret = gm20b_clk_init_dvfs(_clk);
		if (ret)
			return ret;
	}

	/* Start with the lowest frequency */
	base->func->calc(base, &base->func->pstates[0].base);
	ret = base->func->prog(base);
	if (ret) {
		nvkm_error(subdev, "cannot initialize clock\n");
		return ret;
	}

	return 0;
}

static const struct nvkm_clk_func
gm20b_clk_speedo0 = {
	.init = gm20b_clk_init,
	.fini = gk20a_clk_fini,
	.read = gk20a_clk_read,
	.calc = gk20a_clk_calc,
	.prog = gk20a_clk_prog,
	.tidy = gk20a_clk_tidy,
	.pstates = gm20b_pstates,
	/* Speedo 0 only supports 12 voltages */
	.nr_pstates = ARRAY_SIZE(gm20b_pstates) - 1,
	.domains = {
		{ nv_clk_src_crystal, 0xff },
		{ nv_clk_src_gpc, 0xff, 0, "core", GK20A_CLK_GPC_MDIV },
		{ nv_clk_src_max },
	},
};

static const struct nvkm_clk_func
gm20b_clk = {
	.init = gm20b_clk_init,
	.fini = gm20b_clk_fini,
	.read = gk20a_clk_read,
	.calc = gm20b_clk_calc,
	.prog = gm20b_clk_prog,
	.tidy = gk20a_clk_tidy,
	.pstates = gm20b_pstates,
	.nr_pstates = ARRAY_SIZE(gm20b_pstates),
	.domains = {
		{ nv_clk_src_crystal, 0xff },
		{ nv_clk_src_gpc, 0xff, 0, "core", GK20A_CLK_GPC_MDIV },
		{ nv_clk_src_max },
	},
};

static int
gm20b_clk_new_speedo0(struct nvkm_device *device, int index,
		      struct nvkm_clk **pclk)
{
	struct gk20a_clk *clk;
	int ret;

	clk = kzalloc(sizeof(*clk), GFP_KERNEL);
	if (!clk)
		return -ENOMEM;
	*pclk = &clk->base;

	ret = gk20a_clk_ctor(device, index, &gm20b_clk_speedo0,
			     &gm20b_pllg_params, clk);

	clk->pl_to_div = pl_to_div;
	clk->div_to_pl = div_to_pl;

	return ret;
}

/* FUSE register */
#define FUSE_RESERVED_CALIB0	0x204
#define FUSE_RESERVED_CALIB0_INTERCEPT_FRAC_SHIFT	0
#define FUSE_RESERVED_CALIB0_INTERCEPT_FRAC_WIDTH	4
#define FUSE_RESERVED_CALIB0_INTERCEPT_INT_SHIFT	4
#define FUSE_RESERVED_CALIB0_INTERCEPT_INT_WIDTH	10
#define FUSE_RESERVED_CALIB0_SLOPE_FRAC_SHIFT		14
#define FUSE_RESERVED_CALIB0_SLOPE_FRAC_WIDTH		10
#define FUSE_RESERVED_CALIB0_SLOPE_INT_SHIFT		24
#define FUSE_RESERVED_CALIB0_SLOPE_INT_WIDTH		6
#define FUSE_RESERVED_CALIB0_FUSE_REV_SHIFT		30
#define FUSE_RESERVED_CALIB0_FUSE_REV_WIDTH		2

static int
gm20b_clk_init_fused_params(struct gm20b_clk *clk)
{
	struct nvkm_subdev *subdev = &clk->base.base.subdev;
	u32 val = 0;
	u32 rev = 0;

#if IS_ENABLED(CONFIG_ARCH_TEGRA)
	tegra_fuse_readl(FUSE_RESERVED_CALIB0, &val);
	rev = (val >> FUSE_RESERVED_CALIB0_FUSE_REV_SHIFT) &
	      MASK(FUSE_RESERVED_CALIB0_FUSE_REV_WIDTH);
#endif

	/* No fused parameters, we will calibrate later */
	if (rev == 0)
		return -EINVAL;

	/* Integer part in mV + fractional part in uV */
	clk->uvdet_slope = ((val >> FUSE_RESERVED_CALIB0_SLOPE_INT_SHIFT) &
			MASK(FUSE_RESERVED_CALIB0_SLOPE_INT_WIDTH)) * 1000 +
			((val >> FUSE_RESERVED_CALIB0_SLOPE_FRAC_SHIFT) &
			MASK(FUSE_RESERVED_CALIB0_SLOPE_FRAC_WIDTH));

	/* Integer part in mV + fractional part in 100uV */
	clk->uvdet_offs = ((val >> FUSE_RESERVED_CALIB0_INTERCEPT_INT_SHIFT) &
			MASK(FUSE_RESERVED_CALIB0_INTERCEPT_INT_WIDTH)) * 1000 +
			((val >> FUSE_RESERVED_CALIB0_INTERCEPT_FRAC_SHIFT) &
			 MASK(FUSE_RESERVED_CALIB0_INTERCEPT_FRAC_WIDTH)) * 100;
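	/*
	 * Illustrative example (fuse values assumed): SLOPE_INT = 10 with
	 * SLOPE_FRAC = 500 decodes to 10 * 1000 + 500 = 10500 uV per ADC
	 * step; INTERCEPT_INT = 500 with INTERCEPT_FRAC = 5 decodes to
	 * 500 * 1000 + 5 * 100 = 500500 uV.
	 */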

	nvkm_debug(subdev, "fused calibration data: slope %d, offs %d\n",
		   clk->uvdet_slope, clk->uvdet_offs);
	return 0;
}

static int
gm20b_clk_init_safe_fmax(struct gm20b_clk *clk)
{
	struct nvkm_subdev *subdev = &clk->base.base.subdev;
	struct nvkm_volt *volt = subdev->device->volt;
	struct nvkm_pstate *pstates = clk->base.base.func->pstates;
	int nr_pstates = clk->base.base.func->nr_pstates;
	int vmin, id = 0;
	u32 fmax = 0;
	int i;

	/* find the lowest voltage we can use */
	vmin = volt->vid[0].uv;
	for (i = 1; i < volt->vid_nr; i++) {
		if (volt->vid[i].uv <= vmin) {
			vmin = volt->vid[i].uv;
			id = volt->vid[i].vid;
		}
	}

	/* find the max frequency at this voltage */
	for (i = 0; i < nr_pstates; i++)
		if (pstates[i].base.voltage == id)
			fmax = max(fmax,
				   pstates[i].base.domain[nv_clk_src_gpc]);

	if (!fmax) {
		nvkm_error(subdev, "failed to evaluate safe fmax\n");
		return -EINVAL;
	}

	/* we are safe at 90% of the max frequency */
	clk->safe_fmax_vmin = fmax * (100 - 10) / 100;
	nvkm_debug(subdev, "safe fmax @ vmin = %u kHz\n", clk->safe_fmax_vmin);

	return 0;
}

int
gm20b_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
{
	struct nvkm_device_tegra *tdev = device->func->tegra(device);
	struct gm20b_clk *clk;
	struct nvkm_subdev *subdev;
	struct gk20a_clk_pllg_params *clk_params;
	int ret;

	/* Speedo 0 GPUs cannot use the noise-aware PLL */
	if (tdev->gpu_speedo_id == 0)
		return gm20b_clk_new_speedo0(device, index, pclk);

	/* Speedo >= 1, use NAPLL */
	clk = kzalloc(sizeof(*clk) + sizeof(*clk_params), GFP_KERNEL);
	if (!clk)
		return -ENOMEM;
	*pclk = &clk->base.base;
	subdev = &clk->base.base.subdev;

	/* duplicate the clock parameters since we will patch them below */
	clk_params = (void *) (clk + 1);
	*clk_params = gm20b_pllg_params;
	ret = gk20a_clk_ctor(device, index, &gm20b_clk, clk_params,
			     &clk->base);
	if (ret)
		return ret;

	/*
	 * NAPLL can only work with max_u, so clamp the m range such that
	 * gk20a_pllg_calc_mnp always uses it
	 */
	clk_params->max_m = clk_params->min_m = DIV_ROUND_UP(clk_params->max_u,
						(clk->base.parent_rate / KHZ));
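	/*
	 * E.g. with max_u = 38400 kHz and a 38.4 MHz reference clock (a
	 * typical Tegra X1 configuration, assumed here), M is pinned to 1.
	 */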
	if (clk_params->max_m == 0) {
		nvkm_warn(subdev, "cannot use NAPLL, using legacy clock...\n");
		kfree(clk);
		return gm20b_clk_new_speedo0(device, index, pclk);
	}

	clk->base.pl_to_div = pl_to_div;
	clk->base.div_to_pl = div_to_pl;

	clk->dvfs_params = &gm20b_dvfs_params;

	ret = gm20b_clk_init_fused_params(clk);
	/*
	 * if that failed, we will calibrate during init - this should
	 * never happen on production parts
	 */
	if (ret)
		nvkm_warn(subdev, "no fused calibration parameters\n");

	ret = gm20b_clk_init_safe_fmax(clk);
	if (ret)
		return ret;

	return 0;
}