xref: /linux/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c (revision 52e6b198833411564e0b9ce6e96bbd3d72f961e7)
1 /*
2  * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20  * DEALINGS IN THE SOFTWARE.
21  */
22 
23 #include <subdev/clk.h>
24 #include <subdev/volt.h>
25 #include <subdev/timer.h>
26 #include <core/device.h>
27 #include <core/tegra.h>
28 
29 #include "priv.h"
30 #include "gk20a_devfreq.h"
31 #include "gk20a.h"
32 
33 #define GPCPLL_CFG_SYNC_MODE	BIT(2)
34 
35 #define BYPASSCTRL_SYS	(SYS_GPCPLL_CFG_BASE + 0x340)
36 #define BYPASSCTRL_SYS_GPCPLL_SHIFT	0
37 #define BYPASSCTRL_SYS_GPCPLL_WIDTH	1
38 
39 #define GPCPLL_CFG2_SDM_DIN_SHIFT	0
40 #define GPCPLL_CFG2_SDM_DIN_WIDTH	8
41 #define GPCPLL_CFG2_SDM_DIN_MASK	\
42 	(MASK(GPCPLL_CFG2_SDM_DIN_WIDTH) << GPCPLL_CFG2_SDM_DIN_SHIFT)
43 #define GPCPLL_CFG2_SDM_DIN_NEW_SHIFT	8
44 #define GPCPLL_CFG2_SDM_DIN_NEW_WIDTH	15
45 #define GPCPLL_CFG2_SDM_DIN_NEW_MASK	\
46 	(MASK(GPCPLL_CFG2_SDM_DIN_NEW_WIDTH) << GPCPLL_CFG2_SDM_DIN_NEW_SHIFT)
47 #define GPCPLL_CFG2_SETUP2_SHIFT	16
48 #define GPCPLL_CFG2_PLL_STEPA_SHIFT	24
49 
50 #define GPCPLL_DVFS0	(SYS_GPCPLL_CFG_BASE + 0x10)
51 #define GPCPLL_DVFS0_DFS_COEFF_SHIFT	0
52 #define GPCPLL_DVFS0_DFS_COEFF_WIDTH	7
53 #define GPCPLL_DVFS0_DFS_COEFF_MASK	\
54 	(MASK(GPCPLL_DVFS0_DFS_COEFF_WIDTH) << GPCPLL_DVFS0_DFS_COEFF_SHIFT)
55 #define GPCPLL_DVFS0_DFS_DET_MAX_SHIFT	8
56 #define GPCPLL_DVFS0_DFS_DET_MAX_WIDTH	7
57 #define GPCPLL_DVFS0_DFS_DET_MAX_MASK	\
58 	(MASK(GPCPLL_DVFS0_DFS_DET_MAX_WIDTH) << GPCPLL_DVFS0_DFS_DET_MAX_SHIFT)
59 
60 #define GPCPLL_DVFS1		(SYS_GPCPLL_CFG_BASE + 0x14)
61 #define GPCPLL_DVFS1_DFS_EXT_DET_SHIFT		0
62 #define GPCPLL_DVFS1_DFS_EXT_DET_WIDTH		7
63 #define GPCPLL_DVFS1_DFS_EXT_STRB_SHIFT		7
64 #define GPCPLL_DVFS1_DFS_EXT_STRB_WIDTH		1
65 #define GPCPLL_DVFS1_DFS_EXT_CAL_SHIFT		8
66 #define GPCPLL_DVFS1_DFS_EXT_CAL_WIDTH		7
67 #define GPCPLL_DVFS1_DFS_EXT_SEL_SHIFT		15
68 #define GPCPLL_DVFS1_DFS_EXT_SEL_WIDTH		1
69 #define GPCPLL_DVFS1_DFS_CTRL_SHIFT		16
70 #define GPCPLL_DVFS1_DFS_CTRL_WIDTH		12
71 #define GPCPLL_DVFS1_EN_SDM_SHIFT		28
72 #define GPCPLL_DVFS1_EN_SDM_WIDTH		1
73 #define GPCPLL_DVFS1_EN_SDM_BIT			BIT(28)
74 #define GPCPLL_DVFS1_EN_DFS_SHIFT		29
75 #define GPCPLL_DVFS1_EN_DFS_WIDTH		1
76 #define GPCPLL_DVFS1_EN_DFS_BIT			BIT(29)
77 #define GPCPLL_DVFS1_EN_DFS_CAL_SHIFT		30
78 #define GPCPLL_DVFS1_EN_DFS_CAL_WIDTH		1
79 #define GPCPLL_DVFS1_EN_DFS_CAL_BIT		BIT(30)
80 #define GPCPLL_DVFS1_DFS_CAL_DONE_SHIFT		31
81 #define GPCPLL_DVFS1_DFS_CAL_DONE_WIDTH		1
82 #define GPCPLL_DVFS1_DFS_CAL_DONE_BIT		BIT(31)
83 
84 #define GPC_BCAST_GPCPLL_DVFS2	(GPC_BCAST_GPCPLL_CFG_BASE + 0x20)
85 #define GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT	BIT(16)
86 
87 #define GPCPLL_CFG3_PLL_DFS_TESTOUT_SHIFT	24
88 #define GPCPLL_CFG3_PLL_DFS_TESTOUT_WIDTH	7
89 
90 #define DFS_DET_RANGE	6	/* -2^6 ... 2^6-1 */
91 #define SDM_DIN_RANGE	12	/* -2^12 ... 2^12-1 */
92 
/* Parameters of the voltage-to-DFS-coefficient curve used by NA DVFS */
struct gm20b_clk_dvfs_params {
	s32 coeff_slope;	/* slope of the coefficient vs voltage curve */
	s32 coeff_offs;		/* offset of the coefficient vs voltage curve */
	u32 vco_ctrl;		/* value for the GPCPLL_CFG3 VCO_CTRL field */
};

/*
 * DVFS curve for GM20B. coeff_slope/coeff_offs are stored scaled; see
 * gm20b_dvfs_calc_det_coeff() for the divisions that scale them back.
 */
static const struct gm20b_clk_dvfs_params gm20b_dvfs_params = {
	.coeff_slope = -165230,
	.coeff_offs = 214007,
	.vco_ctrl = 0x7 << 3,
};
104 
/*
 * base.n is now the *integer* part of the N factor.
 * sdm_din contains n's decimal part.
 */
struct gm20b_pll {
	struct gk20a_pll base;
	u32 sdm_din;	/* fractional NDIV, read/written via GPCPLL_CFG2 */
};

/* DVFS detection settings, programmed via gm20b_dvfs_program_*() */
struct gm20b_clk_dvfs {
	u32 dfs_coeff;		/* DFS coefficient (GPCPLL_DVFS0) */
	s32 dfs_det_max;	/* maximum detection limit (GPCPLL_DVFS0) */
	s32 dfs_ext_cal;	/* external calibration override value */
};
119 
struct gm20b_clk {
	/* currently applied parameters */
	struct gk20a_clk base;
	struct gm20b_clk_dvfs dvfs;
	u32 uv;				/* current voltage, in uV */

	/* new parameters to apply */
	struct gk20a_pll new_pll;
	struct gm20b_clk_dvfs new_dvfs;
	u32 new_uv;			/* target voltage, in uV */

	const struct gm20b_clk_dvfs_params *dvfs_params;

	/* fused parameters */
	s32 uvdet_slope;	/* ADC detection slope, in uV per step */
	s32 uvdet_offs;		/* ADC detection offset, in uV */

	/* safe frequency we can use at minimum voltage */
	u32 safe_fmax_vmin;
};
/* upcast from the embedded gk20a_clk to its gm20b_clk container */
#define gm20b_clk(p) container_of((gk20a_clk(p)), struct gm20b_clk, base)
141 
/* On GM20B the PL field encodes the post divider directly (identity map). */
static u32 pl_to_div(u32 pl)
{
	u32 div = pl;

	return div;
}
146 
/* Inverse of pl_to_div(): the divider value is stored as-is in PL. */
static u32 div_to_pl(u32 div)
{
	u32 pl = div;

	return pl;
}
151 
/* GPCPLL operating limits for GM20B; frequencies are in KHz */
static const struct gk20a_clk_pllg_params gm20b_pllg_params = {
	.min_vco = 1300000, .max_vco = 2600000,
	.min_u = 12000, .max_u = 38400,
	.min_m = 1, .max_m = 255,
	.min_n = 8, .max_n = 255,
	.min_pl = 1, .max_pl = 31,
};
159 
/*
 * Read the current PLL coefficients: M/N/PL through the common gk20a
 * helper, plus the GM20B-specific fractional NDIV (SDM_DIN) from
 * GPCPLL_CFG2.
 */
static void
gm20b_pllg_read_mnp(struct gm20b_clk *clk, struct gm20b_pll *pll)
{
	struct nvkm_subdev *subdev = &clk->base.base.subdev;
	struct nvkm_device *device = subdev->device;
	u32 val;

	gk20a_pllg_read_mnp(&clk->base, &pll->base);
	val = nvkm_rd32(device, GPCPLL_CFG2);
	pll->sdm_din = (val >> GPCPLL_CFG2_SDM_DIN_SHIFT) &
		       MASK(GPCPLL_CFG2_SDM_DIN_WIDTH);
}
172 
/*
 * Program the PLL coefficients: the fractional NDIV (SDM_DIN) is written
 * to GPCPLL_CFG2 first, then M/N/PL through the common gk20a helper.
 */
static void
gm20b_pllg_write_mnp(struct gm20b_clk *clk, const struct gm20b_pll *pll)
{
	struct nvkm_device *device = clk->base.base.subdev.device;

	nvkm_mask(device, GPCPLL_CFG2, GPCPLL_CFG2_SDM_DIN_MASK,
		  pll->sdm_din << GPCPLL_CFG2_SDM_DIN_SHIFT);
	gk20a_pllg_write_mnp(&clk->base, &pll->base);
}
182 
/*
 * Determine DFS_COEFF for the requested voltage. Always select external
 * calibration override equal to the voltage, and set maximum detection
 * limit "0" (to make sure that PLL output remains under F/V curve when
 * voltage increases).
 */
static void
gm20b_dvfs_calc_det_coeff(struct gm20b_clk *clk, s32 uv,
			  struct gm20b_clk_dvfs *dvfs)
{
	struct nvkm_subdev *subdev = &clk->base.base.subdev;
	const struct gm20b_clk_dvfs_params *p = clk->dvfs_params;
	u32 coeff;
	/* Work with mv as uv would likely trigger an overflow */
	s32 mv = DIV_ROUND_CLOSEST(uv, 1000);

	/* coeff = slope * voltage + offset (slope/offset are stored scaled) */
	coeff = DIV_ROUND_CLOSEST(mv * p->coeff_slope, 1000) + p->coeff_offs;
	coeff = DIV_ROUND_CLOSEST(coeff, 1000);
	/* clamp to the width of the DFS_COEFF register field */
	dvfs->dfs_coeff = min_t(u32, coeff, MASK(GPCPLL_DVFS0_DFS_COEFF_WIDTH));

	/* external calibration: voltage converted into ADC steps */
	dvfs->dfs_ext_cal = DIV_ROUND_CLOSEST(uv - clk->uvdet_offs,
					     clk->uvdet_slope);
	/* should never happen */
	if (abs(dvfs->dfs_ext_cal) >= BIT(DFS_DET_RANGE))
		nvkm_error(subdev, "dfs_ext_cal overflow!\n");

	dvfs->dfs_det_max = 0;

	nvkm_debug(subdev, "%s uv: %d coeff: %x, ext_cal: %d, det_max: %d\n",
		   __func__, uv, dvfs->dfs_coeff, dvfs->dfs_ext_cal,
		   dvfs->dfs_det_max);
}
216 
217 /*
218  * Solve equation for integer and fractional part of the effective NDIV:
219  *
220  * n_eff = n_int + 1/2 + (SDM_DIN / 2^(SDM_DIN_RANGE + 1)) +
221  *         (DVFS_COEFF * DVFS_DET_DELTA) / 2^DFS_DET_RANGE
222  *
223  * The SDM_DIN LSB is finally shifted out, since it is not accessible by sw.
224  */
225 static void
226 gm20b_dvfs_calc_ndiv(struct gm20b_clk *clk, u32 n_eff, u32 *n_int, u32 *sdm_din)
227 {
228 	struct nvkm_subdev *subdev = &clk->base.base.subdev;
229 	const struct gk20a_clk_pllg_params *p = clk->base.params;
230 	u32 n;
231 	s32 det_delta;
232 	u32 rem, rem_range;
233 
234 	/* calculate current ext_cal and subtract previous one */
235 	det_delta = DIV_ROUND_CLOSEST(((s32)clk->uv) - clk->uvdet_offs,
236 				      clk->uvdet_slope);
237 	det_delta -= clk->dvfs.dfs_ext_cal;
238 	det_delta = min(det_delta, clk->dvfs.dfs_det_max);
239 	det_delta *= clk->dvfs.dfs_coeff;
240 
241 	/* integer part of n */
242 	n = (n_eff << DFS_DET_RANGE) - det_delta;
243 	/* should never happen! */
244 	if (n <= 0) {
245 		nvkm_error(subdev, "ndiv <= 0 - setting to 1...\n");
246 		n = 1 << DFS_DET_RANGE;
247 	}
248 	if (n >> DFS_DET_RANGE > p->max_n) {
249 		nvkm_error(subdev, "ndiv > max_n - setting to max_n...\n");
250 		n = p->max_n << DFS_DET_RANGE;
251 	}
252 	*n_int = n >> DFS_DET_RANGE;
253 
254 	/* fractional part of n */
255 	rem = ((u32)n) & MASK(DFS_DET_RANGE);
256 	rem_range = SDM_DIN_RANGE + 1 - DFS_DET_RANGE;
257 	/* subtract 2^SDM_DIN_RANGE to account for the 1/2 of the equation */
258 	rem = (rem << rem_range) - BIT(SDM_DIN_RANGE);
259 	/* lose 8 LSB and clip - sdm_din only keeps the most significant byte */
260 	*sdm_din = (rem >> BITS_PER_BYTE) & MASK(GPCPLL_CFG2_SDM_DIN_WIDTH);
261 
262 	nvkm_debug(subdev, "%s n_eff: %d, n_int: %d, sdm_din: %d\n", __func__,
263 		   n_eff, *n_int, *sdm_din);
264 }
265 
/*
 * Slide the PLL to a new effective NDIV using the hardware dynamic ramp,
 * staying in VCO mode the whole time. The fractional part (SDM_DIN) is
 * staged through the "new" field first and committed once the ramp is done.
 *
 * Returns 0 on success (or if nothing changed), -ETIMEDOUT if ramp
 * completion is not reported within 500us.
 */
static int
gm20b_pllg_slide(struct gm20b_clk *clk, u32 n)
{
	struct nvkm_subdev *subdev = &clk->base.base.subdev;
	struct nvkm_device *device = subdev->device;
	struct gm20b_pll pll;
	u32 n_int, sdm_din;
	int ret = 0;

	/* calculate the new n_int/sdm_din for this n/uv */
	gm20b_dvfs_calc_ndiv(clk, n, &n_int, &sdm_din);

	/* get old coefficients */
	gm20b_pllg_read_mnp(clk, &pll);
	/* do nothing if NDIV is the same */
	if (n_int == pll.base.n && sdm_din == pll.sdm_din)
		return 0;

	/* pll slowdown mode */
	nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
		BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT),
		BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT));

	/* new ndiv ready for ramp */
	/* in DVFS mode SDM is updated via "new" field */
	nvkm_mask(device, GPCPLL_CFG2, GPCPLL_CFG2_SDM_DIN_NEW_MASK,
		  sdm_din << GPCPLL_CFG2_SDM_DIN_NEW_SHIFT);
	pll.base.n = n_int;
	udelay(1);
	gk20a_pllg_write_mnp(&clk->base, &pll.base);

	/* dynamic ramp to new ndiv */
	udelay(1);
	nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
		  BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT),
		  BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT));

	/* wait for ramping to complete */
	if (nvkm_wait_usec(device, 500, GPC_BCAST_NDIV_SLOWDOWN_DEBUG,
		GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK,
		GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK) < 0)
		ret = -ETIMEDOUT;

	/* in DVFS mode complete SDM update */
	nvkm_mask(device, GPCPLL_CFG2, GPCPLL_CFG2_SDM_DIN_MASK,
		  sdm_din << GPCPLL_CFG2_SDM_DIN_SHIFT);

	/* exit slowdown mode */
	nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
		BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT) |
		BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT), 0);
	nvkm_rd32(device, GPCPLL_NDIV_SLOWDOWN);

	return ret;
}
321 
/* Enable the GPCPLL and glitchlessly switch the GPC clock to VCO mode. */
static int
gm20b_pllg_enable(struct gm20b_clk *clk)
{
	struct nvkm_device *device = clk->base.base.subdev.device;

	nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, GPCPLL_CFG_ENABLE);
	nvkm_rd32(device, GPCPLL_CFG);	/* posting read to flush the write */

	/* In DVFS mode lock cannot be used - so just delay */
	udelay(40);

	/* set SYNC_MODE for glitchless switch out of bypass */
	nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_SYNC_MODE,
		       GPCPLL_CFG_SYNC_MODE);
	nvkm_rd32(device, GPCPLL_CFG);

	/* switch to VCO mode */
	nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT),
		  BIT(SEL_VCO_GPC2CLK_OUT_SHIFT));

	return 0;
}
344 
/* Switch the GPC clock to bypass, then power the GPCPLL down. */
static void
gm20b_pllg_disable(struct gm20b_clk *clk)
{
	struct nvkm_device *device = clk->base.base.subdev.device;

	/* put PLL in bypass before disabling it */
	nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT), 0);

	/* clear SYNC_MODE before disabling PLL */
	nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_SYNC_MODE, 0);

	nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, 0);
	nvkm_rd32(device, GPCPLL_CFG);	/* posting read to flush the write */
}
359 
/*
 * Fully program the PLL coefficients. If only the post divider changes
 * (same M and same effective N), the transition is done glitchlessly
 * without disabling the PLL; otherwise the PLL is disabled, reprogrammed,
 * and re-enabled. In both cases the output divider is temporarily set to
 * 1:2 to halve the VCO-to-bypass frequency jump.
 */
static int
gm20b_pllg_program_mnp(struct gm20b_clk *clk, const struct gk20a_pll *pll)
{
	struct nvkm_subdev *subdev = &clk->base.base.subdev;
	struct nvkm_device *device = subdev->device;
	struct gm20b_pll cur_pll;
	u32 n_int, sdm_din;
	/* if we only change pdiv, we can do a glitchless transition */
	bool pdiv_only;
	int ret;

	gm20b_dvfs_calc_ndiv(clk, pll->n, &n_int, &sdm_din);
	gm20b_pllg_read_mnp(clk, &cur_pll);
	pdiv_only = cur_pll.base.n == n_int && cur_pll.sdm_din == sdm_din &&
		    cur_pll.base.m == pll->m;

	/* need full sequence if clock not enabled yet */
	if (!gk20a_pllg_is_enabled(&clk->base))
		pdiv_only = false;

	/* split VCO-to-bypass jump in half by setting out divider 1:2 */
	nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
		  GPC2CLK_OUT_VCODIV2 << GPC2CLK_OUT_VCODIV_SHIFT);
	/* Intentional 2nd write to assure linear divider operation */
	nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
		  GPC2CLK_OUT_VCODIV2 << GPC2CLK_OUT_VCODIV_SHIFT);
	nvkm_rd32(device, GPC2CLK_OUT);
	udelay(2);

	if (pdiv_only) {
		u32 old = cur_pll.base.pl;
		u32 new = pll->pl;

		/*
		 * we can do a glitchless transition only if the old and new PL
		 * parameters share at least one bit set to 1. If this is not
		 * the case, calculate and program an interim PL that will allow
		 * us to respect that rule.
		 */
		if ((old & new) == 0) {
			cur_pll.base.pl = min(old | BIT(ffs(new) - 1),
					      new | BIT(ffs(old) - 1));
			gk20a_pllg_write_mnp(&clk->base, &cur_pll.base);
		}

		cur_pll.base.pl = new;
		gk20a_pllg_write_mnp(&clk->base, &cur_pll.base);
	} else {
		/* disable before programming if more than pdiv changes */
		gm20b_pllg_disable(clk);

		cur_pll.base = *pll;
		cur_pll.base.n = n_int;
		cur_pll.sdm_din = sdm_din;
		gm20b_pllg_write_mnp(clk, &cur_pll);

		ret = gm20b_pllg_enable(clk);
		if (ret)
			return ret;
	}

	/* restore out divider 1:1 */
	udelay(2);
	nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
		  GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT);
	/* Intentional 2nd write to assure linear divider operation */
	nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
		  GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT);
	nvkm_rd32(device, GPC2CLK_OUT);

	return 0;
}
432 
/*
 * Program new PLL coefficients, using NDIV slides wherever possible so the
 * transition stays smooth: slide down to NDIV_LO, reprogram M/PL at that
 * low frequency, then slide up to the target NDIV. If only N changes, a
 * single slide is enough.
 */
static int
gm20b_pllg_program_mnp_slide(struct gm20b_clk *clk, const struct gk20a_pll *pll)
{
	struct gk20a_pll cur_pll;
	int ret;

	if (gk20a_pllg_is_enabled(&clk->base)) {
		gk20a_pllg_read_mnp(&clk->base, &cur_pll);

		/* just do NDIV slide if there is no change to M and PL */
		if (pll->m == cur_pll.m && pll->pl == cur_pll.pl)
			return gm20b_pllg_slide(clk, pll->n);

		/* slide down to current NDIV_LO */
		cur_pll.n = gk20a_pllg_n_lo(&clk->base, &cur_pll);
		ret = gm20b_pllg_slide(clk, cur_pll.n);
		if (ret)
			return ret;
	}

	/* program MNP with the new clock parameters and new NDIV_LO */
	cur_pll = *pll;
	cur_pll.n = gk20a_pllg_n_lo(&clk->base, &cur_pll);
	ret = gm20b_pllg_program_mnp(clk, &cur_pll);
	if (ret)
		return ret;

	/* slide up to new NDIV */
	return gm20b_pllg_slide(clk, pll->n);
}
463 
/*
 * Compute the PLL parameters and DVFS settings matching the requested
 * cstate. Results are only staged into new_pll/new_uv/new_dvfs here;
 * they are applied by the prog() hook.
 */
static int
gm20b_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
{
	struct gm20b_clk *clk = gm20b_clk(base);
	struct nvkm_subdev *subdev = &base->subdev;
	struct nvkm_volt *volt = base->subdev.device->volt;
	int ret;

	ret = gk20a_pllg_calc_mnp(&clk->base, cstate->domain[nv_clk_src_gpc] *
					     GK20A_CLK_GPC_MDIV, &clk->new_pll);
	if (ret)
		return ret;

	/* target voltage for this cstate, in uV */
	clk->new_uv = volt->vid[cstate->voltage].uv;
	gm20b_dvfs_calc_det_coeff(clk, clk->new_uv, &clk->new_dvfs);

	nvkm_debug(subdev, "%s uv: %d uv\n", __func__, clk->new_uv);

	return 0;
}
484 
485 /*
486  * Compute PLL parameters that are always safe for the current voltage
487  */
488 static void
489 gm20b_dvfs_calc_safe_pll(struct gm20b_clk *clk, struct gk20a_pll *pll)
490 {
491 	u32 rate = gk20a_pllg_calc_rate(&clk->base, pll) / KHZ;
492 	u32 parent_rate = clk->base.parent_rate / KHZ;
493 	u32 nmin, nsafe;
494 
495 	/* remove a safe margin of 10% */
496 	if (rate > clk->safe_fmax_vmin)
497 		rate = rate * (100 - 10) / 100;
498 
499 	/* gpc2clk */
500 	rate *= 2;
501 
502 	nmin = DIV_ROUND_UP(pll->m * clk->base.params->min_vco, parent_rate);
503 	nsafe = pll->m * rate / (clk->base.parent_rate);
504 
505 	if (nsafe < nmin) {
506 		pll->pl = DIV_ROUND_UP(nmin * parent_rate, pll->m * rate);
507 		nsafe = nmin;
508 	}
509 
510 	pll->n = nsafe;
511 }
512 
/*
 * Program a new DFS coefficient, asserting DFS_EXT_STROBE around the
 * write as required for the external coefficient to be picked up.
 */
static void
gm20b_dvfs_program_coeff(struct gm20b_clk *clk, u32 coeff)
{
	struct nvkm_device *device = clk->base.base.subdev.device;

	/* strobe to read external DFS coefficient */
	nvkm_mask(device, GPC_BCAST_GPCPLL_DVFS2,
		  GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT,
		  GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT);

	nvkm_mask(device, GPCPLL_DVFS0, GPCPLL_DVFS0_DFS_COEFF_MASK,
		  coeff << GPCPLL_DVFS0_DFS_COEFF_SHIFT);

	udelay(1);
	nvkm_mask(device, GPC_BCAST_GPCPLL_DVFS2,
		  GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT, 0);
}
530 
/*
 * Program the external calibration override value and make sure the PLL
 * uses it instead of its internally calibrated one.
 */
static void
gm20b_dvfs_program_ext_cal(struct gm20b_clk *clk, u32 dfs_det_cal)
{
	struct nvkm_device *device = clk->base.base.subdev.device;
	u32 val;

	nvkm_mask(device, GPC_BCAST_GPCPLL_DVFS2, MASK(DFS_DET_RANGE + 1),
		  dfs_det_cal);
	udelay(1);

	val = nvkm_rd32(device, GPCPLL_DVFS1);
	if (!(val & BIT(25))) {
		/*
		 * Use external value to overwrite calibration value.
		 * NOTE(review): bits 25 and 16 fall inside the DFS_CTRL
		 * field (bits 16..27); presumably select/enable bits for
		 * the external override - no named defines exist for them.
		 */
		val |= BIT(25) | BIT(16);
		nvkm_wr32(device, GPCPLL_DVFS1, val);
	}
}
548 
/*
 * Program the complete DVFS detection settings: coefficient and maximum
 * detection limit in one GPCPLL_DVFS0 write, then the external calibration
 * value.
 */
static void
gm20b_dvfs_program_dfs_detection(struct gm20b_clk *clk,
				 struct gm20b_clk_dvfs *dvfs)
{
	struct nvkm_device *device = clk->base.base.subdev.device;

	/* strobe to read external DFS coefficient */
	nvkm_mask(device, GPC_BCAST_GPCPLL_DVFS2,
		  GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT,
		  GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT);

	nvkm_mask(device, GPCPLL_DVFS0,
		  GPCPLL_DVFS0_DFS_COEFF_MASK | GPCPLL_DVFS0_DFS_DET_MAX_MASK,
		  dvfs->dfs_coeff << GPCPLL_DVFS0_DFS_COEFF_SHIFT |
		  dvfs->dfs_det_max << GPCPLL_DVFS0_DFS_DET_MAX_SHIFT);

	udelay(1);
	nvkm_mask(device, GPC_BCAST_GPCPLL_DVFS2,
		  GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT, 0);

	gm20b_dvfs_program_ext_cal(clk, dvfs->dfs_ext_cal);
}
571 
/*
 * Apply the parameters staged by the calc() hook. When the voltage (and
 * thus the DVFS settings) changes, first move to a frequency that is safe
 * with a zero DVFS coefficient, switch the detection settings, and only
 * then program the target PLL parameters.
 */
static int
gm20b_clk_prog(struct nvkm_clk *base)
{
	struct gm20b_clk *clk = gm20b_clk(base);
	u32 cur_freq;
	int ret;

	/* No change in DVFS settings? */
	if (clk->uv == clk->new_uv)
		goto prog;

	/*
	 * Interim step for changing DVFS detection settings: low enough
	 * frequency to be safe at DVFS coeff = 0.
	 *
	 * 1. If voltage is increasing:
	 * - safe frequency target matches the lowest - old - frequency
	 * - DVFS settings are still old
	 * - Voltage already increased to new level by volt, but maximum
	 *   detection limit assures PLL output remains under F/V curve
	 *
	 * 2. If voltage is decreasing:
	 * - safe frequency target matches the lowest - new - frequency
	 * - DVFS settings are still old
	 * - Voltage is also old, it will be lowered by volt afterwards
	 *
	 * Interim step can be skipped if old frequency is below safe minimum,
	 * i.e., it is low enough to be safe at any voltage in operating range
	 * with zero DVFS coefficient.
	 */
	cur_freq = nvkm_clk_read(&clk->base.base, nv_clk_src_gpc);
	if (cur_freq > clk->safe_fmax_vmin) {
		struct gk20a_pll pll_safe;

		if (clk->uv < clk->new_uv)
			/* voltage will raise: safe frequency is current one */
			pll_safe = clk->base.pll;
		else
			/* voltage will drop: safe frequency is new one */
			pll_safe = clk->new_pll;

		gm20b_dvfs_calc_safe_pll(clk, &pll_safe);
		ret = gm20b_pllg_program_mnp_slide(clk, &pll_safe);
		if (ret)
			return ret;
	}

	/*
	 * DVFS detection settings transition:
	 * - Set DVFS coefficient zero
	 * - Set calibration level to new voltage
	 * - Set DVFS coefficient to match new voltage
	 */
	gm20b_dvfs_program_coeff(clk, 0);
	gm20b_dvfs_program_ext_cal(clk, clk->new_dvfs.dfs_ext_cal);
	gm20b_dvfs_program_coeff(clk, clk->new_dvfs.dfs_coeff);
	gm20b_dvfs_program_dfs_detection(clk, &clk->new_dvfs);

prog:
	/* commit the staged parameters and program the PLL */
	clk->uv = clk->new_uv;
	clk->dvfs = clk->new_dvfs;
	clk->base.pll = clk->new_pll;

	return gm20b_pllg_program_mnp_slide(clk, &clk->base.pll);
}
637 
/*
 * Available P-states: gpcclk frequencies in KHz, 76800 KHz apart, each
 * bound to an index into the voltage table.
 */
static struct nvkm_pstate
gm20b_pstates[] = {
	{
		.base = {
			.domain[nv_clk_src_gpc] = 76800,
			.voltage = 0,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 153600,
			.voltage = 1,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 230400,
			.voltage = 2,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 307200,
			.voltage = 3,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 384000,
			.voltage = 4,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 460800,
			.voltage = 5,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 537600,
			.voltage = 6,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 614400,
			.voltage = 7,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 691200,
			.voltage = 8,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 768000,
			.voltage = 9,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 844800,
			.voltage = 10,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 921600,
			.voltage = 11,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 998400,
			.voltage = 12,
		},
	},
};
719 
/*
 * Shut the clock down: slide the PLL down to its minimum VCO frequency,
 * disable it and assert IDDQ.
 */
static void
gm20b_clk_fini(struct nvkm_clk *base)
{
	struct nvkm_device *device = base->subdev.device;
	struct gm20b_clk *clk = gm20b_clk(base);

	/* slide to VCO min */
	if (gk20a_pllg_is_enabled(&clk->base)) {
		struct gk20a_pll pll;
		u32 n_lo;

		gk20a_pllg_read_mnp(&clk->base, &pll);
		n_lo = gk20a_pllg_n_lo(&clk->base, &pll);
		gm20b_pllg_slide(clk, n_lo);
	}

	gm20b_pllg_disable(clk);

	/* set IDDQ */
	nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_IDDQ, 1);
}
741 
742 static int
743 gm20b_clk_init_dvfs(struct gm20b_clk *clk)
744 {
745 	struct nvkm_subdev *subdev = &clk->base.base.subdev;
746 	struct nvkm_device *device = subdev->device;
747 	bool fused = clk->uvdet_offs && clk->uvdet_slope;
748 	static const s32 ADC_SLOPE_UV = 10000; /* default ADC detection slope */
749 	u32 data;
750 	int ret;
751 
752 	/* Enable NA DVFS */
753 	nvkm_mask(device, GPCPLL_DVFS1, GPCPLL_DVFS1_EN_DFS_BIT,
754 		  GPCPLL_DVFS1_EN_DFS_BIT);
755 
756 	/* Set VCO_CTRL */
757 	if (clk->dvfs_params->vco_ctrl)
758 		nvkm_mask(device, GPCPLL_CFG3, GPCPLL_CFG3_VCO_CTRL_MASK,
759 		      clk->dvfs_params->vco_ctrl << GPCPLL_CFG3_VCO_CTRL_SHIFT);
760 
761 	if (fused) {
762 		/* Start internal calibration, but ignore results */
763 		nvkm_mask(device, GPCPLL_DVFS1, GPCPLL_DVFS1_EN_DFS_CAL_BIT,
764 			  GPCPLL_DVFS1_EN_DFS_CAL_BIT);
765 
766 		/* got uvdev parameters from fuse, skip calibration */
767 		goto calibrated;
768 	}
769 
770 	/*
771 	 * If calibration parameters are not fused, start internal calibration,
772 	 * wait for completion, and use results along with default slope to
773 	 * calculate ADC offset during boot.
774 	 */
775 	nvkm_mask(device, GPCPLL_DVFS1, GPCPLL_DVFS1_EN_DFS_CAL_BIT,
776 			  GPCPLL_DVFS1_EN_DFS_CAL_BIT);
777 
778 	/* Wait for internal calibration done (spec < 2us). */
779 	ret = nvkm_wait_usec(device, 10, GPCPLL_DVFS1,
780 			     GPCPLL_DVFS1_DFS_CAL_DONE_BIT,
781 			     GPCPLL_DVFS1_DFS_CAL_DONE_BIT);
782 	if (ret < 0) {
783 		nvkm_error(subdev, "GPCPLL calibration timeout\n");
784 		return -ETIMEDOUT;
785 	}
786 
787 	data = nvkm_rd32(device, GPCPLL_CFG3) >>
788 			 GPCPLL_CFG3_PLL_DFS_TESTOUT_SHIFT;
789 	data &= MASK(GPCPLL_CFG3_PLL_DFS_TESTOUT_WIDTH);
790 
791 	clk->uvdet_slope = ADC_SLOPE_UV;
792 	clk->uvdet_offs = ((s32)clk->uv) - data * ADC_SLOPE_UV;
793 
794 	nvkm_debug(subdev, "calibrated DVFS parameters: offs %d, slope %d\n",
795 		   clk->uvdet_offs, clk->uvdet_slope);
796 
797 calibrated:
798 	/* Compute and apply initial DVFS parameters */
799 	gm20b_dvfs_calc_det_coeff(clk, clk->uv, &clk->dvfs);
800 	gm20b_dvfs_program_coeff(clk, 0);
801 	gm20b_dvfs_program_ext_cal(clk, clk->dvfs.dfs_ext_cal);
802 	gm20b_dvfs_program_coeff(clk, clk->dvfs.dfs_coeff);
803 	gm20b_dvfs_program_dfs_detection(clk, &clk->new_dvfs);
804 
805 	return 0;
806 }
807 
808 /* Forward declaration to detect speedo >=1 in gm20b_clk_init() */
809 static const struct nvkm_clk_func gm20b_clk;
810 
/*
 * Bring the clock out of IDDQ, set up the output divider and bypass
 * control, initialize DVFS on speedo >= 1 parts (detected by the func
 * pointer), program the lowest P-state and register with devfreq.
 */
static int
gm20b_clk_init(struct nvkm_clk *base)
{
	struct gk20a_clk *clk = gk20a_clk(base);
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvkm_device *device = subdev->device;
	int ret;
	u32 data;

	/* get out from IDDQ */
	nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_IDDQ, 0);
	nvkm_rd32(device, GPCPLL_CFG);
	udelay(5);

	nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_INIT_MASK,
		  GPC2CLK_OUT_INIT_VAL);

	/* Set the global bypass control to VCO */
	nvkm_mask(device, BYPASSCTRL_SYS,
	       MASK(BYPASSCTRL_SYS_GPCPLL_WIDTH) << BYPASSCTRL_SYS_GPCPLL_SHIFT,
	       0);

	ret = gk20a_clk_setup_slide(clk);
	if (ret)
		return ret;

	/* If not fused, set RAM SVOP PDP data 0x2, and enable fuse override */
	data = nvkm_rd32(device, 0x021944);
	if (!(data & 0x3)) {
		data |= 0x2;
		nvkm_wr32(device, 0x021944, data);

		data = nvkm_rd32(device, 0x021948);
		data |=  0x1;
		nvkm_wr32(device, 0x021948, data);
	}

	/* Disable idle slow down  */
	nvkm_mask(device, 0x20160, 0x003f0000, 0x0);

	/* speedo >= 1? */
	if (clk->base.func == &gm20b_clk) {
		struct gm20b_clk *_clk = gm20b_clk(base);
		struct nvkm_volt *volt = device->volt;

		/* Get current voltage */
		_clk->uv = nvkm_volt_get(volt);

		/* Initialize DVFS */
		ret = gm20b_clk_init_dvfs(_clk);
		if (ret)
			return ret;
	}

	/* Start with lowest frequency */
	base->func->calc(base, &base->func->pstates[0].base);
	ret = base->func->prog(base);
	if (ret) {
		nvkm_error(subdev, "cannot initialize clock\n");
		return ret;
	}

	ret = gk20a_devfreq_init(base, &clk->devfreq);
	if (ret)
		return ret;

	return 0;
}
879 
/*
 * Speedo 0 parts cannot use noise-aware DVFS: reuse the common gk20a
 * implementation (non-NA hooks) and drop the highest P-state.
 */
static const struct nvkm_clk_func
gm20b_clk_speedo0 = {
	.init = gm20b_clk_init,
	.fini = gk20a_clk_fini,
	.read = gk20a_clk_read,
	.calc = gk20a_clk_calc,
	.prog = gk20a_clk_prog,
	.tidy = gk20a_clk_tidy,
	.pstates = gm20b_pstates,
	/* Speedo 0 only supports 12 voltages */
	.nr_pstates = ARRAY_SIZE(gm20b_pstates) - 1,
	.domains = {
		{ nv_clk_src_crystal, 0xff },
		{ nv_clk_src_gpc, 0xff, 0, "core", GK20A_CLK_GPC_MDIV },
		{ nv_clk_src_max },
	},
};
897 
/* Noise-aware (NAPLL) implementation, used on speedo >= 1 parts */
static const struct nvkm_clk_func
gm20b_clk = {
	.init = gm20b_clk_init,
	.fini = gm20b_clk_fini,
	.read = gk20a_clk_read,
	.calc = gm20b_clk_calc,
	.prog = gm20b_clk_prog,
	.tidy = gk20a_clk_tidy,
	.pstates = gm20b_pstates,
	.nr_pstates = ARRAY_SIZE(gm20b_pstates),
	.domains = {
		{ nv_clk_src_crystal, 0xff },
		{ nv_clk_src_gpc, 0xff, 0, "core", GK20A_CLK_GPC_MDIV },
		{ nv_clk_src_max },
	},
};
914 
/*
 * Constructor for the speedo 0 variant: common gk20a clock with GM20B PLL
 * parameters and the 1:1 PL <-> divider mapping.
 */
static int
gm20b_clk_new_speedo0(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
		      struct nvkm_clk **pclk)
{
	struct gk20a_clk *clk;
	int ret;

	clk = kzalloc(sizeof(*clk), GFP_KERNEL);
	if (!clk)
		return -ENOMEM;
	*pclk = &clk->base;

	ret = gk20a_clk_ctor(device, type, inst, &gm20b_clk_speedo0, &gm20b_pllg_params, clk);
	clk->pl_to_div = pl_to_div;
	clk->div_to_pl = div_to_pl;
	return ret;
}
932 
933 /* FUSE register */
934 #define FUSE_RESERVED_CALIB0	0x204
935 #define FUSE_RESERVED_CALIB0_INTERCEPT_FRAC_SHIFT	0
936 #define FUSE_RESERVED_CALIB0_INTERCEPT_FRAC_WIDTH	4
937 #define FUSE_RESERVED_CALIB0_INTERCEPT_INT_SHIFT	4
938 #define FUSE_RESERVED_CALIB0_INTERCEPT_INT_WIDTH	10
939 #define FUSE_RESERVED_CALIB0_SLOPE_FRAC_SHIFT		14
940 #define FUSE_RESERVED_CALIB0_SLOPE_FRAC_WIDTH		10
941 #define FUSE_RESERVED_CALIB0_SLOPE_INT_SHIFT		24
942 #define FUSE_RESERVED_CALIB0_SLOPE_INT_WIDTH		6
943 #define FUSE_RESERVED_CALIB0_FUSE_REV_SHIFT		30
944 #define FUSE_RESERVED_CALIB0_FUSE_REV_WIDTH		2
945 
/*
 * Read the ADC calibration slope/offset from the fuses. Returns -EINVAL
 * when the fuse revision is 0 (no fused data), in which case the caller
 * falls back to boot-time calibration in gm20b_clk_init_dvfs().
 */
static int
gm20b_clk_init_fused_params(struct gm20b_clk *clk)
{
	struct nvkm_subdev *subdev = &clk->base.base.subdev;
	u32 val = 0;
	u32 rev = 0;

#if IS_ENABLED(CONFIG_ARCH_TEGRA)
	tegra_fuse_readl(FUSE_RESERVED_CALIB0, &val);
	rev = (val >> FUSE_RESERVED_CALIB0_FUSE_REV_SHIFT) &
	      MASK(FUSE_RESERVED_CALIB0_FUSE_REV_WIDTH);
#endif

	/* No fused parameters, we will calibrate later */
	if (rev == 0)
		return -EINVAL;

	/* Integer part in mV + fractional part in uV */
	clk->uvdet_slope = ((val >> FUSE_RESERVED_CALIB0_SLOPE_INT_SHIFT) &
			MASK(FUSE_RESERVED_CALIB0_SLOPE_INT_WIDTH)) * 1000 +
			((val >> FUSE_RESERVED_CALIB0_SLOPE_FRAC_SHIFT) &
			MASK(FUSE_RESERVED_CALIB0_SLOPE_FRAC_WIDTH));

	/* Integer part in mV + fractional part in 100uV */
	clk->uvdet_offs = ((val >> FUSE_RESERVED_CALIB0_INTERCEPT_INT_SHIFT) &
			MASK(FUSE_RESERVED_CALIB0_INTERCEPT_INT_WIDTH)) * 1000 +
			((val >> FUSE_RESERVED_CALIB0_INTERCEPT_FRAC_SHIFT) &
			 MASK(FUSE_RESERVED_CALIB0_INTERCEPT_FRAC_WIDTH)) * 100;

	nvkm_debug(subdev, "fused calibration data: slope %d, offs %d\n",
		   clk->uvdet_slope, clk->uvdet_offs);
	return 0;
}
979 
/*
 * Find the highest frequency that is safe at the lowest supported voltage
 * (90% of the fastest P-state bound to that voltage) and cache it in
 * safe_fmax_vmin. Returns -EINVAL if no P-state uses the lowest voltage.
 */
static int
gm20b_clk_init_safe_fmax(struct gm20b_clk *clk)
{
	struct nvkm_subdev *subdev = &clk->base.base.subdev;
	struct nvkm_volt *volt = subdev->device->volt;
	struct nvkm_pstate *pstates = clk->base.base.func->pstates;
	int nr_pstates = clk->base.base.func->nr_pstates;
	int vmin, id = 0;
	u32 fmax = 0;
	int i;

	/* find lowest voltage we can use */
	vmin = volt->vid[0].uv;
	for (i = 1; i < volt->vid_nr; i++) {
		if (volt->vid[i].uv <= vmin) {
			vmin = volt->vid[i].uv;
			id = volt->vid[i].vid;
		}
	}

	/* find max frequency at this voltage */
	for (i = 0; i < nr_pstates; i++)
		if (pstates[i].base.voltage == id)
			fmax = max(fmax,
				   pstates[i].base.domain[nv_clk_src_gpc]);

	if (!fmax) {
		nvkm_error(subdev, "failed to evaluate safe fmax\n");
		return -EINVAL;
	}

	/* we are safe at 90% of the max frequency */
	clk->safe_fmax_vmin = fmax * (100 - 10) / 100;
	nvkm_debug(subdev, "safe fmax @ vmin = %u Khz\n", clk->safe_fmax_vmin);

	return 0;
}
1017 
/*
 * Main constructor. Speedo 0 GPUs get the plain gk20a implementation;
 * faster parts get the noise-aware PLL with DVFS, with the PLL M range
 * clamped so the NAPLL always runs from max_u.
 */
int
gm20b_clk_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
	      struct nvkm_clk **pclk)
{
	struct nvkm_device_tegra *tdev = device->func->tegra(device);
	struct gm20b_clk *clk;
	struct nvkm_subdev *subdev;
	struct gk20a_clk_pllg_params *clk_params;
	int ret;

	/* Speedo 0 GPUs cannot use noise-aware PLL */
	if (tdev->gpu_speedo_id == 0)
		return gm20b_clk_new_speedo0(device, type, inst, pclk);

	/* Speedo >= 1, use NAPLL */
	clk = kzalloc(sizeof(*clk) + sizeof(*clk_params), GFP_KERNEL);
	if (!clk)
		return -ENOMEM;
	*pclk = &clk->base.base;
	subdev = &clk->base.base.subdev;

	/* duplicate the clock parameters since we will patch them below */
	clk_params = (void *) (clk + 1);
	*clk_params = gm20b_pllg_params;
	ret = gk20a_clk_ctor(device, type, inst, &gm20b_clk, clk_params, &clk->base);
	if (ret)
		return ret;

	/*
	 * NAPLL can only work with max_u, clamp the m range so
	 * gk20a_pllg_calc_mnp always uses it
	 */
	clk_params->max_m = clk_params->min_m = DIV_ROUND_UP(clk_params->max_u,
						(clk->base.parent_rate / KHZ));
	if (clk_params->max_m == 0) {
		nvkm_warn(subdev, "cannot use NAPLL, using legacy clock...\n");
		/*
		 * NOTE(review): gk20a_clk_ctor() already succeeded on clk at
		 * this point; verify that freeing it without a matching
		 * destructor does not leave stale subdev state behind when
		 * the speedo0 constructor re-registers.
		 */
		kfree(clk);
		return gm20b_clk_new_speedo0(device, type, inst, pclk);
	}

	clk->base.pl_to_div = pl_to_div;
	clk->base.div_to_pl = div_to_pl;

	clk->dvfs_params = &gm20b_dvfs_params;

	ret = gm20b_clk_init_fused_params(clk);
	/*
	 * we will calibrate during init - should never happen on
	 * prod parts
	 */
	if (ret)
		nvkm_warn(subdev, "no fused calibration parameters\n");

	ret = gm20b_clk_init_safe_fmax(clk);
	if (ret)
		return ret;

	return 0;
}
1077