/* xref: /linux/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c (revision e58e871becec2d3b04ed91c0c16fe8deac9c9dfa) */
/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "priv.h"

#include <subdev/bios.h>
#include <subdev/bios/boost.h>
#include <subdev/bios/cstep.h>
#include <subdev/bios/perf.h>
#include <subdev/bios/vpstate.h>
#include <subdev/fb.h>
#include <subdev/therm.h>
#include <subdev/volt.h>

#include <core/option.h>

/******************************************************************************
 * misc
 *****************************************************************************/
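/* Clamp (and, when 'adjust' is set, scale) a clock frequency according to
 * the BIOS boost table entry for the given pstate/domain.  The input value
 * is returned unchanged if no matching boost table entry exists.
 */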
static u32
nvkm_clk_adjust(struct nvkm_clk *clk, bool adjust,
		u8 pstate, u8 domain, u32 input)
{
	struct nvkm_bios *bios = clk->subdev.device->bios;
	struct nvbios_boostE boostE;
	u8  ver, hdr, cnt, len;
	u32 data;

	data = nvbios_boostEm(bios, pstate, &ver, &hdr, &cnt, &len, &boostE);
	if (data) {
		struct nvbios_boostS boostS;
		u8  idx = 0, sver, shdr;
		u32 subd;

		input = max(boostE.min, input);
		input = min(boostE.max, input);
		do {
			sver = ver;
			shdr = hdr;
			subd = nvbios_boostSp(bios, idx++, data, &sver, &shdr,
					      cnt, len, &boostS);
			if (subd && boostS.domain == domain) {
				if (adjust)
					input = input * boostS.percent / 100;
				input = max(boostS.min, input);
				input = min(boostS.max, input);
				break;
			}
		} while (subd);
	}

	return input;
}

/******************************************************************************
 * C-States
 *****************************************************************************/
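/* Check whether a cstate may be used at all: its clocks must respect the
 * configured boost limits and, if a voltage controller is present, its
 * voltage requirement must be satisfiable at the given temperature.
 */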
static bool
nvkm_cstate_valid(struct nvkm_clk *clk, struct nvkm_cstate *cstate,
		  u32 max_volt, int temp)
{
	const struct nvkm_domain *domain = clk->domains;
	struct nvkm_volt *volt = clk->subdev.device->volt;
	int voltage;

	while (domain && domain->name != nv_clk_src_max) {
		if (domain->flags & NVKM_CLK_DOM_FLAG_VPSTATE) {
			u32 freq = cstate->domain[domain->name];
			switch (clk->boost_mode) {
			case NVKM_CLK_BOOST_NONE:
				if (clk->base_khz && freq > clk->base_khz)
					return false;
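				/* fall through: with boosting disabled the
				 * boost limit applies as well
				 */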
			case NVKM_CLK_BOOST_BIOS:
				if (clk->boost_khz && freq > clk->boost_khz)
					return false;
			}
		}
		domain++;
	}

	if (!volt)
		return true;

	voltage = nvkm_volt_map(volt, cstate->voltage, temp);
	if (voltage < 0)
		return false;
	return voltage <= min(max_volt, volt->max_uv);
}

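/* Starting at 'start' and moving towards lower cstates, find the first
 * cstate whose voltage requirement fits within the board's limits (max_uv
 * and the max0/max1/max2 voltage map entries) at the current temperature.
 */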
static struct nvkm_cstate *
nvkm_cstate_find_best(struct nvkm_clk *clk, struct nvkm_pstate *pstate,
		      struct nvkm_cstate *start)
{
	struct nvkm_device *device = clk->subdev.device;
	struct nvkm_volt *volt = device->volt;
	struct nvkm_cstate *cstate;
	int max_volt;

	if (!pstate || !start)
		return NULL;

	if (!volt)
		return start;

	max_volt = volt->max_uv;
	if (volt->max0_id != 0xff)
		max_volt = min(max_volt,
			       nvkm_volt_map(volt, volt->max0_id, clk->temp));
	if (volt->max1_id != 0xff)
		max_volt = min(max_volt,
			       nvkm_volt_map(volt, volt->max1_id, clk->temp));
	if (volt->max2_id != 0xff)
		max_volt = min(max_volt,
			       nvkm_volt_map(volt, volt->max2_id, clk->temp));

	for (cstate = start; &cstate->head != &pstate->list;
	     cstate = list_entry(cstate->head.prev, typeof(*cstate), head)) {
		if (nvkm_cstate_valid(clk, cstate, max_volt, clk->temp))
			break;
	}

	return cstate;
}

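/* Look up a cstate by id on the pstate's list.  NVKM_CLK_CSTATE_HIGHEST
 * selects the last entry (the highest cstate); otherwise NULL is returned
 * if no cstate with the requested id exists.
 */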
static struct nvkm_cstate *
nvkm_cstate_get(struct nvkm_clk *clk, struct nvkm_pstate *pstate, int cstatei)
{
	struct nvkm_cstate *cstate;

	if (cstatei == NVKM_CLK_CSTATE_HIGHEST)
		return list_last_entry(&pstate->list, typeof(*cstate), head);

	list_for_each_entry(cstate, &pstate->list, head) {
		if (cstate->id == cstatei)
			return cstate;
	}
	return NULL;
}

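/* Program the cstate selected by 'cstatei', downgraded if necessary to the
 * best cstate that fits the current voltage and thermal limits.  Fan speed
 * and voltage are raised before reclocking and relaxed again afterwards.
 */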
static int
nvkm_cstate_prog(struct nvkm_clk *clk, struct nvkm_pstate *pstate, int cstatei)
{
	struct nvkm_subdev *subdev = &clk->subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_therm *therm = device->therm;
	struct nvkm_volt *volt = device->volt;
	struct nvkm_cstate *cstate;
	int ret;

	if (!list_empty(&pstate->list)) {
		cstate = nvkm_cstate_get(clk, pstate, cstatei);
		cstate = nvkm_cstate_find_best(clk, pstate, cstate);
	} else {
		cstate = &pstate->base;
	}

	if (therm) {
		ret = nvkm_therm_cstate(therm, pstate->fanspeed, +1);
		if (ret && ret != -ENODEV) {
			nvkm_error(subdev, "failed to raise fan speed: %d\n", ret);
			return ret;
		}
	}

	if (volt) {
		ret = nvkm_volt_set_id(volt, cstate->voltage,
				       pstate->base.voltage, clk->temp, +1);
		if (ret && ret != -ENODEV) {
			nvkm_error(subdev, "failed to raise voltage: %d\n", ret);
			return ret;
		}
	}

	ret = clk->func->calc(clk, cstate);
	if (ret == 0) {
		ret = clk->func->prog(clk);
		clk->func->tidy(clk);
	}

	if (volt) {
		ret = nvkm_volt_set_id(volt, cstate->voltage,
				       pstate->base.voltage, clk->temp, -1);
		if (ret && ret != -ENODEV)
			nvkm_error(subdev, "failed to lower voltage: %d\n", ret);
	}

	if (therm) {
		ret = nvkm_therm_cstate(therm, pstate->fanspeed, -1);
		if (ret && ret != -ENODEV)
			nvkm_error(subdev, "failed to lower fan speed: %d\n", ret);
	}

	return ret;
}

static void
nvkm_cstate_del(struct nvkm_cstate *cstate)
{
	list_del(&cstate->head);
	kfree(cstate);
}

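/* Create a cstate from cstep table entry 'idx' and attach it to the given
 * pstate.  The entry is skipped if its minimum voltage cannot be supplied
 * by the board; core-domain clocks are clamped via the boost table.
 */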
static int
nvkm_cstate_new(struct nvkm_clk *clk, int idx, struct nvkm_pstate *pstate)
{
	struct nvkm_bios *bios = clk->subdev.device->bios;
	struct nvkm_volt *volt = clk->subdev.device->volt;
	const struct nvkm_domain *domain = clk->domains;
	struct nvkm_cstate *cstate = NULL;
	struct nvbios_cstepX cstepX;
	u8  ver, hdr;
	u32 data;

	data = nvbios_cstepXp(bios, idx, &ver, &hdr, &cstepX);
	if (!data)
		return -ENOENT;

	if (volt && nvkm_volt_map_min(volt, cstepX.voltage) > volt->max_uv)
		return -EINVAL;

	cstate = kzalloc(sizeof(*cstate), GFP_KERNEL);
	if (!cstate)
		return -ENOMEM;

	*cstate = pstate->base;
	cstate->voltage = cstepX.voltage;
	cstate->id = idx;

	while (domain && domain->name != nv_clk_src_max) {
		if (domain->flags & NVKM_CLK_DOM_FLAG_CORE) {
			u32 freq = nvkm_clk_adjust(clk, true, pstate->pstate,
						   domain->bios, cstepX.freq);
			cstate->domain[domain->name] = freq;
		}
		domain++;
	}

	list_add(&cstate->head, &pstate->list);
	return 0;
}

/******************************************************************************
 * P-States
 *****************************************************************************/
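/* Switch to performance state 'pstatei': retrain the PCIe link, reclock
 * memory (retrying while the RAM code asks for another pass), then program
 * the highest usable cstate of that pstate.
 */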
static int
nvkm_pstate_prog(struct nvkm_clk *clk, int pstatei)
{
	struct nvkm_subdev *subdev = &clk->subdev;
	struct nvkm_fb *fb = subdev->device->fb;
	struct nvkm_pci *pci = subdev->device->pci;
	struct nvkm_pstate *pstate;
	int ret, idx = 0;

	list_for_each_entry(pstate, &clk->states, head) {
		if (idx++ == pstatei)
			break;
	}

	nvkm_debug(subdev, "setting performance state %d\n", pstatei);
	clk->pstate = pstatei;

	nvkm_pcie_set_link(pci, pstate->pcie_speed, pstate->pcie_width);

	if (fb && fb->ram && fb->ram->func->calc) {
		struct nvkm_ram *ram = fb->ram;
		int khz = pstate->base.domain[nv_clk_src_mem];
		do {
			ret = ram->func->calc(ram, khz);
			if (ret == 0)
				ret = ram->func->prog(ram);
		} while (ret > 0);
		ram->func->tidy(ram);
	}

	return nvkm_cstate_prog(clk, pstate, NVKM_CLK_CSTATE_HIGHEST);
}

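/* Deferred reclock handler: read the current power source, pick a target
 * pstate (user AC/DC request, or the automatic state), clamp it to the
 * available states and the dstate floor, and program it if it changed.
 */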
static void
nvkm_pstate_work(struct work_struct *work)
{
	struct nvkm_clk *clk = container_of(work, typeof(*clk), work);
	struct nvkm_subdev *subdev = &clk->subdev;
	int pstate;

	if (!atomic_xchg(&clk->waiting, 0))
		return;
	clk->pwrsrc = power_supply_is_system_supplied();

	nvkm_trace(subdev, "P %d PWR %d U(AC) %d U(DC) %d A %d T %d°C D %d\n",
		   clk->pstate, clk->pwrsrc, clk->ustate_ac, clk->ustate_dc,
		   clk->astate, clk->temp, clk->dstate);

	pstate = clk->pwrsrc ? clk->ustate_ac : clk->ustate_dc;
	if (clk->state_nr && pstate != -1) {
		pstate = (pstate < 0) ? clk->astate : pstate;
		pstate = min(pstate, clk->state_nr - 1);
		pstate = max(pstate, clk->dstate);
	} else {
		pstate = clk->pstate = -1;
	}

	nvkm_trace(subdev, "-> %d\n", pstate);
	if (pstate != clk->pstate) {
		int ret = nvkm_pstate_prog(clk, pstate);
		if (ret) {
			nvkm_error(subdev, "error setting pstate %d: %d\n",
				   pstate, ret);
		}
	}

	wake_up_all(&clk->wait);
	nvkm_notify_get(&clk->pwrsrc_ntfy);
}

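/* Schedule the reclocking work, optionally waiting for it to complete. */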
static int
nvkm_pstate_calc(struct nvkm_clk *clk, bool wait)
{
	atomic_set(&clk->waiting, 1);
	schedule_work(&clk->work);
	if (wait)
		wait_event(clk->wait, !atomic_read(&clk->waiting));
	return 0;
}

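/* Log a one-line summary of a pstate: its id and, for up to three named
 * clock domains, the frequency (or frequency range across its cstates).
 */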
static void
nvkm_pstate_info(struct nvkm_clk *clk, struct nvkm_pstate *pstate)
{
	const struct nvkm_domain *clock = clk->domains - 1;
	struct nvkm_cstate *cstate;
	struct nvkm_subdev *subdev = &clk->subdev;
	char info[3][32] = { "", "", "" };
	char name[4] = "--";
	int i = -1;

	if (pstate->pstate != 0xff)
		snprintf(name, sizeof(name), "%02x", pstate->pstate);

	while ((++clock)->name != nv_clk_src_max) {
		u32 lo = pstate->base.domain[clock->name];
		u32 hi = lo;
		if (hi == 0)
			continue;

		nvkm_debug(subdev, "%02x: %10d KHz\n", clock->name, lo);
		list_for_each_entry(cstate, &pstate->list, head) {
			u32 freq = cstate->domain[clock->name];
			lo = min(lo, freq);
			hi = max(hi, freq);
			nvkm_debug(subdev, "%10d KHz\n", freq);
		}

		if (clock->mname && ++i < ARRAY_SIZE(info)) {
			lo /= clock->mdiv;
			hi /= clock->mdiv;
			if (lo == hi) {
				snprintf(info[i], sizeof(info[i]), "%s %d MHz",
					 clock->mname, lo);
			} else {
				snprintf(info[i], sizeof(info[i]),
					 "%s %d-%d MHz", clock->mname, lo, hi);
			}
		}
	}

	nvkm_debug(subdev, "%s: %s %s %s\n", name, info[0], info[1], info[2]);
}

static void
nvkm_pstate_del(struct nvkm_pstate *pstate)
{
	struct nvkm_cstate *cstate, *temp;

	list_for_each_entry_safe(cstate, temp, &pstate->list, head) {
		nvkm_cstate_del(cstate);
	}

	list_del(&pstate->head);
	kfree(pstate);
}

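/* Build a pstate from perf table entry 'idx': fill in the base cstate from
 * the entry, apply boost-table clamping to core domains, and create any
 * cstates referenced by the cstep table.
 */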
static int
nvkm_pstate_new(struct nvkm_clk *clk, int idx)
{
	struct nvkm_bios *bios = clk->subdev.device->bios;
	const struct nvkm_domain *domain = clk->domains - 1;
	struct nvkm_pstate *pstate;
	struct nvkm_cstate *cstate;
	struct nvbios_cstepE cstepE;
	struct nvbios_perfE perfE;
	u8  ver, hdr, cnt, len;
	u32 data;

	data = nvbios_perfEp(bios, idx, &ver, &hdr, &cnt, &len, &perfE);
	if (!data)
		return -EINVAL;
	if (perfE.pstate == 0xff)
		return 0;

	pstate = kzalloc(sizeof(*pstate), GFP_KERNEL);
	if (!pstate)
		return -ENOMEM;
	cstate = &pstate->base;

	INIT_LIST_HEAD(&pstate->list);

	pstate->pstate = perfE.pstate;
	pstate->fanspeed = perfE.fanspeed;
	pstate->pcie_speed = perfE.pcie_speed;
	pstate->pcie_width = perfE.pcie_width;
	cstate->voltage = perfE.voltage;
	cstate->domain[nv_clk_src_core] = perfE.core;
	cstate->domain[nv_clk_src_shader] = perfE.shader;
	cstate->domain[nv_clk_src_mem] = perfE.memory;
	cstate->domain[nv_clk_src_vdec] = perfE.vdec;
	cstate->domain[nv_clk_src_dom6] = perfE.disp;

	while (ver >= 0x40 && (++domain)->name != nv_clk_src_max) {
		struct nvbios_perfS perfS;
		u8  sver = ver, shdr = hdr;
		u32 perfSe = nvbios_perfSp(bios, data, domain->bios,
					  &sver, &shdr, cnt, len, &perfS);
		if (perfSe == 0 || sver != 0x40)
			continue;

		if (domain->flags & NVKM_CLK_DOM_FLAG_CORE) {
			perfS.v40.freq = nvkm_clk_adjust(clk, false,
							 pstate->pstate,
							 domain->bios,
							 perfS.v40.freq);
		}

		cstate->domain[domain->name] = perfS.v40.freq;
	}

	data = nvbios_cstepEm(bios, pstate->pstate, &ver, &hdr, &cstepE);
	if (data) {
		int idx = cstepE.index;
		do {
			nvkm_cstate_new(clk, idx, pstate);
		} while (idx--);
	}

	nvkm_pstate_info(clk, pstate);
	list_add_tail(&pstate->head, &clk->states);
	clk->state_nr++;
	return 0;
}

/******************************************************************************
 * Adjustment triggers
 *****************************************************************************/
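/* Translate a user pstate request into an index into the pstate list.
 * -1 (disabled) and -2 (auto) are passed through; other values must match
 * a pstate id from the perf table.  The result is biased by +2 so that
 * callers can tell errors apart from valid (negative) requests.
 */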
static int
nvkm_clk_ustate_update(struct nvkm_clk *clk, int req)
{
	struct nvkm_pstate *pstate;
	int i = 0;

	if (!clk->allow_reclock)
		return -ENOSYS;

	if (req != -1 && req != -2) {
		list_for_each_entry(pstate, &clk->states, head) {
			if (pstate->pstate == req)
				break;
			i++;
		}

		if (pstate->pstate != req)
			return -EINVAL;
		req = i;
	}

	return req + 2;
}

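/* Parse an NvClkMode-style option string ("auto", "disabled", or a pstate
 * id) into a ustate value: -2 for auto, -1 for disabled or invalid input,
 * or a validated pstate index.
 */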
static int
nvkm_clk_nstate(struct nvkm_clk *clk, const char *mode, int arglen)
{
	int ret = 1;

	if (clk->allow_reclock && !strncasecmpz(mode, "auto", arglen))
		return -2;

	if (strncasecmpz(mode, "disabled", arglen)) {
		char save = mode[arglen];
		long v;

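		/* the option substring is not NUL-terminated; terminate it
		 * temporarily for kstrtol() and restore the byte afterwards
		 */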
		((char *)mode)[arglen] = '\0';
		if (!kstrtol(mode, 0, &v)) {
			ret = nvkm_clk_ustate_update(clk, v);
			if (ret < 0)
				ret = 1;
		}
		((char *)mode)[arglen] = save;
	}

	return ret - 2;
}

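/* Set the user-requested pstate for either AC (pwr != 0) or DC operation
 * and trigger a reclock.
 */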
int
nvkm_clk_ustate(struct nvkm_clk *clk, int req, int pwr)
{
	int ret = nvkm_clk_ustate_update(clk, req);
	if (ret >= 0) {
		if (ret -= 2, pwr) clk->ustate_ac = ret;
		else		   clk->ustate_dc = ret;
		return nvkm_pstate_calc(clk, true);
	}
	return ret;
}

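/* Adjust the pstate used for automatic selection, either absolutely or
 * relatively, and reclock.
 */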
int
nvkm_clk_astate(struct nvkm_clk *clk, int req, int rel, bool wait)
{
	if (!rel) clk->astate  = req;
	if ( rel) clk->astate += rel;
	clk->astate = min(clk->astate, clk->state_nr - 1);
	clk->astate = max(clk->astate, 0);
	return nvkm_pstate_calc(clk, wait);
}

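/* Record a new temperature reading and reclock only if it changed. */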
int
nvkm_clk_tstate(struct nvkm_clk *clk, u8 temp)
{
	if (clk->temp == temp)
		return 0;
	clk->temp = temp;
	return nvkm_pstate_calc(clk, false);
}

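/* Adjust the dstate floor (the minimum pstate the work handler will pick),
 * either absolutely or relatively, and reclock.
 */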
int
nvkm_clk_dstate(struct nvkm_clk *clk, int req, int rel)
{
	if (!rel) clk->dstate  = req;
	if ( rel) clk->dstate += rel;
	clk->dstate = min(clk->dstate, clk->state_nr - 1);
	clk->dstate = max(clk->dstate, 0);
	return nvkm_pstate_calc(clk, true);
}

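/* Power-source change notification: re-evaluate the pstate asynchronously. */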
static int
nvkm_clk_pwrsrc(struct nvkm_notify *notify)
{
	struct nvkm_clk *clk =
		container_of(notify, typeof(*clk), pwrsrc_ntfy);
	nvkm_pstate_calc(clk, false);
	return NVKM_NOTIFY_DROP;
}

/******************************************************************************
 * subdev base class implementation
 *****************************************************************************/

int
nvkm_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
{
	return clk->func->read(clk, src);
}

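/* Suspend/teardown: stop listening for power-source events, wait for any
 * pending reclock work, then call the implementation's fini hook if present.
 */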
static int
nvkm_clk_fini(struct nvkm_subdev *subdev, bool suspend)
{
	struct nvkm_clk *clk = nvkm_clk(subdev);
	nvkm_notify_put(&clk->pwrsrc_ntfy);
	flush_work(&clk->work);
	if (clk->func->fini)
		clk->func->fini(clk);
	return 0;
}

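/* Read back the boot clocks into the boot pstate, then either defer to the
 * implementation's init hook or fall back to the default policy (highest
 * automatic state, no dstate floor, 90°C assumed until the first reading).
 */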
static int
nvkm_clk_init(struct nvkm_subdev *subdev)
{
	struct nvkm_clk *clk = nvkm_clk(subdev);
	const struct nvkm_domain *clock = clk->domains;
	int ret;

	memset(&clk->bstate, 0x00, sizeof(clk->bstate));
	INIT_LIST_HEAD(&clk->bstate.list);
	clk->bstate.pstate = 0xff;

	while (clock->name != nv_clk_src_max) {
		ret = nvkm_clk_read(clk, clock->name);
		if (ret < 0) {
			nvkm_error(subdev, "%02x freq unknown\n", clock->name);
			return ret;
		}
		clk->bstate.base.domain[clock->name] = ret;
		clock++;
	}

	nvkm_pstate_info(clk, &clk->bstate);

	if (clk->func->init)
		return clk->func->init(clk);

	clk->astate = clk->state_nr - 1;
	clk->dstate = 0;
	clk->pstate = -1;
	clk->temp = 90; /* reasonable default value */
	nvkm_pstate_calc(clk, true);
	return 0;
}

static void *
nvkm_clk_dtor(struct nvkm_subdev *subdev)
{
	struct nvkm_clk *clk = nvkm_clk(subdev);
	struct nvkm_pstate *pstate, *temp;

	nvkm_notify_fini(&clk->pwrsrc_ntfy);

	/* Early return if the pstates have been provided statically */
	if (clk->func->pstates)
		return clk;

	list_for_each_entry_safe(pstate, temp, &clk->states, head) {
		nvkm_pstate_del(pstate);
	}

	return clk;
}

static const struct nvkm_subdev_func
nvkm_clk = {
	.dtor = nvkm_clk_dtor,
	.init = nvkm_clk_init,
	.fini = nvkm_clk_fini,
};

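/* Common constructor: parse base/boost clock limits from the BIOS vpstate
 * table, build the pstate list (from the perf tables unless the
 * implementation provides static pstates), register for power-source
 * notifications and apply the NvClkMode/NvBoost config options.
 */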
int
nvkm_clk_ctor(const struct nvkm_clk_func *func, struct nvkm_device *device,
	      int index, bool allow_reclock, struct nvkm_clk *clk)
{
	struct nvkm_subdev *subdev = &clk->subdev;
	struct nvkm_bios *bios = device->bios;
	int ret, idx, arglen;
	const char *mode;
	struct nvbios_vpstate_header h;

	nvkm_subdev_ctor(&nvkm_clk, device, index, subdev);

	if (bios && !nvbios_vpstate_parse(bios, &h)) {
		struct nvbios_vpstate_entry base, boost;
		if (!nvbios_vpstate_entry(bios, &h, h.boost_id, &boost))
			clk->boost_khz = boost.clock_mhz * 1000;
		if (!nvbios_vpstate_entry(bios, &h, h.base_id, &base))
			clk->base_khz = base.clock_mhz * 1000;
	}

	clk->func = func;
	INIT_LIST_HEAD(&clk->states);
	clk->domains = func->domains;
	clk->ustate_ac = -1;
	clk->ustate_dc = -1;
	clk->allow_reclock = allow_reclock;

	INIT_WORK(&clk->work, nvkm_pstate_work);
	init_waitqueue_head(&clk->wait);
	atomic_set(&clk->waiting, 0);

	/* If no pstates are provided, try and fetch them from the BIOS */
	if (!func->pstates) {
		idx = 0;
		do {
			ret = nvkm_pstate_new(clk, idx++);
		} while (ret == 0);
	} else {
		for (idx = 0; idx < func->nr_pstates; idx++)
			list_add_tail(&func->pstates[idx].head, &clk->states);
		clk->state_nr = func->nr_pstates;
	}

	ret = nvkm_notify_init(NULL, &device->event, nvkm_clk_pwrsrc, true,
			       NULL, 0, 0, &clk->pwrsrc_ntfy);
	if (ret)
		return ret;

	mode = nvkm_stropt(device->cfgopt, "NvClkMode", &arglen);
	if (mode) {
		clk->ustate_ac = nvkm_clk_nstate(clk, mode, arglen);
		clk->ustate_dc = nvkm_clk_nstate(clk, mode, arglen);
	}

	mode = nvkm_stropt(device->cfgopt, "NvClkModeAC", &arglen);
	if (mode)
		clk->ustate_ac = nvkm_clk_nstate(clk, mode, arglen);

	mode = nvkm_stropt(device->cfgopt, "NvClkModeDC", &arglen);
	if (mode)
		clk->ustate_dc = nvkm_clk_nstate(clk, mode, arglen);

	clk->boost_mode = nvkm_longopt(device->cfgopt, "NvBoost",
				       NVKM_CLK_BOOST_NONE);
	return 0;
}

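/* Allocate and construct a clock subdevice. */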
int
nvkm_clk_new_(const struct nvkm_clk_func *func, struct nvkm_device *device,
	      int index, bool allow_reclock, struct nvkm_clk **pclk)
{
	if (!(*pclk = kzalloc(sizeof(**pclk), GFP_KERNEL)))
		return -ENOMEM;
	return nvkm_clk_ctor(func, device, index, allow_reclock, *pclk);
}