xref: /linux/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv40.c (revision 7f5f518fd70b1b72ca4cf8249ca3306846383ed4)
1 /*
2  * Copyright 2012 Red Hat Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: Ben Skeggs
23  */
24 #include <subdev/clk.h>
25 #include "pll.h"
26 
27 #include <subdev/bios.h>
28 #include <subdev/bios/pll.h>
29 
/* Per-device clock state: shadow copies of the register values computed by
 * nv40_clk_calc(), written to hardware later by nv40_clk_prog().
 */
struct nv40_clk {
	struct nvkm_clk base;
	u32 ctrl;	/* 0x00c040 clock source mux bits */
	u32 npll_ctrl;	/* 0x004000 core (NVPLL) control */
	u32 npll_coef;	/* 0x004004 core (NVPLL) coefficients */
	u32 spll;	/* 0x004008 shader PLL control+coefficients */
};
37 
/* Clock domains exposed on NV40: crystal/href are fixed references (0xff =
 * no mux bias), the remaining entries are reported in kHz units scaled by
 * the divisor in the last field.
 */
static struct nvkm_domain
nv40_domain[] = {
	{ nv_clk_src_crystal, 0xff },
	{ nv_clk_src_href   , 0xff },
	{ nv_clk_src_core   , 0xff, 0, "core", 1000 },
	{ nv_clk_src_shader , 0xff, 0, "shader", 1000 },
	{ nv_clk_src_mem    , 0xff, 0, "memory", 1000 },
	{ nv_clk_src_max }
};
47 
48 static u32
49 read_pll_1(struct nv40_clk *clk, u32 reg)
50 {
51 	struct nvkm_device *device = clk->base.subdev.device;
52 	u32 ctrl = nvkm_rd32(device, reg + 0x00);
53 	int P = (ctrl & 0x00070000) >> 16;
54 	int N = (ctrl & 0x0000ff00) >> 8;
55 	int M = (ctrl & 0x000000ff) >> 0;
56 	u32 ref = 27000, khz = 0;
57 
58 	if (ctrl & 0x80000000)
59 		khz = ref * N / M;
60 
61 	return khz >> P;
62 }
63 
64 static u32
65 read_pll_2(struct nv40_clk *clk, u32 reg)
66 {
67 	struct nvkm_device *device = clk->base.subdev.device;
68 	u32 ctrl = nvkm_rd32(device, reg + 0x00);
69 	u32 coef = nvkm_rd32(device, reg + 0x04);
70 	int N2 = (coef & 0xff000000) >> 24;
71 	int M2 = (coef & 0x00ff0000) >> 16;
72 	int N1 = (coef & 0x0000ff00) >> 8;
73 	int M1 = (coef & 0x000000ff) >> 0;
74 	int P = (ctrl & 0x00070000) >> 16;
75 	u32 ref = 27000, khz = 0;
76 
77 	if ((ctrl & 0x80000000) && M1) {
78 		khz = ref * N1 / M1;
79 		if ((ctrl & 0x40000100) == 0x40000000) {
80 			if (M2)
81 				khz = khz * N2 / M2;
82 			else
83 				khz = 0;
84 		}
85 	}
86 
87 	return khz >> P;
88 }
89 
90 static u32
91 read_clk(struct nv40_clk *clk, u32 src)
92 {
93 	switch (src) {
94 	case 3:
95 		return read_pll_2(clk, 0x004000);
96 	case 2:
97 		return read_pll_1(clk, 0x004008);
98 	default:
99 		break;
100 	}
101 
102 	return 0;
103 }
104 
105 static int
106 nv40_clk_read(struct nvkm_clk *obj, enum nv_clk_src src)
107 {
108 	struct nv40_clk *clk = container_of(obj, typeof(*clk), base);
109 	struct nvkm_subdev *subdev = &clk->base.subdev;
110 	struct nvkm_device *device = subdev->device;
111 	u32 mast = nvkm_rd32(device, 0x00c040);
112 
113 	switch (src) {
114 	case nv_clk_src_crystal:
115 		return device->crystal;
116 	case nv_clk_src_href:
117 		return 100000; /*XXX: PCIE/AGP differ*/
118 	case nv_clk_src_core:
119 		return read_clk(clk, (mast & 0x00000003) >> 0);
120 	case nv_clk_src_shader:
121 		return read_clk(clk, (mast & 0x00000030) >> 4);
122 	case nv_clk_src_mem:
123 		return read_pll_2(clk, 0x4020);
124 	default:
125 		break;
126 	}
127 
128 	nvkm_debug(subdev, "unknown clock source %d %08x\n", src, mast);
129 	return -EINVAL;
130 }
131 
/* Compute PLL coefficients for @khz on the PLL described at @reg by the
 * VBIOS tables.  On success returns the achieved frequency (positive, from
 * nv04_pll_calc()); returns -ERANGE if no usable coefficients exist, or a
 * negative error from the VBIOS parse.  N2/M2 may be NULL for single-stage
 * callers (see the shader PLL in nv40_clk_calc()).
 */
static int
nv40_clk_calc_pll(struct nv40_clk *clk, u32 reg, u32 khz,
		  int *N1, int *M1, int *N2, int *M2, int *log2P)
{
	struct nvkm_bios *bios = nvkm_bios(clk);
	struct nvbios_pll pll;
	int ret;

	ret = nvbios_pll_parse(bios, reg, &pll);
	if (ret)
		return ret;

	/* If the target is reachable by VCO1 alone, disable the second
	 * stage so nv04_pll_calc() only uses the first VCO.
	 */
	if (khz < pll.vco1.max_freq)
		pll.vco2.max_freq = 0;

	ret = nv04_pll_calc(nv_subdev(clk), &pll, khz, N1, M1, N2, M2, log2P);
	if (ret == 0)
		return -ERANGE;	/* calc returns 0 when nothing fits */

	return ret;
}
153 
/* nvkm_clk.calc hook: compute the register values for the requested
 * cstate (core + shader clocks) and stash them in the nv40_clk shadow
 * fields; nothing is written to hardware until nv40_clk_prog().
 */
static int
nv40_clk_calc(struct nvkm_clk *obj, struct nvkm_cstate *cstate)
{
	struct nv40_clk *clk = container_of(obj, typeof(*clk), base);
	int gclk = cstate->domain[nv_clk_src_core];
	int sclk = cstate->domain[nv_clk_src_shader];
	int N1, M1, N2, M2, log2P;
	int ret;

	/* core/geometric clock */
	ret = nv40_clk_calc_pll(clk, 0x004000, gclk,
				&N1, &M1, &N2, &M2, &log2P);
	if (ret < 0)
		return ret;

	if (N2 == M2) {
		/* second stage is 1:1 - run single-stage (bit8 set) */
		clk->npll_ctrl = 0x80000100 | (log2P << 16);
		clk->npll_coef = (N1 << 8) | M1;
	} else {
		/* enable both VCO stages (bits 31+30) */
		clk->npll_ctrl = 0xc0000000 | (log2P << 16);
		clk->npll_coef = (N2 << 24) | (M2 << 16) | (N1 << 8) | M1;
	}

	/* use the second pll for shader/rop clock, if it differs from core */
	if (sclk && sclk != gclk) {
		ret = nv40_clk_calc_pll(clk, 0x004008, sclk,
					&N1, &M1, NULL, NULL, &log2P);
		if (ret < 0)
			return ret;

		clk->spll = 0xc0000000 | (log2P << 16) | (N1 << 8) | M1;
		clk->ctrl = 0x00000223;	/* shader sourced from SPLL */
	} else {
		clk->spll = 0x00000000;
		clk->ctrl = 0x00000333;	/* everything from core NVPLL */
	}

	return 0;
}
193 
/* nvkm_clk.prog hook: push the values computed by nv40_clk_calc() to the
 * hardware.  Order matters: detach consumers from the PLLs first, then
 * reprogram the PLLs, wait for lock, and finally switch the muxes back.
 */
static int
nv40_clk_prog(struct nvkm_clk *obj)
{
	struct nv40_clk *clk = container_of(obj, typeof(*clk), base);
	struct nvkm_device *device = clk->base.subdev.device;
	/* route all clocks away from the PLLs while they change */
	nvkm_mask(device, 0x00c040, 0x00000333, 0x00000000);
	nvkm_wr32(device, 0x004004, clk->npll_coef);
	nvkm_mask(device, 0x004000, 0xc0070100, clk->npll_ctrl);
	nvkm_mask(device, 0x004008, 0xc007ffff, clk->spll);
	mdelay(5);	/* allow PLLs to lock before re-muxing */
	nvkm_mask(device, 0x00c040, 0x00000333, clk->ctrl);
	return 0;
}
207 
/* nvkm_clk.tidy hook: nothing to undo after a failed/aborted reclock on
 * this chipset, but the callback is required by the interface.
 */
static void
nv40_clk_tidy(struct nvkm_clk *obj)
{
}
212 
/* Constructor for the NV40 clk subdev object: create the base clk with
 * the NV40 domain table and wire up the chipset-specific hooks.
 */
static int
nv40_clk_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
	      struct nvkm_oclass *oclass, void *data, u32 size,
	      struct nvkm_object **pobject)
{
	struct nv40_clk *clk;
	int ret;

	ret = nvkm_clk_create(parent, engine, oclass, nv40_domain,
			      NULL, 0, true, &clk);
	/* *pobject must be set even on failure so the core can destroy
	 * a partially-constructed object (old nvkm object convention).
	 */
	*pobject = nv_object(clk);
	if (ret)
		return ret;

	clk->base.pll_calc = nv04_clk_pll_calc;
	clk->base.pll_prog = nv04_clk_pll_prog;
	clk->base.read = nv40_clk_read;
	clk->base.calc = nv40_clk_calc;
	clk->base.prog = nv40_clk_prog;
	clk->base.tidy = nv40_clk_tidy;
	return 0;
}
235 
/* Object class registered for the CLK subdev on NV40-family (0x40)
 * chipsets; dtor/init/fini use the generic nvkm_clk implementations.
 */
struct nvkm_oclass
nv40_clk_oclass = {
	.handle = NV_SUBDEV(CLK, 0x40),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = nv40_clk_ctor,
		.dtor = _nvkm_clk_dtor,
		.init = _nvkm_clk_init,
		.fini = _nvkm_clk_fini,
	},
};
246