xref: /linux/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv40.c (revision d0034a7a4ac7fae708146ac0059b9c47a1543f0d)
1 /*
2  * Copyright 2012 Red Hat Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: Ben Skeggs
23  */
24 #define nv40_clk(p) container_of((p), struct nv40_clk, base)
25 #include "priv.h"
26 #include "pll.h"
27 
28 #include <subdev/bios.h>
29 #include <subdev/bios/pll.h>
30 
/* Per-device clock state: register values computed by nv40_clk_calc()
 * and later committed to hardware by nv40_clk_prog(). */
struct nv40_clk {
	struct nvkm_clk base;
	u32 ctrl;	/* clock source mux, written to 0x00c040 */
	u32 npll_ctrl;	/* core PLL control, masked into 0x004000 */
	u32 npll_coef;	/* core PLL coefficients, written to 0x004004 */
	u32 spll;	/* shader PLL control+coef, masked into 0x004008 */
};
38 
39 static u32
read_pll_1(struct nv40_clk * clk,u32 reg)40 read_pll_1(struct nv40_clk *clk, u32 reg)
41 {
42 	struct nvkm_device *device = clk->base.subdev.device;
43 	u32 ctrl = nvkm_rd32(device, reg + 0x00);
44 	int P = (ctrl & 0x00070000) >> 16;
45 	int N = (ctrl & 0x0000ff00) >> 8;
46 	int M = (ctrl & 0x000000ff) >> 0;
47 	u32 ref = 27000, khz = 0;
48 
49 	if (ctrl & 0x80000000)
50 		khz = ref * N / M;
51 
52 	return khz >> P;
53 }
54 
55 static u32
read_pll_2(struct nv40_clk * clk,u32 reg)56 read_pll_2(struct nv40_clk *clk, u32 reg)
57 {
58 	struct nvkm_device *device = clk->base.subdev.device;
59 	u32 ctrl = nvkm_rd32(device, reg + 0x00);
60 	u32 coef = nvkm_rd32(device, reg + 0x04);
61 	int N2 = (coef & 0xff000000) >> 24;
62 	int M2 = (coef & 0x00ff0000) >> 16;
63 	int N1 = (coef & 0x0000ff00) >> 8;
64 	int M1 = (coef & 0x000000ff) >> 0;
65 	int P = (ctrl & 0x00070000) >> 16;
66 	u32 ref = 27000, khz = 0;
67 
68 	if ((ctrl & 0x80000000) && M1) {
69 		khz = ref * N1 / M1;
70 		if ((ctrl & 0x40000100) == 0x40000000) {
71 			if (M2)
72 				khz = khz * N2 / M2;
73 			else
74 				khz = 0;
75 		}
76 	}
77 
78 	return khz >> P;
79 }
80 
81 static u32
read_clk(struct nv40_clk * clk,u32 src)82 read_clk(struct nv40_clk *clk, u32 src)
83 {
84 	switch (src) {
85 	case 3:
86 		return read_pll_2(clk, 0x004000);
87 	case 2:
88 		return read_pll_1(clk, 0x004008);
89 	default:
90 		break;
91 	}
92 
93 	return 0;
94 }
95 
96 static int
nv40_clk_read(struct nvkm_clk * base,enum nv_clk_src src)97 nv40_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
98 {
99 	struct nv40_clk *clk = nv40_clk(base);
100 	struct nvkm_subdev *subdev = &clk->base.subdev;
101 	struct nvkm_device *device = subdev->device;
102 	u32 mast = nvkm_rd32(device, 0x00c040);
103 
104 	switch (src) {
105 	case nv_clk_src_crystal:
106 		return device->crystal;
107 	case nv_clk_src_href:
108 		return 100000; /*XXX: PCIE/AGP differ*/
109 	case nv_clk_src_core:
110 		return read_clk(clk, (mast & 0x00000003) >> 0);
111 	case nv_clk_src_shader:
112 		return read_clk(clk, (mast & 0x00000030) >> 4);
113 	case nv_clk_src_mem:
114 		return read_pll_2(clk, 0x4020);
115 	default:
116 		break;
117 	}
118 
119 	nvkm_debug(subdev, "unknown clock source %d %08x\n", src, mast);
120 	return -EINVAL;
121 }
122 
123 static int
nv40_clk_calc_pll(struct nv40_clk * clk,u32 reg,u32 khz,int * N1,int * M1,int * N2,int * M2,int * log2P)124 nv40_clk_calc_pll(struct nv40_clk *clk, u32 reg, u32 khz,
125 		  int *N1, int *M1, int *N2, int *M2, int *log2P)
126 {
127 	struct nvkm_subdev *subdev = &clk->base.subdev;
128 	struct nvbios_pll pll;
129 	int ret;
130 
131 	ret = nvbios_pll_parse(subdev->device->bios, reg, &pll);
132 	if (ret)
133 		return ret;
134 
135 	if (khz < pll.vco1.max_freq)
136 		pll.vco2.max_freq = 0;
137 
138 	ret = nv04_pll_calc(subdev, &pll, khz, N1, M1, N2, M2, log2P);
139 	if (ret == 0)
140 		return -ERANGE;
141 
142 	return ret;
143 }
144 
145 static int
nv40_clk_calc(struct nvkm_clk * base,struct nvkm_cstate * cstate)146 nv40_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
147 {
148 	struct nv40_clk *clk = nv40_clk(base);
149 	int gclk = cstate->domain[nv_clk_src_core];
150 	int sclk = cstate->domain[nv_clk_src_shader];
151 	int N1, M1, N2, M2, log2P;
152 	int ret;
153 
154 	/* core/geometric clock */
155 	ret = nv40_clk_calc_pll(clk, 0x004000, gclk,
156 				&N1, &M1, &N2, &M2, &log2P);
157 	if (ret < 0)
158 		return ret;
159 
160 	if (N2 == M2) {
161 		clk->npll_ctrl = 0x80000100 | (log2P << 16);
162 		clk->npll_coef = (N1 << 8) | M1;
163 	} else {
164 		clk->npll_ctrl = 0xc0000000 | (log2P << 16);
165 		clk->npll_coef = (N2 << 24) | (M2 << 16) | (N1 << 8) | M1;
166 	}
167 
168 	/* use the second pll for shader/rop clock, if it differs from core */
169 	if (sclk && sclk != gclk) {
170 		ret = nv40_clk_calc_pll(clk, 0x004008, sclk,
171 					&N1, &M1, NULL, NULL, &log2P);
172 		if (ret < 0)
173 			return ret;
174 
175 		clk->spll = 0xc0000000 | (log2P << 16) | (N1 << 8) | M1;
176 		clk->ctrl = 0x00000223;
177 	} else {
178 		clk->spll = 0x00000000;
179 		clk->ctrl = 0x00000333;
180 	}
181 
182 	return 0;
183 }
184 
/* Commit the register values prepared by nv40_clk_calc() to hardware.
 * The write ordering matters: the source muxes in 0x00c040 are cleared
 * first so the PLLs aren't driving anything while being reprogrammed,
 * then restored after a settle delay. */
static int
nv40_clk_prog(struct nvkm_clk *base)
{
	struct nv40_clk *clk = nv40_clk(base);
	struct nvkm_device *device = clk->base.subdev.device;
	/* Deselect PLL-driven sources (core bits 0x3, shader bits 0x30). */
	nvkm_mask(device, 0x00c040, 0x00000333, 0x00000000);
	nvkm_wr32(device, 0x004004, clk->npll_coef);
	nvkm_mask(device, 0x004000, 0xc0070100, clk->npll_ctrl);
	nvkm_mask(device, 0x004008, 0xc007ffff, clk->spll);
	/* Give the PLLs time to lock before re-enabling the muxes
	 * (presumably a lock-time bound — no status poll here). */
	mdelay(5);
	nvkm_mask(device, 0x00c040, 0x00000333, clk->ctrl);
	return 0;
}
198 
/* Nothing to undo after a calc/prog cycle on this generation; empty stub
 * provided to fill the .tidy hook in nvkm_clk_func below. */
static void
nv40_clk_tidy(struct nvkm_clk *obj)
{
}
203 
/* Hook table and exposed clock domains for the nv40 implementation. */
static const struct nvkm_clk_func
nv40_clk = {
	.read = nv40_clk_read,
	.calc = nv40_clk_calc,
	.prog = nv40_clk_prog,
	.tidy = nv40_clk_tidy,
	.domains = {
		{ nv_clk_src_crystal, 0xff },
		{ nv_clk_src_href   , 0xff },
		{ nv_clk_src_core   , 0xff, 0, "core", 1000 },
		{ nv_clk_src_shader , 0xff, 0, "shader", 1000 },
		{ nv_clk_src_mem    , 0xff, 0, "memory", 1000 },
		{ nv_clk_src_max }	/* list terminator */
	}
};
219 
220 int
nv40_clk_new(struct nvkm_device * device,enum nvkm_subdev_type type,int inst,struct nvkm_clk ** pclk)221 nv40_clk_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
222 	     struct nvkm_clk **pclk)
223 {
224 	struct nv40_clk *clk;
225 
226 	if (!(clk = kzalloc(sizeof(*clk), GFP_KERNEL)))
227 		return -ENOMEM;
228 	clk->base.pll_calc = nv04_clk_pll_calc;
229 	clk->base.pll_prog = nv04_clk_pll_prog;
230 	*pclk = &clk->base;
231 
232 	return nvkm_clk_ctor(&nv40_clk, device, type, inst, true, &clk->base);
233 }
234