xref: /linux/drivers/gpu/drm/nouveau/nvkm/engine/gr/gv100.c (revision fd7d598270724cc787982ea48bbe17ad383a8b7f)
1 /*
2  * Copyright 2018 Red Hat Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  */
22 #include "gf100.h"
23 #include "ctxgf100.h"
24 
25 #include <nvif/class.h>
26 
27 static void
28 gv100_gr_trap_sm(struct gf100_gr *gr, int gpc, int tpc, int sm)
29 {
30 	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
31 	struct nvkm_device *device = subdev->device;
32 	u32 werr = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x730 + (sm * 0x80)));
33 	u32 gerr = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x734 + (sm * 0x80)));
34 	const struct nvkm_enum *warp;
35 	char glob[128];
36 
37 	nvkm_snprintbf(glob, sizeof(glob), gf100_mp_global_error, gerr);
38 	warp = nvkm_enum_find(gf100_mp_warp_error, werr & 0xffff);
39 
40 	nvkm_error(subdev, "GPC%i/TPC%i/SM%d trap: "
41 			   "global %08x [%s] warp %04x [%s]\n",
42 		   gpc, tpc, sm, gerr, glob, werr, warp ? warp->name : "");
43 
44 	nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x730 + sm * 0x80), 0x00000000);
45 	nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x734 + sm * 0x80), gerr);
46 }
47 
/* Handle MP traps for a TPC by servicing each of its two SMs in turn. */
void
gv100_gr_trap_mp(struct gf100_gr *gr, int gpc, int tpc)
{
	int sm;

	for (sm = 0; sm < 2; sm++)
		gv100_gr_trap_sm(gr, gpc, tpc, sm);
}
54 
55 void
56 gv100_gr_init_4188a4(struct gf100_gr *gr)
57 {
58 	struct nvkm_device *device = gr->base.engine.subdev.device;
59 
60 	nvkm_mask(device, 0x4188a4, 0x03000000, 0x03000000);
61 }
62 
63 void
64 gv100_gr_init_shader_exceptions(struct gf100_gr *gr, int gpc, int tpc)
65 {
66 	struct nvkm_device *device = gr->base.engine.subdev.device;
67 	int sm;
68 	for (sm = 0; sm < 0x100; sm += 0x80) {
69 		nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x610), 0x00000001);
70 		nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x72c + sm), 0x00000004);
71 	}
72 }
73 
74 void
75 gv100_gr_init_504430(struct gf100_gr *gr, int gpc, int tpc)
76 {
77 	struct nvkm_device *device = gr->base.engine.subdev.device;
78 	nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x430), 0x403f0000);
79 }
80 
81 void
82 gv100_gr_init_419bd8(struct gf100_gr *gr)
83 {
84 	struct nvkm_device *device = gr->base.engine.subdev.device;
85 	nvkm_mask(device, 0x419bd8, 0x00000700, 0x00000000);
86 }
87 
/*
 * Convert a GPC-relative TPC index into its "non-PES-aware" index: the
 * position the TPC would have if TPCs were numbered by walking the GPC's
 * PESes in order and counting the TPCs owned by each.
 *
 * NOTE(review): assumes some PES in this GPC owns @tpc; if none does, the
 * loop falls through with pes == gr->ppc_nr[gpc] and the mask lookup below
 * reads one past the last PES entry — confirm callers only pass owned TPCs.
 */
u32
gv100_gr_nonpes_aware_tpc(struct gf100_gr *gr, u32 gpc, u32 tpc)
{
	u32 pes, temp, tpc_new = 0;

	/* Sum the TPC counts of every PES before the one that owns @tpc. */
	for (pes = 0; pes < gr->ppc_nr[gpc]; pes++) {
		if (gr->ppc_tpc_mask[gpc][pes] & BIT(tpc))
			break;

		tpc_new += gr->ppc_tpc_nr[gpc][pes];
	}

	/* Add @tpc's rank within its owning PES (count of lower set bits). */
	temp = (BIT(tpc) - 1) & gr->ppc_tpc_mask[gpc][pes];
	temp = hweight32(temp);
	return tpc_new + temp;
}
104 
/*
 * Estimate a relative performance score for the configuration that results
 * from removing TPC @disable_tpc of GPC @disable_gpc from @gpc_tpc_mask.
 *
 * The score packs three fixed-point terms into one integer (pixel perf in
 * bits [29:20], "world" perf in [19:10], TPC balance in [9:0]) so that
 * scores are directly comparable with '>'.  Result is stored in *perf.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or -EINVAL (with a
 * WARN) if the inputs are inconsistent (e.g. the TPC to remove is absent).
 */
static int
gv100_gr_scg_estimate_perf(struct gf100_gr *gr, unsigned long *gpc_tpc_mask,
			   u32 disable_gpc, u32 disable_tpc, int *perf)
{
	const u32 scale_factor = 512UL;		/* Use fx23.9 */
	const u32 pix_scale = 1024*1024UL;	/* Pix perf in [29:20] */
	const u32 world_scale = 1024UL;		/* World performance in [19:10] */
	const u32 tpc_scale = 1;		/* TPC balancing in [9:0] */
	u32 scg_num_pes = 0;
	u32 min_scg_gpc_pix_perf = scale_factor; /* Init perf as maximum */
	u32 average_tpcs = 0; /* Average of # of TPCs per GPC */
	u32 deviation; /* absolute diff between TPC# and average_tpcs, averaged across GPCs */
	u32 norm_tpc_deviation;	/* deviation/max_tpc_per_gpc */
	u32 tpc_balance;
	u32 scg_gpc_pix_perf;
	u32 scg_world_perf;
	u32 gpc;
	u32 pes;
	int diff;
	bool tpc_removed_gpc = false;
	bool tpc_removed_pes = false;
	u32 max_tpc_gpc = 0;
	u32 num_tpc_mask;
	u32 *num_tpc_gpc;
	int ret = -EINVAL;

	/* Scratch array: surviving-TPC count per GPC. */
	if (!(num_tpc_gpc = kcalloc(gr->gpc_nr, sizeof(*num_tpc_gpc), GFP_KERNEL)))
		return -ENOMEM;

	/* Calculate pix-perf-reduction-rate per GPC and find bottleneck TPC */
	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
		num_tpc_mask = gpc_tpc_mask[gpc];

		if ((gpc == disable_gpc) && num_tpc_mask & BIT(disable_tpc)) {
			/* Safety check if a TPC is removed twice */
			if (WARN_ON(tpc_removed_gpc))
				goto done;

			/* Remove logical TPC from set */
			num_tpc_mask &= ~BIT(disable_tpc);
			tpc_removed_gpc = true;
		}

		/* track balancing of tpcs across gpcs */
		num_tpc_gpc[gpc] = hweight32(num_tpc_mask);
		average_tpcs += num_tpc_gpc[gpc];

		/* track the maximum number of TPCs in any single GPC */
		max_tpc_gpc = num_tpc_gpc[gpc] > max_tpc_gpc ? num_tpc_gpc[gpc] : max_tpc_gpc;

		/*
		 * Calculate ratio between TPC count and post-FS and post-SCG
		 *
		 * ratio represents relative throughput of the GPC
		 */
		scg_gpc_pix_perf = scale_factor * num_tpc_gpc[gpc] / gr->tpc_nr[gpc];
		if (min_scg_gpc_pix_perf > scg_gpc_pix_perf)
			min_scg_gpc_pix_perf = scg_gpc_pix_perf;

		/* Calculate # of surviving PES */
		for (pes = 0; pes < gr->ppc_nr[gpc]; pes++) {
			/* Count the number of TPC on the set */
			num_tpc_mask = gr->ppc_tpc_mask[gpc][pes] & gpc_tpc_mask[gpc];

			if ((gpc == disable_gpc) && (num_tpc_mask & BIT(disable_tpc))) {
				if (WARN_ON(tpc_removed_pes))
					goto done;

				num_tpc_mask &= ~BIT(disable_tpc);
				tpc_removed_pes = true;
			}

			/* PES survives if it still owns at least one TPC */
			if (hweight32(num_tpc_mask))
				scg_num_pes++;
		}
	}

	/* The TPC to disable must have been found in exactly one GPC/PES. */
	if (WARN_ON(!tpc_removed_gpc || !tpc_removed_pes))
		goto done;

	if (max_tpc_gpc == 0) {
		*perf = 0;
		goto done_ok;
	}

	/* Now calculate perf */
	scg_world_perf = (scale_factor * scg_num_pes) / gr->ppc_total;
	deviation = 0;
	average_tpcs = scale_factor * average_tpcs / gr->gpc_nr;
	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
		diff = average_tpcs - scale_factor * num_tpc_gpc[gpc];
		if (diff < 0)
			diff = -diff;

		deviation += diff;
	}

	deviation /= gr->gpc_nr;

	norm_tpc_deviation = deviation / max_tpc_gpc;

	tpc_balance = scale_factor - norm_tpc_deviation;

	/* All terms are fractions <= 1.0 in fx23.9; anything else is a bug. */
	if ((tpc_balance > scale_factor)          ||
	    (scg_world_perf > scale_factor)       ||
	    (min_scg_gpc_pix_perf > scale_factor) ||
	    (norm_tpc_deviation > scale_factor)) {
		WARN_ON(1);
		goto done;
	}

	/* Pack the three terms into disjoint bit ranges of the score. */
	*perf = (pix_scale * min_scg_gpc_pix_perf) +
		(world_scale * scg_world_perf) +
		(tpc_scale * tpc_balance);
done_ok:
	ret = 0;
done:
	kfree(num_tpc_gpc);
	return ret;
}
225 
/*
 * Build the SM-ID mapping table (gr->sm[]) by greedily picking, one at a
 * time, the (gpc, tpc) whose removal from the remaining pool yields the
 * highest estimated performance score, until every TPC has been placed.
 *
 * Returns 0 on success or a negative errno (allocation or estimator
 * failure).
 */
int
gv100_gr_oneinit_sm_id(struct gf100_gr *gr)
{
	unsigned long *gpc_tpc_mask;
	u32 *tpc_table, *gpc_table;
	u32 gpc, tpc, pes, gtpc;
	int perf, maxperf, ret = 0;

	gpc_tpc_mask = kcalloc(gr->gpc_nr, sizeof(*gpc_tpc_mask), GFP_KERNEL);
	gpc_table = kcalloc(gr->tpc_total, sizeof(*gpc_table), GFP_KERNEL);
	tpc_table = kcalloc(gr->tpc_total, sizeof(*tpc_table), GFP_KERNEL);
	if (!gpc_table || !tpc_table || !gpc_tpc_mask) {
		ret = -ENOMEM;
		goto done;
	}

	/* Initial pool: every TPC owned by any PES of each GPC. */
	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
		for (pes = 0; pes < gr->ppc_nr[gpc]; pes++)
			gpc_tpc_mask[gpc] |= gr->ppc_tpc_mask[gpc][pes];
	}

	/* Greedy selection: one table entry per global TPC index. */
	for (gtpc = 0; gtpc < gr->tpc_total; gtpc++) {
		for (maxperf = -1, gpc = 0; gpc < gr->gpc_nr; gpc++) {
			for_each_set_bit(tpc, &gpc_tpc_mask[gpc], gr->tpc_nr[gpc]) {
				ret = gv100_gr_scg_estimate_perf(gr, gpc_tpc_mask, gpc, tpc, &perf);
				if (ret)
					goto done;

				/* nvgpu does ">=" here, but this gets us RM's numbers. */
				if (perf > maxperf) {
					maxperf = perf;
					gpc_table[gtpc] = gpc;
					tpc_table[gtpc] = tpc;
				}
			}
		}

		/* Remove the winner from the pool before the next round. */
		gpc_tpc_mask[gpc_table[gtpc]] &= ~BIT(tpc_table[gtpc]);
	}

	/*TODO: build table for sm_per_tpc != 1, don't use yet, but might need later? */
	for (gtpc = 0; gtpc < gr->tpc_total; gtpc++) {
		gr->sm[gtpc].gpc = gpc_table[gtpc];
		gr->sm[gtpc].tpc = tpc_table[gtpc];
		gr->sm_nr++;
	}

done:
	kfree(gpc_table);
	kfree(tpc_table);
	kfree(gpc_tpc_mask);
	return ret;
}
279 
/*
 * GV100 GR implementation: chip-specific hooks (mostly shared with earlier
 * generations), unit counts, context generator, and exposed object classes.
 */
static const struct gf100_gr_func
gv100_gr = {
	.oneinit_tiles = gm200_gr_oneinit_tiles,
	.oneinit_sm_id = gv100_gr_oneinit_sm_id,
	.init = gf100_gr_init,
	.init_419bd8 = gv100_gr_init_419bd8,
	.init_gpc_mmu = gm200_gr_init_gpc_mmu,
	.init_vsc_stream_master = gk104_gr_init_vsc_stream_master,
	.init_zcull = gf117_gr_init_zcull,
	.init_num_active_ltcs = gm200_gr_init_num_active_ltcs,
	.init_rop_active_fbps = gp100_gr_init_rop_active_fbps,
	.init_swdx_pes_mask = gp102_gr_init_swdx_pes_mask,
	.init_fecs_exceptions = gp100_gr_init_fecs_exceptions,
	.init_ds_hww_esr_2 = gm200_gr_init_ds_hww_esr_2,
	.init_sked_hww_esr = gk104_gr_init_sked_hww_esr,
	.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
	.init_504430 = gv100_gr_init_504430,
	.init_shader_exceptions = gv100_gr_init_shader_exceptions,
	.init_rop_exceptions = gf100_gr_init_rop_exceptions,
	.init_exception2 = gf100_gr_init_exception2,
	.init_4188a4 = gv100_gr_init_4188a4,
	.trap_mp = gv100_gr_trap_mp,
	.fecs.reset = gf100_gr_fecs_reset,
	.rops = gm200_gr_rops,
	.gpc_nr = 6,
	.tpc_nr = 7,
	.ppc_nr = 3,
	.grctx = &gv100_grctx,
	.zbc = &gp102_gr_zbc,
	.sclass = {
		{ -1, -1, FERMI_TWOD_A },
		{ -1, -1, KEPLER_INLINE_TO_MEMORY_B },
		{ -1, -1, VOLTA_A, &gf100_fermi },
		{ -1, -1, VOLTA_COMPUTE_A },
		{}
	}
};
317 
/* Firmware images the GV100 GR engine may request at load time. */
MODULE_FIRMWARE("nvidia/gv100/gr/fecs_bl.bin");
MODULE_FIRMWARE("nvidia/gv100/gr/fecs_inst.bin");
MODULE_FIRMWARE("nvidia/gv100/gr/fecs_data.bin");
MODULE_FIRMWARE("nvidia/gv100/gr/fecs_sig.bin");
MODULE_FIRMWARE("nvidia/gv100/gr/gpccs_bl.bin");
MODULE_FIRMWARE("nvidia/gv100/gr/gpccs_inst.bin");
MODULE_FIRMWARE("nvidia/gv100/gr/gpccs_data.bin");
MODULE_FIRMWARE("nvidia/gv100/gr/gpccs_sig.bin");
MODULE_FIRMWARE("nvidia/gv100/gr/sw_ctx.bin");
MODULE_FIRMWARE("nvidia/gv100/gr/sw_nonctx.bin");
MODULE_FIRMWARE("nvidia/gv100/gr/sw_bundle_init.bin");
MODULE_FIRMWARE("nvidia/gv100/gr/sw_method_init.bin");
330 
/*
 * Firmware interface table: version 0 loads external firmware via
 * gm200_gr_load (with gp108 ACR descriptors); -1 is the no-firmware
 * fallback.
 */
static const struct gf100_gr_fwif
gv100_gr_fwif[] = {
	{  0, gm200_gr_load, &gv100_gr, &gp108_gr_fecs_acr, &gp108_gr_gpccs_acr },
	{ -1, gm200_gr_nofw },
	{}
};
337 
/* Instantiate the GV100 GR engine through the common gf100 constructor. */
int
gv100_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
	return gf100_gr_new_(gv100_gr_fwif, device, type, inst, pgr);
}
343