xref: /linux/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm200.c (revision 0d08df6c493898e679d9c517e77ea95c063d40ec)
1 /*
2  * Copyright 2015 Red Hat Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: Ben Skeggs <bskeggs@redhat.com>
23  */
24 #include "ctxgf100.h"
25 
26 /*******************************************************************************
27  * PGRAPH context implementation
28  ******************************************************************************/
29 
30 void
31 gm200_grctx_generate_tpcid(struct gf100_gr *gr)
32 {
33 	struct nvkm_device *device = gr->base.engine.subdev.device;
34 	int gpc, tpc, id;
35 
36 	for (tpc = 0, id = 0; tpc < 4; tpc++) {
37 		for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
38 			if (tpc < gr->tpc_nr[gpc]) {
39 				nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x698), id);
40 				nvkm_wr32(device, GPC_UNIT(gpc, 0x0c10 + tpc * 4), id);
41 				nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x088), id);
42 				id++;
43 			}
44 		}
45 	}
46 }
47 
48 static void
49 gm200_grctx_generate_rop_active_fbps(struct gf100_gr *gr)
50 {
51 	struct nvkm_device *device = gr->base.engine.subdev.device;
52 	const u32 fbp_count = nvkm_rd32(device, 0x12006c);
53 	nvkm_mask(device, 0x408850, 0x0000000f, fbp_count); /* zrop */
54 	nvkm_mask(device, 0x408958, 0x0000000f, fbp_count); /* crop */
55 }
56 
57 void
58 gm200_grctx_generate_405b60(struct gf100_gr *gr)
59 {
60 	struct nvkm_device *device = gr->base.engine.subdev.device;
61 	const u32 dist_nr = DIV_ROUND_UP(gr->tpc_total, 4);
62 	u32 dist[TPC_MAX / 4] = {};
63 	u32 gpcs[GPC_MAX] = {};
64 	u8  tpcnr[GPC_MAX];
65 	int tpc, gpc, i;
66 
67 	memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
68 
69 	/* won't result in the same distribution as the binary driver where
70 	 * some of the gpcs have more tpcs than others, but this shall do
71 	 * for the moment.  the code for earlier gpus has this issue too.
72 	 */
73 	for (gpc = -1, i = 0; i < gr->tpc_total; i++) {
74 		do {
75 			gpc = (gpc + 1) % gr->gpc_nr;
76 		} while(!tpcnr[gpc]);
77 		tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--;
78 
79 		dist[i / 4] |= ((gpc << 4) | tpc) << ((i % 4) * 8);
80 		gpcs[gpc] |= i << (tpc * 8);
81 	}
82 
83 	for (i = 0; i < dist_nr; i++)
84 		nvkm_wr32(device, 0x405b60 + (i * 4), dist[i]);
85 	for (i = 0; i < gr->gpc_nr; i++)
86 		nvkm_wr32(device, 0x405ba0 + (i * 4), gpcs[i]);
87 }
88 
/* Build the golden graphics context image for GM200 (Maxwell).
 *
 * Order here is significant: MMIO lists first, then buffer setup,
 * then unit configuration, and finally the bundle/method init with
 * 0x404154 toggled around it.
 */
void
gm200_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
{
	struct nvkm_device *device = gr->base.engine.subdev.device;
	const struct gf100_grctx_func *grctx = gr->func->grctx;
	u32 tmp;
	int i;

	/* Apply the software context MMIO list. */
	gf100_gr_mmio(gr, gr->fuc_sw_ctx);

	nvkm_wr32(device, 0x404154, 0x00000000);

	/* Set up the context buffers (bundle CB, pagepool, attribute CB). */
	grctx->bundle(info);
	grctx->pagepool(info);
	grctx->attrib(info);
	grctx->unkn(gr);

	/* Program TPC IDs and the related per-unit distribution tables. */
	gm200_grctx_generate_tpcid(gr);
	gf100_grctx_generate_r406028(gr);
	gk104_grctx_generate_r418bb8(gr);

	for (i = 0; i < 8; i++)
		nvkm_wr32(device, 0x4064d0 + (i * 0x04), 0x00000000);
	nvkm_wr32(device, 0x406500, 0x00000000);

	/* Total TPC and GPC counts. */
	nvkm_wr32(device, 0x405b00, (gr->tpc_total << 8) | gr->gpc_nr);

	gm200_grctx_generate_rop_active_fbps(gr);

	/* Per-GPC TPC presence mask, 4 bits per GPC. */
	for (tmp = 0, i = 0; i < gr->gpc_nr; i++)
		tmp |= ((1 << gr->tpc_nr[i]) - 1) << (i * 4);
	nvkm_wr32(device, 0x4041c4, tmp);

	gm200_grctx_generate_405b60(gr);

	/* Run the bundle init with 0x404154 still zero, then restore it
	 * before sending the initial object methods.
	 */
	gf100_gr_icmd(gr, gr->fuc_bundle);
	nvkm_wr32(device, 0x404154, 0x00000800);
	gf100_gr_mthd(gr, gr->fuc_method);

	nvkm_mask(device, 0x418e94, 0xffffffff, 0xc4230000);
	nvkm_mask(device, 0x418e4c, 0xffffffff, 0x70000000);
}
131 
/* GM200 context generation hooks and buffer-sizing parameters.
 * Non-overridden generation steps are reused from the GK104/GM107
 * implementations.
 */
const struct gf100_grctx_func
gm200_grctx = {
	.main  = gm200_grctx_generate_main,
	.unkn  = gk104_grctx_generate_unkn,
	.bundle = gm107_grctx_generate_bundle,
	.bundle_size = 0x3000,
	.bundle_min_gpm_fifo_depth = 0x180,
	.bundle_token_limit = 0x780,
	.pagepool = gm107_grctx_generate_pagepool,
	.pagepool_size = 0x20000,
	.attrib = gm107_grctx_generate_attrib,
	.attrib_nr_max = 0x600,
	.attrib_nr = 0x400,
	.alpha_nr_max = 0x1800,
	.alpha_nr = 0x1000,
};
148