xref: /linux/drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c (revision a4eb44a6435d6d8f9e642407a4a06f65eb90ca04)
/*
 * Copyright 2015 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs <bskeggs@redhat.com>
 */
#include "priv.h"

#include <engine/fifo.h>

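/*
 * Query the backend for the instance of the channel whose graphics context
 * is currently active on GR.  Returns 0 when there is no GR engine or the
 * backend doesn't implement the hook.
 */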
u32
nvkm_gr_ctxsw_inst(struct nvkm_device *device)
{
	struct nvkm_gr *gr = device->gr;
	if (gr && gr->func->ctxsw.inst)
		return gr->func->ctxsw.inst(gr);
	return 0;
}

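/*
 * Ask the backend to resume context switching on GR.  Returns 0 when there
 * is no GR engine or the backend doesn't implement the hook.
 */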
int
nvkm_gr_ctxsw_resume(struct nvkm_device *device)
{
	struct nvkm_gr *gr = device->gr;
	if (gr && gr->func->ctxsw.resume)
		return gr->func->ctxsw.resume(gr);
	return 0;
}

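/*
 * Ask the backend to pause context switching on GR.  Returns 0 when there
 * is no GR engine or the backend doesn't implement the hook.
 */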
int
nvkm_gr_ctxsw_pause(struct nvkm_device *device)
{
	struct nvkm_gr *gr = device->gr;
	if (gr && gr->func->ctxsw.pause)
		return gr->func->ctxsw.pause(gr);
	return 0;
}

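/*
 * Report whether GR is partway through loading a channel's context;
 * consulted by the fifo code when determining engine status.
 */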
static bool
nvkm_gr_chsw_load(struct nvkm_engine *engine)
{
	struct nvkm_gr *gr = nvkm_gr(engine);
	if (gr->func->chsw_load)
		return gr->func->chsw_load(gr);
	return false;
}

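/* Propagate an updated tiling region from the FB subdev to the backend. */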
static void
nvkm_gr_tile(struct nvkm_engine *engine, int region, struct nvkm_fb_tile *tile)
{
	struct nvkm_gr *gr = nvkm_gr(engine);
	if (gr->func->tile)
		gr->func->tile(gr, region, tile);
}

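/*
 * Backend-specific encoding of the graphics units present on the chip,
 * used to answer userspace's GRAPH_UNITS query; 0 if the backend has no
 * hook.
 */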
u64
nvkm_gr_units(struct nvkm_gr *gr)
{
	if (gr->func->units)
		return gr->func->units(gr);
	return 0;
}

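/* Flush the graphics engine's TLB; -ENODEV if the backend has no hook. */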
int
nvkm_gr_tlb_flush(struct nvkm_gr *gr)
{
	if (gr->func->tlb_flush)
		return gr->func->tlb_flush(gr);
	return -ENODEV;
}

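/*
 * Enumerate the user object classes exposed by the engine.  Backends that
 * implement object_get() generate the list themselves; the others provide
 * a static, zero-terminated sclass array which is indexed here.
 */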
static int
nvkm_gr_oclass_get(struct nvkm_oclass *oclass, int index)
{
	struct nvkm_gr *gr = nvkm_gr(oclass->engine);
	int c = 0;

	if (gr->func->object_get) {
		int ret = gr->func->object_get(gr, index, &oclass->base);
		if (oclass->base.oclass)
			return index;
		return ret;
	}

	while (gr->func->sclass[c].oclass) {
		if (c++ == index) {
			oclass->base = gr->func->sclass[index];
			return index;
		}
	}

	return c;
}

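/*
 * Instantiate the engine context object for a fifo channel, if the backend
 * needs one.
 */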
static int
nvkm_gr_cclass_new(struct nvkm_fifo_chan *chan,
		   const struct nvkm_oclass *oclass,
		   struct nvkm_object **pobject)
{
	struct nvkm_gr *gr = nvkm_gr(oclass->engine);
	if (gr->func->chan_new)
		return gr->func->chan_new(gr, chan, oclass, pobject);
	return 0;
}

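/*
 * Standard nvkm_engine method trampolines: recover the containing nvkm_gr
 * and forward to the chipset implementation, with the optional hooks
 * (oneinit, fini, dtor) defaulting to success / no extra cleanup.
 */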
static void
nvkm_gr_intr(struct nvkm_engine *engine)
{
	struct nvkm_gr *gr = nvkm_gr(engine);
	gr->func->intr(gr);
}

static int
nvkm_gr_oneinit(struct nvkm_engine *engine)
{
	struct nvkm_gr *gr = nvkm_gr(engine);
	if (gr->func->oneinit)
		return gr->func->oneinit(gr);
	return 0;
}

static int
nvkm_gr_init(struct nvkm_engine *engine)
{
	struct nvkm_gr *gr = nvkm_gr(engine);
	return gr->func->init(gr);
}

static int
nvkm_gr_fini(struct nvkm_engine *engine, bool suspend)
{
	struct nvkm_gr *gr = nvkm_gr(engine);
	if (gr->func->fini)
		return gr->func->fini(gr, suspend);
	return 0;
}

static void *
nvkm_gr_dtor(struct nvkm_engine *engine)
{
	struct nvkm_gr *gr = nvkm_gr(engine);
	if (gr->func->dtor)
		return gr->func->dtor(gr);
	return gr;
}

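/* Generic engine method table shared by every GR implementation. */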
static const struct nvkm_engine_func
nvkm_gr = {
	.dtor = nvkm_gr_dtor,
	.oneinit = nvkm_gr_oneinit,
	.init = nvkm_gr_init,
	.fini = nvkm_gr_fini,
	.intr = nvkm_gr_intr,
	.tile = nvkm_gr_tile,
	.chsw_load = nvkm_gr_chsw_load,
	.fifo.cclass = nvkm_gr_cclass_new,
	.fifo.sclass = nvkm_gr_oclass_get,
};

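/*
 * Common constructor, called by the chipset-specific GR implementations
 * with their nvkm_gr_func table.  A rough sketch of a caller follows; the
 * names below are illustrative only, not taken from a real chipset file:
 *
 *	static const struct nvkm_gr_func
 *	xx00_gr = {
 *		.init = xx00_gr_init,
 *		.intr = xx00_gr_intr,
 *		.sclass = { ... },
 *	};
 *
 *	int
 *	xx00_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type,
 *		    int inst, struct nvkm_gr **pgr)
 *	{
 *		struct nvkm_gr *gr;
 *
 *		if (!(gr = *pgr = kzalloc(sizeof(*gr), GFP_KERNEL)))
 *			return -ENOMEM;
 *		return nvkm_gr_ctor(&xx00_gr, device, type, inst, true, gr);
 *	}
 */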
int
nvkm_gr_ctor(const struct nvkm_gr_func *func, struct nvkm_device *device,
	     enum nvkm_subdev_type type, int inst, bool enable, struct nvkm_gr *gr)
{
	gr->func = func;
	return nvkm_engine_ctor(&nvkm_gr, device, type, inst, enable, &gr->engine);
}