/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include <core/object.h>
#include <core/client.h>
#include <core/engine.h>

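/*
 * Look up an object by handle in the client's red-black tree.  A handle of
 * zero refers to the client's own root object.  When @func is non-NULL, the
 * located object must additionally use that function table.  Returns the
 * object, or ERR_PTR(-ENOENT) / ERR_PTR(-EINVAL) on failure.
 */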
struct nvkm_object *
nvkm_object_search(struct nvkm_client *client, u64 handle,
		   const struct nvkm_object_func *func)
{
	struct nvkm_object *object;
	unsigned long flags;

	if (handle) {
		spin_lock_irqsave(&client->obj_lock, flags);
		struct rb_node *node = client->objroot.rb_node;
		while (node) {
			object = rb_entry(node, typeof(*object), node);
			if (handle < object->object)
				node = node->rb_left;
			else
			if (handle > object->object)
				node = node->rb_right;
			else {
				spin_unlock_irqrestore(&client->obj_lock, flags);
				goto done;
			}
		}
		spin_unlock_irqrestore(&client->obj_lock, flags);
		return ERR_PTR(-ENOENT);
	} else {
		object = &client->object;
	}

done:
	if (unlikely(func && object->func != func))
		return ERR_PTR(-EINVAL);
	return object;
}

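/*
 * Unlink an object from its client's handle tree, if it was ever inserted.
 */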
void
nvkm_object_remove(struct nvkm_object *object)
{
	unsigned long flags;

	spin_lock_irqsave(&object->client->obj_lock, flags);
	if (!RB_EMPTY_NODE(&object->node))
		rb_erase(&object->node, &object->client->objroot);
	spin_unlock_irqrestore(&object->client->obj_lock, flags);
}

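/*
 * Insert an object into its client's handle tree, keyed by object->object.
 * Returns false if an object with the same key is already present.
 */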
bool
nvkm_object_insert(struct nvkm_object *object)
{
	struct rb_node **ptr;
	struct rb_node *parent = NULL;
	unsigned long flags;

	spin_lock_irqsave(&object->client->obj_lock, flags);
	ptr = &object->client->objroot.rb_node;
	while (*ptr) {
		struct nvkm_object *this = rb_entry(*ptr, typeof(*this), node);
		parent = *ptr;
		if (object->object < this->object) {
			ptr = &parent->rb_left;
		} else if (object->object > this->object) {
			ptr = &parent->rb_right;
		} else {
			spin_unlock_irqrestore(&object->client->obj_lock, flags);
			return false;
		}
	}

	rb_link_node(&object->node, parent, ptr);
	rb_insert_color(&object->node, &object->client->objroot);
	spin_unlock_irqrestore(&object->client->obj_lock, flags);
	return true;
}

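/* Dispatch an object method call; -ENODEV if the class has no mthd hook. */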
int
nvkm_object_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
{
	if (likely(object->func->mthd))
		return object->func->mthd(object, mthd, data, size);
	return -ENODEV;
}

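/*
 * Look up the event source backing a notification method; -ENODEV if the
 * class has no ntfy hook.
 */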
int
nvkm_object_ntfy(struct nvkm_object *object, u32 mthd,
		 struct nvkm_event **pevent)
{
	if (likely(object->func->ntfy))
		return object->func->ntfy(object, mthd, pevent);
	return -ENODEV;
}

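/*
 * Retrieve mapping information (type, address, size) for an object;
 * -ENODEV if the class cannot be mapped.
 */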
int
nvkm_object_map(struct nvkm_object *object, void *argv, u32 argc,
		enum nvkm_object_map *type, u64 *addr, u64 *size)
{
	if (likely(object->func->map))
		return object->func->map(object, argv, argc, type, addr, size);
	return -ENODEV;
}

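/* Tear down an object's mapping; -ENODEV if the class has no unmap hook. */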
int
nvkm_object_unmap(struct nvkm_object *object)
{
	if (likely(object->func->unmap))
		return object->func->unmap(object);
	return -ENODEV;
}

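/*
 * Back the object with GPU instance memory allocated from @gpuobj with the
 * given alignment; -ENODEV if the class has no bind hook.
 */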
int
nvkm_object_bind(struct nvkm_object *object, struct nvkm_gpuobj *gpuobj,
		 int align, struct nvkm_gpuobj **pgpuobj)
{
	if (object->func->bind)
		return object->func->bind(object, gpuobj, align, pgpuobj);
	return -ENODEV;
}

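/*
 * Suspend or shut down an object: children are finalised first, in reverse
 * order, then the object itself.  If a suspend fails, everything that was
 * already finalised is re-initialised before returning the error.
 */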
int
nvkm_object_fini(struct nvkm_object *object, bool suspend)
{
	const char *action = suspend ? "suspend" : "fini";
	struct nvkm_object *child;
	s64 time;
	int ret;

	nvif_debug(object, "%s children...\n", action);
	time = ktime_to_us(ktime_get());
	list_for_each_entry_reverse(child, &object->tree, head) {
		ret = nvkm_object_fini(child, suspend);
		if (ret && suspend)
			goto fail_child;
	}

	nvif_debug(object, "%s running...\n", action);
	if (object->func->fini) {
		ret = object->func->fini(object, suspend);
		if (ret) {
			nvif_error(object, "%s failed with %d\n", action, ret);
			if (suspend)
				goto fail;
		}
	}

	time = ktime_to_us(ktime_get()) - time;
	nvif_debug(object, "%s completed in %lldus\n", action, time);
	return 0;

fail:
	if (object->func->init) {
		int rret = object->func->init(object);
		if (rret)
			nvif_fatal(object, "failed to restart, %d\n", rret);
	}
fail_child:
	list_for_each_entry_continue_reverse(child, &object->tree, head) {
		nvkm_object_init(child);
	}
	return ret;
}

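/*
 * Initialise an object and then its children.  If a child fails, the
 * previously initialised children and the object itself are finalised again.
 */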
int
nvkm_object_init(struct nvkm_object *object)
{
	struct nvkm_object *child;
	s64 time;
	int ret;

	nvif_debug(object, "init running...\n");
	time = ktime_to_us(ktime_get());
	if (object->func->init) {
		ret = object->func->init(object);
		if (ret)
			goto fail;
	}

	nvif_debug(object, "init children...\n");
	list_for_each_entry(child, &object->tree, head) {
		ret = nvkm_object_init(child);
		if (ret)
			goto fail_child;
	}

	time = ktime_to_us(ktime_get()) - time;
	nvif_debug(object, "init completed in %lldus\n", time);
	return 0;

fail_child:
	list_for_each_entry_continue_reverse(child, &object->tree, head)
		nvkm_object_fini(child, false);
fail:
	nvif_error(object, "init failed with %d\n", ret);
	if (object->func->fini)
		object->func->fini(object, false);
	return ret;
}

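/*
 * Destroy an object's children, unmap it and run its class destructor.
 * Returns the pointer that should eventually be kfree()d, which the class
 * dtor may substitute for the object itself.
 */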
void *
nvkm_object_dtor(struct nvkm_object *object)
{
	struct nvkm_object *child, *ctemp;
	void *data = object;
	s64 time;

	nvif_debug(object, "destroy children...\n");
	time = ktime_to_us(ktime_get());
	list_for_each_entry_safe(child, ctemp, &object->tree, head) {
		nvkm_object_del(&child);
	}

	nvif_debug(object, "destroy running...\n");
	nvkm_object_unmap(object);
	if (object->func->dtor)
		data = object->func->dtor(object);
	nvkm_engine_unref(&object->engine);
	time = ktime_to_us(ktime_get()) - time;
	nvif_debug(object, "destroy completed in %lldus...\n", time);
	return data;
}

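/*
 * Destroy an object and free its memory, removing it from the client's
 * handle tree and from its parent's child list.
 */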
void
nvkm_object_del(struct nvkm_object **pobject)
{
	struct nvkm_object *object = *pobject;
	if (object && !WARN_ON(!object->func)) {
		*pobject = nvkm_object_dtor(object);
		nvkm_object_remove(object);
		list_del(&object->head);
		kfree(*pobject);
		*pobject = NULL;
	}
}

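/*
 * One-time constructor: fill in an object from its oclass and take a
 * reference on the providing engine.
 */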
void
nvkm_object_ctor(const struct nvkm_object_func *func,
		 const struct nvkm_oclass *oclass, struct nvkm_object *object)
{
	object->func = func;
	object->client = oclass->client;
	object->engine = nvkm_engine_ref(oclass->engine);
	object->oclass = oclass->base.oclass;
	object->handle = oclass->handle;
	object->object = oclass->object;
	INIT_LIST_HEAD(&object->head);
	INIT_LIST_HEAD(&object->tree);
	RB_CLEAR_NODE(&object->node);
	WARN_ON(IS_ERR(object->engine));
}

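/*
 * Allocate and construct a plain object with the given function table.
 * No class-specific arguments are accepted (size must be zero).
 */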
int
nvkm_object_new_(const struct nvkm_object_func *func,
		 const struct nvkm_oclass *oclass, void *data, u32 size,
		 struct nvkm_object **pobject)
{
	if (size == 0) {
		if (!(*pobject = kzalloc(sizeof(**pobject), GFP_KERNEL)))
			return -ENOMEM;
		nvkm_object_ctor(func, oclass, *pobject);
		return 0;
	}
	return -ENOSYS;
}

static const struct nvkm_object_func
nvkm_object_func = {
};

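/*
 * Default object allocator: uses the class-provided function table when one
 * exists, otherwise falls back to the empty nvkm_object_func above.
 */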
int
nvkm_object_new(const struct nvkm_oclass *oclass, void *data, u32 size,
		struct nvkm_object **pobject)
{
	const struct nvkm_object_func *func =
		oclass->base.func ? oclass->base.func : &nvkm_object_func;
	return nvkm_object_new_(func, oclass, data, size, pobject);
}