xref: /linux/drivers/gpu/drm/nouveau/nvkm/core/subdev.c (revision 8185461e531c39d67aa4705d7f94873feb87adfd)
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include <core/subdev.h>
#include <core/device.h>
#include <core/option.h>
#include <subdev/mc.h>

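/*
 * Human-readable name for each subdev type, generated from the entries in
 * <core/layout.h>; used by __nvkm_subdev_ctor() below to build subdev->name.
 */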
const char *
nvkm_subdev_type[NVKM_SUBDEV_NR] = {
#define NVKM_LAYOUT_ONCE(type,data,ptr,...) [type] = #ptr,
#define NVKM_LAYOUT_INST(A...) NVKM_LAYOUT_ONCE(A)
#include <core/layout.h>
#undef NVKM_LAYOUT_ONCE
#undef NVKM_LAYOUT_INST
};

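/* Forward a pending interrupt to the subdev's implementation, if it has one. */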
void
nvkm_subdev_intr(struct nvkm_subdev *subdev)
{
	if (subdev->func->intr)
		subdev->func->intr(subdev);
}

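/*
 * Forward an info query to the subdev's implementation; -ENOSYS if the
 * subdev has no info hook.
 */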
int
nvkm_subdev_info(struct nvkm_subdev *subdev, u64 mthd, u64 *data)
{
	if (subdev->func->info)
		return subdev->func->info(subdev, mthd, data);
	return -ENOSYS;
}

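/*
 * Shut a subdev down for poweroff, suspend or runtime suspend: run the
 * implementation's fini hook (errors are only propagated when suspend is
 * non-zero), mark the subdev disabled, and reset it via nvkm_mc_reset().
 */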
int
nvkm_subdev_fini(struct nvkm_subdev *subdev, enum nvkm_suspend_state suspend)
{
	struct nvkm_device *device = subdev->device;
	const char *action;
	s64 time;

	switch (suspend) {
	case NVKM_POWEROFF:
	default:
		action = subdev->use.enabled ? "fini" : "reset";
		break;
	case NVKM_SUSPEND:
		action = "suspend";
		break;
	case NVKM_RUNTIME_SUSPEND:
		action = "runtime";
		break;
	}
	nvkm_trace(subdev, "%s running...\n", action);
	time = ktime_to_us(ktime_get());

	if (subdev->func->fini) {
		int ret = subdev->func->fini(subdev, suspend);
		if (ret) {
			nvkm_error(subdev, "%s failed, %d\n", action, ret);
			if (suspend)
				return ret;
		}
	}
	subdev->use.enabled = false;

	nvkm_mc_reset(device, subdev->type, subdev->inst);

	time = ktime_to_us(ktime_get()) - time;
	nvkm_trace(subdev, "%s completed in %lldus\n", action, time);
	return 0;
}

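/* Run the subdev's optional preinit hook, with trace timing around it. */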
int
nvkm_subdev_preinit(struct nvkm_subdev *subdev)
{
	s64 time;

	nvkm_trace(subdev, "preinit running...\n");
	time = ktime_to_us(ktime_get());

	if (subdev->func->preinit) {
		int ret = subdev->func->preinit(subdev);
		if (ret) {
			nvkm_error(subdev, "preinit failed, %d\n", ret);
			return ret;
		}
	}

	time = ktime_to_us(ktime_get()) - time;
	nvkm_trace(subdev, "preinit completed in %lldus\n", time);
	return 0;
}

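/*
 * One-time initialisation, performed at most once per subdev lifetime.
 * Callers in this file hold subdev->use.mutex across this.
 */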
static int
nvkm_subdev_oneinit_(struct nvkm_subdev *subdev)
{
	s64 time;
	int ret;

	if (!subdev->func->oneinit || subdev->oneinit)
		return 0;

	nvkm_trace(subdev, "one-time init running...\n");
	time = ktime_to_us(ktime_get());
	ret = subdev->func->oneinit(subdev);
	if (ret) {
		nvkm_error(subdev, "one-time init failed, %d\n", ret);
		return ret;
	}

	subdev->oneinit = true;
	time = ktime_to_us(ktime_get()) - time;
	nvkm_trace(subdev, "one-time init completed in %lldus\n", time);
	return 0;
}

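/*
 * Bring a subdev up: one-time init first, then the init hook.  Skips the
 * work if the subdev is already enabled.  Called with subdev->use.mutex held.
 */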
static int
nvkm_subdev_init_(struct nvkm_subdev *subdev)
{
	s64 time;
	int ret;

	if (subdev->use.enabled) {
		nvkm_trace(subdev, "init skipped, already running\n");
		return 0;
	}

	nvkm_trace(subdev, "init running...\n");
	time = ktime_to_us(ktime_get());

	ret = nvkm_subdev_oneinit_(subdev);
	if (ret)
		return ret;

	subdev->use.enabled = true;

	if (subdev->func->init) {
		ret = subdev->func->init(subdev);
		if (ret) {
			nvkm_error(subdev, "init failed, %d\n", ret);
			return ret;
		}
	}

	time = ktime_to_us(ktime_get()) - time;
	nvkm_trace(subdev, "init completed in %lldus\n", time);
	return 0;
}

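/* Locked init entry point; skipped entirely while the subdev has no users. */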
int
nvkm_subdev_init(struct nvkm_subdev *subdev)
{
	int ret;

	mutex_lock(&subdev->use.mutex);
	if (refcount_read(&subdev->use.refcount) == 0) {
		nvkm_trace(subdev, "init skipped, no users\n");
		mutex_unlock(&subdev->use.mutex);
		return 0;
	}

	ret = nvkm_subdev_init_(subdev);
	mutex_unlock(&subdev->use.mutex);
	return ret;
}

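/* Locked wrapper around nvkm_subdev_oneinit_(). */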
int
nvkm_subdev_oneinit(struct nvkm_subdev *subdev)
{
	int ret;

	mutex_lock(&subdev->use.mutex);
	ret = nvkm_subdev_oneinit_(subdev);
	mutex_unlock(&subdev->use.mutex);
	return ret;
}

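/* Drop a use reference; the final one powers the subdev off under the mutex. */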
void
nvkm_subdev_unref(struct nvkm_subdev *subdev)
{
	if (refcount_dec_and_mutex_lock(&subdev->use.refcount, &subdev->use.mutex)) {
		nvkm_subdev_fini(subdev, NVKM_POWEROFF);
		mutex_unlock(&subdev->use.mutex);
	}
}

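/*
 * Take a use reference on a subdev, initialising it on the 0->1 transition.
 * The unlocked refcount_inc_not_zero() is the fast path; the transition is
 * serialised by use.mutex.  An illustrative (not prescriptive) pairing:
 *
 *	ret = nvkm_subdev_ref(subdev);
 *	if (ret)
 *		return ret;
 *	...use the subdev...
 *	nvkm_subdev_unref(subdev);
 */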
int
nvkm_subdev_ref(struct nvkm_subdev *subdev)
{
	int ret;

	if (subdev && !refcount_inc_not_zero(&subdev->use.refcount)) {
		mutex_lock(&subdev->use.mutex);
		if (!refcount_inc_not_zero(&subdev->use.refcount)) {
			if ((ret = nvkm_subdev_init_(subdev))) {
				mutex_unlock(&subdev->use.mutex);
				return ret;
			}

			refcount_set(&subdev->use.refcount, 1);
		}
		mutex_unlock(&subdev->use.mutex);
	}

	return 0;
}

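/*
 * Destroy a subdev: unlink it from the device's list, call the optional dtor
 * (which may return a different pointer to free), and clear *psubdev.
 */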
void
nvkm_subdev_del(struct nvkm_subdev **psubdev)
{
	struct nvkm_subdev *subdev = *psubdev;
	s64 time;

	if (subdev && !WARN_ON(!subdev->func)) {
		nvkm_trace(subdev, "destroy running...\n");
		time = ktime_to_us(ktime_get());
		list_del(&subdev->head);
		if (subdev->func->dtor)
			*psubdev = subdev->func->dtor(subdev);
		mutex_destroy(&subdev->use.mutex);
		time = ktime_to_us(ktime_get()) - time;
		nvkm_trace(subdev, "destroy completed in %lldus\n", time);
		kfree(*psubdev);
		*psubdev = NULL;
	}
}

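/* Find the (type, inst) subdev on the device, detach it and destroy it. */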
void
nvkm_subdev_disable(struct nvkm_device *device, enum nvkm_subdev_type type, int inst)
{
	struct nvkm_subdev *subdev;
	list_for_each_entry(subdev, &device->subdev, head) {
		if (subdev->type == type && subdev->inst == inst) {
			*subdev->pself = NULL;
			nvkm_subdev_del(&subdev);
			break;
		}
	}
}

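/*
 * Common constructor: record func/device/type/inst, build the debug name
 * from nvkm_subdev_type[] (with an instance suffix when inst >= 0), look up
 * the per-subdev debug level, take the initial use reference, and add the
 * subdev to the device's list.
 */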
void
__nvkm_subdev_ctor(const struct nvkm_subdev_func *func, struct nvkm_device *device,
		   enum nvkm_subdev_type type, int inst, struct nvkm_subdev *subdev)
{
	subdev->func = func;
	subdev->device = device;
	subdev->type = type;
	subdev->inst = inst < 0 ? 0 : inst;

	if (inst >= 0)
		snprintf(subdev->name, sizeof(subdev->name), "%s%d", nvkm_subdev_type[type], inst);
	else
		strscpy(subdev->name, nvkm_subdev_type[type], sizeof(subdev->name));
	subdev->debug = nvkm_dbgopt(device->dbgopt, subdev->name);

	refcount_set(&subdev->use.refcount, 1);
	list_add_tail(&subdev->head, &device->subdev);
}

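/* Allocate and construct a subdev; -ENOMEM if the allocation fails. */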
int
nvkm_subdev_new_(const struct nvkm_subdev_func *func, struct nvkm_device *device,
		 enum nvkm_subdev_type type, int inst, struct nvkm_subdev **psubdev)
{
	if (!(*psubdev = kzalloc(sizeof(**psubdev), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_subdev_ctor(func, device, type, inst, *psubdev);
	return 0;
}