xref: /linux/drivers/gpu/drm/nouveau/nvkm/core/intr.c (revision a940daa52167e9db8ecce82213813b735a9d9f23)
/*
 * Copyright 2021 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <core/intr.h>
#include <core/device.h>
#include <core/subdev.h>
#include <subdev/pci.h>
#include <subdev/top.h>

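/* Translate an interrupt @type into a (leaf, mask) pair on @intr: subdev
 * types are looked up in the controller's data table (via the TOP device
 * list for TOP-routed entries), while raw NVKM_INTR_VECTOR_n values map
 * directly onto a 32-bit leaf word and bit.
 */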
static int
nvkm_intr_xlat(struct nvkm_subdev *subdev, struct nvkm_intr *intr,
	       enum nvkm_intr_type type, int *leaf, u32 *mask)
{
	struct nvkm_device *device = subdev->device;

	if (type < NVKM_INTR_VECTOR_0) {
		if (type == NVKM_INTR_SUBDEV) {
			const struct nvkm_intr_data *data = intr->data;
			struct nvkm_top_device *tdev;

			while (data && data->mask) {
				if (data->type == NVKM_SUBDEV_TOP) {
					list_for_each_entry(tdev, &device->top->device, head) {
						if (tdev->intr >= 0 &&
						    tdev->type == subdev->type &&
						    tdev->inst == subdev->inst) {
							if (data->mask & BIT(tdev->intr)) {
								*leaf = data->leaf;
								*mask = BIT(tdev->intr);
								return 0;
							}
						}
					}
				} else
				if (data->type == subdev->type && data->inst == subdev->inst) {
					*leaf = data->leaf;
					*mask = data->mask;
					return 0;
				}

				data++;
			}
		} else {
			return -ENOSYS;
		}
	} else {
		if (type < intr->leaves * sizeof(*intr->stat) * 8) {
			*leaf = type / 32;
			*mask = BIT(type % 32);
			return 0;
		}
	}

	return -EINVAL;
}

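/* Find the registered interrupt controller (and leaf/mask) that services @type for @subdev. */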
static struct nvkm_intr *
nvkm_intr_find(struct nvkm_subdev *subdev, enum nvkm_intr_type type, int *leaf, u32 *mask)
{
	struct nvkm_intr *intr;
	int ret;

	list_for_each_entry(intr, &subdev->device->intr.intr, head) {
		ret = nvkm_intr_xlat(subdev, intr, type, leaf, mask);
		if (ret == 0)
			return intr;
	}

	return NULL;
}

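/* Unmask a leaf/mask pair, invoking the controller's reset op first if it has one.
 * Caller holds device->intr.lock.
 */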
static void
nvkm_intr_allow_locked(struct nvkm_intr *intr, int leaf, u32 mask)
{
	intr->mask[leaf] |= mask;
	if (intr->func->allow) {
		if (intr->func->reset)
			intr->func->reset(intr, leaf, mask);
		intr->func->allow(intr, leaf, mask);
	}
}

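/* Unmask the interrupt source identified by @type on behalf of @subdev. */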
void
nvkm_intr_allow(struct nvkm_subdev *subdev, enum nvkm_intr_type type)
{
	struct nvkm_device *device = subdev->device;
	struct nvkm_intr *intr;
	unsigned long flags;
	int leaf;
	u32 mask;

	intr = nvkm_intr_find(subdev, type, &leaf, &mask);
	if (intr) {
		nvkm_debug(intr->subdev, "intr %d/%08x allowed by %s\n", leaf, mask, subdev->name);
		spin_lock_irqsave(&device->intr.lock, flags);
		nvkm_intr_allow_locked(intr, leaf, mask);
		spin_unlock_irqrestore(&device->intr.lock, flags);
	}
}

static void
nvkm_intr_block_locked(struct nvkm_intr *intr, int leaf, u32 mask)
{
	intr->mask[leaf] &= ~mask;
	if (intr->func->block)
		intr->func->block(intr, leaf, mask);
}

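/* Mask the interrupt source identified by @type on behalf of @subdev. */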
void
nvkm_intr_block(struct nvkm_subdev *subdev, enum nvkm_intr_type type)
{
	struct nvkm_device *device = subdev->device;
	struct nvkm_intr *intr;
	unsigned long flags;
	int leaf;
	u32 mask;

	intr = nvkm_intr_find(subdev, type, &leaf, &mask);
	if (intr) {
		nvkm_debug(intr->subdev, "intr %d/%08x blocked by %s\n", leaf, mask, subdev->name);
		spin_lock_irqsave(&device->intr.lock, flags);
		nvkm_intr_block_locked(intr, leaf, mask);
		spin_unlock_irqrestore(&device->intr.lock, flags);
	}
}

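/* Enable/disable top-level interrupt delivery on every registered controller.
 * Caller holds device->intr.lock.
 */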
static void
nvkm_intr_rearm_locked(struct nvkm_device *device)
{
	struct nvkm_intr *intr;

	list_for_each_entry(intr, &device->intr.intr, head)
		intr->func->rearm(intr);
}

static void
nvkm_intr_unarm_locked(struct nvkm_device *device)
{
	struct nvkm_intr *intr;

	list_for_each_entry(intr, &device->intr.intr, head)
		intr->func->unarm(intr);
}

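/* Top-level IRQ handler: unarm the controllers, collect pending leaf status,
 * dispatch handlers in priority order, and (if nothing was handled) log and
 * block the offending sources before re-arming.
 */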
static irqreturn_t
nvkm_intr(int irq, void *arg)
{
	struct nvkm_device *device = arg;
	struct nvkm_intr *intr;
	struct nvkm_inth *inth;
	irqreturn_t ret = IRQ_NONE;
	bool pending = false;
	int prio, leaf;

	/* Disable all top-level interrupt sources, and re-arm MSI interrupts. */
	spin_lock(&device->intr.lock);
	if (!device->intr.armed)
		goto done_unlock;

	nvkm_intr_unarm_locked(device);
	nvkm_pci_msi_rearm(device);

	/* Fetch pending interrupt masks. */
	list_for_each_entry(intr, &device->intr.intr, head) {
		if (intr->func->pending(intr))
			pending = true;
	}

	if (!pending)
		goto done;

	/* Check that GPU is still on the bus by reading NV_PMC_BOOT_0. */
	if (WARN_ON(nvkm_rd32(device, 0x000000) == 0xffffffff))
		goto done;

	/* Execute handlers. */
	for (prio = 0; prio < ARRAY_SIZE(device->intr.prio); prio++) {
		list_for_each_entry(inth, &device->intr.prio[prio], head) {
			struct nvkm_intr *intr = inth->intr;

			if (intr->stat[inth->leaf] & inth->mask) {
				if (atomic_read(&inth->allowed)) {
					if (intr->func->reset)
						intr->func->reset(intr, inth->leaf, inth->mask);
					if (inth->func(inth) == IRQ_HANDLED)
						ret = IRQ_HANDLED;
				}
			}
		}
	}

	/* Nothing handled?  Some debugging/protection from IRQ storms is in order... */
	if (ret == IRQ_NONE) {
		list_for_each_entry(intr, &device->intr.intr, head) {
			for (leaf = 0; leaf < intr->leaves; leaf++) {
				if (intr->stat[leaf]) {
					nvkm_debug(intr->subdev, "intr%d: %08x\n",
						   leaf, intr->stat[leaf]);
					nvkm_intr_block_locked(intr, leaf, intr->stat[leaf]);
				}
			}
		}
	}

done:
	/* Re-enable all top-level interrupt sources. */
	nvkm_intr_rearm_locked(device);
done_unlock:
	spin_unlock(&device->intr.lock);
	return ret;
}

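/* Register an interrupt controller with @leaves 32-bit status/mask words.
 * With subdev debugging enabled, every source starts out unmasked.
 */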
int
nvkm_intr_add(const struct nvkm_intr_func *func, const struct nvkm_intr_data *data,
	      struct nvkm_subdev *subdev, int leaves, struct nvkm_intr *intr)
{
	struct nvkm_device *device = subdev->device;
	int i;

	intr->func = func;
	intr->data = data;
	intr->subdev = subdev;
	intr->leaves = leaves;
	intr->stat = kcalloc(leaves, sizeof(*intr->stat), GFP_KERNEL);
	intr->mask = kcalloc(leaves, sizeof(*intr->mask), GFP_KERNEL);
	if (!intr->stat || !intr->mask) {
		/* kfree(NULL) is a no-op, so free both in case only one allocation failed. */
		kfree(intr->stat);
		kfree(intr->mask);
		return -ENOMEM;
	}

	if (intr->subdev->debug >= NV_DBG_DEBUG) {
		for (i = 0; i < intr->leaves; i++)
			intr->mask[i] = ~0;
	}

	spin_lock_irq(&device->intr.lock);
	list_add_tail(&intr->head, &device->intr.intr);
	spin_unlock_irq(&device->intr.lock);
	return 0;
}

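/* Trampoline from the inth layer into the legacy per-subdev ->intr() hook. */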
static irqreturn_t
nvkm_intr_subdev(struct nvkm_inth *inth)
{
	struct nvkm_subdev *subdev = container_of(inth, typeof(*subdev), inth);

	nvkm_subdev_intr(subdev);
	return IRQ_HANDLED;
}

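/* Attach a legacy subdev's ->intr() to this controller via an inth handler;
 * display uses the vblank priority class, everything else runs at normal priority.
 */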
static void
nvkm_intr_subdev_add_dev(struct nvkm_intr *intr, enum nvkm_subdev_type type, int inst)
{
	struct nvkm_subdev *subdev;
	enum nvkm_intr_prio prio;
	int ret;

	subdev = nvkm_device_subdev(intr->subdev->device, type, inst);
	if (!subdev || !subdev->func->intr)
		return;

	if (type == NVKM_ENGINE_DISP)
		prio = NVKM_INTR_PRIO_VBLANK;
	else
		prio = NVKM_INTR_PRIO_NORMAL;

	ret = nvkm_inth_add(intr, NVKM_INTR_SUBDEV, prio, subdev, nvkm_intr_subdev, &subdev->inth);
	if (WARN_ON(ret))
		return;

	nvkm_inth_allow(&subdev->inth);
}

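/* Walk the controller's data table and attach handlers for every legacy entry. */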
static void
nvkm_intr_subdev_add(struct nvkm_intr *intr)
{
	const struct nvkm_intr_data *data;
	struct nvkm_device *device = intr->subdev->device;
	struct nvkm_top_device *tdev;

	for (data = intr->data; data && data->mask; data++) {
		if (data->legacy) {
			if (data->type == NVKM_SUBDEV_TOP) {
				list_for_each_entry(tdev, &device->top->device, head) {
					if (tdev->intr < 0 || !(data->mask & BIT(tdev->intr)))
						continue;

					nvkm_intr_subdev_add_dev(intr, tdev->type, tdev->inst);
				}
			} else {
				nvkm_intr_subdev_add_dev(intr, data->type, data->inst);
			}
		}
	}
}

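/* (Re)enable interrupt delivery: hook up legacy handlers on first use,
 * reprogram each controller's leaf masks, then re-arm the top level.
 */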
void
nvkm_intr_rearm(struct nvkm_device *device)
{
	struct nvkm_intr *intr;
	int i;

	if (unlikely(!device->intr.legacy_done)) {
		list_for_each_entry(intr, &device->intr.intr, head)
			nvkm_intr_subdev_add(intr);
		device->intr.legacy_done = true;
	}

	spin_lock_irq(&device->intr.lock);
	list_for_each_entry(intr, &device->intr.intr, head) {
		for (i = 0; intr->func->block && i < intr->leaves; i++) {
			intr->func->block(intr, i, ~0);
			intr->func->allow(intr, i, intr->mask[i]);
		}
	}

	nvkm_intr_rearm_locked(device);
	device->intr.armed = true;
	spin_unlock_irq(&device->intr.lock);
}

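/* Disable top-level interrupt delivery for the device. */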
void
nvkm_intr_unarm(struct nvkm_device *device)
{
	spin_lock_irq(&device->intr.lock);
	nvkm_intr_unarm_locked(device);
	device->intr.armed = false;
	spin_unlock_irq(&device->intr.lock);
}

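/* Request the device's IRQ and install nvkm_intr() as its handler. */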
int
nvkm_intr_install(struct nvkm_device *device)
{
	int ret;

	device->intr.irq = device->func->irq(device);
	if (device->intr.irq < 0)
		return device->intr.irq;

	ret = request_irq(device->intr.irq, nvkm_intr, IRQF_SHARED, "nvkm", device);
	if (ret)
		return ret;

	device->intr.alloc = true;
	return 0;
}

void
nvkm_intr_dtor(struct nvkm_device *device)
{
	struct nvkm_intr *intr, *intt;

	list_for_each_entry_safe(intr, intt, &device->intr.intr, head) {
		list_del(&intr->head);
		kfree(intr->mask);
		kfree(intr->stat);
	}

	if (device->intr.alloc)
		free_irq(device->intr.irq, device);
}

void
nvkm_intr_ctor(struct nvkm_device *device)
{
	int i;

	INIT_LIST_HEAD(&device->intr.intr);
	for (i = 0; i < ARRAY_SIZE(device->intr.prio); i++)
		INIT_LIST_HEAD(&device->intr.prio[i]);

	spin_lock_init(&device->intr.lock);
	device->intr.armed = false;
}

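/* Prevent @inth from being invoked; the hardware source itself is not masked here. */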
void
nvkm_inth_block(struct nvkm_inth *inth)
{
	if (unlikely(!inth->intr))
		return;

	atomic_set(&inth->allowed, 0);
}

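/* Allow @inth to be invoked, unmasking its hardware source if it isn't already. */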
void
nvkm_inth_allow(struct nvkm_inth *inth)
{
	struct nvkm_intr *intr = inth->intr;
	unsigned long flags;

	if (unlikely(!inth->intr))
		return;

	spin_lock_irqsave(&intr->subdev->device->intr.lock, flags);
	if (!atomic_xchg(&inth->allowed, 1)) {
		if ((intr->mask[inth->leaf] & inth->mask) != inth->mask)
			nvkm_intr_allow_locked(intr, inth->leaf, inth->mask);
	}
	spin_unlock_irqrestore(&intr->subdev->device->intr.lock, flags);
}

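/* Bind @inth to an interrupt source on @intr at the given priority.  The
 * handler starts out blocked; call nvkm_inth_allow() to enable it.
 */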
int
nvkm_inth_add(struct nvkm_intr *intr, enum nvkm_intr_type type, enum nvkm_intr_prio prio,
	      struct nvkm_subdev *subdev, nvkm_inth_func func, struct nvkm_inth *inth)
{
	struct nvkm_device *device = subdev->device;
	int ret;

	if (WARN_ON(inth->mask))
		return -EBUSY;

	ret = nvkm_intr_xlat(subdev, intr, type, &inth->leaf, &inth->mask);
	if (ret)
		return ret;

	nvkm_debug(intr->subdev, "intr %d/%08x requested by %s\n",
		   inth->leaf, inth->mask, subdev->name);

	inth->intr = intr;
	inth->func = func;
	atomic_set(&inth->allowed, 0);
	list_add_tail(&inth->head, &device->intr.prio[prio]);
	return 0;
}