/*
 * Copyright 2016 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs <bskeggs@redhat.com>
 */
#include "priv.h"

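/*
 * Allocate a new topology entry, initialise every field to an "unknown"
 * value (NVKM_SUBDEV_NR / -1) and append it to the top->device list.
 * Returns NULL on allocation failure.
 */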
struct nvkm_top_device *
nvkm_top_device_new(struct nvkm_top *top)
{
	struct nvkm_top_device *info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (info) {
		info->index = NVKM_SUBDEV_NR;
		info->addr = 0;
		info->fault = -1;
		info->engine = -1;
		info->runlist = -1;
		info->reset = -1;
		info->intr = -1;
		list_add_tail(&info->head, &top->device);
	}
	return info;
}

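/*
 * Look up the topology entry for @index and, if it carries a valid reset
 * bit, return that bit as a mask (BIT(info->reset)).  Returns 0 if the
 * TOP subdev is absent or no matching entry is found.
 */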
u32
nvkm_top_reset(struct nvkm_device *device, enum nvkm_devidx index)
{
	struct nvkm_top *top = device->top;
	struct nvkm_top_device *info;

	if (top) {
		list_for_each_entry(info, &top->device, head) {
			if (info->index == index && info->reset >= 0)
				return BIT(info->reset);
		}
	}

	return 0;
}

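/*
 * As above, but return the interrupt bit mask recorded for @devidx, or 0
 * if the TOP subdev is absent or the unit has no interrupt bit.
 */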
u32
nvkm_top_intr_mask(struct nvkm_device *device, enum nvkm_devidx devidx)
{
	struct nvkm_top *top = device->top;
	struct nvkm_top_device *info;

	if (top) {
		list_for_each_entry(info, &top->device, head) {
			if (info->index == devidx && info->intr >= 0)
				return BIT(info->intr);
		}
	}

	return 0;
}

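/*
 * Translate a raw interrupt status word into a bitmask of subdev indices
 * (*psubdevs).  Bits that map to a known unit are accumulated in @handled;
 * the return value is the set of interrupt bits that remain unrecognised.
 */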
u32
nvkm_top_intr(struct nvkm_device *device, u32 intr, u64 *psubdevs)
{
	struct nvkm_top *top = device->top;
	struct nvkm_top_device *info;
	u64 subdevs = 0;
	u32 handled = 0;

	if (top) {
		list_for_each_entry(info, &top->device, head) {
			if (info->index != NVKM_SUBDEV_NR && info->intr >= 0) {
				if (intr & BIT(info->intr)) {
					subdevs |= BIT_ULL(info->index);
					handled |= BIT(info->intr);
				}
			}
		}
	}

	*psubdevs = subdevs;
	return intr & ~handled;
}

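/*
 * Return the fault unit ID recorded for @devidx, or -ENOENT if the unit
 * is not present in the topology list.
 */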
int
nvkm_top_fault_id(struct nvkm_device *device, enum nvkm_devidx devidx)
{
	struct nvkm_top *top = device->top;
	struct nvkm_top_device *info;

	list_for_each_entry(info, &top->device, head) {
		if (info->index == devidx && info->fault >= 0)
			return info->fault;
	}

	return -ENOENT;
}

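/*
 * Reverse lookup: map a fault unit ID back to the subdev that owns it.
 * Returns NVKM_SUBDEV_NR when no entry matches.
 */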
enum nvkm_devidx
nvkm_top_fault(struct nvkm_device *device, int fault)
{
	struct nvkm_top *top = device->top;
	struct nvkm_top_device *info;

	list_for_each_entry(info, &top->device, head) {
		if (info->fault == fault)
			return info->index;
	}

	return NVKM_SUBDEV_NR;
}

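/*
 * Return the devidx of the @index'th entry that has both an engine ID and
 * a runlist ID, filling in *runl and *engn.  Returns -ENODEV when @index
 * is out of range.
 */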
enum nvkm_devidx
nvkm_top_engine(struct nvkm_device *device, int index, int *runl, int *engn)
{
	struct nvkm_top *top = device->top;
	struct nvkm_top_device *info;
	int n = 0;

	list_for_each_entry(info, &top->device, head) {
		if (info->engine >= 0 && info->runlist >= 0 && n++ == index) {
			*runl = info->runlist;
			*engn = info->engine;
			return info->index;
		}
	}

	return -ENODEV;
}

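/*
 * Forward one-time initialisation to the chip-specific implementation,
 * which is expected to populate top->device with topology entries.
 */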
static int
nvkm_top_oneinit(struct nvkm_subdev *subdev)
{
	struct nvkm_top *top = nvkm_top(subdev);
	return top->func->oneinit(top);
}

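/*
 * Free every topology entry on teardown and hand the nvkm_top object
 * back to the subdev core for freeing.
 */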
static void *
nvkm_top_dtor(struct nvkm_subdev *subdev)
{
	struct nvkm_top *top = nvkm_top(subdev);
	struct nvkm_top_device *info, *temp;

	list_for_each_entry_safe(info, temp, &top->device, head) {
		list_del(&info->head);
		kfree(info);
	}

	return top;
}

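/* Generic subdev hooks shared by the chip-specific TOP implementations. */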
static const struct nvkm_subdev_func
nvkm_top = {
	.dtor = nvkm_top_dtor,
	.oneinit = nvkm_top_oneinit,
};

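/*
 * Common constructor used by the chip-specific variants: allocates the
 * nvkm_top object, initialises the subdev base and the (initially empty)
 * device list.  Returns 0 on success, -ENOMEM on allocation failure.
 */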
int
nvkm_top_new_(const struct nvkm_top_func *func, struct nvkm_device *device,
	      int index, struct nvkm_top **ptop)
{
	struct nvkm_top *top;
	if (!(top = *ptop = kzalloc(sizeof(*top), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_subdev_ctor(&nvkm_top, device, index, &top->subdev);
	top->func = func;
	INIT_LIST_HEAD(&top->device);
	return 0;
}
183