xref: /linux/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c (revision 4e0ae876f77bc01a7e77724dea57b4b82bd53244)
1 /*
2  * Copyright 2018 Red Hat Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  */
22 #include "priv.h"
23 
24 #include <core/memory.h>
25 #include <core/notify.h>
26 
27 static void
28 nvkm_fault_ntfy_fini(struct nvkm_event *event, int type, int index)
29 {
30 	struct nvkm_fault *fault = container_of(event, typeof(*fault), event);
31 	fault->func->buffer.intr(fault->buffer[index], false);
32 }
33 
34 static void
35 nvkm_fault_ntfy_init(struct nvkm_event *event, int type, int index)
36 {
37 	struct nvkm_fault *fault = container_of(event, typeof(*fault), event);
38 	fault->func->buffer.intr(fault->buffer[index], true);
39 }
40 
41 static int
42 nvkm_fault_ntfy_ctor(struct nvkm_object *object, void *argv, u32 argc,
43 		     struct nvkm_notify *notify)
44 {
45 	struct nvkm_fault_buffer *buffer = nvkm_fault_buffer(object);
46 	if (argc == 0) {
47 		notify->size  = 0;
48 		notify->types = 1;
49 		notify->index = buffer->id;
50 		return 0;
51 	}
52 	return -ENOSYS;
53 }
54 
/* Event hooks for fault-buffer notifiers: buffer interrupts are enabled
 * while at least one notifier is armed, and disabled otherwise.
 */
static const struct nvkm_event_func
nvkm_fault_ntfy = {
	.ctor = nvkm_fault_ntfy_ctor,
	.init = nvkm_fault_ntfy_init,
	.fini = nvkm_fault_ntfy_fini,
};
61 
62 static void
63 nvkm_fault_intr(struct nvkm_subdev *subdev)
64 {
65 	struct nvkm_fault *fault = nvkm_fault(subdev);
66 	return fault->func->intr(fault);
67 }
68 
69 static int
70 nvkm_fault_fini(struct nvkm_subdev *subdev, bool suspend)
71 {
72 	struct nvkm_fault *fault = nvkm_fault(subdev);
73 	if (fault->func->fini)
74 		fault->func->fini(fault);
75 	return 0;
76 }
77 
78 static int
79 nvkm_fault_init(struct nvkm_subdev *subdev)
80 {
81 	struct nvkm_fault *fault = nvkm_fault(subdev);
82 	if (fault->func->init)
83 		fault->func->init(fault);
84 	return 0;
85 }
86 
/* Allocate and map one fault buffer.
 *
 * On success, fault->buffer[id] points at the new buffer and its backing
 * memory is pinned in BAR2.  Note that fault->buffer[id] is published
 * *before* the fallible steps below, so on failure the caller/destructor
 * must cope with a partially-constructed buffer.
 */
static int
nvkm_fault_oneinit_buffer(struct nvkm_fault *fault, int id)
{
	struct nvkm_subdev *subdev = &fault->subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_fault_buffer *buffer;
	int ret;

	if (!(buffer = kzalloc(sizeof(*buffer), GFP_KERNEL)))
		return -ENOMEM;
	buffer->fault = fault;
	buffer->id = id;
	/* Chip-specific hook; presumably fills in buffer->entries (used below). */
	fault->func->buffer.info(buffer);
	fault->buffer[id] = buffer;

	nvkm_debug(subdev, "buffer %d: %d entries\n", id, buffer->entries);

	/* Backing store for the HW-written fault entries, 4KiB aligned. */
	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, buffer->entries *
			      fault->func->buffer.entry_size, 0x1000, true,
			      &buffer->mem);
	if (ret)
		return ret;

	/* Pin fault buffer in BAR2. */
	buffer->addr = nvkm_memory_bar2(buffer->mem);
	if (buffer->addr == ~0ULL)
		return -EFAULT;

	return 0;
}
117 
118 static int
119 nvkm_fault_oneinit(struct nvkm_subdev *subdev)
120 {
121 	struct nvkm_fault *fault = nvkm_fault(subdev);
122 	int ret, i;
123 
124 	for (i = 0; i < ARRAY_SIZE(fault->buffer); i++) {
125 		if (i < fault->func->buffer.nr) {
126 			ret = nvkm_fault_oneinit_buffer(fault, i);
127 			if (ret)
128 				return ret;
129 			fault->buffer_nr = i + 1;
130 		}
131 	}
132 
133 	ret = nvkm_event_init(&nvkm_fault_ntfy, 1, fault->buffer_nr,
134 			      &fault->event);
135 	if (ret)
136 		return ret;
137 
138 	if (fault->func->oneinit)
139 		ret = fault->func->oneinit(fault);
140 	return ret;
141 }
142 
143 static void *
144 nvkm_fault_dtor(struct nvkm_subdev *subdev)
145 {
146 	struct nvkm_fault *fault = nvkm_fault(subdev);
147 	int i;
148 
149 	nvkm_event_fini(&fault->event);
150 
151 	for (i = 0; i < fault->buffer_nr; i++) {
152 		if (fault->buffer[i]) {
153 			nvkm_memory_unref(&fault->buffer[i]->mem);
154 			kfree(fault->buffer[i]);
155 		}
156 	}
157 
158 	return fault;
159 }
160 
/* Subdev dispatch table binding the generic nvkm subdev lifecycle to the
 * fault-subdev implementations above.
 */
static const struct nvkm_subdev_func
nvkm_fault = {
	.dtor = nvkm_fault_dtor,
	.oneinit = nvkm_fault_oneinit,
	.init = nvkm_fault_init,
	.fini = nvkm_fault_fini,
	.intr = nvkm_fault_intr,
};
169 
170 int
171 nvkm_fault_new_(const struct nvkm_fault_func *func, struct nvkm_device *device,
172 		int index, struct nvkm_fault **pfault)
173 {
174 	struct nvkm_fault *fault;
175 	if (!(fault = *pfault = kzalloc(sizeof(*fault), GFP_KERNEL)))
176 		return -ENOMEM;
177 	nvkm_subdev_ctor(&nvkm_fault, device, index, &fault->subdev);
178 	fault->func = func;
179 	fault->user.ctor = nvkm_ufault_new;
180 	fault->user.base = func->user.base;
181 	return 0;
182 }
183