/*
 * Copyright 2013-2014 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <core/event.h>
#include <core/subdev.h>

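/* Drop a reference on each event type set in @types for index @index.
 * When a type's refcount reaches zero, nobody is listening for it any
 * more, so the implementation's fini() hook (if any) is called to stop
 * delivery of that event.  Caller must hold event->refs_lock.
 */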
static void
nvkm_event_put(struct nvkm_event *event, u32 types, int index)
{
	assert_spin_locked(&event->refs_lock);

	nvkm_trace(event->subdev, "event: decr %08x on %d\n", types, index);

	while (types) {
		int type = __ffs(types); types &= ~(1 << type);
		if (--event->refs[index * event->types_nr + type] == 0) {
			nvkm_trace(event->subdev, "event: blocking %d on %d\n", type, index);
			if (event->func->fini)
				event->func->fini(event, 1 << type, index);
		}
	}
}

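/* Take a reference on each event type set in @types for index @index.
 * On the first reference to a type, the implementation's init() hook
 * (if any) is called to enable delivery of that event.  Caller must
 * hold event->refs_lock.
 */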
static void
nvkm_event_get(struct nvkm_event *event, u32 types, int index)
{
	assert_spin_locked(&event->refs_lock);

	nvkm_trace(event->subdev, "event: incr %08x on %d\n", types, index);

	while (types) {
		int type = __ffs(types); types &= ~(1 << type);
		if (++event->refs[index * event->types_nr + type] == 1) {
			nvkm_trace(event->subdev, "event: allowing %d on %d\n", type, index);
			if (event->func->init)
				event->func->init(event, 1 << type, index);
		}
	}
}

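/* Bring ntfy->running into line with ntfy->allowed under refs_lock,
 * taking or dropping the notifier's event-type references as it
 * transitions between the allowed and blocked states.
 */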
static void
nvkm_event_ntfy_state(struct nvkm_event_ntfy *ntfy)
{
	struct nvkm_event *event = ntfy->event;
	unsigned long flags;

	nvkm_trace(event->subdev, "event: ntfy state changed\n");
	spin_lock_irqsave(&event->refs_lock, flags);

	if (atomic_read(&ntfy->allowed) != ntfy->running) {
		if (ntfy->running) {
			nvkm_event_put(ntfy->event, ntfy->bits, ntfy->id);
			ntfy->running = false;
		} else {
			nvkm_event_get(ntfy->event, ntfy->bits, ntfy->id);
			ntfy->running = true;
		}
	}

	spin_unlock_irqrestore(&event->refs_lock, flags);
}

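/* Unlink a notifier from its event's dispatch list.  Taking list_lock
 * for writing also ensures nvkm_event_ntfy() is not mid-dispatch when
 * this returns.
 */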
static void
nvkm_event_ntfy_remove(struct nvkm_event_ntfy *ntfy)
{
	write_lock_irq(&ntfy->event->list_lock);
	list_del_init(&ntfy->head);
	write_unlock_irq(&ntfy->event->list_lock);
}

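/* Link a notifier onto its event's dispatch list, making it visible
 * to nvkm_event_ntfy().
 */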
static void
nvkm_event_ntfy_insert(struct nvkm_event_ntfy *ntfy)
{
	write_lock_irq(&ntfy->event->list_lock);
	list_add_tail(&ntfy->head, &ntfy->event->ntfy);
	write_unlock_irq(&ntfy->event->list_lock);
}

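/* Block a notifier: atomically clear its allowed flag and, if it was
 * set, drop the event-type references.  With @wait, the notifier is
 * also unlinked from the dispatch list, which cannot complete while a
 * handler invocation is still in flight.
 */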
static void
nvkm_event_ntfy_block_(struct nvkm_event_ntfy *ntfy, bool wait)
{
	struct nvkm_subdev *subdev = ntfy->event->subdev;

	nvkm_trace(subdev, "event: ntfy block %08x on %d wait:%d\n", ntfy->bits, ntfy->id, wait);

	if (atomic_xchg(&ntfy->allowed, 0) == 1) {
		nvkm_event_ntfy_state(ntfy);
		if (wait)
			nvkm_event_ntfy_remove(ntfy);
	}
}

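/* Block @ntfy if it is still attached to an event, waiting for any
 * in-flight handler when the notifier was added with wait=true.
 */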
void
nvkm_event_ntfy_block(struct nvkm_event_ntfy *ntfy)
{
	if (ntfy->event)
		nvkm_event_ntfy_block_(ntfy, ntfy->wait);
}

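/* Allow a notifier: atomically set its allowed flag and, if it was
 * clear, take the event-type references.  Wait-style notifiers only
 * sit on the dispatch list while allowed, so insert here.
 */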
void
nvkm_event_ntfy_allow(struct nvkm_event_ntfy *ntfy)
{
	nvkm_trace(ntfy->event->subdev, "event: ntfy allow %08x on %d\n", ntfy->bits, ntfy->id);

	if (atomic_xchg(&ntfy->allowed, 1) == 0) {
		nvkm_event_ntfy_state(ntfy);
		if (ntfy->wait)
			nvkm_event_ntfy_insert(ntfy);
	}
}

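/* Detach a notifier from its event entirely.  ntfy->event is cleared
 * afterwards, so calling this twice is harmless.
 */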
void
nvkm_event_ntfy_del(struct nvkm_event_ntfy *ntfy)
{
	struct nvkm_event *event = ntfy->event;

	if (!event)
		return;

	nvkm_trace(event->subdev, "event: ntfy del %08x on %d\n", ntfy->bits, ntfy->id);

	nvkm_event_ntfy_block_(ntfy, false);
	nvkm_event_ntfy_remove(ntfy);
	ntfy->event = NULL;
}

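/* Initialise @ntfy and attach it to @event for index @id and the event
 * types in @bits.  The notifier starts out blocked; @func only runs
 * once nvkm_event_ntfy_allow() has been called.  Non-wait notifiers
 * stay on the dispatch list permanently, gated by the allowed flag.
 *
 * A minimal usage sketch (my_event, my_ntfy, MY_BIT and my_handler are
 * hypothetical names, for illustration only):
 *
 *	static int my_handler(struct nvkm_event_ntfy *ntfy, u32 bits)
 *	{
 *		return 0; /­* return value unused by the dispatcher below *­/
 *	}
 *
 *	nvkm_event_ntfy_add(&my_event, 0, MY_BIT, false, my_handler, &my_ntfy);
 *	nvkm_event_ntfy_allow(&my_ntfy);
 */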
void
nvkm_event_ntfy_add(struct nvkm_event *event, int id, u32 bits, bool wait, nvkm_event_func func,
		    struct nvkm_event_ntfy *ntfy)
{
	nvkm_trace(event->subdev, "event: ntfy add %08x on %d wait:%d\n", bits, id, wait);

	ntfy->event = event;
	ntfy->id = id;
	ntfy->bits = bits;
	ntfy->wait = wait;
	ntfy->func = func;
	atomic_set(&ntfy->allowed, 0);
	ntfy->running = false;
	INIT_LIST_HEAD(&ntfy->head);
	if (!ntfy->wait)
		nvkm_event_ntfy_insert(ntfy);
}

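/* Default validation hook: every (id, bits) combination is accepted.
 * Implementations wanting stricter checks presumably filter before
 * calling into this layer.
 */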
bool
nvkm_event_ntfy_valid(struct nvkm_event *event, int id, u32 bits)
{
	return true;
}

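/* Dispatch an event: invoke the handler of every allowed notifier
 * registered for index @id whose interest mask overlaps @bits.  Runs
 * with list_lock read-held and interrupts off, so handlers must not
 * sleep.
 */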
void
nvkm_event_ntfy(struct nvkm_event *event, int id, u32 bits)
{
	struct nvkm_event_ntfy *ntfy, *ntmp;
	unsigned long flags;

	if (!event->refs || WARN_ON(id >= event->index_nr))
		return;

	nvkm_trace(event->subdev, "event: ntfy %08x on %d\n", bits, id);
	read_lock_irqsave(&event->list_lock, flags);

	list_for_each_entry_safe(ntfy, ntmp, &event->ntfy, head) {
		if (ntfy->id == id && ntfy->bits & bits) {
			if (atomic_read(&ntfy->allowed))
				ntfy->func(ntfy, ntfy->bits & bits);
		}
	}

	read_unlock_irqrestore(&event->list_lock, flags);
}

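/* Tear down an event: free the per-index, per-type refcount array
 * allocated by __nvkm_event_init().
 */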
void
nvkm_event_fini(struct nvkm_event *event)
{
	if (event->refs) {
		kfree(event->refs);
		event->refs = NULL;
	}
}

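/* Initialise an event: allocate a zeroed refcount array with one
 * counter per (index, type) pair and record the implementation hooks
 * and dimensions used by the rest of this file.
 */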
int
__nvkm_event_init(const struct nvkm_event_func *func, struct nvkm_subdev *subdev,
		  int types_nr, int index_nr, struct nvkm_event *event)
{
	event->refs = kzalloc(array3_size(index_nr, types_nr, sizeof(*event->refs)), GFP_KERNEL);
	if (!event->refs)
		return -ENOMEM;

	event->func = func;
	event->subdev = subdev;
	event->types_nr = types_nr;
	event->index_nr = index_nr;
	INIT_LIST_HEAD(&event->ntfy);
	return 0;
}