/*
 * v4l2-event.c
 *
 * V4L2 events.
 *
 * Copyright (C) 2009--2010 Nokia Corporation.
 *
 * Contact: Sakari Ailus <sakari.ailus@iki.fi>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/export.h>

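/*
 * sev->events is used as a circular buffer: sev->first indexes the oldest
 * queued event and sev->elems is the ring size. sev_pos() turns a logical
 * offset into a ring index, wrapping at the end of the array. For example,
 * with first == 3 and elems == 4, sev_pos(sev, 2) yields (3 + 2) - 4 == 1.
 */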
static unsigned sev_pos(const struct v4l2_subscribed_event *sev, unsigned idx)
{
	idx += sev->first;
	return idx >= sev->elems ? idx - sev->elems : idx;
}

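/*
 * Pop the oldest available event off the file handle, copying it to
 * *event and releasing its slot in the subscription's ring buffer.
 * Returns -ENOENT if no event is queued.
 */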
static int __v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event)
{
	struct v4l2_kevent *kev;
	unsigned long flags;

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);

	if (list_empty(&fh->available)) {
		spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
		return -ENOENT;
	}

	WARN_ON(fh->navailable == 0);

	kev = list_first_entry(&fh->available, struct v4l2_kevent, list);
	list_del(&kev->list);
	fh->navailable--;

	kev->event.pending = fh->navailable;
	*event = kev->event;
	kev->sev->first = sev_pos(kev->sev, 1);
	kev->sev->in_use--;

	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	return 0;
}

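/*
 * Dequeue one event, blocking until one becomes available unless
 * @nonblocking is set. The serialization mutex (fh->vdev->lock), if the
 * driver uses one, is dropped across the wait so that other file
 * operations can make progress and eventually queue an event.
 *
 * A minimal usage sketch from a hypothetical VIDIOC_DQEVENT path (the
 * my_dqevent name is illustrative, not part of this file, and assumes
 * file->private_data holds the struct v4l2_fh):
 *
 *	static int my_dqevent(struct file *file, void *priv,
 *			      struct v4l2_event *ev)
 *	{
 *		return v4l2_event_dequeue(file->private_data, ev,
 *					  file->f_flags & O_NONBLOCK);
 *	}
 */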
int v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event,
		       int nonblocking)
{
	int ret;

	if (nonblocking)
		return __v4l2_event_dequeue(fh, event);

	/* Release the vdev lock while waiting */
	if (fh->vdev->lock)
		mutex_unlock(fh->vdev->lock);

	do {
		ret = wait_event_interruptible(fh->wait,
					       fh->navailable != 0);
		if (ret < 0)
			break;

		ret = __v4l2_event_dequeue(fh, event);
	} while (ret == -ENOENT);

	if (fh->vdev->lock)
		mutex_lock(fh->vdev->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_event_dequeue);

/* Caller must hold fh->vdev->fh_lock! */
static struct v4l2_subscribed_event *v4l2_event_subscribed(
		struct v4l2_fh *fh, u32 type, u32 id)
{
	struct v4l2_subscribed_event *sev;

	assert_spin_locked(&fh->vdev->fh_lock);

	list_for_each_entry(sev, &fh->subscribed, list)
		if (sev->type == type && sev->id == id)
			return sev;

	return NULL;
}

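/*
 * Deliver one event to a single file handle, if it is subscribed.
 *
 * When the subscription's ring is full the oldest event is dropped to
 * make room. Two optional ops let a subscription avoid losing state in
 * that case: with a single-element ring, ->replace() folds the new
 * payload into the one stored slot; otherwise ->merge() folds the
 * dropped payload into the next-oldest event. For example, the
 * source-change ops at the bottom of this file OR the "changes" flags
 * together so a dropped V4L2_EVENT_SOURCE_CHANGE never loses a change bit.
 */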
static void __v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev,
		const struct timespec *ts)
{
	struct v4l2_subscribed_event *sev;
	struct v4l2_kevent *kev;
	bool copy_payload = true;

	/* Are we subscribed? */
	sev = v4l2_event_subscribed(fh, ev->type, ev->id);
	if (sev == NULL)
		return;

	/*
	 * If the event has been added to the fh->subscribed list, but its
	 * add op has not completed yet, elems will be 0; treat this as
	 * not being subscribed.
	 */
	if (!sev->elems)
		return;

	/* Increase the event sequence number on this fh. */
	fh->sequence++;

	/* Do we have any free events? */
	if (sev->in_use == sev->elems) {
		/* No, remove the oldest one. */
		kev = sev->events + sev_pos(sev, 0);
		list_del(&kev->list);
		sev->in_use--;
		sev->first = sev_pos(sev, 1);
		fh->navailable--;
		if (sev->elems == 1) {
			if (sev->ops && sev->ops->replace) {
				sev->ops->replace(&kev->event, ev);
				copy_payload = false;
			}
		} else if (sev->ops && sev->ops->merge) {
			struct v4l2_kevent *second_oldest =
				sev->events + sev_pos(sev, 0);
			sev->ops->merge(&kev->event, &second_oldest->event);
		}
	}

	/* Take one and fill it. */
	kev = sev->events + sev_pos(sev, sev->in_use);
	kev->event.type = ev->type;
	if (copy_payload)
		kev->event.u = ev->u;
	kev->event.id = ev->id;
	kev->event.timestamp = *ts;
	kev->event.sequence = fh->sequence;
	sev->in_use++;
	list_add_tail(&kev->list, &fh->available);

	fh->navailable++;

	wake_up_all(&fh->wait);
}

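/*
 * Broadcast an event to every file handle currently open on the video
 * device. The timestamp is taken once, so all receivers see the same one.
 *
 * A minimal sketch of how a driver might report end-of-stream (vdev is
 * whatever struct video_device the driver registered):
 *
 *	static const struct v4l2_event eos_ev = {
 *		.type = V4L2_EVENT_EOS,
 *	};
 *	v4l2_event_queue(vdev, &eos_ev);
 */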
void v4l2_event_queue(struct video_device *vdev, const struct v4l2_event *ev)
{
	struct v4l2_fh *fh;
	unsigned long flags;
	struct timespec timestamp;

	if (vdev == NULL)
		return;

	ktime_get_ts(&timestamp);

	spin_lock_irqsave(&vdev->fh_lock, flags);

	list_for_each_entry(fh, &vdev->fh_list, list)
		__v4l2_event_queue_fh(fh, ev, &timestamp);

	spin_unlock_irqrestore(&vdev->fh_lock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_event_queue);

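/*
 * Like v4l2_event_queue(), but the event is delivered only to the given
 * file handle rather than to all handles open on the device.
 */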
void v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev)
{
	unsigned long flags;
	struct timespec timestamp;

	ktime_get_ts(&timestamp);

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);
	__v4l2_event_queue_fh(fh, ev, &timestamp);
	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_event_queue_fh);

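/*
 * Number of events ready to be dequeued on this file handle. Typically
 * consulted from a driver's .poll handler, along the lines of this
 * illustrative fragment:
 *
 *	if (v4l2_event_pending(fh))
 *		mask |= POLLPRI;
 */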
int v4l2_event_pending(struct v4l2_fh *fh)
{
	return fh->navailable;
}
EXPORT_SYMBOL_GPL(v4l2_event_pending);

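/*
 * Subscribe the file handle to the given event type/id, allocating a ring
 * of @elems kevents for it. If @ops->add is set and fails, the
 * subscription is rolled back; subscribing twice to the same type/id is a
 * no-op.
 *
 * A minimal sketch of a driver's .vidioc_subscribe_event handler (the
 * my_subscribe_event name is illustrative, not part of this file):
 *
 *	static int my_subscribe_event(struct v4l2_fh *fh,
 *				      const struct v4l2_event_subscription *sub)
 *	{
 *		switch (sub->type) {
 *		case V4L2_EVENT_EOS:
 *			return v4l2_event_subscribe(fh, sub, 2, NULL);
 *		case V4L2_EVENT_SOURCE_CHANGE:
 *			return v4l2_src_change_event_subscribe(fh, sub);
 *		default:
 *			return -EINVAL;
 *		}
 *	}
 */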
int v4l2_event_subscribe(struct v4l2_fh *fh,
			 const struct v4l2_event_subscription *sub, unsigned elems,
			 const struct v4l2_subscribed_event_ops *ops)
{
	struct v4l2_subscribed_event *sev, *found_ev;
	unsigned long flags;
	unsigned i;

	if (sub->type == V4L2_EVENT_ALL)
		return -EINVAL;

	if (elems < 1)
		elems = 1;

	sev = kzalloc(sizeof(*sev) + sizeof(struct v4l2_kevent) * elems, GFP_KERNEL);
	if (!sev)
		return -ENOMEM;
	for (i = 0; i < elems; i++)
		sev->events[i].sev = sev;
	sev->type = sub->type;
	sev->id = sub->id;
	sev->flags = sub->flags;
	sev->fh = fh;
	sev->ops = ops;

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);
	found_ev = v4l2_event_subscribed(fh, sub->type, sub->id);
	if (!found_ev)
		list_add(&sev->list, &fh->subscribed);
	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	if (found_ev) {
		kfree(sev);
		return 0; /* Already listening */
	}

	if (sev->ops && sev->ops->add) {
		int ret = sev->ops->add(sev, elems);
		if (ret) {
			sev->ops = NULL;
			v4l2_event_unsubscribe(fh, sub);
			return ret;
		}
	}

	/* Mark as ready for use */
	sev->elems = elems;

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_event_subscribe);

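/*
 * Drop every remaining subscription on the file handle, one at a time.
 * This is what a V4L2_EVENT_ALL unsubscribe resolves to, and it is also
 * run when a file handle is torn down (see v4l2_fh_exit()).
 */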
void v4l2_event_unsubscribe_all(struct v4l2_fh *fh)
{
	struct v4l2_event_subscription sub;
	struct v4l2_subscribed_event *sev;
	unsigned long flags;

	do {
		sev = NULL;

		spin_lock_irqsave(&fh->vdev->fh_lock, flags);
		if (!list_empty(&fh->subscribed)) {
			sev = list_first_entry(&fh->subscribed,
					struct v4l2_subscribed_event, list);
			sub.type = sev->type;
			sub.id = sev->id;
		}
		spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
		if (sev)
			v4l2_event_unsubscribe(fh, &sub);
	} while (sev);
}
EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe_all);

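/*
 * Unsubscribe from one event type/id, discarding any of its events that
 * are still queued on the file handle. V4L2_EVENT_ALL removes every
 * subscription instead.
 */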
int v4l2_event_unsubscribe(struct v4l2_fh *fh,
			   const struct v4l2_event_subscription *sub)
{
	struct v4l2_subscribed_event *sev;
	unsigned long flags;
	int i;

	if (sub->type == V4L2_EVENT_ALL) {
		v4l2_event_unsubscribe_all(fh);
		return 0;
	}

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);

	sev = v4l2_event_subscribed(fh, sub->type, sub->id);
	if (sev != NULL) {
		/* Remove any pending events for this subscription */
		for (i = 0; i < sev->in_use; i++) {
			list_del(&sev->events[sev_pos(sev, i)].list);
			fh->navailable--;
		}
		list_del(&sev->list);
	}

	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	if (sev && sev->ops && sev->ops->del)
		sev->ops->del(sev);

	kfree(sev);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe);

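/*
 * Convenience wrapper with the signature expected of a subdev core op,
 * so subdev drivers can plug it in directly as .unsubscribe_event.
 */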
int v4l2_event_subdev_unsubscribe(struct v4l2_subdev *sd, struct v4l2_fh *fh,
				  struct v4l2_event_subscription *sub)
{
	return v4l2_event_unsubscribe(fh, sub);
}
EXPORT_SYMBOL_GPL(v4l2_event_subdev_unsubscribe);

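/*
 * Replace/merge ops for V4L2_EVENT_SOURCE_CHANGE. Both OR the "changes"
 * bitmasks together, so when an event has to be dropped from a full ring
 * the surviving event still reports every change that occurred. E.g. a
 * dropped event carrying V4L2_EVENT_SRC_CH_RESOLUTION leaves that bit
 * set in the event the application eventually dequeues.
 */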
static void v4l2_event_src_replace(struct v4l2_event *old,
				const struct v4l2_event *new)
{
	u32 old_changes = old->u.src_change.changes;

	old->u.src_change = new->u.src_change;
	old->u.src_change.changes |= old_changes;
}

static void v4l2_event_src_merge(const struct v4l2_event *old,
				struct v4l2_event *new)
{
	new->u.src_change.changes |= old->u.src_change.changes;
}

static const struct v4l2_subscribed_event_ops v4l2_event_src_ch_ops = {
	.replace = v4l2_event_src_replace,
	.merge = v4l2_event_src_merge,
};

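/*
 * Helper for subscribing to V4L2_EVENT_SOURCE_CHANGE with the lossless
 * replace/merge ops above. A driver that detected, say, a resolution
 * change would then report it with something like this illustrative
 * fragment:
 *
 *	const struct v4l2_event ev = {
 *		.type = V4L2_EVENT_SOURCE_CHANGE,
 *		.u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION,
 *	};
 *	v4l2_event_queue(vdev, &ev);
 */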
int v4l2_src_change_event_subscribe(struct v4l2_fh *fh,
				const struct v4l2_event_subscription *sub)
{
	if (sub->type == V4L2_EVENT_SOURCE_CHANGE)
		return v4l2_event_subscribe(fh, sub, 0, &v4l2_event_src_ch_ops);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(v4l2_src_change_event_subscribe);

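/*
 * Subdev-flavoured wrapper, suitable as a subdev core .subscribe_event op.
 */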
int v4l2_src_change_event_subdev_subscribe(struct v4l2_subdev *sd,
		struct v4l2_fh *fh, struct v4l2_event_subscription *sub)
{
	return v4l2_src_change_event_subscribe(fh, sub);
}
EXPORT_SYMBOL_GPL(v4l2_src_change_event_subdev_subscribe);