/* xref: /linux/drivers/media/v4l2-core/v4l2-async.c (revision ca55b2fef3a9373fcfc30f82fd26bc7fccbda732) */
/*
 * V4L2 asynchronous subdevice registration API
 *
 * Copyright (C) 2012-2013, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
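
/*
 * Overview: a bridge driver describes the sub-devices it needs with an array
 * of struct v4l2_async_subdev match entries and registers a notifier for
 * them; sub-device drivers register themselves independently with
 * v4l2_async_register_subdev().  Whenever the two sides match, the sub-device
 * is registered with the bridge's struct v4l2_device and the notifier's
 * callbacks are invoked.  The .bound, .complete and .unbind callbacks are all
 * optional.
 *
 * Minimal usage sketch (illustrative only; the names and values below are
 * made up):
 *
 *	static struct v4l2_async_subdev sensor_asd = {
 *		.match_type = V4L2_ASYNC_MATCH_I2C,
 *		.match.i2c = {
 *			.adapter_id = 0,
 *			.address = 0x36,
 *		},
 *	};
 *
 *	static struct v4l2_async_subdev *bridge_asds[] = { &sensor_asd };
 *
 *	static struct v4l2_async_notifier bridge_notifier = {
 *		.subdevs = bridge_asds,
 *		.num_subdevs = ARRAY_SIZE(bridge_asds),
 *		.bound = bridge_bound,
 *		.complete = bridge_complete,
 *		.unbind = bridge_unbind,
 *	};
 *
 *	In the bridge driver's probe():
 *		ret = v4l2_async_notifier_register(&bridge_v4l2_dev, &bridge_notifier);
 *
 *	In the sub-device driver's probe():
 *		ret = v4l2_async_register_subdev(&sensor_sd);
 */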

#include <linux/device.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/types.h>

#include <media/v4l2-async.h>
#include <media/v4l2-device.h>
#include <media/v4l2-subdev.h>

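/*
 * Matching helpers: each returns true when the registered sub-device @sd
 * corresponds to the async match description @asd.
 */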
static bool match_i2c(struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
{
#if IS_ENABLED(CONFIG_I2C)
	struct i2c_client *client = i2c_verify_client(sd->dev);

	return client &&
		asd->match.i2c.adapter_id == client->adapter->nr &&
		asd->match.i2c.address == client->addr;
#else
	return false;
#endif
}

static bool match_devname(struct v4l2_subdev *sd,
			  struct v4l2_async_subdev *asd)
{
	return !strcmp(asd->match.device_name.name, dev_name(sd->dev));
}

static bool match_of(struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
{
	return sd->of_node == asd->match.of.node;
}

static bool match_custom(struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
{
	if (!asd->match.custom.match)
		/* Match always */
		return true;

	return asd->match.custom.match(sd->dev, asd);
}

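/*
 * Global registries, protected by list_lock: sub-devices that have not yet
 * been claimed by any notifier, and all registered notifiers (including
 * completed ones).
 */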
static LIST_HEAD(subdev_list);
static LIST_HEAD(notifier_list);
static DEFINE_MUTEX(list_lock);

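/*
 * Look for an entry on the notifier's waiting list that matches the
 * sub-device @sd; return it, or NULL if this notifier is not interested.
 * Called with list_lock held.
 */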
static struct v4l2_async_subdev *v4l2_async_belongs(struct v4l2_async_notifier *notifier,
						    struct v4l2_subdev *sd)
{
	bool (*match)(struct v4l2_subdev *, struct v4l2_async_subdev *);
	struct v4l2_async_subdev *asd;

	list_for_each_entry(asd, &notifier->waiting, list) {
		/* match_type has been verified valid at notifier registration */
		switch (asd->match_type) {
		case V4L2_ASYNC_MATCH_CUSTOM:
			match = match_custom;
			break;
		case V4L2_ASYNC_MATCH_DEVNAME:
			match = match_devname;
			break;
		case V4L2_ASYNC_MATCH_I2C:
			match = match_i2c;
			break;
		case V4L2_ASYNC_MATCH_OF:
			match = match_of;
			break;
		default:
			/* Cannot happen, unless someone breaks us */
			WARN_ON(true);
			return NULL;
		}

		/* match cannot be NULL here */
		if (match(sd, asd))
			return asd;
	}

	return NULL;
}

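/*
 * A matching sub-device has been found: take @asd off the waiting list,
 * bind @sd to the notifier, register it with the notifier's v4l2_device and,
 * once nothing is left waiting, call the notifier's complete() callback.
 * Called with list_lock held.
 */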
static int v4l2_async_test_notify(struct v4l2_async_notifier *notifier,
				  struct v4l2_subdev *sd,
				  struct v4l2_async_subdev *asd)
{
	int ret;

	/* Remove from the waiting list */
	list_del(&asd->list);
	sd->asd = asd;
	sd->notifier = notifier;

	if (notifier->bound) {
		ret = notifier->bound(notifier, sd, asd);
		if (ret < 0)
			return ret;
	}

	/* Move from the global subdevice list to notifier's done */
	list_move(&sd->async_list, &notifier->done);

	ret = v4l2_device_register_subdev(notifier->v4l2_dev, sd);
	if (ret < 0) {
		if (notifier->unbind)
			notifier->unbind(notifier, sd, asd);
		return ret;
	}

	if (list_empty(&notifier->waiting) && notifier->complete)
		return notifier->complete(notifier);

	return 0;
}

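/*
 * Detach @sd from its v4l2_device and from whichever async list it is on,
 * and clear its async bookkeeping so the sub-device driver can re-register.
 */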
static void v4l2_async_cleanup(struct v4l2_subdev *sd)
{
	v4l2_device_unregister_subdev(sd);
	/* Subdevice driver will reprobe and put the subdev back onto the list */
	list_del_init(&sd->async_list);
	sd->asd = NULL;
	sd->dev = NULL;
}

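/*
 * Register a notifier on behalf of a bridge driver: validate its match
 * entries, put them on the waiting list, and immediately bind any already
 * registered sub-devices that match.
 */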
int v4l2_async_notifier_register(struct v4l2_device *v4l2_dev,
				 struct v4l2_async_notifier *notifier)
{
	struct v4l2_subdev *sd, *tmp;
	struct v4l2_async_subdev *asd;
	int i;

	if (!notifier->num_subdevs || notifier->num_subdevs > V4L2_MAX_SUBDEVS)
		return -EINVAL;

	notifier->v4l2_dev = v4l2_dev;
	INIT_LIST_HEAD(&notifier->waiting);
	INIT_LIST_HEAD(&notifier->done);

	for (i = 0; i < notifier->num_subdevs; i++) {
		asd = notifier->subdevs[i];

		switch (asd->match_type) {
		case V4L2_ASYNC_MATCH_CUSTOM:
		case V4L2_ASYNC_MATCH_DEVNAME:
		case V4L2_ASYNC_MATCH_I2C:
		case V4L2_ASYNC_MATCH_OF:
			break;
		default:
			dev_err(notifier->v4l2_dev ? notifier->v4l2_dev->dev : NULL,
				"Invalid match type %u on %p\n",
				asd->match_type, asd);
			return -EINVAL;
		}
		list_add_tail(&asd->list, &notifier->waiting);
	}

	mutex_lock(&list_lock);

	/* Keep also completed notifiers on the list */
	list_add(&notifier->list, &notifier_list);

	list_for_each_entry_safe(sd, tmp, &subdev_list, async_list) {
		int ret;

		asd = v4l2_async_belongs(notifier, sd);
		if (!asd)
			continue;

		ret = v4l2_async_test_notify(notifier, sd, asd);
		if (ret < 0) {
			mutex_unlock(&list_lock);
			return ret;
		}
	}

	mutex_unlock(&list_lock);

	return 0;
}
EXPORT_SYMBOL(v4l2_async_notifier_register);

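/*
 * Tear a notifier down: unbind and unregister every sub-device it had bound,
 * release and re-probe their drivers, and drop the notifier from the global
 * list.
 */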
void v4l2_async_notifier_unregister(struct v4l2_async_notifier *notifier)
{
	struct v4l2_subdev *sd, *tmp;
	unsigned int notif_n_subdev = notifier->num_subdevs;
	unsigned int n_subdev = min(notif_n_subdev, V4L2_MAX_SUBDEVS);
	struct device **dev;
	int i = 0;

	if (!notifier->v4l2_dev)
		return;

	dev = kmalloc(n_subdev * sizeof(*dev), GFP_KERNEL);
	if (!dev) {
		dev_err(notifier->v4l2_dev->dev,
			"Failed to allocate device cache!\n");
	}

	mutex_lock(&list_lock);

	list_del(&notifier->list);

	list_for_each_entry_safe(sd, tmp, &notifier->done, async_list) {
		/* Cache asd: v4l2_async_cleanup() below clears sd->asd */
		struct v4l2_async_subdev *asd = sd->asd;
		struct device *d;

		d = get_device(sd->dev);

		v4l2_async_cleanup(sd);

		/* If we handled USB devices, we'd have to lock the parent too */
		device_release_driver(d);

		if (notifier->unbind)
			notifier->unbind(notifier, sd, asd);

		/*
		 * Store the device in the device cache so that put_device()
		 * can be called on it in the final step below.
		 */
		if (dev)
			dev[i++] = d;
		else
			put_device(d);
	}

	mutex_unlock(&list_lock);

	/*
	 * Call device_attach() to reprobe devices
	 *
	 * NOTE: If dev allocation fails, i is 0, and the whole loop won't be
	 * executed.
	 */
	while (i--) {
		struct device *d = dev[i];

		if (d && device_attach(d) < 0) {
			const char *name = "(none)";
			int lock = device_trylock(d);

			if (lock && d->driver)
				name = d->driver->name;
			dev_err(d, "Failed to re-probe to %s\n", name);
			if (lock)
				device_unlock(d);
		}
		put_device(d);
	}
	kfree(dev);

	notifier->v4l2_dev = NULL;

	/*
	 * Don't care about the waiting list, it is initialised and populated
	 * upon notifier registration.
	 */
}
EXPORT_SYMBOL(v4l2_async_notifier_unregister);

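/*
 * Called by sub-device drivers: try every registered notifier and bind to
 * the first one that matches; otherwise park the sub-device on the global
 * list until a matching notifier shows up.
 */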
int v4l2_async_register_subdev(struct v4l2_subdev *sd)
{
	struct v4l2_async_notifier *notifier;

	/*
	 * No reference taken. The reference is held by the device
	 * (struct v4l2_subdev.dev), and the async sub-device does not
	 * exist independently of the device at any point of time.
	 */
	if (!sd->of_node && sd->dev)
		sd->of_node = sd->dev->of_node;

	mutex_lock(&list_lock);

	INIT_LIST_HEAD(&sd->async_list);

	list_for_each_entry(notifier, &notifier_list, list) {
		struct v4l2_async_subdev *asd = v4l2_async_belongs(notifier, sd);

		if (asd) {
			int ret = v4l2_async_test_notify(notifier, sd, asd);

			mutex_unlock(&list_lock);
			return ret;
		}
	}

	/* None matched, wait for hot-plugging */
	list_add(&sd->async_list, &subdev_list);

	mutex_unlock(&list_lock);

	return 0;
}
EXPORT_SYMBOL(v4l2_async_register_subdev);

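/*
 * Undo v4l2_async_register_subdev(): detach the sub-device from its notifier
 * (returning its async match entry to the waiting list), or simply take it
 * off the global list if it was never bound.
 */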
void v4l2_async_unregister_subdev(struct v4l2_subdev *sd)
{
	struct v4l2_async_notifier *notifier = sd->notifier;
	/* Cache asd: v4l2_async_cleanup() below clears sd->asd */
	struct v4l2_async_subdev *asd = sd->asd;

	if (!asd) {
		if (!list_empty(&sd->async_list))
			v4l2_async_cleanup(sd);
		return;
	}

	mutex_lock(&list_lock);

	/* Hand the async match entry back to the notifier's waiting list */
	list_add(&asd->list, &notifier->waiting);

	v4l2_async_cleanup(sd);

	if (notifier->unbind)
		notifier->unbind(notifier, sd, asd);

	mutex_unlock(&list_lock);
}
EXPORT_SYMBOL(v4l2_async_unregister_subdev);