// SPDX-License-Identifier: GPL-2.0-only
/*
 * V4L2 asynchronous subdevice registration API
 *
 * Copyright (C) 2012-2013, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
 */

#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/types.h>

#include <media/v4l2-async.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fwnode.h>
#include <media/v4l2-subdev.h>

#include "v4l2-subdev-priv.h"

static int v4l2_async_nf_call_bound(struct v4l2_async_notifier *n,
				    struct v4l2_subdev *subdev,
				    struct v4l2_async_connection *asc)
{
	if (!n->ops || !n->ops->bound)
		return 0;

	return n->ops->bound(n, subdev, asc);
}

static void v4l2_async_nf_call_unbind(struct v4l2_async_notifier *n,
				      struct v4l2_subdev *subdev,
				      struct v4l2_async_connection *asc)
{
	if (!n->ops || !n->ops->unbind)
		return;

	n->ops->unbind(n, subdev, asc);
}

static int v4l2_async_nf_call_complete(struct v4l2_async_notifier *n)
{
	if (!n->ops || !n->ops->complete)
		return 0;

	return n->ops->complete(n);
}

static void v4l2_async_nf_call_destroy(struct v4l2_async_notifier *n,
				       struct v4l2_async_connection *asc)
{
	if (!n->ops || !n->ops->destroy)
		return;

	n->ops->destroy(asc);
}

static bool match_i2c(struct v4l2_async_notifier *notifier,
		      struct v4l2_subdev *sd,
		      struct v4l2_async_match_desc *match)
{
#if IS_ENABLED(CONFIG_I2C)
	struct i2c_client *client = i2c_verify_client(sd->dev);

	return client &&
		match->i2c.adapter_id == client->adapter->nr &&
		match->i2c.address == client->addr;
#else
	return false;
#endif
}

static struct device *notifier_dev(struct v4l2_async_notifier *notifier)
{
	if (notifier->sd)
		return notifier->sd->dev;

	if (notifier->v4l2_dev)
		return notifier->v4l2_dev->dev;

	return NULL;
}

static bool
match_fwnode_one(struct v4l2_async_notifier *notifier,
		 struct v4l2_subdev *sd, struct fwnode_handle *sd_fwnode,
		 struct v4l2_async_match_desc *match)
{
	struct fwnode_handle *asd_dev_fwnode;
	bool ret;

	dev_dbg(notifier_dev(notifier),
		"v4l2-async: fwnode match: need %pfw, trying %pfw\n",
		sd_fwnode, match->fwnode);

	if (sd_fwnode == match->fwnode) {
		dev_dbg(notifier_dev(notifier),
			"v4l2-async: direct match found\n");
		return true;
	}

	if (!fwnode_graph_is_endpoint(match->fwnode)) {
		dev_dbg(notifier_dev(notifier),
			"v4l2-async: direct match not found\n");
		return false;
	}

	asd_dev_fwnode = fwnode_graph_get_port_parent(match->fwnode);

	ret = sd_fwnode == asd_dev_fwnode;

	fwnode_handle_put(asd_dev_fwnode);

	dev_dbg(notifier_dev(notifier),
		"v4l2-async: device--endpoint match %sfound\n",
		ret ? "" : "not ");

	return ret;
}

static bool match_fwnode(struct v4l2_async_notifier *notifier,
			 struct v4l2_subdev *sd,
			 struct v4l2_async_match_desc *match)
{
	dev_dbg(notifier_dev(notifier),
		"v4l2-async: matching for notifier %pfw, sd fwnode %pfw\n",
		dev_fwnode(notifier_dev(notifier)), sd->fwnode);

	if (!list_empty(&sd->async_subdev_endpoint_list)) {
		struct v4l2_async_subdev_endpoint *ase;

		dev_dbg(sd->dev,
			"v4l2-async: endpoint fwnode list available, looking for %pfw\n",
			match->fwnode);

		list_for_each_entry(ase, &sd->async_subdev_endpoint_list,
				    async_subdev_endpoint_entry) {
			bool matched = ase->endpoint == match->fwnode;

			dev_dbg(sd->dev,
				"v4l2-async: endpoint-endpoint match %sfound with %pfw\n",
				matched ? "" : "not ", ase->endpoint);

			if (matched)
				return true;
		}

		dev_dbg(sd->dev, "async: no endpoint matched\n");

		return false;
	}

	if (match_fwnode_one(notifier, sd, sd->fwnode, match))
		return true;

	/* Also check the secondary fwnode. */
	if (IS_ERR_OR_NULL(sd->fwnode->secondary))
		return false;

	dev_dbg(notifier_dev(notifier),
		"v4l2-async: trying secondary fwnode match\n");

	return match_fwnode_one(notifier, sd, sd->fwnode->secondary, match);
}

static LIST_HEAD(subdev_list);
static LIST_HEAD(notifier_list);
static DEFINE_MUTEX(list_lock);

static struct v4l2_async_connection *
v4l2_async_find_match(struct v4l2_async_notifier *notifier,
		      struct v4l2_subdev *sd)
{
	bool (*match)(struct v4l2_async_notifier *notifier,
		      struct v4l2_subdev *sd,
		      struct v4l2_async_match_desc *match);
	struct v4l2_async_connection *asc;

	list_for_each_entry(asc, &notifier->waiting_list, asc_entry) {
		/* asc->match.type has been verified valid before */
		switch (asc->match.type) {
		case V4L2_ASYNC_MATCH_TYPE_I2C:
			match = match_i2c;
			break;
		case V4L2_ASYNC_MATCH_TYPE_FWNODE:
			match = match_fwnode;
			break;
		default:
			/* Cannot happen, unless someone breaks us */
			WARN_ON(true);
			return NULL;
		}

		/* match cannot be NULL here */
		if (match(notifier, sd, &asc->match))
			return asc;
	}

	return NULL;
}

/* Compare two async match descriptors for equivalence */
static bool v4l2_async_match_equal(struct v4l2_async_match_desc *match1,
				   struct v4l2_async_match_desc *match2)
{
	if (match1->type != match2->type)
		return false;

	switch (match1->type) {
	case V4L2_ASYNC_MATCH_TYPE_I2C:
		return match1->i2c.adapter_id == match2->i2c.adapter_id &&
			match1->i2c.address == match2->i2c.address;
	case V4L2_ASYNC_MATCH_TYPE_FWNODE:
		return match1->fwnode == match2->fwnode;
	default:
		break;
	}

	return false;
}

/* Find the sub-device notifier registered by a sub-device driver. */
static struct v4l2_async_notifier *
v4l2_async_find_subdev_notifier(struct v4l2_subdev *sd)
{
	struct v4l2_async_notifier *n;

	list_for_each_entry(n, &notifier_list, notifier_entry)
		if (n->sd == sd)
			return n;

	return NULL;
}

/* Get v4l2_device related to the notifier if one can be found. */
static struct v4l2_device *
v4l2_async_nf_find_v4l2_dev(struct v4l2_async_notifier *notifier)
{
	while (notifier->parent)
		notifier = notifier->parent;

	return notifier->v4l2_dev;
}

/*
 * Return true if all child sub-device notifiers are complete, false otherwise.
 */
static bool
v4l2_async_nf_can_complete(struct v4l2_async_notifier *notifier)
{
	struct v4l2_async_connection *asc;

	if (!list_empty(&notifier->waiting_list))
		return false;

	list_for_each_entry(asc, &notifier->done_list, asc_entry) {
		struct v4l2_async_notifier *subdev_notifier =
			v4l2_async_find_subdev_notifier(asc->sd);

		if (subdev_notifier &&
		    !v4l2_async_nf_can_complete(subdev_notifier))
			return false;
	}

	return true;
}

/*
 * Complete the master notifier if possible. This is done when all async
 * sub-devices have been bound; v4l2_device is also available then.
 */
static int
v4l2_async_nf_try_complete(struct v4l2_async_notifier *notifier)
{
	struct v4l2_async_notifier *__notifier = notifier;

	/* Quick check whether there are still more sub-devices here. */
	if (!list_empty(&notifier->waiting_list))
		return 0;

	if (notifier->sd)
		dev_dbg(notifier_dev(notifier),
			"v4l2-async: trying to complete\n");

	/* Check the entire notifier tree; find the root notifier first. */
	while (notifier->parent)
		notifier = notifier->parent;

	/* This is root if it has v4l2_dev. */
	if (!notifier->v4l2_dev) {
		dev_dbg(notifier_dev(__notifier),
			"v4l2-async: V4L2 device not available\n");
		return 0;
	}

	/* Is everything ready? */
	if (!v4l2_async_nf_can_complete(notifier))
		return 0;

	dev_dbg(notifier_dev(__notifier), "v4l2-async: complete\n");

	return v4l2_async_nf_call_complete(notifier);
}

static int
v4l2_async_nf_try_all_subdevs(struct v4l2_async_notifier *notifier);

static int v4l2_async_create_ancillary_links(struct v4l2_async_notifier *n,
					     struct v4l2_subdev *sd)
{
#if IS_ENABLED(CONFIG_MEDIA_CONTROLLER)
	struct media_link *link;

	if (sd->entity.function != MEDIA_ENT_F_LENS &&
	    sd->entity.function != MEDIA_ENT_F_FLASH)
		return 0;

	if (!n->sd) {
		dev_warn(notifier_dev(n),
			 "not a sub-device notifier, not creating an ancillary link for %s!\n",
			 dev_name(sd->dev));
		return 0;
	}

	link = media_create_ancillary_link(&n->sd->entity, &sd->entity);

	return IS_ERR(link) ? PTR_ERR(link) : 0;
#else
	return 0;
#endif
}

static int v4l2_async_match_notify(struct v4l2_async_notifier *notifier,
				   struct v4l2_device *v4l2_dev,
				   struct v4l2_subdev *sd,
				   struct v4l2_async_connection *asc)
{
	struct v4l2_async_notifier *subdev_notifier;
	bool registered = false;
	int ret;

	if (list_empty(&sd->asc_list)) {
		ret = __v4l2_device_register_subdev(v4l2_dev, sd, sd->owner);
		if (ret < 0)
			return ret;
		registered = true;
	}

	ret = v4l2_async_nf_call_bound(notifier, sd, asc);
	if (ret < 0) {
		if (asc->match.type == V4L2_ASYNC_MATCH_TYPE_FWNODE)
			dev_dbg(notifier_dev(notifier),
				"failed binding %pfw (%d)\n",
				asc->match.fwnode, ret);
		goto err_unregister_subdev;
	}

	if (registered) {
		/*
		 * Depending on the function of the entities involved, we may
		 * want to create links between them (for example between a
		 * sensor and its lens or between a sensor's source pad and the
		 * connected device's sink pad).
		 */
		ret = v4l2_async_create_ancillary_links(notifier, sd);
		if (ret) {
			if (asc->match.type == V4L2_ASYNC_MATCH_TYPE_FWNODE)
				dev_dbg(notifier_dev(notifier),
					"failed creating links for %pfw (%d)\n",
					asc->match.fwnode, ret);
			goto err_call_unbind;
		}
	}

	list_add(&asc->asc_subdev_entry, &sd->asc_list);
	asc->sd = sd;

	/* Move from the waiting list to notifier's done */
	list_move(&asc->asc_entry, &notifier->done_list);

	dev_dbg(notifier_dev(notifier), "v4l2-async: %s bound (ret %d)\n",
		dev_name(sd->dev), ret);

	/*
	 * See if the sub-device has a notifier. If not, return here.
	 */
	subdev_notifier = v4l2_async_find_subdev_notifier(sd);
	if (!subdev_notifier || subdev_notifier->parent)
		return 0;

	/*
	 * Proceed with checking for the sub-device notifier's async
	 * sub-devices, and return the result. The error will be handled by the
	 * caller.
	 */
	subdev_notifier->parent = notifier;

	return v4l2_async_nf_try_all_subdevs(subdev_notifier);

err_call_unbind:
	v4l2_async_nf_call_unbind(notifier, sd, asc);
	list_del(&asc->asc_subdev_entry);

err_unregister_subdev:
	if (registered)
		v4l2_device_unregister_subdev(sd);

	return ret;
}

/* Test all async sub-devices in a notifier for a match. */
static int
v4l2_async_nf_try_all_subdevs(struct v4l2_async_notifier *notifier)
{
	struct v4l2_device *v4l2_dev =
		v4l2_async_nf_find_v4l2_dev(notifier);
	struct v4l2_subdev *sd;

	if (!v4l2_dev)
		return 0;

	dev_dbg(notifier_dev(notifier), "v4l2-async: trying all sub-devices\n");

again:
	list_for_each_entry(sd, &subdev_list, async_list) {
		struct v4l2_async_connection *asc;
		int ret;

		asc = v4l2_async_find_match(notifier, sd);
		if (!asc)
			continue;

		dev_dbg(notifier_dev(notifier),
			"v4l2-async: match found, subdev %s\n", sd->name);

		ret = v4l2_async_match_notify(notifier, v4l2_dev, sd, asc);
		if (ret < 0)
			return ret;

		/*
		 * v4l2_async_match_notify() may lead to registering a
		 * new notifier and thus changing the async subdevs
		 * list. In order to proceed safely from here, restart
		 * parsing the list from the beginning.
		 */
		goto again;
	}

	return 0;
}

static void v4l2_async_unbind_subdev_one(struct v4l2_async_notifier *notifier,
					 struct v4l2_async_connection *asc)
{
	list_move_tail(&asc->asc_entry, &notifier->waiting_list);
	if (list_is_singular(&asc->asc_subdev_entry)) {
		v4l2_async_nf_call_unbind(notifier, asc->sd, asc);
		v4l2_device_unregister_subdev(asc->sd);
		asc->sd = NULL;
	}
	list_del(&asc->asc_subdev_entry);
}

/* Unbind all sub-devices in the notifier tree. */
static void
v4l2_async_nf_unbind_all_subdevs(struct v4l2_async_notifier *notifier)
{
	struct v4l2_async_connection *asc, *asc_tmp;

	list_for_each_entry_safe(asc, asc_tmp, &notifier->done_list,
				 asc_entry) {
		struct v4l2_async_notifier *subdev_notifier =
			v4l2_async_find_subdev_notifier(asc->sd);

		if (subdev_notifier)
			v4l2_async_nf_unbind_all_subdevs(subdev_notifier);

		v4l2_async_unbind_subdev_one(notifier, asc);
	}

	notifier->parent = NULL;
}

/* See if an async sub-device can be found in a notifier's lists. */
static bool
v4l2_async_nf_has_async_match_entry(struct v4l2_async_notifier *notifier,
				    struct v4l2_async_match_desc *match)
{
	struct v4l2_async_connection *asc;

	list_for_each_entry(asc, &notifier->waiting_list, asc_entry)
		if (v4l2_async_match_equal(&asc->match, match))
			return true;

	list_for_each_entry(asc, &notifier->done_list, asc_entry)
		if (v4l2_async_match_equal(&asc->match, match))
			return true;

	return false;
}

/*
 * Find out whether an async sub-device was set up already or whether it exists
 * in a given notifier.
 */
static bool
v4l2_async_nf_has_async_match(struct v4l2_async_notifier *notifier,
			      struct v4l2_async_match_desc *match)
{
	struct list_head *heads[] = {
		&notifier->waiting_list,
		&notifier->done_list,
	};
	unsigned int i;

	lockdep_assert_held(&list_lock);

	/* Check that an asc is not being added more than once. */
	for (i = 0; i < ARRAY_SIZE(heads); i++) {
		struct v4l2_async_connection *asc;

		list_for_each_entry(asc, heads[i], asc_entry) {
			if (&asc->match == match)
				continue;
			if (v4l2_async_match_equal(&asc->match, match))
				return true;
		}
	}

	/* Check that an asc does not exist in other notifiers. */
	list_for_each_entry(notifier, &notifier_list, notifier_entry)
		if (v4l2_async_nf_has_async_match_entry(notifier, match))
			return true;

	return false;
}

static int v4l2_async_nf_match_valid(struct v4l2_async_notifier *notifier,
				     struct v4l2_async_match_desc *match)
{
	struct device *dev = notifier_dev(notifier);

	switch (match->type) {
	case V4L2_ASYNC_MATCH_TYPE_I2C:
	case V4L2_ASYNC_MATCH_TYPE_FWNODE:
		if (v4l2_async_nf_has_async_match(notifier, match)) {
			dev_dbg(dev, "v4l2-async: match descriptor already listed in a notifier\n");
			return -EEXIST;
		}
		break;
	default:
		dev_err(dev, "v4l2-async: Invalid match type %u on %p\n",
			match->type, match);
		return -EINVAL;
	}

	return 0;
}

void v4l2_async_nf_init(struct v4l2_async_notifier *notifier,
			struct v4l2_device *v4l2_dev)
{
	INIT_LIST_HEAD(&notifier->waiting_list);
	INIT_LIST_HEAD(&notifier->done_list);
	INIT_LIST_HEAD(&notifier->notifier_entry);
	notifier->v4l2_dev = v4l2_dev;
}
EXPORT_SYMBOL(v4l2_async_nf_init);

void v4l2_async_subdev_nf_init(struct v4l2_async_notifier *notifier,
			       struct v4l2_subdev *sd)
{
	INIT_LIST_HEAD(&notifier->waiting_list);
	INIT_LIST_HEAD(&notifier->done_list);
	INIT_LIST_HEAD(&notifier->notifier_entry);
	notifier->sd = sd;
}
EXPORT_SYMBOL_GPL(v4l2_async_subdev_nf_init);

static int __v4l2_async_nf_register(struct v4l2_async_notifier *notifier)
{
	struct v4l2_async_connection *asc;
	int ret;

	mutex_lock(&list_lock);

	list_for_each_entry(asc, &notifier->waiting_list, asc_entry) {
		ret = v4l2_async_nf_match_valid(notifier, &asc->match);
		if (ret)
			goto err_unlock;
	}

	ret = v4l2_async_nf_try_all_subdevs(notifier);
	if (ret < 0)
		goto err_unbind;

	ret = v4l2_async_nf_try_complete(notifier);
	if (ret < 0)
		goto err_unbind;

	/* Keep also completed notifiers on the list */
	list_add(&notifier->notifier_entry, &notifier_list);

	mutex_unlock(&list_lock);

	return 0;

err_unbind:
	/*
	 * On failure, unbind all sub-devices registered through this notifier.
	 */
	v4l2_async_nf_unbind_all_subdevs(notifier);

err_unlock:
	mutex_unlock(&list_lock);

	return ret;
}

int v4l2_async_nf_register(struct v4l2_async_notifier *notifier)
{
	if (WARN_ON(!notifier->v4l2_dev == !notifier->sd))
		return -EINVAL;

	return __v4l2_async_nf_register(notifier);
}
EXPORT_SYMBOL(v4l2_async_nf_register);

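/*
 * Illustrative sketch (not part of the original file): a typical bridge
 * driver initializes a notifier, adds the connections it waits for and then
 * registers the notifier. Names such as "my_dev" and "my_async_ops" below
 * are hypothetical.
 *
 *	struct v4l2_async_connection *asc;
 *	int ret;
 *
 *	v4l2_async_nf_init(&my_dev->notifier, &my_dev->v4l2_dev);
 *
 *	asc = v4l2_async_nf_add_fwnode_remote(&my_dev->notifier, endpoint,
 *					      struct v4l2_async_connection);
 *	if (IS_ERR(asc))
 *		return PTR_ERR(asc);
 *
 *	my_dev->notifier.ops = &my_async_ops;
 *
 *	ret = v4l2_async_nf_register(&my_dev->notifier);
 *	if (ret) {
 *		v4l2_async_nf_cleanup(&my_dev->notifier);
 *		return ret;
 *	}
 */
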
static void
__v4l2_async_nf_unregister(struct v4l2_async_notifier *notifier)
{
	if (!notifier || (!notifier->v4l2_dev && !notifier->sd))
		return;

	v4l2_async_nf_unbind_all_subdevs(notifier);

	list_del_init(&notifier->notifier_entry);
}

void v4l2_async_nf_unregister(struct v4l2_async_notifier *notifier)
{
	mutex_lock(&list_lock);

	__v4l2_async_nf_unregister(notifier);

	mutex_unlock(&list_lock);
}
EXPORT_SYMBOL(v4l2_async_nf_unregister);

static void __v4l2_async_nf_cleanup(struct v4l2_async_notifier *notifier)
{
	struct v4l2_async_connection *asc, *tmp;

	if (!notifier || !notifier->waiting_list.next)
		return;

	WARN_ON(!list_empty(&notifier->done_list));

	list_for_each_entry_safe(asc, tmp, &notifier->waiting_list, asc_entry) {
		list_del(&asc->asc_entry);
		v4l2_async_nf_call_destroy(notifier, asc);

		if (asc->match.type == V4L2_ASYNC_MATCH_TYPE_FWNODE)
			fwnode_handle_put(asc->match.fwnode);

		kfree(asc);
	}

	notifier->sd = NULL;
	notifier->v4l2_dev = NULL;
}

void v4l2_async_nf_cleanup(struct v4l2_async_notifier *notifier)
{
	mutex_lock(&list_lock);

	__v4l2_async_nf_cleanup(notifier);

	mutex_unlock(&list_lock);
}
EXPORT_SYMBOL_GPL(v4l2_async_nf_cleanup);

static void __v4l2_async_nf_add_connection(struct v4l2_async_notifier *notifier,
					   struct v4l2_async_connection *asc)
{
	mutex_lock(&list_lock);

	list_add_tail(&asc->asc_entry, &notifier->waiting_list);

	mutex_unlock(&list_lock);
}

struct v4l2_async_connection *
__v4l2_async_nf_add_fwnode(struct v4l2_async_notifier *notifier,
			   struct fwnode_handle *fwnode,
			   unsigned int asc_struct_size)
{
	struct v4l2_async_connection *asc;

	asc = kzalloc(asc_struct_size, GFP_KERNEL);
	if (!asc)
		return ERR_PTR(-ENOMEM);

	asc->notifier = notifier;
	asc->match.type = V4L2_ASYNC_MATCH_TYPE_FWNODE;
	asc->match.fwnode = fwnode_handle_get(fwnode);

	__v4l2_async_nf_add_connection(notifier, asc);

	return asc;
}
EXPORT_SYMBOL_GPL(__v4l2_async_nf_add_fwnode);

struct v4l2_async_connection *
__v4l2_async_nf_add_fwnode_remote(struct v4l2_async_notifier *notif,
				  struct fwnode_handle *endpoint,
				  unsigned int asc_struct_size)
{
	struct v4l2_async_connection *asc;
	struct fwnode_handle *remote;

	remote = fwnode_graph_get_remote_endpoint(endpoint);
	if (!remote)
		return ERR_PTR(-ENOTCONN);

	asc = __v4l2_async_nf_add_fwnode(notif, remote, asc_struct_size);
	/*
	 * Calling __v4l2_async_nf_add_fwnode() grabs a refcount,
	 * so drop the one we got in fwnode_graph_get_remote_endpoint().
	 */
	fwnode_handle_put(remote);
	return asc;
}
EXPORT_SYMBOL_GPL(__v4l2_async_nf_add_fwnode_remote);

struct v4l2_async_connection *
__v4l2_async_nf_add_i2c(struct v4l2_async_notifier *notifier, int adapter_id,
			unsigned short address, unsigned int asc_struct_size)
{
	struct v4l2_async_connection *asc;

	asc = kzalloc(asc_struct_size, GFP_KERNEL);
	if (!asc)
		return ERR_PTR(-ENOMEM);

	asc->notifier = notifier;
	asc->match.type = V4L2_ASYNC_MATCH_TYPE_I2C;
	asc->match.i2c.adapter_id = adapter_id;
	asc->match.i2c.address = address;

	__v4l2_async_nf_add_connection(notifier, asc);

	return asc;
}
EXPORT_SYMBOL_GPL(__v4l2_async_nf_add_i2c);

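/*
 * Illustrative sketch (not part of the original file): for I2C matching a
 * bridge driver only needs the adapter number and the client address, e.g.
 * (names hypothetical):
 *
 *	asc = v4l2_async_nf_add_i2c(&my_dev->notifier, adapter_nr, 0x36,
 *				    struct v4l2_async_connection);
 *
 * The resulting connection is then matched against registered sub-devices
 * by match_i2c() above.
 */
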
int v4l2_async_subdev_endpoint_add(struct v4l2_subdev *sd,
				   struct fwnode_handle *fwnode)
{
	struct v4l2_async_subdev_endpoint *ase;

	ase = kmalloc(sizeof(*ase), GFP_KERNEL);
	if (!ase)
		return -ENOMEM;

	ase->endpoint = fwnode;
	list_add(&ase->async_subdev_endpoint_entry,
		 &sd->async_subdev_endpoint_list);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_async_subdev_endpoint_add);

struct v4l2_async_connection *
v4l2_async_connection_unique(struct v4l2_subdev *sd)
{
	if (!list_is_singular(&sd->asc_list))
		return NULL;

	return list_first_entry(&sd->asc_list,
				struct v4l2_async_connection, asc_subdev_entry);
}
EXPORT_SYMBOL_GPL(v4l2_async_connection_unique);

int __v4l2_async_register_subdev(struct v4l2_subdev *sd, struct module *module)
{
	struct v4l2_async_notifier *subdev_notifier;
	struct v4l2_async_notifier *notifier;
	struct v4l2_async_connection *asc;
	int ret;

	INIT_LIST_HEAD(&sd->asc_list);

	/*
	 * No reference is taken. The reference is held by the device (struct
	 * v4l2_subdev.dev), and the async sub-device does not exist
	 * independently of the device at any point in time.
	 *
	 * The async sub-device shall always be registered for its device node,
	 * not the endpoint node.
	 */
	if (!sd->fwnode && sd->dev) {
		sd->fwnode = dev_fwnode(sd->dev);
	} else if (fwnode_graph_is_endpoint(sd->fwnode)) {
		dev_warn(sd->dev, "sub-device fwnode is an endpoint!\n");
		return -EINVAL;
	}

	sd->owner = module;

	mutex_lock(&list_lock);

	list_for_each_entry(notifier, &notifier_list, notifier_entry) {
		struct v4l2_device *v4l2_dev =
			v4l2_async_nf_find_v4l2_dev(notifier);

		if (!v4l2_dev)
			continue;

		while ((asc = v4l2_async_find_match(notifier, sd))) {
			ret = v4l2_async_match_notify(notifier, v4l2_dev, sd,
						      asc);
			if (ret)
				goto err_unbind;

			ret = v4l2_async_nf_try_complete(notifier);
			if (ret)
				goto err_unbind;
		}
	}

	/* None matched, wait for hot-plugging */
	list_add(&sd->async_list, &subdev_list);

	mutex_unlock(&list_lock);

	return 0;

err_unbind:
	/*
	 * Complete failed. Unbind the sub-devices bound through registering
	 * this async sub-device.
	 */
	subdev_notifier = v4l2_async_find_subdev_notifier(sd);
	if (subdev_notifier)
		v4l2_async_nf_unbind_all_subdevs(subdev_notifier);

	if (asc)
		v4l2_async_unbind_subdev_one(notifier, asc);

	mutex_unlock(&list_lock);

	sd->owner = NULL;

	return ret;
}
EXPORT_SYMBOL(__v4l2_async_register_subdev);

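/*
 * Illustrative sketch (not part of the original file): a sub-device driver
 * typically calls the v4l2_async_register_subdev() wrapper (or
 * v4l2_async_register_subdev_sensor() for sensors) once the sub-device is
 * fully initialized, e.g. at the end of its probe function:
 *
 *	ret = v4l2_async_register_subdev(&my_sensor->sd);
 *	if (ret)
 *		return ret;
 *
 * "my_sensor" is hypothetical; the wrapper ends up in
 * __v4l2_async_register_subdev() above with THIS_MODULE as the owner.
 */
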
void v4l2_async_unregister_subdev(struct v4l2_subdev *sd)
{
	struct v4l2_async_connection *asc, *asc_tmp;

	if (!sd->async_list.next)
		return;

	v4l2_subdev_put_privacy_led(sd);

	mutex_lock(&list_lock);

	__v4l2_async_nf_unregister(sd->subdev_notifier);
	__v4l2_async_nf_cleanup(sd->subdev_notifier);
	kfree(sd->subdev_notifier);
	sd->subdev_notifier = NULL;

	if (sd->asc_list.next) {
		list_for_each_entry_safe(asc, asc_tmp, &sd->asc_list,
					 asc_subdev_entry) {
			v4l2_async_unbind_subdev_one(asc->notifier, asc);
		}
	}

	list_del(&sd->async_list);
	sd->async_list.next = NULL;

	mutex_unlock(&list_lock);
}
EXPORT_SYMBOL(v4l2_async_unregister_subdev);

static void print_waiting_match(struct seq_file *s,
				struct v4l2_async_match_desc *match)
{
	switch (match->type) {
	case V4L2_ASYNC_MATCH_TYPE_I2C:
		seq_printf(s, " [i2c] dev=%d-%04x\n", match->i2c.adapter_id,
			   match->i2c.address);
		break;
	case V4L2_ASYNC_MATCH_TYPE_FWNODE: {
		struct fwnode_handle *devnode, *fwnode = match->fwnode;

		devnode = fwnode_graph_is_endpoint(fwnode) ?
			  fwnode_graph_get_port_parent(fwnode) :
			  fwnode_handle_get(fwnode);

		seq_printf(s, " [fwnode] dev=%s, node=%pfw\n",
			   devnode->dev ? dev_name(devnode->dev) : "nil",
			   fwnode);

		fwnode_handle_put(devnode);
		break;
	}
	}
}

static const char *
v4l2_async_nf_name(struct v4l2_async_notifier *notifier)
{
	if (notifier->v4l2_dev)
		return notifier->v4l2_dev->name;
	else if (notifier->sd)
		return notifier->sd->name;
	else
		return "nil";
}

static int pending_subdevs_show(struct seq_file *s, void *data)
{
	struct v4l2_async_notifier *notif;
	struct v4l2_async_connection *asc;

	mutex_lock(&list_lock);

	list_for_each_entry(notif, &notifier_list, notifier_entry) {
		seq_printf(s, "%s:\n", v4l2_async_nf_name(notif));
		list_for_each_entry(asc, &notif->waiting_list, asc_entry)
			print_waiting_match(s, &asc->match);
	}

	mutex_unlock(&list_lock);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(pending_subdevs);

static struct dentry *v4l2_async_debugfs_dir;

static int __init v4l2_async_init(void)
{
	v4l2_async_debugfs_dir = debugfs_create_dir("v4l2-async", NULL);
	debugfs_create_file("pending_async_subdevices", 0444,
			    v4l2_async_debugfs_dir, NULL,
			    &pending_subdevs_fops);

	return 0;
}

static void __exit v4l2_async_exit(void)
{
	debugfs_remove_recursive(v4l2_async_debugfs_dir);
}

subsys_initcall(v4l2_async_init);
module_exit(v4l2_async_exit);

MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");
MODULE_AUTHOR("Sakari Ailus <sakari.ailus@linux.intel.com>");
MODULE_AUTHOR("Ezequiel Garcia <ezequiel@collabora.com>");
MODULE_DESCRIPTION("V4L2 asynchronous subdevice registration API");
MODULE_LICENSE("GPL");