1 /*
2  * Copyright (c) 2014 Samsung Electronics Co., Ltd
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sub license,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the
12  * next paragraph) shall be included in all copies or substantial portions
13  * of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include <linux/debugfs.h>
25 #include <linux/err.h>
26 #include <linux/export.h>
27 #include <linux/media-bus-format.h>
28 #include <linux/module.h>
29 #include <linux/mutex.h>
30 #include <linux/srcu.h>
31 
32 #include <drm/drm_atomic_state_helper.h>
33 #include <drm/drm_bridge.h>
34 #include <drm/drm_debugfs.h>
35 #include <drm/drm_edid.h>
36 #include <drm/drm_encoder.h>
37 #include <drm/drm_file.h>
38 #include <drm/drm_of.h>
39 #include <drm/drm_print.h>
40 
41 #include "drm_crtc_internal.h"
42 
43 /**
44  * DOC: overview
45  *
46  * &struct drm_bridge represents a device that hangs on to an encoder. These are
47  * handy when a regular &drm_encoder entity isn't enough to represent the entire
48  * encoder chain.
49  *
50  * A bridge is always attached to a single &drm_encoder at a time, but can be
51  * either connected to it directly, or through a chain of bridges::
52  *
53  *     [ CRTC ---> ] Encoder ---> Bridge A ---> Bridge B
54  *
55  * Here, the output of the encoder feeds into bridge A, which in turn feeds into
56  * bridge B. Bridge chains can be arbitrarily long, and shall be fully linear:
57  * Chaining multiple bridges to the output of a bridge, or the same bridge to
58  * the output of different bridges, is not supported.
59  *
60  * &drm_bridge and &drm_panel are not &drm_mode_object entities like planes,
61  * CRTCs, encoders or connectors, and hence are not visible to userspace. They
62  * just provide additional hooks to get the desired output at the end of the
63  * encoder chain.
64  */
65 
66 /**
67  * DOC: display driver integration
68  *
69  * Display drivers are responsible for linking encoders with the first bridge
70  * in the chain. This is done by acquiring the appropriate bridge with
71  * devm_drm_of_get_bridge(). Once acquired, the bridge shall be attached to the
72  * encoder with a call to drm_bridge_attach().
73  *
74  * Bridges are responsible for linking themselves with the next bridge in the
75  * chain, if any. This is done the same way as for encoders, with the call to
76  * drm_bridge_attach() occurring in the &drm_bridge_funcs.attach operation.
77  *
78  * Once these links are created, the bridges can participate along with encoder
79  * functions to perform mode validation and fixup (through
80  * drm_bridge_chain_mode_valid() and drm_atomic_bridge_chain_check()), mode
81  * setting (through drm_bridge_chain_mode_set()), enable (through
82  * drm_atomic_bridge_chain_pre_enable() and drm_atomic_bridge_chain_enable())
83  * and disable (through drm_atomic_bridge_chain_disable() and
84  * drm_atomic_bridge_chain_post_disable()). Those functions call the
85  * corresponding operations provided in &drm_bridge_funcs in sequence for all
86  * bridges in the chain.
87  *
88  * For display drivers that use the atomic helpers
89  * drm_atomic_helper_check_modeset(),
90  * drm_atomic_helper_commit_modeset_enables() and
91  * drm_atomic_helper_commit_modeset_disables() (either directly in hand-rolled
92  * commit check and commit tail handlers, or through the higher-level
93  * drm_atomic_helper_check() and drm_atomic_helper_commit_tail() or
94  * drm_atomic_helper_commit_tail_rpm() helpers), this is done transparently and
95  * requires no intervention from the driver. For other drivers, the relevant
96  * DRM bridge chain functions shall be called manually.
97  *
98  * Bridges also participate in implementing the &drm_connector at the end of
99  * the bridge chain. Display drivers may use the drm_bridge_connector_init()
100  * helper to create the &drm_connector, or implement it manually on top of the
101  * connector-related operations exposed by the bridge (see the overview
102  * documentation of bridge operations for more details).
103  */
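/*
 * A minimal sketch (not taken from any specific driver) of the pattern
 * described above: a display driver acquires the first bridge from DT,
 * attaches it to its encoder without a bridge-created connector, and creates
 * the connector with the drm_bridge_connector helper. The port/endpoint
 * indices and the surrounding encoder setup are assumptions that depend on
 * the actual hardware and bindings, and error unwinding is omitted::
 *
 *	struct drm_bridge *bridge;
 *	struct drm_connector *connector;
 *	int ret;
 *
 *	bridge = devm_drm_of_get_bridge(dev, dev->of_node, 0, 0);
 *	if (IS_ERR(bridge))
 *		return PTR_ERR(bridge);
 *
 *	ret = drm_bridge_attach(encoder, bridge, NULL,
 *				DRM_BRIDGE_ATTACH_NO_CONNECTOR);
 *	if (ret)
 *		return ret;
 *
 *	connector = drm_bridge_connector_init(drm, encoder);
 *	if (IS_ERR(connector))
 *		return PTR_ERR(connector);
 *
 *	drm_connector_attach_encoder(connector, encoder);
 */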
104 
105 /**
106  * DOC: special care dsi
107  *
108  * The interaction between the bridges and other frameworks involved in
109  * the probing of the upstream driver and the bridge driver can be
110  * challenging. Indeed, there are multiple cases that need to be
111  * considered:
112  *
113  * - The upstream driver doesn't use the component framework and isn't a
114  *   MIPI-DSI host. In this case, the bridge driver will probe at some
115  *   point and the upstream driver should try to probe again by returning
116  *   EPROBE_DEFER as long as the bridge driver hasn't probed.
117  *
118  * - The upstream driver doesn't use the component framework, but is a
119  *   MIPI-DSI host. The bridge device is controlled through MIPI-DCS
120  *   commands. In this case, the bridge device is a child of the
121  *   display device, and when it probes it is assured that the display
122  *   device (and MIPI-DSI host) is present. The upstream driver is
123  *   assured that the bridge driver is available between the
124  *   &mipi_dsi_host_ops.attach and &mipi_dsi_host_ops.detach operations.
125  *   Therefore, it must run mipi_dsi_host_register() in its probe
126  *   function, and then run drm_bridge_attach() in its
127  *   &mipi_dsi_host_ops.attach hook.
128  *
129  * - The upstream driver uses the component framework and is a MIPI-DSI
130  *   host. The bridge device is controlled through MIPI-DCS commands.
131  *   This is the same situation as above, and the upstream driver can
132  *   run mipi_dsi_host_register() in either its probe or bind hook.
133  *
134  * - The upstream driver uses the component framework and is a MIPI-DSI
135  *   host. The bridge device uses a separate bus (such as I2C) to be
136  *   controlled. In this case, there's no correlation between the probe
137  *   of the bridge and upstream drivers, so care must be taken to avoid
138  *   an endless EPROBE_DEFER loop, with each driver waiting for the
139  *   other to probe.
140  *
141  * The ideal pattern to cover the last item (and all the others in the
142  * MIPI-DSI host driver case) is to split the operations like this:
143  *
144  * - The MIPI-DSI host driver must run mipi_dsi_host_register() in its
145  *   probe hook. It will make sure that the MIPI-DSI host sticks around,
146  *   and that the driver's bind can be called.
147  *
148  * - In its probe hook, the bridge driver must try to find its MIPI-DSI
149  *   host, register as a MIPI-DSI device and attach the MIPI-DSI device
150  *   to its host. The bridge driver is now functional.
151  *
152  * - In its &struct mipi_dsi_host_ops.attach hook, the MIPI-DSI host can
153  *   add its component. Its bind hook will then be called, and since the
154  *   bridge driver is attached and registered by this point, it can be
155  *   looked up and attached.
156  *
157  * At this point, we're certain that both the upstream driver and the
158  * bridge driver are functional, and we can't end up in a deadlock-like
159  * situation when probing. A condensed sketch of this pattern follows below.
160  */
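/*
 * A condensed sketch of the probe-splitting pattern described above, for a
 * component-based MIPI-DSI host driver and a bridge controlled over I2C.
 * my_host_probe(), my_host_attach(), my_bridge_probe(), my_component_ops,
 * priv, info and dsi_node are placeholders, and error handling is mostly
 * omitted::
 *
 *	// MIPI-DSI host driver
 *	static int my_host_probe(struct platform_device *pdev)
 *	{
 *		// ... initialise priv->host.ops and priv->host.dev ...
 *		return mipi_dsi_host_register(&priv->host);
 *	}
 *
 *	static int my_host_attach(struct mipi_dsi_host *host,
 *				  struct mipi_dsi_device *dsi)
 *	{
 *		// The bridge driver has probed, it is now safe to bind.
 *		return component_add(host->dev, &my_component_ops);
 *	}
 *
 *	// Bridge driver
 *	static int my_bridge_probe(struct i2c_client *client)
 *	{
 *		struct mipi_dsi_host *host;
 *		struct mipi_dsi_device *dsi;
 *
 *		host = of_find_mipi_dsi_host_by_node(dsi_node);
 *		if (!host)
 *			return -EPROBE_DEFER;
 *
 *		dsi = devm_mipi_dsi_device_register_full(&client->dev, host,
 *							 &info);
 *		if (IS_ERR(dsi))
 *			return PTR_ERR(dsi);
 *
 *		return devm_mipi_dsi_attach(&client->dev, dsi);
 *	}
 */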
161 
162 /**
163  * DOC: dsi bridge operations
164  *
165  * DSI host interfaces are expected to be implemented as bridges rather than
166  * encoders, however there are a few aspects of their operation that need to
167  * be defined in order to provide a consistent interface.
168  *
169  * A DSI host should keep the PHY powered down until the pre_enable operation is
170  * called. All lanes are in an undefined idle state up to this point, and it
171  * must not be assumed that they are in LP-11.
172  * pre_enable should initialise the PHY, set the data lanes to LP-11, and the
173  * clock lane to either LP-11 or HS depending on the mode_flag
174  * %MIPI_DSI_CLOCK_NON_CONTINUOUS.
175  *
176  * Ordinarily the downstream bridge's (the DSI peripheral's) pre_enable will
177  * have been called before the DSI host's. If the DSI peripheral requires
178  * LP-11 and/or the clock lane to be in HS mode prior to pre_enable, then it
179  * can set the &pre_enable_prev_first flag to request the pre_enable (and
180  * post_disable) order to be altered to enable the DSI host first.
181  *
182  * Either the CRTC being enabled or the DSI host's enable operation should switch
183  * the host to actively transmitting video on the data lanes.
184  *
185  * The reverse also applies. The DSI host disable operation or stopping the CRTC
186  * should stop transmitting video, and the data lanes should return to the LP-11
187  * state. The DSI host &post_disable operation should disable the PHY.
188  * If the &pre_enable_prev_first flag is set, then the DSI peripheral's
189  * bridge &post_disable will be called before the DSI host's post_disable.
190  *
191  * Whilst it is valid to call &host_transfer prior to pre_enable or after
192  * post_disable, the exact state of the lanes is undefined at this point. The
193  * DSI host should initialise the interface, transmit the data, and then disable
194  * the interface again.
195  *
196  * Ultra Low Power State (ULPS) is not explicitly supported by DRM. If
197  * implemented, it therefore needs to be handled entirely within the DSI Host
198  * driver.
199  */
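/*
 * A sketch of a DSI host bridge's atomic_pre_enable honouring the
 * expectations above. dsi_phy_init(), dsi_set_data_lanes_lp11(),
 * dsi_clock_lane_hs() and bridge_to_my_dsi() are hypothetical driver
 * helpers, not DRM APIs::
 *
 *	static void my_dsi_atomic_pre_enable(struct drm_bridge *bridge,
 *					     struct drm_atomic_state *state)
 *	{
 *		struct my_dsi *dsi = bridge_to_my_dsi(bridge);
 *
 *		// Power up and initialise the PHY, then park the data lanes
 *		// in LP-11.
 *		dsi_phy_init(dsi);
 *		dsi_set_data_lanes_lp11(dsi);
 *
 *		// With a continuous clock, the clock lane goes to HS now.
 *		if (!(dsi->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS))
 *			dsi_clock_lane_hs(dsi);
 *	}
 */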
200 
201 /* Protect bridge_list and bridge_lingering_list */
202 static DEFINE_MUTEX(bridge_lock);
203 static LIST_HEAD(bridge_list);
204 static LIST_HEAD(bridge_lingering_list);
205 
206 DEFINE_STATIC_SRCU(drm_bridge_unplug_srcu);
207 
208 /**
209  * drm_bridge_enter - Enter DRM bridge critical section
210  * @bridge: DRM bridge
211  * @idx: Pointer to index that will be passed to the matching drm_bridge_exit()
212  *
213  * This function marks and protects the beginning of a section that should not
214  * be entered after the bridge has been unplugged. The section end is marked
215  * with drm_bridge_exit(). Calls to this function can be nested.
216  *
217  * Returns:
218  * True if it is OK to enter the section, false otherwise.
219  */
220 bool drm_bridge_enter(struct drm_bridge *bridge, int *idx)
221 {
222 	*idx = srcu_read_lock(&drm_bridge_unplug_srcu);
223 
224 	if (bridge->unplugged) {
225 		srcu_read_unlock(&drm_bridge_unplug_srcu, *idx);
226 		return false;
227 	}
228 
229 	return true;
230 }
231 EXPORT_SYMBOL(drm_bridge_enter);
232 
233 /**
234  * drm_bridge_exit - Exit DRM bridge critical section
235  * @idx: index returned by drm_bridge_enter()
236  *
237  * This function marks the end of a section that should not be entered after
238  * the bridge has been unplugged.
239  */
240 void drm_bridge_exit(int idx)
241 {
242 	srcu_read_unlock(&drm_bridge_unplug_srcu, idx);
243 }
244 EXPORT_SYMBOL(drm_bridge_exit);
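/*
 * A sketch of how a bridge entry point is expected to use the unplug guards;
 * the -ENODEV return value is an assumption, drivers pick whatever error
 * makes sense for the operation::
 *
 *	int idx;
 *
 *	if (!drm_bridge_enter(bridge, &idx))
 *		return -ENODEV;
 *
 *	// ... safe to access device resources here ...
 *
 *	drm_bridge_exit(idx);
 */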
245 
246 /**
247  * drm_bridge_unplug - declare a DRM bridge was unplugged and remove it
248  * @bridge: DRM bridge
249  *
250  * This tells that the bridge has been physically unplugged and that no
251  * operations on device resources may be performed anymore. Entry points can
252  * use drm_bridge_enter() and drm_bridge_exit() to protect device resources
253  * in a race-free manner.
254  *
255  * Also unregisters the bridge.
256  */
257 void drm_bridge_unplug(struct drm_bridge *bridge)
258 {
259 	bridge->unplugged = true;
260 
261 	synchronize_srcu(&drm_bridge_unplug_srcu);
262 
263 	drm_bridge_remove(bridge);
264 }
265 EXPORT_SYMBOL(drm_bridge_unplug);
266 
267 static void __drm_bridge_free(struct kref *kref)
268 {
269 	struct drm_bridge *bridge = container_of(kref, struct drm_bridge, refcount);
270 
271 	mutex_lock(&bridge_lock);
272 	list_del(&bridge->list);
273 	mutex_unlock(&bridge_lock);
274 
275 	if (bridge->funcs->destroy)
276 		bridge->funcs->destroy(bridge);
277 
278 	drm_bridge_put(bridge->next_bridge);
279 
280 	kfree(bridge->container);
281 }
282 
283 /**
284  * drm_bridge_get - Acquire a bridge reference
285  * @bridge: DRM bridge
286  *
287  * This function increments the bridge's refcount.
288  *
289  * Returns:
290  * Pointer to @bridge.
291  */
292 struct drm_bridge *drm_bridge_get(struct drm_bridge *bridge)
293 {
294 	if (bridge)
295 		kref_get(&bridge->refcount);
296 
297 	return bridge;
298 }
299 EXPORT_SYMBOL(drm_bridge_get);
300 
301 /**
302  * drm_bridge_put - Release a bridge reference
303  * @bridge: DRM bridge
304  *
305  * This function decrements the bridge's reference count and frees the
306  * object if the reference count drops to zero.
307  */
308 void drm_bridge_put(struct drm_bridge *bridge)
309 {
310 	if (bridge)
311 		kref_put(&bridge->refcount, __drm_bridge_free);
312 }
313 EXPORT_SYMBOL(drm_bridge_put);
314 
315 /**
316  * drm_bridge_put_void - wrapper around drm_bridge_put() taking a void pointer
317  *
318  * @data: pointer to &struct drm_bridge, cast to a void pointer
319  *
320  * Wrapper of drm_bridge_put() to be used when a function taking a void
321  * pointer is needed, for example as a devm action.
322  */
323 static void drm_bridge_put_void(void *data)
324 {
325 	struct drm_bridge *bridge = (struct drm_bridge *)data;
326 
327 	drm_bridge_put(bridge);
328 }
329 
330 void *__devm_drm_bridge_alloc(struct device *dev, size_t size, size_t offset,
331 			      const struct drm_bridge_funcs *funcs)
332 {
333 	void *container;
334 	struct drm_bridge *bridge;
335 	int err;
336 
337 	if (!funcs) {
338 		dev_warn(dev, "Missing funcs pointer\n");
339 		return ERR_PTR(-EINVAL);
340 	}
341 
342 	container = kzalloc(size, GFP_KERNEL);
343 	if (!container)
344 		return ERR_PTR(-ENOMEM);
345 
346 	bridge = container + offset;
347 	INIT_LIST_HEAD(&bridge->list);
348 	bridge->container = container;
349 	bridge->funcs = funcs;
350 	kref_init(&bridge->refcount);
351 
352 	err = devm_add_action_or_reset(dev, drm_bridge_put_void, bridge);
353 	if (err)
354 		return ERR_PTR(err);
355 
356 	return container;
357 }
358 EXPORT_SYMBOL(__devm_drm_bridge_alloc);
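/*
 * Bridge drivers are not expected to call this directly, but to go through
 * the devm_drm_bridge_alloc() macro, which derives the size and offset from
 * the driver's private structure. A sketch, with struct my_bridge and
 * my_bridge_funcs as placeholders::
 *
 *	struct my_bridge {
 *		struct drm_bridge bridge;
 *		// driver private fields
 *	};
 *
 *	struct my_bridge *priv;
 *
 *	priv = devm_drm_bridge_alloc(dev, struct my_bridge, bridge,
 *				     &my_bridge_funcs);
 *	if (IS_ERR(priv))
 *		return PTR_ERR(priv);
 */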
359 
360 /**
361  * drm_bridge_add - register a bridge
362  *
363  * @bridge: bridge control structure
364  *
365  * Add the given bridge to the global list of bridges, where it can be
366  * found by users via of_drm_find_and_get_bridge().
367  *
368  * The bridge to be added must have been allocated by
369  * devm_drm_bridge_alloc().
370  */
371 void drm_bridge_add(struct drm_bridge *bridge)
372 {
373 	if (!bridge->container)
374 		DRM_WARN("DRM bridge corrupted or not allocated by devm_drm_bridge_alloc()\n");
375 
376 	drm_bridge_get(bridge);
377 
378 	/*
379 	 * If the bridge was previously added and then removed, it is now
380 	 * in bridge_lingering_list. Remove it or bridge_lingering_list will be
381 	 * corrupted when adding this bridge to bridge_list below.
382 	 */
383 	if (!list_empty(&bridge->list))
384 		list_del_init(&bridge->list);
385 
386 	mutex_init(&bridge->hpd_mutex);
387 
388 	if (bridge->ops & DRM_BRIDGE_OP_HDMI)
389 		bridge->ycbcr_420_allowed = !!(bridge->supported_formats &
390 					       BIT(HDMI_COLORSPACE_YUV420));
391 
392 	mutex_lock(&bridge_lock);
393 	list_add_tail(&bridge->list, &bridge_list);
394 	mutex_unlock(&bridge_lock);
395 }
396 EXPORT_SYMBOL(drm_bridge_add);
397 
398 static void drm_bridge_remove_void(void *bridge)
399 {
400 	drm_bridge_remove(bridge);
401 }
402 
403 /**
404  * devm_drm_bridge_add - devm managed version of drm_bridge_add()
405  *
406  * @dev: device to tie the bridge lifetime to
407  * @bridge: bridge control structure
408  *
409  * This is the managed version of drm_bridge_add() which automatically
410  * calls drm_bridge_remove() when @dev is unbound.
411  *
412  * Return: 0 if no error or negative error code.
413  */
414 int devm_drm_bridge_add(struct device *dev, struct drm_bridge *bridge)
415 {
416 	drm_bridge_add(bridge);
417 	return devm_add_action_or_reset(dev, drm_bridge_remove_void, bridge);
418 }
419 EXPORT_SYMBOL(devm_drm_bridge_add);
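/*
 * A minimal sketch of the tail of a bridge probe, continuing the allocation
 * example above: fill in the DT node and output type, then register the
 * bridge. The connector type is an assumption that depends on the actual
 * hardware::
 *
 *	priv->bridge.of_node = dev->of_node;
 *	priv->bridge.type = DRM_MODE_CONNECTOR_DSI;
 *
 *	return devm_drm_bridge_add(dev, &priv->bridge);
 */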
420 
421 /**
422  * drm_bridge_remove - unregister a bridge
423  *
424  * @bridge: bridge control structure
425  *
426  * Remove the given bridge from the global list of registered bridges, so
427  * it won't be found by users via of_drm_find_and_get_bridge(), and add it
428  * to the lingering bridge list, to keep track of it until its allocated
429  * memory is eventually freed.
430  */
431 void drm_bridge_remove(struct drm_bridge *bridge)
432 {
433 	mutex_lock(&bridge_lock);
434 	list_move_tail(&bridge->list, &bridge_lingering_list);
435 	mutex_unlock(&bridge_lock);
436 
437 	mutex_destroy(&bridge->hpd_mutex);
438 
439 	drm_bridge_put(bridge);
440 }
441 EXPORT_SYMBOL(drm_bridge_remove);
442 
443 static struct drm_private_state *
444 drm_bridge_atomic_duplicate_priv_state(struct drm_private_obj *obj)
445 {
446 	struct drm_bridge *bridge = drm_priv_to_bridge(obj);
447 	struct drm_bridge_state *state;
448 
449 	state = bridge->funcs->atomic_duplicate_state(bridge);
450 	return state ? &state->base : NULL;
451 }
452 
453 static void
454 drm_bridge_atomic_destroy_priv_state(struct drm_private_obj *obj,
455 				     struct drm_private_state *s)
456 {
457 	struct drm_bridge_state *state = drm_priv_to_bridge_state(s);
458 	struct drm_bridge *bridge = drm_priv_to_bridge(obj);
459 
460 	bridge->funcs->atomic_destroy_state(bridge, state);
461 }
462 
463 static const struct drm_private_state_funcs drm_bridge_priv_state_funcs = {
464 	.atomic_duplicate_state = drm_bridge_atomic_duplicate_priv_state,
465 	.atomic_destroy_state = drm_bridge_atomic_destroy_priv_state,
466 };
467 
468 static bool drm_bridge_is_atomic(struct drm_bridge *bridge)
469 {
470 	return bridge->funcs->atomic_reset != NULL;
471 }
472 
473 /**
474  * drm_bridge_attach - attach the bridge to an encoder's chain
475  *
476  * @encoder: DRM encoder
477  * @bridge: bridge to attach
478  * @previous: previous bridge in the chain (optional)
479  * @flags: DRM_BRIDGE_ATTACH_* flags
480  *
481  * Called by a KMS driver to link the bridge to an encoder's chain. The @previous
482  * argument specifies the previous bridge in the chain. If NULL, the bridge is
483  * linked directly at the encoder's output. Otherwise it is linked at the
484  * previous bridge's output.
485  *
486  * If non-NULL, the previous bridge must already have been attached by a call
487  * to this function.
488  *
489  * The bridge to be attached must have been previously added by
490  * drm_bridge_add().
491  *
492  * Note that bridges attached to encoders are auto-detached during encoder
493  * cleanup in drm_encoder_cleanup(), so drm_bridge_attach() should generally
494  * *not* be balanced with a drm_bridge_detach() in driver code.
495  *
496  * RETURNS:
497  * Zero on success, error code on failure
498  */
499 int drm_bridge_attach(struct drm_encoder *encoder, struct drm_bridge *bridge,
500 		      struct drm_bridge *previous,
501 		      enum drm_bridge_attach_flags flags)
502 {
503 	int ret;
504 
505 	if (!encoder || !bridge)
506 		return -EINVAL;
507 
508 	if (!bridge->container)
509 		DRM_WARN("DRM bridge corrupted or not allocated by devm_drm_bridge_alloc()\n");
510 
511 	if (list_empty(&bridge->list))
512 		DRM_WARN("Missing drm_bridge_add() before attach\n");
513 
514 	drm_bridge_get(bridge);
515 
516 	if (previous && (!previous->dev || previous->encoder != encoder)) {
517 		ret = -EINVAL;
518 		goto err_put_bridge;
519 	}
520 
521 	if (bridge->dev) {
522 		ret = -EBUSY;
523 		goto err_put_bridge;
524 	}
525 
526 	bridge->dev = encoder->dev;
527 	bridge->encoder = encoder;
528 
529 	if (previous)
530 		list_add(&bridge->chain_node, &previous->chain_node);
531 	else
532 		list_add(&bridge->chain_node, &encoder->bridge_chain);
533 
534 	if (bridge->funcs->attach) {
535 		ret = bridge->funcs->attach(bridge, encoder, flags);
536 		if (ret < 0)
537 			goto err_reset_bridge;
538 	}
539 
540 	if (drm_bridge_is_atomic(bridge)) {
541 		struct drm_bridge_state *state;
542 
543 		state = bridge->funcs->atomic_reset(bridge);
544 		if (IS_ERR(state)) {
545 			ret = PTR_ERR(state);
546 			goto err_detach_bridge;
547 		}
548 
549 		drm_atomic_private_obj_init(bridge->dev, &bridge->base,
550 					    &state->base,
551 					    &drm_bridge_priv_state_funcs);
552 	}
553 
554 	return 0;
555 
556 err_detach_bridge:
557 	if (bridge->funcs->detach)
558 		bridge->funcs->detach(bridge);
559 
560 err_reset_bridge:
561 	bridge->dev = NULL;
562 	bridge->encoder = NULL;
563 	list_del(&bridge->chain_node);
564 
565 	if (ret != -EPROBE_DEFER)
566 		DRM_ERROR("failed to attach bridge %pOF to encoder %s: %d\n",
567 			  bridge->of_node, encoder->name, ret);
568 	else
569 		dev_err_probe(encoder->dev->dev, -EPROBE_DEFER,
570 			      "failed to attach bridge %pOF to encoder %s\n",
571 			      bridge->of_node, encoder->name);
572 
573 err_put_bridge:
574 	drm_bridge_put(bridge);
575 	return ret;
576 }
577 EXPORT_SYMBOL(drm_bridge_attach);
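/*
 * In a bridge driver, attaching the next bridge in the chain typically
 * happens from the &drm_bridge_funcs.attach implementation. A sketch with
 * placeholder names (bridge_to_my_bridge(), priv->next_bridge), supporting
 * only the external-connector model::
 *
 *	static int my_bridge_attach(struct drm_bridge *bridge,
 *				    struct drm_encoder *encoder,
 *				    enum drm_bridge_attach_flags flags)
 *	{
 *		struct my_bridge *priv = bridge_to_my_bridge(bridge);
 *
 *		if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
 *			return -EINVAL;
 *
 *		return drm_bridge_attach(encoder, priv->next_bridge, bridge,
 *					 flags);
 *	}
 */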
578 
579 void drm_bridge_detach(struct drm_bridge *bridge)
580 {
581 	if (WARN_ON(!bridge))
582 		return;
583 
584 	if (WARN_ON(!bridge->dev))
585 		return;
586 
587 	if (drm_bridge_is_atomic(bridge))
588 		drm_atomic_private_obj_fini(&bridge->base);
589 
590 	if (bridge->funcs->detach)
591 		bridge->funcs->detach(bridge);
592 
593 	list_del(&bridge->chain_node);
594 	bridge->dev = NULL;
595 	drm_bridge_put(bridge);
596 }
597 
598 /**
599  * DOC: bridge operations
600  *
601  * Bridge drivers expose operations through the &drm_bridge_funcs structure.
602  * The DRM internals (atomic and CRTC helpers) use the helpers defined in
603  * drm_bridge.c to call bridge operations. Those operations are divided in
604  * three big categories to support different parts of the bridge usage.
605  *
606  * - The encoder-related operations support control of the bridges in the
607  *   chain, and are roughly counterparts to the &drm_encoder_helper_funcs
608  *   operations. They are used by the legacy CRTC and the atomic modeset
609  *   helpers to perform mode validation, fixup and setting, and enable and
610  *   disable the bridge automatically.
611  *
612  *   The enable and disable operations are split in
613  *   &drm_bridge_funcs.pre_enable, &drm_bridge_funcs.enable,
614  *   &drm_bridge_funcs.disable and &drm_bridge_funcs.post_disable to provide
615  *   finer-grained control.
616  *
617  *   Bridge drivers may implement the legacy version of those operations, or
618  *   the atomic version (prefixed with atomic\_), in which case they shall also
619  *   implement the atomic state bookkeeping operations
620  *   (&drm_bridge_funcs.atomic_duplicate_state,
621  *   &drm_bridge_funcs.atomic_destroy_state and &drm_bridge_funcs.atomic_reset).
622  *   Mixing atomic and non-atomic versions of the operations is not supported.
623  *
624  * - The bus format negotiation operations
625  *   &drm_bridge_funcs.atomic_get_output_bus_fmts and
626  *   &drm_bridge_funcs.atomic_get_input_bus_fmts allow bridge drivers to
627  *   negotiate the formats transmitted between bridges in the chain when
628  *   multiple formats are supported. Negotiation for formats is performed
629  *   transparently for display drivers by the atomic modeset helpers. Only
630  *   atomic versions of those operations exist, bridge drivers that need to
631  *   implement them shall thus also implement the atomic version of the
632  *   encoder-related operations. This feature is not supported by the legacy
633  *   CRTC helpers.
634  *
635  * - The connector-related operations support implementing a &drm_connector
636  *   based on a chain of bridges. DRM bridges traditionally create a
637  *   &drm_connector for bridges meant to be used at the end of the chain. This
638  *   puts additional burden on bridge drivers, especially for bridges that may
639  *   be used in the middle of a chain or at the end of it. Furthermore, it
640  *   requires all operations of the &drm_connector to be handled by a single
641  *   bridge, which doesn't always match the hardware architecture.
642  *
643  *   To simplify bridge drivers and make the connector implementation more
644  *   flexible, a new model allows bridges to unconditionally skip creation of
645  *   &drm_connector and instead expose &drm_bridge_funcs operations to support
646  *   an externally-implemented &drm_connector. Those operations are
647  *   &drm_bridge_funcs.detect, &drm_bridge_funcs.get_modes,
648  *   &drm_bridge_funcs.edid_read, &drm_bridge_funcs.hpd_notify,
649  *   &drm_bridge_funcs.hpd_enable and &drm_bridge_funcs.hpd_disable. When
650  *   implemented, display drivers shall create a &drm_connector instance for
651  *   each chain of bridges, and implement those connector instances based on
652  *   the bridge connector operations.
653  *
654  *   Bridge drivers shall implement the connector-related operations for all
655  *   the features that the bridge hardware supports. For instance, if a bridge
656  *   supports reading EDID, the &drm_bridge_funcs.edid_read operation shall be
657  *   implemented. This however doesn't mean that the DDC lines are wired to the
658  *   bridge on a particular platform, as they could also be connected to an I2C
659  *   controller of the SoC. Support for the connector-related operations on the
660  *   running platform is reported through the &drm_bridge.ops flags. Bridge
661  *   drivers shall detect which operations they can support on the platform
662  *   (usually this information is provided by ACPI or DT), and set the
663  *   &drm_bridge.ops flags for all supported operations. A flag shall only be
664  *   set if the corresponding &drm_bridge_funcs operation is implemented, but
665  *   an implemented operation doesn't necessarily imply that the corresponding
666  *   flag will be set. Display drivers shall use the &drm_bridge.ops flags to
667  *   decide which bridge to delegate a connector operation to. This mechanism
668  *   allows providing a single static const &drm_bridge_funcs instance in
669  *   bridge drivers, improving security by storing function pointers in
670  *   read-only memory.
671  *
672  *   In order to ease transition, bridge drivers may support both the old and
673  *   new models by making connector creation optional and implementing the
674  *   connector-related bridge operations. Connector creation is then controlled
675  *   by the flags argument to the drm_bridge_attach() function. Display drivers
676  *   that support the new model and create connectors themselves shall set the
677  *   %DRM_BRIDGE_ATTACH_NO_CONNECTOR flag, and bridge drivers shall then skip
678  *   connector creation. For intermediate bridges in the chain, the flag shall
679  *   be passed to the drm_bridge_attach() call for the downstream bridge.
680  *   Bridge drivers that implement the new model only shall return an error
681  *   from their &drm_bridge_funcs.attach handler when the
682  *   %DRM_BRIDGE_ATTACH_NO_CONNECTOR flag is not set. New display drivers
683  *   should use the new model, and convert the bridge drivers they use if
684  *   needed, in order to gradually transition to the new model.
685  */
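/*
 * A sketch of how a bridge driver might report the connector-related
 * operations it can support on the running platform; priv, priv->ddc and
 * priv->hpd_gpio are placeholders for resources discovered from DT or
 * ACPI::
 *
 *	priv->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_MODES;
 *	if (priv->ddc)
 *		priv->bridge.ops |= DRM_BRIDGE_OP_EDID;
 *	if (priv->hpd_gpio)
 *		priv->bridge.ops |= DRM_BRIDGE_OP_HPD;
 */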
686 
687 /**
688  * drm_bridge_chain_mode_valid - validate the mode against all bridges in the
689  *				 encoder chain.
690  * @bridge: bridge control structure
691  * @info: display info against which the mode shall be validated
692  * @mode: desired mode to be validated
693  *
694  * Calls &drm_bridge_funcs.mode_valid for all the bridges in the encoder
695  * chain, starting from the first bridge to the last. If at least one bridge
696  * does not accept the mode, the function returns that bridge's error code.
697  *
698  * Note: the bridge passed should be the one closest to the encoder.
699  *
700  * RETURNS:
701  * MODE_OK on success, a drm_mode_status enum error code on failure
702  */
703 enum drm_mode_status
704 drm_bridge_chain_mode_valid(struct drm_bridge *bridge,
705 			    const struct drm_display_info *info,
706 			    const struct drm_display_mode *mode)
707 {
708 	struct drm_encoder *encoder;
709 
710 	if (!bridge)
711 		return MODE_OK;
712 
713 	encoder = bridge->encoder;
714 	list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
715 		enum drm_mode_status ret;
716 
717 		if (!bridge->funcs->mode_valid)
718 			continue;
719 
720 		ret = bridge->funcs->mode_valid(bridge, info, mode);
721 		if (ret != MODE_OK)
722 			return ret;
723 	}
724 
725 	return MODE_OK;
726 }
727 EXPORT_SYMBOL(drm_bridge_chain_mode_valid);
728 
729 /**
730  * drm_bridge_chain_mode_set - set proposed mode for all bridges in the
731  *			       encoder chain
732  * @bridge: bridge control structure
733  * @mode: desired mode to be set for the encoder chain
734  * @adjusted_mode: updated mode that works for this encoder chain
735  *
736  * Calls &drm_bridge_funcs.mode_set op for all the bridges in the
737  * encoder chain, starting from the first bridge to the last.
738  *
739  * Note: the bridge passed should be the one closest to the encoder
740  */
741 void drm_bridge_chain_mode_set(struct drm_bridge *bridge,
742 			       const struct drm_display_mode *mode,
743 			       const struct drm_display_mode *adjusted_mode)
744 {
745 	struct drm_encoder *encoder;
746 
747 	if (!bridge)
748 		return;
749 
750 	encoder = bridge->encoder;
751 	list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
752 		if (bridge->funcs->mode_set)
753 			bridge->funcs->mode_set(bridge, mode, adjusted_mode);
754 	}
755 }
756 EXPORT_SYMBOL(drm_bridge_chain_mode_set);
757 
758 /**
759  * drm_atomic_bridge_chain_disable - disables all bridges in the encoder chain
760  * @bridge: bridge control structure
761  * @state: atomic state being committed
762  *
763  * Calls &drm_bridge_funcs.atomic_disable (falls back on
764  * &drm_bridge_funcs.disable) op for all the bridges in the encoder chain,
765  * starting from the last bridge to the first. These are called before calling
766  * &drm_encoder_helper_funcs.atomic_disable
767  *
768  * Note: the bridge passed should be the one closest to the encoder
769  */
770 void drm_atomic_bridge_chain_disable(struct drm_bridge *bridge,
771 				     struct drm_atomic_state *state)
772 {
773 	struct drm_encoder *encoder;
774 	struct drm_bridge *iter;
775 
776 	if (!bridge)
777 		return;
778 
779 	encoder = bridge->encoder;
780 	list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) {
781 		if (iter->funcs->atomic_disable) {
782 			iter->funcs->atomic_disable(iter, state);
783 		} else if (iter->funcs->disable) {
784 			iter->funcs->disable(iter);
785 		}
786 
787 		if (iter == bridge)
788 			break;
789 	}
790 }
791 EXPORT_SYMBOL(drm_atomic_bridge_chain_disable);
792 
793 static void drm_atomic_bridge_call_post_disable(struct drm_bridge *bridge,
794 						struct drm_atomic_state *state)
795 {
796 	if (state && bridge->funcs->atomic_post_disable)
797 		bridge->funcs->atomic_post_disable(bridge, state);
798 	else if (bridge->funcs->post_disable)
799 		bridge->funcs->post_disable(bridge);
800 }
801 
802 /**
803  * drm_atomic_bridge_chain_post_disable - cleans up after disabling all bridges
804  *					  in the encoder chain
805  * @bridge: bridge control structure
806  * @state: atomic state being committed
807  *
808  * Calls &drm_bridge_funcs.atomic_post_disable (falls back on
809  * &drm_bridge_funcs.post_disable) op for all the bridges in the encoder chain,
810  * starting from the first bridge to the last. These are called after completing
811  * &drm_encoder_helper_funcs.atomic_disable
812  *
813  * If a bridge sets @pre_enable_prev_first, then the @post_disable for that
814  * bridge will be called before the previous one to reverse the @pre_enable
815  * calling direction.
816  *
817  * Example:
818  * Bridge A ---> Bridge B ---> Bridge C ---> Bridge D ---> Bridge E
819  *
820  * With the pre_enable_prev_first flag enabled in Bridges B, D and E, the
821  * resulting @post_disable order would be:
822  * Bridge B, Bridge A, Bridge E, Bridge D, Bridge C.
823  *
824  * Note: the bridge passed should be the one closest to the encoder
825  */
826 void drm_atomic_bridge_chain_post_disable(struct drm_bridge *bridge,
827 					  struct drm_atomic_state *state)
828 {
829 	struct drm_encoder *encoder;
830 	struct drm_bridge *next, *limit;
831 
832 	if (!bridge)
833 		return;
834 
835 	encoder = bridge->encoder;
836 
837 	list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
838 		limit = NULL;
839 
840 		if (!list_is_last(&bridge->chain_node, &encoder->bridge_chain)) {
841 			next = list_next_entry(bridge, chain_node);
842 
843 			if (next->pre_enable_prev_first) {
844 				/* next bridge had requested that prev
845 				 * was enabled first, so disabled last
846 				 */
847 				limit = next;
848 
849 				/* Find the next bridge that has NOT requested
850 				 * prev to be enabled first / disabled last
851 				 */
852 				list_for_each_entry_from(next, &encoder->bridge_chain,
853 							 chain_node) {
854 					if (!next->pre_enable_prev_first) {
855 						next = list_prev_entry(next, chain_node);
856 						limit = next;
857 						break;
858 					}
859 
860 					if (list_is_last(&next->chain_node,
861 							 &encoder->bridge_chain)) {
862 						limit = next;
863 						break;
864 					}
865 				}
866 
867 				/* Call these bridges in reverse order */
868 				list_for_each_entry_from_reverse(next, &encoder->bridge_chain,
869 								 chain_node) {
870 					if (next == bridge)
871 						break;
872 
873 					drm_atomic_bridge_call_post_disable(next,
874 									    state);
875 				}
876 			}
877 		}
878 
879 		drm_atomic_bridge_call_post_disable(bridge, state);
880 
881 		if (limit)
882 			/* Jump all bridges that we have already post_disabled */
883 			bridge = limit;
884 	}
885 }
886 EXPORT_SYMBOL(drm_atomic_bridge_chain_post_disable);
887 
888 static void drm_atomic_bridge_call_pre_enable(struct drm_bridge *bridge,
889 					      struct drm_atomic_state *state)
890 {
891 	if (state && bridge->funcs->atomic_pre_enable)
892 		bridge->funcs->atomic_pre_enable(bridge, state);
893 	else if (bridge->funcs->pre_enable)
894 		bridge->funcs->pre_enable(bridge);
895 }
896 
897 /**
898  * drm_atomic_bridge_chain_pre_enable - prepares for enabling all bridges in
899  *					the encoder chain
900  * @bridge: bridge control structure
901  * @state: atomic state being committed
902  *
903  * Calls &drm_bridge_funcs.atomic_pre_enable (falls back on
904  * &drm_bridge_funcs.pre_enable) op for all the bridges in the encoder chain,
905  * starting from the last bridge to the first. These are called before calling
906  * &drm_encoder_helper_funcs.atomic_enable
907  *
908  * If a bridge sets @pre_enable_prev_first, then the pre_enable for the
909  * prev bridge will be called before pre_enable of this bridge.
910  *
911  * Example:
912  * Bridge A ---> Bridge B ---> Bridge C ---> Bridge D ---> Bridge E
913  *
914  * With the pre_enable_prev_first flag enabled in Bridges B, D and E, the
915  * resulting @pre_enable order would be:
916  * Bridge C, Bridge D, Bridge E, Bridge A, Bridge B.
917  *
918  * Note: the bridge passed should be the one closest to the encoder
919  */
920 void drm_atomic_bridge_chain_pre_enable(struct drm_bridge *bridge,
921 					struct drm_atomic_state *state)
922 {
923 	struct drm_encoder *encoder;
924 	struct drm_bridge *iter, *next, *limit;
925 
926 	if (!bridge)
927 		return;
928 
929 	encoder = bridge->encoder;
930 
931 	list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) {
932 		if (iter->pre_enable_prev_first) {
933 			next = iter;
934 			limit = bridge;
935 			list_for_each_entry_from_reverse(next,
936 							 &encoder->bridge_chain,
937 							 chain_node) {
938 				if (next == bridge)
939 					break;
940 
941 				if (!next->pre_enable_prev_first) {
942 					/* Found first bridge that does NOT
943 					 * request prev to be enabled first
944 					 */
945 					limit = next;
946 					break;
947 				}
948 			}
949 
950 			list_for_each_entry_from(next, &encoder->bridge_chain, chain_node) {
951 				/* Call requested prev bridge pre_enable
952 				 * in order.
953 				 */
954 				if (next == iter)
955 					/* At the first bridge to request prev
956 					 * bridges called first.
957 					 */
958 					break;
959 
960 				drm_atomic_bridge_call_pre_enable(next, state);
961 			}
962 		}
963 
964 		drm_atomic_bridge_call_pre_enable(iter, state);
965 
966 		if (iter->pre_enable_prev_first)
967 			/* Jump all bridges that we have already pre_enabled */
968 			iter = limit;
969 
970 		if (iter == bridge)
971 			break;
972 	}
973 }
974 EXPORT_SYMBOL(drm_atomic_bridge_chain_pre_enable);
975 
976 /**
977  * drm_atomic_bridge_chain_enable - enables all bridges in the encoder chain
978  * @bridge: bridge control structure
979  * @state: atomic state being committed
980  *
981  * Calls &drm_bridge_funcs.atomic_enable (falls back on
982  * &drm_bridge_funcs.enable) op for all the bridges in the encoder chain,
983  * starting from the first bridge to the last. These are called after completing
984  * &drm_encoder_helper_funcs.atomic_enable
985  *
986  * Note: the bridge passed should be the one closest to the encoder
987  */
988 void drm_atomic_bridge_chain_enable(struct drm_bridge *bridge,
989 				    struct drm_atomic_state *state)
990 {
991 	struct drm_encoder *encoder;
992 
993 	if (!bridge)
994 		return;
995 
996 	encoder = bridge->encoder;
997 	list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
998 		if (bridge->funcs->atomic_enable) {
999 			bridge->funcs->atomic_enable(bridge, state);
1000 		} else if (bridge->funcs->enable) {
1001 			bridge->funcs->enable(bridge);
1002 		}
1003 	}
1004 }
1005 EXPORT_SYMBOL(drm_atomic_bridge_chain_enable);
1006 
1007 static int drm_atomic_bridge_check(struct drm_bridge *bridge,
1008 				   struct drm_crtc_state *crtc_state,
1009 				   struct drm_connector_state *conn_state)
1010 {
1011 	if (bridge->funcs->atomic_check) {
1012 		struct drm_bridge_state *bridge_state;
1013 		int ret;
1014 
1015 		bridge_state = drm_atomic_get_new_bridge_state(crtc_state->state,
1016 							       bridge);
1017 		if (WARN_ON(!bridge_state))
1018 			return -EINVAL;
1019 
1020 		ret = bridge->funcs->atomic_check(bridge, bridge_state,
1021 						  crtc_state, conn_state);
1022 		if (ret)
1023 			return ret;
1024 	} else if (bridge->funcs->mode_fixup) {
1025 		if (!bridge->funcs->mode_fixup(bridge, &crtc_state->mode,
1026 					       &crtc_state->adjusted_mode))
1027 			return -EINVAL;
1028 	}
1029 
1030 	return 0;
1031 }
1032 
1033 static int select_bus_fmt_recursive(struct drm_bridge *first_bridge,
1034 				    struct drm_bridge *cur_bridge,
1035 				    struct drm_crtc_state *crtc_state,
1036 				    struct drm_connector_state *conn_state,
1037 				    u32 out_bus_fmt)
1038 {
1039 	unsigned int i, num_in_bus_fmts = 0;
1040 	struct drm_bridge_state *cur_state;
1041 	struct drm_bridge *prev_bridge __free(drm_bridge_put) =
1042 		drm_bridge_get_prev_bridge(cur_bridge);
1043 	u32 *in_bus_fmts;
1044 	int ret;
1045 
1046 	cur_state = drm_atomic_get_new_bridge_state(crtc_state->state,
1047 						    cur_bridge);
1048 
1049 	/*
1050 	 * If bus format negotiation is not supported by this bridge, let's
1051 	 * pass MEDIA_BUS_FMT_FIXED to the previous bridge in the chain and
1052 	 * hope that it can handle this situation gracefully (by providing
1053 	 * appropriate default values).
1054 	 */
1055 	if (!cur_bridge->funcs->atomic_get_input_bus_fmts) {
1056 		if (cur_bridge != first_bridge) {
1057 			ret = select_bus_fmt_recursive(first_bridge,
1058 						       prev_bridge, crtc_state,
1059 						       conn_state,
1060 						       MEDIA_BUS_FMT_FIXED);
1061 			if (ret)
1062 				return ret;
1063 		}
1064 
1065 		/*
1066 		 * Driver does not implement the atomic state hooks, but that's
1067 		 * fine, as long as it does not access the bridge state.
1068 		 */
1069 		if (cur_state) {
1070 			cur_state->input_bus_cfg.format = MEDIA_BUS_FMT_FIXED;
1071 			cur_state->output_bus_cfg.format = out_bus_fmt;
1072 		}
1073 
1074 		return 0;
1075 	}
1076 
1077 	/*
1078 	 * If the driver implements ->atomic_get_input_bus_fmts() it
1079 	 * should also implement the atomic state hooks.
1080 	 */
1081 	if (WARN_ON(!cur_state))
1082 		return -EINVAL;
1083 
1084 	in_bus_fmts = cur_bridge->funcs->atomic_get_input_bus_fmts(cur_bridge,
1085 							cur_state,
1086 							crtc_state,
1087 							conn_state,
1088 							out_bus_fmt,
1089 							&num_in_bus_fmts);
1090 	if (!num_in_bus_fmts)
1091 		return -ENOTSUPP;
1092 	else if (!in_bus_fmts)
1093 		return -ENOMEM;
1094 
1095 	if (first_bridge == cur_bridge) {
1096 		cur_state->input_bus_cfg.format = in_bus_fmts[0];
1097 		cur_state->output_bus_cfg.format = out_bus_fmt;
1098 		kfree(in_bus_fmts);
1099 		return 0;
1100 	}
1101 
1102 	for (i = 0; i < num_in_bus_fmts; i++) {
1103 		ret = select_bus_fmt_recursive(first_bridge, prev_bridge,
1104 					       crtc_state, conn_state,
1105 					       in_bus_fmts[i]);
1106 		if (ret != -ENOTSUPP)
1107 			break;
1108 	}
1109 
1110 	if (!ret) {
1111 		cur_state->input_bus_cfg.format = in_bus_fmts[i];
1112 		cur_state->output_bus_cfg.format = out_bus_fmt;
1113 	}
1114 
1115 	kfree(in_bus_fmts);
1116 	return ret;
1117 }
1118 
1119 /*
1120  * This function is called by &drm_atomic_bridge_chain_check() just before
1121  * calling &drm_bridge_funcs.atomic_check() on all elements of the chain.
1122  * It performs bus format negotiation between bridge elements. The negotiation
1123  * happens in reverse order, starting from the last element in the chain up to
1124  * @bridge.
1125  *
1126  * Negotiation starts by retrieving supported output bus formats on the last
1127  * bridge element and testing them one by one. The test is recursive, meaning
1128  * that for each tested output format, the whole chain will be walked backward,
1129  * and each element will have to choose an input bus format that can be
1130  * transcoded to the requested output format. When a bridge element does not
1131  * support transcoding into a specific output format -ENOTSUPP is returned and
1132  * the next bridge element will have to try a different format. If none of the
1133  * combinations worked, -ENOTSUPP is returned and the atomic modeset will fail.
1134  *
1135  * This implementation is relying on
1136  * &drm_bridge_funcs.atomic_get_output_bus_fmts() and
1137  * &drm_bridge_funcs.atomic_get_input_bus_fmts() to gather supported
1138  * input/output formats.
1139  *
1140  * When &drm_bridge_funcs.atomic_get_output_bus_fmts() is not implemented by
1141  * the last element of the chain, &drm_atomic_bridge_chain_select_bus_fmts()
1142  * tries a single format: &drm_connector.display_info.bus_formats[0] if
1143  * available, MEDIA_BUS_FMT_FIXED otherwise.
1144  *
1145  * When &drm_bridge_funcs.atomic_get_input_bus_fmts() is not implemented,
1146  * &drm_atomic_bridge_chain_select_bus_fmts() skips the negotiation on the
1147  * bridge element that lacks this hook and asks the previous element in the
1148  * chain to try MEDIA_BUS_FMT_FIXED. It's up to bridge drivers to decide what
1149  * to do in that case (fail if they want to enforce bus format negotiation, or
1150  * provide a reasonable default if they need to support pipelines where not
1151  * all elements support bus format negotiation).
1152  */
1153 static int
1154 drm_atomic_bridge_chain_select_bus_fmts(struct drm_bridge *bridge,
1155 					struct drm_crtc_state *crtc_state,
1156 					struct drm_connector_state *conn_state)
1157 {
1158 	struct drm_connector *conn = conn_state->connector;
1159 	struct drm_encoder *encoder = bridge->encoder;
1160 	struct drm_bridge_state *last_bridge_state;
1161 	unsigned int i, num_out_bus_fmts = 0;
1162 	u32 *out_bus_fmts;
1163 	int ret = 0;
1164 
1165 	struct drm_bridge *last_bridge __free(drm_bridge_put) =
1166 		drm_bridge_get(list_last_entry(&encoder->bridge_chain,
1167 					       struct drm_bridge, chain_node));
1168 	last_bridge_state = drm_atomic_get_new_bridge_state(crtc_state->state,
1169 							    last_bridge);
1170 
1171 	if (last_bridge->funcs->atomic_get_output_bus_fmts) {
1172 		const struct drm_bridge_funcs *funcs = last_bridge->funcs;
1173 
1174 		/*
1175 		 * If the driver implements ->atomic_get_output_bus_fmts() it
1176 		 * should also implement the atomic state hooks.
1177 		 */
1178 		if (WARN_ON(!last_bridge_state))
1179 			return -EINVAL;
1180 
1181 		out_bus_fmts = funcs->atomic_get_output_bus_fmts(last_bridge,
1182 							last_bridge_state,
1183 							crtc_state,
1184 							conn_state,
1185 							&num_out_bus_fmts);
1186 		if (!num_out_bus_fmts)
1187 			return -ENOTSUPP;
1188 		else if (!out_bus_fmts)
1189 			return -ENOMEM;
1190 	} else {
1191 		num_out_bus_fmts = 1;
1192 		out_bus_fmts = kmalloc(sizeof(*out_bus_fmts), GFP_KERNEL);
1193 		if (!out_bus_fmts)
1194 			return -ENOMEM;
1195 
1196 		if (conn->display_info.num_bus_formats &&
1197 		    conn->display_info.bus_formats)
1198 			out_bus_fmts[0] = conn->display_info.bus_formats[0];
1199 		else
1200 			out_bus_fmts[0] = MEDIA_BUS_FMT_FIXED;
1201 	}
1202 
1203 	for (i = 0; i < num_out_bus_fmts; i++) {
1204 		ret = select_bus_fmt_recursive(bridge, last_bridge, crtc_state,
1205 					       conn_state, out_bus_fmts[i]);
1206 		if (ret != -ENOTSUPP)
1207 			break;
1208 	}
1209 
1210 	kfree(out_bus_fmts);
1211 
1212 	return ret;
1213 }
1214 
1215 static void
1216 drm_atomic_bridge_propagate_bus_flags(struct drm_bridge *bridge,
1217 				      struct drm_connector *conn,
1218 				      struct drm_atomic_state *state)
1219 {
1220 	struct drm_bridge_state *bridge_state, *next_bridge_state;
1221 	u32 output_flags = 0;
1222 
1223 	bridge_state = drm_atomic_get_new_bridge_state(state, bridge);
1224 
1225 	/* No bridge state attached to this bridge => nothing to propagate. */
1226 	if (!bridge_state)
1227 		return;
1228 
1229 	struct drm_bridge *next_bridge __free(drm_bridge_put) = drm_bridge_get_next_bridge(bridge);
1230 
1231 	/*
1232 	 * Let's try to apply the most common case here, that is, propagate
1233 	 * display_info flags for the last bridge, and propagate the input
1234 	 * flags of the next bridge element to the output end of the current
1235 	 * bridge when the bridge is not the last one.
1236 	 * There are exceptions to this rule, like when signal inversion is
1237 	 * happening at the board level, but that's something drivers can deal
1238 	 * with from their &drm_bridge_funcs.atomic_check() implementation by
1239 	 * simply overriding the flags value we've set here.
1240 	 */
1241 	if (!next_bridge) {
1242 		output_flags = conn->display_info.bus_flags;
1243 	} else {
1244 		next_bridge_state = drm_atomic_get_new_bridge_state(state,
1245 								next_bridge);
1246 		/*
1247 		 * No bridge state attached to the next bridge, just leave the
1248 		 * flags to 0.
1249 		 */
1250 		if (next_bridge_state)
1251 			output_flags = next_bridge_state->input_bus_cfg.flags;
1252 	}
1253 
1254 	bridge_state->output_bus_cfg.flags = output_flags;
1255 
1256 	/*
1257 	 * Propagate the output flags to the input end of the bridge. Again, it's
1258 	 * not necessarily what all bridges want, but that's what most of them
1259 	 * do, and by doing that by default we avoid forcing drivers to
1260 	 * duplicate the "dummy propagation" logic.
1261 	 */
1262 	bridge_state->input_bus_cfg.flags = output_flags;
1263 }
1264 
1265 /**
1266  * drm_atomic_bridge_chain_check() - Do an atomic check on the bridge chain
1267  * @bridge: bridge control structure
1268  * @crtc_state: new CRTC state
1269  * @conn_state: new connector state
1270  *
1271  * First trigger a bus format negotiation before calling
1272  * &drm_bridge_funcs.atomic_check() (falls back on
1273  * &drm_bridge_funcs.mode_fixup()) op for all the bridges in the encoder chain,
1274  * starting from the last bridge to the first. These are called before calling
1275  * &drm_encoder_helper_funcs.atomic_check()
1276  *
1277  * RETURNS:
1278  * 0 on success, a negative error code on failure
1279  */
1280 int drm_atomic_bridge_chain_check(struct drm_bridge *bridge,
1281 				  struct drm_crtc_state *crtc_state,
1282 				  struct drm_connector_state *conn_state)
1283 {
1284 	struct drm_connector *conn = conn_state->connector;
1285 	struct drm_encoder *encoder;
1286 	struct drm_bridge *iter;
1287 	int ret;
1288 
1289 	if (!bridge)
1290 		return 0;
1291 
1292 	ret = drm_atomic_bridge_chain_select_bus_fmts(bridge, crtc_state,
1293 						      conn_state);
1294 	if (ret)
1295 		return ret;
1296 
1297 	encoder = bridge->encoder;
1298 	list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) {
1299 		int ret;
1300 
1301 		/*
1302 		 * Bus flags are propagated by default. If a bridge needs to
1303 		 * tweak the input bus flags for any reason, it should happen
1304 		 * in its &drm_bridge_funcs.atomic_check() implementation such
1305 		 * that preceding bridges in the chain can propagate the new
1306 		 * bus flags.
1307 		 */
1308 		drm_atomic_bridge_propagate_bus_flags(iter, conn,
1309 						      crtc_state->state);
1310 
1311 		ret = drm_atomic_bridge_check(iter, crtc_state, conn_state);
1312 		if (ret)
1313 			return ret;
1314 
1315 		if (iter == bridge)
1316 			break;
1317 	}
1318 
1319 	return 0;
1320 }
1321 EXPORT_SYMBOL(drm_atomic_bridge_chain_check);
1322 
1323 /**
1324  * drm_bridge_detect - check if anything is attached to the bridge output
1325  * @bridge: bridge control structure
1326  * @connector: attached connector
1327  *
1328  * If the bridge supports output detection, as reported by the
1329  * DRM_BRIDGE_OP_DETECT bridge ops flag, call &drm_bridge_funcs.detect for the
1330  * bridge and return the connection status. Otherwise return
1331  * connector_status_unknown.
1332  *
1333  * RETURNS:
1334  * The detection status on success, or connector_status_unknown if the bridge
1335  * doesn't support output detection.
1336  */
1337 enum drm_connector_status
1338 drm_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
1339 {
1340 	if (!(bridge->ops & DRM_BRIDGE_OP_DETECT))
1341 		return connector_status_unknown;
1342 
1343 	return bridge->funcs->detect(bridge, connector);
1344 }
1345 EXPORT_SYMBOL_GPL(drm_bridge_detect);
1346 
1347 /**
1348  * drm_bridge_get_modes - fill the connector with all modes currently valid for the sink
1350  * @bridge: bridge control structure
1351  * @connector: the connector to fill with modes
1352  *
1353  * If the bridge supports output modes retrieval, as reported by the
1354  * DRM_BRIDGE_OP_MODES bridge ops flag, call &drm_bridge_funcs.get_modes to
1355  * fill the connector with all valid modes and return the number of modes
1356  * added. Otherwise return 0.
1357  *
1358  * RETURNS:
1359  * The number of modes added to the connector.
1360  */
1361 int drm_bridge_get_modes(struct drm_bridge *bridge,
1362 			 struct drm_connector *connector)
1363 {
1364 	if (!(bridge->ops & DRM_BRIDGE_OP_MODES))
1365 		return 0;
1366 
1367 	return bridge->funcs->get_modes(bridge, connector);
1368 }
1369 EXPORT_SYMBOL_GPL(drm_bridge_get_modes);
1370 
1371 /**
1372  * drm_bridge_edid_read - read the EDID data of the connected display
1373  * @bridge: bridge control structure
1374  * @connector: the connector to read EDID for
1375  *
1376  * If the bridge supports output EDID retrieval, as reported by the
1377  * DRM_BRIDGE_OP_EDID bridge ops flag, call &drm_bridge_funcs.edid_read to get
1378  * the EDID and return it. Otherwise return NULL.
1379  *
1380  * RETURNS:
1381  * The retrieved EDID on success, or NULL otherwise.
1382  */
1383 const struct drm_edid *drm_bridge_edid_read(struct drm_bridge *bridge,
1384 					    struct drm_connector *connector)
1385 {
1386 	if (!(bridge->ops & DRM_BRIDGE_OP_EDID))
1387 		return NULL;
1388 
1389 	return bridge->funcs->edid_read(bridge, connector);
1390 }
1391 EXPORT_SYMBOL_GPL(drm_bridge_edid_read);
1392 
1393 /**
1394  * drm_bridge_hpd_enable - enable hot plug detection for the bridge
1395  * @bridge: bridge control structure
1396  * @cb: hot-plug detection callback
1397  * @data: data to be passed to the hot-plug detection callback
1398  *
1399  * Call &drm_bridge_funcs.hpd_enable if implemented and register the given @cb
1400  * and @data as hot plug notification callback. From now on the @cb will be
1401  * called with @data when an output status change is detected by the bridge,
1402  * until hot plug notification gets disabled with drm_bridge_hpd_disable().
1403  *
1404  * Hot plug detection is supported only if the DRM_BRIDGE_OP_HPD flag is set in
1405  * bridge->ops. This function shall not be called when the flag is not set.
1406  *
1407  * Only one hot plug detection callback can be registered at a time, it is an
1408  * error to call this function when hot plug detection is already enabled for
1409  * the bridge.
1410  */
1411 void drm_bridge_hpd_enable(struct drm_bridge *bridge,
1412 			   void (*cb)(void *data,
1413 				      enum drm_connector_status status),
1414 			   void *data)
1415 {
1416 	if (!(bridge->ops & DRM_BRIDGE_OP_HPD))
1417 		return;
1418 
1419 	mutex_lock(&bridge->hpd_mutex);
1420 
1421 	if (WARN(bridge->hpd_cb, "Hot plug detection already enabled\n"))
1422 		goto unlock;
1423 
1424 	bridge->hpd_cb = cb;
1425 	bridge->hpd_data = data;
1426 
1427 	if (bridge->funcs->hpd_enable)
1428 		bridge->funcs->hpd_enable(bridge);
1429 
1430 unlock:
1431 	mutex_unlock(&bridge->hpd_mutex);
1432 }
1433 EXPORT_SYMBOL_GPL(drm_bridge_hpd_enable);
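/*
 * A sketch of a connector implementation enabling bridge HPD; my_hpd_cb() is
 * a placeholder, and forwarding the event with
 * drm_kms_helper_hotplug_event() is only one possible reaction::
 *
 *	static void my_hpd_cb(void *data, enum drm_connector_status status)
 *	{
 *		struct drm_connector *connector = data;
 *
 *		drm_kms_helper_hotplug_event(connector->dev);
 *	}
 *
 *	if (bridge->ops & DRM_BRIDGE_OP_HPD)
 *		drm_bridge_hpd_enable(bridge, my_hpd_cb, connector);
 */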
1434 
1435 /**
1436  * drm_bridge_hpd_disable - disable hot plug detection for the bridge
1437  * @bridge: bridge control structure
1438  *
1439  * Call &drm_bridge_funcs.hpd_disable if implemented and unregister the hot
1440  * plug detection callback previously registered with drm_bridge_hpd_enable().
1441  * Once this function returns the callback will not be called by the bridge
1442  * when an output status change occurs.
1443  *
1444  * Hot plug detection is supported only if the DRM_BRIDGE_OP_HPD flag is set in
1445  * bridge->ops. This function shall not be called when the flag is not set.
1446  */
1447 void drm_bridge_hpd_disable(struct drm_bridge *bridge)
1448 {
1449 	if (!(bridge->ops & DRM_BRIDGE_OP_HPD))
1450 		return;
1451 
1452 	mutex_lock(&bridge->hpd_mutex);
1453 	if (bridge->funcs->hpd_disable)
1454 		bridge->funcs->hpd_disable(bridge);
1455 
1456 	bridge->hpd_cb = NULL;
1457 	bridge->hpd_data = NULL;
1458 	mutex_unlock(&bridge->hpd_mutex);
1459 }
1460 EXPORT_SYMBOL_GPL(drm_bridge_hpd_disable);
1461 
1462 /**
1463  * drm_bridge_hpd_notify - notify hot plug detection events
1464  * @bridge: bridge control structure
1465  * @status: output connection status
1466  *
1467  * Bridge drivers shall call this function to report hot plug events when they
1468  * detect a change in the output status, when hot plug detection has been
1469  * enabled by drm_bridge_hpd_enable().
1470  *
1471  * This function shall be called in a context that can sleep.
1472  */
1473 void drm_bridge_hpd_notify(struct drm_bridge *bridge,
1474 			   enum drm_connector_status status)
1475 {
1476 	mutex_lock(&bridge->hpd_mutex);
1477 	if (bridge->hpd_cb)
1478 		bridge->hpd_cb(bridge->hpd_data, status);
1479 	mutex_unlock(&bridge->hpd_mutex);
1480 }
1481 EXPORT_SYMBOL_GPL(drm_bridge_hpd_notify);
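/*
 * Bridge drivers typically call drm_bridge_hpd_notify() from a threaded
 * interrupt handler; a sketch, with my_bridge_hpd_isr() and
 * my_bridge_read_status() as placeholders::
 *
 *	static irqreturn_t my_bridge_hpd_isr(int irq, void *arg)
 *	{
 *		struct my_bridge *priv = arg;
 *
 *		drm_bridge_hpd_notify(&priv->bridge,
 *				      my_bridge_read_status(priv));
 *
 *		return IRQ_HANDLED;
 *	}
 */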
1482 
1483 #ifdef CONFIG_OF
1484 /**
1485  * of_drm_find_and_get_bridge - find the bridge corresponding to the device
1486  *                              node in the global bridge list
1487  * @np: device node
1488  *
1489  * The refcount of the returned bridge is incremented. Use drm_bridge_put()
1490  * when done with it.
1491  *
1492  * RETURNS:
1493  * drm_bridge control struct on success, NULL on failure
1494  */
1495 struct drm_bridge *of_drm_find_and_get_bridge(struct device_node *np)
1496 {
1497 	struct drm_bridge *bridge;
1498 
1499 	scoped_guard(mutex, &bridge_lock) {
1500 		list_for_each_entry(bridge, &bridge_list, list)
1501 			if (bridge->of_node == np)
1502 				return drm_bridge_get(bridge);
1503 	}
1504 
1505 	return NULL;
1506 }
1507 EXPORT_SYMBOL(of_drm_find_and_get_bridge);
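/*
 * A sketch of the typical lookup pattern: defer probe until the bridge has
 * registered, and drop the reference once done with the pointer::
 *
 *	struct drm_bridge *bridge = of_drm_find_and_get_bridge(np);
 *
 *	if (!bridge)
 *		return -EPROBE_DEFER;
 *
 *	// ... use the bridge ...
 *
 *	drm_bridge_put(bridge);
 */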
1508 
1509 /**
1510  * of_drm_find_bridge - find the bridge corresponding to the device node in
1511  *			the global bridge list
1512  *
1513  * @np: device node
1514  *
1515  * This function is deprecated. Convert to of_drm_find_and_get_bridge()
1516  * instead for proper refcounting.
1517  *
1518  * The bridge returned by this function is not refcounted. This is
1519  * dangerous because the bridge might be deallocated even before the caller
1520  * has a chance to use it. To use this function you have to do one of:
1521  * - get a reference with drm_bridge_get() as soon as possible to
1522  *   minimize the race window, and then drm_bridge_put() when no longer
1523  *   using the pointer
1524  * - not call drm_bridge_get() or drm_bridge_put() at all, which used to
1525  *   be the correct practice before dynamic bridge lifetime was introduced
1526  * - again, convert to of_drm_find_and_get_bridge(), which is the only safe
1527  *   thing to do
1528  *
1529  * RETURNS:
1530  * drm_bridge control struct on success, NULL on failure
1531  */
1532 struct drm_bridge *of_drm_find_bridge(struct device_node *np)
1533 {
1534 	struct drm_bridge *bridge = of_drm_find_and_get_bridge(np);
1535 
1536 	/*
1537 	 * We need to emulate the original semantics of
1538 	 * of_drm_find_bridge(), which was not getting any bridge
1539 	 * reference. Being now based on of_drm_find_and_get_bridge() which
1540 	 * gets a reference, put it before returning.
1541 	 */
1542 	drm_bridge_put(bridge);
1543 
1544 	return bridge;
1545 }
1546 EXPORT_SYMBOL(of_drm_find_bridge);
1547 #endif
1548 
1549 /**
1550  * devm_drm_put_bridge - Release a bridge reference obtained via devm
1551  * @dev: device that got the bridge via devm
1552  * @bridge: pointer to a struct drm_bridge obtained via devm
1553  *
1554  * Same as drm_bridge_put() for bridge pointers obtained via devm functions
1555  * such as devm_drm_bridge_alloc().
1556  *
1557  * This function is a temporary workaround and MUST NOT be used. Manual
1558  * handling of bridge lifetime is inherently unsafe.
1559  */
1560 void devm_drm_put_bridge(struct device *dev, struct drm_bridge *bridge)
1561 {
1562 	devm_release_action(dev, drm_bridge_put_void, bridge);
1563 }
1564 EXPORT_SYMBOL(devm_drm_put_bridge);
1565 
1566 static void drm_bridge_debugfs_show_bridge(struct drm_printer *p,
1567 					   struct drm_bridge *bridge,
1568 					   unsigned int idx,
1569 					   bool lingering)
1570 {
1571 	drm_printf(p, "bridge[%u]: %ps\n", idx, bridge->funcs);
1572 
1573 	drm_printf(p, "\trefcount: %u%s\n", kref_read(&bridge->refcount),
1574 		   lingering ? " [lingering]" : "");
1575 
1576 	drm_printf(p, "\ttype: [%d] %s\n",
1577 		   bridge->type,
1578 		   drm_get_connector_type_name(bridge->type));
1579 
1580 	/* The OF node could be freed after drm_bridge_remove() */
1581 	if (bridge->of_node && !lingering)
1582 		drm_printf(p, "\tOF: %pOFfc\n", bridge->of_node);
1583 
1584 	drm_printf(p, "\tops: [0x%x]", bridge->ops);
1585 	if (bridge->ops & DRM_BRIDGE_OP_DETECT)
1586 		drm_puts(p, " detect");
1587 	if (bridge->ops & DRM_BRIDGE_OP_EDID)
1588 		drm_puts(p, " edid");
1589 	if (bridge->ops & DRM_BRIDGE_OP_HPD)
1590 		drm_puts(p, " hpd");
1591 	if (bridge->ops & DRM_BRIDGE_OP_MODES)
1592 		drm_puts(p, " modes");
1593 	if (bridge->ops & DRM_BRIDGE_OP_HDMI)
1594 		drm_puts(p, " hdmi");
1595 	drm_puts(p, "\n");
1596 }
1597 
1598 static int allbridges_show(struct seq_file *m, void *data)
1599 {
1600 	struct drm_printer p = drm_seq_file_printer(m);
1601 	struct drm_bridge *bridge;
1602 	unsigned int idx = 0;
1603 
1604 	mutex_lock(&bridge_lock);
1605 
1606 	list_for_each_entry(bridge, &bridge_list, list)
1607 		drm_bridge_debugfs_show_bridge(&p, bridge, idx++, false);
1608 
1609 	list_for_each_entry(bridge, &bridge_lingering_list, list)
1610 		drm_bridge_debugfs_show_bridge(&p, bridge, idx++, true);
1611 
1612 	mutex_unlock(&bridge_lock);
1613 
1614 	return 0;
1615 }
1616 DEFINE_SHOW_ATTRIBUTE(allbridges);
1617 
1618 static int encoder_bridges_show(struct seq_file *m, void *data)
1619 {
1620 	struct drm_encoder *encoder = m->private;
1621 	struct drm_printer p = drm_seq_file_printer(m);
1622 	unsigned int idx = 0;
1623 
1624 	drm_for_each_bridge_in_chain_scoped(encoder, bridge)
1625 		drm_bridge_debugfs_show_bridge(&p, bridge, idx++, false);
1626 
1627 	return 0;
1628 }
1629 DEFINE_SHOW_ATTRIBUTE(encoder_bridges);
1630 
1631 void drm_bridge_debugfs_params(struct dentry *root)
1632 {
1633 	debugfs_create_file("bridges", 0444, root, NULL, &allbridges_fops);
1634 }
1635 
1636 void drm_bridge_debugfs_encoder_params(struct dentry *root,
1637 				       struct drm_encoder *encoder)
1638 {
1639 	/* bridges list */
1640 	debugfs_create_file("bridges", 0444, root, encoder, &encoder_bridges_fops);
1641 }
1642 
1643 MODULE_AUTHOR("Ajay Kumar <ajaykumar.rs@samsung.com>");
1644 MODULE_DESCRIPTION("DRM bridge infrastructure");
1645 MODULE_LICENSE("GPL and additional rights");
1646