xref: /linux/drivers/gpu/drm/drm_bridge.c (revision f6e8dc9edf963dbc99085e54f6ced6da9daa6100)
1 /*
2  * Copyright (c) 2014 Samsung Electronics Co., Ltd
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sub license,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the
12  * next paragraph) shall be included in all copies or substantial portions
13  * of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include <linux/debugfs.h>
25 #include <linux/err.h>
26 #include <linux/export.h>
27 #include <linux/media-bus-format.h>
28 #include <linux/module.h>
29 #include <linux/mutex.h>
30 
31 #include <drm/drm_atomic_state_helper.h>
32 #include <drm/drm_bridge.h>
33 #include <drm/drm_debugfs.h>
34 #include <drm/drm_edid.h>
35 #include <drm/drm_encoder.h>
36 #include <drm/drm_file.h>
37 #include <drm/drm_of.h>
38 #include <drm/drm_print.h>
39 
40 #include "drm_crtc_internal.h"
41 
42 /**
43  * DOC: overview
44  *
45  * &struct drm_bridge represents a device that hangs on to an encoder. These are
46  * handy when a regular &drm_encoder entity isn't enough to represent the entire
47  * encoder chain.
48  *
49  * A bridge is always attached to a single &drm_encoder at a time, but can be
50  * either connected to it directly, or through a chain of bridges::
51  *
52  *     [ CRTC ---> ] Encoder ---> Bridge A ---> Bridge B
53  *
54  * Here, the output of the encoder feeds to bridge A, and that in turn feeds to
55  * bridge B. Bridge chains can be arbitrarily long, and shall be fully linear:
56  * Chaining multiple bridges to the output of a bridge, or the same bridge to
57  * the output of different bridges, is not supported.
58  *
59  * A &drm_bridge, like a &drm_panel, isn't a &drm_mode_object entity like
60  * planes, CRTCs, encoders or connectors, and is hence not visible to
61  * userspace. Bridges just provide additional hooks to get the desired output
62  * at the end of the encoder chain.
63  */
64 
65 /**
66  * DOC:	display driver integration
67  *
68  * Display drivers are responsible for linking encoders with the first bridge
69  * in the chain. This is done by acquiring the appropriate bridge with
70  * devm_drm_of_get_bridge(). Once acquired, the bridge shall be attached to
71  * the encoder with a call to drm_bridge_attach() (see the sketch after this comment).
72  *
73  * Bridges are responsible for linking themselves with the next bridge in the
74  * chain, if any. This is done the same way as for encoders, with the call to
75  * drm_bridge_attach() occurring in the &drm_bridge_funcs.attach operation.
76  *
77  * Once these links are created, the bridges can participate along with encoder
78  * functions to perform mode validation and fixup (through
79  * drm_bridge_chain_mode_valid() and drm_atomic_bridge_chain_check()), mode
80  * setting (through drm_bridge_chain_mode_set()), enable (through
81  * drm_atomic_bridge_chain_pre_enable() and drm_atomic_bridge_chain_enable())
82  * and disable (through drm_atomic_bridge_chain_disable() and
83  * drm_atomic_bridge_chain_post_disable()). Those functions call the
84  * corresponding operations provided in &drm_bridge_funcs in sequence for all
85  * bridges in the chain.
86  *
87  * For display drivers that use the atomic helpers
88  * drm_atomic_helper_check_modeset(),
89  * drm_atomic_helper_commit_modeset_enables() and
90  * drm_atomic_helper_commit_modeset_disables() (either directly in hand-rolled
91  * commit check and commit tail handlers, or through the higher-level
92  * drm_atomic_helper_check() and drm_atomic_helper_commit_tail() or
93  * drm_atomic_helper_commit_tail_rpm() helpers), this is done transparently and
94  * requires no intervention from the driver. For other drivers, the relevant
95  * DRM bridge chain functions shall be called manually.
96  *
97  * Bridges also participate in implementing the &drm_connector at the end of
98  * the bridge chain. Display drivers may use the drm_bridge_connector_init()
99  * helper to create the &drm_connector, or implement it manually on top of the
100  * connector-related operations exposed by the bridge (see the overview
101  * documentation of bridge operations for more details).
102  */
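
/*
 * As an illustration of the integration described above, a display driver
 * would typically contain something like the sketch below. This is not taken
 * from any specific driver: "priv", "dev" and "np" are hypothetical names for
 * the driver's private data, its struct device and the relevant DT node, and
 * error handling is reduced to a minimum. The DRM_BRIDGE_ATTACH_NO_CONNECTOR
 * flag assumes the driver creates the connector itself, for instance with
 * drm_bridge_connector_init()::
 *
 *      struct drm_bridge *bridge;
 *      int ret;
 *
 *      bridge = devm_drm_of_get_bridge(dev, np, 0, 0);
 *      if (IS_ERR(bridge))
 *              return PTR_ERR(bridge);
 *
 *      ret = drm_bridge_attach(&priv->encoder, bridge, NULL,
 *                              DRM_BRIDGE_ATTACH_NO_CONNECTOR);
 *      if (ret)
 *              return ret;
 */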
103 
104 /**
105  * DOC: special care dsi
106  *
107  * The interaction between the bridges and other frameworks involved in
108  * the probing of the upstream driver and the bridge driver can be
109  * challenging. Indeed, there are multiple cases that need to be
110  * considered:
111  *
112  * - The upstream driver doesn't use the component framework and isn't a
113  *   MIPI-DSI host. In this case, the bridge driver will probe at some
114  *   point and the upstream driver should keep retrying its probe, by
115  *   returning -EPROBE_DEFER as long as the bridge driver hasn't probed.
116  *
117  * - The upstream driver doesn't use the component framework, but is a
118  *   MIPI-DSI host. The bridge device is controlled through MIPI-DCS
119  *   commands. In this case, the bridge device is a child of the display
120  *   device, and when it probes it is guaranteed that the display device
121  *   (and MIPI-DSI host) is present. The upstream driver is guaranteed
122  *   that the bridge driver is connected between the
123  *   &mipi_dsi_host_ops.attach and &mipi_dsi_host_ops.detach operations.
124  *   Therefore, it must run mipi_dsi_host_register() in its probe
125  *   function, and then run drm_bridge_attach() in its
126  *   &mipi_dsi_host_ops.attach hook.
127  *
128  * - The upstream driver uses the component framework and is a MIPI-DSI
129  *   host. The bridge device is controlled through MIPI-DCS commands. This
130  *   is the same situation as above, and the upstream driver can run
131  *   mipi_dsi_host_register() in either its probe or bind hook.
132  *
133  * - The upstream driver uses the component framework and is a MIPI-DSI
134  *   host. The bridge device is controlled through a separate bus (such as
135  *   I2C). In this case, there's no correlation between the probe
136  *   of the bridge and upstream drivers, so care must be taken to avoid
137  *   an endless EPROBE_DEFER loop, with each driver waiting for the
138  *   other to probe.
139  *
140  * The ideal pattern to cover the last item (and all the others in the MIPI-DSI
141  * host driver case) is to split the operations like this (sketched below):
142  *
143  * - The MIPI-DSI host driver must run mipi_dsi_host_register() in its
144  *   probe hook. It will make sure that the MIPI-DSI host sticks around,
145  *   and that the driver's bind can be called.
146  *
147  * - In its probe hook, the bridge driver must try to find its MIPI-DSI
148  *   host, register as a MIPI-DSI device and attach the MIPI-DSI device
149  *   to its host. The bridge driver is now functional.
150  *
151  * - In its &struct mipi_dsi_host_ops.attach hook, the MIPI-DSI host can
152  *   now add its component. Its bind hook will then be called, and since
153  *   the bridge driver is attached and registered, the display driver can
154  *   look the bridge up and attach it.
155  *
156  * At this point, we're now certain that both the upstream driver and
157  * the bridge driver are functional and we can't have a deadlock-like
158  * situation when probing.
159  */
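
/*
 * A minimal sketch of the split described above, on the MIPI-DSI host side.
 * The names (my_dsi, my_dsi_host_ops, my_dsi_component_ops, ...) are
 * hypothetical and most error handling is omitted; this only illustrates the
 * ordering, not a complete driver::
 *
 *      struct my_dsi {
 *              struct mipi_dsi_host host;
 *      };
 *
 *      static int my_dsi_host_attach(struct mipi_dsi_host *host,
 *                                    struct mipi_dsi_device *device)
 *      {
 *              // The bridge driver has probed, registered its MIPI-DSI
 *              // device and attached it to us: it is now safe to expose the
 *              // component. The bind hook can then look up the bridge (for
 *              // instance with devm_drm_of_get_bridge()) and attach it.
 *              return component_add(host->dev, &my_dsi_component_ops);
 *      }
 *
 *      static int my_dsi_probe(struct platform_device *pdev)
 *      {
 *              struct my_dsi *dsi;
 *
 *              dsi = devm_kzalloc(&pdev->dev, sizeof(*dsi), GFP_KERNEL);
 *              if (!dsi)
 *                      return -ENOMEM;
 *
 *              dsi->host.dev = &pdev->dev;
 *              dsi->host.ops = &my_dsi_host_ops;
 *
 *              // Register the DSI host first, so the bridge driver can
 *              // probe against it; the component is only added once the
 *              // bridge has attached, avoiding an EPROBE_DEFER loop.
 *              return mipi_dsi_host_register(&dsi->host);
 *      }
 */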
160 
161 /**
162  * DOC: dsi bridge operations
163  *
164  * DSI host interfaces are expected to be implemented as bridges rather than
165  * encoders; however, there are a few aspects of their operation that need to
166  * be defined in order to provide a consistent interface.
167  *
168  * A DSI host should keep the PHY powered down until the pre_enable operation is
169  * called. All lanes are in an undefined idle state up to this point, and it
170  * must not be assumed that they are in LP-11.
171  * pre_enable should initialise the PHY, set the data lanes to LP-11, and the
172  * clock lane to either LP-11 or HS depending on the mode_flag
173  * %MIPI_DSI_CLOCK_NON_CONTINUOUS.
174  *
175  * Ordinarily the pre_enable of the downstream bridge (the DSI peripheral)
176  * will have been called before that of the DSI host. If the DSI peripheral
177  * requires LP-11 and/or the clock lane to be in HS mode prior to its
178  * pre_enable, it can set the &pre_enable_prev_first flag to request that the
179  * pre_enable (and post_disable) order be altered to enable the DSI host first.
180  *
181  * Either the CRTC enable or the DSI host's enable operation should switch the
182  * host to actively transmitting video on the data lanes.
183  *
184  * The reverse also applies. The DSI host disable operation or stopping the CRTC
185  * should stop transmitting video, and the data lanes should return to the LP-11
186  * state. The DSI host &post_disable operation should disable the PHY.
187  * If the &pre_enable_prev_first flag is set, then the DSI peripheral's
188  * bridge &post_disable will be called before the DSI host's post_disable.
189  *
190  * Whilst it is valid to call &host_transfer prior to pre_enable or after
191  * post_disable, the exact state of the lanes is undefined at this point. The
192  * DSI host should initialise the interface, transmit the data, and then disable
193  * the interface again.
194  *
195  * Ultra Low Power State (ULPS) is not explicitly supported by DRM. If
196  * implemented, it therefore needs to be handled entirely within the DSI Host
197  * driver.
198  */
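
/*
 * A DSI peripheral (the downstream bridge) that needs the host to be in LP-11
 * (and, if required, the clock lane in HS) before its own pre_enable simply
 * sets the flag before registering its bridge. A hypothetical sketch, with
 * "ctx" standing for the peripheral driver's private data::
 *
 *      ctx->bridge.pre_enable_prev_first = true;
 *      drm_bridge_add(&ctx->bridge);
 */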
199 
200 /* Protect bridge_list and bridge_lingering_list */
201 static DEFINE_MUTEX(bridge_lock);
202 static LIST_HEAD(bridge_list);
203 static LIST_HEAD(bridge_lingering_list);
204 
205 static void __drm_bridge_free(struct kref *kref)
206 {
207 	struct drm_bridge *bridge = container_of(kref, struct drm_bridge, refcount);
208 
209 	mutex_lock(&bridge_lock);
210 	list_del(&bridge->list);
211 	mutex_unlock(&bridge_lock);
212 
213 	if (bridge->funcs->destroy)
214 		bridge->funcs->destroy(bridge);
215 
216 	kfree(bridge->container);
217 }
218 
219 /**
220  * drm_bridge_get - Acquire a bridge reference
221  * @bridge: DRM bridge
222  *
223  * This function increments the bridge's refcount.
224  *
225  * Returns:
226  * Pointer to @bridge.
227  */
228 struct drm_bridge *drm_bridge_get(struct drm_bridge *bridge)
229 {
230 	if (bridge)
231 		kref_get(&bridge->refcount);
232 
233 	return bridge;
234 }
235 EXPORT_SYMBOL(drm_bridge_get);
236 
237 /**
238  * drm_bridge_put - Release a bridge reference
239  * @bridge: DRM bridge
240  *
241  * This function decrements the bridge's reference count and frees the
242  * object if the reference count drops to zero.
243  */
244 void drm_bridge_put(struct drm_bridge *bridge)
245 {
246 	if (bridge)
247 		kref_put(&bridge->refcount, __drm_bridge_free);
248 }
249 EXPORT_SYMBOL(drm_bridge_put);
250 
251 /**
252  * drm_bridge_put_void - wrapper to drm_bridge_put() taking a void pointer
253  *
254  * @data: pointer to a &struct drm_bridge, cast to a void pointer
255  *
256  * Wrapper of drm_bridge_put() to be used when a function taking a void
257  * pointer is needed, for example as a devm action.
258  */
259 static void drm_bridge_put_void(void *data)
260 {
261 	struct drm_bridge *bridge = (struct drm_bridge *)data;
262 
263 	drm_bridge_put(bridge);
264 }
265 
266 void *__devm_drm_bridge_alloc(struct device *dev, size_t size, size_t offset,
267 			      const struct drm_bridge_funcs *funcs)
268 {
269 	void *container;
270 	struct drm_bridge *bridge;
271 	int err;
272 
273 	if (!funcs) {
274 		dev_warn(dev, "Missing funcs pointer\n");
275 		return ERR_PTR(-EINVAL);
276 	}
277 
278 	container = kzalloc(size, GFP_KERNEL);
279 	if (!container)
280 		return ERR_PTR(-ENOMEM);
281 
282 	bridge = container + offset;
283 	INIT_LIST_HEAD(&bridge->list);
284 	bridge->container = container;
285 	bridge->funcs = funcs;
286 	kref_init(&bridge->refcount);
287 
288 	err = devm_add_action_or_reset(dev, drm_bridge_put_void, bridge);
289 	if (err)
290 		return ERR_PTR(err);
291 
292 	return container;
293 }
294 EXPORT_SYMBOL(__devm_drm_bridge_alloc);
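
/*
 * Bridge drivers are not expected to call __devm_drm_bridge_alloc() directly,
 * but to use the devm_drm_bridge_alloc() macro, which derives the size and
 * offset from the driver's own structure embedding a &struct drm_bridge. A
 * hypothetical sketch ("my_bridge", "my_bridge_funcs" and "regs" are made-up
 * names)::
 *
 *      struct my_bridge {
 *              struct drm_bridge bridge;
 *              void __iomem *regs;
 *      };
 *
 *      struct my_bridge *my;
 *
 *      my = devm_drm_bridge_alloc(dev, struct my_bridge, bridge,
 *                                 &my_bridge_funcs);
 *      if (IS_ERR(my))
 *              return PTR_ERR(my);
 */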
295 
296 /**
297  * drm_bridge_add - register a bridge
298  *
299  * @bridge: bridge control structure
300  *
301  * Add the given bridge to the global list of bridges, where it can be
302  * found by users via of_drm_find_bridge().
303  *
304  * The bridge to be added must have been allocated by
305  * devm_drm_bridge_alloc().
306  */
307 void drm_bridge_add(struct drm_bridge *bridge)
308 {
309 	if (!bridge->container)
310 		DRM_WARN("DRM bridge corrupted or not allocated by devm_drm_bridge_alloc()\n");
311 
312 	drm_bridge_get(bridge);
313 
314 	/*
315 	 * If the bridge was previously added and then removed, it is now
316 	 * in bridge_lingering_list. Remove it or bridge_lingering_list will be
317 	 * corrupted when adding this bridge to bridge_list below.
318 	 */
319 	if (!list_empty(&bridge->list))
320 		list_del_init(&bridge->list);
321 
322 	mutex_init(&bridge->hpd_mutex);
323 
324 	if (bridge->ops & DRM_BRIDGE_OP_HDMI)
325 		bridge->ycbcr_420_allowed = !!(bridge->supported_formats &
326 					       BIT(HDMI_COLORSPACE_YUV420));
327 
328 	mutex_lock(&bridge_lock);
329 	list_add_tail(&bridge->list, &bridge_list);
330 	mutex_unlock(&bridge_lock);
331 }
332 EXPORT_SYMBOL(drm_bridge_add);
333 
334 static void drm_bridge_remove_void(void *bridge)
335 {
336 	drm_bridge_remove(bridge);
337 }
338 
339 /**
340  * devm_drm_bridge_add - devm managed version of drm_bridge_add()
341  *
342  * @dev: device to tie the bridge lifetime to
343  * @bridge: bridge control structure
344  *
345  * This is the managed version of drm_bridge_add() which automatically
346  * calls drm_bridge_remove() when @dev is unbound.
347  *
348  * Return: 0 if no error or negative error code.
349  */
350 int devm_drm_bridge_add(struct device *dev, struct drm_bridge *bridge)
351 {
352 	drm_bridge_add(bridge);
353 	return devm_add_action_or_reset(dev, drm_bridge_remove_void, bridge);
354 }
355 EXPORT_SYMBOL(devm_drm_bridge_add);
356 
357 /**
358  * drm_bridge_remove - unregister a bridge
359  *
360  * @bridge: bridge control structure
361  *
362  * Remove the given bridge from the global list of registered bridges, so
363  * it won't be found by users via of_drm_find_bridge(), and add it to the
364  * lingering bridge list, to keep track of it until its allocated memory is
365  * eventually freed.
366  */
367 void drm_bridge_remove(struct drm_bridge *bridge)
368 {
369 	mutex_lock(&bridge_lock);
370 	list_move_tail(&bridge->list, &bridge_lingering_list);
371 	mutex_unlock(&bridge_lock);
372 
373 	mutex_destroy(&bridge->hpd_mutex);
374 
375 	drm_bridge_put(bridge);
376 }
377 EXPORT_SYMBOL(drm_bridge_remove);
378 
379 static struct drm_private_state *
380 drm_bridge_atomic_duplicate_priv_state(struct drm_private_obj *obj)
381 {
382 	struct drm_bridge *bridge = drm_priv_to_bridge(obj);
383 	struct drm_bridge_state *state;
384 
385 	state = bridge->funcs->atomic_duplicate_state(bridge);
386 	return state ? &state->base : NULL;
387 }
388 
389 static void
390 drm_bridge_atomic_destroy_priv_state(struct drm_private_obj *obj,
391 				     struct drm_private_state *s)
392 {
393 	struct drm_bridge_state *state = drm_priv_to_bridge_state(s);
394 	struct drm_bridge *bridge = drm_priv_to_bridge(obj);
395 
396 	bridge->funcs->atomic_destroy_state(bridge, state);
397 }
398 
399 static const struct drm_private_state_funcs drm_bridge_priv_state_funcs = {
400 	.atomic_duplicate_state = drm_bridge_atomic_duplicate_priv_state,
401 	.atomic_destroy_state = drm_bridge_atomic_destroy_priv_state,
402 };
403 
404 static bool drm_bridge_is_atomic(struct drm_bridge *bridge)
405 {
406 	return bridge->funcs->atomic_reset != NULL;
407 }
408 
409 /**
410  * drm_bridge_attach - attach the bridge to an encoder's chain
411  *
412  * @encoder: DRM encoder
413  * @bridge: bridge to attach
414  * @previous: previous bridge in the chain (optional)
415  * @flags: DRM_BRIDGE_ATTACH_* flags
416  *
417  * Called by a KMS driver to link the bridge to an encoder's chain. The @previous
418  * argument specifies the previous bridge in the chain. If NULL, the bridge is
419  * linked directly at the encoder's output. Otherwise it is linked at the
420  * previous bridge's output.
421  *
422  * If non-NULL, the previous bridge must already have been attached by a call
423  * to this function.
424  *
425  * Note that bridges attached to encoders are auto-detached during encoder
426  * cleanup in drm_encoder_cleanup(), so drm_bridge_attach() should generally
427  * *not* be balanced with a drm_bridge_detach() in driver code.
428  *
429  * RETURNS:
430  * Zero on success, error code on failure
431  */
432 int drm_bridge_attach(struct drm_encoder *encoder, struct drm_bridge *bridge,
433 		      struct drm_bridge *previous,
434 		      enum drm_bridge_attach_flags flags)
435 {
436 	int ret;
437 
438 	if (!encoder || !bridge)
439 		return -EINVAL;
440 
441 	drm_bridge_get(bridge);
442 
443 	if (previous && (!previous->dev || previous->encoder != encoder)) {
444 		ret = -EINVAL;
445 		goto err_put_bridge;
446 	}
447 
448 	if (bridge->dev) {
449 		ret = -EBUSY;
450 		goto err_put_bridge;
451 	}
452 
453 	bridge->dev = encoder->dev;
454 	bridge->encoder = encoder;
455 
456 	if (previous)
457 		list_add(&bridge->chain_node, &previous->chain_node);
458 	else
459 		list_add(&bridge->chain_node, &encoder->bridge_chain);
460 
461 	if (bridge->funcs->attach) {
462 		ret = bridge->funcs->attach(bridge, encoder, flags);
463 		if (ret < 0)
464 			goto err_reset_bridge;
465 	}
466 
467 	if (drm_bridge_is_atomic(bridge)) {
468 		struct drm_bridge_state *state;
469 
470 		state = bridge->funcs->atomic_reset(bridge);
471 		if (IS_ERR(state)) {
472 			ret = PTR_ERR(state);
473 			goto err_detach_bridge;
474 		}
475 
476 		drm_atomic_private_obj_init(bridge->dev, &bridge->base,
477 					    &state->base,
478 					    &drm_bridge_priv_state_funcs);
479 	}
480 
481 	return 0;
482 
483 err_detach_bridge:
484 	if (bridge->funcs->detach)
485 		bridge->funcs->detach(bridge);
486 
487 err_reset_bridge:
488 	bridge->dev = NULL;
489 	bridge->encoder = NULL;
490 	list_del(&bridge->chain_node);
491 
492 	if (ret != -EPROBE_DEFER)
493 		DRM_ERROR("failed to attach bridge %pOF to encoder %s: %d\n",
494 			  bridge->of_node, encoder->name, ret);
495 	else
496 		dev_err_probe(encoder->dev->dev, -EPROBE_DEFER,
497 			      "failed to attach bridge %pOF to encoder %s\n",
498 			      bridge->of_node, encoder->name);
499 
500 err_put_bridge:
501 	drm_bridge_put(bridge);
502 	return ret;
503 }
504 EXPORT_SYMBOL(drm_bridge_attach);
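
/*
 * A bridge in the middle of a chain typically attaches the next bridge from
 * its own &drm_bridge_funcs.attach implementation, passing itself as the
 * previous bridge and forwarding the attach flags downstream. A hypothetical
 * sketch, where "next_bridge" was looked up at probe time (for instance with
 * devm_drm_of_get_bridge()) and bridge_to_my_bridge() is a container_of()
 * helper::
 *
 *      static int my_bridge_attach(struct drm_bridge *bridge,
 *                                  struct drm_encoder *encoder,
 *                                  enum drm_bridge_attach_flags flags)
 *      {
 *              struct my_bridge *my = bridge_to_my_bridge(bridge);
 *
 *              return drm_bridge_attach(encoder, my->next_bridge, bridge,
 *                                       flags);
 *      }
 */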
505 
506 void drm_bridge_detach(struct drm_bridge *bridge)
507 {
508 	if (WARN_ON(!bridge))
509 		return;
510 
511 	if (WARN_ON(!bridge->dev))
512 		return;
513 
514 	if (drm_bridge_is_atomic(bridge))
515 		drm_atomic_private_obj_fini(&bridge->base);
516 
517 	if (bridge->funcs->detach)
518 		bridge->funcs->detach(bridge);
519 
520 	list_del(&bridge->chain_node);
521 	bridge->dev = NULL;
522 	drm_bridge_put(bridge);
523 }
524 
525 /**
526  * DOC: bridge operations
527  *
528  * Bridge drivers expose operations through the &drm_bridge_funcs structure.
529  * The DRM internals (atomic and CRTC helpers) use the helpers defined in
530  * drm_bridge.c to call bridge operations. Those operations are divided in
531  * three big categories to support different parts of the bridge usage.
532  *
533  * - The encoder-related operations support control of the bridges in the
534  *   chain, and are roughly counterparts to the &drm_encoder_helper_funcs
535  *   operations. They are used by the legacy CRTC and the atomic modeset
536  *   helpers to perform mode validation, fixup and setting, and enable and
537  *   disable the bridge automatically.
538  *
539  *   The enable and disable operations are split in
540  *   &drm_bridge_funcs.pre_enable, &drm_bridge_funcs.enable,
541  *   &drm_bridge_funcs.disable and &drm_bridge_funcs.post_disable to provide
542  *   finer-grained control.
543  *
544  *   Bridge drivers may implement the legacy version of those operations, or
545  *   the atomic version (prefixed with atomic\_), in which case they shall also
546  *   implement the atomic state bookkeeping operations
547  *   (&drm_bridge_funcs.atomic_duplicate_state,
548  *   &drm_bridge_funcs.atomic_destroy_state and &drm_bridge_funcs.atomic_reset).
549  *   Mixing atomic and non-atomic versions of the operations is not supported.
550  *
551  * - The bus format negotiation operations
552  *   &drm_bridge_funcs.atomic_get_output_bus_fmts and
553  *   &drm_bridge_funcs.atomic_get_input_bus_fmts allow bridge drivers to
554  *   negotiate the formats transmitted between bridges in the chain when
555  *   multiple formats are supported. Negotiation for formats is performed
556  *   transparently for display drivers by the atomic modeset helpers. Only
557  *   atomic versions of those operations exist, bridge drivers that need to
558  *   implement them shall thus also implement the atomic version of the
559  *   encoder-related operations. This feature is not supported by the legacy
560  *   CRTC helpers.
561  *
562  * - The connector-related operations support implementing a &drm_connector
563  *   based on a chain of bridges. DRM bridges traditionally create a
564  *   &drm_connector for bridges meant to be used at the end of the chain. This
565  *   puts additional burden on bridge drivers, especially for bridges that may
566  *   be used in the middle of a chain or at the end of it. Furthermore, it
567  *   requires all operations of the &drm_connector to be handled by a single
568  *   bridge, which doesn't always match the hardware architecture.
569  *
570  *   To simplify bridge drivers and make the connector implementation more
571  *   flexible, a new model allows bridges to unconditionally skip creation of
572  *   &drm_connector and instead expose &drm_bridge_funcs operations to support
573  *   an externally-implemented &drm_connector. Those operations are
574  *   &drm_bridge_funcs.detect, &drm_bridge_funcs.get_modes,
575  *   &drm_bridge_funcs.edid_read, &drm_bridge_funcs.hpd_notify,
576  *   &drm_bridge_funcs.hpd_enable and &drm_bridge_funcs.hpd_disable. When
577  *   implemented, display drivers shall create a &drm_connector instance for
578  *   each chain of bridges, and implement those connector instances based on
579  *   the bridge connector operations.
580  *
581  *   Bridge drivers shall implement the connector-related operations for all
582  *   the features that the bridge hardware supports. For instance, if a bridge
583  *   supports reading the EDID, the &drm_bridge_funcs.edid_read operation shall
584  *   be implemented. This however doesn't mean that the DDC lines are wired to the
585  *   bridge on a particular platform, as they could also be connected to an I2C
586  *   controller of the SoC. Support for the connector-related operations on the
587  *   running platform is reported through the &drm_bridge.ops flags. Bridge
588  *   drivers shall detect which operations they can support on the platform
589  *   (usually this information is provided by ACPI or DT), and set the
590  *   &drm_bridge.ops flags for all supported operations. A flag shall only be
591  *   set if the corresponding &drm_bridge_funcs operation is implemented, but
592  *   an implemented operation doesn't necessarily imply that the corresponding
593  *   flag will be set. Display drivers shall use the &drm_bridge.ops flags to
594  *   decide which bridge to delegate a connector operation to. This mechanism
595  *   allows providing a single static const &drm_bridge_funcs instance in
596  *   bridge drivers, improving security by storing function pointers in
597  *   read-only memory.
598  *
599  *   In order to ease transition, bridge drivers may support both the old and
600  *   new models by making connector creation optional and implementing the
601  *   connector-related bridge operations. Connector creation is then controlled
602  *   by the flags argument to the drm_bridge_attach() function. Display drivers
603  *   that support the new model and create connectors themselves shall set the
604  *   %DRM_BRIDGE_ATTACH_NO_CONNECTOR flag, and bridge drivers shall then skip
605  *   connector creation. For intermediate bridges in the chain, the flag shall
606  *   be passed to the drm_bridge_attach() call for the downstream bridge.
607  *   Bridge drivers that implement only the new model shall return an error
608  *   from their &drm_bridge_funcs.attach handler when the
609  *   %DRM_BRIDGE_ATTACH_NO_CONNECTOR flag is not set. New display drivers
610  *   should use the new model, and convert the bridge drivers they use if
611  *   needed, in order to gradually transition to the new model.
612  */
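
/*
 * As an illustration of the new model, a bridge driver that does not create a
 * connector itself would reject the legacy attach path and advertise the
 * operations it can support on the running platform. A hypothetical sketch
 * (in practice the ops selection depends on DT/ACPI, e.g. whether a DDC bus
 * or an HPD line is actually wired up)::
 *
 *      static int my_bridge_attach(struct drm_bridge *bridge,
 *                                  struct drm_encoder *encoder,
 *                                  enum drm_bridge_attach_flags flags)
 *      {
 *              if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
 *                      return -EINVAL;
 *
 *              return 0;
 *      }
 *
 * and, at probe time::
 *
 *      bridge->ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID |
 *                    DRM_BRIDGE_OP_HPD;
 */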
613 
614 /**
615  * drm_bridge_chain_mode_valid - validate the mode against all bridges in the
616  *				 encoder chain.
617  * @bridge: bridge control structure
618  * @info: display info against which the mode shall be validated
619  * @mode: desired mode to be validated
620  *
621  * Calls &drm_bridge_funcs.mode_valid for all the bridges in the encoder
622  * chain, starting from the first bridge to the last. If at least one bridge
623  * does not accept the mode, the function returns the corresponding error code.
624  *
625  * Note: the bridge passed should be the one closest to the encoder.
626  *
627  * RETURNS:
628  * MODE_OK on success, or an &enum drm_mode_status error code on failure
629  */
630 enum drm_mode_status
631 drm_bridge_chain_mode_valid(struct drm_bridge *bridge,
632 			    const struct drm_display_info *info,
633 			    const struct drm_display_mode *mode)
634 {
635 	struct drm_encoder *encoder;
636 
637 	if (!bridge)
638 		return MODE_OK;
639 
640 	encoder = bridge->encoder;
641 	list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
642 		enum drm_mode_status ret;
643 
644 		if (!bridge->funcs->mode_valid)
645 			continue;
646 
647 		ret = bridge->funcs->mode_valid(bridge, info, mode);
648 		if (ret != MODE_OK)
649 			return ret;
650 	}
651 
652 	return MODE_OK;
653 }
654 EXPORT_SYMBOL(drm_bridge_chain_mode_valid);
655 
656 /**
657  * drm_bridge_chain_mode_set - set proposed mode for all bridges in the
658  *			       encoder chain
659  * @bridge: bridge control structure
660  * @mode: desired mode to be set for the encoder chain
661  * @adjusted_mode: updated mode that works for this encoder chain
662  *
663  * Calls &drm_bridge_funcs.mode_set op for all the bridges in the
664  * encoder chain, starting from the first bridge to the last.
665  *
666  * Note: the bridge passed should be the one closest to the encoder
667  */
668 void drm_bridge_chain_mode_set(struct drm_bridge *bridge,
669 			       const struct drm_display_mode *mode,
670 			       const struct drm_display_mode *adjusted_mode)
671 {
672 	struct drm_encoder *encoder;
673 
674 	if (!bridge)
675 		return;
676 
677 	encoder = bridge->encoder;
678 	list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
679 		if (bridge->funcs->mode_set)
680 			bridge->funcs->mode_set(bridge, mode, adjusted_mode);
681 	}
682 }
683 EXPORT_SYMBOL(drm_bridge_chain_mode_set);
684 
685 /**
686  * drm_atomic_bridge_chain_disable - disables all bridges in the encoder chain
687  * @bridge: bridge control structure
688  * @state: atomic state being committed
689  *
690  * Calls &drm_bridge_funcs.atomic_disable (falls back on
691  * &drm_bridge_funcs.disable) op for all the bridges in the encoder chain,
692  * starting from the last bridge to the first. These are called before calling
693  * &drm_encoder_helper_funcs.atomic_disable
694  *
695  * Note: the bridge passed should be the one closest to the encoder
696  */
697 void drm_atomic_bridge_chain_disable(struct drm_bridge *bridge,
698 				     struct drm_atomic_state *state)
699 {
700 	struct drm_encoder *encoder;
701 	struct drm_bridge *iter;
702 
703 	if (!bridge)
704 		return;
705 
706 	encoder = bridge->encoder;
707 	list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) {
708 		if (iter->funcs->atomic_disable) {
709 			iter->funcs->atomic_disable(iter, state);
710 		} else if (iter->funcs->disable) {
711 			iter->funcs->disable(iter);
712 		}
713 
714 		if (iter == bridge)
715 			break;
716 	}
717 }
718 EXPORT_SYMBOL(drm_atomic_bridge_chain_disable);
719 
720 static void drm_atomic_bridge_call_post_disable(struct drm_bridge *bridge,
721 						struct drm_atomic_state *state)
722 {
723 	if (state && bridge->funcs->atomic_post_disable)
724 		bridge->funcs->atomic_post_disable(bridge, state);
725 	else if (bridge->funcs->post_disable)
726 		bridge->funcs->post_disable(bridge);
727 }
728 
729 /**
730  * drm_atomic_bridge_chain_post_disable - cleans up after disabling all bridges
731  *					  in the encoder chain
732  * @bridge: bridge control structure
733  * @state: atomic state being committed
734  *
735  * Calls &drm_bridge_funcs.atomic_post_disable (falls back on
736  * &drm_bridge_funcs.post_disable) op for all the bridges in the encoder chain,
737  * starting from the first bridge to the last. These are called after completing
738  * &drm_encoder_helper_funcs.atomic_disable
739  *
740  * If a bridge sets @pre_enable_prev_first, then the @post_disable for that
741  * bridge will be called before the previous one to reverse the @pre_enable
742  * calling direction.
743  *
744  * Example:
745  * Bridge A ---> Bridge B ---> Bridge C ---> Bridge D ---> Bridge E
746  *
747  * With the pre_enable_prev_first flag enabled in Bridges B, D and E, the
748  * resulting @post_disable order would be:
749  * Bridge B, Bridge A, Bridge E, Bridge D, Bridge C.
750  *
751  * Note: the bridge passed should be the one closest to the encoder
752  */
753 void drm_atomic_bridge_chain_post_disable(struct drm_bridge *bridge,
754 					  struct drm_atomic_state *state)
755 {
756 	struct drm_encoder *encoder;
757 	struct drm_bridge *next, *limit;
758 
759 	if (!bridge)
760 		return;
761 
762 	encoder = bridge->encoder;
763 
764 	list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
765 		limit = NULL;
766 
767 		if (!list_is_last(&bridge->chain_node, &encoder->bridge_chain)) {
768 			next = list_next_entry(bridge, chain_node);
769 
770 			if (next->pre_enable_prev_first) {
771 				/* next bridge had requested that prev
772 				 * was enabled first, so disabled last
773 				 */
774 				limit = next;
775 
776 				/* Find the next bridge that has NOT requested
777 				 * prev to be enabled first / disabled last
778 				 */
779 				list_for_each_entry_from(next, &encoder->bridge_chain,
780 							 chain_node) {
781 					if (!next->pre_enable_prev_first) {
782 						next = list_prev_entry(next, chain_node);
783 						limit = next;
784 						break;
785 					}
786 
787 					if (list_is_last(&next->chain_node,
788 							 &encoder->bridge_chain)) {
789 						limit = next;
790 						break;
791 					}
792 				}
793 
794 				/* Call these bridges in reverse order */
795 				list_for_each_entry_from_reverse(next, &encoder->bridge_chain,
796 								 chain_node) {
797 					if (next == bridge)
798 						break;
799 
800 					drm_atomic_bridge_call_post_disable(next,
801 									    state);
802 				}
803 			}
804 		}
805 
806 		drm_atomic_bridge_call_post_disable(bridge, state);
807 
808 		if (limit)
809 			/* Jump all bridges that we have already post_disabled */
810 			bridge = limit;
811 	}
812 }
813 EXPORT_SYMBOL(drm_atomic_bridge_chain_post_disable);
814 
815 static void drm_atomic_bridge_call_pre_enable(struct drm_bridge *bridge,
816 					      struct drm_atomic_state *state)
817 {
818 	if (state && bridge->funcs->atomic_pre_enable)
819 		bridge->funcs->atomic_pre_enable(bridge, state);
820 	else if (bridge->funcs->pre_enable)
821 		bridge->funcs->pre_enable(bridge);
822 }
823 
824 /**
825  * drm_atomic_bridge_chain_pre_enable - prepares for enabling all bridges in
826  *					the encoder chain
827  * @bridge: bridge control structure
828  * @state: atomic state being committed
829  *
830  * Calls &drm_bridge_funcs.atomic_pre_enable (falls back on
831  * &drm_bridge_funcs.pre_enable) op for all the bridges in the encoder chain,
832  * starting from the last bridge to the first. These are called before calling
833  * &drm_encoder_helper_funcs.atomic_enable
834  *
835  * If a bridge sets @pre_enable_prev_first, then the pre_enable for the
836  * previous bridge will be called before the pre_enable of this bridge.
837  *
838  * Example:
839  * Bridge A ---> Bridge B ---> Bridge C ---> Bridge D ---> Bridge E
840  *
841  * With the pre_enable_prev_first flag enabled in Bridges B, D and E, the
842  * resulting @pre_enable order would be:
843  * Bridge C, Bridge D, Bridge E, Bridge A, Bridge B.
844  *
845  * Note: the bridge passed should be the one closest to the encoder
846  */
847 void drm_atomic_bridge_chain_pre_enable(struct drm_bridge *bridge,
848 					struct drm_atomic_state *state)
849 {
850 	struct drm_encoder *encoder;
851 	struct drm_bridge *iter, *next, *limit;
852 
853 	if (!bridge)
854 		return;
855 
856 	encoder = bridge->encoder;
857 
858 	list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) {
859 		if (iter->pre_enable_prev_first) {
860 			next = iter;
861 			limit = bridge;
862 			list_for_each_entry_from_reverse(next,
863 							 &encoder->bridge_chain,
864 							 chain_node) {
865 				if (next == bridge)
866 					break;
867 
868 				if (!next->pre_enable_prev_first) {
869 					/* Found first bridge that does NOT
870 					 * request prev to be enabled first
871 					 */
872 					limit = next;
873 					break;
874 				}
875 			}
876 
877 			list_for_each_entry_from(next, &encoder->bridge_chain, chain_node) {
878 				/* Call requested prev bridge pre_enable
879 				 * in order.
880 				 */
881 				if (next == iter)
882 					/* At the first bridge to request prev
883 					 * bridges called first.
884 					 */
885 					break;
886 
887 				drm_atomic_bridge_call_pre_enable(next, state);
888 			}
889 		}
890 
891 		drm_atomic_bridge_call_pre_enable(iter, state);
892 
893 		if (iter->pre_enable_prev_first)
894 			/* Jump all bridges that we have already pre_enabled */
895 			iter = limit;
896 
897 		if (iter == bridge)
898 			break;
899 	}
900 }
901 EXPORT_SYMBOL(drm_atomic_bridge_chain_pre_enable);
902 
903 /**
904  * drm_atomic_bridge_chain_enable - enables all bridges in the encoder chain
905  * @bridge: bridge control structure
906  * @state: atomic state being committed
907  *
908  * Calls &drm_bridge_funcs.atomic_enable (falls back on
909  * &drm_bridge_funcs.enable) op for all the bridges in the encoder chain,
910  * starting from the first bridge to the last. These are called after completing
911  * &drm_encoder_helper_funcs.atomic_enable
912  *
913  * Note: the bridge passed should be the one closest to the encoder
914  */
915 void drm_atomic_bridge_chain_enable(struct drm_bridge *bridge,
916 				    struct drm_atomic_state *state)
917 {
918 	struct drm_encoder *encoder;
919 
920 	if (!bridge)
921 		return;
922 
923 	encoder = bridge->encoder;
924 	list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
925 		if (bridge->funcs->atomic_enable) {
926 			bridge->funcs->atomic_enable(bridge, state);
927 		} else if (bridge->funcs->enable) {
928 			bridge->funcs->enable(bridge);
929 		}
930 	}
931 }
932 EXPORT_SYMBOL(drm_atomic_bridge_chain_enable);
933 
934 static int drm_atomic_bridge_check(struct drm_bridge *bridge,
935 				   struct drm_crtc_state *crtc_state,
936 				   struct drm_connector_state *conn_state)
937 {
938 	if (bridge->funcs->atomic_check) {
939 		struct drm_bridge_state *bridge_state;
940 		int ret;
941 
942 		bridge_state = drm_atomic_get_new_bridge_state(crtc_state->state,
943 							       bridge);
944 		if (WARN_ON(!bridge_state))
945 			return -EINVAL;
946 
947 		ret = bridge->funcs->atomic_check(bridge, bridge_state,
948 						  crtc_state, conn_state);
949 		if (ret)
950 			return ret;
951 	} else if (bridge->funcs->mode_fixup) {
952 		if (!bridge->funcs->mode_fixup(bridge, &crtc_state->mode,
953 					       &crtc_state->adjusted_mode))
954 			return -EINVAL;
955 	}
956 
957 	return 0;
958 }
959 
960 static int select_bus_fmt_recursive(struct drm_bridge *first_bridge,
961 				    struct drm_bridge *cur_bridge,
962 				    struct drm_crtc_state *crtc_state,
963 				    struct drm_connector_state *conn_state,
964 				    u32 out_bus_fmt)
965 {
966 	unsigned int i, num_in_bus_fmts = 0;
967 	struct drm_bridge_state *cur_state;
968 	struct drm_bridge *prev_bridge __free(drm_bridge_put) =
969 		drm_bridge_get_prev_bridge(cur_bridge);
970 	u32 *in_bus_fmts;
971 	int ret;
972 
973 	cur_state = drm_atomic_get_new_bridge_state(crtc_state->state,
974 						    cur_bridge);
975 
976 	/*
977 	 * If bus format negotiation is not supported by this bridge, let's
978 	 * pass MEDIA_BUS_FMT_FIXED to the previous bridge in the chain and
979 	 * hope that it can handle this situation gracefully (by providing
980 	 * appropriate default values).
981 	 */
982 	if (!cur_bridge->funcs->atomic_get_input_bus_fmts) {
983 		if (cur_bridge != first_bridge) {
984 			ret = select_bus_fmt_recursive(first_bridge,
985 						       prev_bridge, crtc_state,
986 						       conn_state,
987 						       MEDIA_BUS_FMT_FIXED);
988 			if (ret)
989 				return ret;
990 		}
991 
992 		/*
993 		 * Driver does not implement the atomic state hooks, but that's
994 		 * fine, as long as it does not access the bridge state.
995 		 */
996 		if (cur_state) {
997 			cur_state->input_bus_cfg.format = MEDIA_BUS_FMT_FIXED;
998 			cur_state->output_bus_cfg.format = out_bus_fmt;
999 		}
1000 
1001 		return 0;
1002 	}
1003 
1004 	/*
1005 	 * If the driver implements ->atomic_get_input_bus_fmts() it
1006 	 * should also implement the atomic state hooks.
1007 	 */
1008 	if (WARN_ON(!cur_state))
1009 		return -EINVAL;
1010 
1011 	in_bus_fmts = cur_bridge->funcs->atomic_get_input_bus_fmts(cur_bridge,
1012 							cur_state,
1013 							crtc_state,
1014 							conn_state,
1015 							out_bus_fmt,
1016 							&num_in_bus_fmts);
1017 	if (!num_in_bus_fmts)
1018 		return -ENOTSUPP;
1019 	else if (!in_bus_fmts)
1020 		return -ENOMEM;
1021 
1022 	if (first_bridge == cur_bridge) {
1023 		cur_state->input_bus_cfg.format = in_bus_fmts[0];
1024 		cur_state->output_bus_cfg.format = out_bus_fmt;
1025 		kfree(in_bus_fmts);
1026 		return 0;
1027 	}
1028 
1029 	for (i = 0; i < num_in_bus_fmts; i++) {
1030 		ret = select_bus_fmt_recursive(first_bridge, prev_bridge,
1031 					       crtc_state, conn_state,
1032 					       in_bus_fmts[i]);
1033 		if (ret != -ENOTSUPP)
1034 			break;
1035 	}
1036 
1037 	if (!ret) {
1038 		cur_state->input_bus_cfg.format = in_bus_fmts[i];
1039 		cur_state->output_bus_cfg.format = out_bus_fmt;
1040 	}
1041 
1042 	kfree(in_bus_fmts);
1043 	return ret;
1044 }
1045 
1046 /*
1047  * This function is called by &drm_atomic_bridge_chain_check() just before
1048  * calling &drm_bridge_funcs.atomic_check() on all elements of the chain.
1049  * It performs bus format negotiation between bridge elements. The negotiation
1050  * happens in reverse order, starting from the last element in the chain up to
1051  * @bridge.
1052  *
1053  * Negotiation starts by retrieving supported output bus formats on the last
1054  * bridge element and testing them one by one. The test is recursive, meaning
1055  * that for each tested output format, the whole chain will be walked backward,
1056  * and each element will have to choose an input bus format that can be
1057  * transcoded to the requested output format. When a bridge element does not
1058  * support transcoding into a specific output format -ENOTSUPP is returned and
1059  * the next bridge element will have to try a different format. If none of the
1060  * combinations worked, -ENOTSUPP is returned and the atomic modeset will fail.
1061  *
1062  * This implementation is relying on
1063  * &drm_bridge_funcs.atomic_get_output_bus_fmts() and
1064  * &drm_bridge_funcs.atomic_get_input_bus_fmts() to gather supported
1065  * input/output formats.
1066  *
1067  * When &drm_bridge_funcs.atomic_get_output_bus_fmts() is not implemented by
1068  * the last element of the chain, &drm_atomic_bridge_chain_select_bus_fmts()
1069  * tries a single format: &drm_connector.display_info.bus_formats[0] if
1070  * available, MEDIA_BUS_FMT_FIXED otherwise.
1071  *
1072  * When &drm_bridge_funcs.atomic_get_input_bus_fmts() is not implemented,
1073  * &drm_atomic_bridge_chain_select_bus_fmts() skips the negotiation on the
1074  * bridge element that lacks this hook and asks the previous element in the
1075  * chain to try MEDIA_BUS_FMT_FIXED. It's up to bridge drivers to decide what
1076  * to do in that case (fail if they want to enforce bus format negotiation, or
1077  * provide a reasonable default if they need to support pipelines where not
1078  * all elements support bus format negotiation).
1079  */
1080 static int
1081 drm_atomic_bridge_chain_select_bus_fmts(struct drm_bridge *bridge,
1082 					struct drm_crtc_state *crtc_state,
1083 					struct drm_connector_state *conn_state)
1084 {
1085 	struct drm_connector *conn = conn_state->connector;
1086 	struct drm_encoder *encoder = bridge->encoder;
1087 	struct drm_bridge_state *last_bridge_state;
1088 	unsigned int i, num_out_bus_fmts = 0;
1089 	u32 *out_bus_fmts;
1090 	int ret = 0;
1091 
1092 	struct drm_bridge *last_bridge __free(drm_bridge_put) =
1093 		drm_bridge_get(list_last_entry(&encoder->bridge_chain,
1094 					       struct drm_bridge, chain_node));
1095 	last_bridge_state = drm_atomic_get_new_bridge_state(crtc_state->state,
1096 							    last_bridge);
1097 
1098 	if (last_bridge->funcs->atomic_get_output_bus_fmts) {
1099 		const struct drm_bridge_funcs *funcs = last_bridge->funcs;
1100 
1101 		/*
1102 		 * If the driver implements ->atomic_get_output_bus_fmts() it
1103 		 * should also implement the atomic state hooks.
1104 		 */
1105 		if (WARN_ON(!last_bridge_state))
1106 			return -EINVAL;
1107 
1108 		out_bus_fmts = funcs->atomic_get_output_bus_fmts(last_bridge,
1109 							last_bridge_state,
1110 							crtc_state,
1111 							conn_state,
1112 							&num_out_bus_fmts);
1113 		if (!num_out_bus_fmts)
1114 			return -ENOTSUPP;
1115 		else if (!out_bus_fmts)
1116 			return -ENOMEM;
1117 	} else {
1118 		num_out_bus_fmts = 1;
1119 		out_bus_fmts = kmalloc(sizeof(*out_bus_fmts), GFP_KERNEL);
1120 		if (!out_bus_fmts)
1121 			return -ENOMEM;
1122 
1123 		if (conn->display_info.num_bus_formats &&
1124 		    conn->display_info.bus_formats)
1125 			out_bus_fmts[0] = conn->display_info.bus_formats[0];
1126 		else
1127 			out_bus_fmts[0] = MEDIA_BUS_FMT_FIXED;
1128 	}
1129 
1130 	for (i = 0; i < num_out_bus_fmts; i++) {
1131 		ret = select_bus_fmt_recursive(bridge, last_bridge, crtc_state,
1132 					       conn_state, out_bus_fmts[i]);
1133 		if (ret != -ENOTSUPP)
1134 			break;
1135 	}
1136 
1137 	kfree(out_bus_fmts);
1138 
1139 	return ret;
1140 }
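
/*
 * The negotiation above relies on bridge drivers reporting which input
 * formats they can use to produce a given output format. A minimal,
 * hypothetical &drm_bridge_funcs.atomic_get_input_bus_fmts implementation
 * supporting a single input format could look like the sketch below. The
 * returned array must be heap-allocated, as the core frees it with kfree()::
 *
 *      static u32 *
 *      my_bridge_get_input_bus_fmts(struct drm_bridge *bridge,
 *                                   struct drm_bridge_state *bridge_state,
 *                                   struct drm_crtc_state *crtc_state,
 *                                   struct drm_connector_state *conn_state,
 *                                   u32 output_fmt,
 *                                   unsigned int *num_input_fmts)
 *      {
 *              u32 *input_fmts;
 *
 *              *num_input_fmts = 0;
 *
 *              // A real implementation would check output_fmt here and
 *              // return NULL for formats it cannot transcode to.
 *              input_fmts = kmalloc(sizeof(*input_fmts), GFP_KERNEL);
 *              if (!input_fmts)
 *                      return NULL;
 *
 *              *num_input_fmts = 1;
 *              input_fmts[0] = MEDIA_BUS_FMT_RGB888_1X24;
 *
 *              return input_fmts;
 *      }
 */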
1141 
1142 static void
1143 drm_atomic_bridge_propagate_bus_flags(struct drm_bridge *bridge,
1144 				      struct drm_connector *conn,
1145 				      struct drm_atomic_state *state)
1146 {
1147 	struct drm_bridge_state *bridge_state, *next_bridge_state;
1148 	u32 output_flags = 0;
1149 
1150 	bridge_state = drm_atomic_get_new_bridge_state(state, bridge);
1151 
1152 	/* No bridge state attached to this bridge => nothing to propagate. */
1153 	if (!bridge_state)
1154 		return;
1155 
1156 	struct drm_bridge *next_bridge __free(drm_bridge_put) = drm_bridge_get_next_bridge(bridge);
1157 
1158 	/*
1159 	 * Let's try to apply the most common case here, that is, propagate
1160 	 * display_info flags for the last bridge, and propagate the input
1161 	 * flags of the next bridge element to the output end of the current
1162 	 * bridge when the bridge is not the last one.
1163 	 * There are exceptions to this rule, like when signal inversion is
1164 	 * happening at the board level, but that's something drivers can deal
1165 	 * with from their &drm_bridge_funcs.atomic_check() implementation by
1166 	 * simply overriding the flags value we've set here.
1167 	 */
1168 	if (!next_bridge) {
1169 		output_flags = conn->display_info.bus_flags;
1170 	} else {
1171 		next_bridge_state = drm_atomic_get_new_bridge_state(state,
1172 								next_bridge);
1173 		/*
1174 		 * No bridge state attached to the next bridge, just leave the
1175 		 * flags to 0.
1176 		 */
1177 		if (next_bridge_state)
1178 			output_flags = next_bridge_state->input_bus_cfg.flags;
1179 	}
1180 
1181 	bridge_state->output_bus_cfg.flags = output_flags;
1182 
1183 	/*
1184 	 * Propagate the output flags to the input end of the bridge. Again, it's
1185 	 * not necessarily what all bridges want, but that's what most of them
1186 	 * do, and by doing that by default we avoid forcing drivers to
1187 	 * duplicate the "dummy propagation" logic.
1188 	 */
1189 	bridge_state->input_bus_cfg.flags = output_flags;
1190 }
1191 
1192 /**
1193  * drm_atomic_bridge_chain_check() - Do an atomic check on the bridge chain
1194  * @bridge: bridge control structure
1195  * @crtc_state: new CRTC state
1196  * @conn_state: new connector state
1197  *
1198  * First trigger a bus format negotiation before calling
1199  * &drm_bridge_funcs.atomic_check() (falls back on
1200  * &drm_bridge_funcs.mode_fixup()) op for all the bridges in the encoder chain,
1201  * starting from the last bridge to the first. These are called before calling
1202  * &drm_encoder_helper_funcs.atomic_check()
1203  *
1204  * RETURNS:
1205  * 0 on success, a negative error code on failure
1206  */
1207 int drm_atomic_bridge_chain_check(struct drm_bridge *bridge,
1208 				  struct drm_crtc_state *crtc_state,
1209 				  struct drm_connector_state *conn_state)
1210 {
1211 	struct drm_connector *conn = conn_state->connector;
1212 	struct drm_encoder *encoder;
1213 	struct drm_bridge *iter;
1214 	int ret;
1215 
1216 	if (!bridge)
1217 		return 0;
1218 
1219 	ret = drm_atomic_bridge_chain_select_bus_fmts(bridge, crtc_state,
1220 						      conn_state);
1221 	if (ret)
1222 		return ret;
1223 
1224 	encoder = bridge->encoder;
1225 	list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) {
1226 		int ret;
1227 
1228 		/*
1229 		 * Bus flags are propagated by default. If a bridge needs to
1230 		 * tweak the input bus flags for any reason, it should happen
1231 		 * in its &drm_bridge_funcs.atomic_check() implementation such
1232 		 * that preceding bridges in the chain can propagate the new
1233 		 * bus flags.
1234 		 */
1235 		drm_atomic_bridge_propagate_bus_flags(iter, conn,
1236 						      crtc_state->state);
1237 
1238 		ret = drm_atomic_bridge_check(iter, crtc_state, conn_state);
1239 		if (ret)
1240 			return ret;
1241 
1242 		if (iter == bridge)
1243 			break;
1244 	}
1245 
1246 	return 0;
1247 }
1248 EXPORT_SYMBOL(drm_atomic_bridge_chain_check);
1249 
1250 /**
1251  * drm_bridge_detect - check if anything is attached to the bridge output
1252  * @bridge: bridge control structure
1253  * @connector: attached connector
1254  *
1255  * If the bridge supports output detection, as reported by the
1256  * DRM_BRIDGE_OP_DETECT bridge ops flag, call &drm_bridge_funcs.detect for the
1257  * bridge and return the connection status. Otherwise return
1258  * connector_status_unknown.
1259  *
1260  * RETURNS:
1261  * The detection status on success, or connector_status_unknown if the bridge
1262  * doesn't support output detection.
1263  */
1264 enum drm_connector_status
1265 drm_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
1266 {
1267 	if (!(bridge->ops & DRM_BRIDGE_OP_DETECT))
1268 		return connector_status_unknown;
1269 
1270 	return bridge->funcs->detect(bridge, connector);
1271 }
1272 EXPORT_SYMBOL_GPL(drm_bridge_detect);
1273 
1274 /**
1275  * drm_bridge_get_modes - fill all modes currently valid for the sink into @connector
1277  * @bridge: bridge control structure
1278  * @connector: the connector to fill with modes
1279  *
1280  * If the bridge supports output modes retrieval, as reported by the
1281  * DRM_BRIDGE_OP_MODES bridge ops flag, call &drm_bridge_funcs.get_modes to
1282  * fill the connector with all valid modes and return the number of modes
1283  * added. Otherwise return 0.
1284  *
1285  * RETURNS:
1286  * The number of modes added to the connector.
1287  */
1288 int drm_bridge_get_modes(struct drm_bridge *bridge,
1289 			 struct drm_connector *connector)
1290 {
1291 	if (!(bridge->ops & DRM_BRIDGE_OP_MODES))
1292 		return 0;
1293 
1294 	return bridge->funcs->get_modes(bridge, connector);
1295 }
1296 EXPORT_SYMBOL_GPL(drm_bridge_get_modes);
1297 
1298 /**
1299  * drm_bridge_edid_read - read the EDID data of the connected display
1300  * @bridge: bridge control structure
1301  * @connector: the connector to read EDID for
1302  *
1303  * If the bridge supports output EDID retrieval, as reported by the
1304  * DRM_BRIDGE_OP_EDID bridge ops flag, call &drm_bridge_funcs.edid_read to get
1305  * the EDID and return it. Otherwise return NULL.
1306  *
1307  * RETURNS:
1308  * The retrieved EDID on success, or NULL otherwise.
1309  */
1310 const struct drm_edid *drm_bridge_edid_read(struct drm_bridge *bridge,
1311 					    struct drm_connector *connector)
1312 {
1313 	if (!(bridge->ops & DRM_BRIDGE_OP_EDID))
1314 		return NULL;
1315 
1316 	return bridge->funcs->edid_read(bridge, connector);
1317 }
1318 EXPORT_SYMBOL_GPL(drm_bridge_edid_read);
1319 
1320 /**
1321  * drm_bridge_hpd_enable - enable hot plug detection for the bridge
1322  * @bridge: bridge control structure
1323  * @cb: hot-plug detection callback
1324  * @data: data to be passed to the hot-plug detection callback
1325  *
1326  * Call &drm_bridge_funcs.hpd_enable if implemented and register the given @cb
1327  * and @data as hot plug notification callback. From now on the @cb will be
1328  * called with @data when an output status change is detected by the bridge,
1329  * until hot plug notification gets disabled with drm_bridge_hpd_disable().
1330  *
1331  * Hot plug detection is supported only if the DRM_BRIDGE_OP_HPD flag is set in
1332  * bridge->ops. This function shall not be called when the flag is not set.
1333  *
1334  * Only one hot plug detection callback can be registered at a time, it is an
1335  * error to call this function when hot plug detection is already enabled for
1336  * the bridge.
1337  */
1338 void drm_bridge_hpd_enable(struct drm_bridge *bridge,
1339 			   void (*cb)(void *data,
1340 				      enum drm_connector_status status),
1341 			   void *data)
1342 {
1343 	if (!(bridge->ops & DRM_BRIDGE_OP_HPD))
1344 		return;
1345 
1346 	mutex_lock(&bridge->hpd_mutex);
1347 
1348 	if (WARN(bridge->hpd_cb, "Hot plug detection already enabled\n"))
1349 		goto unlock;
1350 
1351 	bridge->hpd_cb = cb;
1352 	bridge->hpd_data = data;
1353 
1354 	if (bridge->funcs->hpd_enable)
1355 		bridge->funcs->hpd_enable(bridge);
1356 
1357 unlock:
1358 	mutex_unlock(&bridge->hpd_mutex);
1359 }
1360 EXPORT_SYMBOL_GPL(drm_bridge_hpd_enable);
1361 
1362 /**
1363  * drm_bridge_hpd_disable - disable hot plug detection for the bridge
1364  * @bridge: bridge control structure
1365  *
1366  * Call &drm_bridge_funcs.hpd_disable if implemented and unregister the hot
1367  * plug detection callback previously registered with drm_bridge_hpd_enable().
1368  * Once this function returns the callback will not be called by the bridge
1369  * when an output status change occurs.
1370  *
1371  * Hot plug detection is supported only if the DRM_BRIDGE_OP_HPD flag is set in
1372  * bridge->ops. This function shall not be called when the flag is not set.
1373  */
1374 void drm_bridge_hpd_disable(struct drm_bridge *bridge)
1375 {
1376 	if (!(bridge->ops & DRM_BRIDGE_OP_HPD))
1377 		return;
1378 
1379 	mutex_lock(&bridge->hpd_mutex);
1380 	if (bridge->funcs->hpd_disable)
1381 		bridge->funcs->hpd_disable(bridge);
1382 
1383 	bridge->hpd_cb = NULL;
1384 	bridge->hpd_data = NULL;
1385 	mutex_unlock(&bridge->hpd_mutex);
1386 }
1387 EXPORT_SYMBOL_GPL(drm_bridge_hpd_disable);
1388 
1389 /**
1390  * drm_bridge_hpd_notify - notify hot plug detection events
1391  * @bridge: bridge control structure
1392  * @status: output connection status
1393  *
1394  * Bridge drivers shall call this function to report hot plug events when they
1395  * detect a change in the output status, provided hot plug detection has been
1396  * enabled by drm_bridge_hpd_enable().
1397  *
1398  * This function shall be called in a context that can sleep.
1399  */
1400 void drm_bridge_hpd_notify(struct drm_bridge *bridge,
1401 			   enum drm_connector_status status)
1402 {
1403 	mutex_lock(&bridge->hpd_mutex);
1404 	if (bridge->hpd_cb)
1405 		bridge->hpd_cb(bridge->hpd_data, status);
1406 	mutex_unlock(&bridge->hpd_mutex);
1407 }
1408 EXPORT_SYMBOL_GPL(drm_bridge_hpd_notify);
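
/*
 * A typical caller is the bridge driver's hot plug interrupt handler. A
 * hypothetical sketch, run from a threaded IRQ handler so that sleeping is
 * allowed ("my_bridge" and my_bridge_hpd_is_connected() are made-up names)::
 *
 *      static irqreturn_t my_bridge_hpd_irq(int irq, void *arg)
 *      {
 *              struct my_bridge *my = arg;
 *              enum drm_connector_status status;
 *
 *              status = my_bridge_hpd_is_connected(my) ?
 *                       connector_status_connected :
 *                       connector_status_disconnected;
 *
 *              drm_bridge_hpd_notify(&my->bridge, status);
 *
 *              return IRQ_HANDLED;
 *      }
 */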
1409 
1410 #ifdef CONFIG_OF
1411 /**
1412  * of_drm_find_bridge - find the bridge corresponding to the device node in
1413  *			the global bridge list
1414  *
1415  * @np: device node
1416  *
1417  * RETURNS:
1418  * drm_bridge control struct on success, NULL on failure
1419  */
1420 struct drm_bridge *of_drm_find_bridge(struct device_node *np)
1421 {
1422 	struct drm_bridge *bridge;
1423 
1424 	mutex_lock(&bridge_lock);
1425 
1426 	list_for_each_entry(bridge, &bridge_list, list) {
1427 		if (bridge->of_node == np) {
1428 			mutex_unlock(&bridge_lock);
1429 			return bridge;
1430 		}
1431 	}
1432 
1433 	mutex_unlock(&bridge_lock);
1434 	return NULL;
1435 }
1436 EXPORT_SYMBOL(of_drm_find_bridge);
1437 #endif
1438 
1439 /**
1440  * devm_drm_put_bridge - Release a bridge reference obtained via devm
1441  * @dev: device that got the bridge via devm
1442  * @bridge: pointer to a struct drm_bridge obtained via devm
1443  *
1444  * Same as drm_bridge_put() for bridge pointers obtained via devm functions
1445  * such as devm_drm_bridge_alloc().
1446  *
1447  * This function is a temporary workaround and MUST NOT be used. Manual
1448  * handling of bridge lifetime is inherently unsafe.
1449  */
1450 void devm_drm_put_bridge(struct device *dev, struct drm_bridge *bridge)
1451 {
1452 	devm_release_action(dev, drm_bridge_put_void, bridge);
1453 }
1454 EXPORT_SYMBOL(devm_drm_put_bridge);
1455 
1456 static void drm_bridge_debugfs_show_bridge(struct drm_printer *p,
1457 					   struct drm_bridge *bridge,
1458 					   unsigned int idx,
1459 					   bool lingering)
1460 {
1461 	drm_printf(p, "bridge[%u]: %ps\n", idx, bridge->funcs);
1462 
1463 	drm_printf(p, "\trefcount: %u%s\n", kref_read(&bridge->refcount),
1464 		   lingering ? " [lingering]" : "");
1465 
1466 	drm_printf(p, "\ttype: [%d] %s\n",
1467 		   bridge->type,
1468 		   drm_get_connector_type_name(bridge->type));
1469 
1470 	/* The OF node could be freed after drm_bridge_remove() */
1471 	if (bridge->of_node && !lingering)
1472 		drm_printf(p, "\tOF: %pOFfc\n", bridge->of_node);
1473 
1474 	drm_printf(p, "\tops: [0x%x]", bridge->ops);
1475 	if (bridge->ops & DRM_BRIDGE_OP_DETECT)
1476 		drm_puts(p, " detect");
1477 	if (bridge->ops & DRM_BRIDGE_OP_EDID)
1478 		drm_puts(p, " edid");
1479 	if (bridge->ops & DRM_BRIDGE_OP_HPD)
1480 		drm_puts(p, " hpd");
1481 	if (bridge->ops & DRM_BRIDGE_OP_MODES)
1482 		drm_puts(p, " modes");
1483 	if (bridge->ops & DRM_BRIDGE_OP_HDMI)
1484 		drm_puts(p, " hdmi");
1485 	drm_puts(p, "\n");
1486 }
1487 
1488 static int allbridges_show(struct seq_file *m, void *data)
1489 {
1490 	struct drm_printer p = drm_seq_file_printer(m);
1491 	struct drm_bridge *bridge;
1492 	unsigned int idx = 0;
1493 
1494 	mutex_lock(&bridge_lock);
1495 
1496 	list_for_each_entry(bridge, &bridge_list, list)
1497 		drm_bridge_debugfs_show_bridge(&p, bridge, idx++, false);
1498 
1499 	list_for_each_entry(bridge, &bridge_lingering_list, list)
1500 		drm_bridge_debugfs_show_bridge(&p, bridge, idx++, true);
1501 
1502 	mutex_unlock(&bridge_lock);
1503 
1504 	return 0;
1505 }
1506 DEFINE_SHOW_ATTRIBUTE(allbridges);
1507 
1508 static int encoder_bridges_show(struct seq_file *m, void *data)
1509 {
1510 	struct drm_encoder *encoder = m->private;
1511 	struct drm_printer p = drm_seq_file_printer(m);
1512 	unsigned int idx = 0;
1513 
1514 	drm_for_each_bridge_in_chain_scoped(encoder, bridge)
1515 		drm_bridge_debugfs_show_bridge(&p, bridge, idx++, false);
1516 
1517 	return 0;
1518 }
1519 DEFINE_SHOW_ATTRIBUTE(encoder_bridges);
1520 
1521 void drm_bridge_debugfs_params(struct dentry *root)
1522 {
1523 	debugfs_create_file("bridges", 0444, root, NULL, &allbridges_fops);
1524 }
1525 
1526 void drm_bridge_debugfs_encoder_params(struct dentry *root,
1527 				       struct drm_encoder *encoder)
1528 {
1529 	/* bridges list */
1530 	debugfs_create_file("bridges", 0444, root, encoder, &encoder_bridges_fops);
1531 }
1532 
1533 MODULE_AUTHOR("Ajay Kumar <ajaykumar.rs@samsung.com>");
1534 MODULE_DESCRIPTION("DRM bridge infrastructure");
1535 MODULE_LICENSE("GPL and additional rights");
1536