xref: /linux/drivers/gpu/drm/drm_bridge.c (revision 38f7e5450ebfc6f2e046a249a3f629ea7bec8c31)
1 /*
2  * Copyright (c) 2014 Samsung Electronics Co., Ltd
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sub license,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the
12  * next paragraph) shall be included in all copies or substantial portions
13  * of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include <linux/debugfs.h>
25 #include <linux/err.h>
26 #include <linux/export.h>
27 #include <linux/media-bus-format.h>
28 #include <linux/module.h>
29 #include <linux/mutex.h>
30 #include <linux/srcu.h>
31 
32 #include <drm/drm_atomic_state_helper.h>
33 #include <drm/drm_bridge.h>
34 #include <drm/drm_debugfs.h>
35 #include <drm/drm_edid.h>
36 #include <drm/drm_encoder.h>
37 #include <drm/drm_file.h>
38 #include <drm/drm_of.h>
39 #include <drm/drm_print.h>
40 
41 #include "drm_crtc_internal.h"
42 
43 /**
44  * DOC: overview
45  *
46  * &struct drm_bridge represents a device that hangs on to an encoder. These are
47  * handy when a regular &drm_encoder entity isn't enough to represent the entire
48  * encoder chain.
49  *
50  * A bridge is always attached to a single &drm_encoder at a time, but can be
51  * either connected to it directly, or through a chain of bridges::
52  *
53  *     [ CRTC ---> ] Encoder ---> Bridge A ---> Bridge B
54  *
55  * Here, the output of the encoder feeds to bridge A, and that further feeds to
56  * bridge B. Bridge chains can be arbitrarily long, and shall be fully linear:
57  * Chaining multiple bridges to the output of a bridge, or the same bridge to
58  * the output of different bridges, is not supported.
59  *
60  * &drm_bridge, like &drm_panel, aren't &drm_mode_object entities like planes,
61  * CRTCs, encoders or connectors and hence are not visible to userspace. They
62  * just provide additional hooks to get the desired output at the end of the
63  * encoder chain.
64  */
65 
66 /**
67  * DOC:	display driver integration
68  *
69  * Display drivers are responsible for linking encoders with the first bridge
70  * in the chains. This is done by acquiring the appropriate bridge with
71  * devm_drm_of_get_bridge(). Once acquired, the bridge shall be attached to the
72  * encoder with a call to drm_bridge_attach().
73  *
74  * Bridges are responsible for linking themselves with the next bridge in the
75  * chain, if any. This is done the same way as for encoders, with the call to
76  * drm_bridge_attach() occurring in the &drm_bridge_funcs.attach operation.
77  *
78  * Once these links are created, the bridges can participate along with encoder
79  * functions to perform mode validation and fixup (through
80  * drm_bridge_chain_mode_valid() and drm_atomic_bridge_chain_check()), mode
81  * setting (through drm_bridge_chain_mode_set()), enable (through
82  * drm_atomic_bridge_chain_pre_enable() and drm_atomic_bridge_chain_enable())
83  * and disable (through drm_atomic_bridge_chain_disable() and
84  * drm_atomic_bridge_chain_post_disable()). Those functions call the
85  * corresponding operations provided in &drm_bridge_funcs in sequence for all
86  * bridges in the chain.
87  *
88  * For display drivers that use the atomic helpers
89  * drm_atomic_helper_check_modeset(),
90  * drm_atomic_helper_commit_modeset_enables() and
91  * drm_atomic_helper_commit_modeset_disables() (either directly in hand-rolled
92  * commit check and commit tail handlers, or through the higher-level
93  * drm_atomic_helper_check() and drm_atomic_helper_commit_tail() or
94  * drm_atomic_helper_commit_tail_rpm() helpers), this is done transparently and
95  * requires no intervention from the driver. For other drivers, the relevant
96  * DRM bridge chain functions shall be called manually.
97  *
98  * Bridges also participate in implementing the &drm_connector at the end of
99  * the bridge chain. Display drivers may use the drm_bridge_connector_init()
100  * helper to create the &drm_connector, or implement it manually on top of the
101  * connector-related operations exposed by the bridge (see the overview
102  * documentation of bridge operations for more details).
103  */
104 
105 /**
106  * DOC: special care dsi
107  *
108  * The interaction between the bridges and other frameworks involved in
109  * the probing of the upstream driver and the bridge driver can be
110  * challenging. Indeed, there are multiple cases that need to be
111  * considered:
112  *
113  * - The upstream driver doesn't use the component framework and isn't a
114  *   MIPI-DSI host. In this case, the bridge driver will probe at some
115  *   point and the upstream driver should try to probe again by returning
116  *   EPROBE_DEFER as long as the bridge driver hasn't probed.
117  *
118  * - The upstream driver doesn't use the component framework, but is a
119  *   MIPI-DSI host. The bridge device uses the MIPI-DCS commands to be
120  *   controlled. In this case, the bridge device is a child of the
121  *   display device and when it will probe it's assured that the display
122  *   device (and MIPI-DSI host) is present. The upstream driver will be
123  *   assured that the bridge driver is connected between the
124  *   &mipi_dsi_host_ops.attach and &mipi_dsi_host_ops.detach operations.
125  *   Therefore, it must run mipi_dsi_host_register() in its probe
126  *   function, and then run drm_bridge_attach() in its
127  *   &mipi_dsi_host_ops.attach hook.
128  *
129  * - The upstream driver uses the component framework and is a MIPI-DSI
130  *   host. The bridge device uses the MIPI-DCS commands to be
131  *   controlled. This is the same situation as above, and can run
132  *   mipi_dsi_host_register() in either its probe or bind hooks.
133  *
134  * - The upstream driver uses the component framework and is a MIPI-DSI
135  *   host. The bridge device uses a separate bus (such as I2C) to be
136  *   controlled. In this case, there's no correlation between the probe
137  *   of the bridge and upstream drivers, so care must be taken to avoid
138  *   an endless EPROBE_DEFER loop, with each driver waiting for the
139  *   other to probe.
140  *
141  * The ideal pattern to cover the last item (and all the others in the
142  * MIPI-DSI host driver case) is to split the operations like this:
143  *
144  * - The MIPI-DSI host driver must run mipi_dsi_host_register() in its
145  *   probe hook. It will make sure that the MIPI-DSI host sticks around,
146  *   and that the driver's bind can be called.
147  *
148  * - In its probe hook, the bridge driver must try to find its MIPI-DSI
149  *   host, register as a MIPI-DSI device and attach the MIPI-DSI device
150  *   to its host. The bridge driver is now functional.
151  *
152  * - In its &struct mipi_dsi_host_ops.attach hook, the MIPI-DSI host can
153  *   now add its component. Its bind hook will now be called and since
154  *   the bridge driver is attached and registered, we can now look for
155  *   and attach it.
156  *
157  * At this point, we're now certain that both the upstream driver and
158  * the bridge driver are functional and we can't have a deadlock-like
159  * situation when probing.
160  */
161 
162 /**
163  * DOC: dsi bridge operations
164  *
165  * DSI host interfaces are expected to be implemented as bridges rather than
166  * encoders, however there are a few aspects of their operation that need to
167  * be defined in order to provide a consistent interface.
168  *
169  * A DSI host should keep the PHY powered down until the pre_enable operation is
170  * called. All lanes are in an undefined idle state up to this point, and it
171  * must not be assumed that it is LP-11.
172  * pre_enable should initialise the PHY, set the data lanes to LP-11, and the
173  * clock lane to either LP-11 or HS depending on the mode_flag
174  * %MIPI_DSI_CLOCK_NON_CONTINUOUS.
175  *
176  * Ordinarily the downstream bridge DSI peripheral pre_enable will have been
177  * called before the DSI host. If the DSI peripheral requires LP-11 and/or
178  * the clock lane to be in HS mode prior to pre_enable, then it can set the
179  * &pre_enable_prev_first flag to request the pre_enable (and
180  * post_disable) order to be altered to enable the DSI host first.
181  *
182  * Either the CRTC being enabled, or the DSI host enable operation should switch
183  * the host to actively transmitting video on the data lanes.
184  *
185  * The reverse also applies. The DSI host disable operation or stopping the CRTC
186  * should stop transmitting video, and the data lanes should return to the LP-11
187  * state. The DSI host &post_disable operation should disable the PHY.
188  * If the &pre_enable_prev_first flag is set, then the DSI peripheral's
189  * bridge &post_disable will be called before the DSI host's post_disable.
190  *
191  * Whilst it is valid to call &host_transfer prior to pre_enable or after
192  * post_disable, the exact state of the lanes is undefined at this point. The
193  * DSI host should initialise the interface, transmit the data, and then disable
194  * the interface again.
195  *
196  * Ultra Low Power State (ULPS) is not explicitly supported by DRM. If
197  * implemented, it therefore needs to be handled entirely within the DSI Host
198  * driver.
199  */
200 
201 /* Protect bridge_list and bridge_lingering_list */
202 static DEFINE_MUTEX(bridge_lock);
203 static LIST_HEAD(bridge_list);
204 static LIST_HEAD(bridge_lingering_list);
205 
206 DEFINE_STATIC_SRCU(drm_bridge_unplug_srcu);
207 
208 /**
209  * drm_bridge_enter - Enter DRM bridge critical section
210  * @bridge: DRM bridge
211  * @idx: Pointer to index that will be passed to the matching drm_bridge_exit()
212  *
213  * This function marks and protects the beginning of a section that should not
214  * be entered after the bridge has been unplugged. The section end is marked
215  * with drm_bridge_exit(). Calls to this function can be nested.
216  *
217  * Returns:
218  * True if it is OK to enter the section, false otherwise.
219  */
220 bool drm_bridge_enter(struct drm_bridge *bridge, int *idx)
221 {
222 	*idx = srcu_read_lock(&drm_bridge_unplug_srcu);
223 
224 	if (bridge->unplugged) {
225 		srcu_read_unlock(&drm_bridge_unplug_srcu, *idx);
226 		return false;
227 	}
228 
229 	return true;
230 }
231 EXPORT_SYMBOL(drm_bridge_enter);
232 
/**
 * drm_bridge_exit - Exit DRM bridge critical section
 * @idx: index returned by drm_bridge_enter()
 *
 * This function marks the end of a section that should not be entered after
 * the bridge has been unplugged. It releases the SRCU read lock taken by the
 * matching drm_bridge_enter() call.
 */
void drm_bridge_exit(int idx)
{
	srcu_read_unlock(&drm_bridge_unplug_srcu, idx);
}
EXPORT_SYMBOL(drm_bridge_exit);
245 
/**
 * drm_bridge_unplug - declare a DRM bridge was unplugged and remove it
 * @bridge: DRM bridge
 *
 * This tells the bridge has been physically unplugged and no operations on
 * device resources must be done anymore. Entry-points can use
 * drm_bridge_enter() and drm_bridge_exit() to protect device resources in
 * a race free manner.
 *
 * Also unregisters the bridge.
 */
void drm_bridge_unplug(struct drm_bridge *bridge)
{
	bridge->unplugged = true;

	/*
	 * Wait for every in-flight drm_bridge_enter()/drm_bridge_exit()
	 * section to finish, so no one can still be touching device
	 * resources once this returns.
	 */
	synchronize_srcu(&drm_bridge_unplug_srcu);

	/* Unregister: moves the bridge to the lingering list, drops a ref. */
	drm_bridge_remove(bridge);
}
EXPORT_SYMBOL(drm_bridge_unplug);
266 
/* Final release callback for the bridge refcount (see drm_bridge_put()). */
static void __drm_bridge_free(struct kref *kref)
{
	struct drm_bridge *bridge = container_of(kref, struct drm_bridge, refcount);

	/* Unlink from whichever list (bridge_list or lingering) holds it. */
	mutex_lock(&bridge_lock);
	list_del(&bridge->list);
	mutex_unlock(&bridge_lock);

	/* Let the driver release its own resources before the memory goes. */
	if (bridge->funcs->destroy)
		bridge->funcs->destroy(bridge);

	/* Drop the reference held on the next bridge in the chain, if any. */
	drm_bridge_put(bridge->next_bridge);

	/* The bridge is embedded in the driver's container allocation. */
	kfree(bridge->container);
}
282 
283 /**
284  * drm_bridge_get - Acquire a bridge reference
285  * @bridge: DRM bridge
286  *
287  * This function increments the bridge's refcount.
288  *
289  * Returns:
290  * Pointer to @bridge.
291  */
292 struct drm_bridge *drm_bridge_get(struct drm_bridge *bridge)
293 {
294 	if (bridge)
295 		kref_get(&bridge->refcount);
296 
297 	return bridge;
298 }
299 EXPORT_SYMBOL(drm_bridge_get);
300 
301 /**
302  * drm_bridge_put - Release a bridge reference
303  * @bridge: DRM bridge
304  *
305  * This function decrements the bridge's reference count and frees the
306  * object if the reference count drops to zero.
307  */
308 void drm_bridge_put(struct drm_bridge *bridge)
309 {
310 	if (bridge)
311 		kref_put(&bridge->refcount, __drm_bridge_free);
312 }
313 EXPORT_SYMBOL(drm_bridge_put);
314 
/**
 * drm_bridge_put_void - wrapper to drm_bridge_put() taking a void pointer
 *
 * @data: pointer to @struct drm_bridge, cast to a void pointer
 *
 * Wrapper of drm_bridge_put() to be used when a function taking a void
 * pointer is needed, for example as a devm action.
 */
static void drm_bridge_put_void(void *data)
{
	drm_bridge_put((struct drm_bridge *)data);
}
329 
330 void *__devm_drm_bridge_alloc(struct device *dev, size_t size, size_t offset,
331 			      const struct drm_bridge_funcs *funcs)
332 {
333 	void *container;
334 	struct drm_bridge *bridge;
335 	int err;
336 
337 	if (!funcs) {
338 		dev_warn(dev, "Missing funcs pointer\n");
339 		return ERR_PTR(-EINVAL);
340 	}
341 
342 	container = kzalloc(size, GFP_KERNEL);
343 	if (!container)
344 		return ERR_PTR(-ENOMEM);
345 
346 	bridge = container + offset;
347 	INIT_LIST_HEAD(&bridge->list);
348 	bridge->container = container;
349 	bridge->funcs = funcs;
350 	kref_init(&bridge->refcount);
351 
352 	err = devm_add_action_or_reset(dev, drm_bridge_put_void, bridge);
353 	if (err)
354 		return ERR_PTR(err);
355 
356 	return container;
357 }
358 EXPORT_SYMBOL(__devm_drm_bridge_alloc);
359 
360 /**
361  * drm_bridge_add - register a bridge
362  *
363  * @bridge: bridge control structure
364  *
365  * Add the given bridge to the global list of bridges, where they can be
366  * found by users via of_drm_find_and_get_bridge().
367  *
368  * The bridge to be added must have been allocated by
369  * devm_drm_bridge_alloc().
370  */
371 void drm_bridge_add(struct drm_bridge *bridge)
372 {
373 	if (!bridge->container)
374 		DRM_WARN("DRM bridge corrupted or not allocated by devm_drm_bridge_alloc()\n");
375 
376 	drm_bridge_get(bridge);
377 
378 	/*
379 	 * If the bridge was previously added and then removed, it is now
380 	 * in bridge_lingering_list. Remove it or bridge_lingering_list will be
381 	 * corrupted when adding this bridge to bridge_list below.
382 	 */
383 	if (!list_empty(&bridge->list))
384 		list_del_init(&bridge->list);
385 
386 	mutex_init(&bridge->hpd_mutex);
387 
388 	if (bridge->ops & DRM_BRIDGE_OP_HDMI)
389 		bridge->ycbcr_420_allowed = !!(bridge->supported_formats &
390 					       BIT(HDMI_COLORSPACE_YUV420));
391 
392 	mutex_lock(&bridge_lock);
393 	list_add_tail(&bridge->list, &bridge_list);
394 	mutex_unlock(&bridge_lock);
395 }
396 EXPORT_SYMBOL(drm_bridge_add);
397 
/* devm action callback: adapts drm_bridge_remove() to a void pointer. */
static void drm_bridge_remove_void(void *bridge)
{
	drm_bridge_remove((struct drm_bridge *)bridge);
}
402 
403 /**
404  * devm_drm_bridge_add - devm managed version of drm_bridge_add()
405  *
406  * @dev: device to tie the bridge lifetime to
407  * @bridge: bridge control structure
408  *
409  * This is the managed version of drm_bridge_add() which automatically
410  * calls drm_bridge_remove() when @dev is unbound.
411  *
412  * Return: 0 if no error or negative error code.
413  */
414 int devm_drm_bridge_add(struct device *dev, struct drm_bridge *bridge)
415 {
416 	drm_bridge_add(bridge);
417 	return devm_add_action_or_reset(dev, drm_bridge_remove_void, bridge);
418 }
419 EXPORT_SYMBOL(devm_drm_bridge_add);
420 
/**
 * drm_bridge_remove - unregister a bridge
 *
 * @bridge: bridge control structure
 *
 * Remove the given bridge from the global list of registered bridges, so
 * it won't be found by users via of_drm_find_and_get_bridge(), and add it
 * to the lingering bridge list, to keep track of it until its allocated
 * memory is eventually freed.
 */
void drm_bridge_remove(struct drm_bridge *bridge)
{
	/* Park the bridge on the lingering list until the last ref drops. */
	mutex_lock(&bridge_lock);
	list_move_tail(&bridge->list, &bridge_lingering_list);
	mutex_unlock(&bridge_lock);

	mutex_destroy(&bridge->hpd_mutex);

	/* Drop the reference taken by drm_bridge_add(). */
	drm_bridge_put(bridge);
}
EXPORT_SYMBOL(drm_bridge_remove);
442 
443 static struct drm_private_state *
444 drm_bridge_atomic_duplicate_priv_state(struct drm_private_obj *obj)
445 {
446 	struct drm_bridge *bridge = drm_priv_to_bridge(obj);
447 	struct drm_bridge_state *state;
448 
449 	state = bridge->funcs->atomic_duplicate_state(bridge);
450 	return state ? &state->base : NULL;
451 }
452 
453 static void
454 drm_bridge_atomic_destroy_priv_state(struct drm_private_obj *obj,
455 				     struct drm_private_state *s)
456 {
457 	struct drm_bridge_state *state = drm_priv_to_bridge_state(s);
458 	struct drm_bridge *bridge = drm_priv_to_bridge(obj);
459 
460 	bridge->funcs->atomic_destroy_state(bridge, state);
461 }
462 
463 static struct drm_private_state *
464 drm_bridge_atomic_create_priv_state(struct drm_private_obj *obj)
465 {
466 	struct drm_bridge *bridge = drm_priv_to_bridge(obj);
467 	struct drm_bridge_state *state;
468 
469 	state = bridge->funcs->atomic_reset(bridge);
470 	if (IS_ERR(state))
471 		return ERR_CAST(state);
472 
473 	return &state->base;
474 }
475 
/* Private-object state vtable routing to the bridge's own atomic hooks. */
static const struct drm_private_state_funcs drm_bridge_priv_state_funcs = {
	.atomic_create_state = drm_bridge_atomic_create_priv_state,
	.atomic_duplicate_state = drm_bridge_atomic_duplicate_priv_state,
	.atomic_destroy_state = drm_bridge_atomic_destroy_priv_state,
};
481 
482 static bool drm_bridge_is_atomic(struct drm_bridge *bridge)
483 {
484 	return bridge->funcs->atomic_reset != NULL;
485 }
486 
/**
 * drm_bridge_attach - attach the bridge to an encoder's chain
 *
 * @encoder: DRM encoder
 * @bridge: bridge to attach
 * @previous: previous bridge in the chain (optional)
 * @flags: DRM_BRIDGE_ATTACH_* flags
 *
 * Called by a kms driver to link the bridge to an encoder's chain. The previous
 * argument specifies the previous bridge in the chain. If NULL, the bridge is
 * linked directly at the encoder's output. Otherwise it is linked at the
 * previous bridge's output.
 *
 * If non-NULL the previous bridge must be already attached by a call to this
 * function.
 *
 * The bridge to be attached must have been previously added by
 * drm_bridge_add().
 *
 * Note that bridges attached to encoders are auto-detached during encoder
 * cleanup in drm_encoder_cleanup(), so drm_bridge_attach() should generally
 * *not* be balanced with a drm_bridge_detach() in driver code.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
int drm_bridge_attach(struct drm_encoder *encoder, struct drm_bridge *bridge,
		      struct drm_bridge *previous,
		      enum drm_bridge_attach_flags flags)
{
	int ret;

	if (!encoder || !bridge)
		return -EINVAL;

	if (!bridge->container)
		DRM_WARN("DRM bridge corrupted or not allocated by devm_drm_bridge_alloc()\n");

	/* An empty list node means drm_bridge_add() was never called. */
	if (list_empty(&bridge->list))
		DRM_WARN("Missing drm_bridge_add() before attach\n");

	/* The encoder chain holds a reference, released on detach/failure. */
	drm_bridge_get(bridge);

	/* @previous, when given, must itself be attached to this @encoder. */
	if (previous && (!previous->dev || previous->encoder != encoder)) {
		ret = -EINVAL;
		goto err_put_bridge;
	}

	/* A non-NULL dev means the bridge is already attached somewhere. */
	if (bridge->dev) {
		ret = -EBUSY;
		goto err_put_bridge;
	}

	bridge->dev = encoder->dev;
	bridge->encoder = encoder;

	/* Link after @previous, or at the head of the encoder's chain. */
	if (previous)
		list_add(&bridge->chain_node, &previous->chain_node);
	else
		list_add(&bridge->chain_node, &encoder->bridge_chain);

	if (bridge->funcs->attach) {
		ret = bridge->funcs->attach(bridge, encoder, flags);
		if (ret < 0)
			goto err_reset_bridge;
	}

	/* Atomic bridges carry their state in a DRM private object. */
	if (drm_bridge_is_atomic(bridge))
		drm_atomic_private_obj_init(bridge->dev, &bridge->base,
					    NULL,
					    &drm_bridge_priv_state_funcs);

	return 0;

err_reset_bridge:
	/* Undo the linkage done above before reporting the failure. */
	bridge->dev = NULL;
	bridge->encoder = NULL;
	list_del(&bridge->chain_node);

	/* Probe deferral is expected; log it at probe severity only. */
	if (ret != -EPROBE_DEFER)
		DRM_ERROR("failed to attach bridge %pOF to encoder %s: %d\n",
			  bridge->of_node, encoder->name, ret);
	else
		dev_err_probe(encoder->dev->dev, -EPROBE_DEFER,
			      "failed to attach bridge %pOF to encoder %s\n",
			      bridge->of_node, encoder->name);

err_put_bridge:
	drm_bridge_put(bridge);
	return ret;
}
EXPORT_SYMBOL(drm_bridge_attach);
579 
/*
 * Counterpart of drm_bridge_attach(): unlink @bridge from its encoder
 * chain and drop the reference taken at attach time. Per the
 * drm_bridge_attach() documentation, this runs from encoder cleanup
 * rather than being called directly by drivers.
 */
void drm_bridge_detach(struct drm_bridge *bridge)
{
	if (WARN_ON(!bridge))
		return;

	/* A NULL dev means the bridge was never (or is no longer) attached. */
	if (WARN_ON(!bridge->dev))
		return;

	/* Tear down the atomic private object created at attach time. */
	if (drm_bridge_is_atomic(bridge))
		drm_atomic_private_obj_fini(&bridge->base);

	if (bridge->funcs->detach)
		bridge->funcs->detach(bridge);

	list_del(&bridge->chain_node);
	bridge->dev = NULL;
	/* Drop the reference taken by drm_bridge_attach(). */
	drm_bridge_put(bridge);
}
598 
599 /**
600  * DOC: bridge operations
601  *
602  * Bridge drivers expose operations through the &drm_bridge_funcs structure.
603  * The DRM internals (atomic and CRTC helpers) use the helpers defined in
604  * drm_bridge.c to call bridge operations. Those operations are divided in
605  * three big categories to support different parts of the bridge usage.
606  *
607  * - The encoder-related operations support control of the bridges in the
608  *   chain, and are roughly counterparts to the &drm_encoder_helper_funcs
609  *   operations. They are used by the legacy CRTC and the atomic modeset
610  *   helpers to perform mode validation, fixup and setting, and enable and
611  *   disable the bridge automatically.
612  *
613  *   The enable and disable operations are split in
614  *   &drm_bridge_funcs.pre_enable, &drm_bridge_funcs.enable,
615  *   &drm_bridge_funcs.disable and &drm_bridge_funcs.post_disable to provide
616  *   finer-grained control.
617  *
618  *   Bridge drivers may implement the legacy version of those operations, or
619  *   the atomic version (prefixed with atomic\_), in which case they shall also
620  *   implement the atomic state bookkeeping operations
621  *   (&drm_bridge_funcs.atomic_duplicate_state,
622  *   &drm_bridge_funcs.atomic_destroy_state and &drm_bridge_funcs.reset).
623  *   Mixing atomic and non-atomic versions of the operations is not supported.
624  *
625  * - The bus format negotiation operations
626  *   &drm_bridge_funcs.atomic_get_output_bus_fmts and
627  *   &drm_bridge_funcs.atomic_get_input_bus_fmts allow bridge drivers to
628  *   negotiate the formats transmitted between bridges in the chain when
629  *   multiple formats are supported. Negotiation for formats is performed
630  *   transparently for display drivers by the atomic modeset helpers. Only
631  *   atomic versions of those operations exist, bridge drivers that need to
632  *   implement them shall thus also implement the atomic version of the
633  *   encoder-related operations. This feature is not supported by the legacy
634  *   CRTC helpers.
635  *
636  * - The connector-related operations support implementing a &drm_connector
637  *   based on a chain of bridges. DRM bridges traditionally create a
638  *   &drm_connector for bridges meant to be used at the end of the chain. This
639  *   puts additional burden on bridge drivers, especially for bridges that may
640  *   be used in the middle of a chain or at the end of it. Furthermore, it
641  *   requires all operations of the &drm_connector to be handled by a single
642  *   bridge, which doesn't always match the hardware architecture.
643  *
644  *   To simplify bridge drivers and make the connector implementation more
645  *   flexible, a new model allows bridges to unconditionally skip creation of
646  *   &drm_connector and instead expose &drm_bridge_funcs operations to support
647  *   an externally-implemented &drm_connector. Those operations are
648  *   &drm_bridge_funcs.detect, &drm_bridge_funcs.get_modes,
649  *   &drm_bridge_funcs.get_edid, &drm_bridge_funcs.hpd_notify,
650  *   &drm_bridge_funcs.hpd_enable and &drm_bridge_funcs.hpd_disable. When
651  *   implemented, display drivers shall create a &drm_connector instance for
652  *   each chain of bridges, and implement those connector instances based on
653  *   the bridge connector operations.
654  *
655  *   Bridge drivers shall implement the connector-related operations for all
656  *   the features that the bridge hardware support. For instance, if a bridge
657  *   supports reading EDID, the &drm_bridge_funcs.get_edid shall be
658  *   implemented. This however doesn't mean that the DDC lines are wired to the
659  *   bridge on a particular platform, as they could also be connected to an I2C
660  *   controller of the SoC. Support for the connector-related operations on the
661  *   running platform is reported through the &drm_bridge.ops flags. Bridge
662  *   drivers shall detect which operations they can support on the platform
663  *   (usually this information is provided by ACPI or DT), and set the
664  *   &drm_bridge.ops flags for all supported operations. A flag shall only be
665  *   set if the corresponding &drm_bridge_funcs operation is implemented, but
666  *   an implemented operation doesn't necessarily imply that the corresponding
667  *   flag will be set. Display drivers shall use the &drm_bridge.ops flags to
668  *   decide which bridge to delegate a connector operation to. This mechanism
669  *   allows providing a single static const &drm_bridge_funcs instance in
670  *   bridge drivers, improving security by storing function pointers in
671  *   read-only memory.
672  *
673  *   In order to ease transition, bridge drivers may support both the old and
674  *   new models by making connector creation optional and implementing the
675  *   connected-related bridge operations. Connector creation is then controlled
676  *   by the flags argument to the drm_bridge_attach() function. Display drivers
677  *   that support the new model and create connectors themselves shall set the
678  *   %DRM_BRIDGE_ATTACH_NO_CONNECTOR flag, and bridge drivers shall then skip
679  *   connector creation. For intermediate bridges in the chain, the flag shall
680  *   be passed to the drm_bridge_attach() call for the downstream bridge.
681  *   Bridge drivers that implement the new model only shall return an error
682  *   from their &drm_bridge_funcs.attach handler when the
683  *   %DRM_BRIDGE_ATTACH_NO_CONNECTOR flag is not set. New display drivers
684  *   should use the new model, and convert the bridge drivers they use if
685  *   needed, in order to gradually transition to the new model.
686  */
687 
688 /**
689  * drm_bridge_chain_mode_valid - validate the mode against all bridges in the
690  *				 encoder chain.
691  * @bridge: bridge control structure
692  * @info: display info against which the mode shall be validated
693  * @mode: desired mode to be validated
694  *
695  * Calls &drm_bridge_funcs.mode_valid for all the bridges in the encoder
696  * chain, starting from the first bridge to the last. If at least one bridge
697  * does not accept the mode the function returns the error code.
698  *
699  * Note: the bridge passed should be the one closest to the encoder.
700  *
701  * RETURNS:
702  * MODE_OK on success, drm_mode_status Enum error code on failure
703  */
704 enum drm_mode_status
705 drm_bridge_chain_mode_valid(struct drm_bridge *bridge,
706 			    const struct drm_display_info *info,
707 			    const struct drm_display_mode *mode)
708 {
709 	struct drm_encoder *encoder;
710 
711 	if (!bridge)
712 		return MODE_OK;
713 
714 	encoder = bridge->encoder;
715 	list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
716 		enum drm_mode_status ret;
717 
718 		if (!bridge->funcs->mode_valid)
719 			continue;
720 
721 		ret = bridge->funcs->mode_valid(bridge, info, mode);
722 		if (ret != MODE_OK)
723 			return ret;
724 	}
725 
726 	return MODE_OK;
727 }
728 EXPORT_SYMBOL(drm_bridge_chain_mode_valid);
729 
730 /**
731  * drm_bridge_chain_mode_set - set proposed mode for all bridges in the
732  *			       encoder chain
733  * @bridge: bridge control structure
734  * @mode: desired mode to be set for the encoder chain
735  * @adjusted_mode: updated mode that works for this encoder chain
736  *
737  * Calls &drm_bridge_funcs.mode_set op for all the bridges in the
738  * encoder chain, starting from the first bridge to the last.
739  *
740  * Note: the bridge passed should be the one closest to the encoder
741  */
742 void drm_bridge_chain_mode_set(struct drm_bridge *bridge,
743 			       const struct drm_display_mode *mode,
744 			       const struct drm_display_mode *adjusted_mode)
745 {
746 	struct drm_encoder *encoder;
747 
748 	if (!bridge)
749 		return;
750 
751 	encoder = bridge->encoder;
752 	list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
753 		if (bridge->funcs->mode_set)
754 			bridge->funcs->mode_set(bridge, mode, adjusted_mode);
755 	}
756 }
757 EXPORT_SYMBOL(drm_bridge_chain_mode_set);
758 
/**
 * drm_atomic_bridge_chain_disable - disables all bridges in the encoder chain
 * @bridge: bridge control structure
 * @state: atomic state being committed
 *
 * Calls &drm_bridge_funcs.atomic_disable (falls back on
 * &drm_bridge_funcs.disable) op for all the bridges in the encoder chain,
 * starting from the last bridge to the first. These are called before calling
 * &drm_encoder_helper_funcs.atomic_disable
 *
 * Note: the bridge passed should be the one closest to the encoder
 */
void drm_atomic_bridge_chain_disable(struct drm_bridge *bridge,
				     struct drm_atomic_state *state)
{
	struct drm_encoder *encoder;
	struct drm_bridge *iter;

	if (!bridge)
		return;

	encoder = bridge->encoder;
	/*
	 * Walk the chain in reverse: the bridge furthest from the encoder
	 * is disabled first.
	 */
	list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) {
		/* Prefer the atomic hook, fall back to the legacy one. */
		if (iter->funcs->atomic_disable) {
			iter->funcs->atomic_disable(iter, state);
		} else if (iter->funcs->disable) {
			iter->funcs->disable(iter);
		}

		/* Stop once the starting bridge itself has been disabled. */
		if (iter == bridge)
			break;
	}
}
EXPORT_SYMBOL(drm_atomic_bridge_chain_disable);
793 
794 static void drm_atomic_bridge_call_post_disable(struct drm_bridge *bridge,
795 						struct drm_atomic_state *state)
796 {
797 	if (state && bridge->funcs->atomic_post_disable)
798 		bridge->funcs->atomic_post_disable(bridge, state);
799 	else if (bridge->funcs->post_disable)
800 		bridge->funcs->post_disable(bridge);
801 }
802 
803 /**
804  * drm_atomic_bridge_chain_post_disable - cleans up after disabling all bridges
805  *					  in the encoder chain
806  * @bridge: bridge control structure
807  * @state: atomic state being committed
808  *
809  * Calls &drm_bridge_funcs.atomic_post_disable (falls back on
810  * &drm_bridge_funcs.post_disable) op for all the bridges in the encoder chain,
811  * starting from the first bridge to the last. These are called after completing
812  * &drm_encoder_helper_funcs.atomic_disable
813  *
814  * If a bridge sets @pre_enable_prev_first, then the @post_disable for that
815  * bridge will be called before the previous one to reverse the @pre_enable
816  * calling direction.
817  *
818  * Example:
819  * Bridge A ---> Bridge B ---> Bridge C ---> Bridge D ---> Bridge E
820  *
821  * With pre_enable_prev_first flag enable in Bridge B, D, E then the resulting
822  * @post_disable order would be,
823  * Bridge B, Bridge A, Bridge E, Bridge D, Bridge C.
824  *
825  * Note: the bridge passed should be the one closest to the encoder
826  */
827 void drm_atomic_bridge_chain_post_disable(struct drm_bridge *bridge,
828 					  struct drm_atomic_state *state)
829 {
830 	struct drm_encoder *encoder;
831 	struct drm_bridge *next, *limit;
832 
833 	if (!bridge)
834 		return;
835 
836 	encoder = bridge->encoder;
837 
838 	list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
839 		limit = NULL;
840 
841 		if (!list_is_last(&bridge->chain_node, &encoder->bridge_chain)) {
842 			next = list_next_entry(bridge, chain_node);
843 
844 			if (next->pre_enable_prev_first) {
845 				/* next bridge had requested that prev
846 				 * was enabled first, so disabled last
847 				 */
848 				limit = next;
849 
850 				/* Find the next bridge that has NOT requested
851 				 * prev to be enabled first / disabled last
852 				 */
853 				list_for_each_entry_from(next, &encoder->bridge_chain,
854 							 chain_node) {
855 					if (!next->pre_enable_prev_first) {
856 						next = list_prev_entry(next, chain_node);
857 						limit = next;
858 						break;
859 					}
860 
861 					if (list_is_last(&next->chain_node,
862 							 &encoder->bridge_chain)) {
863 						limit = next;
864 						break;
865 					}
866 				}
867 
868 				/* Call these bridges in reverse order */
869 				list_for_each_entry_from_reverse(next, &encoder->bridge_chain,
870 								 chain_node) {
871 					if (next == bridge)
872 						break;
873 
874 					drm_atomic_bridge_call_post_disable(next,
875 									    state);
876 				}
877 			}
878 		}
879 
880 		drm_atomic_bridge_call_post_disable(bridge, state);
881 
882 		if (limit)
883 			/* Jump all bridges that we have already post_disabled */
884 			bridge = limit;
885 	}
886 }
887 EXPORT_SYMBOL(drm_atomic_bridge_chain_post_disable);
888 
889 static void drm_atomic_bridge_call_pre_enable(struct drm_bridge *bridge,
890 					      struct drm_atomic_state *state)
891 {
892 	if (state && bridge->funcs->atomic_pre_enable)
893 		bridge->funcs->atomic_pre_enable(bridge, state);
894 	else if (bridge->funcs->pre_enable)
895 		bridge->funcs->pre_enable(bridge);
896 }
897 
898 /**
899  * drm_atomic_bridge_chain_pre_enable - prepares for enabling all bridges in
900  *					the encoder chain
901  * @bridge: bridge control structure
902  * @state: atomic state being committed
903  *
904  * Calls &drm_bridge_funcs.atomic_pre_enable (falls back on
905  * &drm_bridge_funcs.pre_enable) op for all the bridges in the encoder chain,
906  * starting from the last bridge to the first. These are called before calling
907  * &drm_encoder_helper_funcs.atomic_enable
908  *
909  * If a bridge sets @pre_enable_prev_first, then the pre_enable for the
910  * prev bridge will be called before pre_enable of this bridge.
911  *
912  * Example:
913  * Bridge A ---> Bridge B ---> Bridge C ---> Bridge D ---> Bridge E
914  *
915  * With pre_enable_prev_first flag enable in Bridge B, D, E then the resulting
916  * @pre_enable order would be,
917  * Bridge C, Bridge D, Bridge E, Bridge A, Bridge B.
918  *
919  * Note: the bridge passed should be the one closest to the encoder
920  */
921 void drm_atomic_bridge_chain_pre_enable(struct drm_bridge *bridge,
922 					struct drm_atomic_state *state)
923 {
924 	struct drm_encoder *encoder;
925 	struct drm_bridge *iter, *next, *limit;
926 
927 	if (!bridge)
928 		return;
929 
930 	encoder = bridge->encoder;
931 
932 	list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) {
933 		if (iter->pre_enable_prev_first) {
934 			next = iter;
935 			limit = bridge;
936 			list_for_each_entry_from_reverse(next,
937 							 &encoder->bridge_chain,
938 							 chain_node) {
939 				if (next == bridge)
940 					break;
941 
942 				if (!next->pre_enable_prev_first) {
943 					/* Found first bridge that does NOT
944 					 * request prev to be enabled first
945 					 */
946 					limit = next;
947 					break;
948 				}
949 			}
950 
951 			list_for_each_entry_from(next, &encoder->bridge_chain, chain_node) {
952 				/* Call requested prev bridge pre_enable
953 				 * in order.
954 				 */
955 				if (next == iter)
956 					/* At the first bridge to request prev
957 					 * bridges called first.
958 					 */
959 					break;
960 
961 				drm_atomic_bridge_call_pre_enable(next, state);
962 			}
963 		}
964 
965 		drm_atomic_bridge_call_pre_enable(iter, state);
966 
967 		if (iter->pre_enable_prev_first)
968 			/* Jump all bridges that we have already pre_enabled */
969 			iter = limit;
970 
971 		if (iter == bridge)
972 			break;
973 	}
974 }
975 EXPORT_SYMBOL(drm_atomic_bridge_chain_pre_enable);
976 
977 /**
978  * drm_atomic_bridge_chain_enable - enables all bridges in the encoder chain
979  * @bridge: bridge control structure
980  * @state: atomic state being committed
981  *
982  * Calls &drm_bridge_funcs.atomic_enable (falls back on
983  * &drm_bridge_funcs.enable) op for all the bridges in the encoder chain,
984  * starting from the first bridge to the last. These are called after completing
985  * &drm_encoder_helper_funcs.atomic_enable
986  *
987  * Note: the bridge passed should be the one closest to the encoder
988  */
989 void drm_atomic_bridge_chain_enable(struct drm_bridge *bridge,
990 				    struct drm_atomic_state *state)
991 {
992 	struct drm_encoder *encoder;
993 
994 	if (!bridge)
995 		return;
996 
997 	encoder = bridge->encoder;
998 	list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
999 		if (bridge->funcs->atomic_enable) {
1000 			bridge->funcs->atomic_enable(bridge, state);
1001 		} else if (bridge->funcs->enable) {
1002 			bridge->funcs->enable(bridge);
1003 		}
1004 	}
1005 }
1006 EXPORT_SYMBOL(drm_atomic_bridge_chain_enable);
1007 
1008 static int drm_atomic_bridge_check(struct drm_bridge *bridge,
1009 				   struct drm_crtc_state *crtc_state,
1010 				   struct drm_connector_state *conn_state)
1011 {
1012 	if (bridge->funcs->atomic_check) {
1013 		struct drm_bridge_state *bridge_state;
1014 		int ret;
1015 
1016 		bridge_state = drm_atomic_get_new_bridge_state(crtc_state->state,
1017 							       bridge);
1018 		if (WARN_ON(!bridge_state))
1019 			return -EINVAL;
1020 
1021 		ret = bridge->funcs->atomic_check(bridge, bridge_state,
1022 						  crtc_state, conn_state);
1023 		if (ret)
1024 			return ret;
1025 	} else if (bridge->funcs->mode_fixup) {
1026 		if (!bridge->funcs->mode_fixup(bridge, &crtc_state->mode,
1027 					       &crtc_state->adjusted_mode))
1028 			return -EINVAL;
1029 	}
1030 
1031 	return 0;
1032 }
1033 
/*
 * Recursively negotiate the input bus format of @cur_bridge, given that its
 * output must be @out_bus_fmt, then recurse towards @first_bridge with each
 * candidate input format. On success the negotiated formats are stored in
 * each bridge's new atomic state.
 *
 * Returns 0 on success, -ENOTSUPP if no format combination works, or another
 * negative error code on failure.
 */
static int select_bus_fmt_recursive(struct drm_bridge *first_bridge,
				    struct drm_bridge *cur_bridge,
				    struct drm_crtc_state *crtc_state,
				    struct drm_connector_state *conn_state,
				    u32 out_bus_fmt)
{
	unsigned int i, num_in_bus_fmts = 0;
	struct drm_bridge_state *cur_state;
	/* Reference dropped automatically when this scope is left. */
	struct drm_bridge *prev_bridge __free(drm_bridge_put) =
		drm_bridge_get_prev_bridge(cur_bridge);
	u32 *in_bus_fmts;
	int ret;

	cur_state = drm_atomic_get_new_bridge_state(crtc_state->state,
						    cur_bridge);

	/*
	 * If bus format negotiation is not supported by this bridge, let's
	 * pass MEDIA_BUS_FMT_FIXED to the previous bridge in the chain and
	 * hope that it can handle this situation gracefully (by providing
	 * appropriate default values).
	 */
	if (!cur_bridge->funcs->atomic_get_input_bus_fmts) {
		if (cur_bridge != first_bridge) {
			ret = select_bus_fmt_recursive(first_bridge,
						       prev_bridge, crtc_state,
						       conn_state,
						       MEDIA_BUS_FMT_FIXED);
			if (ret)
				return ret;
		}

		/*
		 * Driver does not implement the atomic state hooks, but that's
		 * fine, as long as it does not access the bridge state.
		 */
		if (cur_state) {
			cur_state->input_bus_cfg.format = MEDIA_BUS_FMT_FIXED;
			cur_state->output_bus_cfg.format = out_bus_fmt;
		}

		return 0;
	}

	/*
	 * If the driver implements ->atomic_get_input_bus_fmts() it
	 * should also implement the atomic state hooks.
	 */
	if (WARN_ON(!cur_state))
		return -EINVAL;

	/* The returned array is owned by us and freed below. */
	in_bus_fmts = cur_bridge->funcs->atomic_get_input_bus_fmts(cur_bridge,
							cur_state,
							crtc_state,
							conn_state,
							out_bus_fmt,
							&num_in_bus_fmts);
	if (!num_in_bus_fmts)
		return -ENOTSUPP;
	else if (!in_bus_fmts)
		return -ENOMEM;

	/* Recursion terminates at the first bridge: pick its first format. */
	if (first_bridge == cur_bridge) {
		cur_state->input_bus_cfg.format = in_bus_fmts[0];
		cur_state->output_bus_cfg.format = out_bus_fmt;
		kfree(in_bus_fmts);
		return 0;
	}

	/* Try each candidate until one is accepted by the rest of the chain. */
	for (i = 0; i < num_in_bus_fmts; i++) {
		ret = select_bus_fmt_recursive(first_bridge, prev_bridge,
					       crtc_state, conn_state,
					       in_bus_fmts[i]);
		if (ret != -ENOTSUPP)
			break;
	}

	if (!ret) {
		cur_state->input_bus_cfg.format = in_bus_fmts[i];
		cur_state->output_bus_cfg.format = out_bus_fmt;
	}

	kfree(in_bus_fmts);
	return ret;
}
1119 
1120 /*
1121  * This function is called by &drm_atomic_bridge_chain_check() just before
1122  * calling &drm_bridge_funcs.atomic_check() on all elements of the chain.
1123  * It performs bus format negotiation between bridge elements. The negotiation
1124  * happens in reverse order, starting from the last element in the chain up to
1125  * @bridge.
1126  *
1127  * Negotiation starts by retrieving supported output bus formats on the last
1128  * bridge element and testing them one by one. The test is recursive, meaning
1129  * that for each tested output format, the whole chain will be walked backward,
1130  * and each element will have to choose an input bus format that can be
1131  * transcoded to the requested output format. When a bridge element does not
1132  * support transcoding into a specific output format -ENOTSUPP is returned and
1133  * the next bridge element will have to try a different format. If none of the
1134  * combinations worked, -ENOTSUPP is returned and the atomic modeset will fail.
1135  *
1136  * This implementation is relying on
1137  * &drm_bridge_funcs.atomic_get_output_bus_fmts() and
1138  * &drm_bridge_funcs.atomic_get_input_bus_fmts() to gather supported
1139  * input/output formats.
1140  *
1141  * When &drm_bridge_funcs.atomic_get_output_bus_fmts() is not implemented by
1142  * the last element of the chain, &drm_atomic_bridge_chain_select_bus_fmts()
1143  * tries a single format: &drm_connector.display_info.bus_formats[0] if
1144  * available, MEDIA_BUS_FMT_FIXED otherwise.
1145  *
1146  * When &drm_bridge_funcs.atomic_get_input_bus_fmts() is not implemented,
1147  * &drm_atomic_bridge_chain_select_bus_fmts() skips the negotiation on the
1148  * bridge element that lacks this hook and asks the previous element in the
1149  * chain to try MEDIA_BUS_FMT_FIXED. It's up to bridge drivers to decide what
1150  * to do in that case (fail if they want to enforce bus format negotiation, or
1151  * provide a reasonable default if they need to support pipelines where not
1152  * all elements support bus format negotiation).
1153  */
1154 static int
1155 drm_atomic_bridge_chain_select_bus_fmts(struct drm_bridge *bridge,
1156 					struct drm_crtc_state *crtc_state,
1157 					struct drm_connector_state *conn_state)
1158 {
1159 	struct drm_connector *conn = conn_state->connector;
1160 	struct drm_encoder *encoder = bridge->encoder;
1161 	struct drm_bridge_state *last_bridge_state;
1162 	unsigned int i, num_out_bus_fmts = 0;
1163 	u32 *out_bus_fmts;
1164 	int ret = 0;
1165 
1166 	struct drm_bridge *last_bridge __free(drm_bridge_put) =
1167 		drm_bridge_get(list_last_entry(&encoder->bridge_chain,
1168 					       struct drm_bridge, chain_node));
1169 	last_bridge_state = drm_atomic_get_new_bridge_state(crtc_state->state,
1170 							    last_bridge);
1171 
1172 	if (last_bridge->funcs->atomic_get_output_bus_fmts) {
1173 		const struct drm_bridge_funcs *funcs = last_bridge->funcs;
1174 
1175 		/*
1176 		 * If the driver implements ->atomic_get_output_bus_fmts() it
1177 		 * should also implement the atomic state hooks.
1178 		 */
1179 		if (WARN_ON(!last_bridge_state))
1180 			return -EINVAL;
1181 
1182 		out_bus_fmts = funcs->atomic_get_output_bus_fmts(last_bridge,
1183 							last_bridge_state,
1184 							crtc_state,
1185 							conn_state,
1186 							&num_out_bus_fmts);
1187 		if (!num_out_bus_fmts)
1188 			return -ENOTSUPP;
1189 		else if (!out_bus_fmts)
1190 			return -ENOMEM;
1191 	} else {
1192 		num_out_bus_fmts = 1;
1193 		out_bus_fmts = kmalloc_obj(*out_bus_fmts);
1194 		if (!out_bus_fmts)
1195 			return -ENOMEM;
1196 
1197 		if (conn->display_info.num_bus_formats &&
1198 		    conn->display_info.bus_formats)
1199 			out_bus_fmts[0] = conn->display_info.bus_formats[0];
1200 		else
1201 			out_bus_fmts[0] = MEDIA_BUS_FMT_FIXED;
1202 	}
1203 
1204 	for (i = 0; i < num_out_bus_fmts; i++) {
1205 		ret = select_bus_fmt_recursive(bridge, last_bridge, crtc_state,
1206 					       conn_state, out_bus_fmts[i]);
1207 		if (ret != -ENOTSUPP)
1208 			break;
1209 	}
1210 
1211 	kfree(out_bus_fmts);
1212 
1213 	return ret;
1214 }
1215 
1216 static void
1217 drm_atomic_bridge_propagate_bus_flags(struct drm_bridge *bridge,
1218 				      struct drm_connector *conn,
1219 				      struct drm_atomic_state *state)
1220 {
1221 	struct drm_bridge_state *bridge_state, *next_bridge_state;
1222 	u32 output_flags = 0;
1223 
1224 	bridge_state = drm_atomic_get_new_bridge_state(state, bridge);
1225 
1226 	/* No bridge state attached to this bridge => nothing to propagate. */
1227 	if (!bridge_state)
1228 		return;
1229 
1230 	struct drm_bridge *next_bridge __free(drm_bridge_put) = drm_bridge_get_next_bridge(bridge);
1231 
1232 	/*
1233 	 * Let's try to apply the most common case here, that is, propagate
1234 	 * display_info flags for the last bridge, and propagate the input
1235 	 * flags of the next bridge element to the output end of the current
1236 	 * bridge when the bridge is not the last one.
1237 	 * There are exceptions to this rule, like when signal inversion is
1238 	 * happening at the board level, but that's something drivers can deal
1239 	 * with from their &drm_bridge_funcs.atomic_check() implementation by
1240 	 * simply overriding the flags value we've set here.
1241 	 */
1242 	if (!next_bridge) {
1243 		output_flags = conn->display_info.bus_flags;
1244 	} else {
1245 		next_bridge_state = drm_atomic_get_new_bridge_state(state,
1246 								next_bridge);
1247 		/*
1248 		 * No bridge state attached to the next bridge, just leave the
1249 		 * flags to 0.
1250 		 */
1251 		if (next_bridge_state)
1252 			output_flags = next_bridge_state->input_bus_cfg.flags;
1253 	}
1254 
1255 	bridge_state->output_bus_cfg.flags = output_flags;
1256 
1257 	/*
1258 	 * Propagate the output flags to the input end of the bridge. Again, it's
1259 	 * not necessarily what all bridges want, but that's what most of them
1260 	 * do, and by doing that by default we avoid forcing drivers to
1261 	 * duplicate the "dummy propagation" logic.
1262 	 */
1263 	bridge_state->input_bus_cfg.flags = output_flags;
1264 }
1265 
1266 /**
1267  * drm_atomic_bridge_chain_check() - Do an atomic check on the bridge chain
1268  * @bridge: bridge control structure
1269  * @crtc_state: new CRTC state
1270  * @conn_state: new connector state
1271  *
1272  * First trigger a bus format negotiation before calling
1273  * &drm_bridge_funcs.atomic_check() (falls back on
1274  * &drm_bridge_funcs.mode_fixup()) op for all the bridges in the encoder chain,
1275  * starting from the last bridge to the first. These are called before calling
1276  * &drm_encoder_helper_funcs.atomic_check()
1277  *
1278  * RETURNS:
1279  * 0 on success, a negative error code on failure
1280  */
1281 int drm_atomic_bridge_chain_check(struct drm_bridge *bridge,
1282 				  struct drm_crtc_state *crtc_state,
1283 				  struct drm_connector_state *conn_state)
1284 {
1285 	struct drm_connector *conn = conn_state->connector;
1286 	struct drm_encoder *encoder;
1287 	struct drm_bridge *iter;
1288 	int ret;
1289 
1290 	if (!bridge)
1291 		return 0;
1292 
1293 	ret = drm_atomic_bridge_chain_select_bus_fmts(bridge, crtc_state,
1294 						      conn_state);
1295 	if (ret)
1296 		return ret;
1297 
1298 	encoder = bridge->encoder;
1299 	list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) {
1300 		int ret;
1301 
1302 		/*
1303 		 * Bus flags are propagated by default. If a bridge needs to
1304 		 * tweak the input bus flags for any reason, it should happen
1305 		 * in its &drm_bridge_funcs.atomic_check() implementation such
1306 		 * that preceding bridges in the chain can propagate the new
1307 		 * bus flags.
1308 		 */
1309 		drm_atomic_bridge_propagate_bus_flags(iter, conn,
1310 						      crtc_state->state);
1311 
1312 		ret = drm_atomic_bridge_check(iter, crtc_state, conn_state);
1313 		if (ret)
1314 			return ret;
1315 
1316 		if (iter == bridge)
1317 			break;
1318 	}
1319 
1320 	return 0;
1321 }
1322 EXPORT_SYMBOL(drm_atomic_bridge_chain_check);
1323 
1324 /**
1325  * drm_bridge_detect - check if anything is attached to the bridge output
1326  * @bridge: bridge control structure
1327  * @connector: attached connector
1328  *
1329  * If the bridge supports output detection, as reported by the
1330  * DRM_BRIDGE_OP_DETECT bridge ops flag, call &drm_bridge_funcs.detect for the
1331  * bridge and return the connection status. Otherwise return
1332  * connector_status_unknown.
1333  *
1334  * RETURNS:
1335  * The detection status on success, or connector_status_unknown if the bridge
1336  * doesn't support output detection.
1337  */
1338 enum drm_connector_status
1339 drm_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
1340 {
1341 	if (!(bridge->ops & DRM_BRIDGE_OP_DETECT))
1342 		return connector_status_unknown;
1343 
1344 	return bridge->funcs->detect(bridge, connector);
1345 }
1346 EXPORT_SYMBOL_GPL(drm_bridge_detect);
1347 
1348 /**
1349  * drm_bridge_get_modes - fill all modes currently valid for the sink into the
1350  * @connector
1351  * @bridge: bridge control structure
1352  * @connector: the connector to fill with modes
1353  *
1354  * If the bridge supports output modes retrieval, as reported by the
1355  * DRM_BRIDGE_OP_MODES bridge ops flag, call &drm_bridge_funcs.get_modes to
1356  * fill the connector with all valid modes and return the number of modes
1357  * added. Otherwise return 0.
1358  *
1359  * RETURNS:
1360  * The number of modes added to the connector.
1361  */
1362 int drm_bridge_get_modes(struct drm_bridge *bridge,
1363 			 struct drm_connector *connector)
1364 {
1365 	if (!(bridge->ops & DRM_BRIDGE_OP_MODES))
1366 		return 0;
1367 
1368 	return bridge->funcs->get_modes(bridge, connector);
1369 }
1370 EXPORT_SYMBOL_GPL(drm_bridge_get_modes);
1371 
1372 /**
1373  * drm_bridge_edid_read - read the EDID data of the connected display
1374  * @bridge: bridge control structure
1375  * @connector: the connector to read EDID for
1376  *
1377  * If the bridge supports output EDID retrieval, as reported by the
1378  * DRM_BRIDGE_OP_EDID bridge ops flag, call &drm_bridge_funcs.edid_read to get
1379  * the EDID and return it. Otherwise return NULL.
1380  *
1381  * RETURNS:
1382  * The retrieved EDID on success, or NULL otherwise.
1383  */
1384 const struct drm_edid *drm_bridge_edid_read(struct drm_bridge *bridge,
1385 					    struct drm_connector *connector)
1386 {
1387 	if (!(bridge->ops & DRM_BRIDGE_OP_EDID))
1388 		return NULL;
1389 
1390 	return bridge->funcs->edid_read(bridge, connector);
1391 }
1392 EXPORT_SYMBOL_GPL(drm_bridge_edid_read);
1393 
1394 /**
1395  * drm_bridge_hpd_enable - enable hot plug detection for the bridge
1396  * @bridge: bridge control structure
1397  * @cb: hot-plug detection callback
1398  * @data: data to be passed to the hot-plug detection callback
1399  *
1400  * Call &drm_bridge_funcs.hpd_enable if implemented and register the given @cb
1401  * and @data as hot plug notification callback. From now on the @cb will be
1402  * called with @data when an output status change is detected by the bridge,
1403  * until hot plug notification gets disabled with drm_bridge_hpd_disable().
1404  *
1405  * Hot plug detection is supported only if the DRM_BRIDGE_OP_HPD flag is set in
1406  * bridge->ops. This function shall not be called when the flag is not set.
1407  *
1408  * Only one hot plug detection callback can be registered at a time, it is an
1409  * error to call this function when hot plug detection is already enabled for
1410  * the bridge.
1411  */
1412 void drm_bridge_hpd_enable(struct drm_bridge *bridge,
1413 			   void (*cb)(void *data,
1414 				      enum drm_connector_status status),
1415 			   void *data)
1416 {
1417 	if (!(bridge->ops & DRM_BRIDGE_OP_HPD))
1418 		return;
1419 
1420 	mutex_lock(&bridge->hpd_mutex);
1421 
1422 	if (WARN(bridge->hpd_cb, "Hot plug detection already enabled\n"))
1423 		goto unlock;
1424 
1425 	bridge->hpd_cb = cb;
1426 	bridge->hpd_data = data;
1427 
1428 	if (bridge->funcs->hpd_enable)
1429 		bridge->funcs->hpd_enable(bridge);
1430 
1431 unlock:
1432 	mutex_unlock(&bridge->hpd_mutex);
1433 }
1434 EXPORT_SYMBOL_GPL(drm_bridge_hpd_enable);
1435 
1436 /**
1437  * drm_bridge_hpd_disable - disable hot plug detection for the bridge
1438  * @bridge: bridge control structure
1439  *
1440  * Call &drm_bridge_funcs.hpd_disable if implemented and unregister the hot
1441  * plug detection callback previously registered with drm_bridge_hpd_enable().
1442  * Once this function returns the callback will not be called by the bridge
1443  * when an output status change occurs.
1444  *
1445  * Hot plug detection is supported only if the DRM_BRIDGE_OP_HPD flag is set in
1446  * bridge->ops. This function shall not be called when the flag is not set.
1447  */
1448 void drm_bridge_hpd_disable(struct drm_bridge *bridge)
1449 {
1450 	if (!(bridge->ops & DRM_BRIDGE_OP_HPD))
1451 		return;
1452 
1453 	mutex_lock(&bridge->hpd_mutex);
1454 	if (bridge->funcs->hpd_disable)
1455 		bridge->funcs->hpd_disable(bridge);
1456 
1457 	bridge->hpd_cb = NULL;
1458 	bridge->hpd_data = NULL;
1459 	mutex_unlock(&bridge->hpd_mutex);
1460 }
1461 EXPORT_SYMBOL_GPL(drm_bridge_hpd_disable);
1462 
1463 /**
1464  * drm_bridge_hpd_notify - notify hot plug detection events
1465  * @bridge: bridge control structure
1466  * @status: output connection status
1467  *
1468  * Bridge drivers shall call this function to report hot plug events when they
1469  * detect a change in the output status, when hot plug detection has been
1470  * enabled by drm_bridge_hpd_enable().
1471  *
1472  * This function shall be called in a context that can sleep.
1473  */
1474 void drm_bridge_hpd_notify(struct drm_bridge *bridge,
1475 			   enum drm_connector_status status)
1476 {
1477 	mutex_lock(&bridge->hpd_mutex);
1478 	if (bridge->hpd_cb)
1479 		bridge->hpd_cb(bridge->hpd_data, status);
1480 	mutex_unlock(&bridge->hpd_mutex);
1481 }
1482 EXPORT_SYMBOL_GPL(drm_bridge_hpd_notify);
1483 
1484 #ifdef CONFIG_OF
1485 /**
1486  * of_drm_find_and_get_bridge - find the bridge corresponding to the device
1487  *                              node in the global bridge list
1488  * @np: device node
1489  *
1490  * The refcount of the returned bridge is incremented. Use drm_bridge_put()
1491  * when done with it.
1492  *
1493  * RETURNS:
1494  * drm_bridge control struct on success, NULL on failure
1495  */
1496 struct drm_bridge *of_drm_find_and_get_bridge(struct device_node *np)
1497 {
1498 	struct drm_bridge *bridge;
1499 
1500 	scoped_guard(mutex, &bridge_lock) {
1501 		list_for_each_entry(bridge, &bridge_list, list)
1502 			if (bridge->of_node == np)
1503 				return drm_bridge_get(bridge);
1504 	}
1505 
1506 	return NULL;
1507 }
1508 EXPORT_SYMBOL(of_drm_find_and_get_bridge);
1509 
1510 /**
1511  * of_drm_find_bridge - find the bridge corresponding to the device node in
1512  *			the global bridge list
1513  *
1514  * @np: device node
1515  *
1516  * This function is deprecated. Convert to of_drm_find_and_get_bridge()
1517  * instead for proper refcounting.
1518  *
1519  * The bridge returned by this function is not refcounted. This is
1520  * dangerous because the bridge might be deallocated even before the caller
1521  * has a chance to use it. To use this function you have to do one of:
1522  *
1523  * - get a reference with drm_bridge_get() as soon as possible to
1524  *   minimize the race window, and then drm_bridge_put() when no longer
1525  *   using the pointer
1526  *
1527  * - not call drm_bridge_get() or drm_bridge_put() at all, which used to
1528  *   be the correct practice before dynamic bridge lifetime was introduced
1529  *
1530  * - again, convert to of_drm_find_and_get_bridge(), which is the only safe
1531  *   thing to do
1532  *
1533  * RETURNS:
1534  * drm_bridge control struct on success, NULL on failure
1535  */
1536 struct drm_bridge *of_drm_find_bridge(struct device_node *np)
1537 {
1538 	struct drm_bridge *bridge = of_drm_find_and_get_bridge(np);
1539 
1540 	/*
1541 	 * We need to emulate the original semantics of
1542 	 * of_drm_find_bridge(), which was not getting any bridge
1543 	 * reference. Being now based on of_drm_find_and_get_bridge() which
1544 	 * gets a reference, put it before returning.
1545 	 */
1546 	drm_bridge_put(bridge);
1547 
1548 	return bridge;
1549 }
1550 EXPORT_SYMBOL(of_drm_find_bridge);
1551 #endif
1552 
1553 /**
1554  * devm_drm_put_bridge - Release a bridge reference obtained via devm
1555  * @dev: device that got the bridge via devm
1556  * @bridge: pointer to a struct drm_bridge obtained via devm
1557  *
1558  * Same as drm_bridge_put() for bridge pointers obtained via devm functions
1559  * such as devm_drm_bridge_alloc().
1560  *
1561  * This function is a temporary workaround and MUST NOT be used. Manual
1562  * handling of bridge lifetime is inherently unsafe.
1563  */
1564 void devm_drm_put_bridge(struct device *dev, struct drm_bridge *bridge)
1565 {
1566 	devm_release_action(dev, drm_bridge_put_void, bridge);
1567 }
1568 EXPORT_SYMBOL(devm_drm_put_bridge);
1569 
1570 static void drm_bridge_debugfs_show_bridge(struct drm_printer *p,
1571 					   struct drm_bridge *bridge,
1572 					   unsigned int idx,
1573 					   bool lingering)
1574 {
1575 	drm_printf(p, "bridge[%u]: %ps\n", idx, bridge->funcs);
1576 
1577 	drm_printf(p, "\trefcount: %u%s\n", kref_read(&bridge->refcount),
1578 		   lingering ? " [lingering]" : "");
1579 
1580 	drm_printf(p, "\ttype: [%d] %s\n",
1581 		   bridge->type,
1582 		   drm_get_connector_type_name(bridge->type));
1583 
1584 	/* The OF node could be freed after drm_bridge_remove() */
1585 	if (bridge->of_node && !lingering)
1586 		drm_printf(p, "\tOF: %pOFfc\n", bridge->of_node);
1587 
1588 	drm_printf(p, "\tops: [0x%x]", bridge->ops);
1589 	if (bridge->ops & DRM_BRIDGE_OP_DETECT)
1590 		drm_puts(p, " detect");
1591 	if (bridge->ops & DRM_BRIDGE_OP_EDID)
1592 		drm_puts(p, " edid");
1593 	if (bridge->ops & DRM_BRIDGE_OP_HPD)
1594 		drm_puts(p, " hpd");
1595 	if (bridge->ops & DRM_BRIDGE_OP_MODES)
1596 		drm_puts(p, " modes");
1597 	if (bridge->ops & DRM_BRIDGE_OP_HDMI)
1598 		drm_puts(p, " hdmi");
1599 	drm_puts(p, "\n");
1600 }
1601 
1602 static int allbridges_show(struct seq_file *m, void *data)
1603 {
1604 	struct drm_printer p = drm_seq_file_printer(m);
1605 	struct drm_bridge *bridge;
1606 	unsigned int idx = 0;
1607 
1608 	mutex_lock(&bridge_lock);
1609 
1610 	list_for_each_entry(bridge, &bridge_list, list)
1611 		drm_bridge_debugfs_show_bridge(&p, bridge, idx++, false);
1612 
1613 	list_for_each_entry(bridge, &bridge_lingering_list, list)
1614 		drm_bridge_debugfs_show_bridge(&p, bridge, idx++, true);
1615 
1616 	mutex_unlock(&bridge_lock);
1617 
1618 	return 0;
1619 }
1620 DEFINE_SHOW_ATTRIBUTE(allbridges);
1621 
1622 static int encoder_bridges_show(struct seq_file *m, void *data)
1623 {
1624 	struct drm_encoder *encoder = m->private;
1625 	struct drm_printer p = drm_seq_file_printer(m);
1626 	unsigned int idx = 0;
1627 
1628 	drm_for_each_bridge_in_chain_scoped(encoder, bridge)
1629 		drm_bridge_debugfs_show_bridge(&p, bridge, idx++, false);
1630 
1631 	return 0;
1632 }
1633 DEFINE_SHOW_ATTRIBUTE(encoder_bridges);
1634 
/* Create the global debugfs "bridges" file listing all registered bridges. */
void drm_bridge_debugfs_params(struct dentry *root)
{
	debugfs_create_file("bridges", 0444, root, NULL, &allbridges_fops);
}
1639 
/* Create the per-encoder debugfs "bridges" file listing the encoder chain. */
void drm_bridge_debugfs_encoder_params(struct dentry *root,
				       struct drm_encoder *encoder)
{
	/* bridges list */
	debugfs_create_file("bridges", 0444, root, encoder, &encoder_bridges_fops);
}
1646 
/* Module metadata for the DRM bridge core. */
MODULE_AUTHOR("Ajay Kumar <ajaykumar.rs@samsung.com>");
MODULE_DESCRIPTION("DRM bridge infrastructure");
MODULE_LICENSE("GPL and additional rights");
1650