xref: /linux/drivers/gpu/drm/drm_bridge.c (revision 9e26a3740cc08ef8bcdc5e5d824792cd677affce)
1 /*
2  * Copyright (c) 2014 Samsung Electronics Co., Ltd
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sub license,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the
12  * next paragraph) shall be included in all copies or substantial portions
13  * of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include <linux/debugfs.h>
25 #include <linux/err.h>
26 #include <linux/media-bus-format.h>
27 #include <linux/module.h>
28 #include <linux/mutex.h>
29 
30 #include <drm/drm_atomic_state_helper.h>
31 #include <drm/drm_bridge.h>
32 #include <drm/drm_debugfs.h>
33 #include <drm/drm_edid.h>
34 #include <drm/drm_encoder.h>
35 #include <drm/drm_file.h>
36 #include <drm/drm_of.h>
37 #include <drm/drm_print.h>
38 
39 #include "drm_crtc_internal.h"
40 
41 /**
42  * DOC: overview
43  *
44  * &struct drm_bridge represents a device that hangs on to an encoder. These are
45  * handy when a regular &drm_encoder entity isn't enough to represent the entire
46  * encoder chain.
47  *
48  * A bridge is always attached to a single &drm_encoder at a time, but can be
49  * either connected to it directly, or through a chain of bridges::
50  *
51  *     [ CRTC ---> ] Encoder ---> Bridge A ---> Bridge B
52  *
53  * Here, the output of the encoder feeds to bridge A, which in turn feeds to
54  * bridge B. Bridge chains can be arbitrarily long, and shall be fully linear:
55  * Chaining multiple bridges to the output of a bridge, or the same bridge to
56  * the output of different bridges, is not supported.
57  *
58  * Unlike planes, CRTCs, encoders and connectors, a &drm_bridge (like a
59  * &drm_panel) is not a &drm_mode_object entity and is therefore not visible
60  * to userspace. Bridges just provide additional hooks to get the desired
61  * output at the end of the encoder chain.
62  */
63 
64 /**
65  * DOC:	display driver integration
66  *
67  * Display drivers are responsible for linking encoders with the first bridge
68  * in the chains. This is done by acquiring the appropriate bridge with
69  * devm_drm_of_get_bridge(). Once acquired, the bridge shall be attached to the
70  * encoder with a call to drm_bridge_attach().
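 *
 * A minimal sketch of that sequence in a display driver (the port/endpoint
 * indices and the "dev" and "encoder" variables are illustrative assumptions,
 * not requirements)::
 *
 *     struct drm_bridge *bridge;
 *     int ret;
 *
 *     bridge = devm_drm_of_get_bridge(dev, dev->of_node, 0, 0);
 *     if (IS_ERR(bridge))
 *             return PTR_ERR(bridge);
 *
 *     ret = drm_bridge_attach(encoder, bridge, NULL, 0);
 *     if (ret)
 *             return ret;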
71  *
72  * Bridges are responsible for linking themselves with the next bridge in the
73  * chain, if any. This is done the same way as for encoders, with the call to
74  * drm_bridge_attach() occurring in the &drm_bridge_funcs.attach operation.
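 *
 * A hedged sketch of that chaining from a bridge driver's
 * &drm_bridge_funcs.attach implementation (the "my_bridge" wrapper and its
 * "next_bridge" pointer, typically looked up at probe time, are illustrative)::
 *
 *     static int my_bridge_attach(struct drm_bridge *bridge,
 *                                 struct drm_encoder *encoder,
 *                                 enum drm_bridge_attach_flags flags)
 *     {
 *             struct my_bridge *priv = container_of(bridge, struct my_bridge, bridge);
 *
 *             return drm_bridge_attach(encoder, priv->next_bridge, bridge, flags);
 *     }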
75  *
76  * Once these links are created, the bridges can participate along with encoder
77  * functions to perform mode validation and fixup (through
78  * drm_bridge_chain_mode_valid() and drm_atomic_bridge_chain_check()), mode
79  * setting (through drm_bridge_chain_mode_set()), enable (through
80  * drm_atomic_bridge_chain_pre_enable() and drm_atomic_bridge_chain_enable())
81  * and disable (through drm_atomic_bridge_chain_disable() and
82  * drm_atomic_bridge_chain_post_disable()). Those functions call the
83  * corresponding operations provided in &drm_bridge_funcs in sequence for all
84  * bridges in the chain.
85  *
86  * For display drivers that use the atomic helpers
87  * drm_atomic_helper_check_modeset(),
88  * drm_atomic_helper_commit_modeset_enables() and
89  * drm_atomic_helper_commit_modeset_disables() (either directly in hand-rolled
90  * commit check and commit tail handlers, or through the higher-level
91  * drm_atomic_helper_check() and drm_atomic_helper_commit_tail() or
92  * drm_atomic_helper_commit_tail_rpm() helpers), this is done transparently and
93  * requires no intervention from the driver. For other drivers, the relevant
94  * DRM bridge chain functions shall be called manually.
95  *
96  * Bridges also participate in implementing the &drm_connector at the end of
97  * the bridge chain. Display drivers may use the drm_bridge_connector_init()
98  * helper to create the &drm_connector, or implement it manually on top of the
99  * connector-related operations exposed by the bridge (see the overview
100  * documentation of bridge operations for more details).
101  */
102 
103 /**
104  * DOC: special care dsi
105  *
106  * The interaction between the bridges and other frameworks involved in
107  * the probing of the upstream driver and the bridge driver can be
108  * challenging. Indeed, there are multiple cases that need to be
109  * considered:
110  *
111  * - The upstream driver doesn't use the component framework and isn't a
112  *   MIPI-DSI host. In this case, the bridge driver will probe at some
113  *   point, and the upstream driver should defer its own probe by returning
114  *   -EPROBE_DEFER as long as the bridge driver hasn't probed.
115  *
116  * - The upstream driver doesn't use the component framework, but is a
117  *   MIPI-DSI host. The bridge device is controlled through MIPI-DCS
118  *   commands. In this case, the bridge device is a child of the display
119  *   device, and when it probes it is assured that the display device (and
120  *   MIPI-DSI host) is present. The upstream driver will be
121  *   assured that the bridge driver is connected between the
122  *   &mipi_dsi_host_ops.attach and &mipi_dsi_host_ops.detach operations.
123  *   Therefore, it must run mipi_dsi_host_register() in its probe
124  *   function, and then run drm_bridge_attach() in its
125  *   &mipi_dsi_host_ops.attach hook.
126  *
127  * - The upstream driver uses the component framework and is a MIPI-DSI
128  *   host. The bridge device is controlled through MIPI-DCS commands. This
129  *   is the same situation as above, except that the upstream driver can run
130  *   mipi_dsi_host_register() in either its probe or bind hook.
131  *
132  * - The upstream driver uses the component framework and is a MIPI-DSI
133  *   host. The bridge device uses a separate bus (such as I2C) to be
134  *   controlled. In this case, there's no correlation between the probe
135  *   of the bridge and upstream drivers, so care must be taken to avoid
136  *   an endless EPROBE_DEFER loop, with each driver waiting for the
137  *   other to probe.
138  *
139  * The ideal pattern to cover the last item (and all the others in the
140  * MIPI-DSI host driver case) is to split the operations like this:
141  *
142  * - The MIPI-DSI host driver must run mipi_dsi_host_register() in its
143  *   probe hook. It will make sure that the MIPI-DSI host sticks around,
144  *   and that the driver's bind can be called.
145  *
146  * - In its probe hook, the bridge driver must try to find its MIPI-DSI
147  *   host, register as a MIPI-DSI device and attach the MIPI-DSI device
148  *   to its host. The bridge driver is now functional.
149  *
150  * - In its &struct mipi_dsi_host_ops.attach hook, the MIPI-DSI host can
151  *   now add its component. Its bind hook will then be called, and since
152  *   the bridge driver is attached and registered, we can look it up and
153  *   attach it.
154  *
155  * At this point, we're now certain that both the upstream driver and
156  * the bridge driver are functional and we can't have a deadlock-like
157  * situation when probing.
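 *
 * A condensed sketch of the bridge-driver half of that pattern, using the
 * standard MIPI-DSI helpers (the I2C binding, the device_info contents and
 * the "host_node" lookup, typically resolved from a DT phandle, are
 * illustrative)::
 *
 *     static int my_bridge_probe(struct i2c_client *client)
 *     {
 *             const struct mipi_dsi_device_info info = {
 *                     .type = "my_bridge", .channel = 0, .node = NULL,
 *             };
 *             struct mipi_dsi_host *host;
 *             struct mipi_dsi_device *dsi;
 *
 *             host = of_find_mipi_dsi_host_by_node(host_node);
 *             if (!host)
 *                     return -EPROBE_DEFER;
 *
 *             dsi = devm_mipi_dsi_device_register_full(&client->dev, host, &info);
 *             if (IS_ERR(dsi))
 *                     return PTR_ERR(dsi);
 *
 *             return devm_mipi_dsi_attach(&client->dev, dsi);
 *     }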
158  */
159 
160 /**
161  * DOC: dsi bridge operations
162  *
163  * DSI host interfaces are expected to be implemented as bridges rather than
164  * encoders; however, there are a few aspects of their operation that need to
165  * be defined in order to provide a consistent interface.
166  *
167  * A DSI host should keep the PHY powered down until the pre_enable operation is
168  * called. All lanes are in an undefined idle state up to this point, and it
169  * must not be assumed that they are in LP-11.
170  * pre_enable should initialise the PHY, set the data lanes to LP-11, and the
171  * clock lane to either LP-11 or HS depending on the mode_flag
172  * %MIPI_DSI_CLOCK_NON_CONTINUOUS.
173  *
174  * Ordinarily the pre_enable of the downstream DSI peripheral bridge will have
175  * been called before that of the DSI host. If the DSI peripheral requires
176  * LP-11 and/or the clock lane to be in HS mode prior to pre_enable, it can set the
177  * &pre_enable_prev_first flag to request the pre_enable (and
178  * post_disable) order to be altered to enable the DSI host first.
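 *
 * A DSI peripheral driver requesting this ordering would typically just set,
 * before registering its bridge (a one-line sketch)::
 *
 *     bridge->pre_enable_prev_first = true;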
179  *
180  * Either the CRTC being enabled or the DSI host's enable operation should switch
181  * the host to actively transmitting video on the data lanes.
182  *
183  * The reverse also applies. The DSI host disable operation or stopping the CRTC
184  * should stop transmitting video, and the data lanes should return to the LP-11
185  * state. The DSI host &post_disable operation should disable the PHY.
186  * If the &pre_enable_prev_first flag is set, then the DSI peripheral's
187  * bridge &post_disable will be called before the DSI host's post_disable.
188  *
189  * Whilst it is valid to call &host_transfer prior to pre_enable or after
190  * post_disable, the exact state of the lanes is undefined at this point. The
191  * DSI host should initialise the interface, transmit the data, and then disable
192  * the interface again.
193  *
194  * Ultra Low Power State (ULPS) is not explicitly supported by DRM. If
195  * implemented, it therefore needs to be handled entirely within the DSI Host
196  * driver.
197  */
198 
199 static DEFINE_MUTEX(bridge_lock);
200 static LIST_HEAD(bridge_list);
201 
202 /**
203  * drm_bridge_add - add the given bridge to the global bridge list
204  *
205  * @bridge: bridge control structure
206  */
207 void drm_bridge_add(struct drm_bridge *bridge)
208 {
209 	mutex_init(&bridge->hpd_mutex);
210 
211 	if (bridge->ops & DRM_BRIDGE_OP_HDMI)
212 		bridge->ycbcr_420_allowed = !!(bridge->supported_formats &
213 					       BIT(HDMI_COLORSPACE_YUV420));
214 
215 	mutex_lock(&bridge_lock);
216 	list_add_tail(&bridge->list, &bridge_list);
217 	mutex_unlock(&bridge_lock);
218 }
219 EXPORT_SYMBOL(drm_bridge_add);
220 
221 static void drm_bridge_remove_void(void *bridge)
222 {
223 	drm_bridge_remove(bridge);
224 }
225 
226 /**
227  * devm_drm_bridge_add - devm managed version of drm_bridge_add()
228  *
229  * @dev: device to tie the bridge lifetime to
230  * @bridge: bridge control structure
231  *
232  * This is the managed version of drm_bridge_add() which automatically
233  * calls drm_bridge_remove() when @dev is unbound.
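 *
 * A minimal usage sketch from a bridge driver's probe function (the "priv"
 * wrapper and "my_bridge_funcs" are illustrative names)::
 *
 *     priv->bridge.funcs = &my_bridge_funcs;
 *     priv->bridge.of_node = dev->of_node;
 *     priv->bridge.type = DRM_MODE_CONNECTOR_DPI;
 *
 *     return devm_drm_bridge_add(dev, &priv->bridge);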
234  *
235  * Return: 0 if no error or negative error code.
236  */
237 int devm_drm_bridge_add(struct device *dev, struct drm_bridge *bridge)
238 {
239 	drm_bridge_add(bridge);
240 	return devm_add_action_or_reset(dev, drm_bridge_remove_void, bridge);
241 }
242 EXPORT_SYMBOL(devm_drm_bridge_add);
243 
244 /**
245  * drm_bridge_remove - remove the given bridge from the global bridge list
246  *
247  * @bridge: bridge control structure
248  */
249 void drm_bridge_remove(struct drm_bridge *bridge)
250 {
251 	mutex_lock(&bridge_lock);
252 	list_del_init(&bridge->list);
253 	mutex_unlock(&bridge_lock);
254 
255 	mutex_destroy(&bridge->hpd_mutex);
256 }
257 EXPORT_SYMBOL(drm_bridge_remove);
258 
259 static struct drm_private_state *
260 drm_bridge_atomic_duplicate_priv_state(struct drm_private_obj *obj)
261 {
262 	struct drm_bridge *bridge = drm_priv_to_bridge(obj);
263 	struct drm_bridge_state *state;
264 
265 	state = bridge->funcs->atomic_duplicate_state(bridge);
266 	return state ? &state->base : NULL;
267 }
268 
269 static void
270 drm_bridge_atomic_destroy_priv_state(struct drm_private_obj *obj,
271 				     struct drm_private_state *s)
272 {
273 	struct drm_bridge_state *state = drm_priv_to_bridge_state(s);
274 	struct drm_bridge *bridge = drm_priv_to_bridge(obj);
275 
276 	bridge->funcs->atomic_destroy_state(bridge, state);
277 }
278 
279 static const struct drm_private_state_funcs drm_bridge_priv_state_funcs = {
280 	.atomic_duplicate_state = drm_bridge_atomic_duplicate_priv_state,
281 	.atomic_destroy_state = drm_bridge_atomic_destroy_priv_state,
282 };
283 
284 static bool drm_bridge_is_atomic(struct drm_bridge *bridge)
285 {
286 	return bridge->funcs->atomic_reset != NULL;
287 }
288 
289 /**
290  * drm_bridge_attach - attach the bridge to an encoder's chain
291  *
292  * @encoder: DRM encoder
293  * @bridge: bridge to attach
294  * @previous: previous bridge in the chain (optional)
295  * @flags: DRM_BRIDGE_ATTACH_* flags
296  *
297  * Called by a KMS driver to link the bridge to an encoder's chain. The previous
298  * argument specifies the previous bridge in the chain. If NULL, the bridge is
299  * linked directly at the encoder's output. Otherwise it is linked at the
300  * previous bridge's output.
301  *
302  * If non-NULL, the previous bridge must already have been attached by a call
303  * to this function.
304  *
305  * Note that bridges attached to encoders are auto-detached during encoder
306  * cleanup in drm_encoder_cleanup(), so drm_bridge_attach() should generally
307  * *not* be balanced with a drm_bridge_detach() in driver code.
308  *
309  * RETURNS:
310  * Zero on success, error code on failure
311  */
312 int drm_bridge_attach(struct drm_encoder *encoder, struct drm_bridge *bridge,
313 		      struct drm_bridge *previous,
314 		      enum drm_bridge_attach_flags flags)
315 {
316 	int ret;
317 
318 	if (!encoder || !bridge)
319 		return -EINVAL;
320 
321 	if (previous && (!previous->dev || previous->encoder != encoder))
322 		return -EINVAL;
323 
324 	if (bridge->dev)
325 		return -EBUSY;
326 
327 	bridge->dev = encoder->dev;
328 	bridge->encoder = encoder;
329 
330 	if (previous)
331 		list_add(&bridge->chain_node, &previous->chain_node);
332 	else
333 		list_add(&bridge->chain_node, &encoder->bridge_chain);
334 
335 	if (bridge->funcs->attach) {
336 		ret = bridge->funcs->attach(bridge, encoder, flags);
337 		if (ret < 0)
338 			goto err_reset_bridge;
339 	}
340 
341 	if (drm_bridge_is_atomic(bridge)) {
342 		struct drm_bridge_state *state;
343 
344 		state = bridge->funcs->atomic_reset(bridge);
345 		if (IS_ERR(state)) {
346 			ret = PTR_ERR(state);
347 			goto err_detach_bridge;
348 		}
349 
350 		drm_atomic_private_obj_init(bridge->dev, &bridge->base,
351 					    &state->base,
352 					    &drm_bridge_priv_state_funcs);
353 	}
354 
355 	return 0;
356 
357 err_detach_bridge:
358 	if (bridge->funcs->detach)
359 		bridge->funcs->detach(bridge);
360 
361 err_reset_bridge:
362 	bridge->dev = NULL;
363 	bridge->encoder = NULL;
364 	list_del(&bridge->chain_node);
365 
366 	if (ret != -EPROBE_DEFER)
367 		DRM_ERROR("failed to attach bridge %pOF to encoder %s: %d\n",
368 			  bridge->of_node, encoder->name, ret);
369 	else
370 		dev_err_probe(encoder->dev->dev, -EPROBE_DEFER,
371 			      "failed to attach bridge %pOF to encoder %s\n",
372 			      bridge->of_node, encoder->name);
373 
374 	return ret;
375 }
376 EXPORT_SYMBOL(drm_bridge_attach);
377 
378 void drm_bridge_detach(struct drm_bridge *bridge)
379 {
380 	if (WARN_ON(!bridge))
381 		return;
382 
383 	if (WARN_ON(!bridge->dev))
384 		return;
385 
386 	if (drm_bridge_is_atomic(bridge))
387 		drm_atomic_private_obj_fini(&bridge->base);
388 
389 	if (bridge->funcs->detach)
390 		bridge->funcs->detach(bridge);
391 
392 	list_del(&bridge->chain_node);
393 	bridge->dev = NULL;
394 }
395 
396 /**
397  * DOC: bridge operations
398  *
399  * Bridge drivers expose operations through the &drm_bridge_funcs structure.
400  * The DRM internals (atomic and CRTC helpers) use the helpers defined in
401  * drm_bridge.c to call bridge operations. Those operations are divided in
402  * three big categories to support different parts of the bridge usage.
403  *
404  * - The encoder-related operations support control of the bridges in the
405  *   chain, and are roughly counterparts to the &drm_encoder_helper_funcs
406  *   operations. They are used by the legacy CRTC and the atomic modeset
407  *   helpers to perform mode validation, fixup and setting, and enable and
408  *   disable the bridge automatically.
409  *
410  *   The enable and disable operations are split in
411  *   &drm_bridge_funcs.pre_enable, &drm_bridge_funcs.enable,
412  *   &drm_bridge_funcs.disable and &drm_bridge_funcs.post_disable to provide
413  *   finer-grained control.
414  *
415  *   Bridge drivers may implement the legacy version of those operations, or
416  *   the atomic version (prefixed with atomic\_), in which case they shall also
417  *   implement the atomic state bookkeeping operations
418  *   (&drm_bridge_funcs.atomic_duplicate_state,
419  *   &drm_bridge_funcs.atomic_destroy_state and &drm_bridge_funcs.atomic_reset).
420  *   Mixing atomic and non-atomic versions of the operations is not supported.
421  *
422  * - The bus format negotiation operations
423  *   &drm_bridge_funcs.atomic_get_output_bus_fmts and
424  *   &drm_bridge_funcs.atomic_get_input_bus_fmts allow bridge drivers to
425  *   negotiate the formats transmitted between bridges in the chain when
426  *   multiple formats are supported. Negotiation for formats is performed
427  *   transparently for display drivers by the atomic modeset helpers. Only
428  *   atomic versions of those operations exist, bridge drivers that need to
429  *   implement them shall thus also implement the atomic version of the
430  *   encoder-related operations. This feature is not supported by the legacy
431  *   CRTC helpers.
432  *
433  * - The connector-related operations support implementing a &drm_connector
434  *   based on a chain of bridges. DRM bridges traditionally create a
435  *   &drm_connector for bridges meant to be used at the end of the chain. This
436  *   puts additional burden on bridge drivers, especially for bridges that may
437  *   be used in the middle of a chain or at the end of it. Furthermore, it
438  *   requires all operations of the &drm_connector to be handled by a single
439  *   bridge, which doesn't always match the hardware architecture.
440  *
441  *   To simplify bridge drivers and make the connector implementation more
442  *   flexible, a new model allows bridges to unconditionally skip creation of
443  *   &drm_connector and instead expose &drm_bridge_funcs operations to support
444  *   an externally-implemented &drm_connector. Those operations are
445  *   &drm_bridge_funcs.detect, &drm_bridge_funcs.get_modes,
446  *   &drm_bridge_funcs.edid_read, &drm_bridge_funcs.hpd_notify,
447  *   &drm_bridge_funcs.hpd_enable and &drm_bridge_funcs.hpd_disable. When
448  *   implemented, display drivers shall create a &drm_connector instance for
449  *   each chain of bridges, and implement those connector instances based on
450  *   the bridge connector operations.
451  *
452  *   Bridge drivers shall implement the connector-related operations for all
453  *   the features that the bridge hardware supports. For instance, if a bridge
454  *   supports reading EDID, the &drm_bridge_funcs.edid_read operation shall be
455  *   implemented. This however doesn't mean that the DDC lines are wired to the
456  *   bridge on a particular platform, as they could also be connected to an I2C
457  *   controller of the SoC. Support for the connector-related operations on the
458  *   running platform is reported through the &drm_bridge.ops flags. Bridge
459  *   drivers shall detect which operations they can support on the platform
460  *   (usually this information is provided by ACPI or DT), and set the
461  *   &drm_bridge.ops flags for all supported operations. A flag shall only be
462  *   set if the corresponding &drm_bridge_funcs operation is implemented, but
463  *   an implemented operation doesn't necessarily imply that the corresponding
464  *   flag will be set. Display drivers shall use the &drm_bridge.ops flags to
465  *   decide which bridge to delegate a connector operation to. This mechanism
466  *   allows providing a single static const &drm_bridge_funcs instance in
467  *   bridge drivers, improving security by storing function pointers in
468  *   read-only memory.
469  *
470  *   In order to ease transition, bridge drivers may support both the old and
471  *   new models by making connector creation optional and implementing the
472  *   connector-related bridge operations. Connector creation is then controlled
473  *   by the flags argument to the drm_bridge_attach() function. Display drivers
474  *   that support the new model and create connectors themselves shall set the
475  *   %DRM_BRIDGE_ATTACH_NO_CONNECTOR flag, and bridge drivers shall then skip
476  *   connector creation. For intermediate bridges in the chain, the flag shall
477  *   be passed to the drm_bridge_attach() call for the downstream bridge.
478  *   Bridge drivers that implement only the new model shall return an error
479  *   from their &drm_bridge_funcs.attach handler when the
480  *   %DRM_BRIDGE_ATTACH_NO_CONNECTOR flag is not set. New display drivers
481  *   should use the new model, and convert the bridge drivers they use if
482  *   needed, in order to gradually transition to the new model.
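 *
 *   A minimal sketch of the new model from the display-driver side, assuming
 *   every bridge in the chain supports %DRM_BRIDGE_ATTACH_NO_CONNECTOR (the
 *   "drm", "encoder" and "bridge" variables are illustrative)::
 *
 *     ret = drm_bridge_attach(encoder, bridge, NULL,
 *                             DRM_BRIDGE_ATTACH_NO_CONNECTOR);
 *     if (ret)
 *             return ret;
 *
 *     connector = drm_bridge_connector_init(drm, encoder);
 *     if (IS_ERR(connector))
 *             return PTR_ERR(connector);
 *
 *     drm_connector_attach_encoder(connector, encoder);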
483  */
484 
485 /**
486  * drm_bridge_chain_mode_valid - validate the mode against all bridges in the
487  *				 encoder chain.
488  * @bridge: bridge control structure
489  * @info: display info against which the mode shall be validated
490  * @mode: desired mode to be validated
491  *
492  * Calls &drm_bridge_funcs.mode_valid for all the bridges in the encoder
493  * chain, starting from the first bridge to the last. If a bridge does not
494  * accept the mode, the function returns that bridge's error code.
495  *
496  * Note: the bridge passed should be the one closest to the encoder.
497  *
498  * RETURNS:
499  * MODE_OK on success, a drm_mode_status enum error code on failure
500  */
501 enum drm_mode_status
502 drm_bridge_chain_mode_valid(struct drm_bridge *bridge,
503 			    const struct drm_display_info *info,
504 			    const struct drm_display_mode *mode)
505 {
506 	struct drm_encoder *encoder;
507 
508 	if (!bridge)
509 		return MODE_OK;
510 
511 	encoder = bridge->encoder;
512 	list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
513 		enum drm_mode_status ret;
514 
515 		if (!bridge->funcs->mode_valid)
516 			continue;
517 
518 		ret = bridge->funcs->mode_valid(bridge, info, mode);
519 		if (ret != MODE_OK)
520 			return ret;
521 	}
522 
523 	return MODE_OK;
524 }
525 EXPORT_SYMBOL(drm_bridge_chain_mode_valid);
526 
527 /**
528  * drm_bridge_chain_mode_set - set proposed mode for all bridges in the
529  *			       encoder chain
530  * @bridge: bridge control structure
531  * @mode: desired mode to be set for the encoder chain
532  * @adjusted_mode: updated mode that works for this encoder chain
533  *
534  * Calls &drm_bridge_funcs.mode_set op for all the bridges in the
535  * encoder chain, starting from the first bridge to the last.
536  *
537  * Note: the bridge passed should be the one closest to the encoder
538  */
539 void drm_bridge_chain_mode_set(struct drm_bridge *bridge,
540 			       const struct drm_display_mode *mode,
541 			       const struct drm_display_mode *adjusted_mode)
542 {
543 	struct drm_encoder *encoder;
544 
545 	if (!bridge)
546 		return;
547 
548 	encoder = bridge->encoder;
549 	list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
550 		if (bridge->funcs->mode_set)
551 			bridge->funcs->mode_set(bridge, mode, adjusted_mode);
552 	}
553 }
554 EXPORT_SYMBOL(drm_bridge_chain_mode_set);
555 
556 /**
557  * drm_atomic_bridge_chain_disable - disables all bridges in the encoder chain
558  * @bridge: bridge control structure
559  * @state: atomic state being committed
560  *
561  * Calls &drm_bridge_funcs.atomic_disable (falls back on
562  * &drm_bridge_funcs.disable) op for all the bridges in the encoder chain,
563  * starting from the last bridge to the first. These are called before calling
564  * &drm_encoder_helper_funcs.atomic_disable
565  *
566  * Note: the bridge passed should be the one closest to the encoder
567  */
568 void drm_atomic_bridge_chain_disable(struct drm_bridge *bridge,
569 				     struct drm_atomic_state *state)
570 {
571 	struct drm_encoder *encoder;
572 	struct drm_bridge *iter;
573 
574 	if (!bridge)
575 		return;
576 
577 	encoder = bridge->encoder;
578 	list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) {
579 		if (iter->funcs->atomic_disable) {
580 			iter->funcs->atomic_disable(iter, state);
581 		} else if (iter->funcs->disable) {
582 			iter->funcs->disable(iter);
583 		}
584 
585 		if (iter == bridge)
586 			break;
587 	}
588 }
589 EXPORT_SYMBOL(drm_atomic_bridge_chain_disable);
590 
591 static void drm_atomic_bridge_call_post_disable(struct drm_bridge *bridge,
592 						struct drm_atomic_state *state)
593 {
594 	if (state && bridge->funcs->atomic_post_disable)
595 		bridge->funcs->atomic_post_disable(bridge, state);
596 	else if (bridge->funcs->post_disable)
597 		bridge->funcs->post_disable(bridge);
598 }
599 
600 /**
601  * drm_atomic_bridge_chain_post_disable - cleans up after disabling all bridges
602  *					  in the encoder chain
603  * @bridge: bridge control structure
604  * @state: atomic state being committed
605  *
606  * Calls &drm_bridge_funcs.atomic_post_disable (falls back on
607  * &drm_bridge_funcs.post_disable) op for all the bridges in the encoder chain,
608  * starting from the first bridge to the last. These are called after completing
609  * &drm_encoder_helper_funcs.atomic_disable
610  *
611  * If a bridge sets @pre_enable_prev_first, then the @post_disable for that
612  * bridge will be called before the previous one to reverse the @pre_enable
613  * calling direction.
614  *
615  * Example:
616  * Bridge A ---> Bridge B ---> Bridge C ---> Bridge D ---> Bridge E
617  *
618  * With the pre_enable_prev_first flag enabled in Bridges B, D and E, the
619  * resulting @post_disable order would be:
620  * Bridge B, Bridge A, Bridge E, Bridge D, Bridge C.
621  *
622  * Note: the bridge passed should be the one closest to the encoder
623  */
624 void drm_atomic_bridge_chain_post_disable(struct drm_bridge *bridge,
625 					  struct drm_atomic_state *state)
626 {
627 	struct drm_encoder *encoder;
628 	struct drm_bridge *next, *limit;
629 
630 	if (!bridge)
631 		return;
632 
633 	encoder = bridge->encoder;
634 
635 	list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
636 		limit = NULL;
637 
638 		if (!list_is_last(&bridge->chain_node, &encoder->bridge_chain)) {
639 			next = list_next_entry(bridge, chain_node);
640 
641 			if (next->pre_enable_prev_first) {
642 				/* next bridge had requested that prev
643 				 * was enabled first, so disabled last
644 				 */
645 				limit = next;
646 
647 				/* Find the next bridge that has NOT requested
648 				 * prev to be enabled first / disabled last
649 				 */
650 				list_for_each_entry_from(next, &encoder->bridge_chain,
651 							 chain_node) {
652 					if (!next->pre_enable_prev_first) {
653 						next = list_prev_entry(next, chain_node);
654 						limit = next;
655 						break;
656 					}
657 
658 					if (list_is_last(&next->chain_node,
659 							 &encoder->bridge_chain)) {
660 						limit = next;
661 						break;
662 					}
663 				}
664 
665 				/* Call these bridges in reverse order */
666 				list_for_each_entry_from_reverse(next, &encoder->bridge_chain,
667 								 chain_node) {
668 					if (next == bridge)
669 						break;
670 
671 					drm_atomic_bridge_call_post_disable(next,
672 									    state);
673 				}
674 			}
675 		}
676 
677 		drm_atomic_bridge_call_post_disable(bridge, state);
678 
679 		if (limit)
680 			/* Jump all bridges that we have already post_disabled */
681 			bridge = limit;
682 	}
683 }
684 EXPORT_SYMBOL(drm_atomic_bridge_chain_post_disable);
685 
686 static void drm_atomic_bridge_call_pre_enable(struct drm_bridge *bridge,
687 					      struct drm_atomic_state *state)
688 {
689 	if (state && bridge->funcs->atomic_pre_enable)
690 		bridge->funcs->atomic_pre_enable(bridge, state);
691 	else if (bridge->funcs->pre_enable)
692 		bridge->funcs->pre_enable(bridge);
693 }
694 
695 /**
696  * drm_atomic_bridge_chain_pre_enable - prepares for enabling all bridges in
697  *					the encoder chain
698  * @bridge: bridge control structure
699  * @state: atomic state being committed
700  *
701  * Calls &drm_bridge_funcs.atomic_pre_enable (falls back on
702  * &drm_bridge_funcs.pre_enable) op for all the bridges in the encoder chain,
703  * starting from the last bridge to the first. These are called before calling
704  * &drm_encoder_helper_funcs.atomic_enable
705  *
706  * If a bridge sets @pre_enable_prev_first, then the pre_enable for the
707  * prev bridge will be called before pre_enable of this bridge.
708  *
709  * Example:
710  * Bridge A ---> Bridge B ---> Bridge C ---> Bridge D ---> Bridge E
711  *
712  * With the pre_enable_prev_first flag enabled in Bridges B, D and E, the
713  * resulting @pre_enable order would be:
714  * Bridge C, Bridge D, Bridge E, Bridge A, Bridge B.
715  *
716  * Note: the bridge passed should be the one closest to the encoder
717  */
718 void drm_atomic_bridge_chain_pre_enable(struct drm_bridge *bridge,
719 					struct drm_atomic_state *state)
720 {
721 	struct drm_encoder *encoder;
722 	struct drm_bridge *iter, *next, *limit;
723 
724 	if (!bridge)
725 		return;
726 
727 	encoder = bridge->encoder;
728 
729 	list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) {
730 		if (iter->pre_enable_prev_first) {
731 			next = iter;
732 			limit = bridge;
733 			list_for_each_entry_from_reverse(next,
734 							 &encoder->bridge_chain,
735 							 chain_node) {
736 				if (next == bridge)
737 					break;
738 
739 				if (!next->pre_enable_prev_first) {
740 					/* Found first bridge that does NOT
741 					 * request prev to be enabled first
742 					 */
743 					limit = next;
744 					break;
745 				}
746 			}
747 
748 			list_for_each_entry_from(next, &encoder->bridge_chain, chain_node) {
749 				/* Call requested prev bridge pre_enable
750 				 * in order.
751 				 */
752 				if (next == iter)
753 					/* At the first bridge to request prev
754 					 * bridges called first.
755 					 */
756 					break;
757 
758 				drm_atomic_bridge_call_pre_enable(next, state);
759 			}
760 		}
761 
762 		drm_atomic_bridge_call_pre_enable(iter, state);
763 
764 		if (iter->pre_enable_prev_first)
765 			/* Jump all bridges that we have already pre_enabled */
766 			iter = limit;
767 
768 		if (iter == bridge)
769 			break;
770 	}
771 }
772 EXPORT_SYMBOL(drm_atomic_bridge_chain_pre_enable);
773 
774 /**
775  * drm_atomic_bridge_chain_enable - enables all bridges in the encoder chain
776  * @bridge: bridge control structure
777  * @state: atomic state being committed
778  *
779  * Calls &drm_bridge_funcs.atomic_enable (falls back on
780  * &drm_bridge_funcs.enable) op for all the bridges in the encoder chain,
781  * starting from the first bridge to the last. These are called after completing
782  * &drm_encoder_helper_funcs.atomic_enable
783  *
784  * Note: the bridge passed should be the one closest to the encoder
785  */
786 void drm_atomic_bridge_chain_enable(struct drm_bridge *bridge,
787 				    struct drm_atomic_state *state)
788 {
789 	struct drm_encoder *encoder;
790 
791 	if (!bridge)
792 		return;
793 
794 	encoder = bridge->encoder;
795 	list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
796 		if (bridge->funcs->atomic_enable) {
797 			bridge->funcs->atomic_enable(bridge, state);
798 		} else if (bridge->funcs->enable) {
799 			bridge->funcs->enable(bridge);
800 		}
801 	}
802 }
803 EXPORT_SYMBOL(drm_atomic_bridge_chain_enable);
804 
805 static int drm_atomic_bridge_check(struct drm_bridge *bridge,
806 				   struct drm_crtc_state *crtc_state,
807 				   struct drm_connector_state *conn_state)
808 {
809 	if (bridge->funcs->atomic_check) {
810 		struct drm_bridge_state *bridge_state;
811 		int ret;
812 
813 		bridge_state = drm_atomic_get_new_bridge_state(crtc_state->state,
814 							       bridge);
815 		if (WARN_ON(!bridge_state))
816 			return -EINVAL;
817 
818 		ret = bridge->funcs->atomic_check(bridge, bridge_state,
819 						  crtc_state, conn_state);
820 		if (ret)
821 			return ret;
822 	} else if (bridge->funcs->mode_fixup) {
823 		if (!bridge->funcs->mode_fixup(bridge, &crtc_state->mode,
824 					       &crtc_state->adjusted_mode))
825 			return -EINVAL;
826 	}
827 
828 	return 0;
829 }
830 
831 static int select_bus_fmt_recursive(struct drm_bridge *first_bridge,
832 				    struct drm_bridge *cur_bridge,
833 				    struct drm_crtc_state *crtc_state,
834 				    struct drm_connector_state *conn_state,
835 				    u32 out_bus_fmt)
836 {
837 	unsigned int i, num_in_bus_fmts = 0;
838 	struct drm_bridge_state *cur_state;
839 	struct drm_bridge *prev_bridge;
840 	u32 *in_bus_fmts;
841 	int ret;
842 
843 	prev_bridge = drm_bridge_get_prev_bridge(cur_bridge);
844 	cur_state = drm_atomic_get_new_bridge_state(crtc_state->state,
845 						    cur_bridge);
846 
847 	/*
848 	 * If bus format negotiation is not supported by this bridge, let's
849 	 * pass MEDIA_BUS_FMT_FIXED to the previous bridge in the chain and
850 	 * hope that it can handle this situation gracefully (by providing
851 	 * appropriate default values).
852 	 */
853 	if (!cur_bridge->funcs->atomic_get_input_bus_fmts) {
854 		if (cur_bridge != first_bridge) {
855 			ret = select_bus_fmt_recursive(first_bridge,
856 						       prev_bridge, crtc_state,
857 						       conn_state,
858 						       MEDIA_BUS_FMT_FIXED);
859 			if (ret)
860 				return ret;
861 		}
862 
863 		/*
864 		 * Driver does not implement the atomic state hooks, but that's
865 		 * fine, as long as it does not access the bridge state.
866 		 */
867 		if (cur_state) {
868 			cur_state->input_bus_cfg.format = MEDIA_BUS_FMT_FIXED;
869 			cur_state->output_bus_cfg.format = out_bus_fmt;
870 		}
871 
872 		return 0;
873 	}
874 
875 	/*
876 	 * If the driver implements ->atomic_get_input_bus_fmts() it
877 	 * should also implement the atomic state hooks.
878 	 */
879 	if (WARN_ON(!cur_state))
880 		return -EINVAL;
881 
882 	in_bus_fmts = cur_bridge->funcs->atomic_get_input_bus_fmts(cur_bridge,
883 							cur_state,
884 							crtc_state,
885 							conn_state,
886 							out_bus_fmt,
887 							&num_in_bus_fmts);
888 	if (!num_in_bus_fmts)
889 		return -ENOTSUPP;
890 	else if (!in_bus_fmts)
891 		return -ENOMEM;
892 
893 	if (first_bridge == cur_bridge) {
894 		cur_state->input_bus_cfg.format = in_bus_fmts[0];
895 		cur_state->output_bus_cfg.format = out_bus_fmt;
896 		kfree(in_bus_fmts);
897 		return 0;
898 	}
899 
900 	for (i = 0; i < num_in_bus_fmts; i++) {
901 		ret = select_bus_fmt_recursive(first_bridge, prev_bridge,
902 					       crtc_state, conn_state,
903 					       in_bus_fmts[i]);
904 		if (ret != -ENOTSUPP)
905 			break;
906 	}
907 
908 	if (!ret) {
909 		cur_state->input_bus_cfg.format = in_bus_fmts[i];
910 		cur_state->output_bus_cfg.format = out_bus_fmt;
911 	}
912 
913 	kfree(in_bus_fmts);
914 	return ret;
915 }
916 
917 /*
918  * This function is called by &drm_atomic_bridge_chain_check() just before
919  * calling &drm_bridge_funcs.atomic_check() on all elements of the chain.
920  * It performs bus format negotiation between bridge elements. The negotiation
921  * happens in reverse order, starting from the last element in the chain up to
922  * @bridge.
923  *
924  * Negotiation starts by retrieving supported output bus formats on the last
925  * bridge element and testing them one by one. The test is recursive, meaning
926  * that for each tested output format, the whole chain will be walked backward,
927  * and each element will have to choose an input bus format that can be
928  * transcoded to the requested output format. When a bridge element does not
929  * support transcoding into a specific output format -ENOTSUPP is returned and
930  * the next bridge element will have to try a different format. If none of the
931  * combinations worked, -ENOTSUPP is returned and the atomic modeset will fail.
932  *
933  * This implementation relies on
934  * &drm_bridge_funcs.atomic_get_output_bus_fmts() and
935  * &drm_bridge_funcs.atomic_get_input_bus_fmts() to gather supported
936  * input/output formats.
937  *
938  * When &drm_bridge_funcs.atomic_get_output_bus_fmts() is not implemented by
939  * the last element of the chain, &drm_atomic_bridge_chain_select_bus_fmts()
940  * tries a single format: &drm_connector.display_info.bus_formats[0] if
941  * available, MEDIA_BUS_FMT_FIXED otherwise.
942  *
943  * When &drm_bridge_funcs.atomic_get_input_bus_fmts() is not implemented,
944  * &drm_atomic_bridge_chain_select_bus_fmts() skips the negotiation on the
945  * bridge element that lacks this hook and asks the previous element in the
946  * chain to try MEDIA_BUS_FMT_FIXED. It's up to bridge drivers to decide what
947  * to do in that case (fail if they want to enforce bus format negotiation, or
948  * provide a reasonable default if they need to support pipelines where not
949  * all elements support bus format negotiation).
950  */
951 static int
952 drm_atomic_bridge_chain_select_bus_fmts(struct drm_bridge *bridge,
953 					struct drm_crtc_state *crtc_state,
954 					struct drm_connector_state *conn_state)
955 {
956 	struct drm_connector *conn = conn_state->connector;
957 	struct drm_encoder *encoder = bridge->encoder;
958 	struct drm_bridge_state *last_bridge_state;
959 	unsigned int i, num_out_bus_fmts = 0;
960 	struct drm_bridge *last_bridge;
961 	u32 *out_bus_fmts;
962 	int ret = 0;
963 
964 	last_bridge = list_last_entry(&encoder->bridge_chain,
965 				      struct drm_bridge, chain_node);
966 	last_bridge_state = drm_atomic_get_new_bridge_state(crtc_state->state,
967 							    last_bridge);
968 
969 	if (last_bridge->funcs->atomic_get_output_bus_fmts) {
970 		const struct drm_bridge_funcs *funcs = last_bridge->funcs;
971 
972 		/*
973 		 * If the driver implements ->atomic_get_output_bus_fmts() it
974 		 * should also implement the atomic state hooks.
975 		 */
976 		if (WARN_ON(!last_bridge_state))
977 			return -EINVAL;
978 
979 		out_bus_fmts = funcs->atomic_get_output_bus_fmts(last_bridge,
980 							last_bridge_state,
981 							crtc_state,
982 							conn_state,
983 							&num_out_bus_fmts);
984 		if (!num_out_bus_fmts)
985 			return -ENOTSUPP;
986 		else if (!out_bus_fmts)
987 			return -ENOMEM;
988 	} else {
989 		num_out_bus_fmts = 1;
990 		out_bus_fmts = kmalloc(sizeof(*out_bus_fmts), GFP_KERNEL);
991 		if (!out_bus_fmts)
992 			return -ENOMEM;
993 
994 		if (conn->display_info.num_bus_formats &&
995 		    conn->display_info.bus_formats)
996 			out_bus_fmts[0] = conn->display_info.bus_formats[0];
997 		else
998 			out_bus_fmts[0] = MEDIA_BUS_FMT_FIXED;
999 	}
1000 
1001 	for (i = 0; i < num_out_bus_fmts; i++) {
1002 		ret = select_bus_fmt_recursive(bridge, last_bridge, crtc_state,
1003 					       conn_state, out_bus_fmts[i]);
1004 		if (ret != -ENOTSUPP)
1005 			break;
1006 	}
1007 
1008 	kfree(out_bus_fmts);
1009 
1010 	return ret;
1011 }
1012 
1013 static void
1014 drm_atomic_bridge_propagate_bus_flags(struct drm_bridge *bridge,
1015 				      struct drm_connector *conn,
1016 				      struct drm_atomic_state *state)
1017 {
1018 	struct drm_bridge_state *bridge_state, *next_bridge_state;
1019 	struct drm_bridge *next_bridge;
1020 	u32 output_flags = 0;
1021 
1022 	bridge_state = drm_atomic_get_new_bridge_state(state, bridge);
1023 
1024 	/* No bridge state attached to this bridge => nothing to propagate. */
1025 	if (!bridge_state)
1026 		return;
1027 
1028 	next_bridge = drm_bridge_get_next_bridge(bridge);
1029 
1030 	/*
1031 	 * Let's try to apply the most common case here, that is, propagate
1032 	 * display_info flags for the last bridge, and propagate the input
1033 	 * flags of the next bridge element to the output end of the current
1034 	 * bridge when the bridge is not the last one.
1035 	 * There are exceptions to this rule, like when signal inversion is
1036 	 * happening at the board level, but that's something drivers can deal
1037 	 * with from their &drm_bridge_funcs.atomic_check() implementation by
1038 	 * simply overriding the flags value we've set here.
1039 	 */
1040 	if (!next_bridge) {
1041 		output_flags = conn->display_info.bus_flags;
1042 	} else {
1043 		next_bridge_state = drm_atomic_get_new_bridge_state(state,
1044 								next_bridge);
1045 		/*
1046 		 * No bridge state attached to the next bridge, just leave the
1047 		 * flags to 0.
1048 		 */
1049 		if (next_bridge_state)
1050 			output_flags = next_bridge_state->input_bus_cfg.flags;
1051 	}
1052 
1053 	bridge_state->output_bus_cfg.flags = output_flags;
1054 
1055 	/*
1056 	 * Propagate the output flags to the input end of the bridge. Again, it's
1057 	 * not necessarily what all bridges want, but that's what most of them
1058 	 * do, and by doing that by default we avoid forcing drivers to
1059 	 * duplicate the "dummy propagation" logic.
1060 	 */
1061 	bridge_state->input_bus_cfg.flags = output_flags;
1062 }
1063 
1064 /**
1065  * drm_atomic_bridge_chain_check() - Do an atomic check on the bridge chain
1066  * @bridge: bridge control structure
1067  * @crtc_state: new CRTC state
1068  * @conn_state: new connector state
1069  *
1070  * First trigger a bus format negotiation before calling
1071  * &drm_bridge_funcs.atomic_check() (falls back on
1072  * &drm_bridge_funcs.mode_fixup()) op for all the bridges in the encoder chain,
1073  * starting from the last bridge to the first. These are called before calling
1074  * &drm_encoder_helper_funcs.atomic_check()
1075  *
1076  * RETURNS:
1077  * 0 on success, a negative error code on failure
1078  */
1079 int drm_atomic_bridge_chain_check(struct drm_bridge *bridge,
1080 				  struct drm_crtc_state *crtc_state,
1081 				  struct drm_connector_state *conn_state)
1082 {
1083 	struct drm_connector *conn = conn_state->connector;
1084 	struct drm_encoder *encoder;
1085 	struct drm_bridge *iter;
1086 	int ret;
1087 
1088 	if (!bridge)
1089 		return 0;
1090 
1091 	ret = drm_atomic_bridge_chain_select_bus_fmts(bridge, crtc_state,
1092 						      conn_state);
1093 	if (ret)
1094 		return ret;
1095 
1096 	encoder = bridge->encoder;
1097 	list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) {
1098 		int ret;
1099 
1100 		/*
1101 		 * Bus flags are propagated by default. If a bridge needs to
1102 		 * tweak the input bus flags for any reason, it should happen
1103 		 * in its &drm_bridge_funcs.atomic_check() implementation such
1104 		 * that preceding bridges in the chain can propagate the new
1105 		 * bus flags.
1106 		 */
1107 		drm_atomic_bridge_propagate_bus_flags(iter, conn,
1108 						      crtc_state->state);
1109 
1110 		ret = drm_atomic_bridge_check(iter, crtc_state, conn_state);
1111 		if (ret)
1112 			return ret;
1113 
1114 		if (iter == bridge)
1115 			break;
1116 	}
1117 
1118 	return 0;
1119 }
1120 EXPORT_SYMBOL(drm_atomic_bridge_chain_check);
1121 
1122 /**
1123  * drm_bridge_detect - check if anything is attached to the bridge output
1124  * @bridge: bridge control structure
1125  *
1126  * If the bridge supports output detection, as reported by the
1127  * DRM_BRIDGE_OP_DETECT bridge ops flag, call &drm_bridge_funcs.detect for the
1128  * bridge and return the connection status. Otherwise return
1129  * connector_status_unknown.
1130  *
1131  * RETURNS:
1132  * The detection status on success, or connector_status_unknown if the bridge
1133  * doesn't support output detection.
1134  */
1135 enum drm_connector_status drm_bridge_detect(struct drm_bridge *bridge)
1136 {
1137 	if (!(bridge->ops & DRM_BRIDGE_OP_DETECT))
1138 		return connector_status_unknown;
1139 
1140 	return bridge->funcs->detect(bridge);
1141 }
1142 EXPORT_SYMBOL_GPL(drm_bridge_detect);
1143 
1144 /**
1145  * drm_bridge_get_modes - fill all modes currently valid for the sink into the
1146  * @connector
1147  * @bridge: bridge control structure
1148  * @connector: the connector to fill with modes
1149  *
1150  * If the bridge supports output modes retrieval, as reported by the
1151  * DRM_BRIDGE_OP_MODES bridge ops flag, call &drm_bridge_funcs.get_modes to
1152  * fill the connector with all valid modes and return the number of modes
1153  * added. Otherwise return 0.
1154  *
1155  * RETURNS:
1156  * The number of modes added to the connector.
1157  */
1158 int drm_bridge_get_modes(struct drm_bridge *bridge,
1159 			 struct drm_connector *connector)
1160 {
1161 	if (!(bridge->ops & DRM_BRIDGE_OP_MODES))
1162 		return 0;
1163 
1164 	return bridge->funcs->get_modes(bridge, connector);
1165 }
1166 EXPORT_SYMBOL_GPL(drm_bridge_get_modes);
1167 
1168 /**
1169  * drm_bridge_edid_read - read the EDID data of the connected display
1170  * @bridge: bridge control structure
1171  * @connector: the connector to read EDID for
1172  *
1173  * If the bridge supports output EDID retrieval, as reported by the
1174  * DRM_BRIDGE_OP_EDID bridge ops flag, call &drm_bridge_funcs.edid_read to get
1175  * the EDID and return it. Otherwise return NULL.
1176  *
1177  * RETURNS:
1178  * The retrieved EDID on success, or NULL otherwise.
1179  */
1180 const struct drm_edid *drm_bridge_edid_read(struct drm_bridge *bridge,
1181 					    struct drm_connector *connector)
1182 {
1183 	if (!(bridge->ops & DRM_BRIDGE_OP_EDID))
1184 		return NULL;
1185 
1186 	return bridge->funcs->edid_read(bridge, connector);
1187 }
1188 EXPORT_SYMBOL_GPL(drm_bridge_edid_read);
1189 
1190 /**
1191  * drm_bridge_hpd_enable - enable hot plug detection for the bridge
1192  * @bridge: bridge control structure
1193  * @cb: hot-plug detection callback
1194  * @data: data to be passed to the hot-plug detection callback
1195  *
1196  * Call &drm_bridge_funcs.hpd_enable if implemented and register the given @cb
1197  * and @data as hot plug notification callback. From now on the @cb will be
1198  * called with @data when an output status change is detected by the bridge,
1199  * until hot plug notification gets disabled with drm_bridge_hpd_disable().
1200  *
1201  * Hot plug detection is supported only if the DRM_BRIDGE_OP_HPD flag is set in
1202  * bridge->ops. This function shall not be called when the flag is not set.
1203  *
1204  * Only one hot plug detection callback can be registered at a time; it is an
1205  * error to call this function when hot plug detection is already enabled for
1206  * the bridge.
1207  */
1208 void drm_bridge_hpd_enable(struct drm_bridge *bridge,
1209 			   void (*cb)(void *data,
1210 				      enum drm_connector_status status),
1211 			   void *data)
1212 {
1213 	if (!(bridge->ops & DRM_BRIDGE_OP_HPD))
1214 		return;
1215 
1216 	mutex_lock(&bridge->hpd_mutex);
1217 
1218 	if (WARN(bridge->hpd_cb, "Hot plug detection already enabled\n"))
1219 		goto unlock;
1220 
1221 	bridge->hpd_cb = cb;
1222 	bridge->hpd_data = data;
1223 
1224 	if (bridge->funcs->hpd_enable)
1225 		bridge->funcs->hpd_enable(bridge);
1226 
1227 unlock:
1228 	mutex_unlock(&bridge->hpd_mutex);
1229 }
1230 EXPORT_SYMBOL_GPL(drm_bridge_hpd_enable);
1231 
1232 /**
1233  * drm_bridge_hpd_disable - disable hot plug detection for the bridge
1234  * @bridge: bridge control structure
1235  *
1236  * Call &drm_bridge_funcs.hpd_disable if implemented and unregister the hot
1237  * plug detection callback previously registered with drm_bridge_hpd_enable().
1238  * Once this function returns the callback will not be called by the bridge
1239  * when an output status change occurs.
1240  *
1241  * Hot plug detection is supported only if the DRM_BRIDGE_OP_HPD flag is set in
1242  * bridge->ops. This function shall not be called when the flag is not set.
1243  */
1244 void drm_bridge_hpd_disable(struct drm_bridge *bridge)
1245 {
1246 	if (!(bridge->ops & DRM_BRIDGE_OP_HPD))
1247 		return;
1248 
1249 	mutex_lock(&bridge->hpd_mutex);
1250 	if (bridge->funcs->hpd_disable)
1251 		bridge->funcs->hpd_disable(bridge);
1252 
1253 	bridge->hpd_cb = NULL;
1254 	bridge->hpd_data = NULL;
1255 	mutex_unlock(&bridge->hpd_mutex);
1256 }
1257 EXPORT_SYMBOL_GPL(drm_bridge_hpd_disable);
1258 
1259 /**
1260  * drm_bridge_hpd_notify - notify hot plug detection events
1261  * @bridge: bridge control structure
1262  * @status: output connection status
1263  *
1264  * Bridge drivers shall call this function to report hot plug events when they
1265  * detect a change in the output status, when hot plug detection has been
1266  * enabled by drm_bridge_hpd_enable().
1267  *
1268  * This function shall be called in a context that can sleep.
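 *
 * A minimal sketch of a threaded interrupt handler reporting such an event
 * (the "my_bridge" wrapper and its hpd-pin read are illustrative)::
 *
 *     static irqreturn_t my_bridge_hpd_irq(int irq, void *arg)
 *     {
 *             struct my_bridge *priv = arg;
 *             enum drm_connector_status status;
 *
 *             status = my_bridge_hpd_asserted(priv) ?
 *                      connector_status_connected : connector_status_disconnected;
 *             drm_bridge_hpd_notify(&priv->bridge, status);
 *
 *             return IRQ_HANDLED;
 *     }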
1269  */
1270 void drm_bridge_hpd_notify(struct drm_bridge *bridge,
1271 			   enum drm_connector_status status)
1272 {
1273 	mutex_lock(&bridge->hpd_mutex);
1274 	if (bridge->hpd_cb)
1275 		bridge->hpd_cb(bridge->hpd_data, status);
1276 	mutex_unlock(&bridge->hpd_mutex);
1277 }
1278 EXPORT_SYMBOL_GPL(drm_bridge_hpd_notify);
1279 
1280 #ifdef CONFIG_OF
1281 /**
1282  * of_drm_find_bridge - find the bridge corresponding to the device node in
1283  *			the global bridge list
1284  *
1285  * @np: device node
1286  *
1287  * RETURNS:
1288  * drm_bridge control struct on success, NULL on failure
1289  */
1290 struct drm_bridge *of_drm_find_bridge(struct device_node *np)
1291 {
1292 	struct drm_bridge *bridge;
1293 
1294 	mutex_lock(&bridge_lock);
1295 
1296 	list_for_each_entry(bridge, &bridge_list, list) {
1297 		if (bridge->of_node == np) {
1298 			mutex_unlock(&bridge_lock);
1299 			return bridge;
1300 		}
1301 	}
1302 
1303 	mutex_unlock(&bridge_lock);
1304 	return NULL;
1305 }
1306 EXPORT_SYMBOL(of_drm_find_bridge);
1307 #endif
1308 
1309 static void drm_bridge_debugfs_show_bridge(struct drm_printer *p,
1310 					   struct drm_bridge *bridge,
1311 					   unsigned int idx)
1312 {
1313 	drm_printf(p, "bridge[%u]: %ps\n", idx, bridge->funcs);
1314 	drm_printf(p, "\ttype: [%d] %s\n",
1315 		   bridge->type,
1316 		   drm_get_connector_type_name(bridge->type));
1317 
1318 	if (bridge->of_node)
1319 		drm_printf(p, "\tOF: %pOFfc\n", bridge->of_node);
1320 
1321 	drm_printf(p, "\tops: [0x%x]", bridge->ops);
1322 	if (bridge->ops & DRM_BRIDGE_OP_DETECT)
1323 		drm_puts(p, " detect");
1324 	if (bridge->ops & DRM_BRIDGE_OP_EDID)
1325 		drm_puts(p, " edid");
1326 	if (bridge->ops & DRM_BRIDGE_OP_HPD)
1327 		drm_puts(p, " hpd");
1328 	if (bridge->ops & DRM_BRIDGE_OP_MODES)
1329 		drm_puts(p, " modes");
1330 	if (bridge->ops & DRM_BRIDGE_OP_HDMI)
1331 		drm_puts(p, " hdmi");
1332 	drm_puts(p, "\n");
1333 }
1334 
1335 static int allbridges_show(struct seq_file *m, void *data)
1336 {
1337 	struct drm_printer p = drm_seq_file_printer(m);
1338 	struct drm_bridge *bridge;
1339 	unsigned int idx = 0;
1340 
1341 	mutex_lock(&bridge_lock);
1342 
1343 	list_for_each_entry(bridge, &bridge_list, list)
1344 		drm_bridge_debugfs_show_bridge(&p, bridge, idx++);
1345 
1346 	mutex_unlock(&bridge_lock);
1347 
1348 	return 0;
1349 }
1350 DEFINE_SHOW_ATTRIBUTE(allbridges);
1351 
1352 static int encoder_bridges_show(struct seq_file *m, void *data)
1353 {
1354 	struct drm_encoder *encoder = m->private;
1355 	struct drm_printer p = drm_seq_file_printer(m);
1356 	struct drm_bridge *bridge;
1357 	unsigned int idx = 0;
1358 
1359 	drm_for_each_bridge_in_chain(encoder, bridge)
1360 		drm_bridge_debugfs_show_bridge(&p, bridge, idx++);
1361 
1362 	return 0;
1363 }
1364 DEFINE_SHOW_ATTRIBUTE(encoder_bridges);
1365 
1366 void drm_bridge_debugfs_params(struct dentry *root)
1367 {
1368 	debugfs_create_file("bridges", 0444, root, NULL, &allbridges_fops);
1369 }
1370 
1371 void drm_bridge_debugfs_encoder_params(struct dentry *root,
1372 				       struct drm_encoder *encoder)
1373 {
1374 	/* bridges list */
1375 	debugfs_create_file("bridges", 0444, root, encoder, &encoder_bridges_fops);
1376 }
1377 
1378 MODULE_AUTHOR("Ajay Kumar <ajaykumar.rs@samsung.com>");
1379 MODULE_DESCRIPTION("DRM bridge infrastructure");
1380 MODULE_LICENSE("GPL and additional rights");
1381