1 /*
2 * Copyright (c) 2014 Samsung Electronics Co., Ltd
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sub license,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the
12 * next paragraph) shall be included in all copies or substantial portions
13 * of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
24 #include <linux/debugfs.h>
25 #include <linux/err.h>
26 #include <linux/export.h>
27 #include <linux/media-bus-format.h>
28 #include <linux/module.h>
29 #include <linux/mutex.h>
30 #include <linux/srcu.h>
31
32 #include <drm/drm_atomic_state_helper.h>
33 #include <drm/drm_bridge.h>
34 #include <drm/drm_debugfs.h>
35 #include <drm/drm_edid.h>
36 #include <drm/drm_encoder.h>
37 #include <drm/drm_file.h>
38 #include <drm/drm_of.h>
39 #include <drm/drm_print.h>
40
41 #include "drm_crtc_internal.h"
42
43 /**
44 * DOC: overview
45 *
46 * &struct drm_bridge represents a device that hangs on to an encoder. These are
47 * handy when a regular &drm_encoder entity isn't enough to represent the entire
48 * encoder chain.
49 *
50 * A bridge is always attached to a single &drm_encoder at a time, but can be
51 * either connected to it directly, or through a chain of bridges::
52 *
53 * [ CRTC ---> ] Encoder ---> Bridge A ---> Bridge B
54 *
 * Here, the output of the encoder feeds to bridge A, and that further feeds to
56 * bridge B. Bridge chains can be arbitrarily long, and shall be fully linear:
57 * Chaining multiple bridges to the output of a bridge, or the same bridge to
58 * the output of different bridges, is not supported.
59 *
60 * &drm_bridge, like &drm_panel, aren't &drm_mode_object entities like planes,
61 * CRTCs, encoders or connectors and hence are not visible to userspace. They
62 * just provide additional hooks to get the desired output at the end of the
63 * encoder chain.
64 */
65
66 /**
67 * DOC: display driver integration
68 *
69 * Display drivers are responsible for linking encoders with the first bridge
70 * in the chains. This is done by acquiring the appropriate bridge with
71 * devm_drm_of_get_bridge(). Once acquired, the bridge shall be attached to the
72 * encoder with a call to drm_bridge_attach().
73 *
74 * Bridges are responsible for linking themselves with the next bridge in the
75 * chain, if any. This is done the same way as for encoders, with the call to
76 * drm_bridge_attach() occurring in the &drm_bridge_funcs.attach operation.
77 *
78 * Once these links are created, the bridges can participate along with encoder
79 * functions to perform mode validation and fixup (through
80 * drm_bridge_chain_mode_valid() and drm_atomic_bridge_chain_check()), mode
81 * setting (through drm_bridge_chain_mode_set()), enable (through
82 * drm_atomic_bridge_chain_pre_enable() and drm_atomic_bridge_chain_enable())
83 * and disable (through drm_atomic_bridge_chain_disable() and
84 * drm_atomic_bridge_chain_post_disable()). Those functions call the
85 * corresponding operations provided in &drm_bridge_funcs in sequence for all
86 * bridges in the chain.
87 *
88 * For display drivers that use the atomic helpers
89 * drm_atomic_helper_check_modeset(),
90 * drm_atomic_helper_commit_modeset_enables() and
91 * drm_atomic_helper_commit_modeset_disables() (either directly in hand-rolled
92 * commit check and commit tail handlers, or through the higher-level
93 * drm_atomic_helper_check() and drm_atomic_helper_commit_tail() or
94 * drm_atomic_helper_commit_tail_rpm() helpers), this is done transparently and
95 * requires no intervention from the driver. For other drivers, the relevant
96 * DRM bridge chain functions shall be called manually.
97 *
98 * Bridges also participate in implementing the &drm_connector at the end of
99 * the bridge chain. Display drivers may use the drm_bridge_connector_init()
100 * helper to create the &drm_connector, or implement it manually on top of the
101 * connector-related operations exposed by the bridge (see the overview
102 * documentation of bridge operations for more details).
103 */
104
105 /**
106 * DOC: special care dsi
107 *
108 * The interaction between the bridges and other frameworks involved in
109 * the probing of the upstream driver and the bridge driver can be
 * challenging. Indeed, there are multiple cases that need to be
 * considered:
112 *
113 * - The upstream driver doesn't use the component framework and isn't a
114 * MIPI-DSI host. In this case, the bridge driver will probe at some
115 * point and the upstream driver should try to probe again by returning
116 * EPROBE_DEFER as long as the bridge driver hasn't probed.
117 *
118 * - The upstream driver doesn't use the component framework, but is a
119 * MIPI-DSI host. The bridge device uses the MIPI-DCS commands to be
120 * controlled. In this case, the bridge device is a child of the
121 * display device and when it will probe it's assured that the display
122 * device (and MIPI-DSI host) is present. The upstream driver will be
123 * assured that the bridge driver is connected between the
124 * &mipi_dsi_host_ops.attach and &mipi_dsi_host_ops.detach operations.
125 * Therefore, it must run mipi_dsi_host_register() in its probe
126 * function, and then run drm_bridge_attach() in its
127 * &mipi_dsi_host_ops.attach hook.
128 *
129 * - The upstream driver uses the component framework and is a MIPI-DSI
130 * host. The bridge device uses the MIPI-DCS commands to be
 * controlled. This is the same situation as above, and can run
132 * mipi_dsi_host_register() in either its probe or bind hooks.
133 *
134 * - The upstream driver uses the component framework and is a MIPI-DSI
135 * host. The bridge device uses a separate bus (such as I2C) to be
136 * controlled. In this case, there's no correlation between the probe
137 * of the bridge and upstream drivers, so care must be taken to avoid
138 * an endless EPROBE_DEFER loop, with each driver waiting for the
139 * other to probe.
140 *
141 * The ideal pattern to cover the last item (and all the others in the
142 * MIPI-DSI host driver case) is to split the operations like this:
143 *
144 * - The MIPI-DSI host driver must run mipi_dsi_host_register() in its
145 * probe hook. It will make sure that the MIPI-DSI host sticks around,
146 * and that the driver's bind can be called.
147 *
148 * - In its probe hook, the bridge driver must try to find its MIPI-DSI
149 * host, register as a MIPI-DSI device and attach the MIPI-DSI device
150 * to its host. The bridge driver is now functional.
151 *
152 * - In its &struct mipi_dsi_host_ops.attach hook, the MIPI-DSI host can
153 * now add its component. Its bind hook will now be called and since
154 * the bridge driver is attached and registered, we can now look for
155 * and attach it.
156 *
157 * At this point, we're now certain that both the upstream driver and
158 * the bridge driver are functional and we can't have a deadlock-like
159 * situation when probing.
160 */
161
162 /**
163 * DOC: dsi bridge operations
164 *
165 * DSI host interfaces are expected to be implemented as bridges rather than
166 * encoders, however there are a few aspects of their operation that need to
167 * be defined in order to provide a consistent interface.
168 *
169 * A DSI host should keep the PHY powered down until the pre_enable operation is
170 * called. All lanes are in an undefined idle state up to this point, and it
171 * must not be assumed that it is LP-11.
172 * pre_enable should initialise the PHY, set the data lanes to LP-11, and the
173 * clock lane to either LP-11 or HS depending on the mode_flag
174 * %MIPI_DSI_CLOCK_NON_CONTINUOUS.
175 *
176 * Ordinarily the downstream bridge DSI peripheral pre_enable will have been
177 * called before the DSI host. If the DSI peripheral requires LP-11 and/or
178 * the clock lane to be in HS mode prior to pre_enable, then it can set the
179 * &pre_enable_prev_first flag to request the pre_enable (and
180 * post_disable) order to be altered to enable the DSI host first.
181 *
182 * Either the CRTC being enabled, or the DSI host enable operation should switch
183 * the host to actively transmitting video on the data lanes.
184 *
185 * The reverse also applies. The DSI host disable operation or stopping the CRTC
186 * should stop transmitting video, and the data lanes should return to the LP-11
187 * state. The DSI host &post_disable operation should disable the PHY.
188 * If the &pre_enable_prev_first flag is set, then the DSI peripheral's
189 * bridge &post_disable will be called before the DSI host's post_disable.
190 *
191 * Whilst it is valid to call &host_transfer prior to pre_enable or after
192 * post_disable, the exact state of the lanes is undefined at this point. The
193 * DSI host should initialise the interface, transmit the data, and then disable
194 * the interface again.
195 *
196 * Ultra Low Power State (ULPS) is not explicitly supported by DRM. If
197 * implemented, it therefore needs to be handled entirely within the DSI Host
198 * driver.
199 */
200
/* Protect bridge_list and bridge_lingering_list */
static DEFINE_MUTEX(bridge_lock);
/* All bridges added via drm_bridge_add() and not yet removed. */
static LIST_HEAD(bridge_list);
/* Bridges removed via drm_bridge_remove() whose memory has not been freed yet. */
static LIST_HEAD(bridge_lingering_list);

/* SRCU guarding the read-side sections entered through drm_bridge_enter(). */
DEFINE_STATIC_SRCU(drm_bridge_unplug_srcu);
207
208 /**
209 * drm_bridge_enter - Enter DRM bridge critical section
210 * @bridge: DRM bridge
211 * @idx: Pointer to index that will be passed to the matching drm_bridge_exit()
212 *
213 * This function marks and protects the beginning of a section that should not
214 * be entered after the bridge has been unplugged. The section end is marked
215 * with drm_bridge_exit(). Calls to this function can be nested.
216 *
217 * Returns:
218 * True if it is OK to enter the section, false otherwise.
219 */
drm_bridge_enter(struct drm_bridge * bridge,int * idx)220 bool drm_bridge_enter(struct drm_bridge *bridge, int *idx)
221 {
222 *idx = srcu_read_lock(&drm_bridge_unplug_srcu);
223
224 if (bridge->unplugged) {
225 srcu_read_unlock(&drm_bridge_unplug_srcu, *idx);
226 return false;
227 }
228
229 return true;
230 }
231 EXPORT_SYMBOL(drm_bridge_enter);
232
/**
 * drm_bridge_exit - Exit DRM bridge critical section
 * @idx: index returned by drm_bridge_enter()
 *
 * This function marks the end of a section that should not be entered after
 * the bridge has been unplugged.
 */
void drm_bridge_exit(int idx)
{
	/* Close the SRCU read-side section opened by drm_bridge_enter(). */
	srcu_read_unlock(&drm_bridge_unplug_srcu, idx);
}
EXPORT_SYMBOL(drm_bridge_exit);
245
/**
 * drm_bridge_unplug - declare a DRM bridge was unplugged and remove it
 * @bridge: DRM bridge
 *
 * This tells the bridge has been physically unplugged and no operations on
 * device resources must be done anymore. Entry-points can use
 * drm_bridge_enter() and drm_bridge_exit() to protect device resources in
 * a race free manner.
 *
 * Also unregisters the bridge.
 */
void drm_bridge_unplug(struct drm_bridge *bridge)
{
	/* Make all subsequent drm_bridge_enter() calls fail. */
	bridge->unplugged = true;

	/* Wait for every section already entered to reach drm_bridge_exit(). */
	synchronize_srcu(&drm_bridge_unplug_srcu);

	drm_bridge_remove(bridge);
}
EXPORT_SYMBOL(drm_bridge_unplug);
266
__drm_bridge_free(struct kref * kref)267 static void __drm_bridge_free(struct kref *kref)
268 {
269 struct drm_bridge *bridge = container_of(kref, struct drm_bridge, refcount);
270
271 mutex_lock(&bridge_lock);
272 list_del(&bridge->list);
273 mutex_unlock(&bridge_lock);
274
275 if (bridge->funcs->destroy)
276 bridge->funcs->destroy(bridge);
277
278 drm_bridge_put(bridge->next_bridge);
279
280 kfree(bridge->container);
281 }
282
283 /**
284 * drm_bridge_get - Acquire a bridge reference
285 * @bridge: DRM bridge
286 *
287 * This function increments the bridge's refcount.
288 *
289 * Returns:
290 * Pointer to @bridge.
291 */
drm_bridge_get(struct drm_bridge * bridge)292 struct drm_bridge *drm_bridge_get(struct drm_bridge *bridge)
293 {
294 if (bridge)
295 kref_get(&bridge->refcount);
296
297 return bridge;
298 }
299 EXPORT_SYMBOL(drm_bridge_get);
300
301 /**
302 * drm_bridge_put - Release a bridge reference
303 * @bridge: DRM bridge
304 *
305 * This function decrements the bridge's reference count and frees the
306 * object if the reference count drops to zero.
307 *
308 * See also drm_bridge_clear_and_put() if you also need to set the pointer
309 * to NULL
310 */
drm_bridge_put(struct drm_bridge * bridge)311 void drm_bridge_put(struct drm_bridge *bridge)
312 {
313 if (bridge)
314 kref_put(&bridge->refcount, __drm_bridge_free);
315 }
316 EXPORT_SYMBOL(drm_bridge_put);
317
318 /**
319 * drm_bridge_clear_and_put - Given a bridge pointer, clear the pointer
320 * then put the bridge
321 * @bridge_pp: pointer to pointer to a struct drm_bridge; ``bridge_pp``
322 * must be non-NULL; if ``*bridge_pp`` is NULL this function
323 * does nothing
324 *
325 * Helper to put a DRM bridge, but only after setting its pointer to
326 * NULL. Useful when a struct drm_bridge reference must be dropped without
327 * leaving a use-after-free window where the pointed bridge might have been
328 * freed while still holding a pointer to it.
329 *
330 * For struct ``drm_bridge *some_bridge``, this code::
331 *
332 * drm_bridge_clear_and_put(&some_bridge);
333 *
334 * is equivalent to the more complex::
335 *
336 * struct drm_bridge *temp = some_bridge;
337 * some_bridge = NULL;
338 * drm_bridge_put(temp);
339 */
drm_bridge_clear_and_put(struct drm_bridge ** bridge_pp)340 void drm_bridge_clear_and_put(struct drm_bridge **bridge_pp)
341 {
342 struct drm_bridge *bridge = *bridge_pp;
343
344 *bridge_pp = NULL;
345 drm_bridge_put(bridge);
346 }
347 EXPORT_SYMBOL(drm_bridge_clear_and_put);
348
/**
 * drm_bridge_put_void - wrapper to drm_bridge_put() taking a void pointer
 *
 * @data: pointer to @struct drm_bridge, cast to a void pointer
 *
 * Wrapper of drm_bridge_put() to be used when a function taking a void
 * pointer is needed, for example as a devm action.
 */
static void drm_bridge_put_void(void *data)
{
	/* @data is always a struct drm_bridge *, registered below. */
	drm_bridge_put(data);
}
363
__devm_drm_bridge_alloc(struct device * dev,size_t size,size_t offset,const struct drm_bridge_funcs * funcs)364 void *__devm_drm_bridge_alloc(struct device *dev, size_t size, size_t offset,
365 const struct drm_bridge_funcs *funcs)
366 {
367 void *container;
368 struct drm_bridge *bridge;
369 int err;
370
371 if (!funcs) {
372 dev_warn(dev, "Missing funcs pointer\n");
373 return ERR_PTR(-EINVAL);
374 }
375
376 container = kzalloc(size, GFP_KERNEL);
377 if (!container)
378 return ERR_PTR(-ENOMEM);
379
380 bridge = container + offset;
381 INIT_LIST_HEAD(&bridge->list);
382 bridge->container = container;
383 bridge->funcs = funcs;
384 kref_init(&bridge->refcount);
385
386 err = devm_add_action_or_reset(dev, drm_bridge_put_void, bridge);
387 if (err)
388 return ERR_PTR(err);
389
390 return container;
391 }
392 EXPORT_SYMBOL(__devm_drm_bridge_alloc);
393
/**
 * drm_bridge_add - register a bridge
 *
 * @bridge: bridge control structure
 *
 * Add the given bridge to the global list of bridges, where they can be
 * found by users via of_drm_find_and_get_bridge().
 *
 * The bridge to be added must have been allocated by
 * devm_drm_bridge_alloc().
 */
void drm_bridge_add(struct drm_bridge *bridge)
{
	if (!bridge->container)
		DRM_WARN("DRM bridge corrupted or not allocated by devm_drm_bridge_alloc()\n");

	/* Reference held by bridge_list, dropped in drm_bridge_remove(). */
	drm_bridge_get(bridge);

	/*
	 * If the bridge was previously added and then removed, it is now
	 * in bridge_lingering_list. Remove it or bridge_lingering_list will be
	 * corrupted when adding this bridge to bridge_list below.
	 */
	if (!list_empty(&bridge->list))
		list_del_init(&bridge->list);

	mutex_init(&bridge->hpd_mutex);

	/* HDMI bridges allow YCbCr 4:2:0 only when they advertise the format. */
	if (bridge->ops & DRM_BRIDGE_OP_HDMI)
		bridge->ycbcr_420_allowed = !!(bridge->supported_formats &
					       BIT(DRM_OUTPUT_COLOR_FORMAT_YCBCR420));

	mutex_lock(&bridge_lock);
	list_add_tail(&bridge->list, &bridge_list);
	mutex_unlock(&bridge_lock);
}
EXPORT_SYMBOL(drm_bridge_add);
431
drm_bridge_remove_void(void * bridge)432 static void drm_bridge_remove_void(void *bridge)
433 {
434 drm_bridge_remove(bridge);
435 }
436
/**
 * devm_drm_bridge_add - devm managed version of drm_bridge_add()
 *
 * @dev: device to tie the bridge lifetime to
 * @bridge: bridge control structure
 *
 * This is the managed version of drm_bridge_add() which automatically
 * calls drm_bridge_remove() when @dev is unbound.
 *
 * Return: 0 if no error or negative error code.
 */
int devm_drm_bridge_add(struct device *dev, struct drm_bridge *bridge)
{
	drm_bridge_add(bridge);
	/* If the action cannot be registered, the bridge is removed right away. */
	return devm_add_action_or_reset(dev, drm_bridge_remove_void, bridge);
}
EXPORT_SYMBOL(devm_drm_bridge_add);
454
/**
 * drm_bridge_remove - unregister a bridge
 *
 * @bridge: bridge control structure
 *
 * Remove the given bridge from the global list of registered bridges, so
 * it won't be found by users via of_drm_find_and_get_bridge(), and add it
 * to the lingering bridge list, to keep track of it until its allocated
 * memory is eventually freed.
 */
void drm_bridge_remove(struct drm_bridge *bridge)
{
	/* Park the bridge on the lingering list until its memory is freed. */
	mutex_lock(&bridge_lock);
	list_move_tail(&bridge->list, &bridge_lingering_list);
	mutex_unlock(&bridge_lock);

	mutex_destroy(&bridge->hpd_mutex);

	/* Drop the reference taken in drm_bridge_add(). */
	drm_bridge_put(bridge);
}
EXPORT_SYMBOL(drm_bridge_remove);
476
477 static struct drm_private_state *
drm_bridge_atomic_duplicate_priv_state(struct drm_private_obj * obj)478 drm_bridge_atomic_duplicate_priv_state(struct drm_private_obj *obj)
479 {
480 struct drm_bridge *bridge = drm_priv_to_bridge(obj);
481 struct drm_bridge_state *state;
482
483 state = bridge->funcs->atomic_duplicate_state(bridge);
484 return state ? &state->base : NULL;
485 }
486
487 static void
drm_bridge_atomic_destroy_priv_state(struct drm_private_obj * obj,struct drm_private_state * s)488 drm_bridge_atomic_destroy_priv_state(struct drm_private_obj *obj,
489 struct drm_private_state *s)
490 {
491 struct drm_bridge_state *state = drm_priv_to_bridge_state(s);
492 struct drm_bridge *bridge = drm_priv_to_bridge(obj);
493
494 bridge->funcs->atomic_destroy_state(bridge, state);
495 }
496
497 static struct drm_private_state *
drm_bridge_atomic_create_priv_state(struct drm_private_obj * obj)498 drm_bridge_atomic_create_priv_state(struct drm_private_obj *obj)
499 {
500 struct drm_bridge *bridge = drm_priv_to_bridge(obj);
501 struct drm_bridge_state *state;
502
503 state = bridge->funcs->atomic_reset(bridge);
504 if (IS_ERR(state))
505 return ERR_CAST(state);
506
507 return &state->base;
508 }
509
/* Atomic private-object vtable backing &drm_bridge.base for atomic bridges. */
static const struct drm_private_state_funcs drm_bridge_priv_state_funcs = {
	.atomic_create_state = drm_bridge_atomic_create_priv_state,
	.atomic_duplicate_state = drm_bridge_atomic_duplicate_priv_state,
	.atomic_destroy_state = drm_bridge_atomic_destroy_priv_state,
};
515
drm_bridge_is_atomic(struct drm_bridge * bridge)516 static bool drm_bridge_is_atomic(struct drm_bridge *bridge)
517 {
518 return bridge->funcs->atomic_reset != NULL;
519 }
520
/**
 * drm_bridge_attach - attach the bridge to an encoder's chain
 *
 * @encoder: DRM encoder
 * @bridge: bridge to attach
 * @previous: previous bridge in the chain (optional)
 * @flags: DRM_BRIDGE_ATTACH_* flags
 *
 * Called by a kms driver to link the bridge to an encoder's chain. The previous
 * argument specifies the previous bridge in the chain. If NULL, the bridge is
 * linked directly at the encoder's output. Otherwise it is linked at the
 * previous bridge's output.
 *
 * If non-NULL the previous bridge must be already attached by a call to this
 * function.
 *
 * The bridge to be attached must have been previously added by
 * drm_bridge_add().
 *
 * Note that bridges attached to encoders are auto-detached during encoder
 * cleanup in drm_encoder_cleanup(), so drm_bridge_attach() should generally
 * *not* be balanced with a drm_bridge_detach() in driver code.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
int drm_bridge_attach(struct drm_encoder *encoder, struct drm_bridge *bridge,
		      struct drm_bridge *previous,
		      enum drm_bridge_attach_flags flags)
{
	int ret;

	if (!encoder || !bridge)
		return -EINVAL;

	if (!bridge->container)
		DRM_WARN("DRM bridge corrupted or not allocated by devm_drm_bridge_alloc()\n");

	/* A bridge on neither bridge_list nor bridge_lingering_list was never added. */
	if (list_empty(&bridge->list))
		DRM_WARN("Missing drm_bridge_add() before attach\n");

	/* Reference held for the duration of the attachment, dropped in detach. */
	drm_bridge_get(bridge);

	/* The previous bridge, if given, must already be attached to @encoder. */
	if (previous && (!previous->dev || previous->encoder != encoder)) {
		ret = -EINVAL;
		goto err_put_bridge;
	}

	/* A non-NULL dev means the bridge is already attached somewhere. */
	if (bridge->dev) {
		ret = -EBUSY;
		goto err_put_bridge;
	}

	bridge->dev = encoder->dev;
	bridge->encoder = encoder;

	/* Link after @previous, or at the head of the encoder's chain. */
	if (previous)
		list_add(&bridge->chain_node, &previous->chain_node);
	else
		list_add(&bridge->chain_node, &encoder->bridge_chain);

	if (bridge->funcs->attach) {
		ret = bridge->funcs->attach(bridge, encoder, flags);
		if (ret < 0)
			goto err_reset_bridge;
	}

	if (drm_bridge_is_atomic(bridge))
		drm_atomic_private_obj_init(bridge->dev, &bridge->base,
					    &drm_bridge_priv_state_funcs);

	return 0;

err_reset_bridge:
	/* Undo the linking done above before reporting the failure. */
	bridge->dev = NULL;
	bridge->encoder = NULL;
	list_del(&bridge->chain_node);

	if (ret != -EPROBE_DEFER)
		DRM_ERROR("failed to attach bridge %pOF to encoder %s: %d\n",
			  bridge->of_node, encoder->name, ret);
	else
		dev_err_probe(encoder->dev->dev, -EPROBE_DEFER,
			      "failed to attach bridge %pOF to encoder %s\n",
			      bridge->of_node, encoder->name);

	/* fallthrough: also drop the reference taken at function entry */
err_put_bridge:
	drm_bridge_put(bridge);
	return ret;
}
EXPORT_SYMBOL(drm_bridge_attach);
612
drm_bridge_detach(struct drm_bridge * bridge)613 void drm_bridge_detach(struct drm_bridge *bridge)
614 {
615 if (WARN_ON(!bridge))
616 return;
617
618 if (WARN_ON(!bridge->dev))
619 return;
620
621 if (drm_bridge_is_atomic(bridge))
622 drm_atomic_private_obj_fini(&bridge->base);
623
624 if (bridge->funcs->detach)
625 bridge->funcs->detach(bridge);
626
627 list_del(&bridge->chain_node);
628 bridge->dev = NULL;
629 drm_bridge_put(bridge);
630 }
631
632 /**
633 * DOC: bridge operations
634 *
635 * Bridge drivers expose operations through the &drm_bridge_funcs structure.
636 * The DRM internals (atomic and CRTC helpers) use the helpers defined in
637 * drm_bridge.c to call bridge operations. Those operations are divided in
638 * three big categories to support different parts of the bridge usage.
639 *
640 * - The encoder-related operations support control of the bridges in the
641 * chain, and are roughly counterparts to the &drm_encoder_helper_funcs
642 * operations. They are used by the legacy CRTC and the atomic modeset
643 * helpers to perform mode validation, fixup and setting, and enable and
644 * disable the bridge automatically.
645 *
646 * The enable and disable operations are split in
647 * &drm_bridge_funcs.pre_enable, &drm_bridge_funcs.enable,
648 * &drm_bridge_funcs.disable and &drm_bridge_funcs.post_disable to provide
649 * finer-grained control.
650 *
651 * Bridge drivers may implement the legacy version of those operations, or
652 * the atomic version (prefixed with atomic\_), in which case they shall also
653 * implement the atomic state bookkeeping operations
654 * (&drm_bridge_funcs.atomic_duplicate_state,
655 * &drm_bridge_funcs.atomic_destroy_state and &drm_bridge_funcs.reset).
656 * Mixing atomic and non-atomic versions of the operations is not supported.
657 *
658 * - The bus format negotiation operations
659 * &drm_bridge_funcs.atomic_get_output_bus_fmts and
660 * &drm_bridge_funcs.atomic_get_input_bus_fmts allow bridge drivers to
661 * negotiate the formats transmitted between bridges in the chain when
662 * multiple formats are supported. Negotiation for formats is performed
663 * transparently for display drivers by the atomic modeset helpers. Only
664 * atomic versions of those operations exist, bridge drivers that need to
665 * implement them shall thus also implement the atomic version of the
666 * encoder-related operations. This feature is not supported by the legacy
667 * CRTC helpers.
668 *
669 * - The connector-related operations support implementing a &drm_connector
670 * based on a chain of bridges. DRM bridges traditionally create a
671 * &drm_connector for bridges meant to be used at the end of the chain. This
672 * puts additional burden on bridge drivers, especially for bridges that may
673 * be used in the middle of a chain or at the end of it. Furthermore, it
674 * requires all operations of the &drm_connector to be handled by a single
675 * bridge, which doesn't always match the hardware architecture.
676 *
677 * To simplify bridge drivers and make the connector implementation more
678 * flexible, a new model allows bridges to unconditionally skip creation of
679 * &drm_connector and instead expose &drm_bridge_funcs operations to support
680 * an externally-implemented &drm_connector. Those operations are
681 * &drm_bridge_funcs.detect, &drm_bridge_funcs.get_modes,
682 * &drm_bridge_funcs.get_edid, &drm_bridge_funcs.hpd_notify,
683 * &drm_bridge_funcs.hpd_enable and &drm_bridge_funcs.hpd_disable. When
684 * implemented, display drivers shall create a &drm_connector instance for
685 * each chain of bridges, and implement those connector instances based on
686 * the bridge connector operations.
687 *
688 * Bridge drivers shall implement the connector-related operations for all
689 * the features that the bridge hardware support. For instance, if a bridge
690 * supports reading EDID, the &drm_bridge_funcs.get_edid shall be
691 * implemented. This however doesn't mean that the DDC lines are wired to the
692 * bridge on a particular platform, as they could also be connected to an I2C
693 * controller of the SoC. Support for the connector-related operations on the
694 * running platform is reported through the &drm_bridge.ops flags. Bridge
695 * drivers shall detect which operations they can support on the platform
696 * (usually this information is provided by ACPI or DT), and set the
697 * &drm_bridge.ops flags for all supported operations. A flag shall only be
698 * set if the corresponding &drm_bridge_funcs operation is implemented, but
699 * an implemented operation doesn't necessarily imply that the corresponding
700 * flag will be set. Display drivers shall use the &drm_bridge.ops flags to
701 * decide which bridge to delegate a connector operation to. This mechanism
702 * allows providing a single static const &drm_bridge_funcs instance in
703 * bridge drivers, improving security by storing function pointers in
704 * read-only memory.
705 *
706 * In order to ease transition, bridge drivers may support both the old and
707 * new models by making connector creation optional and implementing the
708 * connected-related bridge operations. Connector creation is then controlled
709 * by the flags argument to the drm_bridge_attach() function. Display drivers
710 * that support the new model and create connectors themselves shall set the
711 * %DRM_BRIDGE_ATTACH_NO_CONNECTOR flag, and bridge drivers shall then skip
712 * connector creation. For intermediate bridges in the chain, the flag shall
713 * be passed to the drm_bridge_attach() call for the downstream bridge.
714 * Bridge drivers that implement the new model only shall return an error
715 * from their &drm_bridge_funcs.attach handler when the
716 * %DRM_BRIDGE_ATTACH_NO_CONNECTOR flag is not set. New display drivers
717 * should use the new model, and convert the bridge drivers they use if
718 * needed, in order to gradually transition to the new model.
719 */
720
721 /**
722 * drm_bridge_chain_mode_valid - validate the mode against all bridges in the
723 * encoder chain.
724 * @bridge: bridge control structure
725 * @info: display info against which the mode shall be validated
726 * @mode: desired mode to be validated
727 *
728 * Calls &drm_bridge_funcs.mode_valid for all the bridges in the encoder
729 * chain, starting from the first bridge to the last. If at least one bridge
730 * does not accept the mode the function returns the error code.
731 *
732 * Note: the bridge passed should be the one closest to the encoder.
733 *
734 * RETURNS:
735 * MODE_OK on success, drm_mode_status Enum error code on failure
736 */
737 enum drm_mode_status
drm_bridge_chain_mode_valid(struct drm_bridge * bridge,const struct drm_display_info * info,const struct drm_display_mode * mode)738 drm_bridge_chain_mode_valid(struct drm_bridge *bridge,
739 const struct drm_display_info *info,
740 const struct drm_display_mode *mode)
741 {
742 struct drm_encoder *encoder;
743
744 if (!bridge)
745 return MODE_OK;
746
747 encoder = bridge->encoder;
748 list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
749 enum drm_mode_status ret;
750
751 if (!bridge->funcs->mode_valid)
752 continue;
753
754 ret = bridge->funcs->mode_valid(bridge, info, mode);
755 if (ret != MODE_OK)
756 return ret;
757 }
758
759 return MODE_OK;
760 }
761 EXPORT_SYMBOL(drm_bridge_chain_mode_valid);
762
763 /**
764 * drm_bridge_chain_mode_set - set proposed mode for all bridges in the
765 * encoder chain
766 * @bridge: bridge control structure
767 * @mode: desired mode to be set for the encoder chain
768 * @adjusted_mode: updated mode that works for this encoder chain
769 *
770 * Calls &drm_bridge_funcs.mode_set op for all the bridges in the
771 * encoder chain, starting from the first bridge to the last.
772 *
773 * Note: the bridge passed should be the one closest to the encoder
774 */
drm_bridge_chain_mode_set(struct drm_bridge * bridge,const struct drm_display_mode * mode,const struct drm_display_mode * adjusted_mode)775 void drm_bridge_chain_mode_set(struct drm_bridge *bridge,
776 const struct drm_display_mode *mode,
777 const struct drm_display_mode *adjusted_mode)
778 {
779 struct drm_encoder *encoder;
780
781 if (!bridge)
782 return;
783
784 encoder = bridge->encoder;
785 list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
786 if (bridge->funcs->mode_set)
787 bridge->funcs->mode_set(bridge, mode, adjusted_mode);
788 }
789 }
790 EXPORT_SYMBOL(drm_bridge_chain_mode_set);
791
/**
 * drm_atomic_bridge_chain_disable - disables all bridges in the encoder chain
 * @bridge: bridge control structure
 * @state: atomic state being committed
 *
 * Calls &drm_bridge_funcs.atomic_disable (falls back on
 * &drm_bridge_funcs.disable) op for all the bridges in the encoder chain,
 * starting from the last bridge to the first. These are called before calling
 * &drm_encoder_helper_funcs.atomic_disable
 *
 * Note: the bridge passed should be the one closest to the encoder
 */
void drm_atomic_bridge_chain_disable(struct drm_bridge *bridge,
				     struct drm_atomic_state *state)
{
	struct drm_encoder *encoder;
	struct drm_bridge *iter;

	if (!bridge)
		return;

	/*
	 * Walk the whole chain backwards (from the bridge furthest from the
	 * encoder), disabling each bridge up to and including @bridge.
	 */
	encoder = bridge->encoder;
	list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) {
		/* Prefer the atomic op, fall back to the legacy one. */
		if (iter->funcs->atomic_disable) {
			iter->funcs->atomic_disable(iter, state);
		} else if (iter->funcs->disable) {
			iter->funcs->disable(iter);
		}

		/* @bridge itself is the last one to be disabled. */
		if (iter == bridge)
			break;
	}
}
EXPORT_SYMBOL(drm_atomic_bridge_chain_disable);
826
drm_atomic_bridge_call_post_disable(struct drm_bridge * bridge,struct drm_atomic_state * state)827 static void drm_atomic_bridge_call_post_disable(struct drm_bridge *bridge,
828 struct drm_atomic_state *state)
829 {
830 if (state && bridge->funcs->atomic_post_disable)
831 bridge->funcs->atomic_post_disable(bridge, state);
832 else if (bridge->funcs->post_disable)
833 bridge->funcs->post_disable(bridge);
834 }
835
836 /**
837 * drm_atomic_bridge_chain_post_disable - cleans up after disabling all bridges
838 * in the encoder chain
839 * @bridge: bridge control structure
840 * @state: atomic state being committed
841 *
842 * Calls &drm_bridge_funcs.atomic_post_disable (falls back on
843 * &drm_bridge_funcs.post_disable) op for all the bridges in the encoder chain,
844 * starting from the first bridge to the last. These are called after completing
845 * &drm_encoder_helper_funcs.atomic_disable
846 *
847 * If a bridge sets @pre_enable_prev_first, then the @post_disable for that
848 * bridge will be called before the previous one to reverse the @pre_enable
849 * calling direction.
850 *
851 * Example:
852 * Bridge A ---> Bridge B ---> Bridge C ---> Bridge D ---> Bridge E
853 *
 * With the pre_enable_prev_first flag enabled in Bridges B, D and E, the resulting
855 * @post_disable order would be,
856 * Bridge B, Bridge A, Bridge E, Bridge D, Bridge C.
857 *
858 * Note: the bridge passed should be the one closest to the encoder
859 */
void drm_atomic_bridge_chain_post_disable(struct drm_bridge *bridge,
					  struct drm_atomic_state *state)
{
	struct drm_encoder *encoder;
	struct drm_bridge *next, *limit;

	if (!bridge)
		return;

	encoder = bridge->encoder;

	/* Walk forwards from @bridge towards the end of the chain. */
	list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
		/* limit marks the last bridge of a reversed sub-sequence, if any. */
		limit = NULL;

		if (!list_is_last(&bridge->chain_node, &encoder->bridge_chain)) {
			next = list_next_entry(bridge, chain_node);

			if (next->pre_enable_prev_first) {
				/* next bridge had requested that prev
				 * was enabled first, so disabled last
				 */
				limit = next;

				/* Find the next bridge that has NOT requested
				 * prev to be enabled first / disabled last
				 */
				list_for_each_entry_from(next, &encoder->bridge_chain,
							 chain_node) {
					if (!next->pre_enable_prev_first) {
						/* Step back: next itself is outside the run. */
						next = list_prev_entry(next, chain_node);
						limit = next;
						break;
					}

					/* The run extends to the end of the chain. */
					if (list_is_last(&next->chain_node,
							 &encoder->bridge_chain)) {
						limit = next;
						break;
					}
				}

				/* Call these bridges in reverse order */
				list_for_each_entry_from_reverse(next, &encoder->bridge_chain,
								 chain_node) {
					if (next == bridge)
						break;

					drm_atomic_bridge_call_post_disable(next,
									    state);
				}
			}
		}

		drm_atomic_bridge_call_post_disable(bridge, state);

		if (limit)
			/* Jump all bridges that we have already post_disabled */
			bridge = limit;
	}
}
EXPORT_SYMBOL(drm_atomic_bridge_chain_post_disable);
921
drm_atomic_bridge_call_pre_enable(struct drm_bridge * bridge,struct drm_atomic_state * state)922 static void drm_atomic_bridge_call_pre_enable(struct drm_bridge *bridge,
923 struct drm_atomic_state *state)
924 {
925 if (state && bridge->funcs->atomic_pre_enable)
926 bridge->funcs->atomic_pre_enable(bridge, state);
927 else if (bridge->funcs->pre_enable)
928 bridge->funcs->pre_enable(bridge);
929 }
930
931 /**
932 * drm_atomic_bridge_chain_pre_enable - prepares for enabling all bridges in
933 * the encoder chain
934 * @bridge: bridge control structure
935 * @state: atomic state being committed
936 *
937 * Calls &drm_bridge_funcs.atomic_pre_enable (falls back on
938 * &drm_bridge_funcs.pre_enable) op for all the bridges in the encoder chain,
939 * starting from the last bridge to the first. These are called before calling
940 * &drm_encoder_helper_funcs.atomic_enable
941 *
942 * If a bridge sets @pre_enable_prev_first, then the pre_enable for the
943 * prev bridge will be called before pre_enable of this bridge.
944 *
945 * Example:
946 * Bridge A ---> Bridge B ---> Bridge C ---> Bridge D ---> Bridge E
947 *
 * With the pre_enable_prev_first flag enabled in Bridges B, D and E, the resulting
949 * @pre_enable order would be,
950 * Bridge C, Bridge D, Bridge E, Bridge A, Bridge B.
951 *
952 * Note: the bridge passed should be the one closest to the encoder
953 */
void drm_atomic_bridge_chain_pre_enable(struct drm_bridge *bridge,
					struct drm_atomic_state *state)
{
	struct drm_encoder *encoder;
	struct drm_bridge *iter, *next, *limit;

	if (!bridge)
		return;

	encoder = bridge->encoder;

	/* Walk the chain backwards, from the last bridge towards @bridge. */
	list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) {
		if (iter->pre_enable_prev_first) {
			next = iter;
			/* Default limit: the run extends all the way to @bridge. */
			limit = bridge;
			/*
			 * Scan towards the encoder for the first bridge that
			 * does NOT request prev-first ordering; the bridges in
			 * between must be pre_enabled before @iter.
			 */
			list_for_each_entry_from_reverse(next,
							 &encoder->bridge_chain,
							 chain_node) {
				if (next == bridge)
					break;

				if (!next->pre_enable_prev_first) {
					/* Found first bridge that does NOT
					 * request prev to be enabled first
					 */
					limit = next;
					break;
				}
			}

			list_for_each_entry_from(next, &encoder->bridge_chain, chain_node) {
				/* Call requested prev bridge pre_enable
				 * in order.
				 */
				if (next == iter)
					/* At the first bridge to request prev
					 * bridges called first.
					 */
					break;

				drm_atomic_bridge_call_pre_enable(next, state);
			}
		}

		drm_atomic_bridge_call_pre_enable(iter, state);

		if (iter->pre_enable_prev_first)
			/* Jump all bridges that we have already pre_enabled */
			iter = limit;

		if (iter == bridge)
			break;
	}
}
EXPORT_SYMBOL(drm_atomic_bridge_chain_pre_enable);
1008 EXPORT_SYMBOL(drm_atomic_bridge_chain_pre_enable);
1009
1010 /**
1011 * drm_atomic_bridge_chain_enable - enables all bridges in the encoder chain
1012 * @bridge: bridge control structure
1013 * @state: atomic state being committed
1014 *
1015 * Calls &drm_bridge_funcs.atomic_enable (falls back on
1016 * &drm_bridge_funcs.enable) op for all the bridges in the encoder chain,
1017 * starting from the first bridge to the last. These are called after completing
1018 * &drm_encoder_helper_funcs.atomic_enable
1019 *
1020 * Note: the bridge passed should be the one closest to the encoder
1021 */
drm_atomic_bridge_chain_enable(struct drm_bridge * bridge,struct drm_atomic_state * state)1022 void drm_atomic_bridge_chain_enable(struct drm_bridge *bridge,
1023 struct drm_atomic_state *state)
1024 {
1025 struct drm_encoder *encoder;
1026
1027 if (!bridge)
1028 return;
1029
1030 encoder = bridge->encoder;
1031 list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
1032 if (bridge->funcs->atomic_enable) {
1033 bridge->funcs->atomic_enable(bridge, state);
1034 } else if (bridge->funcs->enable) {
1035 bridge->funcs->enable(bridge);
1036 }
1037 }
1038 }
1039 EXPORT_SYMBOL(drm_atomic_bridge_chain_enable);
1040
drm_atomic_bridge_check(struct drm_bridge * bridge,struct drm_crtc_state * crtc_state,struct drm_connector_state * conn_state)1041 static int drm_atomic_bridge_check(struct drm_bridge *bridge,
1042 struct drm_crtc_state *crtc_state,
1043 struct drm_connector_state *conn_state)
1044 {
1045 if (bridge->funcs->atomic_check) {
1046 struct drm_bridge_state *bridge_state;
1047 int ret;
1048
1049 bridge_state = drm_atomic_get_new_bridge_state(crtc_state->state,
1050 bridge);
1051 if (WARN_ON(!bridge_state))
1052 return -EINVAL;
1053
1054 ret = bridge->funcs->atomic_check(bridge, bridge_state,
1055 crtc_state, conn_state);
1056 if (ret)
1057 return ret;
1058 } else if (bridge->funcs->mode_fixup) {
1059 if (!bridge->funcs->mode_fixup(bridge, &crtc_state->mode,
1060 &crtc_state->adjusted_mode))
1061 return -EINVAL;
1062 }
1063
1064 return 0;
1065 }
1066
/*
 * One step of the bus format negotiation: pick an input bus format for
 * @cur_bridge that it can transcode into @out_bus_fmt, then recurse towards
 * @first_bridge (the bridge closest to the encoder). Returns 0 on success,
 * -ENOTSUPP when no combination works, or another negative errno on error.
 */
static int select_bus_fmt_recursive(struct drm_bridge *first_bridge,
				    struct drm_bridge *cur_bridge,
				    struct drm_crtc_state *crtc_state,
				    struct drm_connector_state *conn_state,
				    u32 out_bus_fmt)
{
	unsigned int i, num_in_bus_fmts = 0;
	struct drm_bridge_state *cur_state;
	/* Reference dropped automatically when prev_bridge goes out of scope. */
	struct drm_bridge *prev_bridge __free(drm_bridge_put) =
		drm_bridge_get_prev_bridge(cur_bridge);
	u32 *in_bus_fmts;
	int ret;

	cur_state = drm_atomic_get_new_bridge_state(crtc_state->state,
						    cur_bridge);

	/*
	 * If bus format negotiation is not supported by this bridge, let's
	 * pass MEDIA_BUS_FMT_FIXED to the previous bridge in the chain and
	 * hope that it can handle this situation gracefully (by providing
	 * appropriate default values).
	 */
	if (!cur_bridge->funcs->atomic_get_input_bus_fmts) {
		if (cur_bridge != first_bridge) {
			ret = select_bus_fmt_recursive(first_bridge,
						       prev_bridge, crtc_state,
						       conn_state,
						       MEDIA_BUS_FMT_FIXED);
			if (ret)
				return ret;
		}

		/*
		 * Driver does not implement the atomic state hooks, but that's
		 * fine, as long as it does not access the bridge state.
		 */
		if (cur_state) {
			cur_state->input_bus_cfg.format = MEDIA_BUS_FMT_FIXED;
			cur_state->output_bus_cfg.format = out_bus_fmt;
		}

		return 0;
	}

	/*
	 * If the driver implements ->atomic_get_input_bus_fmts() it
	 * should also implement the atomic state hooks.
	 */
	if (WARN_ON(!cur_state))
		return -EINVAL;

	in_bus_fmts = cur_bridge->funcs->atomic_get_input_bus_fmts(cur_bridge,
								   cur_state,
								   crtc_state,
								   conn_state,
								   out_bus_fmt,
								   &num_in_bus_fmts);
	/* Zero candidates means this bridge cannot produce @out_bus_fmt. */
	if (!num_in_bus_fmts)
		return -ENOTSUPP;
	else if (!in_bus_fmts)
		return -ENOMEM;

	/* Recursion terminates at the bridge closest to the encoder. */
	if (first_bridge == cur_bridge) {
		cur_state->input_bus_cfg.format = in_bus_fmts[0];
		cur_state->output_bus_cfg.format = out_bus_fmt;
		kfree(in_bus_fmts);
		return 0;
	}

	/* Try each candidate input format until the rest of the chain agrees. */
	for (i = 0; i < num_in_bus_fmts; i++) {
		ret = select_bus_fmt_recursive(first_bridge, prev_bridge,
					       crtc_state, conn_state,
					       in_bus_fmts[i]);
		if (ret != -ENOTSUPP)
			break;
	}

	if (!ret) {
		cur_state->input_bus_cfg.format = in_bus_fmts[i];
		cur_state->output_bus_cfg.format = out_bus_fmt;
	}

	kfree(in_bus_fmts);
	return ret;
}
1152
1153 /*
1154 * This function is called by &drm_atomic_bridge_chain_check() just before
1155 * calling &drm_bridge_funcs.atomic_check() on all elements of the chain.
1156 * It performs bus format negotiation between bridge elements. The negotiation
1157 * happens in reverse order, starting from the last element in the chain up to
1158 * @bridge.
1159 *
1160 * Negotiation starts by retrieving supported output bus formats on the last
1161 * bridge element and testing them one by one. The test is recursive, meaning
1162 * that for each tested output format, the whole chain will be walked backward,
1163 * and each element will have to choose an input bus format that can be
1164 * transcoded to the requested output format. When a bridge element does not
1165 * support transcoding into a specific output format -ENOTSUPP is returned and
1166 * the next bridge element will have to try a different format. If none of the
1167 * combinations worked, -ENOTSUPP is returned and the atomic modeset will fail.
1168 *
1169 * This implementation is relying on
1170 * &drm_bridge_funcs.atomic_get_output_bus_fmts() and
1171 * &drm_bridge_funcs.atomic_get_input_bus_fmts() to gather supported
1172 * input/output formats.
1173 *
1174 * When &drm_bridge_funcs.atomic_get_output_bus_fmts() is not implemented by
1175 * the last element of the chain, &drm_atomic_bridge_chain_select_bus_fmts()
1176 * tries a single format: &drm_connector.display_info.bus_formats[0] if
1177 * available, MEDIA_BUS_FMT_FIXED otherwise.
1178 *
1179 * When &drm_bridge_funcs.atomic_get_input_bus_fmts() is not implemented,
1180 * &drm_atomic_bridge_chain_select_bus_fmts() skips the negotiation on the
1181 * bridge element that lacks this hook and asks the previous element in the
1182 * chain to try MEDIA_BUS_FMT_FIXED. It's up to bridge drivers to decide what
1183 * to do in that case (fail if they want to enforce bus format negotiation, or
1184 * provide a reasonable default if they need to support pipelines where not
1185 * all elements support bus format negotiation).
1186 */
static int
drm_atomic_bridge_chain_select_bus_fmts(struct drm_bridge *bridge,
					struct drm_crtc_state *crtc_state,
					struct drm_connector_state *conn_state)
{
	struct drm_connector *conn = conn_state->connector;
	struct drm_encoder *encoder = bridge->encoder;
	struct drm_bridge_state *last_bridge_state;
	unsigned int i, num_out_bus_fmts = 0;
	u32 *out_bus_fmts;
	int ret = 0;

	/* Reference dropped automatically when last_bridge goes out of scope. */
	struct drm_bridge *last_bridge __free(drm_bridge_put) =
		drm_bridge_get(list_last_entry(&encoder->bridge_chain,
					       struct drm_bridge, chain_node));
	last_bridge_state = drm_atomic_get_new_bridge_state(crtc_state->state,
							    last_bridge);

	if (last_bridge->funcs->atomic_get_output_bus_fmts) {
		const struct drm_bridge_funcs *funcs = last_bridge->funcs;

		/*
		 * If the driver implements ->atomic_get_output_bus_fmts() it
		 * should also implement the atomic state hooks.
		 */
		if (WARN_ON(!last_bridge_state))
			return -EINVAL;

		out_bus_fmts = funcs->atomic_get_output_bus_fmts(last_bridge,
								 last_bridge_state,
								 crtc_state,
								 conn_state,
								 &num_out_bus_fmts);
		if (!num_out_bus_fmts)
			return -ENOTSUPP;
		else if (!out_bus_fmts)
			return -ENOMEM;
	} else {
		/*
		 * No output format negotiation hook on the last bridge: try a
		 * single format, the connector's first reported bus format if
		 * available, MEDIA_BUS_FMT_FIXED otherwise.
		 */
		num_out_bus_fmts = 1;
		out_bus_fmts = kmalloc_obj(*out_bus_fmts);
		if (!out_bus_fmts)
			return -ENOMEM;

		if (conn->display_info.num_bus_formats &&
		    conn->display_info.bus_formats)
			out_bus_fmts[0] = conn->display_info.bus_formats[0];
		else
			out_bus_fmts[0] = MEDIA_BUS_FMT_FIXED;
	}

	/* Test each output format until the whole chain accepts one. */
	for (i = 0; i < num_out_bus_fmts; i++) {
		ret = select_bus_fmt_recursive(bridge, last_bridge, crtc_state,
					       conn_state, out_bus_fmts[i]);
		if (ret != -ENOTSUPP)
			break;
	}

	kfree(out_bus_fmts);

	return ret;
}
1248
/*
 * Default bus flags propagation for @bridge: take the output flags from the
 * next bridge's input end (or from the connector's display_info when @bridge
 * is the last one), then mirror them onto this bridge's own input end.
 */
static void
drm_atomic_bridge_propagate_bus_flags(struct drm_bridge *bridge,
				      struct drm_connector *conn,
				      struct drm_atomic_state *state)
{
	struct drm_bridge_state *bridge_state, *next_bridge_state;
	u32 output_flags = 0;

	bridge_state = drm_atomic_get_new_bridge_state(state, bridge);

	/* No bridge state attached to this bridge => nothing to propagate. */
	if (!bridge_state)
		return;

	/* Reference dropped automatically when next_bridge goes out of scope. */
	struct drm_bridge *next_bridge __free(drm_bridge_put) = drm_bridge_get_next_bridge(bridge);

	/*
	 * Let's try to apply the most common case here, that is, propagate
	 * display_info flags for the last bridge, and propagate the input
	 * flags of the next bridge element to the output end of the current
	 * bridge when the bridge is not the last one.
	 * There are exceptions to this rule, like when signal inversion is
	 * happening at the board level, but that's something drivers can deal
	 * with from their &drm_bridge_funcs.atomic_check() implementation by
	 * simply overriding the flags value we've set here.
	 */
	if (!next_bridge) {
		output_flags = conn->display_info.bus_flags;
	} else {
		next_bridge_state = drm_atomic_get_new_bridge_state(state,
								    next_bridge);
		/*
		 * No bridge state attached to the next bridge, just leave the
		 * flags to 0.
		 */
		if (next_bridge_state)
			output_flags = next_bridge_state->input_bus_cfg.flags;
	}

	bridge_state->output_bus_cfg.flags = output_flags;

	/*
	 * Propagate the output flags to the input end of the bridge. Again, it's
	 * not necessarily what all bridges want, but that's what most of them
	 * do, and by doing that by default we avoid forcing drivers to
	 * duplicate the "dummy propagation" logic.
	 */
	bridge_state->input_bus_cfg.flags = output_flags;
}
1298
1299 /**
1300 * drm_atomic_bridge_chain_check() - Do an atomic check on the bridge chain
1301 * @bridge: bridge control structure
1302 * @crtc_state: new CRTC state
1303 * @conn_state: new connector state
1304 *
1305 * First trigger a bus format negotiation before calling
1306 * &drm_bridge_funcs.atomic_check() (falls back on
1307 * &drm_bridge_funcs.mode_fixup()) op for all the bridges in the encoder chain,
1308 * starting from the last bridge to the first. These are called before calling
1309 * &drm_encoder_helper_funcs.atomic_check()
1310 *
1311 * RETURNS:
1312 * 0 on success, a negative error code on failure
1313 */
drm_atomic_bridge_chain_check(struct drm_bridge * bridge,struct drm_crtc_state * crtc_state,struct drm_connector_state * conn_state)1314 int drm_atomic_bridge_chain_check(struct drm_bridge *bridge,
1315 struct drm_crtc_state *crtc_state,
1316 struct drm_connector_state *conn_state)
1317 {
1318 struct drm_connector *conn = conn_state->connector;
1319 struct drm_encoder *encoder;
1320 struct drm_bridge *iter;
1321 int ret;
1322
1323 if (!bridge)
1324 return 0;
1325
1326 ret = drm_atomic_bridge_chain_select_bus_fmts(bridge, crtc_state,
1327 conn_state);
1328 if (ret)
1329 return ret;
1330
1331 encoder = bridge->encoder;
1332 list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) {
1333 int ret;
1334
1335 /*
1336 * Bus flags are propagated by default. If a bridge needs to
1337 * tweak the input bus flags for any reason, it should happen
1338 * in its &drm_bridge_funcs.atomic_check() implementation such
1339 * that preceding bridges in the chain can propagate the new
1340 * bus flags.
1341 */
1342 drm_atomic_bridge_propagate_bus_flags(iter, conn,
1343 crtc_state->state);
1344
1345 ret = drm_atomic_bridge_check(iter, crtc_state, conn_state);
1346 if (ret)
1347 return ret;
1348
1349 if (iter == bridge)
1350 break;
1351 }
1352
1353 return 0;
1354 }
1355 EXPORT_SYMBOL(drm_atomic_bridge_chain_check);
1356
1357 /**
1358 * drm_bridge_detect - check if anything is attached to the bridge output
1359 * @bridge: bridge control structure
1360 * @connector: attached connector
1361 *
1362 * If the bridge supports output detection, as reported by the
1363 * DRM_BRIDGE_OP_DETECT bridge ops flag, call &drm_bridge_funcs.detect for the
1364 * bridge and return the connection status. Otherwise return
1365 * connector_status_unknown.
1366 *
1367 * RETURNS:
1368 * The detection status on success, or connector_status_unknown if the bridge
1369 * doesn't support output detection.
1370 */
1371 enum drm_connector_status
drm_bridge_detect(struct drm_bridge * bridge,struct drm_connector * connector)1372 drm_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
1373 {
1374 if (!(bridge->ops & DRM_BRIDGE_OP_DETECT))
1375 return connector_status_unknown;
1376
1377 return bridge->funcs->detect(bridge, connector);
1378 }
1379 EXPORT_SYMBOL_GPL(drm_bridge_detect);
1380
1381 /**
1382 * drm_bridge_get_modes - fill all modes currently valid for the sink into the
1383 * @connector
1384 * @bridge: bridge control structure
1385 * @connector: the connector to fill with modes
1386 *
1387 * If the bridge supports output modes retrieval, as reported by the
1388 * DRM_BRIDGE_OP_MODES bridge ops flag, call &drm_bridge_funcs.get_modes to
1389 * fill the connector with all valid modes and return the number of modes
1390 * added. Otherwise return 0.
1391 *
1392 * RETURNS:
1393 * The number of modes added to the connector.
1394 */
drm_bridge_get_modes(struct drm_bridge * bridge,struct drm_connector * connector)1395 int drm_bridge_get_modes(struct drm_bridge *bridge,
1396 struct drm_connector *connector)
1397 {
1398 if (!(bridge->ops & DRM_BRIDGE_OP_MODES))
1399 return 0;
1400
1401 return bridge->funcs->get_modes(bridge, connector);
1402 }
1403 EXPORT_SYMBOL_GPL(drm_bridge_get_modes);
1404
1405 /**
1406 * drm_bridge_edid_read - read the EDID data of the connected display
1407 * @bridge: bridge control structure
1408 * @connector: the connector to read EDID for
1409 *
1410 * If the bridge supports output EDID retrieval, as reported by the
1411 * DRM_BRIDGE_OP_EDID bridge ops flag, call &drm_bridge_funcs.edid_read to get
1412 * the EDID and return it. Otherwise return NULL.
1413 *
1414 * RETURNS:
1415 * The retrieved EDID on success, or NULL otherwise.
1416 */
drm_bridge_edid_read(struct drm_bridge * bridge,struct drm_connector * connector)1417 const struct drm_edid *drm_bridge_edid_read(struct drm_bridge *bridge,
1418 struct drm_connector *connector)
1419 {
1420 if (!(bridge->ops & DRM_BRIDGE_OP_EDID))
1421 return NULL;
1422
1423 return bridge->funcs->edid_read(bridge, connector);
1424 }
1425 EXPORT_SYMBOL_GPL(drm_bridge_edid_read);
1426
1427 /**
1428 * drm_bridge_hpd_enable - enable hot plug detection for the bridge
1429 * @bridge: bridge control structure
1430 * @cb: hot-plug detection callback
1431 * @data: data to be passed to the hot-plug detection callback
1432 *
1433 * Call &drm_bridge_funcs.hpd_enable if implemented and register the given @cb
1434 * and @data as hot plug notification callback. From now on the @cb will be
1435 * called with @data when an output status change is detected by the bridge,
1436 * until hot plug notification gets disabled with drm_bridge_hpd_disable().
1437 *
1438 * Hot plug detection is supported only if the DRM_BRIDGE_OP_HPD flag is set in
1439 * bridge->ops. This function shall not be called when the flag is not set.
1440 *
1441 * Only one hot plug detection callback can be registered at a time, it is an
1442 * error to call this function when hot plug detection is already enabled for
1443 * the bridge.
1444 */
drm_bridge_hpd_enable(struct drm_bridge * bridge,void (* cb)(void * data,enum drm_connector_status status),void * data)1445 void drm_bridge_hpd_enable(struct drm_bridge *bridge,
1446 void (*cb)(void *data,
1447 enum drm_connector_status status),
1448 void *data)
1449 {
1450 if (!(bridge->ops & DRM_BRIDGE_OP_HPD))
1451 return;
1452
1453 mutex_lock(&bridge->hpd_mutex);
1454
1455 if (WARN(bridge->hpd_cb, "Hot plug detection already enabled\n"))
1456 goto unlock;
1457
1458 bridge->hpd_cb = cb;
1459 bridge->hpd_data = data;
1460
1461 if (bridge->funcs->hpd_enable)
1462 bridge->funcs->hpd_enable(bridge);
1463
1464 unlock:
1465 mutex_unlock(&bridge->hpd_mutex);
1466 }
1467 EXPORT_SYMBOL_GPL(drm_bridge_hpd_enable);
1468
1469 /**
1470 * drm_bridge_hpd_disable - disable hot plug detection for the bridge
1471 * @bridge: bridge control structure
1472 *
1473 * Call &drm_bridge_funcs.hpd_disable if implemented and unregister the hot
1474 * plug detection callback previously registered with drm_bridge_hpd_enable().
1475 * Once this function returns the callback will not be called by the bridge
1476 * when an output status change occurs.
1477 *
1478 * Hot plug detection is supported only if the DRM_BRIDGE_OP_HPD flag is set in
1479 * bridge->ops. This function shall not be called when the flag is not set.
1480 */
drm_bridge_hpd_disable(struct drm_bridge * bridge)1481 void drm_bridge_hpd_disable(struct drm_bridge *bridge)
1482 {
1483 if (!(bridge->ops & DRM_BRIDGE_OP_HPD))
1484 return;
1485
1486 mutex_lock(&bridge->hpd_mutex);
1487 if (bridge->funcs->hpd_disable)
1488 bridge->funcs->hpd_disable(bridge);
1489
1490 bridge->hpd_cb = NULL;
1491 bridge->hpd_data = NULL;
1492 mutex_unlock(&bridge->hpd_mutex);
1493 }
1494 EXPORT_SYMBOL_GPL(drm_bridge_hpd_disable);
1495
1496 /**
1497 * drm_bridge_hpd_notify - notify hot plug detection events
1498 * @bridge: bridge control structure
1499 * @status: output connection status
1500 *
1501 * Bridge drivers shall call this function to report hot plug events when they
1502 * detect a change in the output status, when hot plug detection has been
1503 * enabled by drm_bridge_hpd_enable().
1504 *
1505 * This function shall be called in a context that can sleep.
1506 */
drm_bridge_hpd_notify(struct drm_bridge * bridge,enum drm_connector_status status)1507 void drm_bridge_hpd_notify(struct drm_bridge *bridge,
1508 enum drm_connector_status status)
1509 {
1510 mutex_lock(&bridge->hpd_mutex);
1511 if (bridge->hpd_cb)
1512 bridge->hpd_cb(bridge->hpd_data, status);
1513 mutex_unlock(&bridge->hpd_mutex);
1514 }
1515 EXPORT_SYMBOL_GPL(drm_bridge_hpd_notify);
1516
1517 #ifdef CONFIG_OF
1518 /**
1519 * of_drm_find_and_get_bridge - find the bridge corresponding to the device
1520 * node in the global bridge list
1521 * @np: device node
1522 *
1523 * The refcount of the returned bridge is incremented. Use drm_bridge_put()
1524 * when done with it.
1525 *
1526 * RETURNS:
1527 * drm_bridge control struct on success, NULL on failure
1528 */
of_drm_find_and_get_bridge(struct device_node * np)1529 struct drm_bridge *of_drm_find_and_get_bridge(struct device_node *np)
1530 {
1531 struct drm_bridge *bridge;
1532
1533 scoped_guard(mutex, &bridge_lock) {
1534 list_for_each_entry(bridge, &bridge_list, list)
1535 if (bridge->of_node == np)
1536 return drm_bridge_get(bridge);
1537 }
1538
1539 return NULL;
1540 }
1541 EXPORT_SYMBOL(of_drm_find_and_get_bridge);
1542
1543 /**
1544 * of_drm_find_bridge - find the bridge corresponding to the device node in
1545 * the global bridge list
1546 *
1547 * @np: device node
1548 *
1549 * This function is deprecated. Convert to of_drm_find_and_get_bridge()
1550 * instead for proper refcounting.
1551 *
1552 * The bridge returned by this function is not refcounted. This is
1553 * dangerous because the bridge might be deallocated even before the caller
1554 * has a chance to use it. To use this function you have to do one of:
1555 *
1556 * - get a reference with drm_bridge_get() as soon as possible to
1557 * minimize the race window, and then drm_bridge_put() when no longer
1558 * using the pointer
1559 *
1560 * - not call drm_bridge_get() or drm_bridge_put() at all, which used to
1561 * be the correct practice before dynamic bridge lifetime was introduced
1562 *
1563 * - again, convert to of_drm_find_and_get_bridge(), which is the only safe
1564 * thing to do
1565 *
1566 * RETURNS:
1567 * drm_bridge control struct on success, NULL on failure
1568 */
struct drm_bridge *of_drm_find_bridge(struct device_node *np)
{
	struct drm_bridge *bridge;

	bridge = of_drm_find_and_get_bridge(np);

	/*
	 * Preserve the historical contract of this function: callers never
	 * received a reference, so drop the one taken by
	 * of_drm_find_and_get_bridge() before handing the pointer back.
	 */
	drm_bridge_put(bridge);

	return bridge;
}
EXPORT_SYMBOL(of_drm_find_bridge);
1584 #endif
1585
1586 /**
1587 * devm_drm_put_bridge - Release a bridge reference obtained via devm
1588 * @dev: device that got the bridge via devm
1589 * @bridge: pointer to a struct drm_bridge obtained via devm
1590 *
1591 * Same as drm_bridge_put() for bridge pointers obtained via devm functions
1592 * such as devm_drm_bridge_alloc().
1593 *
1594 * This function is a temporary workaround and MUST NOT be used. Manual
1595 * handling of bridge lifetime is inherently unsafe.
1596 */
void devm_drm_put_bridge(struct device *dev, struct drm_bridge *bridge)
{
	/* Run the devm release action now, which drops the bridge reference. */
	devm_release_action(dev, drm_bridge_put_void, bridge);
}
EXPORT_SYMBOL(devm_drm_put_bridge);
1602
/*
 * Print one bridge's debugfs summary: funcs symbol, refcount, connector
 * type, OF node (when still valid) and the supported ops as a flag list.
 */
static void drm_bridge_debugfs_show_bridge(struct drm_printer *p,
					   struct drm_bridge *bridge,
					   unsigned int idx,
					   bool lingering,
					   bool scoped)
{
	static const struct {
		u32 op;
		const char *name;
	} op_names[] = {
		{ DRM_BRIDGE_OP_DETECT, " detect" },
		{ DRM_BRIDGE_OP_EDID, " edid" },
		{ DRM_BRIDGE_OP_HPD, " hpd" },
		{ DRM_BRIDGE_OP_MODES, " modes" },
		{ DRM_BRIDGE_OP_HDMI, " hdmi" },
	};
	unsigned int refcount = kref_read(&bridge->refcount);
	unsigned int i;

	/* Don't count the reference held by the scoped iteration itself. */
	if (scoped)
		refcount--;

	drm_printf(p, "bridge[%u]: %ps\n", idx, bridge->funcs);

	drm_printf(p, "\trefcount: %u%s\n", refcount,
		   lingering ? " [lingering]" : "");

	drm_printf(p, "\ttype: [%d] %s\n",
		   bridge->type,
		   drm_get_connector_type_name(bridge->type));

	/* The OF node could be freed after drm_bridge_remove() */
	if (bridge->of_node && !lingering)
		drm_printf(p, "\tOF: %pOFfc\n", bridge->of_node);

	drm_printf(p, "\tops: [0x%x]", bridge->ops);
	for (i = 0; i < ARRAY_SIZE(op_names); i++) {
		if (bridge->ops & op_names[i].op)
			drm_puts(p, op_names[i].name);
	}
	drm_puts(p, "\n");
}
1640
allbridges_show(struct seq_file * m,void * data)1641 static int allbridges_show(struct seq_file *m, void *data)
1642 {
1643 struct drm_printer p = drm_seq_file_printer(m);
1644 struct drm_bridge *bridge;
1645 unsigned int idx = 0;
1646
1647 mutex_lock(&bridge_lock);
1648
1649 list_for_each_entry(bridge, &bridge_list, list)
1650 drm_bridge_debugfs_show_bridge(&p, bridge, idx++, false, false);
1651
1652 list_for_each_entry(bridge, &bridge_lingering_list, list)
1653 drm_bridge_debugfs_show_bridge(&p, bridge, idx++, true, false);
1654
1655 mutex_unlock(&bridge_lock);
1656
1657 return 0;
1658 }
1659 DEFINE_SHOW_ATTRIBUTE(allbridges);
1660
encoder_bridges_show(struct seq_file * m,void * data)1661 static int encoder_bridges_show(struct seq_file *m, void *data)
1662 {
1663 struct drm_encoder *encoder = m->private;
1664 struct drm_printer p = drm_seq_file_printer(m);
1665 unsigned int idx = 0;
1666
1667 drm_for_each_bridge_in_chain_scoped(encoder, bridge)
1668 drm_bridge_debugfs_show_bridge(&p, bridge, idx++, false, true);
1669
1670 return 0;
1671 }
1672 DEFINE_SHOW_ATTRIBUTE(encoder_bridges);
1673
void drm_bridge_debugfs_params(struct dentry *root)
{
	/* Global list of all bridges, registered and lingering. */
	debugfs_create_file("bridges", 0444, root, NULL, &allbridges_fops);
}
1678
void drm_bridge_debugfs_encoder_params(struct dentry *root,
				       struct drm_encoder *encoder)
{
	/* Per-encoder list of the bridges attached to its chain. */
	debugfs_create_file("bridges", 0444, root, encoder, &encoder_bridges_fops);
}
1685
1686 MODULE_AUTHOR("Ajay Kumar <ajaykumar.rs@samsung.com>");
1687 MODULE_DESCRIPTION("DRM bridge infrastructure");
1688 MODULE_LICENSE("GPL and additional rights");
1689