/*
 * Copyright (c) 2014 Samsung Electronics Co., Ltd
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/media-bus-format.h>
#include <linux/module.h>
#include <linux/mutex.h>

#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_edid.h>
#include <drm/drm_encoder.h>
#include <drm/drm_file.h>
#include <drm/drm_of.h>
#include <drm/drm_print.h>

#include "drm_crtc_internal.h"

/**
 * DOC: overview
 *
 * &struct drm_bridge represents a device that hangs on to an encoder. These
 * are handy when a regular &drm_encoder entity isn't enough to represent the
 * entire encoder chain.
 *
 * A bridge is always attached to a single &drm_encoder at a time, but can be
 * either connected to it directly, or through a chain of bridges::
 *
 *     [ CRTC ---> ] Encoder ---> Bridge A ---> Bridge B
 *
 * Here, the output of the encoder feeds to bridge A, and that further feeds
 * to bridge B. Bridge chains can be arbitrarily long, and shall be fully
 * linear: Chaining multiple bridges to the output of a bridge, or the same
 * bridge to the output of different bridges, is not supported.
 *
 * &drm_bridge, like &drm_panel, isn't a &drm_mode_object entity such as
 * planes, CRTCs, encoders or connectors, and is hence not visible to
 * userspace. It just provides additional hooks to get the desired output at
 * the end of the encoder chain.
 */

/**
 * DOC: display driver integration
 *
 * Display drivers are responsible for linking encoders with the first bridge
 * in the chains. This is done by acquiring the appropriate bridge with
 * devm_drm_of_get_bridge(). Once acquired, the bridge shall be attached to
 * the encoder with a call to drm_bridge_attach().
 *
 * Bridges are responsible for linking themselves with the next bridge in the
 * chain, if any. This is done the same way as for encoders, with the call to
 * drm_bridge_attach() occurring in the &drm_bridge_funcs.attach operation.
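 *
 * As a minimal, hypothetical sketch (the device, OF node and encoder pointers
 * are placeholders taken from the surrounding driver, not part of this API),
 * a display driver would typically do::
 *
 *	bridge = devm_drm_of_get_bridge(dev, dev->of_node, 0, 0);
 *	if (IS_ERR(bridge))
 *		return PTR_ERR(bridge);
 *
 *	ret = drm_bridge_attach(encoder, bridge, NULL, 0);
 *	if (ret)
 *		return ret;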
 *
 * Once these links are created, the bridges can participate along with
 * encoder functions to perform mode validation and fixup (through
 * drm_bridge_chain_mode_valid() and drm_atomic_bridge_chain_check()), mode
 * setting (through drm_bridge_chain_mode_set()), enable (through
 * drm_atomic_bridge_chain_pre_enable() and drm_atomic_bridge_chain_enable())
 * and disable (through drm_atomic_bridge_chain_disable() and
 * drm_atomic_bridge_chain_post_disable()). Those functions call the
 * corresponding operations provided in &drm_bridge_funcs in sequence for all
 * bridges in the chain.
 *
 * For display drivers that use the atomic helpers
 * drm_atomic_helper_check_modeset(),
 * drm_atomic_helper_commit_modeset_enables() and
 * drm_atomic_helper_commit_modeset_disables() (either directly in hand-rolled
 * commit check and commit tail handlers, or through the higher-level
 * drm_atomic_helper_check() and drm_atomic_helper_commit_tail() or
 * drm_atomic_helper_commit_tail_rpm() helpers), this is done transparently
 * and requires no intervention from the driver. For other drivers, the
 * relevant DRM bridge chain functions shall be called manually.
 *
 * Bridges also participate in implementing the &drm_connector at the end of
 * the bridge chain. Display drivers may use the drm_bridge_connector_init()
 * helper to create the &drm_connector, or implement it manually on top of the
 * connector-related operations exposed by the bridge (see the overview
 * documentation of bridge operations for more details).
 */

/**
 * DOC: special care dsi
 *
 * The interaction between the bridges and other frameworks involved in
 * the probing of the upstream driver and the bridge driver can be
 * challenging. Indeed, there are multiple cases that need to be
 * considered:
 *
 * - The upstream driver doesn't use the component framework and isn't a
 *   MIPI-DSI host. In this case, the bridge driver will probe at some
 *   point and the upstream driver should try to probe again by returning
 *   EPROBE_DEFER as long as the bridge driver hasn't probed.
 *
 * - The upstream driver doesn't use the component framework, but is a
 *   MIPI-DSI host. The bridge device is controlled through MIPI-DCS
 *   commands. In this case, the bridge device is a child of the
 *   display device, and when it probes it is assured that the display
 *   device (and MIPI-DSI host) is present. The upstream driver will be
 *   assured that the bridge driver is connected between the
 *   &mipi_dsi_host_ops.attach and &mipi_dsi_host_ops.detach operations.
 *   Therefore, it must run mipi_dsi_host_register() in its probe
 *   function, and then run drm_bridge_attach() in its
 *   &mipi_dsi_host_ops.attach hook.
 *
 * - The upstream driver uses the component framework and is a MIPI-DSI
 *   host. The bridge device is controlled through MIPI-DCS commands.
 *   This is the same situation as above, and it can run
 *   mipi_dsi_host_register() in either its probe or bind hooks.
 *
 * - The upstream driver uses the component framework and is a MIPI-DSI
 *   host. The bridge device uses a separate bus (such as I2C) to be
 *   controlled. In this case, there's no correlation between the probe
 *   of the bridge and upstream drivers, so care must be taken to avoid
 *   an endless EPROBE_DEFER loop, with each driver waiting for the
 *   other to probe.
 *
 * The ideal pattern to cover the last item (and all the others in the
 * MIPI-DSI host driver case) is to split the operations like this:
 *
 * - The MIPI-DSI host driver must run mipi_dsi_host_register() in its
 *   probe hook. It will make sure that the MIPI-DSI host sticks around,
 *   and that the driver's bind can be called.
 *
 * - In its probe hook, the bridge driver must try to find its MIPI-DSI
 *   host, register as a MIPI-DSI device and attach the MIPI-DSI device
 *   to its host. The bridge driver is now functional.
 *
 * - In its &struct mipi_dsi_host_ops.attach hook, the MIPI-DSI host can
 *   now add its component. Its bind hook will now be called and since
 *   the bridge driver is attached and registered, we can now look for
 *   and attach it.
 *
 * At this point, we're now certain that both the upstream driver and
 * the bridge driver are functional and we can't have a deadlock-like
 * situation when probing.
 */

/**
 * DOC: dsi bridge operations
 *
 * DSI host interfaces are expected to be implemented as bridges rather than
 * encoders; however, there are a few aspects of their operation that need to
 * be defined in order to provide a consistent interface.
 *
 * A DSI host should keep the PHY powered down until the pre_enable operation
 * is called. All lanes are in an undefined idle state up to this point, and
 * they must not be assumed to be in LP-11.
 * pre_enable should initialise the PHY, set the data lanes to LP-11, and the
 * clock lane to either LP-11 or HS depending on the mode_flag
 * %MIPI_DSI_CLOCK_NON_CONTINUOUS.
 *
 * Ordinarily the downstream bridge DSI peripheral pre_enable will have been
 * called before the DSI host. If the DSI peripheral requires LP-11 and/or
 * the clock lane to be in HS mode prior to pre_enable, then it can set the
 * &pre_enable_prev_first flag to request the pre_enable (and
 * post_disable) order to be altered to enable the DSI host first.
 *
 * Either the CRTC being enabled, or the DSI host enable operation should
 * switch the host to actively transmitting video on the data lanes.
 *
 * The reverse also applies. The DSI host disable operation or stopping the
 * CRTC should stop transmitting video, and the data lanes should return to
 * the LP-11 state. The DSI host &post_disable operation should disable the
 * PHY.
 * If the &pre_enable_prev_first flag is set, then the DSI peripheral's
 * bridge &post_disable will be called before the DSI host's post_disable.
 *
 * Whilst it is valid to call &host_transfer prior to pre_enable or after
 * post_disable, the exact state of the lanes is undefined at this point. The
 * DSI host should initialise the interface, transmit the data, and then
 * disable the interface again.
 *
 * Ultra Low Power State (ULPS) is not explicitly supported by DRM. If
 * implemented, it therefore needs to be handled entirely within the DSI Host
 * driver.
 */

static DEFINE_MUTEX(bridge_lock);
static LIST_HEAD(bridge_list);

static void __drm_bridge_free(struct kref *kref)
{
	struct drm_bridge *bridge = container_of(kref, struct drm_bridge, refcount);

	if (bridge->funcs->destroy)
		bridge->funcs->destroy(bridge);
	kfree(bridge->container);
}

/**
 * drm_bridge_get - Acquire a bridge reference
 * @bridge: DRM bridge
 *
 * This function increments the bridge's refcount.
 *
 * Returns:
 * Pointer to @bridge.
 */
struct drm_bridge *drm_bridge_get(struct drm_bridge *bridge)
{
	if (bridge)
		kref_get(&bridge->refcount);

	return bridge;
}
EXPORT_SYMBOL(drm_bridge_get);

/**
 * drm_bridge_put - Release a bridge reference
 * @bridge: DRM bridge
 *
 * This function decrements the bridge's reference count and frees the
 * object if the reference count drops to zero.
 */
void drm_bridge_put(struct drm_bridge *bridge)
{
	if (bridge)
		kref_put(&bridge->refcount, __drm_bridge_free);
}
EXPORT_SYMBOL(drm_bridge_put);

/**
 * drm_bridge_put_void - wrapper to drm_bridge_put() taking a void pointer
 *
 * @data: pointer to @struct drm_bridge, cast to a void pointer
 *
 * Wrapper of drm_bridge_put() to be used when a function taking a void
 * pointer is needed, for example as a devm action.
 */
static void drm_bridge_put_void(void *data)
{
	struct drm_bridge *bridge = (struct drm_bridge *)data;

	drm_bridge_put(bridge);
}

void *__devm_drm_bridge_alloc(struct device *dev, size_t size, size_t offset,
			      const struct drm_bridge_funcs *funcs)
{
	void *container;
	struct drm_bridge *bridge;
	int err;

	if (!funcs) {
		dev_warn(dev, "Missing funcs pointer\n");
		return ERR_PTR(-EINVAL);
	}

	container = kzalloc(size, GFP_KERNEL);
	if (!container)
		return ERR_PTR(-ENOMEM);

	bridge = container + offset;
	bridge->container = container;
	bridge->funcs = funcs;
	kref_init(&bridge->refcount);

	err = devm_add_action_or_reset(dev, drm_bridge_put_void, bridge);
	if (err)
		return ERR_PTR(err);

	return container;
}
EXPORT_SYMBOL(__devm_drm_bridge_alloc);

/**
 * drm_bridge_add - add the given bridge to the global bridge list
 *
 * @bridge: bridge control structure
 *
 * The bridge to be added must have been allocated by
 * devm_drm_bridge_alloc().
 */
void drm_bridge_add(struct drm_bridge *bridge)
{
	mutex_init(&bridge->hpd_mutex);

	if (bridge->ops & DRM_BRIDGE_OP_HDMI)
		bridge->ycbcr_420_allowed = !!(bridge->supported_formats &
					       BIT(HDMI_COLORSPACE_YUV420));

	mutex_lock(&bridge_lock);
	list_add_tail(&bridge->list, &bridge_list);
	mutex_unlock(&bridge_lock);
}
EXPORT_SYMBOL(drm_bridge_add);

static void drm_bridge_remove_void(void *bridge)
{
	drm_bridge_remove(bridge);
}

/**
 * devm_drm_bridge_add - devm managed version of drm_bridge_add()
 *
 * @dev: device to tie the bridge lifetime to
 * @bridge: bridge control structure
 *
 * This is the managed version of drm_bridge_add() which automatically
 * calls drm_bridge_remove() when @dev is unbound.
 *
 * Return: 0 if no error or negative error code.
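 *
 * A minimal sketch of the usual probe-time sequence, assuming a hypothetical
 * struct foo_bridge with an embedded &struct drm_bridge member named "bridge"
 * and a foo_bridge_funcs ops table::
 *
 *	foo = devm_drm_bridge_alloc(dev, struct foo_bridge, bridge,
 *				    &foo_bridge_funcs);
 *	if (IS_ERR(foo))
 *		return PTR_ERR(foo);
 *
 *	ret = devm_drm_bridge_add(dev, &foo->bridge);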
 */
int devm_drm_bridge_add(struct device *dev, struct drm_bridge *bridge)
{
	drm_bridge_add(bridge);
	return devm_add_action_or_reset(dev, drm_bridge_remove_void, bridge);
}
EXPORT_SYMBOL(devm_drm_bridge_add);

/**
 * drm_bridge_remove - remove the given bridge from the global bridge list
 *
 * @bridge: bridge control structure
 */
void drm_bridge_remove(struct drm_bridge *bridge)
{
	mutex_lock(&bridge_lock);
	list_del_init(&bridge->list);
	mutex_unlock(&bridge_lock);

	mutex_destroy(&bridge->hpd_mutex);
}
EXPORT_SYMBOL(drm_bridge_remove);

static struct drm_private_state *
drm_bridge_atomic_duplicate_priv_state(struct drm_private_obj *obj)
{
	struct drm_bridge *bridge = drm_priv_to_bridge(obj);
	struct drm_bridge_state *state;

	state = bridge->funcs->atomic_duplicate_state(bridge);
	return state ? &state->base : NULL;
}

static void
drm_bridge_atomic_destroy_priv_state(struct drm_private_obj *obj,
				     struct drm_private_state *s)
{
	struct drm_bridge_state *state = drm_priv_to_bridge_state(s);
	struct drm_bridge *bridge = drm_priv_to_bridge(obj);

	bridge->funcs->atomic_destroy_state(bridge, state);
}

static const struct drm_private_state_funcs drm_bridge_priv_state_funcs = {
	.atomic_duplicate_state = drm_bridge_atomic_duplicate_priv_state,
	.atomic_destroy_state = drm_bridge_atomic_destroy_priv_state,
};

static bool drm_bridge_is_atomic(struct drm_bridge *bridge)
{
	return bridge->funcs->atomic_reset != NULL;
}

/**
 * drm_bridge_attach - attach the bridge to an encoder's chain
 *
 * @encoder: DRM encoder
 * @bridge: bridge to attach
 * @previous: previous bridge in the chain (optional)
 * @flags: DRM_BRIDGE_ATTACH_* flags
 *
 * Called by a KMS driver to link the bridge to an encoder's chain. The
 * previous argument specifies the previous bridge in the chain. If NULL, the
 * bridge is linked directly at the encoder's output. Otherwise it is linked
 * at the previous bridge's output.
 *
 * If non-NULL, the previous bridge must already be attached by a call to this
 * function.
 *
 * Note that bridges attached to encoders are auto-detached during encoder
 * cleanup in drm_encoder_cleanup(), so drm_bridge_attach() should generally
 * *not* be balanced with a drm_bridge_detach() in driver code.
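 *
 * As an illustrative sketch only (struct foo_bridge, bridge_to_foo() and the
 * next_bridge field are hypothetical, not part of this API), a bridge driver
 * would typically chain its downstream bridge from its
 * &drm_bridge_funcs.attach implementation::
 *
 *	static int foo_bridge_attach(struct drm_bridge *bridge,
 *				     struct drm_encoder *encoder,
 *				     enum drm_bridge_attach_flags flags)
 *	{
 *		struct foo_bridge *foo = bridge_to_foo(bridge);
 *
 *		return drm_bridge_attach(encoder, foo->next_bridge, bridge, flags);
 *	}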
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
int drm_bridge_attach(struct drm_encoder *encoder, struct drm_bridge *bridge,
		      struct drm_bridge *previous,
		      enum drm_bridge_attach_flags flags)
{
	int ret;

	if (!encoder || !bridge)
		return -EINVAL;

	if (previous && (!previous->dev || previous->encoder != encoder))
		return -EINVAL;

	if (bridge->dev)
		return -EBUSY;

	bridge->dev = encoder->dev;
	bridge->encoder = encoder;

	if (previous)
		list_add(&bridge->chain_node, &previous->chain_node);
	else
		list_add(&bridge->chain_node, &encoder->bridge_chain);

	if (bridge->funcs->attach) {
		ret = bridge->funcs->attach(bridge, encoder, flags);
		if (ret < 0)
			goto err_reset_bridge;
	}

	if (drm_bridge_is_atomic(bridge)) {
		struct drm_bridge_state *state;

		state = bridge->funcs->atomic_reset(bridge);
		if (IS_ERR(state)) {
			ret = PTR_ERR(state);
			goto err_detach_bridge;
		}

		drm_atomic_private_obj_init(bridge->dev, &bridge->base,
					    &state->base,
					    &drm_bridge_priv_state_funcs);
	}

	return 0;

err_detach_bridge:
	if (bridge->funcs->detach)
		bridge->funcs->detach(bridge);

err_reset_bridge:
	bridge->dev = NULL;
	bridge->encoder = NULL;
	list_del(&bridge->chain_node);

	if (ret != -EPROBE_DEFER)
		DRM_ERROR("failed to attach bridge %pOF to encoder %s: %d\n",
			  bridge->of_node, encoder->name, ret);
	else
		dev_err_probe(encoder->dev->dev, -EPROBE_DEFER,
			      "failed to attach bridge %pOF to encoder %s\n",
			      bridge->of_node, encoder->name);

	return ret;
}
EXPORT_SYMBOL(drm_bridge_attach);

void drm_bridge_detach(struct drm_bridge *bridge)
{
	if (WARN_ON(!bridge))
		return;

	if (WARN_ON(!bridge->dev))
		return;

	if (drm_bridge_is_atomic(bridge))
		drm_atomic_private_obj_fini(&bridge->base);

	if (bridge->funcs->detach)
		bridge->funcs->detach(bridge);

	list_del(&bridge->chain_node);
	bridge->dev = NULL;
}

/**
 * DOC: bridge operations
 *
 * Bridge drivers expose operations through the &drm_bridge_funcs structure.
 * The DRM internals (atomic and CRTC helpers) use the helpers defined in
 * drm_bridge.c to call bridge operations. Those operations are divided into
 * three big categories to support different parts of the bridge usage.
 *
 * - The encoder-related operations support control of the bridges in the
 *   chain, and are roughly counterparts to the &drm_encoder_helper_funcs
 *   operations. They are used by the legacy CRTC and the atomic modeset
 *   helpers to perform mode validation, fixup and setting, and enable and
 *   disable the bridge automatically.
 *
 *   The enable and disable operations are split in
 *   &drm_bridge_funcs.pre_enable, &drm_bridge_funcs.enable,
 *   &drm_bridge_funcs.disable and &drm_bridge_funcs.post_disable to provide
 *   finer-grained control.
 *
 *   Bridge drivers may implement the legacy version of those operations, or
 *   the atomic version (prefixed with atomic\_), in which case they shall
 *   also implement the atomic state bookkeeping operations
 *   (&drm_bridge_funcs.atomic_duplicate_state,
 *   &drm_bridge_funcs.atomic_destroy_state and
 *   &drm_bridge_funcs.atomic_reset).
 *   Mixing atomic and non-atomic versions of the operations is not supported.
 *
 * - The bus format negotiation operations
 *   &drm_bridge_funcs.atomic_get_output_bus_fmts and
 *   &drm_bridge_funcs.atomic_get_input_bus_fmts allow bridge drivers to
 *   negotiate the formats transmitted between bridges in the chain when
 *   multiple formats are supported. Negotiation for formats is performed
 *   transparently for display drivers by the atomic modeset helpers. Only
 *   atomic versions of those operations exist; bridge drivers that need to
 *   implement them shall thus also implement the atomic version of the
 *   encoder-related operations. This feature is not supported by the legacy
 *   CRTC helpers.
 *
 * - The connector-related operations support implementing a &drm_connector
 *   based on a chain of bridges. DRM bridges traditionally create a
 *   &drm_connector for bridges meant to be used at the end of the chain. This
 *   puts additional burden on bridge drivers, especially for bridges that may
 *   be used in the middle of a chain or at the end of it. Furthermore, it
 *   requires all operations of the &drm_connector to be handled by a single
 *   bridge, which doesn't always match the hardware architecture.
 *
 *   To simplify bridge drivers and make the connector implementation more
 *   flexible, a new model allows bridges to unconditionally skip creation of
 *   &drm_connector and instead expose &drm_bridge_funcs operations to support
 *   an externally-implemented &drm_connector. Those operations are
 *   &drm_bridge_funcs.detect, &drm_bridge_funcs.get_modes,
 *   &drm_bridge_funcs.edid_read, &drm_bridge_funcs.hpd_notify,
 *   &drm_bridge_funcs.hpd_enable and &drm_bridge_funcs.hpd_disable. When
 *   implemented, display drivers shall create a &drm_connector instance for
 *   each chain of bridges, and implement those connector instances based on
 *   the bridge connector operations.
 *
 *   Bridge drivers shall implement the connector-related operations for all
 *   the features that the bridge hardware supports. For instance, if a bridge
 *   supports reading EDID, the &drm_bridge_funcs.edid_read shall be
 *   implemented. This however doesn't mean that the DDC lines are wired to
 *   the bridge on a particular platform, as they could also be connected to
 *   an I2C controller of the SoC. Support for the connector-related
 *   operations on the running platform is reported through the
 *   &drm_bridge.ops flags. Bridge drivers shall detect which operations they
 *   can support on the platform (usually this information is provided by ACPI
 *   or DT), and set the &drm_bridge.ops flags for all supported operations. A
 *   flag shall only be set if the corresponding &drm_bridge_funcs operation
 *   is implemented, but an implemented operation doesn't necessarily imply
 *   that the corresponding flag will be set. Display drivers shall use the
 *   &drm_bridge.ops flags to decide which bridge to delegate a connector
 *   operation to. This mechanism allows providing a single static const
 *   &drm_bridge_funcs instance in bridge drivers, improving security by
 *   storing function pointers in read-only memory.
 *
 *   In order to ease transition, bridge drivers may support both the old and
 *   new models by making connector creation optional and implementing the
 *   connector-related bridge operations. Connector creation is then
 *   controlled by the flags argument to the drm_bridge_attach() function.
 *   Display drivers that support the new model and create connectors
 *   themselves shall set the %DRM_BRIDGE_ATTACH_NO_CONNECTOR flag, and bridge
 *   drivers shall then skip connector creation. For intermediate bridges in
 *   the chain, the flag shall be passed to the drm_bridge_attach() call for
 *   the downstream bridge. Bridge drivers that implement the new model only
 *   shall return an error from their &drm_bridge_funcs.attach handler when
 *   the %DRM_BRIDGE_ATTACH_NO_CONNECTOR flag is not set. New display drivers
 *   should use the new model, and convert the bridge drivers they use if
 *   needed, in order to gradually transition to the new model.
 */

/**
 * drm_bridge_chain_mode_valid - validate the mode against all bridges in the
 *				 encoder chain.
 * @bridge: bridge control structure
 * @info: display info against which the mode shall be validated
 * @mode: desired mode to be validated
 *
 * Calls &drm_bridge_funcs.mode_valid for all the bridges in the encoder
 * chain, starting from the first bridge to the last. If at least one bridge
 * does not accept the mode, the function returns the error code.
 *
 * Note: the bridge passed should be the one closest to the encoder.
 *
 * RETURNS:
 * MODE_OK on success, drm_mode_status enum error code on failure
 */
enum drm_mode_status
drm_bridge_chain_mode_valid(struct drm_bridge *bridge,
			    const struct drm_display_info *info,
			    const struct drm_display_mode *mode)
{
	struct drm_encoder *encoder;

	if (!bridge)
		return MODE_OK;

	encoder = bridge->encoder;
	list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
		enum drm_mode_status ret;

		if (!bridge->funcs->mode_valid)
			continue;

		ret = bridge->funcs->mode_valid(bridge, info, mode);
		if (ret != MODE_OK)
			return ret;
	}

	return MODE_OK;
}
EXPORT_SYMBOL(drm_bridge_chain_mode_valid);

/**
 * drm_bridge_chain_mode_set - set proposed mode for all bridges in the
 *			       encoder chain
 * @bridge: bridge control structure
 * @mode: desired mode to be set for the encoder chain
 * @adjusted_mode: updated mode that works for this encoder chain
 *
 * Calls &drm_bridge_funcs.mode_set op for all the bridges in the
 * encoder chain, starting from the first bridge to the last.
 *
 * Note: the bridge passed should be the one closest to the encoder
 */
void drm_bridge_chain_mode_set(struct drm_bridge *bridge,
			       const struct drm_display_mode *mode,
			       const struct drm_display_mode *adjusted_mode)
{
	struct drm_encoder *encoder;

	if (!bridge)
		return;

	encoder = bridge->encoder;
	list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
		if (bridge->funcs->mode_set)
			bridge->funcs->mode_set(bridge, mode, adjusted_mode);
	}
}
EXPORT_SYMBOL(drm_bridge_chain_mode_set);

/**
 * drm_atomic_bridge_chain_disable - disables all bridges in the encoder chain
 * @bridge: bridge control structure
 * @state: atomic state being committed
 *
 * Calls &drm_bridge_funcs.atomic_disable (falls back on
 * &drm_bridge_funcs.disable) op for all the bridges in the encoder chain,
 * starting from the last bridge to the first.
 * These are called before calling &drm_encoder_helper_funcs.atomic_disable.
 *
 * Note: the bridge passed should be the one closest to the encoder
 */
void drm_atomic_bridge_chain_disable(struct drm_bridge *bridge,
				     struct drm_atomic_state *state)
{
	struct drm_encoder *encoder;
	struct drm_bridge *iter;

	if (!bridge)
		return;

	encoder = bridge->encoder;
	list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) {
		if (iter->funcs->atomic_disable) {
			iter->funcs->atomic_disable(iter, state);
		} else if (iter->funcs->disable) {
			iter->funcs->disable(iter);
		}

		if (iter == bridge)
			break;
	}
}
EXPORT_SYMBOL(drm_atomic_bridge_chain_disable);

static void drm_atomic_bridge_call_post_disable(struct drm_bridge *bridge,
						struct drm_atomic_state *state)
{
	if (state && bridge->funcs->atomic_post_disable)
		bridge->funcs->atomic_post_disable(bridge, state);
	else if (bridge->funcs->post_disable)
		bridge->funcs->post_disable(bridge);
}

/**
 * drm_atomic_bridge_chain_post_disable - cleans up after disabling all
 *					  bridges in the encoder chain
 * @bridge: bridge control structure
 * @state: atomic state being committed
 *
 * Calls &drm_bridge_funcs.atomic_post_disable (falls back on
 * &drm_bridge_funcs.post_disable) op for all the bridges in the encoder
 * chain, starting from the first bridge to the last. These are called after
 * completing &drm_encoder_helper_funcs.atomic_disable.
 *
 * If a bridge sets @pre_enable_prev_first, then the @post_disable for that
 * bridge will be called before the previous one to reverse the @pre_enable
 * calling direction.
 *
 * Example:
 * Bridge A ---> Bridge B ---> Bridge C ---> Bridge D ---> Bridge E
 *
 * With the pre_enable_prev_first flag enabled in bridges B, D and E, the
 * resulting @post_disable order would be:
 * Bridge B, Bridge A, Bridge E, Bridge D, Bridge C.
 *
 * Note: the bridge passed should be the one closest to the encoder
 */
void drm_atomic_bridge_chain_post_disable(struct drm_bridge *bridge,
					  struct drm_atomic_state *state)
{
	struct drm_encoder *encoder;
	struct drm_bridge *next, *limit;

	if (!bridge)
		return;

	encoder = bridge->encoder;

	list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
		limit = NULL;

		if (!list_is_last(&bridge->chain_node, &encoder->bridge_chain)) {
			next = list_next_entry(bridge, chain_node);

			if (next->pre_enable_prev_first) {
				/* next bridge had requested that prev
				 * was enabled first, so disabled last
				 */
				limit = next;

				/* Find the next bridge that has NOT requested
				 * prev to be enabled first / disabled last
				 */
				list_for_each_entry_from(next, &encoder->bridge_chain,
							 chain_node) {
					if (!next->pre_enable_prev_first) {
						next = list_prev_entry(next, chain_node);
						limit = next;
						break;
					}

					if (list_is_last(&next->chain_node,
							 &encoder->bridge_chain)) {
						limit = next;
						break;
					}
				}

				/* Call these bridges in reverse order */
				list_for_each_entry_from_reverse(next, &encoder->bridge_chain,
								 chain_node) {
					if (next == bridge)
						break;

					drm_atomic_bridge_call_post_disable(next,
									    state);
				}
			}
		}

		drm_atomic_bridge_call_post_disable(bridge, state);

		if (limit)
			/* Jump all bridges that we have already post_disabled */
			bridge = limit;
	}
}
EXPORT_SYMBOL(drm_atomic_bridge_chain_post_disable);

static void drm_atomic_bridge_call_pre_enable(struct drm_bridge *bridge,
					      struct drm_atomic_state *state)
{
	if (state && bridge->funcs->atomic_pre_enable)
		bridge->funcs->atomic_pre_enable(bridge, state);
	else if (bridge->funcs->pre_enable)
		bridge->funcs->pre_enable(bridge);
}

/**
 * drm_atomic_bridge_chain_pre_enable - prepares for enabling all bridges in
 *					the encoder chain
 * @bridge: bridge control structure
 * @state: atomic state being committed
 *
 * Calls &drm_bridge_funcs.atomic_pre_enable (falls back on
 * &drm_bridge_funcs.pre_enable) op for all the bridges in the encoder chain,
 * starting from the last bridge to the first. These are called before calling
 * &drm_encoder_helper_funcs.atomic_enable.
 *
 * If a bridge sets @pre_enable_prev_first, then the pre_enable for the
 * prev bridge will be called before the pre_enable of this bridge.
 *
 * Example:
 * Bridge A ---> Bridge B ---> Bridge C ---> Bridge D ---> Bridge E
 *
 * With the pre_enable_prev_first flag enabled in bridges B, D and E, the
 * resulting @pre_enable order would be:
 * Bridge C, Bridge D, Bridge E, Bridge A, Bridge B.
 *
 * Note: the bridge passed should be the one closest to the encoder
 */
void drm_atomic_bridge_chain_pre_enable(struct drm_bridge *bridge,
					struct drm_atomic_state *state)
{
	struct drm_encoder *encoder;
	struct drm_bridge *iter, *next, *limit;

	if (!bridge)
		return;

	encoder = bridge->encoder;

	list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) {
		if (iter->pre_enable_prev_first) {
			next = iter;
			limit = bridge;
			list_for_each_entry_from_reverse(next,
							 &encoder->bridge_chain,
							 chain_node) {
				if (next == bridge)
					break;

				if (!next->pre_enable_prev_first) {
					/* Found first bridge that does NOT
					 * request prev to be enabled first
					 */
					limit = next;
					break;
				}
			}

			list_for_each_entry_from(next, &encoder->bridge_chain, chain_node) {
				/* Call the pre_enable of the requested prev
				 * bridges in order.
				 */
				if (next == iter)
					/* Stop at the bridge that requested
					 * prev bridges to be called first.
					 */
					break;

				drm_atomic_bridge_call_pre_enable(next, state);
			}
		}

		drm_atomic_bridge_call_pre_enable(iter, state);

		if (iter->pre_enable_prev_first)
			/* Jump all bridges that we have already pre_enabled */
			iter = limit;

		if (iter == bridge)
			break;
	}
}
EXPORT_SYMBOL(drm_atomic_bridge_chain_pre_enable);

/**
 * drm_atomic_bridge_chain_enable - enables all bridges in the encoder chain
 * @bridge: bridge control structure
 * @state: atomic state being committed
 *
 * Calls &drm_bridge_funcs.atomic_enable (falls back on
 * &drm_bridge_funcs.enable) op for all the bridges in the encoder chain,
 * starting from the first bridge to the last. These are called after
 * completing &drm_encoder_helper_funcs.atomic_enable.
 *
 * Note: the bridge passed should be the one closest to the encoder
 */
void drm_atomic_bridge_chain_enable(struct drm_bridge *bridge,
				    struct drm_atomic_state *state)
{
	struct drm_encoder *encoder;

	if (!bridge)
		return;

	encoder = bridge->encoder;
	list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
		if (bridge->funcs->atomic_enable) {
			bridge->funcs->atomic_enable(bridge, state);
		} else if (bridge->funcs->enable) {
			bridge->funcs->enable(bridge);
		}
	}
}
EXPORT_SYMBOL(drm_atomic_bridge_chain_enable);

static int drm_atomic_bridge_check(struct drm_bridge *bridge,
				   struct drm_crtc_state *crtc_state,
				   struct drm_connector_state *conn_state)
{
	if (bridge->funcs->atomic_check) {
		struct drm_bridge_state *bridge_state;
		int ret;

		bridge_state = drm_atomic_get_new_bridge_state(crtc_state->state,
							       bridge);
		if (WARN_ON(!bridge_state))
			return -EINVAL;

		ret = bridge->funcs->atomic_check(bridge, bridge_state,
						  crtc_state, conn_state);
		if (ret)
			return ret;
	} else if (bridge->funcs->mode_fixup) {
		if (!bridge->funcs->mode_fixup(bridge, &crtc_state->mode,
					       &crtc_state->adjusted_mode))
			return -EINVAL;
	}

	return 0;
}

static int select_bus_fmt_recursive(struct drm_bridge *first_bridge,
				    struct drm_bridge *cur_bridge,
				    struct drm_crtc_state *crtc_state,
				    struct drm_connector_state *conn_state,
				    u32 out_bus_fmt)
{
	unsigned int i, num_in_bus_fmts = 0;
	struct drm_bridge_state *cur_state;
	struct drm_bridge *prev_bridge;
	u32 *in_bus_fmts;
	int ret;

	prev_bridge = drm_bridge_get_prev_bridge(cur_bridge);
	cur_state = drm_atomic_get_new_bridge_state(crtc_state->state,
						    cur_bridge);

	/*
	 * If bus format negotiation is not supported by this bridge, let's
	 * pass MEDIA_BUS_FMT_FIXED to the previous bridge in the chain and
	 * hope that it can handle this situation gracefully (by providing
	 * appropriate default values).
	 */
	if (!cur_bridge->funcs->atomic_get_input_bus_fmts) {
		if (cur_bridge != first_bridge) {
			ret = select_bus_fmt_recursive(first_bridge,
						       prev_bridge, crtc_state,
						       conn_state,
						       MEDIA_BUS_FMT_FIXED);
			if (ret)
				return ret;
		}

		/*
		 * Driver does not implement the atomic state hooks, but that's
		 * fine, as long as it does not access the bridge state.
		 */
		if (cur_state) {
			cur_state->input_bus_cfg.format = MEDIA_BUS_FMT_FIXED;
			cur_state->output_bus_cfg.format = out_bus_fmt;
		}

		return 0;
	}

	/*
	 * If the driver implements ->atomic_get_input_bus_fmts() it
	 * should also implement the atomic state hooks.
	 */
	if (WARN_ON(!cur_state))
		return -EINVAL;

	in_bus_fmts = cur_bridge->funcs->atomic_get_input_bus_fmts(cur_bridge,
								   cur_state,
								   crtc_state,
								   conn_state,
								   out_bus_fmt,
								   &num_in_bus_fmts);
	if (!num_in_bus_fmts)
		return -ENOTSUPP;
	else if (!in_bus_fmts)
		return -ENOMEM;

	if (first_bridge == cur_bridge) {
		cur_state->input_bus_cfg.format = in_bus_fmts[0];
		cur_state->output_bus_cfg.format = out_bus_fmt;
		kfree(in_bus_fmts);
		return 0;
	}

	for (i = 0; i < num_in_bus_fmts; i++) {
		ret = select_bus_fmt_recursive(first_bridge, prev_bridge,
					       crtc_state, conn_state,
					       in_bus_fmts[i]);
		if (ret != -ENOTSUPP)
			break;
	}

	if (!ret) {
		cur_state->input_bus_cfg.format = in_bus_fmts[i];
		cur_state->output_bus_cfg.format = out_bus_fmt;
	}

	kfree(in_bus_fmts);
	return ret;
}

/*
 * This function is called by &drm_atomic_bridge_chain_check() just before
 * calling &drm_bridge_funcs.atomic_check() on all elements of the chain.
 * It performs bus format negotiation between bridge elements. The negotiation
 * happens in reverse order, starting from the last element in the chain up to
 * @bridge.
 *
 * Negotiation starts by retrieving supported output bus formats on the last
 * bridge element and testing them one by one. The test is recursive, meaning
 * that for each tested output format, the whole chain will be walked
 * backward, and each element will have to choose an input bus format that can
 * be transcoded to the requested output format. When a bridge element does
 * not support transcoding into a specific output format, -ENOTSUPP is
 * returned and the next bridge element will have to try a different format.
 * If none of the combinations worked, -ENOTSUPP is returned and the atomic
 * modeset will fail.
 *
 * This implementation relies on
 * &drm_bridge_funcs.atomic_get_output_bus_fmts() and
 * &drm_bridge_funcs.atomic_get_input_bus_fmts() to gather supported
 * input/output formats.
 *
 * When &drm_bridge_funcs.atomic_get_output_bus_fmts() is not implemented by
 * the last element of the chain, &drm_atomic_bridge_chain_select_bus_fmts()
 * tries a single format: &drm_connector.display_info.bus_formats[0] if
 * available, MEDIA_BUS_FMT_FIXED otherwise.
 *
 * When &drm_bridge_funcs.atomic_get_input_bus_fmts() is not implemented,
 * &drm_atomic_bridge_chain_select_bus_fmts() skips the negotiation on the
 * bridge element that lacks this hook and asks the previous element in the
 * chain to try MEDIA_BUS_FMT_FIXED. It's up to bridge drivers to decide what
 * to do in that case (fail if they want to enforce bus format negotiation, or
 * provide a reasonable default if they need to support pipelines where not
 * all elements support bus format negotiation).
 */
static int
drm_atomic_bridge_chain_select_bus_fmts(struct drm_bridge *bridge,
					struct drm_crtc_state *crtc_state,
					struct drm_connector_state *conn_state)
{
	struct drm_connector *conn = conn_state->connector;
	struct drm_encoder *encoder = bridge->encoder;
	struct drm_bridge_state *last_bridge_state;
	unsigned int i, num_out_bus_fmts = 0;
	struct drm_bridge *last_bridge;
	u32 *out_bus_fmts;
	int ret = 0;

	last_bridge = list_last_entry(&encoder->bridge_chain,
				      struct drm_bridge, chain_node);
	last_bridge_state = drm_atomic_get_new_bridge_state(crtc_state->state,
							    last_bridge);

	if (last_bridge->funcs->atomic_get_output_bus_fmts) {
		const struct drm_bridge_funcs *funcs = last_bridge->funcs;

		/*
		 * If the driver implements ->atomic_get_output_bus_fmts() it
		 * should also implement the atomic state hooks.
		 */
		if (WARN_ON(!last_bridge_state))
			return -EINVAL;

		out_bus_fmts = funcs->atomic_get_output_bus_fmts(last_bridge,
								 last_bridge_state,
								 crtc_state,
								 conn_state,
								 &num_out_bus_fmts);
		if (!num_out_bus_fmts)
			return -ENOTSUPP;
		else if (!out_bus_fmts)
			return -ENOMEM;
	} else {
		num_out_bus_fmts = 1;
		out_bus_fmts = kmalloc(sizeof(*out_bus_fmts), GFP_KERNEL);
		if (!out_bus_fmts)
			return -ENOMEM;

		if (conn->display_info.num_bus_formats &&
		    conn->display_info.bus_formats)
			out_bus_fmts[0] = conn->display_info.bus_formats[0];
		else
			out_bus_fmts[0] = MEDIA_BUS_FMT_FIXED;
	}

	for (i = 0; i < num_out_bus_fmts; i++) {
		ret = select_bus_fmt_recursive(bridge, last_bridge, crtc_state,
					       conn_state, out_bus_fmts[i]);
		if (ret != -ENOTSUPP)
			break;
	}

	kfree(out_bus_fmts);

	return ret;
}

static void
drm_atomic_bridge_propagate_bus_flags(struct drm_bridge *bridge,
				      struct drm_connector *conn,
				      struct drm_atomic_state *state)
{
	struct drm_bridge_state *bridge_state, *next_bridge_state;
	struct drm_bridge *next_bridge;
	u32 output_flags = 0;

	bridge_state = drm_atomic_get_new_bridge_state(state, bridge);

	/* No bridge state attached to this bridge => nothing to propagate. */
	if (!bridge_state)
		return;

	next_bridge = drm_bridge_get_next_bridge(bridge);

	/*
	 * Let's try to apply the most common case here, that is, propagate
	 * display_info flags for the last bridge, and propagate the input
	 * flags of the next bridge element to the output end of the current
	 * bridge when the bridge is not the last one.
	 * There are exceptions to this rule, like when signal inversion is
	 * happening at the board level, but that's something drivers can deal
	 * with from their &drm_bridge_funcs.atomic_check() implementation by
	 * simply overriding the flags value we've set here.
	 */
	if (!next_bridge) {
		output_flags = conn->display_info.bus_flags;
	} else {
		next_bridge_state = drm_atomic_get_new_bridge_state(state,
								    next_bridge);
		/*
		 * No bridge state attached to the next bridge, just leave the
		 * flags to 0.
		 */
		if (next_bridge_state)
			output_flags = next_bridge_state->input_bus_cfg.flags;
	}

	bridge_state->output_bus_cfg.flags = output_flags;

	/*
	 * Propagate the output flags to the input end of the bridge. Again,
	 * it's not necessarily what all bridges want, but that's what most of
	 * them do, and by doing that by default we avoid forcing drivers to
	 * duplicate the "dummy propagation" logic.
	 */
	bridge_state->input_bus_cfg.flags = output_flags;
}

/**
 * drm_atomic_bridge_chain_check() - Do an atomic check on the bridge chain
 * @bridge: bridge control structure
 * @crtc_state: new CRTC state
 * @conn_state: new connector state
 *
 * First trigger a bus format negotiation before calling
 * &drm_bridge_funcs.atomic_check() (falls back on
 * &drm_bridge_funcs.mode_fixup()) op for all the bridges in the encoder
 * chain, starting from the last bridge to the first. These are called before
 * calling &drm_encoder_helper_funcs.atomic_check().
 *
 * RETURNS:
 * 0 on success, a negative error code on failure
 */
int drm_atomic_bridge_chain_check(struct drm_bridge *bridge,
				  struct drm_crtc_state *crtc_state,
				  struct drm_connector_state *conn_state)
{
	struct drm_connector *conn = conn_state->connector;
	struct drm_encoder *encoder;
	struct drm_bridge *iter;
	int ret;

	if (!bridge)
		return 0;

	ret = drm_atomic_bridge_chain_select_bus_fmts(bridge, crtc_state,
						      conn_state);
	if (ret)
		return ret;

	encoder = bridge->encoder;
	list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) {
		int ret;

		/*
		 * Bus flags are propagated by default. If a bridge needs to
		 * tweak the input bus flags for any reason, it should happen
		 * in its &drm_bridge_funcs.atomic_check() implementation such
		 * that preceding bridges in the chain can propagate the new
		 * bus flags.
		 */
		drm_atomic_bridge_propagate_bus_flags(iter, conn,
						      crtc_state->state);

		ret = drm_atomic_bridge_check(iter, crtc_state, conn_state);
		if (ret)
			return ret;

		if (iter == bridge)
			break;
	}

	return 0;
}
EXPORT_SYMBOL(drm_atomic_bridge_chain_check);

/**
 * drm_bridge_detect - check if anything is attached to the bridge output
 * @bridge: bridge control structure
 *
 * If the bridge supports output detection, as reported by the
 * DRM_BRIDGE_OP_DETECT bridge ops flag, call &drm_bridge_funcs.detect for the
 * bridge and return the connection status. Otherwise return
 * connector_status_unknown.
 *
 * RETURNS:
 * The detection status on success, or connector_status_unknown if the bridge
 * doesn't support output detection.
 */
enum drm_connector_status drm_bridge_detect(struct drm_bridge *bridge)
{
	if (!(bridge->ops & DRM_BRIDGE_OP_DETECT))
		return connector_status_unknown;

	return bridge->funcs->detect(bridge);
}
EXPORT_SYMBOL_GPL(drm_bridge_detect);

/**
 * drm_bridge_get_modes - fill all modes currently valid for the sink into the
 *			  @connector
 * @bridge: bridge control structure
 * @connector: the connector to fill with modes
 *
 * If the bridge supports output modes retrieval, as reported by the
 * DRM_BRIDGE_OP_MODES bridge ops flag, call &drm_bridge_funcs.get_modes to
 * fill the connector with all valid modes and return the number of modes
 * added. Otherwise return 0.
 *
 * RETURNS:
 * The number of modes added to the connector.
 */
int drm_bridge_get_modes(struct drm_bridge *bridge,
			 struct drm_connector *connector)
{
	if (!(bridge->ops & DRM_BRIDGE_OP_MODES))
		return 0;

	return bridge->funcs->get_modes(bridge, connector);
}
EXPORT_SYMBOL_GPL(drm_bridge_get_modes);

/**
 * drm_bridge_edid_read - read the EDID data of the connected display
 * @bridge: bridge control structure
 * @connector: the connector to read EDID for
 *
 * If the bridge supports output EDID retrieval, as reported by the
 * DRM_BRIDGE_OP_EDID bridge ops flag, call &drm_bridge_funcs.edid_read to get
 * the EDID and return it. Otherwise return NULL.
 *
 * RETURNS:
 * The retrieved EDID on success, or NULL otherwise.
 */
const struct drm_edid *drm_bridge_edid_read(struct drm_bridge *bridge,
					    struct drm_connector *connector)
{
	if (!(bridge->ops & DRM_BRIDGE_OP_EDID))
		return NULL;

	return bridge->funcs->edid_read(bridge, connector);
}
EXPORT_SYMBOL_GPL(drm_bridge_edid_read);

/**
 * drm_bridge_hpd_enable - enable hot plug detection for the bridge
 * @bridge: bridge control structure
 * @cb: hot-plug detection callback
 * @data: data to be passed to the hot-plug detection callback
 *
 * Call &drm_bridge_funcs.hpd_enable if implemented and register the given @cb
 * and @data as hot plug notification callback. From now on the @cb will be
 * called with @data when an output status change is detected by the bridge,
 * until hot plug notification gets disabled with drm_bridge_hpd_disable().
 *
 * Hot plug detection is supported only if the DRM_BRIDGE_OP_HPD flag is set
 * in bridge->ops. This function shall not be called when the flag is not set.
 *
 * Only one hot plug detection callback can be registered at a time; it is an
 * error to call this function when hot plug detection is already enabled for
 * the bridge.
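 *
 * A minimal caller-side sketch (the callback name and the use of the
 * connector as @data are illustrative assumptions, not requirements)::
 *
 *	static void demo_hpd_cb(void *data, enum drm_connector_status status)
 *	{
 *		struct drm_connector *connector = data;
 *
 *		drm_kms_helper_hotplug_event(connector->dev);
 *	}
 *
 *	if (bridge->ops & DRM_BRIDGE_OP_HPD)
 *		drm_bridge_hpd_enable(bridge, demo_hpd_cb, connector);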
 */
void drm_bridge_hpd_enable(struct drm_bridge *bridge,
			   void (*cb)(void *data,
				      enum drm_connector_status status),
			   void *data)
{
	if (!(bridge->ops & DRM_BRIDGE_OP_HPD))
		return;

	mutex_lock(&bridge->hpd_mutex);

	if (WARN(bridge->hpd_cb, "Hot plug detection already enabled\n"))
		goto unlock;

	bridge->hpd_cb = cb;
	bridge->hpd_data = data;

	if (bridge->funcs->hpd_enable)
		bridge->funcs->hpd_enable(bridge);

unlock:
	mutex_unlock(&bridge->hpd_mutex);
}
EXPORT_SYMBOL_GPL(drm_bridge_hpd_enable);

/**
 * drm_bridge_hpd_disable - disable hot plug detection for the bridge
 * @bridge: bridge control structure
 *
 * Call &drm_bridge_funcs.hpd_disable if implemented and unregister the hot
 * plug detection callback previously registered with drm_bridge_hpd_enable().
 * Once this function returns the callback will not be called by the bridge
 * when an output status change occurs.
 *
 * Hot plug detection is supported only if the DRM_BRIDGE_OP_HPD flag is set
 * in bridge->ops. This function shall not be called when the flag is not set.
 */
void drm_bridge_hpd_disable(struct drm_bridge *bridge)
{
	if (!(bridge->ops & DRM_BRIDGE_OP_HPD))
		return;

	mutex_lock(&bridge->hpd_mutex);
	if (bridge->funcs->hpd_disable)
		bridge->funcs->hpd_disable(bridge);

	bridge->hpd_cb = NULL;
	bridge->hpd_data = NULL;
	mutex_unlock(&bridge->hpd_mutex);
}
EXPORT_SYMBOL_GPL(drm_bridge_hpd_disable);

/**
 * drm_bridge_hpd_notify - notify hot plug detection events
 * @bridge: bridge control structure
 * @status: output connection status
 *
 * Bridge drivers shall call this function to report hot plug events when they
 * detect a change in the output status, when hot plug detection has been
 * enabled by drm_bridge_hpd_enable().
 *
 * This function shall be called in a context that can sleep.
 */
void drm_bridge_hpd_notify(struct drm_bridge *bridge,
			   enum drm_connector_status status)
{
	mutex_lock(&bridge->hpd_mutex);
	if (bridge->hpd_cb)
		bridge->hpd_cb(bridge->hpd_data, status);
	mutex_unlock(&bridge->hpd_mutex);
}
EXPORT_SYMBOL_GPL(drm_bridge_hpd_notify);

#ifdef CONFIG_OF
/**
 * of_drm_find_bridge - find the bridge corresponding to the device node in
 *			the global bridge list
 *
 * @np: device node
 *
 * RETURNS:
 * drm_bridge control struct on success, NULL on failure
 */
struct drm_bridge *of_drm_find_bridge(struct device_node *np)
{
	struct drm_bridge *bridge;

	mutex_lock(&bridge_lock);

	list_for_each_entry(bridge, &bridge_list, list) {
		if (bridge->of_node == np) {
			mutex_unlock(&bridge_lock);
			return bridge;
		}
	}

	mutex_unlock(&bridge_lock);
	return NULL;
}
EXPORT_SYMBOL(of_drm_find_bridge);
#endif

/**
 * devm_drm_put_bridge - Release a bridge reference obtained via devm
 * @dev: device that got the bridge via devm
 * @bridge: pointer to a struct drm_bridge obtained via devm
 *
 * Same as drm_bridge_put() for bridge pointers obtained via devm functions
 * such as devm_drm_bridge_alloc().
 *
 * This function is a temporary workaround and MUST NOT be used. Manual
 * handling of bridge lifetime is inherently unsafe.
 */
void devm_drm_put_bridge(struct device *dev, struct drm_bridge *bridge)
{
	devm_release_action(dev, drm_bridge_put_void, bridge);
}
EXPORT_SYMBOL(devm_drm_put_bridge);

static void drm_bridge_debugfs_show_bridge(struct drm_printer *p,
					   struct drm_bridge *bridge,
					   unsigned int idx)
{
	drm_printf(p, "bridge[%u]: %ps\n", idx, bridge->funcs);
	drm_printf(p, "\ttype: [%d] %s\n",
		   bridge->type,
		   drm_get_connector_type_name(bridge->type));

	if (bridge->of_node)
		drm_printf(p, "\tOF: %pOFfc\n", bridge->of_node);

	drm_printf(p, "\tops: [0x%x]", bridge->ops);
	if (bridge->ops & DRM_BRIDGE_OP_DETECT)
		drm_puts(p, " detect");
	if (bridge->ops & DRM_BRIDGE_OP_EDID)
		drm_puts(p, " edid");
	if (bridge->ops & DRM_BRIDGE_OP_HPD)
		drm_puts(p, " hpd");
	if (bridge->ops & DRM_BRIDGE_OP_MODES)
		drm_puts(p, " modes");
	if (bridge->ops & DRM_BRIDGE_OP_HDMI)
		drm_puts(p, " hdmi");
	drm_puts(p, "\n");
}

static int allbridges_show(struct seq_file *m, void *data)
{
	struct drm_printer p = drm_seq_file_printer(m);
	struct drm_bridge *bridge;
	unsigned int idx = 0;

	mutex_lock(&bridge_lock);

	list_for_each_entry(bridge, &bridge_list, list)
		drm_bridge_debugfs_show_bridge(&p, bridge, idx++);

	mutex_unlock(&bridge_lock);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(allbridges);

static int encoder_bridges_show(struct seq_file *m, void *data)
{
	struct drm_encoder *encoder = m->private;
	struct drm_printer p = drm_seq_file_printer(m);
	struct drm_bridge *bridge;
	unsigned int idx = 0;

	drm_for_each_bridge_in_chain(encoder, bridge)
		drm_bridge_debugfs_show_bridge(&p, bridge, idx++);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(encoder_bridges);

void drm_bridge_debugfs_params(struct dentry *root)
{
	debugfs_create_file("bridges", 0444, root, NULL, &allbridges_fops);
}

void drm_bridge_debugfs_encoder_params(struct dentry *root,
				       struct drm_encoder *encoder)
{
	/* bridges list */
	debugfs_create_file("bridges", 0444, root, encoder, &encoder_bridges_fops);
}

MODULE_AUTHOR("Ajay Kumar <ajaykumar.rs@samsung.com>");
MODULE_DESCRIPTION("DRM bridge infrastructure");
MODULE_LICENSE("GPL and additional rights");