/*
 * Copyright (c) 2014 Samsung Electronics Co., Ltd
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <linux/err.h>
#include <linux/media-bus-format.h>
#include <linux/module.h>
#include <linux/mutex.h>

#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_edid.h>
#include <drm/drm_encoder.h>
#include <drm/drm_file.h>
#include <drm/drm_of.h>
#include <drm/drm_print.h>

#include "drm_crtc_internal.h"

/**
 * DOC: overview
 *
 * &struct drm_bridge represents a device that hangs on to an encoder. It is
 * useful when a regular &drm_encoder entity isn't enough to represent the
 * entire encoder chain.
 *
 * A bridge is always attached to a single &drm_encoder at a time, but can be
 * either connected to it directly, or through a chain of bridges::
 *
 *     [ CRTC ---> ] Encoder ---> Bridge A ---> Bridge B
 *
 * Here, the output of the encoder feeds into bridge A, which in turn feeds
 * into bridge B. Bridge chains can be arbitrarily long, and shall be fully
 * linear: chaining multiple bridges to the output of a bridge, or the same
 * bridge to the output of different bridges, is not supported.
 *
 * &drm_bridge, like &drm_panel, isn't a &drm_mode_object entity like planes,
 * CRTCs, encoders or connectors, and is hence not visible to userspace.
 * Bridges just provide additional hooks to get the desired output at the end
 * of the encoder chain.
 */

/**
 * DOC: display driver integration
 *
 * Display drivers are responsible for linking encoders with the first bridge
 * in the chains. This is done by acquiring the appropriate bridge with
 * devm_drm_of_get_bridge(). Once acquired, the bridge shall be attached to
 * the encoder with a call to drm_bridge_attach().
 *
 * Bridges are responsible for linking themselves with the next bridge in the
 * chain, if any. This is done the same way as for encoders, with the call to
 * drm_bridge_attach() occurring in the &drm_bridge_funcs.attach operation.
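 *
 * As a minimal sketch of this pattern (the OF graph port/endpoint numbers,
 * the encoder variable and the use of the DRM_BRIDGE_ATTACH_NO_CONNECTOR
 * flag are assumptions to be adapted to the platform), a display driver
 * typically does::
 *
 *     struct drm_bridge *bridge;
 *     int ret;
 *
 *     bridge = devm_drm_of_get_bridge(dev, dev->of_node, 0, 0);
 *     if (IS_ERR(bridge))
 *             return PTR_ERR(bridge);
 *
 *     ret = drm_bridge_attach(encoder, bridge, NULL,
 *                             DRM_BRIDGE_ATTACH_NO_CONNECTOR);
 *     if (ret)
 *             return ret;
 *
 * with the &drm_connector then created separately, for instance with
 * drm_bridge_connector_init().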
 *
 * Once these links are created, the bridges can participate along with
 * encoder functions to perform mode validation and fixup (through
 * drm_bridge_chain_mode_valid() and drm_atomic_bridge_chain_check()), mode
 * setting (through drm_bridge_chain_mode_set()), enable (through
 * drm_atomic_bridge_chain_pre_enable() and drm_atomic_bridge_chain_enable())
 * and disable (through drm_atomic_bridge_chain_disable() and
 * drm_atomic_bridge_chain_post_disable()). Those functions call the
 * corresponding operations provided in &drm_bridge_funcs in sequence for all
 * bridges in the chain.
 *
 * For display drivers that use the atomic helpers
 * drm_atomic_helper_check_modeset(),
 * drm_atomic_helper_commit_modeset_enables() and
 * drm_atomic_helper_commit_modeset_disables() (either directly in hand-rolled
 * commit check and commit tail handlers, or through the higher-level
 * drm_atomic_helper_check() and drm_atomic_helper_commit_tail() or
 * drm_atomic_helper_commit_tail_rpm() helpers), this is done transparently
 * and requires no intervention from the driver. For other drivers, the
 * relevant DRM bridge chain functions shall be called manually.
 *
 * Bridges also participate in implementing the &drm_connector at the end of
 * the bridge chain. Display drivers may use the drm_bridge_connector_init()
 * helper to create the &drm_connector, or implement it manually on top of the
 * connector-related operations exposed by the bridge (see the overview
 * documentation of bridge operations for more details).
 */

/**
 * DOC: special care dsi
 *
 * The interaction between the bridges and other frameworks involved in
 * the probing of the upstream driver and the bridge driver can be
 * challenging. Indeed, there are multiple cases that need to be
 * considered:
 *
 * - The upstream driver doesn't use the component framework and isn't a
 *   MIPI-DSI host. In this case, the bridge driver will probe at some
 *   point, and the upstream driver should try to probe again by returning
 *   -EPROBE_DEFER as long as the bridge driver hasn't probed.
 *
 * - The upstream driver doesn't use the component framework, but is a
 *   MIPI-DSI host. The bridge device is controlled through MIPI-DCS
 *   commands. In this case, the bridge device is a child of the display
 *   device, and when it probes it is assured that the display device (and
 *   MIPI-DSI host) is present. The upstream driver is assured that the
 *   bridge driver is connected between the &mipi_dsi_host_ops.attach and
 *   &mipi_dsi_host_ops.detach operations. Therefore, it must run
 *   mipi_dsi_host_register() in its probe function, and then run
 *   drm_bridge_attach() in its &mipi_dsi_host_ops.attach hook.
 *
 * - The upstream driver uses the component framework and is a MIPI-DSI
 *   host. The bridge device is controlled through MIPI-DCS commands. This
 *   is the same situation as above, and the driver can run
 *   mipi_dsi_host_register() in either its probe or bind hooks.
 *
 * - The upstream driver uses the component framework and is a MIPI-DSI
 *   host. The bridge device is controlled through a separate bus (such as
 *   I2C). In this case, there's no correlation between the probe of the
 *   bridge and upstream drivers, so care must be taken to avoid an endless
 *   EPROBE_DEFER loop, with each driver waiting for the other to probe.
 *
 * The ideal pattern to cover the last item (and all the others in the
 * MIPI-DSI host driver case) is to split the operations like this:
 *
 * - The MIPI-DSI host driver must run mipi_dsi_host_register() in its
 *   probe hook. It will make sure that the MIPI-DSI host sticks around,
 *   and that the driver's bind can be called.
 *
 * - In its probe hook, the bridge driver must try to find its MIPI-DSI
 *   host, register as a MIPI-DSI device and attach the MIPI-DSI device
 *   to its host. The bridge driver is now functional.
 *
 * - In its &struct mipi_dsi_host_ops.attach hook, the MIPI-DSI host can
 *   now add its component. Its bind hook will now be called, and since
 *   the bridge driver is attached and registered, we can now look for
 *   and attach it.
 *
 * At this point, we're certain that both the upstream driver and the
 * bridge driver are functional, and we can't have a deadlock-like
 * situation when probing. A condensed sketch of this pattern is given at
 * the end of the next section.
 */

/**
 * DOC: dsi bridge operations
 *
 * DSI host interfaces are expected to be implemented as bridges rather than
 * encoders. However, there are a few aspects of their operation that need to
 * be defined in order to provide a consistent interface.
 *
 * A DSI host should keep the PHY powered down until the pre_enable operation
 * is called. All lanes are in an undefined idle state up to this point, and
 * it must not be assumed that it is LP-11.
 * pre_enable should initialise the PHY, set the data lanes to LP-11, and the
 * clock lane to either LP-11 or HS depending on the mode_flag
 * %MIPI_DSI_CLOCK_NON_CONTINUOUS.
 *
 * Ordinarily the downstream bridge DSI peripheral pre_enable will have been
 * called before the DSI host's. If the DSI peripheral requires LP-11 and/or
 * the clock lane to be in HS mode prior to pre_enable, then it can set the
 * &pre_enable_prev_first flag to request that the pre_enable (and
 * post_disable) order be altered to enable the DSI host first.
 *
 * Either the CRTC being enabled, or the DSI host's enable operation, should
 * switch the host to actively transmitting video on the data lanes.
 *
 * The reverse also applies. The DSI host's disable operation or stopping the
 * CRTC should stop transmitting video, and the data lanes should return to
 * the LP-11 state. The DSI host's &post_disable operation should disable the
 * PHY. If the &pre_enable_prev_first flag is set, then the DSI peripheral's
 * bridge &post_disable will be called before the DSI host's post_disable.
 *
 * Whilst it is valid to call &host_transfer prior to pre_enable or after
 * post_disable, the exact state of the lanes is undefined at this point. The
 * DSI host should initialise the interface, transmit the data, and then
 * disable the interface again.
 *
 * Ultra Low Power State (ULPS) is not explicitly supported by DRM. If
 * implemented, it therefore needs to be handled entirely within the DSI host
 * driver.
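 *
 * As an illustration of the probe pattern described in the previous section,
 * a condensed sketch of the MIPI-DSI host side could look as follows. All
 * my_dsi_* names, the struct layout and the OF port/endpoint numbers are
 * hypothetical, and error handling is reduced to the minimum::
 *
 *     static int my_dsi_probe(struct platform_device *pdev)
 *     {
 *             struct my_dsi *dsi;
 *
 *             dsi = devm_kzalloc(&pdev->dev, sizeof(*dsi), GFP_KERNEL);
 *             if (!dsi)
 *                     return -ENOMEM;
 *
 *             dsi->host.dev = &pdev->dev;
 *             dsi->host.ops = &my_dsi_host_ops;
 *             platform_set_drvdata(pdev, dsi);
 *
 *             return mipi_dsi_host_register(&dsi->host);
 *     }
 *
 *     static int my_dsi_host_attach(struct mipi_dsi_host *host,
 *                                   struct mipi_dsi_device *device)
 *     {
 *             return component_add(host->dev, &my_dsi_component_ops);
 *     }
 *
 *     static int my_dsi_bind(struct device *dev, struct device *master,
 *                            void *data)
 *     {
 *             struct my_dsi *dsi = dev_get_drvdata(dev);
 *             struct drm_bridge *bridge;
 *
 *             bridge = devm_drm_of_get_bridge(dev, dev->of_node, 1, 0);
 *             if (IS_ERR(bridge))
 *                     return PTR_ERR(bridge);
 *
 *             return drm_bridge_attach(&dsi->encoder, bridge, NULL,
 *                                      DRM_BRIDGE_ATTACH_NO_CONNECTOR);
 *     }
 *
 * The bridge driver side registers itself as a MIPI-DSI device in its own
 * probe hook, as described above.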
196 */ 197 198 static DEFINE_MUTEX(bridge_lock); 199 static LIST_HEAD(bridge_list); 200 201 /** 202 * drm_bridge_add - add the given bridge to the global bridge list 203 * 204 * @bridge: bridge control structure 205 */ 206 void drm_bridge_add(struct drm_bridge *bridge) 207 { 208 mutex_init(&bridge->hpd_mutex); 209 210 if (bridge->ops & DRM_BRIDGE_OP_HDMI) 211 bridge->ycbcr_420_allowed = !!(bridge->supported_formats & 212 BIT(HDMI_COLORSPACE_YUV420)); 213 214 mutex_lock(&bridge_lock); 215 list_add_tail(&bridge->list, &bridge_list); 216 mutex_unlock(&bridge_lock); 217 } 218 EXPORT_SYMBOL(drm_bridge_add); 219 220 static void drm_bridge_remove_void(void *bridge) 221 { 222 drm_bridge_remove(bridge); 223 } 224 225 /** 226 * devm_drm_bridge_add - devm managed version of drm_bridge_add() 227 * 228 * @dev: device to tie the bridge lifetime to 229 * @bridge: bridge control structure 230 * 231 * This is the managed version of drm_bridge_add() which automatically 232 * calls drm_bridge_remove() when @dev is unbound. 233 * 234 * Return: 0 if no error or negative error code. 235 */ 236 int devm_drm_bridge_add(struct device *dev, struct drm_bridge *bridge) 237 { 238 drm_bridge_add(bridge); 239 return devm_add_action_or_reset(dev, drm_bridge_remove_void, bridge); 240 } 241 EXPORT_SYMBOL(devm_drm_bridge_add); 242 243 /** 244 * drm_bridge_remove - remove the given bridge from the global bridge list 245 * 246 * @bridge: bridge control structure 247 */ 248 void drm_bridge_remove(struct drm_bridge *bridge) 249 { 250 mutex_lock(&bridge_lock); 251 list_del_init(&bridge->list); 252 mutex_unlock(&bridge_lock); 253 254 mutex_destroy(&bridge->hpd_mutex); 255 } 256 EXPORT_SYMBOL(drm_bridge_remove); 257 258 static struct drm_private_state * 259 drm_bridge_atomic_duplicate_priv_state(struct drm_private_obj *obj) 260 { 261 struct drm_bridge *bridge = drm_priv_to_bridge(obj); 262 struct drm_bridge_state *state; 263 264 state = bridge->funcs->atomic_duplicate_state(bridge); 265 return state ? &state->base : NULL; 266 } 267 268 static void 269 drm_bridge_atomic_destroy_priv_state(struct drm_private_obj *obj, 270 struct drm_private_state *s) 271 { 272 struct drm_bridge_state *state = drm_priv_to_bridge_state(s); 273 struct drm_bridge *bridge = drm_priv_to_bridge(obj); 274 275 bridge->funcs->atomic_destroy_state(bridge, state); 276 } 277 278 static const struct drm_private_state_funcs drm_bridge_priv_state_funcs = { 279 .atomic_duplicate_state = drm_bridge_atomic_duplicate_priv_state, 280 .atomic_destroy_state = drm_bridge_atomic_destroy_priv_state, 281 }; 282 283 /** 284 * drm_bridge_attach - attach the bridge to an encoder's chain 285 * 286 * @encoder: DRM encoder 287 * @bridge: bridge to attach 288 * @previous: previous bridge in the chain (optional) 289 * @flags: DRM_BRIDGE_ATTACH_* flags 290 * 291 * Called by a kms driver to link the bridge to an encoder's chain. The previous 292 * argument specifies the previous bridge in the chain. If NULL, the bridge is 293 * linked directly at the encoder's output. Otherwise it is linked at the 294 * previous bridge's output. 295 * 296 * If non-NULL the previous bridge must be already attached by a call to this 297 * function. 298 * 299 * Note that bridges attached to encoders are auto-detached during encoder 300 * cleanup in drm_encoder_cleanup(), so drm_bridge_attach() should generally 301 * *not* be balanced with a drm_bridge_detach() in driver code. 
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
int drm_bridge_attach(struct drm_encoder *encoder, struct drm_bridge *bridge,
                      struct drm_bridge *previous,
                      enum drm_bridge_attach_flags flags)
{
        int ret;

        if (!encoder || !bridge)
                return -EINVAL;

        if (previous && (!previous->dev || previous->encoder != encoder))
                return -EINVAL;

        if (bridge->dev)
                return -EBUSY;

        bridge->dev = encoder->dev;
        bridge->encoder = encoder;

        if (previous)
                list_add(&bridge->chain_node, &previous->chain_node);
        else
                list_add(&bridge->chain_node, &encoder->bridge_chain);

        if (bridge->funcs->attach) {
                ret = bridge->funcs->attach(bridge, flags);
                if (ret < 0)
                        goto err_reset_bridge;
        }

        if (bridge->funcs->atomic_reset) {
                struct drm_bridge_state *state;

                state = bridge->funcs->atomic_reset(bridge);
                if (IS_ERR(state)) {
                        ret = PTR_ERR(state);
                        goto err_detach_bridge;
                }

                drm_atomic_private_obj_init(bridge->dev, &bridge->base,
                                            &state->base,
                                            &drm_bridge_priv_state_funcs);
        }

        return 0;

err_detach_bridge:
        if (bridge->funcs->detach)
                bridge->funcs->detach(bridge);

err_reset_bridge:
        bridge->dev = NULL;
        bridge->encoder = NULL;
        list_del(&bridge->chain_node);

        if (ret != -EPROBE_DEFER)
                DRM_ERROR("failed to attach bridge %pOF to encoder %s: %d\n",
                          bridge->of_node, encoder->name, ret);
        else
                dev_err_probe(encoder->dev->dev, -EPROBE_DEFER,
                              "failed to attach bridge %pOF to encoder %s\n",
                              bridge->of_node, encoder->name);

        return ret;
}
EXPORT_SYMBOL(drm_bridge_attach);

void drm_bridge_detach(struct drm_bridge *bridge)
{
        if (WARN_ON(!bridge))
                return;

        if (WARN_ON(!bridge->dev))
                return;

        if (bridge->funcs->atomic_reset)
                drm_atomic_private_obj_fini(&bridge->base);

        if (bridge->funcs->detach)
                bridge->funcs->detach(bridge);

        list_del(&bridge->chain_node);
        bridge->dev = NULL;
}

/**
 * DOC: bridge operations
 *
 * Bridge drivers expose operations through the &drm_bridge_funcs structure.
 * The DRM internals (atomic and CRTC helpers) use the helpers defined in
 * drm_bridge.c to call bridge operations. Those operations are divided into
 * three big categories to support different parts of the bridge usage.
 *
 * - The encoder-related operations support control of the bridges in the
 *   chain, and are roughly counterparts to the &drm_encoder_helper_funcs
 *   operations. They are used by the legacy CRTC and the atomic modeset
 *   helpers to perform mode validation, fixup and setting, and enable and
 *   disable the bridge automatically.
 *
 *   The enable and disable operations are split in
 *   &drm_bridge_funcs.pre_enable, &drm_bridge_funcs.enable,
 *   &drm_bridge_funcs.disable and &drm_bridge_funcs.post_disable to provide
 *   finer-grained control.
 *
 *   Bridge drivers may implement the legacy version of those operations, or
 *   the atomic version (prefixed with atomic\_), in which case they shall
 *   also implement the atomic state bookkeeping operations
 *   (&drm_bridge_funcs.atomic_duplicate_state,
 *   &drm_bridge_funcs.atomic_destroy_state and
 *   &drm_bridge_funcs.atomic_reset).
 *   Mixing atomic and non-atomic versions of the operations is not supported.
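 *
 *   As an illustration (the my_bridge_* handlers are hypothetical), a bridge
 *   driver implementing the atomic operations can rely on the generic state
 *   helpers for the bookkeeping::
 *
 *       static const struct drm_bridge_funcs my_bridge_funcs = {
 *               .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
 *               .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
 *               .atomic_reset = drm_atomic_helper_bridge_reset,
 *               .atomic_pre_enable = my_bridge_atomic_pre_enable,
 *               .atomic_enable = my_bridge_atomic_enable,
 *               .atomic_disable = my_bridge_atomic_disable,
 *               .atomic_post_disable = my_bridge_atomic_post_disable,
 *               .attach = my_bridge_attach,
 *       };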
 *
 * - The bus format negotiation operations
 *   &drm_bridge_funcs.atomic_get_output_bus_fmts and
 *   &drm_bridge_funcs.atomic_get_input_bus_fmts allow bridge drivers to
 *   negotiate the formats transmitted between bridges in the chain when
 *   multiple formats are supported. Negotiation for formats is performed
 *   transparently for display drivers by the atomic modeset helpers. Only
 *   atomic versions of those operations exist, so bridge drivers that need
 *   to implement them shall also implement the atomic version of the
 *   encoder-related operations. This feature is not supported by the legacy
 *   CRTC helpers.
 *
 * - The connector-related operations support implementing a &drm_connector
 *   based on a chain of bridges. DRM bridges traditionally create a
 *   &drm_connector for bridges meant to be used at the end of the chain.
 *   This puts additional burden on bridge drivers, especially for bridges
 *   that may be used in the middle of a chain or at the end of it.
 *   Furthermore, it requires all operations of the &drm_connector to be
 *   handled by a single bridge, which doesn't always match the hardware
 *   architecture.
 *
 *   To simplify bridge drivers and make the connector implementation more
 *   flexible, a new model allows bridges to unconditionally skip creation of
 *   &drm_connector and instead expose &drm_bridge_funcs operations to
 *   support an externally-implemented &drm_connector. Those operations are
 *   &drm_bridge_funcs.detect, &drm_bridge_funcs.get_modes,
 *   &drm_bridge_funcs.edid_read, &drm_bridge_funcs.hpd_notify,
 *   &drm_bridge_funcs.hpd_enable and &drm_bridge_funcs.hpd_disable. When
 *   implemented, display drivers shall create a &drm_connector instance for
 *   each chain of bridges, and implement those connector instances based on
 *   the bridge connector operations.
 *
 *   Bridge drivers shall implement the connector-related operations for all
 *   the features that the bridge hardware supports. For instance, if a
 *   bridge supports reading the EDID, &drm_bridge_funcs.edid_read shall be
 *   implemented. This however doesn't mean that the DDC lines are wired to
 *   the bridge on a particular platform, as they could also be connected to
 *   an I2C controller of the SoC. Support for the connector-related
 *   operations on the running platform is reported through the
 *   &drm_bridge.ops flags. Bridge drivers shall detect which operations they
 *   can support on the platform (usually this information is provided by
 *   ACPI or DT), and set the &drm_bridge.ops flags for all supported
 *   operations. A flag shall only be set if the corresponding
 *   &drm_bridge_funcs operation is implemented, but an implemented operation
 *   doesn't necessarily imply that the corresponding flag will be set.
 *   Display drivers shall use the &drm_bridge.ops flags to decide which
 *   bridge to delegate a connector operation to. This mechanism allows
 *   providing a single static const &drm_bridge_funcs instance in bridge
 *   drivers, improving security by storing function pointers in read-only
 *   memory.
 *
 *   In order to ease the transition, bridge drivers may support both the old
 *   and new models by making connector creation optional and implementing
 *   the connector-related bridge operations. Connector creation is then
 *   controlled by the flags argument to the drm_bridge_attach() function.
Display drivers 468 * that support the new model and create connectors themselves shall set the 469 * %DRM_BRIDGE_ATTACH_NO_CONNECTOR flag, and bridge drivers shall then skip 470 * connector creation. For intermediate bridges in the chain, the flag shall 471 * be passed to the drm_bridge_attach() call for the downstream bridge. 472 * Bridge drivers that implement the new model only shall return an error 473 * from their &drm_bridge_funcs.attach handler when the 474 * %DRM_BRIDGE_ATTACH_NO_CONNECTOR flag is not set. New display drivers 475 * should use the new model, and convert the bridge drivers they use if 476 * needed, in order to gradually transition to the new model. 477 */ 478 479 /** 480 * drm_bridge_chain_mode_valid - validate the mode against all bridges in the 481 * encoder chain. 482 * @bridge: bridge control structure 483 * @info: display info against which the mode shall be validated 484 * @mode: desired mode to be validated 485 * 486 * Calls &drm_bridge_funcs.mode_valid for all the bridges in the encoder 487 * chain, starting from the first bridge to the last. If at least one bridge 488 * does not accept the mode the function returns the error code. 489 * 490 * Note: the bridge passed should be the one closest to the encoder. 491 * 492 * RETURNS: 493 * MODE_OK on success, drm_mode_status Enum error code on failure 494 */ 495 enum drm_mode_status 496 drm_bridge_chain_mode_valid(struct drm_bridge *bridge, 497 const struct drm_display_info *info, 498 const struct drm_display_mode *mode) 499 { 500 struct drm_encoder *encoder; 501 502 if (!bridge) 503 return MODE_OK; 504 505 encoder = bridge->encoder; 506 list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) { 507 enum drm_mode_status ret; 508 509 if (!bridge->funcs->mode_valid) 510 continue; 511 512 ret = bridge->funcs->mode_valid(bridge, info, mode); 513 if (ret != MODE_OK) 514 return ret; 515 } 516 517 return MODE_OK; 518 } 519 EXPORT_SYMBOL(drm_bridge_chain_mode_valid); 520 521 /** 522 * drm_bridge_chain_mode_set - set proposed mode for all bridges in the 523 * encoder chain 524 * @bridge: bridge control structure 525 * @mode: desired mode to be set for the encoder chain 526 * @adjusted_mode: updated mode that works for this encoder chain 527 * 528 * Calls &drm_bridge_funcs.mode_set op for all the bridges in the 529 * encoder chain, starting from the first bridge to the last. 530 * 531 * Note: the bridge passed should be the one closest to the encoder 532 */ 533 void drm_bridge_chain_mode_set(struct drm_bridge *bridge, 534 const struct drm_display_mode *mode, 535 const struct drm_display_mode *adjusted_mode) 536 { 537 struct drm_encoder *encoder; 538 539 if (!bridge) 540 return; 541 542 encoder = bridge->encoder; 543 list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) { 544 if (bridge->funcs->mode_set) 545 bridge->funcs->mode_set(bridge, mode, adjusted_mode); 546 } 547 } 548 EXPORT_SYMBOL(drm_bridge_chain_mode_set); 549 550 /** 551 * drm_atomic_bridge_chain_disable - disables all bridges in the encoder chain 552 * @bridge: bridge control structure 553 * @old_state: old atomic state 554 * 555 * Calls &drm_bridge_funcs.atomic_disable (falls back on 556 * &drm_bridge_funcs.disable) op for all the bridges in the encoder chain, 557 * starting from the last bridge to the first. 
These are called before calling 558 * &drm_encoder_helper_funcs.atomic_disable 559 * 560 * Note: the bridge passed should be the one closest to the encoder 561 */ 562 void drm_atomic_bridge_chain_disable(struct drm_bridge *bridge, 563 struct drm_atomic_state *old_state) 564 { 565 struct drm_encoder *encoder; 566 struct drm_bridge *iter; 567 568 if (!bridge) 569 return; 570 571 encoder = bridge->encoder; 572 list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) { 573 if (iter->funcs->atomic_disable) { 574 struct drm_bridge_state *old_bridge_state; 575 576 old_bridge_state = 577 drm_atomic_get_old_bridge_state(old_state, 578 iter); 579 if (WARN_ON(!old_bridge_state)) 580 return; 581 582 iter->funcs->atomic_disable(iter, old_bridge_state); 583 } else if (iter->funcs->disable) { 584 iter->funcs->disable(iter); 585 } 586 587 if (iter == bridge) 588 break; 589 } 590 } 591 EXPORT_SYMBOL(drm_atomic_bridge_chain_disable); 592 593 static void drm_atomic_bridge_call_post_disable(struct drm_bridge *bridge, 594 struct drm_atomic_state *old_state) 595 { 596 if (old_state && bridge->funcs->atomic_post_disable) { 597 struct drm_bridge_state *old_bridge_state; 598 599 old_bridge_state = 600 drm_atomic_get_old_bridge_state(old_state, 601 bridge); 602 if (WARN_ON(!old_bridge_state)) 603 return; 604 605 bridge->funcs->atomic_post_disable(bridge, 606 old_bridge_state); 607 } else if (bridge->funcs->post_disable) { 608 bridge->funcs->post_disable(bridge); 609 } 610 } 611 612 /** 613 * drm_atomic_bridge_chain_post_disable - cleans up after disabling all bridges 614 * in the encoder chain 615 * @bridge: bridge control structure 616 * @old_state: old atomic state 617 * 618 * Calls &drm_bridge_funcs.atomic_post_disable (falls back on 619 * &drm_bridge_funcs.post_disable) op for all the bridges in the encoder chain, 620 * starting from the first bridge to the last. These are called after completing 621 * &drm_encoder_helper_funcs.atomic_disable 622 * 623 * If a bridge sets @pre_enable_prev_first, then the @post_disable for that 624 * bridge will be called before the previous one to reverse the @pre_enable 625 * calling direction. 626 * 627 * Example: 628 * Bridge A ---> Bridge B ---> Bridge C ---> Bridge D ---> Bridge E 629 * 630 * With pre_enable_prev_first flag enable in Bridge B, D, E then the resulting 631 * @post_disable order would be, 632 * Bridge B, Bridge A, Bridge E, Bridge D, Bridge C. 
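 *
 * A bridge driver opts into this reordering simply by setting the flag
 * before the bridge is added and attached, typically in its probe function
 * (a minimal sketch, the surrounding driver code is assumed)::
 *
 *     bridge->pre_enable_prev_first = true;
 *     drm_bridge_add(bridge);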
633 * 634 * Note: the bridge passed should be the one closest to the encoder 635 */ 636 void drm_atomic_bridge_chain_post_disable(struct drm_bridge *bridge, 637 struct drm_atomic_state *old_state) 638 { 639 struct drm_encoder *encoder; 640 struct drm_bridge *next, *limit; 641 642 if (!bridge) 643 return; 644 645 encoder = bridge->encoder; 646 647 list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) { 648 limit = NULL; 649 650 if (!list_is_last(&bridge->chain_node, &encoder->bridge_chain)) { 651 next = list_next_entry(bridge, chain_node); 652 653 if (next->pre_enable_prev_first) { 654 /* next bridge had requested that prev 655 * was enabled first, so disabled last 656 */ 657 limit = next; 658 659 /* Find the next bridge that has NOT requested 660 * prev to be enabled first / disabled last 661 */ 662 list_for_each_entry_from(next, &encoder->bridge_chain, 663 chain_node) { 664 if (!next->pre_enable_prev_first) { 665 next = list_prev_entry(next, chain_node); 666 limit = next; 667 break; 668 } 669 670 if (list_is_last(&next->chain_node, 671 &encoder->bridge_chain)) { 672 limit = next; 673 break; 674 } 675 } 676 677 /* Call these bridges in reverse order */ 678 list_for_each_entry_from_reverse(next, &encoder->bridge_chain, 679 chain_node) { 680 if (next == bridge) 681 break; 682 683 drm_atomic_bridge_call_post_disable(next, 684 old_state); 685 } 686 } 687 } 688 689 drm_atomic_bridge_call_post_disable(bridge, old_state); 690 691 if (limit) 692 /* Jump all bridges that we have already post_disabled */ 693 bridge = limit; 694 } 695 } 696 EXPORT_SYMBOL(drm_atomic_bridge_chain_post_disable); 697 698 static void drm_atomic_bridge_call_pre_enable(struct drm_bridge *bridge, 699 struct drm_atomic_state *old_state) 700 { 701 if (old_state && bridge->funcs->atomic_pre_enable) { 702 struct drm_bridge_state *old_bridge_state; 703 704 old_bridge_state = 705 drm_atomic_get_old_bridge_state(old_state, 706 bridge); 707 if (WARN_ON(!old_bridge_state)) 708 return; 709 710 bridge->funcs->atomic_pre_enable(bridge, old_bridge_state); 711 } else if (bridge->funcs->pre_enable) { 712 bridge->funcs->pre_enable(bridge); 713 } 714 } 715 716 /** 717 * drm_atomic_bridge_chain_pre_enable - prepares for enabling all bridges in 718 * the encoder chain 719 * @bridge: bridge control structure 720 * @old_state: old atomic state 721 * 722 * Calls &drm_bridge_funcs.atomic_pre_enable (falls back on 723 * &drm_bridge_funcs.pre_enable) op for all the bridges in the encoder chain, 724 * starting from the last bridge to the first. These are called before calling 725 * &drm_encoder_helper_funcs.atomic_enable 726 * 727 * If a bridge sets @pre_enable_prev_first, then the pre_enable for the 728 * prev bridge will be called before pre_enable of this bridge. 729 * 730 * Example: 731 * Bridge A ---> Bridge B ---> Bridge C ---> Bridge D ---> Bridge E 732 * 733 * With pre_enable_prev_first flag enable in Bridge B, D, E then the resulting 734 * @pre_enable order would be, 735 * Bridge C, Bridge D, Bridge E, Bridge A, Bridge B. 
736 * 737 * Note: the bridge passed should be the one closest to the encoder 738 */ 739 void drm_atomic_bridge_chain_pre_enable(struct drm_bridge *bridge, 740 struct drm_atomic_state *old_state) 741 { 742 struct drm_encoder *encoder; 743 struct drm_bridge *iter, *next, *limit; 744 745 if (!bridge) 746 return; 747 748 encoder = bridge->encoder; 749 750 list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) { 751 if (iter->pre_enable_prev_first) { 752 next = iter; 753 limit = bridge; 754 list_for_each_entry_from_reverse(next, 755 &encoder->bridge_chain, 756 chain_node) { 757 if (next == bridge) 758 break; 759 760 if (!next->pre_enable_prev_first) { 761 /* Found first bridge that does NOT 762 * request prev to be enabled first 763 */ 764 limit = next; 765 break; 766 } 767 } 768 769 list_for_each_entry_from(next, &encoder->bridge_chain, chain_node) { 770 /* Call requested prev bridge pre_enable 771 * in order. 772 */ 773 if (next == iter) 774 /* At the first bridge to request prev 775 * bridges called first. 776 */ 777 break; 778 779 drm_atomic_bridge_call_pre_enable(next, old_state); 780 } 781 } 782 783 drm_atomic_bridge_call_pre_enable(iter, old_state); 784 785 if (iter->pre_enable_prev_first) 786 /* Jump all bridges that we have already pre_enabled */ 787 iter = limit; 788 789 if (iter == bridge) 790 break; 791 } 792 } 793 EXPORT_SYMBOL(drm_atomic_bridge_chain_pre_enable); 794 795 /** 796 * drm_atomic_bridge_chain_enable - enables all bridges in the encoder chain 797 * @bridge: bridge control structure 798 * @old_state: old atomic state 799 * 800 * Calls &drm_bridge_funcs.atomic_enable (falls back on 801 * &drm_bridge_funcs.enable) op for all the bridges in the encoder chain, 802 * starting from the first bridge to the last. These are called after completing 803 * &drm_encoder_helper_funcs.atomic_enable 804 * 805 * Note: the bridge passed should be the one closest to the encoder 806 */ 807 void drm_atomic_bridge_chain_enable(struct drm_bridge *bridge, 808 struct drm_atomic_state *old_state) 809 { 810 struct drm_encoder *encoder; 811 812 if (!bridge) 813 return; 814 815 encoder = bridge->encoder; 816 list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) { 817 if (bridge->funcs->atomic_enable) { 818 struct drm_bridge_state *old_bridge_state; 819 820 old_bridge_state = 821 drm_atomic_get_old_bridge_state(old_state, 822 bridge); 823 if (WARN_ON(!old_bridge_state)) 824 return; 825 826 bridge->funcs->atomic_enable(bridge, old_bridge_state); 827 } else if (bridge->funcs->enable) { 828 bridge->funcs->enable(bridge); 829 } 830 } 831 } 832 EXPORT_SYMBOL(drm_atomic_bridge_chain_enable); 833 834 static int drm_atomic_bridge_check(struct drm_bridge *bridge, 835 struct drm_crtc_state *crtc_state, 836 struct drm_connector_state *conn_state) 837 { 838 if (bridge->funcs->atomic_check) { 839 struct drm_bridge_state *bridge_state; 840 int ret; 841 842 bridge_state = drm_atomic_get_new_bridge_state(crtc_state->state, 843 bridge); 844 if (WARN_ON(!bridge_state)) 845 return -EINVAL; 846 847 ret = bridge->funcs->atomic_check(bridge, bridge_state, 848 crtc_state, conn_state); 849 if (ret) 850 return ret; 851 } else if (bridge->funcs->mode_fixup) { 852 if (!bridge->funcs->mode_fixup(bridge, &crtc_state->mode, 853 &crtc_state->adjusted_mode)) 854 return -EINVAL; 855 } 856 857 return 0; 858 } 859 860 static int select_bus_fmt_recursive(struct drm_bridge *first_bridge, 861 struct drm_bridge *cur_bridge, 862 struct drm_crtc_state *crtc_state, 863 struct drm_connector_state *conn_state, 864 
u32 out_bus_fmt) 865 { 866 unsigned int i, num_in_bus_fmts = 0; 867 struct drm_bridge_state *cur_state; 868 struct drm_bridge *prev_bridge; 869 u32 *in_bus_fmts; 870 int ret; 871 872 prev_bridge = drm_bridge_get_prev_bridge(cur_bridge); 873 cur_state = drm_atomic_get_new_bridge_state(crtc_state->state, 874 cur_bridge); 875 876 /* 877 * If bus format negotiation is not supported by this bridge, let's 878 * pass MEDIA_BUS_FMT_FIXED to the previous bridge in the chain and 879 * hope that it can handle this situation gracefully (by providing 880 * appropriate default values). 881 */ 882 if (!cur_bridge->funcs->atomic_get_input_bus_fmts) { 883 if (cur_bridge != first_bridge) { 884 ret = select_bus_fmt_recursive(first_bridge, 885 prev_bridge, crtc_state, 886 conn_state, 887 MEDIA_BUS_FMT_FIXED); 888 if (ret) 889 return ret; 890 } 891 892 /* 893 * Driver does not implement the atomic state hooks, but that's 894 * fine, as long as it does not access the bridge state. 895 */ 896 if (cur_state) { 897 cur_state->input_bus_cfg.format = MEDIA_BUS_FMT_FIXED; 898 cur_state->output_bus_cfg.format = out_bus_fmt; 899 } 900 901 return 0; 902 } 903 904 /* 905 * If the driver implements ->atomic_get_input_bus_fmts() it 906 * should also implement the atomic state hooks. 907 */ 908 if (WARN_ON(!cur_state)) 909 return -EINVAL; 910 911 in_bus_fmts = cur_bridge->funcs->atomic_get_input_bus_fmts(cur_bridge, 912 cur_state, 913 crtc_state, 914 conn_state, 915 out_bus_fmt, 916 &num_in_bus_fmts); 917 if (!num_in_bus_fmts) 918 return -ENOTSUPP; 919 else if (!in_bus_fmts) 920 return -ENOMEM; 921 922 if (first_bridge == cur_bridge) { 923 cur_state->input_bus_cfg.format = in_bus_fmts[0]; 924 cur_state->output_bus_cfg.format = out_bus_fmt; 925 kfree(in_bus_fmts); 926 return 0; 927 } 928 929 for (i = 0; i < num_in_bus_fmts; i++) { 930 ret = select_bus_fmt_recursive(first_bridge, prev_bridge, 931 crtc_state, conn_state, 932 in_bus_fmts[i]); 933 if (ret != -ENOTSUPP) 934 break; 935 } 936 937 if (!ret) { 938 cur_state->input_bus_cfg.format = in_bus_fmts[i]; 939 cur_state->output_bus_cfg.format = out_bus_fmt; 940 } 941 942 kfree(in_bus_fmts); 943 return ret; 944 } 945 946 /* 947 * This function is called by &drm_atomic_bridge_chain_check() just before 948 * calling &drm_bridge_funcs.atomic_check() on all elements of the chain. 949 * It performs bus format negotiation between bridge elements. The negotiation 950 * happens in reverse order, starting from the last element in the chain up to 951 * @bridge. 952 * 953 * Negotiation starts by retrieving supported output bus formats on the last 954 * bridge element and testing them one by one. The test is recursive, meaning 955 * that for each tested output format, the whole chain will be walked backward, 956 * and each element will have to choose an input bus format that can be 957 * transcoded to the requested output format. When a bridge element does not 958 * support transcoding into a specific output format -ENOTSUPP is returned and 959 * the next bridge element will have to try a different format. If none of the 960 * combinations worked, -ENOTSUPP is returned and the atomic modeset will fail. 961 * 962 * This implementation is relying on 963 * &drm_bridge_funcs.atomic_get_output_bus_fmts() and 964 * &drm_bridge_funcs.atomic_get_input_bus_fmts() to gather supported 965 * input/output formats. 
966 * 967 * When &drm_bridge_funcs.atomic_get_output_bus_fmts() is not implemented by 968 * the last element of the chain, &drm_atomic_bridge_chain_select_bus_fmts() 969 * tries a single format: &drm_connector.display_info.bus_formats[0] if 970 * available, MEDIA_BUS_FMT_FIXED otherwise. 971 * 972 * When &drm_bridge_funcs.atomic_get_input_bus_fmts() is not implemented, 973 * &drm_atomic_bridge_chain_select_bus_fmts() skips the negotiation on the 974 * bridge element that lacks this hook and asks the previous element in the 975 * chain to try MEDIA_BUS_FMT_FIXED. It's up to bridge drivers to decide what 976 * to do in that case (fail if they want to enforce bus format negotiation, or 977 * provide a reasonable default if they need to support pipelines where not 978 * all elements support bus format negotiation). 979 */ 980 static int 981 drm_atomic_bridge_chain_select_bus_fmts(struct drm_bridge *bridge, 982 struct drm_crtc_state *crtc_state, 983 struct drm_connector_state *conn_state) 984 { 985 struct drm_connector *conn = conn_state->connector; 986 struct drm_encoder *encoder = bridge->encoder; 987 struct drm_bridge_state *last_bridge_state; 988 unsigned int i, num_out_bus_fmts = 0; 989 struct drm_bridge *last_bridge; 990 u32 *out_bus_fmts; 991 int ret = 0; 992 993 last_bridge = list_last_entry(&encoder->bridge_chain, 994 struct drm_bridge, chain_node); 995 last_bridge_state = drm_atomic_get_new_bridge_state(crtc_state->state, 996 last_bridge); 997 998 if (last_bridge->funcs->atomic_get_output_bus_fmts) { 999 const struct drm_bridge_funcs *funcs = last_bridge->funcs; 1000 1001 /* 1002 * If the driver implements ->atomic_get_output_bus_fmts() it 1003 * should also implement the atomic state hooks. 1004 */ 1005 if (WARN_ON(!last_bridge_state)) 1006 return -EINVAL; 1007 1008 out_bus_fmts = funcs->atomic_get_output_bus_fmts(last_bridge, 1009 last_bridge_state, 1010 crtc_state, 1011 conn_state, 1012 &num_out_bus_fmts); 1013 if (!num_out_bus_fmts) 1014 return -ENOTSUPP; 1015 else if (!out_bus_fmts) 1016 return -ENOMEM; 1017 } else { 1018 num_out_bus_fmts = 1; 1019 out_bus_fmts = kmalloc(sizeof(*out_bus_fmts), GFP_KERNEL); 1020 if (!out_bus_fmts) 1021 return -ENOMEM; 1022 1023 if (conn->display_info.num_bus_formats && 1024 conn->display_info.bus_formats) 1025 out_bus_fmts[0] = conn->display_info.bus_formats[0]; 1026 else 1027 out_bus_fmts[0] = MEDIA_BUS_FMT_FIXED; 1028 } 1029 1030 for (i = 0; i < num_out_bus_fmts; i++) { 1031 ret = select_bus_fmt_recursive(bridge, last_bridge, crtc_state, 1032 conn_state, out_bus_fmts[i]); 1033 if (ret != -ENOTSUPP) 1034 break; 1035 } 1036 1037 kfree(out_bus_fmts); 1038 1039 return ret; 1040 } 1041 1042 static void 1043 drm_atomic_bridge_propagate_bus_flags(struct drm_bridge *bridge, 1044 struct drm_connector *conn, 1045 struct drm_atomic_state *state) 1046 { 1047 struct drm_bridge_state *bridge_state, *next_bridge_state; 1048 struct drm_bridge *next_bridge; 1049 u32 output_flags = 0; 1050 1051 bridge_state = drm_atomic_get_new_bridge_state(state, bridge); 1052 1053 /* No bridge state attached to this bridge => nothing to propagate. */ 1054 if (!bridge_state) 1055 return; 1056 1057 next_bridge = drm_bridge_get_next_bridge(bridge); 1058 1059 /* 1060 * Let's try to apply the most common case here, that is, propagate 1061 * display_info flags for the last bridge, and propagate the input 1062 * flags of the next bridge element to the output end of the current 1063 * bridge when the bridge is not the last one. 
1064 * There are exceptions to this rule, like when signal inversion is 1065 * happening at the board level, but that's something drivers can deal 1066 * with from their &drm_bridge_funcs.atomic_check() implementation by 1067 * simply overriding the flags value we've set here. 1068 */ 1069 if (!next_bridge) { 1070 output_flags = conn->display_info.bus_flags; 1071 } else { 1072 next_bridge_state = drm_atomic_get_new_bridge_state(state, 1073 next_bridge); 1074 /* 1075 * No bridge state attached to the next bridge, just leave the 1076 * flags to 0. 1077 */ 1078 if (next_bridge_state) 1079 output_flags = next_bridge_state->input_bus_cfg.flags; 1080 } 1081 1082 bridge_state->output_bus_cfg.flags = output_flags; 1083 1084 /* 1085 * Propagate the output flags to the input end of the bridge. Again, it's 1086 * not necessarily what all bridges want, but that's what most of them 1087 * do, and by doing that by default we avoid forcing drivers to 1088 * duplicate the "dummy propagation" logic. 1089 */ 1090 bridge_state->input_bus_cfg.flags = output_flags; 1091 } 1092 1093 /** 1094 * drm_atomic_bridge_chain_check() - Do an atomic check on the bridge chain 1095 * @bridge: bridge control structure 1096 * @crtc_state: new CRTC state 1097 * @conn_state: new connector state 1098 * 1099 * First trigger a bus format negotiation before calling 1100 * &drm_bridge_funcs.atomic_check() (falls back on 1101 * &drm_bridge_funcs.mode_fixup()) op for all the bridges in the encoder chain, 1102 * starting from the last bridge to the first. These are called before calling 1103 * &drm_encoder_helper_funcs.atomic_check() 1104 * 1105 * RETURNS: 1106 * 0 on success, a negative error code on failure 1107 */ 1108 int drm_atomic_bridge_chain_check(struct drm_bridge *bridge, 1109 struct drm_crtc_state *crtc_state, 1110 struct drm_connector_state *conn_state) 1111 { 1112 struct drm_connector *conn = conn_state->connector; 1113 struct drm_encoder *encoder; 1114 struct drm_bridge *iter; 1115 int ret; 1116 1117 if (!bridge) 1118 return 0; 1119 1120 ret = drm_atomic_bridge_chain_select_bus_fmts(bridge, crtc_state, 1121 conn_state); 1122 if (ret) 1123 return ret; 1124 1125 encoder = bridge->encoder; 1126 list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) { 1127 int ret; 1128 1129 /* 1130 * Bus flags are propagated by default. If a bridge needs to 1131 * tweak the input bus flags for any reason, it should happen 1132 * in its &drm_bridge_funcs.atomic_check() implementation such 1133 * that preceding bridges in the chain can propagate the new 1134 * bus flags. 1135 */ 1136 drm_atomic_bridge_propagate_bus_flags(iter, conn, 1137 crtc_state->state); 1138 1139 ret = drm_atomic_bridge_check(iter, crtc_state, conn_state); 1140 if (ret) 1141 return ret; 1142 1143 if (iter == bridge) 1144 break; 1145 } 1146 1147 return 0; 1148 } 1149 EXPORT_SYMBOL(drm_atomic_bridge_chain_check); 1150 1151 /** 1152 * drm_bridge_detect - check if anything is attached to the bridge output 1153 * @bridge: bridge control structure 1154 * 1155 * If the bridge supports output detection, as reported by the 1156 * DRM_BRIDGE_OP_DETECT bridge ops flag, call &drm_bridge_funcs.detect for the 1157 * bridge and return the connection status. Otherwise return 1158 * connector_status_unknown. 1159 * 1160 * RETURNS: 1161 * The detection status on success, or connector_status_unknown if the bridge 1162 * doesn't support output detection. 
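 *
 * As an example, a display driver implementing its own &drm_connector may
 * delegate detection to the bridge closest to the connector. This is only a
 * sketch: the my_connector structure, its bridge pointer and the
 * to_my_connector() cast are hypothetical::
 *
 *     static enum drm_connector_status
 *     my_connector_detect(struct drm_connector *connector, bool force)
 *     {
 *             struct my_connector *conn = to_my_connector(connector);
 *
 *             return drm_bridge_detect(conn->bridge);
 *     }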
1163 */ 1164 enum drm_connector_status drm_bridge_detect(struct drm_bridge *bridge) 1165 { 1166 if (!(bridge->ops & DRM_BRIDGE_OP_DETECT)) 1167 return connector_status_unknown; 1168 1169 return bridge->funcs->detect(bridge); 1170 } 1171 EXPORT_SYMBOL_GPL(drm_bridge_detect); 1172 1173 /** 1174 * drm_bridge_get_modes - fill all modes currently valid for the sink into the 1175 * @connector 1176 * @bridge: bridge control structure 1177 * @connector: the connector to fill with modes 1178 * 1179 * If the bridge supports output modes retrieval, as reported by the 1180 * DRM_BRIDGE_OP_MODES bridge ops flag, call &drm_bridge_funcs.get_modes to 1181 * fill the connector with all valid modes and return the number of modes 1182 * added. Otherwise return 0. 1183 * 1184 * RETURNS: 1185 * The number of modes added to the connector. 1186 */ 1187 int drm_bridge_get_modes(struct drm_bridge *bridge, 1188 struct drm_connector *connector) 1189 { 1190 if (!(bridge->ops & DRM_BRIDGE_OP_MODES)) 1191 return 0; 1192 1193 return bridge->funcs->get_modes(bridge, connector); 1194 } 1195 EXPORT_SYMBOL_GPL(drm_bridge_get_modes); 1196 1197 /** 1198 * drm_bridge_edid_read - read the EDID data of the connected display 1199 * @bridge: bridge control structure 1200 * @connector: the connector to read EDID for 1201 * 1202 * If the bridge supports output EDID retrieval, as reported by the 1203 * DRM_BRIDGE_OP_EDID bridge ops flag, call &drm_bridge_funcs.edid_read to get 1204 * the EDID and return it. Otherwise return NULL. 1205 * 1206 * RETURNS: 1207 * The retrieved EDID on success, or NULL otherwise. 1208 */ 1209 const struct drm_edid *drm_bridge_edid_read(struct drm_bridge *bridge, 1210 struct drm_connector *connector) 1211 { 1212 if (!(bridge->ops & DRM_BRIDGE_OP_EDID)) 1213 return NULL; 1214 1215 return bridge->funcs->edid_read(bridge, connector); 1216 } 1217 EXPORT_SYMBOL_GPL(drm_bridge_edid_read); 1218 1219 /** 1220 * drm_bridge_hpd_enable - enable hot plug detection for the bridge 1221 * @bridge: bridge control structure 1222 * @cb: hot-plug detection callback 1223 * @data: data to be passed to the hot-plug detection callback 1224 * 1225 * Call &drm_bridge_funcs.hpd_enable if implemented and register the given @cb 1226 * and @data as hot plug notification callback. From now on the @cb will be 1227 * called with @data when an output status change is detected by the bridge, 1228 * until hot plug notification gets disabled with drm_bridge_hpd_disable(). 1229 * 1230 * Hot plug detection is supported only if the DRM_BRIDGE_OP_HPD flag is set in 1231 * bridge->ops. This function shall not be called when the flag is not set. 1232 * 1233 * Only one hot plug detection callback can be registered at a time, it is an 1234 * error to call this function when hot plug detection is already enabled for 1235 * the bridge. 
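 *
 * A typical consumer registers a callback that forwards the event to the DRM
 * core. This is only a sketch: the my_connector structure and the callback
 * name are hypothetical::
 *
 *     static void my_connector_hpd_cb(void *data,
 *                                     enum drm_connector_status status)
 *     {
 *             struct my_connector *conn = data;
 *
 *             drm_kms_helper_hotplug_event(conn->base.dev);
 *     }
 *
 *     drm_bridge_hpd_enable(bridge, my_connector_hpd_cb, conn);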
1236 */ 1237 void drm_bridge_hpd_enable(struct drm_bridge *bridge, 1238 void (*cb)(void *data, 1239 enum drm_connector_status status), 1240 void *data) 1241 { 1242 if (!(bridge->ops & DRM_BRIDGE_OP_HPD)) 1243 return; 1244 1245 mutex_lock(&bridge->hpd_mutex); 1246 1247 if (WARN(bridge->hpd_cb, "Hot plug detection already enabled\n")) 1248 goto unlock; 1249 1250 bridge->hpd_cb = cb; 1251 bridge->hpd_data = data; 1252 1253 if (bridge->funcs->hpd_enable) 1254 bridge->funcs->hpd_enable(bridge); 1255 1256 unlock: 1257 mutex_unlock(&bridge->hpd_mutex); 1258 } 1259 EXPORT_SYMBOL_GPL(drm_bridge_hpd_enable); 1260 1261 /** 1262 * drm_bridge_hpd_disable - disable hot plug detection for the bridge 1263 * @bridge: bridge control structure 1264 * 1265 * Call &drm_bridge_funcs.hpd_disable if implemented and unregister the hot 1266 * plug detection callback previously registered with drm_bridge_hpd_enable(). 1267 * Once this function returns the callback will not be called by the bridge 1268 * when an output status change occurs. 1269 * 1270 * Hot plug detection is supported only if the DRM_BRIDGE_OP_HPD flag is set in 1271 * bridge->ops. This function shall not be called when the flag is not set. 1272 */ 1273 void drm_bridge_hpd_disable(struct drm_bridge *bridge) 1274 { 1275 if (!(bridge->ops & DRM_BRIDGE_OP_HPD)) 1276 return; 1277 1278 mutex_lock(&bridge->hpd_mutex); 1279 if (bridge->funcs->hpd_disable) 1280 bridge->funcs->hpd_disable(bridge); 1281 1282 bridge->hpd_cb = NULL; 1283 bridge->hpd_data = NULL; 1284 mutex_unlock(&bridge->hpd_mutex); 1285 } 1286 EXPORT_SYMBOL_GPL(drm_bridge_hpd_disable); 1287 1288 /** 1289 * drm_bridge_hpd_notify - notify hot plug detection events 1290 * @bridge: bridge control structure 1291 * @status: output connection status 1292 * 1293 * Bridge drivers shall call this function to report hot plug events when they 1294 * detect a change in the output status, when hot plug detection has been 1295 * enabled by drm_bridge_hpd_enable(). 1296 * 1297 * This function shall be called in a context that can sleep. 1298 */ 1299 void drm_bridge_hpd_notify(struct drm_bridge *bridge, 1300 enum drm_connector_status status) 1301 { 1302 mutex_lock(&bridge->hpd_mutex); 1303 if (bridge->hpd_cb) 1304 bridge->hpd_cb(bridge->hpd_data, status); 1305 mutex_unlock(&bridge->hpd_mutex); 1306 } 1307 EXPORT_SYMBOL_GPL(drm_bridge_hpd_notify); 1308 1309 #ifdef CONFIG_OF 1310 /** 1311 * of_drm_find_bridge - find the bridge corresponding to the device node in 1312 * the global bridge list 1313 * 1314 * @np: device node 1315 * 1316 * RETURNS: 1317 * drm_bridge control struct on success, NULL on failure 1318 */ 1319 struct drm_bridge *of_drm_find_bridge(struct device_node *np) 1320 { 1321 struct drm_bridge *bridge; 1322 1323 mutex_lock(&bridge_lock); 1324 1325 list_for_each_entry(bridge, &bridge_list, list) { 1326 if (bridge->of_node == np) { 1327 mutex_unlock(&bridge_lock); 1328 return bridge; 1329 } 1330 } 1331 1332 mutex_unlock(&bridge_lock); 1333 return NULL; 1334 } 1335 EXPORT_SYMBOL(of_drm_find_bridge); 1336 #endif 1337 1338 MODULE_AUTHOR("Ajay Kumar <ajaykumar.rs@samsung.com>"); 1339 MODULE_DESCRIPTION("DRM bridge infrastructure"); 1340 MODULE_LICENSE("GPL and additional rights"); 1341