/*
 * Copyright (C) 2015-2020 Advanced Micro Devices, Inc. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#ifndef __AMDGPU_DM_H__
#define __AMDGPU_DM_H__

#include <drm/display/drm_dp_mst_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_connector.h>
#include <drm/drm_crtc.h>
#include <drm/drm_plane.h>
#include "link_service_types.h"
#include <drm/drm_writeback.h>

/*
 * This file contains the definition for amdgpu_display_manager
 * and its API for amdgpu driver's use.
 * This component provides all the display related functionality
 * and this is the only component that calls DAL API.
 * The API contained here is intended for amdgpu driver use.
 * The API that is called directly from KMS framework is located
 * in amdgpu_dm_kms.h file
 */

#define AMDGPU_DM_MAX_DISPLAY_INDEX 31

#define AMDGPU_DM_MAX_CRTC 6

#define AMDGPU_DM_MAX_NUM_EDP 2

#define AMDGPU_DMUB_NOTIFICATION_MAX 8

#define HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_IEEE_REGISTRATION_ID 0x00001A
#define AMD_VSDB_VERSION_3_FEATURECAP_REPLAYMODE 0x40
#define HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_VERSION_3 0x3

#define AMDGPU_HDR_MULT_DEFAULT (0x100000000LL)

/*
#include "include/amdgpu_dal_power_if.h"
#include "amdgpu_dm_irq.h"
*/

#include "irq_types.h"
#include "signal_types.h"
#include "amdgpu_dm_crc.h"
#include "mod_info_packet.h"
struct aux_payload;
struct set_config_cmd_payload;
enum aux_return_code_type;
enum set_config_status;

/* Forward declarations */
struct amdgpu_device;
struct amdgpu_crtc;
struct drm_device;
struct dc;
struct amdgpu_bo;
struct dmub_srv;
struct dc_plane_state;
struct dmub_notification;
struct dmub_cmd_fused_request;

struct amd_vsdb_block {
	unsigned char ieee_id[3];
	unsigned char version;
	unsigned char feature_caps;
};

struct common_irq_params {
	struct amdgpu_device *adev;
	enum dc_irq_source irq_src;
	atomic64_t previous_timestamp;
};

/**
 * struct dm_compressor_info - Buffer info used by frame buffer compression
 * @cpu_addr: MMIO cpu addr
 * @bo_ptr: Pointer to the buffer object
 * @gpu_addr: MMIO gpu addr
 */
struct dm_compressor_info {
	void *cpu_addr;
	struct amdgpu_bo *bo_ptr;
	uint64_t gpu_addr;
};

typedef void (*dmub_notify_interrupt_callback_t)(struct amdgpu_device *adev, struct dmub_notification *notify);

/**
 * struct dmub_hpd_work - Handle time consuming work in low priority outbox IRQ
 *
 * @handle_hpd_work: Work to be executed in a separate thread to handle hpd_low_irq
 * @dmub_notify: notification for callback function
 * @adev: amdgpu_device pointer
 */
struct dmub_hpd_work {
	struct work_struct handle_hpd_work;
	struct dmub_notification *dmub_notify;
	struct amdgpu_device *adev;
};

/**
 * struct vblank_control_work - Work data for vblank control
 * @work: Kernel work data for the work event
 * @dm: amdgpu display manager device
 * @acrtc: amdgpu CRTC instance for which the event has occurred
 * @stream: DC stream for which the event has occurred
 * @enable: true if enabling vblank
 */
struct vblank_control_work {
	struct work_struct work;
	struct amdgpu_display_manager *dm;
	struct amdgpu_crtc *acrtc;
	struct dc_stream_state *stream;
	bool enable;
};

/**
 * struct idle_workqueue - Work data for periodic action in idle
 * @work: Kernel work data for the work event
 * @dm: amdgpu display manager device
 * @enable: true if idle worker is enabled
 * @running: true if idle worker is running
 */
struct idle_workqueue {
	struct work_struct work;
	struct amdgpu_display_manager *dm;
	bool enable;
	bool running;
};

#define MAX_LUMINANCE_DATA_POINTS 99

/**
 * struct amdgpu_dm_luminance_data - Custom luminance data
 * @luminance: Luminance in percent
 * @input_signal: Input signal in range 0-255
 */
struct amdgpu_dm_luminance_data {
	u8 luminance;
	u8 input_signal;
} __packed;

/**
 * struct amdgpu_dm_backlight_caps - Information about backlight
 *
 * Describe the backlight support for ACPI or eDP AUX.
 */
struct amdgpu_dm_backlight_caps {
	/**
	 * @ext_caps: Keep the data struct with all the information about the
	 * display support for HDR.
	 */
	union dpcd_sink_ext_caps *ext_caps;
	/**
	 * @aux_min_input_signal: Min brightness value supported by the display
	 */
	u32 aux_min_input_signal;
	/**
	 * @aux_max_input_signal: Max brightness value supported by the display
	 * in nits.
	 */
	u32 aux_max_input_signal;
	/**
	 * @min_input_signal: minimum possible input in range 0-255.
	 */
	int min_input_signal;
	/**
	 * @max_input_signal: maximum possible input in range 0-255.
	 */
	int max_input_signal;
	/**
	 * @caps_valid: true if these values are from the ACPI interface.
	 */
	bool caps_valid;
	/**
	 * @aux_support: Describes if the display supports AUX backlight.
	 */
	bool aux_support;
	/**
	 * @brightness_mask: After deriving brightness, OR it with this mask.
	 * Workaround for panels with issues with certain brightness values.
	 */
	u32 brightness_mask;
	/**
	 * @ac_level: the default brightness if booted on AC
	 */
	u8 ac_level;
	/**
	 * @dc_level: the default brightness if booted on DC
	 */
	u8 dc_level;
	/**
	 * @data_points: the number of custom luminance data points
	 */
	u8 data_points;
	/**
	 * @luminance_data: custom luminance data
	 */
	struct amdgpu_dm_luminance_data luminance_data[MAX_LUMINANCE_DATA_POINTS];
};

/**
 * struct dal_allocation - Tracks mapped FB memory for SMU communication
 * @list: list of dal allocations
 * @bo: GPU buffer object
 * @cpu_ptr: CPU virtual address of the GPU buffer object
 * @gpu_addr: GPU virtual address of the GPU buffer object
 */
struct dal_allocation {
	struct list_head list;
	struct amdgpu_bo *bo;
	void *cpu_ptr;
	u64 gpu_addr;
};

/**
 * struct hpd_rx_irq_offload_work_queue - Work queue to handle hpd_rx_irq
 * offload work
 */
struct hpd_rx_irq_offload_work_queue {
	/**
	 * @wq: workqueue structure to queue offload work.
	 */
	struct workqueue_struct *wq;
	/**
	 * @offload_lock: To protect fields of offload work queue.
	 */
	spinlock_t offload_lock;
	/**
	 * @is_handling_link_loss: Used to prevent inserting link loss event when
	 * we're handling link loss
	 */
	bool is_handling_link_loss;
	/**
	 * @is_handling_mst_msg_rdy_event: Used to prevent inserting mst message
	 * ready event when we're already handling mst message ready event
	 */
	bool is_handling_mst_msg_rdy_event;
	/**
	 * @aconnector: The aconnector that this work queue is attached to
	 */
	struct amdgpu_dm_connector *aconnector;
};

/**
 * struct hpd_rx_irq_offload_work - hpd_rx_irq offload work structure
 */
struct hpd_rx_irq_offload_work {
	/**
	 * @work: offload work
	 */
	struct work_struct work;
	/**
	 * @data: reference irq data which is used while handling offload work
	 */
	union hpd_irq_data data;
	/**
	 * @offload_wq: offload work queue that this work is queued to
	 */
	struct hpd_rx_irq_offload_work_queue *offload_wq;
	/**
	 * @adev: amdgpu_device pointer
	 */
	struct amdgpu_device *adev;
};

/**
 * struct amdgpu_display_manager - Central amdgpu display manager device
 *
 * @dc: Display Core control structure
 * @adev: AMDGPU base driver structure
 * @ddev: DRM base driver structure
 * @display_indexes_num: Max number of display streams supported
 * @irq_handler_list_table_lock: Synchronizes access to IRQ tables
 * @backlight_dev: Backlight control device
 * @backlight_link: Link on which to control backlight
 * @backlight_caps: Capabilities of the backlight device
 * @freesync_module: Module handling freesync calculations
 * @hdcp_workqueue: AMDGPU content protection queue
 * @fw_dmcu: Reference to DMCU firmware
 * @dmcu_fw_version: Version of the DMCU firmware
 * @soc_bounding_box: SOC bounding box values provided by gpu_info FW
 * @cached_state: Caches device atomic state for suspend/resume
 * @cached_dc_state: Cached state of content streams
 * @compressor: Frame buffer compression buffer. See &struct dm_compressor_info
 * @force_timing_sync: set via debugfs. When set, indicates that all connected
 *		       displays will be forced to synchronize.
 * @dmcub_trace_event_en: enable dmcub trace events
 * @dmub_outbox_params: DMUB Outbox parameters
 * @num_of_edps: number of backlight eDPs
 * @disable_hpd_irq: disables all HPD and HPD RX interrupt handling in the
 *		     driver when true
 * @dmub_aux_transfer_done: struct completion used to indicate when DMUB
 *			    transfers are done
 * @delayed_hpd_wq: work queue used to delay DMUB HPD work
 */
struct amdgpu_display_manager {

	struct dc *dc;

	/**
	 * @dmub_srv:
	 *
	 * DMUB service, used for controlling the DMUB on hardware
	 * that supports it. The pointer to the dmub_srv will be
	 * NULL on hardware that does not support it.
	 */
	struct dmub_srv *dmub_srv;

	/**
	 * @dmub_notify:
	 *
	 * Notification from DMUB.
	 */

	struct dmub_notification *dmub_notify;

	/**
	 * @dmub_callback:
	 *
	 * Callback functions to handle notification from DMUB.
	 */

	dmub_notify_interrupt_callback_t dmub_callback[AMDGPU_DMUB_NOTIFICATION_MAX];

	/**
	 * @dmub_thread_offload:
	 *
	 * Flags to indicate whether a callback is offloaded to a worker thread.
	 */

	bool dmub_thread_offload[AMDGPU_DMUB_NOTIFICATION_MAX];

	/**
	 * @dmub_fb_info:
	 *
	 * Framebuffer regions for the DMUB.
	 */
	struct dmub_srv_fb_info *dmub_fb_info;

	/**
	 * @dmub_fw:
	 *
	 * DMUB firmware, required on hardware that has DMUB support.
	 */
	const struct firmware *dmub_fw;

	/**
	 * @dmub_bo:
	 *
	 * Buffer object for the DMUB.
	 */
	struct amdgpu_bo *dmub_bo;

	/**
	 * @dmub_bo_gpu_addr:
	 *
	 * GPU virtual address for the DMUB buffer object.
	 */
	u64 dmub_bo_gpu_addr;

	/**
	 * @dmub_bo_cpu_addr:
	 *
	 * CPU address for the DMUB buffer object.
	 */
	void *dmub_bo_cpu_addr;

	/**
	 * @dmcub_fw_version:
	 *
	 * DMCUB firmware version.
	 */
	uint32_t dmcub_fw_version;

	/**
	 * @cgs_device:
	 *
	 * The Common Graphics Services device. It provides an interface for
	 * accessing registers.
	 */
	struct cgs_device *cgs_device;

	struct amdgpu_device *adev;
	struct drm_device *ddev;
	u16 display_indexes_num;

	/**
	 * @atomic_obj:
	 *
	 * In combination with &dm_atomic_state it helps manage
	 * global atomic state that doesn't map cleanly into existing
	 * drm resources, like &dc_context.
	 */
	struct drm_private_obj atomic_obj;

	/**
	 * @dc_lock:
	 *
	 * Guards access to DC functions that can issue register write
	 * sequences.
	 */
	struct mutex dc_lock;

	/**
	 * @audio_lock:
	 *
	 * Guards access to audio instance changes.
	 */
	struct mutex audio_lock;

	/**
	 * @audio_component:
	 *
	 * Used to notify ELD changes to sound driver.
	 */
	struct drm_audio_component *audio_component;

	/**
	 * @audio_registered:
	 *
	 * True if the audio component has been registered
	 * successfully, false otherwise.
	 */
	bool audio_registered;

	/**
	 * @irq_handler_list_low_tab:
	 *
	 * Low priority IRQ handler table.
	 *
	 * It is an n*m table consisting of n IRQ sources, and m handlers per IRQ
	 * source. Low priority IRQ handlers are deferred to a workqueue to be
	 * processed. Hence, they can sleep.
	 *
	 * Note that handlers are called in the same order as they were
	 * registered (FIFO).
	 */
	struct list_head irq_handler_list_low_tab[DAL_IRQ_SOURCES_NUMBER];

	/**
	 * @irq_handler_list_high_tab:
	 *
	 * High priority IRQ handler table.
	 *
	 * It is an n*m table, same as &irq_handler_list_low_tab. However,
	 * handlers in this table are not deferred and are called immediately.
	 */
	struct list_head irq_handler_list_high_tab[DAL_IRQ_SOURCES_NUMBER];

	/**
	 * @pflip_params:
	 *
	 * Page flip IRQ parameters, passed to registered handlers when
	 * triggered.
	 */
	struct common_irq_params
	pflip_params[DC_IRQ_SOURCE_PFLIP_LAST - DC_IRQ_SOURCE_PFLIP_FIRST + 1];

	/**
	 * @vblank_params:
	 *
	 * Vertical blanking IRQ parameters, passed to registered handlers when
	 * triggered.
	 */
	struct common_irq_params
	vblank_params[DC_IRQ_SOURCE_VBLANK6 - DC_IRQ_SOURCE_VBLANK1 + 1];

	/**
	 * @vline0_params:
	 *
	 * OTG vertical interrupt0 IRQ parameters, passed to registered
	 * handlers when triggered.
	 */
	struct common_irq_params
	vline0_params[DC_IRQ_SOURCE_DC6_VLINE0 - DC_IRQ_SOURCE_DC1_VLINE0 + 1];

	/**
	 * @vupdate_params:
	 *
	 * Vertical update IRQ parameters, passed to registered handlers when
	 * triggered.
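	 * The array holds one entry per VUPDATE interrupt source, from
	 * DC_IRQ_SOURCE_VUPDATE1 through DC_IRQ_SOURCE_VUPDATE6.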
	 */
	struct common_irq_params
	vupdate_params[DC_IRQ_SOURCE_VUPDATE6 - DC_IRQ_SOURCE_VUPDATE1 + 1];

	/**
	 * @dmub_trace_params:
	 *
	 * DMUB trace event IRQ parameters, passed to registered handlers when
	 * triggered.
	 */
	struct common_irq_params
	dmub_trace_params[1];

	struct common_irq_params
	dmub_outbox_params[1];

	spinlock_t irq_handler_list_table_lock;

	struct backlight_device *backlight_dev[AMDGPU_DM_MAX_NUM_EDP];

	const struct dc_link *backlight_link[AMDGPU_DM_MAX_NUM_EDP];

	uint8_t num_of_edps;

	struct amdgpu_dm_backlight_caps backlight_caps[AMDGPU_DM_MAX_NUM_EDP];

	struct mod_freesync *freesync_module;
	struct hdcp_workqueue *hdcp_workqueue;

	/**
	 * @vblank_control_workqueue:
	 *
	 * Deferred work for vblank control events.
	 */
	struct workqueue_struct *vblank_control_workqueue;

	/**
	 * @idle_workqueue:
	 *
	 * Periodic work for idle events.
	 */
	struct idle_workqueue *idle_workqueue;

	struct drm_atomic_state *cached_state;
	struct dc_state *cached_dc_state;

	struct dm_compressor_info compressor;

	const struct firmware *fw_dmcu;
	uint32_t dmcu_fw_version;
	/**
	 * @soc_bounding_box:
	 *
	 * gpu_info FW provided soc bounding box struct or 0 if not
	 * available in FW
	 */
	const struct gpu_info_soc_bounding_box_v1_0 *soc_bounding_box;

	/**
	 * @active_vblank_irq_count:
	 *
	 * number of currently active vblank irqs
	 */
	uint32_t active_vblank_irq_count;

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	/**
	 * @secure_display_ctx:
	 *
	 * Stores secure display relevant info, e.g. the ROI information,
	 * the work_struct to command dmub, etc.
	 */
	struct secure_display_context secure_display_ctx;
#endif
	/**
	 * @hpd_rx_offload_wq:
	 *
	 * Work queue to offload works of hpd_rx_irq
	 */
	struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq;
	/**
	 * @mst_encoders:
	 *
	 * fake encoders used for DP MST.
	 */
	struct amdgpu_encoder mst_encoders[AMDGPU_DM_MAX_CRTC];
	bool force_timing_sync;
	bool disable_hpd_irq;
	bool dmcub_trace_event_en;
	/**
	 * @da_list:
	 *
	 * DAL fb memory allocation list, for communication with SMU.
	 */
	struct list_head da_list;
	struct completion dmub_aux_transfer_done;
	struct workqueue_struct *delayed_hpd_wq;

	/**
	 * @brightness:
	 *
	 * cached backlight values.
	 */
	u32 brightness[AMDGPU_DM_MAX_NUM_EDP];
	/**
	 * @actual_brightness:
	 *
	 * last successfully applied backlight values.
	 */
	u32 actual_brightness[AMDGPU_DM_MAX_NUM_EDP];

	/**
	 * @aux_hpd_discon_quirk:
	 *
	 * Quirk for HPD disconnect while an AUX transfer is ongoing;
	 * observed on certain Intel platforms.
	 */
	bool aux_hpd_discon_quirk;

	/**
	 * @edp0_on_dp1_quirk:
	 *
	 * quirk for platforms that put edp0 on DP1.
	 */
	bool edp0_on_dp1_quirk;

	/**
	 * @dpia_aux_lock:
	 *
	 * Guards access to DPIA AUX
	 */
	struct mutex dpia_aux_lock;

	/**
	 * @bb_from_dmub:
	 *
	 * Bounding box data read from dmub during early initialization for DCN4+.
	 * Data is stored as a byte array that should be cast to the appropriate bb struct.
	 */
	void *bb_from_dmub;

	/**
	 * @oem_i2c:
	 *
	 * OEM i2c bus
	 */
	struct amdgpu_i2c_adapter *oem_i2c;

	/**
	 * @fused_io:
	 *
	 * dmub fused io interface
	 */
	struct fused_io_sync {
		struct completion replied;
		char reply_data[0x40]; // Cannot include dmub_cmd here
	} fused_io[8];
};

enum dsc_clock_force_state {
	DSC_CLK_FORCE_DEFAULT = 0,
	DSC_CLK_FORCE_ENABLE,
	DSC_CLK_FORCE_DISABLE,
};

struct dsc_preferred_settings {
	enum dsc_clock_force_state dsc_force_enable;
	uint32_t dsc_num_slices_v;
	uint32_t dsc_num_slices_h;
	uint32_t dsc_bits_per_pixel;
	bool dsc_force_disable_passthrough;
};

enum mst_progress_status {
	MST_STATUS_DEFAULT = 0,
	MST_PROBE = BIT(0),
	MST_REMOTE_EDID = BIT(1),
	MST_ALLOCATE_NEW_PAYLOAD = BIT(2),
	MST_CLEAR_ALLOCATED_PAYLOAD = BIT(3),
};

/**
 * struct amdgpu_hdmi_vsdb_info - Keep track of the VSDB info
 *
 * AMDGPU supports FreeSync over HDMI by using the VSDB section, and this
 * struct is useful to keep track of the display-specific information about
 * FreeSync.
 */
struct amdgpu_hdmi_vsdb_info {
	/**
	 * @amd_vsdb_version: Vendor Specific Data Block Version, should be
	 * used to determine which Vendor Specific InfoFrame (VSIF) to send.
	 */
	unsigned int amd_vsdb_version;

	/**
	 * @freesync_supported: FreeSync Supported.
	 */
	bool freesync_supported;

	/**
	 * @min_refresh_rate_hz: FreeSync Minimum Refresh Rate in Hz.
	 */
	unsigned int min_refresh_rate_hz;

	/**
	 * @max_refresh_rate_hz: FreeSync Maximum Refresh Rate in Hz
	 */
	unsigned int max_refresh_rate_hz;

	/**
	 * @replay_mode: Replay supported
	 */
	bool replay_mode;
};

struct amdgpu_dm_connector {

	struct drm_connector base;
	uint32_t connector_id;
	int bl_idx;

	struct cec_notifier *notifier;

	/* we need to mind the EDID between detect
	 * and get modes due to analog/digital/tvencoder
	 */
	const struct drm_edid *drm_edid;

	/* shared with amdgpu */
	struct amdgpu_hpd hpd;

	/* number of modes generated from EDID at 'dc_sink' */
	int num_modes;

	/* The 'old' sink - before an HPD.
	 * The 'current' sink is in dc_link->sink.
	 */
	struct dc_sink *dc_sink;
	struct dc_link *dc_link;

	/**
	 * @dc_em_sink: Reference to the emulated (virtual) sink.
	 */
	struct dc_sink *dc_em_sink;

	/* DM only */
	struct drm_dp_mst_topology_mgr mst_mgr;
	struct amdgpu_dm_dp_aux dm_dp_aux;
	struct drm_dp_mst_port *mst_output_port;
	struct amdgpu_dm_connector *mst_root;
	struct drm_dp_aux *dsc_aux;
	uint32_t mst_local_bw;
	uint16_t vc_full_pbn;
	struct mutex handle_mst_msg_ready;

	/* TODO see if we can merge with ddc_bus or make a dm_connector */
	struct amdgpu_i2c_adapter *i2c;

	/* Monitor range limits */
	/**
	 * @min_vfreq: Minimal frequency supported by the display in Hz. This
	 * value is set to zero when there is no FreeSync support.
	 */
	int min_vfreq;

	/**
	 * @max_vfreq: Maximum frequency supported by the display in Hz. This
	 * value is set to zero when there is no FreeSync support.
	 */
	int max_vfreq;

	/* Audio instance - protected by audio_lock. */
	int audio_inst;

	struct mutex hpd_lock;

	bool fake_enable;
	bool force_yuv420_output;
	struct dsc_preferred_settings dsc_settings;
	union dp_downstream_port_present mst_downstream_port_present;
	/* Cached display modes */
	struct drm_display_mode freesync_vid_base;

	int sr_skip_count;
	bool disallow_edp_enter_psr;

	/* Record progress status of mst */
	uint8_t mst_status;

	/* Automated testing */
	bool timing_changed;
	struct dc_crtc_timing *timing_requested;

	/* Adaptive Sync */
	bool pack_sdp_v1_3;
	enum adaptive_sync_type as_type;
	struct amdgpu_hdmi_vsdb_info vsdb_info;
};

/* Set or clear the given &enum mst_progress_status flags in @status. */
static inline void amdgpu_dm_set_mst_status(uint8_t *status,
		uint8_t flags, bool set)
{
	if (set)
		*status |= flags;
	else
		*status &= ~flags;
}

#define to_amdgpu_dm_connector(x) container_of(x, struct amdgpu_dm_connector, base)

struct amdgpu_dm_wb_connector {
	struct drm_writeback_connector base;
	struct dc_link *link;
};

#define to_amdgpu_dm_wb_connector(x) container_of(x, struct amdgpu_dm_wb_connector, base)

extern const struct amdgpu_ip_block_version dm_ip_block;

/* enum amdgpu_transfer_function: pre-defined transfer function supported by AMD.
 *
 * It includes standardized transfer functions and pure power functions. The
 * transfer function coefficients are available at modules/color/color_gamma.c
 */
enum amdgpu_transfer_function {
	AMDGPU_TRANSFER_FUNCTION_DEFAULT,
	AMDGPU_TRANSFER_FUNCTION_SRGB_EOTF,
	AMDGPU_TRANSFER_FUNCTION_BT709_INV_OETF,
	AMDGPU_TRANSFER_FUNCTION_PQ_EOTF,
	AMDGPU_TRANSFER_FUNCTION_IDENTITY,
	AMDGPU_TRANSFER_FUNCTION_GAMMA22_EOTF,
	AMDGPU_TRANSFER_FUNCTION_GAMMA24_EOTF,
	AMDGPU_TRANSFER_FUNCTION_GAMMA26_EOTF,
	AMDGPU_TRANSFER_FUNCTION_SRGB_INV_EOTF,
	AMDGPU_TRANSFER_FUNCTION_BT709_OETF,
	AMDGPU_TRANSFER_FUNCTION_PQ_INV_EOTF,
	AMDGPU_TRANSFER_FUNCTION_GAMMA22_INV_EOTF,
	AMDGPU_TRANSFER_FUNCTION_GAMMA24_INV_EOTF,
	AMDGPU_TRANSFER_FUNCTION_GAMMA26_INV_EOTF,
	AMDGPU_TRANSFER_FUNCTION_COUNT
};

struct dm_plane_state {
	struct drm_plane_state base;
	struct dc_plane_state *dc_state;

	/* Plane color mgmt */
	/**
	 * @degamma_lut:
	 *
	 * 1D LUT for mapping framebuffer/plane pixel data before sampling or
	 * blending operations. It's usually applied to linearize input space.
	 * The blob (if not NULL) is an array of &struct drm_color_lut.
	 */
	struct drm_property_blob *degamma_lut;
	/**
	 * @degamma_tf:
	 *
	 * Predefined transfer function to tell DC driver the input space to
	 * linearize.
	 */
	enum amdgpu_transfer_function degamma_tf;
	/**
	 * @hdr_mult:
	 *
	 * Multiplier to 'gain' the plane. When PQ is decoded using the fixed
	 * func transfer function to the internal FP16 fb, 1.0 -> 80 nits (on
	 * AMD at least). When sRGB is decoded, 1.0 -> 1.0, obviously.
	 * Therefore, 1.0 multiplier = 80 nits for SDR content. So if you
	 * want 203 nits for SDR content, pass in (203.0 / 80.0). Format is
	 * S31.32 sign-magnitude.
	 *
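	 * As an illustration of the encoding, 203.0 / 80.0 = 2.5375, which is
	 * roughly 0x28999999A in S31.32 (integer part 0x2, fractional part
	 * 0.5375 * 2^32), while AMDGPU_HDR_MULT_DEFAULT (0x100000000) is 1.0.
	 *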
	 * The HDR multiplier can range widely beyond [0.0, 1.0]. This means
	 * that a PQ TF is needed for any subsequent linear-to-non-linear
	 * transforms.
	 */
	__u64 hdr_mult;
	/**
	 * @ctm:
	 *
	 * Color transformation matrix. The blob (if not NULL) is a &struct
	 * drm_color_ctm_3x4.
	 */
	struct drm_property_blob *ctm;
	/**
	 * @shaper_lut: shaper lookup table blob. The blob (if not NULL) is an
	 * array of &struct drm_color_lut.
	 */
	struct drm_property_blob *shaper_lut;
	/**
	 * @shaper_tf:
	 *
	 * Predefined transfer function to delinearize color space.
	 */
	enum amdgpu_transfer_function shaper_tf;
	/**
	 * @lut3d: 3D lookup table blob. The blob (if not NULL) is an array of
	 * &struct drm_color_lut.
	 */
	struct drm_property_blob *lut3d;
	/**
	 * @blend_lut: blend lookup table blob. The blob (if not NULL) is an
	 * array of &struct drm_color_lut.
	 */
	struct drm_property_blob *blend_lut;
	/**
	 * @blend_tf:
	 *
	 * Pre-defined transfer function for converting plane pixel data before
	 * applying blend LUT.
	 */
	enum amdgpu_transfer_function blend_tf;
};

enum amdgpu_dm_cursor_mode {
	DM_CURSOR_NATIVE_MODE = 0,
	DM_CURSOR_OVERLAY_MODE,
};

struct dm_crtc_state {
	struct drm_crtc_state base;
	struct dc_stream_state *stream;

	bool cm_has_degamma;
	bool cm_is_degamma_srgb;

	bool mpo_requested;

	int update_type;
	int active_planes;

	int crc_skip_count;

	bool freesync_vrr_info_changed;

	bool dsc_force_changed;
	bool vrr_supported;
	struct mod_freesync_config freesync_config;
	struct dc_info_packet vrr_infopacket;

	int abm_level;

	/**
	 * @regamma_tf:
	 *
	 * Pre-defined transfer function for converting internal FB -> wire
	 * encoding.
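	 * For instance, AMDGPU_TRANSFER_FUNCTION_PQ_INV_EOTF would typically
	 * be used to encode a linear FB for a PQ wire format.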
	 */
	enum amdgpu_transfer_function regamma_tf;

	enum amdgpu_dm_cursor_mode cursor_mode;
};

#define to_dm_crtc_state(x) container_of(x, struct dm_crtc_state, base)

struct dm_atomic_state {
	struct drm_private_state base;

	struct dc_state *context;
};

#define to_dm_atomic_state(x) container_of(x, struct dm_atomic_state, base)

struct dm_connector_state {
	struct drm_connector_state base;

	enum amdgpu_rmx_type scaling;
	uint8_t underscan_vborder;
	uint8_t underscan_hborder;
	bool underscan_enable;
	bool freesync_capable;
	bool update_hdcp;
	uint8_t abm_level;
	int vcpi_slots;
	uint64_t pbn;
};

#define to_dm_connector_state(x)\
	container_of((x), struct dm_connector_state, base)

void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector);
struct drm_connector_state *
amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector);
int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
					    struct drm_connector_state *state,
					    struct drm_property *property,
					    uint64_t val);

int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
					    const struct drm_connector_state *state,
					    struct drm_property *property,
					    uint64_t *val);

int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev);

void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
				     struct amdgpu_dm_connector *aconnector,
				     int connector_type,
				     struct dc_link *link,
				     int link_index);

enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
						    const struct drm_display_mode *mode);

void dm_restore_drm_connector_state(struct drm_device *dev,
				    struct drm_connector *connector);

void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
				    const struct drm_edid *drm_edid);

void amdgpu_dm_trigger_timing_sync(struct drm_device *dev);

/* 3D LUT max size is 17x17x17 (4913 entries) */
#define MAX_COLOR_3DLUT_SIZE 17
#define MAX_COLOR_3DLUT_BITDEPTH 12
int amdgpu_dm_verify_lut3d_size(struct amdgpu_device *adev,
				struct drm_plane_state *plane_state);
/* 1D LUT size */
#define MAX_COLOR_LUT_ENTRIES 4096
/* Legacy gamma LUT users such as X don't like large LUT sizes */
#define MAX_COLOR_LEGACY_LUT_ENTRIES 256

void amdgpu_dm_init_color_mod(void);
int amdgpu_dm_create_color_properties(struct amdgpu_device *adev);
int amdgpu_dm_verify_lut_sizes(const struct drm_crtc_state *crtc_state);
int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc);
int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc,
				      struct drm_plane_state *plane_state,
				      struct dc_plane_state *dc_plane_state);

void amdgpu_dm_update_connector_after_detect(
		struct amdgpu_dm_connector *aconnector);

extern const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs;

int amdgpu_dm_process_dmub_aux_transfer_sync(struct dc_context *ctx, unsigned int link_index,
					     struct aux_payload *payload,
					     enum aux_return_code_type *operation_result);

bool amdgpu_dm_execute_fused_io(
		struct amdgpu_device *dev,
		struct dc_link *link,
		union dmub_rb_cmd *commands,
		uint8_t count,
		uint32_t timeout_us
);

int amdgpu_dm_process_dmub_set_config_sync(struct dc_context *ctx,
					   unsigned int link_index,
					   struct set_config_cmd_payload *payload,
					   enum set_config_status *operation_result);

struct dc_stream_state *
create_validate_stream_for_sink(struct drm_connector *connector,
				const struct drm_display_mode *drm_mode,
				const struct dm_connector_state *dm_state,
				const struct dc_stream_state *old_stream);

int dm_atomic_get_state(struct drm_atomic_state *state,
			struct dm_atomic_state **dm_state);

struct drm_connector *
amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
					     struct drm_crtc *crtc);

int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth);
struct idle_workqueue *idle_create_workqueue(struct amdgpu_device *adev);

void *dm_allocate_gpu_mem(struct amdgpu_device *adev,
			  enum dc_gpu_mem_alloc_type type,
			  size_t size,
			  long long *addr);
void dm_free_gpu_mem(struct amdgpu_device *adev,
		     enum dc_gpu_mem_alloc_type type,
		     void *addr);

bool amdgpu_dm_is_headless(struct amdgpu_device *adev);

void hdmi_cec_set_edid(struct amdgpu_dm_connector *aconnector);
void hdmi_cec_unset_edid(struct amdgpu_dm_connector *aconnector);
int amdgpu_dm_initialize_hdmi_connector(struct amdgpu_dm_connector *aconnector);

void retrieve_dmi_info(struct amdgpu_display_manager *dm);

#endif /* __AMDGPU_DM_H__ */