/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "link_enc_cfg.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"
#include "dc/dc_edid_parser.h"
#include "dc/dc_stat.h"
#include "amdgpu_dm_trace.h"
#include "dpcd_defs.h"
#include "link/protocols/link_dpcd.h"
#include "link_service_types.h"
#include "link/protocols/link_dp_capability.h"
#include "link/protocols/link_ddc.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_plane.h"
#include "amdgpu_dm_crtc.h"
#include "amdgpu_dm_hdcp.h"
#include <drm/display/drm_hdcp_helper.h>
#include "amdgpu_pm.h"
#include "amdgpu_atombios.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif
#include "amdgpu_dm_psr.h"
#include "amdgpu_dm_replay.h"

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/backlight.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>
#include <linux/dmi.h>

#include <drm/display/drm_dp_mst_helper.h>
#include <drm/display/drm_hdmi_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_plane_helper.h>

#include <acpi/video.h>

#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "soc15_common.h"
#include "vega10_ip_offset.h"

#include "gc/gc_11_0_0_offset.h"
#include "gc/gc_11_0_0_sh_mask.h"
"gc/gc_11_0_0_sh_mask.h" 107 108 #include "modules/inc/mod_freesync.h" 109 #include "modules/power/power_helpers.h" 110 111 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin" 112 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB); 113 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin" 114 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB); 115 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin" 116 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB); 117 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin" 118 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB); 119 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin" 120 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB); 121 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin" 122 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB); 123 #define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin" 124 MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB); 125 #define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin" 126 MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB); 127 #define FIRMWARE_DCN_314_DMUB "amdgpu/dcn_3_1_4_dmcub.bin" 128 MODULE_FIRMWARE(FIRMWARE_DCN_314_DMUB); 129 #define FIRMWARE_DCN_315_DMUB "amdgpu/dcn_3_1_5_dmcub.bin" 130 MODULE_FIRMWARE(FIRMWARE_DCN_315_DMUB); 131 #define FIRMWARE_DCN316_DMUB "amdgpu/dcn_3_1_6_dmcub.bin" 132 MODULE_FIRMWARE(FIRMWARE_DCN316_DMUB); 133 134 #define FIRMWARE_DCN_V3_2_0_DMCUB "amdgpu/dcn_3_2_0_dmcub.bin" 135 MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_0_DMCUB); 136 #define FIRMWARE_DCN_V3_2_1_DMCUB "amdgpu/dcn_3_2_1_dmcub.bin" 137 MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_1_DMCUB); 138 139 #define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin" 140 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU); 141 142 #define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin" 143 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU); 144 145 #define FIRMWARE_DCN_35_DMUB "amdgpu/dcn_3_5_dmcub.bin" 146 MODULE_FIRMWARE(FIRMWARE_DCN_35_DMUB); 147 148 /* Number of bytes in PSP header for firmware. */ 149 #define PSP_HEADER_BYTES 0x100 150 151 /* Number of bytes in PSP footer for firmware. */ 152 #define PSP_FOOTER_BYTES 0x100 153 154 /** 155 * DOC: overview 156 * 157 * The AMDgpu display manager, **amdgpu_dm** (or even simpler, 158 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM 159 * requests into DC requests, and DC responses into DRM responses. 160 * 161 * The root control structure is &struct amdgpu_display_manager. 

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);
static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);

static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
	switch (link->dpcd_caps.dongle_type) {
	case DISPLAY_DONGLE_NONE:
		return DRM_MODE_SUBCONNECTOR_Native;
	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
		return DRM_MODE_SUBCONNECTOR_VGA;
	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
	case DISPLAY_DONGLE_DP_DVI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_DVID;
	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_HDMIA;
	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
	default:
		return DRM_MODE_SUBCONNECTOR_Unknown;
	}
}

static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = aconnector->dc_link;
	struct drm_connector *connector = &aconnector->base;
	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
		return;

	if (aconnector->dc_sink)
		subconnector = get_subconnector_type(link);

	drm_object_property_set_value(&connector->base,
			connector->dev->mode_config.dp_subconnector_property,
			subconnector);
}

/*
 * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    u32 link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
static void handle_hpd_rx_irq(void *param);

static bool
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
				 struct drm_crtc_state *new_crtc_state);
/*
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int disp_idx - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	struct amdgpu_crtc *acrtc = NULL;

	if (crtc >= adev->mode_info.num_crtc)
		return 0;

	acrtc = adev->mode_info.crtcs[crtc];

	if (!acrtc->dm_irq_params.stream) {
		DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
			  crtc);
		return 0;
	}

	return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
}
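
/*
 * dm_crtc_get_scanoutpos - Return the current scanout position
 *
 * Queries DC for the scanout position of @crtc and packs it into the
 * register-style format the base driver expects: *position holds the
 * vertical position in the low 16 bits and the horizontal position in
 * the high 16 bits; *vbl holds vblank start/end the same way.
 */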
static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	u32 v_blank_start, v_blank_end, h_position, v_position;
	struct amdgpu_crtc *acrtc = NULL;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;

	acrtc = adev->mode_info.crtcs[crtc];

	if (!acrtc->dm_irq_params.stream) {
		DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
			  crtc);
		return 0;
	}

	/*
	 * TODO rework base driver to use values directly.
	 * for now parse it back into reg-format
	 */
	dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
				 &v_blank_start,
				 &v_blank_end,
				 &h_position,
				 &v_position);

	*position = v_position | (h_position << 16);
	*vbl = v_blank_start | (v_blank_end << 16);

	return 0;
}

static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}

static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (WARN_ON(otg_inst == -1))
		return adev->mode_info.crtcs[0];

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}

static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
					      struct dm_crtc_state *new_state)
{
	if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
		return true;
	else if (amdgpu_dm_crtc_vrr_active(old_state) != amdgpu_dm_crtc_vrr_active(new_state))
		return true;
	else
		return false;
}
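
/* Reverse the order of the surface-update array in place. */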
static inline void reverse_planes_order(struct dc_surface_update *array_of_surface_update,
					int planes_count)
{
	int i, j;

	for (i = 0, j = planes_count - 1; i < j; i++, j--)
		swap(array_of_surface_update[i], array_of_surface_update[j]);
}

/**
 * update_planes_and_stream_adapter() - Send planes to be updated in DC
 *
 * DC has a generic way to update planes and stream via the
 * dc_update_planes_and_stream function; however, DM might need some
 * adjustments and preparation before calling it. This function is a wrapper
 * for dc_update_planes_and_stream that does any required configuration
 * before passing control to DC.
 *
 * @dc: Display Core control structure
 * @update_type: specify whether it is FULL/MEDIUM/FAST update
 * @planes_count: planes count to update
 * @stream: stream state
 * @stream_update: stream update
 * @array_of_surface_update: dc surface update pointer
 *
 */
static inline bool update_planes_and_stream_adapter(struct dc *dc,
						    int update_type,
						    int planes_count,
						    struct dc_stream_state *stream,
						    struct dc_stream_update *stream_update,
						    struct dc_surface_update *array_of_surface_update)
{
	reverse_planes_order(array_of_surface_update, planes_count);

	/*
	 * Previous frame finished and HW is ready for optimization.
	 */
	if (update_type == UPDATE_TYPE_FAST)
		dc_post_update_surfaces_to_stream(dc);

	return dc_update_planes_and_stream(dc,
					   array_of_surface_update,
					   planes_count,
					   stream,
					   stream_update);
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: interrupt parameters
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct drm_device *dev = adev_to_drm(adev);
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	u32 vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		drm_dbg_state(dev, "CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		drm_dbg_state(dev,
			      "amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
			      amdgpu_crtc->pflip_status, AMDGPU_FLIP_SUBMITTED,
			      amdgpu_crtc->crtc_id, amdgpu_crtc);
		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	WARN_ON(!e);

	vrr_active = amdgpu_dm_crtc_vrr_active_irq(amdgpu_crtc);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one incremented at start of this vblank
	 * of pageflip completion, so last_flip_vblank is the forbidden count
	 * for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->dm_irq_params.last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	drm_dbg_state(dev,
		      "crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
		      amdgpu_crtc->crtc_id, amdgpu_crtc, vrr_active, (int)!e);
}
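
/**
 * dm_vupdate_high_irq() - Handle VUPDATE interrupt
 * @interrupt_params: interrupt parameters
 *
 * In VRR mode this runs after end of front-porch: it performs the core
 * vblank handling (which cannot happen at start of front-porch in VRR
 * mode), tracks the measured refresh rate, and runs the below-the-range
 * (BTR) FreeSync processing for pre-DCE12 ASICs.
 */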
static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct drm_device *drm_dev;
	struct drm_vblank_crtc *vblank;
	ktime_t frame_duration_ns, previous_timestamp;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		vrr_active = amdgpu_dm_crtc_vrr_active_irq(acrtc);
		drm_dev = acrtc->base.dev;
		vblank = &drm_dev->vblank[acrtc->base.index];
		previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
		frame_duration_ns = vblank->time - previous_timestamp;

		if (frame_duration_ns > 0) {
			trace_amdgpu_refresh_rate_track(acrtc->base.index,
						frame_duration_ns,
						ktime_divns(NSEC_PER_SEC, frame_duration_ns));
			atomic64_set(&irq_params->previous_timestamp, vblank->time);
		}

		drm_dbg_vbl(drm_dev,
			    "crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
			    vrr_active);

		/* Core vblank handling is done here after end of front-porch in
		 * vrr mode, as vblank timestamping only gives valid results
		 * when done after the front-porch. This will also deliver
		 * page-flip completion events that have been queued to us
		 * if a pageflip happened inside front-porch.
		 */
		if (vrr_active) {
			amdgpu_dm_crtc_handle_vblank(acrtc);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc->dm_irq_params.stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
				mod_freesync_handle_v_update(
						adev->dm.freesync_module,
						acrtc->dm_irq_params.stream,
						&acrtc->dm_irq_params.vrr_params);

				dc_stream_adjust_vmin_vmax(
						adev->dm.dc,
						acrtc->dm_irq_params.stream,
						&acrtc->dm_irq_params.vrr_params.adjust);
				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
			}
		}
	}
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
	if (!acrtc)
		return;

	vrr_active = amdgpu_dm_crtc_vrr_active_irq(acrtc);

	drm_dbg_vbl(adev_to_drm(adev),
		    "crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
		    vrr_active, acrtc->dm_irq_params.active_planes);

	/*
	 * Core vblank handling at start of front-porch is only possible
	 * in non-vrr mode, as only there vblank timestamping will give
	 * valid results while done in front-porch. Otherwise defer it
	 * to dm_vupdate_high_irq after end of front-porch.
	 */
	if (!vrr_active)
		amdgpu_dm_crtc_handle_vblank(acrtc);

	/*
	 * Following stuff must happen at start of vblank, for crc
	 * computation and below-the-range btr support in vrr mode.
	 */
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

	/* BTR updates need to happen before VUPDATE on Vega and above. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (acrtc->dm_irq_params.stream &&
	    acrtc->dm_irq_params.vrr_params.supported &&
	    acrtc->dm_irq_params.freesync_config.state ==
	    VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(adev->dm.freesync_module,
					     acrtc->dm_irq_params.stream,
					     &acrtc->dm_irq_params.vrr_params);

		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
					   &acrtc->dm_irq_params.vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (adev->family >= AMDGPU_FAMILY_RV &&
	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc->dm_irq_params.active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
/**
 * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
 * DCN generation ASICs
 * @interrupt_params: interrupt parameters
 *
 * Used to set crc window/read out crc value at vertical line 0 position
 */
static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);

	if (!acrtc)
		return;

	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
}
#endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */

/**
 * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * Dmub AUX or SET_CONFIG command completion processing callback.
 * Copies the dmub notification to DM, where it is read by the AUX command
 * issuing thread, and also signals the event to wake up that thread.
 */
static void dmub_aux_setconfig_callback(struct amdgpu_device *adev,
					struct dmub_notification *notify)
{
	if (adev->dm.dmub_notify)
		memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
	if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
		complete(&adev->dm.dmub_aux_transfer_done);
}

/**
 * dmub_hpd_callback - DMUB HPD interrupt processing callback.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * Dmub Hpd interrupt processing callback. Gets the display index through
 * the link index and calls the helper to do the processing.
 */
static void dmub_hpd_callback(struct amdgpu_device *adev,
			      struct dmub_notification *notify)
{
	struct amdgpu_dm_connector *aconnector;
	struct amdgpu_dm_connector *hpd_aconnector = NULL;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct dc_link *link;
	u8 link_index = 0;
	struct drm_device *dev;

	if (adev == NULL)
		return;

	if (notify == NULL) {
		DRM_ERROR("DMUB HPD callback notification was NULL");
		return;
	}

	if (notify->link_index > adev->dm.dc->link_count) {
		DRM_ERROR("DMUB HPD index (%u) is abnormal", notify->link_index);
		return;
	}

	link_index = notify->link_index;
	link = adev->dm.dc->links[link_index];
	dev = adev->dm.ddev;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (link && aconnector->dc_link == link) {
			if (notify->type == DMUB_NOTIFICATION_HPD)
				DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
			else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
				DRM_INFO("DMUB HPD IRQ callback: link_index=%u\n", link_index);
			else
				DRM_WARN("DMUB Unknown HPD callback type %d, link_index=%u\n",
						notify->type, link_index);

			hpd_aconnector = aconnector;
			break;
		}
	}
	drm_connector_list_iter_end(&iter);

	if (hpd_aconnector) {
		if (notify->type == DMUB_NOTIFICATION_HPD)
			handle_hpd_irq_helper(hpd_aconnector);
		else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
			handle_hpd_rx_irq(hpd_aconnector);
	}
}

/**
 * register_dmub_notify_callback - Sets callback for DMUB notify
 * @adev: amdgpu_device pointer
 * @type: Type of dmub notification
 * @callback: Dmub interrupt callback function
 * @dmub_int_thread_offload: offload indicator
 *
 * API to register a dmub callback handler for a dmub notification.
 * Also sets an indicator whether the callback processing is to be
 * offloaded to the dmub interrupt handling thread.
 *
 * Return: true if successfully registered, false if the callback is NULL
 * or the notification type is out of range
 */
static bool register_dmub_notify_callback(struct amdgpu_device *adev,
					  enum dmub_notification_type type,
					  dmub_notify_interrupt_callback_t callback,
					  bool dmub_int_thread_offload)
{
	if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
		adev->dm.dmub_callback[type] = callback;
		adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
	} else
		return false;

	return true;
}

static void dm_handle_hpd_work(struct work_struct *work)
{
	struct dmub_hpd_work *dmub_hpd_wrk;

	dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);

	if (!dmub_hpd_wrk->dmub_notify) {
		DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
		return;
	}

	if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
		dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
		dmub_hpd_wrk->dmub_notify);
	}

	kfree(dmub_hpd_wrk->dmub_notify);
	kfree(dmub_hpd_wrk);
}
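
/* Upper bound on DMUB trace-buffer entries drained per outbox interrupt. */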
#define DMUB_TRACE_MAX_READ 64
/**
 * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
 * @interrupt_params: used for determining the Outbox instance
 *
 * Handles the Outbox interrupt by dispatching pending DMUB notifications
 * and draining the DMUB trace buffer.
 */
static void dm_dmub_outbox1_low_irq(void *interrupt_params)
{
	struct dmub_notification notify;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct dmcub_trace_buf_entry entry = { 0 };
	u32 count = 0;
	struct dmub_hpd_work *dmub_hpd_wrk;
	struct dc_link *plink = NULL;

	if (dc_enable_dmub_notifications(adev->dm.dc) &&
	    irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {

		do {
			dc_stat_get_dmub_notification(adev->dm.dc, &notify);
			if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
				DRM_ERROR("DM: notify type %d invalid!", notify.type);
				continue;
			}
			if (!dm->dmub_callback[notify.type]) {
				DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type);
				continue;
			}
			if (dm->dmub_thread_offload[notify.type] == true) {
				dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
				if (!dmub_hpd_wrk) {
					DRM_ERROR("Failed to allocate dmub_hpd_wrk");
					return;
				}
				dmub_hpd_wrk->dmub_notify = kmemdup(&notify, sizeof(struct dmub_notification),
								    GFP_ATOMIC);
				if (!dmub_hpd_wrk->dmub_notify) {
					kfree(dmub_hpd_wrk);
					DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
					return;
				}
				INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
				dmub_hpd_wrk->adev = adev;
				if (notify.type == DMUB_NOTIFICATION_HPD) {
					plink = adev->dm.dc->links[notify.link_index];
					if (plink) {
						plink->hpd_status =
							notify.hpd_status == DP_HPD_PLUG;
					}
				}
				queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
			} else {
				dm->dmub_callback[notify.type](adev, &notify);
			}
		} while (notify.pending_notification);
	}

	do {
		if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
			trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
							 entry.param0, entry.param1);

			DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
					 entry.trace_code, entry.tick_count,
					 entry.param0, entry.param1);
		} else
			break;

		count++;

	} while (count <= DMUB_TRACE_MAX_READ);

	if (count > DMUB_TRACE_MAX_READ)
		DRM_DEBUG_DRIVER("Warning : count > DMUB_TRACE_MAX_READ");
}

static int dm_set_clockgating_state(void *handle,
				    enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_compressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;

	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	}

	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
						AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
						&compressor->gpu_addr, &compressor->cpu_addr);

		if (r)
			DRM_ERROR("DM: Failed to initialize FBC\n");
		else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
		}
	}
}

static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					     int pipe, bool *enabled,
					     unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
					  struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;
	acomp->dev = kdev;
	adev->dm.audio_component = acomp;

	return 0;
}
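
/* Detach the HDA audio component: clear the ops and pointers set by bind. */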
static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					     struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = NULL;
	acomp->dev = NULL;
	adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind	= amdgpu_dm_audio_component_bind,
	.unbind	= amdgpu_dm_audio_component_unbind,
};

static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
	int i, ret;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].id =
			adev->dm.dc->res_pool->audios[i]->inst;
		adev->mode_info.audio.pin[i].offset = 0;
	}

	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
	if (ret < 0)
		return ret;

	adev->dm.audio_registered = true;

	return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	if (adev->dm.audio_registered) {
		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
		adev->dm.audio_registered = false;
	}

	/* TODO: Disable audio? */

	adev->mode_info.audio.enabled = false;
}

static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
	struct drm_audio_component *acomp = adev->dm.audio_component;

	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 pin, -1);
	}
}
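
/*
 * dm_dmub_hw_init - Initialize DMCUB hardware for this device
 *
 * Resets any previously running DMCUB instance, copies the firmware
 * instruction/data/BIOS regions into the framebuffer windows, clears the
 * mailbox, trace-buffer and firmware-state windows, and hands the result
 * to dmub_srv for hardware init. Returns 0 on success or when DMUB is
 * unsupported on the ASIC, and a negative error code otherwise.
 */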
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dc_context *ctx = adev->dm.dc->ctx;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	u32 i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	/* initialize register offsets for ASICs with runtime initialization available */
	if (dmub_srv->hw_funcs.init_reg_offsets)
		dmub_srv->hw_funcs.init_reg_offsets(dmub_srv, ctx);

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	/* Reset DMCUB if it was previously running - before we overwrite its memory. */
	status = dmub_srv_hw_reset(dmub_srv);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Error resetting DMUB HW: %d\n", status);

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load dmub firmware
	 * fw_inst_const part to cw0; otherwise, the firmware back door load
	 * will be done by dm_dmub_hw_init
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
		       fw_inst_const_size);
	}

	if (fw_bss_data_size)
		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
		       fw_bss_data, fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->vm_manager.vram_base_offset;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
	case IP_VERSION(3, 1, 3):
	case IP_VERSION(3, 1, 4):
	case IP_VERSION(3, 5, 0):
		hw_params.dpia_supported = true;
		hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia;
		break;
	default:
		break;
	}

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	if (!adev->dm.dc->ctx->dmub_srv)
		adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}

static void dm_dmub_hw_resume(struct amdgpu_device *adev)
{
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	enum dmub_status status;
	bool init;

	if (!dmub_srv) {
		/* DMUB isn't supported on the ASIC. */
		return;
	}

	status = dmub_srv_is_hw_init(dmub_srv, &init);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("DMUB hardware init check failed: %d\n", status);

	if (status == DMUB_STATUS_OK && init) {
		/* Wait for firmware load to finish. */
		status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
		if (status != DMUB_STATUS_OK)
			DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
	} else {
		/* Perform the full hardware initialization. */
		dm_dmub_hw_init(adev);
	}
}
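
/*
 * mmhub_read_system_context - Fill a dc_phy_addr_space_config from the GMC
 *
 * Derives the system aperture, AGP aperture and GART page-table addresses
 * from the amdgpu GMC/VM state so DC can program the display address space
 * for system-memory scanout.
 */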
static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
	u64 pt_base;
	u32 logical_addr_low;
	u32 logical_addr_high;
	u32 agp_base, agp_bot, agp_top;
	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

	memset(pa_config, 0, sizeof(*pa_config));

	agp_base = 0;
	agp_bot = adev->gmc.agp_start >> 24;
	agp_top = adev->gmc.agp_end >> 24;

	/* AGP aperture is disabled */
	if (agp_bot > agp_top) {
		logical_addr_low = adev->gmc.fb_start >> 18;
		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
			/*
			 * Raven2 has a HW issue that it is unable to use the vram which
			 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
			 * workaround that increases the system aperture high address
			 * (adds 1) to get rid of the VM fault and hardware hang.
			 */
			logical_addr_high = (adev->gmc.fb_end >> 18) + 0x1;
		else
			logical_addr_high = adev->gmc.fb_end >> 18;
	} else {
		logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
			/*
			 * Raven2 has a HW issue that it is unable to use the vram which
			 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
			 * workaround that increases the system aperture high address
			 * (adds 1) to get rid of the VM fault and hardware hang.
			 */
			logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
		else
			logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
	}

	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	page_table_start.high_part = upper_32_bits(adev->gmc.gart_start >>
						   AMDGPU_GPU_PAGE_SHIFT);
	page_table_start.low_part = lower_32_bits(adev->gmc.gart_start >>
						  AMDGPU_GPU_PAGE_SHIFT);
	page_table_end.high_part = upper_32_bits(adev->gmc.gart_end >>
						 AMDGPU_GPU_PAGE_SHIFT);
	page_table_end.low_part = lower_32_bits(adev->gmc.gart_end >>
						AMDGPU_GPU_PAGE_SHIFT);
	page_table_base.high_part = upper_32_bits(pt_base);
	page_table_base.low_part = lower_32_bits(pt_base);

	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;

	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;

	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
	pa_config->system_aperture.fb_offset = adev->vm_manager.vram_base_offset;
	pa_config->system_aperture.fb_top = adev->gmc.fb_end;

	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;

	pa_config->is_hvm_enabled = adev->mode_info.gpu_vm_support;
}

static void force_connector_state(
	struct amdgpu_dm_connector *aconnector,
	enum drm_connector_force force_state)
{
	struct drm_connector *connector = &aconnector->base;

	mutex_lock(&connector->dev->mode_config.mutex);
	aconnector->base.force = force_state;
	mutex_unlock(&connector->dev->mode_config.mutex);

	mutex_lock(&aconnector->hpd_lock);
	drm_kms_helper_connector_hotplug_event(connector);
	mutex_unlock(&aconnector->hpd_lock);
}
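
/*
 * dm_handle_hpd_rx_offload_work - Deferred handler for HPD RX interrupts
 *
 * Runs from the per-link offload workqueue. Re-detects the connector, then
 * either services an MST sideband-message-ready event, replies to a DP
 * automated test request, or re-checks the link status and handles link
 * loss if the link went down.
 */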
static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
{
	struct hpd_rx_irq_offload_work *offload_work;
	struct amdgpu_dm_connector *aconnector;
	struct dc_link *dc_link;
	struct amdgpu_device *adev;
	enum dc_connection_type new_connection_type = dc_connection_none;
	unsigned long flags;
	union test_response test_response;

	memset(&test_response, 0, sizeof(test_response));

	offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
	aconnector = offload_work->offload_wq->aconnector;

	if (!aconnector) {
		DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
		goto skip;
	}

	adev = drm_to_adev(aconnector->base.dev);
	dc_link = aconnector->dc_link;

	mutex_lock(&aconnector->hpd_lock);
	if (!dc_link_detect_connection_type(dc_link, &new_connection_type))
		DRM_ERROR("KMS: Failed to detect connector\n");
	mutex_unlock(&aconnector->hpd_lock);

	if (new_connection_type == dc_connection_none)
		goto skip;

	if (amdgpu_in_reset(adev))
		goto skip;

	if (offload_work->data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
		offload_work->data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
		dm_handle_mst_sideband_msg_ready_event(&aconnector->mst_mgr, DOWN_OR_UP_MSG_RDY_EVENT);
		spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
		offload_work->offload_wq->is_handling_mst_msg_rdy_event = false;
		spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
		goto skip;
	}

	mutex_lock(&adev->dm.dc_lock);
	if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
		dc_link_dp_handle_automated_test(dc_link);

		if (aconnector->timing_changed) {
			/* force connector disconnect and reconnect */
			force_connector_state(aconnector, DRM_FORCE_OFF);
			msleep(100);
			force_connector_state(aconnector, DRM_FORCE_UNSPECIFIED);
		}

		test_response.bits.ACK = 1;

		core_link_write_dpcd(
			dc_link,
			DP_TEST_RESPONSE,
			&test_response.raw,
			sizeof(test_response));
	} else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
			dc_link_check_link_loss_status(dc_link, &offload_work->data) &&
			dc_link_dp_allow_hpd_rx_irq(dc_link)) {
		/* offload_work->data comes from handle_hpd_rx_irq->
		 * schedule_hpd_rx_offload_work; this is the deferred handler
		 * for an hpd short pulse. By now the link status may have
		 * changed, so get the latest link status from the DPCD
		 * registers. If the link status is good, skip running link
		 * training again.
		 */
		union hpd_irq_data irq_data;

		memset(&irq_data, 0, sizeof(irq_data));

		/* Before dc_link_dp_handle_link_loss(), allow a new link-loss
		 * handling request to be added to the work queue in case the
		 * link is lost again at the end of
		 * dc_link_dp_handle_link_loss().
		 */
		spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
		offload_work->offload_wq->is_handling_link_loss = false;
		spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);

		if ((dc_link_dp_read_hpd_rx_irq_data(dc_link, &irq_data) == DC_OK) &&
			dc_link_check_link_loss_status(dc_link, &irq_data))
			dc_link_dp_handle_link_loss(dc_link);
	}
	mutex_unlock(&adev->dm.dc_lock);

skip:
	kfree(offload_work);
}

static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
{
	int max_caps = dc->caps.max_links;
	int i = 0;
	struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;

	hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);

	if (!hpd_rx_offload_wq)
		return NULL;

	for (i = 0; i < max_caps; i++) {
		hpd_rx_offload_wq[i].wq =
			create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");

		if (hpd_rx_offload_wq[i].wq == NULL) {
			DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!");
			goto out_err;
		}

		spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
	}

	return hpd_rx_offload_wq;

out_err:
	for (i = 0; i < max_caps; i++) {
		if (hpd_rx_offload_wq[i].wq)
			destroy_workqueue(hpd_rx_offload_wq[i].wq);
	}
	kfree(hpd_rx_offload_wq);
	return NULL;
}

struct amdgpu_stutter_quirk {
	u16 chip_vendor;
	u16 chip_device;
	u16 subsys_vendor;
	u16 subsys_device;
	u8 revision;
};

static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
	/* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
	{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
	{ 0, 0, 0, 0, 0 },
};
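
/* Return true if the device matches an entry in the stutter quirk list above. */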
(p && p->chip_device != 0) { 1480 if (pdev->vendor == p->chip_vendor && 1481 pdev->device == p->chip_device && 1482 pdev->subsystem_vendor == p->subsys_vendor && 1483 pdev->subsystem_device == p->subsys_device && 1484 pdev->revision == p->revision) { 1485 return true; 1486 } 1487 ++p; 1488 } 1489 return false; 1490 } 1491 1492 static const struct dmi_system_id hpd_disconnect_quirk_table[] = { 1493 { 1494 .matches = { 1495 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 1496 DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3660"), 1497 }, 1498 }, 1499 { 1500 .matches = { 1501 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 1502 DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3260"), 1503 }, 1504 }, 1505 { 1506 .matches = { 1507 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 1508 DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3460"), 1509 }, 1510 }, 1511 { 1512 .matches = { 1513 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 1514 DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower Plus 7010"), 1515 }, 1516 }, 1517 { 1518 .matches = { 1519 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 1520 DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower 7010"), 1521 }, 1522 }, 1523 { 1524 .matches = { 1525 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 1526 DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF Plus 7010"), 1527 }, 1528 }, 1529 { 1530 .matches = { 1531 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 1532 DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF 7010"), 1533 }, 1534 }, 1535 { 1536 .matches = { 1537 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 1538 DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro Plus 7010"), 1539 }, 1540 }, 1541 { 1542 .matches = { 1543 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 1544 DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro 7010"), 1545 }, 1546 }, 1547 {} 1548 /* TODO: refactor this from a fixed table to a dynamic option */ 1549 }; 1550 1551 static void retrieve_dmi_info(struct amdgpu_display_manager *dm) 1552 { 1553 const struct dmi_system_id *dmi_id; 1554 1555 dm->aux_hpd_discon_quirk = false; 1556 1557 dmi_id = dmi_first_match(hpd_disconnect_quirk_table); 1558 if (dmi_id) { 1559 dm->aux_hpd_discon_quirk = true; 1560 DRM_INFO("aux_hpd_discon_quirk attached\n"); 1561 } 1562 } 1563 1564 static int amdgpu_dm_init(struct amdgpu_device *adev) 1565 { 1566 struct dc_init_data init_data; 1567 struct dc_callback_init init_params; 1568 int r; 1569 1570 adev->dm.ddev = adev_to_drm(adev); 1571 adev->dm.adev = adev; 1572 1573 /* Zero all the fields */ 1574 memset(&init_data, 0, sizeof(init_data)); 1575 memset(&init_params, 0, sizeof(init_params)); 1576 1577 mutex_init(&adev->dm.dpia_aux_lock); 1578 mutex_init(&adev->dm.dc_lock); 1579 mutex_init(&adev->dm.audio_lock); 1580 1581 if (amdgpu_dm_irq_init(adev)) { 1582 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n"); 1583 goto error; 1584 } 1585 1586 init_data.asic_id.chip_family = adev->family; 1587 1588 init_data.asic_id.pci_revision_id = adev->pdev->revision; 1589 init_data.asic_id.hw_internal_rev = adev->external_rev_id; 1590 init_data.asic_id.chip_id = adev->pdev->device; 1591 1592 init_data.asic_id.vram_width = adev->gmc.vram_width; 1593 /* TODO: initialize init_data.asic_id.vram_type here!!!! 
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;
	struct dc_callback_init init_params;
	int r;

	adev->dm.ddev = adev_to_drm(adev);
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));
	memset(&init_params, 0, sizeof(init_params));

	mutex_init(&adev->dm.dpia_aux_lock);
	mutex_init(&adev->dm.dc_lock);
	mutex_init(&adev->dm.audio_lock);

	if (amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->pdev->revision;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
	init_data.asic_id.chip_id = adev->pdev->device;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
	case IP_VERSION(2, 1, 0):
		switch (adev->dm.dmcub_fw_version) {
		case 0: /* development */
		case 0x1: /* linux-firmware.git hash 6d9f399 */
		case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
			init_data.flags.disable_dmcu = false;
			break;
		default:
			init_data.flags.disable_dmcu = true;
		}
		break;
	case IP_VERSION(2, 0, 3):
		init_data.flags.disable_dmcu = true;
		break;
	default:
		break;
	}

	/* APUs support S/G display by default except:
	 * ASICs before Carrizo,
	 * RAVEN1 (Users reported stability issue)
	 */

	if (adev->asic_type < CHIP_CARRIZO) {
		init_data.flags.gpu_vm_support = false;
	} else if (adev->asic_type == CHIP_RAVEN) {
		if (adev->apu_flags & AMD_APU_IS_RAVEN)
			init_data.flags.gpu_vm_support = false;
		else
			init_data.flags.gpu_vm_support = (amdgpu_sg_display != 0);
	} else {
		init_data.flags.gpu_vm_support = (amdgpu_sg_display != 0) && (adev->flags & AMD_IS_APU);
	}

	adev->mode_info.gpu_vm_support = init_data.flags.gpu_vm_support;

	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
		init_data.flags.multi_mon_pp_mclk_switch = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
		init_data.flags.disable_fractional_pwm = true;

	if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
		init_data.flags.edp_no_power_sequencing = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP1_4A)
		init_data.flags.allow_lttpr_non_transparent_mode.bits.DP1_4A = true;
	if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0)
		init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true;

	init_data.flags.seamless_boot_edp_requested = false;

	if (amdgpu_device_seamless_boot_supported(adev)) {
		init_data.flags.seamless_boot_edp_requested = true;
		init_data.flags.allow_seamless_boot_optimization = true;
		DRM_INFO("Seamless boot condition check passed\n");
	}

	init_data.flags.enable_mipi_converter_optimization = true;

	init_data.dcn_reg_offsets = adev->reg_offset[DCE_HWIP][0];
	init_data.nbio_reg_offsets = adev->reg_offset[NBIO_HWIP][0];
	init_data.clk_reg_offsets = adev->reg_offset[CLK_HWIP][0];

	INIT_LIST_HEAD(&adev->dm.da_list);

	retrieve_dmi_info(&adev->dm);

	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core v%s initialized on %s\n", DC_VER,
			 dce_version_to_string(adev->dm.dc->ctx->dce_version));
	} else {
		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
		adev->dm.dc->debug.force_single_disp_pipe_split = false;
		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
	}

	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
	if (dm_should_disable_stutter(adev->pdev))
		adev->dm.dc->debug.disable_stutter = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
		adev->dm.dc->debug.disable_stutter = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
		adev->dm.dc->debug.disable_dsc = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
		adev->dm.dc->debug.disable_clock_gate = true;

	if (amdgpu_dc_debug_mask & DC_FORCE_SUBVP_MCLK_SWITCH)
		adev->dm.dc->debug.force_subvp_mclk_switch = true;

	adev->dm.dc->debug.visual_confirm = amdgpu_dc_visual_confirm;

	/* TODO: Remove after DP2 receiver gets proper support of Cable ID feature */
	adev->dm.dc->debug.ignore_cable_id = true;

	/* TODO: There is a new drm mst change where the freedom of
	 * vc_next_start_slot update is revoked/moved into drm, instead of in
	 * driver. This forces us to make sure to get vc_next_start_slot updated
	 * in drm function each time without considering if mst_state is active
	 * or not. Otherwise, next time hotplug will give wrong start_slot
	 * number. We are implementing a temporary solution to even notify drm
	 * mst deallocation when link is no longer of MST type when uncommitting
	 * the stream so we will have more time to work on a proper solution.
	 * Ideally when dm_helpers_dp_mst_stop_top_mgr message is triggered, we
	 * should notify drm to do a complete "reset" of its states and stop
	 * calling further drm mst functions when link is no longer of an MST
	 * type. This could happen when we unplug an MST hubs/displays. When
	 * uncommit stream comes later after unplug, we should just reset
	 * hardware states only.
	 */
1734 */ 1735 adev->dm.dc->debug.temp_mst_deallocation_sequence = true; 1736 1737 if (adev->dm.dc->caps.dp_hdmi21_pcon_support) 1738 DRM_INFO("DP-HDMI FRL PCON supported\n"); 1739 1740 r = dm_dmub_hw_init(adev); 1741 if (r) { 1742 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r); 1743 goto error; 1744 } 1745 1746 dc_hardware_init(adev->dm.dc); 1747 1748 adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc); 1749 if (!adev->dm.hpd_rx_offload_wq) { 1750 DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n"); 1751 goto error; 1752 } 1753 1754 if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) { 1755 struct dc_phy_addr_space_config pa_config; 1756 1757 mmhub_read_system_context(adev, &pa_config); 1758 1759 // Call the DC init_memory func 1760 dc_setup_system_context(adev->dm.dc, &pa_config); 1761 } 1762 1763 adev->dm.freesync_module = mod_freesync_create(adev->dm.dc); 1764 if (!adev->dm.freesync_module) { 1765 DRM_ERROR( 1766 "amdgpu: failed to initialize freesync_module.\n"); 1767 } else 1768 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n", 1769 adev->dm.freesync_module); 1770 1771 amdgpu_dm_init_color_mod(); 1772 1773 if (adev->dm.dc->caps.max_links > 0) { 1774 adev->dm.vblank_control_workqueue = 1775 create_singlethread_workqueue("dm_vblank_control_workqueue"); 1776 if (!adev->dm.vblank_control_workqueue) 1777 DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n"); 1778 } 1779 1780 if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) { 1781 adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc); 1782 1783 if (!adev->dm.hdcp_workqueue) 1784 DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n"); 1785 else 1786 DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue); 1787 1788 dc_init_callbacks(adev->dm.dc, &init_params); 1789 } 1790 if (dc_is_dmub_outbox_supported(adev->dm.dc)) { 1791 init_completion(&adev->dm.dmub_aux_transfer_done); 1792 adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL); 1793 if (!adev->dm.dmub_notify) { 1794 DRM_INFO("amdgpu: fail to allocate adev->dm.dmub_notify"); 1795 goto error; 1796 } 1797 1798 adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq"); 1799 if (!adev->dm.delayed_hpd_wq) { 1800 DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n"); 1801 goto error; 1802 } 1803 1804 amdgpu_dm_outbox_init(adev); 1805 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY, 1806 dmub_aux_setconfig_callback, false)) { 1807 DRM_ERROR("amdgpu: fail to register dmub aux callback"); 1808 goto error; 1809 } 1810 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) { 1811 DRM_ERROR("amdgpu: fail to register dmub hpd callback"); 1812 goto error; 1813 } 1814 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) { 1815 DRM_ERROR("amdgpu: fail to register dmub hpd callback"); 1816 goto error; 1817 } 1818 } 1819 1820 /* Enable outbox notification only after IRQ handlers are registered and DMUB is alive. 1821 * It is expected that DMUB will resend any pending notifications at this point, for 1822 * example HPD from DPIA. 
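 *
 * The ordering matters here: amdgpu_dm_outbox_init() and the
 * register_dmub_notify_callback() calls above have to be done before
 * dc_enable_dmub_outbox() below, otherwise a notification resent by DMUB
 * could arrive with no handler registered for it.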
1823 */ 1824 if (dc_is_dmub_outbox_supported(adev->dm.dc)) { 1825 dc_enable_dmub_outbox(adev->dm.dc); 1826 1827 /* DPIA trace goes to dmesg logs only if outbox is enabled */ 1828 if (amdgpu_dc_debug_mask & DC_ENABLE_DPIA_TRACE) 1829 dc_dmub_srv_enable_dpia_trace(adev->dm.dc); 1830 } 1831 1832 if (amdgpu_dm_initialize_drm_device(adev)) { 1833 DRM_ERROR( 1834 "amdgpu: failed to initialize sw for display support.\n"); 1835 goto error; 1836 } 1837 1838 /* create fake encoders for MST */ 1839 dm_dp_create_fake_mst_encoders(adev); 1840 1841 /* TODO: Add_display_info? */ 1842 1843 /* TODO use dynamic cursor width */ 1844 adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size; 1845 adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size; 1846 1847 if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) { 1848 DRM_ERROR( 1849 "amdgpu: failed to initialize vblank for display support.\n"); 1850 goto error; 1851 } 1852 1853 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) 1854 adev->dm.secure_display_ctxs = amdgpu_dm_crtc_secure_display_create_contexts(adev); 1855 if (!adev->dm.secure_display_ctxs) 1856 DRM_ERROR("amdgpu: failed to initialize secure display contexts.\n"); 1857 #endif 1858 1859 DRM_DEBUG_DRIVER("KMS initialized.\n"); 1860 1861 return 0; 1862 error: 1863 amdgpu_dm_fini(adev); 1864 1865 return -EINVAL; 1866 } 1867 1868 static int amdgpu_dm_early_fini(void *handle) 1869 { 1870 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1871 1872 amdgpu_dm_audio_fini(adev); 1873 1874 return 0; 1875 } 1876 1877 static void amdgpu_dm_fini(struct amdgpu_device *adev) 1878 { 1879 int i; 1880 1881 if (adev->dm.vblank_control_workqueue) { 1882 destroy_workqueue(adev->dm.vblank_control_workqueue); 1883 adev->dm.vblank_control_workqueue = NULL; 1884 } 1885 1886 amdgpu_dm_destroy_drm_device(&adev->dm); 1887 1888 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) 1889 if (adev->dm.secure_display_ctxs) { 1890 for (i = 0; i < adev->mode_info.num_crtc; i++) { 1891 if (adev->dm.secure_display_ctxs[i].crtc) { 1892 flush_work(&adev->dm.secure_display_ctxs[i].notify_ta_work); 1893 flush_work(&adev->dm.secure_display_ctxs[i].forward_roi_work); 1894 } 1895 } 1896 kfree(adev->dm.secure_display_ctxs); 1897 adev->dm.secure_display_ctxs = NULL; 1898 } 1899 #endif 1900 if (adev->dm.hdcp_workqueue) { 1901 hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue); 1902 adev->dm.hdcp_workqueue = NULL; 1903 } 1904 1905 if (adev->dm.dc) 1906 dc_deinit_callbacks(adev->dm.dc); 1907 1908 if (adev->dm.dc) 1909 dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv); 1910 1911 if (dc_enable_dmub_notifications(adev->dm.dc)) { 1912 kfree(adev->dm.dmub_notify); 1913 adev->dm.dmub_notify = NULL; 1914 destroy_workqueue(adev->dm.delayed_hpd_wq); 1915 adev->dm.delayed_hpd_wq = NULL; 1916 } 1917 1918 if (adev->dm.dmub_bo) 1919 amdgpu_bo_free_kernel(&adev->dm.dmub_bo, 1920 &adev->dm.dmub_bo_gpu_addr, 1921 &adev->dm.dmub_bo_cpu_addr); 1922 1923 if (adev->dm.hpd_rx_offload_wq) { 1924 for (i = 0; i < adev->dm.dc->caps.max_links; i++) { 1925 if (adev->dm.hpd_rx_offload_wq[i].wq) { 1926 destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq); 1927 adev->dm.hpd_rx_offload_wq[i].wq = NULL; 1928 } 1929 } 1930 1931 kfree(adev->dm.hpd_rx_offload_wq); 1932 adev->dm.hpd_rx_offload_wq = NULL; 1933 } 1934 1935 /* DC Destroy TODO: Replace destroy DAL */ 1936 if (adev->dm.dc) 1937 dc_destroy(&adev->dm.dc); 1938 /* 1939 * TODO: pageflip, vblank interrupt 1940 * 1941 * amdgpu_dm_irq_fini(adev); 1942 */ 1943 1944 if
(adev->dm.cgs_device) { 1945 amdgpu_cgs_destroy_device(adev->dm.cgs_device); 1946 adev->dm.cgs_device = NULL; 1947 } 1948 if (adev->dm.freesync_module) { 1949 mod_freesync_destroy(adev->dm.freesync_module); 1950 adev->dm.freesync_module = NULL; 1951 } 1952 1953 mutex_destroy(&adev->dm.audio_lock); 1954 mutex_destroy(&adev->dm.dc_lock); 1955 mutex_destroy(&adev->dm.dpia_aux_lock); 1956 } 1957 1958 static int load_dmcu_fw(struct amdgpu_device *adev) 1959 { 1960 const char *fw_name_dmcu = NULL; 1961 int r; 1962 const struct dmcu_firmware_header_v1_0 *hdr; 1963 1964 switch (adev->asic_type) { 1965 #if defined(CONFIG_DRM_AMD_DC_SI) 1966 case CHIP_TAHITI: 1967 case CHIP_PITCAIRN: 1968 case CHIP_VERDE: 1969 case CHIP_OLAND: 1970 #endif 1971 case CHIP_BONAIRE: 1972 case CHIP_HAWAII: 1973 case CHIP_KAVERI: 1974 case CHIP_KABINI: 1975 case CHIP_MULLINS: 1976 case CHIP_TONGA: 1977 case CHIP_FIJI: 1978 case CHIP_CARRIZO: 1979 case CHIP_STONEY: 1980 case CHIP_POLARIS11: 1981 case CHIP_POLARIS10: 1982 case CHIP_POLARIS12: 1983 case CHIP_VEGAM: 1984 case CHIP_VEGA10: 1985 case CHIP_VEGA12: 1986 case CHIP_VEGA20: 1987 return 0; 1988 case CHIP_NAVI12: 1989 fw_name_dmcu = FIRMWARE_NAVI12_DMCU; 1990 break; 1991 case CHIP_RAVEN: 1992 if (ASICREV_IS_PICASSO(adev->external_rev_id)) 1993 fw_name_dmcu = FIRMWARE_RAVEN_DMCU; 1994 else if (ASICREV_IS_RAVEN2(adev->external_rev_id)) 1995 fw_name_dmcu = FIRMWARE_RAVEN_DMCU; 1996 else 1997 return 0; 1998 break; 1999 default: 2000 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { 2001 case IP_VERSION(2, 0, 2): 2002 case IP_VERSION(2, 0, 3): 2003 case IP_VERSION(2, 0, 0): 2004 case IP_VERSION(2, 1, 0): 2005 case IP_VERSION(3, 0, 0): 2006 case IP_VERSION(3, 0, 2): 2007 case IP_VERSION(3, 0, 3): 2008 case IP_VERSION(3, 0, 1): 2009 case IP_VERSION(3, 1, 2): 2010 case IP_VERSION(3, 1, 3): 2011 case IP_VERSION(3, 1, 4): 2012 case IP_VERSION(3, 1, 5): 2013 case IP_VERSION(3, 1, 6): 2014 case IP_VERSION(3, 2, 0): 2015 case IP_VERSION(3, 2, 1): 2016 case IP_VERSION(3, 5, 0): 2017 return 0; 2018 default: 2019 break; 2020 } 2021 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type); 2022 return -EINVAL; 2023 } 2024 2025 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { 2026 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n"); 2027 return 0; 2028 } 2029 2030 r = amdgpu_ucode_request(adev, &adev->dm.fw_dmcu, fw_name_dmcu); 2031 if (r == -ENODEV) { 2032 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */ 2033 DRM_DEBUG_KMS("dm: DMCU firmware not found\n"); 2034 adev->dm.fw_dmcu = NULL; 2035 return 0; 2036 } 2037 if (r) { 2038 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n", 2039 fw_name_dmcu); 2040 amdgpu_ucode_release(&adev->dm.fw_dmcu); 2041 return r; 2042 } 2043 2044 hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data; 2045 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM; 2046 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu; 2047 adev->firmware.fw_size += 2048 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE); 2049 2050 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV; 2051 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu; 2052 adev->firmware.fw_size += 2053 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE); 2054 2055 adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version); 2056 2057 DRM_DEBUG_KMS("PSP loading DMCU 
firmware\n"); 2058 2059 return 0; 2060 } 2061 2062 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address) 2063 { 2064 struct amdgpu_device *adev = ctx; 2065 2066 return dm_read_reg(adev->dm.dc->ctx, address); 2067 } 2068 2069 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address, 2070 uint32_t value) 2071 { 2072 struct amdgpu_device *adev = ctx; 2073 2074 return dm_write_reg(adev->dm.dc->ctx, address, value); 2075 } 2076 2077 static int dm_dmub_sw_init(struct amdgpu_device *adev) 2078 { 2079 struct dmub_srv_create_params create_params; 2080 struct dmub_srv_region_params region_params; 2081 struct dmub_srv_region_info region_info; 2082 struct dmub_srv_memory_params memory_params; 2083 struct dmub_srv_fb_info *fb_info; 2084 struct dmub_srv *dmub_srv; 2085 const struct dmcub_firmware_header_v1_0 *hdr; 2086 enum dmub_asic dmub_asic; 2087 enum dmub_status status; 2088 int r; 2089 2090 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { 2091 case IP_VERSION(2, 1, 0): 2092 dmub_asic = DMUB_ASIC_DCN21; 2093 break; 2094 case IP_VERSION(3, 0, 0): 2095 dmub_asic = DMUB_ASIC_DCN30; 2096 break; 2097 case IP_VERSION(3, 0, 1): 2098 dmub_asic = DMUB_ASIC_DCN301; 2099 break; 2100 case IP_VERSION(3, 0, 2): 2101 dmub_asic = DMUB_ASIC_DCN302; 2102 break; 2103 case IP_VERSION(3, 0, 3): 2104 dmub_asic = DMUB_ASIC_DCN303; 2105 break; 2106 case IP_VERSION(3, 1, 2): 2107 case IP_VERSION(3, 1, 3): 2108 dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31; 2109 break; 2110 case IP_VERSION(3, 1, 4): 2111 dmub_asic = DMUB_ASIC_DCN314; 2112 break; 2113 case IP_VERSION(3, 1, 5): 2114 dmub_asic = DMUB_ASIC_DCN315; 2115 break; 2116 case IP_VERSION(3, 1, 6): 2117 dmub_asic = DMUB_ASIC_DCN316; 2118 break; 2119 case IP_VERSION(3, 2, 0): 2120 dmub_asic = DMUB_ASIC_DCN32; 2121 break; 2122 case IP_VERSION(3, 2, 1): 2123 dmub_asic = DMUB_ASIC_DCN321; 2124 break; 2125 case IP_VERSION(3, 5, 0): 2126 dmub_asic = DMUB_ASIC_DCN35; 2127 break; 2128 default: 2129 /* ASIC doesn't support DMUB. */ 2130 return 0; 2131 } 2132 2133 hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data; 2134 adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version); 2135 2136 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { 2137 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id = 2138 AMDGPU_UCODE_ID_DMCUB; 2139 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw = 2140 adev->dm.dmub_fw; 2141 adev->firmware.fw_size += 2142 ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE); 2143 2144 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n", 2145 adev->dm.dmcub_fw_version); 2146 } 2147 2148 2149 adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL); 2150 dmub_srv = adev->dm.dmub_srv; 2151 2152 if (!dmub_srv) { 2153 DRM_ERROR("Failed to allocate DMUB service!\n"); 2154 return -ENOMEM; 2155 } 2156 2157 memset(&create_params, 0, sizeof(create_params)); 2158 create_params.user_ctx = adev; 2159 create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read; 2160 create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write; 2161 create_params.asic = dmub_asic; 2162 2163 /* Create the DMUB service. */ 2164 status = dmub_srv_create(dmub_srv, &create_params); 2165 if (status != DMUB_STATUS_OK) { 2166 DRM_ERROR("Error creating DMUB service: %d\n", status); 2167 return -EINVAL; 2168 } 2169 2170 /* Calculate the size of all the regions for the DMUB service. 
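 *
 * region_params describes the firmware layout: the instruction constants
 * (minus the PSP header and footer), the BSS/data region and a VBIOS
 * shadow. dmub_srv_calc_region_info() folds these into a total allocation
 * size in region_info.fb_size, which is then backed by a single kernel BO
 * further down.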
*/ 2171 memset(&region_params, 0, sizeof(region_params)); 2172 2173 region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) - 2174 PSP_HEADER_BYTES - PSP_FOOTER_BYTES; 2175 region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes); 2176 region_params.vbios_size = adev->bios_size; 2177 region_params.fw_bss_data = region_params.bss_data_size ? 2178 adev->dm.dmub_fw->data + 2179 le32_to_cpu(hdr->header.ucode_array_offset_bytes) + 2180 le32_to_cpu(hdr->inst_const_bytes) : NULL; 2181 region_params.fw_inst_const = 2182 adev->dm.dmub_fw->data + 2183 le32_to_cpu(hdr->header.ucode_array_offset_bytes) + 2184 PSP_HEADER_BYTES; 2185 region_params.is_mailbox_in_inbox = false; 2186 2187 status = dmub_srv_calc_region_info(dmub_srv, &region_params, 2188 &region_info); 2189 2190 if (status != DMUB_STATUS_OK) { 2191 DRM_ERROR("Error calculating DMUB region info: %d\n", status); 2192 return -EINVAL; 2193 } 2194 2195 /* 2196 * Allocate a framebuffer based on the total size of all the regions. 2197 * TODO: Move this into GART. 2198 */ 2199 r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE, 2200 AMDGPU_GEM_DOMAIN_VRAM | 2201 AMDGPU_GEM_DOMAIN_GTT, 2202 &adev->dm.dmub_bo, 2203 &adev->dm.dmub_bo_gpu_addr, 2204 &adev->dm.dmub_bo_cpu_addr); 2205 if (r) 2206 return r; 2207 2208 /* Rebase the regions on the framebuffer address. */ 2209 memset(&memory_params, 0, sizeof(memory_params)); 2210 memory_params.cpu_fb_addr = adev->dm.dmub_bo_cpu_addr; 2211 memory_params.gpu_fb_addr = adev->dm.dmub_bo_gpu_addr; 2212 memory_params.region_info = &region_info; 2213 2214 adev->dm.dmub_fb_info = 2215 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL); 2216 fb_info = adev->dm.dmub_fb_info; 2217 2218 if (!fb_info) { 2219 DRM_ERROR( 2220 "Failed to allocate framebuffer info for DMUB service!\n"); 2221 return -ENOMEM; 2222 } 2223 2224 status = dmub_srv_calc_mem_info(dmub_srv, &memory_params, fb_info); 2225 if (status != DMUB_STATUS_OK) { 2226 DRM_ERROR("Error calculating DMUB FB info: %d\n", status); 2227 return -EINVAL; 2228 } 2229 2230 return 0; 2231 } 2232 2233 static int dm_sw_init(void *handle) 2234 { 2235 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2236 int r; 2237 2238 r = dm_dmub_sw_init(adev); 2239 if (r) 2240 return r; 2241 2242 return load_dmcu_fw(adev); 2243 } 2244 2245 static int dm_sw_fini(void *handle) 2246 { 2247 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2248 2249 kfree(adev->dm.dmub_fb_info); 2250 adev->dm.dmub_fb_info = NULL; 2251 2252 if (adev->dm.dmub_srv) { 2253 dmub_srv_destroy(adev->dm.dmub_srv); 2254 adev->dm.dmub_srv = NULL; 2255 } 2256 2257 amdgpu_ucode_release(&adev->dm.dmub_fw); 2258 amdgpu_ucode_release(&adev->dm.fw_dmcu); 2259 2260 return 0; 2261 } 2262 2263 static int detect_mst_link_for_all_connectors(struct drm_device *dev) 2264 { 2265 struct amdgpu_dm_connector *aconnector; 2266 struct drm_connector *connector; 2267 struct drm_connector_list_iter iter; 2268 int ret = 0; 2269 2270 drm_connector_list_iter_begin(dev, &iter); 2271 drm_for_each_connector_iter(connector, &iter) { 2272 aconnector = to_amdgpu_dm_connector(connector); 2273 if (aconnector->dc_link->type == dc_connection_mst_branch && 2274 aconnector->mst_mgr.aux) { 2275 DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n", 2276 aconnector, 2277 aconnector->base.base.id); 2278 2279 ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true); 2280 if (ret < 0) { 2281 DRM_ERROR("DM_MST: Failed to start MST\n"); 2282 aconnector->dc_link->type = 2283 dc_connection_single;
2284 ret = dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx, 2285 aconnector->dc_link); 2286 break; 2287 } 2288 } 2289 } 2290 drm_connector_list_iter_end(&iter); 2291 2292 return ret; 2293 } 2294 2295 static int dm_late_init(void *handle) 2296 { 2297 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2298 2299 struct dmcu_iram_parameters params; 2300 unsigned int linear_lut[16]; 2301 int i; 2302 struct dmcu *dmcu = NULL; 2303 2304 dmcu = adev->dm.dc->res_pool->dmcu; 2305 2306 for (i = 0; i < 16; i++) 2307 linear_lut[i] = 0xFFFF * i / 15; 2308 2309 params.set = 0; 2310 params.backlight_ramping_override = false; 2311 params.backlight_ramping_start = 0xCCCC; 2312 params.backlight_ramping_reduction = 0xCCCCCCCC; 2313 params.backlight_lut_array_size = 16; 2314 params.backlight_lut_array = linear_lut; 2315 2316 /* Min backlight level after ABM reduction, Don't allow below 1% 2317 * 0xFFFF x 0.01 = 0x28F 2318 */ 2319 params.min_abm_backlight = 0x28F; 2320 /* In the case where abm is implemented on dmcub, 2321 * dmcu object will be null. 2322 * ABM 2.4 and up are implemented on dmcub. 2323 */ 2324 if (dmcu) { 2325 if (!dmcu_load_iram(dmcu, params)) 2326 return -EINVAL; 2327 } else if (adev->dm.dc->ctx->dmub_srv) { 2328 struct dc_link *edp_links[MAX_NUM_EDP]; 2329 int edp_num; 2330 2331 dc_get_edp_links(adev->dm.dc, edp_links, &edp_num); 2332 for (i = 0; i < edp_num; i++) { 2333 if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i)) 2334 return -EINVAL; 2335 } 2336 } 2337 2338 return detect_mst_link_for_all_connectors(adev_to_drm(adev)); 2339 } 2340 2341 static void resume_mst_branch_status(struct drm_dp_mst_topology_mgr *mgr) 2342 { 2343 int ret; 2344 u8 guid[16]; 2345 u64 tmp64; 2346 2347 mutex_lock(&mgr->lock); 2348 if (!mgr->mst_primary) 2349 goto out_fail; 2350 2351 if (drm_dp_read_dpcd_caps(mgr->aux, mgr->dpcd) < 0) { 2352 drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n"); 2353 goto out_fail; 2354 } 2355 2356 ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 2357 DP_MST_EN | 2358 DP_UP_REQ_EN | 2359 DP_UPSTREAM_IS_SRC); 2360 if (ret < 0) { 2361 drm_dbg_kms(mgr->dev, "mst write failed - undocked during suspend?\n"); 2362 goto out_fail; 2363 } 2364 2365 /* Some hubs forget their guids after they resume */ 2366 ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16); 2367 if (ret != 16) { 2368 drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n"); 2369 goto out_fail; 2370 } 2371 2372 if (memchr_inv(guid, 0, 16) == NULL) { 2373 tmp64 = get_jiffies_64(); 2374 memcpy(&guid[0], &tmp64, sizeof(u64)); 2375 memcpy(&guid[8], &tmp64, sizeof(u64)); 2376 2377 ret = drm_dp_dpcd_write(mgr->aux, DP_GUID, guid, 16); 2378 2379 if (ret != 16) { 2380 drm_dbg_kms(mgr->dev, "check mstb guid failed - undocked during suspend?\n"); 2381 goto out_fail; 2382 } 2383 } 2384 2385 memcpy(mgr->mst_primary->guid, guid, 16); 2386 2387 out_fail: 2388 mutex_unlock(&mgr->lock); 2389 } 2390 2391 static void s3_handle_mst(struct drm_device *dev, bool suspend) 2392 { 2393 struct amdgpu_dm_connector *aconnector; 2394 struct drm_connector *connector; 2395 struct drm_connector_list_iter iter; 2396 struct drm_dp_mst_topology_mgr *mgr; 2397 2398 drm_connector_list_iter_begin(dev, &iter); 2399 drm_for_each_connector_iter(connector, &iter) { 2400 aconnector = to_amdgpu_dm_connector(connector); 2401 if (aconnector->dc_link->type != dc_connection_mst_branch || 2402 aconnector->mst_root) 2403 continue; 2404 2405 mgr = &aconnector->mst_mgr; 2406 2407 if (suspend) { 2408 
drm_dp_mst_topology_mgr_suspend(mgr); 2409 } else { 2410 /* If extended timeout is supported in hardware, 2411 * default to LTTPR timeout (3.2ms) first as a W/A for DP link layer 2412 * CTS 4.2.1.1 regression introduced by CTS specs requirement update. 2413 */ 2414 try_to_configure_aux_timeout(aconnector->dc_link->ddc, LINK_AUX_DEFAULT_LTTPR_TIMEOUT_PERIOD); 2415 if (!dp_is_lttpr_present(aconnector->dc_link)) 2416 try_to_configure_aux_timeout(aconnector->dc_link->ddc, LINK_AUX_DEFAULT_TIMEOUT_PERIOD); 2417 2418 /* TODO: move resume_mst_branch_status() into drm mst resume again 2419 * once topology probing work is pulled out from mst resume into mst 2420 * resume 2nd step. mst resume 2nd step should be called after old 2421 * state getting restored (i.e. drm_atomic_helper_resume()). 2422 */ 2423 resume_mst_branch_status(mgr); 2424 } 2425 } 2426 drm_connector_list_iter_end(&iter); 2427 } 2428 2429 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev) 2430 { 2431 int ret = 0; 2432 2433 /* This interface is for dGPU Navi1x. The Linux dc-pplib interface depends 2434 * on the Windows driver dc implementation. 2435 * For Navi1x, clock settings of dcn watermarks are fixed. The settings 2436 * should be passed to SMU during boot up and resume from S3. 2437 * boot up: dc calculates dcn watermark clock settings within dc_create, 2438 * dcn20_resource_construct 2439 * then calls the pplib functions below to pass the settings to SMU: 2440 * smu_set_watermarks_for_clock_ranges 2441 * smu_set_watermarks_table 2442 * navi10_set_watermarks_table 2443 * smu_write_watermarks_table 2444 * 2445 * For Renoir, clock settings of dcn watermarks are also fixed values. 2446 * dc has implemented a different flow for the Windows driver: 2447 * dc_hardware_init / dc_set_power_state 2448 * dcn10_init_hw 2449 * notify_wm_ranges 2450 * set_wm_ranges 2451 * -- Linux 2452 * smu_set_watermarks_for_clock_ranges 2453 * renoir_set_watermarks_table 2454 * smu_write_watermarks_table 2455 * 2456 * For Linux, 2457 * dc_hardware_init -> amdgpu_dm_init 2458 * dc_set_power_state --> dm_resume 2459 * 2460 * Therefore, this function applies to navi10/12/14 but not Renoir. 2461 * 2462 */ 2463 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { 2464 case IP_VERSION(2, 0, 2): 2465 case IP_VERSION(2, 0, 0): 2466 break; 2467 default: 2468 return 0; 2469 } 2470 2471 ret = amdgpu_dpm_write_watermarks_table(adev); 2472 if (ret) { 2473 DRM_ERROR("Failed to update WMTABLE!\n"); 2474 return ret; 2475 } 2476 2477 return 0; 2478 } 2479 2480 /** 2481 * dm_hw_init() - Initialize DC device 2482 * @handle: The base driver device containing the amdgpu_dm device. 2483 * 2484 * Initialize the &struct amdgpu_display_manager device. This involves calling 2485 * the initializers of each DM component, then populating the struct with them. 2486 * 2487 * Although the function implies hardware initialization, both hardware and 2488 * software are initialized here. Splitting them out to their relevant init 2489 * hooks is a future TODO item.
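 *
 * Note that the return value of amdgpu_dm_init() is currently not
 * propagated; dm_hw_init() reports success unconditionally.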
2490 * 2491 * Some notable things that are initialized here: 2492 * 2493 * - Display Core, both software and hardware 2494 * - DC modules that we need (freesync and color management) 2495 * - DRM software states 2496 * - Interrupt sources and handlers 2497 * - Vblank support 2498 * - Debug FS entries, if enabled 2499 */ 2500 static int dm_hw_init(void *handle) 2501 { 2502 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2503 /* Create DAL display manager */ 2504 amdgpu_dm_init(adev); 2505 amdgpu_dm_hpd_init(adev); 2506 2507 return 0; 2508 } 2509 2510 /** 2511 * dm_hw_fini() - Teardown DC device 2512 * @handle: The base driver device containing the amdgpu_dm device. 2513 * 2514 * Teardown components within &struct amdgpu_display_manager that require 2515 * cleanup. This involves cleaning up the DRM device, DC, and any modules that 2516 * were loaded. Also flush IRQ workqueues and disable them. 2517 */ 2518 static int dm_hw_fini(void *handle) 2519 { 2520 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2521 2522 amdgpu_dm_hpd_fini(adev); 2523 2524 amdgpu_dm_irq_fini(adev); 2525 amdgpu_dm_fini(adev); 2526 return 0; 2527 } 2528 2529 2530 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev, 2531 struct dc_state *state, bool enable) 2532 { 2533 enum dc_irq_source irq_source; 2534 struct amdgpu_crtc *acrtc; 2535 int rc = -EBUSY; 2536 int i = 0; 2537 2538 for (i = 0; i < state->stream_count; i++) { 2539 acrtc = get_crtc_by_otg_inst( 2540 adev, state->stream_status[i].primary_otg_inst); 2541 2542 if (acrtc && state->stream_status[i].plane_count != 0) { 2543 irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst; 2544 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY; 2545 if (rc) 2546 DRM_WARN("Failed to %s pflip interrupts\n", 2547 enable ? "enable" : "disable"); 2548 2549 if (enable) { 2550 if (amdgpu_dm_crtc_vrr_active(to_dm_crtc_state(acrtc->base.state))) 2551 rc = amdgpu_dm_crtc_set_vupdate_irq(&acrtc->base, true); 2552 } else 2553 rc = amdgpu_dm_crtc_set_vupdate_irq(&acrtc->base, false); 2554 2555 if (rc) 2556 DRM_WARN("Failed to %sable vupdate interrupt\n", enable ? "en" : "dis"); 2557 2558 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst; 2559 /* During gpu-reset we disable and then enable vblank irq, so 2560 * don't use amdgpu_irq_get/put() to avoid refcount change. 2561 */ 2562 if (!dc_interrupt_set(adev->dm.dc, irq_source, enable)) 2563 DRM_WARN("Failed to %sable vblank interrupt\n", enable ? 
"en" : "dis"); 2564 } 2565 } 2566 2567 } 2568 2569 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc) 2570 { 2571 struct dc_state *context = NULL; 2572 enum dc_status res = DC_ERROR_UNEXPECTED; 2573 int i; 2574 struct dc_stream_state *del_streams[MAX_PIPES]; 2575 int del_streams_count = 0; 2576 2577 memset(del_streams, 0, sizeof(del_streams)); 2578 2579 context = dc_create_state(dc); 2580 if (context == NULL) 2581 goto context_alloc_fail; 2582 2583 dc_resource_state_copy_construct_current(dc, context); 2584 2585 /* First remove from context all streams */ 2586 for (i = 0; i < context->stream_count; i++) { 2587 struct dc_stream_state *stream = context->streams[i]; 2588 2589 del_streams[del_streams_count++] = stream; 2590 } 2591 2592 /* Remove all planes for removed streams and then remove the streams */ 2593 for (i = 0; i < del_streams_count; i++) { 2594 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) { 2595 res = DC_FAIL_DETACH_SURFACES; 2596 goto fail; 2597 } 2598 2599 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]); 2600 if (res != DC_OK) 2601 goto fail; 2602 } 2603 2604 res = dc_commit_streams(dc, context->streams, context->stream_count); 2605 2606 fail: 2607 dc_release_state(context); 2608 2609 context_alloc_fail: 2610 return res; 2611 } 2612 2613 static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm) 2614 { 2615 int i; 2616 2617 if (dm->hpd_rx_offload_wq) { 2618 for (i = 0; i < dm->dc->caps.max_links; i++) 2619 flush_workqueue(dm->hpd_rx_offload_wq[i].wq); 2620 } 2621 } 2622 2623 static int dm_suspend(void *handle) 2624 { 2625 struct amdgpu_device *adev = handle; 2626 struct amdgpu_display_manager *dm = &adev->dm; 2627 int ret = 0; 2628 2629 if (amdgpu_in_reset(adev)) { 2630 mutex_lock(&dm->dc_lock); 2631 2632 dc_allow_idle_optimizations(adev->dm.dc, false); 2633 2634 dm->cached_dc_state = dc_copy_state(dm->dc->current_state); 2635 2636 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false); 2637 2638 amdgpu_dm_commit_zero_streams(dm->dc); 2639 2640 amdgpu_dm_irq_suspend(adev); 2641 2642 hpd_rx_irq_work_suspend(dm); 2643 2644 return ret; 2645 } 2646 2647 WARN_ON(adev->dm.cached_state); 2648 adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev)); 2649 if (IS_ERR(adev->dm.cached_state)) 2650 return PTR_ERR(adev->dm.cached_state); 2651 2652 s3_handle_mst(adev_to_drm(adev), true); 2653 2654 amdgpu_dm_irq_suspend(adev); 2655 2656 hpd_rx_irq_work_suspend(dm); 2657 2658 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3); 2659 2660 return 0; 2661 } 2662 2663 struct amdgpu_dm_connector * 2664 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state, 2665 struct drm_crtc *crtc) 2666 { 2667 u32 i; 2668 struct drm_connector_state *new_con_state; 2669 struct drm_connector *connector; 2670 struct drm_crtc *crtc_from_state; 2671 2672 for_each_new_connector_in_state(state, connector, new_con_state, i) { 2673 crtc_from_state = new_con_state->crtc; 2674 2675 if (crtc_from_state == crtc) 2676 return to_amdgpu_dm_connector(connector); 2677 } 2678 2679 return NULL; 2680 } 2681 2682 static void emulated_link_detect(struct dc_link *link) 2683 { 2684 struct dc_sink_init_data sink_init_data = { 0 }; 2685 struct display_sink_capability sink_caps = { 0 }; 2686 enum dc_edid_status edid_status; 2687 struct dc_context *dc_ctx = link->ctx; 2688 struct drm_device *dev = adev_to_drm(dc_ctx->driver_context); 2689 struct dc_sink *sink = NULL; 2690 struct dc_sink *prev_sink = NULL; 2691 2692 link->type = 
dc_connection_none; 2693 prev_sink = link->local_sink; 2694 2695 if (prev_sink) 2696 dc_sink_release(prev_sink); 2697 2698 switch (link->connector_signal) { 2699 case SIGNAL_TYPE_HDMI_TYPE_A: { 2700 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; 2701 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A; 2702 break; 2703 } 2704 2705 case SIGNAL_TYPE_DVI_SINGLE_LINK: { 2706 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; 2707 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK; 2708 break; 2709 } 2710 2711 case SIGNAL_TYPE_DVI_DUAL_LINK: { 2712 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; 2713 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK; 2714 break; 2715 } 2716 2717 case SIGNAL_TYPE_LVDS: { 2718 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; 2719 sink_caps.signal = SIGNAL_TYPE_LVDS; 2720 break; 2721 } 2722 2723 case SIGNAL_TYPE_EDP: { 2724 sink_caps.transaction_type = 2725 DDC_TRANSACTION_TYPE_I2C_OVER_AUX; 2726 sink_caps.signal = SIGNAL_TYPE_EDP; 2727 break; 2728 } 2729 2730 case SIGNAL_TYPE_DISPLAY_PORT: { 2731 sink_caps.transaction_type = 2732 DDC_TRANSACTION_TYPE_I2C_OVER_AUX; 2733 sink_caps.signal = SIGNAL_TYPE_VIRTUAL; 2734 break; 2735 } 2736 2737 default: 2738 drm_err(dev, "Invalid connector type! signal:%d\n", 2739 link->connector_signal); 2740 return; 2741 } 2742 2743 sink_init_data.link = link; 2744 sink_init_data.sink_signal = sink_caps.signal; 2745 2746 sink = dc_sink_create(&sink_init_data); 2747 if (!sink) { 2748 drm_err(dev, "Failed to create sink!\n"); 2749 return; 2750 } 2751 2752 /* dc_sink_create returns a new reference */ 2753 link->local_sink = sink; 2754 2755 edid_status = dm_helpers_read_local_edid( 2756 link->ctx, 2757 link, 2758 sink); 2759 2760 if (edid_status != EDID_OK) 2761 drm_err(dev, "Failed to read EDID\n"); 2762 2763 } 2764 2765 static void dm_gpureset_commit_state(struct dc_state *dc_state, 2766 struct amdgpu_display_manager *dm) 2767 { 2768 struct { 2769 struct dc_surface_update surface_updates[MAX_SURFACES]; 2770 struct dc_plane_info plane_infos[MAX_SURFACES]; 2771 struct dc_scaling_info scaling_infos[MAX_SURFACES]; 2772 struct dc_flip_addrs flip_addrs[MAX_SURFACES]; 2773 struct dc_stream_update stream_update; 2774 } *bundle; 2775 int k, m; 2776 2777 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL); 2778 2779 if (!bundle) { 2780 drm_err(dm->ddev, "Failed to allocate update bundle\n"); 2781 goto cleanup; 2782 } 2783 2784 for (k = 0; k < dc_state->stream_count; k++) { 2785 bundle->stream_update.stream = dc_state->streams[k]; 2786 2787 for (m = 0; m < dc_state->stream_status->plane_count; m++) { 2788 bundle->surface_updates[m].surface = 2789 dc_state->stream_status->plane_states[m]; 2790 bundle->surface_updates[m].surface->force_full_update = 2791 true; 2792 } 2793 2794 update_planes_and_stream_adapter(dm->dc, 2795 UPDATE_TYPE_FULL, 2796 dc_state->stream_status->plane_count, 2797 dc_state->streams[k], 2798 &bundle->stream_update, 2799 bundle->surface_updates); 2800 } 2801 2802 cleanup: 2803 kfree(bundle); 2804 } 2805 2806 static int dm_resume(void *handle) 2807 { 2808 struct amdgpu_device *adev = handle; 2809 struct drm_device *ddev = adev_to_drm(adev); 2810 struct amdgpu_display_manager *dm = &adev->dm; 2811 struct amdgpu_dm_connector *aconnector; 2812 struct drm_connector *connector; 2813 struct drm_connector_list_iter iter; 2814 struct drm_crtc *crtc; 2815 struct drm_crtc_state *new_crtc_state; 2816 struct dm_crtc_state *dm_new_crtc_state; 2817 struct drm_plane *plane; 2818 struct drm_plane_state *new_plane_state; 2819 struct 
dm_plane_state *dm_new_plane_state; 2820 struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state); 2821 enum dc_connection_type new_connection_type = dc_connection_none; 2822 struct dc_state *dc_state; 2823 int i, r, j, ret; 2824 bool need_hotplug = false; 2825 2826 if (dm->dc->caps.ips_support) { 2827 dc_dmub_srv_exit_low_power_state(dm->dc); 2828 } 2829 2830 if (amdgpu_in_reset(adev)) { 2831 dc_state = dm->cached_dc_state; 2832 2833 /* 2834 * The dc->current_state is backed up into dm->cached_dc_state 2835 * before we commit 0 streams. 2836 * 2837 * DC will clear link encoder assignments on the real state 2838 * but the changes won't propagate over to the copy we made 2839 * before the 0 streams commit. 2840 * 2841 * DC expects that link encoder assignments are *not* valid 2842 * when committing a state, so as a workaround we can copy 2843 * off of the current state. 2844 * 2845 * We lose the previous assignments, but we had already 2846 * commit 0 streams anyway. 2847 */ 2848 link_enc_cfg_copy(adev->dm.dc->current_state, dc_state); 2849 2850 r = dm_dmub_hw_init(adev); 2851 if (r) 2852 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r); 2853 2854 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0); 2855 2856 dc_resume(dm->dc); 2857 2858 amdgpu_dm_irq_resume_early(adev); 2859 2860 for (i = 0; i < dc_state->stream_count; i++) { 2861 dc_state->streams[i]->mode_changed = true; 2862 for (j = 0; j < dc_state->stream_status[i].plane_count; j++) { 2863 dc_state->stream_status[i].plane_states[j]->update_flags.raw 2864 = 0xffffffff; 2865 } 2866 } 2867 2868 if (dc_is_dmub_outbox_supported(adev->dm.dc)) { 2869 amdgpu_dm_outbox_init(adev); 2870 dc_enable_dmub_outbox(adev->dm.dc); 2871 } 2872 2873 WARN_ON(!dc_commit_streams(dm->dc, dc_state->streams, dc_state->stream_count)); 2874 2875 dm_gpureset_commit_state(dm->cached_dc_state, dm); 2876 2877 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true); 2878 2879 dc_release_state(dm->cached_dc_state); 2880 dm->cached_dc_state = NULL; 2881 2882 amdgpu_dm_irq_resume_late(adev); 2883 2884 mutex_unlock(&dm->dc_lock); 2885 2886 return 0; 2887 } 2888 /* Recreate dc_state - DC invalidates it when setting power state to S3. */ 2889 dc_release_state(dm_state->context); 2890 dm_state->context = dc_create_state(dm->dc); 2891 /* TODO: Remove dc_state->dccg, use dc->dccg directly. */ 2892 dc_resource_state_construct(dm->dc, dm_state->context); 2893 2894 /* Before powering on DC we need to re-initialize DMUB. */ 2895 dm_dmub_hw_resume(adev); 2896 2897 /* Re-enable outbox interrupts for DPIA. 
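 *
 * Outbox state does not survive suspend: DMUB was just re-initialized by
 * dm_dmub_hw_resume() above, so the notification path has to be brought
 * back up here the same way as during first init.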
*/ 2898 if (dc_is_dmub_outbox_supported(adev->dm.dc)) { 2899 amdgpu_dm_outbox_init(adev); 2900 dc_enable_dmub_outbox(adev->dm.dc); 2901 } 2902 2903 /* power on hardware */ 2904 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0); 2905 2906 /* program HPD filter */ 2907 dc_resume(dm->dc); 2908 2909 /* 2910 * early enable HPD Rx IRQ, should be done before set mode as short 2911 * pulse interrupts are used for MST 2912 */ 2913 amdgpu_dm_irq_resume_early(adev); 2914 2915 /* On resume we need to rewrite the MSTM control bits to enable MST*/ 2916 s3_handle_mst(ddev, false); 2917 2918 /* Do detection*/ 2919 drm_connector_list_iter_begin(ddev, &iter); 2920 drm_for_each_connector_iter(connector, &iter) { 2921 aconnector = to_amdgpu_dm_connector(connector); 2922 2923 if (!aconnector->dc_link) 2924 continue; 2925 2926 /* 2927 * this is the case when traversing through already created end sink 2928 * MST connectors, should be skipped 2929 */ 2930 if (aconnector && aconnector->mst_root) 2931 continue; 2932 2933 mutex_lock(&aconnector->hpd_lock); 2934 if (!dc_link_detect_connection_type(aconnector->dc_link, &new_connection_type)) 2935 DRM_ERROR("KMS: Failed to detect connector\n"); 2936 2937 if (aconnector->base.force && new_connection_type == dc_connection_none) { 2938 emulated_link_detect(aconnector->dc_link); 2939 } else { 2940 mutex_lock(&dm->dc_lock); 2941 dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD); 2942 mutex_unlock(&dm->dc_lock); 2943 } 2944 2945 if (aconnector->fake_enable && aconnector->dc_link->local_sink) 2946 aconnector->fake_enable = false; 2947 2948 if (aconnector->dc_sink) 2949 dc_sink_release(aconnector->dc_sink); 2950 aconnector->dc_sink = NULL; 2951 amdgpu_dm_update_connector_after_detect(aconnector); 2952 mutex_unlock(&aconnector->hpd_lock); 2953 } 2954 drm_connector_list_iter_end(&iter); 2955 2956 /* Force mode set in atomic commit */ 2957 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) 2958 new_crtc_state->active_changed = true; 2959 2960 /* 2961 * atomic_check is expected to create the dc states. We need to release 2962 * them here, since they were duplicated as part of the suspend 2963 * procedure. 
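 * Dropping the references here lets atomic_check build fresh dc states
 * against the newly constructed dc context instead of reusing stale
 * pre-suspend copies.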
2964 */ 2965 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) { 2966 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 2967 if (dm_new_crtc_state->stream) { 2968 WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1); 2969 dc_stream_release(dm_new_crtc_state->stream); 2970 dm_new_crtc_state->stream = NULL; 2971 } 2972 } 2973 2974 for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) { 2975 dm_new_plane_state = to_dm_plane_state(new_plane_state); 2976 if (dm_new_plane_state->dc_state) { 2977 WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1); 2978 dc_plane_state_release(dm_new_plane_state->dc_state); 2979 dm_new_plane_state->dc_state = NULL; 2980 } 2981 } 2982 2983 drm_atomic_helper_resume(ddev, dm->cached_state); 2984 2985 dm->cached_state = NULL; 2986 2987 /* Do mst topology probing after resuming cached state*/ 2988 drm_connector_list_iter_begin(ddev, &iter); 2989 drm_for_each_connector_iter(connector, &iter) { 2990 aconnector = to_amdgpu_dm_connector(connector); 2991 if (aconnector->dc_link->type != dc_connection_mst_branch || 2992 aconnector->mst_root) 2993 continue; 2994 2995 ret = drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr, true); 2996 2997 if (ret < 0) { 2998 dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx, 2999 aconnector->dc_link); 3000 need_hotplug = true; 3001 } 3002 } 3003 drm_connector_list_iter_end(&iter); 3004 3005 if (need_hotplug) 3006 drm_kms_helper_hotplug_event(ddev); 3007 3008 amdgpu_dm_irq_resume_late(adev); 3009 3010 amdgpu_dm_smu_write_watermarks_table(adev); 3011 3012 return 0; 3013 } 3014 3015 /** 3016 * DOC: DM Lifecycle 3017 * 3018 * DM (and consequently DC) is registered in the amdgpu base driver as a IP 3019 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to 3020 * the base driver's device list to be initialized and torn down accordingly. 3021 * 3022 * The functions to do so are provided as hooks in &struct amd_ip_funcs. 
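 *
 * As a rough sketch of the flow (hook names as wired up in the
 * amdgpu_dm_funcs table below): sw_init/sw_fini manage firmware and the
 * DMUB service, hw_init/hw_fini bring DC itself up and down, and
 * suspend/resume implement the S3 entry and exit paths.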
3023 */ 3024 3025 static const struct amd_ip_funcs amdgpu_dm_funcs = { 3026 .name = "dm", 3027 .early_init = dm_early_init, 3028 .late_init = dm_late_init, 3029 .sw_init = dm_sw_init, 3030 .sw_fini = dm_sw_fini, 3031 .early_fini = amdgpu_dm_early_fini, 3032 .hw_init = dm_hw_init, 3033 .hw_fini = dm_hw_fini, 3034 .suspend = dm_suspend, 3035 .resume = dm_resume, 3036 .is_idle = dm_is_idle, 3037 .wait_for_idle = dm_wait_for_idle, 3038 .check_soft_reset = dm_check_soft_reset, 3039 .soft_reset = dm_soft_reset, 3040 .set_clockgating_state = dm_set_clockgating_state, 3041 .set_powergating_state = dm_set_powergating_state, 3042 }; 3043 3044 const struct amdgpu_ip_block_version dm_ip_block = { 3045 .type = AMD_IP_BLOCK_TYPE_DCE, 3046 .major = 1, 3047 .minor = 0, 3048 .rev = 0, 3049 .funcs = &amdgpu_dm_funcs, 3050 }; 3051 3052 3053 /** 3054 * DOC: atomic 3055 * 3056 * *WIP* 3057 */ 3058 3059 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = { 3060 .fb_create = amdgpu_display_user_framebuffer_create, 3061 .get_format_info = amdgpu_dm_plane_get_format_info, 3062 .atomic_check = amdgpu_dm_atomic_check, 3063 .atomic_commit = drm_atomic_helper_commit, 3064 }; 3065 3066 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = { 3067 .atomic_commit_tail = amdgpu_dm_atomic_commit_tail, 3068 .atomic_commit_setup = drm_dp_mst_atomic_setup_commit, 3069 }; 3070 3071 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector) 3072 { 3073 struct amdgpu_dm_backlight_caps *caps; 3074 struct drm_connector *conn_base; 3075 struct amdgpu_device *adev; 3076 struct drm_luminance_range_info *luminance_range; 3077 3078 if (aconnector->bl_idx == -1 || 3079 aconnector->dc_link->connector_signal != SIGNAL_TYPE_EDP) 3080 return; 3081 3082 conn_base = &aconnector->base; 3083 adev = drm_to_adev(conn_base->dev); 3084 3085 caps = &adev->dm.backlight_caps[aconnector->bl_idx]; 3086 caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps; 3087 caps->aux_support = false; 3088 3089 if (caps->ext_caps->bits.oled == 1 3090 /* 3091 * || 3092 * caps->ext_caps->bits.sdr_aux_backlight_control == 1 || 3093 * caps->ext_caps->bits.hdr_aux_backlight_control == 1 3094 */) 3095 caps->aux_support = true; 3096 3097 if (amdgpu_backlight == 0) 3098 caps->aux_support = false; 3099 else if (amdgpu_backlight == 1) 3100 caps->aux_support = true; 3101 3102 luminance_range = &conn_base->display_info.luminance_range; 3103 3104 if (luminance_range->max_luminance) { 3105 caps->aux_min_input_signal = luminance_range->min_luminance; 3106 caps->aux_max_input_signal = luminance_range->max_luminance; 3107 } else { 3108 caps->aux_min_input_signal = 0; 3109 caps->aux_max_input_signal = 512; 3110 } 3111 } 3112 3113 void amdgpu_dm_update_connector_after_detect( 3114 struct amdgpu_dm_connector *aconnector) 3115 { 3116 struct drm_connector *connector = &aconnector->base; 3117 struct drm_device *dev = connector->dev; 3118 struct dc_sink *sink; 3119 3120 /* MST handled by drm_mst framework */ 3121 if (aconnector->mst_mgr.mst_state == true) 3122 return; 3123 3124 sink = aconnector->dc_link->local_sink; 3125 if (sink) 3126 dc_sink_retain(sink); 3127 3128 /* 3129 * Edid mgmt connector gets first update only in mode_valid hook and then 3130 * the connector sink is set to either fake or physical sink depends on link status. 3131 * Skip if already done during boot. 
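 * (The emulated sink, aconnector->dc_em_sink, is what carries the fake
 * EDID in that case.)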
3132 */ 3133 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED 3134 && aconnector->dc_em_sink) { 3135 3136 /* 3137 * For S3 resume with headless use eml_sink to fake stream 3138 * because on resume connector->sink is set to NULL 3139 */ 3140 mutex_lock(&dev->mode_config.mutex); 3141 3142 if (sink) { 3143 if (aconnector->dc_sink) { 3144 amdgpu_dm_update_freesync_caps(connector, NULL); 3145 /* 3146 * retain and release below are used to 3147 * bump up refcount for sink because the link doesn't point 3148 * to it anymore after disconnect, so on next crtc to connector 3149 * reshuffle by UMD we will get into unwanted dc_sink release 3150 */ 3151 dc_sink_release(aconnector->dc_sink); 3152 } 3153 aconnector->dc_sink = sink; 3154 dc_sink_retain(aconnector->dc_sink); 3155 amdgpu_dm_update_freesync_caps(connector, 3156 aconnector->edid); 3157 } else { 3158 amdgpu_dm_update_freesync_caps(connector, NULL); 3159 if (!aconnector->dc_sink) { 3160 aconnector->dc_sink = aconnector->dc_em_sink; 3161 dc_sink_retain(aconnector->dc_sink); 3162 } 3163 } 3164 3165 mutex_unlock(&dev->mode_config.mutex); 3166 3167 if (sink) 3168 dc_sink_release(sink); 3169 return; 3170 } 3171 3172 /* 3173 * TODO: temporary guard to look for proper fix 3174 * if this sink is MST sink, we should not do anything 3175 */ 3176 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { 3177 dc_sink_release(sink); 3178 return; 3179 } 3180 3181 if (aconnector->dc_sink == sink) { 3182 /* 3183 * We got a DP short pulse (Link Loss, DP CTS, etc...). 3184 * Do nothing!! 3185 */ 3186 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n", 3187 aconnector->connector_id); 3188 if (sink) 3189 dc_sink_release(sink); 3190 return; 3191 } 3192 3193 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n", 3194 aconnector->connector_id, aconnector->dc_sink, sink); 3195 3196 mutex_lock(&dev->mode_config.mutex); 3197 3198 /* 3199 * 1. Update status of the drm connector 3200 * 2. Send an event and let userspace tell us what to do 3201 */ 3202 if (sink) { 3203 /* 3204 * TODO: check if we still need the S3 mode update workaround. 3205 * If yes, put it here. 
3206 */ 3207 if (aconnector->dc_sink) { 3208 amdgpu_dm_update_freesync_caps(connector, NULL); 3209 dc_sink_release(aconnector->dc_sink); 3210 } 3211 3212 aconnector->dc_sink = sink; 3213 dc_sink_retain(aconnector->dc_sink); 3214 if (sink->dc_edid.length == 0) { 3215 aconnector->edid = NULL; 3216 if (aconnector->dc_link->aux_mode) { 3217 drm_dp_cec_unset_edid( 3218 &aconnector->dm_dp_aux.aux); 3219 } 3220 } else { 3221 aconnector->edid = 3222 (struct edid *)sink->dc_edid.raw_edid; 3223 3224 if (aconnector->dc_link->aux_mode) 3225 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux, 3226 aconnector->edid); 3227 } 3228 3229 if (!aconnector->timing_requested) { 3230 aconnector->timing_requested = 3231 kzalloc(sizeof(struct dc_crtc_timing), GFP_KERNEL); 3232 if (!aconnector->timing_requested) 3233 drm_err(dev, 3234 "failed to create aconnector->requested_timing\n"); 3235 } 3236 3237 drm_connector_update_edid_property(connector, aconnector->edid); 3238 amdgpu_dm_update_freesync_caps(connector, aconnector->edid); 3239 update_connector_ext_caps(aconnector); 3240 } else { 3241 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux); 3242 amdgpu_dm_update_freesync_caps(connector, NULL); 3243 drm_connector_update_edid_property(connector, NULL); 3244 aconnector->num_modes = 0; 3245 dc_sink_release(aconnector->dc_sink); 3246 aconnector->dc_sink = NULL; 3247 aconnector->edid = NULL; 3248 kfree(aconnector->timing_requested); 3249 aconnector->timing_requested = NULL; 3250 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */ 3251 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) 3252 connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; 3253 } 3254 3255 mutex_unlock(&dev->mode_config.mutex); 3256 3257 update_subconnector_property(aconnector); 3258 3259 if (sink) 3260 dc_sink_release(sink); 3261 } 3262 3263 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector) 3264 { 3265 struct drm_connector *connector = &aconnector->base; 3266 struct drm_device *dev = connector->dev; 3267 enum dc_connection_type new_connection_type = dc_connection_none; 3268 struct amdgpu_device *adev = drm_to_adev(dev); 3269 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state); 3270 bool ret = false; 3271 3272 if (adev->dm.disable_hpd_irq) 3273 return; 3274 3275 /* 3276 * In case of failure or MST no need to update connector status or notify the OS 3277 * since (for MST case) MST does this in its own context. 
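 *
 * Note that hpd_lock below also serializes this handler against the
 * detection pass in dm_resume(), which takes the same lock per connector.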
3278 */ 3279 mutex_lock(&aconnector->hpd_lock); 3280 3281 if (adev->dm.hdcp_workqueue) { 3282 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index); 3283 dm_con_state->update_hdcp = true; 3284 } 3285 if (aconnector->fake_enable) 3286 aconnector->fake_enable = false; 3287 3288 aconnector->timing_changed = false; 3289 3290 if (!dc_link_detect_connection_type(aconnector->dc_link, &new_connection_type)) 3291 DRM_ERROR("KMS: Failed to detect connector\n"); 3292 3293 if (aconnector->base.force && new_connection_type == dc_connection_none) { 3294 emulated_link_detect(aconnector->dc_link); 3295 3296 drm_modeset_lock_all(dev); 3297 dm_restore_drm_connector_state(dev, connector); 3298 drm_modeset_unlock_all(dev); 3299 3300 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED) 3301 drm_kms_helper_connector_hotplug_event(connector); 3302 } else { 3303 mutex_lock(&adev->dm.dc_lock); 3304 ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD); 3305 mutex_unlock(&adev->dm.dc_lock); 3306 if (ret) { 3307 amdgpu_dm_update_connector_after_detect(aconnector); 3308 3309 drm_modeset_lock_all(dev); 3310 dm_restore_drm_connector_state(dev, connector); 3311 drm_modeset_unlock_all(dev); 3312 3313 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED) 3314 drm_kms_helper_connector_hotplug_event(connector); 3315 } 3316 } 3317 mutex_unlock(&aconnector->hpd_lock); 3318 3319 } 3320 3321 static void handle_hpd_irq(void *param) 3322 { 3323 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param; 3324 3325 handle_hpd_irq_helper(aconnector); 3326 3327 } 3328 3329 static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq, 3330 union hpd_irq_data hpd_irq_data) 3331 { 3332 struct hpd_rx_irq_offload_work *offload_work = 3333 kzalloc(sizeof(*offload_work), GFP_KERNEL); 3334 3335 if (!offload_work) { 3336 DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n"); 3337 return; 3338 } 3339 3340 INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work); 3341 offload_work->data = hpd_irq_data; 3342 offload_work->offload_wq = offload_wq; 3343 3344 queue_work(offload_wq->wq, &offload_work->work); 3345 DRM_DEBUG_KMS("queue work to handle hpd_rx offload work"); 3346 } 3347 3348 static void handle_hpd_rx_irq(void *param) 3349 { 3350 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param; 3351 struct drm_connector *connector = &aconnector->base; 3352 struct drm_device *dev = connector->dev; 3353 struct dc_link *dc_link = aconnector->dc_link; 3354 bool is_mst_root_connector = aconnector->mst_mgr.mst_state; 3355 bool result = false; 3356 enum dc_connection_type new_connection_type = dc_connection_none; 3357 struct amdgpu_device *adev = drm_to_adev(dev); 3358 union hpd_irq_data hpd_irq_data; 3359 bool link_loss = false; 3360 bool has_left_work = false; 3361 int idx = dc_link->link_index; 3362 struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx]; 3363 3364 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data)); 3365 3366 if (adev->dm.disable_hpd_irq) 3367 return; 3368 3369 /* 3370 * TODO:Temporary add mutex to protect hpd interrupt not have a gpio 3371 * conflict, after implement i2c helper, this mutex should be 3372 * retired. 
3373 */ 3374 mutex_lock(&aconnector->hpd_lock); 3375 3376 result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, 3377 &link_loss, true, &has_left_work); 3378 3379 if (!has_left_work) 3380 goto out; 3381 3382 if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) { 3383 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data); 3384 goto out; 3385 } 3386 3387 if (dc_link_dp_allow_hpd_rx_irq(dc_link)) { 3388 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY || 3389 hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) { 3390 bool skip = false; 3391 3392 /* 3393 * DOWN_REP_MSG_RDY is also handled by polling method 3394 * mgr->cbs->poll_hpd_irq() 3395 */ 3396 spin_lock(&offload_wq->offload_lock); 3397 skip = offload_wq->is_handling_mst_msg_rdy_event; 3398 3399 if (!skip) 3400 offload_wq->is_handling_mst_msg_rdy_event = true; 3401 3402 spin_unlock(&offload_wq->offload_lock); 3403 3404 if (!skip) 3405 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data); 3406 3407 goto out; 3408 } 3409 3410 if (link_loss) { 3411 bool skip = false; 3412 3413 spin_lock(&offload_wq->offload_lock); 3414 skip = offload_wq->is_handling_link_loss; 3415 3416 if (!skip) 3417 offload_wq->is_handling_link_loss = true; 3418 3419 spin_unlock(&offload_wq->offload_lock); 3420 3421 if (!skip) 3422 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data); 3423 3424 goto out; 3425 } 3426 } 3427 3428 out: 3429 if (result && !is_mst_root_connector) { 3430 /* Downstream Port status changed. */ 3431 if (!dc_link_detect_connection_type(dc_link, &new_connection_type)) 3432 DRM_ERROR("KMS: Failed to detect connector\n"); 3433 3434 if (aconnector->base.force && new_connection_type == dc_connection_none) { 3435 emulated_link_detect(dc_link); 3436 3437 if (aconnector->fake_enable) 3438 aconnector->fake_enable = false; 3439 3440 amdgpu_dm_update_connector_after_detect(aconnector); 3441 3442 3443 drm_modeset_lock_all(dev); 3444 dm_restore_drm_connector_state(dev, connector); 3445 drm_modeset_unlock_all(dev); 3446 3447 drm_kms_helper_connector_hotplug_event(connector); 3448 } else { 3449 bool ret = false; 3450 3451 mutex_lock(&adev->dm.dc_lock); 3452 ret = dc_link_detect(dc_link, DETECT_REASON_HPDRX); 3453 mutex_unlock(&adev->dm.dc_lock); 3454 3455 if (ret) { 3456 if (aconnector->fake_enable) 3457 aconnector->fake_enable = false; 3458 3459 amdgpu_dm_update_connector_after_detect(aconnector); 3460 3461 drm_modeset_lock_all(dev); 3462 dm_restore_drm_connector_state(dev, connector); 3463 drm_modeset_unlock_all(dev); 3464 3465 drm_kms_helper_connector_hotplug_event(connector); 3466 } 3467 } 3468 } 3469 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) { 3470 if (adev->dm.hdcp_workqueue) 3471 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index); 3472 } 3473 3474 if (dc_link->type != dc_connection_mst_branch) 3475 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux); 3476 3477 mutex_unlock(&aconnector->hpd_lock); 3478 } 3479 3480 static void register_hpd_handlers(struct amdgpu_device *adev) 3481 { 3482 struct drm_device *dev = adev_to_drm(adev); 3483 struct drm_connector *connector; 3484 struct amdgpu_dm_connector *aconnector; 3485 const struct dc_link *dc_link; 3486 struct dc_interrupt_params int_params = {0}; 3487 3488 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT; 3489 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT; 3490 3491 list_for_each_entry(connector, 3492 &dev->mode_config.connector_list, head) { 3493 3494 aconnector = to_amdgpu_dm_connector(connector); 3495 dc_link = 
aconnector->dc_link; 3496 3497 if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) { 3498 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT; 3499 int_params.irq_source = dc_link->irq_source_hpd; 3500 3501 amdgpu_dm_irq_register_interrupt(adev, &int_params, 3502 handle_hpd_irq, 3503 (void *) aconnector); 3504 } 3505 3506 if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) { 3507 3508 /* Also register for DP short pulse (hpd_rx). */ 3509 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT; 3510 int_params.irq_source = dc_link->irq_source_hpd_rx; 3511 3512 amdgpu_dm_irq_register_interrupt(adev, &int_params, 3513 handle_hpd_rx_irq, 3514 (void *) aconnector); 3515 } 3516 3517 if (adev->dm.hpd_rx_offload_wq) 3518 adev->dm.hpd_rx_offload_wq[connector->index].aconnector = 3519 aconnector; 3520 } 3521 } 3522 3523 #if defined(CONFIG_DRM_AMD_DC_SI) 3524 /* Register IRQ sources and initialize IRQ callbacks */ 3525 static int dce60_register_irq_handlers(struct amdgpu_device *adev) 3526 { 3527 struct dc *dc = adev->dm.dc; 3528 struct common_irq_params *c_irq_params; 3529 struct dc_interrupt_params int_params = {0}; 3530 int r; 3531 int i; 3532 unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY; 3533 3534 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT; 3535 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT; 3536 3537 /* 3538 * Actions of amdgpu_irq_add_id(): 3539 * 1. Register a set() function with base driver. 3540 * Base driver will call set() function to enable/disable an 3541 * interrupt in DC hardware. 3542 * 2. Register amdgpu_dm_irq_handler(). 3543 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts 3544 * coming from DC hardware. 3545 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC 3546 * for acknowledging and handling. 
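 *
 * The same pattern repeats for every source registered below: add the id
 * with the base driver, translate it to a DC irq_source, fill in the
 * per-source common_irq_params, then attach the DM high-irq handler.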
3547 */ 3548 3549 /* Use VBLANK interrupt */ 3550 for (i = 0; i < adev->mode_info.num_crtc; i++) { 3551 r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq); 3552 if (r) { 3553 DRM_ERROR("Failed to add crtc irq id!\n"); 3554 return r; 3555 } 3556 3557 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 3558 int_params.irq_source = 3559 dc_interrupt_to_irq_source(dc, i + 1, 0); 3560 3561 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1]; 3562 3563 c_irq_params->adev = adev; 3564 c_irq_params->irq_src = int_params.irq_source; 3565 3566 amdgpu_dm_irq_register_interrupt(adev, &int_params, 3567 dm_crtc_high_irq, c_irq_params); 3568 } 3569 3570 /* Use GRPH_PFLIP interrupt */ 3571 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP; 3572 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) { 3573 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq); 3574 if (r) { 3575 DRM_ERROR("Failed to add page flip irq id!\n"); 3576 return r; 3577 } 3578 3579 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 3580 int_params.irq_source = 3581 dc_interrupt_to_irq_source(dc, i, 0); 3582 3583 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST]; 3584 3585 c_irq_params->adev = adev; 3586 c_irq_params->irq_src = int_params.irq_source; 3587 3588 amdgpu_dm_irq_register_interrupt(adev, &int_params, 3589 dm_pflip_high_irq, c_irq_params); 3590 3591 } 3592 3593 /* HPD */ 3594 r = amdgpu_irq_add_id(adev, client_id, 3595 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq); 3596 if (r) { 3597 DRM_ERROR("Failed to add hpd irq id!\n"); 3598 return r; 3599 } 3600 3601 register_hpd_handlers(adev); 3602 3603 return 0; 3604 } 3605 #endif 3606 3607 /* Register IRQ sources and initialize IRQ callbacks */ 3608 static int dce110_register_irq_handlers(struct amdgpu_device *adev) 3609 { 3610 struct dc *dc = adev->dm.dc; 3611 struct common_irq_params *c_irq_params; 3612 struct dc_interrupt_params int_params = {0}; 3613 int r; 3614 int i; 3615 unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY; 3616 3617 if (adev->family >= AMDGPU_FAMILY_AI) 3618 client_id = SOC15_IH_CLIENTID_DCE; 3619 3620 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT; 3621 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT; 3622 3623 /* 3624 * Actions of amdgpu_irq_add_id(): 3625 * 1. Register a set() function with base driver. 3626 * Base driver will call set() function to enable/disable an 3627 * interrupt in DC hardware. 3628 * 2. Register amdgpu_dm_irq_handler(). 3629 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts 3630 * coming from DC hardware. 3631 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC 3632 * for acknowledging and handling. 
3633 */ 3634 3635 /* Use VBLANK interrupt */ 3636 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) { 3637 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq); 3638 if (r) { 3639 DRM_ERROR("Failed to add crtc irq id!\n"); 3640 return r; 3641 } 3642 3643 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 3644 int_params.irq_source = 3645 dc_interrupt_to_irq_source(dc, i, 0); 3646 3647 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1]; 3648 3649 c_irq_params->adev = adev; 3650 c_irq_params->irq_src = int_params.irq_source; 3651 3652 amdgpu_dm_irq_register_interrupt(adev, &int_params, 3653 dm_crtc_high_irq, c_irq_params); 3654 } 3655 3656 /* Use VUPDATE interrupt */ 3657 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) { 3658 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq); 3659 if (r) { 3660 DRM_ERROR("Failed to add vupdate irq id!\n"); 3661 return r; 3662 } 3663 3664 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 3665 int_params.irq_source = 3666 dc_interrupt_to_irq_source(dc, i, 0); 3667 3668 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1]; 3669 3670 c_irq_params->adev = adev; 3671 c_irq_params->irq_src = int_params.irq_source; 3672 3673 amdgpu_dm_irq_register_interrupt(adev, &int_params, 3674 dm_vupdate_high_irq, c_irq_params); 3675 } 3676 3677 /* Use GRPH_PFLIP interrupt */ 3678 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP; 3679 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) { 3680 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq); 3681 if (r) { 3682 DRM_ERROR("Failed to add page flip irq id!\n"); 3683 return r; 3684 } 3685 3686 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 3687 int_params.irq_source = 3688 dc_interrupt_to_irq_source(dc, i, 0); 3689 3690 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST]; 3691 3692 c_irq_params->adev = adev; 3693 c_irq_params->irq_src = int_params.irq_source; 3694 3695 amdgpu_dm_irq_register_interrupt(adev, &int_params, 3696 dm_pflip_high_irq, c_irq_params); 3697 3698 } 3699 3700 /* HPD */ 3701 r = amdgpu_irq_add_id(adev, client_id, 3702 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq); 3703 if (r) { 3704 DRM_ERROR("Failed to add hpd irq id!\n"); 3705 return r; 3706 } 3707 3708 register_hpd_handlers(adev); 3709 3710 return 0; 3711 } 3712 3713 /* Register IRQ sources and initialize IRQ callbacks */ 3714 static int dcn10_register_irq_handlers(struct amdgpu_device *adev) 3715 { 3716 struct dc *dc = adev->dm.dc; 3717 struct common_irq_params *c_irq_params; 3718 struct dc_interrupt_params int_params = {0}; 3719 int r; 3720 int i; 3721 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) 3722 static const unsigned int vrtl_int_srcid[] = { 3723 DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL, 3724 DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL, 3725 DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL, 3726 DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL, 3727 DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL, 3728 DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL 3729 }; 3730 #endif 3731 3732 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT; 3733 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT; 3734 3735 /* 3736 * Actions of amdgpu_irq_add_id(): 3737 * 1. Register a set() function with base driver. 
3738 * Base driver will call set() function to enable/disable an 3739 * interrupt in DC hardware. 3740 * 2. Register amdgpu_dm_irq_handler(). 3741 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts 3742 * coming from DC hardware. 3743 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC 3744 * for acknowledging and handling. 3745 */ 3746 3747 /* Use VSTARTUP interrupt */ 3748 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP; 3749 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1; 3750 i++) { 3751 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq); 3752 3753 if (r) { 3754 DRM_ERROR("Failed to add crtc irq id!\n"); 3755 return r; 3756 } 3757 3758 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 3759 int_params.irq_source = 3760 dc_interrupt_to_irq_source(dc, i, 0); 3761 3762 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1]; 3763 3764 c_irq_params->adev = adev; 3765 c_irq_params->irq_src = int_params.irq_source; 3766 3767 amdgpu_dm_irq_register_interrupt( 3768 adev, &int_params, dm_crtc_high_irq, c_irq_params); 3769 } 3770 3771 /* Use otg vertical line interrupt */ 3772 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) 3773 for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) { 3774 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, 3775 vrtl_int_srcid[i], &adev->vline0_irq); 3776 3777 if (r) { 3778 DRM_ERROR("Failed to add vline0 irq id!\n"); 3779 return r; 3780 } 3781 3782 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 3783 int_params.irq_source = 3784 dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0); 3785 3786 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) { 3787 DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]); 3788 break; 3789 } 3790 3791 c_irq_params = &adev->dm.vline0_params[int_params.irq_source 3792 - DC_IRQ_SOURCE_DC1_VLINE0]; 3793 3794 c_irq_params->adev = adev; 3795 c_irq_params->irq_src = int_params.irq_source; 3796 3797 amdgpu_dm_irq_register_interrupt(adev, &int_params, 3798 dm_dcn_vertical_interrupt0_high_irq, c_irq_params); 3799 } 3800 #endif 3801 3802 /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to 3803 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx 3804 * to trigger at end of each vblank, regardless of state of the lock, 3805 * matching DCE behaviour. 
3806 */ 3807 for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT; 3808 i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1; 3809 i++) { 3810 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq); 3811 3812 if (r) { 3813 DRM_ERROR("Failed to add vupdate irq id!\n"); 3814 return r; 3815 } 3816 3817 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 3818 int_params.irq_source = 3819 dc_interrupt_to_irq_source(dc, i, 0); 3820 3821 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1]; 3822 3823 c_irq_params->adev = adev; 3824 c_irq_params->irq_src = int_params.irq_source; 3825 3826 amdgpu_dm_irq_register_interrupt(adev, &int_params, 3827 dm_vupdate_high_irq, c_irq_params); 3828 } 3829 3830 /* Use GRPH_PFLIP interrupt */ 3831 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT; 3832 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1; 3833 i++) { 3834 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq); 3835 if (r) { 3836 DRM_ERROR("Failed to add page flip irq id!\n"); 3837 return r; 3838 } 3839 3840 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 3841 int_params.irq_source = 3842 dc_interrupt_to_irq_source(dc, i, 0); 3843 3844 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST]; 3845 3846 c_irq_params->adev = adev; 3847 c_irq_params->irq_src = int_params.irq_source; 3848 3849 amdgpu_dm_irq_register_interrupt(adev, &int_params, 3850 dm_pflip_high_irq, c_irq_params); 3851 3852 } 3853 3854 /* HPD */ 3855 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT, 3856 &adev->hpd_irq); 3857 if (r) { 3858 DRM_ERROR("Failed to add hpd irq id!\n"); 3859 return r; 3860 } 3861 3862 register_hpd_handlers(adev); 3863 3864 return 0; 3865 } 3866 /* Register Outbox IRQ sources and initialize IRQ callbacks */ 3867 static int register_outbox_irq_handlers(struct amdgpu_device *adev) 3868 { 3869 struct dc *dc = adev->dm.dc; 3870 struct common_irq_params *c_irq_params; 3871 struct dc_interrupt_params int_params = {0}; 3872 int r, i; 3873 3874 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT; 3875 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT; 3876 3877 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT, 3878 &adev->dmub_outbox_irq); 3879 if (r) { 3880 DRM_ERROR("Failed to add outbox irq id!\n"); 3881 return r; 3882 } 3883 3884 if (dc->ctx->dmub_srv) { 3885 i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT; 3886 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT; 3887 int_params.irq_source = 3888 dc_interrupt_to_irq_source(dc, i, 0); 3889 3890 c_irq_params = &adev->dm.dmub_outbox_params[0]; 3891 3892 c_irq_params->adev = adev; 3893 c_irq_params->irq_src = int_params.irq_source; 3894 3895 amdgpu_dm_irq_register_interrupt(adev, &int_params, 3896 dm_dmub_outbox1_low_irq, c_irq_params); 3897 } 3898 3899 return 0; 3900 } 3901 3902 /* 3903 * Acquires the lock for the atomic state object and returns 3904 * the new atomic state. 3905 * 3906 * This should only be called during atomic check. 
3907 */ 3908 int dm_atomic_get_state(struct drm_atomic_state *state, 3909 struct dm_atomic_state **dm_state) 3910 { 3911 struct drm_device *dev = state->dev; 3912 struct amdgpu_device *adev = drm_to_adev(dev); 3913 struct amdgpu_display_manager *dm = &adev->dm; 3914 struct drm_private_state *priv_state; 3915 3916 if (*dm_state) 3917 return 0; 3918 3919 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj); 3920 if (IS_ERR(priv_state)) 3921 return PTR_ERR(priv_state); 3922 3923 *dm_state = to_dm_atomic_state(priv_state); 3924 3925 return 0; 3926 } 3927 3928 static struct dm_atomic_state * 3929 dm_atomic_get_new_state(struct drm_atomic_state *state) 3930 { 3931 struct drm_device *dev = state->dev; 3932 struct amdgpu_device *adev = drm_to_adev(dev); 3933 struct amdgpu_display_manager *dm = &adev->dm; 3934 struct drm_private_obj *obj; 3935 struct drm_private_state *new_obj_state; 3936 int i; 3937 3938 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) { 3939 if (obj->funcs == dm->atomic_obj.funcs) 3940 return to_dm_atomic_state(new_obj_state); 3941 } 3942 3943 return NULL; 3944 } 3945 3946 static struct drm_private_state * 3947 dm_atomic_duplicate_state(struct drm_private_obj *obj) 3948 { 3949 struct dm_atomic_state *old_state, *new_state; 3950 3951 new_state = kzalloc(sizeof(*new_state), GFP_KERNEL); 3952 if (!new_state) 3953 return NULL; 3954 3955 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base); 3956 3957 old_state = to_dm_atomic_state(obj->state); 3958 3959 if (old_state && old_state->context) 3960 new_state->context = dc_copy_state(old_state->context); 3961 3962 if (!new_state->context) { 3963 kfree(new_state); 3964 return NULL; 3965 } 3966 3967 return &new_state->base; 3968 } 3969 3970 static void dm_atomic_destroy_state(struct drm_private_obj *obj, 3971 struct drm_private_state *state) 3972 { 3973 struct dm_atomic_state *dm_state = to_dm_atomic_state(state); 3974 3975 if (dm_state && dm_state->context) 3976 dc_release_state(dm_state->context); 3977 3978 kfree(dm_state); 3979 } 3980 3981 static struct drm_private_state_funcs dm_atomic_state_funcs = { 3982 .atomic_duplicate_state = dm_atomic_duplicate_state, 3983 .atomic_destroy_state = dm_atomic_destroy_state, 3984 }; 3985 3986 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev) 3987 { 3988 struct dm_atomic_state *state; 3989 int r; 3990 3991 adev->mode_info.mode_config_initialized = true; 3992 3993 adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs; 3994 adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs; 3995 3996 adev_to_drm(adev)->mode_config.max_width = 16384; 3997 adev_to_drm(adev)->mode_config.max_height = 16384; 3998 3999 adev_to_drm(adev)->mode_config.preferred_depth = 24; 4000 if (adev->asic_type == CHIP_HAWAII) 4001 /* disable prefer shadow for now due to hibernation issues */ 4002 adev_to_drm(adev)->mode_config.prefer_shadow = 0; 4003 else 4004 adev_to_drm(adev)->mode_config.prefer_shadow = 1; 4005 /* indicates support for immediate flip */ 4006 adev_to_drm(adev)->mode_config.async_page_flip = true; 4007 4008 state = kzalloc(sizeof(*state), GFP_KERNEL); 4009 if (!state) 4010 return -ENOMEM; 4011 4012 state->context = dc_create_state(adev->dm.dc); 4013 if (!state->context) { 4014 kfree(state); 4015 return -ENOMEM; 4016 } 4017 4018 dc_resource_state_copy_construct_current(adev->dm.dc, state->context); 4019 4020 drm_atomic_private_obj_init(adev_to_drm(adev), 4021 &adev->dm.atomic_obj, 4022 &state->base, 4023 
&dm_atomic_state_funcs); 4024 4025 r = amdgpu_display_modeset_create_props(adev); 4026 if (r) { 4027 dc_release_state(state->context); 4028 kfree(state); 4029 return r; 4030 } 4031 4032 r = amdgpu_dm_audio_init(adev); 4033 if (r) { 4034 dc_release_state(state->context); 4035 kfree(state); 4036 return r; 4037 } 4038 4039 return 0; 4040 } 4041 4042 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12 4043 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255 4044 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50 4045 4046 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm, 4047 int bl_idx) 4048 { 4049 #if defined(CONFIG_ACPI) 4050 struct amdgpu_dm_backlight_caps caps; 4051 4052 memset(&caps, 0, sizeof(caps)); 4053 4054 if (dm->backlight_caps[bl_idx].caps_valid) 4055 return; 4056 4057 amdgpu_acpi_get_backlight_caps(&caps); 4058 if (caps.caps_valid) { 4059 dm->backlight_caps[bl_idx].caps_valid = true; 4060 if (caps.aux_support) 4061 return; 4062 dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal; 4063 dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal; 4064 } else { 4065 dm->backlight_caps[bl_idx].min_input_signal = 4066 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT; 4067 dm->backlight_caps[bl_idx].max_input_signal = 4068 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT; 4069 } 4070 #else 4071 if (dm->backlight_caps[bl_idx].aux_support) 4072 return; 4073 4074 dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT; 4075 dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT; 4076 #endif 4077 } 4078 4079 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps, 4080 unsigned int *min, unsigned int *max) 4081 { 4082 if (!caps) 4083 return 0; 4084 4085 if (caps->aux_support) { 4086 // Firmware limits are in nits, DC API wants millinits. 4087 *max = 1000 * caps->aux_max_input_signal; 4088 *min = 1000 * caps->aux_min_input_signal; 4089 } else { 4090 // Firmware limits are 8-bit, PWM control is 16-bit. 
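		// Worked example (illustrative): scaling by 0x101 (257) maps the
		// full 8-bit range exactly onto 16 bits, since 255 * 0x101 = 0xFFFF.
		// With the default caps above (min_input_signal = 12,
		// max_input_signal = 255) this yields *min = 3084 and
		// *max = 65535, which the convert_brightness_* helpers below then
		// rescale against the 0..AMDGPU_MAX_BL_LEVEL (255) user range.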
4091 *max = 0x101 * caps->max_input_signal; 4092 *min = 0x101 * caps->min_input_signal; 4093 } 4094 return 1; 4095 } 4096 4097 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps, 4098 uint32_t brightness) 4099 { 4100 unsigned int min, max; 4101 4102 if (!get_brightness_range(caps, &min, &max)) 4103 return brightness; 4104 4105 // Rescale 0..255 to min..max 4106 return min + DIV_ROUND_CLOSEST((max - min) * brightness, 4107 AMDGPU_MAX_BL_LEVEL); 4108 } 4109 4110 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps, 4111 uint32_t brightness) 4112 { 4113 unsigned int min, max; 4114 4115 if (!get_brightness_range(caps, &min, &max)) 4116 return brightness; 4117 4118 if (brightness < min) 4119 return 0; 4120 // Rescale min..max to 0..255 4121 return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min), 4122 max - min); 4123 } 4124 4125 static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm, 4126 int bl_idx, 4127 u32 user_brightness) 4128 { 4129 struct amdgpu_dm_backlight_caps caps; 4130 struct dc_link *link; 4131 u32 brightness; 4132 bool rc; 4133 4134 amdgpu_dm_update_backlight_caps(dm, bl_idx); 4135 caps = dm->backlight_caps[bl_idx]; 4136 4137 dm->brightness[bl_idx] = user_brightness; 4138 /* update scratch register */ 4139 if (bl_idx == 0) 4140 amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]); 4141 brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]); 4142 link = (struct dc_link *)dm->backlight_link[bl_idx]; 4143 4144 /* Change brightness based on AUX property */ 4145 if (caps.aux_support) { 4146 rc = dc_link_set_backlight_level_nits(link, true, brightness, 4147 AUX_BL_DEFAULT_TRANSITION_TIME_MS); 4148 if (!rc) 4149 DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx); 4150 } else { 4151 rc = dc_link_set_backlight_level(link, brightness, 0); 4152 if (!rc) 4153 DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx); 4154 } 4155 4156 if (rc) 4157 dm->actual_brightness[bl_idx] = user_brightness; 4158 } 4159 4160 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd) 4161 { 4162 struct amdgpu_display_manager *dm = bl_get_data(bd); 4163 int i; 4164 4165 for (i = 0; i < dm->num_of_edps; i++) { 4166 if (bd == dm->backlight_dev[i]) 4167 break; 4168 } 4169 if (i >= AMDGPU_DM_MAX_NUM_EDP) 4170 i = 0; 4171 amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness); 4172 4173 return 0; 4174 } 4175 4176 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm, 4177 int bl_idx) 4178 { 4179 int ret; 4180 struct amdgpu_dm_backlight_caps caps; 4181 struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx]; 4182 4183 amdgpu_dm_update_backlight_caps(dm, bl_idx); 4184 caps = dm->backlight_caps[bl_idx]; 4185 4186 if (caps.aux_support) { 4187 u32 avg, peak; 4188 bool rc; 4189 4190 rc = dc_link_get_backlight_level_nits(link, &avg, &peak); 4191 if (!rc) 4192 return dm->brightness[bl_idx]; 4193 return convert_brightness_to_user(&caps, avg); 4194 } 4195 4196 ret = dc_link_get_backlight_level(link); 4197 4198 if (ret == DC_ERROR_UNEXPECTED) 4199 return dm->brightness[bl_idx]; 4200 4201 return convert_brightness_to_user(&caps, ret); 4202 } 4203 4204 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd) 4205 { 4206 struct amdgpu_display_manager *dm = bl_get_data(bd); 4207 int i; 4208 4209 for (i = 0; i < dm->num_of_edps; i++) { 4210 if (bd == dm->backlight_dev[i]) 4211 break; 4212 } 
4213 if (i >= AMDGPU_DM_MAX_NUM_EDP) 4214 i = 0; 4215 return amdgpu_dm_backlight_get_level(dm, i); 4216 } 4217 4218 static const struct backlight_ops amdgpu_dm_backlight_ops = { 4219 .options = BL_CORE_SUSPENDRESUME, 4220 .get_brightness = amdgpu_dm_backlight_get_brightness, 4221 .update_status = amdgpu_dm_backlight_update_status, 4222 }; 4223 4224 static void 4225 amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector) 4226 { 4227 struct drm_device *drm = aconnector->base.dev; 4228 struct amdgpu_display_manager *dm = &drm_to_adev(drm)->dm; 4229 struct backlight_properties props = { 0 }; 4230 char bl_name[16]; 4231 4232 if (aconnector->bl_idx == -1) 4233 return; 4234 4235 if (!acpi_video_backlight_use_native()) { 4236 drm_info(drm, "Skipping amdgpu DM backlight registration\n"); 4237 /* Try registering an ACPI video backlight device instead. */ 4238 acpi_video_register_backlight(); 4239 return; 4240 } 4241 4242 props.max_brightness = AMDGPU_MAX_BL_LEVEL; 4243 props.brightness = AMDGPU_MAX_BL_LEVEL; 4244 props.type = BACKLIGHT_RAW; 4245 4246 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d", 4247 drm->primary->index + aconnector->bl_idx); 4248 4249 dm->backlight_dev[aconnector->bl_idx] = 4250 backlight_device_register(bl_name, aconnector->base.kdev, dm, 4251 &amdgpu_dm_backlight_ops, &props); 4252 4253 if (IS_ERR(dm->backlight_dev[aconnector->bl_idx])) { 4254 DRM_ERROR("DM: Backlight registration failed!\n"); 4255 dm->backlight_dev[aconnector->bl_idx] = NULL; 4256 } else 4257 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name); 4258 } 4259 4260 static int initialize_plane(struct amdgpu_display_manager *dm, 4261 struct amdgpu_mode_info *mode_info, int plane_id, 4262 enum drm_plane_type plane_type, 4263 const struct dc_plane_cap *plane_cap) 4264 { 4265 struct drm_plane *plane; 4266 unsigned long possible_crtcs; 4267 int ret = 0; 4268 4269 plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL); 4270 if (!plane) { 4271 DRM_ERROR("KMS: Failed to allocate plane\n"); 4272 return -ENOMEM; 4273 } 4274 plane->type = plane_type; 4275 4276 /* 4277 * HACK: IGT tests expect that the primary plane for a CRTC 4278 * can only have one possible CRTC. Only expose support for 4279 * any CRTC if they're not going to be used as a primary plane 4280 * for a CRTC - like overlay or underlay planes. 
 */
	possible_crtcs = 1 << plane_id;
	if (plane_id >= dm->dc->caps.max_streams)
		possible_crtcs = 0xff;

	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);

	if (ret) {
		DRM_ERROR("KMS: Failed to initialize plane\n");
		kfree(plane);
		return ret;
	}

	if (mode_info)
		mode_info->planes[plane_id] = plane;

	return ret;
}

static void setup_backlight_device(struct amdgpu_display_manager *dm,
				   struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = aconnector->dc_link;
	int bl_idx = dm->num_of_edps;

	if (!(link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) ||
	    link->type == dc_connection_none)
		return;

	if (dm->num_of_edps >= AMDGPU_DM_MAX_NUM_EDP) {
		drm_warn(adev_to_drm(dm->adev), "Too many eDP connections, skipping backlight setup for additional eDPs\n");
		return;
	}

	aconnector->bl_idx = bl_idx;

	amdgpu_dm_update_backlight_caps(dm, bl_idx);
	dm->brightness[bl_idx] = AMDGPU_MAX_BL_LEVEL;
	dm->backlight_link[bl_idx] = link;
	dm->num_of_edps++;

	update_connector_ext_caps(aconnector);
}

static void amdgpu_set_panel_orientation(struct drm_connector *connector);

/*
 * In this architecture, the association
 *  connector -> encoder -> crtc
 * is not really required. The CRTC and connector hold the
 * display_index as an abstraction for use with the DAL component.
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
{
	struct amdgpu_display_manager *dm = &adev->dm;
	s32 i;
	struct amdgpu_dm_connector *aconnector = NULL;
	struct amdgpu_encoder *aencoder = NULL;
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	u32 link_cnt;
	s32 primary_planes;
	enum dc_connection_type new_connection_type = dc_connection_none;
	const struct dc_plane_cap *plane;
	bool psr_feature_enabled = false;
	bool replay_feature_enabled = false;
	int max_overlay = dm->dc->caps.max_slave_planes;

	dm->display_indexes_num = dm->dc->caps.max_streams;
	/* Update the actual number of CRTCs used */
	adev->mode_info.num_crtc = adev->dm.display_indexes_num;

	amdgpu_dm_set_irq_funcs(adev);

	link_cnt = dm->dc->caps.max_links;
	if (amdgpu_dm_mode_config_init(dm->adev)) {
		DRM_ERROR("DM: Failed to initialize mode config\n");
		return -EINVAL;
	}

	/* There is one primary plane per CRTC */
	primary_planes = dm->dc->caps.max_streams;
	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);

	/*
	 * Initialize primary planes, implicit planes for legacy IOCTLS.
	 * Order is reversed to match iteration order in atomic check.
	 */
	for (i = (primary_planes - 1); i >= 0; i--) {
		plane = &dm->dc->caps.planes[i];

		if (initialize_plane(dm, mode_info, i,
				     DRM_PLANE_TYPE_PRIMARY, plane)) {
			DRM_ERROR("KMS: Failed to initialize primary plane\n");
			goto fail;
		}
	}

	/*
	 * Initialize overlay planes, index starting after primary planes.
	 * These planes have a higher DRM index than the primary planes since
	 * they should be considered as having a higher z-order.
	 * Order is reversed to match iteration order in atomic check.
	 *
	 * Only support DCN for now, and only expose one so we don't encourage
	 * userspace to use up all the pipes.
4389 */ 4390 for (i = 0; i < dm->dc->caps.max_planes; ++i) { 4391 struct dc_plane_cap *plane = &dm->dc->caps.planes[i]; 4392 4393 /* Do not create overlay if MPO disabled */ 4394 if (amdgpu_dc_debug_mask & DC_DISABLE_MPO) 4395 break; 4396 4397 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL) 4398 continue; 4399 4400 if (!plane->pixel_format_support.argb8888) 4401 continue; 4402 4403 if (max_overlay-- == 0) 4404 break; 4405 4406 if (initialize_plane(dm, NULL, primary_planes + i, 4407 DRM_PLANE_TYPE_OVERLAY, plane)) { 4408 DRM_ERROR("KMS: Failed to initialize overlay plane\n"); 4409 goto fail; 4410 } 4411 } 4412 4413 for (i = 0; i < dm->dc->caps.max_streams; i++) 4414 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) { 4415 DRM_ERROR("KMS: Failed to initialize crtc\n"); 4416 goto fail; 4417 } 4418 4419 /* Use Outbox interrupt */ 4420 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { 4421 case IP_VERSION(3, 0, 0): 4422 case IP_VERSION(3, 1, 2): 4423 case IP_VERSION(3, 1, 3): 4424 case IP_VERSION(3, 1, 4): 4425 case IP_VERSION(3, 1, 5): 4426 case IP_VERSION(3, 1, 6): 4427 case IP_VERSION(3, 2, 0): 4428 case IP_VERSION(3, 2, 1): 4429 case IP_VERSION(2, 1, 0): 4430 case IP_VERSION(3, 5, 0): 4431 if (register_outbox_irq_handlers(dm->adev)) { 4432 DRM_ERROR("DM: Failed to initialize IRQ\n"); 4433 goto fail; 4434 } 4435 break; 4436 default: 4437 DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n", 4438 amdgpu_ip_version(adev, DCE_HWIP, 0)); 4439 } 4440 4441 /* Determine whether to enable PSR support by default. */ 4442 if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) { 4443 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { 4444 case IP_VERSION(3, 1, 2): 4445 case IP_VERSION(3, 1, 3): 4446 case IP_VERSION(3, 1, 4): 4447 case IP_VERSION(3, 1, 5): 4448 case IP_VERSION(3, 1, 6): 4449 case IP_VERSION(3, 2, 0): 4450 case IP_VERSION(3, 2, 1): 4451 case IP_VERSION(3, 5, 0): 4452 psr_feature_enabled = true; 4453 break; 4454 default: 4455 psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK; 4456 break; 4457 } 4458 } 4459 4460 if (!(amdgpu_dc_debug_mask & DC_DISABLE_REPLAY)) { 4461 switch (adev->ip_versions[DCE_HWIP][0]) { 4462 case IP_VERSION(3, 1, 4): 4463 case IP_VERSION(3, 1, 5): 4464 case IP_VERSION(3, 1, 6): 4465 case IP_VERSION(3, 2, 0): 4466 case IP_VERSION(3, 2, 1): 4467 replay_feature_enabled = true; 4468 break; 4469 default: 4470 replay_feature_enabled = amdgpu_dc_feature_mask & DC_REPLAY_MASK; 4471 break; 4472 } 4473 } 4474 /* loops over all connectors on the board */ 4475 for (i = 0; i < link_cnt; i++) { 4476 struct dc_link *link = NULL; 4477 4478 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) { 4479 DRM_ERROR( 4480 "KMS: Cannot support more than %d display indexes\n", 4481 AMDGPU_DM_MAX_DISPLAY_INDEX); 4482 continue; 4483 } 4484 4485 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL); 4486 if (!aconnector) 4487 goto fail; 4488 4489 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL); 4490 if (!aencoder) 4491 goto fail; 4492 4493 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) { 4494 DRM_ERROR("KMS: Failed to initialize encoder\n"); 4495 goto fail; 4496 } 4497 4498 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) { 4499 DRM_ERROR("KMS: Failed to initialize connector\n"); 4500 goto fail; 4501 } 4502 4503 link = dc_get_link_at_index(dm->dc, i); 4504 4505 if (!dc_link_detect_connection_type(link, &new_connection_type)) 4506 DRM_ERROR("KMS: Failed to detect connector\n"); 4507 4508 if (aconnector->base.force && new_connection_type == dc_connection_none) { 4509 
emulated_link_detect(link); 4510 amdgpu_dm_update_connector_after_detect(aconnector); 4511 } else { 4512 bool ret = false; 4513 4514 mutex_lock(&dm->dc_lock); 4515 ret = dc_link_detect(link, DETECT_REASON_BOOT); 4516 mutex_unlock(&dm->dc_lock); 4517 4518 if (ret) { 4519 amdgpu_dm_update_connector_after_detect(aconnector); 4520 setup_backlight_device(dm, aconnector); 4521 4522 /* 4523 * Disable psr if replay can be enabled 4524 */ 4525 if (replay_feature_enabled && amdgpu_dm_setup_replay(link, aconnector)) 4526 psr_feature_enabled = false; 4527 4528 if (psr_feature_enabled) 4529 amdgpu_dm_set_psr_caps(link); 4530 4531 /* TODO: Fix vblank control helpers to delay PSR entry to allow this when 4532 * PSR is also supported. 4533 */ 4534 if (link->psr_settings.psr_feature_enabled) 4535 adev_to_drm(adev)->vblank_disable_immediate = false; 4536 } 4537 } 4538 amdgpu_set_panel_orientation(&aconnector->base); 4539 } 4540 4541 /* Software is initialized. Now we can register interrupt handlers. */ 4542 switch (adev->asic_type) { 4543 #if defined(CONFIG_DRM_AMD_DC_SI) 4544 case CHIP_TAHITI: 4545 case CHIP_PITCAIRN: 4546 case CHIP_VERDE: 4547 case CHIP_OLAND: 4548 if (dce60_register_irq_handlers(dm->adev)) { 4549 DRM_ERROR("DM: Failed to initialize IRQ\n"); 4550 goto fail; 4551 } 4552 break; 4553 #endif 4554 case CHIP_BONAIRE: 4555 case CHIP_HAWAII: 4556 case CHIP_KAVERI: 4557 case CHIP_KABINI: 4558 case CHIP_MULLINS: 4559 case CHIP_TONGA: 4560 case CHIP_FIJI: 4561 case CHIP_CARRIZO: 4562 case CHIP_STONEY: 4563 case CHIP_POLARIS11: 4564 case CHIP_POLARIS10: 4565 case CHIP_POLARIS12: 4566 case CHIP_VEGAM: 4567 case CHIP_VEGA10: 4568 case CHIP_VEGA12: 4569 case CHIP_VEGA20: 4570 if (dce110_register_irq_handlers(dm->adev)) { 4571 DRM_ERROR("DM: Failed to initialize IRQ\n"); 4572 goto fail; 4573 } 4574 break; 4575 default: 4576 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { 4577 case IP_VERSION(1, 0, 0): 4578 case IP_VERSION(1, 0, 1): 4579 case IP_VERSION(2, 0, 2): 4580 case IP_VERSION(2, 0, 3): 4581 case IP_VERSION(2, 0, 0): 4582 case IP_VERSION(2, 1, 0): 4583 case IP_VERSION(3, 0, 0): 4584 case IP_VERSION(3, 0, 2): 4585 case IP_VERSION(3, 0, 3): 4586 case IP_VERSION(3, 0, 1): 4587 case IP_VERSION(3, 1, 2): 4588 case IP_VERSION(3, 1, 3): 4589 case IP_VERSION(3, 1, 4): 4590 case IP_VERSION(3, 1, 5): 4591 case IP_VERSION(3, 1, 6): 4592 case IP_VERSION(3, 2, 0): 4593 case IP_VERSION(3, 2, 1): 4594 case IP_VERSION(3, 5, 0): 4595 if (dcn10_register_irq_handlers(dm->adev)) { 4596 DRM_ERROR("DM: Failed to initialize IRQ\n"); 4597 goto fail; 4598 } 4599 break; 4600 default: 4601 DRM_ERROR("Unsupported DCE IP versions: 0x%X\n", 4602 amdgpu_ip_version(adev, DCE_HWIP, 0)); 4603 goto fail; 4604 } 4605 break; 4606 } 4607 4608 return 0; 4609 fail: 4610 kfree(aencoder); 4611 kfree(aconnector); 4612 4613 return -EINVAL; 4614 } 4615 4616 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm) 4617 { 4618 drm_atomic_private_obj_fini(&dm->atomic_obj); 4619 } 4620 4621 /****************************************************************************** 4622 * amdgpu_display_funcs functions 4623 *****************************************************************************/ 4624 4625 /* 4626 * dm_bandwidth_update - program display watermarks 4627 * 4628 * @adev: amdgpu_device pointer 4629 * 4630 * Calculate and program the display watermarks and line buffer allocation. 
4631 */ 4632 static void dm_bandwidth_update(struct amdgpu_device *adev) 4633 { 4634 /* TODO: implement later */ 4635 } 4636 4637 static const struct amdgpu_display_funcs dm_display_funcs = { 4638 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */ 4639 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */ 4640 .backlight_set_level = NULL, /* never called for DC */ 4641 .backlight_get_level = NULL, /* never called for DC */ 4642 .hpd_sense = NULL,/* called unconditionally */ 4643 .hpd_set_polarity = NULL, /* called unconditionally */ 4644 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */ 4645 .page_flip_get_scanoutpos = 4646 dm_crtc_get_scanoutpos,/* called unconditionally */ 4647 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */ 4648 .add_connector = NULL, /* VBIOS parsing. DAL does it. */ 4649 }; 4650 4651 #if defined(CONFIG_DEBUG_KERNEL_DC) 4652 4653 static ssize_t s3_debug_store(struct device *device, 4654 struct device_attribute *attr, 4655 const char *buf, 4656 size_t count) 4657 { 4658 int ret; 4659 int s3_state; 4660 struct drm_device *drm_dev = dev_get_drvdata(device); 4661 struct amdgpu_device *adev = drm_to_adev(drm_dev); 4662 4663 ret = kstrtoint(buf, 0, &s3_state); 4664 4665 if (ret == 0) { 4666 if (s3_state) { 4667 dm_resume(adev); 4668 drm_kms_helper_hotplug_event(adev_to_drm(adev)); 4669 } else 4670 dm_suspend(adev); 4671 } 4672 4673 return ret == 0 ? count : 0; 4674 } 4675 4676 DEVICE_ATTR_WO(s3_debug); 4677 4678 #endif 4679 4680 static int dm_init_microcode(struct amdgpu_device *adev) 4681 { 4682 char *fw_name_dmub; 4683 int r; 4684 4685 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { 4686 case IP_VERSION(2, 1, 0): 4687 fw_name_dmub = FIRMWARE_RENOIR_DMUB; 4688 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id)) 4689 fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB; 4690 break; 4691 case IP_VERSION(3, 0, 0): 4692 if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 3, 0)) 4693 fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB; 4694 else 4695 fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB; 4696 break; 4697 case IP_VERSION(3, 0, 1): 4698 fw_name_dmub = FIRMWARE_VANGOGH_DMUB; 4699 break; 4700 case IP_VERSION(3, 0, 2): 4701 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB; 4702 break; 4703 case IP_VERSION(3, 0, 3): 4704 fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB; 4705 break; 4706 case IP_VERSION(3, 1, 2): 4707 case IP_VERSION(3, 1, 3): 4708 fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB; 4709 break; 4710 case IP_VERSION(3, 1, 4): 4711 fw_name_dmub = FIRMWARE_DCN_314_DMUB; 4712 break; 4713 case IP_VERSION(3, 1, 5): 4714 fw_name_dmub = FIRMWARE_DCN_315_DMUB; 4715 break; 4716 case IP_VERSION(3, 1, 6): 4717 fw_name_dmub = FIRMWARE_DCN316_DMUB; 4718 break; 4719 case IP_VERSION(3, 2, 0): 4720 fw_name_dmub = FIRMWARE_DCN_V3_2_0_DMCUB; 4721 break; 4722 case IP_VERSION(3, 2, 1): 4723 fw_name_dmub = FIRMWARE_DCN_V3_2_1_DMCUB; 4724 break; 4725 case IP_VERSION(3, 5, 0): 4726 fw_name_dmub = FIRMWARE_DCN_35_DMUB; 4727 break; 4728 default: 4729 /* ASIC doesn't support DMUB. 
*/ 4730 return 0; 4731 } 4732 r = amdgpu_ucode_request(adev, &adev->dm.dmub_fw, fw_name_dmub); 4733 return r; 4734 } 4735 4736 static int dm_early_init(void *handle) 4737 { 4738 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 4739 struct amdgpu_mode_info *mode_info = &adev->mode_info; 4740 struct atom_context *ctx = mode_info->atom_context; 4741 int index = GetIndexIntoMasterTable(DATA, Object_Header); 4742 u16 data_offset; 4743 4744 /* if there is no object header, skip DM */ 4745 if (!amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) { 4746 adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK; 4747 dev_info(adev->dev, "No object header, skipping DM\n"); 4748 return -ENOENT; 4749 } 4750 4751 switch (adev->asic_type) { 4752 #if defined(CONFIG_DRM_AMD_DC_SI) 4753 case CHIP_TAHITI: 4754 case CHIP_PITCAIRN: 4755 case CHIP_VERDE: 4756 adev->mode_info.num_crtc = 6; 4757 adev->mode_info.num_hpd = 6; 4758 adev->mode_info.num_dig = 6; 4759 break; 4760 case CHIP_OLAND: 4761 adev->mode_info.num_crtc = 2; 4762 adev->mode_info.num_hpd = 2; 4763 adev->mode_info.num_dig = 2; 4764 break; 4765 #endif 4766 case CHIP_BONAIRE: 4767 case CHIP_HAWAII: 4768 adev->mode_info.num_crtc = 6; 4769 adev->mode_info.num_hpd = 6; 4770 adev->mode_info.num_dig = 6; 4771 break; 4772 case CHIP_KAVERI: 4773 adev->mode_info.num_crtc = 4; 4774 adev->mode_info.num_hpd = 6; 4775 adev->mode_info.num_dig = 7; 4776 break; 4777 case CHIP_KABINI: 4778 case CHIP_MULLINS: 4779 adev->mode_info.num_crtc = 2; 4780 adev->mode_info.num_hpd = 6; 4781 adev->mode_info.num_dig = 6; 4782 break; 4783 case CHIP_FIJI: 4784 case CHIP_TONGA: 4785 adev->mode_info.num_crtc = 6; 4786 adev->mode_info.num_hpd = 6; 4787 adev->mode_info.num_dig = 7; 4788 break; 4789 case CHIP_CARRIZO: 4790 adev->mode_info.num_crtc = 3; 4791 adev->mode_info.num_hpd = 6; 4792 adev->mode_info.num_dig = 9; 4793 break; 4794 case CHIP_STONEY: 4795 adev->mode_info.num_crtc = 2; 4796 adev->mode_info.num_hpd = 6; 4797 adev->mode_info.num_dig = 9; 4798 break; 4799 case CHIP_POLARIS11: 4800 case CHIP_POLARIS12: 4801 adev->mode_info.num_crtc = 5; 4802 adev->mode_info.num_hpd = 5; 4803 adev->mode_info.num_dig = 5; 4804 break; 4805 case CHIP_POLARIS10: 4806 case CHIP_VEGAM: 4807 adev->mode_info.num_crtc = 6; 4808 adev->mode_info.num_hpd = 6; 4809 adev->mode_info.num_dig = 6; 4810 break; 4811 case CHIP_VEGA10: 4812 case CHIP_VEGA12: 4813 case CHIP_VEGA20: 4814 adev->mode_info.num_crtc = 6; 4815 adev->mode_info.num_hpd = 6; 4816 adev->mode_info.num_dig = 6; 4817 break; 4818 default: 4819 4820 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { 4821 case IP_VERSION(2, 0, 2): 4822 case IP_VERSION(3, 0, 0): 4823 adev->mode_info.num_crtc = 6; 4824 adev->mode_info.num_hpd = 6; 4825 adev->mode_info.num_dig = 6; 4826 break; 4827 case IP_VERSION(2, 0, 0): 4828 case IP_VERSION(3, 0, 2): 4829 adev->mode_info.num_crtc = 5; 4830 adev->mode_info.num_hpd = 5; 4831 adev->mode_info.num_dig = 5; 4832 break; 4833 case IP_VERSION(2, 0, 3): 4834 case IP_VERSION(3, 0, 3): 4835 adev->mode_info.num_crtc = 2; 4836 adev->mode_info.num_hpd = 2; 4837 adev->mode_info.num_dig = 2; 4838 break; 4839 case IP_VERSION(1, 0, 0): 4840 case IP_VERSION(1, 0, 1): 4841 case IP_VERSION(3, 0, 1): 4842 case IP_VERSION(2, 1, 0): 4843 case IP_VERSION(3, 1, 2): 4844 case IP_VERSION(3, 1, 3): 4845 case IP_VERSION(3, 1, 4): 4846 case IP_VERSION(3, 1, 5): 4847 case IP_VERSION(3, 1, 6): 4848 case IP_VERSION(3, 2, 0): 4849 case IP_VERSION(3, 2, 1): 4850 case IP_VERSION(3, 5, 0): 4851 adev->mode_info.num_crtc = 
4; 4852 adev->mode_info.num_hpd = 4; 4853 adev->mode_info.num_dig = 4; 4854 break; 4855 default: 4856 DRM_ERROR("Unsupported DCE IP versions: 0x%x\n", 4857 amdgpu_ip_version(adev, DCE_HWIP, 0)); 4858 return -EINVAL; 4859 } 4860 break; 4861 } 4862 4863 if (adev->mode_info.funcs == NULL) 4864 adev->mode_info.funcs = &dm_display_funcs; 4865 4866 /* 4867 * Note: Do NOT change adev->audio_endpt_rreg and 4868 * adev->audio_endpt_wreg because they are initialised in 4869 * amdgpu_device_init() 4870 */ 4871 #if defined(CONFIG_DEBUG_KERNEL_DC) 4872 device_create_file( 4873 adev_to_drm(adev)->dev, 4874 &dev_attr_s3_debug); 4875 #endif 4876 adev->dc_enabled = true; 4877 4878 return dm_init_microcode(adev); 4879 } 4880 4881 static bool modereset_required(struct drm_crtc_state *crtc_state) 4882 { 4883 return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state); 4884 } 4885 4886 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder) 4887 { 4888 drm_encoder_cleanup(encoder); 4889 kfree(encoder); 4890 } 4891 4892 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = { 4893 .destroy = amdgpu_dm_encoder_destroy, 4894 }; 4895 4896 static int 4897 fill_plane_color_attributes(const struct drm_plane_state *plane_state, 4898 const enum surface_pixel_format format, 4899 enum dc_color_space *color_space) 4900 { 4901 bool full_range; 4902 4903 *color_space = COLOR_SPACE_SRGB; 4904 4905 /* DRM color properties only affect non-RGB formats. */ 4906 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) 4907 return 0; 4908 4909 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE); 4910 4911 switch (plane_state->color_encoding) { 4912 case DRM_COLOR_YCBCR_BT601: 4913 if (full_range) 4914 *color_space = COLOR_SPACE_YCBCR601; 4915 else 4916 *color_space = COLOR_SPACE_YCBCR601_LIMITED; 4917 break; 4918 4919 case DRM_COLOR_YCBCR_BT709: 4920 if (full_range) 4921 *color_space = COLOR_SPACE_YCBCR709; 4922 else 4923 *color_space = COLOR_SPACE_YCBCR709_LIMITED; 4924 break; 4925 4926 case DRM_COLOR_YCBCR_BT2020: 4927 if (full_range) 4928 *color_space = COLOR_SPACE_2020_YCBCR; 4929 else 4930 return -EINVAL; 4931 break; 4932 4933 default: 4934 return -EINVAL; 4935 } 4936 4937 return 0; 4938 } 4939 4940 static int 4941 fill_dc_plane_info_and_addr(struct amdgpu_device *adev, 4942 const struct drm_plane_state *plane_state, 4943 const u64 tiling_flags, 4944 struct dc_plane_info *plane_info, 4945 struct dc_plane_address *address, 4946 bool tmz_surface, 4947 bool force_disable_dcc) 4948 { 4949 const struct drm_framebuffer *fb = plane_state->fb; 4950 const struct amdgpu_framebuffer *afb = 4951 to_amdgpu_framebuffer(plane_state->fb); 4952 int ret; 4953 4954 memset(plane_info, 0, sizeof(*plane_info)); 4955 4956 switch (fb->format->format) { 4957 case DRM_FORMAT_C8: 4958 plane_info->format = 4959 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS; 4960 break; 4961 case DRM_FORMAT_RGB565: 4962 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565; 4963 break; 4964 case DRM_FORMAT_XRGB8888: 4965 case DRM_FORMAT_ARGB8888: 4966 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888; 4967 break; 4968 case DRM_FORMAT_XRGB2101010: 4969 case DRM_FORMAT_ARGB2101010: 4970 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010; 4971 break; 4972 case DRM_FORMAT_XBGR2101010: 4973 case DRM_FORMAT_ABGR2101010: 4974 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010; 4975 break; 4976 case DRM_FORMAT_XBGR8888: 4977 case DRM_FORMAT_ABGR8888: 4978 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888; 4979 
break; 4980 case DRM_FORMAT_NV21: 4981 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr; 4982 break; 4983 case DRM_FORMAT_NV12: 4984 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb; 4985 break; 4986 case DRM_FORMAT_P010: 4987 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb; 4988 break; 4989 case DRM_FORMAT_XRGB16161616F: 4990 case DRM_FORMAT_ARGB16161616F: 4991 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F; 4992 break; 4993 case DRM_FORMAT_XBGR16161616F: 4994 case DRM_FORMAT_ABGR16161616F: 4995 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F; 4996 break; 4997 case DRM_FORMAT_XRGB16161616: 4998 case DRM_FORMAT_ARGB16161616: 4999 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616; 5000 break; 5001 case DRM_FORMAT_XBGR16161616: 5002 case DRM_FORMAT_ABGR16161616: 5003 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616; 5004 break; 5005 default: 5006 DRM_ERROR( 5007 "Unsupported screen format %p4cc\n", 5008 &fb->format->format); 5009 return -EINVAL; 5010 } 5011 5012 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) { 5013 case DRM_MODE_ROTATE_0: 5014 plane_info->rotation = ROTATION_ANGLE_0; 5015 break; 5016 case DRM_MODE_ROTATE_90: 5017 plane_info->rotation = ROTATION_ANGLE_90; 5018 break; 5019 case DRM_MODE_ROTATE_180: 5020 plane_info->rotation = ROTATION_ANGLE_180; 5021 break; 5022 case DRM_MODE_ROTATE_270: 5023 plane_info->rotation = ROTATION_ANGLE_270; 5024 break; 5025 default: 5026 plane_info->rotation = ROTATION_ANGLE_0; 5027 break; 5028 } 5029 5030 5031 plane_info->visible = true; 5032 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE; 5033 5034 plane_info->layer_index = plane_state->normalized_zpos; 5035 5036 ret = fill_plane_color_attributes(plane_state, plane_info->format, 5037 &plane_info->color_space); 5038 if (ret) 5039 return ret; 5040 5041 ret = amdgpu_dm_plane_fill_plane_buffer_attributes(adev, afb, plane_info->format, 5042 plane_info->rotation, tiling_flags, 5043 &plane_info->tiling_info, 5044 &plane_info->plane_size, 5045 &plane_info->dcc, address, 5046 tmz_surface, force_disable_dcc); 5047 if (ret) 5048 return ret; 5049 5050 amdgpu_dm_plane_fill_blending_from_plane_state( 5051 plane_state, &plane_info->per_pixel_alpha, &plane_info->pre_multiplied_alpha, 5052 &plane_info->global_alpha, &plane_info->global_alpha_value); 5053 5054 return 0; 5055 } 5056 5057 static int fill_dc_plane_attributes(struct amdgpu_device *adev, 5058 struct dc_plane_state *dc_plane_state, 5059 struct drm_plane_state *plane_state, 5060 struct drm_crtc_state *crtc_state) 5061 { 5062 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state); 5063 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb; 5064 struct dc_scaling_info scaling_info; 5065 struct dc_plane_info plane_info; 5066 int ret; 5067 bool force_disable_dcc = false; 5068 5069 ret = amdgpu_dm_plane_fill_dc_scaling_info(adev, plane_state, &scaling_info); 5070 if (ret) 5071 return ret; 5072 5073 dc_plane_state->src_rect = scaling_info.src_rect; 5074 dc_plane_state->dst_rect = scaling_info.dst_rect; 5075 dc_plane_state->clip_rect = scaling_info.clip_rect; 5076 dc_plane_state->scaling_quality = scaling_info.scaling_quality; 5077 5078 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend; 5079 ret = fill_dc_plane_info_and_addr(adev, plane_state, 5080 afb->tiling_flags, 5081 &plane_info, 5082 &dc_plane_state->address, 5083 afb->tmz_surface, 5084 force_disable_dcc); 5085 if (ret) 5086 return ret; 5087 5088 
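	/*
	 * plane_info.color_space above ultimately comes from the standard DRM
	 * COLOR_ENCODING/COLOR_RANGE plane properties (see
	 * fill_plane_color_attributes()). A minimal userspace sketch, assuming
	 * a libdrm handle fd, a YUV plane plane_id and pre-resolved property
	 * IDs, and using the kernel's enum names for clarity (illustrative
	 * only, not driver code):
	 *
	 *	// Request BT.709 limited range for an NV12 plane; the driver
	 *	// then picks COLOR_SPACE_YCBCR709_LIMITED for DC.
	 *	drmModeObjectSetProperty(fd, plane_id, DRM_MODE_OBJECT_PLANE,
	 *				 color_encoding_prop, DRM_COLOR_YCBCR_BT709);
	 *	drmModeObjectSetProperty(fd, plane_id, DRM_MODE_OBJECT_PLANE,
	 *				 color_range_prop, DRM_COLOR_YCBCR_LIMITED_RANGE);
	 */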
	dc_plane_state->format = plane_info.format;
	dc_plane_state->color_space = plane_info.color_space;
	dc_plane_state->plane_size = plane_info.plane_size;
	dc_plane_state->rotation = plane_info.rotation;
	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
	dc_plane_state->stereo_format = plane_info.stereo_format;
	dc_plane_state->tiling_info = plane_info.tiling_info;
	dc_plane_state->visible = plane_info.visible;
	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
	dc_plane_state->pre_multiplied_alpha = plane_info.pre_multiplied_alpha;
	dc_plane_state->global_alpha = plane_info.global_alpha;
	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
	dc_plane_state->dcc = plane_info.dcc;
	dc_plane_state->layer_index = plane_info.layer_index;
	dc_plane_state->flip_int_enabled = true;

	/*
	 * Always set input transfer function, since plane state is refreshed
	 * every time.
	 */
	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
	if (ret)
		return ret;

	return 0;
}

static inline void fill_dc_dirty_rect(struct drm_plane *plane,
				      struct rect *dirty_rect, int32_t x,
				      s32 y, s32 width, s32 height,
				      int *i, bool ffu)
{
	WARN_ON(*i >= DC_MAX_DIRTY_RECTS);

	dirty_rect->x = x;
	dirty_rect->y = y;
	dirty_rect->width = width;
	dirty_rect->height = height;

	if (ffu)
		drm_dbg(plane->dev,
			"[PLANE:%d] PSR FFU dirty rect size (%d, %d)\n",
			plane->base.id, width, height);
	else
		drm_dbg(plane->dev,
			"[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)\n",
			plane->base.id, x, y, width, height);

	(*i)++;
}
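/*
 * For reference, a damage-aware client feeds the dirty-rect path below by
 * attaching FB_DAMAGE_CLIPS to its atomic commit. A minimal libdrm sketch
 * (purely illustrative, not driver code; fd, plane_id and the damage
 * property ID lookup are assumed to exist in the caller):
 *
 *	#include <stdint.h>
 *	#include <xf86drm.h>
 *	#include <xf86drmMode.h>
 *
 *	static int set_damage(int fd, uint32_t plane_id, uint32_t damage_prop_id)
 *	{
 *		// One 64x64 damage clip in framebuffer coordinates.
 *		struct drm_mode_rect clip = { .x1 = 0, .y1 = 0, .x2 = 64, .y2 = 64 };
 *		drmModeAtomicReq *req = drmModeAtomicAlloc();
 *		uint32_t blob_id = 0;
 *		int ret;
 *
 *		ret = drmModeCreatePropertyBlob(fd, &clip, sizeof(clip), &blob_id);
 *		if (ret)
 *			goto out;
 *		drmModeAtomicAddProperty(req, plane_id, damage_prop_id, blob_id);
 *		ret = drmModeAtomicCommit(fd, req, 0, NULL);
 *	out:
 *		drmModeAtomicFree(req);
 *		return ret;
 *	}
 *
 * Each clip lands here as one dirty rect; with no blob attached the driver
 * falls back to full-frame updates (the "ffu" path below).
 */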
/**
 * fill_dc_dirty_rects() - Fill DC dirty regions for PSR selective updates
 *
 * @plane: DRM plane containing dirty regions that need to be flushed to the
 *         eDP remote fb
 * @old_plane_state: Old state of @plane
 * @new_plane_state: New state of @plane
 * @crtc_state: New state of CRTC connected to the @plane
 * @flip_addrs: DC flip tracking struct, which also tracks dirty rects
 * @dirty_regions_changed: set to true when the dirty regions have changed
 *
 * For PSR SU, DC informs the DMUB uController of dirty rectangle regions
 * (referred to as "damage clips" in DRM nomenclature) that require updating on
 * the eDP remote buffer. The responsibility of specifying the dirty regions is
 * amdgpu_dm's.
 *
 * A damage-aware DRM client should fill the FB_DAMAGE_CLIPS property on the
 * plane with regions that require flushing to the eDP remote buffer. In
 * addition, certain use cases - such as cursor and multi-plane overlay (MPO) -
 * implicitly provide damage clips without any client support via the plane
 * bounds.
 */
static void fill_dc_dirty_rects(struct drm_plane *plane,
				struct drm_plane_state *old_plane_state,
				struct drm_plane_state *new_plane_state,
				struct drm_crtc_state *crtc_state,
				struct dc_flip_addrs *flip_addrs,
				bool *dirty_regions_changed)
{
	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
	struct rect *dirty_rects = flip_addrs->dirty_rects;
	u32 num_clips;
	struct drm_mode_rect *clips;
	bool bb_changed;
	bool fb_changed;
	u32 i = 0;

	*dirty_regions_changed = false;

	/*
	 * The cursor plane has its own dirty rect update interface. See
	 * dcn10_dmub_update_cursor_data and dmub_cmd_update_cursor_info_data
	 */
	if (plane->type == DRM_PLANE_TYPE_CURSOR)
		return;

	num_clips = drm_plane_get_damage_clips_count(new_plane_state);
	clips = drm_plane_get_damage_clips(new_plane_state);

	if (!dm_crtc_state->mpo_requested) {
		if (!num_clips || num_clips > DC_MAX_DIRTY_RECTS)
			goto ffu;

		for (; flip_addrs->dirty_rect_count < num_clips; clips++)
			fill_dc_dirty_rect(new_plane_state->plane,
					   &dirty_rects[flip_addrs->dirty_rect_count],
					   clips->x1, clips->y1,
					   clips->x2 - clips->x1, clips->y2 - clips->y1,
					   &flip_addrs->dirty_rect_count,
					   false);
		return;
	}

	/*
	 * MPO is requested. Add the entire plane bounding box to the dirty
	 * rects if flipped to or damaged.
	 *
	 * If the plane is moved or resized, also add the old bounding box to
	 * the dirty rects.
	 */
	fb_changed = old_plane_state->fb->base.id !=
		     new_plane_state->fb->base.id;
	bb_changed = (old_plane_state->crtc_x != new_plane_state->crtc_x ||
		      old_plane_state->crtc_y != new_plane_state->crtc_y ||
		      old_plane_state->crtc_w != new_plane_state->crtc_w ||
		      old_plane_state->crtc_h != new_plane_state->crtc_h);

	drm_dbg(plane->dev,
		"[PLANE:%d] PSR bb_changed:%d fb_changed:%d num_clips:%d\n",
		new_plane_state->plane->base.id,
		bb_changed, fb_changed, num_clips);

	*dirty_regions_changed = bb_changed;

	if ((num_clips + (bb_changed ?
2 : 0)) > DC_MAX_DIRTY_RECTS) 5224 goto ffu; 5225 5226 if (bb_changed) { 5227 fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i], 5228 new_plane_state->crtc_x, 5229 new_plane_state->crtc_y, 5230 new_plane_state->crtc_w, 5231 new_plane_state->crtc_h, &i, false); 5232 5233 /* Add old plane bounding-box if plane is moved or resized */ 5234 fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i], 5235 old_plane_state->crtc_x, 5236 old_plane_state->crtc_y, 5237 old_plane_state->crtc_w, 5238 old_plane_state->crtc_h, &i, false); 5239 } 5240 5241 if (num_clips) { 5242 for (; i < num_clips; clips++) 5243 fill_dc_dirty_rect(new_plane_state->plane, 5244 &dirty_rects[i], clips->x1, 5245 clips->y1, clips->x2 - clips->x1, 5246 clips->y2 - clips->y1, &i, false); 5247 } else if (fb_changed && !bb_changed) { 5248 fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i], 5249 new_plane_state->crtc_x, 5250 new_plane_state->crtc_y, 5251 new_plane_state->crtc_w, 5252 new_plane_state->crtc_h, &i, false); 5253 } 5254 5255 flip_addrs->dirty_rect_count = i; 5256 return; 5257 5258 ffu: 5259 fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[0], 0, 0, 5260 dm_crtc_state->base.mode.crtc_hdisplay, 5261 dm_crtc_state->base.mode.crtc_vdisplay, 5262 &flip_addrs->dirty_rect_count, true); 5263 } 5264 5265 static void update_stream_scaling_settings(const struct drm_display_mode *mode, 5266 const struct dm_connector_state *dm_state, 5267 struct dc_stream_state *stream) 5268 { 5269 enum amdgpu_rmx_type rmx_type; 5270 5271 struct rect src = { 0 }; /* viewport in composition space*/ 5272 struct rect dst = { 0 }; /* stream addressable area */ 5273 5274 /* no mode. nothing to be done */ 5275 if (!mode) 5276 return; 5277 5278 /* Full screen scaling by default */ 5279 src.width = mode->hdisplay; 5280 src.height = mode->vdisplay; 5281 dst.width = stream->timing.h_addressable; 5282 dst.height = stream->timing.v_addressable; 5283 5284 if (dm_state) { 5285 rmx_type = dm_state->scaling; 5286 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) { 5287 if (src.width * dst.height < 5288 src.height * dst.width) { 5289 /* height needs less upscaling/more downscaling */ 5290 dst.width = src.width * 5291 dst.height / src.height; 5292 } else { 5293 /* width needs less upscaling/more downscaling */ 5294 dst.height = src.height * 5295 dst.width / src.width; 5296 } 5297 } else if (rmx_type == RMX_CENTER) { 5298 dst = src; 5299 } 5300 5301 dst.x = (stream->timing.h_addressable - dst.width) / 2; 5302 dst.y = (stream->timing.v_addressable - dst.height) / 2; 5303 5304 if (dm_state->underscan_enable) { 5305 dst.x += dm_state->underscan_hborder / 2; 5306 dst.y += dm_state->underscan_vborder / 2; 5307 dst.width -= dm_state->underscan_hborder; 5308 dst.height -= dm_state->underscan_vborder; 5309 } 5310 } 5311 5312 stream->src = src; 5313 stream->dst = dst; 5314 5315 DRM_DEBUG_KMS("Destination Rectangle x:%d y:%d width:%d height:%d\n", 5316 dst.x, dst.y, dst.width, dst.height); 5317 5318 } 5319 5320 static enum dc_color_depth 5321 convert_color_depth_from_display_info(const struct drm_connector *connector, 5322 bool is_y420, int requested_bpc) 5323 { 5324 u8 bpc; 5325 5326 if (is_y420) { 5327 bpc = 8; 5328 5329 /* Cap display bpc based on HDMI 2.0 HF-VSDB */ 5330 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48) 5331 bpc = 16; 5332 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36) 5333 bpc = 12; 5334 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30) 5335 bpc = 
10;
	} else {
		bpc = (uint8_t)connector->display_info.bpc;
		/* Assume 8 bpc by default if no bpc is specified. */
		bpc = bpc ? bpc : 8;
	}

	if (requested_bpc > 0) {
		/*
		 * Cap display bpc based on the user requested value.
		 *
		 * The value of state->max_bpc may not be correctly updated,
		 * depending on when the connector is added to the state or
		 * whether this is called outside of atomic check, so it
		 * can't be used directly.
		 */
		bpc = min_t(u8, bpc, requested_bpc);

		/* Round down to the nearest even number. */
		bpc = bpc - (bpc & 1);
	}

	switch (bpc) {
	case 0:
		/*
		 * Temporary workaround: DRM doesn't parse color depth for
		 * EDID revisions before 1.4
		 * TODO: Fix edid parsing
		 */
		return COLOR_DEPTH_888;
	case 6:
		return COLOR_DEPTH_666;
	case 8:
		return COLOR_DEPTH_888;
	case 10:
		return COLOR_DEPTH_101010;
	case 12:
		return COLOR_DEPTH_121212;
	case 14:
		return COLOR_DEPTH_141414;
	case 16:
		return COLOR_DEPTH_161616;
	default:
		return COLOR_DEPTH_UNDEFINED;
	}
}

static enum dc_aspect_ratio
get_aspect_ratio(const struct drm_display_mode *mode_in)
{
	/* 1-1 mapping, since both enums follow the HDMI spec. */
	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
}

static enum dc_color_space
get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing,
		       const struct drm_connector_state *connector_state)
{
	enum dc_color_space color_space = COLOR_SPACE_SRGB;

	switch (connector_state->colorspace) {
	case DRM_MODE_COLORIMETRY_BT601_YCC:
		if (dc_crtc_timing->flags.Y_ONLY)
			color_space = COLOR_SPACE_YCBCR601_LIMITED;
		else
			color_space = COLOR_SPACE_YCBCR601;
		break;
	case DRM_MODE_COLORIMETRY_BT709_YCC:
		if (dc_crtc_timing->flags.Y_ONLY)
			color_space = COLOR_SPACE_YCBCR709_LIMITED;
		else
			color_space = COLOR_SPACE_YCBCR709;
		break;
	case DRM_MODE_COLORIMETRY_OPRGB:
		color_space = COLOR_SPACE_ADOBERGB;
		break;
	case DRM_MODE_COLORIMETRY_BT2020_RGB:
	case DRM_MODE_COLORIMETRY_BT2020_YCC:
		if (dc_crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB)
			color_space = COLOR_SPACE_2020_RGB_FULLRANGE;
		else
			color_space = COLOR_SPACE_2020_YCBCR;
		break;
	case DRM_MODE_COLORIMETRY_DEFAULT: // ITU601
	default:
		if (dc_crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB) {
			color_space = COLOR_SPACE_SRGB;
			/*
			 * 27030 kHz (27.03 MHz) is the separation point
			 * between HDTV and SDTV according to the HDMI spec;
			 * we use YCbCr709 and YCbCr601 respectively.
			 */
		} else if (dc_crtc_timing->pix_clk_100hz > 270300) {
			if (dc_crtc_timing->flags.Y_ONLY)
				color_space =
					COLOR_SPACE_YCBCR709_LIMITED;
			else
				color_space = COLOR_SPACE_YCBCR709;
		} else {
			if (dc_crtc_timing->flags.Y_ONLY)
				color_space =
					COLOR_SPACE_YCBCR601_LIMITED;
			else
				color_space = COLOR_SPACE_YCBCR601;
		}
		break;
	}

	return color_space;
}
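/*
 * Worked example for the default colorimetry branch above (illustrative
 * values): a 1080p60 YCbCr stream has pix_clk_100hz == 1485000 (148.5 MHz),
 * which is above the 270300 (27.03 MHz) threshold, so it defaults to
 * COLOR_SPACE_YCBCR709. A 480i YCbCr stream at 27.0 MHz (pix_clk_100hz ==
 * 270000) falls at or below the threshold and defaults to
 * COLOR_SPACE_YCBCR601. RGB streams stay in COLOR_SPACE_SRGB regardless of
 * pixel clock.
 */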
DRM_MODE_CONTENT_TYPE_PHOTO: 5456 return DISPLAY_CONTENT_TYPE_PHOTO; 5457 case DRM_MODE_CONTENT_TYPE_CINEMA: 5458 return DISPLAY_CONTENT_TYPE_CINEMA; 5459 case DRM_MODE_CONTENT_TYPE_GAME: 5460 return DISPLAY_CONTENT_TYPE_GAME; 5461 } 5462 } 5463 5464 static bool adjust_colour_depth_from_display_info( 5465 struct dc_crtc_timing *timing_out, 5466 const struct drm_display_info *info) 5467 { 5468 enum dc_color_depth depth = timing_out->display_color_depth; 5469 int normalized_clk; 5470 5471 do { 5472 normalized_clk = timing_out->pix_clk_100hz / 10; 5473 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */ 5474 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420) 5475 normalized_clk /= 2; 5476 /* Adjusting pix clock following on HDMI spec based on colour depth */ 5477 switch (depth) { 5478 case COLOR_DEPTH_888: 5479 break; 5480 case COLOR_DEPTH_101010: 5481 normalized_clk = (normalized_clk * 30) / 24; 5482 break; 5483 case COLOR_DEPTH_121212: 5484 normalized_clk = (normalized_clk * 36) / 24; 5485 break; 5486 case COLOR_DEPTH_161616: 5487 normalized_clk = (normalized_clk * 48) / 24; 5488 break; 5489 default: 5490 /* The above depths are the only ones valid for HDMI. */ 5491 return false; 5492 } 5493 if (normalized_clk <= info->max_tmds_clock) { 5494 timing_out->display_color_depth = depth; 5495 return true; 5496 } 5497 } while (--depth > COLOR_DEPTH_666); 5498 return false; 5499 } 5500 5501 static void fill_stream_properties_from_drm_display_mode( 5502 struct dc_stream_state *stream, 5503 const struct drm_display_mode *mode_in, 5504 const struct drm_connector *connector, 5505 const struct drm_connector_state *connector_state, 5506 const struct dc_stream_state *old_stream, 5507 int requested_bpc) 5508 { 5509 struct dc_crtc_timing *timing_out = &stream->timing; 5510 const struct drm_display_info *info = &connector->display_info; 5511 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 5512 struct hdmi_vendor_infoframe hv_frame; 5513 struct hdmi_avi_infoframe avi_frame; 5514 5515 memset(&hv_frame, 0, sizeof(hv_frame)); 5516 memset(&avi_frame, 0, sizeof(avi_frame)); 5517 5518 timing_out->h_border_left = 0; 5519 timing_out->h_border_right = 0; 5520 timing_out->v_border_top = 0; 5521 timing_out->v_border_bottom = 0; 5522 /* TODO: un-hardcode */ 5523 if (drm_mode_is_420_only(info, mode_in) 5524 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) 5525 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420; 5526 else if (drm_mode_is_420_also(info, mode_in) 5527 && aconnector->force_yuv420_output) 5528 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420; 5529 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR444) 5530 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) 5531 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444; 5532 else 5533 timing_out->pixel_encoding = PIXEL_ENCODING_RGB; 5534 5535 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE; 5536 timing_out->display_color_depth = convert_color_depth_from_display_info( 5537 connector, 5538 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420), 5539 requested_bpc); 5540 timing_out->scan_type = SCANNING_TYPE_NODATA; 5541 timing_out->hdmi_vic = 0; 5542 5543 if (old_stream) { 5544 timing_out->vic = old_stream->timing.vic; 5545 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY; 5546 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY; 5547 } else { 5548 timing_out->vic = drm_match_cea_mode(mode_in); 5549 if (mode_in->flags & 
DRM_MODE_FLAG_PHSYNC) 5550 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1; 5551 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC) 5552 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1; 5553 } 5554 5555 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) { 5556 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in); 5557 timing_out->vic = avi_frame.video_code; 5558 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in); 5559 timing_out->hdmi_vic = hv_frame.vic; 5560 } 5561 5562 if (is_freesync_video_mode(mode_in, aconnector)) { 5563 timing_out->h_addressable = mode_in->hdisplay; 5564 timing_out->h_total = mode_in->htotal; 5565 timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start; 5566 timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay; 5567 timing_out->v_total = mode_in->vtotal; 5568 timing_out->v_addressable = mode_in->vdisplay; 5569 timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay; 5570 timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start; 5571 timing_out->pix_clk_100hz = mode_in->clock * 10; 5572 } else { 5573 timing_out->h_addressable = mode_in->crtc_hdisplay; 5574 timing_out->h_total = mode_in->crtc_htotal; 5575 timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start; 5576 timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay; 5577 timing_out->v_total = mode_in->crtc_vtotal; 5578 timing_out->v_addressable = mode_in->crtc_vdisplay; 5579 timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay; 5580 timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start; 5581 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10; 5582 } 5583 5584 timing_out->aspect_ratio = get_aspect_ratio(mode_in); 5585 5586 stream->out_transfer_func->type = TF_TYPE_PREDEFINED; 5587 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB; 5588 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) { 5589 if (!adjust_colour_depth_from_display_info(timing_out, info) && 5590 drm_mode_is_420_also(info, mode_in) && 5591 timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) { 5592 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420; 5593 adjust_colour_depth_from_display_info(timing_out, info); 5594 } 5595 } 5596 5597 stream->output_color_space = get_output_color_space(timing_out, connector_state); 5598 stream->content_type = get_output_content_type(connector_state); 5599 } 5600 5601 static void fill_audio_info(struct audio_info *audio_info, 5602 const struct drm_connector *drm_connector, 5603 const struct dc_sink *dc_sink) 5604 { 5605 int i = 0; 5606 int cea_revision = 0; 5607 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps; 5608 5609 audio_info->manufacture_id = edid_caps->manufacturer_id; 5610 audio_info->product_id = edid_caps->product_id; 5611 5612 cea_revision = drm_connector->display_info.cea_rev; 5613 5614 strscpy(audio_info->display_name, 5615 edid_caps->display_name, 5616 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS); 5617 5618 if (cea_revision >= 3) { 5619 audio_info->mode_count = edid_caps->audio_mode_count; 5620 5621 for (i = 0; i < audio_info->mode_count; ++i) { 5622 audio_info->modes[i].format_code = 5623 (enum audio_format_code) 5624 (edid_caps->audio_modes[i].format_code); 5625 audio_info->modes[i].channel_count = 5626 edid_caps->audio_modes[i].channel_count; 5627 audio_info->modes[i].sample_rates.all = 5628 edid_caps->audio_modes[i].sample_rate; 5629 audio_info->modes[i].sample_size 
=
				edid_caps->audio_modes[i].sample_size;
		}
	}

	audio_info->flags.all = edid_caps->speaker_flags;

	/* TODO: We only check the progressive mode; check the interlaced mode too. */
	if (drm_connector->latency_present[0]) {
		audio_info->video_latency = drm_connector->video_latency[0];
		audio_info->audio_latency = drm_connector->audio_latency[0];
	}

	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */

}

static void
copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
				      struct drm_display_mode *dst_mode)
{
	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
	dst_mode->crtc_clock = src_mode->crtc_clock;
	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
	dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
	dst_mode->crtc_htotal = src_mode->crtc_htotal;
	dst_mode->crtc_hskew = src_mode->crtc_hskew;
	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
}

static void
decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
					const struct drm_display_mode *native_mode,
					bool scale_enabled)
{
	if (scale_enabled) {
		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
	} else if (native_mode->clock == drm_mode->clock &&
		   native_mode->htotal == drm_mode->htotal &&
		   native_mode->vtotal == drm_mode->vtotal) {
		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
	} else {
		/* no scaling and no amdgpu-inserted mode: no need to patch the timing */
	}
}

static struct dc_sink *
create_fake_sink(struct amdgpu_dm_connector *aconnector)
{
	struct dc_sink_init_data sink_init_data = { 0 };
	struct dc_sink *sink = NULL;

	sink_init_data.link = aconnector->dc_link;
	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;

	sink = dc_sink_create(&sink_init_data);
	if (!sink) {
		DRM_ERROR("Failed to create sink!\n");
		return NULL;
	}
	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;

	return sink;
}

static void set_multisync_trigger_params(
		struct dc_stream_state *stream)
{
	struct dc_stream_state *master = NULL;

	if (stream->triggered_crtc_reset.enabled) {
		master = stream->triggered_crtc_reset.event_source;
		stream->triggered_crtc_reset.event =
			master->timing.flags.VSYNC_POSITIVE_POLARITY ?
			CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
	}
}

static void set_master_stream(struct dc_stream_state *stream_set[],
			      int stream_count)
{
	int j, highest_rfr = 0, master_stream = 0;

	for (j = 0; j < stream_count; j++) {
		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
			int refresh_rate = 0;

			refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
			if (refresh_rate > highest_rfr) {
				highest_rfr = refresh_rate;
				master_stream = j;
			}
		}
	}
	for (j = 0; j < stream_count; j++) {
		if (stream_set[j])
			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
	}
}

static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
{
	int i = 0;
	struct dc_stream_state *stream;

	if (context->stream_count < 2)
		return;
	for (i = 0; i < context->stream_count; i++) {
		if (!context->streams[i])
			continue;
		/*
		 * TODO: add a function to read AMD VSDB bits and set
		 * crtc_sync_master.multi_sync_enabled flag
		 * For now it's set to false
		 */
	}

	set_master_stream(context->streams, context->stream_count);

	for (i = 0; i < context->stream_count; i++) {
		stream = context->streams[i];

		if (!stream)
			continue;

		set_multisync_trigger_params(stream);
	}
}

/**
 * DOC: FreeSync Video
 *
 * When a userspace application wants to play a video, the content follows a
 * standard format definition that usually specifies the FPS for that format.
 * The list below illustrates some video formats and their expected FPS:
 *
 * - TV/NTSC (23.976 FPS)
 * - Cinema (24 FPS)
 * - TV/PAL (25 FPS)
 * - TV/NTSC (29.97 FPS)
 * - TV/NTSC (30 FPS)
 * - Cinema HFR (48 FPS)
 * - TV/PAL (50 FPS)
 * - Commonly used (60 FPS)
 * - Multiples of 24 (48, 72, 96 FPS)
 *
 * The list of standard video formats is not huge, and the corresponding modes
 * can be added to the connector's modeset list beforehand. With that,
 * userspace can leverage FreeSync to extend the front porch in order to
 * attain the target refresh rate. Such a switch happens seamlessly, without
 * screen blanking or reprogramming of the output in any other way. If
 * userspace requests a modeset that is compatible with a FreeSync mode and
 * only differs in the refresh rate, DC will skip the full update and avoid a
 * blink during the transition. For example, a video player can change the
 * modesetting from 60 Hz to 30 Hz for playing TV/NTSC content when it goes
 * full screen, without causing any display blink. The same concept applies to
 * any modesetting change that only differs in refresh rate.
 */
static struct drm_display_mode *
get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
			      bool use_probed_modes)
{
	struct drm_display_mode *m, *m_pref = NULL;
	u16 current_refresh, highest_refresh;
	struct list_head *list_head = use_probed_modes ?
5804 &aconnector->base.probed_modes : 5805 &aconnector->base.modes; 5806 5807 if (aconnector->freesync_vid_base.clock != 0) 5808 return &aconnector->freesync_vid_base; 5809 5810 /* Find the preferred mode */ 5811 list_for_each_entry(m, list_head, head) { 5812 if (m->type & DRM_MODE_TYPE_PREFERRED) { 5813 m_pref = m; 5814 break; 5815 } 5816 } 5817 5818 if (!m_pref) { 5819 /* Probably an EDID with no preferred mode. Fallback to first entry */ 5820 m_pref = list_first_entry_or_null( 5821 &aconnector->base.modes, struct drm_display_mode, head); 5822 if (!m_pref) { 5823 DRM_DEBUG_DRIVER("No preferred mode found in EDID\n"); 5824 return NULL; 5825 } 5826 } 5827 5828 highest_refresh = drm_mode_vrefresh(m_pref); 5829 5830 /* 5831 * Find the mode with highest refresh rate with same resolution. 5832 * For some monitors, preferred mode is not the mode with highest 5833 * supported refresh rate. 5834 */ 5835 list_for_each_entry(m, list_head, head) { 5836 current_refresh = drm_mode_vrefresh(m); 5837 5838 if (m->hdisplay == m_pref->hdisplay && 5839 m->vdisplay == m_pref->vdisplay && 5840 highest_refresh < current_refresh) { 5841 highest_refresh = current_refresh; 5842 m_pref = m; 5843 } 5844 } 5845 5846 drm_mode_copy(&aconnector->freesync_vid_base, m_pref); 5847 return m_pref; 5848 } 5849 5850 static bool is_freesync_video_mode(const struct drm_display_mode *mode, 5851 struct amdgpu_dm_connector *aconnector) 5852 { 5853 struct drm_display_mode *high_mode; 5854 int timing_diff; 5855 5856 high_mode = get_highest_refresh_rate_mode(aconnector, false); 5857 if (!high_mode || !mode) 5858 return false; 5859 5860 timing_diff = high_mode->vtotal - mode->vtotal; 5861 5862 if (high_mode->clock == 0 || high_mode->clock != mode->clock || 5863 high_mode->hdisplay != mode->hdisplay || 5864 high_mode->vdisplay != mode->vdisplay || 5865 high_mode->hsync_start != mode->hsync_start || 5866 high_mode->hsync_end != mode->hsync_end || 5867 high_mode->htotal != mode->htotal || 5868 high_mode->hskew != mode->hskew || 5869 high_mode->vscan != mode->vscan || 5870 high_mode->vsync_start - mode->vsync_start != timing_diff || 5871 high_mode->vsync_end - mode->vsync_end != timing_diff) 5872 return false; 5873 else 5874 return true; 5875 } 5876 5877 static void update_dsc_caps(struct amdgpu_dm_connector *aconnector, 5878 struct dc_sink *sink, struct dc_stream_state *stream, 5879 struct dsc_dec_dpcd_caps *dsc_caps) 5880 { 5881 stream->timing.flags.DSC = 0; 5882 dsc_caps->is_dsc_supported = false; 5883 5884 if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT || 5885 sink->sink_signal == SIGNAL_TYPE_EDP)) { 5886 if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE || 5887 sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) 5888 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc, 5889 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw, 5890 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw, 5891 dsc_caps); 5892 } 5893 } 5894 5895 5896 static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector, 5897 struct dc_sink *sink, struct dc_stream_state *stream, 5898 struct dsc_dec_dpcd_caps *dsc_caps, 5899 uint32_t max_dsc_target_bpp_limit_override) 5900 { 5901 const struct dc_link_settings *verified_link_cap = NULL; 5902 u32 link_bw_in_kbps; 5903 u32 edp_min_bpp_x16, edp_max_bpp_x16; 5904 struct dc *dc = sink->ctx->dc; 5905 struct dc_dsc_bw_range bw_range = {0}; 5906 struct dc_dsc_config dsc_cfg = {0}; 5907 struct dc_dsc_config_options dsc_options = {0}; 
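	/*
	 * Note on units: DSC target bpp in this function is kept in 1/16 bpp
	 * fixed point, so 8 bpp is expressed as 8 * 16 = 128 in the
	 * *_bpp_x16 variables. A rough sketch of the policy implemented
	 * below, using only this function's own names:
	 *
	 *	min/max target bpp default to 8 bpp; max is then capped by the
	 *	panel's dsc_caps->edp_max_bits_per_pixel.
	 *	If the computed DSC bandwidth range fits the timing and its
	 *	max_kbps stays below the link bandwidth, enable DSC pinned at
	 *	the max target bpp (compress no harder than necessary);
	 *	otherwise compute a DSC config sized to the link bandwidth.
	 */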
5908 5909 dc_dsc_get_default_config_option(dc, &dsc_options); 5910 dsc_options.max_target_bpp_limit_override_x16 = max_dsc_target_bpp_limit_override * 16; 5911 5912 verified_link_cap = dc_link_get_link_cap(stream->link); 5913 link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap); 5914 edp_min_bpp_x16 = 8 * 16; 5915 edp_max_bpp_x16 = 8 * 16; 5916 5917 if (edp_max_bpp_x16 > dsc_caps->edp_max_bits_per_pixel) 5918 edp_max_bpp_x16 = dsc_caps->edp_max_bits_per_pixel; 5919 5920 if (edp_max_bpp_x16 < edp_min_bpp_x16) 5921 edp_min_bpp_x16 = edp_max_bpp_x16; 5922 5923 if (dc_dsc_compute_bandwidth_range(dc->res_pool->dscs[0], 5924 dc->debug.dsc_min_slice_height_override, 5925 edp_min_bpp_x16, edp_max_bpp_x16, 5926 dsc_caps, 5927 &stream->timing, 5928 dc_link_get_highest_encoding_format(aconnector->dc_link), 5929 &bw_range)) { 5930 5931 if (bw_range.max_kbps < link_bw_in_kbps) { 5932 if (dc_dsc_compute_config(dc->res_pool->dscs[0], 5933 dsc_caps, 5934 &dsc_options, 5935 0, 5936 &stream->timing, 5937 dc_link_get_highest_encoding_format(aconnector->dc_link), 5938 &dsc_cfg)) { 5939 stream->timing.dsc_cfg = dsc_cfg; 5940 stream->timing.flags.DSC = 1; 5941 stream->timing.dsc_cfg.bits_per_pixel = edp_max_bpp_x16; 5942 } 5943 return; 5944 } 5945 } 5946 5947 if (dc_dsc_compute_config(dc->res_pool->dscs[0], 5948 dsc_caps, 5949 &dsc_options, 5950 link_bw_in_kbps, 5951 &stream->timing, 5952 dc_link_get_highest_encoding_format(aconnector->dc_link), 5953 &dsc_cfg)) { 5954 stream->timing.dsc_cfg = dsc_cfg; 5955 stream->timing.flags.DSC = 1; 5956 } 5957 } 5958 5959 5960 static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector, 5961 struct dc_sink *sink, struct dc_stream_state *stream, 5962 struct dsc_dec_dpcd_caps *dsc_caps) 5963 { 5964 struct drm_connector *drm_connector = &aconnector->base; 5965 u32 link_bandwidth_kbps; 5966 struct dc *dc = sink->ctx->dc; 5967 u32 max_supported_bw_in_kbps, timing_bw_in_kbps; 5968 u32 dsc_max_supported_bw_in_kbps; 5969 u32 max_dsc_target_bpp_limit_override = 5970 drm_connector->display_info.max_dsc_bpp; 5971 struct dc_dsc_config_options dsc_options = {0}; 5972 5973 dc_dsc_get_default_config_option(dc, &dsc_options); 5974 dsc_options.max_target_bpp_limit_override_x16 = max_dsc_target_bpp_limit_override * 16; 5975 5976 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link, 5977 dc_link_get_link_cap(aconnector->dc_link)); 5978 5979 /* Set DSC policy according to dsc_clock_en */ 5980 dc_dsc_policy_set_enable_dsc_when_not_needed( 5981 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE); 5982 5983 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP && 5984 !aconnector->dc_link->panel_config.dsc.disable_dsc_edp && 5985 dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) { 5986 5987 apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override); 5988 5989 } else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) { 5990 if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) { 5991 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0], 5992 dsc_caps, 5993 &dsc_options, 5994 link_bandwidth_kbps, 5995 &stream->timing, 5996 dc_link_get_highest_encoding_format(aconnector->dc_link), 5997 &stream->timing.dsc_cfg)) { 5998 stream->timing.flags.DSC = 1; 5999 DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name); 6000 } 6001 } else if 
(sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) { 6002 timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing, 6003 dc_link_get_highest_encoding_format(aconnector->dc_link)); 6004 max_supported_bw_in_kbps = link_bandwidth_kbps; 6005 dsc_max_supported_bw_in_kbps = link_bandwidth_kbps; 6006 6007 if (timing_bw_in_kbps > max_supported_bw_in_kbps && 6008 max_supported_bw_in_kbps > 0 && 6009 dsc_max_supported_bw_in_kbps > 0) 6010 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0], 6011 dsc_caps, 6012 &dsc_options, 6013 dsc_max_supported_bw_in_kbps, 6014 &stream->timing, 6015 dc_link_get_highest_encoding_format(aconnector->dc_link), 6016 &stream->timing.dsc_cfg)) { 6017 stream->timing.flags.DSC = 1; 6018 DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from DP-HDMI PCON\n", 6019 __func__, drm_connector->name); 6020 } 6021 } 6022 } 6023 6024 /* Overwrite the stream flag if DSC is enabled through debugfs */ 6025 if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE) 6026 stream->timing.flags.DSC = 1; 6027 6028 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h) 6029 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h; 6030 6031 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v) 6032 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v; 6033 6034 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel) 6035 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel; 6036 } 6037 6038 static struct dc_stream_state * 6039 create_stream_for_sink(struct amdgpu_dm_connector *aconnector, 6040 const struct drm_display_mode *drm_mode, 6041 const struct dm_connector_state *dm_state, 6042 const struct dc_stream_state *old_stream, 6043 int requested_bpc) 6044 { 6045 struct drm_display_mode *preferred_mode = NULL; 6046 struct drm_connector *drm_connector; 6047 const struct drm_connector_state *con_state = &dm_state->base; 6048 struct dc_stream_state *stream = NULL; 6049 struct drm_display_mode mode; 6050 struct drm_display_mode saved_mode; 6051 struct drm_display_mode *freesync_mode = NULL; 6052 bool native_mode_found = false; 6053 bool recalculate_timing = false; 6054 bool scale = dm_state->scaling != RMX_OFF; 6055 int mode_refresh; 6056 int preferred_refresh = 0; 6057 enum color_transfer_func tf = TRANSFER_FUNC_UNKNOWN; 6058 struct dsc_dec_dpcd_caps dsc_caps; 6059 6060 struct dc_sink *sink = NULL; 6061 6062 drm_mode_init(&mode, drm_mode); 6063 memset(&saved_mode, 0, sizeof(saved_mode)); 6064 6065 if (aconnector == NULL) { 6066 DRM_ERROR("aconnector is NULL!\n"); 6067 return stream; 6068 } 6069 6070 drm_connector = &aconnector->base; 6071 6072 if (!aconnector->dc_sink) { 6073 sink = create_fake_sink(aconnector); 6074 if (!sink) 6075 return stream; 6076 } else { 6077 sink = aconnector->dc_sink; 6078 dc_sink_retain(sink); 6079 } 6080 6081 stream = dc_create_stream_for_sink(sink); 6082 6083 if (stream == NULL) { 6084 DRM_ERROR("Failed to create stream for sink!\n"); 6085 goto finish; 6086 } 6087 6088 stream->dm_stream_context = aconnector; 6089 6090 stream->timing.flags.LTE_340MCSC_SCRAMBLE = 6091 drm_connector->display_info.hdmi.scdc.scrambling.low_rates; 6092 6093 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) { 6094 /* Search for preferred mode */ 6095 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) { 6096 native_mode_found = true; 6097 break; 6098 } 6099 } 6100 if 
(!native_mode_found)
		preferred_mode = list_first_entry_or_null(
				&aconnector->base.modes,
				struct drm_display_mode,
				head);

	mode_refresh = drm_mode_vrefresh(&mode);

	if (preferred_mode == NULL) {
		/*
		 * This may not be an error: the use case is when we have no
		 * usermode calls to reset and set the mode upon hotplug. In
		 * this case, we call set mode ourselves to restore the
		 * previous mode, and the modelist may not be filled in time.
		 */
		DRM_DEBUG_DRIVER("No preferred mode found\n");
	} else {
		recalculate_timing = is_freesync_video_mode(&mode, aconnector);
		if (recalculate_timing) {
			freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
			drm_mode_copy(&saved_mode, &mode);
			drm_mode_copy(&mode, freesync_mode);
		} else {
			decide_crtc_timing_for_drm_display_mode(
				&mode, preferred_mode, scale);

			preferred_refresh = drm_mode_vrefresh(preferred_mode);
		}
	}

	if (recalculate_timing)
		drm_mode_set_crtcinfo(&saved_mode, 0);

	/*
	 * If scaling is enabled and the refresh rate didn't change,
	 * we copy the VIC and polarities of the old timings.
	 */
	if (!scale || mode_refresh != preferred_refresh)
		fill_stream_properties_from_drm_display_mode(
			stream, &mode, &aconnector->base, con_state, NULL,
			requested_bpc);
	else
		fill_stream_properties_from_drm_display_mode(
			stream, &mode, &aconnector->base, con_state, old_stream,
			requested_bpc);

	if (aconnector->timing_changed) {
		drm_dbg(aconnector->base.dev,
			"overriding timing for automated test, bpc %d, changing to %d\n",
			stream->timing.display_color_depth,
			aconnector->timing_requested->display_color_depth);
		stream->timing = *aconnector->timing_requested;
	}

	/* SST DSC determination policy */
	update_dsc_caps(aconnector, sink, stream, &dsc_caps);
	if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
		apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);

	update_stream_scaling_settings(&mode, dm_state, stream);

	fill_audio_info(
		&stream->audio_info,
		drm_connector,
		sink);

	update_stream_signal(stream, sink);

	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);

	if (stream->link->psr_settings.psr_feature_enabled || stream->link->replay_settings.replay_feature_enabled) {
		/*
		 * Decide whether the stream supports VSC SDP colorimetry
		 * before building the VSC info packet.
		 */
		stream->use_vsc_sdp_for_colorimetry = false;
		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
			stream->use_vsc_sdp_for_colorimetry =
				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
		} else {
			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
				stream->use_vsc_sdp_for_colorimetry = true;
		}
		if (stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22)
			tf = TRANSFER_FUNC_GAMMA_22;
		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space, tf);
		aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;

	}
finish:
	dc_sink_release(sink);

	return stream;
}

static enum drm_connector_status
amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
{
	bool
connected; 6200 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 6201 6202 /* 6203 * Notes: 6204 * 1. This interface is NOT called in context of HPD irq. 6205 * 2. This interface *is called* in context of user-mode ioctl. Which 6206 * makes it a bad place for *any* MST-related activity. 6207 */ 6208 6209 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED && 6210 !aconnector->fake_enable) 6211 connected = (aconnector->dc_sink != NULL); 6212 else 6213 connected = (aconnector->base.force == DRM_FORCE_ON || 6214 aconnector->base.force == DRM_FORCE_ON_DIGITAL); 6215 6216 update_subconnector_property(aconnector); 6217 6218 return (connected ? connector_status_connected : 6219 connector_status_disconnected); 6220 } 6221 6222 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector, 6223 struct drm_connector_state *connector_state, 6224 struct drm_property *property, 6225 uint64_t val) 6226 { 6227 struct drm_device *dev = connector->dev; 6228 struct amdgpu_device *adev = drm_to_adev(dev); 6229 struct dm_connector_state *dm_old_state = 6230 to_dm_connector_state(connector->state); 6231 struct dm_connector_state *dm_new_state = 6232 to_dm_connector_state(connector_state); 6233 6234 int ret = -EINVAL; 6235 6236 if (property == dev->mode_config.scaling_mode_property) { 6237 enum amdgpu_rmx_type rmx_type; 6238 6239 switch (val) { 6240 case DRM_MODE_SCALE_CENTER: 6241 rmx_type = RMX_CENTER; 6242 break; 6243 case DRM_MODE_SCALE_ASPECT: 6244 rmx_type = RMX_ASPECT; 6245 break; 6246 case DRM_MODE_SCALE_FULLSCREEN: 6247 rmx_type = RMX_FULL; 6248 break; 6249 case DRM_MODE_SCALE_NONE: 6250 default: 6251 rmx_type = RMX_OFF; 6252 break; 6253 } 6254 6255 if (dm_old_state->scaling == rmx_type) 6256 return 0; 6257 6258 dm_new_state->scaling = rmx_type; 6259 ret = 0; 6260 } else if (property == adev->mode_info.underscan_hborder_property) { 6261 dm_new_state->underscan_hborder = val; 6262 ret = 0; 6263 } else if (property == adev->mode_info.underscan_vborder_property) { 6264 dm_new_state->underscan_vborder = val; 6265 ret = 0; 6266 } else if (property == adev->mode_info.underscan_property) { 6267 dm_new_state->underscan_enable = val; 6268 ret = 0; 6269 } else if (property == adev->mode_info.abm_level_property) { 6270 dm_new_state->abm_level = val ?: ABM_LEVEL_IMMEDIATE_DISABLE; 6271 ret = 0; 6272 } 6273 6274 return ret; 6275 } 6276 6277 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector, 6278 const struct drm_connector_state *state, 6279 struct drm_property *property, 6280 uint64_t *val) 6281 { 6282 struct drm_device *dev = connector->dev; 6283 struct amdgpu_device *adev = drm_to_adev(dev); 6284 struct dm_connector_state *dm_state = 6285 to_dm_connector_state(state); 6286 int ret = -EINVAL; 6287 6288 if (property == dev->mode_config.scaling_mode_property) { 6289 switch (dm_state->scaling) { 6290 case RMX_CENTER: 6291 *val = DRM_MODE_SCALE_CENTER; 6292 break; 6293 case RMX_ASPECT: 6294 *val = DRM_MODE_SCALE_ASPECT; 6295 break; 6296 case RMX_FULL: 6297 *val = DRM_MODE_SCALE_FULLSCREEN; 6298 break; 6299 case RMX_OFF: 6300 default: 6301 *val = DRM_MODE_SCALE_NONE; 6302 break; 6303 } 6304 ret = 0; 6305 } else if (property == adev->mode_info.underscan_hborder_property) { 6306 *val = dm_state->underscan_hborder; 6307 ret = 0; 6308 } else if (property == adev->mode_info.underscan_vborder_property) { 6309 *val = dm_state->underscan_vborder; 6310 ret = 0; 6311 } else if (property == adev->mode_info.underscan_property) { 6312 *val = 
dm_state->underscan_enable; 6313 ret = 0; 6314 } else if (property == adev->mode_info.abm_level_property) { 6315 *val = (dm_state->abm_level != ABM_LEVEL_IMMEDIATE_DISABLE) ? 6316 dm_state->abm_level : 0; 6317 ret = 0; 6318 } 6319 6320 return ret; 6321 } 6322 6323 static void amdgpu_dm_connector_unregister(struct drm_connector *connector) 6324 { 6325 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); 6326 6327 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux); 6328 } 6329 6330 static void amdgpu_dm_connector_destroy(struct drm_connector *connector) 6331 { 6332 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 6333 struct amdgpu_device *adev = drm_to_adev(connector->dev); 6334 struct amdgpu_display_manager *dm = &adev->dm; 6335 6336 /* 6337 * Call only if mst_mgr was initialized before since it's not done 6338 * for all connector types. 6339 */ 6340 if (aconnector->mst_mgr.dev) 6341 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr); 6342 6343 if (aconnector->bl_idx != -1) { 6344 backlight_device_unregister(dm->backlight_dev[aconnector->bl_idx]); 6345 dm->backlight_dev[aconnector->bl_idx] = NULL; 6346 } 6347 6348 if (aconnector->dc_em_sink) 6349 dc_sink_release(aconnector->dc_em_sink); 6350 aconnector->dc_em_sink = NULL; 6351 if (aconnector->dc_sink) 6352 dc_sink_release(aconnector->dc_sink); 6353 aconnector->dc_sink = NULL; 6354 6355 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux); 6356 drm_connector_unregister(connector); 6357 drm_connector_cleanup(connector); 6358 if (aconnector->i2c) { 6359 i2c_del_adapter(&aconnector->i2c->base); 6360 kfree(aconnector->i2c); 6361 } 6362 kfree(aconnector->dm_dp_aux.aux.name); 6363 6364 kfree(connector); 6365 } 6366 6367 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector) 6368 { 6369 struct dm_connector_state *state = 6370 to_dm_connector_state(connector->state); 6371 6372 if (connector->state) 6373 __drm_atomic_helper_connector_destroy_state(connector->state); 6374 6375 kfree(state); 6376 6377 state = kzalloc(sizeof(*state), GFP_KERNEL); 6378 6379 if (state) { 6380 state->scaling = RMX_OFF; 6381 state->underscan_enable = false; 6382 state->underscan_hborder = 0; 6383 state->underscan_vborder = 0; 6384 state->base.max_requested_bpc = 8; 6385 state->vcpi_slots = 0; 6386 state->pbn = 0; 6387 6388 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) 6389 state->abm_level = amdgpu_dm_abm_level ?: 6390 ABM_LEVEL_IMMEDIATE_DISABLE; 6391 6392 __drm_atomic_helper_connector_reset(connector, &state->base); 6393 } 6394 } 6395 6396 struct drm_connector_state * 6397 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector) 6398 { 6399 struct dm_connector_state *state = 6400 to_dm_connector_state(connector->state); 6401 6402 struct dm_connector_state *new_state = 6403 kmemdup(state, sizeof(*state), GFP_KERNEL); 6404 6405 if (!new_state) 6406 return NULL; 6407 6408 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base); 6409 6410 new_state->freesync_capable = state->freesync_capable; 6411 new_state->abm_level = state->abm_level; 6412 new_state->scaling = state->scaling; 6413 new_state->underscan_enable = state->underscan_enable; 6414 new_state->underscan_hborder = state->underscan_hborder; 6415 new_state->underscan_vborder = state->underscan_vborder; 6416 new_state->vcpi_slots = state->vcpi_slots; 6417 new_state->pbn = state->pbn; 6418 return &new_state->base; 6419 } 6420 6421 static int 6422 
amdgpu_dm_connector_late_register(struct drm_connector *connector) 6423 { 6424 struct amdgpu_dm_connector *amdgpu_dm_connector = 6425 to_amdgpu_dm_connector(connector); 6426 int r; 6427 6428 amdgpu_dm_register_backlight_device(amdgpu_dm_connector); 6429 6430 if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) || 6431 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) { 6432 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev; 6433 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux); 6434 if (r) 6435 return r; 6436 } 6437 6438 #if defined(CONFIG_DEBUG_FS) 6439 connector_debugfs_init(amdgpu_dm_connector); 6440 #endif 6441 6442 return 0; 6443 } 6444 6445 static void amdgpu_dm_connector_funcs_force(struct drm_connector *connector) 6446 { 6447 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 6448 struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); 6449 struct dc_link *dc_link = aconnector->dc_link; 6450 struct dc_sink *dc_em_sink = aconnector->dc_em_sink; 6451 struct edid *edid; 6452 6453 /* 6454 * Note: drm_get_edid gets edid in the following order: 6455 * 1) override EDID if set via edid_override debugfs, 6456 * 2) firmware EDID if set via edid_firmware module parameter 6457 * 3) regular DDC read. 6458 */ 6459 edid = drm_get_edid(connector, &amdgpu_connector->ddc_bus->aux.ddc); 6460 if (!edid) { 6461 DRM_ERROR("No EDID found on connector: %s.\n", connector->name); 6462 return; 6463 } 6464 6465 aconnector->edid = edid; 6466 6467 /* Update emulated (virtual) sink's EDID */ 6468 if (dc_em_sink && dc_link) { 6469 memset(&dc_em_sink->edid_caps, 0, sizeof(struct dc_edid_caps)); 6470 memmove(dc_em_sink->dc_edid.raw_edid, edid, (edid->extensions + 1) * EDID_LENGTH); 6471 dm_helpers_parse_edid_caps( 6472 dc_link, 6473 &dc_em_sink->dc_edid, 6474 &dc_em_sink->edid_caps); 6475 } 6476 } 6477 6478 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = { 6479 .reset = amdgpu_dm_connector_funcs_reset, 6480 .detect = amdgpu_dm_connector_detect, 6481 .fill_modes = drm_helper_probe_single_connector_modes, 6482 .destroy = amdgpu_dm_connector_destroy, 6483 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state, 6484 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 6485 .atomic_set_property = amdgpu_dm_connector_atomic_set_property, 6486 .atomic_get_property = amdgpu_dm_connector_atomic_get_property, 6487 .late_register = amdgpu_dm_connector_late_register, 6488 .early_unregister = amdgpu_dm_connector_unregister, 6489 .force = amdgpu_dm_connector_funcs_force 6490 }; 6491 6492 static int get_modes(struct drm_connector *connector) 6493 { 6494 return amdgpu_dm_connector_get_modes(connector); 6495 } 6496 6497 static void create_eml_sink(struct amdgpu_dm_connector *aconnector) 6498 { 6499 struct drm_connector *connector = &aconnector->base; 6500 struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(&aconnector->base); 6501 struct dc_sink_init_data init_params = { 6502 .link = aconnector->dc_link, 6503 .sink_signal = SIGNAL_TYPE_VIRTUAL 6504 }; 6505 struct edid *edid; 6506 6507 /* 6508 * Note: drm_get_edid gets edid in the following order: 6509 * 1) override EDID if set via edid_override debugfs, 6510 * 2) firmware EDID if set via edid_firmware module parameter 6511 * 3) regular DDC read. 
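	 *    (For example, and as an illustration only: booting with
	 *    drm.edid_firmware=edid/1920x1080.bin, one of the kernel's
	 *    built-in EDIDs, exercises path 2) above instead of the DDC
	 *    read in 3).)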
	 */
	edid = drm_get_edid(connector, &amdgpu_connector->ddc_bus->aux.ddc);
	if (!edid) {
		DRM_ERROR("No EDID found on connector: %s.\n", connector->name);
		return;
	}

	if (drm_detect_hdmi_monitor(edid))
		init_params.sink_signal = SIGNAL_TYPE_HDMI_TYPE_A;

	aconnector->edid = edid;

	aconnector->dc_em_sink = dc_link_add_remote_sink(
		aconnector->dc_link,
		(uint8_t *)edid,
		(edid->extensions + 1) * EDID_LENGTH,
		&init_params);

	if (aconnector->base.force == DRM_FORCE_ON) {
		aconnector->dc_sink = aconnector->dc_link->local_sink ?
			aconnector->dc_link->local_sink :
			aconnector->dc_em_sink;
		dc_sink_retain(aconnector->dc_sink);
	}
}

static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = (struct dc_link *)aconnector->dc_link;

	/*
	 * In case of a headless boot with force-on for a DP managed
	 * connector, those settings have to be != 0 to get an initial
	 * modeset.
	 */
	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
	}

	create_eml_sink(aconnector);
}

static enum dc_status dm_validate_stream_and_context(struct dc *dc,
						     struct dc_stream_state *stream)
{
	enum dc_status dc_result = DC_ERROR_UNEXPECTED;
	struct dc_plane_state *dc_plane_state = NULL;
	struct dc_state *dc_state = NULL;

	if (!stream)
		goto cleanup;

	dc_plane_state = dc_create_plane_state(dc);
	if (!dc_plane_state)
		goto cleanup;

	dc_state = dc_create_state(dc);
	if (!dc_state)
		goto cleanup;

	/* populate stream to plane */
	dc_plane_state->src_rect.height = stream->src.height;
	dc_plane_state->src_rect.width = stream->src.width;
	dc_plane_state->dst_rect.height = stream->src.height;
	dc_plane_state->dst_rect.width = stream->src.width;
	dc_plane_state->clip_rect.height = stream->src.height;
	dc_plane_state->clip_rect.width = stream->src.width;
	dc_plane_state->plane_size.surface_pitch = ((stream->src.width + 255) / 256) * 256;
	dc_plane_state->plane_size.surface_size.height = stream->src.height;
	dc_plane_state->plane_size.surface_size.width = stream->src.width;
	dc_plane_state->plane_size.chroma_size.height = stream->src.height;
	dc_plane_state->plane_size.chroma_size.width = stream->src.width;
	dc_plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
	dc_plane_state->tiling_info.gfx9.swizzle = DC_SW_UNKNOWN;
	dc_plane_state->rotation = ROTATION_ANGLE_0;
	dc_plane_state->is_tiling_rotated = false;
	dc_plane_state->tiling_info.gfx8.array_mode = DC_ARRAY_LINEAR_GENERAL;

	dc_result = dc_validate_stream(dc, stream);
	if (dc_result == DC_OK)
		dc_result = dc_validate_plane(dc, dc_plane_state);

	if (dc_result == DC_OK)
		dc_result = dc_add_stream_to_ctx(dc, dc_state, stream);

	if (dc_result == DC_OK && !dc_add_plane_to_context(
			dc,
			stream,
			dc_plane_state,
			dc_state))
		dc_result = DC_FAIL_ATTACH_SURFACES;

	if (dc_result == DC_OK)
		dc_result = dc_validate_global_state(dc, dc_state, true);

cleanup:
	if (dc_state)
		dc_release_state(dc_state);

	if (dc_plane_state)
		dc_plane_state_release(dc_plane_state);

	return dc_result;
}
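
/*
 * Retry policy wrapped around create_stream_for_sink() (a sketch of the flow
 * in the function below, derived from its own code): build and validate a
 * stream at the connector's requested bpc, stepping the bpc down by 2 on each
 * DC validation failure until it would drop below 6; if the encoder still
 * rejects the stream (DC_FAIL_ENC_VALIDATE), retry exactly once more with
 * YCbCr 4:2:0 output forced.
 */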
struct dc_stream_state *
create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
				const struct drm_display_mode *drm_mode,
				const struct dm_connector_state *dm_state,
				const struct dc_stream_state *old_stream)
{
	struct drm_connector *connector = &aconnector->base;
	struct amdgpu_device *adev = drm_to_adev(connector->dev);
	struct dc_stream_state *stream;
	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
	enum dc_status dc_result = DC_OK;

	do {
		stream = create_stream_for_sink(aconnector, drm_mode,
						dm_state, old_stream,
						requested_bpc);
		if (stream == NULL) {
			DRM_ERROR("Failed to create stream for sink!\n");
			break;
		}

		dc_result = dc_validate_stream(adev->dm.dc, stream);
		if (dc_result == DC_OK && stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
			dc_result = dm_dp_mst_is_port_support_mode(aconnector, stream);

		if (dc_result == DC_OK)
			dc_result = dm_validate_stream_and_context(adev->dm.dc, stream);

		if (dc_result != DC_OK) {
			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
				      drm_mode->hdisplay,
				      drm_mode->vdisplay,
				      drm_mode->clock,
				      dc_result,
				      dc_status_to_str(dc_result));

			dc_stream_release(stream);
			stream = NULL;
			requested_bpc -= 2; /* lower bpc to retry validation */
		}

	} while (stream == NULL && requested_bpc >= 6);

	if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
		DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");

		aconnector->force_yuv420_output = true;
		stream = create_validate_stream_for_sink(aconnector, drm_mode,
							 dm_state, old_stream);
		aconnector->force_yuv420_output = false;
	}

	return stream;
}

enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
						    struct drm_display_mode *mode)
{
	int result = MODE_ERROR;
	struct dc_sink *dc_sink;
	/* TODO: Unhardcode stream count */
	struct dc_stream_state *stream;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
	    (mode->flags & DRM_MODE_FLAG_DBLSCAN))
		return result;

	/*
	 * Only run this the first time mode_valid is called, to initialize
	 * EDID mgmt.
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
	    !aconnector->dc_em_sink)
		handle_edid_mgmt(aconnector);

	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;

	if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
	    aconnector->base.force != DRM_FORCE_ON) {
		DRM_ERROR("dc_sink is NULL!\n");
		goto fail;
	}

	drm_mode_set_crtcinfo(mode, 0);

	stream = create_validate_stream_for_sink(aconnector, mode,
						 to_dm_connector_state(connector->state),
						 NULL);
	if (stream) {
		dc_stream_release(stream);
		result = MODE_OK;
	}

fail:
	/* TODO: error handling */
	return result;
}

static int fill_hdr_info_packet(const struct drm_connector_state *state,
				struct dc_info_packet *out)
{
	struct hdmi_drm_infoframe frame;
	unsigned char buf[30]; /* 26 + 4 */
	ssize_t len;
	int ret, i;

	memset(out, 0, sizeof(*out));

	if (!state->hdr_output_metadata)
		return 0;

	ret
= drm_hdmi_infoframe_set_hdr_metadata(&frame, state); 6731 if (ret) 6732 return ret; 6733 6734 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf)); 6735 if (len < 0) 6736 return (int)len; 6737 6738 /* Static metadata is a fixed 26 bytes + 4 byte header. */ 6739 if (len != 30) 6740 return -EINVAL; 6741 6742 /* Prepare the infopacket for DC. */ 6743 switch (state->connector->connector_type) { 6744 case DRM_MODE_CONNECTOR_HDMIA: 6745 out->hb0 = 0x87; /* type */ 6746 out->hb1 = 0x01; /* version */ 6747 out->hb2 = 0x1A; /* length */ 6748 out->sb[0] = buf[3]; /* checksum */ 6749 i = 1; 6750 break; 6751 6752 case DRM_MODE_CONNECTOR_DisplayPort: 6753 case DRM_MODE_CONNECTOR_eDP: 6754 out->hb0 = 0x00; /* sdp id, zero */ 6755 out->hb1 = 0x87; /* type */ 6756 out->hb2 = 0x1D; /* payload len - 1 */ 6757 out->hb3 = (0x13 << 2); /* sdp version */ 6758 out->sb[0] = 0x01; /* version */ 6759 out->sb[1] = 0x1A; /* length */ 6760 i = 2; 6761 break; 6762 6763 default: 6764 return -EINVAL; 6765 } 6766 6767 memcpy(&out->sb[i], &buf[4], 26); 6768 out->valid = true; 6769 6770 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb, 6771 sizeof(out->sb), false); 6772 6773 return 0; 6774 } 6775 6776 static int 6777 amdgpu_dm_connector_atomic_check(struct drm_connector *conn, 6778 struct drm_atomic_state *state) 6779 { 6780 struct drm_connector_state *new_con_state = 6781 drm_atomic_get_new_connector_state(state, conn); 6782 struct drm_connector_state *old_con_state = 6783 drm_atomic_get_old_connector_state(state, conn); 6784 struct drm_crtc *crtc = new_con_state->crtc; 6785 struct drm_crtc_state *new_crtc_state; 6786 struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(conn); 6787 int ret; 6788 6789 trace_amdgpu_dm_connector_atomic_check(new_con_state); 6790 6791 if (conn->connector_type == DRM_MODE_CONNECTOR_DisplayPort) { 6792 ret = drm_dp_mst_root_conn_atomic_check(new_con_state, &aconn->mst_mgr); 6793 if (ret < 0) 6794 return ret; 6795 } 6796 6797 if (!crtc) 6798 return 0; 6799 6800 if (new_con_state->colorspace != old_con_state->colorspace) { 6801 new_crtc_state = drm_atomic_get_crtc_state(state, crtc); 6802 if (IS_ERR(new_crtc_state)) 6803 return PTR_ERR(new_crtc_state); 6804 6805 new_crtc_state->mode_changed = true; 6806 } 6807 6808 if (new_con_state->content_type != old_con_state->content_type) { 6809 new_crtc_state = drm_atomic_get_crtc_state(state, crtc); 6810 if (IS_ERR(new_crtc_state)) 6811 return PTR_ERR(new_crtc_state); 6812 6813 new_crtc_state->mode_changed = true; 6814 } 6815 6816 if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) { 6817 struct dc_info_packet hdr_infopacket; 6818 6819 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket); 6820 if (ret) 6821 return ret; 6822 6823 new_crtc_state = drm_atomic_get_crtc_state(state, crtc); 6824 if (IS_ERR(new_crtc_state)) 6825 return PTR_ERR(new_crtc_state); 6826 6827 /* 6828 * DC considers the stream backends changed if the 6829 * static metadata changes. Forcing the modeset also 6830 * gives a simple way for userspace to switch from 6831 * 8bpc to 10bpc when setting the metadata to enter 6832 * or exit HDR. 6833 * 6834 * Changing the static metadata after it's been 6835 * set is permissible, however. So only force a 6836 * modeset if we're entering or exiting HDR. 
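	 *
	 * Concretely: a NULL -> non-NULL transition of hdr_output_metadata
	 * (entering HDR) or non-NULL -> NULL (exiting HDR) forces the
	 * modeset below, while replacing one valid metadata blob with
	 * another only updates the infopacket without a full modeset.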
	 */
		new_crtc_state->mode_changed = new_crtc_state->mode_changed ||
					       !old_con_state->hdr_output_metadata ||
					       !new_con_state->hdr_output_metadata;
	}

	return 0;
}

static const struct drm_connector_helper_funcs
amdgpu_dm_connector_helper_funcs = {
	/*
	 * If hotplugging a second, bigger display in FB console mode, bigger
	 * resolution modes will be filtered by drm_mode_validate_size(), and
	 * those modes are missing after the user starts lightdm. So we need
	 * to renew the modes list in the get_modes callback, not just return
	 * the modes count.
	 */
	.get_modes = get_modes,
	.mode_valid = amdgpu_dm_connector_mode_valid,
	.atomic_check = amdgpu_dm_connector_atomic_check,
};

static void dm_encoder_helper_disable(struct drm_encoder *encoder)
{

}

int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
{
	switch (display_color_depth) {
	case COLOR_DEPTH_666:
		return 6;
	case COLOR_DEPTH_888:
		return 8;
	case COLOR_DEPTH_101010:
		return 10;
	case COLOR_DEPTH_121212:
		return 12;
	case COLOR_DEPTH_141414:
		return 14;
	case COLOR_DEPTH_161616:
		return 16;
	default:
		break;
	}
	return 0;
}

static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
					  struct drm_crtc_state *crtc_state,
					  struct drm_connector_state *conn_state)
{
	struct drm_atomic_state *state = crtc_state->state;
	struct drm_connector *connector = conn_state->connector;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
	struct drm_dp_mst_topology_mgr *mst_mgr;
	struct drm_dp_mst_port *mst_port;
	struct drm_dp_mst_topology_state *mst_state;
	enum dc_color_depth color_depth;
	int clock, bpp = 0;
	bool is_y420 = false;

	if (!aconnector->mst_output_port)
		return 0;

	mst_port = aconnector->mst_output_port;
	mst_mgr = &aconnector->mst_root->mst_mgr;

	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
		return 0;

	mst_state = drm_atomic_get_mst_topology_state(state, mst_mgr);
	if (IS_ERR(mst_state))
		return PTR_ERR(mst_state);

	if (!mst_state->pbn_div)
		mst_state->pbn_div = dm_mst_get_pbn_divider(aconnector->mst_root->dc_link);

	if (!state->duplicated) {
		int max_bpc = conn_state->max_requested_bpc;

		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
			  aconnector->force_yuv420_output;
		color_depth = convert_color_depth_from_display_info(connector,
								    is_y420,
								    max_bpc);
		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
		clock = adjusted_mode->clock;
		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
	}

	dm_new_connector_state->vcpi_slots =
		drm_dp_atomic_find_time_slots(state, mst_mgr, mst_port,
					      dm_new_connector_state->pbn);
	if (dm_new_connector_state->vcpi_slots < 0) {
		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
		return dm_new_connector_state->vcpi_slots;
	}
	return 0;
}

const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
	.disable = dm_encoder_helper_disable,
.atomic_check = dm_encoder_helper_atomic_check 6943 }; 6944 6945 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state, 6946 struct dc_state *dc_state, 6947 struct dsc_mst_fairness_vars *vars) 6948 { 6949 struct dc_stream_state *stream = NULL; 6950 struct drm_connector *connector; 6951 struct drm_connector_state *new_con_state; 6952 struct amdgpu_dm_connector *aconnector; 6953 struct dm_connector_state *dm_conn_state; 6954 int i, j, ret; 6955 int vcpi, pbn_div, pbn, slot_num = 0; 6956 6957 for_each_new_connector_in_state(state, connector, new_con_state, i) { 6958 6959 aconnector = to_amdgpu_dm_connector(connector); 6960 6961 if (!aconnector->mst_output_port) 6962 continue; 6963 6964 if (!new_con_state || !new_con_state->crtc) 6965 continue; 6966 6967 dm_conn_state = to_dm_connector_state(new_con_state); 6968 6969 for (j = 0; j < dc_state->stream_count; j++) { 6970 stream = dc_state->streams[j]; 6971 if (!stream) 6972 continue; 6973 6974 if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector) 6975 break; 6976 6977 stream = NULL; 6978 } 6979 6980 if (!stream) 6981 continue; 6982 6983 pbn_div = dm_mst_get_pbn_divider(stream->link); 6984 /* pbn is calculated by compute_mst_dsc_configs_for_state*/ 6985 for (j = 0; j < dc_state->stream_count; j++) { 6986 if (vars[j].aconnector == aconnector) { 6987 pbn = vars[j].pbn; 6988 break; 6989 } 6990 } 6991 6992 if (j == dc_state->stream_count) 6993 continue; 6994 6995 slot_num = DIV_ROUND_UP(pbn, pbn_div); 6996 6997 if (stream->timing.flags.DSC != 1) { 6998 dm_conn_state->pbn = pbn; 6999 dm_conn_state->vcpi_slots = slot_num; 7000 7001 ret = drm_dp_mst_atomic_enable_dsc(state, aconnector->mst_output_port, 7002 dm_conn_state->pbn, false); 7003 if (ret < 0) 7004 return ret; 7005 7006 continue; 7007 } 7008 7009 vcpi = drm_dp_mst_atomic_enable_dsc(state, aconnector->mst_output_port, pbn, true); 7010 if (vcpi < 0) 7011 return vcpi; 7012 7013 dm_conn_state->pbn = pbn; 7014 dm_conn_state->vcpi_slots = vcpi; 7015 } 7016 return 0; 7017 } 7018 7019 static int to_drm_connector_type(enum signal_type st) 7020 { 7021 switch (st) { 7022 case SIGNAL_TYPE_HDMI_TYPE_A: 7023 return DRM_MODE_CONNECTOR_HDMIA; 7024 case SIGNAL_TYPE_EDP: 7025 return DRM_MODE_CONNECTOR_eDP; 7026 case SIGNAL_TYPE_LVDS: 7027 return DRM_MODE_CONNECTOR_LVDS; 7028 case SIGNAL_TYPE_RGB: 7029 return DRM_MODE_CONNECTOR_VGA; 7030 case SIGNAL_TYPE_DISPLAY_PORT: 7031 case SIGNAL_TYPE_DISPLAY_PORT_MST: 7032 return DRM_MODE_CONNECTOR_DisplayPort; 7033 case SIGNAL_TYPE_DVI_DUAL_LINK: 7034 case SIGNAL_TYPE_DVI_SINGLE_LINK: 7035 return DRM_MODE_CONNECTOR_DVID; 7036 case SIGNAL_TYPE_VIRTUAL: 7037 return DRM_MODE_CONNECTOR_VIRTUAL; 7038 7039 default: 7040 return DRM_MODE_CONNECTOR_Unknown; 7041 } 7042 } 7043 7044 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector) 7045 { 7046 struct drm_encoder *encoder; 7047 7048 /* There is only one encoder per connector */ 7049 drm_connector_for_each_possible_encoder(connector, encoder) 7050 return encoder; 7051 7052 return NULL; 7053 } 7054 7055 static void amdgpu_dm_get_native_mode(struct drm_connector *connector) 7056 { 7057 struct drm_encoder *encoder; 7058 struct amdgpu_encoder *amdgpu_encoder; 7059 7060 encoder = amdgpu_dm_connector_to_encoder(connector); 7061 7062 if (encoder == NULL) 7063 return; 7064 7065 amdgpu_encoder = to_amdgpu_encoder(encoder); 7066 7067 amdgpu_encoder->native_mode.clock = 0; 7068 7069 if (!list_empty(&connector->probed_modes)) { 7070 struct drm_display_mode 
*preferred_mode = NULL; 7071 7072 list_for_each_entry(preferred_mode, 7073 &connector->probed_modes, 7074 head) { 7075 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) 7076 amdgpu_encoder->native_mode = *preferred_mode; 7077 7078 break; 7079 } 7080 7081 } 7082 } 7083 7084 static struct drm_display_mode * 7085 amdgpu_dm_create_common_mode(struct drm_encoder *encoder, 7086 char *name, 7087 int hdisplay, int vdisplay) 7088 { 7089 struct drm_device *dev = encoder->dev; 7090 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 7091 struct drm_display_mode *mode = NULL; 7092 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode; 7093 7094 mode = drm_mode_duplicate(dev, native_mode); 7095 7096 if (mode == NULL) 7097 return NULL; 7098 7099 mode->hdisplay = hdisplay; 7100 mode->vdisplay = vdisplay; 7101 mode->type &= ~DRM_MODE_TYPE_PREFERRED; 7102 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN); 7103 7104 return mode; 7105 7106 } 7107 7108 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder, 7109 struct drm_connector *connector) 7110 { 7111 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 7112 struct drm_display_mode *mode = NULL; 7113 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode; 7114 struct amdgpu_dm_connector *amdgpu_dm_connector = 7115 to_amdgpu_dm_connector(connector); 7116 int i; 7117 int n; 7118 struct mode_size { 7119 char name[DRM_DISPLAY_MODE_LEN]; 7120 int w; 7121 int h; 7122 } common_modes[] = { 7123 { "640x480", 640, 480}, 7124 { "800x600", 800, 600}, 7125 { "1024x768", 1024, 768}, 7126 { "1280x720", 1280, 720}, 7127 { "1280x800", 1280, 800}, 7128 {"1280x1024", 1280, 1024}, 7129 { "1440x900", 1440, 900}, 7130 {"1680x1050", 1680, 1050}, 7131 {"1600x1200", 1600, 1200}, 7132 {"1920x1080", 1920, 1080}, 7133 {"1920x1200", 1920, 1200} 7134 }; 7135 7136 n = ARRAY_SIZE(common_modes); 7137 7138 for (i = 0; i < n; i++) { 7139 struct drm_display_mode *curmode = NULL; 7140 bool mode_existed = false; 7141 7142 if (common_modes[i].w > native_mode->hdisplay || 7143 common_modes[i].h > native_mode->vdisplay || 7144 (common_modes[i].w == native_mode->hdisplay && 7145 common_modes[i].h == native_mode->vdisplay)) 7146 continue; 7147 7148 list_for_each_entry(curmode, &connector->probed_modes, head) { 7149 if (common_modes[i].w == curmode->hdisplay && 7150 common_modes[i].h == curmode->vdisplay) { 7151 mode_existed = true; 7152 break; 7153 } 7154 } 7155 7156 if (mode_existed) 7157 continue; 7158 7159 mode = amdgpu_dm_create_common_mode(encoder, 7160 common_modes[i].name, common_modes[i].w, 7161 common_modes[i].h); 7162 if (!mode) 7163 continue; 7164 7165 drm_mode_probed_add(connector, mode); 7166 amdgpu_dm_connector->num_modes++; 7167 } 7168 } 7169 7170 static void amdgpu_set_panel_orientation(struct drm_connector *connector) 7171 { 7172 struct drm_encoder *encoder; 7173 struct amdgpu_encoder *amdgpu_encoder; 7174 const struct drm_display_mode *native_mode; 7175 7176 if (connector->connector_type != DRM_MODE_CONNECTOR_eDP && 7177 connector->connector_type != DRM_MODE_CONNECTOR_LVDS) 7178 return; 7179 7180 mutex_lock(&connector->dev->mode_config.mutex); 7181 amdgpu_dm_connector_get_modes(connector); 7182 mutex_unlock(&connector->dev->mode_config.mutex); 7183 7184 encoder = amdgpu_dm_connector_to_encoder(connector); 7185 if (!encoder) 7186 return; 7187 7188 amdgpu_encoder = to_amdgpu_encoder(encoder); 7189 7190 native_mode = &amdgpu_encoder->native_mode; 7191 if (native_mode->hdisplay == 0 || 
native_mode->vdisplay == 0) 7192 return; 7193 7194 drm_connector_set_panel_orientation_with_quirk(connector, 7195 DRM_MODE_PANEL_ORIENTATION_UNKNOWN, 7196 native_mode->hdisplay, 7197 native_mode->vdisplay); 7198 } 7199 7200 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector, 7201 struct edid *edid) 7202 { 7203 struct amdgpu_dm_connector *amdgpu_dm_connector = 7204 to_amdgpu_dm_connector(connector); 7205 7206 if (edid) { 7207 /* empty probed_modes */ 7208 INIT_LIST_HEAD(&connector->probed_modes); 7209 amdgpu_dm_connector->num_modes = 7210 drm_add_edid_modes(connector, edid); 7211 7212 /* sorting the probed modes before calling function 7213 * amdgpu_dm_get_native_mode() since EDID can have 7214 * more than one preferred mode. The modes that are 7215 * later in the probed mode list could be of higher 7216 * and preferred resolution. For example, 3840x2160 7217 * resolution in base EDID preferred timing and 4096x2160 7218 * preferred resolution in DID extension block later. 7219 */ 7220 drm_mode_sort(&connector->probed_modes); 7221 amdgpu_dm_get_native_mode(connector); 7222 7223 /* Freesync capabilities are reset by calling 7224 * drm_add_edid_modes() and need to be 7225 * restored here. 7226 */ 7227 amdgpu_dm_update_freesync_caps(connector, edid); 7228 } else { 7229 amdgpu_dm_connector->num_modes = 0; 7230 } 7231 } 7232 7233 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector, 7234 struct drm_display_mode *mode) 7235 { 7236 struct drm_display_mode *m; 7237 7238 list_for_each_entry(m, &aconnector->base.probed_modes, head) { 7239 if (drm_mode_equal(m, mode)) 7240 return true; 7241 } 7242 7243 return false; 7244 } 7245 7246 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector) 7247 { 7248 const struct drm_display_mode *m; 7249 struct drm_display_mode *new_mode; 7250 uint i; 7251 u32 new_modes_count = 0; 7252 7253 /* Standard FPS values 7254 * 7255 * 23.976 - TV/NTSC 7256 * 24 - Cinema 7257 * 25 - TV/PAL 7258 * 29.97 - TV/NTSC 7259 * 30 - TV/NTSC 7260 * 48 - Cinema HFR 7261 * 50 - TV/PAL 7262 * 60 - Commonly used 7263 * 48,72,96,120 - Multiples of 24 7264 */ 7265 static const u32 common_rates[] = { 7266 23976, 24000, 25000, 29970, 30000, 7267 48000, 50000, 60000, 72000, 96000, 120000 7268 }; 7269 7270 /* 7271 * Find mode with highest refresh rate with the same resolution 7272 * as the preferred mode. Some monitors report a preferred mode 7273 * with lower resolution than the highest refresh rate supported. 
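 *
 * As a worked illustration (numbers are ours, not from the original
 * comment): a 1920x1080 mode with clock = 148500 kHz, htotal = 2200
 * and vtotal = 1125 scans out at exactly 60 Hz. Retargeting it to
 * 48 Hz keeps the pixel clock and stretches the vertical total:
 *
 *	target_vtotal = (148500 * 1000 * 1000) / (48000 * 2200) = 1406
 *
 * so vsync_start, vsync_end and vtotal below all shift up by
 * 1406 - 1125 = 281 lines, exactly what the div_u64() computation
 * below produces.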
7274 */ 7275 7276 m = get_highest_refresh_rate_mode(aconnector, true); 7277 if (!m) 7278 return 0; 7279 7280 for (i = 0; i < ARRAY_SIZE(common_rates); i++) { 7281 u64 target_vtotal, target_vtotal_diff; 7282 u64 num, den; 7283 7284 if (drm_mode_vrefresh(m) * 1000 < common_rates[i]) 7285 continue; 7286 7287 if (common_rates[i] < aconnector->min_vfreq * 1000 || 7288 common_rates[i] > aconnector->max_vfreq * 1000) 7289 continue; 7290 7291 num = (unsigned long long)m->clock * 1000 * 1000; 7292 den = common_rates[i] * (unsigned long long)m->htotal; 7293 target_vtotal = div_u64(num, den); 7294 target_vtotal_diff = target_vtotal - m->vtotal; 7295 7296 /* Check for illegal modes */ 7297 if (m->vsync_start + target_vtotal_diff < m->vdisplay || 7298 m->vsync_end + target_vtotal_diff < m->vsync_start || 7299 m->vtotal + target_vtotal_diff < m->vsync_end) 7300 continue; 7301 7302 new_mode = drm_mode_duplicate(aconnector->base.dev, m); 7303 if (!new_mode) 7304 goto out; 7305 7306 new_mode->vtotal += (u16)target_vtotal_diff; 7307 new_mode->vsync_start += (u16)target_vtotal_diff; 7308 new_mode->vsync_end += (u16)target_vtotal_diff; 7309 new_mode->type &= ~DRM_MODE_TYPE_PREFERRED; 7310 new_mode->type |= DRM_MODE_TYPE_DRIVER; 7311 7312 if (!is_duplicate_mode(aconnector, new_mode)) { 7313 drm_mode_probed_add(&aconnector->base, new_mode); 7314 new_modes_count += 1; 7315 } else 7316 drm_mode_destroy(aconnector->base.dev, new_mode); 7317 } 7318 out: 7319 return new_modes_count; 7320 } 7321 7322 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector, 7323 struct edid *edid) 7324 { 7325 struct amdgpu_dm_connector *amdgpu_dm_connector = 7326 to_amdgpu_dm_connector(connector); 7327 7328 if (!edid) 7329 return; 7330 7331 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10) 7332 amdgpu_dm_connector->num_modes += 7333 add_fs_modes(amdgpu_dm_connector); 7334 } 7335 7336 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector) 7337 { 7338 struct amdgpu_dm_connector *amdgpu_dm_connector = 7339 to_amdgpu_dm_connector(connector); 7340 struct drm_encoder *encoder; 7341 struct edid *edid = amdgpu_dm_connector->edid; 7342 struct dc_link_settings *verified_link_cap = 7343 &amdgpu_dm_connector->dc_link->verified_link_cap; 7344 const struct dc *dc = amdgpu_dm_connector->dc_link->dc; 7345 7346 encoder = amdgpu_dm_connector_to_encoder(connector); 7347 7348 if (!drm_edid_is_valid(edid)) { 7349 amdgpu_dm_connector->num_modes = 7350 drm_add_modes_noedid(connector, 640, 480); 7351 if (dc->link_srv->dp_get_encoding_format(verified_link_cap) == DP_128b_132b_ENCODING) 7352 amdgpu_dm_connector->num_modes += 7353 drm_add_modes_noedid(connector, 1920, 1080); 7354 } else { 7355 amdgpu_dm_connector_ddc_get_modes(connector, edid); 7356 amdgpu_dm_connector_add_common_modes(encoder, connector); 7357 amdgpu_dm_connector_add_freesync_modes(connector, edid); 7358 } 7359 amdgpu_dm_fbc_init(connector); 7360 7361 return amdgpu_dm_connector->num_modes; 7362 } 7363 7364 static const u32 supported_colorspaces = 7365 BIT(DRM_MODE_COLORIMETRY_BT709_YCC) | 7366 BIT(DRM_MODE_COLORIMETRY_OPRGB) | 7367 BIT(DRM_MODE_COLORIMETRY_BT2020_RGB) | 7368 BIT(DRM_MODE_COLORIMETRY_BT2020_YCC); 7369 7370 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm, 7371 struct amdgpu_dm_connector *aconnector, 7372 int connector_type, 7373 struct dc_link *link, 7374 int link_index) 7375 { 7376 struct amdgpu_device *adev = drm_to_adev(dm->ddev); 7377 7378 /* 7379 * Some of the properties below 
require access to state, like bpc.
	 * Allocate some default initial connector state with our reset helper.
	 */
	if (aconnector->base.funcs->reset)
		aconnector->base.funcs->reset(&aconnector->base);

	aconnector->connector_id = link_index;
	aconnector->bl_idx = -1;
	aconnector->dc_link = link;
	aconnector->base.interlace_allowed = false;
	aconnector->base.doublescan_allowed = false;
	aconnector->base.stereo_allowed = false;
	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
	aconnector->audio_inst = -1;
	aconnector->pack_sdp_v1_3 = false;
	aconnector->as_type = ADAPTIVE_SYNC_TYPE_NONE;
	memset(&aconnector->vsdb_info, 0, sizeof(aconnector->vsdb_info));
	mutex_init(&aconnector->hpd_lock);
	mutex_init(&aconnector->handle_mst_msg_ready);

	/*
	 * Configure HPD hot plug support: connector->polled defaults to 0,
	 * which means HPD hot plug is not supported.
	 */
	switch (connector_type) {
	case DRM_MODE_CONNECTOR_HDMIA:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
		break;
	case DRM_MODE_CONNECTOR_DisplayPort:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		link->link_enc = link_enc_cfg_get_link_enc(link);
		ASSERT(link->link_enc);
		if (link->link_enc)
			aconnector->base.ycbcr_420_allowed =
				link->link_enc->features.dp_ycbcr420_supported ? true : false;
		break;
	case DRM_MODE_CONNECTOR_DVID:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		break;
	default:
		break;
	}

	drm_object_attach_property(&aconnector->base.base,
				dm->ddev->mode_config.scaling_mode_property,
				DRM_MODE_SCALE_NONE);

	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_property,
				UNDERSCAN_OFF);
	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_hborder_property,
				0);
	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_vborder_property,
				0);

	if (!aconnector->mst_root)
		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);

	aconnector->base.state->max_bpc = 16;
	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;

	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
		drm_object_attach_property(&aconnector->base.base,
					   adev->mode_info.abm_level_property, 0);
	}

	if (connector_type == DRM_MODE_CONNECTOR_HDMIA) {
		/* Content Type is currently only implemented for HDMI.
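		 * Userspace can then hint DRM_MODE_CONTENT_TYPE_GRAPHICS,
		 * _PHOTO, _CINEMA or _GAME through the standard property
		 * attached below; for HDMI the hint lands in the AVI
		 * infoframe (background note, not part of the original
		 * comment).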
*/ 7453 drm_connector_attach_content_type_property(&aconnector->base); 7454 } 7455 7456 if (connector_type == DRM_MODE_CONNECTOR_HDMIA) { 7457 if (!drm_mode_create_hdmi_colorspace_property(&aconnector->base, supported_colorspaces)) 7458 drm_connector_attach_colorspace_property(&aconnector->base); 7459 } else if ((connector_type == DRM_MODE_CONNECTOR_DisplayPort && !aconnector->mst_root) || 7460 connector_type == DRM_MODE_CONNECTOR_eDP) { 7461 if (!drm_mode_create_dp_colorspace_property(&aconnector->base, supported_colorspaces)) 7462 drm_connector_attach_colorspace_property(&aconnector->base); 7463 } 7464 7465 if (connector_type == DRM_MODE_CONNECTOR_HDMIA || 7466 connector_type == DRM_MODE_CONNECTOR_DisplayPort || 7467 connector_type == DRM_MODE_CONNECTOR_eDP) { 7468 drm_connector_attach_hdr_output_metadata_property(&aconnector->base); 7469 7470 if (!aconnector->mst_root) 7471 drm_connector_attach_vrr_capable_property(&aconnector->base); 7472 7473 if (adev->dm.hdcp_workqueue) 7474 drm_connector_attach_content_protection_property(&aconnector->base, true); 7475 } 7476 } 7477 7478 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap, 7479 struct i2c_msg *msgs, int num) 7480 { 7481 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap); 7482 struct ddc_service *ddc_service = i2c->ddc_service; 7483 struct i2c_command cmd; 7484 int i; 7485 int result = -EIO; 7486 7487 if (!ddc_service->ddc_pin || !ddc_service->ddc_pin->hw_info.hw_supported) 7488 return result; 7489 7490 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL); 7491 7492 if (!cmd.payloads) 7493 return result; 7494 7495 cmd.number_of_payloads = num; 7496 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT; 7497 cmd.speed = 100; 7498 7499 for (i = 0; i < num; i++) { 7500 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD); 7501 cmd.payloads[i].address = msgs[i].addr; 7502 cmd.payloads[i].length = msgs[i].len; 7503 cmd.payloads[i].data = msgs[i].buf; 7504 } 7505 7506 if (dc_submit_i2c( 7507 ddc_service->ctx->dc, 7508 ddc_service->link->link_index, 7509 &cmd)) 7510 result = num; 7511 7512 kfree(cmd.payloads); 7513 return result; 7514 } 7515 7516 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap) 7517 { 7518 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; 7519 } 7520 7521 static const struct i2c_algorithm amdgpu_dm_i2c_algo = { 7522 .master_xfer = amdgpu_dm_i2c_xfer, 7523 .functionality = amdgpu_dm_i2c_func, 7524 }; 7525 7526 static struct amdgpu_i2c_adapter * 7527 create_i2c(struct ddc_service *ddc_service, 7528 int link_index, 7529 int *res) 7530 { 7531 struct amdgpu_device *adev = ddc_service->ctx->driver_context; 7532 struct amdgpu_i2c_adapter *i2c; 7533 7534 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL); 7535 if (!i2c) 7536 return NULL; 7537 i2c->base.owner = THIS_MODULE; 7538 i2c->base.class = I2C_CLASS_DDC; 7539 i2c->base.dev.parent = &adev->pdev->dev; 7540 i2c->base.algo = &amdgpu_dm_i2c_algo; 7541 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index); 7542 i2c_set_adapdata(&i2c->base, i2c); 7543 i2c->ddc_service = ddc_service; 7544 7545 return i2c; 7546 } 7547 7548 7549 /* 7550 * Note: this function assumes that dc_link_detect() was called for the 7551 * dc_link which will be represented by this aconnector. 
7552 */ 7553 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm, 7554 struct amdgpu_dm_connector *aconnector, 7555 u32 link_index, 7556 struct amdgpu_encoder *aencoder) 7557 { 7558 int res = 0; 7559 int connector_type; 7560 struct dc *dc = dm->dc; 7561 struct dc_link *link = dc_get_link_at_index(dc, link_index); 7562 struct amdgpu_i2c_adapter *i2c; 7563 7564 link->priv = aconnector; 7565 7566 7567 i2c = create_i2c(link->ddc, link->link_index, &res); 7568 if (!i2c) { 7569 DRM_ERROR("Failed to create i2c adapter data\n"); 7570 return -ENOMEM; 7571 } 7572 7573 aconnector->i2c = i2c; 7574 res = i2c_add_adapter(&i2c->base); 7575 7576 if (res) { 7577 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index); 7578 goto out_free; 7579 } 7580 7581 connector_type = to_drm_connector_type(link->connector_signal); 7582 7583 res = drm_connector_init_with_ddc( 7584 dm->ddev, 7585 &aconnector->base, 7586 &amdgpu_dm_connector_funcs, 7587 connector_type, 7588 &i2c->base); 7589 7590 if (res) { 7591 DRM_ERROR("connector_init failed\n"); 7592 aconnector->connector_id = -1; 7593 goto out_free; 7594 } 7595 7596 drm_connector_helper_add( 7597 &aconnector->base, 7598 &amdgpu_dm_connector_helper_funcs); 7599 7600 amdgpu_dm_connector_init_helper( 7601 dm, 7602 aconnector, 7603 connector_type, 7604 link, 7605 link_index); 7606 7607 drm_connector_attach_encoder( 7608 &aconnector->base, &aencoder->base); 7609 7610 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort 7611 || connector_type == DRM_MODE_CONNECTOR_eDP) 7612 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index); 7613 7614 out_free: 7615 if (res) { 7616 kfree(i2c); 7617 aconnector->i2c = NULL; 7618 } 7619 return res; 7620 } 7621 7622 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev) 7623 { 7624 switch (adev->mode_info.num_crtc) { 7625 case 1: 7626 return 0x1; 7627 case 2: 7628 return 0x3; 7629 case 3: 7630 return 0x7; 7631 case 4: 7632 return 0xf; 7633 case 5: 7634 return 0x1f; 7635 case 6: 7636 default: 7637 return 0x3f; 7638 } 7639 } 7640 7641 static int amdgpu_dm_encoder_init(struct drm_device *dev, 7642 struct amdgpu_encoder *aencoder, 7643 uint32_t link_index) 7644 { 7645 struct amdgpu_device *adev = drm_to_adev(dev); 7646 7647 int res = drm_encoder_init(dev, 7648 &aencoder->base, 7649 &amdgpu_dm_encoder_funcs, 7650 DRM_MODE_ENCODER_TMDS, 7651 NULL); 7652 7653 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev); 7654 7655 if (!res) 7656 aencoder->encoder_id = link_index; 7657 else 7658 aencoder->encoder_id = -1; 7659 7660 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs); 7661 7662 return res; 7663 } 7664 7665 static void manage_dm_interrupts(struct amdgpu_device *adev, 7666 struct amdgpu_crtc *acrtc, 7667 bool enable) 7668 { 7669 /* 7670 * We have no guarantee that the frontend index maps to the same 7671 * backend index - some even map to more than one. 7672 * 7673 * TODO: Use a different interrupt or check DC itself for the mapping. 
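	 * (As we read it, amdgpu_display_crtc_idx_to_irq_type() below is
	 * exactly the 1:1 frontend-index translation this TODO is worried
	 * about.)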
7674 */ 7675 int irq_type = 7676 amdgpu_display_crtc_idx_to_irq_type( 7677 adev, 7678 acrtc->crtc_id); 7679 7680 if (enable) { 7681 drm_crtc_vblank_on(&acrtc->base); 7682 amdgpu_irq_get( 7683 adev, 7684 &adev->pageflip_irq, 7685 irq_type); 7686 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) 7687 amdgpu_irq_get( 7688 adev, 7689 &adev->vline0_irq, 7690 irq_type); 7691 #endif 7692 } else { 7693 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) 7694 amdgpu_irq_put( 7695 adev, 7696 &adev->vline0_irq, 7697 irq_type); 7698 #endif 7699 amdgpu_irq_put( 7700 adev, 7701 &adev->pageflip_irq, 7702 irq_type); 7703 drm_crtc_vblank_off(&acrtc->base); 7704 } 7705 } 7706 7707 static void dm_update_pflip_irq_state(struct amdgpu_device *adev, 7708 struct amdgpu_crtc *acrtc) 7709 { 7710 int irq_type = 7711 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id); 7712 7713 /** 7714 * This reads the current state for the IRQ and force reapplies 7715 * the setting to hardware. 7716 */ 7717 amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type); 7718 } 7719 7720 static bool 7721 is_scaling_state_different(const struct dm_connector_state *dm_state, 7722 const struct dm_connector_state *old_dm_state) 7723 { 7724 if (dm_state->scaling != old_dm_state->scaling) 7725 return true; 7726 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) { 7727 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0) 7728 return true; 7729 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) { 7730 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0) 7731 return true; 7732 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder || 7733 dm_state->underscan_vborder != old_dm_state->underscan_vborder) 7734 return true; 7735 return false; 7736 } 7737 7738 static bool is_content_protection_different(struct drm_crtc_state *new_crtc_state, 7739 struct drm_crtc_state *old_crtc_state, 7740 struct drm_connector_state *new_conn_state, 7741 struct drm_connector_state *old_conn_state, 7742 const struct drm_connector *connector, 7743 struct hdcp_workqueue *hdcp_w) 7744 { 7745 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 7746 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state); 7747 7748 pr_debug("[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n", 7749 connector->index, connector->status, connector->dpms); 7750 pr_debug("[HDCP_DM] state protection old: %x new: %x\n", 7751 old_conn_state->content_protection, new_conn_state->content_protection); 7752 7753 if (old_crtc_state) 7754 pr_debug("[HDCP_DM] old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n", 7755 old_crtc_state->enable, 7756 old_crtc_state->active, 7757 old_crtc_state->mode_changed, 7758 old_crtc_state->active_changed, 7759 old_crtc_state->connectors_changed); 7760 7761 if (new_crtc_state) 7762 pr_debug("[HDCP_DM] NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n", 7763 new_crtc_state->enable, 7764 new_crtc_state->active, 7765 new_crtc_state->mode_changed, 7766 new_crtc_state->active_changed, 7767 new_crtc_state->connectors_changed); 7768 7769 /* hdcp content type change */ 7770 if (old_conn_state->hdcp_content_type != new_conn_state->hdcp_content_type && 7771 new_conn_state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) { 7772 new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; 7773 pr_debug("[HDCP_DM] Type0/1 change %s :true\n", __func__); 7774 return true; 7775 } 7776 7777 /* CP is being re 
enabled, ignore this */ 7778 if (old_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED && 7779 new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) { 7780 if (new_crtc_state && new_crtc_state->mode_changed) { 7781 new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; 7782 pr_debug("[HDCP_DM] ENABLED->DESIRED & mode_changed %s :true\n", __func__); 7783 return true; 7784 } 7785 new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED; 7786 pr_debug("[HDCP_DM] ENABLED -> DESIRED %s :false\n", __func__); 7787 return false; 7788 } 7789 7790 /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED 7791 * 7792 * Handles: UNDESIRED -> ENABLED 7793 */ 7794 if (old_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED && 7795 new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) 7796 new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; 7797 7798 /* Stream removed and re-enabled 7799 * 7800 * Can sometimes overlap with the HPD case, 7801 * thus set update_hdcp to false to avoid 7802 * setting HDCP multiple times. 7803 * 7804 * Handles: DESIRED -> DESIRED (Special case) 7805 */ 7806 if (!(old_conn_state->crtc && old_conn_state->crtc->enabled) && 7807 new_conn_state->crtc && new_conn_state->crtc->enabled && 7808 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) { 7809 dm_con_state->update_hdcp = false; 7810 pr_debug("[HDCP_DM] DESIRED->DESIRED (Stream removed and re-enabled) %s :true\n", 7811 __func__); 7812 return true; 7813 } 7814 7815 /* Hot-plug, headless s3, dpms 7816 * 7817 * Only start HDCP if the display is connected/enabled. 7818 * update_hdcp flag will be set to false until the next 7819 * HPD comes in. 
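	 * (Net effect, as we read it: the HDCP setup below is attempted at
	 * most once per plug event.)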
7820 * 7821 * Handles: DESIRED -> DESIRED (Special case) 7822 */ 7823 if (dm_con_state->update_hdcp && 7824 new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && 7825 connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) { 7826 dm_con_state->update_hdcp = false; 7827 pr_debug("[HDCP_DM] DESIRED->DESIRED (Hot-plug, headless s3, dpms) %s :true\n", 7828 __func__); 7829 return true; 7830 } 7831 7832 if (old_conn_state->content_protection == new_conn_state->content_protection) { 7833 if (new_conn_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED) { 7834 if (new_crtc_state && new_crtc_state->mode_changed) { 7835 pr_debug("[HDCP_DM] DESIRED->DESIRED or ENABLE->ENABLE mode_change %s :true\n", 7836 __func__); 7837 return true; 7838 } 7839 pr_debug("[HDCP_DM] DESIRED->DESIRED & ENABLE->ENABLE %s :false\n", 7840 __func__); 7841 return false; 7842 } 7843 7844 pr_debug("[HDCP_DM] UNDESIRED->UNDESIRED %s :false\n", __func__); 7845 return false; 7846 } 7847 7848 if (new_conn_state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED) { 7849 pr_debug("[HDCP_DM] UNDESIRED->DESIRED or DESIRED->UNDESIRED or ENABLED->UNDESIRED %s :true\n", 7850 __func__); 7851 return true; 7852 } 7853 7854 pr_debug("[HDCP_DM] DESIRED->ENABLED %s :false\n", __func__); 7855 return false; 7856 } 7857 7858 static void remove_stream(struct amdgpu_device *adev, 7859 struct amdgpu_crtc *acrtc, 7860 struct dc_stream_state *stream) 7861 { 7862 /* this is the update mode case */ 7863 7864 acrtc->otg_inst = -1; 7865 acrtc->enabled = false; 7866 } 7867 7868 static void prepare_flip_isr(struct amdgpu_crtc *acrtc) 7869 { 7870 7871 assert_spin_locked(&acrtc->base.dev->event_lock); 7872 WARN_ON(acrtc->event); 7873 7874 acrtc->event = acrtc->base.state->event; 7875 7876 /* Set the flip status */ 7877 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED; 7878 7879 /* Mark this event as consumed */ 7880 acrtc->base.state->event = NULL; 7881 7882 drm_dbg_state(acrtc->base.dev, 7883 "crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n", 7884 acrtc->crtc_id); 7885 } 7886 7887 static void update_freesync_state_on_stream( 7888 struct amdgpu_display_manager *dm, 7889 struct dm_crtc_state *new_crtc_state, 7890 struct dc_stream_state *new_stream, 7891 struct dc_plane_state *surface, 7892 u32 flip_timestamp_in_us) 7893 { 7894 struct mod_vrr_params vrr_params; 7895 struct dc_info_packet vrr_infopacket = {0}; 7896 struct amdgpu_device *adev = dm->adev; 7897 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc); 7898 unsigned long flags; 7899 bool pack_sdp_v1_3 = false; 7900 struct amdgpu_dm_connector *aconn; 7901 enum vrr_packet_type packet_type = PACKET_TYPE_VRR; 7902 7903 if (!new_stream) 7904 return; 7905 7906 /* 7907 * TODO: Determine why min/max totals and vrefresh can be 0 here. 7908 * For now it's sufficient to just guard against these conditions. 7909 */ 7910 7911 if (!new_stream->timing.h_total || !new_stream->timing.v_total) 7912 return; 7913 7914 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); 7915 vrr_params = acrtc->dm_irq_params.vrr_params; 7916 7917 if (surface) { 7918 mod_freesync_handle_preflip( 7919 dm->freesync_module, 7920 surface, 7921 new_stream, 7922 flip_timestamp_in_us, 7923 &vrr_params); 7924 7925 if (adev->family < AMDGPU_FAMILY_AI && 7926 amdgpu_dm_crtc_vrr_active(new_crtc_state)) { 7927 mod_freesync_handle_v_update(dm->freesync_module, 7928 new_stream, &vrr_params); 7929 7930 /* Need to call this before the frame ends. 
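			 * (Our inference, not an upstream statement: the
			 * vtotal min/max adjustment must land while the
			 * current frame is still scanning out for the
			 * stretch to apply to it, hence "before the frame
			 * ends".)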
*/ 7931 dc_stream_adjust_vmin_vmax(dm->dc, 7932 new_crtc_state->stream, 7933 &vrr_params.adjust); 7934 } 7935 } 7936 7937 aconn = (struct amdgpu_dm_connector *)new_stream->dm_stream_context; 7938 7939 if (aconn && (aconn->as_type == FREESYNC_TYPE_PCON_IN_WHITELIST || aconn->vsdb_info.replay_mode)) { 7940 pack_sdp_v1_3 = aconn->pack_sdp_v1_3; 7941 7942 if (aconn->vsdb_info.amd_vsdb_version == 1) 7943 packet_type = PACKET_TYPE_FS_V1; 7944 else if (aconn->vsdb_info.amd_vsdb_version == 2) 7945 packet_type = PACKET_TYPE_FS_V2; 7946 else if (aconn->vsdb_info.amd_vsdb_version == 3) 7947 packet_type = PACKET_TYPE_FS_V3; 7948 7949 mod_build_adaptive_sync_infopacket(new_stream, aconn->as_type, NULL, 7950 &new_stream->adaptive_sync_infopacket); 7951 } 7952 7953 mod_freesync_build_vrr_infopacket( 7954 dm->freesync_module, 7955 new_stream, 7956 &vrr_params, 7957 packet_type, 7958 TRANSFER_FUNC_UNKNOWN, 7959 &vrr_infopacket, 7960 pack_sdp_v1_3); 7961 7962 new_crtc_state->freesync_vrr_info_changed |= 7963 (memcmp(&new_crtc_state->vrr_infopacket, 7964 &vrr_infopacket, 7965 sizeof(vrr_infopacket)) != 0); 7966 7967 acrtc->dm_irq_params.vrr_params = vrr_params; 7968 new_crtc_state->vrr_infopacket = vrr_infopacket; 7969 7970 new_stream->vrr_infopacket = vrr_infopacket; 7971 new_stream->allow_freesync = mod_freesync_get_freesync_enabled(&vrr_params); 7972 7973 if (new_crtc_state->freesync_vrr_info_changed) 7974 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d", 7975 new_crtc_state->base.crtc->base.id, 7976 (int)new_crtc_state->base.vrr_enabled, 7977 (int)vrr_params.state); 7978 7979 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); 7980 } 7981 7982 static void update_stream_irq_parameters( 7983 struct amdgpu_display_manager *dm, 7984 struct dm_crtc_state *new_crtc_state) 7985 { 7986 struct dc_stream_state *new_stream = new_crtc_state->stream; 7987 struct mod_vrr_params vrr_params; 7988 struct mod_freesync_config config = new_crtc_state->freesync_config; 7989 struct amdgpu_device *adev = dm->adev; 7990 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc); 7991 unsigned long flags; 7992 7993 if (!new_stream) 7994 return; 7995 7996 /* 7997 * TODO: Determine why min/max totals and vrefresh can be 0 here. 7998 * For now it's sufficient to just guard against these conditions. 7999 */ 8000 if (!new_stream->timing.h_total || !new_stream->timing.v_total) 8001 return; 8002 8003 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); 8004 vrr_params = acrtc->dm_irq_params.vrr_params; 8005 8006 if (new_crtc_state->vrr_supported && 8007 config.min_refresh_in_uhz && 8008 config.max_refresh_in_uhz) { 8009 /* 8010 * if freesync compatible mode was set, config.state will be set 8011 * in atomic check 8012 */ 8013 if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz && 8014 (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) || 8015 new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) { 8016 vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz; 8017 vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz; 8018 vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz; 8019 vrr_params.state = VRR_STATE_ACTIVE_FIXED; 8020 } else { 8021 config.state = new_crtc_state->base.vrr_enabled ? 
8022 VRR_STATE_ACTIVE_VARIABLE : 8023 VRR_STATE_INACTIVE; 8024 } 8025 } else { 8026 config.state = VRR_STATE_UNSUPPORTED; 8027 } 8028 8029 mod_freesync_build_vrr_params(dm->freesync_module, 8030 new_stream, 8031 &config, &vrr_params); 8032 8033 new_crtc_state->freesync_config = config; 8034 /* Copy state for access from DM IRQ handler */ 8035 acrtc->dm_irq_params.freesync_config = config; 8036 acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes; 8037 acrtc->dm_irq_params.vrr_params = vrr_params; 8038 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); 8039 } 8040 8041 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state, 8042 struct dm_crtc_state *new_state) 8043 { 8044 bool old_vrr_active = amdgpu_dm_crtc_vrr_active(old_state); 8045 bool new_vrr_active = amdgpu_dm_crtc_vrr_active(new_state); 8046 8047 if (!old_vrr_active && new_vrr_active) { 8048 /* Transition VRR inactive -> active: 8049 * While VRR is active, we must not disable vblank irq, as a 8050 * reenable after disable would compute bogus vblank/pflip 8051 * timestamps if it likely happened inside display front-porch. 8052 * 8053 * We also need vupdate irq for the actual core vblank handling 8054 * at end of vblank. 8055 */ 8056 WARN_ON(amdgpu_dm_crtc_set_vupdate_irq(new_state->base.crtc, true) != 0); 8057 WARN_ON(drm_crtc_vblank_get(new_state->base.crtc) != 0); 8058 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n", 8059 __func__, new_state->base.crtc->base.id); 8060 } else if (old_vrr_active && !new_vrr_active) { 8061 /* Transition VRR active -> inactive: 8062 * Allow vblank irq disable again for fixed refresh rate. 8063 */ 8064 WARN_ON(amdgpu_dm_crtc_set_vupdate_irq(new_state->base.crtc, false) != 0); 8065 drm_crtc_vblank_put(new_state->base.crtc); 8066 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n", 8067 __func__, new_state->base.crtc->base.id); 8068 } 8069 } 8070 8071 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state) 8072 { 8073 struct drm_plane *plane; 8074 struct drm_plane_state *old_plane_state; 8075 int i; 8076 8077 /* 8078 * TODO: Make this per-stream so we don't issue redundant updates for 8079 * commits with multiple streams. 8080 */ 8081 for_each_old_plane_in_state(state, plane, old_plane_state, i) 8082 if (plane->type == DRM_PLANE_TYPE_CURSOR) 8083 amdgpu_dm_plane_handle_cursor_update(plane, old_plane_state); 8084 } 8085 8086 static inline uint32_t get_mem_type(struct drm_framebuffer *fb) 8087 { 8088 struct amdgpu_bo *abo = gem_to_amdgpu_bo(fb->obj[0]); 8089 8090 return abo->tbo.resource ? 
abo->tbo.resource->mem_type : 0; 8091 } 8092 8093 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, 8094 struct drm_device *dev, 8095 struct amdgpu_display_manager *dm, 8096 struct drm_crtc *pcrtc, 8097 bool wait_for_vblank) 8098 { 8099 u32 i; 8100 u64 timestamp_ns = ktime_get_ns(); 8101 struct drm_plane *plane; 8102 struct drm_plane_state *old_plane_state, *new_plane_state; 8103 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc); 8104 struct drm_crtc_state *new_pcrtc_state = 8105 drm_atomic_get_new_crtc_state(state, pcrtc); 8106 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state); 8107 struct dm_crtc_state *dm_old_crtc_state = 8108 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc)); 8109 int planes_count = 0, vpos, hpos; 8110 unsigned long flags; 8111 u32 target_vblank, last_flip_vblank; 8112 bool vrr_active = amdgpu_dm_crtc_vrr_active(acrtc_state); 8113 bool cursor_update = false; 8114 bool pflip_present = false; 8115 bool dirty_rects_changed = false; 8116 struct { 8117 struct dc_surface_update surface_updates[MAX_SURFACES]; 8118 struct dc_plane_info plane_infos[MAX_SURFACES]; 8119 struct dc_scaling_info scaling_infos[MAX_SURFACES]; 8120 struct dc_flip_addrs flip_addrs[MAX_SURFACES]; 8121 struct dc_stream_update stream_update; 8122 } *bundle; 8123 8124 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL); 8125 8126 if (!bundle) { 8127 drm_err(dev, "Failed to allocate update bundle\n"); 8128 goto cleanup; 8129 } 8130 8131 /* 8132 * Disable the cursor first if we're disabling all the planes. 8133 * It'll remain on the screen after the planes are re-enabled 8134 * if we don't. 8135 */ 8136 if (acrtc_state->active_planes == 0) 8137 amdgpu_dm_commit_cursors(state); 8138 8139 /* update planes when needed */ 8140 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { 8141 struct drm_crtc *crtc = new_plane_state->crtc; 8142 struct drm_crtc_state *new_crtc_state; 8143 struct drm_framebuffer *fb = new_plane_state->fb; 8144 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb; 8145 bool plane_needs_flip; 8146 struct dc_plane_state *dc_plane; 8147 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state); 8148 8149 /* Cursor plane is handled after stream updates */ 8150 if (plane->type == DRM_PLANE_TYPE_CURSOR) { 8151 if ((fb && crtc == pcrtc) || 8152 (old_plane_state->fb && old_plane_state->crtc == pcrtc)) 8153 cursor_update = true; 8154 8155 continue; 8156 } 8157 8158 if (!fb || !crtc || pcrtc != crtc) 8159 continue; 8160 8161 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc); 8162 if (!new_crtc_state->active) 8163 continue; 8164 8165 dc_plane = dm_new_plane_state->dc_state; 8166 if (!dc_plane) 8167 continue; 8168 8169 bundle->surface_updates[planes_count].surface = dc_plane; 8170 if (new_pcrtc_state->color_mgmt_changed) { 8171 bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction; 8172 bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func; 8173 bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix; 8174 } 8175 8176 amdgpu_dm_plane_fill_dc_scaling_info(dm->adev, new_plane_state, 8177 &bundle->scaling_infos[planes_count]); 8178 8179 bundle->surface_updates[planes_count].scaling_info = 8180 &bundle->scaling_infos[planes_count]; 8181 8182 plane_needs_flip = old_plane_state->fb && new_plane_state->fb; 8183 8184 pflip_present = pflip_present || plane_needs_flip; 8185 8186 if (!plane_needs_flip) { 8187 
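			/*
			 * No old/new FB pair, so there is nothing to flip:
			 * count the plane so the scaling/color updates
			 * gathered above still reach DC, and move on
			 * (explanatory note, not in the original source).
			 */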
planes_count += 1;
			continue;
		}

		fill_dc_plane_info_and_addr(
			dm->adev, new_plane_state,
			afb->tiling_flags,
			&bundle->plane_infos[planes_count],
			&bundle->flip_addrs[planes_count].address,
			afb->tmz_surface, false);

		drm_dbg_state(state->dev, "plane: id=%d dcc_en=%d\n",
			      new_plane_state->plane->index,
			      bundle->plane_infos[planes_count].dcc.enable);

		bundle->surface_updates[planes_count].plane_info =
			&bundle->plane_infos[planes_count];

		if (acrtc_state->stream->link->psr_settings.psr_feature_enabled ||
		    acrtc_state->stream->link->replay_settings.replay_feature_enabled) {
			fill_dc_dirty_rects(plane, old_plane_state,
					    new_plane_state, new_crtc_state,
					    &bundle->flip_addrs[planes_count],
					    &dirty_rects_changed);

			/*
			 * If the dirty regions changed, PSR-SU needs to be
			 * disabled temporarily and re-enabled once the dirty
			 * regions are stable, to avoid video glitches.
			 * PSR-SU will be re-enabled in vblank_control_worker()
			 * if the user pauses the video while PSR-SU is
			 * disabled.
			 */
			if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 &&
			    acrtc_attach->dm_irq_params.allow_psr_entry &&
#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
			    !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) &&
#endif
			    dirty_rects_changed) {
				mutex_lock(&dm->dc_lock);
				acrtc_state->stream->link->psr_settings.psr_dirty_rects_change_timestamp_ns =
					timestamp_ns;
				if (acrtc_state->stream->link->psr_settings.psr_allow_active)
					amdgpu_dm_psr_disable(acrtc_state->stream);
				mutex_unlock(&dm->dc_lock);
			}
		}

		/*
		 * Only allow immediate flips for fast updates that don't
		 * change memory domain, FB pitch, DCC state, rotation or
		 * mirroring.
		 *
		 * dm_crtc_helper_atomic_check() only accepts async flips with
		 * fast updates.
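		 *
		 * For instance (our example): an async flip whose new FB
		 * lives in GTT while the old one was in VRAM changes the
		 * memory domain, so get_mem_type() differs and the flip
		 * below is demoted to a vsynced one, with a one-time warning.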
8240 */ 8241 if (crtc->state->async_flip && 8242 (acrtc_state->update_type != UPDATE_TYPE_FAST || 8243 get_mem_type(old_plane_state->fb) != get_mem_type(fb))) 8244 drm_warn_once(state->dev, 8245 "[PLANE:%d:%s] async flip with non-fast update\n", 8246 plane->base.id, plane->name); 8247 8248 bundle->flip_addrs[planes_count].flip_immediate = 8249 crtc->state->async_flip && 8250 acrtc_state->update_type == UPDATE_TYPE_FAST && 8251 get_mem_type(old_plane_state->fb) == get_mem_type(fb); 8252 8253 timestamp_ns = ktime_get_ns(); 8254 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000); 8255 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count]; 8256 bundle->surface_updates[planes_count].surface = dc_plane; 8257 8258 if (!bundle->surface_updates[planes_count].surface) { 8259 DRM_ERROR("No surface for CRTC: id=%d\n", 8260 acrtc_attach->crtc_id); 8261 continue; 8262 } 8263 8264 if (plane == pcrtc->primary) 8265 update_freesync_state_on_stream( 8266 dm, 8267 acrtc_state, 8268 acrtc_state->stream, 8269 dc_plane, 8270 bundle->flip_addrs[planes_count].flip_timestamp_in_us); 8271 8272 drm_dbg_state(state->dev, "%s Flipping to hi: 0x%x, low: 0x%x\n", 8273 __func__, 8274 bundle->flip_addrs[planes_count].address.grph.addr.high_part, 8275 bundle->flip_addrs[planes_count].address.grph.addr.low_part); 8276 8277 planes_count += 1; 8278 8279 } 8280 8281 if (pflip_present) { 8282 if (!vrr_active) { 8283 /* Use old throttling in non-vrr fixed refresh rate mode 8284 * to keep flip scheduling based on target vblank counts 8285 * working in a backwards compatible way, e.g., for 8286 * clients using the GLX_OML_sync_control extension or 8287 * DRI3/Present extension with defined target_msc. 8288 */ 8289 last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc); 8290 } else { 8291 /* For variable refresh rate mode only: 8292 * Get vblank of last completed flip to avoid > 1 vrr 8293 * flips per video frame by use of throttling, but allow 8294 * flip programming anywhere in the possibly large 8295 * variable vrr vblank interval for fine-grained flip 8296 * timing control and more opportunity to avoid stutter 8297 * on late submission of flips. 8298 */ 8299 spin_lock_irqsave(&pcrtc->dev->event_lock, flags); 8300 last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank; 8301 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags); 8302 } 8303 8304 target_vblank = last_flip_vblank + wait_for_vblank; 8305 8306 /* 8307 * Wait until we're out of the vertical blank period before the one 8308 * targeted by the flip 8309 */ 8310 while ((acrtc_attach->enabled && 8311 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id, 8312 0, &vpos, &hpos, NULL, 8313 NULL, &pcrtc->hwmode) 8314 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) == 8315 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) && 8316 (int)(target_vblank - 8317 amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) { 8318 usleep_range(1000, 1100); 8319 } 8320 8321 /** 8322 * Prepare the flip event for the pageflip interrupt to handle. 8323 * 8324 * This only works in the case where we've already turned on the 8325 * appropriate hardware blocks (eg. HUBP) so in the transition case 8326 * from 0 -> n planes we have to skip a hardware generated event 8327 * and rely on sending it from software. 
8328 */ 8329 if (acrtc_attach->base.state->event && 8330 acrtc_state->active_planes > 0) { 8331 drm_crtc_vblank_get(pcrtc); 8332 8333 spin_lock_irqsave(&pcrtc->dev->event_lock, flags); 8334 8335 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE); 8336 prepare_flip_isr(acrtc_attach); 8337 8338 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags); 8339 } 8340 8341 if (acrtc_state->stream) { 8342 if (acrtc_state->freesync_vrr_info_changed) 8343 bundle->stream_update.vrr_infopacket = 8344 &acrtc_state->stream->vrr_infopacket; 8345 } 8346 } else if (cursor_update && acrtc_state->active_planes > 0 && 8347 acrtc_attach->base.state->event) { 8348 drm_crtc_vblank_get(pcrtc); 8349 8350 spin_lock_irqsave(&pcrtc->dev->event_lock, flags); 8351 8352 acrtc_attach->event = acrtc_attach->base.state->event; 8353 acrtc_attach->base.state->event = NULL; 8354 8355 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags); 8356 } 8357 8358 /* Update the planes if changed or disable if we don't have any. */ 8359 if ((planes_count || acrtc_state->active_planes == 0) && 8360 acrtc_state->stream) { 8361 /* 8362 * If PSR or idle optimizations are enabled then flush out 8363 * any pending work before hardware programming. 8364 */ 8365 if (dm->vblank_control_workqueue) 8366 flush_workqueue(dm->vblank_control_workqueue); 8367 8368 bundle->stream_update.stream = acrtc_state->stream; 8369 if (new_pcrtc_state->mode_changed) { 8370 bundle->stream_update.src = acrtc_state->stream->src; 8371 bundle->stream_update.dst = acrtc_state->stream->dst; 8372 } 8373 8374 if (new_pcrtc_state->color_mgmt_changed) { 8375 /* 8376 * TODO: This isn't fully correct since we've actually 8377 * already modified the stream in place. 8378 */ 8379 bundle->stream_update.gamut_remap = 8380 &acrtc_state->stream->gamut_remap_matrix; 8381 bundle->stream_update.output_csc_transform = 8382 &acrtc_state->stream->csc_color_matrix; 8383 bundle->stream_update.out_transfer_func = 8384 acrtc_state->stream->out_transfer_func; 8385 } 8386 8387 acrtc_state->stream->abm_level = acrtc_state->abm_level; 8388 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level) 8389 bundle->stream_update.abm_level = &acrtc_state->abm_level; 8390 8391 mutex_lock(&dm->dc_lock); 8392 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) && 8393 acrtc_state->stream->link->psr_settings.psr_allow_active) 8394 amdgpu_dm_psr_disable(acrtc_state->stream); 8395 mutex_unlock(&dm->dc_lock); 8396 8397 /* 8398 * If FreeSync state on the stream has changed then we need to 8399 * re-adjust the min/max bounds now that DC doesn't handle this 8400 * as part of commit. 8401 */ 8402 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) { 8403 spin_lock_irqsave(&pcrtc->dev->event_lock, flags); 8404 dc_stream_adjust_vmin_vmax( 8405 dm->dc, acrtc_state->stream, 8406 &acrtc_attach->dm_irq_params.vrr_params.adjust); 8407 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags); 8408 } 8409 mutex_lock(&dm->dc_lock); 8410 update_planes_and_stream_adapter(dm->dc, 8411 acrtc_state->update_type, 8412 planes_count, 8413 acrtc_state->stream, 8414 &bundle->stream_update, 8415 bundle->surface_updates); 8416 8417 /** 8418 * Enable or disable the interrupts on the backend. 8419 * 8420 * Most pipes are put into power gating when unused. 8421 * 8422 * When power gating is enabled on a pipe we lose the 8423 * interrupt enablement state when power gating is disabled. 
8424 * 8425 * So we need to update the IRQ control state in hardware 8426 * whenever the pipe turns on (since it could be previously 8427 * power gated) or off (since some pipes can't be power gated 8428 * on some ASICs). 8429 */ 8430 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes) 8431 dm_update_pflip_irq_state(drm_to_adev(dev), 8432 acrtc_attach); 8433 8434 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) && 8435 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED && 8436 !acrtc_state->stream->link->psr_settings.psr_feature_enabled) 8437 amdgpu_dm_link_setup_psr(acrtc_state->stream); 8438 8439 /* Decrement skip count when PSR is enabled and we're doing fast updates. */ 8440 if (acrtc_state->update_type == UPDATE_TYPE_FAST && 8441 acrtc_state->stream->link->psr_settings.psr_feature_enabled) { 8442 struct amdgpu_dm_connector *aconn = 8443 (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context; 8444 8445 if (aconn->psr_skip_count > 0) 8446 aconn->psr_skip_count--; 8447 8448 /* Allow PSR when skip count is 0. */ 8449 acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count; 8450 8451 /* 8452 * If sink supports PSR SU, there is no need to rely on 8453 * a vblank event disable request to enable PSR. PSR SU 8454 * can be enabled immediately once OS demonstrates an 8455 * adequate number of fast atomic commits to notify KMD 8456 * of update events. See `vblank_control_worker()`. 8457 */ 8458 if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 && 8459 acrtc_attach->dm_irq_params.allow_psr_entry && 8460 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY 8461 !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) && 8462 #endif 8463 !acrtc_state->stream->link->psr_settings.psr_allow_active && 8464 (timestamp_ns - 8465 acrtc_state->stream->link->psr_settings.psr_dirty_rects_change_timestamp_ns) > 8466 500000000) 8467 amdgpu_dm_psr_enable(acrtc_state->stream); 8468 } else { 8469 acrtc_attach->dm_irq_params.allow_psr_entry = false; 8470 } 8471 8472 mutex_unlock(&dm->dc_lock); 8473 } 8474 8475 /* 8476 * Update cursor state *after* programming all the planes. 8477 * This avoids redundant programming in the case where we're going 8478 * to be disabling a single plane - those pipes are being disabled. 8479 */ 8480 if (acrtc_state->active_planes) 8481 amdgpu_dm_commit_cursors(state); 8482 8483 cleanup: 8484 kfree(bundle); 8485 } 8486 8487 static void amdgpu_dm_commit_audio(struct drm_device *dev, 8488 struct drm_atomic_state *state) 8489 { 8490 struct amdgpu_device *adev = drm_to_adev(dev); 8491 struct amdgpu_dm_connector *aconnector; 8492 struct drm_connector *connector; 8493 struct drm_connector_state *old_con_state, *new_con_state; 8494 struct drm_crtc_state *new_crtc_state; 8495 struct dm_crtc_state *new_dm_crtc_state; 8496 const struct dc_stream_status *status; 8497 int i, inst; 8498 8499 /* Notify device removals. */ 8500 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { 8501 if (old_con_state->crtc != new_con_state->crtc) { 8502 /* CRTC changes require notification. 
*/ 8503 goto notify; 8504 } 8505 8506 if (!new_con_state->crtc) 8507 continue; 8508 8509 new_crtc_state = drm_atomic_get_new_crtc_state( 8510 state, new_con_state->crtc); 8511 8512 if (!new_crtc_state) 8513 continue; 8514 8515 if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) 8516 continue; 8517 8518 notify: 8519 aconnector = to_amdgpu_dm_connector(connector); 8520 8521 mutex_lock(&adev->dm.audio_lock); 8522 inst = aconnector->audio_inst; 8523 aconnector->audio_inst = -1; 8524 mutex_unlock(&adev->dm.audio_lock); 8525 8526 amdgpu_dm_audio_eld_notify(adev, inst); 8527 } 8528 8529 /* Notify audio device additions. */ 8530 for_each_new_connector_in_state(state, connector, new_con_state, i) { 8531 if (!new_con_state->crtc) 8532 continue; 8533 8534 new_crtc_state = drm_atomic_get_new_crtc_state( 8535 state, new_con_state->crtc); 8536 8537 if (!new_crtc_state) 8538 continue; 8539 8540 if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) 8541 continue; 8542 8543 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state); 8544 if (!new_dm_crtc_state->stream) 8545 continue; 8546 8547 status = dc_stream_get_status(new_dm_crtc_state->stream); 8548 if (!status) 8549 continue; 8550 8551 aconnector = to_amdgpu_dm_connector(connector); 8552 8553 mutex_lock(&adev->dm.audio_lock); 8554 inst = status->audio_inst; 8555 aconnector->audio_inst = inst; 8556 mutex_unlock(&adev->dm.audio_lock); 8557 8558 amdgpu_dm_audio_eld_notify(adev, inst); 8559 } 8560 } 8561 8562 /* 8563 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC 8564 * @crtc_state: the DRM CRTC state 8565 * @stream_state: the DC stream state. 8566 * 8567 * Copy the mirrored transient state flags from DRM, to DC. It is used to bring 8568 * a dc_stream_state's flags in sync with a drm_crtc_state's flags. 
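 *
 * Currently only mode_changed is mirrored: it is set whenever DRM decided
 * the CRTC needs a full modeset.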
8569 */ 8570 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state, 8571 struct dc_stream_state *stream_state) 8572 { 8573 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state); 8574 } 8575 8576 static void amdgpu_dm_commit_streams(struct drm_atomic_state *state, 8577 struct dc_state *dc_state) 8578 { 8579 struct drm_device *dev = state->dev; 8580 struct amdgpu_device *adev = drm_to_adev(dev); 8581 struct amdgpu_display_manager *dm = &adev->dm; 8582 struct drm_crtc *crtc; 8583 struct drm_crtc_state *old_crtc_state, *new_crtc_state; 8584 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state; 8585 bool mode_set_reset_required = false; 8586 u32 i; 8587 8588 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, 8589 new_crtc_state, i) { 8590 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); 8591 8592 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 8593 8594 if (old_crtc_state->active && 8595 (!new_crtc_state->active || 8596 drm_atomic_crtc_needs_modeset(new_crtc_state))) { 8597 manage_dm_interrupts(adev, acrtc, false); 8598 dc_stream_release(dm_old_crtc_state->stream); 8599 } 8600 } 8601 8602 drm_atomic_helper_calc_timestamping_constants(state); 8603 8604 /* update changed items */ 8605 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 8606 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); 8607 8608 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 8609 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 8610 8611 drm_dbg_state(state->dev, 8612 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, planes_changed:%d, mode_changed:%d,active_changed:%d,connectors_changed:%d\n", 8613 acrtc->crtc_id, 8614 new_crtc_state->enable, 8615 new_crtc_state->active, 8616 new_crtc_state->planes_changed, 8617 new_crtc_state->mode_changed, 8618 new_crtc_state->active_changed, 8619 new_crtc_state->connectors_changed); 8620 8621 /* Disable cursor if disabling crtc */ 8622 if (old_crtc_state->active && !new_crtc_state->active) { 8623 struct dc_cursor_position position; 8624 8625 memset(&position, 0, sizeof(position)); 8626 mutex_lock(&dm->dc_lock); 8627 dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position); 8628 mutex_unlock(&dm->dc_lock); 8629 } 8630 8631 /* Copy all transient state flags into dc state */ 8632 if (dm_new_crtc_state->stream) { 8633 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base, 8634 dm_new_crtc_state->stream); 8635 } 8636 8637 /* handles headless hotplug case, updating new_state and 8638 * aconnector as needed 8639 */ 8640 8641 if (amdgpu_dm_crtc_modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) { 8642 8643 DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc); 8644 8645 if (!dm_new_crtc_state->stream) { 8646 /* 8647 * this could happen because of issues with 8648 * userspace notifications delivery. 8649 * In this case userspace tries to set mode on 8650 * display which is disconnected in fact. 8651 * dc_sink is NULL in this case on aconnector. 8652 * We expect reset mode will come soon. 
 *
				 * This can also happen when an unplug is done
				 * while the resume sequence is still
				 * completing.
				 *
				 * In this case, we want to pretend we still
				 * have a sink to keep the pipe running so that
				 * hw state is consistent with the sw state.
				 */
				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
						 __func__, acrtc->base.base.id);
				continue;
			}

			if (dm_old_crtc_state->stream)
				remove_stream(adev, acrtc, dm_old_crtc_state->stream);

			pm_runtime_get_noresume(dev->dev);

			acrtc->enabled = true;
			acrtc->hw_mode = new_crtc_state->mode;
			crtc->hwmode = new_crtc_state->mode;
			mode_set_reset_required = true;
		} else if (modereset_required(new_crtc_state)) {
			DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
			/* i.e. reset mode */
			if (dm_old_crtc_state->stream)
				remove_stream(adev, acrtc, dm_old_crtc_state->stream);

			mode_set_reset_required = true;
		}
	} /* for_each_crtc_in_state() */

	/* If there was a mode set or reset, disable eDP PSR */
	if (mode_set_reset_required) {
		if (dm->vblank_control_workqueue)
			flush_workqueue(dm->vblank_control_workqueue);

		amdgpu_dm_psr_disable_all(dm);
	}

	dm_enable_per_frame_crtc_master_sync(dc_state);
	mutex_lock(&dm->dc_lock);
	WARN_ON(!dc_commit_streams(dm->dc, dc_state->streams, dc_state->stream_count));

	/* Allow idle optimization when vblank count is 0 for display off */
	if (dm->active_vblank_irq_count == 0)
		dc_allow_idle_optimizations(dm->dc, true);
	mutex_unlock(&dm->dc_lock);

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (dm_new_crtc_state->stream != NULL) {
			const struct dc_stream_status *status =
				dc_stream_get_status(dm_new_crtc_state->stream);

			if (!status)
				status = dc_stream_get_status_from_state(dc_state,
									 dm_new_crtc_state->stream);
			if (!status)
				drm_err(dev,
					"got no status for stream %p on acrtc%p\n",
					dm_new_crtc_state->stream, acrtc);
			else
				acrtc->otg_inst = status->primary_otg_inst;
		}
	}
}

/**
 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
 * @state: The atomic state to commit
 *
 * This will tell DC to commit the constructed DC state from atomic_check,
 * programming the hardware. Any failure here implies a hardware failure, since
 * atomic check should have filtered anything non-kosher.
8731 */ 8732 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) 8733 { 8734 struct drm_device *dev = state->dev; 8735 struct amdgpu_device *adev = drm_to_adev(dev); 8736 struct amdgpu_display_manager *dm = &adev->dm; 8737 struct dm_atomic_state *dm_state; 8738 struct dc_state *dc_state = NULL; 8739 u32 i, j; 8740 struct drm_crtc *crtc; 8741 struct drm_crtc_state *old_crtc_state, *new_crtc_state; 8742 unsigned long flags; 8743 bool wait_for_vblank = true; 8744 struct drm_connector *connector; 8745 struct drm_connector_state *old_con_state, *new_con_state; 8746 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state; 8747 int crtc_disable_count = 0; 8748 8749 trace_amdgpu_dm_atomic_commit_tail_begin(state); 8750 8751 if (dm->dc->caps.ips_support) { 8752 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { 8753 if (new_con_state->crtc && 8754 new_con_state->crtc->state->active && 8755 drm_atomic_crtc_needs_modeset(new_con_state->crtc->state)) { 8756 dc_dmub_srv_exit_low_power_state(dm->dc); 8757 break; 8758 } 8759 } 8760 } 8761 8762 drm_atomic_helper_update_legacy_modeset_state(dev, state); 8763 drm_dp_mst_atomic_wait_for_dependencies(state); 8764 8765 dm_state = dm_atomic_get_new_state(state); 8766 if (dm_state && dm_state->context) { 8767 dc_state = dm_state->context; 8768 amdgpu_dm_commit_streams(state, dc_state); 8769 } 8770 8771 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { 8772 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); 8773 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc); 8774 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 8775 8776 if (!adev->dm.hdcp_workqueue) 8777 continue; 8778 8779 pr_debug("[HDCP_DM] -------------- i : %x ----------\n", i); 8780 8781 if (!connector) 8782 continue; 8783 8784 pr_debug("[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n", 8785 connector->index, connector->status, connector->dpms); 8786 pr_debug("[HDCP_DM] state protection old: %x new: %x\n", 8787 old_con_state->content_protection, new_con_state->content_protection); 8788 8789 if (aconnector->dc_sink) { 8790 if (aconnector->dc_sink->sink_signal != SIGNAL_TYPE_VIRTUAL && 8791 aconnector->dc_sink->sink_signal != SIGNAL_TYPE_NONE) { 8792 pr_debug("[HDCP_DM] pipe_ctx dispname=%s\n", 8793 aconnector->dc_sink->edid_caps.display_name); 8794 } 8795 } 8796 8797 new_crtc_state = NULL; 8798 old_crtc_state = NULL; 8799 8800 if (acrtc) { 8801 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base); 8802 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base); 8803 } 8804 8805 if (old_crtc_state) 8806 pr_debug("old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n", 8807 old_crtc_state->enable, 8808 old_crtc_state->active, 8809 old_crtc_state->mode_changed, 8810 old_crtc_state->active_changed, 8811 old_crtc_state->connectors_changed); 8812 8813 if (new_crtc_state) 8814 pr_debug("NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n", 8815 new_crtc_state->enable, 8816 new_crtc_state->active, 8817 new_crtc_state->mode_changed, 8818 new_crtc_state->active_changed, 8819 new_crtc_state->connectors_changed); 8820 } 8821 8822 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { 8823 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); 8824 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc); 8825 struct 
amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

		if (!adev->dm.hdcp_workqueue)
			continue;

		new_crtc_state = NULL;
		old_crtc_state = NULL;

		if (acrtc) {
			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
		}

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
			dm_new_con_state->update_hdcp = true;
			continue;
		}

		if (is_content_protection_different(new_crtc_state, old_crtc_state, new_con_state,
						    old_con_state, connector, adev->dm.hdcp_workqueue)) {
			/* When a display is unplugged from an MST hub, the
			 * connector will be destroyed within
			 * dm_dp_mst_connector_destroy and its HDCP properties
			 * (type, undesired, desired, enabled) will be lost.
			 * So, save the HDCP properties into hdcp_work within
			 * amdgpu_dm_atomic_commit_tail. If the same display is
			 * plugged back with the same display index, its HDCP
			 * properties will be retrieved from hdcp_work within
			 * dm_dp_mst_get_modes.
			 */

			bool enable_encryption = false;

			if (new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED)
				enable_encryption = true;

			if (aconnector->dc_link && aconnector->dc_sink &&
			    aconnector->dc_link->type == dc_connection_mst_branch) {
				struct hdcp_workqueue *hdcp_work = adev->dm.hdcp_workqueue;
				struct hdcp_workqueue *hdcp_w =
					&hdcp_work[aconnector->dc_link->link_index];

				hdcp_w->hdcp_content_type[connector->index] =
					new_con_state->hdcp_content_type;
				hdcp_w->content_protection[connector->index] =
					new_con_state->content_protection;
			}

			if (new_crtc_state && new_crtc_state->mode_changed &&
			    new_con_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED)
				enable_encryption = true;

			DRM_INFO("[HDCP_DM] hdcp_update_display enable_encryption = %x\n", enable_encryption);

			hdcp_update_display(
				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
				new_con_state->hdcp_content_type, enable_encryption);
		}
	}

	/* Handle connector state changes */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
		struct dc_surface_update *dummy_updates;
		struct dc_stream_update stream_update;
		struct dc_info_packet hdr_packet;
		struct dc_stream_status *status = NULL;
		bool abm_changed, hdr_changed, scaling_changed;

		memset(&stream_update, 0, sizeof(stream_update));

		if (acrtc) {
			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
		}

		/* Skip any modesets/resets */
		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
			continue;

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
	/* Handle connector state changes */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
		struct dc_surface_update *dummy_updates;
		struct dc_stream_update stream_update;
		struct dc_info_packet hdr_packet;
		struct dc_stream_status *status = NULL;
		bool abm_changed, hdr_changed, scaling_changed;

		memset(&stream_update, 0, sizeof(stream_update));

		if (acrtc) {
			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
		}

		/* Skip any modesets/resets */
		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
			continue;

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		scaling_changed = is_scaling_state_different(dm_new_con_state,
							     dm_old_con_state);

		abm_changed = dm_new_crtc_state->abm_level !=
			      dm_old_crtc_state->abm_level;

		hdr_changed =
			!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);

		if (!scaling_changed && !abm_changed && !hdr_changed)
			continue;

		stream_update.stream = dm_new_crtc_state->stream;
		if (scaling_changed) {
			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
					dm_new_con_state, dm_new_crtc_state->stream);

			stream_update.src = dm_new_crtc_state->stream->src;
			stream_update.dst = dm_new_crtc_state->stream->dst;
		}

		if (abm_changed) {
			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;

			stream_update.abm_level = &dm_new_crtc_state->abm_level;
		}

		if (hdr_changed) {
			fill_hdr_info_packet(new_con_state, &hdr_packet);
			stream_update.hdr_static_metadata = &hdr_packet;
		}

		status = dc_stream_get_status(dm_new_crtc_state->stream);

		if (WARN_ON(!status))
			continue;

		WARN_ON(!status->plane_count);

		/*
		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
		 * Here we create an empty update on each plane.
		 * To fix this, DC should permit updating only stream properties.
		 */
		dummy_updates = kcalloc(MAX_SURFACES, sizeof(*dummy_updates), GFP_ATOMIC);
		if (!dummy_updates) {
			DRM_ERROR("Failed to allocate memory for dummy_updates\n");
			continue;
		}
		for (j = 0; j < status->plane_count; j++)
			dummy_updates[j].surface = status->plane_states[0];

		mutex_lock(&dm->dc_lock);
		dc_update_planes_and_stream(dm->dc,
					    dummy_updates,
					    status->plane_count,
					    dm_new_crtc_state->stream,
					    &stream_update);
		mutex_unlock(&dm->dc_lock);
		kfree(dummy_updates);
	}
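	/*
	 * The loop above is effectively a stream-only update: only
	 * stream_update carries real changes (src/dst, abm_level, HDR static
	 * metadata), while the surface updates are placeholders that point
	 * back at existing plane state. If DC ever grows a stream-only entry
	 * point, the dummy allocation could be dropped; a hypothetical call
	 * shape (not a real DC API) would be:
	 *
	 *   dc_update_stream(dm->dc, dm_new_crtc_state->stream, &stream_update);
	 */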
	/*
	 * Enable interrupts for CRTCs that are newly enabled or went through
	 * a modeset. This is intentionally deferred until after the front end
	 * state is modified, to wait until the OTG is on, so that the IRQ
	 * handlers don't access stale or invalid state.
	 */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
#ifdef CONFIG_DEBUG_FS
		enum amdgpu_dm_pipe_crc_source cur_crc_src;
#endif
		/* Count number of newly disabled CRTCs for dropping PM refs later. */
		if (old_crtc_state->active && !new_crtc_state->active)
			crtc_disable_count++;

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		/* For freesync config update on crtc state and params for irq */
		update_stream_irq_parameters(dm, dm_new_crtc_state);

#ifdef CONFIG_DEBUG_FS
		spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
		cur_crc_src = acrtc->dm_irq_params.crc_src;
		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
#endif

		if (new_crtc_state->active &&
		    (!old_crtc_state->active ||
		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
			dc_stream_retain(dm_new_crtc_state->stream);
			acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
			manage_dm_interrupts(adev, acrtc, true);
		}
		/* Handle vrr on->off / off->on transitions */
		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state, dm_new_crtc_state);

#ifdef CONFIG_DEBUG_FS
		if (new_crtc_state->active &&
		    (!old_crtc_state->active ||
		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
			/*
			 * Frontend may have changed so reapply the CRC capture
			 * settings for the stream.
			 */
			if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
				if (amdgpu_dm_crc_window_is_activated(crtc)) {
					spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
					acrtc->dm_irq_params.window_param.update_win = true;

					/*
					 * It takes 2 frames for HW to stably generate CRC when
					 * resuming from suspend, so we set skip_frame_cnt to 2.
					 */
					acrtc->dm_irq_params.window_param.skip_frame_cnt = 2;
					spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
				}
#endif
				if (amdgpu_dm_crtc_configure_crc_source(
					crtc, dm_new_crtc_state, cur_crc_src))
					DRM_DEBUG_DRIVER("Failed to configure crc source");
			}
		}
#endif
	}

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
		if (new_crtc_state->async_flip)
			wait_for_vblank = false;

	/* Update planes when needed per crtc */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (dm_new_crtc_state->stream)
			amdgpu_dm_commit_planes(state, dev, dm, crtc, wait_for_vblank);
	}
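	/*
	 * All plane programming for this commit has been submitted at this
	 * point. Note that a single async flip anywhere in the state cleared
	 * wait_for_vblank above, which opts the entire commit out of the
	 * flip_done wait further down.
	 */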
	/* Update audio instances for each connector */
	amdgpu_dm_commit_audio(dev, state);

	/* Restore the backlight level */
	for (i = 0; i < dm->num_of_edps; i++) {
		if (dm->backlight_dev[i] &&
		    (dm->actual_brightness[i] != dm->brightness[i]))
			amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
	}

	/*
	 * Send a vblank event for every event not handled during the flip and
	 * mark the event consumed for drm_atomic_helper_commit_hw_done().
	 */
	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {

		if (new_crtc_state->event)
			drm_send_event_locked(dev, &new_crtc_state->event->base);

		new_crtc_state->event = NULL;
	}
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	/* Signal HW programming completion */
	drm_atomic_helper_commit_hw_done(state);

	if (wait_for_vblank)
		drm_atomic_helper_wait_for_flip_done(dev, state);

	drm_atomic_helper_cleanup_planes(dev, state);

	/* Don't free the memory if we are hitting this as part of suspend.
	 * This way we don't free any memory during suspend; see
	 * amdgpu_bo_free_kernel(). The memory will be freed in the first
	 * non-suspend modeset or when the driver is torn down.
	 */
	if (!adev->in_suspend) {
		/* Return the stolen VGA memory back to VRAM */
		if (!adev->mman.keep_stolen_vga_memory)
			amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
		amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
	}

	/*
	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
	 * so we can put the GPU into runtime suspend if we're not driving any
	 * displays anymore.
	 */
	for (i = 0; i < crtc_disable_count; i++)
		pm_runtime_put_autosuspend(dev->dev);
	pm_runtime_mark_last_busy(dev->dev);
}

static int dm_force_atomic_commit(struct drm_connector *connector)
{
	int ret = 0;
	struct drm_device *ddev = connector->dev;
	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
	struct drm_plane *plane = disconnected_acrtc->base.primary;
	struct drm_connector_state *conn_state;
	struct drm_crtc_state *crtc_state;
	struct drm_plane_state *plane_state;

	if (!state)
		return -ENOMEM;

	state->acquire_ctx = ddev->mode_config.acquire_ctx;

	/* Construct an atomic state to restore the previous display settings */

	/*
	 * Attach connectors to drm_atomic_state
	 */
	conn_state = drm_atomic_get_connector_state(state, connector);

	ret = PTR_ERR_OR_ZERO(conn_state);
	if (ret)
		goto out;

	/* Attach crtc to drm_atomic_state */
	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);

	ret = PTR_ERR_OR_ZERO(crtc_state);
	if (ret)
		goto out;

	/* Force a restore */
	crtc_state->mode_changed = true;

	/* Attach plane to drm_atomic_state */
	plane_state = drm_atomic_get_plane_state(state, plane);

	ret = PTR_ERR_OR_ZERO(plane_state);
	if (ret)
		goto out;

	/* Call commit internally with the state we just constructed */
	ret = drm_atomic_commit(state);

out:
	drm_atomic_state_put(state);
	if (ret)
		DRM_ERROR("Restoring old state failed with %i\n", ret);

	return ret;
}
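/*
 * dm_force_atomic_commit() above builds the smallest state that will
 * reprogram a connector: the connector itself, its CRTC (with mode_changed
 * forced so the full modeset path runs), and the primary plane.
 * dm_restore_drm_connector_state() below uses it when a sink changes behind
 * userspace's back.
 */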
/*
 * This function handles all cases when a set mode does not come upon hotplug.
 * This includes when a display is unplugged and then plugged back into the
 * same port, and when running without usermode desktop manager support.
 */
void dm_restore_drm_connector_state(struct drm_device *dev,
				    struct drm_connector *connector)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct amdgpu_crtc *disconnected_acrtc;
	struct dm_crtc_state *acrtc_state;

	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
		return;

	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
	if (!disconnected_acrtc)
		return;

	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
	if (!acrtc_state->stream)
		return;

	/*
	 * If the previous sink is not released and is different from the
	 * current one, we deduce we are in a state where we cannot rely on a
	 * usermode call to turn on the display, so we do it here.
	 */
	if (acrtc_state->stream->sink != aconnector->dc_sink)
		dm_force_atomic_commit(&aconnector->base);
}

/*
 * Grabs all modesetting locks to serialize against any blocking commits,
 * and waits for completion of all non-blocking commits.
 */
static int do_aquire_global_lock(struct drm_device *dev,
				 struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_commit *commit;
	long ret;

	/*
	 * Adding all modeset locks to acquire_ctx will
	 * ensure that when the framework releases them, the
	 * extra locks we are taking here will also get released.
	 */
	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
	if (ret)
		return ret;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		spin_lock(&crtc->commit_lock);
		commit = list_first_entry_or_null(&crtc->commit_list,
						  struct drm_crtc_commit, commit_entry);
		if (commit)
			drm_crtc_commit_get(commit);
		spin_unlock(&crtc->commit_lock);

		if (!commit)
			continue;

		/*
		 * Make sure all pending HW programming completed and
		 * page flips done
		 */
		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);

		if (ret > 0)
			ret = wait_for_completion_interruptible_timeout(
					&commit->flip_done, 10*HZ);

		if (ret == 0)
			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
				  crtc->base.id, crtc->name);

		drm_crtc_commit_put(commit);
	}

	return ret < 0 ? ret : 0;
}
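/*
 * The freesync helpers below track refresh rates in micro-Hz (uHz) to avoid
 * rounding error. As a worked example of set_freesync_fixed_config()'s
 * arithmetic, assume a 1920x1080 mode with a 148500 kHz pixel clock,
 * htotal 2200 and vtotal 1125:
 *
 *   fixed_refresh_in_uhz = clock * 1000 * 1000000 / (htotal * vtotal)
 *                        = 148500000 * 1000000 / 2475000
 *                        = 60000000 uHz (i.e. 60 Hz)
 */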
static void get_freesync_config_for_crtc(
	struct dm_crtc_state *new_crtc_state,
	struct dm_connector_state *new_con_state)
{
	struct mod_freesync_config config = {0};
	struct amdgpu_dm_connector *aconnector =
		to_amdgpu_dm_connector(new_con_state->base.connector);
	struct drm_display_mode *mode = &new_crtc_state->base.mode;
	int vrefresh = drm_mode_vrefresh(mode);
	bool fs_vid_mode = false;

	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
					vrefresh >= aconnector->min_vfreq &&
					vrefresh <= aconnector->max_vfreq;

	if (new_crtc_state->vrr_supported) {
		new_crtc_state->stream->ignore_msa_timing_param = true;
		fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;

		config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
		config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
		config.vsif_supported = true;
		config.btr = true;

		if (fs_vid_mode) {
			config.state = VRR_STATE_ACTIVE_FIXED;
			config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
			goto out;
		} else if (new_crtc_state->base.vrr_enabled) {
			config.state = VRR_STATE_ACTIVE_VARIABLE;
		} else {
			config.state = VRR_STATE_INACTIVE;
		}
	}
out:
	new_crtc_state->freesync_config = config;
}

static void reset_freesync_config_for_crtc(
	struct dm_crtc_state *new_crtc_state)
{
	new_crtc_state->vrr_supported = false;

	memset(&new_crtc_state->vrr_infopacket, 0,
	       sizeof(new_crtc_state->vrr_infopacket));
}

static bool
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
				 struct drm_crtc_state *new_crtc_state)
{
	const struct drm_display_mode *old_mode, *new_mode;

	if (!old_crtc_state || !new_crtc_state)
		return false;

	old_mode = &old_crtc_state->mode;
	new_mode = &new_crtc_state->mode;

	if (old_mode->clock == new_mode->clock &&
	    old_mode->hdisplay == new_mode->hdisplay &&
	    old_mode->vdisplay == new_mode->vdisplay &&
	    old_mode->htotal == new_mode->htotal &&
	    old_mode->vtotal != new_mode->vtotal &&
	    old_mode->hsync_start == new_mode->hsync_start &&
	    old_mode->vsync_start != new_mode->vsync_start &&
	    old_mode->hsync_end == new_mode->hsync_end &&
	    old_mode->vsync_end != new_mode->vsync_end &&
	    old_mode->hskew == new_mode->hskew &&
	    old_mode->vscan == new_mode->vscan &&
	    (old_mode->vsync_end - old_mode->vsync_start) ==
	    (new_mode->vsync_end - new_mode->vsync_start))
		return true;

	return false;
}

static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
{
	u64 num, den, res;
	struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;

	dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;

	num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
	den = (unsigned long long)new_crtc_state->mode.htotal *
	      (unsigned long long)new_crtc_state->mode.vtotal;

	res = div_u64(num, den);
	dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
}

static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
				struct drm_atomic_state *state,
				struct drm_crtc *crtc,
				struct drm_crtc_state *old_crtc_state,
				struct drm_crtc_state *new_crtc_state,
				bool enable,
				bool
*lock_and_validation_needed) 9342 { 9343 struct dm_atomic_state *dm_state = NULL; 9344 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state; 9345 struct dc_stream_state *new_stream; 9346 int ret = 0; 9347 9348 /* 9349 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set 9350 * update changed items 9351 */ 9352 struct amdgpu_crtc *acrtc = NULL; 9353 struct amdgpu_dm_connector *aconnector = NULL; 9354 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL; 9355 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL; 9356 9357 new_stream = NULL; 9358 9359 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 9360 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 9361 acrtc = to_amdgpu_crtc(crtc); 9362 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc); 9363 9364 /* TODO This hack should go away */ 9365 if (aconnector && enable) { 9366 /* Make sure fake sink is created in plug-in scenario */ 9367 drm_new_conn_state = drm_atomic_get_new_connector_state(state, 9368 &aconnector->base); 9369 drm_old_conn_state = drm_atomic_get_old_connector_state(state, 9370 &aconnector->base); 9371 9372 if (IS_ERR(drm_new_conn_state)) { 9373 ret = PTR_ERR_OR_ZERO(drm_new_conn_state); 9374 goto fail; 9375 } 9376 9377 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state); 9378 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state); 9379 9380 if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) 9381 goto skip_modeset; 9382 9383 new_stream = create_validate_stream_for_sink(aconnector, 9384 &new_crtc_state->mode, 9385 dm_new_conn_state, 9386 dm_old_crtc_state->stream); 9387 9388 /* 9389 * we can have no stream on ACTION_SET if a display 9390 * was disconnected during S3, in this case it is not an 9391 * error, the OS will be updated after detection, and 9392 * will do the right thing on next atomic commit 9393 */ 9394 9395 if (!new_stream) { 9396 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n", 9397 __func__, acrtc->base.base.id); 9398 ret = -ENOMEM; 9399 goto fail; 9400 } 9401 9402 /* 9403 * TODO: Check VSDB bits to decide whether this should 9404 * be enabled or not. 9405 */ 9406 new_stream->triggered_crtc_reset.enabled = 9407 dm->force_timing_sync; 9408 9409 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level; 9410 9411 ret = fill_hdr_info_packet(drm_new_conn_state, 9412 &new_stream->hdr_static_metadata); 9413 if (ret) 9414 goto fail; 9415 9416 /* 9417 * If we already removed the old stream from the context 9418 * (and set the new stream to NULL) then we can't reuse 9419 * the old stream even if the stream and scaling are unchanged. 9420 * We'll hit the BUG_ON and black screen. 9421 * 9422 * TODO: Refactor this function to allow this check to work 9423 * in all conditions. 
9424 */ 9425 if (dm_new_crtc_state->stream && 9426 is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state)) 9427 goto skip_modeset; 9428 9429 if (dm_new_crtc_state->stream && 9430 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) && 9431 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) { 9432 new_crtc_state->mode_changed = false; 9433 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d", 9434 new_crtc_state->mode_changed); 9435 } 9436 } 9437 9438 /* mode_changed flag may get updated above, need to check again */ 9439 if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) 9440 goto skip_modeset; 9441 9442 drm_dbg_state(state->dev, 9443 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, planes_changed:%d, mode_changed:%d,active_changed:%d,connectors_changed:%d\n", 9444 acrtc->crtc_id, 9445 new_crtc_state->enable, 9446 new_crtc_state->active, 9447 new_crtc_state->planes_changed, 9448 new_crtc_state->mode_changed, 9449 new_crtc_state->active_changed, 9450 new_crtc_state->connectors_changed); 9451 9452 /* Remove stream for any changed/disabled CRTC */ 9453 if (!enable) { 9454 9455 if (!dm_old_crtc_state->stream) 9456 goto skip_modeset; 9457 9458 /* Unset freesync video if it was active before */ 9459 if (dm_old_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED) { 9460 dm_new_crtc_state->freesync_config.state = VRR_STATE_INACTIVE; 9461 dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = 0; 9462 } 9463 9464 /* Now check if we should set freesync video mode */ 9465 if (dm_new_crtc_state->stream && 9466 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) && 9467 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream) && 9468 is_timing_unchanged_for_freesync(new_crtc_state, 9469 old_crtc_state)) { 9470 new_crtc_state->mode_changed = false; 9471 DRM_DEBUG_DRIVER( 9472 "Mode change not required for front porch change, setting mode_changed to %d", 9473 new_crtc_state->mode_changed); 9474 9475 set_freesync_fixed_config(dm_new_crtc_state); 9476 9477 goto skip_modeset; 9478 } else if (aconnector && 9479 is_freesync_video_mode(&new_crtc_state->mode, 9480 aconnector)) { 9481 struct drm_display_mode *high_mode; 9482 9483 high_mode = get_highest_refresh_rate_mode(aconnector, false); 9484 if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) 9485 set_freesync_fixed_config(dm_new_crtc_state); 9486 } 9487 9488 ret = dm_atomic_get_state(state, &dm_state); 9489 if (ret) 9490 goto fail; 9491 9492 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n", 9493 crtc->base.id); 9494 9495 /* i.e. 
reset mode */ 9496 if (dc_remove_stream_from_ctx( 9497 dm->dc, 9498 dm_state->context, 9499 dm_old_crtc_state->stream) != DC_OK) { 9500 ret = -EINVAL; 9501 goto fail; 9502 } 9503 9504 dc_stream_release(dm_old_crtc_state->stream); 9505 dm_new_crtc_state->stream = NULL; 9506 9507 reset_freesync_config_for_crtc(dm_new_crtc_state); 9508 9509 *lock_and_validation_needed = true; 9510 9511 } else {/* Add stream for any updated/enabled CRTC */ 9512 /* 9513 * Quick fix to prevent NULL pointer on new_stream when 9514 * added MST connectors not found in existing crtc_state in the chained mode 9515 * TODO: need to dig out the root cause of that 9516 */ 9517 if (!aconnector) 9518 goto skip_modeset; 9519 9520 if (modereset_required(new_crtc_state)) 9521 goto skip_modeset; 9522 9523 if (amdgpu_dm_crtc_modeset_required(new_crtc_state, new_stream, 9524 dm_old_crtc_state->stream)) { 9525 9526 WARN_ON(dm_new_crtc_state->stream); 9527 9528 ret = dm_atomic_get_state(state, &dm_state); 9529 if (ret) 9530 goto fail; 9531 9532 dm_new_crtc_state->stream = new_stream; 9533 9534 dc_stream_retain(new_stream); 9535 9536 DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n", 9537 crtc->base.id); 9538 9539 if (dc_add_stream_to_ctx( 9540 dm->dc, 9541 dm_state->context, 9542 dm_new_crtc_state->stream) != DC_OK) { 9543 ret = -EINVAL; 9544 goto fail; 9545 } 9546 9547 *lock_and_validation_needed = true; 9548 } 9549 } 9550 9551 skip_modeset: 9552 /* Release extra reference */ 9553 if (new_stream) 9554 dc_stream_release(new_stream); 9555 9556 /* 9557 * We want to do dc stream updates that do not require a 9558 * full modeset below. 9559 */ 9560 if (!(enable && aconnector && new_crtc_state->active)) 9561 return 0; 9562 /* 9563 * Given above conditions, the dc state cannot be NULL because: 9564 * 1. We're in the process of enabling CRTCs (just been added 9565 * to the dc context, or already is on the context) 9566 * 2. Has a valid connector attached, and 9567 * 3. Is currently active and enabled. 9568 * => The dc stream state currently exists. 9569 */ 9570 BUG_ON(dm_new_crtc_state->stream == NULL); 9571 9572 /* Scaling or underscan settings */ 9573 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) || 9574 drm_atomic_crtc_needs_modeset(new_crtc_state)) 9575 update_stream_scaling_settings( 9576 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream); 9577 9578 /* ABM settings */ 9579 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level; 9580 9581 /* 9582 * Color management settings. We also update color properties 9583 * when a modeset is needed, to ensure it gets reprogrammed. 9584 */ 9585 if (dm_new_crtc_state->base.color_mgmt_changed || 9586 drm_atomic_crtc_needs_modeset(new_crtc_state)) { 9587 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state); 9588 if (ret) 9589 goto fail; 9590 } 9591 9592 /* Update Freesync settings. 
*/ 9593 get_freesync_config_for_crtc(dm_new_crtc_state, 9594 dm_new_conn_state); 9595 9596 return ret; 9597 9598 fail: 9599 if (new_stream) 9600 dc_stream_release(new_stream); 9601 return ret; 9602 } 9603 9604 static bool should_reset_plane(struct drm_atomic_state *state, 9605 struct drm_plane *plane, 9606 struct drm_plane_state *old_plane_state, 9607 struct drm_plane_state *new_plane_state) 9608 { 9609 struct drm_plane *other; 9610 struct drm_plane_state *old_other_state, *new_other_state; 9611 struct drm_crtc_state *new_crtc_state; 9612 struct amdgpu_device *adev = drm_to_adev(plane->dev); 9613 int i; 9614 9615 /* 9616 * TODO: Remove this hack for all asics once it proves that the 9617 * fast updates works fine on DCN3.2+. 9618 */ 9619 if (adev->ip_versions[DCE_HWIP][0] < IP_VERSION(3, 2, 0) && state->allow_modeset) 9620 return true; 9621 9622 /* Exit early if we know that we're adding or removing the plane. */ 9623 if (old_plane_state->crtc != new_plane_state->crtc) 9624 return true; 9625 9626 /* old crtc == new_crtc == NULL, plane not in context. */ 9627 if (!new_plane_state->crtc) 9628 return false; 9629 9630 new_crtc_state = 9631 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc); 9632 9633 if (!new_crtc_state) 9634 return true; 9635 9636 /* CRTC Degamma changes currently require us to recreate planes. */ 9637 if (new_crtc_state->color_mgmt_changed) 9638 return true; 9639 9640 if (drm_atomic_crtc_needs_modeset(new_crtc_state)) 9641 return true; 9642 9643 /* 9644 * If there are any new primary or overlay planes being added or 9645 * removed then the z-order can potentially change. To ensure 9646 * correct z-order and pipe acquisition the current DC architecture 9647 * requires us to remove and recreate all existing planes. 9648 * 9649 * TODO: Come up with a more elegant solution for this. 9650 */ 9651 for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) { 9652 struct amdgpu_framebuffer *old_afb, *new_afb; 9653 9654 if (other->type == DRM_PLANE_TYPE_CURSOR) 9655 continue; 9656 9657 if (old_other_state->crtc != new_plane_state->crtc && 9658 new_other_state->crtc != new_plane_state->crtc) 9659 continue; 9660 9661 if (old_other_state->crtc != new_other_state->crtc) 9662 return true; 9663 9664 /* Src/dst size and scaling updates. */ 9665 if (old_other_state->src_w != new_other_state->src_w || 9666 old_other_state->src_h != new_other_state->src_h || 9667 old_other_state->crtc_w != new_other_state->crtc_w || 9668 old_other_state->crtc_h != new_other_state->crtc_h) 9669 return true; 9670 9671 /* Rotation / mirroring updates. */ 9672 if (old_other_state->rotation != new_other_state->rotation) 9673 return true; 9674 9675 /* Blending updates. */ 9676 if (old_other_state->pixel_blend_mode != 9677 new_other_state->pixel_blend_mode) 9678 return true; 9679 9680 /* Alpha updates. */ 9681 if (old_other_state->alpha != new_other_state->alpha) 9682 return true; 9683 9684 /* Colorspace changes. */ 9685 if (old_other_state->color_range != new_other_state->color_range || 9686 old_other_state->color_encoding != new_other_state->color_encoding) 9687 return true; 9688 9689 /* Framebuffer checks fall at the end. */ 9690 if (!old_other_state->fb || !new_other_state->fb) 9691 continue; 9692 9693 /* Pixel format changes can require bandwidth updates. 
*/ 9694 if (old_other_state->fb->format != new_other_state->fb->format) 9695 return true; 9696 9697 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb; 9698 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb; 9699 9700 /* Tiling and DCC changes also require bandwidth updates. */ 9701 if (old_afb->tiling_flags != new_afb->tiling_flags || 9702 old_afb->base.modifier != new_afb->base.modifier) 9703 return true; 9704 } 9705 9706 return false; 9707 } 9708 9709 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc, 9710 struct drm_plane_state *new_plane_state, 9711 struct drm_framebuffer *fb) 9712 { 9713 struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev); 9714 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb); 9715 unsigned int pitch; 9716 bool linear; 9717 9718 if (fb->width > new_acrtc->max_cursor_width || 9719 fb->height > new_acrtc->max_cursor_height) { 9720 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n", 9721 new_plane_state->fb->width, 9722 new_plane_state->fb->height); 9723 return -EINVAL; 9724 } 9725 if (new_plane_state->src_w != fb->width << 16 || 9726 new_plane_state->src_h != fb->height << 16) { 9727 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n"); 9728 return -EINVAL; 9729 } 9730 9731 /* Pitch in pixels */ 9732 pitch = fb->pitches[0] / fb->format->cpp[0]; 9733 9734 if (fb->width != pitch) { 9735 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d", 9736 fb->width, pitch); 9737 return -EINVAL; 9738 } 9739 9740 switch (pitch) { 9741 case 64: 9742 case 128: 9743 case 256: 9744 /* FB pitch is supported by cursor plane */ 9745 break; 9746 default: 9747 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch); 9748 return -EINVAL; 9749 } 9750 9751 /* Core DRM takes care of checking FB modifiers, so we only need to 9752 * check tiling flags when the FB doesn't have a modifier. 
9753 */ 9754 if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) { 9755 if (adev->family < AMDGPU_FAMILY_AI) { 9756 linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 && 9757 AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 && 9758 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0; 9759 } else { 9760 linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0; 9761 } 9762 if (!linear) { 9763 DRM_DEBUG_ATOMIC("Cursor FB not linear"); 9764 return -EINVAL; 9765 } 9766 } 9767 9768 return 0; 9769 } 9770 9771 static int dm_update_plane_state(struct dc *dc, 9772 struct drm_atomic_state *state, 9773 struct drm_plane *plane, 9774 struct drm_plane_state *old_plane_state, 9775 struct drm_plane_state *new_plane_state, 9776 bool enable, 9777 bool *lock_and_validation_needed, 9778 bool *is_top_most_overlay) 9779 { 9780 9781 struct dm_atomic_state *dm_state = NULL; 9782 struct drm_crtc *new_plane_crtc, *old_plane_crtc; 9783 struct drm_crtc_state *old_crtc_state, *new_crtc_state; 9784 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state; 9785 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state; 9786 struct amdgpu_crtc *new_acrtc; 9787 bool needs_reset; 9788 int ret = 0; 9789 9790 9791 new_plane_crtc = new_plane_state->crtc; 9792 old_plane_crtc = old_plane_state->crtc; 9793 dm_new_plane_state = to_dm_plane_state(new_plane_state); 9794 dm_old_plane_state = to_dm_plane_state(old_plane_state); 9795 9796 if (plane->type == DRM_PLANE_TYPE_CURSOR) { 9797 if (!enable || !new_plane_crtc || 9798 drm_atomic_plane_disabling(plane->state, new_plane_state)) 9799 return 0; 9800 9801 new_acrtc = to_amdgpu_crtc(new_plane_crtc); 9802 9803 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) { 9804 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n"); 9805 return -EINVAL; 9806 } 9807 9808 if (new_plane_state->fb) { 9809 ret = dm_check_cursor_fb(new_acrtc, new_plane_state, 9810 new_plane_state->fb); 9811 if (ret) 9812 return ret; 9813 } 9814 9815 return 0; 9816 } 9817 9818 needs_reset = should_reset_plane(state, plane, old_plane_state, 9819 new_plane_state); 9820 9821 /* Remove any changed/removed planes */ 9822 if (!enable) { 9823 if (!needs_reset) 9824 return 0; 9825 9826 if (!old_plane_crtc) 9827 return 0; 9828 9829 old_crtc_state = drm_atomic_get_old_crtc_state( 9830 state, old_plane_crtc); 9831 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 9832 9833 if (!dm_old_crtc_state->stream) 9834 return 0; 9835 9836 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n", 9837 plane->base.id, old_plane_crtc->base.id); 9838 9839 ret = dm_atomic_get_state(state, &dm_state); 9840 if (ret) 9841 return ret; 9842 9843 if (!dc_remove_plane_from_context( 9844 dc, 9845 dm_old_crtc_state->stream, 9846 dm_old_plane_state->dc_state, 9847 dm_state->context)) { 9848 9849 return -EINVAL; 9850 } 9851 9852 if (dm_old_plane_state->dc_state) 9853 dc_plane_state_release(dm_old_plane_state->dc_state); 9854 9855 dm_new_plane_state->dc_state = NULL; 9856 9857 *lock_and_validation_needed = true; 9858 9859 } else { /* Add new planes */ 9860 struct dc_plane_state *dc_new_plane_state; 9861 9862 if (drm_atomic_plane_disabling(plane->state, new_plane_state)) 9863 return 0; 9864 9865 if (!new_plane_crtc) 9866 return 0; 9867 9868 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc); 9869 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 9870 9871 if (!dm_new_crtc_state->stream) 9872 return 0; 9873 9874 if (!needs_reset) 9875 
return 0; 9876 9877 ret = amdgpu_dm_plane_helper_check_state(new_plane_state, new_crtc_state); 9878 if (ret) 9879 return ret; 9880 9881 WARN_ON(dm_new_plane_state->dc_state); 9882 9883 dc_new_plane_state = dc_create_plane_state(dc); 9884 if (!dc_new_plane_state) 9885 return -ENOMEM; 9886 9887 /* Block top most plane from being a video plane */ 9888 if (plane->type == DRM_PLANE_TYPE_OVERLAY) { 9889 if (amdgpu_dm_plane_is_video_format(new_plane_state->fb->format->format) && *is_top_most_overlay) 9890 return -EINVAL; 9891 9892 *is_top_most_overlay = false; 9893 } 9894 9895 DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n", 9896 plane->base.id, new_plane_crtc->base.id); 9897 9898 ret = fill_dc_plane_attributes( 9899 drm_to_adev(new_plane_crtc->dev), 9900 dc_new_plane_state, 9901 new_plane_state, 9902 new_crtc_state); 9903 if (ret) { 9904 dc_plane_state_release(dc_new_plane_state); 9905 return ret; 9906 } 9907 9908 ret = dm_atomic_get_state(state, &dm_state); 9909 if (ret) { 9910 dc_plane_state_release(dc_new_plane_state); 9911 return ret; 9912 } 9913 9914 /* 9915 * Any atomic check errors that occur after this will 9916 * not need a release. The plane state will be attached 9917 * to the stream, and therefore part of the atomic 9918 * state. It'll be released when the atomic state is 9919 * cleaned. 9920 */ 9921 if (!dc_add_plane_to_context( 9922 dc, 9923 dm_new_crtc_state->stream, 9924 dc_new_plane_state, 9925 dm_state->context)) { 9926 9927 dc_plane_state_release(dc_new_plane_state); 9928 return -EINVAL; 9929 } 9930 9931 dm_new_plane_state->dc_state = dc_new_plane_state; 9932 9933 dm_new_crtc_state->mpo_requested |= (plane->type == DRM_PLANE_TYPE_OVERLAY); 9934 9935 /* Tell DC to do a full surface update every time there 9936 * is a plane change. Inefficient, but works for now. 9937 */ 9938 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1; 9939 9940 *lock_and_validation_needed = true; 9941 } 9942 9943 9944 return ret; 9945 } 9946 9947 static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state, 9948 int *src_w, int *src_h) 9949 { 9950 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) { 9951 case DRM_MODE_ROTATE_90: 9952 case DRM_MODE_ROTATE_270: 9953 *src_w = plane_state->src_h >> 16; 9954 *src_h = plane_state->src_w >> 16; 9955 break; 9956 case DRM_MODE_ROTATE_0: 9957 case DRM_MODE_ROTATE_180: 9958 default: 9959 *src_w = plane_state->src_w >> 16; 9960 *src_h = plane_state->src_h >> 16; 9961 break; 9962 } 9963 } 9964 9965 static void 9966 dm_get_plane_scale(struct drm_plane_state *plane_state, 9967 int *out_plane_scale_w, int *out_plane_scale_h) 9968 { 9969 int plane_src_w, plane_src_h; 9970 9971 dm_get_oriented_plane_size(plane_state, &plane_src_w, &plane_src_h); 9972 *out_plane_scale_w = plane_state->crtc_w * 1000 / plane_src_w; 9973 *out_plane_scale_h = plane_state->crtc_h * 1000 / plane_src_h; 9974 } 9975 9976 static int dm_check_crtc_cursor(struct drm_atomic_state *state, 9977 struct drm_crtc *crtc, 9978 struct drm_crtc_state *new_crtc_state) 9979 { 9980 struct drm_plane *cursor = crtc->cursor, *plane, *underlying; 9981 struct drm_plane_state *old_plane_state, *new_plane_state; 9982 struct drm_plane_state *new_cursor_state, *new_underlying_state; 9983 int i; 9984 int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h; 9985 bool any_relevant_change = false; 9986 9987 /* On DCE and DCN there is no dedicated hardware cursor plane. 
We get a 9988 * cursor per pipe but it's going to inherit the scaling and 9989 * positioning from the underlying pipe. Check the cursor plane's 9990 * blending properties match the underlying planes'. 9991 */ 9992 9993 /* If no plane was enabled or changed scaling, no need to check again */ 9994 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { 9995 int new_scale_w, new_scale_h, old_scale_w, old_scale_h; 9996 9997 if (!new_plane_state || !new_plane_state->fb || new_plane_state->crtc != crtc) 9998 continue; 9999 10000 if (!old_plane_state || !old_plane_state->fb || old_plane_state->crtc != crtc) { 10001 any_relevant_change = true; 10002 break; 10003 } 10004 10005 if (new_plane_state->fb == old_plane_state->fb && 10006 new_plane_state->crtc_w == old_plane_state->crtc_w && 10007 new_plane_state->crtc_h == old_plane_state->crtc_h) 10008 continue; 10009 10010 dm_get_plane_scale(new_plane_state, &new_scale_w, &new_scale_h); 10011 dm_get_plane_scale(old_plane_state, &old_scale_w, &old_scale_h); 10012 10013 if (new_scale_w != old_scale_w || new_scale_h != old_scale_h) { 10014 any_relevant_change = true; 10015 break; 10016 } 10017 } 10018 10019 if (!any_relevant_change) 10020 return 0; 10021 10022 new_cursor_state = drm_atomic_get_plane_state(state, cursor); 10023 if (IS_ERR(new_cursor_state)) 10024 return PTR_ERR(new_cursor_state); 10025 10026 if (!new_cursor_state->fb) 10027 return 0; 10028 10029 dm_get_plane_scale(new_cursor_state, &cursor_scale_w, &cursor_scale_h); 10030 10031 /* Need to check all enabled planes, even if this commit doesn't change 10032 * their state 10033 */ 10034 i = drm_atomic_add_affected_planes(state, crtc); 10035 if (i) 10036 return i; 10037 10038 for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) { 10039 /* Narrow down to non-cursor planes on the same CRTC as the cursor */ 10040 if (new_underlying_state->crtc != crtc || underlying == crtc->cursor) 10041 continue; 10042 10043 /* Ignore disabled planes */ 10044 if (!new_underlying_state->fb) 10045 continue; 10046 10047 dm_get_plane_scale(new_underlying_state, 10048 &underlying_scale_w, &underlying_scale_h); 10049 10050 if (cursor_scale_w != underlying_scale_w || 10051 cursor_scale_h != underlying_scale_h) { 10052 drm_dbg_atomic(crtc->dev, 10053 "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n", 10054 cursor->base.id, cursor->name, underlying->base.id, underlying->name); 10055 return -EINVAL; 10056 } 10057 10058 /* If this plane covers the whole CRTC, no need to check planes underneath */ 10059 if (new_underlying_state->crtc_x <= 0 && 10060 new_underlying_state->crtc_y <= 0 && 10061 new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay && 10062 new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay) 10063 break; 10064 } 10065 10066 return 0; 10067 } 10068 10069 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc) 10070 { 10071 struct drm_connector *connector; 10072 struct drm_connector_state *conn_state, *old_conn_state; 10073 struct amdgpu_dm_connector *aconnector = NULL; 10074 int i; 10075 10076 for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) { 10077 if (!conn_state->crtc) 10078 conn_state = old_conn_state; 10079 10080 if (conn_state->crtc != crtc) 10081 continue; 10082 10083 aconnector = to_amdgpu_dm_connector(connector); 10084 if (!aconnector->mst_output_port || !aconnector->mst_root) 
10085 aconnector = NULL; 10086 else 10087 break; 10088 } 10089 10090 if (!aconnector) 10091 return 0; 10092 10093 return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_root->mst_mgr); 10094 } 10095 10096 /** 10097 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM. 10098 * 10099 * @dev: The DRM device 10100 * @state: The atomic state to commit 10101 * 10102 * Validate that the given atomic state is programmable by DC into hardware. 10103 * This involves constructing a &struct dc_state reflecting the new hardware 10104 * state we wish to commit, then querying DC to see if it is programmable. It's 10105 * important not to modify the existing DC state. Otherwise, atomic_check 10106 * may unexpectedly commit hardware changes. 10107 * 10108 * When validating the DC state, it's important that the right locks are 10109 * acquired. For full updates case which removes/adds/updates streams on one 10110 * CRTC while flipping on another CRTC, acquiring global lock will guarantee 10111 * that any such full update commit will wait for completion of any outstanding 10112 * flip using DRMs synchronization events. 10113 * 10114 * Note that DM adds the affected connectors for all CRTCs in state, when that 10115 * might not seem necessary. This is because DC stream creation requires the 10116 * DC sink, which is tied to the DRM connector state. Cleaning this up should 10117 * be possible but non-trivial - a possible TODO item. 10118 * 10119 * Return: -Error code if validation failed. 10120 */ 10121 static int amdgpu_dm_atomic_check(struct drm_device *dev, 10122 struct drm_atomic_state *state) 10123 { 10124 struct amdgpu_device *adev = drm_to_adev(dev); 10125 struct dm_atomic_state *dm_state = NULL; 10126 struct dc *dc = adev->dm.dc; 10127 struct drm_connector *connector; 10128 struct drm_connector_state *old_con_state, *new_con_state; 10129 struct drm_crtc *crtc; 10130 struct drm_crtc_state *old_crtc_state, *new_crtc_state; 10131 struct drm_plane *plane; 10132 struct drm_plane_state *old_plane_state, *new_plane_state; 10133 enum dc_status status; 10134 int ret, i; 10135 bool lock_and_validation_needed = false; 10136 bool is_top_most_overlay = true; 10137 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state; 10138 struct drm_dp_mst_topology_mgr *mgr; 10139 struct drm_dp_mst_topology_state *mst_state; 10140 struct dsc_mst_fairness_vars vars[MAX_PIPES]; 10141 10142 trace_amdgpu_dm_atomic_check_begin(state); 10143 10144 ret = drm_atomic_helper_check_modeset(dev, state); 10145 if (ret) { 10146 DRM_DEBUG_DRIVER("drm_atomic_helper_check_modeset() failed\n"); 10147 goto fail; 10148 } 10149 10150 /* Check connector changes */ 10151 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { 10152 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state); 10153 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); 10154 10155 /* Skip connectors that are disabled or part of modeset already. 
*/ 10156 if (!new_con_state->crtc) 10157 continue; 10158 10159 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc); 10160 if (IS_ERR(new_crtc_state)) { 10161 DRM_DEBUG_DRIVER("drm_atomic_get_crtc_state() failed\n"); 10162 ret = PTR_ERR(new_crtc_state); 10163 goto fail; 10164 } 10165 10166 if (dm_old_con_state->abm_level != dm_new_con_state->abm_level || 10167 dm_old_con_state->scaling != dm_new_con_state->scaling) 10168 new_crtc_state->connectors_changed = true; 10169 } 10170 10171 if (dc_resource_is_dsc_encoding_supported(dc)) { 10172 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 10173 if (drm_atomic_crtc_needs_modeset(new_crtc_state)) { 10174 ret = add_affected_mst_dsc_crtcs(state, crtc); 10175 if (ret) { 10176 DRM_DEBUG_DRIVER("add_affected_mst_dsc_crtcs() failed\n"); 10177 goto fail; 10178 } 10179 } 10180 } 10181 } 10182 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 10183 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 10184 10185 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) && 10186 !new_crtc_state->color_mgmt_changed && 10187 old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled && 10188 dm_old_crtc_state->dsc_force_changed == false) 10189 continue; 10190 10191 ret = amdgpu_dm_verify_lut_sizes(new_crtc_state); 10192 if (ret) { 10193 DRM_DEBUG_DRIVER("amdgpu_dm_verify_lut_sizes() failed\n"); 10194 goto fail; 10195 } 10196 10197 if (!new_crtc_state->enable) 10198 continue; 10199 10200 ret = drm_atomic_add_affected_connectors(state, crtc); 10201 if (ret) { 10202 DRM_DEBUG_DRIVER("drm_atomic_add_affected_connectors() failed\n"); 10203 goto fail; 10204 } 10205 10206 ret = drm_atomic_add_affected_planes(state, crtc); 10207 if (ret) { 10208 DRM_DEBUG_DRIVER("drm_atomic_add_affected_planes() failed\n"); 10209 goto fail; 10210 } 10211 10212 if (dm_old_crtc_state->dsc_force_changed) 10213 new_crtc_state->mode_changed = true; 10214 } 10215 10216 /* 10217 * Add all primary and overlay planes on the CRTC to the state 10218 * whenever a plane is enabled to maintain correct z-ordering 10219 * and to enable fast surface updates. 10220 */ 10221 drm_for_each_crtc(crtc, dev) { 10222 bool modified = false; 10223 10224 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { 10225 if (plane->type == DRM_PLANE_TYPE_CURSOR) 10226 continue; 10227 10228 if (new_plane_state->crtc == crtc || 10229 old_plane_state->crtc == crtc) { 10230 modified = true; 10231 break; 10232 } 10233 } 10234 10235 if (!modified) 10236 continue; 10237 10238 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) { 10239 if (plane->type == DRM_PLANE_TYPE_CURSOR) 10240 continue; 10241 10242 new_plane_state = 10243 drm_atomic_get_plane_state(state, plane); 10244 10245 if (IS_ERR(new_plane_state)) { 10246 ret = PTR_ERR(new_plane_state); 10247 DRM_DEBUG_DRIVER("new_plane_state is BAD\n"); 10248 goto fail; 10249 } 10250 } 10251 } 10252 10253 /* 10254 * DC consults the zpos (layer_index in DC terminology) to determine the 10255 * hw plane on which to enable the hw cursor (see 10256 * `dcn10_can_pipe_disable_cursor`). By now, all modified planes are in 10257 * atomic state, so call drm helper to normalize zpos. 
10258 */ 10259 ret = drm_atomic_normalize_zpos(dev, state); 10260 if (ret) { 10261 drm_dbg(dev, "drm_atomic_normalize_zpos() failed\n"); 10262 goto fail; 10263 } 10264 10265 /* Remove exiting planes if they are modified */ 10266 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) { 10267 if (old_plane_state->fb && new_plane_state->fb && 10268 get_mem_type(old_plane_state->fb) != 10269 get_mem_type(new_plane_state->fb)) 10270 lock_and_validation_needed = true; 10271 10272 ret = dm_update_plane_state(dc, state, plane, 10273 old_plane_state, 10274 new_plane_state, 10275 false, 10276 &lock_and_validation_needed, 10277 &is_top_most_overlay); 10278 if (ret) { 10279 DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n"); 10280 goto fail; 10281 } 10282 } 10283 10284 /* Disable all crtcs which require disable */ 10285 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 10286 ret = dm_update_crtc_state(&adev->dm, state, crtc, 10287 old_crtc_state, 10288 new_crtc_state, 10289 false, 10290 &lock_and_validation_needed); 10291 if (ret) { 10292 DRM_DEBUG_DRIVER("DISABLE: dm_update_crtc_state() failed\n"); 10293 goto fail; 10294 } 10295 } 10296 10297 /* Enable all crtcs which require enable */ 10298 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 10299 ret = dm_update_crtc_state(&adev->dm, state, crtc, 10300 old_crtc_state, 10301 new_crtc_state, 10302 true, 10303 &lock_and_validation_needed); 10304 if (ret) { 10305 DRM_DEBUG_DRIVER("ENABLE: dm_update_crtc_state() failed\n"); 10306 goto fail; 10307 } 10308 } 10309 10310 /* Add new/modified planes */ 10311 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) { 10312 ret = dm_update_plane_state(dc, state, plane, 10313 old_plane_state, 10314 new_plane_state, 10315 true, 10316 &lock_and_validation_needed, 10317 &is_top_most_overlay); 10318 if (ret) { 10319 DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n"); 10320 goto fail; 10321 } 10322 } 10323 10324 if (dc_resource_is_dsc_encoding_supported(dc)) { 10325 ret = pre_validate_dsc(state, &dm_state, vars); 10326 if (ret != 0) 10327 goto fail; 10328 } 10329 10330 /* Run this here since we want to validate the streams we created */ 10331 ret = drm_atomic_helper_check_planes(dev, state); 10332 if (ret) { 10333 DRM_DEBUG_DRIVER("drm_atomic_helper_check_planes() failed\n"); 10334 goto fail; 10335 } 10336 10337 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { 10338 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 10339 if (dm_new_crtc_state->mpo_requested) 10340 DRM_DEBUG_DRIVER("MPO enablement requested on crtc:[%p]\n", crtc); 10341 } 10342 10343 /* Check cursor planes scaling */ 10344 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { 10345 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state); 10346 if (ret) { 10347 DRM_DEBUG_DRIVER("dm_check_crtc_cursor() failed\n"); 10348 goto fail; 10349 } 10350 } 10351 10352 if (state->legacy_cursor_update) { 10353 /* 10354 * This is a fast cursor update coming from the plane update 10355 * helper, check if it can be done asynchronously for better 10356 * performance. 10357 */ 10358 state->async_update = 10359 !drm_atomic_helper_async_check(dev, state); 10360 10361 /* 10362 * Skip the remaining global validation if this is an async 10363 * update. 
Cursor updates can be done without affecting
		 * state or bandwidth calcs and this avoids the performance
		 * penalty of locking the private state object and
		 * allocating a new dc_state.
		 */
		if (state->async_update)
			return 0;
	}

	/* Check scaling and underscan changes */
	/*
	 * TODO: Removed scaling changes validation due to inability to commit
	 * a new stream into context w/o causing full reset. Need to
	 * decide how to handle.
	 */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);

		/* Skip any modesets/resets */
		if (!acrtc || drm_atomic_crtc_needs_modeset(
				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
			continue;

		/* Skip anything that is not a scaling or underscan change */
		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
			continue;

		lock_and_validation_needed = true;
	}

	/* Set the slot info for each mst_state based on the link encoding format */
	for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
		struct amdgpu_dm_connector *aconnector;
		struct drm_connector *connector;
		struct drm_connector_list_iter iter;
		u8 link_coding_cap;

		drm_connector_list_iter_begin(dev, &iter);
		drm_for_each_connector_iter(connector, &iter) {
			if (connector->index == mst_state->mgr->conn_base_id) {
				aconnector = to_amdgpu_dm_connector(connector);
				link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
				drm_dp_mst_update_slots(mst_state, link_coding_cap);

				break;
			}
		}
		drm_connector_list_iter_end(&iter);
	}

	/*
	 * Streams and planes are reset when there are changes that affect
	 * bandwidth. Anything that affects bandwidth needs to go through
	 * DC global validation to ensure that the configuration can be applied
	 * to hardware.
	 *
	 * We have to currently stall out here in atomic_check for outstanding
	 * commits to finish in this case because our IRQ handlers reference
	 * DRM state directly - we can end up disabling interrupts too early
	 * if we don't.
	 *
	 * TODO: Remove this stall and drop DM state private objects.
10426 */ 10427 if (lock_and_validation_needed) { 10428 ret = dm_atomic_get_state(state, &dm_state); 10429 if (ret) { 10430 DRM_DEBUG_DRIVER("dm_atomic_get_state() failed\n"); 10431 goto fail; 10432 } 10433 10434 ret = do_aquire_global_lock(dev, state); 10435 if (ret) { 10436 DRM_DEBUG_DRIVER("do_aquire_global_lock() failed\n"); 10437 goto fail; 10438 } 10439 10440 ret = compute_mst_dsc_configs_for_state(state, dm_state->context, vars); 10441 if (ret) { 10442 DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n"); 10443 ret = -EINVAL; 10444 goto fail; 10445 } 10446 10447 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars); 10448 if (ret) { 10449 DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n"); 10450 goto fail; 10451 } 10452 10453 /* 10454 * Perform validation of MST topology in the state: 10455 * We need to perform MST atomic check before calling 10456 * dc_validate_global_state(), or there is a chance 10457 * to get stuck in an infinite loop and hang eventually. 10458 */ 10459 ret = drm_dp_mst_atomic_check(state); 10460 if (ret) { 10461 DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n"); 10462 goto fail; 10463 } 10464 status = dc_validate_global_state(dc, dm_state->context, true); 10465 if (status != DC_OK) { 10466 DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)", 10467 dc_status_to_str(status), status); 10468 ret = -EINVAL; 10469 goto fail; 10470 } 10471 } else { 10472 /* 10473 * The commit is a fast update. Fast updates shouldn't change 10474 * the DC context, affect global validation, and can have their 10475 * commit work done in parallel with other commits not touching 10476 * the same resource. If we have a new DC context as part of 10477 * the DM atomic state from validation we need to free it and 10478 * retain the existing one instead. 10479 * 10480 * Furthermore, since the DM atomic state only contains the DC 10481 * context and can safely be annulled, we can free the state 10482 * and clear the associated private object now to free 10483 * some memory and avoid a possible use-after-free later. 10484 */ 10485 10486 for (i = 0; i < state->num_private_objs; i++) { 10487 struct drm_private_obj *obj = state->private_objs[i].ptr; 10488 10489 if (obj->funcs == adev->dm.atomic_obj.funcs) { 10490 int j = state->num_private_objs-1; 10491 10492 dm_atomic_destroy_state(obj, 10493 state->private_objs[i].state); 10494 10495 /* If i is not at the end of the array then the 10496 * last element needs to be moved to where i was 10497 * before the array can safely be truncated. 10498 */ 10499 if (i != j) 10500 state->private_objs[i] = 10501 state->private_objs[j]; 10502 10503 state->private_objs[j].ptr = NULL; 10504 state->private_objs[j].state = NULL; 10505 state->private_objs[j].old_state = NULL; 10506 state->private_objs[j].new_state = NULL; 10507 10508 state->num_private_objs = j; 10509 break; 10510 } 10511 } 10512 } 10513 10514 /* Store the overall update type for use later in atomic check. */ 10515 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { 10516 struct dm_crtc_state *dm_new_crtc_state = 10517 to_dm_crtc_state(new_crtc_state); 10518 10519 /* 10520 * Only allow async flips for fast updates that don't change 10521 * the FB pitch, the DCC state, rotation, etc. 
10522 */ 10523 if (new_crtc_state->async_flip && lock_and_validation_needed) { 10524 drm_dbg_atomic(crtc->dev, 10525 "[CRTC:%d:%s] async flips are only supported for fast updates\n", 10526 crtc->base.id, crtc->name); 10527 ret = -EINVAL; 10528 goto fail; 10529 } 10530 10531 dm_new_crtc_state->update_type = lock_and_validation_needed ? 10532 UPDATE_TYPE_FULL : UPDATE_TYPE_FAST; 10533 } 10534 10535 /* Must be success */ 10536 WARN_ON(ret); 10537 10538 trace_amdgpu_dm_atomic_check_finish(state, ret); 10539 10540 return ret; 10541 10542 fail: 10543 if (ret == -EDEADLK) 10544 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n"); 10545 else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS) 10546 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n"); 10547 else 10548 DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret); 10549 10550 trace_amdgpu_dm_atomic_check_finish(state, ret); 10551 10552 return ret; 10553 } 10554 10555 static bool is_dp_capable_without_timing_msa(struct dc *dc, 10556 struct amdgpu_dm_connector *amdgpu_dm_connector) 10557 { 10558 u8 dpcd_data; 10559 bool capable = false; 10560 10561 if (amdgpu_dm_connector->dc_link && 10562 dm_helpers_dp_read_dpcd( 10563 NULL, 10564 amdgpu_dm_connector->dc_link, 10565 DP_DOWN_STREAM_PORT_COUNT, 10566 &dpcd_data, 10567 sizeof(dpcd_data))) { 10568 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false; 10569 } 10570 10571 return capable; 10572 } 10573 10574 static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm, 10575 unsigned int offset, 10576 unsigned int total_length, 10577 u8 *data, 10578 unsigned int length, 10579 struct amdgpu_hdmi_vsdb_info *vsdb) 10580 { 10581 bool res; 10582 union dmub_rb_cmd cmd; 10583 struct dmub_cmd_send_edid_cea *input; 10584 struct dmub_cmd_edid_cea_output *output; 10585 10586 if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES) 10587 return false; 10588 10589 memset(&cmd, 0, sizeof(cmd)); 10590 10591 input = &cmd.edid_cea.data.input; 10592 10593 cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA; 10594 cmd.edid_cea.header.sub_type = 0; 10595 cmd.edid_cea.header.payload_bytes = 10596 sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header); 10597 input->offset = offset; 10598 input->length = length; 10599 input->cea_total_length = total_length; 10600 memcpy(input->payload, data, length); 10601 10602 res = dm_execute_dmub_cmd(dm->dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY); 10603 if (!res) { 10604 DRM_ERROR("EDID CEA parser failed\n"); 10605 return false; 10606 } 10607 10608 output = &cmd.edid_cea.data.output; 10609 10610 if (output->type == DMUB_CMD__EDID_CEA_ACK) { 10611 if (!output->ack.success) { 10612 DRM_ERROR("EDID CEA ack failed at offset %d\n", 10613 output->ack.offset); 10614 } 10615 } else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) { 10616 if (!output->amd_vsdb.vsdb_found) 10617 return false; 10618 10619 vsdb->freesync_supported = output->amd_vsdb.freesync_supported; 10620 vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version; 10621 vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate; 10622 vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate; 10623 } else { 10624 DRM_WARN("Unknown EDID CEA parser results\n"); 10625 return false; 10626 } 10627 10628 return true; 10629 } 10630 10631 static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm, 10632 u8 *edid_ext, int len, 10633 struct amdgpu_hdmi_vsdb_info *vsdb_info) 10634 { 10635 int i; 10636 10637 /* send extension block to DMCU for parsing */ 10638 for (i = 0; i < len; i += 8) { 

static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
		u8 *edid_ext, int len,
		struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
	int i;

	/* send extension block to DMCU for parsing */
	for (i = 0; i < len; i += 8) {
		bool res;
		int offset;

		/* send 8 bytes at a time */
		if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
			return false;

		if (i + 8 == len) {
			/* entire EDID block sent; expect the parse result */
			int version, min_rate, max_rate;

			res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
			if (res) {
				/* amd vsdb found */
				vsdb_info->freesync_supported = 1;
				vsdb_info->amd_vsdb_version = version;
				vsdb_info->min_refresh_rate_hz = min_rate;
				vsdb_info->max_refresh_rate_hz = max_rate;
				return true;
			}
			/* not amd vsdb */
			return false;
		}

		/* check for ack */
		res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
		if (!res)
			return false;
	}

	return false;
}

static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
		u8 *edid_ext, int len,
		struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
	int i;

	/* send extension block to DMUB for parsing */
	for (i = 0; i < len; i += 8) {
		/* send 8 bytes at a time */
		if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
			return false;
	}

	return vsdb_info->freesync_supported;
}

static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
		u8 *edid_ext, int len,
		struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
	bool ret;

	mutex_lock(&adev->dm.dc_lock);
	if (adev->dm.dmub_srv)
		ret = parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
	else
		ret = parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
	mutex_unlock(&adev->dm.dc_lock);
	return ret;
}

static int parse_amd_vsdb(struct amdgpu_dm_connector *aconnector,
		struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
	u8 *edid_ext = NULL;
	int i;
	int j = 0;

	if (edid == NULL || edid->extensions == 0)
		return -ENODEV;

	/* Find DisplayID extension */
	for (i = 0; i < edid->extensions; i++) {
		edid_ext = (void *)(edid + (i + 1));
		if (edid_ext[0] == DISPLAYID_EXT)
			break;
	}

	/* No DisplayID extension found: don't scan an unrelated block */
	if (i == edid->extensions)
		return -ENODEV;

	/* Don't read a struct that would straddle the end of the block */
	while (j + sizeof(struct amd_vsdb_block) <= EDID_LENGTH) {
		struct amd_vsdb_block *amd_vsdb = (struct amd_vsdb_block *)&edid_ext[j];
		unsigned int ieeeId = (amd_vsdb->ieee_id[2] << 16) | (amd_vsdb->ieee_id[1] << 8) | (amd_vsdb->ieee_id[0]);

		if (ieeeId == HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_IEEE_REGISTRATION_ID &&
		    amd_vsdb->version == HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_VERSION_3) {
			vsdb_info->replay_mode = (amd_vsdb->feature_caps & AMD_VSDB_VERSION_3_FEATURECAP_REPLAYMODE) ? true : false;
			vsdb_info->amd_vsdb_version = HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_VERSION_3;
			DRM_DEBUG_KMS("Panel supports Replay Mode: %d\n", vsdb_info->replay_mode);

			return true;
		}
		j++;
	}

	return false;
}
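
/*
 * Byte-order note for the OUI check above: the three IEEE ID bytes are
 * stored least-significant-first in the block, so they assemble into a
 * 24-bit value as ieee_id[2] << 16 | ieee_id[1] << 8 | ieee_id[0].
 * A minimal stand-alone illustration (hypothetical bytes, not driver data):
 *
 *	u8 id[3] = { 0x1a, 0x00, 0x00 };	// example OUI bytes, LSB first
 *	unsigned int oui = (id[2] << 16) | (id[1] << 8) | id[0];
 *	// oui == 0x00001a
 */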

static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
		struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
	u8 *edid_ext = NULL;
	int i;
	bool valid_vsdb_found = false;

	/*----- drm_find_cea_extension() -----*/
	/* No EDID or EDID extensions */
	if (edid == NULL || edid->extensions == 0)
		return -ENODEV;

	/* Find CEA extension */
	for (i = 0; i < edid->extensions; i++) {
		edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
		if (edid_ext[0] == CEA_EXT)
			break;
	}

	if (i == edid->extensions)
		return -ENODEV;

	/*----- cea_db_offsets() -----*/
	if (edid_ext[0] != CEA_EXT)
		return -ENODEV;

	valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);

	return valid_vsdb_found ? i : -ENODEV;
}
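
/*
 * EDID extension blocks are appended after the 128-byte base block, so
 * extension i starts at byte offset EDID_LENGTH * (i + 1). For example,
 * with a single CEA extension (edid->extensions == 1) the block occupies
 * bytes 128..255 of the EDID buffer, which is exactly what the pointer
 * arithmetic in parse_hdmi_amd_vsdb() above computes.
 */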

/**
 * amdgpu_dm_update_freesync_caps - Update FreeSync capabilities
 *
 * @connector: Connector to query.
 * @edid: EDID from monitor
 *
 * Amdgpu supports FreeSync on both DP and HDMI displays, and it needs to
 * keep track of some of the display information in the internal data
 * structures used by amdgpu_dm. This function checks which type of connector
 * it is dealing with and sets the FreeSync parameters accordingly.
 */
void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
				    struct edid *edid)
{
	int i = 0;
	struct detailed_timing *timing;
	struct detailed_non_pixel *data;
	struct detailed_data_monitor_range *range;
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_con_state = NULL;
	struct dc_sink *sink;

	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
	bool freesync_capable = false;
	enum adaptive_sync_type as_type = ADAPTIVE_SYNC_TYPE_NONE;

	if (!connector->state) {
		DRM_ERROR("%s - Connector has no state\n", __func__);
		goto update;
	}

	sink = amdgpu_dm_connector->dc_sink ?
		amdgpu_dm_connector->dc_sink :
		amdgpu_dm_connector->dc_em_sink;

	if (!edid || !sink) {
		dm_con_state = to_dm_connector_state(connector->state);

		amdgpu_dm_connector->min_vfreq = 0;
		amdgpu_dm_connector->max_vfreq = 0;
		amdgpu_dm_connector->pixel_clock_mhz = 0;
		connector->display_info.monitor_range.min_vfreq = 0;
		connector->display_info.monitor_range.max_vfreq = 0;
		freesync_capable = false;

		goto update;
	}

	dm_con_state = to_dm_connector_state(connector->state);

	if (!adev->dm.freesync_module)
		goto update;

	if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
		|| sink->sink_signal == SIGNAL_TYPE_EDP) {
		bool edid_check_required = false;

		if (edid) {
			edid_check_required = is_dp_capable_without_timing_msa(
						adev->dm.dc,
						amdgpu_dm_connector);
		}

		if (edid_check_required && (edid->version > 1 ||
					    (edid->version == 1 && edid->revision > 1))) {
			for (i = 0; i < 4; i++) {

				timing	= &edid->detailed_timings[i];
				data	= &timing->data.other_data;
				range	= &data->data.range;
				/*
				 * Check if monitor has continuous frequency mode
				 */
				if (data->type != EDID_DETAIL_MONITOR_RANGE)
					continue;
				/*
				 * Check for flag range limits only. If flag == 1
				 * then no additional timing information is
				 * provided. Default GTF, GTF Secondary curve and
				 * CVT are not supported.
				 */
				if (range->flags != 1)
					continue;

				amdgpu_dm_connector->min_vfreq = range->min_vfreq;
				amdgpu_dm_connector->max_vfreq = range->max_vfreq;
				amdgpu_dm_connector->pixel_clock_mhz =
					range->pixel_clock_mhz * 10;

				connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
				connector->display_info.monitor_range.max_vfreq = range->max_vfreq;

				break;
			}

			if (amdgpu_dm_connector->max_vfreq -
			    amdgpu_dm_connector->min_vfreq > 10) {

				freesync_capable = true;
			}
		}
		parse_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);

		if (vsdb_info.replay_mode) {
			amdgpu_dm_connector->vsdb_info.replay_mode = vsdb_info.replay_mode;
			amdgpu_dm_connector->vsdb_info.amd_vsdb_version = vsdb_info.amd_vsdb_version;
			amdgpu_dm_connector->as_type = ADAPTIVE_SYNC_TYPE_EDP;
		}

	} else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
		i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
		if (i >= 0 && vsdb_info.freesync_supported) {
			timing = &edid->detailed_timings[i];
			data = &timing->data.other_data;

			amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
			amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
			if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
				freesync_capable = true;

			connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
			connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
		}
	}

	as_type = dm_get_adaptive_sync_support_type(amdgpu_dm_connector->dc_link);

	if (as_type == FREESYNC_TYPE_PCON_IN_WHITELIST) {
		i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
		if (i >= 0 && vsdb_info.freesync_supported && vsdb_info.amd_vsdb_version > 0) {

			amdgpu_dm_connector->pack_sdp_v1_3 = true;
			amdgpu_dm_connector->as_type = as_type;
			amdgpu_dm_connector->vsdb_info = vsdb_info;

			amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
			amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
			if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
				freesync_capable = true;

			connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
			connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
		}
	}

update:
	if (dm_con_state)
		dm_con_state->freesync_capable = freesync_capable;

	if (connector->vrr_capable_property)
		drm_connector_set_vrr_capable_property(connector,
						       freesync_capable);
}
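
/*
 * FreeSync is only advertised above when the monitor's usable VRR window
 * is wider than 10 Hz (max_vfreq - min_vfreq > 10). For example, a panel
 * reporting a 48-144 Hz range qualifies (96 > 10), while a fixed 60-60 Hz
 * range does not (0 > 10 is false).
 */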

void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dc *dc = adev->dm.dc;
	int i;

	mutex_lock(&adev->dm.dc_lock);
	if (dc->current_state) {
		for (i = 0; i < dc->current_state->stream_count; ++i)
			dc->current_state->streams[i]
				->triggered_crtc_reset.enabled =
				adev->dm.force_timing_sync;

		dm_enable_per_frame_crtc_master_sync(dc->current_state);
		dc_trigger_sync(dc, dc->current_state);
	}
	mutex_unlock(&adev->dm.dc_lock);
}

void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
		       u32 value, const char *func_name)
{
#ifdef DM_CHECK_ADDR_0
	if (address == 0) {
		drm_err(adev_to_drm(ctx->driver_context),
			"invalid register write. address = 0\n");
		return;
	}
#endif
	cgs_write_register(ctx->cgs_device, address, value);
	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
}

uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
			  const char *func_name)
{
	u32 value;
#ifdef DM_CHECK_ADDR_0
	if (address == 0) {
		drm_err(adev_to_drm(ctx->driver_context),
			"invalid register read; address = 0\n");
		return 0;
	}
#endif

	if (ctx->dmub_srv &&
	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
		ASSERT(false);
		return 0;
	}

	value = cgs_read_register(ctx->cgs_device, address);

	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);

	return value;
}

int amdgpu_dm_process_dmub_aux_transfer_sync(
		struct dc_context *ctx,
		unsigned int link_index,
		struct aux_payload *payload,
		enum aux_return_code_type *operation_result)
{
	struct amdgpu_device *adev = ctx->driver_context;
	struct dmub_notification *p_notify = adev->dm.dmub_notify;
	int ret = -1;

	mutex_lock(&adev->dm.dpia_aux_lock);
	if (!dc_process_dmub_aux_transfer_async(ctx->dc, link_index, payload)) {
		*operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
		goto out;
	}

	if (!wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ)) {
		DRM_ERROR("wait_for_completion_timeout() timed out!\n");
		*operation_result = AUX_RET_ERROR_TIMEOUT;
		goto out;
	}

	if (p_notify->result != AUX_RET_SUCCESS) {
		/*
		 * Transient states before tunneling is enabled could
		 * lead to this error. We can ignore this for now.
		 */
		if (p_notify->result != AUX_RET_ERROR_PROTOCOL_ERROR) {
			DRM_WARN("DPIA AUX failed on 0x%x(%d), error %d\n",
				 payload->address, payload->length,
				 p_notify->result);
		}
		*operation_result = AUX_RET_ERROR_INVALID_REPLY;
		goto out;
	}

	payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
	if (!payload->write && p_notify->aux_reply.length &&
	    (payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK)) {

		if (payload->length != p_notify->aux_reply.length) {
			DRM_WARN("invalid read length %d from DPIA AUX 0x%x(%d)!\n",
				 p_notify->aux_reply.length,
				 payload->address, payload->length);
			*operation_result = AUX_RET_ERROR_INVALID_REPLY;
			goto out;
		}

		memcpy(payload->data, p_notify->aux_reply.data,
		       p_notify->aux_reply.length);
	}

	/* success */
	ret = p_notify->aux_reply.length;
	*operation_result = p_notify->result;
out:
	reinit_completion(&adev->dm.dmub_aux_transfer_done);
	mutex_unlock(&adev->dm.dpia_aux_lock);
	return ret;
}
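
/*
 * The DPIA AUX and SET_CONFIG paths share one pattern: fire an asynchronous
 * DMUB request, then block on a completion that the DMUB notification
 * handler signals. A minimal sketch of that pattern, assuming a hypothetical
 * 'request_async()' and the standard completion API (not driver code):
 *
 *	reinit_completion(&done);
 *	if (!request_async(...))
 *		return -EIO;			// couldn't even queue it
 *	if (!wait_for_completion_timeout(&done, 10 * HZ))
 *		return -ETIMEDOUT;		// no notification arrived
 *	// ...consume the reply published by the notification handler...
 */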

int amdgpu_dm_process_dmub_set_config_sync(
		struct dc_context *ctx,
		unsigned int link_index,
		struct set_config_cmd_payload *payload,
		enum set_config_status *operation_result)
{
	struct amdgpu_device *adev = ctx->driver_context;
	bool is_cmd_complete;
	int ret;

	mutex_lock(&adev->dm.dpia_aux_lock);
	is_cmd_complete = dc_process_dmub_set_config_async(ctx->dc,
			link_index, payload, adev->dm.dmub_notify);

	if (is_cmd_complete || wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ)) {
		ret = 0;
		*operation_result = adev->dm.dmub_notify->sc_status;
	} else {
		DRM_ERROR("wait_for_completion_timeout() timed out!\n");
		ret = -1;
		*operation_result = SET_CONFIG_UNKNOWN_ERROR;
	}

	if (!is_cmd_complete)
		reinit_completion(&adev->dm.dmub_aux_transfer_done);
	mutex_unlock(&adev->dm.dpia_aux_lock);
	return ret;
}

bool dm_execute_dmub_cmd(const struct dc_context *ctx, union dmub_rb_cmd *cmd,
			 enum dm_dmub_wait_type wait_type)
{
	return dc_dmub_srv_cmd_run(ctx->dmub_srv, cmd, wait_type);
}

bool dm_execute_dmub_cmd_list(const struct dc_context *ctx, unsigned int count,
			      union dmub_rb_cmd *cmd,
			      enum dm_dmub_wait_type wait_type)
{
	return dc_dmub_srv_cmd_run_list(ctx->dmub_srv, count, cmd, wait_type);
}
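
/*
 * dm_execute_dmub_cmd() is the DM-side entry point that DC and the helpers
 * above (e.g. dm_edid_parser_send_cea()) use to submit a single DMUB
 * command. A minimal usage sketch, mirroring the EDID CEA helper (the
 * zero-filled command and the reply-wait type come straight from it):
 *
 *	union dmub_rb_cmd cmd;
 *
 *	memset(&cmd, 0, sizeof(cmd));
 *	cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
 *	// ...fill in the rest of the payload...
 *	if (!dm_execute_dmub_cmd(dm->dc->ctx, &cmd,
 *				 DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
 *		return false;	// command failed or no reply
 */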